/*
 * Copyright (c) 2010-2013 ARM Limited
 * Copyright (c) 2013 Advanced Micro Devices, Inc.
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2004-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Kevin Lim
 *          Korey Sewell
 */

#ifndef __CPU_O3_LSQ_UNIT_IMPL_HH__
#define __CPU_O3_LSQ_UNIT_IMPL_HH__

#include "arch/generic/debugfaults.hh"
#include "arch/locked_mem.hh"
#include "base/str.hh"
#include "config/the_isa.hh"
#include "cpu/checker/cpu.hh"
#include "cpu/o3/lsq.hh"
#include "cpu/o3/lsq_unit.hh"
#include "debug/Activity.hh"
#include "debug/IEW.hh"
#include "debug/LSQUnit.hh"
#include "debug/O3PipeView.hh"
#include "mem/packet.hh"
#include "mem/request.hh"

template<class Impl>
LSQUnit<Impl>::WritebackEvent::WritebackEvent(DynInstPtr &_inst, PacketPtr _pkt,
                                              LSQUnit *lsq_ptr)
    : Event(Default_Pri, AutoDelete),
      inst(_inst), pkt(_pkt), lsqPtr(lsq_ptr)
{
}

template<class Impl>
void
LSQUnit<Impl>::WritebackEvent::process()
{
    assert(!lsqPtr->cpu->switchedOut());

    lsqPtr->writeback(inst, pkt);

    if (pkt->senderState)
        delete pkt->senderState;

    delete pkt->req;
    delete pkt;
}

template<class Impl>
const char *
LSQUnit<Impl>::WritebackEvent::description() const
{
    return "Store writeback";
}

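// Handle a response returning from the data cache.  For split accesses the
// writeback is deferred until all constituent packets have arrived; squashed
// instructions only have their request and packet state cleaned up.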
template<class Impl>
void
LSQUnit<Impl>::completeDataAccess(PacketPtr pkt)
{
    LSQSenderState *state = dynamic_cast<LSQSenderState *>(pkt->senderState);
    DynInstPtr inst = state->inst;
    DPRINTF(IEW, "Writeback event [sn:%lli].\n", inst->seqNum);
    DPRINTF(Activity, "Activity: Writeback event [sn:%lli].\n", inst->seqNum);

    //iewStage->ldstQueue.removeMSHR(inst->threadNumber,inst->seqNum);

    // If this is a split access, wait until all packets are received.
    if (TheISA::HasUnalignedMemAcc && !state->complete()) {
        delete pkt->req;
        delete pkt;
        return;
    }

    assert(!cpu->switchedOut());
    if (!inst->isSquashed()) {
        if (!state->noWB) {
            if (!TheISA::HasUnalignedMemAcc || !state->isSplit ||
                !state->isLoad) {
                writeback(inst, pkt);
            } else {
                writeback(inst, state->mainPkt);
            }
        }

        if (inst->isStore()) {
            completeStore(state->idx);
        }
    }

    if (TheISA::HasUnalignedMemAcc && state->isSplit && state->isLoad) {
        delete state->mainPkt->req;
        delete state->mainPkt;
    }

    pkt->req->setAccessLatency();
    cpu->ppDataAccessComplete->notify(std::make_pair(inst, pkt));

    delete state;
    delete pkt->req;
    delete pkt;
}

template <class Impl>
LSQUnit<Impl>::LSQUnit()
    : loads(0), stores(0), storesToWB(0), cacheBlockMask(0), stalled(false),
      isStoreBlocked(false), isLoadBlocked(false),
      loadBlockedHandled(false), storeInFlight(false), hasPendingPkt(false)
{
}

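// Wire the unit to its CPU, IEW stage, and parent LSQ, and size the load and
// store queues.  One extra sentinel slot is allocated per queue so the
// circular indices can distinguish the full and empty cases.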
template<class Impl>
void
LSQUnit<Impl>::init(O3CPU *cpu_ptr, IEW *iew_ptr, DerivO3CPUParams *params,
        LSQ *lsq_ptr, unsigned maxLQEntries, unsigned maxSQEntries,
        unsigned id)
{
    cpu = cpu_ptr;
    iewStage = iew_ptr;

    lsq = lsq_ptr;

    lsqID = id;

    DPRINTF(LSQUnit, "Creating LSQUnit%i object.\n",id);

    // Add 1 for the sentinel entry (they are circular queues).
    LQEntries = maxLQEntries + 1;
    SQEntries = maxSQEntries + 1;

    // Due to the uint8_t index in LSQSenderState.
    assert(LQEntries <= 256);
    assert(SQEntries <= 256);

    loadQueue.resize(LQEntries);
    storeQueue.resize(SQEntries);

    depCheckShift = params->LSQDepCheckShift;
    checkLoads = params->LSQCheckLoads;
    cachePorts = params->cachePorts;
    needsTSO = params->needsTSO;

    resetState();
}


template<class Impl>
void
LSQUnit<Impl>::resetState()
{
    loads = stores = storesToWB = 0;

    loadHead = loadTail = 0;

    storeHead = storeWBIdx = storeTail = 0;

    usedPorts = 0;

    retryPkt = NULL;
    memDepViolator = NULL;

    blockedLoadSeqNum = 0;

    stalled = false;
    isLoadBlocked = false;
    loadBlockedHandled = false;

    cacheBlockMask = ~(cpu->cacheLineSize() - 1);
}

template<class Impl>
std::string
LSQUnit<Impl>::name() const
{
    if (Impl::MaxThreads == 1) {
        return iewStage->name() + ".lsq";
    } else {
        return iewStage->name() + ".lsq.thread" + to_string(lsqID);
    }
}

template<class Impl>
void
LSQUnit<Impl>::regStats()
{
    lsqForwLoads
        .name(name() + ".forwLoads")
        .desc("Number of loads that had data forwarded from stores");

    invAddrLoads
        .name(name() + ".invAddrLoads")
        .desc("Number of loads ignored due to an invalid address");

    lsqSquashedLoads
        .name(name() + ".squashedLoads")
        .desc("Number of loads squashed");

    lsqIgnoredResponses
        .name(name() + ".ignoredResponses")
        .desc("Number of memory responses ignored because the instruction is squashed");

    lsqMemOrderViolation
        .name(name() + ".memOrderViolation")
        .desc("Number of memory ordering violations");

    lsqSquashedStores
        .name(name() + ".squashedStores")
        .desc("Number of stores squashed");

    invAddrSwpfs
        .name(name() + ".invAddrSwpfs")
        .desc("Number of software prefetches ignored due to an invalid address");

    lsqBlockedLoads
        .name(name() + ".blockedLoads")
        .desc("Number of blocked loads due to partial load-store forwarding");

    lsqRescheduledLoads
        .name(name() + ".rescheduledLoads")
        .desc("Number of loads that were rescheduled");

    lsqCacheBlocked
        .name(name() + ".cacheBlocked")
        .desc("Number of times an access to memory failed due to the cache being blocked");
}

template<class Impl>
void
LSQUnit<Impl>::setDcachePort(MasterPort *dcache_port)
{
    dcachePort = dcache_port;
}

template<class Impl>
void
LSQUnit<Impl>::clearLQ()
{
    loadQueue.clear();
}

template<class Impl>
void
LSQUnit<Impl>::clearSQ()
{
    storeQueue.clear();
}

template<class Impl>
void
LSQUnit<Impl>::drainSanityCheck() const
{
    for (int i = 0; i < loadQueue.size(); ++i)
        assert(!loadQueue[i]);

    assert(storesToWB == 0);
    assert(!retryPkt);
}

template<class Impl>
void
LSQUnit<Impl>::takeOverFrom()
{
    resetState();
}

template<class Impl>
void
LSQUnit<Impl>::resizeLQ(unsigned size)
{
    unsigned size_plus_sentinel = size + 1;
    assert(size_plus_sentinel >= LQEntries);

    if (size_plus_sentinel > LQEntries) {
        while (size_plus_sentinel > loadQueue.size()) {
            DynInstPtr dummy;
            loadQueue.push_back(dummy);
            LQEntries++;
        }
    } else {
        LQEntries = size_plus_sentinel;
    }

    assert(LQEntries <= 256);
}

template<class Impl>
void
LSQUnit<Impl>::resizeSQ(unsigned size)
{
    unsigned size_plus_sentinel = size + 1;
    if (size_plus_sentinel > SQEntries) {
        while (size_plus_sentinel > storeQueue.size()) {
            SQEntry dummy;
            storeQueue.push_back(dummy);
            SQEntries++;
        }
    } else {
        SQEntries = size_plus_sentinel;
    }

    assert(SQEntries <= 256);
}

template <class Impl>
void
LSQUnit<Impl>::insert(DynInstPtr &inst)
{
    assert(inst->isMemRef());

    assert(inst->isLoad() || inst->isStore());

    if (inst->isLoad()) {
        insertLoad(inst);
    } else {
        insertStore(inst);
    }

    inst->setInLSQ();
}

template <class Impl>
void
LSQUnit<Impl>::insertLoad(DynInstPtr &load_inst)
{
    assert((loadTail + 1) % LQEntries != loadHead);
    assert(loads < LQEntries);

    DPRINTF(LSQUnit, "Inserting load PC %s, idx:%i [sn:%lli]\n",
            load_inst->pcState(), loadTail, load_inst->seqNum);

    load_inst->lqIdx = loadTail;

    if (stores == 0) {
        load_inst->sqIdx = -1;
    } else {
        load_inst->sqIdx = storeTail;
    }

    loadQueue[loadTail] = load_inst;

    incrLdIdx(loadTail);

    ++loads;
}

template <class Impl>
void
LSQUnit<Impl>::insertStore(DynInstPtr &store_inst)
{
    // Make sure it is not full before inserting an instruction.
    assert((storeTail + 1) % SQEntries != storeHead);
    assert(stores < SQEntries);

    DPRINTF(LSQUnit, "Inserting store PC %s, idx:%i [sn:%lli]\n",
            store_inst->pcState(), storeTail, store_inst->seqNum);

    store_inst->sqIdx = storeTail;
    store_inst->lqIdx = loadTail;

    storeQueue[storeTail] = SQEntry(store_inst);

    incrStIdx(storeTail);

    ++stores;
}

template <class Impl>
typename Impl::DynInstPtr
LSQUnit<Impl>::getMemDepViolator()
{
    DynInstPtr temp = memDepViolator;

    memDepViolator = NULL;

    return temp;
}

template <class Impl>
unsigned
LSQUnit<Impl>::numFreeLoadEntries()
{
    // The LQ has an extra dummy entry to differentiate the empty/full
    // conditions, so subtract 1 from the free entries.
    DPRINTF(LSQUnit, "LQ size: %d, #loads occupied: %d\n", LQEntries, loads);
    return LQEntries - loads - 1;
}

template <class Impl>
unsigned
LSQUnit<Impl>::numFreeStoreEntries()
{
    // The SQ has an extra dummy entry to differentiate the empty/full
    // conditions, so subtract 1 from the free entries.
    DPRINTF(LSQUnit, "SQ size: %d, #stores occupied: %d\n", SQEntries, stores);
    return SQEntries - stores - 1;
}

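// Called when an external snoop is observed on the data port.  Clears the
// CPU-local LL/SC monitors, then walks the load queue and flags any in-flight
// loads that may have observed the invalidated block so they re-execute;
// under TSO all subsequent loads are flagged as well.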
template <class Impl>
void
LSQUnit<Impl>::checkSnoop(PacketPtr pkt)
{
    int load_idx = loadHead;
    DPRINTF(LSQUnit, "Got snoop for address %#x\n", pkt->getAddr());

    // Unlock the cpu-local monitor when the CPU sees a snoop to a locked
    // address. The CPU can speculatively execute a LL operation after a
    // pending SC operation in the pipeline, and that can make the cache
    // monitor the CPU is connected to appear valid when it really
    // shouldn't be.
    for (int x = 0; x < cpu->numContexts(); x++) {
        ThreadContext *tc = cpu->getContext(x);
        bool no_squash = cpu->thread[x]->noSquashFromTC;
        cpu->thread[x]->noSquashFromTC = true;
        TheISA::handleLockedSnoop(tc, pkt, cacheBlockMask);
        cpu->thread[x]->noSquashFromTC = no_squash;
    }

    Addr invalidate_addr = pkt->getAddr() & cacheBlockMask;

    DynInstPtr ld_inst = loadQueue[load_idx];
    if (ld_inst) {
        Addr load_addr = ld_inst->physEffAddr & cacheBlockMask;
        // Check that this snoop didn't just invalidate our lock flag.
        if (ld_inst->effAddrValid() && load_addr == invalidate_addr &&
            ld_inst->memReqFlags & Request::LLSC)
            TheISA::handleLockedSnoopHit(ld_inst.get());
    }

    // If this is the only load in the LSQ we don't care.
    if (load_idx == loadTail)
        return;

    incrLdIdx(load_idx);

    bool force_squash = false;

    while (load_idx != loadTail) {
        DynInstPtr ld_inst = loadQueue[load_idx];

        if (!ld_inst->effAddrValid() || ld_inst->uncacheable()) {
            incrLdIdx(load_idx);
            continue;
        }

        Addr load_addr = ld_inst->physEffAddr & cacheBlockMask;
        DPRINTF(LSQUnit, "-- inst [sn:%lli] load_addr: %#x to pktAddr:%#x\n",
                    ld_inst->seqNum, load_addr, invalidate_addr);

        if (load_addr == invalidate_addr || force_squash) {
            if (needsTSO) {
                // If we have a TSO system, as all loads must be ordered with
                // all other loads, this load as well as *all* subsequent loads
                // need to be squashed to prevent possible load reordering.
                force_squash = true;
            }
            if (ld_inst->possibleLoadViolation() || force_squash) {
                DPRINTF(LSQUnit, "Conflicting load at addr %#x [sn:%lli]\n",
                        pkt->getAddr(), ld_inst->seqNum);

                // Mark the load for re-execution.
                ld_inst->fault = new ReExec;
            } else {
                DPRINTF(LSQUnit, "HitExternal Snoop for addr %#x [sn:%lli]\n",
                        pkt->getAddr(), ld_inst->seqNum);

                // Make sure that we don't lose a snoop hitting a LOCKED
                // address since the LOCK* flags don't get updated until
                // commit.
                if (ld_inst->memReqFlags & Request::LLSC)
                    TheISA::handleLockedSnoopHit(ld_inst.get());

                // If an older load checks this and it's true, then we might
                // have missed the snoop, in which case we need to invalidate
                // to be sure.
                ld_inst->hitExternalSnoop(true);
            }
        }
        incrLdIdx(load_idx);
    }
    return;
}

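// Scan the load queue from load_idx to the tail for loads whose effective
// address range overlaps that of 'inst'.  Depending on whether the
// overlapping load has already seen an external snoop, this either records a
// possible violation or returns a fault so the pipeline squashes and
// re-fetches.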
template <class Impl>
Fault
LSQUnit<Impl>::checkViolations(int load_idx, DynInstPtr &inst)
{
    Addr inst_eff_addr1 = inst->effAddr >> depCheckShift;
    Addr inst_eff_addr2 = (inst->effAddr + inst->effSize - 1) >> depCheckShift;

    /** @todo In theory you only need to check an instruction that has
     * executed, however there isn't a good way in the pipeline at the moment
     * to check all instructions that will execute before the store writes
     * back. Thus, like the implementation that came before it, we're overly
     * conservative.
     */
    while (load_idx != loadTail) {
        DynInstPtr ld_inst = loadQueue[load_idx];
        if (!ld_inst->effAddrValid() || ld_inst->uncacheable()) {
            incrLdIdx(load_idx);
            continue;
        }

        Addr ld_eff_addr1 = ld_inst->effAddr >> depCheckShift;
        Addr ld_eff_addr2 =
            (ld_inst->effAddr + ld_inst->effSize - 1) >> depCheckShift;

        if (inst_eff_addr2 >= ld_eff_addr1 && inst_eff_addr1 <= ld_eff_addr2) {
            if (inst->isLoad()) {
                // If this load is to the same block as an external snoop
                // invalidate that we've observed, then the load needs to be
                // squashed as it could have newer data.
                if (ld_inst->hitExternalSnoop()) {
                    if (!memDepViolator ||
                            ld_inst->seqNum < memDepViolator->seqNum) {
                        DPRINTF(LSQUnit, "Detected fault with inst [sn:%lli] "
                                "and [sn:%lli] at address %#x\n",
                                inst->seqNum, ld_inst->seqNum, ld_eff_addr1);
                        memDepViolator = ld_inst;

                        ++lsqMemOrderViolation;

                        return new GenericISA::M5PanicFault(
                                "Detected fault with inst [sn:%lli] and "
                                "[sn:%lli] at address %#x\n",
                                inst->seqNum, ld_inst->seqNum, ld_eff_addr1);
                    }
                }

                // Otherwise, mark the load as having a possible load
                // violation; if we see a snoop before it's committed, we
                // need to squash.
                ld_inst->possibleLoadViolation(true);
                DPRINTF(LSQUnit, "Found possible load violation at addr: %#x"
                        " between instructions [sn:%lli] and [sn:%lli]\n",
                        inst_eff_addr1, inst->seqNum, ld_inst->seqNum);
            } else {
                // A load/store incorrectly passed this store.  If we already
                // track an older violator, keep it; otherwise record this
                // load as the violator, then squash and refetch.
                if (memDepViolator && ld_inst->seqNum > memDepViolator->seqNum)
                    break;

                DPRINTF(LSQUnit, "Detected fault with inst [sn:%lli] and "
                        "[sn:%lli] at address %#x\n",
                        inst->seqNum, ld_inst->seqNum, ld_eff_addr1);
                memDepViolator = ld_inst;

                ++lsqMemOrderViolation;

                return new GenericISA::M5PanicFault("Detected fault with "
                        "inst [sn:%lli] and [sn:%lli] at address %#x\n",
                        inst->seqNum, ld_inst->seqNum, ld_eff_addr1);
            }
        }

        incrLdIdx(load_idx);
    }
    return NoFault;
}

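// Initiate the memory access for a load.  Faulting or false-predicated loads
// are forwarded to commit without completing; otherwise younger loads are
// checked for ordering violations when LSQCheckLoads is enabled.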
template <class Impl>
Fault
LSQUnit<Impl>::executeLoad(DynInstPtr &inst)
{
    using namespace TheISA;
    // Execute a specific load.
    Fault load_fault = NoFault;

    DPRINTF(LSQUnit, "Executing load PC %s, [sn:%lli]\n",
            inst->pcState(), inst->seqNum);

    assert(!inst->isSquashed());

    load_fault = inst->initiateAcc();

    if (inst->isTranslationDelayed() &&
        load_fault == NoFault)
        return load_fault;

    // If the instruction faulted or predicated false, then we need to send it
    // along to commit without the instruction completing.
    if (load_fault != NoFault || !inst->readPredicate()) {
        // Send this instruction to commit, also make sure iew stage
        // realizes there is activity.
        // Mark it as executed unless it is an uncached load that
        // needs to hit the head of commit.
        if (!inst->readPredicate())
            inst->forwardOldRegs();
        DPRINTF(LSQUnit, "Load [sn:%lli] not executed from %s\n",
                inst->seqNum,
                (load_fault != NoFault ? "fault" : "predication"));
        if (!(inst->hasRequest() && inst->uncacheable()) ||
            inst->isAtCommit()) {
            inst->setExecuted();
        }
        iewStage->instToCommit(inst);
        iewStage->activityThisCycle();
    } else if (!loadBlocked()) {
        assert(inst->effAddrValid());
        int load_idx = inst->lqIdx;
        incrLdIdx(load_idx);

        if (checkLoads)
            return checkViolations(load_idx, inst);
    }

    return load_fault;
}

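// Initiate the memory access for a store, then check already-executed loads
// younger than the store for ordering violations.  Store conditionals mark
// themselves as able to write back here if no fault has occurred.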
template <class Impl>
Fault
LSQUnit<Impl>::executeStore(DynInstPtr &store_inst)
{
    using namespace TheISA;
    // Make sure that a store exists.
    assert(stores != 0);

    int store_idx = store_inst->sqIdx;

    DPRINTF(LSQUnit, "Executing store PC %s [sn:%lli]\n",
            store_inst->pcState(), store_inst->seqNum);

    assert(!store_inst->isSquashed());

    // Check the recently completed loads to see if any match this store's
    // address.  If so, then we have a memory ordering violation.
    int load_idx = store_inst->lqIdx;

    Fault store_fault = store_inst->initiateAcc();

    if (store_inst->isTranslationDelayed() &&
        store_fault == NoFault)
        return store_fault;

    if (!store_inst->readPredicate())
        store_inst->forwardOldRegs();

    if (storeQueue[store_idx].size == 0) {
        DPRINTF(LSQUnit,"Fault on Store PC %s, [sn:%lli], Size = 0\n",
                store_inst->pcState(), store_inst->seqNum);

        return store_fault;
    } else if (!store_inst->readPredicate()) {
        DPRINTF(LSQUnit, "Store [sn:%lli] not executed from predication\n",
                store_inst->seqNum);
        return store_fault;
    }

    assert(store_fault == NoFault);

    if (store_inst->isStoreConditional()) {
        // Store conditionals need to set themselves as able to
        // writeback if we haven't had a fault by here.
        storeQueue[store_idx].canWB = true;

        ++storesToWB;
    }

    return checkViolations(load_idx, store_inst);
}

template <class Impl>
void
LSQUnit<Impl>::commitLoad()
{
    assert(loadQueue[loadHead]);

    DPRINTF(LSQUnit, "Committing head load instruction, PC %s\n",
            loadQueue[loadHead]->pcState());

    loadQueue[loadHead] = NULL;

    incrLdIdx(loadHead);

    --loads;
}

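// Commit, i.e. remove from the load queue, all loads up to and including the
// given sequence number.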
template <class Impl>
void
LSQUnit<Impl>::commitLoads(InstSeqNum &youngest_inst)
{
    assert(loads == 0 || loadQueue[loadHead]);

    while (loads != 0 && loadQueue[loadHead]->seqNum <= youngest_inst) {
        commitLoad();
    }
}

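// Mark every store up to and including the given sequence number as able to
// write back.  The entries stay in the store queue until writebackStores()
// has issued them to memory.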
template <class Impl>
void
LSQUnit<Impl>::commitStores(InstSeqNum &youngest_inst)
{
    assert(stores == 0 || storeQueue[storeHead].inst);

    int store_idx = storeHead;

    while (store_idx != storeTail) {
        assert(storeQueue[store_idx].inst);
        // Mark any stores that are now committed and have not yet
        // been marked as able to write back.
        if (!storeQueue[store_idx].canWB) {
            if (storeQueue[store_idx].inst->seqNum > youngest_inst) {
                break;
            }
            DPRINTF(LSQUnit, "Marking store as able to write back, PC "
                    "%s [sn:%lli]\n",
                    storeQueue[store_idx].inst->pcState(),
                    storeQueue[store_idx].inst->seqNum);

            storeQueue[store_idx].canWB = true;

            ++storesToWB;
        }

        incrStIdx(store_idx);
    }
}

template <class Impl>
void
LSQUnit<Impl>::writebackPendingStore()
{
    if (hasPendingPkt) {
        assert(pendingPkt != NULL);

        // If the cache is blocked, this will store the packet for retry.
        if (sendStore(pendingPkt)) {
            storePostSend(pendingPkt);
        }
        pendingPkt = NULL;
        hasPendingPkt = false;
    }
}

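// Issue committed stores to the data cache.  Stores are written back in
// order, limited by the number of cache ports per cycle and only while the
// cache is not blocked; under TSO at most one store may be in flight at a
// time.  A split store may leave its second packet pending for a later cycle.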
template <class Impl>
void
LSQUnit<Impl>::writebackStores()
{
    // First writeback the second packet from any split store that didn't
    // complete last cycle because there weren't enough cache ports available.
    if (TheISA::HasUnalignedMemAcc) {
        writebackPendingStore();
    }

    while (storesToWB > 0 &&
           storeWBIdx != storeTail &&
           storeQueue[storeWBIdx].inst &&
           storeQueue[storeWBIdx].canWB &&
           ((!needsTSO) || (!storeInFlight)) &&
           usedPorts < cachePorts) {

        if (isStoreBlocked || lsq->cacheBlocked()) {
            DPRINTF(LSQUnit, "Unable to write back any more stores, cache"
                    " is blocked!\n");
            break;
        }

        // Store didn't write any data so no need to write it back to
        // memory.
        if (storeQueue[storeWBIdx].size == 0) {
            completeStore(storeWBIdx);

            incrStIdx(storeWBIdx);

            continue;
        }

        ++usedPorts;

        if (storeQueue[storeWBIdx].inst->isDataPrefetch()) {
            incrStIdx(storeWBIdx);

            continue;
        }

        assert(storeQueue[storeWBIdx].req);
        assert(!storeQueue[storeWBIdx].committed);

        if (TheISA::HasUnalignedMemAcc && storeQueue[storeWBIdx].isSplit) {
            assert(storeQueue[storeWBIdx].sreqLow);
            assert(storeQueue[storeWBIdx].sreqHigh);
        }

        DynInstPtr inst = storeQueue[storeWBIdx].inst;

        Request *req = storeQueue[storeWBIdx].req;
        RequestPtr sreqLow = storeQueue[storeWBIdx].sreqLow;
        RequestPtr sreqHigh = storeQueue[storeWBIdx].sreqHigh;

        storeQueue[storeWBIdx].committed = true;

        assert(!inst->memData);
        inst->memData = new uint8_t[req->getSize()];

        if (storeQueue[storeWBIdx].isAllZeros)
            memset(inst->memData, 0, req->getSize());
        else
            memcpy(inst->memData, storeQueue[storeWBIdx].data, req->getSize());

        MemCmd command =
            req->isSwap() ? MemCmd::SwapReq :
            (req->isLLSC() ? MemCmd::StoreCondReq : MemCmd::WriteReq);
        PacketPtr data_pkt;
        PacketPtr snd_data_pkt = NULL;

        LSQSenderState *state = new LSQSenderState;
        state->isLoad = false;
        state->idx = storeWBIdx;
        state->inst = inst;

        if (!TheISA::HasUnalignedMemAcc || !storeQueue[storeWBIdx].isSplit) {

            // Build a single data packet if the store isn't split.
            data_pkt = new Packet(req, command);
            data_pkt->dataStatic(inst->memData);
            data_pkt->senderState = state;
        } else {
            // Create two packets if the store is split in two.
            data_pkt = new Packet(sreqLow, command);
            snd_data_pkt = new Packet(sreqHigh, command);

            data_pkt->dataStatic(inst->memData);
            snd_data_pkt->dataStatic(inst->memData + sreqLow->getSize());

            data_pkt->senderState = state;
            snd_data_pkt->senderState = state;

            state->isSplit = true;
            state->outstanding = 2;

            // Can delete the main request now.
            delete req;
            req = sreqLow;
        }

        DPRINTF(LSQUnit, "D-Cache: Writing back store idx:%i PC:%s "
                "to Addr:%#x, data:%#x [sn:%lli]\n",
                storeWBIdx, inst->pcState(),
                req->getPaddr(), (int)*(inst->memData),
                inst->seqNum);

        // @todo: Remove this SC hack once the memory system handles it.
        if (inst->isStoreConditional()) {
            assert(!storeQueue[storeWBIdx].isSplit);
            // Disable recording the result temporarily.  Writing to
            // misc regs normally updates the result, but this is not
            // the desired behavior when handling store conditionals.
            inst->recordResult(false);
            bool success = TheISA::handleLockedWrite(inst.get(), req, cacheBlockMask);
            inst->recordResult(true);

            if (!success) {
                // Instantly complete this store.
                DPRINTF(LSQUnit, "Store conditional [sn:%lli] failed.  "
                        "Instantly completing it.\n",
                        inst->seqNum);
                WritebackEvent *wb = new WritebackEvent(inst, data_pkt, this);
                cpu->schedule(wb, curTick() + 1);
                if (cpu->checker) {
                    // Make sure to set the LLSC data for verification
                    // if the checker is loaded.
                    inst->reqToVerify->setExtraData(0);
                    inst->completeAcc(data_pkt);
                }
                completeStore(storeWBIdx);
                incrStIdx(storeWBIdx);
                continue;
            }
        } else {
            // Non-store conditionals do not need a writeback.
            state->noWB = true;
        }

        bool split =
            TheISA::HasUnalignedMemAcc && storeQueue[storeWBIdx].isSplit;

        ThreadContext *thread = cpu->tcBase(lsqID);

        if (req->isMmappedIpr()) {
            assert(!inst->isStoreConditional());
            TheISA::handleIprWrite(thread, data_pkt);
            delete data_pkt;
            if (split) {
                assert(snd_data_pkt->req->isMmappedIpr());
                TheISA::handleIprWrite(thread, snd_data_pkt);
                delete snd_data_pkt;
                delete sreqLow;
                delete sreqHigh;
            }
            delete state;
            delete req;
            completeStore(storeWBIdx);
            incrStIdx(storeWBIdx);
        } else if (!sendStore(data_pkt)) {
            DPRINTF(IEW, "D-Cache became blocked when writing [sn:%lli], "
                    "will retry later\n",
                    inst->seqNum);

            // Need to store the second packet, if split.
            if (split) {
                state->pktToSend = true;
                state->pendingPacket = snd_data_pkt;
            }
        } else {

            // If split, try to send the second packet too.
            if (split) {
                assert(snd_data_pkt);

                // Ensure there are enough ports to use.
                if (usedPorts < cachePorts) {
                    ++usedPorts;
                    if (sendStore(snd_data_pkt)) {
                        storePostSend(snd_data_pkt);
                    } else {
                        DPRINTF(IEW, "D-Cache became blocked when writing"
                                " [sn:%lli] second packet, will retry later\n",
                                inst->seqNum);
                    }
                } else {

                    // Store the packet for when there are free ports.
                    assert(pendingPkt == NULL);
                    pendingPkt = snd_data_pkt;
                    hasPendingPkt = true;
                }
            } else {

                // Not a split store.
                storePostSend(data_pkt);
            }
        }
    }

    // Not sure this should set it to 0.
    usedPorts = 0;

    assert(stores >= 0 && storesToWB >= 0);
}

/*template <class Impl>
void
LSQUnit<Impl>::removeMSHR(InstSeqNum seqNum)
{
    list<InstSeqNum>::iterator mshr_it = find(mshrSeqNums.begin(),
                                              mshrSeqNums.end(),
                                              seqNum);

    if (mshr_it != mshrSeqNums.end()) {
        mshrSeqNums.erase(mshr_it);
        DPRINTF(LSQUnit, "Removing MSHR. count = %i\n",mshrSeqNums.size());
    }
}*/

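// Squash every load and store younger than squashed_num.  Stores already
// marked as able to write back are left alone, since they are committed and
// must still reach memory.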
993template <class Impl>
994void
995LSQUnit<Impl>::squash(const InstSeqNum &squashed_num)
996{
997    DPRINTF(LSQUnit, "Squashing until [sn:%lli]!"
998            "(Loads:%i Stores:%i)\n", squashed_num, loads, stores);

    int load_idx = loadTail;
    decrLdIdx(load_idx);

    while (loads != 0 && loadQueue[load_idx]->seqNum > squashed_num) {
        DPRINTF(LSQUnit,"Load Instruction PC %s squashed, "
                "[sn:%lli]\n",
                loadQueue[load_idx]->pcState(),
                loadQueue[load_idx]->seqNum);

        if (isStalled() && load_idx == stallingLoadIdx) {
            stalled = false;
            stallingStoreIsn = 0;
            stallingLoadIdx = 0;
        }

        // Clear the smart pointer to make sure its reference count is
        // decremented.
        loadQueue[load_idx]->setSquashed();
        loadQueue[load_idx] = NULL;
        --loads;

        // Inefficient!
        loadTail = load_idx;

        decrLdIdx(load_idx);
        ++lsqSquashedLoads;
    }

    if (isLoadBlocked) {
        if (squashed_num < blockedLoadSeqNum) {
            isLoadBlocked = false;
            loadBlockedHandled = false;
            blockedLoadSeqNum = 0;
        }
    }

    if (memDepViolator && squashed_num < memDepViolator->seqNum) {
        memDepViolator = NULL;
    }

    int store_idx = storeTail;
    decrStIdx(store_idx);

    while (stores != 0 &&
           storeQueue[store_idx].inst->seqNum > squashed_num) {
        // Instructions marked as can WB are already committed.
        if (storeQueue[store_idx].canWB) {
            break;
        }

        DPRINTF(LSQUnit,"Store Instruction PC %s squashed, "
                "idx:%i [sn:%lli]\n",
                storeQueue[store_idx].inst->pcState(),
                store_idx, storeQueue[store_idx].inst->seqNum);

        // I don't think this can happen.  It should have been cleared
        // by the stalling load.
        if (isStalled() &&
            storeQueue[store_idx].inst->seqNum == stallingStoreIsn) {
            panic("Is stalled should have been cleared by stalling load!\n");
            stalled = false;
            stallingStoreIsn = 0;
        }

        // Clear the smart pointer to make sure its reference count is
        // decremented.
        storeQueue[store_idx].inst->setSquashed();
        storeQueue[store_idx].inst = NULL;
        storeQueue[store_idx].canWB = 0;

        // Must delete request now that it wasn't handed off to
        // memory.  This is quite ugly.  @todo: Figure out the proper
        // place to really handle request deletes.
        delete storeQueue[store_idx].req;
        if (TheISA::HasUnalignedMemAcc && storeQueue[store_idx].isSplit) {
            delete storeQueue[store_idx].sreqLow;
            delete storeQueue[store_idx].sreqHigh;

            storeQueue[store_idx].sreqLow = NULL;
            storeQueue[store_idx].sreqHigh = NULL;
        }

        storeQueue[store_idx].req = NULL;
        --stores;

        // Inefficient!
        storeTail = store_idx;

        decrStIdx(store_idx);
        ++lsqSquashedStores;
    }
}

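// Bookkeeping performed after a store packet has been sent to the cache.
// Clears any stall this store was responsible for, marks
// non-store-conditional stores as completed now that their data has been
// handed to the memory system, and advances the writeback index.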
template <class Impl>
void
LSQUnit<Impl>::storePostSend(PacketPtr pkt)
{
    if (isStalled() &&
        storeQueue[storeWBIdx].inst->seqNum == stallingStoreIsn) {
        DPRINTF(LSQUnit, "Unstalling, stalling store [sn:%lli] "
                "load idx:%i\n",
                stallingStoreIsn, stallingLoadIdx);
        stalled = false;
        stallingStoreIsn = 0;
        iewStage->replayMemInst(loadQueue[stallingLoadIdx]);
    }

    if (!storeQueue[storeWBIdx].inst->isStoreConditional()) {
        // The store is basically completed at this time. This
        // only works so long as the checker doesn't try to
        // verify the value in memory for stores.
        storeQueue[storeWBIdx].inst->setCompleted();

        if (cpu->checker) {
            cpu->checker->verify(storeQueue[storeWBIdx].inst);
        }
    }

    if (needsTSO) {
        storeInFlight = true;
    }

    incrStIdx(storeWBIdx);
}

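// Completes a memory instruction once its response has returned.
// Squashed instructions are dropped; otherwise the access is completed
// and the instruction is handed to the IEW stage for commit.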
template <class Impl>
void
LSQUnit<Impl>::writeback(DynInstPtr &inst, PacketPtr pkt)
{
    iewStage->wakeCPU();

    // Squashed instructions do not need to complete their access.
    if (inst->isSquashed()) {
        assert(!inst->isStore());
        ++lsqIgnoredResponses;
        return;
    }

    if (!inst->isExecuted()) {
        inst->setExecuted();

        // Complete access to copy data to proper place.
        inst->completeAcc(pkt);
    }

    // Need to insert instruction into queue to commit
    iewStage->instToCommit(inst);

    iewStage->activityThisCycle();

    // see if this load changed the PC
    iewStage->checkMisprediction(inst);
}

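// Marks a store queue entry as completed and frees contiguous completed
// entries from the head of the store queue.  Also clears any stall the
// store was responsible for and notifies the checker, if one is present.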
template <class Impl>
void
LSQUnit<Impl>::completeStore(int store_idx)
{
    assert(storeQueue[store_idx].inst);
    storeQueue[store_idx].completed = true;
    --storesToWB;
    // A bit conservative because a store completion may not free up entries,
    // but hopefully avoids two store completions in one cycle from making
    // the CPU tick twice.
    cpu->wakeCPU();
    cpu->activityThisCycle();

    if (store_idx == storeHead) {
        do {
            incrStIdx(storeHead);

            --stores;
        } while (storeQueue[storeHead].completed &&
                 storeHead != storeTail);

        iewStage->updateLSQNextCycle = true;
    }

    DPRINTF(LSQUnit, "Completing store [sn:%lli], idx:%i, store head "
            "idx:%i\n",
            storeQueue[store_idx].inst->seqNum, store_idx, storeHead);

#if TRACING_ON
    if (DTRACE(O3PipeView)) {
        storeQueue[store_idx].inst->storeTick =
            curTick() - storeQueue[store_idx].inst->fetchTick;
    }
#endif

    if (isStalled() &&
        storeQueue[store_idx].inst->seqNum == stallingStoreIsn) {
        DPRINTF(LSQUnit, "Unstalling, stalling store [sn:%lli] "
                "load idx:%i\n",
                stallingStoreIsn, stallingLoadIdx);
        stalled = false;
        stallingStoreIsn = 0;
        iewStage->replayMemInst(loadQueue[stallingLoadIdx]);
    }

    storeQueue[store_idx].inst->setCompleted();

    if (needsTSO) {
        storeInFlight = false;
    }

    // Tell the checker we've completed this instruction.  Some stores
    // may get reported twice to the checker, but the checker can
    // handle that case.
    if (cpu->checker) {
        cpu->checker->verify(storeQueue[store_idx].inst);
    }
}

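// Attempts to send a store packet to the D-cache.  On failure the unit
// records the blocked state and saves the packet so it can be resent
// when recvRetry() is called.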
template <class Impl>
bool
LSQUnit<Impl>::sendStore(PacketPtr data_pkt)
{
    if (!dcachePort->sendTimingReq(data_pkt)) {
        // Need to handle becoming blocked on a store.
        isStoreBlocked = true;
        ++lsqCacheBlocked;
        assert(retryPkt == NULL);
        retryPkt = data_pkt;
        lsq->setRetryTid(lsqID);
        return false;
    }
    return true;
}

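// Called when the D-cache port signals that a previously failed request
// may be retried.  Resends the saved store packet and, for split stores,
// any second packet that is still outstanding.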
template <class Impl>
void
LSQUnit<Impl>::recvRetry()
{
    if (isStoreBlocked) {
        DPRINTF(LSQUnit, "Receiving retry: store blocked\n");
        assert(retryPkt != NULL);

        LSQSenderState *state =
            dynamic_cast<LSQSenderState *>(retryPkt->senderState);

        if (dcachePort->sendTimingReq(retryPkt)) {
            // Don't finish the store unless this is the last packet.
            if (!TheISA::HasUnalignedMemAcc || !state->pktToSend ||
                    state->pendingPacket == retryPkt) {
                state->pktToSend = false;
                storePostSend(retryPkt);
            }
            retryPkt = NULL;
            isStoreBlocked = false;
            lsq->setRetryTid(InvalidThreadID);

            // Send any outstanding packet.
            if (TheISA::HasUnalignedMemAcc && state->pktToSend) {
                assert(state->pendingPacket);
                if (sendStore(state->pendingPacket)) {
                    storePostSend(state->pendingPacket);
                }
            }
        } else {
            // Still blocked!
            ++lsqCacheBlocked;
            lsq->setRetryTid(lsqID);
        }
    } else if (isLoadBlocked) {
        DPRINTF(LSQUnit, "Loads squash themselves and all younger insts, "
                "no need to resend packet.\n");
    } else {
        DPRINTF(LSQUnit, "Retry received but LSQ is no longer blocked.\n");
    }
}

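// The load and store queues are circular buffers; the helpers below
// increment or decrement an index with wraparound at the queue size.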
template <class Impl>
inline void
LSQUnit<Impl>::incrStIdx(int &store_idx) const
{
    if (++store_idx >= SQEntries)
        store_idx = 0;
}

template <class Impl>
inline void
LSQUnit<Impl>::decrStIdx(int &store_idx) const
{
    if (--store_idx < 0)
        store_idx += SQEntries;
}

template <class Impl>
inline void
LSQUnit<Impl>::incrLdIdx(int &load_idx) const
{
    if (++load_idx >= LQEntries)
        load_idx = 0;
}

template <class Impl>
inline void
LSQUnit<Impl>::decrLdIdx(int &load_idx) const
{
    if (--load_idx < 0)
        load_idx += LQEntries;
}

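// Debug helper: prints the PC and sequence number of every instruction
// currently held in the load and store queues.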
template <class Impl>
void
LSQUnit<Impl>::dumpInsts() const
{
    cprintf("Load store queue: Dumping instructions.\n");
    cprintf("Load queue size: %i\n", loads);
    cprintf("Load queue: ");

    int load_idx = loadHead;

    while (load_idx != loadTail && loadQueue[load_idx]) {
        const DynInstPtr &inst(loadQueue[load_idx]);
        cprintf("%s.[sn:%i] ", inst->pcState(), inst->seqNum);

        incrLdIdx(load_idx);
    }
    cprintf("\n");

    cprintf("Store queue size: %i\n", stores);
    cprintf("Store queue: ");

    int store_idx = storeHead;

    while (store_idx != storeTail && storeQueue[store_idx].inst) {
        const DynInstPtr &inst(storeQueue[store_idx].inst);
        cprintf("%s.[sn:%i] ", inst->pcState(), inst->seqNum);

        incrStIdx(store_idx);
    }

    cprintf("\n");
}

#endif // __CPU_O3_LSQ_UNIT_IMPL_HH__