/*
 * Copyright (c) 2013-2014 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Andrew Bardsley
 */

#include <cstring>
#include <iomanip>
#include <sstream>

#include "arch/locked_mem.hh"
#include "arch/mmapped_ipr.hh"
#include "cpu/minor/cpu.hh"
#include "cpu/minor/exec_context.hh"
#include "cpu/minor/execute.hh"
#include "cpu/minor/lsq.hh"
#include "cpu/minor/pipeline.hh"
#include "debug/Activity.hh"
#include "debug/MinorMem.hh"
5211308Santhony.gutierrez@amd.com
5311308Santhony.gutierrez@amd.comnamespace Minor
5411308Santhony.gutierrez@amd.com{
5511308Santhony.gutierrez@amd.com
5611308Santhony.gutierrez@amd.com/** Returns the offset of addr into an aligned a block of size block_size */
5711308Santhony.gutierrez@amd.comstatic Addr
5811308Santhony.gutierrez@amd.comaddrBlockOffset(Addr addr, unsigned int block_size)
5911308Santhony.gutierrez@amd.com{
6011308Santhony.gutierrez@amd.com    return addr & (block_size - 1);
6111308Santhony.gutierrez@amd.com}
6211308Santhony.gutierrez@amd.com
6311308Santhony.gutierrez@amd.com/** Returns true if the given [addr .. addr+size-1] transfer needs to be
6411308Santhony.gutierrez@amd.com *  fragmented across a block size of block_size */
6511308Santhony.gutierrez@amd.comstatic bool
6611308Santhony.gutierrez@amd.comtransferNeedsBurst(Addr addr, unsigned int size, unsigned int block_size)
6711308Santhony.gutierrez@amd.com{
6811308Santhony.gutierrez@amd.com    return (addrBlockOffset(addr, block_size) + size) > block_size;
6911308Santhony.gutierrez@amd.com}
7011308Santhony.gutierrez@amd.com
/** Construct a memory access request owned by the LSQ port_ on behalf of
 *  instruction inst_.  data_ carries the data for stores (presumably
 *  unused/NULL for loads -- confirm with callers) and res_ is an optional
 *  slot for the access's result value.  The request starts in the
 *  NotIssued state with no packet yet made. */
LSQ::LSQRequest::LSQRequest(LSQ &port_, MinorDynInstPtr inst_, bool isLoad_,
    PacketDataPtr data_, uint64_t *res_) :
    SenderState(),
    port(port_),
    inst(inst_),
    isLoad(isLoad_),
    data(data_),
    packet(NULL), /* made later, once the request is translated */
    request(),
    fault(NoFault),
    res(res_),
    skipped(false),
    issuedToMemory(false),
    state(NotIssued)
{ }
8611308Santhony.gutierrez@amd.com
8711308Santhony.gutierrez@amd.comLSQ::AddrRangeCoverage
8811308Santhony.gutierrez@amd.comLSQ::LSQRequest::containsAddrRangeOf(
8911308Santhony.gutierrez@amd.com    Addr req1_addr, unsigned int req1_size,
9011308Santhony.gutierrez@amd.com    Addr req2_addr, unsigned int req2_size)
9111308Santhony.gutierrez@amd.com{
9211308Santhony.gutierrez@amd.com    /* 'end' here means the address of the byte just past the request
9311308Santhony.gutierrez@amd.com     *  blocks */
9411308Santhony.gutierrez@amd.com    Addr req2_end_addr = req2_addr + req2_size;
9511308Santhony.gutierrez@amd.com    Addr req1_end_addr = req1_addr + req1_size;
9611308Santhony.gutierrez@amd.com
9711308Santhony.gutierrez@amd.com    AddrRangeCoverage ret;
9811308Santhony.gutierrez@amd.com
9911308Santhony.gutierrez@amd.com    if (req1_addr > req2_end_addr || req1_end_addr < req2_addr)
10011308Santhony.gutierrez@amd.com        ret = NoAddrRangeCoverage;
10111308Santhony.gutierrez@amd.com    else if (req1_addr <= req2_addr && req1_end_addr >= req2_end_addr)
10211308Santhony.gutierrez@amd.com        ret = FullAddrRangeCoverage;
10311308Santhony.gutierrez@amd.com    else
10411308Santhony.gutierrez@amd.com        ret = PartialAddrRangeCoverage;
10511308Santhony.gutierrez@amd.com
10611308Santhony.gutierrez@amd.com    return ret;
10711308Santhony.gutierrez@amd.com}
10811308Santhony.gutierrez@amd.com
10911308Santhony.gutierrez@amd.comLSQ::AddrRangeCoverage
11011308Santhony.gutierrez@amd.comLSQ::LSQRequest::containsAddrRangeOf(LSQRequestPtr other_request)
11111308Santhony.gutierrez@amd.com{
11211308Santhony.gutierrez@amd.com    return containsAddrRangeOf(request.getPaddr(), request.getSize(),
11311308Santhony.gutierrez@amd.com        other_request->request.getPaddr(), other_request->request.getSize());
11411308Santhony.gutierrez@amd.com}
11511308Santhony.gutierrez@amd.com
11611308Santhony.gutierrez@amd.combool
11711308Santhony.gutierrez@amd.comLSQ::LSQRequest::isBarrier()
11811308Santhony.gutierrez@amd.com{
11911308Santhony.gutierrez@amd.com    return inst->isInst() && inst->staticInst->isMemBarrier();
12011308Santhony.gutierrez@amd.com}
12111308Santhony.gutierrez@amd.com
12211308Santhony.gutierrez@amd.combool
12311308Santhony.gutierrez@amd.comLSQ::LSQRequest::needsToBeSentToStoreBuffer()
12411308Santhony.gutierrez@amd.com{
12511308Santhony.gutierrez@amd.com    return state == StoreToStoreBuffer;
12611308Santhony.gutierrez@amd.com}
12711308Santhony.gutierrez@amd.com
/** Move this request to new_state, tracing the transition for debugging.
 *  No validity checking of the transition is performed here. */
void
LSQ::LSQRequest::setState(LSQRequestState new_state)
{
    DPRINTFS(MinorMem, (&port), "Setting state from %d to %d for request:"
        " %s\n", state, new_state, *inst);
    state = new_state;
}
13511308Santhony.gutierrez@amd.com
13611308Santhony.gutierrez@amd.combool
13711308Santhony.gutierrez@amd.comLSQ::LSQRequest::isComplete() const
13811308Santhony.gutierrez@amd.com{
13911308Santhony.gutierrez@amd.com    /* @todo, There is currently only one 'completed' state.  This
14011308Santhony.gutierrez@amd.com     *  may not be a good choice */
14111308Santhony.gutierrez@amd.com    return state == Complete;
14211308Santhony.gutierrez@amd.com}
14311308Santhony.gutierrez@amd.com
14411308Santhony.gutierrez@amd.comvoid
14511308Santhony.gutierrez@amd.comLSQ::LSQRequest::reportData(std::ostream &os) const
14611308Santhony.gutierrez@amd.com{
14711308Santhony.gutierrez@amd.com    os << (isLoad ? 'R' : 'W') << ';';
14811308Santhony.gutierrez@amd.com    inst->reportData(os);
14911308Santhony.gutierrez@amd.com    os << ';' << state;
15011308Santhony.gutierrez@amd.com}
15111308Santhony.gutierrez@amd.com
15211308Santhony.gutierrez@amd.comstd::ostream &
15311308Santhony.gutierrez@amd.comoperator <<(std::ostream &os, LSQ::AddrRangeCoverage coverage)
15411308Santhony.gutierrez@amd.com{
15511308Santhony.gutierrez@amd.com    switch (coverage) {
15611308Santhony.gutierrez@amd.com      case LSQ::PartialAddrRangeCoverage:
15711308Santhony.gutierrez@amd.com        os << "PartialAddrRangeCoverage";
15811308Santhony.gutierrez@amd.com        break;
15911308Santhony.gutierrez@amd.com      case LSQ::FullAddrRangeCoverage:
16011308Santhony.gutierrez@amd.com        os << "FullAddrRangeCoverage";
16111308Santhony.gutierrez@amd.com        break;
16211308Santhony.gutierrez@amd.com      case LSQ::NoAddrRangeCoverage:
16311308Santhony.gutierrez@amd.com        os << "NoAddrRangeCoverage";
16411308Santhony.gutierrez@amd.com        break;
16511308Santhony.gutierrez@amd.com      default:
16611308Santhony.gutierrez@amd.com        os << "AddrRangeCoverage-" << static_cast<int>(coverage);
16711308Santhony.gutierrez@amd.com        break;
16811308Santhony.gutierrez@amd.com    }
16911308Santhony.gutierrez@amd.com    return os;
17011308Santhony.gutierrez@amd.com}
17111308Santhony.gutierrez@amd.com
17211308Santhony.gutierrez@amd.comstd::ostream &
17311308Santhony.gutierrez@amd.comoperator <<(std::ostream &os, LSQ::LSQRequest::LSQRequestState state)
17411308Santhony.gutierrez@amd.com{
17511308Santhony.gutierrez@amd.com    switch (state) {
17611308Santhony.gutierrez@amd.com      case LSQ::LSQRequest::NotIssued:
17711308Santhony.gutierrez@amd.com        os << "NotIssued";
17811308Santhony.gutierrez@amd.com        break;
17911308Santhony.gutierrez@amd.com      case LSQ::LSQRequest::InTranslation:
18011308Santhony.gutierrez@amd.com        os << "InTranslation";
18111308Santhony.gutierrez@amd.com        break;
18211308Santhony.gutierrez@amd.com      case LSQ::LSQRequest::Translated:
18311308Santhony.gutierrez@amd.com        os << "Translated";
18411308Santhony.gutierrez@amd.com        break;
18511308Santhony.gutierrez@amd.com      case LSQ::LSQRequest::Failed:
18611308Santhony.gutierrez@amd.com        os << "Failed";
18711308Santhony.gutierrez@amd.com        break;
18811308Santhony.gutierrez@amd.com      case LSQ::LSQRequest::RequestIssuing:
18911308Santhony.gutierrez@amd.com        os << "RequestIssuing";
19011308Santhony.gutierrez@amd.com        break;
19111308Santhony.gutierrez@amd.com      case LSQ::LSQRequest::StoreToStoreBuffer:
19211308Santhony.gutierrez@amd.com        os << "StoreToStoreBuffer";
19311308Santhony.gutierrez@amd.com        break;
19411308Santhony.gutierrez@amd.com      case LSQ::LSQRequest::StoreInStoreBuffer:
19511308Santhony.gutierrez@amd.com        os << "StoreInStoreBuffer";
19611308Santhony.gutierrez@amd.com        break;
19711308Santhony.gutierrez@amd.com      case LSQ::LSQRequest::StoreBufferIssuing:
19811308Santhony.gutierrez@amd.com        os << "StoreBufferIssuing";
19911308Santhony.gutierrez@amd.com        break;
20011308Santhony.gutierrez@amd.com      case LSQ::LSQRequest::RequestNeedsRetry:
20111308Santhony.gutierrez@amd.com        os << "RequestNeedsRetry";
20211308Santhony.gutierrez@amd.com        break;
20311308Santhony.gutierrez@amd.com      case LSQ::LSQRequest::StoreBufferNeedsRetry:
20411308Santhony.gutierrez@amd.com        os << "StoreBufferNeedsRetry";
20511308Santhony.gutierrez@amd.com        break;
20611308Santhony.gutierrez@amd.com      case LSQ::LSQRequest::Complete:
20711308Santhony.gutierrez@amd.com        os << "Complete";
20811308Santhony.gutierrez@amd.com        break;
20911308Santhony.gutierrez@amd.com      default:
21011308Santhony.gutierrez@amd.com        os << "LSQRequestState-" << static_cast<int>(state);
21111308Santhony.gutierrez@amd.com        break;
21211308Santhony.gutierrez@amd.com    }
21311308Santhony.gutierrez@amd.com    return os;
21411308Santhony.gutierrez@amd.com}
21511308Santhony.gutierrez@amd.com
21611308Santhony.gutierrez@amd.comvoid
21711308Santhony.gutierrez@amd.comLSQ::clearMemBarrier(MinorDynInstPtr inst)
21811308Santhony.gutierrez@amd.com{
21911308Santhony.gutierrez@amd.com    bool is_last_barrier = inst->id.execSeqNum >= lastMemBarrier;
22011308Santhony.gutierrez@amd.com
22111308Santhony.gutierrez@amd.com    DPRINTF(MinorMem, "Moving %s barrier out of store buffer inst: %s\n",
22211308Santhony.gutierrez@amd.com        (is_last_barrier ? "last" : "a"), *inst);
22311308Santhony.gutierrez@amd.com
22411308Santhony.gutierrez@amd.com    if (is_last_barrier)
22511308Santhony.gutierrez@amd.com        lastMemBarrier = 0;
22611308Santhony.gutierrez@amd.com}
22711308Santhony.gutierrez@amd.com
/** TLB interface callback: receives the outcome of the translation
 *  started by startAddrTranslation.  Unconditionally makes the packet
 *  (even on fault), marks the request Translated and offers it to the
 *  LSQ's transfers queue; any fault is stashed for later handling by
 *  tryToSendToTransfers. */
void
LSQ::SingleDataRequest::finish(const Fault &fault_, RequestPtr request_,
                               ThreadContext *tc, BaseTLB::Mode mode)
{
    /* Remember the fault so it can be acted on when the request is
     *  next processed */
    fault = fault_;

    /* This translation is no longer outstanding in the DTLB */
    port.numAccessesInDTLB--;

    DPRINTFS(MinorMem, (&port), "Received translation response for"
        " request: %s\n", *inst);

    makePacket();

    setState(Translated);
    port.tryToSendToTransfers(this);

    /* Let's try and wake up the processor for the next cycle */
    port.cpu.wakeupOnEvent(Pipeline::ExecuteStageId);
}
24711308Santhony.gutierrez@amd.com
/** Kick off a timing translation of this request's virtual address.
 *  The reply arrives via finish (or markDelayed) since this LSQRequest
 *  itself implements the TLB Translation interface. */
void
LSQ::SingleDataRequest::startAddrTranslation()
{
    ThreadContext *thread = port.cpu.getContext(
        inst->id.threadId);

    /* Count this access as outstanding in the DTLB; decremented again
     *  in finish */
    port.numAccessesInDTLB++;

    setState(LSQ::LSQRequest::InTranslation);

    DPRINTFS(MinorMem, (&port), "Submitting DTLB request\n");
    /* Submit the translation request.  The response will come through
     *  finish/markDelayed on the LSQRequest as it bears the Translation
     *  interface */
    thread->getDTBPtr()->translateTiming(
        &request, thread, this, (isLoad ? BaseTLB::Read : BaseTLB::Write));
}
26511308Santhony.gutierrez@amd.com
/** Take ownership of the memory system's response packet and mark this
 *  request Complete; there is no longer a packet in flight for it. */
void
LSQ::SingleDataRequest::retireResponse(PacketPtr packet_)
{
    DPRINTFS(MinorMem, (&port), "Retiring packet\n");
    packet = packet_;
    packetInFlight = false;
    setState(Complete);
}
27411308Santhony.gutierrez@amd.com
/** TLB interface callback for a single fragment's translation.  Counts
 *  the fragment as translated, then either: hands a faulting request
 *  straight to the transfers queue (no further fragments are
 *  translated), builds all fragment packets once every fragment is
 *  translated, or schedules translation of the next fragment. */
void
LSQ::SplitDataRequest::finish(const Fault &fault_, RequestPtr request_,
                              ThreadContext *tc, BaseTLB::Mode mode)
{
    /* Remember the (possibly NoFault) outcome for later handling */
    fault = fault_;

    /* This fragment's translation is no longer outstanding */
    port.numAccessesInDTLB--;

    /* Fragments are translated strictly in order, so this response must
     *  be for the next untranslated fragment (checked below) */
    unsigned int M5_VAR_USED expected_fragment_index =
        numTranslatedFragments;

    numInTranslationFragments--;
    numTranslatedFragments++;

    DPRINTFS(MinorMem, (&port), "Received translation response for fragment"
        " %d of request: %s\n", expected_fragment_index, *inst);

    assert(request_ == fragmentRequests[expected_fragment_index]);

    /* Wake up next cycle to get things going again in case the
     *  tryToSendToTransfers does take */
    port.cpu.wakeupOnEvent(Pipeline::ExecuteStageId);

    if (fault != NoFault) {
        /* tryToSendToTransfers will handle the fault */

        DPRINTFS(MinorMem, (&port), "Faulting translation for fragment:"
            " %d of request: %s\n",
            expected_fragment_index, *inst);

        setState(Translated);
        port.tryToSendToTransfers(this);
    } else if (numTranslatedFragments == numFragments) {
        /* All fragments translated; build their packets and offer the
         *  whole request on */
        makeFragmentPackets();

        setState(Translated);
        port.tryToSendToTransfers(this);
    } else {
        /* Avoid calling translateTiming from within ::finish */
        assert(!translationEvent.scheduled());
        port.cpu.schedule(translationEvent, curTick());
    }
}
31811308Santhony.gutierrez@amd.com
/** Construct an empty split request.  All fragment counters start at
 *  zero and fragmentRequests/fragmentPackets start empty; they are
 *  populated later by makeFragmentRequests/makeFragmentPackets once
 *  the caller has filled in the main request. */
LSQ::SplitDataRequest::SplitDataRequest(LSQ &port_, MinorDynInstPtr inst_,
    bool isLoad_, PacketDataPtr data_, uint64_t *res_) :
    LSQRequest(port_, inst_, isLoad_, data_, res_),
    translationEvent(*this), /* used to resume fragment translation */
    numFragments(0),
    numInTranslationFragments(0),
    numTranslatedFragments(0),
    numIssuedFragments(0),
    numRetiredFragments(0),
    fragmentRequests(),
    fragmentPackets()
{
    /* Don't know how many elements are needed until the request is
     *  populated by the caller. */
}
33411308Santhony.gutierrez@amd.com
33511308Santhony.gutierrez@amd.comLSQ::SplitDataRequest::~SplitDataRequest()
33611308Santhony.gutierrez@amd.com{
33711308Santhony.gutierrez@amd.com    for (auto i = fragmentRequests.begin();
33811308Santhony.gutierrez@amd.com        i != fragmentRequests.end(); i++)
33911308Santhony.gutierrez@amd.com    {
34011308Santhony.gutierrez@amd.com        delete *i;
34111308Santhony.gutierrez@amd.com    }
34211308Santhony.gutierrez@amd.com
34311308Santhony.gutierrez@amd.com    for (auto i = fragmentPackets.begin();
34411308Santhony.gutierrez@amd.com         i != fragmentPackets.end(); i++)
34511308Santhony.gutierrez@amd.com    {
34611308Santhony.gutierrez@amd.com        delete *i;
34711308Santhony.gutierrez@amd.com    }
34811308Santhony.gutierrez@amd.com}
34911308Santhony.gutierrez@amd.com
/** Carve the whole request [base_addr .. base_addr+whole_size) into
 *  line-aligned fragment Requests no larger than port.lineWidth,
 *  filling fragmentRequests and setting numFragments.  Fragments carry
 *  only virtual addresses at this point; each is translated separately
 *  afterwards. */
void
LSQ::SplitDataRequest::makeFragmentRequests()
{
    Addr base_addr = request.getVaddr();
    unsigned int whole_size = request.getSize();
    unsigned int line_width = port.lineWidth;

    unsigned int fragment_size;
    Addr fragment_addr;

    /* Assume that this transfer is across potentially many block snap
     * boundaries:
     *
     * |      _|________|________|________|___     |
     * |     |0| 1      | 2      | 3      | 4 |    |
     * |     |_|________|________|________|___|    |
     * |       |        |        |        |        |
     *
     *  The first transfer (0) can be up to lineWidth in size.
     *  All the middle transfers (1-3) are lineWidth in size
     *  The last transfer (4) can be from zero to lineWidth - 1 in size
     */
    unsigned int first_fragment_offset =
        addrBlockOffset(base_addr, line_width);
    unsigned int last_fragment_size =
        addrBlockOffset(base_addr + whole_size, line_width);
    unsigned int first_fragment_size =
        line_width - first_fragment_offset;

    unsigned int middle_fragments_total_size =
        whole_size - (first_fragment_size + last_fragment_size);

    /* The middle of the transfer must be an exact number of lines */
    assert(addrBlockOffset(middle_fragments_total_size, line_width) == 0);

    unsigned int middle_fragment_count =
        middle_fragments_total_size / line_width;

    /* last_fragment_size == 0 means the transfer ends exactly on a line
     *  boundary so no final partial fragment is needed */
    numFragments = 1 /* first */ + middle_fragment_count +
        (last_fragment_size == 0 ? 0 : 1);

    DPRINTFS(MinorMem, (&port), "Dividing transfer into %d fragmentRequests."
        " First fragment size: %d Last fragment size: %d\n",
        numFragments, first_fragment_size,
        (last_fragment_size == 0 ? line_width : last_fragment_size));

    /* Sanity check: the fragment sizes must add back up to the whole */
    assert(((middle_fragment_count * line_width) +
        first_fragment_size + last_fragment_size) == whole_size);

    fragment_addr = base_addr;
    fragment_size = first_fragment_size;

    /* Just past the last address in the request */
    Addr end_addr = base_addr + whole_size;

    for (unsigned int fragment_index = 0; fragment_index < numFragments;
         fragment_index++)
    {
        bool M5_VAR_USED is_last_fragment = false;

        if (fragment_addr == base_addr) {
            /* First fragment */
            fragment_size = first_fragment_size;
        } else {
            if ((fragment_addr + line_width) > end_addr) {
                /* Adjust size of last fragment */
                fragment_size = end_addr - fragment_addr;
                is_last_fragment = true;
            } else {
                /* Middle fragments */
                fragment_size = line_width;
            }
        }

        Request *fragment = new Request();

        /* Each fragment inherits the parent request's context, flags,
         *  master id and PC */
        fragment->setThreadContext(request.contextId(), /* thread id */ 0);
        fragment->setVirt(0 /* asid */,
            fragment_addr, fragment_size, request.getFlags(),
            request.masterId(),
            request.getPC());

        DPRINTFS(MinorMem, (&port), "Generating fragment addr: 0x%x size: %d"
            " (whole request addr: 0x%x size: %d) %s\n",
            fragment_addr, fragment_size, base_addr, whole_size,
            (is_last_fragment ? "last fragment" : ""));

        fragment_addr += fragment_size;

        fragmentRequests.push_back(fragment);
    }
}
44111308Santhony.gutierrez@amd.com
44211308Santhony.gutierrez@amd.comvoid
44311308Santhony.gutierrez@amd.comLSQ::SplitDataRequest::makeFragmentPackets()
44411308Santhony.gutierrez@amd.com{
44511308Santhony.gutierrez@amd.com    Addr base_addr = request.getVaddr();
44611308Santhony.gutierrez@amd.com
44711308Santhony.gutierrez@amd.com    DPRINTFS(MinorMem, (&port), "Making packets for request: %s\n", *inst);
44811308Santhony.gutierrez@amd.com
44911308Santhony.gutierrez@amd.com    for (unsigned int fragment_index = 0; fragment_index < numFragments;
45011308Santhony.gutierrez@amd.com         fragment_index++)
45111308Santhony.gutierrez@amd.com    {
45211308Santhony.gutierrez@amd.com        Request *fragment = fragmentRequests[fragment_index];
45311308Santhony.gutierrez@amd.com
45411308Santhony.gutierrez@amd.com        DPRINTFS(MinorMem, (&port), "Making packet %d for request: %s"
45511308Santhony.gutierrez@amd.com            " (%d, 0x%x)\n",
45611308Santhony.gutierrez@amd.com            fragment_index, *inst,
45711308Santhony.gutierrez@amd.com            (fragment->hasPaddr() ? "has paddr" : "no paddr"),
45811308Santhony.gutierrez@amd.com            (fragment->hasPaddr() ? fragment->getPaddr() : 0));
45911308Santhony.gutierrez@amd.com
46011308Santhony.gutierrez@amd.com        Addr fragment_addr = fragment->getVaddr();
46111308Santhony.gutierrez@amd.com        unsigned int fragment_size = fragment->getSize();
46211308Santhony.gutierrez@amd.com
46311308Santhony.gutierrez@amd.com        uint8_t *request_data = NULL;
46411308Santhony.gutierrez@amd.com
46511308Santhony.gutierrez@amd.com        if (!isLoad) {
46611308Santhony.gutierrez@amd.com            /* Split data for Packets.  Will become the property of the
46711308Santhony.gutierrez@amd.com             *  outgoing Packets */
46811308Santhony.gutierrez@amd.com            request_data = new uint8_t[fragment_size];
46911308Santhony.gutierrez@amd.com            std::memcpy(request_data, data + (fragment_addr - base_addr),
47011308Santhony.gutierrez@amd.com                fragment_size);
47111308Santhony.gutierrez@amd.com        }
47211308Santhony.gutierrez@amd.com
47311308Santhony.gutierrez@amd.com        assert(fragment->hasPaddr());
47411308Santhony.gutierrez@amd.com
47511308Santhony.gutierrez@amd.com        PacketPtr fragment_packet =
47611308Santhony.gutierrez@amd.com            makePacketForRequest(*fragment, isLoad, this, request_data);
47711308Santhony.gutierrez@amd.com
47811308Santhony.gutierrez@amd.com        fragmentPackets.push_back(fragment_packet);
47911308Santhony.gutierrez@amd.com        /* Accumulate flags in parent request */
48011308Santhony.gutierrez@amd.com        request.setFlags(fragment->getFlags());
48111308Santhony.gutierrez@amd.com    }
48211308Santhony.gutierrez@amd.com
48311308Santhony.gutierrez@amd.com    /* Might as well make the overall/response packet here */
48411308Santhony.gutierrez@amd.com    /* Get the physical address for the whole request/packet from the first
48511308Santhony.gutierrez@amd.com     *  fragment */
48611308Santhony.gutierrez@amd.com    request.setPaddr(fragmentRequests[0]->getPaddr());
48711308Santhony.gutierrez@amd.com    makePacket();
48811308Santhony.gutierrez@amd.com}
48911308Santhony.gutierrez@amd.com
49011308Santhony.gutierrez@amd.comvoid
49111308Santhony.gutierrez@amd.comLSQ::SplitDataRequest::startAddrTranslation()
49211308Santhony.gutierrez@amd.com{
49311308Santhony.gutierrez@amd.com    setState(LSQ::LSQRequest::InTranslation);
49411308Santhony.gutierrez@amd.com
49511308Santhony.gutierrez@amd.com    makeFragmentRequests();
49611308Santhony.gutierrez@amd.com
49711308Santhony.gutierrez@amd.com    numInTranslationFragments = 0;
49811308Santhony.gutierrez@amd.com    numTranslatedFragments = 0;
49911308Santhony.gutierrez@amd.com
50011308Santhony.gutierrez@amd.com    /* @todo, just do these in sequence for now with
501     * a loop of:
502     * do {
503     *  sendNextFragmentToTranslation ; translateTiming ; finish
504     * } while (numTranslatedFragments != numFragments);
505     */
506
507    /* Do first translation */
508    sendNextFragmentToTranslation();
509}
510
511PacketPtr
512LSQ::SplitDataRequest::getHeadPacket()
513{
514    assert(numIssuedFragments < numFragments);
515
516    return fragmentPackets[numIssuedFragments];
517}
518
519void
520LSQ::SplitDataRequest::stepToNextPacket()
521{
522    assert(numIssuedFragments < numFragments);
523
524    numIssuedFragments++;
525}
526
527void
528LSQ::SplitDataRequest::retireResponse(PacketPtr response)
529{
530    assert(numRetiredFragments < numFragments);
531
532    DPRINTFS(MinorMem, (&port), "Retiring fragment addr: 0x%x size: %d"
533        " offset: 0x%x (retired fragment num: %d) %s\n",
534        response->req->getVaddr(), response->req->getSize(),
535        request.getVaddr() - response->req->getVaddr(),
536        numRetiredFragments,
537        (fault == NoFault ? "" : fault->name()));
538
539    numRetiredFragments++;
540
541    if (skipped) {
542        /* Skip because we already knew the request had faulted or been
543         *  skipped */
544        DPRINTFS(MinorMem, (&port), "Skipping this fragment\n");
545    } else if (response->isError()) {
546        /* Mark up the error and leave to execute to handle it */
547        DPRINTFS(MinorMem, (&port), "Fragment has an error, skipping\n");
548        setSkipped();
549        packet->copyError(response);
550    } else {
551        if (isLoad) {
552            if (!data) {
553                /* For a split transfer, a Packet must be constructed
554                 *  to contain all returning data.  This is that packet's
555                 *  data */
556                data = new uint8_t[request.getSize()];
557            }
558
559            /* Populate the portion of the overall response data represented
560             *  by the response fragment */
561            std::memcpy(
562                data + (response->req->getVaddr() - request.getVaddr()),
563                response->getConstPtr<uint8_t>(),
564                response->req->getSize());
565        }
566    }
567
568    /* Complete early if we're skipping are no more in-flight accesses */
569    if (skipped && !hasPacketsInMemSystem()) {
570        DPRINTFS(MinorMem, (&port), "Completed skipped burst\n");
571        setState(Complete);
572        if (packet->needsResponse())
573            packet->makeResponse();
574    }
575
576    if (numRetiredFragments == numFragments)
577        setState(Complete);
578
579    if (!skipped && isComplete()) {
580        DPRINTFS(MinorMem, (&port), "Completed burst %d\n", packet != NULL);
581
582        DPRINTFS(MinorMem, (&port), "Retired packet isRead: %d isWrite: %d"
583             " needsResponse: %d packetSize: %s requestSize: %s responseSize:"
584             " %s\n", packet->isRead(), packet->isWrite(),
585             packet->needsResponse(), packet->getSize(), request.getSize(),
586             response->getSize());
587
588        /* A request can become complete by several paths, this is a sanity
589         *  check to make sure the packet's data is created */
590        if (!data) {
591            data = new uint8_t[request.getSize()];
592        }
593
594        if (isLoad) {
595            DPRINTFS(MinorMem, (&port), "Copying read data\n");
596            std::memcpy(packet->getPtr<uint8_t>(), data, request.getSize());
597        }
598        packet->makeResponse();
599    }
600
601    /* Packets are all deallocated together in ~SplitLSQRequest */
602}
603
604void
605LSQ::SplitDataRequest::sendNextFragmentToTranslation()
606{
607    unsigned int fragment_index = numTranslatedFragments;
608
609    ThreadContext *thread = port.cpu.getContext(
610        inst->id.threadId);
611
612    DPRINTFS(MinorMem, (&port), "Submitting DTLB request for fragment: %d\n",
613        fragment_index);
614
615    port.numAccessesInDTLB++;
616    numInTranslationFragments++;
617
618    thread->getDTBPtr()->translateTiming(
619        fragmentRequests[fragment_index], thread, this, (isLoad ?
620        BaseTLB::Read : BaseTLB::Write));
621}
622
623bool
624LSQ::StoreBuffer::canInsert() const
625{
626    /* @todo, support store amalgamation */
627    return slots.size() < numSlots;
628}
629
630void
631LSQ::StoreBuffer::deleteRequest(LSQRequestPtr request)
632{
633    auto found = std::find(slots.begin(), slots.end(), request);
634
635    if (found != slots.end()) {
636        DPRINTF(MinorMem, "Deleting request: %s %s %s from StoreBuffer\n",
637            request, *found, *(request->inst));
638        slots.erase(found);
639
640        delete request;
641    }
642}
643
644void
645LSQ::StoreBuffer::insert(LSQRequestPtr request)
646{
647    if (!canInsert()) {
648        warn("%s: store buffer insertion without space to insert from"
649            " inst: %s\n", name(), *(request->inst));
650    }
651
652    DPRINTF(MinorMem, "Pushing store: %s into store buffer\n", request);
653
654    numUnissuedAccesses++;
655
656    if (request->state != LSQRequest::Complete)
657        request->setState(LSQRequest::StoreInStoreBuffer);
658
659    slots.push_back(request);
660
661    /* Let's try and wake up the processor for the next cycle to step
662     *  the store buffer */
663    lsq.cpu.wakeupOnEvent(Pipeline::ExecuteStageId);
664}
665
666LSQ::AddrRangeCoverage
667LSQ::StoreBuffer::canForwardDataToLoad(LSQRequestPtr request,
668    unsigned int &found_slot)
669{
670    unsigned int slot_index = slots.size() - 1;
671    auto i = slots.rbegin();
672    AddrRangeCoverage ret = NoAddrRangeCoverage;
673
674    /* Traverse the store buffer in reverse order (most to least recent)
675     *  and try to find a slot whose address range overlaps this request */
676    while (ret == NoAddrRangeCoverage && i != slots.rend()) {
677        LSQRequestPtr slot = *i;
678
679        if (slot->packet) {
680            AddrRangeCoverage coverage = slot->containsAddrRangeOf(request);
681
682            if (coverage != NoAddrRangeCoverage) {
683                DPRINTF(MinorMem, "Forwarding: slot: %d result: %s thisAddr:"
684                    " 0x%x thisSize: %d slotAddr: 0x%x slotSize: %d\n",
685                    slot_index, coverage,
686                    request->request.getPaddr(), request->request.getSize(),
687                    slot->request.getPaddr(), slot->request.getSize());
688
689                found_slot = slot_index;
690                ret = coverage;
691            }
692        }
693
694        i++;
695        slot_index--;
696    }
697
698    return ret;
699}
700
701/** Fill the given packet with appropriate date from slot slot_number */
702void
703LSQ::StoreBuffer::forwardStoreData(LSQRequestPtr load,
704    unsigned int slot_number)
705{
706    assert(slot_number < slots.size());
707    assert(load->packet);
708    assert(load->isLoad);
709
710    LSQRequestPtr store = slots[slot_number];
711
712    assert(store->packet);
713    assert(store->containsAddrRangeOf(load) == FullAddrRangeCoverage);
714
715    Addr load_addr = load->request.getPaddr();
716    Addr store_addr = store->request.getPaddr();
717    Addr addr_offset = load_addr - store_addr;
718
719    unsigned int load_size = load->request.getSize();
720
721    DPRINTF(MinorMem, "Forwarding %d bytes for addr: 0x%x from store buffer"
722        " slot: %d addr: 0x%x addressOffset: 0x%x\n",
723        load_size, load_addr, slot_number,
724        store_addr, addr_offset);
725
726    void *load_packet_data = load->packet->getPtr<void>();
727    void *store_packet_data = store->packet->getPtr<uint8_t>() + addr_offset;
728
729    std::memcpy(load_packet_data, store_packet_data, load_size);
730}
731
732void
733LSQ::StoreBuffer::countIssuedStore(LSQRequestPtr request)
734{
735    /* Barriers are accounted for as they are cleared from
736     *  the queue, not after their transfers are complete */
737    if (!request->isBarrier())
738        numUnissuedAccesses--;
739}
740
741void
742LSQ::StoreBuffer::step()
743{
744    DPRINTF(MinorMem, "StoreBuffer step numUnissuedAccesses: %d\n",
745        numUnissuedAccesses);
746
747    if (numUnissuedAccesses != 0 && lsq.state == LSQ::MemoryRunning) {
748        /* Clear all the leading barriers */
749        while (!slots.empty() &&
750            slots.front()->isComplete() && slots.front()->isBarrier())
751        {
752            LSQRequestPtr barrier = slots.front();
753
754            DPRINTF(MinorMem, "Clearing barrier for inst: %s\n",
755                *(barrier->inst));
756
757            numUnissuedAccesses--;
758            lsq.clearMemBarrier(barrier->inst);
759            slots.pop_front();
760
761            delete barrier;
762        }
763
764        auto i = slots.begin();
765        bool issued = true;
766        unsigned int issue_count = 0;
767
768        /* Skip trying if the memory system is busy */
769        if (lsq.state == LSQ::MemoryNeedsRetry)
770            issued = false;
771
772        /* Try to issue all stores in order starting from the head
773         *  of the queue.  Responses are allowed to be retired
774         *  out of order */
775        while (issued &&
776            issue_count < storeLimitPerCycle &&
777            lsq.canSendToMemorySystem() &&
778            i != slots.end())
779        {
780            LSQRequestPtr request = *i;
781
782            DPRINTF(MinorMem, "Considering request: %s, sentAllPackets: %d"
783                " state: %s\n",
784                *(request->inst), request->sentAllPackets(),
785                request->state);
786
787            if (request->isBarrier() && request->isComplete()) {
788                /* Give up at barriers */
789                issued = false;
790            } else if (!(request->state == LSQRequest::StoreBufferIssuing &&
791                request->sentAllPackets()))
792            {
793                DPRINTF(MinorMem, "Trying to send request: %s to memory"
794                    " system\n", *(request->inst));
795
796                if (lsq.tryToSend(request)) {
797                    countIssuedStore(request);
798                    issue_count++;
799                } else {
800                    /* Don't step on to the next store buffer entry if this
801                     *  one hasn't issued all its packets as the store
802                     *  buffer must still enforce ordering */
803                    issued = false;
804                }
805            }
806            i++;
807        }
808    }
809}
810
811void
812LSQ::completeMemBarrierInst(MinorDynInstPtr inst,
813    bool committed)
814{
815    if (committed) {
816        /* Not already sent to the store buffer as a store request? */
817        if (!inst->inStoreBuffer) {
818            /* Insert an entry into the store buffer to tick off barriers
819             *  until there are none in flight */
820            storeBuffer.insert(new BarrierDataRequest(*this, inst));
821        }
822    } else {
823        /* Clear the barrier anyway if it wasn't actually committed */
824        clearMemBarrier(inst);
825    }
826}
827
828void
829LSQ::StoreBuffer::minorTrace() const
830{
831    unsigned int size = slots.size();
832    unsigned int i = 0;
833    std::ostringstream os;
834
835    while (i < size) {
836        LSQRequestPtr request = slots[i];
837
838        request->reportData(os);
839
840        i++;
841        if (i < numSlots)
842            os << ',';
843    }
844
845    while (i < numSlots) {
846        os << '-';
847
848        i++;
849        if (i < numSlots)
850            os << ',';
851    }
852
853    MINORTRACE("addr=%s num_unissued_stores=%d\n", os.str(),
854        numUnissuedAccesses);
855}
856
857void
858LSQ::tryToSendToTransfers(LSQRequestPtr request)
859{
860    if (state == MemoryNeedsRetry) {
861        DPRINTF(MinorMem, "Request needs retry, not issuing to"
862            " memory until retry arrives\n");
863        return;
864    }
865
866    if (request->state == LSQRequest::InTranslation) {
867        DPRINTF(MinorMem, "Request still in translation, not issuing to"
868            " memory\n");
869        return;
870    }
871
872    assert(request->state == LSQRequest::Translated ||
873        request->state == LSQRequest::RequestIssuing ||
874        request->state == LSQRequest::Failed ||
875        request->state == LSQRequest::Complete);
876
877    if (requests.empty() || requests.front() != request) {
878        DPRINTF(MinorMem, "Request not at front of requests queue, can't"
879            " issue to memory\n");
880        return;
881    }
882
883    if (transfers.unreservedRemainingSpace() == 0) {
884        DPRINTF(MinorMem, "No space to insert request into transfers"
885            " queue\n");
886        return;
887    }
888
889    if (request->isComplete() || request->state == LSQRequest::Failed) {
890        DPRINTF(MinorMem, "Passing a %s transfer on to transfers"
891            " queue\n", (request->isComplete() ? "completed" : "failed"));
892        request->setState(LSQRequest::Complete);
893        request->setSkipped();
894        moveFromRequestsToTransfers(request);
895        return;
896    }
897
898    if (!execute.instIsRightStream(request->inst)) {
899        /* Wrong stream, try to abort the transfer but only do so if
900         *  there are no packets in flight */
901        if (request->hasPacketsInMemSystem()) {
902            DPRINTF(MinorMem, "Request's inst. is from the wrong stream,"
903                " waiting for responses before aborting request\n");
904        } else {
905            DPRINTF(MinorMem, "Request's inst. is from the wrong stream,"
906                " aborting request\n");
907            request->setState(LSQRequest::Complete);
908            request->setSkipped();
909            moveFromRequestsToTransfers(request);
910        }
911        return;
912    }
913
914    if (request->fault != NoFault) {
915        if (request->inst->staticInst->isPrefetch()) {
916            DPRINTF(MinorMem, "Not signalling fault for faulting prefetch\n");
917        }
918        DPRINTF(MinorMem, "Moving faulting request into the transfers"
919            " queue\n");
920        request->setState(LSQRequest::Complete);
921        request->setSkipped();
922        moveFromRequestsToTransfers(request);
923        return;
924    }
925
926    bool is_load = request->isLoad;
927    bool is_llsc = request->request.isLLSC();
928    bool is_swap = request->request.isSwap();
929    bool bufferable = !(request->request.isUncacheable() ||
930        is_llsc || is_swap);
931
932    if (is_load) {
933        if (numStoresInTransfers != 0) {
934            DPRINTF(MinorMem, "Load request with stores still in transfers"
935                " queue, stalling\n");
936            return;
937        }
938    } else {
939        /* Store.  Can it be sent to the store buffer? */
940        if (bufferable && !request->request.isMmappedIpr()) {
941            request->setState(LSQRequest::StoreToStoreBuffer);
942            moveFromRequestsToTransfers(request);
943            DPRINTF(MinorMem, "Moving store into transfers queue\n");
944            return;
945        }
946    }
947
948    /* Check if this is the head instruction (and so must be executable as
949     *  its stream sequence number was checked above) for loads which must
950     *  not be speculatively issued and stores which must be issued here */
951    if (!bufferable) {
952        if (!execute.instIsHeadInst(request->inst)) {
953            DPRINTF(MinorMem, "Memory access not the head inst., can't be"
954                " sure it can be performed, not issuing\n");
955            return;
956        }
957
958        unsigned int forwarding_slot = 0;
959
960        if (storeBuffer.canForwardDataToLoad(request, forwarding_slot) !=
961            NoAddrRangeCoverage)
962        {
963            DPRINTF(MinorMem, "Memory access can receive forwarded data"
964                " from the store buffer, need to wait for store buffer to"
965                " drain\n");
966            return;
967        }
968    }
969
970    /* True: submit this packet to the transfers queue to be sent to the
971     * memory system.
972     * False: skip the memory and push a packet for this request onto
973     * requests */
974    bool do_access = true;
975
976    if (!is_llsc) {
977        /* Check for match in the store buffer */
978        if (is_load) {
979            unsigned int forwarding_slot = 0;
980            AddrRangeCoverage forwarding_result =
981                storeBuffer.canForwardDataToLoad(request,
982                forwarding_slot);
983
984            switch (forwarding_result) {
985              case FullAddrRangeCoverage:
986                /* Forward data from the store buffer into this request and
987                 *  repurpose this request's packet into a response packet */
988                storeBuffer.forwardStoreData(request, forwarding_slot);
989                request->packet->makeResponse();
990
991                /* Just move between queues, no access */
992                do_access = false;
993                break;
994              case PartialAddrRangeCoverage:
995                DPRINTF(MinorMem, "Load partly satisfied by store buffer"
996                    " data. Must wait for the store to complete\n");
997                return;
998                break;
999              case NoAddrRangeCoverage:
1000                DPRINTF(MinorMem, "No forwardable data from store buffer\n");
1001                /* Fall through to try access */
1002                break;
1003            }
1004        }
1005    } else {
1006        if (!canSendToMemorySystem()) {
1007            DPRINTF(MinorMem, "Can't send request to memory system yet\n");
1008            return;
1009        }
1010
1011        SimpleThread &thread = *cpu.threads[request->inst->id.threadId];
1012
1013        TheISA::PCState old_pc = thread.pcState();
1014        ExecContext context(cpu, thread, execute, request->inst);
1015
1016        /* Handle LLSC requests and tests */
1017        if (is_load) {
1018            TheISA::handleLockedRead(&context, &request->request);
1019        } else {
1020            do_access = TheISA::handleLockedWrite(&context,
1021                &request->request, cacheBlockMask);
1022
1023            if (!do_access) {
1024                DPRINTF(MinorMem, "Not perfoming a memory "
1025                    "access for store conditional\n");
1026            }
1027        }
1028        thread.pcState(old_pc);
1029    }
1030
1031    /* See the do_access comment above */
1032    if (do_access) {
1033        if (!canSendToMemorySystem()) {
1034            DPRINTF(MinorMem, "Can't send request to memory system yet\n");
1035            return;
1036        }
1037
1038        /* Remember if this is an access which can't be idly
1039         *  discarded by an interrupt */
1040        if (!bufferable && !request->issuedToMemory) {
1041            numAccessesIssuedToMemory++;
1042            request->issuedToMemory = true;
1043        }
1044
1045        if (tryToSend(request))
1046            moveFromRequestsToTransfers(request);
1047    } else {
1048        request->setState(LSQRequest::Complete);
1049        moveFromRequestsToTransfers(request);
1050    }
1051}
1052
1053bool
1054LSQ::tryToSend(LSQRequestPtr request)
1055{
1056    bool ret = false;
1057
1058    if (!canSendToMemorySystem()) {
1059        DPRINTF(MinorMem, "Can't send request: %s yet, no space in memory\n",
1060            *(request->inst));
1061    } else {
1062        PacketPtr packet = request->getHeadPacket();
1063
1064        DPRINTF(MinorMem, "Trying to send request: %s addr: 0x%x\n",
1065            *(request->inst), packet->req->getVaddr());
1066
1067        /* The sender state of the packet *must* be an LSQRequest
1068         *  so the response can be correctly handled */
1069        assert(packet->findNextSenderState<LSQRequest>());
1070
1071        if (request->request.isMmappedIpr()) {
1072            ThreadContext *thread =
1073                cpu.getContext(request->request.threadId());
1074
1075            if (request->isLoad) {
1076                DPRINTF(MinorMem, "IPR read inst: %s\n", *(request->inst));
1077                TheISA::handleIprRead(thread, packet);
1078            } else {
1079                DPRINTF(MinorMem, "IPR write inst: %s\n", *(request->inst));
1080                TheISA::handleIprWrite(thread, packet);
1081            }
1082
1083            request->stepToNextPacket();
1084            ret = request->sentAllPackets();
1085
1086            if (!ret) {
1087                DPRINTF(MinorMem, "IPR access has another packet: %s\n",
1088                    *(request->inst));
1089            }
1090
1091            if (ret)
1092                request->setState(LSQRequest::Complete);
1093            else
1094                request->setState(LSQRequest::RequestIssuing);
1095        } else if (dcachePort.sendTimingReq(packet)) {
1096            DPRINTF(MinorMem, "Sent data memory request\n");
1097
1098            numAccessesInMemorySystem++;
1099
1100            request->stepToNextPacket();
1101
1102            ret = request->sentAllPackets();
1103
1104            switch (request->state) {
1105              case LSQRequest::Translated:
1106              case LSQRequest::RequestIssuing:
1107                /* Fully or partially issued a request in the transfers
1108                 *  queue */
1109                request->setState(LSQRequest::RequestIssuing);
1110                break;
1111              case LSQRequest::StoreInStoreBuffer:
1112              case LSQRequest::StoreBufferIssuing:
1113                /* Fully or partially issued a request in the store
1114                 *  buffer */
1115                request->setState(LSQRequest::StoreBufferIssuing);
1116                break;
1117              default:
1118                assert(false);
1119                break;
1120            }
1121
1122            state = MemoryRunning;
1123        } else {
1124            DPRINTF(MinorMem,
1125                "Sending data memory request - needs retry\n");
1126
1127            /* Needs to be resent, wait for that */
1128            state = MemoryNeedsRetry;
1129            retryRequest = request;
1130
1131            switch (request->state) {
1132              case LSQRequest::Translated:
1133              case LSQRequest::RequestIssuing:
1134                request->setState(LSQRequest::RequestNeedsRetry);
1135                break;
1136              case LSQRequest::StoreInStoreBuffer:
1137              case LSQRequest::StoreBufferIssuing:
1138                request->setState(LSQRequest::StoreBufferNeedsRetry);
1139                break;
1140              default:
1141                assert(false);
1142                break;
1143            }
1144        }
1145    }
1146
1147    return ret;
1148}
1149
1150void
1151LSQ::moveFromRequestsToTransfers(LSQRequestPtr request)
1152{
1153    assert(!requests.empty() && requests.front() == request);
1154    assert(transfers.unreservedRemainingSpace() != 0);
1155
1156    /* Need to count the number of stores in the transfers
1157     *  queue so that loads know when their store buffer forwarding
1158     *  results will be correct (only when all those stores
1159     *  have reached the store buffer) */
1160    if (!request->isLoad)
1161        numStoresInTransfers++;
1162
1163    requests.pop();
1164    transfers.push(request);
1165}
1166
1167bool
1168LSQ::canSendToMemorySystem()
1169{
1170    return state == MemoryRunning &&
1171        numAccessesInMemorySystem < inMemorySystemLimit;
1172}
1173
1174bool
1175LSQ::recvTimingResp(PacketPtr response)
1176{
1177    LSQRequestPtr request =
1178        safe_cast<LSQRequestPtr>(response->popSenderState());
1179
1180    DPRINTF(MinorMem, "Received response packet inst: %s"
1181        " addr: 0x%x cmd: %s\n",
1182        *(request->inst), response->getAddr(),
1183        response->cmd.toString());
1184
1185    numAccessesInMemorySystem--;
1186
1187    if (response->isError()) {
1188        DPRINTF(MinorMem, "Received error response packet: %s\n",
1189            *request->inst);
1190    }
1191
1192    switch (request->state) {
1193      case LSQRequest::RequestIssuing:
1194      case LSQRequest::RequestNeedsRetry:
1195        /* Response to a request from the transfers queue */
1196        request->retireResponse(response);
1197
1198        DPRINTF(MinorMem, "Has outstanding packets?: %d %d\n",
1199            request->hasPacketsInMemSystem(), request->isComplete());
1200
1201        break;
1202      case LSQRequest::StoreBufferIssuing:
1203      case LSQRequest::StoreBufferNeedsRetry:
1204        /* Response to a request from the store buffer */
1205        request->retireResponse(response);
1206
1207        /* Remove completed requests unless they are barriers (which will
1208         *  need to be removed in order */
1209        if (request->isComplete()) {
1210            if (!request->isBarrier()) {
1211                storeBuffer.deleteRequest(request);
1212            } else {
1213                DPRINTF(MinorMem, "Completed transfer for barrier: %s"
1214                    " leaving the request as it is also a barrier\n",
1215                    *(request->inst));
1216            }
1217        }
1218        break;
1219      default:
1220        /* Shouldn't be allowed to receive a response from another
1221         *  state */
1222        assert(false);
1223        break;
1224    }
1225
1226    /* We go to idle even if there are more things in the requests queue
1227     * as it's the job of step to actually step us on to the next
1228     * transaction */
1229
1230    /* Let's try and wake up the processor for the next cycle */
1231    cpu.wakeupOnEvent(Pipeline::ExecuteStageId);
1232
1233    /* Never busy */
1234    return true;
1235}
1236
1237void
1238LSQ::recvRetry()
1239{
1240    DPRINTF(MinorMem, "Received retry request\n");
1241
1242    assert(state == MemoryNeedsRetry);
1243
1244    switch (retryRequest->state) {
1245      case LSQRequest::RequestNeedsRetry:
1246        /* Retry in the requests queue */
1247        retryRequest->setState(LSQRequest::Translated);
1248        break;
1249      case LSQRequest::StoreBufferNeedsRetry:
1250        /* Retry in the store buffer */
1251        retryRequest->setState(LSQRequest::StoreInStoreBuffer);
1252        break;
1253      default:
1254        assert(false);
1255    }
1256
1257    /* Set state back to MemoryRunning so that the following
1258     *  tryToSend can actually send.  Note that this won't
1259     *  allow another transfer in as tryToSend should
1260     *  issue a memory request and either succeed for this
1261     *  request or return the LSQ back to MemoryNeedsRetry */
1262    state = MemoryRunning;
1263
1264    /* Try to resend the request */
1265    if (tryToSend(retryRequest)) {
1266        /* Successfully sent, need to move the request */
1267        switch (retryRequest->state) {
1268          case LSQRequest::RequestIssuing:
1269            /* In the requests queue */
1270            moveFromRequestsToTransfers(retryRequest);
1271            break;
1272          case LSQRequest::StoreBufferIssuing:
1273            /* In the store buffer */
1274            storeBuffer.countIssuedStore(retryRequest);
1275            break;
1276          default:
1277            assert(false);
1278            break;
1279        }
1280    }
1281
1282    retryRequest = NULL;
1283}
1284
1285LSQ::LSQ(std::string name_, std::string dcache_port_name_,
1286    MinorCPU &cpu_, Execute &execute_,
1287    unsigned int in_memory_system_limit, unsigned int line_width,
1288    unsigned int requests_queue_size, unsigned int transfers_queue_size,
1289    unsigned int store_buffer_size,
1290    unsigned int store_buffer_cycle_store_limit) :
1291    Named(name_),
1292    cpu(cpu_),
1293    execute(execute_),
1294    dcachePort(dcache_port_name_, *this, cpu_),
1295    lastMemBarrier(0),
1296    state(MemoryRunning),
1297    inMemorySystemLimit(in_memory_system_limit),
1298    lineWidth((line_width == 0 ? cpu.cacheLineSize() : line_width)),
1299    requests(name_ + ".requests", "addr", requests_queue_size),
1300    transfers(name_ + ".transfers", "addr", transfers_queue_size),
1301    storeBuffer(name_ + ".storeBuffer",
1302        *this, store_buffer_size, store_buffer_cycle_store_limit),
1303    numAccessesInMemorySystem(0),
1304    numAccessesInDTLB(0),
1305    numStoresInTransfers(0),
1306    numAccessesIssuedToMemory(0),
1307    retryRequest(NULL),
1308    cacheBlockMask(~(cpu_.cacheLineSize() - 1))
1309{
1310    if (in_memory_system_limit < 1) {
1311        fatal("%s: executeMaxAccessesInMemory must be >= 1 (%d)\n", name_,
1312            in_memory_system_limit);
1313    }
1314
1315    if (store_buffer_cycle_store_limit < 1) {
1316        fatal("%s: executeLSQMaxStoreBufferStoresPerCycle must be"
1317            " >= 1 (%d)\n", name_, store_buffer_cycle_store_limit);
1318    }
1319
1320    if (requests_queue_size < 1) {
1321        fatal("%s: executeLSQRequestsQueueSize must be"
1322            " >= 1 (%d)\n", name_, requests_queue_size);
1323    }
1324
1325    if (transfers_queue_size < 1) {
1326        fatal("%s: executeLSQTransfersQueueSize must be"
1327            " >= 1 (%d)\n", name_, transfers_queue_size);
1328    }
1329
1330    if (store_buffer_size < 1) {
1331        fatal("%s: executeLSQStoreBufferSize must be"
1332            " >= 1 (%d)\n", name_, store_buffer_size);
1333    }
1334
1335    if ((lineWidth & (lineWidth - 1)) != 0) {
1336        fatal("%s: lineWidth: %d must be a power of 2\n", name(), lineWidth);
1337    }
1338}
1339
1340LSQ::~LSQ()
1341{ }
1342
1343LSQ::LSQRequest::~LSQRequest()
1344{
1345    if (packet)
1346        delete packet;
1347    if (data)
1348        delete [] data;
1349}
1350
1351/**
1352 *  Step the memory access mechanism on to its next state.  In reality, most
1353 *  of the stepping is done by the callbacks on the LSQ but this
1354 *  function is responsible for issuing memory requests lodged in the
1355 *  requests queue.
1356 */
1357void
1358LSQ::step()
1359{
1360    /* Try to move address-translated requests between queues and issue
1361     *  them */
1362    if (!requests.empty())
1363        tryToSendToTransfers(requests.front());
1364
1365    storeBuffer.step();
1366}
1367
1368LSQ::LSQRequestPtr
1369LSQ::findResponse(MinorDynInstPtr inst)
1370{
1371    LSQ::LSQRequestPtr ret = NULL;
1372
1373    if (!transfers.empty()) {
1374        LSQRequestPtr request = transfers.front();
1375
1376        /* Same instruction and complete access or a store that's
1377         *  capable of being moved to the store buffer */
1378        if (request->inst->id == inst->id) {
1379            bool complete = request->isComplete();
1380            bool can_store = storeBuffer.canInsert();
1381            bool to_store_buffer = request->state ==
1382                LSQRequest::StoreToStoreBuffer;
1383
1384            if ((complete && !(request->isBarrier() && !can_store)) ||
1385                (to_store_buffer && can_store))
1386            {
1387                ret = request;
1388            }
1389        }
1390    }
1391
1392    if (ret) {
1393        DPRINTF(MinorMem, "Found matching memory response for inst: %s\n",
1394            *inst);
1395    } else {
1396        DPRINTF(MinorMem, "No matching memory response for inst: %s\n",
1397            *inst);
1398    }
1399
1400    return ret;
1401}
1402
1403void
1404LSQ::popResponse(LSQ::LSQRequestPtr response)
1405{
1406    assert(!transfers.empty() && transfers.front() == response);
1407
1408    transfers.pop();
1409
1410    if (!response->isLoad)
1411        numStoresInTransfers--;
1412
1413    if (response->issuedToMemory)
1414        numAccessesIssuedToMemory--;
1415
1416    if (response->state != LSQRequest::StoreInStoreBuffer) {
1417        DPRINTF(MinorMem, "Deleting %s request: %s\n",
1418            (response->isLoad ? "load" : "store"),
1419            *(response->inst));
1420
1421        delete response;
1422    }
1423}
1424
1425void
1426LSQ::sendStoreToStoreBuffer(LSQRequestPtr request)
1427{
1428    assert(request->state == LSQRequest::StoreToStoreBuffer);
1429
1430    DPRINTF(MinorMem, "Sending store: %s to store buffer\n",
1431        *(request->inst));
1432
1433    request->inst->inStoreBuffer = true;
1434
1435    storeBuffer.insert(request);
1436}
1437
1438bool
1439LSQ::isDrained()
1440{
1441    return requests.empty() && transfers.empty() &&
1442        storeBuffer.isDrained();
1443}
1444
1445bool
1446LSQ::needsToTick()
1447{
1448    bool ret = false;
1449
1450    if (canSendToMemorySystem()) {
1451        bool have_translated_requests = !requests.empty() &&
1452            requests.front()->state != LSQRequest::InTranslation &&
1453            transfers.unreservedRemainingSpace() != 0;
1454
1455        ret = have_translated_requests ||
1456            storeBuffer.numUnissuedStores() != 0;
1457    }
1458
1459    if (ret)
1460        DPRINTF(Activity, "Need to tick\n");
1461
1462    return ret;
1463}
1464
1465void
1466LSQ::pushRequest(MinorDynInstPtr inst, bool isLoad, uint8_t *data,
1467    unsigned int size, Addr addr, unsigned int flags, uint64_t *res)
1468{
1469    bool needs_burst = transferNeedsBurst(addr, size, lineWidth);
1470    LSQRequestPtr request;
1471
1472    /* Copy given data into the request.  The request will pass this to the
1473     *  packet and then it will own the data */
1474    uint8_t *request_data = NULL;
1475
1476    DPRINTF(MinorMem, "Pushing request (%s) addr: 0x%x size: %d flags:"
1477        " 0x%x%s lineWidth : 0x%x\n",
1478        (isLoad ? "load" : "store"), addr, size, flags,
1479            (needs_burst ? " (needs burst)" : ""), lineWidth);
1480
1481    if (!isLoad) {
1482        /* request_data becomes the property of a ...DataRequest (see below)
1483         *  and destroyed by its destructor */
1484        request_data = new uint8_t[size];
1485        if (flags & Request::CACHE_BLOCK_ZERO) {
1486            /* For cache zeroing, just use zeroed data */
1487            std::memset(request_data, 0, size);
1488        } else {
1489            std::memcpy(request_data, data, size);
1490        }
1491    }
1492
1493    if (needs_burst) {
1494        request = new SplitDataRequest(
1495            *this, inst, isLoad, request_data, res);
1496    } else {
1497        request = new SingleDataRequest(
1498            *this, inst, isLoad, request_data, res);
1499    }
1500
1501    if (inst->traceData)
1502        inst->traceData->setAddr(addr);
1503
1504    request->request.setThreadContext(cpu.cpuId(), /* thread id */ 0);
1505    request->request.setVirt(0 /* asid */,
1506        addr, size, flags, cpu.instMasterId(),
1507        /* I've no idea why we need the PC, but give it */
1508        inst->pc.instAddr());
1509
1510    requests.push(request);
1511    request->startAddrTranslation();
1512}
1513
1514void
1515LSQ::pushFailedRequest(MinorDynInstPtr inst)
1516{
1517    LSQRequestPtr request = new FailedDataRequest(*this, inst);
1518    requests.push(request);
1519}
1520
void
LSQ::minorTrace() const
{
    /* Emit the LSQ's own summary line, then let each queue report
     *  itself.  The order of these calls fixes the trace output
     *  format, so don't reorder them. */
    MINORTRACE("state=%s in_tlb_mem=%d/%d stores_in_transfers=%d"
        " lastMemBarrier=%d\n",
        state, numAccessesInDTLB, numAccessesInMemorySystem,
        numStoresInTransfers, lastMemBarrier);
    requests.minorTrace();
    transfers.minorTrace();
    storeBuffer.minorTrace();
}
1532
/* Construct an empty store buffer with a fixed number of slots and a
 *  per-cycle issue limit.  No stores are outstanding initially. */
LSQ::StoreBuffer::StoreBuffer(std::string name_, LSQ &lsq_,
    unsigned int store_buffer_size,
    unsigned int store_limit_per_cycle) :
    Named(name_), lsq(lsq_),
    numSlots(store_buffer_size),
    storeLimitPerCycle(store_limit_per_cycle),
    slots(),
    numUnissuedAccesses(0)
{
}
1543
1544PacketPtr
1545makePacketForRequest(Request &request, bool isLoad,
1546    Packet::SenderState *sender_state, PacketDataPtr data)
1547{
1548    MemCmd command;
1549
1550    /* Make a ret with the right command type to match the request */
1551    if (request.isLLSC()) {
1552        command = (isLoad ? MemCmd::LoadLockedReq : MemCmd::StoreCondReq);
1553    } else if (request.isSwap()) {
1554        command = MemCmd::SwapReq;
1555    } else {
1556        command = (isLoad ? MemCmd::ReadReq : MemCmd::WriteReq);
1557    }
1558
1559    PacketPtr ret = new Packet(&request, command);
1560
1561    if (sender_state)
1562        ret->pushSenderState(sender_state);
1563
1564    if (isLoad)
1565        ret->allocate();
1566    else
1567        ret->dataDynamic(data);
1568
1569    return ret;
1570}
1571
void
LSQ::issuedMemBarrierInst(MinorDynInstPtr inst)
{
    /* Must be a real memory-barrier instruction, and barriers must
     *  arrive in program (execute-sequence) order */
    assert(inst->isInst() && inst->staticInst->isMemBarrier());
    assert(inst->id.execSeqNum > lastMemBarrier);

    /* Remember the barrier.  We only have a notion of one
     *  barrier so this may result in some mem refs being
     *  delayed if they are between barriers */
    lastMemBarrier = inst->id.execSeqNum;
}
1583
void
LSQ::LSQRequest::makePacket()
{
    /* Make the function idempotent */
    if (packet)
        return;

    packet = makePacketForRequest(request, isLoad, this, data);
    /* Null the data pointer so we know not to deallocate it when this
     * request is destroyed.  The data now belongs to the packet and
     * the packet is responsible for its destruction */
    data = NULL;
}
1597
1598std::ostream &
1599operator <<(std::ostream &os, LSQ::MemoryState state)
1600{
1601    switch (state) {
1602      case LSQ::MemoryRunning:
1603        os << "MemoryRunning";
1604        break;
1605      case LSQ::MemoryNeedsRetry:
1606        os << "MemoryNeedsRetry";
1607        break;
1608      default:
1609        os << "MemoryState-" << static_cast<int>(state);
1610        break;
1611    }
1612    return os;
1613}
1614
void
LSQ::recvTimingSnoopReq(PacketPtr pkt)
{
    /* LLSC operations in Minor can't be speculative and are executed from
     * the head of the requests queue.  We shouldn't need to do more than
     * this action on snoops. */

    /* THREAD: hardwired to thread context 0 pending multithreading
     *  support in Minor */
    TheISA::handleLockedSnoop(cpu.getContext(0), pkt, cacheBlockMask);
}
1625
1626}
1627