/*
 * Copyright (c) 2004-2006 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Kevin Lim
 *          Korey Sewell
 */
315522Snate@binkert.org
#ifndef __CPU_O3_LSQ_UNIT_HH__
#define __CPU_O3_LSQ_UNIT_HH__

#include <algorithm>
#include <cstring>
#include <map>
#include <queue>
#include <vector>

#include "arch/faults.hh"
#include "arch/locked_mem.hh"
#include "base/fast_alloc.hh"
#include "base/hashmap.hh"
#include "config/full_system.hh"
#include "config/the_isa.hh"
#include "cpu/inst_seq.hh"
#include "mem/packet.hh"
#include "mem/port.hh"
494382Sbinkertn@umich.edu
504382Sbinkertn@umich.educlass DerivO3CPUParams;
514382Sbinkertn@umich.edu
526654Snate@binkert.org/**
535517Snate@binkert.org * Class that implements the actual LQ and SQ for each specific
548614Sgblack@eecs.umich.edu * thread.  Both are circular queues; load entries are freed upon
557674Snate@binkert.org * committing, while store entries are freed once they writeback. The
566143Snate@binkert.org * LSQUnit tracks if there are memory ordering violations, and also
576143Snate@binkert.org * detects partial load to store forwarding cases (a store only has
586143Snate@binkert.org * part of a load's data) that requires the load to wait until the
598233Snate@binkert.org * store writes back. In the former case it holds onto the instruction
608233Snate@binkert.org * until the dependence unit looks at it, and in the latter it stalls
618233Snate@binkert.org * the LSQ until the store writes back. At that point the load is
628233Snate@binkert.org * replayed.
638233Snate@binkert.org */
648334Snate@binkert.orgtemplate <class Impl>
658334Snate@binkert.orgclass LSQUnit {
668233Snate@binkert.org  protected:
678233Snate@binkert.org    typedef TheISA::IntReg IntReg;
688233Snate@binkert.org  public:
698233Snate@binkert.org    typedef typename Impl::O3CPU O3CPU;
708233Snate@binkert.org    typedef typename Impl::DynInstPtr DynInstPtr;
718233Snate@binkert.org    typedef typename Impl::CPUPol::IEW IEW;
726143Snate@binkert.org    typedef typename Impl::CPUPol::LSQ LSQ;
738233Snate@binkert.org    typedef typename Impl::CPUPol::IssueStruct IssueStruct;
748233Snate@binkert.org
758233Snate@binkert.org  public:
766143Snate@binkert.org    /** Constructs an LSQ unit. init() must be called prior to use. */
776143Snate@binkert.org    LSQUnit();
786143Snate@binkert.org
796143Snate@binkert.org    /** Initializes the LSQ unit with the specified number of entries. */
808233Snate@binkert.org    void init(O3CPU *cpu_ptr, IEW *iew_ptr, DerivO3CPUParams *params,
818233Snate@binkert.org            LSQ *lsq_ptr, unsigned maxLQEntries, unsigned maxSQEntries,
828233Snate@binkert.org            unsigned id);
836143Snate@binkert.org
848233Snate@binkert.org    /** Returns the name of the LSQ unit. */
858233Snate@binkert.org    std::string name() const;
868233Snate@binkert.org
878233Snate@binkert.org    /** Registers statistics. */
886143Snate@binkert.org    void regStats();
896143Snate@binkert.org
906143Snate@binkert.org    /** Sets the pointer to the dcache port. */
914762Snate@binkert.org    void setDcachePort(Port *dcache_port);
926143Snate@binkert.org
938233Snate@binkert.org    /** Switches out LSQ unit. */
948233Snate@binkert.org    void switchOut();
958233Snate@binkert.org
968233Snate@binkert.org    /** Takes over from another CPU's thread. */
978233Snate@binkert.org    void takeOverFrom();
986143Snate@binkert.org
998233Snate@binkert.org    /** Returns if the LSQ is switched out. */
1008233Snate@binkert.org    bool isSwitchedOut() { return switchedOut; }
1018233Snate@binkert.org
1028233Snate@binkert.org    /** Ticks the LSQ unit, which in this case only resets the number of
1036143Snate@binkert.org     * used cache ports.
1046143Snate@binkert.org     * @todo: Move the number of used ports up to the LSQ level so it can
1056143Snate@binkert.org     * be shared by all LSQ units.
1066143Snate@binkert.org     */
1076143Snate@binkert.org    void tick() { usedPorts = 0; }
1086143Snate@binkert.org
1096143Snate@binkert.org    /** Inserts an instruction. */
1106143Snate@binkert.org    void insert(DynInstPtr &inst);
1116143Snate@binkert.org    /** Inserts a load instruction. */
1127065Snate@binkert.org    void insertLoad(DynInstPtr &load_inst);
1136143Snate@binkert.org    /** Inserts a store instruction. */
1148233Snate@binkert.org    void insertStore(DynInstPtr &store_inst);
1158233Snate@binkert.org
1168233Snate@binkert.org    /** Executes a load instruction. */
1178233Snate@binkert.org    Fault executeLoad(DynInstPtr &inst);
1188233Snate@binkert.org
1198233Snate@binkert.org    Fault executeLoad(int lq_idx) { panic("Not implemented"); return NoFault; }
1208233Snate@binkert.org    /** Executes a store instruction. */
1218233Snate@binkert.org    Fault executeStore(DynInstPtr &inst);
1228233Snate@binkert.org
1238233Snate@binkert.org    /** Commits the head load. */
1248233Snate@binkert.org    void commitLoad();
1258233Snate@binkert.org    /** Commits loads older than a specific sequence number. */
1268233Snate@binkert.org    void commitLoads(InstSeqNum &youngest_inst);
1278233Snate@binkert.org
1288233Snate@binkert.org    /** Commits stores older than a specific sequence number. */
1298233Snate@binkert.org    void commitStores(InstSeqNum &youngest_inst);
1308233Snate@binkert.org
1318233Snate@binkert.org    /** Writes back stores. */
1328233Snate@binkert.org    void writebackStores();
1338233Snate@binkert.org
1348233Snate@binkert.org    /** Completes the data access that has been returned from the
1358233Snate@binkert.org     * memory system. */
1368233Snate@binkert.org    void completeDataAccess(PacketPtr pkt);
1378233Snate@binkert.org
1388233Snate@binkert.org    /** Clears all the entries in the LQ. */
1398233Snate@binkert.org    void clearLQ();
1408233Snate@binkert.org
1418233Snate@binkert.org    /** Clears all the entries in the SQ. */
1428233Snate@binkert.org    void clearSQ();
1438233Snate@binkert.org
1448233Snate@binkert.org    /** Resizes the LQ to a given size. */
1456143Snate@binkert.org    void resizeLQ(unsigned size);
1466143Snate@binkert.org
1476143Snate@binkert.org    /** Resizes the SQ to a given size. */
1486143Snate@binkert.org    void resizeSQ(unsigned size);
1496143Snate@binkert.org
1506143Snate@binkert.org    /** Squashes all instructions younger than a specific sequence number. */
1519982Satgutier@umich.edu    void squash(const InstSeqNum &squashed_num);
15210196SCurtis.Dunham@arm.com
15310196SCurtis.Dunham@arm.com    /** Returns if there is a memory ordering violation. Value is reset upon
15410196SCurtis.Dunham@arm.com     * call to getMemDepViolator().
15510196SCurtis.Dunham@arm.com     */
15610196SCurtis.Dunham@arm.com    bool violation() { return memDepViolator; }
15710196SCurtis.Dunham@arm.com
15810196SCurtis.Dunham@arm.com    /** Returns the memory ordering violator. */
15910196SCurtis.Dunham@arm.com    DynInstPtr getMemDepViolator();
1606143Snate@binkert.org
1616143Snate@binkert.org    /** Returns if a load became blocked due to the memory system. */
1628945Ssteve.reinhardt@amd.com    bool loadBlocked()
1638233Snate@binkert.org    { return isLoadBlocked; }
1648233Snate@binkert.org
1656143Snate@binkert.org    /** Clears the signal that a load became blocked. */
1668945Ssteve.reinhardt@amd.com    void clearLoadBlocked()
1676143Snate@binkert.org    { isLoadBlocked = false; }
1686143Snate@binkert.org
1696143Snate@binkert.org    /** Returns if the blocked load was handled. */
1706143Snate@binkert.org    bool isLoadBlockedHandled()
1715522Snate@binkert.org    { return loadBlockedHandled; }
1726143Snate@binkert.org
1736143Snate@binkert.org    /** Records the blocked load as being handled. */
1746143Snate@binkert.org    void setLoadBlockedHandled()
1759982Satgutier@umich.edu    { loadBlockedHandled = true; }
1768233Snate@binkert.org
1778233Snate@binkert.org    /** Returns the number of free entries (min of free LQ and SQ entries). */
1788233Snate@binkert.org    unsigned numFreeEntries();
1796143Snate@binkert.org
1806143Snate@binkert.org    /** Returns the number of loads ready to execute. */
1816143Snate@binkert.org    int numLoadsReady();
1826143Snate@binkert.org
1835522Snate@binkert.org    /** Returns the number of loads in the LQ. */
1845522Snate@binkert.org    int numLoads() { return loads; }
1855522Snate@binkert.org
1865522Snate@binkert.org    /** Returns the number of stores in the SQ. */
1875604Snate@binkert.org    int numStores() { return stores; }
1885604Snate@binkert.org
1896143Snate@binkert.org    /** Returns if either the LQ or SQ is full. */
1906143Snate@binkert.org    bool isFull() { return lqFull() || sqFull(); }
1914762Snate@binkert.org
1924762Snate@binkert.org    /** Returns if the LQ is full. */
1936143Snate@binkert.org    bool lqFull() { return loads >= (LQEntries - 1); }
1946727Ssteve.reinhardt@amd.com
1956727Ssteve.reinhardt@amd.com    /** Returns if the SQ is full. */
1966727Ssteve.reinhardt@amd.com    bool sqFull() { return stores >= (SQEntries - 1); }
1974762Snate@binkert.org
1986143Snate@binkert.org    /** Returns the number of instructions in the LSQ. */
1996143Snate@binkert.org    unsigned getCount() { return loads + stores; }
2006143Snate@binkert.org
2016143Snate@binkert.org    /** Returns if there are any stores to writeback. */
2026727Ssteve.reinhardt@amd.com    bool hasStoresToWB() { return storesToWB; }
2036143Snate@binkert.org
2047674Snate@binkert.org    /** Returns the number of stores to writeback. */
2057674Snate@binkert.org    int numStoresToWB() { return storesToWB; }
2065604Snate@binkert.org
2076143Snate@binkert.org    /** Returns if the LSQ unit will writeback on this cycle. */
2086143Snate@binkert.org    bool willWB() { return storeQueue[storeWBIdx].canWB &&
2096143Snate@binkert.org                        !storeQueue[storeWBIdx].completed &&
2104762Snate@binkert.org                        !isStoreBlocked; }
2116143Snate@binkert.org
2124762Snate@binkert.org    /** Handles doing the retry. */
2134762Snate@binkert.org    void recvRetry();
2144762Snate@binkert.org
2156143Snate@binkert.org  private:
2166143Snate@binkert.org    /** Writes back the instruction, sending it to IEW. */
2174762Snate@binkert.org    void writeback(DynInstPtr &inst, PacketPtr pkt);
2188233Snate@binkert.org
2198233Snate@binkert.org    /** Writes back a store that couldn't be completed the previous cycle. */
2208233Snate@binkert.org    void writebackPendingStore();
2218233Snate@binkert.org
2226143Snate@binkert.org    /** Handles completing the send of a store to memory. */
2236143Snate@binkert.org    void storePostSend(PacketPtr pkt);
2244762Snate@binkert.org
2256143Snate@binkert.org    /** Completes the store at the specified index. */
2264762Snate@binkert.org    void completeStore(int store_idx);
2276143Snate@binkert.org
2284762Snate@binkert.org    /** Attempts to send a store to the cache. */
2296143Snate@binkert.org    bool sendStore(PacketPtr data_pkt);
2308233Snate@binkert.org
2318233Snate@binkert.org    /** Increments the given store index (circular queue). */
2328233Snate@binkert.org    inline void incrStIdx(int &store_idx);
2336143Snate@binkert.org    /** Decrements the given store index (circular queue). */
2346143Snate@binkert.org    inline void decrStIdx(int &store_idx);
2356143Snate@binkert.org    /** Increments the given load index (circular queue). */
2366143Snate@binkert.org    inline void incrLdIdx(int &load_idx);
2376143Snate@binkert.org    /** Decrements the given load index (circular queue). */
2386143Snate@binkert.org    inline void decrLdIdx(int &load_idx);
2396143Snate@binkert.org
2406143Snate@binkert.org  public:
2418233Snate@binkert.org    /** Debugging function to dump instructions in the LSQ. */
2428233Snate@binkert.org    void dumpInsts();
243955SN/A
2449396Sandreas.hansson@arm.com  private:
2459396Sandreas.hansson@arm.com    /** Pointer to the CPU. */
2469396Sandreas.hansson@arm.com    O3CPU *cpu;
2479396Sandreas.hansson@arm.com
2489396Sandreas.hansson@arm.com    /** Pointer to the IEW stage. */
2499396Sandreas.hansson@arm.com    IEW *iewStage;
2509396Sandreas.hansson@arm.com
2519396Sandreas.hansson@arm.com    /** Pointer to the LSQ. */
2529396Sandreas.hansson@arm.com    LSQ *lsq;
2539396Sandreas.hansson@arm.com
2549396Sandreas.hansson@arm.com    /** Pointer to the dcache port.  Used only for sending. */
2559396Sandreas.hansson@arm.com    Port *dcachePort;
2569396Sandreas.hansson@arm.com
2579930Sandreas.hansson@arm.com    /** Derived class to hold any sender state the LSQ needs. */
2589930Sandreas.hansson@arm.com    class LSQSenderState : public Packet::SenderState, public FastAlloc
2599396Sandreas.hansson@arm.com    {
2608235Snate@binkert.org      public:
2618235Snate@binkert.org        /** Default constructor. */
2626143Snate@binkert.org        LSQSenderState()
2638235Snate@binkert.org            : noWB(false), isSplit(false), pktToSend(false), outstanding(1),
2649003SAli.Saidi@ARM.com              mainPkt(NULL), pendingPacket(NULL)
2658235Snate@binkert.org        { }
2668235Snate@binkert.org
2678235Snate@binkert.org        /** Instruction who initiated the access to memory. */
2688235Snate@binkert.org        DynInstPtr inst;
2698235Snate@binkert.org        /** Whether or not it is a load. */
2708235Snate@binkert.org        bool isLoad;
2718235Snate@binkert.org        /** The LQ/SQ index of the instruction. */
2728235Snate@binkert.org        int idx;
2738235Snate@binkert.org        /** Whether or not the instruction will need to writeback. */
2748235Snate@binkert.org        bool noWB;
2758235Snate@binkert.org        /** Whether or not this access is split in two. */
2768235Snate@binkert.org        bool isSplit;
2778235Snate@binkert.org        /** Whether or not there is a packet that needs sending. */
2788235Snate@binkert.org        bool pktToSend;
2799003SAli.Saidi@ARM.com        /** Number of outstanding packets to complete. */
2808235Snate@binkert.org        int outstanding;
2815584Snate@binkert.org        /** The main packet from a split load, used during writeback. */
2824382Sbinkertn@umich.edu        PacketPtr mainPkt;
2834202Sbinkertn@umich.edu        /** A second packet from a split store that needs sending. */
2844382Sbinkertn@umich.edu        PacketPtr pendingPacket;
2854382Sbinkertn@umich.edu
2864382Sbinkertn@umich.edu        /** Completes a packet and returns whether the access is finished. */
2879396Sandreas.hansson@arm.com        inline bool complete() { return --outstanding == 0; }
2885584Snate@binkert.org    };
2894382Sbinkertn@umich.edu
2904382Sbinkertn@umich.edu    /** Writeback event, specifically for when stores forward data to loads. */
2914382Sbinkertn@umich.edu    class WritebackEvent : public Event {
2928232Snate@binkert.org      public:
2935192Ssaidi@eecs.umich.edu        /** Constructs a writeback event. */
2948232Snate@binkert.org        WritebackEvent(DynInstPtr &_inst, PacketPtr pkt, LSQUnit *lsq_ptr);
2958232Snate@binkert.org
2968232Snate@binkert.org        /** Processes the writeback event. */
2975192Ssaidi@eecs.umich.edu        void process();
2988232Snate@binkert.org
2995192Ssaidi@eecs.umich.edu        /** Returns the description of this event. */
3005799Snate@binkert.org        const char *description() const;
3018232Snate@binkert.org
3025192Ssaidi@eecs.umich.edu      private:
3035192Ssaidi@eecs.umich.edu        /** Instruction whose results are being written back. */
3045192Ssaidi@eecs.umich.edu        DynInstPtr inst;
3058232Snate@binkert.org
3065192Ssaidi@eecs.umich.edu        /** The packet that would have been sent to memory. */
3078232Snate@binkert.org        PacketPtr pkt;
3085192Ssaidi@eecs.umich.edu
3095192Ssaidi@eecs.umich.edu        /** The pointer to the LSQ unit that issued the store. */
3105192Ssaidi@eecs.umich.edu        LSQUnit<Impl> *lsqPtr;
3115192Ssaidi@eecs.umich.edu    };
3124382Sbinkertn@umich.edu
3134382Sbinkertn@umich.edu  public:
3144382Sbinkertn@umich.edu    struct SQEntry {
3152667Sstever@eecs.umich.edu        /** Constructs an empty store queue entry. */
3162667Sstever@eecs.umich.edu        SQEntry()
3172667Sstever@eecs.umich.edu            : inst(NULL), req(NULL), size(0),
3182667Sstever@eecs.umich.edu              canWB(0), committed(0), completed(0)
3192667Sstever@eecs.umich.edu        {
3202667Sstever@eecs.umich.edu            std::memset(data, 0, sizeof(data));
3215742Snate@binkert.org        }
3225742Snate@binkert.org
3235742Snate@binkert.org        /** Constructs a store queue entry for a given instruction. */
3245793Snate@binkert.org        SQEntry(DynInstPtr &_inst)
3258334Snate@binkert.org            : inst(_inst), req(NULL), sreqLow(NULL), sreqHigh(NULL), size(0),
3265793Snate@binkert.org              isSplit(0), canWB(0), committed(0), completed(0)
3275793Snate@binkert.org        {
3285793Snate@binkert.org            std::memset(data, 0, sizeof(data));
3294382Sbinkertn@umich.edu        }
3304762Snate@binkert.org
3315344Sstever@gmail.com        /** The store instruction. */
3324382Sbinkertn@umich.edu        DynInstPtr inst;
3335341Sstever@gmail.com        /** The request for the store. */
3345742Snate@binkert.org        RequestPtr req;
3355742Snate@binkert.org        /** The split requests for the store. */
3365742Snate@binkert.org        RequestPtr sreqLow;
3375742Snate@binkert.org        RequestPtr sreqHigh;
3385742Snate@binkert.org        /** The size of the store. */
3394762Snate@binkert.org        int size;
3405742Snate@binkert.org        /** The store data. */
3415742Snate@binkert.org        char data[sizeof(IntReg)];
3427722Sgblack@eecs.umich.edu        /** Whether or not the store is split into two requests. */
3435742Snate@binkert.org        bool isSplit;
3445742Snate@binkert.org        /** Whether or not the store can writeback. */
3455742Snate@binkert.org        bool canWB;
3469930Sandreas.hansson@arm.com        /** Whether or not the store is committed. */
3479930Sandreas.hansson@arm.com        bool committed;
3489930Sandreas.hansson@arm.com        /** Whether or not the store is completed. */
3499930Sandreas.hansson@arm.com        bool completed;
3509930Sandreas.hansson@arm.com    };
3515742Snate@binkert.org
3528242Sbradley.danofsky@amd.com  private:
3538242Sbradley.danofsky@amd.com    /** The LSQUnit thread id. */
3548242Sbradley.danofsky@amd.com    ThreadID lsqID;
3558242Sbradley.danofsky@amd.com
3565341Sstever@gmail.com    /** The store queue. */
3575742Snate@binkert.org    std::vector<SQEntry> storeQueue;
3587722Sgblack@eecs.umich.edu
3594773Snate@binkert.org    /** The load queue. */
3606108Snate@binkert.org    std::vector<DynInstPtr> loadQueue;
3611858SN/A
3621085SN/A    /** The number of LQ entries, plus a sentinel entry (circular queue).
3636658Snate@binkert.org     *  @todo: Consider having var that records the true number of LQ entries.
3646658Snate@binkert.org     */
3657673Snate@binkert.org    unsigned LQEntries;
3666658Snate@binkert.org    /** The number of SQ entries, plus a sentinel entry (circular queue).
3676658Snate@binkert.org     *  @todo: Consider having var that records the true number of SQ entries.
3686658Snate@binkert.org     */
3696658Snate@binkert.org    unsigned SQEntries;
3706658Snate@binkert.org
3716658Snate@binkert.org    /** The number of load instructions in the LQ. */
3726658Snate@binkert.org    int loads;
3737673Snate@binkert.org    /** The number of store instructions in the SQ. */
3747673Snate@binkert.org    int stores;
3757673Snate@binkert.org    /** The number of store instructions in the SQ waiting to writeback. */
3767673Snate@binkert.org    int storesToWB;
3777673Snate@binkert.org
3787673Snate@binkert.org    /** The index of the head instruction in the LQ. */
3797673Snate@binkert.org    int loadHead;
3806658Snate@binkert.org    /** The index of the tail instruction in the LQ. */
3817673Snate@binkert.org    int loadTail;
3827673Snate@binkert.org
3837673Snate@binkert.org    /** The index of the head instruction in the SQ. */
3847673Snate@binkert.org    int storeHead;
3857673Snate@binkert.org    /** The index of the first instruction that may be ready to be
3867673Snate@binkert.org     * written back, and has not yet been written back.
3879048SAli.Saidi@ARM.com     */
3887673Snate@binkert.org    int storeWBIdx;
3897673Snate@binkert.org    /** The index of the tail instruction in the SQ. */
3907673Snate@binkert.org    int storeTail;
3917673Snate@binkert.org
3926658Snate@binkert.org    /// @todo Consider moving to a more advanced model with write vs read ports
3937756SAli.Saidi@ARM.com    /** The number of cache ports available each cycle. */
3947816Ssteve.reinhardt@amd.com    int cachePorts;
3956658Snate@binkert.org
3964382Sbinkertn@umich.edu    /** The number of used cache ports in this cycle. */
3974382Sbinkertn@umich.edu    int usedPorts;
3984762Snate@binkert.org
3994762Snate@binkert.org    /** Is the LSQ switched out. */
4004762Snate@binkert.org    bool switchedOut;
4016654Snate@binkert.org
4026654Snate@binkert.org    //list<InstSeqNum> mshrSeqNums;
4035517Snate@binkert.org
4045517Snate@binkert.org    /** Wire to read information from the issue stage time queue. */
4055517Snate@binkert.org    typename TimeBuffer<IssueStruct>::wire fromIssue;
4065517Snate@binkert.org
4075517Snate@binkert.org    /** Whether or not the LSQ is stalled. */
4085517Snate@binkert.org    bool stalled;
4095517Snate@binkert.org    /** The store that causes the stall due to partial store to load
4105517Snate@binkert.org     * forwarding.
4115517Snate@binkert.org     */
4125517Snate@binkert.org    InstSeqNum stallingStoreIsn;
4135517Snate@binkert.org    /** The index of the above store. */
4145517Snate@binkert.org    int stallingLoadIdx;
4155517Snate@binkert.org
4165517Snate@binkert.org    /** The packet that needs to be retried. */
4175517Snate@binkert.org    PacketPtr retryPkt;
4185517Snate@binkert.org
4195517Snate@binkert.org    /** Whehter or not a store is blocked due to the memory system. */
4206654Snate@binkert.org    bool isStoreBlocked;
4215517Snate@binkert.org
4225517Snate@binkert.org    /** Whether or not a load is blocked due to the memory system. */
4235517Snate@binkert.org    bool isLoadBlocked;
4245517Snate@binkert.org
4255517Snate@binkert.org    /** Has the blocked load been handled. */
4265517Snate@binkert.org    bool loadBlockedHandled;
4275517Snate@binkert.org
4285517Snate@binkert.org    /** The sequence number of the blocked load. */
4296143Snate@binkert.org    InstSeqNum blockedLoadSeqNum;
4306654Snate@binkert.org
4315517Snate@binkert.org    /** The oldest load that caused a memory ordering violation. */
4325517Snate@binkert.org    DynInstPtr memDepViolator;
4335517Snate@binkert.org
4345517Snate@binkert.org    /** Whether or not there is a packet that couldn't be sent because of
4355517Snate@binkert.org     * a lack of cache ports. */
4365517Snate@binkert.org    bool hasPendingPkt;
4375517Snate@binkert.org
4385517Snate@binkert.org    /** The packet that is pending free cache ports. */
4395517Snate@binkert.org    PacketPtr pendingPkt;
4405517Snate@binkert.org
4415517Snate@binkert.org    // Will also need how many read/write ports the Dcache has.  Or keep track
4425517Snate@binkert.org    // of that in stage that is one level up, and only call executeLoad/Store
4435517Snate@binkert.org    // the appropriate number of times.
4445517Snate@binkert.org    /** Total number of loads forwaded from LSQ stores. */
4456654Snate@binkert.org    Stats::Scalar lsqForwLoads;
4466654Snate@binkert.org
4475517Snate@binkert.org    /** Total number of loads ignored due to invalid addresses. */
4485517Snate@binkert.org    Stats::Scalar invAddrLoads;
4496143Snate@binkert.org
4506143Snate@binkert.org    /** Total number of squashed loads. */
4516143Snate@binkert.org    Stats::Scalar lsqSquashedLoads;
4526727Ssteve.reinhardt@amd.com
4535517Snate@binkert.org    /** Total number of responses from the memory system that are
4546727Ssteve.reinhardt@amd.com     * ignored due to the instruction already being squashed. */
4555517Snate@binkert.org    Stats::Scalar lsqIgnoredResponses;
4565517Snate@binkert.org
4575517Snate@binkert.org    /** Tota number of memory ordering violations. */
4586654Snate@binkert.org    Stats::Scalar lsqMemOrderViolation;
4596654Snate@binkert.org
4607673Snate@binkert.org    /** Total number of squashed stores. */
4616654Snate@binkert.org    Stats::Scalar lsqSquashedStores;
4626654Snate@binkert.org
4636654Snate@binkert.org    /** Total number of software prefetches ignored due to invalid addresses. */
4646654Snate@binkert.org    Stats::Scalar invAddrSwpfs;
4655517Snate@binkert.org
4665517Snate@binkert.org    /** Ready loads blocked due to partial store-forwarding. */
4675517Snate@binkert.org    Stats::Scalar lsqBlockedLoads;
4686143Snate@binkert.org
4695517Snate@binkert.org    /** Number of loads that were rescheduled. */
4704762Snate@binkert.org    Stats::Scalar lsqRescheduledLoads;
4715517Snate@binkert.org
4725517Snate@binkert.org    /** Number of times the LSQ is blocked due to the cache. */
4736143Snate@binkert.org    Stats::Scalar lsqCacheBlocked;
4746143Snate@binkert.org
4755517Snate@binkert.org  public:
4765517Snate@binkert.org    /** Executes the load at the given index. */
4775517Snate@binkert.org    template <class T>
4785517Snate@binkert.org    Fault read(Request *req, Request *sreqLow, Request *sreqHigh, T &data,
4795517Snate@binkert.org               int load_idx);
4805517Snate@binkert.org
4815517Snate@binkert.org    /** Executes the store at the given index. */
4825517Snate@binkert.org    template <class T>
4835517Snate@binkert.org    Fault write(Request *req, Request *sreqLow, Request *sreqHigh, T &data,
4849338SAndreas.Sandberg@arm.com                int store_idx);
4859338SAndreas.Sandberg@arm.com
4869338SAndreas.Sandberg@arm.com    /** Returns the index of the head load instruction. */
4879338SAndreas.Sandberg@arm.com    int getLoadHead() { return loadHead; }
4889338SAndreas.Sandberg@arm.com    /** Returns the sequence number of the head load instruction. */
4899338SAndreas.Sandberg@arm.com    InstSeqNum getLoadHeadSeqNum()
4908596Ssteve.reinhardt@amd.com    {
4918596Ssteve.reinhardt@amd.com        if (loadQueue[loadHead]) {
4928596Ssteve.reinhardt@amd.com            return loadQueue[loadHead]->seqNum;
4938596Ssteve.reinhardt@amd.com        } else {
4948596Ssteve.reinhardt@amd.com            return 0;
4958596Ssteve.reinhardt@amd.com        }
4968596Ssteve.reinhardt@amd.com
4976143Snate@binkert.org    }
4985517Snate@binkert.org
4996654Snate@binkert.org    /** Returns the index of the head store instruction. */
5006654Snate@binkert.org    int getStoreHead() { return storeHead; }
5016654Snate@binkert.org    /** Returns the sequence number of the head store instruction. */
5026654Snate@binkert.org    InstSeqNum getStoreHeadSeqNum()
5036654Snate@binkert.org    {
5046654Snate@binkert.org        if (storeQueue[storeHead].inst) {
5055517Snate@binkert.org            return storeQueue[storeHead].inst->seqNum;
5065517Snate@binkert.org        } else {
5075517Snate@binkert.org            return 0;
5088596Ssteve.reinhardt@amd.com        }
5098596Ssteve.reinhardt@amd.com
5104762Snate@binkert.org    }
5114762Snate@binkert.org
5124762Snate@binkert.org    /** Returns whether or not the LSQ unit is stalled. */
5134762Snate@binkert.org    bool isStalled()  { return stalled; }
5144762Snate@binkert.org};
5154762Snate@binkert.org
5167675Snate@binkert.orgtemplate <class Impl>
5174762Snate@binkert.orgtemplate <class T>
5184762Snate@binkert.orgFault
5194762Snate@binkert.orgLSQUnit<Impl>::read(Request *req, Request *sreqLow, Request *sreqHigh,
5204762Snate@binkert.org                    T &data, int load_idx)
5214382Sbinkertn@umich.edu{
5224382Sbinkertn@umich.edu    DynInstPtr load_inst = loadQueue[load_idx];
5235517Snate@binkert.org
5246654Snate@binkert.org    assert(load_inst);
5255517Snate@binkert.org
5268126Sgblack@eecs.umich.edu    assert(!load_inst->isExecuted());
5276654Snate@binkert.org
5287673Snate@binkert.org    // Make sure this isn't an uncacheable access
5296654Snate@binkert.org    // A bit of a hackish way to get uncached accesses to work only if they're
5306654Snate@binkert.org    // at the head of the LSQ and are ready to commit (at the head of the ROB
5316654Snate@binkert.org    // too).
5326654Snate@binkert.org    if (req->isUncacheable() &&
5336654Snate@binkert.org        (load_idx != loadHead || !load_inst->isAtCommit())) {
5346654Snate@binkert.org        iewStage->rescheduleMemInst(load_inst);
5356654Snate@binkert.org        ++lsqRescheduledLoads;
5366669Snate@binkert.org
5376669Snate@binkert.org        // Must delete request now that it wasn't handed off to
5386669Snate@binkert.org        // memory.  This is quite ugly.  @todo: Figure out the proper
5396669Snate@binkert.org        // place to really handle request deletes.
5406669Snate@binkert.org        delete req;
5416669Snate@binkert.org        if (TheISA::HasUnalignedMemAcc && sreqLow) {
5426654Snate@binkert.org            delete sreqLow;
5437673Snate@binkert.org            delete sreqHigh;
5445517Snate@binkert.org        }
5458126Sgblack@eecs.umich.edu        return TheISA::genMachineCheckFault();
5465798Snate@binkert.org    }
5477756SAli.Saidi@ARM.com
5487816Ssteve.reinhardt@amd.com    // Check the SQ for any previous stores that might lead to forwarding
5495798Snate@binkert.org    int store_idx = load_inst->sqIdx;
5505798Snate@binkert.org
5515517Snate@binkert.org    int store_size = 0;
5525517Snate@binkert.org
5537673Snate@binkert.org    DPRINTF(LSQUnit, "Read called, load idx: %i, store idx: %i, "
5545517Snate@binkert.org            "storeHead: %i addr: %#x%s\n",
5555517Snate@binkert.org            load_idx, store_idx, storeHead, req->getPaddr(),
5567673Snate@binkert.org            sreqLow ? " split" : "");
5577673Snate@binkert.org
5585517Snate@binkert.org    if (req->isLLSC()) {
5595798Snate@binkert.org        assert(!sreqLow);
5605798Snate@binkert.org        // Disable recording the result temporarily.  Writing to misc
5618333Snate@binkert.org        // regs normally updates the result, but this is not the
5627816Ssteve.reinhardt@amd.com        // desired behavior when handling store conditionals.
5635798Snate@binkert.org        load_inst->recordResult = false;
5645798Snate@binkert.org        TheISA::handleLockedRead(load_inst.get(), req);
5654762Snate@binkert.org        load_inst->recordResult = true;
5664762Snate@binkert.org    }
5674762Snate@binkert.org
5684762Snate@binkert.org    while (store_idx != -1) {
5694762Snate@binkert.org        // End once we've reached the top of the LSQ
5708596Ssteve.reinhardt@amd.com        if (store_idx == storeWBIdx) {
5715517Snate@binkert.org            break;
5725517Snate@binkert.org        }
5735517Snate@binkert.org
5745517Snate@binkert.org        // Move the index to one younger
5755517Snate@binkert.org        if (--store_idx < 0)
5767673Snate@binkert.org            store_idx += SQEntries;
5778596Ssteve.reinhardt@amd.com
5787673Snate@binkert.org        assert(storeQueue[store_idx].inst);
5795517Snate@binkert.org
5808596Ssteve.reinhardt@amd.com        store_size = storeQueue[store_idx].size;
5815517Snate@binkert.org
5825517Snate@binkert.org        if (store_size == 0)
5835517Snate@binkert.org            continue;
5848596Ssteve.reinhardt@amd.com        else if (storeQueue[store_idx].inst->uncacheable())
5855517Snate@binkert.org            continue;
5867673Snate@binkert.org
5877673Snate@binkert.org        assert(storeQueue[store_idx].inst->effAddrValid);
5887673Snate@binkert.org
5895517Snate@binkert.org        // Check if the store data is within the lower and upper bounds of
5905517Snate@binkert.org        // addresses that the request needs.
5915517Snate@binkert.org        bool store_has_lower_limit =
5925517Snate@binkert.org            req->getVaddr() >= storeQueue[store_idx].inst->effAddr;
5935517Snate@binkert.org        bool store_has_upper_limit =
5945517Snate@binkert.org            (req->getVaddr() + req->getSize()) <=
5955517Snate@binkert.org            (storeQueue[store_idx].inst->effAddr + store_size);
5967673Snate@binkert.org        bool lower_load_has_store_part =
5977673Snate@binkert.org            req->getVaddr() < (storeQueue[store_idx].inst->effAddr +
5987673Snate@binkert.org                           store_size);
5995517Snate@binkert.org        bool upper_load_has_store_part =
6008596Ssteve.reinhardt@amd.com            (req->getVaddr() + req->getSize()) >
6015517Snate@binkert.org            storeQueue[store_idx].inst->effAddr;
6025517Snate@binkert.org
6035517Snate@binkert.org        // If the store's data has all of the data needed, we can forward.
6045517Snate@binkert.org        if ((store_has_lower_limit && store_has_upper_limit)) {
6055517Snate@binkert.org            // Get shift amount for offset into the store's data.
6067673Snate@binkert.org            int shift_amt = req->getVaddr() & (store_size - 1);
6077673Snate@binkert.org
6087673Snate@binkert.org            memcpy(&data, storeQueue[store_idx].data + shift_amt, sizeof(T));
6095517Snate@binkert.org
6108596Ssteve.reinhardt@amd.com            assert(!load_inst->memData);
6117675Snate@binkert.org            load_inst->memData = new uint8_t[64];
6127675Snate@binkert.org
6137675Snate@binkert.org            memcpy(load_inst->memData,
6147675Snate@binkert.org                    storeQueue[store_idx].data + shift_amt, req->getSize());
6157675Snate@binkert.org
6167675Snate@binkert.org            DPRINTF(LSQUnit, "Forwarding from store idx %i to load to "
6178596Ssteve.reinhardt@amd.com                    "addr %#x, data %#x\n",
6187675Snate@binkert.org                    store_idx, req->getVaddr(), data);
6197675Snate@binkert.org
6208596Ssteve.reinhardt@amd.com            PacketPtr data_pkt = new Packet(req, MemCmd::ReadReq,
6218596Ssteve.reinhardt@amd.com                                            Packet::Broadcast);
6228596Ssteve.reinhardt@amd.com            data_pkt->dataStatic(load_inst->memData);
6238596Ssteve.reinhardt@amd.com
6248596Ssteve.reinhardt@amd.com            WritebackEvent *wb = new WritebackEvent(load_inst, data_pkt, this);
6258596Ssteve.reinhardt@amd.com
6268596Ssteve.reinhardt@amd.com            // We'll say this has a 1 cycle load-store forwarding latency
6278596Ssteve.reinhardt@amd.com            // for now.
6288596Ssteve.reinhardt@amd.com            // @todo: Need to make this a parameter.
6294762Snate@binkert.org            cpu->schedule(wb, curTick);
6306143Snate@binkert.org
6316143Snate@binkert.org            // Don't need to do anything special for split loads.
6326143Snate@binkert.org            if (TheISA::HasUnalignedMemAcc && sreqLow) {
6334762Snate@binkert.org                delete sreqLow;
6344762Snate@binkert.org                delete sreqHigh;
6354762Snate@binkert.org            }
6367756SAli.Saidi@ARM.com
6378596Ssteve.reinhardt@amd.com            ++lsqForwLoads;
6384762Snate@binkert.org            return NoFault;
6394762Snate@binkert.org        } else if ((store_has_lower_limit && lower_load_has_store_part) ||
6408596Ssteve.reinhardt@amd.com                   (store_has_upper_limit && upper_load_has_store_part) ||
6415463Snate@binkert.org                   (lower_load_has_store_part && upper_load_has_store_part)) {
6428596Ssteve.reinhardt@amd.com            // This is the partial store-load forwarding case where a store
6438596Ssteve.reinhardt@amd.com            // has only part of the load's data.
6445463Snate@binkert.org
6457756SAli.Saidi@ARM.com            // If it's already been written back, then don't worry about
6468596Ssteve.reinhardt@amd.com            // stalling on it.
6474762Snate@binkert.org            if (storeQueue[store_idx].completed) {
6487677Snate@binkert.org                panic("Should not check one of these");
6494762Snate@binkert.org                continue;
6504762Snate@binkert.org            }
6516143Snate@binkert.org
6526143Snate@binkert.org            // Must stall load and force it to retry, so long as it's the oldest
6536143Snate@binkert.org            // load that needs to do so.
6544762Snate@binkert.org            if (!stalled ||
6554762Snate@binkert.org                (stalled &&
6567756SAli.Saidi@ARM.com                 load_inst->seqNum <
6577816Ssteve.reinhardt@amd.com                 loadQueue[stallingLoadIdx]->seqNum)) {
6584762Snate@binkert.org                stalled = true;
6594762Snate@binkert.org                stallingStoreIsn = storeQueue[store_idx].inst->seqNum;
6604762Snate@binkert.org                stallingLoadIdx = load_idx;
6614762Snate@binkert.org            }
6627756SAli.Saidi@ARM.com
6638596Ssteve.reinhardt@amd.com            // Tell IQ/mem dep unit that this instruction will need to be
6644762Snate@binkert.org            // rescheduled eventually
6654762Snate@binkert.org            iewStage->rescheduleMemInst(load_inst);
6667677Snate@binkert.org            iewStage->decrWb(load_inst->seqNum);
6677756SAli.Saidi@ARM.com            load_inst->clearIssued();
6688596Ssteve.reinhardt@amd.com            ++lsqRescheduledLoads;
6697675Snate@binkert.org
6707677Snate@binkert.org            // Do not generate a writeback event as this instruction is not
6715517Snate@binkert.org            // complete.
6728596Ssteve.reinhardt@amd.com            DPRINTF(LSQUnit, "Load-store forwarding mis-match. "
6739248SAndreas.Sandberg@arm.com                    "Store idx %i to load addr %#x\n",
6749248SAndreas.Sandberg@arm.com                    store_idx, req->getVaddr());
6759248SAndreas.Sandberg@arm.com
6769248SAndreas.Sandberg@arm.com            // Must delete request now that it wasn't handed off to
6778596Ssteve.reinhardt@amd.com            // memory.  This is quite ugly.  @todo: Figure out the
6788596Ssteve.reinhardt@amd.com            // proper place to really handle request deletes.
6798596Ssteve.reinhardt@amd.com            delete req;
6809248SAndreas.Sandberg@arm.com            if (TheISA::HasUnalignedMemAcc && sreqLow) {
6818596Ssteve.reinhardt@amd.com                delete sreqLow;
6824762Snate@binkert.org                delete sreqHigh;
6837674Snate@binkert.org            }
6847674Snate@binkert.org
6857674Snate@binkert.org            return NoFault;
6867674Snate@binkert.org        }
6877674Snate@binkert.org    }
6887674Snate@binkert.org
6897674Snate@binkert.org    // If there's no forwarding case, then go access memory
6907674Snate@binkert.org    DPRINTF(LSQUnit, "Doing memory access for inst [sn:%lli] PC %#x\n",
6917674Snate@binkert.org            load_inst->seqNum, load_inst->readPC());
6927674Snate@binkert.org
6937674Snate@binkert.org    assert(!load_inst->memData);
6947674Snate@binkert.org    load_inst->memData = new uint8_t[64];
6957674Snate@binkert.org
6967674Snate@binkert.org    ++usedPorts;
6977674Snate@binkert.org
6984762Snate@binkert.org    // if we the cache is not blocked, do cache access
6996143Snate@binkert.org    bool completedFirst = false;
7006143Snate@binkert.org    if (!lsq->cacheBlocked()) {
7017756SAli.Saidi@ARM.com        MemCmd command =
7027816Ssteve.reinhardt@amd.com            req->isLLSC() ? MemCmd::LoadLockedReq : MemCmd::ReadReq;
7038235Snate@binkert.org        PacketPtr data_pkt = new Packet(req, command, Packet::Broadcast);
7048596Ssteve.reinhardt@amd.com        PacketPtr fst_data_pkt = NULL;
7057756SAli.Saidi@ARM.com        PacketPtr snd_data_pkt = NULL;
7067816Ssteve.reinhardt@amd.com
7078235Snate@binkert.org        data_pkt->dataStatic(load_inst->memData);
7084382Sbinkertn@umich.edu
7099396Sandreas.hansson@arm.com        LSQSenderState *state = new LSQSenderState;
7109396Sandreas.hansson@arm.com        state->isLoad = true;
7119396Sandreas.hansson@arm.com        state->idx = load_idx;
7129396Sandreas.hansson@arm.com        state->inst = load_inst;
7139396Sandreas.hansson@arm.com        data_pkt->senderState = state;
7149396Sandreas.hansson@arm.com
7159396Sandreas.hansson@arm.com        if (!TheISA::HasUnalignedMemAcc || !sreqLow) {
7169396Sandreas.hansson@arm.com
7179396Sandreas.hansson@arm.com            // Point the first packet at the main data packet.
7189396Sandreas.hansson@arm.com            fst_data_pkt = data_pkt;
7199396Sandreas.hansson@arm.com        } else {
7209396Sandreas.hansson@arm.com
7219396Sandreas.hansson@arm.com            // Create the split packets.
7229396Sandreas.hansson@arm.com            fst_data_pkt = new Packet(sreqLow, command, Packet::Broadcast);
7239396Sandreas.hansson@arm.com            snd_data_pkt = new Packet(sreqHigh, command, Packet::Broadcast);
7249396Sandreas.hansson@arm.com
7259396Sandreas.hansson@arm.com            fst_data_pkt->dataStatic(load_inst->memData);
7269396Sandreas.hansson@arm.com            snd_data_pkt->dataStatic(load_inst->memData + sreqLow->getSize());
7278232Snate@binkert.org
7288232Snate@binkert.org            fst_data_pkt->senderState = state;
7298232Snate@binkert.org            snd_data_pkt->senderState = state;
7308232Snate@binkert.org
7318232Snate@binkert.org            state->isSplit = true;
7326229Snate@binkert.org            state->outstanding = 2;
7338232Snate@binkert.org            state->mainPkt = data_pkt;
7348232Snate@binkert.org        }
7358232Snate@binkert.org
7366229Snate@binkert.org        if (!dcachePort->sendTiming(fst_data_pkt)) {
7377673Snate@binkert.org            // Delete state and data packet because a load retry
7385517Snate@binkert.org            // initiates a pipeline restart; it does not retry.
7395517Snate@binkert.org            delete state;
7407673Snate@binkert.org            delete data_pkt->req;
7415517Snate@binkert.org            delete data_pkt;
7425517Snate@binkert.org            if (TheISA::HasUnalignedMemAcc && sreqLow) {
7435517Snate@binkert.org                delete fst_data_pkt->req;
7445517Snate@binkert.org                delete fst_data_pkt;
7458232Snate@binkert.org                delete snd_data_pkt->req;
7467673Snate@binkert.org                delete snd_data_pkt;
7477673Snate@binkert.org                sreqLow = NULL;
7488232Snate@binkert.org                sreqHigh = NULL;
7498232Snate@binkert.org            }
7508232Snate@binkert.org
7518232Snate@binkert.org            req = NULL;
7527673Snate@binkert.org
7535517Snate@binkert.org            // If the access didn't succeed, tell the LSQ by setting
7548232Snate@binkert.org            // the retry thread id.
7558232Snate@binkert.org            lsq->setRetryTid(lsqID);
7568232Snate@binkert.org        } else if (TheISA::HasUnalignedMemAcc && sreqLow) {
7578232Snate@binkert.org            completedFirst = true;
7587673Snate@binkert.org
7598232Snate@binkert.org            // The first packet was sent without problems, so send this one
7608232Snate@binkert.org            // too. If there is a problem with this packet then the whole
7618232Snate@binkert.org            // load will be squashed, so indicate this to the state object.
7628232Snate@binkert.org            // The first packet will return in completeDataAccess and be
7638232Snate@binkert.org            // handled there.
7648232Snate@binkert.org            ++usedPorts;
7657673Snate@binkert.org            if (!dcachePort->sendTiming(snd_data_pkt)) {
7665517Snate@binkert.org
7678232Snate@binkert.org                // The main packet will be deleted in completeDataAccess.
7688232Snate@binkert.org                delete snd_data_pkt->req;
7695517Snate@binkert.org                delete snd_data_pkt;
7707673Snate@binkert.org
7715517Snate@binkert.org                state->complete();
7728232Snate@binkert.org
7738232Snate@binkert.org                req = NULL;
7745517Snate@binkert.org                sreqHigh = NULL;
7758232Snate@binkert.org
7768232Snate@binkert.org                lsq->setRetryTid(lsqID);
7778232Snate@binkert.org            }
7787673Snate@binkert.org        }
7795517Snate@binkert.org    }
7805517Snate@binkert.org
7817673Snate@binkert.org    // If the cache was blocked, or has become blocked due to the access,
7825517Snate@binkert.org    // handle it.
7835517Snate@binkert.org    if (lsq->cacheBlocked()) {
7845517Snate@binkert.org        if (req)
7858232Snate@binkert.org            delete req;
7865517Snate@binkert.org        if (TheISA::HasUnalignedMemAcc && sreqLow && !completedFirst) {
7875517Snate@binkert.org            delete sreqLow;
7888232Snate@binkert.org            delete sreqHigh;
7898232Snate@binkert.org        }
7905517Snate@binkert.org
7918232Snate@binkert.org        ++lsqCacheBlocked;
7928232Snate@binkert.org
7935517Snate@binkert.org        iewStage->decrWb(load_inst->seqNum);
7948232Snate@binkert.org        // There's an older load that's already going to squash.
7958232Snate@binkert.org        if (isLoadBlocked && blockedLoadSeqNum < load_inst->seqNum)
7968232Snate@binkert.org            return NoFault;
7975517Snate@binkert.org
7988232Snate@binkert.org        // Record that the load was blocked due to memory.  This
7998232Snate@binkert.org        // load will squash all instructions after it, be
8008232Snate@binkert.org        // refetched, and re-executed.
8018232Snate@binkert.org        isLoadBlocked = true;
8028232Snate@binkert.org        loadBlockedHandled = false;
8038232Snate@binkert.org        blockedLoadSeqNum = load_inst->seqNum;
8045517Snate@binkert.org        // No fault occurred, even though the interface is blocked.
8058232Snate@binkert.org        return NoFault;
8068232Snate@binkert.org    }
8075517Snate@binkert.org
8088232Snate@binkert.org    return NoFault;
8097673Snate@binkert.org}
8105517Snate@binkert.org
8117673Snate@binkert.orgtemplate <class Impl>
8125517Snate@binkert.orgtemplate <class T>
8138232Snate@binkert.orgFault
8148232Snate@binkert.orgLSQUnit<Impl>::write(Request *req, Request *sreqLow, Request *sreqHigh,
8158232Snate@binkert.org                     T &data, int store_idx)
8165192Ssaidi@eecs.umich.edu{
8178232Snate@binkert.org    assert(storeQueue[store_idx].inst);
8188232Snate@binkert.org
8198232Snate@binkert.org    DPRINTF(LSQUnit, "Doing write to store idx %i, addr %#x data %#x"
8208232Snate@binkert.org            " | storeHead:%i [sn:%i]\n",
8218232Snate@binkert.org            store_idx, req->getPaddr(), data, storeHead,
8225192Ssaidi@eecs.umich.edu            storeQueue[store_idx].inst->seqNum);
8237674Snate@binkert.org
8245522Snate@binkert.org    storeQueue[store_idx].req = req;
8255522Snate@binkert.org    storeQueue[store_idx].sreqLow = sreqLow;
8267674Snate@binkert.org    storeQueue[store_idx].sreqHigh = sreqHigh;
8277674Snate@binkert.org    storeQueue[store_idx].size = sizeof(T);
8287674Snate@binkert.org
8297674Snate@binkert.org    // Split stores can only occur in ISAs with unaligned memory accesses.  If
8307674Snate@binkert.org    // a store request has been split, sreqLow and sreqHigh will be non-null.
8317674Snate@binkert.org    if (TheISA::HasUnalignedMemAcc && sreqLow) {
8327674Snate@binkert.org        storeQueue[store_idx].isSplit = true;
8337674Snate@binkert.org    }
8345522Snate@binkert.org    assert(sizeof(T) <= sizeof(storeQueue[store_idx].data));
8355522Snate@binkert.org
8365522Snate@binkert.org    T gData = htog(data);
8375517Snate@binkert.org    memcpy(storeQueue[store_idx].data, &gData, sizeof(T));
8385522Snate@binkert.org
8395517Snate@binkert.org    // This function only writes the data to the store queue, so no fault
8406143Snate@binkert.org    // can happen here.
8416727Ssteve.reinhardt@amd.com    return NoFault;
8425522Snate@binkert.org}
8435522Snate@binkert.org
8445522Snate@binkert.org#endif // __CPU_O3_LSQ_UNIT_HH__
8457674Snate@binkert.org