// lsq_unit.hh, revision 8545
/*
 * Copyright (c) 2004-2006 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Kevin Lim
 *          Korey Sewell
 */

#ifndef __CPU_O3_LSQ_UNIT_HH__
#define __CPU_O3_LSQ_UNIT_HH__

#include <algorithm>
#include <cstring>
#include <map>
#include <queue>

#include "arch/faults.hh"
#include "arch/isa_traits.hh"
#include "arch/locked_mem.hh"
#include "arch/mmapped_ipr.hh"
#include "base/fast_alloc.hh"
#include "base/hashmap.hh"
#include "config/full_system.hh"
#include "config/the_isa.hh"
#include "cpu/inst_seq.hh"
#include "cpu/timebuf.hh"
#include "debug/LSQUnit.hh"
#include "mem/packet.hh"
#include "mem/port.hh"

class DerivO3CPUParams;

/**
 * Class that implements the actual LQ and SQ for each specific
 * thread.  Both are circular queues; load entries are freed upon
 * committing, while store entries are freed once they writeback. The
 * LSQUnit tracks if there are memory ordering violations, and also
 * detects partial store-to-load forwarding cases (a store only has
 * part of a load's data), which require the load to wait until the
 * store writes back. In the former case it holds onto the instruction
 * until the dependence unit looks at it, and in the latter it stalls
 * the LSQ until the store writes back. At that point the load is
 * replayed.
 */
template <class Impl>
class LSQUnit {
  public:
    typedef typename Impl::O3CPU O3CPU;
    typedef typename Impl::DynInstPtr DynInstPtr;
    typedef typename Impl::CPUPol::IEW IEW;
    typedef typename Impl::CPUPol::LSQ LSQ;
    typedef typename Impl::CPUPol::IssueStruct IssueStruct;

  public:
    /** Constructs an LSQ unit. init() must be called prior to use. */
    LSQUnit();

    /** Initializes the LSQ unit with the specified number of entries. */
    void init(O3CPU *cpu_ptr, IEW *iew_ptr, DerivO3CPUParams *params,
            LSQ *lsq_ptr, unsigned maxLQEntries, unsigned maxSQEntries,
            unsigned id);

    /** Returns the name of the LSQ unit. */
    std::string name() const;

    /** Registers statistics. */
    void regStats();

    /** Sets the pointer to the dcache port. */
    void setDcachePort(Port *dcache_port);

    /** Switches out LSQ unit. */
    void switchOut();

    /** Takes over from another CPU's thread. */
    void takeOverFrom();

    /** Returns if the LSQ is switched out. */
    bool isSwitchedOut() { return switchedOut; }

    /** Ticks the LSQ unit, which in this case only resets the number of
     * used cache ports.
     * @todo: Move the number of used ports up to the LSQ level so it can
     * be shared by all LSQ units.
     */
    void tick() { usedPorts = 0; }

    /** Inserts an instruction. */
    void insert(DynInstPtr &inst);
    /** Inserts a load instruction. */
    void insertLoad(DynInstPtr &load_inst);
    /** Inserts a store instruction. */
    void insertStore(DynInstPtr &store_inst);

    /** Check for ordering violations in the LSQ. For a store, squash if we
     * ever find a conflicting load. For a load, only squash if an external
     * snoop invalidate has been seen for that load address.
     * @param load_idx index to start checking at
     * @param inst the instruction to check
     */
    Fault checkViolations(int load_idx, DynInstPtr &inst);

    /** Check if an incoming invalidate hits in the LSQ on a load
     * that might have issued out of order with respect to another load
     * because of the intermediate invalidate.
     */
    void checkSnoop(PacketPtr pkt);

    /** Executes a load instruction. */
    Fault executeLoad(DynInstPtr &inst);

    Fault executeLoad(int lq_idx) { panic("Not implemented"); return NoFault; }
    /** Executes a store instruction. */
    Fault executeStore(DynInstPtr &inst);

    /** Commits the head load. */
    void commitLoad();
    /** Commits loads older than a specific sequence number. */
    void commitLoads(InstSeqNum &youngest_inst);

    /** Commits stores older than a specific sequence number. */
    void commitStores(InstSeqNum &youngest_inst);

    /** Writes back stores. */
    void writebackStores();

    /** Completes the data access that has been returned from the
     * memory system. */
    void completeDataAccess(PacketPtr pkt);

    /** Clears all the entries in the LQ. */
    void clearLQ();

    /** Clears all the entries in the SQ. */
    void clearSQ();

    /** Resizes the LQ to a given size. */
    void resizeLQ(unsigned size);

    /** Resizes the SQ to a given size. */
    void resizeSQ(unsigned size);

    /** Squashes all instructions younger than a specific sequence number. */
    void squash(const InstSeqNum &squashed_num);

    /** Returns if there is a memory ordering violation. Value is reset upon
     * call to getMemDepViolator().
     */
    bool violation() { return memDepViolator; }

    /** Returns the memory ordering violator. */
    DynInstPtr getMemDepViolator();

    /** Returns if a load became blocked due to the memory system. */
    bool loadBlocked()
    { return isLoadBlocked; }

    /** Clears the signal that a load became blocked. */
    void clearLoadBlocked()
    { isLoadBlocked = false; }

    /** Returns if the blocked load was handled. */
    bool isLoadBlockedHandled()
    { return loadBlockedHandled; }

    /** Records the blocked load as being handled. */
    void setLoadBlockedHandled()
    { loadBlockedHandled = true; }

    /** Returns the number of free entries (min of free LQ and SQ entries). */
    unsigned numFreeEntries();

    /** Returns the number of loads ready to execute. */
    int numLoadsReady();

    /** Returns the number of loads in the LQ. */
    int numLoads() { return loads; }

    /** Returns the number of stores in the SQ. */
    int numStores() { return stores; }

    /** Returns if either the LQ or SQ is full. */
    bool isFull() { return lqFull() || sqFull(); }

    /** Returns if the LQ is full. */
    bool lqFull() { return loads >= (LQEntries - 1); }

    /** Returns if the SQ is full. */
    bool sqFull() { return stores >= (SQEntries - 1); }
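    // Note: the "- 1" in lqFull() and sqFull() accounts for the sentinel
    // entry each circular queue reserves (see LQEntries/SQEntries below),
    // so a queue reports full one slot before its raw array size.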

    /** Returns the number of instructions in the LSQ. */
    unsigned getCount() { return loads + stores; }

    /** Returns if there are any stores to writeback. */
    bool hasStoresToWB() { return storesToWB; }

    /** Returns the number of stores to writeback. */
    int numStoresToWB() { return storesToWB; }

    /** Returns if the LSQ unit will writeback on this cycle. */
    bool willWB() { return storeQueue[storeWBIdx].canWB &&
                        !storeQueue[storeWBIdx].completed &&
                        !isStoreBlocked; }

    /** Handles doing the retry. */
    void recvRetry();

  private:
    /** Writes back the instruction, sending it to IEW. */
    void writeback(DynInstPtr &inst, PacketPtr pkt);

    /** Writes back a store that couldn't be completed the previous cycle. */
    void writebackPendingStore();

    /** Handles completing the send of a store to memory. */
    void storePostSend(PacketPtr pkt);

    /** Completes the store at the specified index. */
    void completeStore(int store_idx);

    /** Attempts to send a store to the cache. */
    bool sendStore(PacketPtr data_pkt);

    /** Increments the given store index (circular queue). */
    inline void incrStIdx(int &store_idx);
    /** Decrements the given store index (circular queue). */
    inline void decrStIdx(int &store_idx);
    /** Increments the given load index (circular queue). */
    inline void incrLdIdx(int &load_idx);
    /** Decrements the given load index (circular queue). */
    inline void decrLdIdx(int &load_idx);
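    // The index helpers above are defined out of line with the rest of the
    // unit; conceptually each one is a wrap-around update of a circular-queue
    // index.  A minimal sketch (not the actual implementation) of the
    // increment case:
    //
    //     inline void incrStIdx(int &store_idx)
    //     { if (++store_idx >= SQEntries) store_idx = 0; }
    //
    // with the decrement variants wrapping back to SQEntries - 1 (or
    // LQEntries - 1 for the load queue) when the index would go negative.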

  public:
    /** Debugging function to dump instructions in the LSQ. */
    void dumpInsts();

  private:
    /** Pointer to the CPU. */
    O3CPU *cpu;

    /** Pointer to the IEW stage. */
    IEW *iewStage;

    /** Pointer to the LSQ. */
    LSQ *lsq;

    /** Pointer to the dcache port.  Used only for sending. */
    Port *dcachePort;

    /** Derived class to hold any sender state the LSQ needs. */
    class LSQSenderState : public Packet::SenderState, public FastAlloc
    {
      public:
        /** Default constructor. */
        LSQSenderState()
            : noWB(false), isSplit(false), pktToSend(false), outstanding(1),
              mainPkt(NULL), pendingPacket(NULL)
        { }

        /** Instruction that initiated the access to memory. */
        DynInstPtr inst;
        /** Whether or not it is a load. */
        bool isLoad;
        /** The LQ/SQ index of the instruction. */
        int idx;
        /** Set if the instruction does not need to write back its result. */
        bool noWB;
        /** Whether or not this access is split in two. */
        bool isSplit;
        /** Whether or not there is a packet that needs sending. */
        bool pktToSend;
        /** Number of outstanding packets to complete. */
        int outstanding;
        /** The main packet from a split load, used during writeback. */
        PacketPtr mainPkt;
        /** A second packet from a split store that needs sending. */
        PacketPtr pendingPacket;

        /** Completes a packet and returns whether the access is finished. */
        inline bool complete() { return --outstanding == 0; }
    };
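    // For a split access the two packets share a single LSQSenderState:
    // outstanding is raised to 2 when the second packet is created (see
    // read() below), and complete() only returns true for the final
    // response, so the access is treated as finished exactly once.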

    /** Writeback event, specifically for when stores forward data to loads. */
    class WritebackEvent : public Event {
      public:
        /** Constructs a writeback event. */
        WritebackEvent(DynInstPtr &_inst, PacketPtr pkt, LSQUnit *lsq_ptr);

        /** Processes the writeback event. */
        void process();

        /** Returns the description of this event. */
        const char *description() const;

      private:
        /** Instruction whose results are being written back. */
        DynInstPtr inst;

        /** The packet that would have been sent to memory. */
        PacketPtr pkt;

        /** The pointer to the LSQ unit that issued the store. */
        LSQUnit<Impl> *lsqPtr;
    };

  public:
    struct SQEntry {
        /** Constructs an empty store queue entry. */
        SQEntry()
            : inst(NULL), req(NULL), sreqLow(NULL), sreqHigh(NULL), size(0),
              isSplit(0), canWB(0), committed(0), completed(0)
        {
            std::memset(data, 0, sizeof(data));
        }

        /** Constructs a store queue entry for a given instruction. */
        SQEntry(DynInstPtr &_inst)
            : inst(_inst), req(NULL), sreqLow(NULL), sreqHigh(NULL), size(0),
              isSplit(0), canWB(0), committed(0), completed(0)
        {
            std::memset(data, 0, sizeof(data));
        }

        /** The store instruction. */
        DynInstPtr inst;
        /** The request for the store. */
        RequestPtr req;
        /** The split requests for the store. */
        RequestPtr sreqLow;
        RequestPtr sreqHigh;
        /** The size of the store. */
        int size;
        /** The store data. */
        char data[16];
        /** Whether or not the store is split into two requests. */
        bool isSplit;
        /** Whether or not the store can writeback. */
        bool canWB;
        /** Whether or not the store is committed. */
        bool committed;
        /** Whether or not the store is completed. */
        bool completed;
    };
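    // data[] is 16 bytes, which is assumed here to be wide enough for the
    // largest single store the supported ISAs generate; write() below
    // asserts that the request size fits within it.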

  private:
    /** The LSQUnit thread id. */
    ThreadID lsqID;

    /** The store queue. */
    std::vector<SQEntry> storeQueue;

    /** The load queue. */
    std::vector<DynInstPtr> loadQueue;

    /** The number of LQ entries, plus a sentinel entry (circular queue).
     *  @todo: Consider having var that records the true number of LQ entries.
     */
    unsigned LQEntries;
    /** The number of SQ entries, plus a sentinel entry (circular queue).
     *  @todo: Consider having var that records the true number of SQ entries.
     */
    unsigned SQEntries;

    /** The number of places to shift addresses in the LSQ before checking
     * for dependency violations
     */
    unsigned depCheckShift;
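    // For example (illustrative value only): with depCheckShift == 4,
    // addresses are compared at a 16-byte granularity, so two accesses
    // whose addresses differ only in their low 4 bits are treated as
    // potentially overlapping when checking for violations.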

    /** Should loads be checked for dependency issues */
    bool checkLoads;

    /** The number of load instructions in the LQ. */
    int loads;
    /** The number of store instructions in the SQ. */
    int stores;
    /** The number of store instructions in the SQ waiting to writeback. */
    int storesToWB;

    /** The index of the head instruction in the LQ. */
    int loadHead;
    /** The index of the tail instruction in the LQ. */
    int loadTail;

    /** The index of the head instruction in the SQ. */
    int storeHead;
    /** The index of the first instruction that may be ready to be
     * written back, and has not yet been written back.
     */
    int storeWBIdx;
    /** The index of the tail instruction in the SQ. */
    int storeTail;

    /// @todo Consider moving to a more advanced model with write vs read ports
    /** The number of cache ports available each cycle. */
    int cachePorts;

    /** The number of used cache ports in this cycle. */
    int usedPorts;

    /** Is the LSQ switched out. */
    bool switchedOut;

    //list<InstSeqNum> mshrSeqNums;

    /** Address Mask for a cache block (e.g. ~(cache_block_size-1)) */
    Addr cacheBlockMask;

    /** Wire to read information from the issue stage time queue. */
    typename TimeBuffer<IssueStruct>::wire fromIssue;

    /** Whether or not the LSQ is stalled. */
    bool stalled;
    /** The store that causes the stall due to partial store to load
     * forwarding.
     */
    InstSeqNum stallingStoreIsn;
    /** The LQ index of the load stalled by the above store. */
    int stallingLoadIdx;

    /** The packet that needs to be retried. */
    PacketPtr retryPkt;

    /** Whether or not a store is blocked due to the memory system. */
    bool isStoreBlocked;

    /** Whether or not a load is blocked due to the memory system. */
    bool isLoadBlocked;

    /** Has the blocked load been handled. */
    bool loadBlockedHandled;

    /** The sequence number of the blocked load. */
    InstSeqNum blockedLoadSeqNum;

    /** The oldest load that caused a memory ordering violation. */
    DynInstPtr memDepViolator;

    /** Whether or not there is a packet that couldn't be sent because of
     * a lack of cache ports. */
    bool hasPendingPkt;

    /** The packet that is pending free cache ports. */
    PacketPtr pendingPkt;

    // Will also need how many read/write ports the Dcache has.  Or keep track
    // of that in stage that is one level up, and only call executeLoad/Store
    // the appropriate number of times.
    /** Total number of loads forwarded from LSQ stores. */
    Stats::Scalar lsqForwLoads;

    /** Total number of loads ignored due to invalid addresses. */
    Stats::Scalar invAddrLoads;

    /** Total number of squashed loads. */
    Stats::Scalar lsqSquashedLoads;

    /** Total number of responses from the memory system that are
     * ignored due to the instruction already being squashed. */
    Stats::Scalar lsqIgnoredResponses;

    /** Total number of memory ordering violations. */
    Stats::Scalar lsqMemOrderViolation;

    /** Total number of squashed stores. */
    Stats::Scalar lsqSquashedStores;

    /** Total number of software prefetches ignored due to invalid addresses. */
    Stats::Scalar invAddrSwpfs;

    /** Ready loads blocked due to partial store-forwarding. */
    Stats::Scalar lsqBlockedLoads;

    /** Number of loads that were rescheduled. */
    Stats::Scalar lsqRescheduledLoads;

    /** Number of times the LSQ is blocked due to the cache. */
    Stats::Scalar lsqCacheBlocked;

  public:
    /** Executes the load at the given index. */
    Fault read(Request *req, Request *sreqLow, Request *sreqHigh,
               uint8_t *data, int load_idx);

    /** Executes the store at the given index. */
    Fault write(Request *req, Request *sreqLow, Request *sreqHigh,
                uint8_t *data, int store_idx);

    /** Returns the index of the head load instruction. */
    int getLoadHead() { return loadHead; }
    /** Returns the sequence number of the head load instruction. */
    InstSeqNum getLoadHeadSeqNum()
    {
        if (loadQueue[loadHead]) {
            return loadQueue[loadHead]->seqNum;
        } else {
            return 0;
        }
    }

    /** Returns the index of the head store instruction. */
    int getStoreHead() { return storeHead; }
    /** Returns the sequence number of the head store instruction. */
    InstSeqNum getStoreHeadSeqNum()
    {
        if (storeQueue[storeHead].inst) {
            return storeQueue[storeHead].inst->seqNum;
        } else {
            return 0;
        }
    }

    /** Returns whether or not the LSQ unit is stalled. */
    bool isStalled()  { return stalled; }
};

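// For context: the read() and write() methods below are invoked by the
// containing LSQ, which owns one LSQUnit per thread.  A minimal sketch of
// that dispatch, assuming the thread-indexed layout used by the O3 LSQ
// (illustrative only, not the actual lsq.hh code):
//
//     template <class Impl>
//     Fault
//     LSQ<Impl>::read(Request *req, Request *sreqLow, Request *sreqHigh,
//                     uint8_t *data, int load_idx)
//     {
//         ThreadID tid = req->threadId();
//         return thread[tid].read(req, sreqLow, sreqHigh, data, load_idx);
//     }
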
template <class Impl>
Fault
LSQUnit<Impl>::read(Request *req, Request *sreqLow, Request *sreqHigh,
                    uint8_t *data, int load_idx)
{
    DynInstPtr load_inst = loadQueue[load_idx];

    assert(load_inst);

    assert(!load_inst->isExecuted());

    // Make sure this isn't an uncacheable access
    // A bit of a hackish way to get uncached accesses to work only if they're
    // at the head of the LSQ and are ready to commit (at the head of the ROB
    // too).
    if (req->isUncacheable() &&
        (load_idx != loadHead || !load_inst->isAtCommit())) {
        iewStage->rescheduleMemInst(load_inst);
        ++lsqRescheduledLoads;
        DPRINTF(LSQUnit, "Uncachable load [sn:%lli] PC %s\n",
                load_inst->seqNum, load_inst->pcState());

        // Must delete request now that it wasn't handed off to
        // memory.  This is quite ugly.  @todo: Figure out the proper
        // place to really handle request deletes.
        delete req;
        if (TheISA::HasUnalignedMemAcc && sreqLow) {
            delete sreqLow;
            delete sreqHigh;
        }
        return TheISA::genMachineCheckFault();
    }

    // Check the SQ for any previous stores that might lead to forwarding
    int store_idx = load_inst->sqIdx;

    int store_size = 0;

    DPRINTF(LSQUnit, "Read called, load idx: %i, store idx: %i, "
            "storeHead: %i addr: %#x%s\n",
            load_idx, store_idx, storeHead, req->getPaddr(),
            sreqLow ? " split" : "");

    if (req->isLLSC()) {
        assert(!sreqLow);
        // Disable recording the result temporarily.  Writing to misc
        // regs normally updates the result, but this is not the
        // desired behavior when handling store conditionals.
        load_inst->recordResult = false;
        TheISA::handleLockedRead(load_inst.get(), req);
        load_inst->recordResult = true;
    }

    if (req->isMmappedIpr()) {
        assert(!load_inst->memData);
        load_inst->memData = new uint8_t[64];

        ThreadContext *thread = cpu->tcBase(lsqID);
        Tick delay;
        PacketPtr data_pkt =
            new Packet(req, MemCmd::ReadReq, Packet::Broadcast);

        if (!TheISA::HasUnalignedMemAcc || !sreqLow) {
            data_pkt->dataStatic(load_inst->memData);
            delay = TheISA::handleIprRead(thread, data_pkt);
        } else {
            assert(sreqLow->isMmappedIpr() && sreqHigh->isMmappedIpr());
            PacketPtr fst_data_pkt =
                new Packet(sreqLow, MemCmd::ReadReq, Packet::Broadcast);
            PacketPtr snd_data_pkt =
                new Packet(sreqHigh, MemCmd::ReadReq, Packet::Broadcast);

            fst_data_pkt->dataStatic(load_inst->memData);
            snd_data_pkt->dataStatic(load_inst->memData + sreqLow->getSize());

            delay = TheISA::handleIprRead(thread, fst_data_pkt);
            unsigned delay2 = TheISA::handleIprRead(thread, snd_data_pkt);
            if (delay2 > delay)
                delay = delay2;

            delete sreqLow;
            delete sreqHigh;
            delete fst_data_pkt;
            delete snd_data_pkt;
        }
        WritebackEvent *wb = new WritebackEvent(load_inst, data_pkt, this);
        cpu->schedule(wb, curTick() + delay);
        return NoFault;
    }

    while (store_idx != -1) {
        // End once we've reached the top of the LSQ
        if (store_idx == storeWBIdx) {
            break;
        }

        // Move the index to the next older store
        if (--store_idx < 0)
            store_idx += SQEntries;

        assert(storeQueue[store_idx].inst);

        store_size = storeQueue[store_idx].size;

        if (store_size == 0)
            continue;
        else if (storeQueue[store_idx].inst->uncacheable())
            continue;

        assert(storeQueue[store_idx].inst->effAddrValid);

        // Check if the store data is within the lower and upper bounds of
        // addresses that the request needs.
        bool store_has_lower_limit =
            req->getVaddr() >= storeQueue[store_idx].inst->effAddr;
        bool store_has_upper_limit =
            (req->getVaddr() + req->getSize()) <=
            (storeQueue[store_idx].inst->effAddr + store_size);
        bool lower_load_has_store_part =
            req->getVaddr() < (storeQueue[store_idx].inst->effAddr +
                           store_size);
        bool upper_load_has_store_part =
            (req->getVaddr() + req->getSize()) >
            storeQueue[store_idx].inst->effAddr;
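        // Worked example (illustrative numbers only): for a store to
        // effAddr 0x100 with store_size 8, a 4-byte load at vaddr 0x104
        // satisfies both limit conditions and can be fully forwarded, while
        // a 4-byte load at 0x106 satisfies only the lower limit and falls
        // into the partial-forwarding case handled below.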

        // If the store's data has all of the data needed, we can forward.
        if ((store_has_lower_limit && store_has_upper_limit)) {
            // Get shift amount for offset into the store's data.
            int shift_amt = req->getVaddr() - storeQueue[store_idx].inst->effAddr;

            memcpy(data, storeQueue[store_idx].data + shift_amt,
                   req->getSize());

            assert(!load_inst->memData);
            load_inst->memData = new uint8_t[64];

            memcpy(load_inst->memData,
                    storeQueue[store_idx].data + shift_amt, req->getSize());

            DPRINTF(LSQUnit, "Forwarding from store idx %i to load to "
                    "addr %#x, data %#x\n",
                    store_idx, req->getVaddr(), data);

            PacketPtr data_pkt = new Packet(req, MemCmd::ReadReq,
                                            Packet::Broadcast);
            data_pkt->dataStatic(load_inst->memData);

            WritebackEvent *wb = new WritebackEvent(load_inst, data_pkt, this);

            // We'll say this has a 1 cycle load-store forwarding latency
            // for now.
            // @todo: Need to make this a parameter.
            cpu->schedule(wb, curTick());

            // Don't need to do anything special for split loads.
            if (TheISA::HasUnalignedMemAcc && sreqLow) {
                delete sreqLow;
                delete sreqHigh;
            }

            ++lsqForwLoads;
            return NoFault;
        } else if ((store_has_lower_limit && lower_load_has_store_part) ||
                   (store_has_upper_limit && upper_load_has_store_part) ||
                   (lower_load_has_store_part && upper_load_has_store_part)) {
            // This is the partial store-load forwarding case where a store
            // has only part of the load's data.

            // If it's already been written back, then don't worry about
            // stalling on it.
            if (storeQueue[store_idx].completed) {
                panic("Should not check one of these");
                continue;
            }

            // Must stall load and force it to retry, so long as it's the oldest
            // load that needs to do so.
            if (!stalled ||
                (stalled &&
                 load_inst->seqNum <
                 loadQueue[stallingLoadIdx]->seqNum)) {
                stalled = true;
                stallingStoreIsn = storeQueue[store_idx].inst->seqNum;
                stallingLoadIdx = load_idx;
            }

            // Tell IQ/mem dep unit that this instruction will need to be
            // rescheduled eventually
            iewStage->rescheduleMemInst(load_inst);
            iewStage->decrWb(load_inst->seqNum);
            load_inst->clearIssued();
            ++lsqRescheduledLoads;

            // Do not generate a writeback event as this instruction is not
            // complete.
            DPRINTF(LSQUnit, "Load-store forwarding mis-match. "
                    "Store idx %i to load addr %#x\n",
                    store_idx, req->getVaddr());

            // Must delete request now that it wasn't handed off to
            // memory.  This is quite ugly.  @todo: Figure out the
            // proper place to really handle request deletes.
            delete req;
            if (TheISA::HasUnalignedMemAcc && sreqLow) {
                delete sreqLow;
                delete sreqHigh;
            }

            return NoFault;
        }
    }

    // If there's no forwarding case, then go access memory
    DPRINTF(LSQUnit, "Doing memory access for inst [sn:%lli] PC %s\n",
            load_inst->seqNum, load_inst->pcState());

    assert(!load_inst->memData);
    load_inst->memData = new uint8_t[64];

    ++usedPorts;

    // If the cache is not blocked, do the cache access
    bool completedFirst = false;
    if (!lsq->cacheBlocked()) {
        MemCmd command =
            req->isLLSC() ? MemCmd::LoadLockedReq : MemCmd::ReadReq;
        PacketPtr data_pkt = new Packet(req, command, Packet::Broadcast);
        PacketPtr fst_data_pkt = NULL;
        PacketPtr snd_data_pkt = NULL;

        data_pkt->dataStatic(load_inst->memData);

        LSQSenderState *state = new LSQSenderState;
        state->isLoad = true;
        state->idx = load_idx;
        state->inst = load_inst;
        data_pkt->senderState = state;

        if (!TheISA::HasUnalignedMemAcc || !sreqLow) {

            // Point the first packet at the main data packet.
            fst_data_pkt = data_pkt;
        } else {

            // Create the split packets.
            fst_data_pkt = new Packet(sreqLow, command, Packet::Broadcast);
            snd_data_pkt = new Packet(sreqHigh, command, Packet::Broadcast);

            fst_data_pkt->dataStatic(load_inst->memData);
            snd_data_pkt->dataStatic(load_inst->memData + sreqLow->getSize());

            fst_data_pkt->senderState = state;
            snd_data_pkt->senderState = state;

            state->isSplit = true;
            state->outstanding = 2;
            state->mainPkt = data_pkt;
        }

        if (!dcachePort->sendTiming(fst_data_pkt)) {
            // Delete state and data packet because a load retry
            // initiates a pipeline restart; it does not retry.
            delete state;
            delete data_pkt->req;
            delete data_pkt;
            if (TheISA::HasUnalignedMemAcc && sreqLow) {
                delete fst_data_pkt->req;
                delete fst_data_pkt;
                delete snd_data_pkt->req;
                delete snd_data_pkt;
                sreqLow = NULL;
                sreqHigh = NULL;
            }

            req = NULL;

            // If the access didn't succeed, tell the LSQ by setting
            // the retry thread id.
            lsq->setRetryTid(lsqID);
        } else if (TheISA::HasUnalignedMemAcc && sreqLow) {
            completedFirst = true;

            // The first packet was sent without problems, so send this one
            // too. If there is a problem with this packet then the whole
            // load will be squashed, so indicate this to the state object.
            // The first packet will return in completeDataAccess and be
            // handled there.
            ++usedPorts;
            if (!dcachePort->sendTiming(snd_data_pkt)) {

                // The main packet will be deleted in completeDataAccess.
                delete snd_data_pkt->req;
                delete snd_data_pkt;

                state->complete();

                req = NULL;
                sreqHigh = NULL;

                lsq->setRetryTid(lsqID);
            }
        }
    }

    // If the cache was blocked, or has become blocked due to the access,
    // handle it.
    if (lsq->cacheBlocked()) {
        if (req)
            delete req;
        if (TheISA::HasUnalignedMemAcc && sreqLow && !completedFirst) {
            delete sreqLow;
            delete sreqHigh;
        }

        ++lsqCacheBlocked;

        // If the first part of a split access succeeds, then let the LSQ
        // handle the decrWb when completeDataAccess is called upon return
        // of the requested first part of data
        if (!completedFirst)
            iewStage->decrWb(load_inst->seqNum);

        // There's an older load that's already going to squash.
        if (isLoadBlocked && blockedLoadSeqNum < load_inst->seqNum)
            return NoFault;

        // Record that the load was blocked due to memory.  This
        // load will squash all instructions after it, be
        // refetched, and re-executed.
        isLoadBlocked = true;
        loadBlockedHandled = false;
        blockedLoadSeqNum = load_inst->seqNum;
        // No fault occurred, even though the interface is blocked.
        return NoFault;
    }

    return NoFault;
}

template <class Impl>
Fault
LSQUnit<Impl>::write(Request *req, Request *sreqLow, Request *sreqHigh,
                     uint8_t *data, int store_idx)
{
    assert(storeQueue[store_idx].inst);

    DPRINTF(LSQUnit, "Doing write to store idx %i, addr %#x data %#x"
            " | storeHead:%i [sn:%i]\n",
            store_idx, req->getPaddr(), data, storeHead,
            storeQueue[store_idx].inst->seqNum);

    storeQueue[store_idx].req = req;
    storeQueue[store_idx].sreqLow = sreqLow;
    storeQueue[store_idx].sreqHigh = sreqHigh;
    unsigned size = req->getSize();
    storeQueue[store_idx].size = size;
    assert(size <= sizeof(storeQueue[store_idx].data));

    // Split stores can only occur in ISAs with unaligned memory accesses.  If
    // a store request has been split, sreqLow and sreqHigh will be non-null.
    if (TheISA::HasUnalignedMemAcc && sreqLow) {
        storeQueue[store_idx].isSplit = true;
    }

    memcpy(storeQueue[store_idx].data, data, size);

    // This function only writes the data to the store queue, so no fault
    // can happen here.
    return NoFault;
}
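
// Note that write() only records the request and data in the store queue
// entry; the store itself is sent to the data cache later, from
// writebackStores(), once the entry has been marked able to write back.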

#endif // __CPU_O3_LSQ_UNIT_HH__