/*
 * Copyright (c) 2004-2006 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Kevin Lim
 *          Korey Sewell
 */

#ifndef __CPU_O3_LSQ_UNIT_HH__
#define __CPU_O3_LSQ_UNIT_HH__

#include <algorithm>
#include <cstring>
#include <map>
#include <queue>

#include "arch/faults.hh"
#include "arch/generic/debugfaults.hh"
#include "arch/isa_traits.hh"
#include "arch/locked_mem.hh"
#include "arch/mmapped_ipr.hh"
#include "base/fast_alloc.hh"
#include "base/hashmap.hh"
#include "config/full_system.hh"
#include "config/the_isa.hh"
#include "cpu/inst_seq.hh"
#include "cpu/timebuf.hh"
#include "debug/LSQUnit.hh"
#include "mem/packet.hh"
#include "mem/port.hh"

class DerivO3CPUParams;

/**
 * Class that implements the actual LQ and SQ for each specific
 * thread.  Both are circular queues; load entries are freed upon
 * committing, while store entries are freed once they write back. The
 * LSQUnit tracks whether there are memory ordering violations, and also
 * detects partial load-to-store forwarding cases (a store only has
 * part of a load's data) that require the load to wait until the
 * store writes back. In the former case it holds onto the instruction
 * until the dependence unit looks at it, and in the latter it stalls
 * the LSQ until the store writes back. At that point the load is
 * replayed.
 */
template <class Impl>
class LSQUnit {
  public:
    typedef typename Impl::O3CPU O3CPU;
    typedef typename Impl::DynInstPtr DynInstPtr;
    typedef typename Impl::CPUPol::IEW IEW;
    typedef typename Impl::CPUPol::LSQ LSQ;
    typedef typename Impl::CPUPol::IssueStruct IssueStruct;

  public:
    /** Constructs an LSQ unit. init() must be called prior to use. */
    LSQUnit();

    /** Initializes the LSQ unit with the specified number of entries. */
    void init(O3CPU *cpu_ptr, IEW *iew_ptr, DerivO3CPUParams *params,
            LSQ *lsq_ptr, unsigned maxLQEntries, unsigned maxSQEntries,
            unsigned id);

    /** Returns the name of the LSQ unit. */
    std::string name() const;

    /** Registers statistics. */
    void regStats();

    /** Sets the pointer to the dcache port. */
    void setDcachePort(Port *dcache_port);

    /** Switches out LSQ unit. */
    void switchOut();

    /** Takes over from another CPU's thread. */
    void takeOverFrom();

    /** Returns if the LSQ is switched out. */
    bool isSwitchedOut() { return switchedOut; }

    /** Ticks the LSQ unit, which in this case only resets the number of
     * used cache ports.
     * @todo: Move the number of used ports up to the LSQ level so it can
     * be shared by all LSQ units.
     */
    void tick() { usedPorts = 0; }

    /** Inserts an instruction. */
    void insert(DynInstPtr &inst);
    /** Inserts a load instruction. */
    void insertLoad(DynInstPtr &load_inst);
    /** Inserts a store instruction. */
    void insertStore(DynInstPtr &store_inst);

    /** Check for ordering violations in the LSQ. For a store, squash if we
     * ever find a conflicting load. For a load, only squash if an external
     * snoop invalidate has been seen for that load address.
     * @param load_idx index to start checking at
     * @param inst the instruction to check
     */
    Fault checkViolations(int load_idx, DynInstPtr &inst);

    /** Check if an incoming invalidate hits in the LSQ on a load
     * that might have issued out of order with respect to another load
     * because of the intermediate invalidate.
     */
    void checkSnoop(PacketPtr pkt);

    /** Executes a load instruction. */
    Fault executeLoad(DynInstPtr &inst);

    Fault executeLoad(int lq_idx) { panic("Not implemented"); return NoFault; }
    /** Executes a store instruction. */
    Fault executeStore(DynInstPtr &inst);

    /** Commits the head load. */
    void commitLoad();
    /** Commits loads older than a specific sequence number. */
    void commitLoads(InstSeqNum &youngest_inst);

    /** Commits stores older than a specific sequence number. */
    void commitStores(InstSeqNum &youngest_inst);

    /** Writes back stores. */
    void writebackStores();

    /** Completes the data access that has been returned from the
     * memory system. */
    void completeDataAccess(PacketPtr pkt);

    /** Clears all the entries in the LQ. */
    void clearLQ();

    /** Clears all the entries in the SQ. */
    void clearSQ();

    /** Resizes the LQ to a given size. */
    void resizeLQ(unsigned size);

    /** Resizes the SQ to a given size. */
    void resizeSQ(unsigned size);

    /** Squashes all instructions younger than a specific sequence number. */
    void squash(const InstSeqNum &squashed_num);

    /** Returns if there is a memory ordering violation. Value is reset upon
     * call to getMemDepViolator().
     */
    bool violation() { return memDepViolator; }

    /** Returns the memory ordering violator. */
    DynInstPtr getMemDepViolator();

    /** Returns if a load became blocked due to the memory system. */
    bool loadBlocked()
    { return isLoadBlocked; }

    /** Clears the signal that a load became blocked. */
    void clearLoadBlocked()
    { isLoadBlocked = false; }

    /** Returns if the blocked load was handled. */
    bool isLoadBlockedHandled()
    { return loadBlockedHandled; }

    /** Records the blocked load as being handled. */
    void setLoadBlockedHandled()
    { loadBlockedHandled = true; }

    /** Returns the number of free entries (min of free LQ and SQ entries). */
    unsigned numFreeEntries();

    /** Returns the number of loads ready to execute. */
    int numLoadsReady();

    /** Returns the number of loads in the LQ. */
    int numLoads() { return loads; }

    /** Returns the number of stores in the SQ. */
    int numStores() { return stores; }

    /** Returns if either the LQ or SQ is full. */
    bool isFull() { return lqFull() || sqFull(); }

    /** Returns if the LQ is full. */
    bool lqFull() { return loads >= (LQEntries - 1); }

    /** Returns if the SQ is full. */
    bool sqFull() { return stores >= (SQEntries - 1); }
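    // Note: the LQ and SQ are circular buffers that reserve one slot as a
    // sentinel to distinguish a full queue from an empty one, which is why
    // the checks above compare against (LQEntries - 1) and (SQEntries - 1)
    // rather than the raw entry counts.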

    /** Returns the number of instructions in the LSQ. */
    unsigned getCount() { return loads + stores; }

    /** Returns if there are any stores to writeback. */
    bool hasStoresToWB() { return storesToWB; }

    /** Returns the number of stores to writeback. */
    int numStoresToWB() { return storesToWB; }

    /** Returns if the LSQ unit will writeback on this cycle. */
    bool willWB() { return storeQueue[storeWBIdx].canWB &&
                        !storeQueue[storeWBIdx].completed &&
                        !isStoreBlocked; }

    /** Handles doing the retry. */
    void recvRetry();

  private:
    /** Writes back the instruction, sending it to IEW. */
    void writeback(DynInstPtr &inst, PacketPtr pkt);

    /** Writes back a store that couldn't be completed the previous cycle. */
    void writebackPendingStore();

    /** Handles completing the send of a store to memory. */
    void storePostSend(PacketPtr pkt);

    /** Completes the store at the specified index. */
    void completeStore(int store_idx);

    /** Attempts to send a store to the cache. */
    bool sendStore(PacketPtr data_pkt);

    /** Increments the given store index (circular queue). */
    inline void incrStIdx(int &store_idx);
    /** Decrements the given store index (circular queue). */
    inline void decrStIdx(int &store_idx);
    /** Increments the given load index (circular queue). */
    inline void incrLdIdx(int &load_idx);
    /** Decrements the given load index (circular queue). */
    inline void decrLdIdx(int &load_idx);
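    // These helpers are defined in the accompanying implementation file; as
    // a rough sketch (mirroring the manual wrap-around done in read() below),
    // incrementing wraps an index that reaches the queue size back to 0:
    //
    //   if (++store_idx >= SQEntries)
    //       store_idx = 0;
    //
    // and decrementing an index below 0 wraps it back to the top:
    //
    //   if (--store_idx < 0)
    //       store_idx += SQEntries;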

  public:
    /** Debugging function to dump instructions in the LSQ. */
    void dumpInsts();

  private:
    /** Pointer to the CPU. */
    O3CPU *cpu;

    /** Pointer to the IEW stage. */
    IEW *iewStage;

    /** Pointer to the LSQ. */
    LSQ *lsq;

    /** Pointer to the dcache port.  Used only for sending. */
    Port *dcachePort;

    /** Derived class to hold any sender state the LSQ needs. */
    class LSQSenderState : public Packet::SenderState, public FastAlloc
    {
      public:
        /** Default constructor. */
        LSQSenderState()
            : noWB(false), isSplit(false), pktToSend(false), outstanding(1),
              mainPkt(NULL), pendingPacket(NULL)
        { }

        /** Instruction that initiated the access to memory. */
        DynInstPtr inst;
        /** Whether or not it is a load. */
        bool isLoad;
        /** The LQ/SQ index of the instruction. */
        int idx;
        /** Whether or not the instruction will need to writeback. */
        bool noWB;
        /** Whether or not this access is split in two. */
        bool isSplit;
        /** Whether or not there is a packet that needs sending. */
        bool pktToSend;
        /** Number of outstanding packets to complete. */
        int outstanding;
        /** The main packet from a split load, used during writeback. */
        PacketPtr mainPkt;
        /** A second packet from a split store that needs sending. */
        PacketPtr pendingPacket;

        /** Completes a packet and returns whether the access is finished. */
        inline bool complete() { return --outstanding == 0; }
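        // For a split access both halves share one LSQSenderState with
        // outstanding initialized to 2, so the access as a whole is done
        // only once complete() has returned true for the final packet.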
    };

    /** Writeback event, specifically for when stores forward data to loads. */
    class WritebackEvent : public Event {
      public:
        /** Constructs a writeback event. */
        WritebackEvent(DynInstPtr &_inst, PacketPtr pkt, LSQUnit *lsq_ptr);

        /** Processes the writeback event. */
        void process();

        /** Returns the description of this event. */
        const char *description() const;

      private:
        /** Instruction whose results are being written back. */
        DynInstPtr inst;

        /** The packet that would have been sent to memory. */
        PacketPtr pkt;

        /** The pointer to the LSQ unit that issued the store. */
        LSQUnit<Impl> *lsqPtr;
    };

  public:
    struct SQEntry {
        /** Constructs an empty store queue entry. */
        SQEntry()
            : inst(NULL), req(NULL), size(0),
              canWB(0), committed(0), completed(0)
        {
            std::memset(data, 0, sizeof(data));
        }

        /** Constructs a store queue entry for a given instruction. */
        SQEntry(DynInstPtr &_inst)
            : inst(_inst), req(NULL), sreqLow(NULL), sreqHigh(NULL), size(0),
              isSplit(0), canWB(0), committed(0), completed(0)
        {
            std::memset(data, 0, sizeof(data));
        }

        /** The store instruction. */
        DynInstPtr inst;
        /** The request for the store. */
        RequestPtr req;
        /** The split requests for the store. */
        RequestPtr sreqLow;
        RequestPtr sreqHigh;
        /** The size of the store. */
        int size;
        /** The store data. */
        char data[16];
        /** Whether or not the store is split into two requests. */
        bool isSplit;
        /** Whether or not the store can writeback. */
        bool canWB;
        /** Whether or not the store is committed. */
        bool committed;
        /** Whether or not the store is completed. */
        bool completed;
    };

  private:
    /** The LSQUnit thread id. */
    ThreadID lsqID;

    /** The store queue. */
    std::vector<SQEntry> storeQueue;

    /** The load queue. */
    std::vector<DynInstPtr> loadQueue;

    /** The number of LQ entries, plus a sentinel entry (circular queue).
     *  @todo: Consider having var that records the true number of LQ entries.
     */
    unsigned LQEntries;
    /** The number of SQ entries, plus a sentinel entry (circular queue).
     *  @todo: Consider having var that records the true number of SQ entries.
     */
    unsigned SQEntries;

    /** The number of places to shift addresses in the LSQ before checking
     * for dependency violations.
     */
    unsigned depCheckShift;

    /** Should loads be checked for dependency issues? */
    bool checkLoads;

    /** The number of load instructions in the LQ. */
    int loads;
    /** The number of store instructions in the SQ. */
    int stores;
    /** The number of store instructions in the SQ waiting to writeback. */
    int storesToWB;

    /** The index of the head instruction in the LQ. */
    int loadHead;
    /** The index of the tail instruction in the LQ. */
    int loadTail;

    /** The index of the head instruction in the SQ. */
    int storeHead;
    /** The index of the first instruction that may be ready to be
     * written back, and has not yet been written back.
     */
    int storeWBIdx;
    /** The index of the tail instruction in the SQ. */
    int storeTail;

    /// @todo Consider moving to a more advanced model with write vs read ports
    /** The number of cache ports available each cycle. */
    int cachePorts;

    /** The number of used cache ports in this cycle. */
    int usedPorts;

    /** Is the LSQ switched out. */
    bool switchedOut;

    //list<InstSeqNum> mshrSeqNums;

    /** Address Mask for a cache block (e.g. ~(cache_block_size-1)) */
    Addr cacheBlockMask;

    /** Wire to read information from the issue stage time queue. */
    typename TimeBuffer<IssueStruct>::wire fromIssue;

    /** Whether or not the LSQ is stalled. */
    bool stalled;
    /** The store that causes the stall due to partial store to load
     * forwarding.
     */
    InstSeqNum stallingStoreIsn;
    /** The index of the load that is stalled on the above store. */
    int stallingLoadIdx;

    /** The packet that needs to be retried. */
    PacketPtr retryPkt;

    /** Whether or not a store is blocked due to the memory system. */
    bool isStoreBlocked;

    /** Whether or not a load is blocked due to the memory system. */
    bool isLoadBlocked;

    /** Has the blocked load been handled. */
    bool loadBlockedHandled;

    /** The sequence number of the blocked load. */
    InstSeqNum blockedLoadSeqNum;

    /** The oldest load that caused a memory ordering violation. */
    DynInstPtr memDepViolator;

    /** Whether or not there is a packet that couldn't be sent because of
     * a lack of cache ports. */
    bool hasPendingPkt;

    /** The packet that is pending free cache ports. */
    PacketPtr pendingPkt;

    // Will also need how many read/write ports the Dcache has.  Or keep track
    // of that in stage that is one level up, and only call executeLoad/Store
    // the appropriate number of times.
    /** Total number of loads forwarded from LSQ stores. */
    Stats::Scalar lsqForwLoads;

    /** Total number of loads ignored due to invalid addresses. */
    Stats::Scalar invAddrLoads;

    /** Total number of squashed loads. */
    Stats::Scalar lsqSquashedLoads;

    /** Total number of responses from the memory system that are
     * ignored due to the instruction already being squashed. */
    Stats::Scalar lsqIgnoredResponses;

    /** Total number of memory ordering violations. */
    Stats::Scalar lsqMemOrderViolation;

    /** Total number of squashed stores. */
    Stats::Scalar lsqSquashedStores;

    /** Total number of software prefetches ignored due to invalid addresses. */
    Stats::Scalar invAddrSwpfs;

    /** Ready loads blocked due to partial store-forwarding. */
    Stats::Scalar lsqBlockedLoads;

    /** Number of loads that were rescheduled. */
    Stats::Scalar lsqRescheduledLoads;

    /** Number of times the LSQ is blocked due to the cache. */
    Stats::Scalar lsqCacheBlocked;

  public:
    /** Executes the load at the given index. */
    Fault read(Request *req, Request *sreqLow, Request *sreqHigh,
               uint8_t *data, int load_idx);

    /** Executes the store at the given index. */
    Fault write(Request *req, Request *sreqLow, Request *sreqHigh,
                uint8_t *data, int store_idx);

    /** Returns the index of the head load instruction. */
    int getLoadHead() { return loadHead; }
    /** Returns the sequence number of the head load instruction. */
    InstSeqNum getLoadHeadSeqNum()
    {
        if (loadQueue[loadHead]) {
            return loadQueue[loadHead]->seqNum;
        } else {
            return 0;
        }

    }

    /** Returns the index of the head store instruction. */
    int getStoreHead() { return storeHead; }
    /** Returns the sequence number of the head store instruction. */
    InstSeqNum getStoreHeadSeqNum()
    {
        if (storeQueue[storeHead].inst) {
            return storeQueue[storeHead].inst->seqNum;
        } else {
            return 0;
        }

    }

    /** Returns whether or not the LSQ unit is stalled. */
    bool isStalled()  { return stalled; }
};
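
// Rough usage sketch, inferred only from the interface above: the IEW stage
// insert()s instructions as they are dispatched, executeLoad()/executeStore()
// run them when they issue (ultimately reaching read()/write() below to
// access the queues), commitLoads()/commitStores() retire entries older than
// the given sequence number, and writebackStores() then sends committed
// stores to memory each cycle.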

template <class Impl>
Fault
LSQUnit<Impl>::read(Request *req, Request *sreqLow, Request *sreqHigh,
                    uint8_t *data, int load_idx)
{
    DynInstPtr load_inst = loadQueue[load_idx];

    assert(load_inst);

    assert(!load_inst->isExecuted());

    // Make sure this isn't an uncacheable access
    // A bit of a hackish way to get uncached accesses to work only if they're
    // at the head of the LSQ and are ready to commit (at the head of the ROB
    // too).
    if (req->isUncacheable() &&
        (load_idx != loadHead || !load_inst->isAtCommit())) {
        iewStage->rescheduleMemInst(load_inst);
        ++lsqRescheduledLoads;
        DPRINTF(LSQUnit, "Uncachable load [sn:%lli] PC %s\n",
                load_inst->seqNum, load_inst->pcState());

        // Must delete request now that it wasn't handed off to
        // memory.  This is quite ugly.  @todo: Figure out the proper
        // place to really handle request deletes.
        delete req;
        if (TheISA::HasUnalignedMemAcc && sreqLow) {
            delete sreqLow;
            delete sreqHigh;
        }
        return new GenericISA::M5PanicFault(
                "Uncachable load [sn:%llx] PC %s\n",
                load_inst->seqNum, load_inst->pcState());
    }

    // Check the SQ for any previous stores that might lead to forwarding
    int store_idx = load_inst->sqIdx;

    int store_size = 0;

    DPRINTF(LSQUnit, "Read called, load idx: %i, store idx: %i, "
            "storeHead: %i addr: %#x%s\n",
            load_idx, store_idx, storeHead, req->getPaddr(),
            sreqLow ? " split" : "");

    if (req->isLLSC()) {
        assert(!sreqLow);
        // Disable recording the result temporarily.  Writing to misc
        // regs normally updates the result, but this is not the
        // desired behavior when handling store conditionals.
        load_inst->recordResult = false;
        TheISA::handleLockedRead(load_inst.get(), req);
        load_inst->recordResult = true;
    }

    if (req->isMmappedIpr()) {
        assert(!load_inst->memData);
        load_inst->memData = new uint8_t[64];

        ThreadContext *thread = cpu->tcBase(lsqID);
        Tick delay;
        PacketPtr data_pkt =
            new Packet(req, MemCmd::ReadReq, Packet::Broadcast);

        if (!TheISA::HasUnalignedMemAcc || !sreqLow) {
            data_pkt->dataStatic(load_inst->memData);
            delay = TheISA::handleIprRead(thread, data_pkt);
        } else {
            assert(sreqLow->isMmappedIpr() && sreqHigh->isMmappedIpr());
            PacketPtr fst_data_pkt =
                new Packet(sreqLow, MemCmd::ReadReq, Packet::Broadcast);
            PacketPtr snd_data_pkt =
                new Packet(sreqHigh, MemCmd::ReadReq, Packet::Broadcast);

            fst_data_pkt->dataStatic(load_inst->memData);
            snd_data_pkt->dataStatic(load_inst->memData + sreqLow->getSize());

            delay = TheISA::handleIprRead(thread, fst_data_pkt);
            unsigned delay2 = TheISA::handleIprRead(thread, snd_data_pkt);
            if (delay2 > delay)
                delay = delay2;

            delete sreqLow;
            delete sreqHigh;
            delete fst_data_pkt;
            delete snd_data_pkt;
        }
        WritebackEvent *wb = new WritebackEvent(load_inst, data_pkt, this);
        cpu->schedule(wb, curTick() + delay);
        return NoFault;
    }

    while (store_idx != -1) {
        // End once we've reached the top of the LSQ
        if (store_idx == storeWBIdx) {
            break;
        }

        // Move the index to one younger
        if (--store_idx < 0)
            store_idx += SQEntries;

        assert(storeQueue[store_idx].inst);

        store_size = storeQueue[store_idx].size;

        if (store_size == 0)
            continue;
        else if (storeQueue[store_idx].inst->uncacheable())
            continue;

        assert(storeQueue[store_idx].inst->effAddrValid);

        // Check if the store data is within the lower and upper bounds of
        // addresses that the request needs.
        bool store_has_lower_limit =
            req->getVaddr() >= storeQueue[store_idx].inst->effAddr;
        bool store_has_upper_limit =
            (req->getVaddr() + req->getSize()) <=
            (storeQueue[store_idx].inst->effAddr + store_size);
        bool lower_load_has_store_part =
            req->getVaddr() < (storeQueue[store_idx].inst->effAddr +
                           store_size);
        bool upper_load_has_store_part =
            (req->getVaddr() + req->getSize()) >
            storeQueue[store_idx].inst->effAddr;
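        // Illustration (hypothetical addresses): a store covering
        // [0x100, 0x108) fully contains a 4-byte load at 0x104, so both
        // "limit" checks above hold and the data is forwarded below.  An
        // 8-byte load at 0x104 is only partially covered, which falls into
        // the partial-forwarding (stall) case further down.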

        // If the store's data has all of the data needed, we can forward.
        if ((store_has_lower_limit && store_has_upper_limit)) {
            // Get shift amount for offset into the store's data.
            int shift_amt = req->getVaddr() - storeQueue[store_idx].inst->effAddr;

            memcpy(data, storeQueue[store_idx].data + shift_amt,
                   req->getSize());

            assert(!load_inst->memData);
            load_inst->memData = new uint8_t[64];

            memcpy(load_inst->memData,
                    storeQueue[store_idx].data + shift_amt, req->getSize());

            DPRINTF(LSQUnit, "Forwarding from store idx %i to load to "
                    "addr %#x, data %#x\n",
                    store_idx, req->getVaddr(), data);

            PacketPtr data_pkt = new Packet(req, MemCmd::ReadReq,
                                            Packet::Broadcast);
            data_pkt->dataStatic(load_inst->memData);

            WritebackEvent *wb = new WritebackEvent(load_inst, data_pkt, this);

            // We'll say this has a 1 cycle load-store forwarding latency
            // for now.
            // @todo: Need to make this a parameter.
            cpu->schedule(wb, curTick());

            // Don't need to do anything special for split loads.
            if (TheISA::HasUnalignedMemAcc && sreqLow) {
                delete sreqLow;
                delete sreqHigh;
            }

            ++lsqForwLoads;
            return NoFault;
        } else if ((store_has_lower_limit && lower_load_has_store_part) ||
                   (store_has_upper_limit && upper_load_has_store_part) ||
                   (lower_load_has_store_part && upper_load_has_store_part)) {
            // This is the partial store-load forwarding case where a store
            // has only part of the load's data.

            // If it's already been written back, then don't worry about
            // stalling on it.
            if (storeQueue[store_idx].completed) {
                panic("Should not check one of these");
                continue;
            }

            // Must stall load and force it to retry, so long as it's the oldest
            // load that needs to do so.
            if (!stalled ||
                (stalled &&
                 load_inst->seqNum <
                 loadQueue[stallingLoadIdx]->seqNum)) {
                stalled = true;
                stallingStoreIsn = storeQueue[store_idx].inst->seqNum;
                stallingLoadIdx = load_idx;
            }

            // Tell IQ/mem dep unit that this instruction will need to be
            // rescheduled eventually
            iewStage->rescheduleMemInst(load_inst);
            iewStage->decrWb(load_inst->seqNum);
            load_inst->clearIssued();
            ++lsqRescheduledLoads;

            // Do not generate a writeback event as this instruction is not
            // complete.
            DPRINTF(LSQUnit, "Load-store forwarding mis-match. "
                    "Store idx %i to load addr %#x\n",
                    store_idx, req->getVaddr());

            // Must delete request now that it wasn't handed off to
            // memory.  This is quite ugly.  @todo: Figure out the
            // proper place to really handle request deletes.
            delete req;
            if (TheISA::HasUnalignedMemAcc && sreqLow) {
                delete sreqLow;
                delete sreqHigh;
            }

            return NoFault;
        }
    }

    // If there's no forwarding case, then go access memory
    DPRINTF(LSQUnit, "Doing memory access for inst [sn:%lli] PC %s\n",
            load_inst->seqNum, load_inst->pcState());

    assert(!load_inst->memData);
    load_inst->memData = new uint8_t[64];

    ++usedPorts;

    // If the cache is not blocked, do the cache access
    bool completedFirst = false;
    if (!lsq->cacheBlocked()) {
        MemCmd command =
            req->isLLSC() ? MemCmd::LoadLockedReq : MemCmd::ReadReq;
        PacketPtr data_pkt = new Packet(req, command, Packet::Broadcast);
        PacketPtr fst_data_pkt = NULL;
        PacketPtr snd_data_pkt = NULL;

        data_pkt->dataStatic(load_inst->memData);

        LSQSenderState *state = new LSQSenderState;
        state->isLoad = true;
        state->idx = load_idx;
        state->inst = load_inst;
        data_pkt->senderState = state;

        if (!TheISA::HasUnalignedMemAcc || !sreqLow) {

            // Point the first packet at the main data packet.
            fst_data_pkt = data_pkt;
        } else {

            // Create the split packets.
            fst_data_pkt = new Packet(sreqLow, command, Packet::Broadcast);
            snd_data_pkt = new Packet(sreqHigh, command, Packet::Broadcast);

            fst_data_pkt->dataStatic(load_inst->memData);
            snd_data_pkt->dataStatic(load_inst->memData + sreqLow->getSize());

            fst_data_pkt->senderState = state;
            snd_data_pkt->senderState = state;

            state->isSplit = true;
            state->outstanding = 2;
            state->mainPkt = data_pkt;
        }
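        // Both halves of a split load read directly into load_inst->memData
        // (the second half at an offset of sreqLow->getSize()), share the
        // same LSQSenderState, and keep the original full-size packet in
        // state->mainPkt for use at writeback time.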

        if (!dcachePort->sendTiming(fst_data_pkt)) {
            // Delete state and data packet because a load retry
            // initiates a pipeline restart; it does not retry.
            delete state;
            delete data_pkt->req;
            delete data_pkt;
            if (TheISA::HasUnalignedMemAcc && sreqLow) {
                delete fst_data_pkt->req;
                delete fst_data_pkt;
                delete snd_data_pkt->req;
                delete snd_data_pkt;
                sreqLow = NULL;
                sreqHigh = NULL;
            }

            req = NULL;

            // If the access didn't succeed, tell the LSQ by setting
            // the retry thread id.
            lsq->setRetryTid(lsqID);
        } else if (TheISA::HasUnalignedMemAcc && sreqLow) {
            completedFirst = true;

            // The first packet was sent without problems, so send this one
            // too. If there is a problem with this packet then the whole
            // load will be squashed, so indicate this to the state object.
            // The first packet will return in completeDataAccess and be
            // handled there.
            ++usedPorts;
            if (!dcachePort->sendTiming(snd_data_pkt)) {

                // The main packet will be deleted in completeDataAccess.
                delete snd_data_pkt->req;
                delete snd_data_pkt;

                state->complete();

                req = NULL;
                sreqHigh = NULL;

                lsq->setRetryTid(lsqID);
            }
        }
    }

    // If the cache was blocked, or has become blocked due to the access,
    // handle it.
    if (lsq->cacheBlocked()) {
        if (req)
            delete req;
        if (TheISA::HasUnalignedMemAcc && sreqLow && !completedFirst) {
            delete sreqLow;
            delete sreqHigh;
        }

        ++lsqCacheBlocked;

        // If the first part of a split access succeeded, let the LSQ handle
        // the decrWb when completeDataAccess is called on the return of that
        // first part of the data.
        if (!completedFirst)
            iewStage->decrWb(load_inst->seqNum);

        // There's an older load that's already going to squash.
        if (isLoadBlocked && blockedLoadSeqNum < load_inst->seqNum)
            return NoFault;

        // Record that the load was blocked due to memory.  This
        // load will squash all instructions after it, be
        // refetched, and re-executed.
        isLoadBlocked = true;
        loadBlockedHandled = false;
        blockedLoadSeqNum = load_inst->seqNum;
        // No fault occurred, even though the interface is blocked.
        return NoFault;
    }

    return NoFault;
}

template <class Impl>
Fault
LSQUnit<Impl>::write(Request *req, Request *sreqLow, Request *sreqHigh,
                     uint8_t *data, int store_idx)
{
    assert(storeQueue[store_idx].inst);

    DPRINTF(LSQUnit, "Doing write to store idx %i, addr %#x data %#x"
            " | storeHead:%i [sn:%i]\n",
            store_idx, req->getPaddr(), data, storeHead,
            storeQueue[store_idx].inst->seqNum);

    storeQueue[store_idx].req = req;
    storeQueue[store_idx].sreqLow = sreqLow;
    storeQueue[store_idx].sreqHigh = sreqHigh;
    unsigned size = req->getSize();
    storeQueue[store_idx].size = size;
    assert(size <= sizeof(storeQueue[store_idx].data));

    // Split stores can only occur in ISAs with unaligned memory accesses.  If
    // a store request has been split, sreqLow and sreqHigh will be non-null.
    if (TheISA::HasUnalignedMemAcc && sreqLow) {
        storeQueue[store_idx].isSplit = true;
    }

    memcpy(storeQueue[store_idx].data, data, size);

    // This function only writes the data to the store queue, so no fault
    // can happen here.
    return NoFault;
}

#endif // __CPU_O3_LSQ_UNIT_HH__