lsq_unit.hh revision 12171
/*
 * Copyright (c) 2012-2014 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2004-2006 The Regents of The University of Michigan
 * Copyright (c) 2013 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Kevin Lim
 *          Korey Sewell
 */

#ifndef __CPU_O3_LSQ_UNIT_HH__
#define __CPU_O3_LSQ_UNIT_HH__

#include <algorithm>
#include <cstring>
#include <map>
#include <queue>

#include "arch/generic/debugfaults.hh"
#include "arch/isa_traits.hh"
#include "arch/locked_mem.hh"
#include "arch/mmapped_ipr.hh"
#include "config/the_isa.hh"
#include "cpu/inst_seq.hh"
#include "cpu/timebuf.hh"
#include "debug/LSQUnit.hh"
#include "mem/packet.hh"
#include "mem/port.hh"

struct DerivO3CPUParams;

/**
 * Class that implements the actual LQ and SQ for each specific
 * thread.  Both are circular queues; load entries are freed upon
 * committing, while store entries are freed once they writeback. The
 * LSQUnit tracks if there are memory ordering violations, and also
 * detects partial store to load forwarding cases (a store only has
 * part of a load's data) that require the load to wait until the
 * store writes back. In the former case it holds onto the instruction
 * until the dependence unit looks at it, and in the latter it stalls
 * the LSQ until the store writes back. At that point the load is
 * replayed.
 */
template <class Impl>
class LSQUnit {
  public:
    typedef typename Impl::O3CPU O3CPU;
    typedef typename Impl::DynInstPtr DynInstPtr;
    typedef typename Impl::CPUPol::IEW IEW;
    typedef typename Impl::CPUPol::LSQ LSQ;
    typedef typename Impl::CPUPol::IssueStruct IssueStruct;

  public:
    /** Constructs an LSQ unit. init() must be called prior to use. */
    LSQUnit();

    /** Initializes the LSQ unit with the specified number of entries. */
    void init(O3CPU *cpu_ptr, IEW *iew_ptr, DerivO3CPUParams *params,
            LSQ *lsq_ptr, unsigned maxLQEntries, unsigned maxSQEntries,
            unsigned id);

    /** Returns the name of the LSQ unit. */
    std::string name() const;

    /** Registers statistics. */
    void regStats();

    /** Sets the pointer to the dcache port. */
    void setDcachePort(MasterPort *dcache_port);

    /** Perform sanity checks after a drain. */
    void drainSanityCheck() const;

    /** Takes over from another CPU's thread. */
    void takeOverFrom();

    /** Ticks the LSQ unit, which in this case only resets the number of
     * used cache ports.
     * @todo: Move the number of used ports up to the LSQ level so it can
     * be shared by all LSQ units.
     */
    void tick() { usedStorePorts = 0; }

    /** Inserts an instruction. */
    void insert(DynInstPtr &inst);
    /** Inserts a load instruction. */
    void insertLoad(DynInstPtr &load_inst);
    /** Inserts a store instruction. */
    void insertStore(DynInstPtr &store_inst);

    /** Check for ordering violations in the LSQ. For a store, squash if we
     * ever find a conflicting load. For a load, only squash if an
     * external snoop invalidate has been seen for that load address.
     * @param load_idx index to start checking at
     * @param inst the instruction to check
     */
    Fault checkViolations(int load_idx, DynInstPtr &inst);

    /** Check if an incoming invalidate hits in the lsq on a load
     * that might have issued out of order wrt another load because
     * of the intermediate invalidate.
     */
    void checkSnoop(PacketPtr pkt);

    /** Executes a load instruction. */
    Fault executeLoad(DynInstPtr &inst);

    Fault executeLoad(int lq_idx) { panic("Not implemented"); return NoFault; }
    /** Executes a store instruction. */
    Fault executeStore(DynInstPtr &inst);

    /** Commits the head load. */
    void commitLoad();
    /** Commits loads older than a specific sequence number. */
    void commitLoads(InstSeqNum &youngest_inst);

    /** Commits stores older than a specific sequence number. */
    void commitStores(InstSeqNum &youngest_inst);

    /** Writes back stores. */
    void writebackStores();

    /** Completes the data access that has been returned from the
     * memory system. */
    void completeDataAccess(PacketPtr pkt);

    /** Clears all the entries in the LQ. */
    void clearLQ();

    /** Clears all the entries in the SQ. */
    void clearSQ();

    /** Resizes the LQ to a given size. */
    void resizeLQ(unsigned size);

    /** Resizes the SQ to a given size. */
    void resizeSQ(unsigned size);

    /** Squashes all instructions younger than a specific sequence number. */
    void squash(const InstSeqNum &squashed_num);

    /** Returns if there is a memory ordering violation. Value is reset upon
     * call to getMemDepViolator().
     */
    bool violation() { return memDepViolator; }

    /** Returns the memory ordering violator. */
    DynInstPtr getMemDepViolator();

    /** Returns the number of free LQ entries. */
    unsigned numFreeLoadEntries();

    /** Returns the number of free SQ entries. */
    unsigned numFreeStoreEntries();

    /** Returns the number of loads in the LQ. */
    int numLoads() { return loads; }

    /** Returns the number of stores in the SQ. */
    int numStores() { return stores; }

    /** Returns if either the LQ or SQ is full. */
    bool isFull() { return lqFull() || sqFull(); }

    /** Returns if both the LQ and SQ are empty. */
    bool isEmpty() const { return lqEmpty() && sqEmpty(); }

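    /* One slot in each queue is reserved as a sentinel so that a full
     * circular queue can be distinguished from an empty one, hence the
     * "- 1" in the two checks below. */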
    /** Returns if the LQ is full. */
    bool lqFull() { return loads >= (LQEntries - 1); }

    /** Returns if the SQ is full. */
    bool sqFull() { return stores >= (SQEntries - 1); }

    /** Returns if the LQ is empty. */
    bool lqEmpty() const { return loads == 0; }

    /** Returns if the SQ is empty. */
    bool sqEmpty() const { return stores == 0; }

    /** Returns the number of instructions in the LSQ. */
    unsigned getCount() { return loads + stores; }

    /** Returns if there are any stores to writeback. */
    bool hasStoresToWB() { return storesToWB; }

    /** Returns the number of stores to writeback. */
    int numStoresToWB() { return storesToWB; }

    /** Returns if the LSQ unit will writeback on this cycle. */
    bool willWB() { return storeQueue[storeWBIdx].canWB &&
                        !storeQueue[storeWBIdx].completed &&
                        !isStoreBlocked; }

    /** Handles doing the retry. */
    void recvRetry();

  private:
    /** Reset the LSQ state */
    void resetState();

    /** Writes back the instruction, sending it to IEW. */
    void writeback(DynInstPtr &inst, PacketPtr pkt);

    /** Writes back a store that couldn't be completed the previous cycle. */
    void writebackPendingStore();

    /** Handles completing the send of a store to memory. */
    void storePostSend(PacketPtr pkt);

    /** Completes the store at the specified index. */
    void completeStore(int store_idx);

    /** Attempts to send a store to the cache. */
    bool sendStore(PacketPtr data_pkt);

    /** Increments the given store index (circular queue). */
    inline void incrStIdx(int &store_idx) const;
    /** Decrements the given store index (circular queue). */
    inline void decrStIdx(int &store_idx) const;
    /** Increments the given load index (circular queue). */
    inline void incrLdIdx(int &load_idx) const;
    /** Decrements the given load index (circular queue). */
    inline void decrLdIdx(int &load_idx) const;

  public:
    /** Debugging function to dump instructions in the LSQ. */
    void dumpInsts() const;

  private:
    /** Pointer to the CPU. */
    O3CPU *cpu;

    /** Pointer to the IEW stage. */
    IEW *iewStage;

    /** Pointer to the LSQ. */
    LSQ *lsq;

    /** Pointer to the dcache port.  Used only for sending. */
    MasterPort *dcachePort;

    /** Derived class to hold any sender state the LSQ needs. */
    class LSQSenderState : public Packet::SenderState
    {
      public:
        /** Default constructor. */
        LSQSenderState()
            : mainPkt(NULL), pendingPacket(NULL), idx(0), outstanding(1),
              isLoad(false), noWB(false), isSplit(false),
              pktToSend(false), cacheBlocked(false)
          { }

        /** Instruction that initiated the access to memory. */
        DynInstPtr inst;
        /** The main packet from a split load, used during writeback. */
        PacketPtr mainPkt;
        /** A second packet from a split store that needs sending. */
        PacketPtr pendingPacket;
        /** The LQ/SQ index of the instruction. */
        uint8_t idx;
        /** Number of outstanding packets to complete. */
        uint8_t outstanding;
        /** Whether or not it is a load. */
        bool isLoad;
        /** Whether or not the instruction will skip the writeback. */
        bool noWB;
        /** Whether or not this access is split in two. */
        bool isSplit;
        /** Whether or not there is a packet that needs sending. */
        bool pktToSend;
        /** Whether or not the second packet of this split load was blocked. */
        bool cacheBlocked;

        /** Completes a packet and returns whether the access is finished. */
        inline bool complete() { return --outstanding == 0; }
    };

    /** Writeback event, specifically for when stores forward data to loads. */
    class WritebackEvent : public Event {
      public:
        /** Constructs a writeback event. */
        WritebackEvent(DynInstPtr &_inst, PacketPtr pkt, LSQUnit *lsq_ptr);

        /** Processes the writeback event. */
        void process();

        /** Returns the description of this event. */
        const char *description() const;

      private:
        /** Instruction whose results are being written back. */
        DynInstPtr inst;

        /** The packet that would have been sent to memory. */
        PacketPtr pkt;

        /** The pointer to the LSQ unit that issued the store. */
        LSQUnit<Impl> *lsqPtr;
    };

  public:
    struct SQEntry {
        /** Constructs an empty store queue entry. */
        SQEntry()
            : inst(NULL), req(NULL), size(0),
              canWB(0), committed(0), completed(0)
        {
            std::memset(data, 0, sizeof(data));
        }

        ~SQEntry()
        {
            inst = NULL;
        }

        /** Constructs a store queue entry for a given instruction. */
        SQEntry(DynInstPtr &_inst)
            : inst(_inst), req(NULL), sreqLow(NULL), sreqHigh(NULL), size(0),
              isSplit(0), canWB(0), committed(0), completed(0), isAllZeros(0)
        {
            std::memset(data, 0, sizeof(data));
        }
        /** The store data. */
        char data[16];
        /** The store instruction. */
        DynInstPtr inst;
        /** The request for the store. */
        RequestPtr req;
        /** The split requests for the store. */
        RequestPtr sreqLow;
        RequestPtr sreqHigh;
        /** The size of the store. */
        uint8_t size;
        /** Whether or not the store is split into two requests. */
        bool isSplit;
        /** Whether or not the store can writeback. */
        bool canWB;
        /** Whether or not the store is committed. */
        bool committed;
        /** Whether or not the store is completed. */
        bool completed;
        /** Whether this request writes all zeros and thus doesn't
         * have any data attached to it. Used for cache-block-zero
         * style instructions (ARM DC ZVA; Alpha WH64).
         */
        bool isAllZeros;
    };

  private:
    /** The LSQUnit thread id. */
    ThreadID lsqID;

    /** The store queue. */
    std::vector<SQEntry> storeQueue;

    /** The load queue. */
    std::vector<DynInstPtr> loadQueue;

    /** The number of LQ entries, plus a sentinel entry (circular queue).
     *  @todo: Consider having a var that records the true number of LQ entries.
     */
    unsigned LQEntries;
    /** The number of SQ entries, plus a sentinel entry (circular queue).
     *  @todo: Consider having a var that records the true number of SQ entries.
     */
    unsigned SQEntries;

    /** The number of places to shift addresses in the LSQ before checking
     * for dependency violations.
     */
    unsigned depCheckShift;

    /** Should loads be checked for dependency issues? */
    bool checkLoads;

    /** The number of load instructions in the LQ. */
    int loads;
    /** The number of store instructions in the SQ. */
    int stores;
    /** The number of store instructions in the SQ waiting to writeback. */
    int storesToWB;

    /** The index of the head instruction in the LQ. */
    int loadHead;
    /** The index of the tail instruction in the LQ. */
    int loadTail;

    /** The index of the head instruction in the SQ. */
    int storeHead;
    /** The index of the first instruction that may be ready to be
     * written back, and has not yet been written back.
     */
    int storeWBIdx;
    /** The index of the tail instruction in the SQ. */
    int storeTail;

    /// @todo Consider moving to a more advanced model with write vs read ports
    /** The number of cache ports available each cycle (stores only). */
    int cacheStorePorts;

    /** The number of used cache ports in this cycle by stores. */
    int usedStorePorts;

    //list<InstSeqNum> mshrSeqNums;

    /** Address Mask for a cache block (e.g. ~(cache_block_size-1)) */
    Addr cacheBlockMask;

    /** Wire to read information from the issue stage time queue. */
    typename TimeBuffer<IssueStruct>::wire fromIssue;

    /** Whether or not the LSQ is stalled. */
    bool stalled;
    /** The store that causes the stall due to partial store to load
     * forwarding.
     */
    InstSeqNum stallingStoreIsn;
    /** The LQ index of the load that is stalled on the above store. */
    int stallingLoadIdx;

    /** The packet that needs to be retried. */
    PacketPtr retryPkt;

    /** Whether or not a store is blocked due to the memory system. */
    bool isStoreBlocked;

    /** Whether or not a store is in flight. */
    bool storeInFlight;

    /** The oldest load that caused a memory ordering violation. */
    DynInstPtr memDepViolator;

    /** Whether or not there is a packet that couldn't be sent because of
     * a lack of cache ports. */
    bool hasPendingPkt;

    /** The packet that is pending free cache ports. */
    PacketPtr pendingPkt;

    /** Flag for memory model. */
    bool needsTSO;

    // Will also need how many read/write ports the Dcache has.  Or keep track
    // of that in stage that is one level up, and only call executeLoad/Store
    // the appropriate number of times.
    /** Total number of loads forwarded from LSQ stores. */
    Stats::Scalar lsqForwLoads;

    /** Total number of loads ignored due to invalid addresses. */
    Stats::Scalar invAddrLoads;

    /** Total number of squashed loads. */
    Stats::Scalar lsqSquashedLoads;

    /** Total number of responses from the memory system that are
     * ignored due to the instruction already being squashed. */
    Stats::Scalar lsqIgnoredResponses;

    /** Total number of memory ordering violations. */
    Stats::Scalar lsqMemOrderViolation;

    /** Total number of squashed stores. */
    Stats::Scalar lsqSquashedStores;

    /** Total number of software prefetches ignored due to invalid addresses. */
    Stats::Scalar invAddrSwpfs;

    /** Ready loads blocked due to partial store-forwarding. */
    Stats::Scalar lsqBlockedLoads;

    /** Number of loads that were rescheduled. */
    Stats::Scalar lsqRescheduledLoads;

    /** Number of times the LSQ is blocked due to the cache. */
    Stats::Scalar lsqCacheBlocked;

  public:
    /** Executes the load at the given index. */
    Fault read(Request *req, Request *sreqLow, Request *sreqHigh,
               int load_idx);

    /** Executes the store at the given index. */
    Fault write(Request *req, Request *sreqLow, Request *sreqHigh,
                uint8_t *data, int store_idx);

    /** Returns the index of the head load instruction. */
    int getLoadHead() { return loadHead; }
    /** Returns the sequence number of the head load instruction. */
    InstSeqNum getLoadHeadSeqNum()
    {
        if (loadQueue[loadHead]) {
            return loadQueue[loadHead]->seqNum;
        } else {
            return 0;
        }
    }

    /** Returns the index of the head store instruction. */
    int getStoreHead() { return storeHead; }
    /** Returns the sequence number of the head store instruction. */
    InstSeqNum getStoreHeadSeqNum()
    {
        if (storeQueue[storeHead].inst) {
            return storeQueue[storeHead].inst->seqNum;
        } else {
            return 0;
        }
    }

    /** Returns whether or not the LSQ unit is stalled. */
    bool isStalled()  { return stalled; }
};

template <class Impl>
Fault
LSQUnit<Impl>::read(Request *req, Request *sreqLow, Request *sreqHigh,
                    int load_idx)
{
    DynInstPtr load_inst = loadQueue[load_idx];

    assert(load_inst);

    assert(!load_inst->isExecuted());

    // Make sure this isn't a strictly ordered load
    // A bit of a hackish way to get strictly ordered accesses to work
    // only if they're at the head of the LSQ and are ready to commit
    // (at the head of the ROB too).
    if (req->isStrictlyOrdered() &&
        (load_idx != loadHead || !load_inst->isAtCommit())) {
        iewStage->rescheduleMemInst(load_inst);
        ++lsqRescheduledLoads;
        DPRINTF(LSQUnit, "Strictly ordered load [sn:%lli] PC %s\n",
                load_inst->seqNum, load_inst->pcState());

        // Must delete request now that it wasn't handed off to
        // memory.  This is quite ugly.  @todo: Figure out the proper
        // place to really handle request deletes.
        delete req;
        if (TheISA::HasUnalignedMemAcc && sreqLow) {
            delete sreqLow;
            delete sreqHigh;
        }
        return std::make_shared<GenericISA::M5PanicFault>(
            "Strictly ordered load [sn:%llx] PC %s\n",
            load_inst->seqNum, load_inst->pcState());
    }

    // Check the SQ for any previous stores that might lead to forwarding
    int store_idx = load_inst->sqIdx;

    int store_size = 0;

    DPRINTF(LSQUnit, "Read called, load idx: %i, store idx: %i, "
            "storeHead: %i addr: %#x%s\n",
            load_idx, store_idx, storeHead, req->getPaddr(),
            sreqLow ? " split" : "");

    if (req->isLLSC()) {
        assert(!sreqLow);
        // Disable recording the result temporarily.  Writing to misc
        // regs normally updates the result, but this is not the
        // desired behavior when handling store conditionals.
        load_inst->recordResult(false);
        TheISA::handleLockedRead(load_inst.get(), req);
        load_inst->recordResult(true);
    }
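    // An LL/SC load still takes the normal path below; the code above only
    // records the locked address state before the access is made.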

    if (req->isMmappedIpr()) {
        assert(!load_inst->memData);
        load_inst->memData = new uint8_t[64];

        ThreadContext *thread = cpu->tcBase(lsqID);
        Cycles delay(0);
        PacketPtr data_pkt = new Packet(req, MemCmd::ReadReq);

        data_pkt->dataStatic(load_inst->memData);
        if (!TheISA::HasUnalignedMemAcc || !sreqLow) {
            delay = TheISA::handleIprRead(thread, data_pkt);
        } else {
            assert(sreqLow->isMmappedIpr() && sreqHigh->isMmappedIpr());
            PacketPtr fst_data_pkt = new Packet(sreqLow, MemCmd::ReadReq);
            PacketPtr snd_data_pkt = new Packet(sreqHigh, MemCmd::ReadReq);

            fst_data_pkt->dataStatic(load_inst->memData);
            snd_data_pkt->dataStatic(load_inst->memData + sreqLow->getSize());

            delay = TheISA::handleIprRead(thread, fst_data_pkt);
            Cycles delay2 = TheISA::handleIprRead(thread, snd_data_pkt);
            if (delay2 > delay)
                delay = delay2;

            delete sreqLow;
            delete sreqHigh;
            delete fst_data_pkt;
            delete snd_data_pkt;
        }
        WritebackEvent *wb = new WritebackEvent(load_inst, data_pkt, this);
        cpu->schedule(wb, cpu->clockEdge(delay));
        return NoFault;
    }

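    // Walk the store queue from the youngest store older than this load
    // (load_inst->sqIdx) back towards the oldest store that has not yet
    // written back, looking for an entry that overlaps the load's address.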
    while (store_idx != -1) {
        // End once we've reached the top of the LSQ
        if (store_idx == storeWBIdx) {
            break;
        }

        // Move the index to the next older store
        if (--store_idx < 0)
            store_idx += SQEntries;

        assert(storeQueue[store_idx].inst);

        store_size = storeQueue[store_idx].size;

        if (store_size == 0)
            continue;
        else if (storeQueue[store_idx].inst->strictlyOrdered())
            continue;

        assert(storeQueue[store_idx].inst->effAddrValid());

        // Check if the store data is within the lower and upper bounds of
        // addresses that the request needs.
        bool store_has_lower_limit =
            req->getVaddr() >= storeQueue[store_idx].inst->effAddr;
        bool store_has_upper_limit =
            (req->getVaddr() + req->getSize()) <=
            (storeQueue[store_idx].inst->effAddr + store_size);
        bool lower_load_has_store_part =
            req->getVaddr() < (storeQueue[store_idx].inst->effAddr +
                           store_size);
        bool upper_load_has_store_part =
            (req->getVaddr() + req->getSize()) >
            storeQueue[store_idx].inst->effAddr;

        // If the store's data has all of the data needed and the load isn't
        // LLSC, we can forward.
        if (store_has_lower_limit && store_has_upper_limit && !req->isLLSC()) {
            // Get shift amount for offset into the store's data.
            int shift_amt = req->getVaddr() - storeQueue[store_idx].inst->effAddr;

            // Allocate memory if this is the first time a load is issued.
            if (!load_inst->memData) {
                load_inst->memData = new uint8_t[req->getSize()];
            }
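            // Cache-block-zero stores carry no data, so forward zeros;
            // otherwise copy the overlapping bytes out of the store's data.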
            if (storeQueue[store_idx].isAllZeros)
                memset(load_inst->memData, 0, req->getSize());
            else
                memcpy(load_inst->memData,
                    storeQueue[store_idx].data + shift_amt, req->getSize());

            DPRINTF(LSQUnit, "Forwarding from store idx %i to load to "
                    "addr %#x\n", store_idx, req->getVaddr());

            PacketPtr data_pkt = new Packet(req, MemCmd::ReadReq);
            data_pkt->dataStatic(load_inst->memData);

            WritebackEvent *wb = new WritebackEvent(load_inst, data_pkt, this);

            // We'll say this has a 1 cycle load-store forwarding latency
            // for now.
            // @todo: Need to make this a parameter.
            cpu->schedule(wb, curTick());

            // Don't need to do anything special for split loads.
            if (TheISA::HasUnalignedMemAcc && sreqLow) {
                delete sreqLow;
                delete sreqHigh;
            }

            ++lsqForwLoads;
            return NoFault;
        } else if (
                (!req->isLLSC() &&
                 ((store_has_lower_limit && lower_load_has_store_part) ||
                  (store_has_upper_limit && upper_load_has_store_part) ||
                  (lower_load_has_store_part && upper_load_has_store_part))) ||
                (req->isLLSC() &&
                 ((store_has_lower_limit || upper_load_has_store_part) &&
                  (store_has_upper_limit || lower_load_has_store_part)))) {
            // This is the partial store-load forwarding case where a store
            // has only part of the load's data and the load isn't LLSC or
            // the load is LLSC and the store has all or part of the load's
            // data

            // If it's already been written back, then don't worry about
            // stalling on it.
            if (storeQueue[store_idx].completed) {
                panic("Should not check one of these");
                continue;
            }

            // Must stall load and force it to retry, so long as it's the oldest
            // load that needs to do so.
            if (!stalled ||
                (stalled &&
                 load_inst->seqNum <
                 loadQueue[stallingLoadIdx]->seqNum)) {
                stalled = true;
                stallingStoreIsn = storeQueue[store_idx].inst->seqNum;
                stallingLoadIdx = load_idx;
            }

            // Tell IQ/mem dep unit that this instruction will need to be
            // rescheduled eventually
            iewStage->rescheduleMemInst(load_inst);
            load_inst->clearIssued();
            ++lsqRescheduledLoads;

            // Do not generate a writeback event as this instruction is not
            // complete.
            DPRINTF(LSQUnit, "Load-store forwarding mis-match. "
                    "Store idx %i to load addr %#x\n",
                    store_idx, req->getVaddr());

            // Must delete request now that it wasn't handed off to
            // memory.  This is quite ugly.  @todo: Figure out the
            // proper place to really handle request deletes.
            delete req;
            if (TheISA::HasUnalignedMemAcc && sreqLow) {
                delete sreqLow;
                delete sreqHigh;
            }

            return NoFault;
        }
    }

    // If there's no forwarding case, then go access memory
    DPRINTF(LSQUnit, "Doing memory access for inst [sn:%lli] PC %s\n",
            load_inst->seqNum, load_inst->pcState());

    // Allocate memory if this is the first time a load is issued.
    if (!load_inst->memData) {
        load_inst->memData = new uint8_t[req->getSize()];
    }

    // If the cache is not blocked, do the cache access
    bool completedFirst = false;
    PacketPtr data_pkt = Packet::createRead(req);
    PacketPtr fst_data_pkt = NULL;
    PacketPtr snd_data_pkt = NULL;

    data_pkt->dataStatic(load_inst->memData);

    LSQSenderState *state = new LSQSenderState;
    state->isLoad = true;
    state->idx = load_idx;
    state->inst = load_inst;
    data_pkt->senderState = state;

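    // For a split (unaligned) access, the two packets created below share
    // this sender state; the main packet is kept around so writeback can be
    // done on the complete data once both halves have returned.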
    if (!TheISA::HasUnalignedMemAcc || !sreqLow) {
        // Point the first packet at the main data packet.
        fst_data_pkt = data_pkt;
    } else {
        // Create the split packets.
        fst_data_pkt = Packet::createRead(sreqLow);
        snd_data_pkt = Packet::createRead(sreqHigh);

        fst_data_pkt->dataStatic(load_inst->memData);
        snd_data_pkt->dataStatic(load_inst->memData + sreqLow->getSize());

        fst_data_pkt->senderState = state;
        snd_data_pkt->senderState = state;

        state->isSplit = true;
        state->outstanding = 2;
        state->mainPkt = data_pkt;
    }

    // For now, load throughput is constrained by the number of
    // load FUs only, and loads do not consume a cache port (only
    // stores do).
    // @todo We should account for cache port contention
    // and arbitrate between loads and stores.
    bool successful_load = true;
    if (!dcachePort->sendTimingReq(fst_data_pkt)) {
        successful_load = false;
    } else if (TheISA::HasUnalignedMemAcc && sreqLow) {
        completedFirst = true;

        // The first packet was sent without problems, so send this one
        // too. If there is a problem with this packet then the whole
        // load will be squashed, so indicate this to the state object.
        // The first packet will return in completeDataAccess and be
        // handled there.
        // @todo We should also account for cache port contention
        // here.
        if (!dcachePort->sendTimingReq(snd_data_pkt)) {
            // The main packet will be deleted in completeDataAccess.
            state->complete();
            // Signify to 1st half that the 2nd half was blocked via state
            state->cacheBlocked = true;
            successful_load = false;
        }
    }

    // If the cache was blocked, or has become blocked due to the access,
    // handle it.
    if (!successful_load) {
        if (!sreqLow) {
            // Packet wasn't split, just delete main packet info
            delete state;
            delete req;
            delete data_pkt;
        }

        if (TheISA::HasUnalignedMemAcc && sreqLow) {
            if (!completedFirst) {
                // Split packet, but first failed.  Delete all state.
                delete state;
                delete req;
                delete data_pkt;
                delete fst_data_pkt;
                delete snd_data_pkt;
                delete sreqLow;
                delete sreqHigh;
                sreqLow = NULL;
                sreqHigh = NULL;
            } else {
                // Can't delete main packet data or state because first packet
                // was sent to the memory system
                delete data_pkt;
                delete req;
                delete sreqHigh;
                delete snd_data_pkt;
                sreqHigh = NULL;
            }
        }

        ++lsqCacheBlocked;

        iewStage->blockMemInst(load_inst);

        // No fault occurred, even though the interface is blocked.
        return NoFault;
    }

    return NoFault;
}

template <class Impl>
Fault
LSQUnit<Impl>::write(Request *req, Request *sreqLow, Request *sreqHigh,
                     uint8_t *data, int store_idx)
{
    assert(storeQueue[store_idx].inst);

    DPRINTF(LSQUnit, "Doing write to store idx %i, addr %#x"
            " | storeHead:%i [sn:%i]\n",
            store_idx, req->getPaddr(), storeHead,
            storeQueue[store_idx].inst->seqNum);

    storeQueue[store_idx].req = req;
    storeQueue[store_idx].sreqLow = sreqLow;
    storeQueue[store_idx].sreqHigh = sreqHigh;
    unsigned size = req->getSize();
    storeQueue[store_idx].size = size;
    storeQueue[store_idx].isAllZeros = req->getFlags() & Request::CACHE_BLOCK_ZERO;
    assert(size <= sizeof(storeQueue[store_idx].data) ||
            (req->getFlags() & Request::CACHE_BLOCK_ZERO));

    // Split stores can only occur in ISAs with unaligned memory accesses.  If
    // a store request has been split, sreqLow and sreqHigh will be non-null.
    if (TheISA::HasUnalignedMemAcc && sreqLow) {
        storeQueue[store_idx].isSplit = true;
    }

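    // Cache-block-zero style stores carry no data, so there is nothing to
    // copy into the store queue entry for them.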
    if (!(req->getFlags() & Request::CACHE_BLOCK_ZERO))
        memcpy(storeQueue[store_idx].data, data, size);

    // This function only writes the data to the store queue, so no fault
    // can happen here.
    return NoFault;
}

#endif // __CPU_O3_LSQ_UNIT_HH__