/*
 * Copyright (c) 2012 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2004-2006 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Kevin Lim
 *          Korey Sewell
 */

#ifndef __CPU_O3_LSQ_UNIT_HH__
#define __CPU_O3_LSQ_UNIT_HH__

#include <algorithm>
#include <cstring>
#include <map>
#include <queue>

#include "arch/generic/debugfaults.hh"
#include "arch/isa_traits.hh"
#include "arch/locked_mem.hh"
#include "arch/mmapped_ipr.hh"
#include "base/hashmap.hh"
#include "config/the_isa.hh"
#include "cpu/inst_seq.hh"
#include "cpu/timebuf.hh"
#include "debug/LSQUnit.hh"
#include "mem/packet.hh"
#include "mem/port.hh"
#include "sim/fault_fwd.hh"

struct DerivO3CPUParams;

/**
 * Class that implements the actual LQ and SQ for each specific
 * thread.  Both are circular queues; load entries are freed upon
 * committing, while store entries are freed once they writeback. The
 * LSQUnit tracks if there are memory ordering violations, and also
 * detects partial store-to-load forwarding cases (a store has only
 * part of a load's data) that require the load to wait until the
 * store writes back. In the former case it holds onto the instruction
 * until the dependence unit looks at it, and in the latter it stalls
 * the LSQ until the store writes back. At that point the load is
 * replayed.
 */
template <class Impl>
class LSQUnit {
  public:
    typedef typename Impl::O3CPU O3CPU;
    typedef typename Impl::DynInstPtr DynInstPtr;
    typedef typename Impl::CPUPol::IEW IEW;
    typedef typename Impl::CPUPol::LSQ LSQ;
    typedef typename Impl::CPUPol::IssueStruct IssueStruct;

  public:
    /** Constructs an LSQ unit. init() must be called prior to use. */
    LSQUnit();

    /** Initializes the LSQ unit with the specified number of entries. */
    void init(O3CPU *cpu_ptr, IEW *iew_ptr, DerivO3CPUParams *params,
            LSQ *lsq_ptr, unsigned maxLQEntries, unsigned maxSQEntries,
            unsigned id);

    /** Returns the name of the LSQ unit. */
    std::string name() const;

    /** Registers statistics. */
    void regStats();

    /** Sets the pointer to the dcache port. */
    void setDcachePort(MasterPort *dcache_port);

    /** Perform sanity checks after a drain. */
    void drainSanityCheck() const;

    /** Takes over from another CPU's thread. */
    void takeOverFrom();

    /** Ticks the LSQ unit, which in this case only resets the number of
     * used cache ports.
     * @todo: Move the number of used ports up to the LSQ level so it can
     * be shared by all LSQ units.
     */
    void tick() { usedPorts = 0; }

    /** Inserts an instruction. */
    void insert(DynInstPtr &inst);
    /** Inserts a load instruction. */
    void insertLoad(DynInstPtr &load_inst);
    /** Inserts a store instruction. */
    void insertStore(DynInstPtr &store_inst);

    /** Check for ordering violations in the LSQ. For a store, squash if we
     * ever find a conflicting load. For a load, only squash if an
     * external snoop invalidate has been seen for that load address.
     * @param load_idx index to start checking at
     * @param inst the instruction to check
     */
    Fault checkViolations(int load_idx, DynInstPtr &inst);

    /** Check if an incoming invalidate hits in the LSQ on a load
     * that might have issued out of order with respect to another load
     * because of the intermediate invalidate.
     */
    void checkSnoop(PacketPtr pkt);
1398545Ssaidi@eecs.umich.edu
1402292SN/A    /** Executes a load instruction. */
1412292SN/A    Fault executeLoad(DynInstPtr &inst);
1422292SN/A
1432329SN/A    Fault executeLoad(int lq_idx) { panic("Not implemented"); return NoFault; }
1442292SN/A    /** Executes a store instruction. */
1452292SN/A    Fault executeStore(DynInstPtr &inst);
1462292SN/A
1472292SN/A    /** Commits the head load. */
1482292SN/A    void commitLoad();
1492292SN/A    /** Commits loads older than a specific sequence number. */
1502292SN/A    void commitLoads(InstSeqNum &youngest_inst);
1512292SN/A
1522292SN/A    /** Commits stores older than a specific sequence number. */
1532292SN/A    void commitStores(InstSeqNum &youngest_inst);
1542292SN/A
1552292SN/A    /** Writes back stores. */
1562292SN/A    void writebackStores();
1572292SN/A
1582790Sktlim@umich.edu    /** Completes the data access that has been returned from the
1592790Sktlim@umich.edu     * memory system. */
1602669Sktlim@umich.edu    void completeDataAccess(PacketPtr pkt);
1612669Sktlim@umich.edu
1622292SN/A    /** Clears all the entries in the LQ. */
1632292SN/A    void clearLQ();
1642292SN/A
1652292SN/A    /** Clears all the entries in the SQ. */
1662292SN/A    void clearSQ();
1672292SN/A
1682292SN/A    /** Resizes the LQ to a given size. */
1692292SN/A    void resizeLQ(unsigned size);
1702292SN/A
1712292SN/A    /** Resizes the SQ to a given size. */
1722292SN/A    void resizeSQ(unsigned size);
1732292SN/A
1742292SN/A    /** Squashes all instructions younger than a specific sequence number. */
1752292SN/A    void squash(const InstSeqNum &squashed_num);
1762292SN/A
1772292SN/A    /** Returns if there is a memory ordering violation. Value is reset upon
1782292SN/A     * call to getMemDepViolator().
1792292SN/A     */
1802292SN/A    bool violation() { return memDepViolator; }
1812292SN/A
1822292SN/A    /** Returns the memory ordering violator. */
1832292SN/A    DynInstPtr getMemDepViolator();
1842292SN/A
1852329SN/A    /** Returns if a load became blocked due to the memory system. */
1862292SN/A    bool loadBlocked()
1872292SN/A    { return isLoadBlocked; }
1882292SN/A
1892348SN/A    /** Clears the signal that a load became blocked. */
1902292SN/A    void clearLoadBlocked()
1912292SN/A    { isLoadBlocked = false; }
1922292SN/A
1932348SN/A    /** Returns if the blocked load was handled. */
1942292SN/A    bool isLoadBlockedHandled()
1952292SN/A    { return loadBlockedHandled; }
1962292SN/A
1972348SN/A    /** Records the blocked load as being handled. */
1982292SN/A    void setLoadBlockedHandled()
1992292SN/A    { loadBlockedHandled = true; }
2002292SN/A
2012292SN/A    /** Returns the number of free entries (min of free LQ and SQ entries). */
2022292SN/A    unsigned numFreeEntries();
2032292SN/A
2042292SN/A    /** Returns the number of loads in the LQ. */
2052292SN/A    int numLoads() { return loads; }
2062292SN/A
2072292SN/A    /** Returns the number of stores in the SQ. */
2082292SN/A    int numStores() { return stores; }
2092292SN/A
2102292SN/A    /** Returns if either the LQ or SQ is full. */
2112292SN/A    bool isFull() { return lqFull() || sqFull(); }
2122292SN/A
2139444SAndreas.Sandberg@ARM.com    /** Returns if both the LQ and SQ are empty. */
2149444SAndreas.Sandberg@ARM.com    bool isEmpty() const { return lqEmpty() && sqEmpty(); }
2159444SAndreas.Sandberg@ARM.com
2162292SN/A    /** Returns if the LQ is full. */
2172292SN/A    bool lqFull() { return loads >= (LQEntries - 1); }
2182292SN/A
2192292SN/A    /** Returns if the SQ is full. */
2202292SN/A    bool sqFull() { return stores >= (SQEntries - 1); }
2212292SN/A
2229444SAndreas.Sandberg@ARM.com    /** Returns if the LQ is empty. */
2239444SAndreas.Sandberg@ARM.com    bool lqEmpty() const { return loads == 0; }
2249444SAndreas.Sandberg@ARM.com
2259444SAndreas.Sandberg@ARM.com    /** Returns if the SQ is empty. */
2269444SAndreas.Sandberg@ARM.com    bool sqEmpty() const { return stores == 0; }
2279444SAndreas.Sandberg@ARM.com
2282292SN/A    /** Returns the number of instructions in the LSQ. */
2292292SN/A    unsigned getCount() { return loads + stores; }
2302292SN/A
2312292SN/A    /** Returns if there are any stores to writeback. */
2322292SN/A    bool hasStoresToWB() { return storesToWB; }
2332292SN/A
2342292SN/A    /** Returns the number of stores to writeback. */
2352292SN/A    int numStoresToWB() { return storesToWB; }
2362292SN/A
2372292SN/A    /** Returns if the LSQ unit will writeback on this cycle. */
2382292SN/A    bool willWB() { return storeQueue[storeWBIdx].canWB &&
2392678Sktlim@umich.edu                        !storeQueue[storeWBIdx].completed &&
2402678Sktlim@umich.edu                        !isStoreBlocked; }
2412292SN/A
2422907Sktlim@umich.edu    /** Handles doing the retry. */
2432907Sktlim@umich.edu    void recvRetry();
2442907Sktlim@umich.edu
2452292SN/A  private:
2469444SAndreas.Sandberg@ARM.com    /** Reset the LSQ state */
2479444SAndreas.Sandberg@ARM.com    void resetState();
2489444SAndreas.Sandberg@ARM.com
2492698Sktlim@umich.edu    /** Writes back the instruction, sending it to IEW. */
2502678Sktlim@umich.edu    void writeback(DynInstPtr &inst, PacketPtr pkt);
2512678Sktlim@umich.edu
2526974Stjones1@inf.ed.ac.uk    /** Writes back a store that couldn't be completed the previous cycle. */
2536974Stjones1@inf.ed.ac.uk    void writebackPendingStore();
2546974Stjones1@inf.ed.ac.uk
2552698Sktlim@umich.edu    /** Handles completing the send of a store to memory. */
2563349Sbinkertn@umich.edu    void storePostSend(PacketPtr pkt);
2572693Sktlim@umich.edu
2582292SN/A    /** Completes the store at the specified index. */
2592292SN/A    void completeStore(int store_idx);
2602292SN/A
2616974Stjones1@inf.ed.ac.uk    /** Attempts to send a store to the cache. */
2626974Stjones1@inf.ed.ac.uk    bool sendStore(PacketPtr data_pkt);
2636974Stjones1@inf.ed.ac.uk
2642292SN/A    /** Increments the given store index (circular queue). */
2659440SAndreas.Sandberg@ARM.com    inline void incrStIdx(int &store_idx) const;
2662292SN/A    /** Decrements the given store index (circular queue). */
2679440SAndreas.Sandberg@ARM.com    inline void decrStIdx(int &store_idx) const;
2682292SN/A    /** Increments the given load index (circular queue). */
2699440SAndreas.Sandberg@ARM.com    inline void incrLdIdx(int &load_idx) const;
2702292SN/A    /** Decrements the given load index (circular queue). */
2719440SAndreas.Sandberg@ARM.com    inline void decrLdIdx(int &load_idx) const;
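
    // Conceptual note (illustrative only): both queues reserve a sentinel
    // slot, so the index helpers above simply wrap modulo the queue size,
    // roughly equivalent to:
    //
    //     if (++store_idx >= SQEntries) store_idx = 0;          // incrStIdx
    //     if (--load_idx < 0)           load_idx += LQEntries;  // decrLdIdx
    //
    // The actual out-of-line definitions are assumed to live with the rest
    // of the implementation (lsq_unit_impl.hh).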

  public:
    /** Debugging function to dump instructions in the LSQ. */
    void dumpInsts() const;

  private:
    /** Pointer to the CPU. */
    O3CPU *cpu;

    /** Pointer to the IEW stage. */
    IEW *iewStage;

    /** Pointer to the LSQ. */
    LSQ *lsq;

    /** Pointer to the dcache port.  Used only for sending. */
    MasterPort *dcachePort;

    /** Derived class to hold any sender state the LSQ needs. */
    class LSQSenderState : public Packet::SenderState
    {
      public:
        /** Default constructor. */
        LSQSenderState()
            : mainPkt(NULL), pendingPacket(NULL), outstanding(1),
              noWB(false), isSplit(false), pktToSend(false)
          { }

        /** Instruction that initiated the access to memory. */
        DynInstPtr inst;
        /** The main packet from a split load, used during writeback. */
        PacketPtr mainPkt;
        /** A second packet from a split store that needs sending. */
        PacketPtr pendingPacket;
        /** The LQ/SQ index of the instruction. */
        uint8_t idx;
        /** Number of outstanding packets to complete. */
        uint8_t outstanding;
        /** Whether or not it is a load. */
        bool isLoad;
        /** Whether or not the instruction's writeback should be skipped. */
        bool noWB;
        /** Whether or not this access is split in two. */
        bool isSplit;
        /** Whether or not there is a packet that needs sending. */
        bool pktToSend;

        /** Completes a packet and returns whether the access is finished. */
        inline bool complete() { return --outstanding == 0; }
    };
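
    // The sender state above rides along with each packet the LSQ sends to
    // the dcache (see read() below, where it is attached via
    // data_pkt->senderState), letting completeDataAccess() recover the
    // originating instruction, its LQ/SQ slot and, for split accesses, how
    // many packets are still outstanding when the response returns.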

    /** Writeback event, specifically for when stores forward data to loads. */
    class WritebackEvent : public Event {
      public:
        /** Constructs a writeback event. */
        WritebackEvent(DynInstPtr &_inst, PacketPtr pkt, LSQUnit *lsq_ptr);

        /** Processes the writeback event. */
        void process();

        /** Returns the description of this event. */
        const char *description() const;

      private:
        /** Instruction whose results are being written back. */
        DynInstPtr inst;

        /** The packet that would have been sent to memory. */
        PacketPtr pkt;

        /** The pointer to the LSQ unit that issued the store. */
        LSQUnit<Impl> *lsqPtr;
    };

  public:
    struct SQEntry {
        /** Constructs an empty store queue entry. */
        SQEntry()
            : inst(NULL), req(NULL), sreqLow(NULL), sreqHigh(NULL), size(0),
              isSplit(0), canWB(0), committed(0), completed(0)
        {
            std::memset(data, 0, sizeof(data));
        }

        ~SQEntry()
        {
            inst = NULL;
        }

        /** Constructs a store queue entry for a given instruction. */
        SQEntry(DynInstPtr &_inst)
            : inst(_inst), req(NULL), sreqLow(NULL), sreqHigh(NULL), size(0),
              isSplit(0), canWB(0), committed(0), completed(0)
        {
            std::memset(data, 0, sizeof(data));
        }
        /** The store data. */
        char data[16];
        /** The store instruction. */
        DynInstPtr inst;
        /** The request for the store. */
        RequestPtr req;
        /** The split requests for the store. */
        RequestPtr sreqLow;
        RequestPtr sreqHigh;
        /** The size of the store. */
        uint8_t size;
        /** Whether or not the store is split into two requests. */
        bool isSplit;
        /** Whether or not the store can writeback. */
        bool canWB;
        /** Whether or not the store is committed. */
        bool committed;
        /** Whether or not the store is completed. */
        bool completed;
    };

  private:
    /** The LSQUnit thread id. */
    ThreadID lsqID;

    /** The store queue. */
    std::vector<SQEntry> storeQueue;

    /** The load queue. */
    std::vector<DynInstPtr> loadQueue;

    /** The number of LQ entries, plus a sentinel entry (circular queue).
     *  @todo: Consider having a variable that records the true number of
     *  LQ entries.
     */
    unsigned LQEntries;
    /** The number of SQ entries, plus a sentinel entry (circular queue).
     *  @todo: Consider having a variable that records the true number of
     *  SQ entries.
     */
    unsigned SQEntries;

    /** The number of places to shift addresses in the LSQ before checking
     * for dependency violations.
     */
    unsigned depCheckShift;

    /** Should loads be checked for dependency issues? */
    bool checkLoads;

    /** The number of load instructions in the LQ. */
    int loads;
    /** The number of store instructions in the SQ. */
    int stores;
    /** The number of store instructions in the SQ waiting to writeback. */
    int storesToWB;

    /** The index of the head instruction in the LQ. */
    int loadHead;
    /** The index of the tail instruction in the LQ. */
    int loadTail;

    /** The index of the head instruction in the SQ. */
    int storeHead;
    /** The index of the first instruction that may be ready to be
     * written back, and has not yet been written back.
     */
    int storeWBIdx;
    /** The index of the tail instruction in the SQ. */
    int storeTail;

    /// @todo Consider moving to a more advanced model with write vs read ports
    /** The number of cache ports available each cycle. */
    int cachePorts;

    /** The number of used cache ports in this cycle. */
    int usedPorts;

    //list<InstSeqNum> mshrSeqNums;

    /** Address Mask for a cache block (e.g. ~(cache_block_size-1)) */
    Addr cacheBlockMask;
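
    // For example, with 64-byte cache blocks cacheBlockMask == ~Addr(0x3f),
    // so two addresses fall in the same block iff
    // (a & cacheBlockMask) == (b & cacheBlockMask), which is what
    // checkSnoop() relies on to match incoming invalidations against
    // outstanding loads.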
4488545Ssaidi@eecs.umich.edu
4492292SN/A    /** Wire to read information from the issue stage time queue. */
4502292SN/A    typename TimeBuffer<IssueStruct>::wire fromIssue;
4512292SN/A
4522292SN/A    /** Whether or not the LSQ is stalled. */
4532292SN/A    bool stalled;
4542292SN/A    /** The store that causes the stall due to partial store to load
4552292SN/A     * forwarding.
4562292SN/A     */
4572292SN/A    InstSeqNum stallingStoreIsn;
4582292SN/A    /** The index of the above store. */
4592292SN/A    int stallingLoadIdx;
4602292SN/A
4612698Sktlim@umich.edu    /** The packet that needs to be retried. */
4622698Sktlim@umich.edu    PacketPtr retryPkt;
4632693Sktlim@umich.edu
4642698Sktlim@umich.edu    /** Whehter or not a store is blocked due to the memory system. */
4652678Sktlim@umich.edu    bool isStoreBlocked;
4662678Sktlim@umich.edu
4672329SN/A    /** Whether or not a load is blocked due to the memory system. */
4682292SN/A    bool isLoadBlocked;
4692292SN/A
4702348SN/A    /** Has the blocked load been handled. */
4712292SN/A    bool loadBlockedHandled;
4722292SN/A
4738727Snilay@cs.wisc.edu    /** Whether or not a store is in flight. */
4748727Snilay@cs.wisc.edu    bool storeInFlight;
4758727Snilay@cs.wisc.edu
4762348SN/A    /** The sequence number of the blocked load. */
4772292SN/A    InstSeqNum blockedLoadSeqNum;
4782292SN/A
4792292SN/A    /** The oldest load that caused a memory ordering violation. */
4802292SN/A    DynInstPtr memDepViolator;
4812292SN/A
4826974Stjones1@inf.ed.ac.uk    /** Whether or not there is a packet that couldn't be sent because of
4836974Stjones1@inf.ed.ac.uk     * a lack of cache ports. */
4846974Stjones1@inf.ed.ac.uk    bool hasPendingPkt;
4856974Stjones1@inf.ed.ac.uk
4866974Stjones1@inf.ed.ac.uk    /** The packet that is pending free cache ports. */
4876974Stjones1@inf.ed.ac.uk    PacketPtr pendingPkt;
4886974Stjones1@inf.ed.ac.uk
4898727Snilay@cs.wisc.edu    /** Flag for memory model. */
4908727Snilay@cs.wisc.edu    bool needsTSO;
4918727Snilay@cs.wisc.edu
4922292SN/A    // Will also need how many read/write ports the Dcache has.  Or keep track
4932292SN/A    // of that in stage that is one level up, and only call executeLoad/Store
4942292SN/A    // the appropriate number of times.
4952727Sktlim@umich.edu    /** Total number of loads forwaded from LSQ stores. */
4965999Snate@binkert.org    Stats::Scalar lsqForwLoads;
4972307SN/A
4983126Sktlim@umich.edu    /** Total number of loads ignored due to invalid addresses. */
4995999Snate@binkert.org    Stats::Scalar invAddrLoads;
5003126Sktlim@umich.edu
5013126Sktlim@umich.edu    /** Total number of squashed loads. */
5025999Snate@binkert.org    Stats::Scalar lsqSquashedLoads;
5033126Sktlim@umich.edu
5043126Sktlim@umich.edu    /** Total number of responses from the memory system that are
5053126Sktlim@umich.edu     * ignored due to the instruction already being squashed. */
5065999Snate@binkert.org    Stats::Scalar lsqIgnoredResponses;
5073126Sktlim@umich.edu
5083126Sktlim@umich.edu    /** Tota number of memory ordering violations. */
5095999Snate@binkert.org    Stats::Scalar lsqMemOrderViolation;
5103126Sktlim@umich.edu
5112727Sktlim@umich.edu    /** Total number of squashed stores. */
5125999Snate@binkert.org    Stats::Scalar lsqSquashedStores;
5132727Sktlim@umich.edu
5142727Sktlim@umich.edu    /** Total number of software prefetches ignored due to invalid addresses. */
5155999Snate@binkert.org    Stats::Scalar invAddrSwpfs;
5162727Sktlim@umich.edu
5172727Sktlim@umich.edu    /** Ready loads blocked due to partial store-forwarding. */
5185999Snate@binkert.org    Stats::Scalar lsqBlockedLoads;
5192727Sktlim@umich.edu
5202727Sktlim@umich.edu    /** Number of loads that were rescheduled. */
5215999Snate@binkert.org    Stats::Scalar lsqRescheduledLoads;
5222727Sktlim@umich.edu
5232727Sktlim@umich.edu    /** Number of times the LSQ is blocked due to the cache. */
5245999Snate@binkert.org    Stats::Scalar lsqCacheBlocked;
5252727Sktlim@umich.edu
5262292SN/A  public:
5272292SN/A    /** Executes the load at the given index. */
5287520Sgblack@eecs.umich.edu    Fault read(Request *req, Request *sreqLow, Request *sreqHigh,
5297520Sgblack@eecs.umich.edu               uint8_t *data, int load_idx);
5302292SN/A
5312292SN/A    /** Executes the store at the given index. */
5327520Sgblack@eecs.umich.edu    Fault write(Request *req, Request *sreqLow, Request *sreqHigh,
5337520Sgblack@eecs.umich.edu                uint8_t *data, int store_idx);
5342292SN/A
5352292SN/A    /** Returns the index of the head load instruction. */
5362292SN/A    int getLoadHead() { return loadHead; }
5372292SN/A    /** Returns the sequence number of the head load instruction. */
5382292SN/A    InstSeqNum getLoadHeadSeqNum()
5392292SN/A    {
5402292SN/A        if (loadQueue[loadHead]) {
5412292SN/A            return loadQueue[loadHead]->seqNum;
5422292SN/A        } else {
5432292SN/A            return 0;
5442292SN/A        }
5452292SN/A
5462292SN/A    }
5472292SN/A
5482292SN/A    /** Returns the index of the head store instruction. */
5492292SN/A    int getStoreHead() { return storeHead; }
5502292SN/A    /** Returns the sequence number of the head store instruction. */
5512292SN/A    InstSeqNum getStoreHeadSeqNum()
5522292SN/A    {
5532292SN/A        if (storeQueue[storeHead].inst) {
5542292SN/A            return storeQueue[storeHead].inst->seqNum;
5552292SN/A        } else {
5562292SN/A            return 0;
5572292SN/A        }
5582292SN/A
5592292SN/A    }
5602292SN/A
5612292SN/A    /** Returns whether or not the LSQ unit is stalled. */
5622292SN/A    bool isStalled()  { return stalled; }
5632292SN/A};
5642292SN/A
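/*
 * read() and write() are defined here in the header rather than with the
 * rest of the out-of-line members (assumed to live in lsq_unit_impl.hh),
 * presumably so they can be inlined into the CPU's memory-access path.
 * Illustrative only: a caller is expected to have already reserved the
 * LQ/SQ slot via insertLoad()/insertStore() and translated the access,
 * roughly along these lines (hypothetical sketch, not the actual call
 * chain):
 *
 *     lsqUnit.insertLoad(inst);
 *     ...                       // translation fills in req (and the
 *                               // sreqLow/sreqHigh pair for a split access)
 *     Fault fault = lsqUnit.read(req, sreqLow, sreqHigh, data, inst->lqIdx);
 */
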
template <class Impl>
Fault
LSQUnit<Impl>::read(Request *req, Request *sreqLow, Request *sreqHigh,
                    uint8_t *data, int load_idx)
{
    DynInstPtr load_inst = loadQueue[load_idx];

    assert(load_inst);

    assert(!load_inst->isExecuted());

    // A somewhat hackish way to handle uncacheable accesses: they are only
    // allowed to proceed when they are at the head of the LSQ and are ready
    // to commit (i.e. at the head of the ROB too); otherwise the load is
    // rescheduled.
    if (req->isUncacheable() &&
        (load_idx != loadHead || !load_inst->isAtCommit())) {
        iewStage->rescheduleMemInst(load_inst);
        ++lsqRescheduledLoads;
        DPRINTF(LSQUnit, "Uncacheable load [sn:%lli] PC %s\n",
                load_inst->seqNum, load_inst->pcState());

        // Must delete request now that it wasn't handed off to
        // memory.  This is quite ugly.  @todo: Figure out the proper
        // place to really handle request deletes.
        delete req;
        if (TheISA::HasUnalignedMemAcc && sreqLow) {
            delete sreqLow;
            delete sreqHigh;
        }
        return new GenericISA::M5PanicFault(
                "Uncacheable load [sn:%llx] PC %s\n",
                load_inst->seqNum, load_inst->pcState());
    }

    // Check the SQ for any previous stores that might lead to forwarding
    int store_idx = load_inst->sqIdx;

    int store_size = 0;

    DPRINTF(LSQUnit, "Read called, load idx: %i, store idx: %i, "
            "storeHead: %i addr: %#x%s\n",
            load_idx, store_idx, storeHead, req->getPaddr(),
            sreqLow ? " split" : "");

    if (req->isLLSC()) {
        assert(!sreqLow);
        // Disable recording the result temporarily.  Writing to misc
        // regs normally updates the result, but this is not the
        // desired behavior when handling store conditionals.
        load_inst->recordResult(false);
        TheISA::handleLockedRead(load_inst.get(), req);
        load_inst->recordResult(true);
    }

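    // Memory-mapped IPR (internal processor register) accesses never go to
    // the cache: the access is performed immediately through
    // handleIprRead() and the result is handed back to the pipeline via a
    // scheduled WritebackEvent, without consuming a cache port.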
    if (req->isMmappedIpr()) {
        assert(!load_inst->memData);
        load_inst->memData = new uint8_t[64];

        ThreadContext *thread = cpu->tcBase(lsqID);
        Cycles delay(0);
        PacketPtr data_pkt = new Packet(req, MemCmd::ReadReq);

        if (!TheISA::HasUnalignedMemAcc || !sreqLow) {
            data_pkt->dataStatic(load_inst->memData);
            delay = TheISA::handleIprRead(thread, data_pkt);
        } else {
            assert(sreqLow->isMmappedIpr() && sreqHigh->isMmappedIpr());
            PacketPtr fst_data_pkt = new Packet(sreqLow, MemCmd::ReadReq);
            PacketPtr snd_data_pkt = new Packet(sreqHigh, MemCmd::ReadReq);

            fst_data_pkt->dataStatic(load_inst->memData);
            snd_data_pkt->dataStatic(load_inst->memData + sreqLow->getSize());

            delay = TheISA::handleIprRead(thread, fst_data_pkt);
            Cycles delay2 = TheISA::handleIprRead(thread, snd_data_pkt);
            if (delay2 > delay)
                delay = delay2;

            delete sreqLow;
            delete sreqHigh;
            delete fst_data_pkt;
            delete snd_data_pkt;
        }
        WritebackEvent *wb = new WritebackEvent(load_inst, data_pkt, this);
        cpu->schedule(wb, cpu->clockEdge(delay));
        return NoFault;
    }

    while (store_idx != -1) {
        // Stop once we've searched back to the oldest store that has not
        // yet written back.
        if (store_idx == storeWBIdx) {
            break;
        }

        // Move the index to the next older store (circular queue).
        if (--store_idx < 0)
            store_idx += SQEntries;

        assert(storeQueue[store_idx].inst);

        store_size = storeQueue[store_idx].size;

        if (store_size == 0)
            continue;
        else if (storeQueue[store_idx].inst->uncacheable())
            continue;

        assert(storeQueue[store_idx].inst->effAddrValid());

        // Check if the store data is within the lower and upper bounds of
        // addresses that the request needs.
        bool store_has_lower_limit =
            req->getVaddr() >= storeQueue[store_idx].inst->effAddr;
        bool store_has_upper_limit =
            (req->getVaddr() + req->getSize()) <=
            (storeQueue[store_idx].inst->effAddr + store_size);
        bool lower_load_has_store_part =
            req->getVaddr() < (storeQueue[store_idx].inst->effAddr +
                           store_size);
        bool upper_load_has_store_part =
            (req->getVaddr() + req->getSize()) >
            storeQueue[store_idx].inst->effAddr;
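
        // Worked example (illustrative): a store to [0x100, 0x108) fully
        // covers a 4-byte load at 0x104, so the data is forwarded below;
        // an 8-byte load at 0x104 only partially overlaps that store and
        // falls into the stall-and-replay case further down.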

        // If the store's data has all of the data needed, we can forward.
        if (store_has_lower_limit && store_has_upper_limit) {
            // Get shift amount for offset into the store's data.
            int shift_amt = req->getVaddr() - storeQueue[store_idx].inst->effAddr;

            memcpy(data, storeQueue[store_idx].data + shift_amt,
                   req->getSize());

            assert(!load_inst->memData);
            load_inst->memData = new uint8_t[64];

            memcpy(load_inst->memData,
                    storeQueue[store_idx].data + shift_amt, req->getSize());

            DPRINTF(LSQUnit, "Forwarding from store idx %i to load to "
                    "addr %#x, data %#x\n",
                    store_idx, req->getVaddr(), data);

            PacketPtr data_pkt = new Packet(req, MemCmd::ReadReq);
            data_pkt->dataStatic(load_inst->memData);

            WritebackEvent *wb = new WritebackEvent(load_inst, data_pkt, this);

            // We'll say this has a 1 cycle load-store forwarding latency
            // for now.
            // @todo: Need to make this a parameter.
            cpu->schedule(wb, curTick());

            // Don't need to do anything special for split loads.
            if (TheISA::HasUnalignedMemAcc && sreqLow) {
                delete sreqLow;
                delete sreqHigh;
            }

            ++lsqForwLoads;
            return NoFault;
        } else if ((store_has_lower_limit && lower_load_has_store_part) ||
                   (store_has_upper_limit && upper_load_has_store_part) ||
                   (lower_load_has_store_part && upper_load_has_store_part)) {
            // This is the partial store-load forwarding case where a store
            // has only part of the load's data.

            // If it's already been written back, then don't worry about
            // stalling on it.
            if (storeQueue[store_idx].completed) {
                panic("Should not check one of these");
                continue;
            }

            // Must stall load and force it to retry, so long as it's the oldest
            // load that needs to do so.
            if (!stalled ||
                (stalled &&
                 load_inst->seqNum <
                 loadQueue[stallingLoadIdx]->seqNum)) {
                stalled = true;
                stallingStoreIsn = storeQueue[store_idx].inst->seqNum;
                stallingLoadIdx = load_idx;
            }

            // Tell IQ/mem dep unit that this instruction will need to be
            // rescheduled eventually
            iewStage->rescheduleMemInst(load_inst);
            iewStage->decrWb(load_inst->seqNum);
            load_inst->clearIssued();
            ++lsqRescheduledLoads;

            // Do not generate a writeback event as this instruction is not
            // complete.
            DPRINTF(LSQUnit, "Load-store forwarding mis-match. "
                    "Store idx %i to load addr %#x\n",
                    store_idx, req->getVaddr());

            // Must delete request now that it wasn't handed off to
            // memory.  This is quite ugly.  @todo: Figure out the
            // proper place to really handle request deletes.
            delete req;
            if (TheISA::HasUnalignedMemAcc && sreqLow) {
                delete sreqLow;
                delete sreqHigh;
            }

            return NoFault;
        }
    }

    // If there's no forwarding case, then go access memory
    DPRINTF(LSQUnit, "Doing memory access for inst [sn:%lli] PC %s\n",
            load_inst->seqNum, load_inst->pcState());

    assert(!load_inst->memData);
    load_inst->memData = new uint8_t[64];

    ++usedPorts;

    // If the cache is not blocked, do the cache access.
    bool completedFirst = false;
    if (!lsq->cacheBlocked()) {
        MemCmd command =
            req->isLLSC() ? MemCmd::LoadLockedReq : MemCmd::ReadReq;
        PacketPtr data_pkt = new Packet(req, command);
        PacketPtr fst_data_pkt = NULL;
        PacketPtr snd_data_pkt = NULL;

        data_pkt->dataStatic(load_inst->memData);

        LSQSenderState *state = new LSQSenderState;
        state->isLoad = true;
        state->idx = load_idx;
        state->inst = load_inst;
        data_pkt->senderState = state;

        if (!TheISA::HasUnalignedMemAcc || !sreqLow) {

            // Point the first packet at the main data packet.
            fst_data_pkt = data_pkt;
        } else {

            // Create the split packets.
            fst_data_pkt = new Packet(sreqLow, command);
            snd_data_pkt = new Packet(sreqHigh, command);

            fst_data_pkt->dataStatic(load_inst->memData);
            snd_data_pkt->dataStatic(load_inst->memData + sreqLow->getSize());

            fst_data_pkt->senderState = state;
            snd_data_pkt->senderState = state;

            state->isSplit = true;
            state->outstanding = 2;
            state->mainPkt = data_pkt;
        }

        if (!dcachePort->sendTimingReq(fst_data_pkt)) {
            // Delete state and data packet because a load retry
            // initiates a pipeline restart; it does not retry.
            delete state;
            delete data_pkt->req;
            delete data_pkt;
            if (TheISA::HasUnalignedMemAcc && sreqLow) {
                delete fst_data_pkt->req;
                delete fst_data_pkt;
                delete snd_data_pkt->req;
                delete snd_data_pkt;
                sreqLow = NULL;
                sreqHigh = NULL;
            }

            req = NULL;

            // If the access didn't succeed, tell the LSQ by setting
            // the retry thread id.
            lsq->setRetryTid(lsqID);
        } else if (TheISA::HasUnalignedMemAcc && sreqLow) {
            completedFirst = true;

            // The first packet was sent without problems, so send this one
            // too. If there is a problem with this packet then the whole
            // load will be squashed, so indicate this to the state object.
            // The first packet will return in completeDataAccess and be
            // handled there.
            ++usedPorts;
            if (!dcachePort->sendTimingReq(snd_data_pkt)) {

                // The main packet will be deleted in completeDataAccess.
                delete snd_data_pkt->req;
                delete snd_data_pkt;

                state->complete();

                req = NULL;
                sreqHigh = NULL;

                lsq->setRetryTid(lsqID);
            }
        }
    }

    // If the cache was blocked, or has become blocked due to the access,
    // handle it.
    if (lsq->cacheBlocked()) {
        if (req)
            delete req;
        if (TheISA::HasUnalignedMemAcc && sreqLow && !completedFirst) {
            delete sreqLow;
            delete sreqHigh;
        }

        ++lsqCacheBlocked;

        // If the first part of a split access succeeds, then let the LSQ
        // handle the decrWb when completeDataAccess is called upon return
        // of the first part of the data.
        if (!completedFirst)
            iewStage->decrWb(load_inst->seqNum);
8848315Sgeoffrey.blake@arm.com
8852669Sktlim@umich.edu        // There's an older load that's already going to squash.
8862669Sktlim@umich.edu        if (isLoadBlocked && blockedLoadSeqNum < load_inst->seqNum)
8872669Sktlim@umich.edu            return NoFault;
8882292SN/A
8892669Sktlim@umich.edu        // Record that the load was blocked due to memory.  This
8902669Sktlim@umich.edu        // load will squash all instructions after it, be
8912669Sktlim@umich.edu        // refetched, and re-executed.
8922669Sktlim@umich.edu        isLoadBlocked = true;
8932669Sktlim@umich.edu        loadBlockedHandled = false;
8942669Sktlim@umich.edu        blockedLoadSeqNum = load_inst->seqNum;
8952669Sktlim@umich.edu        // No fault occurred, even though the interface is blocked.
8962669Sktlim@umich.edu        return NoFault;
8972292SN/A    }
8982292SN/A
8992669Sktlim@umich.edu    return NoFault;
9002292SN/A}
9012292SN/A
9022292SN/Atemplate <class Impl>
9032292SN/AFault
9046974Stjones1@inf.ed.ac.ukLSQUnit<Impl>::write(Request *req, Request *sreqLow, Request *sreqHigh,
9057520Sgblack@eecs.umich.edu                     uint8_t *data, int store_idx)
9062292SN/A{
9072292SN/A    assert(storeQueue[store_idx].inst);
9082292SN/A
9092292SN/A    DPRINTF(LSQUnit, "Doing write to store idx %i, addr %#x data %#x"
9102292SN/A            " | storeHead:%i [sn:%i]\n",
9112669Sktlim@umich.edu            store_idx, req->getPaddr(), data, storeHead,
9122292SN/A            storeQueue[store_idx].inst->seqNum);
9132329SN/A
9142292SN/A    storeQueue[store_idx].req = req;
9156974Stjones1@inf.ed.ac.uk    storeQueue[store_idx].sreqLow = sreqLow;
9166974Stjones1@inf.ed.ac.uk    storeQueue[store_idx].sreqHigh = sreqHigh;
9177520Sgblack@eecs.umich.edu    unsigned size = req->getSize();
9187520Sgblack@eecs.umich.edu    storeQueue[store_idx].size = size;
9197520Sgblack@eecs.umich.edu    assert(size <= sizeof(storeQueue[store_idx].data));
9207509Stjones1@inf.ed.ac.uk
9217509Stjones1@inf.ed.ac.uk    // Split stores can only occur in ISAs with unaligned memory accesses.  If
9227509Stjones1@inf.ed.ac.uk    // a store request has been split, sreqLow and sreqHigh will be non-null.
9237509Stjones1@inf.ed.ac.uk    if (TheISA::HasUnalignedMemAcc && sreqLow) {
9247509Stjones1@inf.ed.ac.uk        storeQueue[store_idx].isSplit = true;
9257509Stjones1@inf.ed.ac.uk    }
9264326Sgblack@eecs.umich.edu
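    // The data is only buffered into the SQ entry here; the actual request
    // is sent to memory later, by writebackStores(), once the store has
    // committed and reached the writeback index.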
    memcpy(storeQueue[store_idx].data, data, size);

    // This function only writes the data to the store queue, so no fault
    // can happen here.
    return NoFault;
}

#endif // __CPU_O3_LSQ_UNIT_HH__