/*
 * Copyright (c) 2004-2006 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Kevin Lim
 *          Korey Sewell
 */

#ifndef __CPU_O3_LSQ_UNIT_HH__
#define __CPU_O3_LSQ_UNIT_HH__

#include <algorithm>
#include <cstring>
#include <map>
#include <queue>

#include "arch/faults.hh"
#include "arch/locked_mem.hh"
#include "config/full_system.hh"
#include "config/the_isa.hh"
#include "base/fast_alloc.hh"
#include "base/hashmap.hh"
#include "cpu/inst_seq.hh"
#include "mem/packet.hh"
#include "mem/port.hh"

class DerivO3CPUParams;

/**
 * Class that implements the actual LQ and SQ for each specific
 * thread.  Both are circular queues; load entries are freed upon
 * committing, while store entries are freed once they writeback. The
 * LSQUnit tracks if there are memory ordering violations, and also
 * detects partial load to store forwarding cases (a store only has
 * part of a load's data) that require the load to wait until the
 * store writes back. In the former case it holds onto the instruction
 * until the dependence unit looks at it, and in the latter it stalls
 * the LSQ until the store writes back. At that point the load is
 * replayed.
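 *
 * A minimal usage sketch (the surrounding names -- cpu, iew, params, lsq,
 * tid, inst, youngest -- are hypothetical; in the real pipeline the
 * enclosing LSQ and IEW stages drive these calls):
 *
 * @code
 * LSQUnit<Impl> lsqUnit;
 * lsqUnit.init(cpu, iew, params, lsq, maxLQEntries, maxSQEntries, tid);
 * lsqUnit.insert(inst);            // dispatch adds loads and stores
 * lsqUnit.executeLoad(inst);       // issue the access to memory
 * lsqUnit.commitLoads(youngest);   // commit frees LQ entries
 * lsqUnit.writebackStores();       // committed stores are written back
 * @endcode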
 */
template <class Impl>
class LSQUnit {
  protected:
    typedef TheISA::IntReg IntReg;
  public:
    typedef typename Impl::O3CPU O3CPU;
    typedef typename Impl::DynInstPtr DynInstPtr;
    typedef typename Impl::CPUPol::IEW IEW;
    typedef typename Impl::CPUPol::LSQ LSQ;
    typedef typename Impl::CPUPol::IssueStruct IssueStruct;

  public:
    /** Constructs an LSQ unit. init() must be called prior to use. */
    LSQUnit();

    /** Initializes the LSQ unit with the specified number of entries. */
    void init(O3CPU *cpu_ptr, IEW *iew_ptr, DerivO3CPUParams *params,
            LSQ *lsq_ptr, unsigned maxLQEntries, unsigned maxSQEntries,
            unsigned id);

    /** Returns the name of the LSQ unit. */
    std::string name() const;

    /** Registers statistics. */
    void regStats();

    /** Sets the pointer to the dcache port. */
    void setDcachePort(Port *dcache_port);

    /** Switches out LSQ unit. */
    void switchOut();

    /** Takes over from another CPU's thread. */
    void takeOverFrom();

    /** Returns if the LSQ is switched out. */
    bool isSwitchedOut() { return switchedOut; }

    /** Ticks the LSQ unit, which in this case only resets the number of
     * used cache ports.
     * @todo: Move the number of used ports up to the LSQ level so it can
     * be shared by all LSQ units.
     */
    void tick() { usedPorts = 0; }

    /** Inserts an instruction. */
    void insert(DynInstPtr &inst);
    /** Inserts a load instruction. */
    void insertLoad(DynInstPtr &load_inst);
    /** Inserts a store instruction. */
    void insertStore(DynInstPtr &store_inst);

    /** Executes a load instruction. */
    Fault executeLoad(DynInstPtr &inst);

    Fault executeLoad(int lq_idx) { panic("Not implemented"); return NoFault; }
    /** Executes a store instruction. */
    Fault executeStore(DynInstPtr &inst);

    /** Commits the head load. */
    void commitLoad();
    /** Commits loads older than a specific sequence number. */
    void commitLoads(InstSeqNum &youngest_inst);

    /** Commits stores older than a specific sequence number. */
    void commitStores(InstSeqNum &youngest_inst);

    /** Writes back stores. */
    void writebackStores();

    /** Completes the data access that has been returned from the
     * memory system. */
    void completeDataAccess(PacketPtr pkt);

    /** Clears all the entries in the LQ. */
    void clearLQ();

    /** Clears all the entries in the SQ. */
    void clearSQ();

    /** Resizes the LQ to a given size. */
    void resizeLQ(unsigned size);

    /** Resizes the SQ to a given size. */
    void resizeSQ(unsigned size);

    /** Squashes all instructions younger than a specific sequence number. */
    void squash(const InstSeqNum &squashed_num);

    /** Returns if there is a memory ordering violation. Value is reset upon
     * call to getMemDepViolator().
     */
    bool violation() { return memDepViolator; }

    /** Returns the memory ordering violator. */
    DynInstPtr getMemDepViolator();

    /** Returns if a load became blocked due to the memory system. */
    bool loadBlocked()
    { return isLoadBlocked; }

    /** Clears the signal that a load became blocked. */
    void clearLoadBlocked()
    { isLoadBlocked = false; }

    /** Returns if the blocked load was handled. */
    bool isLoadBlockedHandled()
    { return loadBlockedHandled; }

    /** Records the blocked load as being handled. */
    void setLoadBlockedHandled()
    { loadBlockedHandled = true; }

    /** Returns the number of free entries (min of free LQ and SQ entries). */
    unsigned numFreeEntries();

    /** Returns the number of loads ready to execute. */
    int numLoadsReady();

    /** Returns the number of loads in the LQ. */
    int numLoads() { return loads; }

    /** Returns the number of stores in the SQ. */
    int numStores() { return stores; }

    /** Returns if either the LQ or SQ is full. */
    bool isFull() { return lqFull() || sqFull(); }

    /** Returns if the LQ is full. */
    bool lqFull() { return loads >= (LQEntries - 1); }

    /** Returns if the SQ is full. */
    bool sqFull() { return stores >= (SQEntries - 1); }

    /** Returns the number of instructions in the LSQ. */
    unsigned getCount() { return loads + stores; }

    /** Returns if there are any stores to writeback. */
    bool hasStoresToWB() { return storesToWB; }

    /** Returns the number of stores to writeback. */
    int numStoresToWB() { return storesToWB; }

    /** Returns if the LSQ unit will writeback on this cycle. */
    bool willWB() { return storeQueue[storeWBIdx].canWB &&
                        !storeQueue[storeWBIdx].completed &&
                        !isStoreBlocked; }

    /** Handles doing the retry. */
    void recvRetry();

  private:
    /** Writes back the instruction, sending it to IEW. */
    void writeback(DynInstPtr &inst, PacketPtr pkt);

    /** Writes back a store that couldn't be completed the previous cycle. */
    void writebackPendingStore();

    /** Handles completing the send of a store to memory. */
    void storePostSend(PacketPtr pkt);

    /** Completes the store at the specified index. */
    void completeStore(int store_idx);

    /** Attempts to send a store to the cache. */
    bool sendStore(PacketPtr data_pkt);

    /** Increments the given store index (circular queue). */
    inline void incrStIdx(int &store_idx);
    /** Decrements the given store index (circular queue). */
    inline void decrStIdx(int &store_idx);
    /** Increments the given load index (circular queue). */
    inline void incrLdIdx(int &load_idx);
    /** Decrements the given load index (circular queue). */
    inline void decrLdIdx(int &load_idx);

  public:
    /** Debugging function to dump instructions in the LSQ. */
    void dumpInsts();

  private:
    /** Pointer to the CPU. */
    O3CPU *cpu;

    /** Pointer to the IEW stage. */
    IEW *iewStage;

    /** Pointer to the LSQ. */
    LSQ *lsq;

    /** Pointer to the dcache port.  Used only for sending. */
    Port *dcachePort;

    /** Derived class to hold any sender state the LSQ needs. */
    class LSQSenderState : public Packet::SenderState, public FastAlloc
    {
      public:
        /** Default constructor. */
        LSQSenderState()
            : noWB(false), isSplit(false), pktToSend(false), outstanding(1),
              mainPkt(NULL), pendingPacket(NULL)
        { }

        /** Instruction that initiated the access to memory. */
        DynInstPtr inst;
        /** Whether or not it is a load. */
        bool isLoad;
        /** The LQ/SQ index of the instruction. */
        int idx;
        /** Whether or not the instruction will need to writeback. */
        bool noWB;
        /** Whether or not this access is split in two. */
        bool isSplit;
        /** Whether or not there is a packet that needs sending. */
        bool pktToSend;
        /** Number of outstanding packets to complete. */
        int outstanding;
        /** The main packet from a split load, used during writeback. */
        PacketPtr mainPkt;
        /** A second packet from a split store that needs sending. */
        PacketPtr pendingPacket;

        /** Completes a packet and returns whether the access is finished. */
        inline bool complete() { return --outstanding == 0; }
    };

    /** Writeback event, specifically for when stores forward data to loads. */
    class WritebackEvent : public Event {
      public:
        /** Constructs a writeback event. */
        WritebackEvent(DynInstPtr &_inst, PacketPtr pkt, LSQUnit *lsq_ptr);

        /** Processes the writeback event. */
        void process();

        /** Returns the description of this event. */
        const char *description() const;

      private:
        /** Instruction whose results are being written back. */
        DynInstPtr inst;

        /** The packet that would have been sent to memory. */
        PacketPtr pkt;

        /** The pointer to the LSQ unit that issued the store. */
        LSQUnit<Impl> *lsqPtr;
    };

  public:
    struct SQEntry {
        /** Constructs an empty store queue entry. */
        SQEntry()
            : inst(NULL), req(NULL), sreqLow(NULL), sreqHigh(NULL), size(0),
              isSplit(0), canWB(0), committed(0), completed(0)
        {
            std::memset(data, 0, sizeof(data));
        }

        /** Constructs a store queue entry for a given instruction. */
        SQEntry(DynInstPtr &_inst)
            : inst(_inst), req(NULL), sreqLow(NULL), sreqHigh(NULL), size(0),
              isSplit(0), canWB(0), committed(0), completed(0)
        {
            std::memset(data, 0, sizeof(data));
        }

        /** The store instruction. */
        DynInstPtr inst;
        /** The request for the store. */
        RequestPtr req;
        /** The split requests for the store. */
        RequestPtr sreqLow;
        RequestPtr sreqHigh;
        /** The size of the store. */
        int size;
        /** The store data. */
        char data[sizeof(IntReg)];
        /** Whether or not the store is split into two requests. */
        bool isSplit;
        /** Whether or not the store can writeback. */
        bool canWB;
        /** Whether or not the store is committed. */
        bool committed;
        /** Whether or not the store is completed. */
        bool completed;
    };

  private:
    /** The LSQUnit thread id. */
    ThreadID lsqID;

    /** The store queue. */
    std::vector<SQEntry> storeQueue;

    /** The load queue. */
    std::vector<DynInstPtr> loadQueue;

    /** The number of LQ entries, plus a sentinel entry (circular queue).
     *  @todo: Consider having var that records the true number of LQ entries.
     */
    unsigned LQEntries;
    /** The number of SQ entries, plus a sentinel entry (circular queue).
     *  @todo: Consider having var that records the true number of SQ entries.
     */
    unsigned SQEntries;

    /** The number of load instructions in the LQ. */
    int loads;
    /** The number of store instructions in the SQ. */
    int stores;
    /** The number of store instructions in the SQ waiting to writeback. */
    int storesToWB;

    /** The index of the head instruction in the LQ. */
    int loadHead;
    /** The index of the tail instruction in the LQ. */
    int loadTail;

    /** The index of the head instruction in the SQ. */
    int storeHead;
    /** The index of the first instruction that may be ready to be
     * written back, and has not yet been written back.
     */
    int storeWBIdx;
    /** The index of the tail instruction in the SQ. */
    int storeTail;

    /// @todo Consider moving to a more advanced model with write vs read ports
    /** The number of cache ports available each cycle. */
    int cachePorts;

    /** The number of used cache ports in this cycle. */
    int usedPorts;

    /** Is the LSQ switched out. */
    bool switchedOut;

    //list<InstSeqNum> mshrSeqNums;

    /** Wire to read information from the issue stage time queue. */
    typename TimeBuffer<IssueStruct>::wire fromIssue;

    /** Whether or not the LSQ is stalled. */
    bool stalled;
    /** The store that causes the stall due to partial store to load
     * forwarding.
     */
    InstSeqNum stallingStoreIsn;
    /** The index of the load that is stalling on the above store. */
    int stallingLoadIdx;

    /** The packet that needs to be retried. */
    PacketPtr retryPkt;

    /** Whether or not a store is blocked due to the memory system. */
    bool isStoreBlocked;

    /** Whether or not a load is blocked due to the memory system. */
    bool isLoadBlocked;

    /** Has the blocked load been handled. */
    bool loadBlockedHandled;

    /** The sequence number of the blocked load. */
    InstSeqNum blockedLoadSeqNum;

    /** The oldest load that caused a memory ordering violation. */
    DynInstPtr memDepViolator;

    /** Whether or not there is a packet that couldn't be sent because of
     * a lack of cache ports. */
    bool hasPendingPkt;

    /** The packet that is pending free cache ports. */
    PacketPtr pendingPkt;

    // Will also need how many read/write ports the Dcache has.  Or keep track
    // of that in stage that is one level up, and only call executeLoad/Store
    // the appropriate number of times.
    /** Total number of loads forwarded from LSQ stores. */
    Stats::Scalar lsqForwLoads;

    /** Total number of loads ignored due to invalid addresses. */
    Stats::Scalar invAddrLoads;

    /** Total number of squashed loads. */
    Stats::Scalar lsqSquashedLoads;

    /** Total number of responses from the memory system that are
     * ignored due to the instruction already being squashed. */
    Stats::Scalar lsqIgnoredResponses;

    /** Total number of memory ordering violations. */
    Stats::Scalar lsqMemOrderViolation;

    /** Total number of squashed stores. */
    Stats::Scalar lsqSquashedStores;

    /** Total number of software prefetches ignored due to invalid addresses. */
    Stats::Scalar invAddrSwpfs;

    /** Ready loads blocked due to partial store-forwarding. */
    Stats::Scalar lsqBlockedLoads;

    /** Number of loads that were rescheduled. */
    Stats::Scalar lsqRescheduledLoads;

    /** Number of times the LSQ is blocked due to the cache. */
    Stats::Scalar lsqCacheBlocked;

  public:
    /** Executes the load at the given index. */
    Fault read(Request *req, Request *sreqLow, Request *sreqHigh,
               uint8_t *data, int load_idx);

    /** Executes the store at the given index. */
    Fault write(Request *req, Request *sreqLow, Request *sreqHigh,
                uint8_t *data, int store_idx);

    /** Returns the index of the head load instruction. */
    int getLoadHead() { return loadHead; }
    /** Returns the sequence number of the head load instruction. */
    InstSeqNum getLoadHeadSeqNum()
    {
        if (loadQueue[loadHead]) {
            return loadQueue[loadHead]->seqNum;
        } else {
            return 0;
        }
    }

    /** Returns the index of the head store instruction. */
    int getStoreHead() { return storeHead; }
    /** Returns the sequence number of the head store instruction. */
    InstSeqNum getStoreHeadSeqNum()
    {
        if (storeQueue[storeHead].inst) {
            return storeQueue[storeHead].inst->seqNum;
        } else {
            return 0;
        }
    }

    /** Returns whether or not the LSQ unit is stalled. */
    bool isStalled() { return stalled; }
};

template <class Impl>
Fault
LSQUnit<Impl>::read(Request *req, Request *sreqLow, Request *sreqHigh,
                    uint8_t *data, int load_idx)
{
    DynInstPtr load_inst = loadQueue[load_idx];

    assert(load_inst);

    assert(!load_inst->isExecuted());

    // Make sure this isn't an uncacheable access
    // A bit of a hackish way to get uncached accesses to work only if they're
    // at the head of the LSQ and are ready to commit (at the head of the ROB
    // too).
    if (req->isUncacheable() &&
        (load_idx != loadHead || !load_inst->isAtCommit())) {
        iewStage->rescheduleMemInst(load_inst);
        ++lsqRescheduledLoads;
        DPRINTF(LSQUnit, "Uncacheable load [sn:%lli] PC %s\n",
                load_inst->seqNum, load_inst->pcState());

        // Must delete request now that it wasn't handed off to
        // memory.  This is quite ugly.  @todo: Figure out the proper
        // place to really handle request deletes.
        delete req;
        if (TheISA::HasUnalignedMemAcc && sreqLow) {
            delete sreqLow;
            delete sreqHigh;
        }
        return TheISA::genMachineCheckFault();
    }

    // Check the SQ for any previous stores that might lead to forwarding
    int store_idx = load_inst->sqIdx;

    int store_size = 0;

    DPRINTF(LSQUnit, "Read called, load idx: %i, store idx: %i, "
            "storeHead: %i addr: %#x%s\n",
            load_idx, store_idx, storeHead, req->getPaddr(),
            sreqLow ? " split" : "");

    if (req->isLLSC()) {
        assert(!sreqLow);
        // Disable recording the result temporarily.  Writing to misc
        // regs normally updates the result, but this is not the
        // desired behavior when handling store conditionals.
        load_inst->recordResult = false;
        TheISA::handleLockedRead(load_inst.get(), req);
        load_inst->recordResult = true;
    }

    while (store_idx != -1) {
        // End once we've reached the top of the LSQ
        if (store_idx == storeWBIdx) {
            break;
        }

        // Move the index to one younger
        if (--store_idx < 0)
            store_idx += SQEntries;

        assert(storeQueue[store_idx].inst);

        store_size = storeQueue[store_idx].size;

        if (store_size == 0)
            continue;
        else if (storeQueue[store_idx].inst->uncacheable())
            continue;

        assert(storeQueue[store_idx].inst->effAddrValid);

        // Check if the store data is within the lower and upper bounds of
        // addresses that the request needs.
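        // Illustrative example (hypothetical values): a store covering
        // [0x100, 0x108) fully contains a 4-byte load at 0x104, so the data
        // can be forwarded straight out of the SQ entry below, whereas an
        // 8-byte load at 0x104 only partially overlaps that store, which is
        // the stall-and-replay case handled further down.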
        bool store_has_lower_limit =
            req->getVaddr() >= storeQueue[store_idx].inst->effAddr;
        bool store_has_upper_limit =
            (req->getVaddr() + req->getSize()) <=
            (storeQueue[store_idx].inst->effAddr + store_size);
        bool lower_load_has_store_part =
            req->getVaddr() < (storeQueue[store_idx].inst->effAddr +
                               store_size);
        bool upper_load_has_store_part =
            (req->getVaddr() + req->getSize()) >
            storeQueue[store_idx].inst->effAddr;

        // If the store's data has all of the data needed, we can forward.
        if ((store_has_lower_limit && store_has_upper_limit)) {
            // Get shift amount for offset into the store's data.
            int shift_amt = req->getVaddr() & (store_size - 1);

            memcpy(data, storeQueue[store_idx].data + shift_amt,
                   req->getSize());

            assert(!load_inst->memData);
            load_inst->memData = new uint8_t[64];

            memcpy(load_inst->memData,
                   storeQueue[store_idx].data + shift_amt, req->getSize());

            DPRINTF(LSQUnit, "Forwarding from store idx %i to load to "
                    "addr %#x, data %#x\n",
                    store_idx, req->getVaddr(), data);

            PacketPtr data_pkt = new Packet(req, MemCmd::ReadReq,
                                            Packet::Broadcast);
            data_pkt->dataStatic(load_inst->memData);

            WritebackEvent *wb = new WritebackEvent(load_inst, data_pkt, this);

            // We'll say this has a 1 cycle load-store forwarding latency
            // for now.
            // @todo: Need to make this a parameter.
            cpu->schedule(wb, curTick);

            // Don't need to do anything special for split loads.
            if (TheISA::HasUnalignedMemAcc && sreqLow) {
                delete sreqLow;
                delete sreqHigh;
            }

            ++lsqForwLoads;
            return NoFault;
        } else if ((store_has_lower_limit && lower_load_has_store_part) ||
                   (store_has_upper_limit && upper_load_has_store_part) ||
                   (lower_load_has_store_part && upper_load_has_store_part)) {
            // This is the partial store-load forwarding case where a store
            // has only part of the load's data.

            // If it's already been written back, then don't worry about
            // stalling on it.
            if (storeQueue[store_idx].completed) {
                panic("Should not check one of these");
                continue;
            }

            // Must stall load and force it to retry, so long as it's the
            // oldest load that needs to do so.
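            // Only update the recorded stalling load if this load is older
            // (has a smaller sequence number) than the load currently marked
            // as stalling.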
            if (!stalled ||
                (stalled &&
                 load_inst->seqNum <
                 loadQueue[stallingLoadIdx]->seqNum)) {
                stalled = true;
                stallingStoreIsn = storeQueue[store_idx].inst->seqNum;
                stallingLoadIdx = load_idx;
            }

            // Tell IQ/mem dep unit that this instruction will need to be
            // rescheduled eventually
            iewStage->rescheduleMemInst(load_inst);
            iewStage->decrWb(load_inst->seqNum);
            load_inst->clearIssued();
            ++lsqRescheduledLoads;

            // Do not generate a writeback event as this instruction is not
            // complete.
            DPRINTF(LSQUnit, "Load-store forwarding mis-match. "
                    "Store idx %i to load addr %#x\n",
                    store_idx, req->getVaddr());

            // Must delete request now that it wasn't handed off to
            // memory.  This is quite ugly.  @todo: Figure out the
            // proper place to really handle request deletes.
            delete req;
            if (TheISA::HasUnalignedMemAcc && sreqLow) {
                delete sreqLow;
                delete sreqHigh;
            }

            return NoFault;
        }
    }

    // If there's no forwarding case, then go access memory
    DPRINTF(LSQUnit, "Doing memory access for inst [sn:%lli] PC %s\n",
            load_inst->seqNum, load_inst->pcState());

    assert(!load_inst->memData);
    load_inst->memData = new uint8_t[64];

    ++usedPorts;

    // If the cache is not blocked, do the cache access
    bool completedFirst = false;
    if (!lsq->cacheBlocked()) {
        MemCmd command =
            req->isLLSC() ? MemCmd::LoadLockedReq : MemCmd::ReadReq;
        PacketPtr data_pkt = new Packet(req, command, Packet::Broadcast);
        PacketPtr fst_data_pkt = NULL;
        PacketPtr snd_data_pkt = NULL;

        data_pkt->dataStatic(load_inst->memData);

        LSQSenderState *state = new LSQSenderState;
        state->isLoad = true;
        state->idx = load_idx;
        state->inst = load_inst;
        data_pkt->senderState = state;

        if (!TheISA::HasUnalignedMemAcc || !sreqLow) {

            // Point the first packet at the main data packet.
            fst_data_pkt = data_pkt;
        } else {

            // Create the split packets.
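            // Both halves share a single LSQSenderState (outstanding is set
            // to 2 below), and the second packet's static data pointer is
            // offset by the low half's size so the two responses fill
            // adjacent parts of the same memData buffer.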
            fst_data_pkt = new Packet(sreqLow, command, Packet::Broadcast);
            snd_data_pkt = new Packet(sreqHigh, command, Packet::Broadcast);

            fst_data_pkt->dataStatic(load_inst->memData);
            snd_data_pkt->dataStatic(load_inst->memData + sreqLow->getSize());

            fst_data_pkt->senderState = state;
            snd_data_pkt->senderState = state;

            state->isSplit = true;
            state->outstanding = 2;
            state->mainPkt = data_pkt;
        }

        if (!dcachePort->sendTiming(fst_data_pkt)) {
            // Delete state and data packet because a load retry
            // initiates a pipeline restart; it does not retry.
            delete state;
            delete data_pkt->req;
            delete data_pkt;
            if (TheISA::HasUnalignedMemAcc && sreqLow) {
                delete fst_data_pkt->req;
                delete fst_data_pkt;
                delete snd_data_pkt->req;
                delete snd_data_pkt;
                sreqLow = NULL;
                sreqHigh = NULL;
            }

            req = NULL;

            // If the access didn't succeed, tell the LSQ by setting
            // the retry thread id.
            lsq->setRetryTid(lsqID);
        } else if (TheISA::HasUnalignedMemAcc && sreqLow) {
            completedFirst = true;

            // The first packet was sent without problems, so send this one
            // too. If there is a problem with this packet then the whole
            // load will be squashed, so indicate this to the state object.
            // The first packet will return in completeDataAccess and be
            // handled there.
            ++usedPorts;
            if (!dcachePort->sendTiming(snd_data_pkt)) {

                // The main packet will be deleted in completeDataAccess.
                delete snd_data_pkt->req;
                delete snd_data_pkt;

                state->complete();

                req = NULL;
                sreqHigh = NULL;

                lsq->setRetryTid(lsqID);
            }
        }
    }

    // If the cache was blocked, or has become blocked due to the access,
    // handle it.
    if (lsq->cacheBlocked()) {
        if (req)
            delete req;
        if (TheISA::HasUnalignedMemAcc && sreqLow && !completedFirst) {
            delete sreqLow;
            delete sreqHigh;
        }

        ++lsqCacheBlocked;

        iewStage->decrWb(load_inst->seqNum);
        // There's an older load that's already going to squash.
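        // The squash triggered by that older load will flush this load as
        // well, so there is no need to record a new blocked load here.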
        if (isLoadBlocked && blockedLoadSeqNum < load_inst->seqNum)
            return NoFault;

        // Record that the load was blocked due to memory.  This
        // load will squash all instructions after it, be
        // refetched, and re-executed.
        isLoadBlocked = true;
        loadBlockedHandled = false;
        blockedLoadSeqNum = load_inst->seqNum;
        // No fault occurred, even though the interface is blocked.
        return NoFault;
    }

    return NoFault;
}

template <class Impl>
Fault
LSQUnit<Impl>::write(Request *req, Request *sreqLow, Request *sreqHigh,
                     uint8_t *data, int store_idx)
{
    assert(storeQueue[store_idx].inst);

    DPRINTF(LSQUnit, "Doing write to store idx %i, addr %#x data %#x"
            " | storeHead:%i [sn:%i]\n",
            store_idx, req->getPaddr(), data, storeHead,
            storeQueue[store_idx].inst->seqNum);

    storeQueue[store_idx].req = req;
    storeQueue[store_idx].sreqLow = sreqLow;
    storeQueue[store_idx].sreqHigh = sreqHigh;
    unsigned size = req->getSize();
    storeQueue[store_idx].size = size;
    assert(size <= sizeof(storeQueue[store_idx].data));

    // Split stores can only occur in ISAs with unaligned memory accesses.  If
    // a store request has been split, sreqLow and sreqHigh will be non-null.
    if (TheISA::HasUnalignedMemAcc && sreqLow) {
        storeQueue[store_idx].isSplit = true;
    }

    memcpy(storeQueue[store_idx].data, data, size);

    // This function only writes the data to the store queue, so no fault
    // can happen here.
    return NoFault;
}

#endif // __CPU_O3_LSQ_UNIT_HH__