/*
 * Copyright (c) 2012-2013 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2004-2006 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Kevin Lim
 *          Korey Sewell
 */

#ifndef __CPU_O3_LSQ_UNIT_HH__
#define __CPU_O3_LSQ_UNIT_HH__

#include <algorithm>
#include <cstring>
#include <map>
#include <queue>

#include "arch/generic/debugfaults.hh"
#include "arch/isa_traits.hh"
#include "arch/locked_mem.hh"
#include "arch/mmapped_ipr.hh"
#include "base/hashmap.hh"
#include "config/the_isa.hh"
#include "cpu/inst_seq.hh"
#include "cpu/timebuf.hh"
#include "debug/LSQUnit.hh"
#include "mem/packet.hh"
#include "mem/port.hh"
#include "sim/fault_fwd.hh"

struct DerivO3CPUParams;

/**
 * Class that implements the actual LQ and SQ for each specific
 * thread.  Both are circular queues; load entries are freed upon
 * committing, while store entries are freed once they writeback. The
 * LSQUnit tracks if there are memory ordering violations, and also
 * detects partial load to store forwarding cases (a store only has
 * part of a load's data) that require the load to wait until the
 * store writes back. In the former case it holds onto the instruction
 * until the dependence unit looks at it, and in the latter it stalls
 * the LSQ until the store writes back. At that point the load is
 * replayed.
 */
template <class Impl>
class LSQUnit {
  public:
    typedef typename Impl::O3CPU O3CPU;
    typedef typename Impl::DynInstPtr DynInstPtr;
    typedef typename Impl::CPUPol::IEW IEW;
    typedef typename Impl::CPUPol::LSQ LSQ;
    typedef typename Impl::CPUPol::IssueStruct IssueStruct;

  public:
    /** Constructs an LSQ unit. init() must be called prior to use. */
    LSQUnit();

    /** Initializes the LSQ unit with the specified number of entries. */
    void init(O3CPU *cpu_ptr, IEW *iew_ptr, DerivO3CPUParams *params,
            LSQ *lsq_ptr, unsigned maxLQEntries, unsigned maxSQEntries,
            unsigned id);

    /** Returns the name of the LSQ unit. */
    std::string name() const;

    /** Registers statistics. */
    void regStats();

    /** Sets the pointer to the dcache port. */
    void setDcachePort(MasterPort *dcache_port);

    /** Perform sanity checks after a drain. */
    void drainSanityCheck() const;

    /** Takes over from another CPU's thread. */
    void takeOverFrom();

    /** Ticks the LSQ unit, which in this case only resets the number of
     * used cache ports.
     * @todo: Move the number of used ports up to the LSQ level so it can
     * be shared by all LSQ units.
     */
    void tick() { usedPorts = 0; }

    /** Inserts an instruction. */
    void insert(DynInstPtr &inst);
    /** Inserts a load instruction. */
    void insertLoad(DynInstPtr &load_inst);
    /** Inserts a store instruction. */
    void insertStore(DynInstPtr &store_inst);

    /** Check for ordering violations in the LSQ. For a store, squash if we
     * ever find a conflicting load. For a load, only squash if an external
     * snoop invalidate has been seen for that load's address.
     * @param load_idx index to start checking at
     * @param inst the instruction to check
     */
    Fault checkViolations(int load_idx, DynInstPtr &inst);

    /** Check if an incoming invalidate hits in the LSQ on a load that might
     * have issued out of order with respect to another load because of the
     * intermediate invalidate.
     */
    void checkSnoop(PacketPtr pkt);

    /** Executes a load instruction. */
    Fault executeLoad(DynInstPtr &inst);

    Fault executeLoad(int lq_idx) { panic("Not implemented"); return NoFault; }
    /** Executes a store instruction. */
    Fault executeStore(DynInstPtr &inst);

    /** Commits the head load. */
    void commitLoad();
    /** Commits loads older than a specific sequence number. */
    void commitLoads(InstSeqNum &youngest_inst);

    /** Commits stores older than a specific sequence number. */
    void commitStores(InstSeqNum &youngest_inst);

    /** Writes back stores. */
    void writebackStores();

    /** Completes the data access that has been returned from the
     * memory system. */
    void completeDataAccess(PacketPtr pkt);

    /** Clears all the entries in the LQ. */
    void clearLQ();

    /** Clears all the entries in the SQ. */
    void clearSQ();

    /** Resizes the LQ to a given size. */
    void resizeLQ(unsigned size);

    /** Resizes the SQ to a given size. */
    void resizeSQ(unsigned size);

    /** Squashes all instructions younger than a specific sequence number. */
    void squash(const InstSeqNum &squashed_num);

    /** Returns if there is a memory ordering violation. Value is reset upon
     * call to getMemDepViolator().
     */
    bool violation() { return memDepViolator; }

    /** Returns the memory ordering violator. */
    DynInstPtr getMemDepViolator();
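    // A sketch of the blocked-load protocol served by the next four
    // accessors (inferred from the comments in read() at the bottom of this
    // file): when the dcache refuses a load, the load records its sequence
    // number in blockedLoadSeqNum and sets isLoadBlocked.  The pipeline is
    // then expected to squash that load and everything younger, refetch, and
    // re-execute it; these accessors let the stages above track whether that
    // squash has already been performed.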
    /** Returns if a load became blocked due to the memory system. */
    bool loadBlocked()
    { return isLoadBlocked; }

    /** Clears the signal that a load became blocked. */
    void clearLoadBlocked()
    { isLoadBlocked = false; }

    /** Returns if the blocked load was handled. */
    bool isLoadBlockedHandled()
    { return loadBlockedHandled; }

    /** Records the blocked load as being handled. */
    void setLoadBlockedHandled()
    { loadBlockedHandled = true; }

    /** Returns the number of free entries (min of free LQ and SQ entries). */
    unsigned numFreeEntries();

    /** Returns the number of loads in the LQ. */
    int numLoads() { return loads; }

    /** Returns the number of stores in the SQ. */
    int numStores() { return stores; }

    /** Returns if either the LQ or SQ is full. */
    bool isFull() { return lqFull() || sqFull(); }

    /** Returns if both the LQ and SQ are empty. */
    bool isEmpty() const { return lqEmpty() && sqEmpty(); }

    /** Returns if the LQ is full. */
    bool lqFull() { return loads >= (LQEntries - 1); }

    /** Returns if the SQ is full. */
    bool sqFull() { return stores >= (SQEntries - 1); }

    /** Returns if the LQ is empty. */
    bool lqEmpty() const { return loads == 0; }

    /** Returns if the SQ is empty. */
    bool sqEmpty() const { return stores == 0; }

    /** Returns the number of instructions in the LSQ. */
    unsigned getCount() { return loads + stores; }

    /** Returns if there are any stores to writeback. */
    bool hasStoresToWB() { return storesToWB; }

    /** Returns the number of stores to writeback. */
    int numStoresToWB() { return storesToWB; }

    /** Returns if the LSQ unit will writeback on this cycle. */
    bool willWB() { return storeQueue[storeWBIdx].canWB &&
                        !storeQueue[storeWBIdx].completed &&
                        !isStoreBlocked; }

    /** Handles doing the retry. */
    void recvRetry();

  private:
    /** Reset the LSQ state */
    void resetState();

    /** Writes back the instruction, sending it to IEW. */
    void writeback(DynInstPtr &inst, PacketPtr pkt);

    /** Writes back a store that couldn't be completed the previous cycle. */
    void writebackPendingStore();

    /** Handles completing the send of a store to memory. */
    void storePostSend(PacketPtr pkt);

    /** Completes the store at the specified index. */
    void completeStore(int store_idx);

    /** Attempts to send a store to the cache. */
    bool sendStore(PacketPtr data_pkt);

    /** Increments the given store index (circular queue). */
    inline void incrStIdx(int &store_idx) const;
    /** Decrements the given store index (circular queue). */
    inline void decrStIdx(int &store_idx) const;
    /** Increments the given load index (circular queue). */
    inline void incrLdIdx(int &load_idx) const;
    /** Decrements the given load index (circular queue). */
    inline void decrLdIdx(int &load_idx) const;
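    // A minimal sketch of the wrap-around these four helpers are expected to
    // perform (their definitions live in lsq_unit_impl.hh); shown for the
    // store index, the load index being analogous:
    //
    //     if (++store_idx >= SQEntries)
    //         store_idx = 0;              // increment, wrapping at SQEntries
    //     if (--store_idx < 0)
    //         store_idx += SQEntries;     // decrement, wrapping below zero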
  public:
    /** Debugging function to dump instructions in the LSQ. */
    void dumpInsts() const;

  private:
    /** Pointer to the CPU. */
    O3CPU *cpu;

    /** Pointer to the IEW stage. */
    IEW *iewStage;

    /** Pointer to the LSQ. */
    LSQ *lsq;

    /** Pointer to the dcache port.  Used only for sending. */
    MasterPort *dcachePort;

    /** Derived class to hold any sender state the LSQ needs. */
    class LSQSenderState : public Packet::SenderState
    {
      public:
        /** Default constructor. */
        LSQSenderState()
            : mainPkt(NULL), pendingPacket(NULL), outstanding(1),
              noWB(false), isSplit(false), pktToSend(false)
        { }

        /** Instruction that initiated the access to memory. */
        DynInstPtr inst;
        /** The main packet from a split load, used during writeback. */
        PacketPtr mainPkt;
        /** A second packet from a split store that needs sending. */
        PacketPtr pendingPacket;
        /** The LQ/SQ index of the instruction. */
        uint8_t idx;
        /** Number of outstanding packets to complete. */
        uint8_t outstanding;
        /** Whether or not it is a load. */
        bool isLoad;
        /** Whether or not the instruction will need to writeback. */
        bool noWB;
        /** Whether or not this access is split in two. */
        bool isSplit;
        /** Whether or not there is a packet that needs sending. */
        bool pktToSend;

        /** Completes a packet and returns whether the access is finished. */
        inline bool complete() { return --outstanding == 0; }
    };
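    // How this sender state is typically used (a sketch based on the read()
    // code at the bottom of this file): each outgoing memory packet carries
    // a pointer to one LSQSenderState.  For an ordinary access, outstanding
    // keeps its default value of 1; for a split (unaligned) access it is
    // bumped to 2 and both halves share the same state object, so complete()
    // only reports the access as finished once both responses have returned.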
    /** Writeback event, specifically for when stores forward data to loads. */
    class WritebackEvent : public Event {
      public:
        /** Constructs a writeback event. */
        WritebackEvent(DynInstPtr &_inst, PacketPtr pkt, LSQUnit *lsq_ptr);

        /** Processes the writeback event. */
        void process();

        /** Returns the description of this event. */
        const char *description() const;

      private:
        /** Instruction whose results are being written back. */
        DynInstPtr inst;

        /** The packet that would have been sent to memory. */
        PacketPtr pkt;

        /** The pointer to the LSQ unit that issued the store. */
        LSQUnit<Impl> *lsqPtr;
    };

  public:
    struct SQEntry {
        /** Constructs an empty store queue entry. */
        SQEntry()
            : inst(NULL), req(NULL), size(0),
              canWB(0), committed(0), completed(0)
        {
            std::memset(data, 0, sizeof(data));
        }

        ~SQEntry()
        {
            inst = NULL;
        }

        /** Constructs a store queue entry for a given instruction. */
        SQEntry(DynInstPtr &_inst)
            : inst(_inst), req(NULL), sreqLow(NULL), sreqHigh(NULL), size(0),
              isSplit(0), canWB(0), committed(0), completed(0), isAllZeros(0)
        {
            std::memset(data, 0, sizeof(data));
        }
        /** The store data. */
        char data[16];
        /** The store instruction. */
        DynInstPtr inst;
        /** The request for the store. */
        RequestPtr req;
        /** The split requests for the store. */
        RequestPtr sreqLow;
        RequestPtr sreqHigh;
        /** The size of the store. */
        uint8_t size;
        /** Whether or not the store is split into two requests. */
        bool isSplit;
        /** Whether or not the store can writeback. */
        bool canWB;
        /** Whether or not the store is committed. */
        bool committed;
        /** Whether or not the store is completed. */
        bool completed;
        /** Does this request write all zeros and thus carry no data with it?
         * Used for cache-block-zero style instructions (ARM DC ZVA;
         * ALPHA WH64).
         */
        bool isAllZeros;
    };

  private:
    /** The LSQUnit thread id. */
    ThreadID lsqID;

    /** The store queue. */
    std::vector<SQEntry> storeQueue;

    /** The load queue. */
    std::vector<DynInstPtr> loadQueue;

    /** The number of LQ entries, plus a sentinel entry (circular queue).
     * @todo: Consider having var that records the true number of LQ entries.
     */
    unsigned LQEntries;
    /** The number of SQ entries, plus a sentinel entry (circular queue).
     * @todo: Consider having var that records the true number of SQ entries.
     */
    unsigned SQEntries;
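    // Worked example of the sentinel convention (assuming init() sets these
    // to the configured queue sizes plus one, as the comments above suggest):
    // with 32 architectural LQ slots, LQEntries is 33 and lqFull() above
    // reports full once loads >= 32, so one slot is always left unused to
    // distinguish a full circular queue from an empty one.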
    /** The number of places to shift addresses in the LSQ before checking
     * for dependency violations
     */
    unsigned depCheckShift;

    /** Should loads be checked for dependency issues */
    bool checkLoads;

    /** The number of load instructions in the LQ. */
    int loads;
    /** The number of store instructions in the SQ. */
    int stores;
    /** The number of store instructions in the SQ waiting to writeback. */
    int storesToWB;

    /** The index of the head instruction in the LQ. */
    int loadHead;
    /** The index of the tail instruction in the LQ. */
    int loadTail;

    /** The index of the head instruction in the SQ. */
    int storeHead;
    /** The index of the first instruction that may be ready to be
     * written back, and has not yet been written back.
     */
    int storeWBIdx;
    /** The index of the tail instruction in the SQ. */
    int storeTail;

    /// @todo Consider moving to a more advanced model with write vs read ports
    /** The number of cache ports available each cycle. */
    int cachePorts;

    /** The number of used cache ports in this cycle. */
    int usedPorts;

    //list<InstSeqNum> mshrSeqNums;

    /** Address Mask for a cache block (e.g. ~(cache_block_size-1)) */
    Addr cacheBlockMask;

    /** Wire to read information from the issue stage time queue. */
    typename TimeBuffer<IssueStruct>::wire fromIssue;

    /** Whether or not the LSQ is stalled. */
    bool stalled;
    /** The store that causes the stall due to partial store to load
     * forwarding.
     */
    InstSeqNum stallingStoreIsn;
    /** The index of the load that is stalling on the above store. */
    int stallingLoadIdx;

    /** The packet that needs to be retried. */
    PacketPtr retryPkt;

    /** Whether or not a store is blocked due to the memory system. */
    bool isStoreBlocked;

    /** Whether or not a load is blocked due to the memory system. */
    bool isLoadBlocked;

    /** Has the blocked load been handled. */
    bool loadBlockedHandled;

    /** Whether or not a store is in flight. */
    bool storeInFlight;

    /** The sequence number of the blocked load. */
    InstSeqNum blockedLoadSeqNum;

    /** The oldest load that caused a memory ordering violation. */
    DynInstPtr memDepViolator;

    /** Whether or not there is a packet that couldn't be sent because of
     * a lack of cache ports. */
    bool hasPendingPkt;

    /** The packet that is pending free cache ports. */
    PacketPtr pendingPkt;

    /** Flag for memory model. */
    bool needsTSO;

    // Will also need how many read/write ports the Dcache has. Or keep track
    // of that in the stage that is one level up, and only call
    // executeLoad/Store the appropriate number of times.
    /** Total number of loads forwarded from LSQ stores. */
    Stats::Scalar lsqForwLoads;

    /** Total number of loads ignored due to invalid addresses. */
    Stats::Scalar invAddrLoads;

    /** Total number of squashed loads. */
    Stats::Scalar lsqSquashedLoads;

    /** Total number of responses from the memory system that are
     * ignored due to the instruction already being squashed. */
    Stats::Scalar lsqIgnoredResponses;

    /** Total number of memory ordering violations. */
    Stats::Scalar lsqMemOrderViolation;

    /** Total number of squashed stores. */
    Stats::Scalar lsqSquashedStores;

    /** Total number of software prefetches ignored due to invalid addresses. */
    Stats::Scalar invAddrSwpfs;

    /** Ready loads blocked due to partial store-forwarding. */
    Stats::Scalar lsqBlockedLoads;

    /** Number of loads that were rescheduled. */
    Stats::Scalar lsqRescheduledLoads;

    /** Number of times the LSQ is blocked due to the cache. */
    Stats::Scalar lsqCacheBlocked;

  public:
    /** Executes the load at the given index. */
    Fault read(Request *req, Request *sreqLow, Request *sreqHigh,
               uint8_t *data, int load_idx);

    /** Executes the store at the given index. */
    Fault write(Request *req, Request *sreqLow, Request *sreqHigh,
                uint8_t *data, int store_idx);

    /** Returns the index of the head load instruction. */
    int getLoadHead() { return loadHead; }
    /** Returns the sequence number of the head load instruction. */
    InstSeqNum getLoadHeadSeqNum()
    {
        if (loadQueue[loadHead]) {
            return loadQueue[loadHead]->seqNum;
        } else {
            return 0;
        }
    }

    /** Returns the index of the head store instruction. */
    int getStoreHead() { return storeHead; }
    /** Returns the sequence number of the head store instruction. */
    InstSeqNum getStoreHeadSeqNum()
    {
        if (storeQueue[storeHead].inst) {
            return storeQueue[storeHead].inst->seqNum;
        } else {
            return 0;
        }
    }

    /** Returns whether or not the LSQ unit is stalled. */
    bool isStalled() { return stalled; }
};

template <class Impl>
Fault
LSQUnit<Impl>::read(Request *req, Request *sreqLow, Request *sreqHigh,
                    uint8_t *data, int load_idx)
{
    DynInstPtr load_inst = loadQueue[load_idx];

    assert(load_inst);

    assert(!load_inst->isExecuted());

    // Make sure this isn't an uncacheable access
    // A bit of a hackish way to get uncached accesses to work only if they're
    // at the head of the LSQ and are ready to commit (at the head of the ROB
    // too).
    if (req->isUncacheable() &&
        (load_idx != loadHead || !load_inst->isAtCommit())) {
        iewStage->rescheduleMemInst(load_inst);
        ++lsqRescheduledLoads;
        DPRINTF(LSQUnit, "Uncachable load [sn:%lli] PC %s\n",
                load_inst->seqNum, load_inst->pcState());

        // Must delete request now that it wasn't handed off to
        // memory.  This is quite ugly.  @todo: Figure out the proper
        // place to really handle request deletes.
        delete req;
        if (TheISA::HasUnalignedMemAcc && sreqLow) {
            delete sreqLow;
            delete sreqHigh;
        }
        return new GenericISA::M5PanicFault(
                "Uncachable load [sn:%llx] PC %s\n",
                load_inst->seqNum, load_inst->pcState());
    }

    // Check the SQ for any previous stores that might lead to forwarding
    int store_idx = load_inst->sqIdx;

    int store_size = 0;

    DPRINTF(LSQUnit, "Read called, load idx: %i, store idx: %i, "
            "storeHead: %i addr: %#x%s\n",
            load_idx, store_idx, storeHead, req->getPaddr(),
            sreqLow ? " split" : "");

    if (req->isLLSC()) {
        assert(!sreqLow);
        // Disable recording the result temporarily.  Writing to misc
        // regs normally updates the result, but this is not the
        // desired behavior when handling store conditionals.
        load_inst->recordResult(false);
        TheISA::handleLockedRead(load_inst.get(), req);
        load_inst->recordResult(true);
    }
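    // Note on the LL/SC handling above (a sketch of the usual gem5 locked-mem
    // mechanism, which is not spelled out in this file): handleLockedRead()
    // is expected to record the locked address and set the lock flag in the
    // thread's ISA state so that a later store-conditional can check whether
    // the reservation is still valid.  Result recording is toggled off around
    // it because that bookkeeping writes misc registers, which would
    // otherwise overwrite the load's recorded result.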
    if (req->isMmappedIpr()) {
        assert(!load_inst->memData);
        load_inst->memData = new uint8_t[64];

        ThreadContext *thread = cpu->tcBase(lsqID);
        Cycles delay(0);
        PacketPtr data_pkt = new Packet(req, MemCmd::ReadReq);

        if (!TheISA::HasUnalignedMemAcc || !sreqLow) {
            data_pkt->dataStatic(load_inst->memData);
            delay = TheISA::handleIprRead(thread, data_pkt);
        } else {
            assert(sreqLow->isMmappedIpr() && sreqHigh->isMmappedIpr());
            PacketPtr fst_data_pkt = new Packet(sreqLow, MemCmd::ReadReq);
            PacketPtr snd_data_pkt = new Packet(sreqHigh, MemCmd::ReadReq);

            fst_data_pkt->dataStatic(load_inst->memData);
            snd_data_pkt->dataStatic(load_inst->memData + sreqLow->getSize());

            delay = TheISA::handleIprRead(thread, fst_data_pkt);
            Cycles delay2 = TheISA::handleIprRead(thread, snd_data_pkt);
            if (delay2 > delay)
                delay = delay2;

            delete sreqLow;
            delete sreqHigh;
            delete fst_data_pkt;
            delete snd_data_pkt;
        }
        WritebackEvent *wb = new WritebackEvent(load_inst, data_pkt, this);
        cpu->schedule(wb, cpu->clockEdge(delay));
        return NoFault;
    }

    while (store_idx != -1) {
        // End once we've reached the top of the LSQ
        if (store_idx == storeWBIdx) {
            break;
        }

        // Move the index to the next older store
        if (--store_idx < 0)
            store_idx += SQEntries;

        assert(storeQueue[store_idx].inst);

        store_size = storeQueue[store_idx].size;

        if (store_size == 0)
            continue;
        else if (storeQueue[store_idx].inst->uncacheable())
            continue;

        assert(storeQueue[store_idx].inst->effAddrValid());

        // Check if the store data is within the lower and upper bounds of
        // addresses that the request needs.
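        // Worked example of the overlap test computed below: suppose the
        // store wrote 8 bytes at effAddr 0x100, covering [0x100, 0x108).
        // A 4-byte load at 0x104 lies entirely inside that range, so both
        // "limit" conditions hold and the data can be forwarded with
        // shift_amt = 0x104 - 0x100 = 4.  A 4-byte load at 0x106 only
        // partially overlaps ([0x106, 0x10A)), so it falls into the
        // partial-forwarding case and must stall until the store has
        // written back.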
        bool store_has_lower_limit =
            req->getVaddr() >= storeQueue[store_idx].inst->effAddr;
        bool store_has_upper_limit =
            (req->getVaddr() + req->getSize()) <=
            (storeQueue[store_idx].inst->effAddr + store_size);
        bool lower_load_has_store_part =
            req->getVaddr() < (storeQueue[store_idx].inst->effAddr +
                               store_size);
        bool upper_load_has_store_part =
            (req->getVaddr() + req->getSize()) >
            storeQueue[store_idx].inst->effAddr;

        // If the store's data has all of the data needed, we can forward.
        if ((store_has_lower_limit && store_has_upper_limit)) {
            // Get shift amount for offset into the store's data.
            int shift_amt = req->getVaddr() - storeQueue[store_idx].inst->effAddr;

            if (storeQueue[store_idx].isAllZeros)
                memset(data, 0, req->getSize());
            else
                memcpy(data, storeQueue[store_idx].data + shift_amt,
                       req->getSize());

            assert(!load_inst->memData);
            load_inst->memData = new uint8_t[req->getSize()];
            if (storeQueue[store_idx].isAllZeros)
                memset(load_inst->memData, 0, req->getSize());
            else
                memcpy(load_inst->memData,
                       storeQueue[store_idx].data + shift_amt, req->getSize());

            DPRINTF(LSQUnit, "Forwarding from store idx %i to load to "
                    "addr %#x, data %#x\n",
                    store_idx, req->getVaddr(), data);

            PacketPtr data_pkt = new Packet(req, MemCmd::ReadReq);
            data_pkt->dataStatic(load_inst->memData);

            WritebackEvent *wb = new WritebackEvent(load_inst, data_pkt, this);

            // We'll say this has a 1 cycle load-store forwarding latency
            // for now.
            // @todo: Need to make this a parameter.
            cpu->schedule(wb, curTick());

            // Don't need to do anything special for split loads.
            if (TheISA::HasUnalignedMemAcc && sreqLow) {
                delete sreqLow;
                delete sreqHigh;
            }

            ++lsqForwLoads;
            return NoFault;
        } else if ((store_has_lower_limit && lower_load_has_store_part) ||
                   (store_has_upper_limit && upper_load_has_store_part) ||
                   (lower_load_has_store_part && upper_load_has_store_part)) {
            // This is the partial store-load forwarding case where a store
            // has only part of the load's data.

            // If it's already been written back, then don't worry about
            // stalling on it.
            if (storeQueue[store_idx].completed) {
                panic("Should not check one of these");
                continue;
            }

            // Must stall load and force it to retry, so long as it's the oldest
            // load that needs to do so.
            if (!stalled ||
                (stalled &&
                 load_inst->seqNum <
                 loadQueue[stallingLoadIdx]->seqNum)) {
                stalled = true;
                stallingStoreIsn = storeQueue[store_idx].inst->seqNum;
                stallingLoadIdx = load_idx;
            }

            // Tell IQ/mem dep unit that this instruction will need to be
            // rescheduled eventually
            iewStage->rescheduleMemInst(load_inst);
            iewStage->decrWb(load_inst->seqNum);
            load_inst->clearIssued();
            ++lsqRescheduledLoads;

            // Do not generate a writeback event as this instruction is not
            // complete.
            DPRINTF(LSQUnit, "Load-store forwarding mis-match. "
                    "Store idx %i to load addr %#x\n",
                    store_idx, req->getVaddr());

            // Must delete request now that it wasn't handed off to
            // memory.  This is quite ugly.  @todo: Figure out the
            // proper place to really handle request deletes.
            delete req;
            if (TheISA::HasUnalignedMemAcc && sreqLow) {
                delete sreqLow;
                delete sreqHigh;
            }

            return NoFault;
        }
    }

    // If there's no forwarding case, then go access memory
    DPRINTF(LSQUnit, "Doing memory access for inst [sn:%lli] PC %s\n",
            load_inst->seqNum, load_inst->pcState());

    assert(!load_inst->memData);
    load_inst->memData = new uint8_t[req->getSize()];

    ++usedPorts;

    // If the cache is not blocked, do the cache access
    bool completedFirst = false;
    if (!lsq->cacheBlocked()) {
        MemCmd command =
            req->isLLSC() ? MemCmd::LoadLockedReq : MemCmd::ReadReq;
        PacketPtr data_pkt = new Packet(req, command);
        PacketPtr fst_data_pkt = NULL;
        PacketPtr snd_data_pkt = NULL;

        data_pkt->dataStatic(load_inst->memData);

        LSQSenderState *state = new LSQSenderState;
        state->isLoad = true;
        state->idx = load_idx;
        state->inst = load_inst;
        data_pkt->senderState = state;

        if (!TheISA::HasUnalignedMemAcc || !sreqLow) {
            // Point the first packet at the main data packet.
            fst_data_pkt = data_pkt;
        } else {
            // Create the split packets.
            fst_data_pkt = new Packet(sreqLow, command);
            snd_data_pkt = new Packet(sreqHigh, command);

            fst_data_pkt->dataStatic(load_inst->memData);
            snd_data_pkt->dataStatic(load_inst->memData + sreqLow->getSize());

            fst_data_pkt->senderState = state;
            snd_data_pkt->senderState = state;

            state->isSplit = true;
            state->outstanding = 2;
            state->mainPkt = data_pkt;
        }

        if (!dcachePort->sendTimingReq(fst_data_pkt)) {
            // Delete state and data packet because a load retry
            // initiates a pipeline restart; it does not retry.
            delete state;
            delete data_pkt->req;
            delete data_pkt;
            if (TheISA::HasUnalignedMemAcc && sreqLow) {
                delete fst_data_pkt->req;
                delete fst_data_pkt;
                delete snd_data_pkt->req;
                delete snd_data_pkt;
                sreqLow = NULL;
                sreqHigh = NULL;
            }

            req = NULL;

            // If the access didn't succeed, tell the LSQ by setting
            // the retry thread id.
            lsq->setRetryTid(lsqID);
        } else if (TheISA::HasUnalignedMemAcc && sreqLow) {
            completedFirst = true;

            // The first packet was sent without problems, so send this one
            // too. If there is a problem with this packet then the whole
            // load will be squashed, so indicate this to the state object.
            // The first packet will return in completeDataAccess and be
            // handled there.
            ++usedPorts;
            if (!dcachePort->sendTimingReq(snd_data_pkt)) {
                // The main packet will be deleted in completeDataAccess.
                delete snd_data_pkt->req;
                delete snd_data_pkt;

                state->complete();

                req = NULL;
                sreqHigh = NULL;

                lsq->setRetryTid(lsqID);
            }
        }
    }
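    // A note on the failure paths above (pieced together from the
    // surrounding comments rather than stated explicitly here): a load that
    // is refused by the cache is not simply retried later; setRetryTid()
    // tells the parent LSQ which thread was refused, and the blocked-load
    // bookkeeping below forces the load (and everything younger) to be
    // squashed and re-executed.  Blocked stores, by contrast, are held in
    // retryPkt and re-sent from recvRetry() once the cache port becomes
    // free again.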
    // If the cache was blocked, or has become blocked due to the access,
    // handle it.
    if (lsq->cacheBlocked()) {
        if (req)
            delete req;
        if (TheISA::HasUnalignedMemAcc && sreqLow && !completedFirst) {
            delete sreqLow;
            delete sreqHigh;
        }

        ++lsqCacheBlocked;

        // If the first part of a split access succeeds, then let the LSQ
        // handle the decrWb when completeDataAccess is called upon return
        // of the requested first part of data
        if (!completedFirst)
            iewStage->decrWb(load_inst->seqNum);

        // There's an older load that's already going to squash.
        if (isLoadBlocked && blockedLoadSeqNum < load_inst->seqNum)
            return NoFault;

        // Record that the load was blocked due to memory.  This
        // load will squash all instructions after it, be
        // refetched, and re-executed.
        isLoadBlocked = true;
        loadBlockedHandled = false;
        blockedLoadSeqNum = load_inst->seqNum;
        // No fault occurred, even though the interface is blocked.
        return NoFault;
    }

    return NoFault;
}

template <class Impl>
Fault
LSQUnit<Impl>::write(Request *req, Request *sreqLow, Request *sreqHigh,
                     uint8_t *data, int store_idx)
{
    assert(storeQueue[store_idx].inst);

    DPRINTF(LSQUnit, "Doing write to store idx %i, addr %#x data %#x"
            " | storeHead:%i [sn:%i]\n",
            store_idx, req->getPaddr(), data, storeHead,
            storeQueue[store_idx].inst->seqNum);

    storeQueue[store_idx].req = req;
    storeQueue[store_idx].sreqLow = sreqLow;
    storeQueue[store_idx].sreqHigh = sreqHigh;
    unsigned size = req->getSize();
    storeQueue[store_idx].size = size;
    storeQueue[store_idx].isAllZeros = req->getFlags() & Request::CACHE_BLOCK_ZERO;
    assert(size <= sizeof(storeQueue[store_idx].data) ||
            (req->getFlags() & Request::CACHE_BLOCK_ZERO));

    // Split stores can only occur in ISAs with unaligned memory accesses.  If
    // a store request has been split, sreqLow and sreqHigh will be non-null.
    if (TheISA::HasUnalignedMemAcc && sreqLow) {
        storeQueue[store_idx].isSplit = true;
    }

    if (!(req->getFlags() & Request::CACHE_BLOCK_ZERO))
        memcpy(storeQueue[store_idx].data, data, size);
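    // Example of the cache-block-zero special case handled above: for an
    // ARM DC ZVA (or Alpha WH64) style store, the request carries
    // Request::CACHE_BLOCK_ZERO and the block is typically larger than the
    // 16-byte data buffer, so no bytes are copied and isAllZeros is set
    // instead; the forwarding path in read() and the eventual writeback are
    // then expected to synthesize the zero data on demand.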
    // This function only writes the data to the store queue, so no fault
    // can happen here.
    return NoFault;
}

#endif // __CPU_O3_LSQ_UNIT_HH__