lsq_unit.hh revision 13429
/*
 * Copyright (c) 2012-2014,2017 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2004-2006 The Regents of The University of Michigan
 * Copyright (c) 2013 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Kevin Lim
 *          Korey Sewell
 */

#ifndef __CPU_O3_LSQ_UNIT_HH__
#define __CPU_O3_LSQ_UNIT_HH__

#include <algorithm>
#include <cstring>
#include <map>
#include <queue>

#include "arch/generic/debugfaults.hh"
#include "arch/isa_traits.hh"
#include "arch/locked_mem.hh"
#include "arch/mmapped_ipr.hh"
#include "config/the_isa.hh"
#include "cpu/inst_seq.hh"
#include "cpu/timebuf.hh"
#include "debug/LSQUnit.hh"
#include "mem/packet.hh"
#include "mem/port.hh"

struct DerivO3CPUParams;

/**
 * Class that implements the actual LQ and SQ for each specific
 * thread. Both are circular queues; load entries are freed upon
 * committing, while store entries are freed once they writeback. The
 * LSQUnit tracks if there are memory ordering violations, and also
 * detects partial load to store forwarding cases (a store only has
 * part of a load's data) that require the load to wait until the
 * store writes back. In the former case it holds onto the instruction
 * until the dependence unit looks at it, and in the latter it stalls
 * the LSQ until the store writes back. At that point the load is
 * replayed.
 */
template <class Impl>
class LSQUnit {
  public:
    typedef typename Impl::O3CPU O3CPU;
    typedef typename Impl::DynInstPtr DynInstPtr;
    typedef typename Impl::CPUPol::IEW IEW;
    typedef typename Impl::CPUPol::LSQ LSQ;
    typedef typename Impl::CPUPol::IssueStruct IssueStruct;

  public:
    /** Constructs an LSQ unit. init() must be called prior to use. */
    LSQUnit();

    /** Initializes the LSQ unit with the specified number of entries. */
    void init(O3CPU *cpu_ptr, IEW *iew_ptr, DerivO3CPUParams *params,
              LSQ *lsq_ptr, unsigned maxLQEntries, unsigned maxSQEntries,
              unsigned id);

    /** Returns the name of the LSQ unit. */
    std::string name() const;

    /** Registers statistics. */
    void regStats();

    /** Sets the pointer to the dcache port. */
    void setDcachePort(MasterPort *dcache_port);

    /** Perform sanity checks after a drain. */
    void drainSanityCheck() const;

    /** Takes over from another CPU's thread. */
    void takeOverFrom();

    /** Ticks the LSQ unit, which in this case only resets the number of
     * used cache ports.
     * @todo: Move the number of used ports up to the LSQ level so it can
     * be shared by all LSQ units.
     */
    void tick() { usedStorePorts = 0; }

    /** Inserts an instruction. */
    void insert(const DynInstPtr &inst);
    /** Inserts a load instruction. */
    void insertLoad(const DynInstPtr &load_inst);
    /** Inserts a store instruction. */
    void insertStore(const DynInstPtr &store_inst);

    /** Check for ordering violations in the LSQ. For a store, squash if we
     * ever find a conflicting load. For a load, only squash if an
     * external snoop invalidate has been seen for that load address.
     * @param load_idx index to start checking at
     * @param inst the instruction to check
     */
    Fault checkViolations(int load_idx, const DynInstPtr &inst);

    /** Check if an incoming invalidate hits in the lsq on a load
     * that might have issued out of order wrt another load because
     * of the intermediate invalidate.
     */
    void checkSnoop(PacketPtr pkt);

    /** Executes a load instruction. */
    Fault executeLoad(const DynInstPtr &inst);

    Fault executeLoad(int lq_idx) { panic("Not implemented"); return NoFault; }
    /** Executes a store instruction. */
    Fault executeStore(const DynInstPtr &inst);

    /** Commits the head load. */
    void commitLoad();
    /** Commits loads older than a specific sequence number. */
    void commitLoads(InstSeqNum &youngest_inst);

    /** Commits stores older than a specific sequence number. */
    void commitStores(InstSeqNum &youngest_inst);

    /** Writes back stores. */
    void writebackStores();

    /** Completes the data access that has been returned from the
     * memory system. */
    void completeDataAccess(PacketPtr pkt);

    /** Clears all the entries in the LQ. */
    void clearLQ();

    /** Clears all the entries in the SQ. */
    void clearSQ();

    /** Resizes the LQ to a given size. */
    void resizeLQ(unsigned size);

    /** Resizes the SQ to a given size. */
    void resizeSQ(unsigned size);

    /** Squashes all instructions younger than a specific sequence number. */
    void squash(const InstSeqNum &squashed_num);

    /** Returns if there is a memory ordering violation. Value is reset upon
     * call to getMemDepViolator().
     */
    bool violation() { return memDepViolator; }

    /** Returns the memory ordering violator. */
    DynInstPtr getMemDepViolator();

    /** Returns the number of free LQ entries. */
    unsigned numFreeLoadEntries();

    /** Returns the number of free SQ entries. */
    unsigned numFreeStoreEntries();

    /** Returns the number of loads in the LQ. */
    int numLoads() { return loads; }

    /** Returns the number of stores in the SQ. */
    int numStores() { return stores; }

    /** Returns if either the LQ or SQ is full. */
    bool isFull() { return lqFull() || sqFull(); }

    /** Returns if both the LQ and SQ are empty. */
    bool isEmpty() const { return lqEmpty() && sqEmpty(); }

    /** Returns if the LQ is full. */
    bool lqFull() { return loads >= (LQEntries - 1); }

    /** Returns if the SQ is full. */
    bool sqFull() { return stores >= (SQEntries - 1); }

    /** Returns if the LQ is empty. */
    bool lqEmpty() const { return loads == 0; }

    /** Returns if the SQ is empty. */
    bool sqEmpty() const { return stores == 0; }

    /** Returns the number of instructions in the LSQ. */
    unsigned getCount() { return loads + stores; }

    /** Returns if there are any stores to writeback. */
    bool hasStoresToWB() { return storesToWB; }

    /** Returns the number of stores to writeback. */
    int numStoresToWB() { return storesToWB; }

    /** Returns if the LSQ unit will writeback on this cycle. */
    bool willWB() { return storeQueue[storeWBIdx].canWB &&
                        !storeQueue[storeWBIdx].completed &&
                        !isStoreBlocked; }

    /** Handles doing the retry. */
    void recvRetry();

  private:
    /** Reset the LSQ state */
    void resetState();

    /** Writes back the instruction, sending it to IEW. */
    void writeback(const DynInstPtr &inst, PacketPtr pkt);

    /** Writes back a store that couldn't be completed the previous cycle. */
    void writebackPendingStore();

    /** Handles completing the send of a store to memory. */
    void storePostSend(PacketPtr pkt);

    /** Completes the store at the specified index. */
    void completeStore(int store_idx);

    /** Attempts to send a store to the cache. */
    bool sendStore(PacketPtr data_pkt);

    /** Increments the given store index (circular queue). */
    inline void incrStIdx(int &store_idx) const;
    /** Decrements the given store index (circular queue). */
    inline void decrStIdx(int &store_idx) const;
    /** Increments the given load index (circular queue). */
    inline void incrLdIdx(int &load_idx) const;
    /** Decrements the given load index (circular queue). */
    inline void decrLdIdx(int &load_idx) const;

  public:
    /** Debugging function to dump instructions in the LSQ. */
    void dumpInsts() const;

  private:
    /** Pointer to the CPU. */
    O3CPU *cpu;

    /** Pointer to the IEW stage. */
    IEW *iewStage;

    /** Pointer to the LSQ. */
    LSQ *lsq;

    /** Pointer to the dcache port. Used only for sending. */
    MasterPort *dcachePort;

    /** Derived class to hold any sender state the LSQ needs. */
    class LSQSenderState : public Packet::SenderState
    {
      public:
        /** Default constructor. */
        LSQSenderState()
            : mainPkt(NULL), pendingPacket(NULL), idx(0), outstanding(1),
              isLoad(false), noWB(false), isSplit(false),
              pktToSend(false), cacheBlocked(false)
        { }

        /** Instruction that initiated the access to memory. */
        DynInstPtr inst;
        /** The main packet from a split load, used during writeback. */
        PacketPtr mainPkt;
        /** A second packet from a split store that needs sending. */
        PacketPtr pendingPacket;
        /** The LQ/SQ index of the instruction. */
        uint8_t idx;
        /** Number of outstanding packets to complete. */
        uint8_t outstanding;
        /** Whether or not it is a load. */
        bool isLoad;
        /** Whether or not the instruction will need to writeback. */
        bool noWB;
        /** Whether or not this access is split in two. */
        bool isSplit;
        /** Whether or not there is a packet that needs sending. */
        bool pktToSend;
        /** Whether or not the second packet of this split load was blocked. */
        bool cacheBlocked;

        /** Completes a packet and returns whether the access is finished. */
        inline bool complete() { return --outstanding == 0; }
    };

    /** Writeback event, specifically for when stores forward data to loads. */
    class WritebackEvent : public Event {
      public:
        /** Constructs a writeback event. */
        WritebackEvent(const DynInstPtr &_inst, PacketPtr pkt,
                       LSQUnit *lsq_ptr);

        /** Processes the writeback event. */
        void process();

        /** Returns the description of this event. */
        const char *description() const;

      private:
        /** Instruction whose results are being written back. */
        DynInstPtr inst;

        /** The packet that would have been sent to memory. */
        PacketPtr pkt;

        /** The pointer to the LSQ unit that issued the store. */
        LSQUnit<Impl> *lsqPtr;
    };

  public:
    struct SQEntry {
        /** Constructs an empty store queue entry. */
        SQEntry()
            : inst(NULL), req(NULL), size(0),
              canWB(0), committed(0), completed(0)
        {
            std::memset(data, 0, sizeof(data));
        }

        ~SQEntry()
        {
            inst = NULL;
        }

        /** Constructs a store queue entry for a given instruction. */
        SQEntry(const DynInstPtr &_inst)
            : inst(_inst), req(NULL), sreqLow(NULL), sreqHigh(NULL), size(0),
              isSplit(0), canWB(0), committed(0), completed(0), isAllZeros(0)
        {
            std::memset(data, 0, sizeof(data));
        }
        /** The store data. */
        char data[16];
        /** The store instruction. */
        DynInstPtr inst;
        /** The request for the store. */
        RequestPtr req;
        /** The split requests for the store. */
        RequestPtr sreqLow;
        RequestPtr sreqHigh;
        /** The size of the store. */
        uint8_t size;
        /** Whether or not the store is split into two requests. */
        bool isSplit;
        /** Whether or not the store can writeback. */
        bool canWB;
        /** Whether or not the store is committed. */
        bool committed;
        /** Whether or not the store is completed. */
        bool completed;
        /** Does this request write all zeros and thus doesn't
         * have any data attached to it. Used for cache block zero
         * style instructions (ARM DC ZVA; ALPHA WH64).
         */
        bool isAllZeros;
    };

  private:
    /** The LSQUnit thread id. */
    ThreadID lsqID;

    /** The store queue. */
    std::vector<SQEntry> storeQueue;

    /** The load queue. */
    std::vector<DynInstPtr> loadQueue;

    /** The number of LQ entries, plus a sentinel entry (circular queue).
     * @todo: Consider having var that records the true number of LQ entries.
     */
    unsigned LQEntries;
    /** The number of SQ entries, plus a sentinel entry (circular queue).
     * @todo: Consider having var that records the true number of SQ entries.
     */
    unsigned SQEntries;

    /** The number of places to shift addresses in the LSQ before checking
     * for dependency violations
     */
    unsigned depCheckShift;

    /** Should loads be checked for dependency issues */
    bool checkLoads;

    /** The number of load instructions in the LQ. */
    int loads;
    /** The number of store instructions in the SQ. */
    int stores;
    /** The number of store instructions in the SQ waiting to writeback. */
    int storesToWB;

    /** The index of the head instruction in the LQ. */
    int loadHead;
    /** The index of the tail instruction in the LQ. */
    int loadTail;

    /** The index of the head instruction in the SQ. */
    int storeHead;
    /** The index of the first instruction that may be ready to be
     * written back, and has not yet been written back.
     */
    int storeWBIdx;
    /** The index of the tail instruction in the SQ. */
    int storeTail;

    /// @todo Consider moving to a more advanced model with write vs read ports
    /** The number of cache ports available each cycle (stores only). */
    int cacheStorePorts;

    /** The number of used cache ports in this cycle by stores. */
    int usedStorePorts;

    //list<InstSeqNum> mshrSeqNums;

    /** Address Mask for a cache block (e.g. ~(cache_block_size-1)) */
    Addr cacheBlockMask;

    /** Wire to read information from the issue stage time queue. */
    typename TimeBuffer<IssueStruct>::wire fromIssue;

    /** Whether or not the LSQ is stalled. */
    bool stalled;
    /** The store that causes the stall due to partial store to load
     * forwarding.
     */
    InstSeqNum stallingStoreIsn;
    /** The index of the above store. */
    int stallingLoadIdx;

    /** The packet that needs to be retried. */
    PacketPtr retryPkt;

    /** Whether or not a store is blocked due to the memory system. */
    bool isStoreBlocked;

    /** Whether or not a store is in flight. */
    bool storeInFlight;

    /** The oldest load that caused a memory ordering violation. */
    DynInstPtr memDepViolator;

    /** Whether or not there is a packet that couldn't be sent because of
     * a lack of cache ports. */
    bool hasPendingPkt;

    /** The packet that is pending free cache ports. */
    PacketPtr pendingPkt;

    /** Flag for memory model. */
    bool needsTSO;

    // Will also need how many read/write ports the Dcache has. Or keep track
    // of that in stage that is one level up, and only call executeLoad/Store
    // the appropriate number of times.
    /** Total number of loads forwarded from LSQ stores. */
    Stats::Scalar lsqForwLoads;

    /** Total number of loads ignored due to invalid addresses. */
    Stats::Scalar invAddrLoads;

    /** Total number of squashed loads. */
    Stats::Scalar lsqSquashedLoads;

    /** Total number of responses from the memory system that are
     * ignored due to the instruction already being squashed. */
    Stats::Scalar lsqIgnoredResponses;

    /** Total number of memory ordering violations. */
    Stats::Scalar lsqMemOrderViolation;

    /** Total number of squashed stores. */
    Stats::Scalar lsqSquashedStores;

    /** Total number of software prefetches ignored due to invalid addresses. */
    Stats::Scalar invAddrSwpfs;

    /** Ready loads blocked due to partial store-forwarding. */
    Stats::Scalar lsqBlockedLoads;

    /** Number of loads that were rescheduled. */
    Stats::Scalar lsqRescheduledLoads;

    /** Number of times the LSQ is blocked due to the cache. */
    Stats::Scalar lsqCacheBlocked;

  public:
    /** Executes the load at the given index. */
    Fault read(const RequestPtr &req,
               RequestPtr &sreqLow, RequestPtr &sreqHigh,
               int load_idx);

    /** Executes the store at the given index. */
    Fault write(const RequestPtr &req,
                const RequestPtr &sreqLow, const RequestPtr &sreqHigh,
                uint8_t *data, int store_idx);

    /** Returns the index of the head load instruction. */
    int getLoadHead() { return loadHead; }
    /** Returns the sequence number of the head load instruction. */
    InstSeqNum getLoadHeadSeqNum()
    {
        if (loadQueue[loadHead]) {
            return loadQueue[loadHead]->seqNum;
        } else {
            return 0;
        }
    }

    /** Returns the index of the head store instruction. */
    int getStoreHead() { return storeHead; }
    /** Returns the sequence number of the head store instruction. */
    InstSeqNum getStoreHeadSeqNum()
    {
        if (storeQueue[storeHead].inst) {
            return storeQueue[storeHead].inst->seqNum;
        } else {
            return 0;
        }
    }

    /** Returns whether or not the LSQ unit is stalled. */
    bool isStalled() { return stalled; }
};

template <class Impl>
Fault
LSQUnit<Impl>::read(const RequestPtr &req,
                    RequestPtr &sreqLow, RequestPtr &sreqHigh,
                    int load_idx)
{
    DynInstPtr load_inst = loadQueue[load_idx];

    assert(load_inst);

    assert(!load_inst->isExecuted());

    // Make sure this isn't a strictly ordered load
    // A bit of a hackish way to get strictly ordered accesses to work
    // only if they're at the head of the LSQ and are ready to commit
    // (at the head of the ROB too).
    if (req->isStrictlyOrdered() &&
        (load_idx != loadHead || !load_inst->isAtCommit())) {
        iewStage->rescheduleMemInst(load_inst);
        ++lsqRescheduledLoads;
        DPRINTF(LSQUnit, "Strictly ordered load [sn:%lli] PC %s\n",
                load_inst->seqNum, load_inst->pcState());

        return std::make_shared<GenericISA::M5PanicFault>(
            "Strictly ordered load [sn:%llx] PC %s\n",
            load_inst->seqNum, load_inst->pcState());
    }

    // Check the SQ for any previous stores that might lead to forwarding
    int store_idx = load_inst->sqIdx;

    int store_size = 0;

    DPRINTF(LSQUnit, "Read called, load idx: %i, store idx: %i, "
            "storeHead: %i addr: %#x%s\n",
            load_idx, store_idx, storeHead, req->getPaddr(),
            sreqLow ? " split" : "");

    if (req->isLLSC()) {
        assert(!sreqLow);
        // Disable recording the result temporarily. Writing to misc
        // regs normally updates the result, but this is not the
        // desired behavior when handling store conditionals.
        load_inst->recordResult(false);
        TheISA::handleLockedRead(load_inst.get(), req);
        load_inst->recordResult(true);
    }

    if (req->isMmappedIpr()) {
        assert(!load_inst->memData);
        load_inst->memData = new uint8_t[64];

        ThreadContext *thread = cpu->tcBase(lsqID);
        Cycles delay(0);
        PacketPtr data_pkt = new Packet(req, MemCmd::ReadReq);

        data_pkt->dataStatic(load_inst->memData);
        if (!TheISA::HasUnalignedMemAcc || !sreqLow) {
            delay = TheISA::handleIprRead(thread, data_pkt);
        } else {
            assert(sreqLow->isMmappedIpr() && sreqHigh->isMmappedIpr());
            PacketPtr fst_data_pkt = new Packet(sreqLow, MemCmd::ReadReq);
            PacketPtr snd_data_pkt = new Packet(sreqHigh, MemCmd::ReadReq);

            fst_data_pkt->dataStatic(load_inst->memData);
            snd_data_pkt->dataStatic(load_inst->memData + sreqLow->getSize());

            delay = TheISA::handleIprRead(thread, fst_data_pkt);
            Cycles delay2 = TheISA::handleIprRead(thread, snd_data_pkt);
            if (delay2 > delay)
                delay = delay2;

            delete fst_data_pkt;
            delete snd_data_pkt;
        }
        WritebackEvent *wb = new WritebackEvent(load_inst, data_pkt, this);
        cpu->schedule(wb, cpu->clockEdge(delay));
        return NoFault;
    }

    while (store_idx != -1) {
        // End once we've reached the top of the LSQ
        if (store_idx == storeWBIdx) {
            break;
        }

        // Move the index to the next older store
        if (--store_idx < 0)
            store_idx += SQEntries;

        assert(storeQueue[store_idx].inst);

        store_size = storeQueue[store_idx].size;

        if (!store_size || storeQueue[store_idx].inst->strictlyOrdered() ||
            (storeQueue[store_idx].req &&
             storeQueue[store_idx].req->isCacheMaintenance())) {
            // Cache maintenance instructions go down via the store
            // path but they carry no data and they shouldn't be
            // considered for forwarding
            continue;
        }

        assert(storeQueue[store_idx].inst->effAddrValid());

        // Check if the store data is within the lower and upper bounds of
        // addresses that the request needs.
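        // The four flags below compare the load's address range
        // [vaddr, vaddr + size) against the store's range
        // [effAddr, effAddr + store_size): the store_has_* flags mean the
        // store covers the load's lower/upper bound, while the
        // *_load_has_store_part flags mean the store overlaps the load's
        // lower/upper end.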
        bool store_has_lower_limit =
            req->getVaddr() >= storeQueue[store_idx].inst->effAddr;
        bool store_has_upper_limit =
            (req->getVaddr() + req->getSize()) <=
            (storeQueue[store_idx].inst->effAddr + store_size);
        bool lower_load_has_store_part =
            req->getVaddr() < (storeQueue[store_idx].inst->effAddr +
                               store_size);
        bool upper_load_has_store_part =
            (req->getVaddr() + req->getSize()) >
            storeQueue[store_idx].inst->effAddr;

        // If the store's data has all of the data needed and the load isn't
        // LLSC, we can forward.
        if (store_has_lower_limit && store_has_upper_limit && !req->isLLSC()) {
            // Get shift amount for offset into the store's data.
            int shift_amt = req->getVaddr() - storeQueue[store_idx].inst->effAddr;

            // Allocate memory if this is the first time a load is issued.
            if (!load_inst->memData) {
                load_inst->memData = new uint8_t[req->getSize()];
            }
            if (storeQueue[store_idx].isAllZeros)
                memset(load_inst->memData, 0, req->getSize());
            else
                memcpy(load_inst->memData,
                    storeQueue[store_idx].data + shift_amt, req->getSize());

            DPRINTF(LSQUnit, "Forwarding from store idx %i to load to "
                    "addr %#x\n", store_idx, req->getVaddr());

            PacketPtr data_pkt = new Packet(req, MemCmd::ReadReq);
            data_pkt->dataStatic(load_inst->memData);

            WritebackEvent *wb = new WritebackEvent(load_inst, data_pkt, this);

            // We'll say this has a 1 cycle load-store forwarding latency
            // for now.
            // @todo: Need to make this a parameter.
            cpu->schedule(wb, curTick());

            ++lsqForwLoads;
            return NoFault;
        } else if (
                (!req->isLLSC() &&
                 ((store_has_lower_limit && lower_load_has_store_part) ||
                  (store_has_upper_limit && upper_load_has_store_part) ||
                  (lower_load_has_store_part && upper_load_has_store_part))) ||
                (req->isLLSC() &&
                 ((store_has_lower_limit || upper_load_has_store_part) &&
                  (store_has_upper_limit || lower_load_has_store_part)))) {
            // This is the partial store-load forwarding case where a store
            // has only part of the load's data and the load isn't LLSC or
            // the load is LLSC and the store has all or part of the load's
            // data

            // If it's already been written back, then don't worry about
            // stalling on it.
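            // Entries between storeWBIdx and the load's SQ index have not
            // been sent to memory yet, so a completed store should never be
            // encountered here; the panic below flags the inconsistency.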
            if (storeQueue[store_idx].completed) {
                panic("Should not check one of these");
                continue;
            }

            // Must stall load and force it to retry, so long as it's the oldest
            // load that needs to do so.
            if (!stalled ||
                (stalled &&
                 load_inst->seqNum <
                 loadQueue[stallingLoadIdx]->seqNum)) {
                stalled = true;
                stallingStoreIsn = storeQueue[store_idx].inst->seqNum;
                stallingLoadIdx = load_idx;
            }

            // Tell IQ/mem dep unit that this instruction will need to be
            // rescheduled eventually
            iewStage->rescheduleMemInst(load_inst);
            load_inst->clearIssued();
            ++lsqRescheduledLoads;

            // Do not generate a writeback event as this instruction is not
            // complete.
            DPRINTF(LSQUnit, "Load-store forwarding mis-match. "
                    "Store idx %i to load addr %#x\n",
                    store_idx, req->getVaddr());

            return NoFault;
        }
    }

    // If there's no forwarding case, then go access memory
    DPRINTF(LSQUnit, "Doing memory access for inst [sn:%lli] PC %s\n",
            load_inst->seqNum, load_inst->pcState());

    // Allocate memory if this is the first time a load is issued.
    if (!load_inst->memData) {
        load_inst->memData = new uint8_t[req->getSize()];
    }

    // If the cache is not blocked, do cache access
    bool completedFirst = false;
    PacketPtr data_pkt = Packet::createRead(req);
    PacketPtr fst_data_pkt = NULL;
    PacketPtr snd_data_pkt = NULL;

    data_pkt->dataStatic(load_inst->memData);

    LSQSenderState *state = new LSQSenderState;
    state->isLoad = true;
    state->idx = load_idx;
    state->inst = load_inst;
    data_pkt->senderState = state;

    if (!TheISA::HasUnalignedMemAcc || !sreqLow) {
        // Point the first packet at the main data packet.
        fst_data_pkt = data_pkt;
    } else {
        // Create the split packets.
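        // Each half of the split access gets its own read packet; both
        // share the same LSQSenderState, so the load only completes once
        // both responses have returned.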
        fst_data_pkt = Packet::createRead(sreqLow);
        snd_data_pkt = Packet::createRead(sreqHigh);

        fst_data_pkt->dataStatic(load_inst->memData);
        snd_data_pkt->dataStatic(load_inst->memData + sreqLow->getSize());

        fst_data_pkt->senderState = state;
        snd_data_pkt->senderState = state;

        state->isSplit = true;
        state->outstanding = 2;
        state->mainPkt = data_pkt;
    }

    // For now, load throughput is constrained by the number of
    // load FUs only, and loads do not consume a cache port (only
    // stores do).
    // @todo We should account for cache port contention
    // and arbitrate between loads and stores.
    bool successful_load = true;
    if (!dcachePort->sendTimingReq(fst_data_pkt)) {
        successful_load = false;
    } else if (TheISA::HasUnalignedMemAcc && sreqLow) {
        completedFirst = true;

        // The first packet was sent without problems, so send this one
        // too. If there is a problem with this packet then the whole
        // load will be squashed, so indicate this to the state object.
        // The first packet will return in completeDataAccess and be
        // handled there.
        // @todo We should also account for cache port contention
        // here.
        if (!dcachePort->sendTimingReq(snd_data_pkt)) {
            // The main packet will be deleted in completeDataAccess.
            state->complete();
            // Signify to 1st half that the 2nd half was blocked via state
            state->cacheBlocked = true;
            successful_load = false;
        }
    }

    // If the cache was blocked, or has become blocked due to the access,
    // handle it.
    if (!successful_load) {
        if (!sreqLow) {
            // Packet wasn't split, just delete main packet info
            delete state;
            delete data_pkt;
        }

        if (TheISA::HasUnalignedMemAcc && sreqLow) {
            if (!completedFirst) {
                // Split packet, but first failed. Delete all state.
                delete state;
                delete data_pkt;
                delete fst_data_pkt;
                delete snd_data_pkt;
                sreqLow.reset();
                sreqHigh.reset();
            } else {
                // Can't delete main packet data or state because first packet
                // was sent to the memory system
                delete data_pkt;
                delete snd_data_pkt;
                sreqHigh.reset();
            }
        }

        ++lsqCacheBlocked;

        iewStage->blockMemInst(load_inst);

        // No fault occurred, even though the interface is blocked.
        return NoFault;
    }

    return NoFault;
}

template <class Impl>
Fault
LSQUnit<Impl>::write(const RequestPtr &req,
                     const RequestPtr &sreqLow, const RequestPtr &sreqHigh,
                     uint8_t *data, int store_idx)
{
    assert(storeQueue[store_idx].inst);

    DPRINTF(LSQUnit, "Doing write to store idx %i, addr %#x"
            " | storeHead:%i [sn:%i]\n",
            store_idx, req->getPaddr(), storeHead,
            storeQueue[store_idx].inst->seqNum);

    storeQueue[store_idx].req = req;
    storeQueue[store_idx].sreqLow = sreqLow;
    storeQueue[store_idx].sreqHigh = sreqHigh;
    unsigned size = req->getSize();
    storeQueue[store_idx].size = size;
    bool store_no_data = req->getFlags() & Request::STORE_NO_DATA;
    storeQueue[store_idx].isAllZeros = store_no_data;
    assert(size <= sizeof(storeQueue[store_idx].data) || store_no_data);

    // Split stores can only occur in ISAs with unaligned memory accesses. If
    // a store request has been split, sreqLow and sreqHigh will be non-null.
    if (TheISA::HasUnalignedMemAcc && sreqLow) {
        storeQueue[store_idx].isSplit = true;
    }

    if (!(req->getFlags() & Request::CACHE_BLOCK_ZERO) &&
        !req->isCacheMaintenance())
        memcpy(storeQueue[store_idx].data, data, size);

    // This function only writes the data to the store queue, so no fault
    // can happen here.
    return NoFault;
}

#endif // __CPU_O3_LSQ_UNIT_HH__