lsq_unit_impl.hh revision 9814
19814Sandreas.hansson@arm.com 22292SN/A/* 39383SAli.Saidi@ARM.com * Copyright (c) 2010-2012 ARM Limited 47597Sminkyu.jeong@arm.com * All rights reserved 57597Sminkyu.jeong@arm.com * 67597Sminkyu.jeong@arm.com * The license below extends only to copyright in the software and shall 77597Sminkyu.jeong@arm.com * not be construed as granting a license to any other intellectual 87597Sminkyu.jeong@arm.com * property including but not limited to intellectual property relating 97597Sminkyu.jeong@arm.com * to a hardware implementation of the functionality of the software 107597Sminkyu.jeong@arm.com * licensed hereunder. You may use the software subject to the license 117597Sminkyu.jeong@arm.com * terms below provided that you ensure that this notice is replicated 127597Sminkyu.jeong@arm.com * unmodified and in its entirety in all distributions of the software, 137597Sminkyu.jeong@arm.com * modified or unmodified, in source code or in binary form. 147597Sminkyu.jeong@arm.com * 152292SN/A * Copyright (c) 2004-2005 The Regents of The University of Michigan 162292SN/A * All rights reserved. 172292SN/A * 182292SN/A * Redistribution and use in source and binary forms, with or without 192292SN/A * modification, are permitted provided that the following conditions are 202292SN/A * met: redistributions of source code must retain the above copyright 212292SN/A * notice, this list of conditions and the following disclaimer; 222292SN/A * redistributions in binary form must reproduce the above copyright 232292SN/A * notice, this list of conditions and the following disclaimer in the 242292SN/A * documentation and/or other materials provided with the distribution; 252292SN/A * neither the name of the copyright holders nor the names of its 262292SN/A * contributors may be used to endorse or promote products derived from 272292SN/A * this software without specific prior written permission. 
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Kevin Lim
 *          Korey Sewell
 */

#include "arch/generic/debugfaults.hh"
#include "arch/locked_mem.hh"
#include "base/str.hh"
#include "config/the_isa.hh"
#include "cpu/checker/cpu.hh"
#include "cpu/o3/lsq.hh"
#include "cpu/o3/lsq_unit.hh"
#include "debug/Activity.hh"
#include "debug/IEW.hh"
#include "debug/LSQUnit.hh"
#include "debug/O3PipeView.hh"
#include "mem/packet.hh"
#include "mem/request.hh"

// Event used to schedule the writeback of a store (or delayed load) result
// at a later tick.  AutoDelete: the event frees itself after process().
template<class Impl>
LSQUnit<Impl>::WritebackEvent::WritebackEvent(DynInstPtr &_inst, PacketPtr _pkt,
                                              LSQUnit *lsq_ptr)
    : Event(Default_Pri, AutoDelete),
      inst(_inst), pkt(_pkt), lsqPtr(lsq_ptr)
{
}

// Perform the deferred writeback, then release the packet, its request and
// (if present) its sender state — this event owns all three by this point.
template<class Impl>
void
LSQUnit<Impl>::WritebackEvent::process()
{
    // Writeback events must never fire on a switched-out CPU.
    assert(!lsqPtr->cpu->switchedOut());

    lsqPtr->writeback(inst, pkt);

    if (pkt->senderState)
        delete pkt->senderState;

    delete pkt->req;
    delete pkt;
}

template<class Impl>
const char *
LSQUnit<Impl>::WritebackEvent::description() const
{
    return "Store writeback";
}

// Handle a response returning from the data cache: write the result back to
// the instruction (unless it was squashed) and clean up the request/packet
// bookkeeping, including the extra packets of split (unaligned) accesses.
template<class Impl>
void
LSQUnit<Impl>::completeDataAccess(PacketPtr pkt)
{
    LSQSenderState *state = dynamic_cast<LSQSenderState *>(pkt->senderState);
    DynInstPtr inst = state->inst;
    DPRINTF(IEW, "Writeback event [sn:%lli].\n", inst->seqNum);
    DPRINTF(Activity, "Activity: Writeback event [sn:%lli].\n", inst->seqNum);

    //iewStage->ldstQueue.removeMSHR(inst->threadNumber,inst->seqNum);

    // If this is a split access, wait until all packets are received.
    if (TheISA::HasUnalignedMemAcc && !state->complete()) {
        // Only this fragment's request/packet are freed here; the shared
        // sender state stays alive until the final fragment arrives.
        delete pkt->req;
        delete pkt;
        return;
    }

    assert(!cpu->switchedOut());
    if (inst->isSquashed()) {
        // Squashed instruction: just release the writeback-port slot that
        // was reserved for it.
        iewStage->decrWb(inst->seqNum);
    } else {
        if (!state->noWB) {
            // Split loads write back via the merged mainPkt; everything
            // else writes back directly from the response packet.
            if (!TheISA::HasUnalignedMemAcc || !state->isSplit ||
                !state->isLoad) {
                writeback(inst, pkt);
            } else {
                writeback(inst, state->mainPkt);
            }
        }

        if (inst->isStore()) {
            completeStore(state->idx);
        }
    }

    // Split loads also own a separate "main" packet that must be freed.
    if (TheISA::HasUnalignedMemAcc && state->isSplit && state->isLoad) {
        delete state->mainPkt->req;
        delete state->mainPkt;
    }
    delete state;
    delete pkt->req;
    delete pkt;
}

// Default-construct with all counters cleared; real setup happens in init().
template <class Impl>
LSQUnit<Impl>::LSQUnit()
    : loads(0), stores(0), storesToWB(0), cacheBlockMask(0), stalled(false),
      isStoreBlocked(false), isLoadBlocked(false),
      loadBlockedHandled(false), storeInFlight(false), hasPendingPkt(false)
{
}

// One-time configuration of this LSQ unit: wire up the CPU/IEW/LSQ
// pointers, size the queues from the parameters, and reset dynamic state.
template<class Impl>
void
LSQUnit<Impl>::init(O3CPU *cpu_ptr, IEW *iew_ptr, DerivO3CPUParams *params,
                    LSQ *lsq_ptr, unsigned maxLQEntries, unsigned maxSQEntries,
                    unsigned id)
{
    cpu = cpu_ptr;
    iewStage = iew_ptr;

    DPRINTF(LSQUnit, "Creating LSQUnit%i object.\n",id);

    lsq = lsq_ptr;

    lsqID = id;

    // Add 1 for the sentinel entry (they are circular queues).
    LQEntries = maxLQEntries + 1;
    SQEntries = maxSQEntries + 1;

    loadQueue.resize(LQEntries);
    storeQueue.resize(SQEntries);

    depCheckShift = params->LSQDepCheckShift;
    checkLoads = params->LSQCheckLoads;
    cachePorts = params->cachePorts;
    needsTSO = params->needsTSO;

    resetState();
}


// Reset all dynamic (per-run) state: queue indices, counters, in-flight
// bookkeeping.  Called from init() and when taking over from another CPU.
template<class Impl>
void
LSQUnit<Impl>::resetState()
{
    loads = stores = storesToWB = 0;

    loadHead = loadTail = 0;

    storeHead = storeWBIdx = storeTail = 0;

    usedPorts = 0;

    retryPkt = NULL;
    memDepViolator = NULL;

    blockedLoadSeqNum = 0;

    stalled = false;
    isLoadBlocked = false;
    loadBlockedHandled = false;

    // Mask that maps any address to the start of its cache block.
    cacheBlockMask = ~(cpu->cacheLineSize() - 1);
}

template<class Impl>
1982292SN/Astd::string 1992292SN/ALSQUnit<Impl>::name() const 2002292SN/A{ 2012292SN/A if (Impl::MaxThreads == 1) { 2022292SN/A return iewStage->name() + ".lsq"; 2032292SN/A } else { 2048247Snate@binkert.org return iewStage->name() + ".lsq.thread" + to_string(lsqID); 2052292SN/A } 2062292SN/A} 2072292SN/A 2082292SN/Atemplate<class Impl> 2092292SN/Avoid 2102727Sktlim@umich.eduLSQUnit<Impl>::regStats() 2112727Sktlim@umich.edu{ 2122727Sktlim@umich.edu lsqForwLoads 2132727Sktlim@umich.edu .name(name() + ".forwLoads") 2142727Sktlim@umich.edu .desc("Number of loads that had data forwarded from stores"); 2152727Sktlim@umich.edu 2162727Sktlim@umich.edu invAddrLoads 2172727Sktlim@umich.edu .name(name() + ".invAddrLoads") 2182727Sktlim@umich.edu .desc("Number of loads ignored due to an invalid address"); 2192727Sktlim@umich.edu 2202727Sktlim@umich.edu lsqSquashedLoads 2212727Sktlim@umich.edu .name(name() + ".squashedLoads") 2222727Sktlim@umich.edu .desc("Number of loads squashed"); 2232727Sktlim@umich.edu 2242727Sktlim@umich.edu lsqIgnoredResponses 2252727Sktlim@umich.edu .name(name() + ".ignoredResponses") 2262727Sktlim@umich.edu .desc("Number of memory responses ignored because the instruction is squashed"); 2272727Sktlim@umich.edu 2282361SN/A lsqMemOrderViolation 2292361SN/A .name(name() + ".memOrderViolation") 2302361SN/A .desc("Number of memory ordering violations"); 2312361SN/A 2322727Sktlim@umich.edu lsqSquashedStores 2332727Sktlim@umich.edu .name(name() + ".squashedStores") 2342727Sktlim@umich.edu .desc("Number of stores squashed"); 2352727Sktlim@umich.edu 2362727Sktlim@umich.edu invAddrSwpfs 2372727Sktlim@umich.edu .name(name() + ".invAddrSwpfs") 2382727Sktlim@umich.edu .desc("Number of software prefetches ignored due to an invalid address"); 2392727Sktlim@umich.edu 2402727Sktlim@umich.edu lsqBlockedLoads 2412727Sktlim@umich.edu .name(name() + ".blockedLoads") 2422727Sktlim@umich.edu .desc("Number of blocked loads due to partial load-store forwarding"); 
2432727Sktlim@umich.edu 2442727Sktlim@umich.edu lsqRescheduledLoads 2452727Sktlim@umich.edu .name(name() + ".rescheduledLoads") 2462727Sktlim@umich.edu .desc("Number of loads that were rescheduled"); 2472727Sktlim@umich.edu 2482727Sktlim@umich.edu lsqCacheBlocked 2492727Sktlim@umich.edu .name(name() + ".cacheBlocked") 2502727Sktlim@umich.edu .desc("Number of times an access to memory failed due to the cache being blocked"); 2512727Sktlim@umich.edu} 2522727Sktlim@umich.edu 2532727Sktlim@umich.edutemplate<class Impl> 2542727Sktlim@umich.eduvoid 2558922Swilliam.wang@arm.comLSQUnit<Impl>::setDcachePort(MasterPort *dcache_port) 2564329Sktlim@umich.edu{ 2574329Sktlim@umich.edu dcachePort = dcache_port; 2584329Sktlim@umich.edu} 2594329Sktlim@umich.edu 2604329Sktlim@umich.edutemplate<class Impl> 2614329Sktlim@umich.eduvoid 2622292SN/ALSQUnit<Impl>::clearLQ() 2632292SN/A{ 2642292SN/A loadQueue.clear(); 2652292SN/A} 2662292SN/A 2672292SN/Atemplate<class Impl> 2682292SN/Avoid 2692292SN/ALSQUnit<Impl>::clearSQ() 2702292SN/A{ 2712292SN/A storeQueue.clear(); 2722292SN/A} 2732292SN/A 2742292SN/Atemplate<class Impl> 2752292SN/Avoid 2769444SAndreas.Sandberg@ARM.comLSQUnit<Impl>::drainSanityCheck() const 2772307SN/A{ 2789444SAndreas.Sandberg@ARM.com for (int i = 0; i < loadQueue.size(); ++i) 2792367SN/A assert(!loadQueue[i]); 2802307SN/A 2812329SN/A assert(storesToWB == 0); 2829444SAndreas.Sandberg@ARM.com assert(!retryPkt); 2832307SN/A} 2842307SN/A 2852307SN/Atemplate<class Impl> 2862307SN/Avoid 2872307SN/ALSQUnit<Impl>::takeOverFrom() 2882307SN/A{ 2899444SAndreas.Sandberg@ARM.com resetState(); 2902307SN/A} 2912307SN/A 2922307SN/Atemplate<class Impl> 2932307SN/Avoid 2942292SN/ALSQUnit<Impl>::resizeLQ(unsigned size) 2952292SN/A{ 2962329SN/A unsigned size_plus_sentinel = size + 1; 2972329SN/A assert(size_plus_sentinel >= LQEntries); 2982292SN/A 2992329SN/A if (size_plus_sentinel > LQEntries) { 3002329SN/A while (size_plus_sentinel > loadQueue.size()) { 3012292SN/A DynInstPtr dummy; 
3022292SN/A loadQueue.push_back(dummy); 3032292SN/A LQEntries++; 3042292SN/A } 3052292SN/A } else { 3062329SN/A LQEntries = size_plus_sentinel; 3072292SN/A } 3082292SN/A 3092292SN/A} 3102292SN/A 3112292SN/Atemplate<class Impl> 3122292SN/Avoid 3132292SN/ALSQUnit<Impl>::resizeSQ(unsigned size) 3142292SN/A{ 3152329SN/A unsigned size_plus_sentinel = size + 1; 3162329SN/A if (size_plus_sentinel > SQEntries) { 3172329SN/A while (size_plus_sentinel > storeQueue.size()) { 3182292SN/A SQEntry dummy; 3192292SN/A storeQueue.push_back(dummy); 3202292SN/A SQEntries++; 3212292SN/A } 3222292SN/A } else { 3232329SN/A SQEntries = size_plus_sentinel; 3242292SN/A } 3252292SN/A} 3262292SN/A 3272292SN/Atemplate <class Impl> 3282292SN/Avoid 3292292SN/ALSQUnit<Impl>::insert(DynInstPtr &inst) 3302292SN/A{ 3312292SN/A assert(inst->isMemRef()); 3322292SN/A 3332292SN/A assert(inst->isLoad() || inst->isStore()); 3342292SN/A 3352292SN/A if (inst->isLoad()) { 3362292SN/A insertLoad(inst); 3372292SN/A } else { 3382292SN/A insertStore(inst); 3392292SN/A } 3402292SN/A 3412292SN/A inst->setInLSQ(); 3422292SN/A} 3432292SN/A 3442292SN/Atemplate <class Impl> 3452292SN/Avoid 3462292SN/ALSQUnit<Impl>::insertLoad(DynInstPtr &load_inst) 3472292SN/A{ 3482329SN/A assert((loadTail + 1) % LQEntries != loadHead); 3492329SN/A assert(loads < LQEntries); 3502292SN/A 3517720Sgblack@eecs.umich.edu DPRINTF(LSQUnit, "Inserting load PC %s, idx:%i [sn:%lli]\n", 3527720Sgblack@eecs.umich.edu load_inst->pcState(), loadTail, load_inst->seqNum); 3532292SN/A 3542292SN/A load_inst->lqIdx = loadTail; 3552292SN/A 3562292SN/A if (stores == 0) { 3572292SN/A load_inst->sqIdx = -1; 3582292SN/A } else { 3592292SN/A load_inst->sqIdx = storeTail; 3602292SN/A } 3612292SN/A 3622292SN/A loadQueue[loadTail] = load_inst; 3632292SN/A 3642292SN/A incrLdIdx(loadTail); 3652292SN/A 3662292SN/A ++loads; 3672292SN/A} 3682292SN/A 3692292SN/Atemplate <class Impl> 3702292SN/Avoid 3712292SN/ALSQUnit<Impl>::insertStore(DynInstPtr &store_inst) 
3722292SN/A{ 3732292SN/A // Make sure it is not full before inserting an instruction. 3742292SN/A assert((storeTail + 1) % SQEntries != storeHead); 3752292SN/A assert(stores < SQEntries); 3762292SN/A 3777720Sgblack@eecs.umich.edu DPRINTF(LSQUnit, "Inserting store PC %s, idx:%i [sn:%lli]\n", 3787720Sgblack@eecs.umich.edu store_inst->pcState(), storeTail, store_inst->seqNum); 3792292SN/A 3802292SN/A store_inst->sqIdx = storeTail; 3812292SN/A store_inst->lqIdx = loadTail; 3822292SN/A 3832292SN/A storeQueue[storeTail] = SQEntry(store_inst); 3842292SN/A 3852292SN/A incrStIdx(storeTail); 3862292SN/A 3872292SN/A ++stores; 3882292SN/A} 3892292SN/A 3902292SN/Atemplate <class Impl> 3912292SN/Atypename Impl::DynInstPtr 3922292SN/ALSQUnit<Impl>::getMemDepViolator() 3932292SN/A{ 3942292SN/A DynInstPtr temp = memDepViolator; 3952292SN/A 3962292SN/A memDepViolator = NULL; 3972292SN/A 3982292SN/A return temp; 3992292SN/A} 4002292SN/A 4012292SN/Atemplate <class Impl> 4022292SN/Aunsigned 4032292SN/ALSQUnit<Impl>::numFreeEntries() 4042292SN/A{ 4052292SN/A unsigned free_lq_entries = LQEntries - loads; 4062292SN/A unsigned free_sq_entries = SQEntries - stores; 4072292SN/A 4082292SN/A // Both the LQ and SQ entries have an extra dummy entry to differentiate 4092292SN/A // empty/full conditions. Subtract 1 from the free entries. 4102292SN/A if (free_lq_entries < free_sq_entries) { 4112292SN/A return free_lq_entries - 1; 4122292SN/A } else { 4132292SN/A return free_sq_entries - 1; 4142292SN/A } 4152292SN/A} 4162292SN/A 4172292SN/Atemplate <class Impl> 4188545Ssaidi@eecs.umich.eduvoid 4198545Ssaidi@eecs.umich.eduLSQUnit<Impl>::checkSnoop(PacketPtr pkt) 4208545Ssaidi@eecs.umich.edu{ 4218545Ssaidi@eecs.umich.edu int load_idx = loadHead; 4228545Ssaidi@eecs.umich.edu 4239383SAli.Saidi@ARM.com // Unlock the cpu-local monitor when the CPU sees a snoop to a locked 4249383SAli.Saidi@ARM.com // address. 
The CPU can speculatively execute a LL operation after a pending 4259383SAli.Saidi@ARM.com // SC operation in the pipeline and that can make the cache monitor the CPU 4269383SAli.Saidi@ARM.com // is connected to valid while it really shouldn't be. 4279383SAli.Saidi@ARM.com for (int x = 0; x < cpu->numActiveThreads(); x++) { 4289383SAli.Saidi@ARM.com ThreadContext *tc = cpu->getContext(x); 4299383SAli.Saidi@ARM.com bool no_squash = cpu->thread[x]->noSquashFromTC; 4309383SAli.Saidi@ARM.com cpu->thread[x]->noSquashFromTC = true; 4319383SAli.Saidi@ARM.com TheISA::handleLockedSnoop(tc, pkt, cacheBlockMask); 4329383SAli.Saidi@ARM.com cpu->thread[x]->noSquashFromTC = no_squash; 4339383SAli.Saidi@ARM.com } 4349383SAli.Saidi@ARM.com 4358545Ssaidi@eecs.umich.edu // If this is the only load in the LSQ we don't care 4368545Ssaidi@eecs.umich.edu if (load_idx == loadTail) 4378545Ssaidi@eecs.umich.edu return; 4388545Ssaidi@eecs.umich.edu incrLdIdx(load_idx); 4398545Ssaidi@eecs.umich.edu 4408545Ssaidi@eecs.umich.edu DPRINTF(LSQUnit, "Got snoop for address %#x\n", pkt->getAddr()); 4418545Ssaidi@eecs.umich.edu Addr invalidate_addr = pkt->getAddr() & cacheBlockMask; 4428545Ssaidi@eecs.umich.edu while (load_idx != loadTail) { 4438545Ssaidi@eecs.umich.edu DynInstPtr ld_inst = loadQueue[load_idx]; 4448545Ssaidi@eecs.umich.edu 4459046SAli.Saidi@ARM.com if (!ld_inst->effAddrValid() || ld_inst->uncacheable()) { 4468545Ssaidi@eecs.umich.edu incrLdIdx(load_idx); 4478545Ssaidi@eecs.umich.edu continue; 4488545Ssaidi@eecs.umich.edu } 4498545Ssaidi@eecs.umich.edu 4508545Ssaidi@eecs.umich.edu Addr load_addr = ld_inst->physEffAddr & cacheBlockMask; 4518545Ssaidi@eecs.umich.edu DPRINTF(LSQUnit, "-- inst [sn:%lli] load_addr: %#x to pktAddr:%#x\n", 4528545Ssaidi@eecs.umich.edu ld_inst->seqNum, load_addr, invalidate_addr); 4538545Ssaidi@eecs.umich.edu 4548545Ssaidi@eecs.umich.edu if (load_addr == invalidate_addr) { 4559046SAli.Saidi@ARM.com if (ld_inst->possibleLoadViolation()) { 
4568545Ssaidi@eecs.umich.edu DPRINTF(LSQUnit, "Conflicting load at addr %#x [sn:%lli]\n", 4578545Ssaidi@eecs.umich.edu ld_inst->physEffAddr, pkt->getAddr(), ld_inst->seqNum); 4588545Ssaidi@eecs.umich.edu 4598545Ssaidi@eecs.umich.edu // Mark the load for re-execution 4608545Ssaidi@eecs.umich.edu ld_inst->fault = new ReExec; 4618545Ssaidi@eecs.umich.edu } else { 4628545Ssaidi@eecs.umich.edu // If a older load checks this and it's true 4638545Ssaidi@eecs.umich.edu // then we might have missed the snoop 4648545Ssaidi@eecs.umich.edu // in which case we need to invalidate to be sure 4659046SAli.Saidi@ARM.com ld_inst->hitExternalSnoop(true); 4668545Ssaidi@eecs.umich.edu } 4678545Ssaidi@eecs.umich.edu } 4688545Ssaidi@eecs.umich.edu incrLdIdx(load_idx); 4698545Ssaidi@eecs.umich.edu } 4708545Ssaidi@eecs.umich.edu return; 4718545Ssaidi@eecs.umich.edu} 4728545Ssaidi@eecs.umich.edu 4738545Ssaidi@eecs.umich.edutemplate <class Impl> 4742292SN/AFault 4758199SAli.Saidi@ARM.comLSQUnit<Impl>::checkViolations(int load_idx, DynInstPtr &inst) 4768199SAli.Saidi@ARM.com{ 4778199SAli.Saidi@ARM.com Addr inst_eff_addr1 = inst->effAddr >> depCheckShift; 4788199SAli.Saidi@ARM.com Addr inst_eff_addr2 = (inst->effAddr + inst->effSize - 1) >> depCheckShift; 4798199SAli.Saidi@ARM.com 4808199SAli.Saidi@ARM.com /** @todo in theory you only need to check an instruction that has executed 4818199SAli.Saidi@ARM.com * however, there isn't a good way in the pipeline at the moment to check 4828199SAli.Saidi@ARM.com * all instructions that will execute before the store writes back. Thus, 4838199SAli.Saidi@ARM.com * like the implementation that came before it, we're overly conservative. 
4848199SAli.Saidi@ARM.com */ 4858199SAli.Saidi@ARM.com while (load_idx != loadTail) { 4868199SAli.Saidi@ARM.com DynInstPtr ld_inst = loadQueue[load_idx]; 4879046SAli.Saidi@ARM.com if (!ld_inst->effAddrValid() || ld_inst->uncacheable()) { 4888199SAli.Saidi@ARM.com incrLdIdx(load_idx); 4898199SAli.Saidi@ARM.com continue; 4908199SAli.Saidi@ARM.com } 4918199SAli.Saidi@ARM.com 4928199SAli.Saidi@ARM.com Addr ld_eff_addr1 = ld_inst->effAddr >> depCheckShift; 4938199SAli.Saidi@ARM.com Addr ld_eff_addr2 = 4948199SAli.Saidi@ARM.com (ld_inst->effAddr + ld_inst->effSize - 1) >> depCheckShift; 4958199SAli.Saidi@ARM.com 4968272SAli.Saidi@ARM.com if (inst_eff_addr2 >= ld_eff_addr1 && inst_eff_addr1 <= ld_eff_addr2) { 4978545Ssaidi@eecs.umich.edu if (inst->isLoad()) { 4988545Ssaidi@eecs.umich.edu // If this load is to the same block as an external snoop 4998545Ssaidi@eecs.umich.edu // invalidate that we've observed then the load needs to be 5008545Ssaidi@eecs.umich.edu // squashed as it could have newer data 5019046SAli.Saidi@ARM.com if (ld_inst->hitExternalSnoop()) { 5028545Ssaidi@eecs.umich.edu if (!memDepViolator || 5038545Ssaidi@eecs.umich.edu ld_inst->seqNum < memDepViolator->seqNum) { 5048545Ssaidi@eecs.umich.edu DPRINTF(LSQUnit, "Detected fault with inst [sn:%lli] " 5058592Sgblack@eecs.umich.edu "and [sn:%lli] at address %#x\n", 5068592Sgblack@eecs.umich.edu inst->seqNum, ld_inst->seqNum, ld_eff_addr1); 5078545Ssaidi@eecs.umich.edu memDepViolator = ld_inst; 5088199SAli.Saidi@ARM.com 5098545Ssaidi@eecs.umich.edu ++lsqMemOrderViolation; 5108199SAli.Saidi@ARM.com 5118591Sgblack@eecs.umich.edu return new GenericISA::M5PanicFault( 5128591Sgblack@eecs.umich.edu "Detected fault with inst [sn:%lli] and " 5138591Sgblack@eecs.umich.edu "[sn:%lli] at address %#x\n", 5148591Sgblack@eecs.umich.edu inst->seqNum, ld_inst->seqNum, ld_eff_addr1); 5158545Ssaidi@eecs.umich.edu } 5168545Ssaidi@eecs.umich.edu } 5178199SAli.Saidi@ARM.com 5188545Ssaidi@eecs.umich.edu // Otherwise, mark the load 
has a possible load violation 5198545Ssaidi@eecs.umich.edu // and if we see a snoop before it's commited, we need to squash 5209046SAli.Saidi@ARM.com ld_inst->possibleLoadViolation(true); 5218545Ssaidi@eecs.umich.edu DPRINTF(LSQUnit, "Found possible load violaiton at addr: %#x" 5228545Ssaidi@eecs.umich.edu " between instructions [sn:%lli] and [sn:%lli]\n", 5238545Ssaidi@eecs.umich.edu inst_eff_addr1, inst->seqNum, ld_inst->seqNum); 5248545Ssaidi@eecs.umich.edu } else { 5258545Ssaidi@eecs.umich.edu // A load/store incorrectly passed this store. 5268545Ssaidi@eecs.umich.edu // Check if we already have a violator, or if it's newer 5278545Ssaidi@eecs.umich.edu // squash and refetch. 5288545Ssaidi@eecs.umich.edu if (memDepViolator && ld_inst->seqNum > memDepViolator->seqNum) 5298545Ssaidi@eecs.umich.edu break; 5308545Ssaidi@eecs.umich.edu 5318592Sgblack@eecs.umich.edu DPRINTF(LSQUnit, "Detected fault with inst [sn:%lli] and " 5328592Sgblack@eecs.umich.edu "[sn:%lli] at address %#x\n", 5338592Sgblack@eecs.umich.edu inst->seqNum, ld_inst->seqNum, ld_eff_addr1); 5348545Ssaidi@eecs.umich.edu memDepViolator = ld_inst; 5358545Ssaidi@eecs.umich.edu 5368545Ssaidi@eecs.umich.edu ++lsqMemOrderViolation; 5378545Ssaidi@eecs.umich.edu 5388591Sgblack@eecs.umich.edu return new GenericISA::M5PanicFault("Detected fault with " 5398591Sgblack@eecs.umich.edu "inst [sn:%lli] and [sn:%lli] at address %#x\n", 5408591Sgblack@eecs.umich.edu inst->seqNum, ld_inst->seqNum, ld_eff_addr1); 5418545Ssaidi@eecs.umich.edu } 5428199SAli.Saidi@ARM.com } 5438199SAli.Saidi@ARM.com 5448199SAli.Saidi@ARM.com incrLdIdx(load_idx); 5458199SAli.Saidi@ARM.com } 5468199SAli.Saidi@ARM.com return NoFault; 5478199SAli.Saidi@ARM.com} 5488199SAli.Saidi@ARM.com 5498199SAli.Saidi@ARM.com 5508199SAli.Saidi@ARM.com 5518199SAli.Saidi@ARM.com 5528199SAli.Saidi@ARM.comtemplate <class Impl> 5538199SAli.Saidi@ARM.comFault 5542292SN/ALSQUnit<Impl>::executeLoad(DynInstPtr &inst) 5552292SN/A{ 5564032Sktlim@umich.edu using 
namespace TheISA; 5572292SN/A // Execute a specific load. 5582292SN/A Fault load_fault = NoFault; 5592292SN/A 5607720Sgblack@eecs.umich.edu DPRINTF(LSQUnit, "Executing load PC %s, [sn:%lli]\n", 5617944SGiacomo.Gabrielli@arm.com inst->pcState(), inst->seqNum); 5622292SN/A 5634032Sktlim@umich.edu assert(!inst->isSquashed()); 5644032Sktlim@umich.edu 5652669Sktlim@umich.edu load_fault = inst->initiateAcc(); 5662292SN/A 5677944SGiacomo.Gabrielli@arm.com if (inst->isTranslationDelayed() && 5687944SGiacomo.Gabrielli@arm.com load_fault == NoFault) 5697944SGiacomo.Gabrielli@arm.com return load_fault; 5707944SGiacomo.Gabrielli@arm.com 5717597Sminkyu.jeong@arm.com // If the instruction faulted or predicated false, then we need to send it 5727597Sminkyu.jeong@arm.com // along to commit without the instruction completing. 5737597Sminkyu.jeong@arm.com if (load_fault != NoFault || inst->readPredicate() == false) { 5742329SN/A // Send this instruction to commit, also make sure iew stage 5752329SN/A // realizes there is activity. 5762367SN/A // Mark it as executed unless it is an uncached load that 5772367SN/A // needs to hit the head of commit. 5787848SAli.Saidi@ARM.com if (inst->readPredicate() == false) 5797848SAli.Saidi@ARM.com inst->forwardOldRegs(); 5807600Sminkyu.jeong@arm.com DPRINTF(LSQUnit, "Load [sn:%lli] not executed from %s\n", 5817600Sminkyu.jeong@arm.com inst->seqNum, 5827600Sminkyu.jeong@arm.com (load_fault != NoFault ? 
"fault" : "predication")); 5834032Sktlim@umich.edu if (!(inst->hasRequest() && inst->uncacheable()) || 5843731Sktlim@umich.edu inst->isAtCommit()) { 5852367SN/A inst->setExecuted(); 5862367SN/A } 5872292SN/A iewStage->instToCommit(inst); 5882292SN/A iewStage->activityThisCycle(); 5894032Sktlim@umich.edu } else if (!loadBlocked()) { 5909046SAli.Saidi@ARM.com assert(inst->effAddrValid()); 5914032Sktlim@umich.edu int load_idx = inst->lqIdx; 5924032Sktlim@umich.edu incrLdIdx(load_idx); 5934032Sktlim@umich.edu 5948199SAli.Saidi@ARM.com if (checkLoads) 5958199SAli.Saidi@ARM.com return checkViolations(load_idx, inst); 5962292SN/A } 5972292SN/A 5982292SN/A return load_fault; 5992292SN/A} 6002292SN/A 6012292SN/Atemplate <class Impl> 6022292SN/AFault 6032292SN/ALSQUnit<Impl>::executeStore(DynInstPtr &store_inst) 6042292SN/A{ 6052292SN/A using namespace TheISA; 6062292SN/A // Make sure that a store exists. 6072292SN/A assert(stores != 0); 6082292SN/A 6092292SN/A int store_idx = store_inst->sqIdx; 6102292SN/A 6117720Sgblack@eecs.umich.edu DPRINTF(LSQUnit, "Executing store PC %s [sn:%lli]\n", 6127720Sgblack@eecs.umich.edu store_inst->pcState(), store_inst->seqNum); 6132292SN/A 6144032Sktlim@umich.edu assert(!store_inst->isSquashed()); 6154032Sktlim@umich.edu 6162292SN/A // Check the recently completed loads to see if any match this store's 6172292SN/A // address. If so, then we have a memory ordering violation. 
6182292SN/A int load_idx = store_inst->lqIdx; 6192292SN/A 6202292SN/A Fault store_fault = store_inst->initiateAcc(); 6212292SN/A 6227944SGiacomo.Gabrielli@arm.com if (store_inst->isTranslationDelayed() && 6237944SGiacomo.Gabrielli@arm.com store_fault == NoFault) 6247944SGiacomo.Gabrielli@arm.com return store_fault; 6257944SGiacomo.Gabrielli@arm.com 6267848SAli.Saidi@ARM.com if (store_inst->readPredicate() == false) 6277848SAli.Saidi@ARM.com store_inst->forwardOldRegs(); 6287848SAli.Saidi@ARM.com 6292329SN/A if (storeQueue[store_idx].size == 0) { 6307782Sminkyu.jeong@arm.com DPRINTF(LSQUnit,"Fault on Store PC %s, [sn:%lli], Size = 0\n", 6317720Sgblack@eecs.umich.edu store_inst->pcState(), store_inst->seqNum); 6322292SN/A 6332292SN/A return store_fault; 6347782Sminkyu.jeong@arm.com } else if (store_inst->readPredicate() == false) { 6357782Sminkyu.jeong@arm.com DPRINTF(LSQUnit, "Store [sn:%lli] not executed from predication\n", 6367782Sminkyu.jeong@arm.com store_inst->seqNum); 6377782Sminkyu.jeong@arm.com return store_fault; 6382292SN/A } 6392292SN/A 6402292SN/A assert(store_fault == NoFault); 6412292SN/A 6422336SN/A if (store_inst->isStoreConditional()) { 6432336SN/A // Store conditionals need to set themselves as able to 6442336SN/A // writeback if we haven't had a fault by here. 
6452329SN/A storeQueue[store_idx].canWB = true; 6462292SN/A 6472329SN/A ++storesToWB; 6482292SN/A } 6492292SN/A 6508199SAli.Saidi@ARM.com return checkViolations(load_idx, store_inst); 6512292SN/A 6522292SN/A} 6532292SN/A 6542292SN/Atemplate <class Impl> 6552292SN/Avoid 6562292SN/ALSQUnit<Impl>::commitLoad() 6572292SN/A{ 6582292SN/A assert(loadQueue[loadHead]); 6592292SN/A 6607720Sgblack@eecs.umich.edu DPRINTF(LSQUnit, "Committing head load instruction, PC %s\n", 6617720Sgblack@eecs.umich.edu loadQueue[loadHead]->pcState()); 6622292SN/A 6632292SN/A loadQueue[loadHead] = NULL; 6642292SN/A 6652292SN/A incrLdIdx(loadHead); 6662292SN/A 6672292SN/A --loads; 6682292SN/A} 6692292SN/A 6702292SN/Atemplate <class Impl> 6712292SN/Avoid 6722292SN/ALSQUnit<Impl>::commitLoads(InstSeqNum &youngest_inst) 6732292SN/A{ 6742292SN/A assert(loads == 0 || loadQueue[loadHead]); 6752292SN/A 6762292SN/A while (loads != 0 && loadQueue[loadHead]->seqNum <= youngest_inst) { 6772292SN/A commitLoad(); 6782292SN/A } 6792292SN/A} 6802292SN/A 6812292SN/Atemplate <class Impl> 6822292SN/Avoid 6832292SN/ALSQUnit<Impl>::commitStores(InstSeqNum &youngest_inst) 6842292SN/A{ 6852292SN/A assert(stores == 0 || storeQueue[storeHead].inst); 6862292SN/A 6872292SN/A int store_idx = storeHead; 6882292SN/A 6892292SN/A while (store_idx != storeTail) { 6902292SN/A assert(storeQueue[store_idx].inst); 6912329SN/A // Mark any stores that are now committed and have not yet 6922329SN/A // been marked as able to write back. 
6932292SN/A if (!storeQueue[store_idx].canWB) { 6942292SN/A if (storeQueue[store_idx].inst->seqNum > youngest_inst) { 6952292SN/A break; 6962292SN/A } 6972292SN/A DPRINTF(LSQUnit, "Marking store as able to write back, PC " 6987720Sgblack@eecs.umich.edu "%s [sn:%lli]\n", 6997720Sgblack@eecs.umich.edu storeQueue[store_idx].inst->pcState(), 7002292SN/A storeQueue[store_idx].inst->seqNum); 7012292SN/A 7022292SN/A storeQueue[store_idx].canWB = true; 7032292SN/A 7042292SN/A ++storesToWB; 7052292SN/A } 7062292SN/A 7072292SN/A incrStIdx(store_idx); 7082292SN/A } 7092292SN/A} 7102292SN/A 7112292SN/Atemplate <class Impl> 7122292SN/Avoid 7136974Stjones1@inf.ed.ac.ukLSQUnit<Impl>::writebackPendingStore() 7146974Stjones1@inf.ed.ac.uk{ 7156974Stjones1@inf.ed.ac.uk if (hasPendingPkt) { 7166974Stjones1@inf.ed.ac.uk assert(pendingPkt != NULL); 7176974Stjones1@inf.ed.ac.uk 7186974Stjones1@inf.ed.ac.uk // If the cache is blocked, this will store the packet for retry. 7196974Stjones1@inf.ed.ac.uk if (sendStore(pendingPkt)) { 7206974Stjones1@inf.ed.ac.uk storePostSend(pendingPkt); 7216974Stjones1@inf.ed.ac.uk } 7226974Stjones1@inf.ed.ac.uk pendingPkt = NULL; 7236974Stjones1@inf.ed.ac.uk hasPendingPkt = false; 7246974Stjones1@inf.ed.ac.uk } 7256974Stjones1@inf.ed.ac.uk} 7266974Stjones1@inf.ed.ac.uk 7276974Stjones1@inf.ed.ac.uktemplate <class Impl> 7286974Stjones1@inf.ed.ac.ukvoid 7292292SN/ALSQUnit<Impl>::writebackStores() 7302292SN/A{ 7316974Stjones1@inf.ed.ac.uk // First writeback the second packet from any split store that didn't 7326974Stjones1@inf.ed.ac.uk // complete last cycle because there weren't enough cache ports available. 
7336974Stjones1@inf.ed.ac.uk if (TheISA::HasUnalignedMemAcc) { 7346974Stjones1@inf.ed.ac.uk writebackPendingStore(); 7356974Stjones1@inf.ed.ac.uk } 7366974Stjones1@inf.ed.ac.uk 7372292SN/A while (storesToWB > 0 && 7382292SN/A storeWBIdx != storeTail && 7392292SN/A storeQueue[storeWBIdx].inst && 7402292SN/A storeQueue[storeWBIdx].canWB && 7418727Snilay@cs.wisc.edu ((!needsTSO) || (!storeInFlight)) && 7422292SN/A usedPorts < cachePorts) { 7432292SN/A 7442907Sktlim@umich.edu if (isStoreBlocked || lsq->cacheBlocked()) { 7452678Sktlim@umich.edu DPRINTF(LSQUnit, "Unable to write back any more stores, cache" 7462678Sktlim@umich.edu " is blocked!\n"); 7472678Sktlim@umich.edu break; 7482678Sktlim@umich.edu } 7492678Sktlim@umich.edu 7502329SN/A // Store didn't write any data so no need to write it back to 7512329SN/A // memory. 7522292SN/A if (storeQueue[storeWBIdx].size == 0) { 7532292SN/A completeStore(storeWBIdx); 7542292SN/A 7552292SN/A incrStIdx(storeWBIdx); 7562292SN/A 7572292SN/A continue; 7582292SN/A } 7592678Sktlim@umich.edu 7602292SN/A ++usedPorts; 7612292SN/A 7622292SN/A if (storeQueue[storeWBIdx].inst->isDataPrefetch()) { 7632292SN/A incrStIdx(storeWBIdx); 7642292SN/A 7652292SN/A continue; 7662292SN/A } 7672292SN/A 7682292SN/A assert(storeQueue[storeWBIdx].req); 7692292SN/A assert(!storeQueue[storeWBIdx].committed); 7702292SN/A 7716974Stjones1@inf.ed.ac.uk if (TheISA::HasUnalignedMemAcc && storeQueue[storeWBIdx].isSplit) { 7726974Stjones1@inf.ed.ac.uk assert(storeQueue[storeWBIdx].sreqLow); 7736974Stjones1@inf.ed.ac.uk assert(storeQueue[storeWBIdx].sreqHigh); 7746974Stjones1@inf.ed.ac.uk } 7756974Stjones1@inf.ed.ac.uk 7762669Sktlim@umich.edu DynInstPtr inst = storeQueue[storeWBIdx].inst; 7772669Sktlim@umich.edu 7782669Sktlim@umich.edu Request *req = storeQueue[storeWBIdx].req; 7798481Sgblack@eecs.umich.edu RequestPtr sreqLow = storeQueue[storeWBIdx].sreqLow; 7808481Sgblack@eecs.umich.edu RequestPtr sreqHigh = storeQueue[storeWBIdx].sreqHigh; 
7818481Sgblack@eecs.umich.edu 7822292SN/A storeQueue[storeWBIdx].committed = true; 7832292SN/A 7842669Sktlim@umich.edu assert(!inst->memData); 7852669Sktlim@umich.edu inst->memData = new uint8_t[64]; 7863772Sgblack@eecs.umich.edu 7874326Sgblack@eecs.umich.edu memcpy(inst->memData, storeQueue[storeWBIdx].data, req->getSize()); 7882669Sktlim@umich.edu 7894878Sstever@eecs.umich.edu MemCmd command = 7904878Sstever@eecs.umich.edu req->isSwap() ? MemCmd::SwapReq : 7916102Sgblack@eecs.umich.edu (req->isLLSC() ? MemCmd::StoreCondReq : MemCmd::WriteReq); 7926974Stjones1@inf.ed.ac.uk PacketPtr data_pkt; 7936974Stjones1@inf.ed.ac.uk PacketPtr snd_data_pkt = NULL; 7942292SN/A 7952678Sktlim@umich.edu LSQSenderState *state = new LSQSenderState; 7962678Sktlim@umich.edu state->isLoad = false; 7972678Sktlim@umich.edu state->idx = storeWBIdx; 7982678Sktlim@umich.edu state->inst = inst; 7996974Stjones1@inf.ed.ac.uk 8006974Stjones1@inf.ed.ac.uk if (!TheISA::HasUnalignedMemAcc || !storeQueue[storeWBIdx].isSplit) { 8016974Stjones1@inf.ed.ac.uk 8026974Stjones1@inf.ed.ac.uk // Build a single data packet if the store isn't split. 8038949Sandreas.hansson@arm.com data_pkt = new Packet(req, command); 8046974Stjones1@inf.ed.ac.uk data_pkt->dataStatic(inst->memData); 8056974Stjones1@inf.ed.ac.uk data_pkt->senderState = state; 8066974Stjones1@inf.ed.ac.uk } else { 8076974Stjones1@inf.ed.ac.uk // Create two packets if the store is split in two. 
8088949Sandreas.hansson@arm.com data_pkt = new Packet(sreqLow, command); 8098949Sandreas.hansson@arm.com snd_data_pkt = new Packet(sreqHigh, command); 8106974Stjones1@inf.ed.ac.uk 8116974Stjones1@inf.ed.ac.uk data_pkt->dataStatic(inst->memData); 8126974Stjones1@inf.ed.ac.uk snd_data_pkt->dataStatic(inst->memData + sreqLow->getSize()); 8136974Stjones1@inf.ed.ac.uk 8146974Stjones1@inf.ed.ac.uk data_pkt->senderState = state; 8156974Stjones1@inf.ed.ac.uk snd_data_pkt->senderState = state; 8166974Stjones1@inf.ed.ac.uk 8176974Stjones1@inf.ed.ac.uk state->isSplit = true; 8186974Stjones1@inf.ed.ac.uk state->outstanding = 2; 8196974Stjones1@inf.ed.ac.uk 8206974Stjones1@inf.ed.ac.uk // Can delete the main request now. 8216974Stjones1@inf.ed.ac.uk delete req; 8226974Stjones1@inf.ed.ac.uk req = sreqLow; 8236974Stjones1@inf.ed.ac.uk } 8242678Sktlim@umich.edu 8257720Sgblack@eecs.umich.edu DPRINTF(LSQUnit, "D-Cache: Writing back store idx:%i PC:%s " 8262292SN/A "to Addr:%#x, data:%#x [sn:%lli]\n", 8277720Sgblack@eecs.umich.edu storeWBIdx, inst->pcState(), 8283797Sgblack@eecs.umich.edu req->getPaddr(), (int)*(inst->memData), 8293221Sktlim@umich.edu inst->seqNum); 8302292SN/A 8312693Sktlim@umich.edu // @todo: Remove this SC hack once the memory system handles it. 8324350Sgblack@eecs.umich.edu if (inst->isStoreConditional()) { 8336974Stjones1@inf.ed.ac.uk assert(!storeQueue[storeWBIdx].isSplit); 8343326Sktlim@umich.edu // Disable recording the result temporarily. Writing to 8353326Sktlim@umich.edu // misc regs normally updates the result, but this is not 8363326Sktlim@umich.edu // the desired behavior when handling store conditionals. 8379046SAli.Saidi@ARM.com inst->recordResult(false); 8383326Sktlim@umich.edu bool success = TheISA::handleLockedWrite(inst.get(), req); 8399046SAli.Saidi@ARM.com inst->recordResult(true); 8403326Sktlim@umich.edu 8413326Sktlim@umich.edu if (!success) { 8423326Sktlim@umich.edu // Instantly complete this store. 
8433326Sktlim@umich.edu DPRINTF(LSQUnit, "Store conditional [sn:%lli] failed. " 8443326Sktlim@umich.edu "Instantly completing it.\n", 8453326Sktlim@umich.edu inst->seqNum); 8463326Sktlim@umich.edu WritebackEvent *wb = new WritebackEvent(inst, data_pkt, this); 8477823Ssteve.reinhardt@amd.com cpu->schedule(wb, curTick() + 1); 8488887Sgeoffrey.blake@arm.com if (cpu->checker) { 8498887Sgeoffrey.blake@arm.com // Make sure to set the LLSC data for verification 8508887Sgeoffrey.blake@arm.com // if checker is loaded 8518887Sgeoffrey.blake@arm.com inst->reqToVerify->setExtraData(0); 8528887Sgeoffrey.blake@arm.com inst->completeAcc(data_pkt); 8538887Sgeoffrey.blake@arm.com } 8543326Sktlim@umich.edu completeStore(storeWBIdx); 8553326Sktlim@umich.edu incrStIdx(storeWBIdx); 8563326Sktlim@umich.edu continue; 8572693Sktlim@umich.edu } 8582693Sktlim@umich.edu } else { 8592693Sktlim@umich.edu // Non-store conditionals do not need a writeback. 8602693Sktlim@umich.edu state->noWB = true; 8612693Sktlim@umich.edu } 8622693Sktlim@umich.edu 8638481Sgblack@eecs.umich.edu bool split = 8648481Sgblack@eecs.umich.edu TheISA::HasUnalignedMemAcc && storeQueue[storeWBIdx].isSplit; 8658481Sgblack@eecs.umich.edu 8668481Sgblack@eecs.umich.edu ThreadContext *thread = cpu->tcBase(lsqID); 8678481Sgblack@eecs.umich.edu 8688481Sgblack@eecs.umich.edu if (req->isMmappedIpr()) { 8698481Sgblack@eecs.umich.edu assert(!inst->isStoreConditional()); 8708481Sgblack@eecs.umich.edu TheISA::handleIprWrite(thread, data_pkt); 8718481Sgblack@eecs.umich.edu delete data_pkt; 8728481Sgblack@eecs.umich.edu if (split) { 8738481Sgblack@eecs.umich.edu assert(snd_data_pkt->req->isMmappedIpr()); 8748481Sgblack@eecs.umich.edu TheISA::handleIprWrite(thread, snd_data_pkt); 8758481Sgblack@eecs.umich.edu delete snd_data_pkt; 8768481Sgblack@eecs.umich.edu delete sreqLow; 8778481Sgblack@eecs.umich.edu delete sreqHigh; 8788481Sgblack@eecs.umich.edu } 8798481Sgblack@eecs.umich.edu delete state; 8808481Sgblack@eecs.umich.edu delete req; 
8818481Sgblack@eecs.umich.edu completeStore(storeWBIdx); 8828481Sgblack@eecs.umich.edu incrStIdx(storeWBIdx); 8838481Sgblack@eecs.umich.edu } else if (!sendStore(data_pkt)) { 8844032Sktlim@umich.edu DPRINTF(IEW, "D-Cache became blocked when writing [sn:%lli], will" 8853221Sktlim@umich.edu "retry later\n", 8863221Sktlim@umich.edu inst->seqNum); 8876974Stjones1@inf.ed.ac.uk 8886974Stjones1@inf.ed.ac.uk // Need to store the second packet, if split. 8898481Sgblack@eecs.umich.edu if (split) { 8906974Stjones1@inf.ed.ac.uk state->pktToSend = true; 8916974Stjones1@inf.ed.ac.uk state->pendingPacket = snd_data_pkt; 8926974Stjones1@inf.ed.ac.uk } 8932669Sktlim@umich.edu } else { 8946974Stjones1@inf.ed.ac.uk 8956974Stjones1@inf.ed.ac.uk // If split, try to send the second packet too 8968481Sgblack@eecs.umich.edu if (split) { 8976974Stjones1@inf.ed.ac.uk assert(snd_data_pkt); 8986974Stjones1@inf.ed.ac.uk 8996974Stjones1@inf.ed.ac.uk // Ensure there are enough ports to use. 9006974Stjones1@inf.ed.ac.uk if (usedPorts < cachePorts) { 9016974Stjones1@inf.ed.ac.uk ++usedPorts; 9026974Stjones1@inf.ed.ac.uk if (sendStore(snd_data_pkt)) { 9036974Stjones1@inf.ed.ac.uk storePostSend(snd_data_pkt); 9046974Stjones1@inf.ed.ac.uk } else { 9056974Stjones1@inf.ed.ac.uk DPRINTF(IEW, "D-Cache became blocked when writing" 9066974Stjones1@inf.ed.ac.uk " [sn:%lli] second packet, will retry later\n", 9076974Stjones1@inf.ed.ac.uk inst->seqNum); 9086974Stjones1@inf.ed.ac.uk } 9096974Stjones1@inf.ed.ac.uk } else { 9106974Stjones1@inf.ed.ac.uk 9116974Stjones1@inf.ed.ac.uk // Store the packet for when there's free ports. 9126974Stjones1@inf.ed.ac.uk assert(pendingPkt == NULL); 9136974Stjones1@inf.ed.ac.uk pendingPkt = snd_data_pkt; 9146974Stjones1@inf.ed.ac.uk hasPendingPkt = true; 9156974Stjones1@inf.ed.ac.uk } 9166974Stjones1@inf.ed.ac.uk } else { 9176974Stjones1@inf.ed.ac.uk 9186974Stjones1@inf.ed.ac.uk // Not a split store. 
9196974Stjones1@inf.ed.ac.uk storePostSend(data_pkt); 9206974Stjones1@inf.ed.ac.uk } 9212292SN/A } 9222292SN/A } 9232292SN/A 9242292SN/A // Not sure this should set it to 0. 9252292SN/A usedPorts = 0; 9262292SN/A 9272292SN/A assert(stores >= 0 && storesToWB >= 0); 9282292SN/A} 9292292SN/A 9302292SN/A/*template <class Impl> 9312292SN/Avoid 9322292SN/ALSQUnit<Impl>::removeMSHR(InstSeqNum seqNum) 9332292SN/A{ 9342292SN/A list<InstSeqNum>::iterator mshr_it = find(mshrSeqNums.begin(), 9352292SN/A mshrSeqNums.end(), 9362292SN/A seqNum); 9372292SN/A 9382292SN/A if (mshr_it != mshrSeqNums.end()) { 9392292SN/A mshrSeqNums.erase(mshr_it); 9402292SN/A DPRINTF(LSQUnit, "Removing MSHR. count = %i\n",mshrSeqNums.size()); 9412292SN/A } 9422292SN/A}*/ 9432292SN/A 9442292SN/Atemplate <class Impl> 9452292SN/Avoid 9462292SN/ALSQUnit<Impl>::squash(const InstSeqNum &squashed_num) 9472292SN/A{ 9482292SN/A DPRINTF(LSQUnit, "Squashing until [sn:%lli]!" 9492329SN/A "(Loads:%i Stores:%i)\n", squashed_num, loads, stores); 9502292SN/A 9512292SN/A int load_idx = loadTail; 9522292SN/A decrLdIdx(load_idx); 9532292SN/A 9542292SN/A while (loads != 0 && loadQueue[load_idx]->seqNum > squashed_num) { 9557720Sgblack@eecs.umich.edu DPRINTF(LSQUnit,"Load Instruction PC %s squashed, " 9562292SN/A "[sn:%lli]\n", 9577720Sgblack@eecs.umich.edu loadQueue[load_idx]->pcState(), 9582292SN/A loadQueue[load_idx]->seqNum); 9592292SN/A 9602292SN/A if (isStalled() && load_idx == stallingLoadIdx) { 9612292SN/A stalled = false; 9622292SN/A stallingStoreIsn = 0; 9632292SN/A stallingLoadIdx = 0; 9642292SN/A } 9652292SN/A 9662329SN/A // Clear the smart pointer to make sure it is decremented. 9672731Sktlim@umich.edu loadQueue[load_idx]->setSquashed(); 9682292SN/A loadQueue[load_idx] = NULL; 9692292SN/A --loads; 9702292SN/A 9712292SN/A // Inefficient! 
9722292SN/A loadTail = load_idx; 9732292SN/A 9742292SN/A decrLdIdx(load_idx); 9752727Sktlim@umich.edu ++lsqSquashedLoads; 9762292SN/A } 9772292SN/A 9782292SN/A if (isLoadBlocked) { 9792292SN/A if (squashed_num < blockedLoadSeqNum) { 9802292SN/A isLoadBlocked = false; 9812292SN/A loadBlockedHandled = false; 9822292SN/A blockedLoadSeqNum = 0; 9832292SN/A } 9842292SN/A } 9852292SN/A 9864032Sktlim@umich.edu if (memDepViolator && squashed_num < memDepViolator->seqNum) { 9874032Sktlim@umich.edu memDepViolator = NULL; 9884032Sktlim@umich.edu } 9894032Sktlim@umich.edu 9902292SN/A int store_idx = storeTail; 9912292SN/A decrStIdx(store_idx); 9922292SN/A 9932292SN/A while (stores != 0 && 9942292SN/A storeQueue[store_idx].inst->seqNum > squashed_num) { 9952329SN/A // Instructions marked as can WB are already committed. 9962292SN/A if (storeQueue[store_idx].canWB) { 9972292SN/A break; 9982292SN/A } 9992292SN/A 10007720Sgblack@eecs.umich.edu DPRINTF(LSQUnit,"Store Instruction PC %s squashed, " 10012292SN/A "idx:%i [sn:%lli]\n", 10027720Sgblack@eecs.umich.edu storeQueue[store_idx].inst->pcState(), 10032292SN/A store_idx, storeQueue[store_idx].inst->seqNum); 10042292SN/A 10052329SN/A // I don't think this can happen. It should have been cleared 10062329SN/A // by the stalling load. 10072292SN/A if (isStalled() && 10082292SN/A storeQueue[store_idx].inst->seqNum == stallingStoreIsn) { 10092292SN/A panic("Is stalled should have been cleared by stalling load!\n"); 10102292SN/A stalled = false; 10112292SN/A stallingStoreIsn = 0; 10122292SN/A } 10132292SN/A 10142329SN/A // Clear the smart pointer to make sure it is decremented. 10152731Sktlim@umich.edu storeQueue[store_idx].inst->setSquashed(); 10162292SN/A storeQueue[store_idx].inst = NULL; 10172292SN/A storeQueue[store_idx].canWB = 0; 10182292SN/A 10194032Sktlim@umich.edu // Must delete request now that it wasn't handed off to 10204032Sktlim@umich.edu // memory. This is quite ugly. 
@todo: Figure out the proper 10214032Sktlim@umich.edu // place to really handle request deletes. 10224032Sktlim@umich.edu delete storeQueue[store_idx].req; 10236974Stjones1@inf.ed.ac.uk if (TheISA::HasUnalignedMemAcc && storeQueue[store_idx].isSplit) { 10246974Stjones1@inf.ed.ac.uk delete storeQueue[store_idx].sreqLow; 10256974Stjones1@inf.ed.ac.uk delete storeQueue[store_idx].sreqHigh; 10266974Stjones1@inf.ed.ac.uk 10276974Stjones1@inf.ed.ac.uk storeQueue[store_idx].sreqLow = NULL; 10286974Stjones1@inf.ed.ac.uk storeQueue[store_idx].sreqHigh = NULL; 10296974Stjones1@inf.ed.ac.uk } 10304032Sktlim@umich.edu 10312292SN/A storeQueue[store_idx].req = NULL; 10322292SN/A --stores; 10332292SN/A 10342292SN/A // Inefficient! 10352292SN/A storeTail = store_idx; 10362292SN/A 10372292SN/A decrStIdx(store_idx); 10382727Sktlim@umich.edu ++lsqSquashedStores; 10392292SN/A } 10402292SN/A} 10412292SN/A 10422292SN/Atemplate <class Impl> 10432292SN/Avoid 10443349Sbinkertn@umich.eduLSQUnit<Impl>::storePostSend(PacketPtr pkt) 10452693Sktlim@umich.edu{ 10462693Sktlim@umich.edu if (isStalled() && 10472693Sktlim@umich.edu storeQueue[storeWBIdx].inst->seqNum == stallingStoreIsn) { 10482693Sktlim@umich.edu DPRINTF(LSQUnit, "Unstalling, stalling store [sn:%lli] " 10492693Sktlim@umich.edu "load idx:%i\n", 10502693Sktlim@umich.edu stallingStoreIsn, stallingLoadIdx); 10512693Sktlim@umich.edu stalled = false; 10522693Sktlim@umich.edu stallingStoreIsn = 0; 10532693Sktlim@umich.edu iewStage->replayMemInst(loadQueue[stallingLoadIdx]); 10542693Sktlim@umich.edu } 10552693Sktlim@umich.edu 10562693Sktlim@umich.edu if (!storeQueue[storeWBIdx].inst->isStoreConditional()) { 10572693Sktlim@umich.edu // The store is basically completed at this time. This 10582693Sktlim@umich.edu // only works so long as the checker doesn't try to 10592693Sktlim@umich.edu // verify the value in memory for stores. 
10602693Sktlim@umich.edu storeQueue[storeWBIdx].inst->setCompleted(); 10618887Sgeoffrey.blake@arm.com 10622693Sktlim@umich.edu if (cpu->checker) { 10632732Sktlim@umich.edu cpu->checker->verify(storeQueue[storeWBIdx].inst); 10642693Sktlim@umich.edu } 10652693Sktlim@umich.edu } 10662693Sktlim@umich.edu 10678727Snilay@cs.wisc.edu if (needsTSO) { 10688727Snilay@cs.wisc.edu storeInFlight = true; 10698727Snilay@cs.wisc.edu } 10708727Snilay@cs.wisc.edu 10712693Sktlim@umich.edu incrStIdx(storeWBIdx); 10722693Sktlim@umich.edu} 10732693Sktlim@umich.edu 10742693Sktlim@umich.edutemplate <class Impl> 10752693Sktlim@umich.eduvoid 10762678Sktlim@umich.eduLSQUnit<Impl>::writeback(DynInstPtr &inst, PacketPtr pkt) 10772678Sktlim@umich.edu{ 10782678Sktlim@umich.edu iewStage->wakeCPU(); 10792678Sktlim@umich.edu 10802678Sktlim@umich.edu // Squashed instructions do not need to complete their access. 10812678Sktlim@umich.edu if (inst->isSquashed()) { 10822927Sktlim@umich.edu iewStage->decrWb(inst->seqNum); 10832678Sktlim@umich.edu assert(!inst->isStore()); 10842727Sktlim@umich.edu ++lsqIgnoredResponses; 10852678Sktlim@umich.edu return; 10862678Sktlim@umich.edu } 10872678Sktlim@umich.edu 10882678Sktlim@umich.edu if (!inst->isExecuted()) { 10892678Sktlim@umich.edu inst->setExecuted(); 10902678Sktlim@umich.edu 10912678Sktlim@umich.edu // Complete access to copy data to proper place. 
10922678Sktlim@umich.edu inst->completeAcc(pkt); 10932678Sktlim@umich.edu } 10942678Sktlim@umich.edu 10952678Sktlim@umich.edu // Need to insert instruction into queue to commit 10962678Sktlim@umich.edu iewStage->instToCommit(inst); 10972678Sktlim@umich.edu 10982678Sktlim@umich.edu iewStage->activityThisCycle(); 10997598Sminkyu.jeong@arm.com 11007598Sminkyu.jeong@arm.com // see if this load changed the PC 11017598Sminkyu.jeong@arm.com iewStage->checkMisprediction(inst); 11022678Sktlim@umich.edu} 11032678Sktlim@umich.edu 11042678Sktlim@umich.edutemplate <class Impl> 11052678Sktlim@umich.eduvoid 11062292SN/ALSQUnit<Impl>::completeStore(int store_idx) 11072292SN/A{ 11082292SN/A assert(storeQueue[store_idx].inst); 11092292SN/A storeQueue[store_idx].completed = true; 11102292SN/A --storesToWB; 11112292SN/A // A bit conservative because a store completion may not free up entries, 11122292SN/A // but hopefully avoids two store completions in one cycle from making 11132292SN/A // the CPU tick twice. 
11143126Sktlim@umich.edu cpu->wakeCPU(); 11152292SN/A cpu->activityThisCycle(); 11162292SN/A 11172292SN/A if (store_idx == storeHead) { 11182292SN/A do { 11192292SN/A incrStIdx(storeHead); 11202292SN/A 11212292SN/A --stores; 11222292SN/A } while (storeQueue[storeHead].completed && 11232292SN/A storeHead != storeTail); 11242292SN/A 11252292SN/A iewStage->updateLSQNextCycle = true; 11262292SN/A } 11272292SN/A 11282329SN/A DPRINTF(LSQUnit, "Completing store [sn:%lli], idx:%i, store head " 11292329SN/A "idx:%i\n", 11302329SN/A storeQueue[store_idx].inst->seqNum, store_idx, storeHead); 11312292SN/A 11329527SMatt.Horsnell@arm.com#if TRACING_ON 11339527SMatt.Horsnell@arm.com if (DTRACE(O3PipeView)) { 11349527SMatt.Horsnell@arm.com storeQueue[store_idx].inst->storeTick = 11359527SMatt.Horsnell@arm.com curTick() - storeQueue[store_idx].inst->fetchTick; 11369527SMatt.Horsnell@arm.com } 11379527SMatt.Horsnell@arm.com#endif 11389527SMatt.Horsnell@arm.com 11392292SN/A if (isStalled() && 11402292SN/A storeQueue[store_idx].inst->seqNum == stallingStoreIsn) { 11412292SN/A DPRINTF(LSQUnit, "Unstalling, stalling store [sn:%lli] " 11422292SN/A "load idx:%i\n", 11432292SN/A stallingStoreIsn, stallingLoadIdx); 11442292SN/A stalled = false; 11452292SN/A stallingStoreIsn = 0; 11462292SN/A iewStage->replayMemInst(loadQueue[stallingLoadIdx]); 11472292SN/A } 11482316SN/A 11492316SN/A storeQueue[store_idx].inst->setCompleted(); 11502329SN/A 11518727Snilay@cs.wisc.edu if (needsTSO) { 11528727Snilay@cs.wisc.edu storeInFlight = false; 11538727Snilay@cs.wisc.edu } 11548727Snilay@cs.wisc.edu 11552329SN/A // Tell the checker we've completed this instruction. Some stores 11562329SN/A // may get reported twice to the checker, but the checker can 11572329SN/A // handle that case. 
11582316SN/A if (cpu->checker) { 11592732Sktlim@umich.edu cpu->checker->verify(storeQueue[store_idx].inst); 11602316SN/A } 11612292SN/A} 11622292SN/A 11632292SN/Atemplate <class Impl> 11646974Stjones1@inf.ed.ac.ukbool 11656974Stjones1@inf.ed.ac.ukLSQUnit<Impl>::sendStore(PacketPtr data_pkt) 11666974Stjones1@inf.ed.ac.uk{ 11678975Sandreas.hansson@arm.com if (!dcachePort->sendTimingReq(data_pkt)) { 11686974Stjones1@inf.ed.ac.uk // Need to handle becoming blocked on a store. 11696974Stjones1@inf.ed.ac.uk isStoreBlocked = true; 11706974Stjones1@inf.ed.ac.uk ++lsqCacheBlocked; 11716974Stjones1@inf.ed.ac.uk assert(retryPkt == NULL); 11726974Stjones1@inf.ed.ac.uk retryPkt = data_pkt; 11736974Stjones1@inf.ed.ac.uk lsq->setRetryTid(lsqID); 11746974Stjones1@inf.ed.ac.uk return false; 11756974Stjones1@inf.ed.ac.uk } 11766974Stjones1@inf.ed.ac.uk return true; 11776974Stjones1@inf.ed.ac.uk} 11786974Stjones1@inf.ed.ac.uk 11796974Stjones1@inf.ed.ac.uktemplate <class Impl> 11802693Sktlim@umich.eduvoid 11812693Sktlim@umich.eduLSQUnit<Impl>::recvRetry() 11822693Sktlim@umich.edu{ 11832698Sktlim@umich.edu if (isStoreBlocked) { 11844985Sktlim@umich.edu DPRINTF(LSQUnit, "Receiving retry: store blocked\n"); 11852698Sktlim@umich.edu assert(retryPkt != NULL); 11862693Sktlim@umich.edu 11878587Snilay@cs.wisc.edu LSQSenderState *state = 11888587Snilay@cs.wisc.edu dynamic_cast<LSQSenderState *>(retryPkt->senderState); 11898587Snilay@cs.wisc.edu 11908975Sandreas.hansson@arm.com if (dcachePort->sendTimingReq(retryPkt)) { 11916974Stjones1@inf.ed.ac.uk // Don't finish the store unless this is the last packet. 
11928133SAli.Saidi@ARM.com if (!TheISA::HasUnalignedMemAcc || !state->pktToSend || 11938133SAli.Saidi@ARM.com state->pendingPacket == retryPkt) { 11948133SAli.Saidi@ARM.com state->pktToSend = false; 11956974Stjones1@inf.ed.ac.uk storePostSend(retryPkt); 11966974Stjones1@inf.ed.ac.uk } 11972699Sktlim@umich.edu retryPkt = NULL; 11982693Sktlim@umich.edu isStoreBlocked = false; 11996221Snate@binkert.org lsq->setRetryTid(InvalidThreadID); 12006974Stjones1@inf.ed.ac.uk 12016974Stjones1@inf.ed.ac.uk // Send any outstanding packet. 12026974Stjones1@inf.ed.ac.uk if (TheISA::HasUnalignedMemAcc && state->pktToSend) { 12036974Stjones1@inf.ed.ac.uk assert(state->pendingPacket); 12046974Stjones1@inf.ed.ac.uk if (sendStore(state->pendingPacket)) { 12056974Stjones1@inf.ed.ac.uk storePostSend(state->pendingPacket); 12066974Stjones1@inf.ed.ac.uk } 12076974Stjones1@inf.ed.ac.uk } 12082693Sktlim@umich.edu } else { 12092693Sktlim@umich.edu // Still blocked! 12102727Sktlim@umich.edu ++lsqCacheBlocked; 12112907Sktlim@umich.edu lsq->setRetryTid(lsqID); 12122693Sktlim@umich.edu } 12132693Sktlim@umich.edu } else if (isLoadBlocked) { 12142693Sktlim@umich.edu DPRINTF(LSQUnit, "Loads squash themselves and all younger insts, " 12152693Sktlim@umich.edu "no need to resend packet.\n"); 12162693Sktlim@umich.edu } else { 12172693Sktlim@umich.edu DPRINTF(LSQUnit, "Retry received but LSQ is no longer blocked.\n"); 12182693Sktlim@umich.edu } 12192693Sktlim@umich.edu} 12202693Sktlim@umich.edu 12212693Sktlim@umich.edutemplate <class Impl> 12222292SN/Ainline void 12239440SAndreas.Sandberg@ARM.comLSQUnit<Impl>::incrStIdx(int &store_idx) const 12242292SN/A{ 12252292SN/A if (++store_idx >= SQEntries) 12262292SN/A store_idx = 0; 12272292SN/A} 12282292SN/A 12292292SN/Atemplate <class Impl> 12302292SN/Ainline void 12319440SAndreas.Sandberg@ARM.comLSQUnit<Impl>::decrStIdx(int &store_idx) const 12322292SN/A{ 12332292SN/A if (--store_idx < 0) 12342292SN/A store_idx += SQEntries; 12352292SN/A} 12362292SN/A 
12372292SN/Atemplate <class Impl> 12382292SN/Ainline void 12399440SAndreas.Sandberg@ARM.comLSQUnit<Impl>::incrLdIdx(int &load_idx) const 12402292SN/A{ 12412292SN/A if (++load_idx >= LQEntries) 12422292SN/A load_idx = 0; 12432292SN/A} 12442292SN/A 12452292SN/Atemplate <class Impl> 12462292SN/Ainline void 12479440SAndreas.Sandberg@ARM.comLSQUnit<Impl>::decrLdIdx(int &load_idx) const 12482292SN/A{ 12492292SN/A if (--load_idx < 0) 12502292SN/A load_idx += LQEntries; 12512292SN/A} 12522329SN/A 12532329SN/Atemplate <class Impl> 12542329SN/Avoid 12559440SAndreas.Sandberg@ARM.comLSQUnit<Impl>::dumpInsts() const 12562329SN/A{ 12572329SN/A cprintf("Load store queue: Dumping instructions.\n"); 12582329SN/A cprintf("Load queue size: %i\n", loads); 12592329SN/A cprintf("Load queue: "); 12602329SN/A 12612329SN/A int load_idx = loadHead; 12622329SN/A 12632329SN/A while (load_idx != loadTail && loadQueue[load_idx]) { 12649440SAndreas.Sandberg@ARM.com const DynInstPtr &inst(loadQueue[load_idx]); 12659440SAndreas.Sandberg@ARM.com cprintf("%s.[sn:%i] ", inst->pcState(), inst->seqNum); 12662329SN/A 12672329SN/A incrLdIdx(load_idx); 12682329SN/A } 12699440SAndreas.Sandberg@ARM.com cprintf("\n"); 12702329SN/A 12712329SN/A cprintf("Store queue size: %i\n", stores); 12722329SN/A cprintf("Store queue: "); 12732329SN/A 12742329SN/A int store_idx = storeHead; 12752329SN/A 12762329SN/A while (store_idx != storeTail && storeQueue[store_idx].inst) { 12779440SAndreas.Sandberg@ARM.com const DynInstPtr &inst(storeQueue[store_idx].inst); 12789440SAndreas.Sandberg@ARM.com cprintf("%s.[sn:%i] ", inst->pcState(), inst->seqNum); 12792329SN/A 12802329SN/A incrStIdx(store_idx); 12812329SN/A } 12822329SN/A 12832329SN/A cprintf("\n"); 12842329SN/A} 1285