lsq_unit_impl.hh revision 12216
19814Sandreas.hansson@arm.com 22292SN/A/* 312216Snikos.nikoleris@arm.com * Copyright (c) 2010-2014, 2017 ARM Limited 410239Sbinhpham@cs.rutgers.edu * Copyright (c) 2013 Advanced Micro Devices, Inc. 57597Sminkyu.jeong@arm.com * All rights reserved 67597Sminkyu.jeong@arm.com * 77597Sminkyu.jeong@arm.com * The license below extends only to copyright in the software and shall 87597Sminkyu.jeong@arm.com * not be construed as granting a license to any other intellectual 97597Sminkyu.jeong@arm.com * property including but not limited to intellectual property relating 107597Sminkyu.jeong@arm.com * to a hardware implementation of the functionality of the software 117597Sminkyu.jeong@arm.com * licensed hereunder. You may use the software subject to the license 127597Sminkyu.jeong@arm.com * terms below provided that you ensure that this notice is replicated 137597Sminkyu.jeong@arm.com * unmodified and in its entirety in all distributions of the software, 147597Sminkyu.jeong@arm.com * modified or unmodified, in source code or in binary form. 157597Sminkyu.jeong@arm.com * 162292SN/A * Copyright (c) 2004-2005 The Regents of The University of Michigan 172292SN/A * All rights reserved. 182292SN/A * 192292SN/A * Redistribution and use in source and binary forms, with or without 202292SN/A * modification, are permitted provided that the following conditions are 212292SN/A * met: redistributions of source code must retain the above copyright 222292SN/A * notice, this list of conditions and the following disclaimer; 232292SN/A * redistributions in binary form must reproduce the above copyright 242292SN/A * notice, this list of conditions and the following disclaimer in the 252292SN/A * documentation and/or other materials provided with the distribution; 262292SN/A * neither the name of the copyright holders nor the names of its 272292SN/A * contributors may be used to endorse or promote products derived from 282292SN/A * this software without specific prior written permission. 
292292SN/A * 302292SN/A * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 312292SN/A * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 322292SN/A * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 332292SN/A * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 342292SN/A * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 352292SN/A * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 362292SN/A * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 372292SN/A * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 382292SN/A * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 392292SN/A * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 402292SN/A * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 412689Sktlim@umich.edu * 422689Sktlim@umich.edu * Authors: Kevin Lim 432689Sktlim@umich.edu * Korey Sewell 442292SN/A */ 452292SN/A 469944Smatt.horsnell@ARM.com#ifndef __CPU_O3_LSQ_UNIT_IMPL_HH__ 479944Smatt.horsnell@ARM.com#define __CPU_O3_LSQ_UNIT_IMPL_HH__ 489944Smatt.horsnell@ARM.com 498591Sgblack@eecs.umich.edu#include "arch/generic/debugfaults.hh" 503326Sktlim@umich.edu#include "arch/locked_mem.hh" 518229Snate@binkert.org#include "base/str.hh" 526658Snate@binkert.org#include "config/the_isa.hh" 538887Sgeoffrey.blake@arm.com#include "cpu/checker/cpu.hh" 542907Sktlim@umich.edu#include "cpu/o3/lsq.hh" 552292SN/A#include "cpu/o3/lsq_unit.hh" 568232Snate@binkert.org#include "debug/Activity.hh" 578232Snate@binkert.org#include "debug/IEW.hh" 588232Snate@binkert.org#include "debug/LSQUnit.hh" 599527SMatt.Horsnell@arm.com#include "debug/O3PipeView.hh" 602722Sktlim@umich.edu#include "mem/packet.hh" 612669Sktlim@umich.edu#include "mem/request.hh" 622292SN/A 632669Sktlim@umich.edutemplate<class Impl> 642678Sktlim@umich.eduLSQUnit<Impl>::WritebackEvent::WritebackEvent(DynInstPtr 
&_inst, PacketPtr _pkt, 652678Sktlim@umich.edu LSQUnit *lsq_ptr) 668581Ssteve.reinhardt@amd.com : Event(Default_Pri, AutoDelete), 678581Ssteve.reinhardt@amd.com inst(_inst), pkt(_pkt), lsqPtr(lsq_ptr) 682292SN/A{ 692292SN/A} 702292SN/A 712669Sktlim@umich.edutemplate<class Impl> 722292SN/Avoid 732678Sktlim@umich.eduLSQUnit<Impl>::WritebackEvent::process() 742292SN/A{ 759444SAndreas.Sandberg@ARM.com assert(!lsqPtr->cpu->switchedOut()); 769444SAndreas.Sandberg@ARM.com 779444SAndreas.Sandberg@ARM.com lsqPtr->writeback(inst, pkt); 784319Sktlim@umich.edu 794319Sktlim@umich.edu if (pkt->senderState) 804319Sktlim@umich.edu delete pkt->senderState; 814319Sktlim@umich.edu 824319Sktlim@umich.edu delete pkt->req; 832678Sktlim@umich.edu delete pkt; 842678Sktlim@umich.edu} 852292SN/A 862678Sktlim@umich.edutemplate<class Impl> 872678Sktlim@umich.educonst char * 885336Shines@cs.fsu.eduLSQUnit<Impl>::WritebackEvent::description() const 892678Sktlim@umich.edu{ 904873Sstever@eecs.umich.edu return "Store writeback"; 912678Sktlim@umich.edu} 922292SN/A 932678Sktlim@umich.edutemplate<class Impl> 942678Sktlim@umich.eduvoid 952678Sktlim@umich.eduLSQUnit<Impl>::completeDataAccess(PacketPtr pkt) 962678Sktlim@umich.edu{ 972678Sktlim@umich.edu LSQSenderState *state = dynamic_cast<LSQSenderState *>(pkt->senderState); 982678Sktlim@umich.edu DynInstPtr inst = state->inst; 997852SMatt.Horsnell@arm.com DPRINTF(IEW, "Writeback event [sn:%lli].\n", inst->seqNum); 1007852SMatt.Horsnell@arm.com DPRINTF(Activity, "Activity: Writeback event [sn:%lli].\n", inst->seqNum); 1012344SN/A 10210333Smitch.hayenga@arm.com if (state->cacheBlocked) { 10310333Smitch.hayenga@arm.com // This is the first half of a previous split load, 10410333Smitch.hayenga@arm.com // where the 2nd half blocked, ignore this response 10510333Smitch.hayenga@arm.com DPRINTF(IEW, "[sn:%lli]: Response from first half of earlier " 10610333Smitch.hayenga@arm.com "blocked split load recieved. 
Ignoring.\n", inst->seqNum); 10710333Smitch.hayenga@arm.com delete state; 10810333Smitch.hayenga@arm.com return; 10910333Smitch.hayenga@arm.com } 1102678Sktlim@umich.edu 1116974Stjones1@inf.ed.ac.uk // If this is a split access, wait until all packets are received. 1126974Stjones1@inf.ed.ac.uk if (TheISA::HasUnalignedMemAcc && !state->complete()) { 1136974Stjones1@inf.ed.ac.uk return; 1146974Stjones1@inf.ed.ac.uk } 1156974Stjones1@inf.ed.ac.uk 1169444SAndreas.Sandberg@ARM.com assert(!cpu->switchedOut()); 11710327Smitch.hayenga@arm.com if (!inst->isSquashed()) { 1182678Sktlim@umich.edu if (!state->noWB) { 11912216Snikos.nikoleris@arm.com // Only loads and store conditionals perform the writeback 12012216Snikos.nikoleris@arm.com // after receving the response from the memory 12112216Snikos.nikoleris@arm.com assert(inst->isLoad() || inst->isStoreConditional()); 1226974Stjones1@inf.ed.ac.uk if (!TheISA::HasUnalignedMemAcc || !state->isSplit || 1236974Stjones1@inf.ed.ac.uk !state->isLoad) { 1246974Stjones1@inf.ed.ac.uk writeback(inst, pkt); 1256974Stjones1@inf.ed.ac.uk } else { 1266974Stjones1@inf.ed.ac.uk writeback(inst, state->mainPkt); 1276974Stjones1@inf.ed.ac.uk } 1282678Sktlim@umich.edu } 1292678Sktlim@umich.edu 1302678Sktlim@umich.edu if (inst->isStore()) { 1312678Sktlim@umich.edu completeStore(state->idx); 1322678Sktlim@umich.edu } 1332344SN/A } 1342307SN/A 1356974Stjones1@inf.ed.ac.uk if (TheISA::HasUnalignedMemAcc && state->isSplit && state->isLoad) { 1366974Stjones1@inf.ed.ac.uk delete state->mainPkt->req; 1376974Stjones1@inf.ed.ac.uk delete state->mainPkt; 1386974Stjones1@inf.ed.ac.uk } 13910020Smatt.horsnell@ARM.com 14010020Smatt.horsnell@ARM.com pkt->req->setAccessLatency(); 14110023Smatt.horsnell@ARM.com cpu->ppDataAccessComplete->notify(std::make_pair(inst, pkt)); 14210023Smatt.horsnell@ARM.com 1432678Sktlim@umich.edu delete state; 1442292SN/A} 1452292SN/A 1462292SN/Atemplate <class Impl> 1472292SN/ALSQUnit<Impl>::LSQUnit() 1488545Ssaidi@eecs.umich.edu : 
loads(0), stores(0), storesToWB(0), cacheBlockMask(0), stalled(false), 14911243Spau.cabre@metempsy.com isStoreBlocked(false), storeInFlight(false), hasPendingPkt(false), 15011243Spau.cabre@metempsy.com pendingPkt(nullptr) 1512292SN/A{ 1522292SN/A} 1532292SN/A 1542292SN/Atemplate<class Impl> 1552292SN/Avoid 1565529Snate@binkert.orgLSQUnit<Impl>::init(O3CPU *cpu_ptr, IEW *iew_ptr, DerivO3CPUParams *params, 1575529Snate@binkert.org LSQ *lsq_ptr, unsigned maxLQEntries, unsigned maxSQEntries, 1585529Snate@binkert.org unsigned id) 1592292SN/A{ 1604329Sktlim@umich.edu cpu = cpu_ptr; 1614329Sktlim@umich.edu iewStage = iew_ptr; 1624329Sktlim@umich.edu 1632907Sktlim@umich.edu lsq = lsq_ptr; 1642907Sktlim@umich.edu 1652292SN/A lsqID = id; 1662292SN/A 16710175SMitch.Hayenga@ARM.com DPRINTF(LSQUnit, "Creating LSQUnit%i object.\n",id); 16810175SMitch.Hayenga@ARM.com 1692329SN/A // Add 1 for the sentinel entry (they are circular queues). 1702329SN/A LQEntries = maxLQEntries + 1; 1712329SN/A SQEntries = maxSQEntries + 1; 1722292SN/A 1739936SFaissal.Sleiman@arm.com //Due to uint8_t index in LSQSenderState 1749936SFaissal.Sleiman@arm.com assert(LQEntries <= 256); 1759936SFaissal.Sleiman@arm.com assert(SQEntries <= 256); 1769936SFaissal.Sleiman@arm.com 1772292SN/A loadQueue.resize(LQEntries); 1782292SN/A storeQueue.resize(SQEntries); 1792292SN/A 1808199SAli.Saidi@ARM.com depCheckShift = params->LSQDepCheckShift; 1818199SAli.Saidi@ARM.com checkLoads = params->LSQCheckLoads; 18211780Sarthur.perais@inria.fr cacheStorePorts = params->cacheStorePorts; 1839444SAndreas.Sandberg@ARM.com needsTSO = params->needsTSO; 1849444SAndreas.Sandberg@ARM.com 1859444SAndreas.Sandberg@ARM.com resetState(); 1869444SAndreas.Sandberg@ARM.com} 1879444SAndreas.Sandberg@ARM.com 1889444SAndreas.Sandberg@ARM.com 1899444SAndreas.Sandberg@ARM.comtemplate<class Impl> 1909444SAndreas.Sandberg@ARM.comvoid 1919444SAndreas.Sandberg@ARM.comLSQUnit<Impl>::resetState() 1929444SAndreas.Sandberg@ARM.com{ 
1939444SAndreas.Sandberg@ARM.com loads = stores = storesToWB = 0; 1948199SAli.Saidi@ARM.com 1952292SN/A loadHead = loadTail = 0; 1962292SN/A 1972292SN/A storeHead = storeWBIdx = storeTail = 0; 1982292SN/A 19911780Sarthur.perais@inria.fr usedStorePorts = 0; 2002292SN/A 2013492Sktlim@umich.edu retryPkt = NULL; 2022329SN/A memDepViolator = NULL; 2032292SN/A 2049444SAndreas.Sandberg@ARM.com stalled = false; 2059444SAndreas.Sandberg@ARM.com 2069814Sandreas.hansson@arm.com cacheBlockMask = ~(cpu->cacheLineSize() - 1); 2072292SN/A} 2082292SN/A 2092292SN/Atemplate<class Impl> 2102292SN/Astd::string 2112292SN/ALSQUnit<Impl>::name() const 2122292SN/A{ 2132292SN/A if (Impl::MaxThreads == 1) { 2142292SN/A return iewStage->name() + ".lsq"; 2152292SN/A } else { 21610386Sandreas.hansson@arm.com return iewStage->name() + ".lsq.thread" + std::to_string(lsqID); 2172292SN/A } 2182292SN/A} 2192292SN/A 2202292SN/Atemplate<class Impl> 2212292SN/Avoid 2222727Sktlim@umich.eduLSQUnit<Impl>::regStats() 2232727Sktlim@umich.edu{ 2242727Sktlim@umich.edu lsqForwLoads 2252727Sktlim@umich.edu .name(name() + ".forwLoads") 2262727Sktlim@umich.edu .desc("Number of loads that had data forwarded from stores"); 2272727Sktlim@umich.edu 2282727Sktlim@umich.edu invAddrLoads 2292727Sktlim@umich.edu .name(name() + ".invAddrLoads") 2302727Sktlim@umich.edu .desc("Number of loads ignored due to an invalid address"); 2312727Sktlim@umich.edu 2322727Sktlim@umich.edu lsqSquashedLoads 2332727Sktlim@umich.edu .name(name() + ".squashedLoads") 2342727Sktlim@umich.edu .desc("Number of loads squashed"); 2352727Sktlim@umich.edu 2362727Sktlim@umich.edu lsqIgnoredResponses 2372727Sktlim@umich.edu .name(name() + ".ignoredResponses") 2382727Sktlim@umich.edu .desc("Number of memory responses ignored because the instruction is squashed"); 2392727Sktlim@umich.edu 2402361SN/A lsqMemOrderViolation 2412361SN/A .name(name() + ".memOrderViolation") 2422361SN/A .desc("Number of memory ordering violations"); 2432361SN/A 
2442727Sktlim@umich.edu lsqSquashedStores 2452727Sktlim@umich.edu .name(name() + ".squashedStores") 2462727Sktlim@umich.edu .desc("Number of stores squashed"); 2472727Sktlim@umich.edu 2482727Sktlim@umich.edu invAddrSwpfs 2492727Sktlim@umich.edu .name(name() + ".invAddrSwpfs") 2502727Sktlim@umich.edu .desc("Number of software prefetches ignored due to an invalid address"); 2512727Sktlim@umich.edu 2522727Sktlim@umich.edu lsqBlockedLoads 2532727Sktlim@umich.edu .name(name() + ".blockedLoads") 2542727Sktlim@umich.edu .desc("Number of blocked loads due to partial load-store forwarding"); 2552727Sktlim@umich.edu 2562727Sktlim@umich.edu lsqRescheduledLoads 2572727Sktlim@umich.edu .name(name() + ".rescheduledLoads") 2582727Sktlim@umich.edu .desc("Number of loads that were rescheduled"); 2592727Sktlim@umich.edu 2602727Sktlim@umich.edu lsqCacheBlocked 2612727Sktlim@umich.edu .name(name() + ".cacheBlocked") 2622727Sktlim@umich.edu .desc("Number of times an access to memory failed due to the cache being blocked"); 2632727Sktlim@umich.edu} 2642727Sktlim@umich.edu 2652727Sktlim@umich.edutemplate<class Impl> 2662727Sktlim@umich.eduvoid 2678922Swilliam.wang@arm.comLSQUnit<Impl>::setDcachePort(MasterPort *dcache_port) 2684329Sktlim@umich.edu{ 2694329Sktlim@umich.edu dcachePort = dcache_port; 2704329Sktlim@umich.edu} 2714329Sktlim@umich.edu 2724329Sktlim@umich.edutemplate<class Impl> 2734329Sktlim@umich.eduvoid 2742292SN/ALSQUnit<Impl>::clearLQ() 2752292SN/A{ 2762292SN/A loadQueue.clear(); 2772292SN/A} 2782292SN/A 2792292SN/Atemplate<class Impl> 2802292SN/Avoid 2812292SN/ALSQUnit<Impl>::clearSQ() 2822292SN/A{ 2832292SN/A storeQueue.clear(); 2842292SN/A} 2852292SN/A 2862292SN/Atemplate<class Impl> 2872292SN/Avoid 2889444SAndreas.Sandberg@ARM.comLSQUnit<Impl>::drainSanityCheck() const 2892307SN/A{ 2909444SAndreas.Sandberg@ARM.com for (int i = 0; i < loadQueue.size(); ++i) 2912367SN/A assert(!loadQueue[i]); 2922307SN/A 2932329SN/A assert(storesToWB == 0); 
2949444SAndreas.Sandberg@ARM.com assert(!retryPkt); 2952307SN/A} 2962307SN/A 2972307SN/Atemplate<class Impl> 2982307SN/Avoid 2992307SN/ALSQUnit<Impl>::takeOverFrom() 3002307SN/A{ 3019444SAndreas.Sandberg@ARM.com resetState(); 3022307SN/A} 3032307SN/A 3042307SN/Atemplate<class Impl> 3052307SN/Avoid 3062292SN/ALSQUnit<Impl>::resizeLQ(unsigned size) 3072292SN/A{ 3082329SN/A unsigned size_plus_sentinel = size + 1; 3092329SN/A assert(size_plus_sentinel >= LQEntries); 3102292SN/A 3112329SN/A if (size_plus_sentinel > LQEntries) { 3122329SN/A while (size_plus_sentinel > loadQueue.size()) { 3132292SN/A DynInstPtr dummy; 3142292SN/A loadQueue.push_back(dummy); 3152292SN/A LQEntries++; 3162292SN/A } 3172292SN/A } else { 3182329SN/A LQEntries = size_plus_sentinel; 3192292SN/A } 3202292SN/A 3219936SFaissal.Sleiman@arm.com assert(LQEntries <= 256); 3222292SN/A} 3232292SN/A 3242292SN/Atemplate<class Impl> 3252292SN/Avoid 3262292SN/ALSQUnit<Impl>::resizeSQ(unsigned size) 3272292SN/A{ 3282329SN/A unsigned size_plus_sentinel = size + 1; 3292329SN/A if (size_plus_sentinel > SQEntries) { 3302329SN/A while (size_plus_sentinel > storeQueue.size()) { 3312292SN/A SQEntry dummy; 3322292SN/A storeQueue.push_back(dummy); 3332292SN/A SQEntries++; 3342292SN/A } 3352292SN/A } else { 3362329SN/A SQEntries = size_plus_sentinel; 3372292SN/A } 3389936SFaissal.Sleiman@arm.com 3399936SFaissal.Sleiman@arm.com assert(SQEntries <= 256); 3402292SN/A} 3412292SN/A 3422292SN/Atemplate <class Impl> 3432292SN/Avoid 3442292SN/ALSQUnit<Impl>::insert(DynInstPtr &inst) 3452292SN/A{ 3462292SN/A assert(inst->isMemRef()); 3472292SN/A 3482292SN/A assert(inst->isLoad() || inst->isStore()); 3492292SN/A 3502292SN/A if (inst->isLoad()) { 3512292SN/A insertLoad(inst); 3522292SN/A } else { 3532292SN/A insertStore(inst); 3542292SN/A } 3552292SN/A 3562292SN/A inst->setInLSQ(); 3572292SN/A} 3582292SN/A 3592292SN/Atemplate <class Impl> 3602292SN/Avoid 3612292SN/ALSQUnit<Impl>::insertLoad(DynInstPtr &load_inst) 3622292SN/A{ 
3632329SN/A assert((loadTail + 1) % LQEntries != loadHead); 3642329SN/A assert(loads < LQEntries); 3652292SN/A 3667720Sgblack@eecs.umich.edu DPRINTF(LSQUnit, "Inserting load PC %s, idx:%i [sn:%lli]\n", 3677720Sgblack@eecs.umich.edu load_inst->pcState(), loadTail, load_inst->seqNum); 3682292SN/A 3692292SN/A load_inst->lqIdx = loadTail; 3702292SN/A 3712292SN/A if (stores == 0) { 3722292SN/A load_inst->sqIdx = -1; 3732292SN/A } else { 3742292SN/A load_inst->sqIdx = storeTail; 3752292SN/A } 3762292SN/A 3772292SN/A loadQueue[loadTail] = load_inst; 3782292SN/A 3792292SN/A incrLdIdx(loadTail); 3802292SN/A 3812292SN/A ++loads; 3822292SN/A} 3832292SN/A 3842292SN/Atemplate <class Impl> 3852292SN/Avoid 3862292SN/ALSQUnit<Impl>::insertStore(DynInstPtr &store_inst) 3872292SN/A{ 3882292SN/A // Make sure it is not full before inserting an instruction. 3892292SN/A assert((storeTail + 1) % SQEntries != storeHead); 3902292SN/A assert(stores < SQEntries); 3912292SN/A 3927720Sgblack@eecs.umich.edu DPRINTF(LSQUnit, "Inserting store PC %s, idx:%i [sn:%lli]\n", 3937720Sgblack@eecs.umich.edu store_inst->pcState(), storeTail, store_inst->seqNum); 3942292SN/A 3952292SN/A store_inst->sqIdx = storeTail; 3962292SN/A store_inst->lqIdx = loadTail; 3972292SN/A 3982292SN/A storeQueue[storeTail] = SQEntry(store_inst); 3992292SN/A 4002292SN/A incrStIdx(storeTail); 4012292SN/A 4022292SN/A ++stores; 4032292SN/A} 4042292SN/A 4052292SN/Atemplate <class Impl> 4062292SN/Atypename Impl::DynInstPtr 4072292SN/ALSQUnit<Impl>::getMemDepViolator() 4082292SN/A{ 4092292SN/A DynInstPtr temp = memDepViolator; 4102292SN/A 4112292SN/A memDepViolator = NULL; 4122292SN/A 4132292SN/A return temp; 4142292SN/A} 4152292SN/A 4162292SN/Atemplate <class Impl> 4172292SN/Aunsigned 41810239Sbinhpham@cs.rutgers.eduLSQUnit<Impl>::numFreeLoadEntries() 4192292SN/A{ 42010239Sbinhpham@cs.rutgers.edu //LQ has an extra dummy entry to differentiate 42110239Sbinhpham@cs.rutgers.edu //empty/full conditions. 
Subtract 1 from the free entries. 42210239Sbinhpham@cs.rutgers.edu DPRINTF(LSQUnit, "LQ size: %d, #loads occupied: %d\n", LQEntries, loads); 42310239Sbinhpham@cs.rutgers.edu return LQEntries - loads - 1; 42410239Sbinhpham@cs.rutgers.edu} 4252292SN/A 42610239Sbinhpham@cs.rutgers.edutemplate <class Impl> 42710239Sbinhpham@cs.rutgers.eduunsigned 42810239Sbinhpham@cs.rutgers.eduLSQUnit<Impl>::numFreeStoreEntries() 42910239Sbinhpham@cs.rutgers.edu{ 43010239Sbinhpham@cs.rutgers.edu //SQ has an extra dummy entry to differentiate 43110239Sbinhpham@cs.rutgers.edu //empty/full conditions. Subtract 1 from the free entries. 43210239Sbinhpham@cs.rutgers.edu DPRINTF(LSQUnit, "SQ size: %d, #stores occupied: %d\n", SQEntries, stores); 43310239Sbinhpham@cs.rutgers.edu return SQEntries - stores - 1; 43410239Sbinhpham@cs.rutgers.edu 43510239Sbinhpham@cs.rutgers.edu } 4362292SN/A 4372292SN/Atemplate <class Impl> 4388545Ssaidi@eecs.umich.eduvoid 4398545Ssaidi@eecs.umich.eduLSQUnit<Impl>::checkSnoop(PacketPtr pkt) 4408545Ssaidi@eecs.umich.edu{ 44111357Sstephan.diestelhorst@arm.com // Should only ever get invalidations in here 44211357Sstephan.diestelhorst@arm.com assert(pkt->isInvalidate()); 44311357Sstephan.diestelhorst@arm.com 4448545Ssaidi@eecs.umich.edu int load_idx = loadHead; 44510030SAli.Saidi@ARM.com DPRINTF(LSQUnit, "Got snoop for address %#x\n", pkt->getAddr()); 4468545Ssaidi@eecs.umich.edu 44711356Skrinat01@arm.com // Only Invalidate packet calls checkSnoop 44811356Skrinat01@arm.com assert(pkt->isInvalidate()); 44910030SAli.Saidi@ARM.com for (int x = 0; x < cpu->numContexts(); x++) { 4509383SAli.Saidi@ARM.com ThreadContext *tc = cpu->getContext(x); 4519383SAli.Saidi@ARM.com bool no_squash = cpu->thread[x]->noSquashFromTC; 4529383SAli.Saidi@ARM.com cpu->thread[x]->noSquashFromTC = true; 4539383SAli.Saidi@ARM.com TheISA::handleLockedSnoop(tc, pkt, cacheBlockMask); 4549383SAli.Saidi@ARM.com cpu->thread[x]->noSquashFromTC = no_squash; 4559383SAli.Saidi@ARM.com } 
4569383SAli.Saidi@ARM.com 45710030SAli.Saidi@ARM.com Addr invalidate_addr = pkt->getAddr() & cacheBlockMask; 45810030SAli.Saidi@ARM.com 45910030SAli.Saidi@ARM.com DynInstPtr ld_inst = loadQueue[load_idx]; 46010030SAli.Saidi@ARM.com if (ld_inst) { 46111097Songal@cs.wisc.edu Addr load_addr_low = ld_inst->physEffAddrLow & cacheBlockMask; 46211097Songal@cs.wisc.edu Addr load_addr_high = ld_inst->physEffAddrHigh & cacheBlockMask; 46311097Songal@cs.wisc.edu 46410030SAli.Saidi@ARM.com // Check that this snoop didn't just invalidate our lock flag 46511097Songal@cs.wisc.edu if (ld_inst->effAddrValid() && (load_addr_low == invalidate_addr 46611097Songal@cs.wisc.edu || load_addr_high == invalidate_addr) 46711097Songal@cs.wisc.edu && ld_inst->memReqFlags & Request::LLSC) 46810030SAli.Saidi@ARM.com TheISA::handleLockedSnoopHit(ld_inst.get()); 46910030SAli.Saidi@ARM.com } 47010030SAli.Saidi@ARM.com 4718545Ssaidi@eecs.umich.edu // If this is the only load in the LSQ we don't care 4728545Ssaidi@eecs.umich.edu if (load_idx == loadTail) 4738545Ssaidi@eecs.umich.edu return; 47410030SAli.Saidi@ARM.com 4758545Ssaidi@eecs.umich.edu incrLdIdx(load_idx); 4768545Ssaidi@eecs.umich.edu 47710149Smarco.elver@ed.ac.uk bool force_squash = false; 47810149Smarco.elver@ed.ac.uk 4798545Ssaidi@eecs.umich.edu while (load_idx != loadTail) { 4808545Ssaidi@eecs.umich.edu DynInstPtr ld_inst = loadQueue[load_idx]; 4818545Ssaidi@eecs.umich.edu 48210824SAndreas.Sandberg@ARM.com if (!ld_inst->effAddrValid() || ld_inst->strictlyOrdered()) { 4838545Ssaidi@eecs.umich.edu incrLdIdx(load_idx); 4848545Ssaidi@eecs.umich.edu continue; 4858545Ssaidi@eecs.umich.edu } 4868545Ssaidi@eecs.umich.edu 48711097Songal@cs.wisc.edu Addr load_addr_low = ld_inst->physEffAddrLow & cacheBlockMask; 48811097Songal@cs.wisc.edu Addr load_addr_high = ld_inst->physEffAddrHigh & cacheBlockMask; 48911097Songal@cs.wisc.edu 4908545Ssaidi@eecs.umich.edu DPRINTF(LSQUnit, "-- inst [sn:%lli] load_addr: %#x to pktAddr:%#x\n", 
49111097Songal@cs.wisc.edu ld_inst->seqNum, load_addr_low, invalidate_addr); 4928545Ssaidi@eecs.umich.edu 49311097Songal@cs.wisc.edu if ((load_addr_low == invalidate_addr 49411097Songal@cs.wisc.edu || load_addr_high == invalidate_addr) || force_squash) { 49510149Smarco.elver@ed.ac.uk if (needsTSO) { 49610149Smarco.elver@ed.ac.uk // If we have a TSO system, as all loads must be ordered with 49710149Smarco.elver@ed.ac.uk // all other loads, this load as well as *all* subsequent loads 49810149Smarco.elver@ed.ac.uk // need to be squashed to prevent possible load reordering. 49910149Smarco.elver@ed.ac.uk force_squash = true; 50010149Smarco.elver@ed.ac.uk } 50110149Smarco.elver@ed.ac.uk if (ld_inst->possibleLoadViolation() || force_squash) { 5028545Ssaidi@eecs.umich.edu DPRINTF(LSQUnit, "Conflicting load at addr %#x [sn:%lli]\n", 50310030SAli.Saidi@ARM.com pkt->getAddr(), ld_inst->seqNum); 5048545Ssaidi@eecs.umich.edu 5058545Ssaidi@eecs.umich.edu // Mark the load for re-execution 50610474Sandreas.hansson@arm.com ld_inst->fault = std::make_shared<ReExec>(); 5078545Ssaidi@eecs.umich.edu } else { 50810030SAli.Saidi@ARM.com DPRINTF(LSQUnit, "HitExternal Snoop for addr %#x [sn:%lli]\n", 50910030SAli.Saidi@ARM.com pkt->getAddr(), ld_inst->seqNum); 51010030SAli.Saidi@ARM.com 51110030SAli.Saidi@ARM.com // Make sure that we don't lose a snoop hitting a LOCKED 51210030SAli.Saidi@ARM.com // address since the LOCK* flags don't get updated until 51310030SAli.Saidi@ARM.com // commit. 
51410030SAli.Saidi@ARM.com if (ld_inst->memReqFlags & Request::LLSC) 51510030SAli.Saidi@ARM.com TheISA::handleLockedSnoopHit(ld_inst.get()); 51610030SAli.Saidi@ARM.com 5178545Ssaidi@eecs.umich.edu // If a older load checks this and it's true 5188545Ssaidi@eecs.umich.edu // then we might have missed the snoop 5198545Ssaidi@eecs.umich.edu // in which case we need to invalidate to be sure 5209046SAli.Saidi@ARM.com ld_inst->hitExternalSnoop(true); 5218545Ssaidi@eecs.umich.edu } 5228545Ssaidi@eecs.umich.edu } 5238545Ssaidi@eecs.umich.edu incrLdIdx(load_idx); 5248545Ssaidi@eecs.umich.edu } 5258545Ssaidi@eecs.umich.edu return; 5268545Ssaidi@eecs.umich.edu} 5278545Ssaidi@eecs.umich.edu 5288545Ssaidi@eecs.umich.edutemplate <class Impl> 5292292SN/AFault 5308199SAli.Saidi@ARM.comLSQUnit<Impl>::checkViolations(int load_idx, DynInstPtr &inst) 5318199SAli.Saidi@ARM.com{ 5328199SAli.Saidi@ARM.com Addr inst_eff_addr1 = inst->effAddr >> depCheckShift; 5338199SAli.Saidi@ARM.com Addr inst_eff_addr2 = (inst->effAddr + inst->effSize - 1) >> depCheckShift; 5348199SAli.Saidi@ARM.com 5358199SAli.Saidi@ARM.com /** @todo in theory you only need to check an instruction that has executed 5368199SAli.Saidi@ARM.com * however, there isn't a good way in the pipeline at the moment to check 5378199SAli.Saidi@ARM.com * all instructions that will execute before the store writes back. Thus, 5388199SAli.Saidi@ARM.com * like the implementation that came before it, we're overly conservative. 
5398199SAli.Saidi@ARM.com */ 5408199SAli.Saidi@ARM.com while (load_idx != loadTail) { 5418199SAli.Saidi@ARM.com DynInstPtr ld_inst = loadQueue[load_idx]; 54210824SAndreas.Sandberg@ARM.com if (!ld_inst->effAddrValid() || ld_inst->strictlyOrdered()) { 5438199SAli.Saidi@ARM.com incrLdIdx(load_idx); 5448199SAli.Saidi@ARM.com continue; 5458199SAli.Saidi@ARM.com } 5468199SAli.Saidi@ARM.com 5478199SAli.Saidi@ARM.com Addr ld_eff_addr1 = ld_inst->effAddr >> depCheckShift; 5488199SAli.Saidi@ARM.com Addr ld_eff_addr2 = 5498199SAli.Saidi@ARM.com (ld_inst->effAddr + ld_inst->effSize - 1) >> depCheckShift; 5508199SAli.Saidi@ARM.com 5518272SAli.Saidi@ARM.com if (inst_eff_addr2 >= ld_eff_addr1 && inst_eff_addr1 <= ld_eff_addr2) { 5528545Ssaidi@eecs.umich.edu if (inst->isLoad()) { 5538545Ssaidi@eecs.umich.edu // If this load is to the same block as an external snoop 5548545Ssaidi@eecs.umich.edu // invalidate that we've observed then the load needs to be 5558545Ssaidi@eecs.umich.edu // squashed as it could have newer data 5569046SAli.Saidi@ARM.com if (ld_inst->hitExternalSnoop()) { 5578545Ssaidi@eecs.umich.edu if (!memDepViolator || 5588545Ssaidi@eecs.umich.edu ld_inst->seqNum < memDepViolator->seqNum) { 5598545Ssaidi@eecs.umich.edu DPRINTF(LSQUnit, "Detected fault with inst [sn:%lli] " 5608592Sgblack@eecs.umich.edu "and [sn:%lli] at address %#x\n", 5618592Sgblack@eecs.umich.edu inst->seqNum, ld_inst->seqNum, ld_eff_addr1); 5628545Ssaidi@eecs.umich.edu memDepViolator = ld_inst; 5638199SAli.Saidi@ARM.com 5648545Ssaidi@eecs.umich.edu ++lsqMemOrderViolation; 5658199SAli.Saidi@ARM.com 56610474Sandreas.hansson@arm.com return std::make_shared<GenericISA::M5PanicFault>( 56710474Sandreas.hansson@arm.com "Detected fault with inst [sn:%lli] and " 56810474Sandreas.hansson@arm.com "[sn:%lli] at address %#x\n", 56910474Sandreas.hansson@arm.com inst->seqNum, ld_inst->seqNum, ld_eff_addr1); 5708545Ssaidi@eecs.umich.edu } 5718545Ssaidi@eecs.umich.edu } 5728199SAli.Saidi@ARM.com 
5738545Ssaidi@eecs.umich.edu // Otherwise, mark the load has a possible load violation 5748545Ssaidi@eecs.umich.edu // and if we see a snoop before it's commited, we need to squash 5759046SAli.Saidi@ARM.com ld_inst->possibleLoadViolation(true); 57610575SMarco.Elver@ARM.com DPRINTF(LSQUnit, "Found possible load violation at addr: %#x" 5778545Ssaidi@eecs.umich.edu " between instructions [sn:%lli] and [sn:%lli]\n", 5788545Ssaidi@eecs.umich.edu inst_eff_addr1, inst->seqNum, ld_inst->seqNum); 5798545Ssaidi@eecs.umich.edu } else { 5808545Ssaidi@eecs.umich.edu // A load/store incorrectly passed this store. 5818545Ssaidi@eecs.umich.edu // Check if we already have a violator, or if it's newer 5828545Ssaidi@eecs.umich.edu // squash and refetch. 5838545Ssaidi@eecs.umich.edu if (memDepViolator && ld_inst->seqNum > memDepViolator->seqNum) 5848545Ssaidi@eecs.umich.edu break; 5858545Ssaidi@eecs.umich.edu 5868592Sgblack@eecs.umich.edu DPRINTF(LSQUnit, "Detected fault with inst [sn:%lli] and " 5878592Sgblack@eecs.umich.edu "[sn:%lli] at address %#x\n", 5888592Sgblack@eecs.umich.edu inst->seqNum, ld_inst->seqNum, ld_eff_addr1); 5898545Ssaidi@eecs.umich.edu memDepViolator = ld_inst; 5908545Ssaidi@eecs.umich.edu 5918545Ssaidi@eecs.umich.edu ++lsqMemOrderViolation; 5928545Ssaidi@eecs.umich.edu 59310474Sandreas.hansson@arm.com return std::make_shared<GenericISA::M5PanicFault>( 59410474Sandreas.hansson@arm.com "Detected fault with " 59510474Sandreas.hansson@arm.com "inst [sn:%lli] and [sn:%lli] at address %#x\n", 59610474Sandreas.hansson@arm.com inst->seqNum, ld_inst->seqNum, ld_eff_addr1); 5978545Ssaidi@eecs.umich.edu } 5988199SAli.Saidi@ARM.com } 5998199SAli.Saidi@ARM.com 6008199SAli.Saidi@ARM.com incrLdIdx(load_idx); 6018199SAli.Saidi@ARM.com } 6028199SAli.Saidi@ARM.com return NoFault; 6038199SAli.Saidi@ARM.com} 6048199SAli.Saidi@ARM.com 6058199SAli.Saidi@ARM.com 6068199SAli.Saidi@ARM.com 6078199SAli.Saidi@ARM.com 6088199SAli.Saidi@ARM.comtemplate <class Impl> 
6098199SAli.Saidi@ARM.comFault 6102292SN/ALSQUnit<Impl>::executeLoad(DynInstPtr &inst) 6112292SN/A{ 6124032Sktlim@umich.edu using namespace TheISA; 6132292SN/A // Execute a specific load. 6142292SN/A Fault load_fault = NoFault; 6152292SN/A 6167720Sgblack@eecs.umich.edu DPRINTF(LSQUnit, "Executing load PC %s, [sn:%lli]\n", 6177944SGiacomo.Gabrielli@arm.com inst->pcState(), inst->seqNum); 6182292SN/A 6194032Sktlim@umich.edu assert(!inst->isSquashed()); 6204032Sktlim@umich.edu 6212669Sktlim@umich.edu load_fault = inst->initiateAcc(); 6222292SN/A 6237944SGiacomo.Gabrielli@arm.com if (inst->isTranslationDelayed() && 6247944SGiacomo.Gabrielli@arm.com load_fault == NoFault) 6257944SGiacomo.Gabrielli@arm.com return load_fault; 6267944SGiacomo.Gabrielli@arm.com 6277597Sminkyu.jeong@arm.com // If the instruction faulted or predicated false, then we need to send it 6287597Sminkyu.jeong@arm.com // along to commit without the instruction completing. 62910231Ssteve.reinhardt@amd.com if (load_fault != NoFault || !inst->readPredicate()) { 6302329SN/A // Send this instruction to commit, also make sure iew stage 63110824SAndreas.Sandberg@ARM.com // realizes there is activity. Mark it as executed unless it 63210824SAndreas.Sandberg@ARM.com // is a strictly ordered load that needs to hit the head of 63310824SAndreas.Sandberg@ARM.com // commit. 63410231Ssteve.reinhardt@amd.com if (!inst->readPredicate()) 6357848SAli.Saidi@ARM.com inst->forwardOldRegs(); 6367600Sminkyu.jeong@arm.com DPRINTF(LSQUnit, "Load [sn:%lli] not executed from %s\n", 6377600Sminkyu.jeong@arm.com inst->seqNum, 6387600Sminkyu.jeong@arm.com (load_fault != NoFault ? 
            "fault" : "predication"));

        // Only mark the instruction executed if it is not a
        // strictly-ordered access whose request is still pending; those
        // are only treated as executed once they issue at commit.
        if (!(inst->hasRequest() && inst->strictlyOrdered()) ||
            inst->isAtCommit()) {
            inst->setExecuted();
        }
        iewStage->instToCommit(inst);
        iewStage->activityThisCycle();
    } else {
        // The load executed cleanly; if load checking is enabled, run
        // the violation check over the loads that follow this one.
        assert(inst->effAddrValid());
        int load_idx = inst->lqIdx;
        incrLdIdx(load_idx);

        if (checkLoads)
            return checkViolations(load_idx, inst);
    }

    return load_fault;
}

// Execute a store: initiate its access (translation and data capture
// into the store queue entry), handle zero-size and predicated-false
// cases, and check younger loads for memory-ordering violations.  The
// store is not sent to memory here; that happens in writebackStores().
template <class Impl>
Fault
LSQUnit<Impl>::executeStore(DynInstPtr &store_inst)
{
    using namespace TheISA;
    // Make sure that a store exists.
    assert(stores != 0);

    int store_idx = store_inst->sqIdx;

    DPRINTF(LSQUnit, "Executing store PC %s [sn:%lli]\n",
            store_inst->pcState(), store_inst->seqNum);

    assert(!store_inst->isSquashed());

    // Check the recently completed loads to see if any match this store's
    // address.  If so, then we have a memory ordering violation.
    int load_idx = store_inst->lqIdx;

    Fault store_fault = store_inst->initiateAcc();

    // A delayed translation is not a fault: the access will resume once
    // translation finishes, so just return without further action.
    if (store_inst->isTranslationDelayed() &&
        store_fault == NoFault)
        return store_fault;

    // A predicated-false store produces no new result; carry the old
    // register values forward instead.
    if (!store_inst->readPredicate())
        store_inst->forwardOldRegs();

    if (storeQueue[store_idx].size == 0) {
        DPRINTF(LSQUnit,"Fault on Store PC %s, [sn:%lli], Size = 0\n",
                store_inst->pcState(), store_inst->seqNum);

        return store_fault;
    } else if (!store_inst->readPredicate()) {
        DPRINTF(LSQUnit, "Store [sn:%lli] not executed from predication\n",
                store_inst->seqNum);
        return store_fault;
    }

    assert(store_fault == NoFault);

    if (store_inst->isStoreConditional()) {
        // Store conditionals need to set themselves as able to
        // writeback if we haven't had a fault by here.
        storeQueue[store_idx].canWB = true;

        ++storesToWB;
    }

    return checkViolations(load_idx, store_inst);

}

// Release the load at the head of the load queue.
template <class Impl>
void
LSQUnit<Impl>::commitLoad()
{
    assert(loadQueue[loadHead]);

    DPRINTF(LSQUnit, "Committing head load instruction, PC %s\n",
            loadQueue[loadHead]->pcState());

    // Clear the smart pointer so the instruction's refcount drops, then
    // advance the head past the committed entry.
    loadQueue[loadHead] = NULL;

    incrLdIdx(loadHead);

    --loads;
}

// Free every load queue entry whose sequence number is at or below
// youngest_inst.
template <class Impl>
void
LSQUnit<Impl>::commitLoads(InstSeqNum &youngest_inst)
{
    assert(loads == 0 || loadQueue[loadHead]);

    while (loads != 0 && loadQueue[loadHead]->seqNum <= youngest_inst) {
        commitLoad();
    }
}

// Mark every store at or below youngest_inst as committed, i.e. as
// eligible for writeback to memory.
template <class Impl>
void
LSQUnit<Impl>::commitStores(InstSeqNum &youngest_inst)
{
    assert(stores == 0 || storeQueue[storeHead].inst);

    int store_idx = storeHead;

    while (store_idx != storeTail) {
        assert(storeQueue[store_idx].inst);
        // Mark any stores that are now committed and have not yet
        // been marked as able to write back.
7492292SN/A if (!storeQueue[store_idx].canWB) { 7502292SN/A if (storeQueue[store_idx].inst->seqNum > youngest_inst) { 7512292SN/A break; 7522292SN/A } 7532292SN/A DPRINTF(LSQUnit, "Marking store as able to write back, PC " 7547720Sgblack@eecs.umich.edu "%s [sn:%lli]\n", 7557720Sgblack@eecs.umich.edu storeQueue[store_idx].inst->pcState(), 7562292SN/A storeQueue[store_idx].inst->seqNum); 7572292SN/A 7582292SN/A storeQueue[store_idx].canWB = true; 7592292SN/A 7602292SN/A ++storesToWB; 7612292SN/A } 7622292SN/A 7632292SN/A incrStIdx(store_idx); 7642292SN/A } 7652292SN/A} 7662292SN/A 7672292SN/Atemplate <class Impl> 7682292SN/Avoid 7696974Stjones1@inf.ed.ac.ukLSQUnit<Impl>::writebackPendingStore() 7706974Stjones1@inf.ed.ac.uk{ 7716974Stjones1@inf.ed.ac.uk if (hasPendingPkt) { 7726974Stjones1@inf.ed.ac.uk assert(pendingPkt != NULL); 7736974Stjones1@inf.ed.ac.uk 7746974Stjones1@inf.ed.ac.uk // If the cache is blocked, this will store the packet for retry. 7756974Stjones1@inf.ed.ac.uk if (sendStore(pendingPkt)) { 7766974Stjones1@inf.ed.ac.uk storePostSend(pendingPkt); 7776974Stjones1@inf.ed.ac.uk } 7786974Stjones1@inf.ed.ac.uk pendingPkt = NULL; 7796974Stjones1@inf.ed.ac.uk hasPendingPkt = false; 7806974Stjones1@inf.ed.ac.uk } 7816974Stjones1@inf.ed.ac.uk} 7826974Stjones1@inf.ed.ac.uk 7836974Stjones1@inf.ed.ac.uktemplate <class Impl> 7846974Stjones1@inf.ed.ac.ukvoid 7852292SN/ALSQUnit<Impl>::writebackStores() 7862292SN/A{ 7876974Stjones1@inf.ed.ac.uk // First writeback the second packet from any split store that didn't 7886974Stjones1@inf.ed.ac.uk // complete last cycle because there weren't enough cache ports available. 
7896974Stjones1@inf.ed.ac.uk if (TheISA::HasUnalignedMemAcc) { 7906974Stjones1@inf.ed.ac.uk writebackPendingStore(); 7916974Stjones1@inf.ed.ac.uk } 7926974Stjones1@inf.ed.ac.uk 7932292SN/A while (storesToWB > 0 && 7942292SN/A storeWBIdx != storeTail && 7952292SN/A storeQueue[storeWBIdx].inst && 7962292SN/A storeQueue[storeWBIdx].canWB && 7978727Snilay@cs.wisc.edu ((!needsTSO) || (!storeInFlight)) && 79811780Sarthur.perais@inria.fr usedStorePorts < cacheStorePorts) { 7992292SN/A 80010333Smitch.hayenga@arm.com if (isStoreBlocked) { 8012678Sktlim@umich.edu DPRINTF(LSQUnit, "Unable to write back any more stores, cache" 8022678Sktlim@umich.edu " is blocked!\n"); 8032678Sktlim@umich.edu break; 8042678Sktlim@umich.edu } 8052678Sktlim@umich.edu 8062329SN/A // Store didn't write any data so no need to write it back to 8072329SN/A // memory. 8082292SN/A if (storeQueue[storeWBIdx].size == 0) { 8092292SN/A completeStore(storeWBIdx); 8102292SN/A 8112292SN/A incrStIdx(storeWBIdx); 8122292SN/A 8132292SN/A continue; 8142292SN/A } 8152678Sktlim@umich.edu 81611780Sarthur.perais@inria.fr ++usedStorePorts; 8172292SN/A 8182292SN/A if (storeQueue[storeWBIdx].inst->isDataPrefetch()) { 8192292SN/A incrStIdx(storeWBIdx); 8202292SN/A 8212292SN/A continue; 8222292SN/A } 8232292SN/A 8242292SN/A assert(storeQueue[storeWBIdx].req); 8252292SN/A assert(!storeQueue[storeWBIdx].committed); 8262292SN/A 8276974Stjones1@inf.ed.ac.uk if (TheISA::HasUnalignedMemAcc && storeQueue[storeWBIdx].isSplit) { 8286974Stjones1@inf.ed.ac.uk assert(storeQueue[storeWBIdx].sreqLow); 8296974Stjones1@inf.ed.ac.uk assert(storeQueue[storeWBIdx].sreqHigh); 8306974Stjones1@inf.ed.ac.uk } 8316974Stjones1@inf.ed.ac.uk 8322669Sktlim@umich.edu DynInstPtr inst = storeQueue[storeWBIdx].inst; 8332669Sktlim@umich.edu 8342669Sktlim@umich.edu Request *req = storeQueue[storeWBIdx].req; 8358481Sgblack@eecs.umich.edu RequestPtr sreqLow = storeQueue[storeWBIdx].sreqLow; 8368481Sgblack@eecs.umich.edu RequestPtr sreqHigh = 
storeQueue[storeWBIdx].sreqHigh; 8378481Sgblack@eecs.umich.edu 8382292SN/A storeQueue[storeWBIdx].committed = true; 8392292SN/A 8402669Sktlim@umich.edu assert(!inst->memData); 84110031SAli.Saidi@ARM.com inst->memData = new uint8_t[req->getSize()]; 8423772Sgblack@eecs.umich.edu 84310031SAli.Saidi@ARM.com if (storeQueue[storeWBIdx].isAllZeros) 84410031SAli.Saidi@ARM.com memset(inst->memData, 0, req->getSize()); 84510031SAli.Saidi@ARM.com else 84610031SAli.Saidi@ARM.com memcpy(inst->memData, storeQueue[storeWBIdx].data, req->getSize()); 8472669Sktlim@umich.edu 8486974Stjones1@inf.ed.ac.uk PacketPtr data_pkt; 8496974Stjones1@inf.ed.ac.uk PacketPtr snd_data_pkt = NULL; 8502292SN/A 8512678Sktlim@umich.edu LSQSenderState *state = new LSQSenderState; 8522678Sktlim@umich.edu state->isLoad = false; 8532678Sktlim@umich.edu state->idx = storeWBIdx; 8542678Sktlim@umich.edu state->inst = inst; 8556974Stjones1@inf.ed.ac.uk 8566974Stjones1@inf.ed.ac.uk if (!TheISA::HasUnalignedMemAcc || !storeQueue[storeWBIdx].isSplit) { 8576974Stjones1@inf.ed.ac.uk 8586974Stjones1@inf.ed.ac.uk // Build a single data packet if the store isn't split. 85910342SCurtis.Dunham@arm.com data_pkt = Packet::createWrite(req); 8606974Stjones1@inf.ed.ac.uk data_pkt->dataStatic(inst->memData); 8616974Stjones1@inf.ed.ac.uk data_pkt->senderState = state; 8626974Stjones1@inf.ed.ac.uk } else { 8636974Stjones1@inf.ed.ac.uk // Create two packets if the store is split in two. 
86410342SCurtis.Dunham@arm.com data_pkt = Packet::createWrite(sreqLow); 86510342SCurtis.Dunham@arm.com snd_data_pkt = Packet::createWrite(sreqHigh); 8666974Stjones1@inf.ed.ac.uk 8676974Stjones1@inf.ed.ac.uk data_pkt->dataStatic(inst->memData); 8686974Stjones1@inf.ed.ac.uk snd_data_pkt->dataStatic(inst->memData + sreqLow->getSize()); 8696974Stjones1@inf.ed.ac.uk 8706974Stjones1@inf.ed.ac.uk data_pkt->senderState = state; 8716974Stjones1@inf.ed.ac.uk snd_data_pkt->senderState = state; 8726974Stjones1@inf.ed.ac.uk 8736974Stjones1@inf.ed.ac.uk state->isSplit = true; 8746974Stjones1@inf.ed.ac.uk state->outstanding = 2; 8756974Stjones1@inf.ed.ac.uk 8766974Stjones1@inf.ed.ac.uk // Can delete the main request now. 8776974Stjones1@inf.ed.ac.uk delete req; 8786974Stjones1@inf.ed.ac.uk req = sreqLow; 8796974Stjones1@inf.ed.ac.uk } 8802678Sktlim@umich.edu 8817720Sgblack@eecs.umich.edu DPRINTF(LSQUnit, "D-Cache: Writing back store idx:%i PC:%s " 8822292SN/A "to Addr:%#x, data:%#x [sn:%lli]\n", 8837720Sgblack@eecs.umich.edu storeWBIdx, inst->pcState(), 8843797Sgblack@eecs.umich.edu req->getPaddr(), (int)*(inst->memData), 8853221Sktlim@umich.edu inst->seqNum); 8862292SN/A 8872693Sktlim@umich.edu // @todo: Remove this SC hack once the memory system handles it. 8884350Sgblack@eecs.umich.edu if (inst->isStoreConditional()) { 8896974Stjones1@inf.ed.ac.uk assert(!storeQueue[storeWBIdx].isSplit); 8903326Sktlim@umich.edu // Disable recording the result temporarily. Writing to 8913326Sktlim@umich.edu // misc regs normally updates the result, but this is not 8923326Sktlim@umich.edu // the desired behavior when handling store conditionals. 8939046SAli.Saidi@ARM.com inst->recordResult(false); 89410030SAli.Saidi@ARM.com bool success = TheISA::handleLockedWrite(inst.get(), req, cacheBlockMask); 8959046SAli.Saidi@ARM.com inst->recordResult(true); 8963326Sktlim@umich.edu 8973326Sktlim@umich.edu if (!success) { 8983326Sktlim@umich.edu // Instantly complete this store. 
8993326Sktlim@umich.edu DPRINTF(LSQUnit, "Store conditional [sn:%lli] failed. " 9003326Sktlim@umich.edu "Instantly completing it.\n", 9013326Sktlim@umich.edu inst->seqNum); 9023326Sktlim@umich.edu WritebackEvent *wb = new WritebackEvent(inst, data_pkt, this); 9037823Ssteve.reinhardt@amd.com cpu->schedule(wb, curTick() + 1); 9043326Sktlim@umich.edu completeStore(storeWBIdx); 9053326Sktlim@umich.edu incrStIdx(storeWBIdx); 9063326Sktlim@umich.edu continue; 9072693Sktlim@umich.edu } 9082693Sktlim@umich.edu } else { 9092693Sktlim@umich.edu // Non-store conditionals do not need a writeback. 9102693Sktlim@umich.edu state->noWB = true; 9112693Sktlim@umich.edu } 9122693Sktlim@umich.edu 9138481Sgblack@eecs.umich.edu bool split = 9148481Sgblack@eecs.umich.edu TheISA::HasUnalignedMemAcc && storeQueue[storeWBIdx].isSplit; 9158481Sgblack@eecs.umich.edu 9168481Sgblack@eecs.umich.edu ThreadContext *thread = cpu->tcBase(lsqID); 9178481Sgblack@eecs.umich.edu 9188481Sgblack@eecs.umich.edu if (req->isMmappedIpr()) { 9198481Sgblack@eecs.umich.edu assert(!inst->isStoreConditional()); 9208481Sgblack@eecs.umich.edu TheISA::handleIprWrite(thread, data_pkt); 9218481Sgblack@eecs.umich.edu delete data_pkt; 9228481Sgblack@eecs.umich.edu if (split) { 9238481Sgblack@eecs.umich.edu assert(snd_data_pkt->req->isMmappedIpr()); 9248481Sgblack@eecs.umich.edu TheISA::handleIprWrite(thread, snd_data_pkt); 9258481Sgblack@eecs.umich.edu delete snd_data_pkt; 9268481Sgblack@eecs.umich.edu delete sreqLow; 9278481Sgblack@eecs.umich.edu delete sreqHigh; 9288481Sgblack@eecs.umich.edu } 9298481Sgblack@eecs.umich.edu delete state; 9308481Sgblack@eecs.umich.edu delete req; 9318481Sgblack@eecs.umich.edu completeStore(storeWBIdx); 9328481Sgblack@eecs.umich.edu incrStIdx(storeWBIdx); 9338481Sgblack@eecs.umich.edu } else if (!sendStore(data_pkt)) { 9344032Sktlim@umich.edu DPRINTF(IEW, "D-Cache became blocked when writing [sn:%lli], will" 9353221Sktlim@umich.edu "retry later\n", 9363221Sktlim@umich.edu inst->seqNum); 
9376974Stjones1@inf.ed.ac.uk 9386974Stjones1@inf.ed.ac.uk // Need to store the second packet, if split. 9398481Sgblack@eecs.umich.edu if (split) { 9406974Stjones1@inf.ed.ac.uk state->pktToSend = true; 9416974Stjones1@inf.ed.ac.uk state->pendingPacket = snd_data_pkt; 9426974Stjones1@inf.ed.ac.uk } 9432669Sktlim@umich.edu } else { 9446974Stjones1@inf.ed.ac.uk 9456974Stjones1@inf.ed.ac.uk // If split, try to send the second packet too 9468481Sgblack@eecs.umich.edu if (split) { 9476974Stjones1@inf.ed.ac.uk assert(snd_data_pkt); 9486974Stjones1@inf.ed.ac.uk 9496974Stjones1@inf.ed.ac.uk // Ensure there are enough ports to use. 95011780Sarthur.perais@inria.fr if (usedStorePorts < cacheStorePorts) { 95111780Sarthur.perais@inria.fr ++usedStorePorts; 9526974Stjones1@inf.ed.ac.uk if (sendStore(snd_data_pkt)) { 9536974Stjones1@inf.ed.ac.uk storePostSend(snd_data_pkt); 9546974Stjones1@inf.ed.ac.uk } else { 9556974Stjones1@inf.ed.ac.uk DPRINTF(IEW, "D-Cache became blocked when writing" 9566974Stjones1@inf.ed.ac.uk " [sn:%lli] second packet, will retry later\n", 9576974Stjones1@inf.ed.ac.uk inst->seqNum); 9586974Stjones1@inf.ed.ac.uk } 9596974Stjones1@inf.ed.ac.uk } else { 9606974Stjones1@inf.ed.ac.uk 9616974Stjones1@inf.ed.ac.uk // Store the packet for when there's free ports. 9626974Stjones1@inf.ed.ac.uk assert(pendingPkt == NULL); 9636974Stjones1@inf.ed.ac.uk pendingPkt = snd_data_pkt; 9646974Stjones1@inf.ed.ac.uk hasPendingPkt = true; 9656974Stjones1@inf.ed.ac.uk } 9666974Stjones1@inf.ed.ac.uk } else { 9676974Stjones1@inf.ed.ac.uk 9686974Stjones1@inf.ed.ac.uk // Not a split store. 9696974Stjones1@inf.ed.ac.uk storePostSend(data_pkt); 9706974Stjones1@inf.ed.ac.uk } 9712292SN/A } 9722292SN/A } 9732292SN/A 9742292SN/A // Not sure this should set it to 0. 
    usedStorePorts = 0;

    assert(stores >= 0 && storesToWB >= 0);
}

/*template <class Impl>
void
LSQUnit<Impl>::removeMSHR(InstSeqNum seqNum)
{
    list<InstSeqNum>::iterator mshr_it = find(mshrSeqNums.begin(),
                                              mshrSeqNums.end(),
                                              seqNum);

    if (mshr_it != mshrSeqNums.end()) {
        mshrSeqNums.erase(mshr_it);
        DPRINTF(LSQUnit, "Removing MSHR. count = %i\n",mshrSeqNums.size());
    }
}*/

// Squash all loads and all not-yet-committed (canWB == false) stores
// younger than squashed_num, walking each queue from the tail back
// toward the head.
template <class Impl>
void
LSQUnit<Impl>::squash(const InstSeqNum &squashed_num)
{
    DPRINTF(LSQUnit, "Squashing until [sn:%lli]!"
            "(Loads:%i Stores:%i)\n", squashed_num, loads, stores);

    int load_idx = loadTail;
    decrLdIdx(load_idx);

    while (loads != 0 && loadQueue[load_idx]->seqNum > squashed_num) {
        DPRINTF(LSQUnit,"Load Instruction PC %s squashed, "
                "[sn:%lli]\n",
                loadQueue[load_idx]->pcState(),
                loadQueue[load_idx]->seqNum);

        // If the load responsible for the stall is being squashed, the
        // stall is over.
        if (isStalled() && load_idx == stallingLoadIdx) {
            stalled = false;
            stallingStoreIsn = 0;
            stallingLoadIdx = 0;
        }

        // Clear the smart pointer to make sure it is decremented.
        loadQueue[load_idx]->setSquashed();
        loadQueue[load_idx] = NULL;
        --loads;

        // Inefficient!
        loadTail = load_idx;

        decrLdIdx(load_idx);
        ++lsqSquashedLoads;
    }

    // Forget the recorded memory-order violator if it is younger than
    // the squash point.
    if (memDepViolator && squashed_num < memDepViolator->seqNum) {
        memDepViolator = NULL;
    }

    int store_idx = storeTail;
    decrStIdx(store_idx);

    while (stores != 0 &&
           storeQueue[store_idx].inst->seqNum > squashed_num) {
        // Instructions marked as can WB are already committed.
        if (storeQueue[store_idx].canWB) {
            break;
        }

        DPRINTF(LSQUnit,"Store Instruction PC %s squashed, "
                "idx:%i [sn:%lli]\n",
                storeQueue[store_idx].inst->pcState(),
                store_idx, storeQueue[store_idx].inst->seqNum);

        // I don't think this can happen. It should have been cleared
        // by the stalling load.
        if (isStalled() &&
            storeQueue[store_idx].inst->seqNum == stallingStoreIsn) {
            panic("Is stalled should have been cleared by stalling load!\n");
            // NOTE(review): panic() does not return, so the two
            // assignments below appear unreachable — confirm before
            // relying on them.
            stalled = false;
            stallingStoreIsn = 0;
        }

        // Clear the smart pointer to make sure it is decremented.
        storeQueue[store_idx].inst->setSquashed();
        storeQueue[store_idx].inst = NULL;
        storeQueue[store_idx].canWB = 0;

        // Must delete request now that it wasn't handed off to
        // memory.  This is quite ugly.  @todo: Figure out the proper
        // place to really handle request deletes.
        delete storeQueue[store_idx].req;
        if (TheISA::HasUnalignedMemAcc && storeQueue[store_idx].isSplit) {
            delete storeQueue[store_idx].sreqLow;
            delete storeQueue[store_idx].sreqHigh;

            storeQueue[store_idx].sreqLow = NULL;
            storeQueue[store_idx].sreqHigh = NULL;
        }

        storeQueue[store_idx].req = NULL;
        --stores;

        // Inefficient!
        storeTail = store_idx;

        decrStIdx(store_idx);
        ++lsqSquashedStores;
    }
}

// Bookkeeping performed as soon as a store's packet has been handed to
// the cache, before any response comes back.
template <class Impl>
void
LSQUnit<Impl>::storePostSend(PacketPtr pkt)
{
    if (isStalled() &&
        storeQueue[storeWBIdx].inst->seqNum == stallingStoreIsn) {
        DPRINTF(LSQUnit, "Unstalling, stalling store [sn:%lli] "
                "load idx:%i\n",
                stallingStoreIsn, stallingLoadIdx);
        stalled = false;
        stallingStoreIsn = 0;
        iewStage->replayMemInst(loadQueue[stallingLoadIdx]);
    }

    if (!storeQueue[storeWBIdx].inst->isStoreConditional()) {
        // The store is basically completed at this time. This
        // only works so long as the checker doesn't try to
        // verify the value in memory for stores.
        storeQueue[storeWBIdx].inst->setCompleted();

        if (cpu->checker) {
            cpu->checker->verify(storeQueue[storeWBIdx].inst);
        }
    }

    // Under TSO, record that a store is outstanding; writebackStores()
    // will not send further stores until it clears.
    if (needsTSO) {
        storeInFlight = true;
    }

    incrStIdx(storeWBIdx);
}

// Finish an instruction whose memory response has arrived (typically a
// load): complete the access and hand the instruction to commit.
template <class Impl>
void
LSQUnit<Impl>::writeback(DynInstPtr &inst, PacketPtr pkt)
{
    iewStage->wakeCPU();

    // Squashed instructions do not need to complete their access.
    if (inst->isSquashed()) {
        assert(!inst->isStore());
        ++lsqIgnoredResponses;
        return;
    }

    if (!inst->isExecuted()) {
        inst->setExecuted();

        if (inst->fault == NoFault) {
            // Complete access to copy data to proper place.
            inst->completeAcc(pkt);
        } else {
            // If the instruction has an outstanding fault, we cannot complete
            // the access as this discards the current fault.

            // If we have an outstanding fault, the fault should only be of
            // type ReExec.
            assert(dynamic_cast<ReExec*>(inst->fault.get()) != nullptr);

            DPRINTF(LSQUnit, "Not completing instruction [sn:%lli] access "
                    "due to pending fault.\n", inst->seqNum);
        }
    }

    // Need to insert instruction into queue to commit
    iewStage->instToCommit(inst);

    iewStage->activityThisCycle();

    // see if this load changed the PC
    iewStage->checkMisprediction(inst);
}

// Mark the store at store_idx complete and retire any contiguous run of
// completed stores at the head of the store queue.
template <class Impl>
void
LSQUnit<Impl>::completeStore(int store_idx)
{
    assert(storeQueue[store_idx].inst);
    storeQueue[store_idx].completed = true;
    --storesToWB;
    // A bit conservative because a store completion may not free up entries,
    // but hopefully avoids two store completions in one cycle from making
    // the CPU tick twice.
    cpu->wakeCPU();
    cpu->activityThisCycle();

    if (store_idx == storeHead) {
        do {
            incrStIdx(storeHead);

            --stores;
        } while (storeQueue[storeHead].completed &&
                 storeHead != storeTail);

        iewStage->updateLSQNextCycle = true;
    }

    DPRINTF(LSQUnit, "Completing store [sn:%lli], idx:%i, store head "
            "idx:%i\n",
            storeQueue[store_idx].inst->seqNum, store_idx, storeHead);

#if TRACING_ON
    // Record fetch-to-completion latency for the O3 pipeline viewer.
    if (DTRACE(O3PipeView)) {
        storeQueue[store_idx].inst->storeTick =
            curTick() - storeQueue[store_idx].inst->fetchTick;
    }
#endif

    if (isStalled() &&
        storeQueue[store_idx].inst->seqNum == stallingStoreIsn) {
        DPRINTF(LSQUnit, "Unstalling, stalling store [sn:%lli] "
                "load idx:%i\n",
                stallingStoreIsn, stallingLoadIdx);
        stalled = false;
        stallingStoreIsn = 0;
        iewStage->replayMemInst(loadQueue[stallingLoadIdx]);
    }

    storeQueue[store_idx].inst->setCompleted();

    // The TSO-ordered outstanding store has now finished.
    if (needsTSO) {
        storeInFlight = false;
    }

    // Tell the checker we've completed this instruction.  Some stores
    // may get reported twice to the checker, but the checker can
    // handle that case.
    // Store conditionals cannot be sent to the checker yet, they have
    // to update the misc registers first which should take place
    // when they commit
    if (cpu->checker && !storeQueue[store_idx].inst->isStoreConditional()) {
        cpu->checker->verify(storeQueue[store_idx].inst);
    }
}

// Try to send a store packet to the D-cache.  On failure the unit
// becomes blocked and the packet is stashed for a later retry.
template <class Impl>
bool
LSQUnit<Impl>::sendStore(PacketPtr data_pkt)
{
    if (!dcachePort->sendTimingReq(data_pkt)) {
        // Need to handle becoming blocked on a store.
        isStoreBlocked = true;
        ++lsqCacheBlocked;
        assert(retryPkt == NULL);
        retryPkt = data_pkt;
        return false;
    }
    return true;
}

// The D-cache can accept requests again: resend the blocked store
// packet and, for a split store, any still-outstanding second packet.
template <class Impl>
void
LSQUnit<Impl>::recvRetry()
{
    if (isStoreBlocked) {
        DPRINTF(LSQUnit, "Receiving retry: store blocked\n");
        assert(retryPkt != NULL);

        LSQSenderState *state =
            dynamic_cast<LSQSenderState *>(retryPkt->senderState);

        if (dcachePort->sendTimingReq(retryPkt)) {
            // Don't finish the store unless this is the last packet.
            if (!TheISA::HasUnalignedMemAcc || !state->pktToSend ||
                state->pendingPacket == retryPkt) {
                state->pktToSend = false;
                storePostSend(retryPkt);
            }
            retryPkt = NULL;
            isStoreBlocked = false;

            // Send any outstanding packet.
            if (TheISA::HasUnalignedMemAcc && state->pktToSend) {
                assert(state->pendingPacket);
                if (sendStore(state->pendingPacket)) {
                    storePostSend(state->pendingPacket);
                }
            }
        } else {
            // Still blocked!
            ++lsqCacheBlocked;
        }
    }
}

// Circular-buffer index helpers for the store and load queues: advance
// or retreat an index with wraparound at SQEntries/LQEntries.
template <class Impl>
inline void
LSQUnit<Impl>::incrStIdx(int &store_idx) const
{
    if (++store_idx >= SQEntries)
        store_idx = 0;
}

template <class Impl>
inline void
LSQUnit<Impl>::decrStIdx(int &store_idx) const
{
    if (--store_idx < 0)
        store_idx += SQEntries;
}

template <class Impl>
inline void
LSQUnit<Impl>::incrLdIdx(int &load_idx) const
{
    if (++load_idx >= LQEntries)
        load_idx = 0;
}

template <class Impl>
inline void
LSQUnit<Impl>::decrLdIdx(int &load_idx) const
{
    if (--load_idx < 0)
        load_idx += LQEntries;
}

// Debug helper: print the live contents of both queues.
template <class Impl>
void
LSQUnit<Impl>::dumpInsts() const
{
    cprintf("Load store queue: Dumping instructions.\n");
    cprintf("Load queue size: %i\n", loads);
    cprintf("Load queue: ");

    int load_idx = loadHead;

    while (load_idx != loadTail && loadQueue[load_idx]) {
        const DynInstPtr &inst(loadQueue[load_idx]);
        cprintf("%s.[sn:%i] ", inst->pcState(), inst->seqNum);

        incrLdIdx(load_idx);
    }
    cprintf("\n");

    cprintf("Store queue size: %i\n", stores);
    cprintf("Store queue: ");

    int store_idx = storeHead;

    while (store_idx != storeTail && storeQueue[store_idx].inst) {
        const DynInstPtr &inst(storeQueue[store_idx].inst);
        cprintf("%s.[sn:%i] ", inst->pcState(), inst->seqNum);

        incrStIdx(store_idx);
    }

    cprintf("\n");
}

#endif//__CPU_O3_LSQ_UNIT_IMPL_HH__