lsq_unit_impl.hh revision 13710
111308Santhony.gutierrez@amd.com 211308Santhony.gutierrez@amd.com/* 311308Santhony.gutierrez@amd.com * Copyright (c) 2010-2014, 2017-2018 ARM Limited 411308Santhony.gutierrez@amd.com * Copyright (c) 2013 Advanced Micro Devices, Inc. 511308Santhony.gutierrez@amd.com * All rights reserved 611308Santhony.gutierrez@amd.com * 711308Santhony.gutierrez@amd.com * The license below extends only to copyright in the software and shall 811308Santhony.gutierrez@amd.com * not be construed as granting a license to any other intellectual 911308Santhony.gutierrez@amd.com * property including but not limited to intellectual property relating 1011308Santhony.gutierrez@amd.com * to a hardware implementation of the functionality of the software 1111308Santhony.gutierrez@amd.com * licensed hereunder. You may use the software subject to the license 1211308Santhony.gutierrez@amd.com * terms below provided that you ensure that this notice is replicated 1311308Santhony.gutierrez@amd.com * unmodified and in its entirety in all distributions of the software, 1411308Santhony.gutierrez@amd.com * modified or unmodified, in source code or in binary form. 1511308Santhony.gutierrez@amd.com * 1611308Santhony.gutierrez@amd.com * Copyright (c) 2004-2005 The Regents of The University of Michigan 1711308Santhony.gutierrez@amd.com * All rights reserved. 1811308Santhony.gutierrez@amd.com * 1911308Santhony.gutierrez@amd.com * Redistribution and use in source and binary forms, with or without 2011308Santhony.gutierrez@amd.com * modification, are permitted provided that the following conditions are 2111308Santhony.gutierrez@amd.com * met: redistributions of source code must retain the above copyright 2211308Santhony.gutierrez@amd.com * notice, this list of conditions and the following disclaimer; 2311308Santhony.gutierrez@amd.com * redistributions in binary form must reproduce the above copyright 2411308Santhony.gutierrez@amd.com * notice, this list of conditions and the following disclaimer in the 2511308Santhony.gutierrez@amd.com * documentation and/or other materials provided with the distribution; 2611308Santhony.gutierrez@amd.com * neither the name of the copyright holders nor the names of its 2711308Santhony.gutierrez@amd.com * contributors may be used to endorse or promote products derived from 2811308Santhony.gutierrez@amd.com * this software without specific prior written permission. 2911308Santhony.gutierrez@amd.com * 3011308Santhony.gutierrez@amd.com * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 3111308Santhony.gutierrez@amd.com * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 3211308Santhony.gutierrez@amd.com * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 3311308Santhony.gutierrez@amd.com * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 3411308Santhony.gutierrez@amd.com * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 3511308Santhony.gutierrez@amd.com * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 3611308Santhony.gutierrez@amd.com * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 3711308Santhony.gutierrez@amd.com * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 3811308Santhony.gutierrez@amd.com * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 3911308Santhony.gutierrez@amd.com * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 4011308Santhony.gutierrez@amd.com * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 4111308Santhony.gutierrez@amd.com * 4211308Santhony.gutierrez@amd.com * Authors: Kevin Lim 4311308Santhony.gutierrez@amd.com * Korey Sewell 4411308Santhony.gutierrez@amd.com */ 4511308Santhony.gutierrez@amd.com 4611308Santhony.gutierrez@amd.com#ifndef __CPU_O3_LSQ_UNIT_IMPL_HH__ 4711308Santhony.gutierrez@amd.com#define __CPU_O3_LSQ_UNIT_IMPL_HH__ 4811308Santhony.gutierrez@amd.com 4911308Santhony.gutierrez@amd.com#include "arch/generic/debugfaults.hh" 5011308Santhony.gutierrez@amd.com#include "arch/locked_mem.hh" 5111308Santhony.gutierrez@amd.com#include "base/str.hh" 5211308Santhony.gutierrez@amd.com#include "config/the_isa.hh" 5311308Santhony.gutierrez@amd.com#include "cpu/checker/cpu.hh" 5411308Santhony.gutierrez@amd.com#include "cpu/o3/lsq.hh" 5511308Santhony.gutierrez@amd.com#include "cpu/o3/lsq_unit.hh" 5611308Santhony.gutierrez@amd.com#include "debug/Activity.hh" 5711308Santhony.gutierrez@amd.com#include "debug/IEW.hh" 5811308Santhony.gutierrez@amd.com#include "debug/LSQUnit.hh" 5911308Santhony.gutierrez@amd.com#include "debug/O3PipeView.hh" 6011308Santhony.gutierrez@amd.com#include "mem/packet.hh" 6111308Santhony.gutierrez@amd.com#include "mem/request.hh" 6211308Santhony.gutierrez@amd.com 6311308Santhony.gutierrez@amd.comtemplate<class Impl> 6411308Santhony.gutierrez@amd.comLSQUnit<Impl>::WritebackEvent::WritebackEvent(const DynInstPtr &_inst, 6511308Santhony.gutierrez@amd.com PacketPtr _pkt, LSQUnit *lsq_ptr) 6611308Santhony.gutierrez@amd.com : Event(Default_Pri, AutoDelete), 6711308Santhony.gutierrez@amd.com inst(_inst), pkt(_pkt), lsqPtr(lsq_ptr) 6811308Santhony.gutierrez@amd.com{ 6911308Santhony.gutierrez@amd.com assert(_inst->savedReq); 7011308Santhony.gutierrez@amd.com _inst->savedReq->writebackScheduled(); 7111308Santhony.gutierrez@amd.com} 7211308Santhony.gutierrez@amd.com 7311308Santhony.gutierrez@amd.comtemplate<class Impl> 7411308Santhony.gutierrez@amd.comvoid 7511308Santhony.gutierrez@amd.comLSQUnit<Impl>::WritebackEvent::process() 7611308Santhony.gutierrez@amd.com{ 7711308Santhony.gutierrez@amd.com assert(!lsqPtr->cpu->switchedOut()); 7811308Santhony.gutierrez@amd.com 7911308Santhony.gutierrez@amd.com lsqPtr->writeback(inst, pkt); 8011308Santhony.gutierrez@amd.com 8111308Santhony.gutierrez@amd.com assert(inst->savedReq); 8211308Santhony.gutierrez@amd.com inst->savedReq->writebackDone(); 8311308Santhony.gutierrez@amd.com delete pkt; 8411308Santhony.gutierrez@amd.com} 8511308Santhony.gutierrez@amd.com 8611308Santhony.gutierrez@amd.comtemplate<class Impl> 8711308Santhony.gutierrez@amd.comconst char * 8811308Santhony.gutierrez@amd.comLSQUnit<Impl>::WritebackEvent::description() const 8911308Santhony.gutierrez@amd.com{ 9011308Santhony.gutierrez@amd.com return "Store writeback"; 9111308Santhony.gutierrez@amd.com} 9211308Santhony.gutierrez@amd.com 9311308Santhony.gutierrez@amd.comtemplate <class Impl> 9411308Santhony.gutierrez@amd.combool 9511308Santhony.gutierrez@amd.comLSQUnit<Impl>::recvTimingResp(PacketPtr pkt) 9611308Santhony.gutierrez@amd.com{ 9711308Santhony.gutierrez@amd.com auto senderState = dynamic_cast<LSQSenderState*>(pkt->senderState); 9811308Santhony.gutierrez@amd.com LSQRequest* req = senderState->request(); 9911308Santhony.gutierrez@amd.com assert(req != nullptr); 10011308Santhony.gutierrez@amd.com bool ret = true; 10111308Santhony.gutierrez@amd.com /* Check that the request is still alive before any further action. */ 10211308Santhony.gutierrez@amd.com if (senderState->alive()) { 10311308Santhony.gutierrez@amd.com ret = req->recvTimingResp(pkt); 10411308Santhony.gutierrez@amd.com } else { 10511308Santhony.gutierrez@amd.com senderState->outstanding--; 10611308Santhony.gutierrez@amd.com } 10711308Santhony.gutierrez@amd.com return ret; 10811308Santhony.gutierrez@amd.com 10911308Santhony.gutierrez@amd.com} 11011308Santhony.gutierrez@amd.com 11111308Santhony.gutierrez@amd.comtemplate<class Impl> 11211308Santhony.gutierrez@amd.comvoid 11311308Santhony.gutierrez@amd.comLSQUnit<Impl>::completeDataAccess(PacketPtr pkt) 11411308Santhony.gutierrez@amd.com{ 11511308Santhony.gutierrez@amd.com LSQSenderState *state = dynamic_cast<LSQSenderState *>(pkt->senderState); 11611308Santhony.gutierrez@amd.com DynInstPtr inst = state->inst; 11711308Santhony.gutierrez@amd.com 11811308Santhony.gutierrez@amd.com cpu->ppDataAccessComplete->notify(std::make_pair(inst, pkt)); 11911308Santhony.gutierrez@amd.com 12011308Santhony.gutierrez@amd.com /* Notify the sender state that the access is complete (for ownership 12111308Santhony.gutierrez@amd.com * tracking). */ 12211308Santhony.gutierrez@amd.com state->complete(); 12311308Santhony.gutierrez@amd.com 12411308Santhony.gutierrez@amd.com assert(!cpu->switchedOut()); 12511308Santhony.gutierrez@amd.com if (!inst->isSquashed()) { 12611308Santhony.gutierrez@amd.com if (state->needWB) { 12711308Santhony.gutierrez@amd.com // Only loads, store conditionals and atomics perform the writeback 12811308Santhony.gutierrez@amd.com // after receving the response from the memory 12911308Santhony.gutierrez@amd.com assert(inst->isLoad() || inst->isStoreConditional() || 13011308Santhony.gutierrez@amd.com inst->isAtomic()); 13111308Santhony.gutierrez@amd.com writeback(inst, state->request()->mainPacket()); 13211308Santhony.gutierrez@amd.com if (inst->isStore() || inst->isAtomic()) { 13311308Santhony.gutierrez@amd.com auto ss = dynamic_cast<SQSenderState*>(state); 13411308Santhony.gutierrez@amd.com ss->writebackDone(); 13511308Santhony.gutierrez@amd.com completeStore(ss->idx); 13611308Santhony.gutierrez@amd.com } 13711308Santhony.gutierrez@amd.com } else if (inst->isStore()) { 13811308Santhony.gutierrez@amd.com // This is a regular store (i.e., not store conditionals and 13911308Santhony.gutierrez@amd.com // atomics), so it can complete without writing back 14011308Santhony.gutierrez@amd.com completeStore(dynamic_cast<SQSenderState*>(state)->idx); 14111308Santhony.gutierrez@amd.com } 14211308Santhony.gutierrez@amd.com } 14311308Santhony.gutierrez@amd.com} 14411308Santhony.gutierrez@amd.com 14511308Santhony.gutierrez@amd.comtemplate <class Impl> 14611308Santhony.gutierrez@amd.comLSQUnit<Impl>::LSQUnit(uint32_t lqEntries, uint32_t sqEntries) 14711308Santhony.gutierrez@amd.com : lsqID(-1), storeQueue(sqEntries+1), loadQueue(lqEntries+1), 14811308Santhony.gutierrez@amd.com loads(0), stores(0), storesToWB(0), cacheBlockMask(0), stalled(false), 14911308Santhony.gutierrez@amd.com isStoreBlocked(false), storeInFlight(false), hasPendingRequest(false), 15011308Santhony.gutierrez@amd.com pendingRequest(nullptr) 15111308Santhony.gutierrez@amd.com{ 15211308Santhony.gutierrez@amd.com} 15311308Santhony.gutierrez@amd.com 15411308Santhony.gutierrez@amd.comtemplate<class Impl> 15511308Santhony.gutierrez@amd.comvoid 15611308Santhony.gutierrez@amd.comLSQUnit<Impl>::init(O3CPU *cpu_ptr, IEW *iew_ptr, DerivO3CPUParams *params, 15711308Santhony.gutierrez@amd.com LSQ *lsq_ptr, unsigned id) 15811308Santhony.gutierrez@amd.com{ 15911308Santhony.gutierrez@amd.com lsqID = id; 16011308Santhony.gutierrez@amd.com 16111308Santhony.gutierrez@amd.com cpu = cpu_ptr; 16211308Santhony.gutierrez@amd.com iewStage = iew_ptr; 16311308Santhony.gutierrez@amd.com 16411308Santhony.gutierrez@amd.com lsq = lsq_ptr; 16511308Santhony.gutierrez@amd.com 16611308Santhony.gutierrez@amd.com DPRINTF(LSQUnit, "Creating LSQUnit%i object.\n",lsqID); 16711308Santhony.gutierrez@amd.com 16811308Santhony.gutierrez@amd.com depCheckShift = params->LSQDepCheckShift; 16911308Santhony.gutierrez@amd.com checkLoads = params->LSQCheckLoads; 17011308Santhony.gutierrez@amd.com needsTSO = params->needsTSO; 17111308Santhony.gutierrez@amd.com 17211308Santhony.gutierrez@amd.com resetState(); 17311308Santhony.gutierrez@amd.com} 17411308Santhony.gutierrez@amd.com 17511308Santhony.gutierrez@amd.com 17611308Santhony.gutierrez@amd.comtemplate<class Impl> 17711308Santhony.gutierrez@amd.comvoid 17811308Santhony.gutierrez@amd.comLSQUnit<Impl>::resetState() 17911308Santhony.gutierrez@amd.com{ 18011308Santhony.gutierrez@amd.com loads = stores = storesToWB = 0; 18111308Santhony.gutierrez@amd.com 18211308Santhony.gutierrez@amd.com 18311308Santhony.gutierrez@amd.com storeWBIt = storeQueue.begin(); 18411308Santhony.gutierrez@amd.com 18511308Santhony.gutierrez@amd.com retryPkt = NULL; 18611308Santhony.gutierrez@amd.com memDepViolator = NULL; 18711308Santhony.gutierrez@amd.com 18811308Santhony.gutierrez@amd.com stalled = false; 18911308Santhony.gutierrez@amd.com 19011308Santhony.gutierrez@amd.com cacheBlockMask = ~(cpu->cacheLineSize() - 1); 19111308Santhony.gutierrez@amd.com} 19211308Santhony.gutierrez@amd.com 19311308Santhony.gutierrez@amd.comtemplate<class Impl> 19411308Santhony.gutierrez@amd.comstd::string 19511308Santhony.gutierrez@amd.comLSQUnit<Impl>::name() const 19611308Santhony.gutierrez@amd.com{ 19711308Santhony.gutierrez@amd.com if (Impl::MaxThreads == 1) { 19811308Santhony.gutierrez@amd.com return iewStage->name() + ".lsq"; 19911308Santhony.gutierrez@amd.com } else { 20011308Santhony.gutierrez@amd.com return iewStage->name() + ".lsq.thread" + std::to_string(lsqID); 20111308Santhony.gutierrez@amd.com } 20211308Santhony.gutierrez@amd.com} 20311308Santhony.gutierrez@amd.com 20411308Santhony.gutierrez@amd.comtemplate<class Impl> 20511308Santhony.gutierrez@amd.comvoid 20611308Santhony.gutierrez@amd.comLSQUnit<Impl>::regStats() 20711308Santhony.gutierrez@amd.com{ 20811308Santhony.gutierrez@amd.com lsqForwLoads 20911308Santhony.gutierrez@amd.com .name(name() + ".forwLoads") 21011308Santhony.gutierrez@amd.com .desc("Number of loads that had data forwarded from stores"); 21111308Santhony.gutierrez@amd.com 21211308Santhony.gutierrez@amd.com invAddrLoads 21311308Santhony.gutierrez@amd.com .name(name() + ".invAddrLoads") 21411308Santhony.gutierrez@amd.com .desc("Number of loads ignored due to an invalid address"); 21511308Santhony.gutierrez@amd.com 21611308Santhony.gutierrez@amd.com lsqSquashedLoads 21711308Santhony.gutierrez@amd.com .name(name() + ".squashedLoads") 21811308Santhony.gutierrez@amd.com .desc("Number of loads squashed"); 21911308Santhony.gutierrez@amd.com 22011308Santhony.gutierrez@amd.com lsqIgnoredResponses 22111308Santhony.gutierrez@amd.com .name(name() + ".ignoredResponses") 22211308Santhony.gutierrez@amd.com .desc("Number of memory responses ignored because the instruction is squashed"); 22311308Santhony.gutierrez@amd.com 22411308Santhony.gutierrez@amd.com lsqMemOrderViolation 22511308Santhony.gutierrez@amd.com .name(name() + ".memOrderViolation") 22611308Santhony.gutierrez@amd.com .desc("Number of memory ordering violations"); 22711308Santhony.gutierrez@amd.com 22811308Santhony.gutierrez@amd.com lsqSquashedStores 22911308Santhony.gutierrez@amd.com .name(name() + ".squashedStores") 23011308Santhony.gutierrez@amd.com .desc("Number of stores squashed"); 23111308Santhony.gutierrez@amd.com 23211308Santhony.gutierrez@amd.com invAddrSwpfs 23311308Santhony.gutierrez@amd.com .name(name() + ".invAddrSwpfs") 23411308Santhony.gutierrez@amd.com .desc("Number of software prefetches ignored due to an invalid address"); 23511308Santhony.gutierrez@amd.com 23611308Santhony.gutierrez@amd.com lsqBlockedLoads 23711308Santhony.gutierrez@amd.com .name(name() + ".blockedLoads") 23811308Santhony.gutierrez@amd.com .desc("Number of blocked loads due to partial load-store forwarding"); 23911308Santhony.gutierrez@amd.com 24011308Santhony.gutierrez@amd.com lsqRescheduledLoads 24111308Santhony.gutierrez@amd.com .name(name() + ".rescheduledLoads") 24211308Santhony.gutierrez@amd.com .desc("Number of loads that were rescheduled"); 24311308Santhony.gutierrez@amd.com 24411308Santhony.gutierrez@amd.com lsqCacheBlocked 24511308Santhony.gutierrez@amd.com .name(name() + ".cacheBlocked") 24611308Santhony.gutierrez@amd.com .desc("Number of times an access to memory failed due to the cache being blocked"); 24711308Santhony.gutierrez@amd.com} 24811308Santhony.gutierrez@amd.com 24911308Santhony.gutierrez@amd.comtemplate<class Impl> 25011308Santhony.gutierrez@amd.comvoid 25111308Santhony.gutierrez@amd.comLSQUnit<Impl>::setDcachePort(MasterPort *dcache_port) 25211308Santhony.gutierrez@amd.com{ 25311308Santhony.gutierrez@amd.com dcachePort = dcache_port; 25411308Santhony.gutierrez@amd.com} 25511308Santhony.gutierrez@amd.com 25611308Santhony.gutierrez@amd.comtemplate<class Impl> 25711308Santhony.gutierrez@amd.comvoid 25811308Santhony.gutierrez@amd.comLSQUnit<Impl>::drainSanityCheck() const 25911308Santhony.gutierrez@amd.com{ 26011308Santhony.gutierrez@amd.com for (int i = 0; i < loadQueue.capacity(); ++i) 26111308Santhony.gutierrez@amd.com assert(!loadQueue[i].valid()); 26211308Santhony.gutierrez@amd.com 26311308Santhony.gutierrez@amd.com assert(storesToWB == 0); 26411308Santhony.gutierrez@amd.com assert(!retryPkt); 26511308Santhony.gutierrez@amd.com} 26611308Santhony.gutierrez@amd.com 26711308Santhony.gutierrez@amd.comtemplate<class Impl> 26811308Santhony.gutierrez@amd.comvoid 26911308Santhony.gutierrez@amd.comLSQUnit<Impl>::takeOverFrom() 27011308Santhony.gutierrez@amd.com{ 27111308Santhony.gutierrez@amd.com resetState(); 27211308Santhony.gutierrez@amd.com} 27311308Santhony.gutierrez@amd.com 27411308Santhony.gutierrez@amd.comtemplate <class Impl> 27511308Santhony.gutierrez@amd.comvoid 27611308Santhony.gutierrez@amd.comLSQUnit<Impl>::insert(const DynInstPtr &inst) 27711308Santhony.gutierrez@amd.com{ 27811308Santhony.gutierrez@amd.com assert(inst->isMemRef()); 27911308Santhony.gutierrez@amd.com 28011308Santhony.gutierrez@amd.com assert(inst->isLoad() || inst->isStore() || inst->isAtomic()); 28111308Santhony.gutierrez@amd.com 28211308Santhony.gutierrez@amd.com if (inst->isLoad()) { 28311308Santhony.gutierrez@amd.com insertLoad(inst); 28411308Santhony.gutierrez@amd.com } else { 28511308Santhony.gutierrez@amd.com insertStore(inst); 28611308Santhony.gutierrez@amd.com } 28711308Santhony.gutierrez@amd.com 28811308Santhony.gutierrez@amd.com inst->setInLSQ(); 28911308Santhony.gutierrez@amd.com} 29011308Santhony.gutierrez@amd.com 29111308Santhony.gutierrez@amd.comtemplate <class Impl> 29211308Santhony.gutierrez@amd.comvoid 29311308Santhony.gutierrez@amd.comLSQUnit<Impl>::insertLoad(const DynInstPtr &load_inst) 29411308Santhony.gutierrez@amd.com{ 29511308Santhony.gutierrez@amd.com assert(!loadQueue.full()); 29611308Santhony.gutierrez@amd.com assert(loads < loadQueue.capacity()); 29711308Santhony.gutierrez@amd.com 29811308Santhony.gutierrez@amd.com DPRINTF(LSQUnit, "Inserting load PC %s, idx:%i [sn:%lli]\n", 29911308Santhony.gutierrez@amd.com load_inst->pcState(), loadQueue.tail(), load_inst->seqNum); 30011308Santhony.gutierrez@amd.com 30111308Santhony.gutierrez@amd.com /* Grow the queue. */ 30211308Santhony.gutierrez@amd.com loadQueue.advance_tail(); 30311308Santhony.gutierrez@amd.com 30411308Santhony.gutierrez@amd.com load_inst->sqIt = storeQueue.end(); 30511308Santhony.gutierrez@amd.com 30611308Santhony.gutierrez@amd.com assert(!loadQueue.back().valid()); 30711308Santhony.gutierrez@amd.com loadQueue.back().set(load_inst); 30811308Santhony.gutierrez@amd.com load_inst->lqIdx = loadQueue.tail(); 30911308Santhony.gutierrez@amd.com load_inst->lqIt = loadQueue.getIterator(load_inst->lqIdx); 31011308Santhony.gutierrez@amd.com 31111308Santhony.gutierrez@amd.com ++loads; 31211308Santhony.gutierrez@amd.com} 31311308Santhony.gutierrez@amd.com 31411308Santhony.gutierrez@amd.comtemplate <class Impl> 31511308Santhony.gutierrez@amd.comvoid 31611308Santhony.gutierrez@amd.comLSQUnit<Impl>::insertStore(const DynInstPtr& store_inst) 31711308Santhony.gutierrez@amd.com{ 31811308Santhony.gutierrez@amd.com // Make sure it is not full before inserting an instruction. 31911308Santhony.gutierrez@amd.com assert(!storeQueue.full()); 32011308Santhony.gutierrez@amd.com assert(stores < storeQueue.capacity()); 32111308Santhony.gutierrez@amd.com 32211308Santhony.gutierrez@amd.com DPRINTF(LSQUnit, "Inserting store PC %s, idx:%i [sn:%lli]\n", 32311308Santhony.gutierrez@amd.com store_inst->pcState(), storeQueue.tail(), store_inst->seqNum); 32411308Santhony.gutierrez@amd.com storeQueue.advance_tail(); 32511308Santhony.gutierrez@amd.com 32611308Santhony.gutierrez@amd.com store_inst->sqIdx = storeQueue.tail(); 32711308Santhony.gutierrez@amd.com store_inst->lqIdx = loadQueue.moduloAdd(loadQueue.tail(), 1); 32811308Santhony.gutierrez@amd.com store_inst->lqIt = loadQueue.end(); 32911308Santhony.gutierrez@amd.com 33011308Santhony.gutierrez@amd.com storeQueue.back().set(store_inst); 33111308Santhony.gutierrez@amd.com 33211308Santhony.gutierrez@amd.com ++stores; 33311308Santhony.gutierrez@amd.com} 33411308Santhony.gutierrez@amd.com 33511308Santhony.gutierrez@amd.comtemplate <class Impl> 33611308Santhony.gutierrez@amd.comtypename Impl::DynInstPtr 33711308Santhony.gutierrez@amd.comLSQUnit<Impl>::getMemDepViolator() 33811308Santhony.gutierrez@amd.com{ 33911308Santhony.gutierrez@amd.com DynInstPtr temp = memDepViolator; 34011308Santhony.gutierrez@amd.com 34111308Santhony.gutierrez@amd.com memDepViolator = NULL; 342 343 return temp; 344} 345 346template <class Impl> 347unsigned 348LSQUnit<Impl>::numFreeLoadEntries() 349{ 350 //LQ has an extra dummy entry to differentiate 351 //empty/full conditions. Subtract 1 from the free entries. 352 DPRINTF(LSQUnit, "LQ size: %d, #loads occupied: %d\n", 353 1 + loadQueue.capacity(), loads); 354 return loadQueue.capacity() - loads; 355} 356 357template <class Impl> 358unsigned 359LSQUnit<Impl>::numFreeStoreEntries() 360{ 361 //SQ has an extra dummy entry to differentiate 362 //empty/full conditions. Subtract 1 from the free entries. 363 DPRINTF(LSQUnit, "SQ size: %d, #stores occupied: %d\n", 364 1 + storeQueue.capacity(), stores); 365 return storeQueue.capacity() - stores; 366 367 } 368 369template <class Impl> 370void 371LSQUnit<Impl>::checkSnoop(PacketPtr pkt) 372{ 373 // Should only ever get invalidations in here 374 assert(pkt->isInvalidate()); 375 376 DPRINTF(LSQUnit, "Got snoop for address %#x\n", pkt->getAddr()); 377 378 for (int x = 0; x < cpu->numContexts(); x++) { 379 ThreadContext *tc = cpu->getContext(x); 380 bool no_squash = cpu->thread[x]->noSquashFromTC; 381 cpu->thread[x]->noSquashFromTC = true; 382 TheISA::handleLockedSnoop(tc, pkt, cacheBlockMask); 383 cpu->thread[x]->noSquashFromTC = no_squash; 384 } 385 386 if (loadQueue.empty()) 387 return; 388 389 auto iter = loadQueue.begin(); 390 391 Addr invalidate_addr = pkt->getAddr() & cacheBlockMask; 392 393 DynInstPtr ld_inst = iter->instruction(); 394 assert(ld_inst); 395 LSQRequest *req = iter->request(); 396 397 // Check that this snoop didn't just invalidate our lock flag 398 if (ld_inst->effAddrValid() && 399 req->isCacheBlockHit(invalidate_addr, cacheBlockMask) 400 && ld_inst->memReqFlags & Request::LLSC) 401 TheISA::handleLockedSnoopHit(ld_inst.get()); 402 403 bool force_squash = false; 404 405 while (++iter != loadQueue.end()) { 406 ld_inst = iter->instruction(); 407 assert(ld_inst); 408 req = iter->request(); 409 if (!ld_inst->effAddrValid() || ld_inst->strictlyOrdered()) 410 continue; 411 412 DPRINTF(LSQUnit, "-- inst [sn:%lli] to pktAddr:%#x\n", 413 ld_inst->seqNum, invalidate_addr); 414 415 if (force_squash || 416 req->isCacheBlockHit(invalidate_addr, cacheBlockMask)) { 417 if (needsTSO) { 418 // If we have a TSO system, as all loads must be ordered with 419 // all other loads, this load as well as *all* subsequent loads 420 // need to be squashed to prevent possible load reordering. 421 force_squash = true; 422 } 423 if (ld_inst->possibleLoadViolation() || force_squash) { 424 DPRINTF(LSQUnit, "Conflicting load at addr %#x [sn:%lli]\n", 425 pkt->getAddr(), ld_inst->seqNum); 426 427 // Mark the load for re-execution 428 ld_inst->fault = std::make_shared<ReExec>(); 429 } else { 430 DPRINTF(LSQUnit, "HitExternal Snoop for addr %#x [sn:%lli]\n", 431 pkt->getAddr(), ld_inst->seqNum); 432 433 // Make sure that we don't lose a snoop hitting a LOCKED 434 // address since the LOCK* flags don't get updated until 435 // commit. 436 if (ld_inst->memReqFlags & Request::LLSC) 437 TheISA::handleLockedSnoopHit(ld_inst.get()); 438 439 // If a older load checks this and it's true 440 // then we might have missed the snoop 441 // in which case we need to invalidate to be sure 442 ld_inst->hitExternalSnoop(true); 443 } 444 } 445 } 446 return; 447} 448 449template <class Impl> 450Fault 451LSQUnit<Impl>::checkViolations(typename LoadQueue::iterator& loadIt, 452 const DynInstPtr& inst) 453{ 454 Addr inst_eff_addr1 = inst->effAddr >> depCheckShift; 455 Addr inst_eff_addr2 = (inst->effAddr + inst->effSize - 1) >> depCheckShift; 456 457 /** @todo in theory you only need to check an instruction that has executed 458 * however, there isn't a good way in the pipeline at the moment to check 459 * all instructions that will execute before the store writes back. Thus, 460 * like the implementation that came before it, we're overly conservative. 461 */ 462 while (loadIt != loadQueue.end()) { 463 DynInstPtr ld_inst = loadIt->instruction(); 464 if (!ld_inst->effAddrValid() || ld_inst->strictlyOrdered()) { 465 ++loadIt; 466 continue; 467 } 468 469 Addr ld_eff_addr1 = ld_inst->effAddr >> depCheckShift; 470 Addr ld_eff_addr2 = 471 (ld_inst->effAddr + ld_inst->effSize - 1) >> depCheckShift; 472 473 if (inst_eff_addr2 >= ld_eff_addr1 && inst_eff_addr1 <= ld_eff_addr2) { 474 if (inst->isLoad()) { 475 // If this load is to the same block as an external snoop 476 // invalidate that we've observed then the load needs to be 477 // squashed as it could have newer data 478 if (ld_inst->hitExternalSnoop()) { 479 if (!memDepViolator || 480 ld_inst->seqNum < memDepViolator->seqNum) { 481 DPRINTF(LSQUnit, "Detected fault with inst [sn:%lli] " 482 "and [sn:%lli] at address %#x\n", 483 inst->seqNum, ld_inst->seqNum, ld_eff_addr1); 484 memDepViolator = ld_inst; 485 486 ++lsqMemOrderViolation; 487 488 return std::make_shared<GenericISA::M5PanicFault>( 489 "Detected fault with inst [sn:%lli] and " 490 "[sn:%lli] at address %#x\n", 491 inst->seqNum, ld_inst->seqNum, ld_eff_addr1); 492 } 493 } 494 495 // Otherwise, mark the load has a possible load violation 496 // and if we see a snoop before it's commited, we need to squash 497 ld_inst->possibleLoadViolation(true); 498 DPRINTF(LSQUnit, "Found possible load violation at addr: %#x" 499 " between instructions [sn:%lli] and [sn:%lli]\n", 500 inst_eff_addr1, inst->seqNum, ld_inst->seqNum); 501 } else { 502 // A load/store incorrectly passed this store. 503 // Check if we already have a violator, or if it's newer 504 // squash and refetch. 505 if (memDepViolator && ld_inst->seqNum > memDepViolator->seqNum) 506 break; 507 508 DPRINTF(LSQUnit, "Detected fault with inst [sn:%lli] and " 509 "[sn:%lli] at address %#x\n", 510 inst->seqNum, ld_inst->seqNum, ld_eff_addr1); 511 memDepViolator = ld_inst; 512 513 ++lsqMemOrderViolation; 514 515 return std::make_shared<GenericISA::M5PanicFault>( 516 "Detected fault with " 517 "inst [sn:%lli] and [sn:%lli] at address %#x\n", 518 inst->seqNum, ld_inst->seqNum, ld_eff_addr1); 519 } 520 } 521 522 ++loadIt; 523 } 524 return NoFault; 525} 526 527 528 529 530template <class Impl> 531Fault 532LSQUnit<Impl>::executeLoad(const DynInstPtr &inst) 533{ 534 using namespace TheISA; 535 // Execute a specific load. 536 Fault load_fault = NoFault; 537 538 DPRINTF(LSQUnit, "Executing load PC %s, [sn:%lli]\n", 539 inst->pcState(), inst->seqNum); 540 541 assert(!inst->isSquashed()); 542 543 load_fault = inst->initiateAcc(); 544 545 if (inst->isTranslationDelayed() && load_fault == NoFault) 546 return load_fault; 547 548 // If the instruction faulted or predicated false, then we need to send it 549 // along to commit without the instruction completing. 550 if (load_fault != NoFault || !inst->readPredicate()) { 551 // Send this instruction to commit, also make sure iew stage 552 // realizes there is activity. Mark it as executed unless it 553 // is a strictly ordered load that needs to hit the head of 554 // commit. 555 if (!inst->readPredicate()) 556 inst->forwardOldRegs(); 557 DPRINTF(LSQUnit, "Load [sn:%lli] not executed from %s\n", 558 inst->seqNum, 559 (load_fault != NoFault ? "fault" : "predication")); 560 if (!(inst->hasRequest() && inst->strictlyOrdered()) || 561 inst->isAtCommit()) { 562 inst->setExecuted(); 563 } 564 iewStage->instToCommit(inst); 565 iewStage->activityThisCycle(); 566 } else { 567 if (inst->effAddrValid()) { 568 auto it = inst->lqIt; 569 ++it; 570 571 if (checkLoads) 572 return checkViolations(it, inst); 573 } 574 } 575 576 return load_fault; 577} 578 579template <class Impl> 580Fault 581LSQUnit<Impl>::executeStore(const DynInstPtr &store_inst) 582{ 583 using namespace TheISA; 584 // Make sure that a store exists. 585 assert(stores != 0); 586 587 int store_idx = store_inst->sqIdx; 588 589 DPRINTF(LSQUnit, "Executing store PC %s [sn:%lli]\n", 590 store_inst->pcState(), store_inst->seqNum); 591 592 assert(!store_inst->isSquashed()); 593 594 // Check the recently completed loads to see if any match this store's 595 // address. If so, then we have a memory ordering violation. 596 typename LoadQueue::iterator loadIt = store_inst->lqIt; 597 598 Fault store_fault = store_inst->initiateAcc(); 599 600 if (store_inst->isTranslationDelayed() && 601 store_fault == NoFault) 602 return store_fault; 603 604 if (!store_inst->readPredicate()) { 605 DPRINTF(LSQUnit, "Store [sn:%lli] not executed from predication\n", 606 store_inst->seqNum); 607 store_inst->forwardOldRegs(); 608 return store_fault; 609 } 610 611 if (storeQueue[store_idx].size() == 0) { 612 DPRINTF(LSQUnit,"Fault on Store PC %s, [sn:%lli], Size = 0\n", 613 store_inst->pcState(), store_inst->seqNum); 614 615 return store_fault; 616 } 617 618 assert(store_fault == NoFault); 619 620 if (store_inst->isStoreConditional() || store_inst->isAtomic()) { 621 // Store conditionals and Atomics need to set themselves as able to 622 // writeback if we haven't had a fault by here. 623 storeQueue[store_idx].canWB() = true; 624 625 ++storesToWB; 626 } 627 628 return checkViolations(loadIt, store_inst); 629 630} 631 632template <class Impl> 633void 634LSQUnit<Impl>::commitLoad() 635{ 636 assert(loadQueue.front().valid()); 637 638 DPRINTF(LSQUnit, "Committing head load instruction, PC %s\n", 639 loadQueue.front().instruction()->pcState()); 640 641 loadQueue.front().clear(); 642 loadQueue.pop_front(); 643 644 --loads; 645} 646 647template <class Impl> 648void 649LSQUnit<Impl>::commitLoads(InstSeqNum &youngest_inst) 650{ 651 assert(loads == 0 || loadQueue.front().valid()); 652 653 while (loads != 0 && loadQueue.front().instruction()->seqNum 654 <= youngest_inst) { 655 commitLoad(); 656 } 657} 658 659template <class Impl> 660void 661LSQUnit<Impl>::commitStores(InstSeqNum &youngest_inst) 662{ 663 assert(stores == 0 || storeQueue.front().valid()); 664 665 /* Forward iterate the store queue (age order). */ 666 for (auto& x : storeQueue) { 667 assert(x.valid()); 668 // Mark any stores that are now committed and have not yet 669 // been marked as able to write back. 670 if (!x.canWB()) { 671 if (x.instruction()->seqNum > youngest_inst) { 672 break; 673 } 674 DPRINTF(LSQUnit, "Marking store as able to write back, PC " 675 "%s [sn:%lli]\n", 676 x.instruction()->pcState(), 677 x.instruction()->seqNum); 678 679 x.canWB() = true; 680 681 ++storesToWB; 682 } 683 } 684} 685 686template <class Impl> 687void 688LSQUnit<Impl>::writebackBlockedStore() 689{ 690 assert(isStoreBlocked); 691 storeWBIt->request()->sendPacketToCache(); 692 if (storeWBIt->request()->isSent()){ 693 storePostSend(); 694 } 695} 696 697template <class Impl> 698void 699LSQUnit<Impl>::writebackStores() 700{ 701 if (isStoreBlocked) { 702 DPRINTF(LSQUnit, "Writing back blocked store\n"); 703 writebackBlockedStore(); 704 } 705 706 while (storesToWB > 0 && 707 storeWBIt.dereferenceable() && 708 storeWBIt->valid() && 709 storeWBIt->canWB() && 710 ((!needsTSO) || (!storeInFlight)) && 711 lsq->cachePortAvailable(false)) { 712 713 if (isStoreBlocked) { 714 DPRINTF(LSQUnit, "Unable to write back any more stores, cache" 715 " is blocked!\n"); 716 break; 717 } 718 719 // Store didn't write any data so no need to write it back to 720 // memory. 721 if (storeWBIt->size() == 0) { 722 /* It is important that the preincrement happens at (or before) 723 * the call, as the the code of completeStore checks 724 * storeWBIt. */ 725 completeStore(storeWBIt++); 726 continue; 727 } 728 729 if (storeWBIt->instruction()->isDataPrefetch()) { 730 storeWBIt++; 731 continue; 732 } 733 734 assert(storeWBIt->hasRequest()); 735 assert(!storeWBIt->committed()); 736 737 DynInstPtr inst = storeWBIt->instruction(); 738 LSQRequest* req = storeWBIt->request(); 739 storeWBIt->committed() = true; 740 741 assert(!inst->memData); 742 inst->memData = new uint8_t[req->_size]; 743 744 if (storeWBIt->isAllZeros()) 745 memset(inst->memData, 0, req->_size); 746 else 747 memcpy(inst->memData, storeWBIt->data(), req->_size); 748 749 750 if (req->senderState() == nullptr) { 751 SQSenderState *state = new SQSenderState(storeWBIt); 752 state->isLoad = false; 753 state->needWB = false; 754 state->inst = inst; 755 756 req->senderState(state); 757 if (inst->isStoreConditional() || inst->isAtomic()) { 758 /* Only store conditionals and atomics need a writeback. */ 759 state->needWB = true; 760 } 761 } 762 req->buildPackets(); 763 764 DPRINTF(LSQUnit, "D-Cache: Writing back store idx:%i PC:%s " 765 "to Addr:%#x, data:%#x [sn:%lli]\n", 766 storeWBIt.idx(), inst->pcState(), 767 req->request()->getPaddr(), (int)*(inst->memData), 768 inst->seqNum); 769 770 // @todo: Remove this SC hack once the memory system handles it. 771 if (inst->isStoreConditional()) { 772 // Disable recording the result temporarily. Writing to 773 // misc regs normally updates the result, but this is not 774 // the desired behavior when handling store conditionals. 775 inst->recordResult(false); 776 bool success = TheISA::handleLockedWrite(inst.get(), 777 req->request(), cacheBlockMask); 778 inst->recordResult(true); 779 req->packetSent(); 780 781 if (!success) { 782 req->complete(); 783 // Instantly complete this store. 784 DPRINTF(LSQUnit, "Store conditional [sn:%lli] failed. " 785 "Instantly completing it.\n", 786 inst->seqNum); 787 PacketPtr new_pkt = new Packet(*req->packet()); 788 WritebackEvent *wb = new WritebackEvent(inst, 789 new_pkt, this); 790 cpu->schedule(wb, curTick() + 1); 791 completeStore(storeWBIt); 792 if (!storeQueue.empty()) 793 storeWBIt++; 794 else 795 storeWBIt = storeQueue.end(); 796 continue; 797 } 798 } 799 800 if (req->request()->isMmappedIpr()) { 801 assert(!inst->isStoreConditional()); 802 ThreadContext *thread = cpu->tcBase(lsqID); 803 PacketPtr main_pkt = new Packet(req->mainRequest(), 804 MemCmd::WriteReq); 805 main_pkt->dataStatic(inst->memData); 806 req->handleIprWrite(thread, main_pkt); 807 delete main_pkt; 808 completeStore(storeWBIt); 809 storeWBIt++; 810 continue; 811 } 812 /* Send to cache */ 813 req->sendPacketToCache(); 814 815 /* If successful, do the post send */ 816 if (req->isSent()) { 817 storePostSend(); 818 } else { 819 DPRINTF(LSQUnit, "D-Cache became blocked when writing [sn:%lli], " 820 "will retry later\n", 821 inst->seqNum); 822 } 823 } 824 assert(stores >= 0 && storesToWB >= 0); 825} 826 827template <class Impl> 828void 829LSQUnit<Impl>::squash(const InstSeqNum &squashed_num) 830{ 831 DPRINTF(LSQUnit, "Squashing until [sn:%lli]!" 832 "(Loads:%i Stores:%i)\n", squashed_num, loads, stores); 833 834 while (loads != 0 && 835 loadQueue.back().instruction()->seqNum > squashed_num) { 836 DPRINTF(LSQUnit,"Load Instruction PC %s squashed, " 837 "[sn:%lli]\n", 838 loadQueue.back().instruction()->pcState(), 839 loadQueue.back().instruction()->seqNum); 840 841 if (isStalled() && loadQueue.tail() == stallingLoadIdx) { 842 stalled = false; 843 stallingStoreIsn = 0; 844 stallingLoadIdx = 0; 845 } 846 847 // Clear the smart pointer to make sure it is decremented. 848 loadQueue.back().instruction()->setSquashed(); 849 loadQueue.back().clear(); 850 851 --loads; 852 853 loadQueue.pop_back(); 854 ++lsqSquashedLoads; 855 } 856 857 if (memDepViolator && squashed_num < memDepViolator->seqNum) { 858 memDepViolator = NULL; 859 } 860 861 while (stores != 0 && 862 storeQueue.back().instruction()->seqNum > squashed_num) { 863 // Instructions marked as can WB are already committed. 864 if (storeQueue.back().canWB()) { 865 break; 866 } 867 868 DPRINTF(LSQUnit,"Store Instruction PC %s squashed, " 869 "idx:%i [sn:%lli]\n", 870 storeQueue.back().instruction()->pcState(), 871 storeQueue.tail(), storeQueue.back().instruction()->seqNum); 872 873 // I don't think this can happen. It should have been cleared 874 // by the stalling load. 875 if (isStalled() && 876 storeQueue.back().instruction()->seqNum == stallingStoreIsn) { 877 panic("Is stalled should have been cleared by stalling load!\n"); 878 stalled = false; 879 stallingStoreIsn = 0; 880 } 881 882 // Clear the smart pointer to make sure it is decremented. 883 storeQueue.back().instruction()->setSquashed(); 884 885 // Must delete request now that it wasn't handed off to 886 // memory. This is quite ugly. @todo: Figure out the proper 887 // place to really handle request deletes. 888 storeQueue.back().clear(); 889 --stores; 890 891 storeQueue.pop_back(); 892 ++lsqSquashedStores; 893 } 894} 895 896template <class Impl> 897void 898LSQUnit<Impl>::storePostSend() 899{ 900 if (isStalled() && 901 storeWBIt->instruction()->seqNum == stallingStoreIsn) { 902 DPRINTF(LSQUnit, "Unstalling, stalling store [sn:%lli] " 903 "load idx:%i\n", 904 stallingStoreIsn, stallingLoadIdx); 905 stalled = false; 906 stallingStoreIsn = 0; 907 iewStage->replayMemInst(loadQueue[stallingLoadIdx].instruction()); 908 } 909 910 if (!storeWBIt->instruction()->isStoreConditional()) { 911 // The store is basically completed at this time. This 912 // only works so long as the checker doesn't try to 913 // verify the value in memory for stores. 914 storeWBIt->instruction()->setCompleted(); 915 916 if (cpu->checker) { 917 cpu->checker->verify(storeWBIt->instruction()); 918 } 919 } 920 921 if (needsTSO) { 922 storeInFlight = true; 923 } 924 925 storeWBIt++; 926} 927 928template <class Impl> 929void 930LSQUnit<Impl>::writeback(const DynInstPtr &inst, PacketPtr pkt) 931{ 932 iewStage->wakeCPU(); 933 934 // Squashed instructions do not need to complete their access. 935 if (inst->isSquashed()) { 936 assert(!inst->isStore()); 937 ++lsqIgnoredResponses; 938 return; 939 } 940 941 if (!inst->isExecuted()) { 942 inst->setExecuted(); 943 944 if (inst->fault == NoFault) { 945 // Complete access to copy data to proper place. 946 inst->completeAcc(pkt); 947 } else { 948 // If the instruction has an outstanding fault, we cannot complete 949 // the access as this discards the current fault. 950 951 // If we have an outstanding fault, the fault should only be of 952 // type ReExec. 953 assert(dynamic_cast<ReExec*>(inst->fault.get()) != nullptr); 954 955 DPRINTF(LSQUnit, "Not completing instruction [sn:%lli] access " 956 "due to pending fault.\n", inst->seqNum); 957 } 958 } 959 960 // Need to insert instruction into queue to commit 961 iewStage->instToCommit(inst); 962 963 iewStage->activityThisCycle(); 964 965 // see if this load changed the PC 966 iewStage->checkMisprediction(inst); 967} 968 969template <class Impl> 970void 971LSQUnit<Impl>::completeStore(typename StoreQueue::iterator store_idx) 972{ 973 assert(store_idx->valid()); 974 store_idx->completed() = true; 975 --storesToWB; 976 // A bit conservative because a store completion may not free up entries, 977 // but hopefully avoids two store completions in one cycle from making 978 // the CPU tick twice. 979 cpu->wakeCPU(); 980 cpu->activityThisCycle(); 981 982 /* We 'need' a copy here because we may clear the entry from the 983 * store queue. */ 984 DynInstPtr store_inst = store_idx->instruction(); 985 if (store_idx == storeQueue.begin()) { 986 do { 987 storeQueue.front().clear(); 988 storeQueue.pop_front(); 989 --stores; 990 } while (storeQueue.front().completed() && 991 !storeQueue.empty()); 992 993 iewStage->updateLSQNextCycle = true; 994 } 995 996 DPRINTF(LSQUnit, "Completing store [sn:%lli], idx:%i, store head " 997 "idx:%i\n", 998 store_inst->seqNum, store_idx.idx() - 1, storeQueue.head() - 1); 999 1000#if TRACING_ON 1001 if (DTRACE(O3PipeView)) { 1002 store_idx->instruction()->storeTick = 1003 curTick() - store_idx->instruction()->fetchTick; 1004 } 1005#endif 1006 1007 if (isStalled() && 1008 store_inst->seqNum == stallingStoreIsn) { 1009 DPRINTF(LSQUnit, "Unstalling, stalling store [sn:%lli] " 1010 "load idx:%i\n", 1011 stallingStoreIsn, stallingLoadIdx); 1012 stalled = false; 1013 stallingStoreIsn = 0; 1014 iewStage->replayMemInst(loadQueue[stallingLoadIdx].instruction()); 1015 } 1016 1017 store_inst->setCompleted(); 1018 1019 if (needsTSO) { 1020 storeInFlight = false; 1021 } 1022 1023 // Tell the checker we've completed this instruction. Some stores 1024 // may get reported twice to the checker, but the checker can 1025 // handle that case. 1026 // Store conditionals cannot be sent to the checker yet, they have 1027 // to update the misc registers first which should take place 1028 // when they commit 1029 if (cpu->checker && !store_inst->isStoreConditional()) { 1030 cpu->checker->verify(store_inst); 1031 } 1032} 1033 1034template <class Impl> 1035bool 1036LSQUnit<Impl>::trySendPacket(bool isLoad, PacketPtr data_pkt) 1037{ 1038 bool ret = true; 1039 bool cache_got_blocked = false; 1040 1041 auto state = dynamic_cast<LSQSenderState*>(data_pkt->senderState); 1042 1043 if (!lsq->cacheBlocked() && 1044 lsq->cachePortAvailable(isLoad)) { 1045 if (!dcachePort->sendTimingReq(data_pkt)) { 1046 ret = false; 1047 cache_got_blocked = true; 1048 } 1049 } else { 1050 ret = false; 1051 } 1052 1053 if (ret) { 1054 if (!isLoad) { 1055 isStoreBlocked = false; 1056 } 1057 lsq->cachePortBusy(isLoad); 1058 state->outstanding++; 1059 state->request()->packetSent(); 1060 } else { 1061 if (cache_got_blocked) { 1062 lsq->cacheBlocked(true); 1063 ++lsqCacheBlocked; 1064 } 1065 if (!isLoad) { 1066 assert(state->request() == storeWBIt->request()); 1067 isStoreBlocked = true; 1068 } 1069 state->request()->packetNotSent(); 1070 } 1071 return ret; 1072} 1073 1074template <class Impl> 1075void 1076LSQUnit<Impl>::recvRetry() 1077{ 1078 if (isStoreBlocked) { 1079 DPRINTF(LSQUnit, "Receiving retry: blocked store\n"); 1080 writebackBlockedStore(); 1081 } 1082} 1083 1084template <class Impl> 1085void 1086LSQUnit<Impl>::dumpInsts() const 1087{ 1088 cprintf("Load store queue: Dumping instructions.\n"); 1089 cprintf("Load queue size: %i\n", loads); 1090 cprintf("Load queue: "); 1091 1092 for (const auto& e: loadQueue) { 1093 const DynInstPtr &inst(e.instruction()); 1094 cprintf("%s.[sn:%i] ", inst->pcState(), inst->seqNum); 1095 } 1096 cprintf("\n"); 1097 1098 cprintf("Store queue size: %i\n", stores); 1099 cprintf("Store queue: "); 1100 1101 for (const auto& e: storeQueue) { 1102 const DynInstPtr &inst(e.instruction()); 1103 cprintf("%s.[sn:%i] ", inst->pcState(), inst->seqNum); 1104 } 1105 1106 cprintf("\n"); 1107} 1108 1109template <class Impl> 1110unsigned int 1111LSQUnit<Impl>::cacheLineSize() 1112{ 1113 return cpu->cacheLineSize(); 1114} 1115 1116#endif//__CPU_O3_LSQ_UNIT_IMPL_HH__ 1117