// lsq_unit_impl.hh (gem5, revision 10333)
/*
 * Copyright (c) 2010-2014 ARM Limited
 * Copyright (c) 2013 Advanced Micro Devices, Inc.
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2004-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Kevin Lim
 *          Korey Sewell
 */

#ifndef __CPU_O3_LSQ_UNIT_IMPL_HH__
#define __CPU_O3_LSQ_UNIT_IMPL_HH__

#include "arch/generic/debugfaults.hh"
#include "arch/locked_mem.hh"
#include "base/str.hh"
#include "config/the_isa.hh"
#include "cpu/checker/cpu.hh"
#include "cpu/o3/lsq.hh"
#include "cpu/o3/lsq_unit.hh"
#include "debug/Activity.hh"
#include "debug/IEW.hh"
#include "debug/LSQUnit.hh"
#include "debug/O3PipeView.hh"
#include "mem/packet.hh"
#include "mem/request.hh"

template<class Impl>
LSQUnit<Impl>::WritebackEvent::WritebackEvent(DynInstPtr &_inst, PacketPtr _pkt,
                                              LSQUnit *lsq_ptr)
    : Event(Default_Pri, AutoDelete),
      inst(_inst), pkt(_pkt), lsqPtr(lsq_ptr)
{
}

template<class Impl>
void
LSQUnit<Impl>::WritebackEvent::process()
{
    assert(!lsqPtr->cpu->switchedOut());

    lsqPtr->writeback(inst, pkt);

    if (pkt->senderState)
        delete pkt->senderState;

    delete pkt->req;
    delete pkt;
}

template<class Impl>
const char *
LSQUnit<Impl>::WritebackEvent::description() const
{
    return "Store writeback";
}

template<class Impl>
void
LSQUnit<Impl>::completeDataAccess(PacketPtr pkt)
{
    LSQSenderState *state = dynamic_cast<LSQSenderState *>(pkt->senderState);
    DynInstPtr inst = state->inst;
    DPRINTF(IEW, "Writeback event [sn:%lli].\n", inst->seqNum);
    DPRINTF(Activity, "Activity: Writeback event [sn:%lli].\n", inst->seqNum);

    if (state->cacheBlocked) {
        // This is the first half of a previous split load,
        // where the 2nd half blocked, ignore this response
        DPRINTF(IEW, "[sn:%lli]: Response from first half of earlier "
                "blocked split load received. Ignoring.\n", inst->seqNum);
        delete state;
        delete pkt->req;
        delete pkt;
        return;
    }

    // If this is a split access, wait until all packets are received.
    if (TheISA::HasUnalignedMemAcc && !state->complete()) {
        delete pkt->req;
        delete pkt;
        return;
    }

    assert(!cpu->switchedOut());
    if (!inst->isSquashed()) {
        if (!state->noWB) {
            if (!TheISA::HasUnalignedMemAcc || !state->isSplit ||
                !state->isLoad) {
                writeback(inst, pkt);
            } else {
                writeback(inst, state->mainPkt);
            }
        }

        if (inst->isStore()) {
            completeStore(state->idx);
        }
    }

    if (TheISA::HasUnalignedMemAcc && state->isSplit && state->isLoad) {
        delete state->mainPkt->req;
        delete state->mainPkt;
    }

    pkt->req->setAccessLatency();
    cpu->ppDataAccessComplete->notify(std::make_pair(inst, pkt));

    delete state;
    delete pkt->req;
    delete pkt;
}

template <class Impl>
LSQUnit<Impl>::LSQUnit()
    : loads(0), stores(0), storesToWB(0), cacheBlockMask(0), stalled(false),
      isStoreBlocked(false), storeInFlight(false), hasPendingPkt(false)
{
}

template<class Impl>
void
LSQUnit<Impl>::init(O3CPU *cpu_ptr, IEW *iew_ptr, DerivO3CPUParams *params,
        LSQ *lsq_ptr, unsigned maxLQEntries, unsigned maxSQEntries,
        unsigned id)
{
    cpu = cpu_ptr;
    iewStage = iew_ptr;

    lsq = lsq_ptr;

    lsqID = id;

    DPRINTF(LSQUnit, "Creating LSQUnit%i object.\n", id);

    // Add 1 for the sentinel entry (they are circular queues).
    LQEntries = maxLQEntries + 1;
    SQEntries = maxSQEntries + 1;

    // Due to uint8_t index in LSQSenderState
    assert(LQEntries <= 256);
    assert(SQEntries <= 256);

    loadQueue.resize(LQEntries);
    storeQueue.resize(SQEntries);

    depCheckShift = params->LSQDepCheckShift;
    checkLoads = params->LSQCheckLoads;
    cachePorts = params->cachePorts;
    needsTSO = params->needsTSO;

    resetState();
}


template<class Impl>
void
LSQUnit<Impl>::resetState()
{
    loads = stores = storesToWB = 0;

    loadHead = loadTail = 0;

    storeHead = storeWBIdx = storeTail = 0;

    usedPorts = 0;

    retryPkt = NULL;
    memDepViolator = NULL;

    stalled = false;

    cacheBlockMask = ~(cpu->cacheLineSize() - 1);
}

template<class Impl>
std::string
LSQUnit<Impl>::name() const
{
    if (Impl::MaxThreads == 1) {
        return iewStage->name() + ".lsq";
    } else {
        return iewStage->name() + ".lsq.thread" + to_string(lsqID);
    }
}

template<class Impl>
void
LSQUnit<Impl>::regStats()
{
    lsqForwLoads
        .name(name() + ".forwLoads")
        .desc("Number of loads that had data forwarded from stores");

    invAddrLoads
        .name(name() + ".invAddrLoads")
        .desc("Number of loads ignored due to an invalid address");

    lsqSquashedLoads
        .name(name() + ".squashedLoads")
        .desc("Number of loads squashed");

    lsqIgnoredResponses
        .name(name() + ".ignoredResponses")
        .desc("Number of memory responses ignored because the instruction is squashed");

    lsqMemOrderViolation
        .name(name() + ".memOrderViolation")
        .desc("Number of memory ordering violations");

    lsqSquashedStores
        .name(name() + ".squashedStores")
        .desc("Number of stores squashed");

    invAddrSwpfs
        .name(name() + ".invAddrSwpfs")
        .desc("Number of software prefetches ignored due to an invalid address");

    lsqBlockedLoads
        .name(name() + ".blockedLoads")
        .desc("Number of blocked loads due to partial load-store forwarding");

    lsqRescheduledLoads
        .name(name() + ".rescheduledLoads")
        .desc("Number of loads that were rescheduled");

    lsqCacheBlocked
        .name(name() + ".cacheBlocked")
        .desc("Number of times an access to memory failed due to the cache being blocked");
}

template<class Impl>
void
LSQUnit<Impl>::setDcachePort(MasterPort *dcache_port)
{
    dcachePort = dcache_port;
}

template<class Impl>
void
LSQUnit<Impl>::clearLQ()
{
    loadQueue.clear();
}

template<class Impl>
void
LSQUnit<Impl>::clearSQ()
{
    storeQueue.clear();
}

template<class Impl>
void
LSQUnit<Impl>::drainSanityCheck() const
{
    for (int i = 0; i < loadQueue.size(); ++i)
        assert(!loadQueue[i]);

    assert(storesToWB == 0);
    assert(!retryPkt);
}

template<class Impl>
void
LSQUnit<Impl>::takeOverFrom()
{
    resetState();
}

template<class Impl>
void
LSQUnit<Impl>::resizeLQ(unsigned size)
{
    unsigned size_plus_sentinel = size + 1;
    assert(size_plus_sentinel >= LQEntries);

    if (size_plus_sentinel > LQEntries) {
        while (size_plus_sentinel > loadQueue.size()) {
            DynInstPtr dummy;
            loadQueue.push_back(dummy);
            LQEntries++;
        }
    } else {
        LQEntries = size_plus_sentinel;
    }

    assert(LQEntries <= 256);
}

template<class Impl>
void
LSQUnit<Impl>::resizeSQ(unsigned size)
{
    unsigned size_plus_sentinel = size + 1;
    if (size_plus_sentinel > SQEntries) {
        while (size_plus_sentinel > storeQueue.size()) {
            SQEntry dummy;
            storeQueue.push_back(dummy);
            SQEntries++;
        }
    } else {
        SQEntries = size_plus_sentinel;
    }

    assert(SQEntries <= 256);
}

template <class Impl>
void
LSQUnit<Impl>::insert(DynInstPtr &inst)
{
    assert(inst->isMemRef());

    assert(inst->isLoad() || inst->isStore());

    if (inst->isLoad()) {
        insertLoad(inst);
    } else {
        insertStore(inst);
    }

    inst->setInLSQ();
}

template <class Impl>
void
LSQUnit<Impl>::insertLoad(DynInstPtr &load_inst)
{
    assert((loadTail + 1) % LQEntries != loadHead);
    assert(loads < LQEntries);

    DPRINTF(LSQUnit, "Inserting load PC %s, idx:%i [sn:%lli]\n",
            load_inst->pcState(), loadTail, load_inst->seqNum);

    load_inst->lqIdx = loadTail;

    if (stores == 0) {
        load_inst->sqIdx = -1;
    } else {
        load_inst->sqIdx = storeTail;
    }

    loadQueue[loadTail] = load_inst;

    incrLdIdx(loadTail);

    ++loads;
}

template <class Impl>
void
LSQUnit<Impl>::insertStore(DynInstPtr &store_inst)
{
    // Make sure it is not full before inserting an instruction.
    assert((storeTail + 1) % SQEntries != storeHead);
    assert(stores < SQEntries);

    DPRINTF(LSQUnit, "Inserting store PC %s, idx:%i [sn:%lli]\n",
            store_inst->pcState(), storeTail, store_inst->seqNum);

    store_inst->sqIdx = storeTail;
    store_inst->lqIdx = loadTail;

    storeQueue[storeTail] = SQEntry(store_inst);

    incrStIdx(storeTail);

    ++stores;
}

template <class Impl>
typename Impl::DynInstPtr
LSQUnit<Impl>::getMemDepViolator()
{
    DynInstPtr temp = memDepViolator;

    memDepViolator = NULL;

    return temp;
}

template <class Impl>
unsigned
LSQUnit<Impl>::numFreeLoadEntries()
{
    // LQ has an extra dummy entry to differentiate
    // empty/full conditions. Subtract 1 from the free entries.
    DPRINTF(LSQUnit, "LQ size: %d, #loads occupied: %d\n", LQEntries, loads);
    return LQEntries - loads - 1;
}

template <class Impl>
unsigned
LSQUnit<Impl>::numFreeStoreEntries()
{
    // SQ has an extra dummy entry to differentiate
    // empty/full conditions. Subtract 1 from the free entries.
    DPRINTF(LSQUnit, "SQ size: %d, #stores occupied: %d\n", SQEntries, stores);
    return SQEntries - stores - 1;
}

template <class Impl>
void
LSQUnit<Impl>::checkSnoop(PacketPtr pkt)
{
    int load_idx = loadHead;
    DPRINTF(LSQUnit, "Got snoop for address %#x\n", pkt->getAddr());

    // Unlock the cpu-local monitor when the CPU sees a snoop to a locked
    // address. The CPU can speculatively execute a LL operation after a
    // pending SC operation in the pipeline, and that can make the cache
    // monitor that the CPU is connected to appear valid when it really
    // shouldn't be.
    for (int x = 0; x < cpu->numContexts(); x++) {
        ThreadContext *tc = cpu->getContext(x);
        bool no_squash = cpu->thread[x]->noSquashFromTC;
        cpu->thread[x]->noSquashFromTC = true;
        TheISA::handleLockedSnoop(tc, pkt, cacheBlockMask);
        cpu->thread[x]->noSquashFromTC = no_squash;
    }

    Addr invalidate_addr = pkt->getAddr() & cacheBlockMask;

    DynInstPtr ld_inst = loadQueue[load_idx];
    if (ld_inst) {
        Addr load_addr = ld_inst->physEffAddr & cacheBlockMask;
        // Check that this snoop didn't just invalidate our lock flag
        if (ld_inst->effAddrValid() && load_addr == invalidate_addr &&
            ld_inst->memReqFlags & Request::LLSC)
            TheISA::handleLockedSnoopHit(ld_inst.get());
    }

    // If this is the only load in the LSQ we don't care
    if (load_idx == loadTail)
        return;

    incrLdIdx(load_idx);

    bool force_squash = false;

    while (load_idx != loadTail) {
        DynInstPtr ld_inst = loadQueue[load_idx];

        if (!ld_inst->effAddrValid() || ld_inst->uncacheable()) {
            incrLdIdx(load_idx);
            continue;
        }

        Addr load_addr = ld_inst->physEffAddr & cacheBlockMask;
        DPRINTF(LSQUnit, "-- inst [sn:%lli] load_addr: %#x to pktAddr:%#x\n",
                ld_inst->seqNum, load_addr, invalidate_addr);

        if (load_addr == invalidate_addr || force_squash) {
            if (needsTSO) {
                // If we have a TSO system, as all loads must be ordered with
                // all other loads, this load as well as *all* subsequent loads
                // need to be squashed to prevent possible load reordering.
                force_squash = true;
            }
            if (ld_inst->possibleLoadViolation() || force_squash) {
                DPRINTF(LSQUnit, "Conflicting load at addr %#x [sn:%lli]\n",
                        pkt->getAddr(), ld_inst->seqNum);

                // Mark the load for re-execution
                ld_inst->fault = new ReExec;
            } else {
                DPRINTF(LSQUnit, "HitExternal Snoop for addr %#x [sn:%lli]\n",
                        pkt->getAddr(), ld_inst->seqNum);

                // Make sure that we don't lose a snoop hitting a LOCKED
                // address since the LOCK* flags don't get updated until
                // commit.
                if (ld_inst->memReqFlags & Request::LLSC)
                    TheISA::handleLockedSnoopHit(ld_inst.get());

                // If an older load checks this and it's true
                // then we might have missed the snoop
                // in which case we need to invalidate to be sure
                ld_inst->hitExternalSnoop(true);
            }
        }
        incrLdIdx(load_idx);
    }
    return;
}

template <class Impl>
Fault
LSQUnit<Impl>::checkViolations(int load_idx, DynInstPtr &inst)
{
    Addr inst_eff_addr1 = inst->effAddr >> depCheckShift;
    Addr inst_eff_addr2 = (inst->effAddr + inst->effSize - 1) >> depCheckShift;

    /** @todo in theory you only need to check an instruction that has executed
     * however, there isn't a good way in the pipeline at the moment to check
     * all instructions that will execute before the store writes back. Thus,
     * like the implementation that came before it, we're overly conservative.
     */
    while (load_idx != loadTail) {
        DynInstPtr ld_inst = loadQueue[load_idx];
        if (!ld_inst->effAddrValid() || ld_inst->uncacheable()) {
            incrLdIdx(load_idx);
            continue;
        }

        Addr ld_eff_addr1 = ld_inst->effAddr >> depCheckShift;
        Addr ld_eff_addr2 =
            (ld_inst->effAddr + ld_inst->effSize - 1) >> depCheckShift;

        if (inst_eff_addr2 >= ld_eff_addr1 && inst_eff_addr1 <= ld_eff_addr2) {
            if (inst->isLoad()) {
                // If this load is to the same block as an external snoop
                // invalidate that we've observed then the load needs to be
                // squashed as it could have newer data
                if (ld_inst->hitExternalSnoop()) {
                    if (!memDepViolator ||
                        ld_inst->seqNum < memDepViolator->seqNum) {
                        DPRINTF(LSQUnit, "Detected fault with inst [sn:%lli] "
                                "and [sn:%lli] at address %#x\n",
                                inst->seqNum, ld_inst->seqNum, ld_eff_addr1);
                        memDepViolator = ld_inst;

                        ++lsqMemOrderViolation;

                        return new GenericISA::M5PanicFault(
                            "Detected fault with inst [sn:%lli] and "
                            "[sn:%lli] at address %#x\n",
                            inst->seqNum, ld_inst->seqNum, ld_eff_addr1);
                    }
                }

                // Otherwise, mark the load as having a possible load violation,
                // and if we see a snoop before it's committed, we need to squash
                ld_inst->possibleLoadViolation(true);
                DPRINTF(LSQUnit, "Found possible load violation at addr: %#x"
                        " between instructions [sn:%lli] and [sn:%lli]\n",
                        inst_eff_addr1, inst->seqNum, ld_inst->seqNum);
            } else {
                // A load/store incorrectly passed this store.
                // Check if we already have a violator, or if it's newer
                // squash and refetch.
                if (memDepViolator && ld_inst->seqNum > memDepViolator->seqNum)
                    break;

                DPRINTF(LSQUnit, "Detected fault with inst [sn:%lli] and "
                        "[sn:%lli] at address %#x\n",
                        inst->seqNum, ld_inst->seqNum, ld_eff_addr1);
                memDepViolator = ld_inst;

                ++lsqMemOrderViolation;

                return new GenericISA::M5PanicFault("Detected fault with "
                    "inst [sn:%lli] and [sn:%lli] at address %#x\n",
                    inst->seqNum, ld_inst->seqNum, ld_eff_addr1);
            }
        }

        incrLdIdx(load_idx);
    }
    return NoFault;
}

template <class Impl>
Fault
LSQUnit<Impl>::executeLoad(DynInstPtr &inst)
{
    using namespace TheISA;
    // Execute a specific load.
    Fault load_fault = NoFault;

    DPRINTF(LSQUnit, "Executing load PC %s, [sn:%lli]\n",
            inst->pcState(), inst->seqNum);

    assert(!inst->isSquashed());

    load_fault = inst->initiateAcc();

    if (inst->isTranslationDelayed() &&
        load_fault == NoFault)
        return load_fault;

    // If the instruction faulted or predicated false, then we need to send it
    // along to commit without the instruction completing.
    if (load_fault != NoFault || !inst->readPredicate()) {
        // Send this instruction to commit, also make sure iew stage
        // realizes there is activity.
        // Mark it as executed unless it is an uncached load that
        // needs to hit the head of commit.
        if (!inst->readPredicate())
            inst->forwardOldRegs();
        DPRINTF(LSQUnit, "Load [sn:%lli] not executed from %s\n",
                inst->seqNum,
                (load_fault != NoFault ? "fault" : "predication"));
        if (!(inst->hasRequest() && inst->uncacheable()) ||
            inst->isAtCommit()) {
            inst->setExecuted();
        }
        iewStage->instToCommit(inst);
        iewStage->activityThisCycle();
    } else {
        assert(inst->effAddrValid());
        int load_idx = inst->lqIdx;
        incrLdIdx(load_idx);

        if (checkLoads)
            return checkViolations(load_idx, inst);
    }

    return load_fault;
}

template <class Impl>
Fault
LSQUnit<Impl>::executeStore(DynInstPtr &store_inst)
{
    using namespace TheISA;
    // Make sure that a store exists.
    assert(stores != 0);

    int store_idx = store_inst->sqIdx;

    DPRINTF(LSQUnit, "Executing store PC %s [sn:%lli]\n",
            store_inst->pcState(), store_inst->seqNum);

    assert(!store_inst->isSquashed());

    // Check the recently completed loads to see if any match this store's
    // address.  If so, then we have a memory ordering violation.
    int load_idx = store_inst->lqIdx;

    Fault store_fault = store_inst->initiateAcc();

    if (store_inst->isTranslationDelayed() &&
        store_fault == NoFault)
        return store_fault;

    if (!store_inst->readPredicate())
        store_inst->forwardOldRegs();

    if (storeQueue[store_idx].size == 0) {
        DPRINTF(LSQUnit, "Fault on Store PC %s, [sn:%lli], Size = 0\n",
                store_inst->pcState(), store_inst->seqNum);

        return store_fault;
    } else if (!store_inst->readPredicate()) {
        DPRINTF(LSQUnit, "Store [sn:%lli] not executed from predication\n",
                store_inst->seqNum);
        return store_fault;
    }

    assert(store_fault == NoFault);

    if (store_inst->isStoreConditional()) {
        // Store conditionals need to set themselves as able to
        // writeback if we haven't had a fault by here.
        storeQueue[store_idx].canWB = true;

        ++storesToWB;
    }

    return checkViolations(load_idx, store_inst);
}

template <class Impl>
void
LSQUnit<Impl>::commitLoad()
{
    assert(loadQueue[loadHead]);

    DPRINTF(LSQUnit, "Committing head load instruction, PC %s\n",
            loadQueue[loadHead]->pcState());

    loadQueue[loadHead] = NULL;

    incrLdIdx(loadHead);

    --loads;
}

template <class Impl>
void
LSQUnit<Impl>::commitLoads(InstSeqNum &youngest_inst)
{
    assert(loads == 0 || loadQueue[loadHead]);

    while (loads != 0 && loadQueue[loadHead]->seqNum <= youngest_inst) {
        commitLoad();
    }
}

template <class Impl>
void
LSQUnit<Impl>::commitStores(InstSeqNum &youngest_inst)
{
    assert(stores == 0 || storeQueue[storeHead].inst);

    int store_idx = storeHead;

    while (store_idx != storeTail) {
        assert(storeQueue[store_idx].inst);
        // Mark any stores that are now committed and have not yet
        // been marked as able to write back.
        if (!storeQueue[store_idx].canWB) {
            if (storeQueue[store_idx].inst->seqNum > youngest_inst) {
                break;
            }
            DPRINTF(LSQUnit, "Marking store as able to write back, PC "
                    "%s [sn:%lli]\n",
                    storeQueue[store_idx].inst->pcState(),
                    storeQueue[store_idx].inst->seqNum);

            storeQueue[store_idx].canWB = true;

            ++storesToWB;
        }

        incrStIdx(store_idx);
    }
}

template <class Impl>
void
LSQUnit<Impl>::writebackPendingStore()
{
    if (hasPendingPkt) {
        assert(pendingPkt != NULL);

        // If the cache is blocked, this will store the packet for retry.
        if (sendStore(pendingPkt)) {
            storePostSend(pendingPkt);
        }
        pendingPkt = NULL;
        hasPendingPkt = false;
    }
}

template <class Impl>
void
LSQUnit<Impl>::writebackStores()
{
    // First writeback the second packet from any split store that didn't
    // complete last cycle because there weren't enough cache ports available.
    if (TheISA::HasUnalignedMemAcc) {
        writebackPendingStore();
    }

    while (storesToWB > 0 &&
           storeWBIdx != storeTail &&
           storeQueue[storeWBIdx].inst &&
           storeQueue[storeWBIdx].canWB &&
           ((!needsTSO) || (!storeInFlight)) &&
           usedPorts < cachePorts) {

        if (isStoreBlocked) {
            DPRINTF(LSQUnit, "Unable to write back any more stores, cache"
                    " is blocked!\n");
            break;
        }

        // Store didn't write any data so no need to write it back to
        // memory.
        if (storeQueue[storeWBIdx].size == 0) {
            completeStore(storeWBIdx);

            incrStIdx(storeWBIdx);

            continue;
        }

        ++usedPorts;

        if (storeQueue[storeWBIdx].inst->isDataPrefetch()) {
            incrStIdx(storeWBIdx);

            continue;
        }

        assert(storeQueue[storeWBIdx].req);
        assert(!storeQueue[storeWBIdx].committed);

        if (TheISA::HasUnalignedMemAcc && storeQueue[storeWBIdx].isSplit) {
            assert(storeQueue[storeWBIdx].sreqLow);
            assert(storeQueue[storeWBIdx].sreqHigh);
        }

        DynInstPtr inst = storeQueue[storeWBIdx].inst;

        Request *req = storeQueue[storeWBIdx].req;
        RequestPtr sreqLow = storeQueue[storeWBIdx].sreqLow;
        RequestPtr sreqHigh = storeQueue[storeWBIdx].sreqHigh;

        storeQueue[storeWBIdx].committed = true;

        assert(!inst->memData);
        inst->memData = new uint8_t[req->getSize()];

        if (storeQueue[storeWBIdx].isAllZeros)
            memset(inst->memData, 0, req->getSize());
        else
            memcpy(inst->memData, storeQueue[storeWBIdx].data, req->getSize());

        MemCmd command =
            req->isSwap() ? MemCmd::SwapReq :
            (req->isLLSC() ? MemCmd::StoreCondReq : MemCmd::WriteReq);
        PacketPtr data_pkt;
        PacketPtr snd_data_pkt = NULL;

        LSQSenderState *state = new LSQSenderState;
        state->isLoad = false;
        state->idx = storeWBIdx;
        state->inst = inst;

        if (!TheISA::HasUnalignedMemAcc || !storeQueue[storeWBIdx].isSplit) {

            // Build a single data packet if the store isn't split.
            data_pkt = new Packet(req, command);
            data_pkt->dataStatic(inst->memData);
            data_pkt->senderState = state;
        } else {
            // Create two packets if the store is split in two.
            data_pkt = new Packet(sreqLow, command);
            snd_data_pkt = new Packet(sreqHigh, command);

            data_pkt->dataStatic(inst->memData);
            snd_data_pkt->dataStatic(inst->memData + sreqLow->getSize());

            data_pkt->senderState = state;
            snd_data_pkt->senderState = state;

            state->isSplit = true;
            state->outstanding = 2;

            // Can delete the main request now.
            delete req;
            req = sreqLow;
        }

        DPRINTF(LSQUnit, "D-Cache: Writing back store idx:%i PC:%s "
                "to Addr:%#x, data:%#x [sn:%lli]\n",
                storeWBIdx, inst->pcState(),
                req->getPaddr(), (int)*(inst->memData),
                inst->seqNum);

        // @todo: Remove this SC hack once the memory system handles it.
        if (inst->isStoreConditional()) {
            assert(!storeQueue[storeWBIdx].isSplit);
            // Disable recording the result temporarily.  Writing to
            // misc regs normally updates the result, but this is not
            // the desired behavior when handling store conditionals.
            inst->recordResult(false);
            bool success = TheISA::handleLockedWrite(inst.get(), req, cacheBlockMask);
            inst->recordResult(true);

            if (!success) {
                // Instantly complete this store.
                DPRINTF(LSQUnit, "Store conditional [sn:%lli] failed. "
                        "Instantly completing it.\n",
                        inst->seqNum);
                WritebackEvent *wb = new WritebackEvent(inst, data_pkt, this);
                cpu->schedule(wb, curTick() + 1);
                if (cpu->checker) {
                    // Make sure to set the LLSC data for verification
                    // if checker is loaded
                    inst->reqToVerify->setExtraData(0);
                    inst->completeAcc(data_pkt);
                }
                completeStore(storeWBIdx);
                incrStIdx(storeWBIdx);
                continue;
            }
        } else {
            // Non-store conditionals do not need a writeback.
            state->noWB = true;
        }

        bool split =
            TheISA::HasUnalignedMemAcc && storeQueue[storeWBIdx].isSplit;

        ThreadContext *thread = cpu->tcBase(lsqID);

        if (req->isMmappedIpr()) {
            assert(!inst->isStoreConditional());
            TheISA::handleIprWrite(thread, data_pkt);
            delete data_pkt;
            if (split) {
                assert(snd_data_pkt->req->isMmappedIpr());
                TheISA::handleIprWrite(thread, snd_data_pkt);
                delete snd_data_pkt;
                delete sreqLow;
                delete sreqHigh;
            }
            delete state;
            delete req;
            completeStore(storeWBIdx);
            incrStIdx(storeWBIdx);
        } else if (!sendStore(data_pkt)) {
            DPRINTF(IEW, "D-Cache became blocked when writing [sn:%lli], "
                    "will retry later\n",
                    inst->seqNum);

            // Need to store the second packet, if split.
            if (split) {
                state->pktToSend = true;
                state->pendingPacket = snd_data_pkt;
            }
        } else {

            // If split, try to send the second packet too
            if (split) {
                assert(snd_data_pkt);

                // Ensure there are enough ports to use.
                if (usedPorts < cachePorts) {
                    ++usedPorts;
                    if (sendStore(snd_data_pkt)) {
                        storePostSend(snd_data_pkt);
                    } else {
                        DPRINTF(IEW, "D-Cache became blocked when writing"
                                " [sn:%lli] second packet, will retry later\n",
                                inst->seqNum);
                    }
                } else {

                    // Store the packet for when there's free ports.
                    assert(pendingPkt == NULL);
                    pendingPkt = snd_data_pkt;
                    hasPendingPkt = true;
                }
            } else {

                // Not a split store.
                storePostSend(data_pkt);
            }
        }
    }

    // Not sure this should set it to 0.
    usedPorts = 0;

    assert(stores >= 0 && storesToWB >= 0);
}

/*template <class Impl>
void
LSQUnit<Impl>::removeMSHR(InstSeqNum seqNum)
{
    list<InstSeqNum>::iterator mshr_it = find(mshrSeqNums.begin(),
                                              mshrSeqNums.end(),
                                              seqNum);

    if (mshr_it != mshrSeqNums.end()) {
        mshrSeqNums.erase(mshr_it);
        DPRINTF(LSQUnit, "Removing MSHR. count = %i\n", mshrSeqNums.size());
    }
}*/

template <class Impl>
void
LSQUnit<Impl>::squash(const InstSeqNum &squashed_num)
{
    DPRINTF(LSQUnit, "Squashing until [sn:%lli]! "
            "(Loads:%i Stores:%i)\n", squashed_num, loads, stores);

    int load_idx = loadTail;
    decrLdIdx(load_idx);

    while (loads != 0 && loadQueue[load_idx]->seqNum > squashed_num) {
        DPRINTF(LSQUnit, "Load Instruction PC %s squashed, "
                "[sn:%lli]\n",
                loadQueue[load_idx]->pcState(),
                loadQueue[load_idx]->seqNum);

        if (isStalled() && load_idx == stallingLoadIdx) {
            stalled = false;
            stallingStoreIsn = 0;
            stallingLoadIdx = 0;
        }

        // Clear the smart pointer to make sure it is decremented.
        loadQueue[load_idx]->setSquashed();
        loadQueue[load_idx] = NULL;
        --loads;

        // Inefficient!
        loadTail = load_idx;

        decrLdIdx(load_idx);
        ++lsqSquashedLoads;
    }

    if (memDepViolator && squashed_num < memDepViolator->seqNum) {
        memDepViolator = NULL;
    }

    int store_idx = storeTail;
    decrStIdx(store_idx);

    while (stores != 0 &&
           storeQueue[store_idx].inst->seqNum > squashed_num) {
        // Instructions marked as can WB are already committed.
        if (storeQueue[store_idx].canWB) {
            break;
        }

        DPRINTF(LSQUnit, "Store Instruction PC %s squashed, "
                "idx:%i [sn:%lli]\n",
                storeQueue[store_idx].inst->pcState(),
                store_idx, storeQueue[store_idx].inst->seqNum);

        // I don't think this can happen. It should have been cleared
        // by the stalling load.
        if (isStalled() &&
            storeQueue[store_idx].inst->seqNum == stallingStoreIsn) {
            panic("Is stalled should have been cleared by stalling load!\n");
            stalled = false;
            stallingStoreIsn = 0;
        }

        // Clear the smart pointer to make sure it is decremented.
        storeQueue[store_idx].inst->setSquashed();
        storeQueue[store_idx].inst = NULL;
        storeQueue[store_idx].canWB = 0;

        // Must delete request now that it wasn't handed off to
        // memory.  This is quite ugly.  @todo: Figure out the proper
        // place to really handle request deletes.
        delete storeQueue[store_idx].req;
        if (TheISA::HasUnalignedMemAcc && storeQueue[store_idx].isSplit) {
            delete storeQueue[store_idx].sreqLow;
            delete storeQueue[store_idx].sreqHigh;

            storeQueue[store_idx].sreqLow = NULL;
            storeQueue[store_idx].sreqHigh = NULL;
        }

        storeQueue[store_idx].req = NULL;
        --stores;

        // Inefficient!
        storeTail = store_idx;

        decrStIdx(store_idx);
        ++lsqSquashedStores;
    }
}

template <class Impl>
void
LSQUnit<Impl>::storePostSend(PacketPtr pkt)
{
    if (isStalled() &&
        storeQueue[storeWBIdx].inst->seqNum == stallingStoreIsn) {
        DPRINTF(LSQUnit, "Unstalling, stalling store [sn:%lli] "
                "load idx:%i\n",
                stallingStoreIsn, stallingLoadIdx);
        stalled = false;
        stallingStoreIsn = 0;
        iewStage->replayMemInst(loadQueue[stallingLoadIdx]);
    }

    if (!storeQueue[storeWBIdx].inst->isStoreConditional()) {
        // The store is basically completed at this time. This
        // only works so long as the checker doesn't try to
        // verify the value in memory for stores.
        storeQueue[storeWBIdx].inst->setCompleted();

        if (cpu->checker) {
            cpu->checker->verify(storeQueue[storeWBIdx].inst);
        }
    }

    if (needsTSO) {
        storeInFlight = true;
    }

    incrStIdx(storeWBIdx);
}

template <class Impl>
void
LSQUnit<Impl>::writeback(DynInstPtr &inst, PacketPtr pkt)
{
    iewStage->wakeCPU();

    // Squashed instructions do not need to complete their access.
    if (inst->isSquashed()) {
        assert(!inst->isStore());
        ++lsqIgnoredResponses;
        return;
    }

    if (!inst->isExecuted()) {
        inst->setExecuted();

        // Complete access to copy data to proper place.
        inst->completeAcc(pkt);
    }

    // Need to insert instruction into queue to commit
    iewStage->instToCommit(inst);

    iewStage->activityThisCycle();

    // see if this load changed the PC
    iewStage->checkMisprediction(inst);
}

template <class Impl>
void
LSQUnit<Impl>::completeStore(int store_idx)
{
    assert(storeQueue[store_idx].inst);
    storeQueue[store_idx].completed = true;
    --storesToWB;
    // A bit conservative because a store completion may not free up entries,
    // but hopefully avoids two store completions in one cycle from making
    // the CPU tick twice.
    cpu->wakeCPU();
    cpu->activityThisCycle();

    if (store_idx == storeHead) {
        do {
            incrStIdx(storeHead);

            --stores;
        } while (storeQueue[storeHead].completed &&
                 storeHead != storeTail);

        iewStage->updateLSQNextCycle = true;
    }

    DPRINTF(LSQUnit, "Completing store [sn:%lli], idx:%i, store head "
            "idx:%i\n",
            storeQueue[store_idx].inst->seqNum, store_idx, storeHead);

#if TRACING_ON
    if (DTRACE(O3PipeView)) {
        storeQueue[store_idx].inst->storeTick =
            curTick() - storeQueue[store_idx].inst->fetchTick;
    }
#endif

    if (isStalled() &&
        storeQueue[store_idx].inst->seqNum == stallingStoreIsn) {
        DPRINTF(LSQUnit, "Unstalling, stalling store [sn:%lli] "
                "load idx:%i\n",
                stallingStoreIsn, stallingLoadIdx);
        stalled = false;
        stallingStoreIsn = 0;
        iewStage->replayMemInst(loadQueue[stallingLoadIdx]);
    }

    storeQueue[store_idx].inst->setCompleted();

    if (needsTSO) {
        storeInFlight = false;
    }

    // Tell the checker we've completed this instruction.  Some stores
    // may get reported twice to the checker, but the checker can
    // handle that case.
    if (cpu->checker) {
        cpu->checker->verify(storeQueue[store_idx].inst);
    }
}

template <class Impl>
bool
LSQUnit<Impl>::sendStore(PacketPtr data_pkt)
{
    if (!dcachePort->sendTimingReq(data_pkt)) {
        // Need to handle becoming blocked on a store.
        isStoreBlocked = true;
        ++lsqCacheBlocked;
        assert(retryPkt == NULL);
        retryPkt = data_pkt;
        return false;
    }
    return true;
}

template <class Impl>
void
LSQUnit<Impl>::recvRetry()
{
    if (isStoreBlocked) {
        DPRINTF(LSQUnit, "Receiving retry: store blocked\n");
        assert(retryPkt != NULL);

        LSQSenderState *state =
            dynamic_cast<LSQSenderState *>(retryPkt->senderState);

        if (dcachePort->sendTimingReq(retryPkt)) {
            // Don't finish the store unless this is the last packet.
            if (!TheISA::HasUnalignedMemAcc || !state->pktToSend ||
                state->pendingPacket == retryPkt) {
                state->pktToSend = false;
                storePostSend(retryPkt);
            }
            retryPkt = NULL;
            isStoreBlocked = false;

            // Send any outstanding packet.
            if (TheISA::HasUnalignedMemAcc && state->pktToSend) {
                assert(state->pendingPacket);
                if (sendStore(state->pendingPacket)) {
                    storePostSend(state->pendingPacket);
                }
            }
        } else {
            // Still blocked!
            ++lsqCacheBlocked;
        }
    }
}

template <class Impl>
inline void
LSQUnit<Impl>::incrStIdx(int &store_idx) const
{
    if (++store_idx >= SQEntries)
        store_idx = 0;
}

template <class Impl>
inline void
LSQUnit<Impl>::decrStIdx(int &store_idx) const
{
    if (--store_idx < 0)
        store_idx += SQEntries;
}

template <class Impl>
inline void
LSQUnit<Impl>::incrLdIdx(int &load_idx) const
{
    if (++load_idx >= LQEntries)
        load_idx = 0;
}

template <class Impl>
inline void
LSQUnit<Impl>::decrLdIdx(int &load_idx) const
{
    if (--load_idx < 0)
        load_idx += LQEntries;
}

template <class Impl>
void
LSQUnit<Impl>::dumpInsts() const
{
    cprintf("Load store queue: Dumping instructions.\n");
    cprintf("Load queue size: %i\n", loads);
    cprintf("Load queue: ");

    int load_idx = loadHead;

    while (load_idx != loadTail && loadQueue[load_idx]) {
        const DynInstPtr &inst(loadQueue[load_idx]);
        cprintf("%s.[sn:%i] ", inst->pcState(), inst->seqNum);

        incrLdIdx(load_idx);
    }
    cprintf("\n");

    cprintf("Store queue size: %i\n", stores);
    cprintf("Store queue: ");

    int store_idx = storeHead;

    while (store_idx != storeTail && storeQueue[store_idx].inst) {
        const DynInstPtr &inst(storeQueue[store_idx].inst);
        cprintf("%s.[sn:%i] ", inst->pcState(), inst->seqNum);

        incrStIdx(store_idx);
    }

    cprintf("\n");
}

#endif // __CPU_O3_LSQ_UNIT_IMPL_HH__