// atomic.cc — gem5 AtomicSimpleCPU implementation (revision 14085)
12623SN/A/* 210596Sgabeblack@google.com * Copyright 2014 Google, Inc. 313012Sandreas.sandberg@arm.com * Copyright (c) 2012-2013,2015,2017-2018 ARM Limited 48926Sandreas.hansson@arm.com * All rights reserved. 58926Sandreas.hansson@arm.com * 68926Sandreas.hansson@arm.com * The license below extends only to copyright in the software and shall 78926Sandreas.hansson@arm.com * not be construed as granting a license to any other intellectual 88926Sandreas.hansson@arm.com * property including but not limited to intellectual property relating 98926Sandreas.hansson@arm.com * to a hardware implementation of the functionality of the software 108926Sandreas.hansson@arm.com * licensed hereunder. You may use the software subject to the license 118926Sandreas.hansson@arm.com * terms below provided that you ensure that this notice is replicated 128926Sandreas.hansson@arm.com * unmodified and in its entirety in all distributions of the software, 138926Sandreas.hansson@arm.com * modified or unmodified, in source code or in binary form. 148926Sandreas.hansson@arm.com * 152623SN/A * Copyright (c) 2002-2005 The Regents of The University of Michigan 162623SN/A * All rights reserved. 172623SN/A * 182623SN/A * Redistribution and use in source and binary forms, with or without 192623SN/A * modification, are permitted provided that the following conditions are 202623SN/A * met: redistributions of source code must retain the above copyright 212623SN/A * notice, this list of conditions and the following disclaimer; 222623SN/A * redistributions in binary form must reproduce the above copyright 232623SN/A * notice, this list of conditions and the following disclaimer in the 242623SN/A * documentation and/or other materials provided with the distribution; 252623SN/A * neither the name of the copyright holders nor the names of its 262623SN/A * contributors may be used to endorse or promote products derived from 272623SN/A * this software without specific prior written permission. 
282623SN/A * 292623SN/A * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 302623SN/A * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 312623SN/A * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 322623SN/A * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 332623SN/A * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 342623SN/A * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 352623SN/A * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 362623SN/A * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 372623SN/A * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 382623SN/A * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 392623SN/A * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 402665Ssaidi@eecs.umich.edu * 412665Ssaidi@eecs.umich.edu * Authors: Steve Reinhardt 422623SN/A */ 432623SN/A 4411793Sbrandon.potter@amd.com#include "cpu/simple/atomic.hh" 4511793Sbrandon.potter@amd.com 463170Sstever@eecs.umich.edu#include "arch/locked_mem.hh" 478105Sgblack@eecs.umich.edu#include "arch/mmapped_ipr.hh" 482623SN/A#include "arch/utility.hh" 499647Sdam.sunwoo@arm.com#include "base/output.hh" 506658Snate@binkert.org#include "config/the_isa.hh" 512623SN/A#include "cpu/exetrace.hh" 5213954Sgiacomo.gabrielli@arm.com#include "cpu/utils.hh" 539443SAndreas.Sandberg@ARM.com#include "debug/Drain.hh" 548232Snate@binkert.org#include "debug/ExecFaulting.hh" 558232Snate@binkert.org#include "debug/SimpleCPU.hh" 563348Sbinkertn@umich.edu#include "mem/packet.hh" 573348Sbinkertn@umich.edu#include "mem/packet_access.hh" 588926Sandreas.hansson@arm.com#include "mem/physical.hh" 594762Snate@binkert.org#include "params/AtomicSimpleCPU.hh" 607678Sgblack@eecs.umich.edu#include "sim/faults.hh" 6111793Sbrandon.potter@amd.com#include "sim/full_system.hh" 
622901Ssaidi@eecs.umich.edu#include "sim/system.hh" 632623SN/A 642623SN/Ausing namespace std; 652623SN/Ausing namespace TheISA; 662623SN/A 672623SN/Avoid 682623SN/AAtomicSimpleCPU::init() 692623SN/A{ 7011147Smitch.hayenga@arm.com BaseSimpleCPU::init(); 718921Sandreas.hansson@arm.com 7211148Smitch.hayenga@arm.com int cid = threadContexts[0]->contextId(); 7312749Sgiacomo.travaglini@arm.com ifetch_req->setContext(cid); 7412749Sgiacomo.travaglini@arm.com data_read_req->setContext(cid); 7512749Sgiacomo.travaglini@arm.com data_write_req->setContext(cid); 7613652Sqtt2@cornell.edu data_amo_req->setContext(cid); 772623SN/A} 782623SN/A 795529Snate@binkert.orgAtomicSimpleCPU::AtomicSimpleCPU(AtomicSimpleCPUParams *p) 8012127Sspwilson2@wisc.edu : BaseSimpleCPU(p), 8112127Sspwilson2@wisc.edu tickEvent([this]{ tick(); }, "AtomicSimpleCPU tick", 8212127Sspwilson2@wisc.edu false, Event::CPU_Tick_Pri), 8312127Sspwilson2@wisc.edu width(p->width), locked(false), 845487Snate@binkert.org simulate_data_stalls(p->simulate_data_stalls), 855487Snate@binkert.org simulate_inst_stalls(p->simulate_inst_stalls), 869095Sandreas.hansson@arm.com icachePort(name() + ".icache_port", this), 879095Sandreas.hansson@arm.com dcachePort(name() + ".dcache_port", this), 8813012Sandreas.sandberg@arm.com dcache_access(false), dcache_latency(0), 8910537Sandreas.hansson@arm.com ppCommit(nullptr) 902623SN/A{ 912623SN/A _status = Idle; 9212749Sgiacomo.travaglini@arm.com ifetch_req = std::make_shared<Request>(); 9312749Sgiacomo.travaglini@arm.com data_read_req = std::make_shared<Request>(); 9412749Sgiacomo.travaglini@arm.com data_write_req = std::make_shared<Request>(); 9513652Sqtt2@cornell.edu data_amo_req = std::make_shared<Request>(); 962623SN/A} 972623SN/A 982623SN/A 992623SN/AAtomicSimpleCPU::~AtomicSimpleCPU() 1002623SN/A{ 1016775SBrad.Beckmann@amd.com if (tickEvent.scheduled()) { 1026775SBrad.Beckmann@amd.com deschedule(tickEvent); 1036775SBrad.Beckmann@amd.com } 1042623SN/A} 1052623SN/A 
10610913Sandreas.sandberg@arm.comDrainState 10710913Sandreas.sandberg@arm.comAtomicSimpleCPU::drain() 1082623SN/A{ 10912276Sanouk.vanlaer@arm.com // Deschedule any power gating event (if any) 11012276Sanouk.vanlaer@arm.com deschedulePowerGatingEvent(); 11112276Sanouk.vanlaer@arm.com 1129448SAndreas.Sandberg@ARM.com if (switchedOut()) 11310913Sandreas.sandberg@arm.com return DrainState::Drained; 1142623SN/A 11514085Sgiacomo.travaglini@arm.com if (!isCpuDrained()) { 11611147Smitch.hayenga@arm.com DPRINTF(Drain, "Requesting drain.\n"); 11710913Sandreas.sandberg@arm.com return DrainState::Draining; 1189443SAndreas.Sandberg@ARM.com } else { 1199443SAndreas.Sandberg@ARM.com if (tickEvent.scheduled()) 1209443SAndreas.Sandberg@ARM.com deschedule(tickEvent); 1212915Sktlim@umich.edu 12211147Smitch.hayenga@arm.com activeThreads.clear(); 1239443SAndreas.Sandberg@ARM.com DPRINTF(Drain, "Not executing microcode, no need to drain.\n"); 12410913Sandreas.sandberg@arm.com return DrainState::Drained; 1259443SAndreas.Sandberg@ARM.com } 1269342SAndreas.Sandberg@arm.com} 1279342SAndreas.Sandberg@arm.com 1282915Sktlim@umich.eduvoid 12911148Smitch.hayenga@arm.comAtomicSimpleCPU::threadSnoop(PacketPtr pkt, ThreadID sender) 13011148Smitch.hayenga@arm.com{ 13111148Smitch.hayenga@arm.com DPRINTF(SimpleCPU, "received snoop pkt for addr:%#x %s\n", pkt->getAddr(), 13211148Smitch.hayenga@arm.com pkt->cmdString()); 13311148Smitch.hayenga@arm.com 13411148Smitch.hayenga@arm.com for (ThreadID tid = 0; tid < numThreads; tid++) { 13511148Smitch.hayenga@arm.com if (tid != sender) { 13611321Ssteve.reinhardt@amd.com if (getCpuAddrMonitor(tid)->doMonitor(pkt)) { 13711151Smitch.hayenga@arm.com wakeup(tid); 13811148Smitch.hayenga@arm.com } 13911148Smitch.hayenga@arm.com 14011148Smitch.hayenga@arm.com TheISA::handleLockedSnoop(threadInfo[tid]->thread, 14111148Smitch.hayenga@arm.com pkt, dcachePort.cacheBlockMask); 14211148Smitch.hayenga@arm.com } 14311148Smitch.hayenga@arm.com } 
14411148Smitch.hayenga@arm.com} 14511148Smitch.hayenga@arm.com 14611148Smitch.hayenga@arm.comvoid 1479342SAndreas.Sandberg@arm.comAtomicSimpleCPU::drainResume() 1482915Sktlim@umich.edu{ 1499448SAndreas.Sandberg@ARM.com assert(!tickEvent.scheduled()); 1509448SAndreas.Sandberg@ARM.com if (switchedOut()) 1515220Ssaidi@eecs.umich.edu return; 1525220Ssaidi@eecs.umich.edu 1534940Snate@binkert.org DPRINTF(SimpleCPU, "Resume\n"); 1549523SAndreas.Sandberg@ARM.com verifyMemoryMode(); 1553324Shsul@eecs.umich.edu 1569448SAndreas.Sandberg@ARM.com assert(!threadContexts.empty()); 1579448SAndreas.Sandberg@ARM.com 15811147Smitch.hayenga@arm.com _status = BaseSimpleCPU::Idle; 15911147Smitch.hayenga@arm.com 16011147Smitch.hayenga@arm.com for (ThreadID tid = 0; tid < numThreads; tid++) { 16111147Smitch.hayenga@arm.com if (threadInfo[tid]->thread->status() == ThreadContext::Active) { 16211147Smitch.hayenga@arm.com threadInfo[tid]->notIdleFraction = 1; 16311147Smitch.hayenga@arm.com activeThreads.push_back(tid); 16411147Smitch.hayenga@arm.com _status = BaseSimpleCPU::Running; 16511147Smitch.hayenga@arm.com 16611147Smitch.hayenga@arm.com // Tick if any threads active 16711147Smitch.hayenga@arm.com if (!tickEvent.scheduled()) { 16811147Smitch.hayenga@arm.com schedule(tickEvent, nextCycle()); 16911147Smitch.hayenga@arm.com } 17011147Smitch.hayenga@arm.com } else { 17111147Smitch.hayenga@arm.com threadInfo[tid]->notIdleFraction = 0; 17211147Smitch.hayenga@arm.com } 1739448SAndreas.Sandberg@ARM.com } 17412276Sanouk.vanlaer@arm.com 17512276Sanouk.vanlaer@arm.com // Reschedule any power gating event (if any) 17612276Sanouk.vanlaer@arm.com schedulePowerGatingEvent(); 1772623SN/A} 1782623SN/A 1799443SAndreas.Sandberg@ARM.combool 1809443SAndreas.Sandberg@ARM.comAtomicSimpleCPU::tryCompleteDrain() 1819443SAndreas.Sandberg@ARM.com{ 18210913Sandreas.sandberg@arm.com if (drainState() != DrainState::Draining) 1839443SAndreas.Sandberg@ARM.com return false; 1849443SAndreas.Sandberg@ARM.com 
18511147Smitch.hayenga@arm.com DPRINTF(Drain, "tryCompleteDrain.\n"); 18614085Sgiacomo.travaglini@arm.com if (!isCpuDrained()) 1879443SAndreas.Sandberg@ARM.com return false; 1889443SAndreas.Sandberg@ARM.com 1899443SAndreas.Sandberg@ARM.com DPRINTF(Drain, "CPU done draining, processing drain event\n"); 19010913Sandreas.sandberg@arm.com signalDrainDone(); 1919443SAndreas.Sandberg@ARM.com 1929443SAndreas.Sandberg@ARM.com return true; 1939443SAndreas.Sandberg@ARM.com} 1949443SAndreas.Sandberg@ARM.com 1959443SAndreas.Sandberg@ARM.com 1962623SN/Avoid 1972798Sktlim@umich.eduAtomicSimpleCPU::switchOut() 1982623SN/A{ 1999429SAndreas.Sandberg@ARM.com BaseSimpleCPU::switchOut(); 2009429SAndreas.Sandberg@ARM.com 2019443SAndreas.Sandberg@ARM.com assert(!tickEvent.scheduled()); 2029342SAndreas.Sandberg@arm.com assert(_status == BaseSimpleCPU::Running || _status == Idle); 20314085Sgiacomo.travaglini@arm.com assert(isCpuDrained()); 2042623SN/A} 2052623SN/A 2062623SN/A 2072623SN/Avoid 2082623SN/AAtomicSimpleCPU::takeOverFrom(BaseCPU *oldCPU) 2092623SN/A{ 2109429SAndreas.Sandberg@ARM.com BaseSimpleCPU::takeOverFrom(oldCPU); 2112623SN/A 2129443SAndreas.Sandberg@ARM.com // The tick event should have been descheduled by drain() 2132623SN/A assert(!tickEvent.scheduled()); 2142623SN/A} 2152623SN/A 2169523SAndreas.Sandberg@ARM.comvoid 2179523SAndreas.Sandberg@ARM.comAtomicSimpleCPU::verifyMemoryMode() const 2189523SAndreas.Sandberg@ARM.com{ 2199524SAndreas.Sandberg@ARM.com if (!system->isAtomicMode()) { 2209523SAndreas.Sandberg@ARM.com fatal("The atomic CPU requires the memory system to be in " 2219523SAndreas.Sandberg@ARM.com "'atomic' mode.\n"); 2229523SAndreas.Sandberg@ARM.com } 2239523SAndreas.Sandberg@ARM.com} 2242623SN/A 2252623SN/Avoid 22610407Smitch.hayenga@arm.comAtomicSimpleCPU::activateContext(ThreadID thread_num) 2272623SN/A{ 22810407Smitch.hayenga@arm.com DPRINTF(SimpleCPU, "ActivateContext %d\n", thread_num); 2294940Snate@binkert.org 23011147Smitch.hayenga@arm.com 
assert(thread_num < numThreads); 2312623SN/A 23211147Smitch.hayenga@arm.com threadInfo[thread_num]->notIdleFraction = 1; 23311147Smitch.hayenga@arm.com Cycles delta = ticksToCycles(threadInfo[thread_num]->thread->lastActivate - 23411147Smitch.hayenga@arm.com threadInfo[thread_num]->thread->lastSuspend); 23510464SAndreas.Sandberg@ARM.com numCycles += delta; 2363686Sktlim@umich.edu 23711147Smitch.hayenga@arm.com if (!tickEvent.scheduled()) { 23811147Smitch.hayenga@arm.com //Make sure ticks are still on multiples of cycles 23911147Smitch.hayenga@arm.com schedule(tickEvent, clockEdge(Cycles(0))); 24011147Smitch.hayenga@arm.com } 2419342SAndreas.Sandberg@arm.com _status = BaseSimpleCPU::Running; 24211147Smitch.hayenga@arm.com if (std::find(activeThreads.begin(), activeThreads.end(), thread_num) 24311147Smitch.hayenga@arm.com == activeThreads.end()) { 24411147Smitch.hayenga@arm.com activeThreads.push_back(thread_num); 24511147Smitch.hayenga@arm.com } 24611526Sdavid.guillen@arm.com 24711526Sdavid.guillen@arm.com BaseCPU::activateContext(thread_num); 2482623SN/A} 2492623SN/A 2502623SN/A 2512623SN/Avoid 2528737Skoansin.tan@gmail.comAtomicSimpleCPU::suspendContext(ThreadID thread_num) 2532623SN/A{ 2544940Snate@binkert.org DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num); 2554940Snate@binkert.org 25611147Smitch.hayenga@arm.com assert(thread_num < numThreads); 25711147Smitch.hayenga@arm.com activeThreads.remove(thread_num); 2582623SN/A 2596043Sgblack@eecs.umich.edu if (_status == Idle) 2606043Sgblack@eecs.umich.edu return; 2616043Sgblack@eecs.umich.edu 2629342SAndreas.Sandberg@arm.com assert(_status == BaseSimpleCPU::Running); 2632626SN/A 26411147Smitch.hayenga@arm.com threadInfo[thread_num]->notIdleFraction = 0; 2652623SN/A 26611147Smitch.hayenga@arm.com if (activeThreads.empty()) { 26711147Smitch.hayenga@arm.com _status = Idle; 26811147Smitch.hayenga@arm.com 26911147Smitch.hayenga@arm.com if (tickEvent.scheduled()) { 27011147Smitch.hayenga@arm.com deschedule(tickEvent); 
27111147Smitch.hayenga@arm.com } 27211147Smitch.hayenga@arm.com } 27311147Smitch.hayenga@arm.com 27411526Sdavid.guillen@arm.com BaseCPU::suspendContext(thread_num); 2752623SN/A} 2762623SN/A 27713012Sandreas.sandberg@arm.comTick 27813012Sandreas.sandberg@arm.comAtomicSimpleCPU::sendPacket(MasterPort &port, const PacketPtr &pkt) 27913012Sandreas.sandberg@arm.com{ 28013012Sandreas.sandberg@arm.com return port.sendAtomic(pkt); 28113012Sandreas.sandberg@arm.com} 2822623SN/A 28310030SAli.Saidi@ARM.comTick 28410030SAli.Saidi@ARM.comAtomicSimpleCPU::AtomicCPUDPort::recvAtomicSnoop(PacketPtr pkt) 28510030SAli.Saidi@ARM.com{ 28610030SAli.Saidi@ARM.com DPRINTF(SimpleCPU, "received snoop pkt for addr:%#x %s\n", pkt->getAddr(), 28710030SAli.Saidi@ARM.com pkt->cmdString()); 28810030SAli.Saidi@ARM.com 28910529Smorr@cs.wisc.edu // X86 ISA: Snooping an invalidation for monitor/mwait 29010529Smorr@cs.wisc.edu AtomicSimpleCPU *cpu = (AtomicSimpleCPU *)(&owner); 29111148Smitch.hayenga@arm.com 29211148Smitch.hayenga@arm.com for (ThreadID tid = 0; tid < cpu->numThreads; tid++) { 29311148Smitch.hayenga@arm.com if (cpu->getCpuAddrMonitor(tid)->doMonitor(pkt)) { 29411151Smitch.hayenga@arm.com cpu->wakeup(tid); 29511148Smitch.hayenga@arm.com } 29610529Smorr@cs.wisc.edu } 29710529Smorr@cs.wisc.edu 29810030SAli.Saidi@ARM.com // if snoop invalidates, release any associated locks 29911356Skrinat01@arm.com // When run without caches, Invalidation packets will not be received 30011356Skrinat01@arm.com // hence we must check if the incoming packets are writes and wakeup 30111356Skrinat01@arm.com // the processor accordingly 30211356Skrinat01@arm.com if (pkt->isInvalidate() || pkt->isWrite()) { 30310030SAli.Saidi@ARM.com DPRINTF(SimpleCPU, "received invalidation for addr:%#x\n", 30410030SAli.Saidi@ARM.com pkt->getAddr()); 30511147Smitch.hayenga@arm.com for (auto &t_info : cpu->threadInfo) { 30611147Smitch.hayenga@arm.com TheISA::handleLockedSnoop(t_info->thread, pkt, cacheBlockMask); 
30711147Smitch.hayenga@arm.com } 30810030SAli.Saidi@ARM.com } 30910030SAli.Saidi@ARM.com 31010030SAli.Saidi@ARM.com return 0; 31110030SAli.Saidi@ARM.com} 31210030SAli.Saidi@ARM.com 31310030SAli.Saidi@ARM.comvoid 31410030SAli.Saidi@ARM.comAtomicSimpleCPU::AtomicCPUDPort::recvFunctionalSnoop(PacketPtr pkt) 31510030SAli.Saidi@ARM.com{ 31610030SAli.Saidi@ARM.com DPRINTF(SimpleCPU, "received snoop pkt for addr:%#x %s\n", pkt->getAddr(), 31710030SAli.Saidi@ARM.com pkt->cmdString()); 31810030SAli.Saidi@ARM.com 31910529Smorr@cs.wisc.edu // X86 ISA: Snooping an invalidation for monitor/mwait 32010529Smorr@cs.wisc.edu AtomicSimpleCPU *cpu = (AtomicSimpleCPU *)(&owner); 32111148Smitch.hayenga@arm.com for (ThreadID tid = 0; tid < cpu->numThreads; tid++) { 32211321Ssteve.reinhardt@amd.com if (cpu->getCpuAddrMonitor(tid)->doMonitor(pkt)) { 32311151Smitch.hayenga@arm.com cpu->wakeup(tid); 32411148Smitch.hayenga@arm.com } 32510529Smorr@cs.wisc.edu } 32610529Smorr@cs.wisc.edu 32710030SAli.Saidi@ARM.com // if snoop invalidates, release any associated locks 32810030SAli.Saidi@ARM.com if (pkt->isInvalidate()) { 32910030SAli.Saidi@ARM.com DPRINTF(SimpleCPU, "received invalidation for addr:%#x\n", 33010030SAli.Saidi@ARM.com pkt->getAddr()); 33111147Smitch.hayenga@arm.com for (auto &t_info : cpu->threadInfo) { 33211147Smitch.hayenga@arm.com TheISA::handleLockedSnoop(t_info->thread, pkt, cacheBlockMask); 33311147Smitch.hayenga@arm.com } 33410030SAli.Saidi@ARM.com } 33510030SAli.Saidi@ARM.com} 33610030SAli.Saidi@ARM.com 33713954Sgiacomo.gabrielli@arm.combool 33813954Sgiacomo.gabrielli@arm.comAtomicSimpleCPU::genMemFragmentRequest(const RequestPtr& req, Addr frag_addr, 33913954Sgiacomo.gabrielli@arm.com int size, Request::Flags flags, 34013954Sgiacomo.gabrielli@arm.com const std::vector<bool>& byte_enable, 34113954Sgiacomo.gabrielli@arm.com int& frag_size, int& size_left) const 34213954Sgiacomo.gabrielli@arm.com{ 34313954Sgiacomo.gabrielli@arm.com bool predicate = true; 
34413954Sgiacomo.gabrielli@arm.com Addr inst_addr = threadInfo[curThread]->thread->pcState().instAddr(); 34513954Sgiacomo.gabrielli@arm.com 34613954Sgiacomo.gabrielli@arm.com frag_size = std::min( 34713954Sgiacomo.gabrielli@arm.com cacheLineSize() - addrBlockOffset(frag_addr, cacheLineSize()), 34813954Sgiacomo.gabrielli@arm.com (Addr) size_left); 34913954Sgiacomo.gabrielli@arm.com size_left -= frag_size; 35013954Sgiacomo.gabrielli@arm.com 35113954Sgiacomo.gabrielli@arm.com if (!byte_enable.empty()) { 35213954Sgiacomo.gabrielli@arm.com // Set up byte-enable mask for the current fragment 35313954Sgiacomo.gabrielli@arm.com auto it_start = byte_enable.begin() + (size - (frag_size + size_left)); 35413954Sgiacomo.gabrielli@arm.com auto it_end = byte_enable.begin() + (size - size_left); 35513954Sgiacomo.gabrielli@arm.com if (isAnyActiveElement(it_start, it_end)) { 35613954Sgiacomo.gabrielli@arm.com req->setVirt(0, frag_addr, frag_size, flags, dataMasterId(), 35713954Sgiacomo.gabrielli@arm.com inst_addr); 35813954Sgiacomo.gabrielli@arm.com req->setByteEnable(std::vector<bool>(it_start, it_end)); 35913954Sgiacomo.gabrielli@arm.com } else { 36013954Sgiacomo.gabrielli@arm.com predicate = false; 36113954Sgiacomo.gabrielli@arm.com } 36213954Sgiacomo.gabrielli@arm.com } else { 36313954Sgiacomo.gabrielli@arm.com req->setVirt(0, frag_addr, frag_size, flags, dataMasterId(), 36413954Sgiacomo.gabrielli@arm.com inst_addr); 36513954Sgiacomo.gabrielli@arm.com } 36613954Sgiacomo.gabrielli@arm.com 36713954Sgiacomo.gabrielli@arm.com return predicate; 36813954Sgiacomo.gabrielli@arm.com} 36913954Sgiacomo.gabrielli@arm.com 3702623SN/AFault 37111608Snikos.nikoleris@arm.comAtomicSimpleCPU::readMem(Addr addr, uint8_t * data, unsigned size, 37213954Sgiacomo.gabrielli@arm.com Request::Flags flags, 37313954Sgiacomo.gabrielli@arm.com const std::vector<bool>& byteEnable) 3742623SN/A{ 37511147Smitch.hayenga@arm.com SimpleExecContext& t_info = *threadInfo[curThread]; 37611147Smitch.hayenga@arm.com 
SimpleThread* thread = t_info.thread; 37711147Smitch.hayenga@arm.com 3783169Sstever@eecs.umich.edu // use the CPU's statically allocated read request and packet objects 37912749Sgiacomo.travaglini@arm.com const RequestPtr &req = data_read_req; 3802623SN/A 38110665SAli.Saidi@ARM.com if (traceData) 38210665SAli.Saidi@ARM.com traceData->setMem(addr, size, flags); 3832623SN/A 3844999Sgblack@eecs.umich.edu dcache_latency = 0; 3854999Sgblack@eecs.umich.edu 38610024Sdam.sunwoo@arm.com req->taskId(taskId()); 38713954Sgiacomo.gabrielli@arm.com 38813954Sgiacomo.gabrielli@arm.com Addr frag_addr = addr; 38913954Sgiacomo.gabrielli@arm.com int frag_size = 0; 39013954Sgiacomo.gabrielli@arm.com int size_left = size; 39113954Sgiacomo.gabrielli@arm.com bool predicate; 39213954Sgiacomo.gabrielli@arm.com Fault fault = NoFault; 39313954Sgiacomo.gabrielli@arm.com 3947520Sgblack@eecs.umich.edu while (1) { 39513954Sgiacomo.gabrielli@arm.com predicate = genMemFragmentRequest(req, frag_addr, size, flags, 39613954Sgiacomo.gabrielli@arm.com byteEnable, frag_size, size_left); 3974999Sgblack@eecs.umich.edu 3984999Sgblack@eecs.umich.edu // translate to physical address 39913954Sgiacomo.gabrielli@arm.com if (predicate) { 40013954Sgiacomo.gabrielli@arm.com fault = thread->dtb->translateAtomic(req, thread->getTC(), 40113954Sgiacomo.gabrielli@arm.com BaseTLB::Read); 40213954Sgiacomo.gabrielli@arm.com } 4034999Sgblack@eecs.umich.edu 4044999Sgblack@eecs.umich.edu // Now do the access. 
40513954Sgiacomo.gabrielli@arm.com if (predicate && fault == NoFault && 40613954Sgiacomo.gabrielli@arm.com !req->getFlags().isSet(Request::NO_ACCESS)) { 40710739Ssteve.reinhardt@amd.com Packet pkt(req, Packet::makeReadCmd(req)); 4087520Sgblack@eecs.umich.edu pkt.dataStatic(data); 4094999Sgblack@eecs.umich.edu 41013012Sandreas.sandberg@arm.com if (req->isMmappedIpr()) { 4114999Sgblack@eecs.umich.edu dcache_latency += TheISA::handleIprRead(thread->getTC(), &pkt); 41213012Sandreas.sandberg@arm.com } else { 41313012Sandreas.sandberg@arm.com dcache_latency += sendPacket(dcachePort, &pkt); 4144999Sgblack@eecs.umich.edu } 4154999Sgblack@eecs.umich.edu dcache_access = true; 4165012Sgblack@eecs.umich.edu 4174999Sgblack@eecs.umich.edu assert(!pkt.isError()); 4184999Sgblack@eecs.umich.edu 4196102Sgblack@eecs.umich.edu if (req->isLLSC()) { 4204999Sgblack@eecs.umich.edu TheISA::handleLockedRead(thread, req); 4214999Sgblack@eecs.umich.edu } 4224968Sacolyte@umich.edu } 4234986Ssaidi@eecs.umich.edu 4244999Sgblack@eecs.umich.edu //If there's a fault, return it 4256739Sgblack@eecs.umich.edu if (fault != NoFault) { 4266739Sgblack@eecs.umich.edu if (req->isPrefetch()) { 4276739Sgblack@eecs.umich.edu return NoFault; 4286739Sgblack@eecs.umich.edu } else { 4296739Sgblack@eecs.umich.edu return fault; 4306739Sgblack@eecs.umich.edu } 4316739Sgblack@eecs.umich.edu } 4326739Sgblack@eecs.umich.edu 43313954Sgiacomo.gabrielli@arm.com // If we don't need to access further cache lines, stop now. 43413954Sgiacomo.gabrielli@arm.com if (size_left == 0) { 43510760Ssteve.reinhardt@amd.com if (req->isLockedRMW() && fault == NoFault) { 4366078Sgblack@eecs.umich.edu assert(!locked); 4376078Sgblack@eecs.umich.edu locked = true; 4386078Sgblack@eecs.umich.edu } 4394999Sgblack@eecs.umich.edu return fault; 4404968Sacolyte@umich.edu } 4413170Sstever@eecs.umich.edu 4424999Sgblack@eecs.umich.edu /* 44313954Sgiacomo.gabrielli@arm.com * Set up for accessing the next cache line. 
4444999Sgblack@eecs.umich.edu */ 44513954Sgiacomo.gabrielli@arm.com frag_addr += frag_size; 4464999Sgblack@eecs.umich.edu 4474999Sgblack@eecs.umich.edu //Move the pointer we're reading into to the correct location. 44813954Sgiacomo.gabrielli@arm.com data += frag_size; 4492623SN/A } 4502623SN/A} 4512623SN/A 45211303Ssteve.reinhardt@amd.comFault 45311608Snikos.nikoleris@arm.comAtomicSimpleCPU::writeMem(uint8_t *data, unsigned size, Addr addr, 45413954Sgiacomo.gabrielli@arm.com Request::Flags flags, uint64_t *res, 45513954Sgiacomo.gabrielli@arm.com const std::vector<bool>& byteEnable) 4562623SN/A{ 45711147Smitch.hayenga@arm.com SimpleExecContext& t_info = *threadInfo[curThread]; 45811147Smitch.hayenga@arm.com SimpleThread* thread = t_info.thread; 45910031SAli.Saidi@ARM.com static uint8_t zero_array[64] = {}; 46010031SAli.Saidi@ARM.com 46110031SAli.Saidi@ARM.com if (data == NULL) { 46210031SAli.Saidi@ARM.com assert(size <= 64); 46312355Snikos.nikoleris@arm.com assert(flags & Request::STORE_NO_DATA); 46410031SAli.Saidi@ARM.com // This must be a cache block cleaning request 46510031SAli.Saidi@ARM.com data = zero_array; 46610031SAli.Saidi@ARM.com } 46710031SAli.Saidi@ARM.com 4683169Sstever@eecs.umich.edu // use the CPU's statically allocated write request and packet objects 46912749Sgiacomo.travaglini@arm.com const RequestPtr &req = data_write_req; 4702623SN/A 47110665SAli.Saidi@ARM.com if (traceData) 47210665SAli.Saidi@ARM.com traceData->setMem(addr, size, flags); 4732623SN/A 4744999Sgblack@eecs.umich.edu dcache_latency = 0; 4754999Sgblack@eecs.umich.edu 47610024Sdam.sunwoo@arm.com req->taskId(taskId()); 47713954Sgiacomo.gabrielli@arm.com 47813954Sgiacomo.gabrielli@arm.com Addr frag_addr = addr; 47913954Sgiacomo.gabrielli@arm.com int frag_size = 0; 48013954Sgiacomo.gabrielli@arm.com int size_left = size; 48113954Sgiacomo.gabrielli@arm.com int curr_frag_id = 0; 48213954Sgiacomo.gabrielli@arm.com bool predicate; 48313954Sgiacomo.gabrielli@arm.com Fault fault = NoFault; 
48413954Sgiacomo.gabrielli@arm.com 48511321Ssteve.reinhardt@amd.com while (1) { 48613954Sgiacomo.gabrielli@arm.com predicate = genMemFragmentRequest(req, frag_addr, size, flags, 48713954Sgiacomo.gabrielli@arm.com byteEnable, frag_size, size_left); 4884999Sgblack@eecs.umich.edu 4894999Sgblack@eecs.umich.edu // translate to physical address 49013954Sgiacomo.gabrielli@arm.com if (predicate) 49113954Sgiacomo.gabrielli@arm.com fault = thread->dtb->translateAtomic(req, thread->getTC(), 49213954Sgiacomo.gabrielli@arm.com BaseTLB::Write); 4934999Sgblack@eecs.umich.edu 4944999Sgblack@eecs.umich.edu // Now do the access. 49513954Sgiacomo.gabrielli@arm.com if (predicate && fault == NoFault) { 4964999Sgblack@eecs.umich.edu bool do_access = true; // flag to suppress cache access 4974999Sgblack@eecs.umich.edu 4986102Sgblack@eecs.umich.edu if (req->isLLSC()) { 49913954Sgiacomo.gabrielli@arm.com assert(curr_frag_id == 0); 50013954Sgiacomo.gabrielli@arm.com do_access = 50113954Sgiacomo.gabrielli@arm.com TheISA::handleLockedWrite(thread, req, 50213954Sgiacomo.gabrielli@arm.com dcachePort.cacheBlockMask); 5034999Sgblack@eecs.umich.edu } else if (req->isSwap()) { 50413954Sgiacomo.gabrielli@arm.com assert(curr_frag_id == 0); 5054999Sgblack@eecs.umich.edu if (req->isCondSwap()) { 5064999Sgblack@eecs.umich.edu assert(res); 5074999Sgblack@eecs.umich.edu req->setExtraData(*res); 5084999Sgblack@eecs.umich.edu } 5094999Sgblack@eecs.umich.edu } 5104999Sgblack@eecs.umich.edu 5116623Sgblack@eecs.umich.edu if (do_access && !req->getFlags().isSet(Request::NO_ACCESS)) { 51212355Snikos.nikoleris@arm.com Packet pkt(req, Packet::makeWriteCmd(req)); 5137520Sgblack@eecs.umich.edu pkt.dataStatic(data); 5144999Sgblack@eecs.umich.edu 5158105Sgblack@eecs.umich.edu if (req->isMmappedIpr()) { 5164999Sgblack@eecs.umich.edu dcache_latency += 5174999Sgblack@eecs.umich.edu TheISA::handleIprWrite(thread->getTC(), &pkt); 5184999Sgblack@eecs.umich.edu } else { 51913012Sandreas.sandberg@arm.com dcache_latency += 
sendPacket(dcachePort, &pkt); 52011148Smitch.hayenga@arm.com 52111148Smitch.hayenga@arm.com // Notify other threads on this CPU of write 52211148Smitch.hayenga@arm.com threadSnoop(&pkt, curThread); 5234999Sgblack@eecs.umich.edu } 5244999Sgblack@eecs.umich.edu dcache_access = true; 5254999Sgblack@eecs.umich.edu assert(!pkt.isError()); 5264999Sgblack@eecs.umich.edu 5274999Sgblack@eecs.umich.edu if (req->isSwap()) { 52813954Sgiacomo.gabrielli@arm.com assert(res && curr_frag_id == 0); 52913954Sgiacomo.gabrielli@arm.com memcpy(res, pkt.getConstPtr<uint8_t>(), size); 5304999Sgblack@eecs.umich.edu } 5314999Sgblack@eecs.umich.edu } 5324999Sgblack@eecs.umich.edu 5334999Sgblack@eecs.umich.edu if (res && !req->isSwap()) { 5344999Sgblack@eecs.umich.edu *res = req->getExtraData(); 5354878Sstever@eecs.umich.edu } 5364040Ssaidi@eecs.umich.edu } 5374040Ssaidi@eecs.umich.edu 5384999Sgblack@eecs.umich.edu //If there's a fault or we don't need to access a second cache line, 5394999Sgblack@eecs.umich.edu //stop now. 54013954Sgiacomo.gabrielli@arm.com if (fault != NoFault || size_left == 0) 5414999Sgblack@eecs.umich.edu { 54210760Ssteve.reinhardt@amd.com if (req->isLockedRMW() && fault == NoFault) { 54313954Sgiacomo.gabrielli@arm.com assert(byteEnable.empty()); 5446078Sgblack@eecs.umich.edu locked = false; 5456078Sgblack@eecs.umich.edu } 54611147Smitch.hayenga@arm.com 5476739Sgblack@eecs.umich.edu if (fault != NoFault && req->isPrefetch()) { 5486739Sgblack@eecs.umich.edu return NoFault; 5496739Sgblack@eecs.umich.edu } else { 5506739Sgblack@eecs.umich.edu return fault; 5516739Sgblack@eecs.umich.edu } 5523170Sstever@eecs.umich.edu } 5533170Sstever@eecs.umich.edu 5544999Sgblack@eecs.umich.edu /* 55513954Sgiacomo.gabrielli@arm.com * Set up for accessing the next cache line. 5564999Sgblack@eecs.umich.edu */ 55713954Sgiacomo.gabrielli@arm.com frag_addr += frag_size; 5584999Sgblack@eecs.umich.edu 5594999Sgblack@eecs.umich.edu //Move the pointer we're reading into to the correct location. 
        data += frag_size;

        curr_frag_id++;
    }
}

// Perform an atomic memory operation (AMO) of 'size' bytes at virtual
// address 'addr'. 'data' supplies the operand and, on return, holds the
// old value read from memory (the packet is a swap-style write whose
// payload is returned in place). 'amo_op' is the functor the memory
// system applies atomically. Returns the translation/access fault, or
// NoFault on success (faulting prefetches are silently dropped).
Fault
AtomicSimpleCPU::amoMem(Addr addr, uint8_t* data, unsigned size,
                        Request::Flags flags, AtomicOpFunctor *amo_op)
{
    SimpleExecContext& t_info = *threadInfo[curThread];
    SimpleThread* thread = t_info.thread;

    // use the CPU's statically allocated amo request and packet objects
    const RequestPtr &req = data_amo_req;

    if (traceData)
        traceData->setMem(addr, size, flags);

    //The address of the second part of this access if it needs to be split
    //across a cache line boundary.
    Addr secondAddr = roundDown(addr + size - 1, cacheLineSize());

    // AMO requests that access across a cache line boundary are not
    // allowed since the cache does not guarantee AMO ops to be executed
    // atomically in two cache lines
    // For ISAs such as x86 that require AMO operations to work on
    // accesses that cross cache-line boundaries, the cache needs to be
    // modified to support locking both cache lines to guarantee the
    // atomicity.
    if (secondAddr > addr) {
        panic("AMO request should not access across a cache line boundary\n");
    }

    dcache_latency = 0;

    req->taskId(taskId());
    req->setVirt(0, addr, size, flags, dataMasterId(),
                 thread->pcState().instAddr(), amo_op);

    // translate to physical address
    Fault fault = thread->dtb->translateAtomic(req, thread->getTC(),
                                               BaseTLB::Write);

    // Now do the access.
    if (fault == NoFault && !req->getFlags().isSet(Request::NO_ACCESS)) {
        // We treat AMO accesses as Write accesses with SwapReq command
        // data will hold the return data of the AMO access
        Packet pkt(req, Packet::makeWriteCmd(req));
        pkt.dataStatic(data);

        // Memory-mapped IPR accesses bypass the cache hierarchy entirely.
        if (req->isMmappedIpr())
            dcache_latency += TheISA::handleIprRead(thread->getTC(), &pkt);
        else {
            dcache_latency += sendPacket(dcachePort, &pkt);
        }

        dcache_access = true;

        assert(!pkt.isError());
        // AMOs never carry LLSC semantics; that path is handled elsewhere.
        assert(!req->isLLSC());
    }

    // Faulting prefetches are dropped rather than reported to the caller.
    if (fault != NoFault && req->isPrefetch()) {
        return NoFault;
    }

    //If there's a fault and we're not doing prefetch, return it
    return fault;
}

// Main simulation loop of the atomic CPU, run once per scheduled tick
// event: executes up to 'width' instructions back-to-back (fetch,
// translate, execute), accumulates stall latency from modelled i-/d-cache
// accesses, and reschedules itself unless the CPU has drained or gone
// idle.
void
AtomicSimpleCPU::tick()
{
    DPRINTF(SimpleCPU, "Tick\n");

    // Change thread if multi-threaded
    swapActiveThread();

    // Set memory request ids to current thread
    if (numThreads > 1) {
        ContextID cid = threadContexts[curThread]->contextId();

        ifetch_req->setContext(cid);
        data_read_req->setContext(cid);
        data_write_req->setContext(cid);
        data_amo_req->setContext(cid);
    }

    SimpleExecContext& t_info = *threadInfo[curThread];
    SimpleThread* thread = t_info.thread;

    Tick latency = 0;

    // NOTE(review): the loop continues past 'width' while 'locked' is
    // set — presumably so a locked access sequence completes within a
    // single tick; confirm against the locked-access handling elsewhere
    // in this file.
    for (int i = 0; i < width || locked; ++i) {
        numCycles++;
        updateCycleCounters(BaseCPU::CPU_STATE_ON);

        // Only poll for interrupts/PC events at instruction boundaries,
        // never in the middle of a delayed-commit microop sequence.
        if (!curStaticInst || !curStaticInst->isDelayedCommit()) {
            checkForInterrupts();
            checkPcEventQueue();
        }

        // We must have just got suspended by a PC event
        if (_status == Idle) {
            tryCompleteDrain();
            return;
        }

        Fault fault = NoFault;

        TheISA::PCState pcState = thread->pcState();

        // No fetch is needed when executing out of the microcode ROM or
        // when continuing a macro-op already held in curMacroStaticInst.
        bool needToFetch = !isRomMicroPC(pcState.microPC()) &&
                           !curMacroStaticInst;
        if (needToFetch) {
            ifetch_req->taskId(taskId());
            setupFetchRequest(ifetch_req);
            fault = thread->itb->translateAtomic(ifetch_req, thread->getTC(),
                                                 BaseTLB::Execute);
        }

        if (fault == NoFault) {
            Tick icache_latency = 0;
            bool icache_access = false;
            dcache_access = false; // assume no dcache access

            if (needToFetch) {
                // This is commented out because the decoder would act like
                // a tiny cache otherwise. It wouldn't be flushed when needed
                // like the I cache. It should be flushed, and when that works
                // this code should be uncommented.
                //Fetch more instruction memory if necessary
                //if (decoder.needMoreBytes())
                //{
                icache_access = true;
                Packet ifetch_pkt = Packet(ifetch_req, MemCmd::ReadReq);
                ifetch_pkt.dataStatic(&inst);

                icache_latency = sendPacket(icachePort, &ifetch_pkt);

                assert(!ifetch_pkt.isError());

                // ifetch_req is initialized to read the instruction directly
                // into the CPU object's inst field.
                //}
            }

            preExecute();

            Tick stall_ticks = 0;
            if (curStaticInst) {
                fault = curStaticInst->execute(&t_info, traceData);

                // keep an instruction count
                if (fault == NoFault) {
                    countInst();
                    // Notify the "Commit" probe registered in
                    // regProbePoints() of the committed instruction.
                    ppCommit->notify(std::make_pair(thread, curStaticInst));
                }
                else if (traceData && !DTRACE(ExecFaulting)) {
                    // Faulting instructions are not traced unless the
                    // ExecFaulting debug flag is enabled.
                    delete traceData;
                    traceData = NULL;
                }

                if (fault != NoFault &&
                    dynamic_pointer_cast<SyscallRetryFault>(fault)) {
                    // Retry execution of system calls after a delay.
                    // Prevents immediate re-execution since conditions which
                    // caused the retry are unlikely to change every tick.
                    stall_ticks += clockEdge(syscallRetryLatency) - curTick();
                }

                postExecute();
            }

            // @todo remove me after debugging with legion done
            if (curStaticInst && (!curStaticInst->isMicroop() ||
                        curStaticInst->isFirstMicroop()))
                instCnt++;

            // Only charge cache latencies if the corresponding stall
            // simulation is enabled via the CPU parameters.
            if (simulate_inst_stalls && icache_access)
                stall_ticks += icache_latency;

            if (simulate_data_stalls && dcache_access)
                stall_ticks += dcache_latency;

            if (stall_ticks) {
                // the atomic cpu does its accounting in ticks, so
                // keep counting in ticks but round to the clock
                // period
                latency += divCeil(stall_ticks, clockPeriod()) *
                    clockPeriod();
            }

        }
        // Advance the PC unless the (faulting or stalled) instruction
        // asked to stay at the current PC.
        if (fault != NoFault || !t_info.stayAtPC)
            advancePC(fault);
    }

    if (tryCompleteDrain())
        return;

    // instruction takes at least one cycle
    if (latency < clockPeriod())
        latency = clockPeriod();

    if (_status != Idle)
        reschedule(tickEvent, curTick() + latency, true);
}

// Register this CPU's probe points: the base-class probes plus the
// "Commit" probe, which tick() notifies with (thread, static inst) for
// every committed instruction.
void
AtomicSimpleCPU::regProbePoints()
{
    BaseCPU::regProbePoints();

    ppCommit = new ProbePointArg<pair<SimpleThread*, const StaticInstPtr>>
        (getProbeManager(), "Commit");
}

// Debug helper: forward an address to the data-cache port's printAddr.
void
AtomicSimpleCPU::printAddr(Addr a)
{
    dcachePort.printAddr(a);
}

////////////////////////////////////////////////////////////////////////
//
//  AtomicSimpleCPU Simulation Object
//

// Factory hook invoked by the parameter system to build the CPU from
// its Python-generated params object.
AtomicSimpleCPU *
AtomicSimpleCPUParams::create()
{
    return new AtomicSimpleCPU(this);
}