/*
 * Copyright 2014 Google, Inc.
 * Copyright (c) 2012-2013,2015 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 */

#include "cpu/simple/atomic.hh"

#include "arch/locked_mem.hh"
#include "arch/mmapped_ipr.hh"
#include "arch/utility.hh"
#include "base/bigint.hh"
#include "base/output.hh"
#include "config/the_isa.hh"
#include "cpu/exetrace.hh"
#include "debug/Drain.hh"
#include "debug/ExecFaulting.hh"
#include "debug/SimpleCPU.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "mem/physical.hh"
#include "params/AtomicSimpleCPU.hh"
#include "sim/faults.hh"
#include "sim/full_system.hh"
#include "sim/system.hh"

using namespace std;
using namespace TheISA;

// The tick event is the CPU's periodic self-wakeup: it fires at CPU tick
// priority and simply forwards to AtomicSimpleCPU::tick(), which does all
// the real work and reschedules the event itself.
AtomicSimpleCPU::TickEvent::TickEvent(AtomicSimpleCPU *c)
    : Event(CPU_Tick_Pri), cpu(c)
{
}


void
AtomicSimpleCPU::TickEvent::process()
{
    cpu->tick();
}
// Human-readable name for the tick event, used in event-queue debug output.
const char *
AtomicSimpleCPU::TickEvent::description() const
{
    return "AtomicSimpleCPU tick";
}

// Post-construction initialization: stamp the CPU's statically allocated
// fetch/read/write Request objects with the context id of thread 0.
// (tick() re-stamps them per-thread when numThreads > 1.)
void
AtomicSimpleCPU::init()
{
    BaseSimpleCPU::init();

    int cid = threadContexts[0]->contextId();
    ifetch_req.setContext(cid);
    data_read_req.setContext(cid);
    data_write_req.setContext(cid);
}

// The CPU starts out Idle; activateContext() transitions it to Running
// and schedules the first tick.
AtomicSimpleCPU::AtomicSimpleCPU(AtomicSimpleCPUParams *p)
    : BaseSimpleCPU(p), tickEvent(this), width(p->width), locked(false),
      simulate_data_stalls(p->simulate_data_stalls),
      simulate_inst_stalls(p->simulate_inst_stalls),
      icachePort(name() + ".icache_port", this),
      dcachePort(name() + ".dcache_port", this),
      fastmem(p->fastmem), dcache_access(false), dcache_latency(0),
      ppCommit(nullptr)
{
    _status = Idle;
}


AtomicSimpleCPU::~AtomicSimpleCPU()
{
    // Don't leave a pending event referring to a destroyed object.
    if (tickEvent.scheduled()) {
        deschedule(tickEvent);
    }
}

// Drain for checkpointing / CPU switchover.  If the CPU is mid-instruction
// (e.g. inside a microcode sequence) we must keep ticking until tick()
// observes the drained condition via tryCompleteDrain(); otherwise we can
// stop immediately, descheduling the tick event and clearing the active
// thread list.
DrainState
AtomicSimpleCPU::drain()
{
    // A switched-out CPU has nothing in flight by definition.
    if (switchedOut())
        return DrainState::Drained;

    if (!isDrained()) {
        DPRINTF(Drain, "Requesting drain.\n");
        return DrainState::Draining;
    } else {
        if (tickEvent.scheduled())
            deschedule(tickEvent);

        activeThreads.clear();
        DPRINTF(Drain, "Not executing microcode, no need to drain.\n");
        return DrainState::Drained;
    }
}

// Forward a write made by one hardware thread to the other threads on this
// CPU so their mwait address monitors and LL/SC reservations see it (no
// cache between SMT threads here to do the snooping for us).
void
AtomicSimpleCPU::threadSnoop(PacketPtr pkt, ThreadID sender)
{
    DPRINTF(SimpleCPU, "received snoop pkt for addr:%#x %s\n", pkt->getAddr(),
            pkt->cmdString());

    for (ThreadID tid = 0; tid < numThreads; tid++) {
        if (tid != sender) {
            // Wake a thread whose monitored address was touched.
            if (getCpuAddrMonitor(tid)->doMonitor(pkt)) {
                wakeup(tid);
            }

            // Let the ISA clear any load-locked reservation on this line.
            TheISA::handleLockedSnoop(threadInfo[tid]->thread,
                                      pkt, dcachePort.cacheBlockMask);
        }
    }
}

// Resume after a drain: recompute _status from the per-thread contexts and
// restart the tick event if any thread is Active.
void
AtomicSimpleCPU::drainResume()
{
    assert(!tickEvent.scheduled());
    if (switchedOut())
        return;

    DPRINTF(SimpleCPU, "Resume\n");
    verifyMemoryMode();

    assert(!threadContexts.empty());

    _status = BaseSimpleCPU::Idle;

    for (ThreadID tid = 0; tid < numThreads; tid++) {
        if (threadInfo[tid]->thread->status() == ThreadContext::Active) {
            threadInfo[tid]->notIdleFraction = 1;
            activeThreads.push_back(tid);
            _status = BaseSimpleCPU::Running;

            // Tick if any threads active
            if (!tickEvent.scheduled()) {
                schedule(tickEvent, nextCycle());
            }
        } else {
            threadInfo[tid]->notIdleFraction = 0;
        }
    }
}

// Called from tick() (and on suspension) while a drain is pending; signals
// drain completion once the CPU reaches a drained (instruction-boundary)
// state.  Returns true iff the drain was completed here.
bool
AtomicSimpleCPU::tryCompleteDrain()
{
    if (drainState() != DrainState::Draining)
        return false;

    DPRINTF(Drain, "tryCompleteDrain.\n");
    if (!isDrained())
        return false;

    DPRINTF(Drain, "CPU done draining, processing drain event\n");
    signalDrainDone();

    return true;
}


// Switch out in preparation for handing the workload to another CPU model.
// By this point drain() must already have quiesced everything.
void
AtomicSimpleCPU::switchOut()
{
    BaseSimpleCPU::switchOut();

    assert(!tickEvent.scheduled());
    assert(_status == BaseSimpleCPU::Running || _status == Idle);
    assert(isDrained());
}


void
AtomicSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
{
    BaseSimpleCPU::takeOverFrom(oldCPU);

    // The tick event should have been descheduled by drain()
    assert(!tickEvent.scheduled());
}

// The atomic CPU issues memory accesses with sendAtomic(), so the memory
// system must be configured in atomic mode; fail fast otherwise.
void
AtomicSimpleCPU::verifyMemoryMode() const
{
    if (!system->isAtomicMode()) {
        fatal("The atomic CPU requires the memory system to be in "
              "'atomic' mode.\n");
    }
}

// Activate a thread context: account for the cycles it spent suspended,
// add it to the active list, and (re)start the tick event.
void
AtomicSimpleCPU::activateContext(ThreadID thread_num)
{
    DPRINTF(SimpleCPU, "ActivateContext %d\n", thread_num);

    assert(thread_num < numThreads);

    threadInfo[thread_num]->notIdleFraction = 1;
    // Credit the cycle counters with the time elapsed while suspended.
    Cycles delta = ticksToCycles(threadInfo[thread_num]->thread->lastActivate -
                                 threadInfo[thread_num]->thread->lastSuspend);
    numCycles += delta;
    ppCycles->notify(delta);

    if (!tickEvent.scheduled()) {
        //Make sure ticks are still on multiples of cycles
        schedule(tickEvent, clockEdge(Cycles(0)));
    }
    _status = BaseSimpleCPU::Running;
    // Only append if not already on the active list (idempotent activate).
    if (std::find(activeThreads.begin(), activeThreads.end(), thread_num)
        == activeThreads.end()) {
        activeThreads.push_back(thread_num);
    }

    BaseCPU::activateContext(thread_num);
}


// Suspend a thread context; if it was the last active thread, go Idle and
// stop ticking.
void
AtomicSimpleCPU::suspendContext(ThreadID thread_num)
{
    DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);

    assert(thread_num < numThreads);
    activeThreads.remove(thread_num);

    if (_status == Idle)
        return;

    assert(_status == BaseSimpleCPU::Running);

    threadInfo[thread_num]->notIdleFraction = 0;

    if (activeThreads.empty()) {
        _status = Idle;

        if (tickEvent.scheduled()) {
            deschedule(tickEvent);
        }
    }

    BaseCPU::suspendContext(thread_num);
}


// Atomic-mode snoop from the memory system into this CPU's data port.
// Wakes mwait monitors and clears LL/SC reservations as needed.
Tick
AtomicSimpleCPU::AtomicCPUDPort::recvAtomicSnoop(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "received snoop pkt for addr:%#x %s\n", pkt->getAddr(),
            pkt->cmdString());

    // X86 ISA: Snooping an invalidation for monitor/mwait
    AtomicSimpleCPU *cpu = (AtomicSimpleCPU *)(&owner);

    for (ThreadID tid = 0; tid < cpu->numThreads; tid++) {
        if (cpu->getCpuAddrMonitor(tid)->doMonitor(pkt)) {
            cpu->wakeup(tid);
        }
    }

    // if snoop invalidates, release any associated locks
    // When run without caches, Invalidation packets will not be received
    // hence we must check if the incoming packets are writes and wakeup
    // the processor accordingly
    if (pkt->isInvalidate() || pkt->isWrite()) {
        DPRINTF(SimpleCPU, "received invalidation for addr:%#x\n",
                pkt->getAddr());
        for (auto &t_info : cpu->threadInfo) {
            TheISA::handleLockedSnoop(t_info->thread, pkt, cacheBlockMask);
        }
    }

    // Snoops consume no time in atomic mode.
    return 0;
}

// Functional-access counterpart of recvAtomicSnoop().  Note this variant
// only releases locks on explicit invalidations, not on plain writes.
void
AtomicSimpleCPU::AtomicCPUDPort::recvFunctionalSnoop(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "received snoop pkt for addr:%#x %s\n", pkt->getAddr(),
            pkt->cmdString());

    // X86 ISA: Snooping an invalidation for monitor/mwait
    AtomicSimpleCPU *cpu = (AtomicSimpleCPU *)(&owner);
    for (ThreadID tid = 0; tid < cpu->numThreads; tid++) {
        if (cpu->getCpuAddrMonitor(tid)->doMonitor(pkt)) {
            cpu->wakeup(tid);
        }
    }

    // if snoop invalidates, release any associated locks
    if (pkt->isInvalidate()) {
        DPRINTF(SimpleCPU, "received invalidation for addr:%#x\n",
                pkt->getAddr());
        for (auto &t_info : cpu->threadInfo) {
            TheISA::handleLockedSnoop(t_info->thread, pkt, cacheBlockMask);
        }
    }
}

// Perform an atomic-mode data read of 'size' bytes at virtual address
// 'addr' into 'data'.  An access that straddles a cache-line boundary is
// split into two pieces, issued back-to-back by the while loop below.
// Returns the translation/access fault, or NoFault on success; latency is
// accumulated into dcache_latency.
Fault
AtomicSimpleCPU::readMem(Addr addr, uint8_t * data, unsigned size,
                         Request::Flags flags)
{
    SimpleExecContext& t_info = *threadInfo[curThread];
    SimpleThread* thread = t_info.thread;

    // use the CPU's statically allocated read request and packet objects
    Request *req = &data_read_req;

    if (traceData)
        traceData->setMem(addr, size, flags);

    //The size of the data we're trying to read.
    int fullSize = size;

    //The address of the second part of this access if it needs to be split
    //across a cache line boundary.
    Addr secondAddr = roundDown(addr + size - 1, cacheLineSize());

    // If the last byte lands on a later line, truncate the first piece at
    // the line boundary; the loop picks up the remainder afterwards.
    if (secondAddr > addr)
        size = secondAddr - addr;

    dcache_latency = 0;

    req->taskId(taskId());
    while (1) {
        req->setVirt(0, addr, size, flags, dataMasterId(),
                     thread->pcState().instAddr());

        // translate to physical address
        Fault fault = thread->dtb->translateAtomic(req, thread->getTC(),
                                                   BaseTLB::Read);

        // Now do the access.
        if (fault == NoFault && !req->getFlags().isSet(Request::NO_ACCESS)) {
            Packet pkt(req, Packet::makeReadCmd(req));
            pkt.dataStatic(data);

            if (req->isMmappedIpr())
                // Memory-mapped ISA pseudo-registers bypass the cache port.
                dcache_latency += TheISA::handleIprRead(thread->getTC(), &pkt);
            else {
                // fastmem short-circuits straight to physical memory.
                if (fastmem && system->isMemAddr(pkt.getAddr()))
                    system->getPhysMem().access(&pkt);
                else
                    dcache_latency += dcachePort.sendAtomic(&pkt);
            }
            dcache_access = true;

            assert(!pkt.isError());

            if (req->isLLSC()) {
                // Record the load-linked reservation.
                TheISA::handleLockedRead(thread, req);
            }
        }

        //If there's a fault, return it
        if (fault != NoFault) {
            // Faulting prefetches are dropped silently.
            if (req->isPrefetch()) {
                return NoFault;
            } else {
                return fault;
            }
        }

        //If we don't need to access a second cache line, stop now.
        if (secondAddr <= addr)
        {
            if (req->isLockedRMW() && fault == NoFault) {
                // Start of a locked read-modify-write sequence: keep
                // ticking (see tick()'s loop condition) until the matching
                // write releases 'locked'.
                assert(!locked);
                locked = true;
            }

            return fault;
        }

        /*
         * Set up for accessing the second cache line.
         */

        //Move the pointer we're reading into to the correct location.
        data += size;
        //Adjust the size to get the remaining bytes.
        size = addr + fullSize - secondAddr;
        //And access the right address.
        addr = secondAddr;
    }
}

// Timing-mode entry point; never valid on the atomic CPU.
Fault
AtomicSimpleCPU::initiateMemRead(Addr addr, unsigned size,
                                 Request::Flags flags)
{
    panic("initiateMemRead() is for timing accesses, and should "
          "never be called on AtomicSimpleCPU.\n");
}

// Perform an atomic-mode data write, mirroring readMem()'s split-access
// structure.  Also handles store-conditional (LL/SC), swap/conditional-swap,
// and cache-block-zero requests.  'res' receives the store-conditional /
// swap result when non-null.
Fault
AtomicSimpleCPU::writeMem(uint8_t *data, unsigned size, Addr addr,
                          Request::Flags flags, uint64_t *res)
{
    SimpleExecContext& t_info = *threadInfo[curThread];
    SimpleThread* thread = t_info.thread;
    // Shared all-zero source buffer for cache-block-zero requests.
    // NOTE(review): caps the supported block size at 64 bytes (see assert).
    static uint8_t zero_array[64] = {};

    if (data == NULL) {
        assert(size <= 64);
        assert(flags & Request::CACHE_BLOCK_ZERO);
        // This must be a cache block cleaning request
        data = zero_array;
    }

    // use the CPU's statically allocated write request and packet objects
    Request *req = &data_write_req;

    if (traceData)
        traceData->setMem(addr, size, flags);

    //The size of the data we're trying to write.
    int fullSize = size;

    //The address of the second part of this access if it needs to be split
    //across a cache line boundary.
    Addr secondAddr = roundDown(addr + size - 1, cacheLineSize());

    if (secondAddr > addr)
        size = secondAddr - addr;

    dcache_latency = 0;

    req->taskId(taskId());
    while (1) {
        req->setVirt(0, addr, size, flags, dataMasterId(),
                     thread->pcState().instAddr());

        // translate to physical address
        Fault fault = thread->dtb->translateAtomic(req, thread->getTC(),
                                                   BaseTLB::Write);

        // Now do the access.
        if (fault == NoFault) {
            MemCmd cmd = MemCmd::WriteReq; // default
            bool do_access = true;  // flag to suppress cache access

            if (req->isLLSC()) {
                cmd = MemCmd::StoreCondReq;
                // The ISA decides whether the SC still holds its
                // reservation; if not, skip the memory access entirely.
                do_access =
                    TheISA::handleLockedWrite(thread, req,
                                              dcachePort.cacheBlockMask);
            } else if (req->isSwap()) {
                cmd = MemCmd::SwapReq;
                if (req->isCondSwap()) {
                    assert(res);
                    // Comparison value for the conditional swap.
                    req->setExtraData(*res);
                }
            }

            if (do_access && !req->getFlags().isSet(Request::NO_ACCESS)) {
                Packet pkt = Packet(req, cmd);
                pkt.dataStatic(data);

                if (req->isMmappedIpr()) {
                    dcache_latency +=
                        TheISA::handleIprWrite(thread->getTC(), &pkt);
                } else {
                    if (fastmem && system->isMemAddr(pkt.getAddr()))
                        system->getPhysMem().access(&pkt);
                    else
                        dcache_latency += dcachePort.sendAtomic(&pkt);

                    // Notify other threads on this CPU of write
                    threadSnoop(&pkt, curThread);
                }
                dcache_access = true;
                assert(!pkt.isError());

                if (req->isSwap()) {
                    assert(res);
                    // Swap returns the old memory contents to the caller.
                    memcpy(res, pkt.getConstPtr<uint8_t>(), fullSize);
                }
            }

            if (res && !req->isSwap()) {
                // Store-conditional success/failure code (set by the ISA
                // helper or the memory system).
                *res = req->getExtraData();
            }
        }

        //If there's a fault or we don't need to access a second cache line,
        //stop now.
        if (fault != NoFault || secondAddr <= addr)
        {
            if (req->isLockedRMW() && fault == NoFault) {
                // End of a locked RMW sequence started in readMem().
                assert(locked);
                locked = false;
            }


            if (fault != NoFault && req->isPrefetch()) {
                return NoFault;
            } else {
                return fault;
            }
        }

        /*
         * Set up for accessing the second cache line.
         */

        //Move the pointer we're writing from to the correct location.
        data += size;
        //Adjust the size to get the remaining bytes.
        size = addr + fullSize - secondAddr;
        //And access the right address.
        addr = secondAddr;
    }
}


// Main simulation loop.  Each invocation executes up to 'width'
// instructions (and keeps going while a locked RMW is in flight), charging
// one cycle per instruction plus any simulated cache-stall latency, then
// reschedules itself 'latency' ticks in the future.
void
AtomicSimpleCPU::tick()
{
    DPRINTF(SimpleCPU, "Tick\n");

    // Change thread if multi-threaded
    swapActiveThread();

    // Set memory request ids to current thread
    if (numThreads > 1) {
        ContextID cid = threadContexts[curThread]->contextId();

        ifetch_req.setContext(cid);
        data_read_req.setContext(cid);
        data_write_req.setContext(cid);
    }

    SimpleExecContext& t_info = *threadInfo[curThread];
    SimpleThread* thread = t_info.thread;

    Tick latency = 0;

    // 'locked' forces extra iterations so a locked RMW pair is never split
    // across tick events.
    for (int i = 0; i < width || locked; ++i) {
        numCycles++;
        ppCycles->notify(1);

        if (!curStaticInst || !curStaticInst->isDelayedCommit()) {
            checkForInterrupts();
            checkPcEventQueue();
        }

        // We must have just got suspended by a PC event
        if (_status == Idle) {
            tryCompleteDrain();
            return;
        }

        Fault fault = NoFault;

        TheISA::PCState pcState = thread->pcState();

        // No fetch needed when executing out of the microcode ROM or when
        // continuing a macro-op already decoded.
        bool needToFetch = !isRomMicroPC(pcState.microPC()) &&
                           !curMacroStaticInst;
        if (needToFetch) {
            ifetch_req.taskId(taskId());
            setupFetchRequest(&ifetch_req);
            fault = thread->itb->translateAtomic(&ifetch_req, thread->getTC(),
                                                 BaseTLB::Execute);
        }

        if (fault == NoFault) {
            Tick icache_latency = 0;
            bool icache_access = false;
            dcache_access = false; // assume no dcache access

            if (needToFetch) {
                // This is commented out because the decoder would act like
                // a tiny cache otherwise. It wouldn't be flushed when needed
                // like the I cache. It should be flushed, and when that works
                // this code should be uncommented.
                //Fetch more instruction memory if necessary
                //if (decoder.needMoreBytes())
                //{
                icache_access = true;
                Packet ifetch_pkt = Packet(&ifetch_req, MemCmd::ReadReq);
                ifetch_pkt.dataStatic(&inst);

                if (fastmem && system->isMemAddr(ifetch_pkt.getAddr()))
                    system->getPhysMem().access(&ifetch_pkt);
                else
                    icache_latency = icachePort.sendAtomic(&ifetch_pkt);

                assert(!ifetch_pkt.isError());

                // ifetch_req is initialized to read the instruction directly
                // into the CPU object's inst field.
                //}
            }

            preExecute();

            if (curStaticInst) {
                fault = curStaticInst->execute(&t_info, traceData);

                // keep an instruction count
                if (fault == NoFault) {
                    countInst();
                    ppCommit->notify(std::make_pair(thread, curStaticInst));
                }
                else if (traceData && !DTRACE(ExecFaulting)) {
                    // Drop the trace record for faulting instructions unless
                    // ExecFaulting tracing is enabled.
                    delete traceData;
                    traceData = NULL;
                }

                postExecute();
            }

            // @todo remove me after debugging with legion done
            if (curStaticInst && (!curStaticInst->isMicroop() ||
                        curStaticInst->isFirstMicroop()))
                instCnt++;

            Tick stall_ticks = 0;
            if (simulate_inst_stalls && icache_access)
                stall_ticks += icache_latency;

            if (simulate_data_stalls && dcache_access)
                stall_ticks += dcache_latency;

            if (stall_ticks) {
                // the atomic cpu does its accounting in ticks, so
                // keep counting in ticks but round to the clock
                // period
                latency += divCeil(stall_ticks, clockPeriod()) *
                    clockPeriod();
            }

        }
        if (fault != NoFault || !t_info.stayAtPC)
            advancePC(fault);
    }

    if (tryCompleteDrain())
        return;

    // instruction takes at least one cycle
    if (latency < clockPeriod())
        latency = clockPeriod();

    if (_status != Idle)
        reschedule(tickEvent, curTick() + latency, true);
}

// Register the "Commit" probe point, notified once per committed
// instruction from tick().
void
AtomicSimpleCPU::regProbePoints()
{
    BaseCPU::regProbePoints();

    ppCommit = new ProbePointArg<pair<SimpleThread*, const StaticInstPtr>>
                                (getProbeManager(), "Commit");
}

// Debug helper: print the route an address would take through the dcache
// port.
void
AtomicSimpleCPU::printAddr(Addr a)
{
    dcachePort.printAddr(a);
}

////////////////////////////////////////////////////////////////////////
//
// AtomicSimpleCPU Simulation Object
//
AtomicSimpleCPU *
AtomicSimpleCPUParams::create()
{
    return new AtomicSimpleCPU(this);
}