/*
 * Copyright 2014 Google, Inc.
 * Copyright (c) 2012-2013,2015 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 */

#include "arch/locked_mem.hh"
#include "arch/mmapped_ipr.hh"
#include "arch/utility.hh"
#include "base/bigint.hh"
#include "base/output.hh"
#include "config/the_isa.hh"
#include "cpu/simple/atomic.hh"
#include "cpu/exetrace.hh"
#include "debug/Drain.hh"
#include "debug/ExecFaulting.hh"
#include "debug/SimpleCPU.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "mem/physical.hh"
#include "params/AtomicSimpleCPU.hh"
#include "sim/faults.hh"
#include "sim/system.hh"
#include "sim/full_system.hh"

using namespace std;
using namespace TheISA;

AtomicSimpleCPU::TickEvent::TickEvent(AtomicSimpleCPU *c)
    : Event(CPU_Tick_Pri), cpu(c)
{
}

void
AtomicSimpleCPU::TickEvent::process()
{
    cpu->tick();
}

const char *
AtomicSimpleCPU::TickEvent::description() const
{
    return "AtomicSimpleCPU tick";
}

void
AtomicSimpleCPU::init()
{
    BaseSimpleCPU::init();

    ifetch_req.setThreadContext(_cpuId, 0);
    data_read_req.setThreadContext(_cpuId, 0);
    data_write_req.setThreadContext(_cpuId, 0);
}

AtomicSimpleCPU::AtomicSimpleCPU(AtomicSimpleCPUParams *p)
    : BaseSimpleCPU(p), tickEvent(this), width(p->width), locked(false),
      simulate_data_stalls(p->simulate_data_stalls),
      simulate_inst_stalls(p->simulate_inst_stalls),
      icachePort(name() + ".icache_port", this),
      dcachePort(name() + ".dcache_port", this),
      fastmem(p->fastmem), dcache_access(false), dcache_latency(0),
      ppCommit(nullptr)
{
    _status = Idle;
}

AtomicSimpleCPU::~AtomicSimpleCPU()
{
    if (tickEvent.scheduled()) {
        deschedule(tickEvent);
    }
}
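
// Draining: the atomic CPU can drain immediately unless it is in the
// middle of a multi-cycle (e.g. microcoded) instruction; in that case it
// keeps ticking and completes the drain from tick() via tryCompleteDrain().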
DrainState
AtomicSimpleCPU::drain()
{
    if (switchedOut())
        return DrainState::Drained;

    if (!isDrained()) {
        DPRINTF(Drain, "Requesting drain.\n");
        return DrainState::Draining;
    } else {
        if (tickEvent.scheduled())
            deschedule(tickEvent);

        activeThreads.clear();
        DPRINTF(Drain, "Not executing microcode, no need to drain.\n");
        return DrainState::Drained;
    }
}

void
AtomicSimpleCPU::drainResume()
{
    assert(!tickEvent.scheduled());
    if (switchedOut())
        return;

    DPRINTF(SimpleCPU, "Resume\n");
    verifyMemoryMode();

    assert(!threadContexts.empty());

    _status = BaseSimpleCPU::Idle;

    for (ThreadID tid = 0; tid < numThreads; tid++) {
        if (threadInfo[tid]->thread->status() == ThreadContext::Active) {
            threadInfo[tid]->notIdleFraction = 1;
            activeThreads.push_back(tid);
            _status = BaseSimpleCPU::Running;

            // Tick if any threads active
            if (!tickEvent.scheduled()) {
                schedule(tickEvent, nextCycle());
            }
        } else {
            threadInfo[tid]->notIdleFraction = 0;
        }
    }
}

bool
AtomicSimpleCPU::tryCompleteDrain()
{
    if (drainState() != DrainState::Draining)
        return false;

    DPRINTF(Drain, "tryCompleteDrain.\n");
    if (!isDrained())
        return false;

    DPRINTF(Drain, "CPU done draining, processing drain event\n");
    signalDrainDone();

    return true;
}


void
AtomicSimpleCPU::switchOut()
{
    BaseSimpleCPU::switchOut();

    assert(!tickEvent.scheduled());
    assert(_status == BaseSimpleCPU::Running || _status == Idle);
    assert(isDrained());
}


void
AtomicSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
{
    BaseSimpleCPU::takeOverFrom(oldCPU);

    // The tick event should have been descheduled by drain()
    assert(!tickEvent.scheduled());
}
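
// The atomic CPU model only works when the memory system is in 'atomic'
// mode; refuse to run otherwise.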
void
AtomicSimpleCPU::verifyMemoryMode() const
{
    if (!system->isAtomicMode()) {
        fatal("The atomic CPU requires the memory system to be in "
              "'atomic' mode.\n");
    }
}

void
AtomicSimpleCPU::activateContext(ThreadID thread_num)
{
    DPRINTF(SimpleCPU, "ActivateContext %d\n", thread_num);

    assert(thread_num < numThreads);

    threadInfo[thread_num]->notIdleFraction = 1;
    Cycles delta = ticksToCycles(threadInfo[thread_num]->thread->lastActivate -
                                 threadInfo[thread_num]->thread->lastSuspend);
    numCycles += delta;
    ppCycles->notify(delta);

    if (!tickEvent.scheduled()) {
        //Make sure ticks are still on multiples of cycles
        schedule(tickEvent, clockEdge(Cycles(0)));
    }
    _status = BaseSimpleCPU::Running;
    if (std::find(activeThreads.begin(), activeThreads.end(), thread_num)
        == activeThreads.end()) {
        activeThreads.push_back(thread_num);
    }
}


void
AtomicSimpleCPU::suspendContext(ThreadID thread_num)
{
    DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);

    assert(thread_num < numThreads);
    activeThreads.remove(thread_num);

    if (_status == Idle)
        return;

    assert(_status == BaseSimpleCPU::Running);

    threadInfo[thread_num]->notIdleFraction = 0;

    if (activeThreads.empty()) {
        _status = Idle;

        if (tickEvent.scheduled()) {
            deschedule(tickEvent);
        }
    }
}


Tick
AtomicSimpleCPU::AtomicCPUDPort::recvAtomicSnoop(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "received snoop pkt for addr:%#x %s\n", pkt->getAddr(),
            pkt->cmdString());

    // X86 ISA: Snooping an invalidation for monitor/mwait
    AtomicSimpleCPU *cpu = (AtomicSimpleCPU *)(&owner);
    if (cpu->getCpuAddrMonitor()->doMonitor(pkt)) {
        cpu->wakeup();
    }

    // if snoop invalidates, release any associated locks
    if (pkt->isInvalidate()) {
        DPRINTF(SimpleCPU, "received invalidation for addr:%#x\n",
                pkt->getAddr());
        for (auto &t_info : cpu->threadInfo) {
            TheISA::handleLockedSnoop(t_info->thread, pkt, cacheBlockMask);
        }
    }

    return 0;
}
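
// Functional snoops take the same actions as atomic snoops: wake up a
// CPU blocked in monitor/mwait if the address is being monitored, and
// clear any LL/SC reservations an invalidation may affect.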
void
AtomicSimpleCPU::AtomicCPUDPort::recvFunctionalSnoop(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "received snoop pkt for addr:%#x %s\n", pkt->getAddr(),
            pkt->cmdString());

    // X86 ISA: Snooping an invalidation for monitor/mwait
    AtomicSimpleCPU *cpu = (AtomicSimpleCPU *)(&owner);
    if (cpu->getCpuAddrMonitor()->doMonitor(pkt)) {
        cpu->wakeup();
    }

    // if snoop invalidates, release any associated locks
    if (pkt->isInvalidate()) {
        DPRINTF(SimpleCPU, "received invalidation for addr:%#x\n",
                pkt->getAddr());
        for (auto &t_info : cpu->threadInfo) {
            TheISA::handleLockedSnoop(t_info->thread, pkt, cacheBlockMask);
        }
    }
}

Fault
AtomicSimpleCPU::readMem(Addr addr, uint8_t * data,
                         unsigned size, unsigned flags)
{
    SimpleExecContext& t_info = *threadInfo[curThread];
    SimpleThread* thread = t_info.thread;

    // use the CPU's statically allocated read request and packet objects
    Request *req = &data_read_req;

    if (traceData)
        traceData->setMem(addr, size, flags);

    //The size of the data we're trying to read.
    int fullSize = size;

    //The address of the second part of this access if it needs to be split
    //across a cache line boundary.
    Addr secondAddr = roundDown(addr + size - 1, cacheLineSize());

    if (secondAddr > addr)
        size = secondAddr - addr;

    dcache_latency = 0;

    req->taskId(taskId());
    while (1) {
        req->setVirt(0, addr, size, flags, dataMasterId(),
                     thread->pcState().instAddr());

        // translate to physical address
        Fault fault = thread->dtb->translateAtomic(req, thread->getTC(),
                                                   BaseTLB::Read);

        // Now do the access.
        if (fault == NoFault && !req->getFlags().isSet(Request::NO_ACCESS)) {
            Packet pkt(req, Packet::makeReadCmd(req));
            pkt.dataStatic(data);

            if (req->isMmappedIpr())
                dcache_latency += TheISA::handleIprRead(thread->getTC(), &pkt);
            else {
                if (fastmem && system->isMemAddr(pkt.getAddr()))
                    system->getPhysMem().access(&pkt);
                else
                    dcache_latency += dcachePort.sendAtomic(&pkt);
            }
            dcache_access = true;

            assert(!pkt.isError());

            if (req->isLLSC()) {
                TheISA::handleLockedRead(thread, req);
            }
        }

        //If there's a fault, return it
        if (fault != NoFault) {
            if (req->isPrefetch()) {
                return NoFault;
            } else {
                return fault;
            }
        }

        //If we don't need to access a second cache line, stop now.
        if (secondAddr <= addr)
        {
            if (req->isLockedRMW() && fault == NoFault) {
                assert(!locked);
                locked = true;
            }

            return fault;
        }

        /*
         * Set up for accessing the second cache line.
         */

        //Move the pointer we're reading into to the correct location.
        data += size;
        //Adjust the size to get the remaining bytes.
        size = addr + fullSize - secondAddr;
        //And access the right address.
        addr = secondAddr;
    }
}
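
// Like readMem() above, writeMem() splits an access that crosses a cache
// line boundary into two atomic accesses, one per line.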
Fault
AtomicSimpleCPU::writeMem(uint8_t *data, unsigned size,
                          Addr addr, unsigned flags, uint64_t *res)
{
    SimpleExecContext& t_info = *threadInfo[curThread];
    SimpleThread* thread = t_info.thread;
    static uint8_t zero_array[64] = {};

    if (data == NULL) {
        assert(size <= 64);
        assert(flags & Request::CACHE_BLOCK_ZERO);
        // This must be a cache block cleaning request
        data = zero_array;
    }

    // use the CPU's statically allocated write request and packet objects
    Request *req = &data_write_req;

    if (traceData)
        traceData->setMem(addr, size, flags);

    //The size of the data we're trying to write.
    int fullSize = size;

    //The address of the second part of this access if it needs to be split
    //across a cache line boundary.
    Addr secondAddr = roundDown(addr + size - 1, cacheLineSize());

    if (secondAddr > addr)
        size = secondAddr - addr;

    dcache_latency = 0;

    req->taskId(taskId());
    while (1) {
        req->setVirt(0, addr, size, flags, dataMasterId(),
                     thread->pcState().instAddr());

        // translate to physical address
        Fault fault = thread->dtb->translateAtomic(req, thread->getTC(),
                                                   BaseTLB::Write);

        // Now do the access.
        if (fault == NoFault) {
            MemCmd cmd = MemCmd::WriteReq; // default
            bool do_access = true;  // flag to suppress cache access

            if (req->isLLSC()) {
                cmd = MemCmd::StoreCondReq;
                do_access =
                    TheISA::handleLockedWrite(thread, req,
                                              dcachePort.cacheBlockMask);
            } else if (req->isSwap()) {
                cmd = MemCmd::SwapReq;
                if (req->isCondSwap()) {
                    assert(res);
                    req->setExtraData(*res);
                }
            }

            if (do_access && !req->getFlags().isSet(Request::NO_ACCESS)) {
                Packet pkt = Packet(req, cmd);
                pkt.dataStatic(data);

                if (req->isMmappedIpr()) {
                    dcache_latency +=
                        TheISA::handleIprWrite(thread->getTC(), &pkt);
                } else {
                    if (fastmem && system->isMemAddr(pkt.getAddr()))
                        system->getPhysMem().access(&pkt);
                    else
                        dcache_latency += dcachePort.sendAtomic(&pkt);
                }
                dcache_access = true;
                assert(!pkt.isError());

                if (req->isSwap()) {
                    assert(res);
                    memcpy(res, pkt.getConstPtr<uint8_t>(), fullSize);
                }
            }

            if (res && !req->isSwap()) {
                *res = req->getExtraData();
            }
        }

        //If there's a fault or we don't need to access a second cache line,
        //stop now.
        if (fault != NoFault || secondAddr <= addr)
        {
            if (req->isLockedRMW() && fault == NoFault) {
                assert(locked);
                locked = false;
            }

            if (fault != NoFault && req->isPrefetch()) {
                return NoFault;
            } else {
                return fault;
            }
        }

        /*
         * Set up for accessing the second cache line.
         */

        //Move the pointer we're writing from to the correct location.
        data += size;
        //Adjust the size to get the remaining bytes.
        size = addr + fullSize - secondAddr;
        //And access the right address.
        addr = secondAddr;
    }
}
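
// Main simulation loop: executes up to 'width' instructions per tick,
// charging icache/dcache latencies as stall ticks when the corresponding
// simulate_*_stalls parameters are set.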
void
AtomicSimpleCPU::tick()
{
    DPRINTF(SimpleCPU, "Tick\n");

    // Change thread if multi-threaded
    swapActiveThread();

    // Set memory request ids to current thread
    if (numThreads > 1) {
        ifetch_req.setThreadContext(_cpuId, curThread);
        data_read_req.setThreadContext(_cpuId, curThread);
        data_write_req.setThreadContext(_cpuId, curThread);
    }

    SimpleExecContext& t_info = *threadInfo[curThread];
    SimpleThread* thread = t_info.thread;

    Tick latency = 0;

    for (int i = 0; i < width || locked; ++i) {
        numCycles++;
        ppCycles->notify(1);

        if (!curStaticInst || !curStaticInst->isDelayedCommit()) {
            checkForInterrupts();
            checkPcEventQueue();
        }

        // We must have just got suspended by a PC event
        if (_status == Idle) {
            tryCompleteDrain();
            return;
        }

        Fault fault = NoFault;

        TheISA::PCState pcState = thread->pcState();

        bool needToFetch = !isRomMicroPC(pcState.microPC()) &&
                           !curMacroStaticInst;
        if (needToFetch) {
            ifetch_req.taskId(taskId());
            setupFetchRequest(&ifetch_req);
            fault = thread->itb->translateAtomic(&ifetch_req, thread->getTC(),
                                                 BaseTLB::Execute);
        }

        if (fault == NoFault) {
            Tick icache_latency = 0;
            bool icache_access = false;
            dcache_access = false; // assume no dcache access

            if (needToFetch) {
                // This is commented out because the decoder would act like
                // a tiny cache otherwise. It wouldn't be flushed when needed
                // like the I cache. It should be flushed, and when that works
                // this code should be uncommented.
                //Fetch more instruction memory if necessary
                //if (decoder.needMoreBytes())
                //{
                icache_access = true;
                Packet ifetch_pkt = Packet(&ifetch_req, MemCmd::ReadReq);
                ifetch_pkt.dataStatic(&inst);

                if (fastmem && system->isMemAddr(ifetch_pkt.getAddr()))
                    system->getPhysMem().access(&ifetch_pkt);
                else
                    icache_latency = icachePort.sendAtomic(&ifetch_pkt);

                assert(!ifetch_pkt.isError());

                // ifetch_req is initialized to read the instruction directly
                // into the CPU object's inst field.
                //}
            }
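
            // Fetch complete (or not needed); decode and execute the
            // instruction, then account for the committed instruction.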
            preExecute();

            if (curStaticInst) {
                fault = curStaticInst->execute(&t_info, traceData);

                // keep an instruction count
                if (fault == NoFault) {
                    countInst();
                    ppCommit->notify(std::make_pair(thread, curStaticInst));
                }
                else if (traceData && !DTRACE(ExecFaulting)) {
                    delete traceData;
                    traceData = NULL;
                }

                postExecute();
            }

            // @todo remove me after debugging with legion done
            if (curStaticInst && (!curStaticInst->isMicroop() ||
                                  curStaticInst->isFirstMicroop()))
                instCnt++;

            Tick stall_ticks = 0;
            if (simulate_inst_stalls && icache_access)
                stall_ticks += icache_latency;

            if (simulate_data_stalls && dcache_access)
                stall_ticks += dcache_latency;

            if (stall_ticks) {
                // the atomic cpu does its accounting in ticks, so
                // keep counting in ticks but round to the clock
                // period
                latency += divCeil(stall_ticks, clockPeriod()) *
                           clockPeriod();
            }

        }
        if (fault != NoFault || !t_info.stayAtPC)
            advancePC(fault);
    }

    if (tryCompleteDrain())
        return;

    // instruction takes at least one cycle
    if (latency < clockPeriod())
        latency = clockPeriod();

    if (_status != Idle)
        reschedule(tickEvent, curTick() + latency, true);
}

void
AtomicSimpleCPU::regProbePoints()
{
    BaseCPU::regProbePoints();

    ppCommit = new ProbePointArg<pair<SimpleThread*, const StaticInstPtr>>
        (getProbeManager(), "Commit");
}

void
AtomicSimpleCPU::printAddr(Addr a)
{
    dcachePort.printAddr(a);
}

////////////////////////////////////////////////////////////////////////
//
// AtomicSimpleCPU Simulation Object
//
AtomicSimpleCPU *
AtomicSimpleCPUParams::create()
{
    return new AtomicSimpleCPU(this);
}