// atomic.cc -- gem5 AtomicSimpleCPU implementation (revision 10464)
/*
 * Copyright (c) 2012-2013 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT 327454Snate@binkert.org * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 337054Snate@binkert.org * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 347054Snate@binkert.org * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 357054Snate@binkert.org * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 368259SBrad.Beckmann@amd.com * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 376154Snate@binkert.org * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 386154Snate@binkert.org * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 396145Snate@binkert.org * 407055Snate@binkert.org * Authors: Steve Reinhardt 417454Snate@binkert.org */ 427454Snate@binkert.org 437055Snate@binkert.org#include "arch/locked_mem.hh" 449274Snilay@cs.wisc.edu#include "arch/mmapped_ipr.hh" 456145Snate@binkert.org#include "arch/utility.hh" 469858Snilay@cs.wisc.edu#include "base/bigint.hh" 476145Snate@binkert.org#include "base/output.hh" 486145Snate@binkert.org#include "config/the_isa.hh" 496145Snate@binkert.org#include "cpu/simple/atomic.hh" 506145Snate@binkert.org#include "cpu/exetrace.hh" 519858Snilay@cs.wisc.edu#include "debug/Drain.hh" 526145Snate@binkert.org#include "debug/ExecFaulting.hh" 537054Snate@binkert.org#include "debug/SimpleCPU.hh" 547454Snate@binkert.org#include "mem/packet.hh" 556145Snate@binkert.org#include "mem/packet_access.hh" 567054Snate@binkert.org#include "mem/physical.hh" 577454Snate@binkert.org#include "params/AtomicSimpleCPU.hh" 586145Snate@binkert.org#include "sim/faults.hh" 596145Snate@binkert.org#include "sim/system.hh" 607054Snate@binkert.org#include "sim/full_system.hh" 619274Snilay@cs.wisc.edu 629274Snilay@cs.wisc.eduusing namespace std; 639274Snilay@cs.wisc.eduusing namespace TheISA; 649858Snilay@cs.wisc.edu 659274Snilay@cs.wisc.eduAtomicSimpleCPU::TickEvent::TickEvent(AtomicSimpleCPU *c) 
669274Snilay@cs.wisc.edu : Event(CPU_Tick_Pri), cpu(c) 679274Snilay@cs.wisc.edu{ 687454Snate@binkert.org} 696145Snate@binkert.org 709858Snilay@cs.wisc.edu 719508Snilay@cs.wisc.eduvoid 729508Snilay@cs.wisc.eduAtomicSimpleCPU::TickEvent::process() 739508Snilay@cs.wisc.edu{ 749508Snilay@cs.wisc.edu cpu->tick(); 756145Snate@binkert.org} 766145Snate@binkert.org 777054Snate@binkert.orgconst char * 787454Snate@binkert.orgAtomicSimpleCPU::TickEvent::description() const 799499Snilay@cs.wisc.edu{ 806145Snate@binkert.org return "AtomicSimpleCPU tick"; 817054Snate@binkert.org} 829274Snilay@cs.wisc.edu 839499Snilay@cs.wisc.eduvoid 849499Snilay@cs.wisc.eduAtomicSimpleCPU::init() 857454Snate@binkert.org{ 866145Snate@binkert.org BaseCPU::init(); 877054Snate@binkert.org 887454Snate@binkert.org // Initialise the ThreadContext's memory proxies 897054Snate@binkert.org tcBase()->initMemProxies(tcBase()); 909508Snilay@cs.wisc.edu 919508Snilay@cs.wisc.edu if (FullSystem && !params()->switched_out) { 927054Snate@binkert.org ThreadID size = threadContexts.size(); 937054Snate@binkert.org for (ThreadID i = 0; i < size; ++i) { 947054Snate@binkert.org ThreadContext *tc = threadContexts[i]; 959274Snilay@cs.wisc.edu // initialize CPU, including PC 969274Snilay@cs.wisc.edu TheISA::initCPU(tc, tc->contextId()); 977054Snate@binkert.org } 989508Snilay@cs.wisc.edu } 997454Snate@binkert.org 1007454Snate@binkert.org // Atomic doesn't do MT right now, so contextId == threadId 1019508Snilay@cs.wisc.edu ifetch_req.setThreadContext(_cpuId, 0); // Add thread ID if we add MT 1029508Snilay@cs.wisc.edu data_read_req.setThreadContext(_cpuId, 0); // Add thread ID here too 1039508Snilay@cs.wisc.edu data_write_req.setThreadContext(_cpuId, 0); // Add thread ID here too 1049274Snilay@cs.wisc.edu} 1056145Snate@binkert.org 1067054Snate@binkert.orgAtomicSimpleCPU::AtomicSimpleCPU(AtomicSimpleCPUParams *p) 1079858Snilay@cs.wisc.edu : BaseSimpleCPU(p), tickEvent(this), width(p->width), locked(false), 
1086145Snate@binkert.org simulate_data_stalls(p->simulate_data_stalls), 1097054Snate@binkert.org simulate_inst_stalls(p->simulate_inst_stalls), 1109508Snilay@cs.wisc.edu drain_manager(NULL), 1116145Snate@binkert.org icachePort(name() + ".icache_port", this), 1126145Snate@binkert.org dcachePort(name() + ".dcache_port", this), 1137054Snate@binkert.org fastmem(p->fastmem) 1147054Snate@binkert.org{ 1156145Snate@binkert.org _status = Idle; 1169858Snilay@cs.wisc.edu} 1176145Snate@binkert.org 1186145Snate@binkert.org 1197054Snate@binkert.orgAtomicSimpleCPU::~AtomicSimpleCPU() 1207054Snate@binkert.org{ 1216145Snate@binkert.org if (tickEvent.scheduled()) { 1229858Snilay@cs.wisc.edu deschedule(tickEvent); 1237054Snate@binkert.org } 1247054Snate@binkert.org} 1257054Snate@binkert.org 1267054Snate@binkert.orgunsigned int 1276145Snate@binkert.orgAtomicSimpleCPU::drain(DrainManager *dm) 1286145Snate@binkert.org{ 1296145Snate@binkert.org assert(!drain_manager); 1307054Snate@binkert.org if (switchedOut()) 1317054Snate@binkert.org return 0; 1326145Snate@binkert.org 1339858Snilay@cs.wisc.edu if (!isDrained()) { 1346145Snate@binkert.org DPRINTF(Drain, "Requesting drain: %s\n", pcState()); 1356145Snate@binkert.org drain_manager = dm; 1367054Snate@binkert.org return 1; 1377054Snate@binkert.org } else { 1386145Snate@binkert.org if (tickEvent.scheduled()) 1397054Snate@binkert.org deschedule(tickEvent); 1407054Snate@binkert.org 1416145Snate@binkert.org DPRINTF(Drain, "Not executing microcode, no need to drain.\n"); 1426145Snate@binkert.org return 0; 1437454Snate@binkert.org } 1447054Snate@binkert.org} 1456145Snate@binkert.org 1467054Snate@binkert.orgvoid 1476145Snate@binkert.orgAtomicSimpleCPU::drainResume() 1486145Snate@binkert.org{ 1497054Snate@binkert.org assert(!tickEvent.scheduled()); 1507054Snate@binkert.org assert(!drain_manager); 1516145Snate@binkert.org if (switchedOut()) 1529274Snilay@cs.wisc.edu return; 1539858Snilay@cs.wisc.edu 1549274Snilay@cs.wisc.edu DPRINTF(SimpleCPU, 
"Resume\n"); 1559858Snilay@cs.wisc.edu verifyMemoryMode(); 1566145Snate@binkert.org 1577054Snate@binkert.org assert(!threadContexts.empty()); 1587054Snate@binkert.org if (threadContexts.size() > 1) 1597054Snate@binkert.org fatal("The atomic CPU only supports one thread.\n"); 1606145Snate@binkert.org 1617054Snate@binkert.org if (thread->status() == ThreadContext::Active) { 1627054Snate@binkert.org schedule(tickEvent, nextCycle()); 1637054Snate@binkert.org _status = BaseSimpleCPU::Running; 1647054Snate@binkert.org notIdleFraction = 1; 1657054Snate@binkert.org } else { 1667054Snate@binkert.org _status = BaseSimpleCPU::Idle; 1676145Snate@binkert.org notIdleFraction = 0; 1687054Snate@binkert.org } 1697054Snate@binkert.org 1706145Snate@binkert.org system->totalNumInsts = 0; 1717054Snate@binkert.org} 1729274Snilay@cs.wisc.edu 1737054Snate@binkert.orgbool 1747054Snate@binkert.orgAtomicSimpleCPU::tryCompleteDrain() 1757054Snate@binkert.org{ 1767054Snate@binkert.org if (!drain_manager) 1777054Snate@binkert.org return false; 1789274Snilay@cs.wisc.edu 1797054Snate@binkert.org DPRINTF(Drain, "tryCompleteDrain: %s\n", pcState()); 1807054Snate@binkert.org if (!isDrained()) 1817054Snate@binkert.org return false; 1827054Snate@binkert.org 1837054Snate@binkert.org DPRINTF(Drain, "CPU done draining, processing drain event\n"); 1846145Snate@binkert.org drain_manager->signalDrainDone(); 1857054Snate@binkert.org drain_manager = NULL; 1866145Snate@binkert.org 1877054Snate@binkert.org return true; 1887054Snate@binkert.org} 1897054Snate@binkert.org 1907054Snate@binkert.org 1917054Snate@binkert.orgvoid 1927054Snate@binkert.orgAtomicSimpleCPU::switchOut() 1937454Snate@binkert.org{ 1947054Snate@binkert.org BaseSimpleCPU::switchOut(); 1957054Snate@binkert.org 1967054Snate@binkert.org assert(!tickEvent.scheduled()); 1977454Snate@binkert.org assert(_status == BaseSimpleCPU::Running || _status == Idle); 1987454Snate@binkert.org assert(isDrained()); 1997054Snate@binkert.org} 
2007054Snate@binkert.org 2017054Snate@binkert.org 2029274Snilay@cs.wisc.eduvoid 2037054Snate@binkert.orgAtomicSimpleCPU::takeOverFrom(BaseCPU *oldCPU) 2049274Snilay@cs.wisc.edu{ 2057454Snate@binkert.org BaseSimpleCPU::takeOverFrom(oldCPU); 2067454Snate@binkert.org 2077454Snate@binkert.org // The tick event should have been descheduled by drain() 2087054Snate@binkert.org assert(!tickEvent.scheduled()); 2096145Snate@binkert.org 2106145Snate@binkert.org ifetch_req.setThreadContext(_cpuId, 0); // Add thread ID if we add MT 2117054Snate@binkert.org data_read_req.setThreadContext(_cpuId, 0); // Add thread ID here too 2126145Snate@binkert.org data_write_req.setThreadContext(_cpuId, 0); // Add thread ID here too 2136145Snate@binkert.org} 2147054Snate@binkert.org 2157054Snate@binkert.orgvoid 2166145Snate@binkert.orgAtomicSimpleCPU::verifyMemoryMode() const 2179858Snilay@cs.wisc.edu{ 2187054Snate@binkert.org if (!system->isAtomicMode()) { 2197054Snate@binkert.org fatal("The atomic CPU requires the memory system to be in " 2207054Snate@binkert.org "'atomic' mode.\n"); 2216145Snate@binkert.org } 2226145Snate@binkert.org} 2236145Snate@binkert.org 2247054Snate@binkert.orgvoid 2257054Snate@binkert.orgAtomicSimpleCPU::activateContext(ThreadID thread_num) 2266145Snate@binkert.org{ 2277054Snate@binkert.org DPRINTF(SimpleCPU, "ActivateContext %d\n", thread_num); 2287054Snate@binkert.org 2296145Snate@binkert.org assert(thread_num == 0); 2309354Snilay@cs.wisc.edu assert(thread); 2319302Snilay@cs.wisc.edu 2329302Snilay@cs.wisc.edu assert(_status == Idle); 2339302Snilay@cs.wisc.edu assert(!tickEvent.scheduled()); 2349302Snilay@cs.wisc.edu 2359302Snilay@cs.wisc.edu notIdleFraction = 1; 2369302Snilay@cs.wisc.edu Cycles delta = ticksToCycles(thread->lastActivate - thread->lastSuspend); 2379302Snilay@cs.wisc.edu numCycles += delta; 2389302Snilay@cs.wisc.edu ppCycles->notify(delta); 2399302Snilay@cs.wisc.edu 2409302Snilay@cs.wisc.edu //Make sure ticks are still on multiples of cycles 
2419302Snilay@cs.wisc.edu schedule(tickEvent, clockEdge(Cycles(0))); 2429302Snilay@cs.wisc.edu _status = BaseSimpleCPU::Running; 2439302Snilay@cs.wisc.edu} 2449302Snilay@cs.wisc.edu 2459302Snilay@cs.wisc.edu 2469302Snilay@cs.wisc.eduvoid 2479302Snilay@cs.wisc.eduAtomicSimpleCPU::suspendContext(ThreadID thread_num) 2489302Snilay@cs.wisc.edu{ 2499302Snilay@cs.wisc.edu DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num); 2509302Snilay@cs.wisc.edu 2519302Snilay@cs.wisc.edu assert(thread_num == 0); 2529302Snilay@cs.wisc.edu assert(thread); 2539274Snilay@cs.wisc.edu 2549274Snilay@cs.wisc.edu if (_status == Idle) 2559274Snilay@cs.wisc.edu return; 2569274Snilay@cs.wisc.edu 2579274Snilay@cs.wisc.edu assert(_status == BaseSimpleCPU::Running); 2589274Snilay@cs.wisc.edu 259 // tick event may not be scheduled if this gets called from inside 260 // an instruction's execution, e.g. "quiesce" 261 if (tickEvent.scheduled()) 262 deschedule(tickEvent); 263 264 notIdleFraction = 0; 265 _status = Idle; 266} 267 268 269Tick 270AtomicSimpleCPU::AtomicCPUDPort::recvAtomicSnoop(PacketPtr pkt) 271{ 272 DPRINTF(SimpleCPU, "received snoop pkt for addr:%#x %s\n", pkt->getAddr(), 273 pkt->cmdString()); 274 275 // if snoop invalidates, release any associated locks 276 if (pkt->isInvalidate()) { 277 DPRINTF(SimpleCPU, "received invalidation for addr:%#x\n", 278 pkt->getAddr()); 279 TheISA::handleLockedSnoop(cpu->thread, pkt, cacheBlockMask); 280 } 281 282 return 0; 283} 284 285void 286AtomicSimpleCPU::AtomicCPUDPort::recvFunctionalSnoop(PacketPtr pkt) 287{ 288 DPRINTF(SimpleCPU, "received snoop pkt for addr:%#x %s\n", pkt->getAddr(), 289 pkt->cmdString()); 290 291 // if snoop invalidates, release any associated locks 292 if (pkt->isInvalidate()) { 293 DPRINTF(SimpleCPU, "received invalidation for addr:%#x\n", 294 pkt->getAddr()); 295 TheISA::handleLockedSnoop(cpu->thread, pkt, cacheBlockMask); 296 } 297} 298 299Fault 300AtomicSimpleCPU::readMem(Addr addr, uint8_t * data, 301 unsigned size, 
unsigned flags) 302{ 303 // use the CPU's statically allocated read request and packet objects 304 Request *req = &data_read_req; 305 306 if (traceData) { 307 traceData->setAddr(addr); 308 } 309 310 //The size of the data we're trying to read. 311 int fullSize = size; 312 313 //The address of the second part of this access if it needs to be split 314 //across a cache line boundary. 315 Addr secondAddr = roundDown(addr + size - 1, cacheLineSize()); 316 317 if (secondAddr > addr) 318 size = secondAddr - addr; 319 320 dcache_latency = 0; 321 322 req->taskId(taskId()); 323 while (1) { 324 req->setVirt(0, addr, size, flags, dataMasterId(), thread->pcState().instAddr()); 325 326 // translate to physical address 327 Fault fault = thread->dtb->translateAtomic(req, tc, BaseTLB::Read); 328 329 // Now do the access. 330 if (fault == NoFault && !req->getFlags().isSet(Request::NO_ACCESS)) { 331 Packet pkt(req, MemCmd::ReadReq); 332 pkt.refineCommand(); 333 pkt.dataStatic(data); 334 335 if (req->isMmappedIpr()) 336 dcache_latency += TheISA::handleIprRead(thread->getTC(), &pkt); 337 else { 338 if (fastmem && system->isMemAddr(pkt.getAddr())) 339 system->getPhysMem().access(&pkt); 340 else 341 dcache_latency += dcachePort.sendAtomic(&pkt); 342 } 343 dcache_access = true; 344 345 assert(!pkt.isError()); 346 347 if (req->isLLSC()) { 348 TheISA::handleLockedRead(thread, req); 349 } 350 } 351 352 //If there's a fault, return it 353 if (fault != NoFault) { 354 if (req->isPrefetch()) { 355 return NoFault; 356 } else { 357 return fault; 358 } 359 } 360 361 //If we don't need to access a second cache line, stop now. 362 if (secondAddr <= addr) 363 { 364 if (req->isLocked() && fault == NoFault) { 365 assert(!locked); 366 locked = true; 367 } 368 return fault; 369 } 370 371 /* 372 * Set up for accessing the second cache line. 373 */ 374 375 //Move the pointer we're reading into to the correct location. 376 data += size; 377 //Adjust the size to get the remaining bytes. 
378 size = addr + fullSize - secondAddr; 379 //And access the right address. 380 addr = secondAddr; 381 } 382} 383 384 385Fault 386AtomicSimpleCPU::writeMem(uint8_t *data, unsigned size, 387 Addr addr, unsigned flags, uint64_t *res) 388{ 389 390 static uint8_t zero_array[64] = {}; 391 392 if (data == NULL) { 393 assert(size <= 64); 394 assert(flags & Request::CACHE_BLOCK_ZERO); 395 // This must be a cache block cleaning request 396 data = zero_array; 397 } 398 399 // use the CPU's statically allocated write request and packet objects 400 Request *req = &data_write_req; 401 402 if (traceData) { 403 traceData->setAddr(addr); 404 } 405 406 //The size of the data we're trying to read. 407 int fullSize = size; 408 409 //The address of the second part of this access if it needs to be split 410 //across a cache line boundary. 411 Addr secondAddr = roundDown(addr + size - 1, cacheLineSize()); 412 413 if(secondAddr > addr) 414 size = secondAddr - addr; 415 416 dcache_latency = 0; 417 418 req->taskId(taskId()); 419 while(1) { 420 req->setVirt(0, addr, size, flags, dataMasterId(), thread->pcState().instAddr()); 421 422 // translate to physical address 423 Fault fault = thread->dtb->translateAtomic(req, tc, BaseTLB::Write); 424 425 // Now do the access. 
426 if (fault == NoFault) { 427 MemCmd cmd = MemCmd::WriteReq; // default 428 bool do_access = true; // flag to suppress cache access 429 430 if (req->isLLSC()) { 431 cmd = MemCmd::StoreCondReq; 432 do_access = TheISA::handleLockedWrite(thread, req, dcachePort.cacheBlockMask); 433 } else if (req->isSwap()) { 434 cmd = MemCmd::SwapReq; 435 if (req->isCondSwap()) { 436 assert(res); 437 req->setExtraData(*res); 438 } 439 } 440 441 if (do_access && !req->getFlags().isSet(Request::NO_ACCESS)) { 442 Packet pkt = Packet(req, cmd); 443 pkt.dataStatic(data); 444 445 if (req->isMmappedIpr()) { 446 dcache_latency += 447 TheISA::handleIprWrite(thread->getTC(), &pkt); 448 } else { 449 if (fastmem && system->isMemAddr(pkt.getAddr())) 450 system->getPhysMem().access(&pkt); 451 else 452 dcache_latency += dcachePort.sendAtomic(&pkt); 453 } 454 dcache_access = true; 455 assert(!pkt.isError()); 456 457 if (req->isSwap()) { 458 assert(res); 459 memcpy(res, pkt.getPtr<uint8_t>(), fullSize); 460 } 461 } 462 463 if (res && !req->isSwap()) { 464 *res = req->getExtraData(); 465 } 466 } 467 468 //If there's a fault or we don't need to access a second cache line, 469 //stop now. 470 if (fault != NoFault || secondAddr <= addr) 471 { 472 if (req->isLocked() && fault == NoFault) { 473 assert(locked); 474 locked = false; 475 } 476 if (fault != NoFault && req->isPrefetch()) { 477 return NoFault; 478 } else { 479 return fault; 480 } 481 } 482 483 /* 484 * Set up for accessing the second cache line. 485 */ 486 487 //Move the pointer we're reading into to the correct location. 488 data += size; 489 //Adjust the size to get the remaining bytes. 490 size = addr + fullSize - secondAddr; 491 //And access the right address. 
492 addr = secondAddr; 493 } 494} 495 496 497void 498AtomicSimpleCPU::tick() 499{ 500 DPRINTF(SimpleCPU, "Tick\n"); 501 502 Tick latency = 0; 503 504 for (int i = 0; i < width || locked; ++i) { 505 numCycles++; 506 ppCycles->notify(1); 507 508 if (!curStaticInst || !curStaticInst->isDelayedCommit()) 509 checkForInterrupts(); 510 511 checkPcEventQueue(); 512 // We must have just got suspended by a PC event 513 if (_status == Idle) { 514 tryCompleteDrain(); 515 return; 516 } 517 518 Fault fault = NoFault; 519 520 TheISA::PCState pcState = thread->pcState(); 521 522 bool needToFetch = !isRomMicroPC(pcState.microPC()) && 523 !curMacroStaticInst; 524 if (needToFetch) { 525 ifetch_req.taskId(taskId()); 526 setupFetchRequest(&ifetch_req); 527 fault = thread->itb->translateAtomic(&ifetch_req, tc, 528 BaseTLB::Execute); 529 } 530 531 if (fault == NoFault) { 532 Tick icache_latency = 0; 533 bool icache_access = false; 534 dcache_access = false; // assume no dcache access 535 536 if (needToFetch) { 537 // This is commented out because the decoder would act like 538 // a tiny cache otherwise. It wouldn't be flushed when needed 539 // like the I cache. It should be flushed, and when that works 540 // this code should be uncommented. 541 //Fetch more instruction memory if necessary 542 //if(decoder.needMoreBytes()) 543 //{ 544 icache_access = true; 545 Packet ifetch_pkt = Packet(&ifetch_req, MemCmd::ReadReq); 546 ifetch_pkt.dataStatic(&inst); 547 548 if (fastmem && system->isMemAddr(ifetch_pkt.getAddr())) 549 system->getPhysMem().access(&ifetch_pkt); 550 else 551 icache_latency = icachePort.sendAtomic(&ifetch_pkt); 552 553 assert(!ifetch_pkt.isError()); 554 555 // ifetch_req is initialized to read the instruction directly 556 // into the CPU object's inst field. 
557 //} 558 } 559 560 preExecute(); 561 562 if (curStaticInst) { 563 fault = curStaticInst->execute(this, traceData); 564 565 // keep an instruction count 566 if (fault == NoFault) { 567 countInst(); 568 if (!curStaticInst->isMicroop() || 569 curStaticInst->isLastMicroop()) { 570 ppCommit->notify(std::make_pair(thread, curStaticInst)); 571 } 572 } 573 else if (traceData && !DTRACE(ExecFaulting)) { 574 delete traceData; 575 traceData = NULL; 576 } 577 578 postExecute(); 579 } 580 581 // @todo remove me after debugging with legion done 582 if (curStaticInst && (!curStaticInst->isMicroop() || 583 curStaticInst->isFirstMicroop())) 584 instCnt++; 585 586 Tick stall_ticks = 0; 587 if (simulate_inst_stalls && icache_access) 588 stall_ticks += icache_latency; 589 590 if (simulate_data_stalls && dcache_access) 591 stall_ticks += dcache_latency; 592 593 if (stall_ticks) { 594 // the atomic cpu does its accounting in ticks, so 595 // keep counting in ticks but round to the clock 596 // period 597 latency += divCeil(stall_ticks, clockPeriod()) * 598 clockPeriod(); 599 } 600 601 } 602 if(fault != NoFault || !stayAtPC) 603 advancePC(fault); 604 } 605 606 if (tryCompleteDrain()) 607 return; 608 609 // instruction takes at least one cycle 610 if (latency < clockPeriod()) 611 latency = clockPeriod(); 612 613 if (_status != Idle) 614 schedule(tickEvent, curTick() + latency); 615} 616 617void 618AtomicSimpleCPU::regProbePoints() 619{ 620 BaseCPU::regProbePoints(); 621 622 ppCommit = new ProbePointArg<pair<SimpleThread*, const StaticInstPtr>> 623 (getProbeManager(), "Commit"); 624} 625 626void 627AtomicSimpleCPU::printAddr(Addr a) 628{ 629 dcachePort.printAddr(a); 630} 631 632//////////////////////////////////////////////////////////////////////// 633// 634// AtomicSimpleCPU Simulation Object 635// 636AtomicSimpleCPU * 637AtomicSimpleCPUParams::create() 638{ 639 numThreads = 1; 640 if (!FullSystem && workload.size() != 1) 641 panic("only one workload allowed"); 642 return new 
AtomicSimpleCPU(this); 643} 644