/*
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 */

#include "arch/locked_mem.hh"
#include "arch/mmaped_ipr.hh"
#include "arch/utility.hh"
#include "base/bigint.hh"
#include "config/the_isa.hh"
#include "cpu/exetrace.hh"
#include "cpu/simple/atomic.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "params/AtomicSimpleCPU.hh"
#include "sim/faults.hh"
#include "sim/system.hh"

using namespace std;
using namespace TheISA;

AtomicSimpleCPU::TickEvent::TickEvent(AtomicSimpleCPU *c)
    : Event(CPU_Tick_Pri), cpu(c)
{
}


void
AtomicSimpleCPU::TickEvent::process()
{
    cpu->tick();
}

const char *
AtomicSimpleCPU::TickEvent::description() const
{
    return "AtomicSimpleCPU tick";
}

Port *
AtomicSimpleCPU::getPort(const string &if_name, int idx)
{
    if (if_name == "dcache_port")
        return &dcachePort;
    else if (if_name == "icache_port")
        return &icachePort;
    else if (if_name == "physmem_port") {
        hasPhysMemPort = true;
        return &physmemPort;
    }
    else
        panic("No such port '%s'\n", if_name);
}

void
AtomicSimpleCPU::init()
{
    BaseCPU::init();
#if FULL_SYSTEM
    ThreadID size = threadContexts.size();
    for (ThreadID i = 0; i < size; ++i) {
        ThreadContext *tc = threadContexts[i];

        // initialize CPU, including PC
        TheISA::initCPU(tc, tc->contextId());
    }
#endif
    if (hasPhysMemPort) {
        bool snoop = false;
        AddrRangeList pmAddrList;
        physmemPort.getPeerAddressRanges(pmAddrList, snoop);
        physMemAddr = *pmAddrList.begin();
    }
    // Atomic doesn't do MT right now, so contextId == threadId
    ifetch_req.setThreadContext(_cpuId, 0); // Add thread ID if we add MT
    data_read_req.setThreadContext(_cpuId, 0); // Add thread ID here too
    data_write_req.setThreadContext(_cpuId, 0); // Add thread ID here too
}

bool
AtomicSimpleCPU::CpuPort::recvTiming(PacketPtr pkt)
{
    panic("AtomicSimpleCPU doesn't expect recvTiming callback!");
    return true;
}

Tick
AtomicSimpleCPU::CpuPort::recvAtomic(PacketPtr pkt)
{
    // Snooping a coherence request, just return
    return 0;
}

void
AtomicSimpleCPU::CpuPort::recvFunctional(PacketPtr pkt)
{
    // No internal storage to update, just return
    return;
}

void
AtomicSimpleCPU::CpuPort::recvStatusChange(Status status)
{
    if (status == RangeChange) {
        if (!snoopRangeSent) {
            snoopRangeSent = true;
            sendStatusChange(Port::RangeChange);
        }
        return;
    }

    panic("AtomicSimpleCPU doesn't expect recvStatusChange callback!");
}
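// Illustrative note (not part of the original source): getPort() is how
// port bindings from the Python configuration reach this CPU. A binding
// such as the (hypothetical) config line
//
//   system.cpu.icache_port = system.membus.port
//
// ultimately resolves to getPort("icache_port", idx) here to obtain the
// C++ Port object to connect. The recv* callbacks above are stubs
// because an atomic-mode CPU initiates all of its accesses itself via
// sendAtomic() and never expects timing traffic or retries back.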
void
AtomicSimpleCPU::CpuPort::recvRetry()
{
    panic("AtomicSimpleCPU doesn't expect recvRetry callback!");
}

void
AtomicSimpleCPU::DcachePort::setPeer(Port *port)
{
    Port::setPeer(port);

#if FULL_SYSTEM
    // Update the ThreadContext's memory ports (Functional/Virtual
    // Ports)
    cpu->tcBase()->connectMemPorts(cpu->tcBase());
#endif
}

AtomicSimpleCPU::AtomicSimpleCPU(AtomicSimpleCPUParams *p)
    : BaseSimpleCPU(p), tickEvent(this), width(p->width), locked(false),
      simulate_data_stalls(p->simulate_data_stalls),
      simulate_inst_stalls(p->simulate_inst_stalls),
      // give each port a distinct name (the original repeated "-iport"
      // for all three, which made port names ambiguous in debug output)
      icachePort(name() + "-iport", this), dcachePort(name() + "-dport", this),
      physmemPort(name() + "-pport", this), hasPhysMemPort(false)
{
    _status = Idle;

    icachePort.snoopRangeSent = false;
    dcachePort.snoopRangeSent = false;
}


AtomicSimpleCPU::~AtomicSimpleCPU()
{
    if (tickEvent.scheduled()) {
        deschedule(tickEvent);
    }
}

void
AtomicSimpleCPU::serialize(ostream &os)
{
    SimObject::State so_state = SimObject::getState();
    SERIALIZE_ENUM(so_state);
    SERIALIZE_SCALAR(locked);
    BaseSimpleCPU::serialize(os);
    nameOut(os, csprintf("%s.tickEvent", name()));
    tickEvent.serialize(os);
}

void
AtomicSimpleCPU::unserialize(Checkpoint *cp, const string &section)
{
    SimObject::State so_state;
    UNSERIALIZE_ENUM(so_state);
    UNSERIALIZE_SCALAR(locked);
    BaseSimpleCPU::unserialize(cp, section);
    tickEvent.unserialize(cp, csprintf("%s.tickEvent", section));
}

void
AtomicSimpleCPU::resume()
{
    if (_status == Idle || _status == SwitchedOut)
        return;

    DPRINTF(SimpleCPU, "Resume\n");
    assert(system->getMemoryMode() == Enums::atomic);

    changeState(SimObject::Running);
    if (thread->status() == ThreadContext::Active) {
        if (!tickEvent.scheduled())
            schedule(tickEvent, nextCycle());
    }
}

void
AtomicSimpleCPU::switchOut()
{
    assert(_status == Running || _status == Idle);
    _status = SwitchedOut;

    tickEvent.squash();
}
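// Illustrative note (not from the original source): serialize() above
// writes this CPU's extra state into a checkpoint section. The entries
// look roughly like the following (sketch only; the exact keys depend
// on BaseSimpleCPU and the ISA):
//
//   [system.cpu]
//   so_state=Running
//   locked=false
//   ...
//   [system.cpu.tickEvent]
//   ...
//
// unserialize() reads the same keys back, so the two must stay in sync.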
void
AtomicSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
{
    BaseCPU::takeOverFrom(oldCPU, &icachePort, &dcachePort);

    assert(!tickEvent.scheduled());

    // if any of this CPU's ThreadContexts are active, mark the CPU as
    // running and schedule its tick event.
    ThreadID size = threadContexts.size();
    for (ThreadID i = 0; i < size; ++i) {
        ThreadContext *tc = threadContexts[i];
        if (tc->status() == ThreadContext::Active && _status != Running) {
            _status = Running;
            schedule(tickEvent, nextCycle());
            break;
        }
    }
    if (_status != Running) {
        _status = Idle;
    }
    assert(threadContexts.size() == 1);
    ifetch_req.setThreadContext(_cpuId, 0); // Add thread ID if we add MT
    data_read_req.setThreadContext(_cpuId, 0); // Add thread ID here too
    data_write_req.setThreadContext(_cpuId, 0); // Add thread ID here too
}


void
AtomicSimpleCPU::activateContext(int thread_num, int delay)
{
    DPRINTF(SimpleCPU, "ActivateContext %d (%d cycles)\n", thread_num, delay);

    assert(thread_num == 0);
    assert(thread);

    assert(_status == Idle);
    assert(!tickEvent.scheduled());

    notIdleFraction++;
    numCycles += tickToCycles(thread->lastActivate - thread->lastSuspend);

    // Make sure ticks are still on multiples of cycles
    schedule(tickEvent, nextCycle(curTick() + ticks(delay)));
    _status = Running;
}


void
AtomicSimpleCPU::suspendContext(int thread_num)
{
    DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);

    assert(thread_num == 0);
    assert(thread);

    if (_status == Idle)
        return;

    assert(_status == Running);

    // tick event may not be scheduled if this gets called from inside
    // an instruction's execution, e.g. "quiesce"
    if (tickEvent.scheduled())
        deschedule(tickEvent);

    notIdleFraction--;
    _status = Idle;
}
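// Illustrative note (not from the original source): readBytes() and
// writeBytes() below transparently split an access that crosses a cache
// line boundary into two pieces. The arithmetic works as follows.
// Suppose blockSize = 64 (0x40), addr = 0x3e, size = 4:
//
//   secondAddr = roundDown(addr + size - 1, blockSize)
//              = roundDown(0x41, 0x40) = 0x40
//
// Since secondAddr (0x40) > addr (0x3e), the first access is trimmed to
// size = secondAddr - addr = 2 bytes. After that piece completes, the
// loop advances: data += 2, size = addr + fullSize - secondAddr = 2,
// addr = 0x40, and the remaining 2 bytes come from the second line. An
// access contained in a single line has secondAddr <= addr and returns
// after one iteration.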
Fault
AtomicSimpleCPU::readBytes(Addr addr, uint8_t *data,
                           unsigned size, unsigned flags)
{
    // use the CPU's statically allocated read request object
    Request *req = &data_read_req;

    if (traceData) {
        traceData->setAddr(addr);
    }

    // The block size of our peer.
    unsigned blockSize = dcachePort.peerBlockSize();
    // The size of the data we're trying to read.
    int fullSize = size;

    // The address of the second part of this access if it needs to be
    // split across a cache line boundary.
    Addr secondAddr = roundDown(addr + size - 1, blockSize);

    if (secondAddr > addr)
        size = secondAddr - addr;

    dcache_latency = 0;

    while (1) {
        req->setVirt(0, addr, size, flags, thread->pcState().instAddr());

        // translate to physical address
        Fault fault = thread->dtb->translateAtomic(req, tc, BaseTLB::Read);

        // Now do the access.
        if (fault == NoFault && !req->getFlags().isSet(Request::NO_ACCESS)) {
            Packet pkt = Packet(req,
                                req->isLLSC() ? MemCmd::LoadLockedReq :
                                                MemCmd::ReadReq,
                                Packet::Broadcast);
            pkt.dataStatic(data);

            if (req->isMmapedIpr())
                dcache_latency += TheISA::handleIprRead(thread->getTC(), &pkt);
            else {
                if (hasPhysMemPort && pkt.getAddr() == physMemAddr)
                    dcache_latency += physmemPort.sendAtomic(&pkt);
                else
                    dcache_latency += dcachePort.sendAtomic(&pkt);
            }
            dcache_access = true;

            assert(!pkt.isError());

            if (req->isLLSC()) {
                TheISA::handleLockedRead(thread, req);
            }
        }

        // If there's a fault, return it
        if (fault != NoFault) {
            if (req->isPrefetch()) {
                return NoFault;
            } else {
                return fault;
            }
        }

        // If we don't need to access a second cache line, stop now.
        if (secondAddr <= addr) {
            if (req->isLocked() && fault == NoFault) {
                assert(!locked);
                locked = true;
            }
            return fault;
        }

        /*
         * Set up for accessing the second cache line.
         */

        // Move the pointer we're reading into to the correct location.
        data += size;
        // Adjust the size to get the remaining bytes.
        size = addr + fullSize - secondAddr;
        // And access the right address.
        addr = secondAddr;
    }
}


template <class T>
Fault
AtomicSimpleCPU::read(Addr addr, T &data, unsigned flags)
{
    uint8_t *dataPtr = (uint8_t *)&data;
    memset(dataPtr, 0, sizeof(data));
    Fault fault = readBytes(addr, dataPtr, sizeof(data), flags);
    if (fault == NoFault) {
        data = gtoh(data);
        if (traceData)
            traceData->setData(data);
    }
    return fault;
}

#ifndef DOXYGEN_SHOULD_SKIP_THIS

template
Fault
AtomicSimpleCPU::read(Addr addr, Twin32_t &data, unsigned flags);

template
Fault
AtomicSimpleCPU::read(Addr addr, Twin64_t &data, unsigned flags);

template
Fault
AtomicSimpleCPU::read(Addr addr, uint64_t &data, unsigned flags);

template
Fault
AtomicSimpleCPU::read(Addr addr, uint32_t &data, unsigned flags);

template
Fault
AtomicSimpleCPU::read(Addr addr, uint16_t &data, unsigned flags);

template
Fault
AtomicSimpleCPU::read(Addr addr, uint8_t &data, unsigned flags);

#endif //DOXYGEN_SHOULD_SKIP_THIS

template<>
Fault
AtomicSimpleCPU::read(Addr addr, double &data, unsigned flags)
{
    return read(addr, *(uint64_t*)&data, flags);
}

template<>
Fault
AtomicSimpleCPU::read(Addr addr, float &data, unsigned flags)
{
    return read(addr, *(uint32_t*)&data, flags);
}


template<>
Fault
AtomicSimpleCPU::read(Addr addr, int32_t &data, unsigned flags)
{
    return read(addr, (uint32_t&)data, flags);
}
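// Illustrative note (not from the original source): load-locked /
// store-conditional pairs are split between readBytes() above and
// writeBytes() below. On an LL, readBytes() issues a LoadLockedReq and
// TheISA::handleLockedRead() records the reservation in the thread
// context. On the matching SC, writeBytes() calls
// TheISA::handleLockedWrite(); if the reservation still holds it issues
// a StoreCondReq, otherwise it suppresses the memory access entirely
// (do_access = false) and the SC fails locally. Swap requests instead
// become SwapReq packets, with the old memory value copied back out of
// the packet into *res after the access.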
    Addr secondAddr = roundDown(addr + size - 1, blockSize);

    if (secondAddr > addr)
        size = secondAddr - addr;

    dcache_latency = 0;

    while (1) {
        req->setVirt(0, addr, size, flags, thread->pcState().instAddr());

        // translate to physical address
        Fault fault = thread->dtb->translateAtomic(req, tc, BaseTLB::Write);

        // Now do the access.
        if (fault == NoFault) {
            MemCmd cmd = MemCmd::WriteReq; // default
            bool do_access = true;  // suppress the cache access if false

            if (req->isLLSC()) {
                cmd = MemCmd::StoreCondReq;
                do_access = TheISA::handleLockedWrite(thread, req);
            } else if (req->isSwap()) {
                cmd = MemCmd::SwapReq;
                if (req->isCondSwap()) {
                    assert(res);
                    req->setExtraData(*res);
                }
            }

            if (do_access && !req->getFlags().isSet(Request::NO_ACCESS)) {
                Packet pkt = Packet(req, cmd, Packet::Broadcast);
                pkt.dataStatic(data);

                if (req->isMmapedIpr()) {
                    dcache_latency +=
                        TheISA::handleIprWrite(thread->getTC(), &pkt);
                } else {
                    if (hasPhysMemPort && pkt.getAddr() == physMemAddr)
                        dcache_latency += physmemPort.sendAtomic(&pkt);
                    else
                        dcache_latency += dcachePort.sendAtomic(&pkt);
                }
                dcache_access = true;
                assert(!pkt.isError());

                if (req->isSwap()) {
                    assert(res);
                    memcpy(res, pkt.getPtr<uint8_t>(), fullSize);
                }
            }

            if (res && !req->isSwap()) {
                *res = req->getExtraData();
            }
        }

        // If there's a fault or we don't need to access a second cache
        // line, stop now.
        if (fault != NoFault || secondAddr <= addr) {
            if (req->isLocked() && fault == NoFault) {
                assert(locked);
                locked = false;
            }
            if (fault != NoFault && req->isPrefetch()) {
                return NoFault;
            } else {
                return fault;
            }
        }

        /*
         * Set up for accessing the second cache line.
         */

        // Move the pointer we're writing from to the correct location.
        data += size;
        // Adjust the size to get the remaining bytes.
        size = addr + fullSize - secondAddr;
        // And access the right address.
        addr = secondAddr;
    }
}
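// Illustrative note (not from the original source): the templated
// write() below converts data from host to guest byte order with htog()
// before handing the raw bytes to writeBytes(); read() applies the
// inverse conversion, gtoh(). Both are no-ops when host and guest
// endianness match and byte swaps otherwise. The float/double
// specializations reinterpret the value's raw bits via pointer casts;
// an aliasing-safe sketch of the same bit reinterpretation would use
// memcpy instead:
//
//   uint64_t bits;
//   memcpy(&bits, &data, sizeof(bits));  // view a double's raw bits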
template <class T>
Fault
AtomicSimpleCPU::write(T data, Addr addr, unsigned flags, uint64_t *res)
{
    uint8_t *dataPtr = (uint8_t *)&data;
    if (traceData)
        traceData->setData(data);
    data = htog(data);

    Fault fault = writeBytes(dataPtr, sizeof(data), addr, flags, res);
    if (fault == NoFault && data_write_req.isSwap()) {
        *res = gtoh((T)*res);
    }
    return fault;
}


#ifndef DOXYGEN_SHOULD_SKIP_THIS

template
Fault
AtomicSimpleCPU::write(Twin32_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
AtomicSimpleCPU::write(Twin64_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
AtomicSimpleCPU::write(uint64_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
AtomicSimpleCPU::write(uint32_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
AtomicSimpleCPU::write(uint16_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
AtomicSimpleCPU::write(uint8_t data, Addr addr,
                       unsigned flags, uint64_t *res);

#endif //DOXYGEN_SHOULD_SKIP_THIS

template<>
Fault
AtomicSimpleCPU::write(double data, Addr addr, unsigned flags, uint64_t *res)
{
    return write(*(uint64_t*)&data, addr, flags, res);
}

template<>
Fault
AtomicSimpleCPU::write(float data, Addr addr, unsigned flags, uint64_t *res)
{
    return write(*(uint32_t*)&data, addr, flags, res);
}


template<>
Fault
AtomicSimpleCPU::write(int32_t data, Addr addr, unsigned flags, uint64_t *res)
{
    return write((uint32_t)data, addr, flags, res);
}


void
AtomicSimpleCPU::tick()
{
    DPRINTF(SimpleCPU, "Tick\n");

    Tick latency = 0;

    for (int i = 0; i < width || locked; ++i) {
        numCycles++;

        if (!curStaticInst || !curStaticInst->isDelayedCommit())
            checkForInterrupts();

        checkPcEventQueue();

        Fault fault = NoFault;

        TheISA::PCState pcState = thread->pcState();

        bool needToFetch = !isRomMicroPC(pcState.microPC()) &&
                           !curMacroStaticInst;
        if (needToFetch) {
            setupFetchRequest(&ifetch_req);
            fault = thread->itb->translateAtomic(&ifetch_req, tc,
                                                 BaseTLB::Execute);
        }

        if (fault == NoFault) {
            Tick icache_latency = 0;
            bool icache_access = false;
            dcache_access = false; // assume no dcache access

            if (needToFetch) {
                // This is commented out because the predecoder would act
                // like a tiny cache otherwise. It wouldn't be flushed
                // when needed like the I cache. It should be flushed,
                // and when that works this code should be uncommented.
                //Fetch more instruction memory if necessary
                //if (predecoder.needMoreBytes())
                //{
                icache_access = true;
                Packet ifetch_pkt = Packet(&ifetch_req, MemCmd::ReadReq,
                                           Packet::Broadcast);
                ifetch_pkt.dataStatic(&inst);

                if (hasPhysMemPort && ifetch_pkt.getAddr() == physMemAddr)
                    icache_latency = physmemPort.sendAtomic(&ifetch_pkt);
                else
                    icache_latency = icachePort.sendAtomic(&ifetch_pkt);

                assert(!ifetch_pkt.isError());

                // ifetch_req is initialized to read the instruction
                // directly into the CPU object's inst field.
                //}
            }

            preExecute();

            if (curStaticInst) {
                fault = curStaticInst->execute(this, traceData);

                // keep an instruction count
                if (fault == NoFault)
                    countInst();
                else if (traceData && !DTRACE(ExecFaulting)) {
                    delete traceData;
                    traceData = NULL;
                }

                postExecute();
            }

            // @todo remove me after debugging with legion done
            if (curStaticInst && (!curStaticInst->isMicroop() ||
                        curStaticInst->isFirstMicroop()))
                instCnt++;
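            // Illustrative note (not from the original source): the
            // block below charges cache latency by rounding the stall
            // up to a whole number of CPU cycles. For example, with
            // ticks(1) = 500 and stall_ticks = 1200:
            //   stall_cycles        = 1200 / 500 = 2
            //   aligned_stall_ticks = ticks(2)   = 1000
            //   1000 < 1200, so round up to ticks(3) = 1500
            // and 1500 ticks (3 cycles) are added to this tick's latency.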
            Tick stall_ticks = 0;
            if (simulate_inst_stalls && icache_access)
                stall_ticks += icache_latency;

            if (simulate_data_stalls && dcache_access)
                stall_ticks += dcache_latency;

            if (stall_ticks) {
                Tick stall_cycles = stall_ticks / ticks(1);
                Tick aligned_stall_ticks = ticks(stall_cycles);

                // round up to the next full cycle (the original added a
                // single tick here, leaving the latency unaligned with
                // the cycle boundary)
                if (aligned_stall_ticks < stall_ticks)
                    aligned_stall_ticks += ticks(1);

                latency += aligned_stall_ticks;
            }

        }
        if (fault != NoFault || !stayAtPC)
            advancePC(fault);
    }

    // instruction takes at least one cycle
    if (latency < ticks(1))
        latency = ticks(1);

    if (_status != Idle)
        schedule(tickEvent, curTick() + latency);
}


void
AtomicSimpleCPU::printAddr(Addr a)
{
    dcachePort.printAddr(a);
}


////////////////////////////////////////////////////////////////////////
//
//  AtomicSimpleCPU Simulation Object
//
AtomicSimpleCPU *
AtomicSimpleCPUParams::create()
{
    numThreads = 1;
#if !FULL_SYSTEM
    if (workload.size() != 1)
        panic("only one workload allowed");
#endif
    return new AtomicSimpleCPU(this);
}
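// Illustrative note (not from the original source): construction is
// normally driven from the Python configuration, which fills in the
// generated AtomicSimpleCPUParams and then calls create() above.
// Roughly, a (hypothetical) config line such as
//
//   cpu = AtomicSimpleCPU(width=1, simulate_data_stalls=True)
//
// becomes, on the C++ side:
//
//   AtomicSimpleCPUParams *params = ...; // filled in from Python
//   AtomicSimpleCPU *cpu = params->create();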