atomic.cc revision 13652
/*
 * Copyright 2014 Google, Inc.
 * Copyright (c) 2012-2013,2015,2017-2018 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 */

#include "cpu/simple/atomic.hh"

#include "arch/locked_mem.hh"
#include "arch/mmapped_ipr.hh"
#include "arch/utility.hh"
#include "base/output.hh"
#include "config/the_isa.hh"
#include "cpu/exetrace.hh"
#include "debug/Drain.hh"
#include "debug/ExecFaulting.hh"
#include "debug/SimpleCPU.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "mem/physical.hh"
#include "params/AtomicSimpleCPU.hh"
#include "sim/faults.hh"
#include "sim/full_system.hh"
#include "sim/system.hh"

using namespace std;
using namespace TheISA;

void
AtomicSimpleCPU::init()
{
    BaseSimpleCPU::init();

    int cid = threadContexts[0]->contextId();
    ifetch_req->setContext(cid);
    data_read_req->setContext(cid);
    data_write_req->setContext(cid);
    data_amo_req->setContext(cid);
}

AtomicSimpleCPU::AtomicSimpleCPU(AtomicSimpleCPUParams *p)
    : BaseSimpleCPU(p),
      tickEvent([this]{ tick(); }, "AtomicSimpleCPU tick",
                false, Event::CPU_Tick_Pri),
      width(p->width), locked(false),
      simulate_data_stalls(p->simulate_data_stalls),
      simulate_inst_stalls(p->simulate_inst_stalls),
      icachePort(name() + ".icache_port", this),
      dcachePort(name() + ".dcache_port", this),
      dcache_access(false), dcache_latency(0),
      ppCommit(nullptr)
{
    _status = Idle;
    ifetch_req = std::make_shared<Request>();
    data_read_req = std::make_shared<Request>();
    data_write_req = std::make_shared<Request>();
    data_amo_req = std::make_shared<Request>();
}


AtomicSimpleCPU::~AtomicSimpleCPU()
{
    if (tickEvent.scheduled()) {
        deschedule(tickEvent);
    }
}

DrainState
AtomicSimpleCPU::drain()
{
    // Deschedule any power gating event
    deschedulePowerGatingEvent();

    if (switchedOut())
        return DrainState::Drained;

    if (!isDrained()) {
        DPRINTF(Drain, "Requesting drain.\n");
        return DrainState::Draining;
    } else {
        if (tickEvent.scheduled())
            deschedule(tickEvent);

        activeThreads.clear();
        DPRINTF(Drain, "Not executing microcode, no need to drain.\n");
        return DrainState::Drained;
    }
}

void
AtomicSimpleCPU::threadSnoop(PacketPtr pkt, ThreadID sender)
{
    DPRINTF(SimpleCPU, "received snoop pkt for addr:%#x %s\n", pkt->getAddr(),
            pkt->cmdString());

    for (ThreadID tid = 0; tid < numThreads; tid++) {
        if (tid != sender) {
            if (getCpuAddrMonitor(tid)->doMonitor(pkt)) {
                wakeup(tid);
            }

            TheISA::handleLockedSnoop(threadInfo[tid]->thread,
                                      pkt, dcachePort.cacheBlockMask);
        }
    }
}

void
AtomicSimpleCPU::drainResume()
{
    assert(!tickEvent.scheduled());
    if (switchedOut())
        return;

    DPRINTF(SimpleCPU, "Resume\n");
    verifyMemoryMode();

    assert(!threadContexts.empty());

    _status = BaseSimpleCPU::Idle;

    for (ThreadID tid = 0; tid < numThreads; tid++) {
        if (threadInfo[tid]->thread->status() == ThreadContext::Active) {
            threadInfo[tid]->notIdleFraction = 1;
            activeThreads.push_back(tid);
            _status = BaseSimpleCPU::Running;

            // Tick if any threads active
            if (!tickEvent.scheduled()) {
                schedule(tickEvent, nextCycle());
            }
        } else {
            threadInfo[tid]->notIdleFraction = 0;
        }
    }

    // Reschedule any power gating event
    schedulePowerGatingEvent();
}

bool
AtomicSimpleCPU::tryCompleteDrain()
{
    if (drainState() != DrainState::Draining)
        return false;

    DPRINTF(Drain, "tryCompleteDrain.\n");
    if (!isDrained())
        return false;

    DPRINTF(Drain, "CPU done draining, processing drain event\n");
    signalDrainDone();

    return true;
}


void
AtomicSimpleCPU::switchOut()
{
    BaseSimpleCPU::switchOut();

    assert(!tickEvent.scheduled());
    assert(_status == BaseSimpleCPU::Running || _status == Idle);
    assert(isDrained());
}


void
AtomicSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
{
    BaseSimpleCPU::takeOverFrom(oldCPU);

    // The tick event should have been descheduled by drain()
    assert(!tickEvent.scheduled());
}

void
AtomicSimpleCPU::verifyMemoryMode() const
{
    if (!system->isAtomicMode()) {
        fatal("The atomic CPU requires the memory system to be in "
              "'atomic' mode.\n");
    }
}

void
AtomicSimpleCPU::activateContext(ThreadID thread_num)
{
    DPRINTF(SimpleCPU, "ActivateContext %d\n", thread_num);

    assert(thread_num < numThreads);

    threadInfo[thread_num]->notIdleFraction = 1;
    Cycles delta = ticksToCycles(threadInfo[thread_num]->thread->lastActivate -
                                 threadInfo[thread_num]->thread->lastSuspend);
    numCycles += delta;

    if (!tickEvent.scheduled()) {
        //Make sure ticks are still on multiples of cycles
        schedule(tickEvent, clockEdge(Cycles(0)));
    }
    _status = BaseSimpleCPU::Running;
    if (std::find(activeThreads.begin(), activeThreads.end(), thread_num)
        == activeThreads.end()) {
        activeThreads.push_back(thread_num);
    }

    BaseCPU::activateContext(thread_num);
}


void
AtomicSimpleCPU::suspendContext(ThreadID thread_num)
{
    DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);

    assert(thread_num < numThreads);
    activeThreads.remove(thread_num);

    if (_status == Idle)
        return;

    assert(_status == BaseSimpleCPU::Running);

    threadInfo[thread_num]->notIdleFraction = 0;

    if (activeThreads.empty()) {
        _status = Idle;

        if (tickEvent.scheduled()) {
            deschedule(tickEvent);
        }
    }

    BaseCPU::suspendContext(thread_num);
}

Tick
AtomicSimpleCPU::sendPacket(MasterPort &port, const PacketPtr &pkt)
{
    return port.sendAtomic(pkt);
}

Tick
AtomicSimpleCPU::AtomicCPUDPort::recvAtomicSnoop(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "received snoop pkt for addr:%#x %s\n", pkt->getAddr(),
            pkt->cmdString());

    // X86 ISA: Snooping an invalidation for monitor/mwait
    AtomicSimpleCPU *cpu = (AtomicSimpleCPU *)(&owner);

    for (ThreadID tid = 0; tid < cpu->numThreads; tid++) {
        if (cpu->getCpuAddrMonitor(tid)->doMonitor(pkt)) {
            cpu->wakeup(tid);
        }
    }

    // If the snoop invalidates, release any associated locks.
    // When run without caches, invalidation packets will not be received;
    // hence we must check whether the incoming packet is a write and wake
    // up the processor accordingly.
    if (pkt->isInvalidate() || pkt->isWrite()) {
        DPRINTF(SimpleCPU, "received invalidation for addr:%#x\n",
                pkt->getAddr());
        for (auto &t_info : cpu->threadInfo) {
            TheISA::handleLockedSnoop(t_info->thread, pkt, cacheBlockMask);
        }
    }

    return 0;
}

void
AtomicSimpleCPU::AtomicCPUDPort::recvFunctionalSnoop(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "received snoop pkt for addr:%#x %s\n", pkt->getAddr(),
            pkt->cmdString());

    // X86 ISA: Snooping an invalidation for monitor/mwait
    AtomicSimpleCPU *cpu = (AtomicSimpleCPU *)(&owner);
    for (ThreadID tid = 0; tid < cpu->numThreads; tid++) {
        if (cpu->getCpuAddrMonitor(tid)->doMonitor(pkt)) {
            cpu->wakeup(tid);
        }
    }

    // If the snoop invalidates, release any associated locks.
    if (pkt->isInvalidate()) {
        DPRINTF(SimpleCPU, "received invalidation for addr:%#x\n",
                pkt->getAddr());
        for (auto &t_info : cpu->threadInfo) {
            TheISA::handleLockedSnoop(t_info->thread, pkt, cacheBlockMask);
        }
    }
}

Fault
AtomicSimpleCPU::readMem(Addr addr, uint8_t * data, unsigned size,
                         Request::Flags flags)
{
    SimpleExecContext& t_info = *threadInfo[curThread];
    SimpleThread* thread = t_info.thread;

    // use the CPU's statically allocated read request and packet objects
    const RequestPtr &req = data_read_req;

    if (traceData)
        traceData->setMem(addr, size, flags);

    //The size of the data we're trying to read.
    int fullSize = size;

    //The address of the second part of this access if it needs to be split
    //across a cache line boundary.
    Addr secondAddr = roundDown(addr + size - 1, cacheLineSize());

    if (secondAddr > addr)
        size = secondAddr - addr;

    dcache_latency = 0;

    req->taskId(taskId());
    while (1) {
        req->setVirt(0, addr, size, flags, dataMasterId(),
                     thread->pcState().instAddr());

        // translate to physical address
        Fault fault = thread->dtb->translateAtomic(req, thread->getTC(),
                                                   BaseTLB::Read);

        // Now do the access.
        if (fault == NoFault && !req->getFlags().isSet(Request::NO_ACCESS)) {
            Packet pkt(req, Packet::makeReadCmd(req));
            pkt.dataStatic(data);

            if (req->isMmappedIpr()) {
                dcache_latency += TheISA::handleIprRead(thread->getTC(), &pkt);
            } else {
                dcache_latency += sendPacket(dcachePort, &pkt);
            }
            dcache_access = true;

            assert(!pkt.isError());

            if (req->isLLSC()) {
                TheISA::handleLockedRead(thread, req);
            }
        }

        //If there's a fault, return it
        if (fault != NoFault) {
            if (req->isPrefetch()) {
                return NoFault;
            } else {
                return fault;
            }
        }

        //If we don't need to access a second cache line, stop now.
        if (secondAddr <= addr)
        {
            if (req->isLockedRMW() && fault == NoFault) {
                assert(!locked);
                locked = true;
            }

            return fault;
        }

        /*
         * Set up for accessing the second cache line.
         */

        //Move the pointer we're reading into to the correct location.
        data += size;
        //Adjust the size to get the remaining bytes.
        size = addr + fullSize - secondAddr;
        //And access the right address.
        addr = secondAddr;
    }
}

Fault
AtomicSimpleCPU::writeMem(uint8_t *data, unsigned size, Addr addr,
                          Request::Flags flags, uint64_t *res)
{
    SimpleExecContext& t_info = *threadInfo[curThread];
    SimpleThread* thread = t_info.thread;
    static uint8_t zero_array[64] = {};

    if (data == NULL) {
        assert(size <= 64);
        assert(flags & Request::STORE_NO_DATA);
        // This must be a cache block cleaning request
        data = zero_array;
    }

    // use the CPU's statically allocated write request and packet objects
    const RequestPtr &req = data_write_req;

    if (traceData)
        traceData->setMem(addr, size, flags);

    //The size of the data we're trying to write.
    int fullSize = size;

    //The address of the second part of this access if it needs to be split
    //across a cache line boundary.
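    // Illustrative example (hypothetical numbers, not from the original
    // source): with a 64-byte cache line, an 8-byte store to addr 0x3c gives
    // secondAddr = roundDown(0x3c + 8 - 1, 64) = 0x40 > addr, so the first
    // pass covers 0x3c-0x3f and the second pass covers 0x40-0x43.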
    Addr secondAddr = roundDown(addr + size - 1, cacheLineSize());

    if (secondAddr > addr)
        size = secondAddr - addr;

    dcache_latency = 0;

    req->taskId(taskId());
    while (1) {
        req->setVirt(0, addr, size, flags, dataMasterId(),
                     thread->pcState().instAddr());

        // translate to physical address
        Fault fault = thread->dtb->translateAtomic(req, thread->getTC(),
                                                   BaseTLB::Write);

        // Now do the access.
        if (fault == NoFault) {
            bool do_access = true;  // flag to suppress cache access

            if (req->isLLSC()) {
                do_access = TheISA::handleLockedWrite(thread, req,
                                                      dcachePort.cacheBlockMask);
            } else if (req->isSwap()) {
                if (req->isCondSwap()) {
                    assert(res);
                    req->setExtraData(*res);
                }
            }

            if (do_access && !req->getFlags().isSet(Request::NO_ACCESS)) {
                Packet pkt(req, Packet::makeWriteCmd(req));
                pkt.dataStatic(data);

                if (req->isMmappedIpr()) {
                    dcache_latency +=
                        TheISA::handleIprWrite(thread->getTC(), &pkt);
                } else {
                    dcache_latency += sendPacket(dcachePort, &pkt);

                    // Notify other threads on this CPU of write
                    threadSnoop(&pkt, curThread);
                }
                dcache_access = true;
                assert(!pkt.isError());

                if (req->isSwap()) {
                    assert(res);
                    memcpy(res, pkt.getConstPtr<uint8_t>(), fullSize);
                }
            }

            if (res && !req->isSwap()) {
                *res = req->getExtraData();
            }
        }

        //If there's a fault or we don't need to access a second cache line,
        //stop now.
        if (fault != NoFault || secondAddr <= addr)
        {
            if (req->isLockedRMW() && fault == NoFault) {
                assert(locked);
                locked = false;
            }

            if (fault != NoFault && req->isPrefetch()) {
                return NoFault;
            } else {
                return fault;
            }
        }

        /*
         * Set up for accessing the second cache line.
         */

        //Move the pointer we're writing from to the correct location.
        data += size;
        //Adjust the size to get the remaining bytes.
        size = addr + fullSize - secondAddr;
        //And access the right address.
        addr = secondAddr;
    }
}

Fault
AtomicSimpleCPU::amoMem(Addr addr, uint8_t* data, unsigned size,
                        Request::Flags flags, AtomicOpFunctor *amo_op)
{
    SimpleExecContext& t_info = *threadInfo[curThread];
    SimpleThread* thread = t_info.thread;

    // use the CPU's statically allocated amo request and packet objects
    const RequestPtr &req = data_amo_req;

    if (traceData)
        traceData->setMem(addr, size, flags);

    //The address of the second part of this access if it needs to be split
    //across a cache line boundary.
    Addr secondAddr = roundDown(addr + size - 1, cacheLineSize());

    // AMO requests that access across a cache line boundary are not
    // allowed since the cache does not guarantee AMO ops to be executed
    // atomically in two cache lines.
    // For ISAs such as x86 that require AMO operations to work on
    // accesses that cross cache-line boundaries, the cache needs to be
    // modified to support locking both cache lines to guarantee
    // atomicity.
    if (secondAddr > addr) {
        panic("AMO request should not access across a cache line boundary\n");
    }

    dcache_latency = 0;

    req->taskId(taskId());
    req->setVirt(0, addr, size, flags, dataMasterId(),
                 thread->pcState().instAddr(), amo_op);

    // translate to physical address
    Fault fault = thread->dtb->translateAtomic(req, thread->getTC(),
                                               BaseTLB::Write);

    // Now do the access.
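    // Note: as in readMem/writeMem above, requests carrying the NO_ACCESS
    // flag are expected to complete without touching memory, so the packet
    // is only built and sent when that flag is clear.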
    if (fault == NoFault && !req->getFlags().isSet(Request::NO_ACCESS)) {
        // We treat AMO accesses as Write accesses with SwapReq command
        // data will hold the return data of the AMO access
        Packet pkt(req, Packet::makeWriteCmd(req));
        pkt.dataStatic(data);

        if (req->isMmappedIpr())
            dcache_latency += TheISA::handleIprRead(thread->getTC(), &pkt);
        else {
            dcache_latency += sendPacket(dcachePort, &pkt);
        }

        dcache_access = true;

        assert(!pkt.isError());
        assert(!req->isLLSC());
    }

    if (fault != NoFault && req->isPrefetch()) {
        return NoFault;
    }

    //If there's a fault and we're not doing prefetch, return it
    return fault;
}

void
AtomicSimpleCPU::tick()
{
    DPRINTF(SimpleCPU, "Tick\n");

    // Change thread if multi-threaded
    swapActiveThread();

    // Set memory request ids to current thread
    if (numThreads > 1) {
        ContextID cid = threadContexts[curThread]->contextId();

        ifetch_req->setContext(cid);
        data_read_req->setContext(cid);
        data_write_req->setContext(cid);
        data_amo_req->setContext(cid);
    }

    SimpleExecContext& t_info = *threadInfo[curThread];
    SimpleThread* thread = t_info.thread;

    Tick latency = 0;

    for (int i = 0; i < width || locked; ++i) {
        numCycles++;
        updateCycleCounters(BaseCPU::CPU_STATE_ON);

        if (!curStaticInst || !curStaticInst->isDelayedCommit()) {
            checkForInterrupts();
            checkPcEventQueue();
        }

        // We must have just got suspended by a PC event
        if (_status == Idle) {
            tryCompleteDrain();
            return;
        }

        Fault fault = NoFault;

        TheISA::PCState pcState = thread->pcState();

        bool needToFetch = !isRomMicroPC(pcState.microPC()) &&
                           !curMacroStaticInst;
        if (needToFetch) {
            ifetch_req->taskId(taskId());
            setupFetchRequest(ifetch_req);
            fault = thread->itb->translateAtomic(ifetch_req, thread->getTC(),
                                                 BaseTLB::Execute);
        }

        if (fault == NoFault) {
            Tick icache_latency = 0;
            bool icache_access = false;
            dcache_access = false; // assume no dcache access

            if (needToFetch) {
                // This is commented out because the decoder would act like
                // a tiny cache otherwise. It wouldn't be flushed when needed
                // like the I cache. It should be flushed, and when that works
                // this code should be uncommented.
                //Fetch more instruction memory if necessary
                //if (decoder.needMoreBytes())
                //{
                icache_access = true;
                Packet ifetch_pkt = Packet(ifetch_req, MemCmd::ReadReq);
                ifetch_pkt.dataStatic(&inst);

                icache_latency = sendPacket(icachePort, &ifetch_pkt);

                assert(!ifetch_pkt.isError());

                // ifetch_req is initialized to read the instruction directly
                // into the CPU object's inst field.
                //}
            }

            preExecute();

            Tick stall_ticks = 0;
            if (curStaticInst) {
                fault = curStaticInst->execute(&t_info, traceData);

                // keep an instruction count
                if (fault == NoFault) {
                    countInst();
                    ppCommit->notify(std::make_pair(thread, curStaticInst));
                }
                else if (traceData && !DTRACE(ExecFaulting)) {
                    delete traceData;
                    traceData = NULL;
                }

                if (fault != NoFault &&
                    dynamic_pointer_cast<SyscallRetryFault>(fault)) {
                    // Retry execution of system calls after a delay.
                    // Prevents immediate re-execution since conditions which
                    // caused the retry are unlikely to change every tick.
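                    // (Assuming the usual gem5 Clocked::clockEdge() semantics:
                    // clockEdge(syscallRetryLatency) is the absolute tick of
                    // the clock edge syscallRetryLatency cycles in the future,
                    // so subtracting curTick() converts the retry delay into a
                    // stall expressed in ticks.)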
                    stall_ticks += clockEdge(syscallRetryLatency) - curTick();
                }

                postExecute();
            }

            // @todo remove me after debugging with legion done
            if (curStaticInst && (!curStaticInst->isMicroop() ||
                                  curStaticInst->isFirstMicroop()))
                instCnt++;

            if (simulate_inst_stalls && icache_access)
                stall_ticks += icache_latency;

            if (simulate_data_stalls && dcache_access)
                stall_ticks += dcache_latency;

            if (stall_ticks) {
                // the atomic cpu does its accounting in ticks, so
                // keep counting in ticks but round to the clock
                // period
                latency += divCeil(stall_ticks, clockPeriod()) *
                           clockPeriod();
            }

        }
        if (fault != NoFault || !t_info.stayAtPC)
            advancePC(fault);
    }

    if (tryCompleteDrain())
        return;

    // instruction takes at least one cycle
    if (latency < clockPeriod())
        latency = clockPeriod();

    if (_status != Idle)
        reschedule(tickEvent, curTick() + latency, true);
}

void
AtomicSimpleCPU::regProbePoints()
{
    BaseCPU::regProbePoints();

    ppCommit = new ProbePointArg<pair<SimpleThread*, const StaticInstPtr>>
                                (getProbeManager(), "Commit");
}

void
AtomicSimpleCPU::printAddr(Addr a)
{
    dcachePort.printAddr(a);
}

////////////////////////////////////////////////////////////////////////
//
//  AtomicSimpleCPU Simulation Object
//
AtomicSimpleCPU *
AtomicSimpleCPUParams::create()
{
    return new AtomicSimpleCPU(this);
}
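
// A minimal usage sketch (hypothetical gem5 Python configuration, not part of
// this file; parameter names follow AtomicSimpleCPUParams used above):
//
//     system.mem_mode = 'atomic'   # verifyMemoryMode() requires atomic mode
//     system.cpu = AtomicSimpleCPU(width=1,
//                                  simulate_data_stalls=False,
//                                  simulate_inst_stalls=False)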