/*
 * Copyright (c) 2011-2012,2016-2017 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * Copyright (c) 2011 Regents of the University of California
 * Copyright (c) 2013 Advanced Micro Devices, Inc.
 * Copyright (c) 2013 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 *          Nathan Binkert
 *          Rick Strong
 */

#include "cpu/base.hh"

#include <iostream>
#include <sstream>
#include <string>

#include "arch/generic/tlb.hh"
#include "base/cprintf.hh"
#include "base/loader/symtab.hh"
#include "base/logging.hh"
#include "base/output.hh"
#include "base/trace.hh"
#include "cpu/checker/cpu.hh"
#include "cpu/cpuevent.hh"
#include "cpu/profile.hh"
#include "cpu/thread_context.hh"
#include "debug/Mwait.hh"
#include "debug/SyscallVerbose.hh"
#include "mem/page_table.hh"
#include "params/BaseCPU.hh"
#include "sim/clocked_object.hh"
#include "sim/full_system.hh"
#include "sim/process.hh"
#include "sim/sim_events.hh"
#include "sim/sim_exit.hh"
#include "sim/system.hh"

// Hack
#include "sim/stat_control.hh"

using namespace std;

vector<BaseCPU *> BaseCPU::cpuList;

// This variable reflects the max number of threads in any CPU.  Be
// careful to only use it once all the CPUs that you care about have
// been initialized.
int maxThreadsPerCPU = 1;

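// CPUProgressEvent periodically reports how many ops this CPU has
// committed since the last report (and, in debug builds, the resulting
// IPC).  It reschedules itself every _interval ticks for as long as
// _repeatEvent is set.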
CPUProgressEvent::CPUProgressEvent(BaseCPU *_cpu, Tick ival)
    : Event(Event::Progress_Event_Pri), _interval(ival), lastNumInst(0),
      cpu(_cpu), _repeatEvent(true)
{
    if (_interval)
        cpu->schedule(this, curTick() + _interval);
}

void
CPUProgressEvent::process()
{
    Counter temp = cpu->totalOps();

    if (_repeatEvent)
        cpu->schedule(this, curTick() + _interval);

    if (cpu->switchedOut()) {
        return;
    }

#ifndef NDEBUG
    double ipc = double(temp - lastNumInst) / (_interval / cpu->clockPeriod());

    DPRINTFN("%s progress event, total committed:%i, progress insts committed: "
             "%lli, IPC: %0.8d\n", cpu->name(), temp, temp - lastNumInst,
             ipc);
    ipc = 0.0;
#else
    cprintf("%lli: %s progress event, total committed:%i, progress insts "
            "committed: %lli\n", curTick(), cpu->name(), temp,
            temp - lastNumInst);
#endif
    lastNumInst = temp;
}

const char *
CPUProgressEvent::description() const
{
    return "CPU Progress";
}

BaseCPU::BaseCPU(Params *p, bool is_checker)
    : MemObject(p), instCnt(0), _cpuId(p->cpu_id), _socketId(p->socket_id),
      _instMasterId(p->system->getMasterId(this, "inst")),
      _dataMasterId(p->system->getMasterId(this, "data")),
      _taskId(ContextSwitchTaskId::Unknown), _pid(invldPid),
      _switchedOut(p->switched_out), _cacheLineSize(p->system->cacheLineSize()),
      interrupts(p->interrupts), profileEvent(NULL),
      numThreads(p->numThreads), system(p->system),
      previousCycle(0), previousState(CPU_STATE_SLEEP),
      functionTraceStream(nullptr), currentFunctionStart(0),
      currentFunctionEnd(0), functionEntryTick(0),
      addressMonitor(p->numThreads),
      syscallRetryLatency(p->syscallRetryLatency),
      pwrGatingLatency(p->pwr_gating_latency),
      powerGatingOnIdle(p->power_gating_on_idle),
      enterPwrGatingEvent([this]{ enterPwrGating(); }, name())
{
    // if Python did not provide a valid ID, do it here
    if (_cpuId == -1) {
        _cpuId = cpuList.size();
    }

    // add self to global list of CPUs
    cpuList.push_back(this);

    DPRINTF(SyscallVerbose, "Constructing CPU with id %d, socket id %d\n",
            _cpuId, _socketId);

    if (numThreads > maxThreadsPerCPU)
        maxThreadsPerCPU = numThreads;

    // allocate per-thread instruction-based event queues
    comInstEventQueue = new EventQueue *[numThreads];
    for (ThreadID tid = 0; tid < numThreads; ++tid)
        comInstEventQueue[tid] =
            new EventQueue("instruction-based event queue");

    //
    // set up instruction-count-based termination events, if any
    //
    if (p->max_insts_any_thread != 0) {
        const char *cause = "a thread reached the max instruction count";
        for (ThreadID tid = 0; tid < numThreads; ++tid)
            scheduleInstStop(tid, p->max_insts_any_thread, cause);
    }
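    // Note that comInstEventQueue is an ordinary EventQueue whose "time"
    // axis is the per-thread committed-instruction count rather than
    // simulated ticks: scheduleInstStop() (defined below) schedules an
    // exit event at (current count + insts), and the CPU implementation
    // services the queue as instructions commit.  For example, with
    // numThreads == 2 and max_insts_all_threads == 1000 (handled below),
    // each thread's queue gets a CountedExitEvent at count 1000 sharing
    // one downcounter initialized to 2, so simulation exits only after
    // both threads have committed 1000 instructions.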
    // Set up instruction-count-based termination events for SimPoints.
    // Typically, there is more than one action point.
    // Simulation.py is responsible for taking the necessary actions upon
    // exiting the simulation loop.
    if (!p->simpoint_start_insts.empty()) {
        const char *cause = "simpoint starting point found";
        for (size_t i = 0; i < p->simpoint_start_insts.size(); ++i)
            scheduleInstStop(0, p->simpoint_start_insts[i], cause);
    }

    if (p->max_insts_all_threads != 0) {
        const char *cause = "all threads reached the max instruction count";

        // allocate & initialize shared downcounter: each event will
        // decrement this when triggered; simulation will terminate
        // when counter reaches 0
        int *counter = new int;
        *counter = numThreads;
        for (ThreadID tid = 0; tid < numThreads; ++tid) {
            Event *event = new CountedExitEvent(cause, *counter);
            comInstEventQueue[tid]->schedule(event, p->max_insts_all_threads);
        }
    }

    // allocate per-thread load-based event queues
    comLoadEventQueue = new EventQueue *[numThreads];
    for (ThreadID tid = 0; tid < numThreads; ++tid)
        comLoadEventQueue[tid] = new EventQueue("load-based event queue");

    //
    // set up load-count-based termination events, if any
    //
    if (p->max_loads_any_thread != 0) {
        const char *cause = "a thread reached the max load count";
        for (ThreadID tid = 0; tid < numThreads; ++tid)
            scheduleLoadStop(tid, p->max_loads_any_thread, cause);
    }

    if (p->max_loads_all_threads != 0) {
        const char *cause = "all threads reached the max load count";
        // allocate & initialize shared downcounter: each event will
        // decrement this when triggered; simulation will terminate
        // when counter reaches 0
        int *counter = new int;
        *counter = numThreads;
        for (ThreadID tid = 0; tid < numThreads; ++tid) {
            Event *event = new CountedExitEvent(cause, *counter);
            comLoadEventQueue[tid]->schedule(event, p->max_loads_all_threads);
        }
    }

    functionTracingEnabled = false;
    if (p->function_trace) {
        const string fname = csprintf("ftrace.%s", name());
        functionTraceStream = simout.findOrCreate(fname)->stream();

        currentFunctionStart = currentFunctionEnd = 0;
        functionEntryTick = p->function_trace_start;

        if (p->function_trace_start == 0) {
            functionTracingEnabled = true;
        } else {
            Event *event = new EventFunctionWrapper(
                [this]{ enableFunctionTrace(); }, name(), true);
            schedule(event, p->function_trace_start);
        }
    }

    // The interrupts should always be present unless this CPU is
    // switched in later or in case it is a checker CPU
    if (!params()->switched_out && !is_checker) {
        fatal_if(interrupts.size() != numThreads,
                 "CPU %s has %i interrupt controllers, but is expecting one "
                 "per thread (%i)\n",
                 name(), interrupts.size(), numThreads);
        for (ThreadID tid = 0; tid < numThreads; tid++)
            interrupts[tid]->setCPU(this);
    }

    if (FullSystem) {
        if (params()->profile)
            profileEvent = new EventFunctionWrapper(
                [this]{ processProfileEvent(); },
                name());
    }
    tracer = params()->tracer;

    if (params()->isa.size() != numThreads) {
        fatal("Number of ISAs (%i) assigned to the CPU does not equal number "
              "of threads (%i).\n", params()->isa.size(), numThreads);
    }
}

void
BaseCPU::enableFunctionTrace()
{
    functionTracingEnabled = true;
}

BaseCPU::~BaseCPU()
{
    delete profileEvent;
    delete[] comLoadEventQueue;
    delete[] comInstEventQueue;
}

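// The three methods below implement the per-thread address monitor used
// by MONITOR/MWAIT-style instructions: armMonitor() records the virtual
// address a thread wants to watch, mwait()/mwaitAtomic() resolve it to a
// cache-line-aligned physical address and put the monitor into the
// waiting state, and AddressMonitor::doMonitor() (further down) wakes
// the thread when that line is written or invalidated.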
void
BaseCPU::armMonitor(ThreadID tid, Addr address)
{
    assert(tid < numThreads);
    AddressMonitor &monitor = addressMonitor[tid];

    monitor.armed = true;
    monitor.vAddr = address;
    monitor.pAddr = 0x0;
    DPRINTF(Mwait, "[tid:%d] Armed monitor (vAddr=0x%lx)\n", tid, address);
}

bool
BaseCPU::mwait(ThreadID tid, PacketPtr pkt)
{
    assert(tid < numThreads);
    AddressMonitor &monitor = addressMonitor[tid];

    if (!monitor.gotWakeup) {
        int block_size = cacheLineSize();
        uint64_t mask = ~((uint64_t)(block_size - 1));

        assert(pkt->req->hasPaddr());
        monitor.pAddr = pkt->getAddr() & mask;
        monitor.waiting = true;

        DPRINTF(Mwait, "[tid:%d] mwait called (vAddr=0x%lx, "
                "line's paddr=0x%lx)\n", tid, monitor.vAddr, monitor.pAddr);
        return true;
    } else {
        monitor.gotWakeup = false;
        return false;
    }
}

void
BaseCPU::mwaitAtomic(ThreadID tid, ThreadContext *tc, BaseTLB *dtb)
{
    assert(tid < numThreads);
    AddressMonitor &monitor = addressMonitor[tid];

    RequestPtr req = std::make_shared<Request>();

    Addr addr = monitor.vAddr;
    int block_size = cacheLineSize();
    uint64_t mask = ~((uint64_t)(block_size - 1));
    int size = block_size;

    // The address of the next line if it crosses a cache line boundary.
    Addr secondAddr = roundDown(addr + size - 1, block_size);

    if (secondAddr > addr)
        size = secondAddr - addr;

    req->setVirt(0, addr, size, 0x0, dataMasterId(), tc->instAddr());

    // translate to physical address
    Fault fault = dtb->translateAtomic(req, tc, BaseTLB::Read);
    assert(fault == NoFault);

    monitor.pAddr = req->getPaddr() & mask;
    monitor.waiting = true;

    DPRINTF(Mwait, "[tid:%d] mwait called (vAddr=0x%lx, line's paddr=0x%lx)\n",
            tid, monitor.vAddr, monitor.pAddr);
}

void
BaseCPU::init()
{
    if (!params()->switched_out) {
        registerThreadContexts();

        verifyMemoryMode();
    }
}

void
BaseCPU::startup()
{
    if (FullSystem) {
        if (!params()->switched_out && profileEvent)
            schedule(profileEvent, curTick());
    }

    if (params()->progress_interval) {
        new CPUProgressEvent(this, params()->progress_interval);
    }

    if (_switchedOut)
        ClockedObject::pwrState(Enums::PwrState::OFF);

    // Assumption: CPUs start to operate instantaneously without any latency
    if (ClockedObject::pwrState() == Enums::PwrState::UNDEFINED)
        ClockedObject::pwrState(Enums::PwrState::ON);
}

ProbePoints::PMUUPtr
BaseCPU::pmuProbePoint(const char *name)
{
    ProbePoints::PMUUPtr ptr;
    ptr.reset(new ProbePoints::PMU(getProbeManager(), name));

    return ptr;
}

void
BaseCPU::regProbePoints()
{
    ppAllCycles = pmuProbePoint("Cycles");
    ppActiveCycles = pmuProbePoint("ActiveCycles");

    ppRetiredInsts = pmuProbePoint("RetiredInsts");
    ppRetiredLoads = pmuProbePoint("RetiredLoads");
    ppRetiredStores = pmuProbePoint("RetiredStores");
    ppRetiredBranches = pmuProbePoint("RetiredBranches");

    ppSleeping = new ProbePointArg<bool>(this->getProbeManager(),
                                         "Sleeping");
}

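// Notify the PMU probe points for a committed instruction.  Micro-coded
// instructions are counted once per macro-op: RetiredInsts only fires
// for a non-micro-op or for the last micro-op of a sequence, while the
// load/store/branch probes fire for every (micro-)op with that flag.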
void
BaseCPU::probeInstCommit(const StaticInstPtr &inst)
{
    if (!inst->isMicroop() || inst->isLastMicroop())
        ppRetiredInsts->notify(1);

    if (inst->isLoad())
        ppRetiredLoads->notify(1);

    if (inst->isStore())
        ppRetiredStores->notify(1);

    if (inst->isControl())
        ppRetiredBranches->notify(1);
}

void
BaseCPU::regStats()
{
    MemObject::regStats();

    using namespace Stats;

    numCycles
        .name(name() + ".numCycles")
        .desc("number of cpu cycles simulated")
        ;

    numWorkItemsStarted
        .name(name() + ".numWorkItemsStarted")
        .desc("number of work items this cpu started")
        ;

    numWorkItemsCompleted
        .name(name() + ".numWorkItemsCompleted")
        .desc("number of work items this cpu completed")
        ;

    int size = threadContexts.size();
    if (size > 1) {
        for (int i = 0; i < size; ++i) {
            stringstream namestr;
            ccprintf(namestr, "%s.ctx%d", name(), i);
            threadContexts[i]->regStats(namestr.str());
        }
    } else if (size == 1)
        threadContexts[0]->regStats(name());
}

BaseMasterPort &
BaseCPU::getMasterPort(const string &if_name, PortID idx)
{
    // Get the right port based on name. This applies to all the
    // subclasses of the base CPU and relies on their implementation
    // of getDataPort and getInstPort. In all cases these methods
    // return a MasterPort pointer.
    if (if_name == "dcache_port")
        return getDataPort();
    else if (if_name == "icache_port")
        return getInstPort();
    else
        return MemObject::getMasterPort(if_name, idx);
}

void
BaseCPU::registerThreadContexts()
{
    assert(system->multiThread || numThreads == 1);

    ThreadID size = threadContexts.size();
    for (ThreadID tid = 0; tid < size; ++tid) {
        ThreadContext *tc = threadContexts[tid];

        if (system->multiThread) {
            tc->setContextId(system->registerThreadContext(tc));
        } else {
            tc->setContextId(system->registerThreadContext(tc, _cpuId));
        }

        if (!FullSystem)
            tc->getProcessPtr()->assignThreadContext(tc->contextId());
    }
}

void
BaseCPU::deschedulePowerGatingEvent()
{
    if (enterPwrGatingEvent.scheduled()) {
        deschedule(enterPwrGatingEvent);
    }
}

void
BaseCPU::schedulePowerGatingEvent()
{
    for (auto tc : threadContexts) {
        if (tc->status() == ThreadContext::Active)
            return;
    }

    if (ClockedObject::pwrState() == Enums::PwrState::CLK_GATED &&
        powerGatingOnIdle) {
        assert(!enterPwrGatingEvent.scheduled());
        // Schedule a power gating event when clock gated for the specified
        // amount of time
        schedule(enterPwrGatingEvent, clockEdge(pwrGatingLatency));
    }
}

int
BaseCPU::findContext(ThreadContext *tc)
{
    ThreadID size = threadContexts.size();
    for (ThreadID tid = 0; tid < size; ++tid) {
        if (tc == threadContexts[tid])
            return tid;
    }
    return 0;
}

void
BaseCPU::activateContext(ThreadID thread_num)
{
    // Squash enter power gating event while cpu gets activated
    if (enterPwrGatingEvent.scheduled())
        deschedule(enterPwrGatingEvent);
    // For any active thread running, update CPU power state to active (ON)
    ClockedObject::pwrState(Enums::PwrState::ON);

    updateCycleCounters(CPU_STATE_WAKEUP);
}

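// Idle power management: when every thread is suspended the CPU drops
// from ON to CLK_GATED and, if power_gating_on_idle is enabled, an event
// is scheduled to drop further to OFF after pwr_gating_latency cycles.
// activateContext() above squashes that event and returns the CPU to ON
// as soon as any thread becomes active again.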
void
BaseCPU::suspendContext(ThreadID thread_num)
{
    // Check if all threads are suspended
    for (auto t : threadContexts) {
        if (t->status() != ThreadContext::Suspended) {
            return;
        }
    }

    // All CPU threads are suspended, update cycle count
    updateCycleCounters(CPU_STATE_SLEEP);

    // All CPU threads suspended, enter lower power state for the CPU
    ClockedObject::pwrState(Enums::PwrState::CLK_GATED);

    // This mechanism is only active if power_gating_on_idle is enabled
    if (powerGatingOnIdle) {
        // Schedule power gating event when clock gated for pwrGatingLatency
        // cycles
        schedule(enterPwrGatingEvent, clockEdge(pwrGatingLatency));
    }
}

void
BaseCPU::haltContext(ThreadID thread_num)
{
    updateCycleCounters(BaseCPU::CPU_STATE_SLEEP);
}

void
BaseCPU::enterPwrGating(void)
{
    ClockedObject::pwrState(Enums::PwrState::OFF);
}

void
BaseCPU::switchOut()
{
    assert(!_switchedOut);
    _switchedOut = true;
    if (profileEvent && profileEvent->scheduled())
        deschedule(profileEvent);

    // Flush all TLBs in the CPU to avoid having stale translations if
    // it gets switched in later.
    flushTLBs();

    // Go to the power gating state
    ClockedObject::pwrState(Enums::PwrState::OFF);
}

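// Take over state from another (switched-out) CPU during a CPU switch:
// thread contexts, interrupt controllers, and the power state all move
// to this CPU, and the memory-side ports (including any TLB table-walker
// and checker ports) are re-bound from the old CPU to the new one.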
void
BaseCPU::takeOverFrom(BaseCPU *oldCPU)
{
    assert(threadContexts.size() == oldCPU->threadContexts.size());
    assert(_cpuId == oldCPU->cpuId());
    assert(_switchedOut);
    assert(oldCPU != this);
    _pid = oldCPU->getPid();
    _taskId = oldCPU->taskId();
    // Take over the power state of the switchedOut CPU
    ClockedObject::pwrState(oldCPU->pwrState());

    previousState = oldCPU->previousState;
    previousCycle = oldCPU->previousCycle;

    _switchedOut = false;

    ThreadID size = threadContexts.size();
    for (ThreadID i = 0; i < size; ++i) {
        ThreadContext *newTC = threadContexts[i];
        ThreadContext *oldTC = oldCPU->threadContexts[i];

        newTC->takeOverFrom(oldTC);

        CpuEvent::replaceThreadContext(oldTC, newTC);

        assert(newTC->contextId() == oldTC->contextId());
        assert(newTC->threadId() == oldTC->threadId());
        system->replaceThreadContext(newTC, newTC->contextId());

        /* This code no longer works since the zero register (e.g.,
         * r31 on Alpha) doesn't necessarily contain zero at this
         * point.
           if (DTRACE(Context))
               ThreadContext::compare(oldTC, newTC);
        */

        BaseMasterPort *old_itb_port = oldTC->getITBPtr()->getMasterPort();
        BaseMasterPort *old_dtb_port = oldTC->getDTBPtr()->getMasterPort();
        BaseMasterPort *new_itb_port = newTC->getITBPtr()->getMasterPort();
        BaseMasterPort *new_dtb_port = newTC->getDTBPtr()->getMasterPort();

        // Move over any table walker ports if they exist
        if (new_itb_port) {
            assert(!new_itb_port->isConnected());
            assert(old_itb_port);
            assert(old_itb_port->isConnected());
            BaseSlavePort &slavePort = old_itb_port->getSlavePort();
            old_itb_port->unbind();
            new_itb_port->bind(slavePort);
        }
        if (new_dtb_port) {
            assert(!new_dtb_port->isConnected());
            assert(old_dtb_port);
            assert(old_dtb_port->isConnected());
            BaseSlavePort &slavePort = old_dtb_port->getSlavePort();
            old_dtb_port->unbind();
            new_dtb_port->bind(slavePort);
        }
        newTC->getITBPtr()->takeOverFrom(oldTC->getITBPtr());
        newTC->getDTBPtr()->takeOverFrom(oldTC->getDTBPtr());

        // Check whether or not we have to transfer CheckerCPU
        // objects over in the switch
        CheckerCPU *oldChecker = oldTC->getCheckerCpuPtr();
        CheckerCPU *newChecker = newTC->getCheckerCpuPtr();
        if (oldChecker && newChecker) {
            BaseMasterPort *old_checker_itb_port =
                oldChecker->getITBPtr()->getMasterPort();
            BaseMasterPort *old_checker_dtb_port =
                oldChecker->getDTBPtr()->getMasterPort();
            BaseMasterPort *new_checker_itb_port =
                newChecker->getITBPtr()->getMasterPort();
            BaseMasterPort *new_checker_dtb_port =
                newChecker->getDTBPtr()->getMasterPort();

            newChecker->getITBPtr()->takeOverFrom(oldChecker->getITBPtr());
            newChecker->getDTBPtr()->takeOverFrom(oldChecker->getDTBPtr());

            // Move over any table walker ports if they exist for checker
            if (new_checker_itb_port) {
                assert(!new_checker_itb_port->isConnected());
                assert(old_checker_itb_port);
                assert(old_checker_itb_port->isConnected());
                BaseSlavePort &slavePort =
                    old_checker_itb_port->getSlavePort();
                old_checker_itb_port->unbind();
                new_checker_itb_port->bind(slavePort);
            }
            if (new_checker_dtb_port) {
                assert(!new_checker_dtb_port->isConnected());
                assert(old_checker_dtb_port);
                assert(old_checker_dtb_port->isConnected());
                BaseSlavePort &slavePort =
                    old_checker_dtb_port->getSlavePort();
                old_checker_dtb_port->unbind();
                new_checker_dtb_port->bind(slavePort);
            }
        }
    }

    interrupts = oldCPU->interrupts;
    for (ThreadID tid = 0; tid < numThreads; tid++) {
        interrupts[tid]->setCPU(this);
    }
    oldCPU->interrupts.clear();

    if (FullSystem) {
        for (ThreadID i = 0; i < size; ++i)
            threadContexts[i]->profileClear();

        if (profileEvent)
            schedule(profileEvent, curTick());
    }

    // All CPUs have an instruction and a data port, and the new CPU's
    // ports are dangling while the old CPU has its ports connected
    // already. Unbind the old CPU and then bind the ports of the one
    // we are switching to.
    assert(!getInstPort().isConnected());
    assert(oldCPU->getInstPort().isConnected());
    BaseSlavePort &inst_peer_port = oldCPU->getInstPort().getSlavePort();
    oldCPU->getInstPort().unbind();
    getInstPort().bind(inst_peer_port);

    assert(!getDataPort().isConnected());
    assert(oldCPU->getDataPort().isConnected());
    BaseSlavePort &data_peer_port = oldCPU->getDataPort().getSlavePort();
    oldCPU->getDataPort().unbind();
    getDataPort().bind(data_peer_port);
}

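// Flush all TLBs in this CPU, including any checker CPU's TLBs, so that
// no stale translations survive a switch-out/switch-in cycle.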
void
BaseCPU::flushTLBs()
{
    for (ThreadID i = 0; i < threadContexts.size(); ++i) {
        ThreadContext &tc(*threadContexts[i]);
        CheckerCPU *checker(tc.getCheckerCpuPtr());

        tc.getITBPtr()->flushAll();
        tc.getDTBPtr()->flushAll();
        if (checker) {
            checker->getITBPtr()->flushAll();
            checker->getDTBPtr()->flushAll();
        }
    }
}

void
BaseCPU::processProfileEvent()
{
    ThreadID size = threadContexts.size();

    for (ThreadID i = 0; i < size; ++i)
        threadContexts[i]->profileSample();

    schedule(profileEvent, curTick() + params()->profile);
}

void
BaseCPU::serialize(CheckpointOut &cp) const
{
    SERIALIZE_SCALAR(instCnt);

    if (!_switchedOut) {
        /* Unlike _pid, _taskId is not serialized, as it is a dynamically
         * assigned unique id that is only meaningful for the duration of
         * a specific run. We will need to serialize the entire taskMap in
         * system. */
        SERIALIZE_SCALAR(_pid);

        // Serialize the threads, this is done by the CPU implementation.
        for (ThreadID i = 0; i < numThreads; ++i) {
            ScopedCheckpointSection sec(cp, csprintf("xc.%i", i));
            interrupts[i]->serialize(cp);
            serializeThread(cp, i);
        }
    }
}

void
BaseCPU::unserialize(CheckpointIn &cp)
{
    UNSERIALIZE_SCALAR(instCnt);

    if (!_switchedOut) {
        UNSERIALIZE_SCALAR(_pid);

        // Unserialize the threads, this is done by the CPU implementation.
        for (ThreadID i = 0; i < numThreads; ++i) {
            ScopedCheckpointSection sec(cp, csprintf("xc.%i", i));
            interrupts[i]->unserialize(cp);
            unserializeThread(cp, i);
        }
    }
}

void
BaseCPU::scheduleInstStop(ThreadID tid, Counter insts, const char *cause)
{
    const Tick now(comInstEventQueue[tid]->getCurTick());
    Event *event(new LocalSimLoopExitEvent(cause, 0));

    comInstEventQueue[tid]->schedule(event, now + insts);
}

uint64_t
BaseCPU::getCurrentInstCount(ThreadID tid)
{
    return Tick(comInstEventQueue[tid]->getCurTick());
}

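// doMonitor() is called on incoming writes/invalidations; it reports a
// wakeup (returns true) only when the monitor is armed and waiting and
// the snooped packet's address matches the cache-line-aligned physical
// address recorded by mwait()/mwaitAtomic() above.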
AddressMonitor::AddressMonitor()
{
    armed = false;
    waiting = false;
    gotWakeup = false;
}

bool
AddressMonitor::doMonitor(PacketPtr pkt)
{
    assert(pkt->req->hasPaddr());
    if (armed && waiting) {
        if (pAddr == pkt->getAddr()) {
            DPRINTF(Mwait, "pAddr=0x%lx invalidated: waking up core\n",
                    pkt->getAddr());
            waiting = false;
            return true;
        }
    }
    return false;
}

void
BaseCPU::scheduleLoadStop(ThreadID tid, Counter loads, const char *cause)
{
    const Tick now(comLoadEventQueue[tid]->getCurTick());
    Event *event(new LocalSimLoopExitEvent(cause, 0));

    comLoadEventQueue[tid]->schedule(event, now + loads);
}


void
BaseCPU::traceFunctionsInternal(Addr pc)
{
    if (!debugSymbolTable)
        return;

    // if pc enters a different function, print new function symbol and
    // update saved range.  Otherwise do nothing.
    if (pc < currentFunctionStart || pc >= currentFunctionEnd) {
        string sym_str;
        bool found = debugSymbolTable->findNearestSymbol(pc, sym_str,
                                                         currentFunctionStart,
                                                         currentFunctionEnd);

        if (!found) {
            // no symbol found: use addr as label
            sym_str = csprintf("0x%x", pc);
            currentFunctionStart = pc;
            currentFunctionEnd = pc + 1;
        }

        ccprintf(*functionTraceStream, " (%d)\n%d: %s",
                 curTick() - functionEntryTick, curTick(), sym_str);
        functionEntryTick = curTick();
    }
}

bool
BaseCPU::waitForRemoteGDB() const
{
    return params()->wait_for_remote_gdb;
}