// base.cc (revision 14207)

/*
 * Copyright (c) 2011-2012,2016-2017 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * Copyright (c) 2011 Regents of the University of California
 * Copyright (c) 2013 Advanced Micro Devices, Inc.
 * Copyright (c) 2013 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 *          Nathan Binkert
 *          Rick Strong
 */

#include "cpu/base.hh"

#include <iostream>
#include <sstream>
#include <string>

#include "arch/generic/tlb.hh"
#include "base/cprintf.hh"
#include "base/loader/symtab.hh"
#include "base/logging.hh"
#include "base/output.hh"
#include "base/trace.hh"
#include "cpu/checker/cpu.hh"
#include "cpu/cpuevent.hh"
#include "cpu/profile.hh"
#include "cpu/thread_context.hh"
#include "debug/Mwait.hh"
#include "debug/SyscallVerbose.hh"
#include "mem/page_table.hh"
#include "params/BaseCPU.hh"
#include "sim/clocked_object.hh"
#include "sim/full_system.hh"
#include "sim/process.hh"
#include "sim/sim_events.hh"
#include "sim/sim_exit.hh"
#include "sim/system.hh"

// Hack
#include "sim/stat_control.hh"

using namespace std;

vector<BaseCPU *> BaseCPU::cpuList;
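
// Note: every BaseCPU constructed in the simulation registers itself in
// the static cpuList above (see the constructor below). This is what
// allows simulation-wide aggregation helpers (such as the static
// instruction-count helpers declared in base.hh, which sum committed
// instructions or ops across all cores) to iterate over every CPU
// without the System having to track them separately.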

// This variable reflects the max number of threads in any CPU. Be
// careful to only use it once all the CPUs that you care about have
// been initialized.
int maxThreadsPerCPU = 1;

CPUProgressEvent::CPUProgressEvent(BaseCPU *_cpu, Tick ival)
    : Event(Event::Progress_Event_Pri), _interval(ival), lastNumInst(0),
      cpu(_cpu), _repeatEvent(true)
{
    if (_interval)
        cpu->schedule(this, curTick() + _interval);
}

void
CPUProgressEvent::process()
{
    Counter temp = cpu->totalOps();

    if (_repeatEvent)
        cpu->schedule(this, curTick() + _interval);

    if (cpu->switchedOut()) {
        return;
    }

#ifndef NDEBUG
    double ipc = double(temp - lastNumInst) / (_interval / cpu->clockPeriod());

    DPRINTFN("%s progress event, total committed:%i, progress insts committed: "
             "%lli, IPC: %0.8d\n", cpu->name(), temp, temp - lastNumInst,
             ipc);
    ipc = 0.0;
#else
    cprintf("%lli: %s progress event, total committed:%i, progress insts "
            "committed: %lli\n", curTick(), cpu->name(), temp,
            temp - lastNumInst);
#endif
    lastNumInst = temp;
}

const char *
CPUProgressEvent::description() const
{
    return "CPU Progress";
}

BaseCPU::BaseCPU(Params *p, bool is_checker)
    : ClockedObject(p), instCnt(0), _cpuId(p->cpu_id), _socketId(p->socket_id),
      _instMasterId(p->system->getMasterId(this, "inst")),
      _dataMasterId(p->system->getMasterId(this, "data")),
      _taskId(ContextSwitchTaskId::Unknown), _pid(invldPid),
      _switchedOut(p->switched_out), _cacheLineSize(p->system->cacheLineSize()),
      interrupts(p->interrupts), profileEvent(NULL),
      numThreads(p->numThreads), system(p->system),
      previousCycle(0), previousState(CPU_STATE_SLEEP),
      functionTraceStream(nullptr), currentFunctionStart(0),
      currentFunctionEnd(0), functionEntryTick(0),
      addressMonitor(p->numThreads),
      syscallRetryLatency(p->syscallRetryLatency),
      pwrGatingLatency(p->pwr_gating_latency),
      powerGatingOnIdle(p->power_gating_on_idle),
      enterPwrGatingEvent([this]{ enterPwrGating(); }, name())
{
    // if Python did not provide a valid ID, do it here
    if (_cpuId == -1) {
        _cpuId = cpuList.size();
    }

    // add self to global list of CPUs
    cpuList.push_back(this);

    DPRINTF(SyscallVerbose, "Constructing CPU with id %d, socket id %d\n",
            _cpuId, _socketId);

    if (numThreads > maxThreadsPerCPU)
        maxThreadsPerCPU = numThreads;

    // allocate per-thread instruction-based event queues
    comInstEventQueue = new EventQueue *[numThreads];
    for (ThreadID tid = 0; tid < numThreads; ++tid)
        comInstEventQueue[tid] =
            new EventQueue("instruction-based event queue");

    //
    // set up instruction-count-based termination events, if any
    //
    if (p->max_insts_any_thread != 0) {
        const char *cause = "a thread reached the max instruction count";
        for (ThreadID tid = 0; tid < numThreads; ++tid)
            scheduleInstStop(tid, p->max_insts_any_thread, cause);
    }
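
    // For reference, the instruction-limit parameters consumed here are
    // ordinary BaseCPU parameters, so they are normally set from the
    // Python configuration before instantiation. An illustrative sketch
    // (the config-variable names are hypothetical):
    //
    //     system.cpu.max_insts_any_thread = 100000000
    //     exit_event = m5.simulate()
    //     print(exit_event.getCause())
    //     # -> "a thread reached the max instruction count"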

    // Set up instruction-count-based termination events for SimPoints.
    // Typically there is more than one action point; Simulation.py is
    // responsible for taking the necessary actions upon exiting the
    // simulation loop.
    if (!p->simpoint_start_insts.empty()) {
        const char *cause = "simpoint starting point found";
        for (size_t i = 0; i < p->simpoint_start_insts.size(); ++i)
            scheduleInstStop(0, p->simpoint_start_insts[i], cause);
    }

    if (p->max_insts_all_threads != 0) {
        const char *cause = "all threads reached the max instruction count";

        // allocate & initialize shared downcounter: each event will
        // decrement this when triggered; simulation will terminate
        // when counter reaches 0
        int *counter = new int;
        *counter = numThreads;
        for (ThreadID tid = 0; tid < numThreads; ++tid) {
            Event *event = new CountedExitEvent(cause, *counter);
            comInstEventQueue[tid]->schedule(event, p->max_insts_all_threads);
        }
    }

    // allocate per-thread load-based event queues
    comLoadEventQueue = new EventQueue *[numThreads];
    for (ThreadID tid = 0; tid < numThreads; ++tid)
        comLoadEventQueue[tid] = new EventQueue("load-based event queue");

    //
    // set up load-count-based termination events, if any
    //
    if (p->max_loads_any_thread != 0) {
        const char *cause = "a thread reached the max load count";
        for (ThreadID tid = 0; tid < numThreads; ++tid)
            scheduleLoadStop(tid, p->max_loads_any_thread, cause);
    }

    if (p->max_loads_all_threads != 0) {
        const char *cause = "all threads reached the max load count";
        // allocate & initialize shared downcounter: each event will
        // decrement this when triggered; simulation will terminate
        // when counter reaches 0
        int *counter = new int;
        *counter = numThreads;
        for (ThreadID tid = 0; tid < numThreads; ++tid) {
            Event *event = new CountedExitEvent(cause, *counter);
            comLoadEventQueue[tid]->schedule(event, p->max_loads_all_threads);
        }
    }

    functionTracingEnabled = false;
    if (p->function_trace) {
        const string fname = csprintf("ftrace.%s", name());
        functionTraceStream = simout.findOrCreate(fname)->stream();

        currentFunctionStart = currentFunctionEnd = 0;
        functionEntryTick = p->function_trace_start;

        if (p->function_trace_start == 0) {
            functionTracingEnabled = true;
        } else {
            Event *event = new EventFunctionWrapper(
                [this]{ enableFunctionTrace(); }, name(), true);
            schedule(event, p->function_trace_start);
        }
    }

    // The interrupts should always be present unless this CPU is
    // switched in later or in case it is a checker CPU
    if (!params()->switched_out && !is_checker) {
        fatal_if(interrupts.size() != numThreads,
                 "CPU %s has %i interrupt controllers, but is expecting one "
                 "per thread (%i)\n",
                 name(), interrupts.size(), numThreads);
        for (ThreadID tid = 0; tid < numThreads; tid++)
            interrupts[tid]->setCPU(this);
    }

    if (FullSystem) {
        if (params()->profile)
            profileEvent = new EventFunctionWrapper(
                [this]{ processProfileEvent(); },
                name());
    }
    tracer = params()->tracer;

    if (params()->isa.size() != numThreads) {
        fatal("Number of ISAs (%i) assigned to the CPU does not equal number "
              "of threads (%i).\n", params()->isa.size(), numThreads);
    }
}

void
BaseCPU::enableFunctionTrace()
{
    functionTracingEnabled = true;
}

BaseCPU::~BaseCPU()
{
    delete profileEvent;
    delete[] comLoadEventQueue;
    delete[] comInstEventQueue;
}
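
// The monitor routines below provide the generic half of a
// MONITOR/MWAIT-style idle mechanism: armMonitor() records the virtual
// address a thread wants to watch, mwait()/mwaitAtomic() latch the
// containing cache line's physical address and mark the thread as
// waiting, and AddressMonitor::doMonitor() (defined near the end of
// this file) wakes the thread when a write to that line is observed.
// A rough sketch of the call sequence an ISA implementation might use
// (illustrative only, not a specific ISA's code):
//
//     cpu->armMonitor(tid, vaddr);   // MONITOR: arm the monitor
//     if (cpu->mwait(tid, pkt))      // MWAIT: no wakeup pending yet,
//         tc->suspend();             // so sleep until doMonitor() sees
//                                    // a matching write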

void
BaseCPU::armMonitor(ThreadID tid, Addr address)
{
    assert(tid < numThreads);
    AddressMonitor &monitor = addressMonitor[tid];

    monitor.armed = true;
    monitor.vAddr = address;
    monitor.pAddr = 0x0;
    DPRINTF(Mwait, "[tid:%d] Armed monitor (vAddr=0x%lx)\n", tid, address);
}

bool
BaseCPU::mwait(ThreadID tid, PacketPtr pkt)
{
    assert(tid < numThreads);
    AddressMonitor &monitor = addressMonitor[tid];

    if (!monitor.gotWakeup) {
        int block_size = cacheLineSize();
        uint64_t mask = ~((uint64_t)(block_size - 1));

        assert(pkt->req->hasPaddr());
        monitor.pAddr = pkt->getAddr() & mask;
        monitor.waiting = true;

        DPRINTF(Mwait, "[tid:%d] mwait called (vAddr=0x%lx, "
                "line's paddr=0x%lx)\n", tid, monitor.vAddr, monitor.pAddr);
        return true;
    } else {
        monitor.gotWakeup = false;
        return false;
    }
}

void
BaseCPU::mwaitAtomic(ThreadID tid, ThreadContext *tc, BaseTLB *dtb)
{
    assert(tid < numThreads);
    AddressMonitor &monitor = addressMonitor[tid];

    RequestPtr req = std::make_shared<Request>();

    Addr addr = monitor.vAddr;
    int block_size = cacheLineSize();
    uint64_t mask = ~((uint64_t)(block_size - 1));
    int size = block_size;

    // The address of the next line if it crosses a cache line boundary.
    Addr secondAddr = roundDown(addr + size - 1, block_size);

    if (secondAddr > addr)
        size = secondAddr - addr;

    req->setVirt(0, addr, size, 0x0, dataMasterId(), tc->instAddr());

    // translate to physical address
    Fault fault = dtb->translateAtomic(req, tc, BaseTLB::Read);
    assert(fault == NoFault);

    monitor.pAddr = req->getPaddr() & mask;
    monitor.waiting = true;

    DPRINTF(Mwait, "[tid:%d] mwait called (vAddr=0x%lx, line's paddr=0x%lx)\n",
            tid, monitor.vAddr, monitor.pAddr);
}

void
BaseCPU::init()
{
    if (!params()->switched_out) {
        registerThreadContexts();

        verifyMemoryMode();
    }
}

void
BaseCPU::startup()
{
    if (FullSystem) {
        if (!params()->switched_out && profileEvent)
            schedule(profileEvent, curTick());
    }

    if (params()->progress_interval) {
        new CPUProgressEvent(this, params()->progress_interval);
    }

    if (_switchedOut)
        ClockedObject::pwrState(Enums::PwrState::OFF);

    // Assume the CPU starts operating instantaneously, without any
    // power-up latency.
    if (ClockedObject::pwrState() == Enums::PwrState::UNDEFINED)
        ClockedObject::pwrState(Enums::PwrState::ON);
}

ProbePoints::PMUUPtr
BaseCPU::pmuProbePoint(const char *name)
{
    ProbePoints::PMUUPtr ptr;
    ptr.reset(new ProbePoints::PMU(getProbeManager(), name));

    return ptr;
}

void
BaseCPU::regProbePoints()
{
    ppAllCycles = pmuProbePoint("Cycles");
    ppActiveCycles = pmuProbePoint("ActiveCycles");

    ppRetiredInsts = pmuProbePoint("RetiredInsts");
    ppRetiredInstsPC = pmuProbePoint("RetiredInstsPC");
    ppRetiredLoads = pmuProbePoint("RetiredLoads");
    ppRetiredStores = pmuProbePoint("RetiredStores");
    ppRetiredBranches = pmuProbePoint("RetiredBranches");

    ppSleeping = new ProbePointArg<bool>(this->getProbeManager(),
                                         "Sleeping");
}
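
// probeInstCommit() below is the per-instruction hook CPU models call
// at commit time; it fans the event out to the PMU probe points
// registered above. External probe listeners (an architectural PMU
// model, for example) can attach to these named probe points to count
// retired instructions, loads, stores, and branches without the CPU
// model knowing anything about them.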

void
BaseCPU::probeInstCommit(const StaticInstPtr &inst, Addr pc)
{
    if (!inst->isMicroop() || inst->isLastMicroop()) {
        ppRetiredInsts->notify(1);
        ppRetiredInstsPC->notify(pc);
    }

    if (inst->isLoad())
        ppRetiredLoads->notify(1);

    if (inst->isStore() || inst->isAtomic())
        ppRetiredStores->notify(1);

    if (inst->isControl())
        ppRetiredBranches->notify(1);
}

void
BaseCPU::regStats()
{
    ClockedObject::regStats();

    using namespace Stats;

    numCycles
        .name(name() + ".numCycles")
        .desc("number of cpu cycles simulated")
        ;

    numWorkItemsStarted
        .name(name() + ".numWorkItemsStarted")
        .desc("number of work items this cpu started")
        ;

    numWorkItemsCompleted
        .name(name() + ".numWorkItemsCompleted")
        .desc("number of work items this cpu completed")
        ;

    int size = threadContexts.size();
    if (size > 1) {
        for (int i = 0; i < size; ++i) {
            stringstream namestr;
            ccprintf(namestr, "%s.ctx%d", name(), i);
            threadContexts[i]->regStats(namestr.str());
        }
    } else if (size == 1)
        threadContexts[0]->regStats(name());
}

Port &
BaseCPU::getPort(const string &if_name, PortID idx)
{
    // Get the right port based on name. This applies to all the
    // subclasses of the base CPU and relies on their implementation
    // of getDataPort and getInstPort.
    if (if_name == "dcache_port")
        return getDataPort();
    else if (if_name == "icache_port")
        return getInstPort();
    else
        return ClockedObject::getPort(if_name, idx);
}

void
BaseCPU::registerThreadContexts()
{
    assert(system->multiThread || numThreads == 1);

    ThreadID size = threadContexts.size();
    for (ThreadID tid = 0; tid < size; ++tid) {
        ThreadContext *tc = threadContexts[tid];

        if (system->multiThread) {
            tc->setContextId(system->registerThreadContext(tc));
        } else {
            tc->setContextId(system->registerThreadContext(tc, _cpuId));
        }

        if (!FullSystem)
            tc->getProcessPtr()->assignThreadContext(tc->contextId());
    }
}

void
BaseCPU::deschedulePowerGatingEvent()
{
    if (enterPwrGatingEvent.scheduled()) {
        deschedule(enterPwrGatingEvent);
    }
}

void
BaseCPU::schedulePowerGatingEvent()
{
    for (auto tc : threadContexts) {
        if (tc->status() == ThreadContext::Active)
            return;
    }

    if (ClockedObject::pwrState() == Enums::PwrState::CLK_GATED &&
        powerGatingOnIdle) {
        assert(!enterPwrGatingEvent.scheduled());
        // Schedule a power gating event when clock gated for the specified
        // amount of time
        schedule(enterPwrGatingEvent, clockEdge(pwrGatingLatency));
    }
}

int
BaseCPU::findContext(ThreadContext *tc)
{
    ThreadID size = threadContexts.size();
    for (ThreadID tid = 0; tid < size; ++tid) {
        if (tc == threadContexts[tid])
            return tid;
    }
    return 0;
}

void
BaseCPU::activateContext(ThreadID thread_num)
{
    // Squash the pending power gating event while the cpu is activated
    if (enterPwrGatingEvent.scheduled())
        deschedule(enterPwrGatingEvent);
    // For any active thread running, update CPU power state to active (ON)
    ClockedObject::pwrState(Enums::PwrState::ON);

    updateCycleCounters(CPU_STATE_WAKEUP);
}
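
// Power-state lifecycle implemented by the context handlers here: the
// CPU is ON while any thread is active; when the last thread suspends
// (suspendContext() below) the CPU drops to CLK_GATED, and, if power
// gating on idle is enabled, to OFF once enterPwrGatingEvent fires
// after pwrGatingLatency cycles. Both knobs come from the Python
// config, e.g. (illustrative values):
//
//     system.cpu.power_gating_on_idle = True
//     system.cpu.pwr_gating_latency = 300  # in cycles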

void
BaseCPU::suspendContext(ThreadID thread_num)
{
    // Check if all threads are suspended
    for (auto t : threadContexts) {
        if (t->status() != ThreadContext::Suspended) {
            return;
        }
    }

    // All CPU threads are suspended, update the cycle count
    updateCycleCounters(CPU_STATE_SLEEP);

    // All CPU threads suspended, enter a lower power state for the CPU
    ClockedObject::pwrState(Enums::PwrState::CLK_GATED);

    // Power gating on idle is optional; when enabled, schedule the
    // power gating event to fire after the CPU has been clock gated
    // for pwrGatingLatency cycles
    if (powerGatingOnIdle) {
        schedule(enterPwrGatingEvent, clockEdge(pwrGatingLatency));
    }
}

void
BaseCPU::haltContext(ThreadID thread_num)
{
    updateCycleCounters(BaseCPU::CPU_STATE_SLEEP);
}

void
BaseCPU::enterPwrGating(void)
{
    ClockedObject::pwrState(Enums::PwrState::OFF);
}

void
BaseCPU::switchOut()
{
    assert(!_switchedOut);
    _switchedOut = true;
    if (profileEvent && profileEvent->scheduled())
        deschedule(profileEvent);

    // Flush all TLBs in the CPU to avoid having stale translations if
    // it gets switched in later.
    flushTLBs();

    // Go to the power gating state
    ClockedObject::pwrState(Enums::PwrState::OFF);
}
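
// takeOverFrom() transplants a running workload from oldCPU into this
// (previously switched-out) CPU: per-thread context state, TLBs and
// table-walker ports, interrupt controllers, power state, and finally
// the instruction/data port bindings. This is the mechanism behind
// fast-forward/sampled simulation, where e.g. an atomic CPU runs the
// warm-up phase and a detailed CPU is switched in for measurement;
// the switch itself is orchestrated from the Python simulation scripts.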

void
BaseCPU::takeOverFrom(BaseCPU *oldCPU)
{
    assert(threadContexts.size() == oldCPU->threadContexts.size());
    assert(_cpuId == oldCPU->cpuId());
    assert(_switchedOut);
    assert(oldCPU != this);
    _pid = oldCPU->getPid();
    _taskId = oldCPU->taskId();
    // Take over the power state of the switchedOut CPU
    ClockedObject::pwrState(oldCPU->pwrState());

    previousState = oldCPU->previousState;
    previousCycle = oldCPU->previousCycle;

    _switchedOut = false;

    ThreadID size = threadContexts.size();
    for (ThreadID i = 0; i < size; ++i) {
        ThreadContext *newTC = threadContexts[i];
        ThreadContext *oldTC = oldCPU->threadContexts[i];

        newTC->takeOverFrom(oldTC);

        CpuEvent::replaceThreadContext(oldTC, newTC);

        assert(newTC->contextId() == oldTC->contextId());
        assert(newTC->threadId() == oldTC->threadId());
        system->replaceThreadContext(newTC, newTC->contextId());

        /* This code no longer works since the zero register (e.g.,
         * r31 on Alpha) doesn't necessarily contain zero at this
         * point.
           if (DTRACE(Context))
               ThreadContext::compare(oldTC, newTC);
        */

        Port *old_itb_port = oldTC->getITBPtr()->getTableWalkerPort();
        Port *old_dtb_port = oldTC->getDTBPtr()->getTableWalkerPort();
        Port *new_itb_port = newTC->getITBPtr()->getTableWalkerPort();
        Port *new_dtb_port = newTC->getDTBPtr()->getTableWalkerPort();

        // Move over any table walker ports if they exist
        if (new_itb_port)
            new_itb_port->takeOverFrom(old_itb_port);
        if (new_dtb_port)
            new_dtb_port->takeOverFrom(old_dtb_port);
        newTC->getITBPtr()->takeOverFrom(oldTC->getITBPtr());
        newTC->getDTBPtr()->takeOverFrom(oldTC->getDTBPtr());

        // Check whether or not we have to transfer CheckerCPU
        // objects over in the switch
        CheckerCPU *oldChecker = oldTC->getCheckerCpuPtr();
        CheckerCPU *newChecker = newTC->getCheckerCpuPtr();
        if (oldChecker && newChecker) {
            Port *old_checker_itb_port =
                oldChecker->getITBPtr()->getTableWalkerPort();
            Port *old_checker_dtb_port =
                oldChecker->getDTBPtr()->getTableWalkerPort();
            Port *new_checker_itb_port =
                newChecker->getITBPtr()->getTableWalkerPort();
            Port *new_checker_dtb_port =
                newChecker->getDTBPtr()->getTableWalkerPort();

            newChecker->getITBPtr()->takeOverFrom(oldChecker->getITBPtr());
            newChecker->getDTBPtr()->takeOverFrom(oldChecker->getDTBPtr());

            // Move over any table walker ports if they exist for checker
            if (new_checker_itb_port)
                new_checker_itb_port->takeOverFrom(old_checker_itb_port);
            if (new_checker_dtb_port)
                new_checker_dtb_port->takeOverFrom(old_checker_dtb_port);
        }
    }

    interrupts = oldCPU->interrupts;
    for (ThreadID tid = 0; tid < numThreads; tid++) {
        interrupts[tid]->setCPU(this);
    }
    oldCPU->interrupts.clear();

    if (FullSystem) {
        for (ThreadID i = 0; i < size; ++i)
            threadContexts[i]->profileClear();

        if (profileEvent)
            schedule(profileEvent, curTick());
    }

    // All CPUs have an instruction and a data port, and the new CPU's
    // ports are dangling while the old CPU has its ports connected
    // already. Unbind the old CPU and then bind the ports of the one
    // we are switching to.
    getInstPort().takeOverFrom(&oldCPU->getInstPort());
    getDataPort().takeOverFrom(&oldCPU->getDataPort());
}

void
BaseCPU::flushTLBs()
{
    for (ThreadID i = 0; i < threadContexts.size(); ++i) {
        ThreadContext &tc(*threadContexts[i]);
        CheckerCPU *checker(tc.getCheckerCpuPtr());

        tc.getITBPtr()->flushAll();
        tc.getDTBPtr()->flushAll();
        if (checker) {
            checker->getITBPtr()->flushAll();
            checker->getDTBPtr()->flushAll();
        }
    }
}

void
BaseCPU::processProfileEvent()
{
    ThreadID size = threadContexts.size();

    for (ThreadID i = 0; i < size; ++i)
        threadContexts[i]->profileSample();

    schedule(profileEvent, curTick() + params()->profile);
}
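
// Checkpointing: serialize()/unserialize() below store the CPU-generic
// state, while per-thread architectural state is delegated to the
// concrete CPU model through serializeThread()/unserializeThread().
// Each thread gets its own "xc.<tid>" checkpoint section, so a
// two-thread CPU emits sections xc.0 and xc.1 alongside the CPU's own
// scalars (instCnt, _pid).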

void
BaseCPU::serialize(CheckpointOut &cp) const
{
    SERIALIZE_SCALAR(instCnt);

    if (!_switchedOut) {
        /* Unlike _pid, _taskId is not serialized, as it is a dynamically
         * assigned unique id that is only meaningful for the duration of
         * a specific run. We would need to serialize the entire taskMap
         * in the system. */
        SERIALIZE_SCALAR(_pid);

        // Serialize the threads; this is done by the CPU implementation.
        for (ThreadID i = 0; i < numThreads; ++i) {
            ScopedCheckpointSection sec(cp, csprintf("xc.%i", i));
            interrupts[i]->serialize(cp);
            serializeThread(cp, i);
        }
    }
}

void
BaseCPU::unserialize(CheckpointIn &cp)
{
    UNSERIALIZE_SCALAR(instCnt);

    if (!_switchedOut) {
        UNSERIALIZE_SCALAR(_pid);

        // Unserialize the threads; this is done by the CPU implementation.
        for (ThreadID i = 0; i < numThreads; ++i) {
            ScopedCheckpointSection sec(cp, csprintf("xc.%i", i));
            interrupts[i]->unserialize(cp);
            unserializeThread(cp, i);
        }
    }
}

void
BaseCPU::scheduleInstStop(ThreadID tid, Counter insts, const char *cause)
{
    const Tick now(comInstEventQueue[tid]->getCurTick());
    Event *event(new LocalSimLoopExitEvent(cause, 0));

    comInstEventQueue[tid]->schedule(event, now + insts);
}

uint64_t
BaseCPU::getCurrentInstCount(ThreadID tid)
{
    return Tick(comInstEventQueue[tid]->getCurTick());
}

AddressMonitor::AddressMonitor()
{
    armed = false;
    waiting = false;
    gotWakeup = false;
}

bool
AddressMonitor::doMonitor(PacketPtr pkt)
{
    assert(pkt->req->hasPaddr());
    if (armed && waiting) {
        if (pAddr == pkt->getAddr()) {
            DPRINTF(Mwait, "pAddr=0x%lx invalidated: waking up core\n",
                    pkt->getAddr());
            waiting = false;
            return true;
        }
    }
    return false;
}

void
BaseCPU::scheduleLoadStop(ThreadID tid, Counter loads, const char *cause)
{
    const Tick now(comLoadEventQueue[tid]->getCurTick());
    Event *event(new LocalSimLoopExitEvent(cause, 0));

    comLoadEventQueue[tid]->schedule(event, now + loads);
}

void
BaseCPU::traceFunctionsInternal(Addr pc)
{
    if (!debugSymbolTable)
        return;

    // If the pc has entered a different function, print the new
    // function's symbol and update the saved range. Otherwise do nothing.
    if (pc < currentFunctionStart || pc >= currentFunctionEnd) {
        string sym_str;
        bool found = debugSymbolTable->findNearestSymbol(pc, sym_str,
                                                         currentFunctionStart,
                                                         currentFunctionEnd);

        if (!found) {
            // no symbol found: use addr as label
            sym_str = csprintf("0x%x", pc);
            currentFunctionStart = pc;
            currentFunctionEnd = pc + 1;
        }

        ccprintf(*functionTraceStream, " (%d)\n%d: %s",
                 curTick() - functionEntryTick, curTick(), sym_str);
        functionEntryTick = curTick();
    }
}

bool
BaseCPU::waitForRemoteGDB() const
{
    return params()->wait_for_remote_gdb;
}