base.cc revision 12085:de78ea63e0ca
/*
 * Copyright (c) 2011-2012,2016 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * Copyright (c) 2011 Regents of the University of California
 * Copyright (c) 2013 Advanced Micro Devices, Inc.
 * Copyright (c) 2013 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 *          Nathan Binkert
 *          Rick Strong
 */

#include "cpu/base.hh"

#include <iostream>
#include <sstream>
#include <string>

#include "arch/tlb.hh"
#include "base/cprintf.hh"
#include "base/loader/symtab.hh"
#include "base/misc.hh"
#include "base/output.hh"
#include "base/trace.hh"
#include "cpu/checker/cpu.hh"
#include "cpu/cpuevent.hh"
#include "cpu/profile.hh"
#include "cpu/thread_context.hh"
#include "debug/Mwait.hh"
#include "debug/SyscallVerbose.hh"
#include "mem/page_table.hh"
#include "params/BaseCPU.hh"
#include "sim/clocked_object.hh"
#include "sim/full_system.hh"
#include "sim/process.hh"
#include "sim/sim_events.hh"
#include "sim/sim_exit.hh"
#include "sim/system.hh"

// Hack
#include "sim/stat_control.hh"

using namespace std;

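// Global list of all CPUs in the simulation. Entries are appended by
// the BaseCPU constructor, so the list is only complete once every CPU
// has been constructed.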
vector<BaseCPU *> BaseCPU::cpuList;

// This variable reflects the max number of threads in any CPU. Be
// careful to only use it once all the CPUs that you care about have
// been initialized.
int maxThreadsPerCPU = 1;

CPUProgressEvent::CPUProgressEvent(BaseCPU *_cpu, Tick ival)
    : Event(Event::Progress_Event_Pri), _interval(ival), lastNumInst(0),
      cpu(_cpu), _repeatEvent(true)
{
    if (_interval)
        cpu->schedule(this, curTick() + _interval);
}

void
CPUProgressEvent::process()
{
    Counter temp = cpu->totalOps();

    if (_repeatEvent)
        cpu->schedule(this, curTick() + _interval);

    if (cpu->switchedOut()) {
        return;
    }

#ifndef NDEBUG
    double ipc = double(temp - lastNumInst) / (_interval / cpu->clockPeriod());

    DPRINTFN("%s progress event, total committed:%i, progress insts committed: "
             "%lli, IPC: %0.8d\n", cpu->name(), temp, temp - lastNumInst,
             ipc);
    ipc = 0.0;
#else
    cprintf("%lli: %s progress event, total committed:%i, progress insts "
            "committed: %lli\n", curTick(), cpu->name(), temp,
            temp - lastNumInst);
#endif
    lastNumInst = temp;
}

const char *
CPUProgressEvent::description() const
{
    return "CPU Progress";
}

BaseCPU::BaseCPU(Params *p, bool is_checker)
    : MemObject(p), instCnt(0), _cpuId(p->cpu_id), _socketId(p->socket_id),
      _instMasterId(p->system->getMasterId(name() + ".inst")),
      _dataMasterId(p->system->getMasterId(name() + ".data")),
      _taskId(ContextSwitchTaskId::Unknown), _pid(invldPid),
      _switchedOut(p->switched_out), _cacheLineSize(p->system->cacheLineSize()),
      interrupts(p->interrupts), profileEvent(NULL),
      numThreads(p->numThreads), system(p->system),
      functionTraceStream(nullptr), currentFunctionStart(0),
      currentFunctionEnd(0), functionEntryTick(0),
      addressMonitor(p->numThreads),
      syscallRetryLatency(p->syscallRetryLatency)
{
    // if Python did not provide a valid ID, do it here
    if (_cpuId == -1) {
        _cpuId = cpuList.size();
    }

    // add self to global list of CPUs
    cpuList.push_back(this);

    DPRINTF(SyscallVerbose, "Constructing CPU with id %d, socket id %d\n",
            _cpuId, _socketId);

    if (numThreads > maxThreadsPerCPU)
        maxThreadsPerCPU = numThreads;

    // allocate per-thread instruction-based event queues
    comInstEventQueue = new EventQueue *[numThreads];
    for (ThreadID tid = 0; tid < numThreads; ++tid)
        comInstEventQueue[tid] =
            new EventQueue("instruction-based event queue");

    //
    // set up instruction-count-based termination events, if any
    //
    if (p->max_insts_any_thread != 0) {
        const char *cause = "a thread reached the max instruction count";
        for (ThreadID tid = 0; tid < numThreads; ++tid)
            scheduleInstStop(tid, p->max_insts_any_thread, cause);
    }

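    // scheduleInstStop() (defined later in this file) posts a
    // LocalSimLoopExitEvent on that thread's instruction-based event
    // queue, so the main simulation loop exits once the thread commits
    // the requested number of instructions.
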
    // Set up instruction-count-based termination events for SimPoints.
    // Typically there is more than one action point; Simulation.py is
    // responsible for taking the necessary actions upon exiting the
    // simulation loop.
    if (!p->simpoint_start_insts.empty()) {
        const char *cause = "simpoint starting point found";
        for (size_t i = 0; i < p->simpoint_start_insts.size(); ++i)
            scheduleInstStop(0, p->simpoint_start_insts[i], cause);
    }

    if (p->max_insts_all_threads != 0) {
        const char *cause = "all threads reached the max instruction count";

        // allocate & initialize shared downcounter: each event will
        // decrement this when triggered; simulation will terminate
        // when counter reaches 0
        int *counter = new int;
        *counter = numThreads;
        for (ThreadID tid = 0; tid < numThreads; ++tid) {
            Event *event = new CountedExitEvent(cause, *counter);
            comInstEventQueue[tid]->schedule(event, p->max_insts_all_threads);
        }
    }

    // allocate per-thread load-based event queues
    comLoadEventQueue = new EventQueue *[numThreads];
    for (ThreadID tid = 0; tid < numThreads; ++tid)
        comLoadEventQueue[tid] = new EventQueue("load-based event queue");

    //
    // set up load-count-based termination events, if any
    //
    if (p->max_loads_any_thread != 0) {
        const char *cause = "a thread reached the max load count";
        for (ThreadID tid = 0; tid < numThreads; ++tid)
            scheduleLoadStop(tid, p->max_loads_any_thread, cause);
    }

    if (p->max_loads_all_threads != 0) {
        const char *cause = "all threads reached the max load count";
        // allocate & initialize shared downcounter: each event will
        // decrement this when triggered; simulation will terminate
        // when counter reaches 0
        int *counter = new int;
        *counter = numThreads;
        for (ThreadID tid = 0; tid < numThreads; ++tid) {
            Event *event = new CountedExitEvent(cause, *counter);
            comLoadEventQueue[tid]->schedule(event, p->max_loads_all_threads);
        }
    }

    functionTracingEnabled = false;
    if (p->function_trace) {
        const string fname = csprintf("ftrace.%s", name());
        functionTraceStream = simout.findOrCreate(fname)->stream();

        currentFunctionStart = currentFunctionEnd = 0;
        functionEntryTick = p->function_trace_start;

        if (p->function_trace_start == 0) {
            functionTracingEnabled = true;
        } else {
            Event *event = new EventFunctionWrapper(
                [this]{ enableFunctionTrace(); }, name(), true);
            schedule(event, p->function_trace_start);
        }
    }

    // The interrupts should always be present unless this CPU is
    // switched in later or in case it is a checker CPU
    if (!params()->switched_out && !is_checker) {
        fatal_if(interrupts.size() != numThreads,
                 "CPU %s has %i interrupt controllers, but is expecting one "
                 "per thread (%i)\n",
                 name(), interrupts.size(), numThreads);
        for (ThreadID tid = 0; tid < numThreads; tid++)
            interrupts[tid]->setCPU(this);
    }

    if (FullSystem) {
        if (params()->profile)
            profileEvent = new ProfileEvent(this, params()->profile);
    }
    tracer = params()->tracer;

    if (params()->isa.size() != numThreads) {
        fatal("Number of ISAs (%i) assigned to the CPU does not equal number "
              "of threads (%i).\n", params()->isa.size(), numThreads);
    }
}

void
BaseCPU::enableFunctionTrace()
{
    functionTracingEnabled = true;
}

BaseCPU::~BaseCPU()
{
    delete profileEvent;
    delete[] comLoadEventQueue;
    delete[] comInstEventQueue;
}

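// Address-monitor/mwait protocol (sketch): armMonitor() records the
// virtual address a thread wants to watch; mwait()/mwaitAtomic() then
// latch the physical address of the containing cache line and put the
// monitor into the waiting state. AddressMonitor::doMonitor(), defined
// near the end of this file, wakes the core when that line is touched.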
void
BaseCPU::armMonitor(ThreadID tid, Addr address)
{
    assert(tid < numThreads);
    AddressMonitor &monitor = addressMonitor[tid];

    monitor.armed = true;
    monitor.vAddr = address;
    monitor.pAddr = 0x0;
    DPRINTF(Mwait, "[tid:%d] Armed monitor (vAddr=0x%lx)\n", tid, address);
}

bool
BaseCPU::mwait(ThreadID tid, PacketPtr pkt)
{
    assert(tid < numThreads);
    AddressMonitor &monitor = addressMonitor[tid];

    if (!monitor.gotWakeup) {
        int block_size = cacheLineSize();
        uint64_t mask = ~((uint64_t)(block_size - 1));

        assert(pkt->req->hasPaddr());
        monitor.pAddr = pkt->getAddr() & mask;
        monitor.waiting = true;

        DPRINTF(Mwait, "[tid:%d] mwait called (vAddr=0x%lx, "
                "line's paddr=0x%lx)\n", tid, monitor.vAddr, monitor.pAddr);
        return true;
    } else {
        monitor.gotWakeup = false;
        return false;
    }
}

void
BaseCPU::mwaitAtomic(ThreadID tid, ThreadContext *tc, TheISA::TLB *dtb)
{
    assert(tid < numThreads);
    AddressMonitor &monitor = addressMonitor[tid];

    Request req;
    Addr addr = monitor.vAddr;
    int block_size = cacheLineSize();
    uint64_t mask = ~((uint64_t)(block_size - 1));
    int size = block_size;

    // The address of the next line if it crosses a cache line boundary.
    Addr secondAddr = roundDown(addr + size - 1, block_size);

    if (secondAddr > addr)
        size = secondAddr - addr;

    req.setVirt(0, addr, size, 0x0, dataMasterId(), tc->instAddr());

    // translate to physical address
    Fault fault = dtb->translateAtomic(&req, tc, BaseTLB::Read);
    assert(fault == NoFault);

    monitor.pAddr = req.getPaddr() & mask;
    monitor.waiting = true;

    DPRINTF(Mwait, "[tid:%d] mwait called (vAddr=0x%lx, line's paddr=0x%lx)\n",
            tid, monitor.vAddr, monitor.pAddr);
}

void
BaseCPU::init()
{
    if (!params()->switched_out) {
        registerThreadContexts();

        verifyMemoryMode();
    }
}

void
BaseCPU::startup()
{
    if (FullSystem) {
        if (!params()->switched_out && profileEvent)
            schedule(profileEvent, curTick());
    }

    if (params()->progress_interval) {
        new CPUProgressEvent(this, params()->progress_interval);
    }

    // Assume the CPU starts operating instantaneously, without any
    // power-up latency.
    if (ClockedObject::pwrState() == Enums::PwrState::UNDEFINED)
        ClockedObject::pwrState(Enums::PwrState::ON);
}

ProbePoints::PMUUPtr
BaseCPU::pmuProbePoint(const char *name)
{
    ProbePoints::PMUUPtr ptr;
    ptr.reset(new ProbePoints::PMU(getProbeManager(), name));

    return ptr;
}

void
BaseCPU::regProbePoints()
{
    ppCycles = pmuProbePoint("Cycles");

    ppRetiredInsts = pmuProbePoint("RetiredInsts");
    ppRetiredLoads = pmuProbePoint("RetiredLoads");
    ppRetiredStores = pmuProbePoint("RetiredStores");
    ppRetiredBranches = pmuProbePoint("RetiredBranches");
}

void
BaseCPU::probeInstCommit(const StaticInstPtr &inst)
{
    if (!inst->isMicroop() || inst->isLastMicroop())
        ppRetiredInsts->notify(1);

    if (inst->isLoad())
        ppRetiredLoads->notify(1);

    if (inst->isStore())
        ppRetiredStores->notify(1);

    if (inst->isControl())
        ppRetiredBranches->notify(1);
}

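// PMU models can attach listeners to the probe points registered above
// (e.g. "RetiredInsts") through the probe manager. CPU models are
// expected to call probeInstCommit() for every committed microop;
// ppRetiredInsts filters so that only whole instructions are counted.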
void
BaseCPU::regStats()
{
    MemObject::regStats();

    using namespace Stats;

    numCycles
        .name(name() + ".numCycles")
        .desc("number of cpu cycles simulated")
        ;

    numWorkItemsStarted
        .name(name() + ".numWorkItemsStarted")
        .desc("number of work items this cpu started")
        ;

    numWorkItemsCompleted
        .name(name() + ".numWorkItemsCompleted")
        .desc("number of work items this cpu completed")
        ;

    int size = threadContexts.size();
    if (size > 1) {
        for (int i = 0; i < size; ++i) {
            stringstream namestr;
            ccprintf(namestr, "%s.ctx%d", name(), i);
            threadContexts[i]->regStats(namestr.str());
        }
    } else if (size == 1)
        threadContexts[0]->regStats(name());
}

BaseMasterPort &
BaseCPU::getMasterPort(const string &if_name, PortID idx)
{
    // Get the right port based on name. This applies to all the
    // subclasses of the base CPU and relies on their implementation
    // of getDataPort and getInstPort. In all cases these methods
    // return a MasterPort reference.
    if (if_name == "dcache_port")
        return getDataPort();
    else if (if_name == "icache_port")
        return getInstPort();
    else
        return MemObject::getMasterPort(if_name, idx);
}

void
BaseCPU::registerThreadContexts()
{
    assert(system->multiThread || numThreads == 1);

    ThreadID size = threadContexts.size();
    for (ThreadID tid = 0; tid < size; ++tid) {
        ThreadContext *tc = threadContexts[tid];

        if (system->multiThread) {
            tc->setContextId(system->registerThreadContext(tc));
        } else {
            tc->setContextId(system->registerThreadContext(tc, _cpuId));
        }

        if (!FullSystem)
            tc->getProcessPtr()->assignThreadContext(tc->contextId());
    }
}

int
BaseCPU::findContext(ThreadContext *tc)
{
    ThreadID size = threadContexts.size();
    for (ThreadID tid = 0; tid < size; ++tid) {
        if (tc == threadContexts[tid])
            return tid;
    }
    return 0;
}

void
BaseCPU::activateContext(ThreadID thread_num)
{
    // For any active thread running, update CPU power state to active (ON)
    ClockedObject::pwrState(Enums::PwrState::ON);
}

void
BaseCPU::suspendContext(ThreadID thread_num)
{
    // Check if all threads are suspended
    for (auto t : threadContexts) {
        if (t->status() != ThreadContext::Suspended) {
            return;
        }
    }

    // All CPU threads suspended, enter lower power state for the CPU
    ClockedObject::pwrState(Enums::PwrState::CLK_GATED);
}

void
BaseCPU::switchOut()
{
    assert(!_switchedOut);
    _switchedOut = true;
    if (profileEvent && profileEvent->scheduled())
        deschedule(profileEvent);

    // Flush all TLBs in the CPU to avoid having stale translations if
    // it gets switched in later.
    flushTLBs();
}

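// A typical CPU switch looks like the following sketch (draining and
// resuming the simulation is handled by the caller, e.g. the Python
// switchCpus() helper):
//
//     oldCPU->switchOut();
//     newCPU->takeOverFrom(oldCPU);
//
// takeOverFrom() moves thread contexts, interrupt controllers, and port
// bindings from the old CPU over to this one.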
void
BaseCPU::takeOverFrom(BaseCPU *oldCPU)
{
    assert(threadContexts.size() == oldCPU->threadContexts.size());
    assert(_cpuId == oldCPU->cpuId());
    assert(_switchedOut);
    assert(oldCPU != this);
    _pid = oldCPU->getPid();
    _taskId = oldCPU->taskId();
    _switchedOut = false;

    ThreadID size = threadContexts.size();
    for (ThreadID i = 0; i < size; ++i) {
        ThreadContext *newTC = threadContexts[i];
        ThreadContext *oldTC = oldCPU->threadContexts[i];

        newTC->takeOverFrom(oldTC);

        CpuEvent::replaceThreadContext(oldTC, newTC);

        assert(newTC->contextId() == oldTC->contextId());
        assert(newTC->threadId() == oldTC->threadId());
        system->replaceThreadContext(newTC, newTC->contextId());

        /* This code no longer works since the zero register (e.g.,
         * r31 on Alpha) doesn't necessarily contain zero at this
         * point.
           if (DTRACE(Context))
               ThreadContext::compare(oldTC, newTC);
         */

        BaseMasterPort *old_itb_port = oldTC->getITBPtr()->getMasterPort();
        BaseMasterPort *old_dtb_port = oldTC->getDTBPtr()->getMasterPort();
        BaseMasterPort *new_itb_port = newTC->getITBPtr()->getMasterPort();
        BaseMasterPort *new_dtb_port = newTC->getDTBPtr()->getMasterPort();

        // Move over any table walker ports if they exist
        if (new_itb_port) {
            assert(!new_itb_port->isConnected());
            assert(old_itb_port);
            assert(old_itb_port->isConnected());
            BaseSlavePort &slavePort = old_itb_port->getSlavePort();
            old_itb_port->unbind();
            new_itb_port->bind(slavePort);
        }
        if (new_dtb_port) {
            assert(!new_dtb_port->isConnected());
            assert(old_dtb_port);
            assert(old_dtb_port->isConnected());
            BaseSlavePort &slavePort = old_dtb_port->getSlavePort();
            old_dtb_port->unbind();
            new_dtb_port->bind(slavePort);
        }
        newTC->getITBPtr()->takeOverFrom(oldTC->getITBPtr());
        newTC->getDTBPtr()->takeOverFrom(oldTC->getDTBPtr());

        // Check whether or not we have to transfer CheckerCPU
        // objects over in the switch
        CheckerCPU *oldChecker = oldTC->getCheckerCpuPtr();
        CheckerCPU *newChecker = newTC->getCheckerCpuPtr();
        if (oldChecker && newChecker) {
            BaseMasterPort *old_checker_itb_port =
                oldChecker->getITBPtr()->getMasterPort();
            BaseMasterPort *old_checker_dtb_port =
                oldChecker->getDTBPtr()->getMasterPort();
            BaseMasterPort *new_checker_itb_port =
                newChecker->getITBPtr()->getMasterPort();
            BaseMasterPort *new_checker_dtb_port =
                newChecker->getDTBPtr()->getMasterPort();

            newChecker->getITBPtr()->takeOverFrom(oldChecker->getITBPtr());
            newChecker->getDTBPtr()->takeOverFrom(oldChecker->getDTBPtr());

            // Move over any table walker ports if they exist for checker
            if (new_checker_itb_port) {
                assert(!new_checker_itb_port->isConnected());
                assert(old_checker_itb_port);
                assert(old_checker_itb_port->isConnected());
                BaseSlavePort &slavePort =
                    old_checker_itb_port->getSlavePort();
                old_checker_itb_port->unbind();
                new_checker_itb_port->bind(slavePort);
            }
            if (new_checker_dtb_port) {
                assert(!new_checker_dtb_port->isConnected());
                assert(old_checker_dtb_port);
                assert(old_checker_dtb_port->isConnected());
                BaseSlavePort &slavePort =
                    old_checker_dtb_port->getSlavePort();
                old_checker_dtb_port->unbind();
                new_checker_dtb_port->bind(slavePort);
            }
        }
    }

    interrupts = oldCPU->interrupts;
    for (ThreadID tid = 0; tid < numThreads; tid++) {
        interrupts[tid]->setCPU(this);
    }
    oldCPU->interrupts.clear();

    if (FullSystem) {
        for (ThreadID i = 0; i < size; ++i)
            threadContexts[i]->profileClear();

        if (profileEvent)
            schedule(profileEvent, curTick());
    }

    // All CPUs have an instruction and a data port, and the new CPU's
    // ports are dangling while the old CPU has its ports connected
    // already. Unbind the old CPU and then bind the ports of the one
    // we are switching to.
    assert(!getInstPort().isConnected());
    assert(oldCPU->getInstPort().isConnected());
    BaseSlavePort &inst_peer_port = oldCPU->getInstPort().getSlavePort();
    oldCPU->getInstPort().unbind();
    getInstPort().bind(inst_peer_port);

    assert(!getDataPort().isConnected());
    assert(oldCPU->getDataPort().isConnected());
    BaseSlavePort &data_peer_port = oldCPU->getDataPort().getSlavePort();
    oldCPU->getDataPort().unbind();
    getDataPort().bind(data_peer_port);
}

void
BaseCPU::flushTLBs()
{
    for (ThreadID i = 0; i < threadContexts.size(); ++i) {
        ThreadContext &tc(*threadContexts[i]);
        CheckerCPU *checker(tc.getCheckerCpuPtr());

        tc.getITBPtr()->flushAll();
        tc.getDTBPtr()->flushAll();
        if (checker) {
            checker->getITBPtr()->flushAll();
            checker->getDTBPtr()->flushAll();
        }
    }
}

BaseCPU::ProfileEvent::ProfileEvent(BaseCPU *_cpu, Tick _interval)
    : cpu(_cpu), interval(_interval)
{ }

void
BaseCPU::ProfileEvent::process()
{
    ThreadID size = cpu->threadContexts.size();
    for (ThreadID i = 0; i < size; ++i) {
        ThreadContext *tc = cpu->threadContexts[i];
        tc->profileSample();
    }

    cpu->schedule(this, curTick() + interval);
}

void
BaseCPU::serialize(CheckpointOut &cp) const
{
    SERIALIZE_SCALAR(instCnt);

    if (!_switchedOut) {
        /* Unlike _pid, _taskId is not serialized, as task ids are
         * dynamically assigned unique ids that are only meaningful for
         * the duration of a specific run. We would need to serialize the
         * entire taskMap in system. */
        SERIALIZE_SCALAR(_pid);

        // Serialize the threads; this is done by the CPU implementation.
        for (ThreadID i = 0; i < numThreads; ++i) {
            ScopedCheckpointSection sec(cp, csprintf("xc.%i", i));
            interrupts[i]->serialize(cp);
            serializeThread(cp, i);
        }
    }
}

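// Each thread's state lands in its own checkpoint subsection named
// "xc.<tid>" (e.g. "xc.0", "xc.1"), so unserialize() below must walk
// exactly the same section layout that serialize() produced.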
void
BaseCPU::unserialize(CheckpointIn &cp)
{
    UNSERIALIZE_SCALAR(instCnt);

    if (!_switchedOut) {
        UNSERIALIZE_SCALAR(_pid);

        // Unserialize the threads; this is done by the CPU implementation.
        for (ThreadID i = 0; i < numThreads; ++i) {
            ScopedCheckpointSection sec(cp, csprintf("xc.%i", i));
            interrupts[i]->unserialize(cp);
            unserializeThread(cp, i);
        }
    }
}

void
BaseCPU::scheduleInstStop(ThreadID tid, Counter insts, const char *cause)
{
    const Tick now(comInstEventQueue[tid]->getCurTick());
    Event *event(new LocalSimLoopExitEvent(cause, 0));

    comInstEventQueue[tid]->schedule(event, now + insts);
}

uint64_t
BaseCPU::getCurrentInstCount(ThreadID tid)
{
    return Tick(comInstEventQueue[tid]->getCurTick());
}

AddressMonitor::AddressMonitor()
{
    armed = false;
    waiting = false;
    gotWakeup = false;
}

bool
AddressMonitor::doMonitor(PacketPtr pkt)
{
    assert(pkt->req->hasPaddr());
    if (armed && waiting) {
        if (pAddr == pkt->getAddr()) {
            DPRINTF(Mwait, "pAddr=0x%lx invalidated: waking up core\n",
                    pkt->getAddr());
            waiting = false;
            return true;
        }
    }
    return false;
}

void
BaseCPU::scheduleLoadStop(ThreadID tid, Counter loads, const char *cause)
{
    const Tick now(comLoadEventQueue[tid]->getCurTick());
    Event *event(new LocalSimLoopExitEvent(cause, 0));

    comLoadEventQueue[tid]->schedule(event, now + loads);
}

void
BaseCPU::traceFunctionsInternal(Addr pc)
{
    if (!debugSymbolTable)
        return;

    // If the pc has entered a different function, print the new function
    // symbol and update the saved range; otherwise do nothing.
    if (pc < currentFunctionStart || pc >= currentFunctionEnd) {
        string sym_str;
        bool found = debugSymbolTable->findNearestSymbol(pc, sym_str,
                                                         currentFunctionStart,
                                                         currentFunctionEnd);

        if (!found) {
            // no symbol found: use addr as label
            sym_str = csprintf("0x%x", pc);
            currentFunctionStart = pc;
            currentFunctionEnd = pc + 1;
        }

        ccprintf(*functionTraceStream, " (%d)\n%d: %s",
                 curTick() - functionEntryTick, curTick(), sym_str);
        functionEntryTick = curTick();
    }
}
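
// Each completed line of the resulting ftrace.<cpu-name> output has the
// form "<entry tick>: <symbol> (<ticks spent in that function>)"; the
// time spent in a function is only known, and therefore only printed,
// once the next function is entered.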