// base.cc revision 11325
/*
 * Copyright (c) 2011-2012 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * Copyright (c) 2011 Regents of the University of California
 * Copyright (c) 2013 Advanced Micro Devices, Inc.
 * Copyright (c) 2013 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT 35 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 36 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 37 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 38 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 39 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 40 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 41 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 42 * 43 * Authors: Steve Reinhardt 44 * Nathan Binkert 45 * Rick Strong 46 */ 47 48#include <iostream> 49#include <sstream> 50#include <string> 51 52#include "arch/tlb.hh" 53#include "base/loader/symtab.hh" 54#include "base/cprintf.hh" 55#include "base/misc.hh" 56#include "base/output.hh" 57#include "base/trace.hh" 58#include "cpu/checker/cpu.hh" 59#include "cpu/base.hh" 60#include "cpu/cpuevent.hh" 61#include "cpu/profile.hh" 62#include "cpu/thread_context.hh" 63#include "debug/Mwait.hh" 64#include "debug/SyscallVerbose.hh" 65#include "mem/page_table.hh" 66#include "params/BaseCPU.hh" 67#include "sim/full_system.hh" 68#include "sim/process.hh" 69#include "sim/sim_events.hh" 70#include "sim/sim_exit.hh" 71#include "sim/system.hh" 72 73// Hack 74#include "sim/stat_control.hh" 75 76using namespace std; 77 78vector<BaseCPU *> BaseCPU::cpuList; 79 80// This variable reflects the max number of threads in any CPU. 
Be 81// careful to only use it once all the CPUs that you care about have 82// been initialized 83int maxThreadsPerCPU = 1; 84 85CPUProgressEvent::CPUProgressEvent(BaseCPU *_cpu, Tick ival) 86 : Event(Event::Progress_Event_Pri), _interval(ival), lastNumInst(0), 87 cpu(_cpu), _repeatEvent(true) 88{ 89 if (_interval) 90 cpu->schedule(this, curTick() + _interval); 91} 92 93void 94CPUProgressEvent::process() 95{ 96 Counter temp = cpu->totalOps(); 97 98 if (_repeatEvent) 99 cpu->schedule(this, curTick() + _interval); 100 101 if (cpu->switchedOut()) { 102 return; 103 } 104 105#ifndef NDEBUG 106 double ipc = double(temp - lastNumInst) / (_interval / cpu->clockPeriod()); 107 108 DPRINTFN("%s progress event, total committed:%i, progress insts committed: " 109 "%lli, IPC: %0.8d\n", cpu->name(), temp, temp - lastNumInst, 110 ipc); 111 ipc = 0.0; 112#else 113 cprintf("%lli: %s progress event, total committed:%i, progress insts " 114 "committed: %lli\n", curTick(), cpu->name(), temp, 115 temp - lastNumInst); 116#endif 117 lastNumInst = temp; 118} 119 120const char * 121CPUProgressEvent::description() const 122{ 123 return "CPU Progress"; 124} 125 126BaseCPU::BaseCPU(Params *p, bool is_checker) 127 : MemObject(p), instCnt(0), _cpuId(p->cpu_id), _socketId(p->socket_id), 128 _instMasterId(p->system->getMasterId(name() + ".inst")), 129 _dataMasterId(p->system->getMasterId(name() + ".data")), 130 _taskId(ContextSwitchTaskId::Unknown), _pid(invldPid), 131 _switchedOut(p->switched_out), _cacheLineSize(p->system->cacheLineSize()), 132 interrupts(p->interrupts), profileEvent(NULL), 133 numThreads(p->numThreads), system(p->system), 134 functionTraceStream(nullptr), currentFunctionStart(0), 135 currentFunctionEnd(0), functionEntryTick(0), 136 addressMonitor(p->numThreads) 137{ 138 // if Python did not provide a valid ID, do it here 139 if (_cpuId == -1 ) { 140 _cpuId = cpuList.size(); 141 } 142 143 // add self to global list of CPUs 144 cpuList.push_back(this); 145 146 
DPRINTF(SyscallVerbose, "Constructing CPU with id %d, socket id %d\n", 147 _cpuId, _socketId); 148 149 if (numThreads > maxThreadsPerCPU) 150 maxThreadsPerCPU = numThreads; 151 152 // allocate per-thread instruction-based event queues 153 comInstEventQueue = new EventQueue *[numThreads]; 154 for (ThreadID tid = 0; tid < numThreads; ++tid) 155 comInstEventQueue[tid] = 156 new EventQueue("instruction-based event queue"); 157 158 // 159 // set up instruction-count-based termination events, if any 160 // 161 if (p->max_insts_any_thread != 0) { 162 const char *cause = "a thread reached the max instruction count"; 163 for (ThreadID tid = 0; tid < numThreads; ++tid) 164 scheduleInstStop(tid, p->max_insts_any_thread, cause); 165 } 166 167 // Set up instruction-count-based termination events for SimPoints 168 // Typically, there are more than one action points. 169 // Simulation.py is responsible to take the necessary actions upon 170 // exitting the simulation loop. 171 if (!p->simpoint_start_insts.empty()) { 172 const char *cause = "simpoint starting point found"; 173 for (size_t i = 0; i < p->simpoint_start_insts.size(); ++i) 174 scheduleInstStop(0, p->simpoint_start_insts[i], cause); 175 } 176 177 if (p->max_insts_all_threads != 0) { 178 const char *cause = "all threads reached the max instruction count"; 179 180 // allocate & initialize shared downcounter: each event will 181 // decrement this when triggered; simulation will terminate 182 // when counter reaches 0 183 int *counter = new int; 184 *counter = numThreads; 185 for (ThreadID tid = 0; tid < numThreads; ++tid) { 186 Event *event = new CountedExitEvent(cause, *counter); 187 comInstEventQueue[tid]->schedule(event, p->max_insts_all_threads); 188 } 189 } 190 191 // allocate per-thread load-based event queues 192 comLoadEventQueue = new EventQueue *[numThreads]; 193 for (ThreadID tid = 0; tid < numThreads; ++tid) 194 comLoadEventQueue[tid] = new EventQueue("load-based event queue"); 195 196 // 197 // set up 
instruction-count-based termination events, if any 198 // 199 if (p->max_loads_any_thread != 0) { 200 const char *cause = "a thread reached the max load count"; 201 for (ThreadID tid = 0; tid < numThreads; ++tid) 202 scheduleLoadStop(tid, p->max_loads_any_thread, cause); 203 } 204 205 if (p->max_loads_all_threads != 0) { 206 const char *cause = "all threads reached the max load count"; 207 // allocate & initialize shared downcounter: each event will 208 // decrement this when triggered; simulation will terminate 209 // when counter reaches 0 210 int *counter = new int; 211 *counter = numThreads; 212 for (ThreadID tid = 0; tid < numThreads; ++tid) { 213 Event *event = new CountedExitEvent(cause, *counter); 214 comLoadEventQueue[tid]->schedule(event, p->max_loads_all_threads); 215 } 216 } 217 218 functionTracingEnabled = false; 219 if (p->function_trace) { 220 const string fname = csprintf("ftrace.%s", name()); 221 functionTraceStream = simout.find(fname); 222 if (!functionTraceStream) 223 functionTraceStream = simout.create(fname); 224 225 currentFunctionStart = currentFunctionEnd = 0; 226 functionEntryTick = p->function_trace_start; 227 228 if (p->function_trace_start == 0) { 229 functionTracingEnabled = true; 230 } else { 231 typedef EventWrapper<BaseCPU, &BaseCPU::enableFunctionTrace> wrap; 232 Event *event = new wrap(this, true); 233 schedule(event, p->function_trace_start); 234 } 235 } 236 237 // The interrupts should always be present unless this CPU is 238 // switched in later or in case it is a checker CPU 239 if (!params()->switched_out && !is_checker) { 240 fatal_if(interrupts.size() != numThreads, 241 "CPU %s has %i interrupt controllers, but is expecting one " 242 "per thread (%i)\n", 243 name(), interrupts.size(), numThreads); 244 for (ThreadID tid = 0; tid < numThreads; tid++) 245 interrupts[tid]->setCPU(this); 246 } 247 248 if (FullSystem) { 249 if (params()->profile) 250 profileEvent = new ProfileEvent(this, params()->profile); 251 } 252 tracer = 
params()->tracer; 253 254 if (params()->isa.size() != numThreads) { 255 fatal("Number of ISAs (%i) assigned to the CPU does not equal number " 256 "of threads (%i).\n", params()->isa.size(), numThreads); 257 } 258} 259 260void 261BaseCPU::enableFunctionTrace() 262{ 263 functionTracingEnabled = true; 264} 265 266BaseCPU::~BaseCPU() 267{ 268 delete profileEvent; 269 delete[] comLoadEventQueue; 270 delete[] comInstEventQueue; 271} 272 273void 274BaseCPU::armMonitor(ThreadID tid, Addr address) 275{ 276 assert(tid < numThreads); 277 AddressMonitor &monitor = addressMonitor[tid]; 278 279 monitor.armed = true; 280 monitor.vAddr = address; 281 monitor.pAddr = 0x0; 282 DPRINTF(Mwait,"[tid:%d] Armed monitor (vAddr=0x%lx)\n", tid, address); 283} 284 285bool 286BaseCPU::mwait(ThreadID tid, PacketPtr pkt) 287{ 288 assert(tid < numThreads); 289 AddressMonitor &monitor = addressMonitor[tid]; 290 291 if (!monitor.gotWakeup) { 292 int block_size = cacheLineSize(); 293 uint64_t mask = ~((uint64_t)(block_size - 1)); 294 295 assert(pkt->req->hasPaddr()); 296 monitor.pAddr = pkt->getAddr() & mask; 297 monitor.waiting = true; 298 299 DPRINTF(Mwait,"[tid:%d] mwait called (vAddr=0x%lx, " 300 "line's paddr=0x%lx)\n", tid, monitor.vAddr, monitor.pAddr); 301 return true; 302 } else { 303 monitor.gotWakeup = false; 304 return false; 305 } 306} 307 308void 309BaseCPU::mwaitAtomic(ThreadID tid, ThreadContext *tc, TheISA::TLB *dtb) 310{ 311 assert(tid < numThreads); 312 AddressMonitor &monitor = addressMonitor[tid]; 313 314 Request req; 315 Addr addr = monitor.vAddr; 316 int block_size = cacheLineSize(); 317 uint64_t mask = ~((uint64_t)(block_size - 1)); 318 int size = block_size; 319 320 //The address of the next line if it crosses a cache line boundary. 
321 Addr secondAddr = roundDown(addr + size - 1, block_size); 322 323 if (secondAddr > addr) 324 size = secondAddr - addr; 325 326 req.setVirt(0, addr, size, 0x0, dataMasterId(), tc->instAddr()); 327 328 // translate to physical address 329 Fault fault = dtb->translateAtomic(&req, tc, BaseTLB::Read); 330 assert(fault == NoFault); 331 332 monitor.pAddr = req.getPaddr() & mask; 333 monitor.waiting = true; 334 335 DPRINTF(Mwait,"[tid:%d] mwait called (vAddr=0x%lx, line's paddr=0x%lx)\n", 336 tid, monitor.vAddr, monitor.pAddr); 337} 338 339void 340BaseCPU::init() 341{ 342 if (!params()->switched_out) { 343 registerThreadContexts(); 344 345 verifyMemoryMode(); 346 } 347} 348 349void 350BaseCPU::startup() 351{ 352 if (FullSystem) { 353 if (!params()->switched_out && profileEvent) 354 schedule(profileEvent, curTick()); 355 } 356 357 if (params()->progress_interval) { 358 new CPUProgressEvent(this, params()->progress_interval); 359 } 360} 361 362ProbePoints::PMUUPtr 363BaseCPU::pmuProbePoint(const char *name) 364{ 365 ProbePoints::PMUUPtr ptr; 366 ptr.reset(new ProbePoints::PMU(getProbeManager(), name)); 367 368 return ptr; 369} 370 371void 372BaseCPU::regProbePoints() 373{ 374 ppCycles = pmuProbePoint("Cycles"); 375 376 ppRetiredInsts = pmuProbePoint("RetiredInsts"); 377 ppRetiredLoads = pmuProbePoint("RetiredLoads"); 378 ppRetiredStores = pmuProbePoint("RetiredStores"); 379 ppRetiredBranches = pmuProbePoint("RetiredBranches"); 380} 381 382void 383BaseCPU::probeInstCommit(const StaticInstPtr &inst) 384{ 385 if (!inst->isMicroop() || inst->isLastMicroop()) 386 ppRetiredInsts->notify(1); 387 388 389 if (inst->isLoad()) 390 ppRetiredLoads->notify(1); 391 392 if (inst->isStore()) 393 ppRetiredStores->notify(1); 394 395 if (inst->isControl()) 396 ppRetiredBranches->notify(1); 397} 398 399void 400BaseCPU::regStats() 401{ 402 using namespace Stats; 403 404 numCycles 405 .name(name() + ".numCycles") 406 .desc("number of cpu cycles simulated") 407 ; 408 409 numWorkItemsStarted 410 
.name(name() + ".numWorkItemsStarted") 411 .desc("number of work items this cpu started") 412 ; 413 414 numWorkItemsCompleted 415 .name(name() + ".numWorkItemsCompleted") 416 .desc("number of work items this cpu completed") 417 ; 418 419 int size = threadContexts.size(); 420 if (size > 1) { 421 for (int i = 0; i < size; ++i) { 422 stringstream namestr; 423 ccprintf(namestr, "%s.ctx%d", name(), i); 424 threadContexts[i]->regStats(namestr.str()); 425 } 426 } else if (size == 1) 427 threadContexts[0]->regStats(name()); 428} 429 430BaseMasterPort & 431BaseCPU::getMasterPort(const string &if_name, PortID idx) 432{ 433 // Get the right port based on name. This applies to all the 434 // subclasses of the base CPU and relies on their implementation 435 // of getDataPort and getInstPort. In all cases there methods 436 // return a MasterPort pointer. 437 if (if_name == "dcache_port") 438 return getDataPort(); 439 else if (if_name == "icache_port") 440 return getInstPort(); 441 else 442 return MemObject::getMasterPort(if_name, idx); 443} 444 445void 446BaseCPU::registerThreadContexts() 447{ 448 assert(system->multiThread || numThreads == 1); 449 450 ThreadID size = threadContexts.size(); 451 for (ThreadID tid = 0; tid < size; ++tid) { 452 ThreadContext *tc = threadContexts[tid]; 453 454 if (system->multiThread) { 455 tc->setContextId(system->registerThreadContext(tc)); 456 } else { 457 tc->setContextId(system->registerThreadContext(tc, _cpuId)); 458 } 459 460 if (!FullSystem) 461 tc->getProcessPtr()->assignThreadContext(tc->contextId()); 462 } 463} 464 465 466int 467BaseCPU::findContext(ThreadContext *tc) 468{ 469 ThreadID size = threadContexts.size(); 470 for (ThreadID tid = 0; tid < size; ++tid) { 471 if (tc == threadContexts[tid]) 472 return tid; 473 } 474 return 0; 475} 476 477void 478BaseCPU::switchOut() 479{ 480 assert(!_switchedOut); 481 _switchedOut = true; 482 if (profileEvent && profileEvent->scheduled()) 483 deschedule(profileEvent); 484 485 // Flush all TLBs in 
the CPU to avoid having stale translations if 486 // it gets switched in later. 487 flushTLBs(); 488} 489 490void 491BaseCPU::takeOverFrom(BaseCPU *oldCPU) 492{ 493 assert(threadContexts.size() == oldCPU->threadContexts.size()); 494 assert(_cpuId == oldCPU->cpuId()); 495 assert(_switchedOut); 496 assert(oldCPU != this); 497 _pid = oldCPU->getPid(); 498 _taskId = oldCPU->taskId(); 499 _switchedOut = false; 500 501 ThreadID size = threadContexts.size(); 502 for (ThreadID i = 0; i < size; ++i) { 503 ThreadContext *newTC = threadContexts[i]; 504 ThreadContext *oldTC = oldCPU->threadContexts[i]; 505 506 newTC->takeOverFrom(oldTC); 507 508 CpuEvent::replaceThreadContext(oldTC, newTC); 509 510 assert(newTC->contextId() == oldTC->contextId()); 511 assert(newTC->threadId() == oldTC->threadId()); 512 system->replaceThreadContext(newTC, newTC->contextId()); 513 514 /* This code no longer works since the zero register (e.g., 515 * r31 on Alpha) doesn't necessarily contain zero at this 516 * point. 517 if (DTRACE(Context)) 518 ThreadContext::compare(oldTC, newTC); 519 */ 520 521 BaseMasterPort *old_itb_port = oldTC->getITBPtr()->getMasterPort(); 522 BaseMasterPort *old_dtb_port = oldTC->getDTBPtr()->getMasterPort(); 523 BaseMasterPort *new_itb_port = newTC->getITBPtr()->getMasterPort(); 524 BaseMasterPort *new_dtb_port = newTC->getDTBPtr()->getMasterPort(); 525 526 // Move over any table walker ports if they exist 527 if (new_itb_port) { 528 assert(!new_itb_port->isConnected()); 529 assert(old_itb_port); 530 assert(old_itb_port->isConnected()); 531 BaseSlavePort &slavePort = old_itb_port->getSlavePort(); 532 old_itb_port->unbind(); 533 new_itb_port->bind(slavePort); 534 } 535 if (new_dtb_port) { 536 assert(!new_dtb_port->isConnected()); 537 assert(old_dtb_port); 538 assert(old_dtb_port->isConnected()); 539 BaseSlavePort &slavePort = old_dtb_port->getSlavePort(); 540 old_dtb_port->unbind(); 541 new_dtb_port->bind(slavePort); 542 } 543 
newTC->getITBPtr()->takeOverFrom(oldTC->getITBPtr()); 544 newTC->getDTBPtr()->takeOverFrom(oldTC->getDTBPtr()); 545 546 // Checker whether or not we have to transfer CheckerCPU 547 // objects over in the switch 548 CheckerCPU *oldChecker = oldTC->getCheckerCpuPtr(); 549 CheckerCPU *newChecker = newTC->getCheckerCpuPtr(); 550 if (oldChecker && newChecker) { 551 BaseMasterPort *old_checker_itb_port = 552 oldChecker->getITBPtr()->getMasterPort(); 553 BaseMasterPort *old_checker_dtb_port = 554 oldChecker->getDTBPtr()->getMasterPort(); 555 BaseMasterPort *new_checker_itb_port = 556 newChecker->getITBPtr()->getMasterPort(); 557 BaseMasterPort *new_checker_dtb_port = 558 newChecker->getDTBPtr()->getMasterPort(); 559 560 newChecker->getITBPtr()->takeOverFrom(oldChecker->getITBPtr()); 561 newChecker->getDTBPtr()->takeOverFrom(oldChecker->getDTBPtr()); 562 563 // Move over any table walker ports if they exist for checker 564 if (new_checker_itb_port) { 565 assert(!new_checker_itb_port->isConnected()); 566 assert(old_checker_itb_port); 567 assert(old_checker_itb_port->isConnected()); 568 BaseSlavePort &slavePort = 569 old_checker_itb_port->getSlavePort(); 570 old_checker_itb_port->unbind(); 571 new_checker_itb_port->bind(slavePort); 572 } 573 if (new_checker_dtb_port) { 574 assert(!new_checker_dtb_port->isConnected()); 575 assert(old_checker_dtb_port); 576 assert(old_checker_dtb_port->isConnected()); 577 BaseSlavePort &slavePort = 578 old_checker_dtb_port->getSlavePort(); 579 old_checker_dtb_port->unbind(); 580 new_checker_dtb_port->bind(slavePort); 581 } 582 } 583 } 584 585 interrupts = oldCPU->interrupts; 586 for (ThreadID tid = 0; tid < numThreads; tid++) { 587 interrupts[tid]->setCPU(this); 588 } 589 oldCPU->interrupts.clear(); 590 591 if (FullSystem) { 592 for (ThreadID i = 0; i < size; ++i) 593 threadContexts[i]->profileClear(); 594 595 if (profileEvent) 596 schedule(profileEvent, curTick()); 597 } 598 599 // All CPUs have an instruction and a data port, and the new 
CPU's 600 // ports are dangling while the old CPU has its ports connected 601 // already. Unbind the old CPU and then bind the ports of the one 602 // we are switching to. 603 assert(!getInstPort().isConnected()); 604 assert(oldCPU->getInstPort().isConnected()); 605 BaseSlavePort &inst_peer_port = oldCPU->getInstPort().getSlavePort(); 606 oldCPU->getInstPort().unbind(); 607 getInstPort().bind(inst_peer_port); 608 609 assert(!getDataPort().isConnected()); 610 assert(oldCPU->getDataPort().isConnected()); 611 BaseSlavePort &data_peer_port = oldCPU->getDataPort().getSlavePort(); 612 oldCPU->getDataPort().unbind(); 613 getDataPort().bind(data_peer_port); 614} 615 616void 617BaseCPU::flushTLBs() 618{ 619 for (ThreadID i = 0; i < threadContexts.size(); ++i) { 620 ThreadContext &tc(*threadContexts[i]); 621 CheckerCPU *checker(tc.getCheckerCpuPtr()); 622 623 tc.getITBPtr()->flushAll(); 624 tc.getDTBPtr()->flushAll(); 625 if (checker) { 626 checker->getITBPtr()->flushAll(); 627 checker->getDTBPtr()->flushAll(); 628 } 629 } 630} 631 632 633BaseCPU::ProfileEvent::ProfileEvent(BaseCPU *_cpu, Tick _interval) 634 : cpu(_cpu), interval(_interval) 635{ } 636 637void 638BaseCPU::ProfileEvent::process() 639{ 640 ThreadID size = cpu->threadContexts.size(); 641 for (ThreadID i = 0; i < size; ++i) { 642 ThreadContext *tc = cpu->threadContexts[i]; 643 tc->profileSample(); 644 } 645 646 cpu->schedule(this, curTick() + interval); 647} 648 649void 650BaseCPU::serialize(CheckpointOut &cp) const 651{ 652 SERIALIZE_SCALAR(instCnt); 653 654 if (!_switchedOut) { 655 /* Unlike _pid, _taskId is not serialized, as they are dynamically 656 * assigned unique ids that are only meaningful for the duration of 657 * a specific run. We will need to serialize the entire taskMap in 658 * system. */ 659 SERIALIZE_SCALAR(_pid); 660 661 // Serialize the threads, this is done by the CPU implementation. 
662 for (ThreadID i = 0; i < numThreads; ++i) { 663 ScopedCheckpointSection sec(cp, csprintf("xc.%i", i)); 664 interrupts[i]->serialize(cp); 665 serializeThread(cp, i); 666 } 667 } 668} 669 670void 671BaseCPU::unserialize(CheckpointIn &cp) 672{ 673 UNSERIALIZE_SCALAR(instCnt); 674 675 if (!_switchedOut) { 676 UNSERIALIZE_SCALAR(_pid); 677 678 // Unserialize the threads, this is done by the CPU implementation. 679 for (ThreadID i = 0; i < numThreads; ++i) { 680 ScopedCheckpointSection sec(cp, csprintf("xc.%i", i)); 681 interrupts[i]->unserialize(cp); 682 unserializeThread(cp, i); 683 } 684 } 685} 686 687void 688BaseCPU::scheduleInstStop(ThreadID tid, Counter insts, const char *cause) 689{ 690 const Tick now(comInstEventQueue[tid]->getCurTick()); 691 Event *event(new LocalSimLoopExitEvent(cause, 0)); 692 693 comInstEventQueue[tid]->schedule(event, now + insts); 694} 695 696AddressMonitor::AddressMonitor() { 697 armed = false; 698 waiting = false; 699 gotWakeup = false; 700} 701 702bool AddressMonitor::doMonitor(PacketPtr pkt) { 703 assert(pkt->req->hasPaddr()); 704 if (armed && waiting) { 705 if (pAddr == pkt->getAddr()) { 706 DPRINTF(Mwait,"pAddr=0x%lx invalidated: waking up core\n", 707 pkt->getAddr()); 708 waiting = false; 709 return true; 710 } 711 } 712 return false; 713} 714 715void 716BaseCPU::scheduleLoadStop(ThreadID tid, Counter loads, const char *cause) 717{ 718 const Tick now(comLoadEventQueue[tid]->getCurTick()); 719 Event *event(new LocalSimLoopExitEvent(cause, 0)); 720 721 comLoadEventQueue[tid]->schedule(event, now + loads); 722} 723 724 725void 726BaseCPU::traceFunctionsInternal(Addr pc) 727{ 728 if (!debugSymbolTable) 729 return; 730 731 // if pc enters different function, print new function symbol and 732 // update saved range. Otherwise do nothing. 
733 if (pc < currentFunctionStart || pc >= currentFunctionEnd) { 734 string sym_str; 735 bool found = debugSymbolTable->findNearestSymbol(pc, sym_str, 736 currentFunctionStart, 737 currentFunctionEnd); 738 739 if (!found) { 740 // no symbol found: use addr as label 741 sym_str = csprintf("0x%x", pc); 742 currentFunctionStart = pc; 743 currentFunctionEnd = pc + 1; 744 } 745 746 ccprintf(*functionTraceStream, " (%d)\n%d: %s", 747 curTick() - functionEntryTick, curTick(), sym_str); 748 functionEntryTick = curTick(); 749 } 750} 751