61#include "cpu/cpuevent.hh" 62#include "cpu/profile.hh" 63#include "cpu/thread_context.hh" 64#include "debug/Mwait.hh" 65#include "debug/SyscallVerbose.hh" 66#include "mem/page_table.hh" 67#include "params/BaseCPU.hh" 68#include "sim/clocked_object.hh" 69#include "sim/full_system.hh" 70#include "sim/process.hh" 71#include "sim/sim_events.hh" 72#include "sim/sim_exit.hh" 73#include "sim/system.hh" 74 75// Hack 76#include "sim/stat_control.hh" 77 78using namespace std; 79 80vector<BaseCPU *> BaseCPU::cpuList; 81 82// This variable reflects the max number of threads in any CPU. Be 83// careful to only use it once all the CPUs that you care about have 84// been initialized 85int maxThreadsPerCPU = 1; 86 87CPUProgressEvent::CPUProgressEvent(BaseCPU *_cpu, Tick ival) 88 : Event(Event::Progress_Event_Pri), _interval(ival), lastNumInst(0), 89 cpu(_cpu), _repeatEvent(true) 90{ 91 if (_interval) 92 cpu->schedule(this, curTick() + _interval); 93} 94 95void 96CPUProgressEvent::process() 97{ 98 Counter temp = cpu->totalOps(); 99 100 if (_repeatEvent) 101 cpu->schedule(this, curTick() + _interval); 102 103 if (cpu->switchedOut()) { 104 return; 105 } 106 107#ifndef NDEBUG 108 double ipc = double(temp - lastNumInst) / (_interval / cpu->clockPeriod()); 109 110 DPRINTFN("%s progress event, total committed:%i, progress insts committed: " 111 "%lli, IPC: %0.8d\n", cpu->name(), temp, temp - lastNumInst, 112 ipc); 113 ipc = 0.0; 114#else 115 cprintf("%lli: %s progress event, total committed:%i, progress insts " 116 "committed: %lli\n", curTick(), cpu->name(), temp, 117 temp - lastNumInst); 118#endif 119 lastNumInst = temp; 120} 121 122const char * 123CPUProgressEvent::description() const 124{ 125 return "CPU Progress"; 126} 127 128BaseCPU::BaseCPU(Params *p, bool is_checker) 129 : MemObject(p), instCnt(0), _cpuId(p->cpu_id), _socketId(p->socket_id), 130 _instMasterId(p->system->getMasterId(name() + ".inst")), 131 _dataMasterId(p->system->getMasterId(name() + ".data")), 132 _taskId(ContextSwitchTaskId::Unknown), _pid(invldPid), 133 _switchedOut(p->switched_out), _cacheLineSize(p->system->cacheLineSize()), 134 interrupts(p->interrupts), profileEvent(NULL), 135 numThreads(p->numThreads), system(p->system), 136 functionTraceStream(nullptr), currentFunctionStart(0), 137 currentFunctionEnd(0), functionEntryTick(0), 138 addressMonitor(p->numThreads) 139{ 140 // if Python did not provide a valid ID, do it here 141 if (_cpuId == -1 ) { 142 _cpuId = cpuList.size(); 143 } 144 145 // add self to global list of CPUs 146 cpuList.push_back(this); 147 148 DPRINTF(SyscallVerbose, "Constructing CPU with id %d, socket id %d\n", 149 _cpuId, _socketId); 150 151 if (numThreads > maxThreadsPerCPU) 152 maxThreadsPerCPU = numThreads; 153 154 // allocate per-thread instruction-based event queues 155 comInstEventQueue = new EventQueue *[numThreads]; 156 for (ThreadID tid = 0; tid < numThreads; ++tid) 157 comInstEventQueue[tid] = 158 new EventQueue("instruction-based event queue"); 159 160 // 161 // set up instruction-count-based termination events, if any 162 // 163 if (p->max_insts_any_thread != 0) { 164 const char *cause = "a thread reached the max instruction count"; 165 for (ThreadID tid = 0; tid < numThreads; ++tid) 166 scheduleInstStop(tid, p->max_insts_any_thread, cause); 167 } 168 169 // Set up instruction-count-based termination events for SimPoints 170 // Typically, there are more than one action points. 171 // Simulation.py is responsible to take the necessary actions upon 172 // exitting the simulation loop. 173 if (!p->simpoint_start_insts.empty()) { 174 const char *cause = "simpoint starting point found"; 175 for (size_t i = 0; i < p->simpoint_start_insts.size(); ++i) 176 scheduleInstStop(0, p->simpoint_start_insts[i], cause); 177 } 178 179 if (p->max_insts_all_threads != 0) { 180 const char *cause = "all threads reached the max instruction count"; 181 182 // allocate & initialize shared downcounter: each event will 183 // decrement this when triggered; simulation will terminate 184 // when counter reaches 0 185 int *counter = new int; 186 *counter = numThreads; 187 for (ThreadID tid = 0; tid < numThreads; ++tid) { 188 Event *event = new CountedExitEvent(cause, *counter); 189 comInstEventQueue[tid]->schedule(event, p->max_insts_all_threads); 190 } 191 } 192 193 // allocate per-thread load-based event queues 194 comLoadEventQueue = new EventQueue *[numThreads]; 195 for (ThreadID tid = 0; tid < numThreads; ++tid) 196 comLoadEventQueue[tid] = new EventQueue("load-based event queue"); 197 198 // 199 // set up instruction-count-based termination events, if any 200 // 201 if (p->max_loads_any_thread != 0) { 202 const char *cause = "a thread reached the max load count"; 203 for (ThreadID tid = 0; tid < numThreads; ++tid) 204 scheduleLoadStop(tid, p->max_loads_any_thread, cause); 205 } 206 207 if (p->max_loads_all_threads != 0) { 208 const char *cause = "all threads reached the max load count"; 209 // allocate & initialize shared downcounter: each event will 210 // decrement this when triggered; simulation will terminate 211 // when counter reaches 0 212 int *counter = new int; 213 *counter = numThreads; 214 for (ThreadID tid = 0; tid < numThreads; ++tid) { 215 Event *event = new CountedExitEvent(cause, *counter); 216 comLoadEventQueue[tid]->schedule(event, p->max_loads_all_threads); 217 } 218 } 219 220 functionTracingEnabled = false; 221 if (p->function_trace) { 222 const string fname = csprintf("ftrace.%s", name()); 223 functionTraceStream = simout.findOrCreate(fname)->stream(); 224 225 currentFunctionStart = currentFunctionEnd = 0; 226 functionEntryTick = p->function_trace_start; 227 228 if (p->function_trace_start == 0) { 229 functionTracingEnabled = true; 230 } else { 231 typedef EventWrapper<BaseCPU, &BaseCPU::enableFunctionTrace> wrap; 232 Event *event = new wrap(this, true); 233 schedule(event, p->function_trace_start); 234 } 235 } 236 237 // The interrupts should always be present unless this CPU is 238 // switched in later or in case it is a checker CPU 239 if (!params()->switched_out && !is_checker) { 240 fatal_if(interrupts.size() != numThreads, 241 "CPU %s has %i interrupt controllers, but is expecting one " 242 "per thread (%i)\n", 243 name(), interrupts.size(), numThreads); 244 for (ThreadID tid = 0; tid < numThreads; tid++) 245 interrupts[tid]->setCPU(this); 246 } 247 248 if (FullSystem) { 249 if (params()->profile) 250 profileEvent = new ProfileEvent(this, params()->profile); 251 } 252 tracer = params()->tracer; 253 254 if (params()->isa.size() != numThreads) { 255 fatal("Number of ISAs (%i) assigned to the CPU does not equal number " 256 "of threads (%i).\n", params()->isa.size(), numThreads); 257 } 258} 259 260void 261BaseCPU::enableFunctionTrace() 262{ 263 functionTracingEnabled = true; 264} 265 266BaseCPU::~BaseCPU() 267{ 268 delete profileEvent; 269 delete[] comLoadEventQueue; 270 delete[] comInstEventQueue; 271} 272 273void 274BaseCPU::armMonitor(ThreadID tid, Addr address) 275{ 276 assert(tid < numThreads); 277 AddressMonitor &monitor = addressMonitor[tid]; 278 279 monitor.armed = true; 280 monitor.vAddr = address; 281 monitor.pAddr = 0x0; 282 DPRINTF(Mwait,"[tid:%d] Armed monitor (vAddr=0x%lx)\n", tid, address); 283} 284 285bool 286BaseCPU::mwait(ThreadID tid, PacketPtr pkt) 287{ 288 assert(tid < numThreads); 289 AddressMonitor &monitor = addressMonitor[tid]; 290 291 if (!monitor.gotWakeup) { 292 int block_size = cacheLineSize(); 293 uint64_t mask = ~((uint64_t)(block_size - 1)); 294 295 assert(pkt->req->hasPaddr()); 296 monitor.pAddr = pkt->getAddr() & mask; 297 monitor.waiting = true; 298 299 DPRINTF(Mwait,"[tid:%d] mwait called (vAddr=0x%lx, " 300 "line's paddr=0x%lx)\n", tid, monitor.vAddr, monitor.pAddr); 301 return true; 302 } else { 303 monitor.gotWakeup = false; 304 return false; 305 } 306} 307 308void 309BaseCPU::mwaitAtomic(ThreadID tid, ThreadContext *tc, TheISA::TLB *dtb) 310{ 311 assert(tid < numThreads); 312 AddressMonitor &monitor = addressMonitor[tid]; 313 314 Request req; 315 Addr addr = monitor.vAddr; 316 int block_size = cacheLineSize(); 317 uint64_t mask = ~((uint64_t)(block_size - 1)); 318 int size = block_size; 319 320 //The address of the next line if it crosses a cache line boundary. 321 Addr secondAddr = roundDown(addr + size - 1, block_size); 322 323 if (secondAddr > addr) 324 size = secondAddr - addr; 325 326 req.setVirt(0, addr, size, 0x0, dataMasterId(), tc->instAddr()); 327 328 // translate to physical address 329 Fault fault = dtb->translateAtomic(&req, tc, BaseTLB::Read); 330 assert(fault == NoFault); 331 332 monitor.pAddr = req.getPaddr() & mask; 333 monitor.waiting = true; 334 335 DPRINTF(Mwait,"[tid:%d] mwait called (vAddr=0x%lx, line's paddr=0x%lx)\n", 336 tid, monitor.vAddr, monitor.pAddr); 337} 338 339void 340BaseCPU::init() 341{ 342 if (!params()->switched_out) { 343 registerThreadContexts(); 344 345 verifyMemoryMode(); 346 } 347} 348 349void 350BaseCPU::startup() 351{ 352 if (FullSystem) { 353 if (!params()->switched_out && profileEvent) 354 schedule(profileEvent, curTick()); 355 } 356 357 if (params()->progress_interval) { 358 new CPUProgressEvent(this, params()->progress_interval); 359 } 360 361 // Assumption CPU start to operate instantaneously without any latency 362 if (ClockedObject::pwrState() == Enums::PwrState::UNDEFINED) 363 ClockedObject::pwrState(Enums::PwrState::ON); 364 365} 366 367ProbePoints::PMUUPtr 368BaseCPU::pmuProbePoint(const char *name) 369{ 370 ProbePoints::PMUUPtr ptr; 371 ptr.reset(new ProbePoints::PMU(getProbeManager(), name)); 372 373 return ptr; 374} 375 376void 377BaseCPU::regProbePoints() 378{ 379 ppCycles = pmuProbePoint("Cycles"); 380 381 ppRetiredInsts = pmuProbePoint("RetiredInsts"); 382 ppRetiredLoads = pmuProbePoint("RetiredLoads"); 383 ppRetiredStores = pmuProbePoint("RetiredStores"); 384 ppRetiredBranches = pmuProbePoint("RetiredBranches"); 385} 386 387void 388BaseCPU::probeInstCommit(const StaticInstPtr &inst) 389{ 390 if (!inst->isMicroop() || inst->isLastMicroop()) 391 ppRetiredInsts->notify(1); 392 393 394 if (inst->isLoad()) 395 ppRetiredLoads->notify(1); 396 397 if (inst->isStore()) 398 ppRetiredStores->notify(1); 399 400 if (inst->isControl()) 401 ppRetiredBranches->notify(1); 402} 403 404void 405BaseCPU::regStats() 406{ 407 MemObject::regStats(); 408 409 using namespace Stats; 410 411 numCycles 412 .name(name() + ".numCycles") 413 .desc("number of cpu cycles simulated") 414 ; 415 416 numWorkItemsStarted 417 .name(name() + ".numWorkItemsStarted") 418 .desc("number of work items this cpu started") 419 ; 420 421 numWorkItemsCompleted 422 .name(name() + ".numWorkItemsCompleted") 423 .desc("number of work items this cpu completed") 424 ; 425 426 int size = threadContexts.size(); 427 if (size > 1) { 428 for (int i = 0; i < size; ++i) { 429 stringstream namestr; 430 ccprintf(namestr, "%s.ctx%d", name(), i); 431 threadContexts[i]->regStats(namestr.str()); 432 } 433 } else if (size == 1) 434 threadContexts[0]->regStats(name()); 435} 436 437BaseMasterPort & 438BaseCPU::getMasterPort(const string &if_name, PortID idx) 439{ 440 // Get the right port based on name. This applies to all the 441 // subclasses of the base CPU and relies on their implementation 442 // of getDataPort and getInstPort. In all cases there methods 443 // return a MasterPort pointer. 444 if (if_name == "dcache_port") 445 return getDataPort(); 446 else if (if_name == "icache_port") 447 return getInstPort(); 448 else 449 return MemObject::getMasterPort(if_name, idx); 450} 451 452void 453BaseCPU::registerThreadContexts() 454{ 455 assert(system->multiThread || numThreads == 1); 456 457 ThreadID size = threadContexts.size(); 458 for (ThreadID tid = 0; tid < size; ++tid) { 459 ThreadContext *tc = threadContexts[tid]; 460 461 if (system->multiThread) { 462 tc->setContextId(system->registerThreadContext(tc)); 463 } else { 464 tc->setContextId(system->registerThreadContext(tc, _cpuId)); 465 } 466 467 if (!FullSystem) 468 tc->getProcessPtr()->assignThreadContext(tc->contextId()); 469 } 470} 471 472 473int 474BaseCPU::findContext(ThreadContext *tc) 475{ 476 ThreadID size = threadContexts.size(); 477 for (ThreadID tid = 0; tid < size; ++tid) { 478 if (tc == threadContexts[tid]) 479 return tid; 480 } 481 return 0; 482} 483 484void 485BaseCPU::activateContext(ThreadID thread_num) 486{ 487 // For any active thread running, update CPU power state to active (ON) 488 ClockedObject::pwrState(Enums::PwrState::ON); 489} 490 491void 492BaseCPU::suspendContext(ThreadID thread_num) 493{ 494 // Check if all threads are suspended 495 for (auto t : threadContexts) { 496 if (t->status() != ThreadContext::Suspended) { 497 return; 498 } 499 } 500 501 // All CPU threads suspended, enter lower power state for the CPU 502 ClockedObject::pwrState(Enums::PwrState::CLK_GATED); 503} 504 505void 506BaseCPU::switchOut() 507{ 508 assert(!_switchedOut); 509 _switchedOut = true; 510 if (profileEvent && profileEvent->scheduled()) 511 deschedule(profileEvent); 512 513 // Flush all TLBs in the CPU to avoid having stale translations if 514 // it gets switched in later. 515 flushTLBs(); 516} 517 518void 519BaseCPU::takeOverFrom(BaseCPU *oldCPU) 520{ 521 assert(threadContexts.size() == oldCPU->threadContexts.size()); 522 assert(_cpuId == oldCPU->cpuId()); 523 assert(_switchedOut); 524 assert(oldCPU != this); 525 _pid = oldCPU->getPid(); 526 _taskId = oldCPU->taskId(); 527 _switchedOut = false; 528 529 ThreadID size = threadContexts.size(); 530 for (ThreadID i = 0; i < size; ++i) { 531 ThreadContext *newTC = threadContexts[i]; 532 ThreadContext *oldTC = oldCPU->threadContexts[i]; 533 534 newTC->takeOverFrom(oldTC); 535 536 CpuEvent::replaceThreadContext(oldTC, newTC); 537 538 assert(newTC->contextId() == oldTC->contextId()); 539 assert(newTC->threadId() == oldTC->threadId()); 540 system->replaceThreadContext(newTC, newTC->contextId()); 541 542 /* This code no longer works since the zero register (e.g., 543 * r31 on Alpha) doesn't necessarily contain zero at this 544 * point. 545 if (DTRACE(Context)) 546 ThreadContext::compare(oldTC, newTC); 547 */ 548 549 BaseMasterPort *old_itb_port = oldTC->getITBPtr()->getMasterPort(); 550 BaseMasterPort *old_dtb_port = oldTC->getDTBPtr()->getMasterPort(); 551 BaseMasterPort *new_itb_port = newTC->getITBPtr()->getMasterPort(); 552 BaseMasterPort *new_dtb_port = newTC->getDTBPtr()->getMasterPort(); 553 554 // Move over any table walker ports if they exist 555 if (new_itb_port) { 556 assert(!new_itb_port->isConnected()); 557 assert(old_itb_port); 558 assert(old_itb_port->isConnected()); 559 BaseSlavePort &slavePort = old_itb_port->getSlavePort(); 560 old_itb_port->unbind(); 561 new_itb_port->bind(slavePort); 562 } 563 if (new_dtb_port) { 564 assert(!new_dtb_port->isConnected()); 565 assert(old_dtb_port); 566 assert(old_dtb_port->isConnected()); 567 BaseSlavePort &slavePort = old_dtb_port->getSlavePort(); 568 old_dtb_port->unbind(); 569 new_dtb_port->bind(slavePort); 570 } 571 newTC->getITBPtr()->takeOverFrom(oldTC->getITBPtr()); 572 newTC->getDTBPtr()->takeOverFrom(oldTC->getDTBPtr()); 573 574 // Checker whether or not we have to transfer CheckerCPU 575 // objects over in the switch 576 CheckerCPU *oldChecker = oldTC->getCheckerCpuPtr(); 577 CheckerCPU *newChecker = newTC->getCheckerCpuPtr(); 578 if (oldChecker && newChecker) { 579 BaseMasterPort *old_checker_itb_port = 580 oldChecker->getITBPtr()->getMasterPort(); 581 BaseMasterPort *old_checker_dtb_port = 582 oldChecker->getDTBPtr()->getMasterPort(); 583 BaseMasterPort *new_checker_itb_port = 584 newChecker->getITBPtr()->getMasterPort(); 585 BaseMasterPort *new_checker_dtb_port = 586 newChecker->getDTBPtr()->getMasterPort(); 587 588 newChecker->getITBPtr()->takeOverFrom(oldChecker->getITBPtr()); 589 newChecker->getDTBPtr()->takeOverFrom(oldChecker->getDTBPtr()); 590 591 // Move over any table walker ports if they exist for checker 592 if (new_checker_itb_port) { 593 assert(!new_checker_itb_port->isConnected()); 594 assert(old_checker_itb_port); 595 assert(old_checker_itb_port->isConnected()); 596 BaseSlavePort &slavePort = 597 old_checker_itb_port->getSlavePort(); 598 old_checker_itb_port->unbind(); 599 new_checker_itb_port->bind(slavePort); 600 } 601 if (new_checker_dtb_port) { 602 assert(!new_checker_dtb_port->isConnected()); 603 assert(old_checker_dtb_port); 604 assert(old_checker_dtb_port->isConnected()); 605 BaseSlavePort &slavePort = 606 old_checker_dtb_port->getSlavePort(); 607 old_checker_dtb_port->unbind(); 608 new_checker_dtb_port->bind(slavePort); 609 } 610 } 611 } 612 613 interrupts = oldCPU->interrupts; 614 for (ThreadID tid = 0; tid < numThreads; tid++) { 615 interrupts[tid]->setCPU(this); 616 } 617 oldCPU->interrupts.clear(); 618 619 if (FullSystem) { 620 for (ThreadID i = 0; i < size; ++i) 621 threadContexts[i]->profileClear(); 622 623 if (profileEvent) 624 schedule(profileEvent, curTick()); 625 } 626 627 // All CPUs have an instruction and a data port, and the new CPU's 628 // ports are dangling while the old CPU has its ports connected 629 // already. Unbind the old CPU and then bind the ports of the one 630 // we are switching to. 631 assert(!getInstPort().isConnected()); 632 assert(oldCPU->getInstPort().isConnected()); 633 BaseSlavePort &inst_peer_port = oldCPU->getInstPort().getSlavePort(); 634 oldCPU->getInstPort().unbind(); 635 getInstPort().bind(inst_peer_port); 636 637 assert(!getDataPort().isConnected()); 638 assert(oldCPU->getDataPort().isConnected()); 639 BaseSlavePort &data_peer_port = oldCPU->getDataPort().getSlavePort(); 640 oldCPU->getDataPort().unbind(); 641 getDataPort().bind(data_peer_port); 642} 643 644void 645BaseCPU::flushTLBs() 646{ 647 for (ThreadID i = 0; i < threadContexts.size(); ++i) { 648 ThreadContext &tc(*threadContexts[i]); 649 CheckerCPU *checker(tc.getCheckerCpuPtr()); 650 651 tc.getITBPtr()->flushAll(); 652 tc.getDTBPtr()->flushAll(); 653 if (checker) { 654 checker->getITBPtr()->flushAll(); 655 checker->getDTBPtr()->flushAll(); 656 } 657 } 658} 659 660 661BaseCPU::ProfileEvent::ProfileEvent(BaseCPU *_cpu, Tick _interval) 662 : cpu(_cpu), interval(_interval) 663{ } 664 665void 666BaseCPU::ProfileEvent::process() 667{ 668 ThreadID size = cpu->threadContexts.size(); 669 for (ThreadID i = 0; i < size; ++i) { 670 ThreadContext *tc = cpu->threadContexts[i]; 671 tc->profileSample(); 672 } 673 674 cpu->schedule(this, curTick() + interval); 675} 676 677void 678BaseCPU::serialize(CheckpointOut &cp) const 679{ 680 SERIALIZE_SCALAR(instCnt); 681 682 if (!_switchedOut) { 683 /* Unlike _pid, _taskId is not serialized, as they are dynamically 684 * assigned unique ids that are only meaningful for the duration of 685 * a specific run. We will need to serialize the entire taskMap in 686 * system. */ 687 SERIALIZE_SCALAR(_pid); 688 689 // Serialize the threads, this is done by the CPU implementation. 690 for (ThreadID i = 0; i < numThreads; ++i) { 691 ScopedCheckpointSection sec(cp, csprintf("xc.%i", i)); 692 interrupts[i]->serialize(cp); 693 serializeThread(cp, i); 694 } 695 } 696} 697 698void 699BaseCPU::unserialize(CheckpointIn &cp) 700{ 701 UNSERIALIZE_SCALAR(instCnt); 702 703 if (!_switchedOut) { 704 UNSERIALIZE_SCALAR(_pid); 705 706 // Unserialize the threads, this is done by the CPU implementation. 707 for (ThreadID i = 0; i < numThreads; ++i) { 708 ScopedCheckpointSection sec(cp, csprintf("xc.%i", i)); 709 interrupts[i]->unserialize(cp); 710 unserializeThread(cp, i); 711 } 712 } 713} 714 715void 716BaseCPU::scheduleInstStop(ThreadID tid, Counter insts, const char *cause) 717{ 718 const Tick now(comInstEventQueue[tid]->getCurTick()); 719 Event *event(new LocalSimLoopExitEvent(cause, 0)); 720 721 comInstEventQueue[tid]->schedule(event, now + insts); 722} 723 724uint64_t 725BaseCPU::getCurrentInstCount(ThreadID tid) 726{ 727 return Tick(comInstEventQueue[tid]->getCurTick()); 728} 729 730AddressMonitor::AddressMonitor() { 731 armed = false; 732 waiting = false; 733 gotWakeup = false; 734} 735 736bool AddressMonitor::doMonitor(PacketPtr pkt) { 737 assert(pkt->req->hasPaddr()); 738 if (armed && waiting) { 739 if (pAddr == pkt->getAddr()) { 740 DPRINTF(Mwait,"pAddr=0x%lx invalidated: waking up core\n", 741 pkt->getAddr()); 742 waiting = false; 743 return true; 744 } 745 } 746 return false; 747} 748 749void 750BaseCPU::scheduleLoadStop(ThreadID tid, Counter loads, const char *cause) 751{ 752 const Tick now(comLoadEventQueue[tid]->getCurTick()); 753 Event *event(new LocalSimLoopExitEvent(cause, 0)); 754 755 comLoadEventQueue[tid]->schedule(event, now + loads); 756} 757 758 759void 760BaseCPU::traceFunctionsInternal(Addr pc) 761{ 762 if (!debugSymbolTable) 763 return; 764 765 // if pc enters different function, print new function symbol and 766 // update saved range. Otherwise do nothing. 767 if (pc < currentFunctionStart || pc >= currentFunctionEnd) { 768 string sym_str; 769 bool found = debugSymbolTable->findNearestSymbol(pc, sym_str, 770 currentFunctionStart, 771 currentFunctionEnd); 772 773 if (!found) { 774 // no symbol found: use addr as label 775 sym_str = csprintf("0x%x", pc); 776 currentFunctionStart = pc; 777 currentFunctionEnd = pc + 1; 778 } 779 780 ccprintf(*functionTraceStream, " (%d)\n%d: %s", 781 curTick() - functionEntryTick, curTick(), sym_str); 782 functionEntryTick = curTick(); 783 } 784}
|