// base.cc revision 11423:831c7f2f9e39
1/* 2 * Copyright (c) 2011-2012 ARM Limited 3 * All rights reserved 4 * 5 * The license below extends only to copyright in the software and shall 6 * not be construed as granting a license to any other intellectual 7 * property including but not limited to intellectual property relating 8 * to a hardware implementation of the functionality of the software 9 * licensed hereunder. You may use the software subject to the license 10 * terms below provided that you ensure that this notice is replicated 11 * unmodified and in its entirety in all distributions of the software, 12 * modified or unmodified, in source code or in binary form. 13 * 14 * Copyright (c) 2002-2005 The Regents of The University of Michigan 15 * Copyright (c) 2011 Regents of the University of California 16 * Copyright (c) 2013 Advanced Micro Devices, Inc. 17 * Copyright (c) 2013 Mark D. Hill and David A. Wood 18 * All rights reserved. 19 * 20 * Redistribution and use in source and binary forms, with or without 21 * modification, are permitted provided that the following conditions are 22 * met: redistributions of source code must retain the above copyright 23 * notice, this list of conditions and the following disclaimer; 24 * redistributions in binary form must reproduce the above copyright 25 * notice, this list of conditions and the following disclaimer in the 26 * documentation and/or other materials provided with the distribution; 27 * neither the name of the copyright holders nor the names of its 28 * contributors may be used to endorse or promote products derived from 29 * this software without specific prior written permission. 30 * 31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 32 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 33 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 34 * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT 35 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 36 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 37 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 38 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 39 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 40 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 41 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 42 * 43 * Authors: Steve Reinhardt 44 * Nathan Binkert 45 * Rick Strong 46 */ 47 48#include <iostream> 49#include <sstream> 50#include <string> 51 52#include "arch/tlb.hh" 53#include "base/loader/symtab.hh" 54#include "base/cprintf.hh" 55#include "base/misc.hh" 56#include "base/output.hh" 57#include "base/trace.hh" 58#include "cpu/checker/cpu.hh" 59#include "cpu/base.hh" 60#include "cpu/cpuevent.hh" 61#include "cpu/profile.hh" 62#include "cpu/thread_context.hh" 63#include "debug/Mwait.hh" 64#include "debug/SyscallVerbose.hh" 65#include "mem/page_table.hh" 66#include "params/BaseCPU.hh" 67#include "sim/clocked_object.hh" 68#include "sim/full_system.hh" 69#include "sim/process.hh" 70#include "sim/sim_events.hh" 71#include "sim/sim_exit.hh" 72#include "sim/system.hh" 73 74// Hack 75#include "sim/stat_control.hh" 76 77using namespace std; 78 79vector<BaseCPU *> BaseCPU::cpuList; 80 81// This variable reflects the max number of threads in any CPU. 
Be 82// careful to only use it once all the CPUs that you care about have 83// been initialized 84int maxThreadsPerCPU = 1; 85 86CPUProgressEvent::CPUProgressEvent(BaseCPU *_cpu, Tick ival) 87 : Event(Event::Progress_Event_Pri), _interval(ival), lastNumInst(0), 88 cpu(_cpu), _repeatEvent(true) 89{ 90 if (_interval) 91 cpu->schedule(this, curTick() + _interval); 92} 93 94void 95CPUProgressEvent::process() 96{ 97 Counter temp = cpu->totalOps(); 98 99 if (_repeatEvent) 100 cpu->schedule(this, curTick() + _interval); 101 102 if (cpu->switchedOut()) { 103 return; 104 } 105 106#ifndef NDEBUG 107 double ipc = double(temp - lastNumInst) / (_interval / cpu->clockPeriod()); 108 109 DPRINTFN("%s progress event, total committed:%i, progress insts committed: " 110 "%lli, IPC: %0.8d\n", cpu->name(), temp, temp - lastNumInst, 111 ipc); 112 ipc = 0.0; 113#else 114 cprintf("%lli: %s progress event, total committed:%i, progress insts " 115 "committed: %lli\n", curTick(), cpu->name(), temp, 116 temp - lastNumInst); 117#endif 118 lastNumInst = temp; 119} 120 121const char * 122CPUProgressEvent::description() const 123{ 124 return "CPU Progress"; 125} 126 127BaseCPU::BaseCPU(Params *p, bool is_checker) 128 : MemObject(p), instCnt(0), _cpuId(p->cpu_id), _socketId(p->socket_id), 129 _instMasterId(p->system->getMasterId(name() + ".inst")), 130 _dataMasterId(p->system->getMasterId(name() + ".data")), 131 _taskId(ContextSwitchTaskId::Unknown), _pid(invldPid), 132 _switchedOut(p->switched_out), _cacheLineSize(p->system->cacheLineSize()), 133 interrupts(p->interrupts), profileEvent(NULL), 134 numThreads(p->numThreads), system(p->system), 135 functionTraceStream(nullptr), currentFunctionStart(0), 136 currentFunctionEnd(0), functionEntryTick(0), 137 addressMonitor(p->numThreads) 138{ 139 // if Python did not provide a valid ID, do it here 140 if (_cpuId == -1 ) { 141 _cpuId = cpuList.size(); 142 } 143 144 // add self to global list of CPUs 145 cpuList.push_back(this); 146 147 
DPRINTF(SyscallVerbose, "Constructing CPU with id %d, socket id %d\n", 148 _cpuId, _socketId); 149 150 if (numThreads > maxThreadsPerCPU) 151 maxThreadsPerCPU = numThreads; 152 153 // allocate per-thread instruction-based event queues 154 comInstEventQueue = new EventQueue *[numThreads]; 155 for (ThreadID tid = 0; tid < numThreads; ++tid) 156 comInstEventQueue[tid] = 157 new EventQueue("instruction-based event queue"); 158 159 // 160 // set up instruction-count-based termination events, if any 161 // 162 if (p->max_insts_any_thread != 0) { 163 const char *cause = "a thread reached the max instruction count"; 164 for (ThreadID tid = 0; tid < numThreads; ++tid) 165 scheduleInstStop(tid, p->max_insts_any_thread, cause); 166 } 167 168 // Set up instruction-count-based termination events for SimPoints 169 // Typically, there are more than one action points. 170 // Simulation.py is responsible to take the necessary actions upon 171 // exitting the simulation loop. 172 if (!p->simpoint_start_insts.empty()) { 173 const char *cause = "simpoint starting point found"; 174 for (size_t i = 0; i < p->simpoint_start_insts.size(); ++i) 175 scheduleInstStop(0, p->simpoint_start_insts[i], cause); 176 } 177 178 if (p->max_insts_all_threads != 0) { 179 const char *cause = "all threads reached the max instruction count"; 180 181 // allocate & initialize shared downcounter: each event will 182 // decrement this when triggered; simulation will terminate 183 // when counter reaches 0 184 int *counter = new int; 185 *counter = numThreads; 186 for (ThreadID tid = 0; tid < numThreads; ++tid) { 187 Event *event = new CountedExitEvent(cause, *counter); 188 comInstEventQueue[tid]->schedule(event, p->max_insts_all_threads); 189 } 190 } 191 192 // allocate per-thread load-based event queues 193 comLoadEventQueue = new EventQueue *[numThreads]; 194 for (ThreadID tid = 0; tid < numThreads; ++tid) 195 comLoadEventQueue[tid] = new EventQueue("load-based event queue"); 196 197 // 198 // set up 
instruction-count-based termination events, if any 199 // 200 if (p->max_loads_any_thread != 0) { 201 const char *cause = "a thread reached the max load count"; 202 for (ThreadID tid = 0; tid < numThreads; ++tid) 203 scheduleLoadStop(tid, p->max_loads_any_thread, cause); 204 } 205 206 if (p->max_loads_all_threads != 0) { 207 const char *cause = "all threads reached the max load count"; 208 // allocate & initialize shared downcounter: each event will 209 // decrement this when triggered; simulation will terminate 210 // when counter reaches 0 211 int *counter = new int; 212 *counter = numThreads; 213 for (ThreadID tid = 0; tid < numThreads; ++tid) { 214 Event *event = new CountedExitEvent(cause, *counter); 215 comLoadEventQueue[tid]->schedule(event, p->max_loads_all_threads); 216 } 217 } 218 219 functionTracingEnabled = false; 220 if (p->function_trace) { 221 const string fname = csprintf("ftrace.%s", name()); 222 functionTraceStream = simout.findOrCreate(fname)->stream(); 223 224 currentFunctionStart = currentFunctionEnd = 0; 225 functionEntryTick = p->function_trace_start; 226 227 if (p->function_trace_start == 0) { 228 functionTracingEnabled = true; 229 } else { 230 typedef EventWrapper<BaseCPU, &BaseCPU::enableFunctionTrace> wrap; 231 Event *event = new wrap(this, true); 232 schedule(event, p->function_trace_start); 233 } 234 } 235 236 // The interrupts should always be present unless this CPU is 237 // switched in later or in case it is a checker CPU 238 if (!params()->switched_out && !is_checker) { 239 fatal_if(interrupts.size() != numThreads, 240 "CPU %s has %i interrupt controllers, but is expecting one " 241 "per thread (%i)\n", 242 name(), interrupts.size(), numThreads); 243 for (ThreadID tid = 0; tid < numThreads; tid++) 244 interrupts[tid]->setCPU(this); 245 } 246 247 if (FullSystem) { 248 if (params()->profile) 249 profileEvent = new ProfileEvent(this, params()->profile); 250 } 251 tracer = params()->tracer; 252 253 if (params()->isa.size() != 
numThreads) { 254 fatal("Number of ISAs (%i) assigned to the CPU does not equal number " 255 "of threads (%i).\n", params()->isa.size(), numThreads); 256 } 257} 258 259void 260BaseCPU::enableFunctionTrace() 261{ 262 functionTracingEnabled = true; 263} 264 265BaseCPU::~BaseCPU() 266{ 267 delete profileEvent; 268 delete[] comLoadEventQueue; 269 delete[] comInstEventQueue; 270} 271 272void 273BaseCPU::armMonitor(ThreadID tid, Addr address) 274{ 275 assert(tid < numThreads); 276 AddressMonitor &monitor = addressMonitor[tid]; 277 278 monitor.armed = true; 279 monitor.vAddr = address; 280 monitor.pAddr = 0x0; 281 DPRINTF(Mwait,"[tid:%d] Armed monitor (vAddr=0x%lx)\n", tid, address); 282} 283 284bool 285BaseCPU::mwait(ThreadID tid, PacketPtr pkt) 286{ 287 assert(tid < numThreads); 288 AddressMonitor &monitor = addressMonitor[tid]; 289 290 if (!monitor.gotWakeup) { 291 int block_size = cacheLineSize(); 292 uint64_t mask = ~((uint64_t)(block_size - 1)); 293 294 assert(pkt->req->hasPaddr()); 295 monitor.pAddr = pkt->getAddr() & mask; 296 monitor.waiting = true; 297 298 DPRINTF(Mwait,"[tid:%d] mwait called (vAddr=0x%lx, " 299 "line's paddr=0x%lx)\n", tid, monitor.vAddr, monitor.pAddr); 300 return true; 301 } else { 302 monitor.gotWakeup = false; 303 return false; 304 } 305} 306 307void 308BaseCPU::mwaitAtomic(ThreadID tid, ThreadContext *tc, TheISA::TLB *dtb) 309{ 310 assert(tid < numThreads); 311 AddressMonitor &monitor = addressMonitor[tid]; 312 313 Request req; 314 Addr addr = monitor.vAddr; 315 int block_size = cacheLineSize(); 316 uint64_t mask = ~((uint64_t)(block_size - 1)); 317 int size = block_size; 318 319 //The address of the next line if it crosses a cache line boundary. 
320 Addr secondAddr = roundDown(addr + size - 1, block_size); 321 322 if (secondAddr > addr) 323 size = secondAddr - addr; 324 325 req.setVirt(0, addr, size, 0x0, dataMasterId(), tc->instAddr()); 326 327 // translate to physical address 328 Fault fault = dtb->translateAtomic(&req, tc, BaseTLB::Read); 329 assert(fault == NoFault); 330 331 monitor.pAddr = req.getPaddr() & mask; 332 monitor.waiting = true; 333 334 DPRINTF(Mwait,"[tid:%d] mwait called (vAddr=0x%lx, line's paddr=0x%lx)\n", 335 tid, monitor.vAddr, monitor.pAddr); 336} 337 338void 339BaseCPU::init() 340{ 341 if (!params()->switched_out) { 342 registerThreadContexts(); 343 344 verifyMemoryMode(); 345 } 346} 347 348void 349BaseCPU::startup() 350{ 351 if (FullSystem) { 352 if (!params()->switched_out && profileEvent) 353 schedule(profileEvent, curTick()); 354 } 355 356 if (params()->progress_interval) { 357 new CPUProgressEvent(this, params()->progress_interval); 358 } 359 360 // Assumption CPU start to operate instantaneously without any latency 361 if (ClockedObject::pwrState() == Enums::PwrState::UNDEFINED) 362 ClockedObject::pwrState(Enums::PwrState::ON); 363 364} 365 366ProbePoints::PMUUPtr 367BaseCPU::pmuProbePoint(const char *name) 368{ 369 ProbePoints::PMUUPtr ptr; 370 ptr.reset(new ProbePoints::PMU(getProbeManager(), name)); 371 372 return ptr; 373} 374 375void 376BaseCPU::regProbePoints() 377{ 378 ppCycles = pmuProbePoint("Cycles"); 379 380 ppRetiredInsts = pmuProbePoint("RetiredInsts"); 381 ppRetiredLoads = pmuProbePoint("RetiredLoads"); 382 ppRetiredStores = pmuProbePoint("RetiredStores"); 383 ppRetiredBranches = pmuProbePoint("RetiredBranches"); 384} 385 386void 387BaseCPU::probeInstCommit(const StaticInstPtr &inst) 388{ 389 if (!inst->isMicroop() || inst->isLastMicroop()) 390 ppRetiredInsts->notify(1); 391 392 393 if (inst->isLoad()) 394 ppRetiredLoads->notify(1); 395 396 if (inst->isStore()) 397 ppRetiredStores->notify(1); 398 399 if (inst->isControl()) 400 ppRetiredBranches->notify(1); 401} 
402 403void 404BaseCPU::regStats() 405{ 406 using namespace Stats; 407 408 numCycles 409 .name(name() + ".numCycles") 410 .desc("number of cpu cycles simulated") 411 ; 412 413 numWorkItemsStarted 414 .name(name() + ".numWorkItemsStarted") 415 .desc("number of work items this cpu started") 416 ; 417 418 numWorkItemsCompleted 419 .name(name() + ".numWorkItemsCompleted") 420 .desc("number of work items this cpu completed") 421 ; 422 423 int size = threadContexts.size(); 424 if (size > 1) { 425 for (int i = 0; i < size; ++i) { 426 stringstream namestr; 427 ccprintf(namestr, "%s.ctx%d", name(), i); 428 threadContexts[i]->regStats(namestr.str()); 429 } 430 } else if (size == 1) 431 threadContexts[0]->regStats(name()); 432} 433 434BaseMasterPort & 435BaseCPU::getMasterPort(const string &if_name, PortID idx) 436{ 437 // Get the right port based on name. This applies to all the 438 // subclasses of the base CPU and relies on their implementation 439 // of getDataPort and getInstPort. In all cases there methods 440 // return a MasterPort pointer. 
441 if (if_name == "dcache_port") 442 return getDataPort(); 443 else if (if_name == "icache_port") 444 return getInstPort(); 445 else 446 return MemObject::getMasterPort(if_name, idx); 447} 448 449void 450BaseCPU::registerThreadContexts() 451{ 452 assert(system->multiThread || numThreads == 1); 453 454 ThreadID size = threadContexts.size(); 455 for (ThreadID tid = 0; tid < size; ++tid) { 456 ThreadContext *tc = threadContexts[tid]; 457 458 if (system->multiThread) { 459 tc->setContextId(system->registerThreadContext(tc)); 460 } else { 461 tc->setContextId(system->registerThreadContext(tc, _cpuId)); 462 } 463 464 if (!FullSystem) 465 tc->getProcessPtr()->assignThreadContext(tc->contextId()); 466 } 467} 468 469 470int 471BaseCPU::findContext(ThreadContext *tc) 472{ 473 ThreadID size = threadContexts.size(); 474 for (ThreadID tid = 0; tid < size; ++tid) { 475 if (tc == threadContexts[tid]) 476 return tid; 477 } 478 return 0; 479} 480 481void 482BaseCPU::activateContext(ThreadID thread_num) 483{ 484 // For any active thread running, update CPU power state to active (ON) 485 ClockedObject::pwrState(Enums::PwrState::ON); 486} 487 488void 489BaseCPU::suspendContext(ThreadID thread_num) 490{ 491 // Check if all threads are suspended 492 for (auto t : threadContexts) { 493 if (t->status() != ThreadContext::Suspended) { 494 return; 495 } 496 } 497 498 // All CPU threads suspended, enter lower power state for the CPU 499 ClockedObject::pwrState(Enums::PwrState::CLK_GATED); 500} 501 502void 503BaseCPU::switchOut() 504{ 505 assert(!_switchedOut); 506 _switchedOut = true; 507 if (profileEvent && profileEvent->scheduled()) 508 deschedule(profileEvent); 509 510 // Flush all TLBs in the CPU to avoid having stale translations if 511 // it gets switched in later. 
512 flushTLBs(); 513} 514 515void 516BaseCPU::takeOverFrom(BaseCPU *oldCPU) 517{ 518 assert(threadContexts.size() == oldCPU->threadContexts.size()); 519 assert(_cpuId == oldCPU->cpuId()); 520 assert(_switchedOut); 521 assert(oldCPU != this); 522 _pid = oldCPU->getPid(); 523 _taskId = oldCPU->taskId(); 524 _switchedOut = false; 525 526 ThreadID size = threadContexts.size(); 527 for (ThreadID i = 0; i < size; ++i) { 528 ThreadContext *newTC = threadContexts[i]; 529 ThreadContext *oldTC = oldCPU->threadContexts[i]; 530 531 newTC->takeOverFrom(oldTC); 532 533 CpuEvent::replaceThreadContext(oldTC, newTC); 534 535 assert(newTC->contextId() == oldTC->contextId()); 536 assert(newTC->threadId() == oldTC->threadId()); 537 system->replaceThreadContext(newTC, newTC->contextId()); 538 539 /* This code no longer works since the zero register (e.g., 540 * r31 on Alpha) doesn't necessarily contain zero at this 541 * point. 542 if (DTRACE(Context)) 543 ThreadContext::compare(oldTC, newTC); 544 */ 545 546 BaseMasterPort *old_itb_port = oldTC->getITBPtr()->getMasterPort(); 547 BaseMasterPort *old_dtb_port = oldTC->getDTBPtr()->getMasterPort(); 548 BaseMasterPort *new_itb_port = newTC->getITBPtr()->getMasterPort(); 549 BaseMasterPort *new_dtb_port = newTC->getDTBPtr()->getMasterPort(); 550 551 // Move over any table walker ports if they exist 552 if (new_itb_port) { 553 assert(!new_itb_port->isConnected()); 554 assert(old_itb_port); 555 assert(old_itb_port->isConnected()); 556 BaseSlavePort &slavePort = old_itb_port->getSlavePort(); 557 old_itb_port->unbind(); 558 new_itb_port->bind(slavePort); 559 } 560 if (new_dtb_port) { 561 assert(!new_dtb_port->isConnected()); 562 assert(old_dtb_port); 563 assert(old_dtb_port->isConnected()); 564 BaseSlavePort &slavePort = old_dtb_port->getSlavePort(); 565 old_dtb_port->unbind(); 566 new_dtb_port->bind(slavePort); 567 } 568 newTC->getITBPtr()->takeOverFrom(oldTC->getITBPtr()); 569 newTC->getDTBPtr()->takeOverFrom(oldTC->getDTBPtr()); 570 571 // 
Checker whether or not we have to transfer CheckerCPU 572 // objects over in the switch 573 CheckerCPU *oldChecker = oldTC->getCheckerCpuPtr(); 574 CheckerCPU *newChecker = newTC->getCheckerCpuPtr(); 575 if (oldChecker && newChecker) { 576 BaseMasterPort *old_checker_itb_port = 577 oldChecker->getITBPtr()->getMasterPort(); 578 BaseMasterPort *old_checker_dtb_port = 579 oldChecker->getDTBPtr()->getMasterPort(); 580 BaseMasterPort *new_checker_itb_port = 581 newChecker->getITBPtr()->getMasterPort(); 582 BaseMasterPort *new_checker_dtb_port = 583 newChecker->getDTBPtr()->getMasterPort(); 584 585 newChecker->getITBPtr()->takeOverFrom(oldChecker->getITBPtr()); 586 newChecker->getDTBPtr()->takeOverFrom(oldChecker->getDTBPtr()); 587 588 // Move over any table walker ports if they exist for checker 589 if (new_checker_itb_port) { 590 assert(!new_checker_itb_port->isConnected()); 591 assert(old_checker_itb_port); 592 assert(old_checker_itb_port->isConnected()); 593 BaseSlavePort &slavePort = 594 old_checker_itb_port->getSlavePort(); 595 old_checker_itb_port->unbind(); 596 new_checker_itb_port->bind(slavePort); 597 } 598 if (new_checker_dtb_port) { 599 assert(!new_checker_dtb_port->isConnected()); 600 assert(old_checker_dtb_port); 601 assert(old_checker_dtb_port->isConnected()); 602 BaseSlavePort &slavePort = 603 old_checker_dtb_port->getSlavePort(); 604 old_checker_dtb_port->unbind(); 605 new_checker_dtb_port->bind(slavePort); 606 } 607 } 608 } 609 610 interrupts = oldCPU->interrupts; 611 for (ThreadID tid = 0; tid < numThreads; tid++) { 612 interrupts[tid]->setCPU(this); 613 } 614 oldCPU->interrupts.clear(); 615 616 if (FullSystem) { 617 for (ThreadID i = 0; i < size; ++i) 618 threadContexts[i]->profileClear(); 619 620 if (profileEvent) 621 schedule(profileEvent, curTick()); 622 } 623 624 // All CPUs have an instruction and a data port, and the new CPU's 625 // ports are dangling while the old CPU has its ports connected 626 // already. 
Unbind the old CPU and then bind the ports of the one 627 // we are switching to. 628 assert(!getInstPort().isConnected()); 629 assert(oldCPU->getInstPort().isConnected()); 630 BaseSlavePort &inst_peer_port = oldCPU->getInstPort().getSlavePort(); 631 oldCPU->getInstPort().unbind(); 632 getInstPort().bind(inst_peer_port); 633 634 assert(!getDataPort().isConnected()); 635 assert(oldCPU->getDataPort().isConnected()); 636 BaseSlavePort &data_peer_port = oldCPU->getDataPort().getSlavePort(); 637 oldCPU->getDataPort().unbind(); 638 getDataPort().bind(data_peer_port); 639} 640 641void 642BaseCPU::flushTLBs() 643{ 644 for (ThreadID i = 0; i < threadContexts.size(); ++i) { 645 ThreadContext &tc(*threadContexts[i]); 646 CheckerCPU *checker(tc.getCheckerCpuPtr()); 647 648 tc.getITBPtr()->flushAll(); 649 tc.getDTBPtr()->flushAll(); 650 if (checker) { 651 checker->getITBPtr()->flushAll(); 652 checker->getDTBPtr()->flushAll(); 653 } 654 } 655} 656 657 658BaseCPU::ProfileEvent::ProfileEvent(BaseCPU *_cpu, Tick _interval) 659 : cpu(_cpu), interval(_interval) 660{ } 661 662void 663BaseCPU::ProfileEvent::process() 664{ 665 ThreadID size = cpu->threadContexts.size(); 666 for (ThreadID i = 0; i < size; ++i) { 667 ThreadContext *tc = cpu->threadContexts[i]; 668 tc->profileSample(); 669 } 670 671 cpu->schedule(this, curTick() + interval); 672} 673 674void 675BaseCPU::serialize(CheckpointOut &cp) const 676{ 677 SERIALIZE_SCALAR(instCnt); 678 679 if (!_switchedOut) { 680 /* Unlike _pid, _taskId is not serialized, as they are dynamically 681 * assigned unique ids that are only meaningful for the duration of 682 * a specific run. We will need to serialize the entire taskMap in 683 * system. */ 684 SERIALIZE_SCALAR(_pid); 685 686 // Serialize the threads, this is done by the CPU implementation. 
687 for (ThreadID i = 0; i < numThreads; ++i) { 688 ScopedCheckpointSection sec(cp, csprintf("xc.%i", i)); 689 interrupts[i]->serialize(cp); 690 serializeThread(cp, i); 691 } 692 } 693} 694 695void 696BaseCPU::unserialize(CheckpointIn &cp) 697{ 698 UNSERIALIZE_SCALAR(instCnt); 699 700 if (!_switchedOut) { 701 UNSERIALIZE_SCALAR(_pid); 702 703 // Unserialize the threads, this is done by the CPU implementation. 704 for (ThreadID i = 0; i < numThreads; ++i) { 705 ScopedCheckpointSection sec(cp, csprintf("xc.%i", i)); 706 interrupts[i]->unserialize(cp); 707 unserializeThread(cp, i); 708 } 709 } 710} 711 712void 713BaseCPU::scheduleInstStop(ThreadID tid, Counter insts, const char *cause) 714{ 715 const Tick now(comInstEventQueue[tid]->getCurTick()); 716 Event *event(new LocalSimLoopExitEvent(cause, 0)); 717 718 comInstEventQueue[tid]->schedule(event, now + insts); 719} 720 721uint64_t 722BaseCPU::getCurrentInstCount(ThreadID tid) 723{ 724 return Tick(comInstEventQueue[tid]->getCurTick()); 725} 726 727AddressMonitor::AddressMonitor() { 728 armed = false; 729 waiting = false; 730 gotWakeup = false; 731} 732 733bool AddressMonitor::doMonitor(PacketPtr pkt) { 734 assert(pkt->req->hasPaddr()); 735 if (armed && waiting) { 736 if (pAddr == pkt->getAddr()) { 737 DPRINTF(Mwait,"pAddr=0x%lx invalidated: waking up core\n", 738 pkt->getAddr()); 739 waiting = false; 740 return true; 741 } 742 } 743 return false; 744} 745 746void 747BaseCPU::scheduleLoadStop(ThreadID tid, Counter loads, const char *cause) 748{ 749 const Tick now(comLoadEventQueue[tid]->getCurTick()); 750 Event *event(new LocalSimLoopExitEvent(cause, 0)); 751 752 comLoadEventQueue[tid]->schedule(event, now + loads); 753} 754 755 756void 757BaseCPU::traceFunctionsInternal(Addr pc) 758{ 759 if (!debugSymbolTable) 760 return; 761 762 // if pc enters different function, print new function symbol and 763 // update saved range. Otherwise do nothing. 
764 if (pc < currentFunctionStart || pc >= currentFunctionEnd) { 765 string sym_str; 766 bool found = debugSymbolTable->findNearestSymbol(pc, sym_str, 767 currentFunctionStart, 768 currentFunctionEnd); 769 770 if (!found) { 771 // no symbol found: use addr as label 772 sym_str = csprintf("0x%x", pc); 773 currentFunctionStart = pc; 774 currentFunctionEnd = pc + 1; 775 } 776 777 ccprintf(*functionTraceStream, " (%d)\n%d: %s", 778 curTick() - functionEntryTick, curTick(), sym_str); 779 functionEntryTick = curTick(); 780 } 781} 782