base.cc revision 9814
/*
 * Copyright (c) 2011-2012 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * Copyright (c) 2011 Regents of the University of California
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 *          Nathan Binkert
 *          Rick Strong
 */

#include <iostream>
#include <sstream>
#include <string>

#include "arch/tlb.hh"
#include "base/loader/symtab.hh"
#include "base/cprintf.hh"
#include "base/misc.hh"
#include "base/output.hh"
#include "base/trace.hh"
#include "cpu/base.hh"
#include "cpu/checker/cpu.hh"
#include "cpu/cpuevent.hh"
#include "cpu/profile.hh"
#include "cpu/thread_context.hh"
#include "debug/SyscallVerbose.hh"
#include "params/BaseCPU.hh"
#include "sim/full_system.hh"
#include "sim/process.hh"
#include "sim/sim_events.hh"
#include "sim/sim_exit.hh"
#include "sim/system.hh"

// Hack
#include "sim/stat_control.hh"

using namespace std;

vector<BaseCPU *> BaseCPU::cpuList;
// This variable reflects the max number of threads in any CPU.  Be
// careful to use it only once all the CPUs that you care about have
// been initialized.
int maxThreadsPerCPU = 1;

CPUProgressEvent::CPUProgressEvent(BaseCPU *_cpu, Tick ival)
    : Event(Event::Progress_Event_Pri), _interval(ival), lastNumInst(0),
      cpu(_cpu), _repeatEvent(true)
{
    if (_interval)
        cpu->schedule(this, curTick() + _interval);
}

void
CPUProgressEvent::process()
{
    Counter temp = cpu->totalOps();
#ifndef NDEBUG
    double ipc = double(temp - lastNumInst) / (_interval / cpu->clockPeriod());

    DPRINTFN("%s progress event, total committed:%i, progress insts committed: "
             "%lli, IPC: %0.8d\n", cpu->name(), temp, temp - lastNumInst,
             ipc);
    ipc = 0.0;
#else
    cprintf("%lli: %s progress event, total committed:%i, progress insts "
            "committed: %lli\n", curTick(), cpu->name(), temp,
            temp - lastNumInst);
#endif
    lastNumInst = temp;

    if (_repeatEvent)
        cpu->schedule(this, curTick() + _interval);
}

const char *
CPUProgressEvent::description() const
{
    return "CPU Progress";
}

BaseCPU::BaseCPU(Params *p, bool is_checker)
    : MemObject(p), instCnt(0), _cpuId(p->cpu_id),
      _instMasterId(p->system->getMasterId(name() + ".inst")),
      _dataMasterId(p->system->getMasterId(name() + ".data")),
      _taskId(ContextSwitchTaskId::Unknown), _pid(Request::invldPid),
      _switchedOut(p->switched_out), _cacheLineSize(p->system->cacheLineSize()),
      interrupts(p->interrupts), profileEvent(NULL),
      numThreads(p->numThreads), system(p->system)
{
    // if Python did not provide a valid ID, do it here
    if (_cpuId == -1) {
        _cpuId = cpuList.size();
    }

    // add self to global list of CPUs
    cpuList.push_back(this);

    DPRINTF(SyscallVerbose, "Constructing CPU with id %d\n", _cpuId);

    if (numThreads > maxThreadsPerCPU)
        maxThreadsPerCPU = numThreads;

    // allocate per-thread instruction-based event queues
    comInstEventQueue = new EventQueue *[numThreads];
    for (ThreadID tid = 0; tid < numThreads; ++tid)
        comInstEventQueue[tid] =
            new EventQueue("instruction-based event queue");

    //
    // set up instruction-count-based termination events, if any
    //
    if (p->max_insts_any_thread != 0) {
        const char *cause = "a thread reached the max instruction count";
        for (ThreadID tid = 0; tid < numThreads; ++tid)
            scheduleInstStop(tid, p->max_insts_any_thread, cause);
    }

    // Set up instruction-count-based termination events for SimPoints.
    // Typically, there is more than one action point.
    // Simulation.py is responsible for taking the necessary actions upon
    // exiting the simulation loop.
    if (!p->simpoint_start_insts.empty()) {
        const char *cause = "simpoint starting point found";
        for (size_t i = 0; i < p->simpoint_start_insts.size(); ++i)
            scheduleInstStop(0, p->simpoint_start_insts[i], cause);
    }

    if (p->max_insts_all_threads != 0) {
        const char *cause = "all threads reached the max instruction count";

        // allocate & initialize shared downcounter: each event will
        // decrement this when triggered; simulation will terminate
        // when counter reaches 0
        int *counter = new int;
        *counter = numThreads;
        for (ThreadID tid = 0; tid < numThreads; ++tid) {
            Event *event = new CountedExitEvent(cause, *counter);
            comInstEventQueue[tid]->schedule(event, p->max_insts_all_threads);
        }
    }

    // allocate per-thread load-based event queues
    comLoadEventQueue = new EventQueue *[numThreads];
    for (ThreadID tid = 0; tid < numThreads; ++tid)
        comLoadEventQueue[tid] = new EventQueue("load-based event queue");

    //
    // set up load-count-based termination events, if any
    //
    if (p->max_loads_any_thread != 0) {
        const char *cause = "a thread reached the max load count";
        for (ThreadID tid = 0; tid < numThreads; ++tid)
            scheduleLoadStop(tid, p->max_loads_any_thread, cause);
    }

    if (p->max_loads_all_threads != 0) {
        const char *cause = "all threads reached the max load count";
        // allocate & initialize shared downcounter: each event will
        // decrement this when triggered; simulation will terminate
        // when counter reaches 0
        int *counter = new int;
        *counter = numThreads;
        for (ThreadID tid = 0; tid < numThreads; ++tid) {
            Event *event = new CountedExitEvent(cause, *counter);
            comLoadEventQueue[tid]->schedule(event, p->max_loads_all_threads);
        }
    }

    functionTracingEnabled = false;
    if (p->function_trace) {
        const string fname = csprintf("ftrace.%s", name());
        functionTraceStream = simout.find(fname);
        if (!functionTraceStream)
            functionTraceStream = simout.create(fname);

        currentFunctionStart = currentFunctionEnd = 0;
        functionEntryTick = p->function_trace_start;

        if (p->function_trace_start == 0) {
            functionTracingEnabled = true;
        } else {
            typedef EventWrapper<BaseCPU, &BaseCPU::enableFunctionTrace> wrap;
            Event *event = new wrap(this, true);
            schedule(event, p->function_trace_start);
        }
    }

    // The interrupt controller should always be present unless this
    // CPU is switched in later or is a checker CPU
    if (!params()->switched_out && !is_checker) {
        if (interrupts) {
            interrupts->setCPU(this);
        } else {
            fatal("CPU %s has no interrupt controller.\n"
                  "Ensure createInterruptController() is called.\n", name());
        }
    }

    if (FullSystem) {
        if (params()->profile)
            profileEvent = new ProfileEvent(this, params()->profile);
    }
    tracer = params()->tracer;

    if (params()->isa.size() != numThreads) {
        fatal("Number of ISAs (%i) assigned to the CPU does not equal number "
              "of threads (%i).\n", params()->isa.size(), numThreads);
    }
}

void
BaseCPU::enableFunctionTrace()
{
    functionTracingEnabled = true;
}

BaseCPU::~BaseCPU()
{
    delete profileEvent;
    delete[] comLoadEventQueue;
    delete[] comInstEventQueue;
}

void
BaseCPU::init()
{
    if (!params()->switched_out) {
        registerThreadContexts();

        verifyMemoryMode();
    }
}

void
BaseCPU::startup()
{
    if (FullSystem) {
        if (!params()->switched_out && profileEvent)
            schedule(profileEvent, curTick());
    }

    if (params()->progress_interval) {
        new CPUProgressEvent(this, params()->progress_interval);
    }
}


void
BaseCPU::regStats()
{
    using namespace Stats;

    numCycles
        .name(name() + ".numCycles")
        .desc("number of cpu cycles simulated")
        ;

    numWorkItemsStarted
        .name(name() + ".numWorkItemsStarted")
        .desc("number of work items this cpu started")
        ;

    numWorkItemsCompleted
        .name(name() + ".numWorkItemsCompleted")
        .desc("number of work items this cpu completed")
        ;

    int size = threadContexts.size();
    if (size > 1) {
        for (int i = 0; i < size; ++i) {
            stringstream namestr;
            ccprintf(namestr, "%s.ctx%d", name(), i);
            threadContexts[i]->regStats(namestr.str());
        }
    } else if (size == 1)
        threadContexts[0]->regStats(name());
}

BaseMasterPort &
BaseCPU::getMasterPort(const string &if_name, PortID idx)
{
    // Get the right port based on name. This applies to all the
    // subclasses of the base CPU and relies on their implementation
    // of getDataPort and getInstPort. In all cases these methods
    // return a MasterPort pointer.
    if (if_name == "dcache_port")
        return getDataPort();
    else if (if_name == "icache_port")
        return getInstPort();
    else
        return MemObject::getMasterPort(if_name, idx);
}

void
BaseCPU::registerThreadContexts()
{
    ThreadID size = threadContexts.size();
    for (ThreadID tid = 0; tid < size; ++tid) {
        ThreadContext *tc = threadContexts[tid];

        /** This is so that contextId and cpuId match where there is a
         * 1cpu:1context relationship.  Otherwise, the order of registration
         * could affect the assignment and cpu 1 could have context id 3, for
         * example.  We may even want to do something like this for SMT so
         * that cpu 0 has the lowest thread contexts and cpu N has the
         * highest, but I'll just do this for now.
         */
        if (numThreads == 1)
            tc->setContextId(system->registerThreadContext(tc, _cpuId));
        else
            tc->setContextId(system->registerThreadContext(tc));

        if (!FullSystem)
            tc->getProcessPtr()->assignThreadContext(tc->contextId());
    }
}


int
BaseCPU::findContext(ThreadContext *tc)
{
    ThreadID size = threadContexts.size();
    for (ThreadID tid = 0; tid < size; ++tid) {
        if (tc == threadContexts[tid])
            return tid;
    }
    return 0;
}

void
BaseCPU::switchOut()
{
    assert(!_switchedOut);
    _switchedOut = true;
    if (profileEvent && profileEvent->scheduled())
        deschedule(profileEvent);

    // Flush all TLBs in the CPU to avoid having stale translations if
    // it gets switched in later.
    flushTLBs();
}

void
BaseCPU::takeOverFrom(BaseCPU *oldCPU)
{
    assert(threadContexts.size() == oldCPU->threadContexts.size());
    assert(_cpuId == oldCPU->cpuId());
    assert(_switchedOut);
    assert(oldCPU != this);
    _pid = oldCPU->getPid();
    _taskId = oldCPU->taskId();
    _switchedOut = false;

    ThreadID size = threadContexts.size();
    for (ThreadID i = 0; i < size; ++i) {
        ThreadContext *newTC = threadContexts[i];
        ThreadContext *oldTC = oldCPU->threadContexts[i];

        newTC->takeOverFrom(oldTC);

        CpuEvent::replaceThreadContext(oldTC, newTC);

        assert(newTC->contextId() == oldTC->contextId());
        assert(newTC->threadId() == oldTC->threadId());
        system->replaceThreadContext(newTC, newTC->contextId());

        /* This code no longer works since the zero register (e.g.,
         * r31 on Alpha) doesn't necessarily contain zero at this
         * point.
           if (DTRACE(Context))
               ThreadContext::compare(oldTC, newTC);
         */

        BaseMasterPort *old_itb_port = oldTC->getITBPtr()->getMasterPort();
        BaseMasterPort *old_dtb_port = oldTC->getDTBPtr()->getMasterPort();
        BaseMasterPort *new_itb_port = newTC->getITBPtr()->getMasterPort();
        BaseMasterPort *new_dtb_port = newTC->getDTBPtr()->getMasterPort();

        // Move over any table walker ports if they exist
        if (new_itb_port) {
            assert(!new_itb_port->isConnected());
            assert(old_itb_port);
            assert(old_itb_port->isConnected());
            BaseSlavePort &slavePort = old_itb_port->getSlavePort();
            old_itb_port->unbind();
            new_itb_port->bind(slavePort);
        }
        if (new_dtb_port) {
            assert(!new_dtb_port->isConnected());
            assert(old_dtb_port);
            assert(old_dtb_port->isConnected());
            BaseSlavePort &slavePort = old_dtb_port->getSlavePort();
            old_dtb_port->unbind();
            new_dtb_port->bind(slavePort);
        }

        // Check whether or not we have to transfer CheckerCPU
        // objects over in the switch
        CheckerCPU *oldChecker = oldTC->getCheckerCpuPtr();
        CheckerCPU *newChecker = newTC->getCheckerCpuPtr();
        if (oldChecker && newChecker) {
            BaseMasterPort *old_checker_itb_port =
                oldChecker->getITBPtr()->getMasterPort();
            BaseMasterPort *old_checker_dtb_port =
                oldChecker->getDTBPtr()->getMasterPort();
            BaseMasterPort *new_checker_itb_port =
                newChecker->getITBPtr()->getMasterPort();
            BaseMasterPort *new_checker_dtb_port =
                newChecker->getDTBPtr()->getMasterPort();

            // Move over any table walker ports if they exist for checker
            if (new_checker_itb_port) {
                assert(!new_checker_itb_port->isConnected());
                assert(old_checker_itb_port);
                assert(old_checker_itb_port->isConnected());
                BaseSlavePort &slavePort =
                    old_checker_itb_port->getSlavePort();
                old_checker_itb_port->unbind();
                new_checker_itb_port->bind(slavePort);
            }
            if (new_checker_dtb_port) {
                assert(!new_checker_dtb_port->isConnected());
                assert(old_checker_dtb_port);
                assert(old_checker_dtb_port->isConnected());
                BaseSlavePort &slavePort =
                    old_checker_dtb_port->getSlavePort();
                old_checker_dtb_port->unbind();
                new_checker_dtb_port->bind(slavePort);
            }
        }
    }

    interrupts = oldCPU->interrupts;
    interrupts->setCPU(this);
    oldCPU->interrupts = NULL;

    if (FullSystem) {
        for (ThreadID i = 0; i < size; ++i)
            threadContexts[i]->profileClear();

        if (profileEvent)
            schedule(profileEvent, curTick());
    }

    // All CPUs have an instruction and a data port, and the new CPU's
    // ports are dangling while the old CPU has its ports connected
    // already. Unbind the old CPU and then bind the ports of the one
    // we are switching to.
    assert(!getInstPort().isConnected());
    assert(oldCPU->getInstPort().isConnected());
    BaseSlavePort &inst_peer_port = oldCPU->getInstPort().getSlavePort();
    oldCPU->getInstPort().unbind();
    getInstPort().bind(inst_peer_port);

    assert(!getDataPort().isConnected());
    assert(oldCPU->getDataPort().isConnected());
    BaseSlavePort &data_peer_port = oldCPU->getDataPort().getSlavePort();
    oldCPU->getDataPort().unbind();
    getDataPort().bind(data_peer_port);
}

void
BaseCPU::flushTLBs()
{
    for (ThreadID i = 0; i < threadContexts.size(); ++i) {
        ThreadContext &tc(*threadContexts[i]);
        CheckerCPU *checker(tc.getCheckerCpuPtr());

        tc.getITBPtr()->flushAll();
        tc.getDTBPtr()->flushAll();
        if (checker) {
            checker->getITBPtr()->flushAll();
            checker->getDTBPtr()->flushAll();
        }
    }
}


BaseCPU::ProfileEvent::ProfileEvent(BaseCPU *_cpu, Tick _interval)
    : cpu(_cpu), interval(_interval)
{ }

void
BaseCPU::ProfileEvent::process()
{
    ThreadID size = cpu->threadContexts.size();
    for (ThreadID i = 0; i < size; ++i) {
        ThreadContext *tc = cpu->threadContexts[i];
        tc->profileSample();
    }

    cpu->schedule(this, curTick() + interval);
}

void
BaseCPU::serialize(std::ostream &os)
{
    SERIALIZE_SCALAR(instCnt);

    if (!_switchedOut) {
        /* Unlike _pid, _taskId is not serialized, as task ids are
         * dynamically assigned unique ids that are only meaningful for
         * the duration of a specific run. We would need to serialize the
         * entire taskMap in system. */
        SERIALIZE_SCALAR(_pid);

        interrupts->serialize(os);

        // Serialize the threads, this is done by the CPU implementation.
        for (ThreadID i = 0; i < numThreads; ++i) {
            nameOut(os, csprintf("%s.xc.%i", name(), i));
            serializeThread(os, i);
        }
    }
}

void
BaseCPU::unserialize(Checkpoint *cp, const std::string &section)
{
    UNSERIALIZE_SCALAR(instCnt);

    if (!_switchedOut) {
        UNSERIALIZE_SCALAR(_pid);
        interrupts->unserialize(cp, section);

        // Unserialize the threads, this is done by the CPU implementation.
        for (ThreadID i = 0; i < numThreads; ++i)
            unserializeThread(cp, csprintf("%s.xc.%i", section, i), i);
    }
}

void
BaseCPU::scheduleInstStop(ThreadID tid, Counter insts, const char *cause)
{
    const Tick now(comInstEventQueue[tid]->getCurTick());
    Event *event(new SimLoopExitEvent(cause, 0));

    comInstEventQueue[tid]->schedule(event, now + insts);
}

void
BaseCPU::scheduleLoadStop(ThreadID tid, Counter loads, const char *cause)
{
    const Tick now(comLoadEventQueue[tid]->getCurTick());
    Event *event(new SimLoopExitEvent(cause, 0));

    comLoadEventQueue[tid]->schedule(event, now + loads);
}


void
BaseCPU::traceFunctionsInternal(Addr pc)
{
    if (!debugSymbolTable)
        return;

    // If the PC enters a different function, print the new function
    // symbol and update the saved range.  Otherwise do nothing.
    if (pc < currentFunctionStart || pc >= currentFunctionEnd) {
        string sym_str;
        bool found = debugSymbolTable->findNearestSymbol(pc, sym_str,
                                                         currentFunctionStart,
                                                         currentFunctionEnd);

        if (!found) {
            // no symbol found: use addr as label
            sym_str = csprintf("0x%x", pc);
            currentFunctionStart = pc;
            currentFunctionEnd = pc + 1;
        }

        ccprintf(*functionTraceStream, " (%d)\n%d: %s",
                 curTick() - functionEntryTick, curTick(), sym_str);
        functionEntryTick = curTick();
    }
}