base.cc revision 9446:644f2a2c9bfc
/*
 * Copyright (c) 2011-2012 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * Copyright (c) 2011 Regents of the University of California
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 *          Nathan Binkert
 *          Rick Strong
 */

#include <iostream>
#include <sstream>
#include <string>

#include "arch/tlb.hh"
#include "base/loader/symtab.hh"
#include "base/cprintf.hh"
#include "base/misc.hh"
#include "base/output.hh"
#include "base/trace.hh"
#include "cpu/base.hh"
#include "cpu/checker/cpu.hh"
#include "cpu/cpuevent.hh"
#include "cpu/profile.hh"
#include "cpu/thread_context.hh"
#include "debug/SyscallVerbose.hh"
#include "params/BaseCPU.hh"
#include "sim/full_system.hh"
#include "sim/process.hh"
#include "sim/sim_events.hh"
#include "sim/sim_exit.hh"
#include "sim/system.hh"

// Hack
#include "sim/stat_control.hh"

using namespace std;

vector<BaseCPU *> BaseCPU::cpuList;

// This variable reflects the max number of threads in any CPU.
Be 77// careful to only use it once all the CPUs that you care about have 78// been initialized 79int maxThreadsPerCPU = 1; 80 81CPUProgressEvent::CPUProgressEvent(BaseCPU *_cpu, Tick ival) 82 : Event(Event::Progress_Event_Pri), _interval(ival), lastNumInst(0), 83 cpu(_cpu), _repeatEvent(true) 84{ 85 if (_interval) 86 cpu->schedule(this, curTick() + _interval); 87} 88 89void 90CPUProgressEvent::process() 91{ 92 Counter temp = cpu->totalOps(); 93#ifndef NDEBUG 94 double ipc = double(temp - lastNumInst) / (_interval / cpu->clockPeriod()); 95 96 DPRINTFN("%s progress event, total committed:%i, progress insts committed: " 97 "%lli, IPC: %0.8d\n", cpu->name(), temp, temp - lastNumInst, 98 ipc); 99 ipc = 0.0; 100#else 101 cprintf("%lli: %s progress event, total committed:%i, progress insts " 102 "committed: %lli\n", curTick(), cpu->name(), temp, 103 temp - lastNumInst); 104#endif 105 lastNumInst = temp; 106 107 if (_repeatEvent) 108 cpu->schedule(this, curTick() + _interval); 109} 110 111const char * 112CPUProgressEvent::description() const 113{ 114 return "CPU Progress"; 115} 116 117BaseCPU::BaseCPU(Params *p, bool is_checker) 118 : MemObject(p), instCnt(0), _cpuId(p->cpu_id), 119 _instMasterId(p->system->getMasterId(name() + ".inst")), 120 _dataMasterId(p->system->getMasterId(name() + ".data")), 121 _taskId(ContextSwitchTaskId::Unknown), _pid(Request::invldPid), 122 _switchedOut(p->switched_out), 123 interrupts(p->interrupts), profileEvent(NULL), 124 numThreads(p->numThreads), system(p->system) 125{ 126 // if Python did not provide a valid ID, do it here 127 if (_cpuId == -1 ) { 128 _cpuId = cpuList.size(); 129 } 130 131 // add self to global list of CPUs 132 cpuList.push_back(this); 133 134 DPRINTF(SyscallVerbose, "Constructing CPU with id %d\n", _cpuId); 135 136 if (numThreads > maxThreadsPerCPU) 137 maxThreadsPerCPU = numThreads; 138 139 // allocate per-thread instruction-based event queues 140 comInstEventQueue = new EventQueue *[numThreads]; 141 for (ThreadID tid = 0; tid < numThreads; ++tid) 142 comInstEventQueue[tid] = 143 new EventQueue("instruction-based event queue"); 144 145 // 146 // set up instruction-count-based termination events, if any 147 // 148 if (p->max_insts_any_thread != 0) { 149 const char *cause = "a thread reached the max instruction count"; 150 for (ThreadID tid = 0; tid < numThreads; ++tid) { 151 Event *event = new SimLoopExitEvent(cause, 0); 152 comInstEventQueue[tid]->schedule(event, p->max_insts_any_thread); 153 } 154 } 155 156 if (p->max_insts_all_threads != 0) { 157 const char *cause = "all threads reached the max instruction count"; 158 159 // allocate & initialize shared downcounter: each event will 160 // decrement this when triggered; simulation will terminate 161 // when counter reaches 0 162 int *counter = new int; 163 *counter = numThreads; 164 for (ThreadID tid = 0; tid < numThreads; ++tid) { 165 Event *event = new CountedExitEvent(cause, *counter); 166 comInstEventQueue[tid]->schedule(event, p->max_insts_all_threads); 167 } 168 } 169 170 // allocate per-thread load-based event queues 171 comLoadEventQueue = new EventQueue *[numThreads]; 172 for (ThreadID tid = 0; tid < numThreads; ++tid) 173 comLoadEventQueue[tid] = new EventQueue("load-based event queue"); 174 175 // 176 // set up instruction-count-based termination events, if any 177 // 178 if (p->max_loads_any_thread != 0) { 179 const char *cause = "a thread reached the max load count"; 180 for (ThreadID tid = 0; tid < numThreads; ++tid) { 181 Event *event = new SimLoopExitEvent(cause, 0); 182 
            comLoadEventQueue[tid]->schedule(event, p->max_loads_any_thread);
        }
    }

    if (p->max_loads_all_threads != 0) {
        const char *cause = "all threads reached the max load count";
        // allocate & initialize shared downcounter: each event will
        // decrement this when triggered; simulation will terminate
        // when counter reaches 0
        int *counter = new int;
        *counter = numThreads;
        for (ThreadID tid = 0; tid < numThreads; ++tid) {
            Event *event = new CountedExitEvent(cause, *counter);
            comLoadEventQueue[tid]->schedule(event, p->max_loads_all_threads);
        }
    }

    functionTracingEnabled = false;
    if (p->function_trace) {
        const string fname = csprintf("ftrace.%s", name());
        functionTraceStream = simout.find(fname);
        if (!functionTraceStream)
            functionTraceStream = simout.create(fname);

        currentFunctionStart = currentFunctionEnd = 0;
        functionEntryTick = p->function_trace_start;

        if (p->function_trace_start == 0) {
            functionTracingEnabled = true;
        } else {
            typedef EventWrapper<BaseCPU, &BaseCPU::enableFunctionTrace> wrap;
            Event *event = new wrap(this, true);
            schedule(event, p->function_trace_start);
        }
    }

    // The interrupts should always be present unless this CPU is
    // switched in later or in case it is a checker CPU
    if (!params()->switched_out && !is_checker) {
        if (interrupts) {
            interrupts->setCPU(this);
        } else {
            fatal("CPU %s has no interrupt controller.\n"
                  "Ensure createInterruptController() is called.\n", name());
        }
    }

    if (FullSystem) {
        if (params()->profile)
            profileEvent = new ProfileEvent(this, params()->profile);
    }
    tracer = params()->tracer;

    if (params()->isa.size() != numThreads) {
        fatal("Number of ISAs (%i) assigned to the CPU does not equal number "
              "of threads (%i).\n", params()->isa.size(), numThreads);
    }
}

void
BaseCPU::enableFunctionTrace()
{
    functionTracingEnabled = true;
}

BaseCPU::~BaseCPU()
{
    delete profileEvent;
    delete[] comLoadEventQueue;
    delete[] comInstEventQueue;
}

void
BaseCPU::init()
{
    if (!params()->switched_out)
        registerThreadContexts();
}

void
BaseCPU::startup()
{
    if (FullSystem) {
        if (!params()->switched_out && profileEvent)
            schedule(profileEvent, curTick());
    }

    if (params()->progress_interval) {
        new CPUProgressEvent(this, params()->progress_interval);
    }
}


void
BaseCPU::regStats()
{
    using namespace Stats;

    numCycles
        .name(name() + ".numCycles")
        .desc("number of cpu cycles simulated")
        ;

    numWorkItemsStarted
        .name(name() + ".numWorkItemsStarted")
        .desc("number of work items this cpu started")
        ;

    numWorkItemsCompleted
        .name(name() + ".numWorkItemsCompleted")
        .desc("number of work items this cpu completed")
        ;

    int size = threadContexts.size();
    if (size > 1) {
        for (int i = 0; i < size; ++i) {
            stringstream namestr;
            ccprintf(namestr, "%s.ctx%d", name(), i);
            threadContexts[i]->regStats(namestr.str());
        }
    } else if (size == 1)
        threadContexts[0]->regStats(name());
}

BaseMasterPort &
BaseCPU::getMasterPort(const string &if_name, PortID idx)
{
    // Get the right port based on name. This applies to all the
    // subclasses of the base CPU and relies on their implementation
    // of getDataPort and getInstPort.
    // In all cases these methods return a reference to a CpuPort.
    if (if_name == "dcache_port")
        return getDataPort();
    else if (if_name == "icache_port")
        return getInstPort();
    else
        return MemObject::getMasterPort(if_name, idx);
}

void
BaseCPU::registerThreadContexts()
{
    ThreadID size = threadContexts.size();
    for (ThreadID tid = 0; tid < size; ++tid) {
        ThreadContext *tc = threadContexts[tid];

        /** This is so that contextId and cpuId match where there is a
         * 1cpu:1context relationship. Otherwise, the order of registration
         * could affect the assignment and cpu 1 could have context id 3, for
         * example. We may even want to do something like this for SMT so that
         * cpu 0 has the lowest thread contexts and cpu N has the highest, but
         * I'll just do this for now
         */
        if (numThreads == 1)
            tc->setContextId(system->registerThreadContext(tc, _cpuId));
        else
            tc->setContextId(system->registerThreadContext(tc));

        if (!FullSystem)
            tc->getProcessPtr()->assignThreadContext(tc->contextId());
    }
}


int
BaseCPU::findContext(ThreadContext *tc)
{
    // Return the thread ID of the given context on this CPU; fall
    // back to 0 if the context is not found.
    ThreadID size = threadContexts.size();
    for (ThreadID tid = 0; tid < size; ++tid) {
        if (tc == threadContexts[tid])
            return tid;
    }
    return 0;
}

void
BaseCPU::switchOut()
{
    assert(!_switchedOut);
    _switchedOut = true;
    if (profileEvent && profileEvent->scheduled())
        deschedule(profileEvent);

    // Flush all TLBs in the CPU to avoid having stale translations if
    // it gets switched in later.
    flushTLBs();
}

void
BaseCPU::takeOverFrom(BaseCPU *oldCPU)
{
    assert(threadContexts.size() == oldCPU->threadContexts.size());
    assert(_cpuId == oldCPU->cpuId());
    assert(_switchedOut);
    assert(oldCPU != this);
    _pid = oldCPU->getPid();
    _taskId = oldCPU->taskId();
    _switchedOut = false;

    ThreadID size = threadContexts.size();
    for (ThreadID i = 0; i < size; ++i) {
        ThreadContext *newTC = threadContexts[i];
        ThreadContext *oldTC = oldCPU->threadContexts[i];

        newTC->takeOverFrom(oldTC);

        CpuEvent::replaceThreadContext(oldTC, newTC);

        assert(newTC->contextId() == oldTC->contextId());
        assert(newTC->threadId() == oldTC->threadId());
        system->replaceThreadContext(newTC, newTC->contextId());

        /* This code no longer works since the zero register (e.g.,
         * r31 on Alpha) doesn't necessarily contain zero at this
         * point.
        if (DTRACE(Context))
            ThreadContext::compare(oldTC, newTC);
        */

        BaseMasterPort *old_itb_port = oldTC->getITBPtr()->getMasterPort();
        BaseMasterPort *old_dtb_port = oldTC->getDTBPtr()->getMasterPort();
        BaseMasterPort *new_itb_port = newTC->getITBPtr()->getMasterPort();
        BaseMasterPort *new_dtb_port = newTC->getDTBPtr()->getMasterPort();

        // Move over any table walker ports if they exist
        if (new_itb_port) {
            assert(!new_itb_port->isConnected());
            assert(old_itb_port);
            assert(old_itb_port->isConnected());
            BaseSlavePort &slavePort = old_itb_port->getSlavePort();
            old_itb_port->unbind();
            new_itb_port->bind(slavePort);
        }
        if (new_dtb_port) {
            assert(!new_dtb_port->isConnected());
            assert(old_dtb_port);
            assert(old_dtb_port->isConnected());
            BaseSlavePort &slavePort = old_dtb_port->getSlavePort();
            old_dtb_port->unbind();
            new_dtb_port->bind(slavePort);
        }

        // Check whether or not we have to transfer CheckerCPU
        // objects over in the switch
        CheckerCPU *oldChecker = oldTC->getCheckerCpuPtr();
        CheckerCPU *newChecker = newTC->getCheckerCpuPtr();
        if (oldChecker && newChecker) {
            BaseMasterPort *old_checker_itb_port =
                oldChecker->getITBPtr()->getMasterPort();
            BaseMasterPort *old_checker_dtb_port =
                oldChecker->getDTBPtr()->getMasterPort();
            BaseMasterPort *new_checker_itb_port =
                newChecker->getITBPtr()->getMasterPort();
            BaseMasterPort *new_checker_dtb_port =
                newChecker->getDTBPtr()->getMasterPort();

            // Move over any table walker ports if they exist for checker
            if (new_checker_itb_port) {
                assert(!new_checker_itb_port->isConnected());
                assert(old_checker_itb_port);
                assert(old_checker_itb_port->isConnected());
                BaseSlavePort &slavePort =
                    old_checker_itb_port->getSlavePort();
                old_checker_itb_port->unbind();
                new_checker_itb_port->bind(slavePort);
            }
            if (new_checker_dtb_port) {
                assert(!new_checker_dtb_port->isConnected());
                assert(old_checker_dtb_port);
                assert(old_checker_dtb_port->isConnected());
                BaseSlavePort &slavePort =
                    old_checker_dtb_port->getSlavePort();
                old_checker_dtb_port->unbind();
                new_checker_dtb_port->bind(slavePort);
            }
        }
    }

    interrupts = oldCPU->interrupts;
    interrupts->setCPU(this);
    oldCPU->interrupts = NULL;

    if (FullSystem) {
        for (ThreadID i = 0; i < size; ++i)
            threadContexts[i]->profileClear();

        if (profileEvent)
            schedule(profileEvent, curTick());
    }

    // All CPUs have an instruction and a data port, and the new CPU's
    // ports are dangling while the old CPU has its ports connected
    // already. Unbind the old CPU and then bind the ports of the one
    // we are switching to.
    assert(!getInstPort().isConnected());
    assert(oldCPU->getInstPort().isConnected());
    BaseSlavePort &inst_peer_port = oldCPU->getInstPort().getSlavePort();
    oldCPU->getInstPort().unbind();
    getInstPort().bind(inst_peer_port);

    assert(!getDataPort().isConnected());
    assert(oldCPU->getDataPort().isConnected());
    BaseSlavePort &data_peer_port = oldCPU->getDataPort().getSlavePort();
    oldCPU->getDataPort().unbind();
    getDataPort().bind(data_peer_port);
}

void
BaseCPU::flushTLBs()
{
    for (ThreadID i = 0; i < threadContexts.size(); ++i) {
        ThreadContext &tc(*threadContexts[i]);
        CheckerCPU *checker(tc.getCheckerCpuPtr());

        tc.getITBPtr()->flushAll();
        tc.getDTBPtr()->flushAll();
        if (checker) {
            checker->getITBPtr()->flushAll();
            checker->getDTBPtr()->flushAll();
        }
    }
}


BaseCPU::ProfileEvent::ProfileEvent(BaseCPU *_cpu, Tick _interval)
    : cpu(_cpu), interval(_interval)
{ }

void
BaseCPU::ProfileEvent::process()
{
    ThreadID size = cpu->threadContexts.size();
    for (ThreadID i = 0; i < size; ++i) {
        ThreadContext *tc = cpu->threadContexts[i];
        tc->profileSample();
    }

    cpu->schedule(this, curTick() + interval);
}

void
BaseCPU::serialize(std::ostream &os)
{
    SERIALIZE_SCALAR(instCnt);

    /* Unlike _pid, _taskId is not serialized, as it is a dynamically
     * assigned unique id that is only meaningful for the duration of
     * a specific run. We will need to serialize the entire taskMap in
     * system. */
    SERIALIZE_SCALAR(_pid);

    interrupts->serialize(os);
}

void
BaseCPU::unserialize(Checkpoint *cp, const std::string &section)
{
    UNSERIALIZE_SCALAR(instCnt);
    UNSERIALIZE_SCALAR(_pid);
    interrupts->unserialize(cp, section);
}

void
BaseCPU::traceFunctionsInternal(Addr pc)
{
    if (!debugSymbolTable)
        return;

    // If the PC enters a different function, print the new function
    // symbol and update the saved range; otherwise do nothing.
    if (pc < currentFunctionStart || pc >= currentFunctionEnd) {
        string sym_str;
        bool found = debugSymbolTable->findNearestSymbol(pc, sym_str,
                                                         currentFunctionStart,
                                                         currentFunctionEnd);

        if (!found) {
            // no symbol found: use addr as label
            sym_str = csprintf("0x%x", pc);
            currentFunctionStart = pc;
            currentFunctionEnd = pc + 1;
        }

        ccprintf(*functionTraceStream, " (%d)\n%d: %s",
                 curTick() - functionEntryTick, curTick(), sym_str);
        functionEntryTick = curTick();
    }
}

bool
BaseCPU::CpuPort::recvTimingResp(PacketPtr pkt)
{
    panic("BaseCPU doesn't expect recvTiming!\n");
    return true;
}

void
BaseCPU::CpuPort::recvRetry()
{
    panic("BaseCPU doesn't expect recvRetry!\n");
}

void
BaseCPU::CpuPort::recvFunctionalSnoop(PacketPtr pkt)
{
    // No internal storage to update (in the general case). A CPU with
    // internal storage, e.g. an LSQ that should be part of the
    // coherent memory, has to check against stored data.
}