base.cc revision 7897
/*
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * Copyright (c) 2011 Regents of the University of California
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 *          Nathan Binkert
 *          Rick Strong
 */

#include <iostream>
#include <string>
#include <sstream>

#include "arch/tlb.hh"
#include "base/cprintf.hh"
#include "base/loader/symtab.hh"
#include "base/misc.hh"
#include "base/output.hh"
#include "base/trace.hh"
#include "cpu/base.hh"
#include "cpu/cpuevent.hh"
#include "cpu/thread_context.hh"
#include "cpu/profile.hh"
#include "params/BaseCPU.hh"
#include "sim/sim_exit.hh"
#include "sim/process.hh"
#include "sim/sim_events.hh"
#include "sim/system.hh"

// Hack
#include "sim/stat_control.hh"

using namespace std;
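// Global list of every BaseCPU constructed so far.  The constructor
// below appends each new CPU to this list and, when Python does not
// supply a valid cpu_id, uses the current list size as the default ID.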
vector<BaseCPU *> BaseCPU::cpuList;

// This variable reflects the max number of threads in any CPU.  Be
// careful to only use it once all the CPUs that you care about have
// been initialized
int maxThreadsPerCPU = 1;

CPUProgressEvent::CPUProgressEvent(BaseCPU *_cpu, Tick ival)
    : Event(Event::Progress_Event_Pri), _interval(ival), lastNumInst(0),
      cpu(_cpu), _repeatEvent(true)
{
    if (_interval)
        cpu->schedule(this, curTick() + _interval);
}

void
CPUProgressEvent::process()
{
    Counter temp = cpu->totalInstructions();
#ifndef NDEBUG
    double ipc = double(temp - lastNumInst) / (_interval / cpu->ticks(1));

    DPRINTFN("%s progress event, total committed:%i, progress insts committed: "
             "%lli, IPC: %0.8d\n", cpu->name(), temp, temp - lastNumInst,
             ipc);
    ipc = 0.0;
#else
    cprintf("%lli: %s progress event, total committed:%i, progress insts "
            "committed: %lli\n", curTick(), cpu->name(), temp,
            temp - lastNumInst);
#endif
    lastNumInst = temp;

    if (_repeatEvent)
        cpu->schedule(this, curTick() + _interval);
}

const char *
CPUProgressEvent::description() const
{
    return "CPU Progress";
}

#if FULL_SYSTEM
BaseCPU::BaseCPU(Params *p)
    : MemObject(p), clock(p->clock), instCnt(0), _cpuId(p->cpu_id),
      interrupts(p->interrupts),
      numThreads(p->numThreads), system(p->system),
      phase(p->phase)
#else
BaseCPU::BaseCPU(Params *p)
    : MemObject(p), clock(p->clock), _cpuId(p->cpu_id),
      numThreads(p->numThreads), system(p->system),
      phase(p->phase)
#endif
{
//    currentTick = curTick();

    // if Python did not provide a valid ID, do it here
    if (_cpuId == -1) {
        _cpuId = cpuList.size();
    }

    // add self to global list of CPUs
    cpuList.push_back(this);

    DPRINTF(SyscallVerbose, "Constructing CPU with id %d\n", _cpuId);

    if (numThreads > maxThreadsPerCPU)
        maxThreadsPerCPU = numThreads;

    // allocate per-thread instruction-based event queues
    comInstEventQueue = new EventQueue *[numThreads];
    for (ThreadID tid = 0; tid < numThreads; ++tid)
        comInstEventQueue[tid] =
            new EventQueue("instruction-based event queue");

    //
    // set up instruction-count-based termination events, if any
    //
    if (p->max_insts_any_thread != 0) {
        const char *cause = "a thread reached the max instruction count";
        for (ThreadID tid = 0; tid < numThreads; ++tid) {
            Event *event = new SimLoopExitEvent(cause, 0);
            comInstEventQueue[tid]->schedule(event, p->max_insts_any_thread);
        }
    }

    if (p->max_insts_all_threads != 0) {
        const char *cause = "all threads reached the max instruction count";

        // allocate & initialize shared downcounter: each event will
        // decrement this when triggered; simulation will terminate
        // when counter reaches 0
        int *counter = new int;
        *counter = numThreads;
        for (ThreadID tid = 0; tid < numThreads; ++tid) {
            Event *event = new CountedExitEvent(cause, *counter);
            comInstEventQueue[tid]->schedule(event, p->max_insts_all_threads);
        }
    }
    // allocate per-thread load-based event queues
    comLoadEventQueue = new EventQueue *[numThreads];
    for (ThreadID tid = 0; tid < numThreads; ++tid)
        comLoadEventQueue[tid] = new EventQueue("load-based event queue");

    //
    // set up load-count-based termination events, if any
    //
    if (p->max_loads_any_thread != 0) {
        const char *cause = "a thread reached the max load count";
        for (ThreadID tid = 0; tid < numThreads; ++tid) {
            Event *event = new SimLoopExitEvent(cause, 0);
            comLoadEventQueue[tid]->schedule(event, p->max_loads_any_thread);
        }
    }

    if (p->max_loads_all_threads != 0) {
        const char *cause = "all threads reached the max load count";
        // allocate & initialize shared downcounter: each event will
        // decrement this when triggered; simulation will terminate
        // when counter reaches 0
        int *counter = new int;
        *counter = numThreads;
        for (ThreadID tid = 0; tid < numThreads; ++tid) {
            Event *event = new CountedExitEvent(cause, *counter);
            comLoadEventQueue[tid]->schedule(event, p->max_loads_all_threads);
        }
    }

    functionTracingEnabled = false;
    if (p->function_trace) {
        functionTraceStream = simout.find(csprintf("ftrace.%s", name()));
        currentFunctionStart = currentFunctionEnd = 0;
        functionEntryTick = p->function_trace_start;

        if (p->function_trace_start == 0) {
            functionTracingEnabled = true;
        } else {
            typedef EventWrapper<BaseCPU, &BaseCPU::enableFunctionTrace> wrap;
            Event *event = new wrap(this, true);
            schedule(event, p->function_trace_start);
        }
    }
#if FULL_SYSTEM
    interrupts->setCPU(this);

    profileEvent = NULL;
    if (params()->profile)
        profileEvent = new ProfileEvent(this, params()->profile);
#endif
    tracer = params()->tracer;
}

void
BaseCPU::enableFunctionTrace()
{
    functionTracingEnabled = true;
}

BaseCPU::~BaseCPU()
{
}

void
BaseCPU::init()
{
    if (!params()->defer_registration)
        registerThreadContexts();
}

void
BaseCPU::startup()
{
#if FULL_SYSTEM
    if (!params()->defer_registration && profileEvent)
        schedule(profileEvent, curTick());
#endif

    if (params()->progress_interval) {
        Tick num_ticks = ticks(params()->progress_interval);

        // CPUProgressEvent schedules itself in its constructor
        Event *event;
        event = new CPUProgressEvent(this, num_ticks);
    }
}


void
BaseCPU::regStats()
{
    using namespace Stats;

    numCycles
        .name(name() + ".numCycles")
        .desc("number of cpu cycles simulated")
        ;

    int size = threadContexts.size();
    if (size > 1) {
        for (int i = 0; i < size; ++i) {
            stringstream namestr;
            ccprintf(namestr, "%s.ctx%d", name(), i);
            threadContexts[i]->regStats(namestr.str());
        }
    } else if (size == 1)
        threadContexts[0]->regStats(name());

#if FULL_SYSTEM
#endif
}
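// nextCycle() rounds the current tick up to this CPU's next clock edge,
// where edges fall at (phase + n * clock); a tick already on an edge is
// returned unchanged.  For example, with clock = 500, phase = 0 and
// curTick() = 1250, the result is 1500.  The overload taking begin_tick
// rounds begin_tick up to a multiple of the clock period and then adds
// the phase offset.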
Tick
BaseCPU::nextCycle()
{
    Tick next_tick = curTick() - phase + clock - 1;
    next_tick -= (next_tick % clock);
    next_tick += phase;
    return next_tick;
}

Tick
BaseCPU::nextCycle(Tick begin_tick)
{
    Tick next_tick = begin_tick;
    if (next_tick % clock != 0)
        next_tick = next_tick - (next_tick % clock) + clock;
    next_tick += phase;

    assert(next_tick >= curTick());
    return next_tick;
}

void
BaseCPU::registerThreadContexts()
{
    ThreadID size = threadContexts.size();
    for (ThreadID tid = 0; tid < size; ++tid) {
        ThreadContext *tc = threadContexts[tid];

        /** This is so that contextId and cpuId match where there is a
         * 1cpu:1context relationship.  Otherwise, the order of registration
         * could affect the assignment and cpu 1 could have context id 3, for
         * example.  We may even want to do something like this for SMT so
         * that cpu 0 has the lowest thread contexts and cpu N has the
         * highest, but I'll just do this for now
         */
        if (numThreads == 1)
            tc->setContextId(system->registerThreadContext(tc, _cpuId));
        else
            tc->setContextId(system->registerThreadContext(tc));
#if !FULL_SYSTEM
        tc->getProcessPtr()->assignThreadContext(tc->contextId());
#endif
    }
}


int
BaseCPU::findContext(ThreadContext *tc)
{
    ThreadID size = threadContexts.size();
    for (ThreadID tid = 0; tid < size; ++tid) {
        if (tc == threadContexts[tid])
            return tid;
    }
    return 0;
}

void
BaseCPU::switchOut()
{
//    panic("This CPU doesn't support sampling!");
#if FULL_SYSTEM
    if (profileEvent && profileEvent->scheduled())
        deschedule(profileEvent);
#endif
}
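// Take over simulation state from oldCPU.  This is the counterpart to
// switchOut() and is typically used when one CPU model replaces another
// on the same workload (for example, switching from a simple CPU used
// for fast-forwarding to a detailed one).  Thread context state is
// copied over, the system's context table and any CpuEvents are
// re-pointed at the new contexts, and the new CPU's TLB-walker and
// cache ports are spliced into the old CPU's peers if they are not
// already connected.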
void
BaseCPU::takeOverFrom(BaseCPU *oldCPU, Port *ic, Port *dc)
{
    assert(threadContexts.size() == oldCPU->threadContexts.size());

    _cpuId = oldCPU->cpuId();

    ThreadID size = threadContexts.size();
    for (ThreadID i = 0; i < size; ++i) {
        ThreadContext *newTC = threadContexts[i];
        ThreadContext *oldTC = oldCPU->threadContexts[i];

        newTC->takeOverFrom(oldTC);

        CpuEvent::replaceThreadContext(oldTC, newTC);

        assert(newTC->contextId() == oldTC->contextId());
        assert(newTC->threadId() == oldTC->threadId());
        system->replaceThreadContext(newTC, newTC->contextId());

        /* This code no longer works since the zero register (e.g.,
         * r31 on Alpha) doesn't necessarily contain zero at this
         * point.
           if (DTRACE(Context))
               ThreadContext::compare(oldTC, newTC);
         */

        Port *old_itb_port, *old_dtb_port, *new_itb_port, *new_dtb_port;
        old_itb_port = oldTC->getITBPtr()->getPort();
        old_dtb_port = oldTC->getDTBPtr()->getPort();
        new_itb_port = newTC->getITBPtr()->getPort();
        new_dtb_port = newTC->getDTBPtr()->getPort();

        // Move over any table walker ports if they exist
        if (new_itb_port && !new_itb_port->isConnected()) {
            assert(old_itb_port);
            Port *peer = old_itb_port->getPeer();
            new_itb_port->setPeer(peer);
            peer->setPeer(new_itb_port);
        }
        if (new_dtb_port && !new_dtb_port->isConnected()) {
            assert(old_dtb_port);
            Port *peer = old_dtb_port->getPeer();
            new_dtb_port->setPeer(peer);
            peer->setPeer(new_dtb_port);
        }
    }

#if FULL_SYSTEM
    interrupts = oldCPU->interrupts;
    interrupts->setCPU(this);

    for (ThreadID i = 0; i < size; ++i)
        threadContexts[i]->profileClear();

    if (profileEvent)
        schedule(profileEvent, curTick());
#endif

    // Connect new CPU to old CPU's memory only if new CPU isn't
    // connected to anything.  Also connect old CPU's memory to new
    // CPU.
    if (!ic->isConnected()) {
        Port *peer = oldCPU->getPort("icache_port")->getPeer();
        ic->setPeer(peer);
        peer->setPeer(ic);
    }

    if (!dc->isConnected()) {
        Port *peer = oldCPU->getPort("dcache_port")->getPeer();
        dc->setPeer(peer);
        peer->setPeer(dc);
    }
}


#if FULL_SYSTEM
BaseCPU::ProfileEvent::ProfileEvent(BaseCPU *_cpu, Tick _interval)
    : cpu(_cpu), interval(_interval)
{ }

void
BaseCPU::ProfileEvent::process()
{
    ThreadID size = cpu->threadContexts.size();
    for (ThreadID i = 0; i < size; ++i) {
        ThreadContext *tc = cpu->threadContexts[i];
        tc->profileSample();
    }

    cpu->schedule(this, curTick() + interval);
}

void
BaseCPU::serialize(std::ostream &os)
{
    SERIALIZE_SCALAR(instCnt);
    interrupts->serialize(os);
}

void
BaseCPU::unserialize(Checkpoint *cp, const std::string &section)
{
    UNSERIALIZE_SCALAR(instCnt);
    interrupts->unserialize(cp, section);
}

#endif // FULL_SYSTEM

void
BaseCPU::traceFunctionsInternal(Addr pc)
{
    if (!debugSymbolTable)
        return;

    // if pc enters different function, print new function symbol and
    // update saved range.  Otherwise do nothing.
    if (pc < currentFunctionStart || pc >= currentFunctionEnd) {
        string sym_str;
        bool found = debugSymbolTable->findNearestSymbol(pc, sym_str,
                                                         currentFunctionStart,
                                                         currentFunctionEnd);

        if (!found) {
            // no symbol found: use addr as label
            sym_str = csprintf("0x%x", pc);
            currentFunctionStart = pc;
            currentFunctionEnd = pc + 1;
        }

        ccprintf(*functionTraceStream, " (%d)\n%d: %s",
                 curTick() - functionEntryTick, curTick(), sym_str);
        functionEntryTick = curTick();
    }
}