base.cc revision 8817
/*
 * Copyright (c) 2011 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * Copyright (c) 2011 Regents of the University of California
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 *          Nathan Binkert
 *          Rick Strong
 */

#include <iostream>
#include <sstream>
#include <string>

#include "arch/tlb.hh"
#include "base/loader/symtab.hh"
#include "base/cprintf.hh"
#include "base/misc.hh"
#include "base/output.hh"
#include "base/trace.hh"
#include "config/use_checker.hh"
#include "cpu/base.hh"
#include "cpu/cpuevent.hh"
#include "cpu/profile.hh"
#include "cpu/thread_context.hh"
#include "debug/SyscallVerbose.hh"
#include "params/BaseCPU.hh"
#include "sim/full_system.hh"
#include "sim/process.hh"
#include "sim/sim_events.hh"
#include "sim/sim_exit.hh"
#include "sim/system.hh"

#if USE_CHECKER
#include "cpu/checker/cpu.hh"
#endif

// Hack
#include "sim/stat_control.hh"

using namespace std;
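
// Every BaseCPU registers itself in cpuList from its constructor; when
// Python passes a cpu_id of -1, the CPU's index in this list is used as
// its id instead.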
vector<BaseCPU *> BaseCPU::cpuList;

// This variable reflects the max number of threads in any CPU.  Be
// careful to only use it once all the CPUs that you care about have
// been initialized.
int maxThreadsPerCPU = 1;

CPUProgressEvent::CPUProgressEvent(BaseCPU *_cpu, Tick ival)
    : Event(Event::Progress_Event_Pri), _interval(ival), lastNumInst(0),
      cpu(_cpu), _repeatEvent(true)
{
    if (_interval)
        cpu->schedule(this, curTick() + _interval);
}

void
CPUProgressEvent::process()
{
    Counter temp = cpu->totalInstructions();
#ifndef NDEBUG
    double ipc = double(temp - lastNumInst) / (_interval / cpu->ticks(1));

    DPRINTFN("%s progress event, total committed:%i, progress insts committed: "
             "%lli, IPC: %0.8d\n", cpu->name(), temp, temp - lastNumInst,
             ipc);
    ipc = 0.0;
#else
    cprintf("%lli: %s progress event, total committed:%i, progress insts "
            "committed: %lli\n", curTick(), cpu->name(), temp,
            temp - lastNumInst);
#endif
    lastNumInst = temp;

    if (_repeatEvent)
        cpu->schedule(this, curTick() + _interval);
}

const char *
CPUProgressEvent::description() const
{
    return "CPU Progress";
}

BaseCPU::BaseCPU(Params *p)
    : MemObject(p), clock(p->clock), instCnt(0), _cpuId(p->cpu_id),
      interrupts(p->interrupts),
      numThreads(p->numThreads), system(p->system),
      phase(p->phase)
{
//    currentTick = curTick();

    // if Python did not provide a valid ID, do it here
    if (_cpuId == -1) {
        _cpuId = cpuList.size();
    }

    // add self to global list of CPUs
    cpuList.push_back(this);

    DPRINTF(SyscallVerbose, "Constructing CPU with id %d\n", _cpuId);

    if (numThreads > maxThreadsPerCPU)
        maxThreadsPerCPU = numThreads;

    // allocate per-thread instruction-based event queues
    comInstEventQueue = new EventQueue *[numThreads];
    for (ThreadID tid = 0; tid < numThreads; ++tid)
        comInstEventQueue[tid] =
            new EventQueue("instruction-based event queue");

    //
    // set up instruction-count-based termination events, if any
    //
    if (p->max_insts_any_thread != 0) {
        const char *cause = "a thread reached the max instruction count";
        for (ThreadID tid = 0; tid < numThreads; ++tid) {
            Event *event = new SimLoopExitEvent(cause, 0);
            comInstEventQueue[tid]->schedule(event, p->max_insts_any_thread);
        }
    }

    if (p->max_insts_all_threads != 0) {
        const char *cause = "all threads reached the max instruction count";

        // allocate & initialize shared downcounter: each event will
        // decrement this when triggered; simulation will terminate
        // when counter reaches 0
        int *counter = new int;
        *counter = numThreads;
        for (ThreadID tid = 0; tid < numThreads; ++tid) {
            Event *event = new CountedExitEvent(cause, *counter);
            comInstEventQueue[tid]->schedule(event, p->max_insts_all_threads);
        }
    }
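
    // For example (hypothetical numbers): with numThreads == 2 and
    // max_insts_all_threads == 1000000, each thread's instruction event
    // queue holds a CountedExitEvent sharing a single counter initialized
    // to 2, so the simulation exits only after both threads have committed
    // one million instructions.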

    // allocate per-thread load-based event queues
    comLoadEventQueue = new EventQueue *[numThreads];
    for (ThreadID tid = 0; tid < numThreads; ++tid)
        comLoadEventQueue[tid] = new EventQueue("load-based event queue");

    //
    // set up load-count-based termination events, if any
    //
    if (p->max_loads_any_thread != 0) {
        const char *cause = "a thread reached the max load count";
        for (ThreadID tid = 0; tid < numThreads; ++tid) {
            Event *event = new SimLoopExitEvent(cause, 0);
            comLoadEventQueue[tid]->schedule(event, p->max_loads_any_thread);
        }
    }

    if (p->max_loads_all_threads != 0) {
        const char *cause = "all threads reached the max load count";

        // allocate & initialize shared downcounter: each event will
        // decrement this when triggered; simulation will terminate
        // when counter reaches 0
        int *counter = new int;
        *counter = numThreads;
        for (ThreadID tid = 0; tid < numThreads; ++tid) {
            Event *event = new CountedExitEvent(cause, *counter);
            comLoadEventQueue[tid]->schedule(event, p->max_loads_all_threads);
        }
    }

    functionTracingEnabled = false;
    if (p->function_trace) {
        const string fname = csprintf("ftrace.%s", name());
        functionTraceStream = simout.find(fname);
        if (!functionTraceStream)
            functionTraceStream = simout.create(fname);

        currentFunctionStart = currentFunctionEnd = 0;
        functionEntryTick = p->function_trace_start;

        if (p->function_trace_start == 0) {
            functionTracingEnabled = true;
        } else {
            typedef EventWrapper<BaseCPU, &BaseCPU::enableFunctionTrace> wrap;
            Event *event = new wrap(this, true);
            schedule(event, p->function_trace_start);
        }
    }

    // Check if CPU model has interrupts connected. The CheckerCPU, for
    // example, cannot take interrupts directly.
    if (interrupts)
        interrupts->setCPU(this);

    if (FullSystem) {
        profileEvent = NULL;
        if (params()->profile)
            profileEvent = new ProfileEvent(this, params()->profile);
    }
    tracer = params()->tracer;
}

void
BaseCPU::enableFunctionTrace()
{
    functionTracingEnabled = true;
}

BaseCPU::~BaseCPU()
{
}

void
BaseCPU::init()
{
    if (!params()->defer_registration)
        registerThreadContexts();
}

void
BaseCPU::startup()
{
    if (FullSystem) {
        if (!params()->defer_registration && profileEvent)
            schedule(profileEvent, curTick());
    }

    if (params()->progress_interval) {
        Tick num_ticks = ticks(params()->progress_interval);

        new CPUProgressEvent(this, num_ticks);
    }
}

void
BaseCPU::regStats()
{
    using namespace Stats;

    numCycles
        .name(name() + ".numCycles")
        .desc("number of cpu cycles simulated")
        ;

    numWorkItemsStarted
        .name(name() + ".numWorkItemsStarted")
        .desc("number of work items this cpu started")
        ;

    numWorkItemsCompleted
        .name(name() + ".numWorkItemsCompleted")
        .desc("number of work items this cpu completed")
        ;

    int size = threadContexts.size();
    if (size > 1) {
        for (int i = 0; i < size; ++i) {
            stringstream namestr;
            ccprintf(namestr, "%s.ctx%d", name(), i);
            threadContexts[i]->regStats(namestr.str());
        }
    } else if (size == 1)
        threadContexts[0]->regStats(name());
}

Tick
BaseCPU::nextCycle()
{
    Tick next_tick = curTick() - phase + clock - 1;
    next_tick -= (next_tick % clock);
    next_tick += phase;
    return next_tick;
}

Tick
BaseCPU::nextCycle(Tick begin_tick)
{
    Tick next_tick = begin_tick;
    if (next_tick % clock != 0)
        next_tick = next_tick - (next_tick % clock) + clock;
    next_tick += phase;

    assert(next_tick >= curTick());
    return next_tick;
}
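
// Worked example with illustrative values: for clock == 500 ticks and
// phase == 0, nextCycle(1250) returns 1250 - (1250 % 500) + 500 == 1500,
// the first clock edge at or after the requested tick, while an already
// aligned tick such as 1000 comes back unchanged; the phase offset is
// added last so a CPU deliberately skewed from tick 0 stays on its own
// edges.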

void
BaseCPU::registerThreadContexts()
{
    ThreadID size = threadContexts.size();
    for (ThreadID tid = 0; tid < size; ++tid) {
        ThreadContext *tc = threadContexts[tid];

        /** This is so that contextId and cpuId match where there is a
         * 1cpu:1context relationship. Otherwise, the order of registration
         * could affect the assignment and cpu 1 could have context id 3, for
         * example. We may even want to do something like this for SMT so that
         * cpu 0 has the lowest thread contexts and cpu N has the highest, but
         * I'll just do this for now.
         */
        if (numThreads == 1)
            tc->setContextId(system->registerThreadContext(tc, _cpuId));
        else
            tc->setContextId(system->registerThreadContext(tc));

        if (!FullSystem)
            tc->getProcessPtr()->assignThreadContext(tc->contextId());
    }
}

int
BaseCPU::findContext(ThreadContext *tc)
{
    ThreadID size = threadContexts.size();
    for (ThreadID tid = 0; tid < size; ++tid) {
        if (tc == threadContexts[tid])
            return tid;
    }
    return 0;
}

void
BaseCPU::switchOut()
{
    if (profileEvent && profileEvent->scheduled())
        deschedule(profileEvent);
}

void
BaseCPU::takeOverFrom(BaseCPU *oldCPU)
{
    Port *ic = getPort("icache_port");
    Port *dc = getPort("dcache_port");
    assert(threadContexts.size() == oldCPU->threadContexts.size());

    _cpuId = oldCPU->cpuId();

    ThreadID size = threadContexts.size();
    for (ThreadID i = 0; i < size; ++i) {
        ThreadContext *newTC = threadContexts[i];
        ThreadContext *oldTC = oldCPU->threadContexts[i];

        newTC->takeOverFrom(oldTC);

        CpuEvent::replaceThreadContext(oldTC, newTC);

        assert(newTC->contextId() == oldTC->contextId());
        assert(newTC->threadId() == oldTC->threadId());
        system->replaceThreadContext(newTC, newTC->contextId());

        /* This code no longer works since the zero register (e.g.,
         * r31 on Alpha) doesn't necessarily contain zero at this
         * point.
           if (DTRACE(Context))
               ThreadContext::compare(oldTC, newTC);
         */

        Port *old_itb_port, *old_dtb_port, *new_itb_port, *new_dtb_port;
        old_itb_port = oldTC->getITBPtr()->getPort();
        old_dtb_port = oldTC->getDTBPtr()->getPort();
        new_itb_port = newTC->getITBPtr()->getPort();
        new_dtb_port = newTC->getDTBPtr()->getPort();

        // Move over any table walker ports if they exist
        if (new_itb_port && !new_itb_port->isConnected()) {
            assert(old_itb_port);
            Port *peer = old_itb_port->getPeer();
            new_itb_port->setPeer(peer);
            peer->setPeer(new_itb_port);
        }
        if (new_dtb_port && !new_dtb_port->isConnected()) {
            assert(old_dtb_port);
            Port *peer = old_dtb_port->getPeer();
            new_dtb_port->setPeer(peer);
            peer->setPeer(new_dtb_port);
        }

#if USE_CHECKER
        Port *old_checker_itb_port, *old_checker_dtb_port;
        Port *new_checker_itb_port, *new_checker_dtb_port;

        CheckerCPU *oldChecker =
            dynamic_cast<CheckerCPU*>(oldTC->getCheckerCpuPtr());
        CheckerCPU *newChecker =
            dynamic_cast<CheckerCPU*>(newTC->getCheckerCpuPtr());
        old_checker_itb_port = oldChecker->getITBPtr()->getPort();
        old_checker_dtb_port = oldChecker->getDTBPtr()->getPort();
        new_checker_itb_port = newChecker->getITBPtr()->getPort();
        new_checker_dtb_port = newChecker->getDTBPtr()->getPort();

        // Move over any table walker ports if they exist for checker
        if (new_checker_itb_port && !new_checker_itb_port->isConnected()) {
            assert(old_checker_itb_port);
            Port *peer = old_checker_itb_port->getPeer();
            new_checker_itb_port->setPeer(peer);
            peer->setPeer(new_checker_itb_port);
        }
        if (new_checker_dtb_port && !new_checker_dtb_port->isConnected()) {
            assert(old_checker_dtb_port);
            Port *peer = old_checker_dtb_port->getPeer();
            new_checker_dtb_port->setPeer(peer);
            peer->setPeer(new_checker_dtb_port);
        }
#endif
    }

    interrupts = oldCPU->interrupts;
    interrupts->setCPU(this);

    if (FullSystem) {
        for (ThreadID i = 0; i < size; ++i)
            threadContexts[i]->profileClear();

        if (profileEvent)
            schedule(profileEvent, curTick());
    }

    // Connect new CPU to old CPU's memory only if new CPU isn't
    // connected to anything. Also connect old CPU's memory to new
    // CPU.
    if (!ic->isConnected()) {
        Port *peer = oldCPU->getPort("icache_port")->getPeer();
        ic->setPeer(peer);
        peer->setPeer(ic);
    }

    if (!dc->isConnected()) {
        Port *peer = oldCPU->getPort("dcache_port")->getPeer();
        dc->setPeer(peer);
        peer->setPeer(dc);
    }
}

BaseCPU::ProfileEvent::ProfileEvent(BaseCPU *_cpu, Tick _interval)
    : cpu(_cpu), interval(_interval)
{ }

void
BaseCPU::ProfileEvent::process()
{
    ThreadID size = cpu->threadContexts.size();
    for (ThreadID i = 0; i < size; ++i) {
        ThreadContext *tc = cpu->threadContexts[i];
        tc->profileSample();
    }

    cpu->schedule(this, curTick() + interval);
}
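
// Illustrative timing (hypothetical interval): in full-system mode with
// params()->profile set to 1000000 ticks, startup() schedules this event
// at the current tick (unless registration is deferred), and each thread
// context then gets a profileSample() call once every million ticks until
// switchOut() deschedules the event.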

void
BaseCPU::serialize(std::ostream &os)
{
    SERIALIZE_SCALAR(instCnt);
    interrupts->serialize(os);
}

void
BaseCPU::unserialize(Checkpoint *cp, const std::string &section)
{
    UNSERIALIZE_SCALAR(instCnt);
    interrupts->unserialize(cp, section);
}

void
BaseCPU::traceFunctionsInternal(Addr pc)
{
    if (!debugSymbolTable)
        return;

    // if pc enters different function, print new function symbol and
    // update saved range.  Otherwise do nothing.
    if (pc < currentFunctionStart || pc >= currentFunctionEnd) {
        string sym_str;
        bool found = debugSymbolTable->findNearestSymbol(pc, sym_str,
                                                         currentFunctionStart,
                                                         currentFunctionEnd);

        if (!found) {
            // no symbol found: use addr as label
            sym_str = csprintf("0x%x", pc);
            currentFunctionStart = pc;
            currentFunctionEnd = pc + 1;
        }

        // Finish the previous function's line with the time spent in it,
        // then start a new line for the function just entered; a completed
        // line therefore reads, e.g., "12000: memcpy (350)".
        ccprintf(*functionTraceStream, " (%d)\n%d: %s",
                 curTick() - functionEntryTick, curTick(), sym_str);
        functionEntryTick = curTick();
    }
}

bool
BaseCPU::CpuPort::recvTiming(PacketPtr pkt)
{
    panic("BaseCPU doesn't expect recvTiming callback!");
    return true;
}

void
BaseCPU::CpuPort::recvRetry()
{
    panic("BaseCPU doesn't expect recvRetry callback!");
}

Tick
BaseCPU::CpuPort::recvAtomic(PacketPtr pkt)
{
    panic("BaseCPU doesn't expect recvAtomic callback!");
    return curTick();
}

void
BaseCPU::CpuPort::recvFunctional(PacketPtr pkt)
{
    // No internal storage to update (in the general case). In the
    // long term this should never be called, but that assumed a split
    // into master/slave and request/response.
}

void
BaseCPU::CpuPort::recvRangeChange()
{
}