// base.cc revision 2651:76db2c628241
/*
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */ 28 29#include <iostream> 30#include <string> 31#include <sstream> 32 33#include "base/cprintf.hh" 34#include "base/loader/symtab.hh" 35#include "base/misc.hh" 36#include "base/output.hh" 37#include "cpu/base.hh" 38#include "cpu/cpuevent.hh" 39#include "cpu/exec_context.hh" 40#include "cpu/profile.hh" 41#include "cpu/sampler/sampler.hh" 42#include "sim/param.hh" 43#include "sim/process.hh" 44#include "sim/sim_events.hh" 45#include "sim/system.hh" 46 47#include "base/trace.hh" 48 49#if FULL_SYSTEM 50#include "kern/kernel_stats.hh" 51#endif 52 53using namespace std; 54 55vector<BaseCPU *> BaseCPU::cpuList; 56 57// This variable reflects the max number of threads in any CPU. Be 58// careful to only use it once all the CPUs that you care about have 59// been initialized 60int maxThreadsPerCPU = 1; 61 62#if FULL_SYSTEM 63BaseCPU::BaseCPU(Params *p) 64 : SimObject(p->name), clock(p->clock), checkInterrupts(true), 65 params(p), number_of_threads(p->numberOfThreads), system(p->system) 66#else 67BaseCPU::BaseCPU(Params *p) 68 : SimObject(p->name), clock(p->clock), params(p), 69 number_of_threads(p->numberOfThreads), system(p->system) 70#endif 71{ 72 DPRINTF(FullCPU, "BaseCPU: Creating object, mem address %#x.\n", this); 73 74 // add self to global list of CPUs 75 cpuList.push_back(this); 76 77 DPRINTF(FullCPU, "BaseCPU: CPU added to cpuList, mem address %#x.\n", 78 this); 79 80 if (number_of_threads > maxThreadsPerCPU) 81 maxThreadsPerCPU = number_of_threads; 82 83 // allocate per-thread instruction-based event queues 84 comInstEventQueue = new EventQueue *[number_of_threads]; 85 for (int i = 0; i < number_of_threads; ++i) 86 comInstEventQueue[i] = new EventQueue("instruction-based event queue"); 87 88 // 89 // set up instruction-count-based termination events, if any 90 // 91 if (p->max_insts_any_thread != 0) 92 for (int i = 0; i < number_of_threads; ++i) 93 new SimExitEvent(comInstEventQueue[i], p->max_insts_any_thread, 94 "a thread reached the max instruction 
count"); 95 96 if (p->max_insts_all_threads != 0) { 97 // allocate & initialize shared downcounter: each event will 98 // decrement this when triggered; simulation will terminate 99 // when counter reaches 0 100 int *counter = new int; 101 *counter = number_of_threads; 102 for (int i = 0; i < number_of_threads; ++i) 103 new CountedExitEvent(comInstEventQueue[i], 104 "all threads reached the max instruction count", 105 p->max_insts_all_threads, *counter); 106 } 107 108 // allocate per-thread load-based event queues 109 comLoadEventQueue = new EventQueue *[number_of_threads]; 110 for (int i = 0; i < number_of_threads; ++i) 111 comLoadEventQueue[i] = new EventQueue("load-based event queue"); 112 113 // 114 // set up instruction-count-based termination events, if any 115 // 116 if (p->max_loads_any_thread != 0) 117 for (int i = 0; i < number_of_threads; ++i) 118 new SimExitEvent(comLoadEventQueue[i], p->max_loads_any_thread, 119 "a thread reached the max load count"); 120 121 if (p->max_loads_all_threads != 0) { 122 // allocate & initialize shared downcounter: each event will 123 // decrement this when triggered; simulation will terminate 124 // when counter reaches 0 125 int *counter = new int; 126 *counter = number_of_threads; 127 for (int i = 0; i < number_of_threads; ++i) 128 new CountedExitEvent(comLoadEventQueue[i], 129 "all threads reached the max load count", 130 p->max_loads_all_threads, *counter); 131 } 132 133#if FULL_SYSTEM 134 memset(interrupts, 0, sizeof(interrupts)); 135 intstatus = 0; 136#endif 137 138 functionTracingEnabled = false; 139 if (p->functionTrace) { 140 functionTraceStream = simout.find(csprintf("ftrace.%s", name())); 141 currentFunctionStart = currentFunctionEnd = 0; 142 functionEntryTick = p->functionTraceStart; 143 144 if (p->functionTraceStart == 0) { 145 functionTracingEnabled = true; 146 } else { 147 Event *e = 148 new EventWrapper<BaseCPU, &BaseCPU::enableFunctionTrace>(this, 149 true); 150 e->schedule(p->functionTraceStart); 151 } 
152 } 153#if FULL_SYSTEM 154 profileEvent = NULL; 155 if (params->profile) 156 profileEvent = new ProfileEvent(this, params->profile); 157 158 kernelStats = new Kernel::Statistics(system); 159#endif 160 161} 162 163BaseCPU::Params::Params() 164{ 165#if FULL_SYSTEM 166 profile = false; 167#endif 168} 169 170void 171BaseCPU::enableFunctionTrace() 172{ 173 functionTracingEnabled = true; 174} 175 176BaseCPU::~BaseCPU() 177{ 178#if FULL_SYSTEM 179 if (kernelStats) 180 delete kernelStats; 181#endif 182} 183 184void 185BaseCPU::init() 186{ 187 if (!params->deferRegistration) 188 registerExecContexts(); 189} 190 191void 192BaseCPU::startup() 193{ 194#if FULL_SYSTEM 195 if (!params->deferRegistration && profileEvent) 196 profileEvent->schedule(curTick); 197#endif 198} 199 200 201void 202BaseCPU::regStats() 203{ 204 using namespace Stats; 205 206 numCycles 207 .name(name() + ".numCycles") 208 .desc("number of cpu cycles simulated") 209 ; 210 211 int size = execContexts.size(); 212 if (size > 1) { 213 for (int i = 0; i < size; ++i) { 214 stringstream namestr; 215 ccprintf(namestr, "%s.ctx%d", name(), i); 216 execContexts[i]->regStats(namestr.str()); 217 } 218 } else if (size == 1) 219 execContexts[0]->regStats(name()); 220 221#if FULL_SYSTEM 222 if (kernelStats) 223 kernelStats->regStats(name() + ".kern"); 224#endif 225} 226 227 228void 229BaseCPU::registerExecContexts() 230{ 231 for (int i = 0; i < execContexts.size(); ++i) { 232 ExecContext *xc = execContexts[i]; 233 234#if FULL_SYSTEM 235 int id = params->cpu_id; 236 if (id != -1) 237 id += i; 238 239 xc->setCpuId(system->registerExecContext(xc, id)); 240#else 241 xc->setCpuId(xc->getProcessPtr()->registerExecContext(xc)); 242#endif 243 } 244} 245 246 247void 248BaseCPU::switchOut(Sampler *sampler) 249{ 250 panic("This CPU doesn't support sampling!"); 251} 252 253void 254BaseCPU::takeOverFrom(BaseCPU *oldCPU) 255{ 256 assert(execContexts.size() == oldCPU->execContexts.size()); 257 258 for (int i = 0; i < 
execContexts.size(); ++i) { 259 ExecContext *newXC = execContexts[i]; 260 ExecContext *oldXC = oldCPU->execContexts[i]; 261 262 newXC->takeOverFrom(oldXC); 263 264 CpuEvent::replaceExecContext(oldXC, newXC); 265 266 assert(newXC->readCpuId() == oldXC->readCpuId()); 267#if FULL_SYSTEM 268 system->replaceExecContext(newXC, newXC->readCpuId()); 269#else 270 assert(newXC->getProcessPtr() == oldXC->getProcessPtr()); 271 newXC->getProcessPtr()->replaceExecContext(newXC, newXC->readCpuId()); 272#endif 273 } 274 275#if FULL_SYSTEM 276 for (int i = 0; i < TheISA::NumInterruptLevels; ++i) 277 interrupts[i] = oldCPU->interrupts[i]; 278 intstatus = oldCPU->intstatus; 279 280 for (int i = 0; i < execContexts.size(); ++i) 281 execContexts[i]->profileClear(); 282 283 if (profileEvent) 284 profileEvent->schedule(curTick); 285#endif 286} 287 288 289#if FULL_SYSTEM 290BaseCPU::ProfileEvent::ProfileEvent(BaseCPU *_cpu, int _interval) 291 : Event(&mainEventQueue), cpu(_cpu), interval(_interval) 292{ } 293 294void 295BaseCPU::ProfileEvent::process() 296{ 297 for (int i = 0, size = cpu->execContexts.size(); i < size; ++i) { 298 ExecContext *xc = cpu->execContexts[i]; 299 xc->profileSample(); 300 } 301 302 schedule(curTick + interval); 303} 304 305void 306BaseCPU::post_interrupt(int int_num, int index) 307{ 308 DPRINTF(Interrupt, "Interrupt %d:%d posted\n", int_num, index); 309 310 if (int_num < 0 || int_num >= TheISA::NumInterruptLevels) 311 panic("int_num out of bounds\n"); 312 313 if (index < 0 || index >= sizeof(uint64_t) * 8) 314 panic("int_num out of bounds\n"); 315 316 checkInterrupts = true; 317 interrupts[int_num] |= 1 << index; 318 intstatus |= (ULL(1) << int_num); 319} 320 321void 322BaseCPU::clear_interrupt(int int_num, int index) 323{ 324 DPRINTF(Interrupt, "Interrupt %d:%d cleared\n", int_num, index); 325 326 if (int_num < 0 || int_num >= TheISA::NumInterruptLevels) 327 panic("int_num out of bounds\n"); 328 329 if (index < 0 || index >= sizeof(uint64_t) * 8) 330 
panic("int_num out of bounds\n"); 331 332 interrupts[int_num] &= ~(1 << index); 333 if (interrupts[int_num] == 0) 334 intstatus &= ~(ULL(1) << int_num); 335} 336 337void 338BaseCPU::clear_interrupts() 339{ 340 DPRINTF(Interrupt, "Interrupts all cleared\n"); 341 342 memset(interrupts, 0, sizeof(interrupts)); 343 intstatus = 0; 344} 345 346 347void 348BaseCPU::serialize(std::ostream &os) 349{ 350 SERIALIZE_ARRAY(interrupts, TheISA::NumInterruptLevels); 351 SERIALIZE_SCALAR(intstatus); 352 353#if FULL_SYSTEM 354 if (kernelStats) 355 kernelStats->serialize(os); 356#endif 357 358} 359 360void 361BaseCPU::unserialize(Checkpoint *cp, const std::string §ion) 362{ 363 UNSERIALIZE_ARRAY(interrupts, TheISA::NumInterruptLevels); 364 UNSERIALIZE_SCALAR(intstatus); 365 366#if FULL_SYSTEM 367 if (kernelStats) 368 kernelStats->unserialize(cp, section); 369#endif 370} 371 372#endif // FULL_SYSTEM 373 374void 375BaseCPU::traceFunctionsInternal(Addr pc) 376{ 377 if (!debugSymbolTable) 378 return; 379 380 // if pc enters different function, print new function symbol and 381 // update saved range. Otherwise do nothing. 382 if (pc < currentFunctionStart || pc >= currentFunctionEnd) { 383 string sym_str; 384 bool found = debugSymbolTable->findNearestSymbol(pc, sym_str, 385 currentFunctionStart, 386 currentFunctionEnd); 387 388 if (!found) { 389 // no symbol found: use addr as label 390 sym_str = csprintf("0x%x", pc); 391 currentFunctionStart = pc; 392 currentFunctionEnd = pc + 1; 393 } 394 395 ccprintf(*functionTraceStream, " (%d)\n%d: %s", 396 curTick - functionEntryTick, curTick, sym_str); 397 functionEntryTick = curTick; 398 } 399} 400 401 402DEFINE_SIM_OBJECT_CLASS_NAME("BaseCPU", BaseCPU) 403