/*
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 *          Nathan Binkert
 */

#include <iostream>
#include <string>
#include <sstream>

#include "base/cprintf.hh"
#include "base/loader/symtab.hh"
#include "base/misc.hh"
#include "base/output.hh"
#include "cpu/base.hh"
#include "cpu/cpuevent.hh"
#include "cpu/thread_context.hh"
#include "cpu/profile.hh"
#include "sim/sim_exit.hh"
#include "sim/param.hh"
#include "sim/process.hh"
#include "sim/sim_events.hh"
#include "sim/system.hh"

#include "base/trace.hh"

// Hack
#include "sim/stat_control.hh"

using namespace std;

vector<BaseCPU *> BaseCPU::cpuList;

// This variable reflects the max number of threads in any CPU.  Be
// careful to only use it once all the CPUs that you care about have
// been initialized.
int maxThreadsPerCPU = 1;

CPUProgressEvent::CPUProgressEvent(EventQueue *q, Tick ival,
                                   BaseCPU *_cpu)
    : Event(q, Event::Stat_Event_Pri), interval(ival),
      lastNumInst(0), cpu(_cpu)
{
    if (interval)
        schedule(curTick + interval);
}

void
CPUProgressEvent::process()
{
    Counter temp = cpu->totalInstructions();
#ifndef NDEBUG
    double ipc = double(temp - lastNumInst) / (interval / cpu->cycles(1));

    DPRINTFN("%s progress event, instructions committed: %lli, IPC: %0.8d\n",
             cpu->name(), temp - lastNumInst, ipc);
    ipc = 0.0;
#else
    cprintf("%lli: %s progress event, instructions committed: %lli\n",
            curTick, cpu->name(), temp - lastNumInst);
#endif
    lastNumInst = temp;
    schedule(curTick + interval);
}

const char *
CPUProgressEvent::description()
{
    return "CPU Progress event";
}
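
// Worked example for the IPC figure computed in CPUProgressEvent::process()
// above (purely illustrative numbers, not tied to any particular config):
// cpu->cycles(1) is the clock period expressed in ticks, so
// interval / cpu->cycles(1) is the number of CPU cycles covered by one
// progress interval.  With a 500-tick clock period and interval == 50000000
// ticks, the event spans 100000 cycles; if 250000 instructions committed
// since the previous event, the reported IPC is 250000 / 100000 = 2.5.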

#if FULL_SYSTEM
BaseCPU::BaseCPU(Params *p)
    : MemObject(p->name), clock(p->clock), instCnt(0),
      params(p), number_of_threads(p->numberOfThreads), system(p->system),
      phase(p->phase)
#else
BaseCPU::BaseCPU(Params *p)
    : MemObject(p->name), clock(p->clock), params(p),
      number_of_threads(p->numberOfThreads), system(p->system),
      phase(p->phase)
#endif
{
//    currentTick = curTick;
    DPRINTF(FullCPU, "BaseCPU: Creating object, mem address %#x.\n", this);

    // add self to global list of CPUs
    cpuList.push_back(this);

    DPRINTF(FullCPU, "BaseCPU: CPU added to cpuList, mem address %#x.\n",
            this);

    if (number_of_threads > maxThreadsPerCPU)
        maxThreadsPerCPU = number_of_threads;

    // allocate per-thread instruction-based event queues
    comInstEventQueue = new EventQueue *[number_of_threads];
    for (int i = 0; i < number_of_threads; ++i)
        comInstEventQueue[i] = new EventQueue("instruction-based event queue");

    //
    // set up instruction-count-based termination events, if any
    //
    if (p->max_insts_any_thread != 0)
        for (int i = 0; i < number_of_threads; ++i)
            schedExitSimLoop("a thread reached the max instruction count",
                             p->max_insts_any_thread, 0,
                             comInstEventQueue[i]);

    if (p->max_insts_all_threads != 0) {
        // allocate & initialize shared downcounter: each event will
        // decrement this when triggered; simulation will terminate
        // when counter reaches 0
        int *counter = new int;
        *counter = number_of_threads;
        for (int i = 0; i < number_of_threads; ++i)
            new CountedExitEvent(comInstEventQueue[i],
                "all threads reached the max instruction count",
                p->max_insts_all_threads, *counter);
    }

    // allocate per-thread load-based event queues
    comLoadEventQueue = new EventQueue *[number_of_threads];
    for (int i = 0; i < number_of_threads; ++i)
        comLoadEventQueue[i] = new EventQueue("load-based event queue");

    //
    // set up load-count-based termination events, if any
    //
    if (p->max_loads_any_thread != 0)
        for (int i = 0; i < number_of_threads; ++i)
            schedExitSimLoop("a thread reached the max load count",
                             p->max_loads_any_thread, 0,
                             comLoadEventQueue[i]);

    if (p->max_loads_all_threads != 0) {
        // allocate & initialize shared downcounter: each event will
        // decrement this when triggered; simulation will terminate
        // when counter reaches 0
        int *counter = new int;
        *counter = number_of_threads;
        for (int i = 0; i < number_of_threads; ++i)
            new CountedExitEvent(comLoadEventQueue[i],
                "all threads reached the max load count",
                p->max_loads_all_threads, *counter);
    }

    functionTracingEnabled = false;
    if (p->functionTrace) {
        functionTraceStream = simout.find(csprintf("ftrace.%s", name()));
        currentFunctionStart = currentFunctionEnd = 0;
        functionEntryTick = p->functionTraceStart;

        if (p->functionTraceStart == 0) {
            functionTracingEnabled = true;
        } else {
            Event *e =
                new EventWrapper<BaseCPU, &BaseCPU::enableFunctionTrace>(this,
                                                                         true);
            e->schedule(p->functionTraceStart);
        }
    }
#if FULL_SYSTEM
    profileEvent = NULL;
    if (params->profile)
        profileEvent = new ProfileEvent(this, params->profile);
#endif
}
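
// Sketch of the "all threads" termination scheme set up above (a hedged
// paraphrase of CountedExitEvent's behavior, not a verbatim copy of its
// code): every thread gets its own event on its per-thread event queue,
// and all of those events share one heap-allocated downcounter.  When a
// thread's event fires it effectively does
//
//     if (--*counter == 0)
//         exit the simulation loop with the given cause;
//
// so with, say, 4 threads and max_insts_all_threads == 1000000, the
// simulation ends only when the last of the 4 threads reaches the
// 1000000-instruction mark.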

BaseCPU::Params::Params()
{
#if FULL_SYSTEM
    profile = false;
#endif
    checker = NULL;
}

void
BaseCPU::enableFunctionTrace()
{
    functionTracingEnabled = true;
}

BaseCPU::~BaseCPU()
{
}

void
BaseCPU::init()
{
    if (!params->deferRegistration)
        registerThreadContexts();
}

void
BaseCPU::startup()
{
#if FULL_SYSTEM
    if (!params->deferRegistration && profileEvent)
        profileEvent->schedule(curTick);
#endif

    if (params->progress_interval) {
        new CPUProgressEvent(&mainEventQueue, params->progress_interval,
                             this);
    }
}


void
BaseCPU::regStats()
{
    using namespace Stats;

    numCycles
        .name(name() + ".numCycles")
        .desc("number of cpu cycles simulated")
        ;

    int size = threadContexts.size();
    if (size > 1) {
        for (int i = 0; i < size; ++i) {
            stringstream namestr;
            ccprintf(namestr, "%s.ctx%d", name(), i);
            threadContexts[i]->regStats(namestr.str());
        }
    } else if (size == 1)
        threadContexts[0]->regStats(name());

#if FULL_SYSTEM
#endif
}

Tick
BaseCPU::nextCycle()
{
    Tick next_tick = curTick - phase + clock - 1;
    next_tick -= (next_tick % clock);
    next_tick += phase;
    return next_tick;
}

Tick
BaseCPU::nextCycle(Tick begin_tick)
{
    Tick next_tick = begin_tick;
    next_tick -= (next_tick % clock);
    next_tick += phase;

    while (next_tick < curTick)
        next_tick += clock;

    assert(next_tick >= curTick);
    return next_tick;
}
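
// Worked example for the nextCycle() arithmetic above (illustrative
// numbers): with clock == 500 ticks and phase == 100, clock edges fall on
// ticks 100, 600, 1100, 1600, ...  For curTick == 1340, nextCycle()
// computes 1340 - 100 + 499 = 1739, rounds down to a multiple of the clock
// (1500), and adds the phase back to get 1600, the next edge.  If curTick
// already sits on an edge, the same arithmetic returns curTick itself.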

void
BaseCPU::registerThreadContexts()
{
    for (int i = 0; i < threadContexts.size(); ++i) {
        ThreadContext *tc = threadContexts[i];

#if FULL_SYSTEM
        int id = params->cpu_id;
        if (id != -1)
            id += i;

        tc->setCpuId(system->registerThreadContext(tc, id));
#else
        tc->setCpuId(tc->getProcessPtr()->registerThreadContext(tc));
#endif
    }
}


void
BaseCPU::switchOut()
{
//    panic("This CPU doesn't support sampling!");
#if FULL_SYSTEM
    if (profileEvent && profileEvent->scheduled())
        profileEvent->deschedule();
#endif
}

void
BaseCPU::takeOverFrom(BaseCPU *oldCPU)
{
    assert(threadContexts.size() == oldCPU->threadContexts.size());

    for (int i = 0; i < threadContexts.size(); ++i) {
        ThreadContext *newTC = threadContexts[i];
        ThreadContext *oldTC = oldCPU->threadContexts[i];

        newTC->takeOverFrom(oldTC);

        CpuEvent::replaceThreadContext(oldTC, newTC);

        assert(newTC->readCpuId() == oldTC->readCpuId());
#if FULL_SYSTEM
        system->replaceThreadContext(newTC, newTC->readCpuId());
#else
        assert(newTC->getProcessPtr() == oldTC->getProcessPtr());
        newTC->getProcessPtr()->replaceThreadContext(newTC, newTC->readCpuId());
#endif

//      TheISA::compareXCs(oldXC, newXC);
    }

#if FULL_SYSTEM
    interrupts = oldCPU->interrupts;
    checkInterrupts = oldCPU->checkInterrupts;

    for (int i = 0; i < threadContexts.size(); ++i)
        threadContexts[i]->profileClear();

    // The Sampler must take care of this!
//    if (profileEvent)
//        profileEvent->schedule(curTick);
#endif
}


#if FULL_SYSTEM
BaseCPU::ProfileEvent::ProfileEvent(BaseCPU *_cpu, int _interval)
    : Event(&mainEventQueue), cpu(_cpu), interval(_interval)
{ }

void
BaseCPU::ProfileEvent::process()
{
    for (int i = 0, size = cpu->threadContexts.size(); i < size; ++i) {
        ThreadContext *tc = cpu->threadContexts[i];
        tc->profileSample();
    }

    schedule(curTick + interval);
}

void
BaseCPU::post_interrupt(int int_num, int index)
{
    checkInterrupts = true;
    interrupts.post(int_num, index);
}

void
BaseCPU::clear_interrupt(int int_num, int index)
{
    interrupts.clear(int_num, index);
}

void
BaseCPU::clear_interrupts()
{
    interrupts.clear_all();
}


void
BaseCPU::serialize(std::ostream &os)
{
    interrupts.serialize(os);
}

void
BaseCPU::unserialize(Checkpoint *cp, const std::string &section)
{
    interrupts.unserialize(cp, section);
}

#endif // FULL_SYSTEM

void
BaseCPU::traceFunctionsInternal(Addr pc)
{
    if (!debugSymbolTable)
        return;

    // if pc enters different function, print new function symbol and
    // update saved range.  Otherwise do nothing.
    if (pc < currentFunctionStart || pc >= currentFunctionEnd) {
        string sym_str;
        bool found = debugSymbolTable->findNearestSymbol(pc, sym_str,
                                                         currentFunctionStart,
                                                         currentFunctionEnd);

        if (!found) {
            // no symbol found: use addr as label
            sym_str = csprintf("0x%x", pc);
            currentFunctionStart = pc;
            currentFunctionEnd = pc + 1;
        }

        ccprintf(*functionTraceStream, " (%d)\n%d: %s",
                 curTick - functionEntryTick, curTick, sym_str);
        functionEntryTick = curTick;
    }
}


DEFINE_SIM_OBJECT_CLASS_NAME("BaseCPU", BaseCPU)