// base.cc revision 393
1/* 2 * Copyright (c) 2003 The Regents of The University of Michigan 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions are 7 * met: redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer; 9 * redistributions in binary form must reproduce the above copyright 10 * notice, this list of conditions and the following disclaimer in the 11 * documentation and/or other materials provided with the distribution; 12 * neither the name of the copyright holders nor the names of its 13 * contributors may be used to endorse or promote products derived from 14 * this software without specific prior written permission. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
27 */ 28 29#include <cmath> 30#include <cstdio> 31#include <cstdlib> 32#include <iostream> 33#include <iomanip> 34#include <list> 35#include <sstream> 36#include <string> 37 38#include "base/cprintf.hh" 39#include "base/inifile.hh" 40#include "base/loader/symtab.hh" 41#include "base/misc.hh" 42#include "base/pollevent.hh" 43#include "base/range.hh" 44#include "base/trace.hh" 45#include "cpu/base_cpu.hh" 46#include "cpu/exec_context.hh" 47#include "cpu/exetrace.hh" 48#include "cpu/full_cpu/smt.hh" 49#include "cpu/simple_cpu/simple_cpu.hh" 50#include "cpu/static_inst.hh" 51#include "mem/base_mem.hh" 52#include "mem/mem_interface.hh" 53#include "sim/annotation.hh" 54#include "sim/builder.hh" 55#include "sim/debug.hh" 56#include "sim/host.hh" 57#include "sim/sim_events.hh" 58#include "sim/sim_object.hh" 59#include "sim/sim_stats.hh" 60 61#ifdef FULL_SYSTEM 62#include "base/remote_gdb.hh" 63#include "dev/alpha_access.h" 64#include "dev/pciareg.h" 65#include "mem/functional_mem/memory_control.hh" 66#include "mem/functional_mem/physical_memory.hh" 67#include "sim/system.hh" 68#include "targetarch/alpha_memory.hh" 69#include "targetarch/vtophys.hh" 70#else // !FULL_SYSTEM 71#include "eio/eio.hh" 72#include "mem/functional_mem/functional_memory.hh" 73#endif // FULL_SYSTEM 74 75using namespace std; 76 77SimpleCPU::TickEvent::TickEvent(SimpleCPU *c) 78 : Event(&mainEventQueue, 100), cpu(c) 79{ 80} 81 82void 83SimpleCPU::TickEvent::process() 84{ 85 cpu->tick(); 86} 87 88const char * 89SimpleCPU::TickEvent::description() 90{ 91 return "SimpleCPU tick event"; 92} 93 94 95SimpleCPU::CacheCompletionEvent::CacheCompletionEvent(SimpleCPU *_cpu) 96 : Event(&mainEventQueue), 97 cpu(_cpu) 98{ 99} 100 101void SimpleCPU::CacheCompletionEvent::process() 102{ 103 cpu->processCacheCompletion(); 104} 105 106const char * 107SimpleCPU::CacheCompletionEvent::description() 108{ 109 return "SimpleCPU cache completion event"; 110} 111 112#ifdef FULL_SYSTEM 113SimpleCPU::SimpleCPU(const string 
&_name, 114 System *_system, 115 Counter max_insts_any_thread, 116 Counter max_insts_all_threads, 117 Counter max_loads_any_thread, 118 Counter max_loads_all_threads, 119 AlphaItb *itb, AlphaDtb *dtb, 120 FunctionalMemory *mem, 121 MemInterface *icache_interface, 122 MemInterface *dcache_interface, 123 Tick freq) 124 : BaseCPU(_name, /* number_of_threads */ 1, 125 max_insts_any_thread, max_insts_all_threads, 126 max_loads_any_thread, max_loads_all_threads, 127 _system, freq), 128#else 129SimpleCPU::SimpleCPU(const string &_name, Process *_process, 130 Counter max_insts_any_thread, 131 Counter max_insts_all_threads, 132 Counter max_loads_any_thread, 133 Counter max_loads_all_threads, 134 MemInterface *icache_interface, 135 MemInterface *dcache_interface) 136 : BaseCPU(_name, /* number_of_threads */ 1, 137 max_insts_any_thread, max_insts_all_threads, 138 max_loads_any_thread, max_loads_all_threads), 139#endif 140 tickEvent(this), xc(NULL), cacheCompletionEvent(this) 141{ 142 _status = Idle; 143#ifdef FULL_SYSTEM 144 xc = new ExecContext(this, 0, system, itb, dtb, mem); 145 146 // initialize CPU, including PC 147 TheISA::initCPU(&xc->regs); 148#else 149 xc = new ExecContext(this, /* thread_num */ 0, _process, /* asid */ 0); 150#endif // !FULL_SYSTEM 151 152 icacheInterface = icache_interface; 153 dcacheInterface = dcache_interface; 154 155 memReq = new MemReq(); 156 memReq->xc = xc; 157 memReq->asid = 0; 158 memReq->data = new uint8_t[64]; 159 160 numInst = 0; 161 startNumInst = 0; 162 numLoad = 0; 163 startNumLoad = 0; 164 lastIcacheStall = 0; 165 lastDcacheStall = 0; 166 167 execContexts.push_back(xc); 168} 169 170SimpleCPU::~SimpleCPU() 171{ 172} 173 174void 175SimpleCPU::switchOut() 176{ 177 _status = SwitchedOut; 178 if (tickEvent.scheduled()) 179 tickEvent.squash(); 180} 181 182 183void 184SimpleCPU::takeOverFrom(BaseCPU *oldCPU) 185{ 186 BaseCPU::takeOverFrom(oldCPU); 187 188 assert(!tickEvent.scheduled()); 189 190 // if any of this CPU's ExecContexts are 
active, mark the CPU as 191 // running and schedule its tick event. 192 for (int i = 0; i < execContexts.size(); ++i) { 193 ExecContext *xc = execContexts[i]; 194 if (xc->status() == ExecContext::Active && _status != Running) { 195 _status = Running; 196 tickEvent.schedule(curTick); 197 } 198 } 199 200 oldCPU->switchOut(); 201} 202 203 204void 205SimpleCPU::activateContext(int thread_num, int delay) 206{ 207 assert(thread_num == 0); 208 assert(xc); 209 210 assert(_status == Idle); 211 notIdleFraction++; 212 scheduleTickEvent(delay); 213 _status = Running; 214} 215 216 217void 218SimpleCPU::suspendContext(int thread_num) 219{ 220 assert(thread_num == 0); 221 assert(xc); 222 223 assert(_status == Running); 224 notIdleFraction--; 225 unscheduleTickEvent(); 226 _status = Idle; 227} 228 229 230void 231SimpleCPU::deallocateContext(int thread_num) 232{ 233 // for now, these are equivalent 234 suspendContext(thread_num); 235} 236 237 238void 239SimpleCPU::haltContext(int thread_num) 240{ 241 // for now, these are equivalent 242 suspendContext(thread_num); 243} 244 245 246void 247SimpleCPU::regStats() 248{ 249 using namespace Statistics; 250 251 BaseCPU::regStats(); 252 253 numInsts 254 .name(name() + ".num_insts") 255 .desc("Number of instructions executed") 256 ; 257 258 numMemRefs 259 .name(name() + ".num_refs") 260 .desc("Number of memory references") 261 ; 262 263 idleFraction 264 .name(name() + ".idle_fraction") 265 .desc("Percentage of idle cycles") 266 ; 267 268 icacheStallCycles 269 .name(name() + ".icache_stall_cycles") 270 .desc("ICache total stall cycles") 271 .prereq(icacheStallCycles) 272 ; 273 274 dcacheStallCycles 275 .name(name() + ".dcache_stall_cycles") 276 .desc("DCache total stall cycles") 277 .prereq(dcacheStallCycles) 278 ; 279 280 idleFraction = constant(1.0) - notIdleFraction; 281 numInsts = Statistics::scalar(numInst) - Statistics::scalar(startNumInst); 282 simInsts += numInsts; 283} 284 285void 286SimpleCPU::resetStats() 287{ 288 startNumInst = 
numInst; 289 notIdleFraction = (_status != Idle); 290} 291 292void 293SimpleCPU::serialize(ostream &os) 294{ 295 SERIALIZE_ENUM(_status); 296 SERIALIZE_SCALAR(inst); 297 nameOut(os, csprintf("%s.xc", name())); 298 xc->serialize(os); 299 nameOut(os, csprintf("%s.tickEvent", name())); 300 tickEvent.serialize(os); 301 nameOut(os, csprintf("%s.cacheCompletionEvent", name())); 302 cacheCompletionEvent.serialize(os); 303} 304 305void 306SimpleCPU::unserialize(Checkpoint *cp, const string §ion) 307{ 308 UNSERIALIZE_ENUM(_status); 309 UNSERIALIZE_SCALAR(inst); 310 xc->unserialize(cp, csprintf("%s.xc", section)); 311 tickEvent.unserialize(cp, csprintf("%s.tickEvent", section)); 312 cacheCompletionEvent 313 .unserialize(cp, csprintf("%s.cacheCompletionEvent", section)); 314} 315 316void 317change_thread_state(int thread_number, int activate, int priority) 318{ 319} 320 321// precise architected memory state accessor macros 322template <class T> 323Fault 324SimpleCPU::read(Addr addr, T& data, unsigned flags) 325{ 326 memReq->reset(addr, sizeof(T), flags); 327 328 // translate to physical address 329 Fault fault = xc->translateDataReadReq(memReq); 330 331 // do functional access 332 if (fault == No_Fault) 333 fault = xc->read(memReq, data); 334 335 if (traceData) { 336 traceData->setAddr(addr); 337 if (fault == No_Fault) 338 traceData->setData(data); 339 } 340 341 // if we have a cache, do cache access too 342 if (fault == No_Fault && dcacheInterface) { 343 memReq->cmd = Read; 344 memReq->completionEvent = NULL; 345 memReq->time = curTick; 346 memReq->flags &= ~UNCACHEABLE; 347 MemAccessResult result = dcacheInterface->access(memReq); 348 349 // Ugly hack to get an event scheduled *only* if the access is 350 // a miss. We really should add first-class support for this 351 // at some point. 
352 if (result != MA_HIT && dcacheInterface->doEvents) { 353 memReq->completionEvent = &cacheCompletionEvent; 354 lastDcacheStall = curTick; 355 unscheduleTickEvent(); 356 _status = DcacheMissStall; 357 } 358 } 359 360 return fault; 361} 362 363#ifndef DOXYGEN_SHOULD_SKIP_THIS 364 365template 366Fault 367SimpleCPU::read(Addr addr, uint64_t& data, unsigned flags); 368 369template 370Fault 371SimpleCPU::read(Addr addr, uint32_t& data, unsigned flags); 372 373template 374Fault 375SimpleCPU::read(Addr addr, uint16_t& data, unsigned flags); 376 377template 378Fault 379SimpleCPU::read(Addr addr, uint8_t& data, unsigned flags); 380 381#endif //DOXYGEN_SHOULD_SKIP_THIS 382 383template<> 384Fault 385SimpleCPU::read(Addr addr, double& data, unsigned flags) 386{ 387 return read(addr, *(uint64_t*)&data, flags); 388} 389 390template<> 391Fault 392SimpleCPU::read(Addr addr, float& data, unsigned flags) 393{ 394 return read(addr, *(uint32_t*)&data, flags); 395} 396 397 398template<> 399Fault 400SimpleCPU::read(Addr addr, int32_t& data, unsigned flags) 401{ 402 return read(addr, (uint32_t&)data, flags); 403} 404 405 406template <class T> 407Fault 408SimpleCPU::write(T data, Addr addr, unsigned flags, uint64_t *res) 409{ 410 if (traceData) { 411 traceData->setAddr(addr); 412 traceData->setData(data); 413 } 414 415 memReq->reset(addr, sizeof(T), flags); 416 417 // translate to physical address 418 Fault fault = xc->translateDataWriteReq(memReq); 419 420 // do functional access 421 if (fault == No_Fault) 422 fault = xc->write(memReq, data); 423 424 if (fault == No_Fault && dcacheInterface) { 425 memReq->cmd = Write; 426 memcpy(memReq->data,(uint8_t *)&data,memReq->size); 427 memReq->completionEvent = NULL; 428 memReq->time = curTick; 429 memReq->flags &= ~UNCACHEABLE; 430 MemAccessResult result = dcacheInterface->access(memReq); 431 432 // Ugly hack to get an event scheduled *only* if the access is 433 // a miss. We really should add first-class support for this 434 // at some point. 
435 if (result != MA_HIT && dcacheInterface->doEvents) { 436 memReq->completionEvent = &cacheCompletionEvent; 437 lastDcacheStall = curTick; 438 unscheduleTickEvent(); 439 _status = DcacheMissStall; 440 } 441 } 442 443 if (res && (fault == No_Fault)) 444 *res = memReq->result; 445 446 return fault; 447} 448 449 450#ifndef DOXYGEN_SHOULD_SKIP_THIS 451template 452Fault 453SimpleCPU::write(uint64_t data, Addr addr, unsigned flags, uint64_t *res); 454 455template 456Fault 457SimpleCPU::write(uint32_t data, Addr addr, unsigned flags, uint64_t *res); 458 459template 460Fault 461SimpleCPU::write(uint16_t data, Addr addr, unsigned flags, uint64_t *res); 462 463template 464Fault 465SimpleCPU::write(uint8_t data, Addr addr, unsigned flags, uint64_t *res); 466 467#endif //DOXYGEN_SHOULD_SKIP_THIS 468 469template<> 470Fault 471SimpleCPU::write(double data, Addr addr, unsigned flags, uint64_t *res) 472{ 473 return write(*(uint64_t*)&data, addr, flags, res); 474} 475 476template<> 477Fault 478SimpleCPU::write(float data, Addr addr, unsigned flags, uint64_t *res) 479{ 480 return write(*(uint32_t*)&data, addr, flags, res); 481} 482 483 484template<> 485Fault 486SimpleCPU::write(int32_t data, Addr addr, unsigned flags, uint64_t *res) 487{ 488 return write((uint32_t)data, addr, flags, res); 489} 490 491 492#ifdef FULL_SYSTEM 493Addr 494SimpleCPU::dbg_vtophys(Addr addr) 495{ 496 return vtophys(xc, addr); 497} 498#endif // FULL_SYSTEM 499 500Tick save_cycle = 0; 501 502 503void 504SimpleCPU::processCacheCompletion() 505{ 506 switch (status()) { 507 case IcacheMissStall: 508 icacheStallCycles += curTick - lastIcacheStall; 509 _status = IcacheMissComplete; 510 scheduleTickEvent(1); 511 break; 512 case DcacheMissStall: 513 dcacheStallCycles += curTick - lastDcacheStall; 514 _status = Running; 515 scheduleTickEvent(1); 516 break; 517 case SwitchedOut: 518 // If this CPU has been switched out due to sampling/warm-up, 519 // ignore any further status changes (e.g., due to cache 520 // 
misses outstanding at the time of the switch). 521 return; 522 default: 523 panic("SimpleCPU::processCacheCompletion: bad state"); 524 break; 525 } 526} 527 528#ifdef FULL_SYSTEM 529void 530SimpleCPU::post_interrupt(int int_num, int index) 531{ 532 BaseCPU::post_interrupt(int_num, index); 533 534 if (xc->status() == ExecContext::Suspended) { 535 DPRINTF(IPI,"Suspended Processor awoke\n"); 536 xc->activate(); 537 Annotate::Resume(xc); 538 } 539} 540#endif // FULL_SYSTEM 541 542/* start simulation, program loaded, processor precise state initialized */ 543void 544SimpleCPU::tick() 545{ 546 traceData = NULL; 547 548 Fault fault = No_Fault; 549 550#ifdef FULL_SYSTEM 551 if (AlphaISA::check_interrupts && 552 xc->cpu->check_interrupts() && 553 !PC_PAL(xc->regs.pc) && 554 status() != IcacheMissComplete) { 555 int ipl = 0; 556 int summary = 0; 557 AlphaISA::check_interrupts = 0; 558 IntReg *ipr = xc->regs.ipr; 559 560 if (xc->regs.ipr[TheISA::IPR_SIRR]) { 561 for (int i = TheISA::INTLEVEL_SOFTWARE_MIN; 562 i < TheISA::INTLEVEL_SOFTWARE_MAX; i++) { 563 if (ipr[TheISA::IPR_SIRR] & (ULL(1) << i)) { 564 // See table 4-19 of 21164 hardware reference 565 ipl = (i - TheISA::INTLEVEL_SOFTWARE_MIN) + 1; 566 summary |= (ULL(1) << i); 567 } 568 } 569 } 570 571 uint64_t interrupts = xc->cpu->intr_status(); 572 for (int i = TheISA::INTLEVEL_EXTERNAL_MIN; 573 i < TheISA::INTLEVEL_EXTERNAL_MAX; i++) { 574 if (interrupts & (ULL(1) << i)) { 575 // See table 4-19 of 21164 hardware reference 576 ipl = i; 577 summary |= (ULL(1) << i); 578 } 579 } 580 581 if (ipr[TheISA::IPR_ASTRR]) 582 panic("asynchronous traps not implemented\n"); 583 584 if (ipl && ipl > xc->regs.ipr[TheISA::IPR_IPLR]) { 585 ipr[TheISA::IPR_ISR] = summary; 586 ipr[TheISA::IPR_INTID] = ipl; 587 xc->ev5_trap(Interrupt_Fault); 588 589 DPRINTF(Flow, "Interrupt! 
IPLR=%d ipl=%d summary=%x\n", 590 ipr[TheISA::IPR_IPLR], ipl, summary); 591 } 592 } 593#endif 594 595 // maintain $r0 semantics 596 xc->regs.intRegFile[ZeroReg] = 0; 597#ifdef TARGET_ALPHA 598 xc->regs.floatRegFile.d[ZeroReg] = 0.0; 599#endif // TARGET_ALPHA 600 601 if (status() == IcacheMissComplete) { 602 // We've already fetched an instruction and were stalled on an 603 // I-cache miss. No need to fetch it again. 604 605 // Set status to running; tick event will get rescheduled if 606 // necessary at end of tick() function. 607 _status = Running; 608 } 609 else { 610 // Try to fetch an instruction 611 612 // set up memory request for instruction fetch 613#ifdef FULL_SYSTEM 614#define IFETCH_FLAGS(pc) ((pc) & 1) ? PHYSICAL : 0 615#else 616#define IFETCH_FLAGS(pc) 0 617#endif 618 619 memReq->cmd = Read; 620 memReq->reset(xc->regs.pc & ~3, sizeof(uint32_t), 621 IFETCH_FLAGS(xc->regs.pc)); 622 623 fault = xc->translateInstReq(memReq); 624 625 if (fault == No_Fault) 626 fault = xc->mem->read(memReq, inst); 627 628 if (icacheInterface && fault == No_Fault) { 629 memReq->completionEvent = NULL; 630 631 memReq->time = curTick; 632 memReq->flags &= ~UNCACHEABLE; 633 MemAccessResult result = icacheInterface->access(memReq); 634 635 // Ugly hack to get an event scheduled *only* if the access is 636 // a miss. We really should add first-class support for this 637 // at some point. 638 if (result != MA_HIT && icacheInterface->doEvents) { 639 memReq->completionEvent = &cacheCompletionEvent; 640 lastIcacheStall = curTick; 641 unscheduleTickEvent(); 642 _status = IcacheMissStall; 643 return; 644 } 645 } 646 } 647 648 // If we've got a valid instruction (i.e., no fault on instruction 649 // fetch), then execute it. 
650 if (fault == No_Fault) { 651 652 // keep an instruction count 653 numInst++; 654 655 // check for instruction-count-based events 656 comInsnEventQueue[0]->serviceEvents(numInst); 657 658 // decode the instruction 659 StaticInstPtr<TheISA> si(inst); 660 661 traceData = Trace::getInstRecord(curTick, xc, this, si, 662 xc->regs.pc); 663 664#ifdef FULL_SYSTEM 665 xc->regs.opcode = (inst >> 26) & 0x3f; 666 xc->regs.ra = (inst >> 21) & 0x1f; 667#endif // FULL_SYSTEM 668 669 xc->func_exe_insn++; 670 671 fault = si->execute(this, xc, traceData); 672#ifdef FS_MEASURE 673 if (!(xc->misspeculating()) && (xc->system->bin)) { 674 SWContext *ctx = xc->swCtx; 675 if (ctx && !ctx->callStack.empty()) { 676 if (si->isCall()) { 677 ctx->calls++; 678 } 679 if (si->isReturn()) { 680 if (ctx->calls == 0) { 681 fnCall *top = ctx->callStack.top(); 682 DPRINTF(TCPIP, "Removing %s from callstack.\n", top->name); 683 delete top; 684 ctx->callStack.pop(); 685 if (ctx->callStack.empty()) 686 xc->system->nonPath->activate(); 687 else 688 ctx->callStack.top()->myBin->activate(); 689 690 xc->system->dumpState(xc); 691 } else { 692 ctx->calls--; 693 } 694 } 695 } 696 } 697#endif 698 if (si->isMemRef()) { 699 numMemRefs++; 700 } 701 702 if (si->isLoad()) { 703 ++numLoad; 704 comLoadEventQueue[0]->serviceEvents(numLoad); 705 } 706 707 if (traceData) 708 traceData->finalize(); 709 710 } // if (fault == No_Fault) 711 712 if (fault != No_Fault) { 713#ifdef FULL_SYSTEM 714 xc->ev5_trap(fault); 715#else // !FULL_SYSTEM 716 fatal("fault (%d) detected @ PC 0x%08p", fault, xc->regs.pc); 717#endif // FULL_SYSTEM 718 } 719 else { 720 // go to the next instruction 721 xc->regs.pc = xc->regs.npc; 722 xc->regs.npc += sizeof(MachInst); 723 } 724 725#ifdef FULL_SYSTEM 726 Addr oldpc; 727 do { 728 oldpc = xc->regs.pc; 729 system->pcEventQueue.service(xc); 730 } while (oldpc != xc->regs.pc); 731#endif 732 733 assert(status() == Running || 734 status() == Idle || 735 status() == DcacheMissStall); 736 737 if 
(status() == Running && !tickEvent.scheduled()) 738 tickEvent.schedule(curTick + 1); 739} 740 741 742//////////////////////////////////////////////////////////////////////// 743// 744// SimpleCPU Simulation Object 745// 746BEGIN_DECLARE_SIM_OBJECT_PARAMS(SimpleCPU) 747 748 Param<Counter> max_insts_any_thread; 749 Param<Counter> max_insts_all_threads; 750 Param<Counter> max_loads_any_thread; 751 Param<Counter> max_loads_all_threads; 752 753#ifdef FULL_SYSTEM 754 SimObjectParam<AlphaItb *> itb; 755 SimObjectParam<AlphaDtb *> dtb; 756 SimObjectParam<FunctionalMemory *> mem; 757 SimObjectParam<System *> system; 758 Param<int> mult; 759#else 760 SimObjectParam<Process *> workload; 761#endif // FULL_SYSTEM 762 763 SimObjectParam<BaseMem *> icache; 764 SimObjectParam<BaseMem *> dcache; 765 766 Param<bool> defer_registration; 767 768END_DECLARE_SIM_OBJECT_PARAMS(SimpleCPU) 769 770BEGIN_INIT_SIM_OBJECT_PARAMS(SimpleCPU) 771 772 INIT_PARAM_DFLT(max_insts_any_thread, 773 "terminate when any thread reaches this insn count", 774 0), 775 INIT_PARAM_DFLT(max_insts_all_threads, 776 "terminate when all threads have reached this insn count", 777 0), 778 INIT_PARAM_DFLT(max_loads_any_thread, 779 "terminate when any thread reaches this load count", 780 0), 781 INIT_PARAM_DFLT(max_loads_all_threads, 782 "terminate when all threads have reached this load count", 783 0), 784 785#ifdef FULL_SYSTEM 786 INIT_PARAM(itb, "Instruction TLB"), 787 INIT_PARAM(dtb, "Data TLB"), 788 INIT_PARAM(mem, "memory"), 789 INIT_PARAM(system, "system object"), 790 INIT_PARAM_DFLT(mult, "system clock multiplier", 1), 791#else 792 INIT_PARAM(workload, "processes to run"), 793#endif // FULL_SYSTEM 794 795 INIT_PARAM_DFLT(icache, "L1 instruction cache object", NULL), 796 INIT_PARAM_DFLT(dcache, "L1 data cache object", NULL), 797 INIT_PARAM_DFLT(defer_registration, "defer registration with system " 798 "(for sampling)", false) 799 800END_INIT_SIM_OBJECT_PARAMS(SimpleCPU) 801 802 803CREATE_SIM_OBJECT(SimpleCPU) 
804{ 805 SimpleCPU *cpu; 806#ifdef FULL_SYSTEM 807 if (mult != 1) 808 panic("processor clock multiplier must be 1\n"); 809 810 cpu = new SimpleCPU(getInstanceName(), system, 811 max_insts_any_thread, max_insts_all_threads, 812 max_loads_any_thread, max_loads_all_threads, 813 itb, dtb, mem, 814 (icache) ? icache->getInterface() : NULL, 815 (dcache) ? dcache->getInterface() : NULL, 816 ticksPerSecond * mult); 817#else 818 819 cpu = new SimpleCPU(getInstanceName(), workload, 820 max_insts_any_thread, max_insts_all_threads, 821 max_loads_any_thread, max_loads_all_threads, 822 (icache) ? icache->getInterface() : NULL, 823 (dcache) ? dcache->getInterface() : NULL); 824 825#endif // FULL_SYSTEM 826 827 if (!defer_registration) { 828 cpu->registerExecContexts(); 829 } 830 831 return cpu; 832} 833 834REGISTER_SIM_OBJECT("SimpleCPU", SimpleCPU) 835 836