// base.cc revision 384
1/* 2 * Copyright (c) 2003 The Regents of The University of Michigan 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions are 7 * met: redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer; 9 * redistributions in binary form must reproduce the above copyright 10 * notice, this list of conditions and the following disclaimer in the 11 * documentation and/or other materials provided with the distribution; 12 * neither the name of the copyright holders nor the names of its 13 * contributors may be used to endorse or promote products derived from 14 * this software without specific prior written permission. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
27 */ 28 29#include <cmath> 30#include <cstdio> 31#include <cstdlib> 32#include <iostream> 33#include <iomanip> 34#include <list> 35#include <sstream> 36#include <string> 37 38#include "base/cprintf.hh" 39#include "base/inifile.hh" 40#include "base/loader/symtab.hh" 41#include "base/misc.hh" 42#include "base/pollevent.hh" 43#include "base/range.hh" 44#include "base/trace.hh" 45#include "cpu/base_cpu.hh" 46#include "cpu/exec_context.hh" 47#include "cpu/exetrace.hh" 48#include "cpu/full_cpu/smt.hh" 49#include "cpu/simple_cpu/simple_cpu.hh" 50#include "cpu/static_inst.hh" 51#include "mem/base_mem.hh" 52#include "mem/mem_interface.hh" 53#include "sim/annotation.hh" 54#include "sim/builder.hh" 55#include "sim/debug.hh" 56#include "sim/host.hh" 57#include "sim/sim_events.hh" 58#include "sim/sim_object.hh" 59#include "sim/sim_stats.hh" 60 61#ifdef FULL_SYSTEM 62#include "base/remote_gdb.hh" 63#include "dev/alpha_access.h" 64#include "dev/pciareg.h" 65#include "mem/functional_mem/memory_control.hh" 66#include "mem/functional_mem/physical_memory.hh" 67#include "sim/system.hh" 68#include "targetarch/alpha_memory.hh" 69#include "targetarch/vtophys.hh" 70#else // !FULL_SYSTEM 71#include "eio/eio.hh" 72#include "mem/functional_mem/functional_memory.hh" 73#endif // FULL_SYSTEM 74 75using namespace std; 76 77SimpleCPU::TickEvent::TickEvent(SimpleCPU *c) 78 : Event(&mainEventQueue, 100), cpu(c) 79{ 80} 81 82void 83SimpleCPU::TickEvent::process() 84{ 85 cpu->tick(); 86} 87 88const char * 89SimpleCPU::TickEvent::description() 90{ 91 return "SimpleCPU tick event"; 92} 93 94 95SimpleCPU::CacheCompletionEvent::CacheCompletionEvent(SimpleCPU *_cpu) 96 : Event(&mainEventQueue), 97 cpu(_cpu) 98{ 99} 100 101void SimpleCPU::CacheCompletionEvent::process() 102{ 103 cpu->processCacheCompletion(); 104} 105 106const char * 107SimpleCPU::CacheCompletionEvent::description() 108{ 109 return "SimpleCPU cache completion event"; 110} 111 112#ifdef FULL_SYSTEM 113SimpleCPU::SimpleCPU(const string 
&_name, 114 System *_system, 115 Counter max_insts_any_thread, 116 Counter max_insts_all_threads, 117 Counter max_loads_any_thread, 118 Counter max_loads_all_threads, 119 AlphaItb *itb, AlphaDtb *dtb, 120 FunctionalMemory *mem, 121 MemInterface *icache_interface, 122 MemInterface *dcache_interface, 123 Tick freq) 124 : BaseCPU(_name, /* number_of_threads */ 1, 125 max_insts_any_thread, max_insts_all_threads, 126 max_loads_any_thread, max_loads_all_threads, 127 _system, freq), 128#else 129SimpleCPU::SimpleCPU(const string &_name, Process *_process, 130 Counter max_insts_any_thread, 131 Counter max_insts_all_threads, 132 Counter max_loads_any_thread, 133 Counter max_loads_all_threads, 134 MemInterface *icache_interface, 135 MemInterface *dcache_interface) 136 : BaseCPU(_name, /* number_of_threads */ 1, 137 max_insts_any_thread, max_insts_all_threads, 138 max_loads_any_thread, max_loads_all_threads), 139#endif 140 tickEvent(this), xc(NULL), cacheCompletionEvent(this) 141{ 142 _status = Idle; 143#ifdef FULL_SYSTEM 144 xc = new ExecContext(this, 0, system, itb, dtb, mem); 145 146 // initialize CPU, including PC 147 TheISA::initCPU(&xc->regs); 148#else 149 xc = new ExecContext(this, /* thread_num */ 0, _process, /* asid */ 0); 150#endif // !FULL_SYSTEM 151 152 icacheInterface = icache_interface; 153 dcacheInterface = dcache_interface; 154 155 memReq = new MemReq(); 156 memReq->xc = xc; 157 memReq->asid = 0; 158 memReq->data = new uint8_t[64]; 159 160 numInst = 0; 161 startNumInst = 0; 162 numLoad = 0; 163 startNumLoad = 0; 164 lastIcacheStall = 0; 165 lastDcacheStall = 0; 166 167 execContexts.push_back(xc); 168} 169 170SimpleCPU::~SimpleCPU() 171{ 172} 173 174void 175SimpleCPU::switchOut() 176{ 177 _status = SwitchedOut; 178 if (tickEvent.scheduled()) 179 tickEvent.squash(); 180} 181 182 183void 184SimpleCPU::takeOverFrom(BaseCPU *oldCPU) 185{ 186 BaseCPU::takeOverFrom(oldCPU); 187 188 assert(!tickEvent.scheduled()); 189 190 // if any of this CPU's ExecContexts are 
active, mark the CPU as 191 // running and schedule its tick event. 192 for (int i = 0; i < execContexts.size(); ++i) { 193 ExecContext *xc = execContexts[i]; 194 if (xc->status() == ExecContext::Active && _status != Running) { 195 _status = Running; 196 tickEvent.schedule(curTick); 197 } 198 } 199 200 oldCPU->switchOut(); 201} 202 203 204void 205SimpleCPU::execCtxStatusChg(int thread_num) { 206 assert(thread_num == 0); 207 assert(xc); 208 209 if (xc->status() == ExecContext::Active) 210 setStatus(Running); 211 else 212 setStatus(Idle); 213} 214 215void 216SimpleCPU::setStatus(Status new_status) 217{ 218 Status old_status = status(); 219 220 // We should never even get here if the CPU has been switched out. 221 assert(old_status != SwitchedOut); 222 223 _status = new_status; 224 225 switch (status()) { 226 case IcacheMissStall: 227 assert(old_status == Running); 228 lastIcacheStall = curTick; 229 if (tickEvent.scheduled()) 230 tickEvent.squash(); 231 break; 232 233 case IcacheMissComplete: 234 assert(old_status == IcacheMissStall); 235 if (tickEvent.squashed()) 236 tickEvent.reschedule(curTick + 1); 237 else if (!tickEvent.scheduled()) 238 tickEvent.schedule(curTick + 1); 239 break; 240 241 case DcacheMissStall: 242 assert(old_status == Running); 243 lastDcacheStall = curTick; 244 if (tickEvent.scheduled()) 245 tickEvent.squash(); 246 break; 247 248 case Idle: 249 assert(old_status == Running); 250 idleFraction++; 251 if (tickEvent.scheduled()) 252 tickEvent.squash(); 253 break; 254 255 case Running: 256 assert(old_status == Idle || 257 old_status == DcacheMissStall || 258 old_status == IcacheMissComplete); 259 if (old_status == Idle && curTick != 0) 260 idleFraction--; 261 262 if (tickEvent.squashed()) 263 tickEvent.reschedule(curTick + 1); 264 else if (!tickEvent.scheduled()) 265 tickEvent.schedule(curTick + 1); 266 break; 267 268 default: 269 panic("can't get here"); 270 } 271} 272 273void 274SimpleCPU::regStats() 275{ 276 using namespace Statistics; 277 278 
BaseCPU::regStats(); 279 280 numInsts 281 .name(name() + ".num_insts") 282 .desc("Number of instructions executed") 283 ; 284 285 numMemRefs 286 .name(name() + ".num_refs") 287 .desc("Number of memory references") 288 ; 289 290 idleFraction 291 .name(name() + ".idle_fraction") 292 .desc("Percentage of idle cycles") 293 ; 294 295 icacheStallCycles 296 .name(name() + ".icache_stall_cycles") 297 .desc("ICache total stall cycles") 298 .prereq(icacheStallCycles) 299 ; 300 301 dcacheStallCycles 302 .name(name() + ".dcache_stall_cycles") 303 .desc("DCache total stall cycles") 304 .prereq(dcacheStallCycles) 305 ; 306 307 numInsts = Statistics::scalar(numInst) - Statistics::scalar(startNumInst); 308 simInsts += numInsts; 309} 310 311void 312SimpleCPU::resetStats() 313{ 314 startNumInst = numInst; 315} 316 317void 318SimpleCPU::serialize(ostream &os) 319{ 320 SERIALIZE_ENUM(_status); 321 SERIALIZE_SCALAR(inst); 322 nameOut(os, csprintf("%s.xc", name())); 323 xc->serialize(os); 324 nameOut(os, csprintf("%s.tickEvent", name())); 325 tickEvent.serialize(os); 326 nameOut(os, csprintf("%s.cacheCompletionEvent", name())); 327 cacheCompletionEvent.serialize(os); 328} 329 330void 331SimpleCPU::unserialize(Checkpoint *cp, const string §ion) 332{ 333 UNSERIALIZE_ENUM(_status); 334 UNSERIALIZE_SCALAR(inst); 335 xc->unserialize(cp, csprintf("%s.xc", section)); 336 tickEvent.unserialize(cp, csprintf("%s.tickEvent", section)); 337 cacheCompletionEvent 338 .unserialize(cp, csprintf("%s.cacheCompletionEvent", section)); 339} 340 341void 342change_thread_state(int thread_number, int activate, int priority) 343{ 344} 345 346// precise architected memory state accessor macros 347template <class T> 348Fault 349SimpleCPU::read(Addr addr, T& data, unsigned flags) 350{ 351 memReq->reset(addr, sizeof(T), flags); 352 353 // translate to physical address 354 Fault fault = xc->translateDataReadReq(memReq); 355 356 // do functional access 357 if (fault == No_Fault) 358 fault = xc->read(memReq, data); 
359 360 if (traceData) { 361 traceData->setAddr(addr); 362 if (fault == No_Fault) 363 traceData->setData(data); 364 } 365 366 // if we have a cache, do cache access too 367 if (fault == No_Fault && dcacheInterface) { 368 memReq->cmd = Read; 369 memReq->completionEvent = NULL; 370 memReq->time = curTick; 371 memReq->flags &= ~UNCACHEABLE; 372 MemAccessResult result = dcacheInterface->access(memReq); 373 374 // Ugly hack to get an event scheduled *only* if the access is 375 // a miss. We really should add first-class support for this 376 // at some point. 377 if (result != MA_HIT && dcacheInterface->doEvents) { 378 memReq->completionEvent = &cacheCompletionEvent; 379 setStatus(DcacheMissStall); 380 } 381 } 382 383 return fault; 384} 385 386#ifndef DOXYGEN_SHOULD_SKIP_THIS 387 388template 389Fault 390SimpleCPU::read(Addr addr, uint64_t& data, unsigned flags); 391 392template 393Fault 394SimpleCPU::read(Addr addr, uint32_t& data, unsigned flags); 395 396template 397Fault 398SimpleCPU::read(Addr addr, uint16_t& data, unsigned flags); 399 400template 401Fault 402SimpleCPU::read(Addr addr, uint8_t& data, unsigned flags); 403 404#endif //DOXYGEN_SHOULD_SKIP_THIS 405 406template<> 407Fault 408SimpleCPU::read(Addr addr, double& data, unsigned flags) 409{ 410 return read(addr, *(uint64_t*)&data, flags); 411} 412 413template<> 414Fault 415SimpleCPU::read(Addr addr, float& data, unsigned flags) 416{ 417 return read(addr, *(uint32_t*)&data, flags); 418} 419 420 421template<> 422Fault 423SimpleCPU::read(Addr addr, int32_t& data, unsigned flags) 424{ 425 return read(addr, (uint32_t&)data, flags); 426} 427 428 429template <class T> 430Fault 431SimpleCPU::write(T data, Addr addr, unsigned flags, uint64_t *res) 432{ 433 if (traceData) { 434 traceData->setAddr(addr); 435 traceData->setData(data); 436 } 437 438 memReq->reset(addr, sizeof(T), flags); 439 440 // translate to physical address 441 Fault fault = xc->translateDataWriteReq(memReq); 442 443 // do functional access 444 if 
(fault == No_Fault) 445 fault = xc->write(memReq, data); 446 447 if (fault == No_Fault && dcacheInterface) { 448 memReq->cmd = Write; 449 memcpy(memReq->data,(uint8_t *)&data,memReq->size); 450 memReq->completionEvent = NULL; 451 memReq->time = curTick; 452 memReq->flags &= ~UNCACHEABLE; 453 MemAccessResult result = dcacheInterface->access(memReq); 454 455 // Ugly hack to get an event scheduled *only* if the access is 456 // a miss. We really should add first-class support for this 457 // at some point. 458 if (result != MA_HIT && dcacheInterface->doEvents) { 459 memReq->completionEvent = &cacheCompletionEvent; 460 setStatus(DcacheMissStall); 461 } 462 } 463 464 if (res && (fault == No_Fault)) 465 *res = memReq->result; 466 467 return fault; 468} 469 470 471#ifndef DOXYGEN_SHOULD_SKIP_THIS 472template 473Fault 474SimpleCPU::write(uint64_t data, Addr addr, unsigned flags, uint64_t *res); 475 476template 477Fault 478SimpleCPU::write(uint32_t data, Addr addr, unsigned flags, uint64_t *res); 479 480template 481Fault 482SimpleCPU::write(uint16_t data, Addr addr, unsigned flags, uint64_t *res); 483 484template 485Fault 486SimpleCPU::write(uint8_t data, Addr addr, unsigned flags, uint64_t *res); 487 488#endif //DOXYGEN_SHOULD_SKIP_THIS 489 490template<> 491Fault 492SimpleCPU::write(double data, Addr addr, unsigned flags, uint64_t *res) 493{ 494 return write(*(uint64_t*)&data, addr, flags, res); 495} 496 497template<> 498Fault 499SimpleCPU::write(float data, Addr addr, unsigned flags, uint64_t *res) 500{ 501 return write(*(uint32_t*)&data, addr, flags, res); 502} 503 504 505template<> 506Fault 507SimpleCPU::write(int32_t data, Addr addr, unsigned flags, uint64_t *res) 508{ 509 return write((uint32_t)data, addr, flags, res); 510} 511 512 513#ifdef FULL_SYSTEM 514Addr 515SimpleCPU::dbg_vtophys(Addr addr) 516{ 517 return vtophys(xc, addr); 518} 519#endif // FULL_SYSTEM 520 521Tick save_cycle = 0; 522 523 524void 525SimpleCPU::processCacheCompletion() 526{ 527 switch 
(status()) { 528 case IcacheMissStall: 529 icacheStallCycles += curTick - lastIcacheStall; 530 setStatus(IcacheMissComplete); 531 break; 532 case DcacheMissStall: 533 dcacheStallCycles += curTick - lastDcacheStall; 534 setStatus(Running); 535 break; 536 case SwitchedOut: 537 // If this CPU has been switched out due to sampling/warm-up, 538 // ignore any further status changes (e.g., due to cache 539 // misses outstanding at the time of the switch). 540 return; 541 default: 542 panic("SimpleCPU::processCacheCompletion: bad state"); 543 break; 544 } 545} 546 547#ifdef FULL_SYSTEM 548void 549SimpleCPU::post_interrupt(int int_num, int index) 550{ 551 BaseCPU::post_interrupt(int_num, index); 552 553 if (xc->status() == ExecContext::Suspended) { 554 DPRINTF(IPI,"Suspended Processor awoke\n"); 555 xc->setStatus(ExecContext::Active); 556 Annotate::Resume(xc); 557 } 558} 559#endif // FULL_SYSTEM 560 561/* start simulation, program loaded, processor precise state initialized */ 562void 563SimpleCPU::tick() 564{ 565 traceData = NULL; 566 567 Fault fault = No_Fault; 568 569#ifdef FULL_SYSTEM 570 if (AlphaISA::check_interrupts && 571 xc->cpu->check_interrupts() && 572 !PC_PAL(xc->regs.pc) && 573 status() != IcacheMissComplete) { 574 int ipl = 0; 575 int summary = 0; 576 AlphaISA::check_interrupts = 0; 577 IntReg *ipr = xc->regs.ipr; 578 579 if (xc->regs.ipr[TheISA::IPR_SIRR]) { 580 for (int i = TheISA::INTLEVEL_SOFTWARE_MIN; 581 i < TheISA::INTLEVEL_SOFTWARE_MAX; i++) { 582 if (ipr[TheISA::IPR_SIRR] & (ULL(1) << i)) { 583 // See table 4-19 of 21164 hardware reference 584 ipl = (i - TheISA::INTLEVEL_SOFTWARE_MIN) + 1; 585 summary |= (ULL(1) << i); 586 } 587 } 588 } 589 590 uint64_t interrupts = xc->cpu->intr_status(); 591 for (int i = TheISA::INTLEVEL_EXTERNAL_MIN; 592 i < TheISA::INTLEVEL_EXTERNAL_MAX; i++) { 593 if (interrupts & (ULL(1) << i)) { 594 // See table 4-19 of 21164 hardware reference 595 ipl = i; 596 summary |= (ULL(1) << i); 597 } 598 } 599 600 if 
(ipr[TheISA::IPR_ASTRR]) 601 panic("asynchronous traps not implemented\n"); 602 603 if (ipl && ipl > xc->regs.ipr[TheISA::IPR_IPLR]) { 604 ipr[TheISA::IPR_ISR] = summary; 605 ipr[TheISA::IPR_INTID] = ipl; 606 xc->ev5_trap(Interrupt_Fault); 607 608 DPRINTF(Flow, "Interrupt! IPLR=%d ipl=%d summary=%x\n", 609 ipr[TheISA::IPR_IPLR], ipl, summary); 610 } 611 } 612#endif 613 614 // maintain $r0 semantics 615 xc->regs.intRegFile[ZeroReg] = 0; 616#ifdef TARGET_ALPHA 617 xc->regs.floatRegFile.d[ZeroReg] = 0.0; 618#endif // TARGET_ALPHA 619 620 if (status() == IcacheMissComplete) { 621 // We've already fetched an instruction and were stalled on an 622 // I-cache miss. No need to fetch it again. 623 624 setStatus(Running); 625 } 626 else { 627 // Try to fetch an instruction 628 629 // set up memory request for instruction fetch 630#ifdef FULL_SYSTEM 631#define IFETCH_FLAGS(pc) ((pc) & 1) ? PHYSICAL : 0 632#else 633#define IFETCH_FLAGS(pc) 0 634#endif 635 636 memReq->cmd = Read; 637 memReq->reset(xc->regs.pc & ~3, sizeof(uint32_t), 638 IFETCH_FLAGS(xc->regs.pc)); 639 640 fault = xc->translateInstReq(memReq); 641 642 if (fault == No_Fault) 643 fault = xc->mem->read(memReq, inst); 644 645 if (icacheInterface && fault == No_Fault) { 646 memReq->completionEvent = NULL; 647 648 memReq->time = curTick; 649 memReq->flags &= ~UNCACHEABLE; 650 MemAccessResult result = icacheInterface->access(memReq); 651 652 // Ugly hack to get an event scheduled *only* if the access is 653 // a miss. We really should add first-class support for this 654 // at some point. 655 if (result != MA_HIT && icacheInterface->doEvents) { 656 memReq->completionEvent = &cacheCompletionEvent; 657 setStatus(IcacheMissStall); 658 return; 659 } 660 } 661 } 662 663 // If we've got a valid instruction (i.e., no fault on instruction 664 // fetch), then execute it. 
665 if (fault == No_Fault) { 666 667 // keep an instruction count 668 numInst++; 669 670 // check for instruction-count-based events 671 comInsnEventQueue[0]->serviceEvents(numInst); 672 673 // decode the instruction 674 StaticInstPtr<TheISA> si(inst); 675 676 traceData = Trace::getInstRecord(curTick, xc, this, si, 677 xc->regs.pc); 678 679#ifdef FULL_SYSTEM 680 xc->regs.opcode = (inst >> 26) & 0x3f; 681 xc->regs.ra = (inst >> 21) & 0x1f; 682#endif // FULL_SYSTEM 683 684 xc->func_exe_insn++; 685 686 fault = si->execute(this, xc, traceData); 687#ifdef FS_MEASURE 688 if (!(xc->misspeculating()) && (xc->system->bin)) { 689 SWContext *ctx = xc->swCtx; 690 if (ctx && !ctx->callStack.empty()) { 691 if (si->isCall()) { 692 ctx->calls++; 693 } 694 if (si->isReturn()) { 695 if (ctx->calls == 0) { 696 fnCall *top = ctx->callStack.top(); 697 DPRINTF(TCPIP, "Removing %s from callstack.\n", top->name); 698 delete top; 699 ctx->callStack.pop(); 700 if (ctx->callStack.empty()) 701 xc->system->nonPath->activate(); 702 else 703 ctx->callStack.top()->myBin->activate(); 704 705 xc->system->dumpState(xc); 706 } else { 707 ctx->calls--; 708 } 709 } 710 } 711 } 712#endif 713 if (si->isMemRef()) { 714 numMemRefs++; 715 } 716 717 if (si->isLoad()) { 718 ++numLoad; 719 comLoadEventQueue[0]->serviceEvents(numLoad); 720 } 721 722 if (traceData) 723 traceData->finalize(); 724 725 } // if (fault == No_Fault) 726 727 if (fault != No_Fault) { 728#ifdef FULL_SYSTEM 729 xc->ev5_trap(fault); 730#else // !FULL_SYSTEM 731 fatal("fault (%d) detected @ PC 0x%08p", fault, xc->regs.pc); 732#endif // FULL_SYSTEM 733 } 734 else { 735 // go to the next instruction 736 xc->regs.pc = xc->regs.npc; 737 xc->regs.npc += sizeof(MachInst); 738 } 739 740#ifdef FULL_SYSTEM 741 Addr oldpc; 742 do { 743 oldpc = xc->regs.pc; 744 system->pcEventQueue.service(xc); 745 } while (oldpc != xc->regs.pc); 746#endif 747 748 assert(status() == Running || 749 status() == Idle || 750 status() == DcacheMissStall); 751 752 if 
(status() == Running && !tickEvent.scheduled()) 753 tickEvent.schedule(curTick + 1); 754} 755 756 757//////////////////////////////////////////////////////////////////////// 758// 759// SimpleCPU Simulation Object 760// 761BEGIN_DECLARE_SIM_OBJECT_PARAMS(SimpleCPU) 762 763 Param<Counter> max_insts_any_thread; 764 Param<Counter> max_insts_all_threads; 765 Param<Counter> max_loads_any_thread; 766 Param<Counter> max_loads_all_threads; 767 768#ifdef FULL_SYSTEM 769 SimObjectParam<AlphaItb *> itb; 770 SimObjectParam<AlphaDtb *> dtb; 771 SimObjectParam<FunctionalMemory *> mem; 772 SimObjectParam<System *> system; 773 Param<int> mult; 774#else 775 SimObjectParam<Process *> workload; 776#endif // FULL_SYSTEM 777 778 SimObjectParam<BaseMem *> icache; 779 SimObjectParam<BaseMem *> dcache; 780 781 Param<bool> defer_registration; 782 783END_DECLARE_SIM_OBJECT_PARAMS(SimpleCPU) 784 785BEGIN_INIT_SIM_OBJECT_PARAMS(SimpleCPU) 786 787 INIT_PARAM_DFLT(max_insts_any_thread, 788 "terminate when any thread reaches this insn count", 789 0), 790 INIT_PARAM_DFLT(max_insts_all_threads, 791 "terminate when all threads have reached this insn count", 792 0), 793 INIT_PARAM_DFLT(max_loads_any_thread, 794 "terminate when any thread reaches this load count", 795 0), 796 INIT_PARAM_DFLT(max_loads_all_threads, 797 "terminate when all threads have reached this load count", 798 0), 799 800#ifdef FULL_SYSTEM 801 INIT_PARAM(itb, "Instruction TLB"), 802 INIT_PARAM(dtb, "Data TLB"), 803 INIT_PARAM(mem, "memory"), 804 INIT_PARAM(system, "system object"), 805 INIT_PARAM_DFLT(mult, "system clock multiplier", 1), 806#else 807 INIT_PARAM(workload, "processes to run"), 808#endif // FULL_SYSTEM 809 810 INIT_PARAM_DFLT(icache, "L1 instruction cache object", NULL), 811 INIT_PARAM_DFLT(dcache, "L1 data cache object", NULL), 812 INIT_PARAM_DFLT(defer_registration, "defer registration with system " 813 "(for sampling)", false) 814 815END_INIT_SIM_OBJECT_PARAMS(SimpleCPU) 816 817 818CREATE_SIM_OBJECT(SimpleCPU) 
819{ 820 SimpleCPU *cpu; 821#ifdef FULL_SYSTEM 822 if (mult != 1) 823 panic("processor clock multiplier must be 1\n"); 824 825 cpu = new SimpleCPU(getInstanceName(), system, 826 max_insts_any_thread, max_insts_all_threads, 827 max_loads_any_thread, max_loads_all_threads, 828 itb, dtb, mem, 829 (icache) ? icache->getInterface() : NULL, 830 (dcache) ? dcache->getInterface() : NULL, 831 ticksPerSecond * mult); 832#else 833 834 cpu = new SimpleCPU(getInstanceName(), workload, 835 max_insts_any_thread, max_insts_all_threads, 836 max_loads_any_thread, max_loads_all_threads, 837 (icache) ? icache->getInterface() : NULL, 838 (dcache) ? dcache->getInterface() : NULL); 839 840#endif // FULL_SYSTEM 841 842 if (!defer_registration) { 843 cpu->registerExecContexts(); 844 } 845 846 return cpu; 847} 848 849REGISTER_SIM_OBJECT("SimpleCPU", SimpleCPU) 850