// simple_cpu.cc — revision 1191 (file header note; "base.cc" appears to be a mislabel)
1/* 2 * Copyright (c) 2002-2004 The Regents of The University of Michigan 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions are 7 * met: redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer; 9 * redistributions in binary form must reproduce the above copyright 10 * notice, this list of conditions and the following disclaimer in the 11 * documentation and/or other materials provided with the distribution; 12 * neither the name of the copyright holders nor the names of its 13 * contributors may be used to endorse or promote products derived from 14 * this software without specific prior written permission. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
27 */ 28 29#include <cmath> 30#include <cstdio> 31#include <cstdlib> 32#include <iostream> 33#include <iomanip> 34#include <list> 35#include <sstream> 36#include <string> 37 38#include "base/cprintf.hh" 39#include "base/inifile.hh" 40#include "base/loader/symtab.hh" 41#include "base/misc.hh" 42#include "base/pollevent.hh" 43#include "base/range.hh" 44#include "base/trace.hh" 45#include "base/stats/events.hh" 46#include "cpu/base_cpu.hh" 47#include "cpu/exec_context.hh" 48#include "cpu/exetrace.hh" 49#include "cpu/full_cpu/smt.hh" 50#include "cpu/simple_cpu/simple_cpu.hh" 51#include "cpu/static_inst.hh" 52#include "mem/base_mem.hh" 53#include "mem/mem_interface.hh" 54#include "sim/builder.hh" 55#include "sim/debug.hh" 56#include "sim/host.hh" 57#include "sim/sim_events.hh" 58#include "sim/sim_object.hh" 59#include "sim/stats.hh" 60 61#ifdef FULL_SYSTEM 62#include "base/remote_gdb.hh" 63#include "dev/alpha_access.h" 64#include "dev/pciareg.h" 65#include "mem/functional_mem/memory_control.hh" 66#include "mem/functional_mem/physical_memory.hh" 67#include "sim/system.hh" 68#include "targetarch/alpha_memory.hh" 69#include "targetarch/vtophys.hh" 70#else // !FULL_SYSTEM 71#include "eio/eio.hh" 72#include "mem/functional_mem/functional_memory.hh" 73#endif // FULL_SYSTEM 74 75using namespace std; 76 77SimpleCPU::TickEvent::TickEvent(SimpleCPU *c) 78 : Event(&mainEventQueue, CPU_Tick_Pri), cpu(c), multiplier(1) 79{ 80} 81 82void 83SimpleCPU::TickEvent::process() 84{ 85 int count = multiplier; 86 do { 87 cpu->tick(); 88 } while (--count > 0 && cpu->status() == Running); 89} 90 91const char * 92SimpleCPU::TickEvent::description() 93{ 94 return "SimpleCPU tick event"; 95} 96 97 98SimpleCPU::CacheCompletionEvent::CacheCompletionEvent(SimpleCPU *_cpu) 99 : Event(&mainEventQueue), 100 cpu(_cpu) 101{ 102} 103 104void SimpleCPU::CacheCompletionEvent::process() 105{ 106 cpu->processCacheCompletion(); 107} 108 109const char * 110SimpleCPU::CacheCompletionEvent::description() 111{ 112 
return "SimpleCPU cache completion event"; 113} 114 115#ifdef FULL_SYSTEM 116SimpleCPU::SimpleCPU(const string &_name, 117 System *_system, 118 Counter max_insts_any_thread, 119 Counter max_insts_all_threads, 120 Counter max_loads_any_thread, 121 Counter max_loads_all_threads, 122 AlphaITB *itb, AlphaDTB *dtb, 123 FunctionalMemory *mem, 124 MemInterface *icache_interface, 125 MemInterface *dcache_interface, 126 bool _def_reg, Tick freq, 127 bool _function_trace, Tick _function_trace_start) 128 : BaseCPU(_name, /* number_of_threads */ 1, _def_reg, 129 max_insts_any_thread, max_insts_all_threads, 130 max_loads_any_thread, max_loads_all_threads, 131 _system, freq, _function_trace, _function_trace_start), 132#else 133SimpleCPU::SimpleCPU(const string &_name, Process *_process, 134 Counter max_insts_any_thread, 135 Counter max_insts_all_threads, 136 Counter max_loads_any_thread, 137 Counter max_loads_all_threads, 138 MemInterface *icache_interface, 139 MemInterface *dcache_interface, 140 bool _def_reg, 141 bool _function_trace, Tick _function_trace_start) 142 : BaseCPU(_name, /* number_of_threads */ 1, _def_reg, 143 max_insts_any_thread, max_insts_all_threads, 144 max_loads_any_thread, max_loads_all_threads, 145 _function_trace, _function_trace_start), 146#endif 147 tickEvent(this), xc(NULL), cacheCompletionEvent(this) 148{ 149 _status = Idle; 150#ifdef FULL_SYSTEM 151 xc = new ExecContext(this, 0, system, itb, dtb, mem); 152 153 // initialize CPU, including PC 154 TheISA::initCPU(&xc->regs); 155#else 156 xc = new ExecContext(this, /* thread_num */ 0, _process, /* asid */ 0); 157#endif // !FULL_SYSTEM 158 159 icacheInterface = icache_interface; 160 dcacheInterface = dcache_interface; 161 162 memReq = new MemReq(); 163 memReq->xc = xc; 164 memReq->asid = 0; 165 memReq->data = new uint8_t[64]; 166 167 numInst = 0; 168 startNumInst = 0; 169 numLoad = 0; 170 startNumLoad = 0; 171 lastIcacheStall = 0; 172 lastDcacheStall = 0; 173 174 execContexts.push_back(xc); 175} 176 
177SimpleCPU::~SimpleCPU() 178{ 179} 180 181void 182SimpleCPU::switchOut() 183{ 184 _status = SwitchedOut; 185 if (tickEvent.scheduled()) 186 tickEvent.squash(); 187} 188 189 190void 191SimpleCPU::takeOverFrom(BaseCPU *oldCPU) 192{ 193 BaseCPU::takeOverFrom(oldCPU); 194 195 assert(!tickEvent.scheduled()); 196 197 // if any of this CPU's ExecContexts are active, mark the CPU as 198 // running and schedule its tick event. 199 for (int i = 0; i < execContexts.size(); ++i) { 200 ExecContext *xc = execContexts[i]; 201 if (xc->status() == ExecContext::Active && _status != Running) { 202 _status = Running; 203 tickEvent.schedule(curTick); 204 } 205 } 206 207 oldCPU->switchOut(); 208} 209 210 211void 212SimpleCPU::activateContext(int thread_num, int delay) 213{ 214 assert(thread_num == 0); 215 assert(xc); 216 217 assert(_status == Idle); 218 notIdleFraction++; 219 scheduleTickEvent(delay); 220 _status = Running; 221} 222 223 224void 225SimpleCPU::suspendContext(int thread_num) 226{ 227 assert(thread_num == 0); 228 assert(xc); 229 230 assert(_status == Running); 231 notIdleFraction--; 232 unscheduleTickEvent(); 233 _status = Idle; 234} 235 236 237void 238SimpleCPU::deallocateContext(int thread_num) 239{ 240 // for now, these are equivalent 241 suspendContext(thread_num); 242} 243 244 245void 246SimpleCPU::haltContext(int thread_num) 247{ 248 // for now, these are equivalent 249 suspendContext(thread_num); 250} 251 252 253void 254SimpleCPU::regStats() 255{ 256 using namespace Stats; 257 258 BaseCPU::regStats(); 259 260 numInsts 261 .name(name() + ".num_insts") 262 .desc("Number of instructions executed") 263 ; 264 265 numMemRefs 266 .name(name() + ".num_refs") 267 .desc("Number of memory references") 268 ; 269 270 notIdleFraction 271 .name(name() + ".not_idle_fraction") 272 .desc("Percentage of non-idle cycles") 273 ; 274 275 idleFraction 276 .name(name() + ".idle_fraction") 277 .desc("Percentage of idle cycles") 278 ; 279 280 icacheStallCycles 281 .name(name() + 
".icache_stall_cycles") 282 .desc("ICache total stall cycles") 283 .prereq(icacheStallCycles) 284 ; 285 286 dcacheStallCycles 287 .name(name() + ".dcache_stall_cycles") 288 .desc("DCache total stall cycles") 289 .prereq(dcacheStallCycles) 290 ; 291 292 idleFraction = constant(1.0) - notIdleFraction; 293} 294 295void 296SimpleCPU::resetStats() 297{ 298 startNumInst = numInst; 299 notIdleFraction = (_status != Idle); 300} 301 302void 303SimpleCPU::serialize(ostream &os) 304{ 305 BaseCPU::serialize(os); 306 SERIALIZE_ENUM(_status); 307 SERIALIZE_SCALAR(inst); 308 nameOut(os, csprintf("%s.xc", name())); 309 xc->serialize(os); 310 nameOut(os, csprintf("%s.tickEvent", name())); 311 tickEvent.serialize(os); 312 nameOut(os, csprintf("%s.cacheCompletionEvent", name())); 313 cacheCompletionEvent.serialize(os); 314} 315 316void 317SimpleCPU::unserialize(Checkpoint *cp, const string §ion) 318{ 319 BaseCPU::unserialize(cp, section); 320 UNSERIALIZE_ENUM(_status); 321 UNSERIALIZE_SCALAR(inst); 322 xc->unserialize(cp, csprintf("%s.xc", section)); 323 tickEvent.unserialize(cp, csprintf("%s.tickEvent", section)); 324 cacheCompletionEvent 325 .unserialize(cp, csprintf("%s.cacheCompletionEvent", section)); 326} 327 328void 329change_thread_state(int thread_number, int activate, int priority) 330{ 331} 332 333Fault 334SimpleCPU::copySrcTranslate(Addr src) 335{ 336 static bool no_warn = true; 337 int blk_size = (dcacheInterface) ? dcacheInterface->getBlockSize() : 64; 338 // Only support block sizes of 64 atm. 
339 assert(blk_size == 64); 340 int offset = src & (blk_size - 1); 341 342 // Make sure block doesn't span page 343 if (no_warn && 344 (src & TheISA::PageMask) != ((src + blk_size) & TheISA::PageMask) && 345 (src >> 40) != 0xfffffc) { 346 warn("Copied block source spans pages %x.", src); 347 no_warn = false; 348 } 349 350 memReq->reset(src & ~(blk_size - 1), blk_size); 351 352 // translate to physical address 353 Fault fault = xc->translateDataReadReq(memReq); 354 355 assert(fault != Alignment_Fault); 356 357 if (fault == No_Fault) { 358 xc->copySrcAddr = src; 359 xc->copySrcPhysAddr = memReq->paddr + offset; 360 } else { 361 xc->copySrcAddr = 0; 362 xc->copySrcPhysAddr = 0; 363 } 364 return fault; 365} 366 367Fault 368SimpleCPU::copy(Addr dest) 369{ 370 static bool no_warn = true; 371 int blk_size = (dcacheInterface) ? dcacheInterface->getBlockSize() : 64; 372 // Only support block sizes of 64 atm. 373 assert(blk_size == 64); 374 uint8_t data[blk_size]; 375 //assert(xc->copySrcAddr); 376 int offset = dest & (blk_size - 1); 377 378 // Make sure block doesn't span page 379 if (no_warn && 380 (dest & TheISA::PageMask) != ((dest + blk_size) & TheISA::PageMask) && 381 (dest >> 40) != 0xfffffc) { 382 no_warn = false; 383 warn("Copied block destination spans pages %x. ", dest); 384 } 385 386 memReq->reset(dest & ~(blk_size -1), blk_size); 387 // translate to physical address 388 Fault fault = xc->translateDataWriteReq(memReq); 389 390 assert(fault != Alignment_Fault); 391 392 if (fault == No_Fault) { 393 Addr dest_addr = memReq->paddr + offset; 394 // Need to read straight from memory since we have more than 8 bytes. 
395 memReq->paddr = xc->copySrcPhysAddr; 396 xc->mem->read(memReq, data); 397 memReq->paddr = dest_addr; 398 xc->mem->write(memReq, data); 399 if (dcacheInterface) { 400 memReq->cmd = Copy; 401 memReq->completionEvent = NULL; 402 memReq->paddr = xc->copySrcPhysAddr; 403 memReq->dest = dest_addr; 404 memReq->size = 64; 405 memReq->time = curTick; 406 dcacheInterface->access(memReq); 407 } 408 } 409 return fault; 410} 411 412// precise architected memory state accessor macros 413template <class T> 414Fault 415SimpleCPU::read(Addr addr, T &data, unsigned flags) 416{ 417 memReq->reset(addr, sizeof(T), flags); 418 419 // translate to physical address 420 Fault fault = xc->translateDataReadReq(memReq); 421 422 // do functional access 423 if (fault == No_Fault) 424 fault = xc->read(memReq, data); 425 426 if (traceData) { 427 traceData->setAddr(addr); 428 if (fault == No_Fault) 429 traceData->setData(data); 430 } 431 432 // if we have a cache, do cache access too 433 if (fault == No_Fault && dcacheInterface) { 434 memReq->cmd = Read; 435 memReq->completionEvent = NULL; 436 memReq->time = curTick; 437 MemAccessResult result = dcacheInterface->access(memReq); 438 439 // Ugly hack to get an event scheduled *only* if the access is 440 // a miss. We really should add first-class support for this 441 // at some point. 
442 if (result != MA_HIT && dcacheInterface->doEvents()) { 443 memReq->completionEvent = &cacheCompletionEvent; 444 lastDcacheStall = curTick; 445 unscheduleTickEvent(); 446 _status = DcacheMissStall; 447 } 448 } 449 450 if (!dcacheInterface && (memReq->flags & UNCACHEABLE)) 451 recordEvent("Uncached Read"); 452 453 return fault; 454} 455 456#ifndef DOXYGEN_SHOULD_SKIP_THIS 457 458template 459Fault 460SimpleCPU::read(Addr addr, uint64_t &data, unsigned flags); 461 462template 463Fault 464SimpleCPU::read(Addr addr, uint32_t &data, unsigned flags); 465 466template 467Fault 468SimpleCPU::read(Addr addr, uint16_t &data, unsigned flags); 469 470template 471Fault 472SimpleCPU::read(Addr addr, uint8_t &data, unsigned flags); 473 474#endif //DOXYGEN_SHOULD_SKIP_THIS 475 476template<> 477Fault 478SimpleCPU::read(Addr addr, double &data, unsigned flags) 479{ 480 return read(addr, *(uint64_t*)&data, flags); 481} 482 483template<> 484Fault 485SimpleCPU::read(Addr addr, float &data, unsigned flags) 486{ 487 return read(addr, *(uint32_t*)&data, flags); 488} 489 490 491template<> 492Fault 493SimpleCPU::read(Addr addr, int32_t &data, unsigned flags) 494{ 495 return read(addr, (uint32_t&)data, flags); 496} 497 498 499template <class T> 500Fault 501SimpleCPU::write(T data, Addr addr, unsigned flags, uint64_t *res) 502{ 503 if (traceData) { 504 traceData->setAddr(addr); 505 traceData->setData(data); 506 } 507 508 memReq->reset(addr, sizeof(T), flags); 509 510 // translate to physical address 511 Fault fault = xc->translateDataWriteReq(memReq); 512 513 // do functional access 514 if (fault == No_Fault) 515 fault = xc->write(memReq, data); 516 517 if (fault == No_Fault && dcacheInterface) { 518 memReq->cmd = Write; 519 memcpy(memReq->data,(uint8_t *)&data,memReq->size); 520 memReq->completionEvent = NULL; 521 memReq->time = curTick; 522 MemAccessResult result = dcacheInterface->access(memReq); 523 524 // Ugly hack to get an event scheduled *only* if the access is 525 // a miss. 
We really should add first-class support for this 526 // at some point. 527 if (result != MA_HIT && dcacheInterface->doEvents()) { 528 memReq->completionEvent = &cacheCompletionEvent; 529 lastDcacheStall = curTick; 530 unscheduleTickEvent(); 531 _status = DcacheMissStall; 532 } 533 } 534 535 if (res && (fault == No_Fault)) 536 *res = memReq->result; 537 538 if (!dcacheInterface && (memReq->flags & UNCACHEABLE)) 539 recordEvent("Uncached Write"); 540 541 return fault; 542} 543 544 545#ifndef DOXYGEN_SHOULD_SKIP_THIS 546template 547Fault 548SimpleCPU::write(uint64_t data, Addr addr, unsigned flags, uint64_t *res); 549 550template 551Fault 552SimpleCPU::write(uint32_t data, Addr addr, unsigned flags, uint64_t *res); 553 554template 555Fault 556SimpleCPU::write(uint16_t data, Addr addr, unsigned flags, uint64_t *res); 557 558template 559Fault 560SimpleCPU::write(uint8_t data, Addr addr, unsigned flags, uint64_t *res); 561 562#endif //DOXYGEN_SHOULD_SKIP_THIS 563 564template<> 565Fault 566SimpleCPU::write(double data, Addr addr, unsigned flags, uint64_t *res) 567{ 568 return write(*(uint64_t*)&data, addr, flags, res); 569} 570 571template<> 572Fault 573SimpleCPU::write(float data, Addr addr, unsigned flags, uint64_t *res) 574{ 575 return write(*(uint32_t*)&data, addr, flags, res); 576} 577 578 579template<> 580Fault 581SimpleCPU::write(int32_t data, Addr addr, unsigned flags, uint64_t *res) 582{ 583 return write((uint32_t)data, addr, flags, res); 584} 585 586 587#ifdef FULL_SYSTEM 588Addr 589SimpleCPU::dbg_vtophys(Addr addr) 590{ 591 return vtophys(xc, addr); 592} 593#endif // FULL_SYSTEM 594 595Tick save_cycle = 0; 596 597 598void 599SimpleCPU::processCacheCompletion() 600{ 601 switch (status()) { 602 case IcacheMissStall: 603 icacheStallCycles += curTick - lastIcacheStall; 604 _status = IcacheMissComplete; 605 scheduleTickEvent(1); 606 break; 607 case DcacheMissStall: 608 dcacheStallCycles += curTick - lastDcacheStall; 609 _status = Running; 610 scheduleTickEvent(1); 
611 break; 612 case SwitchedOut: 613 // If this CPU has been switched out due to sampling/warm-up, 614 // ignore any further status changes (e.g., due to cache 615 // misses outstanding at the time of the switch). 616 return; 617 default: 618 panic("SimpleCPU::processCacheCompletion: bad state"); 619 break; 620 } 621} 622 623#ifdef FULL_SYSTEM 624void 625SimpleCPU::post_interrupt(int int_num, int index) 626{ 627 BaseCPU::post_interrupt(int_num, index); 628 629 if (xc->status() == ExecContext::Suspended) { 630 DPRINTF(IPI,"Suspended Processor awoke\n"); 631 xc->activate(); 632 } 633} 634#endif // FULL_SYSTEM 635 636/* start simulation, program loaded, processor precise state initialized */ 637void 638SimpleCPU::tick() 639{ 640 numCycles++; 641 642 traceData = NULL; 643 644 Fault fault = No_Fault; 645 646#ifdef FULL_SYSTEM 647 if (checkInterrupts && check_interrupts() && !xc->inPalMode() && 648 status() != IcacheMissComplete) { 649 int ipl = 0; 650 int summary = 0; 651 checkInterrupts = false; 652 IntReg *ipr = xc->regs.ipr; 653 654 if (xc->regs.ipr[TheISA::IPR_SIRR]) { 655 for (int i = TheISA::INTLEVEL_SOFTWARE_MIN; 656 i < TheISA::INTLEVEL_SOFTWARE_MAX; i++) { 657 if (ipr[TheISA::IPR_SIRR] & (ULL(1) << i)) { 658 // See table 4-19 of 21164 hardware reference 659 ipl = (i - TheISA::INTLEVEL_SOFTWARE_MIN) + 1; 660 summary |= (ULL(1) << i); 661 } 662 } 663 } 664 665 uint64_t interrupts = xc->cpu->intr_status(); 666 for (int i = TheISA::INTLEVEL_EXTERNAL_MIN; 667 i < TheISA::INTLEVEL_EXTERNAL_MAX; i++) { 668 if (interrupts & (ULL(1) << i)) { 669 // See table 4-19 of 21164 hardware reference 670 ipl = i; 671 summary |= (ULL(1) << i); 672 } 673 } 674 675 if (ipr[TheISA::IPR_ASTRR]) 676 panic("asynchronous traps not implemented\n"); 677 678 if (ipl && ipl > xc->regs.ipr[TheISA::IPR_IPLR]) { 679 ipr[TheISA::IPR_ISR] = summary; 680 ipr[TheISA::IPR_INTID] = ipl; 681 xc->ev5_trap(Interrupt_Fault); 682 683 DPRINTF(Flow, "Interrupt! 
IPLR=%d ipl=%d summary=%x\n", 684 ipr[TheISA::IPR_IPLR], ipl, summary); 685 } 686 } 687#endif 688 689 // maintain $r0 semantics 690 xc->regs.intRegFile[ZeroReg] = 0; 691#ifdef TARGET_ALPHA 692 xc->regs.floatRegFile.d[ZeroReg] = 0.0; 693#endif // TARGET_ALPHA 694 695 if (status() == IcacheMissComplete) { 696 // We've already fetched an instruction and were stalled on an 697 // I-cache miss. No need to fetch it again. 698 699 // Set status to running; tick event will get rescheduled if 700 // necessary at end of tick() function. 701 _status = Running; 702 } 703 else { 704 // Try to fetch an instruction 705 706 // set up memory request for instruction fetch 707#ifdef FULL_SYSTEM 708#define IFETCH_FLAGS(pc) ((pc) & 1) ? PHYSICAL : 0 709#else 710#define IFETCH_FLAGS(pc) 0 711#endif 712 713 memReq->cmd = Read; 714 memReq->reset(xc->regs.pc & ~3, sizeof(uint32_t), 715 IFETCH_FLAGS(xc->regs.pc)); 716 717 fault = xc->translateInstReq(memReq); 718 719 if (fault == No_Fault) 720 fault = xc->mem->read(memReq, inst); 721 722 if (icacheInterface && fault == No_Fault) { 723 memReq->completionEvent = NULL; 724 725 memReq->time = curTick; 726 MemAccessResult result = icacheInterface->access(memReq); 727 728 // Ugly hack to get an event scheduled *only* if the access is 729 // a miss. We really should add first-class support for this 730 // at some point. 731 if (result != MA_HIT && icacheInterface->doEvents()) { 732 memReq->completionEvent = &cacheCompletionEvent; 733 lastIcacheStall = curTick; 734 unscheduleTickEvent(); 735 _status = IcacheMissStall; 736 return; 737 } 738 } 739 } 740 741 // If we've got a valid instruction (i.e., no fault on instruction 742 // fetch), then execute it. 
743 if (fault == No_Fault) { 744 745 // keep an instruction count 746 numInst++; 747 numInsts++; 748 749 // check for instruction-count-based events 750 comInstEventQueue[0]->serviceEvents(numInst); 751 752 // decode the instruction 753 inst = htoa(inst); 754 StaticInstPtr<TheISA> si(inst); 755 756 traceData = Trace::getInstRecord(curTick, xc, this, si, 757 xc->regs.pc); 758 759#ifdef FULL_SYSTEM 760 xc->setInst(inst); 761#endif // FULL_SYSTEM 762 763 xc->func_exe_inst++; 764 765 fault = si->execute(this, traceData); 766 767#ifdef FULL_SYSTEM 768 if (xc->fnbin) 769 xc->execute(si.get()); 770#endif 771 772 if (si->isMemRef()) { 773 numMemRefs++; 774 } 775 776 if (si->isLoad()) { 777 ++numLoad; 778 comLoadEventQueue[0]->serviceEvents(numLoad); 779 } 780 781 if (traceData) 782 traceData->finalize(); 783 784 traceFunctions(xc->regs.pc); 785 786 } // if (fault == No_Fault) 787 788 if (fault != No_Fault) { 789#ifdef FULL_SYSTEM 790 xc->ev5_trap(fault); 791#else // !FULL_SYSTEM 792 fatal("fault (%d) detected @ PC 0x%08p", fault, xc->regs.pc); 793#endif // FULL_SYSTEM 794 } 795 else { 796 // go to the next instruction 797 xc->regs.pc = xc->regs.npc; 798 xc->regs.npc += sizeof(MachInst); 799 } 800 801#ifdef FULL_SYSTEM 802 Addr oldpc; 803 do { 804 oldpc = xc->regs.pc; 805 system->pcEventQueue.service(xc); 806 } while (oldpc != xc->regs.pc); 807#endif 808 809 assert(status() == Running || 810 status() == Idle || 811 status() == DcacheMissStall); 812 813 if (status() == Running && !tickEvent.scheduled()) 814 tickEvent.schedule(curTick + 1); 815} 816 817 818//////////////////////////////////////////////////////////////////////// 819// 820// SimpleCPU Simulation Object 821// 822BEGIN_DECLARE_SIM_OBJECT_PARAMS(SimpleCPU) 823 824 Param<Counter> max_insts_any_thread; 825 Param<Counter> max_insts_all_threads; 826 Param<Counter> max_loads_any_thread; 827 Param<Counter> max_loads_all_threads; 828 829#ifdef FULL_SYSTEM 830 SimObjectParam<AlphaITB *> itb; 831 SimObjectParam<AlphaDTB *> 
dtb; 832 SimObjectParam<FunctionalMemory *> mem; 833 SimObjectParam<System *> system; 834 Param<int> mult; 835#else 836 SimObjectParam<Process *> workload; 837#endif // FULL_SYSTEM 838 839 SimObjectParam<BaseMem *> icache; 840 SimObjectParam<BaseMem *> dcache; 841 842 Param<bool> defer_registration; 843 Param<int> multiplier; 844 Param<bool> function_trace; 845 Param<Tick> function_trace_start; 846 847END_DECLARE_SIM_OBJECT_PARAMS(SimpleCPU) 848 849BEGIN_INIT_SIM_OBJECT_PARAMS(SimpleCPU) 850 851 INIT_PARAM_DFLT(max_insts_any_thread, 852 "terminate when any thread reaches this inst count", 853 0), 854 INIT_PARAM_DFLT(max_insts_all_threads, 855 "terminate when all threads have reached this inst count", 856 0), 857 INIT_PARAM_DFLT(max_loads_any_thread, 858 "terminate when any thread reaches this load count", 859 0), 860 INIT_PARAM_DFLT(max_loads_all_threads, 861 "terminate when all threads have reached this load count", 862 0), 863 864#ifdef FULL_SYSTEM 865 INIT_PARAM(itb, "Instruction TLB"), 866 INIT_PARAM(dtb, "Data TLB"), 867 INIT_PARAM(mem, "memory"), 868 INIT_PARAM(system, "system object"), 869 INIT_PARAM_DFLT(mult, "system clock multiplier", 1), 870#else 871 INIT_PARAM(workload, "processes to run"), 872#endif // FULL_SYSTEM 873 874 INIT_PARAM_DFLT(icache, "L1 instruction cache object", NULL), 875 INIT_PARAM_DFLT(dcache, "L1 data cache object", NULL), 876 INIT_PARAM_DFLT(defer_registration, "defer registration with system " 877 "(for sampling)", false), 878 879 INIT_PARAM_DFLT(multiplier, "clock multiplier", 1), 880 INIT_PARAM_DFLT(function_trace, "Enable function trace", false), 881 INIT_PARAM_DFLT(function_trace_start, "Cycle to start function trace", 0) 882 883END_INIT_SIM_OBJECT_PARAMS(SimpleCPU) 884 885 886CREATE_SIM_OBJECT(SimpleCPU) 887{ 888 SimpleCPU *cpu; 889#ifdef FULL_SYSTEM 890 if (mult != 1) 891 panic("processor clock multiplier must be 1\n"); 892 893 cpu = new SimpleCPU(getInstanceName(), system, 894 max_insts_any_thread, max_insts_all_threads, 895 
max_loads_any_thread, max_loads_all_threads, 896 itb, dtb, mem, 897 (icache) ? icache->getInterface() : NULL, 898 (dcache) ? dcache->getInterface() : NULL, 899 defer_registration, 900 ticksPerSecond * mult, 901 function_trace, function_trace_start); 902#else 903 904 cpu = new SimpleCPU(getInstanceName(), workload, 905 max_insts_any_thread, max_insts_all_threads, 906 max_loads_any_thread, max_loads_all_threads, 907 (icache) ? icache->getInterface() : NULL, 908 (dcache) ? dcache->getInterface() : NULL, 909 defer_registration, 910 function_trace, function_trace_start); 911 912#endif // FULL_SYSTEM 913 914 cpu->setTickMultiplier(multiplier); 915 916 return cpu; 917} 918 919REGISTER_SIM_OBJECT("SimpleCPU", SimpleCPU) 920 921