// base.cc revision 1388
1/* 2 * Copyright (c) 2002-2004 The Regents of The University of Michigan 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions are 7 * met: redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer; 9 * redistributions in binary form must reproduce the above copyright 10 * notice, this list of conditions and the following disclaimer in the 11 * documentation and/or other materials provided with the distribution; 12 * neither the name of the copyright holders nor the names of its 13 * contributors may be used to endorse or promote products derived from 14 * this software without specific prior written permission. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
27 */ 28 29#include <cmath> 30#include <cstdio> 31#include <cstdlib> 32#include <iostream> 33#include <iomanip> 34#include <list> 35#include <sstream> 36#include <string> 37 38#include "base/cprintf.hh" 39#include "base/inifile.hh" 40#include "base/loader/symtab.hh" 41#include "base/misc.hh" 42#include "base/pollevent.hh" 43#include "base/range.hh" 44#include "base/trace.hh" 45#include "base/stats/events.hh" 46#include "cpu/base_cpu.hh" 47#include "cpu/exec_context.hh" 48#include "cpu/exetrace.hh" 49#include "cpu/full_cpu/smt.hh" 50#include "cpu/simple_cpu/simple_cpu.hh" 51#include "cpu/static_inst.hh" 52#include "mem/base_mem.hh" 53#include "mem/mem_interface.hh" 54#include "sim/builder.hh" 55#include "sim/debug.hh" 56#include "sim/host.hh" 57#include "sim/sim_events.hh" 58#include "sim/sim_object.hh" 59#include "sim/stats.hh" 60 61#ifdef FULL_SYSTEM 62#include "base/remote_gdb.hh" 63#include "dev/alpha_access.h" 64#include "dev/pciareg.h" 65#include "mem/functional_mem/memory_control.hh" 66#include "mem/functional_mem/physical_memory.hh" 67#include "sim/system.hh" 68#include "targetarch/alpha_memory.hh" 69#include "targetarch/vtophys.hh" 70#else // !FULL_SYSTEM 71#include "eio/eio.hh" 72#include "mem/functional_mem/functional_memory.hh" 73#endif // FULL_SYSTEM 74 75using namespace std; 76 77 78SimpleCPU::TickEvent::TickEvent(SimpleCPU *c) 79 : Event(&mainEventQueue, CPU_Tick_Pri), cpu(c), multiplier(1) 80{ 81} 82 83void 84SimpleCPU::TickEvent::process() 85{ 86 int count = multiplier; 87 do { 88 cpu->tick(); 89 } while (--count > 0 && cpu->status() == Running); 90} 91 92const char * 93SimpleCPU::TickEvent::description() 94{ 95 return "SimpleCPU tick event"; 96} 97 98 99SimpleCPU::CacheCompletionEvent::CacheCompletionEvent(SimpleCPU *_cpu) 100 : Event(&mainEventQueue), 101 cpu(_cpu) 102{ 103} 104 105void SimpleCPU::CacheCompletionEvent::process() 106{ 107 cpu->processCacheCompletion(); 108} 109 110const char * 111SimpleCPU::CacheCompletionEvent::description() 112{ 
113 return "SimpleCPU cache completion event"; 114} 115 116#ifdef FULL_SYSTEM 117SimpleCPU::SimpleCPU(const string &_name, 118 System *_system, 119 Counter max_insts_any_thread, 120 Counter max_insts_all_threads, 121 Counter max_loads_any_thread, 122 Counter max_loads_all_threads, 123 AlphaITB *itb, AlphaDTB *dtb, 124 FunctionalMemory *mem, 125 MemInterface *icache_interface, 126 MemInterface *dcache_interface, 127 bool _def_reg, Tick freq, 128 bool _function_trace, Tick _function_trace_start) 129 : BaseCPU(_name, /* number_of_threads */ 1, _def_reg, 130 max_insts_any_thread, max_insts_all_threads, 131 max_loads_any_thread, max_loads_all_threads, 132 _system, freq, _function_trace, _function_trace_start), 133#else 134SimpleCPU::SimpleCPU(const string &_name, Process *_process, 135 Counter max_insts_any_thread, 136 Counter max_insts_all_threads, 137 Counter max_loads_any_thread, 138 Counter max_loads_all_threads, 139 MemInterface *icache_interface, 140 MemInterface *dcache_interface, 141 bool _def_reg, 142 bool _function_trace, Tick _function_trace_start) 143 : BaseCPU(_name, /* number_of_threads */ 1, _def_reg, 144 max_insts_any_thread, max_insts_all_threads, 145 max_loads_any_thread, max_loads_all_threads, 146 _function_trace, _function_trace_start), 147#endif 148 tickEvent(this), xc(NULL), cacheCompletionEvent(this) 149{ 150 _status = Idle; 151#ifdef FULL_SYSTEM 152 xc = new ExecContext(this, 0, system, itb, dtb, mem); 153 154 // initialize CPU, including PC 155 TheISA::initCPU(&xc->regs); 156#else 157 xc = new ExecContext(this, /* thread_num */ 0, _process, /* asid */ 0); 158#endif // !FULL_SYSTEM 159 160 icacheInterface = icache_interface; 161 dcacheInterface = dcache_interface; 162 163 memReq = new MemReq(); 164 memReq->xc = xc; 165 memReq->asid = 0; 166 memReq->data = new uint8_t[64]; 167 168 numInst = 0; 169 startNumInst = 0; 170 numLoad = 0; 171 startNumLoad = 0; 172 lastIcacheStall = 0; 173 lastDcacheStall = 0; 174 175 execContexts.push_back(xc); 176} 177 
178SimpleCPU::~SimpleCPU() 179{ 180} 181 182void 183SimpleCPU::switchOut() 184{ 185 _status = SwitchedOut; 186 if (tickEvent.scheduled()) 187 tickEvent.squash(); 188} 189 190 191void 192SimpleCPU::takeOverFrom(BaseCPU *oldCPU) 193{ 194 BaseCPU::takeOverFrom(oldCPU); 195 196 assert(!tickEvent.scheduled()); 197 198 // if any of this CPU's ExecContexts are active, mark the CPU as 199 // running and schedule its tick event. 200 for (int i = 0; i < execContexts.size(); ++i) { 201 ExecContext *xc = execContexts[i]; 202 if (xc->status() == ExecContext::Active && _status != Running) { 203 _status = Running; 204 tickEvent.schedule(curTick); 205 } 206 } 207 208 oldCPU->switchOut(); 209} 210 211 212void 213SimpleCPU::activateContext(int thread_num, int delay) 214{ 215 assert(thread_num == 0); 216 assert(xc); 217 218 assert(_status == Idle); 219 notIdleFraction++; 220 scheduleTickEvent(delay); 221 _status = Running; 222} 223 224 225void 226SimpleCPU::suspendContext(int thread_num) 227{ 228 assert(thread_num == 0); 229 assert(xc); 230 231 assert(_status == Running); 232 notIdleFraction--; 233 unscheduleTickEvent(); 234 _status = Idle; 235} 236 237 238void 239SimpleCPU::deallocateContext(int thread_num) 240{ 241 // for now, these are equivalent 242 suspendContext(thread_num); 243} 244 245 246void 247SimpleCPU::haltContext(int thread_num) 248{ 249 // for now, these are equivalent 250 suspendContext(thread_num); 251} 252 253 254void 255SimpleCPU::regStats() 256{ 257 using namespace Stats; 258 259 BaseCPU::regStats(); 260 261 numInsts 262 .name(name() + ".num_insts") 263 .desc("Number of instructions executed") 264 ; 265 266 numMemRefs 267 .name(name() + ".num_refs") 268 .desc("Number of memory references") 269 ; 270 271 notIdleFraction 272 .name(name() + ".not_idle_fraction") 273 .desc("Percentage of non-idle cycles") 274 ; 275 276 idleFraction 277 .name(name() + ".idle_fraction") 278 .desc("Percentage of idle cycles") 279 ; 280 281 icacheStallCycles 282 .name(name() + 
".icache_stall_cycles") 283 .desc("ICache total stall cycles") 284 .prereq(icacheStallCycles) 285 ; 286 287 dcacheStallCycles 288 .name(name() + ".dcache_stall_cycles") 289 .desc("DCache total stall cycles") 290 .prereq(dcacheStallCycles) 291 ; 292 293 idleFraction = constant(1.0) - notIdleFraction; 294} 295 296void 297SimpleCPU::resetStats() 298{ 299 startNumInst = numInst; 300 notIdleFraction = (_status != Idle); 301} 302 303void 304SimpleCPU::serialize(ostream &os) 305{ 306 BaseCPU::serialize(os); 307 SERIALIZE_ENUM(_status); 308 SERIALIZE_SCALAR(inst); 309 nameOut(os, csprintf("%s.xc", name())); 310 xc->serialize(os); 311 nameOut(os, csprintf("%s.tickEvent", name())); 312 tickEvent.serialize(os); 313 nameOut(os, csprintf("%s.cacheCompletionEvent", name())); 314 cacheCompletionEvent.serialize(os); 315} 316 317void 318SimpleCPU::unserialize(Checkpoint *cp, const string §ion) 319{ 320 BaseCPU::unserialize(cp, section); 321 UNSERIALIZE_ENUM(_status); 322 UNSERIALIZE_SCALAR(inst); 323 xc->unserialize(cp, csprintf("%s.xc", section)); 324 tickEvent.unserialize(cp, csprintf("%s.tickEvent", section)); 325 cacheCompletionEvent 326 .unserialize(cp, csprintf("%s.cacheCompletionEvent", section)); 327} 328 329void 330change_thread_state(int thread_number, int activate, int priority) 331{ 332} 333 334Fault 335SimpleCPU::copySrcTranslate(Addr src) 336{ 337 static bool no_warn = true; 338 int blk_size = (dcacheInterface) ? dcacheInterface->getBlockSize() : 64; 339 // Only support block sizes of 64 atm. 
340 assert(blk_size == 64); 341 int offset = src & (blk_size - 1); 342 343 // Make sure block doesn't span page 344 if (no_warn && 345 (src & TheISA::PageMask) != ((src + blk_size) & TheISA::PageMask) && 346 (src >> 40) != 0xfffffc) { 347 warn("Copied block source spans pages %x.", src); 348 no_warn = false; 349 } 350 351 memReq->reset(src & ~(blk_size - 1), blk_size); 352 353 // translate to physical address 354 Fault fault = xc->translateDataReadReq(memReq); 355 356 assert(fault != Alignment_Fault); 357 358 if (fault == No_Fault) { 359 xc->copySrcAddr = src; 360 xc->copySrcPhysAddr = memReq->paddr + offset; 361 } else { 362 xc->copySrcAddr = 0; 363 xc->copySrcPhysAddr = 0; 364 } 365 return fault; 366} 367 368Fault 369SimpleCPU::copy(Addr dest) 370{ 371 static bool no_warn = true; 372 int blk_size = (dcacheInterface) ? dcacheInterface->getBlockSize() : 64; 373 // Only support block sizes of 64 atm. 374 assert(blk_size == 64); 375 uint8_t data[blk_size]; 376 //assert(xc->copySrcAddr); 377 int offset = dest & (blk_size - 1); 378 379 // Make sure block doesn't span page 380 if (no_warn && 381 (dest & TheISA::PageMask) != ((dest + blk_size) & TheISA::PageMask) && 382 (dest >> 40) != 0xfffffc) { 383 no_warn = false; 384 warn("Copied block destination spans pages %x. ", dest); 385 } 386 387 memReq->reset(dest & ~(blk_size -1), blk_size); 388 // translate to physical address 389 Fault fault = xc->translateDataWriteReq(memReq); 390 391 assert(fault != Alignment_Fault); 392 393 if (fault == No_Fault) { 394 Addr dest_addr = memReq->paddr + offset; 395 // Need to read straight from memory since we have more than 8 bytes. 
396 memReq->paddr = xc->copySrcPhysAddr; 397 xc->mem->read(memReq, data); 398 memReq->paddr = dest_addr; 399 xc->mem->write(memReq, data); 400 if (dcacheInterface) { 401 memReq->cmd = Copy; 402 memReq->completionEvent = NULL; 403 memReq->paddr = xc->copySrcPhysAddr; 404 memReq->dest = dest_addr; 405 memReq->size = 64; 406 memReq->time = curTick; 407 dcacheInterface->access(memReq); 408 } 409 } 410 return fault; 411} 412 413// precise architected memory state accessor macros 414template <class T> 415Fault 416SimpleCPU::read(Addr addr, T &data, unsigned flags) 417{ 418 memReq->reset(addr, sizeof(T), flags); 419 420 // translate to physical address 421 Fault fault = xc->translateDataReadReq(memReq); 422 423 // do functional access 424 if (fault == No_Fault) 425 fault = xc->read(memReq, data); 426 427 if (traceData) { 428 traceData->setAddr(addr); 429 if (fault == No_Fault) 430 traceData->setData(data); 431 } 432 433 // if we have a cache, do cache access too 434 if (fault == No_Fault && dcacheInterface) { 435 memReq->cmd = Read; 436 memReq->completionEvent = NULL; 437 memReq->time = curTick; 438 MemAccessResult result = dcacheInterface->access(memReq); 439 440 // Ugly hack to get an event scheduled *only* if the access is 441 // a miss. We really should add first-class support for this 442 // at some point. 
443 if (result != MA_HIT && dcacheInterface->doEvents()) { 444 memReq->completionEvent = &cacheCompletionEvent; 445 lastDcacheStall = curTick; 446 unscheduleTickEvent(); 447 _status = DcacheMissStall; 448 } 449 } 450 451 if (!dcacheInterface && (memReq->flags & UNCACHEABLE)) 452 recordEvent("Uncached Read"); 453 454 return fault; 455} 456 457#ifndef DOXYGEN_SHOULD_SKIP_THIS 458 459template 460Fault 461SimpleCPU::read(Addr addr, uint64_t &data, unsigned flags); 462 463template 464Fault 465SimpleCPU::read(Addr addr, uint32_t &data, unsigned flags); 466 467template 468Fault 469SimpleCPU::read(Addr addr, uint16_t &data, unsigned flags); 470 471template 472Fault 473SimpleCPU::read(Addr addr, uint8_t &data, unsigned flags); 474 475#endif //DOXYGEN_SHOULD_SKIP_THIS 476 477template<> 478Fault 479SimpleCPU::read(Addr addr, double &data, unsigned flags) 480{ 481 return read(addr, *(uint64_t*)&data, flags); 482} 483 484template<> 485Fault 486SimpleCPU::read(Addr addr, float &data, unsigned flags) 487{ 488 return read(addr, *(uint32_t*)&data, flags); 489} 490 491 492template<> 493Fault 494SimpleCPU::read(Addr addr, int32_t &data, unsigned flags) 495{ 496 return read(addr, (uint32_t&)data, flags); 497} 498 499 500template <class T> 501Fault 502SimpleCPU::write(T data, Addr addr, unsigned flags, uint64_t *res) 503{ 504 if (traceData) { 505 traceData->setAddr(addr); 506 traceData->setData(data); 507 } 508 509 memReq->reset(addr, sizeof(T), flags); 510 511 // translate to physical address 512 Fault fault = xc->translateDataWriteReq(memReq); 513 514 // do functional access 515 if (fault == No_Fault) 516 fault = xc->write(memReq, data); 517 518 if (fault == No_Fault && dcacheInterface) { 519 memReq->cmd = Write; 520 memcpy(memReq->data,(uint8_t *)&data,memReq->size); 521 memReq->completionEvent = NULL; 522 memReq->time = curTick; 523 MemAccessResult result = dcacheInterface->access(memReq); 524 525 // Ugly hack to get an event scheduled *only* if the access is 526 // a miss. 
We really should add first-class support for this 527 // at some point. 528 if (result != MA_HIT && dcacheInterface->doEvents()) { 529 memReq->completionEvent = &cacheCompletionEvent; 530 lastDcacheStall = curTick; 531 unscheduleTickEvent(); 532 _status = DcacheMissStall; 533 } 534 } 535 536 if (res && (fault == No_Fault)) 537 *res = memReq->result; 538 539 if (!dcacheInterface && (memReq->flags & UNCACHEABLE)) 540 recordEvent("Uncached Write"); 541 542 return fault; 543} 544 545 546#ifndef DOXYGEN_SHOULD_SKIP_THIS 547template 548Fault 549SimpleCPU::write(uint64_t data, Addr addr, unsigned flags, uint64_t *res); 550 551template 552Fault 553SimpleCPU::write(uint32_t data, Addr addr, unsigned flags, uint64_t *res); 554 555template 556Fault 557SimpleCPU::write(uint16_t data, Addr addr, unsigned flags, uint64_t *res); 558 559template 560Fault 561SimpleCPU::write(uint8_t data, Addr addr, unsigned flags, uint64_t *res); 562 563#endif //DOXYGEN_SHOULD_SKIP_THIS 564 565template<> 566Fault 567SimpleCPU::write(double data, Addr addr, unsigned flags, uint64_t *res) 568{ 569 return write(*(uint64_t*)&data, addr, flags, res); 570} 571 572template<> 573Fault 574SimpleCPU::write(float data, Addr addr, unsigned flags, uint64_t *res) 575{ 576 return write(*(uint32_t*)&data, addr, flags, res); 577} 578 579 580template<> 581Fault 582SimpleCPU::write(int32_t data, Addr addr, unsigned flags, uint64_t *res) 583{ 584 return write((uint32_t)data, addr, flags, res); 585} 586 587 588#ifdef FULL_SYSTEM 589Addr 590SimpleCPU::dbg_vtophys(Addr addr) 591{ 592 return vtophys(xc, addr); 593} 594#endif // FULL_SYSTEM 595 596Tick save_cycle = 0; 597 598 599void 600SimpleCPU::processCacheCompletion() 601{ 602 switch (status()) { 603 case IcacheMissStall: 604 icacheStallCycles += curTick - lastIcacheStall; 605 _status = IcacheMissComplete; 606 scheduleTickEvent(1); 607 break; 608 case DcacheMissStall: 609 dcacheStallCycles += curTick - lastDcacheStall; 610 _status = Running; 611 scheduleTickEvent(1); 
612 break; 613 case SwitchedOut: 614 // If this CPU has been switched out due to sampling/warm-up, 615 // ignore any further status changes (e.g., due to cache 616 // misses outstanding at the time of the switch). 617 return; 618 default: 619 panic("SimpleCPU::processCacheCompletion: bad state"); 620 break; 621 } 622} 623 624#ifdef FULL_SYSTEM 625void 626SimpleCPU::post_interrupt(int int_num, int index) 627{ 628 BaseCPU::post_interrupt(int_num, index); 629 630 if (xc->status() == ExecContext::Suspended) { 631 DPRINTF(IPI,"Suspended Processor awoke\n"); 632 xc->activate(); 633 } 634} 635#endif // FULL_SYSTEM 636 637/* start simulation, program loaded, processor precise state initialized */ 638void 639SimpleCPU::tick() 640{ 641 numCycles++; 642 643 traceData = NULL; 644 645 Fault fault = No_Fault; 646 647#ifdef FULL_SYSTEM 648 if (checkInterrupts && check_interrupts() && !xc->inPalMode() && 649 status() != IcacheMissComplete) { 650 int ipl = 0; 651 int summary = 0; 652 checkInterrupts = false; 653 IntReg *ipr = xc->regs.ipr; 654 655 if (xc->regs.ipr[TheISA::IPR_SIRR]) { 656 for (int i = TheISA::INTLEVEL_SOFTWARE_MIN; 657 i < TheISA::INTLEVEL_SOFTWARE_MAX; i++) { 658 if (ipr[TheISA::IPR_SIRR] & (ULL(1) << i)) { 659 // See table 4-19 of 21164 hardware reference 660 ipl = (i - TheISA::INTLEVEL_SOFTWARE_MIN) + 1; 661 summary |= (ULL(1) << i); 662 } 663 } 664 } 665 666 uint64_t interrupts = xc->cpu->intr_status(); 667 for (int i = TheISA::INTLEVEL_EXTERNAL_MIN; 668 i < TheISA::INTLEVEL_EXTERNAL_MAX; i++) { 669 if (interrupts & (ULL(1) << i)) { 670 // See table 4-19 of 21164 hardware reference 671 ipl = i; 672 summary |= (ULL(1) << i); 673 } 674 } 675 676 if (ipr[TheISA::IPR_ASTRR]) 677 panic("asynchronous traps not implemented\n"); 678 679 if (ipl && ipl > xc->regs.ipr[TheISA::IPR_IPLR]) { 680 ipr[TheISA::IPR_ISR] = summary; 681 ipr[TheISA::IPR_INTID] = ipl; 682 xc->ev5_trap(Interrupt_Fault); 683 684 DPRINTF(Flow, "Interrupt! 
IPLR=%d ipl=%d summary=%x\n", 685 ipr[TheISA::IPR_IPLR], ipl, summary); 686 } 687 } 688#endif 689 690 // maintain $r0 semantics 691 xc->regs.intRegFile[ZeroReg] = 0; 692#ifdef TARGET_ALPHA 693 xc->regs.floatRegFile.d[ZeroReg] = 0.0; 694#endif // TARGET_ALPHA 695 696 if (status() == IcacheMissComplete) { 697 // We've already fetched an instruction and were stalled on an 698 // I-cache miss. No need to fetch it again. 699 700 // Set status to running; tick event will get rescheduled if 701 // necessary at end of tick() function. 702 _status = Running; 703 } 704 else { 705 // Try to fetch an instruction 706 707 // set up memory request for instruction fetch 708#ifdef FULL_SYSTEM 709#define IFETCH_FLAGS(pc) ((pc) & 1) ? PHYSICAL : 0 710#else 711#define IFETCH_FLAGS(pc) 0 712#endif 713 714 memReq->cmd = Read; 715 memReq->reset(xc->regs.pc & ~3, sizeof(uint32_t), 716 IFETCH_FLAGS(xc->regs.pc)); 717 718 fault = xc->translateInstReq(memReq); 719 720 if (fault == No_Fault) 721 fault = xc->mem->read(memReq, inst); 722 723 if (icacheInterface && fault == No_Fault) { 724 memReq->completionEvent = NULL; 725 726 memReq->time = curTick; 727 MemAccessResult result = icacheInterface->access(memReq); 728 729 // Ugly hack to get an event scheduled *only* if the access is 730 // a miss. We really should add first-class support for this 731 // at some point. 732 if (result != MA_HIT && icacheInterface->doEvents()) { 733 memReq->completionEvent = &cacheCompletionEvent; 734 lastIcacheStall = curTick; 735 unscheduleTickEvent(); 736 _status = IcacheMissStall; 737 return; 738 } 739 } 740 } 741 742 // If we've got a valid instruction (i.e., no fault on instruction 743 // fetch), then execute it. 
744 if (fault == No_Fault) { 745 746 // keep an instruction count 747 numInst++; 748 numInsts++; 749 750 // check for instruction-count-based events 751 comInstEventQueue[0]->serviceEvents(numInst); 752 753 // decode the instruction 754 inst = htoa(inst); 755 StaticInstPtr<TheISA> si(inst); 756 757 traceData = Trace::getInstRecord(curTick, xc, this, si, 758 xc->regs.pc); 759 760#ifdef FULL_SYSTEM 761 xc->setInst(inst); 762#endif // FULL_SYSTEM 763 764 xc->func_exe_inst++; 765 766 fault = si->execute(this, traceData); 767 768#ifdef FULL_SYSTEM 769 if (xc->fnbin) 770 xc->execute(si.get()); 771#endif 772 773 if (si->isMemRef()) { 774 numMemRefs++; 775 } 776 777 if (si->isLoad()) { 778 ++numLoad; 779 comLoadEventQueue[0]->serviceEvents(numLoad); 780 } 781 782 if (traceData) 783 traceData->finalize(); 784 785 traceFunctions(xc->regs.pc); 786 787 } // if (fault == No_Fault) 788 789 if (fault != No_Fault) { 790#ifdef FULL_SYSTEM 791 xc->ev5_trap(fault); 792#else // !FULL_SYSTEM 793 fatal("fault (%d) detected @ PC 0x%08p", fault, xc->regs.pc); 794#endif // FULL_SYSTEM 795 } 796 else { 797 // go to the next instruction 798 xc->regs.pc = xc->regs.npc; 799 xc->regs.npc += sizeof(MachInst); 800 } 801 802#ifdef FULL_SYSTEM 803 Addr oldpc; 804 do { 805 oldpc = xc->regs.pc; 806 system->pcEventQueue.service(xc); 807 } while (oldpc != xc->regs.pc); 808#endif 809 810 assert(status() == Running || 811 status() == Idle || 812 status() == DcacheMissStall); 813 814 if (status() == Running && !tickEvent.scheduled()) 815 tickEvent.schedule(curTick + 1); 816} 817 818 819//////////////////////////////////////////////////////////////////////// 820// 821// SimpleCPU Simulation Object 822// 823BEGIN_DECLARE_SIM_OBJECT_PARAMS(SimpleCPU) 824 825 Param<Counter> max_insts_any_thread; 826 Param<Counter> max_insts_all_threads; 827 Param<Counter> max_loads_any_thread; 828 Param<Counter> max_loads_all_threads; 829 830#ifdef FULL_SYSTEM 831 SimObjectParam<AlphaITB *> itb; 832 SimObjectParam<AlphaDTB *> 
dtb; 833 SimObjectParam<FunctionalMemory *> mem; 834 SimObjectParam<System *> system; 835 Param<int> mult; 836#else 837 SimObjectParam<Process *> workload; 838#endif // FULL_SYSTEM 839 840 SimObjectParam<BaseMem *> icache; 841 SimObjectParam<BaseMem *> dcache; 842 843 Param<bool> defer_registration; 844 Param<int> multiplier; 845 Param<bool> function_trace; 846 Param<Tick> function_trace_start; 847 848END_DECLARE_SIM_OBJECT_PARAMS(SimpleCPU) 849 850BEGIN_INIT_SIM_OBJECT_PARAMS(SimpleCPU) 851 852 INIT_PARAM_DFLT(max_insts_any_thread, 853 "terminate when any thread reaches this inst count", 854 0), 855 INIT_PARAM_DFLT(max_insts_all_threads, 856 "terminate when all threads have reached this inst count", 857 0), 858 INIT_PARAM_DFLT(max_loads_any_thread, 859 "terminate when any thread reaches this load count", 860 0), 861 INIT_PARAM_DFLT(max_loads_all_threads, 862 "terminate when all threads have reached this load count", 863 0), 864 865#ifdef FULL_SYSTEM 866 INIT_PARAM(itb, "Instruction TLB"), 867 INIT_PARAM(dtb, "Data TLB"), 868 INIT_PARAM(mem, "memory"), 869 INIT_PARAM(system, "system object"), 870 INIT_PARAM_DFLT(mult, "system clock multiplier", 1), 871#else 872 INIT_PARAM(workload, "processes to run"), 873#endif // FULL_SYSTEM 874 875 INIT_PARAM_DFLT(icache, "L1 instruction cache object", NULL), 876 INIT_PARAM_DFLT(dcache, "L1 data cache object", NULL), 877 INIT_PARAM_DFLT(defer_registration, "defer registration with system " 878 "(for sampling)", false), 879 880 INIT_PARAM_DFLT(multiplier, "clock multiplier", 1), 881 INIT_PARAM_DFLT(function_trace, "Enable function trace", false), 882 INIT_PARAM_DFLT(function_trace_start, "Cycle to start function trace", 0) 883 884END_INIT_SIM_OBJECT_PARAMS(SimpleCPU) 885 886 887CREATE_SIM_OBJECT(SimpleCPU) 888{ 889 SimpleCPU *cpu; 890#ifdef FULL_SYSTEM 891 if (mult != 1) 892 panic("processor clock multiplier must be 1\n"); 893 894 cpu = new SimpleCPU(getInstanceName(), system, 895 max_insts_any_thread, max_insts_all_threads, 896 
max_loads_any_thread, max_loads_all_threads, 897 itb, dtb, mem, 898 (icache) ? icache->getInterface() : NULL, 899 (dcache) ? dcache->getInterface() : NULL, 900 defer_registration, 901 ticksPerSecond * mult, 902 function_trace, function_trace_start); 903#else 904 905 cpu = new SimpleCPU(getInstanceName(), workload, 906 max_insts_any_thread, max_insts_all_threads, 907 max_loads_any_thread, max_loads_all_threads, 908 (icache) ? icache->getInterface() : NULL, 909 (dcache) ? dcache->getInterface() : NULL, 910 defer_registration, 911 function_trace, function_trace_start); 912 913#endif // FULL_SYSTEM 914 915 cpu->setTickMultiplier(multiplier); 916 917 return cpu; 918} 919 920REGISTER_SIM_OBJECT("SimpleCPU", SimpleCPU) 921 922