base.cc revision 2361
1/* 2 * Copyright (c) 2002-2005 The Regents of The University of Michigan 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions are 7 * met: redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer; 9 * redistributions in binary form must reproduce the above copyright 10 * notice, this list of conditions and the following disclaimer in the 11 * documentation and/or other materials provided with the distribution; 12 * neither the name of the copyright holders nor the names of its 13 * contributors may be used to endorse or promote products derived from 14 * this software without specific prior written permission. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 */

#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <iostream>
#include <iomanip>
#include <list>
#include <sstream>
#include <string>

#include "base/cprintf.hh"
#include "base/inifile.hh"
#include "base/loader/symtab.hh"
#include "base/misc.hh"
#include "base/pollevent.hh"
#include "base/range.hh"
#include "base/stats/events.hh"
#include "base/trace.hh"
#include "cpu/base.hh"
#include "cpu/cpu_exec_context.hh"
#include "cpu/exec_context.hh"
#include "cpu/exetrace.hh"
#include "cpu/profile.hh"
#include "cpu/sampler/sampler.hh"
#include "cpu/simple/cpu.hh"
#include "cpu/smt.hh"
#include "cpu/static_inst.hh"
#include "kern/kernel_stats.hh"
#include "mem/base_mem.hh"
#include "mem/mem_interface.hh"
#include "sim/byteswap.hh"
#include "sim/builder.hh"
#include "sim/debug.hh"
#include "sim/host.hh"
#include "sim/sim_events.hh"
#include "sim/sim_object.hh"
#include "sim/stats.hh"

#if FULL_SYSTEM
#include "base/remote_gdb.hh"
#include "mem/functional/memory_control.hh"
#include "mem/functional/physical.hh"
#include "sim/system.hh"
#include "arch/tlb.hh"
#include "arch/stacktrace.hh"
#include "arch/vtophys.hh"
#else // !FULL_SYSTEM
#include "mem/functional/functional.hh"
#endif // FULL_SYSTEM

using namespace std;
// The SimpleCPU does alpha only
using namespace AlphaISA;


// Tick event: fires once per CPU cycle and drives tick().  'width' is
// the superscalar width, i.e. how many times tick() is called per event.
SimpleCPU::TickEvent::TickEvent(SimpleCPU *c, int w)
    : Event(&mainEventQueue, CPU_Tick_Pri), cpu(c), width(w)
{
}


// Post-construction initialization.  In full-system mode, initialize
// the architected state (including the PC) of every execution context.
void
SimpleCPU::init()
{
    BaseCPU::init();
#if FULL_SYSTEM
    for (int i = 0; i < execContexts.size(); ++i) {
        ExecContext *xc = execContexts[i];

        // initialize CPU, including PC
        TheISA::initCPU(xc, xc->readCpuId());
    }
#endif
}

// Execute up to 'width' instructions this cycle; stop early if a tick
// leaves the CPU in a non-Running state (e.g. a cache-miss stall).
void
SimpleCPU::TickEvent::process()
{
    int count = width;
    do {
        cpu->tick();
    } while (--count > 0 && cpu->status() == Running);
}

const char *
SimpleCPU::TickEvent::description()
{
    return "SimpleCPU tick event";
}


// Event used as the completion callback for cache-miss requests; it
// simply forwards to SimpleCPU::processCacheCompletion().
SimpleCPU::CacheCompletionEvent::CacheCompletionEvent(SimpleCPU *_cpu)
    : Event(&mainEventQueue), cpu(_cpu)
{
}

void SimpleCPU::CacheCompletionEvent::process()
{
    cpu->processCacheCompletion();
}

const char *
SimpleCPU::CacheCompletionEvent::description()
{
    return "SimpleCPU cache completion event";
}

// Construct the CPU: create the single execution context (full-system
// or syscall-emulation flavor), hook up cache interfaces, and set up
// the reusable memory request object shared by fetch and data access.
SimpleCPU::SimpleCPU(Params *p)
    : BaseCPU(p), tickEvent(this, p->width), cpuXC(NULL),
      cacheCompletionEvent(this)
{
    _status = Idle;
#if FULL_SYSTEM
    cpuXC = new CPUExecContext(this, 0, p->system, p->itb, p->dtb, p->mem);

#else
    cpuXC = new CPUExecContext(this, /* thread_num */ 0, p->process,
                               /* asid */ 0);
#endif // !FULL_SYSTEM
    cpuXC->setStatus(ExecContext::Suspended);
    xcProxy = cpuXC->getProxy();

    icacheInterface = p->icache_interface;
    dcacheInterface = p->dcache_interface;

    // One MemReq is reused for every access; data buffer is sized for
    // the assumed 64-byte cache block (see copy()/copySrcTranslate()).
    memReq = new MemReq();
    memReq->xc = xcProxy;
    memReq->asid = 0;
    memReq->data = new uint8_t[64];

    numInst = 0;
    startNumInst = 0;
    numLoad = 0;
    startNumLoad = 0;
    lastIcacheStall = 0;
    lastDcacheStall = 0;

    execContexts.push_back(xcProxy);
}

SimpleCPU::~SimpleCPU()
{
}

// Switch this CPU out for sampling.  If a dcache miss is outstanding we
// must wait for it to complete (DcacheMissSwitch); otherwise deschedule
// the tick event and signal the sampler immediately.
void
SimpleCPU::switchOut(Sampler *s)
{
    sampler = s;
    if (status() == DcacheMissStall) {
        DPRINTF(Sampler,"Outstanding dcache access, waiting for completion\n");
        _status = DcacheMissSwitch;
    }
    else {
        _status = SwitchedOut;

        if (tickEvent.scheduled())
            tickEvent.deschedule();

        assert(!tickEvent.scheduled());

        sampler->signalSwitched();
    }
}


// Take over simulation state from another CPU (the other side of a
// sampler switch).
void
SimpleCPU::takeOverFrom(BaseCPU *oldCPU)
{
    BaseCPU::takeOverFrom(oldCPU);

    assert(!tickEvent.scheduled());

    // if any of this CPU's ExecContexts are active, mark the CPU as
    // running and schedule its tick event.
    for (int i = 0; i < execContexts.size(); ++i) {
        ExecContext *xc = execContexts[i];
        if (xc->status() == ExecContext::Active && _status != Running) {
            _status = Running;
            tickEvent.schedule(curTick);
        }
    }
}


// Activate the (single) thread context after 'delay' cycles and start
// ticking.  Only thread 0 exists on this CPU.
void
SimpleCPU::activateContext(int thread_num, int delay)
{
    assert(thread_num == 0);
    assert(cpuXC);

    assert(_status == Idle || _status == SwitchedOut);
    notIdleFraction++;
    scheduleTickEvent(delay);
    _status = Running;
}


// Suspend the thread context: stop ticking and go Idle.
void
SimpleCPU::suspendContext(int thread_num)
{
    assert(thread_num == 0);
    assert(cpuXC);

    assert(_status == Running || _status == SwitchedOut);
    notIdleFraction--;
    unscheduleTickEvent();
    _status = Idle;
}


void
SimpleCPU::deallocateContext(int thread_num)
{
    // for now, these are equivalent
    suspendContext(thread_num);
}


void
SimpleCPU::haltContext(int thread_num)
{
    // for now, these are equivalent
    suspendContext(thread_num);
}


// Register this CPU's statistics with the stats package.  idleFraction
// is derived as the complement of notIdleFraction.
void
SimpleCPU::regStats()
{
    using namespace Stats;

    BaseCPU::regStats();

    numInsts
        .name(name() + ".num_insts")
        .desc("Number of instructions executed")
        ;

    numMemRefs
        .name(name() + ".num_refs")
        .desc("Number of memory references")
        ;

    notIdleFraction
        .name(name() + ".not_idle_fraction")
        .desc("Percentage of non-idle cycles")
        ;

    idleFraction
        .name(name() + ".idle_fraction")
        .desc("Percentage of idle cycles")
        ;

    icacheStallCycles
        .name(name() + ".icache_stall_cycles")
        .desc("ICache total stall cycles")
        .prereq(icacheStallCycles)
        ;

    dcacheStallCycles
        .name(name() + ".dcache_stall_cycles")
        .desc("DCache total stall cycles")
        .prereq(dcacheStallCycles)
        ;

    idleFraction = constant(1.0) - notIdleFraction;
}

// Reset stats; notIdleFraction is re-seeded from the current status so
// the busy/idle ratio restarts correctly mid-run.
void
SimpleCPU::resetStats()
{
//    startNumInst = numInst;
    notIdleFraction = (_status != Idle);
}

// Checkpoint this CPU's state: status, the in-flight instruction word,
// the execution context, and both pending-event states, each under its
// own checkpoint section name.
void
SimpleCPU::serialize(ostream &os)
{
    BaseCPU::serialize(os);
    SERIALIZE_ENUM(_status);
    SERIALIZE_SCALAR(inst);
    nameOut(os, csprintf("%s.xc.0", name()));
    cpuXC->serialize(os);
    nameOut(os, csprintf("%s.tickEvent", name()));
    tickEvent.serialize(os);
    nameOut(os, csprintf("%s.cacheCompletionEvent", name()));
    cacheCompletionEvent.serialize(os);
}

// Restore state written by serialize(), using the matching section names.
void
SimpleCPU::unserialize(Checkpoint *cp, const string &section)
{
    BaseCPU::unserialize(cp, section);
    UNSERIALIZE_ENUM(_status);
    UNSERIALIZE_SCALAR(inst);
    cpuXC->unserialize(cp, csprintf("%s.xc.0", section));
    tickEvent.unserialize(cp, csprintf("%s.tickEvent", section));
    cacheCompletionEvent
        .unserialize(cp, csprintf("%s.cacheCompletionEvent", section));
}

// Intentionally empty hook; SimpleCPU has a single thread context.
void
change_thread_state(int thread_number, int activate, int priority)
{
}

// Translate the source address of a block copy and record the resulting
// physical address in the execution context.  NOTE: the success path
// currently panics ("We can't copy!") — block copies are unimplemented.
Fault
SimpleCPU::copySrcTranslate(Addr src)
{
    static bool no_warn = true;
    int blk_size = (dcacheInterface) ? dcacheInterface->getBlockSize() : 64;
    // Only support block sizes of 64 atm.
    assert(blk_size == 64);
    int offset = src & (blk_size - 1);

    // Make sure block doesn't span page; warn only once per run.
    if (no_warn &&
        (src & PageMask) != ((src + blk_size) & PageMask) &&
        (src >> 40) != 0xfffffc) {
        warn("Copied block source spans pages %x.", src);
        no_warn = false;
    }

    memReq->reset(src & ~(blk_size - 1), blk_size);

    // translate to physical address
    Fault fault = cpuXC->translateDataReadReq(memReq);

    if (fault == NoFault) {
        panic("We can't copy!");
        cpuXC->copySrcAddr = src;
        cpuXC->copySrcPhysAddr = memReq->paddr + offset;
    } else {
        assert(!fault->isAlignmentFault());

        cpuXC->copySrcAddr = 0;
        cpuXC->copySrcPhysAddr = 0;
    }
    return fault;
}

// Perform the block copy to 'dest' whose source was set up by
// copySrcTranslate(): translate the destination, move the data through
// functional memory, and issue a Copy command to the dcache if present.
Fault
SimpleCPU::copy(Addr dest)
{
    static bool no_warn = true;
    int blk_size = (dcacheInterface) ?
        dcacheInterface->getBlockSize() : 64;
    // Only support block sizes of 64 atm.
    assert(blk_size == 64);
    uint8_t data[blk_size];
    //assert(cpuXC->copySrcAddr);
    int offset = dest & (blk_size - 1);

    // Make sure block doesn't span page; warn only once per run.
    if (no_warn &&
        (dest & PageMask) != ((dest + blk_size) & PageMask) &&
        (dest >> 40) != 0xfffffc) {
        no_warn = false;
        warn("Copied block destination spans pages %x. ", dest);
    }

    memReq->reset(dest & ~(blk_size -1), blk_size);
    // translate to physical address
    Fault fault = cpuXC->translateDataWriteReq(memReq);

    if (fault == NoFault) {
        Addr dest_addr = memReq->paddr + offset;
        // Need to read straight from memory since we have more than 8 bytes.
        memReq->paddr = cpuXC->copySrcPhysAddr;
        cpuXC->mem->read(memReq, data);
        memReq->paddr = dest_addr;
        cpuXC->mem->write(memReq, data);
        if (dcacheInterface) {
            // Notify the cache model of the copy (timing only).
            memReq->cmd = Copy;
            memReq->completionEvent = NULL;
            memReq->paddr = cpuXC->copySrcPhysAddr;
            memReq->dest = dest_addr;
            memReq->size = 64;
            memReq->time = curTick;
            memReq->flags &= ~INST_READ;
            dcacheInterface->access(memReq);
        }
    }
    else
        assert(!fault->isAlignmentFault());

    return fault;
}

// precise architected memory state accessor macros

// Data read of sizeof(T) bytes at virtual address 'addr'.  On a dcache
// miss the CPU stalls (DcacheMissStall) and the read is replayed by
// processCacheCompletion(); the stall-replay path at the top re-reads
// the already-set-up memReq.
template <class T>
Fault
SimpleCPU::read(Addr addr, T &data, unsigned flags)
{
    // Replay path: we are completing a stalled access, memReq is already
    // translated — just do the functional read.
    if (status() == DcacheMissStall || status() == DcacheMissSwitch) {
        Fault fault = cpuXC->read(memReq,data);

        if (traceData) {
            traceData->setAddr(memReq->vaddr);
        }
        return fault;
    }

    memReq->reset(addr, sizeof(T), flags);

    // translate to physical address
    Fault fault = cpuXC->translateDataReadReq(memReq);

    // if we have a cache, do cache access too
    if (fault == NoFault && dcacheInterface) {
        memReq->cmd = Read;
        memReq->completionEvent = NULL;
        memReq->time = curTick;
        memReq->flags &= ~INST_READ;
        MemAccessResult result = dcacheInterface->access(memReq);

        // Ugly hack to get an event scheduled *only* if the access is
        // a miss.  We really should add first-class support for this
        // at some point.
        if (result != MA_HIT && dcacheInterface->doEvents()) {
            memReq->completionEvent = &cacheCompletionEvent;
            lastDcacheStall = curTick;
            unscheduleTickEvent();
            _status = DcacheMissStall;
        } else {
            // do functional access
            fault = cpuXC->read(memReq, data);

        }
    } else if(fault == NoFault) {
        // do functional access
        fault = cpuXC->read(memReq, data);

    }

    // With no cache model, count uncacheable reads as recorded events.
    if (!dcacheInterface && (memReq->flags & UNCACHEABLE))
        recordEvent("Uncached Read");

    return fault;
}

#ifndef DOXYGEN_SHOULD_SKIP_THIS

// Explicit instantiations for every access width.
template
Fault
SimpleCPU::read(Addr addr, uint64_t &data, unsigned flags);

template
Fault
SimpleCPU::read(Addr addr, uint32_t &data, unsigned flags);

template
Fault
SimpleCPU::read(Addr addr, uint16_t &data, unsigned flags);

template
Fault
SimpleCPU::read(Addr addr, uint8_t &data, unsigned flags);

#endif //DOXYGEN_SHOULD_SKIP_THIS

// Floating-point and signed reads forward to the unsigned overload of
// the same width via a reinterpreting cast.
template<>
Fault
SimpleCPU::read(Addr addr, double &data, unsigned flags)
{
    return read(addr, *(uint64_t*)&data, flags);
}

template<>
Fault
SimpleCPU::read(Addr addr, float &data, unsigned flags)
{
    return read(addr, *(uint32_t*)&data, flags);
}


template<>
Fault
SimpleCPU::read(Addr addr, int32_t &data, unsigned flags)
{
    return read(addr, (uint32_t&)data, flags);
}


// Data write of sizeof(T) bytes at virtual address 'addr'.  The
// functional write happens first; the cache access is timing-only and
// may stall the CPU on a miss.  'res' (if non-NULL) receives the
// request result (e.g. for store-conditional).
template <class T>
Fault
SimpleCPU::write(T data, Addr addr, unsigned flags, uint64_t *res)
{
    memReq->reset(addr, sizeof(T), flags);

    // translate to physical address
    Fault fault = cpuXC->translateDataWriteReq(memReq);

    // do functional access
    if (fault == NoFault)
        fault = cpuXC->write(memReq, data);

    if (fault == NoFault && dcacheInterface) {
        memReq->cmd = Write;
        memcpy(memReq->data,(uint8_t *)&data,memReq->size);
        memReq->completionEvent = NULL;
        memReq->time = curTick;
        memReq->flags &= ~INST_READ;
        MemAccessResult result = dcacheInterface->access(memReq);

        // Ugly hack to get an event scheduled *only* if the access is
        // a miss.  We really should add first-class support for this
        // at some point.
        if (result != MA_HIT && dcacheInterface->doEvents()) {
            memReq->completionEvent = &cacheCompletionEvent;
            lastDcacheStall = curTick;
            unscheduleTickEvent();
            _status = DcacheMissStall;
        }
    }

    if (res && (fault == NoFault))
        *res = memReq->result;

    // With no cache model, count uncacheable writes as recorded events.
    if (!dcacheInterface && (memReq->flags & UNCACHEABLE))
        recordEvent("Uncached Write");

    return fault;
}


#ifndef DOXYGEN_SHOULD_SKIP_THIS

// Explicit instantiations for every access width.
template
Fault
SimpleCPU::write(uint64_t data, Addr addr, unsigned flags, uint64_t *res);

template
Fault
SimpleCPU::write(uint32_t data, Addr addr, unsigned flags, uint64_t *res);

template
Fault
SimpleCPU::write(uint16_t data, Addr addr, unsigned flags, uint64_t *res);

template
Fault
SimpleCPU::write(uint8_t data, Addr addr, unsigned flags, uint64_t *res);

#endif //DOXYGEN_SHOULD_SKIP_THIS

// Floating-point and signed writes forward to the unsigned overload of
// the same width via a reinterpreting cast.
template<>
Fault
SimpleCPU::write(double data, Addr addr, unsigned flags, uint64_t *res)
{
    return write(*(uint64_t*)&data, addr, flags, res);
}

template<>
Fault
SimpleCPU::write(float data, Addr addr, unsigned flags, uint64_t *res)
{
    return write(*(uint32_t*)&data, addr, flags, res);
}


template<>
Fault
SimpleCPU::write(int32_t data, Addr addr, unsigned flags, uint64_t *res)
{
    return write((uint32_t)data, addr, flags, res);
}


#if FULL_SYSTEM
// Debugger helper: virtual-to-physical translation via the proxy context.
Addr
SimpleCPU::dbg_vtophys(Addr addr)
{
    return vtophys(xcProxy, addr);
}
#endif // FULL_SYSTEM

void
SimpleCPU::processCacheCompletion()
{
    Fault fault;

    // Dispatch on why we were stalled; accumulate stall-cycle stats and
    // resume (or finish a pending switch-out).
    switch (status()) {
      case IcacheMissStall:
        icacheStallCycles += curTick - lastIcacheStall;
        _status = IcacheMissComplete;
        scheduleTickEvent(1);
        break;
      case DcacheMissStall:
        // Re-execute the stalled instruction so the load completes via
        // the replay path in read<T>().
        if (memReq->cmd.isRead()) {
            curStaticInst->execute(this,traceData);
            if (traceData)
                traceData->finalize();
        }
        dcacheStallCycles += curTick - lastDcacheStall;
        _status = Running;
        scheduleTickEvent(1);
        break;
      case DcacheMissSwitch:
        // A switch-out was pending on this miss: finish the instruction,
        // then complete the switch instead of resuming.
        if (memReq->cmd.isRead()) {
            fault = curStaticInst->execute(this,traceData);
            if (traceData)
                traceData->finalize();
        } else {
            fault = NoFault;
        }
        assert(fault == NoFault);
        assert(!tickEvent.scheduled());
        _status = SwitchedOut;
        sampler->signalSwitched();
        return;
      case SwitchedOut:
        // If this CPU has been switched out due to sampling/warm-up,
        // ignore any further status changes (e.g., due to cache
        // misses outstanding at the time of the switch).
        return;
      default:
        panic("SimpleCPU::processCacheCompletion: bad state");
        break;
    }
}

#if FULL_SYSTEM
// Deliver an interrupt; wake the context if it was suspended.
void
SimpleCPU::post_interrupt(int int_num, int index)
{
    BaseCPU::post_interrupt(int_num, index);

    if (cpuXC->status() == ExecContext::Suspended) {
        DPRINTF(IPI,"Suspended Processor awoke\n");
        cpuXC->activate();
    }
}
#endif // FULL_SYSTEM

/* start simulation, program loaded, processor precise state initialized */
// One CPU cycle: check interrupts (full-system), fetch (possibly
// stalling on an icache miss), decode, execute, update stats and the
// PC, then reschedule the tick event if still Running.
void
SimpleCPU::tick()
{
    numCycles++;

    traceData = NULL;

    Fault fault = NoFault;

#if FULL_SYSTEM
    // Interrupt check: compute the highest pending interrupt priority
    // level (software then external) and invoke an InterruptFault if it
    // exceeds the current IPLR.  Alpha 21164-style IPR handling.
    if (checkInterrupts && check_interrupts() && !cpuXC->inPalMode() &&
        status() != IcacheMissComplete) {
        int ipl = 0;
        int summary = 0;
        checkInterrupts = false;

        if (cpuXC->readMiscReg(IPR_SIRR)) {
            for (int i = INTLEVEL_SOFTWARE_MIN;
                 i < INTLEVEL_SOFTWARE_MAX; i++) {
                if (cpuXC->readMiscReg(IPR_SIRR) & (ULL(1) << i)) {
                    // See table 4-19 of 21164 hardware reference
                    ipl = (i - INTLEVEL_SOFTWARE_MIN) + 1;
                    summary |= (ULL(1) << i);
                }
            }
        }

        uint64_t interrupts = cpuXC->cpu->intr_status();
        for (int i = INTLEVEL_EXTERNAL_MIN;
             i < INTLEVEL_EXTERNAL_MAX; i++) {
            if (interrupts & (ULL(1) << i)) {
                // See table 4-19 of 21164 hardware reference
                ipl = i;
                summary |= (ULL(1) << i);
            }
        }

        if (cpuXC->readMiscReg(IPR_ASTRR))
            panic("asynchronous traps not implemented\n");

        if (ipl && ipl > cpuXC->readMiscReg(IPR_IPLR)) {
            cpuXC->setMiscReg(IPR_ISR, summary);
            cpuXC->setMiscReg(IPR_INTID, ipl);

            Fault(new InterruptFault)->invoke(xcProxy);

            DPRINTF(Flow, "Interrupt! IPLR=%d ipl=%d summary=%x\n",
                    cpuXC->readMiscReg(IPR_IPLR), ipl, summary);
        }
    }
#endif

    // maintain $r0 semantics
    cpuXC->setIntReg(ZeroReg, 0);
#ifdef TARGET_ALPHA
    cpuXC->setFloatRegDouble(ZeroReg, 0.0);
#endif // TARGET_ALPHA

    if (status() == IcacheMissComplete) {
        // We've already fetched an instruction and were stalled on an
        // I-cache miss.  No need to fetch it again.

        // Set status to running; tick event will get rescheduled if
        // necessary at end of tick() function.
        _status = Running;
    }
    else {
        // Try to fetch an instruction

        // set up memory request for instruction fetch
#if FULL_SYSTEM
#define IFETCH_FLAGS(pc)	((pc) & 1) ? PHYSICAL : 0
#else
#define IFETCH_FLAGS(pc)	0
#endif

        memReq->cmd = Read;
        memReq->reset(cpuXC->readPC() & ~3, sizeof(uint32_t),
                      IFETCH_FLAGS(cpuXC->readPC()));

        fault = cpuXC->translateInstReq(memReq);

        if (fault == NoFault)
            fault = cpuXC->mem->read(memReq, inst);

        if (icacheInterface && fault == NoFault) {
            memReq->completionEvent = NULL;

            memReq->time = curTick;
            memReq->flags |= INST_READ;
            MemAccessResult result = icacheInterface->access(memReq);

            // Ugly hack to get an event scheduled *only* if the access is
            // a miss.  We really should add first-class support for this
            // at some point.
            if (result != MA_HIT && icacheInterface->doEvents()) {
                memReq->completionEvent = &cacheCompletionEvent;
                lastIcacheStall = curTick;
                unscheduleTickEvent();
                _status = IcacheMissStall;
                return;
            }
        }
    }

    // If we've got a valid instruction (i.e., no fault on instruction
    // fetch), then execute it.
    if (fault == NoFault) {

        // keep an instruction count
        numInst++;
        numInsts++;

        // check for instruction-count-based events
        comInstEventQueue[0]->serviceEvents(numInst);

        // decode the instruction
        inst = gtoh(inst);
        curStaticInst = StaticInst::decode(makeExtMI(inst, cpuXC->readPC()));

        traceData = Trace::getInstRecord(curTick, xcProxy, this, curStaticInst,
                                         cpuXC->readPC());

#if FULL_SYSTEM
        cpuXC->setInst(inst);
#endif // FULL_SYSTEM

        cpuXC->func_exe_inst++;

        fault = curStaticInst->execute(this, traceData);

#if FULL_SYSTEM
        if (system->kernelBinning->fnbin) {
            assert(cpuXC->getKernelStats());
            system->kernelBinning->execute(xcProxy, inst);
        }

        if (cpuXC->profile) {
//          bool usermode =
//              (cpuXC->readMiscReg(AlphaISA::IPR_DTB_CM) & 0x18) != 0;
//          cpuXC->profilePC = usermode ? 1 : cpuXC->readPC();
            cpuXC->profilePC = cpuXC->readPC();
            ProfileNode *node = cpuXC->profile->consume(xcProxy, inst);
            if (node)
                cpuXC->profileNode = node;
        }
#endif

        if (curStaticInst->isMemRef()) {
            numMemRefs++;
        }

        if (curStaticInst->isLoad()) {
            ++numLoad;
            comLoadEventQueue[0]->serviceEvents(numLoad);
        }

        // If we have a dcache miss, then we can't finalize the instruction
        // trace yet because we want to populate it with the data later
        if (traceData &&
            !(status() == DcacheMissStall && memReq->cmd.isRead())) {
            traceData->finalize();
        }

        traceFunctions(cpuXC->readPC());

    }	// if (fault == NoFault)

    if (fault != NoFault) {
#if FULL_SYSTEM
        fault->invoke(xcProxy);
#else // !FULL_SYSTEM
        fatal("fault (%d) detected @ PC 0x%08p", fault, cpuXC->readPC());
#endif // FULL_SYSTEM
    }
    else {
#if THE_ISA != MIPS_ISA
        // go to the next instruction
        cpuXC->setPC(cpuXC->readNextPC());
        cpuXC->setNextPC(cpuXC->readNextPC() + sizeof(MachInst));
#else
        // go to the next instruction (MIPS keeps a next-next PC for the
        // branch delay slot)
        cpuXC->setPC(cpuXC->readNextPC());
        cpuXC->setNextPC(cpuXC->readNextNPC());
        cpuXC->setNextNPC(cpuXC->readNextNPC() + sizeof(MachInst));
#endif

    }

#if FULL_SYSTEM
    // Service PC-triggered events; repeat until the PC settles, since
    // an event may itself change the PC.
    Addr oldpc;
    do {
        oldpc = cpuXC->readPC();
        system->pcEventQueue.service(xcProxy);
    } while (oldpc != cpuXC->readPC());
#endif

    assert(status() == Running ||
           status() == Idle ||
           status() == DcacheMissStall);

    if (status() == Running && !tickEvent.scheduled()) {
        assert(_status != SwitchedOut);
        tickEvent.schedule(curTick + cycles(1));
    }
}

////////////////////////////////////////////////////////////////////////
//
//  SimpleCPU Simulation Object
//
BEGIN_DECLARE_SIM_OBJECT_PARAMS(SimpleCPU)

    Param<Counter> max_insts_any_thread;
    Param<Counter> max_insts_all_threads;
    Param<Counter> max_loads_any_thread;
    Param<Counter> max_loads_all_threads;
    Param<Counter> stats_reset_inst;
    Param<Tick> progress_interval;

#if FULL_SYSTEM
    SimObjectParam<AlphaITB *> itb;
    SimObjectParam<AlphaDTB *> dtb;
    SimObjectParam<FunctionalMemory *> mem;
    SimObjectParam<System *> system;
    Param<int> cpu_id;
    Param<Tick> profile;
#else
    SimObjectParam<Process *> workload;
#endif // FULL_SYSTEM

    Param<int> clock;
    SimObjectParam<BaseMem *> icache;
    SimObjectParam<BaseMem *> dcache;

    Param<bool> defer_registration;
    Param<int> width;
    Param<bool> function_trace;
    Param<Tick> function_trace_start;

END_DECLARE_SIM_OBJECT_PARAMS(SimpleCPU)

BEGIN_INIT_SIM_OBJECT_PARAMS(SimpleCPU)

    INIT_PARAM(max_insts_any_thread,
               "terminate when any thread reaches this inst count"),
    INIT_PARAM(max_insts_all_threads,
               "terminate when all threads have reached this inst count"),
    INIT_PARAM(max_loads_any_thread,
               "terminate when any thread reaches this load count"),
    INIT_PARAM(max_loads_all_threads,
               "terminate when all threads have reached this load count"),
    INIT_PARAM(stats_reset_inst,
               "instruction to reset stats on"),
    INIT_PARAM_DFLT(progress_interval, "CPU Progress interval", 0),

#if FULL_SYSTEM
    INIT_PARAM(itb, "Instruction TLB"),
    INIT_PARAM(dtb, "Data TLB"),
    INIT_PARAM(mem, "memory"),
    INIT_PARAM(system, "system object"),
    INIT_PARAM(cpu_id, "processor ID"),
    INIT_PARAM(profile, ""),
#else
    INIT_PARAM(workload, "processes to run"),
#endif // FULL_SYSTEM

    INIT_PARAM(clock, "clock speed"),
    INIT_PARAM(icache, "L1 instruction cache object"),
    INIT_PARAM(dcache, "L1 data cache object"),
    INIT_PARAM(defer_registration, "defer system registration (for sampling)"),
    INIT_PARAM(width, "cpu width"),
    INIT_PARAM(function_trace, "Enable function trace"),
    INIT_PARAM(function_trace_start, "Cycle to start function trace")

END_INIT_SIM_OBJECT_PARAMS(SimpleCPU)


// Factory: build a SimpleCPU from the declared configuration parameters.
CREATE_SIM_OBJECT(SimpleCPU)
{
    SimpleCPU::Params *params = new SimpleCPU::Params();
    params->name = getInstanceName();
    params->numberOfThreads = 1;
    params->max_insts_any_thread = max_insts_any_thread;
    params->max_insts_all_threads = max_insts_all_threads;
    params->max_loads_any_thread = max_loads_any_thread;
    params->max_loads_all_threads = max_loads_all_threads;
    params->stats_reset_inst = stats_reset_inst;
    params->deferRegistration = defer_registration;
    params->clock = clock;
    params->functionTrace = function_trace;
    params->functionTraceStart = function_trace_start;
    params->icache_interface = (icache) ? icache->getInterface() : NULL;
    params->dcache_interface = (dcache) ? dcache->getInterface() : NULL;
    params->width = width;

    params->progress_interval = progress_interval;
#if FULL_SYSTEM
    params->itb = itb;
    params->dtb = dtb;
    params->mem = mem;
    params->system = system;
    params->cpu_id = cpu_id;
    params->profile = profile;
#else
    params->process = workload;
#endif

    SimpleCPU *cpu = new SimpleCPU(params);
    return cpu;
}

REGISTER_SIM_OBJECT("SimpleCPU", SimpleCPU)
