// base.cc revision 2390
/*
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */ 28 29#include <cmath> 30#include <cstdio> 31#include <cstdlib> 32#include <iostream> 33#include <iomanip> 34#include <list> 35#include <sstream> 36#include <string> 37 38#include "base/cprintf.hh" 39#include "base/inifile.hh" 40#include "base/loader/symtab.hh" 41#include "base/misc.hh" 42#include "base/pollevent.hh" 43#include "base/range.hh" 44#include "base/stats/events.hh" 45#include "base/trace.hh" 46#include "cpu/base.hh" 47#include "cpu/exec_context.hh" 48#include "cpu/exetrace.hh" 49#include "cpu/profile.hh" 50#include "cpu/sampler/sampler.hh" 51#include "cpu/simple/cpu.hh" 52#include "cpu/smt.hh" 53#include "cpu/static_inst.hh" 54#include "kern/kernel_stats.hh" 55#include "mem/base_mem.hh" 56#include "mem/mem_interface.hh" 57#include "sim/builder.hh" 58#include "sim/debug.hh" 59#include "sim/host.hh" 60#include "sim/sim_events.hh" 61#include "sim/sim_object.hh" 62#include "sim/stats.hh" 63 64#if FULL_SYSTEM 65#include "base/remote_gdb.hh" 66#include "mem/functional/memory_control.hh" 67#include "mem/functional/physical.hh" 68#include "sim/system.hh" 69#include "targetarch/alpha_memory.hh" 70#include "targetarch/stacktrace.hh" 71#include "targetarch/vtophys.hh" 72#else // !FULL_SYSTEM 73#include "mem/functional/functional.hh" 74#endif // FULL_SYSTEM 75 76using namespace std; 77 78 79SimpleCPU::TickEvent::TickEvent(SimpleCPU *c, int w) 80 : Event(&mainEventQueue, CPU_Tick_Pri), cpu(c), width(w) 81{ 82} 83 84void 85SimpleCPU::TickEvent::process() 86{ 87 int count = width; 88 do { 89 cpu->tick(); 90 } while (--count > 0 && cpu->status() == Running); 91} 92 93const char * 94SimpleCPU::TickEvent::description() 95{ 96 return "SimpleCPU tick event"; 97} 98 99 100SimpleCPU::CacheCompletionEvent::CacheCompletionEvent(SimpleCPU *_cpu) 101 : Event(&mainEventQueue), cpu(_cpu) 102{ 103} 104 105void SimpleCPU::CacheCompletionEvent::process() 106{ 107 cpu->processCacheCompletion(); 108} 109 110const char * 111SimpleCPU::CacheCompletionEvent::description() 112{ 113 
return "SimpleCPU cache completion event"; 114} 115 116SimpleCPU::SimpleCPU(Params *p) 117 : BaseCPU(p), tickEvent(this, p->width), xc(NULL), 118 cacheCompletionEvent(this), dcachePort(this), icachePort(this) 119{ 120 _status = Idle; 121#if FULL_SYSTEM 122 xc = new ExecContext(this, 0, p->system, p->itb, p->dtb, p->mem); 123 124 // initialize CPU, including PC 125 TheISA::initCPU(&xc->regs); 126#else 127 xc = new ExecContext(this, /* thread_num */ 0, p->process, /* asid */ 0); 128#endif // !FULL_SYSTEM 129 130 req = new CpuRequest; 131 132 req->asid = 0; 133 134 numInst = 0; 135 startNumInst = 0; 136 numLoad = 0; 137 startNumLoad = 0; 138 lastIcacheStall = 0; 139 lastDcacheStall = 0; 140 141 execContexts.push_back(xc); 142} 143 144SimpleCPU::~SimpleCPU() 145{ 146} 147 148void 149SimpleCPU::switchOut(Sampler *s) 150{ 151 sampler = s; 152 if (status() == DcacheWaitResponse) { 153 DPRINTF(Sampler,"Outstanding dcache access, waiting for completion\n"); 154 _status = DcacheWaitSwitch; 155 } 156 else { 157 _status = SwitchedOut; 158 159 if (tickEvent.scheduled()) 160 tickEvent.squash(); 161 162 sampler->signalSwitched(); 163 } 164} 165 166 167void 168SimpleCPU::takeOverFrom(BaseCPU *oldCPU) 169{ 170 BaseCPU::takeOverFrom(oldCPU); 171 172 assert(!tickEvent.scheduled()); 173 174 // if any of this CPU's ExecContexts are active, mark the CPU as 175 // running and schedule its tick event. 
176 for (int i = 0; i < execContexts.size(); ++i) { 177 ExecContext *xc = execContexts[i]; 178 if (xc->status() == ExecContext::Active && _status != Running) { 179 _status = Running; 180 tickEvent.schedule(curTick); 181 } 182 } 183} 184 185 186void 187SimpleCPU::activateContext(int thread_num, int delay) 188{ 189 assert(thread_num == 0); 190 assert(xc); 191 192 assert(_status == Idle); 193 notIdleFraction++; 194 scheduleTickEvent(delay); 195 _status = Running; 196} 197 198 199void 200SimpleCPU::suspendContext(int thread_num) 201{ 202 assert(thread_num == 0); 203 assert(xc); 204 205 assert(_status == Running); 206 notIdleFraction--; 207 unscheduleTickEvent(); 208 _status = Idle; 209} 210 211 212void 213SimpleCPU::deallocateContext(int thread_num) 214{ 215 // for now, these are equivalent 216 suspendContext(thread_num); 217} 218 219 220void 221SimpleCPU::haltContext(int thread_num) 222{ 223 // for now, these are equivalent 224 suspendContext(thread_num); 225} 226 227 228void 229SimpleCPU::regStats() 230{ 231 using namespace Stats; 232 233 BaseCPU::regStats(); 234 235 numInsts 236 .name(name() + ".num_insts") 237 .desc("Number of instructions executed") 238 ; 239 240 numMemRefs 241 .name(name() + ".num_refs") 242 .desc("Number of memory references") 243 ; 244 245 notIdleFraction 246 .name(name() + ".not_idle_fraction") 247 .desc("Percentage of non-idle cycles") 248 ; 249 250 idleFraction 251 .name(name() + ".idle_fraction") 252 .desc("Percentage of idle cycles") 253 ; 254 255 icacheStallCycles 256 .name(name() + ".icache_stall_cycles") 257 .desc("ICache total stall cycles") 258 .prereq(icacheStallCycles) 259 ; 260 261 dcacheStallCycles 262 .name(name() + ".dcache_stall_cycles") 263 .desc("DCache total stall cycles") 264 .prereq(dcacheStallCycles) 265 ; 266 267 icacheRetryCycles 268 .name(name() + ".icache_retry_cycles") 269 .desc("ICache total retry cycles") 270 .prereq(icacheRetryCycles) 271 ; 272 273 dcacheRetryCycles 274 .name(name() + ".dcache_retry_cycles") 275 
.desc("DCache total retry cycles") 276 .prereq(dcacheRetryCycles) 277 ; 278 279 idleFraction = constant(1.0) - notIdleFraction; 280} 281 282void 283SimpleCPU::resetStats() 284{ 285 startNumInst = numInst; 286 notIdleFraction = (_status != Idle); 287} 288 289void 290SimpleCPU::serialize(ostream &os) 291{ 292 BaseCPU::serialize(os); 293 SERIALIZE_ENUM(_status); 294 SERIALIZE_SCALAR(inst); 295 nameOut(os, csprintf("%s.xc", name())); 296 xc->serialize(os); 297 nameOut(os, csprintf("%s.tickEvent", name())); 298 tickEvent.serialize(os); 299 nameOut(os, csprintf("%s.cacheCompletionEvent", name())); 300 cacheCompletionEvent.serialize(os); 301} 302 303void 304SimpleCPU::unserialize(Checkpoint *cp, const string §ion) 305{ 306 BaseCPU::unserialize(cp, section); 307 UNSERIALIZE_ENUM(_status); 308 UNSERIALIZE_SCALAR(inst); 309 xc->unserialize(cp, csprintf("%s.xc", section)); 310 tickEvent.unserialize(cp, csprintf("%s.tickEvent", section)); 311 cacheCompletionEvent 312 .unserialize(cp, csprintf("%s.cacheCompletionEvent", section)); 313} 314 315void 316change_thread_state(int thread_number, int activate, int priority) 317{ 318} 319 320Fault 321SimpleCPU::copySrcTranslate(Addr src) 322{ 323#if 0 324 static bool no_warn = true; 325 int blk_size = (dcacheInterface) ? dcacheInterface->getBlockSize() : 64; 326 // Only support block sizes of 64 atm. 
327 assert(blk_size == 64); 328 int offset = src & (blk_size - 1); 329 330 // Make sure block doesn't span page 331 if (no_warn && 332 (src & TheISA::PageMask) != ((src + blk_size) & TheISA::PageMask) && 333 (src >> 40) != 0xfffffc) { 334 warn("Copied block source spans pages %x.", src); 335 no_warn = false; 336 } 337 338 memReq->reset(src & ~(blk_size - 1), blk_size); 339 340 // translate to physical address 341 Fault fault = xc->translateDataReadReq(memReq); 342 343 assert(fault != Alignment_Fault); 344 345 if (fault == No_Fault) { 346 xc->copySrcAddr = src; 347 xc->copySrcPhysAddr = memReq->paddr + offset; 348 } else { 349 xc->copySrcAddr = 0; 350 xc->copySrcPhysAddr = 0; 351 } 352 return fault; 353#else 354 return No_Fault 355#endif 356} 357 358Fault 359SimpleCPU::copy(Addr dest) 360{ 361#if 0 362 static bool no_warn = true; 363 int blk_size = (dcacheInterface) ? dcacheInterface->getBlockSize() : 64; 364 // Only support block sizes of 64 atm. 365 assert(blk_size == 64); 366 uint8_t data[blk_size]; 367 //assert(xc->copySrcAddr); 368 int offset = dest & (blk_size - 1); 369 370 // Make sure block doesn't span page 371 if (no_warn && 372 (dest & TheISA::PageMask) != ((dest + blk_size) & TheISA::PageMask) && 373 (dest >> 40) != 0xfffffc) { 374 no_warn = false; 375 warn("Copied block destination spans pages %x. ", dest); 376 } 377 378 memReq->reset(dest & ~(blk_size -1), blk_size); 379 // translate to physical address 380 Fault fault = xc->translateDataWriteReq(memReq); 381 382 assert(fault != Alignment_Fault); 383 384 if (fault == No_Fault) { 385 Addr dest_addr = memReq->paddr + offset; 386 // Need to read straight from memory since we have more than 8 bytes. 
387 memReq->paddr = xc->copySrcPhysAddr; 388 xc->mem->read(memReq, data); 389 memReq->paddr = dest_addr; 390 xc->mem->write(memReq, data); 391 if (dcacheInterface) { 392 memReq->cmd = Copy; 393 memReq->completionEvent = NULL; 394 memReq->paddr = xc->copySrcPhysAddr; 395 memReq->dest = dest_addr; 396 memReq->size = 64; 397 memReq->time = curTick; 398 memReq->flags &= ~INST_READ; 399 dcacheInterface->access(memReq); 400 } 401 } 402 return fault; 403#else 404 return No_Fault; 405#endif 406} 407 408// precise architected memory state accessor macros 409template <class T> 410Fault 411SimpleCPU::read(Addr addr, T &data, unsigned flags) 412{ 413 if (status() == DcacheWaitResponse || status() == DcacheWaitSwitch) { 414// Fault fault = xc->read(memReq,data); 415 // Not sure what to check for no fault... 416 if (pkt->result == Success) { 417 memcpy(&data, pkt->data, sizeof(T)); 418 } 419 420 if (traceData) { 421 traceData->setAddr(addr); 422 } 423 424 // @todo: Figure out a way to create a Fault from the packet result. 425 return No_Fault; 426 } 427 428// memReq->reset(addr, sizeof(T), flags); 429 430 // translate to physical address 431 // NEED NEW TRANSLATION HERE 432 Fault fault = xc->translateDataReadReq(memReq); 433 434 // Now do the access. 435 if (fault == No_Fault) { 436 pkt = new Packet; 437 pkt->cmd = Read; 438 req->paddr = addr; 439 pkt->size = sizeof(T); 440 pkt->req = req; 441 442 sendDcacheRequest(); 443 } 444/* 445 memReq->cmd = Read; 446 memReq->completionEvent = NULL; 447 memReq->time = curTick; 448 memReq->flags &= ~INST_READ; 449 MemAccessResult result = dcacheInterface->access(memReq); 450 451 // Ugly hack to get an event scheduled *only* if the access is 452 // a miss. We really should add first-class support for this 453 // at some point. 
454 if (result != MA_HIT && dcacheInterface->doEvents()) { 455 memReq->completionEvent = &cacheCompletionEvent; 456 lastDcacheStall = curTick; 457 unscheduleTickEvent(); 458 _status = DcacheMissStall; 459 } else { 460 // do functional access 461 fault = xc->read(memReq, data); 462 463 } 464 465 } else if(fault == No_Fault) { 466 // do functional access 467 fault = xc->read(memReq, data); 468 469 } 470*/ 471 // This will need a new way to tell if it has a dcache attached. 472 if (/*!dcacheInterface && */(memReq->flags & UNCACHEABLE)) 473 recordEvent("Uncached Read"); 474 475 return fault; 476} 477 478#ifndef DOXYGEN_SHOULD_SKIP_THIS 479 480template 481Fault 482SimpleCPU::read(Addr addr, uint64_t &data, unsigned flags); 483 484template 485Fault 486SimpleCPU::read(Addr addr, uint32_t &data, unsigned flags); 487 488template 489Fault 490SimpleCPU::read(Addr addr, uint16_t &data, unsigned flags); 491 492template 493Fault 494SimpleCPU::read(Addr addr, uint8_t &data, unsigned flags); 495 496#endif //DOXYGEN_SHOULD_SKIP_THIS 497 498template<> 499Fault 500SimpleCPU::read(Addr addr, double &data, unsigned flags) 501{ 502 return read(addr, *(uint64_t*)&data, flags); 503} 504 505template<> 506Fault 507SimpleCPU::read(Addr addr, float &data, unsigned flags) 508{ 509 return read(addr, *(uint32_t*)&data, flags); 510} 511 512 513template<> 514Fault 515SimpleCPU::read(Addr addr, int32_t &data, unsigned flags) 516{ 517 return read(addr, (uint32_t&)data, flags); 518} 519 520 521template <class T> 522Fault 523SimpleCPU::write(T data, Addr addr, unsigned flags, uint64_t *res) 524{ 525// memReq->reset(addr, sizeof(T), flags); 526 req->vaddr = addr; 527 req->time = curTick; 528 req->size = sizeof(T); 529 530 // translate to physical address 531 // NEED NEW TRANSLATION HERE 532 Fault fault = xc->translateDataWriteReq(memReq); 533 534 // Now do the access. 
535 if (fault == No_Fault) { 536 pkt = new Packet; 537 pkt->cmd = Write; 538 pkt->size = sizeof(T); 539 pkt->req = req; 540 541 // Copy data into the packet. 542 pkt->data = new uint8_t[64]; 543 memcpy(pkt->data, &data, sizeof(T)); 544 545 sendDcacheRequest(); 546 } 547 548/* 549 // do functional access 550 if (fault == No_Fault) 551 fault = xc->write(memReq, data); 552 553 if (fault == No_Fault && dcacheInterface) { 554 memReq->cmd = Write; 555 memcpy(memReq->data,(uint8_t *)&data,memReq->size); 556 memReq->completionEvent = NULL; 557 memReq->time = curTick; 558 memReq->flags &= ~INST_READ; 559 MemAccessResult result = dcacheInterface->access(memReq); 560 561 // Ugly hack to get an event scheduled *only* if the access is 562 // a miss. We really should add first-class support for this 563 // at some point. 564 if (result != MA_HIT && dcacheInterface->doEvents()) { 565 memReq->completionEvent = &cacheCompletionEvent; 566 lastDcacheStall = curTick; 567 unscheduleTickEvent(); 568 _status = DcacheMissStall; 569 } 570 } 571*/ 572 if (res && (fault == No_Fault)) 573 *res = memReq->result; 574 575 // This will need a new way to tell if it's hooked up to a cache or not. 576 if (/*!dcacheInterface && */(memReq->flags & UNCACHEABLE)) 577 recordEvent("Uncached Write"); 578 579 // If the write needs to have a fault on the access, consider calling 580 // changeStatus() and changing it to "bad addr write" or something. 
581 return fault; 582} 583 584 585#ifndef DOXYGEN_SHOULD_SKIP_THIS 586template 587Fault 588SimpleCPU::write(uint64_t data, Addr addr, unsigned flags, uint64_t *res); 589 590template 591Fault 592SimpleCPU::write(uint32_t data, Addr addr, unsigned flags, uint64_t *res); 593 594template 595Fault 596SimpleCPU::write(uint16_t data, Addr addr, unsigned flags, uint64_t *res); 597 598template 599Fault 600SimpleCPU::write(uint8_t data, Addr addr, unsigned flags, uint64_t *res); 601 602#endif //DOXYGEN_SHOULD_SKIP_THIS 603 604template<> 605Fault 606SimpleCPU::write(double data, Addr addr, unsigned flags, uint64_t *res) 607{ 608 return write(*(uint64_t*)&data, addr, flags, res); 609} 610 611template<> 612Fault 613SimpleCPU::write(float data, Addr addr, unsigned flags, uint64_t *res) 614{ 615 return write(*(uint32_t*)&data, addr, flags, res); 616} 617 618 619template<> 620Fault 621SimpleCPU::write(int32_t data, Addr addr, unsigned flags, uint64_t *res) 622{ 623 return write((uint32_t)data, addr, flags, res); 624} 625 626 627#if FULL_SYSTEM 628Addr 629SimpleCPU::dbg_vtophys(Addr addr) 630{ 631 return vtophys(xc, addr); 632} 633#endif // FULL_SYSTEM 634 635void 636SimpleCPU::sendIcacheRequest() 637{ 638#if 1 639 bool success = icachePort.sendTiming(pkt); 640 641 unscheduleTickEvent(); 642 643 lastIcacheStall = curTick; 644 645 if (!success) { 646 // Need to wait for retry 647 _status = IcacheRetry; 648 } else { 649 // Need to wait for cache to respond 650 _status = IcacheWaitResponse; 651 } 652#else 653 Tick latency = icachePort.sendAtomic(pkt); 654 655 unscheduleTickEvent(); 656 scheduleTickEvent(latency); 657 658 // Note that Icache miss cycles will be incorrect. Unless 659 // we check the status of the packet sent (is this valid?), 660 // we won't know if the latency is a hit or a miss. 
661 icacheStallCycles += latency; 662 663 _status = IcacheAccessComplete; 664#endif 665} 666 667void 668SimpleCPU::sendDcacheRequest() 669{ 670 unscheduleTickEvent(); 671 672#if 1 673 bool success = dcachePort.sendTiming(pkt); 674 675 lastDcacheStall = curTick; 676 677 if (!success) { 678 _status = DcacheRetry; 679 } else { 680 _status = DcacheWaitResponse; 681 } 682#else 683 Tick latency = dcachePort.sendAtomic(pkt); 684 685 scheduleTickEvent(latency); 686 687 // Note that Dcache miss cycles will be incorrect. Unless 688 // we check the status of the packet sent (is this valid?), 689 // we won't know if the latency is a hit or a miss. 690 dcacheStallCycles += latency; 691 692 // Delete the packet right here? 693 delete pkt; 694#endif 695} 696 697void 698SimpleCPU::processResponse(Packet *response) 699{ 700 // For what things is the CPU the consumer of the packet it sent out? 701 // This may create a memory leak if that's the case and it's expected of the 702 // SimpleCPU to delete its own packet. 703 pkt = response; 704 705 switch (status()) { 706 case IcacheWaitResponse: 707 icacheStallCycles += curTick - lastIcacheStall; 708 709 _status = IcacheAccessComplete; 710 scheduleTickEvent(1); 711 712 // Copy the icache data into the instruction itself. 
713 memcpy(&inst, pkt->data, sizeof(inst)); 714 715 delete pkt; 716 break; 717 case DcacheWaitResponse: 718 if (req->cmd.isRead()) { 719 curStaticInst->execute(this,traceData); 720 if (traceData) 721 traceData->finalize(); 722 } 723 724 delete pkt; 725 726 dcacheStallCycles += curTick - lastDcacheStall; 727 _status = Running; 728 scheduleTickEvent(1); 729 break; 730 case DcacheWaitSwitch: 731 if (memReq->cmd.isRead()) { 732 curStaticInst->execute(this,traceData); 733 if (traceData) 734 traceData->finalize(); 735 } 736 737 delete pkt; 738 739 _status = SwitchedOut; 740 sampler->signalSwitched(); 741 case SwitchedOut: 742 // If this CPU has been switched out due to sampling/warm-up, 743 // ignore any further status changes (e.g., due to cache 744 // misses outstanding at the time of the switch). 745 delete pkt; 746 747 return; 748 default: 749 panic("SimpleCPU::processCacheCompletion: bad state"); 750 break; 751 } 752} 753 754Packet * 755SimpleCPU::processRetry() 756{ 757 switch(status()) { 758 case IcacheRetry: 759 icacheRetryCycles += curTick - lastIcacheStall; 760 return pkt; 761 break; 762 case DcacheRetry: 763 dcacheRetryCycles += curTick - lastDcacheStall; 764 return pkt; 765 break; 766 default: 767 panic("SimpleCPU::processRetry: bad state"); 768 break; 769 } 770} 771 772#if FULL_SYSTEM 773void 774SimpleCPU::post_interrupt(int int_num, int index) 775{ 776 BaseCPU::post_interrupt(int_num, index); 777 778 if (xc->status() == ExecContext::Suspended) { 779 DPRINTF(IPI,"Suspended Processor awoke\n"); 780 xc->activate(); 781 } 782} 783#endif // FULL_SYSTEM 784 785/* start simulation, program loaded, processor precise state initialized */ 786void 787SimpleCPU::tick() 788{ 789 numCycles++; 790 791 traceData = NULL; 792 793 Fault fault = No_Fault; 794 795#if FULL_SYSTEM 796 if (checkInterrupts && check_interrupts() && !xc->inPalMode() && 797 status() != IcacheMissComplete) { 798 int ipl = 0; 799 int summary = 0; 800 checkInterrupts = false; 801 IntReg *ipr = 
xc->regs.ipr; 802 803 if (xc->regs.ipr[TheISA::IPR_SIRR]) { 804 for (int i = TheISA::INTLEVEL_SOFTWARE_MIN; 805 i < TheISA::INTLEVEL_SOFTWARE_MAX; i++) { 806 if (ipr[TheISA::IPR_SIRR] & (ULL(1) << i)) { 807 // See table 4-19 of 21164 hardware reference 808 ipl = (i - TheISA::INTLEVEL_SOFTWARE_MIN) + 1; 809 summary |= (ULL(1) << i); 810 } 811 } 812 } 813 814 uint64_t interrupts = xc->cpu->intr_status(); 815 for (int i = TheISA::INTLEVEL_EXTERNAL_MIN; 816 i < TheISA::INTLEVEL_EXTERNAL_MAX; i++) { 817 if (interrupts & (ULL(1) << i)) { 818 // See table 4-19 of 21164 hardware reference 819 ipl = i; 820 summary |= (ULL(1) << i); 821 } 822 } 823 824 if (ipr[TheISA::IPR_ASTRR]) 825 panic("asynchronous traps not implemented\n"); 826 827 if (ipl && ipl > xc->regs.ipr[TheISA::IPR_IPLR]) { 828 ipr[TheISA::IPR_ISR] = summary; 829 ipr[TheISA::IPR_INTID] = ipl; 830 xc->ev5_trap(Interrupt_Fault); 831 832 DPRINTF(Flow, "Interrupt! IPLR=%d ipl=%d summary=%x\n", 833 ipr[TheISA::IPR_IPLR], ipl, summary); 834 } 835 } 836#endif 837 838 // maintain $r0 semantics 839 xc->regs.intRegFile[ZeroReg] = 0; 840#ifdef TARGET_ALPHA 841 xc->regs.floatRegFile.d[ZeroReg] = 0.0; 842#endif // TARGET_ALPHA 843 844 if (status() == IcacheAccessComplete) { 845 // We've already fetched an instruction and were stalled on an 846 // I-cache miss. No need to fetch it again. 847 848 // Set status to running; tick event will get rescheduled if 849 // necessary at end of tick() function. 850 _status = Running; 851 } else { 852 // Try to fetch an instruction 853 854 // set up memory request for instruction fetch 855#if FULL_SYSTEM 856#define IFETCH_FLAGS(pc) ((pc) & 1) ? 
PHYSICAL : 0 857#else 858#define IFETCH_FLAGS(pc) 0 859#endif 860 861 req->vaddr = xc->regs.pc & ~3; 862 req->time = curTick; 863 req->size = sizeof(MachInst); 864 865/* memReq->reset(xc->regs.pc & ~3, sizeof(uint32_t), 866 IFETCH_FLAGS(xc->regs.pc)); 867*/ 868//NEED NEW TRANSLATION HERE 869 fault = xc->translateInstReq(memReq); 870 871 if (fault == No_Fault) { 872 pkt = new Packet; 873 pkt->cmd = Read; 874 pkt->addr = req->paddr; 875 pkt->size = sizeof(MachInst); 876 pkt->req = req; 877 878 sendIcacheRequest(); 879/* fault = xc->mem->read(memReq, inst); 880 881 if (icacheInterface && fault == No_Fault) { 882 memReq->completionEvent = NULL; 883 884 memReq->time = curTick; 885 memReq->flags |= INST_READ; 886 MemAccessResult result = icacheInterface->access(memReq); 887 888 // Ugly hack to get an event scheduled *only* if the access is 889 // a miss. We really should add first-class support for this 890 // at some point. 891 if (result != MA_HIT && icacheInterface->doEvents()) { 892 memReq->completionEvent = &cacheCompletionEvent; 893 lastIcacheStall = curTick; 894 unscheduleTickEvent(); 895 _status = IcacheMissStall; 896 return; 897 } 898 } 899*/ 900 } 901 } 902 903 // If we've got a valid instruction (i.e., no fault on instruction 904 // fetch), then execute it. 
905 if (fault == No_Fault) { 906 907 // keep an instruction count 908 numInst++; 909 numInsts++; 910 911 // check for instruction-count-based events 912 comInstEventQueue[0]->serviceEvents(numInst); 913 914 // decode the instruction 915 inst = gtoh(inst); 916 curStaticInst = StaticInst<TheISA>::decode(inst); 917 918 traceData = Trace::getInstRecord(curTick, xc, this, curStaticInst, 919 xc->regs.pc); 920 921#if FULL_SYSTEM 922 xc->setInst(inst); 923#endif // FULL_SYSTEM 924 925 xc->func_exe_inst++; 926 927 fault = curStaticInst->execute(this, traceData); 928 929#if FULL_SYSTEM 930 if (xc->fnbin) { 931 assert(xc->kernelStats); 932 system->kernelBinning->execute(xc, inst); 933 } 934 935 if (xc->profile) { 936 bool usermode = (xc->regs.ipr[AlphaISA::IPR_DTB_CM] & 0x18) != 0; 937 xc->profilePC = usermode ? 1 : xc->regs.pc; 938 ProfileNode *node = xc->profile->consume(xc, inst); 939 if (node) 940 xc->profileNode = node; 941 } 942#endif 943 944 if (curStaticInst->isMemRef()) { 945 numMemRefs++; 946 } 947 948 if (curStaticInst->isLoad()) { 949 ++numLoad; 950 comLoadEventQueue[0]->serviceEvents(numLoad); 951 } 952 953 // If we have a dcache miss, then we can't finialize the instruction 954 // trace yet because we want to populate it with the data later 955 if (traceData && 956 !(status() == DcacheWaitResponse && memReq->cmd.isRead())) { 957 traceData->finalize(); 958 } 959 960 traceFunctions(xc->regs.pc); 961 962 } // if (fault == No_Fault) 963 964 if (fault != No_Fault) { 965#if FULL_SYSTEM 966 xc->ev5_trap(fault); 967#else // !FULL_SYSTEM 968 fatal("fault (%d) detected @ PC 0x%08p", fault, xc->regs.pc); 969#endif // FULL_SYSTEM 970 } 971 else { 972 // go to the next instruction 973 xc->regs.pc = xc->regs.npc; 974 xc->regs.npc += sizeof(MachInst); 975 } 976 977#if FULL_SYSTEM 978 Addr oldpc; 979 do { 980 oldpc = xc->regs.pc; 981 system->pcEventQueue.service(xc); 982 } while (oldpc != xc->regs.pc); 983#endif 984 985 assert(status() == Running || 986 status() == Idle || 987 
status() == DcacheWaitResponse); 988 989 if (status() == Running && !tickEvent.scheduled()) 990 tickEvent.schedule(curTick + cycles(1)); 991} 992 993//////////////////////////////////////////////////////////////////////// 994// 995// SimpleCPU Simulation Object 996// 997BEGIN_DECLARE_SIM_OBJECT_PARAMS(SimpleCPU) 998 999 Param<Counter> max_insts_any_thread; 1000 Param<Counter> max_insts_all_threads; 1001 Param<Counter> max_loads_any_thread; 1002 Param<Counter> max_loads_all_threads; 1003 1004#if FULL_SYSTEM 1005 SimObjectParam<AlphaITB *> itb; 1006 SimObjectParam<AlphaDTB *> dtb; 1007 SimObjectParam<FunctionalMemory *> mem; 1008 SimObjectParam<System *> system; 1009 Param<int> cpu_id; 1010 Param<Tick> profile; 1011#else 1012 SimObjectParam<Process *> workload; 1013#endif // FULL_SYSTEM 1014 1015 Param<int> clock; 1016 SimObjectParam<BaseMem *> icache; 1017 SimObjectParam<BaseMem *> dcache; 1018 1019 Param<bool> defer_registration; 1020 Param<int> width; 1021 Param<bool> function_trace; 1022 Param<Tick> function_trace_start; 1023 1024END_DECLARE_SIM_OBJECT_PARAMS(SimpleCPU) 1025 1026BEGIN_INIT_SIM_OBJECT_PARAMS(SimpleCPU) 1027 1028 INIT_PARAM(max_insts_any_thread, 1029 "terminate when any thread reaches this inst count"), 1030 INIT_PARAM(max_insts_all_threads, 1031 "terminate when all threads have reached this inst count"), 1032 INIT_PARAM(max_loads_any_thread, 1033 "terminate when any thread reaches this load count"), 1034 INIT_PARAM(max_loads_all_threads, 1035 "terminate when all threads have reached this load count"), 1036 1037#if FULL_SYSTEM 1038 INIT_PARAM(itb, "Instruction TLB"), 1039 INIT_PARAM(dtb, "Data TLB"), 1040 INIT_PARAM(mem, "memory"), 1041 INIT_PARAM(system, "system object"), 1042 INIT_PARAM(cpu_id, "processor ID"), 1043 INIT_PARAM(profile, ""), 1044#else 1045 INIT_PARAM(workload, "processes to run"), 1046#endif // FULL_SYSTEM 1047 1048 INIT_PARAM(clock, "clock speed"), 1049 INIT_PARAM(icache, "L1 instruction cache object"), 1050 INIT_PARAM(dcache, 
"L1 data cache object"), 1051 INIT_PARAM(defer_registration, "defer system registration (for sampling)"), 1052 INIT_PARAM(width, "cpu width"), 1053 INIT_PARAM(function_trace, "Enable function trace"), 1054 INIT_PARAM(function_trace_start, "Cycle to start function trace") 1055 1056END_INIT_SIM_OBJECT_PARAMS(SimpleCPU) 1057 1058 1059CREATE_SIM_OBJECT(SimpleCPU) 1060{ 1061 SimpleCPU::Params *params = new SimpleCPU::Params(); 1062 params->name = getInstanceName(); 1063 params->numberOfThreads = 1; 1064 params->max_insts_any_thread = max_insts_any_thread; 1065 params->max_insts_all_threads = max_insts_all_threads; 1066 params->max_loads_any_thread = max_loads_any_thread; 1067 params->max_loads_all_threads = max_loads_all_threads; 1068 params->deferRegistration = defer_registration; 1069 params->clock = clock; 1070 params->functionTrace = function_trace; 1071 params->functionTraceStart = function_trace_start; 1072 params->icache_interface = (icache) ? icache->getInterface() : NULL; 1073 params->dcache_interface = (dcache) ? dcache->getInterface() : NULL; 1074 params->width = width; 1075 1076#if FULL_SYSTEM 1077 params->itb = itb; 1078 params->dtb = dtb; 1079 params->mem = mem; 1080 params->system = system; 1081 params->cpu_id = cpu_id; 1082 params->profile = profile; 1083#else 1084 params->process = workload; 1085#endif 1086 1087 SimpleCPU *cpu = new SimpleCPU(params); 1088 return cpu; 1089} 1090 1091REGISTER_SIM_OBJECT("SimpleCPU", SimpleCPU) 1092 1093