66 } 67#endif 68} 69 70Tick 71TimingSimpleCPU::CpuPort::recvAtomic(PacketPtr pkt) 72{ 73 panic("TimingSimpleCPU doesn't expect recvAtomic callback!"); 74 return curTick; 75} 76 77void 78TimingSimpleCPU::CpuPort::recvFunctional(PacketPtr pkt) 79{ 80 //No internal storage to update, jusst return 81 return; 82} 83 84void 85TimingSimpleCPU::CpuPort::recvStatusChange(Status status) 86{ 87 if (status == RangeChange) { 88 if (!snoopRangeSent) { 89 snoopRangeSent = true; 90 sendStatusChange(Port::RangeChange); 91 } 92 return; 93 } 94 95 panic("TimingSimpleCPU doesn't expect recvStatusChange callback!"); 96} 97 98 99void 100TimingSimpleCPU::CpuPort::TickEvent::schedule(PacketPtr _pkt, Tick t) 101{ 102 pkt = _pkt; 103 Event::schedule(t); 104} 105 106TimingSimpleCPU::TimingSimpleCPU(Params *p) 107 : BaseSimpleCPU(p), icachePort(this, p->clock), dcachePort(this, p->clock) 108{ 109 _status = Idle; 110 111 icachePort.snoopRangeSent = false; 112 dcachePort.snoopRangeSent = false; 113 114 ifetch_pkt = dcache_pkt = NULL; 115 drainEvent = NULL; 116 fetchEvent = NULL; 117 previousTick = 0; 118 changeState(SimObject::Running); 119} 120 121 122TimingSimpleCPU::~TimingSimpleCPU() 123{ 124} 125 126void 127TimingSimpleCPU::serialize(ostream &os) 128{ 129 SimObject::State so_state = SimObject::getState(); 130 SERIALIZE_ENUM(so_state); 131 BaseSimpleCPU::serialize(os); 132} 133 134void 135TimingSimpleCPU::unserialize(Checkpoint *cp, const string §ion) 136{ 137 SimObject::State so_state; 138 UNSERIALIZE_ENUM(so_state); 139 BaseSimpleCPU::unserialize(cp, section); 140} 141 142unsigned int 143TimingSimpleCPU::drain(Event *drain_event) 144{ 145 // TimingSimpleCPU is ready to drain if it's not waiting for 146 // an access to complete. 
147 if (status() == Idle || status() == Running || status() == SwitchedOut) { 148 changeState(SimObject::Drained); 149 return 0; 150 } else { 151 changeState(SimObject::Draining); 152 drainEvent = drain_event; 153 return 1; 154 } 155} 156 157void 158TimingSimpleCPU::resume() 159{ 160 DPRINTF(SimpleCPU, "Resume\n"); 161 if (_status != SwitchedOut && _status != Idle) { 162 assert(system->getMemoryMode() == Enums::timing); 163 164 // Delete the old event if it existed. 165 if (fetchEvent) { 166 if (fetchEvent->scheduled()) 167 fetchEvent->deschedule(); 168 169 delete fetchEvent; 170 } 171 172 fetchEvent = new FetchEvent(this, nextCycle()); 173 } 174 175 changeState(SimObject::Running); 176} 177 178void 179TimingSimpleCPU::switchOut() 180{ 181 assert(status() == Running || status() == Idle); 182 _status = SwitchedOut; 183 numCycles += tickToCycles(curTick - previousTick); 184 185 // If we've been scheduled to resume but are then told to switch out, 186 // we'll need to cancel it. 187 if (fetchEvent && fetchEvent->scheduled()) 188 fetchEvent->deschedule(); 189} 190 191 192void 193TimingSimpleCPU::takeOverFrom(BaseCPU *oldCPU) 194{ 195 BaseCPU::takeOverFrom(oldCPU, &icachePort, &dcachePort); 196 197 // if any of this CPU's ThreadContexts are active, mark the CPU as 198 // running and schedule its tick event. 
199 for (int i = 0; i < threadContexts.size(); ++i) { 200 ThreadContext *tc = threadContexts[i]; 201 if (tc->status() == ThreadContext::Active && _status != Running) { 202 _status = Running; 203 break; 204 } 205 } 206 207 if (_status != Running) { 208 _status = Idle; 209 } 210 assert(threadContexts.size() == 1); 211 cpuId = tc->readCpuId(); 212 previousTick = curTick; 213} 214 215 216void 217TimingSimpleCPU::activateContext(int thread_num, int delay) 218{ 219 DPRINTF(SimpleCPU, "ActivateContext %d (%d cycles)\n", thread_num, delay); 220 221 assert(thread_num == 0); 222 assert(thread); 223 224 assert(_status == Idle); 225 226 notIdleFraction++; 227 _status = Running; 228 229 // kick things off by initiating the fetch of the next instruction 230 fetchEvent = new FetchEvent(this, nextCycle(curTick + ticks(delay))); 231} 232 233 234void 235TimingSimpleCPU::suspendContext(int thread_num) 236{ 237 DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num); 238 239 assert(thread_num == 0); 240 assert(thread); 241 242 assert(_status == Running); 243 244 // just change status to Idle... if status != Running, 245 // completeInst() will not initiate fetch of next instruction. 246 247 notIdleFraction--; 248 _status = Idle; 249} 250 251 252template <class T> 253Fault 254TimingSimpleCPU::read(Addr addr, T &data, unsigned flags) 255{ 256 Request *req = 257 new Request(/* asid */ 0, addr, sizeof(T), flags, thread->readPC(), 258 cpuId, /* thread ID */ 0); 259 260 if (traceData) { 261 traceData->setAddr(req->getVaddr()); 262 } 263 264 // translate to physical address 265 Fault fault = thread->translateDataReadReq(req); 266 267 // Now do the access. 268 if (fault == NoFault) { 269 PacketPtr pkt = 270 new Packet(req, 271 (req->isLocked() ? 
272 MemCmd::LoadLockedReq : MemCmd::ReadReq), 273 Packet::Broadcast); 274 pkt->dataDynamic<T>(new T); 275 276 if (req->isMmapedIpr()) { 277 Tick delay; 278 delay = TheISA::handleIprRead(thread->getTC(), pkt); 279 new IprEvent(pkt, this, nextCycle(curTick + delay)); 280 _status = DcacheWaitResponse; 281 dcache_pkt = NULL; 282 } else if (!dcachePort.sendTiming(pkt)) { 283 _status = DcacheRetry; 284 dcache_pkt = pkt; 285 } else { 286 _status = DcacheWaitResponse; 287 // memory system takes ownership of packet 288 dcache_pkt = NULL; 289 } 290 291 // This will need a new way to tell if it has a dcache attached. 292 if (req->isUncacheable()) 293 recordEvent("Uncached Read"); 294 } else { 295 delete req; 296 } 297 298 return fault; 299} 300 301Fault 302TimingSimpleCPU::translateDataReadAddr(Addr vaddr, Addr &paddr, 303 int size, unsigned flags) 304{ 305 Request *req = 306 new Request(0, vaddr, size, flags, thread->readPC(), cpuId, 0); 307 308 if (traceData) { 309 traceData->setAddr(vaddr); 310 } 311 312 Fault fault = thread->translateDataWriteReq(req); 313 314 if (fault == NoFault) 315 paddr = req->getPaddr(); 316 317 delete req; 318 return fault; 319} 320 321#ifndef DOXYGEN_SHOULD_SKIP_THIS 322 323template 324Fault 325TimingSimpleCPU::read(Addr addr, Twin64_t &data, unsigned flags); 326 327template 328Fault 329TimingSimpleCPU::read(Addr addr, Twin32_t &data, unsigned flags); 330 331template 332Fault 333TimingSimpleCPU::read(Addr addr, uint64_t &data, unsigned flags); 334 335template 336Fault 337TimingSimpleCPU::read(Addr addr, uint32_t &data, unsigned flags); 338 339template 340Fault 341TimingSimpleCPU::read(Addr addr, uint16_t &data, unsigned flags); 342 343template 344Fault 345TimingSimpleCPU::read(Addr addr, uint8_t &data, unsigned flags); 346 347#endif //DOXYGEN_SHOULD_SKIP_THIS 348 349template<> 350Fault 351TimingSimpleCPU::read(Addr addr, double &data, unsigned flags) 352{ 353 return read(addr, *(uint64_t*)&data, flags); 354} 355 356template<> 357Fault 
358TimingSimpleCPU::read(Addr addr, float &data, unsigned flags) 359{ 360 return read(addr, *(uint32_t*)&data, flags); 361} 362 363 364template<> 365Fault 366TimingSimpleCPU::read(Addr addr, int32_t &data, unsigned flags) 367{ 368 return read(addr, (uint32_t&)data, flags); 369} 370 371 372template <class T> 373Fault 374TimingSimpleCPU::write(T data, Addr addr, unsigned flags, uint64_t *res) 375{ 376 Request *req = 377 new Request(/* asid */ 0, addr, sizeof(T), flags, thread->readPC(), 378 cpuId, /* thread ID */ 0); 379 380 if (traceData) { 381 traceData->setAddr(req->getVaddr()); 382 } 383 384 // translate to physical address 385 Fault fault = thread->translateDataWriteReq(req); 386 387 // Now do the access. 388 if (fault == NoFault) { 389 MemCmd cmd = MemCmd::WriteReq; // default 390 bool do_access = true; // flag to suppress cache access 391 392 if (req->isLocked()) { 393 cmd = MemCmd::StoreCondReq; 394 do_access = TheISA::handleLockedWrite(thread, req); 395 } else if (req->isSwap()) { 396 cmd = MemCmd::SwapReq; 397 if (req->isCondSwap()) { 398 assert(res); 399 req->setExtraData(*res); 400 } 401 } 402 403 // Note: need to allocate dcache_pkt even if do_access is 404 // false, as it's used unconditionally to call completeAcc(). 405 assert(dcache_pkt == NULL); 406 dcache_pkt = new Packet(req, cmd, Packet::Broadcast); 407 dcache_pkt->allocate(); 408 dcache_pkt->set(data); 409 410 if (do_access) { 411 if (req->isMmapedIpr()) { 412 Tick delay; 413 dcache_pkt->set(htog(data)); 414 delay = TheISA::handleIprWrite(thread->getTC(), dcache_pkt); 415 new IprEvent(dcache_pkt, this, nextCycle(curTick + delay)); 416 _status = DcacheWaitResponse; 417 dcache_pkt = NULL; 418 } else if (!dcachePort.sendTiming(dcache_pkt)) { 419 _status = DcacheRetry; 420 } else { 421 _status = DcacheWaitResponse; 422 // memory system takes ownership of packet 423 dcache_pkt = NULL; 424 } 425 } 426 // This will need a new way to tell if it's hooked up to a cache or not. 
427 if (req->isUncacheable()) 428 recordEvent("Uncached Write"); 429 } else { 430 delete req; 431 } 432 433 434 // If the write needs to have a fault on the access, consider calling 435 // changeStatus() and changing it to "bad addr write" or something. 436 return fault; 437} 438 439Fault 440TimingSimpleCPU::translateDataWriteAddr(Addr vaddr, Addr &paddr, 441 int size, unsigned flags) 442{ 443 Request *req = 444 new Request(0, vaddr, size, flags, thread->readPC(), cpuId, 0); 445 446 if (traceData) { 447 traceData->setAddr(vaddr); 448 } 449 450 Fault fault = thread->translateDataWriteReq(req); 451 452 if (fault == NoFault) 453 paddr = req->getPaddr(); 454 455 delete req; 456 return fault; 457} 458 459 460#ifndef DOXYGEN_SHOULD_SKIP_THIS 461template 462Fault 463TimingSimpleCPU::write(Twin32_t data, Addr addr, 464 unsigned flags, uint64_t *res); 465 466template 467Fault 468TimingSimpleCPU::write(Twin64_t data, Addr addr, 469 unsigned flags, uint64_t *res); 470 471template 472Fault 473TimingSimpleCPU::write(uint64_t data, Addr addr, 474 unsigned flags, uint64_t *res); 475 476template 477Fault 478TimingSimpleCPU::write(uint32_t data, Addr addr, 479 unsigned flags, uint64_t *res); 480 481template 482Fault 483TimingSimpleCPU::write(uint16_t data, Addr addr, 484 unsigned flags, uint64_t *res); 485 486template 487Fault 488TimingSimpleCPU::write(uint8_t data, Addr addr, 489 unsigned flags, uint64_t *res); 490 491#endif //DOXYGEN_SHOULD_SKIP_THIS 492 493template<> 494Fault 495TimingSimpleCPU::write(double data, Addr addr, unsigned flags, uint64_t *res) 496{ 497 return write(*(uint64_t*)&data, addr, flags, res); 498} 499 500template<> 501Fault 502TimingSimpleCPU::write(float data, Addr addr, unsigned flags, uint64_t *res) 503{ 504 return write(*(uint32_t*)&data, addr, flags, res); 505} 506 507 508template<> 509Fault 510TimingSimpleCPU::write(int32_t data, Addr addr, unsigned flags, uint64_t *res) 511{ 512 return write((uint32_t)data, addr, flags, res); 513} 514 515 516void 
517TimingSimpleCPU::fetch() 518{ 519 DPRINTF(SimpleCPU, "Fetch\n"); 520 521 if (!curStaticInst || !curStaticInst->isDelayedCommit()) 522 checkForInterrupts(); 523 524 Request *ifetch_req = new Request(); 525 ifetch_req->setThreadContext(cpuId, /* thread ID */ 0); 526 Fault fault = setupFetchRequest(ifetch_req); 527 528 ifetch_pkt = new Packet(ifetch_req, MemCmd::ReadReq, Packet::Broadcast); 529 ifetch_pkt->dataStatic(&inst); 530 531 if (fault == NoFault) { 532 if (!icachePort.sendTiming(ifetch_pkt)) { 533 // Need to wait for retry 534 _status = IcacheRetry; 535 } else { 536 // Need to wait for cache to respond 537 _status = IcacheWaitResponse; 538 // ownership of packet transferred to memory system 539 ifetch_pkt = NULL; 540 } 541 } else { 542 delete ifetch_req; 543 delete ifetch_pkt; 544 // fetch fault: advance directly to next instruction (fault handler) 545 advanceInst(fault); 546 } 547 548 numCycles += tickToCycles(curTick - previousTick); 549 previousTick = curTick; 550} 551 552 553void 554TimingSimpleCPU::advanceInst(Fault fault) 555{ 556 advancePC(fault); 557 558 if (_status == Running) { 559 // kick off fetch of next instruction... callback from icache 560 // response will cause that instruction to be executed, 561 // keeping the CPU running. 
562 fetch(); 563 } 564} 565 566 567void 568TimingSimpleCPU::completeIfetch(PacketPtr pkt) 569{ 570 DPRINTF(SimpleCPU, "Complete ICache Fetch\n"); 571 572 // received a response from the icache: execute the received 573 // instruction 574 assert(!pkt->isError()); 575 assert(_status == IcacheWaitResponse); 576 577 _status = Running; 578 579 numCycles += tickToCycles(curTick - previousTick); 580 previousTick = curTick; 581 582 if (getState() == SimObject::Draining) { 583 delete pkt->req; 584 delete pkt; 585 586 completeDrain(); 587 return; 588 } 589 590 preExecute(); 591 if (curStaticInst->isMemRef() && !curStaticInst->isDataPrefetch()) { 592 // load or store: just send to dcache 593 Fault fault = curStaticInst->initiateAcc(this, traceData); 594 if (_status != Running) { 595 // instruction will complete in dcache response callback 596 assert(_status == DcacheWaitResponse || _status == DcacheRetry); 597 assert(fault == NoFault); 598 } else { 599 if (fault == NoFault) { 600 // early fail on store conditional: complete now 601 assert(dcache_pkt != NULL); 602 fault = curStaticInst->completeAcc(dcache_pkt, this, 603 traceData); 604 delete dcache_pkt->req; 605 delete dcache_pkt; 606 dcache_pkt = NULL; 607 608 // keep an instruction count 609 if (fault == NoFault) 610 countInst(); 611 } else if (traceData) { 612 // If there was a fault, we shouldn't trace this instruction. 613 delete traceData; 614 traceData = NULL; 615 } 616 617 postExecute(); 618 // @todo remove me after debugging with legion done 619 if (curStaticInst && (!curStaticInst->isMicroop() || 620 curStaticInst->isFirstMicroop())) 621 instCnt++; 622 advanceInst(fault); 623 } 624 } else { 625 // non-memory instruction: execute completely now 626 Fault fault = curStaticInst->execute(this, traceData); 627 628 // keep an instruction count 629 if (fault == NoFault) 630 countInst(); 631 else if (traceData) { 632 // If there was a fault, we shouldn't trace this instruction. 
633 delete traceData; 634 traceData = NULL; 635 } 636 637 postExecute(); 638 // @todo remove me after debugging with legion done 639 if (curStaticInst && (!curStaticInst->isMicroop() || 640 curStaticInst->isFirstMicroop())) 641 instCnt++; 642 advanceInst(fault); 643 } 644 645 delete pkt->req; 646 delete pkt; 647} 648 649void 650TimingSimpleCPU::IcachePort::ITickEvent::process() 651{ 652 cpu->completeIfetch(pkt); 653} 654 655bool 656TimingSimpleCPU::IcachePort::recvTiming(PacketPtr pkt) 657{ 658 if (pkt->isResponse() && !pkt->wasNacked()) { 659 // delay processing of returned data until next CPU clock edge 660 Tick next_tick = cpu->nextCycle(curTick); 661 662 if (next_tick == curTick) 663 cpu->completeIfetch(pkt); 664 else 665 tickEvent.schedule(pkt, next_tick); 666 667 return true; 668 } 669 else if (pkt->wasNacked()) { 670 assert(cpu->_status == IcacheWaitResponse); 671 pkt->reinitNacked(); 672 if (!sendTiming(pkt)) { 673 cpu->_status = IcacheRetry; 674 cpu->ifetch_pkt = pkt; 675 } 676 } 677 //Snooping a Coherence Request, do nothing 678 return true; 679} 680 681void 682TimingSimpleCPU::IcachePort::recvRetry() 683{ 684 // we shouldn't get a retry unless we have a packet that we're 685 // waiting to transmit 686 assert(cpu->ifetch_pkt != NULL); 687 assert(cpu->_status == IcacheRetry); 688 PacketPtr tmp = cpu->ifetch_pkt; 689 if (sendTiming(tmp)) { 690 cpu->_status = IcacheWaitResponse; 691 cpu->ifetch_pkt = NULL; 692 } 693} 694 695void 696TimingSimpleCPU::completeDataAccess(PacketPtr pkt) 697{ 698 // received a response from the dcache: complete the load or store 699 // instruction 700 assert(!pkt->isError()); 701 assert(_status == DcacheWaitResponse); 702 _status = Running; 703 704 numCycles += tickToCycles(curTick - previousTick); 705 previousTick = curTick; 706 707 Fault fault = curStaticInst->completeAcc(pkt, this, traceData); 708 709 // keep an instruction count 710 if (fault == NoFault) 711 countInst(); 712 else if (traceData) { 713 // If there was a fault, 
we shouldn't trace this instruction. 714 delete traceData; 715 traceData = NULL; 716 } 717 718 if (pkt->isRead() && pkt->isLocked()) { 719 TheISA::handleLockedRead(thread, pkt->req); 720 } 721 722 delete pkt->req; 723 delete pkt; 724 725 postExecute(); 726 727 if (getState() == SimObject::Draining) { 728 advancePC(fault); 729 completeDrain(); 730 731 return; 732 } 733 734 advanceInst(fault); 735} 736 737 738void 739TimingSimpleCPU::completeDrain() 740{ 741 DPRINTF(Config, "Done draining\n"); 742 changeState(SimObject::Drained); 743 drainEvent->process(); 744} 745 746void 747TimingSimpleCPU::DcachePort::setPeer(Port *port) 748{ 749 Port::setPeer(port); 750 751#if FULL_SYSTEM 752 // Update the ThreadContext's memory ports (Functional/Virtual 753 // Ports) 754 cpu->tcBase()->connectMemPorts(); 755#endif 756} 757 758bool 759TimingSimpleCPU::DcachePort::recvTiming(PacketPtr pkt) 760{ 761 if (pkt->isResponse() && !pkt->wasNacked()) { 762 // delay processing of returned data until next CPU clock edge 763 Tick next_tick = cpu->nextCycle(curTick); 764 765 if (next_tick == curTick) 766 cpu->completeDataAccess(pkt); 767 else 768 tickEvent.schedule(pkt, next_tick); 769 770 return true; 771 } 772 else if (pkt->wasNacked()) { 773 assert(cpu->_status == DcacheWaitResponse); 774 pkt->reinitNacked(); 775 if (!sendTiming(pkt)) { 776 cpu->_status = DcacheRetry; 777 cpu->dcache_pkt = pkt; 778 } 779 } 780 //Snooping a Coherence Request, do nothing 781 return true; 782} 783 784void 785TimingSimpleCPU::DcachePort::DTickEvent::process() 786{ 787 cpu->completeDataAccess(pkt); 788} 789 790void 791TimingSimpleCPU::DcachePort::recvRetry() 792{ 793 // we shouldn't get a retry unless we have a packet that we're 794 // waiting to transmit 795 assert(cpu->dcache_pkt != NULL); 796 assert(cpu->_status == DcacheRetry); 797 PacketPtr tmp = cpu->dcache_pkt; 798 if (sendTiming(tmp)) { 799 cpu->_status = DcacheWaitResponse; 800 // memory system takes ownership of packet 801 cpu->dcache_pkt = NULL; 802 } 
803} 804 805TimingSimpleCPU::IprEvent::IprEvent(Packet *_pkt, TimingSimpleCPU *_cpu, Tick t) 806 : Event(&mainEventQueue), pkt(_pkt), cpu(_cpu) 807{ 808 schedule(t); 809} 810 811void 812TimingSimpleCPU::IprEvent::process() 813{ 814 cpu->completeDataAccess(pkt); 815} 816 817const char * 818TimingSimpleCPU::IprEvent::description() 819{ 820 return "Timing Simple CPU Delay IPR event"; 821} 822 823 824//////////////////////////////////////////////////////////////////////// 825// 826// TimingSimpleCPU Simulation Object 827// 828TimingSimpleCPU * 829TimingSimpleCPUParams::create() 830{ 831 TimingSimpleCPU::Params *params = new TimingSimpleCPU::Params(); 832 params->name = name; 833 params->numberOfThreads = 1; 834 params->max_insts_any_thread = max_insts_any_thread; 835 params->max_insts_all_threads = max_insts_all_threads; 836 params->max_loads_any_thread = max_loads_any_thread; 837 params->max_loads_all_threads = max_loads_all_threads; 838 params->progress_interval = progress_interval; 839 params->deferRegistration = defer_registration; 840 params->clock = clock; 841 params->phase = phase; 842 params->functionTrace = function_trace; 843 params->functionTraceStart = function_trace_start; 844 params->system = system; 845 params->cpu_id = cpu_id; 846 params->tracer = tracer; 847 848 params->itb = itb; 849 params->dtb = dtb; 850#if FULL_SYSTEM 851 params->profile = profile; 852 params->do_quiesce = do_quiesce; 853 params->do_checkpoint_insts = do_checkpoint_insts; 854 params->do_statistics_insts = do_statistics_insts; 855#else 856 if (workload.size() != 1) 857 panic("only one workload allowed"); 858 params->process = workload[0]; 859#endif 860 861 TimingSimpleCPU *cpu = new TimingSimpleCPU(params); 862 return cpu; 863}
| 67 } 68#endif 69} 70 71Tick 72TimingSimpleCPU::CpuPort::recvAtomic(PacketPtr pkt) 73{ 74 panic("TimingSimpleCPU doesn't expect recvAtomic callback!"); 75 return curTick; 76} 77 78void 79TimingSimpleCPU::CpuPort::recvFunctional(PacketPtr pkt) 80{ 81 //No internal storage to update, jusst return 82 return; 83} 84 85void 86TimingSimpleCPU::CpuPort::recvStatusChange(Status status) 87{ 88 if (status == RangeChange) { 89 if (!snoopRangeSent) { 90 snoopRangeSent = true; 91 sendStatusChange(Port::RangeChange); 92 } 93 return; 94 } 95 96 panic("TimingSimpleCPU doesn't expect recvStatusChange callback!"); 97} 98 99 100void 101TimingSimpleCPU::CpuPort::TickEvent::schedule(PacketPtr _pkt, Tick t) 102{ 103 pkt = _pkt; 104 Event::schedule(t); 105} 106 107TimingSimpleCPU::TimingSimpleCPU(Params *p) 108 : BaseSimpleCPU(p), icachePort(this, p->clock), dcachePort(this, p->clock) 109{ 110 _status = Idle; 111 112 icachePort.snoopRangeSent = false; 113 dcachePort.snoopRangeSent = false; 114 115 ifetch_pkt = dcache_pkt = NULL; 116 drainEvent = NULL; 117 fetchEvent = NULL; 118 previousTick = 0; 119 changeState(SimObject::Running); 120} 121 122 123TimingSimpleCPU::~TimingSimpleCPU() 124{ 125} 126 127void 128TimingSimpleCPU::serialize(ostream &os) 129{ 130 SimObject::State so_state = SimObject::getState(); 131 SERIALIZE_ENUM(so_state); 132 BaseSimpleCPU::serialize(os); 133} 134 135void 136TimingSimpleCPU::unserialize(Checkpoint *cp, const string §ion) 137{ 138 SimObject::State so_state; 139 UNSERIALIZE_ENUM(so_state); 140 BaseSimpleCPU::unserialize(cp, section); 141} 142 143unsigned int 144TimingSimpleCPU::drain(Event *drain_event) 145{ 146 // TimingSimpleCPU is ready to drain if it's not waiting for 147 // an access to complete. 
148 if (status() == Idle || status() == Running || status() == SwitchedOut) { 149 changeState(SimObject::Drained); 150 return 0; 151 } else { 152 changeState(SimObject::Draining); 153 drainEvent = drain_event; 154 return 1; 155 } 156} 157 158void 159TimingSimpleCPU::resume() 160{ 161 DPRINTF(SimpleCPU, "Resume\n"); 162 if (_status != SwitchedOut && _status != Idle) { 163 assert(system->getMemoryMode() == Enums::timing); 164 165 // Delete the old event if it existed. 166 if (fetchEvent) { 167 if (fetchEvent->scheduled()) 168 fetchEvent->deschedule(); 169 170 delete fetchEvent; 171 } 172 173 fetchEvent = new FetchEvent(this, nextCycle()); 174 } 175 176 changeState(SimObject::Running); 177} 178 179void 180TimingSimpleCPU::switchOut() 181{ 182 assert(status() == Running || status() == Idle); 183 _status = SwitchedOut; 184 numCycles += tickToCycles(curTick - previousTick); 185 186 // If we've been scheduled to resume but are then told to switch out, 187 // we'll need to cancel it. 188 if (fetchEvent && fetchEvent->scheduled()) 189 fetchEvent->deschedule(); 190} 191 192 193void 194TimingSimpleCPU::takeOverFrom(BaseCPU *oldCPU) 195{ 196 BaseCPU::takeOverFrom(oldCPU, &icachePort, &dcachePort); 197 198 // if any of this CPU's ThreadContexts are active, mark the CPU as 199 // running and schedule its tick event. 
200 for (int i = 0; i < threadContexts.size(); ++i) { 201 ThreadContext *tc = threadContexts[i]; 202 if (tc->status() == ThreadContext::Active && _status != Running) { 203 _status = Running; 204 break; 205 } 206 } 207 208 if (_status != Running) { 209 _status = Idle; 210 } 211 assert(threadContexts.size() == 1); 212 cpuId = tc->readCpuId(); 213 previousTick = curTick; 214} 215 216 217void 218TimingSimpleCPU::activateContext(int thread_num, int delay) 219{ 220 DPRINTF(SimpleCPU, "ActivateContext %d (%d cycles)\n", thread_num, delay); 221 222 assert(thread_num == 0); 223 assert(thread); 224 225 assert(_status == Idle); 226 227 notIdleFraction++; 228 _status = Running; 229 230 // kick things off by initiating the fetch of the next instruction 231 fetchEvent = new FetchEvent(this, nextCycle(curTick + ticks(delay))); 232} 233 234 235void 236TimingSimpleCPU::suspendContext(int thread_num) 237{ 238 DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num); 239 240 assert(thread_num == 0); 241 assert(thread); 242 243 assert(_status == Running); 244 245 // just change status to Idle... if status != Running, 246 // completeInst() will not initiate fetch of next instruction. 247 248 notIdleFraction--; 249 _status = Idle; 250} 251 252 253template <class T> 254Fault 255TimingSimpleCPU::read(Addr addr, T &data, unsigned flags) 256{ 257 Request *req = 258 new Request(/* asid */ 0, addr, sizeof(T), flags, thread->readPC(), 259 cpuId, /* thread ID */ 0); 260 261 if (traceData) { 262 traceData->setAddr(req->getVaddr()); 263 } 264 265 // translate to physical address 266 Fault fault = thread->translateDataReadReq(req); 267 268 // Now do the access. 269 if (fault == NoFault) { 270 PacketPtr pkt = 271 new Packet(req, 272 (req->isLocked() ? 
273 MemCmd::LoadLockedReq : MemCmd::ReadReq), 274 Packet::Broadcast); 275 pkt->dataDynamic<T>(new T); 276 277 if (req->isMmapedIpr()) { 278 Tick delay; 279 delay = TheISA::handleIprRead(thread->getTC(), pkt); 280 new IprEvent(pkt, this, nextCycle(curTick + delay)); 281 _status = DcacheWaitResponse; 282 dcache_pkt = NULL; 283 } else if (!dcachePort.sendTiming(pkt)) { 284 _status = DcacheRetry; 285 dcache_pkt = pkt; 286 } else { 287 _status = DcacheWaitResponse; 288 // memory system takes ownership of packet 289 dcache_pkt = NULL; 290 } 291 292 // This will need a new way to tell if it has a dcache attached. 293 if (req->isUncacheable()) 294 recordEvent("Uncached Read"); 295 } else { 296 delete req; 297 } 298 299 return fault; 300} 301 302Fault 303TimingSimpleCPU::translateDataReadAddr(Addr vaddr, Addr &paddr, 304 int size, unsigned flags) 305{ 306 Request *req = 307 new Request(0, vaddr, size, flags, thread->readPC(), cpuId, 0); 308 309 if (traceData) { 310 traceData->setAddr(vaddr); 311 } 312 313 Fault fault = thread->translateDataWriteReq(req); 314 315 if (fault == NoFault) 316 paddr = req->getPaddr(); 317 318 delete req; 319 return fault; 320} 321 322#ifndef DOXYGEN_SHOULD_SKIP_THIS 323 324template 325Fault 326TimingSimpleCPU::read(Addr addr, Twin64_t &data, unsigned flags); 327 328template 329Fault 330TimingSimpleCPU::read(Addr addr, Twin32_t &data, unsigned flags); 331 332template 333Fault 334TimingSimpleCPU::read(Addr addr, uint64_t &data, unsigned flags); 335 336template 337Fault 338TimingSimpleCPU::read(Addr addr, uint32_t &data, unsigned flags); 339 340template 341Fault 342TimingSimpleCPU::read(Addr addr, uint16_t &data, unsigned flags); 343 344template 345Fault 346TimingSimpleCPU::read(Addr addr, uint8_t &data, unsigned flags); 347 348#endif //DOXYGEN_SHOULD_SKIP_THIS 349 350template<> 351Fault 352TimingSimpleCPU::read(Addr addr, double &data, unsigned flags) 353{ 354 return read(addr, *(uint64_t*)&data, flags); 355} 356 357template<> 358Fault 
359TimingSimpleCPU::read(Addr addr, float &data, unsigned flags) 360{ 361 return read(addr, *(uint32_t*)&data, flags); 362} 363 364 365template<> 366Fault 367TimingSimpleCPU::read(Addr addr, int32_t &data, unsigned flags) 368{ 369 return read(addr, (uint32_t&)data, flags); 370} 371 372 373template <class T> 374Fault 375TimingSimpleCPU::write(T data, Addr addr, unsigned flags, uint64_t *res) 376{ 377 Request *req = 378 new Request(/* asid */ 0, addr, sizeof(T), flags, thread->readPC(), 379 cpuId, /* thread ID */ 0); 380 381 if (traceData) { 382 traceData->setAddr(req->getVaddr()); 383 } 384 385 // translate to physical address 386 Fault fault = thread->translateDataWriteReq(req); 387 388 // Now do the access. 389 if (fault == NoFault) { 390 MemCmd cmd = MemCmd::WriteReq; // default 391 bool do_access = true; // flag to suppress cache access 392 393 if (req->isLocked()) { 394 cmd = MemCmd::StoreCondReq; 395 do_access = TheISA::handleLockedWrite(thread, req); 396 } else if (req->isSwap()) { 397 cmd = MemCmd::SwapReq; 398 if (req->isCondSwap()) { 399 assert(res); 400 req->setExtraData(*res); 401 } 402 } 403 404 // Note: need to allocate dcache_pkt even if do_access is 405 // false, as it's used unconditionally to call completeAcc(). 406 assert(dcache_pkt == NULL); 407 dcache_pkt = new Packet(req, cmd, Packet::Broadcast); 408 dcache_pkt->allocate(); 409 dcache_pkt->set(data); 410 411 if (do_access) { 412 if (req->isMmapedIpr()) { 413 Tick delay; 414 dcache_pkt->set(htog(data)); 415 delay = TheISA::handleIprWrite(thread->getTC(), dcache_pkt); 416 new IprEvent(dcache_pkt, this, nextCycle(curTick + delay)); 417 _status = DcacheWaitResponse; 418 dcache_pkt = NULL; 419 } else if (!dcachePort.sendTiming(dcache_pkt)) { 420 _status = DcacheRetry; 421 } else { 422 _status = DcacheWaitResponse; 423 // memory system takes ownership of packet 424 dcache_pkt = NULL; 425 } 426 } 427 // This will need a new way to tell if it's hooked up to a cache or not. 
428 if (req->isUncacheable()) 429 recordEvent("Uncached Write"); 430 } else { 431 delete req; 432 } 433 434 435 // If the write needs to have a fault on the access, consider calling 436 // changeStatus() and changing it to "bad addr write" or something. 437 return fault; 438} 439 440Fault 441TimingSimpleCPU::translateDataWriteAddr(Addr vaddr, Addr &paddr, 442 int size, unsigned flags) 443{ 444 Request *req = 445 new Request(0, vaddr, size, flags, thread->readPC(), cpuId, 0); 446 447 if (traceData) { 448 traceData->setAddr(vaddr); 449 } 450 451 Fault fault = thread->translateDataWriteReq(req); 452 453 if (fault == NoFault) 454 paddr = req->getPaddr(); 455 456 delete req; 457 return fault; 458} 459 460 461#ifndef DOXYGEN_SHOULD_SKIP_THIS 462template 463Fault 464TimingSimpleCPU::write(Twin32_t data, Addr addr, 465 unsigned flags, uint64_t *res); 466 467template 468Fault 469TimingSimpleCPU::write(Twin64_t data, Addr addr, 470 unsigned flags, uint64_t *res); 471 472template 473Fault 474TimingSimpleCPU::write(uint64_t data, Addr addr, 475 unsigned flags, uint64_t *res); 476 477template 478Fault 479TimingSimpleCPU::write(uint32_t data, Addr addr, 480 unsigned flags, uint64_t *res); 481 482template 483Fault 484TimingSimpleCPU::write(uint16_t data, Addr addr, 485 unsigned flags, uint64_t *res); 486 487template 488Fault 489TimingSimpleCPU::write(uint8_t data, Addr addr, 490 unsigned flags, uint64_t *res); 491 492#endif //DOXYGEN_SHOULD_SKIP_THIS 493 494template<> 495Fault 496TimingSimpleCPU::write(double data, Addr addr, unsigned flags, uint64_t *res) 497{ 498 return write(*(uint64_t*)&data, addr, flags, res); 499} 500 501template<> 502Fault 503TimingSimpleCPU::write(float data, Addr addr, unsigned flags, uint64_t *res) 504{ 505 return write(*(uint32_t*)&data, addr, flags, res); 506} 507 508 509template<> 510Fault 511TimingSimpleCPU::write(int32_t data, Addr addr, unsigned flags, uint64_t *res) 512{ 513 return write((uint32_t)data, addr, flags, res); 514} 515 516 517void 
TimingSimpleCPU::fetch()
{
    DPRINTF(SimpleCPU, "Fetch\n");

    // Check for pending interrupts, but only at a clean instruction
    // boundary: never between the pieces of a delayed-commit macro-op.
    if (!curStaticInst || !curStaticInst->isDelayedCommit())
        checkForInterrupts();

    // Build a new fetch request/packet for the current PC.
    Request *ifetch_req = new Request();
    ifetch_req->setThreadContext(cpuId, /* thread ID */ 0);
    Fault fault = setupFetchRequest(ifetch_req);

    ifetch_pkt = new Packet(ifetch_req, MemCmd::ReadReq, Packet::Broadcast);
    // Fetched bits land directly in the 'inst' member; no dynamic data
    // buffer is allocated for this packet.
    ifetch_pkt->dataStatic(&inst);

    if (fault == NoFault) {
        if (!icachePort.sendTiming(ifetch_pkt)) {
            // Port is busy: keep ifetch_pkt and wait for recvRetry to
            // resend it.
            _status = IcacheRetry;
        } else {
            // Need to wait for cache to respond
            _status = IcacheWaitResponse;
            // ownership of packet transferred to memory system; it will
            // come back (and be deleted) in completeIfetch().
            ifetch_pkt = NULL;
        }
    } else {
        // Fetch fault (e.g. translation failure): the request never goes
        // to memory, so reclaim it here and advance straight into the
        // fault handler.
        delete ifetch_req;
        delete ifetch_pkt;
        // fetch fault: advance directly to next instruction (fault handler)
        advanceInst(fault);
    }

    // Account for the cycles elapsed since the last accounting point.
    numCycles += tickToCycles(curTick - previousTick);
    previousTick = curTick;
}


/// Advance the PC past the just-completed instruction (or into the
/// fault handler) and, if the CPU is still Running, start the next
/// fetch.  If a memory access left us in a Wait/Retry state, the next
/// fetch is deferred until the corresponding callback runs.
void
TimingSimpleCPU::advanceInst(Fault fault)
{
    advancePC(fault);

    if (_status == Running) {
        // kick off fetch of next instruction... callback from icache
        // response will cause that instruction to be executed,
        // keeping the CPU running.
        fetch();
    }
}


/// Icache response callback: decode and execute the instruction whose
/// bits were deposited into 'inst' by the returning fetch packet.
/// Takes ownership of 'pkt' and deletes it (and its request) on every
/// path out of this function.
void
TimingSimpleCPU::completeIfetch(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "Complete ICache Fetch\n");

    // received a response from the icache: execute the received
    // instruction
    assert(!pkt->isError());
    assert(_status == IcacheWaitResponse);

    _status = Running;

    numCycles += tickToCycles(curTick - previousTick);
    previousTick = curTick;

    // If a drain was requested while this fetch was outstanding, this
    // response is the event we were waiting for: drop the packet and
    // finish draining instead of executing.
    if (getState() == SimObject::Draining) {
        delete pkt->req;
        delete pkt;

        completeDrain();
        return;
    }

    preExecute();
    if (curStaticInst->isMemRef() && !curStaticInst->isDataPrefetch()) {
        // load or store: just send to dcache
        Fault fault = curStaticInst->initiateAcc(this, traceData);
        if (_status != Running) {
            // instruction will complete in dcache response callback
            assert(_status == DcacheWaitResponse || _status == DcacheRetry);
            assert(fault == NoFault);
        } else {
            // Still Running after initiateAcc: no request actually went
            // to the dcache.
            if (fault == NoFault) {
                // early fail on store conditional: complete now
                // (dcache_pkt was built by initiateAcc but never sent,
                // so we still own it and must finish the access and
                // free it here)
                assert(dcache_pkt != NULL);
                fault = curStaticInst->completeAcc(dcache_pkt, this,
                                                   traceData);
                delete dcache_pkt->req;
                delete dcache_pkt;
                dcache_pkt = NULL;

                // keep an instruction count
                if (fault == NoFault)
                    countInst();
            } else if (traceData) {
                // If there was a fault, we shouldn't trace this instruction.
                delete traceData;
                traceData = NULL;
            }

            postExecute();
            // @todo remove me after debugging with legion done
            if (curStaticInst && (!curStaticInst->isMicroop() ||
                        curStaticInst->isFirstMicroop()))
                instCnt++;
            advanceInst(fault);
        }
    } else {
        // non-memory instruction: execute completely now
        Fault fault = curStaticInst->execute(this, traceData);

        // keep an instruction count
        if (fault == NoFault)
            countInst();
        else if (traceData) {
            // If there was a fault, we shouldn't trace this instruction.
            delete traceData;
            traceData = NULL;
        }

        postExecute();
        // @todo remove me after debugging with legion done
        if (curStaticInst && (!curStaticInst->isMicroop() ||
                    curStaticInst->isFirstMicroop()))
            instCnt++;
        advanceInst(fault);
    }

    // Fetch packet is fully consumed; reclaim it.  (advanceInst may
    // already have launched the NEXT fetch above — that uses a fresh
    // packet, so deleting this one here is safe.)
    delete pkt->req;
    delete pkt;
}

/// Deferred-delivery event: hands the saved icache response to the CPU
/// on the clock edge it was scheduled for.
void
TimingSimpleCPU::IcachePort::ITickEvent::process()
{
    cpu->completeIfetch(pkt);
}

/// Incoming icache-side timing packet.  Three cases:
///  - normal response: deliver now if we are already on a CPU clock
///    edge, otherwise defer via tickEvent to the next edge;
///  - nacked packet: resend immediately, or fall back to retry state
///    if the port is busy;
///  - anything else (snoop traffic): ignored.
/// Always returns true — this port never refuses delivery.
bool
TimingSimpleCPU::IcachePort::recvTiming(PacketPtr pkt)
{
    if (pkt->isResponse() && !pkt->wasNacked()) {
        // delay processing of returned data until next CPU clock edge
        Tick next_tick = cpu->nextCycle(curTick);

        if (next_tick == curTick)
            cpu->completeIfetch(pkt);
        else
            tickEvent.schedule(pkt, next_tick);

        return true;
    }
    else if (pkt->wasNacked()) {
        assert(cpu->_status == IcacheWaitResponse);
        pkt->reinitNacked();
        if (!sendTiming(pkt)) {
            // Resend also failed: stash the packet and wait for
            // recvRetry.
            cpu->_status = IcacheRetry;
            cpu->ifetch_pkt = pkt;
        }
    }
    //Snooping a Coherence Request, do nothing
    return true;
}

/// The memory system is ready again: resend the fetch packet that was
/// refused earlier.  On success, packet ownership moves back to the
/// memory system.
void
TimingSimpleCPU::IcachePort::recvRetry()
{
    // we shouldn't get a retry unless we have a packet that we're
    // waiting to transmit
    assert(cpu->ifetch_pkt != NULL);
    assert(cpu->_status == IcacheRetry);
    PacketPtr tmp = cpu->ifetch_pkt;
    if (sendTiming(tmp)) {
        cpu->_status = IcacheWaitResponse;
        cpu->ifetch_pkt = NULL;
    }
}

/// Dcache response callback: finish the in-flight load or store, then
/// either complete a pending drain or advance to the next instruction.
/// Takes ownership of 'pkt' and deletes it (and its request).
void
TimingSimpleCPU::completeDataAccess(PacketPtr pkt)
{
    // received a response from the dcache: complete the load or store
    // instruction
    assert(!pkt->isError());
    assert(_status == DcacheWaitResponse);
    _status = Running;

    numCycles += tickToCycles(curTick - previousTick);
    previousTick = curTick;

    Fault fault = curStaticInst->completeAcc(pkt, this, traceData);

    // keep an instruction count
    if (fault == NoFault)
        countInst();
    else if (traceData) {
        // If there was a fault, we shouldn't trace this instruction.
        delete traceData;
        traceData = NULL;
    }

    // Record load-locked state for a subsequent store-conditional
    // (LL/SC support).
    if (pkt->isRead() && pkt->isLocked()) {
        TheISA::handleLockedRead(thread, pkt->req);
    }

    delete pkt->req;
    delete pkt;

    postExecute();

    // If draining, advance the PC past this instruction but do not
    // start another fetch — signal drain completion instead.
    if (getState() == SimObject::Draining) {
        advancePC(fault);
        completeDrain();

        return;
    }

    advanceInst(fault);
}


/// Final step of draining: mark the object Drained and fire the event
/// the drain requester gave us.
void
TimingSimpleCPU::completeDrain()
{
    DPRINTF(Config, "Done draining\n");
    changeState(SimObject::Drained);
    drainEvent->process();
}

void
TimingSimpleCPU::DcachePort::setPeer(Port *port)
{
    Port::setPeer(port);

#if FULL_SYSTEM
    // Update the ThreadContext's memory ports (Functional/Virtual
    // Ports)
    cpu->tcBase()->connectMemPorts();
#endif
}

/// Incoming dcache-side timing packet; mirrors IcachePort::recvTiming:
/// deliver/defer real responses, resend nacked packets (or park them
/// for recvRetry), ignore snoops.  Always returns true.
bool
TimingSimpleCPU::DcachePort::recvTiming(PacketPtr pkt)
{
    if (pkt->isResponse() && !pkt->wasNacked()) {
        // delay processing of returned data until next CPU clock edge
        Tick next_tick = cpu->nextCycle(curTick);

        if (next_tick == curTick)
            cpu->completeDataAccess(pkt);
        else
            tickEvent.schedule(pkt, next_tick);

        return true;
    }
    else if (pkt->wasNacked()) {
        assert(cpu->_status == DcacheWaitResponse);
        pkt->reinitNacked();
        if (!sendTiming(pkt)) {
            // Resend refused: wait for recvRetry with the packet saved.
            cpu->_status = DcacheRetry;
            cpu->dcache_pkt = pkt;
        }
    }
    //Snooping a Coherence Request, do nothing
    return true;
}

/// Deferred-delivery event for dcache responses (next clock edge).
void
TimingSimpleCPU::DcachePort::DTickEvent::process()
{
    cpu->completeDataAccess(pkt);
}

/// Memory system ready again: resend the pending dcache packet.
void
TimingSimpleCPU::DcachePort::recvRetry()
{
    // we shouldn't get a retry unless we have a packet that we're
    // waiting to transmit
    assert(cpu->dcache_pkt != NULL);
    assert(cpu->_status == DcacheRetry);
    PacketPtr tmp = cpu->dcache_pkt;
    if (sendTiming(tmp)) {
        cpu->_status = DcacheWaitResponse;
        // memory system takes ownership of packet
        cpu->dcache_pkt = NULL;
    }
}

/// Event used to model the latency of an IPR (internal processor
/// register) access: delivers the packet back to the CPU as if it
/// were a normal dcache response after 't' ticks.  Schedules itself
/// on construction.
TimingSimpleCPU::IprEvent::IprEvent(Packet *_pkt, TimingSimpleCPU *_cpu, Tick t)
    : Event(&mainEventQueue), pkt(_pkt), cpu(_cpu)
{
    schedule(t);
}

void
TimingSimpleCPU::IprEvent::process()
{
    cpu->completeDataAccess(pkt);
}

const char *
TimingSimpleCPU::IprEvent::description()
{
    return "Timing Simple CPU Delay IPR event";
}


////////////////////////////////////////////////////////////////////////
//
//  TimingSimpleCPU Simulation Object
//
/// Factory method invoked by the Python configuration layer: copies
/// the script-visible parameters into a TimingSimpleCPU::Params struct
/// and constructs the CPU.  Ownership of 'params' passes to the CPU.
TimingSimpleCPU *
TimingSimpleCPUParams::create()
{
    TimingSimpleCPU::Params *params = new TimingSimpleCPU::Params();
    params->name = name;
    // This model is single-threaded by construction.
    params->numberOfThreads = 1;
    params->max_insts_any_thread = max_insts_any_thread;
    params->max_insts_all_threads = max_insts_all_threads;
    params->max_loads_any_thread = max_loads_any_thread;
    params->max_loads_all_threads = max_loads_all_threads;
    params->progress_interval = progress_interval;
    params->deferRegistration = defer_registration;
    params->clock = clock;
    params->phase = phase;
    params->functionTrace = function_trace;
    params->functionTraceStart = function_trace_start;
    params->system = system;
    params->cpu_id = cpu_id;
    params->tracer = tracer;

    params->itb = itb;
    params->dtb = dtb;
#if FULL_SYSTEM
    params->profile = profile;
    params->do_quiesce = do_quiesce;
    params->do_checkpoint_insts = do_checkpoint_insts;
    params->do_statistics_insts = do_statistics_insts;
#else
    // Syscall-emulation mode: exactly one workload process is supported.
    if (workload.size() != 1)
        panic("only one workload allowed");
    params->process = workload[0];
#endif

    TimingSimpleCPU *cpu = new TimingSimpleCPU(params);
    return cpu;
}
|