// atomic.cc, revision 5714
/*
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 */

#include "arch/locked_mem.hh"
#include "arch/mmaped_ipr.hh"
#include "arch/utility.hh"
#include "base/bigint.hh"
#include "cpu/exetrace.hh"
#include "cpu/simple/atomic.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "params/AtomicSimpleCPU.hh"
#include "sim/system.hh"

using namespace std;
using namespace TheISA;

AtomicSimpleCPU::TickEvent::TickEvent(AtomicSimpleCPU *c)
    : Event(CPU_Tick_Pri), cpu(c)
{
}


void
AtomicSimpleCPU::TickEvent::process()
{
    cpu->tick();
}

const char *
AtomicSimpleCPU::TickEvent::description() const
{
    return "AtomicSimpleCPU tick";
}

Port *
AtomicSimpleCPU::getPort(const std::string &if_name, int idx)
{
    if (if_name == "dcache_port")
        return &dcachePort;
    else if (if_name == "icache_port")
        return &icachePort;
    else if (if_name == "physmem_port") {
        hasPhysMemPort = true;
        return &physmemPort;
    }
    else
        panic("No Such Port\n");
}

void
AtomicSimpleCPU::init()
{
    BaseCPU::init();
#if FULL_SYSTEM
    for (int i = 0; i < threadContexts.size(); ++i) {
        ThreadContext *tc = threadContexts[i];

        // initialize CPU, including PC
        TheISA::initCPU(tc, tc->contextId());
    }
#endif
    if (hasPhysMemPort) {
        bool snoop = false;
        AddrRangeList pmAddrList;
        physmemPort.getPeerAddressRanges(pmAddrList, snoop);
        physMemAddr = *pmAddrList.begin();
    }
    // Atomic doesn't do MT right now, so contextId == threadId
    ifetch_req.setThreadContext(_cpuId, 0); // Add thread ID if we add MT
    data_read_req.setThreadContext(_cpuId, 0); // Add thread ID here too
    data_write_req.setThreadContext(_cpuId, 0); // Add thread ID here too
}

bool
AtomicSimpleCPU::CpuPort::recvTiming(PacketPtr pkt)
{
    panic("AtomicSimpleCPU doesn't expect recvTiming callback!");
    return true;
}

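// In atomic mode this CPU never receives timing responses; the atomic
// and functional receive hooks below exist only to absorb snoops from
// coherent peers, which require no action (and no latency) here.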
Tick
AtomicSimpleCPU::CpuPort::recvAtomic(PacketPtr pkt)
{
    //Snooping a coherence request, just return
    return 0;
}

void
AtomicSimpleCPU::CpuPort::recvFunctional(PacketPtr pkt)
{
    //No internal storage to update, just return
    return;
}

void
AtomicSimpleCPU::CpuPort::recvStatusChange(Status status)
{
    if (status == RangeChange) {
        if (!snoopRangeSent) {
            snoopRangeSent = true;
            sendStatusChange(Port::RangeChange);
        }
        return;
    }

    panic("AtomicSimpleCPU doesn't expect recvStatusChange callback!");
}

void
AtomicSimpleCPU::CpuPort::recvRetry()
{
    panic("AtomicSimpleCPU doesn't expect recvRetry callback!");
}

void
AtomicSimpleCPU::DcachePort::setPeer(Port *port)
{
    Port::setPeer(port);

#if FULL_SYSTEM
    // Update the ThreadContext's memory ports (Functional/Virtual
    // Ports)
    cpu->tcBase()->connectMemPorts(cpu->tcBase());
#endif
}

AtomicSimpleCPU::AtomicSimpleCPU(AtomicSimpleCPUParams *p)
    : BaseSimpleCPU(p), tickEvent(this), width(p->width),
      simulate_data_stalls(p->simulate_data_stalls),
      simulate_inst_stalls(p->simulate_inst_stalls),
      icachePort(name() + "-iport", this), dcachePort(name() + "-dport", this),
      physmemPort(name() + "-pport", this), hasPhysMemPort(false)
{
    _status = Idle;

    icachePort.snoopRangeSent = false;
    dcachePort.snoopRangeSent = false;
}


AtomicSimpleCPU::~AtomicSimpleCPU()
{
}

void
AtomicSimpleCPU::serialize(ostream &os)
{
    SimObject::State so_state = SimObject::getState();
    SERIALIZE_ENUM(so_state);
    BaseSimpleCPU::serialize(os);
    nameOut(os, csprintf("%s.tickEvent", name()));
    tickEvent.serialize(os);
}

void
AtomicSimpleCPU::unserialize(Checkpoint *cp, const string &section)
{
    SimObject::State so_state;
    UNSERIALIZE_ENUM(so_state);
    BaseSimpleCPU::unserialize(cp, section);
    tickEvent.unserialize(cp, csprintf("%s.tickEvent", section));
}

void
AtomicSimpleCPU::resume()
{
    if (_status == Idle || _status == SwitchedOut)
        return;

    DPRINTF(SimpleCPU, "Resume\n");
    assert(system->getMemoryMode() == Enums::atomic);

    changeState(SimObject::Running);
    if (thread->status() == ThreadContext::Active) {
        if (!tickEvent.scheduled())
            schedule(tickEvent, nextCycle());
    }
}

void
AtomicSimpleCPU::switchOut()
{
    assert(_status == Running || _status == Idle);
    _status = SwitchedOut;

    tickEvent.squash();
}


void
AtomicSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
{
    BaseCPU::takeOverFrom(oldCPU, &icachePort, &dcachePort);

    assert(!tickEvent.scheduled());

    // if any of this CPU's ThreadContexts are active, mark the CPU as
    // running and schedule its tick event.
    for (int i = 0; i < threadContexts.size(); ++i) {
        ThreadContext *tc = threadContexts[i];
        if (tc->status() == ThreadContext::Active && _status != Running) {
            _status = Running;
            schedule(tickEvent, nextCycle());
            break;
        }
    }
    if (_status != Running) {
        _status = Idle;
    }
    assert(threadContexts.size() == 1);
    ifetch_req.setThreadContext(_cpuId, 0); // Add thread ID if we add MT
    data_read_req.setThreadContext(_cpuId, 0); // Add thread ID here too
    data_write_req.setThreadContext(_cpuId, 0); // Add thread ID here too
}


void
AtomicSimpleCPU::activateContext(int thread_num, int delay)
{
    DPRINTF(SimpleCPU, "ActivateContext %d (%d cycles)\n", thread_num, delay);

    assert(thread_num == 0);
    assert(thread);

    assert(_status == Idle);
    assert(!tickEvent.scheduled());

    notIdleFraction++;
    numCycles += tickToCycles(thread->lastActivate - thread->lastSuspend);

    //Make sure ticks are still on multiples of cycles
    schedule(tickEvent, nextCycle(curTick + ticks(delay)));
    _status = Running;
}


void
AtomicSimpleCPU::suspendContext(int thread_num)
{
    DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);

    assert(thread_num == 0);
    assert(thread);

    assert(_status == Running);

    // tick event may not be scheduled if this gets called from inside
    // an instruction's execution, e.g. "quiesce"
    if (tickEvent.scheduled())
        deschedule(tickEvent);

    notIdleFraction--;
    _status = Idle;
}

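// Atomic read of a value of type T from virtual address 'addr'.  An
// access that crosses a cache line boundary is split in two: the first
// loop iteration covers the bytes up to the boundary and the second
// covers the remainder starting at 'secondAddr'.  dcache_latency
// accumulates over both pieces, and the value is converted from guest
// to host byte order once the full read has completed.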

template <class T>
Fault
AtomicSimpleCPU::read(Addr addr, T &data, unsigned flags)
{
    // use the CPU's statically allocated read request and packet objects
    Request *req = &data_read_req;

    if (traceData) {
        traceData->setAddr(addr);
    }

    //The block size of our peer.
    int blockSize = dcachePort.peerBlockSize();
    //The size of the data we're trying to read.
    int dataSize = sizeof(T);

    uint8_t * dataPtr = (uint8_t *)&data;

    //The address of the second part of this access if it needs to be split
    //across a cache line boundary.
    Addr secondAddr = roundDown(addr + dataSize - 1, blockSize);

    if(secondAddr > addr)
        dataSize = secondAddr - addr;

    dcache_latency = 0;

    while(1) {
        req->setVirt(0, addr, dataSize, flags, thread->readPC());

        // translate to physical address
        Fault fault = thread->translateDataReadReq(req);

        // Now do the access.
        if (fault == NoFault) {
            Packet pkt = Packet(req,
                    req->isLocked() ? MemCmd::LoadLockedReq : MemCmd::ReadReq,
                    Packet::Broadcast);
            pkt.dataStatic(dataPtr);

            if (req->isMmapedIpr())
                dcache_latency += TheISA::handleIprRead(thread->getTC(), &pkt);
            else {
                if (hasPhysMemPort && pkt.getAddr() == physMemAddr)
                    dcache_latency += physmemPort.sendAtomic(&pkt);
                else
                    dcache_latency += dcachePort.sendAtomic(&pkt);
            }
            dcache_access = true;

            assert(!pkt.isError());

            if (req->isLocked()) {
                TheISA::handleLockedRead(thread, req);
            }
        }

        // This will need a new way to tell if it has a dcache attached.
        if (req->isUncacheable())
            recordEvent("Uncached Read");

        //If there's a fault, return it
        if (fault != NoFault)
            return fault;
        //If we don't need to access a second cache line, stop now.
        if (secondAddr <= addr)
        {
            data = gtoh(data);
            if (traceData) {
                traceData->setData(data);
            }
            return fault;
        }

        /*
         * Set up for accessing the second cache line.
         */

        //Move the pointer we're reading into to the correct location.
        dataPtr += dataSize;
        //Adjust the size to get the remaining bytes.
        dataSize = addr + sizeof(T) - secondAddr;
        //And access the right address.
        addr = secondAddr;
    }
}

Fault
AtomicSimpleCPU::translateDataReadAddr(Addr vaddr, Addr &paddr,
        int size, unsigned flags)
{
    // use the CPU's statically allocated read request and packet objects
    Request *req = &data_read_req;

    if (traceData) {
        traceData->setAddr(vaddr);
    }

    //The block size of our peer.
    int blockSize = dcachePort.peerBlockSize();
    //The size of the data we're trying to read.
    int dataSize = size;

    bool firstTimeThrough = true;

    //The address of the second part of this access if it needs to be split
    //across a cache line boundary.
    Addr secondAddr = roundDown(vaddr + dataSize - 1, blockSize);

    if(secondAddr > vaddr)
        dataSize = secondAddr - vaddr;

    while(1) {
        req->setVirt(0, vaddr, dataSize, flags, thread->readPC());

        // translate to physical address
        Fault fault = thread->translateDataReadReq(req);

        //If there's a fault, return it
        if (fault != NoFault)
            return fault;

        if (firstTimeThrough) {
            paddr = req->getPaddr();
            firstTimeThrough = false;
        }

        //If we don't need to access a second cache line, stop now.
        if (secondAddr <= vaddr)
            return fault;

        /*
         * Set up for accessing the second cache line.
         */

        //Adjust the size to get the remaining bytes.
        dataSize = vaddr + size - secondAddr;
        //And access the right address.
        vaddr = secondAddr;
    }
}

#ifndef DOXYGEN_SHOULD_SKIP_THIS

template
Fault
AtomicSimpleCPU::read(Addr addr, Twin32_t &data, unsigned flags);

template
Fault
AtomicSimpleCPU::read(Addr addr, Twin64_t &data, unsigned flags);

template
Fault
AtomicSimpleCPU::read(Addr addr, uint64_t &data, unsigned flags);

template
Fault
AtomicSimpleCPU::read(Addr addr, uint32_t &data, unsigned flags);

template
Fault
AtomicSimpleCPU::read(Addr addr, uint16_t &data, unsigned flags);

template
Fault
AtomicSimpleCPU::read(Addr addr, uint8_t &data, unsigned flags);

#endif //DOXYGEN_SHOULD_SKIP_THIS

template<>
Fault
AtomicSimpleCPU::read(Addr addr, double &data, unsigned flags)
{
    return read(addr, *(uint64_t*)&data, flags);
}

template<>
Fault
AtomicSimpleCPU::read(Addr addr, float &data, unsigned flags)
{
    return read(addr, *(uint32_t*)&data, flags);
}


template<>
Fault
AtomicSimpleCPU::read(Addr addr, int32_t &data, unsigned flags)
{
    return read(addr, (uint32_t&)data, flags);
}

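// Atomic write of a value of type T to virtual address 'addr'.  Like
// read(), an access that spans a cache line boundary is handled in two
// pieces on successive loop iterations.  Store-conditionals (locked
// requests) and swaps are mapped to the corresponding memory commands;
// a store-conditional that fails locally skips the memory access
// entirely, and the result is reported back through 'res'.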

template <class T>
Fault
AtomicSimpleCPU::write(T data, Addr addr, unsigned flags, uint64_t *res)
{
    // use the CPU's statically allocated write request and packet objects
    Request *req = &data_write_req;

    if (traceData) {
        traceData->setAddr(addr);
    }

    //The block size of our peer.
    int blockSize = dcachePort.peerBlockSize();
    //The size of the data we're trying to write.
    int dataSize = sizeof(T);

    uint8_t * dataPtr = (uint8_t *)&data;

    //The address of the second part of this access if it needs to be split
    //across a cache line boundary.
    Addr secondAddr = roundDown(addr + dataSize - 1, blockSize);

    if(secondAddr > addr)
        dataSize = secondAddr - addr;

    dcache_latency = 0;

    while(1) {
        req->setVirt(0, addr, dataSize, flags, thread->readPC());

        // translate to physical address
        Fault fault = thread->translateDataWriteReq(req);

        // Now do the access.
        if (fault == NoFault) {
            MemCmd cmd = MemCmd::WriteReq; // default
            bool do_access = true;  // flag to suppress cache access

            if (req->isLocked()) {
                cmd = MemCmd::StoreCondReq;
                do_access = TheISA::handleLockedWrite(thread, req);
            } else if (req->isSwap()) {
                cmd = MemCmd::SwapReq;
                if (req->isCondSwap()) {
                    assert(res);
                    req->setExtraData(*res);
                }
            }

            if (do_access) {
                Packet pkt = Packet(req, cmd, Packet::Broadcast);
                pkt.dataStatic(dataPtr);

                if (req->isMmapedIpr()) {
                    dcache_latency +=
                        TheISA::handleIprWrite(thread->getTC(), &pkt);
                } else {
                    //XXX This needs to be outside of the loop in order to
                    //work properly for cache line boundary crossing
                    //accesses in transendian simulations.
                    data = htog(data);
                    if (hasPhysMemPort && pkt.getAddr() == physMemAddr)
                        dcache_latency += physmemPort.sendAtomic(&pkt);
                    else
                        dcache_latency += dcachePort.sendAtomic(&pkt);
                }
                dcache_access = true;
                assert(!pkt.isError());

                if (req->isSwap()) {
                    assert(res);
                    *res = pkt.get<T>();
                }
            }

            if (res && !req->isSwap()) {
                *res = req->getExtraData();
            }
        }

        // This will need a new way to tell if it's hooked up to a cache or not.
        if (req->isUncacheable())
            recordEvent("Uncached Write");

        //If there's a fault or we don't need to access a second cache line,
        //stop now.
        if (fault != NoFault || secondAddr <= addr)
        {
            // If the write needs to have a fault on the access, consider
            // calling changeStatus() and changing it to "bad addr write"
            // or something.
            if (traceData) {
                traceData->setData(data);
            }
            return fault;
        }

        /*
         * Set up for accessing the second cache line.
         */

        //Move the pointer we're writing from to the correct location.
        dataPtr += dataSize;
        //Adjust the size to get the remaining bytes.
        dataSize = addr + sizeof(T) - secondAddr;
        //And access the right address.
        addr = secondAddr;
    }
}

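// Translation-only counterpart of write(): it walks the same
// split-access sequence to translate each piece, returns the physical
// address of the first piece through 'paddr', and performs no memory
// access.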
Fault
AtomicSimpleCPU::translateDataWriteAddr(Addr vaddr, Addr &paddr,
        int size, unsigned flags)
{
    // use the CPU's statically allocated write request and packet objects
    Request *req = &data_write_req;

    if (traceData) {
        traceData->setAddr(vaddr);
    }

    //The block size of our peer.
    int blockSize = dcachePort.peerBlockSize();

    //The address of the second part of this access if it needs to be split
    //across a cache line boundary.
    Addr secondAddr = roundDown(vaddr + size - 1, blockSize);

    //The size of the data we're trying to write.
    int dataSize = size;

    bool firstTimeThrough = true;

    if(secondAddr > vaddr)
        dataSize = secondAddr - vaddr;

    dcache_latency = 0;

    while(1) {
        req->setVirt(0, vaddr, dataSize, flags, thread->readPC());

        // translate to physical address
        Fault fault = thread->translateDataWriteReq(req);

        //If there's a fault, return it
        if (fault != NoFault)
            return fault;

        if (firstTimeThrough) {
            paddr = req->getPaddr();
            firstTimeThrough = false;
        }

        //If we don't need to access a second cache line, stop now.
        if (secondAddr <= vaddr)
            return fault;

        /*
         * Set up for accessing the second cache line.
         */

        //Adjust the size to get the remaining bytes.
        dataSize = vaddr + size - secondAddr;
        //And access the right address.
        vaddr = secondAddr;
    }
}


#ifndef DOXYGEN_SHOULD_SKIP_THIS

template
Fault
AtomicSimpleCPU::write(Twin32_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
AtomicSimpleCPU::write(Twin64_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
AtomicSimpleCPU::write(uint64_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
AtomicSimpleCPU::write(uint32_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
AtomicSimpleCPU::write(uint16_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
AtomicSimpleCPU::write(uint8_t data, Addr addr,
                       unsigned flags, uint64_t *res);

#endif //DOXYGEN_SHOULD_SKIP_THIS

template<>
Fault
AtomicSimpleCPU::write(double data, Addr addr, unsigned flags, uint64_t *res)
{
    return write(*(uint64_t*)&data, addr, flags, res);
}

template<>
Fault
AtomicSimpleCPU::write(float data, Addr addr, unsigned flags, uint64_t *res)
{
    return write(*(uint32_t*)&data, addr, flags, res);
}


template<>
Fault
AtomicSimpleCPU::write(int32_t data, Addr addr, unsigned flags, uint64_t *res)
{
    return write((uint32_t)data, addr, flags, res);
}

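// Main simulation loop of the atomic CPU.  Each invocation executes up
// to 'width' instructions: interrupts and PC events are checked, the
// instruction is fetched with an atomic access (unless it comes from
// the microcode ROM), then executed.  When stall simulation is enabled,
// the icache/dcache latencies returned by the atomic accesses are
// folded into this tick's latency, and the tick event is rescheduled
// at least one full cycle later as long as the CPU is not idle.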
void
AtomicSimpleCPU::tick()
{
    DPRINTF(SimpleCPU, "Tick\n");

    Tick latency = 0;

    for (int i = 0; i < width; ++i) {
        numCycles++;

        if (!curStaticInst || !curStaticInst->isDelayedCommit())
            checkForInterrupts();

        checkPcEventQueue();

        Fault fault = NoFault;

        bool fromRom = isRomMicroPC(thread->readMicroPC());
        if (!fromRom)
            fault = setupFetchRequest(&ifetch_req);

        if (fault == NoFault) {
            Tick icache_latency = 0;
            bool icache_access = false;
            dcache_access = false; // assume no dcache access

            if (!fromRom) {
                // This is commented out because the predecoder would act like
                // a tiny cache otherwise. It wouldn't be flushed when needed
                // like the I cache. It should be flushed, and when that works
                // this code should be uncommented.
                //Fetch more instruction memory if necessary
                //if(predecoder.needMoreBytes())
                //{
                    icache_access = true;
                    Packet ifetch_pkt = Packet(&ifetch_req, MemCmd::ReadReq,
                                               Packet::Broadcast);
                    ifetch_pkt.dataStatic(&inst);

                    if (hasPhysMemPort && ifetch_pkt.getAddr() == physMemAddr)
                        icache_latency = physmemPort.sendAtomic(&ifetch_pkt);
                    else
                        icache_latency = icachePort.sendAtomic(&ifetch_pkt);

                    assert(!ifetch_pkt.isError());

                    // ifetch_req is initialized to read the instruction
                    // directly into the CPU object's inst field.
                //}
            }

            preExecute();

            if (curStaticInst) {
                fault = curStaticInst->execute(this, traceData);

                // keep an instruction count
                if (fault == NoFault)
                    countInst();
                else if (traceData) {
                    // If there was a fault, we shouldn't trace this instruction.
                    delete traceData;
                    traceData = NULL;
                }

                postExecute();
            }

            // @todo remove me after debugging with legion done
            if (curStaticInst && (!curStaticInst->isMicroop() ||
                        curStaticInst->isFirstMicroop()))
                instCnt++;

            Tick stall_ticks = 0;
            if (simulate_inst_stalls && icache_access)
                stall_ticks += icache_latency;

            if (simulate_data_stalls && dcache_access)
                stall_ticks += dcache_latency;

            if (stall_ticks) {
                Tick stall_cycles = stall_ticks / ticks(1);
                Tick aligned_stall_ticks = ticks(stall_cycles);

                if (aligned_stall_ticks < stall_ticks)
                    aligned_stall_ticks += 1;

                latency += aligned_stall_ticks;
            }

        }
        if(fault != NoFault || !stayAtPC)
            advancePC(fault);
    }

    // instruction takes at least one cycle
    if (latency < ticks(1))
        latency = ticks(1);

    if (_status != Idle)
        schedule(tickEvent, curTick + latency);
}


void
AtomicSimpleCPU::printAddr(Addr a)
{
    dcachePort.printAddr(a);
}


////////////////////////////////////////////////////////////////////////
//
//  AtomicSimpleCPU Simulation Object
//
AtomicSimpleCPU *
AtomicSimpleCPUParams::create()
{
    numThreads = 1;
#if !FULL_SYSTEM
    if (workload.size() != 1)
        panic("only one workload allowed");
#endif
    return new AtomicSimpleCPU(this);
}