atomic.cc revision 5694
/*
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 */

#include "arch/locked_mem.hh"
#include "arch/mmaped_ipr.hh"
#include "arch/utility.hh"
#include "base/bigint.hh"
#include "cpu/exetrace.hh"
#include "cpu/simple/atomic.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "params/AtomicSimpleCPU.hh"
#include "sim/system.hh"

using namespace std;
using namespace TheISA;

AtomicSimpleCPU::TickEvent::TickEvent(AtomicSimpleCPU *c)
    : Event(CPU_Tick_Pri), cpu(c)
{
}

void
AtomicSimpleCPU::TickEvent::process()
{
    cpu->tick();
}

const char *
AtomicSimpleCPU::TickEvent::description() const
{
    return "AtomicSimpleCPU tick";
}

Port *
AtomicSimpleCPU::getPort(const std::string &if_name, int idx)
{
    if (if_name == "dcache_port")
        return &dcachePort;
    else if (if_name == "icache_port")
        return &icachePort;
    else if (if_name == "physmem_port") {
        hasPhysMemPort = true;
        return &physmemPort;
    }
    else
        panic("No such port: %s\n", if_name);
}

void
AtomicSimpleCPU::init()
{
    BaseCPU::init();
    cpuId = tc->readCpuId();
#if FULL_SYSTEM
    for (int i = 0; i < threadContexts.size(); ++i) {
        ThreadContext *tc = threadContexts[i];

        // initialize CPU, including PC
        TheISA::initCPU(tc, cpuId);
    }
#endif
    if (hasPhysMemPort) {
        bool snoop = false;
        AddrRangeList pmAddrList;
        physmemPort.getPeerAddressRanges(pmAddrList, snoop);
        physMemAddr = *pmAddrList.begin();
    }
    ifetch_req.setThreadContext(cpuId, 0); // Add thread ID if we add MT
    data_read_req.setThreadContext(cpuId, 0); // Add thread ID here too
    data_write_req.setThreadContext(cpuId, 0); // Add thread ID here too
}

bool
AtomicSimpleCPU::CpuPort::recvTiming(PacketPtr pkt)
{
    panic("AtomicSimpleCPU doesn't expect recvTiming callback!");
    return true;
}
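
// Inbound atomic snoops and functional accesses need no action from this
// CPU: it drives all of its own accesses with sendAtomic(), and it keeps
// no internal memory state that a snoop or functional write could touch.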

Tick
AtomicSimpleCPU::CpuPort::recvAtomic(PacketPtr pkt)
{
    //Snooping a coherence request, just return
    return 0;
}

void
AtomicSimpleCPU::CpuPort::recvFunctional(PacketPtr pkt)
{
    //No internal storage to update, just return
    return;
}

void
AtomicSimpleCPU::CpuPort::recvStatusChange(Status status)
{
    if (status == RangeChange) {
        if (!snoopRangeSent) {
            snoopRangeSent = true;
            sendStatusChange(Port::RangeChange);
        }
        return;
    }

    panic("AtomicSimpleCPU doesn't expect recvStatusChange callback!");
}

void
AtomicSimpleCPU::CpuPort::recvRetry()
{
    panic("AtomicSimpleCPU doesn't expect recvRetry callback!");
}

void
AtomicSimpleCPU::DcachePort::setPeer(Port *port)
{
    Port::setPeer(port);

#if FULL_SYSTEM
    // Update the ThreadContext's memory ports (Functional/Virtual
    // Ports)
    cpu->tcBase()->connectMemPorts(cpu->tcBase());
#endif
}

AtomicSimpleCPU::AtomicSimpleCPU(AtomicSimpleCPUParams *p)
    : BaseSimpleCPU(p), tickEvent(this), width(p->width),
      simulate_data_stalls(p->simulate_data_stalls),
      simulate_inst_stalls(p->simulate_inst_stalls),
      icachePort(name() + "-iport", this), dcachePort(name() + "-dport", this),
      physmemPort(name() + "-pport", this), hasPhysMemPort(false)
{
    _status = Idle;

    icachePort.snoopRangeSent = false;
    dcachePort.snoopRangeSent = false;
}

AtomicSimpleCPU::~AtomicSimpleCPU()
{
}

void
AtomicSimpleCPU::serialize(ostream &os)
{
    SimObject::State so_state = SimObject::getState();
    SERIALIZE_ENUM(so_state);
    BaseSimpleCPU::serialize(os);
    nameOut(os, csprintf("%s.tickEvent", name()));
    tickEvent.serialize(os);
}

void
AtomicSimpleCPU::unserialize(Checkpoint *cp, const string &section)
{
    SimObject::State so_state;
    UNSERIALIZE_ENUM(so_state);
    BaseSimpleCPU::unserialize(cp, section);
    tickEvent.unserialize(cp, csprintf("%s.tickEvent", section));
}

void
AtomicSimpleCPU::resume()
{
    if (_status == Idle || _status == SwitchedOut)
        return;

    DPRINTF(SimpleCPU, "Resume\n");
    assert(system->getMemoryMode() == Enums::atomic);

    changeState(SimObject::Running);
    if (thread->status() == ThreadContext::Active) {
        if (!tickEvent.scheduled())
            schedule(tickEvent, nextCycle());
    }
}

void
AtomicSimpleCPU::switchOut()
{
    assert(_status == Running || _status == Idle);
    _status = SwitchedOut;

    tickEvent.squash();
}

void
AtomicSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
{
    BaseCPU::takeOverFrom(oldCPU, &icachePort, &dcachePort);

    assert(!tickEvent.scheduled());
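
    // Nothing can be in flight across an atomic-mode switchover, so all
    // that needs refreshing here is the tick event and the cpu id stamped
    // into the preallocated fetch/read/write requests below.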

    // if any of this CPU's ThreadContexts are active, mark the CPU as
    // running and schedule its tick event.
    for (int i = 0; i < threadContexts.size(); ++i) {
        ThreadContext *tc = threadContexts[i];
        if (tc->status() == ThreadContext::Active && _status != Running) {
            _status = Running;
            schedule(tickEvent, nextCycle());
            break;
        }
    }
    if (_status != Running) {
        _status = Idle;
    }
    assert(threadContexts.size() == 1);
    cpuId = tc->readCpuId();
    ifetch_req.setThreadContext(cpuId, 0); // Add thread ID if we add MT
    data_read_req.setThreadContext(cpuId, 0); // Add thread ID here too
    data_write_req.setThreadContext(cpuId, 0); // Add thread ID here too
}

void
AtomicSimpleCPU::activateContext(int thread_num, int delay)
{
    DPRINTF(SimpleCPU, "ActivateContext %d (%d cycles)\n", thread_num, delay);

    assert(thread_num == 0);
    assert(thread);

    assert(_status == Idle);
    assert(!tickEvent.scheduled());

    notIdleFraction++;
    numCycles += tickToCycles(thread->lastActivate - thread->lastSuspend);

    //Make sure ticks are still on multiples of cycles
    schedule(tickEvent, nextCycle(curTick + ticks(delay)));
    _status = Running;
}

void
AtomicSimpleCPU::suspendContext(int thread_num)
{
    DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);

    assert(thread_num == 0);
    assert(thread);

    assert(_status == Running);

    // tick event may not be scheduled if this gets called from inside
    // an instruction's execution, e.g. "quiesce"
    if (tickEvent.scheduled())
        deschedule(tickEvent);

    notIdleFraction--;
    _status = Idle;
}

template <class T>
Fault
AtomicSimpleCPU::read(Addr addr, T &data, unsigned flags)
{
    // use the CPU's statically allocated read request and packet objects
    Request *req = &data_read_req;

    if (traceData) {
        traceData->setAddr(addr);
    }

    //The block size of our peer.
    int blockSize = dcachePort.peerBlockSize();
    //The size of the data we're trying to read.
    int dataSize = sizeof(T);

    uint8_t *dataPtr = (uint8_t *)&data;

    //The address of the second part of this access if it needs to be split
    //across a cache line boundary.
    Addr secondAddr = roundDown(addr + dataSize - 1, blockSize);

    if (secondAddr > addr)
        dataSize = secondAddr - addr;

    dcache_latency = 0;

    while (1) {
        req->setVirt(0, addr, dataSize, flags, thread->readPC());

        // translate to physical address
        Fault fault = thread->translateDataReadReq(req);

        // Now do the access.
        if (fault == NoFault) {
            Packet pkt = Packet(req,
                    req->isLocked() ? MemCmd::LoadLockedReq : MemCmd::ReadReq,
                    Packet::Broadcast);
            pkt.dataStatic(dataPtr);

            if (req->isMmapedIpr())
                dcache_latency += TheISA::handleIprRead(thread->getTC(), &pkt);
            else {
                if (hasPhysMemPort && pkt.getAddr() == physMemAddr)
                    dcache_latency += physmemPort.sendAtomic(&pkt);
                else
                    dcache_latency += dcachePort.sendAtomic(&pkt);
            }
            dcache_access = true;

            assert(!pkt.isError());

            if (req->isLocked()) {
                TheISA::handleLockedRead(thread, req);
            }
        }

        // This will need a new way to tell if it has a dcache attached.
        if (req->isUncacheable())
            recordEvent("Uncached Read");

        //If there's a fault, return it
        if (fault != NoFault)
            return fault;
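
        // Worked example (hypothetical numbers): with blockSize == 64,
        // addr == 0x3e and sizeof(T) == 8, secondAddr is
        // roundDown(0x45, 64) == 0x40, so the first iteration reads the
        // two bytes at 0x3e-0x3f and the second iteration reads the
        // remaining six starting at 0x40, where secondAddr <= addr
        // terminates the loop.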

        //If we don't need to access a second cache line, stop now.
        if (secondAddr <= addr) {
            data = gtoh(data);
            if (traceData) {
                traceData->setData(data);
            }
            return fault;
        }

        /*
         * Set up for accessing the second cache line.
         */

        //Move the pointer we're reading into to the correct location.
        dataPtr += dataSize;
        //Adjust the size to get the remaining bytes.
        dataSize = addr + sizeof(T) - secondAddr;
        //And access the right address.
        addr = secondAddr;
    }
}

Fault
AtomicSimpleCPU::translateDataReadAddr(Addr vaddr, Addr &paddr,
        int size, unsigned flags)
{
    // use the CPU's statically allocated read request and packet objects
    Request *req = &data_read_req;

    if (traceData) {
        traceData->setAddr(vaddr);
    }

    //The block size of our peer.
    int blockSize = dcachePort.peerBlockSize();
    //The size of the data we're trying to read.
    int dataSize = size;

    bool firstTimeThrough = true;

    //The address of the second part of this access if it needs to be split
    //across a cache line boundary.
    Addr secondAddr = roundDown(vaddr + dataSize - 1, blockSize);

    if (secondAddr > vaddr)
        dataSize = secondAddr - vaddr;

    while (1) {
        req->setVirt(0, vaddr, dataSize, flags, thread->readPC());

        // translate to physical address
        Fault fault = thread->translateDataReadReq(req);

        //If there's a fault, return it
        if (fault != NoFault)
            return fault;

        if (firstTimeThrough) {
            paddr = req->getPaddr();
            firstTimeThrough = false;
        }

        //If we don't need to access a second cache line, stop now.
        if (secondAddr <= vaddr)
            return fault;

        /*
         * Set up for accessing the second cache line.
         */

        //Adjust the size to get the remaining bytes.
        dataSize = vaddr + size - secondAddr;
        //And access the right address.
        vaddr = secondAddr;
    }
}

#ifndef DOXYGEN_SHOULD_SKIP_THIS

template
Fault
AtomicSimpleCPU::read(Addr addr, Twin32_t &data, unsigned flags);

template
Fault
AtomicSimpleCPU::read(Addr addr, Twin64_t &data, unsigned flags);

template
Fault
AtomicSimpleCPU::read(Addr addr, uint64_t &data, unsigned flags);

template
Fault
AtomicSimpleCPU::read(Addr addr, uint32_t &data, unsigned flags);

template
Fault
AtomicSimpleCPU::read(Addr addr, uint16_t &data, unsigned flags);

template
Fault
AtomicSimpleCPU::read(Addr addr, uint8_t &data, unsigned flags);

#endif //DOXYGEN_SHOULD_SKIP_THIS

template<>
Fault
AtomicSimpleCPU::read(Addr addr, double &data, unsigned flags)
{
    return read(addr, *(uint64_t*)&data, flags);
}

template<>
Fault
AtomicSimpleCPU::read(Addr addr, float &data, unsigned flags)
{
    return read(addr, *(uint32_t*)&data, flags);
}

template<>
Fault
AtomicSimpleCPU::read(Addr addr, int32_t &data, unsigned flags)
{
    return read(addr, (uint32_t&)data, flags);
}
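
// Writes follow the same split-access pattern as read() above, with two
// extra cases picked out below: a locked (store-conditional) request may
// be suppressed entirely when TheISA::handleLockedWrite() fails it, and
// a swap request carries its comparison value in the request's extra
// data and returns the old memory value through *res.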

template <class T>
Fault
AtomicSimpleCPU::write(T data, Addr addr, unsigned flags, uint64_t *res)
{
    // use the CPU's statically allocated write request and packet objects
    Request *req = &data_write_req;

    if (traceData) {
        traceData->setAddr(addr);
    }

    //The block size of our peer.
    int blockSize = dcachePort.peerBlockSize();
    //The size of the data we're trying to write.
    int dataSize = sizeof(T);

    uint8_t *dataPtr = (uint8_t *)&data;

    //The address of the second part of this access if it needs to be split
    //across a cache line boundary.
    Addr secondAddr = roundDown(addr + dataSize - 1, blockSize);

    if (secondAddr > addr)
        dataSize = secondAddr - addr;

    dcache_latency = 0;

    while (1) {
        req->setVirt(0, addr, dataSize, flags, thread->readPC());

        // translate to physical address
        Fault fault = thread->translateDataWriteReq(req);

        // Now do the access.
        if (fault == NoFault) {
            MemCmd cmd = MemCmd::WriteReq; // default
            bool do_access = true;  // flag to suppress cache access

            if (req->isLocked()) {
                cmd = MemCmd::StoreCondReq;
                do_access = TheISA::handleLockedWrite(thread, req);
            } else if (req->isSwap()) {
                cmd = MemCmd::SwapReq;
                if (req->isCondSwap()) {
                    assert(res);
                    req->setExtraData(*res);
                }
            }

            if (do_access) {
                Packet pkt = Packet(req, cmd, Packet::Broadcast);
                pkt.dataStatic(dataPtr);

                if (req->isMmapedIpr()) {
                    dcache_latency +=
                        TheISA::handleIprWrite(thread->getTC(), &pkt);
                } else {
                    //XXX This needs to be outside of the loop in order to
                    //work properly for cache line boundary crossing
                    //accesses in transendian simulations.
                    data = htog(data);
                    if (hasPhysMemPort && pkt.getAddr() == physMemAddr)
                        dcache_latency += physmemPort.sendAtomic(&pkt);
                    else
                        dcache_latency += dcachePort.sendAtomic(&pkt);
                }
                dcache_access = true;
                assert(!pkt.isError());

                if (req->isSwap()) {
                    assert(res);
                    *res = pkt.get<T>();
                }
            }

            if (res && !req->isSwap()) {
                *res = req->getExtraData();
            }
        }

        // This will need a new way to tell if it's hooked up to a cache or not.
        if (req->isUncacheable())
            recordEvent("Uncached Write");

        //If there's a fault or we don't need to access a second cache line,
        //stop now.
        if (fault != NoFault || secondAddr <= addr) {
            // If the write needs to have a fault on the access, consider
            // calling changeStatus() and changing it to "bad addr write"
            // or something.
            if (traceData) {
                traceData->setData(data);
            }
            return fault;
        }

        /*
         * Set up for accessing the second cache line.
         */

        //Move the pointer we're writing from to the correct location.
        dataPtr += dataSize;
        //Adjust the size to get the remaining bytes.
        dataSize = addr + sizeof(T) - secondAddr;
        //And access the right address.
        addr = secondAddr;
    }
}
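
// Like translateDataReadAddr() above, this translates a write's virtual
// address without performing the access, walking the same cache-line
// split so each piece gets its own translation; paddr is set from the
// first (lowest-addressed) piece only.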

Fault
AtomicSimpleCPU::translateDataWriteAddr(Addr vaddr, Addr &paddr,
        int size, unsigned flags)
{
    // use the CPU's statically allocated write request and packet objects
    Request *req = &data_write_req;

    if (traceData) {
        traceData->setAddr(vaddr);
    }

    //The block size of our peer.
    int blockSize = dcachePort.peerBlockSize();

    //The address of the second part of this access if it needs to be split
    //across a cache line boundary.
    Addr secondAddr = roundDown(vaddr + size - 1, blockSize);

    //The size of the data we're trying to write.
    int dataSize = size;

    bool firstTimeThrough = true;

    if (secondAddr > vaddr)
        dataSize = secondAddr - vaddr;

    dcache_latency = 0;

    while (1) {
        req->setVirt(0, vaddr, dataSize, flags, thread->readPC());

        // translate to physical address
        Fault fault = thread->translateDataWriteReq(req);

        //If there's a fault, return it
        if (fault != NoFault)
            return fault;

        if (firstTimeThrough) {
            paddr = req->getPaddr();
            firstTimeThrough = false;
        }

        //If we don't need to access a second cache line, stop now.
        if (secondAddr <= vaddr)
            return fault;

        /*
         * Set up for accessing the second cache line.
         */

        //Adjust the size to get the remaining bytes.
        dataSize = vaddr + size - secondAddr;
        //And access the right address.
        vaddr = secondAddr;
    }
}

#ifndef DOXYGEN_SHOULD_SKIP_THIS

template
Fault
AtomicSimpleCPU::write(Twin32_t data, Addr addr,
        unsigned flags, uint64_t *res);

template
Fault
AtomicSimpleCPU::write(Twin64_t data, Addr addr,
        unsigned flags, uint64_t *res);

template
Fault
AtomicSimpleCPU::write(uint64_t data, Addr addr,
        unsigned flags, uint64_t *res);

template
Fault
AtomicSimpleCPU::write(uint32_t data, Addr addr,
        unsigned flags, uint64_t *res);

template
Fault
AtomicSimpleCPU::write(uint16_t data, Addr addr,
        unsigned flags, uint64_t *res);

template
Fault
AtomicSimpleCPU::write(uint8_t data, Addr addr,
        unsigned flags, uint64_t *res);

#endif //DOXYGEN_SHOULD_SKIP_THIS

template<>
Fault
AtomicSimpleCPU::write(double data, Addr addr, unsigned flags, uint64_t *res)
{
    return write(*(uint64_t*)&data, addr, flags, res);
}

template<>
Fault
AtomicSimpleCPU::write(float data, Addr addr, unsigned flags, uint64_t *res)
{
    return write(*(uint32_t*)&data, addr, flags, res);
}

template<>
Fault
AtomicSimpleCPU::write(int32_t data, Addr addr, unsigned flags, uint64_t *res)
{
    return write((uint32_t)data, addr, flags, res);
}
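
// One invocation of tick() models up to 'width' instructions back to
// back: for each one, check for interrupts, fetch (unless executing from
// the microcode ROM), execute, and optionally fold the icache/dcache
// atomic latencies into this tick's length before rescheduling.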

void
AtomicSimpleCPU::tick()
{
    DPRINTF(SimpleCPU, "Tick\n");

    Tick latency = 0;

    for (int i = 0; i < width; ++i) {
        numCycles++;

        if (!curStaticInst || !curStaticInst->isDelayedCommit())
            checkForInterrupts();

        checkPcEventQueue();

        Fault fault = NoFault;

        bool fromRom = isRomMicroPC(thread->readMicroPC());
        if (!fromRom)
            fault = setupFetchRequest(&ifetch_req);

        if (fault == NoFault) {
            Tick icache_latency = 0;
            bool icache_access = false;
            dcache_access = false; // assume no dcache access

            if (!fromRom) {
                // This is commented out because the predecoder would act like
                // a tiny cache otherwise. It wouldn't be flushed when needed
                // like the I cache. It should be flushed, and when that works
                // this code should be uncommented.
                //Fetch more instruction memory if necessary
                //if(predecoder.needMoreBytes())
                //{
                icache_access = true;
                Packet ifetch_pkt = Packet(&ifetch_req, MemCmd::ReadReq,
                        Packet::Broadcast);
                ifetch_pkt.dataStatic(&inst);

                if (hasPhysMemPort && ifetch_pkt.getAddr() == physMemAddr)
                    icache_latency = physmemPort.sendAtomic(&ifetch_pkt);
                else
                    icache_latency = icachePort.sendAtomic(&ifetch_pkt);

                assert(!ifetch_pkt.isError());

                // ifetch_req is initialized to read the instruction directly
                // into the CPU object's inst field.
                //}
            }

            preExecute();

            if (curStaticInst) {
                fault = curStaticInst->execute(this, traceData);

                // keep an instruction count
                if (fault == NoFault)
                    countInst();
                else if (traceData) {
                    // If there was a fault, we shouldn't trace this
                    // instruction.
                    delete traceData;
                    traceData = NULL;
                }

                postExecute();
            }

            // @todo remove me after debugging with legion done
            if (curStaticInst && (!curStaticInst->isMicroop() ||
                        curStaticInst->isFirstMicroop()))
                instCnt++;

            Tick stall_ticks = 0;
            if (simulate_inst_stalls && icache_access)
                stall_ticks += icache_latency;

            if (simulate_data_stalls && dcache_access)
                stall_ticks += dcache_latency;

            if (stall_ticks) {
                Tick stall_cycles = stall_ticks / ticks(1);
                Tick aligned_stall_ticks = ticks(stall_cycles);

                // round a partial cycle up to the next full cycle so the
                // stall stays a whole number of cycles
                if (aligned_stall_ticks < stall_ticks)
                    aligned_stall_ticks += ticks(1);

                latency += aligned_stall_ticks;
            }
        }

        if (fault != NoFault || !stayAtPC)
            advancePC(fault);
    }

    // instruction takes at least one cycle
    if (latency < ticks(1))
        latency = ticks(1);

    if (_status != Idle)
        schedule(tickEvent, curTick + latency);
}

void
AtomicSimpleCPU::printAddr(Addr a)
{
    dcachePort.printAddr(a);
}


////////////////////////////////////////////////////////////////////////
//
//  AtomicSimpleCPU Simulation Object
//
AtomicSimpleCPU *
AtomicSimpleCPUParams::create()
{
    numThreads = 1;
#if !FULL_SYSTEM
    if (workload.size() != 1)
        panic("only one workload allowed");
#endif
    return new AtomicSimpleCPU(this);
}