atomic.cc revision 5497
/*
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 */

#include "arch/locked_mem.hh"
#include "arch/mmaped_ipr.hh"
#include "arch/utility.hh"
#include "base/bigint.hh"
#include "cpu/exetrace.hh"
#include "cpu/simple/atomic.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "params/AtomicSimpleCPU.hh"
#include "sim/system.hh"

using namespace std;
using namespace TheISA;

AtomicSimpleCPU::TickEvent::TickEvent(AtomicSimpleCPU *c)
    : Event(&mainEventQueue, CPU_Tick_Pri), cpu(c)
{
}


void
AtomicSimpleCPU::TickEvent::process()
{
    cpu->tick();
}

const char *
AtomicSimpleCPU::TickEvent::description() const
{
    return "AtomicSimpleCPU tick";
}

Port *
AtomicSimpleCPU::getPort(const std::string &if_name, int idx)
{
    if (if_name == "dcache_port")
        return &dcachePort;
    else if (if_name == "icache_port")
        return &icachePort;
    else if (if_name == "physmem_port") {
        hasPhysMemPort = true;
        return &physmemPort;
    }
    else
        panic("No such port\n");
}

void
AtomicSimpleCPU::init()
{
    BaseCPU::init();
    cpuId = tc->readCpuId();
#if FULL_SYSTEM
    for (int i = 0; i < threadContexts.size(); ++i) {
        ThreadContext *tc = threadContexts[i];

        // initialize CPU, including PC
        TheISA::initCPU(tc, cpuId);
    }
#endif
    if (hasPhysMemPort) {
        bool snoop = false;
        AddrRangeList pmAddrList;
        physmemPort.getPeerAddressRanges(pmAddrList, snoop);
        physMemAddr = *pmAddrList.begin();
    }
    ifetch_req.setThreadContext(cpuId, 0); // Add thread ID if we add MT
    data_read_req.setThreadContext(cpuId, 0); // Add thread ID here too
    data_write_req.setThreadContext(cpuId, 0); // Add thread ID here too
}
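// The port callbacks below reflect the atomic memory mode: this CPU
// issues all of its accesses synchronously through sendAtomic(), so
// timing-mode callbacks (recvTiming, recvRetry) should never arrive
// and panic if they do.  Atomic snoops and functional accesses are
// accepted but ignored, since the CPU has no internal caching state
// to update.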
callback!"); 106 return true; 107} 108 109Tick 110AtomicSimpleCPU::CpuPort::recvAtomic(PacketPtr pkt) 111{ 112 //Snooping a coherence request, just return 113 return 0; 114} 115 116void 117AtomicSimpleCPU::CpuPort::recvFunctional(PacketPtr pkt) 118{ 119 //No internal storage to update, just return 120 return; 121} 122 123void 124AtomicSimpleCPU::CpuPort::recvStatusChange(Status status) 125{ 126 if (status == RangeChange) { 127 if (!snoopRangeSent) { 128 snoopRangeSent = true; 129 sendStatusChange(Port::RangeChange); 130 } 131 return; 132 } 133 134 panic("AtomicSimpleCPU doesn't expect recvStatusChange callback!"); 135} 136 137void 138AtomicSimpleCPU::CpuPort::recvRetry() 139{ 140 panic("AtomicSimpleCPU doesn't expect recvRetry callback!"); 141} 142 143void 144AtomicSimpleCPU::DcachePort::setPeer(Port *port) 145{ 146 Port::setPeer(port); 147 148#if FULL_SYSTEM 149 // Update the ThreadContext's memory ports (Functional/Virtual 150 // Ports) 151 cpu->tcBase()->connectMemPorts(cpu->tcBase()); 152#endif 153} 154 155AtomicSimpleCPU::AtomicSimpleCPU(Params *p) 156 : BaseSimpleCPU(p), tickEvent(this), width(p->width), 157 simulate_data_stalls(p->simulate_data_stalls), 158 simulate_inst_stalls(p->simulate_inst_stalls), 159 icachePort(name() + "-iport", this), dcachePort(name() + "-iport", this), 160 physmemPort(name() + "-iport", this), hasPhysMemPort(false) 161{ 162 _status = Idle; 163 164 icachePort.snoopRangeSent = false; 165 dcachePort.snoopRangeSent = false; 166 167} 168 169 170AtomicSimpleCPU::~AtomicSimpleCPU() 171{ 172} 173 174void 175AtomicSimpleCPU::serialize(ostream &os) 176{ 177 SimObject::State so_state = SimObject::getState(); 178 SERIALIZE_ENUM(so_state); 179 BaseSimpleCPU::serialize(os); 180 nameOut(os, csprintf("%s.tickEvent", name())); 181 tickEvent.serialize(os); 182} 183 184void 185AtomicSimpleCPU::unserialize(Checkpoint *cp, const string §ion) 186{ 187 SimObject::State so_state; 188 UNSERIALIZE_ENUM(so_state); 189 BaseSimpleCPU::unserialize(cp, section); 190 tickEvent.unserialize(cp, csprintf("%s.tickEvent", section)); 191} 192 193void 194AtomicSimpleCPU::resume() 195{ 196 if (_status == Idle || _status == SwitchedOut) 197 return; 198 199 DPRINTF(SimpleCPU, "Resume\n"); 200 assert(system->getMemoryMode() == Enums::atomic); 201 202 changeState(SimObject::Running); 203 if (thread->status() == ThreadContext::Active) { 204 if (!tickEvent.scheduled()) { 205 tickEvent.schedule(nextCycle()); 206 } 207 } 208} 209 210void 211AtomicSimpleCPU::switchOut() 212{ 213 assert(_status == Running || _status == Idle); 214 _status = SwitchedOut; 215 216 tickEvent.squash(); 217} 218 219 220void 221AtomicSimpleCPU::takeOverFrom(BaseCPU *oldCPU) 222{ 223 BaseCPU::takeOverFrom(oldCPU, &icachePort, &dcachePort); 224 225 assert(!tickEvent.scheduled()); 226 227 // if any of this CPU's ThreadContexts are active, mark the CPU as 228 // running and schedule its tick event. 
void
AtomicSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
{
    BaseCPU::takeOverFrom(oldCPU, &icachePort, &dcachePort);

    assert(!tickEvent.scheduled());

    // if any of this CPU's ThreadContexts are active, mark the CPU as
    // running and schedule its tick event.
    for (int i = 0; i < threadContexts.size(); ++i) {
        ThreadContext *tc = threadContexts[i];
        if (tc->status() == ThreadContext::Active && _status != Running) {
            _status = Running;
            tickEvent.schedule(nextCycle());
            break;
        }
    }
    if (_status != Running) {
        _status = Idle;
    }
    assert(threadContexts.size() == 1);
    cpuId = tc->readCpuId();
    ifetch_req.setThreadContext(cpuId, 0); // Add thread ID if we add MT
    data_read_req.setThreadContext(cpuId, 0); // Add thread ID here too
    data_write_req.setThreadContext(cpuId, 0); // Add thread ID here too
}


void
AtomicSimpleCPU::activateContext(int thread_num, int delay)
{
    DPRINTF(SimpleCPU, "ActivateContext %d (%d cycles)\n", thread_num, delay);

    assert(thread_num == 0);
    assert(thread);

    assert(_status == Idle);
    assert(!tickEvent.scheduled());

    notIdleFraction++;
    numCycles += tickToCycles(thread->lastActivate - thread->lastSuspend);

    //Make sure ticks are still on multiples of cycles
    tickEvent.schedule(nextCycle(curTick + ticks(delay)));
    _status = Running;
}


void
AtomicSimpleCPU::suspendContext(int thread_num)
{
    DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);

    assert(thread_num == 0);
    assert(thread);

    assert(_status == Running);

    // tick event may not be scheduled if this gets called from inside
    // an instruction's execution, e.g. "quiesce"
    if (tickEvent.scheduled())
        tickEvent.deschedule();

    notIdleFraction--;
    _status = Idle;
}


template <class T>
Fault
AtomicSimpleCPU::read(Addr addr, T &data, unsigned flags)
{
    // use the CPU's statically allocated read request and packet objects
    Request *req = &data_read_req;

    if (traceData) {
        traceData->setAddr(addr);
    }

    //The block size of our peer.
    int blockSize = dcachePort.peerBlockSize();
    //The size of the data we're trying to read.
    int dataSize = sizeof(T);

    uint8_t *dataPtr = (uint8_t *)&data;

    //The address of the second part of this access if it needs to be split
    //across a cache line boundary.
    Addr secondAddr = roundDown(addr + dataSize - 1, blockSize);

    if (secondAddr > addr)
        dataSize = secondAddr - addr;

    dcache_latency = 0;

    while (1) {
        req->setVirt(0, addr, dataSize, flags, thread->readPC());

        // translate to physical address
        Fault fault = thread->translateDataReadReq(req);

        // Now do the access.
        if (fault == NoFault) {
            Packet pkt = Packet(req,
                    req->isLocked() ? MemCmd::LoadLockedReq : MemCmd::ReadReq,
                    Packet::Broadcast);
            pkt.dataStatic(dataPtr);

            if (req->isMmapedIpr())
                dcache_latency += TheISA::handleIprRead(thread->getTC(), &pkt);
            else {
                if (hasPhysMemPort && pkt.getAddr() == physMemAddr)
                    dcache_latency += physmemPort.sendAtomic(&pkt);
                else
                    dcache_latency += dcachePort.sendAtomic(&pkt);
            }
            dcache_access = true;

            assert(!pkt.isError());

            if (req->isLocked()) {
                TheISA::handleLockedRead(thread, req);
            }
        }

        // This will need a new way to tell if it has a dcache attached.
        if (req->isUncacheable())
            recordEvent("Uncached Read");

        //If there's a fault, return it
        if (fault != NoFault)
            return fault;
        //If we don't need to access a second cache line, stop now.
        if (secondAddr <= addr) {
            data = gtoh(data);
            if (traceData) {
                traceData->setData(data);
            }
            return fault;
        }

        /*
         * Set up for accessing the second cache line.
         */

        //Move the pointer we're reading into to the correct location.
        dataPtr += dataSize;
        //Adjust the size to get the remaining bytes.
        dataSize = addr + sizeof(T) - secondAddr;
        //And access the right address.
        addr = secondAddr;
    }
}
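// Worked example of the line-splitting logic in read() above (write()
// below uses the same scheme), assuming a 64-byte block size: an
// 8-byte read at addr 0x3c gives secondAddr = roundDown(0x3c + 8 - 1,
// 64) = 0x40.  Since 0x40 > 0x3c, the first pass covers the 4 bytes at
// 0x3c-0x3f; dataSize is then recomputed as 0x3c + 8 - 0x40 = 4 and the
// second pass accesses the remaining bytes at 0x40.  An access that
// fits in one block (e.g., 8 bytes at 0x38) yields secondAddr <= addr,
// so the loop finishes after a single pass.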
Fault
AtomicSimpleCPU::translateDataReadAddr(Addr vaddr, Addr &paddr,
        int size, unsigned flags)
{
    // use the CPU's statically allocated read request and packet objects
    Request *req = &data_read_req;

    if (traceData) {
        traceData->setAddr(vaddr);
    }

    //The block size of our peer.
    int blockSize = dcachePort.peerBlockSize();
    //The size of the data we're trying to read.
    int dataSize = size;

    bool firstTimeThrough = true;

    //The address of the second part of this access if it needs to be split
    //across a cache line boundary.
    Addr secondAddr = roundDown(vaddr + dataSize - 1, blockSize);

    if (secondAddr > vaddr)
        dataSize = secondAddr - vaddr;

    while (1) {
        req->setVirt(0, vaddr, dataSize, flags, thread->readPC());

        // translate to physical address
        Fault fault = thread->translateDataReadReq(req);

        //If there's a fault, return it
        if (fault != NoFault)
            return fault;

        if (firstTimeThrough) {
            paddr = req->getPaddr();
            firstTimeThrough = false;
        }

        //If we don't need to access a second cache line, stop now.
        if (secondAddr <= vaddr)
            return fault;

        /*
         * Set up for accessing the second cache line.
         */

        //Adjust the size to get the remaining bytes.
        dataSize = vaddr + size - secondAddr;
        //And access the right address.
        vaddr = secondAddr;
    }
}

#ifndef DOXYGEN_SHOULD_SKIP_THIS

template
Fault
AtomicSimpleCPU::read(Addr addr, Twin32_t &data, unsigned flags);

template
Fault
AtomicSimpleCPU::read(Addr addr, Twin64_t &data, unsigned flags);

template
Fault
AtomicSimpleCPU::read(Addr addr, uint64_t &data, unsigned flags);

template
Fault
AtomicSimpleCPU::read(Addr addr, uint32_t &data, unsigned flags);

template
Fault
AtomicSimpleCPU::read(Addr addr, uint16_t &data, unsigned flags);

template
Fault
AtomicSimpleCPU::read(Addr addr, uint8_t &data, unsigned flags);

#endif //DOXYGEN_SHOULD_SKIP_THIS

template<>
Fault
AtomicSimpleCPU::read(Addr addr, double &data, unsigned flags)
{
    return read(addr, *(uint64_t*)&data, flags);
}

template<>
Fault
AtomicSimpleCPU::read(Addr addr, float &data, unsigned flags)
{
    return read(addr, *(uint32_t*)&data, flags);
}


template<>
Fault
AtomicSimpleCPU::read(Addr addr, int32_t &data, unsigned flags)
{
    return read(addr, (uint32_t&)data, flags);
}
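// write() mirrors the structure of read() but must also pick the
// memory command: a store with the lock flag set becomes a
// StoreCondReq, and TheISA::handleLockedWrite() decides whether the
// store-conditional may access memory at all; swaps become SwapReq,
// with the comparison value for a conditional swap passed through the
// request's extra data.  Results (SC success/failure, old swap value)
// come back through *res.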
template <class T>
Fault
AtomicSimpleCPU::write(T data, Addr addr, unsigned flags, uint64_t *res)
{
    // use the CPU's statically allocated write request and packet objects
    Request *req = &data_write_req;

    if (traceData) {
        traceData->setAddr(addr);
    }

    //The block size of our peer.
    int blockSize = dcachePort.peerBlockSize();
    //The size of the data we're trying to write.
    int dataSize = sizeof(T);

    uint8_t *dataPtr = (uint8_t *)&data;

    //The address of the second part of this access if it needs to be split
    //across a cache line boundary.
    Addr secondAddr = roundDown(addr + dataSize - 1, blockSize);

    if (secondAddr > addr)
        dataSize = secondAddr - addr;

    dcache_latency = 0;

    while (1) {
        req->setVirt(0, addr, dataSize, flags, thread->readPC());

        // translate to physical address
        Fault fault = thread->translateDataWriteReq(req);

        // Now do the access.
        if (fault == NoFault) {
            MemCmd cmd = MemCmd::WriteReq; // default
            bool do_access = true;  // flag to suppress cache access

            if (req->isLocked()) {
                cmd = MemCmd::StoreCondReq;
                do_access = TheISA::handleLockedWrite(thread, req);
            } else if (req->isSwap()) {
                cmd = MemCmd::SwapReq;
                if (req->isCondSwap()) {
                    assert(res);
                    req->setExtraData(*res);
                }
            }

            if (do_access) {
                Packet pkt = Packet(req, cmd, Packet::Broadcast);
                pkt.dataStatic(dataPtr);

                if (req->isMmapedIpr()) {
                    dcache_latency +=
                        TheISA::handleIprWrite(thread->getTC(), &pkt);
                } else {
                    //XXX This needs to be outside of the loop in order to
                    //work properly for cache line boundary crossing
                    //accesses in cross-endian simulations.
                    data = htog(data);
                    if (hasPhysMemPort && pkt.getAddr() == physMemAddr)
                        dcache_latency += physmemPort.sendAtomic(&pkt);
                    else
                        dcache_latency += dcachePort.sendAtomic(&pkt);
                }
                dcache_access = true;
                assert(!pkt.isError());

                if (req->isSwap()) {
                    assert(res);
                    *res = pkt.get<T>();
                }
            }

            if (res && !req->isSwap()) {
                *res = req->getExtraData();
            }
        }

        // This will need a new way to tell if it's hooked up to a cache or not.
        if (req->isUncacheable())
            recordEvent("Uncached Write");

        //If there's a fault or we don't need to access a second cache line,
        //stop now.
        if (fault != NoFault || secondAddr <= addr) {
            // If the write needs to have a fault on the access, consider
            // calling changeStatus() and changing it to "bad addr write"
            // or something.
            if (traceData) {
                traceData->setData(data);
            }
            return fault;
        }

        /*
         * Set up for accessing the second cache line.
         */

        //Move the pointer we're writing from to the correct location.
        dataPtr += dataSize;
        //Adjust the size to get the remaining bytes.
        dataSize = addr + sizeof(T) - secondAddr;
        //And access the right address.
        addr = secondAddr;
    }
}
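// Like translateDataReadAddr() above, this runs the same
// line-splitting loop as write() but performs translation only: it
// returns the physical address of the first piece through paddr and
// never touches memory.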
Fault
AtomicSimpleCPU::translateDataWriteAddr(Addr vaddr, Addr &paddr,
        int size, unsigned flags)
{
    // use the CPU's statically allocated write request and packet objects
    Request *req = &data_write_req;

    if (traceData) {
        traceData->setAddr(vaddr);
    }

    //The block size of our peer.
    int blockSize = dcachePort.peerBlockSize();

    //The address of the second part of this access if it needs to be split
    //across a cache line boundary.
    Addr secondAddr = roundDown(vaddr + size - 1, blockSize);

    //The size of the data we're trying to write.
    int dataSize = size;

    bool firstTimeThrough = true;

    if (secondAddr > vaddr)
        dataSize = secondAddr - vaddr;

    dcache_latency = 0;

    while (1) {
        req->setVirt(0, vaddr, dataSize, flags, thread->readPC());

        // translate to physical address
        Fault fault = thread->translateDataWriteReq(req);

        //If there's a fault, return it
        if (fault != NoFault)
            return fault;

        if (firstTimeThrough) {
            paddr = req->getPaddr();
            firstTimeThrough = false;
        }

        //If we don't need to access a second cache line, stop now.
        if (secondAddr <= vaddr)
            return fault;

        /*
         * Set up for accessing the second cache line.
         */

        //Adjust the size to get the remaining bytes.
        dataSize = vaddr + size - secondAddr;
        //And access the right address.
        vaddr = secondAddr;
    }
}


#ifndef DOXYGEN_SHOULD_SKIP_THIS

template
Fault
AtomicSimpleCPU::write(Twin32_t data, Addr addr,
        unsigned flags, uint64_t *res);

template
Fault
AtomicSimpleCPU::write(Twin64_t data, Addr addr,
        unsigned flags, uint64_t *res);

template
Fault
AtomicSimpleCPU::write(uint64_t data, Addr addr,
        unsigned flags, uint64_t *res);

template
Fault
AtomicSimpleCPU::write(uint32_t data, Addr addr,
        unsigned flags, uint64_t *res);

template
Fault
AtomicSimpleCPU::write(uint16_t data, Addr addr,
        unsigned flags, uint64_t *res);

template
Fault
AtomicSimpleCPU::write(uint8_t data, Addr addr,
        unsigned flags, uint64_t *res);

#endif //DOXYGEN_SHOULD_SKIP_THIS

template<>
Fault
AtomicSimpleCPU::write(double data, Addr addr, unsigned flags, uint64_t *res)
{
    return write(*(uint64_t*)&data, addr, flags, res);
}

template<>
Fault
AtomicSimpleCPU::write(float data, Addr addr, unsigned flags, uint64_t *res)
{
    return write(*(uint32_t*)&data, addr, flags, res);
}


template<>
Fault
AtomicSimpleCPU::write(int32_t data, Addr addr, unsigned flags, uint64_t *res)
{
    return write((uint32_t)data, addr, flags, res);
}
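// tick() is the main loop of the atomic model: each invocation
// executes up to 'width' instructions back to back, each one checking
// for interrupts, fetching and executing via atomic accesses, and
// accumulating any simulated stall time.  Stall ticks are rounded up
// to whole cycles; e.g., with a 500-tick clock, a 750-tick stall
// becomes 500 + 500 = 1000 ticks (two cycles).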
void
AtomicSimpleCPU::tick()
{
    DPRINTF(SimpleCPU, "Tick\n");

    Tick latency = 0;

    for (int i = 0; i < width; ++i) {
        numCycles++;

        if (!curStaticInst || !curStaticInst->isDelayedCommit())
            checkForInterrupts();

        checkPcEventQueue();

        Fault fault = setupFetchRequest(&ifetch_req);

        if (fault == NoFault) {
            Tick icache_latency = 0;
            bool icache_access = false;
            dcache_access = false; // assume no dcache access

            //Fetch more instruction memory if necessary
            //if(predecoder.needMoreBytes())
            //{
                icache_access = true;
                Packet ifetch_pkt = Packet(&ifetch_req, MemCmd::ReadReq,
                                           Packet::Broadcast);
                ifetch_pkt.dataStatic(&inst);

                if (hasPhysMemPort && ifetch_pkt.getAddr() == physMemAddr)
                    icache_latency = physmemPort.sendAtomic(&ifetch_pkt);
                else
                    icache_latency = icachePort.sendAtomic(&ifetch_pkt);

                assert(!ifetch_pkt.isError());

                // ifetch_req is initialized to read the instruction directly
                // into the CPU object's inst field.
            //}

            preExecute();

            if (curStaticInst) {
                fault = curStaticInst->execute(this, traceData);

                // keep an instruction count
                if (fault == NoFault)
                    countInst();
                else if (traceData) {
                    // If there was a fault, we should trace this instruction.
                    delete traceData;
                    traceData = NULL;
                }

                postExecute();
            }

            // @todo remove me after debugging with legion done
            if (curStaticInst && (!curStaticInst->isMicroop() ||
                        curStaticInst->isFirstMicroop()))
                instCnt++;

            Tick stall_ticks = 0;
            if (simulate_inst_stalls && icache_access)
                stall_ticks += icache_latency;

            if (simulate_data_stalls && dcache_access)
                stall_ticks += dcache_latency;

            if (stall_ticks) {
                Tick stall_cycles = stall_ticks / ticks(1);
                Tick aligned_stall_ticks = ticks(stall_cycles);

                // round a partial cycle up to the next cycle boundary
                if (aligned_stall_ticks < stall_ticks)
                    aligned_stall_ticks += ticks(1);

                latency += aligned_stall_ticks;
            }

        }
        if (fault != NoFault || !stayAtPC)
            advancePC(fault);
    }

    // instruction takes at least one cycle
    if (latency < ticks(1))
        latency = ticks(1);

    if (_status != Idle)
        tickEvent.schedule(curTick + latency);
}


void
AtomicSimpleCPU::printAddr(Addr a)
{
    dcachePort.printAddr(a);
}


////////////////////////////////////////////////////////////////////////
//
//  AtomicSimpleCPU Simulation Object
//
AtomicSimpleCPU *
AtomicSimpleCPUParams::create()
{
    AtomicSimpleCPU::Params *params = new AtomicSimpleCPU::Params();
    params->name = name;
    params->numberOfThreads = 1;
    params->max_insts_any_thread = max_insts_any_thread;
    params->max_insts_all_threads = max_insts_all_threads;
    params->max_loads_any_thread = max_loads_any_thread;
    params->max_loads_all_threads = max_loads_all_threads;
    params->progress_interval = progress_interval;
    params->deferRegistration = defer_registration;
    params->phase = phase;
    params->clock = clock;
    params->functionTrace = function_trace;
    params->functionTraceStart = function_trace_start;
    params->width = width;
    params->simulate_data_stalls = simulate_data_stalls;
    params->simulate_inst_stalls = simulate_inst_stalls;
    params->system = system;
    params->cpu_id = cpu_id;
    params->tracer = tracer;

    params->itb = itb;
    params->dtb = dtb;
#if FULL_SYSTEM
    params->profile = profile;
    params->do_quiesce = do_quiesce;
    params->do_checkpoint_insts = do_checkpoint_insts;
    params->do_statistics_insts = do_statistics_insts;
#else
    if (workload.size() != 1)
        panic("only one workload allowed");
    params->process = workload[0];
#endif

    AtomicSimpleCPU *cpu = new AtomicSimpleCPU(params);
    return cpu;
}