atomic.cc revision 5408
/*
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 */

#include "arch/locked_mem.hh"
#include "arch/mmaped_ipr.hh"
#include "arch/utility.hh"
#include "base/bigint.hh"
#include "cpu/exetrace.hh"
#include "cpu/simple/atomic.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "params/AtomicSimpleCPU.hh"
#include "sim/system.hh"

using namespace std;
using namespace TheISA;

AtomicSimpleCPU::TickEvent::TickEvent(AtomicSimpleCPU *c)
    : Event(&mainEventQueue, CPU_Tick_Pri), cpu(c)
{
}


void
AtomicSimpleCPU::TickEvent::process()
{
    cpu->tick();
}

const char *
AtomicSimpleCPU::TickEvent::description() const
{
    return "AtomicSimpleCPU tick";
}

Port *
AtomicSimpleCPU::getPort(const std::string &if_name, int idx)
{
    if (if_name == "dcache_port")
        return &dcachePort;
    else if (if_name == "icache_port")
        return &icachePort;
    else if (if_name == "physmem_port") {
        hasPhysMemPort = true;
        return &physmemPort;
    }
    else
        panic("No Such Port\n");
}

void
AtomicSimpleCPU::init()
{
    BaseCPU::init();
    cpuId = tc->readCpuId();
#if FULL_SYSTEM
    for (int i = 0; i < threadContexts.size(); ++i) {
        ThreadContext *tc = threadContexts[i];

        // initialize CPU, including PC
        TheISA::initCPU(tc, cpuId);
    }
#endif
    if (hasPhysMemPort) {
        bool snoop = false;
        AddrRangeList pmAddrList;
        physmemPort.getPeerAddressRanges(pmAddrList, snoop);
        physMemAddr = *pmAddrList.begin();
    }
    ifetch_req.setThreadContext(cpuId, 0); // Add thread ID if we add MT
    data_read_req.setThreadContext(cpuId, 0); // Add thread ID here too
    data_write_req.setThreadContext(cpuId, 0); // Add thread ID here too
}

bool
AtomicSimpleCPU::CpuPort::recvTiming(PacketPtr pkt)
{
    panic("AtomicSimpleCPU doesn't expect recvTiming callback!");
    return true;
}

Tick
AtomicSimpleCPU::CpuPort::recvAtomic(PacketPtr pkt)
{
    //Snooping a coherence request, just return
    return 0;
}

void
AtomicSimpleCPU::CpuPort::recvFunctional(PacketPtr pkt)
{
    //No internal storage to update, just return
    return;
}

void
AtomicSimpleCPU::CpuPort::recvStatusChange(Status status)
{
    if (status == RangeChange) {
        if (!snoopRangeSent) {
            snoopRangeSent = true;
            sendStatusChange(Port::RangeChange);
        }
        return;
    }

    panic("AtomicSimpleCPU doesn't expect recvStatusChange callback!");
}

void
AtomicSimpleCPU::CpuPort::recvRetry()
{
    panic("AtomicSimpleCPU doesn't expect recvRetry callback!");
}

void
AtomicSimpleCPU::DcachePort::setPeer(Port *port)
{
    Port::setPeer(port);

#if FULL_SYSTEM
    // Update the ThreadContext's memory ports (Functional/Virtual
    // Ports)
    cpu->tcBase()->connectMemPorts();
#endif
}

AtomicSimpleCPU::AtomicSimpleCPU(Params *p)
    : BaseSimpleCPU(p), tickEvent(this),
      width(p->width), simulate_stalls(p->simulate_stalls),
      icachePort(name() + "-iport", this), dcachePort(name() + "-dport", this),
      physmemPort(name() + "-pport", this), hasPhysMemPort(false)
{
    _status = Idle;

    icachePort.snoopRangeSent = false;
    dcachePort.snoopRangeSent = false;

}


AtomicSimpleCPU::~AtomicSimpleCPU()
{
}

void
AtomicSimpleCPU::serialize(ostream &os)
{
    SimObject::State so_state = SimObject::getState();
    SERIALIZE_ENUM(so_state);
    Status _status = status();
    SERIALIZE_ENUM(_status);
    BaseSimpleCPU::serialize(os);
    nameOut(os, csprintf("%s.tickEvent", name()));
    tickEvent.serialize(os);
}

void
AtomicSimpleCPU::unserialize(Checkpoint *cp, const string &section)
{
    SimObject::State so_state;
    UNSERIALIZE_ENUM(so_state);
    UNSERIALIZE_ENUM(_status);
    BaseSimpleCPU::unserialize(cp, section);
    tickEvent.unserialize(cp, csprintf("%s.tickEvent", section));
}

void
AtomicSimpleCPU::resume()
{
    if (_status == Idle || _status == SwitchedOut)
        return;

    DPRINTF(SimpleCPU, "Resume\n");
    assert(system->getMemoryMode() == Enums::atomic);

    changeState(SimObject::Running);
    if (thread->status() == ThreadContext::Active) {
        if (!tickEvent.scheduled()) {
            tickEvent.schedule(nextCycle());
        }
    }
}

void
AtomicSimpleCPU::switchOut()
{
    assert(status() == Running || status() == Idle);
    _status = SwitchedOut;

    tickEvent.squash();
}


void
AtomicSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
{
    BaseCPU::takeOverFrom(oldCPU, &icachePort, &dcachePort);

    assert(!tickEvent.scheduled());

    // if any of this CPU's ThreadContexts are active, mark the CPU as
    // running and schedule its tick event.
    for (int i = 0; i < threadContexts.size(); ++i) {
        ThreadContext *tc = threadContexts[i];
        if (tc->status() == ThreadContext::Active && _status != Running) {
            _status = Running;
            tickEvent.schedule(nextCycle());
            break;
        }
    }
    if (_status != Running) {
        _status = Idle;
    }
    assert(threadContexts.size() == 1);
    cpuId = tc->readCpuId();
    ifetch_req.setThreadContext(cpuId, 0); // Add thread ID if we add MT
    data_read_req.setThreadContext(cpuId, 0); // Add thread ID here too
    data_write_req.setThreadContext(cpuId, 0); // Add thread ID here too
}


void
AtomicSimpleCPU::activateContext(int thread_num, int delay)
{
    DPRINTF(SimpleCPU, "ActivateContext %d (%d cycles)\n", thread_num, delay);

    assert(thread_num == 0);
    assert(thread);

    assert(_status == Idle);
    assert(!tickEvent.scheduled());

    notIdleFraction++;
    numCycles += tickToCycles(thread->lastActivate - thread->lastSuspend);

    //Make sure ticks are still on multiples of cycles
    tickEvent.schedule(nextCycle(curTick + ticks(delay)));
    _status = Running;
}


void
AtomicSimpleCPU::suspendContext(int thread_num)
{
    DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);

    assert(thread_num == 0);
    assert(thread);

    assert(_status == Running);

    // tick event may not be scheduled if this gets called from inside
    // an instruction's execution, e.g. "quiesce"
    if (tickEvent.scheduled())
        tickEvent.deschedule();

    notIdleFraction--;
    _status = Idle;
}


template <class T>
Fault
AtomicSimpleCPU::read(Addr addr, T &data, unsigned flags)
{
    // use the CPU's statically allocated read request and packet objects
    Request *req = &data_read_req;

    if (traceData) {
        traceData->setAddr(addr);
    }

    //The block size of our peer.
    int blockSize = dcachePort.peerBlockSize();
    //The size of the data we're trying to read.
    int dataSize = sizeof(T);

    uint8_t * dataPtr = (uint8_t *)&data;

    //The address of the second part of this access if it needs to be split
    //across a cache line boundary.
    Addr secondAddr = roundDown(addr + dataSize - 1, blockSize);

    if (secondAddr > addr)
        dataSize = secondAddr - addr;

    dcache_latency = 0;

    while (1) {
        req->setVirt(0, addr, dataSize, flags, thread->readPC());

        // translate to physical address
        Fault fault = thread->translateDataReadReq(req);

        // Now do the access.
        if (fault == NoFault) {
            Packet pkt = Packet(req,
                    req->isLocked() ? MemCmd::LoadLockedReq : MemCmd::ReadReq,
                    Packet::Broadcast);
            pkt.dataStatic(dataPtr);

            if (req->isMmapedIpr())
                dcache_latency += TheISA::handleIprRead(thread->getTC(), &pkt);
            else {
                if (hasPhysMemPort && pkt.getAddr() == physMemAddr)
                    dcache_latency += physmemPort.sendAtomic(&pkt);
                else
                    dcache_latency += dcachePort.sendAtomic(&pkt);
            }
            dcache_access = true;

            assert(!pkt.isError());

            if (req->isLocked()) {
                TheISA::handleLockedRead(thread, req);
            }
        }

        // This will need a new way to tell if it has a dcache attached.
        if (req->isUncacheable())
            recordEvent("Uncached Read");

        //If there's a fault, return it
        if (fault != NoFault)
            return fault;
        //If we don't need to access a second cache line, stop now.
        if (secondAddr <= addr)
        {
            data = gtoh(data);
            if (traceData) {
                traceData->setData(data);
            }
            return fault;
        }

        /*
         * Set up for accessing the second cache line.
         */

        //Move the pointer we're reading into to the correct location.
        dataPtr += dataSize;
        //Adjust the size to get the remaining bytes.
        dataSize = addr + sizeof(T) - secondAddr;
        //And access the right address.
        addr = secondAddr;
    }
}

Fault
AtomicSimpleCPU::translateDataReadAddr(Addr vaddr, Addr &paddr,
        int size, unsigned flags)
{
    // use the CPU's statically allocated read request and packet objects
    Request *req = &data_read_req;

    if (traceData) {
        traceData->setAddr(vaddr);
    }

    //The block size of our peer.
    int blockSize = dcachePort.peerBlockSize();
    //The size of the data we're trying to read.
    int dataSize = size;

    bool firstTimeThrough = true;

    //The address of the second part of this access if it needs to be split
    //across a cache line boundary.
    Addr secondAddr = roundDown(vaddr + dataSize - 1, blockSize);

    if (secondAddr > vaddr)
        dataSize = secondAddr - vaddr;

    while (1) {
        req->setVirt(0, vaddr, dataSize, flags, thread->readPC());

        // translate to physical address
        Fault fault = thread->translateDataReadReq(req);

        //If there's a fault, return it
        if (fault != NoFault)
            return fault;

        if (firstTimeThrough) {
            paddr = req->getPaddr();
            firstTimeThrough = false;
        }

        //If we don't need to access a second cache line, stop now.
        if (secondAddr <= vaddr)
            return fault;

        /*
         * Set up for accessing the second cache line.
         */

        //Adjust the size to get the remaining bytes.
        dataSize = vaddr + size - secondAddr;
        //And access the right address.
        vaddr = secondAddr;
    }
}

#ifndef DOXYGEN_SHOULD_SKIP_THIS

template
Fault
AtomicSimpleCPU::read(Addr addr, Twin32_t &data, unsigned flags);

template
Fault
AtomicSimpleCPU::read(Addr addr, Twin64_t &data, unsigned flags);

template
Fault
AtomicSimpleCPU::read(Addr addr, uint64_t &data, unsigned flags);

template
Fault
AtomicSimpleCPU::read(Addr addr, uint32_t &data, unsigned flags);

template
Fault
AtomicSimpleCPU::read(Addr addr, uint16_t &data, unsigned flags);

template
Fault
AtomicSimpleCPU::read(Addr addr, uint8_t &data, unsigned flags);

#endif //DOXYGEN_SHOULD_SKIP_THIS

template<>
Fault
AtomicSimpleCPU::read(Addr addr, double &data, unsigned flags)
{
    return read(addr, *(uint64_t*)&data, flags);
}

template<>
Fault
AtomicSimpleCPU::read(Addr addr, float &data, unsigned flags)
{
    return read(addr, *(uint32_t*)&data, flags);
}


template<>
Fault
AtomicSimpleCPU::read(Addr addr, int32_t &data, unsigned flags)
{
    return read(addr, (uint32_t&)data, flags);
}


template <class T>
Fault
AtomicSimpleCPU::write(T data, Addr addr, unsigned flags, uint64_t *res)
{
    // use the CPU's statically allocated write request and packet objects
    Request *req = &data_write_req;

    if (traceData) {
        traceData->setAddr(addr);
    }

    //The block size of our peer.
    int blockSize = dcachePort.peerBlockSize();
    //The size of the data we're trying to write.
    int dataSize = sizeof(T);

    uint8_t * dataPtr = (uint8_t *)&data;

    //The address of the second part of this access if it needs to be split
    //across a cache line boundary.
    Addr secondAddr = roundDown(addr + dataSize - 1, blockSize);

    if (secondAddr > addr)
        dataSize = secondAddr - addr;

    dcache_latency = 0;

    while (1) {
        req->setVirt(0, addr, dataSize, flags, thread->readPC());

        // translate to physical address
        Fault fault = thread->translateDataWriteReq(req);

        // Now do the access.
        if (fault == NoFault) {
            MemCmd cmd = MemCmd::WriteReq; // default
            bool do_access = true;  // flag to suppress cache access

            if (req->isLocked()) {
                cmd = MemCmd::StoreCondReq;
                do_access = TheISA::handleLockedWrite(thread, req);
            } else if (req->isSwap()) {
                cmd = MemCmd::SwapReq;
                if (req->isCondSwap()) {
                    assert(res);
                    req->setExtraData(*res);
                }
            }

            if (do_access) {
                Packet pkt = Packet(req, cmd, Packet::Broadcast);
                pkt.dataStatic(dataPtr);

                if (req->isMmapedIpr()) {
                    dcache_latency +=
                        TheISA::handleIprWrite(thread->getTC(), &pkt);
                } else {
                    //XXX This needs to be outside of the loop in order to
                    //work properly for cache line boundary crossing
                    //accesses in transendian simulations.
                    data = htog(data);
                    if (hasPhysMemPort && pkt.getAddr() == physMemAddr)
                        dcache_latency += physmemPort.sendAtomic(&pkt);
                    else
                        dcache_latency += dcachePort.sendAtomic(&pkt);
                }
                dcache_access = true;
                assert(!pkt.isError());

                if (req->isSwap()) {
                    assert(res);
                    *res = pkt.get<T>();
                }
            }

            if (res && !req->isSwap()) {
                *res = req->getExtraData();
            }
        }

        // This will need a new way to tell if it's hooked up to a cache or not.
        if (req->isUncacheable())
            recordEvent("Uncached Write");

        //If there's a fault or we don't need to access a second cache line,
        //stop now.
        if (fault != NoFault || secondAddr <= addr)
        {
            // If the write needs to have a fault on the access, consider
            // calling changeStatus() and changing it to "bad addr write"
            // or something.
            if (traceData) {
                traceData->setData(data);
            }
            return fault;
        }

        /*
         * Set up for accessing the second cache line.
         */

        //Move the pointer we're writing from to the correct location.
        dataPtr += dataSize;
        //Adjust the size to get the remaining bytes.
        dataSize = addr + sizeof(T) - secondAddr;
        //And access the right address.
        addr = secondAddr;
    }
}

Fault
AtomicSimpleCPU::translateDataWriteAddr(Addr vaddr, Addr &paddr,
        int size, unsigned flags)
{
    // use the CPU's statically allocated write request and packet objects
    Request *req = &data_write_req;

    if (traceData) {
        traceData->setAddr(vaddr);
    }

    //The block size of our peer.
    int blockSize = dcachePort.peerBlockSize();

    //The address of the second part of this access if it needs to be split
    //across a cache line boundary.
    Addr secondAddr = roundDown(vaddr + size - 1, blockSize);

    //The size of the data we're trying to write.
    int dataSize = size;

    bool firstTimeThrough = true;

    if (secondAddr > vaddr)
        dataSize = secondAddr - vaddr;

    dcache_latency = 0;

    while (1) {
        req->setVirt(0, vaddr, dataSize, flags, thread->readPC());

        // translate to physical address
        Fault fault = thread->translateDataWriteReq(req);

        //If there's a fault or we don't need to access a second cache line,
        //stop now.
        if (fault != NoFault)
            return fault;

        if (firstTimeThrough) {
            paddr = req->getPaddr();
            firstTimeThrough = false;
        }

        if (secondAddr <= vaddr)
            return fault;

        /*
         * Set up for accessing the second cache line.
         */

        //Adjust the size to get the remaining bytes.
        dataSize = vaddr + size - secondAddr;
        //And access the right address.
        vaddr = secondAddr;
    }
}


#ifndef DOXYGEN_SHOULD_SKIP_THIS

template
Fault
AtomicSimpleCPU::write(Twin32_t data, Addr addr,
        unsigned flags, uint64_t *res);

template
Fault
AtomicSimpleCPU::write(Twin64_t data, Addr addr,
        unsigned flags, uint64_t *res);

template
Fault
AtomicSimpleCPU::write(uint64_t data, Addr addr,
        unsigned flags, uint64_t *res);

template
Fault
AtomicSimpleCPU::write(uint32_t data, Addr addr,
        unsigned flags, uint64_t *res);

template
Fault
AtomicSimpleCPU::write(uint16_t data, Addr addr,
        unsigned flags, uint64_t *res);

template
Fault
AtomicSimpleCPU::write(uint8_t data, Addr addr,
        unsigned flags, uint64_t *res);

#endif //DOXYGEN_SHOULD_SKIP_THIS

template<>
Fault
AtomicSimpleCPU::write(double data, Addr addr, unsigned flags, uint64_t *res)
{
    return write(*(uint64_t*)&data, addr, flags, res);
}

template<>
Fault
AtomicSimpleCPU::write(float data, Addr addr, unsigned flags, uint64_t *res)
{
    return write(*(uint32_t*)&data, addr, flags, res);
}


template<>
Fault
AtomicSimpleCPU::write(int32_t data, Addr addr, unsigned flags, uint64_t *res)
{
    return write((uint32_t)data, addr, flags, res);
}


void
AtomicSimpleCPU::tick()
{
    DPRINTF(SimpleCPU, "Tick\n");

    Tick latency = ticks(1); // instruction takes one cycle by default

    for (int i = 0; i < width; ++i) {
        numCycles++;

        if (!curStaticInst || !curStaticInst->isDelayedCommit())
            checkForInterrupts();

        checkPcEventQueue();

        Fault fault = setupFetchRequest(&ifetch_req);

        if (fault == NoFault) {
            Tick icache_latency = 0;
            bool icache_access = false;
            dcache_access = false; // assume no dcache access

            //Fetch more instruction memory if necessary
            //if(predecoder.needMoreBytes())
            //{
                icache_access = true;
                Packet ifetch_pkt = Packet(&ifetch_req, MemCmd::ReadReq,
                                           Packet::Broadcast);
                ifetch_pkt.dataStatic(&inst);

                if (hasPhysMemPort && ifetch_pkt.getAddr() == physMemAddr)
                    icache_latency = physmemPort.sendAtomic(&ifetch_pkt);
                else
                    icache_latency = icachePort.sendAtomic(&ifetch_pkt);

                assert(!ifetch_pkt.isError());

                // ifetch_req is initialized to read the instruction directly
                // into the CPU object's inst field.
            //}

            preExecute();

            if (curStaticInst) {
                fault = curStaticInst->execute(this, traceData);

                // keep an instruction count
                if (fault == NoFault)
                    countInst();
                else if (traceData) {
                    // If there was a fault, we should trace this instruction.
                    delete traceData;
                    traceData = NULL;
                }

                postExecute();
            }

            // @todo remove me after debugging with legion done
            if (curStaticInst && (!curStaticInst->isMicroop() ||
                        curStaticInst->isFirstMicroop()))
                instCnt++;

            if (simulate_stalls) {
                Tick icache_stall =
                    icache_access ? icache_latency - ticks(1) : 0;
                Tick dcache_stall =
                    dcache_access ? dcache_latency - ticks(1) : 0;
                Tick stall_cycles = (icache_stall + dcache_stall) / ticks(1);
                if (ticks(stall_cycles) < (icache_stall + dcache_stall))
                    latency += ticks(stall_cycles+1);
                else
                    latency += ticks(stall_cycles);
            }

        }
        if (fault != NoFault || !stayAtPC)
            advancePC(fault);
    }

    if (_status != Idle)
        tickEvent.schedule(curTick + latency);
}


void
AtomicSimpleCPU::printAddr(Addr a)
{
    dcachePort.printAddr(a);
}


////////////////////////////////////////////////////////////////////////
//
//  AtomicSimpleCPU Simulation Object
//
AtomicSimpleCPU *
AtomicSimpleCPUParams::create()
{
    AtomicSimpleCPU::Params *params = new AtomicSimpleCPU::Params();
    params->name = name;
    params->numberOfThreads = 1;
    params->max_insts_any_thread = max_insts_any_thread;
    params->max_insts_all_threads = max_insts_all_threads;
    params->max_loads_any_thread = max_loads_any_thread;
    params->max_loads_all_threads = max_loads_all_threads;
    params->progress_interval = progress_interval;
    params->deferRegistration = defer_registration;
    params->phase = phase;
    params->clock = clock;
    params->functionTrace = function_trace;
    params->functionTraceStart = function_trace_start;
    params->width = width;
    params->simulate_stalls = simulate_stalls;
    params->system = system;
    params->cpu_id = cpu_id;
    params->tracer = tracer;

    params->itb = itb;
    params->dtb = dtb;
#if FULL_SYSTEM
    params->profile = profile;
    params->do_quiesce = do_quiesce;
    params->do_checkpoint_insts = do_checkpoint_insts;
    params->do_statistics_insts = do_statistics_insts;
#else
    if (workload.size() != 1)
        panic("only one workload allowed");
    params->process = workload[0];
#endif

    AtomicSimpleCPU *cpu = new AtomicSimpleCPU(params);
    return cpu;
}