atomic.cc revision 6623
/*
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 */

#include "arch/locked_mem.hh"
#include "arch/mmaped_ipr.hh"
#include "arch/utility.hh"
#include "base/bigint.hh"
#include "cpu/exetrace.hh"
#include "cpu/simple/atomic.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "params/AtomicSimpleCPU.hh"
#include "sim/system.hh"

using namespace std;
using namespace TheISA;

AtomicSimpleCPU::TickEvent::TickEvent(AtomicSimpleCPU *c)
    : Event(CPU_Tick_Pri), cpu(c)
{
}


void
AtomicSimpleCPU::TickEvent::process()
{
    cpu->tick();
}

const char *
AtomicSimpleCPU::TickEvent::description() const
{
    return "AtomicSimpleCPU tick";
}

Port *
AtomicSimpleCPU::getPort(const string &if_name, int idx)
{
    if (if_name == "dcache_port")
        return &dcachePort;
    else if (if_name == "icache_port")
        return &icachePort;
    else if (if_name == "physmem_port") {
        hasPhysMemPort = true;
        return &physmemPort;
    }
    else
        panic("No Such Port\n");
}

void
AtomicSimpleCPU::init()
{
    BaseCPU::init();
#if FULL_SYSTEM
    ThreadID size = threadContexts.size();
    for (ThreadID i = 0; i < size; ++i) {
        ThreadContext *tc = threadContexts[i];

        // initialize CPU, including PC
        TheISA::initCPU(tc, tc->contextId());
    }
#endif
    if (hasPhysMemPort) {
        bool snoop = false;
        AddrRangeList pmAddrList;
        physmemPort.getPeerAddressRanges(pmAddrList, snoop);
        physMemAddr = *pmAddrList.begin();
    }
    // Atomic doesn't do MT right now, so contextId == threadId
    ifetch_req.setThreadContext(_cpuId, 0); // Add thread ID if we add MT
    data_read_req.setThreadContext(_cpuId, 0); // Add thread ID here too
    data_write_req.setThreadContext(_cpuId, 0); // Add thread ID here too
}

bool
AtomicSimpleCPU::CpuPort::recvTiming(PacketPtr pkt)
{
    panic("AtomicSimpleCPU doesn't expect recvTiming callback!");
    return true;
}

Tick
AtomicSimpleCPU::CpuPort::recvAtomic(PacketPtr pkt)
{
    //Snooping a coherence request, just return
    return 0;
}

void
AtomicSimpleCPU::CpuPort::recvFunctional(PacketPtr pkt)
{
    //No internal storage to update, just return
    return;
}

void
AtomicSimpleCPU::CpuPort::recvStatusChange(Status status)
{
    if (status == RangeChange) {
        if (!snoopRangeSent) {
            snoopRangeSent = true;
            sendStatusChange(Port::RangeChange);
        }
        return;
    }

    panic("AtomicSimpleCPU doesn't expect recvStatusChange callback!");
}

void
AtomicSimpleCPU::CpuPort::recvRetry()
{
    panic("AtomicSimpleCPU doesn't expect recvRetry callback!");
}

void
AtomicSimpleCPU::DcachePort::setPeer(Port *port)
{
    Port::setPeer(port);

#if FULL_SYSTEM
    // Update the ThreadContext's memory ports (Functional/Virtual
    // Ports)
    cpu->tcBase()->connectMemPorts(cpu->tcBase());
#endif
}

AtomicSimpleCPU::AtomicSimpleCPU(AtomicSimpleCPUParams *p)
    : BaseSimpleCPU(p), tickEvent(this), width(p->width), locked(false),
      simulate_data_stalls(p->simulate_data_stalls),
      simulate_inst_stalls(p->simulate_inst_stalls),
      icachePort(name() + "-iport", this), dcachePort(name() + "-iport", this),
      physmemPort(name() + "-iport", this), hasPhysMemPort(false)
{
    _status = Idle;

    icachePort.snoopRangeSent = false;
    dcachePort.snoopRangeSent = false;

}


AtomicSimpleCPU::~AtomicSimpleCPU()
{
}

void
AtomicSimpleCPU::serialize(ostream &os)
{
    SimObject::State so_state = SimObject::getState();
    SERIALIZE_ENUM(so_state);
    SERIALIZE_SCALAR(locked);
    BaseSimpleCPU::serialize(os);
    nameOut(os, csprintf("%s.tickEvent", name()));
    tickEvent.serialize(os);
}

void
AtomicSimpleCPU::unserialize(Checkpoint *cp, const string &section)
{
    SimObject::State so_state;
    UNSERIALIZE_ENUM(so_state);
    UNSERIALIZE_SCALAR(locked);
    BaseSimpleCPU::unserialize(cp, section);
    tickEvent.unserialize(cp, csprintf("%s.tickEvent", section));
}

void
AtomicSimpleCPU::resume()
{
    if (_status == Idle || _status == SwitchedOut)
        return;

    DPRINTF(SimpleCPU, "Resume\n");
    assert(system->getMemoryMode() == Enums::atomic);

    changeState(SimObject::Running);
    if (thread->status() == ThreadContext::Active) {
        if (!tickEvent.scheduled())
            schedule(tickEvent, nextCycle());
    }
}

void
AtomicSimpleCPU::switchOut()
{
    assert(_status == Running || _status == Idle);
    _status = SwitchedOut;

    tickEvent.squash();
}


void
AtomicSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
{
    BaseCPU::takeOverFrom(oldCPU, &icachePort, &dcachePort);

    assert(!tickEvent.scheduled());

    // if any of this CPU's ThreadContexts are active, mark the CPU as
    // running and schedule its tick event.
    ThreadID size = threadContexts.size();
    for (ThreadID i = 0; i < size; ++i) {
        ThreadContext *tc = threadContexts[i];
        if (tc->status() == ThreadContext::Active && _status != Running) {
            _status = Running;
            schedule(tickEvent, nextCycle());
            break;
        }
    }
    if (_status != Running) {
        _status = Idle;
    }
    assert(threadContexts.size() == 1);
    ifetch_req.setThreadContext(_cpuId, 0); // Add thread ID if we add MT
    data_read_req.setThreadContext(_cpuId, 0); // Add thread ID here too
    data_write_req.setThreadContext(_cpuId, 0); // Add thread ID here too
}


void
AtomicSimpleCPU::activateContext(int thread_num, int delay)
{
    DPRINTF(SimpleCPU, "ActivateContext %d (%d cycles)\n", thread_num, delay);

    assert(thread_num == 0);
    assert(thread);

    assert(_status == Idle);
    assert(!tickEvent.scheduled());

    notIdleFraction++;
    numCycles += tickToCycles(thread->lastActivate - thread->lastSuspend);

    //Make sure ticks are still on multiples of cycles
    schedule(tickEvent, nextCycle(curTick + ticks(delay)));
    _status = Running;
}


void
AtomicSimpleCPU::suspendContext(int thread_num)
{
    DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);

    assert(thread_num == 0);
    assert(thread);

    if (_status == Idle)
        return;

    assert(_status == Running);

    // tick event may not be scheduled if this gets called from inside
    // an instruction's execution, e.g. "quiesce"
    if (tickEvent.scheduled())
        deschedule(tickEvent);

    notIdleFraction--;
    _status = Idle;
}


template <class T>
Fault
AtomicSimpleCPU::read(Addr addr, T &data, unsigned flags)
{
    // use the CPU's statically allocated read request and packet objects
    Request *req = &data_read_req;

    if (traceData) {
        traceData->setAddr(addr);
    }

    //The block size of our peer.
    unsigned blockSize = dcachePort.peerBlockSize();
    //The size of the data we're trying to read.
    int dataSize = sizeof(T);

    uint8_t * dataPtr = (uint8_t *)&data;

    //The address of the second part of this access if it needs to be split
    //across a cache line boundary.
    Addr secondAddr = roundDown(addr + dataSize - 1, blockSize);

    if(secondAddr > addr)
        dataSize = secondAddr - addr;

    dcache_latency = 0;

    while(1) {
        req->setVirt(0, addr, dataSize, flags, thread->readPC());

        // translate to physical address
        Fault fault = thread->dtb->translateAtomic(req, tc, BaseTLB::Read);

        // Now do the access.
        if (fault == NoFault && !req->getFlags().isSet(Request::NO_ACCESS)) {
            Packet pkt = Packet(req,
                    req->isLLSC() ? MemCmd::LoadLockedReq : MemCmd::ReadReq,
                    Packet::Broadcast);
            pkt.dataStatic(dataPtr);

            if (req->isMmapedIpr())
                dcache_latency += TheISA::handleIprRead(thread->getTC(), &pkt);
            else {
                if (hasPhysMemPort && pkt.getAddr() == physMemAddr)
                    dcache_latency += physmemPort.sendAtomic(&pkt);
                else
                    dcache_latency += dcachePort.sendAtomic(&pkt);
            }
            dcache_access = true;

            assert(!pkt.isError());

            if (req->isLLSC()) {
                TheISA::handleLockedRead(thread, req);
            }
        }

        // This will need a new way to tell if it has a dcache attached.
        if (req->isUncacheable())
            recordEvent("Uncached Read");

        //If there's a fault, return it
        if (fault != NoFault)
            return fault;
        //If we don't need to access a second cache line, stop now.
        if (secondAddr <= addr)
        {
            data = gtoh(data);
            if (traceData) {
                traceData->setData(data);
            }
            if (req->isLocked() && fault == NoFault) {
                assert(!locked);
                locked = true;
            }
            return fault;
        }

        /*
         * Set up for accessing the second cache line.
         */

        //Move the pointer we're reading into to the correct location.
        dataPtr += dataSize;
        //Adjust the size to get the remaining bytes.
        dataSize = addr + sizeof(T) - secondAddr;
        //And access the right address.
        addr = secondAddr;
    }
}

#ifndef DOXYGEN_SHOULD_SKIP_THIS

template
Fault
AtomicSimpleCPU::read(Addr addr, Twin32_t &data, unsigned flags);

template
Fault
AtomicSimpleCPU::read(Addr addr, Twin64_t &data, unsigned flags);

template
Fault
AtomicSimpleCPU::read(Addr addr, uint64_t &data, unsigned flags);

template
Fault
AtomicSimpleCPU::read(Addr addr, uint32_t &data, unsigned flags);

template
Fault
AtomicSimpleCPU::read(Addr addr, uint16_t &data, unsigned flags);

template
Fault
AtomicSimpleCPU::read(Addr addr, uint8_t &data, unsigned flags);

#endif //DOXYGEN_SHOULD_SKIP_THIS

template<>
Fault
AtomicSimpleCPU::read(Addr addr, double &data, unsigned flags)
{
    return read(addr, *(uint64_t*)&data, flags);
}

template<>
Fault
AtomicSimpleCPU::read(Addr addr, float &data, unsigned flags)
{
    return read(addr, *(uint32_t*)&data, flags);
}


template<>
Fault
AtomicSimpleCPU::read(Addr addr, int32_t &data, unsigned flags)
{
    return read(addr, (uint32_t&)data, flags);
}


template <class T>
Fault
AtomicSimpleCPU::write(T data, Addr addr, unsigned flags, uint64_t *res)
{
    // use the CPU's statically allocated write request and packet objects
    Request *req = &data_write_req;

    if (traceData) {
        traceData->setAddr(addr);
    }

    //The block size of our peer.
    unsigned blockSize = dcachePort.peerBlockSize();
    //The size of the data we're trying to write.
    int dataSize = sizeof(T);

    uint8_t * dataPtr = (uint8_t *)&data;

    //The address of the second part of this access if it needs to be split
    //across a cache line boundary.
    Addr secondAddr = roundDown(addr + dataSize - 1, blockSize);

    if(secondAddr > addr)
        dataSize = secondAddr - addr;

    dcache_latency = 0;

    while(1) {
        req->setVirt(0, addr, dataSize, flags, thread->readPC());

        // translate to physical address
        Fault fault = thread->dtb->translateAtomic(req, tc, BaseTLB::Write);

        // Now do the access.
        if (fault == NoFault) {
            MemCmd cmd = MemCmd::WriteReq; // default
            bool do_access = true;  // flag to suppress cache access

            if (req->isLLSC()) {
                cmd = MemCmd::StoreCondReq;
                do_access = TheISA::handleLockedWrite(thread, req);
            } else if (req->isSwap()) {
                cmd = MemCmd::SwapReq;
                if (req->isCondSwap()) {
                    assert(res);
                    req->setExtraData(*res);
                }
            }

            if (do_access && !req->getFlags().isSet(Request::NO_ACCESS)) {
                Packet pkt = Packet(req, cmd, Packet::Broadcast);
                pkt.dataStatic(dataPtr);

                if (req->isMmapedIpr()) {
                    dcache_latency +=
                        TheISA::handleIprWrite(thread->getTC(), &pkt);
                } else {
                    //XXX This needs to be outside of the loop in order to
                    //work properly for cache line boundary crossing
                    //accesses in transendian simulations.
                    data = htog(data);
                    if (hasPhysMemPort && pkt.getAddr() == physMemAddr)
                        dcache_latency += physmemPort.sendAtomic(&pkt);
                    else
                        dcache_latency += dcachePort.sendAtomic(&pkt);
                }
                dcache_access = true;
                assert(!pkt.isError());

                if (req->isSwap()) {
                    assert(res);
                    *res = pkt.get<T>();
                }
            }

            if (res && !req->isSwap()) {
                *res = req->getExtraData();
            }
        }

        // This will need a new way to tell if it's hooked up to a cache or not.
        if (req->isUncacheable())
            recordEvent("Uncached Write");

        //If there's a fault or we don't need to access a second cache line,
        //stop now.
        if (fault != NoFault || secondAddr <= addr)
        {
            // If the write needs to have a fault on the access, consider
            // calling changeStatus() and changing it to "bad addr write"
            // or something.
            if (traceData) {
                traceData->setData(gtoh(data));
            }
            if (req->isLocked() && fault == NoFault) {
                assert(locked);
                locked = false;
            }
            return fault;
        }

        /*
         * Set up for accessing the second cache line.
         */

        //Move the pointer we're writing from to the correct location.
        dataPtr += dataSize;
        //Adjust the size to get the remaining bytes.
        dataSize = addr + sizeof(T) - secondAddr;
        //And access the right address.
        addr = secondAddr;
    }
}


#ifndef DOXYGEN_SHOULD_SKIP_THIS

template
Fault
AtomicSimpleCPU::write(Twin32_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
AtomicSimpleCPU::write(Twin64_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
AtomicSimpleCPU::write(uint64_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
AtomicSimpleCPU::write(uint32_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
AtomicSimpleCPU::write(uint16_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
AtomicSimpleCPU::write(uint8_t data, Addr addr,
                       unsigned flags, uint64_t *res);

#endif //DOXYGEN_SHOULD_SKIP_THIS

template<>
Fault
AtomicSimpleCPU::write(double data, Addr addr, unsigned flags, uint64_t *res)
{
    return write(*(uint64_t*)&data, addr, flags, res);
}

template<>
Fault
AtomicSimpleCPU::write(float data, Addr addr, unsigned flags, uint64_t *res)
{
    return write(*(uint32_t*)&data, addr, flags, res);
}


template<>
Fault
AtomicSimpleCPU::write(int32_t data, Addr addr, unsigned flags, uint64_t *res)
{
    return write((uint32_t)data, addr, flags, res);
}


void
AtomicSimpleCPU::tick()
{
    DPRINTF(SimpleCPU, "Tick\n");

    Tick latency = 0;

    for (int i = 0; i < width || locked; ++i) {
        numCycles++;

        if (!curStaticInst || !curStaticInst->isDelayedCommit())
            checkForInterrupts();

        checkPcEventQueue();

        Fault fault = NoFault;

        bool fromRom = isRomMicroPC(thread->readMicroPC());
        if (!fromRom && !curMacroStaticInst) {
            setupFetchRequest(&ifetch_req);
            fault = thread->itb->translateAtomic(&ifetch_req, tc,
                                                 BaseTLB::Execute);
        }

        if (fault == NoFault) {
            Tick icache_latency = 0;
            bool icache_access = false;
            dcache_access = false; // assume no dcache access

            if (!fromRom && !curMacroStaticInst) {
                // This is commented out because the predecoder would act like
                // a tiny cache otherwise.
                // It wouldn't be flushed when needed
                // like the I cache. It should be flushed, and when that works
                // this code should be uncommented.
                //Fetch more instruction memory if necessary
                //if(predecoder.needMoreBytes())
                //{
                icache_access = true;
                Packet ifetch_pkt = Packet(&ifetch_req, MemCmd::ReadReq,
                                           Packet::Broadcast);
                ifetch_pkt.dataStatic(&inst);

                if (hasPhysMemPort && ifetch_pkt.getAddr() == physMemAddr)
                    icache_latency = physmemPort.sendAtomic(&ifetch_pkt);
                else
                    icache_latency = icachePort.sendAtomic(&ifetch_pkt);

                assert(!ifetch_pkt.isError());

                // ifetch_req is initialized to read the instruction directly
                // into the CPU object's inst field.
                //}
            }

            preExecute();

            if (curStaticInst) {
                fault = curStaticInst->execute(this, traceData);

                // keep an instruction count
                if (fault == NoFault)
                    countInst();
                else if (traceData) {
                    // If there was a fault, we should trace this instruction.
                    delete traceData;
                    traceData = NULL;
                }

                postExecute();
            }

            // @todo remove me after debugging with legion done
            if (curStaticInst && (!curStaticInst->isMicroop() ||
                        curStaticInst->isFirstMicroop()))
                instCnt++;

            Tick stall_ticks = 0;
            if (simulate_inst_stalls && icache_access)
                stall_ticks += icache_latency;

            if (simulate_data_stalls && dcache_access)
                stall_ticks += dcache_latency;

            if (stall_ticks) {
                Tick stall_cycles = stall_ticks / ticks(1);
                Tick aligned_stall_ticks = ticks(stall_cycles);

                if (aligned_stall_ticks < stall_ticks)
                    aligned_stall_ticks += 1;

                latency += aligned_stall_ticks;
            }

        }
        if(fault != NoFault || !stayAtPC)
            advancePC(fault);
    }

    // instruction takes at least one cycle
    if (latency < ticks(1))
        latency = ticks(1);

    if (_status != Idle)
        schedule(tickEvent, curTick + latency);
}


void
AtomicSimpleCPU::printAddr(Addr a)
{
    dcachePort.printAddr(a);
}


////////////////////////////////////////////////////////////////////////
//
//  AtomicSimpleCPU Simulation Object
//
AtomicSimpleCPU *
AtomicSimpleCPUParams::create()
{
    numThreads = 1;
#if !FULL_SYSTEM
    if (workload.size() != 1)
        panic("only one workload allowed");
#endif
    return new AtomicSimpleCPU(this);
}