atomic.cc revision 6078
/*
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 */

#include "arch/locked_mem.hh"
#include "arch/mmaped_ipr.hh"
#include "arch/utility.hh"
#include "base/bigint.hh"
#include "cpu/exetrace.hh"
#include "cpu/simple/atomic.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "params/AtomicSimpleCPU.hh"
#include "sim/system.hh"

using namespace std;
using namespace TheISA;

AtomicSimpleCPU::TickEvent::TickEvent(AtomicSimpleCPU *c)
    : Event(CPU_Tick_Pri), cpu(c)
{
}


void
AtomicSimpleCPU::TickEvent::process()
{
    cpu->tick();
}

const char *
AtomicSimpleCPU::TickEvent::description() const
{
    return "AtomicSimpleCPU tick";
}

Port *
AtomicSimpleCPU::getPort(const std::string &if_name, int idx)
{
    if (if_name == "dcache_port")
        return &dcachePort;
    else if (if_name == "icache_port")
        return &icachePort;
    else if (if_name == "physmem_port") {
        hasPhysMemPort = true;
        return &physmemPort;
    }
    else
        panic("No Such Port\n");
}

void
AtomicSimpleCPU::init()
{
    BaseCPU::init();
#if FULL_SYSTEM
    for (int i = 0; i < threadContexts.size(); ++i) {
        ThreadContext *tc = threadContexts[i];

        // initialize CPU, including PC
        TheISA::initCPU(tc, tc->contextId());
    }
#endif
    if (hasPhysMemPort) {
        bool snoop = false;
        AddrRangeList pmAddrList;
        physmemPort.getPeerAddressRanges(pmAddrList, snoop);
        physMemAddr = *pmAddrList.begin();
    }
    // Atomic doesn't do MT right now, so contextId == threadId
    ifetch_req.setThreadContext(_cpuId, 0); // Add thread ID if we add MT
    data_read_req.setThreadContext(_cpuId, 0); // Add thread ID here too
    data_write_req.setThreadContext(_cpuId, 0); // Add thread ID here too
}

bool
AtomicSimpleCPU::CpuPort::recvTiming(PacketPtr pkt)
{
    panic("AtomicSimpleCPU doesn't expect recvTiming callback!");
    return true;
}

Tick
AtomicSimpleCPU::CpuPort::recvAtomic(PacketPtr pkt)
{
    //Snooping a coherence request, just return
    return 0;
}

void
AtomicSimpleCPU::CpuPort::recvFunctional(PacketPtr pkt)
{
    //No internal storage to update, just return
    return;
}

void
AtomicSimpleCPU::CpuPort::recvStatusChange(Status status)
{
    if (status == RangeChange) {
        if (!snoopRangeSent) {
            snoopRangeSent = true;
            sendStatusChange(Port::RangeChange);
        }
        return;
    }

    panic("AtomicSimpleCPU doesn't expect recvStatusChange callback!");
}

void
AtomicSimpleCPU::CpuPort::recvRetry()
{
    panic("AtomicSimpleCPU doesn't expect recvRetry callback!");
}

void
AtomicSimpleCPU::DcachePort::setPeer(Port *port)
{
    Port::setPeer(port);

#if FULL_SYSTEM
    // Update the ThreadContext's memory ports (Functional/Virtual
    // Ports)
    cpu->tcBase()->connectMemPorts(cpu->tcBase());
#endif
}

AtomicSimpleCPU::AtomicSimpleCPU(AtomicSimpleCPUParams *p)
    : BaseSimpleCPU(p), tickEvent(this), width(p->width), locked(false),
      simulate_data_stalls(p->simulate_data_stalls),
      simulate_inst_stalls(p->simulate_inst_stalls),
      icachePort(name() + "-iport", this), dcachePort(name() + "-dport", this),
      physmemPort(name() + "-pmemport", this), hasPhysMemPort(false)
{
    _status = Idle;

    icachePort.snoopRangeSent = false;
    dcachePort.snoopRangeSent = false;
}


AtomicSimpleCPU::~AtomicSimpleCPU()
{
}

void
AtomicSimpleCPU::serialize(ostream &os)
{
    SimObject::State so_state = SimObject::getState();
    SERIALIZE_ENUM(so_state);
    SERIALIZE_SCALAR(locked);
    BaseSimpleCPU::serialize(os);
    nameOut(os, csprintf("%s.tickEvent", name()));
    tickEvent.serialize(os);
}

void
AtomicSimpleCPU::unserialize(Checkpoint *cp, const string &section)
{
    SimObject::State so_state;
    UNSERIALIZE_ENUM(so_state);
    UNSERIALIZE_SCALAR(locked);
    BaseSimpleCPU::unserialize(cp, section);
    tickEvent.unserialize(cp, csprintf("%s.tickEvent", section));
}

void
AtomicSimpleCPU::resume()
{
    if (_status == Idle || _status == SwitchedOut)
        return;

    DPRINTF(SimpleCPU, "Resume\n");
    assert(system->getMemoryMode() == Enums::atomic);

    changeState(SimObject::Running);
    if (thread->status() == ThreadContext::Active) {
        if (!tickEvent.scheduled())
            schedule(tickEvent, nextCycle());
    }
}

void
AtomicSimpleCPU::switchOut()
{
    assert(_status == Running || _status == Idle);
    _status = SwitchedOut;

    tickEvent.squash();
}


void
AtomicSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
{
    BaseCPU::takeOverFrom(oldCPU, &icachePort, &dcachePort);

    assert(!tickEvent.scheduled());

    // if any of this CPU's ThreadContexts are active, mark the CPU as
    // running and schedule its tick event.
    for (int i = 0; i < threadContexts.size(); ++i) {
        ThreadContext *tc = threadContexts[i];
        if (tc->status() == ThreadContext::Active && _status != Running) {
            _status = Running;
            schedule(tickEvent, nextCycle());
            break;
        }
    }
    if (_status != Running) {
        _status = Idle;
    }
    assert(threadContexts.size() == 1);
    ifetch_req.setThreadContext(_cpuId, 0); // Add thread ID if we add MT
    data_read_req.setThreadContext(_cpuId, 0); // Add thread ID here too
    data_write_req.setThreadContext(_cpuId, 0); // Add thread ID here too
}


void
AtomicSimpleCPU::activateContext(int thread_num, int delay)
{
    DPRINTF(SimpleCPU, "ActivateContext %d (%d cycles)\n", thread_num, delay);

    assert(thread_num == 0);
    assert(thread);

    assert(_status == Idle);
    assert(!tickEvent.scheduled());

    notIdleFraction++;
    numCycles += tickToCycles(thread->lastActivate - thread->lastSuspend);

    //Make sure ticks are still on multiples of cycles
    schedule(tickEvent, nextCycle(curTick + ticks(delay)));
    _status = Running;
}


void
AtomicSimpleCPU::suspendContext(int thread_num)
{
    DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);

    assert(thread_num == 0);
    assert(thread);

    if (_status == Idle)
        return;

    assert(_status == Running);

    // tick event may not be scheduled if this gets called from inside
    // an instruction's execution, e.g. "quiesce"
    if (tickEvent.scheduled())
        deschedule(tickEvent);

    notIdleFraction--;
    _status = Idle;
}


template <class T>
Fault
AtomicSimpleCPU::read(Addr addr, T &data, unsigned flags)
{
    // use the CPU's statically allocated read request and packet objects
    Request *req = &data_read_req;

    if (traceData) {
        traceData->setAddr(addr);
    }

    //The block size of our peer.
    int blockSize = dcachePort.peerBlockSize();
    //The size of the data we're trying to read.
    int dataSize = sizeof(T);

    uint8_t *dataPtr = (uint8_t *)&data;

    //The address of the second part of this access if it needs to be split
    //across a cache line boundary.
    Addr secondAddr = roundDown(addr + dataSize - 1, blockSize);

    if (secondAddr > addr)
        dataSize = secondAddr - addr;

    dcache_latency = 0;

    while (1) {
        req->setVirt(0, addr, dataSize, flags, thread->readPC());

        // translate to physical address
        Fault fault = thread->dtb->translateAtomic(req, tc, BaseTLB::Read);

        // Now do the access.
        if (fault == NoFault) {
            Packet pkt = Packet(req,
                    req->isLlsc() ? MemCmd::LoadLockedReq : MemCmd::ReadReq,
                    Packet::Broadcast);
            pkt.dataStatic(dataPtr);

            if (req->isMmapedIpr())
                dcache_latency += TheISA::handleIprRead(thread->getTC(), &pkt);
            else {
                if (hasPhysMemPort && pkt.getAddr() == physMemAddr)
                    dcache_latency += physmemPort.sendAtomic(&pkt);
                else
                    dcache_latency += dcachePort.sendAtomic(&pkt);
            }
            dcache_access = true;

            assert(!pkt.isError());

            if (req->isLlsc()) {
                TheISA::handleLockedRead(thread, req);
            }
        }

        // This will need a new way to tell if it has a dcache attached.
        if (req->isUncacheable())
            recordEvent("Uncached Read");

        //If there's a fault, return it
        if (fault != NoFault)
            return fault;
        //If we don't need to access a second cache line, stop now.
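        //(secondAddr only points past addr when the access spans a block
        //boundary; e.g. with 64-byte blocks, a 4-byte read at 0x3e becomes
        //2 bytes at 0x3e followed by 2 bytes at secondAddr = 0x40.
        //Otherwise secondAddr <= addr and the access completes here.)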
        if (secondAddr <= addr)
        {
            data = gtoh(data);
            if (traceData) {
                traceData->setData(data);
            }
            if (req->isLocked() && fault == NoFault) {
                assert(!locked);
                locked = true;
            }
            return fault;
        }

        /*
         * Set up for accessing the second cache line.
         */

        //Move the pointer we're reading into to the correct location.
        dataPtr += dataSize;
        //Adjust the size to get the remaining bytes.
        dataSize = addr + sizeof(T) - secondAddr;
        //And access the right address.
        addr = secondAddr;
    }
}

#ifndef DOXYGEN_SHOULD_SKIP_THIS

template
Fault
AtomicSimpleCPU::read(Addr addr, Twin32_t &data, unsigned flags);

template
Fault
AtomicSimpleCPU::read(Addr addr, Twin64_t &data, unsigned flags);

template
Fault
AtomicSimpleCPU::read(Addr addr, uint64_t &data, unsigned flags);

template
Fault
AtomicSimpleCPU::read(Addr addr, uint32_t &data, unsigned flags);

template
Fault
AtomicSimpleCPU::read(Addr addr, uint16_t &data, unsigned flags);

template
Fault
AtomicSimpleCPU::read(Addr addr, uint8_t &data, unsigned flags);

#endif //DOXYGEN_SHOULD_SKIP_THIS

template<>
Fault
AtomicSimpleCPU::read(Addr addr, double &data, unsigned flags)
{
    return read(addr, *(uint64_t*)&data, flags);
}

template<>
Fault
AtomicSimpleCPU::read(Addr addr, float &data, unsigned flags)
{
    return read(addr, *(uint32_t*)&data, flags);
}


template<>
Fault
AtomicSimpleCPU::read(Addr addr, int32_t &data, unsigned flags)
{
    return read(addr, (uint32_t&)data, flags);
}


template <class T>
Fault
AtomicSimpleCPU::write(T data, Addr addr, unsigned flags, uint64_t *res)
{
    // use the CPU's statically allocated write request and packet objects
    Request *req = &data_write_req;

    if (traceData) {
        traceData->setAddr(addr);
    }

    //The block size of our peer.
    int blockSize = dcachePort.peerBlockSize();
    //The size of the data we're trying to write.
    int dataSize = sizeof(T);

    uint8_t *dataPtr = (uint8_t *)&data;

    //The address of the second part of this access if it needs to be split
    //across a cache line boundary.
    Addr secondAddr = roundDown(addr + dataSize - 1, blockSize);

    if (secondAddr > addr)
        dataSize = secondAddr - addr;

    dcache_latency = 0;

    while (1) {
        req->setVirt(0, addr, dataSize, flags, thread->readPC());

        // translate to physical address
        Fault fault = thread->dtb->translateAtomic(req, tc, BaseTLB::Write);

        // Now do the access.
        if (fault == NoFault) {
            MemCmd cmd = MemCmd::WriteReq; // default
            bool do_access = true;  // flag to suppress cache access

            if (req->isLlsc()) {
                cmd = MemCmd::StoreCondReq;
                do_access = TheISA::handleLockedWrite(thread, req);
            } else if (req->isSwap()) {
                cmd = MemCmd::SwapReq;
                if (req->isCondSwap()) {
                    assert(res);
                    req->setExtraData(*res);
                }
            }

            if (do_access) {
                Packet pkt = Packet(req, cmd, Packet::Broadcast);
                pkt.dataStatic(dataPtr);

                if (req->isMmapedIpr()) {
                    dcache_latency +=
                        TheISA::handleIprWrite(thread->getTC(), &pkt);
                } else {
                    //XXX This needs to be outside of the loop in order to
                    //work properly for cache line boundary crossing
                    //accesses in transendian simulations.
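                    //(htog() byte-swaps the value from host to guest byte
                    //order when host and simulated target endianness differ.)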
                    data = htog(data);
                    if (hasPhysMemPort && pkt.getAddr() == physMemAddr)
                        dcache_latency += physmemPort.sendAtomic(&pkt);
                    else
                        dcache_latency += dcachePort.sendAtomic(&pkt);
                }
                dcache_access = true;
                assert(!pkt.isError());

                if (req->isSwap()) {
                    assert(res);
                    *res = pkt.get<T>();
                }
            }

            if (res && !req->isSwap()) {
                *res = req->getExtraData();
            }
        }

        // This will need a new way to tell if it's hooked up to a cache or not.
        if (req->isUncacheable())
            recordEvent("Uncached Write");

        //If there's a fault or we don't need to access a second cache line,
        //stop now.
        if (fault != NoFault || secondAddr <= addr)
        {
            // If the write needs to have a fault on the access, consider
            // calling changeStatus() and changing it to "bad addr write"
            // or something.
            if (traceData) {
                traceData->setData(gtoh(data));
            }
            if (req->isLocked() && fault == NoFault) {
                assert(locked);
                locked = false;
            }
            return fault;
        }

        /*
         * Set up for accessing the second cache line.
         */

        //Move the pointer we're writing from to the correct location.
        dataPtr += dataSize;
        //Adjust the size to get the remaining bytes.
        dataSize = addr + sizeof(T) - secondAddr;
        //And access the right address.
        addr = secondAddr;
    }
}


#ifndef DOXYGEN_SHOULD_SKIP_THIS

template
Fault
AtomicSimpleCPU::write(Twin32_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
AtomicSimpleCPU::write(Twin64_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
AtomicSimpleCPU::write(uint64_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
AtomicSimpleCPU::write(uint32_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
AtomicSimpleCPU::write(uint16_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
AtomicSimpleCPU::write(uint8_t data, Addr addr,
                       unsigned flags, uint64_t *res);

#endif //DOXYGEN_SHOULD_SKIP_THIS

template<>
Fault
AtomicSimpleCPU::write(double data, Addr addr, unsigned flags, uint64_t *res)
{
    return write(*(uint64_t*)&data, addr, flags, res);
}

template<>
Fault
AtomicSimpleCPU::write(float data, Addr addr, unsigned flags, uint64_t *res)
{
    return write(*(uint32_t*)&data, addr, flags, res);
}


template<>
Fault
AtomicSimpleCPU::write(int32_t data, Addr addr, unsigned flags, uint64_t *res)
{
    return write((uint32_t)data, addr, flags, res);
}


void
AtomicSimpleCPU::tick()
{
    DPRINTF(SimpleCPU, "Tick\n");

    Tick latency = 0;

    for (int i = 0; i < width || locked; ++i) {
        numCycles++;

        if (!curStaticInst || !curStaticInst->isDelayedCommit())
            checkForInterrupts();

        checkPcEventQueue();

        Fault fault = NoFault;

        bool fromRom = isRomMicroPC(thread->readMicroPC());
        if (!fromRom && !curMacroStaticInst) {
            setupFetchRequest(&ifetch_req);
            fault = thread->itb->translateAtomic(&ifetch_req, tc,
                                                 BaseTLB::Execute);
        }

        if (fault == NoFault) {
            Tick icache_latency = 0;
            bool icache_access = false;
            dcache_access = false; // assume no dcache access

            if (!fromRom && !curMacroStaticInst) {
                // This is commented out because the predecoder would act like
                // a tiny cache otherwise.
                // It wouldn't be flushed when needed
                // like the I cache. It should be flushed, and when that works
                // this code should be uncommented.
                //Fetch more instruction memory if necessary
                //if(predecoder.needMoreBytes())
                //{
                    icache_access = true;
                    Packet ifetch_pkt = Packet(&ifetch_req, MemCmd::ReadReq,
                                               Packet::Broadcast);
                    ifetch_pkt.dataStatic(&inst);

                    if (hasPhysMemPort && ifetch_pkt.getAddr() == physMemAddr)
                        icache_latency = physmemPort.sendAtomic(&ifetch_pkt);
                    else
                        icache_latency = icachePort.sendAtomic(&ifetch_pkt);

                    assert(!ifetch_pkt.isError());

                    // ifetch_req is initialized to read the instruction
                    // directly into the CPU object's inst field.
                //}
            }

            preExecute();

            if (curStaticInst) {
                fault = curStaticInst->execute(this, traceData);

                // keep an instruction count
                if (fault == NoFault)
                    countInst();
                else if (traceData) {
                    // If there was a fault, we should trace this instruction.
                    delete traceData;
                    traceData = NULL;
                }

                postExecute();
            }

            // @todo remove me after debugging with legion done
            if (curStaticInst && (!curStaticInst->isMicroop() ||
                        curStaticInst->isFirstMicroop()))
                instCnt++;

            Tick stall_ticks = 0;
            if (simulate_inst_stalls && icache_access)
                stall_ticks += icache_latency;

            if (simulate_data_stalls && dcache_access)
                stall_ticks += dcache_latency;

            if (stall_ticks) {
                // round the stall up to a whole number of CPU cycles
                Tick stall_cycles = stall_ticks / ticks(1);
                Tick aligned_stall_ticks = ticks(stall_cycles);

                if (aligned_stall_ticks < stall_ticks)
                    aligned_stall_ticks += ticks(1);

                latency += aligned_stall_ticks;
            }

        }
        if (fault != NoFault || !stayAtPC)
            advancePC(fault);
    }

    // instruction takes at least one cycle
    if (latency < ticks(1))
        latency = ticks(1);

    if (_status != Idle)
        schedule(tickEvent, curTick + latency);
}


void
AtomicSimpleCPU::printAddr(Addr a)
{
    dcachePort.printAddr(a);
}


////////////////////////////////////////////////////////////////////////
//
// AtomicSimpleCPU Simulation Object
//
AtomicSimpleCPU *
AtomicSimpleCPUParams::create()
{
    numThreads = 1;
#if !FULL_SYSTEM
    if (workload.size() != 1)
        panic("only one workload allowed");
#endif
    return new AtomicSimpleCPU(this);
}