atomic.cc revision 7897
/*
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 */

#include "arch/locked_mem.hh"
#include "arch/mmaped_ipr.hh"
#include "arch/utility.hh"
#include "base/bigint.hh"
#include "config/the_isa.hh"
#include "cpu/exetrace.hh"
#include "cpu/simple/atomic.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "params/AtomicSimpleCPU.hh"
#include "sim/faults.hh"
#include "sim/system.hh"

using namespace std;
using namespace TheISA;

AtomicSimpleCPU::TickEvent::TickEvent(AtomicSimpleCPU *c)
    : Event(CPU_Tick_Pri), cpu(c)
{
}


void
AtomicSimpleCPU::TickEvent::process()
{
    cpu->tick();
}

const char *
AtomicSimpleCPU::TickEvent::description() const
{
    return "AtomicSimpleCPU tick";
}

Port *
AtomicSimpleCPU::getPort(const string &if_name, int idx)
{
    if (if_name == "dcache_port")
        return &dcachePort;
    else if (if_name == "icache_port")
        return &icachePort;
    else if (if_name == "physmem_port") {
        hasPhysMemPort = true;
        return &physmemPort;
    }
    else
        panic("No Such Port\n");
}

void
AtomicSimpleCPU::init()
{
    BaseCPU::init();
#if FULL_SYSTEM
    ThreadID size = threadContexts.size();
    for (ThreadID i = 0; i < size; ++i) {
        ThreadContext *tc = threadContexts[i];

        // initialize CPU, including PC
        TheISA::initCPU(tc, tc->contextId());
    }
#endif
    if (hasPhysMemPort) {
        bool snoop = false;
        AddrRangeList pmAddrList;
        physmemPort.getPeerAddressRanges(pmAddrList, snoop);
        physMemAddr = *pmAddrList.begin();
    }
    // Atomic doesn't do MT right now, so contextId == threadId
    ifetch_req.setThreadContext(_cpuId, 0); // Add thread ID if we add MT
    data_read_req.setThreadContext(_cpuId, 0); // Add thread ID here too
    data_write_req.setThreadContext(_cpuId, 0); // Add thread ID here too
}

bool
AtomicSimpleCPU::CpuPort::recvTiming(PacketPtr pkt)
{
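    // Note: the atomic CPU drives memory exclusively through sendAtomic(),
    // so a timing-mode callback arriving here normally indicates the CPU is
    // connected to a memory system that is not running in atomic mode.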
panic("AtomicSimpleCPU doesn't expect recvTiming callback!"); 109 return true; 110} 111 112Tick 113AtomicSimpleCPU::CpuPort::recvAtomic(PacketPtr pkt) 114{ 115 //Snooping a coherence request, just return 116 return 0; 117} 118 119void 120AtomicSimpleCPU::CpuPort::recvFunctional(PacketPtr pkt) 121{ 122 //No internal storage to update, just return 123 return; 124} 125 126void 127AtomicSimpleCPU::CpuPort::recvStatusChange(Status status) 128{ 129 if (status == RangeChange) { 130 if (!snoopRangeSent) { 131 snoopRangeSent = true; 132 sendStatusChange(Port::RangeChange); 133 } 134 return; 135 } 136 137 panic("AtomicSimpleCPU doesn't expect recvStatusChange callback!"); 138} 139 140void 141AtomicSimpleCPU::CpuPort::recvRetry() 142{ 143 panic("AtomicSimpleCPU doesn't expect recvRetry callback!"); 144} 145 146void 147AtomicSimpleCPU::DcachePort::setPeer(Port *port) 148{ 149 Port::setPeer(port); 150 151#if FULL_SYSTEM 152 // Update the ThreadContext's memory ports (Functional/Virtual 153 // Ports) 154 cpu->tcBase()->connectMemPorts(cpu->tcBase()); 155#endif 156} 157 158AtomicSimpleCPU::AtomicSimpleCPU(AtomicSimpleCPUParams *p) 159 : BaseSimpleCPU(p), tickEvent(this), width(p->width), locked(false), 160 simulate_data_stalls(p->simulate_data_stalls), 161 simulate_inst_stalls(p->simulate_inst_stalls), 162 icachePort(name() + "-iport", this), dcachePort(name() + "-iport", this), 163 physmemPort(name() + "-iport", this), hasPhysMemPort(false) 164{ 165 _status = Idle; 166 167 icachePort.snoopRangeSent = false; 168 dcachePort.snoopRangeSent = false; 169 170} 171 172 173AtomicSimpleCPU::~AtomicSimpleCPU() 174{ 175 if (tickEvent.scheduled()) { 176 deschedule(tickEvent); 177 } 178} 179 180void 181AtomicSimpleCPU::serialize(ostream &os) 182{ 183 SimObject::State so_state = SimObject::getState(); 184 SERIALIZE_ENUM(so_state); 185 SERIALIZE_SCALAR(locked); 186 BaseSimpleCPU::serialize(os); 187 nameOut(os, csprintf("%s.tickEvent", name())); 188 tickEvent.serialize(os); 189} 190 191void 192AtomicSimpleCPU::unserialize(Checkpoint *cp, const string §ion) 193{ 194 SimObject::State so_state; 195 UNSERIALIZE_ENUM(so_state); 196 UNSERIALIZE_SCALAR(locked); 197 BaseSimpleCPU::unserialize(cp, section); 198 tickEvent.unserialize(cp, csprintf("%s.tickEvent", section)); 199} 200 201void 202AtomicSimpleCPU::resume() 203{ 204 if (_status == Idle || _status == SwitchedOut) 205 return; 206 207 DPRINTF(SimpleCPU, "Resume\n"); 208 assert(system->getMemoryMode() == Enums::atomic); 209 210 changeState(SimObject::Running); 211 if (thread->status() == ThreadContext::Active) { 212 if (!tickEvent.scheduled()) 213 schedule(tickEvent, nextCycle()); 214 } 215 system->totalNumInsts = 0; 216} 217 218void 219AtomicSimpleCPU::switchOut() 220{ 221 assert(_status == Running || _status == Idle); 222 _status = SwitchedOut; 223 224 tickEvent.squash(); 225} 226 227 228void 229AtomicSimpleCPU::takeOverFrom(BaseCPU *oldCPU) 230{ 231 BaseCPU::takeOverFrom(oldCPU, &icachePort, &dcachePort); 232 233 assert(!tickEvent.scheduled()); 234 235 // if any of this CPU's ThreadContexts are active, mark the CPU as 236 // running and schedule its tick event. 
    ThreadID size = threadContexts.size();
    for (ThreadID i = 0; i < size; ++i) {
        ThreadContext *tc = threadContexts[i];
        if (tc->status() == ThreadContext::Active && _status != Running) {
            _status = Running;
            schedule(tickEvent, nextCycle());
            break;
        }
    }
    if (_status != Running) {
        _status = Idle;
    }
    assert(threadContexts.size() == 1);
    ifetch_req.setThreadContext(_cpuId, 0); // Add thread ID if we add MT
    data_read_req.setThreadContext(_cpuId, 0); // Add thread ID here too
    data_write_req.setThreadContext(_cpuId, 0); // Add thread ID here too
}


void
AtomicSimpleCPU::activateContext(int thread_num, int delay)
{
    DPRINTF(SimpleCPU, "ActivateContext %d (%d cycles)\n", thread_num, delay);

    assert(thread_num == 0);
    assert(thread);

    assert(_status == Idle);
    assert(!tickEvent.scheduled());

    notIdleFraction++;
    numCycles += tickToCycles(thread->lastActivate - thread->lastSuspend);

    //Make sure ticks are still on multiples of cycles
    schedule(tickEvent, nextCycle(curTick() + ticks(delay)));
    _status = Running;
}


void
AtomicSimpleCPU::suspendContext(int thread_num)
{
    DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);

    assert(thread_num == 0);
    assert(thread);

    if (_status == Idle)
        return;

    assert(_status == Running);

    // tick event may not be scheduled if this gets called from inside
    // an instruction's execution, e.g. "quiesce"
    if (tickEvent.scheduled())
        deschedule(tickEvent);

    notIdleFraction--;
    _status = Idle;
}


Fault
AtomicSimpleCPU::readBytes(Addr addr, uint8_t * data,
                           unsigned size, unsigned flags)
{
    // use the CPU's statically allocated read request and packet objects
    Request *req = &data_read_req;

    if (traceData) {
        traceData->setAddr(addr);
    }

    //The block size of our peer.
    unsigned blockSize = dcachePort.peerBlockSize();
    //The size of the data we're trying to read.
    int fullSize = size;

    //The address of the second part of this access if it needs to be split
    //across a cache line boundary.
    Addr secondAddr = roundDown(addr + size - 1, blockSize);

    if (secondAddr > addr)
        size = secondAddr - addr;

    dcache_latency = 0;

    while (1) {
        req->setVirt(0, addr, size, flags, thread->pcState().instAddr());

        // translate to physical address
        Fault fault = thread->dtb->translateAtomic(req, tc, BaseTLB::Read);

        // Now do the access.
        if (fault == NoFault && !req->getFlags().isSet(Request::NO_ACCESS)) {
            Packet pkt = Packet(req,
                    req->isLLSC() ? MemCmd::LoadLockedReq : MemCmd::ReadReq,
                    Packet::Broadcast);
            pkt.dataStatic(data);

            if (req->isMmapedIpr())
                dcache_latency += TheISA::handleIprRead(thread->getTC(), &pkt);
            else {
                if (hasPhysMemPort && pkt.getAddr() == physMemAddr)
                    dcache_latency += physmemPort.sendAtomic(&pkt);
                else
                    dcache_latency += dcachePort.sendAtomic(&pkt);
            }
            dcache_access = true;

            assert(!pkt.isError());

            if (req->isLLSC()) {
                TheISA::handleLockedRead(thread, req);
            }
        }

        //If there's a fault, return it
        if (fault != NoFault) {
            if (req->isPrefetch()) {
                return NoFault;
            } else {
                return fault;
            }
        }

        //If we don't need to access a second cache line, stop now.
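        //secondAddr is the start of the block containing the access's last
        //byte; if it isn't past addr, the whole request fit in one block and
        //has already been handled above.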
        if (secondAddr <= addr)
        {
            if (req->isLocked() && fault == NoFault) {
                assert(!locked);
                locked = true;
            }
            return fault;
        }

        /*
         * Set up for accessing the second cache line.
         */

        //Move the pointer we're reading into to the correct location.
        data += size;
        //Adjust the size to get the remaining bytes.
        size = addr + fullSize - secondAddr;
        //And access the right address.
        addr = secondAddr;
    }
}


template <class T>
Fault
AtomicSimpleCPU::read(Addr addr, T &data, unsigned flags)
{
    uint8_t *dataPtr = (uint8_t *)&data;
    memset(dataPtr, 0, sizeof(data));
    Fault fault = readBytes(addr, dataPtr, sizeof(data), flags);
    if (fault == NoFault) {
        data = gtoh(data);
        if (traceData)
            traceData->setData(data);
    }
    return fault;
}

#ifndef DOXYGEN_SHOULD_SKIP_THIS

template
Fault
AtomicSimpleCPU::read(Addr addr, Twin32_t &data, unsigned flags);

template
Fault
AtomicSimpleCPU::read(Addr addr, Twin64_t &data, unsigned flags);

template
Fault
AtomicSimpleCPU::read(Addr addr, uint64_t &data, unsigned flags);

template
Fault
AtomicSimpleCPU::read(Addr addr, uint32_t &data, unsigned flags);

template
Fault
AtomicSimpleCPU::read(Addr addr, uint16_t &data, unsigned flags);

template
Fault
AtomicSimpleCPU::read(Addr addr, uint8_t &data, unsigned flags);

#endif //DOXYGEN_SHOULD_SKIP_THIS

template<>
Fault
AtomicSimpleCPU::read(Addr addr, double &data, unsigned flags)
{
    return read(addr, *(uint64_t*)&data, flags);
}

template<>
Fault
AtomicSimpleCPU::read(Addr addr, float &data, unsigned flags)
{
    return read(addr, *(uint32_t*)&data, flags);
}


template<>
Fault
AtomicSimpleCPU::read(Addr addr, int32_t &data, unsigned flags)
{
    return read(addr, (uint32_t&)data, flags);
}


Fault
AtomicSimpleCPU::writeBytes(uint8_t *data, unsigned size,
                            Addr addr, unsigned flags, uint64_t *res)
{
    // use the CPU's statically allocated write request and packet objects
    Request *req = &data_write_req;

    if (traceData) {
        traceData->setAddr(addr);
    }

    //The block size of our peer.
    unsigned blockSize = dcachePort.peerBlockSize();
    //The size of the data we're trying to write.
    int fullSize = size;

    //The address of the second part of this access if it needs to be split
    //across a cache line boundary.
    Addr secondAddr = roundDown(addr + size - 1, blockSize);

    if (secondAddr > addr)
        size = secondAddr - addr;

    dcache_latency = 0;

    while (1) {
        req->setVirt(0, addr, size, flags, thread->pcState().instAddr());

        // translate to physical address
        Fault fault = thread->dtb->translateAtomic(req, tc, BaseTLB::Write);

        // Now do the access.
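        // Store-conditionals and swaps are sent with special commands below,
        // and handleLockedWrite() can tell us to suppress the memory access
        // entirely (e.g. for a store-conditional that has already failed).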
        if (fault == NoFault) {
            MemCmd cmd = MemCmd::WriteReq; // default
            bool do_access = true;  // flag to suppress cache access

            if (req->isLLSC()) {
                cmd = MemCmd::StoreCondReq;
                do_access = TheISA::handleLockedWrite(thread, req);
            } else if (req->isSwap()) {
                cmd = MemCmd::SwapReq;
                if (req->isCondSwap()) {
                    assert(res);
                    req->setExtraData(*res);
                }
            }

            if (do_access && !req->getFlags().isSet(Request::NO_ACCESS)) {
                Packet pkt = Packet(req, cmd, Packet::Broadcast);
                pkt.dataStatic(data);

                if (req->isMmapedIpr()) {
                    dcache_latency +=
                        TheISA::handleIprWrite(thread->getTC(), &pkt);
                } else {
                    if (hasPhysMemPort && pkt.getAddr() == physMemAddr)
                        dcache_latency += physmemPort.sendAtomic(&pkt);
                    else
                        dcache_latency += dcachePort.sendAtomic(&pkt);
                }
                dcache_access = true;
                assert(!pkt.isError());

                if (req->isSwap()) {
                    assert(res);
                    memcpy(res, pkt.getPtr<uint8_t>(), fullSize);
                }
            }

            if (res && !req->isSwap()) {
                *res = req->getExtraData();
            }
        }

        //If there's a fault or we don't need to access a second cache line,
        //stop now.
        if (fault != NoFault || secondAddr <= addr)
        {
            if (req->isLocked() && fault == NoFault) {
                assert(locked);
                locked = false;
            }
            if (fault != NoFault && req->isPrefetch()) {
                return NoFault;
            } else {
                return fault;
            }
        }

        /*
         * Set up for accessing the second cache line.
         */

        //Move the pointer we're writing from to the correct location.
        data += size;
        //Adjust the size to get the remaining bytes.
        size = addr + fullSize - secondAddr;
        //And access the right address.
        addr = secondAddr;
    }
}


template <class T>
Fault
AtomicSimpleCPU::write(T data, Addr addr, unsigned flags, uint64_t *res)
{
    uint8_t *dataPtr = (uint8_t *)&data;
    if (traceData)
        traceData->setData(data);
    data = htog(data);

    Fault fault = writeBytes(dataPtr, sizeof(data), addr, flags, res);
    if (fault == NoFault && data_write_req.isSwap()) {
        *res = gtoh((T)*res);
    }
    return fault;
}


#ifndef DOXYGEN_SHOULD_SKIP_THIS

template
Fault
AtomicSimpleCPU::write(Twin32_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
AtomicSimpleCPU::write(Twin64_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
AtomicSimpleCPU::write(uint64_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
AtomicSimpleCPU::write(uint32_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
AtomicSimpleCPU::write(uint16_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
AtomicSimpleCPU::write(uint8_t data, Addr addr,
                       unsigned flags, uint64_t *res);

#endif //DOXYGEN_SHOULD_SKIP_THIS

template<>
Fault
AtomicSimpleCPU::write(double data, Addr addr, unsigned flags, uint64_t *res)
{
    return write(*(uint64_t*)&data, addr, flags, res);
}

template<>
Fault
AtomicSimpleCPU::write(float data, Addr addr, unsigned flags, uint64_t *res)
{
    return write(*(uint32_t*)&data, addr, flags, res);
}


template<>
Fault
AtomicSimpleCPU::write(int32_t data, Addr addr, unsigned flags, uint64_t *res)
{
    return write((uint32_t)data, addr, flags, res);
}


void
AtomicSimpleCPU::tick()
{
    DPRINTF(SimpleCPU, "Tick\n");

    Tick latency = 0;
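    // latency accumulates the simulated cost of this tick: when stall
    // simulation is enabled, the cache access latencies observed below are
    // added in, and the tick event is rescheduled that far in the future
    // (but never less than one cycle).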

    for (int i = 0; i < width || locked; ++i) {
        numCycles++;

        if (!curStaticInst || !curStaticInst->isDelayedCommit())
            checkForInterrupts();

        checkPcEventQueue();

        Fault fault = NoFault;

        TheISA::PCState pcState = thread->pcState();

        bool needToFetch = !isRomMicroPC(pcState.microPC()) &&
                           !curMacroStaticInst;
        if (needToFetch) {
            setupFetchRequest(&ifetch_req);
            fault = thread->itb->translateAtomic(&ifetch_req, tc,
                                                 BaseTLB::Execute);
        }

        if (fault == NoFault) {
            Tick icache_latency = 0;
            bool icache_access = false;
            dcache_access = false; // assume no dcache access

            if (needToFetch) {
                // This is commented out because the predecoder would act like
                // a tiny cache otherwise. It wouldn't be flushed when needed
                // like the I cache. It should be flushed, and when that works
                // this code should be uncommented.
                //Fetch more instruction memory if necessary
                //if (predecoder.needMoreBytes())
                //{
                icache_access = true;
                Packet ifetch_pkt = Packet(&ifetch_req, MemCmd::ReadReq,
                                           Packet::Broadcast);
                ifetch_pkt.dataStatic(&inst);

                if (hasPhysMemPort && ifetch_pkt.getAddr() == physMemAddr)
                    icache_latency = physmemPort.sendAtomic(&ifetch_pkt);
                else
                    icache_latency = icachePort.sendAtomic(&ifetch_pkt);

                assert(!ifetch_pkt.isError());

                // ifetch_req is initialized to read the instruction directly
                // into the CPU object's inst field.
                //}
            }

            preExecute();

            if (curStaticInst) {
                fault = curStaticInst->execute(this, traceData);

                // keep an instruction count
                if (fault == NoFault)
                    countInst();
                else if (traceData && !DTRACE(ExecFaulting)) {
                    delete traceData;
                    traceData = NULL;
                }

                postExecute();
            }

            // @todo remove me after debugging with legion done
            if (curStaticInst && (!curStaticInst->isMicroop() ||
                        curStaticInst->isFirstMicroop()))
                instCnt++;

            Tick stall_ticks = 0;
            if (simulate_inst_stalls && icache_access)
                stall_ticks += icache_latency;

            if (simulate_data_stalls && dcache_access)
                stall_ticks += dcache_latency;

            if (stall_ticks) {
                Tick stall_cycles = stall_ticks / ticks(1);
                Tick aligned_stall_ticks = ticks(stall_cycles);

                // round a partial stall cycle up to the next full cycle
                if (aligned_stall_ticks < stall_ticks)
                    aligned_stall_ticks += ticks(1);

                latency += aligned_stall_ticks;
            }

        }
        if (fault != NoFault || !stayAtPC)
            advancePC(fault);
    }

    // instruction takes at least one cycle
    if (latency < ticks(1))
        latency = ticks(1);

    if (_status != Idle)
        schedule(tickEvent, curTick() + latency);
}


void
AtomicSimpleCPU::printAddr(Addr a)
{
    dcachePort.printAddr(a);
}


////////////////////////////////////////////////////////////////////////
//
//  AtomicSimpleCPU Simulation Object
//
AtomicSimpleCPU *
AtomicSimpleCPUParams::create()
{
    numThreads = 1;
#if !FULL_SYSTEM
    if (workload.size() != 1)
        panic("only one workload allowed");
#endif
    return new AtomicSimpleCPU(this);
}