atomic.cc revision 13012
/*
 * Copyright 2014 Google, Inc.
 * Copyright (c) 2012-2013,2015,2017-2018 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 */
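
// Overview: AtomicSimpleCPU is the simplest of gem5's CPU models. Each
// call to tick() fetches, decodes and executes instructions back to back
// using atomic-mode memory accesses, i.e. sendAtomic() calls that return
// an access latency immediately instead of sending timing packets. When
// the simulate_inst_stalls / simulate_data_stalls parameters are set,
// those returned latencies are folded into the scheduling of the next
// tick event.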

#include "cpu/simple/atomic.hh"

#include "arch/locked_mem.hh"
#include "arch/mmapped_ipr.hh"
#include "arch/utility.hh"
#include "base/output.hh"
#include "config/the_isa.hh"
#include "cpu/exetrace.hh"
#include "debug/Drain.hh"
#include "debug/ExecFaulting.hh"
#include "debug/SimpleCPU.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "mem/physical.hh"
#include "params/AtomicSimpleCPU.hh"
#include "sim/faults.hh"
#include "sim/full_system.hh"
#include "sim/system.hh"

using namespace std;
using namespace TheISA;

void
AtomicSimpleCPU::init()
{
    BaseSimpleCPU::init();

    int cid = threadContexts[0]->contextId();
    ifetch_req->setContext(cid);
    data_read_req->setContext(cid);
    data_write_req->setContext(cid);
}

AtomicSimpleCPU::AtomicSimpleCPU(AtomicSimpleCPUParams *p)
    : BaseSimpleCPU(p),
      tickEvent([this]{ tick(); }, "AtomicSimpleCPU tick",
                false, Event::CPU_Tick_Pri),
      width(p->width), locked(false),
      simulate_data_stalls(p->simulate_data_stalls),
      simulate_inst_stalls(p->simulate_inst_stalls),
      icachePort(name() + ".icache_port", this),
      dcachePort(name() + ".dcache_port", this),
      dcache_access(false), dcache_latency(0),
      ppCommit(nullptr)
{
    _status = Idle;
    ifetch_req = std::make_shared<Request>();
    data_read_req = std::make_shared<Request>();
    data_write_req = std::make_shared<Request>();
}


AtomicSimpleCPU::~AtomicSimpleCPU()
{
    if (tickEvent.scheduled()) {
        deschedule(tickEvent);
    }
}

DrainState
AtomicSimpleCPU::drain()
{
    // Deschedule any power gating event (if any)
    deschedulePowerGatingEvent();

    if (switchedOut())
        return DrainState::Drained;

    if (!isDrained()) {
        DPRINTF(Drain, "Requesting drain.\n");
        return DrainState::Draining;
    } else {
        if (tickEvent.scheduled())
            deschedule(tickEvent);

        activeThreads.clear();
        DPRINTF(Drain, "Not executing microcode, no need to drain.\n");
        return DrainState::Drained;
    }
}

void
AtomicSimpleCPU::threadSnoop(PacketPtr pkt, ThreadID sender)
{
    DPRINTF(SimpleCPU, "received snoop pkt for addr:%#x %s\n", pkt->getAddr(),
            pkt->cmdString());

    for (ThreadID tid = 0; tid < numThreads; tid++) {
        if (tid != sender) {
            if (getCpuAddrMonitor(tid)->doMonitor(pkt)) {
                wakeup(tid);
            }

            TheISA::handleLockedSnoop(threadInfo[tid]->thread,
                                      pkt, dcachePort.cacheBlockMask);
        }
    }
}

void
AtomicSimpleCPU::drainResume()
{
    assert(!tickEvent.scheduled());
    if (switchedOut())
        return;

    DPRINTF(SimpleCPU, "Resume\n");
    verifyMemoryMode();

    assert(!threadContexts.empty());

    _status = BaseSimpleCPU::Idle;

    for (ThreadID tid = 0; tid < numThreads; tid++) {
        if (threadInfo[tid]->thread->status() == ThreadContext::Active) {
            threadInfo[tid]->notIdleFraction = 1;
            activeThreads.push_back(tid);
            _status = BaseSimpleCPU::Running;

            // Tick if any threads active
            if (!tickEvent.scheduled()) {
                schedule(tickEvent, nextCycle());
            }
        } else {
            threadInfo[tid]->notIdleFraction = 0;
        }
    }

    // Reschedule any power gating event (if any)
    schedulePowerGatingEvent();
}
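
// Drain handshake: drain() above returns DrainState::Draining when the CPU
// cannot stop cleanly yet (isDrained() is false, e.g. mid-way through a
// microcoded instruction). tryCompleteDrain() below is then polled from the
// execution path and calls signalDrainDone() once isDrained() finally holds.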

bool
AtomicSimpleCPU::tryCompleteDrain()
{
    if (drainState() != DrainState::Draining)
        return false;

    DPRINTF(Drain, "tryCompleteDrain.\n");
    if (!isDrained())
        return false;

    DPRINTF(Drain, "CPU done draining, processing drain event\n");
    signalDrainDone();

    return true;
}


void
AtomicSimpleCPU::switchOut()
{
    BaseSimpleCPU::switchOut();

    assert(!tickEvent.scheduled());
    assert(_status == BaseSimpleCPU::Running || _status == Idle);
    assert(isDrained());
}


void
AtomicSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
{
    BaseSimpleCPU::takeOverFrom(oldCPU);

    // The tick event should have been descheduled by drain()
    assert(!tickEvent.scheduled());
}

void
AtomicSimpleCPU::verifyMemoryMode() const
{
    if (!system->isAtomicMode()) {
        fatal("The atomic CPU requires the memory system to be in "
              "'atomic' mode.\n");
    }
}

void
AtomicSimpleCPU::activateContext(ThreadID thread_num)
{
    DPRINTF(SimpleCPU, "ActivateContext %d\n", thread_num);

    assert(thread_num < numThreads);

    threadInfo[thread_num]->notIdleFraction = 1;
    Cycles delta = ticksToCycles(threadInfo[thread_num]->thread->lastActivate -
                                 threadInfo[thread_num]->thread->lastSuspend);
    numCycles += delta;

    if (!tickEvent.scheduled()) {
        //Make sure ticks are still on multiples of cycles
        schedule(tickEvent, clockEdge(Cycles(0)));
    }
    _status = BaseSimpleCPU::Running;
    if (std::find(activeThreads.begin(), activeThreads.end(), thread_num)
        == activeThreads.end()) {
        activeThreads.push_back(thread_num);
    }

    BaseCPU::activateContext(thread_num);
}


void
AtomicSimpleCPU::suspendContext(ThreadID thread_num)
{
    DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);

    assert(thread_num < numThreads);
    activeThreads.remove(thread_num);

    if (_status == Idle)
        return;

    assert(_status == BaseSimpleCPU::Running);

    threadInfo[thread_num]->notIdleFraction = 0;

    if (activeThreads.empty()) {
        _status = Idle;

        if (tickEvent.scheduled()) {
            deschedule(tickEvent);
        }
    }

    BaseCPU::suspendContext(thread_num);
}

Tick
AtomicSimpleCPU::sendPacket(MasterPort &port, const PacketPtr &pkt)
{
    return port.sendAtomic(pkt);
}

Tick
AtomicSimpleCPU::AtomicCPUDPort::recvAtomicSnoop(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "received snoop pkt for addr:%#x %s\n", pkt->getAddr(),
            pkt->cmdString());

    // X86 ISA: Snooping an invalidation for monitor/mwait
    AtomicSimpleCPU *cpu = (AtomicSimpleCPU *)(&owner);

    for (ThreadID tid = 0; tid < cpu->numThreads; tid++) {
        if (cpu->getCpuAddrMonitor(tid)->doMonitor(pkt)) {
            cpu->wakeup(tid);
        }
    }

    // If the snoop invalidates, release any associated locks.
    // When run without caches, invalidation packets will not be received,
    // hence we must check if the incoming packets are writes and wake up
    // the processor accordingly.
    if (pkt->isInvalidate() || pkt->isWrite()) {
        DPRINTF(SimpleCPU, "received invalidation for addr:%#x\n",
                pkt->getAddr());
        for (auto &t_info : cpu->threadInfo) {
            TheISA::handleLockedSnoop(t_info->thread, pkt, cacheBlockMask);
        }
    }

    return 0;
}
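
// Functional snoops mirror recvAtomicSnoop() above: wake any monitoring
// threads and drop LL/SC locks on invalidations. Unlike the atomic path,
// no latency is returned and plain writes are not checked here.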

void
AtomicSimpleCPU::AtomicCPUDPort::recvFunctionalSnoop(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "received snoop pkt for addr:%#x %s\n", pkt->getAddr(),
            pkt->cmdString());

    // X86 ISA: Snooping an invalidation for monitor/mwait
    AtomicSimpleCPU *cpu = (AtomicSimpleCPU *)(&owner);
    for (ThreadID tid = 0; tid < cpu->numThreads; tid++) {
        if (cpu->getCpuAddrMonitor(tid)->doMonitor(pkt)) {
            cpu->wakeup(tid);
        }
    }

    // if snoop invalidates, release any associated locks
    if (pkt->isInvalidate()) {
        DPRINTF(SimpleCPU, "received invalidation for addr:%#x\n",
                pkt->getAddr());
        for (auto &t_info : cpu->threadInfo) {
            TheISA::handleLockedSnoop(t_info->thread, pkt, cacheBlockMask);
        }
    }
}

Fault
AtomicSimpleCPU::readMem(Addr addr, uint8_t * data, unsigned size,
                         Request::Flags flags)
{
    SimpleExecContext& t_info = *threadInfo[curThread];
    SimpleThread* thread = t_info.thread;

    // use the CPU's statically allocated read request and packet objects
    const RequestPtr &req = data_read_req;

    if (traceData)
        traceData->setMem(addr, size, flags);

    //The size of the data we're trying to read.
    int fullSize = size;

    //The address of the second part of this access if it needs to be split
    //across a cache line boundary.
    Addr secondAddr = roundDown(addr + size - 1, cacheLineSize());

    if (secondAddr > addr)
        size = secondAddr - addr;

    dcache_latency = 0;

    req->taskId(taskId());
    while (1) {
        req->setVirt(0, addr, size, flags, dataMasterId(),
                     thread->pcState().instAddr());

        // translate to physical address
        Fault fault = thread->dtb->translateAtomic(req, thread->getTC(),
                                                   BaseTLB::Read);

        // Now do the access.
        if (fault == NoFault && !req->getFlags().isSet(Request::NO_ACCESS)) {
            Packet pkt(req, Packet::makeReadCmd(req));
            pkt.dataStatic(data);

            if (req->isMmappedIpr()) {
                dcache_latency += TheISA::handleIprRead(thread->getTC(), &pkt);
            } else {
                dcache_latency += sendPacket(dcachePort, &pkt);
            }
            dcache_access = true;

            assert(!pkt.isError());

            if (req->isLLSC()) {
                TheISA::handleLockedRead(thread, req);
            }
        }

        //If there's a fault, return it
        if (fault != NoFault) {
            if (req->isPrefetch()) {
                return NoFault;
            } else {
                return fault;
            }
        }

        //If we don't need to access a second cache line, stop now.
        if (secondAddr <= addr)
        {
            if (req->isLockedRMW() && fault == NoFault) {
                assert(!locked);
                locked = true;
            }

            return fault;
        }

        /*
         * Set up for accessing the second cache line.
         */

        //Move the pointer we're reading into to the correct location.
        data += size;
        //Adjust the size to get the remaining bytes.
        size = addr + fullSize - secondAddr;
        //And access the right address.
        addr = secondAddr;
    }
}

Fault
AtomicSimpleCPU::initiateMemRead(Addr addr, unsigned size,
                                 Request::Flags flags)
{
    panic("initiateMemRead() is for timing accesses, and should "
          "never be called on AtomicSimpleCPU.\n");
}
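
// Worked example of the line-splitting logic used by readMem() above and
// writeMem() below (illustrative numbers, not taken from the source): with
// addr = 0x3FC, size = 8 and 64-byte cache lines,
// secondAddr = roundDown(0x403, 64) = 0x400 > addr, so the first pass
// accesses 4 bytes at 0x3FC and the second pass the remaining 4 bytes at
// 0x400. If the access fits within one line, secondAddr <= addr and the
// loop runs exactly once.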

Fault
AtomicSimpleCPU::writeMem(uint8_t *data, unsigned size, Addr addr,
                          Request::Flags flags, uint64_t *res)
{
    SimpleExecContext& t_info = *threadInfo[curThread];
    SimpleThread* thread = t_info.thread;
    static uint8_t zero_array[64] = {};

    if (data == NULL) {
        assert(size <= 64);
        assert(flags & Request::STORE_NO_DATA);
        // This must be a cache block cleaning request
        data = zero_array;
    }

    // use the CPU's statically allocated write request and packet objects
    const RequestPtr &req = data_write_req;

    if (traceData)
        traceData->setMem(addr, size, flags);

    //The size of the data we're trying to write.
    int fullSize = size;

    //The address of the second part of this access if it needs to be split
    //across a cache line boundary.
    Addr secondAddr = roundDown(addr + size - 1, cacheLineSize());

    if (secondAddr > addr)
        size = secondAddr - addr;

    dcache_latency = 0;

    req->taskId(taskId());
    while (1) {
        req->setVirt(0, addr, size, flags, dataMasterId(),
                     thread->pcState().instAddr());

        // translate to physical address
        Fault fault = thread->dtb->translateAtomic(req, thread->getTC(),
                                                   BaseTLB::Write);

        // Now do the access.
        if (fault == NoFault) {
            bool do_access = true;  // flag to suppress cache access

            if (req->isLLSC()) {
                do_access = TheISA::handleLockedWrite(thread, req,
                                                      dcachePort.cacheBlockMask);
            } else if (req->isSwap()) {
                if (req->isCondSwap()) {
                    assert(res);
                    req->setExtraData(*res);
                }
            }

            if (do_access && !req->getFlags().isSet(Request::NO_ACCESS)) {
                Packet pkt(req, Packet::makeWriteCmd(req));
                pkt.dataStatic(data);

                if (req->isMmappedIpr()) {
                    dcache_latency +=
                        TheISA::handleIprWrite(thread->getTC(), &pkt);
                } else {
                    dcache_latency += sendPacket(dcachePort, &pkt);

                    // Notify other threads on this CPU of the write
                    threadSnoop(&pkt, curThread);
                }
                dcache_access = true;
                assert(!pkt.isError());

                if (req->isSwap()) {
                    assert(res);
                    memcpy(res, pkt.getConstPtr<uint8_t>(), fullSize);
                }
            }

            if (res && !req->isSwap()) {
                *res = req->getExtraData();
            }
        }

        //If there's a fault or we don't need to access a second cache line,
        //stop now.
        if (fault != NoFault || secondAddr <= addr)
        {
            if (req->isLockedRMW() && fault == NoFault) {
                assert(locked);
                locked = false;
            }

            if (fault != NoFault && req->isPrefetch()) {
                return NoFault;
            } else {
                return fault;
            }
        }

        /*
         * Set up for accessing the second cache line.
         */

        //Move the pointer we're writing from to the correct location.
        data += size;
        //Adjust the size to get the remaining bytes.
        size = addr + fullSize - secondAddr;
        //And access the right address.
        addr = secondAddr;
    }
}
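
// tick() is the heart of the model: each invocation executes up to 'width'
// instructions for the currently active thread (and keeps going while
// 'locked' is set so a locked RMW sequence completes atomically), then
// reschedules itself after the larger of one clock period and the
// accumulated, clock-rounded stall time.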

void
AtomicSimpleCPU::tick()
{
    DPRINTF(SimpleCPU, "Tick\n");

    // Change thread if multi-threaded
    swapActiveThread();

    // Set memory request ids to the current thread
    if (numThreads > 1) {
        ContextID cid = threadContexts[curThread]->contextId();

        ifetch_req->setContext(cid);
        data_read_req->setContext(cid);
        data_write_req->setContext(cid);
    }

    SimpleExecContext& t_info = *threadInfo[curThread];
    SimpleThread* thread = t_info.thread;

    Tick latency = 0;

    for (int i = 0; i < width || locked; ++i) {
        numCycles++;
        updateCycleCounters(BaseCPU::CPU_STATE_ON);

        if (!curStaticInst || !curStaticInst->isDelayedCommit()) {
            checkForInterrupts();
            checkPcEventQueue();
        }

        // We must have just got suspended by a PC event
        if (_status == Idle) {
            tryCompleteDrain();
            return;
        }

        Fault fault = NoFault;

        TheISA::PCState pcState = thread->pcState();

        bool needToFetch = !isRomMicroPC(pcState.microPC()) &&
                           !curMacroStaticInst;
        if (needToFetch) {
            ifetch_req->taskId(taskId());
            setupFetchRequest(ifetch_req);
            fault = thread->itb->translateAtomic(ifetch_req, thread->getTC(),
                                                 BaseTLB::Execute);
        }

        if (fault == NoFault) {
            Tick icache_latency = 0;
            bool icache_access = false;
            dcache_access = false; // assume no dcache access

            if (needToFetch) {
                // This is commented out because the decoder would act like
                // a tiny cache otherwise. It wouldn't be flushed when needed
                // like the I cache. It should be flushed, and when that works
                // this code should be uncommented.
                //Fetch more instruction memory if necessary
                //if (decoder.needMoreBytes())
                //{
                icache_access = true;
                Packet ifetch_pkt = Packet(ifetch_req, MemCmd::ReadReq);
                ifetch_pkt.dataStatic(&inst);

                icache_latency = sendPacket(icachePort, &ifetch_pkt);

                assert(!ifetch_pkt.isError());

                // ifetch_req is initialized to read the instruction directly
                // into the CPU object's inst field.
                //}
            }

            preExecute();

            Tick stall_ticks = 0;
            if (curStaticInst) {
                fault = curStaticInst->execute(&t_info, traceData);

                // keep an instruction count
                if (fault == NoFault) {
                    countInst();
                    ppCommit->notify(std::make_pair(thread, curStaticInst));
                }
                else if (traceData && !DTRACE(ExecFaulting)) {
                    delete traceData;
                    traceData = NULL;
                }

                if (fault != NoFault &&
                    dynamic_pointer_cast<SyscallRetryFault>(fault)) {
                    // Retry execution of system calls after a delay.
                    // Prevents immediate re-execution since conditions which
                    // caused the retry are unlikely to change every tick.
                    stall_ticks += clockEdge(syscallRetryLatency) - curTick();
                }

                postExecute();
            }

            // @todo remove me after debugging with legion done
            if (curStaticInst && (!curStaticInst->isMicroop() ||
                                  curStaticInst->isFirstMicroop()))
                instCnt++;

            if (simulate_inst_stalls && icache_access)
                stall_ticks += icache_latency;

            if (simulate_data_stalls && dcache_access)
                stall_ticks += dcache_latency;
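
            // Example of the rounding below (illustrative numbers): with a
            // 500-tick clock period and stall_ticks == 1200, divCeil gives
            // 3, so 1500 ticks are added to this tick()'s latency.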
            if (stall_ticks) {
                // the atomic cpu does its accounting in ticks, so
                // keep counting in ticks but round to the clock
                // period
                latency += divCeil(stall_ticks, clockPeriod()) *
                           clockPeriod();
            }

        }
        if (fault != NoFault || !t_info.stayAtPC)
            advancePC(fault);
    }

    if (tryCompleteDrain())
        return;

    // instruction takes at least one cycle
    if (latency < clockPeriod())
        latency = clockPeriod();

    if (_status != Idle)
        reschedule(tickEvent, curTick() + latency, true);
}

void
AtomicSimpleCPU::regProbePoints()
{
    BaseCPU::regProbePoints();

    ppCommit = new ProbePointArg<pair<SimpleThread*, const StaticInstPtr>>
                                (getProbeManager(), "Commit");
}

void
AtomicSimpleCPU::printAddr(Addr a)
{
    dcachePort.printAddr(a);
}

////////////////////////////////////////////////////////////////////////
//
//  AtomicSimpleCPU Simulation Object
//
AtomicSimpleCPU *
AtomicSimpleCPUParams::create()
{
    return new AtomicSimpleCPU(this);
}
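
// ---------------------------------------------------------------------------
// Illustrative sketch only -- not part of gem5 and excluded from the build.
// It restates the two-pass, cache-line-splitting pattern used by readMem()
// and writeMem() above as a standalone toy that copies from a flat byte
// array. The names (toySplitRead, mem, line_size) are invented for the
// example; it assumes <cstring> and <cstdint>.
// ---------------------------------------------------------------------------
#if 0
static void
toySplitRead(const uint8_t *mem, uint64_t addr, uint8_t *data, unsigned size,
             unsigned line_size)
{
    const unsigned full_size = size;
    // Start of the cache line holding the last byte of the access.
    const uint64_t second_addr = ((addr + size - 1) / line_size) * line_size;

    if (second_addr > addr)
        size = second_addr - addr;          // first pass covers line one only

    while (true) {
        std::memcpy(data, mem + addr, size);
        if (second_addr <= addr)
            return;                         // access did not cross a line
        data += size;                       // advance the destination pointer
        size = addr + full_size - second_addr;  // bytes left in line two
        addr = second_addr;                 // and read them from the next line
    }
}
#endif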