atomic.cc revision 12749
/*
 * Copyright 2014 Google, Inc.
 * Copyright (c) 2012-2013,2015,2017 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 */

#include "cpu/simple/atomic.hh"

#include "arch/locked_mem.hh"
#include "arch/mmapped_ipr.hh"
#include "arch/utility.hh"
#include "base/output.hh"
#include "config/the_isa.hh"
#include "cpu/exetrace.hh"
#include "debug/Drain.hh"
#include "debug/ExecFaulting.hh"
#include "debug/SimpleCPU.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "mem/physical.hh"
#include "params/AtomicSimpleCPU.hh"
#include "sim/faults.hh"
#include "sim/full_system.hh"
#include "sim/system.hh"

using namespace std;
using namespace TheISA;

void
AtomicSimpleCPU::init()
{
    BaseSimpleCPU::init();

    int cid = threadContexts[0]->contextId();
    ifetch_req->setContext(cid);
    data_read_req->setContext(cid);
    data_write_req->setContext(cid);
}

AtomicSimpleCPU::AtomicSimpleCPU(AtomicSimpleCPUParams *p)
    : BaseSimpleCPU(p),
      tickEvent([this]{ tick(); }, "AtomicSimpleCPU tick",
                false, Event::CPU_Tick_Pri),
      width(p->width), locked(false),
      simulate_data_stalls(p->simulate_data_stalls),
      simulate_inst_stalls(p->simulate_inst_stalls),
      icachePort(name() + ".icache_port", this),
      dcachePort(name() + ".dcache_port", this),
      fastmem(p->fastmem), dcache_access(false), dcache_latency(0),
      ppCommit(nullptr)
{
    _status = Idle;
    ifetch_req = std::make_shared<Request>();
    data_read_req = std::make_shared<Request>();
    data_write_req = std::make_shared<Request>();
}


AtomicSimpleCPU::~AtomicSimpleCPU()
{
    if (tickEvent.scheduled()) {
        deschedule(tickEvent);
    }
}

DrainState
AtomicSimpleCPU::drain()
{
    // Deschedule any power gating event (if any)
    deschedulePowerGatingEvent();

    if (switchedOut())
        return DrainState::Drained;

    if (!isDrained()) {
        DPRINTF(Drain, "Requesting drain.\n");
        return DrainState::Draining;
    } else {
        if (tickEvent.scheduled())
            deschedule(tickEvent);

        activeThreads.clear();
        DPRINTF(Drain, "Not executing microcode, no need to drain.\n");
        return DrainState::Drained;
    }
}

void
AtomicSimpleCPU::threadSnoop(PacketPtr pkt, ThreadID sender)
{
    DPRINTF(SimpleCPU, "received snoop pkt for addr:%#x %s\n", pkt->getAddr(),
            pkt->cmdString());

    for (ThreadID tid = 0; tid < numThreads; tid++) {
        if (tid != sender) {
            if (getCpuAddrMonitor(tid)->doMonitor(pkt)) {
                wakeup(tid);
            }

            TheISA::handleLockedSnoop(threadInfo[tid]->thread,
                                      pkt, dcachePort.cacheBlockMask);
        }
    }
}

void
AtomicSimpleCPU::drainResume()
{
    assert(!tickEvent.scheduled());
    if (switchedOut())
        return;

    DPRINTF(SimpleCPU, "Resume\n");
    verifyMemoryMode();

    assert(!threadContexts.empty());

    _status = BaseSimpleCPU::Idle;

    for (ThreadID tid = 0; tid < numThreads; tid++) {
        if (threadInfo[tid]->thread->status() == ThreadContext::Active) {
            threadInfo[tid]->notIdleFraction = 1;
            activeThreads.push_back(tid);
            _status = BaseSimpleCPU::Running;

            // Tick if any threads active
            if (!tickEvent.scheduled()) {
                schedule(tickEvent, nextCycle());
            }
        } else {
            threadInfo[tid]->notIdleFraction = 0;
        }
    }

    // Reschedule any power gating event (if any)
    schedulePowerGatingEvent();
}
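
// Note (added commentary, not in the original source): tryCompleteDrain()
// below is polled from tick().  Once isDrained() -- declared in the class
// header -- reports that the CPU is at a clean instruction boundary
// (roughly: not in the middle of a microcode sequence or a locked RMW),
// the pending drain is signalled as complete via signalDrainDone().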

bool
AtomicSimpleCPU::tryCompleteDrain()
{
    if (drainState() != DrainState::Draining)
        return false;

    DPRINTF(Drain, "tryCompleteDrain.\n");
    if (!isDrained())
        return false;

    DPRINTF(Drain, "CPU done draining, processing drain event\n");
    signalDrainDone();

    return true;
}


void
AtomicSimpleCPU::switchOut()
{
    BaseSimpleCPU::switchOut();

    assert(!tickEvent.scheduled());
    assert(_status == BaseSimpleCPU::Running || _status == Idle);
    assert(isDrained());
}


void
AtomicSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
{
    BaseSimpleCPU::takeOverFrom(oldCPU);

    // The tick event should have been descheduled by drain()
    assert(!tickEvent.scheduled());
}

void
AtomicSimpleCPU::verifyMemoryMode() const
{
    if (!system->isAtomicMode()) {
        fatal("The atomic CPU requires the memory system to be in "
              "'atomic' mode.\n");
    }
}

void
AtomicSimpleCPU::activateContext(ThreadID thread_num)
{
    DPRINTF(SimpleCPU, "ActivateContext %d\n", thread_num);

    assert(thread_num < numThreads);

    threadInfo[thread_num]->notIdleFraction = 1;
    Cycles delta = ticksToCycles(threadInfo[thread_num]->thread->lastActivate -
                                 threadInfo[thread_num]->thread->lastSuspend);
    numCycles += delta;

    if (!tickEvent.scheduled()) {
        //Make sure ticks are still on multiples of cycles
        schedule(tickEvent, clockEdge(Cycles(0)));
    }
    _status = BaseSimpleCPU::Running;
    if (std::find(activeThreads.begin(), activeThreads.end(), thread_num)
        == activeThreads.end()) {
        activeThreads.push_back(thread_num);
    }

    BaseCPU::activateContext(thread_num);
}


void
AtomicSimpleCPU::suspendContext(ThreadID thread_num)
{
    DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);

    assert(thread_num < numThreads);
    activeThreads.remove(thread_num);

    if (_status == Idle)
        return;

    assert(_status == BaseSimpleCPU::Running);

    threadInfo[thread_num]->notIdleFraction = 0;

    if (activeThreads.empty()) {
        _status = Idle;

        if (tickEvent.scheduled()) {
            deschedule(tickEvent);
        }
    }

    BaseCPU::suspendContext(thread_num);
}


Tick
AtomicSimpleCPU::AtomicCPUDPort::recvAtomicSnoop(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "received snoop pkt for addr:%#x %s\n", pkt->getAddr(),
            pkt->cmdString());

    // X86 ISA: Snooping an invalidation for monitor/mwait
    AtomicSimpleCPU *cpu = (AtomicSimpleCPU *)(&owner);

    for (ThreadID tid = 0; tid < cpu->numThreads; tid++) {
        if (cpu->getCpuAddrMonitor(tid)->doMonitor(pkt)) {
            cpu->wakeup(tid);
        }
    }

    // if snoop invalidates, release any associated locks
    // When run without caches, invalidation packets will not be received;
    // hence we must check whether the incoming packet is a write and wake
    // up the processor accordingly
    if (pkt->isInvalidate() || pkt->isWrite()) {
        DPRINTF(SimpleCPU, "received invalidation for addr:%#x\n",
                pkt->getAddr());
        for (auto &t_info : cpu->threadInfo) {
            TheISA::handleLockedSnoop(t_info->thread, pkt, cacheBlockMask);
        }
    }

    return 0;
}

void
AtomicSimpleCPU::AtomicCPUDPort::recvFunctionalSnoop(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "received snoop pkt for addr:%#x %s\n", pkt->getAddr(),
            pkt->cmdString());

    // X86 ISA: Snooping an invalidation for monitor/mwait
    AtomicSimpleCPU *cpu = (AtomicSimpleCPU *)(&owner);
    for (ThreadID tid = 0; tid < cpu->numThreads; tid++) {
        if (cpu->getCpuAddrMonitor(tid)->doMonitor(pkt)) {
            cpu->wakeup(tid);
        }
    }

    // if snoop invalidates, release any associated locks
    if (pkt->isInvalidate()) {
        DPRINTF(SimpleCPU, "received invalidation for addr:%#x\n",
                pkt->getAddr());
        for (auto &t_info : cpu->threadInfo) {
            TheISA::handleLockedSnoop(t_info->thread, pkt, cacheBlockMask);
        }
    }
}

Fault
AtomicSimpleCPU::readMem(Addr addr, uint8_t * data, unsigned size,
                         Request::Flags flags)
{
    SimpleExecContext& t_info = *threadInfo[curThread];
    SimpleThread* thread = t_info.thread;

    // use the CPU's statically allocated read request and packet objects
    const RequestPtr &req = data_read_req;

    if (traceData)
        traceData->setMem(addr, size, flags);

    //The size of the data we're trying to read.
    int fullSize = size;

    //The address of the second part of this access if it needs to be split
    //across a cache line boundary.
    Addr secondAddr = roundDown(addr + size - 1, cacheLineSize());

    if (secondAddr > addr)
        size = secondAddr - addr;

    dcache_latency = 0;

    req->taskId(taskId());
    while (1) {
        req->setVirt(0, addr, size, flags, dataMasterId(),
                     thread->pcState().instAddr());

        // translate to physical address
        Fault fault = thread->dtb->translateAtomic(req, thread->getTC(),
                                                   BaseTLB::Read);

        // Now do the access.
        if (fault == NoFault && !req->getFlags().isSet(Request::NO_ACCESS)) {
            Packet pkt(req, Packet::makeReadCmd(req));
            pkt.dataStatic(data);

            if (req->isMmappedIpr())
                dcache_latency += TheISA::handleIprRead(thread->getTC(), &pkt);
            else {
                if (fastmem && system->isMemAddr(pkt.getAddr()))
                    system->getPhysMem().access(&pkt);
                else
                    dcache_latency += dcachePort.sendAtomic(&pkt);
            }
            dcache_access = true;

            assert(!pkt.isError());

            if (req->isLLSC()) {
                TheISA::handleLockedRead(thread, req);
            }
        }

        //If there's a fault, return it
        if (fault != NoFault) {
            if (req->isPrefetch()) {
                return NoFault;
            } else {
                return fault;
            }
        }

        //If we don't need to access a second cache line, stop now.
        if (secondAddr <= addr)
        {
            if (req->isLockedRMW() && fault == NoFault) {
                assert(!locked);
                locked = true;
            }

            return fault;
        }

        /*
         * Set up for accessing the second cache line.
         */

        //Move the pointer we're reading into to the correct location.
        data += size;
        //Adjust the size to get the remaining bytes.
        size = addr + fullSize - secondAddr;
        //And access the right address.
        addr = secondAddr;
    }
}

Fault
AtomicSimpleCPU::initiateMemRead(Addr addr, unsigned size,
                                 Request::Flags flags)
{
    panic("initiateMemRead() is for timing accesses, and should "
          "never be called on AtomicSimpleCPU.\n");
}

Fault
AtomicSimpleCPU::writeMem(uint8_t *data, unsigned size, Addr addr,
                          Request::Flags flags, uint64_t *res)
{
    SimpleExecContext& t_info = *threadInfo[curThread];
    SimpleThread* thread = t_info.thread;
    static uint8_t zero_array[64] = {};

    if (data == NULL) {
        assert(size <= 64);
        assert(flags & Request::STORE_NO_DATA);
        // This must be a cache block cleaning request
        data = zero_array;
    }

    // use the CPU's statically allocated write request and packet objects
    const RequestPtr &req = data_write_req;

    if (traceData)
        traceData->setMem(addr, size, flags);

    //The size of the data we're trying to write.
    int fullSize = size;

    //The address of the second part of this access if it needs to be split
    //across a cache line boundary.
    Addr secondAddr = roundDown(addr + size - 1, cacheLineSize());

    if (secondAddr > addr)
        size = secondAddr - addr;
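
    // Illustrative note (added commentary, not in the original source):
    // the same line-straddle handling is used here as in readMem() above.
    // For example, with 64-byte cache lines a 4-byte access at addr 0x3e
    // gives secondAddr = roundDown(0x3e + 4 - 1, 64) = 0x40 > 0x3e, so the
    // first loop iteration covers the 2 bytes at 0x3e..0x3f and the second
    // iteration covers the remaining 2 bytes starting at 0x40.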

    dcache_latency = 0;

    req->taskId(taskId());
    while (1) {
        req->setVirt(0, addr, size, flags, dataMasterId(),
                     thread->pcState().instAddr());

        // translate to physical address
        Fault fault = thread->dtb->translateAtomic(req, thread->getTC(),
                                                   BaseTLB::Write);

        // Now do the access.
        if (fault == NoFault) {
            bool do_access = true;  // flag to suppress cache access

            if (req->isLLSC()) {
                do_access = TheISA::handleLockedWrite(thread, req,
                                                      dcachePort.cacheBlockMask);
            } else if (req->isSwap()) {
                if (req->isCondSwap()) {
                    assert(res);
                    req->setExtraData(*res);
                }
            }

            if (do_access && !req->getFlags().isSet(Request::NO_ACCESS)) {
                Packet pkt(req, Packet::makeWriteCmd(req));
                pkt.dataStatic(data);

                if (req->isMmappedIpr()) {
                    dcache_latency +=
                        TheISA::handleIprWrite(thread->getTC(), &pkt);
                } else {
                    if (fastmem && system->isMemAddr(pkt.getAddr()))
                        system->getPhysMem().access(&pkt);
                    else
                        dcache_latency += dcachePort.sendAtomic(&pkt);

                    // Notify other threads on this CPU of write
                    threadSnoop(&pkt, curThread);
                }
                dcache_access = true;
                assert(!pkt.isError());

                if (req->isSwap()) {
                    assert(res);
                    memcpy(res, pkt.getConstPtr<uint8_t>(), fullSize);
                }
            }

            if (res && !req->isSwap()) {
                *res = req->getExtraData();
            }
        }

        //If there's a fault or we don't need to access a second cache line,
        //stop now.
        if (fault != NoFault || secondAddr <= addr)
        {
            if (req->isLockedRMW() && fault == NoFault) {
                assert(locked);
                locked = false;
            }

            if (fault != NoFault && req->isPrefetch()) {
                return NoFault;
            } else {
                return fault;
            }
        }

        /*
         * Set up for accessing the second cache line.
         */

        //Move the pointer we're writing from to the correct location.
        data += size;
        //Adjust the size to get the remaining bytes.
        size = addr + fullSize - secondAddr;
        //And access the right address.
        addr = secondAddr;
    }
}
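
// Note (added commentary, not in the original source): readMem() sets
// 'locked' when it starts a locked RMW sequence and writeMem() clears it
// when the sequence completes.  The execute loop in tick() below keeps
// iterating while 'locked' is set (see the loop condition "i < width ||
// locked"), so the read and write halves of a locked RMW are executed back
// to back within a single tick() invocation.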


void
AtomicSimpleCPU::tick()
{
    DPRINTF(SimpleCPU, "Tick\n");

    // Change thread if multi-threaded
    swapActiveThread();

    // Set memory request ids to current thread
    if (numThreads > 1) {
        ContextID cid = threadContexts[curThread]->contextId();

        ifetch_req->setContext(cid);
        data_read_req->setContext(cid);
        data_write_req->setContext(cid);
    }

    SimpleExecContext& t_info = *threadInfo[curThread];
    SimpleThread* thread = t_info.thread;

    Tick latency = 0;

    for (int i = 0; i < width || locked; ++i) {
        numCycles++;
        updateCycleCounters(BaseCPU::CPU_STATE_ON);

        if (!curStaticInst || !curStaticInst->isDelayedCommit()) {
            checkForInterrupts();
            checkPcEventQueue();
        }

        // We must have just got suspended by a PC event
        if (_status == Idle) {
            tryCompleteDrain();
            return;
        }

        Fault fault = NoFault;

        TheISA::PCState pcState = thread->pcState();

        bool needToFetch = !isRomMicroPC(pcState.microPC()) &&
                           !curMacroStaticInst;
        if (needToFetch) {
            ifetch_req->taskId(taskId());
            setupFetchRequest(ifetch_req);
            fault = thread->itb->translateAtomic(ifetch_req, thread->getTC(),
                                                 BaseTLB::Execute);
        }

        if (fault == NoFault) {
            Tick icache_latency = 0;
            bool icache_access = false;
            dcache_access = false; // assume no dcache access

            if (needToFetch) {
                // This is commented out because the decoder would act like
                // a tiny cache otherwise. It wouldn't be flushed when needed
                // like the I cache. It should be flushed, and when that works
                // this code should be uncommented.
                //Fetch more instruction memory if necessary
                //if (decoder.needMoreBytes())
                //{
                icache_access = true;
                Packet ifetch_pkt = Packet(ifetch_req, MemCmd::ReadReq);
                ifetch_pkt.dataStatic(&inst);

                if (fastmem && system->isMemAddr(ifetch_pkt.getAddr()))
                    system->getPhysMem().access(&ifetch_pkt);
                else
                    icache_latency = icachePort.sendAtomic(&ifetch_pkt);

                assert(!ifetch_pkt.isError());

                // ifetch_req is initialized to read the instruction directly
                // into the CPU object's inst field.
                //}
            }

            preExecute();

            Tick stall_ticks = 0;
            if (curStaticInst) {
                fault = curStaticInst->execute(&t_info, traceData);

                // keep an instruction count
                if (fault == NoFault) {
                    countInst();
                    ppCommit->notify(std::make_pair(thread, curStaticInst));
                } else if (traceData && !DTRACE(ExecFaulting)) {
                    delete traceData;
                    traceData = NULL;
                }

                if (fault != NoFault &&
                    dynamic_pointer_cast<SyscallRetryFault>(fault)) {
                    // Retry execution of system calls after a delay.
                    // Prevents immediate re-execution since conditions which
                    // caused the retry are unlikely to change every tick.
                    stall_ticks += clockEdge(syscallRetryLatency) - curTick();
                }

                postExecute();
            }

            // @todo remove me after debugging with legion done
            if (curStaticInst && (!curStaticInst->isMicroop() ||
                                  curStaticInst->isFirstMicroop()))
                instCnt++;

            if (simulate_inst_stalls && icache_access)
                stall_ticks += icache_latency;

            if (simulate_data_stalls && dcache_access)
                stall_ticks += dcache_latency;

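            // Illustrative note (added commentary, not in the original
            // source): stall_ticks is rounded up to a whole number of clock
            // periods below.  E.g. with an assumed 1000-tick clock period
            // and stall_ticks = 2500, divCeil(2500, 1000) = 3, so 3000 ticks
            // are added to the latency of this tick() invocation.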
            if (stall_ticks) {
                // the atomic cpu does its accounting in ticks, so
                // keep counting in ticks but round to the clock
                // period
                latency += divCeil(stall_ticks, clockPeriod()) *
                    clockPeriod();
            }

        }
        if (fault != NoFault || !t_info.stayAtPC)
            advancePC(fault);
    }

    if (tryCompleteDrain())
        return;

    // instruction takes at least one cycle
    if (latency < clockPeriod())
        latency = clockPeriod();

    if (_status != Idle)
        reschedule(tickEvent, curTick() + latency, true);
}

void
AtomicSimpleCPU::regProbePoints()
{
    BaseCPU::regProbePoints();

    ppCommit = new ProbePointArg<pair<SimpleThread*, const StaticInstPtr>>
                                (getProbeManager(), "Commit");
}

void
AtomicSimpleCPU::printAddr(Addr a)
{
    dcachePort.printAddr(a);
}

////////////////////////////////////////////////////////////////////////
//
//  AtomicSimpleCPU Simulation Object
//
AtomicSimpleCPU *
AtomicSimpleCPUParams::create()
{
    return new AtomicSimpleCPU(this);
}