// atomic.cc revision 12276
/*
 * Copyright 2014 Google, Inc.
 * Copyright (c) 2012-2013,2015,2017 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 */

#include "cpu/simple/atomic.hh"

#include "arch/locked_mem.hh"
#include "arch/mmapped_ipr.hh"
#include "arch/utility.hh"
#include "base/bigint.hh"
#include "base/output.hh"
#include "config/the_isa.hh"
#include "cpu/exetrace.hh"
#include "debug/Drain.hh"
#include "debug/ExecFaulting.hh"
#include "debug/SimpleCPU.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "mem/physical.hh"
#include "params/AtomicSimpleCPU.hh"
#include "sim/faults.hh"
#include "sim/full_system.hh"
#include "sim/system.hh"

using namespace std;
using namespace TheISA;

void
AtomicSimpleCPU::init()
{
    BaseSimpleCPU::init();

    int cid = threadContexts[0]->contextId();
    ifetch_req.setContext(cid);
    data_read_req.setContext(cid);
    data_write_req.setContext(cid);
}

AtomicSimpleCPU::AtomicSimpleCPU(AtomicSimpleCPUParams *p)
    : BaseSimpleCPU(p),
      tickEvent([this]{ tick(); }, "AtomicSimpleCPU tick",
                false, Event::CPU_Tick_Pri),
      width(p->width), locked(false),
      simulate_data_stalls(p->simulate_data_stalls),
      simulate_inst_stalls(p->simulate_inst_stalls),
      icachePort(name() + ".icache_port", this),
      dcachePort(name() + ".dcache_port", this),
      fastmem(p->fastmem), dcache_access(false), dcache_latency(0),
      ppCommit(nullptr)
{
    _status = Idle;
}


AtomicSimpleCPU::~AtomicSimpleCPU()
{
    if (tickEvent.scheduled()) {
        deschedule(tickEvent);
    }
}

DrainState
AtomicSimpleCPU::drain()
{
    // Deschedule any power gating event (if any)
    deschedulePowerGatingEvent();

    if (switchedOut())
        return DrainState::Drained;

    if (!isDrained()) {
        DPRINTF(Drain, "Requesting drain.\n");
        return DrainState::Draining;
    } else {
        if (tickEvent.scheduled())
            deschedule(tickEvent);

        activeThreads.clear();
        DPRINTF(Drain, "Not executing microcode, no need to drain.\n");
        return DrainState::Drained;
    }
}

void
AtomicSimpleCPU::threadSnoop(PacketPtr pkt, ThreadID sender)
{
    DPRINTF(SimpleCPU, "received snoop pkt for addr:%#x %s\n", pkt->getAddr(),
            pkt->cmdString());

    for (ThreadID tid = 0; tid < numThreads; tid++) {
        if (tid != sender) {
            if (getCpuAddrMonitor(tid)->doMonitor(pkt)) {
                wakeup(tid);
            }

            TheISA::handleLockedSnoop(threadInfo[tid]->thread,
                                      pkt, dcachePort.cacheBlockMask);
        }
    }
}

void
AtomicSimpleCPU::drainResume()
{
    assert(!tickEvent.scheduled());
    if (switchedOut())
        return;

    DPRINTF(SimpleCPU, "Resume\n");
    verifyMemoryMode();

    assert(!threadContexts.empty());

    _status = BaseSimpleCPU::Idle;

    for (ThreadID tid = 0; tid < numThreads; tid++) {
        if (threadInfo[tid]->thread->status() == ThreadContext::Active) {
            threadInfo[tid]->notIdleFraction = 1;
            activeThreads.push_back(tid);
            _status = BaseSimpleCPU::Running;

            // Tick if any threads active
            if (!tickEvent.scheduled()) {
                schedule(tickEvent, nextCycle());
            }
        } else {
            threadInfo[tid]->notIdleFraction = 0;
        }
    }

    // Reschedule any power gating event (if any)
    schedulePowerGatingEvent();
}

bool
AtomicSimpleCPU::tryCompleteDrain()
{
    if (drainState() != DrainState::Draining)
        return false;

    DPRINTF(Drain, "tryCompleteDrain.\n");
    if (!isDrained())
        return false;

    DPRINTF(Drain, "CPU done draining, processing drain event\n");
    signalDrainDone();

    return true;
}


void
AtomicSimpleCPU::switchOut()
{
    BaseSimpleCPU::switchOut();

    assert(!tickEvent.scheduled());
    assert(_status == BaseSimpleCPU::Running || _status == Idle);
    assert(isDrained());
}


void
AtomicSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
{
    BaseSimpleCPU::takeOverFrom(oldCPU);

    // The tick event should have been descheduled by drain()
    assert(!tickEvent.scheduled());
}

void
AtomicSimpleCPU::verifyMemoryMode() const
{
    if (!system->isAtomicMode()) {
        fatal("The atomic CPU requires the memory system to be in "
              "'atomic' mode.\n");
    }
}

void
AtomicSimpleCPU::activateContext(ThreadID thread_num)
{
    DPRINTF(SimpleCPU, "ActivateContext %d\n", thread_num);

    assert(thread_num < numThreads);

    threadInfo[thread_num]->notIdleFraction = 1;
    Cycles delta = ticksToCycles(threadInfo[thread_num]->thread->lastActivate -
                                 threadInfo[thread_num]->thread->lastSuspend);
    numCycles += delta;
    ppCycles->notify(delta);

    if (!tickEvent.scheduled()) {
        // Make sure ticks are still on multiples of cycles
        schedule(tickEvent, clockEdge(Cycles(0)));
    }
    _status = BaseSimpleCPU::Running;
    if (std::find(activeThreads.begin(), activeThreads.end(), thread_num)
        == activeThreads.end()) {
        activeThreads.push_back(thread_num);
    }

    BaseCPU::activateContext(thread_num);
}


void
AtomicSimpleCPU::suspendContext(ThreadID thread_num)
{
    DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);

    assert(thread_num < numThreads);
    activeThreads.remove(thread_num);

    if (_status == Idle)
        return;

    assert(_status == BaseSimpleCPU::Running);

    threadInfo[thread_num]->notIdleFraction = 0;

    if (activeThreads.empty()) {
        _status = Idle;

        if (tickEvent.scheduled()) {
            deschedule(tickEvent);
        }
    }

    BaseCPU::suspendContext(thread_num);
}


Tick
AtomicSimpleCPU::AtomicCPUDPort::recvAtomicSnoop(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "received snoop pkt for addr:%#x %s\n", pkt->getAddr(),
            pkt->cmdString());

    // X86 ISA: Snooping an invalidation for monitor/mwait
    AtomicSimpleCPU *cpu = (AtomicSimpleCPU *)(&owner);

    for (ThreadID tid = 0; tid < cpu->numThreads; tid++) {
        if (cpu->getCpuAddrMonitor(tid)->doMonitor(pkt)) {
            cpu->wakeup(tid);
        }
    }

    // If the snoop invalidates, release any associated locks.
    // When run without caches, invalidation packets will not be received;
    // hence we must check whether the incoming packet is a write and wake up
    // the processor accordingly.
    if (pkt->isInvalidate() || pkt->isWrite()) {
        DPRINTF(SimpleCPU, "received invalidation for addr:%#x\n",
                pkt->getAddr());
        for (auto &t_info : cpu->threadInfo) {
            TheISA::handleLockedSnoop(t_info->thread, pkt, cacheBlockMask);
        }
    }

    return 0;
}

void
AtomicSimpleCPU::AtomicCPUDPort::recvFunctionalSnoop(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "received snoop pkt for addr:%#x %s\n", pkt->getAddr(),
            pkt->cmdString());

    // X86 ISA: Snooping an invalidation for monitor/mwait
    AtomicSimpleCPU *cpu = (AtomicSimpleCPU *)(&owner);
    for (ThreadID tid = 0; tid < cpu->numThreads; tid++) {
        if (cpu->getCpuAddrMonitor(tid)->doMonitor(pkt)) {
            cpu->wakeup(tid);
        }
    }

    // If the snoop invalidates, release any associated locks.
    if (pkt->isInvalidate()) {
        DPRINTF(SimpleCPU,
"received invalidation for addr:%#x\n", 321 pkt->getAddr()); 322 for (auto &t_info : cpu->threadInfo) { 323 TheISA::handleLockedSnoop(t_info->thread, pkt, cacheBlockMask); 324 } 325 } 326} 327 328Fault 329AtomicSimpleCPU::readMem(Addr addr, uint8_t * data, unsigned size, 330 Request::Flags flags) 331{ 332 SimpleExecContext& t_info = *threadInfo[curThread]; 333 SimpleThread* thread = t_info.thread; 334 335 // use the CPU's statically allocated read request and packet objects 336 Request *req = &data_read_req; 337 338 if (traceData) 339 traceData->setMem(addr, size, flags); 340 341 //The size of the data we're trying to read. 342 int fullSize = size; 343 344 //The address of the second part of this access if it needs to be split 345 //across a cache line boundary. 346 Addr secondAddr = roundDown(addr + size - 1, cacheLineSize()); 347 348 if (secondAddr > addr) 349 size = secondAddr - addr; 350 351 dcache_latency = 0; 352 353 req->taskId(taskId()); 354 while (1) { 355 req->setVirt(0, addr, size, flags, dataMasterId(), thread->pcState().instAddr()); 356 357 // translate to physical address 358 Fault fault = thread->dtb->translateAtomic(req, thread->getTC(), 359 BaseTLB::Read); 360 361 // Now do the access. 362 if (fault == NoFault && !req->getFlags().isSet(Request::NO_ACCESS)) { 363 Packet pkt(req, Packet::makeReadCmd(req)); 364 pkt.dataStatic(data); 365 366 if (req->isMmappedIpr()) 367 dcache_latency += TheISA::handleIprRead(thread->getTC(), &pkt); 368 else { 369 if (fastmem && system->isMemAddr(pkt.getAddr())) 370 system->getPhysMem().access(&pkt); 371 else 372 dcache_latency += dcachePort.sendAtomic(&pkt); 373 } 374 dcache_access = true; 375 376 assert(!pkt.isError()); 377 378 if (req->isLLSC()) { 379 TheISA::handleLockedRead(thread, req); 380 } 381 } 382 383 //If there's a fault, return it 384 if (fault != NoFault) { 385 if (req->isPrefetch()) { 386 return NoFault; 387 } else { 388 return fault; 389 } 390 } 391 392 //If we don't need to access a second cache line, stop now. 393 if (secondAddr <= addr) 394 { 395 if (req->isLockedRMW() && fault == NoFault) { 396 assert(!locked); 397 locked = true; 398 } 399 400 return fault; 401 } 402 403 /* 404 * Set up for accessing the second cache line. 405 */ 406 407 //Move the pointer we're reading into to the correct location. 408 data += size; 409 //Adjust the size to get the remaining bytes. 410 size = addr + fullSize - secondAddr; 411 //And access the right address. 412 addr = secondAddr; 413 } 414} 415 416Fault 417AtomicSimpleCPU::initiateMemRead(Addr addr, unsigned size, 418 Request::Flags flags) 419{ 420 panic("initiateMemRead() is for timing accesses, and should " 421 "never be called on AtomicSimpleCPU.\n"); 422} 423 424Fault 425AtomicSimpleCPU::writeMem(uint8_t *data, unsigned size, Addr addr, 426 Request::Flags flags, uint64_t *res) 427{ 428 SimpleExecContext& t_info = *threadInfo[curThread]; 429 SimpleThread* thread = t_info.thread; 430 static uint8_t zero_array[64] = {}; 431 432 if (data == NULL) { 433 assert(size <= 64); 434 assert(flags & Request::CACHE_BLOCK_ZERO); 435 // This must be a cache block cleaning request 436 data = zero_array; 437 } 438 439 // use the CPU's statically allocated write request and packet objects 440 Request *req = &data_write_req; 441 442 if (traceData) 443 traceData->setMem(addr, size, flags); 444 445 //The size of the data we're trying to read. 446 int fullSize = size; 447 448 //The address of the second part of this access if it needs to be split 449 //across a cache line boundary. 
    Addr secondAddr = roundDown(addr + size - 1, cacheLineSize());

    if (secondAddr > addr)
        size = secondAddr - addr;

    dcache_latency = 0;

    req->taskId(taskId());
    while (1) {
        req->setVirt(0, addr, size, flags, dataMasterId(),
                     thread->pcState().instAddr());

        // translate to physical address
        Fault fault = thread->dtb->translateAtomic(req, thread->getTC(),
                                                   BaseTLB::Write);

        // Now do the access.
        if (fault == NoFault) {
            MemCmd cmd = MemCmd::WriteReq; // default
            bool do_access = true;  // flag to suppress cache access

            if (req->isLLSC()) {
                cmd = MemCmd::StoreCondReq;
                do_access = TheISA::handleLockedWrite(thread, req,
                                                      dcachePort.cacheBlockMask);
            } else if (req->isSwap()) {
                cmd = MemCmd::SwapReq;
                if (req->isCondSwap()) {
                    assert(res);
                    req->setExtraData(*res);
                }
            }

            if (do_access && !req->getFlags().isSet(Request::NO_ACCESS)) {
                Packet pkt = Packet(req, cmd);
                pkt.dataStatic(data);

                if (req->isMmappedIpr()) {
                    dcache_latency +=
                        TheISA::handleIprWrite(thread->getTC(), &pkt);
                } else {
                    if (fastmem && system->isMemAddr(pkt.getAddr()))
                        system->getPhysMem().access(&pkt);
                    else
                        dcache_latency += dcachePort.sendAtomic(&pkt);

                    // Notify other threads on this CPU of the write
                    threadSnoop(&pkt, curThread);
                }
                dcache_access = true;
                assert(!pkt.isError());

                if (req->isSwap()) {
                    assert(res);
                    memcpy(res, pkt.getConstPtr<uint8_t>(), fullSize);
                }
            }

            if (res && !req->isSwap()) {
                *res = req->getExtraData();
            }
        }

        // If there's a fault or we don't need to access a second cache line,
        // stop now.
        if (fault != NoFault || secondAddr <= addr)
        {
            if (req->isLockedRMW() && fault == NoFault) {
                assert(locked);
                locked = false;
            }

            if (fault != NoFault && req->isPrefetch()) {
                return NoFault;
            } else {
                return fault;
            }
        }

        /*
         * Set up for accessing the second cache line.
         */

        // Move the pointer we're writing from to the correct location.
        data += size;
        // Adjust the size to get the remaining bytes.
        size = addr + fullSize - secondAddr;
        // And access the right address.
        addr = secondAddr;
    }
}


void
AtomicSimpleCPU::tick()
{
    DPRINTF(SimpleCPU, "Tick\n");

    // Change thread if multi-threaded
    swapActiveThread();

    // Set memory request ids to current thread
    if (numThreads > 1) {
        ContextID cid = threadContexts[curThread]->contextId();

        ifetch_req.setContext(cid);
        data_read_req.setContext(cid);
        data_write_req.setContext(cid);
    }

    SimpleExecContext& t_info = *threadInfo[curThread];
    SimpleThread* thread = t_info.thread;

    Tick latency = 0;

    for (int i = 0; i < width || locked; ++i) {
        numCycles++;
        ppCycles->notify(1);

        if (!curStaticInst || !curStaticInst->isDelayedCommit()) {
            checkForInterrupts();
            checkPcEventQueue();
        }

        // We must have just got suspended by a PC event
        if (_status == Idle) {
            tryCompleteDrain();
            return;
        }

        Fault fault = NoFault;

        TheISA::PCState pcState = thread->pcState();

        bool needToFetch = !isRomMicroPC(pcState.microPC()) &&
                           !curMacroStaticInst;
        if (needToFetch) {
            ifetch_req.taskId(taskId());
            setupFetchRequest(&ifetch_req);
            fault = thread->itb->translateAtomic(&ifetch_req, thread->getTC(),
                                                 BaseTLB::Execute);
        }

        if (fault == NoFault) {
            Tick icache_latency = 0;
            bool icache_access = false;
            dcache_access = false; // assume no dcache access

            if (needToFetch) {
                // This is commented out because the decoder would act like
                // a tiny cache otherwise. It wouldn't be flushed when needed
                // like the I cache. It should be flushed, and when that works
                // this code should be uncommented.
                // Fetch more instruction memory if necessary
                //if (decoder.needMoreBytes())
                //{
                    icache_access = true;
                    Packet ifetch_pkt = Packet(&ifetch_req, MemCmd::ReadReq);
                    ifetch_pkt.dataStatic(&inst);

                    if (fastmem && system->isMemAddr(ifetch_pkt.getAddr()))
                        system->getPhysMem().access(&ifetch_pkt);
                    else
                        icache_latency = icachePort.sendAtomic(&ifetch_pkt);

                    assert(!ifetch_pkt.isError());

                    // ifetch_req is initialized to read the instruction
                    // directly into the CPU object's inst field.
                //}
            }

            preExecute();

            Tick stall_ticks = 0;
            if (curStaticInst) {
                fault = curStaticInst->execute(&t_info, traceData);

                // keep an instruction count
                if (fault == NoFault) {
                    countInst();
                    ppCommit->notify(std::make_pair(thread, curStaticInst));
                } else if (traceData && !DTRACE(ExecFaulting)) {
                    delete traceData;
                    traceData = NULL;
                }

                if (dynamic_pointer_cast<SyscallRetryFault>(fault)) {
                    // Retry execution of system calls after a delay.
                    // Prevents immediate re-execution, since the conditions
                    // that caused the retry are unlikely to change every tick.
                    stall_ticks += clockEdge(syscallRetryLatency) - curTick();
                }

                postExecute();
            }

            // @todo remove me after debugging with legion done
            if (curStaticInst && (!curStaticInst->isMicroop() ||
                                  curStaticInst->isFirstMicroop()))
                instCnt++;

            if (simulate_inst_stalls && icache_access)
                stall_ticks += icache_latency;

            if (simulate_data_stalls && dcache_access)
                stall_ticks += dcache_latency;

            if (stall_ticks) {
                // the atomic cpu does its accounting in ticks, so
                // keep counting in ticks but round to the clock
                // period
                latency += divCeil(stall_ticks, clockPeriod()) *
                    clockPeriod();
            }

        }
        if (fault != NoFault || !t_info.stayAtPC)
            advancePC(fault);
    }

    if (tryCompleteDrain())
        return;

    // instruction takes at least one cycle
    if (latency < clockPeriod())
        latency = clockPeriod();

    if (_status != Idle)
        reschedule(tickEvent, curTick() + latency, true);
}

void
AtomicSimpleCPU::regProbePoints()
{
    BaseCPU::regProbePoints();

    ppCommit = new ProbePointArg<pair<SimpleThread*, const StaticInstPtr>>
                                (getProbeManager(), "Commit");
}

void
AtomicSimpleCPU::printAddr(Addr a)
{
    dcachePort.printAddr(a);
}

////////////////////////////////////////////////////////////////////////
//
//  AtomicSimpleCPU Simulation Object
//
AtomicSimpleCPU *
AtomicSimpleCPUParams::create()
{
    return new AtomicSimpleCPU(this);
}