// atomic.cc revision 12127:4207df055b0d
1/* 2 * Copyright 2014 Google, Inc. 3 * Copyright (c) 2012-2013,2015 ARM Limited 4 * All rights reserved. 5 * 6 * The license below extends only to copyright in the software and shall 7 * not be construed as granting a license to any other intellectual 8 * property including but not limited to intellectual property relating 9 * to a hardware implementation of the functionality of the software 10 * licensed hereunder. You may use the software subject to the license 11 * terms below provided that you ensure that this notice is replicated 12 * unmodified and in its entirety in all distributions of the software, 13 * modified or unmodified, in source code or in binary form. 14 * 15 * Copyright (c) 2002-2005 The Regents of The University of Michigan 16 * All rights reserved. 17 * 18 * Redistribution and use in source and binary forms, with or without 19 * modification, are permitted provided that the following conditions are 20 * met: redistributions of source code must retain the above copyright 21 * notice, this list of conditions and the following disclaimer; 22 * redistributions in binary form must reproduce the above copyright 23 * notice, this list of conditions and the following disclaimer in the 24 * documentation and/or other materials provided with the distribution; 25 * neither the name of the copyright holders nor the names of its 26 * contributors may be used to endorse or promote products derived from 27 * this software without specific prior written permission. 28 * 29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 30 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 31 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 32 * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT 33 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 34 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 35 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 36 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 37 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 38 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 39 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 40 * 41 * Authors: Steve Reinhardt 42 */ 43 44#include "cpu/simple/atomic.hh" 45 46#include "arch/locked_mem.hh" 47#include "arch/mmapped_ipr.hh" 48#include "arch/utility.hh" 49#include "base/bigint.hh" 50#include "base/output.hh" 51#include "config/the_isa.hh" 52#include "cpu/exetrace.hh" 53#include "debug/Drain.hh" 54#include "debug/ExecFaulting.hh" 55#include "debug/SimpleCPU.hh" 56#include "mem/packet.hh" 57#include "mem/packet_access.hh" 58#include "mem/physical.hh" 59#include "params/AtomicSimpleCPU.hh" 60#include "sim/faults.hh" 61#include "sim/full_system.hh" 62#include "sim/system.hh" 63 64using namespace std; 65using namespace TheISA; 66 67void 68AtomicSimpleCPU::init() 69{ 70 BaseSimpleCPU::init(); 71 72 int cid = threadContexts[0]->contextId(); 73 ifetch_req.setContext(cid); 74 data_read_req.setContext(cid); 75 data_write_req.setContext(cid); 76} 77 78AtomicSimpleCPU::AtomicSimpleCPU(AtomicSimpleCPUParams *p) 79 : BaseSimpleCPU(p), 80 tickEvent([this]{ tick(); }, "AtomicSimpleCPU tick", 81 false, Event::CPU_Tick_Pri), 82 width(p->width), locked(false), 83 simulate_data_stalls(p->simulate_data_stalls), 84 simulate_inst_stalls(p->simulate_inst_stalls), 85 icachePort(name() + ".icache_port", this), 86 dcachePort(name() + ".dcache_port", this), 87 fastmem(p->fastmem), dcache_access(false), dcache_latency(0), 88 ppCommit(nullptr) 89{ 90 _status = Idle; 91} 92 93 94AtomicSimpleCPU::~AtomicSimpleCPU() 95{ 96 if 
(tickEvent.scheduled()) { 97 deschedule(tickEvent); 98 } 99} 100 101DrainState 102AtomicSimpleCPU::drain() 103{ 104 if (switchedOut()) 105 return DrainState::Drained; 106 107 if (!isDrained()) { 108 DPRINTF(Drain, "Requesting drain.\n"); 109 return DrainState::Draining; 110 } else { 111 if (tickEvent.scheduled()) 112 deschedule(tickEvent); 113 114 activeThreads.clear(); 115 DPRINTF(Drain, "Not executing microcode, no need to drain.\n"); 116 return DrainState::Drained; 117 } 118} 119 120void 121AtomicSimpleCPU::threadSnoop(PacketPtr pkt, ThreadID sender) 122{ 123 DPRINTF(SimpleCPU, "received snoop pkt for addr:%#x %s\n", pkt->getAddr(), 124 pkt->cmdString()); 125 126 for (ThreadID tid = 0; tid < numThreads; tid++) { 127 if (tid != sender) { 128 if (getCpuAddrMonitor(tid)->doMonitor(pkt)) { 129 wakeup(tid); 130 } 131 132 TheISA::handleLockedSnoop(threadInfo[tid]->thread, 133 pkt, dcachePort.cacheBlockMask); 134 } 135 } 136} 137 138void 139AtomicSimpleCPU::drainResume() 140{ 141 assert(!tickEvent.scheduled()); 142 if (switchedOut()) 143 return; 144 145 DPRINTF(SimpleCPU, "Resume\n"); 146 verifyMemoryMode(); 147 148 assert(!threadContexts.empty()); 149 150 _status = BaseSimpleCPU::Idle; 151 152 for (ThreadID tid = 0; tid < numThreads; tid++) { 153 if (threadInfo[tid]->thread->status() == ThreadContext::Active) { 154 threadInfo[tid]->notIdleFraction = 1; 155 activeThreads.push_back(tid); 156 _status = BaseSimpleCPU::Running; 157 158 // Tick if any threads active 159 if (!tickEvent.scheduled()) { 160 schedule(tickEvent, nextCycle()); 161 } 162 } else { 163 threadInfo[tid]->notIdleFraction = 0; 164 } 165 } 166} 167 168bool 169AtomicSimpleCPU::tryCompleteDrain() 170{ 171 if (drainState() != DrainState::Draining) 172 return false; 173 174 DPRINTF(Drain, "tryCompleteDrain.\n"); 175 if (!isDrained()) 176 return false; 177 178 DPRINTF(Drain, "CPU done draining, processing drain event\n"); 179 signalDrainDone(); 180 181 return true; 182} 183 184 185void 
186AtomicSimpleCPU::switchOut() 187{ 188 BaseSimpleCPU::switchOut(); 189 190 assert(!tickEvent.scheduled()); 191 assert(_status == BaseSimpleCPU::Running || _status == Idle); 192 assert(isDrained()); 193} 194 195 196void 197AtomicSimpleCPU::takeOverFrom(BaseCPU *oldCPU) 198{ 199 BaseSimpleCPU::takeOverFrom(oldCPU); 200 201 // The tick event should have been descheduled by drain() 202 assert(!tickEvent.scheduled()); 203} 204 205void 206AtomicSimpleCPU::verifyMemoryMode() const 207{ 208 if (!system->isAtomicMode()) { 209 fatal("The atomic CPU requires the memory system to be in " 210 "'atomic' mode.\n"); 211 } 212} 213 214void 215AtomicSimpleCPU::activateContext(ThreadID thread_num) 216{ 217 DPRINTF(SimpleCPU, "ActivateContext %d\n", thread_num); 218 219 assert(thread_num < numThreads); 220 221 threadInfo[thread_num]->notIdleFraction = 1; 222 Cycles delta = ticksToCycles(threadInfo[thread_num]->thread->lastActivate - 223 threadInfo[thread_num]->thread->lastSuspend); 224 numCycles += delta; 225 ppCycles->notify(delta); 226 227 if (!tickEvent.scheduled()) { 228 //Make sure ticks are still on multiples of cycles 229 schedule(tickEvent, clockEdge(Cycles(0))); 230 } 231 _status = BaseSimpleCPU::Running; 232 if (std::find(activeThreads.begin(), activeThreads.end(), thread_num) 233 == activeThreads.end()) { 234 activeThreads.push_back(thread_num); 235 } 236 237 BaseCPU::activateContext(thread_num); 238} 239 240 241void 242AtomicSimpleCPU::suspendContext(ThreadID thread_num) 243{ 244 DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num); 245 246 assert(thread_num < numThreads); 247 activeThreads.remove(thread_num); 248 249 if (_status == Idle) 250 return; 251 252 assert(_status == BaseSimpleCPU::Running); 253 254 threadInfo[thread_num]->notIdleFraction = 0; 255 256 if (activeThreads.empty()) { 257 _status = Idle; 258 259 if (tickEvent.scheduled()) { 260 deschedule(tickEvent); 261 } 262 } 263 264 BaseCPU::suspendContext(thread_num); 265} 266 267 268Tick 
269AtomicSimpleCPU::AtomicCPUDPort::recvAtomicSnoop(PacketPtr pkt) 270{ 271 DPRINTF(SimpleCPU, "received snoop pkt for addr:%#x %s\n", pkt->getAddr(), 272 pkt->cmdString()); 273 274 // X86 ISA: Snooping an invalidation for monitor/mwait 275 AtomicSimpleCPU *cpu = (AtomicSimpleCPU *)(&owner); 276 277 for (ThreadID tid = 0; tid < cpu->numThreads; tid++) { 278 if (cpu->getCpuAddrMonitor(tid)->doMonitor(pkt)) { 279 cpu->wakeup(tid); 280 } 281 } 282 283 // if snoop invalidates, release any associated locks 284 // When run without caches, Invalidation packets will not be received 285 // hence we must check if the incoming packets are writes and wakeup 286 // the processor accordingly 287 if (pkt->isInvalidate() || pkt->isWrite()) { 288 DPRINTF(SimpleCPU, "received invalidation for addr:%#x\n", 289 pkt->getAddr()); 290 for (auto &t_info : cpu->threadInfo) { 291 TheISA::handleLockedSnoop(t_info->thread, pkt, cacheBlockMask); 292 } 293 } 294 295 return 0; 296} 297 298void 299AtomicSimpleCPU::AtomicCPUDPort::recvFunctionalSnoop(PacketPtr pkt) 300{ 301 DPRINTF(SimpleCPU, "received snoop pkt for addr:%#x %s\n", pkt->getAddr(), 302 pkt->cmdString()); 303 304 // X86 ISA: Snooping an invalidation for monitor/mwait 305 AtomicSimpleCPU *cpu = (AtomicSimpleCPU *)(&owner); 306 for (ThreadID tid = 0; tid < cpu->numThreads; tid++) { 307 if (cpu->getCpuAddrMonitor(tid)->doMonitor(pkt)) { 308 cpu->wakeup(tid); 309 } 310 } 311 312 // if snoop invalidates, release any associated locks 313 if (pkt->isInvalidate()) { 314 DPRINTF(SimpleCPU, "received invalidation for addr:%#x\n", 315 pkt->getAddr()); 316 for (auto &t_info : cpu->threadInfo) { 317 TheISA::handleLockedSnoop(t_info->thread, pkt, cacheBlockMask); 318 } 319 } 320} 321 322Fault 323AtomicSimpleCPU::readMem(Addr addr, uint8_t * data, unsigned size, 324 Request::Flags flags) 325{ 326 SimpleExecContext& t_info = *threadInfo[curThread]; 327 SimpleThread* thread = t_info.thread; 328 329 // use the CPU's statically allocated read request 
and packet objects 330 Request *req = &data_read_req; 331 332 if (traceData) 333 traceData->setMem(addr, size, flags); 334 335 //The size of the data we're trying to read. 336 int fullSize = size; 337 338 //The address of the second part of this access if it needs to be split 339 //across a cache line boundary. 340 Addr secondAddr = roundDown(addr + size - 1, cacheLineSize()); 341 342 if (secondAddr > addr) 343 size = secondAddr - addr; 344 345 dcache_latency = 0; 346 347 req->taskId(taskId()); 348 while (1) { 349 req->setVirt(0, addr, size, flags, dataMasterId(), thread->pcState().instAddr()); 350 351 // translate to physical address 352 Fault fault = thread->dtb->translateAtomic(req, thread->getTC(), 353 BaseTLB::Read); 354 355 // Now do the access. 356 if (fault == NoFault && !req->getFlags().isSet(Request::NO_ACCESS)) { 357 Packet pkt(req, Packet::makeReadCmd(req)); 358 pkt.dataStatic(data); 359 360 if (req->isMmappedIpr()) 361 dcache_latency += TheISA::handleIprRead(thread->getTC(), &pkt); 362 else { 363 if (fastmem && system->isMemAddr(pkt.getAddr())) 364 system->getPhysMem().access(&pkt); 365 else 366 dcache_latency += dcachePort.sendAtomic(&pkt); 367 } 368 dcache_access = true; 369 370 assert(!pkt.isError()); 371 372 if (req->isLLSC()) { 373 TheISA::handleLockedRead(thread, req); 374 } 375 } 376 377 //If there's a fault, return it 378 if (fault != NoFault) { 379 if (req->isPrefetch()) { 380 return NoFault; 381 } else { 382 return fault; 383 } 384 } 385 386 //If we don't need to access a second cache line, stop now. 387 if (secondAddr <= addr) 388 { 389 if (req->isLockedRMW() && fault == NoFault) { 390 assert(!locked); 391 locked = true; 392 } 393 394 return fault; 395 } 396 397 /* 398 * Set up for accessing the second cache line. 399 */ 400 401 //Move the pointer we're reading into to the correct location. 402 data += size; 403 //Adjust the size to get the remaining bytes. 404 size = addr + fullSize - secondAddr; 405 //And access the right address. 
406 addr = secondAddr; 407 } 408} 409 410Fault 411AtomicSimpleCPU::initiateMemRead(Addr addr, unsigned size, 412 Request::Flags flags) 413{ 414 panic("initiateMemRead() is for timing accesses, and should " 415 "never be called on AtomicSimpleCPU.\n"); 416} 417 418Fault 419AtomicSimpleCPU::writeMem(uint8_t *data, unsigned size, Addr addr, 420 Request::Flags flags, uint64_t *res) 421{ 422 SimpleExecContext& t_info = *threadInfo[curThread]; 423 SimpleThread* thread = t_info.thread; 424 static uint8_t zero_array[64] = {}; 425 426 if (data == NULL) { 427 assert(size <= 64); 428 assert(flags & Request::CACHE_BLOCK_ZERO); 429 // This must be a cache block cleaning request 430 data = zero_array; 431 } 432 433 // use the CPU's statically allocated write request and packet objects 434 Request *req = &data_write_req; 435 436 if (traceData) 437 traceData->setMem(addr, size, flags); 438 439 //The size of the data we're trying to read. 440 int fullSize = size; 441 442 //The address of the second part of this access if it needs to be split 443 //across a cache line boundary. 444 Addr secondAddr = roundDown(addr + size - 1, cacheLineSize()); 445 446 if (secondAddr > addr) 447 size = secondAddr - addr; 448 449 dcache_latency = 0; 450 451 req->taskId(taskId()); 452 while (1) { 453 req->setVirt(0, addr, size, flags, dataMasterId(), thread->pcState().instAddr()); 454 455 // translate to physical address 456 Fault fault = thread->dtb->translateAtomic(req, thread->getTC(), BaseTLB::Write); 457 458 // Now do the access. 
459 if (fault == NoFault) { 460 MemCmd cmd = MemCmd::WriteReq; // default 461 bool do_access = true; // flag to suppress cache access 462 463 if (req->isLLSC()) { 464 cmd = MemCmd::StoreCondReq; 465 do_access = TheISA::handleLockedWrite(thread, req, dcachePort.cacheBlockMask); 466 } else if (req->isSwap()) { 467 cmd = MemCmd::SwapReq; 468 if (req->isCondSwap()) { 469 assert(res); 470 req->setExtraData(*res); 471 } 472 } 473 474 if (do_access && !req->getFlags().isSet(Request::NO_ACCESS)) { 475 Packet pkt = Packet(req, cmd); 476 pkt.dataStatic(data); 477 478 if (req->isMmappedIpr()) { 479 dcache_latency += 480 TheISA::handleIprWrite(thread->getTC(), &pkt); 481 } else { 482 if (fastmem && system->isMemAddr(pkt.getAddr())) 483 system->getPhysMem().access(&pkt); 484 else 485 dcache_latency += dcachePort.sendAtomic(&pkt); 486 487 // Notify other threads on this CPU of write 488 threadSnoop(&pkt, curThread); 489 } 490 dcache_access = true; 491 assert(!pkt.isError()); 492 493 if (req->isSwap()) { 494 assert(res); 495 memcpy(res, pkt.getConstPtr<uint8_t>(), fullSize); 496 } 497 } 498 499 if (res && !req->isSwap()) { 500 *res = req->getExtraData(); 501 } 502 } 503 504 //If there's a fault or we don't need to access a second cache line, 505 //stop now. 506 if (fault != NoFault || secondAddr <= addr) 507 { 508 if (req->isLockedRMW() && fault == NoFault) { 509 assert(locked); 510 locked = false; 511 } 512 513 514 if (fault != NoFault && req->isPrefetch()) { 515 return NoFault; 516 } else { 517 return fault; 518 } 519 } 520 521 /* 522 * Set up for accessing the second cache line. 523 */ 524 525 //Move the pointer we're reading into to the correct location. 526 data += size; 527 //Adjust the size to get the remaining bytes. 528 size = addr + fullSize - secondAddr; 529 //And access the right address. 
530 addr = secondAddr; 531 } 532} 533 534 535void 536AtomicSimpleCPU::tick() 537{ 538 DPRINTF(SimpleCPU, "Tick\n"); 539 540 // Change thread if multi-threaded 541 swapActiveThread(); 542 543 // Set memroy request ids to current thread 544 if (numThreads > 1) { 545 ContextID cid = threadContexts[curThread]->contextId(); 546 547 ifetch_req.setContext(cid); 548 data_read_req.setContext(cid); 549 data_write_req.setContext(cid); 550 } 551 552 SimpleExecContext& t_info = *threadInfo[curThread]; 553 SimpleThread* thread = t_info.thread; 554 555 Tick latency = 0; 556 557 for (int i = 0; i < width || locked; ++i) { 558 numCycles++; 559 ppCycles->notify(1); 560 561 if (!curStaticInst || !curStaticInst->isDelayedCommit()) { 562 checkForInterrupts(); 563 checkPcEventQueue(); 564 } 565 566 // We must have just got suspended by a PC event 567 if (_status == Idle) { 568 tryCompleteDrain(); 569 return; 570 } 571 572 Fault fault = NoFault; 573 574 TheISA::PCState pcState = thread->pcState(); 575 576 bool needToFetch = !isRomMicroPC(pcState.microPC()) && 577 !curMacroStaticInst; 578 if (needToFetch) { 579 ifetch_req.taskId(taskId()); 580 setupFetchRequest(&ifetch_req); 581 fault = thread->itb->translateAtomic(&ifetch_req, thread->getTC(), 582 BaseTLB::Execute); 583 } 584 585 if (fault == NoFault) { 586 Tick icache_latency = 0; 587 bool icache_access = false; 588 dcache_access = false; // assume no dcache access 589 590 if (needToFetch) { 591 // This is commented out because the decoder would act like 592 // a tiny cache otherwise. It wouldn't be flushed when needed 593 // like the I cache. It should be flushed, and when that works 594 // this code should be uncommented. 
595 //Fetch more instruction memory if necessary 596 //if (decoder.needMoreBytes()) 597 //{ 598 icache_access = true; 599 Packet ifetch_pkt = Packet(&ifetch_req, MemCmd::ReadReq); 600 ifetch_pkt.dataStatic(&inst); 601 602 if (fastmem && system->isMemAddr(ifetch_pkt.getAddr())) 603 system->getPhysMem().access(&ifetch_pkt); 604 else 605 icache_latency = icachePort.sendAtomic(&ifetch_pkt); 606 607 assert(!ifetch_pkt.isError()); 608 609 // ifetch_req is initialized to read the instruction directly 610 // into the CPU object's inst field. 611 //} 612 } 613 614 preExecute(); 615 616 Tick stall_ticks = 0; 617 if (curStaticInst) { 618 fault = curStaticInst->execute(&t_info, traceData); 619 620 // keep an instruction count 621 if (fault == NoFault) { 622 countInst(); 623 ppCommit->notify(std::make_pair(thread, curStaticInst)); 624 } 625 else if (traceData && !DTRACE(ExecFaulting)) { 626 delete traceData; 627 traceData = NULL; 628 } 629 630 if (dynamic_pointer_cast<SyscallRetryFault>(fault)) { 631 // Retry execution of system calls after a delay. 632 // Prevents immediate re-execution since conditions which 633 // caused the retry are unlikely to change every tick. 
634 stall_ticks += clockEdge(syscallRetryLatency) - curTick(); 635 } 636 637 postExecute(); 638 } 639 640 // @todo remove me after debugging with legion done 641 if (curStaticInst && (!curStaticInst->isMicroop() || 642 curStaticInst->isFirstMicroop())) 643 instCnt++; 644 645 if (simulate_inst_stalls && icache_access) 646 stall_ticks += icache_latency; 647 648 if (simulate_data_stalls && dcache_access) 649 stall_ticks += dcache_latency; 650 651 if (stall_ticks) { 652 // the atomic cpu does its accounting in ticks, so 653 // keep counting in ticks but round to the clock 654 // period 655 latency += divCeil(stall_ticks, clockPeriod()) * 656 clockPeriod(); 657 } 658 659 } 660 if (fault != NoFault || !t_info.stayAtPC) 661 advancePC(fault); 662 } 663 664 if (tryCompleteDrain()) 665 return; 666 667 // instruction takes at least one cycle 668 if (latency < clockPeriod()) 669 latency = clockPeriod(); 670 671 if (_status != Idle) 672 reschedule(tickEvent, curTick() + latency, true); 673} 674 675void 676AtomicSimpleCPU::regProbePoints() 677{ 678 BaseCPU::regProbePoints(); 679 680 ppCommit = new ProbePointArg<pair<SimpleThread*, const StaticInstPtr>> 681 (getProbeManager(), "Commit"); 682} 683 684void 685AtomicSimpleCPU::printAddr(Addr a) 686{ 687 dcachePort.printAddr(a); 688} 689 690//////////////////////////////////////////////////////////////////////// 691// 692// AtomicSimpleCPU Simulation Object 693// 694AtomicSimpleCPU * 695AtomicSimpleCPUParams::create() 696{ 697 return new AtomicSimpleCPU(this); 698} 699