atomic.cc revision 10563
/*
 * Copyright (c) 2012-2013 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 */
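
// AtomicSimpleCPU is the simple in-order CPU model declared in
// cpu/simple/atomic.hh. It executes instructions one at a time from its
// tick() event and performs instruction fetches and data accesses with
// atomic-mode memory requests (sendAtomic, or direct physical-memory
// access when fastmem is enabled), which complete immediately and return
// a latency estimate instead of being modeled with timing packets. The
// memory system must therefore be in 'atomic' mode; see verifyMemoryMode().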

#include "arch/locked_mem.hh"
#include "arch/mmapped_ipr.hh"
#include "arch/utility.hh"
#include "base/bigint.hh"
#include "base/output.hh"
#include "config/the_isa.hh"
#include "cpu/simple/atomic.hh"
#include "cpu/exetrace.hh"
#include "debug/Drain.hh"
#include "debug/ExecFaulting.hh"
#include "debug/SimpleCPU.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "mem/physical.hh"
#include "params/AtomicSimpleCPU.hh"
#include "sim/faults.hh"
#include "sim/system.hh"
#include "sim/full_system.hh"

using namespace std;
using namespace TheISA;

AtomicSimpleCPU::TickEvent::TickEvent(AtomicSimpleCPU *c)
    : Event(CPU_Tick_Pri), cpu(c)
{
}


void
AtomicSimpleCPU::TickEvent::process()
{
    cpu->tick();
}

const char *
AtomicSimpleCPU::TickEvent::description() const
{
    return "AtomicSimpleCPU tick";
}

void
AtomicSimpleCPU::init()
{
    BaseCPU::init();

    // Initialise the ThreadContext's memory proxies
    tcBase()->initMemProxies(tcBase());

    if (FullSystem && !params()->switched_out) {
        ThreadID size = threadContexts.size();
        for (ThreadID i = 0; i < size; ++i) {
            ThreadContext *tc = threadContexts[i];
            // initialize CPU, including PC
            TheISA::initCPU(tc, tc->contextId());
        }
    }

    // Atomic doesn't do MT right now, so contextId == threadId
    ifetch_req.setThreadContext(_cpuId, 0); // Add thread ID if we add MT
    data_read_req.setThreadContext(_cpuId, 0); // Add thread ID here too
    data_write_req.setThreadContext(_cpuId, 0); // Add thread ID here too
}

AtomicSimpleCPU::AtomicSimpleCPU(AtomicSimpleCPUParams *p)
    : BaseSimpleCPU(p), tickEvent(this), width(p->width), locked(false),
      simulate_data_stalls(p->simulate_data_stalls),
      simulate_inst_stalls(p->simulate_inst_stalls),
      drain_manager(NULL),
      icachePort(name() + ".icache_port", this),
      dcachePort(name() + ".dcache_port", this),
      fastmem(p->fastmem), dcache_access(false), dcache_latency(0),
      ppCommit(nullptr)
{
    _status = Idle;
}


AtomicSimpleCPU::~AtomicSimpleCPU()
{
    if (tickEvent.scheduled()) {
        deschedule(tickEvent);
    }
}

unsigned int
AtomicSimpleCPU::drain(DrainManager *dm)
{
    assert(!drain_manager);
    if (switchedOut())
        return 0;

    if (!isDrained()) {
        DPRINTF(Drain, "Requesting drain: %s\n", pcState());
        drain_manager = dm;
        return 1;
    } else {
        if (tickEvent.scheduled())
            deschedule(tickEvent);

        DPRINTF(Drain, "Not executing microcode, no need to drain.\n");
        return 0;
    }
}

void
AtomicSimpleCPU::drainResume()
{
    assert(!tickEvent.scheduled());
    assert(!drain_manager);
    if (switchedOut())
        return;

    DPRINTF(SimpleCPU, "Resume\n");
    verifyMemoryMode();

    assert(!threadContexts.empty());
    if (threadContexts.size() > 1)
        fatal("The atomic CPU only supports one thread.\n");

    if (thread->status() == ThreadContext::Active) {
        schedule(tickEvent, nextCycle());
        _status = BaseSimpleCPU::Running;
        notIdleFraction = 1;
    } else {
        _status = BaseSimpleCPU::Idle;
        notIdleFraction = 0;
    }

    system->totalNumInsts = 0;
}

bool
AtomicSimpleCPU::tryCompleteDrain()
{
    if (!drain_manager)
        return false;

    DPRINTF(Drain, "tryCompleteDrain: %s\n", pcState());
    if (!isDrained())
        return false;

    DPRINTF(Drain, "CPU done draining, processing drain event\n");
    drain_manager->signalDrainDone();
    drain_manager = NULL;

    return true;
}


void
AtomicSimpleCPU::switchOut()
{
    BaseSimpleCPU::switchOut();

    assert(!tickEvent.scheduled());
    assert(_status == BaseSimpleCPU::Running || _status == Idle);
    assert(isDrained());
}


void
AtomicSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
{
    BaseSimpleCPU::takeOverFrom(oldCPU);

    // The tick event should have been descheduled by drain()
    assert(!tickEvent.scheduled());

    ifetch_req.setThreadContext(_cpuId, 0); // Add thread ID if we add MT
    data_read_req.setThreadContext(_cpuId, 0); // Add thread ID here too
    data_write_req.setThreadContext(_cpuId, 0); // Add thread ID here too
}

void
AtomicSimpleCPU::verifyMemoryMode() const
{
    if (!system->isAtomicMode()) {
        fatal("The atomic CPU requires the memory system to be in "
              "'atomic' mode.\n");
    }
}

void
AtomicSimpleCPU::activateContext(ThreadID thread_num)
{
    DPRINTF(SimpleCPU, "ActivateContext %d\n", thread_num);

    assert(thread_num == 0);
    assert(thread);

    assert(_status == Idle);
    assert(!tickEvent.scheduled());

    notIdleFraction = 1;
    Cycles delta = ticksToCycles(thread->lastActivate - thread->lastSuspend);
    numCycles += delta;
    ppCycles->notify(delta);

    // Make sure ticks are still on multiples of cycles
    schedule(tickEvent, clockEdge(Cycles(0)));
    _status = BaseSimpleCPU::Running;
}


void
AtomicSimpleCPU::suspendContext(ThreadID thread_num)
{
    DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);

    assert(thread_num == 0);
    assert(thread);

    if (_status == Idle)
        return;

    assert(_status == BaseSimpleCPU::Running);

    // tick event may not be scheduled if this gets called from inside
    // an instruction's execution, e.g. "quiesce"
    if (tickEvent.scheduled())
        deschedule(tickEvent);

    notIdleFraction = 0;
    _status = Idle;
}


Tick
AtomicSimpleCPU::AtomicCPUDPort::recvAtomicSnoop(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "received snoop pkt for addr:%#x %s\n", pkt->getAddr(),
            pkt->cmdString());

    // X86 ISA: Snooping an invalidation for monitor/mwait
    AtomicSimpleCPU *cpu = (AtomicSimpleCPU *)(&owner);
    if (cpu->getAddrMonitor()->doMonitor(pkt)) {
        cpu->wakeup();
    }

    // if snoop invalidates, release any associated locks
    if (pkt->isInvalidate()) {
        DPRINTF(SimpleCPU, "received invalidation for addr:%#x\n",
                pkt->getAddr());
        TheISA::handleLockedSnoop(cpu->thread, pkt, cacheBlockMask);
    }

    return 0;
}

void
AtomicSimpleCPU::AtomicCPUDPort::recvFunctionalSnoop(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "received snoop pkt for addr:%#x %s\n", pkt->getAddr(),
            pkt->cmdString());

    // X86 ISA: Snooping an invalidation for monitor/mwait
    AtomicSimpleCPU *cpu = (AtomicSimpleCPU *)(&owner);
    if (cpu->getAddrMonitor()->doMonitor(pkt)) {
        cpu->wakeup();
    }

    // if snoop invalidates, release any associated locks
    if (pkt->isInvalidate()) {
        DPRINTF(SimpleCPU, "received invalidation for addr:%#x\n",
                pkt->getAddr());
        TheISA::handleLockedSnoop(cpu->thread, pkt, cacheBlockMask);
    }
}

Fault
AtomicSimpleCPU::readMem(Addr addr, uint8_t * data,
                         unsigned size, unsigned flags)
{
    // use the CPU's statically allocated read request and packet objects
    Request *req = &data_read_req;

    if (traceData) {
        traceData->setAddr(addr);
    }

    // The size of the data we're trying to read.
    int fullSize = size;

    // The address of the second part of this access if it needs to be split
    // across a cache line boundary.
    Addr secondAddr = roundDown(addr + size - 1, cacheLineSize());

    if (secondAddr > addr)
        size = secondAddr - addr;

    dcache_latency = 0;

    req->taskId(taskId());
    while (1) {
        req->setVirt(0, addr, size, flags, dataMasterId(),
                     thread->pcState().instAddr());

        // translate to physical address
        Fault fault = thread->dtb->translateAtomic(req, tc, BaseTLB::Read);

        // Now do the access.
        if (fault == NoFault && !req->getFlags().isSet(Request::NO_ACCESS)) {
            Packet pkt(req, MemCmd::ReadReq);
            pkt.refineCommand();
            pkt.dataStatic(data);

            if (req->isMmappedIpr())
                dcache_latency += TheISA::handleIprRead(thread->getTC(), &pkt);
            else {
                if (fastmem && system->isMemAddr(pkt.getAddr()))
                    system->getPhysMem().access(&pkt);
                else
                    dcache_latency += dcachePort.sendAtomic(&pkt);
            }
            dcache_access = true;

            assert(!pkt.isError());

            if (req->isLLSC()) {
                TheISA::handleLockedRead(thread, req);
            }
        }

        // If there's a fault, return it
        if (fault != NoFault) {
            if (req->isPrefetch()) {
                return NoFault;
            } else {
                return fault;
            }
        }

        // If we don't need to access a second cache line, stop now.
        if (secondAddr <= addr) {
            if (req->isLocked() && fault == NoFault) {
                assert(!locked);
                locked = true;
            }
            return fault;
        }

        /*
         * Set up for accessing the second cache line.
         */

        // Move the pointer we're reading into to the correct location.
        data += size;
        // Adjust the size to get the remaining bytes.
        size = addr + fullSize - secondAddr;
        // And access the right address.
        addr = secondAddr;
    }
}

Fault
AtomicSimpleCPU::writeMem(uint8_t *data, unsigned size,
                          Addr addr, unsigned flags, uint64_t *res)
{
    static uint8_t zero_array[64] = {};

    if (data == NULL) {
        assert(size <= 64);
        assert(flags & Request::CACHE_BLOCK_ZERO);
        // This must be a cache block cleaning request
        data = zero_array;
    }

    // use the CPU's statically allocated write request and packet objects
    Request *req = &data_write_req;

    if (traceData) {
        traceData->setAddr(addr);
    }

    // The size of the data we're trying to write.
    int fullSize = size;

    // The address of the second part of this access if it needs to be split
    // across a cache line boundary.
    Addr secondAddr = roundDown(addr + size - 1, cacheLineSize());

    if (secondAddr > addr)
        size = secondAddr - addr;

    dcache_latency = 0;

    req->taskId(taskId());
    while (1) {
        req->setVirt(0, addr, size, flags, dataMasterId(),
                     thread->pcState().instAddr());

        // translate to physical address
        Fault fault = thread->dtb->translateAtomic(req, tc, BaseTLB::Write);

        // Now do the access.
        if (fault == NoFault) {
            MemCmd cmd = MemCmd::WriteReq; // default
            bool do_access = true;  // flag to suppress cache access

            if (req->isLLSC()) {
                cmd = MemCmd::StoreCondReq;
                do_access =
                    TheISA::handleLockedWrite(thread, req,
                                              dcachePort.cacheBlockMask);
            } else if (req->isSwap()) {
                cmd = MemCmd::SwapReq;
                if (req->isCondSwap()) {
                    assert(res);
                    req->setExtraData(*res);
                }
            }

            if (do_access && !req->getFlags().isSet(Request::NO_ACCESS)) {
                Packet pkt = Packet(req, cmd);
                pkt.dataStatic(data);

                if (req->isMmappedIpr()) {
                    dcache_latency +=
                        TheISA::handleIprWrite(thread->getTC(), &pkt);
                } else {
                    if (fastmem && system->isMemAddr(pkt.getAddr()))
                        system->getPhysMem().access(&pkt);
                    else
                        dcache_latency += dcachePort.sendAtomic(&pkt);
                }
                dcache_access = true;
                assert(!pkt.isError());

                if (req->isSwap()) {
                    assert(res);
                    memcpy(res, pkt.getConstPtr<uint8_t>(), fullSize);
                }
            }

            if (res && !req->isSwap()) {
                *res = req->getExtraData();
            }
        }

        // If there's a fault or we don't need to access a second cache line,
        // stop now.
        if (fault != NoFault || secondAddr <= addr) {
            if (req->isLocked() && fault == NoFault) {
                assert(locked);
                locked = false;
            }
            if (fault != NoFault && req->isPrefetch()) {
                return NoFault;
            } else {
                return fault;
            }
        }

        /*
         * Set up for accessing the second cache line.
         */

        // Move the pointer we're writing from to the correct location.
        data += size;
        // Adjust the size to get the remaining bytes.
        size = addr + fullSize - secondAddr;
        // And access the right address.
        addr = secondAddr;
    }
}
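
// Note on the `locked` flag: a locked (req->isLocked()) read sets `locked`
// in readMem(), and the matching write clears it in writeMem() above. The
// `i < width || locked` loop condition in tick() below keeps executing
// instructions until the flag clears, so a locked read-modify-write pair
// always completes within a single tick event.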

void
AtomicSimpleCPU::tick()
{
    DPRINTF(SimpleCPU, "Tick\n");

    Tick latency = 0;

    for (int i = 0; i < width || locked; ++i) {
        numCycles++;
        ppCycles->notify(1);

        if (!curStaticInst || !curStaticInst->isDelayedCommit())
            checkForInterrupts();

        checkPcEventQueue();
        // We must have just got suspended by a PC event
        if (_status == Idle) {
            tryCompleteDrain();
            return;
        }

        Fault fault = NoFault;

        TheISA::PCState pcState = thread->pcState();

        bool needToFetch = !isRomMicroPC(pcState.microPC()) &&
                           !curMacroStaticInst;
        if (needToFetch) {
            ifetch_req.taskId(taskId());
            setupFetchRequest(&ifetch_req);
            fault = thread->itb->translateAtomic(&ifetch_req, tc,
                                                 BaseTLB::Execute);
        }

        if (fault == NoFault) {
            Tick icache_latency = 0;
            bool icache_access = false;
            dcache_access = false; // assume no dcache access

            if (needToFetch) {
                // This is commented out because the decoder would act like
                // a tiny cache otherwise. It wouldn't be flushed when needed
                // like the I cache. It should be flushed, and when that works
                // this code should be uncommented.
                // Fetch more instruction memory if necessary
                //if (decoder.needMoreBytes())
                //{
                icache_access = true;
                Packet ifetch_pkt = Packet(&ifetch_req, MemCmd::ReadReq);
                ifetch_pkt.dataStatic(&inst);

                if (fastmem && system->isMemAddr(ifetch_pkt.getAddr()))
                    system->getPhysMem().access(&ifetch_pkt);
                else
                    icache_latency = icachePort.sendAtomic(&ifetch_pkt);

                assert(!ifetch_pkt.isError());

                // ifetch_req is initialized to read the instruction directly
                // into the CPU object's inst field.
                //}
            }

            preExecute();

            if (curStaticInst) {
                fault = curStaticInst->execute(this, traceData);

                // keep an instruction count
                if (fault == NoFault) {
                    countInst();
                    if (!curStaticInst->isMicroop() ||
                        curStaticInst->isLastMicroop()) {
                        ppCommit->notify(std::make_pair(thread, curStaticInst));
                    }
                } else if (traceData && !DTRACE(ExecFaulting)) {
                    delete traceData;
                    traceData = NULL;
                }

                postExecute();
            }

            // @todo remove me after debugging with legion done
            if (curStaticInst && (!curStaticInst->isMicroop() ||
                                  curStaticInst->isFirstMicroop()))
                instCnt++;

            Tick stall_ticks = 0;
            if (simulate_inst_stalls && icache_access)
                stall_ticks += icache_latency;

            if (simulate_data_stalls && dcache_access)
                stall_ticks += dcache_latency;

            if (stall_ticks) {
                // the atomic cpu does its accounting in ticks, so
                // keep counting in ticks but round to the clock
                // period
                latency += divCeil(stall_ticks, clockPeriod()) *
                           clockPeriod();
            }

        }
        if (fault != NoFault || !stayAtPC)
            advancePC(fault);
    }

    if (tryCompleteDrain())
        return;

    // instruction takes at least one cycle
    if (latency < clockPeriod())
        latency = clockPeriod();

    if (_status != Idle)
        schedule(tickEvent, curTick() + latency);
}
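
// Example of the stall rounding in tick() above (illustrative numbers,
// assuming gem5's default 1 ps tick resolution): with a 1 GHz clock,
// clockPeriod() == 1000 ticks, so stall_ticks == 1500 is charged as
// divCeil(1500, 1000) * 1000 == 2000 ticks, i.e. two whole cycles.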

void
AtomicSimpleCPU::regProbePoints()
{
    BaseCPU::regProbePoints();

    ppCommit = new ProbePointArg<pair<SimpleThread*, const StaticInstPtr>>
        (getProbeManager(), "Commit");
}

void
AtomicSimpleCPU::printAddr(Addr a)
{
    dcachePort.printAddr(a);
}

////////////////////////////////////////////////////////////////////////
//
//  AtomicSimpleCPU Simulation Object
//
AtomicSimpleCPU *
AtomicSimpleCPUParams::create()
{
    numThreads = 1;
    if (!FullSystem && workload.size() != 1)
        panic("only one workload allowed");
    return new AtomicSimpleCPU(this);
}
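
// Typical instantiation (illustrative sketch, assuming a standard gem5
// Python configuration script; AtomicSimpleCPUParams::create() above is
// invoked by the generated params code):
//
//   system.cpu = AtomicSimpleCPU()
//   system.mem_mode = 'atomic'  # required; see verifyMemoryMode()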