atomic.cc revision 10665
/*
 * Copyright 2014 Google, Inc.
 * Copyright (c) 2012-2013 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 */

#include "arch/locked_mem.hh"
#include "arch/mmapped_ipr.hh"
#include "arch/utility.hh"
#include "base/bigint.hh"
#include "base/output.hh"
#include "config/the_isa.hh"
#include "cpu/simple/atomic.hh"
#include "cpu/exetrace.hh"
#include "debug/Drain.hh"
#include "debug/ExecFaulting.hh"
#include "debug/SimpleCPU.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "mem/physical.hh"
#include "params/AtomicSimpleCPU.hh"
#include "sim/faults.hh"
#include "sim/system.hh"
#include "sim/full_system.hh"

using namespace std;
using namespace TheISA;

AtomicSimpleCPU::TickEvent::TickEvent(AtomicSimpleCPU *c)
    : Event(CPU_Tick_Pri), cpu(c)
{
}


void
AtomicSimpleCPU::TickEvent::process()
{
    cpu->tick();
}

const char *
AtomicSimpleCPU::TickEvent::description() const
{
    return "AtomicSimpleCPU tick";
}

void
AtomicSimpleCPU::init()
{
    BaseCPU::init();

    // Initialise the ThreadContext's memory proxies
    tcBase()->initMemProxies(tcBase());

    if (FullSystem && !params()->switched_out) {
        ThreadID size = threadContexts.size();
        for (ThreadID i = 0; i < size; ++i) {
            ThreadContext *tc = threadContexts[i];
            // initialize CPU, including PC
            TheISA::initCPU(tc, tc->contextId());
        }
    }

    // Atomic doesn't do MT right now, so contextId == threadId
    ifetch_req.setThreadContext(_cpuId, 0); // Add thread ID if we add MT
    data_read_req.setThreadContext(_cpuId, 0); // Add thread ID here too
    data_write_req.setThreadContext(_cpuId, 0); // Add thread ID here too
}

AtomicSimpleCPU::AtomicSimpleCPU(AtomicSimpleCPUParams *p)
    : BaseSimpleCPU(p), tickEvent(this), width(p->width), locked(false),
      simulate_data_stalls(p->simulate_data_stalls),
      simulate_inst_stalls(p->simulate_inst_stalls),
      drain_manager(NULL),
      icachePort(name() + ".icache_port", this),
      dcachePort(name() + ".dcache_port", this),
      fastmem(p->fastmem), dcache_access(false), dcache_latency(0),
      ppCommit(nullptr)
{
    _status = Idle;
}


AtomicSimpleCPU::~AtomicSimpleCPU()
{
    if (tickEvent.scheduled()) {
        deschedule(tickEvent);
    }
}

unsigned int
AtomicSimpleCPU::drain(DrainManager *dm)
{
    assert(!drain_manager);
    if (switchedOut())
        return 0;

    if (!isDrained()) {
        DPRINTF(Drain, "Requesting drain: %s\n", pcState());
        drain_manager = dm;
        return 1;
    } else {
        if (tickEvent.scheduled())
            deschedule(tickEvent);

        DPRINTF(Drain, "Not executing microcode, no need to drain.\n");
        return 0;
    }
}

void
AtomicSimpleCPU::drainResume()
{
    assert(!tickEvent.scheduled());
    assert(!drain_manager);
    if (switchedOut())
        return;

    DPRINTF(SimpleCPU, "Resume\n");
    verifyMemoryMode();

    assert(!threadContexts.empty());
    if (threadContexts.size() > 1)
        fatal("The atomic CPU only supports one thread.\n");

    if (thread->status() == ThreadContext::Active) {
        schedule(tickEvent, nextCycle());
        _status = BaseSimpleCPU::Running;
        notIdleFraction = 1;
    } else {
        _status = BaseSimpleCPU::Idle;
        notIdleFraction = 0;
    }

    system->totalNumInsts = 0;
}

bool
AtomicSimpleCPU::tryCompleteDrain()
{
    if (!drain_manager)
        return false;

    DPRINTF(Drain, "tryCompleteDrain: %s\n", pcState());
    if (!isDrained())
        return false;

    DPRINTF(Drain, "CPU done draining, processing drain event\n");
    drain_manager->signalDrainDone();
    drain_manager = NULL;

    return true;
}
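
// Added commentary (not in the original source): the drain handshake is
// two-sided. drain() either reports "nothing to do" (returns 0) or records
// the DrainManager and reports one outstanding object (returns 1). tick()
// then calls tryCompleteDrain() at instruction boundaries, and once
// isDrained() holds, the manager is signalled and the CPU stops ticking
// until drainResume() reschedules the tick event.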
drain event\n"); 186 drain_manager->signalDrainDone(); 187 drain_manager = NULL; 188 189 return true; 190} 191 192 193void 194AtomicSimpleCPU::switchOut() 195{ 196 BaseSimpleCPU::switchOut(); 197 198 assert(!tickEvent.scheduled()); 199 assert(_status == BaseSimpleCPU::Running || _status == Idle); 200 assert(isDrained()); 201} 202 203 204void 205AtomicSimpleCPU::takeOverFrom(BaseCPU *oldCPU) 206{ 207 BaseSimpleCPU::takeOverFrom(oldCPU); 208 209 // The tick event should have been descheduled by drain() 210 assert(!tickEvent.scheduled()); 211 212 ifetch_req.setThreadContext(_cpuId, 0); // Add thread ID if we add MT 213 data_read_req.setThreadContext(_cpuId, 0); // Add thread ID here too 214 data_write_req.setThreadContext(_cpuId, 0); // Add thread ID here too 215} 216 217void 218AtomicSimpleCPU::verifyMemoryMode() const 219{ 220 if (!system->isAtomicMode()) { 221 fatal("The atomic CPU requires the memory system to be in " 222 "'atomic' mode.\n"); 223 } 224} 225 226void 227AtomicSimpleCPU::activateContext(ThreadID thread_num) 228{ 229 DPRINTF(SimpleCPU, "ActivateContext %d\n", thread_num); 230 231 assert(thread_num == 0); 232 assert(thread); 233 234 assert(_status == Idle); 235 assert(!tickEvent.scheduled()); 236 237 notIdleFraction = 1; 238 Cycles delta = ticksToCycles(thread->lastActivate - thread->lastSuspend); 239 numCycles += delta; 240 ppCycles->notify(delta); 241 242 //Make sure ticks are still on multiples of cycles 243 schedule(tickEvent, clockEdge(Cycles(0))); 244 _status = BaseSimpleCPU::Running; 245} 246 247 248void 249AtomicSimpleCPU::suspendContext(ThreadID thread_num) 250{ 251 DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num); 252 253 assert(thread_num == 0); 254 assert(thread); 255 256 if (_status == Idle) 257 return; 258 259 assert(_status == BaseSimpleCPU::Running); 260 261 // tick event may not be scheduled if this gets called from inside 262 // an instruction's execution, e.g. 
"quiesce" 263 if (tickEvent.scheduled()) 264 deschedule(tickEvent); 265 266 notIdleFraction = 0; 267 _status = Idle; 268} 269 270 271Tick 272AtomicSimpleCPU::AtomicCPUDPort::recvAtomicSnoop(PacketPtr pkt) 273{ 274 DPRINTF(SimpleCPU, "received snoop pkt for addr:%#x %s\n", pkt->getAddr(), 275 pkt->cmdString()); 276 277 // X86 ISA: Snooping an invalidation for monitor/mwait 278 AtomicSimpleCPU *cpu = (AtomicSimpleCPU *)(&owner); 279 if(cpu->getAddrMonitor()->doMonitor(pkt)) { 280 cpu->wakeup(); 281 } 282 283 // if snoop invalidates, release any associated locks 284 if (pkt->isInvalidate()) { 285 DPRINTF(SimpleCPU, "received invalidation for addr:%#x\n", 286 pkt->getAddr()); 287 TheISA::handleLockedSnoop(cpu->thread, pkt, cacheBlockMask); 288 } 289 290 return 0; 291} 292 293void 294AtomicSimpleCPU::AtomicCPUDPort::recvFunctionalSnoop(PacketPtr pkt) 295{ 296 DPRINTF(SimpleCPU, "received snoop pkt for addr:%#x %s\n", pkt->getAddr(), 297 pkt->cmdString()); 298 299 // X86 ISA: Snooping an invalidation for monitor/mwait 300 AtomicSimpleCPU *cpu = (AtomicSimpleCPU *)(&owner); 301 if(cpu->getAddrMonitor()->doMonitor(pkt)) { 302 cpu->wakeup(); 303 } 304 305 // if snoop invalidates, release any associated locks 306 if (pkt->isInvalidate()) { 307 DPRINTF(SimpleCPU, "received invalidation for addr:%#x\n", 308 pkt->getAddr()); 309 TheISA::handleLockedSnoop(cpu->thread, pkt, cacheBlockMask); 310 } 311} 312 313Fault 314AtomicSimpleCPU::readMem(Addr addr, uint8_t * data, 315 unsigned size, unsigned flags) 316{ 317 // use the CPU's statically allocated read request and packet objects 318 Request *req = &data_read_req; 319 320 if (traceData) 321 traceData->setMem(addr, size, flags); 322 323 //The size of the data we're trying to read. 324 int fullSize = size; 325 326 //The address of the second part of this access if it needs to be split 327 //across a cache line boundary. 328 Addr secondAddr = roundDown(addr + size - 1, cacheLineSize()); 329 330 if (secondAddr > addr) 331 size = secondAddr - addr; 332 333 dcache_latency = 0; 334 335 req->taskId(taskId()); 336 while (1) { 337 req->setVirt(0, addr, size, flags, dataMasterId(), thread->pcState().instAddr()); 338 339 // translate to physical address 340 Fault fault = thread->dtb->translateAtomic(req, tc, BaseTLB::Read); 341 342 // Now do the access. 343 if (fault == NoFault && !req->getFlags().isSet(Request::NO_ACCESS)) { 344 Packet pkt(req, MemCmd::ReadReq); 345 pkt.refineCommand(); 346 pkt.dataStatic(data); 347 348 if (req->isMmappedIpr()) 349 dcache_latency += TheISA::handleIprRead(thread->getTC(), &pkt); 350 else { 351 if (fastmem && system->isMemAddr(pkt.getAddr())) 352 system->getPhysMem().access(&pkt); 353 else 354 dcache_latency += dcachePort.sendAtomic(&pkt); 355 } 356 dcache_access = true; 357 358 assert(!pkt.isError()); 359 360 if (req->isLLSC()) { 361 TheISA::handleLockedRead(thread, req); 362 } 363 } 364 365 //If there's a fault, return it 366 if (fault != NoFault) { 367 if (req->isPrefetch()) { 368 return NoFault; 369 } else { 370 return fault; 371 } 372 } 373 374 //If we don't need to access a second cache line, stop now. 375 if (secondAddr <= addr) 376 { 377 if (req->isLocked() && fault == NoFault) { 378 assert(!locked); 379 locked = true; 380 } 381 return fault; 382 } 383 384 /* 385 * Set up for accessing the second cache line. 386 */ 387 388 //Move the pointer we're reading into to the correct location. 389 data += size; 390 //Adjust the size to get the remaining bytes. 
        size = addr + fullSize - secondAddr;
        // And access the right address.
        addr = secondAddr;
    }
}


Fault
AtomicSimpleCPU::writeMem(uint8_t *data, unsigned size,
                          Addr addr, unsigned flags, uint64_t *res)
{
    static uint8_t zero_array[64] = {};

    if (data == NULL) {
        assert(size <= 64);
        assert(flags & Request::CACHE_BLOCK_ZERO);
        // This must be a cache block cleaning request
        data = zero_array;
    }

    // use the CPU's statically allocated write request and packet objects
    Request *req = &data_write_req;

    if (traceData)
        traceData->setMem(addr, size, flags);

    // The size of the data we're trying to write.
    int fullSize = size;

    // The address of the second part of this access if it needs to be split
    // across a cache line boundary.
    Addr secondAddr = roundDown(addr + size - 1, cacheLineSize());

    if (secondAddr > addr)
        size = secondAddr - addr;

    dcache_latency = 0;

    req->taskId(taskId());
    while (1) {
        req->setVirt(0, addr, size, flags, dataMasterId(),
                     thread->pcState().instAddr());

        // translate to physical address
        Fault fault = thread->dtb->translateAtomic(req, tc, BaseTLB::Write);

        // Now do the access.
        if (fault == NoFault) {
            MemCmd cmd = MemCmd::WriteReq; // default
            bool do_access = true;  // flag to suppress cache access

            if (req->isLLSC()) {
                cmd = MemCmd::StoreCondReq;
                do_access =
                    TheISA::handleLockedWrite(thread, req,
                                              dcachePort.cacheBlockMask);
            } else if (req->isSwap()) {
                cmd = MemCmd::SwapReq;
                if (req->isCondSwap()) {
                    assert(res);
                    req->setExtraData(*res);
                }
            }

            if (do_access && !req->getFlags().isSet(Request::NO_ACCESS)) {
                Packet pkt = Packet(req, cmd);
                pkt.dataStatic(data);

                if (req->isMmappedIpr()) {
                    dcache_latency +=
                        TheISA::handleIprWrite(thread->getTC(), &pkt);
                } else {
                    if (fastmem && system->isMemAddr(pkt.getAddr()))
                        system->getPhysMem().access(&pkt);
                    else
                        dcache_latency += dcachePort.sendAtomic(&pkt);
                }
                dcache_access = true;
                assert(!pkt.isError());

                if (req->isSwap()) {
                    assert(res);
                    memcpy(res, pkt.getConstPtr<uint8_t>(), fullSize);
                }
            }

            if (res && !req->isSwap()) {
                *res = req->getExtraData();
            }
        }

        // If there's a fault or we don't need to access a second cache
        // line, stop now.
        if (fault != NoFault || secondAddr <= addr) {
            if (req->isLocked() && fault == NoFault) {
                assert(locked);
                locked = false;
            }
            if (fault != NoFault && req->isPrefetch()) {
                return NoFault;
            } else {
                return fault;
            }
        }

        /*
         * Set up for accessing the second cache line.
         */

        // Move the pointer we're writing from to the correct location.
        data += size;
        // Adjust the size to get the remaining bytes.
        size = addr + fullSize - secondAddr;
        // And access the right address.
        addr = secondAddr;
    }
}
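
// Added commentary (not in the original source): for an LL/SC pair, the
// load side in readMem() records a reservation via handleLockedRead();
// the matching store-conditional then enters writeMem() above, where
// handleLockedWrite() validates the reservation and either lets the
// StoreCondReq proceed or suppresses the access (do_access == false).
// In both cases the outcome is reported back to the instruction through
// req->getExtraData(), copied into *res.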

void
AtomicSimpleCPU::tick()
{
    DPRINTF(SimpleCPU, "Tick\n");

    Tick latency = 0;

    for (int i = 0; i < width || locked; ++i) {
        numCycles++;
        ppCycles->notify(1);

        if (!curStaticInst || !curStaticInst->isDelayedCommit()) {
            checkForInterrupts();
            checkPcEventQueue();
        }

        // We must have just got suspended by a PC event
        if (_status == Idle) {
            tryCompleteDrain();
            return;
        }

        Fault fault = NoFault;

        TheISA::PCState pcState = thread->pcState();

        bool needToFetch = !isRomMicroPC(pcState.microPC()) &&
                           !curMacroStaticInst;
        if (needToFetch) {
            ifetch_req.taskId(taskId());
            setupFetchRequest(&ifetch_req);
            fault = thread->itb->translateAtomic(&ifetch_req, tc,
                                                 BaseTLB::Execute);
        }

        if (fault == NoFault) {
            Tick icache_latency = 0;
            bool icache_access = false;
            dcache_access = false; // assume no dcache access

            if (needToFetch) {
                // This is commented out because the decoder would act like
                // a tiny cache otherwise. It wouldn't be flushed when needed
                // like the I cache. It should be flushed, and when that works
                // this code should be uncommented.
                // Fetch more instruction memory if necessary
                //if(decoder.needMoreBytes())
                //{
                icache_access = true;
                Packet ifetch_pkt = Packet(&ifetch_req, MemCmd::ReadReq);
                ifetch_pkt.dataStatic(&inst);

                if (fastmem && system->isMemAddr(ifetch_pkt.getAddr()))
                    system->getPhysMem().access(&ifetch_pkt);
                else
                    icache_latency = icachePort.sendAtomic(&ifetch_pkt);

                assert(!ifetch_pkt.isError());

                // ifetch_req is initialized to read the instruction directly
                // into the CPU object's inst field.
                //}
            }

            preExecute();

            if (curStaticInst) {
                fault = curStaticInst->execute(this, traceData);

                // keep an instruction count
                if (fault == NoFault) {
                    countInst();
                    ppCommit->notify(std::make_pair(thread, curStaticInst));
                }
                else if (traceData && !DTRACE(ExecFaulting)) {
                    delete traceData;
                    traceData = NULL;
                }

                postExecute();
            }

            // @todo remove me after debugging with legion done
            if (curStaticInst && (!curStaticInst->isMicroop() ||
                                  curStaticInst->isFirstMicroop()))
                instCnt++;

            Tick stall_ticks = 0;
            if (simulate_inst_stalls && icache_access)
                stall_ticks += icache_latency;

            if (simulate_data_stalls && dcache_access)
                stall_ticks += dcache_latency;

            if (stall_ticks) {
                // the atomic cpu does its accounting in ticks, so
                // keep counting in ticks but round to the clock
                // period
                latency += divCeil(stall_ticks, clockPeriod()) *
                           clockPeriod();
            }

        }
        if (fault != NoFault || !stayAtPC)
            advancePC(fault);
    }

    if (tryCompleteDrain())
        return;

    // instruction takes at least one cycle
    if (latency < clockPeriod())
        latency = clockPeriod();

    if (_status != Idle)
        schedule(tickEvent, curTick() + latency);
}
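
// Added commentary (not in the original source): tick() accounts for
// stalls in ticks but rounds each stall up to a whole clock period.
// For example, with a 1 GHz CPU and the default 1 THz tick rate
// (clockPeriod() == 1000 ticks), a combined stall of 1250 ticks becomes
// divCeil(1250, 1000) * 1000 = 2000 ticks, i.e. two full cycles.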

void
AtomicSimpleCPU::regProbePoints()
{
    BaseCPU::regProbePoints();

    ppCommit = new ProbePointArg<pair<SimpleThread*, const StaticInstPtr>>
        (getProbeManager(), "Commit");
}

void
AtomicSimpleCPU::printAddr(Addr a)
{
    dcachePort.printAddr(a);
}

////////////////////////////////////////////////////////////////////////
//
//  AtomicSimpleCPU Simulation Object
//
AtomicSimpleCPU *
AtomicSimpleCPUParams::create()
{
    numThreads = 1;
    if (!FullSystem && workload.size() != 1)
        panic("only one workload allowed");
    return new AtomicSimpleCPU(this);
}