atomic.cc revision 10464
/*
 * Copyright (c) 2012-2013 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 */

#include "arch/locked_mem.hh"
#include "arch/mmapped_ipr.hh"
#include "arch/utility.hh"
#include "base/bigint.hh"
#include "base/output.hh"
#include "config/the_isa.hh"
#include "cpu/simple/atomic.hh"
#include "cpu/exetrace.hh"
#include "debug/Drain.hh"
#include "debug/ExecFaulting.hh"
#include "debug/SimpleCPU.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "mem/physical.hh"
#include "params/AtomicSimpleCPU.hh"
#include "sim/faults.hh"
#include "sim/system.hh"
#include "sim/full_system.hh"

using namespace std;
using namespace TheISA;

AtomicSimpleCPU::TickEvent::TickEvent(AtomicSimpleCPU *c)
    : Event(CPU_Tick_Pri), cpu(c)
{
}


void
AtomicSimpleCPU::TickEvent::process()
{
    cpu->tick();
}

const char *
AtomicSimpleCPU::TickEvent::description() const
{
    return "AtomicSimpleCPU tick";
}

void
AtomicSimpleCPU::init()
{
    BaseCPU::init();

    // Initialise the ThreadContext's memory proxies
    tcBase()->initMemProxies(tcBase());

    if (FullSystem && !params()->switched_out) {
        ThreadID size = threadContexts.size();
        for (ThreadID i = 0; i < size; ++i) {
            ThreadContext *tc = threadContexts[i];
            // initialize CPU, including PC
            TheISA::initCPU(tc, tc->contextId());
        }
    }

    // Atomic doesn't do MT right now, so contextId == threadId
    ifetch_req.setThreadContext(_cpuId, 0); // Add thread ID if we add MT
    data_read_req.setThreadContext(_cpuId, 0); // Add thread ID here too
    data_write_req.setThreadContext(_cpuId, 0); // Add thread ID here too
}

AtomicSimpleCPU::AtomicSimpleCPU(AtomicSimpleCPUParams *p)
    : BaseSimpleCPU(p), tickEvent(this), width(p->width), locked(false),
      simulate_data_stalls(p->simulate_data_stalls),
      simulate_inst_stalls(p->simulate_inst_stalls),
      drain_manager(NULL),
      icachePort(name() + ".icache_port", this),
      dcachePort(name() + ".dcache_port", this),
      fastmem(p->fastmem)
{
    _status = Idle;
}


AtomicSimpleCPU::~AtomicSimpleCPU()
{
    if (tickEvent.scheduled()) {
        deschedule(tickEvent);
    }
}

unsigned int
AtomicSimpleCPU::drain(DrainManager *dm)
{
    assert(!drain_manager);
    if (switchedOut())
        return 0;

    if (!isDrained()) {
        DPRINTF(Drain, "Requesting drain: %s\n", pcState());
        drain_manager = dm;
        return 1;
    } else {
        if (tickEvent.scheduled())
            deschedule(tickEvent);

        DPRINTF(Drain, "Not executing microcode, no need to drain.\n");
        return 0;
    }
}

void
AtomicSimpleCPU::drainResume()
{
    assert(!tickEvent.scheduled());
    assert(!drain_manager);
    if (switchedOut())
        return;

    DPRINTF(SimpleCPU, "Resume\n");
    verifyMemoryMode();

    assert(!threadContexts.empty());
    if (threadContexts.size() > 1)
        fatal("The atomic CPU only supports one thread.\n");

    if (thread->status() == ThreadContext::Active) {
        schedule(tickEvent, nextCycle());
        _status = BaseSimpleCPU::Running;
        notIdleFraction = 1;
    } else {
        _status = BaseSimpleCPU::Idle;
        notIdleFraction = 0;
    }

    system->totalNumInsts = 0;
}

bool
AtomicSimpleCPU::tryCompleteDrain()
{
    if (!drain_manager)
        return false;

    DPRINTF(Drain, "tryCompleteDrain: %s\n", pcState());
    if (!isDrained())
        return false;

    DPRINTF(Drain, "CPU done draining, processing drain event\n");
    drain_manager->signalDrainDone();
    drain_manager = NULL;

    return true;
}


void
AtomicSimpleCPU::switchOut()
{
    BaseSimpleCPU::switchOut();

    assert(!tickEvent.scheduled());
    assert(_status == BaseSimpleCPU::Running || _status == Idle);
    assert(isDrained());
}


void
AtomicSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
{
    BaseSimpleCPU::takeOverFrom(oldCPU);

    // The tick event should have been descheduled by drain()
    assert(!tickEvent.scheduled());

    ifetch_req.setThreadContext(_cpuId, 0); // Add thread ID if we add MT
    data_read_req.setThreadContext(_cpuId, 0); // Add thread ID here too
    data_write_req.setThreadContext(_cpuId, 0); // Add thread ID here too
}

void
AtomicSimpleCPU::verifyMemoryMode() const
{
    if (!system->isAtomicMode()) {
        fatal("The atomic CPU requires the memory system to be in "
              "'atomic' mode.\n");
    }
}

void
AtomicSimpleCPU::activateContext(ThreadID thread_num)
{
    DPRINTF(SimpleCPU, "ActivateContext %d\n", thread_num);

    assert(thread_num == 0);
    assert(thread);

    assert(_status == Idle);
    assert(!tickEvent.scheduled());

    notIdleFraction = 1;
    Cycles delta = ticksToCycles(thread->lastActivate - thread->lastSuspend);
    numCycles += delta;
    ppCycles->notify(delta);

    //Make sure ticks are still on multiples of cycles
    schedule(tickEvent, clockEdge(Cycles(0)));
    _status = BaseSimpleCPU::Running;
}


void
AtomicSimpleCPU::suspendContext(ThreadID thread_num)
{
    DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);

    assert(thread_num == 0);
    assert(thread);

    if (_status == Idle)
        return;

    assert(_status == BaseSimpleCPU::Running);

    // tick event may not be scheduled if this gets called from inside
    // an instruction's execution, e.g. "quiesce"
    if (tickEvent.scheduled())
        deschedule(tickEvent);

    notIdleFraction = 0;
    _status = Idle;
}


Tick
AtomicSimpleCPU::AtomicCPUDPort::recvAtomicSnoop(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "received snoop pkt for addr:%#x %s\n", pkt->getAddr(),
            pkt->cmdString());

    // if snoop invalidates, release any associated locks
    if (pkt->isInvalidate()) {
        DPRINTF(SimpleCPU, "received invalidation for addr:%#x\n",
                pkt->getAddr());
        TheISA::handleLockedSnoop(cpu->thread, pkt, cacheBlockMask);
    }

    return 0;
}

void
AtomicSimpleCPU::AtomicCPUDPort::recvFunctionalSnoop(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "received snoop pkt for addr:%#x %s\n", pkt->getAddr(),
            pkt->cmdString());

    // if snoop invalidates, release any associated locks
    if (pkt->isInvalidate()) {
        DPRINTF(SimpleCPU, "received invalidation for addr:%#x\n",
                pkt->getAddr());
        TheISA::handleLockedSnoop(cpu->thread, pkt, cacheBlockMask);
    }
}

Fault
AtomicSimpleCPU::readMem(Addr addr, uint8_t * data,
                         unsigned size, unsigned flags)
{
    // use the CPU's statically allocated read request and packet objects
    Request *req = &data_read_req;

    if (traceData) {
        traceData->setAddr(addr);
    }

    //The size of the data we're trying to read.
    int fullSize = size;

    //The address of the second part of this access if it needs to be split
    //across a cache line boundary.
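    // Worked example (assuming 64-byte cache lines): a 4-byte read at
    // addr 0x3e gives secondAddr = roundDown(0x3e + 4 - 1, 64) = 0x40,
    // which is greater than addr, so the loop below splits the access
    // into 2 bytes at 0x3e on the first pass and 2 bytes at 0x40 on the
    // second.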
    Addr secondAddr = roundDown(addr + size - 1, cacheLineSize());

    if (secondAddr > addr)
        size = secondAddr - addr;

    dcache_latency = 0;

    req->taskId(taskId());
    while (1) {
        req->setVirt(0, addr, size, flags, dataMasterId(), thread->pcState().instAddr());

        // translate to physical address
        Fault fault = thread->dtb->translateAtomic(req, tc, BaseTLB::Read);

        // Now do the access.
        if (fault == NoFault && !req->getFlags().isSet(Request::NO_ACCESS)) {
            Packet pkt(req, MemCmd::ReadReq);
            pkt.refineCommand();
            pkt.dataStatic(data);

            if (req->isMmappedIpr())
                dcache_latency += TheISA::handleIprRead(thread->getTC(), &pkt);
            else {
                if (fastmem && system->isMemAddr(pkt.getAddr()))
                    system->getPhysMem().access(&pkt);
                else
                    dcache_latency += dcachePort.sendAtomic(&pkt);
            }
            dcache_access = true;

            assert(!pkt.isError());

            if (req->isLLSC()) {
                TheISA::handleLockedRead(thread, req);
            }
        }

        //If there's a fault, return it
        if (fault != NoFault) {
            if (req->isPrefetch()) {
                return NoFault;
            } else {
                return fault;
            }
        }

        //If we don't need to access a second cache line, stop now.
        if (secondAddr <= addr)
        {
            if (req->isLocked() && fault == NoFault) {
                assert(!locked);
                locked = true;
            }
            return fault;
        }

        /*
         * Set up for accessing the second cache line.
         */

        //Move the pointer we're reading into to the correct location.
        data += size;
        //Adjust the size to get the remaining bytes.
        size = addr + fullSize - secondAddr;
        //And access the right address.
        addr = secondAddr;
    }
}


Fault
AtomicSimpleCPU::writeMem(uint8_t *data, unsigned size,
                          Addr addr, unsigned flags, uint64_t *res)
{

    static uint8_t zero_array[64] = {};

    if (data == NULL) {
        assert(size <= 64);
        assert(flags & Request::CACHE_BLOCK_ZERO);
        // This must be a cache block cleaning request
        data = zero_array;
    }

    // use the CPU's statically allocated write request and packet objects
    Request *req = &data_write_req;

    if (traceData) {
        traceData->setAddr(addr);
    }

    //The size of the data we're trying to write.
    int fullSize = size;

    //The address of the second part of this access if it needs to be split
    //across a cache line boundary.
    Addr secondAddr = roundDown(addr + size - 1, cacheLineSize());

    if (secondAddr > addr)
        size = secondAddr - addr;

    dcache_latency = 0;

    req->taskId(taskId());
    while (1) {
        req->setVirt(0, addr, size, flags, dataMasterId(), thread->pcState().instAddr());

        // translate to physical address
        Fault fault = thread->dtb->translateAtomic(req, tc, BaseTLB::Write);

        // Now do the access.
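        // The cases handled below: a store-conditional is sent as a
        // StoreCondReq, and TheISA::handleLockedWrite() clears do_access
        // if the SC fails, so the write is suppressed and only its result
        // reaches the caller through *res; a (conditional) swap is sent
        // as a SwapReq, with the comparison value carried in the
        // request's extra data.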
        if (fault == NoFault) {
            MemCmd cmd = MemCmd::WriteReq; // default
            bool do_access = true;  // flag to suppress cache access

            if (req->isLLSC()) {
                cmd = MemCmd::StoreCondReq;
                do_access = TheISA::handleLockedWrite(thread, req, dcachePort.cacheBlockMask);
            } else if (req->isSwap()) {
                cmd = MemCmd::SwapReq;
                if (req->isCondSwap()) {
                    assert(res);
                    req->setExtraData(*res);
                }
            }

            if (do_access && !req->getFlags().isSet(Request::NO_ACCESS)) {
                Packet pkt = Packet(req, cmd);
                pkt.dataStatic(data);

                if (req->isMmappedIpr()) {
                    dcache_latency +=
                        TheISA::handleIprWrite(thread->getTC(), &pkt);
                } else {
                    if (fastmem && system->isMemAddr(pkt.getAddr()))
                        system->getPhysMem().access(&pkt);
                    else
                        dcache_latency += dcachePort.sendAtomic(&pkt);
                }
                dcache_access = true;
                assert(!pkt.isError());

                if (req->isSwap()) {
                    assert(res);
                    memcpy(res, pkt.getPtr<uint8_t>(), fullSize);
                }
            }

            if (res && !req->isSwap()) {
                *res = req->getExtraData();
            }
        }

        //If there's a fault or we don't need to access a second cache line,
        //stop now.
        if (fault != NoFault || secondAddr <= addr)
        {
            if (req->isLocked() && fault == NoFault) {
                assert(locked);
                locked = false;
            }
            if (fault != NoFault && req->isPrefetch()) {
                return NoFault;
            } else {
                return fault;
            }
        }

        /*
         * Set up for accessing the second cache line.
         */

        //Move the pointer we're writing from to the correct location.
        data += size;
        //Adjust the size to get the remaining bytes.
        size = addr + fullSize - secondAddr;
        //And access the right address.
        addr = secondAddr;
    }
}


void
AtomicSimpleCPU::tick()
{
    DPRINTF(SimpleCPU, "Tick\n");

    Tick latency = 0;

    for (int i = 0; i < width || locked; ++i) {
        numCycles++;
        ppCycles->notify(1);

        if (!curStaticInst || !curStaticInst->isDelayedCommit())
            checkForInterrupts();

        checkPcEventQueue();
        // We must have just got suspended by a PC event
        if (_status == Idle) {
            tryCompleteDrain();
            return;
        }

        Fault fault = NoFault;

        TheISA::PCState pcState = thread->pcState();

        bool needToFetch = !isRomMicroPC(pcState.microPC()) &&
                           !curMacroStaticInst;
        if (needToFetch) {
            ifetch_req.taskId(taskId());
            setupFetchRequest(&ifetch_req);
            fault = thread->itb->translateAtomic(&ifetch_req, tc,
                                                 BaseTLB::Execute);
        }

        if (fault == NoFault) {
            Tick icache_latency = 0;
            bool icache_access = false;
            dcache_access = false; // assume no dcache access

            if (needToFetch) {
                // This is commented out because the decoder would act like
                // a tiny cache otherwise. It wouldn't be flushed when needed
                // like the I cache. It should be flushed, and when that works
                // this code should be uncommented.
                //Fetch more instruction memory if necessary
                //if(decoder.needMoreBytes())
                //{
                    icache_access = true;
                    Packet ifetch_pkt = Packet(&ifetch_req, MemCmd::ReadReq);
                    ifetch_pkt.dataStatic(&inst);

                    if (fastmem && system->isMemAddr(ifetch_pkt.getAddr()))
                        system->getPhysMem().access(&ifetch_pkt);
                    else
                        icache_latency = icachePort.sendAtomic(&ifetch_pkt);

                    assert(!ifetch_pkt.isError());

                    // ifetch_req is initialized to read the instruction
                    // directly into the CPU object's inst field.
                //}
            }

            preExecute();

            if (curStaticInst) {
                fault = curStaticInst->execute(this, traceData);

                // keep an instruction count
                if (fault == NoFault) {
                    countInst();
                    if (!curStaticInst->isMicroop() ||
                        curStaticInst->isLastMicroop()) {
                        ppCommit->notify(std::make_pair(thread, curStaticInst));
                    }
                }
                else if (traceData && !DTRACE(ExecFaulting)) {
                    delete traceData;
                    traceData = NULL;
                }

                postExecute();
            }

            // @todo remove me after debugging with legion done
            if (curStaticInst && (!curStaticInst->isMicroop() ||
                        curStaticInst->isFirstMicroop()))
                instCnt++;

            Tick stall_ticks = 0;
            if (simulate_inst_stalls && icache_access)
                stall_ticks += icache_latency;

            if (simulate_data_stalls && dcache_access)
                stall_ticks += dcache_latency;

            if (stall_ticks) {
                // the atomic cpu does its accounting in ticks, so
                // keep counting in ticks but round to the clock
                // period
                latency += divCeil(stall_ticks, clockPeriod()) *
                    clockPeriod();
            }

        }
        if (fault != NoFault || !stayAtPC)
            advancePC(fault);
    }

    if (tryCompleteDrain())
        return;

    // instruction takes at least one cycle
    if (latency < clockPeriod())
        latency = clockPeriod();

    if (_status != Idle)
        schedule(tickEvent, curTick() + latency);
}

void
AtomicSimpleCPU::regProbePoints()
{
    BaseCPU::regProbePoints();

    ppCommit = new ProbePointArg<pair<SimpleThread*, const StaticInstPtr>>
                                (getProbeManager(), "Commit");
}

void
AtomicSimpleCPU::printAddr(Addr a)
{
    dcachePort.printAddr(a);
}

////////////////////////////////////////////////////////////////////////
//
//  AtomicSimpleCPU Simulation Object
//
AtomicSimpleCPU *
AtomicSimpleCPUParams::create()
{
    numThreads = 1;
    if (!FullSystem && workload.size() != 1)
        panic("only one workload allowed");
    return new AtomicSimpleCPU(this);
}
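The stall accounting in tick() above rounds the accumulated atomic-mode memory latencies up to whole clock periods and enforces a minimum of one cycle per instruction before rescheduling the tick event. Below is a minimal standalone sketch of that arithmetic; it is illustrative only, the divCeil() helper mirrors the one in base/intmath.hh, and the clock period and latency values are made-up examples, not values from this file.

#include <cstdint>
#include <cstdio>

// Integer division rounded up, as in gem5's base/intmath.hh.
static inline uint64_t divCeil(uint64_t a, uint64_t b) { return (a + b - 1) / b; }

int main()
{
    const uint64_t clock_period = 500;  // assumed: 500 ticks per cycle (2 GHz at 1 ps per tick)
    uint64_t latency = 0;

    // Assumed combined icache + dcache latency returned by sendAtomic().
    uint64_t stall_ticks = 1250;

    if (stall_ticks) {
        // Round the stall up to a whole number of clock periods,
        // exactly as tick() does: 1250 ticks -> 3 cycles -> 1500 ticks.
        latency += divCeil(stall_ticks, clock_period) * clock_period;
    }

    // An instruction takes at least one cycle.
    if (latency < clock_period)
        latency = clock_period;

    std::printf("reschedule tick event %llu ticks later\n",
                (unsigned long long)latency);
    return 0;
}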