atomic.cc revision 11147
/*
 * Copyright 2014 Google, Inc.
 * Copyright (c) 2012-2013,2015 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 */

#include "arch/locked_mem.hh"
#include "arch/mmapped_ipr.hh"
#include "arch/utility.hh"
#include "base/bigint.hh"
#include "base/output.hh"
#include "config/the_isa.hh"
#include "cpu/simple/atomic.hh"
#include "cpu/exetrace.hh"
#include "debug/Drain.hh"
#include "debug/ExecFaulting.hh"
#include "debug/SimpleCPU.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "mem/physical.hh"
#include "params/AtomicSimpleCPU.hh"
#include "sim/faults.hh"
#include "sim/system.hh"
#include "sim/full_system.hh"

using namespace std;
using namespace TheISA;

AtomicSimpleCPU::TickEvent::TickEvent(AtomicSimpleCPU *c)
    : Event(CPU_Tick_Pri), cpu(c)
{
}


void
AtomicSimpleCPU::TickEvent::process()
{
    cpu->tick();
}

const char *
AtomicSimpleCPU::TickEvent::description() const
{
    return "AtomicSimpleCPU tick";
}

void
AtomicSimpleCPU::init()
{
    BaseSimpleCPU::init();

    ifetch_req.setThreadContext(_cpuId, 0);
    data_read_req.setThreadContext(_cpuId, 0);
    data_write_req.setThreadContext(_cpuId, 0);
}

AtomicSimpleCPU::AtomicSimpleCPU(AtomicSimpleCPUParams *p)
    : BaseSimpleCPU(p), tickEvent(this), width(p->width), locked(false),
      simulate_data_stalls(p->simulate_data_stalls),
      simulate_inst_stalls(p->simulate_inst_stalls),
      icachePort(name() + ".icache_port", this),
      dcachePort(name() + ".dcache_port", this),
      fastmem(p->fastmem), dcache_access(false), dcache_latency(0),
      ppCommit(nullptr)
{
    _status = Idle;
}


AtomicSimpleCPU::~AtomicSimpleCPU()
{
    if (tickEvent.scheduled()) {
        deschedule(tickEvent);
    }
}

DrainState
AtomicSimpleCPU::drain()
{
    if (switchedOut())
        return DrainState::Drained;

    if (!isDrained()) {
        DPRINTF(Drain, "Requesting drain.\n");
        return DrainState::Draining;
    } else {
        if (tickEvent.scheduled())
            deschedule(tickEvent);

        activeThreads.clear();
        DPRINTF(Drain, "Not executing microcode, no need to drain.\n");
        return DrainState::Drained;
    }
}

void
AtomicSimpleCPU::drainResume()
{
    assert(!tickEvent.scheduled());
    if (switchedOut())
        return;

    DPRINTF(SimpleCPU, "Resume\n");
    verifyMemoryMode();

    assert(!threadContexts.empty());

    _status = BaseSimpleCPU::Idle;

    for (ThreadID tid = 0; tid < numThreads; tid++) {
        if (threadInfo[tid]->thread->status() == ThreadContext::Active) {
            threadInfo[tid]->notIdleFraction = 1;
            activeThreads.push_back(tid);
            _status = BaseSimpleCPU::Running;

            // Tick if any threads active
            if (!tickEvent.scheduled()) {
                schedule(tickEvent, nextCycle());
            }
        } else {
            threadInfo[tid]->notIdleFraction = 0;
        }
    }
}

bool
AtomicSimpleCPU::tryCompleteDrain()
{
    if (drainState() != DrainState::Draining)
        return false;

    DPRINTF(Drain, "tryCompleteDrain.\n");
    if (!isDrained())
        return false;

    DPRINTF(Drain, "CPU done draining, processing drain event\n");
    signalDrainDone();

    return true;
}
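
// Draining is a two-step handshake: drain() returns DrainState::Draining
// while the CPU is mid-instruction, and tick() then calls
// tryCompleteDrain() on every tick, which invokes signalDrainDone() once
// isDrained() reports that the CPU has reached a clean state.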


void
AtomicSimpleCPU::switchOut()
{
    BaseSimpleCPU::switchOut();

    assert(!tickEvent.scheduled());
    assert(_status == BaseSimpleCPU::Running || _status == Idle);
    assert(isDrained());
}


void
AtomicSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
{
    BaseSimpleCPU::takeOverFrom(oldCPU);

    // The tick event should have been descheduled by drain()
    assert(!tickEvent.scheduled());
}

void
AtomicSimpleCPU::verifyMemoryMode() const
{
    if (!system->isAtomicMode()) {
        fatal("The atomic CPU requires the memory system to be in "
              "'atomic' mode.\n");
    }
}

void
AtomicSimpleCPU::activateContext(ThreadID thread_num)
{
    DPRINTF(SimpleCPU, "ActivateContext %d\n", thread_num);

    assert(thread_num < numThreads);

    threadInfo[thread_num]->notIdleFraction = 1;
    Cycles delta = ticksToCycles(threadInfo[thread_num]->thread->lastActivate -
                                 threadInfo[thread_num]->thread->lastSuspend);
    numCycles += delta;
    ppCycles->notify(delta);

    if (!tickEvent.scheduled()) {
        //Make sure ticks are still on multiples of cycles
        schedule(tickEvent, clockEdge(Cycles(0)));
    }
    _status = BaseSimpleCPU::Running;
    if (std::find(activeThreads.begin(), activeThreads.end(), thread_num)
        == activeThreads.end()) {
        activeThreads.push_back(thread_num);
    }
}


void
AtomicSimpleCPU::suspendContext(ThreadID thread_num)
{
    DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);

    assert(thread_num < numThreads);
    activeThreads.remove(thread_num);

    if (_status == Idle)
        return;

    assert(_status == BaseSimpleCPU::Running);

    threadInfo[thread_num]->notIdleFraction = 0;

    if (activeThreads.empty()) {
        _status = Idle;

        if (tickEvent.scheduled()) {
            deschedule(tickEvent);
        }
    }
}


Tick
AtomicSimpleCPU::AtomicCPUDPort::recvAtomicSnoop(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "received snoop pkt for addr:%#x %s\n", pkt->getAddr(),
            pkt->cmdString());

    // X86 ISA: Snooping an invalidation for monitor/mwait
    AtomicSimpleCPU *cpu = (AtomicSimpleCPU *)(&owner);
    if (cpu->getCpuAddrMonitor()->doMonitor(pkt)) {
        cpu->wakeup();
    }

    // if snoop invalidates, release any associated locks
    if (pkt->isInvalidate()) {
        DPRINTF(SimpleCPU, "received invalidation for addr:%#x\n",
                pkt->getAddr());
        for (auto &t_info : cpu->threadInfo) {
            TheISA::handleLockedSnoop(t_info->thread, pkt, cacheBlockMask);
        }
    }

    return 0;
}

void
AtomicSimpleCPU::AtomicCPUDPort::recvFunctionalSnoop(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "received snoop pkt for addr:%#x %s\n", pkt->getAddr(),
            pkt->cmdString());

    // X86 ISA: Snooping an invalidation for monitor/mwait
    AtomicSimpleCPU *cpu = (AtomicSimpleCPU *)(&owner);
    if (cpu->getCpuAddrMonitor()->doMonitor(pkt)) {
        cpu->wakeup();
    }

    // if snoop invalidates, release any associated locks
    if (pkt->isInvalidate()) {
        DPRINTF(SimpleCPU, "received invalidation for addr:%#x\n",
                pkt->getAddr());
        for (auto &t_info : cpu->threadInfo) {
            TheISA::handleLockedSnoop(t_info->thread, pkt, cacheBlockMask);
        }
    }
}
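
// readMem() (and writeMem() further below) transparently split an access
// that straddles a cache line boundary into two atomic accesses. As a
// worked example, assuming 64-byte cache lines, an 8-byte read at addr
// 0x3C computes secondAddr = roundDown(0x3C + 8 - 1, 64) = 0x40, so the
// first loop iteration reads the 4 bytes at 0x3C and the second iteration
// reads the remaining 4 bytes at 0x40. An access that fits in one line
// yields secondAddr <= addr and takes a single iteration.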

Fault
AtomicSimpleCPU::readMem(Addr addr, uint8_t * data,
                         unsigned size, unsigned flags)
{
    SimpleExecContext& t_info = *threadInfo[curThread];
    SimpleThread* thread = t_info.thread;

    // use the CPU's statically allocated read request and packet objects
    Request *req = &data_read_req;

    if (traceData)
        traceData->setMem(addr, size, flags);

    //The size of the data we're trying to read.
    int fullSize = size;

    //The address of the second part of this access if it needs to be split
    //across a cache line boundary.
    Addr secondAddr = roundDown(addr + size - 1, cacheLineSize());

    if (secondAddr > addr)
        size = secondAddr - addr;

    dcache_latency = 0;

    req->taskId(taskId());
    while (1) {
        req->setVirt(0, addr, size, flags, dataMasterId(),
                     thread->pcState().instAddr());

        // translate to physical address
        Fault fault = thread->dtb->translateAtomic(req, thread->getTC(),
                                                   BaseTLB::Read);

        // Now do the access.
        if (fault == NoFault && !req->getFlags().isSet(Request::NO_ACCESS)) {
            Packet pkt(req, Packet::makeReadCmd(req));
            pkt.dataStatic(data);

            if (req->isMmappedIpr())
                dcache_latency += TheISA::handleIprRead(thread->getTC(), &pkt);
            else {
                if (fastmem && system->isMemAddr(pkt.getAddr()))
                    system->getPhysMem().access(&pkt);
                else
                    dcache_latency += dcachePort.sendAtomic(&pkt);
            }
            dcache_access = true;

            assert(!pkt.isError());

            if (req->isLLSC()) {
                TheISA::handleLockedRead(thread, req);
            }
        }

        //If there's a fault, return it
        if (fault != NoFault) {
            if (req->isPrefetch()) {
                return NoFault;
            } else {
                return fault;
            }
        }

        //If we don't need to access a second cache line, stop now.
        if (secondAddr <= addr)
        {
            if (req->isLockedRMW() && fault == NoFault) {
                assert(!locked);
                locked = true;
            }

            return fault;
        }

        /*
         * Set up for accessing the second cache line.
         */

        //Move the pointer we're reading into to the correct location.
        data += size;
        //Adjust the size to get the remaining bytes.
        size = addr + fullSize - secondAddr;
        //And access the right address.
        addr = secondAddr;
    }
}
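
// writeMem() below mirrors readMem() but must also pick the right memory
// command: a store-conditional (LLSC) becomes a StoreCondReq and is first
// checked against the reservation set by the earlier load-locked via
// TheISA::handleLockedWrite(), which returns false (suppressing the cache
// access) if the reservation was lost; the SC's success/failure result is
// then handed back to the ISA through the request's extra data. A swap
// becomes a SwapReq, with the compare value for conditional swaps also
// passed through the extra data.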


Fault
AtomicSimpleCPU::writeMem(uint8_t *data, unsigned size,
                          Addr addr, unsigned flags, uint64_t *res)
{
    SimpleExecContext& t_info = *threadInfo[curThread];
    SimpleThread* thread = t_info.thread;
    static uint8_t zero_array[64] = {};

    if (data == NULL) {
        assert(size <= 64);
        assert(flags & Request::CACHE_BLOCK_ZERO);
        // This must be a cache block cleaning request
        data = zero_array;
    }

    // use the CPU's statically allocated write request and packet objects
    Request *req = &data_write_req;

    if (traceData)
        traceData->setMem(addr, size, flags);

    //The size of the data we're trying to write.
    int fullSize = size;

    //The address of the second part of this access if it needs to be split
    //across a cache line boundary.
    Addr secondAddr = roundDown(addr + size - 1, cacheLineSize());

    if (secondAddr > addr)
        size = secondAddr - addr;

    dcache_latency = 0;

    req->taskId(taskId());
    while (1) {
        req->setVirt(0, addr, size, flags, dataMasterId(),
                     thread->pcState().instAddr());

        // translate to physical address
        Fault fault = thread->dtb->translateAtomic(req, thread->getTC(),
                                                   BaseTLB::Write);

        // Now do the access.
        if (fault == NoFault) {
            MemCmd cmd = MemCmd::WriteReq; // default
            bool do_access = true;  // cleared to suppress the cache access

            if (req->isLLSC()) {
                cmd = MemCmd::StoreCondReq;
                do_access =
                    TheISA::handleLockedWrite(thread, req,
                                              dcachePort.cacheBlockMask);
            } else if (req->isSwap()) {
                cmd = MemCmd::SwapReq;
                if (req->isCondSwap()) {
                    assert(res);
                    req->setExtraData(*res);
                }
            }

            if (do_access && !req->getFlags().isSet(Request::NO_ACCESS)) {
                Packet pkt = Packet(req, cmd);
                pkt.dataStatic(data);

                if (req->isMmappedIpr()) {
                    dcache_latency +=
                        TheISA::handleIprWrite(thread->getTC(), &pkt);
                } else {
                    if (fastmem && system->isMemAddr(pkt.getAddr()))
                        system->getPhysMem().access(&pkt);
                    else
                        dcache_latency += dcachePort.sendAtomic(&pkt);
                }
                dcache_access = true;
                assert(!pkt.isError());

                if (req->isSwap()) {
                    assert(res);
                    memcpy(res, pkt.getConstPtr<uint8_t>(), fullSize);
                }
            }

            if (res && !req->isSwap()) {
                *res = req->getExtraData();
            }
        }

        //If there's a fault or we don't need to access a second cache line,
        //stop now.
        if (fault != NoFault || secondAddr <= addr)
        {
            if (req->isLockedRMW() && fault == NoFault) {
                assert(locked);
                locked = false;
            }

            if (fault != NoFault && req->isPrefetch()) {
                return NoFault;
            } else {
                return fault;
            }
        }

        /*
         * Set up for accessing the second cache line.
         */

        //Move the pointer we're writing from to the correct location.
        data += size;
        //Adjust the size to get the remaining bytes.
        size = addr + fullSize - secondAddr;
        //And access the right address.
        addr = secondAddr;
    }
}
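
// tick() below executes up to 'width' instructions per scheduled tick
// (continuing past that while a locked RMW is in flight), all in zero
// simulated time, then charges any modeled stall time afterwards. The
// stall accounting works in ticks but rounds up to whole clock periods:
// for example, with a 500-tick clock period, a combined icache/dcache
// latency of 750 ticks yields divCeil(750, 500) * 500 = 1000 ticks of
// stall.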


void
AtomicSimpleCPU::tick()
{
    DPRINTF(SimpleCPU, "Tick\n");

    // Change thread if multi-threaded
    swapActiveThread();

    // Set memory request ids to current thread
    if (numThreads > 1) {
        ifetch_req.setThreadContext(_cpuId, curThread);
        data_read_req.setThreadContext(_cpuId, curThread);
        data_write_req.setThreadContext(_cpuId, curThread);
    }

    SimpleExecContext& t_info = *threadInfo[curThread];
    SimpleThread* thread = t_info.thread;

    Tick latency = 0;

    for (int i = 0; i < width || locked; ++i) {
        numCycles++;
        ppCycles->notify(1);

        if (!curStaticInst || !curStaticInst->isDelayedCommit()) {
            checkForInterrupts();
            checkPcEventQueue();
        }

        // We must have just got suspended by a PC event
        if (_status == Idle) {
            tryCompleteDrain();
            return;
        }

        Fault fault = NoFault;

        TheISA::PCState pcState = thread->pcState();

        bool needToFetch = !isRomMicroPC(pcState.microPC()) &&
                           !curMacroStaticInst;
        if (needToFetch) {
            ifetch_req.taskId(taskId());
            setupFetchRequest(&ifetch_req);
            fault = thread->itb->translateAtomic(&ifetch_req, thread->getTC(),
                                                 BaseTLB::Execute);
        }

        if (fault == NoFault) {
            Tick icache_latency = 0;
            bool icache_access = false;
            dcache_access = false; // assume no dcache access

            if (needToFetch) {
                // This is commented out because the decoder would act like
                // a tiny cache otherwise. It wouldn't be flushed when needed
                // like the I cache. It should be flushed, and when that works
                // this code should be uncommented.
                //Fetch more instruction memory if necessary
                //if (decoder.needMoreBytes())
                //{
                icache_access = true;
                Packet ifetch_pkt = Packet(&ifetch_req, MemCmd::ReadReq);
                ifetch_pkt.dataStatic(&inst);

                if (fastmem && system->isMemAddr(ifetch_pkt.getAddr()))
                    system->getPhysMem().access(&ifetch_pkt);
                else
                    icache_latency = icachePort.sendAtomic(&ifetch_pkt);

                assert(!ifetch_pkt.isError());

                // ifetch_req is initialized to read the instruction directly
                // into the CPU object's inst field.
                //}
            }

            preExecute();

            if (curStaticInst) {
                fault = curStaticInst->execute(&t_info, traceData);

                // keep an instruction count
                if (fault == NoFault) {
                    countInst();
                    ppCommit->notify(std::make_pair(thread, curStaticInst));
                } else if (traceData && !DTRACE(ExecFaulting)) {
                    delete traceData;
                    traceData = NULL;
                }

                postExecute();
            }

            // @todo remove me after debugging with legion done
            if (curStaticInst && (!curStaticInst->isMicroop() ||
                                  curStaticInst->isFirstMicroop()))
                instCnt++;

            Tick stall_ticks = 0;
            if (simulate_inst_stalls && icache_access)
                stall_ticks += icache_latency;

            if (simulate_data_stalls && dcache_access)
                stall_ticks += dcache_latency;

            if (stall_ticks) {
                // the atomic cpu does its accounting in ticks, so
                // keep counting in ticks but round to the clock
                // period
                latency += divCeil(stall_ticks, clockPeriod()) *
                    clockPeriod();
            }

        }
        if (fault != NoFault || !t_info.stayAtPC)
            advancePC(fault);
    }

    if (tryCompleteDrain())
        return;

    // instruction takes at least one cycle
    if (latency < clockPeriod())
        latency = clockPeriod();

    if (_status != Idle)
        reschedule(tickEvent, curTick() + latency, true);
}

void
AtomicSimpleCPU::regProbePoints()
{
    BaseCPU::regProbePoints();

    ppCommit = new ProbePointArg<pair<SimpleThread*, const StaticInstPtr>>
        (getProbeManager(), "Commit");
}

void
AtomicSimpleCPU::printAddr(Addr a)
{
    dcachePort.printAddr(a);
}

////////////////////////////////////////////////////////////////////////
//
// AtomicSimpleCPU Simulation Object
//
AtomicSimpleCPU *
AtomicSimpleCPUParams::create()
{
    return new AtomicSimpleCPU(this);
}
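
// Note: AtomicSimpleCPUParams is the parameter struct generated from the
// corresponding Python SimObject description, and create() is the factory
// hook the simulator calls when instantiating the configured CPU object.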