/*
 * Copyright (c) 2010-2012 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT 32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 39 * 40 * Authors: Steve Reinhardt 41 */ 42 43#include "arch/locked_mem.hh" 44#include "arch/mmapped_ipr.hh" 45#include "arch/utility.hh" 46#include "base/bigint.hh" 47#include "config/the_isa.hh" 48#include "cpu/simple/timing.hh" 49#include "cpu/exetrace.hh" 50#include "debug/Config.hh" 51#include "debug/Drain.hh" 52#include "debug/ExecFaulting.hh" 53#include "debug/SimpleCPU.hh" 54#include "mem/packet.hh" 55#include "mem/packet_access.hh" 56#include "params/TimingSimpleCPU.hh" 57#include "sim/faults.hh" 58#include "sim/full_system.hh" 59#include "sim/system.hh" 60 61using namespace std; 62using namespace TheISA; 63 64void 65TimingSimpleCPU::init() 66{ 67 BaseCPU::init(); 68 69 // Initialise the ThreadContext's memory proxies 70 tcBase()->initMemProxies(tcBase()); 71 72 if (FullSystem && !params()->switched_out) { 73 for (int i = 0; i < threadContexts.size(); ++i) { 74 ThreadContext *tc = threadContexts[i]; 75 // initialize CPU, including PC 76 TheISA::initCPU(tc, _cpuId); 77 } 78 } 79} 80 81void 82TimingSimpleCPU::TimingCPUPort::TickEvent::schedule(PacketPtr _pkt, Tick t) 83{ 84 pkt = _pkt; 85 cpu->schedule(this, t); 86} 87 88TimingSimpleCPU::TimingSimpleCPU(TimingSimpleCPUParams *p) 89 : BaseSimpleCPU(p), fetchTranslation(this), icachePort(this), 90 dcachePort(this), ifetch_pkt(NULL), dcache_pkt(NULL), previousCycle(0), 91 fetchEvent(this), drainManager(NULL) 92{ 93 _status = Idle; 94 95 system->totalNumInsts = 0; 96} 97 98 
99TimingSimpleCPU::~TimingSimpleCPU() 100{ 101} 102 103unsigned int 104TimingSimpleCPU::drain(DrainManager *drain_manager) 105{ 106 assert(!drainManager); 107 if (switchedOut()) 108 return 0; 109 110 if (_status == Idle || 111 (_status == BaseSimpleCPU::Running && isDrained())) { 112 assert(!fetchEvent.scheduled()); 113 DPRINTF(Drain, "No need to drain.\n"); 114 return 0; 115 } else { 116 drainManager = drain_manager; 117 DPRINTF(Drain, "Requesting drain: %s\n", pcState()); 118 119 // The fetch event can become descheduled if a drain didn't 120 // succeed on the first attempt. We need to reschedule it if 121 // the CPU is waiting for a microcode routine to complete. 122 if (_status == BaseSimpleCPU::Running && !fetchEvent.scheduled())
|
124 125 return 1; 126 } 127} 128 129void 130TimingSimpleCPU::drainResume() 131{ 132 assert(!fetchEvent.scheduled()); 133 assert(!drainManager); 134 if (switchedOut()) 135 return; 136 137 DPRINTF(SimpleCPU, "Resume\n"); 138 verifyMemoryMode(); 139 140 assert(!threadContexts.empty()); 141 if (threadContexts.size() > 1) 142 fatal("The timing CPU only supports one thread.\n"); 143 144 if (thread->status() == ThreadContext::Active) { 145 schedule(fetchEvent, nextCycle()); 146 _status = BaseSimpleCPU::Running; 147 } else { 148 _status = BaseSimpleCPU::Idle; 149 } 150} 151 152bool 153TimingSimpleCPU::tryCompleteDrain() 154{ 155 if (!drainManager) 156 return false; 157 158 DPRINTF(Drain, "tryCompleteDrain: %s\n", pcState()); 159 if (!isDrained()) 160 return false; 161 162 DPRINTF(Drain, "CPU done draining, processing drain event\n"); 163 drainManager->signalDrainDone(); 164 drainManager = NULL; 165 166 return true; 167} 168 169void 170TimingSimpleCPU::switchOut() 171{ 172 BaseSimpleCPU::switchOut(); 173 174 assert(!fetchEvent.scheduled()); 175 assert(_status == BaseSimpleCPU::Running || _status == Idle); 176 assert(!stayAtPC); 177 assert(microPC() == 0); 178 179 numCycles += curCycle() - previousCycle; 180} 181 182 183void 184TimingSimpleCPU::takeOverFrom(BaseCPU *oldCPU) 185{ 186 BaseSimpleCPU::takeOverFrom(oldCPU); 187 188 previousCycle = curCycle(); 189} 190 191void 192TimingSimpleCPU::verifyMemoryMode() const 193{ 194 if (!system->isTimingMode()) { 195 fatal("The timing CPU requires the memory system to be in " 196 "'timing' mode.\n"); 197 } 198} 199 200void 201TimingSimpleCPU::activateContext(ThreadID thread_num, Cycles delay) 202{ 203 DPRINTF(SimpleCPU, "ActivateContext %d (%d cycles)\n", thread_num, delay); 204 205 assert(thread_num == 0); 206 assert(thread); 207 208 assert(_status == Idle); 209 210 notIdleFraction++; 211 _status = BaseSimpleCPU::Running; 212 213 // kick things off by initiating the fetch of the next instruction 214 schedule(fetchEvent, 
clockEdge(delay)); 215} 216 217 218void 219TimingSimpleCPU::suspendContext(ThreadID thread_num) 220{ 221 DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num); 222 223 assert(thread_num == 0); 224 assert(thread); 225 226 if (_status == Idle) 227 return; 228 229 assert(_status == BaseSimpleCPU::Running); 230 231 // just change status to Idle... if status != Running, 232 // completeInst() will not initiate fetch of next instruction. 233 234 notIdleFraction--; 235 _status = Idle; 236} 237 238bool 239TimingSimpleCPU::handleReadPacket(PacketPtr pkt) 240{ 241 RequestPtr req = pkt->req; 242 if (req->isMmappedIpr()) { 243 Cycles delay = TheISA::handleIprRead(thread->getTC(), pkt); 244 new IprEvent(pkt, this, clockEdge(delay)); 245 _status = DcacheWaitResponse; 246 dcache_pkt = NULL; 247 } else if (!dcachePort.sendTimingReq(pkt)) { 248 _status = DcacheRetry; 249 dcache_pkt = pkt; 250 } else { 251 _status = DcacheWaitResponse; 252 // memory system takes ownership of packet 253 dcache_pkt = NULL; 254 } 255 return dcache_pkt == NULL; 256} 257 258void 259TimingSimpleCPU::sendData(RequestPtr req, uint8_t *data, uint64_t *res, 260 bool read) 261{ 262 PacketPtr pkt; 263 buildPacket(pkt, req, read); 264 pkt->dataDynamicArray<uint8_t>(data); 265 if (req->getFlags().isSet(Request::NO_ACCESS)) { 266 assert(!dcache_pkt); 267 pkt->makeResponse(); 268 completeDataAccess(pkt); 269 } else if (read) { 270 handleReadPacket(pkt); 271 } else { 272 bool do_access = true; // flag to suppress cache access 273 274 if (req->isLLSC()) { 275 do_access = TheISA::handleLockedWrite(thread, req); 276 } else if (req->isCondSwap()) { 277 assert(res); 278 req->setExtraData(*res); 279 } 280 281 if (do_access) { 282 dcache_pkt = pkt; 283 handleWritePacket(); 284 } else { 285 _status = DcacheWaitResponse; 286 completeDataAccess(pkt); 287 } 288 } 289} 290 291void 292TimingSimpleCPU::sendSplitData(RequestPtr req1, RequestPtr req2, 293 RequestPtr req, uint8_t *data, bool read) 294{ 295 PacketPtr pkt1, pkt2; 296 
buildSplitPacket(pkt1, pkt2, req1, req2, req, data, read); 297 if (req->getFlags().isSet(Request::NO_ACCESS)) { 298 assert(!dcache_pkt); 299 pkt1->makeResponse(); 300 completeDataAccess(pkt1); 301 } else if (read) { 302 SplitFragmentSenderState * send_state = 303 dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState); 304 if (handleReadPacket(pkt1)) { 305 send_state->clearFromParent(); 306 send_state = dynamic_cast<SplitFragmentSenderState *>( 307 pkt2->senderState); 308 if (handleReadPacket(pkt2)) { 309 send_state->clearFromParent(); 310 } 311 } 312 } else { 313 dcache_pkt = pkt1; 314 SplitFragmentSenderState * send_state = 315 dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState); 316 if (handleWritePacket()) { 317 send_state->clearFromParent(); 318 dcache_pkt = pkt2; 319 send_state = dynamic_cast<SplitFragmentSenderState *>( 320 pkt2->senderState); 321 if (handleWritePacket()) { 322 send_state->clearFromParent(); 323 } 324 } 325 } 326} 327 328void 329TimingSimpleCPU::translationFault(Fault fault) 330{ 331 // fault may be NoFault in cases where a fault is suppressed, 332 // for instance prefetches. 333 numCycles += curCycle() - previousCycle; 334 previousCycle = curCycle(); 335 336 if (traceData) { 337 // Since there was a fault, we shouldn't trace this instruction. 
338 delete traceData; 339 traceData = NULL; 340 } 341 342 postExecute(); 343 344 advanceInst(fault); 345} 346 347void 348TimingSimpleCPU::buildPacket(PacketPtr &pkt, RequestPtr req, bool read) 349{ 350 MemCmd cmd; 351 if (read) { 352 cmd = MemCmd::ReadReq; 353 if (req->isLLSC()) 354 cmd = MemCmd::LoadLockedReq; 355 } else { 356 cmd = MemCmd::WriteReq; 357 if (req->isLLSC()) { 358 cmd = MemCmd::StoreCondReq; 359 } else if (req->isSwap()) { 360 cmd = MemCmd::SwapReq; 361 } 362 } 363 pkt = new Packet(req, cmd); 364} 365 366void 367TimingSimpleCPU::buildSplitPacket(PacketPtr &pkt1, PacketPtr &pkt2, 368 RequestPtr req1, RequestPtr req2, RequestPtr req, 369 uint8_t *data, bool read) 370{ 371 pkt1 = pkt2 = NULL; 372 373 assert(!req1->isMmappedIpr() && !req2->isMmappedIpr()); 374 375 if (req->getFlags().isSet(Request::NO_ACCESS)) { 376 buildPacket(pkt1, req, read); 377 return; 378 } 379 380 buildPacket(pkt1, req1, read); 381 buildPacket(pkt2, req2, read); 382 383 req->setPhys(req1->getPaddr(), req->getSize(), req1->getFlags(), dataMasterId()); 384 PacketPtr pkt = new Packet(req, pkt1->cmd.responseCommand()); 385 386 pkt->dataDynamicArray<uint8_t>(data); 387 pkt1->dataStatic<uint8_t>(data); 388 pkt2->dataStatic<uint8_t>(data + req1->getSize()); 389 390 SplitMainSenderState * main_send_state = new SplitMainSenderState; 391 pkt->senderState = main_send_state; 392 main_send_state->fragments[0] = pkt1; 393 main_send_state->fragments[1] = pkt2; 394 main_send_state->outstanding = 2; 395 pkt1->senderState = new SplitFragmentSenderState(pkt, 0); 396 pkt2->senderState = new SplitFragmentSenderState(pkt, 1); 397} 398 399Fault 400TimingSimpleCPU::readMem(Addr addr, uint8_t *data, 401 unsigned size, unsigned flags) 402{ 403 Fault fault; 404 const int asid = 0; 405 const ThreadID tid = 0; 406 const Addr pc = thread->instAddr(); 407 unsigned block_size = dcachePort.peerBlockSize(); 408 BaseTLB::Mode mode = BaseTLB::Read; 409 410 if (traceData) { 411 traceData->setAddr(addr); 412 } 413 
414 RequestPtr req = new Request(asid, addr, size, 415 flags, dataMasterId(), pc, _cpuId, tid); 416 417 Addr split_addr = roundDown(addr + size - 1, block_size); 418 assert(split_addr <= addr || split_addr - addr < block_size); 419 420 _status = DTBWaitResponse; 421 if (split_addr > addr) { 422 RequestPtr req1, req2; 423 assert(!req->isLLSC() && !req->isSwap()); 424 req->splitOnVaddr(split_addr, req1, req2); 425 426 WholeTranslationState *state = 427 new WholeTranslationState(req, req1, req2, new uint8_t[size], 428 NULL, mode); 429 DataTranslation<TimingSimpleCPU *> *trans1 = 430 new DataTranslation<TimingSimpleCPU *>(this, state, 0); 431 DataTranslation<TimingSimpleCPU *> *trans2 = 432 new DataTranslation<TimingSimpleCPU *>(this, state, 1); 433 434 thread->dtb->translateTiming(req1, tc, trans1, mode); 435 thread->dtb->translateTiming(req2, tc, trans2, mode); 436 } else { 437 WholeTranslationState *state = 438 new WholeTranslationState(req, new uint8_t[size], NULL, mode); 439 DataTranslation<TimingSimpleCPU *> *translation 440 = new DataTranslation<TimingSimpleCPU *>(this, state); 441 thread->dtb->translateTiming(req, tc, translation, mode); 442 } 443 444 return NoFault; 445} 446 447bool 448TimingSimpleCPU::handleWritePacket() 449{ 450 RequestPtr req = dcache_pkt->req; 451 if (req->isMmappedIpr()) { 452 Cycles delay = TheISA::handleIprWrite(thread->getTC(), dcache_pkt); 453 new IprEvent(dcache_pkt, this, clockEdge(delay)); 454 _status = DcacheWaitResponse; 455 dcache_pkt = NULL; 456 } else if (!dcachePort.sendTimingReq(dcache_pkt)) { 457 _status = DcacheRetry; 458 } else { 459 _status = DcacheWaitResponse; 460 // memory system takes ownership of packet 461 dcache_pkt = NULL; 462 } 463 return dcache_pkt == NULL; 464} 465 466Fault 467TimingSimpleCPU::writeMem(uint8_t *data, unsigned size, 468 Addr addr, unsigned flags, uint64_t *res) 469{ 470 uint8_t *newData = new uint8_t[size]; 471 memcpy(newData, data, size); 472 473 const int asid = 0; 474 const ThreadID tid = 0; 
475 const Addr pc = thread->instAddr(); 476 unsigned block_size = dcachePort.peerBlockSize(); 477 BaseTLB::Mode mode = BaseTLB::Write; 478 479 if (traceData) { 480 traceData->setAddr(addr); 481 } 482 483 RequestPtr req = new Request(asid, addr, size, 484 flags, dataMasterId(), pc, _cpuId, tid); 485 486 Addr split_addr = roundDown(addr + size - 1, block_size); 487 assert(split_addr <= addr || split_addr - addr < block_size); 488 489 _status = DTBWaitResponse; 490 if (split_addr > addr) { 491 RequestPtr req1, req2; 492 assert(!req->isLLSC() && !req->isSwap()); 493 req->splitOnVaddr(split_addr, req1, req2); 494 495 WholeTranslationState *state = 496 new WholeTranslationState(req, req1, req2, newData, res, mode); 497 DataTranslation<TimingSimpleCPU *> *trans1 = 498 new DataTranslation<TimingSimpleCPU *>(this, state, 0); 499 DataTranslation<TimingSimpleCPU *> *trans2 = 500 new DataTranslation<TimingSimpleCPU *>(this, state, 1); 501 502 thread->dtb->translateTiming(req1, tc, trans1, mode); 503 thread->dtb->translateTiming(req2, tc, trans2, mode); 504 } else { 505 WholeTranslationState *state = 506 new WholeTranslationState(req, newData, res, mode); 507 DataTranslation<TimingSimpleCPU *> *translation = 508 new DataTranslation<TimingSimpleCPU *>(this, state); 509 thread->dtb->translateTiming(req, tc, translation, mode); 510 } 511 512 // Translation faults will be returned via finishTranslation() 513 return NoFault; 514} 515 516 517void 518TimingSimpleCPU::finishTranslation(WholeTranslationState *state) 519{ 520 _status = BaseSimpleCPU::Running; 521 522 if (state->getFault() != NoFault) { 523 if (state->isPrefetch()) { 524 state->setNoFault(); 525 } 526 delete [] state->data; 527 state->deleteReqs(); 528 translationFault(state->getFault()); 529 } else { 530 if (!state->isSplit) { 531 sendData(state->mainReq, state->data, state->res, 532 state->mode == BaseTLB::Read); 533 } else { 534 sendSplitData(state->sreqLow, state->sreqHigh, state->mainReq, 535 state->data, state->mode 
== BaseTLB::Read); 536 } 537 } 538 539 delete state; 540} 541 542 543void 544TimingSimpleCPU::fetch() 545{ 546 DPRINTF(SimpleCPU, "Fetch\n"); 547 548 if (!curStaticInst || !curStaticInst->isDelayedCommit()) 549 checkForInterrupts(); 550 551 checkPcEventQueue(); 552 553 // We must have just got suspended by a PC event 554 if (_status == Idle) 555 return; 556 557 TheISA::PCState pcState = thread->pcState(); 558 bool needToFetch = !isRomMicroPC(pcState.microPC()) && !curMacroStaticInst; 559 560 if (needToFetch) { 561 _status = BaseSimpleCPU::Running; 562 Request *ifetch_req = new Request(); 563 ifetch_req->setThreadContext(_cpuId, /* thread ID */ 0); 564 setupFetchRequest(ifetch_req); 565 DPRINTF(SimpleCPU, "Translating address %#x\n", ifetch_req->getVaddr()); 566 thread->itb->translateTiming(ifetch_req, tc, &fetchTranslation, 567 BaseTLB::Execute); 568 } else { 569 _status = IcacheWaitResponse; 570 completeIfetch(NULL); 571 572 numCycles += curCycle() - previousCycle; 573 previousCycle = curCycle(); 574 } 575} 576 577 578void 579TimingSimpleCPU::sendFetch(Fault fault, RequestPtr req, ThreadContext *tc) 580{ 581 if (fault == NoFault) { 582 DPRINTF(SimpleCPU, "Sending fetch for addr %#x(pa: %#x)\n", 583 req->getVaddr(), req->getPaddr()); 584 ifetch_pkt = new Packet(req, MemCmd::ReadReq); 585 ifetch_pkt->dataStatic(&inst); 586 DPRINTF(SimpleCPU, " -- pkt addr: %#x\n", ifetch_pkt->getAddr()); 587 588 if (!icachePort.sendTimingReq(ifetch_pkt)) { 589 // Need to wait for retry 590 _status = IcacheRetry; 591 } else { 592 // Need to wait for cache to respond 593 _status = IcacheWaitResponse; 594 // ownership of packet transferred to memory system 595 ifetch_pkt = NULL; 596 } 597 } else { 598 DPRINTF(SimpleCPU, "Translation of addr %#x faulted\n", req->getVaddr()); 599 delete req; 600 // fetch fault: advance directly to next instruction (fault handler) 601 _status = BaseSimpleCPU::Running; 602 advanceInst(fault); 603 } 604 605 numCycles += curCycle() - previousCycle; 606 
previousCycle = curCycle(); 607} 608 609 610void 611TimingSimpleCPU::advanceInst(Fault fault) 612{ 613 if (_status == Faulting) 614 return; 615 616 if (fault != NoFault) { 617 advancePC(fault); 618 DPRINTF(SimpleCPU, "Fault occured, scheduling fetch event\n");
|