/*
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 */

#include "arch/locked_mem.hh"
#include "arch/mmapped_ipr.hh"
#include "arch/utility.hh"
#include "base/bigint.hh"
#include "config/the_isa.hh"
#include "cpu/simple/atomic.hh"
#include "cpu/exetrace.hh"
#include "debug/ExecFaulting.hh"
#include "debug/SimpleCPU.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "params/AtomicSimpleCPU.hh"
#include "sim/faults.hh"
#include "sim/system.hh"
#include "sim/full_system.hh"

using namespace std;
using namespace TheISA;

AtomicSimpleCPU::TickEvent::TickEvent(AtomicSimpleCPU *c)
    : Event(CPU_Tick_Pri), cpu(c)
{
}


void
AtomicSimpleCPU::TickEvent::process()
{
    cpu->tick();
}

const char *
AtomicSimpleCPU::TickEvent::description() const
{
    return "AtomicSimpleCPU tick";
}

Port *
AtomicSimpleCPU::getPort(const string &if_name, int idx)
{
    if (if_name == "dcache_port")
        return &dcachePort;
    else if (if_name == "icache_port")
        return &icachePort;
    else if (if_name == "physmem_port") {
        hasPhysMemPort = true;
        return &physmemPort;
    }
    else
        panic("No such port: %s\n", if_name);
}

void
AtomicSimpleCPU::init()
{
    BaseCPU::init();
    if (FullSystem) {
        ThreadID size = threadContexts.size();
        for (ThreadID i = 0; i < size; ++i) {
#if FULL_SYSTEM
            ThreadContext *tc = threadContexts[i];
            // initialize CPU, including PC
            TheISA::initCPU(tc, tc->contextId());
#endif
        }
    }
    if (hasPhysMemPort) {
        bool snoop = false;
        AddrRangeList pmAddrList;
        physmemPort.getPeerAddressRanges(pmAddrList, snoop);
        physMemAddr = *pmAddrList.begin();
    }
    // Atomic doesn't do MT right now, so contextId == threadId
    ifetch_req.setThreadContext(_cpuId, 0); // Add thread ID if we add MT
    data_read_req.setThreadContext(_cpuId, 0); // Add thread ID here too
    data_write_req.setThreadContext(_cpuId, 0); // Add thread ID here too
}
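// Port callbacks. In atomic mode the CPU itself drives every memory access
// through blocking sendAtomic() calls (see tick(), readMem() and writeMem()
// below), so the timing-mode callbacks here should never fire and simply
// panic. Snoops and functional accesses are deliberately ignored: this model
// keeps no internal storage (no cache or store buffer) that would need
// updating.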
bool
AtomicSimpleCPU::CpuPort::recvTiming(PacketPtr pkt)
{
    panic("AtomicSimpleCPU doesn't expect recvTiming callback!");
    return true;
}

Tick
AtomicSimpleCPU::CpuPort::recvAtomic(PacketPtr pkt)
{
    //Snooping a coherence request, just return
    return 0;
}

void
AtomicSimpleCPU::CpuPort::recvFunctional(PacketPtr pkt)
{
    //No internal storage to update, just return
    return;
}

void
AtomicSimpleCPU::CpuPort::recvStatusChange(Status status)
{
    if (status == RangeChange) {
        if (!snoopRangeSent) {
            snoopRangeSent = true;
            sendStatusChange(Port::RangeChange);
        }
        return;
    }

    panic("AtomicSimpleCPU doesn't expect recvStatusChange callback!");
}

void
AtomicSimpleCPU::CpuPort::recvRetry()
{
    panic("AtomicSimpleCPU doesn't expect recvRetry callback!");
}

void
AtomicSimpleCPU::DcachePort::setPeer(Port *port)
{
    Port::setPeer(port);

    if (FullSystem) {
        // Update the ThreadContext's memory ports (Functional/Virtual
        // Ports)
        cpu->tcBase()->connectMemPorts(cpu->tcBase());
    }
}

AtomicSimpleCPU::AtomicSimpleCPU(AtomicSimpleCPUParams *p)
    : BaseSimpleCPU(p), tickEvent(this), width(p->width), locked(false),
      simulate_data_stalls(p->simulate_data_stalls),
      simulate_inst_stalls(p->simulate_inst_stalls),
      icachePort(name() + "-iport", this), dcachePort(name() + "-dport", this),
      physmemPort(name() + "-pport", this), hasPhysMemPort(false)
{
    _status = Idle;

    icachePort.snoopRangeSent = false;
    dcachePort.snoopRangeSent = false;
}


AtomicSimpleCPU::~AtomicSimpleCPU()
{
    if (tickEvent.scheduled()) {
        deschedule(tickEvent);
    }
}

void
AtomicSimpleCPU::serialize(ostream &os)
{
    SimObject::State so_state = SimObject::getState();
    SERIALIZE_ENUM(so_state);
    SERIALIZE_SCALAR(locked);
    BaseSimpleCPU::serialize(os);
    nameOut(os, csprintf("%s.tickEvent", name()));
    tickEvent.serialize(os);
}

void
AtomicSimpleCPU::unserialize(Checkpoint *cp, const string &section)
{
    SimObject::State so_state;
    UNSERIALIZE_ENUM(so_state);
    UNSERIALIZE_SCALAR(locked);
    BaseSimpleCPU::unserialize(cp, section);
    tickEvent.unserialize(cp, csprintf("%s.tickEvent", section));
}

void
AtomicSimpleCPU::resume()
{
    if (_status == Idle || _status == SwitchedOut)
        return;

    DPRINTF(SimpleCPU, "Resume\n");
    assert(system->getMemoryMode() == Enums::atomic);

    changeState(SimObject::Running);
    if (thread->status() == ThreadContext::Active) {
        if (!tickEvent.scheduled())
            schedule(tickEvent, nextCycle());
    }
    system->totalNumInsts = 0;
}

void
AtomicSimpleCPU::switchOut()
{
    assert(_status == Running || _status == Idle);
    _status = SwitchedOut;

    tickEvent.squash();
}


void
AtomicSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
{
    BaseCPU::takeOverFrom(oldCPU, &icachePort, &dcachePort);

    assert(!tickEvent.scheduled());

    // if any of this CPU's ThreadContexts are active, mark the CPU as
    // running and schedule its tick event.
    ThreadID size = threadContexts.size();
    for (ThreadID i = 0; i < size; ++i) {
        ThreadContext *tc = threadContexts[i];
        if (tc->status() == ThreadContext::Active && _status != Running) {
            _status = Running;
            schedule(tickEvent, nextCycle());
            break;
        }
    }
    if (_status != Running) {
        _status = Idle;
    }
    assert(threadContexts.size() == 1);
    ifetch_req.setThreadContext(_cpuId, 0); // Add thread ID if we add MT
    data_read_req.setThreadContext(_cpuId, 0); // Add thread ID here too
    data_write_req.setThreadContext(_cpuId, 0); // Add thread ID here too
}
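// Context activation/suspension. activateContext() credits the cycles the
// thread spent suspended to numCycles before restarting the tick event;
// suspendContext() may be reached from inside an executing instruction
// (e.g. a quiesce), in which case the tick event is not currently scheduled
// and only the status needs to change.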
void
AtomicSimpleCPU::activateContext(int thread_num, int delay)
{
    DPRINTF(SimpleCPU, "ActivateContext %d (%d cycles)\n", thread_num, delay);

    assert(thread_num == 0);
    assert(thread);

    assert(_status == Idle);
    assert(!tickEvent.scheduled());

    notIdleFraction++;
    numCycles += tickToCycles(thread->lastActivate - thread->lastSuspend);

    //Make sure ticks are still on multiples of cycles
    schedule(tickEvent, nextCycle(curTick() + ticks(delay)));
    _status = Running;
}


void
AtomicSimpleCPU::suspendContext(int thread_num)
{
    DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);

    assert(thread_num == 0);
    assert(thread);

    if (_status == Idle)
        return;

    assert(_status == Running);

    // tick event may not be scheduled if this gets called from inside
    // an instruction's execution, e.g. "quiesce"
    if (tickEvent.scheduled())
        deschedule(tickEvent);

    notIdleFraction--;
    _status = Idle;
}
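// Memory access helpers. Both readMem() and writeMem() below may need to
// split an access that straddles a cache line boundary into two atomic
// packets, one per line: roundDown(addr + size - 1, blockSize) yields the
// start of the line holding the last byte, and if that lies above addr the
// access is split. Worked example (assuming 64-byte blocks): a 16-byte read
// at 0x38 gives secondAddr = roundDown(0x47, 64) = 0x40 > 0x38, so the first
// loop iteration reads the 8 bytes at 0x38..0x3f and the second iteration
// reads the remaining 8 bytes at 0x40..0x47.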
Fault
AtomicSimpleCPU::readMem(Addr addr, uint8_t *data,
                         unsigned size, unsigned flags)
{
    // use the CPU's statically allocated read request and packet objects
    Request *req = &data_read_req;

    if (traceData) {
        traceData->setAddr(addr);
    }

    //The block size of our peer.
    unsigned blockSize = dcachePort.peerBlockSize();
    //The size of the data we're trying to read.
    int fullSize = size;

    //The address of the second part of this access if it needs to be split
    //across a cache line boundary.
    Addr secondAddr = roundDown(addr + size - 1, blockSize);

    if (secondAddr > addr)
        size = secondAddr - addr;

    dcache_latency = 0;

    while (1) {
        req->setVirt(0, addr, size, flags, thread->pcState().instAddr());

        // translate to physical address
        Fault fault = thread->dtb->translateAtomic(req, tc, BaseTLB::Read);

        // Now do the access.
        if (fault == NoFault && !req->getFlags().isSet(Request::NO_ACCESS)) {
            Packet pkt = Packet(req,
                                req->isLLSC() ? MemCmd::LoadLockedReq :
                                                MemCmd::ReadReq,
                                Packet::Broadcast);
            pkt.dataStatic(data);

            if (req->isMmappedIpr())
                dcache_latency += TheISA::handleIprRead(thread->getTC(), &pkt);
            else {
                if (hasPhysMemPort && pkt.getAddr() == physMemAddr)
                    dcache_latency += physmemPort.sendAtomic(&pkt);
                else
                    dcache_latency += dcachePort.sendAtomic(&pkt);
            }
            dcache_access = true;

            assert(!pkt.isError());

            if (req->isLLSC()) {
                TheISA::handleLockedRead(thread, req);
            }
        }

        //If there's a fault, return it
        if (fault != NoFault) {
            if (req->isPrefetch()) {
                return NoFault;
            } else {
                return fault;
            }
        }

        //If we don't need to access a second cache line, stop now.
        if (secondAddr <= addr)
        {
            if (req->isLocked() && fault == NoFault) {
                assert(!locked);
                locked = true;
            }
            return fault;
        }

        /*
         * Set up for accessing the second cache line.
         */

        //Move the pointer we're reading into to the correct location.
        data += size;
        //Adjust the size to get the remaining bytes.
        size = addr + fullSize - secondAddr;
        //And access the right address.
        addr = secondAddr;
    }
}


Fault
AtomicSimpleCPU::writeMem(uint8_t *data, unsigned size,
                          Addr addr, unsigned flags, uint64_t *res)
{
    // use the CPU's statically allocated write request and packet objects
    Request *req = &data_write_req;

    if (traceData) {
        traceData->setAddr(addr);
    }

    //The block size of our peer.
    unsigned blockSize = dcachePort.peerBlockSize();
    //The size of the data we're trying to write.
    int fullSize = size;

    //The address of the second part of this access if it needs to be split
    //across a cache line boundary.
    Addr secondAddr = roundDown(addr + size - 1, blockSize);

    if (secondAddr > addr)
        size = secondAddr - addr;

    dcache_latency = 0;

    while (1) {
        req->setVirt(0, addr, size, flags, thread->pcState().instAddr());

        // translate to physical address
        Fault fault = thread->dtb->translateAtomic(req, tc, BaseTLB::Write);

        // Now do the access.
        if (fault == NoFault) {
            MemCmd cmd = MemCmd::WriteReq; // default
            bool do_access = true;  // flag to suppress cache access

            if (req->isLLSC()) {
                cmd = MemCmd::StoreCondReq;
                do_access = TheISA::handleLockedWrite(thread, req);
            } else if (req->isSwap()) {
                cmd = MemCmd::SwapReq;
                if (req->isCondSwap()) {
                    assert(res);
                    req->setExtraData(*res);
                }
            }

            if (do_access && !req->getFlags().isSet(Request::NO_ACCESS)) {
                Packet pkt = Packet(req, cmd, Packet::Broadcast);
                pkt.dataStatic(data);

                if (req->isMmappedIpr()) {
                    dcache_latency +=
                        TheISA::handleIprWrite(thread->getTC(), &pkt);
                } else {
                    if (hasPhysMemPort && pkt.getAddr() == physMemAddr)
                        dcache_latency += physmemPort.sendAtomic(&pkt);
                    else
                        dcache_latency += dcachePort.sendAtomic(&pkt);
                }
                dcache_access = true;
                assert(!pkt.isError());

                if (req->isSwap()) {
                    assert(res);
                    memcpy(res, pkt.getPtr<uint8_t>(), fullSize);
                }
            }

            if (res && !req->isSwap()) {
                *res = req->getExtraData();
            }
        }

        //If there's a fault or we don't need to access a second cache line,
        //stop now.
        if (fault != NoFault || secondAddr <= addr)
        {
            if (req->isLocked() && fault == NoFault) {
                assert(locked);
                locked = false;
            }
            if (fault != NoFault && req->isPrefetch()) {
                return NoFault;
            } else {
                return fault;
            }
        }

        /*
         * Set up for accessing the second cache line.
         */

        //Move the pointer we're writing from to the correct location.
        data += size;
        //Adjust the size to get the remaining bytes.
        size = addr + fullSize - secondAddr;
        //And access the right address.
        addr = secondAddr;
    }
}
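// Main simulation loop. Each call to tick() executes up to 'width'
// instructions (continuing past that while 'locked' is set): fetch (unless
// executing from the microcode ROM or inside a macro-op), translate and
// execute, then optionally charge the atomic-mode cache latencies as stall
// cycles. The next tick event is scheduled after the accumulated latency,
// with a one-cycle minimum per invocation.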
void
AtomicSimpleCPU::tick()
{
    DPRINTF(SimpleCPU, "Tick\n");

    Tick latency = 0;

    for (int i = 0; i < width || locked; ++i) {
        numCycles++;

        if (!curStaticInst || !curStaticInst->isDelayedCommit())
            checkForInterrupts();

        checkPcEventQueue();
        // We must have just got suspended by a PC event
        if (_status == Idle)
            return;

        Fault fault = NoFault;

        TheISA::PCState pcState = thread->pcState();

        bool needToFetch = !isRomMicroPC(pcState.microPC()) &&
                           !curMacroStaticInst;
        if (needToFetch) {
            setupFetchRequest(&ifetch_req);
            fault = thread->itb->translateAtomic(&ifetch_req, tc,
                                                 BaseTLB::Execute);
        }

        if (fault == NoFault) {
            Tick icache_latency = 0;
            bool icache_access = false;
            dcache_access = false; // assume no dcache access

            if (needToFetch) {
                // This is commented out because the predecoder would act
                // like a tiny cache otherwise. It wouldn't be flushed when
                // needed like the I cache. It should be flushed, and when
                // that works this code should be uncommented.
                //Fetch more instruction memory if necessary
                //if (predecoder.needMoreBytes()) {
                icache_access = true;
                Packet ifetch_pkt = Packet(&ifetch_req, MemCmd::ReadReq,
                                           Packet::Broadcast);
                ifetch_pkt.dataStatic(&inst);

                if (hasPhysMemPort && ifetch_pkt.getAddr() == physMemAddr)
                    icache_latency = physmemPort.sendAtomic(&ifetch_pkt);
                else
                    icache_latency = icachePort.sendAtomic(&ifetch_pkt);

                assert(!ifetch_pkt.isError());

                // ifetch_req is initialized to read the instruction
                // directly into the CPU object's inst field.
                //}
            }

            preExecute();

            if (curStaticInst) {
                fault = curStaticInst->execute(this, traceData);

                // keep an instruction count
                if (fault == NoFault)
                    countInst();
                else if (traceData && !DTRACE(ExecFaulting)) {
                    delete traceData;
                    traceData = NULL;
                }

                postExecute();
            }

            // @todo remove me after debugging with legion done
            if (curStaticInst && (!curStaticInst->isMicroop() ||
                    curStaticInst->isFirstMicroop()))
                instCnt++;

            Tick stall_ticks = 0;
            if (simulate_inst_stalls && icache_access)
                stall_ticks += icache_latency;

            if (simulate_data_stalls && dcache_access)
                stall_ticks += dcache_latency;

            if (stall_ticks) {
                // Round the stall up to a whole number of CPU cycles so the
                // tick event stays aligned to cycle boundaries.
                Tick stall_cycles = stall_ticks / ticks(1);
                Tick aligned_stall_ticks = ticks(stall_cycles);

                if (aligned_stall_ticks < stall_ticks)
                    aligned_stall_ticks += ticks(1);

                latency += aligned_stall_ticks;
            }

        }
        if (fault != NoFault || !stayAtPC)
            advancePC(fault);
    }

    // instruction takes at least one cycle
    if (latency < ticks(1))
        latency = ticks(1);

    if (_status != Idle)
        schedule(tickEvent, curTick() + latency);
}


void
AtomicSimpleCPU::printAddr(Addr a)
{
    dcachePort.printAddr(a);
}


////////////////////////////////////////////////////////////////////////
//
//  AtomicSimpleCPU Simulation Object
//
AtomicSimpleCPU *
AtomicSimpleCPUParams::create()
{
    numThreads = 1;
#if !FULL_SYSTEM
    if (!FullSystem && workload.size() != 1)
        panic("only one workload allowed");
#endif
    return new AtomicSimpleCPU(this);
}