atomic.cc revision 9179
/*
 * Copyright (c) 2012 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 */
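
/**
 * Overview (editor's note): the AtomicSimpleCPU is gem5's simplest CPU
 * model. Each tick() event fetches, decodes, and executes instructions
 * using atomic (blocking) memory accesses that complete immediately and
 * return a latency estimate, rather than sending timing requests. This
 * makes it the fastest of the CPU models, at the cost of memory-system
 * timing fidelity.
 */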

#include "arch/locked_mem.hh"
#include "arch/mmapped_ipr.hh"
#include "arch/utility.hh"
#include "base/bigint.hh"
#include "config/the_isa.hh"
#include "cpu/simple/atomic.hh"
#include "cpu/exetrace.hh"
#include "debug/ExecFaulting.hh"
#include "debug/SimpleCPU.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "mem/physical.hh"
#include "params/AtomicSimpleCPU.hh"
#include "sim/faults.hh"
#include "sim/system.hh"
#include "sim/full_system.hh"

using namespace std;
using namespace TheISA;

AtomicSimpleCPU::TickEvent::TickEvent(AtomicSimpleCPU *c)
    : Event(CPU_Tick_Pri), cpu(c)
{
}


void
AtomicSimpleCPU::TickEvent::process()
{
    cpu->tick();
}

const char *
AtomicSimpleCPU::TickEvent::description() const
{
    return "AtomicSimpleCPU tick";
}

void
AtomicSimpleCPU::init()
{
    BaseCPU::init();

    // Initialise the ThreadContext's memory proxies
    tcBase()->initMemProxies(tcBase());

    if (FullSystem && !params()->defer_registration) {
        ThreadID size = threadContexts.size();
        for (ThreadID i = 0; i < size; ++i) {
            ThreadContext *tc = threadContexts[i];
            // initialize CPU, including PC
            TheISA::initCPU(tc, tc->contextId());
        }
    }

    // Atomic doesn't do MT right now, so contextId == threadId
    ifetch_req.setThreadContext(_cpuId, 0); // Add thread ID if we add MT
    data_read_req.setThreadContext(_cpuId, 0); // Add thread ID here too
    data_write_req.setThreadContext(_cpuId, 0); // Add thread ID here too
}

AtomicSimpleCPU::AtomicSimpleCPU(AtomicSimpleCPUParams *p)
    : BaseSimpleCPU(p), tickEvent(this), width(p->width), locked(false),
      simulate_data_stalls(p->simulate_data_stalls),
      simulate_inst_stalls(p->simulate_inst_stalls),
      icachePort(name() + ".icache_port", this),
      dcachePort(name() + ".dcache_port", this),
      fastmem(p->fastmem)
{
    _status = Idle;
}


AtomicSimpleCPU::~AtomicSimpleCPU()
{
    if (tickEvent.scheduled()) {
        deschedule(tickEvent);
    }
}

void
AtomicSimpleCPU::serialize(ostream &os)
{
    SimObject::State so_state = SimObject::getState();
    SERIALIZE_ENUM(so_state);
    SERIALIZE_SCALAR(locked);
    BaseSimpleCPU::serialize(os);
    nameOut(os, csprintf("%s.tickEvent", name()));
    tickEvent.serialize(os);
}

void
AtomicSimpleCPU::unserialize(Checkpoint *cp, const string &section)
{
    SimObject::State so_state;
    UNSERIALIZE_ENUM(so_state);
    UNSERIALIZE_SCALAR(locked);
    BaseSimpleCPU::unserialize(cp, section);
    tickEvent.unserialize(cp, csprintf("%s.tickEvent", section));
}

void
AtomicSimpleCPU::resume()
{
    if (_status == Idle || _status == SwitchedOut)
        return;

    DPRINTF(SimpleCPU, "Resume\n");
    assert(system->getMemoryMode() == Enums::atomic);

    changeState(SimObject::Running);
    if (thread->status() == ThreadContext::Active) {
        if (!tickEvent.scheduled())
            schedule(tickEvent, nextCycle());
    }
    system->totalNumInsts = 0;
}

void
AtomicSimpleCPU::switchOut()
{
    assert(_status == Running || _status == Idle);
    _status = SwitchedOut;

    tickEvent.squash();
}


void
AtomicSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
{
    BaseCPU::takeOverFrom(oldCPU);

    assert(!tickEvent.scheduled());

    // if any of this CPU's ThreadContexts are active, mark the CPU as
    // running and schedule its tick event.
    ThreadID size = threadContexts.size();
    for (ThreadID i = 0; i < size; ++i) {
        ThreadContext *tc = threadContexts[i];
        if (tc->status() == ThreadContext::Active && _status != Running) {
            _status = Running;
            schedule(tickEvent, nextCycle());
            break;
        }
    }
    if (_status != Running) {
        _status = Idle;
    }
    assert(threadContexts.size() == 1);
    ifetch_req.setThreadContext(_cpuId, 0); // Add thread ID if we add MT
    data_read_req.setThreadContext(_cpuId, 0); // Add thread ID here too
    data_write_req.setThreadContext(_cpuId, 0); // Add thread ID here too
}


void
AtomicSimpleCPU::activateContext(ThreadID thread_num, int delay)
{
    DPRINTF(SimpleCPU, "ActivateContext %d (%d cycles)\n", thread_num, delay);

    assert(thread_num == 0);
    assert(thread);

    assert(_status == Idle);
    assert(!tickEvent.scheduled());

    notIdleFraction++;
    numCycles += tickToCycle(thread->lastActivate - thread->lastSuspend);

    //Make sure ticks are still on multiples of cycles
    schedule(tickEvent, clockEdge(delay));
    _status = Running;
}


void
AtomicSimpleCPU::suspendContext(ThreadID thread_num)
{
    DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);

    assert(thread_num == 0);
    assert(thread);

    if (_status == Idle)
        return;

    assert(_status == Running);

    // tick event may not be scheduled if this gets called from inside
    // an instruction's execution, e.g. "quiesce"
    if (tickEvent.scheduled())
        deschedule(tickEvent);

    notIdleFraction--;
    _status = Idle;
}
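
/*
 * Note on split accesses (editor's note, a sketch of the address math
 * used in readMem()/writeMem() below): an access that crosses a cache
 * block boundary is broken into two pieces. secondAddr is the start of
 * the block holding the access's last byte. For example, with a 64-byte
 * block, a 4-byte read at addr 62 gives
 *   secondAddr = roundDown(62 + 4 - 1, 64) = 64 > addr,
 * so the first piece covers bytes 62-63 (size = 64 - 62 = 2) and the
 * second covers bytes 64-65 (size = 62 + 4 - 64 = 2). If the access
 * fits in one block, secondAddr <= addr and the loop runs only once.
 */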

Fault
AtomicSimpleCPU::readMem(Addr addr, uint8_t *data,
                         unsigned size, unsigned flags)
{
    // use the CPU's statically allocated read request and packet objects
    Request *req = &data_read_req;

    if (traceData) {
        traceData->setAddr(addr);
    }

    //The block size of our peer.
    unsigned blockSize = dcachePort.peerBlockSize();
    //The size of the data we're trying to read.
    int fullSize = size;

    //The address of the second part of this access if it needs to be split
    //across a cache line boundary.
    Addr secondAddr = roundDown(addr + size - 1, blockSize);

    if (secondAddr > addr)
        size = secondAddr - addr;

    dcache_latency = 0;

    while (1) {
        req->setVirt(0, addr, size, flags, dataMasterId(),
                     thread->pcState().instAddr());

        // translate to physical address
        Fault fault = thread->dtb->translateAtomic(req, tc, BaseTLB::Read);

        // Now do the access.
        if (fault == NoFault && !req->getFlags().isSet(Request::NO_ACCESS)) {
            Packet pkt = Packet(req,
                                req->isLLSC() ? MemCmd::LoadLockedReq :
                                                MemCmd::ReadReq);
            pkt.dataStatic(data);

            if (req->isMmappedIpr())
                dcache_latency += TheISA::handleIprRead(thread->getTC(), &pkt);
            else {
                if (fastmem && system->isMemAddr(pkt.getAddr()))
                    system->getPhysMem().access(&pkt);
                else
                    dcache_latency += dcachePort.sendAtomic(&pkt);
            }
            dcache_access = true;

            assert(!pkt.isError());

            if (req->isLLSC()) {
                TheISA::handleLockedRead(thread, req);
            }
        }

        //If there's a fault, return it
        if (fault != NoFault) {
            if (req->isPrefetch()) {
                return NoFault;
            } else {
                return fault;
            }
        }

        //If we don't need to access a second cache line, stop now.
        if (secondAddr <= addr)
        {
            if (req->isLocked() && fault == NoFault) {
                assert(!locked);
                locked = true;
            }
            return fault;
        }

        /*
         * Set up for accessing the second cache line.
         */

        //Move the pointer we're reading into to the correct location.
        data += size;
        //Adjust the size to get the remaining bytes.
        size = addr + fullSize - secondAddr;
        //And access the right address.
        addr = secondAddr;
    }
}
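
/*
 * Note on the locked flag (editor's note): a read for a locked
 * (read-modify-write) request sets locked = true in readMem() above,
 * and the matching write in writeMem() below asserts and clears it.
 * While locked is set, the loop in tick() keeps executing past its
 * normal width, so both halves of the RMW complete within a single
 * tick() event and no other CPU's event can interleave between them.
 */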

Fault
AtomicSimpleCPU::writeMem(uint8_t *data, unsigned size,
                          Addr addr, unsigned flags, uint64_t *res)
{
    // use the CPU's statically allocated write request and packet objects
    Request *req = &data_write_req;

    if (traceData) {
        traceData->setAddr(addr);
    }

    //The block size of our peer.
    unsigned blockSize = dcachePort.peerBlockSize();
    //The size of the data we're trying to write.
    int fullSize = size;

    //The address of the second part of this access if it needs to be split
    //across a cache line boundary.
    Addr secondAddr = roundDown(addr + size - 1, blockSize);

    if (secondAddr > addr)
        size = secondAddr - addr;

    dcache_latency = 0;

    while (1) {
        req->setVirt(0, addr, size, flags, dataMasterId(),
                     thread->pcState().instAddr());

        // translate to physical address
        Fault fault = thread->dtb->translateAtomic(req, tc, BaseTLB::Write);

        // Now do the access.
        if (fault == NoFault) {
            MemCmd cmd = MemCmd::WriteReq; // default
            bool do_access = true;  // flag to suppress cache access

            if (req->isLLSC()) {
                cmd = MemCmd::StoreCondReq;
                do_access = TheISA::handleLockedWrite(thread, req);
            } else if (req->isSwap()) {
                cmd = MemCmd::SwapReq;
                if (req->isCondSwap()) {
                    assert(res);
                    req->setExtraData(*res);
                }
            }

            if (do_access && !req->getFlags().isSet(Request::NO_ACCESS)) {
                Packet pkt = Packet(req, cmd);
                pkt.dataStatic(data);

                if (req->isMmappedIpr()) {
                    dcache_latency +=
                        TheISA::handleIprWrite(thread->getTC(), &pkt);
                } else {
                    if (fastmem && system->isMemAddr(pkt.getAddr()))
                        system->getPhysMem().access(&pkt);
                    else
                        dcache_latency += dcachePort.sendAtomic(&pkt);
                }
                dcache_access = true;
                assert(!pkt.isError());

                if (req->isSwap()) {
                    assert(res);
                    memcpy(res, pkt.getPtr<uint8_t>(), fullSize);
                }
            }

            if (res && !req->isSwap()) {
                *res = req->getExtraData();
            }
        }

        //If there's a fault or we don't need to access a second cache line,
        //stop now.
        if (fault != NoFault || secondAddr <= addr)
        {
            if (req->isLocked() && fault == NoFault) {
                assert(locked);
                locked = false;
            }
            if (fault != NoFault && req->isPrefetch()) {
                return NoFault;
            } else {
                return fault;
            }
        }

        /*
         * Set up for accessing the second cache line.
         */

        //Move the pointer we're writing from to the correct location.
        data += size;
        //Adjust the size to get the remaining bytes.
        size = addr + fullSize - secondAddr;
        //And access the right address.
        addr = secondAddr;
    }
}
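
/*
 * Note on stall accounting (editor's note): in tick() below, simulated
 * cache stalls are accumulated in ticks and then rounded up to a whole
 * number of clock cycles so the CPU stays aligned to cycle boundaries.
 * E.g., assuming a 500-tick clock period and stall_ticks = 1200:
 * stall_cycles = 1200 / 500 = 2, aligned_stall_ticks = 1000 < 1200,
 * so one more period is added, giving 1500 ticks (3 cycles).
 */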

void
AtomicSimpleCPU::tick()
{
    DPRINTF(SimpleCPU, "Tick\n");

    Tick latency = 0;

    for (int i = 0; i < width || locked; ++i) {
        numCycles++;

        if (!curStaticInst || !curStaticInst->isDelayedCommit())
            checkForInterrupts();

        checkPcEventQueue();
        // We must have just got suspended by a PC event
        if (_status == Idle)
            return;

        Fault fault = NoFault;

        TheISA::PCState pcState = thread->pcState();

        bool needToFetch = !isRomMicroPC(pcState.microPC()) &&
                           !curMacroStaticInst;
        if (needToFetch) {
            setupFetchRequest(&ifetch_req);
            fault = thread->itb->translateAtomic(&ifetch_req, tc,
                                                 BaseTLB::Execute);
        }

        if (fault == NoFault) {
            Tick icache_latency = 0;
            bool icache_access = false;
            dcache_access = false; // assume no dcache access

            if (needToFetch) {
                // This is commented out because the decoder would act like
                // a tiny cache otherwise. It wouldn't be flushed when needed
                // like the I cache. It should be flushed, and when that works
                // this code should be uncommented.
                //Fetch more instruction memory if necessary
                //if (decoder.needMoreBytes())
                //{
                    icache_access = true;
                    Packet ifetch_pkt = Packet(&ifetch_req, MemCmd::ReadReq);
                    ifetch_pkt.dataStatic(&inst);

                    if (fastmem && system->isMemAddr(ifetch_pkt.getAddr()))
                        system->getPhysMem().access(&ifetch_pkt);
                    else
                        icache_latency = icachePort.sendAtomic(&ifetch_pkt);

                    assert(!ifetch_pkt.isError());

                    // ifetch_req is initialized to read the instruction
                    // directly into the CPU object's inst field.
                //}
            }

            preExecute();

            if (curStaticInst) {
                fault = curStaticInst->execute(this, traceData);

                // keep an instruction count
                if (fault == NoFault)
                    countInst();
                else if (traceData && !DTRACE(ExecFaulting)) {
                    delete traceData;
                    traceData = NULL;
                }

                postExecute();
            }

            // @todo remove me after debugging with legion done
            if (curStaticInst && (!curStaticInst->isMicroop() ||
                                  curStaticInst->isFirstMicroop()))
                instCnt++;

            Tick stall_ticks = 0;
            if (simulate_inst_stalls && icache_access)
                stall_ticks += icache_latency;

            if (simulate_data_stalls && dcache_access)
                stall_ticks += dcache_latency;

            if (stall_ticks) {
                Tick stall_cycles = stall_ticks / clockPeriod();
                Tick aligned_stall_ticks = ticks(stall_cycles);

                // round a partial cycle up to the next cycle boundary
                if (aligned_stall_ticks < stall_ticks)
                    aligned_stall_ticks += clockPeriod();

                latency += aligned_stall_ticks;
            }

        }
        if (fault != NoFault || !stayAtPC)
            advancePC(fault);
    }

    // instruction takes at least one cycle
    if (latency < clockPeriod())
        latency = clockPeriod();

    if (_status != Idle)
        schedule(tickEvent, curTick() + latency);
}


void
AtomicSimpleCPU::printAddr(Addr a)
{
    dcachePort.printAddr(a);
}


////////////////////////////////////////////////////////////////////////
//
//  AtomicSimpleCPU Simulation Object
//
AtomicSimpleCPU *
AtomicSimpleCPUParams::create()
{
    numThreads = 1;
    if (!FullSystem && workload.size() != 1)
        panic("only one workload allowed");
    return new AtomicSimpleCPU(this);
}