atomic.cc revision 3512
/*
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 */

#include "arch/locked_mem.hh"
#include "arch/utility.hh"
#include "cpu/exetrace.hh"
#include "cpu/simple/atomic.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "sim/builder.hh"
#include "sim/system.hh"

using namespace std;
using namespace TheISA;

AtomicSimpleCPU::TickEvent::TickEvent(AtomicSimpleCPU *c)
    : Event(&mainEventQueue, CPU_Tick_Pri), cpu(c)
{
}


void
AtomicSimpleCPU::TickEvent::process()
{
    cpu->tick();
}

const char *
AtomicSimpleCPU::TickEvent::description()
{
    return "AtomicSimpleCPU tick event";
}

Port *
AtomicSimpleCPU::getPort(const std::string &if_name, int idx)
{
    if (if_name == "dcache_port")
        return &dcachePort;
    else if (if_name == "icache_port")
        return &icachePort;
    else
        panic("No Such Port\n");
}

void
AtomicSimpleCPU::init()
{
    BaseCPU::init();
#if FULL_SYSTEM
    for (int i = 0; i < threadContexts.size(); ++i) {
        ThreadContext *tc = threadContexts[i];

        // initialize CPU, including PC
        TheISA::initCPU(tc, tc->readCpuId());
    }
#endif
}

bool
AtomicSimpleCPU::CpuPort::recvTiming(PacketPtr pkt)
{
    panic("AtomicSimpleCPU doesn't expect recvTiming callback!");
    return true;
}

Tick
AtomicSimpleCPU::CpuPort::recvAtomic(PacketPtr pkt)
{
    //Snooping a coherence request, just return
    return curTick;
}

void
AtomicSimpleCPU::CpuPort::recvFunctional(PacketPtr pkt)
{
    //No internal storage to update, just return
    return;
}

void
AtomicSimpleCPU::CpuPort::recvStatusChange(Status status)
{
    if (status == RangeChange)
        return;

    panic("AtomicSimpleCPU doesn't expect recvStatusChange callback!");
}

void
AtomicSimpleCPU::CpuPort::recvRetry()
{
    panic("AtomicSimpleCPU doesn't expect recvRetry callback!");
}

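// The atomic CPU performs every memory access in place via sendAtomic(), so
// the constructor below pre-allocates a single fetch request/packet plus one
// data-read and one data-write request/packet pair, which are then reused
// for every instruction instead of being allocated per access.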
AtomicSimpleCPU::AtomicSimpleCPU(Params *p)
    : BaseSimpleCPU(p), tickEvent(this),
      width(p->width), simulate_stalls(p->simulate_stalls),
      icachePort(name() + "-iport", this), dcachePort(name() + "-dport", this)
{
    _status = Idle;

    ifetch_req = new Request();
    ifetch_req->setThreadContext(p->cpu_id, 0); // Add thread ID if we add MT
    ifetch_pkt = new Packet(ifetch_req, Packet::ReadReq, Packet::Broadcast);
    ifetch_pkt->dataStatic(&inst);

    data_read_req = new Request();
    data_read_req->setThreadContext(p->cpu_id, 0); // Add thread ID here too
    data_read_pkt = new Packet(data_read_req, Packet::ReadReq,
                               Packet::Broadcast);
    data_read_pkt->dataStatic(&dataReg);

    data_write_req = new Request();
    data_write_req->setThreadContext(p->cpu_id, 0); // Add thread ID here too
    data_write_pkt = new Packet(data_write_req, Packet::WriteReq,
                                Packet::Broadcast);
}


AtomicSimpleCPU::~AtomicSimpleCPU()
{
}

void
AtomicSimpleCPU::serialize(ostream &os)
{
    SimObject::State so_state = SimObject::getState();
    SERIALIZE_ENUM(so_state);
    Status _status = status();
    SERIALIZE_ENUM(_status);
    BaseSimpleCPU::serialize(os);
    nameOut(os, csprintf("%s.tickEvent", name()));
    tickEvent.serialize(os);
}

void
AtomicSimpleCPU::unserialize(Checkpoint *cp, const string &section)
{
    SimObject::State so_state;
    UNSERIALIZE_ENUM(so_state);
    UNSERIALIZE_ENUM(_status);
    BaseSimpleCPU::unserialize(cp, section);
    tickEvent.unserialize(cp, csprintf("%s.tickEvent", section));
}

void
AtomicSimpleCPU::resume()
{
    if (_status != SwitchedOut && _status != Idle) {
        assert(system->getMemoryMode() == System::Atomic);

        changeState(SimObject::Running);
        if (thread->status() == ThreadContext::Active) {
            if (!tickEvent.scheduled()) {
                tickEvent.schedule(nextCycle());
            }
        }
    }
}

void
AtomicSimpleCPU::switchOut()
{
    assert(status() == Running || status() == Idle);
    _status = SwitchedOut;

    tickEvent.squash();
}


void
AtomicSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
{
    BaseCPU::takeOverFrom(oldCPU);

    assert(!tickEvent.scheduled());

    // if any of this CPU's ThreadContexts are active, mark the CPU as
    // running and schedule its tick event.
    for (int i = 0; i < threadContexts.size(); ++i) {
        ThreadContext *tc = threadContexts[i];
        if (tc->status() == ThreadContext::Active && _status != Running) {
            _status = Running;
            tickEvent.schedule(nextCycle());
            break;
        }
    }
    if (_status != Running) {
        _status = Idle;
    }
}


void
AtomicSimpleCPU::activateContext(int thread_num, int delay)
{
    assert(thread_num == 0);
    assert(thread);

    assert(_status == Idle);
    assert(!tickEvent.scheduled());

    notIdleFraction++;
    //Make sure ticks are still on multiples of cycles
    tickEvent.schedule(nextCycle(curTick + cycles(delay)));
    _status = Running;
}


void
AtomicSimpleCPU::suspendContext(int thread_num)
{
    assert(thread_num == 0);
    assert(thread);

    assert(_status == Running);

    // tick event may not be scheduled if this gets called from inside
    // an instruction's execution, e.g. "quiesce"
    if (tickEvent.scheduled())
        tickEvent.deschedule();

    notIdleFraction--;
    _status = Idle;
}


template <class T>
Fault
AtomicSimpleCPU::read(Addr addr, T &data, unsigned flags)
{
    // use the CPU's statically allocated read request and packet objects
    Request *req = data_read_req;
    PacketPtr pkt = data_read_pkt;

    req->setVirt(0, addr, sizeof(T), flags, thread->readPC());

    if (traceData) {
        traceData->setAddr(addr);
    }

    // translate to physical address
    Fault fault = thread->translateDataReadReq(req);

    // Now do the access.
    if (fault == NoFault) {
        pkt->reinitFromRequest();

        dcache_latency = dcachePort.sendAtomic(pkt);
        dcache_access = true;

        assert(pkt->result == Packet::Success);
        data = pkt->get<T>();

        if (req->isLocked()) {
            TheISA::handleLockedRead(thread, req);
        }
    }

    // This will need a new way to tell if it has a dcache attached.
    if (req->isUncacheable())
        recordEvent("Uncached Read");

    return fault;
}

#ifndef DOXYGEN_SHOULD_SKIP_THIS

template
Fault
AtomicSimpleCPU::read(Addr addr, uint64_t &data, unsigned flags);

template
Fault
AtomicSimpleCPU::read(Addr addr, uint32_t &data, unsigned flags);

template
Fault
AtomicSimpleCPU::read(Addr addr, uint16_t &data, unsigned flags);

template
Fault
AtomicSimpleCPU::read(Addr addr, uint8_t &data, unsigned flags);

#endif //DOXYGEN_SHOULD_SKIP_THIS

template<>
Fault
AtomicSimpleCPU::read(Addr addr, double &data, unsigned flags)
{
    return read(addr, *(uint64_t*)&data, flags);
}

template<>
Fault
AtomicSimpleCPU::read(Addr addr, float &data, unsigned flags)
{
    return read(addr, *(uint32_t*)&data, flags);
}


template<>
Fault
AtomicSimpleCPU::read(Addr addr, int32_t &data, unsigned flags)
{
    return read(addr, (uint32_t&)data, flags);
}


template <class T>
Fault
AtomicSimpleCPU::write(T data, Addr addr, unsigned flags, uint64_t *res)
{
    // use the CPU's statically allocated write request and packet objects
    Request *req = data_write_req;
    PacketPtr pkt = data_write_pkt;

    req->setVirt(0, addr, sizeof(T), flags, thread->readPC());

    if (traceData) {
        traceData->setAddr(addr);
    }

    // translate to physical address
    Fault fault = thread->translateDataWriteReq(req);

    // Now do the access.
    if (fault == NoFault) {
        bool do_access = true;  // flag to suppress cache access

        if (req->isLocked()) {
            do_access = TheISA::handleLockedWrite(thread, req);
        }

        if (do_access) {
            data = htog(data);
            pkt->reinitFromRequest();
            pkt->dataStatic(&data);

            dcache_latency = dcachePort.sendAtomic(pkt);
            dcache_access = true;

            assert(pkt->result == Packet::Success);
        }

        if (req->isLocked()) {
            uint64_t scResult = req->getScResult();
            if (scResult != 0) {
                // clear failure counter
                thread->setStCondFailures(0);
            }
            if (res) {
                *res = req->getScResult();
            }
        }
    }

    // This will need a new way to tell if it's hooked up to a cache or not.
    if (req->isUncacheable())
        recordEvent("Uncached Write");

    // If the write needs to have a fault on the access, consider calling
    // changeStatus() and changing it to "bad addr write" or something.
    return fault;
}


#ifndef DOXYGEN_SHOULD_SKIP_THIS
template
Fault
AtomicSimpleCPU::write(uint64_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
AtomicSimpleCPU::write(uint32_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
AtomicSimpleCPU::write(uint16_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
AtomicSimpleCPU::write(uint8_t data, Addr addr,
                       unsigned flags, uint64_t *res);

#endif //DOXYGEN_SHOULD_SKIP_THIS

template<>
Fault
AtomicSimpleCPU::write(double data, Addr addr, unsigned flags, uint64_t *res)
{
    return write(*(uint64_t*)&data, addr, flags, res);
}

template<>
Fault
AtomicSimpleCPU::write(float data, Addr addr, unsigned flags, uint64_t *res)
{
    return write(*(uint32_t*)&data, addr, flags, res);
}


template<>
Fault
AtomicSimpleCPU::write(int32_t data, Addr addr, unsigned flags, uint64_t *res)
{
    return write((uint32_t)data, addr, flags, res);
}


void
AtomicSimpleCPU::tick()
{
    Tick latency = cycles(1); // instruction takes one cycle by default

    for (int i = 0; i < width; ++i) {
        numCycles++;

        if (!curStaticInst || !curStaticInst->isDelayedCommit())
            checkForInterrupts();

        Fault fault = setupFetchRequest(ifetch_req);

        if (fault == NoFault) {
            ifetch_pkt->reinitFromRequest();

            Tick icache_latency = icachePort.sendAtomic(ifetch_pkt);
            // ifetch_req is initialized to read the instruction directly
            // into the CPU object's inst field.

            dcache_access = false; // assume no dcache access
            preExecute();
            fault = curStaticInst->execute(this, traceData);
            postExecute();

            if (simulate_stalls) {
                Tick icache_stall = icache_latency - cycles(1);
                Tick dcache_stall =
                    dcache_access ? dcache_latency - cycles(1) : 0;
                // charge the combined stall time, rounded up to a whole
                // number of CPU cycles
                Tick stall_cycles = (icache_stall + dcache_stall) / cycles(1);
                if (cycles(stall_cycles) < (icache_stall + dcache_stall))
                    latency += cycles(stall_cycles+1);
                else
                    latency += cycles(stall_cycles);
            }

        }

        advancePC(fault);
    }

    if (_status != Idle)
        tickEvent.schedule(curTick + latency);
}


////////////////////////////////////////////////////////////////////////
//
//  AtomicSimpleCPU Simulation Object
//
BEGIN_DECLARE_SIM_OBJECT_PARAMS(AtomicSimpleCPU)

    Param<Counter> max_insts_any_thread;
    Param<Counter> max_insts_all_threads;
    Param<Counter> max_loads_any_thread;
    Param<Counter> max_loads_all_threads;
    Param<Tick> progress_interval;
    SimObjectParam<System *> system;
    Param<int> cpu_id;

#if FULL_SYSTEM
    SimObjectParam<TheISA::ITB *> itb;
    SimObjectParam<TheISA::DTB *> dtb;
    Param<Tick> profile;
#else
    SimObjectParam<Process *> workload;
#endif // FULL_SYSTEM

    Param<int> clock;

    Param<bool> defer_registration;
    Param<int> width;
    Param<bool> function_trace;
    Param<Tick> function_trace_start;
    Param<bool> simulate_stalls;

END_DECLARE_SIM_OBJECT_PARAMS(AtomicSimpleCPU)

BEGIN_INIT_SIM_OBJECT_PARAMS(AtomicSimpleCPU)

    INIT_PARAM(max_insts_any_thread,
               "terminate when any thread reaches this inst count"),
    INIT_PARAM(max_insts_all_threads,
               "terminate when all threads have reached this inst count"),
    INIT_PARAM(max_loads_any_thread,
               "terminate when any thread reaches this load count"),
    INIT_PARAM(max_loads_all_threads,
               "terminate when all threads have reached this load count"),
    INIT_PARAM(progress_interval, "Progress interval"),
    INIT_PARAM(system, "system object"),
    INIT_PARAM(cpu_id, "processor ID"),

#if FULL_SYSTEM
    INIT_PARAM(itb, "Instruction TLB"),
    INIT_PARAM(dtb, "Data TLB"),
    INIT_PARAM(profile, ""),
#else
    INIT_PARAM(workload, "processes to run"),
#endif // FULL_SYSTEM

    INIT_PARAM(clock, "clock speed"),
    INIT_PARAM(defer_registration, "defer system registration (for sampling)"),
    INIT_PARAM(width, "cpu width"),
    INIT_PARAM(function_trace, "Enable function trace"),
    INIT_PARAM(function_trace_start, "Cycle to start function trace"),
    INIT_PARAM(simulate_stalls, "Simulate cache stall cycles")

END_INIT_SIM_OBJECT_PARAMS(AtomicSimpleCPU)


CREATE_SIM_OBJECT(AtomicSimpleCPU)
{
    AtomicSimpleCPU::Params *params = new AtomicSimpleCPU::Params();
    params->name = getInstanceName();
    params->numberOfThreads = 1;
    params->max_insts_any_thread = max_insts_any_thread;
    params->max_insts_all_threads = max_insts_all_threads;
    params->max_loads_any_thread = max_loads_any_thread;
    params->max_loads_all_threads = max_loads_all_threads;
    params->progress_interval = progress_interval;
    params->deferRegistration = defer_registration;
    params->clock = clock;
    params->functionTrace = function_trace;
    params->functionTraceStart = function_trace_start;
    params->width = width;
    params->simulate_stalls = simulate_stalls;
    params->system = system;
    params->cpu_id = cpu_id;

#if FULL_SYSTEM
    params->itb = itb;
    params->dtb = dtb;
    params->profile = profile;
#else
    params->process = workload;
#endif

    AtomicSimpleCPU *cpu = new AtomicSimpleCPU(params);
    return cpu;
}

REGISTER_SIM_OBJECT("AtomicSimpleCPU", AtomicSimpleCPU)
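// REGISTER_SIM_OBJECT (from sim/builder.hh) exposes the CREATE_SIM_OBJECT
// factory above under the name "AtomicSimpleCPU", so that simulation
// configuration scripts can instantiate this CPU model by that name.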