// atomic.cc revision 4192
110259SAndrew.Bardsley@arm.com/* 210259SAndrew.Bardsley@arm.com * Copyright (c) 2002-2005 The Regents of The University of Michigan 310259SAndrew.Bardsley@arm.com * All rights reserved. 410259SAndrew.Bardsley@arm.com * 510259SAndrew.Bardsley@arm.com * Redistribution and use in source and binary forms, with or without 610259SAndrew.Bardsley@arm.com * modification, are permitted provided that the following conditions are 710259SAndrew.Bardsley@arm.com * met: redistributions of source code must retain the above copyright 810259SAndrew.Bardsley@arm.com * notice, this list of conditions and the following disclaimer; 910259SAndrew.Bardsley@arm.com * redistributions in binary form must reproduce the above copyright 1010259SAndrew.Bardsley@arm.com * notice, this list of conditions and the following disclaimer in the 1110259SAndrew.Bardsley@arm.com * documentation and/or other materials provided with the distribution; 1210259SAndrew.Bardsley@arm.com * neither the name of the copyright holders nor the names of its 1310259SAndrew.Bardsley@arm.com * contributors may be used to endorse or promote products derived from 1410259SAndrew.Bardsley@arm.com * this software without specific prior written permission. 1510259SAndrew.Bardsley@arm.com * 1610259SAndrew.Bardsley@arm.com * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 1710259SAndrew.Bardsley@arm.com * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 1810259SAndrew.Bardsley@arm.com * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 1910259SAndrew.Bardsley@arm.com * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT 2010259SAndrew.Bardsley@arm.com * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 2110259SAndrew.Bardsley@arm.com * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 2210259SAndrew.Bardsley@arm.com * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 2310259SAndrew.Bardsley@arm.com * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 2410259SAndrew.Bardsley@arm.com * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 2510259SAndrew.Bardsley@arm.com * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 2610259SAndrew.Bardsley@arm.com * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 2710259SAndrew.Bardsley@arm.com * 2810259SAndrew.Bardsley@arm.com * Authors: Steve Reinhardt 2910259SAndrew.Bardsley@arm.com */ 3010259SAndrew.Bardsley@arm.com 3110259SAndrew.Bardsley@arm.com#include "arch/locked_mem.hh" 3210259SAndrew.Bardsley@arm.com#include "arch/mmaped_ipr.hh" 3310259SAndrew.Bardsley@arm.com#include "arch/utility.hh" 3410259SAndrew.Bardsley@arm.com#include "base/bigint.hh" 3510259SAndrew.Bardsley@arm.com#include "cpu/exetrace.hh" 3610259SAndrew.Bardsley@arm.com#include "cpu/simple/atomic.hh" 3710259SAndrew.Bardsley@arm.com#include "mem/packet.hh" 3810259SAndrew.Bardsley@arm.com#include "mem/packet_access.hh" 3910259SAndrew.Bardsley@arm.com#include "sim/builder.hh" 4010259SAndrew.Bardsley@arm.com#include "sim/system.hh" 4110259SAndrew.Bardsley@arm.com 4210259SAndrew.Bardsley@arm.comusing namespace std; 4310259SAndrew.Bardsley@arm.comusing namespace TheISA; 4410259SAndrew.Bardsley@arm.com 4510259SAndrew.Bardsley@arm.comAtomicSimpleCPU::TickEvent::TickEvent(AtomicSimpleCPU *c) 4610259SAndrew.Bardsley@arm.com : Event(&mainEventQueue, CPU_Tick_Pri), cpu(c) 4710259SAndrew.Bardsley@arm.com{ 4810259SAndrew.Bardsley@arm.com} 4910259SAndrew.Bardsley@arm.com 5010259SAndrew.Bardsley@arm.com 
5110259SAndrew.Bardsley@arm.comvoid 5210259SAndrew.Bardsley@arm.comAtomicSimpleCPU::TickEvent::process() 5310259SAndrew.Bardsley@arm.com{ 5410259SAndrew.Bardsley@arm.com cpu->tick(); 5510259SAndrew.Bardsley@arm.com} 5610259SAndrew.Bardsley@arm.com 5710259SAndrew.Bardsley@arm.comconst char * 5810259SAndrew.Bardsley@arm.comAtomicSimpleCPU::TickEvent::description() 5910259SAndrew.Bardsley@arm.com{ 6010259SAndrew.Bardsley@arm.com return "AtomicSimpleCPU tick event"; 6110259SAndrew.Bardsley@arm.com} 6210259SAndrew.Bardsley@arm.com 6310259SAndrew.Bardsley@arm.comPort * 6410259SAndrew.Bardsley@arm.comAtomicSimpleCPU::getPort(const std::string &if_name, int idx) 6510259SAndrew.Bardsley@arm.com{ 6610259SAndrew.Bardsley@arm.com if (if_name == "dcache_port") 6710259SAndrew.Bardsley@arm.com return &dcachePort; 6810259SAndrew.Bardsley@arm.com else if (if_name == "icache_port") 6910259SAndrew.Bardsley@arm.com return &icachePort; 7010259SAndrew.Bardsley@arm.com else 7110259SAndrew.Bardsley@arm.com panic("No Such Port\n"); 7210259SAndrew.Bardsley@arm.com} 7310259SAndrew.Bardsley@arm.com 7410259SAndrew.Bardsley@arm.comvoid 7510259SAndrew.Bardsley@arm.comAtomicSimpleCPU::init() 7610259SAndrew.Bardsley@arm.com{ 7710259SAndrew.Bardsley@arm.com BaseCPU::init(); 7810259SAndrew.Bardsley@arm.com#if FULL_SYSTEM 7910259SAndrew.Bardsley@arm.com for (int i = 0; i < threadContexts.size(); ++i) { 8010259SAndrew.Bardsley@arm.com ThreadContext *tc = threadContexts[i]; 8110259SAndrew.Bardsley@arm.com 8210259SAndrew.Bardsley@arm.com // initialize CPU, including PC 8310259SAndrew.Bardsley@arm.com TheISA::initCPU(tc, tc->readCpuId()); 8410259SAndrew.Bardsley@arm.com } 8510259SAndrew.Bardsley@arm.com#endif 8610259SAndrew.Bardsley@arm.com} 8710259SAndrew.Bardsley@arm.com 8810259SAndrew.Bardsley@arm.combool 8910259SAndrew.Bardsley@arm.comAtomicSimpleCPU::CpuPort::recvTiming(PacketPtr pkt) 9010259SAndrew.Bardsley@arm.com{ 9110259SAndrew.Bardsley@arm.com panic("AtomicSimpleCPU doesn't expect recvTiming 
callback!"); 9210259SAndrew.Bardsley@arm.com return true; 9310259SAndrew.Bardsley@arm.com} 9410259SAndrew.Bardsley@arm.com 9510259SAndrew.Bardsley@arm.comTick 9610259SAndrew.Bardsley@arm.comAtomicSimpleCPU::CpuPort::recvAtomic(PacketPtr pkt) 9710259SAndrew.Bardsley@arm.com{ 9810259SAndrew.Bardsley@arm.com //Snooping a coherence request, just return 9910259SAndrew.Bardsley@arm.com return 0; 10010259SAndrew.Bardsley@arm.com} 10110259SAndrew.Bardsley@arm.com 10210259SAndrew.Bardsley@arm.comvoid 10310259SAndrew.Bardsley@arm.comAtomicSimpleCPU::CpuPort::recvFunctional(PacketPtr pkt) 10410259SAndrew.Bardsley@arm.com{ 10510259SAndrew.Bardsley@arm.com //No internal storage to update, just return 10610259SAndrew.Bardsley@arm.com return; 10710259SAndrew.Bardsley@arm.com} 10810259SAndrew.Bardsley@arm.com 10910259SAndrew.Bardsley@arm.comvoid 11010259SAndrew.Bardsley@arm.comAtomicSimpleCPU::CpuPort::recvStatusChange(Status status) 11110259SAndrew.Bardsley@arm.com{ 11210259SAndrew.Bardsley@arm.com if (status == RangeChange) { 11310259SAndrew.Bardsley@arm.com if (!snoopRangeSent) { 11410259SAndrew.Bardsley@arm.com snoopRangeSent = true; 11510259SAndrew.Bardsley@arm.com sendStatusChange(Port::RangeChange); 11610259SAndrew.Bardsley@arm.com } 11710259SAndrew.Bardsley@arm.com return; 11810259SAndrew.Bardsley@arm.com } 11910259SAndrew.Bardsley@arm.com 12010259SAndrew.Bardsley@arm.com panic("AtomicSimpleCPU doesn't expect recvStatusChange callback!"); 12110259SAndrew.Bardsley@arm.com} 12210259SAndrew.Bardsley@arm.com 12310259SAndrew.Bardsley@arm.comvoid 12410259SAndrew.Bardsley@arm.comAtomicSimpleCPU::CpuPort::recvRetry() 12510259SAndrew.Bardsley@arm.com{ 12610259SAndrew.Bardsley@arm.com panic("AtomicSimpleCPU doesn't expect recvRetry callback!"); 12710259SAndrew.Bardsley@arm.com} 12810259SAndrew.Bardsley@arm.com 12910259SAndrew.Bardsley@arm.comvoid 13010259SAndrew.Bardsley@arm.comAtomicSimpleCPU::DcachePort::setPeer(Port *port) 13110259SAndrew.Bardsley@arm.com{ 
13210259SAndrew.Bardsley@arm.com Port::setPeer(port); 13310259SAndrew.Bardsley@arm.com 13410259SAndrew.Bardsley@arm.com#if FULL_SYSTEM 13510259SAndrew.Bardsley@arm.com // Update the ThreadContext's memory ports (Functional/Virtual 13610259SAndrew.Bardsley@arm.com // Ports) 13710259SAndrew.Bardsley@arm.com cpu->tcBase()->connectMemPorts(); 13810259SAndrew.Bardsley@arm.com#endif 13910259SAndrew.Bardsley@arm.com} 14010259SAndrew.Bardsley@arm.com 14110259SAndrew.Bardsley@arm.comAtomicSimpleCPU::AtomicSimpleCPU(Params *p) 14210259SAndrew.Bardsley@arm.com : BaseSimpleCPU(p), tickEvent(this), 14310259SAndrew.Bardsley@arm.com width(p->width), simulate_stalls(p->simulate_stalls), 14410259SAndrew.Bardsley@arm.com icachePort(name() + "-iport", this), dcachePort(name() + "-iport", this) 14510259SAndrew.Bardsley@arm.com{ 14610259SAndrew.Bardsley@arm.com _status = Idle; 14710259SAndrew.Bardsley@arm.com 14810259SAndrew.Bardsley@arm.com icachePort.snoopRangeSent = false; 14910259SAndrew.Bardsley@arm.com dcachePort.snoopRangeSent = false; 15010259SAndrew.Bardsley@arm.com 15110259SAndrew.Bardsley@arm.com ifetch_req = new Request(); 15210259SAndrew.Bardsley@arm.com ifetch_req->setThreadContext(p->cpu_id, 0); // Add thread ID if we add MT 15310259SAndrew.Bardsley@arm.com ifetch_pkt = new Packet(ifetch_req, MemCmd::ReadReq, Packet::Broadcast); 15410259SAndrew.Bardsley@arm.com ifetch_pkt->dataStatic(&inst); 15510259SAndrew.Bardsley@arm.com 15610259SAndrew.Bardsley@arm.com data_read_req = new Request(); 15710259SAndrew.Bardsley@arm.com data_read_req->setThreadContext(p->cpu_id, 0); // Add thread ID here too 15810259SAndrew.Bardsley@arm.com data_read_pkt = new Packet(data_read_req, MemCmd::ReadReq, 15910259SAndrew.Bardsley@arm.com Packet::Broadcast); 16010259SAndrew.Bardsley@arm.com data_read_pkt->dataStatic(&dataReg); 16110259SAndrew.Bardsley@arm.com 16210259SAndrew.Bardsley@arm.com data_write_req = new Request(); 16310259SAndrew.Bardsley@arm.com 
data_write_req->setThreadContext(p->cpu_id, 0); // Add thread ID here too 16410259SAndrew.Bardsley@arm.com data_write_pkt = new Packet(data_write_req, MemCmd::WriteReq, 16510259SAndrew.Bardsley@arm.com Packet::Broadcast); 16610259SAndrew.Bardsley@arm.com data_swap_pkt = new Packet(data_write_req, MemCmd::SwapReq, 16710259SAndrew.Bardsley@arm.com Packet::Broadcast); 16810259SAndrew.Bardsley@arm.com} 16910259SAndrew.Bardsley@arm.com 17010259SAndrew.Bardsley@arm.com 17110259SAndrew.Bardsley@arm.comAtomicSimpleCPU::~AtomicSimpleCPU() 17210259SAndrew.Bardsley@arm.com{ 17310259SAndrew.Bardsley@arm.com} 17410259SAndrew.Bardsley@arm.com 17510259SAndrew.Bardsley@arm.comvoid 17610259SAndrew.Bardsley@arm.comAtomicSimpleCPU::serialize(ostream &os) 17710259SAndrew.Bardsley@arm.com{ 17810259SAndrew.Bardsley@arm.com SimObject::State so_state = SimObject::getState(); 17910259SAndrew.Bardsley@arm.com SERIALIZE_ENUM(so_state); 18010259SAndrew.Bardsley@arm.com Status _status = status(); 18110259SAndrew.Bardsley@arm.com SERIALIZE_ENUM(_status); 18210259SAndrew.Bardsley@arm.com BaseSimpleCPU::serialize(os); 18310259SAndrew.Bardsley@arm.com nameOut(os, csprintf("%s.tickEvent", name())); 18410259SAndrew.Bardsley@arm.com tickEvent.serialize(os); 18510259SAndrew.Bardsley@arm.com} 18610259SAndrew.Bardsley@arm.com 18710259SAndrew.Bardsley@arm.comvoid 18810259SAndrew.Bardsley@arm.comAtomicSimpleCPU::unserialize(Checkpoint *cp, const string §ion) 18910259SAndrew.Bardsley@arm.com{ 19010259SAndrew.Bardsley@arm.com SimObject::State so_state; 19110259SAndrew.Bardsley@arm.com UNSERIALIZE_ENUM(so_state); 19210259SAndrew.Bardsley@arm.com UNSERIALIZE_ENUM(_status); 19310259SAndrew.Bardsley@arm.com BaseSimpleCPU::unserialize(cp, section); 19410259SAndrew.Bardsley@arm.com tickEvent.unserialize(cp, csprintf("%s.tickEvent", section)); 19510259SAndrew.Bardsley@arm.com} 19610259SAndrew.Bardsley@arm.com 19710259SAndrew.Bardsley@arm.comvoid 19810259SAndrew.Bardsley@arm.comAtomicSimpleCPU::resume() 
19910259SAndrew.Bardsley@arm.com{ 20010259SAndrew.Bardsley@arm.com if (_status != SwitchedOut && _status != Idle) { 20110259SAndrew.Bardsley@arm.com assert(system->getMemoryMode() == System::Atomic); 20210259SAndrew.Bardsley@arm.com 20310259SAndrew.Bardsley@arm.com changeState(SimObject::Running); 20410259SAndrew.Bardsley@arm.com if (thread->status() == ThreadContext::Active) { 20510259SAndrew.Bardsley@arm.com if (!tickEvent.scheduled()) { 20610259SAndrew.Bardsley@arm.com tickEvent.schedule(nextCycle()); 20710259SAndrew.Bardsley@arm.com } 20810259SAndrew.Bardsley@arm.com } 20910259SAndrew.Bardsley@arm.com } 21010259SAndrew.Bardsley@arm.com} 21110259SAndrew.Bardsley@arm.com 21210259SAndrew.Bardsley@arm.comvoid 21310259SAndrew.Bardsley@arm.comAtomicSimpleCPU::switchOut() 21410259SAndrew.Bardsley@arm.com{ 21510259SAndrew.Bardsley@arm.com assert(status() == Running || status() == Idle); 21610259SAndrew.Bardsley@arm.com _status = SwitchedOut; 21710259SAndrew.Bardsley@arm.com 21810259SAndrew.Bardsley@arm.com tickEvent.squash(); 21910259SAndrew.Bardsley@arm.com} 22010259SAndrew.Bardsley@arm.com 22110259SAndrew.Bardsley@arm.com 22210259SAndrew.Bardsley@arm.comvoid 22310259SAndrew.Bardsley@arm.comAtomicSimpleCPU::takeOverFrom(BaseCPU *oldCPU) 22410259SAndrew.Bardsley@arm.com{ 22510259SAndrew.Bardsley@arm.com BaseCPU::takeOverFrom(oldCPU, &icachePort, &dcachePort); 22610259SAndrew.Bardsley@arm.com 22710259SAndrew.Bardsley@arm.com assert(!tickEvent.scheduled()); 22810259SAndrew.Bardsley@arm.com 22910259SAndrew.Bardsley@arm.com // if any of this CPU's ThreadContexts are active, mark the CPU as 23010259SAndrew.Bardsley@arm.com // running and schedule its tick event. 
23110259SAndrew.Bardsley@arm.com for (int i = 0; i < threadContexts.size(); ++i) { 23210259SAndrew.Bardsley@arm.com ThreadContext *tc = threadContexts[i]; 23310259SAndrew.Bardsley@arm.com if (tc->status() == ThreadContext::Active && _status != Running) { 23410259SAndrew.Bardsley@arm.com _status = Running; 23510259SAndrew.Bardsley@arm.com tickEvent.schedule(nextCycle()); 23610259SAndrew.Bardsley@arm.com break; 23710259SAndrew.Bardsley@arm.com } 23810259SAndrew.Bardsley@arm.com } 23910259SAndrew.Bardsley@arm.com if (_status != Running) { 24010259SAndrew.Bardsley@arm.com _status = Idle; 24110259SAndrew.Bardsley@arm.com } 24210259SAndrew.Bardsley@arm.com} 243 244 245void 246AtomicSimpleCPU::activateContext(int thread_num, int delay) 247{ 248 assert(thread_num == 0); 249 assert(thread); 250 251 assert(_status == Idle); 252 assert(!tickEvent.scheduled()); 253 254 notIdleFraction++; 255 256 //Make sure ticks are still on multiples of cycles 257 tickEvent.schedule(nextCycle(curTick + cycles(delay))); 258 _status = Running; 259} 260 261 262void 263AtomicSimpleCPU::suspendContext(int thread_num) 264{ 265 assert(thread_num == 0); 266 assert(thread); 267 268 assert(_status == Running); 269 270 // tick event may not be scheduled if this gets called from inside 271 // an instruction's execution, e.g. "quiesce" 272 if (tickEvent.scheduled()) 273 tickEvent.deschedule(); 274 275 notIdleFraction--; 276 _status = Idle; 277} 278 279 280template <class T> 281Fault 282AtomicSimpleCPU::read(Addr addr, T &data, unsigned flags) 283{ 284 // use the CPU's statically allocated read request and packet objects 285 Request *req = data_read_req; 286 PacketPtr pkt = data_read_pkt; 287 288 req->setVirt(0, addr, sizeof(T), flags, thread->readPC()); 289 290 if (traceData) { 291 traceData->setAddr(addr); 292 } 293 294 // translate to physical address 295 Fault fault = thread->translateDataReadReq(req); 296 297 // Now do the access. 
298 if (fault == NoFault) { 299 pkt->reinitFromRequest(); 300 301 if (req->isMmapedIpr()) 302 dcache_latency = TheISA::handleIprRead(thread->getTC(),pkt); 303 else 304 dcache_latency = dcachePort.sendAtomic(pkt); 305 dcache_access = true; 306#if !defined(NDEBUG) 307 if (pkt->result != Packet::Success) 308 panic("Unable to find responder for address pa = %#X va = %#X\n", 309 pkt->req->getPaddr(), pkt->req->getVaddr()); 310#endif 311 data = pkt->get<T>(); 312 313 if (req->isLocked()) { 314 TheISA::handleLockedRead(thread, req); 315 } 316 } 317 318 // This will need a new way to tell if it has a dcache attached. 319 if (req->isUncacheable()) 320 recordEvent("Uncached Read"); 321 322 return fault; 323} 324 325#ifndef DOXYGEN_SHOULD_SKIP_THIS 326 327template 328Fault 329AtomicSimpleCPU::read(Addr addr, Twin32_t &data, unsigned flags); 330 331template 332Fault 333AtomicSimpleCPU::read(Addr addr, Twin64_t &data, unsigned flags); 334 335template 336Fault 337AtomicSimpleCPU::read(Addr addr, uint64_t &data, unsigned flags); 338 339template 340Fault 341AtomicSimpleCPU::read(Addr addr, uint32_t &data, unsigned flags); 342 343template 344Fault 345AtomicSimpleCPU::read(Addr addr, uint16_t &data, unsigned flags); 346 347template 348Fault 349AtomicSimpleCPU::read(Addr addr, uint8_t &data, unsigned flags); 350 351#endif //DOXYGEN_SHOULD_SKIP_THIS 352 353template<> 354Fault 355AtomicSimpleCPU::read(Addr addr, double &data, unsigned flags) 356{ 357 return read(addr, *(uint64_t*)&data, flags); 358} 359 360template<> 361Fault 362AtomicSimpleCPU::read(Addr addr, float &data, unsigned flags) 363{ 364 return read(addr, *(uint32_t*)&data, flags); 365} 366 367 368template<> 369Fault 370AtomicSimpleCPU::read(Addr addr, int32_t &data, unsigned flags) 371{ 372 return read(addr, (uint32_t&)data, flags); 373} 374 375 376template <class T> 377Fault 378AtomicSimpleCPU::write(T data, Addr addr, unsigned flags, uint64_t *res) 379{ 380 // use the CPU's statically allocated write request and packet 
objects 381 Request *req = data_write_req; 382 PacketPtr pkt; 383 384 req->setVirt(0, addr, sizeof(T), flags, thread->readPC()); 385 386 if (req->isSwap()) 387 pkt = data_swap_pkt; 388 else 389 pkt = data_write_pkt; 390 391 if (traceData) { 392 traceData->setAddr(addr); 393 } 394 395 // translate to physical address 396 Fault fault = thread->translateDataWriteReq(req); 397 398 // Now do the access. 399 if (fault == NoFault) { 400 bool do_access = true; // flag to suppress cache access 401 402 if (req->isLocked()) { 403 do_access = TheISA::handleLockedWrite(thread, req); 404 } 405 if (req->isCondSwap()) { 406 assert(res); 407 req->setExtraData(*res); 408 } 409 410 411 if (do_access) { 412 pkt->reinitFromRequest(); 413 pkt->dataStatic(&data); 414 415 if (req->isMmapedIpr()) { 416 dcache_latency = TheISA::handleIprWrite(thread->getTC(), pkt); 417 } else { 418 data = htog(data); 419 dcache_latency = dcachePort.sendAtomic(pkt); 420 } 421 dcache_access = true; 422 423#if !defined(NDEBUG) 424 if (pkt->result != Packet::Success) 425 panic("Unable to find responder for address pa = %#X va = %#X\n", 426 pkt->req->getPaddr(), pkt->req->getVaddr()); 427#endif 428 } 429 430 if (req->isSwap()) { 431 assert(res); 432 *res = pkt->get<T>(); 433 } else if (res) { 434 *res = req->getExtraData(); 435 } 436 } 437 438 // This will need a new way to tell if it's hooked up to a cache or not. 439 if (req->isUncacheable()) 440 recordEvent("Uncached Write"); 441 442 // If the write needs to have a fault on the access, consider calling 443 // changeStatus() and changing it to "bad addr write" or something. 
444 return fault; 445} 446 447 448#ifndef DOXYGEN_SHOULD_SKIP_THIS 449template 450Fault 451AtomicSimpleCPU::write(uint64_t data, Addr addr, 452 unsigned flags, uint64_t *res); 453 454template 455Fault 456AtomicSimpleCPU::write(uint32_t data, Addr addr, 457 unsigned flags, uint64_t *res); 458 459template 460Fault 461AtomicSimpleCPU::write(uint16_t data, Addr addr, 462 unsigned flags, uint64_t *res); 463 464template 465Fault 466AtomicSimpleCPU::write(uint8_t data, Addr addr, 467 unsigned flags, uint64_t *res); 468 469#endif //DOXYGEN_SHOULD_SKIP_THIS 470 471template<> 472Fault 473AtomicSimpleCPU::write(double data, Addr addr, unsigned flags, uint64_t *res) 474{ 475 return write(*(uint64_t*)&data, addr, flags, res); 476} 477 478template<> 479Fault 480AtomicSimpleCPU::write(float data, Addr addr, unsigned flags, uint64_t *res) 481{ 482 return write(*(uint32_t*)&data, addr, flags, res); 483} 484 485 486template<> 487Fault 488AtomicSimpleCPU::write(int32_t data, Addr addr, unsigned flags, uint64_t *res) 489{ 490 return write((uint32_t)data, addr, flags, res); 491} 492 493 494void 495AtomicSimpleCPU::tick() 496{ 497 Tick latency = cycles(1); // instruction takes one cycle by default 498 499 for (int i = 0; i < width; ++i) { 500 numCycles++; 501 502 if (!curStaticInst || !curStaticInst->isDelayedCommit()) 503 checkForInterrupts(); 504 505 Fault fault = setupFetchRequest(ifetch_req); 506 507 if (fault == NoFault) { 508 ifetch_pkt->reinitFromRequest(); 509 510 Tick icache_latency = icachePort.sendAtomic(ifetch_pkt); 511 // ifetch_req is initialized to read the instruction directly 512 // into the CPU object's inst field. 
513 514 dcache_access = false; // assume no dcache access 515 preExecute(); 516 517 fault = curStaticInst->execute(this, traceData); 518 postExecute(); 519 520 // @todo remove me after debugging with legion done 521 if (curStaticInst && (!curStaticInst->isMicroOp() || 522 curStaticInst->isFirstMicroOp())) 523 instCnt++; 524 525 if (simulate_stalls) { 526 Tick icache_stall = icache_latency - cycles(1); 527 Tick dcache_stall = 528 dcache_access ? dcache_latency - cycles(1) : 0; 529 Tick stall_cycles = (icache_stall + dcache_stall) / cycles(1); 530 if (cycles(stall_cycles) < (icache_stall + dcache_stall)) 531 latency += cycles(stall_cycles+1); 532 else 533 latency += cycles(stall_cycles); 534 } 535 536 } 537 538 advancePC(fault); 539 } 540 541 if (_status != Idle) 542 tickEvent.schedule(curTick + latency); 543} 544 545 546//////////////////////////////////////////////////////////////////////// 547// 548// AtomicSimpleCPU Simulation Object 549// 550BEGIN_DECLARE_SIM_OBJECT_PARAMS(AtomicSimpleCPU) 551 552 Param<Counter> max_insts_any_thread; 553 Param<Counter> max_insts_all_threads; 554 Param<Counter> max_loads_any_thread; 555 Param<Counter> max_loads_all_threads; 556 Param<Tick> progress_interval; 557 SimObjectParam<System *> system; 558 Param<int> cpu_id; 559 560#if FULL_SYSTEM 561 SimObjectParam<TheISA::ITB *> itb; 562 SimObjectParam<TheISA::DTB *> dtb; 563 Param<Tick> profile; 564 565 Param<bool> do_quiesce; 566 Param<bool> do_checkpoint_insts; 567 Param<bool> do_statistics_insts; 568#else 569 SimObjectParam<Process *> workload; 570#endif // FULL_SYSTEM 571 572 Param<int> clock; 573 Param<int> phase; 574 575 Param<bool> defer_registration; 576 Param<int> width; 577 Param<bool> function_trace; 578 Param<Tick> function_trace_start; 579 Param<bool> simulate_stalls; 580 581END_DECLARE_SIM_OBJECT_PARAMS(AtomicSimpleCPU) 582 583BEGIN_INIT_SIM_OBJECT_PARAMS(AtomicSimpleCPU) 584 585 INIT_PARAM(max_insts_any_thread, 586 "terminate when any thread reaches this inst count"), 
587 INIT_PARAM(max_insts_all_threads, 588 "terminate when all threads have reached this inst count"), 589 INIT_PARAM(max_loads_any_thread, 590 "terminate when any thread reaches this load count"), 591 INIT_PARAM(max_loads_all_threads, 592 "terminate when all threads have reached this load count"), 593 INIT_PARAM(progress_interval, "Progress interval"), 594 INIT_PARAM(system, "system object"), 595 INIT_PARAM(cpu_id, "processor ID"), 596 597#if FULL_SYSTEM 598 INIT_PARAM(itb, "Instruction TLB"), 599 INIT_PARAM(dtb, "Data TLB"), 600 INIT_PARAM(profile, ""), 601 INIT_PARAM(do_quiesce, ""), 602 INIT_PARAM(do_checkpoint_insts, ""), 603 INIT_PARAM(do_statistics_insts, ""), 604#else 605 INIT_PARAM(workload, "processes to run"), 606#endif // FULL_SYSTEM 607 608 INIT_PARAM(clock, "clock speed"), 609 INIT_PARAM_DFLT(phase, "clock phase", 0), 610 INIT_PARAM(defer_registration, "defer system registration (for sampling)"), 611 INIT_PARAM(width, "cpu width"), 612 INIT_PARAM(function_trace, "Enable function trace"), 613 INIT_PARAM(function_trace_start, "Cycle to start function trace"), 614 INIT_PARAM(simulate_stalls, "Simulate cache stall cycles") 615 616END_INIT_SIM_OBJECT_PARAMS(AtomicSimpleCPU) 617 618 619CREATE_SIM_OBJECT(AtomicSimpleCPU) 620{ 621 AtomicSimpleCPU::Params *params = new AtomicSimpleCPU::Params(); 622 params->name = getInstanceName(); 623 params->numberOfThreads = 1; 624 params->max_insts_any_thread = max_insts_any_thread; 625 params->max_insts_all_threads = max_insts_all_threads; 626 params->max_loads_any_thread = max_loads_any_thread; 627 params->max_loads_all_threads = max_loads_all_threads; 628 params->progress_interval = progress_interval; 629 params->deferRegistration = defer_registration; 630 params->phase = phase; 631 params->clock = clock; 632 params->functionTrace = function_trace; 633 params->functionTraceStart = function_trace_start; 634 params->width = width; 635 params->simulate_stalls = simulate_stalls; 636 params->system = system; 637 
params->cpu_id = cpu_id; 638 639#if FULL_SYSTEM 640 params->itb = itb; 641 params->dtb = dtb; 642 params->profile = profile; 643 params->do_quiesce = do_quiesce; 644 params->do_checkpoint_insts = do_checkpoint_insts; 645 params->do_statistics_insts = do_statistics_insts; 646#else 647 params->process = workload; 648#endif 649 650 AtomicSimpleCPU *cpu = new AtomicSimpleCPU(params); 651 return cpu; 652} 653 654REGISTER_SIM_OBJECT("AtomicSimpleCPU", AtomicSimpleCPU) 655 656