// base.cc revision 2262
17635SBrad.Beckmann@amd.com/* 27635SBrad.Beckmann@amd.com * Copyright (c) 2002-2005 The Regents of The University of Michigan 37635SBrad.Beckmann@amd.com * All rights reserved. 47635SBrad.Beckmann@amd.com * 57635SBrad.Beckmann@amd.com * Redistribution and use in source and binary forms, with or without 67635SBrad.Beckmann@amd.com * modification, are permitted provided that the following conditions are 77635SBrad.Beckmann@amd.com * met: redistributions of source code must retain the above copyright 87635SBrad.Beckmann@amd.com * notice, this list of conditions and the following disclaimer; 97635SBrad.Beckmann@amd.com * redistributions in binary form must reproduce the above copyright 107635SBrad.Beckmann@amd.com * notice, this list of conditions and the following disclaimer in the 117635SBrad.Beckmann@amd.com * documentation and/or other materials provided with the distribution; 127635SBrad.Beckmann@amd.com * neither the name of the copyright holders nor the names of its 137635SBrad.Beckmann@amd.com * contributors may be used to endorse or promote products derived from 147635SBrad.Beckmann@amd.com * this software without specific prior written permission. 157635SBrad.Beckmann@amd.com * 167635SBrad.Beckmann@amd.com * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 177635SBrad.Beckmann@amd.com * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 187635SBrad.Beckmann@amd.com * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 197635SBrad.Beckmann@amd.com * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT 207635SBrad.Beckmann@amd.com * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 217635SBrad.Beckmann@amd.com * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 227635SBrad.Beckmann@amd.com * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 237635SBrad.Beckmann@amd.com * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 247635SBrad.Beckmann@amd.com * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 257635SBrad.Beckmann@amd.com * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 267635SBrad.Beckmann@amd.com * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 277635SBrad.Beckmann@amd.com */ 287635SBrad.Beckmann@amd.com 297635SBrad.Beckmann@amd.com#include <cmath> 307635SBrad.Beckmann@amd.com#include <cstdio> 317635SBrad.Beckmann@amd.com#include <cstdlib> 327635SBrad.Beckmann@amd.com#include <iostream> 337635SBrad.Beckmann@amd.com#include <iomanip> 347635SBrad.Beckmann@amd.com#include <list> 357635SBrad.Beckmann@amd.com#include <sstream> 367635SBrad.Beckmann@amd.com#include <string> 377635SBrad.Beckmann@amd.com 387635SBrad.Beckmann@amd.com#include "base/cprintf.hh" 397635SBrad.Beckmann@amd.com#include "base/inifile.hh" 407635SBrad.Beckmann@amd.com#include "base/loader/symtab.hh" 417635SBrad.Beckmann@amd.com#include "base/misc.hh" 427635SBrad.Beckmann@amd.com#include "base/pollevent.hh" 437635SBrad.Beckmann@amd.com#include "base/range.hh" 447635SBrad.Beckmann@amd.com#include "base/stats/events.hh" 457635SBrad.Beckmann@amd.com#include "base/trace.hh" 467635SBrad.Beckmann@amd.com#include "cpu/base.hh" 477635SBrad.Beckmann@amd.com#include "cpu/cpu_exec_context.hh" 487635SBrad.Beckmann@amd.com#include "cpu/exec_context.hh" 497635SBrad.Beckmann@amd.com#include "cpu/exetrace.hh" 507635SBrad.Beckmann@amd.com#include "cpu/profile.hh" 517635SBrad.Beckmann@amd.com#include "cpu/sampler/sampler.hh" 
527635SBrad.Beckmann@amd.com#include "cpu/simple/cpu.hh" 537635SBrad.Beckmann@amd.com#include "cpu/smt.hh" 547635SBrad.Beckmann@amd.com#include "cpu/static_inst.hh" 558436SBrad.Beckmann@amd.com#include "kern/kernel_stats.hh" 568436SBrad.Beckmann@amd.com#include "mem/base_mem.hh" 578436SBrad.Beckmann@amd.com#include "mem/mem_interface.hh" 588436SBrad.Beckmann@amd.com#include "sim/byteswap.hh" 597635SBrad.Beckmann@amd.com#include "sim/builder.hh" 607635SBrad.Beckmann@amd.com#include "sim/debug.hh" 617635SBrad.Beckmann@amd.com#include "sim/host.hh" 627635SBrad.Beckmann@amd.com#include "sim/sim_events.hh" 637635SBrad.Beckmann@amd.com#include "sim/sim_object.hh" 647635SBrad.Beckmann@amd.com#include "sim/stats.hh" 657635SBrad.Beckmann@amd.com 667635SBrad.Beckmann@amd.com#if FULL_SYSTEM 677635SBrad.Beckmann@amd.com#include "base/remote_gdb.hh" 687635SBrad.Beckmann@amd.com#include "mem/functional/memory_control.hh" 697635SBrad.Beckmann@amd.com#include "mem/functional/physical.hh" 707635SBrad.Beckmann@amd.com#include "sim/system.hh" 717635SBrad.Beckmann@amd.com#include "arch/tlb.hh" 727635SBrad.Beckmann@amd.com#include "arch/stacktrace.hh" 737635SBrad.Beckmann@amd.com#include "arch/vtophys.hh" 747635SBrad.Beckmann@amd.com#else // !FULL_SYSTEM 757635SBrad.Beckmann@amd.com#include "mem/functional/functional.hh" 767635SBrad.Beckmann@amd.com#endif // FULL_SYSTEM 777635SBrad.Beckmann@amd.com 787635SBrad.Beckmann@amd.comusing namespace std; 797635SBrad.Beckmann@amd.com//The SimpleCPU does alpha only 807635SBrad.Beckmann@amd.comusing namespace AlphaISA; 817635SBrad.Beckmann@amd.com 827635SBrad.Beckmann@amd.com 837635SBrad.Beckmann@amd.comSimpleCPU::TickEvent::TickEvent(SimpleCPU *c, int w) 847635SBrad.Beckmann@amd.com : Event(&mainEventQueue, CPU_Tick_Pri), cpu(c), width(w) 857635SBrad.Beckmann@amd.com{ 867635SBrad.Beckmann@amd.com} 877635SBrad.Beckmann@amd.com 887635SBrad.Beckmann@amd.com 897635SBrad.Beckmann@amd.comvoid 907635SBrad.Beckmann@amd.comSimpleCPU::init() 
917635SBrad.Beckmann@amd.com{ 927635SBrad.Beckmann@amd.com BaseCPU::init(); 937635SBrad.Beckmann@amd.com#if FULL_SYSTEM 948436SBrad.Beckmann@amd.com for (int i = 0; i < execContexts.size(); ++i) { 957635SBrad.Beckmann@amd.com ExecContext *xc = execContexts[i]; 968437SBrad.Beckmann@amd.com 978437SBrad.Beckmann@amd.com // initialize CPU, including PC 988437SBrad.Beckmann@amd.com TheISA::initCPU(xc, xc->readCpuId()); 998437SBrad.Beckmann@amd.com } 1008437SBrad.Beckmann@amd.com#endif 1018437SBrad.Beckmann@amd.com} 1028436SBrad.Beckmann@amd.com 1037635SBrad.Beckmann@amd.comvoid 1047635SBrad.Beckmann@amd.comSimpleCPU::TickEvent::process() 1057635SBrad.Beckmann@amd.com{ 1067635SBrad.Beckmann@amd.com int count = width; 1077635SBrad.Beckmann@amd.com do { 1087635SBrad.Beckmann@amd.com cpu->tick(); 1097635SBrad.Beckmann@amd.com } while (--count > 0 && cpu->status() == Running); 1108437SBrad.Beckmann@amd.com} 1118437SBrad.Beckmann@amd.com 1128437SBrad.Beckmann@amd.comconst char * 1138437SBrad.Beckmann@amd.comSimpleCPU::TickEvent::description() 1148437SBrad.Beckmann@amd.com{ 1158437SBrad.Beckmann@amd.com return "SimpleCPU tick event"; 1168436SBrad.Beckmann@amd.com} 1177635SBrad.Beckmann@amd.com 1187635SBrad.Beckmann@amd.com 1197635SBrad.Beckmann@amd.comSimpleCPU::CacheCompletionEvent::CacheCompletionEvent(SimpleCPU *_cpu) 1207635SBrad.Beckmann@amd.com : Event(&mainEventQueue), cpu(_cpu) 1217635SBrad.Beckmann@amd.com{ 1228436SBrad.Beckmann@amd.com} 1237635SBrad.Beckmann@amd.com 1247635SBrad.Beckmann@amd.comvoid SimpleCPU::CacheCompletionEvent::process() 1257635SBrad.Beckmann@amd.com{ 1267635SBrad.Beckmann@amd.com cpu->processCacheCompletion(); 1277635SBrad.Beckmann@amd.com} 1287635SBrad.Beckmann@amd.com 1297635SBrad.Beckmann@amd.comconst char * 1308322Ssteve.reinhardt@amd.comSimpleCPU::CacheCompletionEvent::description() 1317635SBrad.Beckmann@amd.com{ 1327635SBrad.Beckmann@amd.com return "SimpleCPU cache completion event"; 1337635SBrad.Beckmann@amd.com} 
1347635SBrad.Beckmann@amd.com 1357635SBrad.Beckmann@amd.comSimpleCPU::SimpleCPU(Params *p) 1368845Sandreas.hansson@arm.com : BaseCPU(p), tickEvent(this, p->width), cpuXC(NULL), 1377635SBrad.Beckmann@amd.com cacheCompletionEvent(this) 1387635SBrad.Beckmann@amd.com{ 1397938SBrad.Beckmann@amd.com _status = Idle; 1407938SBrad.Beckmann@amd.com#if FULL_SYSTEM 1417938SBrad.Beckmann@amd.com cpuXC = new CPUExecContext(this, 0, p->system, p->itb, p->dtb, p->mem); 1427938SBrad.Beckmann@amd.com 1438322Ssteve.reinhardt@amd.com#else 1447938SBrad.Beckmann@amd.com cpuXC = new CPUExecContext(this, /* thread_num */ 0, p->process, 1458436SBrad.Beckmann@amd.com /* asid */ 0); 1468436SBrad.Beckmann@amd.com#endif // !FULL_SYSTEM 1478436SBrad.Beckmann@amd.com xcProxy = cpuXC->getProxy(); 1488436SBrad.Beckmann@amd.com 1498436SBrad.Beckmann@amd.com icacheInterface = p->icache_interface; 1508436SBrad.Beckmann@amd.com dcacheInterface = p->dcache_interface; 1517635SBrad.Beckmann@amd.com 1527635SBrad.Beckmann@amd.com memReq = new MemReq(); 1537635SBrad.Beckmann@amd.com memReq->xc = xcProxy; 1547635SBrad.Beckmann@amd.com memReq->asid = 0; 1557635SBrad.Beckmann@amd.com memReq->data = new uint8_t[64]; 1567635SBrad.Beckmann@amd.com 1577635SBrad.Beckmann@amd.com numInst = 0; 1587635SBrad.Beckmann@amd.com startNumInst = 0; 1597635SBrad.Beckmann@amd.com numLoad = 0; 1607635SBrad.Beckmann@amd.com startNumLoad = 0; 1617635SBrad.Beckmann@amd.com lastIcacheStall = 0; 1628801Sgblack@eecs.umich.edu lastDcacheStall = 0; 1637635SBrad.Beckmann@amd.com 1647635SBrad.Beckmann@amd.com execContexts.push_back(xcProxy); 1657635SBrad.Beckmann@amd.com} 1667635SBrad.Beckmann@amd.com 1677635SBrad.Beckmann@amd.comSimpleCPU::~SimpleCPU() 1687635SBrad.Beckmann@amd.com{ 1697635SBrad.Beckmann@amd.com} 1707635SBrad.Beckmann@amd.com 1717635SBrad.Beckmann@amd.comvoid 1727635SBrad.Beckmann@amd.comSimpleCPU::switchOut(Sampler *s) 1737635SBrad.Beckmann@amd.com{ 1747635SBrad.Beckmann@amd.com sampler = s; 175 if (status() == 
DcacheMissStall) { 176 DPRINTF(Sampler,"Outstanding dcache access, waiting for completion\n"); 177 _status = DcacheMissSwitch; 178 } 179 else { 180 _status = SwitchedOut; 181 182 if (tickEvent.scheduled()) 183 tickEvent.squash(); 184 185 sampler->signalSwitched(); 186 } 187} 188 189 190void 191SimpleCPU::takeOverFrom(BaseCPU *oldCPU) 192{ 193 BaseCPU::takeOverFrom(oldCPU); 194 195 assert(!tickEvent.scheduled()); 196 197 // if any of this CPU's ExecContexts are active, mark the CPU as 198 // running and schedule its tick event. 199 for (int i = 0; i < execContexts.size(); ++i) { 200 ExecContext *xc = execContexts[i]; 201 if (xc->status() == ExecContext::Active && _status != Running) { 202 _status = Running; 203 tickEvent.schedule(curTick); 204 } 205 } 206} 207 208 209void 210SimpleCPU::activateContext(int thread_num, int delay) 211{ 212 assert(thread_num == 0); 213 assert(cpuXC); 214 215 assert(_status == Idle); 216 notIdleFraction++; 217 scheduleTickEvent(delay); 218 _status = Running; 219} 220 221 222void 223SimpleCPU::suspendContext(int thread_num) 224{ 225 assert(thread_num == 0); 226 assert(cpuXC); 227 228 assert(_status == Running); 229 notIdleFraction--; 230 unscheduleTickEvent(); 231 _status = Idle; 232} 233 234 235void 236SimpleCPU::deallocateContext(int thread_num) 237{ 238 // for now, these are equivalent 239 suspendContext(thread_num); 240} 241 242 243void 244SimpleCPU::haltContext(int thread_num) 245{ 246 // for now, these are equivalent 247 suspendContext(thread_num); 248} 249 250 251void 252SimpleCPU::regStats() 253{ 254 using namespace Stats; 255 256 BaseCPU::regStats(); 257 258 numInsts 259 .name(name() + ".num_insts") 260 .desc("Number of instructions executed") 261 ; 262 263 numMemRefs 264 .name(name() + ".num_refs") 265 .desc("Number of memory references") 266 ; 267 268 notIdleFraction 269 .name(name() + ".not_idle_fraction") 270 .desc("Percentage of non-idle cycles") 271 ; 272 273 idleFraction 274 .name(name() + ".idle_fraction") 275 
.desc("Percentage of idle cycles") 276 ; 277 278 icacheStallCycles 279 .name(name() + ".icache_stall_cycles") 280 .desc("ICache total stall cycles") 281 .prereq(icacheStallCycles) 282 ; 283 284 dcacheStallCycles 285 .name(name() + ".dcache_stall_cycles") 286 .desc("DCache total stall cycles") 287 .prereq(dcacheStallCycles) 288 ; 289 290 idleFraction = constant(1.0) - notIdleFraction; 291} 292 293void 294SimpleCPU::resetStats() 295{ 296 startNumInst = numInst; 297 notIdleFraction = (_status != Idle); 298} 299 300void 301SimpleCPU::serialize(ostream &os) 302{ 303 BaseCPU::serialize(os); 304 SERIALIZE_ENUM(_status); 305 SERIALIZE_SCALAR(inst); 306 nameOut(os, csprintf("%s.xc", name())); 307 cpuXC->serialize(os); 308 nameOut(os, csprintf("%s.tickEvent", name())); 309 tickEvent.serialize(os); 310 nameOut(os, csprintf("%s.cacheCompletionEvent", name())); 311 cacheCompletionEvent.serialize(os); 312} 313 314void 315SimpleCPU::unserialize(Checkpoint *cp, const string §ion) 316{ 317 BaseCPU::unserialize(cp, section); 318 UNSERIALIZE_ENUM(_status); 319 UNSERIALIZE_SCALAR(inst); 320 cpuXC->unserialize(cp, csprintf("%s.xc", section)); 321 tickEvent.unserialize(cp, csprintf("%s.tickEvent", section)); 322 cacheCompletionEvent 323 .unserialize(cp, csprintf("%s.cacheCompletionEvent", section)); 324} 325 326void 327change_thread_state(int thread_number, int activate, int priority) 328{ 329} 330 331Fault 332SimpleCPU::copySrcTranslate(Addr src) 333{ 334 static bool no_warn = true; 335 int blk_size = (dcacheInterface) ? dcacheInterface->getBlockSize() : 64; 336 // Only support block sizes of 64 atm. 
337 assert(blk_size == 64); 338 int offset = src & (blk_size - 1); 339 340 // Make sure block doesn't span page 341 if (no_warn && 342 (src & PageMask) != ((src + blk_size) & PageMask) && 343 (src >> 40) != 0xfffffc) { 344 warn("Copied block source spans pages %x.", src); 345 no_warn = false; 346 } 347 348 memReq->reset(src & ~(blk_size - 1), blk_size); 349 350 // translate to physical address 351 Fault fault = cpuXC->translateDataReadReq(memReq); 352 353 if (fault == NoFault) { 354 cpuXC->copySrcAddr = src; 355 cpuXC->copySrcPhysAddr = memReq->paddr + offset; 356 } else { 357 assert(!fault->isAlignmentFault()); 358 359 cpuXC->copySrcAddr = 0; 360 cpuXC->copySrcPhysAddr = 0; 361 } 362 return fault; 363} 364 365Fault 366SimpleCPU::copy(Addr dest) 367{ 368 static bool no_warn = true; 369 int blk_size = (dcacheInterface) ? dcacheInterface->getBlockSize() : 64; 370 // Only support block sizes of 64 atm. 371 assert(blk_size == 64); 372 uint8_t data[blk_size]; 373 //assert(cpuXC->copySrcAddr); 374 int offset = dest & (blk_size - 1); 375 376 // Make sure block doesn't span page 377 if (no_warn && 378 (dest & PageMask) != ((dest + blk_size) & PageMask) && 379 (dest >> 40) != 0xfffffc) { 380 no_warn = false; 381 warn("Copied block destination spans pages %x. ", dest); 382 } 383 384 memReq->reset(dest & ~(blk_size -1), blk_size); 385 // translate to physical address 386 Fault fault = cpuXC->translateDataWriteReq(memReq); 387 388 if (fault == NoFault) { 389 Addr dest_addr = memReq->paddr + offset; 390 // Need to read straight from memory since we have more than 8 bytes. 
391 memReq->paddr = cpuXC->copySrcPhysAddr; 392 cpuXC->mem->read(memReq, data); 393 memReq->paddr = dest_addr; 394 cpuXC->mem->write(memReq, data); 395 if (dcacheInterface) { 396 memReq->cmd = Copy; 397 memReq->completionEvent = NULL; 398 memReq->paddr = cpuXC->copySrcPhysAddr; 399 memReq->dest = dest_addr; 400 memReq->size = 64; 401 memReq->time = curTick; 402 memReq->flags &= ~INST_READ; 403 dcacheInterface->access(memReq); 404 } 405 } 406 else 407 assert(!fault->isAlignmentFault()); 408 409 return fault; 410} 411 412// precise architected memory state accessor macros 413template <class T> 414Fault 415SimpleCPU::read(Addr addr, T &data, unsigned flags) 416{ 417 if (status() == DcacheMissStall || status() == DcacheMissSwitch) { 418 Fault fault = cpuXC->read(memReq,data); 419 420 if (traceData) { 421 traceData->setAddr(addr); 422 } 423 return fault; 424 } 425 426 memReq->reset(addr, sizeof(T), flags); 427 428 // translate to physical address 429 Fault fault = cpuXC->translateDataReadReq(memReq); 430 431 // if we have a cache, do cache access too 432 if (fault == NoFault && dcacheInterface) { 433 memReq->cmd = Read; 434 memReq->completionEvent = NULL; 435 memReq->time = curTick; 436 memReq->flags &= ~INST_READ; 437 MemAccessResult result = dcacheInterface->access(memReq); 438 439 // Ugly hack to get an event scheduled *only* if the access is 440 // a miss. We really should add first-class support for this 441 // at some point. 
442 if (result != MA_HIT && dcacheInterface->doEvents()) { 443 memReq->completionEvent = &cacheCompletionEvent; 444 lastDcacheStall = curTick; 445 unscheduleTickEvent(); 446 _status = DcacheMissStall; 447 } else { 448 // do functional access 449 fault = cpuXC->read(memReq, data); 450 451 } 452 } else if(fault == NoFault) { 453 // do functional access 454 fault = cpuXC->read(memReq, data); 455 456 } 457 458 if (!dcacheInterface && (memReq->flags & UNCACHEABLE)) 459 recordEvent("Uncached Read"); 460 461 return fault; 462} 463 464#ifndef DOXYGEN_SHOULD_SKIP_THIS 465 466template 467Fault 468SimpleCPU::read(Addr addr, uint64_t &data, unsigned flags); 469 470template 471Fault 472SimpleCPU::read(Addr addr, uint32_t &data, unsigned flags); 473 474template 475Fault 476SimpleCPU::read(Addr addr, uint16_t &data, unsigned flags); 477 478template 479Fault 480SimpleCPU::read(Addr addr, uint8_t &data, unsigned flags); 481 482#endif //DOXYGEN_SHOULD_SKIP_THIS 483 484template<> 485Fault 486SimpleCPU::read(Addr addr, double &data, unsigned flags) 487{ 488 return read(addr, *(uint64_t*)&data, flags); 489} 490 491template<> 492Fault 493SimpleCPU::read(Addr addr, float &data, unsigned flags) 494{ 495 return read(addr, *(uint32_t*)&data, flags); 496} 497 498 499template<> 500Fault 501SimpleCPU::read(Addr addr, int32_t &data, unsigned flags) 502{ 503 return read(addr, (uint32_t&)data, flags); 504} 505 506 507template <class T> 508Fault 509SimpleCPU::write(T data, Addr addr, unsigned flags, uint64_t *res) 510{ 511 memReq->reset(addr, sizeof(T), flags); 512 513 // translate to physical address 514 Fault fault = cpuXC->translateDataWriteReq(memReq); 515 516 // do functional access 517 if (fault == NoFault) 518 fault = cpuXC->write(memReq, data); 519 520 if (fault == NoFault && dcacheInterface) { 521 memReq->cmd = Write; 522 memcpy(memReq->data,(uint8_t *)&data,memReq->size); 523 memReq->completionEvent = NULL; 524 memReq->time = curTick; 525 memReq->flags &= ~INST_READ; 526 MemAccessResult 
result = dcacheInterface->access(memReq); 527 528 // Ugly hack to get an event scheduled *only* if the access is 529 // a miss. We really should add first-class support for this 530 // at some point. 531 if (result != MA_HIT && dcacheInterface->doEvents()) { 532 memReq->completionEvent = &cacheCompletionEvent; 533 lastDcacheStall = curTick; 534 unscheduleTickEvent(); 535 _status = DcacheMissStall; 536 } 537 } 538 539 if (res && (fault == NoFault)) 540 *res = memReq->result; 541 542 if (!dcacheInterface && (memReq->flags & UNCACHEABLE)) 543 recordEvent("Uncached Write"); 544 545 return fault; 546} 547 548 549#ifndef DOXYGEN_SHOULD_SKIP_THIS 550template 551Fault 552SimpleCPU::write(uint64_t data, Addr addr, unsigned flags, uint64_t *res); 553 554template 555Fault 556SimpleCPU::write(uint32_t data, Addr addr, unsigned flags, uint64_t *res); 557 558template 559Fault 560SimpleCPU::write(uint16_t data, Addr addr, unsigned flags, uint64_t *res); 561 562template 563Fault 564SimpleCPU::write(uint8_t data, Addr addr, unsigned flags, uint64_t *res); 565 566#endif //DOXYGEN_SHOULD_SKIP_THIS 567 568template<> 569Fault 570SimpleCPU::write(double data, Addr addr, unsigned flags, uint64_t *res) 571{ 572 return write(*(uint64_t*)&data, addr, flags, res); 573} 574 575template<> 576Fault 577SimpleCPU::write(float data, Addr addr, unsigned flags, uint64_t *res) 578{ 579 return write(*(uint32_t*)&data, addr, flags, res); 580} 581 582 583template<> 584Fault 585SimpleCPU::write(int32_t data, Addr addr, unsigned flags, uint64_t *res) 586{ 587 return write((uint32_t)data, addr, flags, res); 588} 589 590 591#if FULL_SYSTEM 592Addr 593SimpleCPU::dbg_vtophys(Addr addr) 594{ 595 return vtophys(xcProxy, addr); 596} 597#endif // FULL_SYSTEM 598 599void 600SimpleCPU::processCacheCompletion() 601{ 602 switch (status()) { 603 case IcacheMissStall: 604 icacheStallCycles += curTick - lastIcacheStall; 605 _status = IcacheMissComplete; 606 scheduleTickEvent(1); 607 break; 608 case DcacheMissStall: 609 
if (memReq->cmd.isRead()) { 610 curStaticInst->execute(this,traceData); 611 if (traceData) 612 traceData->finalize(); 613 } 614 dcacheStallCycles += curTick - lastDcacheStall; 615 _status = Running; 616 scheduleTickEvent(1); 617 break; 618 case DcacheMissSwitch: 619 if (memReq->cmd.isRead()) { 620 curStaticInst->execute(this,traceData); 621 if (traceData) 622 traceData->finalize(); 623 } 624 _status = SwitchedOut; 625 sampler->signalSwitched(); 626 case SwitchedOut: 627 // If this CPU has been switched out due to sampling/warm-up, 628 // ignore any further status changes (e.g., due to cache 629 // misses outstanding at the time of the switch). 630 return; 631 default: 632 panic("SimpleCPU::processCacheCompletion: bad state"); 633 break; 634 } 635} 636 637#if FULL_SYSTEM 638void 639SimpleCPU::post_interrupt(int int_num, int index) 640{ 641 BaseCPU::post_interrupt(int_num, index); 642 643 if (cpuXC->status() == ExecContext::Suspended) { 644 DPRINTF(IPI,"Suspended Processor awoke\n"); 645 cpuXC->activate(); 646 } 647} 648#endif // FULL_SYSTEM 649 650/* start simulation, program loaded, processor precise state initialized */ 651void 652SimpleCPU::tick() 653{ 654 numCycles++; 655 656 traceData = NULL; 657 658 Fault fault = NoFault; 659 660#if FULL_SYSTEM 661 if (checkInterrupts && check_interrupts() && !cpuXC->inPalMode() && 662 status() != IcacheMissComplete) { 663 int ipl = 0; 664 int summary = 0; 665 checkInterrupts = false; 666 667 if (cpuXC->readMiscReg(IPR_SIRR)) { 668 for (int i = INTLEVEL_SOFTWARE_MIN; 669 i < INTLEVEL_SOFTWARE_MAX; i++) { 670 if (cpuXC->readMiscReg(IPR_SIRR) & (ULL(1) << i)) { 671 // See table 4-19 of 21164 hardware reference 672 ipl = (i - INTLEVEL_SOFTWARE_MIN) + 1; 673 summary |= (ULL(1) << i); 674 } 675 } 676 } 677 678 uint64_t interrupts = cpuXC->cpu->intr_status(); 679 for (int i = INTLEVEL_EXTERNAL_MIN; 680 i < INTLEVEL_EXTERNAL_MAX; i++) { 681 if (interrupts & (ULL(1) << i)) { 682 // See table 4-19 of 21164 hardware reference 683 ipl = 
i; 684 summary |= (ULL(1) << i); 685 } 686 } 687 688 if (cpuXC->readMiscReg(IPR_ASTRR)) 689 panic("asynchronous traps not implemented\n"); 690 691 if (ipl && ipl > cpuXC->readMiscReg(IPR_IPLR)) { 692 cpuXC->setMiscReg(IPR_ISR, summary); 693 cpuXC->setMiscReg(IPR_INTID, ipl); 694 695 Fault(new InterruptFault)->invoke(xcProxy); 696 697 DPRINTF(Flow, "Interrupt! IPLR=%d ipl=%d summary=%x\n", 698 cpuXC->readMiscReg(IPR_IPLR), ipl, summary); 699 } 700 } 701#endif 702 703 // maintain $r0 semantics 704 cpuXC->setIntReg(ZeroReg, 0); 705#ifdef TARGET_ALPHA 706 cpuXC->setFloatRegDouble(ZeroReg, 0.0); 707#endif // TARGET_ALPHA 708 709 if (status() == IcacheMissComplete) { 710 // We've already fetched an instruction and were stalled on an 711 // I-cache miss. No need to fetch it again. 712 713 // Set status to running; tick event will get rescheduled if 714 // necessary at end of tick() function. 715 _status = Running; 716 } 717 else { 718 // Try to fetch an instruction 719 720 // set up memory request for instruction fetch 721#if FULL_SYSTEM 722#define IFETCH_FLAGS(pc) ((pc) & 1) ? PHYSICAL : 0 723#else 724#define IFETCH_FLAGS(pc) 0 725#endif 726 727 memReq->cmd = Read; 728 memReq->reset(cpuXC->readPC() & ~3, sizeof(uint32_t), 729 IFETCH_FLAGS(cpuXC->readPC())); 730 731 fault = cpuXC->translateInstReq(memReq); 732 733 if (fault == NoFault) 734 fault = cpuXC->mem->read(memReq, inst); 735 736 if (icacheInterface && fault == NoFault) { 737 memReq->completionEvent = NULL; 738 739 memReq->time = curTick; 740 memReq->flags |= INST_READ; 741 MemAccessResult result = icacheInterface->access(memReq); 742 743 // Ugly hack to get an event scheduled *only* if the access is 744 // a miss. We really should add first-class support for this 745 // at some point. 
746 if (result != MA_HIT && icacheInterface->doEvents()) { 747 memReq->completionEvent = &cacheCompletionEvent; 748 lastIcacheStall = curTick; 749 unscheduleTickEvent(); 750 _status = IcacheMissStall; 751 return; 752 } 753 } 754 } 755 756 // If we've got a valid instruction (i.e., no fault on instruction 757 // fetch), then execute it. 758 if (fault == NoFault) { 759 760 // keep an instruction count 761 numInst++; 762 numInsts++; 763 764 // check for instruction-count-based events 765 comInstEventQueue[0]->serviceEvents(numInst); 766 767 // decode the instruction 768 inst = gtoh(inst); 769 curStaticInst = StaticInst::decode(makeExtMI(inst, cpuXC->readPC())); 770 771 traceData = Trace::getInstRecord(curTick, xcProxy, this, curStaticInst, 772 cpuXC->readPC()); 773 774#if FULL_SYSTEM 775 cpuXC->setInst(inst); 776#endif // FULL_SYSTEM 777 778 cpuXC->func_exe_inst++; 779 780 fault = curStaticInst->execute(this, traceData); 781 782#if FULL_SYSTEM 783 if (system->kernelBinning->fnbin) { 784 assert(kernelStats); 785 system->kernelBinning->execute(xcProxy, inst); 786 } 787 788 if (cpuXC->profile) { 789 bool usermode = 790 (cpuXC->readMiscReg(AlphaISA::IPR_DTB_CM) & 0x18) != 0; 791 cpuXC->profilePC = usermode ? 
1 : cpuXC->readPC(); 792 ProfileNode *node = cpuXC->profile->consume(xcProxy, inst); 793 if (node) 794 cpuXC->profileNode = node; 795 } 796#endif 797 798 if (curStaticInst->isMemRef()) { 799 numMemRefs++; 800 } 801 802 if (curStaticInst->isLoad()) { 803 ++numLoad; 804 comLoadEventQueue[0]->serviceEvents(numLoad); 805 } 806 807 // If we have a dcache miss, then we can't finialize the instruction 808 // trace yet because we want to populate it with the data later 809 if (traceData && 810 !(status() == DcacheMissStall && memReq->cmd.isRead())) { 811 traceData->finalize(); 812 } 813 814 traceFunctions(cpuXC->readPC()); 815 816 } // if (fault == NoFault) 817 818 if (fault != NoFault) { 819#if FULL_SYSTEM 820 fault->invoke(xcProxy); 821#else // !FULL_SYSTEM 822 fatal("fault (%d) detected @ PC 0x%08p", fault, cpuXC->readPC()); 823#endif // FULL_SYSTEM 824 } 825 else { 826#if THE_ISA != MIPS_ISA 827 // go to the next instruction 828 cpuXC->setPC(cpuXC->readNextPC()); 829 cpuXC->setNextPC(cpuXC->readNextPC() + sizeof(MachInst)); 830#else 831 // go to the next instruction 832 cpuXC->setPC(cpuXC->readNextPC()); 833 cpuXC->setNextPC(cpuXC->readNextNPC()); 834 cpuXC->setNextNPC(cpuXC->readNextNPC() + sizeof(MachInst)); 835#endif 836 837 } 838 839#if FULL_SYSTEM 840 Addr oldpc; 841 do { 842 oldpc = cpuXC->readPC(); 843 system->pcEventQueue.service(xcProxy); 844 } while (oldpc != cpuXC->readPC()); 845#endif 846 847 assert(status() == Running || 848 status() == Idle || 849 status() == DcacheMissStall); 850 851 if (status() == Running && !tickEvent.scheduled()) 852 tickEvent.schedule(curTick + cycles(1)); 853} 854 855//////////////////////////////////////////////////////////////////////// 856// 857// SimpleCPU Simulation Object 858// 859BEGIN_DECLARE_SIM_OBJECT_PARAMS(SimpleCPU) 860 861 Param<Counter> max_insts_any_thread; 862 Param<Counter> max_insts_all_threads; 863 Param<Counter> max_loads_any_thread; 864 Param<Counter> max_loads_all_threads; 865 866#if FULL_SYSTEM 867 
SimObjectParam<AlphaITB *> itb; 868 SimObjectParam<AlphaDTB *> dtb; 869 SimObjectParam<FunctionalMemory *> mem; 870 SimObjectParam<System *> system; 871 Param<int> cpu_id; 872 Param<Tick> profile; 873#else 874 SimObjectParam<Process *> workload; 875#endif // FULL_SYSTEM 876 877 Param<int> clock; 878 SimObjectParam<BaseMem *> icache; 879 SimObjectParam<BaseMem *> dcache; 880 881 Param<bool> defer_registration; 882 Param<int> width; 883 Param<bool> function_trace; 884 Param<Tick> function_trace_start; 885 886END_DECLARE_SIM_OBJECT_PARAMS(SimpleCPU) 887 888BEGIN_INIT_SIM_OBJECT_PARAMS(SimpleCPU) 889 890 INIT_PARAM(max_insts_any_thread, 891 "terminate when any thread reaches this inst count"), 892 INIT_PARAM(max_insts_all_threads, 893 "terminate when all threads have reached this inst count"), 894 INIT_PARAM(max_loads_any_thread, 895 "terminate when any thread reaches this load count"), 896 INIT_PARAM(max_loads_all_threads, 897 "terminate when all threads have reached this load count"), 898 899#if FULL_SYSTEM 900 INIT_PARAM(itb, "Instruction TLB"), 901 INIT_PARAM(dtb, "Data TLB"), 902 INIT_PARAM(mem, "memory"), 903 INIT_PARAM(system, "system object"), 904 INIT_PARAM(cpu_id, "processor ID"), 905 INIT_PARAM(profile, ""), 906#else 907 INIT_PARAM(workload, "processes to run"), 908#endif // FULL_SYSTEM 909 910 INIT_PARAM(clock, "clock speed"), 911 INIT_PARAM(icache, "L1 instruction cache object"), 912 INIT_PARAM(dcache, "L1 data cache object"), 913 INIT_PARAM(defer_registration, "defer system registration (for sampling)"), 914 INIT_PARAM(width, "cpu width"), 915 INIT_PARAM(function_trace, "Enable function trace"), 916 INIT_PARAM(function_trace_start, "Cycle to start function trace") 917 918END_INIT_SIM_OBJECT_PARAMS(SimpleCPU) 919 920 921CREATE_SIM_OBJECT(SimpleCPU) 922{ 923 SimpleCPU::Params *params = new SimpleCPU::Params(); 924 params->name = getInstanceName(); 925 params->numberOfThreads = 1; 926 params->max_insts_any_thread = max_insts_any_thread; 927 
params->max_insts_all_threads = max_insts_all_threads; 928 params->max_loads_any_thread = max_loads_any_thread; 929 params->max_loads_all_threads = max_loads_all_threads; 930 params->deferRegistration = defer_registration; 931 params->clock = clock; 932 params->functionTrace = function_trace; 933 params->functionTraceStart = function_trace_start; 934 params->icache_interface = (icache) ? icache->getInterface() : NULL; 935 params->dcache_interface = (dcache) ? dcache->getInterface() : NULL; 936 params->width = width; 937 938#if FULL_SYSTEM 939 params->itb = itb; 940 params->dtb = dtb; 941 params->mem = mem; 942 params->system = system; 943 params->cpu_id = cpu_id; 944 params->profile = profile; 945#else 946 params->process = workload; 947#endif 948 949 SimpleCPU *cpu = new SimpleCPU(params); 950 return cpu; 951} 952 953REGISTER_SIM_OBJECT("SimpleCPU", SimpleCPU) 954 955