atomic.cc revision 10030:b531e328342d
/*
 * Copyright (c) 2012-2013 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 */

#include "arch/locked_mem.hh"
#include "arch/mmapped_ipr.hh"
#include "arch/utility.hh"
#include "base/bigint.hh"
#include "base/output.hh"
#include "config/the_isa.hh"
#include "cpu/simple/atomic.hh"
#include "cpu/exetrace.hh"
#include "debug/Drain.hh"
#include "debug/ExecFaulting.hh"
#include "debug/SimpleCPU.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "mem/physical.hh"
#include "params/AtomicSimpleCPU.hh"
#include "sim/faults.hh"
#include "sim/system.hh"
#include "sim/full_system.hh"

using namespace std;
using namespace TheISA;

AtomicSimpleCPU::TickEvent::TickEvent(AtomicSimpleCPU *c)
    : Event(CPU_Tick_Pri), cpu(c)
{
}


void
AtomicSimpleCPU::TickEvent::process()
{
    cpu->tick();
}

const char *
AtomicSimpleCPU::TickEvent::description() const
{
    return "AtomicSimpleCPU tick";
}

void
AtomicSimpleCPU::init()
{
    BaseCPU::init();

    // Initialise the ThreadContext's memory proxies
    tcBase()->initMemProxies(tcBase());

    if (FullSystem && !params()->switched_out) {
        ThreadID size = threadContexts.size();
        for (ThreadID i = 0; i < size; ++i) {
            ThreadContext *tc = threadContexts[i];
            // initialize CPU, including PC
            TheISA::initCPU(tc, tc->contextId());
        }
    }

    // Atomic doesn't do MT right now, so contextId == threadId
    ifetch_req.setThreadContext(_cpuId, 0); // Add thread ID if we add MT
    data_read_req.setThreadContext(_cpuId, 0); // Add thread ID here too
    data_write_req.setThreadContext(_cpuId, 0); // Add thread ID here too
}
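// Note: ifetch_req, data_read_req, and data_write_req are the CPU's
// statically allocated Request objects (see readMem()/writeMem() below).
// The atomic model reuses them for every access instead of allocating a
// fresh Request each time, which is why their thread context must be
// (re)initialised here and again in takeOverFrom().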
AtomicSimpleCPU::AtomicSimpleCPU(AtomicSimpleCPUParams *p)
    : BaseSimpleCPU(p), tickEvent(this), width(p->width), locked(false),
      simulate_data_stalls(p->simulate_data_stalls),
      simulate_inst_stalls(p->simulate_inst_stalls),
      drain_manager(NULL),
      icachePort(name() + ".icache_port", this),
      dcachePort(name() + ".dcache_port", this),
      fastmem(p->fastmem),
      simpoint(p->simpoint_profile),
      intervalSize(p->simpoint_interval),
      intervalCount(0),
      intervalDrift(0),
      simpointStream(NULL),
      currentBBV(0, 0),
      currentBBVInstCount(0)
{
    _status = Idle;

    if (simpoint) {
        simpointStream = simout.create(p->simpoint_profile_file, false);
    }
}


AtomicSimpleCPU::~AtomicSimpleCPU()
{
    if (tickEvent.scheduled()) {
        deschedule(tickEvent);
    }
    if (simpointStream) {
        simout.close(simpointStream);
    }
}

unsigned int
AtomicSimpleCPU::drain(DrainManager *dm)
{
    assert(!drain_manager);
    if (switchedOut())
        return 0;

    if (!isDrained()) {
        DPRINTF(Drain, "Requesting drain: %s\n", pcState());
        drain_manager = dm;
        return 1;
    } else {
        if (tickEvent.scheduled())
            deschedule(tickEvent);

        DPRINTF(Drain, "Not executing microcode, no need to drain.\n");
        return 0;
    }
}
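// Drain protocol note (a reading of the Drainable interface as used in this
// version of gem5, stated here as an assumption): drain() returns the number
// of objects that still need to drain -- 0 means the CPU is already quiescent
// and the DrainManager need not wait; 1 means the CPU is mid-instruction
// (e.g. inside a microcode sequence) and will call signalDrainDone() later,
// via tryCompleteDrain(), once it reaches a clean instruction boundary.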
void
AtomicSimpleCPU::drainResume()
{
    assert(!tickEvent.scheduled());
    assert(!drain_manager);
    if (switchedOut())
        return;

    DPRINTF(SimpleCPU, "Resume\n");
    verifyMemoryMode();

    assert(!threadContexts.empty());
    if (threadContexts.size() > 1)
        fatal("The atomic CPU only supports one thread.\n");

    if (thread->status() == ThreadContext::Active) {
        schedule(tickEvent, nextCycle());
        _status = BaseSimpleCPU::Running;
        notIdleFraction = 1;
    } else {
        _status = BaseSimpleCPU::Idle;
        notIdleFraction = 0;
    }

    system->totalNumInsts = 0;
}

bool
AtomicSimpleCPU::tryCompleteDrain()
{
    if (!drain_manager)
        return false;

    DPRINTF(Drain, "tryCompleteDrain: %s\n", pcState());
    if (!isDrained())
        return false;

    DPRINTF(Drain, "CPU done draining, processing drain event\n");
    drain_manager->signalDrainDone();
    drain_manager = NULL;

    return true;
}


void
AtomicSimpleCPU::switchOut()
{
    BaseSimpleCPU::switchOut();

    assert(!tickEvent.scheduled());
    assert(_status == BaseSimpleCPU::Running || _status == Idle);
    assert(isDrained());
}


void
AtomicSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
{
    BaseSimpleCPU::takeOverFrom(oldCPU);

    // The tick event should have been descheduled by drain()
    assert(!tickEvent.scheduled());

    ifetch_req.setThreadContext(_cpuId, 0); // Add thread ID if we add MT
    data_read_req.setThreadContext(_cpuId, 0); // Add thread ID here too
    data_write_req.setThreadContext(_cpuId, 0); // Add thread ID here too
}

void
AtomicSimpleCPU::verifyMemoryMode() const
{
    if (!system->isAtomicMode()) {
        fatal("The atomic CPU requires the memory system to be in "
              "'atomic' mode.\n");
    }
}

void
AtomicSimpleCPU::activateContext(ThreadID thread_num, Cycles delay)
{
    DPRINTF(SimpleCPU, "ActivateContext %d (%d cycles)\n", thread_num, delay);

    assert(thread_num == 0);
    assert(thread);

    assert(_status == Idle);
    assert(!tickEvent.scheduled());

    notIdleFraction = 1;
    numCycles += ticksToCycles(thread->lastActivate - thread->lastSuspend);

    //Make sure ticks are still on multiples of cycles
    schedule(tickEvent, clockEdge(delay));
    _status = BaseSimpleCPU::Running;
}


void
AtomicSimpleCPU::suspendContext(ThreadID thread_num)
{
    DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);

    assert(thread_num == 0);
    assert(thread);

    if (_status == Idle)
        return;

    assert(_status == BaseSimpleCPU::Running);

    // tick event may not be scheduled if this gets called from inside
    // an instruction's execution, e.g. "quiesce"
    if (tickEvent.scheduled())
        deschedule(tickEvent);

    notIdleFraction = 0;
    _status = Idle;
}


Tick
AtomicSimpleCPU::AtomicCPUDPort::recvAtomicSnoop(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "received snoop pkt for addr:%#x %s\n", pkt->getAddr(),
            pkt->cmdString());

    // if snoop invalidates, release any associated locks
    if (pkt->isInvalidate()) {
        DPRINTF(SimpleCPU, "received invalidation for addr:%#x\n",
                pkt->getAddr());
        TheISA::handleLockedSnoop(cpu->thread, pkt, cacheBlockMask);
    }

    return 0;
}

void
AtomicSimpleCPU::AtomicCPUDPort::recvFunctionalSnoop(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "received snoop pkt for addr:%#x %s\n", pkt->getAddr(),
            pkt->cmdString());

    // if snoop invalidates, release any associated locks
    if (pkt->isInvalidate()) {
        DPRINTF(SimpleCPU, "received invalidation for addr:%#x\n",
                pkt->getAddr());
        TheISA::handleLockedSnoop(cpu->thread, pkt, cacheBlockMask);
    }
}
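// LL/SC note: handleLockedSnoop() clears the thread's load-link reservation
// when an invalidating snoop hits the reserved cache block (cacheBlockMask
// selects the block-address bits). That is what makes a later store
// conditional to that block fail after another master has written it; see
// handleLockedWrite() in writeMem() below.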
Fault
AtomicSimpleCPU::readMem(Addr addr, uint8_t * data,
                         unsigned size, unsigned flags)
{
    // use the CPU's statically allocated read request and packet objects
    Request *req = &data_read_req;

    if (traceData) {
        traceData->setAddr(addr);
    }

    //The size of the data we're trying to read.
    int fullSize = size;

    //The address of the second part of this access if it needs to be split
    //across a cache line boundary.
    Addr secondAddr = roundDown(addr + size - 1, cacheLineSize());

    if (secondAddr > addr)
        size = secondAddr - addr;

    dcache_latency = 0;

    req->taskId(taskId());
    while (1) {
        req->setVirt(0, addr, size, flags, dataMasterId(),
                     thread->pcState().instAddr());

        // translate to physical address
        Fault fault = thread->dtb->translateAtomic(req, tc, BaseTLB::Read);

        // Now do the access.
        if (fault == NoFault && !req->getFlags().isSet(Request::NO_ACCESS)) {
            Packet pkt = Packet(req,
                                req->isLLSC() ? MemCmd::LoadLockedReq :
                                MemCmd::ReadReq);
            pkt.dataStatic(data);

            if (req->isMmappedIpr())
                dcache_latency += TheISA::handleIprRead(thread->getTC(), &pkt);
            else {
                if (fastmem && system->isMemAddr(pkt.getAddr()))
                    system->getPhysMem().access(&pkt);
                else
                    dcache_latency += dcachePort.sendAtomic(&pkt);
            }
            dcache_access = true;

            assert(!pkt.isError());

            if (req->isLLSC()) {
                TheISA::handleLockedRead(thread, req);
            }
        }

        //If there's a fault, return it
        if (fault != NoFault) {
            if (req->isPrefetch()) {
                return NoFault;
            } else {
                return fault;
            }
        }

        //If we don't need to access a second cache line, stop now.
        if (secondAddr <= addr)
        {
            if (req->isLocked() && fault == NoFault) {
                assert(!locked);
                locked = true;
            }
            return fault;
        }

        /*
         * Set up for accessing the second cache line.
         */

        //Move the pointer we're reading into to the correct location.
        data += size;
        //Adjust the size to get the remaining bytes.
        size = addr + fullSize - secondAddr;
        //And access the right address.
        addr = secondAddr;
    }
}
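// Worked example of the split-access logic above (illustrative numbers):
// with 64-byte cache lines, an 8-byte read at addr = 0x3c gives
// secondAddr = roundDown(0x3c + 8 - 1, 64) = 0x40 > addr, so the first
// iteration reads 4 bytes (0x40 - 0x3c) at 0x3c; then data is advanced by 4,
// size becomes 0x3c + 8 - 0x40 = 4, and the second iteration reads the
// remaining 4 bytes at 0x40. An access contained in one line takes the
// "secondAddr <= addr" early exit on the first iteration.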
Fault
AtomicSimpleCPU::writeMem(uint8_t *data, unsigned size,
                          Addr addr, unsigned flags, uint64_t *res)
{
    // use the CPU's statically allocated write request and packet objects
    Request *req = &data_write_req;

    if (traceData) {
        traceData->setAddr(addr);
    }

    //The size of the data we're trying to write.
    int fullSize = size;

    //The address of the second part of this access if it needs to be split
    //across a cache line boundary.
    Addr secondAddr = roundDown(addr + size - 1, cacheLineSize());

    if (secondAddr > addr)
        size = secondAddr - addr;

    dcache_latency = 0;

    req->taskId(taskId());
    while (1) {
        req->setVirt(0, addr, size, flags, dataMasterId(),
                     thread->pcState().instAddr());

        // translate to physical address
        Fault fault = thread->dtb->translateAtomic(req, tc, BaseTLB::Write);

        // Now do the access.
        if (fault == NoFault) {
            MemCmd cmd = MemCmd::WriteReq; // default
            bool do_access = true;  // flag to suppress cache access

            if (req->isLLSC()) {
                cmd = MemCmd::StoreCondReq;
                do_access =
                    TheISA::handleLockedWrite(thread, req,
                                              dcachePort.cacheBlockMask);
            } else if (req->isSwap()) {
                cmd = MemCmd::SwapReq;
                if (req->isCondSwap()) {
                    assert(res);
                    req->setExtraData(*res);
                }
            }

            if (do_access && !req->getFlags().isSet(Request::NO_ACCESS)) {
                Packet pkt = Packet(req, cmd);
                pkt.dataStatic(data);

                if (req->isMmappedIpr()) {
                    dcache_latency +=
                        TheISA::handleIprWrite(thread->getTC(), &pkt);
                } else {
                    if (fastmem && system->isMemAddr(pkt.getAddr()))
                        system->getPhysMem().access(&pkt);
                    else
                        dcache_latency += dcachePort.sendAtomic(&pkt);
                }
                dcache_access = true;
                assert(!pkt.isError());

                if (req->isSwap()) {
                    assert(res);
                    memcpy(res, pkt.getPtr<uint8_t>(), fullSize);
                }
            }

            if (res && !req->isSwap()) {
                *res = req->getExtraData();
            }
        }

        //If there's a fault or we don't need to access a second cache line,
        //stop now.
        if (fault != NoFault || secondAddr <= addr)
        {
            if (req->isLocked() && fault == NoFault) {
                assert(locked);
                locked = false;
            }
            if (fault != NoFault && req->isPrefetch()) {
                return NoFault;
            } else {
                return fault;
            }
        }

        /*
         * Set up for accessing the second cache line.
         */

        //Move the pointer we're writing from to the correct location.
        data += size;
        //Adjust the size to get the remaining bytes.
        size = addr + fullSize - secondAddr;
        //And access the right address.
        addr = secondAddr;
    }
}


void
AtomicSimpleCPU::tick()
{
    DPRINTF(SimpleCPU, "Tick\n");

    Tick latency = 0;

    for (int i = 0; i < width || locked; ++i) {
        numCycles++;

        if (!curStaticInst || !curStaticInst->isDelayedCommit())
            checkForInterrupts();

        checkPcEventQueue();
        // We must have just got suspended by a PC event
        if (_status == Idle) {
            tryCompleteDrain();
            return;
        }

        Fault fault = NoFault;

        TheISA::PCState pcState = thread->pcState();

        bool needToFetch = !isRomMicroPC(pcState.microPC()) &&
                           !curMacroStaticInst;
        if (needToFetch) {
            ifetch_req.taskId(taskId());
            setupFetchRequest(&ifetch_req);
            fault = thread->itb->translateAtomic(&ifetch_req, tc,
                                                 BaseTLB::Execute);
        }

        if (fault == NoFault) {
            Tick icache_latency = 0;
            bool icache_access = false;
            dcache_access = false; // assume no dcache access

            if (needToFetch) {
                // This is commented out because the decoder would act like
                // a tiny cache otherwise. It wouldn't be flushed when needed
                // like the I cache. It should be flushed, and when that works
                // this code should be uncommented.
                //Fetch more instruction memory if necessary
                //if (decoder.needMoreBytes())
                //{
                    icache_access = true;
                    Packet ifetch_pkt = Packet(&ifetch_req, MemCmd::ReadReq);
                    ifetch_pkt.dataStatic(&inst);

                    if (fastmem && system->isMemAddr(ifetch_pkt.getAddr()))
                        system->getPhysMem().access(&ifetch_pkt);
                    else
                        icache_latency = icachePort.sendAtomic(&ifetch_pkt);

                    assert(!ifetch_pkt.isError());

                    // ifetch_req is initialized to read the instruction
                    // directly into the CPU object's inst field.
                //}
            }

            preExecute();

            if (curStaticInst) {
                fault = curStaticInst->execute(this, traceData);

                // keep an instruction count
                if (fault == NoFault)
                    countInst();
                else if (traceData && !DTRACE(ExecFaulting)) {
                    delete traceData;
                    traceData = NULL;
                }

                postExecute();
            }

            // @todo remove me after debugging with legion done
            if (curStaticInst && (!curStaticInst->isMicroop() ||
                                  curStaticInst->isFirstMicroop()))
                instCnt++;

            // profile for SimPoints if enabled and macro inst is finished
            if (simpoint && curStaticInst && (fault == NoFault) &&
                (!curStaticInst->isMicroop() ||
                 curStaticInst->isLastMicroop())) {
                profileSimPoint();
            }

            Tick stall_ticks = 0;
            if (simulate_inst_stalls && icache_access)
                stall_ticks += icache_latency;

            if (simulate_data_stalls && dcache_access)
                stall_ticks += dcache_latency;

            if (stall_ticks) {
                // the atomic cpu does its accounting in ticks, so
                // keep counting in ticks but round to the clock
                // period
                latency += divCeil(stall_ticks, clockPeriod()) *
                           clockPeriod();
            }

        }
        if (fault != NoFault || !stayAtPC)
            advancePC(fault);
    }

    if (tryCompleteDrain())
        return;

    // instruction takes at least one cycle
    if (latency < clockPeriod())
        latency = clockPeriod();

    if (_status != Idle)
        schedule(tickEvent, curTick() + latency);
}
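// Example of the stall rounding in tick() (illustrative numbers, assuming
// gem5's default 1 ps tick so a 1 GHz CPU has clockPeriod() == 1000 ticks):
// a dcache_latency of 2500 ticks is charged as
// divCeil(2500, 1000) * 1000 = 3000 ticks, i.e. three whole cycles, and the
// final "latency < clockPeriod()" check guarantees the next tick event is
// scheduled at least one full cycle away.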
void
AtomicSimpleCPU::printAddr(Addr a)
{
    dcachePort.printAddr(a);
}

void
AtomicSimpleCPU::profileSimPoint()
{
    if (!currentBBVInstCount)
        currentBBV.first = thread->pcState().instAddr();

    ++intervalCount;
    ++currentBBVInstCount;

    // If inst is control inst, assume end of basic block.
    if (curStaticInst->isControl()) {
        currentBBV.second = thread->pcState().instAddr();

        auto map_itr = bbMap.find(currentBBV);
        if (map_itr == bbMap.end()) {
            // If a new (previously unseen) basic block is found,
            // add a new unique id, record num of insts and insert into bbMap.
            BBInfo info;
            info.id = bbMap.size() + 1;
            info.insts = currentBBVInstCount;
            info.count = currentBBVInstCount;
            bbMap.insert(std::make_pair(currentBBV, info));
        } else {
            // If basic block is seen before, just increment the count by the
            // number of insts in basic block.
            BBInfo& info = map_itr->second;
            info.count += currentBBVInstCount;
        }
        currentBBVInstCount = 0;

        // Reached end of interval if the sum of the current inst count
        // (intervalCount) and the excessive inst count from the previous
        // interval (intervalDrift) is greater than/equal to the interval size.
        if (intervalCount + intervalDrift >= intervalSize) {
            // summarize interval and display BBV info
            std::vector<pair<uint64_t, uint64_t> > counts;
            for (auto map_itr = bbMap.begin(); map_itr != bbMap.end();
                 ++map_itr) {
                BBInfo& info = map_itr->second;
                if (info.count != 0) {
                    counts.push_back(std::make_pair(info.id, info.count));
                    info.count = 0;
                }
            }
            std::sort(counts.begin(), counts.end());

            // Print output BBV info
            *simpointStream << "T";
            for (auto cnt_itr = counts.begin(); cnt_itr != counts.end();
                 ++cnt_itr) {
                *simpointStream << ":" << cnt_itr->first
                                << ":" << cnt_itr->second << " ";
            }
            *simpointStream << "\n";

            intervalDrift = (intervalCount + intervalDrift) - intervalSize;
            intervalCount = 0;
        }
    }
}

////////////////////////////////////////////////////////////////////////
//
// AtomicSimpleCPU Simulation Object
//
AtomicSimpleCPU *
AtomicSimpleCPUParams::create()
{
    numThreads = 1;
    if (!FullSystem && workload.size() != 1)
        panic("only one workload allowed");
    return new AtomicSimpleCPU(this);
}
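// Output format note for profileSimPoint() above: each interval produces one
// line of ":<basic block id>:<instruction count>" pairs prefixed with "T",
// e.g. "T:1:2047 :2:511 :5:93 " (illustrative values). This is the
// frequency-vector ("BBV") format normally fed to the SimPoint clustering
// tool; ids are assigned in order of first appearance and counts are reset
// to zero at the end of every interval.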