// base.cc (revision 10098)
/*
 * Copyright (c) 2012 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Andreas Sandberg
 */
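/*
 * BaseKvmCPU is the architecture-independent base class for gem5's
 * KVM-based CPU models. In rough terms, it creates a virtual CPU and
 * maps its shared kvm_run structure, drives execution from tick(),
 * bounds each stay in the guest using timers and performance
 * counters, and services VM exits (IO, MMIO, halts, hypercalls)
 * through the gem5 memory system.
 */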
#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>

#include <cerrno>
#include <csignal>
#include <ostream>

#include "arch/mmapped_ipr.hh"
#include "arch/utility.hh"
#include "cpu/kvm/base.hh"
#include "debug/Checkpoint.hh"
#include "debug/Drain.hh"
#include "debug/Kvm.hh"
#include "debug/KvmIO.hh"
#include "debug/KvmRun.hh"
#include "params/BaseKvmCPU.hh"
#include "sim/process.hh"
#include "sim/system.hh"

#include <signal.h>

/* Used by some KVM macros */
#define PAGE_SIZE pageSize

static volatile __thread bool timerOverflowed = false;

BaseKvmCPU::BaseKvmCPU(BaseKvmCPUParams *params)
    : BaseCPU(params),
      vm(*params->kvmVM),
      _status(Idle),
      dataPort(name() + ".dcache_port", this),
      instPort(name() + ".icache_port", this),
      threadContextDirty(true),
      kvmStateDirty(false),
      vcpuID(vm.allocVCPUID()), vcpuFD(-1), vcpuMMapSize(0),
      _kvmRun(NULL), mmioRing(NULL),
      pageSize(sysconf(_SC_PAGE_SIZE)),
      tickEvent(*this),
      activeInstPeriod(0),
      perfControlledByTimer(params->usePerfOverflow),
      hostFactor(params->hostFactor),
      drainManager(NULL),
      ctrInsts(0)
{
    if (pageSize == -1)
        panic("KVM: Failed to determine host page size (%i)\n",
              errno);

    thread = new SimpleThread(this, 0, params->system,
                              params->itb, params->dtb, params->isa[0]);
    thread->setStatus(ThreadContext::Halted);
    tc = thread->getTC();
    threadContexts.push_back(tc);
}

BaseKvmCPU::~BaseKvmCPU()
{
    if (_kvmRun)
        munmap(_kvmRun, vcpuMMapSize);
    close(vcpuFD);
}

void
BaseKvmCPU::init()
{
    BaseCPU::init();

    if (numThreads != 1)
        fatal("KVM: Multithreading not supported");

    tc->initMemProxies(tc);

    // initialize CPU, including PC
    if (FullSystem && !switchedOut())
        TheISA::initCPU(tc, tc->contextId());

    mmio_req.setThreadContext(tc->contextId(), 0);
}

void
BaseKvmCPU::startup()
{
    const BaseKvmCPUParams * const p(
        dynamic_cast<const BaseKvmCPUParams *>(params()));

    Kvm &kvm(vm.kvm);

    BaseCPU::startup();

    assert(vcpuFD == -1);

    // Tell the VM that a CPU is about to start.
    vm.cpuStartup();

    // We can't initialize KVM CPUs in BaseKvmCPU::init() since we are
    // not guaranteed that the parent KVM VM has initialized at that
    // point. Initialize virtual CPUs here instead.
    vcpuFD = vm.createVCPU(vcpuID);

    // Map the KVM run structure
    vcpuMMapSize = kvm.getVCPUMMapSize();
    _kvmRun = (struct kvm_run *)mmap(0, vcpuMMapSize,
                                     PROT_READ | PROT_WRITE, MAP_SHARED,
                                     vcpuFD, 0);
    if (_kvmRun == MAP_FAILED)
        panic("KVM: Failed to map run data structure\n");
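    // The mapped kvm_run structure is shared with the kernel: gem5
    // reads the exit reason and exit-specific data (e.g.,
    // _kvmRun->mmio) from it after every return from KVM_RUN. A rough
    // sketch of the mapping (see linux/kvm.h for the authoritative
    // layout):
    //
    //   offset 0                     : struct kvm_run
    //   offset mmioOffset * pageSize : optional coalesced MMIO ring,
    //                                  set up below when available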
    // Setup a pointer to the MMIO ring buffer if coalesced MMIO is
    // available. The offset into the KVM's communication page is
    // provided by the coalesced MMIO capability.
    int mmioOffset(kvm.capCoalescedMMIO());
    if (!p->useCoalescedMMIO) {
        inform("KVM: Coalesced MMIO disabled by config.\n");
    } else if (mmioOffset) {
        inform("KVM: Coalesced MMIO available\n");
        mmioRing = (struct kvm_coalesced_mmio_ring *)(
            (char *)_kvmRun + (mmioOffset * pageSize));
    } else {
        inform("KVM: Coalesced MMIO not supported by host OS\n");
    }

    thread->startup();

    Event *startupEvent(
        new EventWrapper<BaseKvmCPU,
                         &BaseKvmCPU::startupThread>(this, true));
    schedule(startupEvent, curTick());
}

void
BaseKvmCPU::startupThread()
{
    // Do thread-specific initialization. We need to setup signal
    // delivery for counters and timers from within the thread that
    // will execute the event queue to ensure that signals are
    // delivered to the right threads.
    const BaseKvmCPUParams * const p(
        dynamic_cast<const BaseKvmCPUParams *>(params()));

    // Setup signal handlers. This has to be done after the vCPU is
    // created since it manipulates the vCPU signal mask.
    setupSignalHandler();

    setupCounters();

    if (p->usePerfOverflow)
        runTimer.reset(new PerfKvmTimer(hwCycles,
                                        KVM_TIMER_SIGNAL,
                                        p->hostFactor,
                                        p->hostFreq));
    else
        runTimer.reset(new PosixKvmTimer(KVM_TIMER_SIGNAL, CLOCK_MONOTONIC,
                                         p->hostFactor,
                                         p->hostFreq));
}

void
BaseKvmCPU::regStats()
{
    using namespace Stats;

    BaseCPU::regStats();

    numInsts
        .name(name() + ".committedInsts")
        .desc("Number of instructions committed")
        ;

    numVMExits
        .name(name() + ".numVMExits")
        .desc("total number of KVM exits")
        ;

    numVMHalfEntries
        .name(name() + ".numVMHalfEntries")
        .desc("number of KVM entries to finalize pending operations")
        ;

    numExitSignal
        .name(name() + ".numExitSignal")
        .desc("exits due to signal delivery")
        ;

    numMMIO
        .name(name() + ".numMMIO")
        .desc("number of VM exits due to memory mapped IO")
        ;

    numCoalescedMMIO
        .name(name() + ".numCoalescedMMIO")
        .desc("number of coalesced memory mapped IO requests")
        ;

    numIO
        .name(name() + ".numIO")
        .desc("number of VM exits due to legacy IO")
        ;

    numHalt
        .name(name() + ".numHalt")
        .desc("number of VM exits due to wait for interrupt instructions")
        ;

    numInterrupts
        .name(name() + ".numInterrupts")
        .desc("number of interrupts delivered")
        ;

    numHypercalls
        .name(name() + ".numHypercalls")
        .desc("number of hypercalls")
        ;
}

void
BaseKvmCPU::serializeThread(std::ostream &os, ThreadID tid)
{
    if (DTRACE(Checkpoint)) {
        DPRINTF(Checkpoint, "KVM: Serializing thread %i:\n", tid);
        dump();
    }

    assert(tid == 0);
    assert(_status == Idle);
    thread->serialize(os);
}

void
BaseKvmCPU::unserializeThread(Checkpoint *cp, const std::string &section,
                              ThreadID tid)
{
    DPRINTF(Checkpoint, "KVM: Unserialize thread %i:\n", tid);

    assert(tid == 0);
    assert(_status == Idle);
    thread->unserialize(cp, section);
    threadContextDirty = true;
}
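/*
 * A rough sketch of the CPU state machine used by drain(), tick(),
 * and handleKvmExit() below (derived from this file; see the Status
 * enum in base.hh for the authoritative definition):
 *
 *   Idle                     -- context suspended, no tick scheduled
 *   Running                  -- executing in KVM from tick()
 *   RunningService           -- exited KVM with a service request
 *                               (e.g., IO/MMIO) that gem5 must handle
 *   RunningServiceCompletion -- service handled, results not yet
 *                               delivered back to the vCPU
 *
 * Draining completes only in Idle/Running; the two service states
 * must first re-enter KVM so that the guest state is consistent.
 */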
unsigned int
BaseKvmCPU::drain(DrainManager *dm)
{
    if (switchedOut())
        return 0;

    DPRINTF(Drain, "BaseKvmCPU::drain\n");
    switch (_status) {
      case Running:
        // The base KVM code is normally ready when it is in the
        // Running state, but the architecture specific code might be
        // of a different opinion. This may happen when the CPU has
        // been notified of an event that hasn't been accepted by the
        // vCPU yet.
        if (!archIsDrained()) {
            drainManager = dm;
            return 1;
        }

        // The state of the CPU is consistent, so we don't need to do
        // anything special to drain it. We simply de-schedule the
        // tick event and enter the Idle state to prevent nasty things
        // like MMIOs from happening.
        if (tickEvent.scheduled())
            deschedule(tickEvent);
        _status = Idle;

        /** FALLTHROUGH */
      case Idle:
        // Idle, no need to drain
        assert(!tickEvent.scheduled());

        // Sync the thread context here since we'll need it when we
        // switch CPUs or checkpoint the CPU.
        syncThreadContext();

        return 0;

      case RunningServiceCompletion:
        // The CPU has just requested a service that was handled in
        // the RunningService state, but the results have still not
        // been reported to the CPU. Now, we /could/ probably just
        // update the register state ourselves instead of letting KVM
        // handle it, but that would be tricky. Instead, we enter KVM
        // and let it do its stuff.
        drainManager = dm;

        DPRINTF(Drain, "KVM CPU is waiting for service completion, "
                "requesting drain.\n");
        return 1;

      case RunningService:
        // We need to drain since the CPU is waiting for service (e.g., MMIOs)
        drainManager = dm;

        DPRINTF(Drain, "KVM CPU is waiting for service, requesting drain.\n");
        return 1;

      default:
        panic("KVM: Unhandled CPU state in drain()\n");
        return 0;
    }
}

void
BaseKvmCPU::drainResume()
{
    assert(!tickEvent.scheduled());

    // We might have been switched out. In that case, we don't need to
    // do anything.
    if (switchedOut())
        return;

    DPRINTF(Kvm, "drainResume\n");
    verifyMemoryMode();

    // The tick event is de-scheduled as a part of the draining
    // process. Re-schedule it if the thread context is active.
    if (tc->status() == ThreadContext::Active) {
        schedule(tickEvent, nextCycle());
        _status = Running;
    } else {
        _status = Idle;
    }
}

void
BaseKvmCPU::switchOut()
{
    DPRINTF(Kvm, "switchOut\n");

    BaseCPU::switchOut();

    // We should have drained prior to executing a switchOut, which
    // means that the tick event shouldn't be scheduled and the CPU is
    // idle.
    assert(!tickEvent.scheduled());
    assert(_status == Idle);
}

void
BaseKvmCPU::takeOverFrom(BaseCPU *cpu)
{
    DPRINTF(Kvm, "takeOverFrom\n");

    BaseCPU::takeOverFrom(cpu);

    // We should have drained prior to executing a takeOverFrom, which
    // means that the tick event shouldn't be scheduled and the CPU is
    // idle.
    assert(!tickEvent.scheduled());
    assert(_status == Idle);
    assert(threadContexts.size() == 1);

    // Force an update of the KVM state here instead of flagging the
    // TC as dirty. This is not ideal from a performance point of
    // view, but it makes debugging easier as it allows meaningful KVM
    // state to be dumped before and after a takeover.
    updateKvmState();
    threadContextDirty = false;
}
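/*
 * The threadContextDirty/kvmStateDirty flags implement a simple
 * synchronization protocol between the gem5 thread context and the
 * register state held by KVM (a sketch; syncThreadContext() and
 * syncKvmState() below assert the invariant):
 *
 *   threadContextDirty -- the TC has changes KVM hasn't seen; push
 *                         them with updateKvmState().
 *   kvmStateDirty      -- KVM has executed since the last sync; pull
 *                         state with updateThreadContext().
 *
 * At most one of the two flags is set at any time.
 */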
void
BaseKvmCPU::verifyMemoryMode() const
{
    if (!(system->isAtomicMode() && system->bypassCaches())) {
        fatal("The KVM-based CPUs require the memory system to be in the "
              "'atomic_noncaching' mode.\n");
    }
}

void
BaseKvmCPU::wakeup()
{
    DPRINTF(Kvm, "wakeup()\n");

    if (thread->status() != ThreadContext::Suspended)
        return;

    thread->activate();
}

void
BaseKvmCPU::activateContext(ThreadID thread_num, Cycles delay)
{
    DPRINTF(Kvm, "ActivateContext %d (%d cycles)\n", thread_num, delay);

    assert(thread_num == 0);
    assert(thread);

    assert(_status == Idle);
    assert(!tickEvent.scheduled());

    numCycles += ticksToCycles(thread->lastActivate - thread->lastSuspend);

    schedule(tickEvent, clockEdge(delay));
    _status = Running;
}


void
BaseKvmCPU::suspendContext(ThreadID thread_num)
{
    DPRINTF(Kvm, "SuspendContext %d\n", thread_num);

    assert(thread_num == 0);
    assert(thread);

    if (_status == Idle)
        return;

    assert(_status == Running);

    // The tick event may not be scheduled if the guest has requested
    // the monitor to wait for interrupts. The normal CPU models can
    // get their tick events descheduled by quiesce instructions, but
    // that can't happen here.
    if (tickEvent.scheduled())
        deschedule(tickEvent);

    _status = Idle;
}

void
BaseKvmCPU::deallocateContext(ThreadID thread_num)
{
    // for now, these are equivalent
    suspendContext(thread_num);
}

void
BaseKvmCPU::haltContext(ThreadID thread_num)
{
    // for now, these are equivalent
    suspendContext(thread_num);
}

ThreadContext *
BaseKvmCPU::getContext(int tn)
{
    assert(tn == 0);
    syncThreadContext();
    return tc;
}


Counter
BaseKvmCPU::totalInsts() const
{
    return ctrInsts;
}

Counter
BaseKvmCPU::totalOps() const
{
    hack_once("Pretending totalOps is equivalent to totalInsts()\n");
    return ctrInsts;
}

void
BaseKvmCPU::dump()
{
    inform("State dumping not implemented.");
}

void
BaseKvmCPU::tick()
{
    Tick delay(0);
    assert(_status != Idle);

    switch (_status) {
      case RunningService:
        // handleKvmExit() will determine the next state of the CPU
        delay = handleKvmExit();

        if (tryDrain())
            _status = Idle;
        break;

      case RunningServiceCompletion:
      case Running: {
          EventQueue *q = curEventQueue();
          Tick ticksToExecute(q->nextTick() - curTick());

          // We might need to update the KVM state.
          syncKvmState();

          // Setup any pending instruction count breakpoints using
          // PerfEvent.
          setupInstStop();

          DPRINTF(KvmRun, "Entering KVM...\n");
          if (drainManager) {
              // Force an immediate exit from KVM after completing
              // pending operations. The architecture-specific code
              // takes care to run until it is in a state where it can
              // safely be drained.
              delay = kvmRunDrain();
          } else {
              delay = kvmRun(ticksToExecute);
          }

          // Entering into KVM implies that we'll have to reload the thread
          // context from KVM if we want to access it. Flag the KVM state as
          // dirty with respect to the cached thread context.
          kvmStateDirty = true;

          // Enter into the RunningService state unless the
          // simulation was stopped by a timer.
          if (_kvmRun->exit_reason != KVM_EXIT_INTR) {
              _status = RunningService;
          } else {
              ++numExitSignal;
              _status = Running;
          }

          // Service any pending instruction events. The vCPU should
          // have exited in time for the event using the instruction
          // counter configured by setupInstStop().
          comInstEventQueue[0]->serviceEvents(ctrInsts);
          system->instEventQueue.serviceEvents(system->totalNumInsts);

          if (tryDrain())
              _status = Idle;
      } break;

      default:
        panic("BaseKvmCPU entered tick() in an illegal state (%i)\n",
              _status);
    }

    // Schedule a new tick if we are still running
    if (_status != Idle)
        schedule(tickEvent, clockEdge(ticksToCycles(delay)));
}
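/*
 * kvmRun() below is entered in two ways (a summary of the code that
 * follows, not additional behaviour):
 *
 *  - ticks > 0: a normal guest entry bounded by the run timer. The
 *    timer raises KVM_TIMER_SIGNAL when the budget expires, forcing
 *    the KVM_RUN ioctl to return with EINTR.
 *
 *  - ticks == 0: a "half entry" that only finalizes pending
 *    operations (e.g., outstanding IO). The timer signal is raised
 *    before entering KVM, so the guest exits again immediately.
 */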
Tick
BaseKvmCPU::kvmRunDrain()
{
    // By default, the only thing we need to drain is a pending IO
    // operation which assumes that we are in the
    // RunningServiceCompletion state.
    assert(_status == RunningServiceCompletion);

    // Deliver the data from the pending IO operation and immediately
    // exit.
    return kvmRun(0);
}

uint64_t
BaseKvmCPU::getHostCycles() const
{
    return hwCycles.read();
}

Tick
BaseKvmCPU::kvmRun(Tick ticks)
{
    Tick ticksExecuted;
    DPRINTF(KvmRun, "KVM: Executing for %i ticks\n", ticks);
    timerOverflowed = false;

    if (ticks == 0) {
        // Setting ticks == 0 is a special case which causes an entry
        // into KVM that finishes pending operations (e.g., IO) and
        // then immediately exits.
        DPRINTF(KvmRun, "KVM: Delivering IO without full guest entry\n");

        ++numVMHalfEntries;

        // This signal is always masked while we are executing in gem5
        // and gets unmasked temporarily as soon as we enter into
        // KVM. See setSignalMask() and setupSignalHandler().
        raise(KVM_TIMER_SIGNAL);

        // Enter into KVM. KVM will check for signals after completing
        // pending operations (IO). Since the KVM_TIMER_SIGNAL is
        // pending, this forces an immediate exit into gem5 again. We
        // don't bother to setup timers since this shouldn't actually
        // execute any code in the guest.
        ioctlRun();

        // We always execute at least one cycle to prevent
        // BaseKvmCPU::tick() from being rescheduled on the same tick
        // twice.
        ticksExecuted = clockPeriod();
    } else {
        if (ticks < runTimer->resolution()) {
            DPRINTF(KvmRun, "KVM: Adjusting tick count (%i -> %i)\n",
                    ticks, runTimer->resolution());
            ticks = runTimer->resolution();
        }

        // Get hardware statistics after synchronizing contexts. The KVM
        // state update might affect guest cycle counters.
        uint64_t baseCycles(getHostCycles());
        uint64_t baseInstrs(hwInstructions.read());

        // Arm the run timer and start the cycle timer if it isn't
        // controlled by the overflow timer. Starting/stopping the cycle
        // timer automatically starts the other perf timers as they are in
        // the same counter group.
        runTimer->arm(ticks);
        if (!perfControlledByTimer)
            hwCycles.start();

        ioctlRun();

        runTimer->disarm();
        if (!perfControlledByTimer)
            hwCycles.stop();

        // The timer signal may have been delivered after we exited
        // from KVM. It will be pending in that case since it is
        // masked when we aren't executing in KVM. Discard it to make
        // sure we don't deliver it immediately next time we try to
        // enter into KVM.
        discardPendingSignal(KVM_TIMER_SIGNAL);
        discardPendingSignal(KVM_INST_SIGNAL);

        const uint64_t hostCyclesExecuted(getHostCycles() - baseCycles);
        const uint64_t simCyclesExecuted(hostCyclesExecuted * hostFactor);
        const uint64_t instsExecuted(hwInstructions.read() - baseInstrs);
        ticksExecuted = runTimer->ticksFromHostCycles(hostCyclesExecuted);

        if (ticksExecuted < ticks &&
            timerOverflowed &&
            _kvmRun->exit_reason == KVM_EXIT_INTR) {
            // TODO: We should probably do something clever here...
            warn("KVM: Early timer event, requested %i ticks but got %i ticks.\n",
                 ticks, ticksExecuted);
        }

        /* Update statistics */
        numCycles += simCyclesExecuted;
        numInsts += instsExecuted;
        ctrInsts += instsExecuted;
        system->totalNumInsts += instsExecuted;

        DPRINTF(KvmRun,
                "KVM: Executed %i instructions in %i cycles "
                "(%i ticks, sim cycles: %i).\n",
                instsExecuted, hostCyclesExecuted, ticksExecuted,
                simCyclesExecuted);
    }

    ++numVMExits;

    return ticksExecuted + flushCoalescedMMIO();
}
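/*
 * Illustrative accounting example for kvmRun() (invented numbers):
 * with hostFactor == 2 and 1000 host cycles measured by hwCycles,
 * 2000 simulated cycles are charged to numCycles, while ticksExecuted
 * comes from the run timer's host-cycles-to-ticks conversion for the
 * same 1000 host cycles.
 */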
void
BaseKvmCPU::kvmNonMaskableInterrupt()
{
    ++numInterrupts;
    if (ioctl(KVM_NMI) == -1)
        panic("KVM: Failed to deliver NMI to virtual CPU\n");
}

void
BaseKvmCPU::kvmInterrupt(const struct kvm_interrupt &interrupt)
{
    ++numInterrupts;
    if (ioctl(KVM_INTERRUPT, (void *)&interrupt) == -1)
        panic("KVM: Failed to deliver interrupt to virtual CPU\n");
}

void
BaseKvmCPU::getRegisters(struct kvm_regs &regs) const
{
    if (ioctl(KVM_GET_REGS, &regs) == -1)
        panic("KVM: Failed to get guest registers\n");
}

void
BaseKvmCPU::setRegisters(const struct kvm_regs &regs)
{
    if (ioctl(KVM_SET_REGS, (void *)&regs) == -1)
        panic("KVM: Failed to set guest registers\n");
}

void
BaseKvmCPU::getSpecialRegisters(struct kvm_sregs &regs) const
{
    if (ioctl(KVM_GET_SREGS, &regs) == -1)
        panic("KVM: Failed to get guest special registers\n");
}

void
BaseKvmCPU::setSpecialRegisters(const struct kvm_sregs &regs)
{
    if (ioctl(KVM_SET_SREGS, (void *)&regs) == -1)
        panic("KVM: Failed to set guest special registers\n");
}

void
BaseKvmCPU::getFPUState(struct kvm_fpu &state) const
{
    if (ioctl(KVM_GET_FPU, &state) == -1)
        panic("KVM: Failed to get guest FPU state\n");
}

void
BaseKvmCPU::setFPUState(const struct kvm_fpu &state)
{
    if (ioctl(KVM_SET_FPU, (void *)&state) == -1)
        panic("KVM: Failed to set guest FPU state\n");
}


void
BaseKvmCPU::setOneReg(uint64_t id, const void *addr)
{
#ifdef KVM_SET_ONE_REG
    struct kvm_one_reg reg;
    reg.id = id;
    reg.addr = (uint64_t)addr;

    if (ioctl(KVM_SET_ONE_REG, &reg) == -1) {
        panic("KVM: Failed to set register (0x%x) value (errno: %i)\n",
              id, errno);
    }
#else
    panic("KVM_SET_ONE_REG is unsupported on this platform.\n");
#endif
}

void
BaseKvmCPU::getOneReg(uint64_t id, void *addr) const
{
#ifdef KVM_GET_ONE_REG
    struct kvm_one_reg reg;
    reg.id = id;
    reg.addr = (uint64_t)addr;

    if (ioctl(KVM_GET_ONE_REG, &reg) == -1) {
        panic("KVM: Failed to get register (0x%x) value (errno: %i)\n",
              id, errno);
    }
#else
    panic("KVM_GET_ONE_REG is unsupported on this platform.\n");
#endif
}
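/*
 * Illustrative (hypothetical reg_id) use of the ONE_REG API above:
 *
 *   uint64_t value;
 *   getOneReg(reg_id, &value);   // read a 64-bit guest register
 *   value |= 0x1;
 *   setOneReg(reg_id, &value);   // write the modified value back
 *
 * The id encodes the register's size (KVM_REG_SIZE_*) and an
 * architecture-specific register number; see linux/kvm.h.
 */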
std::string
BaseKvmCPU::getAndFormatOneReg(uint64_t id) const
{
#ifdef KVM_GET_ONE_REG
    std::ostringstream ss;

    ss.setf(std::ios::hex, std::ios::basefield);
    ss.setf(std::ios::showbase);
// The unary + promotes uint8_t values to int so they are formatted
// as numbers rather than as raw characters.
#define HANDLE_INTTYPE(len)                     \
    case KVM_REG_SIZE_U ## len: {               \
        uint ## len ## _t value;                \
        getOneReg(id, &value);                  \
        ss << +value;                           \
    } break

#define HANDLE_ARRAY(len)                       \
    case KVM_REG_SIZE_U ## len: {               \
        uint8_t value[len / 8];                 \
        getOneReg(id, value);                   \
        ss << "[" << +value[0];                 \
        for (int i = 1; i < len / 8; ++i)       \
            ss << ", " << +value[i];            \
        ss << "]";                              \
    } break

    switch (id & KVM_REG_SIZE_MASK) {
      HANDLE_INTTYPE(8);
      HANDLE_INTTYPE(16);
      HANDLE_INTTYPE(32);
      HANDLE_INTTYPE(64);
      HANDLE_ARRAY(128);
      HANDLE_ARRAY(256);
      HANDLE_ARRAY(512);
      HANDLE_ARRAY(1024);
      default:
        ss << "??";
    }

#undef HANDLE_INTTYPE
#undef HANDLE_ARRAY

    return ss.str();
#else
    panic("KVM_GET_ONE_REG is unsupported on this platform.\n");
#endif
}

void
BaseKvmCPU::syncThreadContext()
{
    if (!kvmStateDirty)
        return;

    assert(!threadContextDirty);

    updateThreadContext();
    kvmStateDirty = false;
}

void
BaseKvmCPU::syncKvmState()
{
    if (!threadContextDirty)
        return;

    assert(!kvmStateDirty);

    updateKvmState();
    threadContextDirty = false;
}

Tick
BaseKvmCPU::handleKvmExit()
{
    DPRINTF(KvmRun, "handleKvmExit (exit_reason: %i)\n", _kvmRun->exit_reason);
    assert(_status == RunningService);

    // Switch into the running state by default. Individual handlers
    // can override this.
    _status = Running;
    switch (_kvmRun->exit_reason) {
      case KVM_EXIT_UNKNOWN:
        return handleKvmExitUnknown();

      case KVM_EXIT_EXCEPTION:
        return handleKvmExitException();

      case KVM_EXIT_IO:
        _status = RunningServiceCompletion;
        ++numIO;
        return handleKvmExitIO();

      case KVM_EXIT_HYPERCALL:
        ++numHypercalls;
        return handleKvmExitHypercall();

      case KVM_EXIT_HLT:
        /* The guest has halted and is waiting for interrupts */
        DPRINTF(Kvm, "handleKvmExitHalt\n");
        ++numHalt;

        // Suspend the thread until the next interrupt arrives
        thread->suspend();

        // This is actually ignored since the thread is suspended.
        return 0;

      case KVM_EXIT_MMIO:
        _status = RunningServiceCompletion;
        /* Service memory mapped IO requests */
        DPRINTF(KvmIO, "KVM: Handling MMIO (w: %u, addr: 0x%x, len: %u)\n",
                _kvmRun->mmio.is_write,
                _kvmRun->mmio.phys_addr, _kvmRun->mmio.len);

        ++numMMIO;
        return doMMIOAccess(_kvmRun->mmio.phys_addr, _kvmRun->mmio.data,
                            _kvmRun->mmio.len, _kvmRun->mmio.is_write);

      case KVM_EXIT_IRQ_WINDOW_OPEN:
        return handleKvmExitIRQWindowOpen();

      case KVM_EXIT_FAIL_ENTRY:
        return handleKvmExitFailEntry();

      case KVM_EXIT_INTR:
        /* KVM was interrupted by a signal, restart it in the next
         * tick. */
        return 0;

      case KVM_EXIT_INTERNAL_ERROR:
        panic("KVM: Internal error (suberror: %u)\n",
              _kvmRun->internal.suberror);

      default:
        dump();
        panic("KVM: Unexpected exit (exit_reason: %u)\n", _kvmRun->exit_reason);
    }
}

Tick
BaseKvmCPU::handleKvmExitIO()
{
    panic("KVM: Unhandled guest IO (dir: %i, size: %i, port: 0x%x, count: %i)\n",
          _kvmRun->io.direction, _kvmRun->io.size,
          _kvmRun->io.port, _kvmRun->io.count);
}

Tick
BaseKvmCPU::handleKvmExitHypercall()
{
    panic("KVM: Unhandled hypercall\n");
}

Tick
BaseKvmCPU::handleKvmExitIRQWindowOpen()
{
    warn("KVM: Unhandled IRQ window.\n");
    return 0;
}


Tick
BaseKvmCPU::handleKvmExitUnknown()
{
    dump();
    panic("KVM: Unknown error when starting vCPU (hw reason: 0x%llx)\n",
          _kvmRun->hw.hardware_exit_reason);
}

Tick
BaseKvmCPU::handleKvmExitException()
{
    dump();
    panic("KVM: Got exception when starting vCPU "
          "(exception: %u, error_code: %u)\n",
          _kvmRun->ex.exception, _kvmRun->ex.error_code);
}

Tick
BaseKvmCPU::handleKvmExitFailEntry()
{
    dump();
    panic("KVM: Failed to enter virtualized mode (hw reason: 0x%llx)\n",
          _kvmRun->fail_entry.hardware_entry_failure_reason);
}
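/*
 * doMMIOAccess() below translates a KVM exit into a gem5 memory
 * access (a summary of the code, not additional behaviour): the
 * faulting physical address, size, and data are wrapped in a Request
 * and Packet, optionally rewritten by the architecture's
 * finalizePhysical() hook, and then either handled as a memory-mapped
 * IPR access or sent through the data port as an atomic access.
 */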
Tick
BaseKvmCPU::doMMIOAccess(Addr paddr, void *data, int size, bool write)
{
    ThreadContext *tc(thread->getTC());
    syncThreadContext();

    mmio_req.setPhys(paddr, size, Request::UNCACHEABLE, dataMasterId());
    // Some architectures do need to massage physical addresses a bit
    // before they are inserted into the memory system. This enables
    // APIC accesses on x86 and m5ops where supported through a MMIO
    // interface.
    BaseTLB::Mode tlb_mode(write ? BaseTLB::Write : BaseTLB::Read);
    Fault fault(tc->getDTBPtr()->finalizePhysical(&mmio_req, tc, tlb_mode));
    if (fault != NoFault)
        warn("Finalization of MMIO address failed: %s\n", fault->name());

    const MemCmd cmd(write ? MemCmd::WriteReq : MemCmd::ReadReq);
    Packet pkt(&mmio_req, cmd);
    pkt.dataStatic(data);

    if (mmio_req.isMmappedIpr()) {
        const Cycles ipr_delay(write ?
                               TheISA::handleIprWrite(tc, &pkt) :
                               TheISA::handleIprRead(tc, &pkt));
        return clockPeriod() * ipr_delay;
    } else {
        return dataPort.sendAtomic(&pkt);
    }
}

void
BaseKvmCPU::setSignalMask(const sigset_t *mask)
{
    std::unique_ptr<struct kvm_signal_mask> kvm_mask;

    if (mask) {
        kvm_mask.reset((struct kvm_signal_mask *)operator new(
                           sizeof(struct kvm_signal_mask) + sizeof(*mask)));
        // The kernel and the user-space headers have different ideas
        // about the size of sigset_t. This seems like a massive hack,
        // but is actually what qemu does.
        assert(sizeof(*mask) >= 8);
        kvm_mask->len = 8;
        memcpy(kvm_mask->sigset, mask, kvm_mask->len);
    }

    if (ioctl(KVM_SET_SIGNAL_MASK, (void *)kvm_mask.get()) == -1)
        panic("KVM: Failed to set vCPU signal mask (errno: %i)\n",
              errno);
}

int
BaseKvmCPU::ioctl(int request, long p1) const
{
    if (vcpuFD == -1)
        panic("KVM: CPU ioctl called before initialization\n");

    return ::ioctl(vcpuFD, request, p1);
}
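/*
 * The coalesced MMIO ring consumed below lives in the kvm_run mapping
 * (see startup()) and is filled by the kernel. Roughly, each entry
 * carries the physical address, length, and data of one deferred
 * write; first and last are consumer/producer indices that wrap
 * modulo KVM_COALESCED_MMIO_MAX. See struct kvm_coalesced_mmio in
 * linux/kvm.h for the authoritative definition.
 */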
Tick
BaseKvmCPU::flushCoalescedMMIO()
{
    if (!mmioRing)
        return 0;

    DPRINTF(KvmIO, "KVM: Flushing the coalesced MMIO ring buffer\n");

    // TODO: We might need to do synchronization when we start to
    // support multiple CPUs
    Tick ticks(0);
    while (mmioRing->first != mmioRing->last) {
        struct kvm_coalesced_mmio &ent(
            mmioRing->coalesced_mmio[mmioRing->first]);

        DPRINTF(KvmIO, "KVM: Handling coalesced MMIO (addr: 0x%x, len: %u)\n",
                ent.phys_addr, ent.len);

        ++numCoalescedMMIO;
        ticks += doMMIOAccess(ent.phys_addr, ent.data, ent.len, true);

        mmioRing->first = (mmioRing->first + 1) % KVM_COALESCED_MMIO_MAX;
    }

    return ticks;
}

/**
 * Cycle timer overflow when running in KVM. Forces the KVM syscall to
 * exit with EINTR and allows us to run the event queue.
 *
 * @warn This function might not be called since some kernels don't
 * seem to deliver signals when the signal is only unmasked when
 * running in KVM. This doesn't matter though since we are only
 * interested in getting KVM to exit, which happens as expected. See
 * setupSignalHandler() and kvmRun() for details about KVM signal
 * handling.
 */
static void
onTimerOverflow(int signo, siginfo_t *si, void *data)
{
    timerOverflowed = true;
}

/**
 * Instruction counter overflow when running in KVM. Forces the KVM
 * syscall to exit with EINTR and allows us to handle instruction
 * count events.
 */
static void
onInstEvent(int signo, siginfo_t *si, void *data)
{
}

void
BaseKvmCPU::setupSignalHandler()
{
    struct sigaction sa;

    memset(&sa, 0, sizeof(sa));
    sa.sa_sigaction = onTimerOverflow;
    sa.sa_flags = SA_SIGINFO | SA_RESTART;
    if (sigaction(KVM_TIMER_SIGNAL, &sa, NULL) == -1)
        panic("KVM: Failed to setup vCPU timer signal handler\n");

    memset(&sa, 0, sizeof(sa));
    sa.sa_sigaction = onInstEvent;
    sa.sa_flags = SA_SIGINFO | SA_RESTART;
    if (sigaction(KVM_INST_SIGNAL, &sa, NULL) == -1)
        panic("KVM: Failed to setup vCPU instruction signal handler\n");

    sigset_t sigset;
    if (pthread_sigmask(SIG_BLOCK, NULL, &sigset) == -1)
        panic("KVM: Failed to get signal mask\n");

    // Request KVM to setup the same signal mask as we're currently
    // running with except for the KVM control signals. We'll
    // sometimes need to raise the KVM_TIMER_SIGNAL to cause immediate
    // exits from KVM after servicing IO requests. See kvmRun().
    sigdelset(&sigset, KVM_TIMER_SIGNAL);
    sigdelset(&sigset, KVM_INST_SIGNAL);
    setSignalMask(&sigset);

    // Mask our control signals so they aren't delivered unless we're
    // actually executing inside KVM.
    sigaddset(&sigset, KVM_TIMER_SIGNAL);
    sigaddset(&sigset, KVM_INST_SIGNAL);
    if (pthread_sigmask(SIG_SETMASK, &sigset, NULL) == -1)
        panic("KVM: Failed to mask the KVM control signals\n");
}
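/*
 * Net effect of the setup above, assuming standard Linux signal
 * semantics: KVM_TIMER_SIGNAL and KVM_INST_SIGNAL are blocked in the
 * simulation thread but removed from the mask KVM installs around
 * KVM_RUN. The signals therefore stay pending while gem5 code runs
 * and only interrupt execution (with EINTR) while the guest is
 * running, which is exactly what kvmRun() relies on.
 */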
bool
BaseKvmCPU::discardPendingSignal(int signum) const
{
    int discardedSignal;

    // Setting the timeout to zero causes sigtimedwait to return
    // immediately.
    struct timespec timeout;
    timeout.tv_sec = 0;
    timeout.tv_nsec = 0;

    sigset_t sigset;
    sigemptyset(&sigset);
    sigaddset(&sigset, signum);

    do {
        discardedSignal = sigtimedwait(&sigset, NULL, &timeout);
    } while (discardedSignal == -1 && errno == EINTR);

    if (discardedSignal == signum)
        return true;
    else if (discardedSignal == -1 && errno == EAGAIN)
        return false;
    else
        panic("Unexpected return value from sigtimedwait: %i (errno: %i)\n",
              discardedSignal, errno);
}

void
BaseKvmCPU::setupCounters()
{
    DPRINTF(Kvm, "Attaching cycle counter...\n");
    PerfKvmCounterConfig cfgCycles(PERF_TYPE_HARDWARE,
                                   PERF_COUNT_HW_CPU_CYCLES);
    cfgCycles.disabled(true)
        .pinned(true);

    // Try to exclude the host. We set both exclude_hv and
    // exclude_host since different architectures use slightly
    // different APIs in the kernel.
    cfgCycles.exclude_hv(true)
        .exclude_host(true);

    if (perfControlledByTimer) {
        // We need to configure the cycles counter to send overflows
        // since we are going to use it to trigger timer signals that
        // trap back into m5 from KVM. In practice, this means that we
        // need to set some non-zero sample period that gets
        // overridden when the timer is armed.
        cfgCycles.wakeupEvents(1)
            .samplePeriod(42);
    }

    hwCycles.attach(cfgCycles,
                    0); // TID (0 => currentThread)

    setupInstCounter();
}

bool
BaseKvmCPU::tryDrain()
{
    if (!drainManager)
        return false;

    if (!archIsDrained()) {
        DPRINTF(Drain, "tryDrain: Architecture code is not ready.\n");
        return false;
    }

    if (_status == Idle || _status == Running) {
        DPRINTF(Drain,
                "tryDrain: CPU transitioned into the Idle state, drain done\n");
        drainManager->signalDrainDone();
        drainManager = NULL;
        return true;
    } else {
        DPRINTF(Drain, "tryDrain: CPU not ready.\n");
        return false;
    }
}

void
BaseKvmCPU::ioctlRun()
{
    if (ioctl(KVM_RUN) == -1) {
        if (errno != EINTR)
            panic("KVM: Failed to start virtual CPU (errno: %i)\n",
                  errno);
    }
}
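/*
 * How instruction-count breakpoints are implemented (a summary of
 * the two functions below): setupInstStop() looks up the next
 * scheduled instruction event and programs the instruction counter
 * to overflow after the remaining instruction count. The overflow
 * raises KVM_INST_SIGNAL, which forces an exit from KVM so that
 * tick() can service the event queue at the right instruction count.
 */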
void
BaseKvmCPU::setupInstStop()
{
    if (comInstEventQueue[0]->empty()) {
        setupInstCounter(0);
    } else {
        const uint64_t next(comInstEventQueue[0]->nextTick());

        assert(next > ctrInsts);
        setupInstCounter(next - ctrInsts);
    }
}

void
BaseKvmCPU::setupInstCounter(uint64_t period)
{
    // No need to do anything if the counter is already attached and
    // the period isn't changing.
    if (period == activeInstPeriod && hwInstructions.attached())
        return;

    PerfKvmCounterConfig cfgInstructions(PERF_TYPE_HARDWARE,
                                         PERF_COUNT_HW_INSTRUCTIONS);

    // Try to exclude the host. We set both exclude_hv and
    // exclude_host since different architectures use slightly
    // different APIs in the kernel.
    cfgInstructions.exclude_hv(true)
        .exclude_host(true);

    if (period) {
        // Setup a sampling counter if that has been requested.
        cfgInstructions.wakeupEvents(1)
            .samplePeriod(period);
    }

    // We need to detach and re-attach the counter to reliably change
    // sampling settings. See PerfKvmCounter::period() for details.
    if (hwInstructions.attached())
        hwInstructions.detach();
    assert(hwCycles.attached());
    hwInstructions.attach(cfgInstructions,
                          0, // TID (0 => currentThread)
                          hwCycles);

    if (period)
        hwInstructions.enableSignals(KVM_INST_SIGNAL);

    activeInstPeriod = period;
}