base.cc revision 10114:bd83b4f6a12e
/*
 * Copyright (c) 2012 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Andreas Sandberg
 */

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>

#include <cerrno>
#include <csignal>
#include <ostream>

#include "arch/mmapped_ipr.hh"
#include "arch/utility.hh"
#include "cpu/kvm/base.hh"
#include "debug/Checkpoint.hh"
#include "debug/Drain.hh"
#include "debug/Kvm.hh"
#include "debug/KvmIO.hh"
#include "debug/KvmRun.hh"
#include "params/BaseKvmCPU.hh"
#include "sim/process.hh"
#include "sim/system.hh"

#include <signal.h>

/* Used by some KVM macros */
#define PAGE_SIZE pageSize

BaseKvmCPU::BaseKvmCPU(BaseKvmCPUParams *params)
    : BaseCPU(params),
      vm(*params->kvmVM),
      _status(Idle),
      dataPort(name() + ".dcache_port", this),
      instPort(name() + ".icache_port", this),
      threadContextDirty(true),
      kvmStateDirty(false),
      vcpuID(vm.allocVCPUID()), vcpuFD(-1), vcpuMMapSize(0),
      _kvmRun(NULL), mmioRing(NULL),
      pageSize(sysconf(_SC_PAGE_SIZE)),
      tickEvent(*this),
      activeInstPeriod(0),
      perfControlledByTimer(params->usePerfOverflow),
      hostFactor(params->hostFactor),
      drainManager(NULL),
      ctrInsts(0)
{
    if (pageSize == -1)
        panic("KVM: Failed to determine host page size (%i)\n",
              errno);

    thread = new SimpleThread(this, 0, params->system,
                              params->itb, params->dtb, params->isa[0]);
    thread->setStatus(ThreadContext::Halted);
    tc = thread->getTC();
    threadContexts.push_back(tc);
}

BaseKvmCPU::~BaseKvmCPU()
{
    if (_kvmRun)
        munmap(_kvmRun, vcpuMMapSize);
    close(vcpuFD);
}

void
BaseKvmCPU::init()
{
    BaseCPU::init();

    if (numThreads != 1)
        fatal("KVM: Multithreading not supported");

    tc->initMemProxies(tc);

    // initialize CPU, including PC
    if (FullSystem && !switchedOut())
        TheISA::initCPU(tc, tc->contextId());

    mmio_req.setThreadContext(tc->contextId(), 0);
}

void
BaseKvmCPU::startup()
{
    const BaseKvmCPUParams * const p(
        dynamic_cast<const BaseKvmCPUParams *>(params()));

    Kvm &kvm(vm.kvm);

    BaseCPU::startup();

    assert(vcpuFD == -1);

    // Tell the VM that a CPU is about to start.
    vm.cpuStartup();

    // We can't initialize KVM CPUs in BaseKvmCPU::init() since we are
    // not guaranteed that the parent KVM VM has initialized at that
    // point. Initialize virtual CPUs here instead.
    vcpuFD = vm.createVCPU(vcpuID);

    // Map the KVM run structure
    vcpuMMapSize = kvm.getVCPUMMapSize();
    _kvmRun = (struct kvm_run *)mmap(0, vcpuMMapSize,
                                     PROT_READ | PROT_WRITE, MAP_SHARED,
                                     vcpuFD, 0);
    if (_kvmRun == MAP_FAILED)
        panic("KVM: Failed to map run data structure\n");

    // Setup a pointer to the MMIO ring buffer if coalesced MMIO is
    // available. The offset into KVM's communication page is
    // provided by the coalesced MMIO capability.
    int mmioOffset(kvm.capCoalescedMMIO());
    if (!p->useCoalescedMMIO) {
        inform("KVM: Coalesced MMIO disabled by config.\n");
    } else if (mmioOffset) {
        inform("KVM: Coalesced IO available\n");
        mmioRing = (struct kvm_coalesced_mmio_ring *)(
            (char *)_kvmRun + (mmioOffset * pageSize));
    } else {
        inform("KVM: Coalesced MMIO not supported by host OS\n");
    }

    thread->startup();

    Event *startupEvent(
        new EventWrapper<BaseKvmCPU,
                         &BaseKvmCPU::startupThread>(this, true));
    schedule(startupEvent, curTick());
}

void
BaseKvmCPU::startupThread()
{
    // Do thread-specific initialization. We need to setup signal
    // delivery for counters and timers from within the thread that
    // will execute the event queue to ensure that signals are
    // delivered to the right threads.
    const BaseKvmCPUParams * const p(
        dynamic_cast<const BaseKvmCPUParams *>(params()));

    vcpuThread = pthread_self();

    // Setup signal handlers. This has to be done after the vCPU is
    // created since it manipulates the vCPU signal mask.
    setupSignalHandler();

    setupCounters();

    if (p->usePerfOverflow)
        runTimer.reset(new PerfKvmTimer(hwCycles,
                                        KVM_KICK_SIGNAL,
                                        p->hostFactor,
                                        p->hostFreq));
    else
        runTimer.reset(new PosixKvmTimer(KVM_KICK_SIGNAL, CLOCK_MONOTONIC,
                                         p->hostFactor,
                                         p->hostFreq));
}

void
BaseKvmCPU::regStats()
{
    using namespace Stats;

    BaseCPU::regStats();

    numInsts
        .name(name() + ".committedInsts")
        .desc("Number of instructions committed")
        ;

    numVMExits
        .name(name() + ".numVMExits")
        .desc("total number of KVM exits")
        ;

    numVMHalfEntries
        .name(name() + ".numVMHalfEntries")
        .desc("number of KVM entries to finalize pending operations")
        ;

    numExitSignal
        .name(name() + ".numExitSignal")
        .desc("exits due to signal delivery")
        ;

    numMMIO
        .name(name() + ".numMMIO")
        .desc("number of VM exits due to memory mapped IO")
        ;

    numCoalescedMMIO
        .name(name() + ".numCoalescedMMIO")
        .desc("number of coalesced memory mapped IO requests")
        ;

    numIO
        .name(name() + ".numIO")
        .desc("number of VM exits due to legacy IO")
        ;

    numHalt
        .name(name() + ".numHalt")
        .desc("number of VM exits due to wait for interrupt instructions")
        ;

    numInterrupts
        .name(name() + ".numInterrupts")
        .desc("number of interrupts delivered")
        ;

    numHypercalls
        .name(name() + ".numHypercalls")
        .desc("number of hypercalls")
        ;
}

void
BaseKvmCPU::serializeThread(std::ostream &os, ThreadID tid)
{
    if (DTRACE(Checkpoint)) {
        DPRINTF(Checkpoint, "KVM: Serializing thread %i:\n", tid);
        dump();
    }

    assert(tid == 0);
    assert(_status == Idle);
    thread->serialize(os);
}

void
BaseKvmCPU::unserializeThread(Checkpoint *cp, const std::string &section,
                              ThreadID tid)
{
    DPRINTF(Checkpoint, "KVM: Unserialize thread %i:\n", tid);

    assert(tid == 0);
    assert(_status == Idle);
    thread->unserialize(cp, section);
    threadContextDirty = true;
}

unsigned int
BaseKvmCPU::drain(DrainManager *dm)
{
    if (switchedOut())
        return 0;

    DPRINTF(Drain, "BaseKvmCPU::drain\n");
    switch (_status) {
      case Running:
        // The base KVM code is normally ready when it is in the
        // Running state, but the architecture specific code might be of a
        // different opinion. This may happen when the CPU has been
        // notified of an event that hasn't been accepted by the vCPU
        // yet.
        if (!archIsDrained()) {
            drainManager = dm;
            return 1;
        }

        // The state of the CPU is consistent, so we don't need to do
        // anything special to drain it. We simply de-schedule the
        // tick event and enter the Idle state to prevent nasty things
        // like MMIOs from happening.
        if (tickEvent.scheduled())
            deschedule(tickEvent);
        _status = Idle;

        /** FALLTHROUGH */
      case Idle:
        // Idle, no need to drain
        assert(!tickEvent.scheduled());

        // Sync the thread context here since we'll need it when we
        // switch CPUs or checkpoint the CPU.
        syncThreadContext();

        return 0;

      case RunningServiceCompletion:
        // The CPU has just requested a service that was handled in
        // the RunningService state, but the results have still not
        // been reported to the CPU. Now, we /could/ probably just
        // update the register state ourselves instead of letting KVM
        // handle it, but that would be tricky. Instead, we enter KVM
        // and let it do its stuff.
        drainManager = dm;

        DPRINTF(Drain, "KVM CPU is waiting for service completion, "
                "requesting drain.\n");
        return 1;

      case RunningService:
        // We need to drain since the CPU is waiting for service (e.g., MMIOs)
        drainManager = dm;

        DPRINTF(Drain, "KVM CPU is waiting for service, requesting drain.\n");
        return 1;

      default:
        panic("KVM: Unhandled CPU state in drain()\n");
        return 0;
    }
}

void
BaseKvmCPU::drainResume()
{
    assert(!tickEvent.scheduled());

    // We might have been switched out. In that case, we don't need to
    // do anything.
    if (switchedOut())
        return;

    DPRINTF(Kvm, "drainResume\n");
    verifyMemoryMode();

    // The tick event is de-scheduled as a part of the draining
    // process. Re-schedule it if the thread context is active.
    if (tc->status() == ThreadContext::Active) {
        schedule(tickEvent, nextCycle());
        _status = Running;
    } else {
        _status = Idle;
    }
}

void
BaseKvmCPU::switchOut()
{
    DPRINTF(Kvm, "switchOut\n");

    BaseCPU::switchOut();

    // We should have drained prior to executing a switchOut, which
    // means that the tick event shouldn't be scheduled and the CPU is
    // idle.
    assert(!tickEvent.scheduled());
    assert(_status == Idle);
}

void
BaseKvmCPU::takeOverFrom(BaseCPU *cpu)
{
    DPRINTF(Kvm, "takeOverFrom\n");

    BaseCPU::takeOverFrom(cpu);

    // We should have drained prior to executing a switchOut, which
    // means that the tick event shouldn't be scheduled and the CPU is
    // idle.
    assert(!tickEvent.scheduled());
    assert(_status == Idle);
    assert(threadContexts.size() == 1);

    // Force an update of the KVM state here instead of flagging the
    // TC as dirty. This is not ideal from a performance point of
    // view, but it makes debugging easier as it allows meaningful KVM
    // state to be dumped before and after a takeover.
    updateKvmState();
    threadContextDirty = false;
}

void
BaseKvmCPU::verifyMemoryMode() const
{
    if (!(system->isAtomicMode() && system->bypassCaches())) {
        fatal("The KVM-based CPUs require the memory system to be in the "
              "'atomic_noncaching' mode.\n");
    }
}

void
BaseKvmCPU::wakeup()
{
    DPRINTF(Kvm, "wakeup()\n");

    if (thread->status() != ThreadContext::Suspended)
        return;

    thread->activate();
}

void
BaseKvmCPU::activateContext(ThreadID thread_num, Cycles delay)
{
    DPRINTF(Kvm, "ActivateContext %d (%d cycles)\n", thread_num, delay);

    assert(thread_num == 0);
    assert(thread);

    assert(_status == Idle);
    assert(!tickEvent.scheduled());

    numCycles += ticksToCycles(thread->lastActivate - thread->lastSuspend);

    schedule(tickEvent, clockEdge(delay));
    _status = Running;
}


void
BaseKvmCPU::suspendContext(ThreadID thread_num)
{
    DPRINTF(Kvm, "SuspendContext %d\n", thread_num);

    assert(thread_num == 0);
    assert(thread);

    if (_status == Idle)
        return;

    assert(_status == Running);

    // The tick event may not be scheduled if the guest has requested
    // the monitor to wait for interrupts. The normal CPU models can
    // get their tick events descheduled by quiesce instructions, but
    // that can't happen here.
    if (tickEvent.scheduled())
        deschedule(tickEvent);

    _status = Idle;
}

void
BaseKvmCPU::deallocateContext(ThreadID thread_num)
{
    // for now, these are equivalent
    suspendContext(thread_num);
}

void
BaseKvmCPU::haltContext(ThreadID thread_num)
{
    // for now, these are equivalent
    suspendContext(thread_num);
}

ThreadContext *
BaseKvmCPU::getContext(int tn)
{
    assert(tn == 0);
    syncThreadContext();
    return tc;
}


Counter
BaseKvmCPU::totalInsts() const
{
    return ctrInsts;
}

Counter
BaseKvmCPU::totalOps() const
{
    hack_once("Pretending totalOps is equivalent to totalInsts()\n");
    return ctrInsts;
}

void
BaseKvmCPU::dump()
{
    inform("State dumping not implemented.");
}

void
BaseKvmCPU::tick()
{
    Tick delay(0);
    assert(_status != Idle);

    switch (_status) {
      case RunningService:
        // handleKvmExit() will determine the next state of the CPU
        delay = handleKvmExit();

        if (tryDrain())
            _status = Idle;
        break;

      case RunningServiceCompletion:
      case Running: {
        EventQueue *q = curEventQueue();
        Tick ticksToExecute(q->nextTick() - curTick());

        // We might need to update the KVM state.
        syncKvmState();

        // Setup any pending instruction count breakpoints using
        // PerfEvent.
        setupInstStop();

        DPRINTF(KvmRun, "Entering KVM...\n");
        if (drainManager) {
            // Force an immediate exit from KVM after completing
            // pending operations. The architecture-specific code
            // takes care to run until it is in a state where it can
            // safely be drained.
            delay = kvmRunDrain();
        } else {
            delay = kvmRun(ticksToExecute);
        }

        // The CPU might have been suspended before entering into
        // KVM. Assume that the CPU was suspended /before/ entering
        // into KVM and skip the exit handling.
        if (_status == Idle)
            break;

        // Entering into KVM implies that we'll have to reload the thread
        // context from KVM if we want to access it.
        // Flag the KVM state as dirty with respect to the cached
        // thread context.
        kvmStateDirty = true;

        // Enter into the RunningService state unless the
        // simulation was stopped by a timer.
        if (_kvmRun->exit_reason != KVM_EXIT_INTR) {
            _status = RunningService;
        } else {
            ++numExitSignal;
            _status = Running;
        }

        // Service any pending instruction events. The vCPU should
        // have exited in time for the event using the instruction
        // counter configured by setupInstStop().
        comInstEventQueue[0]->serviceEvents(ctrInsts);
        system->instEventQueue.serviceEvents(system->totalNumInsts);

        if (tryDrain())
            _status = Idle;
      } break;

      default:
        panic("BaseKvmCPU entered tick() in an illegal state (%i)\n",
              _status);
    }

    // Schedule a new tick if we are still running
    if (_status != Idle)
        schedule(tickEvent, clockEdge(ticksToCycles(delay)));
}

Tick
BaseKvmCPU::kvmRunDrain()
{
    // By default, the only thing we need to drain is a pending IO
    // operation which assumes that we are in the
    // RunningServiceCompletion state.
    assert(_status == RunningServiceCompletion);

    // Deliver the data from the pending IO operation and immediately
    // exit.
    return kvmRun(0);
}

uint64_t
BaseKvmCPU::getHostCycles() const
{
    return hwCycles.read();
}

Tick
BaseKvmCPU::kvmRun(Tick ticks)
{
    Tick ticksExecuted;
    DPRINTF(KvmRun, "KVM: Executing for %i ticks\n", ticks);

    if (ticks == 0) {
        // Setting ticks == 0 is a special case which causes an entry
        // into KVM that finishes pending operations (e.g., IO) and
        // then immediately exits.
        DPRINTF(KvmRun, "KVM: Delivering IO without full guest entry\n");

        ++numVMHalfEntries;

        // Send a KVM_KICK_SIGNAL to the vCPU thread (i.e., this
        // thread). The KVM control signal is masked while executing
        // in gem5 and gets unmasked temporarily when entering
        // KVM. See setSignalMask() and setupSignalHandler().
        kick();

        // Start the vCPU. KVM will check for signals after completing
        // pending operations (IO). Since the KVM_KICK_SIGNAL is
        // pending, this forces an immediate exit to gem5 again. We
        // don't bother to setup timers since this shouldn't actually
        // execute any code (other than completing half-executed IO
        // instructions) in the guest.
        ioctlRun();

        // We always execute at least one cycle to prevent
        // BaseKvmCPU::tick() from being rescheduled on the same tick
        // twice.
        ticksExecuted = clockPeriod();
    } else {
        if (ticks < runTimer->resolution()) {
            DPRINTF(KvmRun, "KVM: Adjusting tick count (%i -> %i)\n",
                    ticks, runTimer->resolution());
            ticks = runTimer->resolution();
        }

        // Get hardware statistics after synchronizing contexts. The KVM
        // state update might affect guest cycle counters.
        uint64_t baseCycles(getHostCycles());
        uint64_t baseInstrs(hwInstructions.read());

        // Arm the run timer and start the cycle timer if it isn't
        // controlled by the overflow timer. Starting/stopping the cycle
        // timer automatically starts the other perf timers as they are in
        // the same counter group.
        runTimer->arm(ticks);
        if (!perfControlledByTimer)
            hwCycles.start();

        ioctlRun();

        runTimer->disarm();
        if (!perfControlledByTimer)
            hwCycles.stop();

        // The control signal may have been delivered after we exited
        // from KVM.
        // It will be pending in that case since it is
        // masked when we aren't executing in KVM. Discard it to make
        // sure we don't deliver it immediately next time we try to
        // enter into KVM.
        discardPendingSignal(KVM_KICK_SIGNAL);

        const uint64_t hostCyclesExecuted(getHostCycles() - baseCycles);
        const uint64_t simCyclesExecuted(hostCyclesExecuted * hostFactor);
        const uint64_t instsExecuted(hwInstructions.read() - baseInstrs);
        ticksExecuted = runTimer->ticksFromHostCycles(hostCyclesExecuted);

        /* Update statistics */
        numCycles += simCyclesExecuted;
        numInsts += instsExecuted;
        ctrInsts += instsExecuted;
        system->totalNumInsts += instsExecuted;

        DPRINTF(KvmRun,
                "KVM: Executed %i instructions in %i cycles "
                "(%i ticks, sim cycles: %i).\n",
                instsExecuted, hostCyclesExecuted, ticksExecuted,
                simCyclesExecuted);
    }

    ++numVMExits;

    return ticksExecuted + flushCoalescedMMIO();
}

void
BaseKvmCPU::kvmNonMaskableInterrupt()
{
    ++numInterrupts;
    if (ioctl(KVM_NMI) == -1)
        panic("KVM: Failed to deliver NMI to virtual CPU\n");
}

void
BaseKvmCPU::kvmInterrupt(const struct kvm_interrupt &interrupt)
{
    ++numInterrupts;
    if (ioctl(KVM_INTERRUPT, (void *)&interrupt) == -1)
        panic("KVM: Failed to deliver interrupt to virtual CPU\n");
}

void
BaseKvmCPU::getRegisters(struct kvm_regs &regs) const
{
    if (ioctl(KVM_GET_REGS, &regs) == -1)
        panic("KVM: Failed to get guest registers\n");
}

void
BaseKvmCPU::setRegisters(const struct kvm_regs &regs)
{
    if (ioctl(KVM_SET_REGS, (void *)&regs) == -1)
        panic("KVM: Failed to set guest registers\n");
}

void
BaseKvmCPU::getSpecialRegisters(struct kvm_sregs &regs) const
{
    if (ioctl(KVM_GET_SREGS, &regs) == -1)
        panic("KVM: Failed to get guest special registers\n");
}

void
BaseKvmCPU::setSpecialRegisters(const struct kvm_sregs &regs)
{
    if (ioctl(KVM_SET_SREGS, (void *)&regs) == -1)
        panic("KVM: Failed to set guest special registers\n");
}

void
BaseKvmCPU::getFPUState(struct kvm_fpu &state) const
{
    if (ioctl(KVM_GET_FPU, &state) == -1)
        panic("KVM: Failed to get guest FPU state\n");
}

void
BaseKvmCPU::setFPUState(const struct kvm_fpu &state)
{
    if (ioctl(KVM_SET_FPU, (void *)&state) == -1)
        panic("KVM: Failed to set guest FPU state\n");
}


void
BaseKvmCPU::setOneReg(uint64_t id, const void *addr)
{
#ifdef KVM_SET_ONE_REG
    struct kvm_one_reg reg;
    reg.id = id;
    reg.addr = (uint64_t)addr;

    if (ioctl(KVM_SET_ONE_REG, &reg) == -1) {
        panic("KVM: Failed to set register (0x%x) value (errno: %i)\n",
              id, errno);
    }
#else
    panic("KVM_SET_ONE_REG is unsupported on this platform.\n");
#endif
}

void
BaseKvmCPU::getOneReg(uint64_t id, void *addr) const
{
#ifdef KVM_GET_ONE_REG
    struct kvm_one_reg reg;
    reg.id = id;
    reg.addr = (uint64_t)addr;

    if (ioctl(KVM_GET_ONE_REG, &reg) == -1) {
        panic("KVM: Failed to get register (0x%x) value (errno: %i)\n",
              id, errno);
    }
#else
    panic("KVM_GET_ONE_REG is unsupported on this platform.\n");
#endif
}

std::string
BaseKvmCPU::getAndFormatOneReg(uint64_t id) const
{
#ifdef KVM_GET_ONE_REG
    std::ostringstream ss;

    ss.setf(std::ios::hex, std::ios::basefield);
    ss.setf(std::ios::showbase);
#define HANDLE_INTTYPE(len)                     \
    case KVM_REG_SIZE_U ## len: {               \
        uint ## len ## _t value;                \
        getOneReg(id, &value);                  \
        ss << value;                            \
    } break

#define HANDLE_ARRAY(len)                       \
    case KVM_REG_SIZE_U ## len: {               \
        uint8_t value[len / 8];                 \
        getOneReg(id, value);                   \
        ss << "[" << value[0];                  \
        for (int i = 1; i < len / 8; ++i)       \
            ss << ", " << value[i];             \
        ss << "]";                              \
    } break

    switch (id & KVM_REG_SIZE_MASK) {
      HANDLE_INTTYPE(8);
      HANDLE_INTTYPE(16);
      HANDLE_INTTYPE(32);
      HANDLE_INTTYPE(64);
      HANDLE_ARRAY(128);
      HANDLE_ARRAY(256);
      HANDLE_ARRAY(512);
      HANDLE_ARRAY(1024);
      default:
        ss << "??";
    }

#undef HANDLE_INTTYPE
#undef HANDLE_ARRAY

    return ss.str();
#else
    panic("KVM_GET_ONE_REG is unsupported on this platform.\n");
#endif
}

void
BaseKvmCPU::syncThreadContext()
{
    if (!kvmStateDirty)
        return;

    assert(!threadContextDirty);

    updateThreadContext();
    kvmStateDirty = false;
}

void
BaseKvmCPU::syncKvmState()
{
    if (!threadContextDirty)
        return;

    assert(!kvmStateDirty);

    updateKvmState();
    threadContextDirty = false;
}

Tick
BaseKvmCPU::handleKvmExit()
{
    DPRINTF(KvmRun, "handleKvmExit (exit_reason: %i)\n", _kvmRun->exit_reason);
    assert(_status == RunningService);

    // Switch into the running state by default. Individual handlers
    // can override this.
    _status = Running;
    switch (_kvmRun->exit_reason) {
      case KVM_EXIT_UNKNOWN:
        return handleKvmExitUnknown();

      case KVM_EXIT_EXCEPTION:
        return handleKvmExitException();

      case KVM_EXIT_IO:
        _status = RunningServiceCompletion;
        ++numIO;
        return handleKvmExitIO();

      case KVM_EXIT_HYPERCALL:
        ++numHypercalls;
        return handleKvmExitHypercall();

      case KVM_EXIT_HLT:
        /* The guest has halted and is waiting for interrupts */
        DPRINTF(Kvm, "handleKvmExitHalt\n");
        ++numHalt;

        // Suspend the thread until the next interrupt arrives
        thread->suspend();

        // This is actually ignored since the thread is suspended.
        return 0;

      case KVM_EXIT_MMIO:
        _status = RunningServiceCompletion;
        /* Service memory mapped IO requests */
        DPRINTF(KvmIO, "KVM: Handling MMIO (w: %u, addr: 0x%x, len: %u)\n",
                _kvmRun->mmio.is_write,
                _kvmRun->mmio.phys_addr, _kvmRun->mmio.len);

        ++numMMIO;
        return doMMIOAccess(_kvmRun->mmio.phys_addr, _kvmRun->mmio.data,
                            _kvmRun->mmio.len, _kvmRun->mmio.is_write);

      case KVM_EXIT_IRQ_WINDOW_OPEN:
        return handleKvmExitIRQWindowOpen();

      case KVM_EXIT_FAIL_ENTRY:
        return handleKvmExitFailEntry();

      case KVM_EXIT_INTR:
        /* KVM was interrupted by a signal, restart it in the next
         * tick.
         */
        return 0;

      case KVM_EXIT_INTERNAL_ERROR:
        panic("KVM: Internal error (suberror: %u)\n",
              _kvmRun->internal.suberror);

      default:
        dump();
        panic("KVM: Unexpected exit (exit_reason: %u)\n", _kvmRun->exit_reason);
    }
}

Tick
BaseKvmCPU::handleKvmExitIO()
{
    panic("KVM: Unhandled guest IO (dir: %i, size: %i, port: 0x%x, count: %i)\n",
          _kvmRun->io.direction, _kvmRun->io.size,
          _kvmRun->io.port, _kvmRun->io.count);
}

Tick
BaseKvmCPU::handleKvmExitHypercall()
{
    panic("KVM: Unhandled hypercall\n");
}

Tick
BaseKvmCPU::handleKvmExitIRQWindowOpen()
{
    warn("KVM: Unhandled IRQ window.\n");
    return 0;
}


Tick
BaseKvmCPU::handleKvmExitUnknown()
{
    dump();
    panic("KVM: Unknown error when starting vCPU (hw reason: 0x%llx)\n",
          _kvmRun->hw.hardware_exit_reason);
}

Tick
BaseKvmCPU::handleKvmExitException()
{
    dump();
    panic("KVM: Got exception when starting vCPU "
          "(exception: %u, error_code: %u)\n",
          _kvmRun->ex.exception, _kvmRun->ex.error_code);
}

Tick
BaseKvmCPU::handleKvmExitFailEntry()
{
    dump();
    panic("KVM: Failed to enter virtualized mode (hw reason: 0x%llx)\n",
          _kvmRun->fail_entry.hardware_entry_failure_reason);
}

Tick
BaseKvmCPU::doMMIOAccess(Addr paddr, void *data, int size, bool write)
{
    ThreadContext *tc(thread->getTC());
    syncThreadContext();

    mmio_req.setPhys(paddr, size, Request::UNCACHEABLE, dataMasterId());
    // Some architectures do need to massage physical addresses a bit
    // before they are inserted into the memory system. This enables
    // APIC accesses on x86 and m5ops where supported through an MMIO
    // interface.
    BaseTLB::Mode tlb_mode(write ? BaseTLB::Write : BaseTLB::Read);
    Fault fault(tc->getDTBPtr()->finalizePhysical(&mmio_req, tc, tlb_mode));
    if (fault != NoFault)
        warn("Finalization of MMIO address failed: %s\n", fault->name());


    const MemCmd cmd(write ? MemCmd::WriteReq : MemCmd::ReadReq);
    Packet pkt(&mmio_req, cmd);
    pkt.dataStatic(data);

    if (mmio_req.isMmappedIpr()) {
        const Cycles ipr_delay(write ?
                               TheISA::handleIprWrite(tc, &pkt) :
                               TheISA::handleIprRead(tc, &pkt));
        return clockPeriod() * ipr_delay;
    } else {
        return dataPort.sendAtomic(&pkt);
    }
}

void
BaseKvmCPU::setSignalMask(const sigset_t *mask)
{
    std::unique_ptr<struct kvm_signal_mask> kvm_mask;

    if (mask) {
        kvm_mask.reset((struct kvm_signal_mask *)operator new(
                           sizeof(struct kvm_signal_mask) + sizeof(*mask)));
        // The kernel and the user-space headers have different ideas
        // about the size of sigset_t. This seems like a massive hack,
        // but is actually what qemu does.
        assert(sizeof(*mask) >= 8);
        kvm_mask->len = 8;
        memcpy(kvm_mask->sigset, mask, kvm_mask->len);
    }

    if (ioctl(KVM_SET_SIGNAL_MASK, (void *)kvm_mask.get()) == -1)
        panic("KVM: Failed to set vCPU signal mask (errno: %i)\n",
              errno);
}

int
BaseKvmCPU::ioctl(int request, long p1) const
{
    if (vcpuFD == -1)
        panic("KVM: CPU ioctl called before initialization\n");

    return ::ioctl(vcpuFD, request, p1);
}

Tick
BaseKvmCPU::flushCoalescedMMIO()
{
    if (!mmioRing)
        return 0;

    DPRINTF(KvmIO, "KVM: Flushing the coalesced MMIO ring buffer\n");

    // TODO: We might need to do synchronization when we start to
    // support multiple CPUs
    Tick ticks(0);
    while (mmioRing->first != mmioRing->last) {
        struct kvm_coalesced_mmio &ent(
            mmioRing->coalesced_mmio[mmioRing->first]);

        DPRINTF(KvmIO, "KVM: Handling coalesced MMIO (addr: 0x%x, len: %u)\n",
                ent.phys_addr, ent.len);

        ++numCoalescedMMIO;
        ticks += doMMIOAccess(ent.phys_addr, ent.data, ent.len, true);

        mmioRing->first = (mmioRing->first + 1) % KVM_COALESCED_MMIO_MAX;
    }

    return ticks;
}

/**
 * Dummy handler for KVM kick signals.
 *
 * @note This function is usually not called since the kernel doesn't
 * seem to deliver signals when the signal is only unmasked when
 * running in KVM. This doesn't matter though since we are only
 * interested in getting KVM to exit, which happens as expected. See
 * setupSignalHandler() and kvmRun() for details about KVM signal
 * handling.
 */
static void
onKickSignal(int signo, siginfo_t *si, void *data)
{
}

void
BaseKvmCPU::setupSignalHandler()
{
    struct sigaction sa;

    memset(&sa, 0, sizeof(sa));
    sa.sa_sigaction = onKickSignal;
    sa.sa_flags = SA_SIGINFO | SA_RESTART;
    if (sigaction(KVM_KICK_SIGNAL, &sa, NULL) == -1)
        panic("KVM: Failed to setup vCPU timer signal handler\n");

    sigset_t sigset;
    if (pthread_sigmask(SIG_BLOCK, NULL, &sigset) == -1)
        panic("KVM: Failed to get signal mask\n");

    // Request KVM to setup the same signal mask as we're currently
    // running with except for the KVM control signal. We'll sometimes
    // need to raise the KVM_KICK_SIGNAL to cause immediate exits from
    // KVM after servicing IO requests. See kvmRun().
    sigdelset(&sigset, KVM_KICK_SIGNAL);
    setSignalMask(&sigset);

    // Mask our control signals so they aren't delivered unless we're
    // actually executing inside KVM.
    sigaddset(&sigset, KVM_KICK_SIGNAL);
    if (pthread_sigmask(SIG_SETMASK, &sigset, NULL) == -1)
        panic("KVM: Failed to mask the KVM control signals\n");
}

bool
BaseKvmCPU::discardPendingSignal(int signum) const
{
    int discardedSignal;

    // Setting the timeout to zero causes sigtimedwait to return
    // immediately.
    struct timespec timeout;
    timeout.tv_sec = 0;
    timeout.tv_nsec = 0;

    sigset_t sigset;
    sigemptyset(&sigset);
    sigaddset(&sigset, signum);

    do {
        discardedSignal = sigtimedwait(&sigset, NULL, &timeout);
    } while (discardedSignal == -1 && errno == EINTR);

    if (discardedSignal == signum)
        return true;
    else if (discardedSignal == -1 && errno == EAGAIN)
        return false;
    else
        panic("Unexpected return value from sigtimedwait: %i (errno: %i)\n",
              discardedSignal, errno);
}

void
BaseKvmCPU::setupCounters()
{
    DPRINTF(Kvm, "Attaching cycle counter...\n");
    PerfKvmCounterConfig cfgCycles(PERF_TYPE_HARDWARE,
                                   PERF_COUNT_HW_CPU_CYCLES);
    cfgCycles.disabled(true)
        .pinned(true);

    // Try to exclude the host. We set both exclude_hv and
    // exclude_host since different architectures use slightly
    // different APIs in the kernel.
    cfgCycles.exclude_hv(true)
        .exclude_host(true);

    if (perfControlledByTimer) {
        // We need to configure the cycles counter to send overflows
        // since we are going to use it to trigger timer signals that
        // trap back into m5 from KVM. In practice, this means that we
        // need to set some non-zero sample period that gets
        // overridden when the timer is armed.
        cfgCycles.wakeupEvents(1)
            .samplePeriod(42);
    }

    hwCycles.attach(cfgCycles,
                    0); // TID (0 => currentThread)

    setupInstCounter();
}

bool
BaseKvmCPU::tryDrain()
{
    if (!drainManager)
        return false;

    if (!archIsDrained()) {
        DPRINTF(Drain, "tryDrain: Architecture code is not ready.\n");
        return false;
    }

    if (_status == Idle || _status == Running) {
        DPRINTF(Drain,
                "tryDrain: CPU transitioned into the Idle state, drain done\n");
        drainManager->signalDrainDone();
        drainManager = NULL;
        return true;
    } else {
        DPRINTF(Drain, "tryDrain: CPU not ready.\n");
        return false;
    }
}

void
BaseKvmCPU::ioctlRun()
{
    if (ioctl(KVM_RUN) == -1) {
        if (errno != EINTR)
            panic("KVM: Failed to start virtual CPU (errno: %i)\n",
                  errno);
    }
}

void
BaseKvmCPU::setupInstStop()
{
    if (comInstEventQueue[0]->empty()) {
        setupInstCounter(0);
    } else {
        const uint64_t next(comInstEventQueue[0]->nextTick());

        assert(next > ctrInsts);
        setupInstCounter(next - ctrInsts);
    }
}

void
BaseKvmCPU::setupInstCounter(uint64_t period)
{
    // No need to do anything if the period isn't changing and the
    // instruction counter has already been attached.
    if (period == activeInstPeriod && hwInstructions.attached())
        return;

    PerfKvmCounterConfig cfgInstructions(PERF_TYPE_HARDWARE,
                                         PERF_COUNT_HW_INSTRUCTIONS);

    // Try to exclude the host. We set both exclude_hv and
    // exclude_host since different architectures use slightly
    // different APIs in the kernel.
    cfgInstructions.exclude_hv(true)
        .exclude_host(true);

    if (period) {
        // Setup a sampling counter if that has been requested.
        cfgInstructions.wakeupEvents(1)
            .samplePeriod(period);
    }

    // We need to detach and re-attach the counter to reliably change
    // sampling settings. See PerfKvmCounter::period() for details.
    if (hwInstructions.attached())
        hwInstructions.detach();
    assert(hwCycles.attached());
    hwInstructions.attach(cfgInstructions,
                          0, // TID (0 => currentThread)
                          hwCycles);

    if (period)
        hwInstructions.enableSignals(KVM_KICK_SIGNAL);

    activeInstPeriod = period;
}