base.cc revision 9651
/*
 * Copyright (c) 2012 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Andreas Sandberg
 */

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>

#include <cerrno>
#include <csignal>
#include <ostream>

#include "arch/utility.hh"
#include "cpu/kvm/base.hh"
#include "debug/Kvm.hh"
#include "debug/KvmIO.hh"
#include "debug/KvmRun.hh"
#include "params/BaseKvmCPU.hh"
#include "sim/process.hh"
#include "sim/system.hh"

/* Used by some KVM macros */
#define PAGE_SIZE pageSize

volatile bool timerOverflowed = false;

static void
onTimerOverflow(int signo, siginfo_t *si, void *data)
{
    timerOverflowed = true;
}

BaseKvmCPU::BaseKvmCPU(BaseKvmCPUParams *params)
    : BaseCPU(params),
      vm(*params->kvmVM),
      _status(Idle),
      dataPort(name() + ".dcache_port", this),
      instPort(name() + ".icache_port", this),
      contextDirty(true),
      vcpuID(vm.allocVCPUID()), vcpuFD(-1), vcpuMMapSize(0),
      _kvmRun(NULL), mmioRing(NULL),
      pageSize(sysconf(_SC_PAGE_SIZE)),
      tickEvent(*this),
      hostFactor(params->hostFactor)
{
    if (pageSize == -1)
        panic("KVM: Failed to determine host page size (%i)\n",
              errno);

    thread = new SimpleThread(this, 0, params->system,
                              params->itb, params->dtb, params->isa[0]);
    thread->setStatus(ThreadContext::Halted);
    tc = thread->getTC();
    threadContexts.push_back(tc);

    setupCounters();
    setupSignalHandler();

    runTimer.reset(new PosixKvmTimer(KVM_TIMER_SIGNAL, CLOCK_MONOTONIC,
                                     params->hostFactor,
                                     params->clock));
}

BaseKvmCPU::~BaseKvmCPU()
{
    if (_kvmRun)
        munmap(_kvmRun, vcpuMMapSize);
    close(vcpuFD);
}

void
BaseKvmCPU::init()
{
    BaseCPU::init();

    if (numThreads != 1)
        fatal("KVM: Multithreading not supported");

    tc->initMemProxies(tc);

    // initialize CPU, including PC
    if (FullSystem && !switchedOut())
        TheISA::initCPU(tc, tc->contextId());

    mmio_req.setThreadContext(tc->contextId(), 0);
}

void
BaseKvmCPU::startup()
{
    Kvm &kvm(vm.kvm);

    BaseCPU::startup();

    assert(vcpuFD == -1);

    // Tell the VM that a CPU is about to start.
    vm.cpuStartup();

    // We can't initialize KVM CPUs in BaseKvmCPU::init() since we are
    // not guaranteed that the parent KVM VM has initialized at that
    // point. Initialize virtual CPUs here instead.
    vcpuFD = vm.createVCPU(vcpuID);

    // Map the KVM run structure
    vcpuMMapSize = kvm.getVCPUMMapSize();
    _kvmRun = (struct kvm_run *)mmap(0, vcpuMMapSize,
                                     PROT_READ | PROT_WRITE, MAP_SHARED,
                                     vcpuFD, 0);
    if (_kvmRun == MAP_FAILED)
        panic("KVM: Failed to map run data structure\n");

    // Setup a pointer to the MMIO ring buffer if coalesced MMIO is
    // available. The offset into the KVM's communication page is
    // provided by the coalesced MMIO capability.
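    // The capability value is the offset, in host pages, of the ring
    // buffer from the start of the kvm_run mapping (hence the
    // multiplication by pageSize below); a value of zero means that
    // the host kernel does not support coalesced MMIO.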
    int mmioOffset(kvm.capCoalescedMMIO());
    if (mmioOffset) {
        inform("KVM: Coalesced IO available\n");
        mmioRing = (struct kvm_coalesced_mmio_ring *)(
            (char *)_kvmRun + (mmioOffset * pageSize));
    } else {
        inform("KVM: Coalesced MMIO not supported by host OS\n");
    }
}

void
BaseKvmCPU::regStats()
{
    using namespace Stats;

    BaseCPU::regStats();

    numVMExits
        .name(name() + ".numVMExits")
        .desc("total number of KVM exits")
        ;

    numMMIO
        .name(name() + ".numMMIO")
        .desc("number of VM exits due to memory mapped IO")
        ;

    numCoalescedMMIO
        .name(name() + ".numCoalescedMMIO")
        .desc("number of coalesced memory mapped IO requests")
        ;

    numIO
        .name(name() + ".numIO")
        .desc("number of VM exits due to legacy IO")
        ;

    numHalt
        .name(name() + ".numHalt")
        .desc("number of VM exits due to wait for interrupt instructions")
        ;

    numInterrupts
        .name(name() + ".numInterrupts")
        .desc("number of interrupts delivered")
        ;

    numHypercalls
        .name(name() + ".numHypercalls")
        .desc("number of hypercalls")
        ;
}

void
BaseKvmCPU::serializeThread(std::ostream &os, ThreadID tid)
{
    assert(tid == 0);
    assert(_status == Idle);
    thread->serialize(os);
}

void
BaseKvmCPU::unserializeThread(Checkpoint *cp, const std::string &section,
                              ThreadID tid)
{
    assert(tid == 0);
    assert(_status == Idle);
    thread->unserialize(cp, section);
    contextDirty = true;
}

unsigned int
BaseKvmCPU::drain(DrainManager *dm)
{
    if (switchedOut())
        return 0;

    DPRINTF(Kvm, "drain\n");

    // De-schedule the tick event so we don't insert any more MMIOs
    // into the system while it is draining.
    if (tickEvent.scheduled())
        deschedule(tickEvent);

    _status = Idle;
    return 0;
}

void
BaseKvmCPU::drainResume()
{
    assert(!tickEvent.scheduled());

    // We might have been switched out. In that case, we don't need to
    // do anything.
    if (switchedOut())
        return;

    DPRINTF(Kvm, "drainResume\n");
    verifyMemoryMode();

    // The tick event is de-scheduled as a part of the draining
    // process. Re-schedule it if the thread context is active.
    if (tc->status() == ThreadContext::Active) {
        schedule(tickEvent, nextCycle());
        _status = Running;
    } else {
        _status = Idle;
    }
}

void
BaseKvmCPU::switchOut()
{
    BaseCPU::switchOut();

    DPRINTF(Kvm, "switchOut\n");

    // We should have drained prior to executing a switchOut, which
    // means that the tick event shouldn't be scheduled and the CPU is
    // idle.
    assert(!tickEvent.scheduled());
    assert(_status == Idle);
}

void
BaseKvmCPU::takeOverFrom(BaseCPU *cpu)
{
    DPRINTF(Kvm, "takeOverFrom\n");

    BaseCPU::takeOverFrom(cpu);

    // We should have drained prior to executing a switchOut, which
    // means that the tick event shouldn't be scheduled and the CPU is
    // idle.
    assert(!tickEvent.scheduled());
    assert(_status == Idle);
    assert(threadContexts.size() == 1);

    // Force a gem5 -> KVM context synchronization
    contextDirty = true;
}

void
BaseKvmCPU::verifyMemoryMode() const
{
    if (!(system->isAtomicMode() && system->bypassCaches())) {
        fatal("The KVM-based CPUs require the memory system to be in the "
              "'atomic_noncaching' mode.\n");
    }
}

void
BaseKvmCPU::wakeup()
{
    DPRINTF(Kvm, "wakeup()\n");

    if (thread->status() != ThreadContext::Suspended)
        return;

    thread->activate();
}

void
BaseKvmCPU::activateContext(ThreadID thread_num, Cycles delay)
{
    DPRINTF(Kvm, "ActivateContext %d (%d cycles)\n", thread_num, delay);

    assert(thread_num == 0);
    assert(thread);

    assert(_status == Idle);
    assert(!tickEvent.scheduled());

    numCycles += ticksToCycles(thread->lastActivate - thread->lastSuspend)
        * hostFactor;

    schedule(tickEvent, clockEdge(delay));
    _status = Running;
}


void
BaseKvmCPU::suspendContext(ThreadID thread_num)
{
    DPRINTF(Kvm, "SuspendContext %d\n", thread_num);

    assert(thread_num == 0);
    assert(thread);

    if (_status == Idle)
        return;

    assert(_status == Running);

    // The tick event may not be scheduled if the guest has requested
    // the monitor to wait for interrupts. The normal CPU models can
    // get their tick events descheduled by quiesce instructions, but
    // that can't happen here.
    if (tickEvent.scheduled())
        deschedule(tickEvent);

    _status = Idle;
}

void
BaseKvmCPU::deallocateContext(ThreadID thread_num)
{
    // for now, these are equivalent
    suspendContext(thread_num);
}

void
BaseKvmCPU::haltContext(ThreadID thread_num)
{
    // for now, these are equivalent
    suspendContext(thread_num);
}

Counter
BaseKvmCPU::totalInsts() const
{
    return hwInstructions.read();
}

Counter
BaseKvmCPU::totalOps() const
{
    hack_once("Pretending totalOps is equivalent to totalInsts()\n");
    return hwInstructions.read();
}

void
BaseKvmCPU::dump()
{
    inform("State dumping not implemented.");
}

void
BaseKvmCPU::tick()
{
    assert(_status == Running);

    DPRINTF(KvmRun, "Entering KVM...\n");

    if (contextDirty) {
        contextDirty = false;
        updateKvmState();
    }

    Tick ticksToExecute(mainEventQueue.nextTick() - curTick());
    Tick ticksExecuted(kvmRun(ticksToExecute));
    updateThreadContext();

    Tick delay(ticksExecuted + handleKvmExit());

    switch (_status) {
      case Running:
        schedule(tickEvent, clockEdge(ticksToCycles(delay)));
        break;

      default:
        /* The CPU is halted or waiting for an interrupt from a
         * device. Don't start it.
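         * The tick event will be rescheduled by activateContext()
         * when the thread context is woken up again, for example by
         * an incoming interrupt.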
         */
        break;
    }
}

Tick
BaseKvmCPU::kvmRun(Tick ticks)
{
    uint64_t baseCycles(hwCycles.read());
    uint64_t baseInstrs(hwInstructions.read());

    if (ticks < runTimer->resolution()) {
        DPRINTF(KvmRun, "KVM: Adjusting tick count (%i -> %i)\n",
                ticks, runTimer->resolution());
        ticks = runTimer->resolution();
    }

    DPRINTF(KvmRun, "KVM: Executing for %i ticks\n", ticks);
    timerOverflowed = false;
    runTimer->arm(ticks);
    startCounters();
    if (ioctl(KVM_RUN) == -1) {
        if (errno != EINTR)
            panic("KVM: Failed to start virtual CPU (errno: %i)\n",
                  errno);
    }
    stopCounters();
    runTimer->disarm();

    uint64_t cyclesExecuted(hwCycles.read() - baseCycles);
    Tick ticksExecuted(runTimer->ticksFromHostCycles(cyclesExecuted));

    if (ticksExecuted < ticks &&
        timerOverflowed &&
        _kvmRun->exit_reason == KVM_EXIT_INTR) {
        // TODO: We should probably do something clever here...
        warn("KVM: Early timer event, requested %i ticks but got %i ticks.\n",
             ticks, ticksExecuted);
    }

    numCycles += cyclesExecuted * hostFactor;
    ++numVMExits;

    DPRINTF(KvmRun, "KVM: Executed %i instructions in %i cycles (%i ticks, sim cycles: %i).\n",
            hwInstructions.read() - baseInstrs,
            cyclesExecuted,
            ticksExecuted,
            cyclesExecuted * hostFactor);

    return ticksExecuted + flushCoalescedMMIO();
}

void
BaseKvmCPU::kvmNonMaskableInterrupt()
{
    ++numInterrupts;
    if (ioctl(KVM_NMI) == -1)
        panic("KVM: Failed to deliver NMI to virtual CPU\n");
}

void
BaseKvmCPU::kvmInterrupt(const struct kvm_interrupt &interrupt)
{
    ++numInterrupts;
    if (ioctl(KVM_INTERRUPT, (void *)&interrupt) == -1)
        panic("KVM: Failed to deliver interrupt to virtual CPU\n");
}

void
BaseKvmCPU::getRegisters(struct kvm_regs &regs) const
{
    if (ioctl(KVM_GET_REGS, &regs) == -1)
        panic("KVM: Failed to get guest registers\n");
}

void
BaseKvmCPU::setRegisters(const struct kvm_regs &regs)
{
    if (ioctl(KVM_SET_REGS, (void *)&regs) == -1)
        panic("KVM: Failed to set guest registers\n");
}

void
BaseKvmCPU::getSpecialRegisters(struct kvm_sregs &regs) const
{
    if (ioctl(KVM_GET_SREGS, &regs) == -1)
        panic("KVM: Failed to get guest special registers\n");
}

void
BaseKvmCPU::setSpecialRegisters(const struct kvm_sregs &regs)
{
    if (ioctl(KVM_SET_SREGS, (void *)&regs) == -1)
        panic("KVM: Failed to set guest special registers\n");
}

void
BaseKvmCPU::getFPUState(struct kvm_fpu &state) const
{
    if (ioctl(KVM_GET_FPU, &state) == -1)
        panic("KVM: Failed to get guest FPU state\n");
}

void
BaseKvmCPU::setFPUState(const struct kvm_fpu &state)
{
    if (ioctl(KVM_SET_FPU, (void *)&state) == -1)
        panic("KVM: Failed to set guest FPU state\n");
}


void
BaseKvmCPU::setOneReg(uint64_t id, const void *addr)
{
#ifdef KVM_SET_ONE_REG
    struct kvm_one_reg reg;
    reg.id = id;
    reg.addr = (uint64_t)addr;

    if (ioctl(KVM_SET_ONE_REG, &reg) == -1) {
        panic("KVM: Failed to set register (0x%x) value (errno: %i)\n",
              id, errno);
    }
#else
    panic("KVM_SET_ONE_REG is unsupported on this platform.\n");
#endif
}

void
BaseKvmCPU::getOneReg(uint64_t id, void *addr) const
{
#ifdef KVM_GET_ONE_REG
    struct kvm_one_reg reg;
    reg.id = id;
    reg.addr = (uint64_t)addr;

    if (ioctl(KVM_GET_ONE_REG, &reg) == -1) {
        panic("KVM: Failed to get register "
              "(0x%x) value (errno: %i)\n",
              id, errno);
    }
#else
    panic("KVM_GET_ONE_REG is unsupported on this platform.\n");
#endif
}

std::string
BaseKvmCPU::getAndFormatOneReg(uint64_t id) const
{
#ifdef KVM_GET_ONE_REG
    std::ostringstream ss;

    ss.setf(std::ios::hex, std::ios::basefield);
    ss.setf(std::ios::showbase);
#define HANDLE_INTTYPE(len)                     \
    case KVM_REG_SIZE_U ## len: {               \
        uint ## len ## _t value;                \
        getOneReg(id, &value);                  \
        ss << value;                            \
    } break

#define HANDLE_ARRAY(len)                       \
    case KVM_REG_SIZE_U ## len: {               \
        uint8_t value[len / 8];                 \
        getOneReg(id, value);                   \
        ss << "[" << value[0];                  \
        for (int i = 1; i < len / 8; ++i)       \
            ss << ", " << value[i];             \
        ss << "]";                              \
    } break

    switch (id & KVM_REG_SIZE_MASK) {
      HANDLE_INTTYPE(8);
      HANDLE_INTTYPE(16);
      HANDLE_INTTYPE(32);
      HANDLE_INTTYPE(64);
      HANDLE_ARRAY(128);
      HANDLE_ARRAY(256);
      HANDLE_ARRAY(512);
      HANDLE_ARRAY(1024);
      default:
        ss << "??";
    }

#undef HANDLE_INTTYPE
#undef HANDLE_ARRAY

    return ss.str();
#else
    panic("KVM_GET_ONE_REG is unsupported on this platform.\n");
#endif
}

Tick
BaseKvmCPU::handleKvmExit()
{
    DPRINTF(KvmRun, "handleKvmExit (exit_reason: %i)\n", _kvmRun->exit_reason);

    switch (_kvmRun->exit_reason) {
      case KVM_EXIT_UNKNOWN:
        return handleKvmExitUnknown();

      case KVM_EXIT_EXCEPTION:
        return handleKvmExitException();

      case KVM_EXIT_IO:
        ++numIO;
        return handleKvmExitIO();

      case KVM_EXIT_HYPERCALL:
        ++numHypercalls;
        return handleKvmExitHypercall();

      case KVM_EXIT_HLT:
        /* The guest has halted and is waiting for interrupts */
        DPRINTF(Kvm, "handleKvmExitHalt\n");
        ++numHalt;

        // Suspend the thread until the next interrupt arrives
        thread->suspend();

        // This is actually ignored since the thread is suspended.
        return 0;

      case KVM_EXIT_MMIO:
        /* Service memory mapped IO requests */
        DPRINTF(KvmIO, "KVM: Handling MMIO (w: %u, addr: 0x%x, len: %u)\n",
                _kvmRun->mmio.is_write,
                _kvmRun->mmio.phys_addr, _kvmRun->mmio.len);

        ++numMMIO;
        return doMMIOAccess(_kvmRun->mmio.phys_addr, _kvmRun->mmio.data,
                            _kvmRun->mmio.len, _kvmRun->mmio.is_write);

      case KVM_EXIT_IRQ_WINDOW_OPEN:
        return handleKvmExitIRQWindowOpen();

      case KVM_EXIT_FAIL_ENTRY:
        return handleKvmExitFailEntry();

      case KVM_EXIT_INTR:
        /* KVM was interrupted by a signal, restart it in the next
         * tick.
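         * This typically happens when the timer armed in kvmRun()
         * delivers KVM_TIMER_SIGNAL, which makes the KVM_RUN ioctl
         * return with EINTR.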
         */
        return 0;

      case KVM_EXIT_INTERNAL_ERROR:
        panic("KVM: Internal error (suberror: %u)\n",
              _kvmRun->internal.suberror);

      default:
        panic("KVM: Unexpected exit (exit_reason: %u)\n", _kvmRun->exit_reason);
    }
}

Tick
BaseKvmCPU::handleKvmExitIO()
{
    panic("KVM: Unhandled guest IO (dir: %i, size: %i, port: 0x%x, count: %i)\n",
          _kvmRun->io.direction, _kvmRun->io.size,
          _kvmRun->io.port, _kvmRun->io.count);
}

Tick
BaseKvmCPU::handleKvmExitHypercall()
{
    panic("KVM: Unhandled hypercall\n");
}

Tick
BaseKvmCPU::handleKvmExitIRQWindowOpen()
{
    warn("KVM: Unhandled IRQ window.\n");
    return 0;
}


Tick
BaseKvmCPU::handleKvmExitUnknown()
{
    panic("KVM: Unknown error when starting vCPU (hw reason: 0x%llx)\n",
          _kvmRun->hw.hardware_exit_reason);
}

Tick
BaseKvmCPU::handleKvmExitException()
{
    panic("KVM: Got exception when starting vCPU "
          "(exception: %u, error_code: %u)\n",
          _kvmRun->ex.exception, _kvmRun->ex.error_code);
}

Tick
BaseKvmCPU::handleKvmExitFailEntry()
{
    panic("KVM: Failed to enter virtualized mode (hw reason: 0x%llx)\n",
          _kvmRun->fail_entry.hardware_entry_failure_reason);
}

Tick
BaseKvmCPU::doMMIOAccess(Addr paddr, void *data, int size, bool write)
{
    mmio_req.setPhys(paddr, size,
                     0, /* flags */
                     dataMasterId());

    const MemCmd cmd(write ? MemCmd::WriteReq : MemCmd::ReadReq);
    Packet pkt(&mmio_req, cmd);
    pkt.dataStatic(data);
    return dataPort.sendAtomic(&pkt);
}

int
BaseKvmCPU::ioctl(int request, long p1) const
{
    if (vcpuFD == -1)
        panic("KVM: CPU ioctl called before initialization\n");

    return ::ioctl(vcpuFD, request, p1);
}

Tick
BaseKvmCPU::flushCoalescedMMIO()
{
    if (!mmioRing)
        return 0;

    DPRINTF(KvmIO, "KVM: Flushing the coalesced MMIO ring buffer\n");

    // TODO: We might need to do synchronization when we start to
    // support multiple CPUs
    Tick ticks(0);
    while (mmioRing->first != mmioRing->last) {
        struct kvm_coalesced_mmio &ent(
            mmioRing->coalesced_mmio[mmioRing->first]);

        DPRINTF(KvmIO, "KVM: Handling coalesced MMIO (addr: 0x%x, len: %u)\n",
                ent.phys_addr, ent.len);

        ++numCoalescedMMIO;
        ticks += doMMIOAccess(ent.phys_addr, ent.data, ent.len, true);

        mmioRing->first = (mmioRing->first + 1) % KVM_COALESCED_MMIO_MAX;
    }

    return ticks;
}

void
BaseKvmCPU::setupSignalHandler()
{
    struct sigaction sa;

    memset(&sa, 0, sizeof(sa));
    sa.sa_sigaction = onTimerOverflow;
    sa.sa_flags = SA_SIGINFO | SA_RESTART;
    if (sigaction(KVM_TIMER_SIGNAL, &sa, NULL) == -1)
        panic("KVM: Failed to setup vCPU signal handler\n");
}

void
BaseKvmCPU::setupCounters()
{
    DPRINTF(Kvm, "Attaching cycle counter...\n");
    PerfKvmCounterConfig cfgCycles(PERF_TYPE_HARDWARE,
                                   PERF_COUNT_HW_CPU_CYCLES);
    cfgCycles.disabled(true)
        .pinned(true);
    hwCycles.attach(cfgCycles,
                    0); // TID (0 => currentThread)

    DPRINTF(Kvm, "Attaching instruction counter...\n");
    PerfKvmCounterConfig cfgInstructions(PERF_TYPE_HARDWARE,
                                         PERF_COUNT_HW_INSTRUCTIONS);
    hwInstructions.attach(cfgInstructions,
                          0, // TID (0 => currentThread)
                          hwCycles);
}

void
BaseKvmCPU::startCounters()
{
    // We only need to start/stop the hwCycles counter since hwCycles
    // and hwInstructions are a counter group with
    // hwCycles as the group leader.
    hwCycles.start();
}

void
BaseKvmCPU::stopCounters()
{
    hwCycles.stop();
}