// x86_cpu.cc revision 13611:c8b7847b4171
/*
 * Copyright (c) 2013 Andreas Sandberg
 * All rights reserved
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Andreas Sandberg
 */

#include "cpu/kvm/x86_cpu.hh"

#include <linux/kvm.h>

#include <algorithm>
#include <cerrno>
#include <memory>

#include "arch/registers.hh"
#include "arch/x86/cpuid.hh"
#include "arch/x86/regs/msr.hh"
#include "arch/x86/utility.hh"
#include "cpu/kvm/base.hh"
#include "debug/Drain.hh"
#include "debug/Kvm.hh"
#include "debug/KvmContext.hh"
#include "debug/KvmIO.hh"
#include "debug/KvmInt.hh"

using namespace X86ISA;

#define MSR_TSC 0x10

#define IO_PCI_CONF_ADDR 0xCF8
#define IO_PCI_CONF_DATA_BASE 0xCFC

// Task segment type of an inactive 32-bit or 64-bit task
#define SEG_SYS_TYPE_TSS_AVAILABLE 9
// Task segment type of an active 32-bit or 64-bit task
#define SEG_SYS_TYPE_TSS_BUSY 11

// Non-conforming accessed code segment
#define SEG_CS_TYPE_ACCESSED 9
// Non-conforming accessed code segment that can be read
#define SEG_CS_TYPE_READ_ACCESSED 11

// The lowest bit of the type field for normal segments (code and
// data) is used to indicate that a segment has been accessed.
#define SEG_TYPE_BIT_ACCESSED 1
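
// For orientation: the constants above are pre-combined values of the
// 4-bit descriptor type field (per the Intel SDM descriptor-type
// encoding). For code/data descriptors the bits are:
//
//     bit 3: 1 = code segment, 0 = data segment
//     bit 2: conforming (code) / expand-down (data)
//     bit 1: readable (code) / writable (data)
//     bit 0: accessed (SEG_TYPE_BIT_ACCESSED)
//
// E.g., SEG_CS_TYPE_READ_ACCESSED == 11 == 0b1011: code,
// non-conforming, readable, accessed.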

struct FXSave
{
    uint16_t fcw;
    uint16_t fsw;
    uint8_t ftwx;
    uint8_t pad0;
    uint16_t last_opcode;
    union {
        struct {
            uint32_t fpu_ip;
            uint16_t fpu_cs;
            uint16_t pad1;
            uint32_t fpu_dp;
            uint16_t fpu_ds;
            uint16_t pad2;
        } ctrl32;

        struct {
            uint64_t fpu_ip;
            uint64_t fpu_dp;
        } ctrl64;
    };
    uint32_t mxcsr;
    uint32_t mxcsr_mask;

    uint8_t fpr[8][16];
    uint8_t xmm[16][16];

    uint64_t reserved[12];
} M5_ATTR_PACKED;

static_assert(sizeof(FXSave) == 512, "Unexpected size of FXSave");

#define FOREACH_IREG()                          \
    do {                                        \
        APPLY_IREG(rax, INTREG_RAX);            \
        APPLY_IREG(rbx, INTREG_RBX);            \
        APPLY_IREG(rcx, INTREG_RCX);            \
        APPLY_IREG(rdx, INTREG_RDX);            \
        APPLY_IREG(rsi, INTREG_RSI);            \
        APPLY_IREG(rdi, INTREG_RDI);            \
        APPLY_IREG(rsp, INTREG_RSP);            \
        APPLY_IREG(rbp, INTREG_RBP);            \
        APPLY_IREG(r8, INTREG_R8);              \
        APPLY_IREG(r9, INTREG_R9);              \
        APPLY_IREG(r10, INTREG_R10);            \
        APPLY_IREG(r11, INTREG_R11);            \
        APPLY_IREG(r12, INTREG_R12);            \
        APPLY_IREG(r13, INTREG_R13);            \
        APPLY_IREG(r14, INTREG_R14);            \
        APPLY_IREG(r15, INTREG_R15);            \
    } while (0)

#define FOREACH_SREG()                                  \
    do {                                                \
        APPLY_SREG(cr0, MISCREG_CR0);                   \
        APPLY_SREG(cr2, MISCREG_CR2);                   \
        APPLY_SREG(cr3, MISCREG_CR3);                   \
        APPLY_SREG(cr4, MISCREG_CR4);                   \
        APPLY_SREG(cr8, MISCREG_CR8);                   \
        APPLY_SREG(efer, MISCREG_EFER);                 \
        APPLY_SREG(apic_base, MISCREG_APIC_BASE);       \
    } while (0)

#define FOREACH_DREG()                          \
    do {                                        \
        APPLY_DREG(db[0], MISCREG_DR0);         \
        APPLY_DREG(db[1], MISCREG_DR1);         \
        APPLY_DREG(db[2], MISCREG_DR2);         \
        APPLY_DREG(db[3], MISCREG_DR3);         \
        APPLY_DREG(dr6, MISCREG_DR6);           \
        APPLY_DREG(dr7, MISCREG_DR7);           \
    } while (0)

#define FOREACH_SEGMENT()                                       \
    do {                                                        \
        APPLY_SEGMENT(cs, MISCREG_CS - MISCREG_SEG_SEL_BASE);   \
        APPLY_SEGMENT(ds, MISCREG_DS - MISCREG_SEG_SEL_BASE);   \
        APPLY_SEGMENT(es, MISCREG_ES - MISCREG_SEG_SEL_BASE);   \
        APPLY_SEGMENT(fs, MISCREG_FS - MISCREG_SEG_SEL_BASE);   \
        APPLY_SEGMENT(gs, MISCREG_GS - MISCREG_SEG_SEL_BASE);   \
        APPLY_SEGMENT(ss, MISCREG_SS - MISCREG_SEG_SEL_BASE);   \
        APPLY_SEGMENT(tr, MISCREG_TR - MISCREG_SEG_SEL_BASE);   \
        APPLY_SEGMENT(ldt, MISCREG_TSL - MISCREG_SEG_SEL_BASE); \
    } while (0)

#define FOREACH_DTABLE()                                        \
    do {                                                        \
        APPLY_DTABLE(gdt, MISCREG_TSG - MISCREG_SEG_SEL_BASE);  \
        APPLY_DTABLE(idt, MISCREG_IDTR - MISCREG_SEG_SEL_BASE); \
    } while (0)

template<typename STRUCT, typename ENTRY>
static STRUCT *newVarStruct(size_t entries)
{
    return (STRUCT *)operator new(sizeof(STRUCT) + entries * sizeof(ENTRY));
}
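
// newVarStruct() allocates raw, uninitialized storage for a
// kernel-style variable-length struct (a fixed header followed by a
// flexible array of entries); the caller must fill in the header and
// entries. A typical (hypothetical) use, mirroring what dumpMSRs()
// below does:
//
//     std::unique_ptr<struct kvm_msrs> msrs(
//         newVarStruct<struct kvm_msrs, struct kvm_msr_entry>(2));
//     msrs->nmsrs = 2;
//     msrs->entries[0].index = ...;
//
// This is only safe for plain-old-data types such as the KVM ABI
// structs, since no constructors or destructors are run.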

static void
dumpKvm(const struct kvm_regs &regs)
{
    inform("KVM register state:\n");

#define APPLY_IREG(kreg, mreg)                  \
    inform("\t" # kreg ": 0x%llx\n", regs.kreg)

    FOREACH_IREG();

#undef APPLY_IREG

    inform("\trip: 0x%llx\n", regs.rip);
    inform("\trflags: 0x%llx\n", regs.rflags);
}

static void
dumpKvm(const char *reg_name, const struct kvm_segment &seg)
{
    inform("\t%s: @0x%llx+%x [sel: 0x%x, type: 0x%x]\n"
           "\t\tpres.: %u, dpl: %u, db: %u, s: %u, l: %u, g: %u, "
           "avl: %u, unus.: %u\n",
           reg_name,
           seg.base, seg.limit, seg.selector, seg.type,
           seg.present, seg.dpl, seg.db, seg.s, seg.l, seg.g, seg.avl,
           seg.unusable);
}

static void
dumpKvm(const char *reg_name, const struct kvm_dtable &dtable)
{
    inform("\t%s: @0x%llx+%x\n",
           reg_name, dtable.base, dtable.limit);
}

static void
dumpKvm(const struct kvm_sregs &sregs)
{
#define APPLY_SREG(kreg, mreg)                  \
    inform("\t" # kreg ": 0x%llx\n", sregs.kreg);
#define APPLY_SEGMENT(kreg, idx)                \
    dumpKvm(# kreg, sregs.kreg);
#define APPLY_DTABLE(kreg, idx)                 \
    dumpKvm(# kreg, sregs.kreg);

    inform("Special registers:\n");
    FOREACH_SEGMENT();
    FOREACH_SREG();
    FOREACH_DTABLE();

    inform("Interrupt Bitmap:");
    for (int i = 0; i < KVM_NR_INTERRUPTS; i += 64)
        inform(" 0x%.8x", sregs.interrupt_bitmap[i / 64]);

#undef APPLY_SREG
#undef APPLY_SEGMENT
#undef APPLY_DTABLE
}

#ifdef KVM_GET_DEBUGREGS
static void
dumpKvm(const struct kvm_debugregs &regs)
{
    inform("KVM debug state:\n");

#define APPLY_DREG(kreg, mreg)                  \
    inform("\t" # kreg ": 0x%llx\n", regs.kreg)

    FOREACH_DREG();

#undef APPLY_DREG

    inform("\tflags: 0x%llx\n", regs.flags);
}
#endif

static void
dumpFpuSpec(const struct FXSave &xs)
{
    inform("\tlast_ip: 0x%x\n", xs.ctrl64.fpu_ip);
    inform("\tlast_dp: 0x%x\n", xs.ctrl64.fpu_dp);
    inform("\tmxcsr_mask: 0x%x\n", xs.mxcsr_mask);
}

static void
dumpFpuSpec(const struct kvm_fpu &fpu)
{
    inform("\tlast_ip: 0x%x\n", fpu.last_ip);
    inform("\tlast_dp: 0x%x\n", fpu.last_dp);
}

template<typename T>
static void
dumpFpuCommon(const T &fpu)
{
    const unsigned top((fpu.fsw >> 11) & 0x7);
    inform("\tfcw: 0x%x\n", fpu.fcw);

    inform("\tfsw: 0x%x (top: %i, "
           "conditions: %s%s%s%s, exceptions: %s%s%s%s%s%s %s%s%s)\n",
           fpu.fsw, top,

           (fpu.fsw & CC0Bit) ? "C0" : "",
           (fpu.fsw & CC1Bit) ? "C1" : "",
           (fpu.fsw & CC2Bit) ? "C2" : "",
           (fpu.fsw & CC3Bit) ? "C3" : "",

           (fpu.fsw & IEBit) ? "I" : "",
           (fpu.fsw & DEBit) ? "D" : "",
           (fpu.fsw & ZEBit) ? "Z" : "",
           (fpu.fsw & OEBit) ? "O" : "",
           (fpu.fsw & UEBit) ? "U" : "",
           (fpu.fsw & PEBit) ? "P" : "",

           (fpu.fsw & StackFaultBit) ? "SF " : "",
           (fpu.fsw & ErrSummaryBit) ? "ES " : "",
           (fpu.fsw & BusyBit) ? "BUSY " : ""
        );
    inform("\tftwx: 0x%x\n", fpu.ftwx);
    inform("\tlast_opcode: 0x%x\n", fpu.last_opcode);
    dumpFpuSpec(fpu);
    inform("\tmxcsr: 0x%x\n", fpu.mxcsr);
    inform("\tFP Stack:\n");
    for (int i = 0; i < 8; ++i) {
        const unsigned reg_idx((i + top) & 0x7);
        const bool empty(!((fpu.ftwx >> reg_idx) & 0x1));
        const double value(X86ISA::loadFloat80(fpu.fpr[i]));
        char hex[33];
        for (int j = 0; j < 10; ++j)
            snprintf(&hex[j*2], 3, "%.2x", fpu.fpr[i][j]);
        inform("\t\tST%i/%i: 0x%s (%f)%s\n", i, reg_idx,
               hex, value, empty ? " (e)" : "");
    }
    inform("\tXMM registers:\n");
    for (int i = 0; i < 16; ++i) {
        char hex[33];
        for (int j = 0; j < 16; ++j)
            snprintf(&hex[j*2], 3, "%.2x", fpu.xmm[i][j]);
        inform("\t\t%i: 0x%s\n", i, hex);
    }
}

static void
dumpKvm(const struct kvm_fpu &fpu)
{
    inform("FPU registers:\n");
    dumpFpuCommon(fpu);
}

static void
dumpKvm(const struct kvm_xsave &xsave)
{
    inform("FPU registers (XSave):\n");
    dumpFpuCommon(*(FXSave *)xsave.region);
}

static void
dumpKvm(const struct kvm_msrs &msrs)
{
    inform("MSRs:\n");

    for (int i = 0; i < msrs.nmsrs; ++i) {
        const struct kvm_msr_entry &e(msrs.entries[i]);

        inform("\t0x%x: 0x%x\n", e.index, e.data);
    }
}

static void
dumpKvm(const struct kvm_xcrs &regs)
{
    inform("KVM XCR registers:\n");

    inform("\tFlags: 0x%x\n", regs.flags);
    for (int i = 0; i < regs.nr_xcrs; ++i) {
        inform("\tXCR[0x%x]: 0x%x\n",
               regs.xcrs[i].xcr,
               regs.xcrs[i].value);
    }
}

static void
dumpKvm(const struct kvm_vcpu_events &events)
{
    inform("vCPU events:\n");

    inform("\tException: [inj: %i, nr: %i, has_ec: %i, ec: %i]\n",
           events.exception.injected, events.exception.nr,
           events.exception.has_error_code, events.exception.error_code);

    inform("\tInterrupt: [inj: %i, nr: %i, soft: %i]\n",
           events.interrupt.injected, events.interrupt.nr,
           events.interrupt.soft);

    inform("\tNMI: [inj: %i, pending: %i, masked: %i]\n",
           events.nmi.injected, events.nmi.pending,
           events.nmi.masked);

    inform("\tSIPI vector: 0x%x\n", events.sipi_vector);
    inform("\tFlags: 0x%x\n", events.flags);
}

static bool
isCanonicalAddress(uint64_t addr)
{
    // x86-64 doesn't currently use the full 64-bit virtual address
    // space; instead it uses signed 48-bit addresses that are
    // sign-extended to 64 bits. Such addresses are known as
    // "canonical".
    uint64_t upper_half(addr & 0xffff800000000000ULL);
    return upper_half == 0 || upper_half == 0xffff800000000000ULL;
}
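
// Illustration (the values follow from the mask above, which covers
// bits 63..47):
//
//     isCanonicalAddress(0x00007fffffffffff) -> true  (upper bits all 0)
//     isCanonicalAddress(0xffff800000000000) -> true  (upper bits all 1)
//     isCanonicalAddress(0x0000800000000000) -> false (bit 47 set,
//                                                      bits 63..48 clear)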

static void
checkSeg(const char *name, const int idx, const struct kvm_segment &seg,
         struct kvm_sregs sregs)
{
    // Check the register base
    switch (idx) {
      case MISCREG_TSL:
      case MISCREG_TR:
      case MISCREG_FS:
      case MISCREG_GS:
        if (!isCanonicalAddress(seg.base))
            warn("Illegal %s base: 0x%x\n", name, seg.base);
        break;

      case MISCREG_SS:
      case MISCREG_DS:
      case MISCREG_ES:
        if (seg.unusable)
            break;
        M5_FALLTHROUGH;
      case MISCREG_CS:
        if (seg.base & 0xffffffff00000000ULL)
            warn("Illegal %s base: 0x%x\n", name, seg.base);
        break;
    }

    // Check the type
    switch (idx) {
      case MISCREG_CS:
        switch (seg.type) {
          case 3:
            if (seg.dpl != 0)
                warn("CS type is 3 but dpl != 0.\n");
            break;
          case 9:
          case 11:
            if (seg.dpl != sregs.ss.dpl)
                warn("CS type is %i but CS DPL != SS DPL\n", seg.type);
            break;
          case 13:
          case 15:
            if (seg.dpl > sregs.ss.dpl)
                warn("CS type is %i but CS DPL > SS DPL\n", seg.type);
            break;
          default:
            warn("Illegal CS type: %i\n", seg.type);
            break;
        }
        break;

      case MISCREG_SS:
        if (seg.unusable)
            break;
        switch (seg.type) {
          case 3:
            if (sregs.cs.type == 3 && seg.dpl != 0)
                warn("CS type is 3, but SS DPL != 0.\n");
            M5_FALLTHROUGH;
          case 7:
            if (!(sregs.cr0 & 1) && seg.dpl != 0)
                warn("SS DPL is %i, but CR0 PE is 0\n", seg.dpl);
            break;
          default:
            warn("Illegal SS type: %i\n", seg.type);
            break;
        }
        break;

      case MISCREG_DS:
      case MISCREG_ES:
      case MISCREG_FS:
      case MISCREG_GS:
        if (seg.unusable)
            break;
        if (!(seg.type & 0x1) ||
            ((seg.type & 0x8) && !(seg.type & 0x2)))
            warn("%s has an illegal type field: %i\n", name, seg.type);
        break;

      case MISCREG_TR:
        // TODO: We should check the CPU mode
        if (seg.type != 3 && seg.type != 11)
            warn("%s: Illegal segment type (%i)\n", name, seg.type);
        break;

      case MISCREG_TSL:
        if (seg.unusable)
            break;
        if (seg.type != 2)
            warn("%s: Illegal segment type (%i)\n", name, seg.type);
        break;
    }

    switch (idx) {
      case MISCREG_SS:
      case MISCREG_DS:
      case MISCREG_ES:
      case MISCREG_FS:
      case MISCREG_GS:
        if (seg.unusable)
            break;
        M5_FALLTHROUGH;
      case MISCREG_CS:
        if (!seg.s)
            warn("%s: S flag not set\n", name);
        break;

      case MISCREG_TSL:
        if (seg.unusable)
            break;
        M5_FALLTHROUGH;
      case MISCREG_TR:
        if (seg.s)
            warn("%s: S flag is set\n", name);
        break;
    }

    switch (idx) {
      case MISCREG_SS:
      case MISCREG_DS:
      case MISCREG_ES:
      case MISCREG_FS:
      case MISCREG_GS:
      case MISCREG_TSL:
        if (seg.unusable)
            break;
        M5_FALLTHROUGH;
      case MISCREG_TR:
      case MISCREG_CS:
        if (!seg.present)
            warn("%s: P flag not set\n", name);

        if (((seg.limit & 0xFFF) == 0 && seg.g) ||
            ((seg.limit & 0xFFF00000) != 0 && !seg.g)) {
            warn("%s limit (0x%x) and g (%i) combination is illegal.\n",
                 name, seg.limit, seg.g);
        }
        break;
    }

    // TODO: Check CS DB
}
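
// Note on the limit/granularity check at the end of checkSeg(): it
// approximates the VMX guest-state rule. kvm_segment::limit holds the
// expanded byte limit; with g == 1 the limit is built from 4 KiB
// pages, so its low 12 bits must all be ones (e.g., 0xffffffff is
// legal, 0xfffff000 is not), and with g == 0 the limit must fit in
// 20 bits, so bits 31..20 must be zero.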

X86KvmCPU::X86KvmCPU(X86KvmCPUParams *params)
    : BaseKvmCPU(params),
      useXSave(params->useXSave)
{
    Kvm &kvm(*vm.kvm);

    if (!kvm.capSetTSSAddress())
        panic("KVM: Missing capability (KVM_CAP_SET_TSS_ADDR)\n");
    if (!kvm.capExtendedCPUID())
        panic("KVM: Missing capability (KVM_CAP_EXT_CPUID)\n");
    if (!kvm.capUserNMI())
        warn("KVM: Missing capability (KVM_CAP_USER_NMI)\n");
    if (!kvm.capVCPUEvents())
        warn("KVM: Missing capability (KVM_CAP_VCPU_EVENTS)\n");

    haveDebugRegs = kvm.capDebugRegs();
    haveXSave = kvm.capXSave();
    haveXCRs = kvm.capXCRs();

    if (useXSave && !haveXSave) {
        warn("KVM: XSAVE not supported by host. MXCSR synchronization "
             "might be unreliable due to kernel bugs.\n");
        useXSave = false;
    } else if (!useXSave) {
        warn("KVM: XSave FPU/SIMD synchronization disabled by user.\n");
    }
}

X86KvmCPU::~X86KvmCPU()
{
}

void
X86KvmCPU::startup()
{
    BaseKvmCPU::startup();

    updateCPUID();

    // TODO: Do we need to create an identity mapped TSS area? We
    // should call kvm.vm.setTSSAddress() here in that case. It should
    // only be needed for old versions of the virtualization
    // extensions. We should make sure that the identity range is
    // reserved in the e820 memory map in that case.
}

void
X86KvmCPU::dump() const
{
    dumpIntRegs();
    if (useXSave)
        dumpXSave();
    else
        dumpFpuRegs();
    dumpSpecRegs();
    dumpDebugRegs();
    dumpXCRs();
    dumpVCpuEvents();
    dumpMSRs();
}

void
X86KvmCPU::dumpFpuRegs() const
{
    struct kvm_fpu fpu;
    getFPUState(fpu);
    dumpKvm(fpu);
}

void
X86KvmCPU::dumpIntRegs() const
{
    struct kvm_regs regs;
    getRegisters(regs);
    dumpKvm(regs);
}

void
X86KvmCPU::dumpSpecRegs() const
{
    struct kvm_sregs sregs;
    getSpecialRegisters(sregs);
    dumpKvm(sregs);
}

void
X86KvmCPU::dumpDebugRegs() const
{
    if (haveDebugRegs) {
#ifdef KVM_GET_DEBUGREGS
        struct kvm_debugregs dregs;
        getDebugRegisters(dregs);
        dumpKvm(dregs);
#endif
    } else {
        inform("Debug registers not supported by kernel.\n");
    }
}

void
X86KvmCPU::dumpXCRs() const
{
    if (haveXCRs) {
        struct kvm_xcrs xcrs;
        getXCRs(xcrs);
        dumpKvm(xcrs);
    } else {
        inform("XCRs not supported by kernel.\n");
    }
}

void
X86KvmCPU::dumpXSave() const
{
    if (haveXSave) {
        struct kvm_xsave xsave;
        getXSave(xsave);
        dumpKvm(xsave);
    } else {
        inform("XSave not supported by kernel.\n");
    }
}

void
X86KvmCPU::dumpVCpuEvents() const
{
    struct kvm_vcpu_events events;
    getVCpuEvents(events);
    dumpKvm(events);
}

void
X86KvmCPU::dumpMSRs() const
{
    const Kvm::MSRIndexVector &supported_msrs(vm.kvm->getSupportedMSRs());
    std::unique_ptr<struct kvm_msrs> msrs(
        newVarStruct<struct kvm_msrs, struct kvm_msr_entry>(
            supported_msrs.size()));

    msrs->nmsrs = supported_msrs.size();
    for (int i = 0; i < supported_msrs.size(); ++i) {
        struct kvm_msr_entry &e(msrs->entries[i]);
        e.index = supported_msrs[i];
        e.reserved = 0;
        e.data = 0;
    }
    getMSRs(*msrs.get());

    dumpKvm(*msrs.get());
}

void
X86KvmCPU::updateKvmState()
{
    updateKvmStateRegs();
    updateKvmStateSRegs();
    updateKvmStateFPU();
    updateKvmStateMSRs();

    DPRINTF(KvmContext, "X86KvmCPU::updateKvmState():\n");
    if (DTRACE(KvmContext))
        dump();
}
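
// Orientation for the synchronization code below: the
// updateKvmState*() methods copy architectural state from the gem5
// thread context into the KVM vCPU before entering the guest, while
// the updateThreadContext*() methods further down copy it back after
// KVM returns. Both families are split by state class (integer
// registers, special registers, FPU/SIMD, and MSRs), mirroring the
// KVM ioctl interface.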

void
X86KvmCPU::updateKvmStateRegs()
{
    struct kvm_regs regs;

#define APPLY_IREG(kreg, mreg) regs.kreg = tc->readIntReg(mreg)
    FOREACH_IREG();
#undef APPLY_IREG

    regs.rip = tc->instAddr() - tc->readMiscReg(MISCREG_CS_BASE);

    /* You might think that setting regs.rflags to the contents of
     * MISCREG_RFLAGS here would suffice. In that case you're
     * mistaken. We need to reconstruct it from a bunch of ucode
     * registers and wave a dead chicken over it (aka mask out and set
     * reserved bits) to get it to work.
     */
    regs.rflags = X86ISA::getRFlags(tc);

    setRegisters(regs);
}

static inline void
setKvmSegmentReg(ThreadContext *tc, struct kvm_segment &kvm_seg,
                 const int index)
{
    SegAttr attr(tc->readMiscRegNoEffect(MISCREG_SEG_ATTR(index)));

    kvm_seg.base = tc->readMiscRegNoEffect(MISCREG_SEG_BASE(index));
    kvm_seg.limit = tc->readMiscRegNoEffect(MISCREG_SEG_LIMIT(index));
    kvm_seg.selector = tc->readMiscRegNoEffect(MISCREG_SEG_SEL(index));
    kvm_seg.type = attr.type;
    kvm_seg.present = attr.present;
    kvm_seg.dpl = attr.dpl;
    kvm_seg.db = attr.defaultSize;
    kvm_seg.s = attr.system;
    kvm_seg.l = attr.longMode;
    kvm_seg.g = attr.granularity;
    kvm_seg.avl = attr.avl;

    // A segment is normally unusable when the selector is zero. There
    // is an attr.unusable flag in gem5, but it seems unused. qemu
    // seems to set this to 0 all the time, so we just do the same and
    // hope for the best.
    kvm_seg.unusable = 0;
}

static inline void
setKvmDTableReg(ThreadContext *tc, struct kvm_dtable &kvm_dtable,
                const int index)
{
    kvm_dtable.base = tc->readMiscRegNoEffect(MISCREG_SEG_BASE(index));
    kvm_dtable.limit = tc->readMiscRegNoEffect(MISCREG_SEG_LIMIT(index));
}

static void
forceSegAccessed(struct kvm_segment &seg)
{
    // Intel's VMX requires that (some) usable segments are flagged as
    // 'accessed' (i.e., the lowest bit in the segment type is set)
    // when entering VMX. This wouldn't necessarily be the case even
    // if gem5 did set the access bits correctly, so we force the bit
    // to one for usable segments.
    if (!seg.unusable)
        seg.type |= SEG_TYPE_BIT_ACCESSED;
}
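
// Illustration of forceSegAccessed(), using the descriptor type bits
// described near the top of this file: a read/write data segment of
// type 2 (0b0010) becomes type 3 (0b0011), and an execute/read code
// segment of type 10 (0b1010) becomes type 11 (0b1011). Since this is
// a bitwise OR, segments that already have the accessed bit set are
// left unchanged.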

void
X86KvmCPU::updateKvmStateSRegs()
{
    struct kvm_sregs sregs;

#define APPLY_SREG(kreg, mreg) sregs.kreg = tc->readMiscRegNoEffect(mreg)
#define APPLY_SEGMENT(kreg, idx) setKvmSegmentReg(tc, sregs.kreg, idx)
#define APPLY_DTABLE(kreg, idx) setKvmDTableReg(tc, sregs.kreg, idx)

    FOREACH_SREG();
    FOREACH_SEGMENT();
    FOREACH_DTABLE();

#undef APPLY_SREG
#undef APPLY_SEGMENT
#undef APPLY_DTABLE

    // Clear the interrupt bitmap
    memset(&sregs.interrupt_bitmap, 0, sizeof(sregs.interrupt_bitmap));

    // VMX requires CS, SS, DS, ES, FS, and GS to have the accessed
    // bit in the type field set.
    forceSegAccessed(sregs.cs);
    forceSegAccessed(sregs.ss);
    forceSegAccessed(sregs.ds);
    forceSegAccessed(sregs.es);
    forceSegAccessed(sregs.fs);
    forceSegAccessed(sregs.gs);

    // There are currently some cases where the active task isn't
    // marked as busy. This is illegal in VMX, so we force it to busy.
    if (sregs.tr.type == SEG_SYS_TYPE_TSS_AVAILABLE) {
        hack("tr.type (%i) is not busy. Forcing the busy bit.\n",
             sregs.tr.type);
        sregs.tr.type = SEG_SYS_TYPE_TSS_BUSY;
    }

    // VMX requires the DPL of SS and CS to be the same for
    // non-conforming code segments. It seems like m5 doesn't set the
    // DPL of SS correctly when taking interrupts, so we need to fix
    // that here.
    if ((sregs.cs.type == SEG_CS_TYPE_ACCESSED ||
         sregs.cs.type == SEG_CS_TYPE_READ_ACCESSED) &&
        sregs.cs.dpl != sregs.ss.dpl) {

        hack("CS.DPL (%i) != SS.DPL (%i): Forcing SS.DPL to %i\n",
             sregs.cs.dpl, sregs.ss.dpl, sregs.cs.dpl);
        sregs.ss.dpl = sregs.cs.dpl;
    }

    // Do checks after fixing up the state to avoid getting excessive
    // amounts of warnings.
    RFLAGS rflags_nocc(tc->readMiscReg(MISCREG_RFLAGS));
    if (!rflags_nocc.vm) {
        // Do segment verification if the CPU isn't entering virtual
        // 8086 mode. We currently assume that unrestricted guest
        // mode is available.

#define APPLY_SEGMENT(kreg, idx) \
        checkSeg(# kreg, idx + MISCREG_SEG_SEL_BASE, sregs.kreg, sregs)

        FOREACH_SEGMENT();
#undef APPLY_SEGMENT
    }

    setSpecialRegisters(sregs);
}

template <typename T>
static void
updateKvmStateFPUCommon(ThreadContext *tc, T &fpu)
{
    fpu.mxcsr = tc->readMiscRegNoEffect(MISCREG_MXCSR);
    fpu.fcw = tc->readMiscRegNoEffect(MISCREG_FCW);
    // No need to rebuild from MISCREG_FSW and MISCREG_TOP if we read
    // with effects.
    fpu.fsw = tc->readMiscReg(MISCREG_FSW);

    uint64_t ftw(tc->readMiscRegNoEffect(MISCREG_FTW));
    fpu.ftwx = X86ISA::convX87TagsToXTags(ftw);

    fpu.last_opcode = tc->readMiscRegNoEffect(MISCREG_FOP);

    const unsigned top((fpu.fsw >> 11) & 0x7);
    for (int i = 0; i < 8; ++i) {
        const unsigned reg_idx((i + top) & 0x7);
        const double value(bitsToFloat64(
            tc->readFloatReg(FLOATREG_FPR(reg_idx))));
        DPRINTF(KvmContext, "Setting KVM FP reg %i (st[%i]) := %f\n",
                reg_idx, i, value);
        X86ISA::storeFloat80(fpu.fpr[i], value);
    }

    // TODO: We should update the MMX state

    for (int i = 0; i < 16; ++i) {
        *(uint64_t *)&fpu.xmm[i][0] =
            tc->readFloatReg(FLOATREG_XMM_LOW(i));
        *(uint64_t *)&fpu.xmm[i][8] =
            tc->readFloatReg(FLOATREG_XMM_HIGH(i));
    }
}
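
// Two x87 details used by updateKvmStateFPUCommon() above (and its
// read-back counterpart below): bits 11..13 of the status word hold
// TOP, so the stack register ST(i) lives in physical register
// (i + TOP) & 7. And on the FTW -> ftwx conversion: the full x87 tag
// word (FTW) uses two bits per register (00 = valid, 01 = zero,
// 10 = special, 11 = empty), while the abridged FXSAVE/XSAVE tag
// (ftwx) keeps one bit per register (1 = non-empty, 0 = empty); e.g.,
// an FTW of 0xffff (all registers empty) maps to an ftwx of 0x00, and
// 0x3fff (only R7 valid) maps to 0x80.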

void
X86KvmCPU::updateKvmStateFPULegacy()
{
    struct kvm_fpu fpu;

    // There is some padding in the FP registers, so we'd better zero
    // the whole struct.
    memset(&fpu, 0, sizeof(fpu));

    updateKvmStateFPUCommon(tc, fpu);

    if (tc->readMiscRegNoEffect(MISCREG_FISEG))
        warn_once("MISCREG_FISEG is non-zero.\n");

    fpu.last_ip = tc->readMiscRegNoEffect(MISCREG_FIOFF);

    if (tc->readMiscRegNoEffect(MISCREG_FOSEG))
        warn_once("MISCREG_FOSEG is non-zero.\n");

    fpu.last_dp = tc->readMiscRegNoEffect(MISCREG_FOOFF);

    setFPUState(fpu);
}

void
X86KvmCPU::updateKvmStateFPUXSave()
{
    struct kvm_xsave kxsave;
    FXSave &xsave(*(FXSave *)kxsave.region);

    // There are some padding and reserved fields in the structure, so
    // we'd better zero the whole thing.
    memset(&kxsave, 0, sizeof(kxsave));

    updateKvmStateFPUCommon(tc, xsave);

    if (tc->readMiscRegNoEffect(MISCREG_FISEG))
        warn_once("MISCREG_FISEG is non-zero.\n");

    xsave.ctrl64.fpu_ip = tc->readMiscRegNoEffect(MISCREG_FIOFF);

    if (tc->readMiscRegNoEffect(MISCREG_FOSEG))
        warn_once("MISCREG_FOSEG is non-zero.\n");

    xsave.ctrl64.fpu_dp = tc->readMiscRegNoEffect(MISCREG_FOOFF);

    setXSave(kxsave);
}

void
X86KvmCPU::updateKvmStateFPU()
{
    if (useXSave)
        updateKvmStateFPUXSave();
    else
        updateKvmStateFPULegacy();
}

void
X86KvmCPU::updateKvmStateMSRs()
{
    KvmMSRVector msrs;

    const Kvm::MSRIndexVector &indices(getMsrIntersection());

    for (auto it = indices.cbegin(); it != indices.cend(); ++it) {
        struct kvm_msr_entry e;

        e.index = *it;
        e.reserved = 0;
        e.data = tc->readMiscReg(msrMap.at(*it));
        DPRINTF(KvmContext, "Adding MSR: idx: 0x%x, data: 0x%x\n",
                e.index, e.data);

        msrs.push_back(e);
    }

    setMSRs(msrs);
}

void
X86KvmCPU::updateThreadContext()
{
    struct kvm_regs regs;
    struct kvm_sregs sregs;

    getRegisters(regs);
    getSpecialRegisters(sregs);

    DPRINTF(KvmContext, "X86KvmCPU::updateThreadContext():\n");
    if (DTRACE(KvmContext))
        dump();

    updateThreadContextRegs(regs, sregs);
    updateThreadContextSRegs(sregs);
    if (useXSave) {
        struct kvm_xsave xsave;
        getXSave(xsave);

        updateThreadContextXSave(xsave);
    } else {
        struct kvm_fpu fpu;
        getFPUState(fpu);

        updateThreadContextFPU(fpu);
    }
    updateThreadContextMSRs();

    // The M5 misc reg caches some values from other
    // registers. Writing to it with side effects causes it to be
    // updated from its source registers.
    tc->setMiscReg(MISCREG_M5_REG, 0);
}

void
X86KvmCPU::updateThreadContextRegs(const struct kvm_regs &regs,
                                   const struct kvm_sregs &sregs)
{
#define APPLY_IREG(kreg, mreg) tc->setIntReg(mreg, regs.kreg)

    FOREACH_IREG();

#undef APPLY_IREG

    tc->pcState(PCState(regs.rip + sregs.cs.base));

    // Flags are spread out across multiple semi-magic registers so we
    // need some special care when updating them.
    X86ISA::setRFlags(tc, regs.rflags);
}
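
// Note the RIP convention in the two directions (cf.
// updateKvmStateRegs() above): gem5's instAddr() is a linear address,
// whereas KVM's regs.rip is CS-relative, so the CS base is subtracted
// on the way into KVM and added back on the way out. In 64-bit flat
// mode the CS base is zero and the two values coincide; the
// difference only matters in compatibility and real modes.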

inline void
setContextSegment(ThreadContext *tc, const struct kvm_segment &kvm_seg,
                  const int index)
{
    SegAttr attr(0);

    attr.type = kvm_seg.type;
    attr.present = kvm_seg.present;
    attr.dpl = kvm_seg.dpl;
    attr.defaultSize = kvm_seg.db;
    attr.system = kvm_seg.s;
    attr.longMode = kvm_seg.l;
    attr.granularity = kvm_seg.g;
    attr.avl = kvm_seg.avl;
    attr.unusable = kvm_seg.unusable;

    // We need some setMiscReg magic here to keep the effective base
    // addresses in sync. We need an up-to-date version of EFER, so
    // make sure this is called after the sregs have been synced.
    tc->setMiscReg(MISCREG_SEG_BASE(index), kvm_seg.base);
    tc->setMiscReg(MISCREG_SEG_LIMIT(index), kvm_seg.limit);
    tc->setMiscReg(MISCREG_SEG_SEL(index), kvm_seg.selector);
    tc->setMiscReg(MISCREG_SEG_ATTR(index), attr);
}

inline void
setContextSegment(ThreadContext *tc, const struct kvm_dtable &kvm_dtable,
                  const int index)
{
    // We need some setMiscReg magic here to keep the effective base
    // addresses in sync. We need an up-to-date version of EFER, so
    // make sure this is called after the sregs have been synced.
    tc->setMiscReg(MISCREG_SEG_BASE(index), kvm_dtable.base);
    tc->setMiscReg(MISCREG_SEG_LIMIT(index), kvm_dtable.limit);
}

void
X86KvmCPU::updateThreadContextSRegs(const struct kvm_sregs &sregs)
{
    assert(getKvmRunState()->apic_base == sregs.apic_base);
    assert(getKvmRunState()->cr8 == sregs.cr8);

#define APPLY_SREG(kreg, mreg) tc->setMiscRegNoEffect(mreg, sregs.kreg)
#define APPLY_SEGMENT(kreg, idx) setContextSegment(tc, sregs.kreg, idx)
#define APPLY_DTABLE(kreg, idx) setContextSegment(tc, sregs.kreg, idx)
    FOREACH_SREG();
    FOREACH_SEGMENT();
    FOREACH_DTABLE();
#undef APPLY_SREG
#undef APPLY_SEGMENT
#undef APPLY_DTABLE
}

template<typename T>
static void
updateThreadContextFPUCommon(ThreadContext *tc, const T &fpu)
{
    const unsigned top((fpu.fsw >> 11) & 0x7);

    for (int i = 0; i < 8; ++i) {
        const unsigned reg_idx((i + top) & 0x7);
        const double value(X86ISA::loadFloat80(fpu.fpr[i]));
        DPRINTF(KvmContext, "Setting gem5 FP reg %i (st[%i]) := %f\n",
                reg_idx, i, value);
        tc->setFloatReg(FLOATREG_FPR(reg_idx), floatToBits64(value));
    }

    // TODO: We should update the MMX state

    tc->setMiscRegNoEffect(MISCREG_X87_TOP, top);
    tc->setMiscRegNoEffect(MISCREG_MXCSR, fpu.mxcsr);
    tc->setMiscRegNoEffect(MISCREG_FCW, fpu.fcw);
    tc->setMiscRegNoEffect(MISCREG_FSW, fpu.fsw);

    uint64_t ftw(convX87XTagsToTags(fpu.ftwx));
    // TODO: Are these registers really the same?
    tc->setMiscRegNoEffect(MISCREG_FTW, ftw);
    tc->setMiscRegNoEffect(MISCREG_FTAG, ftw);

    tc->setMiscRegNoEffect(MISCREG_FOP, fpu.last_opcode);

    for (int i = 0; i < 16; ++i) {
        tc->setFloatReg(FLOATREG_XMM_LOW(i), *(uint64_t *)&fpu.xmm[i][0]);
        tc->setFloatReg(FLOATREG_XMM_HIGH(i), *(uint64_t *)&fpu.xmm[i][8]);
    }
}

void
X86KvmCPU::updateThreadContextFPU(const struct kvm_fpu &fpu)
{
    updateThreadContextFPUCommon(tc, fpu);

    tc->setMiscRegNoEffect(MISCREG_FISEG, 0);
    tc->setMiscRegNoEffect(MISCREG_FIOFF, fpu.last_ip);
    tc->setMiscRegNoEffect(MISCREG_FOSEG, 0);
    tc->setMiscRegNoEffect(MISCREG_FOOFF, fpu.last_dp);
}

void
X86KvmCPU::updateThreadContextXSave(const struct kvm_xsave &kxsave)
{
    const FXSave &xsave(*(const FXSave *)kxsave.region);

    updateThreadContextFPUCommon(tc, xsave);

    tc->setMiscRegNoEffect(MISCREG_FISEG, 0);
    tc->setMiscRegNoEffect(MISCREG_FIOFF, xsave.ctrl64.fpu_ip);
    tc->setMiscRegNoEffect(MISCREG_FOSEG, 0);
    tc->setMiscRegNoEffect(MISCREG_FOOFF, xsave.ctrl64.fpu_dp);
}

void
X86KvmCPU::updateThreadContextMSRs()
{
    const Kvm::MSRIndexVector &msrs(getMsrIntersection());

    std::unique_ptr<struct kvm_msrs> kvm_msrs(
        newVarStruct<struct kvm_msrs, struct kvm_msr_entry>(msrs.size()));
    struct kvm_msr_entry *entry;

    // Create a list of MSRs to read
    kvm_msrs->nmsrs = msrs.size();
    entry = &kvm_msrs->entries[0];
    for (auto it = msrs.cbegin(); it != msrs.cend(); ++it, ++entry) {
        entry->index = *it;
        entry->reserved = 0;
        entry->data = 0;
    }

    getMSRs(*kvm_msrs.get());

    // Update M5's state
    entry = &kvm_msrs->entries[0];
    for (int i = 0; i < kvm_msrs->nmsrs; ++i, ++entry) {
        DPRINTF(KvmContext, "Setting M5 MSR: idx: 0x%x, data: 0x%x\n",
                entry->index, entry->data);

        tc->setMiscReg(X86ISA::msrMap.at(entry->index), entry->data);
    }
}

void
X86KvmCPU::deliverInterrupts()
{
    Fault fault;

    syncThreadContext();

    {
        // Migrate to the interrupt controller's thread to get the
        // interrupt. Even though the individual methods are safe to
        // call across threads, we might still lose interrupts unless
        // getInterrupt() and updateIntrInfo() are called atomically.
        EventQueue::ScopedMigration migrate(interrupts[0]->eventQueue());
        fault = interrupts[0]->getInterrupt(tc);
        interrupts[0]->updateIntrInfo(tc);
    }

    X86Interrupt *x86int(dynamic_cast<X86Interrupt *>(fault.get()));
    if (dynamic_cast<NonMaskableInterrupt *>(fault.get())) {
        DPRINTF(KvmInt, "Delivering NMI\n");
        kvmNonMaskableInterrupt();
    } else if (dynamic_cast<InitInterrupt *>(fault.get())) {
        DPRINTF(KvmInt, "INIT interrupt\n");
        fault.get()->invoke(tc);
        // Delay the kvm state update since we won't enter KVM on this
        // tick.
        threadContextDirty = true;
        // HACK: gem5 doesn't actually have any BIOS code, which means
        // that we need to halt the thread and wait for a startup
        // interrupt before restarting the thread. The simulated CPUs
        // use the same kind of hack using a microcode routine.
        thread->suspend();
    } else if (dynamic_cast<StartupInterrupt *>(fault.get())) {
        DPRINTF(KvmInt, "STARTUP interrupt\n");
        fault.get()->invoke(tc);
        // The kvm state is assumed to have been updated when entering
        // kvmRun(), so we need to update it manually here.
        updateKvmState();
    } else if (x86int) {
        struct kvm_interrupt kvm_int;
        kvm_int.irq = x86int->getVector();

        DPRINTF(KvmInt, "Delivering interrupt: %s (%u)\n",
                fault->name(), kvm_int.irq);

        kvmInterrupt(kvm_int);
    } else {
        panic("KVM: Unknown interrupt type\n");
    }
}
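
// The interrupt-window handshake used by kvmRun() below, in short:
// KVM can only inject an external interrupt while the guest is
// interruptible (IF set, no interrupt shadow). When
// kvm_run.ready_for_interrupt_injection is 0, setting
// kvm_run.request_interrupt_window to 1 asks KVM to exit with
// KVM_EXIT_IRQ_WINDOW_OPEN as soon as injection becomes possible, at
// which point the pending gem5 interrupt can be delivered on the next
// call.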

Tick
X86KvmCPU::kvmRun(Tick ticks)
{
    struct kvm_run &kvm_run(*getKvmRunState());

    if (interrupts[0]->checkInterruptsRaw()) {
        if (interrupts[0]->hasPendingUnmaskable()) {
            DPRINTF(KvmInt,
                    "Delivering unmaskable interrupt.\n");
            syncThreadContext();
            deliverInterrupts();
        } else if (kvm_run.ready_for_interrupt_injection) {
            // KVM claims that it is ready for an interrupt. It might
            // be lying if we just updated rflags and disabled
            // interrupts (e.g., by doing a CPU handover). Let's sync
            // the thread context and check if there are /really/
            // interrupts that should be delivered now.
            syncThreadContext();
            if (interrupts[0]->checkInterrupts(tc)) {
                DPRINTF(KvmInt,
                        "M5 has pending interrupts, delivering interrupt.\n");

                deliverInterrupts();
            } else {
                DPRINTF(KvmInt,
                        "Interrupt delivery delayed due to KVM confusion.\n");
                kvm_run.request_interrupt_window = 1;
            }
        } else if (!kvm_run.request_interrupt_window) {
            DPRINTF(KvmInt,
                    "M5 has pending interrupts, requesting interrupt "
                    "window.\n");
            kvm_run.request_interrupt_window = 1;
        }
    } else {
        kvm_run.request_interrupt_window = 0;
    }

    // The CPU might have been suspended as a result of the INIT
    // interrupt delivery hack. In that case, don't enter into KVM.
    if (_status == Idle)
        return 0;
    else
        return kvmRunWrapper(ticks);
}

Tick
X86KvmCPU::kvmRunDrain()
{
    struct kvm_run &kvm_run(*getKvmRunState());

    if (!archIsDrained()) {
        DPRINTF(Drain, "kvmRunDrain: Architecture code isn't drained\n");

        // Tell KVM to find a suitable place to deliver interrupts. This
        // should ensure that pending interrupts have been delivered and
        // things are reasonably consistent (i.e., no interrupts pending
        // in the guest).
        kvm_run.request_interrupt_window = 1;

        // Limit the run to 1 millisecond. That is hopefully enough to
        // reach an interrupt window. Otherwise, we'll just try again
        // later.
        return kvmRunWrapper(1 * SimClock::Float::ms);
    } else {
        DPRINTF(Drain, "kvmRunDrain: Delivering pending IO\n");

        return kvmRunWrapper(0);
    }
}

Tick
X86KvmCPU::kvmRunWrapper(Tick ticks)
{
    struct kvm_run &kvm_run(*getKvmRunState());

    // Synchronize the APIC base and CR8 here since they are present
    // in the kvm_run struct, which makes the synchronization really
    // cheap.
    kvm_run.apic_base = tc->readMiscReg(MISCREG_APIC_BASE);
    kvm_run.cr8 = tc->readMiscReg(MISCREG_CR8);

    const Tick run_ticks(BaseKvmCPU::kvmRun(ticks));

    tc->setMiscReg(MISCREG_APIC_BASE, kvm_run.apic_base);
    kvm_run.cr8 = tc->readMiscReg(MISCREG_CR8);

    return run_ticks;
}

uint64_t
X86KvmCPU::getHostCycles() const
{
    return getMSR(MSR_TSC);
}

void
X86KvmCPU::handleIOMiscReg32(int miscreg)
{
    struct kvm_run &kvm_run(*getKvmRunState());
    const uint16_t port(kvm_run.io.port);

    assert(kvm_run.exit_reason == KVM_EXIT_IO);

    if (kvm_run.io.size != 4) {
        panic("Unexpected IO size (%u) for address 0x%x.\n",
              kvm_run.io.size, port);
    }

    if (kvm_run.io.count != 1) {
        panic("Unexpected IO count (%u) for address 0x%x.\n",
              kvm_run.io.count, port);
    }

    uint32_t *data((uint32_t *)getGuestData(kvm_run.io.data_offset));
    if (kvm_run.io.direction == KVM_EXIT_IO_OUT)
        tc->setMiscReg(miscreg, *data);
    else
        *data = tc->readMiscRegNoEffect(miscreg);
}
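
// For reference when reading the PCI intercept below (standard PCI
// configuration mechanism #1, not gem5-specific): the guest writes an
// address of the form
//
//     0x80000000 | (bus << 16) | (device << 11) | (function << 8) | reg
//
// to port 0xCF8 (IO_PCI_CONF_ADDR) and then accesses the selected
// dword through ports 0xCFC-0xCFF (IO_PCI_CONF_DATA_BASE). The low
// two bits of the data port select the byte offset within the dword,
// which is why the handler masks the port with ~0x3 and adds back
// (port & 0x3).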

Tick
X86KvmCPU::handleKvmExitIO()
{
    struct kvm_run &kvm_run(*getKvmRunState());
    bool isWrite(kvm_run.io.direction == KVM_EXIT_IO_OUT);
    unsigned char *guestData(getGuestData(kvm_run.io.data_offset));
    Tick delay(0);
    uint16_t port(kvm_run.io.port);
    Addr pAddr;
    const int count(kvm_run.io.count);

    assert(kvm_run.io.direction == KVM_EXIT_IO_IN ||
           kvm_run.io.direction == KVM_EXIT_IO_OUT);

    DPRINTF(KvmIO, "KVM-x86: Handling IO instruction (%s) (port: 0x%x)\n",
            (isWrite ? "out" : "in"), kvm_run.io.port);

    /* Vanilla gem5 handles PCI discovery in the TLB(!). Since we
     * don't use the TLB component, we need to intercept and handle
     * the PCI configuration space IO ports here.
     *
     * The IO port PCI discovery mechanism uses one address register
     * and one data register. We map the address register to a misc
     * reg and use that to re-route data register accesses to the
     * right location in the PCI configuration space.
     */
    if (port == IO_PCI_CONF_ADDR) {
        handleIOMiscReg32(MISCREG_PCI_CONFIG_ADDRESS);
        return 0;
    } else if ((port & ~0x3) == IO_PCI_CONF_DATA_BASE) {
        Addr pciConfigAddr(tc->readMiscRegNoEffect(
            MISCREG_PCI_CONFIG_ADDRESS));
        if (pciConfigAddr & 0x80000000) {
            pAddr = X86ISA::x86PciConfigAddress((pciConfigAddr & 0x7ffffffc) |
                                                (port & 0x3));
        } else {
            pAddr = X86ISA::x86IOAddress(port);
        }
    } else {
        pAddr = X86ISA::x86IOAddress(port);
    }

    const MemCmd cmd(isWrite ? MemCmd::WriteReq : MemCmd::ReadReq);
    // Temporarily lock and migrate to the device event queue to
    // prevent races in multi-core mode.
    EventQueue::ScopedMigration migrate(deviceEventQueue());
    for (int i = 0; i < count; ++i) {
        RequestPtr io_req = std::make_shared<Request>(
            pAddr, kvm_run.io.size,
            Request::UNCACHEABLE, dataMasterId());

        io_req->setContext(tc->contextId());

        PacketPtr pkt = new Packet(io_req, cmd);

        pkt->dataStatic(guestData);
        delay += dataPort.submitIO(pkt);

        guestData += kvm_run.io.size;
    }

    return delay;
}

Tick
X86KvmCPU::handleKvmExitIRQWindowOpen()
{
    // We don't need to do anything here since this is caught the next
    // time we execute kvmRun(). We still overload the exit event to
    // silence the warning about an unhandled exit event.
    return 0;
}

bool
X86KvmCPU::archIsDrained() const
{
    struct kvm_vcpu_events events;

    getVCpuEvents(events);

    // We could probably handle this by re-inserting any pending
    // interrupts into gem5 on a drain. However, that would probably
    // be tricky to do reliably, so we'll just prevent a drain if
    // there is anything pending in the
    // guest. X86KvmCPU::kvmRunDrain() minimizes the amount of code
    // executed in the guest by requesting an interrupt window if
    // there are pending interrupts.
    const bool pending_events(events.exception.injected ||
                              events.interrupt.injected ||
                              events.nmi.injected || events.nmi.pending);

    if (pending_events) {
        DPRINTF(Drain, "archIsDrained: Pending events: %s %s %s %s\n",
                events.exception.injected ? "exception" : "",
                events.interrupt.injected ? "interrupt" : "",
                events.nmi.injected ? "nmi[i]" : "",
                events.nmi.pending ? "nmi[p]" : "");
    }

    return !pending_events;
}

static struct kvm_cpuid_entry2
makeKvmCpuid(uint32_t function, uint32_t index,
             CpuidResult &result)
{
    struct kvm_cpuid_entry2 e;
    e.function = function;
    e.index = index;
    e.flags = 0;
    e.eax = (uint32_t)result.rax;
    e.ebx = (uint32_t)result.rbx;
    e.ecx = (uint32_t)result.rcx;
    e.edx = (uint32_t)result.rdx;

    return e;
}
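
// Background for updateCPUID() below: CPUID leaves come in two
// ranges. Executing CPUID with EAX = 0 returns the highest supported
// standard leaf in EAX, and EAX = 0x80000000 returns the highest
// supported extended leaf. For example, if doCpuid(tc, 0x0, 0, res)
// leaves res.rax == 0xd, the first loop forwards leaves 0x0 through
// 0xd to KVM. Sub-leaf (index) iteration is not implemented, as the
// TODO below notes.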

void
X86KvmCPU::updateCPUID()
{
    Kvm::CPUIDVector m5_supported;

    /* TODO: We currently don't support any of the functions that
     * iterate through data structures in the CPU using an index. It's
     * currently not a problem since M5 doesn't expose any of them at
     * the moment.
     */

    /* Basic features */
    CpuidResult func0;
    X86ISA::doCpuid(tc, 0x0, 0, func0);
    for (uint32_t function = 0; function <= func0.rax; ++function) {
        CpuidResult cpuid;
        uint32_t idx(0);

        X86ISA::doCpuid(tc, function, idx, cpuid);
        m5_supported.push_back(makeKvmCpuid(function, idx, cpuid));
    }

    /* Extended features */
    CpuidResult efunc0;
    X86ISA::doCpuid(tc, 0x80000000, 0, efunc0);
    for (uint32_t function = 0x80000000; function <= efunc0.rax; ++function) {
        CpuidResult cpuid;
        uint32_t idx(0);

        X86ISA::doCpuid(tc, function, idx, cpuid);
        m5_supported.push_back(makeKvmCpuid(function, idx, cpuid));
    }

    setCPUID(m5_supported);
}

void
X86KvmCPU::setCPUID(const struct kvm_cpuid2 &cpuid)
{
    if (ioctl(KVM_SET_CPUID2, (void *)&cpuid) == -1)
        panic("KVM: Failed to set guest CPUID2 (errno: %i)\n",
              errno);
}

void
X86KvmCPU::setCPUID(const Kvm::CPUIDVector &cpuid)
{
    std::unique_ptr<struct kvm_cpuid2> kvm_cpuid(
        newVarStruct<struct kvm_cpuid2, struct kvm_cpuid_entry2>(
            cpuid.size()));

    kvm_cpuid->nent = cpuid.size();
    std::copy(cpuid.begin(), cpuid.end(), kvm_cpuid->entries);

    setCPUID(*kvm_cpuid);
}

void
X86KvmCPU::setMSRs(const struct kvm_msrs &msrs)
{
    if (ioctl(KVM_SET_MSRS, (void *)&msrs) == -1)
        panic("KVM: Failed to set guest MSRs (errno: %i)\n",
              errno);
}

void
X86KvmCPU::setMSRs(const KvmMSRVector &msrs)
{
    std::unique_ptr<struct kvm_msrs> kvm_msrs(
        newVarStruct<struct kvm_msrs, struct kvm_msr_entry>(msrs.size()));

    kvm_msrs->nmsrs = msrs.size();
    std::copy(msrs.begin(), msrs.end(), kvm_msrs->entries);

    setMSRs(*kvm_msrs);
}

void
X86KvmCPU::getMSRs(struct kvm_msrs &msrs) const
{
    if (ioctl(KVM_GET_MSRS, (void *)&msrs) == -1)
        panic("KVM: Failed to get guest MSRs (errno: %i)\n",
              errno);
}


void
X86KvmCPU::setMSR(uint32_t index, uint64_t value)
{
    std::unique_ptr<struct kvm_msrs> kvm_msrs(
        newVarStruct<struct kvm_msrs, struct kvm_msr_entry>(1));
    struct kvm_msr_entry &entry(kvm_msrs->entries[0]);

    kvm_msrs->nmsrs = 1;
    entry.index = index;
    entry.reserved = 0;
    entry.data = value;

    setMSRs(*kvm_msrs.get());
}

uint64_t
X86KvmCPU::getMSR(uint32_t index) const
{
    std::unique_ptr<struct kvm_msrs> kvm_msrs(
        newVarStruct<struct kvm_msrs, struct kvm_msr_entry>(1));
    struct kvm_msr_entry &entry(kvm_msrs->entries[0]);

    kvm_msrs->nmsrs = 1;
    entry.index = index;
    entry.reserved = 0;
    entry.data = 0;

    getMSRs(*kvm_msrs.get());
    return entry.data;
}
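
// Usage illustration for the single-MSR accessors above:
// getHostCycles() earlier in this file reads the guest TSC via
// getMSR(MSR_TSC), and a (hypothetical) reset path could clear it
// with setMSR(MSR_TSC, 0). Both wrap the value in a one-entry
// kvm_msrs batch; KVM_GET_MSRS fills in, and KVM_SET_MSRS consumes,
// the data field of each entry in place.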

const Kvm::MSRIndexVector &
X86KvmCPU::getMsrIntersection() const
{
    if (cachedMsrIntersection.empty()) {
        const Kvm::MSRIndexVector &kvm_msrs(vm.kvm->getSupportedMSRs());

        DPRINTF(Kvm, "kvm-x86: Updating MSR intersection\n");
        for (auto it = kvm_msrs.cbegin(); it != kvm_msrs.cend(); ++it) {
            if (X86ISA::msrMap.find(*it) != X86ISA::msrMap.end()) {
                cachedMsrIntersection.push_back(*it);
                DPRINTF(Kvm, "kvm-x86: Adding MSR 0x%x\n", *it);
            } else {
                warn("kvm-x86: MSR (0x%x) unsupported by gem5. Skipping.\n",
                     *it);
            }
        }
    }

    return cachedMsrIntersection;
}

void
X86KvmCPU::getDebugRegisters(struct kvm_debugregs &regs) const
{
#ifdef KVM_GET_DEBUGREGS
    if (ioctl(KVM_GET_DEBUGREGS, &regs) == -1)
        panic("KVM: Failed to get guest debug registers\n");
#else
    panic("KVM: Unsupported getDebugRegisters call.\n");
#endif
}

void
X86KvmCPU::setDebugRegisters(const struct kvm_debugregs &regs)
{
#ifdef KVM_SET_DEBUGREGS
    if (ioctl(KVM_SET_DEBUGREGS, (void *)&regs) == -1)
        panic("KVM: Failed to set guest debug registers\n");
#else
    panic("KVM: Unsupported setDebugRegisters call.\n");
#endif
}

void
X86KvmCPU::getXCRs(struct kvm_xcrs &regs) const
{
    if (ioctl(KVM_GET_XCRS, &regs) == -1)
        panic("KVM: Failed to get guest XCRs\n");
}

void
X86KvmCPU::setXCRs(const struct kvm_xcrs &regs)
{
    if (ioctl(KVM_SET_XCRS, (void *)&regs) == -1)
        panic("KVM: Failed to set guest XCRs\n");
}

void
X86KvmCPU::getXSave(struct kvm_xsave &xsave) const
{
    if (ioctl(KVM_GET_XSAVE, &xsave) == -1)
        panic("KVM: Failed to get guest XSave state\n");
}

void
X86KvmCPU::setXSave(const struct kvm_xsave &xsave)
{
    if (ioctl(KVM_SET_XSAVE, (void *)&xsave) == -1)
        panic("KVM: Failed to set guest XSave state\n");
}


void
X86KvmCPU::getVCpuEvents(struct kvm_vcpu_events &events) const
{
    if (ioctl(KVM_GET_VCPU_EVENTS, &events) == -1)
        panic("KVM: Failed to get guest vCPU events\n");
}

void
X86KvmCPU::setVCpuEvents(const struct kvm_vcpu_events &events)
{
    if (ioctl(KVM_SET_VCPU_EVENTS, (void *)&events) == -1)
        panic("KVM: Failed to set guest vCPU events\n");
}

X86KvmCPU *
X86KvmCPUParams::create()
{
    return new X86KvmCPU(this);
}