x86_cpu.cc revision 13611:c8b7847b4171
1955SN/A/*
2955SN/A * Copyright (c) 2013 Andreas Sandberg
31762SN/A * All rights reserved
4955SN/A *
5955SN/A * Redistribution and use in source and binary forms, with or without
6955SN/A * modification, are permitted provided that the following conditions are
7955SN/A * met: redistributions of source code must retain the above copyright
8955SN/A * notice, this list of conditions and the following disclaimer;
9955SN/A * redistributions in binary form must reproduce the above copyright
10955SN/A * notice, this list of conditions and the following disclaimer in the
11955SN/A * documentation and/or other materials provided with the distribution;
12955SN/A * neither the name of the copyright holders nor the names of its
13955SN/A * contributors may be used to endorse or promote products derived from
14955SN/A * this software without specific prior written permission.
15955SN/A *
16955SN/A * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17955SN/A * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18955SN/A * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19955SN/A * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20955SN/A * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21955SN/A * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22955SN/A * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23955SN/A * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24955SN/A * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25955SN/A * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26955SN/A * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27955SN/A *
282665Ssaidi@eecs.umich.edu * Authors: Andreas Sandberg
292665Ssaidi@eecs.umich.edu */
30955SN/A
31955SN/A#include "cpu/kvm/x86_cpu.hh"
32955SN/A
33955SN/A#include <linux/kvm.h>
34955SN/A
352632Sstever@eecs.umich.edu#include <algorithm>
362632Sstever@eecs.umich.edu#include <cerrno>
372632Sstever@eecs.umich.edu#include <memory>
382632Sstever@eecs.umich.edu
39955SN/A#include "arch/registers.hh"
402632Sstever@eecs.umich.edu#include "arch/x86/cpuid.hh"
412632Sstever@eecs.umich.edu#include "arch/x86/regs/msr.hh"
422761Sstever@eecs.umich.edu#include "arch/x86/utility.hh"
432632Sstever@eecs.umich.edu#include "cpu/kvm/base.hh"
442632Sstever@eecs.umich.edu#include "debug/Drain.hh"
452632Sstever@eecs.umich.edu#include "debug/Kvm.hh"
462761Sstever@eecs.umich.edu#include "debug/KvmContext.hh"
472761Sstever@eecs.umich.edu#include "debug/KvmIO.hh"
482761Sstever@eecs.umich.edu#include "debug/KvmInt.hh"
492632Sstever@eecs.umich.edu
502632Sstever@eecs.umich.eduusing namespace X86ISA;
512761Sstever@eecs.umich.edu
522761Sstever@eecs.umich.edu#define MSR_TSC 0x10
532761Sstever@eecs.umich.edu
542761Sstever@eecs.umich.edu#define IO_PCI_CONF_ADDR 0xCF8
552761Sstever@eecs.umich.edu#define IO_PCI_CONF_DATA_BASE 0xCFC
562632Sstever@eecs.umich.edu
572632Sstever@eecs.umich.edu// Task segment type of an inactive 32-bit or 64-bit task
582632Sstever@eecs.umich.edu#define SEG_SYS_TYPE_TSS_AVAILABLE 9
592632Sstever@eecs.umich.edu// Task segment type of an active 32-bit or 64-bit task
602632Sstever@eecs.umich.edu#define SEG_SYS_TYPE_TSS_BUSY 11
612632Sstever@eecs.umich.edu
622632Sstever@eecs.umich.edu// Non-conforming accessed code segment
63955SN/A#define SEG_CS_TYPE_ACCESSED 9
64955SN/A// Non-conforming accessed code segment that can be read
65955SN/A#define SEG_CS_TYPE_READ_ACCESSED 11
66955SN/A
67955SN/A// The lowest bit of the type field for normal segments (code and
685396Ssaidi@eecs.umich.edu// data) is used to indicate that a segment has been accessed.
694202Sbinkertn@umich.edu#define SEG_TYPE_BIT_ACCESSED 1
705342Sstever@gmail.com
/**
 * Memory layout of an FXSAVE/FXRSTOR area.
 *
 * This mirrors the 512-byte region written by the x86 FXSAVE
 * instruction. KVM's XSAVE buffer starts with this legacy layout, so
 * the same struct is used to interpret kvm_xsave::region below.
 */
struct FXSave
{
    uint16_t fcw;          // x87 control word
    uint16_t fsw;          // x87 status word (TOP in bits 13:11)
    uint8_t ftwx;          // abridged tag word, one valid-bit per register
    uint8_t pad0;
    uint16_t last_opcode;  // last x87 opcode
    union {
        // 32-bit variant: segmented instruction/data pointers
        struct {
            uint32_t fpu_ip;
            uint16_t fpu_cs;
            uint16_t pad1;
            uint32_t fpu_dp;
            uint16_t fpu_ds;
            uint16_t pad2;
        } ctrl32;

        // 64-bit variant: flat 64-bit instruction/data pointers
        struct {
            uint64_t fpu_ip;
            uint64_t fpu_dp;
        } ctrl64;
    };
    uint32_t mxcsr;        // SSE control/status register
    uint32_t mxcsr_mask;   // mask of writable MXCSR bits

    uint8_t fpr[8][16];    // x87/MMX registers; 80-bit values in 16-byte slots
    uint8_t xmm[16][16];   // XMM0-XMM15

    uint64_t reserved[12];
} M5_ATTR_PACKED;

// The FXSAVE image is architecturally defined to be exactly 512 bytes.
static_assert(sizeof(FXSave) == 512, "Unexpected size of FXSave");
1035396Ssaidi@eecs.umich.edu
// X-macro helpers. Each FOREACH_* macro expands a user-supplied
// APPLY_* macro once per register, pairing the field name in KVM's
// state structs (first argument) with the corresponding gem5 register
// index (second argument). Callers #define APPLY_* before invoking
// the FOREACH_* macro and #undef it afterwards.

// General-purpose integer registers (struct kvm_regs fields).
#define FOREACH_IREG()                          \
    do {                                        \
        APPLY_IREG(rax, INTREG_RAX);            \
        APPLY_IREG(rbx, INTREG_RBX);            \
        APPLY_IREG(rcx, INTREG_RCX);            \
        APPLY_IREG(rdx, INTREG_RDX);            \
        APPLY_IREG(rsi, INTREG_RSI);            \
        APPLY_IREG(rdi, INTREG_RDI);            \
        APPLY_IREG(rsp, INTREG_RSP);            \
        APPLY_IREG(rbp, INTREG_RBP);            \
        APPLY_IREG(r8, INTREG_R8);              \
        APPLY_IREG(r9, INTREG_R9);              \
        APPLY_IREG(r10, INTREG_R10);            \
        APPLY_IREG(r11, INTREG_R11);            \
        APPLY_IREG(r12, INTREG_R12);            \
        APPLY_IREG(r13, INTREG_R13);            \
        APPLY_IREG(r14, INTREG_R14);            \
        APPLY_IREG(r15, INTREG_R15);            \
    } while (0)

// Scalar special registers (struct kvm_sregs fields).
#define FOREACH_SREG()                                  \
    do {                                                \
        APPLY_SREG(cr0, MISCREG_CR0);                   \
        APPLY_SREG(cr2, MISCREG_CR2);                   \
        APPLY_SREG(cr3, MISCREG_CR3);                   \
        APPLY_SREG(cr4, MISCREG_CR4);                   \
        APPLY_SREG(cr8, MISCREG_CR8);                   \
        APPLY_SREG(efer, MISCREG_EFER);                 \
        APPLY_SREG(apic_base, MISCREG_APIC_BASE);       \
    } while (0)

// Hardware debug registers (struct kvm_debugregs fields).
#define FOREACH_DREG()                          \
    do {                                        \
        APPLY_DREG(db[0], MISCREG_DR0);         \
        APPLY_DREG(db[1], MISCREG_DR1);         \
        APPLY_DREG(db[2], MISCREG_DR2);         \
        APPLY_DREG(db[3], MISCREG_DR3);         \
        APPLY_DREG(dr6, MISCREG_DR6);           \
        APPLY_DREG(dr7, MISCREG_DR7);           \
    } while (0)

// Segment registers; the second argument is the offset from
// MISCREG_SEG_SEL_BASE identifying the segment.
#define FOREACH_SEGMENT()                                       \
    do {                                                        \
        APPLY_SEGMENT(cs, MISCREG_CS - MISCREG_SEG_SEL_BASE);   \
        APPLY_SEGMENT(ds, MISCREG_DS - MISCREG_SEG_SEL_BASE);   \
        APPLY_SEGMENT(es, MISCREG_ES - MISCREG_SEG_SEL_BASE);   \
        APPLY_SEGMENT(fs, MISCREG_FS - MISCREG_SEG_SEL_BASE);   \
        APPLY_SEGMENT(gs, MISCREG_GS - MISCREG_SEG_SEL_BASE);   \
        APPLY_SEGMENT(ss, MISCREG_SS - MISCREG_SEG_SEL_BASE);   \
        APPLY_SEGMENT(tr, MISCREG_TR - MISCREG_SEG_SEL_BASE);   \
        APPLY_SEGMENT(ldt, MISCREG_TSL - MISCREG_SEG_SEL_BASE); \
    } while (0)

// Descriptor table registers (GDTR/IDTR).
#define FOREACH_DTABLE()                                        \
    do {                                                        \
        APPLY_DTABLE(gdt, MISCREG_TSG - MISCREG_SEG_SEL_BASE);  \
        APPLY_DTABLE(idt, MISCREG_IDTR - MISCREG_SEG_SEL_BASE); \
    } while (0)
1624678Snate@binkert.org
/**
 * Allocate a variable-size KVM struct: a fixed STRUCT header followed
 * by a flexible array of ENTRY records (e.g., struct kvm_msrs with its
 * trailing kvm_msr_entry array).
 *
 * @param entries Number of trailing ENTRY records to reserve space for.
 * @return Pointer to uninitialized storage; the caller owns it
 *         (typically wrapped in a std::unique_ptr).
 */
template<typename STRUCT, typename ENTRY>
static STRUCT *newVarStruct(size_t entries)
{
    // Raw operator new is needed since the allocation is larger than
    // sizeof(STRUCT); use a named cast rather than a C-style cast.
    return static_cast<STRUCT *>(
        operator new(sizeof(STRUCT) + entries * sizeof(ENTRY)));
}
1684678Snate@binkert.org
// Pretty-print the general-purpose register state returned by
// KVM_GET_REGS (all 16 integer registers plus RIP and RFLAGS).
static void
dumpKvm(const struct kvm_regs &regs)
{
    inform("KVM register state:\n");

#define APPLY_IREG(kreg, mreg)                  \
    inform("\t" # kreg ": 0x%llx\n", regs.kreg)

    FOREACH_IREG();

#undef APPLY_IREG

    inform("\trip: 0x%llx\n", regs.rip);
    inform("\trflags: 0x%llx\n", regs.rflags);
}
1844678Snate@binkert.org
// Pretty-print a single KVM segment register: base, limit, selector,
// type, and the descriptor attribute bits.
static void
dumpKvm(const char *reg_name, const struct kvm_segment &seg)
{
    inform("\t%s: @0x%llx+%x [sel: 0x%x, type: 0x%x]\n"
           "\t\tpres.: %u, dpl: %u, db: %u, s: %u, l: %u, g: %u, avl: %u, unus.: %u\n",
           reg_name,
           seg.base, seg.limit, seg.selector, seg.type,
           seg.present, seg.dpl, seg.db, seg.s, seg.l, seg.g, seg.avl, seg.unusable);
}
1942632Sstever@eecs.umich.edu
// Pretty-print a descriptor table register (GDTR/IDTR): base + limit.
static void
dumpKvm(const char *reg_name, const struct kvm_dtable &dtable)
{
    inform("\t%s: @0x%llx+%x\n",
           reg_name, dtable.base, dtable.limit);
}
2012632Sstever@eecs.umich.edu
/**
 * Pretty-print the special register state returned by KVM_GET_SREGS:
 * segments, control/EFER/APIC-base registers, descriptor tables, and
 * the pending-interrupt bitmap.
 *
 * Iterates over the fields using the FOREACH_*/APPLY_* X-macros
 * defined earlier in this file.
 */
static void
dumpKvm(const struct kvm_sregs &sregs)
{
#define APPLY_SREG(kreg, mreg)                          \
    inform("\t" # kreg ": 0x%llx\n", sregs.kreg);
#define APPLY_SEGMENT(kreg, idx)                \
    dumpKvm(# kreg, sregs.kreg);
#define APPLY_DTABLE(kreg, idx)                 \
    dumpKvm(# kreg, sregs.kreg);

    inform("Special registers:\n");
    FOREACH_SEGMENT();
    FOREACH_SREG();
    FOREACH_DTABLE();

    inform("Interrupt Bitmap:");
    // One 64-bit word per 64 interrupt vectors.
    for (int i = 0; i < KVM_NR_INTERRUPTS; i += 64)
        inform("  0x%.8x", sregs.interrupt_bitmap[i / 64]);

#undef APPLY_SREG
#undef APPLY_SEGMENT
#undef APPLY_DTABLE
}
2252638Sstever@eecs.umich.edu
#ifdef KVM_GET_DEBUGREGS
// Pretty-print the hardware debug registers (DR0-DR3, DR6, DR7).
// Guarded at compile time since KVM_GET_DEBUGREGS is only present in
// newer kernel headers.
static void
dumpKvm(const struct kvm_debugregs &regs)
{
    inform("KVM debug state:\n");

#define APPLY_DREG(kreg, mreg)                  \
    inform("\t" # kreg ": 0x%llx\n", regs.kreg)

    FOREACH_DREG();

#undef APPLY_DREG

    inform("\tflags: 0x%llx\n", regs.flags);
}
#endif
2422634Sstever@eecs.umich.edu
// Dump the FXSave-specific FPU fields: the 64-bit instruction/data
// pointers and the MXCSR write mask (not exposed via struct kvm_fpu).
static void
dumpFpuSpec(const struct FXSave &xs)
{
    inform("\tlast_ip: 0x%x\n", xs.ctrl64.fpu_ip);
    inform("\tlast_dp: 0x%x\n", xs.ctrl64.fpu_dp);
    inform("\tmxcsr_mask: 0x%x\n", xs.mxcsr_mask);
}
250955SN/A
// Dump the kvm_fpu-specific FPU fields: the last instruction/data
// pointers as reported by KVM_GET_FPU.
static void
dumpFpuSpec(const struct kvm_fpu &fpu)
{
    inform("\tlast_ip: 0x%x\n", fpu.last_ip);
    inform("\tlast_dp: 0x%x\n", fpu.last_dp);
}
2571858SN/A
/**
 * Dump the FPU/SIMD state fields shared between the two FPU state
 * representations.
 *
 * @tparam T Either struct FXSave or struct kvm_fpu; both expose the
 *         fcw/fsw/ftwx/last_opcode/mxcsr/fpr/xmm members used here.
 *         Representation-specific fields are handled by the matching
 *         dumpFpuSpec() overload.
 */
template<typename T>
static void
dumpFpuCommon(const T &fpu)
{
    // TOP-of-stack pointer lives in FSW bits 13:11.
    const unsigned top((fpu.fsw >> 11) & 0x7);
    inform("\tfcw: 0x%x\n", fpu.fcw);

    inform("\tfsw: 0x%x (top: %i, "
           "conditions: %s%s%s%s, exceptions: %s%s%s%s%s%s %s%s%s)\n",
           fpu.fsw, top,

           (fpu.fsw & CC0Bit) ? "C0" : "",
           (fpu.fsw & CC1Bit) ? "C1" : "",
           (fpu.fsw & CC2Bit) ? "C2" : "",
           (fpu.fsw & CC3Bit) ? "C3" : "",

           (fpu.fsw & IEBit) ? "I" : "",
           (fpu.fsw & DEBit) ? "D" : "",
           (fpu.fsw & ZEBit) ? "Z" : "",
           (fpu.fsw & OEBit) ? "O" : "",
           (fpu.fsw & UEBit) ? "U" : "",
           (fpu.fsw & PEBit) ? "P" : "",

           (fpu.fsw & StackFaultBit) ? "SF " : "",
           (fpu.fsw & ErrSummaryBit) ? "ES " : "",
           (fpu.fsw & BusyBit) ? "BUSY " : ""
        );
    inform("\tftwx: 0x%x\n", fpu.ftwx);
    inform("\tlast_opcode: 0x%x\n", fpu.last_opcode);
    dumpFpuSpec(fpu);
    inform("\tmxcsr: 0x%x\n", fpu.mxcsr);
    inform("\tFP Stack:\n");
    for (int i = 0; i < 8; ++i) {
        // Map the stack-relative index to the physical register number.
        const unsigned reg_idx((i + top) & 0x7);
        // A cleared bit in the abridged tag word marks the slot empty.
        const bool empty(!((fpu.ftwx >> reg_idx) & 0x1));
        const double value(X86ISA::loadFloat80(fpu.fpr[i]));
        // Two hex digits per byte plus a terminating NUL.
        char hex[33];
        // Only the first 10 bytes of each slot hold the 80-bit value.
        for (int j = 0; j < 10; ++j)
            snprintf(&hex[j*2], 3, "%.2x", fpu.fpr[i][j]);
        inform("\t\tST%i/%i: 0x%s (%f)%s\n", i, reg_idx,
               hex, value, empty ? " (e)" : "");
    }
    inform("\tXMM registers:\n");
    for (int i = 0; i < 16; ++i) {
        char hex[33];
        for (int j = 0; j < 16; ++j)
            snprintf(&hex[j*2], 3, "%.2x", fpu.xmm[i][j]);
        inform("\t\t%i: 0x%s\n", i, hex);
    }
}
3085341Sstever@gmail.com
// Dump FPU/SIMD state retrieved through the legacy KVM_GET_FPU ioctl.
static void
dumpKvm(const struct kvm_fpu &fpu)
{
    inform("FPU registers:\n");
    dumpFpuCommon(fpu);
}
3155341Sstever@gmail.com
3165341Sstever@gmail.comstatic void
3175341Sstever@gmail.comdumpKvm(const struct kvm_xsave &xsave)
3185341Sstever@gmail.com{
3195341Sstever@gmail.com    inform("FPU registers (XSave):\n");
3205341Sstever@gmail.com    dumpFpuCommon(*(FXSave *)xsave.region);
3215341Sstever@gmail.com}
3225341Sstever@gmail.com
3235341Sstever@gmail.comstatic void
3245341Sstever@gmail.comdumpKvm(const struct kvm_msrs &msrs)
3255341Sstever@gmail.com{
3265341Sstever@gmail.com    inform("MSRs:\n");
3275344Sstever@gmail.com
3285341Sstever@gmail.com    for (int i = 0; i < msrs.nmsrs; ++i) {
3295341Sstever@gmail.com        const struct kvm_msr_entry &e(msrs.entries[i]);
3305341Sstever@gmail.com
3315341Sstever@gmail.com        inform("\t0x%x: 0x%x\n", e.index, e.data);
3325341Sstever@gmail.com    }
3332632Sstever@eecs.umich.edu}
3345199Sstever@gmail.com
3354781Snate@binkert.orgstatic void
3364781Snate@binkert.orgdumpKvm(const struct kvm_xcrs &regs)
3375550Snate@binkert.org{
3384781Snate@binkert.org    inform("KVM XCR registers:\n");
3394781Snate@binkert.org
3403918Ssaidi@eecs.umich.edu    inform("\tFlags: 0x%x\n", regs.flags);
3414781Snate@binkert.org    for (int i = 0; i < regs.nr_xcrs; ++i) {
3424781Snate@binkert.org        inform("\tXCR[0x%x]: 0x%x\n",
3433940Ssaidi@eecs.umich.edu               regs.xcrs[i].xcr,
3443942Ssaidi@eecs.umich.edu               regs.xcrs[i].value);
3453940Ssaidi@eecs.umich.edu    }
3463918Ssaidi@eecs.umich.edu}
3473918Ssaidi@eecs.umich.edu
// Pretty-print pending exception/interrupt/NMI delivery state as
// returned by KVM_GET_VCPU_EVENTS.
static void
dumpKvm(const struct kvm_vcpu_events &events)
{
    inform("vCPU events:\n");

    inform("\tException: [inj: %i, nr: %i, has_ec: %i, ec: %i]\n",
           events.exception.injected, events.exception.nr,
           events.exception.has_error_code, events.exception.error_code);

    inform("\tInterrupt: [inj: %i, nr: %i, soft: %i]\n",
           events.interrupt.injected, events.interrupt.nr,
           events.interrupt.soft);

    inform("\tNMI: [inj: %i, pending: %i, masked: %i]\n",
           events.nmi.injected, events.nmi.pending,
           events.nmi.masked);

    inform("\tSIPI vector: 0x%x\n", events.sipi_vector);
    inform("\tFlags: 0x%x\n", events.flags);
}
3683918Ssaidi@eecs.umich.edu
/**
 * Check whether a virtual address is canonical.
 *
 * x86-64 doesn't currently use the full 64-bit virtual address space;
 * it uses signed 48-bit addresses that are sign-extended to 64 bits.
 * An address is canonical when bits 63:47 are all equal.
 */
static bool
isCanonicalAddress(uint64_t addr)
{
    const uint64_t sign_mask(0xffff800000000000ULL);
    const uint64_t sign_bits(addr & sign_mask);
    return sign_bits == 0 || sign_bits == sign_mask;
}
3791851SN/A
/**
 * Sanity-check a segment register value before it is handed to KVM.
 *
 * Emits warnings (never fatals) for combinations of base, type, S, P,
 * and limit/granularity bits that violate the architectural rules the
 * checks below encode, to help diagnose KVM_SET_SREGS failures.
 *
 * @param name Human-readable segment name used in warning messages.
 * @param idx gem5 misc-register index identifying the segment.
 * @param seg Segment state in KVM's representation.
 * @param sregs Complete special-register file, used for checks that
 *        relate one segment to another (e.g., CS DPL vs. SS DPL).
 */
static void
checkSeg(const char *name, const int idx, const struct kvm_segment &seg,
         struct kvm_sregs sregs)
{
    // Check the register base
    switch (idx) {
      // These segments may have a 64-bit base, but it must be canonical.
      case MISCREG_TSL:
      case MISCREG_TR:
      case MISCREG_FS:
      case MISCREG_GS:
        if (!isCanonicalAddress(seg.base))
            warn("Illegal %s base: 0x%x\n", name, seg.base);
        break;

      case MISCREG_SS:
      case MISCREG_DS:
      case MISCREG_ES:
        if (seg.unusable)
            break;
        M5_FALLTHROUGH;
      // CS (and usable SS/DS/ES) bases must fit in 32 bits.
      case MISCREG_CS:
        if (seg.base & 0xffffffff00000000ULL)
            warn("Illegal %s base: 0x%x\n", name, seg.base);
        break;
    }

    // Check the type
    switch (idx) {
      case MISCREG_CS:
        switch (seg.type) {
          case 3:
            if (seg.dpl != 0)
                warn("CS type is 3 but dpl != 0.\n");
            break;
          // Non-conforming code segments: DPL must equal SS DPL.
          case 9:
          case 11:
            if (seg.dpl != sregs.ss.dpl)
                warn("CS type is %i but CS DPL != SS DPL\n", seg.type);
            break;
          // Conforming code segments: DPL may not exceed SS DPL.
          case 13:
          case 15:
            if (seg.dpl > sregs.ss.dpl)
                warn("CS type is %i but CS DPL > SS DPL\n", seg.type);
            break;
          default:
            warn("Illegal CS type: %i\n", seg.type);
            break;
        }
        break;

      case MISCREG_SS:
        if (seg.unusable)
            break;
        switch (seg.type) {
          case 3:
            if (sregs.cs.type == 3 && seg.dpl != 0)
                warn("CS type is 3, but SS DPL is != 0.\n");
            M5_FALLTHROUGH;
          case 7:
            // Outside protected mode the stack segment DPL must be 0.
            if (!(sregs.cr0 & 1) && seg.dpl != 0)
                warn("SS DPL is %i, but CR0 PE is 0\n", seg.dpl);
            break;
          default:
            warn("Illegal SS type: %i\n", seg.type);
            break;
        }
        break;

      case MISCREG_DS:
      case MISCREG_ES:
      case MISCREG_FS:
      case MISCREG_GS:
        if (seg.unusable)
            break;
        // Data segments must be accessed; code segments must be readable.
        if (!(seg.type & 0x1) ||
            ((seg.type & 0x8) && !(seg.type & 0x2)))
            warn("%s has an illegal type field: %i\n", name, seg.type);
        break;

      case MISCREG_TR:
        // TODO: We should check the CPU mode
        if (seg.type != 3 && seg.type != 11)
            warn("%s: Illegal segment type (%i)\n", name, seg.type);
        break;

      case MISCREG_TSL:
        if (seg.unusable)
            break;
        if (seg.type != 2)
            warn("%s: Illegal segment type (%i)\n", name, seg.type);
        break;
    }

    // Check the S (descriptor type) flag: set for code/data segments,
    // clear for system segments (TR, LDT).
    switch (idx) {
      case MISCREG_SS:
      case MISCREG_DS:
      case MISCREG_ES:
      case MISCREG_FS:
      case MISCREG_GS:
        if (seg.unusable)
            break;
        M5_FALLTHROUGH;
      case MISCREG_CS:
        if (!seg.s)
            warn("%s: S flag not set\n", name);
        break;

      case MISCREG_TSL:
        if (seg.unusable)
            break;
        M5_FALLTHROUGH;
      case MISCREG_TR:
        if (seg.s)
            warn("%s: S flag is set\n", name);
        break;
    }

    // Check the P (present) flag and the limit/granularity pairing.
    switch (idx) {
      case MISCREG_SS:
      case MISCREG_DS:
      case MISCREG_ES:
      case MISCREG_FS:
      case MISCREG_GS:
      case MISCREG_TSL:
        if (seg.unusable)
            break;
        M5_FALLTHROUGH;
      case MISCREG_TR:
      case MISCREG_CS:
        if (!seg.present)
            warn("%s: P flag not set\n", name);

        // With G set, the low 12 limit bits must be all ones; with G
        // clear, the limit must fit in 20 bits.
        if (((seg.limit & 0xFFF) == 0 && seg.g) ||
            ((seg.limit & 0xFFF00000) != 0 && !seg.g)) {
            warn("%s limit (0x%x) and g (%i) combination is illegal.\n",
                 name, seg.limit, seg.g);
        }
        break;
    }

    // TODO: Check CS DB
}
5223118Sstever@eecs.umich.edu
/**
 * Construct an x86 KVM-based CPU.
 *
 * Panics if the host KVM implementation lacks capabilities that are
 * strictly required, warns about missing optional ones, and records
 * which optional state-transfer interfaces (debug registers, XSAVE,
 * XCRs) are available for later use.
 */
X86KvmCPU::X86KvmCPU(X86KvmCPUParams *params)
    : BaseKvmCPU(params),
      useXSave(params->useXSave)
{
    Kvm &kvm(*vm.kvm);

    // Hard requirements.
    if (!kvm.capSetTSSAddress())
        panic("KVM: Missing capability (KVM_CAP_SET_TSS_ADDR)\n");
    if (!kvm.capExtendedCPUID())
        panic("KVM: Missing capability (KVM_CAP_EXT_CPUID)\n");
    // Soft requirements: degraded but functional without them.
    if (!kvm.capUserNMI())
        warn("KVM: Missing capability (KVM_CAP_USER_NMI)\n");
    if (!kvm.capVCPUEvents())
        warn("KVM: Missing capability (KVM_CAP_VCPU_EVENTS)\n");

    haveDebugRegs = kvm.capDebugRegs();
    haveXSave = kvm.capXSave();
    haveXCRs = kvm.capXCRs();

    // Fall back to the legacy FPU interface if the user requested
    // XSAVE but the host doesn't provide it.
    if (useXSave && !haveXSave) {
        warn("KVM: XSAVE not supported by host. MXCSR synchronization might be "
             "unreliable due to kernel bugs.\n");
        useXSave = false;
    } else if (!useXSave) {
        warn("KVM: XSave FPU/SIMD synchronization disabled by user.\n");
    }
}
5501858SN/A
// Nothing to tear down beyond what BaseKvmCPU's destructor handles.
X86KvmCPU::~X86KvmCPU()
{
}
5541858SN/A
/**
 * Late initialization, called once the base class has set up the VM
 * and vCPU; pushes the CPUID configuration into KVM.
 */
void
X86KvmCPU::startup()
{
    BaseKvmCPU::startup();

    updateCPUID();

    // TODO: Do we need to create an identity mapped TSS area? We
    // should call kvm.vm.setTSSAddress() here in that case. It should
    // only be needed for old versions of the virtualization
    // extensions. We should make sure that the identity range is
    // reserved in the e820 memory map in that case.
}
5681859SN/A
5691859SN/Avoid
5701862SN/AX86KvmCPU::dump() const
5711859SN/A{
5721859SN/A    dumpIntRegs();
5731859SN/A    if (useXSave)
5741858SN/A        dumpXSave();
5751858SN/A    else
5762139SN/A        dumpFpuRegs();
5774202Sbinkertn@umich.edu    dumpSpecRegs();
5784202Sbinkertn@umich.edu    dumpDebugRegs();
5792139SN/A    dumpXCRs();
5802155SN/A    dumpVCpuEvents();
5814202Sbinkertn@umich.edu    dumpMSRs();
5824202Sbinkertn@umich.edu}
5834202Sbinkertn@umich.edu
5842155SN/Avoid
5851869SN/AX86KvmCPU::dumpFpuRegs() const
5861869SN/A{
5871869SN/A    struct kvm_fpu fpu;
5881869SN/A    getFPUState(fpu);
5894202Sbinkertn@umich.edu    dumpKvm(fpu);
5904202Sbinkertn@umich.edu}
5914202Sbinkertn@umich.edu
5924202Sbinkertn@umich.eduvoid
5934202Sbinkertn@umich.eduX86KvmCPU::dumpIntRegs() const
5944202Sbinkertn@umich.edu{
5954202Sbinkertn@umich.edu    struct kvm_regs regs;
5964202Sbinkertn@umich.edu    getRegisters(regs);
5975341Sstever@gmail.com    dumpKvm(regs);
5985341Sstever@gmail.com}
5995341Sstever@gmail.com
6005342Sstever@gmail.comvoid
6015342Sstever@gmail.comX86KvmCPU::dumpSpecRegs() const
6024202Sbinkertn@umich.edu{
6034202Sbinkertn@umich.edu    struct kvm_sregs sregs;
6044202Sbinkertn@umich.edu    getSpecialRegisters(sregs);
6054202Sbinkertn@umich.edu    dumpKvm(sregs);
6064202Sbinkertn@umich.edu}
6071869SN/A
/**
 * Dump the hardware debug registers, if the host kernel exposes them.
 *
 * KVM_GET_DEBUGREGS only exists in newer kernel headers, hence the
 * compile-time guard in addition to the runtime capability flag.
 */
void
X86KvmCPU::dumpDebugRegs() const
{
    if (haveDebugRegs) {
#ifdef KVM_GET_DEBUGREGS
        struct kvm_debugregs dregs;
        getDebugRegisters(dregs);
        dumpKvm(dregs);
#endif
    } else {
        inform("Debug registers not supported by kernel.\n");
    }
}
6211869SN/A
6221869SN/Avoid
6231869SN/AX86KvmCPU::dumpXCRs() const
6241869SN/A{
6251965SN/A    if (haveXCRs) {
6261965SN/A        struct kvm_xcrs xcrs;
6271965SN/A        getXCRs(xcrs);
6281869SN/A        dumpKvm(xcrs);
6291869SN/A    } else {
6302733Sktlim@umich.edu        inform("XCRs not supported by kernel.\n");
6311869SN/A    }
6321858SN/A}
6331869SN/A
6341869SN/Avoid
6351869SN/AX86KvmCPU::dumpXSave() const
6361858SN/A{
6372761Sstever@eecs.umich.edu    if (haveXSave) {
6381869SN/A        struct kvm_xsave xsave;
6395385Sstever@gmail.com        getXSave(xsave);
6405385Sstever@gmail.com        dumpKvm(xsave);
6415522Snate@binkert.org    } else {
6421869SN/A        inform("XSave not supported by kernel.\n");
6431869SN/A    }
6441869SN/A}
6451869SN/A
6461869SN/Avoid
6471869SN/AX86KvmCPU::dumpVCpuEvents() const
6481858SN/A{
649955SN/A    struct kvm_vcpu_events events;
650955SN/A    getVCpuEvents(events);
6511869SN/A    dumpKvm(events);
6521869SN/A}
6531869SN/A
6541869SN/Avoid
6551869SN/AX86KvmCPU::dumpMSRs() const
6561869SN/A{
6571869SN/A    const Kvm::MSRIndexVector &supported_msrs(vm.kvm->getSupportedMSRs());
6581869SN/A    std::unique_ptr<struct kvm_msrs> msrs(
6591869SN/A        newVarStruct<struct kvm_msrs, struct kvm_msr_entry>(
6601869SN/A            supported_msrs.size()));
6611869SN/A
6621869SN/A    msrs->nmsrs = supported_msrs.size();
6631869SN/A    for (int i = 0; i < supported_msrs.size(); ++i) {
6641869SN/A        struct kvm_msr_entry &e(msrs->entries[i]);
6651869SN/A        e.index = supported_msrs[i];
6661869SN/A        e.reserved = 0;
6671869SN/A        e.data = 0;
6681869SN/A    }
6691869SN/A    getMSRs(*msrs.get());
6701869SN/A
6711869SN/A    dumpKvm(*msrs.get());
6721869SN/A}
6731869SN/A
void
X86KvmCPU::updateKvmState()
{
    // Push the complete gem5 thread context into KVM: integer
    // registers, special registers, FP/SIMD state, and MSRs.
    updateKvmStateRegs();
    updateKvmStateSRegs();
    updateKvmStateFPU();
    updateKvmStateMSRs();

    // Only produce the (expensive) full state dump when the
    // KvmContext debug flag is actually enabled.
    DPRINTF(KvmContext, "X86KvmCPU::updateKvmState():\n");
    if (DTRACE(KvmContext))
        dump();
}
6863356Sbinkertn@umich.edu
void
X86KvmCPU::updateKvmStateRegs()
{
    // Copy the integer register file plus RIP/RFLAGS from the gem5
    // thread context into KVM.
    struct kvm_regs regs;

#define APPLY_IREG(kreg, mreg) regs.kreg = tc->readIntReg(mreg)
    FOREACH_IREG();
#undef APPLY_IREG

    // gem5's instAddr() is a linear address; subtract the CS base to
    // get the CS-relative RIP stored in kvm_regs.
    regs.rip = tc->instAddr() - tc->readMiscReg(MISCREG_CS_BASE);

    /* You might think that setting regs.rflags to the contents
     * MISCREG_RFLAGS here would suffice. In that case you're
     * mistaken. We need to reconstruct it from a bunch of ucode
     * registers and wave a dead chicken over it (aka mask out and set
     * reserved bits) to get it to work.
     */
    regs.rflags = X86ISA::getRFlags(tc);

    setRegisters(regs);
}
7082655Sstever@eecs.umich.edu
// Translate one gem5 segment register (selector, base, limit and the
// packed attribute word) into KVM's kvm_segment representation.
static inline void
setKvmSegmentReg(ThreadContext *tc, struct kvm_segment &kvm_seg,
                 const int index)
{
    // Unpack the attribute misc reg into its bit fields.
    SegAttr attr(tc->readMiscRegNoEffect(MISCREG_SEG_ATTR(index)));

    kvm_seg.base = tc->readMiscRegNoEffect(MISCREG_SEG_BASE(index));
    kvm_seg.limit = tc->readMiscRegNoEffect(MISCREG_SEG_LIMIT(index));
    kvm_seg.selector = tc->readMiscRegNoEffect(MISCREG_SEG_SEL(index));
    kvm_seg.type = attr.type;
    kvm_seg.present = attr.present;
    kvm_seg.dpl = attr.dpl;
    kvm_seg.db = attr.defaultSize;
    kvm_seg.s = attr.system;
    kvm_seg.l = attr.longMode;
    kvm_seg.g = attr.granularity;
    kvm_seg.avl = attr.avl;

    // A segment is normally unusable when the selector is zero. There
    // is a attr.unusable flag in gem5, but it seems unused. qemu
    // seems to set this to 0 all the time, so we just do the same and
    // hope for the best.
    kvm_seg.unusable = 0;
}
7333546Sgblack@eecs.umich.edu
7343546Sgblack@eecs.umich.edustatic inline void
7353546Sgblack@eecs.umich.edusetKvmDTableReg(ThreadContext *tc, struct kvm_dtable &kvm_dtable,
7363546Sgblack@eecs.umich.edu                const int index)
7373546Sgblack@eecs.umich.edu{
7384781Snate@binkert.org    kvm_dtable.base = tc->readMiscRegNoEffect(MISCREG_SEG_BASE(index));
7394781Snate@binkert.org    kvm_dtable.limit = tc->readMiscRegNoEffect(MISCREG_SEG_LIMIT(index));
7404781Snate@binkert.org}
7414781Snate@binkert.org
7424781Snate@binkert.orgstatic void
7434781Snate@binkert.orgforceSegAccessed(struct kvm_segment &seg)
7444781Snate@binkert.org{
7454781Snate@binkert.org    // Intel's VMX requires that (some) usable segments are flagged as
7464781Snate@binkert.org    // 'accessed' (i.e., the lowest bit in the segment type is set)
7474781Snate@binkert.org    // when entering VMX. This wouldn't necessary be the case even if
7484781Snate@binkert.org    // gem5 did set the access bits correctly, so we force it to one
7494781Snate@binkert.org    // in that case.
7503546Sgblack@eecs.umich.edu    if (!seg.unusable)
7513546Sgblack@eecs.umich.edu        seg.type |= SEG_TYPE_BIT_ACCESSED;
7523546Sgblack@eecs.umich.edu}
7534781Snate@binkert.org
void
X86KvmCPU::updateKvmStateSRegs()
{
    // Copy the special registers (segments, control registers,
    // descriptor tables) into KVM, then massage the result so it
    // passes VMX's guest-state checks on VM entry.
    struct kvm_sregs sregs;

#define APPLY_SREG(kreg, mreg) sregs.kreg = tc->readMiscRegNoEffect(mreg)
#define APPLY_SEGMENT(kreg, idx) setKvmSegmentReg(tc, sregs.kreg, idx)
#define APPLY_DTABLE(kreg, idx) setKvmDTableReg(tc, sregs.kreg, idx)

    FOREACH_SREG();
    FOREACH_SEGMENT();
    FOREACH_DTABLE();

#undef APPLY_SREG
#undef APPLY_SEGMENT
#undef APPLY_DTABLE

    // Clear the interrupt bitmap
    memset(&sregs.interrupt_bitmap, 0, sizeof(sregs.interrupt_bitmap));

    // VMX requires CS, SS, DS, ES, FS, and GS to have the accessed
    // bit in the type field set.
    forceSegAccessed(sregs.cs);
    forceSegAccessed(sregs.ss);
    forceSegAccessed(sregs.ds);
    forceSegAccessed(sregs.es);
    forceSegAccessed(sregs.fs);
    forceSegAccessed(sregs.gs);

    // There are currently some cases where the active task isn't
    // marked as busy. This is illegal in VMX, so we force it to busy.
    if (sregs.tr.type == SEG_SYS_TYPE_TSS_AVAILABLE) {
        hack("tr.type (%i) is not busy. Forcing the busy bit.\n",
             sregs.tr.type);
        sregs.tr.type = SEG_SYS_TYPE_TSS_BUSY;
    }

    // VMX requires the DPL of SS and CS to be the same for
    // non-conforming code segments. It seems like m5 doesn't set the
    // DPL of SS correctly when taking interrupts, so we need to fix
    // that here.
    if ((sregs.cs.type == SEG_CS_TYPE_ACCESSED ||
         sregs.cs.type == SEG_CS_TYPE_READ_ACCESSED) &&
        sregs.cs.dpl != sregs.ss.dpl) {

        hack("CS.DPL (%i) != SS.DPL (%i): Forcing SS.DPL to %i\n",
             sregs.cs.dpl, sregs.ss.dpl, sregs.cs.dpl);
        sregs.ss.dpl = sregs.cs.dpl;
    }

    // Do checks after fixing up the state to avoid getting excessive
    // amounts of warnings.
    RFLAGS rflags_nocc(tc->readMiscReg(MISCREG_RFLAGS));
    if (!rflags_nocc.vm) {
        // Do segment verification if the CPU isn't entering virtual
        // 8086 mode.  We currently assume that unrestricted guest
        // mode is available.

#define APPLY_SEGMENT(kreg, idx) \
        checkSeg(# kreg, idx + MISCREG_SEG_SEL_BASE, sregs.kreg, sregs)

        FOREACH_SEGMENT();
#undef APPLY_SEGMENT
    }

    setSpecialRegisters(sregs);
}
8215341Sstever@gmail.com
// Copy the FP/SSE state that the legacy kvm_fpu and the FXSave
// layouts have in common (control/status words, x87 stack, XMM
// registers) from the gem5 thread context into 'fpu'.
template <typename T>
static void
updateKvmStateFPUCommon(ThreadContext *tc, T &fpu)
{
    fpu.mxcsr = tc->readMiscRegNoEffect(MISCREG_MXCSR);
    fpu.fcw = tc->readMiscRegNoEffect(MISCREG_FCW);
    // No need to rebuild from MISCREG_FSW and MISCREG_TOP if we read
    // with effects.
    fpu.fsw = tc->readMiscReg(MISCREG_FSW);

    // Convert gem5's x87 tag word into KVM's compact (one bit per
    // register) representation.
    uint64_t ftw(tc->readMiscRegNoEffect(MISCREG_FTW));
    fpu.ftwx = X86ISA::convX87TagsToXTags(ftw);

    fpu.last_opcode = tc->readMiscRegNoEffect(MISCREG_FOP);

    // TOP lives in FSW bits 11-13. fpu.fpr[] is indexed relative to
    // TOP (st(0)..st(7)) while gem5's FPR registers are physical, so
    // rotate by TOP while copying.
    const unsigned top((fpu.fsw >> 11) & 0x7);
    for (int i = 0; i < 8; ++i) {
        const unsigned reg_idx((i + top) & 0x7);
        const double value(bitsToFloat64(
                    tc->readFloatReg(FLOATREG_FPR(reg_idx))));
        DPRINTF(KvmContext, "Setting KVM FP reg %i (st[%i]) := %f\n",
                reg_idx, i, value);
        X86ISA::storeFloat80(fpu.fpr[i], value);
    }

    // TODO: We should update the MMX state

    // Each 128-bit XMM register is stored as two 64-bit halves.
    for (int i = 0; i < 16; ++i) {
        *(uint64_t *)&fpu.xmm[i][0] =
            tc->readFloatReg(FLOATREG_XMM_LOW(i));
        *(uint64_t *)&fpu.xmm[i][8] =
            tc->readFloatReg(FLOATREG_XMM_HIGH(i));
    }
}
8562761Sstever@eecs.umich.edu
void
X86KvmCPU::updateKvmStateFPULegacy()
{
    // Push the FP/SSE state into KVM using the legacy kvm_fpu
    // interface (used when the kernel doesn't support XSave).
    struct kvm_fpu fpu;

    // There is some padding in the FP registers, so we'd better zero
    // the whole struct.
    memset(&fpu, 0, sizeof(fpu));

    updateKvmStateFPUCommon(tc, fpu);

    // Only the 64-bit FP instruction/operand pointers are copied
    // below; warn if gem5 carries non-zero segment selectors that
    // would be lost in the transfer.
    if (tc->readMiscRegNoEffect(MISCREG_FISEG))
        warn_once("MISCREG_FISEG is non-zero.\n");

    fpu.last_ip = tc->readMiscRegNoEffect(MISCREG_FIOFF);

    if (tc->readMiscRegNoEffect(MISCREG_FOSEG))
        warn_once("MISCREG_FOSEG is non-zero.\n");

    fpu.last_dp = tc->readMiscRegNoEffect(MISCREG_FOOFF);

    setFPUState(fpu);
}
880
void
X86KvmCPU::updateKvmStateFPUXSave()
{
    // Push the FP/SSE state into KVM through the XSave interface,
    // treating the opaque xsave region as an FXSave area.
    struct kvm_xsave kxsave;
    FXSave &xsave(*(FXSave *)kxsave.region);

    // There is some padding and reserved fields in the structure, so
    // we'd better zero the whole thing.
    memset(&kxsave, 0, sizeof(kxsave));

    updateKvmStateFPUCommon(tc, xsave);

    // Only the 64-bit FP instruction/operand pointers are copied
    // below; warn if gem5 carries non-zero segment selectors that
    // would be lost in the transfer.
    if (tc->readMiscRegNoEffect(MISCREG_FISEG))
        warn_once("MISCREG_FISEG is non-zero.\n");

    xsave.ctrl64.fpu_ip = tc->readMiscRegNoEffect(MISCREG_FIOFF);

    if (tc->readMiscRegNoEffect(MISCREG_FOSEG))
        warn_once("MISCREG_FOSEG is non-zero.\n");

    xsave.ctrl64.fpu_dp = tc->readMiscRegNoEffect(MISCREG_FOOFF);

    setXSave(kxsave);
}
905
906void
907X86KvmCPU::updateKvmStateFPU()
908{
909    if (useXSave)
910        updateKvmStateFPUXSave();
911    else
912        updateKvmStateFPULegacy();
913}
914
915void
916X86KvmCPU::updateKvmStateMSRs()
917{
918    KvmMSRVector msrs;
919
920    const Kvm::MSRIndexVector &indices(getMsrIntersection());
921
922    for (auto it = indices.cbegin(); it != indices.cend(); ++it) {
923        struct kvm_msr_entry e;
924
925        e.index = *it;
926        e.reserved = 0;
927        e.data = tc->readMiscReg(msrMap.at(*it));
928        DPRINTF(KvmContext, "Adding MSR: idx: 0x%x, data: 0x%x\n",
929                e.index, e.data);
930
931        msrs.push_back(e);
932    }
933
934    setMSRs(msrs);
935}
936
void
X86KvmCPU::updateThreadContext()
{
    // Pull the complete vCPU state out of KVM and write it back into
    // the gem5 thread context.
    struct kvm_regs regs;
    struct kvm_sregs sregs;

    getRegisters(regs);
    getSpecialRegisters(sregs);

    // Only produce the (expensive) full state dump when the
    // KvmContext debug flag is actually enabled.
    DPRINTF(KvmContext, "X86KvmCPU::updateThreadContext():\n");
    if (DTRACE(KvmContext))
        dump();

    updateThreadContextRegs(regs, sregs);
    updateThreadContextSRegs(sregs);
    if (useXSave) {
        struct kvm_xsave xsave;
        getXSave(xsave);

       updateThreadContextXSave(xsave);
    } else {
        struct kvm_fpu fpu;
        getFPUState(fpu);

        updateThreadContextFPU(fpu);
    }
    updateThreadContextMSRs();

    // The M5 misc reg caches some values from other
    // registers. Writing to it with side effects causes it to be
    // updated from its source registers.
    tc->setMiscReg(MISCREG_M5_REG, 0);
}
970
void
X86KvmCPU::updateThreadContextRegs(const struct kvm_regs &regs,
                                   const struct kvm_sregs &sregs)
{
    // Copy the integer register file from KVM into gem5.
#define APPLY_IREG(kreg, mreg) tc->setIntReg(mreg, regs.kreg)

    FOREACH_IREG();

#undef APPLY_IREG

    // KVM's RIP is CS-relative; add the CS base back to reconstruct
    // the linear PC gem5 expects.
    tc->pcState(PCState(regs.rip + sregs.cs.base));

    // Flags are spread out across multiple semi-magic registers so we
    // need some special care when updating them.
    X86ISA::setRFlags(tc, regs.rflags);
}
987
988
// Write one KVM segment register (selector, base, limit and packed
// attributes) back into the gem5 thread context.
inline void
setContextSegment(ThreadContext *tc, const struct kvm_segment &kvm_seg,
                  const int index)
{
    // Repack the individual attribute bits into gem5's SegAttr word.
    SegAttr attr(0);

    attr.type = kvm_seg.type;
    attr.present = kvm_seg.present;
    attr.dpl = kvm_seg.dpl;
    attr.defaultSize = kvm_seg.db;
    attr.system = kvm_seg.s;
    attr.longMode = kvm_seg.l;
    attr.granularity = kvm_seg.g;
    attr.avl = kvm_seg.avl;
    attr.unusable = kvm_seg.unusable;

    // We need some setMiscReg magic here to keep the effective base
    // addresses in sync. We need an up-to-date version of EFER, so
    // make sure this is called after the sregs have been synced.
    tc->setMiscReg(MISCREG_SEG_BASE(index), kvm_seg.base);
    tc->setMiscReg(MISCREG_SEG_LIMIT(index), kvm_seg.limit);
    tc->setMiscReg(MISCREG_SEG_SEL(index), kvm_seg.selector);
    tc->setMiscReg(MISCREG_SEG_ATTR(index), attr);
}
1013
1014inline void
1015setContextSegment(ThreadContext *tc, const struct kvm_dtable &kvm_dtable,
1016                  const int index)
1017{
1018    // We need some setMiscReg magic here to keep the effective base
1019    // addresses in sync. We need an up-to-date version of EFER, so
1020    // make sure this is called after the sregs have been synced.
1021    tc->setMiscReg(MISCREG_SEG_BASE(index), kvm_dtable.base);
1022    tc->setMiscReg(MISCREG_SEG_LIMIT(index), kvm_dtable.limit);
1023}
1024
void
X86KvmCPU::updateThreadContextSRegs(const struct kvm_sregs &sregs)
{
    // The APIC base and CR8 also live in the shared kvm_run struct
    // (synced by kvmRunWrapper); make sure both copies agree.
    assert(getKvmRunState()->apic_base == sregs.apic_base);
    assert(getKvmRunState()->cr8 == sregs.cr8);

    // Plain special registers are written without side effects;
    // segments and descriptor tables go through setContextSegment,
    // which uses side-effecting writes to keep derived state in sync.
#define APPLY_SREG(kreg, mreg) tc->setMiscRegNoEffect(mreg, sregs.kreg)
#define APPLY_SEGMENT(kreg, idx) setContextSegment(tc, sregs.kreg, idx)
#define APPLY_DTABLE(kreg, idx) setContextSegment(tc, sregs.kreg, idx)
    FOREACH_SREG();
    FOREACH_SEGMENT();
    FOREACH_DTABLE();
#undef APPLY_SREG
#undef APPLY_SEGMENT
#undef APPLY_DTABLE
}
1041
// Copy the FP/SSE state that the legacy kvm_fpu and the FXSave
// layouts have in common from 'fpu' back into the gem5 thread
// context. Mirror image of updateKvmStateFPUCommon().
template<typename T>
static void
updateThreadContextFPUCommon(ThreadContext *tc, const T &fpu)
{
    // TOP lives in FSW bits 11-13; fpu.fpr[] is indexed relative to
    // TOP while gem5's FPR registers are physical, so rotate back.
    const unsigned top((fpu.fsw >> 11) & 0x7);

    for (int i = 0; i < 8; ++i) {
        const unsigned reg_idx((i + top) & 0x7);
        const double value(X86ISA::loadFloat80(fpu.fpr[i]));
        DPRINTF(KvmContext, "Setting gem5 FP reg %i (st[%i]) := %f\n",
                reg_idx, i, value);
        tc->setFloatReg(FLOATREG_FPR(reg_idx), floatToBits64(value));
    }

    // TODO: We should update the MMX state

    tc->setMiscRegNoEffect(MISCREG_X87_TOP, top);
    tc->setMiscRegNoEffect(MISCREG_MXCSR, fpu.mxcsr);
    tc->setMiscRegNoEffect(MISCREG_FCW, fpu.fcw);
    tc->setMiscRegNoEffect(MISCREG_FSW, fpu.fsw);

    // Expand KVM's compact tag word back into the full x87 format.
    uint64_t ftw(convX87XTagsToTags(fpu.ftwx));
    // TODO: Are these registers really the same?
    tc->setMiscRegNoEffect(MISCREG_FTW, ftw);
    tc->setMiscRegNoEffect(MISCREG_FTAG, ftw);

    tc->setMiscRegNoEffect(MISCREG_FOP, fpu.last_opcode);

    // Each 128-bit XMM register is stored as two 64-bit halves.
    for (int i = 0; i < 16; ++i) {
        tc->setFloatReg(FLOATREG_XMM_LOW(i), *(uint64_t *)&fpu.xmm[i][0]);
        tc->setFloatReg(FLOATREG_XMM_HIGH(i), *(uint64_t *)&fpu.xmm[i][8]);
    }
}
1075
void
X86KvmCPU::updateThreadContextFPU(const struct kvm_fpu &fpu)
{
    // Sync the legacy FPU state from KVM into gem5.
    updateThreadContextFPUCommon(tc, fpu);

    // Only 64-bit FP instruction/operand pointers are transferred;
    // the segment selector misc regs are cleared since kvm_fpu has
    // no fields for them.
    tc->setMiscRegNoEffect(MISCREG_FISEG, 0);
    tc->setMiscRegNoEffect(MISCREG_FIOFF, fpu.last_ip);
    tc->setMiscRegNoEffect(MISCREG_FOSEG, 0);
    tc->setMiscRegNoEffect(MISCREG_FOOFF, fpu.last_dp);
}
1086
void
X86KvmCPU::updateThreadContextXSave(const struct kvm_xsave &kxsave)
{
    // Sync the FP/SSE state from KVM into gem5, interpreting the
    // opaque xsave region as an FXSave area.
    const FXSave &xsave(*(const FXSave *)kxsave.region);

    updateThreadContextFPUCommon(tc, xsave);

    // Only 64-bit FP instruction/operand pointers are transferred;
    // the segment selector misc regs are cleared (mirrors
    // updateThreadContextFPU above).
    tc->setMiscRegNoEffect(MISCREG_FISEG, 0);
    tc->setMiscRegNoEffect(MISCREG_FIOFF, xsave.ctrl64.fpu_ip);
    tc->setMiscRegNoEffect(MISCREG_FOSEG, 0);
    tc->setMiscRegNoEffect(MISCREG_FOOFF, xsave.ctrl64.fpu_dp);
}
1099
1100void
1101X86KvmCPU::updateThreadContextMSRs()
1102{
1103    const Kvm::MSRIndexVector &msrs(getMsrIntersection());
1104
1105    std::unique_ptr<struct kvm_msrs> kvm_msrs(
1106        newVarStruct<struct kvm_msrs, struct kvm_msr_entry>(msrs.size()));
1107    struct kvm_msr_entry *entry;
1108
1109    // Create a list of MSRs to read
1110    kvm_msrs->nmsrs = msrs.size();
1111    entry = &kvm_msrs->entries[0];
1112    for (auto it = msrs.cbegin(); it != msrs.cend(); ++it, ++entry) {
1113        entry->index = *it;
1114        entry->reserved = 0;
1115        entry->data = 0;
1116    }
1117
1118    getMSRs(*kvm_msrs.get());
1119
1120    // Update M5's state
1121    entry = &kvm_msrs->entries[0];
1122    for (int i = 0; i < kvm_msrs->nmsrs; ++i, ++entry) {
1123        DPRINTF(KvmContext, "Setting M5 MSR: idx: 0x%x, data: 0x%x\n",
1124                entry->index, entry->data);
1125
1126        tc->setMiscReg(X86ISA::msrMap.at(entry->index), entry->data);
1127    }
1128}
1129
void
X86KvmCPU::deliverInterrupts()
{
    // Fetch the highest-priority pending interrupt from the gem5
    // interrupt controller and deliver it. NMIs and ordinary vectored
    // interrupts are injected through KVM; INIT and STARTUP are
    // emulated on the gem5 side by invoking the fault directly.
    Fault fault;

    syncThreadContext();

    {
        // Migrate to the interrupt controller's thread to get the
        // interrupt. Even though the individual methods are safe to
        // call across threads, we might still lose interrupts unless
        // they are getInterrupt() and updateIntrInfo() are called
        // atomically.
        EventQueue::ScopedMigration migrate(interrupts[0]->eventQueue());
        fault = interrupts[0]->getInterrupt(tc);
        interrupts[0]->updateIntrInfo(tc);
    }

    X86Interrupt *x86int(dynamic_cast<X86Interrupt *>(fault.get()));
    if (dynamic_cast<NonMaskableInterrupt *>(fault.get())) {
        DPRINTF(KvmInt, "Delivering NMI\n");
        kvmNonMaskableInterrupt();
    } else if (dynamic_cast<InitInterrupt *>(fault.get())) {
        DPRINTF(KvmInt, "INIT interrupt\n");
        fault.get()->invoke(tc);
        // Delay the kvm state update since we won't enter KVM on this
        // tick.
        threadContextDirty = true;
        // HACK: gem5 doesn't actually have any BIOS code, which means
        // that we need to halt the thread and wait for a startup
        // interrupt before restarting the thread. The simulated CPUs
        // use the same kind of hack using a microcode routine.
        thread->suspend();
    } else if (dynamic_cast<StartupInterrupt *>(fault.get())) {
        DPRINTF(KvmInt, "STARTUP interrupt\n");
        fault.get()->invoke(tc);
        // The kvm state is assumed to have been updated when entering
        // kvmRun(), so we need to update manually it here.
        updateKvmState();
    } else if (x86int) {
        // Ordinary external interrupt: hand the vector to KVM for
        // injection on the next VM entry.
        struct kvm_interrupt kvm_int;
        kvm_int.irq = x86int->getVector();

        DPRINTF(KvmInt, "Delivering interrupt: %s (%u)\n",
                fault->name(), kvm_int.irq);

        kvmInterrupt(kvm_int);
    } else {
        panic("KVM: Unknown interrupt type\n");
    }

}
1182
Tick
X86KvmCPU::kvmRun(Tick ticks)
{
    // Deliver any pending gem5 interrupts before (re-)entering the
    // guest. When KVM can't accept an injection right now, request an
    // interrupt window so we exit as soon as it can.
    struct kvm_run &kvm_run(*getKvmRunState());

    if (interrupts[0]->checkInterruptsRaw()) {
        if (interrupts[0]->hasPendingUnmaskable()) {
            // NMI/INIT/STARTUP can always be delivered regardless of
            // the guest's interrupt flag.
            DPRINTF(KvmInt,
                    "Delivering unmaskable interrupt.\n");
            syncThreadContext();
            deliverInterrupts();
        } else if (kvm_run.ready_for_interrupt_injection) {
            // KVM claims that it is ready for an interrupt. It might
            // be lying if we just updated rflags and disabled
            // interrupts (e.g., by doing a CPU handover). Let's sync
            // the thread context and check if there are /really/
            // interrupts that should be delivered now.
            syncThreadContext();
            if (interrupts[0]->checkInterrupts(tc)) {
                DPRINTF(KvmInt,
                        "M5 has pending interrupts, delivering interrupt.\n");

                deliverInterrupts();
            } else {
                DPRINTF(KvmInt,
                        "Interrupt delivery delayed due to KVM confusion.\n");
                kvm_run.request_interrupt_window = 1;
            }
        } else if (!kvm_run.request_interrupt_window) {
            DPRINTF(KvmInt,
                    "M5 has pending interrupts, requesting interrupt "
                    "window.\n");
            kvm_run.request_interrupt_window = 1;
        }
    } else {
        kvm_run.request_interrupt_window = 0;
    }

    // The CPU might have been suspended as a result of the INIT
    // interrupt delivery hack. In that case, don't enter into KVM.
    if (_status == Idle)
        return 0;
    else
        return kvmRunWrapper(ticks);
}
1228
Tick
X86KvmCPU::kvmRunDrain()
{
    // Run the guest just far enough that its architectural state can
    // be drained (no injected events pending).
    struct kvm_run &kvm_run(*getKvmRunState());

    if (!archIsDrained()) {
        DPRINTF(Drain, "kvmRunDrain: Architecture code isn't drained\n");

        // Tell KVM to find a suitable place to deliver interrupts. This
        // should ensure that pending interrupts have been delivered and
        // things are reasonably consistent (i.e., no interrupts pending
        // in the guest).
        kvm_run.request_interrupt_window = 1;

        // Limit the run to 1 millisecond. That is hopefully enough to
        // reach an interrupt window. Otherwise, we'll just try again
        // later.
        return kvmRunWrapper(1 * SimClock::Float::ms);
    } else {
        DPRINTF(Drain, "kvmRunDrain: Delivering pending IO\n");

        // A zero-length run still lets KVM complete any outstanding
        // IO before we drain.
        return kvmRunWrapper(0);
    }
}
1253
Tick
X86KvmCPU::kvmRunWrapper(Tick ticks)
{
    struct kvm_run &kvm_run(*getKvmRunState());

    // Synchronize the APIC base and CR8 here since they are present
    // in the kvm_run struct, which makes the synchronization really
    // cheap.
    kvm_run.apic_base = tc->readMiscReg(MISCREG_APIC_BASE);
    kvm_run.cr8 = tc->readMiscReg(MISCREG_CR8);

    const Tick run_ticks(BaseKvmCPU::kvmRun(ticks));

    tc->setMiscReg(MISCREG_APIC_BASE, kvm_run.apic_base);
    // NOTE(review): this re-reads CR8 from the thread context instead
    // of writing kvm_run.cr8 back into it. The symmetric counterpart
    // of the APIC base write-back above would be
    // tc->setMiscReg(MISCREG_CR8, kvm_run.cr8). Looks like a
    // copy-paste slip -- confirm whether guest CR8/TPR changes are
    // synchronized elsewhere before changing it.
    kvm_run.cr8 = tc->readMiscReg(MISCREG_CR8);

    return run_ticks;
}
1272
1273uint64_t
1274X86KvmCPU::getHostCycles() const
1275{
1276    return getMSR(MSR_TSC);
1277}
1278
void
X86KvmCPU::handleIOMiscReg32(int miscreg)
{
    // Service a 32-bit IO-port access by redirecting it to a gem5
    // misc register (used for the PCI config-space address port).
    struct kvm_run &kvm_run(*getKvmRunState());
    const uint16_t port(kvm_run.io.port);

    assert(kvm_run.exit_reason == KVM_EXIT_IO);

    // Only plain 32-bit, non-string accesses are supported here.
    if (kvm_run.io.size != 4) {
        panic("Unexpected IO size (%u) for address 0x%x.\n",
              kvm_run.io.size, port);
    }

    if (kvm_run.io.count != 1) {
        panic("Unexpected IO count (%u) for address 0x%x.\n",
              kvm_run.io.count, port);
    }

    // OUT: the guest wrote a value, store it in the misc reg (with
    // side effects). IN: fill the guest's data buffer from the misc
    // reg without triggering side effects.
    uint32_t *data((uint32_t *)getGuestData(kvm_run.io.data_offset));
    if (kvm_run.io.direction == KVM_EXIT_IO_OUT)
        tc->setMiscReg(miscreg, *data);
    else
        *data = tc->readMiscRegNoEffect(miscreg);
}
1303
Tick
X86KvmCPU::handleKvmExitIO()
{
    // Handle a KVM_EXIT_IO exit: translate the guest's port access
    // into one or more gem5 memory requests on the data port.
    // Returns the accumulated device delay in ticks.
    struct kvm_run &kvm_run(*getKvmRunState());
    bool isWrite(kvm_run.io.direction == KVM_EXIT_IO_OUT);
    unsigned char *guestData(getGuestData(kvm_run.io.data_offset));
    Tick delay(0);
    uint16_t port(kvm_run.io.port);
    Addr pAddr;
    const int count(kvm_run.io.count);

    assert(kvm_run.io.direction == KVM_EXIT_IO_IN ||
           kvm_run.io.direction == KVM_EXIT_IO_OUT);

    DPRINTF(KvmIO, "KVM-x86: Handling IO instruction (%s) (port: 0x%x)\n",
            (isWrite ? "out" : "in"), kvm_run.io.port);

    /* Vanilla gem5 handles PCI discovery in the TLB(!). Since we
     * don't use the TLB component, we need to intercept and handle
     * the PCI configuration space IO ports here.
     *
     * The IO port PCI discovery mechanism uses one address register
     * and one data register. We map the address register to a misc
     * reg and use that to re-route data register accesses to the
     * right location in the PCI configuration space.
     */
    if (port == IO_PCI_CONF_ADDR) {
        handleIOMiscReg32(MISCREG_PCI_CONFIG_ADDRESS);
        return 0;
    } else if ((port & ~0x3) == IO_PCI_CONF_DATA_BASE) {
        // Data-register access: route to PCI config space if the
        // enable bit (bit 31) of the stored address is set, otherwise
        // fall back to a plain IO-space access.
        Addr pciConfigAddr(tc->readMiscRegNoEffect(MISCREG_PCI_CONFIG_ADDRESS));
        if (pciConfigAddr & 0x80000000) {
            pAddr = X86ISA::x86PciConfigAddress((pciConfigAddr & 0x7ffffffc) |
                                                (port & 0x3));
        } else {
            pAddr = X86ISA::x86IOAddress(port);
        }
    } else {
        pAddr = X86ISA::x86IOAddress(port);
    }

    const MemCmd cmd(isWrite ? MemCmd::WriteReq : MemCmd::ReadReq);
    // Temporarily lock and migrate to the device event queue to
    // prevent races in multi-core mode.
    EventQueue::ScopedMigration migrate(deviceEventQueue());
    // String IO instructions (count > 1) are handled as a sequence of
    // individual accesses over consecutive guest data slots.
    for (int i = 0; i < count; ++i) {
        RequestPtr io_req = std::make_shared<Request>(
            pAddr, kvm_run.io.size,
            Request::UNCACHEABLE, dataMasterId());

        io_req->setContext(tc->contextId());

        PacketPtr pkt = new Packet(io_req, cmd);

        pkt->dataStatic(guestData);
        delay += dataPort.submitIO(pkt);

        guestData += kvm_run.io.size;
    }

    return delay;
}
1366
1367Tick
1368X86KvmCPU::handleKvmExitIRQWindowOpen()
1369{
1370    // We don't need to do anything here since this is caught the next
1371    // time we execute kvmRun(). We still overload the exit event to
1372    // silence the warning about an unhandled exit event.
1373    return 0;
1374}
1375
bool
X86KvmCPU::archIsDrained() const
{
    // The architecture is drained only when KVM has no injected or
    // pending events left for the vCPU.
    struct kvm_vcpu_events events;

    getVCpuEvents(events);

    // We could probably handle this in a by re-inserting interrupts
    // that are pending into gem5 on a drain. However, that would
    // probably be tricky to do reliably, so we'll just prevent a
    // drain if there is anything pending in the
    // guest. X86KvmCPU::kvmRunDrain() minimizes the amount of code
    // executed in the guest by requesting an interrupt window if
    // there are pending interrupts.
    const bool pending_events(events.exception.injected ||
                              events.interrupt.injected ||
                              events.nmi.injected || events.nmi.pending);

    if (pending_events) {
        DPRINTF(Drain, "archIsDrained: Pending events: %s %s %s %s\n",
                events.exception.injected ? "exception" : "",
                events.interrupt.injected ? "interrupt" : "",
                events.nmi.injected ? "nmi[i]" : "",
                events.nmi.pending ? "nmi[p]" : "");
    }

    return !pending_events;
}
1404
1405static struct kvm_cpuid_entry2
1406makeKvmCpuid(uint32_t function, uint32_t index,
1407             CpuidResult &result)
1408{
1409    struct kvm_cpuid_entry2 e;
1410    e.function = function;
1411    e.index = index;
1412    e.flags = 0;
1413    e.eax = (uint32_t)result.rax;
1414    e.ebx = (uint32_t)result.rbx;
1415    e.ecx = (uint32_t)result.rcx;
1416    e.edx = (uint32_t)result.rdx;
1417
1418    return e;
1419}
1420
1421void
1422X86KvmCPU::updateCPUID()
1423{
1424    Kvm::CPUIDVector m5_supported;
1425
1426    /* TODO: We currently don't support any of the functions that
1427     * iterate through data structures in the CPU using an index. It's
1428     * currently not a problem since M5 doesn't expose any of them at
1429     * the moment.
1430     */
1431
1432    /* Basic features */
1433    CpuidResult func0;
1434    X86ISA::doCpuid(tc, 0x0, 0, func0);
1435    for (uint32_t function = 0; function <= func0.rax; ++function) {
1436        CpuidResult cpuid;
1437        uint32_t idx(0);
1438
1439        X86ISA::doCpuid(tc, function, idx, cpuid);
1440        m5_supported.push_back(makeKvmCpuid(function, idx, cpuid));
1441    }
1442
1443    /* Extended features */
1444    CpuidResult efunc0;
1445    X86ISA::doCpuid(tc, 0x80000000, 0, efunc0);
1446    for (uint32_t function = 0x80000000; function <= efunc0.rax; ++function) {
1447        CpuidResult cpuid;
1448        uint32_t idx(0);
1449
1450        X86ISA::doCpuid(tc, function, idx, cpuid);
1451        m5_supported.push_back(makeKvmCpuid(function, idx, cpuid));
1452    }
1453
1454    setCPUID(m5_supported);
1455}
1456
1457void
1458X86KvmCPU::setCPUID(const struct kvm_cpuid2 &cpuid)
1459{
1460    if (ioctl(KVM_SET_CPUID2, (void *)&cpuid) == -1)
1461        panic("KVM: Failed to set guest CPUID2 (errno: %i)\n",
1462              errno);
1463}
1464
void
X86KvmCPU::setCPUID(const Kvm::CPUIDVector &cpuid)
{
    // Repackage the vector of CPUID entries into the variable-length
    // kvm_cpuid2 structure expected by the KVM_SET_CPUID2 ioctl.
    // NOTE(review): newVarStruct presumably allocates room for the
    // header plus cpuid.size() trailing entries -- confirm it also
    // zeroes the header's padding before the struct reaches KVM.
    std::unique_ptr<struct kvm_cpuid2> kvm_cpuid(
        newVarStruct<struct kvm_cpuid2, struct kvm_cpuid_entry2>(cpuid.size()));

    kvm_cpuid->nent = cpuid.size();
    std::copy(cpuid.begin(), cpuid.end(), kvm_cpuid->entries);

    setCPUID(*kvm_cpuid);
}
1476
1477void
1478X86KvmCPU::setMSRs(const struct kvm_msrs &msrs)
1479{
1480    if (ioctl(KVM_SET_MSRS, (void *)&msrs) == -1)
1481        panic("KVM: Failed to set guest MSRs (errno: %i)\n",
1482              errno);
1483}
1484
void
X86KvmCPU::setMSRs(const KvmMSRVector &msrs)
{
    // Repackage the vector of MSR entries into the variable-length
    // kvm_msrs structure expected by the KVM_SET_MSRS ioctl.
    // NOTE(review): relies on newVarStruct allocating the header plus
    // msrs.size() trailing kvm_msr_entry slots -- verify against its
    // definition elsewhere in this file.
    std::unique_ptr<struct kvm_msrs> kvm_msrs(
        newVarStruct<struct kvm_msrs, struct kvm_msr_entry>(msrs.size()));

    kvm_msrs->nmsrs = msrs.size();
    std::copy(msrs.begin(), msrs.end(), kvm_msrs->entries);

    setMSRs(*kvm_msrs);
}
1496
1497void
1498X86KvmCPU::getMSRs(struct kvm_msrs &msrs) const
1499{
1500    if (ioctl(KVM_GET_MSRS, (void *)&msrs) == -1)
1501        panic("KVM: Failed to get guest MSRs (errno: %i)\n",
1502              errno);
1503}
1504
1505
1506void
1507X86KvmCPU::setMSR(uint32_t index, uint64_t value)
1508{
1509    std::unique_ptr<struct kvm_msrs> kvm_msrs(
1510        newVarStruct<struct kvm_msrs, struct kvm_msr_entry>(1));
1511    struct kvm_msr_entry &entry(kvm_msrs->entries[0]);
1512
1513    kvm_msrs->nmsrs = 1;
1514    entry.index = index;
1515    entry.reserved = 0;
1516    entry.data = value;
1517
1518    setMSRs(*kvm_msrs.get());
1519}
1520
1521uint64_t
1522X86KvmCPU::getMSR(uint32_t index) const
1523{
1524    std::unique_ptr<struct kvm_msrs> kvm_msrs(
1525        newVarStruct<struct kvm_msrs, struct kvm_msr_entry>(1));
1526    struct kvm_msr_entry &entry(kvm_msrs->entries[0]);
1527
1528    kvm_msrs->nmsrs = 1;
1529    entry.index = index;
1530    entry.reserved = 0;
1531    entry.data = 0;
1532
1533    getMSRs(*kvm_msrs.get());
1534    return entry.data;
1535}
1536
1537const Kvm::MSRIndexVector &
1538X86KvmCPU::getMsrIntersection() const
1539{
1540    if (cachedMsrIntersection.empty()) {
1541        const Kvm::MSRIndexVector &kvm_msrs(vm.kvm->getSupportedMSRs());
1542
1543        DPRINTF(Kvm, "kvm-x86: Updating MSR intersection\n");
1544        for (auto it = kvm_msrs.cbegin(); it != kvm_msrs.cend(); ++it) {
1545            if (X86ISA::msrMap.find(*it) != X86ISA::msrMap.end()) {
1546                cachedMsrIntersection.push_back(*it);
1547                DPRINTF(Kvm, "kvm-x86: Adding MSR 0x%x\n", *it);
1548            } else {
1549                warn("kvm-x86: MSR (0x%x) unsupported by gem5. Skipping.\n",
1550                     *it);
1551            }
1552        }
1553    }
1554
1555    return cachedMsrIntersection;
1556}
1557
void
X86KvmCPU::getDebugRegisters(struct kvm_debugregs &regs) const
{
    // KVM_GET_DEBUGREGS is only defined by kernel headers that
    // support the debug register API; on older headers this call is
    // unavailable and we fail loudly instead of silently returning
    // garbage.
#ifdef KVM_GET_DEBUGREGS
    if (ioctl(KVM_GET_DEBUGREGS, &regs) == -1)
        panic("KVM: Failed to get guest debug registers\n");
#else
    panic("KVM: Unsupported getDebugRegisters call.\n");
#endif
}
1568
void
X86KvmCPU::setDebugRegisters(const struct kvm_debugregs &regs)
{
    // Mirror of getDebugRegisters(): only available when the kernel
    // headers define KVM_SET_DEBUGREGS; otherwise fail loudly.
#ifdef KVM_SET_DEBUGREGS
    if (ioctl(KVM_SET_DEBUGREGS, (void *)&regs) == -1)
        panic("KVM: Failed to set guest debug registers\n");
#else
    panic("KVM: Unsupported setDebugRegisters call.\n");
#endif
}
1579
1580void
1581X86KvmCPU::getXCRs(struct kvm_xcrs &regs) const
1582{
1583    if (ioctl(KVM_GET_XCRS, &regs) == -1)
1584        panic("KVM: Failed to get guest debug registers\n");
1585}
1586
1587void
1588X86KvmCPU::setXCRs(const struct kvm_xcrs &regs)
1589{
1590    if (ioctl(KVM_SET_XCRS, (void *)&regs) == -1)
1591        panic("KVM: Failed to set guest debug registers\n");
1592}
1593
1594void
1595X86KvmCPU::getXSave(struct kvm_xsave &xsave) const
1596{
1597    if (ioctl(KVM_GET_XSAVE, &xsave) == -1)
1598        panic("KVM: Failed to get guest debug registers\n");
1599}
1600
1601void
1602X86KvmCPU::setXSave(const struct kvm_xsave &xsave)
1603{
1604    if (ioctl(KVM_SET_XSAVE, (void *)&xsave) == -1)
1605        panic("KVM: Failed to set guest debug registers\n");
1606}
1607
1608
1609void
1610X86KvmCPU::getVCpuEvents(struct kvm_vcpu_events &events) const
1611{
1612    if (ioctl(KVM_GET_VCPU_EVENTS, &events) == -1)
1613        panic("KVM: Failed to get guest debug registers\n");
1614}
1615
1616void
1617X86KvmCPU::setVCpuEvents(const struct kvm_vcpu_events &events)
1618{
1619    if (ioctl(KVM_SET_VCPU_EVENTS, (void *)&events) == -1)
1620        panic("KVM: Failed to set guest debug registers\n");
1621}
1622
X86KvmCPU *
X86KvmCPUParams::create()
{
    // Instantiate an X86KvmCPU from this parameter object.
    return new X86KvmCPU(this);
}
1628