x86_cpu.cc revision 12749
/*
 * Copyright (c) 2013 Andreas Sandberg
 * All rights reserved
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Andreas Sandberg
 */

#include "cpu/kvm/x86_cpu.hh"

#include <linux/kvm.h>

#include <algorithm>
#include <cerrno>
#include <memory>

#include "arch/registers.hh"
#include "arch/x86/cpuid.hh"
#include "arch/x86/regs/msr.hh"
#include "arch/x86/utility.hh"
#include "cpu/kvm/base.hh"
#include "debug/Drain.hh"
#include "debug/Kvm.hh"
#include "debug/KvmContext.hh"
#include "debug/KvmIO.hh"
#include "debug/KvmInt.hh"

using namespace X86ISA;

#define MSR_TSC 0x10

#define IO_PCI_CONF_ADDR 0xCF8
#define IO_PCI_CONF_DATA_BASE 0xCFC

// Task segment type of an inactive 32-bit or 64-bit task
#define SEG_SYS_TYPE_TSS_AVAILABLE 9
// Task segment type of an active 32-bit or 64-bit task
#define SEG_SYS_TYPE_TSS_BUSY 11

// Non-conforming accessed code segment
#define SEG_CS_TYPE_ACCESSED 9
// Non-conforming accessed code segment that can be read
#define SEG_CS_TYPE_READ_ACCESSED 11

// The lowest bit of the type field for normal segments (code and
// data) is used to indicate that a segment has been accessed.
#define SEG_TYPE_BIT_ACCESSED 1

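/**
 * Layout of the 512-byte legacy x87/SSE area written by FXSAVE. The
 * region of a kvm_xsave starts with this same layout, which is why the
 * code below casts kvm_xsave::region to an FXSave.
 */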
struct FXSave
{
    uint16_t fcw;
    uint16_t fsw;
    uint8_t ftwx;
    uint8_t pad0;
    uint16_t last_opcode;
    union {
        struct {
            uint32_t fpu_ip;
            uint16_t fpu_cs;
            uint16_t pad1;
            uint32_t fpu_dp;
            uint16_t fpu_ds;
            uint16_t pad2;
        } ctrl32;

        struct {
            uint64_t fpu_ip;
            uint64_t fpu_dp;
        } ctrl64;
    };
    uint32_t mxcsr;
    uint32_t mxcsr_mask;

    uint8_t fpr[8][16];
    uint8_t xmm[16][16];

    uint64_t reserved[12];
} M5_ATTR_PACKED;

static_assert(sizeof(FXSave) == 512, "Unexpected size of FXSave");

#define FOREACH_IREG()                          \
    do {                                        \
        APPLY_IREG(rax, INTREG_RAX);            \
        APPLY_IREG(rbx, INTREG_RBX);            \
        APPLY_IREG(rcx, INTREG_RCX);            \
        APPLY_IREG(rdx, INTREG_RDX);            \
        APPLY_IREG(rsi, INTREG_RSI);            \
        APPLY_IREG(rdi, INTREG_RDI);            \
        APPLY_IREG(rsp, INTREG_RSP);            \
        APPLY_IREG(rbp, INTREG_RBP);            \
        APPLY_IREG(r8, INTREG_R8);              \
        APPLY_IREG(r9, INTREG_R9);              \
        APPLY_IREG(r10, INTREG_R10);            \
        APPLY_IREG(r11, INTREG_R11);            \
        APPLY_IREG(r12, INTREG_R12);            \
        APPLY_IREG(r13, INTREG_R13);            \
        APPLY_IREG(r14, INTREG_R14);            \
        APPLY_IREG(r15, INTREG_R15);            \
    } while (0)

#define FOREACH_SREG()                                  \
    do {                                                \
        APPLY_SREG(cr0, MISCREG_CR0);                   \
        APPLY_SREG(cr2, MISCREG_CR2);                   \
        APPLY_SREG(cr3, MISCREG_CR3);                   \
        APPLY_SREG(cr4, MISCREG_CR4);                   \
        APPLY_SREG(cr8, MISCREG_CR8);                   \
        APPLY_SREG(efer, MISCREG_EFER);                 \
        APPLY_SREG(apic_base, MISCREG_APIC_BASE);       \
    } while (0)

#define FOREACH_DREG()                          \
    do {                                        \
        APPLY_DREG(db[0], MISCREG_DR0);         \
        APPLY_DREG(db[1], MISCREG_DR1);         \
        APPLY_DREG(db[2], MISCREG_DR2);         \
        APPLY_DREG(db[3], MISCREG_DR3);         \
        APPLY_DREG(dr6, MISCREG_DR6);           \
        APPLY_DREG(dr7, MISCREG_DR7);           \
    } while (0)

#define FOREACH_SEGMENT()                                       \
    do {                                                        \
        APPLY_SEGMENT(cs, MISCREG_CS - MISCREG_SEG_SEL_BASE);   \
        APPLY_SEGMENT(ds, MISCREG_DS - MISCREG_SEG_SEL_BASE);   \
        APPLY_SEGMENT(es, MISCREG_ES - MISCREG_SEG_SEL_BASE);   \
        APPLY_SEGMENT(fs, MISCREG_FS - MISCREG_SEG_SEL_BASE);   \
        APPLY_SEGMENT(gs, MISCREG_GS - MISCREG_SEG_SEL_BASE);   \
        APPLY_SEGMENT(ss, MISCREG_SS - MISCREG_SEG_SEL_BASE);   \
        APPLY_SEGMENT(tr, MISCREG_TR - MISCREG_SEG_SEL_BASE);   \
        APPLY_SEGMENT(ldt, MISCREG_TSL - MISCREG_SEG_SEL_BASE); \
    } while (0)

#define FOREACH_DTABLE()                                        \
    do {                                                        \
        APPLY_DTABLE(gdt, MISCREG_TSG - MISCREG_SEG_SEL_BASE);  \
        APPLY_DTABLE(idt, MISCREG_IDTR - MISCREG_SEG_SEL_BASE); \
    } while (0)

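/**
 * Allocate a KVM struct that ends in a variable-length entry array
 * (e.g., kvm_msrs followed by its kvm_msr_entry records). The caller
 * owns the returned memory; dumpMSRs() below wraps it in a
 * std::unique_ptr.
 */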
template<typename STRUCT, typename ENTRY>
static STRUCT *newVarStruct(size_t entries)
{
    return (STRUCT *)operator new(sizeof(STRUCT) + entries * sizeof(ENTRY));
}

static void
dumpKvm(const struct kvm_regs &regs)
{
    inform("KVM register state:\n");

#define APPLY_IREG(kreg, mreg)                  \
    inform("\t" # kreg ": 0x%llx\n", regs.kreg)

    FOREACH_IREG();

#undef APPLY_IREG

    inform("\trip: 0x%llx\n", regs.rip);
    inform("\trflags: 0x%llx\n", regs.rflags);
}

static void
dumpKvm(const char *reg_name, const struct kvm_segment &seg)
{
    inform("\t%s: @0x%llx+%x [sel: 0x%x, type: 0x%x]\n"
           "\t\tpres.: %u, dpl: %u, db: %u, s: %u, l: %u, g: %u, avl: %u, unus.: %u\n",
           reg_name,
           seg.base, seg.limit, seg.selector, seg.type,
           seg.present, seg.dpl, seg.db, seg.s, seg.l, seg.g, seg.avl, seg.unusable);
}

static void
dumpKvm(const char *reg_name, const struct kvm_dtable &dtable)
{
    inform("\t%s: @0x%llx+%x\n",
           reg_name, dtable.base, dtable.limit);
}

static void
dumpKvm(const struct kvm_sregs &sregs)
{
#define APPLY_SREG(kreg, mreg)                          \
    inform("\t" # kreg ": 0x%llx\n", sregs.kreg);
#define APPLY_SEGMENT(kreg, idx)                \
    dumpKvm(# kreg, sregs.kreg);
#define APPLY_DTABLE(kreg, idx)                 \
    dumpKvm(# kreg, sregs.kreg);

    inform("Special registers:\n");
    FOREACH_SEGMENT();
    FOREACH_SREG();
    FOREACH_DTABLE();

    inform("Interrupt Bitmap:");
    for (int i = 0; i < KVM_NR_INTERRUPTS; i += 64)
        inform("  0x%.8x", sregs.interrupt_bitmap[i / 64]);

#undef APPLY_SREG
#undef APPLY_SEGMENT
#undef APPLY_DTABLE
}

#ifdef KVM_GET_DEBUGREGS
static void
dumpKvm(const struct kvm_debugregs &regs)
{
    inform("KVM debug state:\n");

#define APPLY_DREG(kreg, mreg)                  \
    inform("\t" # kreg ": 0x%llx\n", regs.kreg)

    FOREACH_DREG();

#undef APPLY_DREG

    inform("\tflags: 0x%llx\n", regs.flags);
}
#endif

static void
dumpFpuSpec(const struct FXSave &xs)
{
    inform("\tlast_ip: 0x%x\n", xs.ctrl64.fpu_ip);
    inform("\tlast_dp: 0x%x\n", xs.ctrl64.fpu_dp);
    inform("\tmxcsr_mask: 0x%x\n", xs.mxcsr_mask);
}

static void
dumpFpuSpec(const struct kvm_fpu &fpu)
{
    inform("\tlast_ip: 0x%x\n", fpu.last_ip);
    inform("\tlast_dp: 0x%x\n", fpu.last_dp);
}

template<typename T>
static void
dumpFpuCommon(const T &fpu)
{
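    // The x87 top-of-stack index is stored in bits 13:11 of the status word.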
    const unsigned top((fpu.fsw >> 11) & 0x7);
    inform("\tfcw: 0x%x\n", fpu.fcw);

    inform("\tfsw: 0x%x (top: %i, "
           "conditions: %s%s%s%s, exceptions: %s%s%s%s%s%s %s%s%s)\n",
           fpu.fsw, top,

           (fpu.fsw & CC0Bit) ? "C0" : "",
           (fpu.fsw & CC1Bit) ? "C1" : "",
           (fpu.fsw & CC2Bit) ? "C2" : "",
           (fpu.fsw & CC3Bit) ? "C3" : "",

           (fpu.fsw & IEBit) ? "I" : "",
           (fpu.fsw & DEBit) ? "D" : "",
           (fpu.fsw & ZEBit) ? "Z" : "",
           (fpu.fsw & OEBit) ? "O" : "",
           (fpu.fsw & UEBit) ? "U" : "",
           (fpu.fsw & PEBit) ? "P" : "",

           (fpu.fsw & StackFaultBit) ? "SF " : "",
           (fpu.fsw & ErrSummaryBit) ? "ES " : "",
           (fpu.fsw & BusyBit) ? "BUSY " : ""
        );
    inform("\tftwx: 0x%x\n", fpu.ftwx);
    inform("\tlast_opcode: 0x%x\n", fpu.last_opcode);
    dumpFpuSpec(fpu);
    inform("\tmxcsr: 0x%x\n", fpu.mxcsr);
    inform("\tFP Stack:\n");
    for (int i = 0; i < 8; ++i) {
        const unsigned reg_idx((i + top) & 0x7);
        const bool empty(!((fpu.ftwx >> reg_idx) & 0x1));
        const double value(X86ISA::loadFloat80(fpu.fpr[i]));
        char hex[33];
        for (int j = 0; j < 10; ++j)
            snprintf(&hex[j*2], 3, "%.2x", fpu.fpr[i][j]);
        inform("\t\tST%i/%i: 0x%s (%f)%s\n", i, reg_idx,
               hex, value, empty ? " (e)" : "");
    }
    inform("\tXMM registers:\n");
    for (int i = 0; i < 16; ++i) {
        char hex[33];
        for (int j = 0; j < 16; ++j)
            snprintf(&hex[j*2], 3, "%.2x", fpu.xmm[i][j]);
        inform("\t\t%i: 0x%s\n", i, hex);
    }
}

static void
dumpKvm(const struct kvm_fpu &fpu)
{
    inform("FPU registers:\n");
    dumpFpuCommon(fpu);
}

static void
dumpKvm(const struct kvm_xsave &xsave)
{
    inform("FPU registers (XSave):\n");
    dumpFpuCommon(*(FXSave *)xsave.region);
}

static void
dumpKvm(const struct kvm_msrs &msrs)
{
    inform("MSRs:\n");

    for (int i = 0; i < msrs.nmsrs; ++i) {
        const struct kvm_msr_entry &e(msrs.entries[i]);

        inform("\t0x%x: 0x%x\n", e.index, e.data);
    }
}

static void
dumpKvm(const struct kvm_xcrs &regs)
{
    inform("KVM XCR registers:\n");

    inform("\tFlags: 0x%x\n", regs.flags);
    for (int i = 0; i < regs.nr_xcrs; ++i) {
        inform("\tXCR[0x%x]: 0x%x\n",
               regs.xcrs[i].xcr,
               regs.xcrs[i].value);
    }
}

static void
dumpKvm(const struct kvm_vcpu_events &events)
{
    inform("vCPU events:\n");

    inform("\tException: [inj: %i, nr: %i, has_ec: %i, ec: %i]\n",
           events.exception.injected, events.exception.nr,
           events.exception.has_error_code, events.exception.error_code);

    inform("\tInterrupt: [inj: %i, nr: %i, soft: %i]\n",
           events.interrupt.injected, events.interrupt.nr,
           events.interrupt.soft);

    inform("\tNMI: [inj: %i, pending: %i, masked: %i]\n",
           events.nmi.injected, events.nmi.pending,
           events.nmi.masked);

    inform("\tSIPI vector: 0x%x\n", events.sipi_vector);
    inform("\tFlags: 0x%x\n", events.flags);
}

static bool
isCanonicalAddress(uint64_t addr)
{
    // x86-64 doesn't currently use the full 64-bit virtual address
    // space; instead, it uses signed 48-bit addresses that are
    // sign-extended to 64 bits.  Such addresses are known as
    // "canonical".
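    // For example, 0x00007fffffffffff and 0xffff800000000000 are
    // canonical, whereas 0x0000800000000000 is not.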
    uint64_t upper_half(addr & 0xffff800000000000ULL);
    return upper_half == 0 || upper_half == 0xffff800000000000;
}

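/**
 * Sanity check a segment register against the rules VMX imposes on
 * guest state (base address, type, S/P flags, and the limit/granularity
 * combination). Only warnings are emitted; the segment state itself is
 * not modified.
 */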
static void
checkSeg(const char *name, const int idx, const struct kvm_segment &seg,
         struct kvm_sregs sregs)
{
    // Check the register base
    switch (idx) {
      case MISCREG_TSL:
      case MISCREG_TR:
      case MISCREG_FS:
      case MISCREG_GS:
        if (!isCanonicalAddress(seg.base))
            warn("Illegal %s base: 0x%x\n", name, seg.base);
        break;

      case MISCREG_SS:
      case MISCREG_DS:
      case MISCREG_ES:
        if (seg.unusable)
            break;
        M5_FALLTHROUGH;
      case MISCREG_CS:
        if (seg.base & 0xffffffff00000000ULL)
            warn("Illegal %s base: 0x%x\n", name, seg.base);
        break;
    }

    // Check the type
    switch (idx) {
      case MISCREG_CS:
        switch (seg.type) {
          case 3:
            if (seg.dpl != 0)
                warn("CS type is 3 but dpl != 0.\n");
            break;
          case 9:
          case 11:
            if (seg.dpl != sregs.ss.dpl)
                warn("CS type is %i but CS DPL != SS DPL\n", seg.type);
            break;
          case 13:
          case 15:
            if (seg.dpl > sregs.ss.dpl)
                warn("CS type is %i but CS DPL > SS DPL\n", seg.type);
            break;
          default:
            warn("Illegal CS type: %i\n", seg.type);
            break;
        }
        break;

      case MISCREG_SS:
        if (seg.unusable)
            break;
        switch (seg.type) {
          case 3:
            if (sregs.cs.type == 3 && seg.dpl != 0)
                warn("CS type is 3, but SS DPL is != 0.\n");
            M5_FALLTHROUGH;
          case 7:
            if (!(sregs.cr0 & 1) && seg.dpl != 0)
                warn("SS DPL is %i, but CR0 PE is 0\n", seg.dpl);
            break;
          default:
            warn("Illegal SS type: %i\n", seg.type);
            break;
        }
        break;

      case MISCREG_DS:
      case MISCREG_ES:
      case MISCREG_FS:
      case MISCREG_GS:
        if (seg.unusable)
            break;
        if (!(seg.type & 0x1) ||
            ((seg.type & 0x8) && !(seg.type & 0x2)))
            warn("%s has an illegal type field: %i\n", name, seg.type);
        break;

      case MISCREG_TR:
        // TODO: We should check the CPU mode
        if (seg.type != 3 && seg.type != 11)
            warn("%s: Illegal segment type (%i)\n", name, seg.type);
        break;

      case MISCREG_TSL:
        if (seg.unusable)
            break;
        if (seg.type != 2)
            warn("%s: Illegal segment type (%i)\n", name, seg.type);
        break;
    }

    switch (idx) {
      case MISCREG_SS:
      case MISCREG_DS:
      case MISCREG_ES:
      case MISCREG_FS:
      case MISCREG_GS:
        if (seg.unusable)
            break;
        M5_FALLTHROUGH;
      case MISCREG_CS:
        if (!seg.s)
            warn("%s: S flag not set\n", name);
        break;

      case MISCREG_TSL:
        if (seg.unusable)
            break;
        M5_FALLTHROUGH;
      case MISCREG_TR:
        if (seg.s)
            warn("%s: S flag is set\n", name);
        break;
    }

    switch (idx) {
      case MISCREG_SS:
      case MISCREG_DS:
      case MISCREG_ES:
      case MISCREG_FS:
      case MISCREG_GS:
      case MISCREG_TSL:
        if (seg.unusable)
            break;
        M5_FALLTHROUGH;
      case MISCREG_TR:
      case MISCREG_CS:
        if (!seg.present)
            warn("%s: P flag not set\n", name);

        if (((seg.limit & 0xFFF) == 0 && seg.g) ||
            ((seg.limit & 0xFFF00000) != 0 && !seg.g)) {
            warn("%s limit (0x%x) and g (%i) combination is illegal.\n",
                 name, seg.limit, seg.g);
        }
        break;
    }

    // TODO: Check CS DB
}

X86KvmCPU::X86KvmCPU(X86KvmCPUParams *params)
    : BaseKvmCPU(params),
      useXSave(params->useXSave)
{
    Kvm &kvm(*vm.kvm);

    if (!kvm.capSetTSSAddress())
        panic("KVM: Missing capability (KVM_CAP_SET_TSS_ADDR)\n");
    if (!kvm.capExtendedCPUID())
        panic("KVM: Missing capability (KVM_CAP_EXT_CPUID)\n");
    if (!kvm.capUserNMI())
        warn("KVM: Missing capability (KVM_CAP_USER_NMI)\n");
    if (!kvm.capVCPUEvents())
        warn("KVM: Missing capability (KVM_CAP_VCPU_EVENTS)\n");

    haveDebugRegs = kvm.capDebugRegs();
    haveXSave = kvm.capXSave();
    haveXCRs = kvm.capXCRs();

    if (useXSave && !haveXSave) {
        warn("KVM: XSAVE not supported by host. MXCSR synchronization might be "
             "unreliable due to kernel bugs.\n");
        useXSave = false;
    } else if (!useXSave) {
        warn("KVM: XSave FPU/SIMD synchronization disabled by user.\n");
    }
}

X86KvmCPU::~X86KvmCPU()
{
}

void
X86KvmCPU::startup()
{
    BaseKvmCPU::startup();

    updateCPUID();

    // TODO: Do we need to create an identity mapped TSS area? We
    // should call kvm.vm.setTSSAddress() here in that case. It should
    // only be needed for old versions of the virtualization
    // extensions. We should make sure that the identity range is
    // reserved in the e820 memory map in that case.
}

void
X86KvmCPU::dump() const
{
    dumpIntRegs();
    if (useXSave)
        dumpXSave();
    else
        dumpFpuRegs();
    dumpSpecRegs();
    dumpDebugRegs();
    dumpXCRs();
    dumpVCpuEvents();
    dumpMSRs();
}

void
X86KvmCPU::dumpFpuRegs() const
{
    struct kvm_fpu fpu;
    getFPUState(fpu);
    dumpKvm(fpu);
}

void
X86KvmCPU::dumpIntRegs() const
{
    struct kvm_regs regs;
    getRegisters(regs);
    dumpKvm(regs);
}

void
X86KvmCPU::dumpSpecRegs() const
{
    struct kvm_sregs sregs;
    getSpecialRegisters(sregs);
    dumpKvm(sregs);
}

void
X86KvmCPU::dumpDebugRegs() const
{
    if (haveDebugRegs) {
#ifdef KVM_GET_DEBUGREGS
        struct kvm_debugregs dregs;
        getDebugRegisters(dregs);
        dumpKvm(dregs);
#endif
    } else {
        inform("Debug registers not supported by kernel.\n");
    }
}

void
X86KvmCPU::dumpXCRs() const
{
    if (haveXCRs) {
        struct kvm_xcrs xcrs;
        getXCRs(xcrs);
        dumpKvm(xcrs);
    } else {
        inform("XCRs not supported by kernel.\n");
    }
}

void
X86KvmCPU::dumpXSave() const
{
    if (haveXSave) {
        struct kvm_xsave xsave;
        getXSave(xsave);
        dumpKvm(xsave);
    } else {
        inform("XSave not supported by kernel.\n");
    }
}

void
X86KvmCPU::dumpVCpuEvents() const
{
    struct kvm_vcpu_events events;
    getVCpuEvents(events);
    dumpKvm(events);
}

void
X86KvmCPU::dumpMSRs() const
{
    const Kvm::MSRIndexVector &supported_msrs(vm.kvm->getSupportedMSRs());
    std::unique_ptr<struct kvm_msrs> msrs(
        newVarStruct<struct kvm_msrs, struct kvm_msr_entry>(
            supported_msrs.size()));

    msrs->nmsrs = supported_msrs.size();
    for (int i = 0; i < supported_msrs.size(); ++i) {
        struct kvm_msr_entry &e(msrs->entries[i]);
        e.index = supported_msrs[i];
        e.reserved = 0;
        e.data = 0;
    }
    getMSRs(*msrs.get());

    dumpKvm(*msrs.get());
}

void
X86KvmCPU::updateKvmState()
{
    updateKvmStateRegs();
    updateKvmStateSRegs();
    updateKvmStateFPU();
    updateKvmStateMSRs();

    DPRINTF(KvmContext, "X86KvmCPU::updateKvmState():\n");
    if (DTRACE(KvmContext))
        dump();
}

void
X86KvmCPU::updateKvmStateRegs()
{
    struct kvm_regs regs;

#define APPLY_IREG(kreg, mreg) regs.kreg = tc->readIntReg(mreg)
    FOREACH_IREG();
#undef APPLY_IREG

    regs.rip = tc->instAddr() - tc->readMiscReg(MISCREG_CS_BASE);

    /* You might think that setting regs.rflags to the contents of
     * MISCREG_RFLAGS here would suffice. In that case you're
     * mistaken. We need to reconstruct it from a bunch of ucode
     * registers and wave a dead chicken over it (aka mask out and set
     * reserved bits) to get it to work.
     */
    regs.rflags = X86ISA::getRFlags(tc);

    setRegisters(regs);
}

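/**
 * Translate gem5's state for one segment (base, limit, selector and the
 * packed SegAttr attribute bits) into KVM's kvm_segment representation.
 */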
static inline void
setKvmSegmentReg(ThreadContext *tc, struct kvm_segment &kvm_seg,
                 const int index)
{
    SegAttr attr(tc->readMiscRegNoEffect(MISCREG_SEG_ATTR(index)));

    kvm_seg.base = tc->readMiscRegNoEffect(MISCREG_SEG_BASE(index));
    kvm_seg.limit = tc->readMiscRegNoEffect(MISCREG_SEG_LIMIT(index));
    kvm_seg.selector = tc->readMiscRegNoEffect(MISCREG_SEG_SEL(index));
    kvm_seg.type = attr.type;
    kvm_seg.present = attr.present;
    kvm_seg.dpl = attr.dpl;
    kvm_seg.db = attr.defaultSize;
    kvm_seg.s = attr.system;
    kvm_seg.l = attr.longMode;
    kvm_seg.g = attr.granularity;
    kvm_seg.avl = attr.avl;

    // A segment is normally unusable when the selector is zero. There
    // is an attr.unusable flag in gem5, but it seems unused. qemu
    // seems to set this to 0 all the time, so we just do the same and
    // hope for the best.
    kvm_seg.unusable = 0;
}

static inline void
setKvmDTableReg(ThreadContext *tc, struct kvm_dtable &kvm_dtable,
                const int index)
{
    kvm_dtable.base = tc->readMiscRegNoEffect(MISCREG_SEG_BASE(index));
    kvm_dtable.limit = tc->readMiscRegNoEffect(MISCREG_SEG_LIMIT(index));
}

static void
forceSegAccessed(struct kvm_segment &seg)
{
    // Intel's VMX requires that (some) usable segments are flagged as
    // 'accessed' (i.e., the lowest bit in the segment type is set)
    // when entering VMX. This wouldn't necessarily be the case even if
    // gem5 did set the access bits correctly, so we force it to one
    // in that case.
    if (!seg.unusable)
        seg.type |= SEG_TYPE_BIT_ACCESSED;
}

void
X86KvmCPU::updateKvmStateSRegs()
{
    struct kvm_sregs sregs;

#define APPLY_SREG(kreg, mreg) sregs.kreg = tc->readMiscRegNoEffect(mreg)
#define APPLY_SEGMENT(kreg, idx) setKvmSegmentReg(tc, sregs.kreg, idx)
#define APPLY_DTABLE(kreg, idx) setKvmDTableReg(tc, sregs.kreg, idx)

    FOREACH_SREG();
    FOREACH_SEGMENT();
    FOREACH_DTABLE();

#undef APPLY_SREG
#undef APPLY_SEGMENT
#undef APPLY_DTABLE

    // Clear the interrupt bitmap
    memset(&sregs.interrupt_bitmap, 0, sizeof(sregs.interrupt_bitmap));

    // VMX requires CS, SS, DS, ES, FS, and GS to have the accessed
    // bit in the type field set.
    forceSegAccessed(sregs.cs);
    forceSegAccessed(sregs.ss);
    forceSegAccessed(sregs.ds);
    forceSegAccessed(sregs.es);
    forceSegAccessed(sregs.fs);
    forceSegAccessed(sregs.gs);

    // There are currently some cases where the active task isn't
    // marked as busy. This is illegal in VMX, so we force it to busy.
    if (sregs.tr.type == SEG_SYS_TYPE_TSS_AVAILABLE) {
        hack("tr.type (%i) is not busy. Forcing the busy bit.\n",
             sregs.tr.type);
        sregs.tr.type = SEG_SYS_TYPE_TSS_BUSY;
    }

    // VMX requires the DPL of SS and CS to be the same for
    // non-conforming code segments. It seems like m5 doesn't set the
    // DPL of SS correctly when taking interrupts, so we need to fix
    // that here.
    if ((sregs.cs.type == SEG_CS_TYPE_ACCESSED ||
         sregs.cs.type == SEG_CS_TYPE_READ_ACCESSED) &&
        sregs.cs.dpl != sregs.ss.dpl) {

        hack("CS.DPL (%i) != SS.DPL (%i): Forcing SS.DPL to %i\n",
             sregs.cs.dpl, sregs.ss.dpl, sregs.cs.dpl);
        sregs.ss.dpl = sregs.cs.dpl;
    }

    // Do checks after fixing up the state to avoid getting excessive
    // amounts of warnings.
    RFLAGS rflags_nocc(tc->readMiscReg(MISCREG_RFLAGS));
    if (!rflags_nocc.vm) {
        // Do segment verification if the CPU isn't entering virtual
        // 8086 mode.  We currently assume that unrestricted guest
        // mode is available.

#define APPLY_SEGMENT(kreg, idx) \
        checkSeg(# kreg, idx + MISCREG_SEG_SEL_BASE, sregs.kreg, sregs)

        FOREACH_SEGMENT();
#undef APPLY_SEGMENT
    }

    setSpecialRegisters(sregs);
}

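/**
 * Copy the x87/SSE state that the kvm_fpu and FXSave formats have in
 * common from the thread context into fpu. The format-specific
 * last-IP/last-DP fields are filled in by the callers below.
 */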
template <typename T>
static void
updateKvmStateFPUCommon(ThreadContext *tc, T &fpu)
{
    static_assert(sizeof(X86ISA::FloatRegBits) == 8,
                  "Unexpected size of X86ISA::FloatRegBits");

    fpu.mxcsr = tc->readMiscRegNoEffect(MISCREG_MXCSR);
    fpu.fcw = tc->readMiscRegNoEffect(MISCREG_FCW);
    // No need to rebuild from MISCREG_FSW and MISCREG_TOP if we read
    // with effects.
    fpu.fsw = tc->readMiscReg(MISCREG_FSW);

    uint64_t ftw(tc->readMiscRegNoEffect(MISCREG_FTW));
    fpu.ftwx = X86ISA::convX87TagsToXTags(ftw);

    fpu.last_opcode = tc->readMiscRegNoEffect(MISCREG_FOP);

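    // KVM's fpr array is indexed by stack position (ST0..ST7), while gem5
    // stores the x87 registers by physical index, so rotate by TOP.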
    const unsigned top((fpu.fsw >> 11) & 0x7);
    for (int i = 0; i < 8; ++i) {
        const unsigned reg_idx((i + top) & 0x7);
        const double value(tc->readFloatReg(FLOATREG_FPR(reg_idx)));
        DPRINTF(KvmContext, "Setting KVM FP reg %i (st[%i]) := %f\n",
                reg_idx, i, value);
        X86ISA::storeFloat80(fpu.fpr[i], value);
    }

    // TODO: We should update the MMX state

    for (int i = 0; i < 16; ++i) {
        *(X86ISA::FloatRegBits *)&fpu.xmm[i][0] =
            tc->readFloatRegBits(FLOATREG_XMM_LOW(i));
        *(X86ISA::FloatRegBits *)&fpu.xmm[i][8] =
            tc->readFloatRegBits(FLOATREG_XMM_HIGH(i));
    }
}

void
X86KvmCPU::updateKvmStateFPULegacy()
{
    struct kvm_fpu fpu;

    // There is some padding in the FP registers, so we'd better zero
    // the whole struct.
    memset(&fpu, 0, sizeof(fpu));

    updateKvmStateFPUCommon(tc, fpu);

    if (tc->readMiscRegNoEffect(MISCREG_FISEG))
        warn_once("MISCREG_FISEG is non-zero.\n");

    fpu.last_ip = tc->readMiscRegNoEffect(MISCREG_FIOFF);

    if (tc->readMiscRegNoEffect(MISCREG_FOSEG))
        warn_once("MISCREG_FOSEG is non-zero.\n");

    fpu.last_dp = tc->readMiscRegNoEffect(MISCREG_FOOFF);

    setFPUState(fpu);
}

void
X86KvmCPU::updateKvmStateFPUXSave()
{
    struct kvm_xsave kxsave;
    FXSave &xsave(*(FXSave *)kxsave.region);

    // There is some padding and reserved fields in the structure, so
    // we'd better zero the whole thing.
    memset(&kxsave, 0, sizeof(kxsave));

    updateKvmStateFPUCommon(tc, xsave);

    if (tc->readMiscRegNoEffect(MISCREG_FISEG))
        warn_once("MISCREG_FISEG is non-zero.\n");

    xsave.ctrl64.fpu_ip = tc->readMiscRegNoEffect(MISCREG_FIOFF);

    if (tc->readMiscRegNoEffect(MISCREG_FOSEG))
        warn_once("MISCREG_FOSEG is non-zero.\n");

    xsave.ctrl64.fpu_dp = tc->readMiscRegNoEffect(MISCREG_FOOFF);

    setXSave(kxsave);
}

void
X86KvmCPU::updateKvmStateFPU()
{
    if (useXSave)
        updateKvmStateFPUXSave();
    else
        updateKvmStateFPULegacy();
}

void
X86KvmCPU::updateKvmStateMSRs()
{
    KvmMSRVector msrs;

    const Kvm::MSRIndexVector &indices(getMsrIntersection());

    for (auto it = indices.cbegin(); it != indices.cend(); ++it) {
        struct kvm_msr_entry e;

        e.index = *it;
        e.reserved = 0;
        e.data = tc->readMiscReg(msrMap.at(*it));
        DPRINTF(KvmContext, "Adding MSR: idx: 0x%x, data: 0x%x\n",
                e.index, e.data);

        msrs.push_back(e);
    }

    setMSRs(msrs);
}

void
X86KvmCPU::updateThreadContext()
{
    struct kvm_regs regs;
    struct kvm_sregs sregs;

    getRegisters(regs);
    getSpecialRegisters(sregs);

    DPRINTF(KvmContext, "X86KvmCPU::updateThreadContext():\n");
    if (DTRACE(KvmContext))
        dump();

    updateThreadContextRegs(regs, sregs);
    updateThreadContextSRegs(sregs);
    if (useXSave) {
        struct kvm_xsave xsave;
        getXSave(xsave);

        updateThreadContextXSave(xsave);
    } else {
        struct kvm_fpu fpu;
        getFPUState(fpu);

        updateThreadContextFPU(fpu);
    }
    updateThreadContextMSRs();

    // The M5 misc reg caches some values from other
    // registers. Writing to it with side effects causes it to be
    // updated from its source registers.
    tc->setMiscReg(MISCREG_M5_REG, 0);
}

void
X86KvmCPU::updateThreadContextRegs(const struct kvm_regs &regs,
                                   const struct kvm_sregs &sregs)
{
#define APPLY_IREG(kreg, mreg) tc->setIntReg(mreg, regs.kreg)

    FOREACH_IREG();

#undef APPLY_IREG

    tc->pcState(PCState(regs.rip + sregs.cs.base));

    // Flags are spread out across multiple semi-magic registers so we
    // need some special care when updating them.
    X86ISA::setRFlags(tc, regs.rflags);
}


inline void
setContextSegment(ThreadContext *tc, const struct kvm_segment &kvm_seg,
                  const int index)
{
    SegAttr attr(0);

    attr.type = kvm_seg.type;
    attr.present = kvm_seg.present;
    attr.dpl = kvm_seg.dpl;
    attr.defaultSize = kvm_seg.db;
    attr.system = kvm_seg.s;
    attr.longMode = kvm_seg.l;
    attr.granularity = kvm_seg.g;
    attr.avl = kvm_seg.avl;
    attr.unusable = kvm_seg.unusable;
100610259SAndrew.Bardsley@arm.com
100710259SAndrew.Bardsley@arm.com    // We need some setMiscReg magic here to keep the effective base
100810259SAndrew.Bardsley@arm.com    // addresses in sync. We need an up-to-date version of EFER, so
100910259SAndrew.Bardsley@arm.com    // make sure this is called after the sregs have been synced.
101010259SAndrew.Bardsley@arm.com    tc->setMiscReg(MISCREG_SEG_BASE(index), kvm_seg.base);
101111567Smitch.hayenga@arm.com    tc->setMiscReg(MISCREG_SEG_LIMIT(index), kvm_seg.limit);
101211567Smitch.hayenga@arm.com    tc->setMiscReg(MISCREG_SEG_SEL(index), kvm_seg.selector);
101310259SAndrew.Bardsley@arm.com    tc->setMiscReg(MISCREG_SEG_ATTR(index), attr);
101410259SAndrew.Bardsley@arm.com}
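// As a concrete illustration (all values hypothetical), a flat 64-bit
// code segment reported by KVM as
//
//     { .base = 0, .limit = 0xfffff, .selector = 0x10, .type = 0xb,
//       .present = 1, .dpl = 0, .db = 0, .s = 1, .l = 1, .g = 1 }
//
// ends up as SEG_BASE = 0, SEG_LIMIT = 0xfffff, SEG_SEL = 0x10 and a
// SegAttr with longMode set, which is what gem5's long-mode
// effective-address handling keys off.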

inline void
setContextSegment(ThreadContext *tc, const struct kvm_dtable &kvm_dtable,
                  const int index)
{
    // We need some setMiscReg magic here to keep the effective base
    // addresses in sync. We need an up-to-date version of EFER, so
    // make sure this is called after the sregs have been synced.
    tc->setMiscReg(MISCREG_SEG_BASE(index), kvm_dtable.base);
    tc->setMiscReg(MISCREG_SEG_LIMIT(index), kvm_dtable.limit);
}

void
X86KvmCPU::updateThreadContextSRegs(const struct kvm_sregs &sregs)
{
    assert(getKvmRunState()->apic_base == sregs.apic_base);
    assert(getKvmRunState()->cr8 == sregs.cr8);

#define APPLY_SREG(kreg, mreg) tc->setMiscRegNoEffect(mreg, sregs.kreg)
#define APPLY_SEGMENT(kreg, idx) setContextSegment(tc, sregs.kreg, idx)
#define APPLY_DTABLE(kreg, idx) setContextSegment(tc, sregs.kreg, idx)
    FOREACH_SREG();
    FOREACH_SEGMENT();
    FOREACH_DTABLE();
#undef APPLY_SREG
#undef APPLY_SEGMENT
#undef APPLY_DTABLE
}
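// The FOREACH_*() helpers are defined earlier in this file. With the
// APPLY_* macros above, a single FOREACH_SREG() element expands to
// roughly the following (cr0 shown as a representative example):
//
//     tc->setMiscRegNoEffect(MISCREG_CR0, sregs.cr0);
//
// while FOREACH_SEGMENT() and FOREACH_DTABLE() elements become calls
// to the matching setContextSegment() overloads above.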

template<typename T>
static void
updateThreadContextFPUCommon(ThreadContext *tc, const T &fpu)
{
    const unsigned top((fpu.fsw >> 11) & 0x7);

    static_assert(sizeof(X86ISA::FloatRegBits) == 8,
                  "Unexpected size of X86ISA::FloatRegBits");

    for (int i = 0; i < 8; ++i) {
        const unsigned reg_idx((i + top) & 0x7);
        const double value(X86ISA::loadFloat80(fpu.fpr[i]));
        DPRINTF(KvmContext, "Setting gem5 FP reg %i (st[%i]) := %f\n",
                reg_idx, i, value);
        tc->setFloatReg(FLOATREG_FPR(reg_idx), value);
    }

    // TODO: We should update the MMX state

    tc->setMiscRegNoEffect(MISCREG_X87_TOP, top);
    tc->setMiscRegNoEffect(MISCREG_MXCSR, fpu.mxcsr);
    tc->setMiscRegNoEffect(MISCREG_FCW, fpu.fcw);
    tc->setMiscRegNoEffect(MISCREG_FSW, fpu.fsw);

    uint64_t ftw(convX87XTagsToTags(fpu.ftwx));
    // TODO: Are these registers really the same?
    tc->setMiscRegNoEffect(MISCREG_FTW, ftw);
    tc->setMiscRegNoEffect(MISCREG_FTAG, ftw);

    tc->setMiscRegNoEffect(MISCREG_FOP, fpu.last_opcode);

    for (int i = 0; i < 16; ++i) {
        tc->setFloatRegBits(FLOATREG_XMM_LOW(i),
                            *(X86ISA::FloatRegBits *)&fpu.xmm[i][0]);
        tc->setFloatRegBits(FLOATREG_XMM_HIGH(i),
                            *(X86ISA::FloatRegBits *)&fpu.xmm[i][8]);
    }
}
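// Worked example of the stack-relative indexing above (numbers are
// illustrative): if FSW.TOP is 5, KVM's fpr[0] holds ST(0), which gem5
// stores as physical FP register (0 + 5) & 7 = 5; fpr[1] then maps to
// register 6, and so on. The inverse mapping is applied when the kvm
// state is rebuilt from the thread context.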

void
X86KvmCPU::updateThreadContextFPU(const struct kvm_fpu &fpu)
{
    updateThreadContextFPUCommon(tc, fpu);

    tc->setMiscRegNoEffect(MISCREG_FISEG, 0);
    tc->setMiscRegNoEffect(MISCREG_FIOFF, fpu.last_ip);
    tc->setMiscRegNoEffect(MISCREG_FOSEG, 0);
    tc->setMiscRegNoEffect(MISCREG_FOOFF, fpu.last_dp);
}

void
X86KvmCPU::updateThreadContextXSave(const struct kvm_xsave &kxsave)
{
    const FXSave &xsave(*(const FXSave *)kxsave.region);

    updateThreadContextFPUCommon(tc, xsave);

    tc->setMiscRegNoEffect(MISCREG_FISEG, 0);
    tc->setMiscRegNoEffect(MISCREG_FIOFF, xsave.ctrl64.fpu_ip);
    tc->setMiscRegNoEffect(MISCREG_FOSEG, 0);
    tc->setMiscRegNoEffect(MISCREG_FOOFF, xsave.ctrl64.fpu_dp);
}

void
X86KvmCPU::updateThreadContextMSRs()
{
    const Kvm::MSRIndexVector &msrs(getMsrIntersection());

    std::unique_ptr<struct kvm_msrs> kvm_msrs(
        newVarStruct<struct kvm_msrs, struct kvm_msr_entry>(msrs.size()));
    struct kvm_msr_entry *entry;

    // Create a list of MSRs to read
    kvm_msrs->nmsrs = msrs.size();
    entry = &kvm_msrs->entries[0];
    for (auto it = msrs.cbegin(); it != msrs.cend(); ++it, ++entry) {
        entry->index = *it;
        entry->reserved = 0;
        entry->data = 0;
    }

    getMSRs(*kvm_msrs.get());

    // Update M5's state
    entry = &kvm_msrs->entries[0];
    for (int i = 0; i < kvm_msrs->nmsrs; ++i, ++entry) {
        DPRINTF(KvmContext, "Setting M5 MSR: idx: 0x%x, data: 0x%x\n",
                entry->index, entry->data);

        tc->setMiscReg(X86ISA::msrMap.at(entry->index), entry->data);
    }
}
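// newVarStruct<>() allocates the header struct together with storage
// for N trailing entries in a single block, which is the layout the
// variable-length kvm_msrs ABI expects. Conceptually it is equivalent
// to something like the following sketch (error handling omitted, the
// exact helper lives earlier in this file):
//
//     size_t bytes = sizeof(struct kvm_msrs) +
//                    n * sizeof(struct kvm_msr_entry);
//     struct kvm_msrs *p = (struct kvm_msrs *)new char[bytes];
//
// so &kvm_msrs->entries[0] points at contiguous storage for all of the
// requested entries.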

void
X86KvmCPU::deliverInterrupts()
{
    Fault fault;

    syncThreadContext();

    {
        // Migrate to the interrupt controller's thread to get the
        // interrupt. Even though the individual methods are safe to
        // call across threads, we might still lose interrupts unless
        // getInterrupt() and updateIntrInfo() are called atomically.
        EventQueue::ScopedMigration migrate(interrupts[0]->eventQueue());
        fault = interrupts[0]->getInterrupt(tc);
        interrupts[0]->updateIntrInfo(tc);
    }

    X86Interrupt *x86int(dynamic_cast<X86Interrupt *>(fault.get()));
    if (dynamic_cast<NonMaskableInterrupt *>(fault.get())) {
        DPRINTF(KvmInt, "Delivering NMI\n");
        kvmNonMaskableInterrupt();
    } else if (dynamic_cast<InitInterrupt *>(fault.get())) {
        DPRINTF(KvmInt, "INIT interrupt\n");
        fault.get()->invoke(tc);
        // Delay the kvm state update since we won't enter KVM on this
        // tick.
        threadContextDirty = true;
        // HACK: gem5 doesn't actually have any BIOS code, which means
        // that we need to halt the thread and wait for a startup
        // interrupt before restarting the thread. The simulated CPUs
        // use the same kind of hack using a microcode routine.
        thread->suspend();
    } else if (dynamic_cast<StartupInterrupt *>(fault.get())) {
        DPRINTF(KvmInt, "STARTUP interrupt\n");
        fault.get()->invoke(tc);
        // The kvm state is assumed to have been updated when entering
        // kvmRun(), so we need to update it manually here.
        updateKvmState();
    } else if (x86int) {
        struct kvm_interrupt kvm_int;
        kvm_int.irq = x86int->getVector();

        DPRINTF(KvmInt, "Delivering interrupt: %s (%u)\n",
                fault->name(), kvm_int.irq);

        kvmInterrupt(kvm_int);
    } else {
        panic("KVM: Unknown interrupt type\n");
    }
}
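// The x86int branch above maps onto the KVM_INTERRUPT ioctl (wrapped
// by kvmInterrupt() in the KVM base CPU). As a rough sketch, delivering
// an external vector by hand would look like this (the vector number
// is illustrative only):
//
//     struct kvm_interrupt kvm_int;
//     kvm_int.irq = 0x30;     // hypothetical external interrupt vector
//     kvmInterrupt(kvm_int);
//
// The INIT/STARTUP pair implements the usual INIT-SIPI bring-up
// handshake: INIT suspends the thread, and the following STARTUP
// interrupt resumes it with freshly rebuilt KVM register state.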

Tick
X86KvmCPU::kvmRun(Tick ticks)
{
    struct kvm_run &kvm_run(*getKvmRunState());

    if (interrupts[0]->checkInterruptsRaw()) {
        if (interrupts[0]->hasPendingUnmaskable()) {
            DPRINTF(KvmInt,
                    "Delivering unmaskable interrupt.\n");
            syncThreadContext();
            deliverInterrupts();
        } else if (kvm_run.ready_for_interrupt_injection) {
            // KVM claims that it is ready for an interrupt. It might
            // be lying if we just updated rflags and disabled
            // interrupts (e.g., by doing a CPU handover). Let's sync
            // the thread context and check if there are /really/
            // interrupts that should be delivered now.
            syncThreadContext();
            if (interrupts[0]->checkInterrupts(tc)) {
                DPRINTF(KvmInt,
                        "M5 has pending interrupts, delivering interrupt.\n");

                deliverInterrupts();
            } else {
                DPRINTF(KvmInt,
                        "Interrupt delivery delayed due to KVM confusion.\n");
                kvm_run.request_interrupt_window = 1;
            }
        } else if (!kvm_run.request_interrupt_window) {
            DPRINTF(KvmInt,
                    "M5 has pending interrupts, requesting interrupt "
                    "window.\n");
            kvm_run.request_interrupt_window = 1;
        }
    } else {
        kvm_run.request_interrupt_window = 0;
    }

    // The CPU might have been suspended as a result of the INIT
    // interrupt delivery hack. In that case, don't enter into KVM.
    if (_status == Idle)
        return 0;
    else
        return kvmRunWrapper(ticks);
}

Tick
X86KvmCPU::kvmRunDrain()
{
    struct kvm_run &kvm_run(*getKvmRunState());

    if (!archIsDrained()) {
        DPRINTF(Drain, "kvmRunDrain: Architecture code isn't drained\n");

        // Tell KVM to find a suitable place to deliver interrupts. This
        // should ensure that pending interrupts have been delivered and
        // things are reasonably consistent (i.e., no interrupts pending
        // in the guest).
        kvm_run.request_interrupt_window = 1;

        // Limit the run to 1 millisecond. That is hopefully enough to
        // reach an interrupt window. Otherwise, we'll just try again
        // later.
        return kvmRunWrapper(1 * SimClock::Float::ms);
    } else {
        DPRINTF(Drain, "kvmRunDrain: Delivering pending IO\n");

        return kvmRunWrapper(0);
    }
}
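// SimClock::Float::ms is the number of simulation ticks per
// millisecond (as a double), so the expression above converts the 1 ms
// budget into ticks; with gem5's default 1 THz tick rate that is
// roughly 1e9 ticks, though the exact value depends on the configured
// tick frequency.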

Tick
X86KvmCPU::kvmRunWrapper(Tick ticks)
{
    struct kvm_run &kvm_run(*getKvmRunState());

    // Synchronize the APIC base and CR8 here since they are present
    // in the kvm_run struct, which makes the synchronization really
    // cheap.
    kvm_run.apic_base = tc->readMiscReg(MISCREG_APIC_BASE);
    kvm_run.cr8 = tc->readMiscReg(MISCREG_CR8);

    const Tick run_ticks(BaseKvmCPU::kvmRun(ticks));

    tc->setMiscReg(MISCREG_APIC_BASE, kvm_run.apic_base);
    kvm_run.cr8 = tc->readMiscReg(MISCREG_CR8);

    return run_ticks;
}

uint64_t
X86KvmCPU::getHostCycles() const
{
    return getMSR(MSR_TSC);
}

void
X86KvmCPU::handleIOMiscReg32(int miscreg)
{
    struct kvm_run &kvm_run(*getKvmRunState());
    const uint16_t port(kvm_run.io.port);

    assert(kvm_run.exit_reason == KVM_EXIT_IO);

    if (kvm_run.io.size != 4) {
        panic("Unexpected IO size (%u) for address 0x%x.\n",
              kvm_run.io.size, port);
    }

    if (kvm_run.io.count != 1) {
        panic("Unexpected IO count (%u) for address 0x%x.\n",
              kvm_run.io.count, port);
    }

    uint32_t *data((uint32_t *)getGuestData(kvm_run.io.data_offset));
    if (kvm_run.io.direction == KVM_EXIT_IO_OUT)
        tc->setMiscReg(miscreg, *data);
    else
        *data = tc->readMiscRegNoEffect(miscreg);
}
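// Example flow (ports follow the standard 0xcf8/0xcfc PC convention):
// when the guest executes "out %eax, $0xcf8" to select a PCI config
// register, KVM exits with KVM_EXIT_IO and handleKvmExitIO() below
// calls handleIOMiscReg32(MISCREG_PCI_CONFIG_ADDRESS), which copies
// the 32-bit value written by the guest into that misc register. A
// later access to ports 0xcfc-0xcff is then redirected to the matching
// PCI configuration-space address.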

Tick
X86KvmCPU::handleKvmExitIO()
{
    struct kvm_run &kvm_run(*getKvmRunState());
    bool isWrite(kvm_run.io.direction == KVM_EXIT_IO_OUT);
    unsigned char *guestData(getGuestData(kvm_run.io.data_offset));
    Tick delay(0);
    uint16_t port(kvm_run.io.port);
    Addr pAddr;
    const int count(kvm_run.io.count);

    assert(kvm_run.io.direction == KVM_EXIT_IO_IN ||
           kvm_run.io.direction == KVM_EXIT_IO_OUT);

    DPRINTF(KvmIO, "KVM-x86: Handling IO instruction (%s) (port: 0x%x)\n",
            (isWrite ? "out" : "in"), kvm_run.io.port);

    /* Vanilla gem5 handles PCI discovery in the TLB(!). Since we
     * don't use the TLB component, we need to intercept and handle
     * the PCI configuration space IO ports here.
     *
     * The IO port PCI discovery mechanism uses one address register
     * and one data register. We map the address register to a misc
     * reg and use that to re-route data register accesses to the
     * right location in the PCI configuration space.
     */
    if (port == IO_PCI_CONF_ADDR) {
        handleIOMiscReg32(MISCREG_PCI_CONFIG_ADDRESS);
        return 0;
    } else if ((port & ~0x3) == IO_PCI_CONF_DATA_BASE) {
        Addr pciConfigAddr(tc->readMiscRegNoEffect(MISCREG_PCI_CONFIG_ADDRESS));
        if (pciConfigAddr & 0x80000000) {
            pAddr = X86ISA::x86PciConfigAddress((pciConfigAddr & 0x7ffffffc) |
                                                (port & 0x3));
        } else {
            pAddr = X86ISA::x86IOAddress(port);
        }
    } else {
        pAddr = X86ISA::x86IOAddress(port);
    }

    const MemCmd cmd(isWrite ? MemCmd::WriteReq : MemCmd::ReadReq);
    // Temporarily lock and migrate to the device event queue to
    // prevent races in multi-core mode.
    EventQueue::ScopedMigration migrate(deviceEventQueue());
    for (int i = 0; i < count; ++i) {
        RequestPtr io_req = std::make_shared<Request>(
            pAddr, kvm_run.io.size,
            Request::UNCACHEABLE, dataMasterId());

        io_req->setContext(tc->contextId());

        PacketPtr pkt = new Packet(io_req, cmd);

        pkt->dataStatic(guestData);
        delay += dataPort.submitIO(pkt);

        guestData += kvm_run.io.size;
    }

    return delay;
}
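// Worked example of the config-data path above (all numbers are
// illustrative): if the guest previously wrote 0x80001008 to the
// address port (enable bit set, bus 0, device 2, function 0, register
// offset 8) and then reads 32 bits from port 0xcfc, the code clears
// the enable bit, keeps the low two port bits as a byte offset and
// asks X86ISA::x86PciConfigAddress() for the physical address of that
// configuration register, which is then accessed like any other
// uncacheable IO request in the loop above.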

Tick
X86KvmCPU::handleKvmExitIRQWindowOpen()
{
    // We don't need to do anything here since this is caught the next
    // time we execute kvmRun(). We still overload the exit event to
    // silence the warning about an unhandled exit event.
    return 0;
}

bool
X86KvmCPU::archIsDrained() const
{
    struct kvm_vcpu_events events;

    getVCpuEvents(events);

    // We could probably handle this by re-inserting interrupts that
    // are pending into gem5 on a drain. However, that would probably
    // be tricky to do reliably, so we'll just prevent a drain if
    // there is anything pending in the guest.
    // X86KvmCPU::kvmRunDrain() minimizes the amount of code executed
    // in the guest by requesting an interrupt window if there are
    // pending interrupts.
    const bool pending_events(events.exception.injected ||
                              events.interrupt.injected ||
                              events.nmi.injected || events.nmi.pending);

    if (pending_events) {
        DPRINTF(Drain, "archIsDrained: Pending events: %s %s %s %s\n",
                events.exception.injected ? "exception" : "",
                events.interrupt.injected ? "interrupt" : "",
                events.nmi.injected ? "nmi[i]" : "",
                events.nmi.pending ? "nmi[p]" : "");
    }

    return !pending_events;
}

static struct kvm_cpuid_entry2
makeKvmCpuid(uint32_t function, uint32_t index,
             CpuidResult &result)
{
    struct kvm_cpuid_entry2 e;
    e.function = function;
    e.index = index;
    e.flags = 0;
    e.eax = (uint32_t)result.rax;
    e.ebx = (uint32_t)result.rbx;
    e.ecx = (uint32_t)result.rcx;
    e.edx = (uint32_t)result.rdx;

    return e;
}
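// Usage sketch (the leaf number is illustrative): the helper above
// simply truncates the 64-bit CpuidResult fields down to the 32-bit
// registers KVM expects, e.g.
//
//     CpuidResult res;
//     X86ISA::doCpuid(tc, 0x1, 0, res);
//     struct kvm_cpuid_entry2 e = makeKvmCpuid(0x1, 0, res);
//
// Note that the entry's padding bytes are left uninitialized here;
// zeroing the struct first would be slightly tidier.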

void
X86KvmCPU::updateCPUID()
{
    Kvm::CPUIDVector m5_supported;

    /* TODO: We currently don't support any of the CPUID functions
     * that iterate through data structures in the CPU using an
     * index. That isn't a problem since M5 doesn't expose any of them
     * at the moment.
     */

    /* Basic features */
    CpuidResult func0;
    X86ISA::doCpuid(tc, 0x0, 0, func0);
    for (uint32_t function = 0; function <= func0.rax; ++function) {
        CpuidResult cpuid;
        uint32_t idx(0);

        X86ISA::doCpuid(tc, function, idx, cpuid);
        m5_supported.push_back(makeKvmCpuid(function, idx, cpuid));
    }

    /* Extended features */
    CpuidResult efunc0;
    X86ISA::doCpuid(tc, 0x80000000, 0, efunc0);
    for (uint32_t function = 0x80000000; function <= efunc0.rax; ++function) {
        CpuidResult cpuid;
        uint32_t idx(0);

        X86ISA::doCpuid(tc, function, idx, cpuid);
        m5_supported.push_back(makeKvmCpuid(function, idx, cpuid));
    }

    setCPUID(m5_supported);
}

void
X86KvmCPU::setCPUID(const struct kvm_cpuid2 &cpuid)
{
    if (ioctl(KVM_SET_CPUID2, (void *)&cpuid) == -1)
        panic("KVM: Failed to set guest CPUID2 (errno: %i)\n",
              errno);
}

void
X86KvmCPU::setCPUID(const Kvm::CPUIDVector &cpuid)
{
    std::unique_ptr<struct kvm_cpuid2> kvm_cpuid(
        newVarStruct<struct kvm_cpuid2, struct kvm_cpuid_entry2>(cpuid.size()));

    kvm_cpuid->nent = cpuid.size();
    std::copy(cpuid.begin(), cpuid.end(), kvm_cpuid->entries);

    setCPUID(*kvm_cpuid);
}

void
X86KvmCPU::setMSRs(const struct kvm_msrs &msrs)
{
    if (ioctl(KVM_SET_MSRS, (void *)&msrs) == -1)
        panic("KVM: Failed to set guest MSRs (errno: %i)\n",
              errno);
}

void
X86KvmCPU::setMSRs(const KvmMSRVector &msrs)
{
    std::unique_ptr<struct kvm_msrs> kvm_msrs(
        newVarStruct<struct kvm_msrs, struct kvm_msr_entry>(msrs.size()));

    kvm_msrs->nmsrs = msrs.size();
    std::copy(msrs.begin(), msrs.end(), kvm_msrs->entries);

    setMSRs(*kvm_msrs);
}

void
X86KvmCPU::getMSRs(struct kvm_msrs &msrs) const
{
    if (ioctl(KVM_GET_MSRS, (void *)&msrs) == -1)
        panic("KVM: Failed to get guest MSRs (errno: %i)\n",
              errno);
}


void
X86KvmCPU::setMSR(uint32_t index, uint64_t value)
{
    std::unique_ptr<struct kvm_msrs> kvm_msrs(
        newVarStruct<struct kvm_msrs, struct kvm_msr_entry>(1));
    struct kvm_msr_entry &entry(kvm_msrs->entries[0]);

    kvm_msrs->nmsrs = 1;
    entry.index = index;
    entry.reserved = 0;
    entry.data = value;

    setMSRs(*kvm_msrs.get());
}
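// Typical usage of the single-MSR helpers (the value written is
// illustrative only):
//
//     setMSR(MSR_TSC, 0);                 // e.g. zero the guest TSC
//     uint64_t tsc = getMSR(MSR_TSC);     // and read it back
//
// Both helpers allocate a one-entry kvm_msrs block and go through the
// same KVM_SET_MSRS/KVM_GET_MSRS ioctls as the bulk variants above.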

uint64_t
X86KvmCPU::getMSR(uint32_t index) const
{
    std::unique_ptr<struct kvm_msrs> kvm_msrs(
        newVarStruct<struct kvm_msrs, struct kvm_msr_entry>(1));
    struct kvm_msr_entry &entry(kvm_msrs->entries[0]);

    kvm_msrs->nmsrs = 1;
    entry.index = index;
    entry.reserved = 0;
    entry.data = 0;

    getMSRs(*kvm_msrs.get());
    return entry.data;
}

const Kvm::MSRIndexVector &
X86KvmCPU::getMsrIntersection() const
{
    if (cachedMsrIntersection.empty()) {
        const Kvm::MSRIndexVector &kvm_msrs(vm.kvm->getSupportedMSRs());

        DPRINTF(Kvm, "kvm-x86: Updating MSR intersection\n");
        for (auto it = kvm_msrs.cbegin(); it != kvm_msrs.cend(); ++it) {
            if (X86ISA::msrMap.find(*it) != X86ISA::msrMap.end()) {
                cachedMsrIntersection.push_back(*it);
                DPRINTF(Kvm, "kvm-x86: Adding MSR 0x%x\n", *it);
            } else {
                warn("kvm-x86: MSR (0x%x) unsupported by gem5. Skipping.\n",
                     *it);
            }
        }
    }

    return cachedMsrIntersection;
}

void
X86KvmCPU::getDebugRegisters(struct kvm_debugregs &regs) const
{
#ifdef KVM_GET_DEBUGREGS
    if (ioctl(KVM_GET_DEBUGREGS, &regs) == -1)
        panic("KVM: Failed to get guest debug registers\n");
#else
    panic("KVM: Unsupported getDebugRegisters call.\n");
#endif
}

void
X86KvmCPU::setDebugRegisters(const struct kvm_debugregs &regs)
{
#ifdef KVM_SET_DEBUGREGS
    if (ioctl(KVM_SET_DEBUGREGS, (void *)&regs) == -1)
        panic("KVM: Failed to set guest debug registers\n");
#else
    panic("KVM: Unsupported setDebugRegisters call.\n");
#endif
}

void
X86KvmCPU::getXCRs(struct kvm_xcrs &regs) const
{
    if (ioctl(KVM_GET_XCRS, &regs) == -1)
        panic("KVM: Failed to get guest XCRs\n");
}

void
X86KvmCPU::setXCRs(const struct kvm_xcrs &regs)
{
    if (ioctl(KVM_SET_XCRS, (void *)&regs) == -1)
        panic("KVM: Failed to set guest XCRs\n");
}

void
X86KvmCPU::getXSave(struct kvm_xsave &xsave) const
{
    if (ioctl(KVM_GET_XSAVE, &xsave) == -1)
        panic("KVM: Failed to get guest XSAVE state\n");
}

void
X86KvmCPU::setXSave(const struct kvm_xsave &xsave)
{
    if (ioctl(KVM_SET_XSAVE, (void *)&xsave) == -1)
        panic("KVM: Failed to set guest XSAVE state\n");
}


void
X86KvmCPU::getVCpuEvents(struct kvm_vcpu_events &events) const
{
    if (ioctl(KVM_GET_VCPU_EVENTS, &events) == -1)
        panic("KVM: Failed to get guest VCPU events\n");
}

void
X86KvmCPU::setVCpuEvents(const struct kvm_vcpu_events &events)
{
    if (ioctl(KVM_SET_VCPU_EVENTS, (void *)&events) == -1)
        panic("KVM: Failed to set guest VCPU events\n");
}

X86KvmCPU *
X86KvmCPUParams::create()
{
    return new X86KvmCPU(this);
}