/*
 * Copyright (c) 2015, 2017 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Andreas Sandberg
 */

#include "arch/arm/kvm/armv8_cpu.hh"

#include <linux/kvm.h>

#include "debug/KvmContext.hh"
#include "params/ArmV8KvmCPU.hh"

using namespace ArmISA;

// Unlike gem5, kvm doesn't count the SP as a normal integer register,
// which means we only have 31 normal integer registers.
constexpr static unsigned NUM_XREGS = NUM_ARCH_INTREGS - 1;
static_assert(NUM_XREGS == 31, "Unexpected number of aarch64 int. regs.");

// The KVM interface exposes each vector register as a group of four
// single-precision floats rather than as individual registers.
constexpr static unsigned NUM_QREGS = NumFloatV8ArchRegs / 4;
static_assert(NUM_QREGS == 32, "Unexpected number of aarch64 vector regs.");

#define EXTRACT_FIELD(v, name) \
    (((v) & name ## _MASK) >> name ## _SHIFT)

#define CORE_REG(name, size)                               \
    (KVM_REG_ARM64 | KVM_REG_ARM_CORE |                    \
     KVM_REG_SIZE_ ## size |                               \
     KVM_REG_ARM_CORE_REG(name))

#define INT_REG(name) CORE_REG(name, U64)
#define SIMD_REG(name) CORE_REG(name, U128)

#define SYS_MPIDR_EL1 ARM64_SYS_REG(0b11, 0b000, 0b0000, 0b0000, 0b101)

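// Compute the KVM_{GET,SET}_ONE_REG identifier of X register 'num' or
// SIMD (Q) register 'num'. KVM core register IDs encode the offset of
// the corresponding field within struct kvm_regs, so consecutive array
// elements are separated by a constant stride; the helpers below derive
// that stride from elements 0 and 1 and extrapolate linearly.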
constexpr uint64_t
kvmXReg(const int num)
{
    return INT_REG(regs.regs[0]) +
        (INT_REG(regs.regs[1]) - INT_REG(regs.regs[0])) * num;
}

constexpr uint64_t
kvmFPReg(const int num)
{
    return SIMD_REG(fp_regs.vregs[0]) +
        (SIMD_REG(fp_regs.vregs[1]) - SIMD_REG(fp_regs.vregs[0])) * num;
}

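// A single 128-bit SIMD (Q) register as transferred through the KVM
// one-reg interface: viewable as four single-precision lanes, two
// double-precision lanes, or a raw byte buffer used by
// getOneReg()/setOneReg().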
union KvmFPReg {
    union {
        uint32_t i;
        float f;
    } s[4];

    union {
        uint64_t i;
        double f;
    } d[2];

    uint8_t data[32];
};

#define FP_REGS_PER_VFP_REG 4

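// Mappings between KVM core registers and gem5 register indices. These
// are copied verbatim by updateKvmState() and updateThreadContext().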
const std::vector<ArmV8KvmCPU::IntRegInfo> ArmV8KvmCPU::intRegMap = {
    { INT_REG(regs.sp), INTREG_SP0, "SP(EL0)" },
    { INT_REG(sp_el1), INTREG_SP1, "SP(EL1)" },
};

const std::vector<ArmV8KvmCPU::MiscRegInfo> ArmV8KvmCPU::miscRegMap = {
    MiscRegInfo(INT_REG(elr_el1), MISCREG_ELR_EL1, "ELR(EL1)"),
    MiscRegInfo(INT_REG(spsr[KVM_SPSR_EL1]), MISCREG_SPSR_EL1, "SPSR(EL1)"),
    MiscRegInfo(INT_REG(spsr[KVM_SPSR_ABT]), MISCREG_SPSR_ABT, "SPSR(ABT)"),
    MiscRegInfo(INT_REG(spsr[KVM_SPSR_UND]), MISCREG_SPSR_UND, "SPSR(UND)"),
    MiscRegInfo(INT_REG(spsr[KVM_SPSR_IRQ]), MISCREG_SPSR_IRQ, "SPSR(IRQ)"),
    MiscRegInfo(INT_REG(spsr[KVM_SPSR_FIQ]), MISCREG_SPSR_FIQ, "SPSR(FIQ)"),
    MiscRegInfo(INT_REG(fp_regs.fpsr), MISCREG_FPSR, "FPSR"),
    MiscRegInfo(INT_REG(fp_regs.fpcr), MISCREG_FPCR, "FPCR"),
};

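// System registers that are backed by a simulated device (the generic
// timer) rather than by plain per-thread state. Accesses to these are
// routed through the device event queue in updateKvmState() and
// updateThreadContext().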
const std::set<MiscRegIndex> ArmV8KvmCPU::deviceRegSet = {
    MISCREG_CNTV_CTL_EL0,
    MISCREG_CNTV_CVAL_EL0,
    MISCREG_CNTKCTL_EL1,
};

const std::vector<ArmV8KvmCPU::MiscRegInfo> ArmV8KvmCPU::miscRegIdMap = {
    MiscRegInfo(SYS_MPIDR_EL1, MISCREG_MPIDR_EL1, "MPIDR(EL1)"),
};

ArmV8KvmCPU::ArmV8KvmCPU(ArmV8KvmCPUParams *params)
    : BaseArmKvmCPU(params)
{
}

ArmV8KvmCPU::~ArmV8KvmCPU()
{
}

void
ArmV8KvmCPU::startup()
{
    BaseArmKvmCPU::startup();

    // Override ID registers that KVM should "inherit" from gem5.
    for (const auto &ri : miscRegIdMap) {
        const uint64_t value(tc->readMiscReg(ri.idx));
        DPRINTF(KvmContext, "  %s := 0x%x\n", ri.name, value);
        setOneReg(ri.kvm, value);
    }
}

void
ArmV8KvmCPU::dump() const
{
    inform("Integer registers:\n");
    inform("  PC: %s\n", getAndFormatOneReg(INT_REG(regs.pc)));
    for (int i = 0; i < NUM_XREGS; ++i)
        inform("  X%i: %s\n", i, getAndFormatOneReg(kvmXReg(i)));

    for (int i = 0; i < NUM_QREGS; ++i)
        inform("  Q%i: %s\n", i, getAndFormatOneReg(kvmFPReg(i)));

    for (const auto &ri : intRegMap)
        inform("  %s: %s\n", ri.name, getAndFormatOneReg(ri.kvm));

    inform("  %s: %s\n", "PSTATE", getAndFormatOneReg(INT_REG(regs.pstate)));

    for (const auto &ri : miscRegMap)
        inform("  %s: %s\n", ri.name, getAndFormatOneReg(ri.kvm));

    for (const auto &ri : miscRegIdMap)
        inform("  %s: %s\n", ri.name, getAndFormatOneReg(ri.kvm));

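    // Walk the full register list exposed by KVM and print everything
    // that isn't covered by the fixed mappings above (mainly system
    // registers and demuxed cache ID registers).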
    for (const auto &reg : getRegList()) {
        const uint64_t arch(reg & KVM_REG_ARCH_MASK);
        if (arch != KVM_REG_ARM64) {
            inform("0x%x: %s\n", reg, getAndFormatOneReg(reg));
            continue;
        }

        const uint64_t type(reg & KVM_REG_ARM_COPROC_MASK);
        switch (type) {
          case KVM_REG_ARM_CORE:
            // These have already been printed
            break;

          case KVM_REG_ARM64_SYSREG: {
              const uint64_t op0(EXTRACT_FIELD(reg, KVM_REG_ARM64_SYSREG_OP0));
              const uint64_t op1(EXTRACT_FIELD(reg, KVM_REG_ARM64_SYSREG_OP1));
              const uint64_t crn(EXTRACT_FIELD(reg, KVM_REG_ARM64_SYSREG_CRN));
              const uint64_t crm(EXTRACT_FIELD(reg, KVM_REG_ARM64_SYSREG_CRM));
              const uint64_t op2(EXTRACT_FIELD(reg, KVM_REG_ARM64_SYSREG_OP2));
              const MiscRegIndex idx(
                  decodeAArch64SysReg(op0, op1, crn, crm, op2));

              inform("  %s (op0: %i, op1: %i, crn: %i, crm: %i, op2: %i): %s",
                     miscRegName[idx], op0, op1, crn, crm, op2,
                     getAndFormatOneReg(reg));
          } break;

          case KVM_REG_ARM_DEMUX: {
              const uint64_t id(EXTRACT_FIELD(reg, KVM_REG_ARM_DEMUX_ID));
              const uint64_t val(EXTRACT_FIELD(reg, KVM_REG_ARM_DEMUX_VAL));
              if (id == KVM_REG_ARM_DEMUX_ID_CCSIDR) {
                  inform("  CCSIDR[%i]: %s\n", val,
                         getAndFormatOneReg(reg));
              } else {
                  inform("  UNKNOWN[%i:%i]: %s\n", id, val,
                         getAndFormatOneReg(reg));
              }
          } break;

          default:
            inform("0x%x: %s\n", reg, getAndFormatOneReg(reg));
        }
    }
}

void
ArmV8KvmCPU::updateKvmState()
{
    DPRINTF(KvmContext, "In updateKvmState():\n");

    // update pstate register state
    CPSR cpsr(tc->readMiscReg(MISCREG_CPSR));
    cpsr.nz = tc->readCCReg(CCREG_NZ);
    cpsr.c = tc->readCCReg(CCREG_C);
    cpsr.v = tc->readCCReg(CCREG_V);
    if (cpsr.width) {
        cpsr.ge = tc->readCCReg(CCREG_GE);
    } else {
        cpsr.ge = 0;
    }
    DPRINTF(KvmContext, "  %s := 0x%x\n", "PSTATE", cpsr);
    setOneReg(INT_REG(regs.pstate), static_cast<uint64_t>(cpsr));

    for (const auto &ri : miscRegMap) {
        const uint64_t value(tc->readMiscReg(ri.idx));
        DPRINTF(KvmContext, "  %s := 0x%x\n", ri.name, value);
        setOneReg(ri.kvm, value);
    }

    for (int i = 0; i < NUM_XREGS; ++i) {
        const uint64_t value(tc->readIntReg(INTREG_X0 + i));
        DPRINTF(KvmContext, "  X%i := 0x%x\n", i, value);
        setOneReg(kvmXReg(i), value);
    }

    for (const auto &ri : intRegMap) {
        const uint64_t value(tc->readIntReg(ri.idx));
        DPRINTF(KvmContext, "  %s := 0x%x\n", ri.name, value);
        setOneReg(ri.kvm, value);
    }

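    // gem5 stores each 128-bit vector register as four 32-bit float
    // registers; pack them into a KvmFPReg and write the whole quad to
    // KVM in one go.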
    for (int i = 0; i < NUM_QREGS; ++i) {
        const RegIndex reg_base(i * FP_REGS_PER_VFP_REG);
        KvmFPReg reg;
        for (int j = 0; j < FP_REGS_PER_VFP_REG; j++)
            reg.s[j].i = tc->readFloatReg(reg_base + j);

        setOneReg(kvmFPReg(i), reg.data);
        DPRINTF(KvmContext, "  Q%i: %s\n", i, getAndFormatOneReg(kvmFPReg(i)));
    }

    for (const auto &ri : getSysRegMap()) {
        uint64_t value;
        if (ri.is_device) {
            // This system register is backed by a device. This means
            // we need to lock the device event queue.
            EventQueue::ScopedMigration migrate(deviceEventQueue());

            value = tc->readMiscReg(ri.idx);
        } else {
            value = tc->readMiscReg(ri.idx);
        }

        DPRINTF(KvmContext, "  %s := 0x%x\n", ri.name, value);
        setOneReg(ri.kvm, value);
    }

    setOneReg(INT_REG(regs.pc), tc->instAddr());
    DPRINTF(KvmContext, "  PC := 0x%x\n", tc->instAddr());
}

void
ArmV8KvmCPU::updateThreadContext()
{
    DPRINTF(KvmContext, "In updateThreadContext():\n");

    // Update pstate thread context
    const CPSR cpsr(getOneRegU64(INT_REG(regs.pstate)));
    DPRINTF(KvmContext, "  %s := 0x%x\n", "PSTATE", cpsr);
    tc->setMiscRegNoEffect(MISCREG_CPSR, cpsr);
    tc->setCCReg(CCREG_NZ, cpsr.nz);
    tc->setCCReg(CCREG_C, cpsr.c);
    tc->setCCReg(CCREG_V, cpsr.v);
    if (cpsr.width) {
        tc->setCCReg(CCREG_GE, cpsr.ge);
    }

    // Update core misc regs first as they
    // affect how other registers are mapped.
    for (const auto &ri : miscRegMap) {
        const auto value(getOneRegU64(ri.kvm));
        DPRINTF(KvmContext, "  %s := 0x%x\n", ri.name, value);
        tc->setMiscRegNoEffect(ri.idx, value);
    }

    for (int i = 0; i < NUM_XREGS; ++i) {
        const auto value(getOneRegU64(kvmXReg(i)));
        DPRINTF(KvmContext, "  X%i := 0x%x\n", i, value);
        // KVM64 returns registers in 64-bit layout. If we are in aarch32
        // mode, we need to map these to banked ARM32 registers.
        if (inAArch64(tc)) {
            tc->setIntReg(INTREG_X0 + i, value);
        } else {
            tc->setIntRegFlat(IntReg64Map[INTREG_X0 + i], value);
        }
    }

    for (const auto &ri : intRegMap) {
        const auto value(getOneRegU64(ri.kvm));
        DPRINTF(KvmContext, "  %s := 0x%x\n", ri.name, value);
        tc->setIntReg(ri.idx, value);
    }

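    // Read each 128-bit vector register from KVM and scatter it back
    // into gem5's four 32-bit float registers.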
    for (int i = 0; i < NUM_QREGS; ++i) {
        const RegIndex reg_base(i * FP_REGS_PER_VFP_REG);
        KvmFPReg reg;
        DPRINTF(KvmContext, "  Q%i: %s\n", i, getAndFormatOneReg(kvmFPReg(i)));
        getOneReg(kvmFPReg(i), reg.data);
        for (int j = 0; j < FP_REGS_PER_VFP_REG; j++)
            tc->setFloatReg(reg_base + j, reg.s[j].i);
    }

    for (const auto &ri : getSysRegMap()) {
        const auto value(getOneRegU64(ri.kvm));
        DPRINTF(KvmContext, "  %s := 0x%x\n", ri.name, value);
        if (ri.is_device) {
            // This system register is backed by a device. This means
            // we need to lock the device event queue.
            EventQueue::ScopedMigration migrate(deviceEventQueue());

            tc->setMiscReg(ri.idx, value);
        } else {
            tc->setMiscRegNoEffect(ri.idx, value);
        }
    }

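    // KVM only provides the raw PC value; reconstruct the rest of
    // gem5's PC state (ISA width and Thumb state) from the CPSR read
    // above.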
    PCState pc(getOneRegU64(INT_REG(regs.pc)));
    pc.aarch64(inAArch64(tc));
    pc.thumb(cpsr.t);
    pc.nextAArch64(inAArch64(tc));
    // TODO: This is a massive assumption that will break when
    // switching to thumb.
    pc.nextThumb(cpsr.t);
    DPRINTF(KvmContext, "  PC := 0x%x (t: %i, a64: %i)\n",
            pc.instAddr(), pc.thumb(), pc.aarch64());
    tc->pcState(pc);
}

const std::vector<ArmV8KvmCPU::MiscRegInfo> &
ArmV8KvmCPU::getSysRegMap() const
{
    // Try to use the cached map
    if (!sysRegMap.empty())
        return sysRegMap;

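    // Build the map by walking the register list reported by KVM,
    // decoding each AArch64 system register into a gem5 MiscRegIndex,
    // and keeping the registers that gem5 both implements and can
    // write. Registers in deviceRegSet are flagged as device-backed.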
    for (const auto &reg : getRegList()) {
        const uint64_t arch(reg & KVM_REG_ARCH_MASK);
        if (arch != KVM_REG_ARM64)
            continue;

        const uint64_t type(reg & KVM_REG_ARM_COPROC_MASK);
        if (type != KVM_REG_ARM64_SYSREG)
            continue;

        const uint64_t op0(EXTRACT_FIELD(reg, KVM_REG_ARM64_SYSREG_OP0));
        const uint64_t op1(EXTRACT_FIELD(reg, KVM_REG_ARM64_SYSREG_OP1));
        const uint64_t crn(EXTRACT_FIELD(reg, KVM_REG_ARM64_SYSREG_CRN));
        const uint64_t crm(EXTRACT_FIELD(reg, KVM_REG_ARM64_SYSREG_CRM));
        const uint64_t op2(EXTRACT_FIELD(reg, KVM_REG_ARM64_SYSREG_OP2));
        const MiscRegIndex idx(decodeAArch64SysReg(op0, op1, crn, crm, op2));
        const auto &info(miscRegInfo[idx]);
        const bool writeable(
            info[MISCREG_USR_NS_WR] || info[MISCREG_USR_S_WR] ||
            info[MISCREG_PRI_S_WR] || info[MISCREG_PRI_NS_WR] ||
            info[MISCREG_HYP_WR] ||
            info[MISCREG_MON_NS0_WR] || info[MISCREG_MON_NS1_WR]);
        const bool implemented(
            info[MISCREG_IMPLEMENTED] || info[MISCREG_WARN_NOT_FAIL]);

        // Only add implemented registers that we are going to be able
        // to write.
        if (implemented && writeable)
            sysRegMap.emplace_back(reg, idx, miscRegName[idx],
                deviceRegSet.find(idx) != deviceRegSet.end());
    }

    return sysRegMap;
}

ArmV8KvmCPU *
ArmV8KvmCPUParams::create()
{
    return new ArmV8KvmCPU(this);
}