// armv8_cpu.cc (gem5 revision 11178)
/*
 * Copyright (c) 2015 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Andreas Sandberg
 */

#include "arch/arm/kvm/armv8_cpu.hh"

#include <linux/kvm.h>

#include "debug/KvmContext.hh"
#include "params/ArmV8KvmCPU.hh"

// Unlike gem5, kvm doesn't count the SP as a normal integer register,
// which means we only have 31 normal integer registers (X0-X30); the
// stack pointers are transferred separately (see intRegMap below).
constexpr static unsigned NUM_XREGS = NUM_ARCH_INTREGS - 1;
static_assert(NUM_XREGS == 31, "Unexpected number of aarch64 int. regs.");

// The KVM interface accesses vector registers of 4 single precision
// floats instead of individual registers.
54constexpr static unsigned NUM_QREGS = NumFloatV8ArchRegs / 4; 55static_assert(NUM_QREGS == 32, "Unexpected number of aarch64 vector regs."); 56 57#define EXTRACT_FIELD(v, name) \ 58 (((v) & name ## _MASK) >> name ## _SHIFT) 59 60#define CORE_REG(name, size) \ 61 (KVM_REG_ARM64 | KVM_REG_ARM_CORE | \ 62 KVM_REG_SIZE_ ## size | \ 63 KVM_REG_ARM_CORE_REG(name)) 64 65#define INT_REG(name) CORE_REG(name, U64) 66#define SIMD_REG(name) CORE_REG(name, U128) 67 68constexpr uint64_t 69kvmXReg(const int num) 70{ 71 return INT_REG(regs.regs[0]) + 72 (INT_REG(regs.regs[1]) - INT_REG(regs.regs[0])) * num; 73} 74 75constexpr uint64_t 76kvmFPReg(const int num) 77{ 78 return SIMD_REG(fp_regs.vregs[0]) + 79 (SIMD_REG(fp_regs.vregs[1]) - SIMD_REG(fp_regs.vregs[0])) * num; 80} 81 82union KvmFPReg { 83 union { 84 uint32_t i; 85 float f; 86 } s[4]; 87 88 union { 89 uint64_t i; 90 double f; 91 } d[2]; 92 93 uint8_t data[32]; 94}; 95 96#define FP_REGS_PER_VFP_REG 4 97static_assert(sizeof(FloatRegBits) == 4, "Unexpected float reg size"); 98 99const std::vector<ArmV8KvmCPU::IntRegInfo> ArmV8KvmCPU::intRegMap = { 100 { INT_REG(regs.sp), INTREG_SP0, "SP(EL0)" }, 101 { INT_REG(sp_el1), INTREG_SP1, "SP(EL1)" }, 102}; 103 104const std::vector<ArmV8KvmCPU::MiscRegInfo> ArmV8KvmCPU::miscRegMap = { 105 MiscRegInfo(INT_REG(regs.pstate), MISCREG_CPSR, "PSTATE"), 106 MiscRegInfo(INT_REG(elr_el1), MISCREG_ELR_EL1, "ELR(EL1)"), 107 MiscRegInfo(INT_REG(spsr[KVM_SPSR_EL1]), MISCREG_SPSR_EL1, "SPSR(EL1)"), 108 MiscRegInfo(INT_REG(spsr[KVM_SPSR_ABT]), MISCREG_SPSR_ABT, "SPSR(ABT)"), 109 MiscRegInfo(INT_REG(spsr[KVM_SPSR_UND]), MISCREG_SPSR_UND, "SPSR(UND)"), 110 MiscRegInfo(INT_REG(spsr[KVM_SPSR_IRQ]), MISCREG_SPSR_IRQ, "SPSR(IRQ)"), 111 MiscRegInfo(INT_REG(spsr[KVM_SPSR_FIQ]), MISCREG_SPSR_FIQ, "SPSR(FIQ)"), 112 MiscRegInfo(INT_REG(fp_regs.fpsr), MISCREG_FPSR, "FPSR"), 113 MiscRegInfo(INT_REG(fp_regs.fpcr), MISCREG_FPCR, "FPCR"), 114}; 115 116ArmV8KvmCPU::ArmV8KvmCPU(ArmV8KvmCPUParams *params) 117 : 
BaseArmKvmCPU(params) 118{ 119} 120 121ArmV8KvmCPU::~ArmV8KvmCPU() 122{ 123} 124 125void 126ArmV8KvmCPU::dump() const 127{ 128 inform("Integer registers:\n"); 129 inform(" PC: %s\n", getAndFormatOneReg(INT_REG(regs.pc))); 130 for (int i = 0; i < NUM_XREGS; ++i) 131 inform(" X%i: %s\n", i, getAndFormatOneReg(kvmXReg(i))); 132 133 for (int i = 0; i < NUM_QREGS; ++i) 134 inform(" Q%i: %s\n", i, getAndFormatOneReg(kvmFPReg(i))); 135 136 for (const auto &ri : intRegMap) 137 inform(" %s: %s\n", ri.name, getAndFormatOneReg(ri.kvm)); 138 139 for (const auto &ri : miscRegMap) 140 inform(" %s: %s\n", ri.name, getAndFormatOneReg(ri.kvm)); 141 142 for (const auto ® : getRegList()) { 143 const uint64_t arch(reg & KVM_REG_ARCH_MASK); 144 if (arch != KVM_REG_ARM64) { 145 inform("0x%x: %s\n", reg, getAndFormatOneReg(reg)); 146 continue; 147 } 148 149 const uint64_t type(reg & KVM_REG_ARM_COPROC_MASK); 150 switch (type) { 151 case KVM_REG_ARM_CORE: 152 // These have already been printed 153 break; 154 155 case KVM_REG_ARM64_SYSREG: { 156 const uint64_t op0(EXTRACT_FIELD(reg, KVM_REG_ARM64_SYSREG_OP0)); 157 const uint64_t op1(EXTRACT_FIELD(reg, KVM_REG_ARM64_SYSREG_OP1)); 158 const uint64_t crn(EXTRACT_FIELD(reg, KVM_REG_ARM64_SYSREG_CRN)); 159 const uint64_t crm(EXTRACT_FIELD(reg, KVM_REG_ARM64_SYSREG_CRM)); 160 const uint64_t op2(EXTRACT_FIELD(reg, KVM_REG_ARM64_SYSREG_OP2)); 161 const MiscRegIndex idx( 162 decodeAArch64SysReg(op0, op1, crn, crm, op2)); 163 164 inform(" %s (op0: %i, op1: %i, crn: %i, crm: %i, op2: %i): %s", 165 miscRegName[idx], op0, op1, crn, crm, op2, 166 getAndFormatOneReg(reg)); 167 } break; 168 169 case KVM_REG_ARM_DEMUX: { 170 const uint64_t id(EXTRACT_FIELD(reg, KVM_REG_ARM_DEMUX_ID)); 171 const uint64_t val(EXTRACT_FIELD(reg, KVM_REG_ARM_DEMUX_VAL)); 172 if (id == KVM_REG_ARM_DEMUX_ID_CCSIDR) { 173 inform(" CSSIDR[%i]: %s\n", val, 174 getAndFormatOneReg(reg)); 175 } else { 176 inform(" UNKNOWN[%i:%i]: %s\n", id, val, 177 getAndFormatOneReg(reg)); 178 } 179 
} break; 180 181 default: 182 inform("0x%x: %s\n", reg, getAndFormatOneReg(reg)); 183 } 184 } 185} 186 187void 188ArmV8KvmCPU::updateKvmState() 189{ 190 DPRINTF(KvmContext, "In updateKvmState():\n"); 191 for (const auto &ri : miscRegMap) { 192 const uint64_t value(tc->readMiscReg(ri.idx)); 193 DPRINTF(KvmContext, " %s := 0x%x\n", ri.name, value); 194 setOneReg(ri.kvm, value); 195 } 196 197 for (int i = 0; i < NUM_XREGS; ++i) { 198 const uint64_t value(tc->readIntReg(INTREG_X0 + i)); 199 DPRINTF(KvmContext, " X%i := 0x%x\n", i, value); 200 setOneReg(kvmXReg(i), value); 201 } 202 203 for (const auto &ri : intRegMap) { 204 const uint64_t value(tc->readIntReg(ri.idx)); 205 DPRINTF(KvmContext, " %s := 0x%x\n", ri.name, value); 206 setOneReg(ri.kvm, value); 207 } 208 209 for (int i = 0; i < NUM_QREGS; ++i) { 210 const RegIndex reg_base(i * FP_REGS_PER_VFP_REG); 211 KvmFPReg reg; 212 for (int j = 0; j < FP_REGS_PER_VFP_REG; j++) 213 reg.s[j].i = tc->readFloatRegBits(reg_base + j); 214 215 setOneReg(kvmFPReg(i), reg.data); 216 DPRINTF(KvmContext, " Q%i: %s\n", i, getAndFormatOneReg(kvmFPReg(i))); 217 } 218 219 for (const auto &ri : getSysRegMap()) { 220 const uint64_t value(tc->readMiscReg(ri.idx)); 221 DPRINTF(KvmContext, " %s := 0x%x\n", ri.name, value); 222 setOneReg(ri.kvm, value); 223 } 224 225 setOneReg(INT_REG(regs.pc), tc->instAddr()); 226 DPRINTF(KvmContext, " PC := 0x%x\n", tc->instAddr()); 227} 228 229void 230ArmV8KvmCPU::updateThreadContext() 231{ 232 DPRINTF(KvmContext, "In updateThreadContext():\n"); 233 234 // Update core misc regs first as they (particularly PSTATE/CPSR) 235 // affect how other registers are mapped. 
236 for (const auto &ri : miscRegMap) { 237 const auto value(getOneRegU64(ri.kvm)); 238 DPRINTF(KvmContext, " %s := 0x%x\n", ri.name, value); 239 tc->setMiscRegNoEffect(ri.idx, value); 240 } 241 242 for (int i = 0; i < NUM_XREGS; ++i) { 243 const auto value(getOneRegU64(kvmXReg(i))); 244 DPRINTF(KvmContext, " X%i := 0x%x\n", i, value); 245 tc->setIntReg(INTREG_X0 + i, value); 246 } 247 248 for (const auto &ri : intRegMap) { 249 const auto value(getOneRegU64(ri.kvm)); 250 DPRINTF(KvmContext, " %s := 0x%x\n", ri.name, value); 251 tc->setIntReg(ri.idx, value); 252 } 253 254 for (int i = 0; i < NUM_QREGS; ++i) { 255 const RegIndex reg_base(i * FP_REGS_PER_VFP_REG); 256 KvmFPReg reg; 257 DPRINTF(KvmContext, " Q%i: %s\n", i, getAndFormatOneReg(kvmFPReg(i))); 258 getOneReg(kvmFPReg(i), reg.data); 259 for (int j = 0; j < FP_REGS_PER_VFP_REG; j++) 260 tc->setFloatRegBits(reg_base + j, reg.s[j].i); 261 } 262 263 for (const auto &ri : getSysRegMap()) { 264 const auto value(getOneRegU64(ri.kvm)); 265 DPRINTF(KvmContext, " %s := 0x%x\n", ri.name, value); 266 tc->setMiscRegNoEffect(ri.idx, value); 267 } 268 269 const CPSR cpsr(tc->readMiscRegNoEffect(MISCREG_CPSR)); 270 PCState pc(getOneRegU64(INT_REG(regs.pc))); 271 pc.aarch64(inAArch64(tc)); 272 pc.thumb(cpsr.t); 273 pc.nextAArch64(inAArch64(tc)); 274 // TODO: This is a massive assumption that will break when 275 // switching to thumb. 
276 pc.nextThumb(cpsr.t); 277 DPRINTF(KvmContext, " PC := 0x%x (t: %i, a64: %i)\n", 278 pc.instAddr(), pc.thumb(), pc.aarch64()); 279 tc->pcState(pc); 280} 281 282const std::vector<ArmV8KvmCPU::MiscRegInfo> & 283ArmV8KvmCPU::getSysRegMap() const 284{ 285 // Try to use the cached map 286 if (!sysRegMap.empty()) 287 return sysRegMap; 288 289 for (const auto ® : getRegList()) { 290 const uint64_t arch(reg & KVM_REG_ARCH_MASK); 291 if (arch != KVM_REG_ARM64) 292 continue; 293 294 const uint64_t type(reg & KVM_REG_ARM_COPROC_MASK); 295 if (type != KVM_REG_ARM64_SYSREG) 296 continue; 297 298 const uint64_t op0(EXTRACT_FIELD(reg, KVM_REG_ARM64_SYSREG_OP0)); 299 const uint64_t op1(EXTRACT_FIELD(reg, KVM_REG_ARM64_SYSREG_OP1)); 300 const uint64_t crn(EXTRACT_FIELD(reg, KVM_REG_ARM64_SYSREG_CRN)); 301 const uint64_t crm(EXTRACT_FIELD(reg, KVM_REG_ARM64_SYSREG_CRM)); 302 const uint64_t op2(EXTRACT_FIELD(reg, KVM_REG_ARM64_SYSREG_OP2)); 303 const MiscRegIndex idx(decodeAArch64SysReg(op0, op1, crn, crm, op2)); 304 const auto &info(miscRegInfo[idx]); 305 const bool writeable( 306 info[MISCREG_USR_NS_WR] || info[MISCREG_USR_S_WR] || 307 info[MISCREG_PRI_S_WR] || info[MISCREG_PRI_NS_WR] || 308 info[MISCREG_HYP_WR] || 309 info[MISCREG_MON_NS0_WR] || info[MISCREG_MON_NS1_WR]); 310 const bool implemented( 311 info[MISCREG_IMPLEMENTED] || info[MISCREG_WARN_NOT_FAIL]); 312 313 // Only add implemented registers that we are going to be able 314 // to write. 315 if (implemented && writeable) 316 sysRegMap.emplace_back(reg, idx, miscRegName[idx]); 317 } 318 319 return sysRegMap; 320} 321 322ArmV8KvmCPU * 323ArmV8KvmCPUParams::create() 324{ 325 return new ArmV8KvmCPU(this); 326} 327