/*
 * Copyright (c) 2010-2013,2016-2018 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2007-2008 The Florida State University
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Stephen Hines
 */

#ifndef __ARCH_ARM_INSTS_STATICINST_HH__
#define __ARCH_ARM_INSTS_STATICINST_HH__

#include <memory>

#include "arch/arm/faults.hh"
#include "arch/arm/utility.hh"
#include "arch/arm/system.hh"
#include "base/trace.hh"
#include "cpu/exec_context.hh"
#include "cpu/static_inst.hh"
#include "sim/byteswap.hh"
#include "sim/full_system.hh"

namespace ArmISA
{

/**
 * Common base class for ARM static instructions.
 *
 * Caches per-instruction decode state (execution state, integer operand
 * width) and provides a library of helpers shared by the generated
 * instruction classes: barrel-shifter arithmetic, saturating math,
 * disassembly printing and PSR manipulation.
 */
class ArmStaticInst : public StaticInst
{
  protected:
    // Mirrors machInst.aarch64: true when the instruction was decoded in
    // AArch64 state.
    bool aarch64;
    // Integer operand width in bits (32 or 64), fixed in the constructor.
    uint8_t intWidth;

    // AArch32 barrel-shifter value helpers: shift 'base' by an immediate
    // (shamt) or register-supplied amount according to 'type'; cfval is
    // the carry-flag input (used by RRX-style shifts — defined in the
    // .cc file, not visible here).
    int32_t shift_rm_imm(uint32_t base, uint32_t shamt,
                         uint32_t type, uint32_t cfval) const;
    int32_t shift_rm_rs(uint32_t base, uint32_t shamt,
                        uint32_t type, uint32_t cfval) const;

    // Carry-out computation matching the two shift helpers above.
    bool shift_carry_imm(uint32_t base, uint32_t shamt,
                         uint32_t type, uint32_t cfval) const;
    bool shift_carry_rs(uint32_t base, uint32_t shamt,
                        uint32_t type, uint32_t cfval) const;

    // AArch64 shifted/extended register operand forms, producing a
    // result truncated to 'width' bits.
    int64_t shiftReg64(uint64_t base, uint64_t shiftAmt,
                       ArmShiftType type, uint8_t width) const;
    int64_t extendReg64(uint64_t base, ArmExtendType type,
                        uint64_t shiftAmt, uint8_t width) const;

    /**
     * Signed saturating add (or subtract, when sub is true) into a
     * 'width'-bit result.
     *
     * @param res Receives the (possibly clamped) result.
     * @param op1 First operand.
     * @param op2 Second operand.
     * @param sub Subtract op2 instead of adding it.
     * @return true if the result saturated.
     */
    template<int width>
    static inline bool
    saturateOp(int32_t &res, int64_t op1, int64_t op2, bool sub=false)
    {
        int64_t midRes = sub ? (op1 - op2) : (op1 + op2);
        // If the bit just above the field differs from the field's sign
        // bit, the exact 64-bit result does not fit in 'width' bits and
        // must be clamped to the nearest representable extreme.
        if (bits(midRes, width) != bits(midRes, width - 1)) {
            if (midRes > 0)
                res = (LL(1) << (width - 1)) - 1;
            else
                res = -(LL(1) << (width - 1));
            return true;
        } else {
            res = midRes;
            return false;
        }
    }

    /**
     * Clamp 'op' to the signed 'width'-bit range.
     *
     * @return true if clamping was necessary.
     */
    static inline bool
    satInt(int32_t &res, int64_t op, int width)
    {
        width--;  // work with the sign-bit position instead of the width
        if (op >= (LL(1) << width)) {
            res = (LL(1) << width) - 1;
            return true;
        } else if (op < -(LL(1) << width)) {
            res = -(LL(1) << width);
            return true;
        } else {
            res = op;
            return false;
        }
    }

    /**
     * Unsigned saturating add (or subtract, when sub is true) into a
     * 'width'-bit result.
     *
     * @return true if the result saturated.
     */
    template<int width>
    static inline bool
    uSaturateOp(uint32_t &res, int64_t op1, int64_t op2, bool sub=false)
    {
        int64_t midRes = sub ? (op1 - op2) : (op1 + op2);
        if (midRes >= (LL(1) << width)) {
            res = (LL(1) << width) - 1;
            return true;
        } else if (midRes < 0) {
            res = 0;
            return true;
        } else {
            res = midRes;
            return false;
        }
    }

    /**
     * Clamp 'op' to the unsigned 'width'-bit range.
     *
     * @return true if clamping was necessary.
     */
    static inline bool
    uSatInt(int32_t &res, int64_t op, int width)
    {
        if (op >= (LL(1) << width)) {
            res = (LL(1) << width) - 1;
            return true;
        } else if (op < 0) {
            res = 0;
            return true;
        } else {
            res = op;
            return false;
        }
    }

    // Constructor: cache the execution state and the integer operand
    // width implied by the encoding.
    ArmStaticInst(const char *mnem, ExtMachInst _machInst,
                  OpClass __opClass)
        : StaticInst(mnem, _machInst, __opClass)
    {
        aarch64 = machInst.aarch64;
        if (bits(machInst, 28, 24) == 0x10)
            intWidth = 64;  // Force 64-bit width for ADR/ADRP
        else
            // Otherwise bit 31 selects the width in AArch64 encodings.
            intWidth = (aarch64 && bits(machInst, 31)) ? 64 : 32;
    }

    /// Print a register name for disassembly given the unique
    /// dependence tag number (FP or int).
    void printIntReg(std::ostream &os, RegIndex reg_idx) const;
    void printFloatReg(std::ostream &os, RegIndex reg_idx) const;
    void printVecReg(std::ostream &os, RegIndex reg_idx) const;
    void printCCReg(std::ostream &os, RegIndex reg_idx) const;
    void printMiscReg(std::ostream &os, RegIndex reg_idx) const;
    void printMnemonic(std::ostream &os,
                       const std::string &suffix = "",
                       bool withPred = true,
                       bool withCond64 = false,
                       ConditionCode cond64 = COND_UC) const;
    void printTarget(std::ostream &os, Addr target,
                     const SymbolTable *symtab) const;
    void printCondition(std::ostream &os, unsigned code,
                        bool noImplicit=false) const;
    void printMemSymbol(std::ostream &os, const SymbolTable *symtab,
                        const std::string &prefix, const Addr addr,
                        const std::string &suffix) const;
    void printShiftOperand(std::ostream &os, IntRegIndex rm,
                           bool immShift, uint32_t shiftAmt,
                           IntRegIndex rs, ArmShiftType type) const;
    void printExtendOperand(bool firstOperand, std::ostream &os,
                            IntRegIndex rm, ArmExtendType type,
                            int64_t shiftAmt) const;


    void printDataInst(std::ostream &os, bool withImm) const;
    void printDataInst(std::ostream &os, bool withImm, bool immShift, bool s,
                       IntRegIndex rd, IntRegIndex rn, IntRegIndex rm,
                       IntRegIndex rs, uint32_t shiftAmt, ArmShiftType type,
                       uint64_t imm) const;

    void
| 1/* 2 * Copyright (c) 2010-2013,2016-2018 ARM Limited 3 * All rights reserved 4 * 5 * The license below extends only to copyright in the software and shall 6 * not be construed as granting a license to any other intellectual 7 * property including but not limited to intellectual property relating 8 * to a hardware implementation of the functionality of the software 9 * licensed hereunder. You may use the software subject to the license 10 * terms below provided that you ensure that this notice is replicated 11 * unmodified and in its entirety in all distributions of the software, 12 * modified or unmodified, in source code or in binary form. 13 * 14 * Copyright (c) 2007-2008 The Florida State University 15 * All rights reserved. 16 * 17 * Redistribution and use in source and binary forms, with or without 18 * modification, are permitted provided that the following conditions are 19 * met: redistributions of source code must retain the above copyright 20 * notice, this list of conditions and the following disclaimer; 21 * redistributions in binary form must reproduce the above copyright 22 * notice, this list of conditions and the following disclaimer in the 23 * documentation and/or other materials provided with the distribution; 24 * neither the name of the copyright holders nor the names of its 25 * contributors may be used to endorse or promote products derived from 26 * this software without specific prior written permission. 27 * 28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 31 * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT 32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 39 * 40 * Authors: Stephen Hines 41 */ 42#ifndef __ARCH_ARM_INSTS_STATICINST_HH__ 43#define __ARCH_ARM_INSTS_STATICINST_HH__ 44 45#include <memory> 46 47#include "arch/arm/faults.hh" 48#include "arch/arm/utility.hh" 49#include "arch/arm/system.hh" 50#include "base/trace.hh" 51#include "cpu/exec_context.hh" 52#include "cpu/static_inst.hh" 53#include "sim/byteswap.hh" 54#include "sim/full_system.hh" 55 56namespace ArmISA 57{ 58 59class ArmStaticInst : public StaticInst 60{ 61 protected: 62 bool aarch64; 63 uint8_t intWidth; 64 65 int32_t shift_rm_imm(uint32_t base, uint32_t shamt, 66 uint32_t type, uint32_t cfval) const; 67 int32_t shift_rm_rs(uint32_t base, uint32_t shamt, 68 uint32_t type, uint32_t cfval) const; 69 70 bool shift_carry_imm(uint32_t base, uint32_t shamt, 71 uint32_t type, uint32_t cfval) const; 72 bool shift_carry_rs(uint32_t base, uint32_t shamt, 73 uint32_t type, uint32_t cfval) const; 74 75 int64_t shiftReg64(uint64_t base, uint64_t shiftAmt, 76 ArmShiftType type, uint8_t width) const; 77 int64_t extendReg64(uint64_t base, ArmExtendType type, 78 uint64_t shiftAmt, uint8_t width) const; 79 80 template<int width> 81 static inline bool 82 saturateOp(int32_t &res, int64_t op1, int64_t op2, bool sub=false) 83 { 84 int64_t midRes = sub ? 
(op1 - op2) : (op1 + op2); 85 if (bits(midRes, width) != bits(midRes, width - 1)) { 86 if (midRes > 0) 87 res = (LL(1) << (width - 1)) - 1; 88 else 89 res = -(LL(1) << (width - 1)); 90 return true; 91 } else { 92 res = midRes; 93 return false; 94 } 95 } 96 97 static inline bool 98 satInt(int32_t &res, int64_t op, int width) 99 { 100 width--; 101 if (op >= (LL(1) << width)) { 102 res = (LL(1) << width) - 1; 103 return true; 104 } else if (op < -(LL(1) << width)) { 105 res = -(LL(1) << width); 106 return true; 107 } else { 108 res = op; 109 return false; 110 } 111 } 112 113 template<int width> 114 static inline bool 115 uSaturateOp(uint32_t &res, int64_t op1, int64_t op2, bool sub=false) 116 { 117 int64_t midRes = sub ? (op1 - op2) : (op1 + op2); 118 if (midRes >= (LL(1) << width)) { 119 res = (LL(1) << width) - 1; 120 return true; 121 } else if (midRes < 0) { 122 res = 0; 123 return true; 124 } else { 125 res = midRes; 126 return false; 127 } 128 } 129 130 static inline bool 131 uSatInt(int32_t &res, int64_t op, int width) 132 { 133 if (op >= (LL(1) << width)) { 134 res = (LL(1) << width) - 1; 135 return true; 136 } else if (op < 0) { 137 res = 0; 138 return true; 139 } else { 140 res = op; 141 return false; 142 } 143 } 144 145 // Constructor 146 ArmStaticInst(const char *mnem, ExtMachInst _machInst, 147 OpClass __opClass) 148 : StaticInst(mnem, _machInst, __opClass) 149 { 150 aarch64 = machInst.aarch64; 151 if (bits(machInst, 28, 24) == 0x10) 152 intWidth = 64; // Force 64-bit width for ADR/ADRP 153 else 154 intWidth = (aarch64 && bits(machInst, 31)) ? 64 : 32; 155 } 156 157 /// Print a register name for disassembly given the unique 158 /// dependence tag number (FP or int). 
159 void printIntReg(std::ostream &os, RegIndex reg_idx) const; 160 void printFloatReg(std::ostream &os, RegIndex reg_idx) const; 161 void printVecReg(std::ostream &os, RegIndex reg_idx) const; 162 void printCCReg(std::ostream &os, RegIndex reg_idx) const; 163 void printMiscReg(std::ostream &os, RegIndex reg_idx) const; 164 void printMnemonic(std::ostream &os, 165 const std::string &suffix = "", 166 bool withPred = true, 167 bool withCond64 = false, 168 ConditionCode cond64 = COND_UC) const; 169 void printTarget(std::ostream &os, Addr target, 170 const SymbolTable *symtab) const; 171 void printCondition(std::ostream &os, unsigned code, 172 bool noImplicit=false) const; 173 void printMemSymbol(std::ostream &os, const SymbolTable *symtab, 174 const std::string &prefix, const Addr addr, 175 const std::string &suffix) const; 176 void printShiftOperand(std::ostream &os, IntRegIndex rm, 177 bool immShift, uint32_t shiftAmt, 178 IntRegIndex rs, ArmShiftType type) const; 179 void printExtendOperand(bool firstOperand, std::ostream &os, 180 IntRegIndex rm, ArmExtendType type, 181 int64_t shiftAmt) const; 182 183 184 void printDataInst(std::ostream &os, bool withImm) const; 185 void printDataInst(std::ostream &os, bool withImm, bool immShift, bool s, 186 IntRegIndex rd, IntRegIndex rn, IntRegIndex rm, 187 IntRegIndex rs, uint32_t shiftAmt, ArmShiftType type, 188 uint64_t imm) const; 189 190 void
|
197 198 static inline uint32_t 199 cpsrWriteByInstr(CPSR cpsr, uint32_t val, SCR scr, NSACR nsacr, 200 uint8_t byteMask, bool affectState, bool nmfi, ThreadContext *tc) 201 { 202 bool privileged = (cpsr.mode != MODE_USER); 203 bool haveVirt = ArmSystem::haveVirtualization(tc); 204 bool haveSecurity = ArmSystem::haveSecurity(tc); 205 bool isSecure = inSecureState(scr, cpsr) || !haveSecurity; 206 207 uint32_t bitMask = 0; 208 209 if (bits(byteMask, 3)) { 210 unsigned lowIdx = affectState ? 24 : 27; 211 bitMask = bitMask | mask(31, lowIdx); 212 } 213 if (bits(byteMask, 2)) { 214 bitMask = bitMask | mask(19, 16); 215 } 216 if (bits(byteMask, 1)) { 217 unsigned highIdx = affectState ? 15 : 9; 218 unsigned lowIdx = (privileged && (isSecure || scr.aw || haveVirt)) 219 ? 8 : 9; 220 bitMask = bitMask | mask(highIdx, lowIdx); 221 } 222 if (bits(byteMask, 0)) { 223 if (privileged) { 224 bitMask |= 1 << 7; 225 if ( (!nmfi || !((val >> 6) & 0x1)) && 226 (isSecure || scr.fw || haveVirt) ) { 227 bitMask |= 1 << 6; 228 } 229 // Now check the new mode is allowed 230 OperatingMode newMode = (OperatingMode) (val & mask(5)); 231 OperatingMode oldMode = (OperatingMode)(uint32_t)cpsr.mode; 232 if (!badMode(newMode)) { 233 bool validModeChange = true; 234 // Check for attempts to enter modes only permitted in 235 // Secure state from Non-secure state. These are Monitor 236 // mode ('10110'), and FIQ mode ('10001') if the Security 237 // Extensions have reserved it. 
238 if (!isSecure && newMode == MODE_MON) 239 validModeChange = false; 240 if (!isSecure && newMode == MODE_FIQ && nsacr.rfr == '1') 241 validModeChange = false; 242 // There is no Hyp mode ('11010') in Secure state, so that 243 // is UNPREDICTABLE 244 if (scr.ns == '0' && newMode == MODE_HYP) 245 validModeChange = false; 246 // Cannot move into Hyp mode directly from a Non-secure 247 // PL1 mode 248 if (!isSecure && oldMode != MODE_HYP && newMode == MODE_HYP) 249 validModeChange = false; 250 // Cannot move out of Hyp mode with this function except 251 // on an exception return 252 if (oldMode == MODE_HYP && newMode != MODE_HYP && !affectState) 253 validModeChange = false; 254 // Must not change to 64 bit when running in 32 bit mode 255 if (!opModeIs64(oldMode) && opModeIs64(newMode)) 256 validModeChange = false; 257 258 // If we passed all of the above then set the bit mask to 259 // copy the mode accross 260 if (validModeChange) { 261 bitMask = bitMask | mask(5); 262 } else { 263 warn_once("Illegal change to CPSR mode attempted\n"); 264 } 265 } else { 266 warn_once("Ignoring write of bad mode to CPSR.\n"); 267 } 268 } 269 if (affectState) 270 bitMask = bitMask | (1 << 5); 271 } 272 273 return ((uint32_t)cpsr & ~bitMask) | (val & bitMask); 274 } 275 276 static inline uint32_t 277 spsrWriteByInstr(uint32_t spsr, uint32_t val, 278 uint8_t byteMask, bool affectState) 279 { 280 uint32_t bitMask = 0; 281 282 if (bits(byteMask, 3)) 283 bitMask = bitMask | mask(31, 24); 284 if (bits(byteMask, 2)) 285 bitMask = bitMask | mask(19, 16); 286 if (bits(byteMask, 1)) 287 bitMask = bitMask | mask(15, 8); 288 if (bits(byteMask, 0)) 289 bitMask = bitMask | mask(7, 0); 290 291 return ((spsr & ~bitMask) | (val & bitMask)); 292 } 293 294 static inline Addr 295 readPC(ExecContext *xc) 296 { 297 return xc->pcState().instPC(); 298 } 299 300 static inline void 301 setNextPC(ExecContext *xc, Addr val) 302 { 303 PCState pc = xc->pcState(); 304 pc.instNPC(val); 305 xc->pcState(pc); 306 } 

    /**
     * Conditionally byte-swap 'val' as a single unit: to big-endian when
     * 'big' is true, to little-endian otherwise (guest byte order as the
     * starting point).
     */
    template<class T>
    static inline T
    cSwap(T val, bool big)
    {
        if (big) {
            return gtobe(val);
        } else {
            return gtole(val);
        }
    }

    /**
     * Element-wise variant: treat the T-typed value as an array of
     * E-typed elements and byte-swap each element independently.
     *
     * NOTE(review): writing conv.tVal and then reading conv.eVals relies
     * on union type punning, which is not strictly conforming C++
     * ([class.union] active-member rules); it works on the compilers this
     * code targets, but a memcpy-based conversion would be conforming.
     */
    template<class T, class E>
    static inline T
    cSwap(T val, bool big)
    {
        const unsigned count = sizeof(T) / sizeof(E);
        union {
            T tVal;
            E eVals[count];
        } conv;
        conv.tVal = htog(val);
        if (big) {
            for (unsigned i = 0; i < count; i++) {
                conv.eVals[i] = gtobe(conv.eVals[i]);
            }
        } else {
            for (unsigned i = 0; i < count; i++) {
                conv.eVals[i] = gtole(conv.eVals[i]);
            }
        }
        return gtoh(conv.tVal);
    }

    // Perform an interworking branch.
    static inline void
    setIWNextPC(ExecContext *xc, Addr val)
    {
        PCState pc = xc->pcState();
        pc.instIWNPC(val);
        xc->pcState(pc);
    }

    // Perform an interworking branch in ARM mode, a regular branch
    // otherwise.
    static inline void
    setAIWNextPC(ExecContext *xc, Addr val)
    {
        PCState pc = xc->pcState();
        pc.instAIWNPC(val);
        xc->pcState(pc);
    }

    /// Build the UndefinedInstruction fault returned when this
    /// instruction is executed while disabled.
    inline Fault
    disabledFault() const
    {
        return std::make_shared<UndefinedInstruction>(machInst, false,
                                                      mnemonic, true);
    }

    // Utility function used by checkForWFxTrap32 and checkForWFxTrap64
    // Returns true if processor has to trap a WFI/WFE instruction.
    bool isWFxTrapping(ThreadContext *tc,
                       ExceptionLevel targetEL, bool isWfe) const;

    /**
     * Trap an access to Advanced SIMD or FP registers due to access
     * control bits.
     *
     * See aarch64/exceptions/traps/AArch64.AdvSIMDFPAccessTrap in the
     * ARM ARM pseudocode library.
     *
     * @param el Target EL for the trap
     */
    Fault advSIMDFPAccessTrap64(ExceptionLevel el) const;


    /**
     * Check an Advanced SIMD access against CPTR_EL2 and CPTR_EL3.
     *
     * See aarch64/exceptions/traps/AArch64.CheckFPAdvSIMDTrap in the
     * ARM ARM pseudocode library.
     */
    Fault checkFPAdvSIMDTrap64(ThreadContext *tc, CPSR cpsr) const;

    /**
     * Check an Advanced SIMD access against CPACR_EL1, CPTR_EL2, and
     * CPTR_EL3.
     *
     * See aarch64/exceptions/traps/AArch64.CheckFPAdvSIMDEnabled in the
     * ARM ARM pseudocode library.
     */
    Fault checkFPAdvSIMDEnabled64(ThreadContext *tc,
                                  CPSR cpsr, CPACR cpacr) const;

    /**
     * Check if a VFP/SIMD access from aarch32 should be allowed.
     *
     * See aarch32/exceptions/traps/AArch32.CheckAdvSIMDOrFPEnabled in the
     * ARM ARM pseudocode library.
     */
    Fault checkAdvSIMDOrFPEnabled32(ThreadContext *tc,
                                    CPSR cpsr, CPACR cpacr,
                                    NSACR nsacr, FPEXC fpexc,
                                    bool fpexc_check, bool advsimd) const;

    /**
     * Check if WFE/WFI instruction execution in aarch32 should be trapped.
     *
     * See aarch32/exceptions/traps/AArch32.checkForWFxTrap in the
     * ARM ARM pseudocode library.
     */
    Fault checkForWFxTrap32(ThreadContext *tc,
                            ExceptionLevel tgtEl, bool isWfe) const;

    /**
     * Check if WFE/WFI instruction execution in aarch64 should be trapped.
     *
     * See aarch64/exceptions/traps/AArch64.checkForWFxTrap in the
     * ARM ARM pseudocode library.
     */
    Fault checkForWFxTrap64(ThreadContext *tc,
                            ExceptionLevel tgtEl, bool isWfe) const;

    /**
     * WFE/WFI trapping helper function.
     */
    Fault trapWFx(ThreadContext *tc, CPSR cpsr, SCR scr, bool isWfe) const;

    /**
     * Check if SETEND instruction execution in aarch32 should be trapped.
     *
     * See aarch32/exceptions/traps/AArch32.CheckSETENDEnabled in the
     * ARM ARM pseudocode library.
     */
    Fault checkSETENDEnabled(ThreadContext *tc, CPSR cpsr) const;

    /**
     * UNDEFINED behaviour in AArch32
     *
     * See aarch32/exceptions/traps/AArch32.UndefinedFault in the
     * ARM ARM pseudocode library.
     */
    Fault undefinedFault32(ThreadContext *tc, ExceptionLevel el) const;

    /**
     * UNDEFINED behaviour in AArch64
     *
     * See aarch64/exceptions/traps/AArch64.UndefinedFault in the
     * ARM ARM pseudocode library.
     */
    Fault undefinedFault64(ThreadContext *tc, ExceptionLevel el) const;

    /**
     * Get the new PSTATE from a SPSR register in preparation for an
     * exception return.
     *
     * See shared/functions/system/SetPSTATEFromPSR in the ARM ARM
     * pseudocode library.
     */
    CPSR getPSTATEFromPSR(ThreadContext *tc, CPSR cpsr, CPSR spsr) const;

    /**
     * Return true if exceptions normally routed to EL1 are being handled
     * at an Exception level using AArch64, because either EL1 is using
     * AArch64 or TGE is in force and EL2 is using AArch64.
     *
     * See aarch32/exceptions/exceptions/AArch32.GeneralExceptionsToAArch64
     * in the ARM ARM pseudocode library.
     */
    bool generalExceptionsToAArch64(ThreadContext *tc,
                                    ExceptionLevel pstateEL) const;

  public:
    /// Hook for subclasses to attach instruction-specific detail to a
    /// fault raised during execution; the default is a no-op.
    virtual void
    annotateFault(ArmFault *fault) {}

    /// Integer operand width (32 or 64 bits) fixed at decode time.
    uint8_t
    getIntWidth() const
    {
        return intWidth;
    }

    /** Returns the byte size of current instruction */
    ssize_t
    instSize() const
    {
        return (!machInst.thumb || machInst.bigThumb) ? 4 : 2;
    }

    /**
     * Returns the real encoding of the instruction:
     * the machInst field is in fact always 64 bit wide and
     * contains some instruction metadata, which means it differs
     * from the real opcode.
     */
    MachInst
    encoding() const
    {
        return static_cast<MachInst>(machInst & (mask(instSize() * 8)));
    }

    /// Copy the raw instruction bytes into buf (see StaticInst::asBytes).
    size_t
    asBytes(void *buf, size_t max_size) override
    {
        return simpleAsBytes(buf, max_size, machInst);
    }
};
}

#endif //__ARCH_ARM_INSTS_STATICINST_HH__
| 198 199 static inline uint32_t 200 cpsrWriteByInstr(CPSR cpsr, uint32_t val, SCR scr, NSACR nsacr, 201 uint8_t byteMask, bool affectState, bool nmfi, ThreadContext *tc) 202 { 203 bool privileged = (cpsr.mode != MODE_USER); 204 bool haveVirt = ArmSystem::haveVirtualization(tc); 205 bool haveSecurity = ArmSystem::haveSecurity(tc); 206 bool isSecure = inSecureState(scr, cpsr) || !haveSecurity; 207 208 uint32_t bitMask = 0; 209 210 if (bits(byteMask, 3)) { 211 unsigned lowIdx = affectState ? 24 : 27; 212 bitMask = bitMask | mask(31, lowIdx); 213 } 214 if (bits(byteMask, 2)) { 215 bitMask = bitMask | mask(19, 16); 216 } 217 if (bits(byteMask, 1)) { 218 unsigned highIdx = affectState ? 15 : 9; 219 unsigned lowIdx = (privileged && (isSecure || scr.aw || haveVirt)) 220 ? 8 : 9; 221 bitMask = bitMask | mask(highIdx, lowIdx); 222 } 223 if (bits(byteMask, 0)) { 224 if (privileged) { 225 bitMask |= 1 << 7; 226 if ( (!nmfi || !((val >> 6) & 0x1)) && 227 (isSecure || scr.fw || haveVirt) ) { 228 bitMask |= 1 << 6; 229 } 230 // Now check the new mode is allowed 231 OperatingMode newMode = (OperatingMode) (val & mask(5)); 232 OperatingMode oldMode = (OperatingMode)(uint32_t)cpsr.mode; 233 if (!badMode(newMode)) { 234 bool validModeChange = true; 235 // Check for attempts to enter modes only permitted in 236 // Secure state from Non-secure state. These are Monitor 237 // mode ('10110'), and FIQ mode ('10001') if the Security 238 // Extensions have reserved it. 
239 if (!isSecure && newMode == MODE_MON) 240 validModeChange = false; 241 if (!isSecure && newMode == MODE_FIQ && nsacr.rfr == '1') 242 validModeChange = false; 243 // There is no Hyp mode ('11010') in Secure state, so that 244 // is UNPREDICTABLE 245 if (scr.ns == '0' && newMode == MODE_HYP) 246 validModeChange = false; 247 // Cannot move into Hyp mode directly from a Non-secure 248 // PL1 mode 249 if (!isSecure && oldMode != MODE_HYP && newMode == MODE_HYP) 250 validModeChange = false; 251 // Cannot move out of Hyp mode with this function except 252 // on an exception return 253 if (oldMode == MODE_HYP && newMode != MODE_HYP && !affectState) 254 validModeChange = false; 255 // Must not change to 64 bit when running in 32 bit mode 256 if (!opModeIs64(oldMode) && opModeIs64(newMode)) 257 validModeChange = false; 258 259 // If we passed all of the above then set the bit mask to 260 // copy the mode accross 261 if (validModeChange) { 262 bitMask = bitMask | mask(5); 263 } else { 264 warn_once("Illegal change to CPSR mode attempted\n"); 265 } 266 } else { 267 warn_once("Ignoring write of bad mode to CPSR.\n"); 268 } 269 } 270 if (affectState) 271 bitMask = bitMask | (1 << 5); 272 } 273 274 return ((uint32_t)cpsr & ~bitMask) | (val & bitMask); 275 } 276 277 static inline uint32_t 278 spsrWriteByInstr(uint32_t spsr, uint32_t val, 279 uint8_t byteMask, bool affectState) 280 { 281 uint32_t bitMask = 0; 282 283 if (bits(byteMask, 3)) 284 bitMask = bitMask | mask(31, 24); 285 if (bits(byteMask, 2)) 286 bitMask = bitMask | mask(19, 16); 287 if (bits(byteMask, 1)) 288 bitMask = bitMask | mask(15, 8); 289 if (bits(byteMask, 0)) 290 bitMask = bitMask | mask(7, 0); 291 292 return ((spsr & ~bitMask) | (val & bitMask)); 293 } 294 295 static inline Addr 296 readPC(ExecContext *xc) 297 { 298 return xc->pcState().instPC(); 299 } 300 301 static inline void 302 setNextPC(ExecContext *xc, Addr val) 303 { 304 PCState pc = xc->pcState(); 305 pc.instNPC(val); 306 xc->pcState(pc); 307 } 
308 309 template<class T> 310 static inline T 311 cSwap(T val, bool big) 312 { 313 if (big) { 314 return gtobe(val); 315 } else { 316 return gtole(val); 317 } 318 } 319 320 template<class T, class E> 321 static inline T 322 cSwap(T val, bool big) 323 { 324 const unsigned count = sizeof(T) / sizeof(E); 325 union { 326 T tVal; 327 E eVals[count]; 328 } conv; 329 conv.tVal = htog(val); 330 if (big) { 331 for (unsigned i = 0; i < count; i++) { 332 conv.eVals[i] = gtobe(conv.eVals[i]); 333 } 334 } else { 335 for (unsigned i = 0; i < count; i++) { 336 conv.eVals[i] = gtole(conv.eVals[i]); 337 } 338 } 339 return gtoh(conv.tVal); 340 } 341 342 // Perform an interworking branch. 343 static inline void 344 setIWNextPC(ExecContext *xc, Addr val) 345 { 346 PCState pc = xc->pcState(); 347 pc.instIWNPC(val); 348 xc->pcState(pc); 349 } 350 351 // Perform an interworking branch in ARM mode, a regular branch 352 // otherwise. 353 static inline void 354 setAIWNextPC(ExecContext *xc, Addr val) 355 { 356 PCState pc = xc->pcState(); 357 pc.instAIWNPC(val); 358 xc->pcState(pc); 359 } 360 361 inline Fault 362 disabledFault() const 363 { 364 return std::make_shared<UndefinedInstruction>(machInst, false, 365 mnemonic, true); 366 } 367 368 // Utility function used by checkForWFxTrap32 and checkForWFxTrap64 369 // Returns true if processor has to trap a WFI/WFE instruction. 370 bool isWFxTrapping(ThreadContext *tc, 371 ExceptionLevel targetEL, bool isWfe) const; 372 373 /** 374 * Trap an access to Advanced SIMD or FP registers due to access 375 * control bits. 376 * 377 * See aarch64/exceptions/traps/AArch64.AdvSIMDFPAccessTrap in the 378 * ARM ARM psueodcode library. 379 * 380 * @param el Target EL for the trap 381 */ 382 Fault advSIMDFPAccessTrap64(ExceptionLevel el) const; 383 384 385 /** 386 * Check an Advaned SIMD access against CPTR_EL2 and CPTR_EL3. 387 * 388 * See aarch64/exceptions/traps/AArch64.CheckFPAdvSIMDTrap in the 389 * ARM ARM psueodcode library. 
390 */ 391 Fault checkFPAdvSIMDTrap64(ThreadContext *tc, CPSR cpsr) const; 392 393 /** 394 * Check an Advaned SIMD access against CPACR_EL1, CPTR_EL2, and 395 * CPTR_EL3. 396 * 397 * See aarch64/exceptions/traps/AArch64.CheckFPAdvSIMDEnabled in the 398 * ARM ARM psueodcode library. 399 */ 400 Fault checkFPAdvSIMDEnabled64(ThreadContext *tc, 401 CPSR cpsr, CPACR cpacr) const; 402 403 /** 404 * Check if a VFP/SIMD access from aarch32 should be allowed. 405 * 406 * See aarch32/exceptions/traps/AArch32.CheckAdvSIMDOrFPEnabled in the 407 * ARM ARM psueodcode library. 408 */ 409 Fault checkAdvSIMDOrFPEnabled32(ThreadContext *tc, 410 CPSR cpsr, CPACR cpacr, 411 NSACR nsacr, FPEXC fpexc, 412 bool fpexc_check, bool advsimd) const; 413 414 /** 415 * Check if WFE/WFI instruction execution in aarch32 should be trapped. 416 * 417 * See aarch32/exceptions/traps/AArch32.checkForWFxTrap in the 418 * ARM ARM psueodcode library. 419 */ 420 Fault checkForWFxTrap32(ThreadContext *tc, 421 ExceptionLevel tgtEl, bool isWfe) const; 422 423 /** 424 * Check if WFE/WFI instruction execution in aarch64 should be trapped. 425 * 426 * See aarch64/exceptions/traps/AArch64.checkForWFxTrap in the 427 * ARM ARM psueodcode library. 428 */ 429 Fault checkForWFxTrap64(ThreadContext *tc, 430 ExceptionLevel tgtEl, bool isWfe) const; 431 432 /** 433 * WFE/WFI trapping helper function. 434 */ 435 Fault trapWFx(ThreadContext *tc, CPSR cpsr, SCR scr, bool isWfe) const; 436 437 /** 438 * Check if SETEND instruction execution in aarch32 should be trapped. 439 * 440 * See aarch32/exceptions/traps/AArch32.CheckSETENDEnabled in the 441 * ARM ARM pseudocode library. 442 */ 443 Fault checkSETENDEnabled(ThreadContext *tc, CPSR cpsr) const; 444 445 /** 446 * UNDEFINED behaviour in AArch32 447 * 448 * See aarch32/exceptions/traps/AArch32.UndefinedFault in the 449 * ARM ARM pseudocode library. 
450 */ 451 Fault undefinedFault32(ThreadContext *tc, ExceptionLevel el) const; 452 453 /** 454 * UNDEFINED behaviour in AArch64 455 * 456 * See aarch64/exceptions/traps/AArch64.UndefinedFault in the 457 * ARM ARM pseudocode library. 458 */ 459 Fault undefinedFault64(ThreadContext *tc, ExceptionLevel el) const; 460 461 /** 462 * Get the new PSTATE from a SPSR register in preparation for an 463 * exception return. 464 * 465 * See shared/functions/system/SetPSTATEFromPSR in the ARM ARM 466 * pseudocode library. 467 */ 468 CPSR getPSTATEFromPSR(ThreadContext *tc, CPSR cpsr, CPSR spsr) const; 469 470 /** 471 * Return true if exceptions normally routed to EL1 are being handled 472 * at an Exception level using AArch64, because either EL1 is using 473 * AArch64 or TGE is in force and EL2 is using AArch64. 474 * 475 * See aarch32/exceptions/exceptions/AArch32.GeneralExceptionsToAArch64 476 * in the ARM ARM pseudocode library. 477 */ 478 bool generalExceptionsToAArch64(ThreadContext *tc, 479 ExceptionLevel pstateEL) const; 480 481 public: 482 virtual void 483 annotateFault(ArmFault *fault) {} 484 485 uint8_t 486 getIntWidth() const 487 { 488 return intWidth; 489 } 490 491 /** Returns the byte size of current instruction */ 492 ssize_t 493 instSize() const 494 { 495 return (!machInst.thumb || machInst.bigThumb) ? 4 : 2; 496 } 497 498 /** 499 * Returns the real encoding of the instruction: 500 * the machInst field is in fact always 64 bit wide and 501 * contains some instruction metadata, which means it differs 502 * from the real opcode. 503 */ 504 MachInst 505 encoding() const 506 { 507 return static_cast<MachInst>(machInst & (mask(instSize() * 8))); 508 } 509 510 size_t 511 asBytes(void *buf, size_t max_size) override 512 { 513 return simpleAsBytes(buf, max_size, machInst); 514 } 515}; 516} 517 518#endif //__ARCH_ARM_INSTS_STATICINST_HH__
|