/*
 * Copyright (c) 2010-2013,2016-2018 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2007-2008 The Florida State University
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Stephen Hines
 */
#ifndef __ARCH_ARM_INSTS_STATICINST_HH__
#define __ARCH_ARM_INSTS_STATICINST_HH__

#include <memory>

#include "arch/arm/faults.hh"
#include "arch/arm/system.hh"
#include "arch/arm/utility.hh"
#include "base/trace.hh"
#include "cpu/exec_context.hh"
#include "cpu/static_inst.hh"
#include "sim/byteswap.hh"
#include "sim/full_system.hh"

namespace ArmISA
{

class ArmStaticInst : public StaticInst
{
  protected:
    bool aarch64;
    uint8_t intWidth;

    int32_t shift_rm_imm(uint32_t base, uint32_t shamt,
                         uint32_t type, uint32_t cfval) const;
    int32_t shift_rm_rs(uint32_t base, uint32_t shamt,
                        uint32_t type, uint32_t cfval) const;

    bool shift_carry_imm(uint32_t base, uint32_t shamt,
                         uint32_t type, uint32_t cfval) const;
    bool shift_carry_rs(uint32_t base, uint32_t shamt,
                        uint32_t type, uint32_t cfval) const;

    int64_t shiftReg64(uint64_t base, uint64_t shiftAmt,
                       ArmShiftType type, uint8_t width) const;
    int64_t extendReg64(uint64_t base, ArmExtendType type,
                        uint64_t shiftAmt, uint8_t width) const;

    template<int width>
    static inline bool
    saturateOp(int32_t &res, int64_t op1, int64_t op2, bool sub=false)
    {
        int64_t midRes = sub ? (op1 - op2) : (op1 + op2);
        if (bits(midRes, width) != bits(midRes, width - 1)) {
            if (midRes > 0)
                res = (LL(1) << (width - 1)) - 1;
            else
                res = -(LL(1) << (width - 1));
            return true;
        } else {
            res = midRes;
            return false;
        }
    }
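    // Illustrative usage (not part of the original header): saturating a
    // sum to a hypothetical 8-bit destination. With op1 = op2 = 100 the
    // intermediate sum (200) overflows the signed 8-bit range, so res is
    // clamped to 127 and the function reports saturation.
    //
    //     int32_t res;
    //     bool sat = saturateOp<8>(res, 100, 100);       // res == 127, true
    //     sat = saturateOp<8>(res, 100, 100, true);      // res == 0, false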
    static inline bool
    satInt(int32_t &res, int64_t op, int width)
    {
        width--;
        if (op >= (LL(1) << width)) {
            res = (LL(1) << width) - 1;
            return true;
        } else if (op < -(LL(1) << width)) {
            res = -(LL(1) << width);
            return true;
        } else {
            res = op;
            return false;
        }
    }

    template<int width>
    static inline bool
    uSaturateOp(uint32_t &res, int64_t op1, int64_t op2, bool sub=false)
    {
        int64_t midRes = sub ? (op1 - op2) : (op1 + op2);
        if (midRes >= (LL(1) << width)) {
            res = (LL(1) << width) - 1;
            return true;
        } else if (midRes < 0) {
            res = 0;
            return true;
        } else {
            res = midRes;
            return false;
        }
    }

    static inline bool
    uSatInt(int32_t &res, int64_t op, int width)
    {
        if (op >= (LL(1) << width)) {
            res = (LL(1) << width) - 1;
            return true;
        } else if (op < 0) {
            res = 0;
            return true;
        } else {
            res = op;
            return false;
        }
    }

    // Constructor
    ArmStaticInst(const char *mnem, ExtMachInst _machInst,
                  OpClass __opClass)
        : StaticInst(mnem, _machInst, __opClass)
    {
        aarch64 = machInst.aarch64;
        if (bits(machInst, 28, 24) == 0x10)
            intWidth = 64;  // Force 64-bit width for ADR/ADRP
        else
            intWidth = (aarch64 && bits(machInst, 31)) ? 64 : 32;
    }

    /// Print a register name for disassembly given the unique
    /// dependence tag number (FP or int).
    void printIntReg(std::ostream &os, RegIndex reg_idx,
                     uint8_t opWidth = 0) const;
    void printFloatReg(std::ostream &os, RegIndex reg_idx) const;
    void printVecReg(std::ostream &os, RegIndex reg_idx,
                     bool isSveVecReg = false) const;
    void printVecPredReg(std::ostream &os, RegIndex reg_idx) const;
    void printCCReg(std::ostream &os, RegIndex reg_idx) const;
    void printMiscReg(std::ostream &os, RegIndex reg_idx) const;
    void printMnemonic(std::ostream &os,
                       const std::string &suffix = "",
                       bool withPred = true,
                       bool withCond64 = false,
                       ConditionCode cond64 = COND_UC) const;
    void printTarget(std::ostream &os, Addr target,
                     const SymbolTable *symtab) const;
    void printCondition(std::ostream &os, unsigned code,
                        bool noImplicit=false) const;
    void printMemSymbol(std::ostream &os, const SymbolTable *symtab,
                        const std::string &prefix, const Addr addr,
                        const std::string &suffix) const;
    void printShiftOperand(std::ostream &os, IntRegIndex rm,
                           bool immShift, uint32_t shiftAmt,
                           IntRegIndex rs, ArmShiftType type) const;
    void printExtendOperand(bool firstOperand, std::ostream &os,
                            IntRegIndex rm, ArmExtendType type,
                            int64_t shiftAmt) const;
    void printPFflags(std::ostream &os, int flag) const;

    void printDataInst(std::ostream &os, bool withImm) const;
    void printDataInst(std::ostream &os, bool withImm, bool immShift, bool s,
                       IntRegIndex rd, IntRegIndex rn, IntRegIndex rm,
                       IntRegIndex rs, uint32_t shiftAmt, ArmShiftType type,
                       uint64_t imm) const;

    void
    advancePC(PCState &pcState) const override
    {
        pcState.advance();
    }

    std::string generateDisassembly(
            Addr pc, const SymbolTable *symtab) const override;
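    // A worked example of the byte-masked write below (illustrative, not
    // part of the original header): an MSR CPSR_f instruction presents
    // byteMask == 0x8, so only bits(byteMask, 3) is set and, with
    // affectState false, bitMask becomes mask(31, 27) == 0xf8000000. The
    // write therefore touches only the N, Z, C, V and Q flags, leaving the
    // mode and interrupt-mask bits of the CPSR unchanged.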
    static inline uint32_t
    cpsrWriteByInstr(CPSR cpsr, uint32_t val, SCR scr, NSACR nsacr,
            uint8_t byteMask, bool affectState, bool nmfi, ThreadContext *tc)
    {
        bool privileged   = (cpsr.mode != MODE_USER);
        bool haveVirt     = ArmSystem::haveVirtualization(tc);
        bool haveSecurity = ArmSystem::haveSecurity(tc);
        bool isSecure     = inSecureState(scr, cpsr) || !haveSecurity;

        uint32_t bitMask = 0;

        if (bits(byteMask, 3)) {
            unsigned lowIdx = affectState ? 24 : 27;
            bitMask = bitMask | mask(31, lowIdx);
        }
        if (bits(byteMask, 2)) {
            bitMask = bitMask | mask(19, 16);
        }
        if (bits(byteMask, 1)) {
            unsigned highIdx = affectState ? 15 : 9;
            unsigned lowIdx = (privileged && (isSecure || scr.aw || haveVirt))
                            ? 8 : 9;
            bitMask = bitMask | mask(highIdx, lowIdx);
        }
        if (bits(byteMask, 0)) {
            if (privileged) {
                bitMask |= 1 << 7;
                if ((!nmfi || !((val >> 6) & 0x1)) &&
                        (isSecure || scr.fw || haveVirt)) {
                    bitMask |= 1 << 6;
                }
                // Now check that the new mode is allowed
                OperatingMode newMode = (OperatingMode) (val & mask(5));
                OperatingMode oldMode = (OperatingMode)(uint32_t)cpsr.mode;
                if (!badMode(tc, newMode)) {
                    bool validModeChange = true;
                    // Check for attempts to enter modes only permitted in
                    // Secure state from Non-secure state. These are Monitor
                    // mode ('10110'), and FIQ mode ('10001') if the Security
                    // Extensions have reserved it.
                    if (!isSecure && newMode == MODE_MON)
                        validModeChange = false;
                    if (!isSecure && newMode == MODE_FIQ && nsacr.rfr == 1)
                        validModeChange = false;
                    // There is no Hyp mode ('11010') in Secure state, so that
                    // is UNPREDICTABLE
                    if (scr.ns == 0 && newMode == MODE_HYP)
                        validModeChange = false;
                    // Cannot move into Hyp mode directly from a Non-secure
                    // PL1 mode
                    if (!isSecure && oldMode != MODE_HYP &&
                            newMode == MODE_HYP)
                        validModeChange = false;
                    // Cannot move out of Hyp mode with this function except
                    // on an exception return
                    if (oldMode == MODE_HYP && newMode != MODE_HYP &&
                            !affectState)
                        validModeChange = false;
                    // Must not change to a 64-bit mode when running in a
                    // 32-bit mode
                    if (!opModeIs64(oldMode) && opModeIs64(newMode))
                        validModeChange = false;

                    // If we passed all of the above then set the bit mask to
                    // copy the mode across
                    if (validModeChange) {
                        bitMask = bitMask | mask(5);
                    } else {
                        warn_once("Illegal change to CPSR mode attempted\n");
                    }
                } else {
                    warn_once("Ignoring write of bad mode to CPSR.\n");
                }
            }
            if (affectState)
                bitMask = bitMask | (1 << 5);
        }

        return ((uint32_t)cpsr & ~bitMask) | (val & bitMask);
    }

    static inline uint32_t
    spsrWriteByInstr(uint32_t spsr, uint32_t val,
            uint8_t byteMask, bool affectState)
    {
        uint32_t bitMask = 0;

        if (bits(byteMask, 3))
            bitMask = bitMask | mask(31, 24);
        if (bits(byteMask, 2))
            bitMask = bitMask | mask(19, 16);
        if (bits(byteMask, 1))
            bitMask = bitMask | mask(15, 8);
        if (bits(byteMask, 0))
            bitMask = bitMask | mask(7, 0);

        return ((spsr & ~bitMask) | (val & bitMask));
    }

    static inline Addr
    readPC(ExecContext *xc)
    {
        return xc->pcState().instPC();
    }

    static inline void
    setNextPC(ExecContext *xc, Addr val)
    {
        PCState pc = xc->pcState();
        pc.instNPC(val);
        xc->pcState(pc);
    }

    template<class T>
    static inline T
    cSwap(T val, bool big)
    {
        if (big) {
            return gtobe(val);
        } else {
            return gtole(val);
        }
    }
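    // The overload below conditionally swaps each element of a packed value
    // rather than the value as a whole. Because the element type E does not
    // appear in the parameter list it cannot be deduced, so callers must
    // name both types explicitly. Hypothetical example (not from the
    // original header): converting each 16-bit half of a 32-bit value to
    // big-endian element order:
    //
    //     uint32_t packed = cSwap<uint32_t, uint16_t>(data, true);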
    template<class T, class E>
    static inline T
    cSwap(T val, bool big)
    {
        const unsigned count = sizeof(T) / sizeof(E);
        union {
            T tVal;
            E eVals[count];
        } conv;
        conv.tVal = htog(val);
        if (big) {
            for (unsigned i = 0; i < count; i++) {
                conv.eVals[i] = gtobe(conv.eVals[i]);
            }
        } else {
            for (unsigned i = 0; i < count; i++) {
                conv.eVals[i] = gtole(conv.eVals[i]);
            }
        }
        return gtoh(conv.tVal);
    }

    // Perform an interworking branch.
    static inline void
    setIWNextPC(ExecContext *xc, Addr val)
    {
        PCState pc = xc->pcState();
        pc.instIWNPC(val);
        xc->pcState(pc);
    }

    // Perform an interworking branch in ARM mode, a regular branch
    // otherwise.
    static inline void
    setAIWNextPC(ExecContext *xc, Addr val)
    {
        PCState pc = xc->pcState();
        pc.instAIWNPC(val);
        xc->pcState(pc);
    }

    inline Fault
    disabledFault() const
    {
        return std::make_shared<UndefinedInstruction>(machInst, false,
                                                      mnemonic, true);
    }

    // Utility function used by checkForWFxTrap32 and checkForWFxTrap64.
    // Returns true if the processor has to trap a WFI/WFE instruction.
    bool isWFxTrapping(ThreadContext *tc,
                       ExceptionLevel targetEL, bool isWfe) const;

    /**
     * Trigger a Software Breakpoint.
     *
     * See aarch32/exceptions/debug/AArch32.SoftwareBreakpoint in the
     * ARM ARM pseudocode library.
     */
    Fault softwareBreakpoint32(ExecContext *xc, uint16_t imm) const;

    /**
     * Trap an access to Advanced SIMD or FP registers due to access
     * control bits.
     *
     * See aarch64/exceptions/traps/AArch64.AdvSIMDFPAccessTrap in the
     * ARM ARM pseudocode library.
     *
     * @param el Target EL for the trap
     */
    Fault advSIMDFPAccessTrap64(ExceptionLevel el) const;

    /**
     * Check an Advanced SIMD access against CPTR_EL2 and CPTR_EL3.
     *
     * See aarch64/exceptions/traps/AArch64.CheckFPAdvSIMDTrap in the
     * ARM ARM pseudocode library.
     */
    Fault checkFPAdvSIMDTrap64(ThreadContext *tc, CPSR cpsr) const;

    /**
     * Check an Advanced SIMD access against CPACR_EL1, CPTR_EL2, and
     * CPTR_EL3.
     *
     * See aarch64/exceptions/traps/AArch64.CheckFPAdvSIMDEnabled in the
     * ARM ARM pseudocode library.
     */
    Fault checkFPAdvSIMDEnabled64(ThreadContext *tc,
                                  CPSR cpsr, CPACR cpacr) const;

    /**
     * Check if a VFP/SIMD access from aarch32 should be allowed.
     *
     * See aarch32/exceptions/traps/AArch32.CheckAdvSIMDOrFPEnabled in the
     * ARM ARM pseudocode library.
     */
    Fault checkAdvSIMDOrFPEnabled32(ThreadContext *tc,
                                    CPSR cpsr, CPACR cpacr,
                                    NSACR nsacr, FPEXC fpexc,
                                    bool fpexc_check, bool advsimd) const;

    /**
     * Check if WFE/WFI instruction execution in aarch32 should be trapped.
     *
     * See aarch32/exceptions/traps/AArch32.checkForWFxTrap in the
     * ARM ARM pseudocode library.
     */
    Fault checkForWFxTrap32(ThreadContext *tc,
                            ExceptionLevel tgtEl, bool isWfe) const;

    /**
     * Check if WFE/WFI instruction execution in aarch64 should be trapped.
     *
     * See aarch64/exceptions/traps/AArch64.checkForWFxTrap in the
     * ARM ARM pseudocode library.
     */
    Fault checkForWFxTrap64(ThreadContext *tc,
                            ExceptionLevel tgtEl, bool isWfe) const;

    /**
     * WFE/WFI trapping helper function.
     */
    Fault trapWFx(ThreadContext *tc, CPSR cpsr, SCR scr, bool isWfe) const;

    /**
     * Check if SETEND instruction execution in aarch32 should be trapped.
     *
     * See aarch32/exceptions/traps/AArch32.CheckSETENDEnabled in the
     * ARM ARM pseudocode library.
     */
    Fault checkSETENDEnabled(ThreadContext *tc, CPSR cpsr) const;

    /**
     * UNDEFINED behaviour in AArch32
     *
     * See aarch32/exceptions/traps/AArch32.UndefinedFault in the
     * ARM ARM pseudocode library.
     */
    Fault undefinedFault32(ThreadContext *tc, ExceptionLevel el) const;

    /**
     * UNDEFINED behaviour in AArch64
     *
     * See aarch64/exceptions/traps/AArch64.UndefinedFault in the
     * ARM ARM pseudocode library.
     */
    Fault undefinedFault64(ThreadContext *tc, ExceptionLevel el) const;

    /**
     * Trap an access to SVE registers due to access control bits.
     *
     * @param el Target EL for the trap.
     */
    Fault sveAccessTrap(ExceptionLevel el) const;

    /**
     * Check an SVE access against CPTR_EL2 and CPTR_EL3.
     */
    Fault checkSveTrap(ThreadContext *tc, CPSR cpsr) const;

    /**
     * Check an SVE access against CPACR_EL1, CPTR_EL2, and CPTR_EL3.
     */
    Fault checkSveEnabled(ThreadContext *tc, CPSR cpsr, CPACR cpacr) const;

    /**
     * Get the new PSTATE from an SPSR register in preparation for an
     * exception return.
     *
     * See shared/functions/system/SetPSTATEFromPSR in the ARM ARM
     * pseudocode library.
     */
    CPSR getPSTATEFromPSR(ThreadContext *tc, CPSR cpsr, CPSR spsr) const;

    /**
     * Return true if exceptions normally routed to EL1 are being handled
     * at an Exception level using AArch64, because either EL1 is using
     * AArch64 or TGE is in force and EL2 is using AArch64.
     *
     * See aarch32/exceptions/exceptions/AArch32.GeneralExceptionsToAArch64
     * in the ARM ARM pseudocode library.
     */
    bool generalExceptionsToAArch64(ThreadContext *tc,
                                    ExceptionLevel pstateEL) const;

  public:
    virtual void
    annotateFault(ArmFault *fault) {}

    uint8_t
    getIntWidth() const
    {
        return intWidth;
    }

    /** Returns the byte size of the current instruction */
    ssize_t
    instSize() const
    {
        return (!machInst.thumb || machInst.bigThumb) ? 4 : 2;
    }
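    // Illustrative note (not part of the original header): a 16-bit Thumb
    // instruction has instSize() == 2, so encoding() below returns
    // machInst & mask(16), stripping the metadata that gem5 packs into the
    // upper bits of the 64-bit machInst field; 32-bit ARM and wide Thumb
    // encodings keep the full mask(32) opcode.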
    /**
     * Returns the real encoding of the instruction:
     * the machInst field is in fact always 64-bit wide and
     * contains some instruction metadata, which means it differs
     * from the real opcode.
     */
    MachInst
    encoding() const
    {
        return static_cast<MachInst>(machInst & (mask(instSize() * 8)));
    }

    size_t
    asBytes(void *buf, size_t max_size) override
    {
        return simpleAsBytes(buf, max_size, machInst);
    }

    static unsigned getCurSveVecLenInBits(ThreadContext *tc);

    static unsigned
    getCurSveVecLenInQWords(ThreadContext *tc)
    {
        return getCurSveVecLenInBits(tc) >> 6;
    }

    template<typename T>
    static unsigned
    getCurSveVecLen(ThreadContext *tc)
    {
        return getCurSveVecLenInBits(tc) / (8 * sizeof(T));
    }
};
}

#endif //__ARCH_ARM_INSTS_STATICINST_HH__