static_inst.hh (12104:edd63f9c6184)
/*
 * Copyright (c) 2010-2013, 2016 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2007-2008 The Florida State University
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Stephen Hines
 */
#ifndef __ARCH_ARM_INSTS_STATICINST_HH__
#define __ARCH_ARM_INSTS_STATICINST_HH__

#include <memory>

#include "arch/arm/faults.hh"
#include "arch/arm/system.hh"
#include "arch/arm/utility.hh"
#include "base/trace.hh"
#include "cpu/static_inst.hh"
#include "sim/byteswap.hh"
#include "sim/full_system.hh"

namespace ArmISA
{

class ArmStaticInst : public StaticInst
{
  protected:
    bool aarch64;
    uint8_t intWidth;

    int32_t shift_rm_imm(uint32_t base, uint32_t shamt,
                         uint32_t type, uint32_t cfval) const;
    int32_t shift_rm_rs(uint32_t base, uint32_t shamt,
                        uint32_t type, uint32_t cfval) const;

    bool shift_carry_imm(uint32_t base, uint32_t shamt,
                         uint32_t type, uint32_t cfval) const;
    bool shift_carry_rs(uint32_t base, uint32_t shamt,
                        uint32_t type, uint32_t cfval) const;

    int64_t shiftReg64(uint64_t base, uint64_t shiftAmt,
                       ArmShiftType type, uint8_t width) const;
    int64_t extendReg64(uint64_t base, ArmExtendType type,
                        uint64_t shiftAmt, uint8_t width) const;

    template<int width>
    static inline bool
    saturateOp(int32_t &res, int64_t op1, int64_t op2, bool sub=false)
    {
        int64_t midRes = sub ? (op1 - op2) : (op1 + op2);
        if (bits(midRes, width) != bits(midRes, width - 1)) {
            if (midRes > 0)
                res = (LL(1) << (width - 1)) - 1;
            else
                res = -(LL(1) << (width - 1));
            return true;
        } else {
            res = midRes;
            return false;
        }
    }
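
    // Illustrative only (commentary, not part of the original source):
    // saturateOp<8> clamps to the signed 8-bit range. For op1 = 100,
    // op2 = 100, midRes = 200; bits 8 and 7 of midRes differ, so res is
    // clamped to (1 << 7) - 1 = 127 and the function returns true to
    // flag saturation. For 100 + 20 = 120 the result fits, so res = 120
    // and it returns false.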

    static inline bool
    satInt(int32_t &res, int64_t op, int width)
    {
        width--;
        if (op >= (LL(1) << width)) {
            res = (LL(1) << width) - 1;
            return true;
        } else if (op < -(LL(1) << width)) {
            res = -(LL(1) << width);
            return true;
        } else {
            res = op;
            return false;
        }
    }
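
    // Illustrative only: satInt(res, 300, 8) clamps to the signed 8-bit
    // range, so res = 127 and it returns true; satInt(res, -100, 8)
    // fits, so res = -100 and it returns false.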

    template<int width>
    static inline bool
    uSaturateOp(uint32_t &res, int64_t op1, int64_t op2, bool sub=false)
    {
        int64_t midRes = sub ? (op1 - op2) : (op1 + op2);
        if (midRes >= (LL(1) << width)) {
            res = (LL(1) << width) - 1;
            return true;
        } else if (midRes < 0) {
            res = 0;
            return true;
        } else {
            res = midRes;
            return false;
        }
    }
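
    // Illustrative only: uSaturateOp<8>(res, 200, 100) gives
    // midRes = 300, which exceeds (1 << 8) - 1 = 255, so res = 255 and
    // it returns true; uSaturateOp<8>(res, 100, 200, true) gives
    // midRes = -100, so res = 0 and it also returns true.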

    static inline bool
    uSatInt(int32_t &res, int64_t op, int width)
    {
        if (op >= (LL(1) << width)) {
            res = (LL(1) << width) - 1;
            return true;
        } else if (op < 0) {
            res = 0;
            return true;
        } else {
            res = op;
            return false;
        }
    }
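
    // Illustrative only: uSatInt(res, 1000, 8) clamps to 255 and
    // returns true; uSatInt(res, -1, 8) clamps to 0 and returns true.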

    // Constructor
    ArmStaticInst(const char *mnem, ExtMachInst _machInst,
                  OpClass __opClass)
        : StaticInst(mnem, _machInst, __opClass)
    {
        aarch64 = machInst.aarch64;
        if (bits(machInst, 28, 24) == 0x10)
            intWidth = 64;  // Force 64-bit width for ADR/ADRP
        else
            intWidth = (aarch64 && bits(machInst, 31)) ? 64 : 32;
    }
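
    // Added commentary (not in the original source): in A64, bits 28:24
    // equal to 0b10000 select the PC-relative addressing group (ADR and
    // ADRP). In that group bit 31 is the ADR/ADRP opcode bit rather than
    // a size bit, yet both instructions always write a full 64-bit X
    // register, so the usual "bit 31 set means 64-bit" rule would
    // misclassify ADR and the width is forced to 64 instead.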

    /// Print a register name for disassembly given the unique
    /// dependence tag number (FP or int).
    void printIntReg(std::ostream &os, RegIndex reg_idx) const;
    void printFloatReg(std::ostream &os, RegIndex reg_idx) const;
    void printCCReg(std::ostream &os, RegIndex reg_idx) const;
    void printMiscReg(std::ostream &os, RegIndex reg_idx) const;
    void printMnemonic(std::ostream &os,
                       const std::string &suffix = "",
                       bool withPred = true,
                       bool withCond64 = false,
                       ConditionCode cond64 = COND_UC) const;
    void printTarget(std::ostream &os, Addr target,
                     const SymbolTable *symtab) const;
    void printCondition(std::ostream &os, unsigned code,
                        bool noImplicit=false) const;
    void printMemSymbol(std::ostream &os, const SymbolTable *symtab,
                        const std::string &prefix, const Addr addr,
                        const std::string &suffix) const;
    void printShiftOperand(std::ostream &os, IntRegIndex rm,
                           bool immShift, uint32_t shiftAmt,
                           IntRegIndex rs, ArmShiftType type) const;
    void printExtendOperand(bool firstOperand, std::ostream &os,
                            IntRegIndex rm, ArmExtendType type,
                            int64_t shiftAmt) const;

    void printDataInst(std::ostream &os, bool withImm) const;
    void printDataInst(std::ostream &os, bool withImm, bool immShift, bool s,
                       IntRegIndex rd, IntRegIndex rn, IntRegIndex rm,
                       IntRegIndex rs, uint32_t shiftAmt, ArmShiftType type,
                       uint64_t imm) const;

    void
    advancePC(PCState &pcState) const
    {
        pcState.advance();
    }

    std::string generateDisassembly(Addr pc, const SymbolTable *symtab) const;

    static inline uint32_t
    cpsrWriteByInstr(CPSR cpsr, uint32_t val, SCR scr, NSACR nsacr,
                     uint8_t byteMask, bool affectState, bool nmfi,
                     ThreadContext *tc)
    {
        bool privileged = (cpsr.mode != MODE_USER);
        bool haveVirt = ArmSystem::haveVirtualization(tc);
        bool haveSecurity = ArmSystem::haveSecurity(tc);
        bool isSecure = inSecureState(scr, cpsr) || !haveSecurity;

        uint32_t bitMask = 0;

        if (bits(byteMask, 3)) {
            unsigned lowIdx = affectState ? 24 : 27;
            bitMask = bitMask | mask(31, lowIdx);
        }
        if (bits(byteMask, 2)) {
            bitMask = bitMask | mask(19, 16);
        }
        if (bits(byteMask, 1)) {
            unsigned highIdx = affectState ? 15 : 9;
            unsigned lowIdx = (privileged && (isSecure || scr.aw || haveVirt))
                            ? 8 : 9;
            bitMask = bitMask | mask(highIdx, lowIdx);
        }
        if (bits(byteMask, 0)) {
            if (privileged) {
                bitMask |= 1 << 7;
                if ((!nmfi || !((val >> 6) & 0x1)) &&
                    (isSecure || scr.fw || haveVirt)) {
                    bitMask |= 1 << 6;
                }
                // Now check that the new mode is allowed
                OperatingMode newMode = (OperatingMode) (val & mask(5));
                OperatingMode oldMode = (OperatingMode)(uint32_t)cpsr.mode;
                if (!badMode(newMode)) {
                    bool validModeChange = true;
                    // Check for attempts to enter modes only permitted in
                    // Secure state from Non-secure state. These are Monitor
                    // mode ('10110'), and FIQ mode ('10001') if the Security
                    // Extensions have reserved it.
                    if (!isSecure && newMode == MODE_MON)
                        validModeChange = false;
                    if (!isSecure && newMode == MODE_FIQ && nsacr.rfr == 1)
                        validModeChange = false;
                    // There is no Hyp mode ('11010') in Secure state, so that
                    // is UNPREDICTABLE
                    if (scr.ns == 0 && newMode == MODE_HYP)
                        validModeChange = false;
                    // Cannot move into Hyp mode directly from a Non-secure
                    // PL1 mode
                    if (!isSecure && oldMode != MODE_HYP &&
                        newMode == MODE_HYP)
                        validModeChange = false;
                    // Cannot move out of Hyp mode with this function except
                    // on an exception return
                    if (oldMode == MODE_HYP && newMode != MODE_HYP &&
                        !affectState)
                        validModeChange = false;
                    // Must not change to 64-bit when running in 32-bit mode
                    if (!opModeIs64(oldMode) && opModeIs64(newMode))
                        validModeChange = false;

                    // If we passed all of the above then set the bit mask to
                    // copy the mode across
                    if (validModeChange) {
                        bitMask = bitMask | mask(5);
                    } else {
                        warn_once("Illegal change to CPSR mode attempted\n");
                    }
                } else {
                    warn_once("Ignoring write of bad mode to CPSR.\n");
                }
            }
            if (affectState)
                bitMask = bitMask | (1 << 5);
        }

        return ((uint32_t)cpsr & ~bitMask) | (val & bitMask);
    }
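
    // A sketch of a call site (illustrative, not from the original
    // file): an MSR-to-CPSR implementation reads the relevant system
    // registers, then lets this helper compute which bits the write may
    // actually touch:
    //   SCR scr = tc->readMiscReg(MISCREG_SCR);
    //   NSACR nsacr = tc->readMiscReg(MISCREG_NSACR);
    //   uint32_t new_cpsr = cpsrWriteByInstr(old_cpsr, val, scr, nsacr,
    //                                        byteMask, false, nmfi, tc);
    // byteMask selects which CPSR bytes may be written (bit 3 covers the
    // flags byte, bit 0 the control byte); affectState is true only for
    // writes that may change execution state, such as exception returns.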

    static inline uint32_t
    spsrWriteByInstr(uint32_t spsr, uint32_t val,
                     uint8_t byteMask, bool affectState)
    {
        uint32_t bitMask = 0;

        if (bits(byteMask, 3))
            bitMask = bitMask | mask(31, 24);
        if (bits(byteMask, 2))
            bitMask = bitMask | mask(19, 16);
        if (bits(byteMask, 1))
            bitMask = bitMask | mask(15, 8);
        if (bits(byteMask, 0))
            bitMask = bitMask | mask(7, 0);

        return ((spsr & ~bitMask) | (val & bitMask));
    }
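
    // Illustrative only: with byteMask = 0x9 (bits 3 and 0 set) the
    // write may touch SPSR bits 31:24 and 7:0, while bits 23:8 keep
    // their old values.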

    template<class XC>
    static inline Addr
    readPC(XC *xc)
    {
        return xc->pcState().instPC();
    }

    template<class XC>
    static inline void
    setNextPC(XC *xc, Addr val)
    {
        PCState pc = xc->pcState();
        pc.instNPC(val);
        xc->pcState(pc);
    }

    template<class T>
    static inline T
    cSwap(T val, bool big)
    {
        if (big) {
            return gtobe(val);
        } else {
            return gtole(val);
        }
    }

    template<class T, class E>
    static inline T
    cSwap(T val, bool big)
    {
        const unsigned count = sizeof(T) / sizeof(E);
        union {
            T tVal;
            E eVals[count];
        } conv;
        conv.tVal = htog(val);
        if (big) {
            for (unsigned i = 0; i < count; i++) {
                conv.eVals[i] = gtobe(conv.eVals[i]);
            }
        } else {
            for (unsigned i = 0; i < count; i++) {
                conv.eVals[i] = gtole(conv.eVals[i]);
            }
        }
        return gtoh(conv.tVal);
    }
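
    // Added commentary (not in the original source): the one-parameter
    // cSwap<T> converts the whole value between guest byte order and the
    // requested big/little order. The two-parameter cSwap<T, E> instead
    // converts each E-sized element of val independently while leaving
    // the element order unchanged; for instance,
    // cSwap<uint64_t, uint16_t>(val, big) swaps bytes within each of the
    // four 16-bit halfwords, as needed for element-wise endian accesses.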

    // Perform an interworking branch.
    template<class XC>
    static inline void
    setIWNextPC(XC *xc, Addr val)
    {
        PCState pc = xc->pcState();
        pc.instIWNPC(val);
        xc->pcState(pc);
    }

    // Perform an interworking branch in ARM mode, a regular branch
    // otherwise.
    template<class XC>
    static inline void
    setAIWNextPC(XC *xc, Addr val)
    {
        PCState pc = xc->pcState();
        pc.instAIWNPC(val);
        xc->pcState(pc);
    }
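
    // Added commentary (not in the original source): in an interworking
    // branch (BX-style), bit 0 of the written address selects the
    // instruction set: 1 switches to Thumb, 0 to ARM. setIWNextPC always
    // applies this rule, while setAIWNextPC applies it only when the
    // branch is executed from ARM mode and behaves as a regular branch
    // otherwise, matching the comments above.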

    inline Fault
    disabledFault() const
    {
        return std::make_shared<UndefinedInstruction>(machInst, false,
                                                      mnemonic, true);
    }

    /**
     * Trap an access to Advanced SIMD or FP registers due to access
     * control bits.
     *
     * See aarch64/exceptions/traps/AArch64.AdvSIMDFPAccessTrap in the
     * ARM ARM pseudocode library.
     *
     * @param el Target EL for the trap
     */
    Fault advSIMDFPAccessTrap64(ExceptionLevel el) const;

    /**
     * Check an Advanced SIMD access against CPTR_EL2 and CPTR_EL3.
     *
     * See aarch64/exceptions/traps/AArch64.CheckFPAdvSIMDTrap in the
     * ARM ARM pseudocode library.
     */
    Fault checkFPAdvSIMDTrap64(ThreadContext *tc, CPSR cpsr) const;

    /**
     * Check an Advanced SIMD access against CPACR_EL1, CPTR_EL2, and
     * CPTR_EL3.
     *
     * See aarch64/exceptions/traps/AArch64.CheckFPAdvSIMDEnabled in the
     * ARM ARM pseudocode library.
     */
    Fault checkFPAdvSIMDEnabled64(ThreadContext *tc,
                                  CPSR cpsr, CPACR cpacr) const;

    /**
     * Check if a VFP/SIMD access from aarch32 should be allowed.
     *
     * See aarch32/exceptions/traps/AArch32.CheckAdvSIMDOrFPEnabled in the
     * ARM ARM pseudocode library.
     */
    Fault checkAdvSIMDOrFPEnabled32(ThreadContext *tc,
                                    CPSR cpsr, CPACR cpacr,
                                    NSACR nsacr, FPEXC fpexc,
                                    bool fpexc_check, bool advsimd) const;

    /**
     * Get the new PSTATE from an SPSR register in preparation for an
     * exception return.
     *
     * See shared/functions/system/SetPSTATEFromPSR in the ARM ARM
     * pseudocode library.
     */
    CPSR getPSTATEFromPSR(ThreadContext *tc, CPSR cpsr, CPSR spsr) const;

  public:
    virtual void
    annotateFault(ArmFault *fault) {}
};
}

#endif //__ARCH_ARM_INSTS_STATICINST_HH__