static_inst.cc (14171:58d343fa3194) static_inst.cc (14172:bba55ff08279)
1/*
2 * Copyright (c) 2010-2014, 2016-2019 ARM Limited
3 * Copyright (c) 2013 Advanced Micro Devices, Inc.
4 * All rights reserved
5 *
6 * The license below extends only to copyright in the software and shall
7 * not be construed as granting a license to any other intellectual
8 * property including but not limited to intellectual property relating
9 * to a hardware implementation of the functionality of the software
10 * licensed hereunder. You may use the software subject to the license
11 * terms below provided that you ensure that this notice is replicated
12 * unmodified and in its entirety in all distributions of the software,
13 * modified or unmodified, in source code or in binary form.
14 *
15 * Copyright (c) 2007-2008 The Florida State University
16 * All rights reserved.
17 *
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions are
20 * met: redistributions of source code must retain the above copyright
21 * notice, this list of conditions and the following disclaimer;
22 * redistributions in binary form must reproduce the above copyright
23 * notice, this list of conditions and the following disclaimer in the
24 * documentation and/or other materials provided with the distribution;
25 * neither the name of the copyright holders nor the names of its
26 * contributors may be used to endorse or promote products derived from
27 * this software without specific prior written permission.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
30 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
31 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
32 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
33 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
34 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
35 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
36 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
37 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
38 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
39 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
40 *
41 * Authors: Stephen Hines
42 */
43
44#include "arch/arm/insts/static_inst.hh"
45
46#include "arch/arm/faults.hh"
47#include "arch/arm/isa.hh"
48#include "base/condcodes.hh"
49#include "base/cprintf.hh"
50#include "base/loader/symtab.hh"
51#include "cpu/reg_class.hh"
52
53namespace ArmISA
54{
// Shift Rm by an immediate value
// Implements the AArch32 immediate shifter operand, including the special
// zero-amount encodings (LSR/ASR #0 mean a 32-bit shift, ROR #0 means RRX).
// cfval is the current carry flag, only consumed by the RRX case.
int32_t
ArmStaticInst::shift_rm_imm(uint32_t base, uint32_t shamt,
                            uint32_t type, uint32_t cfval) const
{
    assert(shamt < 32);
    ArmShiftType shiftType;
    shiftType = (ArmShiftType)type;

    switch (shiftType)
    {
      case LSL:
        return base << shamt;
      case LSR:
        // In the immediate encoding, shamt == 0 means LSR #32: all bits
        // are shifted out.
        if (shamt == 0)
            return 0;
        else
            return base >> shamt;
      case ASR:
        // shamt == 0 encodes ASR #32: the result is 32 copies of the
        // sign bit. The -(...) term replicates bit 31 into the high bits.
        if (shamt == 0)
            return (base >> 31) | -((base & (1 << 31)) >> 31);
        else
            return (base >> shamt) | -((base & (1 << 31)) >> shamt);
      case ROR:
        // ROR #0 encodes RRX: shift right one, carry rotates in at bit 31.
        if (shamt == 0)
            return (cfval << 31) | (base >> 1); // RRX
        else
            return (base << (32 - shamt)) | (base >> shamt);
      default:
        ccprintf(std::cerr, "Unhandled shift type\n");
        exit(1);
        break;
    }
    return 0;
}
90
// AArch64 register shift: shift 'base' by 'shiftAmt' within an operand of
// 'width' bits. The amount wraps modulo the width, per the architecture.
int64_t
ArmStaticInst::shiftReg64(uint64_t base, uint64_t shiftAmt,
                          ArmShiftType type, uint8_t width) const
{
    shiftAmt = shiftAmt % width;
    ArmShiftType shiftType;
    shiftType = (ArmShiftType)type;

    switch (shiftType)
    {
      case LSL:
        return base << shiftAmt;
      case LSR:
        if (shiftAmt == 0)
            return base;
        else
            return (base & mask(width)) >> shiftAmt;
      case ASR:
        if (shiftAmt == 0) {
            return base;
        } else {
            // NOTE(review): this arm uses the member intWidth while the
            // other cases use the 'width' parameter — presumably callers
            // always pass width == intWidth; confirm.
            int sign_bit = bits(base, intWidth - 1);
            base >>= shiftAmt;
            base = sign_bit ? (base | ~mask(intWidth - shiftAmt)) : base;
            return base & mask(intWidth);
        }
      case ROR:
        if (shiftAmt == 0)
            return base;
        else
            return (base << (width - shiftAmt)) | (base >> shiftAmt);
      default:
        ccprintf(std::cerr, "Unhandled shift type\n");
        exit(1);
        break;
    }
    return 0;
}
129
// AArch64 extended-register operand: extract the low 8/16/32/64 bits of
// 'base' per the extend type, shift left by shiftAmt, then zero- or
// sign-extend the result into a 'width'-bit value.
int64_t
ArmStaticInst::extendReg64(uint64_t base, ArmExtendType type,
                           uint64_t shiftAmt, uint8_t width) const
{
    // Decode the extend type into a source field length and whether the
    // shifted field is sign-extended.
    bool sign_extend = false;
    int len = 0;
    switch (type) {
      case UXTB:
        len = 8;
        break;
      case UXTH:
        len = 16;
        break;
      case UXTW:
        len = 32;
        break;
      case UXTX:
        len = 64;
        break;
      case SXTB:
        len = 8;
        sign_extend = true;
        break;
      case SXTH:
        len = 16;
        sign_extend = true;
        break;
      case SXTW:
        len = 32;
        sign_extend = true;
        break;
      case SXTX:
        len = 64;
        sign_extend = true;
        break;
    }
    // Clamp the field so that, once shifted, it still fits in 'width' bits.
    len = len <= width - shiftAmt ? len : width - shiftAmt;
    uint64_t tmp = (uint64_t) bits(base, len - 1, 0) << shiftAmt;
    if (sign_extend) {
        // Replicate the top bit of the shifted field into the high bits.
        int sign_bit = bits(tmp, len + shiftAmt - 1);
        tmp = sign_bit ? (tmp | ~mask(len + shiftAmt)) : tmp;
    }
    // Truncate to the destination operand width.
    return tmp & mask(width);
}
174
// Shift Rm by Rs
// Register-specified shift: 'shamt' is the value read from Rs and, unlike
// the immediate forms, may be 32 or larger.
int32_t
ArmStaticInst::shift_rm_rs(uint32_t base, uint32_t shamt,
                           uint32_t type, uint32_t cfval) const
{
    enum ArmShiftType shiftType;
    shiftType = (enum ArmShiftType) type;

    switch (shiftType)
    {
      case LSL:
        // Shifting by the full word or more leaves zero.
        if (shamt >= 32)
            return 0;
        else
            return base << shamt;
      case LSR:
        if (shamt >= 32)
            return 0;
        else
            return base >> shamt;
      case ASR:
        // For shamt >= 32 the result is 32 copies of the sign bit.
        if (shamt >= 32)
            return (base >> 31) | -((base & (1 << 31)) >> 31);
        else
            return (base >> shamt) | -((base & (1 << 31)) >> shamt);
      case ROR:
        // Rotation depends only on the low five bits of Rs.
        shamt = shamt & 0x1f;
        if (shamt == 0)
            return base;
        else
            return (base << (32 - shamt)) | (base >> shamt);
      default:
        ccprintf(std::cerr, "Unhandled shift type\n");
        exit(1);
        break;
    }
    return 0;
}
213
214
// Generate C for a shift by immediate
// Returns the carry-out of the immediate shifter operand; 'cfval' is the
// current carry flag, returned when the shift leaves carry unchanged.
bool
ArmStaticInst::shift_carry_imm(uint32_t base, uint32_t shamt,
                               uint32_t type, uint32_t cfval) const
{
    enum ArmShiftType shiftType;
    shiftType = (enum ArmShiftType) type;

    switch (shiftType)
    {
      case LSL:
        // LSL #0 does not affect the carry flag.
        if (shamt == 0)
            return cfval;
        else
            return (base >> (32 - shamt)) & 1;
      case LSR:
        // LSR #0 encodes LSR #32: carry is the old bit 31.
        if (shamt == 0)
            return (base >> 31);
        else
            return (base >> (shamt - 1)) & 1;
      case ASR:
        // ASR #0 encodes ASR #32: carry is the sign bit.
        if (shamt == 0)
            return (base >> 31);
        else
            return (base >> (shamt - 1)) & 1;
      case ROR:
        shamt = shamt & 0x1f;
        // ROR #0 encodes RRX: carry becomes the old bit 0.
        if (shamt == 0)
            return (base & 1); // RRX
        else
            return (base >> (shamt - 1)) & 1;
      default:
        ccprintf(std::cerr, "Unhandled shift type\n");
        exit(1);
        break;
    }
    return 0;
}
253
254
// Generate C for a shift by Rs
// Carry-out for a register-specified shift; 'shamt' is the value read from
// Rs and may exceed 32. A zero amount leaves the carry flag unchanged.
bool
ArmStaticInst::shift_carry_rs(uint32_t base, uint32_t shamt,
                              uint32_t type, uint32_t cfval) const
{
    enum ArmShiftType shiftType;
    shiftType = (enum ArmShiftType) type;

    if (shamt == 0)
        return cfval;

    switch (shiftType)
    {
      case LSL:
        // Shifting out more than the whole word clears the carry.
        if (shamt > 32)
            return 0;
        else
            return (base >> (32 - shamt)) & 1;
      case LSR:
        if (shamt > 32)
            return 0;
        else
            return (base >> (shamt - 1)) & 1;
      case ASR:
        // For amounts >= 32 the carry is the sign bit.
        if (shamt > 32)
            shamt = 32;
        return (base >> (shamt - 1)) & 1;
      case ROR:
        // Only the low five bits matter; a non-zero multiple of 32
        // behaves like a rotation by 32.
        shamt = shamt & 0x1f;
        if (shamt == 0)
            shamt = 32;
        return (base >> (shamt - 1)) & 1;
      default:
        ccprintf(std::cerr, "Unhandled shift type\n");
        exit(1);
        break;
    }
    return 0;
}
294
// Print an integer register using the disassembly name appropriate to the
// current execution state; opWidth == 0 means "use the instruction's
// native operand width" (intWidth).
void
ArmStaticInst::printIntReg(std::ostream &os, RegIndex reg_idx,
                           uint8_t opWidth) const
{
    if (opWidth == 0)
        opWidth = intWidth;
    if (aarch64) {
        if (reg_idx == INTREG_UREG0)
            ccprintf(os, "ureg0");
        else if (reg_idx == INTREG_SPX)
            // Stack pointer: "wsp" for 32-bit views, "sp" otherwise.
            ccprintf(os, "%s%s", (opWidth == 32) ? "w" : "", "sp");
        else if (reg_idx == INTREG_X31)
            // Register 31 reads as the zero register: wzr / xzr.
            ccprintf(os, "%szr", (opWidth == 32) ? "w" : "x");
        else
            ccprintf(os, "%s%d", (opWidth == 32) ? "w" : "x", reg_idx);
    } else {
        // AArch32: use the conventional aliases for pc/sp/fp/lr.
        switch (reg_idx) {
          case PCReg:
            ccprintf(os, "pc");
            break;
          case StackPointerReg:
            ccprintf(os, "sp");
            break;
          case FramePointerReg:
            ccprintf(os, "fp");
            break;
          case ReturnAddressReg:
            ccprintf(os, "lr");
            break;
          default:
            ccprintf(os, "r%d", reg_idx);
            break;
        }
    }
}
330
331void ArmStaticInst::printPFflags(std::ostream &os, int flag) const
332{
333 const char *flagtoprfop[]= { "PLD", "PLI", "PST", "Reserved"};
334 const char *flagtotarget[] = { "L1", "L2", "L3", "Reserved"};
335 const char *flagtopolicy[] = { "KEEP", "STRM"};
336
337 ccprintf(os, "%s%s%s", flagtoprfop[(flag>>3)&3],
338 flagtotarget[(flag>>1)&3], flagtopolicy[flag&1]);
339}
340
341void
342ArmStaticInst::printFloatReg(std::ostream &os, RegIndex reg_idx) const
343{
344 ccprintf(os, "f%d", reg_idx);
345}
346
347void
348ArmStaticInst::printVecReg(std::ostream &os, RegIndex reg_idx,
349 bool isSveVecReg) const
350{
351 ccprintf(os, "%s%d", isSveVecReg ? "z" : "v", reg_idx);
352}
353
354void
355ArmStaticInst::printVecPredReg(std::ostream &os, RegIndex reg_idx) const
356{
357 ccprintf(os, "p%d", reg_idx);
358}
359
// Condition-code registers print as "cc_<name>" via the ISA name table.
void
ArmStaticInst::printCCReg(std::ostream &os, RegIndex reg_idx) const
{
    ccprintf(os, "cc_%s", ArmISA::ccRegName[reg_idx]);
}
365
// Print a miscellaneous (system) register by its architected name.
void
ArmStaticInst::printMiscReg(std::ostream &os, RegIndex reg_idx) const
{
    // Indexing past the table would read garbage; catch bad decodes here.
    assert(reg_idx < NUM_MISCREGS);
    ccprintf(os, "%s", ArmISA::miscRegName[reg_idx]);
}
372
// Print the instruction mnemonic, optionally followed by the AArch32
// predicate (withPred), an AArch64 ".cond" (withCond64), a caller-supplied
// suffix, and the ".w" wide-encoding marker for 32-bit Thumb.
void
ArmStaticInst::printMnemonic(std::ostream &os,
                             const std::string &suffix,
                             bool withPred,
                             bool withCond64,
                             ConditionCode cond64) const
{
    os << " " << mnemonic;
    if (withPred && !aarch64) {
        // AArch32 predication comes from the encoded condition field.
        printCondition(os, machInst.condCode);
        os << suffix;
    } else if (withCond64) {
        // AArch64 conditional forms spell the condition as ".cond".
        os << ".";
        printCondition(os, cond64);
        os << suffix;
    }
    // 32-bit Thumb encodings are marked with the ".w" qualifier.
    if (machInst.bigThumb)
        os << ".w";
    os << " ";
}
393
394void
395ArmStaticInst::printTarget(std::ostream &os, Addr target,
396 const SymbolTable *symtab) const
397{
398 Addr symbolAddr;
399 std::string symbol;
400
401 if (symtab && symtab->findNearestSymbol(target, symbol, symbolAddr)) {
402 ccprintf(os, "<%s", symbol);
403 if (symbolAddr != target)
404 ccprintf(os, "+%d>", target - symbolAddr);
405 else
406 ccprintf(os, ">");
407 } else {
408 ccprintf(os, "%#x", target);
409 }
410}
411
// Print the two-letter name of a condition code. The always/unconditional
// codes are implicit in ARM assembly and are only spelled out when the
// caller asks for them via noImplicit.
void
ArmStaticInst::printCondition(std::ostream &os,
                              unsigned code,
                              bool noImplicit) const
{
    switch (code) {
      case COND_EQ:
        os << "eq";
        break;
      case COND_NE:
        os << "ne";
        break;
      case COND_CS:
        os << "cs";
        break;
      case COND_CC:
        os << "cc";
        break;
      case COND_MI:
        os << "mi";
        break;
      case COND_PL:
        os << "pl";
        break;
      case COND_VS:
        os << "vs";
        break;
      case COND_VC:
        os << "vc";
        break;
      case COND_HI:
        os << "hi";
        break;
      case COND_LS:
        os << "ls";
        break;
      case COND_GE:
        os << "ge";
        break;
      case COND_LT:
        os << "lt";
        break;
      case COND_GT:
        os << "gt";
        break;
      case COND_LE:
        os << "le";
        break;
      case COND_AL:
        // This one is implicit.
        if (noImplicit)
            os << "al";
        break;
      case COND_UC:
        // Unconditional.
        if (noImplicit)
            os << "uc";
        break;
      default:
        panic("Unrecognized condition code %d.\n", code);
    }
}
474
475void
476ArmStaticInst::printMemSymbol(std::ostream &os,
477 const SymbolTable *symtab,
478 const std::string &prefix,
479 const Addr addr,
480 const std::string &suffix) const
481{
482 Addr symbolAddr;
483 std::string symbol;
484 if (symtab && symtab->findNearestSymbol(addr, symbol, symbolAddr)) {
485 ccprintf(os, "%s%s", prefix, symbol);
486 if (symbolAddr != addr)
487 ccprintf(os, "+%d", addr - symbolAddr);
488 ccprintf(os, suffix);
489 }
490}
491
492void
493ArmStaticInst::printShiftOperand(std::ostream &os,
494 IntRegIndex rm,
495 bool immShift,
496 uint32_t shiftAmt,
497 IntRegIndex rs,
498 ArmShiftType type) const
499{
500 bool firstOp = false;
501
502 if (rm != INTREG_ZERO) {
503 printIntReg(os, rm);
504 }
505
506 bool done = false;
507
508 if ((type == LSR || type == ASR) && immShift && shiftAmt == 0)
509 shiftAmt = 32;
510
511 switch (type) {
512 case LSL:
513 if (immShift && shiftAmt == 0) {
514 done = true;
515 break;
516 }
517 if (!firstOp)
518 os << ", ";
519 os << "LSL";
520 break;
521 case LSR:
522 if (!firstOp)
523 os << ", ";
524 os << "LSR";
525 break;
526 case ASR:
527 if (!firstOp)
528 os << ", ";
529 os << "ASR";
530 break;
531 case ROR:
532 if (immShift && shiftAmt == 0) {
533 if (!firstOp)
534 os << ", ";
535 os << "RRX";
536 done = true;
537 break;
538 }
539 if (!firstOp)
540 os << ", ";
541 os << "ROR";
542 break;
543 default:
544 panic("Tried to disassemble unrecognized shift type.\n");
545 }
546 if (!done) {
547 if (!firstOp)
548 os << " ";
549 if (immShift)
550 os << "#" << shiftAmt;
551 else
552 printIntReg(os, rs);
553 }
554}
555
556void
557ArmStaticInst::printExtendOperand(bool firstOperand, std::ostream &os,
558 IntRegIndex rm, ArmExtendType type,
559 int64_t shiftAmt) const
560{
561 if (!firstOperand)
562 ccprintf(os, ", ");
563 printIntReg(os, rm);
564 if (type == UXTX && shiftAmt == 0)
565 return;
566 switch (type) {
567 case UXTB: ccprintf(os, ", UXTB");
568 break;
569 case UXTH: ccprintf(os, ", UXTH");
570 break;
571 case UXTW: ccprintf(os, ", UXTW");
572 break;
573 case UXTX: ccprintf(os, ", LSL");
574 break;
575 case SXTB: ccprintf(os, ", SXTB");
576 break;
577 case SXTH: ccprintf(os, ", SXTH");
578 break;
579 case SXTW: ccprintf(os, ", SXTW");
580 break;
581 case SXTX: ccprintf(os, ", SXTW");
582 break;
583 }
584 if (type == UXTX || shiftAmt)
585 ccprintf(os, " #%d", shiftAmt);
586}
587
// Common disassembly for data-processing instructions:
// "<mnemonic>[s] rd, rn, (#imm | rm <shift>)", omitting any operand that
// is the zero register.
void
ArmStaticInst::printDataInst(std::ostream &os, bool withImm,
        bool immShift, bool s, IntRegIndex rd, IntRegIndex rn,
        IntRegIndex rm, IntRegIndex rs, uint32_t shiftAmt,
        ArmShiftType type, uint64_t imm) const
{
    // The "s" suffix marks flag-setting variants.
    printMnemonic(os, s ? "s" : "");
    bool firstOp = true;

    // Destination
    if (rd != INTREG_ZERO) {
        firstOp = false;
        printIntReg(os, rd);
    }

    // Source 1.
    if (rn != INTREG_ZERO) {
        if (!firstOp)
            os << ", ";
        firstOp = false;
        printIntReg(os, rn);
    }

    // Final operand: an immediate or a (possibly shifted) register.
    if (!firstOp)
        os << ", ";
    if (withImm) {
        ccprintf(os, "#%ld", imm);
    } else {
        printShiftOperand(os, rm, immShift, shiftAmt, rs, type);
    }
}
619
// Default disassembly: just the (possibly predicated) mnemonic.
// Subclasses with operands override this.
std::string
ArmStaticInst::generateDisassembly(Addr pc,
                                   const SymbolTable *symtab) const
{
    std::stringstream ss;
    printMnemonic(ss);
    return ss.str();
}
628
// Build the fault raised by an AArch32 BKPT with immediate 'imm'.
Fault
ArmStaticInst::softwareBreakpoint32(ExecContext *xc, uint16_t imm) const
{
    const auto tc = xc->tcBase();
    const HCR hcr = tc->readMiscReg(MISCREG_HCR_EL2);
    const HDCR mdcr = tc->readMiscRegNoEffect(MISCREG_MDCR_EL2);
    // The breakpoint is routed to AArch64 when EL1 itself uses AArch64,
    // or when a non-secure AArch64 EL2 is set up to catch debug
    // exceptions (HCR_EL2.TGE or MDCR_EL2.TDE).
    if ((ArmSystem::haveEL(tc, EL2) && !inSecureState(tc) &&
         !ELIs32(tc, EL2) && (hcr.tge == 1 || mdcr.tde == 1)) ||
         !ELIs32(tc, EL1)) {
        // Route to AArch64 Software Breakpoint
        return std::make_shared<SoftwareBreakpoint>(machInst, imm);
    } else {
        // Execute AArch32 Software Breakpoint
        return std::make_shared<PrefetchAbort>(readPC(xc),
                                               ArmFault::DebugEvent);
    }
}
646
// Build the trap fault for a disabled/trapped AdvSIMD or FP access,
// routed to the given target exception level.
// NOTE(review): 0x1E00000 is the ISS payload passed with
// EC_TRAPPED_SIMD_FP — presumably the architected "all accesses"
// encoding; confirm against the ArmFault ISS layout.
Fault
ArmStaticInst::advSIMDFPAccessTrap64(ExceptionLevel el) const
{
    switch (el) {
      case EL1:
        return std::make_shared<SupervisorTrap>(machInst, 0x1E00000,
                                                EC_TRAPPED_SIMD_FP);
      case EL2:
        return std::make_shared<HypervisorTrap>(machInst, 0x1E00000,
                                                EC_TRAPPED_SIMD_FP);
      case EL3:
        return std::make_shared<SecureMonitorTrap>(machInst, 0x1E00000,
                                                   EC_TRAPPED_SIMD_FP);

      default:
        panic("Illegal EL in advSIMDFPAccessTrap64\n");
    }
}
665
666
// Check the EL2/EL3 trap controls for AdvSIMD/FP accesses and return the
// corresponding trap fault, or NoFault if no trap applies.
Fault
ArmStaticInst::checkFPAdvSIMDTrap64(ThreadContext *tc, CPSR cpsr) const
{
    // EL2 traps non-secure FP/SIMD use via CPTR_EL2.TFP.
    if (ArmSystem::haveVirtualization(tc) && !inSecureState(tc)) {
        HCPTR cptrEnCheck = tc->readMiscReg(MISCREG_CPTR_EL2);
        if (cptrEnCheck.tfp)
            return advSIMDFPAccessTrap64(EL2);
    }

    // EL3 traps FP/SIMD use from any state via CPTR_EL3.TFP.
    if (ArmSystem::haveSecurity(tc)) {
        HCPTR cptrEnCheck = tc->readMiscReg(MISCREG_CPTR_EL3);
        if (cptrEnCheck.tfp)
            return advSIMDFPAccessTrap64(EL3);
    }

    return NoFault;
}
684
// Check whether AdvSIMD/FP is enabled at the current EL, returning a trap
// fault when it is not.
Fault
ArmStaticInst::checkFPAdvSIMDEnabled64(ThreadContext *tc,
                                       CPSR cpsr, CPACR cpacr) const
{
    const ExceptionLevel el = currEL(tc);
    // CPACR_EL1.FPEN: 0b11 enables FP/SIMD at EL0 and EL1; a set low bit
    // enables it at EL1 only.
    if ((el == EL0 && cpacr.fpen != 0x3) ||
        (el == EL1 && !(cpacr.fpen & 0x1)))
        return advSIMDFPAccessTrap64(EL1);

    // Then apply the EL2/EL3 trap controls.
    return checkFPAdvSIMDTrap64(tc, cpsr);
}
696
697Fault
698ArmStaticInst::checkAdvSIMDOrFPEnabled32(ThreadContext *tc,
699 CPSR cpsr, CPACR cpacr,
700 NSACR nsacr, FPEXC fpexc,
701 bool fpexc_check, bool advsimd) const
702{
703 const bool have_virtualization = ArmSystem::haveVirtualization(tc);
704 const bool have_security = ArmSystem::haveSecurity(tc);
705 const bool is_secure = inSecureState(tc);
1/*
2 * Copyright (c) 2010-2014, 2016-2019 ARM Limited
3 * Copyright (c) 2013 Advanced Micro Devices, Inc.
4 * All rights reserved
5 *
6 * The license below extends only to copyright in the software and shall
7 * not be construed as granting a license to any other intellectual
8 * property including but not limited to intellectual property relating
9 * to a hardware implementation of the functionality of the software
10 * licensed hereunder. You may use the software subject to the license
11 * terms below provided that you ensure that this notice is replicated
12 * unmodified and in its entirety in all distributions of the software,
13 * modified or unmodified, in source code or in binary form.
14 *
15 * Copyright (c) 2007-2008 The Florida State University
16 * All rights reserved.
17 *
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions are
20 * met: redistributions of source code must retain the above copyright
21 * notice, this list of conditions and the following disclaimer;
22 * redistributions in binary form must reproduce the above copyright
23 * notice, this list of conditions and the following disclaimer in the
24 * documentation and/or other materials provided with the distribution;
25 * neither the name of the copyright holders nor the names of its
26 * contributors may be used to endorse or promote products derived from
27 * this software without specific prior written permission.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
30 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
31 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
32 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
33 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
34 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
35 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
36 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
37 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
38 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
39 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
40 *
41 * Authors: Stephen Hines
42 */
43
44#include "arch/arm/insts/static_inst.hh"
45
46#include "arch/arm/faults.hh"
47#include "arch/arm/isa.hh"
48#include "base/condcodes.hh"
49#include "base/cprintf.hh"
50#include "base/loader/symtab.hh"
51#include "cpu/reg_class.hh"
52
53namespace ArmISA
54{
55// Shift Rm by an immediate value
56int32_t
57ArmStaticInst::shift_rm_imm(uint32_t base, uint32_t shamt,
58 uint32_t type, uint32_t cfval) const
59{
60 assert(shamt < 32);
61 ArmShiftType shiftType;
62 shiftType = (ArmShiftType)type;
63
64 switch (shiftType)
65 {
66 case LSL:
67 return base << shamt;
68 case LSR:
69 if (shamt == 0)
70 return 0;
71 else
72 return base >> shamt;
73 case ASR:
74 if (shamt == 0)
75 return (base >> 31) | -((base & (1 << 31)) >> 31);
76 else
77 return (base >> shamt) | -((base & (1 << 31)) >> shamt);
78 case ROR:
79 if (shamt == 0)
80 return (cfval << 31) | (base >> 1); // RRX
81 else
82 return (base << (32 - shamt)) | (base >> shamt);
83 default:
84 ccprintf(std::cerr, "Unhandled shift type\n");
85 exit(1);
86 break;
87 }
88 return 0;
89}
90
91int64_t
92ArmStaticInst::shiftReg64(uint64_t base, uint64_t shiftAmt,
93 ArmShiftType type, uint8_t width) const
94{
95 shiftAmt = shiftAmt % width;
96 ArmShiftType shiftType;
97 shiftType = (ArmShiftType)type;
98
99 switch (shiftType)
100 {
101 case LSL:
102 return base << shiftAmt;
103 case LSR:
104 if (shiftAmt == 0)
105 return base;
106 else
107 return (base & mask(width)) >> shiftAmt;
108 case ASR:
109 if (shiftAmt == 0) {
110 return base;
111 } else {
112 int sign_bit = bits(base, intWidth - 1);
113 base >>= shiftAmt;
114 base = sign_bit ? (base | ~mask(intWidth - shiftAmt)) : base;
115 return base & mask(intWidth);
116 }
117 case ROR:
118 if (shiftAmt == 0)
119 return base;
120 else
121 return (base << (width - shiftAmt)) | (base >> shiftAmt);
122 default:
123 ccprintf(std::cerr, "Unhandled shift type\n");
124 exit(1);
125 break;
126 }
127 return 0;
128}
129
130int64_t
131ArmStaticInst::extendReg64(uint64_t base, ArmExtendType type,
132 uint64_t shiftAmt, uint8_t width) const
133{
134 bool sign_extend = false;
135 int len = 0;
136 switch (type) {
137 case UXTB:
138 len = 8;
139 break;
140 case UXTH:
141 len = 16;
142 break;
143 case UXTW:
144 len = 32;
145 break;
146 case UXTX:
147 len = 64;
148 break;
149 case SXTB:
150 len = 8;
151 sign_extend = true;
152 break;
153 case SXTH:
154 len = 16;
155 sign_extend = true;
156 break;
157 case SXTW:
158 len = 32;
159 sign_extend = true;
160 break;
161 case SXTX:
162 len = 64;
163 sign_extend = true;
164 break;
165 }
166 len = len <= width - shiftAmt ? len : width - shiftAmt;
167 uint64_t tmp = (uint64_t) bits(base, len - 1, 0) << shiftAmt;
168 if (sign_extend) {
169 int sign_bit = bits(tmp, len + shiftAmt - 1);
170 tmp = sign_bit ? (tmp | ~mask(len + shiftAmt)) : tmp;
171 }
172 return tmp & mask(width);
173}
174
175// Shift Rm by Rs
176int32_t
177ArmStaticInst::shift_rm_rs(uint32_t base, uint32_t shamt,
178 uint32_t type, uint32_t cfval) const
179{
180 enum ArmShiftType shiftType;
181 shiftType = (enum ArmShiftType) type;
182
183 switch (shiftType)
184 {
185 case LSL:
186 if (shamt >= 32)
187 return 0;
188 else
189 return base << shamt;
190 case LSR:
191 if (shamt >= 32)
192 return 0;
193 else
194 return base >> shamt;
195 case ASR:
196 if (shamt >= 32)
197 return (base >> 31) | -((base & (1 << 31)) >> 31);
198 else
199 return (base >> shamt) | -((base & (1 << 31)) >> shamt);
200 case ROR:
201 shamt = shamt & 0x1f;
202 if (shamt == 0)
203 return base;
204 else
205 return (base << (32 - shamt)) | (base >> shamt);
206 default:
207 ccprintf(std::cerr, "Unhandled shift type\n");
208 exit(1);
209 break;
210 }
211 return 0;
212}
213
214
215// Generate C for a shift by immediate
216bool
217ArmStaticInst::shift_carry_imm(uint32_t base, uint32_t shamt,
218 uint32_t type, uint32_t cfval) const
219{
220 enum ArmShiftType shiftType;
221 shiftType = (enum ArmShiftType) type;
222
223 switch (shiftType)
224 {
225 case LSL:
226 if (shamt == 0)
227 return cfval;
228 else
229 return (base >> (32 - shamt)) & 1;
230 case LSR:
231 if (shamt == 0)
232 return (base >> 31);
233 else
234 return (base >> (shamt - 1)) & 1;
235 case ASR:
236 if (shamt == 0)
237 return (base >> 31);
238 else
239 return (base >> (shamt - 1)) & 1;
240 case ROR:
241 shamt = shamt & 0x1f;
242 if (shamt == 0)
243 return (base & 1); // RRX
244 else
245 return (base >> (shamt - 1)) & 1;
246 default:
247 ccprintf(std::cerr, "Unhandled shift type\n");
248 exit(1);
249 break;
250 }
251 return 0;
252}
253
254
255// Generate C for a shift by Rs
256bool
257ArmStaticInst::shift_carry_rs(uint32_t base, uint32_t shamt,
258 uint32_t type, uint32_t cfval) const
259{
260 enum ArmShiftType shiftType;
261 shiftType = (enum ArmShiftType) type;
262
263 if (shamt == 0)
264 return cfval;
265
266 switch (shiftType)
267 {
268 case LSL:
269 if (shamt > 32)
270 return 0;
271 else
272 return (base >> (32 - shamt)) & 1;
273 case LSR:
274 if (shamt > 32)
275 return 0;
276 else
277 return (base >> (shamt - 1)) & 1;
278 case ASR:
279 if (shamt > 32)
280 shamt = 32;
281 return (base >> (shamt - 1)) & 1;
282 case ROR:
283 shamt = shamt & 0x1f;
284 if (shamt == 0)
285 shamt = 32;
286 return (base >> (shamt - 1)) & 1;
287 default:
288 ccprintf(std::cerr, "Unhandled shift type\n");
289 exit(1);
290 break;
291 }
292 return 0;
293}
294
295void
296ArmStaticInst::printIntReg(std::ostream &os, RegIndex reg_idx,
297 uint8_t opWidth) const
298{
299 if (opWidth == 0)
300 opWidth = intWidth;
301 if (aarch64) {
302 if (reg_idx == INTREG_UREG0)
303 ccprintf(os, "ureg0");
304 else if (reg_idx == INTREG_SPX)
305 ccprintf(os, "%s%s", (opWidth == 32) ? "w" : "", "sp");
306 else if (reg_idx == INTREG_X31)
307 ccprintf(os, "%szr", (opWidth == 32) ? "w" : "x");
308 else
309 ccprintf(os, "%s%d", (opWidth == 32) ? "w" : "x", reg_idx);
310 } else {
311 switch (reg_idx) {
312 case PCReg:
313 ccprintf(os, "pc");
314 break;
315 case StackPointerReg:
316 ccprintf(os, "sp");
317 break;
318 case FramePointerReg:
319 ccprintf(os, "fp");
320 break;
321 case ReturnAddressReg:
322 ccprintf(os, "lr");
323 break;
324 default:
325 ccprintf(os, "r%d", reg_idx);
326 break;
327 }
328 }
329}
330
331void ArmStaticInst::printPFflags(std::ostream &os, int flag) const
332{
333 const char *flagtoprfop[]= { "PLD", "PLI", "PST", "Reserved"};
334 const char *flagtotarget[] = { "L1", "L2", "L3", "Reserved"};
335 const char *flagtopolicy[] = { "KEEP", "STRM"};
336
337 ccprintf(os, "%s%s%s", flagtoprfop[(flag>>3)&3],
338 flagtotarget[(flag>>1)&3], flagtopolicy[flag&1]);
339}
340
341void
342ArmStaticInst::printFloatReg(std::ostream &os, RegIndex reg_idx) const
343{
344 ccprintf(os, "f%d", reg_idx);
345}
346
347void
348ArmStaticInst::printVecReg(std::ostream &os, RegIndex reg_idx,
349 bool isSveVecReg) const
350{
351 ccprintf(os, "%s%d", isSveVecReg ? "z" : "v", reg_idx);
352}
353
354void
355ArmStaticInst::printVecPredReg(std::ostream &os, RegIndex reg_idx) const
356{
357 ccprintf(os, "p%d", reg_idx);
358}
359
360void
361ArmStaticInst::printCCReg(std::ostream &os, RegIndex reg_idx) const
362{
363 ccprintf(os, "cc_%s", ArmISA::ccRegName[reg_idx]);
364}
365
366void
367ArmStaticInst::printMiscReg(std::ostream &os, RegIndex reg_idx) const
368{
369 assert(reg_idx < NUM_MISCREGS);
370 ccprintf(os, "%s", ArmISA::miscRegName[reg_idx]);
371}
372
373void
374ArmStaticInst::printMnemonic(std::ostream &os,
375 const std::string &suffix,
376 bool withPred,
377 bool withCond64,
378 ConditionCode cond64) const
379{
380 os << " " << mnemonic;
381 if (withPred && !aarch64) {
382 printCondition(os, machInst.condCode);
383 os << suffix;
384 } else if (withCond64) {
385 os << ".";
386 printCondition(os, cond64);
387 os << suffix;
388 }
389 if (machInst.bigThumb)
390 os << ".w";
391 os << " ";
392}
393
394void
395ArmStaticInst::printTarget(std::ostream &os, Addr target,
396 const SymbolTable *symtab) const
397{
398 Addr symbolAddr;
399 std::string symbol;
400
401 if (symtab && symtab->findNearestSymbol(target, symbol, symbolAddr)) {
402 ccprintf(os, "<%s", symbol);
403 if (symbolAddr != target)
404 ccprintf(os, "+%d>", target - symbolAddr);
405 else
406 ccprintf(os, ">");
407 } else {
408 ccprintf(os, "%#x", target);
409 }
410}
411
412void
413ArmStaticInst::printCondition(std::ostream &os,
414 unsigned code,
415 bool noImplicit) const
416{
417 switch (code) {
418 case COND_EQ:
419 os << "eq";
420 break;
421 case COND_NE:
422 os << "ne";
423 break;
424 case COND_CS:
425 os << "cs";
426 break;
427 case COND_CC:
428 os << "cc";
429 break;
430 case COND_MI:
431 os << "mi";
432 break;
433 case COND_PL:
434 os << "pl";
435 break;
436 case COND_VS:
437 os << "vs";
438 break;
439 case COND_VC:
440 os << "vc";
441 break;
442 case COND_HI:
443 os << "hi";
444 break;
445 case COND_LS:
446 os << "ls";
447 break;
448 case COND_GE:
449 os << "ge";
450 break;
451 case COND_LT:
452 os << "lt";
453 break;
454 case COND_GT:
455 os << "gt";
456 break;
457 case COND_LE:
458 os << "le";
459 break;
460 case COND_AL:
461 // This one is implicit.
462 if (noImplicit)
463 os << "al";
464 break;
465 case COND_UC:
466 // Unconditional.
467 if (noImplicit)
468 os << "uc";
469 break;
470 default:
471 panic("Unrecognized condition code %d.\n", code);
472 }
473}
474
475void
476ArmStaticInst::printMemSymbol(std::ostream &os,
477 const SymbolTable *symtab,
478 const std::string &prefix,
479 const Addr addr,
480 const std::string &suffix) const
481{
482 Addr symbolAddr;
483 std::string symbol;
484 if (symtab && symtab->findNearestSymbol(addr, symbol, symbolAddr)) {
485 ccprintf(os, "%s%s", prefix, symbol);
486 if (symbolAddr != addr)
487 ccprintf(os, "+%d", addr - symbolAddr);
488 ccprintf(os, suffix);
489 }
490}
491
492void
493ArmStaticInst::printShiftOperand(std::ostream &os,
494 IntRegIndex rm,
495 bool immShift,
496 uint32_t shiftAmt,
497 IntRegIndex rs,
498 ArmShiftType type) const
499{
500 bool firstOp = false;
501
502 if (rm != INTREG_ZERO) {
503 printIntReg(os, rm);
504 }
505
506 bool done = false;
507
508 if ((type == LSR || type == ASR) && immShift && shiftAmt == 0)
509 shiftAmt = 32;
510
511 switch (type) {
512 case LSL:
513 if (immShift && shiftAmt == 0) {
514 done = true;
515 break;
516 }
517 if (!firstOp)
518 os << ", ";
519 os << "LSL";
520 break;
521 case LSR:
522 if (!firstOp)
523 os << ", ";
524 os << "LSR";
525 break;
526 case ASR:
527 if (!firstOp)
528 os << ", ";
529 os << "ASR";
530 break;
531 case ROR:
532 if (immShift && shiftAmt == 0) {
533 if (!firstOp)
534 os << ", ";
535 os << "RRX";
536 done = true;
537 break;
538 }
539 if (!firstOp)
540 os << ", ";
541 os << "ROR";
542 break;
543 default:
544 panic("Tried to disassemble unrecognized shift type.\n");
545 }
546 if (!done) {
547 if (!firstOp)
548 os << " ";
549 if (immShift)
550 os << "#" << shiftAmt;
551 else
552 printIntReg(os, rs);
553 }
554}
555
556void
557ArmStaticInst::printExtendOperand(bool firstOperand, std::ostream &os,
558 IntRegIndex rm, ArmExtendType type,
559 int64_t shiftAmt) const
560{
561 if (!firstOperand)
562 ccprintf(os, ", ");
563 printIntReg(os, rm);
564 if (type == UXTX && shiftAmt == 0)
565 return;
566 switch (type) {
567 case UXTB: ccprintf(os, ", UXTB");
568 break;
569 case UXTH: ccprintf(os, ", UXTH");
570 break;
571 case UXTW: ccprintf(os, ", UXTW");
572 break;
573 case UXTX: ccprintf(os, ", LSL");
574 break;
575 case SXTB: ccprintf(os, ", SXTB");
576 break;
577 case SXTH: ccprintf(os, ", SXTH");
578 break;
579 case SXTW: ccprintf(os, ", SXTW");
580 break;
581 case SXTX: ccprintf(os, ", SXTW");
582 break;
583 }
584 if (type == UXTX || shiftAmt)
585 ccprintf(os, " #%d", shiftAmt);
586}
587
588void
589ArmStaticInst::printDataInst(std::ostream &os, bool withImm,
590 bool immShift, bool s, IntRegIndex rd, IntRegIndex rn,
591 IntRegIndex rm, IntRegIndex rs, uint32_t shiftAmt,
592 ArmShiftType type, uint64_t imm) const
593{
594 printMnemonic(os, s ? "s" : "");
595 bool firstOp = true;
596
597 // Destination
598 if (rd != INTREG_ZERO) {
599 firstOp = false;
600 printIntReg(os, rd);
601 }
602
603 // Source 1.
604 if (rn != INTREG_ZERO) {
605 if (!firstOp)
606 os << ", ";
607 firstOp = false;
608 printIntReg(os, rn);
609 }
610
611 if (!firstOp)
612 os << ", ";
613 if (withImm) {
614 ccprintf(os, "#%ld", imm);
615 } else {
616 printShiftOperand(os, rm, immShift, shiftAmt, rs, type);
617 }
618}
619
620std::string
621ArmStaticInst::generateDisassembly(Addr pc,
622 const SymbolTable *symtab) const
623{
624 std::stringstream ss;
625 printMnemonic(ss);
626 return ss.str();
627}
628
// Generate the fault for a BKPT executed in AArch32. The breakpoint is
// routed to AArch64 (SoftwareBreakpoint) when EL1 is AArch64, or when a
// non-secure AArch64 EL2 has debug/general exceptions rerouted to it
// (HCR_EL2.TGE or MDCR_EL2.TDE set); otherwise it is reported as an
// AArch32 prefetch abort debug event.
Fault
ArmStaticInst::softwareBreakpoint32(ExecContext *xc, uint16_t imm) const
{
    const auto tc = xc->tcBase();
    const HCR hcr = tc->readMiscReg(MISCREG_HCR_EL2);
    const HDCR mdcr = tc->readMiscRegNoEffect(MISCREG_MDCR_EL2);
    if ((ArmSystem::haveEL(tc, EL2) && !inSecureState(tc) &&
         !ELIs32(tc, EL2) && (hcr.tge == 1 || mdcr.tde == 1)) ||
         !ELIs32(tc, EL1)) {
        // Route to AArch64 Software Breakpoint
        return std::make_shared<SoftwareBreakpoint>(machInst, imm);
    } else {
        // Execute AArch32 Software Breakpoint
        return std::make_shared<PrefetchAbort>(readPC(xc),
                                               ArmFault::DebugEvent);
    }
}
646
647Fault
648ArmStaticInst::advSIMDFPAccessTrap64(ExceptionLevel el) const
649{
650 switch (el) {
651 case EL1:
652 return std::make_shared<SupervisorTrap>(machInst, 0x1E00000,
653 EC_TRAPPED_SIMD_FP);
654 case EL2:
655 return std::make_shared<HypervisorTrap>(machInst, 0x1E00000,
656 EC_TRAPPED_SIMD_FP);
657 case EL3:
658 return std::make_shared<SecureMonitorTrap>(machInst, 0x1E00000,
659 EC_TRAPPED_SIMD_FP);
660
661 default:
662 panic("Illegal EL in advSIMDFPAccessTrap64\n");
663 }
664}
665
666
// Check the EL2/EL3 CPTR trap bits for Advanced SIMD/FP accesses and
// return the corresponding trap fault, or NoFault if no trap applies.
Fault
ArmStaticInst::checkFPAdvSIMDTrap64(ThreadContext *tc, CPSR cpsr) const
{
    // CPTR_EL2.TFP traps non-secure SIMD/FP accesses to EL2.
    if (ArmSystem::haveVirtualization(tc) && !inSecureState(tc)) {
        HCPTR cptrEnCheck = tc->readMiscReg(MISCREG_CPTR_EL2);
        if (cptrEnCheck.tfp)
            return advSIMDFPAccessTrap64(EL2);
    }

    // CPTR_EL3.TFP traps SIMD/FP accesses to EL3.
    if (ArmSystem::haveSecurity(tc)) {
        HCPTR cptrEnCheck = tc->readMiscReg(MISCREG_CPTR_EL3);
        if (cptrEnCheck.tfp)
            return advSIMDFPAccessTrap64(EL3);
    }

    return NoFault;
}
684
// Check whether Advanced SIMD/FP is enabled for the current EL via
// CPACR_EL1.FPEN (EL0 needs 0b11, EL1 needs bit 0 set), then fall through
// to the EL2/EL3 trap checks. Returns the trap fault or NoFault.
Fault
ArmStaticInst::checkFPAdvSIMDEnabled64(ThreadContext *tc,
                                       CPSR cpsr, CPACR cpacr) const
{
    const ExceptionLevel el = currEL(tc);
    if ((el == EL0 && cpacr.fpen != 0x3) ||
        (el == EL1 && !(cpacr.fpen & 0x1)))
        return advSIMDFPAccessTrap64(EL1);

    return checkFPAdvSIMDTrap64(tc, cpsr);
}
696
697Fault
698ArmStaticInst::checkAdvSIMDOrFPEnabled32(ThreadContext *tc,
699 CPSR cpsr, CPACR cpacr,
700 NSACR nsacr, FPEXC fpexc,
701 bool fpexc_check, bool advsimd) const
702{
703 const bool have_virtualization = ArmSystem::haveVirtualization(tc);
704 const bool have_security = ArmSystem::haveSecurity(tc);
705 const bool is_secure = inSecureState(tc);
706 const ExceptionLevel cur_el = opModeToEL(currOpMode(tc));
706 const ExceptionLevel cur_el = currEL(tc);
707
708 if (cur_el == EL0 && ELIs64(tc, EL1))
709 return checkFPAdvSIMDEnabled64(tc, cpsr, cpacr);
710
711 uint8_t cpacr_cp10 = cpacr.cp10;
712 bool cpacr_asedis = cpacr.asedis;
713
714 if (have_security && !ELIs64(tc, EL3) && !is_secure) {
715 if (nsacr.nsasedis)
716 cpacr_asedis = true;
717 if (nsacr.cp10 == 0)
718 cpacr_cp10 = 0;
719 }
720
721 if (cur_el != EL2) {
722 if (advsimd && cpacr_asedis)
723 return disabledFault();
724
725 if ((cur_el == EL0 && cpacr_cp10 != 0x3) ||
726 (cur_el != EL0 && !(cpacr_cp10 & 0x1)))
727 return disabledFault();
728 }
729
730 if (fpexc_check && !fpexc.en)
731 return disabledFault();
732
733 // -- aarch32/exceptions/traps/AArch32.CheckFPAdvSIMDTrap --
734
735 if (have_virtualization && !is_secure && ELIs64(tc, EL2))
736 return checkFPAdvSIMDTrap64(tc, cpsr);
737
738 if (have_virtualization && !is_secure) {
739 HCPTR hcptr = tc->readMiscReg(MISCREG_HCPTR);
740 bool hcptr_cp10 = hcptr.tcp10;
741 bool hcptr_tase = hcptr.tase;
742
743 if (have_security && !ELIs64(tc, EL3) && !is_secure) {
744 if (nsacr.nsasedis)
745 hcptr_tase = true;
746 if (nsacr.cp10)
747 hcptr_cp10 = true;
748 }
749
750 if ((advsimd && hcptr_tase) || hcptr_cp10) {
751 const uint32_t iss = advsimd ? (1 << 5) : 0xA;
752 if (cur_el == EL2) {
753 return std::make_shared<UndefinedInstruction>(
754 machInst, iss,
755 EC_TRAPPED_HCPTR, mnemonic);
756 } else {
757 return std::make_shared<HypervisorTrap>(
758 machInst, iss,
759 EC_TRAPPED_HCPTR);
760 }
761
762 }
763 }
764
765 if (have_security && ELIs64(tc, EL3)) {
766 HCPTR cptrEnCheck = tc->readMiscReg(MISCREG_CPTR_EL3);
767 if (cptrEnCheck.tfp)
768 return advSIMDFPAccessTrap64(EL3);
769 }
770
771 return NoFault;
772}
773
// Return true if a WFE (isWfe) or WFI executed now would be trapped to
// exception level 'tgtEl', based on the per-EL trap-enable bits:
// SCTLR_EL1.nTWE/nTWI (inverted sense), HCR_EL2.TWE/TWI, SCR_EL3.TWE/TWI.
inline bool
ArmStaticInst::isWFxTrapping(ThreadContext *tc,
                             ExceptionLevel tgtEl,
                             bool isWfe) const
{
    bool trap = false;
    SCTLR sctlr = ((SCTLR)tc->readMiscReg(MISCREG_SCTLR_EL1));
    HCR hcr = ((HCR)tc->readMiscReg(MISCREG_HCR_EL2));
    SCR scr = ((SCR)tc->readMiscReg(MISCREG_SCR_EL3));

    switch (tgtEl) {
      case EL1:
        // SCTLR.nTW{E,I} == 0 means "trap", hence the negation.
        trap = isWfe? !sctlr.ntwe : !sctlr.ntwi;
        break;
      case EL2:
        trap = isWfe? hcr.twe : hcr.twi;
        break;
      case EL3:
        trap = isWfe? scr.twe : scr.twi;
        break;
      default:
        break;
    }

    return trap;
}
800
// Check whether a WFE/WFI executed in AArch32 traps to 'targetEL', and
// build the fault if so. If the target EL is running AArch64, delegate to
// the AArch64 variant so the right exception class is raised there.
Fault
ArmStaticInst::checkForWFxTrap32(ThreadContext *tc,
                                 ExceptionLevel targetEL,
                                 bool isWfe) const
{
    // Check if target exception level is implemented.
    assert(ArmSystem::haveEL(tc, targetEL));

    // Check for routing to AArch64: this happens if the
    // target exception level (where the trap will be handled)
    // is using aarch64
    if (ELIs64(tc, targetEL)) {
        return checkForWFxTrap64(tc, targetEL, isWfe);
    }

    // Check if processor needs to trap at selected exception level
    bool trap = isWFxTrapping(tc, targetEL, isWfe);

    if (trap) {
        uint32_t iss = isWfe? 0x1E00001 : /* WFE Instruction syndrome */
                             0x1E00000; /* WFI Instruction syndrome */
        switch (targetEL) {
          case EL1:
            // An AArch32 EL1 trap is reported as an undefined instruction.
            return std::make_shared<UndefinedInstruction>(
                machInst, iss,
                EC_TRAPPED_WFI_WFE, mnemonic);
          case EL2:
            return std::make_shared<HypervisorTrap>(machInst, iss,
                                                    EC_TRAPPED_WFI_WFE);
          case EL3:
            return std::make_shared<SecureMonitorTrap>(machInst, iss,
                                                       EC_TRAPPED_WFI_WFE);
          default:
            panic("Unrecognized Exception Level: %d\n", targetEL);
        }
    }

    return NoFault;
}
840
// Check whether a WFE/WFI traps to an AArch64 'targetEL', and build the
// fault if so. Unlike the AArch32 variant, an EL1 trap here is a
// SupervisorTrap rather than an undefined instruction.
Fault
ArmStaticInst::checkForWFxTrap64(ThreadContext *tc,
                                 ExceptionLevel targetEL,
                                 bool isWfe) const
{
    // Check if target exception level is implemented.
    assert(ArmSystem::haveEL(tc, targetEL));

    // Check if processor needs to trap at selected exception level
    bool trap = isWFxTrapping(tc, targetEL, isWfe);

    if (trap) {
        uint32_t iss = isWfe? 0x1E00001 : /* WFE Instruction syndrome */
                             0x1E00000; /* WFI Instruction syndrome */
        switch (targetEL) {
          case EL1:
            return std::make_shared<SupervisorTrap>(machInst, iss,
                                                    EC_TRAPPED_WFI_WFE);
          case EL2:
            return std::make_shared<HypervisorTrap>(machInst, iss,
                                                    EC_TRAPPED_WFI_WFE);
          case EL3:
            return std::make_shared<SecureMonitorTrap>(machInst, iss,
                                                       EC_TRAPPED_WFI_WFE);
          default:
            panic("Unrecognized Exception Level: %d\n", targetEL);
        }
    }

    return NoFault;
}
872
// Run the full WFE/WFI trap sequence for the current exception level:
// EL1 (only from EL0), then EL2 (from EL0/EL1 in non-secure state), then
// EL3. The first check that traps wins; returns NoFault otherwise.
Fault
ArmStaticInst::trapWFx(ThreadContext *tc,
                       CPSR cpsr, SCR scr,
                       bool isWfe) const
{
    Fault fault = NoFault;
    ExceptionLevel curr_el = currEL(tc);

    if (curr_el == EL0) {
        fault = checkForWFxTrap32(tc, EL1, isWfe);
    }

    if ((fault == NoFault) &&
        ArmSystem::haveEL(tc, EL2) && !inSecureState(scr, cpsr) &&
        ((curr_el == EL0) || (curr_el == EL1))) {

        fault = checkForWFxTrap32(tc, EL2, isWfe);
    }

    if ((fault == NoFault) &&
        ArmSystem::haveEL(tc, EL3) && curr_el != EL3) {
        fault = checkForWFxTrap32(tc, EL3, isWfe);
    }

    return fault;
}
899
// Check whether the SETEND instruction is disabled (SCTLR.SED / HSCTLR.SED)
// at the current exception level, returning an undefined-instruction fault
// when it is, and NoFault otherwise.
Fault
ArmStaticInst::checkSETENDEnabled(ThreadContext *tc, CPSR cpsr) const
{
    bool setend_disabled(false);
    ExceptionLevel pstate_el = currEL(tc);

    if (pstate_el == EL2) {
        setend_disabled = ((SCTLR)tc->readMiscRegNoEffect(MISCREG_HSCTLR)).sed;
    } else {
        // Please note: in the armarm pseudocode there is a distinction
        // whether EL1 is aarch32 or aarch64:
        // if ELUsingAArch32(EL1) then SCTLR.SED else SCTLR[].SED;
        // Considering that SETEND is aarch32 only, ELUsingAArch32(EL1)
        // will always be true (hence using SCTLR.SED) except for
        // instruction executed at EL0, and with an AArch64 EL1.
        // In this case SCTLR_EL1 will be used. In gem5 the register is
        // mapped to SCTLR_ns. We can safely use SCTLR and choose the
        // appropriate bank version.

        // Get the index of the banked version of SCTLR:
        // SCTLR_s or SCTLR_ns.
        auto banked_sctlr = snsBankedIndex(
            MISCREG_SCTLR, tc, !inSecureState(tc));

        // SCTLR.SED bit is enabling/disabling the use of SETEND instruction.
        setend_disabled = ((SCTLR)tc->readMiscRegNoEffect(banked_sctlr)).sed;
    }

    return setend_disabled ? undefinedFault32(tc, pstate_el) :
                             NoFault;
}
931
// Build an undefined-instruction fault for code executing in AArch32,
// routing it to the AArch64 handlers when the controlling EL is AArch64.
Fault
ArmStaticInst::undefinedFault32(ThreadContext *tc,
                                ExceptionLevel pstateEL) const
{
    // Even if we are running in aarch32, the fault might be dealt with in
    // aarch64 ISA.
    if (generalExceptionsToAArch64(tc, pstateEL)) {
        return undefinedFault64(tc, pstateEL);
    } else {
        // Please note: according to the ARM ARM pseudocode we should handle
        // the case when EL2 is aarch64 and HCR.TGE is 1 as well.
        // However this case is already handled by the routeToHyp method in
        // ArmFault class.
        return std::make_shared<UndefinedInstruction>(
            machInst, 0,
            EC_UNKNOWN, mnemonic);
    }
}
950
951Fault
952ArmStaticInst::undefinedFault64(ThreadContext *tc,
953 ExceptionLevel pstateEL) const
954{
955 switch (pstateEL) {
956 case EL0:
957 case EL1:
958 return std::make_shared<SupervisorTrap>(machInst, 0, EC_UNKNOWN);
959 case EL2:
960 return std::make_shared<HypervisorTrap>(machInst, 0, EC_UNKNOWN);
961 case EL3:
962 return std::make_shared<SecureMonitorTrap>(machInst, 0, EC_UNKNOWN);
963 default:
964 panic("Unrecognized Exception Level: %d\n", pstateEL);
965 break;
966 }
967
968 return NoFault;
969}
970
971Fault
972ArmStaticInst::sveAccessTrap(ExceptionLevel el) const
973{
974 switch (el) {
975 case EL1:
976 return std::make_shared<SupervisorTrap>(machInst, 0, EC_TRAPPED_SVE);
977 case EL2:
978 return std::make_shared<HypervisorTrap>(machInst, 0, EC_TRAPPED_SVE);
979 case EL3:
980 return std::make_shared<SecureMonitorTrap>(machInst, 0,
981 EC_TRAPPED_SVE);
982
983 default:
984 panic("Illegal EL in sveAccessTrap\n");
985 }
986}
987
// Check the EL2/EL3 CPTR trap bits for SVE accesses and return the
// corresponding trap fault, or NoFault if no trap applies.
Fault
ArmStaticInst::checkSveTrap(ThreadContext *tc, CPSR cpsr) const
{
    const ExceptionLevel el = (ExceptionLevel) (uint8_t) cpsr.el;

    // CPTR_EL2.TZ traps SVE accesses from EL0-EL2 to EL2.
    if (ArmSystem::haveVirtualization(tc) && el <= EL2) {
        CPTR cptrEnCheck = tc->readMiscReg(MISCREG_CPTR_EL2);
        if (cptrEnCheck.tz)
            return sveAccessTrap(EL2);
    }

    // CPTR_EL3.EZ must be set to *enable* SVE; a clear bit traps to EL3.
    if (ArmSystem::haveSecurity(tc)) {
        CPTR cptrEnCheck = tc->readMiscReg(MISCREG_CPTR_EL3);
        if (!cptrEnCheck.ez)
            return sveAccessTrap(EL3);
    }

    return NoFault;
}
1007
// Check whether SVE is enabled for the current EL via CPACR_EL1.ZEN
// (EL0 needs 0b11, EL1 needs bit 0 set), then fall through to the
// EL2/EL3 trap checks. Returns the trap fault or NoFault.
Fault
ArmStaticInst::checkSveEnabled(ThreadContext *tc, CPSR cpsr, CPACR cpacr) const
{
    const ExceptionLevel el = (ExceptionLevel) (uint8_t) cpsr.el;
    if ((el == EL0 && cpacr.zen != 0x3) ||
        (el == EL1 && !(cpacr.zen & 0x1)))
        return sveAccessTrap(EL1);

    return checkSveTrap(tc, cpsr);
}
1018
1019
// Compute the IT state to restore on exception return, forcing it to zero
// in the architecturally-required cases (non-Thumb or illegal state,
// reserved encodings, or ITD set while mid-IT-block).
static uint8_t
getRestoredITBits(ThreadContext *tc, CPSR spsr)
{
    // See: shared/functions/system/RestoredITBits in the ARM ARM

    const ExceptionLevel el = opModeToEL((OperatingMode) (uint8_t)spsr.mode);
    const uint8_t it = itState(spsr);

    // IT only applies in Thumb state; a set IL bit also discards it.
    if (!spsr.t || spsr.il)
        return 0;

    // The IT bits are forced to zero when they are set to a reserved
    // value.
    if (bits(it, 7, 4) != 0 && bits(it, 3, 0) == 0)
        return 0;

    // Pick the ITD bit of the SCTLR that will control the target mode:
    // HSCTLR when returning to EL2, SCTLR otherwise.
    const bool itd = el == EL2 ?
        ((SCTLR)tc->readMiscReg(MISCREG_HSCTLR)).itd :
        ((SCTLR)tc->readMiscReg(MISCREG_SCTLR)).itd;

    // The IT bits are forced to zero when returning to A32 state, or
    // when returning to an EL with the ITD bit set to 1, and the IT
    // bits are describing a multi-instruction block.
    if (itd && bits(it, 2, 0) != 0)
        return 0;

    return it;
}
1048
// Implement the ARM ARM IllegalExceptionReturn check: returns true when
// the SPSR describes a state the current exception return must not enter
// (unknown mode, higher EL than current, unimplemented EL, wrong register
// width for the target EL, or various AArch64 SP/mode constraints).
static bool
illegalExceptionReturn(ThreadContext *tc, CPSR cpsr, CPSR spsr)
{
    const OperatingMode mode = (OperatingMode) (uint8_t)spsr.mode;
    if (unknownMode(mode))
        return true;

    const OperatingMode cur_mode = (OperatingMode) (uint8_t)cpsr.mode;
    const ExceptionLevel target_el = opModeToEL(mode);

    HCR hcr = ((HCR)tc->readMiscReg(MISCREG_HCR_EL2));
    SCR scr = ((SCR)tc->readMiscReg(MISCREG_SCR_EL3));

    // Exception returns may never raise the exception level.
    if (target_el > opModeToEL(cur_mode))
        return true;

    if (!ArmSystem::haveEL(tc, target_el))
        return true;

    // With HCR.TGE set, non-secure EL1 is unreachable.
    if (target_el == EL1 && ArmSystem::haveEL(tc, EL2) && scr.ns && hcr.tge)
        return true;

    // EL2 does not exist in secure state.
    if (target_el == EL2 && ArmSystem::haveEL(tc, EL3) && !scr.ns)
        return true;

    // The SPSR's register width (spsr.width == 1 means AArch32) must match
    // the width the target EL actually runs at, when that is knowable.
    bool spsr_mode_is_aarch32 = (spsr.width == 1);
    bool known, target_el_is_aarch32;
    std::tie(known, target_el_is_aarch32) = ELUsingAArch32K(tc, target_el);
    assert(known || (target_el == EL0 && ELIs64(tc, EL1)));

    if (known && (spsr_mode_is_aarch32 != target_el_is_aarch32))
        return true;

    if (!spsr.width) {
        // aarch64
        if (!ArmSystem::highestELIs64(tc))
            return true;
        // Bit 1 of an AArch64 SPSR mode field is reserved and must be 0.
        if (spsr & 0x2)
            return true;
        // EL0 must use SP_EL0 (spsr.sp == 0).
        if (target_el == EL0 && spsr.sp)
            return true;
    } else {
        // aarch32
        return unknownMode32(mode);
    }

    return false;
}
1097
// Construct the new PSTATE (CPSR) from a saved SPSR on exception return,
// per the ARM ARM SetPSTATEFromPSR pseudocode: validate the return, then
// copy over the flag/state fields appropriate to the target width.
CPSR
ArmStaticInst::getPSTATEFromPSR(ThreadContext *tc, CPSR cpsr, CPSR spsr) const
{
    CPSR new_cpsr = 0;

    // gem5 doesn't implement single-stepping, so force the SS bit to
    // 0.
    new_cpsr.ss = 0;

    if (illegalExceptionReturn(tc, cpsr, spsr)) {
        // If the SPSR specifies an illegal exception return,
        // then PSTATE.{M, nRW, EL, SP} are unchanged and PSTATE.IL
        // is set to 1.
        new_cpsr.il = 1;
        if (cpsr.width) {
            new_cpsr.mode = cpsr.mode;
        } else {
            new_cpsr.width = cpsr.width;
            new_cpsr.el = cpsr.el;
            new_cpsr.sp = cpsr.sp;
        }
    } else {
        new_cpsr.il = spsr.il;
        if (spsr.width && unknownMode32((OperatingMode)(uint8_t)spsr.mode)) {
            new_cpsr.il = 1;
        } else if (spsr.width) {
            new_cpsr.mode = spsr.mode;
        } else {
            new_cpsr.el = spsr.el;
            new_cpsr.sp = spsr.sp;
        }
    }

    // Condition flags and PAN are restored regardless of width.
    new_cpsr.nz = spsr.nz;
    new_cpsr.c = spsr.c;
    new_cpsr.v = spsr.v;
    new_cpsr.pan = spsr.pan;
    if (new_cpsr.width) {
        // aarch32
        const ITSTATE it = getRestoredITBits(tc, spsr);
        new_cpsr.q = spsr.q;
        new_cpsr.ge = spsr.ge;
        new_cpsr.e = spsr.e;
        new_cpsr.aif = spsr.aif;
        new_cpsr.t = spsr.t;
        new_cpsr.it2 = it.top6;
        new_cpsr.it1 = it.bottom2;
    } else {
        // aarch64
        new_cpsr.daif = spsr.daif;
    }

    return new_cpsr;
}
1152
bool
ArmStaticInst::generalExceptionsToAArch64(ThreadContext *tc,
                                          ExceptionLevel pstateEL) const
{
    // Returns TRUE if exceptions normally routed to EL1 are being handled
    // at an Exception level using AArch64, because either EL1 is using
    // AArch64 or TGE is in force and EL2 is using AArch64.
    HCR hcr = ((HCR)tc->readMiscReg(MISCREG_HCR_EL2));
    return (pstateEL == EL0 && !ELIs32(tc, EL1)) ||
           (ArmSystem::haveEL(tc, EL2) && !inSecureState(tc) &&
            !ELIs32(tc, EL2) && hcr.tge);
}
1165
// Return the currently-configured SVE vector length in bits, delegating
// to the ISA object owned by the thread context.
unsigned
ArmStaticInst::getCurSveVecLenInBits(ThreadContext *tc)
{
    return tc->getIsaPtr()->getCurSveVecLenInBits(tc);
}
1171
1172}
707
708 if (cur_el == EL0 && ELIs64(tc, EL1))
709 return checkFPAdvSIMDEnabled64(tc, cpsr, cpacr);
710
711 uint8_t cpacr_cp10 = cpacr.cp10;
712 bool cpacr_asedis = cpacr.asedis;
713
714 if (have_security && !ELIs64(tc, EL3) && !is_secure) {
715 if (nsacr.nsasedis)
716 cpacr_asedis = true;
717 if (nsacr.cp10 == 0)
718 cpacr_cp10 = 0;
719 }
720
721 if (cur_el != EL2) {
722 if (advsimd && cpacr_asedis)
723 return disabledFault();
724
725 if ((cur_el == EL0 && cpacr_cp10 != 0x3) ||
726 (cur_el != EL0 && !(cpacr_cp10 & 0x1)))
727 return disabledFault();
728 }
729
730 if (fpexc_check && !fpexc.en)
731 return disabledFault();
732
733 // -- aarch32/exceptions/traps/AArch32.CheckFPAdvSIMDTrap --
734
735 if (have_virtualization && !is_secure && ELIs64(tc, EL2))
736 return checkFPAdvSIMDTrap64(tc, cpsr);
737
738 if (have_virtualization && !is_secure) {
739 HCPTR hcptr = tc->readMiscReg(MISCREG_HCPTR);
740 bool hcptr_cp10 = hcptr.tcp10;
741 bool hcptr_tase = hcptr.tase;
742
743 if (have_security && !ELIs64(tc, EL3) && !is_secure) {
744 if (nsacr.nsasedis)
745 hcptr_tase = true;
746 if (nsacr.cp10)
747 hcptr_cp10 = true;
748 }
749
750 if ((advsimd && hcptr_tase) || hcptr_cp10) {
751 const uint32_t iss = advsimd ? (1 << 5) : 0xA;
752 if (cur_el == EL2) {
753 return std::make_shared<UndefinedInstruction>(
754 machInst, iss,
755 EC_TRAPPED_HCPTR, mnemonic);
756 } else {
757 return std::make_shared<HypervisorTrap>(
758 machInst, iss,
759 EC_TRAPPED_HCPTR);
760 }
761
762 }
763 }
764
765 if (have_security && ELIs64(tc, EL3)) {
766 HCPTR cptrEnCheck = tc->readMiscReg(MISCREG_CPTR_EL3);
767 if (cptrEnCheck.tfp)
768 return advSIMDFPAccessTrap64(EL3);
769 }
770
771 return NoFault;
772}
773
774inline bool
775ArmStaticInst::isWFxTrapping(ThreadContext *tc,
776 ExceptionLevel tgtEl,
777 bool isWfe) const
778{
779 bool trap = false;
780 SCTLR sctlr = ((SCTLR)tc->readMiscReg(MISCREG_SCTLR_EL1));
781 HCR hcr = ((HCR)tc->readMiscReg(MISCREG_HCR_EL2));
782 SCR scr = ((SCR)tc->readMiscReg(MISCREG_SCR_EL3));
783
784 switch (tgtEl) {
785 case EL1:
786 trap = isWfe? !sctlr.ntwe : !sctlr.ntwi;
787 break;
788 case EL2:
789 trap = isWfe? hcr.twe : hcr.twi;
790 break;
791 case EL3:
792 trap = isWfe? scr.twe : scr.twi;
793 break;
794 default:
795 break;
796 }
797
798 return trap;
799}
800
801Fault
802ArmStaticInst::checkForWFxTrap32(ThreadContext *tc,
803 ExceptionLevel targetEL,
804 bool isWfe) const
805{
806 // Check if target exception level is implemented.
807 assert(ArmSystem::haveEL(tc, targetEL));
808
809 // Check for routing to AArch64: this happens if the
810 // target exception level (where the trap will be handled)
811 // is using aarch64
812 if (ELIs64(tc, targetEL)) {
813 return checkForWFxTrap64(tc, targetEL, isWfe);
814 }
815
816 // Check if processor needs to trap at selected exception level
817 bool trap = isWFxTrapping(tc, targetEL, isWfe);
818
819 if (trap) {
820 uint32_t iss = isWfe? 0x1E00001 : /* WFE Instruction syndrome */
821 0x1E00000; /* WFI Instruction syndrome */
822 switch (targetEL) {
823 case EL1:
824 return std::make_shared<UndefinedInstruction>(
825 machInst, iss,
826 EC_TRAPPED_WFI_WFE, mnemonic);
827 case EL2:
828 return std::make_shared<HypervisorTrap>(machInst, iss,
829 EC_TRAPPED_WFI_WFE);
830 case EL3:
831 return std::make_shared<SecureMonitorTrap>(machInst, iss,
832 EC_TRAPPED_WFI_WFE);
833 default:
834 panic("Unrecognized Exception Level: %d\n", targetEL);
835 }
836 }
837
838 return NoFault;
839}
840
841Fault
842ArmStaticInst::checkForWFxTrap64(ThreadContext *tc,
843 ExceptionLevel targetEL,
844 bool isWfe) const
845{
846 // Check if target exception level is implemented.
847 assert(ArmSystem::haveEL(tc, targetEL));
848
849 // Check if processor needs to trap at selected exception level
850 bool trap = isWFxTrapping(tc, targetEL, isWfe);
851
852 if (trap) {
853 uint32_t iss = isWfe? 0x1E00001 : /* WFE Instruction syndrome */
854 0x1E00000; /* WFI Instruction syndrome */
855 switch (targetEL) {
856 case EL1:
857 return std::make_shared<SupervisorTrap>(machInst, iss,
858 EC_TRAPPED_WFI_WFE);
859 case EL2:
860 return std::make_shared<HypervisorTrap>(machInst, iss,
861 EC_TRAPPED_WFI_WFE);
862 case EL3:
863 return std::make_shared<SecureMonitorTrap>(machInst, iss,
864 EC_TRAPPED_WFI_WFE);
865 default:
866 panic("Unrecognized Exception Level: %d\n", targetEL);
867 }
868 }
869
870 return NoFault;
871}
872
873Fault
874ArmStaticInst::trapWFx(ThreadContext *tc,
875 CPSR cpsr, SCR scr,
876 bool isWfe) const
877{
878 Fault fault = NoFault;
879 ExceptionLevel curr_el = currEL(tc);
880
881 if (curr_el == EL0) {
882 fault = checkForWFxTrap32(tc, EL1, isWfe);
883 }
884
885 if ((fault == NoFault) &&
886 ArmSystem::haveEL(tc, EL2) && !inSecureState(scr, cpsr) &&
887 ((curr_el == EL0) || (curr_el == EL1))) {
888
889 fault = checkForWFxTrap32(tc, EL2, isWfe);
890 }
891
892 if ((fault == NoFault) &&
893 ArmSystem::haveEL(tc, EL3) && curr_el != EL3) {
894 fault = checkForWFxTrap32(tc, EL3, isWfe);
895 }
896
897 return fault;
898}
899
900Fault
901ArmStaticInst::checkSETENDEnabled(ThreadContext *tc, CPSR cpsr) const
902{
903 bool setend_disabled(false);
904 ExceptionLevel pstate_el = currEL(tc);
905
906 if (pstate_el == EL2) {
907 setend_disabled = ((SCTLR)tc->readMiscRegNoEffect(MISCREG_HSCTLR)).sed;
908 } else {
909 // Please note: in the armarm pseudocode there is a distinction
910 // whether EL1 is aarch32 or aarch64:
911 // if ELUsingAArch32(EL1) then SCTLR.SED else SCTLR[].SED;
912 // Considering that SETEND is aarch32 only, ELUsingAArch32(EL1)
913 // will always be true (hence using SCTLR.SED) except for
914 // instruction executed at EL0, and with an AArch64 EL1.
915 // In this case SCTLR_EL1 will be used. In gem5 the register is
916 // mapped to SCTLR_ns. We can safely use SCTLR and choose the
917 // appropriate bank version.
918
919 // Get the index of the banked version of SCTLR:
920 // SCTLR_s or SCTLR_ns.
921 auto banked_sctlr = snsBankedIndex(
922 MISCREG_SCTLR, tc, !inSecureState(tc));
923
924 // SCTLR.SED bit is enabling/disabling the ue of SETEND instruction.
925 setend_disabled = ((SCTLR)tc->readMiscRegNoEffect(banked_sctlr)).sed;
926 }
927
928 return setend_disabled ? undefinedFault32(tc, pstate_el) :
929 NoFault;
930}
931
932Fault
933ArmStaticInst::undefinedFault32(ThreadContext *tc,
934 ExceptionLevel pstateEL) const
935{
936 // Even if we are running in aarch32, the fault might be dealt with in
937 // aarch64 ISA.
938 if (generalExceptionsToAArch64(tc, pstateEL)) {
939 return undefinedFault64(tc, pstateEL);
940 } else {
941 // Please note: according to the ARM ARM pseudocode we should handle
942 // the case when EL2 is aarch64 and HCR.TGE is 1 as well.
943 // However this case is already handled by the routeToHyp method in
944 // ArmFault class.
945 return std::make_shared<UndefinedInstruction>(
946 machInst, 0,
947 EC_UNKNOWN, mnemonic);
948 }
949}
950
951Fault
952ArmStaticInst::undefinedFault64(ThreadContext *tc,
953 ExceptionLevel pstateEL) const
954{
955 switch (pstateEL) {
956 case EL0:
957 case EL1:
958 return std::make_shared<SupervisorTrap>(machInst, 0, EC_UNKNOWN);
959 case EL2:
960 return std::make_shared<HypervisorTrap>(machInst, 0, EC_UNKNOWN);
961 case EL3:
962 return std::make_shared<SecureMonitorTrap>(machInst, 0, EC_UNKNOWN);
963 default:
964 panic("Unrecognized Exception Level: %d\n", pstateEL);
965 break;
966 }
967
968 return NoFault;
969}
970
971Fault
972ArmStaticInst::sveAccessTrap(ExceptionLevel el) const
973{
974 switch (el) {
975 case EL1:
976 return std::make_shared<SupervisorTrap>(machInst, 0, EC_TRAPPED_SVE);
977 case EL2:
978 return std::make_shared<HypervisorTrap>(machInst, 0, EC_TRAPPED_SVE);
979 case EL3:
980 return std::make_shared<SecureMonitorTrap>(machInst, 0,
981 EC_TRAPPED_SVE);
982
983 default:
984 panic("Illegal EL in sveAccessTrap\n");
985 }
986}
987
988Fault
989ArmStaticInst::checkSveTrap(ThreadContext *tc, CPSR cpsr) const
990{
991 const ExceptionLevel el = (ExceptionLevel) (uint8_t) cpsr.el;
992
993 if (ArmSystem::haveVirtualization(tc) && el <= EL2) {
994 CPTR cptrEnCheck = tc->readMiscReg(MISCREG_CPTR_EL2);
995 if (cptrEnCheck.tz)
996 return sveAccessTrap(EL2);
997 }
998
999 if (ArmSystem::haveSecurity(tc)) {
1000 CPTR cptrEnCheck = tc->readMiscReg(MISCREG_CPTR_EL3);
1001 if (!cptrEnCheck.ez)
1002 return sveAccessTrap(EL3);
1003 }
1004
1005 return NoFault;
1006}
1007
1008Fault
1009ArmStaticInst::checkSveEnabled(ThreadContext *tc, CPSR cpsr, CPACR cpacr) const
1010{
1011 const ExceptionLevel el = (ExceptionLevel) (uint8_t) cpsr.el;
1012 if ((el == EL0 && cpacr.zen != 0x3) ||
1013 (el == EL1 && !(cpacr.zen & 0x1)))
1014 return sveAccessTrap(EL1);
1015
1016 return checkSveTrap(tc, cpsr);
1017}
1018
1019
1020static uint8_t
1021getRestoredITBits(ThreadContext *tc, CPSR spsr)
1022{
1023 // See: shared/functions/system/RestoredITBits in the ARM ARM
1024
1025 const ExceptionLevel el = opModeToEL((OperatingMode) (uint8_t)spsr.mode);
1026 const uint8_t it = itState(spsr);
1027
1028 if (!spsr.t || spsr.il)
1029 return 0;
1030
1031 // The IT bits are forced to zero when they are set to a reserved
1032 // value.
1033 if (bits(it, 7, 4) != 0 && bits(it, 3, 0) == 0)
1034 return 0;
1035
1036 const bool itd = el == EL2 ?
1037 ((SCTLR)tc->readMiscReg(MISCREG_HSCTLR)).itd :
1038 ((SCTLR)tc->readMiscReg(MISCREG_SCTLR)).itd;
1039
1040 // The IT bits are forced to zero when returning to A32 state, or
1041 // when returning to an EL with the ITD bit set to 1, and the IT
1042 // bits are describing a multi-instruction block.
1043 if (itd && bits(it, 2, 0) != 0)
1044 return 0;
1045
1046 return it;
1047}
1048
1049static bool
1050illegalExceptionReturn(ThreadContext *tc, CPSR cpsr, CPSR spsr)
1051{
1052 const OperatingMode mode = (OperatingMode) (uint8_t)spsr.mode;
1053 if (unknownMode(mode))
1054 return true;
1055
1056 const OperatingMode cur_mode = (OperatingMode) (uint8_t)cpsr.mode;
1057 const ExceptionLevel target_el = opModeToEL(mode);
1058
1059 HCR hcr = ((HCR)tc->readMiscReg(MISCREG_HCR_EL2));
1060 SCR scr = ((SCR)tc->readMiscReg(MISCREG_SCR_EL3));
1061
1062 if (target_el > opModeToEL(cur_mode))
1063 return true;
1064
1065 if (!ArmSystem::haveEL(tc, target_el))
1066 return true;
1067
1068 if (target_el == EL1 && ArmSystem::haveEL(tc, EL2) && scr.ns && hcr.tge)
1069 return true;
1070
1071 if (target_el == EL2 && ArmSystem::haveEL(tc, EL3) && !scr.ns)
1072 return true;
1073
1074 bool spsr_mode_is_aarch32 = (spsr.width == 1);
1075 bool known, target_el_is_aarch32;
1076 std::tie(known, target_el_is_aarch32) = ELUsingAArch32K(tc, target_el);
1077 assert(known || (target_el == EL0 && ELIs64(tc, EL1)));
1078
1079 if (known && (spsr_mode_is_aarch32 != target_el_is_aarch32))
1080 return true;
1081
1082 if (!spsr.width) {
1083 // aarch64
1084 if (!ArmSystem::highestELIs64(tc))
1085 return true;
1086 if (spsr & 0x2)
1087 return true;
1088 if (target_el == EL0 && spsr.sp)
1089 return true;
1090 } else {
1091 // aarch32
1092 return unknownMode32(mode);
1093 }
1094
1095 return false;
1096}
1097
1098CPSR
1099ArmStaticInst::getPSTATEFromPSR(ThreadContext *tc, CPSR cpsr, CPSR spsr) const
1100{
1101 CPSR new_cpsr = 0;
1102
1103 // gem5 doesn't implement single-stepping, so force the SS bit to
1104 // 0.
1105 new_cpsr.ss = 0;
1106
1107 if (illegalExceptionReturn(tc, cpsr, spsr)) {
1108 // If the SPSR specifies an illegal exception return,
1109 // then PSTATE.{M, nRW, EL, SP} are unchanged and PSTATE.IL
1110 // is set to 1.
1111 new_cpsr.il = 1;
1112 if (cpsr.width) {
1113 new_cpsr.mode = cpsr.mode;
1114 } else {
1115 new_cpsr.width = cpsr.width;
1116 new_cpsr.el = cpsr.el;
1117 new_cpsr.sp = cpsr.sp;
1118 }
1119 } else {
1120 new_cpsr.il = spsr.il;
1121 if (spsr.width && unknownMode32((OperatingMode)(uint8_t)spsr.mode)) {
1122 new_cpsr.il = 1;
1123 } else if (spsr.width) {
1124 new_cpsr.mode = spsr.mode;
1125 } else {
1126 new_cpsr.el = spsr.el;
1127 new_cpsr.sp = spsr.sp;
1128 }
1129 }
1130
1131 new_cpsr.nz = spsr.nz;
1132 new_cpsr.c = spsr.c;
1133 new_cpsr.v = spsr.v;
1134 new_cpsr.pan = spsr.pan;
1135 if (new_cpsr.width) {
1136 // aarch32
1137 const ITSTATE it = getRestoredITBits(tc, spsr);
1138 new_cpsr.q = spsr.q;
1139 new_cpsr.ge = spsr.ge;
1140 new_cpsr.e = spsr.e;
1141 new_cpsr.aif = spsr.aif;
1142 new_cpsr.t = spsr.t;
1143 new_cpsr.it2 = it.top6;
1144 new_cpsr.it1 = it.bottom2;
1145 } else {
1146 // aarch64
1147 new_cpsr.daif = spsr.daif;
1148 }
1149
1150 return new_cpsr;
1151}
1152
1153bool
1154ArmStaticInst::generalExceptionsToAArch64(ThreadContext *tc,
1155 ExceptionLevel pstateEL) const
1156{
1157 // Returns TRUE if exceptions normally routed to EL1 are being handled
1158 // at an Exception level using AArch64, because either EL1 is using
1159 // AArch64 or TGE is in force and EL2 is using AArch64.
1160 HCR hcr = ((HCR)tc->readMiscReg(MISCREG_HCR_EL2));
1161 return (pstateEL == EL0 && !ELIs32(tc, EL1)) ||
1162 (ArmSystem::haveEL(tc, EL2) && !inSecureState(tc) &&
1163 !ELIs32(tc, EL2) && hcr.tge);
1164}
1165
1166unsigned
1167ArmStaticInst::getCurSveVecLenInBits(ThreadContext *tc)
1168{
1169 return tc->getIsaPtr()->getCurSveVecLenInBits(tc);
1170}
1171
1172}