Deleted Added
sdiff udiff text old ( 12788:fe6d6ae79d7c ) new ( 12789:b28b286fa57d )
full compact
1/*
2 * Copyright (c) 2010-2014, 2016-2018 ARM Limited
3 * Copyright (c) 2013 Advanced Micro Devices, Inc.
4 * All rights reserved
5 *
6 * The license below extends only to copyright in the software and shall
7 * not be construed as granting a license to any other intellectual
8 * property including but not limited to intellectual property relating
9 * to a hardware implementation of the functionality of the software
10 * licensed hereunder. You may use the software subject to the license
11 * terms below provided that you ensure that this notice is replicated
12 * unmodified and in its entirety in all distributions of the software,
13 * modified or unmodified, in source code or in binary form.
14 *
15 * Copyright (c) 2007-2008 The Florida State University
16 * All rights reserved.
17 *
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions are
20 * met: redistributions of source code must retain the above copyright
21 * notice, this list of conditions and the following disclaimer;
22 * redistributions in binary form must reproduce the above copyright
23 * notice, this list of conditions and the following disclaimer in the
24 * documentation and/or other materials provided with the distribution;
25 * neither the name of the copyright holders nor the names of its
26 * contributors may be used to endorse or promote products derived from
27 * this software without specific prior written permission.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
30 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
31 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
32 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
33 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
34 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
35 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
36 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
37 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
38 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
39 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
40 *
41 * Authors: Stephen Hines
42 */
43
44#include "arch/arm/insts/static_inst.hh"
45
46#include "arch/arm/faults.hh"
47#include "base/condcodes.hh"
48#include "base/cprintf.hh"
49#include "base/loader/symtab.hh"
50#include "cpu/reg_class.hh"
51
52namespace ArmISA
53{
// Shift Rm by an immediate value
int32_t
ArmStaticInst::shift_rm_imm(uint32_t base, uint32_t shamt,
                            uint32_t type, uint32_t cfval) const
{
    // A32 immediate-encoded shift of a register operand:
    //   base  - value of Rm to be shifted
    //   shamt - encoded immediate shift amount (0..31, asserted below)
    //   type  - shift type field (LSL/LSR/ASR/ROR)
    //   cfval - current carry flag; only used for RRX (ROR with shamt 0)
    assert(shamt < 32);
    ArmShiftType shiftType;
    shiftType = (ArmShiftType)type;

    switch (shiftType)
    {
      case LSL:
        return base << shamt;
      case LSR:
        // An encoded LSR amount of 0 means LSR #32: result is all zeros.
        if (shamt == 0)
            return 0;
        else
            return base >> shamt;
      case ASR:
        // An encoded ASR amount of 0 means ASR #32: every result bit is a
        // copy of the sign bit. The -((base & (1 << 31)) >> n) term
        // manufactures the sign extension using unsigned arithmetic.
        if (shamt == 0)
            return (base >> 31) | -((base & (1 << 31)) >> 31);
        else
            return (base >> shamt) | -((base & (1 << 31)) >> shamt);
      case ROR:
        // An encoded ROR amount of 0 means RRX: rotate right by one
        // through the carry flag.
        if (shamt == 0)
            return (cfval << 31) | (base >> 1); // RRX
        else
            return (base << (32 - shamt)) | (base >> shamt);
      default:
        ccprintf(std::cerr, "Unhandled shift type\n");
        exit(1);
        break;
    }
    return 0;
}
89
int64_t
ArmStaticInst::shiftReg64(uint64_t base, uint64_t shiftAmt,
                          ArmShiftType type, uint8_t width) const
{
    // A64 register shift ("ShiftReg" in the ARM ARM): the shift amount
    // wraps at the operand width (32 or 64 bits).
    shiftAmt = shiftAmt % width;
    ArmShiftType shiftType;
    shiftType = (ArmShiftType)type;

    switch (shiftType)
    {
      case LSL:
        return base << shiftAmt;
      case LSR:
        if (shiftAmt == 0)
            return base;
        else
            return (base & mask(width)) >> shiftAmt;
      case ASR:
        if (shiftAmt == 0) {
            return base;
        } else {
            // NOTE(review): this arm uses the instruction's intWidth
            // member where the other cases use the width parameter.
            // Callers appear to pass width == intWidth, but the
            // asymmetry is worth confirming.
            int sign_bit = bits(base, intWidth - 1);
            base >>= shiftAmt;
            base = sign_bit ? (base | ~mask(intWidth - shiftAmt)) : base;
            return base & mask(intWidth);
        }
      case ROR:
        if (shiftAmt == 0)
            return base;
        else
            return (base << (width - shiftAmt)) | (base >> shiftAmt);
      default:
        ccprintf(std::cerr, "Unhandled shift type\n");
        exit(1);
        break;
    }
    return 0;
}
128
int64_t
ArmStaticInst::extendReg64(uint64_t base, ArmExtendType type,
                           uint64_t shiftAmt, uint8_t width) const
{
    // A64 "ExtendReg": extract the low 8/16/32/64 bits of base,
    // optionally sign-extend, shift left by shiftAmt, and truncate the
    // result to the destination width.
    bool sign_extend = false;
    int len = 0;
    switch (type) {
      case UXTB:
        len = 8;
        break;
      case UXTH:
        len = 16;
        break;
      case UXTW:
        len = 32;
        break;
      case UXTX:
        len = 64;
        break;
      case SXTB:
        len = 8;
        sign_extend = true;
        break;
      case SXTH:
        len = 16;
        sign_extend = true;
        break;
      case SXTW:
        len = 32;
        sign_extend = true;
        break;
      case SXTX:
        len = 64;
        sign_extend = true;
        break;
    }
    // Never extract more bits than will survive the left shift and the
    // final truncation to width.
    len = len <= width - shiftAmt ? len : width - shiftAmt;
    uint64_t tmp = (uint64_t) bits(base, len - 1, 0) << shiftAmt;
    if (sign_extend) {
        // Replicate the top extracted bit through the high-order bits.
        int sign_bit = bits(tmp, len + shiftAmt - 1);
        tmp = sign_bit ? (tmp | ~mask(len + shiftAmt)) : tmp;
    }
    return tmp & mask(width);
}
173
// Shift Rm by Rs
int32_t
ArmStaticInst::shift_rm_rs(uint32_t base, uint32_t shamt,
                           uint32_t type, uint32_t cfval) const
{
    // A32 register-specified shift: unlike the immediate form, the
    // amount comes from the bottom byte of Rs and may be 32 or larger.
    // cfval is unused here (carry is computed by shift_carry_rs).
    enum ArmShiftType shiftType;
    shiftType = (enum ArmShiftType) type;

    switch (shiftType)
    {
      case LSL:
        // Shifting by 32 or more drains every bit out.
        if (shamt >= 32)
            return 0;
        else
            return base << shamt;
      case LSR:
        if (shamt >= 32)
            return 0;
        else
            return base >> shamt;
      case ASR:
        // For amounts >= 32 the result saturates to all copies of the
        // sign bit; the -((base & (1 << 31)) >> n) term builds the
        // sign extension with unsigned arithmetic.
        if (shamt >= 32)
            return (base >> 31) | -((base & (1 << 31)) >> 31);
        else
            return (base >> shamt) | -((base & (1 << 31)) >> shamt);
      case ROR:
        // Rotation is modulo 32; a residue of 0 leaves base unchanged.
        shamt = shamt & 0x1f;
        if (shamt == 0)
            return base;
        else
            return (base << (32 - shamt)) | (base >> shamt);
      default:
        ccprintf(std::cerr, "Unhandled shift type\n");
        exit(1);
        break;
    }
    return 0;
}
212
213
// Generate C for a shift by immediate
bool
ArmStaticInst::shift_carry_imm(uint32_t base, uint32_t shamt,
                               uint32_t type, uint32_t cfval) const
{
    // Carry-out of an immediate-encoded shift, mirroring shift_rm_imm.
    // cfval is the incoming carry flag, returned unchanged when the
    // shift produces no new carry (LSL #0).
    enum ArmShiftType shiftType;
    shiftType = (enum ArmShiftType) type;

    switch (shiftType)
    {
      case LSL:
        if (shamt == 0)
            return cfval;
        else
            return (base >> (32 - shamt)) & 1;
      case LSR:
        // LSR #0 encodes LSR #32: carry is the original sign bit.
        if (shamt == 0)
            return (base >> 31);
        else
            return (base >> (shamt - 1)) & 1;
      case ASR:
        // ASR #0 encodes ASR #32: carry is the original sign bit.
        if (shamt == 0)
            return (base >> 31);
        else
            return (base >> (shamt - 1)) & 1;
      case ROR:
        // ROR #0 encodes RRX: carry is the bit rotated out, i.e. bit 0.
        shamt = shamt & 0x1f;
        if (shamt == 0)
            return (base & 1); // RRX
        else
            return (base >> (shamt - 1)) & 1;
      default:
        ccprintf(std::cerr, "Unhandled shift type\n");
        exit(1);
        break;
    }
    return 0;
}
252
253
// Generate C for a shift by Rs
bool
ArmStaticInst::shift_carry_rs(uint32_t base, uint32_t shamt,
                              uint32_t type, uint32_t cfval) const
{
    // Carry-out of a register-specified shift, mirroring shift_rm_rs.
    // A shift amount of 0 always leaves the carry flag unchanged.
    enum ArmShiftType shiftType;
    shiftType = (enum ArmShiftType) type;

    if (shamt == 0)
        return cfval;

    switch (shiftType)
    {
      case LSL:
        // Shifting by more than 32 pushes even bit 0 past the carry.
        if (shamt > 32)
            return 0;
        else
            return (base >> (32 - shamt)) & 1;
      case LSR:
        if (shamt > 32)
            return 0;
        else
            return (base >> (shamt - 1)) & 1;
      case ASR:
        // ASR saturates: amounts above 32 behave like 32 (sign bit).
        if (shamt > 32)
            shamt = 32;
        return (base >> (shamt - 1)) & 1;
      case ROR:
        // Rotation is modulo 32; a residue of 0 means a full rotation,
        // whose carry is the original sign bit (treated as shamt 32).
        shamt = shamt & 0x1f;
        if (shamt == 0)
            shamt = 32;
        return (base >> (shamt - 1)) & 1;
      default:
        ccprintf(std::cerr, "Unhandled shift type\n");
        exit(1);
        break;
    }
    return 0;
}
293
294void
295ArmStaticInst::printIntReg(std::ostream &os, RegIndex reg_idx) const
296{
297 if (aarch64) {
298 if (reg_idx == INTREG_UREG0)
299 ccprintf(os, "ureg0");
300 else if (reg_idx == INTREG_SPX)
301 ccprintf(os, "%s%s", (intWidth == 32) ? "w" : "", "sp");
302 else if (reg_idx == INTREG_X31)
303 ccprintf(os, "%szr", (intWidth == 32) ? "w" : "x");
304 else
305 ccprintf(os, "%s%d", (intWidth == 32) ? "w" : "x", reg_idx);
306 } else {
307 switch (reg_idx) {
308 case PCReg:
309 ccprintf(os, "pc");
310 break;
311 case StackPointerReg:
312 ccprintf(os, "sp");
313 break;
314 case FramePointerReg:
315 ccprintf(os, "fp");
316 break;
317 case ReturnAddressReg:
318 ccprintf(os, "lr");
319 break;
320 default:
321 ccprintf(os, "r%d", reg_idx);
322 break;
323 }
324 }
325}
326
327void
328ArmStaticInst::printFloatReg(std::ostream &os, RegIndex reg_idx) const
329{
330 ccprintf(os, "f%d", reg_idx);
331}
332
333void
334ArmStaticInst::printVecReg(std::ostream &os, RegIndex reg_idx) const
335{
336 ccprintf(os, "v%d", reg_idx);
337}
338
339void
340ArmStaticInst::printCCReg(std::ostream &os, RegIndex reg_idx) const
341{
342 ccprintf(os, "cc_%s", ArmISA::ccRegName[reg_idx]);
343}
344
345void
346ArmStaticInst::printMiscReg(std::ostream &os, RegIndex reg_idx) const
347{
348 assert(reg_idx < NUM_MISCREGS);
349 ccprintf(os, "%s", ArmISA::miscRegName[reg_idx]);
350}
351
352void
353ArmStaticInst::printMnemonic(std::ostream &os,
354 const std::string &suffix,
355 bool withPred,
356 bool withCond64,
357 ConditionCode cond64) const
358{
359 os << " " << mnemonic;
360 if (withPred && !aarch64) {
361 printCondition(os, machInst.condCode);
362 os << suffix;
363 } else if (withCond64) {
364 os << ".";
365 printCondition(os, cond64);
366 os << suffix;
367 }
368 if (machInst.bigThumb)
369 os << ".w";
370 os << " ";
371}
372
373void
374ArmStaticInst::printTarget(std::ostream &os, Addr target,
375 const SymbolTable *symtab) const
376{
377 Addr symbolAddr;
378 std::string symbol;
379
380 if (symtab && symtab->findNearestSymbol(target, symbol, symbolAddr)) {
381 ccprintf(os, "<%s", symbol);
382 if (symbolAddr != target)
383 ccprintf(os, "+%d>", target - symbolAddr);
384 else
385 ccprintf(os, ">");
386 } else {
387 ccprintf(os, "%#x", target);
388 }
389}
390
void
ArmStaticInst::printCondition(std::ostream &os,
                              unsigned code,
                              bool noImplicit) const
{
    // Print the two-letter mnemonic for an ARM condition code.
    // COND_AL ("always") and COND_UC ("unconditional") are implicit in
    // normal disassembly and are only printed when noImplicit is set.
    switch (code) {
      case COND_EQ:
        os << "eq";
        break;
      case COND_NE:
        os << "ne";
        break;
      case COND_CS:
        os << "cs";
        break;
      case COND_CC:
        os << "cc";
        break;
      case COND_MI:
        os << "mi";
        break;
      case COND_PL:
        os << "pl";
        break;
      case COND_VS:
        os << "vs";
        break;
      case COND_VC:
        os << "vc";
        break;
      case COND_HI:
        os << "hi";
        break;
      case COND_LS:
        os << "ls";
        break;
      case COND_GE:
        os << "ge";
        break;
      case COND_LT:
        os << "lt";
        break;
      case COND_GT:
        os << "gt";
        break;
      case COND_LE:
        os << "le";
        break;
      case COND_AL:
        // This one is implicit.
        if (noImplicit)
            os << "al";
        break;
      case COND_UC:
        // Unconditional.
        if (noImplicit)
            os << "uc";
        break;
      default:
        panic("Unrecognized condition code %d.\n", code);
    }
}
453
454void
455ArmStaticInst::printMemSymbol(std::ostream &os,
456 const SymbolTable *symtab,
457 const std::string &prefix,
458 const Addr addr,
459 const std::string &suffix) const
460{
461 Addr symbolAddr;
462 std::string symbol;
463 if (symtab && symtab->findNearestSymbol(addr, symbol, symbolAddr)) {
464 ccprintf(os, "%s%s", prefix, symbol);
465 if (symbolAddr != addr)
466 ccprintf(os, "+%d", addr - symbolAddr);
467 ccprintf(os, suffix);
468 }
469}
470
471void
472ArmStaticInst::printShiftOperand(std::ostream &os,
473 IntRegIndex rm,
474 bool immShift,
475 uint32_t shiftAmt,
476 IntRegIndex rs,
477 ArmShiftType type) const
478{
479 bool firstOp = false;
480
481 if (rm != INTREG_ZERO) {
482 printIntReg(os, rm);
483 }
484
485 bool done = false;
486
487 if ((type == LSR || type == ASR) && immShift && shiftAmt == 0)
488 shiftAmt = 32;
489
490 switch (type) {
491 case LSL:
492 if (immShift && shiftAmt == 0) {
493 done = true;
494 break;
495 }
496 if (!firstOp)
497 os << ", ";
498 os << "LSL";
499 break;
500 case LSR:
501 if (!firstOp)
502 os << ", ";
503 os << "LSR";
504 break;
505 case ASR:
506 if (!firstOp)
507 os << ", ";
508 os << "ASR";
509 break;
510 case ROR:
511 if (immShift && shiftAmt == 0) {
512 if (!firstOp)
513 os << ", ";
514 os << "RRX";
515 done = true;
516 break;
517 }
518 if (!firstOp)
519 os << ", ";
520 os << "ROR";
521 break;
522 default:
523 panic("Tried to disassemble unrecognized shift type.\n");
524 }
525 if (!done) {
526 if (!firstOp)
527 os << " ";
528 if (immShift)
529 os << "#" << shiftAmt;
530 else
531 printIntReg(os, rs);
532 }
533}
534
535void
536ArmStaticInst::printExtendOperand(bool firstOperand, std::ostream &os,
537 IntRegIndex rm, ArmExtendType type,
538 int64_t shiftAmt) const
539{
540 if (!firstOperand)
541 ccprintf(os, ", ");
542 printIntReg(os, rm);
543 if (type == UXTX && shiftAmt == 0)
544 return;
545 switch (type) {
546 case UXTB: ccprintf(os, ", UXTB");
547 break;
548 case UXTH: ccprintf(os, ", UXTH");
549 break;
550 case UXTW: ccprintf(os, ", UXTW");
551 break;
552 case UXTX: ccprintf(os, ", LSL");
553 break;
554 case SXTB: ccprintf(os, ", SXTB");
555 break;
556 case SXTH: ccprintf(os, ", SXTH");
557 break;
558 case SXTW: ccprintf(os, ", SXTW");
559 break;
560 case SXTX: ccprintf(os, ", SXTW");
561 break;
562 }
563 if (type == UXTX || shiftAmt)
564 ccprintf(os, " #%d", shiftAmt);
565}
566
void
ArmStaticInst::printDataInst(std::ostream &os, bool withImm,
        bool immShift, bool s, IntRegIndex rd, IntRegIndex rn,
        IntRegIndex rm, IntRegIndex rs, uint32_t shiftAmt,
        ArmShiftType type, uint64_t imm) const
{
    // Disassemble a data-processing instruction:
    //   <mnemonic>[s] rd, rn, (#imm | rm-with-shift)
    // Operands equal to INTREG_ZERO are omitted entirely; firstOp
    // tracks whether a separating comma is needed.
    printMnemonic(os, s ? "s" : "");
    bool firstOp = true;

    // Destination
    if (rd != INTREG_ZERO) {
        firstOp = false;
        printIntReg(os, rd);
    }

    // Source 1.
    if (rn != INTREG_ZERO) {
        if (!firstOp)
            os << ", ";
        firstOp = false;
        printIntReg(os, rn);
    }

    if (!firstOp)
        os << ", ";
    if (withImm) {
        ccprintf(os, "#%ld", imm);
    } else {
        // Second source is a (possibly shifted) register operand.
        printShiftOperand(os, rm, immShift, shiftAmt, rs, type);
    }
}
598
599std::string
600ArmStaticInst::generateDisassembly(Addr pc,
601 const SymbolTable *symtab) const
602{
603 std::stringstream ss;
604 printMnemonic(ss);
605 return ss.str();
606}
607
608
609Fault
610ArmStaticInst::advSIMDFPAccessTrap64(ExceptionLevel el) const
611{
612 switch (el) {
613 case EL1:
614 return std::make_shared<SupervisorTrap>(machInst, 0x1E00000,
615 EC_TRAPPED_SIMD_FP);
616 case EL2:
617 return std::make_shared<HypervisorTrap>(machInst, 0x1E00000,
618 EC_TRAPPED_SIMD_FP);
619 case EL3:
620 return std::make_shared<SecureMonitorTrap>(machInst, 0x1E00000,
621 EC_TRAPPED_SIMD_FP);
622
623 default:
624 panic("Illegal EL in advSIMDFPAccessTrap64\n");
625 }
626}
627
628
629Fault
630ArmStaticInst::checkFPAdvSIMDTrap64(ThreadContext *tc, CPSR cpsr) const
631{
632 if (ArmSystem::haveVirtualization(tc) && !inSecureState(tc)) {
633 HCPTR cptrEnCheck = tc->readMiscReg(MISCREG_CPTR_EL2);
634 if (cptrEnCheck.tfp)
635 return advSIMDFPAccessTrap64(EL2);
636 }
637
638 if (ArmSystem::haveSecurity(tc)) {
639 HCPTR cptrEnCheck = tc->readMiscReg(MISCREG_CPTR_EL3);
640 if (cptrEnCheck.tfp)
641 return advSIMDFPAccessTrap64(EL3);
642 }
643
644 return NoFault;
645}
646
647Fault
648ArmStaticInst::checkFPAdvSIMDEnabled64(ThreadContext *tc,
649 CPSR cpsr, CPACR cpacr) const
650{
651 const ExceptionLevel el = (ExceptionLevel) (uint8_t)cpsr.el;
652 if ((el == EL0 && cpacr.fpen != 0x3) ||
653 (el == EL1 && !(cpacr.fpen & 0x1)))
654 return advSIMDFPAccessTrap64(EL1);
655
656 return checkFPAdvSIMDTrap64(tc, cpsr);
657}
658
Fault
ArmStaticInst::checkAdvSIMDOrFPEnabled32(ThreadContext *tc,
                                         CPSR cpsr, CPACR cpacr,
                                         NSACR nsacr, FPEXC fpexc,
                                         bool fpexc_check, bool advsimd) const
{
    // AArch32 AdvSIMD/FP enable and trap checks (CPACR, NSACR, FPEXC,
    // HCPTR): returns the fault to raise, or NoFault if the access is
    // permitted. advsimd selects the AdvSIMD-specific disable bits;
    // fpexc_check selects whether FPEXC.EN gates the access.
    const bool have_virtualization = ArmSystem::haveVirtualization(tc);
    const bool have_security = ArmSystem::haveSecurity(tc);
    const bool is_secure = inSecureState(tc);
    const ExceptionLevel cur_el = opModeToEL(currOpMode(tc));

    // EL0 under an AArch64 EL1 uses the AArch64 checks instead.
    if (cur_el == EL0 && ELIs64(tc, EL1))
        return checkFPAdvSIMDEnabled64(tc, cpsr, cpacr);

    uint8_t cpacr_cp10 = cpacr.cp10;
    bool cpacr_asedis = cpacr.asedis;

    // In Non-secure state, NSACR can force-disable AdvSIMD and CP10
    // access regardless of CPACR.
    if (have_security && !ELIs64(tc, EL3) && !is_secure) {
        if (nsacr.nsasedis)
            cpacr_asedis = true;
        if (nsacr.cp10 == 0)
            cpacr_cp10 = 0;
    }

    if (cur_el != EL2) {
        if (advsimd && cpacr_asedis)
            return disabledFault();

        // CPACR.cp10: EL0 needs the full 0b11 encoding; higher ELs
        // need bit 0 set.
        if ((cur_el == EL0 && cpacr_cp10 != 0x3) ||
            (cur_el != EL0 && !(cpacr_cp10 & 0x1)))
            return disabledFault();
    }

    if (fpexc_check && !fpexc.en)
        return disabledFault();

    // -- aarch32/exceptions/traps/AArch32.CheckFPAdvSIMDTrap --

    if (have_virtualization && !is_secure && ELIs64(tc, EL2))
        return checkFPAdvSIMDTrap64(tc, cpsr);

    if (have_virtualization && !is_secure) {
        HCPTR hcptr = tc->readMiscReg(MISCREG_HCPTR);
        bool hcptr_cp10 = hcptr.tcp10;
        bool hcptr_tase = hcptr.tase;

        // NOTE(review): the ARM ARM pseudocode forces the HCPTR trap
        // when NSACR.cp10 == '0', whereas this code forces it when
        // nsacr.cp10 is non-zero — worth confirming against
        // AArch32.CheckFPAdvSIMDTrap.
        if (have_security && !ELIs64(tc, EL3) && !is_secure) {
            if (nsacr.nsasedis)
                hcptr_tase = true;
            if (nsacr.cp10)
                hcptr_cp10 = true;
        }

        if ((advsimd && hcptr_tase) || hcptr_cp10) {
            // Syndrome differs for AdvSIMD vs plain FP (CP10) traps.
            const uint32_t iss = advsimd ? (1 << 5) : 0xA;
            if (cur_el == EL2) {
                return std::make_shared<UndefinedInstruction>(
                    machInst, iss,
                    EC_TRAPPED_HCPTR, mnemonic);
            } else {
                return std::make_shared<HypervisorTrap>(
                    machInst, iss,
                    EC_TRAPPED_HCPTR);
            }

        }
    }

    // Finally, an AArch64 EL3 can trap via CPTR_EL3.TFP.
    if (have_security && ELIs64(tc, EL3)) {
        HCPTR cptrEnCheck = tc->readMiscReg(MISCREG_CPTR_EL3);
        if (cptrEnCheck.tfp)
            return advSIMDFPAccessTrap64(EL3);
    }

    return NoFault;
}
735
736inline bool
737ArmStaticInst::isWFxTrapping(ThreadContext *tc,
738 ExceptionLevel tgtEl,
739 bool isWfe) const
740{
741 bool trap = false;
742 SCTLR sctlr = ((SCTLR)tc->readMiscReg(MISCREG_SCTLR_EL1));
743 HCR hcr = ((HCR)tc->readMiscReg(MISCREG_HCR_EL2));
744 SCR scr = ((SCR)tc->readMiscReg(MISCREG_SCR_EL3));
745
746 switch (tgtEl) {
747 case EL1:
748 trap = isWfe? !sctlr.ntwe : !sctlr.ntwi;
749 break;
750 case EL2:
751 trap = isWfe? hcr.twe : hcr.twi;
752 break;
753 case EL3:
754 trap = isWfe? scr.twe : scr.twi;
755 break;
756 default:
757 break;
758 }
759
760 return trap;
761}
762
763Fault
764ArmStaticInst::checkForWFxTrap32(ThreadContext *tc,
765 ExceptionLevel targetEL,
766 bool isWfe) const
767{
768 // Check if target exception level is implemented.
769 assert(ArmSystem::haveEL(tc, targetEL));
770
771 // Check for routing to AArch64: this happens if the
772 // target exception level (where the trap will be handled)
773 // is using aarch64
774 if (ELIs64(tc, targetEL)) {
775 return checkForWFxTrap64(tc, targetEL, isWfe);
776 }
777
778 // Check if processor needs to trap at selected exception level
779 bool trap = isWFxTrapping(tc, targetEL, isWfe);
780
781 if (trap) {
782 uint32_t iss = isWfe? 0x1E00001 : /* WFE Instruction syndrome */
783 0x1E00000; /* WFI Instruction syndrome */
784 switch (targetEL) {
785 case EL1:
786 return std::make_shared<UndefinedInstruction>(
787 machInst, iss,
788 EC_TRAPPED_WFI_WFE, mnemonic);
789 case EL2:
790 return std::make_shared<HypervisorTrap>(machInst, iss,
791 EC_TRAPPED_WFI_WFE);
792 case EL3:
793 return std::make_shared<SecureMonitorTrap>(machInst, iss,
794 EC_TRAPPED_WFI_WFE);
795 default:
796 panic("Unrecognized Exception Level: %d\n", targetEL);
797 }
798 }
799
800 return NoFault;
801}
802
803Fault
804ArmStaticInst::checkForWFxTrap64(ThreadContext *tc,
805 ExceptionLevel targetEL,
806 bool isWfe) const
807{
808 // Check if target exception level is implemented.
809 assert(ArmSystem::haveEL(tc, targetEL));
810
811 // Check if processor needs to trap at selected exception level
812 bool trap = isWFxTrapping(tc, targetEL, isWfe);
813
814 if (trap) {
815 uint32_t iss = isWfe? 0x1E00001 : /* WFE Instruction syndrome */
816 0x1E00000; /* WFI Instruction syndrome */
817 switch (targetEL) {
818 case EL1:
819 return std::make_shared<SupervisorTrap>(machInst, iss,
820 EC_TRAPPED_WFI_WFE);
821 case EL2:
822 return std::make_shared<HypervisorTrap>(machInst, iss,
823 EC_TRAPPED_WFI_WFE);
824 case EL3:
825 return std::make_shared<SecureMonitorTrap>(machInst, iss,
826 EC_TRAPPED_WFI_WFE);
827 default:
828 panic("Unrecognized Exception Level: %d\n", targetEL);
829 }
830 }
831
832 return NoFault;
833}
834
835Fault
836ArmStaticInst::trapWFx(ThreadContext *tc,
837 CPSR cpsr, SCR scr,
838 bool isWfe) const
839{
840 Fault fault = NoFault;
841 if (cpsr.el == EL0) {
842 fault = checkForWFxTrap32(tc, EL1, isWfe);
843 }
844
845 if ((fault == NoFault) &&
846 ArmSystem::haveEL(tc, EL2) && !inSecureState(scr, cpsr) &&
847 ((cpsr.el == EL0) || (cpsr.el == EL1))) {
848
849 fault = checkForWFxTrap32(tc, EL2, isWfe);
850 }
851
852 if ((fault == NoFault) &&
853 ArmSystem::haveEL(tc, EL3) && cpsr.el != EL3) {
854 fault = checkForWFxTrap32(tc, EL3, isWfe);
855 }
856
857 return fault;
858}
859
Fault
ArmStaticInst::checkSETENDEnabled(ThreadContext *tc, CPSR cpsr) const
{
    // SETEND can be disabled via the SED bit of the controlling SCTLR;
    // if disabled, executing it raises an UNDEFINED fault.
    bool setend_disabled(false);
    ExceptionLevel pstateEL = (ExceptionLevel)(uint8_t)(cpsr.el);

    if (pstateEL == EL2) {
        setend_disabled = ((SCTLR)tc->readMiscRegNoEffect(MISCREG_HSCTLR)).sed;
    } else {
        // Please note: in the armarm pseudocode there is a distinction
        // whether EL1 is aarch32 or aarch64:
        // if ELUsingAArch32(EL1) then SCTLR.SED else SCTLR[].SED;
        // Considering that SETEND is aarch32 only, ELUsingAArch32(EL1)
        // will always be true (hence using SCTLR.SED) except for
        // instruction executed at EL0, and with an AArch64 EL1.
        // In this case SCTLR_EL1 will be used. In gem5 the register is
        // mapped to SCTLR_ns. We can safely use SCTLR and choose the
        // appropriate bank version.

        // Get the index of the banked version of SCTLR:
        // SCTLR_s or SCTLR_ns.
        auto banked_sctlr = snsBankedIndex(
            MISCREG_SCTLR, tc, !inSecureState(tc));

        // SCTLR.SED bit is enabling/disabling the use of the SETEND
        // instruction.
        setend_disabled = ((SCTLR)tc->readMiscRegNoEffect(banked_sctlr)).sed;
    }

    return setend_disabled ? undefinedFault32(tc, pstateEL) :
                             NoFault;
}
891
892Fault
893ArmStaticInst::undefinedFault32(ThreadContext *tc,
894 ExceptionLevel pstateEL) const
895{
896 // Even if we are running in aarch32, the fault might be dealt with in
897 // aarch64 ISA.
898 if (generalExceptionsToAArch64(tc, pstateEL)) {
899 return undefinedFault64(tc, pstateEL);
900 } else {
901 // Please note: according to the ARM ARM pseudocode we should handle
902 // the case when EL2 is aarch64 and HCR.TGE is 1 as well.
903 // However this case is already handled by the routeToHyp method in
904 // ArmFault class.
905 return std::make_shared<UndefinedInstruction>(
906 machInst, 0,
907 EC_UNKNOWN, mnemonic);
908 }
909}
910
911Fault
912ArmStaticInst::undefinedFault64(ThreadContext *tc,
913 ExceptionLevel pstateEL) const
914{
915 switch (pstateEL) {
916 case EL0:
917 case EL1:
918 return std::make_shared<SupervisorTrap>(machInst, 0, EC_UNKNOWN);
919 case EL2:
920 return std::make_shared<HypervisorTrap>(machInst, 0, EC_UNKNOWN);
921 case EL3:
922 return std::make_shared<SecureMonitorTrap>(machInst, 0, EC_UNKNOWN);
923 default:
924 panic("Unrecognized Exception Level: %d\n", pstateEL);
925 break;
926 }
927
928 return NoFault;
929}
930
// Compute the IT state to restore from a saved PSR on exception
// return, zeroing it in the architecturally required cases.
static uint8_t
getRestoredITBits(ThreadContext *tc, CPSR spsr)
{
    // See: shared/functions/system/RestoredITBits in the ARM ARM

    const ExceptionLevel el = opModeToEL((OperatingMode) (uint8_t)spsr.mode);
    const uint8_t it = itState(spsr);

    // No IT state outside Thumb, or when the saved state is illegal.
    if (!spsr.t || spsr.il)
        return 0;

    // The IT bits are forced to zero when they are set to a reserved
    // value.
    if (bits(it, 7, 4) != 0 && bits(it, 3, 0) == 0)
        return 0;

    // ITD lives in HSCTLR when returning to EL2, SCTLR otherwise.
    const bool itd = el == EL2 ?
        ((SCTLR)tc->readMiscReg(MISCREG_HSCTLR)).itd :
        ((SCTLR)tc->readMiscReg(MISCREG_SCTLR)).itd;

    // The IT bits are forced to zero when returning to A32 state, or
    // when returning to an EL with the ITD bit set to 1, and the IT
    // bits are describing a multi-instruction block.
    if (itd && bits(it, 2, 0) != 0)
        return 0;

    return it;
}
959
// Implements the ARM ARM IllegalExceptionReturn checks: true when the
// saved PSR (spsr) describes a state that may not legally be returned
// to from the current state (cpsr).
static bool
illegalExceptionReturn(ThreadContext *tc, CPSR cpsr, CPSR spsr)
{
    const OperatingMode mode = (OperatingMode) (uint8_t)spsr.mode;
    if (unknownMode(mode))
        return true;

    const OperatingMode cur_mode = (OperatingMode) (uint8_t)cpsr.mode;
    const ExceptionLevel target_el = opModeToEL(mode);

    HCR hcr = ((HCR)tc->readMiscReg(MISCREG_HCR_EL2));
    SCR scr = ((SCR)tc->readMiscReg(MISCREG_SCR_EL3));

    // Exception returns may never move to a higher exception level.
    if (target_el > opModeToEL(cur_mode))
        return true;

    if (!ArmSystem::haveEL(tc, target_el))
        return true;

    // With TGE set, Non-secure EL1 is not a valid target.
    if (target_el == EL1 && ArmSystem::haveEL(tc, EL2) && scr.ns && hcr.tge)
        return true;

    // EL2 does not exist in Secure state when EL3 is present.
    if (target_el == EL2 && ArmSystem::haveEL(tc, EL3) && !scr.ns)
        return true;

    // The saved register width must agree with the target EL's width.
    bool spsr_mode_is_aarch32 = (spsr.width == 1);
    bool known, target_el_is_aarch32;
    std::tie(known, target_el_is_aarch32) = ELUsingAArch32K(tc, target_el);
    assert(known || (target_el == EL0 && ELIs64(tc, EL1)));

    if (known && (spsr_mode_is_aarch32 != target_el_is_aarch32))
        return true;

    if (!spsr.width) {
        // aarch64
        if (!ArmSystem::highestELIs64(tc))
            return true;
        // Spsr bit 1 is reserved (RES0) in AArch64 state.
        if (spsr & 0x2)
            return true;
        // EL0 always uses SP_EL0 (spsr.sp must be clear).
        if (target_el == EL0 && spsr.sp)
            return true;
    } else {
        // aarch32
        return unknownMode32(mode);
    }

    return false;
}
1008
CPSR
ArmStaticInst::getPSTATEFromPSR(ThreadContext *tc, CPSR cpsr, CPSR spsr) const
{
    // Build the PSTATE to install on exception return, from the current
    // CPSR and the saved SPSR (see aarch64/functions/eret and the
    // IllegalExceptionReturn rules in the ARM ARM).
    CPSR new_cpsr = 0;

    // gem5 doesn't implement single-stepping, so force the SS bit to
    // 0.
    new_cpsr.ss = 0;

    if (illegalExceptionReturn(tc, cpsr, spsr)) {
        // If the SPSR specifies an illegal exception return,
        // then PSTATE.{M, nRW, EL, SP} are unchanged and PSTATE.IL
        // is set to 1.
        new_cpsr.il = 1;
        if (cpsr.width) {
            new_cpsr.mode = cpsr.mode;
        } else {
            new_cpsr.width = cpsr.width;
            new_cpsr.el = cpsr.el;
            new_cpsr.sp = cpsr.sp;
        }
    } else {
        new_cpsr.il = spsr.il;
        if (spsr.width && unknownMode32((OperatingMode)(uint8_t)spsr.mode)) {
            // Reserved AArch32 mode in the SPSR: mark illegal.
            new_cpsr.il = 1;
        } else if (spsr.width) {
            new_cpsr.mode = spsr.mode;
        } else {
            new_cpsr.el = spsr.el;
            new_cpsr.sp = spsr.sp;
        }
    }

    // Flags are restored unconditionally.
    new_cpsr.nz = spsr.nz;
    new_cpsr.c = spsr.c;
    new_cpsr.v = spsr.v;
    if (new_cpsr.width) {
        // aarch32
        const ITSTATE it = getRestoredITBits(tc, spsr);
        new_cpsr.q = spsr.q;
        new_cpsr.ge = spsr.ge;
        new_cpsr.e = spsr.e;
        new_cpsr.aif = spsr.aif;
        new_cpsr.t = spsr.t;
        new_cpsr.it2 = it.top6;
        new_cpsr.it1 = it.bottom2;
    } else {
        // aarch64
        new_cpsr.daif = spsr.daif;
    }

    return new_cpsr;
}
1062
1063bool
1064ArmStaticInst::generalExceptionsToAArch64(ThreadContext *tc,
1065 ExceptionLevel pstateEL) const
1066{
1067 // Returns TRUE if exceptions normally routed to EL1 are being handled
1068 // at an Exception level using AArch64, because either EL1 is using
1069 // AArch64 or TGE is in force and EL2 is using AArch64.
1070 HCR hcr = ((HCR)tc->readMiscReg(MISCREG_HCR_EL2));
1071 return (pstateEL == EL0 && !ELIs32(tc, EL1)) ||
1072 (ArmSystem::haveEL(tc, EL2) && !inSecureState(tc) &&
1073 !ELIs32(tc, EL2) && hcr.tge);
1074}
1075
1076
1077}