/* static_inst.cc — scraped listing; concatenation of two revisions:
 * 12789:b28b286fa57d (first copy, truncated) and 13367:dc06baae4275. */
1/*
2 * Copyright (c) 2010-2014, 2016-2018 ARM Limited
3 * Copyright (c) 2013 Advanced Micro Devices, Inc.
4 * All rights reserved
5 *
6 * The license below extends only to copyright in the software and shall
7 * not be construed as granting a license to any other intellectual
8 * property including but not limited to intellectual property relating
9 * to a hardware implementation of the functionality of the software
10 * licensed hereunder. You may use the software subject to the license
11 * terms below provided that you ensure that this notice is replicated
12 * unmodified and in its entirety in all distributions of the software,
13 * modified or unmodified, in source code or in binary form.
14 *
15 * Copyright (c) 2007-2008 The Florida State University
16 * All rights reserved.
17 *
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions are
20 * met: redistributions of source code must retain the above copyright
21 * notice, this list of conditions and the following disclaimer;
22 * redistributions in binary form must reproduce the above copyright
23 * notice, this list of conditions and the following disclaimer in the
24 * documentation and/or other materials provided with the distribution;
25 * neither the name of the copyright holders nor the names of its
26 * contributors may be used to endorse or promote products derived from
27 * this software without specific prior written permission.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
30 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
31 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
32 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
33 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
34 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
35 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
36 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
37 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
38 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
39 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
40 *
41 * Authors: Stephen Hines
42 */
43
44#include "arch/arm/insts/static_inst.hh"
45
46#include "arch/arm/faults.hh"
47#include "base/condcodes.hh"
48#include "base/cprintf.hh"
49#include "base/loader/symtab.hh"
50#include "cpu/reg_class.hh"
51
52namespace ArmISA
53{
54// Shift Rm by an immediate value
55int32_t
56ArmStaticInst::shift_rm_imm(uint32_t base, uint32_t shamt,
57 uint32_t type, uint32_t cfval) const
58{
59 assert(shamt < 32);
60 ArmShiftType shiftType;
61 shiftType = (ArmShiftType)type;
62
63 switch (shiftType)
64 {
65 case LSL:
66 return base << shamt;
67 case LSR:
68 if (shamt == 0)
69 return 0;
70 else
71 return base >> shamt;
72 case ASR:
73 if (shamt == 0)
74 return (base >> 31) | -((base & (1 << 31)) >> 31);
75 else
76 return (base >> shamt) | -((base & (1 << 31)) >> shamt);
77 case ROR:
78 if (shamt == 0)
79 return (cfval << 31) | (base >> 1); // RRX
80 else
81 return (base << (32 - shamt)) | (base >> shamt);
82 default:
83 ccprintf(std::cerr, "Unhandled shift type\n");
84 exit(1);
85 break;
86 }
87 return 0;
88}
89
// Shift a register value for AArch64 shifted-register operands. The
// shift amount is taken modulo the operand width.
// NOTE(review): the ASR case uses the member intWidth rather than the
// width parameter for sign extension/masking — fine when callers pass
// width == intWidth, but confirm that invariant.
 90int64_t
 91ArmStaticInst::shiftReg64(uint64_t base, uint64_t shiftAmt,
 92 ArmShiftType type, uint8_t width) const
 93{
 94 shiftAmt = shiftAmt % width;
 95 ArmShiftType shiftType;
 96 shiftType = (ArmShiftType)type;
 97
 98 switch (shiftType)
 99 {
 100 case LSL:
 101 return base << shiftAmt;
 102 case LSR:
 103 if (shiftAmt == 0)
 104 return base;
 105 else
 106 return (base & mask(width)) >> shiftAmt;
 107 case ASR:
 108 if (shiftAmt == 0) {
 109 return base;
 110 } else {
// Replicate the sign bit into the vacated high bits.
 111 int sign_bit = bits(base, intWidth - 1);
 112 base >>= shiftAmt;
 113 base = sign_bit ? (base | ~mask(intWidth - shiftAmt)) : base;
 114 return base & mask(intWidth);
 115 }
 116 case ROR:
 117 if (shiftAmt == 0)
 118 return base;
 119 else
 120 return (base << (width - shiftAmt)) | (base >> shiftAmt);
 121 default:
 122 ccprintf(std::cerr, "Unhandled shift type\n");
 123 exit(1);
 124 break;
 125 }
 126 return 0;
 127}
128
// Zero- or sign-extend the low `len` bits of a register value and shift
// the result left by shiftAmt, truncated to `width` bits (AArch64
// extended-register operands, UXTB..SXTX).
// NOTE(review): an unrecognized extend type leaves len == 0; presumably
// the decoder never produces one — confirm.
129int64_t
130ArmStaticInst::extendReg64(uint64_t base, ArmExtendType type,
131 uint64_t shiftAmt, uint8_t width) const
132{
133 bool sign_extend = false;
134 int len = 0;
135 switch (type) {
136 case UXTB:
137 len = 8;
138 break;
139 case UXTH:
140 len = 16;
141 break;
142 case UXTW:
143 len = 32;
144 break;
145 case UXTX:
146 len = 64;
147 break;
148 case SXTB:
149 len = 8;
150 sign_extend = true;
151 break;
152 case SXTH:
153 len = 16;
154 sign_extend = true;
155 break;
156 case SXTW:
157 len = 32;
158 sign_extend = true;
159 break;
160 case SXTX:
161 len = 64;
162 sign_extend = true;
163 break;
164 }
// Clamp the extracted field so the shifted result fits in `width` bits.
165 len = len <= width - shiftAmt ? len : width - shiftAmt;
166 uint64_t tmp = (uint64_t) bits(base, len - 1, 0) << shiftAmt;
167 if (sign_extend) {
168 int sign_bit = bits(tmp, len + shiftAmt - 1);
169 tmp = sign_bit ? (tmp | ~mask(len + shiftAmt)) : tmp;
170 }
171 return tmp & mask(width);
172}
173
174// Shift Rm by Rs
175int32_t
176ArmStaticInst::shift_rm_rs(uint32_t base, uint32_t shamt,
177 uint32_t type, uint32_t cfval) const
178{
179 enum ArmShiftType shiftType;
180 shiftType = (enum ArmShiftType) type;
181
182 switch (shiftType)
183 {
184 case LSL:
185 if (shamt >= 32)
186 return 0;
187 else
188 return base << shamt;
189 case LSR:
190 if (shamt >= 32)
191 return 0;
192 else
193 return base >> shamt;
194 case ASR:
195 if (shamt >= 32)
196 return (base >> 31) | -((base & (1 << 31)) >> 31);
197 else
198 return (base >> shamt) | -((base & (1 << 31)) >> shamt);
199 case ROR:
200 shamt = shamt & 0x1f;
201 if (shamt == 0)
202 return base;
203 else
204 return (base << (32 - shamt)) | (base >> shamt);
205 default:
206 ccprintf(std::cerr, "Unhandled shift type\n");
207 exit(1);
208 break;
209 }
210 return 0;
211}
212
213
214// Generate C for a shift by immediate
215bool
216ArmStaticInst::shift_carry_imm(uint32_t base, uint32_t shamt,
217 uint32_t type, uint32_t cfval) const
218{
219 enum ArmShiftType shiftType;
220 shiftType = (enum ArmShiftType) type;
221
222 switch (shiftType)
223 {
224 case LSL:
225 if (shamt == 0)
226 return cfval;
227 else
228 return (base >> (32 - shamt)) & 1;
229 case LSR:
230 if (shamt == 0)
231 return (base >> 31);
232 else
233 return (base >> (shamt - 1)) & 1;
234 case ASR:
235 if (shamt == 0)
236 return (base >> 31);
237 else
238 return (base >> (shamt - 1)) & 1;
239 case ROR:
240 shamt = shamt & 0x1f;
241 if (shamt == 0)
242 return (base & 1); // RRX
243 else
244 return (base >> (shamt - 1)) & 1;
245 default:
246 ccprintf(std::cerr, "Unhandled shift type\n");
247 exit(1);
248 break;
249 }
250 return 0;
251}
252
253
// Compute the carry-out of a register-specified shift (ARM ARM
// "Shift_C"): a zero shift amount leaves the carry flag unchanged.
254// Generate C for a shift by Rs
255bool
256ArmStaticInst::shift_carry_rs(uint32_t base, uint32_t shamt,
257 uint32_t type, uint32_t cfval) const
258{
259 enum ArmShiftType shiftType;
260 shiftType = (enum ArmShiftType) type;
261
262 if (shamt == 0)
263 return cfval;
264
265 switch (shiftType)
266 {
267 case LSL:
268 if (shamt > 32)
269 return 0;
270 else
271 return (base >> (32 - shamt)) & 1;
272 case LSR:
273 if (shamt > 32)
274 return 0;
275 else
276 return (base >> (shamt - 1)) & 1;
277 case ASR:
// Shifts of 32 or more all yield the sign bit as carry.
278 if (shamt > 32)
279 shamt = 32;
280 return (base >> (shamt - 1)) & 1;
281 case ROR:
// ROR wraps modulo 32; a wrapped amount of 0 acts as a rotate by 32.
282 shamt = shamt & 0x1f;
283 if (shamt == 0)
284 shamt = 32;
285 return (base >> (shamt - 1)) & 1;
286 default:
287 ccprintf(std::cerr, "Unhandled shift type\n");
288 exit(1);
289 break;
290 }
291 return 0;
292}
293
// Print an integer register for disassembly: AArch64 names (wN/xN,
// [w]sp, [w]zr, ureg0) when in aarch64 state, otherwise the AArch32
// names (pc/sp/fp/lr, or rN).
294void
295ArmStaticInst::printIntReg(std::ostream &os, RegIndex reg_idx) const
296{
297 if (aarch64) {
298 if (reg_idx == INTREG_UREG0)
299 ccprintf(os, "ureg0");
300 else if (reg_idx == INTREG_SPX)
301 ccprintf(os, "%s%s", (intWidth == 32) ? "w" : "", "sp");
302 else if (reg_idx == INTREG_X31)
303 ccprintf(os, "%szr", (intWidth == 32) ? "w" : "x");
304 else
305 ccprintf(os, "%s%d", (intWidth == 32) ? "w" : "x", reg_idx);
306 } else {
307 switch (reg_idx) {
308 case PCReg:
309 ccprintf(os, "pc");
310 break;
311 case StackPointerReg:
312 ccprintf(os, "sp");
313 break;
314 case FramePointerReg:
315 ccprintf(os, "fp");
316 break;
317 case ReturnAddressReg:
318 ccprintf(os, "lr");
319 break;
320 default:
321 ccprintf(os, "r%d", reg_idx);
322 break;
323 }
324 }
325}
326
1/*
2 * Copyright (c) 2010-2014, 2016-2018 ARM Limited
3 * Copyright (c) 2013 Advanced Micro Devices, Inc.
4 * All rights reserved
5 *
6 * The license below extends only to copyright in the software and shall
7 * not be construed as granting a license to any other intellectual
8 * property including but not limited to intellectual property relating
9 * to a hardware implementation of the functionality of the software
10 * licensed hereunder. You may use the software subject to the license
11 * terms below provided that you ensure that this notice is replicated
12 * unmodified and in its entirety in all distributions of the software,
13 * modified or unmodified, in source code or in binary form.
14 *
15 * Copyright (c) 2007-2008 The Florida State University
16 * All rights reserved.
17 *
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions are
20 * met: redistributions of source code must retain the above copyright
21 * notice, this list of conditions and the following disclaimer;
22 * redistributions in binary form must reproduce the above copyright
23 * notice, this list of conditions and the following disclaimer in the
24 * documentation and/or other materials provided with the distribution;
25 * neither the name of the copyright holders nor the names of its
26 * contributors may be used to endorse or promote products derived from
27 * this software without specific prior written permission.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
30 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
31 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
32 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
33 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
34 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
35 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
36 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
37 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
38 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
39 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
40 *
41 * Authors: Stephen Hines
42 */
43
44#include "arch/arm/insts/static_inst.hh"
45
46#include "arch/arm/faults.hh"
47#include "base/condcodes.hh"
48#include "base/cprintf.hh"
49#include "base/loader/symtab.hh"
50#include "cpu/reg_class.hh"
51
52namespace ArmISA
53{
54// Shift Rm by an immediate value
55int32_t
56ArmStaticInst::shift_rm_imm(uint32_t base, uint32_t shamt,
57 uint32_t type, uint32_t cfval) const
58{
59 assert(shamt < 32);
60 ArmShiftType shiftType;
61 shiftType = (ArmShiftType)type;
62
63 switch (shiftType)
64 {
65 case LSL:
66 return base << shamt;
67 case LSR:
68 if (shamt == 0)
69 return 0;
70 else
71 return base >> shamt;
72 case ASR:
73 if (shamt == 0)
74 return (base >> 31) | -((base & (1 << 31)) >> 31);
75 else
76 return (base >> shamt) | -((base & (1 << 31)) >> shamt);
77 case ROR:
78 if (shamt == 0)
79 return (cfval << 31) | (base >> 1); // RRX
80 else
81 return (base << (32 - shamt)) | (base >> shamt);
82 default:
83 ccprintf(std::cerr, "Unhandled shift type\n");
84 exit(1);
85 break;
86 }
87 return 0;
88}
89
// Shift a register value for AArch64 shifted-register operands. The
// shift amount is taken modulo the operand width.
// NOTE(review): the ASR case uses the member intWidth rather than the
// width parameter for sign extension/masking — fine when callers pass
// width == intWidth, but confirm that invariant.
 90int64_t
 91ArmStaticInst::shiftReg64(uint64_t base, uint64_t shiftAmt,
 92 ArmShiftType type, uint8_t width) const
 93{
 94 shiftAmt = shiftAmt % width;
 95 ArmShiftType shiftType;
 96 shiftType = (ArmShiftType)type;
 97
 98 switch (shiftType)
 99 {
 100 case LSL:
 101 return base << shiftAmt;
 102 case LSR:
 103 if (shiftAmt == 0)
 104 return base;
 105 else
 106 return (base & mask(width)) >> shiftAmt;
 107 case ASR:
 108 if (shiftAmt == 0) {
 109 return base;
 110 } else {
// Replicate the sign bit into the vacated high bits.
 111 int sign_bit = bits(base, intWidth - 1);
 112 base >>= shiftAmt;
 113 base = sign_bit ? (base | ~mask(intWidth - shiftAmt)) : base;
 114 return base & mask(intWidth);
 115 }
 116 case ROR:
 117 if (shiftAmt == 0)
 118 return base;
 119 else
 120 return (base << (width - shiftAmt)) | (base >> shiftAmt);
 121 default:
 122 ccprintf(std::cerr, "Unhandled shift type\n");
 123 exit(1);
 124 break;
 125 }
 126 return 0;
 127}
128
// Zero- or sign-extend the low `len` bits of a register value and shift
// the result left by shiftAmt, truncated to `width` bits (AArch64
// extended-register operands, UXTB..SXTX).
// NOTE(review): an unrecognized extend type leaves len == 0; presumably
// the decoder never produces one — confirm.
129int64_t
130ArmStaticInst::extendReg64(uint64_t base, ArmExtendType type,
131 uint64_t shiftAmt, uint8_t width) const
132{
133 bool sign_extend = false;
134 int len = 0;
135 switch (type) {
136 case UXTB:
137 len = 8;
138 break;
139 case UXTH:
140 len = 16;
141 break;
142 case UXTW:
143 len = 32;
144 break;
145 case UXTX:
146 len = 64;
147 break;
148 case SXTB:
149 len = 8;
150 sign_extend = true;
151 break;
152 case SXTH:
153 len = 16;
154 sign_extend = true;
155 break;
156 case SXTW:
157 len = 32;
158 sign_extend = true;
159 break;
160 case SXTX:
161 len = 64;
162 sign_extend = true;
163 break;
164 }
// Clamp the extracted field so the shifted result fits in `width` bits.
165 len = len <= width - shiftAmt ? len : width - shiftAmt;
166 uint64_t tmp = (uint64_t) bits(base, len - 1, 0) << shiftAmt;
167 if (sign_extend) {
168 int sign_bit = bits(tmp, len + shiftAmt - 1);
169 tmp = sign_bit ? (tmp | ~mask(len + shiftAmt)) : tmp;
170 }
171 return tmp & mask(width);
172}
173
174// Shift Rm by Rs
175int32_t
176ArmStaticInst::shift_rm_rs(uint32_t base, uint32_t shamt,
177 uint32_t type, uint32_t cfval) const
178{
179 enum ArmShiftType shiftType;
180 shiftType = (enum ArmShiftType) type;
181
182 switch (shiftType)
183 {
184 case LSL:
185 if (shamt >= 32)
186 return 0;
187 else
188 return base << shamt;
189 case LSR:
190 if (shamt >= 32)
191 return 0;
192 else
193 return base >> shamt;
194 case ASR:
195 if (shamt >= 32)
196 return (base >> 31) | -((base & (1 << 31)) >> 31);
197 else
198 return (base >> shamt) | -((base & (1 << 31)) >> shamt);
199 case ROR:
200 shamt = shamt & 0x1f;
201 if (shamt == 0)
202 return base;
203 else
204 return (base << (32 - shamt)) | (base >> shamt);
205 default:
206 ccprintf(std::cerr, "Unhandled shift type\n");
207 exit(1);
208 break;
209 }
210 return 0;
211}
212
213
214// Generate C for a shift by immediate
215bool
216ArmStaticInst::shift_carry_imm(uint32_t base, uint32_t shamt,
217 uint32_t type, uint32_t cfval) const
218{
219 enum ArmShiftType shiftType;
220 shiftType = (enum ArmShiftType) type;
221
222 switch (shiftType)
223 {
224 case LSL:
225 if (shamt == 0)
226 return cfval;
227 else
228 return (base >> (32 - shamt)) & 1;
229 case LSR:
230 if (shamt == 0)
231 return (base >> 31);
232 else
233 return (base >> (shamt - 1)) & 1;
234 case ASR:
235 if (shamt == 0)
236 return (base >> 31);
237 else
238 return (base >> (shamt - 1)) & 1;
239 case ROR:
240 shamt = shamt & 0x1f;
241 if (shamt == 0)
242 return (base & 1); // RRX
243 else
244 return (base >> (shamt - 1)) & 1;
245 default:
246 ccprintf(std::cerr, "Unhandled shift type\n");
247 exit(1);
248 break;
249 }
250 return 0;
251}
252
253
// Compute the carry-out of a register-specified shift (ARM ARM
// "Shift_C"): a zero shift amount leaves the carry flag unchanged.
254// Generate C for a shift by Rs
255bool
256ArmStaticInst::shift_carry_rs(uint32_t base, uint32_t shamt,
257 uint32_t type, uint32_t cfval) const
258{
259 enum ArmShiftType shiftType;
260 shiftType = (enum ArmShiftType) type;
261
262 if (shamt == 0)
263 return cfval;
264
265 switch (shiftType)
266 {
267 case LSL:
268 if (shamt > 32)
269 return 0;
270 else
271 return (base >> (32 - shamt)) & 1;
272 case LSR:
273 if (shamt > 32)
274 return 0;
275 else
276 return (base >> (shamt - 1)) & 1;
277 case ASR:
// Shifts of 32 or more all yield the sign bit as carry.
278 if (shamt > 32)
279 shamt = 32;
280 return (base >> (shamt - 1)) & 1;
281 case ROR:
// ROR wraps modulo 32; a wrapped amount of 0 acts as a rotate by 32.
282 shamt = shamt & 0x1f;
283 if (shamt == 0)
284 shamt = 32;
285 return (base >> (shamt - 1)) & 1;
286 default:
287 ccprintf(std::cerr, "Unhandled shift type\n");
288 exit(1);
289 break;
290 }
291 return 0;
292}
293
// Print an integer register for disassembly: AArch64 names (wN/xN,
// [w]sp, [w]zr, ureg0) when in aarch64 state, otherwise the AArch32
// names (pc/sp/fp/lr, or rN).
294void
295ArmStaticInst::printIntReg(std::ostream &os, RegIndex reg_idx) const
296{
297 if (aarch64) {
298 if (reg_idx == INTREG_UREG0)
299 ccprintf(os, "ureg0");
300 else if (reg_idx == INTREG_SPX)
301 ccprintf(os, "%s%s", (intWidth == 32) ? "w" : "", "sp");
302 else if (reg_idx == INTREG_X31)
303 ccprintf(os, "%szr", (intWidth == 32) ? "w" : "x");
304 else
305 ccprintf(os, "%s%d", (intWidth == 32) ? "w" : "x", reg_idx);
306 } else {
307 switch (reg_idx) {
308 case PCReg:
309 ccprintf(os, "pc");
310 break;
311 case StackPointerReg:
312 ccprintf(os, "sp");
313 break;
314 case FramePointerReg:
315 ccprintf(os, "fp");
316 break;
317 case ReturnAddressReg:
318 ccprintf(os, "lr");
319 break;
320 default:
321 ccprintf(os, "r%d", reg_idx);
322 break;
323 }
324 }
325}
326
327void ArmStaticInst::printPFflags(std::ostream &os, int flag) const
328{
329 const char *flagtoprfop[]= { "PLD", "PLI", "PST", "Reserved"};
330 const char *flagtotarget[] = { "L1", "L2", "L3", "Reserved"};
331 const char *flagtopolicy[] = { "KEEP", "STRM"};
332
333 ccprintf(os, "%s%s%s", flagtoprfop[(flag>>3)&3],
334 flagtotarget[(flag>>1)&3], flagtopolicy[flag&1]);
335}
336
// Print a floating point register as "f<idx>".
 327void
 328ArmStaticInst::printFloatReg(std::ostream &os, RegIndex reg_idx) const
 329{
 330 ccprintf(os, "f%d", reg_idx);
 331}
332
// Print a SIMD/vector register as "v<idx>".
 333void
 334ArmStaticInst::printVecReg(std::ostream &os, RegIndex reg_idx) const
 335{
 336 ccprintf(os, "v%d", reg_idx);
 337}
338
// Print a condition-code register by its symbolic name, prefixed "cc_".
 339void
 340ArmStaticInst::printCCReg(std::ostream &os, RegIndex reg_idx) const
 341{
 342 ccprintf(os, "cc_%s", ArmISA::ccRegName[reg_idx]);
 343}
344
// Print a miscellaneous (system) register by name; the index must be a
// valid misc-register number.
 345void
 346ArmStaticInst::printMiscReg(std::ostream &os, RegIndex reg_idx) const
 347{
 348 assert(reg_idx < NUM_MISCREGS);
 349 ccprintf(os, "%s", ArmISA::miscRegName[reg_idx]);
 350}
351
// Print the instruction mnemonic, optionally followed by the AArch32
// predicate (condition code from machInst) or an explicit AArch64
// condition, then the given suffix. Wide Thumb encodings get ".w".
352void
353ArmStaticInst::printMnemonic(std::ostream &os,
354 const std::string &suffix,
355 bool withPred,
356 bool withCond64,
357 ConditionCode cond64) const
358{
359 os << " " << mnemonic;
360 if (withPred && !aarch64) {
361 printCondition(os, machInst.condCode);
362 os << suffix;
363 } else if (withCond64) {
364 os << ".";
365 printCondition(os, cond64);
366 os << suffix;
367 }
368 if (machInst.bigThumb)
369 os << ".w";
370 os << " ";
371}
372
373void
374ArmStaticInst::printTarget(std::ostream &os, Addr target,
375 const SymbolTable *symtab) const
376{
377 Addr symbolAddr;
378 std::string symbol;
379
380 if (symtab && symtab->findNearestSymbol(target, symbol, symbolAddr)) {
381 ccprintf(os, "<%s", symbol);
382 if (symbolAddr != target)
383 ccprintf(os, "+%d>", target - symbolAddr);
384 else
385 ccprintf(os, ">");
386 } else {
387 ccprintf(os, "%#x", target);
388 }
389}
390
391void
392ArmStaticInst::printCondition(std::ostream &os,
393 unsigned code,
394 bool noImplicit) const
395{
396 switch (code) {
397 case COND_EQ:
398 os << "eq";
399 break;
400 case COND_NE:
401 os << "ne";
402 break;
403 case COND_CS:
404 os << "cs";
405 break;
406 case COND_CC:
407 os << "cc";
408 break;
409 case COND_MI:
410 os << "mi";
411 break;
412 case COND_PL:
413 os << "pl";
414 break;
415 case COND_VS:
416 os << "vs";
417 break;
418 case COND_VC:
419 os << "vc";
420 break;
421 case COND_HI:
422 os << "hi";
423 break;
424 case COND_LS:
425 os << "ls";
426 break;
427 case COND_GE:
428 os << "ge";
429 break;
430 case COND_LT:
431 os << "lt";
432 break;
433 case COND_GT:
434 os << "gt";
435 break;
436 case COND_LE:
437 os << "le";
438 break;
439 case COND_AL:
440 // This one is implicit.
441 if (noImplicit)
442 os << "al";
443 break;
444 case COND_UC:
445 // Unconditional.
446 if (noImplicit)
447 os << "uc";
448 break;
449 default:
450 panic("Unrecognized condition code %d.\n", code);
451 }
452}
453
454void
455ArmStaticInst::printMemSymbol(std::ostream &os,
456 const SymbolTable *symtab,
457 const std::string &prefix,
458 const Addr addr,
459 const std::string &suffix) const
460{
461 Addr symbolAddr;
462 std::string symbol;
463 if (symtab && symtab->findNearestSymbol(addr, symbol, symbolAddr)) {
464 ccprintf(os, "%s%s", prefix, symbol);
465 if (symbolAddr != addr)
466 ccprintf(os, "+%d", addr - symbolAddr);
467 ccprintf(os, suffix);
468 }
469}
470
471void
472ArmStaticInst::printShiftOperand(std::ostream &os,
473 IntRegIndex rm,
474 bool immShift,
475 uint32_t shiftAmt,
476 IntRegIndex rs,
477 ArmShiftType type) const
478{
479 bool firstOp = false;
480
481 if (rm != INTREG_ZERO) {
482 printIntReg(os, rm);
483 }
484
485 bool done = false;
486
487 if ((type == LSR || type == ASR) && immShift && shiftAmt == 0)
488 shiftAmt = 32;
489
490 switch (type) {
491 case LSL:
492 if (immShift && shiftAmt == 0) {
493 done = true;
494 break;
495 }
496 if (!firstOp)
497 os << ", ";
498 os << "LSL";
499 break;
500 case LSR:
501 if (!firstOp)
502 os << ", ";
503 os << "LSR";
504 break;
505 case ASR:
506 if (!firstOp)
507 os << ", ";
508 os << "ASR";
509 break;
510 case ROR:
511 if (immShift && shiftAmt == 0) {
512 if (!firstOp)
513 os << ", ";
514 os << "RRX";
515 done = true;
516 break;
517 }
518 if (!firstOp)
519 os << ", ";
520 os << "ROR";
521 break;
522 default:
523 panic("Tried to disassemble unrecognized shift type.\n");
524 }
525 if (!done) {
526 if (!firstOp)
527 os << " ";
528 if (immShift)
529 os << "#" << shiftAmt;
530 else
531 printIntReg(os, rs);
532 }
533}
534
535void
536ArmStaticInst::printExtendOperand(bool firstOperand, std::ostream &os,
537 IntRegIndex rm, ArmExtendType type,
538 int64_t shiftAmt) const
539{
540 if (!firstOperand)
541 ccprintf(os, ", ");
542 printIntReg(os, rm);
543 if (type == UXTX && shiftAmt == 0)
544 return;
545 switch (type) {
546 case UXTB: ccprintf(os, ", UXTB");
547 break;
548 case UXTH: ccprintf(os, ", UXTH");
549 break;
550 case UXTW: ccprintf(os, ", UXTW");
551 break;
552 case UXTX: ccprintf(os, ", LSL");
553 break;
554 case SXTB: ccprintf(os, ", SXTB");
555 break;
556 case SXTH: ccprintf(os, ", SXTH");
557 break;
558 case SXTW: ccprintf(os, ", SXTW");
559 break;
560 case SXTX: ccprintf(os, ", SXTW");
561 break;
562 }
563 if (type == UXTX || shiftAmt)
564 ccprintf(os, " #%d", shiftAmt);
565}
566
// Print a data-processing instruction: mnemonic (with "s" appended when
// it sets flags), destination, first source, and either an immediate or
// a shifted-register operand.
// NOTE(review): "#%ld" is used with a uint64_t imm — presumably safe
// with gem5's type-aware ccprintf; confirm on ILP32 hosts.
567void
568ArmStaticInst::printDataInst(std::ostream &os, bool withImm,
569 bool immShift, bool s, IntRegIndex rd, IntRegIndex rn,
570 IntRegIndex rm, IntRegIndex rs, uint32_t shiftAmt,
571 ArmShiftType type, uint64_t imm) const
572{
573 printMnemonic(os, s ? "s" : "");
574 bool firstOp = true;
575
576 // Destination
577 if (rd != INTREG_ZERO) {
578 firstOp = false;
579 printIntReg(os, rd);
580 }
581
582 // Source 1.
583 if (rn != INTREG_ZERO) {
584 if (!firstOp)
585 os << ", ";
586 firstOp = false;
587 printIntReg(os, rn);
588 }
589
590 if (!firstOp)
591 os << ", ";
592 if (withImm) {
593 ccprintf(os, "#%ld", imm);
594 } else {
595 printShiftOperand(os, rm, immShift, shiftAmt, rs, type);
596 }
597}
598
599std::string
600ArmStaticInst::generateDisassembly(Addr pc,
601 const SymbolTable *symtab) const
602{
603 std::stringstream ss;
604 printMnemonic(ss);
605 return ss.str();
606}
607
// Handle an AArch32 BKPT: route it to the AArch64 software-breakpoint
// fault when EL1 is using AArch64, or when a non-secure AArch64 EL2 has
// HCR.TGE or MDCR.TDE set; otherwise raise an AArch32 prefetch-abort
// debug event.
608Fault
609ArmStaticInst::softwareBreakpoint32(ExecContext *xc, uint16_t imm) const
610{
611 const auto tc = xc->tcBase();
612 const HCR hcr = tc->readMiscReg(MISCREG_HCR_EL2);
613 const HDCR mdcr = tc->readMiscRegNoEffect(MISCREG_MDCR_EL2);
614 if ((ArmSystem::haveEL(tc, EL2) && !inSecureState(tc) &&
615 !ELIs32(tc, EL2) && (hcr.tge == 1 || mdcr.tde == 1)) ||
616 !ELIs32(tc, EL1)) {
617 // Route to AArch64 Software Breakpoint
618 return std::make_shared<SoftwareBreakpoint>(machInst, imm);
619 } else {
620 // Execute AArch32 Software Breakpoint
621 return std::make_shared<PrefetchAbort>(readPC(xc),
622 ArmFault::DebugEvent);
623 }
624}
625
// Build the fault for an AdvSIMD/FP access trapped to the given EL
// (EC = EC_TRAPPED_SIMD_FP, ISS = 0x1E00000).
626Fault
627ArmStaticInst::advSIMDFPAccessTrap64(ExceptionLevel el) const
628{
629 switch (el) {
630 case EL1:
631 return std::make_shared<SupervisorTrap>(machInst, 0x1E00000,
632 EC_TRAPPED_SIMD_FP);
633 case EL2:
634 return std::make_shared<HypervisorTrap>(machInst, 0x1E00000,
635 EC_TRAPPED_SIMD_FP);
636 case EL3:
637 return std::make_shared<SecureMonitorTrap>(machInst, 0x1E00000,
638 EC_TRAPPED_SIMD_FP);
639
640 default:
641 panic("Illegal EL in advSIMDFPAccessTrap64\n");
642 }
643}
644
645
// Check the CPTR_EL2.TFP and CPTR_EL3.TFP controls that trap AdvSIMD/FP
// accesses to EL2 (non-secure, virtualization present) or EL3.
646Fault
647ArmStaticInst::checkFPAdvSIMDTrap64(ThreadContext *tc, CPSR cpsr) const
648{
649 if (ArmSystem::haveVirtualization(tc) && !inSecureState(tc)) {
650 HCPTR cptrEnCheck = tc->readMiscReg(MISCREG_CPTR_EL2);
651 if (cptrEnCheck.tfp)
652 return advSIMDFPAccessTrap64(EL2);
653 }
654
655 if (ArmSystem::haveSecurity(tc)) {
656 HCPTR cptrEnCheck = tc->readMiscReg(MISCREG_CPTR_EL3);
657 if (cptrEnCheck.tfp)
658 return advSIMDFPAccessTrap64(EL3);
659 }
660
661 return NoFault;
662}
663
// AArch64 AdvSIMD/FP enable check: CPACR_EL1.FPEN gates EL0/EL1
// accesses, then the EL2/EL3 trap controls are tested.
664Fault
665ArmStaticInst::checkFPAdvSIMDEnabled64(ThreadContext *tc,
666 CPSR cpsr, CPACR cpacr) const
667{
668 const ExceptionLevel el = (ExceptionLevel) (uint8_t)cpsr.el;
669 if ((el == EL0 && cpacr.fpen != 0x3) ||
670 (el == EL1 && !(cpacr.fpen & 0x1)))
671 return advSIMDFPAccessTrap64(EL1);
672
673 return checkFPAdvSIMDTrap64(tc, cpsr);
674}
675
// AArch32 AdvSIMD/FP enable check (ARM ARM pseudocode
// AArch32.CheckAdvSIMDOrFPEnabled): applies the CPACR/NSACR/FPEXC
// controls, then the HCPTR and CPTR_EL3 trap checks.
676Fault
677ArmStaticInst::checkAdvSIMDOrFPEnabled32(ThreadContext *tc,
678 CPSR cpsr, CPACR cpacr,
679 NSACR nsacr, FPEXC fpexc,
680 bool fpexc_check, bool advsimd) const
681{
682 const bool have_virtualization = ArmSystem::haveVirtualization(tc);
683 const bool have_security = ArmSystem::haveSecurity(tc);
684 const bool is_secure = inSecureState(tc);
685 const ExceptionLevel cur_el = opModeToEL(currOpMode(tc));
686
687 if (cur_el == EL0 && ELIs64(tc, EL1))
688 return checkFPAdvSIMDEnabled64(tc, cpsr, cpacr);
689
690 uint8_t cpacr_cp10 = cpacr.cp10;
691 bool cpacr_asedis = cpacr.asedis;
692
// NSACR tightens the CPACR settings for non-secure state when EL3 is
// AArch32.
693 if (have_security && !ELIs64(tc, EL3) && !is_secure) {
694 if (nsacr.nsasedis)
695 cpacr_asedis = true;
696 if (nsacr.cp10 == 0)
697 cpacr_cp10 = 0;
698 }
699
700 if (cur_el != EL2) {
701 if (advsimd && cpacr_asedis)
702 return disabledFault();
703
704 if ((cur_el == EL0 && cpacr_cp10 != 0x3) ||
705 (cur_el != EL0 && !(cpacr_cp10 & 0x1)))
706 return disabledFault();
707 }
708
709 if (fpexc_check && !fpexc.en)
710 return disabledFault();
711
712 // -- aarch32/exceptions/traps/AArch32.CheckFPAdvSIMDTrap --
713
714 if (have_virtualization && !is_secure && ELIs64(tc, EL2))
715 return checkFPAdvSIMDTrap64(tc, cpsr);
716
717 if (have_virtualization && !is_secure) {
718 HCPTR hcptr = tc->readMiscReg(MISCREG_HCPTR);
719 bool hcptr_cp10 = hcptr.tcp10;
720 bool hcptr_tase = hcptr.tase;
721
722 if (have_security && !ELIs64(tc, EL3) && !is_secure) {
723 if (nsacr.nsasedis)
724 hcptr_tase = true;
725 if (nsacr.cp10)
726 hcptr_cp10 = true;
727 }
728
729 if ((advsimd && hcptr_tase) || hcptr_cp10) {
730 const uint32_t iss = advsimd ? (1 << 5) : 0xA;
731 if (cur_el == EL2) {
732 return std::make_shared<UndefinedInstruction>(
733 machInst, iss,
734 EC_TRAPPED_HCPTR, mnemonic);
735 } else {
736 return std::make_shared<HypervisorTrap>(
737 machInst, iss,
738 EC_TRAPPED_HCPTR);
739 }
740
741 }
742 }
743
744 if (have_security && ELIs64(tc, EL3)) {
745 HCPTR cptrEnCheck = tc->readMiscReg(MISCREG_CPTR_EL3);
746 if (cptrEnCheck.tfp)
747 return advSIMDFPAccessTrap64(EL3);
748 }
749
750 return NoFault;
751}
752
// Return true if a WFE/WFI must trap to tgtEl, based on SCTLR.nTWE/nTWI
// (EL1), HCR.TWE/TWI (EL2) or SCR.TWE/TWI (EL3).
753inline bool
754ArmStaticInst::isWFxTrapping(ThreadContext *tc,
755 ExceptionLevel tgtEl,
756 bool isWfe) const
757{
758 bool trap = false;
759 SCTLR sctlr = ((SCTLR)tc->readMiscReg(MISCREG_SCTLR_EL1));
760 HCR hcr = ((HCR)tc->readMiscReg(MISCREG_HCR_EL2));
761 SCR scr = ((SCR)tc->readMiscReg(MISCREG_SCR_EL3));
762
763 switch (tgtEl) {
764 case EL1:
// Note the inverted sense: the SCTLR bits are "no trap WFE/WFI".
765 trap = isWfe? !sctlr.ntwe : !sctlr.ntwi;
766 break;
767 case EL2:
768 trap = isWfe? hcr.twe : hcr.twi;
769 break;
770 case EL3:
771 trap = isWfe? scr.twe : scr.twi;
772 break;
773 default:
774 break;
775 }
776
777 return trap;
778}
779
// Check whether a WFE/WFI executed in AArch32 traps to targetEL; routes
// to the AArch64 variant when that EL is using AArch64.
780Fault
781ArmStaticInst::checkForWFxTrap32(ThreadContext *tc,
782 ExceptionLevel targetEL,
783 bool isWfe) const
784{
785 // Check if target exception level is implemented.
786 assert(ArmSystem::haveEL(tc, targetEL));
787
788 // Check for routing to AArch64: this happens if the
789 // target exception level (where the trap will be handled)
790 // is using aarch64
791 if (ELIs64(tc, targetEL)) {
792 return checkForWFxTrap64(tc, targetEL, isWfe);
793 }
794
795 // Check if processor needs to trap at selected exception level
796 bool trap = isWFxTrapping(tc, targetEL, isWfe);
797
798 if (trap) {
799 uint32_t iss = isWfe? 0x1E00001 : /* WFE Instruction syndrome */
800 0x1E00000; /* WFI Instruction syndrome */
801 switch (targetEL) {
802 case EL1:
803 return std::make_shared<UndefinedInstruction>(
804 machInst, iss,
805 EC_TRAPPED_WFI_WFE, mnemonic);
806 case EL2:
807 return std::make_shared<HypervisorTrap>(machInst, iss,
808 EC_TRAPPED_WFI_WFE);
809 case EL3:
810 return std::make_shared<SecureMonitorTrap>(machInst, iss,
811 EC_TRAPPED_WFI_WFE);
812 default:
813 panic("Unrecognized Exception Level: %d\n", targetEL);
814 }
815 }
816
817 return NoFault;
818}
819
// Check whether a WFE/WFI traps to targetEL when that EL is using
// AArch64; builds the appropriate trap fault with the WFE/WFI syndrome.
820Fault
821ArmStaticInst::checkForWFxTrap64(ThreadContext *tc,
822 ExceptionLevel targetEL,
823 bool isWfe) const
824{
825 // Check if target exception level is implemented.
826 assert(ArmSystem::haveEL(tc, targetEL));
827
828 // Check if processor needs to trap at selected exception level
829 bool trap = isWFxTrapping(tc, targetEL, isWfe);
830
831 if (trap) {
832 uint32_t iss = isWfe? 0x1E00001 : /* WFE Instruction syndrome */
833 0x1E00000; /* WFI Instruction syndrome */
834 switch (targetEL) {
835 case EL1:
836 return std::make_shared<SupervisorTrap>(machInst, iss,
837 EC_TRAPPED_WFI_WFE);
838 case EL2:
839 return std::make_shared<HypervisorTrap>(machInst, iss,
840 EC_TRAPPED_WFI_WFE);
841 case EL3:
842 return std::make_shared<SecureMonitorTrap>(machInst, iss,
843 EC_TRAPPED_WFI_WFE);
844 default:
845 panic("Unrecognized Exception Level: %d\n", targetEL);
846 }
847 }
848
849 return NoFault;
850}
851
// Apply the WFE/WFI trap checks in priority order: EL1 (for EL0 only),
// then EL2 (non-secure EL0/EL1), then EL3 (any EL below EL3).
852Fault
853ArmStaticInst::trapWFx(ThreadContext *tc,
854 CPSR cpsr, SCR scr,
855 bool isWfe) const
856{
857 Fault fault = NoFault;
858 if (cpsr.el == EL0) {
859 fault = checkForWFxTrap32(tc, EL1, isWfe);
860 }
861
862 if ((fault == NoFault) &&
863 ArmSystem::haveEL(tc, EL2) && !inSecureState(scr, cpsr) &&
864 ((cpsr.el == EL0) || (cpsr.el == EL1))) {
865
866 fault = checkForWFxTrap32(tc, EL2, isWfe);
867 }
868
869 if ((fault == NoFault) &&
870 ArmSystem::haveEL(tc, EL3) && cpsr.el != EL3) {
871 fault = checkForWFxTrap32(tc, EL3, isWfe);
872 }
873
874 return fault;
875}
876
// Check whether SETEND is disabled by SCTLR.SED (HSCTLR.SED at EL2) and
// raise an undefined-instruction fault if so.
877Fault
878ArmStaticInst::checkSETENDEnabled(ThreadContext *tc, CPSR cpsr) const
879{
880 bool setend_disabled(false);
881 ExceptionLevel pstateEL = (ExceptionLevel)(uint8_t)(cpsr.el);
882
883 if (pstateEL == EL2) {
884 setend_disabled = ((SCTLR)tc->readMiscRegNoEffect(MISCREG_HSCTLR)).sed;
885 } else {
886 // Please note: in the armarm pseudocode there is a distinction
887 // whether EL1 is aarch32 or aarch64:
888 // if ELUsingAArch32(EL1) then SCTLR.SED else SCTLR[].SED;
889 // Considering that SETEND is aarch32 only, ELUsingAArch32(EL1)
890 // will always be true (hence using SCTLR.SED) except for
891 // instruction executed at EL0, and with an AArch64 EL1.
892 // In this case SCTLR_EL1 will be used. In gem5 the register is
893 // mapped to SCTLR_ns. We can safely use SCTLR and choose the
894 // appropriate bank version.
895
896 // Get the index of the banked version of SCTLR:
897 // SCTLR_s or SCTLR_ns.
898 auto banked_sctlr = snsBankedIndex(
899 MISCREG_SCTLR, tc, !inSecureState(tc));
900
901 // SCTLR.SED bit is enabling/disabling the use of SETEND instruction.
902 setend_disabled = ((SCTLR)tc->readMiscRegNoEffect(banked_sctlr)).sed;
903 }
904
905 return setend_disabled ? undefinedFault32(tc, pstateEL) :
906 NoFault;
907}
908
// Build an undefined-instruction fault for AArch32 execution, routing
// it to the AArch64 handler when general exceptions go to AArch64.
909Fault
910ArmStaticInst::undefinedFault32(ThreadContext *tc,
911 ExceptionLevel pstateEL) const
912{
913 // Even if we are running in aarch32, the fault might be dealt with in
914 // aarch64 ISA.
915 if (generalExceptionsToAArch64(tc, pstateEL)) {
916 return undefinedFault64(tc, pstateEL);
917 } else {
918 // Please note: according to the ARM ARM pseudocode we should handle
919 // the case when EL2 is aarch64 and HCR.TGE is 1 as well.
920 // However this case is already handled by the routeToHyp method in
921 // ArmFault class.
922 return std::make_shared<UndefinedInstruction>(
923 machInst, 0,
924 EC_UNKNOWN, mnemonic);
925 }
926}
927
928Fault
929ArmStaticInst::undefinedFault64(ThreadContext *tc,
930 ExceptionLevel pstateEL) const
931{
932 switch (pstateEL) {
933 case EL0:
934 case EL1:
935 return std::make_shared<SupervisorTrap>(machInst, 0, EC_UNKNOWN);
936 case EL2:
937 return std::make_shared<HypervisorTrap>(machInst, 0, EC_UNKNOWN);
938 case EL3:
939 return std::make_shared<SecureMonitorTrap>(machInst, 0, EC_UNKNOWN);
940 default:
941 panic("Unrecognized Exception Level: %d\n", pstateEL);
942 break;
943 }
944
945 return NoFault;
946}
947
// Compute the ITSTATE value to restore from an SPSR on exception
// return, forcing it to zero in the cases the architecture requires.
// See: shared/functions/system/RestoredITBits in the ARM ARM
static uint8_t
getRestoredITBits(ThreadContext *tc, CPSR spsr)
{
    const ExceptionLevel el = opModeToEL((OperatingMode) (uint8_t)spsr.mode);
    const uint8_t it = itState(spsr);

    // No IT state when returning to A32 (non-Thumb) state or when the
    // SPSR marks the return as illegal.
    if (!spsr.t || spsr.il)
        return 0;

    // The IT bits are forced to zero when they are set to a reserved
    // value.
    if (bits(it, 7, 4) != 0 && bits(it, 3, 0) == 0)
        return 0;

    // The ITD (IT disable) control comes from HSCTLR at EL2 and from
    // SCTLR otherwise.
    const bool itd = el == EL2 ?
        ((SCTLR)tc->readMiscReg(MISCREG_HSCTLR)).itd :
        ((SCTLR)tc->readMiscReg(MISCREG_SCTLR)).itd;

    // The IT bits are forced to zero when returning to an EL with the
    // ITD bit set to 1, and the IT bits are describing a
    // multi-instruction block.
    if (itd && bits(it, 2, 0) != 0)
        return 0;

    return it;
}
976
// Check whether an exception return described by spsr is illegal
// (ARM ARM: IllegalExceptionReturn). The caller reacts to an illegal
// return by setting PSTATE.IL instead of changing M/nRW/EL/SP.
static bool
illegalExceptionReturn(ThreadContext *tc, CPSR cpsr, CPSR spsr)
{
    const OperatingMode mode = (OperatingMode) (uint8_t)spsr.mode;
    if (unknownMode(mode))
        return true;

    const OperatingMode cur_mode = (OperatingMode) (uint8_t)cpsr.mode;
    const ExceptionLevel target_el = opModeToEL(mode);

    HCR hcr = ((HCR)tc->readMiscReg(MISCREG_HCR_EL2));
    SCR scr = ((SCR)tc->readMiscReg(MISCREG_SCR_EL3));

    // An exception return may never target a higher exception level.
    if (target_el > opModeToEL(cur_mode))
        return true;

    // The target exception level must be implemented.
    if (!ArmSystem::haveEL(tc, target_el))
        return true;

    // EL1 is unreachable while HCR_EL2.TGE routes non-secure
    // exceptions to EL2.
    if (target_el == EL1 && ArmSystem::haveEL(tc, EL2) && scr.ns && hcr.tge)
        return true;

    // EL2 does not exist in secure state when EL3 is implemented.
    if (target_el == EL2 && ArmSystem::haveEL(tc, EL3) && !scr.ns)
        return true;

    // The SPSR execution state must match the target EL's register
    // width, where the latter is known.
    bool spsr_mode_is_aarch32 = (spsr.width == 1);
    bool known, target_el_is_aarch32;
    std::tie(known, target_el_is_aarch32) = ELUsingAArch32K(tc, target_el);
    assert(known || (target_el == EL0 && ELIs64(tc, EL1)));

    if (known && (spsr_mode_is_aarch32 != target_el_is_aarch32))
        return true;

    if (!spsr.width) {
        // aarch64
        if (!ArmSystem::highestELIs64(tc))
            return true;
        // Bit 1 of the mode field is reserved (must be 0) for AArch64
        // modes — presumably SPSR.M[1]; confirm against the ARM ARM.
        if (spsr & 0x2)
            return true;
        // Returning to EL0 with SP selection set is invalid (EL0t only).
        if (target_el == EL0 && spsr.sp)
            return true;
    } else {
        // aarch32: any remaining reserved mode encoding is illegal.
        return unknownMode32(mode);
    }

    return false;
}
1025
// Build the new PSTATE/CPSR value produced by an exception return from
// the given SPSR (ARM ARM: SetPSTATEFromPSR). Illegal returns keep
// M/nRW/EL/SP unchanged and set PSTATE.IL.
CPSR
ArmStaticInst::getPSTATEFromPSR(ThreadContext *tc, CPSR cpsr, CPSR spsr) const
{
    CPSR new_cpsr = 0;

    // gem5 doesn't implement single-stepping, so force the SS bit to
    // 0.
    new_cpsr.ss = 0;

    if (illegalExceptionReturn(tc, cpsr, spsr)) {
        // If the SPSR specifies an illegal exception return,
        // then PSTATE.{M, nRW, EL, SP} are unchanged and PSTATE.IL
        // is set to 1.
        new_cpsr.il = 1;
        if (cpsr.width) {
            new_cpsr.mode = cpsr.mode;
        } else {
            new_cpsr.width = cpsr.width;
            new_cpsr.el = cpsr.el;
            new_cpsr.sp = cpsr.sp;
        }
    } else {
        new_cpsr.il = spsr.il;
        if (spsr.width && unknownMode32((OperatingMode)(uint8_t)spsr.mode)) {
            // Reserved AArch32 mode encoding: flag an illegal state.
            new_cpsr.il = 1;
        } else if (spsr.width) {
            new_cpsr.mode = spsr.mode;
        } else {
            new_cpsr.el = spsr.el;
            new_cpsr.sp = spsr.sp;
        }
    }

    // Condition flags always come from the SPSR.
    new_cpsr.nz = spsr.nz;
    new_cpsr.c = spsr.c;
    new_cpsr.v = spsr.v;
    if (new_cpsr.width) {
        // aarch32: restore Q/GE/E/AIF/T and the (possibly zeroed) IT bits.
        const ITSTATE it = getRestoredITBits(tc, spsr);
        new_cpsr.q = spsr.q;
        new_cpsr.ge = spsr.ge;
        new_cpsr.e = spsr.e;
        new_cpsr.aif = spsr.aif;
        new_cpsr.t = spsr.t;
        new_cpsr.it2 = it.top6;
        new_cpsr.it1 = it.bottom2;
    } else {
        // aarch64
        new_cpsr.daif = spsr.daif;
    }

    return new_cpsr;
}
1079
1080bool
1081ArmStaticInst::generalExceptionsToAArch64(ThreadContext *tc,
1082 ExceptionLevel pstateEL) const
1083{
1084 // Returns TRUE if exceptions normally routed to EL1 are being handled
1085 // at an Exception level using AArch64, because either EL1 is using
1086 // AArch64 or TGE is in force and EL2 is using AArch64.
1087 HCR hcr = ((HCR)tc->readMiscReg(MISCREG_HCR_EL2));
1088 return (pstateEL == EL0 && !ELIs32(tc, EL1)) ||
1089 (ArmSystem::haveEL(tc, EL2) && !inSecureState(tc) &&
1090 !ELIs32(tc, EL2) && hcr.tge);
1091}
1092
1093
1094}
337void
338ArmStaticInst::printFloatReg(std::ostream &os, RegIndex reg_idx) const
339{
340 ccprintf(os, "f%d", reg_idx);
341}
342
343void
344ArmStaticInst::printVecReg(std::ostream &os, RegIndex reg_idx) const
345{
346 ccprintf(os, "v%d", reg_idx);
347}
348
349void
350ArmStaticInst::printCCReg(std::ostream &os, RegIndex reg_idx) const
351{
352 ccprintf(os, "cc_%s", ArmISA::ccRegName[reg_idx]);
353}
354
355void
356ArmStaticInst::printMiscReg(std::ostream &os, RegIndex reg_idx) const
357{
358 assert(reg_idx < NUM_MISCREGS);
359 ccprintf(os, "%s", ArmISA::miscRegName[reg_idx]);
360}
361
// Print the instruction mnemonic, optionally followed by the AArch32
// predicate condition (withPred), or an explicit ".cond" (withCond64),
// then the given suffix, and ".w" for wide Thumb encodings.
void
ArmStaticInst::printMnemonic(std::ostream &os,
                             const std::string &suffix,
                             bool withPred,
                             bool withCond64,
                             ConditionCode cond64) const
{
    os << " " << mnemonic;
    if (withPred && !aarch64) {
        // AArch32: the condition comes from the instruction's own
        // predicate field.
        printCondition(os, machInst.condCode);
        os << suffix;
    } else if (withCond64) {
        // AArch64 conditional forms (e.g. B.cond) print an explicit
        // condition after a dot.
        os << ".";
        printCondition(os, cond64);
        os << suffix;
    }
    if (machInst.bigThumb)
        os << ".w";
    os << " ";
}
382
383void
384ArmStaticInst::printTarget(std::ostream &os, Addr target,
385 const SymbolTable *symtab) const
386{
387 Addr symbolAddr;
388 std::string symbol;
389
390 if (symtab && symtab->findNearestSymbol(target, symbol, symbolAddr)) {
391 ccprintf(os, "<%s", symbol);
392 if (symbolAddr != target)
393 ccprintf(os, "+%d>", target - symbolAddr);
394 else
395 ccprintf(os, ">");
396 } else {
397 ccprintf(os, "%#x", target);
398 }
399}
400
// Print the mnemonic suffix for a condition code. COND_AL and COND_UC
// are implicit and only printed when noImplicit is set.
void
ArmStaticInst::printCondition(std::ostream &os,
                              unsigned code,
                              bool noImplicit) const
{
    switch (code) {
      case COND_EQ:
        os << "eq";
        break;
      case COND_NE:
        os << "ne";
        break;
      case COND_CS:
        os << "cs";
        break;
      case COND_CC:
        os << "cc";
        break;
      case COND_MI:
        os << "mi";
        break;
      case COND_PL:
        os << "pl";
        break;
      case COND_VS:
        os << "vs";
        break;
      case COND_VC:
        os << "vc";
        break;
      case COND_HI:
        os << "hi";
        break;
      case COND_LS:
        os << "ls";
        break;
      case COND_GE:
        os << "ge";
        break;
      case COND_LT:
        os << "lt";
        break;
      case COND_GT:
        os << "gt";
        break;
      case COND_LE:
        os << "le";
        break;
      case COND_AL:
        // This one is implicit.
        if (noImplicit)
            os << "al";
        break;
      case COND_UC:
        // Unconditional.
        if (noImplicit)
            os << "uc";
        break;
      default:
        panic("Unrecognized condition code %d.\n", code);
    }
}
463
464void
465ArmStaticInst::printMemSymbol(std::ostream &os,
466 const SymbolTable *symtab,
467 const std::string &prefix,
468 const Addr addr,
469 const std::string &suffix) const
470{
471 Addr symbolAddr;
472 std::string symbol;
473 if (symtab && symtab->findNearestSymbol(addr, symbol, symbolAddr)) {
474 ccprintf(os, "%s%s", prefix, symbol);
475 if (symbolAddr != addr)
476 ccprintf(os, "+%d", addr - symbolAddr);
477 ccprintf(os, suffix);
478 }
479}
480
// Print a shifted-register operand, e.g. ", r3, LSL #2". An immediate
// amount of 0 means 32 for LSR/ASR, and ROR #0 prints as RRX.
void
ArmStaticInst::printShiftOperand(std::ostream &os,
                                 IntRegIndex rm,
                                 bool immShift,
                                 uint32_t shiftAmt,
                                 IntRegIndex rs,
                                 ArmShiftType type) const
{
    // NOTE(review): firstOp is never set to true, so the "!firstOp"
    // guards below always print the separator — looks like dead
    // logic; confirm before simplifying.
    bool firstOp = false;

    if (rm != INTREG_ZERO) {
        printIntReg(os, rm);
    }

    bool done = false;

    // An immediate shift amount of 0 encodes a shift of 32 for LSR/ASR.
    if ((type == LSR || type == ASR) && immShift && shiftAmt == 0)
        shiftAmt = 32;

    switch (type) {
      case LSL:
        if (immShift && shiftAmt == 0) {
            // LSL #0 is a plain register operand: print nothing more.
            done = true;
            break;
        }
        if (!firstOp)
            os << ", ";
        os << "LSL";
        break;
      case LSR:
        if (!firstOp)
            os << ", ";
        os << "LSR";
        break;
      case ASR:
        if (!firstOp)
            os << ", ";
        os << "ASR";
        break;
      case ROR:
        if (immShift && shiftAmt == 0) {
            // ROR #0 encodes RRX (rotate right with extend).
            if (!firstOp)
                os << ", ";
            os << "RRX";
            done = true;
            break;
        }
        if (!firstOp)
            os << ", ";
        os << "ROR";
        break;
      default:
        panic("Tried to disassemble unrecognized shift type.\n");
    }
    if (!done) {
        if (!firstOp)
            os << " ";
        // Immediate shifts print "#n"; register shifts print the
        // shift-amount register.
        if (immShift)
            os << "#" << shiftAmt;
        else
            printIntReg(os, rs);
    }
}
544
545void
546ArmStaticInst::printExtendOperand(bool firstOperand, std::ostream &os,
547 IntRegIndex rm, ArmExtendType type,
548 int64_t shiftAmt) const
549{
550 if (!firstOperand)
551 ccprintf(os, ", ");
552 printIntReg(os, rm);
553 if (type == UXTX && shiftAmt == 0)
554 return;
555 switch (type) {
556 case UXTB: ccprintf(os, ", UXTB");
557 break;
558 case UXTH: ccprintf(os, ", UXTH");
559 break;
560 case UXTW: ccprintf(os, ", UXTW");
561 break;
562 case UXTX: ccprintf(os, ", LSL");
563 break;
564 case SXTB: ccprintf(os, ", SXTB");
565 break;
566 case SXTH: ccprintf(os, ", SXTH");
567 break;
568 case SXTW: ccprintf(os, ", SXTW");
569 break;
570 case SXTX: ccprintf(os, ", SXTW");
571 break;
572 }
573 if (type == UXTX || shiftAmt)
574 ccprintf(os, " #%d", shiftAmt);
575}
576
577void
578ArmStaticInst::printDataInst(std::ostream &os, bool withImm,
579 bool immShift, bool s, IntRegIndex rd, IntRegIndex rn,
580 IntRegIndex rm, IntRegIndex rs, uint32_t shiftAmt,
581 ArmShiftType type, uint64_t imm) const
582{
583 printMnemonic(os, s ? "s" : "");
584 bool firstOp = true;
585
586 // Destination
587 if (rd != INTREG_ZERO) {
588 firstOp = false;
589 printIntReg(os, rd);
590 }
591
592 // Source 1.
593 if (rn != INTREG_ZERO) {
594 if (!firstOp)
595 os << ", ";
596 firstOp = false;
597 printIntReg(os, rn);
598 }
599
600 if (!firstOp)
601 os << ", ";
602 if (withImm) {
603 ccprintf(os, "#%ld", imm);
604 } else {
605 printShiftOperand(os, rm, immShift, shiftAmt, rs, type);
606 }
607}
608
609std::string
610ArmStaticInst::generateDisassembly(Addr pc,
611 const SymbolTable *symtab) const
612{
613 std::stringstream ss;
614 printMnemonic(ss);
615 return ss.str();
616}
617
// Handle an AArch32 software breakpoint (BKPT): route to the AArch64
// SoftwareBreakpoint fault when debug exceptions are taken to an
// AArch64 EL (non-secure AArch64 EL2 with HCR.TGE or MDCR.TDE set, or
// an AArch64 EL1); otherwise raise an AArch32 prefetch-abort debug
// event.
Fault
ArmStaticInst::softwareBreakpoint32(ExecContext *xc, uint16_t imm) const
{
    const auto tc = xc->tcBase();
    const HCR hcr = tc->readMiscReg(MISCREG_HCR_EL2);
    const HDCR mdcr = tc->readMiscRegNoEffect(MISCREG_MDCR_EL2);
    if ((ArmSystem::haveEL(tc, EL2) && !inSecureState(tc) &&
         !ELIs32(tc, EL2) && (hcr.tge == 1 || mdcr.tde == 1)) ||
         !ELIs32(tc, EL1)) {
        // Route to AArch64 Software Breakpoint
        return std::make_shared<SoftwareBreakpoint>(machInst, imm);
    } else {
        // Execute AArch32 Software Breakpoint
        return std::make_shared<PrefetchAbort>(readPC(xc),
                                               ArmFault::DebugEvent);
    }
}
635
636Fault
637ArmStaticInst::advSIMDFPAccessTrap64(ExceptionLevel el) const
638{
639 switch (el) {
640 case EL1:
641 return std::make_shared<SupervisorTrap>(machInst, 0x1E00000,
642 EC_TRAPPED_SIMD_FP);
643 case EL2:
644 return std::make_shared<HypervisorTrap>(machInst, 0x1E00000,
645 EC_TRAPPED_SIMD_FP);
646 case EL3:
647 return std::make_shared<SecureMonitorTrap>(machInst, 0x1E00000,
648 EC_TRAPPED_SIMD_FP);
649
650 default:
651 panic("Illegal EL in advSIMDFPAccessTrap64\n");
652 }
653}
654
655
// Check for traps on AArch64 FP/SIMD use to higher exception levels:
// CPTR_EL2.TFP in non-secure state with virtualization, then
// CPTR_EL3.TFP when security is implemented.
Fault
ArmStaticInst::checkFPAdvSIMDTrap64(ThreadContext *tc, CPSR cpsr) const
{
    if (ArmSystem::haveVirtualization(tc) && !inSecureState(tc)) {
        HCPTR cptrEnCheck = tc->readMiscReg(MISCREG_CPTR_EL2);
        if (cptrEnCheck.tfp)
            return advSIMDFPAccessTrap64(EL2);
    }

    if (ArmSystem::haveSecurity(tc)) {
        HCPTR cptrEnCheck = tc->readMiscReg(MISCREG_CPTR_EL3);
        if (cptrEnCheck.tfp)
            return advSIMDFPAccessTrap64(EL3);
    }

    return NoFault;
}
673
674Fault
675ArmStaticInst::checkFPAdvSIMDEnabled64(ThreadContext *tc,
676 CPSR cpsr, CPACR cpacr) const
677{
678 const ExceptionLevel el = (ExceptionLevel) (uint8_t)cpsr.el;
679 if ((el == EL0 && cpacr.fpen != 0x3) ||
680 (el == EL1 && !(cpacr.fpen & 0x1)))
681 return advSIMDFPAccessTrap64(EL1);
682
683 return checkFPAdvSIMDTrap64(tc, cpsr);
684}
685
// AArch32 FP/SIMD enable check (ARM ARM pseudocode
// AArch32.CheckAdvSIMDOrFPEnabled): tests the CPACR/NSACR/FPEXC
// enables, then the HCPTR / CPTR_EL3 trap controls.
Fault
ArmStaticInst::checkAdvSIMDOrFPEnabled32(ThreadContext *tc,
                                         CPSR cpsr, CPACR cpacr,
                                         NSACR nsacr, FPEXC fpexc,
                                         bool fpexc_check, bool advsimd) const
{
    const bool have_virtualization = ArmSystem::haveVirtualization(tc);
    const bool have_security = ArmSystem::haveSecurity(tc);
    const bool is_secure = inSecureState(tc);
    const ExceptionLevel cur_el = opModeToEL(currOpMode(tc));

    // EL0 under an AArch64 EL1 uses the AArch64 enable check instead.
    if (cur_el == EL0 && ELIs64(tc, EL1))
        return checkFPAdvSIMDEnabled64(tc, cpsr, cpacr);

    uint8_t cpacr_cp10 = cpacr.cp10;
    bool cpacr_asedis = cpacr.asedis;

    // In non-secure state with an AArch32 EL3, NSACR can further
    // restrict the CPACR settings.
    if (have_security && !ELIs64(tc, EL3) && !is_secure) {
        if (nsacr.nsasedis)
            cpacr_asedis = true;
        if (nsacr.cp10 == 0)
            cpacr_cp10 = 0;
    }

    if (cur_el != EL2) {
        if (advsimd && cpacr_asedis)
            return disabledFault();

        // cp10 access: EL0 requires 0b11; other ELs require bit 0 set.
        if ((cur_el == EL0 && cpacr_cp10 != 0x3) ||
            (cur_el != EL0 && !(cpacr_cp10 & 0x1)))
            return disabledFault();
    }

    // FPEXC.EN gates all FP/SIMD use when the caller asks for it.
    if (fpexc_check && !fpexc.en)
        return disabledFault();

    // -- aarch32/exceptions/traps/AArch32.CheckFPAdvSIMDTrap --

    if (have_virtualization && !is_secure && ELIs64(tc, EL2))
        return checkFPAdvSIMDTrap64(tc, cpsr);

    if (have_virtualization && !is_secure) {
        HCPTR hcptr = tc->readMiscReg(MISCREG_HCPTR);
        bool hcptr_cp10 = hcptr.tcp10;
        bool hcptr_tase = hcptr.tase;

        // NOTE(review): the ARM ARM pseudocode forces the HCPTR trap
        // when NSACR.cp10 == '0', but this tests nsacr.cp10 == 1 —
        // confirm the intended polarity.
        if (have_security && !ELIs64(tc, EL3) && !is_secure) {
            if (nsacr.nsasedis)
                hcptr_tase = true;
            if (nsacr.cp10)
                hcptr_cp10 = true;
        }

        if ((advsimd && hcptr_tase) || hcptr_cp10) {
            const uint32_t iss = advsimd ? (1 << 5) : 0xA;
            // At EL2 itself the trap is reported as UNDEFINED; below
            // EL2 it is taken as a hypervisor trap.
            if (cur_el == EL2) {
                return std::make_shared<UndefinedInstruction>(
                    machInst, iss,
                    EC_TRAPPED_HCPTR, mnemonic);
            } else {
                return std::make_shared<HypervisorTrap>(
                    machInst, iss,
                    EC_TRAPPED_HCPTR);
            }

        }
    }

    if (have_security && ELIs64(tc, EL3)) {
        HCPTR cptrEnCheck = tc->readMiscReg(MISCREG_CPTR_EL3);
        if (cptrEnCheck.tfp)
            return advSIMDFPAccessTrap64(EL3);
    }

    return NoFault;
}
762
763inline bool
764ArmStaticInst::isWFxTrapping(ThreadContext *tc,
765 ExceptionLevel tgtEl,
766 bool isWfe) const
767{
768 bool trap = false;
769 SCTLR sctlr = ((SCTLR)tc->readMiscReg(MISCREG_SCTLR_EL1));
770 HCR hcr = ((HCR)tc->readMiscReg(MISCREG_HCR_EL2));
771 SCR scr = ((SCR)tc->readMiscReg(MISCREG_SCR_EL3));
772
773 switch (tgtEl) {
774 case EL1:
775 trap = isWfe? !sctlr.ntwe : !sctlr.ntwi;
776 break;
777 case EL2:
778 trap = isWfe? hcr.twe : hcr.twi;
779 break;
780 case EL3:
781 trap = isWfe? scr.twe : scr.twi;
782 break;
783 default:
784 break;
785 }
786
787 return trap;
788}
789
// Check whether a WFI/WFE executed in AArch32 traps to targetEL,
// building the matching AArch32 trap; defers to the AArch64 variant
// when the target level runs AArch64.
Fault
ArmStaticInst::checkForWFxTrap32(ThreadContext *tc,
                                 ExceptionLevel targetEL,
                                 bool isWfe) const
{
    // Check if target exception level is implemented.
    assert(ArmSystem::haveEL(tc, targetEL));

    // Check for routing to AArch64: this happens if the
    // target exception level (where the trap will be handled)
    // is using aarch64
    if (ELIs64(tc, targetEL)) {
        return checkForWFxTrap64(tc, targetEL, isWfe);
    }

    // Check if processor needs to trap at selected exception level
    bool trap = isWFxTrapping(tc, targetEL, isWfe);

    if (trap) {
        uint32_t iss = isWfe? 0x1E00001 : /* WFE Instruction syndrome */
                             0x1E00000; /* WFI Instruction syndrome */
        switch (targetEL) {
          case EL1:
            // Traps to EL1 are reported as UNDEFINED in AArch32.
            return std::make_shared<UndefinedInstruction>(
                machInst, iss,
                EC_TRAPPED_WFI_WFE, mnemonic);
          case EL2:
            return std::make_shared<HypervisorTrap>(machInst, iss,
                                                    EC_TRAPPED_WFI_WFE);
          case EL3:
            return std::make_shared<SecureMonitorTrap>(machInst, iss,
                                                       EC_TRAPPED_WFI_WFE);
          default:
            panic("Unrecognized Exception Level: %d\n", targetEL);
        }
    }

    return NoFault;
}
829
// Check whether a WFI/WFE traps to targetEL when that level is using
// AArch64, building the matching AArch64 trap.
Fault
ArmStaticInst::checkForWFxTrap64(ThreadContext *tc,
                                 ExceptionLevel targetEL,
                                 bool isWfe) const
{
    // Check if target exception level is implemented.
    assert(ArmSystem::haveEL(tc, targetEL));

    // Check if processor needs to trap at selected exception level
    bool trap = isWFxTrapping(tc, targetEL, isWfe);

    if (trap) {
        uint32_t iss = isWfe? 0x1E00001 : /* WFE Instruction syndrome */
                             0x1E00000; /* WFI Instruction syndrome */
        switch (targetEL) {
          case EL1:
            return std::make_shared<SupervisorTrap>(machInst, iss,
                                                    EC_TRAPPED_WFI_WFE);
          case EL2:
            return std::make_shared<HypervisorTrap>(machInst, iss,
                                                    EC_TRAPPED_WFI_WFE);
          case EL3:
            return std::make_shared<SecureMonitorTrap>(machInst, iss,
                                                       EC_TRAPPED_WFI_WFE);
          default:
            panic("Unrecognized Exception Level: %d\n", targetEL);
        }
    }

    return NoFault;
}
861
// Check whether a WFI/WFE executed in AArch32 should trap, testing
// each exception level above the current one in turn: EL1 (only from
// EL0), then EL2 in non-secure state, then EL3. The first fault found
// wins.
Fault
ArmStaticInst::trapWFx(ThreadContext *tc,
                       CPSR cpsr, SCR scr,
                       bool isWfe) const
{
    Fault fault = NoFault;
    if (cpsr.el == EL0) {
        fault = checkForWFxTrap32(tc, EL1, isWfe);
    }

    if ((fault == NoFault) &&
        ArmSystem::haveEL(tc, EL2) && !inSecureState(scr, cpsr) &&
        ((cpsr.el == EL0) || (cpsr.el == EL1))) {

        fault = checkForWFxTrap32(tc, EL2, isWfe);
    }

    if ((fault == NoFault) &&
        ArmSystem::haveEL(tc, EL3) && cpsr.el != EL3) {
        fault = checkForWFxTrap32(tc, EL3, isWfe);
    }

    return fault;
}
886
// Check whether the SETEND instruction is enabled at the current
// exception level; when SCTLR.SED (HSCTLR.SED at EL2) disables it,
// return an undefined-instruction fault, otherwise NoFault.
Fault
ArmStaticInst::checkSETENDEnabled(ThreadContext *tc, CPSR cpsr) const
{
    bool setend_disabled(false);
    ExceptionLevel pstateEL = (ExceptionLevel)(uint8_t)(cpsr.el);

    if (pstateEL == EL2) {
        // At EL2 the hyp-mode system control register carries the SED bit.
        setend_disabled = ((SCTLR)tc->readMiscRegNoEffect(MISCREG_HSCTLR)).sed;
    } else {
        // Please note: in the armarm pseudocode there is a distinction
        // whether EL1 is aarch32 or aarch64:
        // if ELUsingAArch32(EL1) then SCTLR.SED else SCTLR[].SED;
        // Considering that SETEND is aarch32 only, ELUsingAArch32(EL1)
        // will always be true (hence using SCTLR.SED) except for
        // instruction executed at EL0, and with an AArch64 EL1.
        // In this case SCTLR_EL1 will be used. In gem5 the register is
        // mapped to SCTLR_ns. We can safely use SCTLR and choose the
        // appropriate bank version.

        // Get the index of the banked version of SCTLR:
        // SCTLR_s or SCTLR_ns.
        auto banked_sctlr = snsBankedIndex(
            MISCREG_SCTLR, tc, !inSecureState(tc));

        // SCTLR.SED bit is enabling/disabling the use of SETEND instruction.
        setend_disabled = ((SCTLR)tc->readMiscRegNoEffect(banked_sctlr)).sed;
    }

    return setend_disabled ? undefinedFault32(tc, pstateEL) :
                             NoFault;
}
918
919Fault
920ArmStaticInst::undefinedFault32(ThreadContext *tc,
921 ExceptionLevel pstateEL) const
922{
923 // Even if we are running in aarch32, the fault might be dealt with in
924 // aarch64 ISA.
925 if (generalExceptionsToAArch64(tc, pstateEL)) {
926 return undefinedFault64(tc, pstateEL);
927 } else {
928 // Please note: according to the ARM ARM pseudocode we should handle
929 // the case when EL2 is aarch64 and HCR.TGE is 1 as well.
930 // However this case is already handled by the routeToHyp method in
931 // ArmFault class.
932 return std::make_shared<UndefinedInstruction>(
933 machInst, 0,
934 EC_UNKNOWN, mnemonic);
935 }
936}
937
// Build the AArch64 UNDEFINED-instruction trap targeting the level
// the instruction executed at; EL0 faults are taken to EL1.
Fault
ArmStaticInst::undefinedFault64(ThreadContext *tc,
                                ExceptionLevel pstateEL) const
{
    switch (pstateEL) {
      case EL0:
      case EL1:
        return std::make_shared<SupervisorTrap>(machInst, 0, EC_UNKNOWN);
      case EL2:
        return std::make_shared<HypervisorTrap>(machInst, 0, EC_UNKNOWN);
      case EL3:
        return std::make_shared<SecureMonitorTrap>(machInst, 0, EC_UNKNOWN);
      default:
        panic("Unrecognized Exception Level: %d\n", pstateEL);
        break;
    }

    // Not reached: every valid EL returns above and default panics.
    return NoFault;
}
957
// Compute the ITSTATE value to restore from an SPSR on exception
// return, forcing it to zero in the cases the architecture requires.
// See: shared/functions/system/RestoredITBits in the ARM ARM
static uint8_t
getRestoredITBits(ThreadContext *tc, CPSR spsr)
{
    const ExceptionLevel el = opModeToEL((OperatingMode) (uint8_t)spsr.mode);
    const uint8_t it = itState(spsr);

    // No IT state when returning to A32 (non-Thumb) state or when the
    // SPSR marks the return as illegal.
    if (!spsr.t || spsr.il)
        return 0;

    // The IT bits are forced to zero when they are set to a reserved
    // value.
    if (bits(it, 7, 4) != 0 && bits(it, 3, 0) == 0)
        return 0;

    // The ITD (IT disable) control comes from HSCTLR at EL2 and from
    // SCTLR otherwise.
    const bool itd = el == EL2 ?
        ((SCTLR)tc->readMiscReg(MISCREG_HSCTLR)).itd :
        ((SCTLR)tc->readMiscReg(MISCREG_SCTLR)).itd;

    // The IT bits are forced to zero when returning to an EL with the
    // ITD bit set to 1, and the IT bits are describing a
    // multi-instruction block.
    if (itd && bits(it, 2, 0) != 0)
        return 0;

    return it;
}
986
// Check whether an exception return described by spsr is illegal
// (ARM ARM: IllegalExceptionReturn). The caller reacts to an illegal
// return by setting PSTATE.IL instead of changing M/nRW/EL/SP.
static bool
illegalExceptionReturn(ThreadContext *tc, CPSR cpsr, CPSR spsr)
{
    const OperatingMode mode = (OperatingMode) (uint8_t)spsr.mode;
    if (unknownMode(mode))
        return true;

    const OperatingMode cur_mode = (OperatingMode) (uint8_t)cpsr.mode;
    const ExceptionLevel target_el = opModeToEL(mode);

    HCR hcr = ((HCR)tc->readMiscReg(MISCREG_HCR_EL2));
    SCR scr = ((SCR)tc->readMiscReg(MISCREG_SCR_EL3));

    // An exception return may never target a higher exception level.
    if (target_el > opModeToEL(cur_mode))
        return true;

    // The target exception level must be implemented.
    if (!ArmSystem::haveEL(tc, target_el))
        return true;

    // EL1 is unreachable while HCR_EL2.TGE routes non-secure
    // exceptions to EL2.
    if (target_el == EL1 && ArmSystem::haveEL(tc, EL2) && scr.ns && hcr.tge)
        return true;

    // EL2 does not exist in secure state when EL3 is implemented.
    if (target_el == EL2 && ArmSystem::haveEL(tc, EL3) && !scr.ns)
        return true;

    // The SPSR execution state must match the target EL's register
    // width, where the latter is known.
    bool spsr_mode_is_aarch32 = (spsr.width == 1);
    bool known, target_el_is_aarch32;
    std::tie(known, target_el_is_aarch32) = ELUsingAArch32K(tc, target_el);
    assert(known || (target_el == EL0 && ELIs64(tc, EL1)));

    if (known && (spsr_mode_is_aarch32 != target_el_is_aarch32))
        return true;

    if (!spsr.width) {
        // aarch64
        if (!ArmSystem::highestELIs64(tc))
            return true;
        // Bit 1 of the mode field is reserved (must be 0) for AArch64
        // modes — presumably SPSR.M[1]; confirm against the ARM ARM.
        if (spsr & 0x2)
            return true;
        // Returning to EL0 with SP selection set is invalid (EL0t only).
        if (target_el == EL0 && spsr.sp)
            return true;
    } else {
        // aarch32: any remaining reserved mode encoding is illegal.
        return unknownMode32(mode);
    }

    return false;
}
1035
// Build the new PSTATE/CPSR value produced by an exception return from
// the given SPSR (ARM ARM: SetPSTATEFromPSR). Illegal returns keep
// M/nRW/EL/SP unchanged and set PSTATE.IL.
CPSR
ArmStaticInst::getPSTATEFromPSR(ThreadContext *tc, CPSR cpsr, CPSR spsr) const
{
    CPSR new_cpsr = 0;

    // gem5 doesn't implement single-stepping, so force the SS bit to
    // 0.
    new_cpsr.ss = 0;

    if (illegalExceptionReturn(tc, cpsr, spsr)) {
        // If the SPSR specifies an illegal exception return,
        // then PSTATE.{M, nRW, EL, SP} are unchanged and PSTATE.IL
        // is set to 1.
        new_cpsr.il = 1;
        if (cpsr.width) {
            new_cpsr.mode = cpsr.mode;
        } else {
            new_cpsr.width = cpsr.width;
            new_cpsr.el = cpsr.el;
            new_cpsr.sp = cpsr.sp;
        }
    } else {
        new_cpsr.il = spsr.il;
        if (spsr.width && unknownMode32((OperatingMode)(uint8_t)spsr.mode)) {
            // Reserved AArch32 mode encoding: flag an illegal state.
            new_cpsr.il = 1;
        } else if (spsr.width) {
            new_cpsr.mode = spsr.mode;
        } else {
            new_cpsr.el = spsr.el;
            new_cpsr.sp = spsr.sp;
        }
    }

    // Condition flags always come from the SPSR.
    new_cpsr.nz = spsr.nz;
    new_cpsr.c = spsr.c;
    new_cpsr.v = spsr.v;
    if (new_cpsr.width) {
        // aarch32: restore Q/GE/E/AIF/T and the (possibly zeroed) IT bits.
        const ITSTATE it = getRestoredITBits(tc, spsr);
        new_cpsr.q = spsr.q;
        new_cpsr.ge = spsr.ge;
        new_cpsr.e = spsr.e;
        new_cpsr.aif = spsr.aif;
        new_cpsr.t = spsr.t;
        new_cpsr.it2 = it.top6;
        new_cpsr.it1 = it.bottom2;
    } else {
        // aarch64
        new_cpsr.daif = spsr.daif;
    }

    return new_cpsr;
}
1089
// Returns TRUE if exceptions normally routed to EL1 are being handled
// at an Exception level using AArch64, because either EL1 is using
// AArch64 or TGE is in force and EL2 is using AArch64.
bool
ArmStaticInst::generalExceptionsToAArch64(ThreadContext *tc,
                                          ExceptionLevel pstateEL) const
{
    HCR hcr = ((HCR)tc->readMiscReg(MISCREG_HCR_EL2));
    // Either the instruction runs at EL0 under an AArch64 EL1, or
    // HCR_EL2.TGE redirects the exception to a non-secure AArch64 EL2.
    return (pstateEL == EL0 && !ELIs32(tc, EL1)) ||
        (ArmSystem::haveEL(tc, EL2) && !inSecureState(tc) &&
         !ELIs32(tc, EL2) && hcr.tge);
}
1102
1103
1104}