fp.isa (7369:f71b906540cf)
1// -*- mode:c++ -*-
2
3// Copyright (c) 2010 ARM Limited
4// All rights reserved
5//
6// The license below extends only to copyright in the software and shall
7// not be construed as granting a license to any other intellectual
8// property including but not limited to intellectual property relating
9// to a hardware implementation of the functionality of the software
10// licensed hereunder. You may use the software subject to the license
11// terms below provided that you ensure that this notice is replicated
12// unmodified and in its entirety in all distributions of the software,
13// modified or unmodified, in source code or in binary form.
14//
15// Copyright (c) 2007-2008 The Florida State University
16// All rights reserved.
17//
18// Redistribution and use in source and binary forms, with or without
19// modification, are permitted provided that the following conditions are
20// met: redistributions of source code must retain the above copyright
21// notice, this list of conditions and the following disclaimer;
22// redistributions in binary form must reproduce the above copyright
23// notice, this list of conditions and the following disclaimer in the
24// documentation and/or other materials provided with the distribution;
25// neither the name of the copyright holders nor the names of its
26// contributors may be used to endorse or promote products derived from
27// this software without specific prior written permission.
28//
29// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
30// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
31// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
32// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
33// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
34// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
35// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
36// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
37// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
38// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
39// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
40//
41// Authors: Stephen Hines
42
43////////////////////////////////////////////////////////////////////
44//
45// Floating Point operate instructions
46//
47
48def template FPAExecute {{
49 Fault %(class_name)s::execute(%(CPU_exec_context)s *xc, Trace::InstRecord *traceData) const
50 {
51 Fault fault = NoFault;
52
53 %(fp_enable_check)s;
54
55 %(op_decl)s;
56 %(op_rd)s;
57
58 if (%(predicate_test)s) {
59 %(code)s;
60 if (fault == NoFault) {
61 %(op_wb)s;
62 }
63 }
64
65 return fault;
66 }
67}};
68
69def template FloatDoubleDecode {{
70 {
71 ArmStaticInst *i = NULL;
72 switch (OPCODE_19 << 1 | OPCODE_7)
73 {
74 case 0:
75 i = (ArmStaticInst *)new %(class_name)sS(machInst);
76 break;
77 case 1:
78 i = (ArmStaticInst *)new %(class_name)sD(machInst);
79 break;
80 case 2:
81 case 3:
82 default:
83 panic("Cannot decode float/double nature of the instruction");
84 }
85 return i;
86 }
87}};
88
89// Primary format for floating point operate instructions:
90def format FloatOp(code, *flags) {{
91 orig_code = code
92
93 cblk = code
94 iop = InstObjParams(name, Name, 'PredOp',
95 {"code": cblk,
96 "predicate_test": predicateTest},
97 flags)
98 header_output = BasicDeclare.subst(iop)
99 decoder_output = BasicConstructor.subst(iop)
100 exec_output = FPAExecute.subst(iop)
101
102 sng_cblk = code
103 sng_iop = InstObjParams(name, Name+'S', 'PredOp',
104 {"code": sng_cblk,
105 "predicate_test": predicateTest},
106 flags)
107 header_output += BasicDeclare.subst(sng_iop)
108 decoder_output += BasicConstructor.subst(sng_iop)
109 exec_output += FPAExecute.subst(sng_iop)
110
111 dbl_code = re.sub(r'\.sf', '.df', orig_code)
112
113 dbl_cblk = dbl_code
114 dbl_iop = InstObjParams(name, Name+'D', 'PredOp',
115 {"code": dbl_cblk,
116 "predicate_test": predicateTest},
117 flags)
118 header_output += BasicDeclare.subst(dbl_iop)
119 decoder_output += BasicConstructor.subst(dbl_iop)
120 exec_output += FPAExecute.subst(dbl_iop)
121
122 decode_block = FloatDoubleDecode.subst(iop)
123}};
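// Illustrative sketch (not in the original source): a decoder entry would
// invoke this format along the lines of
//     0x0: FloatOp::adf({{ Fd.sf = Fn.sf + Fm.sf; }});
// The format then emits both a single-precision (S) and a double-precision
// (D) variant, the latter produced by rewriting ".sf" operands to ".df",
// and FloatDoubleDecode picks between them at decode time. The mnemonic and
// decode context shown here are assumptions for illustration only.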
124
125let {{
126 calcFPCcCode = '''
127 uint16_t _in, _iz, _ic, _iv;
128
129 _in = %(fReg1)s < %(fReg2)s;
130 _iz = %(fReg1)s == %(fReg2)s;
131 _ic = %(fReg1)s >= %(fReg2)s;
132 _iv = (isnan(%(fReg1)s) || isnan(%(fReg2)s)) & 1;
133
134 CondCodes = _in << 31 | _iz << 30 | _ic << 29 | _iv << 28 |
135 (CondCodes & 0x0FFFFFFF);
136 '''
137}};
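// The snippet above packs the FPA comparison results into the top nibble of
// CondCodes: N (bit 31) is set for "less than", Z (bit 30) for "equal",
// C (bit 29) for "greater than or equal", and V (bit 28) when either operand
// is a NaN; the low 28 bits of CondCodes are preserved unchanged.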
138
139def format FloatCmp(fReg1, fReg2, *flags) {{
140 code = calcFPCcCode % vars()
141 iop = InstObjParams(name, Name, 'PredOp',
142 {"code": code,
143 "predicate_test": predicateTest},
144 flags)
145 header_output = BasicDeclare.subst(iop)
146 decoder_output = BasicConstructor.subst(iop)
147 decode_block = BasicDecode.subst(iop)
148 exec_output = FPAExecute.subst(iop)
149}};
150
151let {{
152 header_output = '''
153 StaticInstPtr
154 decodeExtensionRegLoadStore(ExtMachInst machInst);
155 '''
156 decoder_output = '''
157 StaticInstPtr
158 decodeExtensionRegLoadStore(ExtMachInst machInst)
159 {
160 const uint32_t opcode = bits(machInst, 24, 20);
161 const uint32_t offset = bits(machInst, 7, 0);
162 const bool single = (bits(machInst, 8) == 0);
163 const IntRegIndex rn = (IntRegIndex)(uint32_t)bits(machInst, 19, 16);
164 RegIndex vd;
165 if (single) {
166 vd = (RegIndex)(uint32_t)((bits(machInst, 15, 12) << 1) |
167 bits(machInst, 22));
168 } else {
169 vd = (RegIndex)(uint32_t)((bits(machInst, 15, 12) << 1) |
170 (bits(machInst, 22) << 5));
171 }
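// For single-precision transfers bit 22 (the D bit of the encoding) supplies
// the low bit of the S-register index; for double precision it is folded in
// as a high bit (bit 5) of the index instead, as computed above.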
172 switch (bits(opcode, 4, 3)) {
173 case 0x0:
174 if (bits(opcode, 4, 1) == 0x2 &&
175 !(machInst.thumb == 1 && bits(machInst, 28) == 1) &&
176 !(machInst.thumb == 0 && machInst.condCode == 0xf)) {
177 if ((bits(machInst, 7, 4) & 0xd) != 1) {
178 break;
179 }
180 const IntRegIndex rt =
181 (IntRegIndex)(uint32_t)bits(machInst, 15, 12);
182 const IntRegIndex rt2 =
183 (IntRegIndex)(uint32_t)bits(machInst, 19, 16);
184 const bool op = bits(machInst, 20);
185 uint32_t vm;
186 if (single) {
187 vm = (bits(machInst, 3, 0) << 1) | bits(machInst, 5);
188 } else {
189 vm = (bits(machInst, 3, 0) << 1) |
190 (bits(machInst, 5) << 5);
191 }
192 if (op) {
193 return new Vmov2Core2Reg(machInst, rt, rt2,
194 (IntRegIndex)vm);
195 } else {
196 return new Vmov2Reg2Core(machInst, (IntRegIndex)vm,
197 rt, rt2);
198 }
199 }
200 break;
201 case 0x1:
202 switch (bits(opcode, 1, 0)) {
203 case 0x0:
204 return new VLdmStm(machInst, rn, vd, single,
205 true, false, false, offset);
206 case 0x1:
207 return new VLdmStm(machInst, rn, vd, single,
208 true, false, true, offset);
209 case 0x2:
210 return new VLdmStm(machInst, rn, vd, single,
211 true, true, false, offset);
212 case 0x3:
213 // If rn == sp, then this is called vpop.
214 return new VLdmStm(machInst, rn, vd, single,
215 true, true, true, offset);
216 }
217 case 0x2:
218 if (bits(opcode, 1, 0) == 0x2) {
219 // If rn == sp, then this is called vpush.
220 return new VLdmStm(machInst, rn, vd, single,
221 false, true, false, offset);
222 } else if (bits(opcode, 1, 0) == 0x3) {
223 return new VLdmStm(machInst, rn, vd, single,
224 false, true, true, offset);
225 }
226 // Fall through on purpose
227 case 0x3:
228 const bool up = (bits(machInst, 23) == 1);
229 const uint32_t imm = bits(machInst, 7, 0) << 2;
230 RegIndex vd;
231 if (single) {
232 vd = (RegIndex)(uint32_t)((bits(machInst, 15, 12) << 1) |
233 (bits(machInst, 22)));
234 } else {
235 vd = (RegIndex)(uint32_t)((bits(machInst, 15, 12) << 1) |
236 (bits(machInst, 22) << 5));
237 }
238 if (bits(opcode, 1, 0) == 0x0) {
239 if (single) {
240 if (up) {
241 return new %(vstr_us)s(machInst, vd, rn, up, imm);
242 } else {
243 return new %(vstr_s)s(machInst, vd, rn, up, imm);
244 }
245 } else {
246 if (up) {
247 return new %(vstr_ud)s(machInst, vd, vd + 1,
248 rn, up, imm);
249 } else {
250 return new %(vstr_d)s(machInst, vd, vd + 1,
251 rn, up, imm);
252 }
253 }
254 } else if (bits(opcode, 1, 0) == 0x1) {
255 if (single) {
256 if (up) {
257 return new %(vldr_us)s(machInst, vd, rn, up, imm);
258 } else {
259 return new %(vldr_s)s(machInst, vd, rn, up, imm);
260 }
261 } else {
262 if (up) {
263 return new %(vldr_ud)s(machInst, vd, vd + 1,
264 rn, up, imm);
265 } else {
266 return new %(vldr_d)s(machInst, vd, vd + 1,
267 rn, up, imm);
268 }
269 }
270 }
271 }
272 return new Unknown(machInst);
273 }
274 ''' % {
275 "vldr_us" : "VLDR_" + loadImmClassName(False, True, False),
276 "vldr_s" : "VLDR_" + loadImmClassName(False, False, False),
277 "vldr_ud" : "VLDR_" + loadDoubleImmClassName(False, True, False),
278 "vldr_d" : "VLDR_" + loadDoubleImmClassName(False, False, False),
279 "vstr_us" : "VSTR_" + storeImmClassName(False, True, False),
280 "vstr_s" : "VSTR_" + storeImmClassName(False, False, False),
281 "vstr_ud" : "VSTR_" + storeDoubleImmClassName(False, True, False),
282 "vstr_d" : "VSTR_" + storeDoubleImmClassName(False, False, False)
283 }
284}};
285
286def format ExtensionRegLoadStore() {{
287 decode_block = '''
288 return decodeExtensionRegLoadStore(machInst);
289 '''
290}};
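// ExtensionRegLoadStore, ShortFpTransfer, and VfpData (below) are thin
// formats: each decode_block simply forwards to the corresponding
// hand-written decode helper defined in the accompanying let blocks.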
291
292let {{
293 header_output = '''
294 StaticInstPtr
295 decodeShortFpTransfer(ExtMachInst machInst);
296 '''
297 decoder_output = '''
298 StaticInstPtr
299 decodeShortFpTransfer(ExtMachInst machInst)
300 {
301 const uint32_t l = bits(machInst, 20);
302 const uint32_t c = bits(machInst, 8);
303 const uint32_t a = bits(machInst, 23, 21);
304 const uint32_t b = bits(machInst, 6, 5);
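// Roughly: l selects the transfer direction (1 = extension register to
// core), c separates whole-register/system transfers (c == 0) from scalar
// element transfers (c == 1), and a/b pick the particular vmov/vmsr/vmrs
// variant handled below.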
305 if ((machInst.thumb == 1 && bits(machInst, 28) == 1) ||
306 (machInst.thumb == 0 && machInst.condCode == 0xf)) {
307 return new Unknown(machInst);
308 }
309 if (l == 0 && c == 0) {
310 if (a == 0) {
311 const uint32_t vn = (bits(machInst, 19, 16) << 1) |
312 bits(machInst, 7);
313 const IntRegIndex rt =
314 (IntRegIndex)(uint32_t)bits(machInst, 15, 12);
315 if (bits(machInst, 20) == 1) {
316 return new VmovRegCoreW(machInst, rt, (IntRegIndex)vn);
317 } else {
318 return new VmovCoreRegW(machInst, (IntRegIndex)vn, rt);
319 }
320 } else if (a == 0x7) {
321 const IntRegIndex rt =
322 (IntRegIndex)(uint32_t)bits(machInst, 15, 12);
323 uint32_t specReg = bits(machInst, 19, 16);
324 switch (specReg) {
325 case 0:
326 specReg = MISCREG_FPSID;
327 break;
328 case 1:
329 specReg = MISCREG_FPSCR;
330 break;
331 case 8:
332 specReg = MISCREG_FPEXC;
333 break;
334 default:
335 return new Unknown(machInst);
336 }
337 return new Vmsr(machInst, (IntRegIndex)specReg, rt);
338 }
339 } else if (l == 0 && c == 1) {
340 if (bits(a, 2) == 0) {
341 uint32_t vd = (bits(machInst, 7) << 5) |
342 (bits(machInst, 19, 16) << 1);
343 uint32_t index, size;
344 const IntRegIndex rt =
345 (IntRegIndex)(uint32_t)bits(machInst, 15, 12);
346 if (bits(machInst, 22) == 1) {
347 size = 8;
348 index = (bits(machInst, 21) << 2) |
349 bits(machInst, 6, 5);
350 } else if (bits(machInst, 5) == 1) {
351 size = 16;
352 index = (bits(machInst, 21) << 1) |
353 bits(machInst, 6);
354 } else if (bits(machInst, 6) == 0) {
355 size = 32;
356 index = bits(machInst, 21);
357 } else {
358 return new Unknown(machInst);
359 }
360 if (index >= (32 / size)) {
361 index -= (32 / size);
362 vd++;
363 }
364 switch (size) {
365 case 8:
366 return new VmovCoreRegB(machInst, (IntRegIndex)vd,
367 rt, index);
368 case 16:
369 return new VmovCoreRegH(machInst, (IntRegIndex)vd,
370 rt, index);
371 case 32:
372 return new VmovCoreRegW(machInst, (IntRegIndex)vd, rt);
373 }
374 } else if (bits(b, 1) == 0) {
375 // A8-594
376 return new WarnUnimplemented("vdup", machInst);
377 }
378 } else if (l == 1 && c == 0) {
379 if (a == 0) {
380 const uint32_t vn = (bits(machInst, 19, 16) << 1) |
381 bits(machInst, 7);
382 const IntRegIndex rt =
383 (IntRegIndex)(uint32_t)bits(machInst, 15, 12);
384 if (bits(machInst, 20) == 1) {
385 return new VmovRegCoreW(machInst, rt, (IntRegIndex)vn);
386 } else {
387 return new VmovCoreRegW(machInst, (IntRegIndex)vn, rt);
388 }
389 } else if (a == 7) {
390 const IntRegIndex rt =
391 (IntRegIndex)(uint32_t)bits(machInst, 15, 12);
392 uint32_t specReg = bits(machInst, 19, 16);
393 switch (specReg) {
394 case 0:
395 specReg = MISCREG_FPSID;
396 break;
397 case 1:
398 specReg = MISCREG_FPSCR;
399 break;
400 case 6:
401 specReg = MISCREG_MVFR1;
402 break;
403 case 7:
404 specReg = MISCREG_MVFR0;
405 break;
406 case 8:
407 specReg = MISCREG_FPEXC;
408 break;
409 default:
410 return new Unknown(machInst);
411 }
412 return new Vmrs(machInst, rt, (IntRegIndex)specReg);
413 }
414 } else {
415 uint32_t vd = (bits(machInst, 7) << 5) |
416 (bits(machInst, 19, 16) << 1);
417 uint32_t index, size;
418 const IntRegIndex rt =
419 (IntRegIndex)(uint32_t)bits(machInst, 15, 12);
420 const bool u = (bits(machInst, 23) == 1);
421 if (bits(machInst, 22) == 1) {
422 size = 8;
423 index = (bits(machInst, 21) << 2) |
424 bits(machInst, 6, 5);
425 } else if (bits(machInst, 5) == 1) {
426 size = 16;
427 index = (bits(machInst, 21) << 1) |
428 bits(machInst, 6);
429 } else if (bits(machInst, 6) == 0 && !u) {
430 size = 32;
431 index = bits(machInst, 21);
432 } else {
433 return new Unknown(machInst);
434 }
435 if (index >= (32 / size)) {
436 index -= (32 / size);
437 vd++;
438 }
439 switch (size) {
440 case 8:
441 if (u) {
442 return new VmovRegCoreUB(machInst, rt,
443 (IntRegIndex)vd, index);
444 } else {
445 return new VmovRegCoreSB(machInst, rt,
446 (IntRegIndex)vd, index);
447 }
448 case 16:
449 if (u) {
450 return new VmovRegCoreUH(machInst, rt,
451 (IntRegIndex)vd, index);
452 } else {
453 return new VmovRegCoreSH(machInst, rt,
454 (IntRegIndex)vd, index);
455 }
456 case 32:
457 return new VmovRegCoreW(machInst, rt, (IntRegIndex)vd);
458 }
459 }
460 return new Unknown(machInst);
461 }
462 '''
463}};
464
465def format ShortFpTransfer() {{
466 decode_block = '''
467 return decodeShortFpTransfer(machInst);
468 '''
469}};
470
471let {{
472 header_output = '''
473 StaticInstPtr
474 decodeVfpData(ExtMachInst machInst);
475 '''
476 decoder_output = '''
477 StaticInstPtr
478 decodeVfpData(ExtMachInst machInst)
479 {
480 const uint32_t opc1 = bits(machInst, 23, 20);
481 const uint32_t opc2 = bits(machInst, 19, 16);
482 const uint32_t opc3 = bits(machInst, 7, 6);
483 //const uint32_t opc4 = bits(machInst, 3, 0);
484 switch (opc1 & 0xb /* 1011 */) {
485 case 0x0:
486 return new WarnUnimplemented("vmla, vmls", machInst);
487 case 0x2:
488 if ((opc3 & 0x1) == 0) {
489 uint32_t vd;
490 uint32_t vm;
491 uint32_t vn;
492 if (bits(machInst, 8) == 0) {
493 vd = bits(machInst, 22) | (bits(machInst, 15, 12) << 1);
494 vm = bits(machInst, 5) | (bits(machInst, 3, 0) << 1);
495 vn = bits(machInst, 7) | (bits(machInst, 19, 16) << 1);
496 return new VmulS(machInst, (IntRegIndex)vd,
497 (IntRegIndex)vn, (IntRegIndex)vm);
498 } else {
499 vd = (bits(machInst, 22) << 5) |
500 (bits(machInst, 15, 12) << 1);
501 vm = (bits(machInst, 5) << 5) |
502 (bits(machInst, 3, 0) << 1);
503 vn = (bits(machInst, 7) << 5) |
504 (bits(machInst, 19, 16) << 1);
505 return new VmulD(machInst, (IntRegIndex)vd,
506 (IntRegIndex)vn, (IntRegIndex)vm);
507 }
508 }
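// When opc3 is odd this encoding is vnmul, so fall through to the
// vnmla/vnmls/vnmul case below (inferred from the VFP encoding tables).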
509 case 0x1:
510 return new WarnUnimplemented("vnmla, vnmls, vnmul", machInst);
511 case 0x3:
512 if ((opc3 & 0x1) == 0) {
513 uint32_t vd;
514 uint32_t vm;
515 uint32_t vn;
516 if (bits(machInst, 8) == 0) {
517 vd = bits(machInst, 22) | (bits(machInst, 15, 12) << 1);
518 vm = bits(machInst, 5) | (bits(machInst, 3, 0) << 1);
519 vn = bits(machInst, 7) | (bits(machInst, 19, 16) << 1);
520 return new VaddS(machInst, (IntRegIndex)vd,
521 (IntRegIndex)vn, (IntRegIndex)vm);
522 } else {
523 vd = (bits(machInst, 22) << 5) |
524 (bits(machInst, 15, 12) << 1);
525 vm = (bits(machInst, 5) << 5) |
526 (bits(machInst, 3, 0) << 1);
527 vn = (bits(machInst, 7) << 5) |
528 (bits(machInst, 19, 16) << 1);
529 return new VaddD(machInst, (IntRegIndex)vd,
530 (IntRegIndex)vn, (IntRegIndex)vm);
531 }
532 } else {
533 uint32_t vd;
534 uint32_t vm;
535 uint32_t vn;
536 if (bits(machInst, 8) == 0) {
537 vd = bits(machInst, 22) | (bits(machInst, 15, 12) << 1);
538 vm = bits(machInst, 5) | (bits(machInst, 3, 0) << 1);
539 vn = bits(machInst, 7) | (bits(machInst, 19, 16) << 1);
540 return new VsubS(machInst, (IntRegIndex)vd,
541 (IntRegIndex)vn, (IntRegIndex)vm);
542 } else {
543 vd = (bits(machInst, 22) << 5) |
544 (bits(machInst, 15, 12) << 1);
545 vm = (bits(machInst, 5) << 5) |
546 (bits(machInst, 3, 0) << 1);
547 vn = (bits(machInst, 7) << 5) |
548 (bits(machInst, 19, 16) << 1);
549 return new VsubD(machInst, (IntRegIndex)vd,
550 (IntRegIndex)vn, (IntRegIndex)vm);
551 }
552 }
553 case 0x8:
554 if ((opc3 & 0x1) == 0) {
555 uint32_t vd;
556 uint32_t vm;
557 uint32_t vn;
558 if (bits(machInst, 8) == 0) {
559 vd = bits(machInst, 22) | (bits(machInst, 15, 12) << 1);
560 vm = bits(machInst, 5) | (bits(machInst, 3, 0) << 1);
561 vn = bits(machInst, 7) | (bits(machInst, 19, 16) << 1);
562 return new VdivS(machInst, (IntRegIndex)vd,
563 (IntRegIndex)vn, (IntRegIndex)vm);
564 } else {
565 vd = (bits(machInst, 22) << 5) |
566 (bits(machInst, 15, 12) << 1);
567 vm = (bits(machInst, 5) << 5) |
568 (bits(machInst, 3, 0) << 1);
569 vn = (bits(machInst, 7) << 5) |
570 (bits(machInst, 19, 16) << 1);
571 return new VdivD(machInst, (IntRegIndex)vd,
572 (IntRegIndex)vn, (IntRegIndex)vm);
573 }
574 }
575 break;
576 case 0xb:
577 if ((opc3 & 0x1) == 0) {
578 uint32_t vd;
579 const uint32_t baseImm =
580 bits(machInst, 3, 0) | (bits(machInst, 19, 16) << 4);
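// vfp_modified_imm is assumed to apply the usual VFP expand-immediate rule
// to this 8-bit "abcdefgh" pattern (sign from a, widened exponent from
// b..d, fraction from efgh); e.g. an imm8 of 0x70 would expand to +1.0.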
581 if (bits(machInst, 8) == 0) {
582 vd = bits(machInst, 22) | (bits(machInst, 15, 12) << 1);
583 uint32_t imm = vfp_modified_imm(baseImm, false);
584 return new VmovImmS(machInst, (IntRegIndex)vd, imm);
585 } else {
586 vd = (bits(machInst, 22) << 5) |
587 (bits(machInst, 15, 12) << 1);
588 uint64_t imm = vfp_modified_imm(baseImm, true);
589 return new VmovImmD(machInst, (IntRegIndex)vd, imm);
590 }
591 }
592 switch (opc2) {
593 case 0x0:
594 if (opc3 == 1) {
595 uint32_t vd;
596 uint32_t vm;
597 if (bits(machInst, 8) == 0) {
598 vd = bits(machInst, 22) | (bits(machInst, 15, 12) << 1);
599 vm = bits(machInst, 5) | (bits(machInst, 3, 0) << 1);
600 return new VmovRegS(machInst,
601 (IntRegIndex)vd, (IntRegIndex)vm);
602 } else {
603 vd = (bits(machInst, 22) << 5) |
604 (bits(machInst, 15, 12) << 1);
605 vm = (bits(machInst, 5) << 5) |
606 (bits(machInst, 3, 0) << 1);
607 return new VmovRegD(machInst,
608 (IntRegIndex)vd, (IntRegIndex)vm);
609 }
610 } else {
611 uint32_t vd;
612 uint32_t vm;
613 if (bits(machInst, 8) == 0) {
614 vd = bits(machInst, 22) | (bits(machInst, 15, 12) << 1);
615 vm = bits(machInst, 5) | (bits(machInst, 3, 0) << 1);
616 return new VabsS(machInst,
617 (IntRegIndex)vd, (IntRegIndex)vm);
618 } else {
619 vd = (bits(machInst, 22) << 5) |
620 (bits(machInst, 15, 12) << 1);
621 vm = (bits(machInst, 5) << 5) |
622 (bits(machInst, 3, 0) << 1);
623 return new VabsD(machInst,
624 (IntRegIndex)vd, (IntRegIndex)vm);
625 }
626 }
627 case 0x1:
628 if (opc3 == 1) {
629 uint32_t vd;
630 uint32_t vm;
631 if (bits(machInst, 8) == 0) {
632 vd = bits(machInst, 22) | (bits(machInst, 15, 12) << 1);
633 vm = bits(machInst, 5) | (bits(machInst, 3, 0) << 1);
634 return new VnegS(machInst,
635 (IntRegIndex)vd, (IntRegIndex)vm);
636 } else {
637 vd = (bits(machInst, 22) << 5) |
638 (bits(machInst, 15, 12) << 1);
639 vm = (bits(machInst, 5) << 5) |
640 (bits(machInst, 3, 0) << 1);
641 return new VnegD(machInst,
642 (IntRegIndex)vd, (IntRegIndex)vm);
643 }
644 } else {
645 uint32_t vd;
646 uint32_t vm;
647 if (bits(machInst, 8) == 0) {
648 vd = bits(machInst, 22) | (bits(machInst, 15, 12) << 1);
649 vm = bits(machInst, 5) | (bits(machInst, 3, 0) << 1);
650 return new VsqrtS(machInst,
651 (IntRegIndex)vd, (IntRegIndex)vm);
652 } else {
653 vd = (bits(machInst, 22) << 5) |
654 (bits(machInst, 15, 12) << 1);
655 vm = (bits(machInst, 5) << 5) |
656 (bits(machInst, 3, 0) << 1);
657 return new VsqrtD(machInst,
658 (IntRegIndex)vd, (IntRegIndex)vm);
659 }
660 }
661 case 0x2:
662 case 0x3:
663 // Between half and single precision.
664 return new WarnUnimplemented("vcvtb, vcvtt", machInst);
665 case 0x4:
666 case 0x5:
667 return new WarnUnimplemented("vcmp, vcmpe", machInst);
668 case 0x7:
669 if (opc3 == 0x3) {
670 // Between double and single precision.
671 return new WarnUnimplemented("vcvt", machInst);
672 }
673 break;
674 case 0x8:
675 // Between FP and int.
676 return new WarnUnimplemented("vcvt, vcvtr", machInst);
677 case 0xa:
678 case 0xb:
679 // Between FP and fixed point.
680 return new WarnUnimplemented("vcvt", machInst);
681 case 0xc:
682 case 0xd:
683 // Between FP and int.
684 return new WarnUnimplemented("vcvt, vcvtr", machInst);
685 case 0xe:
686 case 0xf:
687 // Between FP and fixed point.
688 return new WarnUnimplemented("vcvt", machInst);
689 }
690 break;
691 }
692 return new Unknown(machInst);
693 }
694 '''
695}};
696
697def format VfpData() {{
698 decode_block = '''
699 return decodeVfpData(machInst);
700 '''
701}};