// Copyright (c) 2017-2019 ARM Limited
// All rights reserved
//
// The license below extends only to copyright in the software and shall
// not be construed as granting a license to any other intellectual
// property including but not limited to intellectual property relating
// to a hardware implementation of the functionality of the software
// licensed hereunder. You may use the software subject to the license
// terms below provided that you ensure that this notice is replicated
// unmodified and in its entirety in all distributions of the software,
// modified or unmodified, in source code or in binary form.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met: redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer;
// redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution;
// neither the name of the copyright holders nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Authors: Giacomo Gabrielli

/// @file
/// SVE 2nd-level decoder.

output decoder {{
namespace Aarch64
{

    StaticInstPtr
    decodeSveIntArithBinPred(ExtMachInst machInst)
    {
        IntRegIndex zdn = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10);

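        // Bits [20:19] select the operation group: 0x0 add/subtract,
        // 0x1 min/max/absolute difference, 0x2 multiply/divide,
        // 0x3 bitwise logical.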
        switch (bits(machInst, 20, 19)) {
          case 0x0:
            {
                uint8_t size = bits(machInst, 23, 22);
                uint8_t opc = bits(machInst, 18, 16);
                switch (opc) {
                  case 0x0:
                    return decodeSveBinDestrPredU<SveAddPred>(
                        size, machInst, zdn, zm, pg);
                  case 0x1:
                    return decodeSveBinDestrPredU<SveSubPred>(
                        size, machInst, zdn, zm, pg);
                  case 0x3:
                    return decodeSveBinDestrPredU<SveSubr>(
                        size, machInst, zdn, zm, pg);
                  default:
                    return new Unknown64(machInst);
                }
            }
          case 0x1:
            {
                uint8_t size = bits(machInst, 23, 22);
                uint8_t u = bits(machInst, 16);
                uint8_t opc = bits(machInst, 18, 17);
                switch (opc) {
                  case 0x0:
                    return decodeSveBinDestrPred<SveSmax, SveUmax>(
                        size, u, machInst, zdn, zm, pg);
                  case 0x1:
                    return decodeSveBinDestrPred<SveSmin, SveUmin>(
                        size, u, machInst, zdn, zm, pg);
                  case 0x2:
                    return decodeSveBinDestrPred<SveSabd, SveUabd>(
                        size, u, machInst, zdn, zm, pg);
                  default:
                    return new Unknown64(machInst);
                }
            }
          case 0x2:
            {
                uint8_t size = bits(machInst, 23, 22);
                uint8_t u = bits(machInst, 16);
                uint8_t opc = bits(machInst, 18, 17);
                switch (opc) {
                  case 0x0:
                    if (u == 0) {
                        return decodeSveBinDestrPredU<SveMul>(
                            size, machInst, zdn, zm, pg);
                    } else {
                        return new Unknown64(machInst);
                    }
                  case 0x1:
                    return decodeSveBinDestrPred<SveSmulh, SveUmulh>(
                        size, u, machInst, zdn, zm, pg);
                  case 0x2:
                    if (size == 0x2 || size == 0x3) {
                        return decodeSveBinDestrPred<SveSdiv, SveUdiv>(
                            size, u, machInst, zdn, zm, pg);
                    } else {
                        return new Unknown64(machInst);
                    }
                  case 0x3:
                    if (size == 0x2 || size == 0x3) {
                        return decodeSveBinDestrPred<SveSdivr, SveUdivr>(
                            size, u, machInst, zdn, zm, pg);
                    } else {
                        return new Unknown64(machInst);
                    }
                }
                break;
            }
          case 0x3:
            {
                uint8_t size = bits(machInst, 23, 22);
                uint8_t opc = bits(machInst, 18, 16);

                switch (opc) {
                  case 0x0:
                    return decodeSveBinDestrPredU<SveOrrPred>(
                        size, machInst, zdn, zm, pg);
                  case 0x1:
                    return decodeSveBinDestrPredU<SveEorPred>(
                        size, machInst, zdn, zm, pg);
                  case 0x2:
                    return decodeSveBinDestrPredU<SveAndPred>(
                        size, machInst, zdn, zm, pg);
                  case 0x3:
                    return decodeSveBinDestrPredU<SveBicPred>(
                        size, machInst, zdn, zm, pg);
                  default:
                    return new Unknown64(machInst);
                }
            }
        }
        return new Unknown64(machInst);
    } // decodeSveIntArithBinPred

    StaticInstPtr
    decodeSveIntReduc(ExtMachInst machInst)
    {
        IntRegIndex vd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10);

        uint8_t size = bits(machInst, 23, 22);

        switch (bits(machInst, 20, 19)) {
          case 0x0:
            {
                uint8_t u = bits(machInst, 16);
                uint8_t opc = bits(machInst, 18, 17);
                if (opc != 0x0 || (!u && size == 0x3)) {
                    return new Unknown64(machInst);
                } else {
                    return decodeSveWideningReduc<SveSaddv, SveUaddv>(
                        size, u, machInst, vd, zn, pg);
                }
            }
          case 0x1:
            {
                uint8_t u = bits(machInst, 16);
                uint8_t opc = bits(machInst, 18, 17);
                switch (opc) {
                  case 0x0:
                    return decodeSveUnaryPred<SveSmaxv, SveUmaxv>(
                        size, u, machInst, vd, zn, pg);
                  case 0x1:
                    return decodeSveUnaryPred<SveSminv, SveUminv>(
                        size, u, machInst, vd, zn, pg);
                  default:
                    return new Unknown64(machInst);
                }
            }
          case 0x2:
            {
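                // SVE constructive prefix (predicated): MOVPRFX is
                // decoded here alongside the reductions.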
                uint8_t opc = bits(machInst, 18, 17);
                uint8_t merge = bits(machInst, 16);
                switch (opc) {
                  case 0x0:
                    if (merge) {
                        return decodeSveUnaryPredU<SveMovprfxPredM>(
                            size, machInst, vd /* zd */, zn, pg);
                    } else {
                        return decodeSveUnaryPredU<SveMovprfxPredZ>(
                            size, machInst, vd /* zd */, zn, pg);
                    }
                  default:
                    return new Unknown64(machInst);
                }
            }
          case 0x3:
            {
                uint8_t opc = bits(machInst, 18, 16);
                switch (opc) {
                  case 0x0:
                    return decodeSveUnaryPredU<SveOrv>(
                        size, machInst, vd, zn, pg);
                  case 0x1:
                    return decodeSveUnaryPredU<SveEorv>(
                        size, machInst, vd, zn, pg);
                  case 0x2:
                    return decodeSveUnaryPredU<SveAndv>(
                        size, machInst, vd, zn, pg);
                  default:
                    return new Unknown64(machInst);
                }
            }
        }
        return new Unknown64(machInst);
    } // decodeSveIntReduc

    StaticInstPtr
    decodeSveIntMulAdd(ExtMachInst machInst)
    {
        IntRegIndex zda = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
        IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 20, 16);
        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10);

        uint8_t size = bits(machInst, 23, 22);
        uint8_t opc = (bits(machInst, 15) << 1) | bits(machInst, 13);
        switch (opc) {
          case 0x0:
            return decodeSveTerPredS<SveMla>(
                size, machInst, zda, zn, zm, pg);
          case 0x1:
            return decodeSveTerPredS<SveMls>(
                size, machInst, zda, zn, zm, pg);
          case 0x2:
            return decodeSveTerPredS<SveMad>(
                size, machInst, zda /* zdn */, zn /* za */, zm, pg);
          case 0x3:
            return decodeSveTerPredS<SveMsb>(
                size, machInst, zda /* zdn */, zn /* za */, zm, pg);
        }
        return new Unknown64(machInst);
    } // decodeSveIntMulAdd

    StaticInstPtr
    decodeSveShiftByImmPred0(ExtMachInst machInst)
    {
        IntRegIndex zdn = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10);
        uint8_t imm3 = (uint8_t) bits(machInst, 7, 5);

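        // tsize = tszh:tszl encodes the element size as the position
        // of its most significant set bit: 0001 -> byte,
        // 001x -> halfword, 01xx -> word, 1xxx -> doubleword.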
        uint8_t tsize = (bits(machInst, 23, 22) << 2) | bits(machInst, 9, 8);
        uint8_t esize = 0;
        uint8_t size = 0;

        if (tsize == 0x0) {
            return new Unknown64(machInst);
        } else if (tsize == 0x1) {
            esize = 8;
        } else if ((tsize & 0x0E) == 0x2) {
            esize = 16;
            size = 1;
        } else if ((tsize & 0x0C) == 0x4) {
            esize = 32;
            size = 2;
        } else if ((tsize & 0x08) == 0x8) {
            esize = 64;
            size = 3;
        }

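        // For the right shifts (ASR, LSR, ASRD) the shift amount is
        // 2 * esize - (tsize:imm3), covering [1, esize]; for LSL it is
        // (tsize:imm3) - esize, covering [0, esize - 1]. E.g., for
        // esize = 32, tsize:imm3 = 0b0100001 (33) encodes a right
        // shift by 2 * 32 - 33 = 31.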
        uint8_t opc = bits(machInst, 18, 16);
        switch (opc) {
          case 0x0:
            {
                unsigned shiftAmt = 2 * esize - ((tsize << 3) | imm3);
                return decodeSveBinImmPredU<SveAsrImmPred>(
                    size, machInst, zdn, shiftAmt, pg);
            }
          case 0x01:
            {
                unsigned shiftAmt = 2 * esize - ((tsize << 3) | imm3);
                return decodeSveBinImmPredU<SveLsrImmPred>(
                    size, machInst, zdn, shiftAmt, pg);
            }
          case 0x03:
            {
                unsigned shiftAmt = ((tsize << 3) | imm3) - esize;
                return decodeSveBinImmPredU<SveLslImmPred>(
                    size, machInst, zdn, shiftAmt, pg);
            }
          case 0x04:
            {
                unsigned shiftAmt = 2 * esize - ((tsize << 3) | imm3);
                return decodeSveBinImmPredS<SveAsrd>(
                    size, machInst, zdn, shiftAmt, pg);
            }
        }
        return new Unknown64(machInst);
    } // decodeSveShiftByImmPred0

    StaticInstPtr
    decodeSveShiftByVectorPred(ExtMachInst machInst)
    {
        IntRegIndex zdn = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10);
        uint8_t size = bits(machInst, 23, 22);
        uint8_t opc = bits(machInst, 18, 16);
        switch (opc) {
          case 0:
            return decodeSveBinDestrPredU<SveAsrPred>(
                size, machInst, zdn, zm, pg);
          case 1:
            return decodeSveBinDestrPredU<SveLsrPred>(
                size, machInst, zdn, zm, pg);
          case 3:
            return decodeSveBinDestrPredU<SveLslPred>(
                size, machInst, zdn, zm, pg);
          case 4:
            return decodeSveBinDestrPredU<SveAsrr>(
                size, machInst, zdn, zm, pg);
          case 5:
            return decodeSveBinDestrPredU<SveLsrr>(
                size, machInst, zdn, zm, pg);
          case 7:
            return decodeSveBinDestrPredU<SveLslr>(
                size, machInst, zdn, zm, pg);
        }
        return new Unknown64(machInst);
    } // decodeSveShiftByVectorPred

    StaticInstPtr
    decodeSveShiftByWideElemsPred(ExtMachInst machInst)
    {
        IntRegIndex zdn = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10);
        uint8_t size = bits(machInst, 23, 22);
        uint8_t opc = bits(machInst, 18, 16);
        switch (opc) {
          case 0x0:
            return decodeSveBinDestrPredU<SveAsrWidePred>(
                size, machInst, zdn, zm, pg);
          case 0x1:
            return decodeSveBinDestrPredU<SveLsrWidePred>(
                size, machInst, zdn, zm, pg);
          case 0x3:
            return decodeSveBinDestrPredU<SveLslWidePred>(
                size, machInst, zdn, zm, pg);
        }
        return new Unknown64(machInst);
    } // decodeSveShiftByWideElemsPred

    StaticInstPtr
    decodeSveShiftByImmPred(ExtMachInst machInst)
    {
        uint8_t b20_19 = bits(machInst, 20, 19);
        uint8_t b23_22 = bits(machInst, 23, 22);

        if (b20_19 == 0x0) {
            return decodeSveShiftByImmPred0(machInst);
        } else if (b20_19 == 0x2) {
            return decodeSveShiftByVectorPred(machInst);
        } else if (b20_19 == 0x3 && b23_22 != 0x3) {
            return decodeSveShiftByWideElemsPred(machInst);
        }
        return new Unknown64(machInst);
    } // decodeSveShiftByImmPred

    StaticInstPtr
    decodeSveIntArithUnaryPred(ExtMachInst machInst)
    {
        IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10);
        unsigned esize = bits(machInst, 23, 22);
        uint8_t opg = bits(machInst, 20, 19);
        uint8_t opc = bits(machInst, 18, 16);
        if (opg == 0x2) {
            bool unsig = static_cast<bool>(opc & 1);
            switch (opc) {
              case 0:
              case 1:
                if (esize == 0) break;
                if (unsig) {
                    return decodeSveUnaryExtendFromBPredU<SveUxtb>(
                        esize, machInst, zd, zn, pg);
                } else {
                    return decodeSveUnaryExtendFromBPredU<SveSxtb>(
                        esize, machInst, zd, zn, pg);
                }
              case 2:
              case 3:
                if (esize < 2) break;
                if (unsig) {
                    return decodeSveUnaryExtendFromHPredU<SveUxth>(
                        esize, machInst, zd, zn, pg);
                } else {
                    return decodeSveUnaryExtendFromHPredU<SveSxth>(
                        esize, machInst, zd, zn, pg);
                }
              case 4:
              case 5:
                if (esize != 3) break;
                if (unsig) {
                    return new SveUxtw<uint32_t, uint64_t>(
                        machInst, zd, zn, pg);
                } else {
                    return new SveSxtw<uint32_t, uint64_t>(
                        machInst, zd, zn, pg);
                }
              case 6:
                return decodeSveUnaryPredS<SveAbs>(
                    esize, machInst, zd, zn, pg);
              case 7:
                return decodeSveUnaryPredS<SveNeg>(
                    esize, machInst, zd, zn, pg);
            }
        } else if (opg == 0x3) {
            switch (opc) {
              case 0:
                return decodeSveUnaryPredS<SveCls>(
                    esize, machInst, zd, zn, pg);
              case 1:
                return decodeSveUnaryPredS<SveClz>(
                    esize, machInst, zd, zn, pg);
              case 2:
                return decodeSveUnaryPredU<SveCnt>(
                    esize, machInst, zd, zn, pg);
              case 3:
                return decodeSveUnaryPredU<SveCnot>(
                    esize, machInst, zd, zn, pg);
              case 4:
                return decodeSveUnaryPredF<SveFabs>(
                    esize, machInst, zd, zn, pg);
              case 5:
                return decodeSveUnaryPredF<SveFneg>(
                    esize, machInst, zd, zn, pg);
              case 6:
                return decodeSveUnaryPredU<SveNot>(
                    esize, machInst, zd, zn, pg);
            }
        }
        return new Unknown64(machInst);
    } // decodeSveIntArithUnaryPred

    StaticInstPtr
    decodeSveIntArithUnpred(ExtMachInst machInst)
    {
        IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
        IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 20, 16);

        uint8_t opc = (uint8_t) bits(machInst, 12, 10);
        uint8_t size = (uint8_t) bits(machInst, 23, 22);

        switch (opc) {
          case 0x0:
            return decodeSveBinUnpredU<SveAddUnpred>(size, machInst,
                    zd, zn, zm);
          case 0x1:
            return decodeSveBinUnpredU<SveSubUnpred>(size, machInst,
                    zd, zn, zm);
          case 0x4:
            return decodeSveBinUnpredS<SveSqadd>(size, machInst,
                    zd, zn, zm);
          case 0x5:
            return decodeSveBinUnpredU<SveUqadd>(size, machInst,
                    zd, zn, zm);
          case 0x6:
            return decodeSveBinUnpredS<SveSqsub>(size, machInst,
                    zd, zn, zm);
          case 0x7:
            return decodeSveBinUnpredU<SveUqsub>(size, machInst,
                    zd, zn, zm);
        }

        return new Unknown64(machInst);
    } // decodeSveIntArithUnpred

    StaticInstPtr
    decodeSveIntLogUnpred(ExtMachInst machInst)
    {
        IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
        IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 20, 16);
        uint8_t opc = (uint8_t) (bits(machInst, 23, 22) << 3
                | bits(machInst, 12, 10));

        switch (opc) {
          case 0x4:
            return new SveAndUnpred<uint64_t>(machInst, zd, zn, zm);
          case 0xc:
            return new SveOrrUnpred<uint64_t>(machInst, zd, zn, zm);
          case 0x14:
            return new SveEorUnpred<uint64_t>(machInst, zd, zn, zm);
          case 0x1c:
            return new SveBicUnpred<uint64_t>(machInst, zd, zn, zm);
        }

        return new Unknown64(machInst);
    } // decodeSveIntLogUnpred

    StaticInstPtr
    decodeSveIndexGen(ExtMachInst machInst)
    {
        IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        uint8_t size = (uint8_t) bits(machInst, 23, 22);
        uint8_t grp = (uint8_t) bits(machInst, 11, 10);

        switch (grp) {
          case 0:
            { // INDEX (immediate)
                int8_t imm5 = sext<5>(bits(machInst, 9, 5));
                int8_t imm5b = sext<5>(bits(machInst, 20, 16));
                switch (size) {
                  case 0:
                    return new SveIndexII<int8_t>(machInst,
                            zd, imm5, imm5b);
                  case 1:
                    return new SveIndexII<int16_t>(machInst,
                            zd, imm5, imm5b);
                  case 2:
                    return new SveIndexII<int32_t>(machInst,
                            zd, imm5, imm5b);
                  case 3:
                    return new SveIndexII<int64_t>(machInst,
                            zd, imm5, imm5b);
                }
                break;
            }
          case 1:
            { // INDEX (scalar, immediate)
                int8_t imm5 = sext<5>(bits(machInst, 20, 16));
                IntRegIndex zn = (IntRegIndex) (uint8_t) bits(
                        machInst, 9, 5);
                switch (size) {
                  case 0:
                    return new SveIndexRI<int8_t>(machInst,
                            zd, zn, imm5);
                  case 1:
                    return new SveIndexRI<int16_t>(machInst,
                            zd, zn, imm5);
                  case 2:
                    return new SveIndexRI<int32_t>(machInst,
                            zd, zn, imm5);
                  case 3:
                    return new SveIndexRI<int64_t>(machInst,
                            zd, zn, imm5);
                }
                break;
            }
          case 2:
            { // INDEX (immediate, scalar)
                int8_t imm5 = sext<5>(bits(machInst, 9, 5));
                IntRegIndex zm = (IntRegIndex) (uint8_t) bits(
                        machInst, 20, 16);
                switch (size) {
                  case 0:
                    return new SveIndexIR<int8_t>(machInst,
                            zd, imm5, zm);
                  case 1:
                    return new SveIndexIR<int16_t>(machInst,
                            zd, imm5, zm);
                  case 2:
                    return new SveIndexIR<int32_t>(machInst,
                            zd, imm5, zm);
                  case 3:
                    return new SveIndexIR<int64_t>(machInst,
                            zd, imm5, zm);
                }
                break;
            }
          case 3:
            { // INDEX (scalars)
                IntRegIndex zn = (IntRegIndex) (uint8_t) bits(
                        machInst, 9, 5);
                IntRegIndex zm = (IntRegIndex) (uint8_t) bits(
                        machInst, 20, 16);
                switch (size) {
                  case 0:
                    return new SveIndexRR<int8_t>(machInst,
                            zd, zn, zm);
                  case 1:
                    return new SveIndexRR<int16_t>(machInst,
                            zd, zn, zm);
                  case 2:
                    return new SveIndexRR<int32_t>(machInst,
                            zd, zn, zm);
                  case 3:
                    return new SveIndexRR<int64_t>(machInst,
                            zd, zn, zm);
                }
            }
        }
        return new Unknown64(machInst);
    } // decodeSveIndexGen

    StaticInstPtr
    decodeSveStackAlloc(ExtMachInst machInst)
    {
        uint8_t b23_22 = bits(machInst, 23, 22);
        uint8_t b11 = bits(machInst, 11);
        if ((b23_22 & 0x2) == 0x0 && b11 == 0x0) {
            IntRegIndex rd = makeSP(
                    (IntRegIndex) (uint8_t) bits(machInst, 4, 0));
            IntRegIndex rn = makeSP(
                    (IntRegIndex) (uint8_t) bits(machInst, 20, 16));
            uint64_t imm = sext<6>(bits(machInst, 10, 5));
            if ((b23_22 & 0x1) == 0x0) {
                return new AddvlXImm(machInst, rd, rn, imm);
            } else {
                return new AddplXImm(machInst, rd, rn, imm);
            }
        } else if (b23_22 == 0x2 && b11 == 0x0) {
            IntRegIndex rd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
            uint64_t imm = sext<6>(bits(machInst, 10, 5));
            if (bits(machInst, 20, 16) == 0x1f) {
                return new SveRdvl(machInst, rd, imm);
            }
        }
        return new Unknown64(machInst);
    } // decodeSveStackAlloc

    StaticInstPtr
    decodeSveShiftByWideElemsUnpred(ExtMachInst machInst)
    {
        IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
        IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 20, 16);
        uint8_t size = bits(machInst, 23, 22);
        uint8_t opc = (uint8_t) bits(machInst, 11, 10);
        switch (opc) {
          case 0x0:
            return decodeSveBinUnpredU<SveAsrWideUnpred>(
                size, machInst, zd, zn, zm);
          case 0x1:
            return decodeSveBinUnpredU<SveLsrWideUnpred>(
                size, machInst, zd, zn, zm);
          case 0x3:
            return decodeSveBinUnpredU<SveLslWideUnpred>(
                size, machInst, zd, zn, zm);
        }
        return new Unknown64(machInst);
    } // decodeSveShiftByWideElemsUnpred

    StaticInstPtr
    decodeSveShiftByImmUnpredB(ExtMachInst machInst)
    {
        IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
        uint8_t imm3 = (uint8_t) bits(machInst, 18, 16);

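        // Same tsize -> esize decode as in decodeSveShiftByImmPred0
        // above, except that here tszl comes from bits [20:19].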
        uint8_t tsize = (bits(machInst, 23, 22) << 2) | bits(machInst, 20, 19);
        uint8_t esize = 0;
        uint8_t size = 0;
        if (tsize == 0x0) {
            return new Unknown64(machInst);
        } else if (tsize == 0x1) {
            esize = 8;
        } else if ((tsize & 0x0E) == 0x2) {
            esize = 16;
            size = 1;
        } else if ((tsize & 0x0C) == 0x4) {
            esize = 32;
            size = 2;
        } else if ((tsize & 0x08) == 0x8) {
            esize = 64;
            size = 3;
        }

        uint8_t opc = bits(machInst, 11, 10);
        switch (opc) {
          case 0x00:
            {
                unsigned shiftAmt = 2 * esize - ((tsize << 3) | imm3);
                return decodeSveBinImmUnpredU<SveAsrImmUnpred>(
                    size, machInst, zd, zn, shiftAmt);
            }
          case 0x01:
            {
                unsigned shiftAmt = 2 * esize - ((tsize << 3) | imm3);
                return decodeSveBinImmUnpredU<SveLsrImmUnpred>(
                    size, machInst, zd, zn, shiftAmt);
            }
          case 0x03:
            {
                unsigned shiftAmt = ((tsize << 3) | imm3) - esize;
                return decodeSveBinImmUnpredU<SveLslImmUnpred>(
                    size, machInst, zd, zn, shiftAmt);
            }
        }

        return new Unknown64(machInst);
    } // decodeSveShiftByImmUnpredB

    StaticInstPtr
    decodeSveShiftByImmUnpred(ExtMachInst machInst)
    {
        if (bits(machInst, 12)) {
            return decodeSveShiftByImmUnpredB(machInst);
        } else {
            return decodeSveShiftByWideElemsUnpred(machInst);
        }
    } // decodeSveShiftByImmUnpred

    StaticInstPtr
    decodeSveCompVecAddr(ExtMachInst machInst)
    {
        IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
        IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 20, 16);
        uint8_t mult = 1 << bits(machInst, 11, 10);

        uint8_t opc = bits(machInst, 23, 22);

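        // ADR: opc selects how zm is interpreted: 0x0/0x1 64-bit
        // elements with the low 32 bits of zm sign-/zero-extended
        // ("unpacked"), 0x2/0x3 packed 32-bit or 64-bit offsets;
        // mult scales the offset by 1, 2, 4 or 8.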
        switch (opc) {
          case 0x0:
            return new SveAdr<uint64_t>(machInst, zd, zn, zm, mult,
                    SveAdr<uint64_t>::SveAdrOffsetUnpackedSigned);
          case 0x1:
            return new SveAdr<uint64_t>(machInst, zd, zn, zm, mult,
                    SveAdr<uint64_t>::SveAdrOffsetUnpackedUnsigned);
          case 0x2:
            return new SveAdr<uint32_t>(machInst, zd, zn, zm, mult,
                    SveAdr<uint32_t>::SveAdrOffsetPacked);
          case 0x3:
            return new SveAdr<uint64_t>(machInst, zd, zn, zm, mult,
                    SveAdr<uint64_t>::SveAdrOffsetPacked);
        }
        return new Unknown64(machInst);
    } // decodeSveCompVecAddr

    StaticInstPtr
    decodeSveIntMiscUnpred(ExtMachInst machInst)
    {
        IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);

        uint8_t size = bits(machInst, 23, 22);
        uint8_t opc = bits(machInst, 11, 10);
        switch (opc) {
          case 0x0:
            // SVE floating-point trig select coefficient
            {
                if (size == 0) {
                    break;
                }
                IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst,
                        20, 16);
                return decodeSveBinUnpredF<SveFtssel>(
                    size, machInst, zd, zn, zm);
            }
          case 0x2:
            // SVE floating-point exponential accelerator
            if (size == 0) {
                break;
            }
            return decodeSveUnaryUnpredF<SveFexpa>(size, machInst, zd, zn);
          case 0x3:
            // SVE constructive prefix (unpredicated)
            if (size == 0x0 && bits(machInst, 20, 16) == 0x0) {
                return new SveMovprfxUnpred<uint64_t>(machInst, zd, zn);
            }
            break;
        }
        return new Unknown64(machInst);
    } // decodeSveIntMiscUnpred

    StaticInstPtr
    decodeSveElemCount(ExtMachInst machInst)
    {
        uint8_t opc20 = (uint8_t) bits(machInst, 20);
        uint8_t b13_12 = (uint8_t) bits(machInst, 13, 12);
        uint8_t opc11 = (uint8_t) bits(machInst, 11);
        uint8_t opc10 = (uint8_t) bits(machInst, 10);
        uint8_t opc11_10 = (uint8_t) bits(machInst, 11, 10);
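        // Bits [13:12] split the group: 0x0 (saturating) inc/dec of a
        // vector by element count, 0x3 saturating inc/dec of a scalar,
        // 0x2 plain INC/DEC of a scalar or CNT<B|H|W|D>.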
        if (b13_12 == 0) {
            uint8_t pattern = (uint8_t) bits(machInst, 9, 5);
            uint8_t imm4 = (uint8_t) bits(machInst, 19, 16) + 1;
            IntRegIndex zdn = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
            unsigned size = (unsigned) bits(machInst, 23, 22);
            if (opc20) {
                if (opc11 == 0) {
                    if (opc10) {
                        return decodeSveElemIntCountLU<SveDecv>(size,
                                machInst, zdn, pattern, imm4);
                    } else {
                        return decodeSveElemIntCountLU<SveIncv>(size,
                                machInst, zdn, pattern, imm4);
                    }
                }
            } else {
                if (opc11) {
                    if (opc10) {
                        return decodeSveElemIntCountLU<SveUqdecv>(size,
                                machInst, zdn, pattern, imm4);
                    } else {
                        return decodeSveElemIntCountLS<SveSqdecv>(size,
                                machInst, zdn, pattern, imm4);
                    }
                } else {
                    if (opc10) {
                        return decodeSveElemIntCountLU<SveUqincv>(size,
                                machInst, zdn, pattern, imm4);
                    } else {
                        return decodeSveElemIntCountLS<SveSqincv>(size,
                                machInst, zdn, pattern, imm4);
                    }
                }
            }
        } else if (b13_12 == 3) {
            uint8_t pattern = (uint8_t) bits(machInst, 9, 5);
            uint8_t imm4 = (uint8_t) bits(machInst, 19, 16) + 1;
            IntRegIndex rdn = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
            unsigned size = (unsigned) bits(machInst, 23, 22);
            switch (opc11_10) {
              case 0:
                if (opc20) {
                    return decodeSveElemIntCountS<SveSqinc>(size,
                            machInst, rdn, pattern, imm4);
                } else {
                    return decodeSveElemIntCountS<SveSqinc32>(size,
                            machInst, rdn, pattern, imm4);
                }
              case 1:
                if (opc20) {
                    return decodeSveElemIntCountU<SveUqinc>(size,
                            machInst, rdn, pattern, imm4);
                } else {
                    return decodeSveElemIntCountU<SveUqinc32>(size,
                            machInst, rdn, pattern, imm4);
                }
              case 2:
                if (opc20) {
                    return decodeSveElemIntCountS<SveSqdec>(size,
                            machInst, rdn, pattern, imm4);
                } else {
                    return decodeSveElemIntCountS<SveSqdec32>(size,
                            machInst, rdn, pattern, imm4);
                }
              case 3:
                if (opc20) {
                    return decodeSveElemIntCountU<SveUqdec>(size,
                            machInst, rdn, pattern, imm4);
                } else {
                    return decodeSveElemIntCountU<SveUqdec32>(size,
                            machInst, rdn, pattern, imm4);
                }
            }
        } else if (opc20 && b13_12 == 2 && !(opc11_10 & 0x2)) {
            uint8_t pattern = (uint8_t) bits(machInst, 9, 5);
            uint8_t imm4 = (uint8_t) bits(machInst, 19, 16) + 1;
            IntRegIndex rdn = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
            unsigned size = (unsigned) bits(machInst, 23, 22);
            if (opc11_10 & 0x1) {
                return decodeSveElemIntCountU<SveDec>(size, machInst,
                        rdn, pattern, imm4);
            } else {
                return decodeSveElemIntCountU<SveInc>(size, machInst,
                        rdn, pattern, imm4);
            }
        } else if (!opc20 && b13_12 == 2 && opc11_10 == 0) {
            uint8_t pattern = (uint8_t) bits(machInst, 9, 5);
            uint8_t imm4 = (uint8_t) bits(machInst, 19, 16) + 1;
            IntRegIndex rd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
            unsigned size = (unsigned) bits(machInst, 23, 22);
            return decodeSveElemIntCountU<SveCntx>(size, machInst,
                    rd, pattern, imm4);
        }
        return new Unknown64(machInst);
    } // decodeSveElemCount

    StaticInstPtr
    decodeSveLogMaskImm(ExtMachInst machInst)
    {
        IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        bool n = bits(machInst, 17);
        uint8_t immr = bits(machInst, 16, 11);
        uint8_t imms = bits(machInst, 10, 5);

        // Decode bitmask
        // len = MSB(n:NOT(imms)), len < 1 is undefined
        uint8_t len = 0;
        if (n) {
            len = 6;
        } else if (imms == 0x3f || imms == 0x3e) {
            return new Unknown64(machInst);
        } else {
            len = findMsbSet(imms ^ 0x3f);
        }
        // Generate r, s, and size
        uint64_t r = bits(immr, len - 1, 0);
        uint64_t s = bits(imms, len - 1, 0);
        uint8_t size = 1 << len;
        if (s == size - 1)
            return new Unknown64(machInst);
        // Generate the pattern with s 1s, rotated by r, with size bits
        uint64_t pattern = mask(s + 1);
        if (r) {
            pattern = (pattern >> r) | (pattern << (size - r));
            pattern &= mask(size);
        }
        // Replicate that to fill up the immediate
        for (unsigned i = 1; i < (64 / size); i *= 2)
            pattern |= (pattern << (i * size));
        uint64_t imm = pattern;
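        // Worked example (standard AArch64 bitmask semantics): n = 0,
        // immr = 0, imms = 0b111100 gives len = 1, size = 2, s = 0,
        // so pattern = 0b01, replicated across 64 bits to
        // 0x5555555555555555.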

        if (bits(machInst, 19, 18) == 0x0) {
            if (bits(machInst, 23, 22) == 0x3) {
                return new SveDupm<uint64_t>(machInst, zd, imm);
            } else {
                switch (bits(machInst, 23, 22)) {
                  case 0x0:
                    return new SveOrrImm<uint64_t>(machInst, zd, imm);
                  case 0x1:
                    return new SveEorImm<uint64_t>(machInst, zd, imm);
                  case 0x2:
                    return new SveAndImm<uint64_t>(machInst, zd, imm);
                }
            }
        }

        return new Unknown64(machInst);
    } // decodeSveLogMaskImm

    StaticInstPtr
    decodeSveIntWideImmPred(ExtMachInst machInst)
    {
        IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 19, 16);
        uint8_t size = bits(machInst, 23, 22);

        if (bits(machInst, 15) == 0x0) {
            uint64_t imm = bits(machInst, 12, 5);
            uint8_t sh = bits(machInst, 13);
            uint8_t m = bits(machInst, 14);
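            // CPY (immediate): sh shifts the 8-bit immediate left by 8
            // (reserved for byte elements); m selects merging (CPY/M)
            // versus zeroing (CPY/Z) predication.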
            if (sh) {
                if (size == 0x0) {
                    return new Unknown64(machInst);
                }
                imm <<= 8;
            }
            if (m) {
                if (sh) {
                    return decodeSveWideImmPredU<SveCpyImmMerge>(
                        size, machInst, zd, sext<16>(imm), pg);
                } else {
                    return decodeSveWideImmPredU<SveCpyImmMerge>(
                        size, machInst, zd, sext<8>(imm), pg);
                }
            } else {
                if (sh) {
                    return decodeSveWideImmPredU<SveCpyImmZero>(
                        size, machInst, zd, sext<16>(imm), pg,
                        false /* isMerging */);
                } else {
                    return decodeSveWideImmPredU<SveCpyImmZero>(
                        size, machInst, zd, sext<8>(imm), pg,
                        false /* isMerging */);
                }
            }
        } else if (bits(machInst, 15, 13) == 0x6 && size != 0x0) {
            uint64_t imm = vfp_modified_imm(bits(machInst, 12, 5),
                                            decode_fp_data_type(size));
            return decodeSveWideImmPredF<SveFcpy>(
                size, machInst, zd, imm, pg);
        }

        return new Unknown64(machInst);
    } // decodeSveIntWideImmPred

    StaticInstPtr
    decodeSvePermExtract(ExtMachInst machInst)
    {
        uint8_t b23_22 = (unsigned) bits(machInst, 23, 22);
        if (!b23_22) {
            uint8_t position =
                bits(machInst, 20, 16) << 3 | bits(machInst, 12, 10);
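            // EXT: the byte offset within the source pair is the 8-bit
            // immediate imm8h:imm8l.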
            IntRegIndex zdn = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
            IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
            return new SveExt<uint8_t>(machInst, zdn, zm, position);
        }
        return new Unknown64(machInst);
    } // decodeSvePermExtract

    StaticInstPtr
    decodeSvePermUnpred(ExtMachInst machInst)
    {
        uint8_t b12_10 = bits(machInst, 12, 10);
        if (b12_10 == 0x4) {
            unsigned size = (unsigned) bits(machInst, 23, 22);
            IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
            IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
            IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 20, 16);
            return decodeSveBinUnpredU<SveTbl>(size, machInst, zd, zn, zm);
        } else if (bits(machInst, 20, 16) == 0x0 && b12_10 == 0x6) {
            uint8_t size = bits(machInst, 23, 22);
            IntRegIndex rn = makeSP(
                    (IntRegIndex) (uint8_t) bits(machInst, 9, 5));
            IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
            return decodeSveUnaryUnpredU<SveDupScalar>(size, machInst, zd, rn);
        } else if (bits(machInst, 20, 16) == 0x4 && b12_10 == 0x6) {
            uint8_t size = bits(machInst, 23, 22);
            IntRegIndex zdn = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
            IntRegIndex rm = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
            return decodeSveUnaryUnpredU<SveInsr>(size, machInst, zdn, rm);
        } else if (bits(machInst, 20, 16) == 0x14 && b12_10 == 0x6) {
            uint8_t size = bits(machInst, 23, 22);
            IntRegIndex zdn = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
            IntRegIndex vm = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
            return decodeSveUnaryUnpredU<SveInsrf>(size, machInst, zdn, vm);
        } else if (bits(machInst, 20, 16) == 0x18 && b12_10 == 0x6) {
            uint8_t size = bits(machInst, 23, 22);
            IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
            IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
            return decodeSveUnaryUnpredU<SveRevv>(size, machInst, zd, zn);
        } else if (b12_10 == 0x0 && bits(machInst, 20, 16) != 0x0) {
            uint8_t imm =
                bits(machInst, 23, 22) << 5 | // imm3h
                bits(machInst, 20) << 4 |     // imm3l
                bits(machInst, 19, 16);       // tsz
            IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
            IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
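            // DUP (indexed): the lowest set bit of the concatenated
            // immediate selects the element size (bit 0 -> byte, ...,
            // bit 4 -> 128 bits); the bits above it give the index.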
            if (imm & 0x1) {
                imm >>= 1;
                return new SveDupIdx<uint8_t>(machInst, zd, zn, imm);
            } else if (imm & 0x2) {
                imm >>= 2;
                return new SveDupIdx<uint16_t>(machInst, zd, zn, imm);
            } else if (imm & 0x4) {
                imm >>= 3;
                return new SveDupIdx<uint32_t>(machInst, zd, zn, imm);
            } else if (imm & 0x8) {
                imm >>= 4;
                return new SveDupIdx<uint64_t>(machInst, zd, zn, imm);
            } else if (imm & 0x10) {
                imm >>= 5;
                return new SveDupIdx<__uint128_t>(machInst, zd, zn, imm);
            }
            return new Unknown64(machInst);
        } else if (bits(machInst, 23, 22) != 0x0 &&
                   bits(machInst, 20, 18) == 0x4 && b12_10 == 0x6) {
            unsigned size = (unsigned) bits(machInst, 23, 22);
            IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
            IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
            if (bits(machInst, 17)) {
                if (bits(machInst, 16)) {
                    return decodeSveUnpackU<SveUunpkhi>(size, machInst,
                            zd, zn);
                } else {
                    return decodeSveUnpackU<SveUunpklo>(size, machInst,
                            zd, zn);
                }
            } else {
                if (bits(machInst, 16)) {
                    return decodeSveUnpackS<SveSunpkhi>(size, machInst,
                            zd, zn);
                } else {
                    return decodeSveUnpackS<SveSunpklo>(size, machInst,
                            zd, zn);
                }
            }
        }
        return new Unknown64(machInst);
    } // decodeSvePermUnpred

    StaticInstPtr
    decodeSvePermPredicates(ExtMachInst machInst)
    {
        if (bits(machInst, 20) == 0x0 && bits(machInst, 12, 11) != 0x3 &&
            bits(machInst, 9) == 0x0 && bits(machInst, 4) == 0x0) {
            IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
            IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
            IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 20, 16);

            uint8_t size = bits(machInst, 23, 22);

            uint8_t opc = bits(machInst, 12, 10);

            switch (opc) {
              case 0x0:
                return decodeSveBinUnpredU<SveZip1Pred>(size,
                        machInst, zd, zn, zm);
              case 0x1:
                return decodeSveBinUnpredU<SveZip2Pred>(size,
                        machInst, zd, zn, zm);
              case 0x2:
                return decodeSveBinUnpredU<SveUzp1Pred>(size,
                        machInst, zd, zn, zm);
              case 0x3:
                return decodeSveBinUnpredU<SveUzp2Pred>(size,
                        machInst, zd, zn, zm);
              case 0x4:
                return decodeSveBinUnpredU<SveTrn1Pred>(size,
                        machInst, zd, zn, zm);
              case 0x5:
                return decodeSveBinUnpredU<SveTrn2Pred>(size,
                        machInst, zd, zn, zm);
            }
        } else if (bits(machInst, 23, 22) == 0x0 &&
                   bits(machInst, 20, 17) == 0x8 &&
                   bits(machInst, 12, 9) == 0x0 &&
                   bits(machInst, 4) == 0x0) {
            IntRegIndex pd = (IntRegIndex) (uint8_t) bits(machInst, 3, 0);
            IntRegIndex pn = (IntRegIndex) (uint8_t) bits(machInst, 8, 5);
            if (bits(machInst, 16)) {
                return new SvePunpkhi<uint8_t, uint16_t>(machInst, pd, pn);
            } else {
                return new SvePunpklo<uint8_t, uint16_t>(machInst, pd, pn);
            }
        } else if (bits(machInst, 20, 16) == 0x14 &&
                   bits(machInst, 12, 9) == 0x00 && bits(machInst, 4) == 0) {
            uint8_t size = bits(machInst, 23, 22);
            IntRegIndex pd = (IntRegIndex) (uint8_t) bits(machInst, 3, 0);
            IntRegIndex pn = (IntRegIndex) (uint8_t) bits(machInst, 8, 5);
            return decodeSveUnaryUnpredU<SveRevp>(size, machInst, pd, pn);
        }
        return new Unknown64(machInst);
    } // decodeSvePermPredicates

    StaticInstPtr
    decodeSvePermIntlv(ExtMachInst machInst)
    {
        IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
        IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 20, 16);

        uint8_t size = bits(machInst, 23, 22);

        uint8_t opc = bits(machInst, 12, 10);

        switch (opc) {
          case 0x0:
            return decodeSveBinUnpredU<SveZip1>(size, machInst, zd, zn, zm);
          case 0x1:
            return decodeSveBinUnpredU<SveZip2>(size, machInst, zd, zn, zm);
          case 0x2:
            return decodeSveBinUnpredU<SveUzp1>(size, machInst, zd, zn, zm);
          case 0x3:
            return decodeSveBinUnpredU<SveUzp2>(size, machInst, zd, zn, zm);
          case 0x4:
            return decodeSveBinUnpredU<SveTrn1>(size, machInst, zd, zn, zm);
          case 0x5:
            return decodeSveBinUnpredU<SveTrn2>(size, machInst, zd, zn, zm);
        }
        return new Unknown64(machInst);
    } // decodeSvePermIntlv

    StaticInstPtr
    decodeSvePermPred(ExtMachInst machInst)
    {
        uint8_t b13 = bits(machInst, 13);
        uint8_t b23 = bits(machInst, 23);
        switch (bits(machInst, 20, 16)) {
          case 0x0:
            if (!b13) {
                uint8_t size = bits(machInst, 23, 22);
                IntRegIndex pg = (IntRegIndex)(uint8_t) bits(machInst, 12, 10);
                IntRegIndex vn = (IntRegIndex)(uint8_t) bits(machInst, 9, 5);
                IntRegIndex zd = (IntRegIndex)(uint8_t) bits(machInst, 4, 0);
                return decodeSveUnaryPredU<SveCpySimdFpScalar>(size,
                        machInst, zd, vn, pg);
            }
            break;
          case 0x1:
            if (!b13 && b23) {
                // sve_int_perm_compact
                IntRegIndex pg = (IntRegIndex)(uint8_t) bits(machInst, 12, 10);
                IntRegIndex zn = (IntRegIndex)(uint8_t) bits(machInst, 9, 5);
                IntRegIndex zd = (IntRegIndex)(uint8_t) bits(machInst, 4, 0);
                if (bits(machInst, 22)) {
                    return new SveCompact<uint64_t>(machInst, zd, zn, pg);
                } else {
                    return new SveCompact<uint32_t>(machInst, zd, zn, pg);
                }
            }
            break;
          case 0x8:
            if (b13) {
                uint8_t size = bits(machInst, 23, 22);
                IntRegIndex pg = (IntRegIndex)(uint8_t) bits(machInst, 12, 10);
                IntRegIndex rn = makeSP(
                        (IntRegIndex)(uint8_t) bits(machInst, 9, 5));
                IntRegIndex zd = (IntRegIndex)(uint8_t) bits(machInst, 4, 0);
                return decodeSveUnaryPredU<SveCpyScalar>(size,
                        machInst, zd, rn, pg);
            }
            break;
          case 0xC:
            if (!b13) {
                uint8_t size = bits(machInst, 23, 22);
                IntRegIndex pg = (IntRegIndex)(uint8_t) bits(machInst, 12, 10);
                IntRegIndex zdn = (IntRegIndex)(uint8_t) bits(machInst, 4, 0);
                IntRegIndex zm = (IntRegIndex)(uint8_t) bits(machInst, 9, 5);
                return decodeSveBinDestrPredU<SveSplice>(size, machInst,
                        zdn, zm, pg);
            }
            break;
        }
        switch (bits(machInst, 20, 17)) {
          case 0x0:
            if (b13) {
                uint8_t AB = bits(machInst, 16);
                uint8_t size = bits(machInst, 23, 22);
                IntRegIndex pg = (IntRegIndex)(uint8_t) bits(machInst, 12, 10);
                IntRegIndex zn = (IntRegIndex)(uint8_t) bits(machInst, 9, 5);
                IntRegIndex rd = (IntRegIndex)(uint8_t) bits(machInst, 4, 0);
                if (!AB) {
                    return decodeSveUnaryPredU<SveLasta>(size,
                            machInst, rd, zn, pg);
                } else {
                    return decodeSveUnaryPredU<SveLastb>(size,
                            machInst, rd, zn, pg);
                }
            }
            break;
          case 0x1:
            if (!b13) {
                uint8_t AB = bits(machInst, 16);
                uint8_t size = bits(machInst, 23, 22);
                IntRegIndex pg = (IntRegIndex)(uint8_t) bits(machInst, 12, 10);
                IntRegIndex zn = (IntRegIndex)(uint8_t) bits(machInst, 9, 5);
                IntRegIndex vd = (IntRegIndex)(uint8_t) bits(machInst, 4, 0);
                if (!AB) {
                    return decodeSveUnaryPredU<SveLastaf>(size,
                            machInst, vd, zn, pg);
                } else {
                    return decodeSveUnaryPredU<SveLastbf>(size,
                            machInst, vd, zn, pg);
                }
            }
            break;
          case 0x4:
            if (!b13) {
                uint8_t AB = bits(machInst, 16);
                uint8_t size = bits(machInst, 23, 22);
                IntRegIndex pg = (IntRegIndex)(uint8_t) bits(machInst, 12, 10);
                IntRegIndex zm = (IntRegIndex)(uint8_t) bits(machInst, 9, 5);
                IntRegIndex zdn = (IntRegIndex)(uint8_t) bits(machInst, 4, 0);
                if (!AB) {
                    return decodeSveUnaryPredU<SveClastav>(size,
                            machInst, zdn, zm, pg);
                } else {
                    return decodeSveUnaryPredU<SveClastbv>(size,
                            machInst, zdn, zm, pg);
                }
            }
            break;
          case 0x5:
            if (!b13) {
                uint8_t AB = bits(machInst, 16);
                uint8_t size = bits(machInst, 23, 22);
                IntRegIndex pg = (IntRegIndex)(uint8_t) bits(machInst, 12, 10);
                IntRegIndex zm = (IntRegIndex)(uint8_t) bits(machInst, 9, 5);
                IntRegIndex zdn = (IntRegIndex)(uint8_t) bits(machInst, 4, 0);
                if (!AB) {
                    return decodeSveUnaryPredU<SveClastaf>(size,
                            machInst, zdn, zm, pg);
                } else {
                    return decodeSveUnaryPredU<SveClastbf>(size,
                            machInst, zdn, zm, pg);
                }
            }
            break;
          case 0x8:
            if (b13) {
                uint8_t AB = bits(machInst, 16);
                uint8_t size = bits(machInst, 23, 22);
                IntRegIndex pg = (IntRegIndex)(uint8_t) bits(machInst, 12, 10);
                IntRegIndex zm = (IntRegIndex)(uint8_t) bits(machInst, 9, 5);
                IntRegIndex rdn = (IntRegIndex)(uint8_t) bits(machInst, 4, 0);
                if (!AB) {
                    return decodeSveUnaryPredU<SveClasta>(size,
                            machInst, rdn, zm, pg);
                } else {
                    return decodeSveUnaryPredU<SveClastb>(size,
                            machInst, rdn, zm, pg);
                }
            }
            break;
        }
        if (bits(machInst, 20, 18) == 0x1 && !b13) {
            unsigned size = (unsigned) bits(machInst, 23, 22);
            IntRegIndex pg = (IntRegIndex)(uint8_t) bits(machInst, 12, 10);
            IntRegIndex zn = (IntRegIndex)(uint8_t) bits(machInst, 9, 5);
            IntRegIndex zd = (IntRegIndex)(uint8_t) bits(machInst, 4, 0);
            uint8_t opc17_16 = bits(machInst, 17, 16);
            switch (opc17_16) {
              case 0x00:
                switch (size) {
                  case 1:
                    return new SveRevb<uint16_t>(machInst, zd, zn, pg);
                  case 2:
                    return new SveRevb<uint32_t>(machInst, zd, zn, pg);
                  case 3:
                    return new SveRevb<uint64_t>(machInst, zd, zn, pg);
                }
                break;
              case 0x01:
                switch (size) {
                  case 2:
                    return new SveRevh<uint32_t>(machInst, zd, zn, pg);
                  case 3:
                    return new SveRevh<uint64_t>(machInst, zd, zn, pg);
                }
                break;
              case 0x02:
                if (size == 3) {
                    return new SveRevw<uint64_t>(machInst, zd, zn, pg);
                }
                break;
              case 0x03:
                return decodeSveUnaryPredU<SveRbit>(
                    size, machInst, zd, zn, pg);
            }
        }
        return new Unknown64(machInst);
    } // decodeSvePermPred

    StaticInstPtr
    decodeSveSelVec(ExtMachInst machInst)
    {
        IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 13, 10);
        IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 20, 16);

        uint8_t size = bits(machInst, 23, 22);

        return decodeSveBinConstrPredU<SveSel>(size,
                machInst, zd, zn, zm, pg, SvePredType::SELECT);
    } // decodeSveSelVec

    StaticInstPtr
    decodeSveIntCmpVec(ExtMachInst machInst)
    {
        uint8_t size = bits(machInst, 23, 22);
        uint8_t b14 = bits(machInst, 14);
        uint8_t opc =
            bits(machInst, 15) << 2 |
            bits(machInst, 13) << 1 |
            bits(machInst, 4);
        IntRegIndex pd = (IntRegIndex) (uint8_t) bits(machInst, 3, 0);
        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10);
        IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
        IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 20, 16);
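        // Bit 14 set selects compares against a 64-bit wide second
        // operand (sve_int_cmp_1, B/H/S element sizes only); bit 14
        // clear selects same-size compares (sve_int_cmp_0).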
        if (b14 && size != 3) {
            // sve_int_cmp_1
            switch (opc) {
              case 0:
                return decodeSveTerPredWS<SveCmpgew>(size,
                        machInst, pd, zn, zm, pg);
              case 1:
                return decodeSveTerPredWS<SveCmpgtw>(size,
                        machInst, pd, zn, zm, pg);
              case 2:
                return decodeSveTerPredWS<SveCmpltw>(size,
                        machInst, pd, zn, zm, pg);
              case 3:
                return decodeSveTerPredWS<SveCmplew>(size,
                        machInst, pd, zn, zm, pg);
              case 4:
                return decodeSveTerPredWU<SveCmphsw>(size,
                        machInst, pd, zn, zm, pg);
              case 5:
                return decodeSveTerPredWU<SveCmphiw>(size,
                        machInst, pd, zn, zm, pg);
              case 6:
                return decodeSveTerPredWU<SveCmplow>(size,
                        machInst, pd, zn, zm, pg);
              case 7:
                return decodeSveTerPredWU<SveCmplsw>(size,
                        machInst, pd, zn, zm, pg);
            }
        } else if (!b14) {
            switch (opc) {
              case 0:
                return decodeSveTerPredU<SveCmphs>(size,
                        machInst, pd, zn, zm, pg);
              case 1:
                return decodeSveTerPredU<SveCmphi>(size,
                        machInst, pd, zn, zm, pg);
              case 2:
                if (size != 3) {
                    return decodeSveTerPredWU<SveCmpeqw>(size,
                            machInst, pd, zn, zm, pg);
                }
                break;
              case 3:
                if (size != 3) {
                    return decodeSveTerPredWU<SveCmpnew>(size,
                            machInst, pd, zn, zm, pg);
                }
                break;
              case 4:
                return decodeSveTerPredS<SveCmpge>(size,
                        machInst, pd, zn, zm, pg);
              case 5:
                return decodeSveTerPredS<SveCmpgt>(size,
                        machInst, pd, zn, zm, pg);
              case 6:
                return decodeSveTerPredU<SveCmpeq>(size,
                        machInst, pd, zn, zm, pg);
              case 7:
                return decodeSveTerPredU<SveCmpne>(size,
                        machInst, pd, zn, zm, pg);
            }
        }
        return new Unknown64(machInst);
    } // decodeSveIntCmpVec

    StaticInstPtr
    decodeSveIntCmpUImm(ExtMachInst machInst)
    {
        uint8_t cmp = bits(machInst, 13) << 1 | bits(machInst, 4);
        IntRegIndex pd = (IntRegIndex) (uint8_t) bits(machInst, 3, 0);
        IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10);
        int64_t imm = (int64_t) bits(machInst, 20, 14);
        uint8_t size = bits(machInst, 23, 22);
        switch (cmp) {
          case 0:
            return decodeSveTerImmPredU<SveCmphsi>(size,
                    machInst, pd, zn, imm, pg);
          case 1:
            return decodeSveTerImmPredU<SveCmphii>(size,
                    machInst, pd, zn, imm, pg);
          case 2:
            return decodeSveTerImmPredU<SveCmploi>(size,
                    machInst, pd, zn, imm, pg);
          case 3:
            return decodeSveTerImmPredU<SveCmplsi>(size,
                    machInst, pd, zn, imm, pg);
        }
        return new Unknown64(machInst);
    } // decodeSveIntCmpUImm

    StaticInstPtr
    decodeSveIntCmpSImm(ExtMachInst machInst)
    {
        uint8_t opc = bits(machInst, 15) << 2 | bits(machInst, 13) << 1 |
            bits(machInst, 4);
        IntRegIndex pd = (IntRegIndex) (uint8_t) bits(machInst, 3, 0);
        IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10);
        int64_t imm = sext<5>(bits(machInst, 20, 16));
        uint8_t size = bits(machInst, 23, 22);
        switch (opc) {
          case 0:
            return decodeSveTerImmPredS<SveCmpgei>(size,
                    machInst, pd, zn, imm, pg);
          case 1:
            return decodeSveTerImmPredS<SveCmpgti>(size,
                    machInst, pd, zn, imm, pg);
          case 2:
            return decodeSveTerImmPredS<SveCmplti>(size,
                    machInst, pd, zn, imm, pg);
          case 3:
            return decodeSveTerImmPredS<SveCmplei>(size,
                    machInst, pd, zn, imm, pg);
          case 4:
            return decodeSveTerImmPredU<SveCmpeqi>(size,
                    machInst, pd, zn, imm, pg);
          case 5:
            return decodeSveTerImmPredU<SveCmpnei>(size,
                    machInst, pd, zn, imm, pg);
          default:
            return new Unknown64(machInst);
        }
    } // decodeSveIntCmpSImm

    StaticInstPtr
    decodeSvePredLogicalOps(ExtMachInst machInst)
    {
        IntRegIndex pd = (IntRegIndex) (uint8_t) bits(machInst, 3, 0);
        IntRegIndex pn = (IntRegIndex) (uint8_t) bits(machInst, 8, 5);
        IntRegIndex pm = (IntRegIndex) (uint8_t) bits(machInst, 19, 16);
        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 13, 10);
        uint8_t opc = (bits(machInst, 23, 22) << 2) |
                      (bits(machInst, 9) << 1) |
                      bits(machInst, 4);
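        // opc = op<23:22>:o2<9>:o3<4>. Predicate logical ops always
        // operate at byte granularity, hence the <uint8_t>
        // instantiations below.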
        switch (opc) {
          case 0x0:
            return new SvePredAnd<uint8_t>(machInst, pd, pn, pm, pg);
          case 0x1:
            return new SvePredBic<uint8_t>(machInst, pd, pn, pm, pg);
          case 0x2:
            return new SvePredEor<uint8_t>(machInst, pd, pn, pm, pg);
          case 0x3:
            return new SvePredSel<uint8_t>(machInst, pd, pn, pm, pg, true);
          case 0x4:
            return new SvePredAnds<uint8_t>(machInst, pd, pn, pm, pg);
          case 0x5:
            return new SvePredBics<uint8_t>(machInst, pd, pn, pm, pg);
          case 0x6:
            return new SvePredEors<uint8_t>(machInst, pd, pn, pm, pg);
          case 0x8:
            return new SvePredOrr<uint8_t>(machInst, pd, pn, pm, pg);
          case 0x9:
            return new SvePredOrn<uint8_t>(machInst, pd, pn, pm, pg);
          case 0xa:
            return new SvePredNor<uint8_t>(machInst, pd, pn, pm, pg);
          case 0xb:
            return new SvePredNand<uint8_t>(machInst, pd, pn, pm, pg);
          case 0xc:
            return new SvePredOrrs<uint8_t>(machInst, pd, pn, pm, pg);
          case 0xd:
            return new SvePredOrns<uint8_t>(machInst, pd, pn, pm, pg);
          case 0xe:
            return new SvePredNors<uint8_t>(machInst, pd, pn, pm, pg);
          case 0xf:
            return new SvePredNands<uint8_t>(machInst, pd, pn, pm, pg);
        }

        return new Unknown64(machInst);
    } // decodeSvePredLogicalOps

    StaticInstPtr
    decodeSvePropBreakFromPrevPartition(ExtMachInst machInst)
    {
        if (bits(machInst, 23) == 0x0 && bits(machInst, 9) == 0x0) {
            uint8_t opc = (bits(machInst, 22) << 1) | bits(machInst, 4);
            IntRegIndex pm = (IntRegIndex)(uint8_t) bits(machInst, 19, 16);
            IntRegIndex pg = (IntRegIndex)(uint8_t) bits(machInst, 13, 10);
            IntRegIndex pn = (IntRegIndex)(uint8_t) bits(machInst, 8, 5);
            IntRegIndex pd = (IntRegIndex)(uint8_t) bits(machInst, 3, 0);
            switch (opc) {
              case 0x0:
                // BRKPA
                return new SveBrkpa(machInst, pd, pn, pm, pg);
              case 0x1:
                // BRKPB
                return new SveBrkpb(machInst, pd, pn, pm, pg);
              case 0x2:
                // BRKPAS
                return new SveBrkpas(machInst, pd, pn, pm, pg);
              case 0x3:
                // BRKPBS
                return new SveBrkpbs(machInst, pd, pn, pm, pg);
            }
        }
        return new Unknown64(machInst);
    } // decodeSvePropBreakFromPrevPartition

    StaticInstPtr
    decodeSvePartitionBreakCond(ExtMachInst machInst)
    {
        if (bits(machInst, 18, 16) == 0x0 && bits(machInst, 9) == 0x0) {
            bool flagset = bits(machInst, 22);
            bool merging = bits(machInst, 4);
            IntRegIndex pg = (IntRegIndex)(uint8_t) bits(machInst, 13, 10);
            IntRegIndex pn = (IntRegIndex)(uint8_t) bits(machInst, 8, 5);
            IntRegIndex pd = (IntRegIndex)(uint8_t) bits(machInst, 3, 0);
            if (bits(machInst, 23)) {
                if (flagset) {
                    if (!merging) {
                        return new SveBrkbs(machInst, pd, pg, pn);
                    }
                } else {
                    if (merging) {
                        return new SveBrkbm(machInst, pd, pg, pn);
                    } else {
                        return new SveBrkbz(machInst, pd, pg, pn);
                    }
                }
            } else {
                if (flagset) {
                    if (!merging) {
                        return new SveBrkas(machInst, pd, pg, pn);
                    }
                } else {
                    if (merging) {
                        return new SveBrkam(machInst, pd, pg, pn);
                    } else {
                        return new SveBrkaz(machInst, pd, pg, pn);
                    }
                }
            }
            return new Unknown64(machInst);
        }
        return new Unknown64(machInst);
    } // decodeSvePartitionBreakCond

    StaticInstPtr
    decodeSvePredTest(ExtMachInst machInst)
    {
        if (bits(machInst, 23, 22) == 0x1 &&
            bits(machInst, 18, 16) == 0x0 &&
            bits(machInst, 9) == 0x0) {
            IntRegIndex pn = (IntRegIndex) (uint8_t) bits(machInst, 8, 5);
            IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 13, 10);
            return new SvePtest(machInst, pn, pg);
        }
        return new Unknown64(machInst);
    } // decodeSvePredTest

    StaticInstPtr
    decodeSvePredIteration(ExtMachInst machInst)
    {
        uint8_t size = bits(machInst, 23, 22);
        uint8_t opc18_16 = bits(machInst, 18, 16);
        uint8_t opc10_9 = bits(machInst, 10, 9);
        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 8, 5);
        IntRegIndex pdn = (IntRegIndex) (uint8_t) bits(machInst, 3, 0);
        if (opc18_16 == 0x1 && opc10_9 == 0x2) {
            return decodeSveUnaryPredU<SvePnext>(size,
                    machInst, pdn, pdn, pg);
        } else if (size == 0x1 && opc18_16 == 0x0 && opc10_9 == 0) {
            return new SvePfirst<uint8_t>(machInst, pdn, pdn, pg);
        }
        return new Unknown64(machInst);
    } // decodeSvePredIteration

    StaticInstPtr
    decodeSveInitPred(ExtMachInst machInst)
    {
        IntRegIndex pd = (IntRegIndex) (uint8_t) bits(machInst, 3, 0);
        unsigned size = bits(machInst, 23, 22);
        uint8_t imm = bits(machInst, 9, 5);

        if (bits(machInst, 16) == 0x0) {
            return decodeSvePtrue<SvePtrue>(size, machInst, pd, imm);
        } else {
            return decodeSvePtrue<SvePtrues>(size, machInst, pd, imm);
        }
    } // decodeSveInitPred

    StaticInstPtr
    decodeSveZeroPredReg(ExtMachInst machInst)
    {
        if (bits(machInst, 23, 22) == 0x0 && bits(machInst, 18, 16) == 0x0) {
            IntRegIndex pd = (IntRegIndex) (uint8_t) bits(machInst, 3, 0);
            return new SvePfalse(machInst, pd);
        }
        return new Unknown64(machInst);
    } // decodeSveZeroPredReg

    StaticInstPtr
    decodeSvePropBreakToNextPartition(ExtMachInst machInst)
    {
        if (bits(machInst, 23) == 0x0 &&
            bits(machInst, 18, 16) == 0x0 &&
            bits(machInst, 9) == 0x0 &&
            bits(machInst, 4) == 0x0) {
            IntRegIndex pg = (IntRegIndex)(uint8_t) bits(machInst, 13, 10);
            IntRegIndex pn = (IntRegIndex)(uint8_t) bits(machInst, 8, 5);
            IntRegIndex pdm = (IntRegIndex)(uint8_t) bits(machInst, 3, 0);
            if (bits(machInst, 22) == 0x0) {
                return new SveBrkn(machInst, pdm, pn, pdm, pg);
            } else {
                return new SveBrkns(machInst, pdm, pn, pdm, pg);
            }
        }
        return new Unknown64(machInst);
    } // decodeSvePropBreakToNextPartition

    StaticInstPtr
    decodeSveReadPredFromFFRPred(ExtMachInst machInst)
    {
        if (bits(machInst, 23)) {
            return new Unknown64(machInst);
        }
        IntRegIndex pd = (IntRegIndex)(uint8_t) bits(machInst, 3, 0);
        IntRegIndex pg = (IntRegIndex)(uint8_t) bits(machInst, 8, 5);
        if (bits(machInst, 22)) {
            return new SveRdffrsPred(machInst, pd, pg);
        } else {
            return new SveRdffrPred(machInst, pd, pg);
        }
    } // decodeSveReadPredFromFFRPred

    StaticInstPtr
    decodeSveReadPredFromFFRUnpred(ExtMachInst machInst)
    {
        if (bits(machInst, 23, 22) != 0) {
            return new Unknown64(machInst);
        }
        IntRegIndex pd = (IntRegIndex)(uint8_t) bits(machInst, 3, 0);
        return new SveRdffrUnpred(machInst, pd);
    } // decodeSveReadPredFromFFRUnpred

    StaticInstPtr
    decodeSvePredGen(ExtMachInst machInst)
    {
        uint8_t b_20_15 = (bits(machInst, 20) << 1) | bits(machInst, 15);
        switch (b_20_15) {
          case 0x0:
            return decodeSvePredLogicalOps(machInst);
          case 0x1:
            return decodeSvePropBreakFromPrevPartition(machInst);
          case 0x2:
            if (bits(machInst, 19) == 0x0) {
                return decodeSvePartitionBreakCond(machInst);
            } else {
                return decodeSvePropBreakToNextPartition(machInst);
            }
          case 0x3:
            if (bits(machInst, 19) == 0x0) {
                if (bits(machInst, 4, 0) == 0x0) {
                    return decodeSvePredTest(machInst);
                } else {
                    break;
                }
            } else {
                switch (bits(machInst, 13, 12)) {
                  case 0x0:
                    if (bits(machInst, 11) == 0x0 &&
                        bits(machInst, 4) == 0x0) {
                        return decodeSvePredIteration(machInst);
                    } else {
                        break;
                    }
                  case 0x1:
                    break;
                  case 0x2:
                    if (bits(machInst, 11, 10) == 0x0 &&
                        bits(machInst, 4) == 0x0) {
                        return decodeSveInitPred(machInst);
                    } else if (bits(machInst, 11, 4) == 0x40) {
                        return decodeSveZeroPredReg(machInst);
                    }
                    break;
                  case 0x3:
                    if (bits(machInst, 11) == 0x0) {
                        if (bits(machInst, 16) == 0x0) {
                            return decodeSveReadPredFromFFRPred(machInst);
                        } else if (bits(machInst, 8, 4) == 0x0) {
                            return decodeSveReadPredFromFFRUnpred(machInst);
                        }
                    }
                    break;
                }
            }
            break;
        }
        return new Unknown64(machInst);
    } // decodeSvePredGen

    StaticInstPtr
    decodeSvePredCount(ExtMachInst machInst)
    {
        uint8_t b19 = bits(machInst, 19);
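        // Bit 19 set selects inc/dec by predicate count and the FFR
        // write/set group; bit 19 clear selects CNTP.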
1753 if (b19) {
1754 uint8_t b13_11 = bits(machInst, 13, 11);
1755 switch (b13_11) {
1756 case 0x0:
1757 {
                    if (bits(machInst, 10, 9) != 0x0) {
                        return new Unknown64(machInst);
                    }
                    IntRegIndex zdn = (IntRegIndex) (uint8_t)
                        bits(machInst, 4, 0);
                    IntRegIndex pg = (IntRegIndex) (uint8_t)
                        bits(machInst, 8, 5);
                    uint8_t esize = bits(machInst, 23, 22);
                    if (esize == 0x0) {
                        return new Unknown64(machInst);
                    }
                    uint8_t opc = bits(machInst, 18, 17);
                    if (opc == 0x0) {
                        uint8_t u = bits(machInst, 16);
                        if (u) {
                            return decodeSvePredCountVU<SveUqincpv>(esize,
                                machInst, zdn, pg);
                        } else {
                            return decodeSvePredCountVS<SveSqincpv>(esize,
                                machInst, zdn, pg);
                        }
                    } else if (opc == 0x1) {
                        uint8_t u = bits(machInst, 16);
                        if (u) {
                            return decodeSvePredCountVU<SveUqdecpv>(esize,
                                machInst, zdn, pg);
                        } else {
                            return decodeSvePredCountVS<SveSqdecpv>(esize,
                                machInst, zdn, pg);
                        }
                    } else if (opc == 0x2) {
                        uint8_t d = bits(machInst, 16);
                        if (d) {
                            return decodeSvePredCountVU<SveDecpv>(esize,
                                machInst, zdn, pg);
                        } else {
                            return decodeSvePredCountVU<SveIncpv>(esize,
                                machInst, zdn, pg);
                        }
                    }
                }
                break;
              case 0x1:
                {
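                    // SVE inc/dec register by predicate count
                    // (SQINCP, SQDECP, UQINCP, UQDECP, INCP, DECP)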
                    IntRegIndex rdn = (IntRegIndex) (uint8_t)
                        bits(machInst, 4, 0);
                    IntRegIndex pg = (IntRegIndex) (uint8_t)
                        bits(machInst, 8, 5);
                    uint8_t esize = bits(machInst, 23, 22);
                    uint8_t opc = bits(machInst, 18, 17);
                    uint8_t opc2 = bits(machInst, 10, 9);
                    if (opc == 0x0) {
                        uint8_t u = bits(machInst, 16);
                        if (opc2 == 0x0) {
                            if (u) {
                                return decodeSvePredCountU<SveUqincp32>(esize,
                                    machInst, rdn, pg);
                            } else {
                                return decodeSvePredCountS<SveSqincp32>(esize,
                                    machInst, rdn, pg);
                            }
                        } else if (opc2 == 0x2) {
                            if (u) {
                                return decodeSvePredCountU<SveUqincp64>(esize,
                                    machInst, rdn, pg);
                            } else {
                                return decodeSvePredCountS<SveSqincp64>(esize,
                                    machInst, rdn, pg);
                            }
                        }
                    } else if (opc == 0x1) {
                        uint8_t u = bits(machInst, 16);
                        if (opc2 == 0x0) {
                            if (u) {
                                return decodeSvePredCountU<SveUqdecp32>(esize,
                                    machInst, rdn, pg);
                            } else {
                                return decodeSvePredCountS<SveSqdecp32>(esize,
                                    machInst, rdn, pg);
                            }
                        } else if (opc2 == 0x2) {
                            if (u) {
                                return decodeSvePredCountU<SveUqdecp64>(esize,
                                    machInst, rdn, pg);
                            } else {
                                return decodeSvePredCountS<SveSqdecp64>(esize,
                                    machInst, rdn, pg);
                            }
                        }
                    } else if (opc == 0x2) {
                        if (opc2 == 0x0) {
                            if (bits(machInst, 16)) {
                                return decodeSvePredCountU<SveDecp>(esize,
                                    machInst, rdn, pg);
                            } else {
                                return decodeSvePredCountU<SveIncp>(esize,
                                    machInst, rdn, pg);
                            }
                        }
                    }
                }
                break;
              case 0x2:
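                // SVE FFR write from predicate (WRFFR) and
                // FFR initialise (SETFFR)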
                if (bits(machInst, 23, 22) == 0x0 &&
                        bits(machInst, 10, 9) == 0x0 &&
                        bits(machInst, 4, 0) == 0x0) {
                    uint8_t opc = bits(machInst, 18, 16);
                    if (opc == 0x0) {
                        IntRegIndex pn = (IntRegIndex)(uint8_t)
                            bits(machInst, 8, 5);
                        return new SveWrffr(machInst, pn);
                    } else if (opc == 0x4 && bits(machInst, 8, 5) == 0x0) {
                        return new SveSetffr(machInst);
                    }
                }
                break;
            }
        } else {
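            // SVE predicate count (CNTP)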
            uint8_t opc = bits(machInst, 18, 16);
            if (opc == 0 && bits(machInst, 9) == 0) {
                IntRegIndex rd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
                IntRegIndex pn = (IntRegIndex) (uint8_t) bits(machInst, 8, 5);
                IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 13,
                    10);
                uint8_t esize = bits(machInst, 23, 22);
                return decodeSveUnaryPredU<SveCntp>(esize,
                    machInst, rd, pn, pg);
            }
        }
        return new Unknown64(machInst);
    } // decodeSvePredCount

    StaticInstPtr
    decodeSveIntCmpSca(ExtMachInst machInst)
    {
        uint16_t b23_13_12_11_10_3_2_1_0 = (uint16_t)
            (bits(machInst, 23) << 8) | (bits(machInst, 13, 10) << 4) |
            bits(machInst, 3, 0);
        uint8_t b10 = (uint8_t) bits(machInst, 10);
        IntRegIndex rn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
        IntRegIndex rm = (IntRegIndex) (uint8_t) bits(machInst, 20, 16);
        if (b23_13_12_11_10_3_2_1_0 == 0x180) {
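            // SVE conditionally terminate scalars (CTERMEQ, CTERMNE)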
            uint8_t s64b = bits(machInst, 22);
            uint8_t ne = bits(machInst, 4);
            if (ne) {
                if (s64b) {
                    return new SveCtermne<uint64_t>(machInst, rn, rm);
                } else {
                    return new SveCtermne<uint32_t>(machInst, rn, rm);
                }
            } else {
                if (s64b) {
                    return new SveCtermeq<uint64_t>(machInst, rn, rm);
                } else {
                    return new SveCtermeq<uint32_t>(machInst, rn, rm);
                }
            }
        } else if (b10) {
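            // SVE integer compare scalar count and limit
            // (WHILELT, WHILELE, WHILELO, WHILELS)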
            IntRegIndex pd = (IntRegIndex) (uint8_t) bits(machInst, 3, 0);
            uint8_t size = (uint8_t) bits(machInst, 23, 22);
            uint8_t s64b = (uint8_t) bits(machInst, 12);
            uint8_t opc = (uint8_t) bits(machInst, 11) << 1 |
                bits(machInst, 4);
            if (s64b) {
                switch (opc) {
                  case 0:
                    return decodeSveBinUnpredS<SveWhilelt64>(size,
                        machInst, pd, rn, rm);
                  case 1:
                    return decodeSveBinUnpredS<SveWhilele64>(size,
                        machInst, pd, rn, rm);
                  case 2:
                    return decodeSveBinUnpredU<SveWhilelo64>(size,
                        machInst, pd, rn, rm);
                  case 3:
                    return decodeSveBinUnpredU<SveWhilels64>(size,
                        machInst, pd, rn, rm);
                }
            } else {
                switch (opc) {
                  case 0:
                    return decodeSveBinUnpredS<SveWhilelt32>(size,
                        machInst, pd, rn, rm);
                  case 1:
                    return decodeSveBinUnpredS<SveWhilele32>(size,
                        machInst, pd, rn, rm);
                  case 2:
                    return decodeSveBinUnpredU<SveWhilelo32>(size,
                        machInst, pd, rn, rm);
                  case 3:
                    return decodeSveBinUnpredU<SveWhilels32>(size,
                        machInst, pd, rn, rm);
                }
            }
        }
        return new Unknown64(machInst);
    } // decodeSveIntCmpSca

    StaticInstPtr
    decodeSveIntWideImmUnpred0(ExtMachInst machInst)
    {
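        // SVE integer add/subtract immediate (unpredicated)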
        IntRegIndex zdn = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        uint64_t imm = bits(machInst, 12, 5);
        uint8_t sh = bits(machInst, 13);
        uint8_t size = bits(machInst, 23, 22);

        if (sh) {
            if (size == 0x0) {
                return new Unknown64(machInst);
            }
            imm <<= 8;
        }

        switch (bits(machInst, 18, 16)) {
          case 0x0:
            return decodeSveWideImmUnpredU<SveAddImm>(
                size, machInst, zdn, imm);
          case 0x1:
            return decodeSveWideImmUnpredU<SveSubImm>(
                size, machInst, zdn, imm);
          case 0x3:
            return decodeSveWideImmUnpredU<SveSubrImm>(
                size, machInst, zdn, imm);
          case 0x4:
            return decodeSveWideImmUnpredS<SveSqaddImm>(
                size, machInst, zdn, imm);
          case 0x5:
            return decodeSveWideImmUnpredU<SveUqaddImm>(
                size, machInst, zdn, imm);
          case 0x6:
            return decodeSveWideImmUnpredS<SveSqsubImm>(
                size, machInst, zdn, imm);
          case 0x7:
            return decodeSveWideImmUnpredU<SveUqsubImm>(
                size, machInst, zdn, imm);
        }

        return new Unknown64(machInst);
    } // decodeSveIntWideImmUnpred0

    StaticInstPtr
    decodeSveIntWideImmUnpred1(ExtMachInst machInst)
    {
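        // SVE integer min/max immediate (unpredicated)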
        IntRegIndex zdn = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        uint64_t imm = bits(machInst, 12, 5);
        uint8_t size = bits(machInst, 23, 22);

        switch (bits(machInst, 18, 16)) {
          case 0x0:
            return decodeSveWideImmUnpredS<SveSmaxImm>(
                size, machInst, zdn, sext<8>(imm));
          case 0x1:
            return decodeSveWideImmUnpredU<SveUmaxImm>(
                size, machInst, zdn, imm);
          case 0x2:
            return decodeSveWideImmUnpredS<SveSminImm>(
                size, machInst, zdn, sext<8>(imm));
          case 0x3:
            return decodeSveWideImmUnpredU<SveUminImm>(
                size, machInst, zdn, imm);
        }

        return new Unknown64(machInst);
    } // decodeSveIntWideImmUnpred1

    StaticInstPtr
    decodeSveIntWideImmUnpred2(ExtMachInst machInst)
    {
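        // SVE integer multiply immediate (unpredicated)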
        IntRegIndex zdn = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        uint64_t imm = bits(machInst, 12, 5);
        uint8_t size = bits(machInst, 23, 22);

        if (bits(machInst, 18, 16) == 0x0) {
            return decodeSveWideImmUnpredU<SveMulImm>(
                size, machInst, zdn, sext<8>(imm));
        }

        return new Unknown64(machInst);
    } // decodeSveIntWideImmUnpred2

    StaticInstPtr
    decodeSveIntWideImmUnpred3(ExtMachInst machInst)
    {
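        // SVE broadcast integer immediate (unpredicated): DUP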
        IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        uint64_t imm = bits(machInst, 12, 5);
        uint8_t sh = bits(machInst, 13);
        uint8_t size = bits(machInst, 23, 22);

        if (sh) {
            if (size == 0x0) {
                return new Unknown64(machInst);
            }
            imm <<= 8;
        }

        if (bits(machInst, 18, 17) == 0x0) {
            if (sh) {
                return decodeSveWideImmUnpredU<SveDupImm>(
                    size, machInst, zd, sext<16>(imm));
            } else {
                return decodeSveWideImmUnpredU<SveDupImm>(
                    size, machInst, zd, sext<8>(imm));
            }
        }

        return new Unknown64(machInst);
    } // decodeSveIntWideImmUnpred3

    StaticInstPtr
    decodeSveIntWideImmUnpred4(ExtMachInst machInst)
    {
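        // SVE broadcast floating-point immediate (unpredicated): FDUP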
        IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        uint8_t size = bits(machInst, 23, 22);

        if (bits(machInst, 18, 17) == 0x0 && size != 0x0) {
            uint64_t imm = vfp_modified_imm(bits(machInst, 12, 5),
                decode_fp_data_type(size));
            return decodeSveWideImmUnpredF<SveFdup>(size, machInst, zd, imm);
        }

        return new Unknown64(machInst);
    } // decodeSveIntWideImmUnpred4

    StaticInstPtr
    decodeSveIntWideImmUnpred(ExtMachInst machInst)
    {
        switch (bits(machInst, 20, 19)) {
          case 0x0:
            if (bits(machInst, 18, 16) != 0x2) {
                return decodeSveIntWideImmUnpred0(machInst);
            }
            break;
          case 0x1:
            if (bits(machInst, 13) == 0x0) {
                return decodeSveIntWideImmUnpred1(machInst);
            }
            break;
          case 0x2:
            if (bits(machInst, 13) == 0x0) {
                return decodeSveIntWideImmUnpred2(machInst);
            }
            break;
          case 0x3:
            if (bits(machInst, 16) == 0x0) {
                return decodeSveIntWideImmUnpred3(machInst);
            } else if (bits(machInst, 13) == 0x0) {
                return decodeSveIntWideImmUnpred4(machInst);
            }
            break;
        }
        return new Unknown64(machInst);
    } // decodeSveIntWideImmUnpred

    StaticInstPtr
    decodeSveMultiplyAddUnpred(ExtMachInst machInst)
    {
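        // SVE integer dot product (unpredicated): SDOT, UDOT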
        IntRegIndex zda = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
        IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 20, 16);

        uint8_t size = (uint8_t) bits(machInst, 23, 22);

        if (bits(machInst, 12, 11) != 0 || !(size & 0x2)) {
            return new Unknown64(machInst);
        }

        uint8_t usig = (uint8_t) bits(machInst, 10);
        if (size & 0x1) {
            if (usig) {
                return new SveUdotv<uint16_t, uint64_t>(machInst,
                    zda, zn, zm);
            } else {
                return new SveSdotv<int16_t, int64_t>(machInst,
                    zda, zn, zm);
            }
        } else {
            if (usig) {
                return new SveUdotv<uint8_t, uint32_t>(machInst,
                    zda, zn, zm);
            } else {
                return new SveSdotv<int8_t, int32_t>(machInst,
                    zda, zn, zm);
            }
        }
    } // decodeSveMultiplyAddUnpred

    StaticInstPtr
    decodeSveMultiplyIndexed(ExtMachInst machInst)
    {
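        // SVE integer dot product (indexed): SDOT, UDOT by element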
        IntRegIndex zda = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);

        uint8_t size = (uint8_t) bits(machInst, 23, 22);

        if (bits(machInst, 12, 11) != 0 || !(size & 0x2)) {
            return new Unknown64(machInst);
        }

        uint8_t usig = (uint8_t) bits(machInst, 10);
        if (size & 0x1) {
            IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 19, 16);
            uint8_t i1 = (uint8_t) bits(machInst, 20);
            if (usig) {
                return new SveUdoti<uint16_t, uint64_t>(machInst,
                    zda, zn, zm, i1);
            } else {
                return new SveSdoti<int16_t, int64_t>(machInst,
                    zda, zn, zm, i1);
            }
        } else {
            IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 18, 16);
            uint8_t i2 = (uint8_t) bits(machInst, 20, 19);
            if (usig) {
                return new SveUdoti<uint8_t, uint32_t>(machInst,
                    zda, zn, zm, i2);
            } else {
                return new SveSdoti<int8_t, int32_t>(machInst,
                    zda, zn, zm, i2);
            }
        }
    } // decodeSveMultiplyIndexed

    StaticInstPtr
    decodeSveFpFastReduc(ExtMachInst machInst)
    {
        IntRegIndex vd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10);

        uint8_t size = bits(machInst, 23, 22);

        if (size == 0x0) {
            return new Unknown64(machInst);
        }

        switch (bits(machInst, 18, 16)) {
          case 0x0:
            return decodeSveUnaryPredF<SveFaddv>(size, machInst, vd, zn, pg);
          case 0x4:
            return decodeSveUnaryPredF<SveFmaxnmv>(size, machInst, vd, zn, pg);
          case 0x5:
            return decodeSveUnaryPredF<SveFminnmv>(size, machInst, vd, zn, pg);
          case 0x6:
            return decodeSveUnaryPredF<SveFmaxv>(size, machInst, vd, zn, pg);
          case 0x7:
            return decodeSveUnaryPredF<SveFminv>(size, machInst, vd, zn, pg);
        }

        return new Unknown64(machInst);
    } // decodeSveFpFastReduc

    StaticInstPtr
    decodeSveFpUnaryUnpred(ExtMachInst machInst)
    {
        IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);

        uint8_t size = (uint8_t) bits(machInst, 23, 22);
        if (size == 0) {
            return new Unknown64(machInst);
        }
        uint8_t opc = (uint8_t) bits(machInst, 18, 16);

        switch (opc) {
          case 0x6:
            return decodeSveUnaryUnpredF<SveFrecpe>(
                size, machInst, zd, zn);
          case 0x7:
            return decodeSveUnaryUnpredF<SveFrsqrte>(
                size, machInst, zd, zn);
        }
        return new Unknown64(machInst);
    } // decodeSveFpUnaryUnpred

    StaticInstPtr
    decodeSveFpCmpZero(ExtMachInst machInst)
    {
        IntRegIndex pd = (IntRegIndex) (uint8_t) bits(machInst, 3, 0);
        IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10);

        uint8_t size = bits(machInst, 23, 22);
        if (size == 0) {
            return new Unknown64(machInst);
        }
        uint8_t opc = (bits(machInst, 17, 16) << 1) | bits(machInst, 4);

        switch (opc) {
          case 0x0:
            return decodeSveCmpImmF<SveFcmgeZero>(
                size, machInst, pd, zn, 0x0, pg);
          case 0x1:
            return decodeSveCmpImmF<SveFcmgtZero>(
                size, machInst, pd, zn, 0x0, pg);
          case 0x2:
            return decodeSveCmpImmF<SveFcmltZero>(
                size, machInst, pd, zn, 0x0, pg);
          case 0x3:
            return decodeSveCmpImmF<SveFcmleZero>(
                size, machInst, pd, zn, 0x0, pg);
          case 0x4:
            return decodeSveCmpImmF<SveFcmeqZero>(
                size, machInst, pd, zn, 0x0, pg);
          case 0x6:
            return decodeSveCmpImmF<SveFcmneZero>(
                size, machInst, pd, zn, 0x0, pg);
        }
        return new Unknown64(machInst);
    } // decodeSveFpCmpZero

    StaticInstPtr
    decodeSveFpAccumReduc(ExtMachInst machInst)
    {
        uint8_t opc = bits(machInst, 18, 16);
        uint8_t size = bits(machInst, 23, 22);
        if (opc != 0 || size == 0) {
            return new Unknown64(machInst);
        }

        IntRegIndex vdn = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10);

        return decodeSveUnaryPredF<SveFadda>(size, machInst, vdn, zm, pg);
    } // decodeSveFpAccumReduc

    StaticInstPtr
    decodeSveFpArithUnpred(ExtMachInst machInst)
    {
        IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
        IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 20, 16);

        uint8_t size = bits(machInst, 23, 22);
        if (size == 0) {
            return new Unknown64(machInst);
        }
        uint8_t opc = (uint8_t) bits(machInst, 12, 10);

        switch (opc) {
          case 0x0:
            return decodeSveBinUnpredF<SveFaddUnpred>(
                size, machInst, zd, zn, zm);
          case 0x1:
            return decodeSveBinUnpredF<SveFsubUnpred>(
                size, machInst, zd, zn, zm);
          case 0x2:
            return decodeSveBinUnpredF<SveFmulUnpred>(
                size, machInst, zd, zn, zm);
          case 0x3:
            return decodeSveBinUnpredF<SveFtsmul>(
                size, machInst, zd, zn, zm);
          case 0x6:
            return decodeSveBinUnpredF<SveFrecps>(
                size, machInst, zd, zn, zm);
          case 0x7:
            return decodeSveBinUnpredF<SveFrsqrts>(
                size, machInst, zd, zn, zm);
        }
        return new Unknown64(machInst);
    } // decodeSveFpArithUnpred

    StaticInstPtr
    decodeSveFpArithPred0(ExtMachInst machInst)
    {
        IntRegIndex zdn = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10);

        uint8_t size = (uint8_t) bits(machInst, 23, 22);
        if (size == 0) {
            return new Unknown64(machInst);
        }
        uint8_t opc = (uint8_t) bits(machInst, 19, 16);

        switch (opc) {
          case 0x0:
            return decodeSveBinDestrPredF<SveFaddPred>(
                size, machInst, zdn, zm, pg);
          case 0x1:
            return decodeSveBinDestrPredF<SveFsubPred>(
                size, machInst, zdn, zm, pg);
          case 0x2:
            return decodeSveBinDestrPredF<SveFmulPred>(
                size, machInst, zdn, zm, pg);
          case 0x3:
            return decodeSveBinDestrPredF<SveFsubr>(
                size, machInst, zdn, zm, pg);
          case 0x4:
            return decodeSveBinDestrPredF<SveFmaxnm>(
                size, machInst, zdn, zm, pg);
          case 0x5:
            return decodeSveBinDestrPredF<SveFminnm>(
                size, machInst, zdn, zm, pg);
          case 0x6:
            return decodeSveBinDestrPredF<SveFmax>(
                size, machInst, zdn, zm, pg);
          case 0x7:
            return decodeSveBinDestrPredF<SveFmin>(
                size, machInst, zdn, zm, pg);
          case 0x8:
            return decodeSveBinDestrPredF<SveFabd>(
                size, machInst, zdn, zm, pg);
          case 0x9:
            return decodeSveBinDestrPredF<SveFscale>(
                size, machInst, zdn, zm, pg);
          case 0xa:
            return decodeSveBinDestrPredF<SveFmulx>(
                size, machInst, zdn, zm, pg);
          case 0xc:
            return decodeSveBinDestrPredF<SveFdivr>(
                size, machInst, zdn, zm, pg);
          case 0xd:
            return decodeSveBinDestrPredF<SveFdiv>(
                size, machInst, zdn, zm, pg);
        }
        return new Unknown64(machInst);
    } // decodeSveFpArithPred0

    StaticInstPtr
    decodeSveFpTrigMAddCoeff(ExtMachInst machInst)
    {
        IntRegIndex zdn = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
        uint8_t imm = (uint8_t) bits(machInst, 18, 16);

        uint8_t size = (uint8_t) bits(machInst, 23, 22);
        if (size == 0) {
            return new Unknown64(machInst);
        }

        return decodeSveTerImmUnpredF<SveFtmad>(size, machInst, zdn, zm, imm);
    } // decodeSveFpTrigMAddCoeff

    StaticInstPtr
    decodeSveFpArithImmPred(ExtMachInst machInst)
    {
        IntRegIndex zdn = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10);
        uint64_t imm;

        uint8_t size = (uint8_t) bits(machInst, 23, 22);
        if (size == 0) {
            return new Unknown64(machInst);
        }

        uint8_t opc = (uint8_t) bits(machInst, 18, 16);

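        // Bit 5 selects one of two FP constants, expanded to the element
        // size: add/sub/subr use {0.5, 1.0}, mul uses {0.5, 2.0}, and the
        // max/min variants use {0.0, 1.0}.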
        switch (opc) {
          case 0x0:
            imm = sveExpandFpImmAddSub((uint8_t) bits(machInst, 5), size);
            return decodeSveBinImmPredF<SveFaddImm>(
                size, machInst, zdn, imm, pg);
          case 0x1:
            imm = sveExpandFpImmAddSub((uint8_t) bits(machInst, 5), size);
            return decodeSveBinImmPredF<SveFsubImm>(
                size, machInst, zdn, imm, pg);
          case 0x2:
            imm = sveExpandFpImmMul((uint8_t) bits(machInst, 5), size);
            return decodeSveBinImmPredF<SveFmulImm>(
                size, machInst, zdn, imm, pg);
          case 0x3:
            imm = sveExpandFpImmAddSub((uint8_t) bits(machInst, 5), size);
            return decodeSveBinImmPredF<SveFsubrImm>(
                size, machInst, zdn, imm, pg);
          case 0x4:
            imm = sveExpandFpImmMaxMin((uint8_t) bits(machInst, 5), size);
            return decodeSveBinImmPredF<SveFmaxnmImm>(
                size, machInst, zdn, imm, pg);
          case 0x5:
            imm = sveExpandFpImmMaxMin((uint8_t) bits(machInst, 5), size);
            return decodeSveBinImmPredF<SveFminnmImm>(
                size, machInst, zdn, imm, pg);
          case 0x6:
            imm = sveExpandFpImmMaxMin((uint8_t) bits(machInst, 5), size);
            return decodeSveBinImmPredF<SveFmaxImm>(
                size, machInst, zdn, imm, pg);
          case 0x7:
            imm = sveExpandFpImmMaxMin((uint8_t) bits(machInst, 5), size);
            return decodeSveBinImmPredF<SveFminImm>(
                size, machInst, zdn, imm, pg);
        }
        return new Unknown64(machInst);
    } // decodeSveFpArithImmPred

    StaticInstPtr
    decodeSveFpArithPred(ExtMachInst machInst)
    {
        if (bits(machInst, 20) == 0) {
            return decodeSveFpArithPred0(machInst);
        } else if (bits(machInst, 19) == 0) {
            return decodeSveFpTrigMAddCoeff(machInst);
        } else {
            return decodeSveFpArithImmPred(machInst);
        }
    } // decodeSveFpArithPred

    StaticInstPtr
    decodeSveFpUnaryPred(ExtMachInst machInst)
    {
        IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10);

        uint8_t size = (uint8_t) bits(machInst, 23, 22);
        if (size == 0) {
            return new Unknown64(machInst);
        }

        uint8_t b20_19 = bits(machInst, 20, 19);
        switch (b20_19) {
          case 0x0:
            {
                // SVE floating-point round to integral value;
                // opc 0x5 is unallocated and decodes to Unknown64 below
                uint8_t opc = (uint8_t) bits(machInst, 18, 16);
                switch (opc) {
                  case 0x0:
                    return decodeSveUnaryPredF<SveFrintn>(
                        size, machInst, zd, zn, pg);
                  case 0x1:
                    return decodeSveUnaryPredF<SveFrintp>(
                        size, machInst, zd, zn, pg);
                  case 0x2:
                    return decodeSveUnaryPredF<SveFrintm>(
                        size, machInst, zd, zn, pg);
                  case 0x3:
                    return decodeSveUnaryPredF<SveFrintz>(
                        size, machInst, zd, zn, pg);
                  case 0x4:
                    return decodeSveUnaryPredF<SveFrinta>(
                        size, machInst, zd, zn, pg);
                  case 0x6:
                    return decodeSveUnaryPredF<SveFrintx>(
                        size, machInst, zd, zn, pg);
                  case 0x7:
                    return decodeSveUnaryPredF<SveFrinti>(
                        size, machInst, zd, zn, pg);
                }
            }
            break;
          case 0x1:
            {
                // SVE floating-point unary operations (predicated)
                uint8_t b18_16 = bits(machInst, 18, 16);
                switch (b18_16) {
                  case 0x0:
                    if (size == 0x2) {
                        return new SveFcvtNarrow<uint32_t, uint16_t>(
                            machInst, zd, zn, pg);
                    } else if (size == 0x3) {
                        return new SveFcvtNarrow<uint64_t, uint16_t>(
                            machInst, zd, zn, pg);
                    }
                    break;
                  case 0x1:
                    if (size == 0x2) {
                        return new SveFcvtWiden<uint16_t, uint32_t>(
                            machInst, zd, zn, pg);
                    } else if (size == 0x3) {
                        return new SveFcvtWiden<uint16_t, uint64_t>(
                            machInst, zd, zn, pg);
                    }
                    break;
                  case 0x2:
                    if (size == 0x3) {
                        return new SveFcvtNarrow<uint64_t, uint32_t>(
                            machInst, zd, zn, pg);
                    }
                    break;
                  case 0x3:
                    if (size == 0x3) {
                        return new SveFcvtWiden<uint32_t, uint64_t>(
                            machInst, zd, zn, pg);
                    }
                    break;
                  case 0x4:
                    if (size != 0x0) {
                        return decodeSveUnaryPredF<SveFrecpx>(
                            size, machInst, zd, zn, pg);
                    }
                    break;
                  case 0x5:
                    if (size != 0x0) {
                        return decodeSveUnaryPredF<SveFsqrt>(
                            size, machInst, zd, zn, pg);
                    }
                    break;
                }
            }
            break;
          case 0x2:
            {
                // SVE integer convert to floating-point
                uint8_t opc = (size << 3) | bits(machInst, 18, 16);
                switch (opc) {
                  case 0xa:
                    return new SveScvtfNarrow<uint16_t, uint16_t>(
                        machInst, zd, zn, pg);
                  case 0xb:
                    return new SveUcvtfNarrow<uint16_t, uint16_t>(
                        machInst, zd, zn, pg);
                  case 0xc:
                    return new SveScvtfNarrow<uint32_t, uint16_t>(
                        machInst, zd, zn, pg);
                  case 0xd:
                    return new SveUcvtfNarrow<uint32_t, uint16_t>(
                        machInst, zd, zn, pg);
                  case 0xe:
                    return new SveScvtfNarrow<uint64_t, uint16_t>(
                        machInst, zd, zn, pg);
                  case 0xf:
                    return new SveUcvtfNarrow<uint64_t, uint16_t>(
                        machInst, zd, zn, pg);
                  case 0x14:
                    return new SveScvtfNarrow<uint32_t, uint32_t>(
                        machInst, zd, zn, pg);
                  case 0x15:
                    return new SveUcvtfNarrow<uint32_t, uint32_t>(
                        machInst, zd, zn, pg);
                  case 0x18:
                    return new SveScvtfWiden<uint32_t, uint64_t>(
                        machInst, zd, zn, pg);
                  case 0x19:
                    return new SveUcvtfWiden<uint32_t, uint64_t>(
                        machInst, zd, zn, pg);
                  case 0x1c:
                    return new SveScvtfNarrow<uint64_t, uint32_t>(
                        machInst, zd, zn, pg);
                  case 0x1d:
                    return new SveUcvtfNarrow<uint64_t, uint32_t>(
                        machInst, zd, zn, pg);
                  case 0x1e:
                    return new SveScvtfNarrow<uint64_t, uint64_t>(
                        machInst, zd, zn, pg);
                  case 0x1f:
                    return new SveUcvtfNarrow<uint64_t, uint64_t>(
                        machInst, zd, zn, pg);
                }
            }
            break;
          case 0x3:
            {
                // SVE floating-point convert to integer
                uint8_t opc = (size << 3) | bits(machInst, 18, 16);
                switch (opc) {
                  case 0xa:
                    return new SveFcvtzsNarrow<uint16_t, uint16_t>(
                        machInst, zd, zn, pg);
                  case 0xb:
                    return new SveFcvtzuNarrow<uint16_t, uint16_t>(
                        machInst, zd, zn, pg);
                  case 0xc:
                    return new SveFcvtzsWiden<uint16_t, uint32_t>(
                        machInst, zd, zn, pg);
                  case 0xd:
                    return new SveFcvtzuWiden<uint16_t, uint32_t>(
                        machInst, zd, zn, pg);
                  case 0xe:
                    return new SveFcvtzsWiden<uint16_t, uint64_t>(
                        machInst, zd, zn, pg);
                  case 0xf:
                    return new SveFcvtzuWiden<uint16_t, uint64_t>(
                        machInst, zd, zn, pg);
                  case 0x14:
                    return new SveFcvtzsNarrow<uint32_t, uint32_t>(
                        machInst, zd, zn, pg);
                  case 0x15:
                    return new SveFcvtzuNarrow<uint32_t, uint32_t>(
                        machInst, zd, zn, pg);
                  case 0x18:
                    return new SveFcvtzsNarrow<uint64_t, uint32_t>(
                        machInst, zd, zn, pg);
                  case 0x19:
                    return new SveFcvtzuNarrow<uint64_t, uint32_t>(
                        machInst, zd, zn, pg);
                  case 0x1c:
                    return new SveFcvtzsWiden<uint32_t, uint64_t>(
                        machInst, zd, zn, pg);
                  case 0x1d:
                    return new SveFcvtzuWiden<uint32_t, uint64_t>(
                        machInst, zd, zn, pg);
                  case 0x1e:
                    return new SveFcvtzsNarrow<uint64_t, uint64_t>(
                        machInst, zd, zn, pg);
                  case 0x1f:
                    return new SveFcvtzuNarrow<uint64_t, uint64_t>(
                        machInst, zd, zn, pg);
                }
            }
            break;
        }
        return new Unknown64(machInst);
    } // decodeSveFpUnaryPred

    StaticInstPtr
    decodeSveFpCmpVec(ExtMachInst machInst)
    {
        IntRegIndex pd = (IntRegIndex) (uint8_t) bits(machInst, 3, 0);
        IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
        IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 20, 16);
        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10);

        uint8_t size = bits(machInst, 23, 22);
        if (size == 0) {
            return new Unknown64(machInst);
        }
        uint8_t opc = (bits(machInst, 15) << 2) |
            (bits(machInst, 13) << 1) |
            bits(machInst, 4);

        switch (opc) {
          case 0x0:
            return decodeSveCmpF<SveFcmge>(size, machInst, pd, zn, zm, pg);
          case 0x1:
            return decodeSveCmpF<SveFcmgt>(size, machInst, pd, zn, zm, pg);
          case 0x2:
            return decodeSveCmpF<SveFcmeq>(size, machInst, pd, zn, zm, pg);
          case 0x3:
            return decodeSveCmpF<SveFcmne>(size, machInst, pd, zn, zm, pg);
          case 0x4:
            return decodeSveCmpF<SveFcmuo>(size, machInst, pd, zn, zm, pg);
          case 0x5:
            return decodeSveCmpF<SveFacge>(size, machInst, pd, zn, zm, pg);
          case 0x7:
            return decodeSveCmpF<SveFacgt>(size, machInst, pd, zn, zm, pg);
        }
        return new Unknown64(machInst);
    } // decodeSveFpCmpVec

    StaticInstPtr
    decodeSveFpFusedMulAdd(ExtMachInst machInst)
    {
        IntRegIndex zda = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
        IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 20, 16);
        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10);

        uint8_t size = bits(machInst, 23, 22);
        if (size == 0) {
            return new Unknown64(machInst);
        }
        uint8_t opc = bits(machInst, 15, 13);

        switch (opc) {
          case 0x0:
            return decodeSveTerPredF<SveFmla>(
                size, machInst, zda, zn, zm, pg);
          case 0x1:
            return decodeSveTerPredF<SveFmls>(
                size, machInst, zda, zn, zm, pg);
          case 0x2:
            return decodeSveTerPredF<SveFnmla>(
                size, machInst, zda, zn, zm, pg);
          case 0x3:
            return decodeSveTerPredF<SveFnmls>(
                size, machInst, zda, zn, zm, pg);
          case 0x4:
            return decodeSveTerPredF<SveFmad>(
                size, machInst, zda /* zdn */, zm /* za */, zn, pg);
          case 0x5:
            return decodeSveTerPredF<SveFmsb>(
                size, machInst, zda /* zdn */, zm /* za */, zn, pg);
          case 0x6:
            return decodeSveTerPredF<SveFnmad>(
                size, machInst, zda /* zdn */, zm /* za */, zn, pg);
          case 0x7:
            return decodeSveTerPredF<SveFnmsb>(
                size, machInst, zda /* zdn */, zm /* za */, zn, pg);
        }
        return new Unknown64(machInst);
    } // decodeSveFpFusedMulAdd

    StaticInstPtr
    decodeSveFpCplxAdd(ExtMachInst machInst)
    {
        uint8_t size = bits(machInst, 23, 22);
        uint8_t rot = bits(machInst, 16) << 1 | 0x01;
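        // rot encodes the FCADD rotation: 0x1 is 90 degrees, 0x3 is
        // 270 degrees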
        IntRegIndex zdn = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10);
        switch (size) {
          case 1:
            return new SveFcadd<uint16_t>(machInst,
                zdn, zdn, zm, pg, rot);
          case 2:
            return new SveFcadd<uint32_t>(machInst,
                zdn, zdn, zm, pg, rot);
          case 3:
            return new SveFcadd<uint64_t>(machInst,
                zdn, zdn, zm, pg, rot);
        }
        return new Unknown64(machInst);
    } // decodeSveFpCplxAdd

    StaticInstPtr
    decodeSveFpCplxMulAddVec(ExtMachInst machInst)
    {
        uint8_t size = bits(machInst, 23, 22);
        if (size == 0) {
            return new Unknown64(machInst);
        }

        IntRegIndex zda = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10);
        IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 20, 16);
        uint8_t rot = bits(machInst, 14, 13);
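        // rot encodes the FCMLA rotation: 0, 90, 180 or 270 degrees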
        switch (size) {
          case 1:
            return new SveFcmlav<uint16_t>(machInst,
                zda, zn, zm, pg, rot);
          case 2:
            return new SveFcmlav<uint32_t>(machInst,
                zda, zn, zm, pg, rot);
          case 3:
            return new SveFcmlav<uint64_t>(machInst,
                zda, zn, zm, pg, rot);
        }

        return new Unknown64(machInst);
    } // decodeSveFpCplxMulAddVec

    StaticInstPtr
    decodeSveFpCplxMulAddIndexed(ExtMachInst machInst)
    {
        uint8_t size = bits(machInst, 23, 22);
        if (size < 2) {
            return new Unknown64(machInst);
        }

        IntRegIndex zda = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
        IntRegIndex zm;
        uint8_t rot = bits(machInst, 11, 10);
        uint8_t imm;

        switch (size) {
          case 2:
            zm = (IntRegIndex) (uint8_t) bits(machInst, 18, 16);
            imm = bits(machInst, 20, 19);
            return new SveFcmlai<uint32_t>(machInst,
                zda, zn, zm, rot, imm);
          case 3:
            zm = (IntRegIndex) (uint8_t) bits(machInst, 19, 16);
            imm = bits(machInst, 20);
            return new SveFcmlai<uint64_t>(machInst,
                zda, zn, zm, rot, imm);
        }
        return new Unknown64(machInst);
    } // decodeSveFpCplxMulAddIndexed

    StaticInstPtr
    decodeSveFpMulIndexed(ExtMachInst machInst)
    {
        IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);

        uint8_t size = bits(machInst, 23, 22);
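        // For 16-bit elements the 3-bit index is bit 22 (high bit) plus
        // bits 20:19, with zm in bits 18:16; 32-bit elements use index
        // bits 20:19, and 64-bit elements use bit 20 with a 4-bit zm field.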
        switch (size) {
          case 0x0:
          case 0x1:
            return new SveFmulIdx<uint16_t>(
                machInst, zd, zn,
                (IntRegIndex) (uint8_t) bits(machInst, 18, 16),
                bits(machInst, 20, 19) | (bits(machInst, 22) << 2));
          case 0x2:
            return new SveFmulIdx<uint32_t>(
                machInst, zd, zn,
                (IntRegIndex) (uint8_t) bits(machInst, 18, 16),
                bits(machInst, 20, 19));
          case 0x3:
            return new SveFmulIdx<uint64_t>(
                machInst, zd, zn,
                (IntRegIndex) (uint8_t) bits(machInst, 19, 16),
                bits(machInst, 20));
          default:
            return new Unknown64(machInst);
        }

    } // decodeSveFpMulIndexed

    StaticInstPtr
    decodeSveFpMulAddIndexed(ExtMachInst machInst)
    {
        IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
        const uint8_t op = bits(machInst, 10);

        uint8_t size = bits(machInst, 23, 22);
        switch (size) {
          case 0x0:
          case 0x1:
            if (op) {
                return new SveFmlsIdx<uint16_t>(
                    machInst, zd, zn,
                    (IntRegIndex) (uint8_t) bits(machInst, 18, 16),
                    bits(machInst, 20, 19) | (bits(machInst, 22) << 2));
            } else {
                return new SveFmlaIdx<uint16_t>(
                    machInst, zd, zn,
                    (IntRegIndex) (uint8_t) bits(machInst, 18, 16),
                    bits(machInst, 20, 19) | (bits(machInst, 22) << 2));
            }
          case 0x2:
            if (op) {
                return new SveFmlsIdx<uint32_t>(
                    machInst, zd, zn,
                    (IntRegIndex) (uint8_t) bits(machInst, 18, 16),
                    bits(machInst, 20, 19));
            } else {
                return new SveFmlaIdx<uint32_t>(
                    machInst, zd, zn,
                    (IntRegIndex) (uint8_t) bits(machInst, 18, 16),
                    bits(machInst, 20, 19));
            }
          case 0x3:
            if (op) {
                return new SveFmlsIdx<uint64_t>(
                    machInst, zd, zn,
                    (IntRegIndex) (uint8_t) bits(machInst, 19, 16),
                    bits(machInst, 20));
            } else {
                return new SveFmlaIdx<uint64_t>(
                    machInst, zd, zn,
                    (IntRegIndex) (uint8_t) bits(machInst, 19, 16),
                    bits(machInst, 20));
            }
          default:
            return new Unknown64(machInst);
        }
    } // decodeSveFpMulAddIndexed

    StaticInstPtr
    decodeSveMemGather32(ExtMachInst machInst)
    {
        if (bits(machInst, 15)) {
            if (bits(machInst, 22)) {
                // SVE load and broadcast element
                IntRegIndex zt = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
                IntRegIndex rn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
                uint64_t imm = bits(machInst, 21, 16);
                IntRegIndex pg = (IntRegIndex) (uint8_t)
                    bits(machInst, 12, 10);
                uint8_t dtype = (bits(machInst, 24, 23) << 2) |
                    bits(machInst, 14, 13);
                return decodeSveContigLoadSIInsts<SveLoadAndRepl>(
                    dtype, machInst, zt, pg, rn, imm, false, true);
            } else {
                if (bits(machInst, 21)) {
                    // SVE 32-bit gather load (vector plus immediate)
                    IntRegIndex zt = (IntRegIndex) (uint8_t)
                        bits(machInst, 4, 0);
                    IntRegIndex zn = (IntRegIndex) (uint8_t)
                        bits(machInst, 9, 5);
                    uint64_t imm = bits(machInst, 20, 16);
                    IntRegIndex pg = (IntRegIndex) (uint8_t)
                        bits(machInst, 12, 10);
                    uint8_t dtype = (bits(machInst, 24, 23) << 1) |
                        bits(machInst, 14);
                    uint8_t ff = bits(machInst, 13);
                    return decodeSveGatherLoadVIInsts(
                        dtype, machInst, zt, pg, zn, imm, true, ff);
                } else {
                    uint8_t b14_13 = bits(machInst, 14, 13);
                    if (b14_13 == 0x2 && bits(machInst, 4) == 0) {
                        // TODO: SVE contiguous prefetch (scalar plus scalar)
                        return new WarnUnimplemented("prf[bhwd]", machInst);
                    } else if (b14_13 == 0x3 && bits(machInst, 4) == 0) {
                        // TODO: SVE 32-bit gather prefetch (vector plus
                        // immediate)
                        return new WarnUnimplemented("prf[bhwd]", machInst);
                    }
                }
            }
        } else {
            uint8_t b24_23 = bits(machInst, 24, 23);
            if (b24_23 != 0x3 && bits(machInst, 21) == 0) {
                // SVE 32-bit gather load (scalar plus 32-bit unscaled offsets)
                IntRegIndex zt = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
                IntRegIndex rn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
                IntRegIndex zm = (IntRegIndex) (uint8_t)
                    bits(machInst, 20, 16);
                IntRegIndex pg = (IntRegIndex) (uint8_t)
                    bits(machInst, 12, 10);
                uint8_t dtype = (bits(machInst, 24, 23) << 1) |
                    bits(machInst, 14);
                uint8_t xs = bits(machInst, 22);
                uint8_t ff = bits(machInst, 13);
                return decodeSveGatherLoadSVInsts(
                    dtype, machInst, zt, pg, rn, zm,
                    true, true, xs, false, ff);
            }
            switch (b24_23) {
              case 0x0:
                if (bits(machInst, 21) && bits(machInst, 4) == 0) {
                    // TODO: SVE 32-bit gather prefetch (scalar plus 32-bit
                    // scaled offsets)
                    return new WarnUnimplemented("prf[bhwd]", machInst);
                }
                break;
              case 0x1:
                if (bits(machInst, 21)) {
                    // SVE 32-bit gather load halfwords (scalar plus 32-bit
                    // scaled offsets)
                    IntRegIndex zt = (IntRegIndex) (uint8_t)
                        bits(machInst, 4, 0);
                    IntRegIndex rn = (IntRegIndex) (uint8_t)
                        bits(machInst, 9, 5);
                    IntRegIndex zm = (IntRegIndex) (uint8_t)
                        bits(machInst, 20, 16);
                    IntRegIndex pg = (IntRegIndex) (uint8_t)
                        bits(machInst, 12, 10);
                    uint8_t xs = bits(machInst, 22);
                    uint8_t ff = bits(machInst, 13);
                    if (bits(machInst, 14)) {
                        return
                            new SveIndexedMemSV<uint32_t, uint16_t,
                                    SveGatherLoadSVMicroop,
                                    SveFirstFaultWritebackMicroop>(
                                ff ? "ldff1" : "ld1", machInst, MemReadOp,
                                zt, pg, rn, zm, true, xs, true, ff);
                    } else {
                        return
                            new SveIndexedMemSV<int32_t, int16_t,
                                    SveGatherLoadSVMicroop,
                                    SveFirstFaultWritebackMicroop>(
                                ff ? "ldff1" : "ld1", machInst, MemReadOp,
                                zt, pg, rn, zm, true, xs, true, ff);
                    }
                }
                break;
              case 0x2:
                if (bits(machInst, 21)) {
                    // SVE 32-bit gather load words (scalar plus 32-bit scaled
                    // offsets)
                    IntRegIndex zt = (IntRegIndex) (uint8_t)
                        bits(machInst, 4, 0);
                    IntRegIndex rn = (IntRegIndex) (uint8_t)
                        bits(machInst, 9, 5);
                    IntRegIndex zm = (IntRegIndex) (uint8_t)
                        bits(machInst, 20, 16);
                    IntRegIndex pg = (IntRegIndex) (uint8_t)
                        bits(machInst, 12, 10);
                    uint8_t xs = bits(machInst, 22);
                    uint8_t ff = bits(machInst, 13);
                    return new SveIndexedMemSV<uint32_t, uint32_t,
                            SveGatherLoadSVMicroop,
                            SveFirstFaultWritebackMicroop>(
                        ff ? "ldff1" : "ld1", machInst, MemReadOp, zt, pg, rn,
                        zm, true, xs, true, ff);
                }
                break;
              case 0x3:
                if (bits(machInst, 22) == 0 && bits(machInst, 14, 13) == 0x0 &&
                        bits(machInst, 4) == 0) {
                    // SVE load predicate register
                    IntRegIndex pt = (IntRegIndex) (uint8_t)
                        bits(machInst, 3, 0);
                    IntRegIndex rn = (IntRegIndex) (uint8_t)
                        bits(machInst, 9, 5);
                    uint64_t imm = sext<9>((bits(machInst, 21, 16) << 3) |
                        bits(machInst, 12, 10));
                    return new SveLdrPred(machInst, pt, rn, imm);
                } else if (bits(machInst, 22) == 0 &&
                        bits(machInst, 14, 13) == 0x2) {
                    // SVE load vector register
                    IntRegIndex zt = (IntRegIndex) (uint8_t)
                        bits(machInst, 4, 0);
                    IntRegIndex rn = (IntRegIndex) (uint8_t)
                        bits(machInst, 9, 5);
                    uint64_t imm = sext<9>((bits(machInst, 21, 16) << 3) |
                        bits(machInst, 12, 10));
                    return new SveLdrVec(machInst, zt, rn, imm);
                } else if (bits(machInst, 22) == 1 &&
                        bits(machInst, 4) == 0) {
                    // TODO: SVE contiguous prefetch (scalar plus immediate)
                    return new WarnUnimplemented("prf[bhwd]", machInst);
                }
                break;
            }
        }
        return new Unknown64(machInst);
    } // decodeSveMemGather32

    StaticInstPtr
    decodeSveLoadBcastQuadSS(ExtMachInst machInst)
    {
        return new Unknown64(machInst);
    } // decodeSveLoadBcastQuadSS

    StaticInstPtr
    decodeSveLoadBcastQuadSI(ExtMachInst machInst)
    {
        return new Unknown64(machInst);
    } // decodeSveLoadBcastQuadSI

    StaticInstPtr
    decodeSveContigLoadSS(ExtMachInst machInst)
    {
        IntRegIndex zt = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex rn = makeSP((IntRegIndex) (uint8_t) bits(machInst, 9, 5));
        IntRegIndex rm = makeSP(
            (IntRegIndex) (uint8_t) bits(machInst, 20, 16));
        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10);

        if (rm == 0x1f) {
            return new Unknown64(machInst);
        }

        return decodeSveContigLoadSSInsts<SveContigLoadSS>(
            bits(machInst, 24, 21), machInst, zt, pg, rn, rm, false);
    } // decodeSveContigLoadSS

    StaticInstPtr
    decodeSveContigFFLoadSS(ExtMachInst machInst)
    {
        IntRegIndex zt = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex rn = makeSP((IntRegIndex) (uint8_t) bits(machInst, 9, 5));
        IntRegIndex rm = makeSP(
            (IntRegIndex) (uint8_t) bits(machInst, 20, 16));
        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10);

        if (rm == 0x1f) {
            return new Unknown64(machInst);
        }

        return decodeSveContigLoadSSInsts<SveContigFFLoadSS>(
            bits(machInst, 24, 21), machInst, zt, pg, rn, rm, true);
    } // decodeSveContigFFLoadSS

    StaticInstPtr
    decodeSveContigLoadSI(ExtMachInst machInst)
    {
        IntRegIndex zt = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex rn = makeSP((IntRegIndex) (uint8_t) bits(machInst, 9, 5));
        uint64_t imm = sext<4>(bits(machInst, 19, 16));
        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10);

        return decodeSveContigLoadSIInsts<SveContigLoadSI>(
            bits(machInst, 24, 21), machInst, zt, pg, rn, imm, false);
    } // decodeSveContigLoadSI

    StaticInstPtr
    decodeSveContigNFLoadSI(ExtMachInst machInst)
    {
        IntRegIndex zt = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex rn = makeSP((IntRegIndex) (uint8_t) bits(machInst, 9, 5));
        uint64_t imm = sext<4>(bits(machInst, 19, 16));
        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10);

        return decodeSveContigLoadSIInsts<SveContigNFLoadSI>(
            bits(machInst, 24, 21), machInst, zt, pg, rn, imm, true);
    } // decodeSveContigNFLoadSI

    StaticInstPtr
    decodeSveContigNTLoadSS(ExtMachInst machInst)
    {
        return new Unknown64(machInst);
    } // decodeSveContigNTLoadSS

    StaticInstPtr
    decodeSveLoadStructsSS(ExtMachInst machInst)
    {
        return new Unknown64(machInst);
    } // decodeSveLoadStructsSS
1// Copyright (c) 2017-2019 ARM Limited
2// All rights reserved
3//
4// The license below extends only to copyright in the software and shall
5// not be construed as granting a license to any other intellectual
6// property including but not limited to intellectual property relating
7// to a hardware implementation of the functionality of the software
8// licensed hereunder. You may use the software subject to the license
9// terms below provided that you ensure that this notice is replicated
10// unmodified and in its entirety in all distributions of the software,
11// modified or unmodified, in source code or in binary form.
12//
13// Redistribution and use in source and binary forms, with or without
14// modification, are permitted provided that the following conditions are
15// met: redistributions of source code must retain the above copyright
16// notice, this list of conditions and the following disclaimer;
17// redistributions in binary form must reproduce the above copyright
18// notice, this list of conditions and the following disclaimer in the
19// documentation and/or other materials provided with the distribution;
20// neither the name of the copyright holders nor the names of its
21// contributors may be used to endorse or promote products derived from
22// this software without specific prior written permission.
23//
24// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
25// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
26// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
27// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
28// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
29// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
30// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
31// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
32// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
33// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
34// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35//
36// Authors: Giacomo Gabrielli
37
38/// @file
39/// SVE 2nd-level decoder.
40
41output decoder {{
42namespace Aarch64
43{
44
45 StaticInstPtr
46 decodeSveIntArithBinPred(ExtMachInst machInst)
47 {
48 IntRegIndex zdn = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
49 IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
50 IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10);
51
52 switch (bits(machInst, 20, 19)) {
53 case 0x0:
54 {
55 uint8_t size = bits(machInst, 23, 22);
56 uint8_t opc = bits(machInst, 18, 16);
57 switch (opc) {
58 case 0x0:
59 return decodeSveBinDestrPredU<SveAddPred>(
60 size, machInst, zdn, zm, pg);
61 case 0x1:
62 return decodeSveBinDestrPredU<SveSubPred>(
63 size, machInst, zdn, zm, pg);
64 case 0x3:
65 return decodeSveBinDestrPredU<SveSubr>(
66 size, machInst, zdn, zm, pg);
67 default:
68 return new Unknown64(machInst);
69 }
70 }
71 case 0x1:
72 {
73 uint8_t size = bits(machInst, 23, 22);
74 uint8_t u = bits(machInst, 16);
75 uint8_t opc = bits(machInst, 18, 17);
76 switch (opc) {
77 case 0x0:
78 return decodeSveBinDestrPred<SveSmax, SveUmax>(
79 size, u, machInst, zdn, zm, pg);
80 case 0x1:
81 return decodeSveBinDestrPred<SveSmin, SveUmin>(
82 size, u, machInst, zdn, zm, pg);
83 case 0x2:
84 return decodeSveBinDestrPred<SveSabd, SveUabd>(
85 size, u, machInst, zdn, zm, pg);
86 default:
87 return new Unknown64(machInst);
88 }
89 }
90 case 0x2:
91 {
92 uint8_t size = bits(machInst, 23, 22);
93 uint8_t u = bits(machInst, 16);
94 uint8_t opc = bits(machInst, 18, 17);
95 switch (opc) {
96 case 0x0:
97 if (u == 0) {
98 return decodeSveBinDestrPredU<SveMul>(
99 size, machInst, zdn, zm, pg);
100 } else {
101 return new Unknown64(machInst);
102 }
103 case 0x1:
104 return decodeSveBinDestrPred<SveSmulh, SveUmulh>(
105 size, u, machInst, zdn, zm, pg);
106 case 0x2:
107 if (size == 0x2 || size == 0x3) {
108 return decodeSveBinDestrPred<SveSdiv, SveUdiv>(
109 size, u, machInst, zdn, zm, pg);
110 } else {
111 return new Unknown64(machInst);
112 }
113 case 0x3:
114 if (size == 0x2 || size == 0x3) {
115 return decodeSveBinDestrPred<SveSdivr, SveUdivr>(
116 size, u, machInst, zdn, zm, pg);
117 } else {
118 return new Unknown64(machInst);
119 }
120 }
121 break;
122 }
123 case 0x3:
124 {
125 uint8_t size = bits(machInst, 23, 22);
126 uint8_t opc = bits(machInst, 18, 16);
127
128 switch (opc) {
129 case 0x0:
130 return decodeSveBinDestrPredU<SveOrrPred>(
131 size, machInst, zdn, zm, pg);
132 case 0x1:
133 return decodeSveBinDestrPredU<SveEorPred>(
134 size, machInst, zdn, zm, pg);
135 case 0x2:
136 return decodeSveBinDestrPredU<SveAndPred>(
137 size, machInst, zdn, zm, pg);
138 case 0x3:
139 return decodeSveBinDestrPredU<SveBicPred>(
140 size, machInst, zdn, zm, pg);
141 default:
142 return new Unknown64(machInst);
143 }
144 }
145 }
146 return new Unknown64(machInst);
147 } // decodeSveArithBinPred
148
149 StaticInstPtr
150 decodeSveIntReduc(ExtMachInst machInst)
151 {
152 IntRegIndex vd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
153 IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
154 IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10);
155
156 uint8_t size = bits(machInst, 23, 22);
157
158 switch (bits(machInst, 20, 19)) {
159 case 0x0:
160 {
161 uint8_t u = bits(machInst, 16);
162 uint8_t opc = bits(machInst, 18, 17);
163 if (opc != 0x0 || (!u && size == 0x3)) {
164 return new Unknown64(machInst);
165 } else {
166 return decodeSveWideningReduc<SveSaddv, SveUaddv>(
167 size, u, machInst, vd, zn, pg);
168 }
169 }
170 case 0x1:
171 {
172 uint8_t u = bits(machInst, 16);
173 uint8_t opc = bits(machInst, 18, 17);
174 switch (opc) {
175 case 0x0:
176 return decodeSveUnaryPred<SveSmaxv, SveUmaxv>(
177 size, u, machInst, vd, zn, pg);
178 case 0x1:
179 return decodeSveUnaryPred<SveSminv, SveUminv>(
180 size, u, machInst, vd, zn, pg);
181 default:
182 return new Unknown64(machInst);
183 }
184 }
185 case 0x2:
186 {
187 uint8_t opc = bits(machInst, 18, 17);
188 uint8_t merge = bits(machInst, 16);
189 switch (opc) {
190 case 0x0:
191 if (merge) {
192 return decodeSveUnaryPredU<SveMovprfxPredM>(
193 size, machInst, vd /* zd */, zn, pg);
194 } else {
195 return decodeSveUnaryPredU<SveMovprfxPredZ>(
196 size, machInst, vd /* zd */, zn, pg);
197 }
198 default:
199 return new Unknown64(machInst);
200 }
201 }
202 case 0x3:
203 {
204 uint8_t opc = bits(machInst, 18, 16);
205 switch (opc) {
206 case 0x0:
207 return decodeSveUnaryPredU<SveOrv>(
208 size, machInst, vd, zn, pg);
209 case 0x1:
210 return decodeSveUnaryPredU<SveEorv>(
211 size, machInst, vd, zn, pg);
212 case 0x2:
213 return decodeSveUnaryPredU<SveAndv>(
214 size, machInst, vd, zn, pg);
215 default:
216 return new Unknown64(machInst);
217 }
218 }
219 }
220 return new Unknown64(machInst);
221 } // decodeSveIntReduc
222
223 StaticInstPtr
224 decodeSveIntMulAdd(ExtMachInst machInst)
225 {
226 IntRegIndex zda = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
227 IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
228 IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 20, 16);
229 IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10);
230
231 uint8_t size = bits(machInst, 23, 22);
232 uint8_t opc = (bits(machInst, 15) << 1) | bits(machInst, 13);
233 switch (opc) {
234 case 0x0:
235 return decodeSveTerPredS<SveMla>(
236 size, machInst, zda, zn, zm, pg);
237 case 0x1:
238 return decodeSveTerPredS<SveMls>(
239 size, machInst, zda, zn, zm, pg);
240 case 0x2:
241 return decodeSveTerPredS<SveMad>(
242 size, machInst, zda /* zdn */, zn /* za */, zm, pg);
243 case 0x3:
244 return decodeSveTerPredS<SveMsb>(
245 size, machInst, zda /* zdn */, zn /* za */, zm, pg);
246 }
247 return new Unknown64(machInst);
248 } // decodeSveIntMulAdd
249
250 StaticInstPtr
251 decodeSveShiftByImmPred0(ExtMachInst machInst)
252 {
253 IntRegIndex zdn = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
254 IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10);
255 uint8_t imm3 = (uint8_t) bits(machInst, 7, 5);
256
257 uint8_t tsize = (bits(machInst, 23, 22) << 2) | bits(machInst, 9, 8);
258 uint8_t esize = 0;
259 uint8_t size = 0;
260
261 if (tsize == 0x0) {
262 return new Unknown64(machInst);
263 } else if (tsize == 0x1) {
264 esize = 8;
265 } else if ((tsize & 0x0E) == 0x2) {
266 esize = 16;
267 size = 1;
268 } else if ((tsize & 0x0C) == 0x4) {
269 esize = 32;
270 size = 2;
271 } else if ((tsize & 0x08) == 0x8) {
272 esize = 64;
273 size = 3;
274 }
275
276 uint8_t opc = bits(machInst, 18, 16);
277 switch (opc) {
278 case 0x0:
279 {
280 unsigned shiftAmt = 2 * esize - ((tsize << 3) | imm3);
281 return decodeSveBinImmPredU<SveAsrImmPred>(
282 size, machInst, zdn, shiftAmt, pg);
283 }
284 case 0x01:
285 {
286 unsigned shiftAmt = 2 * esize - ((tsize << 3) | imm3);
287 return decodeSveBinImmPredU<SveLsrImmPred>(
288 size, machInst, zdn, shiftAmt, pg);
289 }
290 case 0x03:
291 {
292 unsigned shiftAmt = ((tsize << 3) | imm3) - esize;
293 return decodeSveBinImmPredU<SveLslImmPred>(
294 size, machInst, zdn, shiftAmt, pg);
295 }
296 case 0x04:
297 {
298 unsigned shiftAmt = 2 * esize - ((tsize << 3) | imm3);
299 return decodeSveBinImmPredS<SveAsrd>(
300 size, machInst, zdn, shiftAmt, pg);
301 }
302 }
303 return new Unknown64(machInst);
304 } // decodeSveShiftByImmPred0
305
306 StaticInstPtr
307 decodeSveShiftByVectorPred(ExtMachInst machInst)
308 {
309 IntRegIndex zdn = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
310 IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
311 IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10);
312 uint8_t size = bits(machInst, 23, 22);
313 uint8_t opc = bits(machInst, 18, 16);
314 switch (opc) {
315 case 0:
316 return decodeSveBinDestrPredU<SveAsrPred>(
317 size, machInst, zdn, zm, pg);
318 case 1:
319 return decodeSveBinDestrPredU<SveLsrPred>(
320 size, machInst, zdn, zm, pg);
321 case 3:
322 return decodeSveBinDestrPredU<SveLslPred>(
323 size, machInst, zdn, zm, pg);
324 case 4:
325 return decodeSveBinDestrPredU<SveAsrr>(
326 size, machInst, zdn, zm, pg);
327 case 5:
328 return decodeSveBinDestrPredU<SveLsrr>(
329 size, machInst, zdn, zm, pg);
330 case 7:
331 return decodeSveBinDestrPredU<SveLslr>(
332 size, machInst, zdn, zm, pg);
333 }
334 return new Unknown64(machInst);
335 } // decodeSveShiftByVectorPred
336
337 StaticInstPtr
338 decodeSveShiftByWideElemsPred(ExtMachInst machInst)
339 {
340 IntRegIndex zdn = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
341 IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
342 IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10);
343 uint8_t size = bits(machInst, 23, 22);
344 uint8_t opc = bits(machInst, 18, 16);
345 switch (opc) {
346 case 0x0:
347 return decodeSveBinDestrPredU<SveAsrWidePred>(
348 size, machInst, zdn, zm, pg);
349 case 0x1:
350 return decodeSveBinDestrPredU<SveLsrWidePred>(
351 size, machInst, zdn, zm, pg);
352 case 0x3:
353 return decodeSveBinDestrPredU<SveLslWidePred>(
354 size, machInst, zdn, zm, pg);
355 }
356 return new Unknown64(machInst);
357 } // decodeSveShiftByWideElemsPred
358
359 StaticInstPtr
360 decodeSveShiftByImmPred(ExtMachInst machInst)
361 {
362 uint8_t b20_19 = bits(machInst, 20, 19);
363 uint8_t b23_22 = bits(machInst, 23, 22);
364
365 if (b20_19 == 0x0) {
366 return decodeSveShiftByImmPred0(machInst);
367 } else if (b20_19 == 0x2) {
368 return decodeSveShiftByVectorPred(machInst);
369 } else if (b20_19 == 0x3 && b23_22 != 0x3) {
370 return decodeSveShiftByWideElemsPred(machInst);
371 }
372 return new Unknown64(machInst);
373 } // decodeSveShiftByImmPred
374
375 StaticInstPtr
376 decodeSveIntArithUnaryPred(ExtMachInst machInst)
377 {
378 IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
379 IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
380 IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10);
381 unsigned esize = bits(machInst, 23, 22);
382 uint8_t opg = bits(machInst, 20, 19);
383 uint8_t opc = bits(machInst, 18, 16);
384 if (opg == 0x2) {
385 bool unsig = static_cast<bool>(opc & 1);
386 switch (opc) {
387 case 0:
388 case 1:
389 if (esize == 0) break;
390 if (unsig) {
391 return decodeSveUnaryExtendFromBPredU<SveUxtb>(
392 esize, machInst, zd, zn, pg);
393 } else {
394 return decodeSveUnaryExtendFromBPredU<SveSxtb>(
395 esize, machInst, zd, zn, pg);
396 }
397 case 2:
398 case 3:
399 if (esize < 2) break;
400 if (unsig) {
401 return decodeSveUnaryExtendFromHPredU<SveUxth>(
402 esize, machInst, zd, zn, pg);
403 } else {
404 return decodeSveUnaryExtendFromHPredU<SveSxth>(
405 esize, machInst, zd, zn, pg);
406 }
407 case 4:
408 case 5:
409 if (esize != 3) break;
410 if (unsig) {
411 return new SveUxtw<uint32_t, uint64_t>(
412 machInst, zd, zn, pg);
413 } else {
414 return new SveSxtw<uint32_t, uint64_t>(
415 machInst, zd, zn, pg);
416 }
417 case 6:
418 return decodeSveUnaryPredS<SveAbs>(
419 esize, machInst, zd, zn, pg);
420 case 7:
421 return decodeSveUnaryPredS<SveNeg>(
422 esize, machInst, zd, zn, pg);
423 }
424 } else if (opg == 0x3) {
425 switch (opc) {
426 case 0:
427 return decodeSveUnaryPredS<SveCls>(
428 esize, machInst, zd, zn, pg);
429 case 1:
430 return decodeSveUnaryPredS<SveClz>(
431 esize, machInst, zd, zn, pg);
432 case 2:
433 return decodeSveUnaryPredU<SveCnt>(
434 esize, machInst, zd, zn, pg);
435 case 3:
436 return decodeSveUnaryPredU<SveCnot>(
437 esize, machInst, zd, zn, pg);
438 case 4:
439 return decodeSveUnaryPredF<SveFabs>(
440 esize, machInst, zd, zn, pg);
441 case 5:
442 return decodeSveUnaryPredF<SveFneg>(
443 esize, machInst, zd, zn, pg);
444 case 6:
445 return decodeSveUnaryPredU<SveNot>(
446 esize, machInst, zd, zn, pg);
447 break;
448 }
449 }
450 return new Unknown64(machInst);
451 } // decodeSveIntArithUnaryPred
452
453 StaticInstPtr
454 decodeSveIntArithUnpred(ExtMachInst machInst)
455 {
456 IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
457 IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
458 IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 20, 16);
459
460 uint8_t opc = (uint8_t) bits(machInst, 12, 10);
461 uint8_t size = (uint8_t) bits(machInst, 23, 22);
462
463 switch (opc) {
464 case 0x0:
465 return decodeSveBinUnpredU<SveAddUnpred>(size, machInst,
466 zd, zn, zm);
467 case 0x1:
468 return decodeSveBinUnpredU<SveSubUnpred>(size, machInst,
469 zd, zn, zm);
470 case 0x4:
471 return decodeSveBinUnpredS<SveSqadd>(size, machInst,
472 zd, zn, zm);
473 case 0x5:
474 return decodeSveBinUnpredU<SveUqadd>(size, machInst,
475 zd, zn, zm);
476 case 0x6:
477 return decodeSveBinUnpredS<SveSqsub>(size, machInst,
478 zd, zn, zm);
479 case 0x7:
480 return decodeSveBinUnpredU<SveUqsub>(size, machInst,
481 zd, zn, zm);
482 }
483
484 return new Unknown64(machInst);
485 } // decodeSveIntArithUnpred
486
487 StaticInstPtr
488 decodeSveIntLogUnpred(ExtMachInst machInst)
489 {
490 IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
491 IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
492 IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 20, 16);
493 uint8_t opc = (uint8_t) (bits(machInst, 23, 22) << 3
494 | bits(machInst, 12, 10));
495
496 switch (opc) {
497 case 0x4:
498 return new SveAndUnpred<uint64_t>(machInst, zd, zn, zm);
499 case 0xc:
500 return new SveOrrUnpred<uint64_t>(machInst, zd, zn, zm);
501 case 0x14:
502 return new SveEorUnpred<uint64_t>(machInst, zd, zn, zm);
503 case 0x1c:
504 return new SveBicUnpred<uint64_t>(machInst, zd, zn, zm);
505 }
506
507 return new Unknown64(machInst);
508 } // decodeSveIntLogUnpred
509
510 StaticInstPtr
511 decodeSveIndexGen(ExtMachInst machInst)
512 {
513 IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
514 uint8_t size = (uint8_t) bits(machInst, 23, 22);
515 uint8_t grp = (uint8_t) bits(machInst, 11, 10);
516
517 switch (grp) {
518 case 0:
519 { // INDEX (immediate)
520 int8_t imm5 = sext<5>(bits(machInst, 9, 5));
521 int8_t imm5b = sext<5>(bits(machInst, 20, 16));
522 switch (size) {
523 case 0:
524 return new SveIndexII<int8_t>(machInst,
525 zd, imm5, imm5b);
526 case 1:
527 return new SveIndexII<int16_t>(machInst,
528 zd, imm5, imm5b);
529 case 2:
530 return new SveIndexII<int32_t>(machInst,
531 zd, imm5, imm5b);
532 case 3:
533 return new SveIndexII<int64_t>(machInst,
534 zd, imm5, imm5b);
535 }
536 break;
537 }
538 case 1:
539 { // INDEX (scalar, immediate)
540 int8_t imm5 = sext<5>(bits(machInst, 20, 16));
541 IntRegIndex zn = (IntRegIndex) (uint8_t) bits(
542 machInst, 9, 5);
543 switch (size) {
544 case 0:
545 return new SveIndexRI<int8_t>(machInst,
546 zd, zn, imm5);
547 case 1:
548 return new SveIndexRI<int16_t>(machInst,
549 zd, zn, imm5);
550 case 2:
551 return new SveIndexRI<int32_t>(machInst,
552 zd, zn, imm5);
553 case 3:
554 return new SveIndexRI<int64_t>(machInst,
555 zd, zn, imm5);
556 }
557 break;
558 }
559 case 2:
560 { // INDEX (immediate, scalar)
561 int8_t imm5 = sext<5>(bits(machInst, 9, 5));
562 IntRegIndex zm = (IntRegIndex) (uint8_t) bits(
563 machInst, 20, 16);
564 switch (size) {
565 case 0:
566 return new SveIndexIR<int8_t>(machInst,
567 zd, imm5, zm);
568 case 1:
569 return new SveIndexIR<int16_t>(machInst,
570 zd, imm5, zm);
571 case 2:
572 return new SveIndexIR<int32_t>(machInst,
573 zd, imm5, zm);
574 case 3:
575 return new SveIndexIR<int64_t>(machInst,
576 zd, imm5, zm);
577 }
578 break;
579 }
580 case 3:
581 { // INDEX (scalars)
582 IntRegIndex zn = (IntRegIndex) (uint8_t) bits(
583 machInst, 9, 5);
584 IntRegIndex zm = (IntRegIndex) (uint8_t) bits(
585 machInst, 20, 16);
586 switch (size) {
587 case 0:
588 return new SveIndexRR<int8_t>(machInst,
589 zd, zn, zm);
590 case 1:
591 return new SveIndexRR<int16_t>(machInst,
592 zd, zn, zm);
593 case 2:
594 return new SveIndexRR<int32_t>(machInst,
595 zd, zn, zm);
596 case 3:
597 return new SveIndexRR<int64_t>(machInst,
598 zd, zn, zm);
599 }
600 }
601 }
602 return new Unknown64(machInst);
603 } // decodeSveIndexGen
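    // Illustrative note (not in the original source): INDEX materialises
    // an arithmetic sequence, element i of zd gets start + i * step, with
    // start and step each taken from an immediate or a scalar register.
    // E.g. INDEX z0.s, #1, #2 yields {1, 3, 5, 7, ...} across the vector.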

    StaticInstPtr
    decodeSveStackAlloc(ExtMachInst machInst)
    {
        uint8_t b23_22 = bits(machInst, 23, 22);
        uint8_t b11 = bits(machInst, 11);
        if ((b23_22 & 0x2) == 0x0 && b11 == 0x0) {
            IntRegIndex rd = makeSP(
                (IntRegIndex) (uint8_t) bits(machInst, 4, 0));
            IntRegIndex rn = makeSP(
                (IntRegIndex) (uint8_t) bits(machInst, 20, 16));
            uint64_t imm = sext<6>(bits(machInst, 10, 5));
            if ((b23_22 & 0x1) == 0x0) {
                return new AddvlXImm(machInst, rd, rn, imm);
            } else {
                return new AddplXImm(machInst, rd, rn, imm);
            }
        } else if (b23_22 == 0x2 && b11 == 0x0) {
            IntRegIndex rd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
            uint64_t imm = sext<6>(bits(machInst, 10, 5));
            if (bits(machInst, 20, 16) == 0x1f) {
                return new SveRdvl(machInst, rd, imm);
            }
        }
        return new Unknown64(machInst);
    } // decodeSveStackAlloc
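    // Illustrative note (not in the original source): these instructions
    // scale the signed immediate by the vector length (VL, in bits):
    //   ADDVL rd, rn, #imm  ->  rd = rn + imm * (VL / 8)
    //   ADDPL rd, rn, #imm  ->  rd = rn + imm * (VL / 64)
    //   RDVL  rd, #imm      ->  rd = imm * (VL / 8)
    // which is why no element size field is decoded here.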

    StaticInstPtr
    decodeSveShiftByWideElemsUnpred(ExtMachInst machInst)
    {
        IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
        IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 20, 16);
        uint8_t size = bits(machInst, 23, 22);
        uint8_t opc = (uint8_t) bits(machInst, 11, 10);
        switch (opc) {
          case 0x0:
            return decodeSveBinUnpredU<SveAsrWideUnpred>(
                size, machInst, zd, zn, zm);
          case 0x1:
            return decodeSveBinUnpredU<SveLsrWideUnpred>(
                size, machInst, zd, zn, zm);
          case 0x3:
            return decodeSveBinUnpredU<SveLslWideUnpred>(
                size, machInst, zd, zn, zm);
        }
        return new Unknown64(machInst);
    } // decodeSveShiftByWideElemsUnpred

    StaticInstPtr
    decodeSveShiftByImmUnpredB(ExtMachInst machInst)
    {
        IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
        uint8_t imm3 = (uint8_t) bits(machInst, 18, 16);

        uint8_t tsize = (bits(machInst, 23, 22) << 2) | bits(machInst, 20, 19);
        uint8_t esize = 0;
        uint8_t size = 0;
        if (tsize == 0x0) {
            return new Unknown64(machInst);
        } else if (tsize == 0x1) {
            esize = 8;
        } else if ((tsize & 0x0E) == 0x2) {
            esize = 16;
            size = 1;
        } else if ((tsize & 0x0C) == 0x4) {
            esize = 32;
            size = 2;
        } else if ((tsize & 0x08) == 0x8) {
            esize = 64;
            size = 3;
        }

        uint8_t opc = bits(machInst, 11, 10);
        switch (opc) {
          case 0x00:
            {
                unsigned shiftAmt = 2 * esize - ((tsize << 3) | imm3);
                return decodeSveBinImmUnpredU<SveAsrImmUnpred>(
                    size, machInst, zd, zn, shiftAmt);
            }
          case 0x01:
            {
                unsigned shiftAmt = 2 * esize - ((tsize << 3) | imm3);
                return decodeSveBinImmUnpredU<SveLsrImmUnpred>(
                    size, machInst, zd, zn, shiftAmt);
            }
          case 0x03:
            {
                unsigned shiftAmt = ((tsize << 3) | imm3) - esize;
                return decodeSveBinImmUnpredU<SveLslImmUnpred>(
                    size, machInst, zd, zn, shiftAmt);
            }
        }

        return new Unknown64(machInst);
    } // decodeSveShiftByImmUnpredB
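    // Worked example (illustrative, not in the original source): the shift
    // amount lives in the 7-bit field tsize:imm3. For 32-bit elements
    // (tsize = 01xx) ASR/LSR encode 64 - shift, so tsize:imm3 = 0b0100000
    // (32) decodes to a shift of 32 and 0b0111111 (63) to a shift of 1;
    // LSL instead encodes esize + shift, giving shifts of 0..esize-1.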

    StaticInstPtr
    decodeSveShiftByImmUnpred(ExtMachInst machInst)
    {
        if (bits(machInst, 12)) {
            return decodeSveShiftByImmUnpredB(machInst);
        } else {
            return decodeSveShiftByWideElemsUnpred(machInst);
        }
    } // decodeSveShiftByImmUnpred

    StaticInstPtr
    decodeSveCompVecAddr(ExtMachInst machInst)
    {
        IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
        IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 20, 16);
        uint8_t mult = 1 << bits(machInst, 11, 10);

        uint8_t opc = bits(machInst, 23, 22);

        switch (opc) {
          case 0x0:
            return new SveAdr<uint64_t>(machInst, zd, zn, zm, mult,
                SveAdr<uint64_t>::SveAdrOffsetUnpackedSigned);
          case 0x1:
            return new SveAdr<uint64_t>(machInst, zd, zn, zm, mult,
                SveAdr<uint64_t>::SveAdrOffsetUnpackedUnsigned);
          case 0x2:
            return new SveAdr<uint32_t>(machInst, zd, zn, zm, mult,
                SveAdr<uint32_t>::SveAdrOffsetPacked);
          case 0x3:
            return new SveAdr<uint64_t>(machInst, zd, zn, zm, mult,
                SveAdr<uint64_t>::SveAdrOffsetPacked);
        }
        return new Unknown64(machInst);
    } // decodeSveCompVecAddr
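    // Illustrative note (not in the original source): ADR computes
    // zd[i] = zn[i] + (offset(zm[i]) * mult), with mult = 1, 2, 4 or 8
    // from bits 11:10. opc selects how zm is read: sign- or zero-extended
    // 32-bit offsets held in 64-bit lanes (unpacked), or offsets the same
    // width as the lane (packed, 32- or 64-bit).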

    StaticInstPtr
    decodeSveIntMiscUnpred(ExtMachInst machInst)
    {
        IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);

        uint8_t size = bits(machInst, 23, 22);
        uint8_t opc = bits(machInst, 11, 10);
        switch (opc) {
          case 0x0:
            // SVE floating-point trig select coefficient
            {
                if (size == 0) {
                    break;
                }
                IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst,
                    20, 16);
                return decodeSveBinUnpredF<SveFtssel>(
                    size, machInst, zd, zn, zm);
            }
          case 0x2:
            // SVE floating-point exponential accelerator
            if (size == 0) {
                break;
            }
            return decodeSveUnaryUnpredF<SveFexpa>(size, machInst, zd, zn);
          case 0x3:
            // SVE constructive prefix (unpredicated)
            if (size == 0x0 && bits(machInst, 20, 16) == 0x0) {
                return new SveMovprfxUnpred<uint64_t>(machInst, zd, zn);
            }
            break;
        }
        return new Unknown64(machInst);
    } // decodeSveIntMiscUnpred

    StaticInstPtr
    decodeSveElemCount(ExtMachInst machInst)
    {
        uint8_t opc20 = (uint8_t) bits(machInst, 20);
        uint8_t b13_12 = (uint8_t) bits(machInst, 13, 12);
        uint8_t opc11 = (uint8_t) bits(machInst, 11);
        uint8_t opc10 = (uint8_t) bits(machInst, 10);
        uint8_t opc11_10 = (uint8_t) bits(machInst, 11, 10);
        if (b13_12 == 0) {
            uint8_t pattern = (uint8_t) bits(machInst, 9, 5);
            uint8_t imm4 = (uint8_t) bits(machInst, 19, 16) + 1;
            IntRegIndex zdn = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
            unsigned size = (unsigned) bits(machInst, 23, 22);
            if (opc20) {
                if (opc11 == 0) {
                    if (opc10) {
                        return decodeSveElemIntCountLU<SveDecv>(size,
                            machInst, zdn, pattern, imm4);
                    } else {
                        return decodeSveElemIntCountLU<SveIncv>(size,
                            machInst, zdn, pattern, imm4);
                    }
                }
            } else {
                if (opc11) {
                    if (opc10) {
                        return decodeSveElemIntCountLU<SveUqdecv>(size,
                            machInst, zdn, pattern, imm4);
                    } else {
                        return decodeSveElemIntCountLS<SveSqdecv>(size,
                            machInst, zdn, pattern, imm4);
                    }
                } else {
                    if (opc10) {
                        return decodeSveElemIntCountLU<SveUqincv>(size,
                            machInst, zdn, pattern, imm4);
                    } else {
                        return decodeSveElemIntCountLS<SveSqincv>(size,
                            machInst, zdn, pattern, imm4);
                    }
                }
            }
        } else if (b13_12 == 3) {
            uint8_t pattern = (uint8_t) bits(machInst, 9, 5);
            uint8_t imm4 = (uint8_t) bits(machInst, 19, 16) + 1;
            IntRegIndex rdn = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
            unsigned size = (unsigned) bits(machInst, 23, 22);
            switch (opc11_10) {
              case 0:
                if (opc20) {
                    return decodeSveElemIntCountS<SveSqinc>(size,
                        machInst, rdn, pattern, imm4);
                } else {
                    return decodeSveElemIntCountS<SveSqinc32>(size,
                        machInst, rdn, pattern, imm4);
                }
              case 1:
                if (opc20) {
                    return decodeSveElemIntCountU<SveUqinc>(size,
                        machInst, rdn, pattern, imm4);
                } else {
                    return decodeSveElemIntCountU<SveUqinc32>(size,
                        machInst, rdn, pattern, imm4);
                }
              case 2:
                if (opc20) {
                    return decodeSveElemIntCountS<SveSqdec>(size,
                        machInst, rdn, pattern, imm4);
                } else {
                    return decodeSveElemIntCountS<SveSqdec32>(size,
                        machInst, rdn, pattern, imm4);
                }
              case 3:
                if (opc20) {
                    return decodeSveElemIntCountU<SveUqdec>(size,
                        machInst, rdn, pattern, imm4);
                } else {
                    return decodeSveElemIntCountU<SveUqdec32>(size,
                        machInst, rdn, pattern, imm4);
                }
            }
        } else if (opc20 && b13_12 == 2 && !(opc11_10 & 0x2)) {
            uint8_t pattern = (uint8_t) bits(machInst, 9, 5);
            uint8_t imm4 = (uint8_t) bits(machInst, 19, 16) + 1;
            IntRegIndex rdn = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
            unsigned size = (unsigned) bits(machInst, 23, 22);
            if (opc11_10 & 0x1) {
                return decodeSveElemIntCountU<SveDec>(size, machInst,
                    rdn, pattern, imm4);
            } else {
                return decodeSveElemIntCountU<SveInc>(size, machInst,
                    rdn, pattern, imm4);
            }
        } else if (!opc20 && b13_12 == 2 && opc11_10 == 0) {
            uint8_t pattern = (uint8_t) bits(machInst, 9, 5);
            uint8_t imm4 = (uint8_t) bits(machInst, 19, 16) + 1;
            IntRegIndex rd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
            unsigned size = (unsigned) bits(machInst, 23, 22);
            return decodeSveElemIntCountU<SveCntx>(size, machInst,
                rd, pattern, imm4);
        }
        return new Unknown64(machInst);
    } // decodeSveElemCount

    StaticInstPtr
    decodeSveLogMaskImm(ExtMachInst machInst)
    {
        IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        bool n = bits(machInst, 17);
        uint8_t immr = bits(machInst, 16, 11);
        uint8_t imms = bits(machInst, 10, 5);

        // Decode bitmask
        // len = MSB(n:NOT(imms)), len < 1 is undefined
        uint8_t len = 0;
        if (n) {
            len = 6;
        } else if (imms == 0x3f || imms == 0x3e) {
            return new Unknown64(machInst);
        } else {
            len = findMsbSet(imms ^ 0x3f);
        }
        // Generate r, s, and size
        uint64_t r = bits(immr, len - 1, 0);
        uint64_t s = bits(imms, len - 1, 0);
        uint8_t size = 1 << len;
        if (s == size - 1)
            return new Unknown64(machInst);
        // Generate the pattern with s 1s, rotated by r, with size bits
        uint64_t pattern = mask(s + 1);
        if (r) {
            pattern = (pattern >> r) | (pattern << (size - r));
            pattern &= mask(size);
        }
        // Replicate that to fill up the immediate
        for (unsigned i = 1; i < (64 / size); i *= 2)
            pattern |= (pattern << (i * size));
        uint64_t imm = pattern;

        if (bits(machInst, 19, 18) == 0x0) {
            if (bits(machInst, 23, 22) == 0x3) {
                return new SveDupm<uint64_t>(machInst, zd, imm);
            } else {
                switch (bits(machInst, 23, 22)) {
                  case 0x0:
                    return new SveOrrImm<uint64_t>(machInst, zd, imm);
                  case 0x1:
                    return new SveEorImm<uint64_t>(machInst, zd, imm);
                  case 0x2:
                    return new SveAndImm<uint64_t>(machInst, zd, imm);
                }
            }
        }

        return new Unknown64(machInst);
    } // decodeSveLogMaskImm
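    // Worked example of the bitmask decode above (illustrative, not in the
    // original source): n = 0, immr = 0b000000, imms = 0b111000 gives
    // imms ^ 0x3f = 0b000111, so len = 2 and the element size is 4 bits;
    // s = 0 selects a single set bit, and replication yields
    // imm = 0x1111111111111111. A nonzero r rotates each 4-bit element.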

    StaticInstPtr
    decodeSveIntWideImmPred(ExtMachInst machInst)
    {
        IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 19, 16);
        uint8_t size = bits(machInst, 23, 22);

        if (bits(machInst, 15) == 0x0) {
            uint64_t imm = bits(machInst, 12, 5);
            uint8_t sh = bits(machInst, 13);
            uint8_t m = bits(machInst, 14);
            if (sh) {
                if (size == 0x0) {
                    return new Unknown64(machInst);
                }
                imm <<= 8;
            }
            if (m) {
                if (sh) {
                    return decodeSveWideImmPredU<SveCpyImmMerge>(
                        size, machInst, zd, sext<16>(imm), pg);
                } else {
                    return decodeSveWideImmPredU<SveCpyImmMerge>(
                        size, machInst, zd, sext<8>(imm), pg);
                }
            } else {
                if (sh) {
                    return decodeSveWideImmPredU<SveCpyImmZero>(
                        size, machInst, zd, sext<16>(imm), pg,
                        false /* isMerging */);
                } else {
                    return decodeSveWideImmPredU<SveCpyImmZero>(
                        size, machInst, zd, sext<8>(imm), pg,
                        false /* isMerging */);
                }
            }
        } else if (bits(machInst, 15, 13) == 0x6 && size != 0x0) {
            uint64_t imm = vfp_modified_imm(bits(machInst, 12, 5),
                decode_fp_data_type(size));
            return decodeSveWideImmPredF<SveFcpy>(
                size, machInst, zd, imm, pg);
        }

        return new Unknown64(machInst);
    } // decodeSveIntWideImmPred

    StaticInstPtr
    decodeSvePermExtract(ExtMachInst machInst)
    {
        uint8_t b23_22 = (unsigned) bits(machInst, 23, 22);
        if (!b23_22) {
            uint8_t position =
                bits(machInst, 20, 16) << 3 | bits(machInst, 12, 10);
            IntRegIndex zdn = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
            IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
            return new SveExt<uint8_t>(machInst, zdn, zm, position);
        }
        return new Unknown64(machInst);
    } // decodeSvePermExtract

    StaticInstPtr
    decodeSvePermUnpred(ExtMachInst machInst)
    {
        uint8_t b12_10 = bits(machInst, 12, 10);
        if (b12_10 == 0x4) {
            unsigned size = (unsigned) bits(machInst, 23, 22);
            IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
            IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
            IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 20, 16);
            return decodeSveBinUnpredU<SveTbl>(size, machInst, zd, zn, zm);
        } else if (bits(machInst, 20, 16) == 0x0 && b12_10 == 0x6) {
            uint8_t size = bits(machInst, 23, 22);
            IntRegIndex rn = makeSP(
                (IntRegIndex) (uint8_t) bits(machInst, 9, 5));
            IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
            return decodeSveUnaryUnpredU<SveDupScalar>(size, machInst, zd, rn);
        } else if (bits(machInst, 20, 16) == 0x4 && b12_10 == 0x6) {
            uint8_t size = bits(machInst, 23, 22);
            IntRegIndex zdn = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
            IntRegIndex rm = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
            return decodeSveUnaryUnpredU<SveInsr>(size, machInst, zdn, rm);
        } else if (bits(machInst, 20, 16) == 0x14 && b12_10 == 0x6) {
            uint8_t size = bits(machInst, 23, 22);
            IntRegIndex zdn = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
            IntRegIndex vm = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
            return decodeSveUnaryUnpredU<SveInsrf>(size, machInst, zdn, vm);
        } else if (bits(machInst, 20, 16) == 0x18 && b12_10 == 0x6) {
            uint8_t size = bits(machInst, 23, 22);
            IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
            IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
            return decodeSveUnaryUnpredU<SveRevv>(size, machInst, zd, zn);
        } else if (b12_10 == 0x0 && bits(machInst, 20, 16) != 0x0) {
            uint8_t imm =
                bits(machInst, 23, 22) << 5 |  // imm3h
                bits(machInst, 20) << 4 |      // imm3l
                bits(machInst, 19, 16);        // tsz
            IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
            IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
            if (imm & 0x1) {
                imm >>= 1;
                return new SveDupIdx<uint8_t>(machInst, zd, zn, imm);
            } else if (imm & 0x2) {
                imm >>= 2;
                return new SveDupIdx<uint16_t>(machInst, zd, zn, imm);
            } else if (imm & 0x4) {
                imm >>= 3;
                return new SveDupIdx<uint32_t>(machInst, zd, zn, imm);
            } else if (imm & 0x8) {
                imm >>= 4;
                return new SveDupIdx<uint64_t>(machInst, zd, zn, imm);
            } else if (imm & 0x10) {
                imm >>= 5;
                return new SveDupIdx<__uint128_t>(machInst, zd, zn, imm);
            }
            return new Unknown64(machInst);
        } else if (bits(machInst, 23, 22) != 0x0 &&
                   bits(machInst, 20, 18) == 0x4 && b12_10 == 0x6) {
            unsigned size = (unsigned) bits(machInst, 23, 22);
            IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
            IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
            if (bits(machInst, 17)) {
                if (bits(machInst, 16)) {
                    return decodeSveUnpackU<SveUunpkhi>(size, machInst,
                        zd, zn);
                } else {
                    return decodeSveUnpackU<SveUunpklo>(size, machInst,
                        zd, zn);
                }
            } else {
                if (bits(machInst, 16)) {
                    return decodeSveUnpackS<SveSunpkhi>(size, machInst,
                        zd, zn);
                } else {
                    return decodeSveUnpackS<SveSunpklo>(size, machInst,
                        zd, zn);
                }
            }
        }
        return new Unknown64(machInst);
    } // decodeSvePermUnpred
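    // Illustrative note (not in the original source): for DUP (element)
    // the lowest set bit of the combined imm3h:imm3l:tsz field selects the
    // element size and the bits above it form the element index, e.g.
    // tsz = 0b0010 selects 16-bit elements with index = field >> 2.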

    StaticInstPtr
    decodeSvePermPredicates(ExtMachInst machInst)
    {
        if (bits(machInst, 20) == 0x0 && bits(machInst, 12, 11) != 0x3 &&
            bits(machInst, 9) == 0x0 && bits(machInst, 4) == 0x0) {
            IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
            IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
            IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 20, 16);

            uint8_t size = bits(machInst, 23, 22);

            uint8_t opc = bits(machInst, 12, 10);

            switch (opc) {
              case 0x0:
                return decodeSveBinUnpredU<SveZip1Pred>(size,
                    machInst, zd, zn, zm);
              case 0x1:
                return decodeSveBinUnpredU<SveZip2Pred>(size,
                    machInst, zd, zn, zm);
              case 0x2:
                return decodeSveBinUnpredU<SveUzp1Pred>(size,
                    machInst, zd, zn, zm);
              case 0x3:
                return decodeSveBinUnpredU<SveUzp2Pred>(size,
                    machInst, zd, zn, zm);
              case 0x4:
                return decodeSveBinUnpredU<SveTrn1Pred>(size,
                    machInst, zd, zn, zm);
              case 0x5:
                return decodeSveBinUnpredU<SveTrn2Pred>(size,
                    machInst, zd, zn, zm);
            }
        } else if (bits(machInst, 23, 22) == 0x0 &&
                   bits(machInst, 20, 17) == 0x8 && bits(machInst, 12, 9) == 0x0
                   && bits(machInst, 4) == 0x0) {
            IntRegIndex pd = (IntRegIndex) (uint8_t) bits(machInst, 3, 0);
            IntRegIndex pn = (IntRegIndex) (uint8_t) bits(machInst, 8, 5);
            if (bits(machInst, 16)) {
                return new SvePunpkhi<uint8_t, uint16_t>(machInst, pd, pn);
            } else {
                return new SvePunpklo<uint8_t, uint16_t>(machInst, pd, pn);
            }
        } else if (bits(machInst, 20, 16) == 0x14 &&
                   bits(machInst, 12, 9) == 0x00 && bits(machInst, 4) == 0) {
            uint8_t size = bits(machInst, 23, 22);
            IntRegIndex pd = (IntRegIndex) (uint8_t) bits(machInst, 3, 0);
            IntRegIndex pn = (IntRegIndex) (uint8_t) bits(machInst, 8, 5);
            return decodeSveUnaryUnpredU<SveRevp>(size, machInst, pd, pn);
        }
        return new Unknown64(machInst);
    } // decodeSvePermPredicates

    StaticInstPtr
    decodeSvePermIntlv(ExtMachInst machInst)
    {
        IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
        IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 20, 16);

        uint8_t size = bits(machInst, 23, 22);

        uint8_t opc = bits(machInst, 12, 10);

        switch (opc) {
          case 0x0:
            return decodeSveBinUnpredU<SveZip1>(size, machInst, zd, zn, zm);
          case 0x1:
            return decodeSveBinUnpredU<SveZip2>(size, machInst, zd, zn, zm);
          case 0x2:
            return decodeSveBinUnpredU<SveUzp1>(size, machInst, zd, zn, zm);
          case 0x3:
            return decodeSveBinUnpredU<SveUzp2>(size, machInst, zd, zn, zm);
          case 0x4:
            return decodeSveBinUnpredU<SveTrn1>(size, machInst, zd, zn, zm);
          case 0x5:
            return decodeSveBinUnpredU<SveTrn2>(size, machInst, zd, zn, zm);
        }
        return new Unknown64(machInst);
    } // decodeSvePermIntlv

    StaticInstPtr
    decodeSvePermPred(ExtMachInst machInst)
    {
        uint8_t b13 = bits(machInst, 13);
        uint8_t b23 = bits(machInst, 23);
        switch (bits(machInst, 20, 16)) {
          case 0x0:
            if (!b13) {
                uint8_t size = bits(machInst, 23, 22);
                IntRegIndex pg = (IntRegIndex)(uint8_t) bits(machInst, 12, 10);
                IntRegIndex vn = (IntRegIndex)(uint8_t) bits(machInst, 9, 5);
                IntRegIndex zd = (IntRegIndex)(uint8_t) bits(machInst, 4, 0);
                return decodeSveUnaryPredU<SveCpySimdFpScalar>(size,
                    machInst, zd, vn, pg);
            }
            break;
          case 0x1:
            if (!b13 && b23) {
                // sve_int_perm_compact
                IntRegIndex pg = (IntRegIndex)(uint8_t) bits(machInst, 12, 10);
                IntRegIndex zn = (IntRegIndex)(uint8_t) bits(machInst, 9, 5);
                IntRegIndex zd = (IntRegIndex)(uint8_t) bits(machInst, 4, 0);
                if (bits(machInst, 22)) {
                    return new SveCompact<uint64_t>(machInst, zd, zn, pg);
                } else {
                    return new SveCompact<uint32_t>(machInst, zd, zn, pg);
                }
            }
            break;
          case 0x8:
            if (b13) {
                uint8_t size = bits(machInst, 23, 22);
                IntRegIndex pg = (IntRegIndex)(uint8_t) bits(machInst, 12, 10);
                IntRegIndex rn = makeSP(
                    (IntRegIndex)(uint8_t) bits(machInst, 9, 5));
                IntRegIndex zd = (IntRegIndex)(uint8_t) bits(machInst, 4, 0);
                return decodeSveUnaryPredU<SveCpyScalar>(size,
                    machInst, zd, rn, pg);
            }
            break;
          case 0xc:
            if (!b13) {
                uint8_t size = bits(machInst, 23, 22);
                IntRegIndex pg = (IntRegIndex)(uint8_t) bits(machInst, 12, 10);
                IntRegIndex zdn = (IntRegIndex)(uint8_t) bits(machInst, 4, 0);
                IntRegIndex zm = (IntRegIndex)(uint8_t) bits(machInst, 9, 5);
                return decodeSveBinDestrPredU<SveSplice>(size, machInst,
                    zdn, zm, pg);
            }
            break;
        }
        switch (bits(machInst, 20, 17)) {
          case 0x0:
            if (b13) {
                uint8_t AB = bits(machInst, 16);
                uint8_t size = bits(machInst, 23, 22);
                IntRegIndex pg = (IntRegIndex)(uint8_t) bits(machInst, 12, 10);
                IntRegIndex zn = (IntRegIndex)(uint8_t) bits(machInst, 9, 5);
                IntRegIndex rd = (IntRegIndex)(uint8_t) bits(machInst, 4, 0);
                if (!AB) {
                    return decodeSveUnaryPredU<SveLasta>(size,
                        machInst, rd, zn, pg);
                } else {
                    return decodeSveUnaryPredU<SveLastb>(size,
                        machInst, rd, zn, pg);
                }
            }
            break;
          case 0x1:
            if (!b13) {
                uint8_t AB = bits(machInst, 16);
                uint8_t size = bits(machInst, 23, 22);
                IntRegIndex pg = (IntRegIndex)(uint8_t) bits(machInst, 12, 10);
                IntRegIndex zn = (IntRegIndex)(uint8_t) bits(machInst, 9, 5);
                IntRegIndex vd = (IntRegIndex)(uint8_t) bits(machInst, 4, 0);
                if (!AB) {
                    return decodeSveUnaryPredU<SveLastaf>(size,
                        machInst, vd, zn, pg);
                } else {
                    return decodeSveUnaryPredU<SveLastbf>(size,
                        machInst, vd, zn, pg);
                }
            }
            break;
          case 0x4:
            if (!b13) {
                uint8_t AB = bits(machInst, 16);
                uint8_t size = bits(machInst, 23, 22);
                IntRegIndex pg = (IntRegIndex)(uint8_t) bits(machInst, 12, 10);
                IntRegIndex zm = (IntRegIndex)(uint8_t) bits(machInst, 9, 5);
                IntRegIndex zdn = (IntRegIndex)(uint8_t) bits(machInst, 4, 0);
                if (!AB) {
                    return decodeSveUnaryPredU<SveClastav>(size,
                        machInst, zdn, zm, pg);
                } else {
                    return decodeSveUnaryPredU<SveClastbv>(size,
                        machInst, zdn, zm, pg);
                }
            }
            break;
          case 0x5:
            if (!b13) {
                uint8_t AB = bits(machInst, 16);
                uint8_t size = bits(machInst, 23, 22);
                IntRegIndex pg = (IntRegIndex)(uint8_t) bits(machInst, 12, 10);
                IntRegIndex zm = (IntRegIndex)(uint8_t) bits(machInst, 9, 5);
                IntRegIndex zdn = (IntRegIndex)(uint8_t) bits(machInst, 4, 0);
                if (!AB) {
                    return decodeSveUnaryPredU<SveClastaf>(size,
                        machInst, zdn, zm, pg);
                } else {
                    return decodeSveUnaryPredU<SveClastbf>(size,
                        machInst, zdn, zm, pg);
                }
            }
            break;
          case 0x8:
            if (b13) {
                uint8_t AB = bits(machInst, 16);
                uint8_t size = bits(machInst, 23, 22);
                IntRegIndex pg = (IntRegIndex)(uint8_t) bits(machInst, 12, 10);
                IntRegIndex zm = (IntRegIndex)(uint8_t) bits(machInst, 9, 5);
                IntRegIndex rdn = (IntRegIndex)(uint8_t) bits(machInst, 4, 0);
                if (!AB) {
                    return decodeSveUnaryPredU<SveClasta>(size,
                        machInst, rdn, zm, pg);
                } else {
                    return decodeSveUnaryPredU<SveClastb>(size,
                        machInst, rdn, zm, pg);
                }
            }
            break;
        }
        if (bits(machInst, 20, 18) == 0x1 && !b13) {
            unsigned size = (unsigned) bits(machInst, 23, 22);
            IntRegIndex pg = (IntRegIndex)(uint8_t) bits(machInst, 12, 10);
            IntRegIndex zn = (IntRegIndex)(uint8_t) bits(machInst, 9, 5);
            IntRegIndex zd = (IntRegIndex)(uint8_t) bits(machInst, 4, 0);
            uint8_t opc17_16 = bits(machInst, 17, 16);
            switch (opc17_16) {
              case 0x00:
                switch (size) {
                  case 1:
                    return new SveRevb<uint16_t>(machInst, zd, zn, pg);
                  case 2:
                    return new SveRevb<uint32_t>(machInst, zd, zn, pg);
                  case 3:
                    return new SveRevb<uint64_t>(machInst, zd, zn, pg);
                }
                break;
              case 0x01:
                switch (size) {
                  case 2:
                    return new SveRevh<uint32_t>(machInst, zd, zn, pg);
                  case 3:
                    return new SveRevh<uint64_t>(machInst, zd, zn, pg);
                }
                break;
              case 0x02:
                if (size == 3) {
                    return new SveRevw<uint64_t>(machInst, zd, zn, pg);
                }
                break;
              case 0x03:
                return decodeSveUnaryPredU<SveRbit>(
                    size, machInst, zd, zn, pg);
            }
        }
        return new Unknown64(machInst);
    } // decodeSvePermPred

    StaticInstPtr
    decodeSveSelVec(ExtMachInst machInst)
    {
        IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 13, 10);
        IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 20, 16);

        uint8_t size = bits(machInst, 23, 22);

        return decodeSveBinConstrPredU<SveSel>(size,
            machInst, zd, zn, zm, pg, SvePredType::SELECT);
    } // decodeSveSelVec

    StaticInstPtr
    decodeSveIntCmpVec(ExtMachInst machInst)
    {
        uint8_t size = bits(machInst, 23, 22);
        uint8_t b14 = bits(machInst, 14);
        uint8_t opc =
            bits(machInst, 15) << 2 |
            bits(machInst, 13) << 1 |
            bits(machInst, 4);
        IntRegIndex pd = (IntRegIndex) (uint8_t) bits(machInst, 3, 0);
        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10);
        IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
        IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 20, 16);
        if (b14 && size != 3) {
            // sve_int_cmp_1
            switch (opc) {
              case 0:
                return decodeSveTerPredWS<SveCmpgew>(size,
                    machInst, pd, zn, zm, pg);
              case 1:
                return decodeSveTerPredWS<SveCmpgtw>(size,
                    machInst, pd, zn, zm, pg);
              case 2:
                return decodeSveTerPredWS<SveCmpltw>(size,
                    machInst, pd, zn, zm, pg);
              case 3:
                return decodeSveTerPredWS<SveCmplew>(size,
                    machInst, pd, zn, zm, pg);
              case 4:
                return decodeSveTerPredWU<SveCmphsw>(size,
                    machInst, pd, zn, zm, pg);
              case 5:
                return decodeSveTerPredWU<SveCmphiw>(size,
                    machInst, pd, zn, zm, pg);
              case 6:
                return decodeSveTerPredWU<SveCmplow>(size,
                    machInst, pd, zn, zm, pg);
              case 7:
                return decodeSveTerPredWU<SveCmplsw>(size,
                    machInst, pd, zn, zm, pg);
            }
        } else if (!b14) {
            switch (opc) {
              case 0:
                return decodeSveTerPredU<SveCmphs>(size,
                    machInst, pd, zn, zm, pg);
              case 1:
                return decodeSveTerPredU<SveCmphi>(size,
                    machInst, pd, zn, zm, pg);
              case 2:
                if (size != 3) {
                    return decodeSveTerPredWU<SveCmpeqw>(size,
                        machInst, pd, zn, zm, pg);
                }
                break;
              case 3:
                if (size != 3) {
                    return decodeSveTerPredWU<SveCmpnew>(size,
                        machInst, pd, zn, zm, pg);
                }
                break;
              case 4:
                return decodeSveTerPredS<SveCmpge>(size,
                    machInst, pd, zn, zm, pg);
              case 5:
                return decodeSveTerPredS<SveCmpgt>(size,
                    machInst, pd, zn, zm, pg);
              case 6:
                return decodeSveTerPredU<SveCmpeq>(size,
                    machInst, pd, zn, zm, pg);
              case 7:
                return decodeSveTerPredU<SveCmpne>(size,
                    machInst, pd, zn, zm, pg);
            }
        }
        return new Unknown64(machInst);
    } // decodeSveIntCmpVec
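    // Illustrative note (not in the original source): in the wide-element
    // forms (SveCmp*w) zm is read as 64-bit elements, and each narrower
    // element of zn is compared against the 64-bit element of zm occupying
    // the same 64-bit lane, which is why size == 3 is excluded there.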

    StaticInstPtr
    decodeSveIntCmpUImm(ExtMachInst machInst)
    {
        uint8_t cmp = bits(machInst, 13) << 1 | bits(machInst, 4);
        IntRegIndex pd = (IntRegIndex) (uint8_t) bits(machInst, 3, 0);
        IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10);
        int64_t imm = (int64_t) bits(machInst, 20, 14);
        uint8_t size = bits(machInst, 23, 22);
        switch (cmp) {
          case 0:
            return decodeSveTerImmPredU<SveCmphsi>(size,
                machInst, pd, zn, imm, pg);
          case 1:
            return decodeSveTerImmPredU<SveCmphii>(size,
                machInst, pd, zn, imm, pg);
          case 2:
            return decodeSveTerImmPredU<SveCmploi>(size,
                machInst, pd, zn, imm, pg);
          case 3:
            return decodeSveTerImmPredU<SveCmplsi>(size,
                machInst, pd, zn, imm, pg);
        }
        return new Unknown64(machInst);
    } // decodeSveIntCmpUImm

    StaticInstPtr
    decodeSveIntCmpSImm(ExtMachInst machInst)
    {
        uint8_t opc = bits(machInst, 15) << 2 | bits(machInst, 13) << 1 |
            bits(machInst, 4);
        IntRegIndex pd = (IntRegIndex) (uint8_t) bits(machInst, 3, 0);
        IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10);
        int64_t imm = sext<5>(bits(machInst, 20, 16));
        uint8_t size = bits(machInst, 23, 22);
        switch (opc) {
          case 0:
            return decodeSveTerImmPredS<SveCmpgei>(size,
                machInst, pd, zn, imm, pg);
          case 1:
            return decodeSveTerImmPredS<SveCmpgti>(size,
                machInst, pd, zn, imm, pg);
          case 2:
            return decodeSveTerImmPredS<SveCmplti>(size,
                machInst, pd, zn, imm, pg);
          case 3:
            return decodeSveTerImmPredS<SveCmplei>(size,
                machInst, pd, zn, imm, pg);
          case 4:
            return decodeSveTerImmPredU<SveCmpeqi>(size,
                machInst, pd, zn, imm, pg);
          case 5:
            return decodeSveTerImmPredU<SveCmpnei>(size,
                machInst, pd, zn, imm, pg);
        }
        return new Unknown64(machInst);
    } // decodeSveIntCmpSImm
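    // Illustrative note (not in the original source): the unsigned
    // compares above take a 7-bit immediate in [0, 127] (bits 20:14),
    // while the signed compares take a 5-bit immediate sign-extended to
    // the range [-16, 15].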

    StaticInstPtr
    decodeSvePredLogicalOps(ExtMachInst machInst)
    {
        IntRegIndex pd = (IntRegIndex) (uint8_t) bits(machInst, 3, 0);
        IntRegIndex pn = (IntRegIndex) (uint8_t) bits(machInst, 8, 5);
        IntRegIndex pm = (IntRegIndex) (uint8_t) bits(machInst, 19, 16);
        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 13, 10);
        uint8_t opc = (bits(machInst, 23, 22) << 2) |
            (bits(machInst, 9) << 1) |
            bits(machInst, 4);
        switch (opc) {
          case 0x0:
            return new SvePredAnd<uint8_t>(machInst, pd, pn, pm, pg);
          case 0x1:
            return new SvePredBic<uint8_t>(machInst, pd, pn, pm, pg);
          case 0x2:
            return new SvePredEor<uint8_t>(machInst, pd, pn, pm, pg);
          case 0x3:
            return new SvePredSel<uint8_t>(machInst, pd, pn, pm, pg, true);
          case 0x4:
            return new SvePredAnds<uint8_t>(machInst, pd, pn, pm, pg);
          case 0x5:
            return new SvePredBics<uint8_t>(machInst, pd, pn, pm, pg);
          case 0x6:
            return new SvePredEors<uint8_t>(machInst, pd, pn, pm, pg);
          case 0x8:
            return new SvePredOrr<uint8_t>(machInst, pd, pn, pm, pg);
          case 0x9:
            return new SvePredOrn<uint8_t>(machInst, pd, pn, pm, pg);
          case 0xa:
            return new SvePredNor<uint8_t>(machInst, pd, pn, pm, pg);
          case 0xb:
            return new SvePredNand<uint8_t>(machInst, pd, pn, pm, pg);
          case 0xc:
            return new SvePredOrrs<uint8_t>(machInst, pd, pn, pm, pg);
          case 0xd:
            return new SvePredOrns<uint8_t>(machInst, pd, pn, pm, pg);
          case 0xe:
            return new SvePredNors<uint8_t>(machInst, pd, pn, pm, pg);
          case 0xf:
            return new SvePredNands<uint8_t>(machInst, pd, pn, pm, pg);
        }

        return new Unknown64(machInst);
    } // decodeSvePredLogicalOps

    StaticInstPtr
    decodeSvePropBreakFromPrevPartition(ExtMachInst machInst)
    {
        if (bits(machInst, 23) == 0x0 && bits(machInst, 9) == 0x0) {
            uint8_t opc = (bits(machInst, 22) << 1) | bits(machInst, 4);
            IntRegIndex pm = (IntRegIndex)(uint8_t) bits(machInst, 19, 16);
            IntRegIndex pg = (IntRegIndex)(uint8_t) bits(machInst, 13, 10);
            IntRegIndex pn = (IntRegIndex)(uint8_t) bits(machInst, 8, 5);
            IntRegIndex pd = (IntRegIndex)(uint8_t) bits(machInst, 3, 0);
            switch (opc) {
              case 0x0:
                // BRKPA
                return new SveBrkpa(machInst, pd, pn, pm, pg);
              case 0x1:
                // BRKPB
                return new SveBrkpb(machInst, pd, pn, pm, pg);
              case 0x2:
                // BRKPAS
                return new SveBrkpas(machInst, pd, pn, pm, pg);
              case 0x3:
                // BRKPBS
                return new SveBrkpbs(machInst, pd, pn, pm, pg);
            }
        }
        return new Unknown64(machInst);
    } // decodeSvePropBreakFromPrevPartition

    StaticInstPtr
    decodeSvePartitionBreakCond(ExtMachInst machInst)
    {
        if (bits(machInst, 18, 16) == 0x0 && bits(machInst, 9) == 0x0) {
            bool flagset = bits(machInst, 22);
            bool merging = bits(machInst, 4);
            IntRegIndex pg = (IntRegIndex)(uint8_t) bits(machInst, 13, 10);
            IntRegIndex pn = (IntRegIndex)(uint8_t) bits(machInst, 8, 5);
            IntRegIndex pd = (IntRegIndex)(uint8_t) bits(machInst, 3, 0);
            if (bits(machInst, 23)) {
                if (flagset) {
                    if (!merging) {
                        return new SveBrkbs(machInst, pd, pg, pn);
                    }
                } else {
                    if (merging) {
                        return new SveBrkbm(machInst, pd, pg, pn);
                    } else {
                        return new SveBrkbz(machInst, pd, pg, pn);
                    }
                }
            } else {
                if (flagset) {
                    if (!merging) {
                        return new SveBrkas(machInst, pd, pg, pn);
                    }
                } else {
                    if (merging) {
                        return new SveBrkam(machInst, pd, pg, pn);
                    } else {
                        return new SveBrkaz(machInst, pd, pg, pn);
                    }
                }
            }
            return new Unknown64(machInst);
        }
        return new Unknown64(machInst);
    } // decodeSvePartitionBreakCond

    StaticInstPtr
    decodeSvePredTest(ExtMachInst machInst)
    {
        if (bits(machInst, 23, 22) == 0x1 &&
            bits(machInst, 18, 16) == 0x0 &&
            bits(machInst, 9) == 0x0) {
            IntRegIndex pn = (IntRegIndex) (uint8_t) bits(machInst, 8, 5);
            IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 13, 10);
            return new SvePtest(machInst, pn, pg);
        }
        return new Unknown64(machInst);
    } // decodeSvePredTest

    StaticInstPtr
    decodeSvePredIteration(ExtMachInst machInst)
    {
        uint8_t size = bits(machInst, 23, 22);
        uint8_t opc18_16 = bits(machInst, 18, 16);
        uint8_t opc10_9 = bits(machInst, 10, 9);
        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 8, 5);
        IntRegIndex pdn = (IntRegIndex) (uint8_t) bits(machInst, 3, 0);
        if (opc18_16 == 0x1 && opc10_9 == 0x2) {
            return decodeSveUnaryPredU<SvePnext>(size,
                machInst, pdn, pdn, pg);
        } else if (size == 0x1 && opc18_16 == 0x0 && opc10_9 == 0) {
            return new SvePfirst<uint8_t>(machInst, pdn, pdn, pg);
        }
        return new Unknown64(machInst);
    } // decodeSvePredIteration

    StaticInstPtr
    decodeSveInitPred(ExtMachInst machInst)
    {
        IntRegIndex pd = (IntRegIndex) (uint8_t) bits(machInst, 3, 0);
        unsigned size = bits(machInst, 23, 22);
        uint8_t imm = bits(machInst, 9, 5);

        if (bits(machInst, 16) == 0x0) {
            return decodeSvePtrue<SvePtrue>(size, machInst, pd, imm);
        } else {
            return decodeSvePtrue<SvePtrues>(size, machInst, pd, imm);
        }
    } // decodeSveInitPred

    StaticInstPtr
    decodeSveZeroPredReg(ExtMachInst machInst)
    {
        if (bits(machInst, 23, 22) == 0x0 && bits(machInst, 18, 16) == 0x0) {
            IntRegIndex pd = (IntRegIndex) (uint8_t) bits(machInst, 3, 0);
            return new SvePfalse(machInst, pd);
        }
        return new Unknown64(machInst);
    } // decodeSveZeroPredReg

    StaticInstPtr
    decodeSvePropBreakToNextPartition(ExtMachInst machInst)
    {
        if (bits(machInst, 23) == 0x0 &&
            bits(machInst, 18, 16) == 0x0 &&
            bits(machInst, 9) == 0x0 &&
            bits(machInst, 4) == 0x0) {
            IntRegIndex pg = (IntRegIndex)(uint8_t) bits(machInst, 13, 10);
            IntRegIndex pn = (IntRegIndex)(uint8_t) bits(machInst, 8, 5);
            IntRegIndex pdm = (IntRegIndex)(uint8_t) bits(machInst, 3, 0);
            if (bits(machInst, 22) == 0x0) {
                return new SveBrkn(machInst, pdm, pn, pdm, pg);
            } else {
                return new SveBrkns(machInst, pdm, pn, pdm, pg);
            }
        }
        return new Unknown64(machInst);
    } // decodeSvePropBreakToNextPartition

    StaticInstPtr
    decodeSveReadPredFromFFRPred(ExtMachInst machInst)
    {
        if (bits(machInst, 23)) {
            return new Unknown64(machInst);
        }
        IntRegIndex pd = (IntRegIndex)(uint8_t) bits(machInst, 3, 0);
        IntRegIndex pg = (IntRegIndex)(uint8_t) bits(machInst, 8, 5);
        if (bits(machInst, 22)) {
            return new SveRdffrsPred(machInst, pd, pg);
        } else {
            return new SveRdffrPred(machInst, pd, pg);
        }
    } // decodeSveReadPredFromFFRPred

    StaticInstPtr
    decodeSveReadPredFromFFRUnpred(ExtMachInst machInst)
    {
        if (bits(machInst, 23, 22) != 0) {
            return new Unknown64(machInst);
        }
        IntRegIndex pd = (IntRegIndex)(uint8_t) bits(machInst, 3, 0);
        return new SveRdffrUnpred(machInst, pd);
    } // decodeSveReadPredFromFFRUnpred

    StaticInstPtr
    decodeSvePredGen(ExtMachInst machInst)
    {
        uint8_t b_20_15 = (bits(machInst, 20) << 1) | bits(machInst, 15);
        switch (b_20_15) {
          case 0x0:
            return decodeSvePredLogicalOps(machInst);
          case 0x1:
            return decodeSvePropBreakFromPrevPartition(machInst);
          case 0x2:
            if (bits(machInst, 19) == 0x0) {
                return decodeSvePartitionBreakCond(machInst);
            } else {
                return decodeSvePropBreakToNextPartition(machInst);
            }
          case 0x3:
            if (bits(machInst, 19) == 0x0) {
                if (bits(machInst, 4, 0) == 0x0) {
                    return decodeSvePredTest(machInst);
                } else {
                    break;
                }
            } else {
                switch (bits(machInst, 13, 12)) {
                  case 0x0:
                    if (bits(machInst, 11) == 0x0 &&
                        bits(machInst, 4) == 0x0) {
                        return decodeSvePredIteration(machInst);
                    } else {
                        break;
                    }
                  case 0x1:
                    break;
                  case 0x2:
                    if (bits(machInst, 11, 10) == 0x0 &&
                        bits(machInst, 4) == 0x0) {
                        return decodeSveInitPred(machInst);
                    } else if (bits(machInst, 11, 4) == 0x40) {
                        return decodeSveZeroPredReg(machInst);
                    }
                    break;
                  case 0x3:
                    if (bits(machInst, 11) == 0x0) {
                        if (bits(machInst, 16) == 0x0) {
                            return decodeSveReadPredFromFFRPred(machInst);
                        } else if (bits(machInst, 8, 4) == 0x0) {
                            return decodeSveReadPredFromFFRUnpred(machInst);
                        }
                    }
                    break;
                }
            }
            break;
        }
        return new Unknown64(machInst);
    } // decodeSvePredGen

    StaticInstPtr
    decodeSvePredCount(ExtMachInst machInst)
    {
        uint8_t b19 = bits(machInst, 19);
        if (b19) {
            uint8_t b13_11 = bits(machInst, 13, 11);
            switch (b13_11) {
              case 0x0:
                {
                    if (bits(machInst, 10, 9) != 0x0) {
                        return new Unknown64(machInst);
                    }
                    IntRegIndex zdn = (IntRegIndex) (uint8_t)
                        bits(machInst, 4, 0);
                    IntRegIndex pg = (IntRegIndex) (uint8_t)
                        bits(machInst, 8, 5);
                    uint8_t esize = bits(machInst, 23, 22);
                    if (esize == 0x0) {
                        return new Unknown64(machInst);
                    }
                    uint8_t opc = bits(machInst, 18, 17);
                    if (opc == 0x0) {
                        uint8_t u = bits(machInst, 16);
                        if (u) {
                            return decodeSvePredCountVU<SveUqincpv>(esize,
                                machInst, zdn, pg);
                        } else {
                            return decodeSvePredCountVS<SveSqincpv>(esize,
                                machInst, zdn, pg);
                        }
                    } else if (opc == 0x1) {
                        uint8_t u = bits(machInst, 16);
                        if (u) {
                            return decodeSvePredCountVU<SveUqdecpv>(esize,
                                machInst, zdn, pg);
                        } else {
                            return decodeSvePredCountVS<SveSqdecpv>(esize,
                                machInst, zdn, pg);
                        }
                    } else if (opc == 0x2) {
                        uint8_t d = bits(machInst, 16);
                        if (d) {
                            return decodeSvePredCountVU<SveDecpv>(esize,
                                machInst, zdn, pg);
                        } else {
                            return decodeSvePredCountVU<SveIncpv>(esize,
                                machInst, zdn, pg);
                        }
                    }
                }
                break;
              case 0x1:
                {
                    IntRegIndex rdn = (IntRegIndex) (uint8_t)
                        bits(machInst, 4, 0);
                    IntRegIndex pg = (IntRegIndex) (uint8_t)
                        bits(machInst, 8, 5);
                    uint8_t esize = bits(machInst, 23, 22);
                    uint8_t opc = bits(machInst, 18, 17);
                    uint8_t opc2 = bits(machInst, 10, 9);
                    if (opc == 0x0) {
                        uint8_t u = bits(machInst, 16);
                        if (opc2 == 0x0) {
                            if (u) {
                                return decodeSvePredCountU<SveUqincp32>(esize,
                                    machInst, rdn, pg);
                            } else {
                                return decodeSvePredCountS<SveSqincp32>(esize,
                                    machInst, rdn, pg);
                            }
                        } else if (opc2 == 0x2) {
                            if (u) {
                                return decodeSvePredCountU<SveUqincp64>(esize,
                                    machInst, rdn, pg);
                            } else {
                                return decodeSvePredCountS<SveSqincp64>(esize,
                                    machInst, rdn, pg);
                            }
                        }
                    } else if (opc == 0x1) {
                        uint8_t u = bits(machInst, 16);
                        if (opc2 == 0x0) {
                            if (u) {
                                return decodeSvePredCountU<SveUqdecp32>(esize,
                                    machInst, rdn, pg);
                            } else {
                                return decodeSvePredCountS<SveSqdecp32>(esize,
                                    machInst, rdn, pg);
                            }
                        } else if (opc2 == 0x2) {
                            if (u) {
                                return decodeSvePredCountU<SveUqdecp64>(esize,
                                    machInst, rdn, pg);
                            } else {
                                return decodeSvePredCountS<SveSqdecp64>(esize,
                                    machInst, rdn, pg);
                            }
                        }
                    } else if (opc == 0x2) {
                        if (opc2 == 0x0) {
                            if (bits(machInst, 16)) {
                                return decodeSvePredCountU<SveDecp>(esize,
                                    machInst, rdn, pg);
                            } else {
                                return decodeSvePredCountU<SveIncp>(esize,
                                    machInst, rdn, pg);
                            }
                        }
                    }
                }
                break;
              case 0x2:
                if (bits(machInst, 23, 22) == 0x0 &&
                    bits(machInst, 10, 9) == 0x0 &&
                    bits(machInst, 4, 0) == 0x0) {
                    uint8_t opc = bits(machInst, 18, 16);
                    if (opc == 0x0) {
                        IntRegIndex pn = (IntRegIndex)(uint8_t)
                            bits(machInst, 8, 5);
                        return new SveWrffr(machInst, pn);
                    } else if (opc == 0x4 && bits(machInst, 8, 5) == 0x0) {
                        return new SveSetffr(machInst);
                    }
                }
                break;
            }
        } else {
            uint8_t opc = bits(machInst, 18, 16);
            if (opc == 0 && bits(machInst, 9) == 0) {
                IntRegIndex rd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
                IntRegIndex pn = (IntRegIndex) (uint8_t) bits(machInst, 8, 5);
                IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 13,
                    10);
                uint8_t esize = bits(machInst, 23, 22);
                return decodeSveUnaryPredU<SveCntp>(esize,
                    machInst, rd, pn, pg);
            }
        }
        return new Unknown64(machInst);
    } // decodeSvePredCount
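    // Illustrative note (not in the original source): the predicate-count
    // family (CNTP, INCP/DECP, SQINCP/UQDECP, ...) counts the active
    // elements of pg and then adds, subtracts or saturating-accumulates
    // that count into the scalar or vector destination.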

    StaticInstPtr
    decodeSveIntCmpSca(ExtMachInst machInst)
    {
        uint16_t b23_13_12_11_10_3_2_1_0 = (uint16_t)
            (bits(machInst, 23) << 8) | (bits(machInst, 13, 10) << 4) |
            bits(machInst, 3, 0);
        uint8_t b10 = (uint8_t) bits(machInst, 10);
        IntRegIndex rn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
        IntRegIndex rm = (IntRegIndex) (uint8_t) bits(machInst, 20, 16);
        if (b23_13_12_11_10_3_2_1_0 == 0x180) {
            uint8_t s64b = bits(machInst, 22);
            uint8_t ne = bits(machInst, 4);
            if (ne) {
                if (s64b) {
                    return new SveCtermne<uint64_t>(machInst, rn, rm);
                } else {
                    return new SveCtermne<uint32_t>(machInst, rn, rm);
                }
            } else {
                if (s64b) {
                    return new SveCtermeq<uint64_t>(machInst, rn, rm);
                } else {
                    return new SveCtermeq<uint32_t>(machInst, rn, rm);
                }
            }
        } else if (b10) {
            IntRegIndex pd = (IntRegIndex) (uint8_t) bits(machInst, 3, 0);
            uint8_t size = (uint8_t) bits(machInst, 23, 22);
            uint8_t s64b = (uint8_t) bits(machInst, 12);
            uint8_t opc = (uint8_t) bits(machInst, 11) << 1 |
                bits(machInst, 4);
            if (s64b) {
                switch (opc) {
                  case 0:
                    return decodeSveBinUnpredS<SveWhilelt64>(size,
                        machInst, pd, rn, rm);
                  case 1:
                    return decodeSveBinUnpredS<SveWhilele64>(size,
                        machInst, pd, rn, rm);
                  case 2:
                    return decodeSveBinUnpredU<SveWhilelo64>(size,
                        machInst, pd, rn, rm);
                  case 3:
                    return decodeSveBinUnpredU<SveWhilels64>(size,
                        machInst, pd, rn, rm);
                }
            } else {
                switch (opc) {
                  case 0:
                    return decodeSveBinUnpredS<SveWhilelt32>(size,
                        machInst, pd, rn, rm);
                  case 1:
                    return decodeSveBinUnpredS<SveWhilele32>(size,
                        machInst, pd, rn, rm);
                  case 2:
                    return decodeSveBinUnpredU<SveWhilelo32>(size,
                        machInst, pd, rn, rm);
                  case 3:
                    return decodeSveBinUnpredU<SveWhilels32>(size,
                        machInst, pd, rn, rm);
                }
            }
        }
        return new Unknown64(machInst);
    } // decodeSveIntCmpSca
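    // Illustrative note (not in the original source): WHILELT/LE/LO/LS set
    // pd[i] while rn + i <cond> rm holds, generating the governing
    // predicate for a strip-mined loop, while CTERMEQ/NE only compare the
    // two scalars and set the condition flags for the termination test.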

    StaticInstPtr
    decodeSveIntWideImmUnpred0(ExtMachInst machInst)
    {
        IntRegIndex zdn = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        uint64_t imm = bits(machInst, 12, 5);
        uint8_t sh = bits(machInst, 13);
        uint8_t size = bits(machInst, 23, 22);

        if (sh) {
            if (size == 0x0) {
                return new Unknown64(machInst);
            }
            imm <<= 8;
        }

        switch (bits(machInst, 18, 16)) {
          case 0x0:
            return decodeSveWideImmUnpredU<SveAddImm>(
                size, machInst, zdn, imm);
          case 0x1:
            return decodeSveWideImmUnpredU<SveSubImm>(
                size, machInst, zdn, imm);
          case 0x3:
            return decodeSveWideImmUnpredU<SveSubrImm>(
                size, machInst, zdn, imm);
          case 0x4:
            return decodeSveWideImmUnpredS<SveSqaddImm>(
                size, machInst, zdn, imm);
          case 0x5:
            return decodeSveWideImmUnpredU<SveUqaddImm>(
                size, machInst, zdn, imm);
          case 0x6:
            return decodeSveWideImmUnpredS<SveSqsubImm>(
                size, machInst, zdn, imm);
          case 0x7:
            return decodeSveWideImmUnpredU<SveUqsubImm>(
                size, machInst, zdn, imm);
        }

        return new Unknown64(machInst);
    } // decodeSveIntWideImmUnpred0

    StaticInstPtr
    decodeSveIntWideImmUnpred1(ExtMachInst machInst)
    {
        IntRegIndex zdn = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        uint64_t imm = bits(machInst, 12, 5);
        uint8_t size = bits(machInst, 23, 22);

        switch (bits(machInst, 18, 16)) {
          case 0x0:
            return decodeSveWideImmUnpredS<SveSmaxImm>(
                size, machInst, zdn, sext<8>(imm));
          case 0x1:
            return decodeSveWideImmUnpredU<SveUmaxImm>(
                size, machInst, zdn, imm);
          case 0x2:
            return decodeSveWideImmUnpredS<SveSminImm>(
                size, machInst, zdn, sext<8>(imm));
          case 0x3:
            return decodeSveWideImmUnpredU<SveUminImm>(
                size, machInst, zdn, imm);
        }

        return new Unknown64(machInst);
    } // decodeSveIntWideImmUnpred1

    StaticInstPtr
    decodeSveIntWideImmUnpred2(ExtMachInst machInst)
    {
        IntRegIndex zdn = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        uint64_t imm = bits(machInst, 12, 5);
        uint8_t size = bits(machInst, 23, 22);

        if (bits(machInst, 18, 16) == 0x0) {
            return decodeSveWideImmUnpredU<SveMulImm>(
                size, machInst, zdn, sext<8>(imm));
        }

        return new Unknown64(machInst);
    } // decodeSveIntWideImmUnpred2

    StaticInstPtr
    decodeSveIntWideImmUnpred3(ExtMachInst machInst)
    {
        IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        uint64_t imm = bits(machInst, 12, 5);
        uint8_t sh = bits(machInst, 13);
        uint8_t size = bits(machInst, 23, 22);

        if (sh) {
            if (size == 0x0) {
                return new Unknown64(machInst);
            }
            imm <<= 8;
        }

        if (bits(machInst, 18, 17) == 0x0) {
            if (sh) {
                return decodeSveWideImmUnpredU<SveDupImm>(
                    size, machInst, zd, sext<16>(imm));
            } else {
                return decodeSveWideImmUnpredU<SveDupImm>(
                    size, machInst, zd, sext<8>(imm));
            }
        }

        return new Unknown64(machInst);
    } // decodeSveIntWideImmUnpred3

    StaticInstPtr
    decodeSveIntWideImmUnpred4(ExtMachInst machInst)
    {
        IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        uint8_t size = bits(machInst, 23, 22);

        if (bits(machInst, 18, 17) == 0x0 && size != 0x0) {
            uint64_t imm = vfp_modified_imm(bits(machInst, 12, 5),
                decode_fp_data_type(size));
            return decodeSveWideImmUnpredF<SveFdup>(size, machInst, zd, imm);
        }

        return new Unknown64(machInst);
    } // decodeSveIntWideImmUnpred4

    StaticInstPtr
    decodeSveIntWideImmUnpred(ExtMachInst machInst)
    {
        switch (bits(machInst, 20, 19)) {
          case 0x0:
            if (bits(machInst, 18, 16) != 0x2) {
                return decodeSveIntWideImmUnpred0(machInst);
            }
            break;
          case 0x1:
            if (bits(machInst, 13) == 0x0) {
                return decodeSveIntWideImmUnpred1(machInst);
            }
            break;
          case 0x2:
            if (bits(machInst, 13) == 0x0) {
                return decodeSveIntWideImmUnpred2(machInst);
            }
            break;
          case 0x3:
            if (bits(machInst, 16) == 0x0) {
                return decodeSveIntWideImmUnpred3(machInst);
            } else if (bits(machInst, 13) == 0x0) {
                return decodeSveIntWideImmUnpred4(machInst);
            }
            break;
        }
        return new Unknown64(machInst);
    } // decodeSveIntWideImmUnpred

    StaticInstPtr
    decodeSveMultiplyAddUnpred(ExtMachInst machInst)
    {
        IntRegIndex zda = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
        IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 20, 16);

        uint8_t size = (uint8_t) bits(machInst, 23, 22);

        if (bits(machInst, 12, 11) != 0 || !(size & 0x2)) {
            return new Unknown64(machInst);
        }

        uint8_t usig = (uint8_t) bits(machInst, 10);
        if (size & 0x1) {
            if (usig) {
                return new SveUdotv<uint16_t, uint64_t>(machInst,
                    zda, zn, zm);
            } else {
                return new SveSdotv<int16_t, int64_t>(machInst,
                    zda, zn, zm);
            }
        } else {
            if (usig) {
                return new SveUdotv<uint8_t, uint32_t>(machInst,
                    zda, zn, zm);
            } else {
                return new SveSdotv<int8_t, int32_t>(machInst,
                    zda, zn, zm);
            }
        }
    } // decodeSveMultiplyAddUnpred

    StaticInstPtr
    decodeSveMultiplyIndexed(ExtMachInst machInst)
    {
        IntRegIndex zda = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);

        uint8_t size = (uint8_t) bits(machInst, 23, 22);

        if (bits(machInst, 12, 11) != 0 || !(size & 0x2)) {
            return new Unknown64(machInst);
        }

        uint8_t usig = (uint8_t) bits(machInst, 10);
        if (size & 0x1) {
            IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 19, 16);
            uint8_t i1 = (uint8_t) bits(machInst, 20);
            if (usig) {
                return new SveUdoti<uint16_t, uint64_t>(machInst,
                    zda, zn, zm, i1);
            } else {
                return new SveSdoti<int16_t, int64_t>(machInst,
                    zda, zn, zm, i1);
            }
        } else {
            IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 18, 16);
            uint8_t i2 = (uint8_t) bits(machInst, 20, 19);
            if (usig) {
                return new SveUdoti<uint8_t, uint32_t>(machInst,
                    zda, zn, zm, i2);
            } else {
                return new SveSdoti<int8_t, int32_t>(machInst,
                    zda, zn, zm, i2);
            }
        }
    } // decodeSveMultiplyIndexed
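    // Illustrative note (not in the original source): in the indexed DOT
    // forms zm is restricted so the encoding can carry an element index:
    // the 32-bit accumulator form uses z0-z7 with a 2-bit index (i2) and
    // the 64-bit form uses z0-z15 with a 1-bit index (i1).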

    StaticInstPtr
    decodeSveFpFastReduc(ExtMachInst machInst)
    {
        IntRegIndex vd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10);

        uint8_t size = bits(machInst, 23, 22);

        if (size == 0x0) {
            return new Unknown64(machInst);
        }

        switch (bits(machInst, 18, 16)) {
          case 0x0:
            return decodeSveUnaryPredF<SveFaddv>(size, machInst, vd, zn, pg);
          case 0x4:
            return decodeSveUnaryPredF<SveFmaxnmv>(size, machInst, vd, zn, pg);
          case 0x5:
            return decodeSveUnaryPredF<SveFminnmv>(size, machInst, vd, zn, pg);
          case 0x6:
            return decodeSveUnaryPredF<SveFmaxv>(size, machInst, vd, zn, pg);
          case 0x7:
            return decodeSveUnaryPredF<SveFminv>(size, machInst, vd, zn, pg);
        }

        return new Unknown64(machInst);
    } // decodeSveFpFastReduc

    StaticInstPtr
    decodeSveFpUnaryUnpred(ExtMachInst machInst)
    {
        IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);

        uint8_t size = (uint8_t) bits(machInst, 23, 22);
        if (size == 0) {
            return new Unknown64(machInst);
        }
        uint8_t opc = (uint8_t) bits(machInst, 18, 16);

        switch (opc) {
          case 0x6:
            return decodeSveUnaryUnpredF<SveFrecpe>(
                size, machInst, zd, zn);
          case 0x7:
            return decodeSveUnaryUnpredF<SveFrsqrte>(
                size, machInst, zd, zn);
        }
        return new Unknown64(machInst);
    } // decodeSveFpUnaryUnpred

    StaticInstPtr
    decodeSveFpCmpZero(ExtMachInst machInst)
    {
        IntRegIndex pd = (IntRegIndex) (uint8_t) bits(machInst, 3, 0);
        IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10);

        uint8_t size = bits(machInst, 23, 22);
        if (size == 0) {
            return new Unknown64(machInst);
        }
        uint8_t opc = (bits(machInst, 17, 16) << 1) | bits(machInst, 4);

        switch (opc) {
          case 0x0:
            return decodeSveCmpImmF<SveFcmgeZero>(
                size, machInst, pd, zn, 0x0, pg);
          case 0x1:
            return decodeSveCmpImmF<SveFcmgtZero>(
                size, machInst, pd, zn, 0x0, pg);
          case 0x2:
            return decodeSveCmpImmF<SveFcmltZero>(
                size, machInst, pd, zn, 0x0, pg);
          case 0x3:
            return decodeSveCmpImmF<SveFcmleZero>(
                size, machInst, pd, zn, 0x0, pg);
          case 0x4:
            return decodeSveCmpImmF<SveFcmeqZero>(
                size, machInst, pd, zn, 0x0, pg);
          case 0x6:
            return decodeSveCmpImmF<SveFcmneZero>(
                size, machInst, pd, zn, 0x0, pg);
        }
        return new Unknown64(machInst);
    } // decodeSveFpCmpZero

    StaticInstPtr
    decodeSveFpAccumReduc(ExtMachInst machInst)
    {
        uint8_t opc = bits(machInst, 18, 16);
        uint8_t size = bits(machInst, 23, 22);
        if (opc != 0 || size == 0) {
            return new Unknown64(machInst);
        }

        IntRegIndex vdn = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10);

        return decodeSveUnaryPredF<SveFadda>(size, machInst, vdn, zm, pg);
    } // decodeSveFpAccumReduc
2286
2287 StaticInstPtr
2288 decodeSveFpArithUnpred(ExtMachInst machInst)
2289 {
2290 IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
2291 IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
2292 IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 20, 16);
2293
2294 uint8_t size = bits(machInst, 23, 22);
2295 if (size == 0) {
2296 return new Unknown64(machInst);
2297 }
2298 uint8_t opc = (uint8_t) bits(machInst, 12, 10);
2299
2300 switch (opc) {
2301 case 0x0:
2302 return decodeSveBinUnpredF<SveFaddUnpred>(
2303 size, machInst, zd, zn, zm);
2304 case 0x1:
2305 return decodeSveBinUnpredF<SveFsubUnpred>(
2306 size, machInst, zd, zn, zm);
2307 case 0x2:
2308 return decodeSveBinUnpredF<SveFmulUnpred>(
2309 size, machInst, zd, zn, zm);
2310 case 0x3:
2311 return decodeSveBinUnpredF<SveFtsmul>(
2312 size, machInst, zd, zn, zm);
2313 case 0x6:
2314 return decodeSveBinUnpredF<SveFrecps>(
2315 size, machInst, zd, zn, zm);
2316 case 0x7:
2317 return decodeSveBinUnpredF<SveFrsqrts>(
2318 size, machInst, zd, zn, zm);
2319 }
2320 return new Unknown64(machInst);
2321 } // decodeSveFpArithUnpred
2322
2323 StaticInstPtr
2324 decodeSveFpArithPred0(ExtMachInst machInst)
2325 {
2326 IntRegIndex zdn = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
2327 IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
2328 IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10);
2329
2330 uint8_t size = (uint8_t) bits(machInst, 23, 22);
2331 if (size == 0) {
2332 return new Unknown64(machInst);
2333 }
2334 uint8_t opc = (uint8_t) bits(machInst, 19, 16);
2335
2336 switch (opc) {
2337 case 0x0:
2338 return decodeSveBinDestrPredF<SveFaddPred>(
2339 size, machInst, zdn, zm, pg);
2340 case 0x1:
2341 return decodeSveBinDestrPredF<SveFsubPred>(
2342 size, machInst, zdn, zm, pg);
2343 case 0x2:
2344 return decodeSveBinDestrPredF<SveFmulPred>(
2345 size, machInst, zdn, zm, pg);
2346 case 0x3:
2347 return decodeSveBinDestrPredF<SveFsubr>(
2348 size, machInst, zdn, zm, pg);
2349 case 0x4:
2350 return decodeSveBinDestrPredF<SveFmaxnm>(
2351 size, machInst, zdn, zm, pg);
2352 case 0x5:
2353 return decodeSveBinDestrPredF<SveFminnm>(
2354 size, machInst, zdn, zm, pg);
2355 case 0x6:
2356 return decodeSveBinDestrPredF<SveFmax>(
2357 size, machInst, zdn, zm, pg);
2358 case 0x7:
2359 return decodeSveBinDestrPredF<SveFmin>(
2360 size, machInst, zdn, zm, pg);
2361 case 0x8:
2362 return decodeSveBinDestrPredF<SveFabd>(
2363 size, machInst, zdn, zm, pg);
2364 case 0x9:
2365 return decodeSveBinDestrPredF<SveFscale>(
2366 size, machInst, zdn, zm, pg);
2367 case 0xa:
2368 return decodeSveBinDestrPredF<SveFmulx>(
2369 size, machInst, zdn, zm, pg);
2370 case 0xc:
2371 return decodeSveBinDestrPredF<SveFdivr>(
2372 size, machInst, zdn, zm, pg);
2373 case 0xd:
2374 return decodeSveBinDestrPredF<SveFdiv>(
2375 size, machInst, zdn, zm, pg);
2376 }
2377 return new Unknown64(machInst);
2378 } // decodeSveFpArithPred0
2379
2380 StaticInstPtr
2381 decodeSveFpTrigMAddCoeff(ExtMachInst machInst)
2382 {
2383 IntRegIndex zdn = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
2384 IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
2385 uint8_t imm = (uint8_t) bits(machInst, 18, 16);
2386
2387 uint8_t size = (uint8_t) bits(machInst, 23, 22);
2388 if (size == 0) {
2389 return new Unknown64(machInst);
2390 }
2391
2392 return decodeSveTerImmUnpredF<SveFtmad>(size, machInst, zdn, zm, imm);
2393 } // decodeSveFpTrigMAddCoeff
2394
2395 StaticInstPtr
2396 decodeSveFpArithImmPred(ExtMachInst machInst)
2397 {
2398 IntRegIndex zdn = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
2399 IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10);
2400 uint64_t imm;
2401
2402 uint8_t size = (uint8_t) bits(machInst, 23, 22);
2403 if (size == 0) {
2404 return new Unknown64(machInst);
2405 }
2406
2407 uint8_t opc = (uint8_t) bits(machInst, 18, 16);
2408
2409 switch (opc) {
2410 case 0x0:
2411 imm = sveExpandFpImmAddSub((uint8_t) bits(machInst, 5), size);
2412 return decodeSveBinImmPredF<SveFaddImm>(
2413 size, machInst, zdn, imm, pg);
2414 case 0x1:
2415 imm = sveExpandFpImmAddSub((uint8_t) bits(machInst, 5), size);
2416 return decodeSveBinImmPredF<SveFsubImm>(
2417 size, machInst, zdn, imm, pg);
2418 case 0x2:
2419 imm = sveExpandFpImmMul((uint8_t) bits(machInst, 5), size);
2420 return decodeSveBinImmPredF<SveFmulImm>(
2421 size, machInst, zdn, imm, pg);
2422 case 0x3:
2423 imm = sveExpandFpImmAddSub((uint8_t) bits(machInst, 5), size);
2424 return decodeSveBinImmPredF<SveFsubrImm>(
2425 size, machInst, zdn, imm, pg);
2426 case 0x4:
2427 imm = sveExpandFpImmMaxMin((uint8_t) bits(machInst, 5), size);
2428 return decodeSveBinImmPredF<SveFmaxnmImm>(
2429 size, machInst, zdn, imm, pg);
2430 case 0x5:
2431 imm = sveExpandFpImmMaxMin((uint8_t) bits(machInst, 5), size);
2432 return decodeSveBinImmPredF<SveFminnmImm>(
2433 size, machInst, zdn, imm, pg);
2434 case 0x6:
2435 imm = sveExpandFpImmMaxMin((uint8_t) bits(machInst, 5), size);
2436 return decodeSveBinImmPredF<SveFmaxImm>(
2437 size, machInst, zdn, imm, pg);
2438 case 0x7:
2439 imm = sveExpandFpImmMaxMin((uint8_t) bits(machInst, 5), size);
2440 return decodeSveBinImmPredF<SveFminImm>(
2441 size, machInst, zdn, imm, pg);
2442 }
2443 return new Unknown64(machInst);
2444 } // decodeSveFpArithImmPred
2445
2446 StaticInstPtr
2447 decodeSveFpArithPred(ExtMachInst machInst)
2448 {
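        // Bit 20 splits this group: 0 selects the predicated FP arithmetic
        // forms, 1 with bit 19 clear selects FTMAD, and 1 with bit 19 set
        // selects the immediate-operand forms.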
        if (bits(machInst, 20) == 0) {
            return decodeSveFpArithPred0(machInst);
        } else if (bits(machInst, 19) == 0) {
            return decodeSveFpTrigMAddCoeff(machInst);
        } else {
            return decodeSveFpArithImmPred(machInst);
        }
    } // decodeSveFpArithPred

    StaticInstPtr
    decodeSveFpUnaryPred(ExtMachInst machInst)
    {
        IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10);

        uint8_t size = (uint8_t) bits(machInst, 23, 22);
        if (size == 0) {
            return new Unknown64(machInst);
        }

        uint8_t b20_19 = bits(machInst, 20, 19);
        switch (b20_19) {
          case 0x0:
            {
                if (bits(machInst, 18, 16) == 0x5) {
                    return new Unknown64(machInst);
                }
                // SVE floating-point round to integral value
                uint8_t opc = (uint8_t) bits(machInst, 18, 16);
                switch (opc) {
                  case 0x0:
                    return decodeSveUnaryPredF<SveFrintn>(
                        size, machInst, zd, zn, pg);
                  case 0x1:
                    return decodeSveUnaryPredF<SveFrintp>(
                        size, machInst, zd, zn, pg);
                  case 0x2:
                    return decodeSveUnaryPredF<SveFrintm>(
                        size, machInst, zd, zn, pg);
                  case 0x3:
                    return decodeSveUnaryPredF<SveFrintz>(
                        size, machInst, zd, zn, pg);
                  case 0x4:
                    return decodeSveUnaryPredF<SveFrinta>(
                        size, machInst, zd, zn, pg);
                  case 0x6:
                    return decodeSveUnaryPredF<SveFrintx>(
                        size, machInst, zd, zn, pg);
                  case 0x7:
                    return decodeSveUnaryPredF<SveFrinti>(
                        size, machInst, zd, zn, pg);
                }
            }
            break;
          case 0x1:
            {
                // SVE floating-point unary operations (predicated)
                uint8_t b18_16 = bits(machInst, 18, 16);
                switch (b18_16) {
                  case 0x0:
                    if (size == 0x2) {
                        return new SveFcvtNarrow<uint32_t, uint16_t>(
                            machInst, zd, zn, pg);
                    } else if (size == 0x3) {
                        return new SveFcvtNarrow<uint64_t, uint16_t>(
                            machInst, zd, zn, pg);
                    }
                    break;
                  case 0x1:
                    if (size == 0x2) {
                        return new SveFcvtWiden<uint16_t, uint32_t>(
                            machInst, zd, zn, pg);
                    } else if (size == 0x3) {
                        return new SveFcvtWiden<uint16_t, uint64_t>(
                            machInst, zd, zn, pg);
                    }
                    break;
                  case 0x2:
                    if (size == 0x3) {
                        return new SveFcvtNarrow<uint64_t, uint32_t>(
                            machInst, zd, zn, pg);
                    }
                    break;
                  case 0x3:
                    if (size == 0x3) {
                        return new SveFcvtWiden<uint32_t, uint64_t>(
                            machInst, zd, zn, pg);
                    }
                    break;
                  case 0x4:
                    if (size != 0x0) {
                        return decodeSveUnaryPredF<SveFrecpx>(
                            size, machInst, zd, zn, pg);
                    }
                    break;
                  case 0x5:
                    if (size != 0x0) {
                        return decodeSveUnaryPredF<SveFsqrt>(
                            size, machInst, zd, zn, pg);
                    }
                    break;
                }
            }
            break;
          case 0x2:
            {
                // SVE integer convert to floating-point
                uint8_t opc = (size << 3) | bits(machInst, 18, 16);
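                // opc concatenates the vector element size with opc<2:0>;
                // opc<0> selects unsigned (UCVTF) vs. signed (SCVTF), and
                // the remaining bits pick the source and destination widths.
                // Combinations not listed below are unallocated.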
                switch (opc) {
                  case 0xa:
                    return new SveScvtfNarrow<uint16_t, uint16_t>(
                        machInst, zd, zn, pg);
                  case 0xb:
                    return new SveUcvtfNarrow<uint16_t, uint16_t>(
                        machInst, zd, zn, pg);
                  case 0xc:
                    return new SveScvtfNarrow<uint32_t, uint16_t>(
                        machInst, zd, zn, pg);
                  case 0xd:
                    return new SveUcvtfNarrow<uint32_t, uint16_t>(
                        machInst, zd, zn, pg);
                  case 0xe:
                    return new SveScvtfNarrow<uint64_t, uint16_t>(
                        machInst, zd, zn, pg);
                  case 0xf:
                    return new SveUcvtfNarrow<uint64_t, uint16_t>(
                        machInst, zd, zn, pg);
                  case 0x14:
                    return new SveScvtfNarrow<uint32_t, uint32_t>(
                        machInst, zd, zn, pg);
                  case 0x15:
                    return new SveUcvtfNarrow<uint32_t, uint32_t>(
                        machInst, zd, zn, pg);
                  case 0x18:
                    return new SveScvtfWiden<uint32_t, uint64_t>(
                        machInst, zd, zn, pg);
                  case 0x19:
                    return new SveUcvtfWiden<uint32_t, uint64_t>(
                        machInst, zd, zn, pg);
                  case 0x1c:
                    return new SveScvtfNarrow<uint64_t, uint32_t>(
                        machInst, zd, zn, pg);
                  case 0x1d:
                    return new SveUcvtfNarrow<uint64_t, uint32_t>(
                        machInst, zd, zn, pg);
                  case 0x1e:
                    return new SveScvtfNarrow<uint64_t, uint64_t>(
                        machInst, zd, zn, pg);
                  case 0x1f:
                    return new SveUcvtfNarrow<uint64_t, uint64_t>(
                        machInst, zd, zn, pg);
                }
            }
            break;
          case 0x3:
            {
                // SVE floating-point convert to integer
                uint8_t opc = (size << 3) | bits(machInst, 18, 16);
                switch (opc) {
                  case 0xa:
                    return new SveFcvtzsNarrow<uint16_t, uint16_t>(
                        machInst, zd, zn, pg);
                  case 0xb:
                    return new SveFcvtzuNarrow<uint16_t, uint16_t>(
                        machInst, zd, zn, pg);
                  case 0xc:
                    return new SveFcvtzsWiden<uint16_t, uint32_t>(
                        machInst, zd, zn, pg);
                  case 0xd:
                    return new SveFcvtzuWiden<uint16_t, uint32_t>(
                        machInst, zd, zn, pg);
                  case 0xe:
                    return new SveFcvtzsWiden<uint16_t, uint64_t>(
                        machInst, zd, zn, pg);
                  case 0xf:
                    return new SveFcvtzuWiden<uint16_t, uint64_t>(
                        machInst, zd, zn, pg);
                  case 0x14:
                    return new SveFcvtzsNarrow<uint32_t, uint32_t>(
                        machInst, zd, zn, pg);
                  case 0x15:
                    return new SveFcvtzuNarrow<uint32_t, uint32_t>(
                        machInst, zd, zn, pg);
                  case 0x18:
                    return new SveFcvtzsNarrow<uint64_t, uint32_t>(
                        machInst, zd, zn, pg);
                  case 0x19:
                    return new SveFcvtzuNarrow<uint64_t, uint32_t>(
                        machInst, zd, zn, pg);
                  case 0x1c:
                    return new SveFcvtzsWiden<uint32_t, uint64_t>(
                        machInst, zd, zn, pg);
                  case 0x1d:
                    return new SveFcvtzuWiden<uint32_t, uint64_t>(
                        machInst, zd, zn, pg);
                  case 0x1e:
                    return new SveFcvtzsNarrow<uint64_t, uint64_t>(
                        machInst, zd, zn, pg);
                  case 0x1f:
                    return new SveFcvtzuNarrow<uint64_t, uint64_t>(
                        machInst, zd, zn, pg);
                }
            }
            break;
        }
        return new Unknown64(machInst);
    } // decodeSveFpUnaryPred

    StaticInstPtr
    decodeSveFpCmpVec(ExtMachInst machInst)
    {
        IntRegIndex pd = (IntRegIndex) (uint8_t) bits(machInst, 3, 0);
        IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
        IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 20, 16);
        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10);

        uint8_t size = bits(machInst, 23, 22);
        if (size == 0) {
            return new Unknown64(machInst);
        }
        uint8_t opc = (bits(machInst, 15) << 2) |
                      (bits(machInst, 13) << 1) |
                      bits(machInst, 4);

        switch (opc) {
          case 0x0:
            return decodeSveCmpF<SveFcmge>(size, machInst, pd, zn, zm, pg);
          case 0x1:
            return decodeSveCmpF<SveFcmgt>(size, machInst, pd, zn, zm, pg);
          case 0x2:
            return decodeSveCmpF<SveFcmeq>(size, machInst, pd, zn, zm, pg);
          case 0x3:
            return decodeSveCmpF<SveFcmne>(size, machInst, pd, zn, zm, pg);
          case 0x4:
            return decodeSveCmpF<SveFcmuo>(size, machInst, pd, zn, zm, pg);
          case 0x5:
            return decodeSveCmpF<SveFacge>(size, machInst, pd, zn, zm, pg);
          case 0x7:
            return decodeSveCmpF<SveFacgt>(size, machInst, pd, zn, zm, pg);
        }
        return new Unknown64(machInst);
    } // decodeSveFpCmpVec

    StaticInstPtr
    decodeSveFpFusedMulAdd(ExtMachInst machInst)
    {
        IntRegIndex zda = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
        IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 20, 16);
        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10);

        uint8_t size = bits(machInst, 23, 22);
        if (size == 0) {
            return new Unknown64(machInst);
        }
        uint8_t opc = bits(machInst, 15, 13);

        switch (opc) {
          case 0x0:
            return decodeSveTerPredF<SveFmla>(
                size, machInst, zda, zn, zm, pg);
          case 0x1:
            return decodeSveTerPredF<SveFmls>(
                size, machInst, zda, zn, zm, pg);
          case 0x2:
            return decodeSveTerPredF<SveFnmla>(
                size, machInst, zda, zn, zm, pg);
          case 0x3:
            return decodeSveTerPredF<SveFnmls>(
                size, machInst, zda, zn, zm, pg);
          case 0x4:
            return decodeSveTerPredF<SveFmad>(
                size, machInst, zda /* zdn */, zm /* za */, zn, pg);
          case 0x5:
            return decodeSveTerPredF<SveFmsb>(
                size, machInst, zda /* zdn */, zm /* za */, zn, pg);
          case 0x6:
            return decodeSveTerPredF<SveFnmad>(
                size, machInst, zda /* zdn */, zm /* za */, zn, pg);
          case 0x7:
            return decodeSveTerPredF<SveFnmsb>(
                size, machInst, zda /* zdn */, zm /* za */, zn, pg);
        }
        return new Unknown64(machInst);
    } // decodeSveFpFusedMulAdd

    StaticInstPtr
    decodeSveFpCplxAdd(ExtMachInst machInst)
    {
        uint8_t size = bits(machInst, 23, 22);
        uint8_t rot = bits(machInst, 16) << 1 | 0x01;
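        // FCADD only supports rotations of 90 and 270 degrees, so rot (the
        // rotation in multiples of 90) is always 1 or 3 here.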
        IntRegIndex zdn = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10);
        switch (size) {
          case 1:
            return new SveFcadd<uint16_t>(machInst,
                    zdn, zdn, zm, pg, rot);
          case 2:
            return new SveFcadd<uint32_t>(machInst,
                    zdn, zdn, zm, pg, rot);
          case 3:
            return new SveFcadd<uint64_t>(machInst,
                    zdn, zdn, zm, pg, rot);
        }
        return new Unknown64(machInst);
    } // decodeSveFpCplxAdd

    StaticInstPtr
    decodeSveFpCplxMulAddVec(ExtMachInst machInst)
    {
        uint8_t size = bits(machInst, 23, 22);
        if (size == 0) {
            return new Unknown64(machInst);
        }

        IntRegIndex zda = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10);
        IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 20, 16);
        uint8_t rot = bits(machInst, 14, 13);
        switch (size) {
          case 1:
            return new SveFcmlav<uint16_t>(machInst,
                    zda, zn, zm, pg, rot);
          case 2:
            return new SveFcmlav<uint32_t>(machInst,
                    zda, zn, zm, pg, rot);
          case 3:
            return new SveFcmlav<uint64_t>(machInst,
                    zda, zn, zm, pg, rot);
        }

        return new Unknown64(machInst);
    } // decodeSveFpCplxMulAddVec

    StaticInstPtr
    decodeSveFpCplxMulAddIndexed(ExtMachInst machInst)
    {
        uint8_t size = bits(machInst, 23, 22);
        if (size < 2) {
            return new Unknown64(machInst);
        }

        IntRegIndex zda = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
        IntRegIndex zm;
        uint8_t rot = bits(machInst, 11, 10);
        uint8_t imm;

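        // The element index narrows as the element size grows: single
        // precision uses a 2-bit index with a 3-bit Zm field, double
        // precision a 1-bit index with a 4-bit Zm field.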
        switch (size) {
          case 2:
            zm = (IntRegIndex) (uint8_t) bits(machInst, 18, 16);
            imm = bits(machInst, 20, 19);
            return new SveFcmlai<uint32_t>(machInst,
                    zda, zn, zm, rot, imm);
          case 3:
            zm = (IntRegIndex) (uint8_t) bits(machInst, 19, 16);
            imm = bits(machInst, 20);
            return new SveFcmlai<uint64_t>(machInst,
                    zda, zn, zm, rot, imm);
        }
        return new Unknown64(machInst);
    } // decodeSveFpCplxMulAddIndexed

    StaticInstPtr
    decodeSveFpMulIndexed(ExtMachInst machInst)
    {
        IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);

        uint8_t size = bits(machInst, 23, 22);
        switch (size) {
          case 0x0:
          case 0x1:
            return new SveFmulIdx<uint16_t>(
                machInst, zd, zn,
                (IntRegIndex) (uint8_t) bits(machInst, 18, 16),
                bits(machInst, 20, 19) | (bits(machInst, 22) << 2));
          case 0x2:
            return new SveFmulIdx<uint32_t>(
                machInst, zd, zn,
                (IntRegIndex) (uint8_t) bits(machInst, 18, 16),
                bits(machInst, 20, 19));
          case 0x3:
            return new SveFmulIdx<uint64_t>(
                machInst, zd, zn,
                (IntRegIndex) (uint8_t) bits(machInst, 19, 16),
                bits(machInst, 20));
          default:
            return new Unknown64(machInst);
        }

    } // decodeSveFpMulIndexed

    StaticInstPtr
    decodeSveFpMulAddIndexed(ExtMachInst machInst)
    {
        IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
        const uint8_t op = bits(machInst, 10);

        uint8_t size = bits(machInst, 23, 22);
        switch (size) {
          case 0x0:
          case 0x1:
            if (op) {
                return new SveFmlsIdx<uint16_t>(
                    machInst, zd, zn,
                    (IntRegIndex) (uint8_t) bits(machInst, 18, 16),
                    bits(machInst, 20, 19) | (bits(machInst, 22) << 2));
            } else {
                return new SveFmlaIdx<uint16_t>(
                    machInst, zd, zn,
                    (IntRegIndex) (uint8_t) bits(machInst, 18, 16),
                    bits(machInst, 20, 19) | (bits(machInst, 22) << 2));
            }
          case 0x2:
            if (op) {
                return new SveFmlsIdx<uint32_t>(
                    machInst, zd, zn,
                    (IntRegIndex) (uint8_t) bits(machInst, 18, 16),
                    bits(machInst, 20, 19));
            } else {
                return new SveFmlaIdx<uint32_t>(
                    machInst, zd, zn,
                    (IntRegIndex) (uint8_t) bits(machInst, 18, 16),
                    bits(machInst, 20, 19));
            }
          case 0x3:
            if (op) {
                return new SveFmlsIdx<uint64_t>(
                    machInst, zd, zn,
                    (IntRegIndex) (uint8_t) bits(machInst, 19, 16),
                    bits(machInst, 20));
            } else {
                return new SveFmlaIdx<uint64_t>(
                    machInst, zd, zn,
                    (IntRegIndex) (uint8_t) bits(machInst, 19, 16),
                    bits(machInst, 20));
            }
          default:
            return new Unknown64(machInst);
        }
    } // decodeSveFpMulAddIndexed

    StaticInstPtr
    decodeSveMemGather32(ExtMachInst machInst)
    {
        if (bits(machInst, 15)) {
            if (bits(machInst, 22)) {
                // SVE load and broadcast element
                IntRegIndex zt = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
                IntRegIndex rn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
                uint64_t imm = bits(machInst, 21, 16);
                IntRegIndex pg = (IntRegIndex) (uint8_t)
                    bits(machInst, 12, 10);
                uint8_t dtype = (bits(machInst, 24, 23) << 2) |
                                bits(machInst, 14, 13);
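                // For LD1R* the 4-bit dtype index appears to concatenate
                // msz (bits 24:23) with dtype<1:0> (bits 14:13), selecting
                // the memory element size and any extension, as for the
                // other contiguous loads.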
                return decodeSveContigLoadSIInsts<SveLoadAndRepl>(
                    dtype, machInst, zt, pg, rn, imm, false, true);
            } else {
                if (bits(machInst, 21)) {
                    // SVE 32-bit gather load (vector plus immediate)
                    IntRegIndex zt = (IntRegIndex) (uint8_t)
                        bits(machInst, 4, 0);
                    IntRegIndex zn = (IntRegIndex) (uint8_t)
                        bits(machInst, 9, 5);
                    uint64_t imm = bits(machInst, 20, 16);
                    IntRegIndex pg = (IntRegIndex) (uint8_t)
                        bits(machInst, 12, 10);
                    uint8_t dtype = (bits(machInst, 24, 23) << 1) |
                                    bits(machInst, 14);
                    uint8_t ff = bits(machInst, 13);
                    return decodeSveGatherLoadVIInsts(
                        dtype, machInst, zt, pg, zn, imm, true, ff);
                } else {
                    uint8_t b14_13 = bits(machInst, 14, 13);
                    if (b14_13 == 0x2 && bits(machInst, 4) == 0) {
                        // TODO: SVE contiguous prefetch (scalar plus scalar)
                        return new WarnUnimplemented("prf[bhwd]", machInst);
                    } else if (b14_13 == 0x3 && bits(machInst, 4) == 0) {
                        // TODO: SVE 32-bit gather prefetch (vector plus
                        // immediate)
                        return new WarnUnimplemented("prf[bhwd]", machInst);
                    }
                }
            }
        } else {
            uint8_t b24_23 = bits(machInst, 24, 23);
            if (b24_23 != 0x3 && bits(machInst, 21) == 0) {
                // SVE 32-bit gather load (scalar plus 32-bit unscaled offsets)
                IntRegIndex zt = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
                IntRegIndex rn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
                IntRegIndex zm = (IntRegIndex) (uint8_t)
                    bits(machInst, 20, 16);
                IntRegIndex pg = (IntRegIndex) (uint8_t)
                    bits(machInst, 12, 10);
                uint8_t dtype = (bits(machInst, 24, 23) << 1) |
                                bits(machInst, 14);
                uint8_t xs = bits(machInst, 22);
                uint8_t ff = bits(machInst, 13);
                return decodeSveGatherLoadSVInsts(
                    dtype, machInst, zt, pg, rn, zm,
                    true, true, xs, false, ff);
            }
            switch (b24_23) {
              case 0x0:
                if (bits(machInst, 21) && bits(machInst, 4) == 0) {
                    // TODO: SVE 32-bit gather prefetch (vector plus immediate)
                    return new WarnUnimplemented("prf[bhwd]", machInst);
                }
                break;
              case 0x1:
                if (bits(machInst, 21)) {
                    // SVE 32-bit gather load halfwords (scalar plus 32-bit
                    // scaled offsets)
                    IntRegIndex zt = (IntRegIndex) (uint8_t)
                        bits(machInst, 4, 0);
                    IntRegIndex rn = (IntRegIndex) (uint8_t)
                        bits(machInst, 9, 5);
                    IntRegIndex zm = (IntRegIndex) (uint8_t)
                        bits(machInst, 20, 16);
                    IntRegIndex pg = (IntRegIndex) (uint8_t)
                        bits(machInst, 12, 10);
                    uint8_t xs = bits(machInst, 22);
                    uint8_t ff = bits(machInst, 13);
                    if (bits(machInst, 14)) {
                        return
                            new SveIndexedMemSV<uint32_t, uint16_t,
                                    SveGatherLoadSVMicroop,
                                    SveFirstFaultWritebackMicroop>(
                                ff ? "ldff1" : "ld1", machInst, MemReadOp,
                                zt, pg, rn, zm, true, xs, true, ff);
                    } else {
                        return
                            new SveIndexedMemSV<int32_t, int16_t,
                                    SveGatherLoadSVMicroop,
                                    SveFirstFaultWritebackMicroop>(
                                ff ? "ldff1" : "ld1", machInst, MemReadOp,
                                zt, pg, rn, zm, true, xs, true, ff);
                    }
                }
                break;
              case 0x2:
                if (bits(machInst, 21)) {
                    // SVE 32-bit gather load words (scalar plus 32-bit scaled
                    // offsets)
                    IntRegIndex zt = (IntRegIndex) (uint8_t)
                        bits(machInst, 4, 0);
                    IntRegIndex rn = (IntRegIndex) (uint8_t)
                        bits(machInst, 9, 5);
                    IntRegIndex zm = (IntRegIndex) (uint8_t)
                        bits(machInst, 20, 16);
                    IntRegIndex pg = (IntRegIndex) (uint8_t)
                        bits(machInst, 12, 10);
                    uint8_t xs = bits(machInst, 22);
                    uint8_t ff = bits(machInst, 13);
                    return new SveIndexedMemSV<uint32_t, uint32_t,
                                    SveGatherLoadSVMicroop,
                                    SveFirstFaultWritebackMicroop>(
                        ff ? "ldff1" : "ld1", machInst, MemReadOp, zt, pg, rn,
                        zm, true, xs, true, ff);
                }
                break;
              case 0x3:
                if (bits(machInst, 22) == 0 && bits(machInst, 14, 13) == 0x0 &&
                    bits(machInst, 4) == 0) {
                    // SVE load predicate register
                    IntRegIndex pt = (IntRegIndex) (uint8_t)
                        bits(machInst, 3, 0);
                    IntRegIndex rn = (IntRegIndex) (uint8_t)
                        bits(machInst, 9, 5);
                    uint64_t imm = sext<9>((bits(machInst, 21, 16) << 3) |
                                           bits(machInst, 12, 10));
                    return new SveLdrPred(machInst, pt, rn, imm);
                } else if (bits(machInst, 22) == 0 &&
                           bits(machInst, 14, 13) == 0x2) {
                    // SVE load vector register
                    IntRegIndex zt = (IntRegIndex) (uint8_t)
                        bits(machInst, 4, 0);
                    IntRegIndex rn = (IntRegIndex) (uint8_t)
                        bits(machInst, 9, 5);
                    uint64_t imm = sext<9>((bits(machInst, 21, 16) << 3) |
                                           bits(machInst, 12, 10));
                    return new SveLdrVec(machInst, zt, rn, imm);
                } else if (bits(machInst, 22) == 1 &&
                           bits(machInst, 4) == 0) {
                    // TODO: SVE contiguous prefetch (scalar plus immediate)
                    return new WarnUnimplemented("prf[bhwd]", machInst);
                }
                break;
            }
        }
        return new Unknown64(machInst);
    } // decodeSveMemGather32

    StaticInstPtr
    decodeSveLoadBcastQuadSS(ExtMachInst machInst)
    {
        return new Unknown64(machInst);
    } // decodeSveLoadBcastQuadSS

    StaticInstPtr
    decodeSveLoadBcastQuadSI(ExtMachInst machInst)
    {
        return new Unknown64(machInst);
    } // decodeSveLoadBcastQuadSI

    StaticInstPtr
    decodeSveContigLoadSS(ExtMachInst machInst)
    {
        IntRegIndex zt = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex rn = makeSP((IntRegIndex) (uint8_t) bits(machInst, 9, 5));
        IntRegIndex rm = makeSP(
            (IntRegIndex) (uint8_t) bits(machInst, 20, 16));
        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10);

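        // Rm == 0x1f (XZR) is treated as unallocated for the
        // scalar-plus-scalar forms; the equivalent transfers use the
        // scalar-plus-immediate encodings instead.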
        if (rm == 0x1f) {
            return new Unknown64(machInst);
        }

        return decodeSveContigLoadSSInsts<SveContigLoadSS>(
            bits(machInst, 24, 21), machInst, zt, pg, rn, rm, false);
    } // decodeSveContigLoadSS

    StaticInstPtr
    decodeSveContigFFLoadSS(ExtMachInst machInst)
    {
        IntRegIndex zt = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex rn = makeSP((IntRegIndex) (uint8_t) bits(machInst, 9, 5));
        IntRegIndex rm = makeSP(
            (IntRegIndex) (uint8_t) bits(machInst, 20, 16));
        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10);

        if (rm == 0x1f) {
            return new Unknown64(machInst);
        }

        return decodeSveContigLoadSSInsts<SveContigFFLoadSS>(
            bits(machInst, 24, 21), machInst, zt, pg, rn, rm, true);
    } // decodeSveContigFFLoadSS

    StaticInstPtr
    decodeSveContigLoadSI(ExtMachInst machInst)
    {
        IntRegIndex zt = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex rn = makeSP((IntRegIndex) (uint8_t) bits(machInst, 9, 5));
        uint64_t imm = sext<4>(bits(machInst, 19, 16));
        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10);

        return decodeSveContigLoadSIInsts<SveContigLoadSI>(
            bits(machInst, 24, 21), machInst, zt, pg, rn, imm, false);
    } // decodeSveContigLoadSI

    StaticInstPtr
    decodeSveContigNFLoadSI(ExtMachInst machInst)
    {
        IntRegIndex zt = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex rn = makeSP((IntRegIndex) (uint8_t) bits(machInst, 9, 5));
        uint64_t imm = sext<4>(bits(machInst, 19, 16));
        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10);

        return decodeSveContigLoadSIInsts<SveContigNFLoadSI>(
            bits(machInst, 24, 21), machInst, zt, pg, rn, imm, true);
    } // decodeSveContigNFLoadSI

    StaticInstPtr
    decodeSveContigNTLoadSS(ExtMachInst machInst)
    {
        return new Unknown64(machInst);
    } // decodeSveContigNTLoadSS

    StaticInstPtr
    decodeSveLoadStructsSS(ExtMachInst machInst)
    {
        IntRegIndex zt = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex rn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
        IntRegIndex rm = (IntRegIndex) (uint8_t) bits(machInst, 20, 16);
        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10);
        uint8_t msz = bits(machInst, 24, 23);
        uint8_t num = bits(machInst, 22, 21);

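        // num (bits 22:21) is the register count minus one, so values 1-3
        // select the LD2/LD3/LD4 forms; num == 0 belongs to the
        // non-temporal loads, and Rm == XZR has no valid encoding here.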
        if (rm != 0x1f && num != 0) {
            num++;
            return decodeSveStructLoadSSInsts(msz, machInst,
                    zt, pg, rn, rm, num);
        }
        return new Unknown64(machInst);
    } // decodeSveLoadStructsSS

    StaticInstPtr
    decodeSveContigNTLoadSI(ExtMachInst machInst)
    {
        return new Unknown64(machInst);
    } // decodeSveContigNTLoadSI

    StaticInstPtr
    decodeSveLoadStructsSI(ExtMachInst machInst)
    {
        IntRegIndex zt = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex rn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
        int64_t imm = sext<4>(bits(machInst, 19, 16));
        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10);
        uint8_t msz = bits(machInst, 24, 23);
        uint8_t num = bits(machInst, 22, 21);

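        // The 4-bit immediate counts blocks of num consecutive vectors, so
        // it is scaled by the register count before being handed to the
        // struct-load helper.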
        if (num != 0) {
            num++;
            imm *= num;
            return decodeSveStructLoadSIInsts(msz, machInst,
                    zt, pg, rn, imm, num);
        }
        return new Unknown64(machInst);
    } // decodeSveLoadStructsSI

    StaticInstPtr
    decodeSveMemContigLoad(ExtMachInst machInst)
    {
        switch (bits(machInst, 15, 13)) {
          case 0x0:
            return decodeSveLoadBcastQuadSS(machInst);
          case 0x1:
            if (bits(machInst, 20) == 0x0) {
                return decodeSveLoadBcastQuadSI(machInst);
            }
            break;
          case 0x2:
            return decodeSveContigLoadSS(machInst);
          case 0x3:
            return decodeSveContigFFLoadSS(machInst);
          case 0x5:
            if (bits(machInst, 20) == 0x0) {
                return decodeSveContigLoadSI(machInst);
            } else {
                return decodeSveContigNFLoadSI(machInst);
            }
          case 0x6:
            if (bits(machInst, 22, 21) == 0x0) {
                return decodeSveContigNTLoadSS(machInst);
            } else {
                return decodeSveLoadStructsSS(machInst);
            }
          case 0x7:
            if (bits(machInst, 20) == 0) {
                if (bits(machInst, 22, 21) == 0x0) {
                    return decodeSveContigNTLoadSI(machInst);
                } else {
                    return decodeSveLoadStructsSI(machInst);
                }
            }
            break;
        }
        return new Unknown64(machInst);
    } // decodeSveMemContigLoad

    StaticInstPtr
    decodeSveMemGather64(ExtMachInst machInst)
    {
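        // Bit 21 and bit 15 together select the addressing form: unpacked
        // 32-bit offsets (unscaled or scaled), 64-bit offsets, or vector
        // plus immediate, with the prefetch encodings folded into the gaps.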
        switch ((bits(machInst, 21) << 1) | bits(machInst, 15)) {
          case 0x0:
            {
                // SVE 64-bit gather load (scalar plus unpacked 32-bit
                // unscaled offsets)
                IntRegIndex zt = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
                IntRegIndex rn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
                IntRegIndex zm = (IntRegIndex) (uint8_t)
                    bits(machInst, 20, 16);
                IntRegIndex pg = (IntRegIndex) (uint8_t)
                    bits(machInst, 12, 10);
                uint8_t dtype = (bits(machInst, 24, 23) << 1) |
                                bits(machInst, 14);
                uint8_t xs = bits(machInst, 22);
                uint8_t ff = bits(machInst, 13);
                return decodeSveGatherLoadSVInsts(
                    dtype, machInst, zt, pg, rn, zm,
                    false, true, xs, false, ff);
            }
          case 0x1:
            if (bits(machInst, 22)) {
                // SVE 64-bit gather load (scalar plus 64-bit unscaled offsets)
                IntRegIndex zt = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
                IntRegIndex rn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
                IntRegIndex zm = (IntRegIndex) (uint8_t)
                    bits(machInst, 20, 16);
                IntRegIndex pg = (IntRegIndex) (uint8_t)
                    bits(machInst, 12, 10);
                uint8_t dtype = (bits(machInst, 24, 23) << 1) |
                                bits(machInst, 14);
                uint8_t ff = bits(machInst, 13);
                return decodeSveGatherLoadSVInsts(
                    dtype, machInst, zt, pg, rn, zm,
                    false, false, false, false, ff);
            } else {
                if (bits(machInst, 14, 13) == 0x3 && bits(machInst, 4) == 0) {
                    // TODO: SVE 64-bit gather prefetch (vector plus immediate)
                    return new WarnUnimplemented("prf[bhwd]", machInst);
                }
            }
            break;
          case 0x2:
            if (bits(machInst, 24, 23) != 0x0) {
                // SVE 64-bit gather load (scalar plus unpacked 32-bit scaled
                // offsets)
                IntRegIndex zt = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
                IntRegIndex rn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
                IntRegIndex zm = (IntRegIndex) (uint8_t)
                    bits(machInst, 20, 16);
                IntRegIndex pg = (IntRegIndex) (uint8_t)
                    bits(machInst, 12, 10);
                uint8_t dtype = (bits(machInst, 24, 23) << 1) |
                                bits(machInst, 14);
                uint8_t xs = bits(machInst, 22);
                uint8_t ff = bits(machInst, 13);
                return decodeSveGatherLoadSVInsts(
                    dtype, machInst, zt, pg, rn, zm,
                    false, true, xs, true, ff);
            } else if (bits(machInst, 4) == 0) {
                // TODO: SVE 64-bit gather prefetch (scalar plus unpacked
                // 32-bit scaled offsets)
                return new WarnUnimplemented("prf[bhwd]", machInst);
            }
            break;
          case 0x3:
            if (bits(machInst, 22) == 0) {
                // SVE 64-bit gather load (vector plus immediate)
                IntRegIndex zt = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
                IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
                uint64_t imm = bits(machInst, 20, 16);
                IntRegIndex pg = (IntRegIndex) (uint8_t)
                    bits(machInst, 12, 10);
                uint8_t dtype = (bits(machInst, 24, 23) << 1) |
                                bits(machInst, 14);
                uint8_t ff = bits(machInst, 13);
                return decodeSveGatherLoadVIInsts(
                    dtype, machInst, zt, pg, zn, imm, false, ff);
            } else {
                if (bits(machInst, 24, 23) != 0x0) {
                    // SVE 64-bit gather load (scalar plus 64-bit scaled
                    // offsets)
                    IntRegIndex zt = (IntRegIndex) (uint8_t)
                        bits(machInst, 4, 0);
                    IntRegIndex rn = (IntRegIndex) (uint8_t)
                        bits(machInst, 9, 5);
                    IntRegIndex zm = (IntRegIndex) (uint8_t)
                        bits(machInst, 20, 16);
                    IntRegIndex pg = (IntRegIndex) (uint8_t)
                        bits(machInst, 12, 10);
                    uint8_t dtype = (bits(machInst, 24, 23) << 1) |
                                    bits(machInst, 14);
                    uint8_t ff = bits(machInst, 13);
                    return decodeSveGatherLoadSVInsts(
                        dtype, machInst, zt, pg, rn, zm,
                        false, false, false, true, ff);
                } else if (bits(machInst, 4) == 0) {
                    // TODO: SVE 64-bit gather prefetch (scalar plus 64-bit
                    // scaled offsets)
                    return new WarnUnimplemented("prf[bhwd]", machInst);
                }
            }
            break;
        }
        return new Unknown64(machInst);
    } // decodeSveMemGather64

    StaticInstPtr
    decodeSveContigStoreSS(ExtMachInst machInst)
    {
        IntRegIndex zt = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex rn = makeSP((IntRegIndex) (uint8_t) bits(machInst, 9, 5));
        IntRegIndex rm = makeSP(
            (IntRegIndex) (uint8_t) bits(machInst, 20, 16));
        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10);

        if (rm == 0x1f) {
            return new Unknown64(machInst);
        }

        return decodeSveContigStoreSSInsts<SveContigStoreSS>(
            bits(machInst, 24, 21), machInst, zt, pg, rn, rm);
    } // decodeSveContigStoreSS

    StaticInstPtr
    decodeSveContigStoreSI(ExtMachInst machInst)
    {
        IntRegIndex zt = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex rn = makeSP((IntRegIndex) (uint8_t) bits(machInst, 9, 5));
        int8_t imm = sext<4>(bits(machInst, 19, 16));
        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10);

        return decodeSveContigStoreSIInsts<SveContigStoreSI>(
            bits(machInst, 24, 21), machInst, zt, pg, rn, imm);
    } // decodeSveContigStoreSI

    StaticInstPtr
    decodeSveContigNTStoreSS(ExtMachInst machInst)
    {
        return new Unknown64(machInst);
    } // decodeSveContigNTStoreSS

    StaticInstPtr
    decodeSveContigNTStoreSI(ExtMachInst machInst)
    {
        return new Unknown64(machInst);
    } // decodeSveContigNTStoreSI

    StaticInstPtr
    decodeSveStoreStructsSS(ExtMachInst machInst)
    {
        IntRegIndex zt = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex rn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
        IntRegIndex rm = (IntRegIndex) (uint8_t) bits(machInst, 20, 16);
        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10);
        uint8_t msz = bits(machInst, 24, 23);
        uint8_t num = bits(machInst, 22, 21);

        if (rm != 0x1f && num != 0) {
            num++;
            return decodeSveStructStoreSSInsts(msz, machInst,
                    zt, pg, rn, rm, num);
        }
        return new Unknown64(machInst);
    } // decodeSveStoreStructsSS

    StaticInstPtr
    decodeSveStoreStructsSI(ExtMachInst machInst)
    {
        IntRegIndex zt = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex rn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
        int64_t imm = sext<4>(bits(machInst, 19, 16));
        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10);
        uint8_t msz = bits(machInst, 24, 23);
        uint8_t num = bits(machInst, 22, 21);

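        // As with the struct loads, the immediate is scaled by the register
        // count (ST2/ST3/ST4), so each immediate step covers num vectors.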
        if (num != 0) {
            num++;
            imm *= num;
            return decodeSveStructStoreSIInsts(msz, machInst,
                    zt, pg, rn, imm, num);
        }
        return new Unknown64(machInst);
    } // decodeSveStoreStructsSI

    StaticInstPtr
    decodeSveMemStore(ExtMachInst machInst)
    {
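        // Bits 15:13 route between STR (predicate/vector), the contiguous
        // stores, the scatter stores and the structure stores; the scatter
        // forms decode further on bits 22:21.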
        switch (bits(machInst, 15, 13)) {
          case 0x0:
            if (bits(machInst, 24, 22) == 0x6 && bits(machInst, 4) == 0x0) {
                IntRegIndex pt = (IntRegIndex) (uint8_t) bits(machInst, 3, 0);
                IntRegIndex rn = makeSP(
                    (IntRegIndex) (uint8_t) bits(machInst, 9, 5));
                int16_t imm = sext<9>((bits(machInst, 21, 16) << 3) |
                                      bits(machInst, 12, 10));
                return new SveStrPred(machInst, pt, rn, imm);
            }
            break;
          case 0x2:
            if (bits(machInst, 24, 22) == 0x6) {
                IntRegIndex zt = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
                IntRegIndex rn = makeSP(
                    (IntRegIndex) (uint8_t) bits(machInst, 9, 5));
                int16_t imm = sext<9>((bits(machInst, 21, 16) << 3) |
                                      bits(machInst, 12, 10));
                return new SveStrVec(machInst, zt, rn, imm);
            } else {
                return decodeSveContigStoreSS(machInst);
            }
            break;
          case 0x3:
            if (bits(machInst, 22, 21) == 0x0) {
                return decodeSveContigNTStoreSS(machInst);
            } else {
                return decodeSveStoreStructsSS(machInst);
            }
          case 0x4:
          case 0x6:
            {
                IntRegIndex zt = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
                IntRegIndex rn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
                IntRegIndex zm = (IntRegIndex) (uint8_t)
                    bits(machInst, 20, 16);
                IntRegIndex pg = (IntRegIndex) (uint8_t)
                    bits(machInst, 12, 10);
                uint8_t msz = bits(machInst, 24, 23);
                uint8_t xs = bits(machInst, 22);

                switch (bits(machInst, 22, 21)) {
                  case 0x0:
                    // SVE 64-bit scatter store (scalar plus unpacked 32-bit
                    // unscaled offsets)
                    return decodeSveScatterStoreSVInsts(
                        msz, machInst, zt, pg, rn, zm,
                        false, true, xs, false);
                  case 0x1:
                    if (bits(machInst, 24, 23) != 0x0) {
                        // SVE 64-bit scatter store (scalar plus unpacked
                        // 32-bit scaled offsets)
                        return decodeSveScatterStoreSVInsts(
                            msz, machInst, zt, pg, rn, zm,
                            false, true, xs, true);
                    }
                    break;
                  case 0x2:
                    if (bits(machInst, 24, 23) != 0x3) {
                        // SVE 32-bit scatter store (scalar plus 32-bit
                        // unscaled offsets)
                        return decodeSveScatterStoreSVInsts(
                            msz, machInst, zt, pg, rn, zm,
                            true, true, xs, false);
                    }
                    break;
                  case 0x3:
                    // SVE 32-bit scatter store (scalar plus 32-bit scaled
                    // offsets)
                    return decodeSveScatterStoreSVInsts(
                        msz, machInst, zt, pg, rn, zm,
                        true, true, xs, true);
                }
            }
            break;
          case 0x5:
            switch (bits(machInst, 22, 21)) {
              case 0x0:
                {
                    // SVE 64-bit scatter store (scalar plus 64-bit unscaled
                    // offsets)
                    IntRegIndex zt = (IntRegIndex) (uint8_t)
                        bits(machInst, 4, 0);
                    IntRegIndex rn = (IntRegIndex) (uint8_t)
                        bits(machInst, 9, 5);
                    IntRegIndex zm = (IntRegIndex) (uint8_t)
                        bits(machInst, 20, 16);
                    IntRegIndex pg = (IntRegIndex) (uint8_t)
                        bits(machInst, 12, 10);
                    uint8_t msz = bits(machInst, 24, 23);

                    return decodeSveScatterStoreSVInsts(
                        msz, machInst, zt, pg, rn, zm,
                        false, false, false, false);
                }
              case 0x1:
                if (bits(machInst, 24, 23) != 0x0) {
                    // SVE 64-bit scatter store (scalar plus 64-bit scaled
                    // offsets)
                    IntRegIndex zt = (IntRegIndex) (uint8_t)
                        bits(machInst, 4, 0);
                    IntRegIndex rn = (IntRegIndex) (uint8_t)
                        bits(machInst, 9, 5);
                    IntRegIndex zm = (IntRegIndex) (uint8_t)
                        bits(machInst, 20, 16);
                    IntRegIndex pg = (IntRegIndex) (uint8_t)
                        bits(machInst, 12, 10);
                    uint8_t msz = bits(machInst, 24, 23);

                    return decodeSveScatterStoreSVInsts(
                        msz, machInst, zt, pg, rn, zm,
                        false, false, false, true);
                }
                break;
              case 0x2:
                {
                    // SVE 64-bit scatter store (vector plus immediate)
                    IntRegIndex zt = (IntRegIndex) (uint8_t)
                        bits(machInst, 4, 0);
                    IntRegIndex zn = (IntRegIndex) (uint8_t)
                        bits(machInst, 9, 5);
                    uint64_t imm = bits(machInst, 20, 16);
                    IntRegIndex pg = (IntRegIndex) (uint8_t)
                        bits(machInst, 12, 10);
                    uint8_t msz = bits(machInst, 24, 23);

                    return decodeSveScatterStoreVIInsts(
                        msz, machInst, zt, pg, zn, imm, false);
                }
              case 0x3:
                if (bits(machInst, 24, 23) != 0x3) {
                    // SVE 32-bit scatter store (vector plus immediate)
                    IntRegIndex zt = (IntRegIndex) (uint8_t)
                        bits(machInst, 4, 0);
                    IntRegIndex zn = (IntRegIndex) (uint8_t)
                        bits(machInst, 9, 5);
                    uint64_t imm = bits(machInst, 20, 16);
                    IntRegIndex pg = (IntRegIndex) (uint8_t)
                        bits(machInst, 12, 10);
                    uint8_t msz = bits(machInst, 24, 23);

                    return decodeSveScatterStoreVIInsts(
                        msz, machInst, zt, pg, zn, imm, true);
                }
                break;
            }
            break;
          case 0x7:
            if (bits(machInst, 20) == 0x0) {
                return decodeSveContigStoreSI(machInst);
            } else if (bits(machInst, 22, 21) == 0x0) {
                return decodeSveContigNTStoreSI(machInst);
            } else {
                return decodeSveStoreStructsSI(machInst);
            }
        }
        return new Unknown64(machInst);
    } // decodeSveMemStore

} // namespace Aarch64
}};