sve_2nd_level.isa revision 14029:744989da399f
1// Copyright (c) 2017-2019 ARM Limited
2// All rights reserved
3//
4// The license below extends only to copyright in the software and shall
5// not be construed as granting a license to any other intellectual
6// property including but not limited to intellectual property relating
7// to a hardware implementation of the functionality of the software
8// licensed hereunder.  You may use the software subject to the license
9// terms below provided that you ensure that this notice is replicated
10// unmodified and in its entirety in all distributions of the software,
11// modified or unmodified, in source code or in binary form.
12//
13// Redistribution and use in source and binary forms, with or without
14// modification, are permitted provided that the following conditions are
15// met: redistributions of source code must retain the above copyright
16// notice, this list of conditions and the following disclaimer;
17// redistributions in binary form must reproduce the above copyright
18// notice, this list of conditions and the following disclaimer in the
19// documentation and/or other materials provided with the distribution;
20// neither the name of the copyright holders nor the names of its
21// contributors may be used to endorse or promote products derived from
22// this software without specific prior written permission.
23//
24// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
25// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
26// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
27// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
28// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
29// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
30// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
31// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
32// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
33// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
34// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35//
36// Authors: Giacomo Gabrielli
37
38/// @file
39/// SVE 2nd-level decoder.
40
41output decoder {{
42namespace Aarch64
43{
44
45    StaticInstPtr
46    decodeSveIntArithBinPred(ExtMachInst machInst)
47    {
48        IntRegIndex zdn = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
49        IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
50        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10);
51
52        switch (bits(machInst, 20, 19)) {
53          case 0x0:
54            {
55                uint8_t size = bits(machInst, 23, 22);
56                uint8_t opc = bits(machInst, 18, 16);
57                switch (opc) {
58                  case 0x0:
59                    return decodeSveBinDestrPredU<SveAddPred>(
60                            size, machInst, zdn, zm, pg);
61                  case 0x1:
62                    return decodeSveBinDestrPredU<SveSubPred>(
63                            size, machInst, zdn, zm, pg);
64                  case 0x3:
65                    return decodeSveBinDestrPredU<SveSubr>(
66                            size, machInst, zdn, zm, pg);
67                  default:
68                    return new Unknown64(machInst);
69                }
70            }
71          case 0x1:
72            {
73                uint8_t size = bits(machInst, 23, 22);
74                uint8_t u = bits(machInst, 16);
75                uint8_t opc = bits(machInst, 18, 17);
76                switch (opc) {
77                  case 0x0:
78                    return decodeSveBinDestrPred<SveSmax, SveUmax>(
79                            size, u, machInst, zdn, zm, pg);
80                  case 0x1:
81                    return decodeSveBinDestrPred<SveSmin, SveUmin>(
82                            size, u, machInst, zdn, zm, pg);
83                  case 0x2:
84                    return decodeSveBinDestrPred<SveSabd, SveUabd>(
85                            size, u, machInst, zdn, zm, pg);
86                  default:
87                    return new Unknown64(machInst);
88                }
89            }
90          case 0x2:
91            {
92                uint8_t size = bits(machInst, 23, 22);
93                uint8_t u = bits(machInst, 16);
94                uint8_t opc = bits(machInst, 18, 17);
95                switch (opc) {
96                  case 0x0:
97                    if (u == 0) {
98                        return decodeSveBinDestrPredU<SveMul>(
99                                size, machInst, zdn, zm, pg);
100                    } else {
101                        return new Unknown64(machInst);
102                    }
103                  case 0x1:
104                    return decodeSveBinDestrPred<SveSmulh, SveUmulh>(
105                            size, u, machInst, zdn, zm, pg);
106                  case 0x2:
107                    if (size == 0x2 || size == 0x3) {
108                        return decodeSveBinDestrPred<SveSdiv, SveUdiv>(
109                                size, u, machInst, zdn, zm, pg);
110                    } else {
111                        return new Unknown64(machInst);
112                    }
113                  case 0x3:
114                    if (size == 0x2 || size == 0x3) {
115                        return decodeSveBinDestrPred<SveSdivr, SveUdivr>(
116                                size, u, machInst, zdn, zm, pg);
117                    } else {
118                        return new Unknown64(machInst);
119                    }
120                }
121                break;
122            }
123          case 0x3:
124            {
125                uint8_t size = bits(machInst, 23, 22);
126                uint8_t opc = bits(machInst, 18, 16);
127
128                switch (opc) {
129                  case 0x0:
130                    return decodeSveBinDestrPredU<SveOrrPred>(
131                            size, machInst, zdn, zm, pg);
132                  case 0x1:
133                    return decodeSveBinDestrPredU<SveEorPred>(
134                            size, machInst, zdn, zm, pg);
135                  case 0x2:
136                    return decodeSveBinDestrPredU<SveAndPred>(
137                            size, machInst, zdn, zm, pg);
138                  case 0x3:
139                    return decodeSveBinDestrPredU<SveBicPred>(
140                            size, machInst, zdn, zm, pg);
141                  default:
142                    return new Unknown64(machInst);
143                }
144            }
145        }
146        return new Unknown64(machInst);
147    }  // decodeSveArithBinPred
148
149    StaticInstPtr
150    decodeSveIntReduc(ExtMachInst machInst)
151    {
152        IntRegIndex vd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
153        IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
154        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10);
155
156        uint8_t size = bits(machInst, 23, 22);
157
158        switch (bits(machInst, 20, 19)) {
159          case 0x0:
160            {
161                uint8_t u = bits(machInst, 16);
162                uint8_t opc = bits(machInst, 18, 17);
163                if (opc != 0x0 || (!u && size == 0x3)) {
164                    return new Unknown64(machInst);
165                } else {
166                    return decodeSveWideningReduc<SveSaddv, SveUaddv>(
167                            size, u, machInst, vd, zn, pg);
168                }
169            }
170          case 0x1:
171            {
172                uint8_t u = bits(machInst, 16);
173                uint8_t opc = bits(machInst, 18, 17);
174                switch (opc) {
175                  case 0x0:
176                    return decodeSveUnaryPred<SveSmaxv, SveUmaxv>(
177                            size, u, machInst, vd, zn, pg);
178                  case 0x1:
179                    return decodeSveUnaryPred<SveSminv, SveUminv>(
180                            size, u, machInst, vd, zn, pg);
181                  default:
182                    return new Unknown64(machInst);
183                }
184            }
185          case 0x2:
186            {
187                uint8_t opc = bits(machInst, 18, 17);
188                uint8_t merge = bits(machInst, 16);
189                switch (opc) {
190                  case 0x0:
191                    if (merge) {
192                        return decodeSveUnaryPredU<SveMovprfxPredM>(
193                                size, machInst, vd /* zd */, zn, pg);
194                    } else {
195                        return decodeSveUnaryPredU<SveMovprfxPredZ>(
196                                size, machInst, vd /* zd */, zn, pg);
197                    }
198                  default:
199                    return new Unknown64(machInst);
200                }
201            }
202          case 0x3:
203            {
204                uint8_t opc = bits(machInst, 18, 16);
205                switch (opc) {
206                  case 0x0:
207                    return decodeSveUnaryPredU<SveOrv>(
208                            size, machInst, vd, zn, pg);
209                  case 0x1:
210                    return decodeSveUnaryPredU<SveEorv>(
211                            size, machInst, vd, zn, pg);
212                  case 0x2:
213                    return decodeSveUnaryPredU<SveAndv>(
214                            size, machInst, vd, zn, pg);
215                  default:
216                    return new Unknown64(machInst);
217                }
218            }
219        }
220        return new Unknown64(machInst);
221    }  // decodeSveIntReduc
222
223    StaticInstPtr
224    decodeSveIntMulAdd(ExtMachInst machInst)
225    {
226        IntRegIndex zda = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
227        IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
228        IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 20, 16);
229        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10);
230
231        uint8_t size = bits(machInst, 23, 22);
232        uint8_t opc = (bits(machInst, 15) << 1) | bits(machInst, 13);
233        switch (opc) {
234          case 0x0:
235            return decodeSveTerPredS<SveMla>(
236                    size, machInst, zda, zn, zm, pg);
237          case 0x1:
238            return decodeSveTerPredS<SveMls>(
239                    size, machInst, zda, zn, zm, pg);
240          case 0x2:
241            return decodeSveTerPredS<SveMad>(
242                    size, machInst, zda /* zdn */, zn /* za */, zm, pg);
243          case 0x3:
244            return decodeSveTerPredS<SveMsb>(
245                    size, machInst, zda /* zdn */, zn /* za */, zm, pg);
246        }
247        return new Unknown64(machInst);
248    }  // decodeSveIntMulAdd
249
250    StaticInstPtr
251    decodeSveShiftByImmPred0(ExtMachInst machInst)
252    {
253        IntRegIndex zdn = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
254        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10);
255        uint8_t imm3 = (uint8_t) bits(machInst, 7, 5);
256
257        uint8_t tsize = (bits(machInst, 23, 22) << 2) | bits(machInst, 9, 8);
258        uint8_t esize = 0;
259        uint8_t size = 0;
260
261        if (tsize == 0x0) {
262            return new Unknown64(machInst);
263        } else if (tsize == 0x1) {
264            esize = 8;
265        } else if ((tsize & 0x0E) == 0x2) {
266            esize = 16;
267            size = 1;
268        } else if ((tsize & 0x0C) == 0x4) {
269            esize = 32;
270            size = 2;
271        } else if ((tsize & 0x08) == 0x8) {
272            esize = 64;
273            size = 3;
274        }
275
276        uint8_t opc = bits(machInst, 18, 16);
277        switch (opc) {
278            case 0x0:
279                {
280                    unsigned shiftAmt = 2 * esize - ((tsize << 3) | imm3);
281                    return decodeSveBinImmPredU<SveAsrImmPred>(
282                            size, machInst, zdn, shiftAmt, pg);
283                }
284            case 0x01:
285                {
286                    unsigned shiftAmt = 2 * esize - ((tsize << 3) | imm3);
287                    return decodeSveBinImmPredU<SveLsrImmPred>(
288                            size, machInst, zdn, shiftAmt, pg);
289                }
290            case 0x03:
291                {
292                    unsigned shiftAmt = ((tsize << 3) | imm3) - esize;
293                    return decodeSveBinImmPredU<SveLslImmPred>(
294                            size, machInst, zdn, shiftAmt, pg);
295                }
296            case 0x04:
297                {
298                    unsigned shiftAmt = 2 * esize - ((tsize << 3) | imm3);
299                    return decodeSveBinImmPredS<SveAsrd>(
300                            size, machInst, zdn, shiftAmt, pg);
301                }
302        }
303        return new Unknown64(machInst);
304    } // decodeSveShiftByImmPred0
305
306    StaticInstPtr
307    decodeSveShiftByVectorPred(ExtMachInst machInst)
308    {
309        IntRegIndex zdn = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
310        IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
311        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10);
312        uint8_t size = bits(machInst, 23, 22);
313        uint8_t opc = bits(machInst, 18, 16);
314        switch (opc) {
315            case 0:
316                return decodeSveBinDestrPredU<SveAsrPred>(
317                        size, machInst, zdn, zm, pg);
318            case 1:
319                return decodeSveBinDestrPredU<SveLsrPred>(
320                        size, machInst, zdn, zm, pg);
321            case 3:
322                return decodeSveBinDestrPredU<SveLslPred>(
323                        size, machInst, zdn, zm, pg);
324            case 4:
325                return decodeSveBinDestrPredU<SveAsrr>(
326                        size, machInst, zdn, zm, pg);
327            case 5:
328                return decodeSveBinDestrPredU<SveLsrr>(
329                        size, machInst, zdn, zm, pg);
330            case 7:
331                return decodeSveBinDestrPredU<SveLslr>(
332                        size, machInst, zdn, zm, pg);
333        }
334        return new Unknown64(machInst);
335    } // decodeSveShiftByVectorPred
336
337    StaticInstPtr
338    decodeSveShiftByWideElemsPred(ExtMachInst machInst)
339    {
340        IntRegIndex zdn = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
341        IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
342        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10);
343        uint8_t size = bits(machInst, 23, 22);
344        uint8_t opc = bits(machInst, 18, 16);
345        switch (opc) {
346            case 0x0:
347                return decodeSveBinDestrPredU<SveAsrWidePred>(
348                        size, machInst, zdn, zm, pg);
349            case 0x1:
350                return decodeSveBinDestrPredU<SveLsrWidePred>(
351                        size, machInst, zdn, zm, pg);
352            case 0x3:
353                return decodeSveBinDestrPredU<SveLslWidePred>(
354                        size, machInst, zdn, zm, pg);
355        }
356        return new Unknown64(machInst);
357    } // decodeSveShiftByWideElemsPred
358
359    StaticInstPtr
360    decodeSveShiftByImmPred(ExtMachInst machInst)
361    {
362        uint8_t b20_19 = bits(machInst, 20, 19);
363        uint8_t b23_22 = bits(machInst, 23, 22);
364
365        if (b20_19 == 0x0) {
366            return decodeSveShiftByImmPred0(machInst);
367        } else if (b20_19 == 0x2) {
368            return decodeSveShiftByVectorPred(machInst);
369        } else if (b20_19 == 0x3 && b23_22 != 0x3) {
370            return decodeSveShiftByWideElemsPred(machInst);
371        }
372        return new Unknown64(machInst);
373    }  // decodeSveShiftByImmPred
374
375    StaticInstPtr
376    decodeSveIntArithUnaryPred(ExtMachInst machInst)
377    {
378        IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
379        IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
380        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10);
381        unsigned esize = bits(machInst, 23, 22);
382        uint8_t opg = bits(machInst, 20, 19);
383        uint8_t opc = bits(machInst, 18, 16);
384        if (opg == 0x2) {
385            bool unsig = static_cast<bool>(opc & 1);
386            switch (opc) {
387                case 0:
388                case 1:
389                    if (esize == 0) break;
390                    if (unsig) {
391                        return decodeSveUnaryExtendFromBPredU<SveUxtb>(
392                                esize, machInst, zd, zn, pg);
393                    } else {
394                        return decodeSveUnaryExtendFromBPredU<SveSxtb>(
395                                esize, machInst, zd, zn, pg);
396                    }
397                case 2:
398                case 3:
399                    if (esize < 2) break;
400                    if (unsig) {
401                        return decodeSveUnaryExtendFromHPredU<SveUxth>(
402                                esize, machInst, zd, zn, pg);
403                    } else {
404                        return decodeSveUnaryExtendFromHPredU<SveSxth>(
405                                esize, machInst, zd, zn, pg);
406                    }
407                case 4:
408                case 5:
409                    if (esize != 3) break;
410                    if (unsig) {
411                        return new SveUxtw<uint32_t, uint64_t>(
412                                machInst, zd, zn, pg);
413                    } else {
414                        return new SveSxtw<uint32_t, uint64_t>(
415                                machInst, zd, zn, pg);
416                    }
417                case 6:
418                    return decodeSveUnaryPredS<SveAbs>(
419                            esize, machInst, zd, zn, pg);
420                case 7:
421                    return decodeSveUnaryPredS<SveNeg>(
422                            esize, machInst, zd, zn, pg);
423            }
424        } else if (opg == 0x3) {
425            switch (opc) {
426                case 0:
427                    return decodeSveUnaryPredS<SveCls>(
428                            esize, machInst, zd, zn, pg);
429                case 1:
430                    return decodeSveUnaryPredS<SveClz>(
431                            esize, machInst, zd, zn, pg);
432                case 2:
433                    return decodeSveUnaryPredU<SveCnt>(
434                            esize, machInst, zd, zn, pg);
435                case 3:
436                    return decodeSveUnaryPredU<SveCnot>(
437                            esize, machInst, zd, zn, pg);
438                case 4:
439                    return decodeSveUnaryPredF<SveFabs>(
440                            esize, machInst, zd, zn, pg);
441                case 5:
442                    return decodeSveUnaryPredF<SveFneg>(
443                            esize, machInst, zd, zn, pg);
444                case 6:
445                    return decodeSveUnaryPredU<SveNot>(
446                            esize, machInst, zd, zn, pg);
447                    break;
448            }
449        }
450        return new Unknown64(machInst);
451    }  // decodeSveIntArithUnaryPred
452
453    StaticInstPtr
454    decodeSveIntArithUnpred(ExtMachInst machInst)
455    {
456        IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
457        IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
458        IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 20, 16);
459
460        uint8_t opc = (uint8_t) bits(machInst, 12, 10);
461        uint8_t size = (uint8_t) bits(machInst, 23, 22);
462
463        switch (opc) {
464            case 0x0:
465                return decodeSveBinUnpredU<SveAddUnpred>(size, machInst,
466                        zd, zn, zm);
467            case 0x1:
468                return decodeSveBinUnpredU<SveSubUnpred>(size, machInst,
469                        zd, zn, zm);
470            case 0x4:
471                return decodeSveBinUnpredS<SveSqadd>(size, machInst,
472                        zd, zn, zm);
473            case 0x5:
474                return decodeSveBinUnpredU<SveUqadd>(size, machInst,
475                        zd, zn, zm);
476            case 0x6:
477                return decodeSveBinUnpredS<SveSqsub>(size, machInst,
478                        zd, zn, zm);
479            case 0x7:
480                return decodeSveBinUnpredU<SveUqsub>(size, machInst,
481                        zd, zn, zm);
482        }
483
484        return new Unknown64(machInst);
485    }  // decodeSveIntArithUnpred
486
487    StaticInstPtr
488    decodeSveIntLogUnpred(ExtMachInst machInst)
489    {
490        IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
491        IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
492        IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 20, 16);
493        uint8_t opc = (uint8_t) (bits(machInst, 23, 22) << 3
494                | bits(machInst, 12, 10));
495
496        switch (opc) {
497            case 0x4:
498                return new SveAndUnpred<uint64_t>(machInst, zd, zn, zm);
499            case 0xc:
500                return new SveOrrUnpred<uint64_t>(machInst, zd, zn, zm);
501            case 0x14:
502                return new SveEorUnpred<uint64_t>(machInst, zd, zn, zm);
503            case 0x1c:
504                return new SveBicUnpred<uint64_t>(machInst, zd, zn, zm);
505        }
506
507        return new Unknown64(machInst);
508    }  // decodeSveIntLogUnpred
509
510    StaticInstPtr
511    decodeSveIndexGen(ExtMachInst machInst)
512    {
513        IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
514        uint8_t size = (uint8_t) bits(machInst, 23, 22);
515        uint8_t grp = (uint8_t) bits(machInst, 11, 10);
516
517        switch (grp) {
518            case 0:
519                { // INDEX (immediate)
520                    int8_t imm5 = sext<5>(bits(machInst, 9, 5));
521                    int8_t imm5b = sext<5>(bits(machInst, 20, 16));
522                    switch (size) {
523                        case 0:
524                            return new SveIndexII<int8_t>(machInst,
525                                    zd, imm5, imm5b);
526                        case 1:
527                            return new SveIndexII<int16_t>(machInst,
528                                    zd, imm5, imm5b);
529                        case 2:
530                            return new SveIndexII<int32_t>(machInst,
531                                    zd, imm5, imm5b);
532                        case 3:
533                            return new SveIndexII<int64_t>(machInst,
534                                    zd, imm5, imm5b);
535                    }
536                    break;
537                }
538            case 1:
539                { // INDEX (scalar, immediate)
540                    int8_t imm5 = sext<5>(bits(machInst, 20, 16));
541                    IntRegIndex zn = (IntRegIndex) (uint8_t) bits(
542                            machInst, 9, 5);
543                    switch (size) {
544                        case 0:
545                            return new SveIndexRI<int8_t>(machInst,
546                                    zd, zn, imm5);
547                        case 1:
548                            return new SveIndexRI<int16_t>(machInst,
549                                    zd, zn, imm5);
550                        case 2:
551                            return new SveIndexRI<int32_t>(machInst,
552                                    zd, zn, imm5);
553                        case 3:
554                            return new SveIndexRI<int64_t>(machInst,
555                                    zd, zn, imm5);
556                    }
557                    break;
558                }
559            case 2:
560                { // INDEX (immediate, scalar)
561                    int8_t imm5 = sext<5>(bits(machInst, 9, 5));
562                    IntRegIndex zm = (IntRegIndex) (uint8_t) bits(
563                            machInst, 20, 16);
564                    switch (size) {
565                        case 0:
566                            return new SveIndexIR<int8_t>(machInst,
567                                    zd, imm5, zm);
568                        case 1:
569                            return new SveIndexIR<int16_t>(machInst,
570                                    zd, imm5, zm);
571                        case 2:
572                            return new SveIndexIR<int32_t>(machInst,
573                                    zd, imm5, zm);
574                        case 3:
575                            return new SveIndexIR<int64_t>(machInst,
576                                    zd, imm5, zm);
577                    }
578                    break;
579                }
580            case 3:
581                { // INDEX (scalars)
582                    IntRegIndex zn = (IntRegIndex) (uint8_t) bits(
583                            machInst, 9, 5);
584                    IntRegIndex zm = (IntRegIndex) (uint8_t) bits(
585                            machInst, 20, 16);
586                    switch (size) {
587                        case 0:
588                            return new SveIndexRR<int8_t>(machInst,
589                                    zd, zn, zm);
590                        case 1:
591                            return new SveIndexRR<int16_t>(machInst,
592                                    zd, zn, zm);
593                        case 2:
594                            return new SveIndexRR<int32_t>(machInst,
595                                    zd, zn, zm);
596                        case 3:
597                            return new SveIndexRR<int64_t>(machInst,
598                                    zd, zn, zm);
599                    }
600                }
601        }
602        return new Unknown64(machInst);
603    }  // decodeSveIndexGen
604
605    StaticInstPtr
606    decodeSveStackAlloc(ExtMachInst machInst)
607    {
608        uint8_t b23_22 = bits(machInst, 23, 22);
609        uint8_t b11 = bits(machInst, 11);
610        if ((b23_22 & 0x2) == 0x0 && b11 == 0x0) {
611            IntRegIndex rd = makeSP(
612                (IntRegIndex) (uint8_t) bits(machInst, 4, 0));
613            IntRegIndex rn = makeSP(
614                (IntRegIndex) (uint8_t) bits(machInst, 20, 16));
615            uint64_t imm = sext<6>(bits(machInst, 10, 5));
616            if ((b23_22 & 0x1) == 0x0) {
617                return new AddvlXImm(machInst, rd, rn, imm);
618            } else {
619                return new AddplXImm(machInst, rd, rn, imm);
620            }
621        } else if (b23_22 == 0x2 && b11 == 0x0) {
622            IntRegIndex rd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
623            uint64_t imm = sext<6>(bits(machInst, 10, 5));
624            if (bits(machInst, 20, 16) == 0x1f) {
625                return new SveRdvl(machInst, rd, imm);
626            }
627        }
628        return new Unknown64(machInst);
629    }  // decodeSveStackAlloc
630
631    StaticInstPtr
632    decodeSveShiftByWideElemsUnpred(ExtMachInst machInst)
633    {
634        IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
635        IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
636        IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 20, 16);
637        uint8_t size = bits(machInst, 23, 22);
638        uint8_t opc = (uint8_t) bits(machInst, 11, 10);
639        switch (opc) {
640            case 0x0:
641                return decodeSveBinUnpredU<SveAsrWideUnpred>(
642                        size, machInst, zd, zn, zm);
643            case 0x1:
644                return decodeSveBinUnpredU<SveLsrWideUnpred>(
645                        size, machInst, zd, zn, zm);
646            case 0x3:
647                return decodeSveBinUnpredU<SveLslWideUnpred>(
648                        size, machInst, zd, zn, zm);
649        }
650        return new Unknown64(machInst);
651    }  // decodeSveShiftByWideElemsUnpred
652
653    StaticInstPtr
654    decodeSveShiftByImmUnpredB(ExtMachInst machInst)
655    {
656        IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
657        IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
658        uint8_t imm3 = (uint8_t) bits(machInst, 18, 16);
659
660        uint8_t tsize = (bits(machInst, 23, 22) << 2) | bits(machInst, 20, 19);
661        uint8_t esize = 0;
662        uint8_t size = 0;
663        if (tsize == 0x0) {
664            return new Unknown64(machInst);
665        } else if (tsize == 0x1) {
666            esize = 8;
667        } else if ((tsize & 0x0E) == 0x2) {
668            esize = 16;
669            size = 1;
670        } else if ((tsize & 0x0C) == 0x4) {
671            esize = 32;
672            size = 2;
673        } else if ((tsize & 0x08) == 0x8) {
674            esize = 64;
675            size = 3;
676        }
677
678        uint8_t opc = bits(machInst, 11, 10);
679        switch (opc) {
680            case 0x00:
681                {
682                    unsigned shiftAmt = 2 * esize - ((tsize << 3) | imm3);
683                    return decodeSveBinImmUnpredU<SveAsrImmUnpred>(
684                            size, machInst, zd, zn, shiftAmt);
685                }
686            case 0x01:
687                {
688                    unsigned shiftAmt = 2 * esize - ((tsize << 3) | imm3);
689                    return decodeSveBinImmUnpredU<SveLsrImmUnpred>(
690                            size, machInst, zd, zn, shiftAmt);
691                }
692            case 0x03:
693                {
694                    unsigned shiftAmt = ((tsize << 3) | imm3) - esize;
695                    return decodeSveBinImmUnpredU<SveLslImmUnpred>(
696                            size, machInst, zd, zn, shiftAmt);
697                }
698        }
699
700        return new Unknown64(machInst);
701    }  // decodeSveShiftByImmUnpredB
702
703    StaticInstPtr
704    decodeSveShiftByImmUnpred(ExtMachInst machInst)
705    {
706        if (bits(machInst, 12)) {
707            return decodeSveShiftByImmUnpredB(machInst);
708        } else {
709            return decodeSveShiftByWideElemsUnpred(machInst);
710        }
711        return new Unknown64(machInst);
712    }  // decodeSveShiftByImmUnpred
713
714    StaticInstPtr
715    decodeSveCompVecAddr(ExtMachInst machInst)
716    {
717        IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
718        IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
719        IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 20, 16);
720        uint8_t mult = 1 << bits(machInst, 11, 10);
721
722        uint8_t opc = bits(machInst, 23, 22);
723
724        switch (opc) {
725          case 0x0:
726            return new SveAdr<uint64_t>(machInst, zd, zn, zm, mult,
727                    SveAdr<uint64_t>::SveAdrOffsetUnpackedSigned);
728          case 0x1:
729            return new SveAdr<uint64_t>(machInst, zd, zn, zm, mult,
730                    SveAdr<uint64_t>::SveAdrOffsetUnpackedUnsigned);
731          case 0x2:
732            return new SveAdr<uint32_t>(machInst, zd, zn, zm, mult,
733                    SveAdr<uint32_t>::SveAdrOffsetPacked);
734          case 0x3:
735            return new SveAdr<uint64_t>(machInst, zd, zn, zm, mult,
736                    SveAdr<uint64_t>::SveAdrOffsetPacked);
737        }
738        return new Unknown64(machInst);
739    }  // decodeSveCompVecAddr
740
741    StaticInstPtr
742    decodeSveIntMiscUnpred(ExtMachInst machInst)
743    {
744        IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
745        IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
746
747        uint8_t size = bits(machInst, 23, 22);
748        uint8_t opc = bits(machInst, 11, 10);
749        switch (opc) {
750          case 0x0:
751            // SVE floating-point trig select coefficient
752            {
753                if (size == 0) {
754                    break;
755                }
756                IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst,
757                                                              20, 16);
758                return decodeSveBinUnpredF<SveFtssel>(
759                    size, machInst, zd, zn, zm);
760            }
761          case 0x2:
762            // SVE floating-point exponential accelerator
763            if (size == 0) {
764                break;
765            }
766            return decodeSveUnaryUnpredF<SveFexpa>(size, machInst, zd, zn);
767          case 0x3:
768            // SVE constructive prefix (unpredicated)
769            if (size == 0x0 && bits(machInst, 20, 16) == 0x0) {
770                return new SveMovprfxUnpred<uint64_t>(machInst, zd, zn);
771            }
772            break;
773        }
774        return new Unknown64(machInst);
775    }  // decodeSveIntMiscUnpred
776
777    StaticInstPtr
778    decodeSveElemCount(ExtMachInst machInst)
779    {
780        uint8_t opc20 = (uint8_t) bits(machInst, 20);
781        uint8_t b13_12 = (uint8_t) bits(machInst, 13, 12);
782        uint8_t opc11 = (uint8_t) bits(machInst, 11);
783        uint8_t opc10 = (uint8_t) bits(machInst, 10);
784        uint8_t opc11_10 = (uint8_t) bits(machInst, 11, 10);
785        if (b13_12 == 0) {
786            uint8_t pattern = (uint8_t) bits(machInst, 9, 5);
787            uint8_t imm4 = (uint8_t) bits(machInst, 19, 16) + 1;
788            IntRegIndex zdn = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
789            unsigned size = (unsigned) bits(machInst, 23, 22);
790            if (opc20) {
791                if (opc11 == 0) {
792                    if (opc10) {
793                        return decodeSveElemIntCountLU<SveDecv>(size,
794                                machInst, zdn, pattern, imm4);
795                    } else {
796                        return decodeSveElemIntCountLU<SveIncv>(size,
797                                machInst, zdn, pattern, imm4);
798                    }
799                }
800            } else {
801                if (opc11) {
802                    if (opc10) {
803                        return decodeSveElemIntCountLU<SveUqdecv>(size,
804                                machInst, zdn, pattern, imm4);
805                    } else {
806                        return decodeSveElemIntCountLS<SveSqdecv>(size,
807                                machInst, zdn, pattern, imm4);
808                    }
809                } else {
810                    if (opc10) {
811                        return decodeSveElemIntCountLU<SveUqincv>(size,
812                                machInst, zdn, pattern, imm4);
813                    } else {
814                        return decodeSveElemIntCountLS<SveSqincv>(size,
815                                machInst, zdn, pattern, imm4);
816                    }
817                }
818            }
819        } else if (b13_12 == 3) {
820            uint8_t pattern = (uint8_t) bits(machInst, 9, 5);
821            uint8_t imm4 = (uint8_t) bits(machInst, 19, 16) + 1;
822            IntRegIndex rdn = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
823            unsigned size = (unsigned) bits(machInst, 23, 22);
824            switch (opc11_10) {
825                case 0:
826                    if (opc20) {
827                        return decodeSveElemIntCountS<SveSqinc>(size,
828                                machInst, rdn, pattern, imm4);
829                    } else {
830                        return decodeSveElemIntCountS<SveSqinc32>(size,
831                                machInst, rdn, pattern, imm4);
832                    }
833                case 1:
834                    if (opc20) {
835                        return decodeSveElemIntCountU<SveUqinc>(size,
836                                machInst, rdn, pattern, imm4);
837                    } else {
838                        return decodeSveElemIntCountU<SveUqinc32>(size,
839                                machInst, rdn, pattern, imm4);
840                    }
841                case 2:
842                    if (opc20) {
843                        return decodeSveElemIntCountS<SveSqdec>(size,
844                                machInst, rdn, pattern, imm4);
845                    } else {
846                        return decodeSveElemIntCountS<SveSqdec32>(size,
847                                machInst, rdn, pattern, imm4);
848                    }
849                case 3:
850                    if (opc20) {
851                        return decodeSveElemIntCountU<SveUqdec>(size,
852                                machInst, rdn, pattern, imm4);
853                    } else {
854                        return decodeSveElemIntCountU<SveUqdec32>(size,
855                                machInst, rdn, pattern, imm4);
856                    }
857            }
858        } else if (opc20 && b13_12 == 2 && !(opc11_10 & 0x2)) {
859            uint8_t pattern = (uint8_t) bits(machInst, 9, 5);
860            uint8_t imm4 = (uint8_t) bits(machInst, 19, 16) + 1;
861            IntRegIndex rdn = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
862            unsigned size = (unsigned) bits(machInst, 23, 22);
863            if (opc11_10 & 0x1) {
864                return decodeSveElemIntCountU<SveDec>(size, machInst,
865                        rdn, pattern, imm4);
866            } else {
867                return decodeSveElemIntCountU<SveInc>(size, machInst,
868                        rdn, pattern, imm4);
869            }
870        } else if (!opc20 && b13_12 == 2 && opc11_10 == 0) {
871            uint8_t pattern = (uint8_t) bits(machInst, 9, 5);
872            uint8_t imm4 = (uint8_t) bits(machInst, 19, 16) + 1;
873            IntRegIndex rd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
874            unsigned size = (unsigned) bits(machInst, 23, 22);
875            return decodeSveElemIntCountU<SveCntx>(size, machInst,
876                    rd, pattern, imm4);
877        }
878        return new Unknown64(machInst);
879    }  // decodeSveElemCount
880
881    StaticInstPtr
882    decodeSveLogMaskImm(ExtMachInst machInst)
883    {
884        IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
885        bool n = bits(machInst, 17);
886        uint8_t immr = bits(machInst, 16, 11);
887        uint8_t imms = bits(machInst, 10, 5);
888
889        // Decode bitmask
890        // len = MSB(n:NOT(imms)), len < 1 is undefined
891        uint8_t len = 0;
892        if (n) {
893            len = 6;
894        } else if (imms == 0x3f || imms == 0x3e) {
895            return new Unknown64(machInst);
896        } else {
897            len = findMsbSet(imms ^ 0x3f);
898        }
899        // Generate r, s, and size
900        uint64_t r = bits(immr, len - 1, 0);
901        uint64_t s = bits(imms, len - 1, 0);
902        uint8_t size = 1 << len;
903        if (s == size - 1)
904            return new Unknown64(machInst);
905        // Generate the pattern with s 1s, rotated by r, with size bits
906        uint64_t pattern = mask(s + 1);
907        if (r) {
908            pattern = (pattern >> r) | (pattern << (size - r));
909            pattern &= mask(size);
910        }
911        // Replicate that to fill up the immediate
912        for (unsigned i = 1; i < (64 / size); i *= 2)
913            pattern |= (pattern << (i * size));
914        uint64_t imm = pattern;
915
916        if (bits(machInst, 19, 18) == 0x0) {
917            if (bits(machInst, 23, 22) == 0x3) {
918                return new SveDupm<uint64_t>(machInst, zd, imm);
919            } else {
920                switch (bits(machInst, 23, 22)) {
921                  case 0x0:
922                    return new SveOrrImm<uint64_t>(machInst, zd, imm);
923                  case 0x1:
924                    return new SveEorImm<uint64_t>(machInst, zd, imm);
925                  case 0x2:
926                    return new SveAndImm<uint64_t>(machInst, zd, imm);
927                }
928            }
929        }
930
931        return new Unknown64(machInst);
932    }  // decodeSveLogMaskImm
933
934    StaticInstPtr
935    decodeSveIntWideImmPred(ExtMachInst machInst)
936    {
937        IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
938        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 19, 16);
939        uint8_t size = bits(machInst, 23, 22);
940
941        if (bits(machInst, 15) == 0x0) {
942            uint64_t imm = bits(machInst, 12, 5);
943            uint8_t sh = bits(machInst, 13);
944            uint8_t m = bits(machInst, 14);
945            if (sh) {
946                if (size == 0x0) {
947                    return new Unknown64(machInst);
948                }
949                imm <<= 8;
950            }
951            if (m) {
952                if (sh) {
953                    return decodeSveWideImmPredU<SveCpyImmMerge>(
954                        size, machInst, zd, sext<16>(imm), pg);
955                } else {
956                    return decodeSveWideImmPredU<SveCpyImmMerge>(
957                        size, machInst, zd, sext<8>(imm), pg);
958                }
959            } else {
960                if (sh) {
961                    return decodeSveWideImmPredU<SveCpyImmZero>(
962                        size, machInst, zd, sext<16>(imm), pg,
963                        false /* isMerging */);
964                } else {
965                    return decodeSveWideImmPredU<SveCpyImmZero>(
966                        size, machInst, zd, sext<8>(imm), pg,
967                        false /* isMerging */);
968                }
969            }
970        } else if (bits(machInst, 15, 13) == 0x6 && size != 0x0) {
971            uint64_t imm = vfp_modified_imm(bits(machInst, 12, 5),
972                decode_fp_data_type(size));
973            return decodeSveWideImmPredF<SveFcpy>(
974                        size, machInst, zd, imm, pg);
975        }
976
977        return new Unknown64(machInst);
978    }  // decodeSveIntWideImmPred
979
980    StaticInstPtr
981    decodeSvePermExtract(ExtMachInst machInst)
982    {
983        uint8_t b23_22 = (unsigned) bits(machInst, 23, 22);
984        if (!b23_22) {
985            uint8_t position =
986                bits(machInst, 20, 16) << 3 | bits(machInst, 12, 10);
987            IntRegIndex zdn = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
988            IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
989            return new SveExt<uint8_t>(machInst, zdn, zm, position);
990        }
991        return new Unknown64(machInst);
992    }  // decodeSvePermExtract
993
994    StaticInstPtr
995    decodeSvePermUnpred(ExtMachInst machInst)
996    {
997        uint8_t b12_10 = bits(machInst, 12, 10);
998        if (b12_10 == 0x4) {
999            unsigned size = (unsigned) bits(machInst, 23, 22);
1000            IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
1001            IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
1002            IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 20, 16);
1003            return decodeSveBinUnpredU<SveTbl>(size, machInst, zd, zn, zm);
1004        } else if (bits(machInst, 20, 16) == 0x0 && b12_10 == 0x6) {
1005            uint8_t size = bits(machInst, 23, 22);
1006            IntRegIndex rn = makeSP(
1007                    (IntRegIndex) (uint8_t) bits(machInst, 9, 5));
1008            IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
1009            return decodeSveUnaryUnpredU<SveDupScalar>(size, machInst, zd, rn);
1010        } else if (bits(machInst, 20, 16) == 0x4 && b12_10 == 0x6) {
1011            uint8_t size = bits(machInst, 23, 22);
1012            IntRegIndex zdn = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
1013            IntRegIndex rm = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
1014            return decodeSveUnaryUnpredU<SveInsr>(size, machInst, zdn, rm);
1015        } else if (bits(machInst, 20, 16) == 0x14 && b12_10 == 0x6) {
1016            uint8_t size = bits(machInst, 23, 22);
1017            IntRegIndex zdn = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
1018            IntRegIndex vm = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
1019            return decodeSveUnaryUnpredU<SveInsrf>(size, machInst, zdn, vm);
1020        } else if (bits(machInst, 20, 16) == 0x18 && b12_10 == 0x6) {
1021            uint8_t size = bits(machInst, 23, 22);
1022            IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
1023            IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
1024            return decodeSveUnaryUnpredU<SveRevv>(size, machInst, zd, zn);
1025        } else if (b12_10 == 0x0 && bits(machInst, 20, 16) != 0x0) {
1026            uint8_t imm =
1027                bits(machInst, 23, 22) << 5 | // imm3h
1028                bits(machInst, 20) << 4 |     // imm3l
1029                bits(machInst, 19, 16);       // tsz
1030            IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
1031            IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
1032            if (imm & 0x1) {
1033                imm >>= 1;
1034                return new SveDupIdx<uint8_t>(machInst, zd, zn, imm);
1035            } else if (imm & 0x2) {
1036                imm >>= 2;
1037                return new SveDupIdx<uint16_t>(machInst, zd, zn, imm);
1038            } else if (imm & 0x4) {
1039                imm >>= 3;
1040                return new SveDupIdx<uint32_t>(machInst, zd, zn, imm);
1041            } else if (imm & 0x8) {
1042                imm >>= 4;
1043                return new SveDupIdx<uint64_t>(machInst, zd, zn, imm);
1044            } else if (imm & 0x10) {
1045                imm >>= 5;
1046                return new SveDupIdx<__uint128_t>(machInst, zd, zn, imm);
1047            }
1048            return new Unknown64(machInst);
1049        } else if (bits(machInst, 23, 22) != 0x0 &&
1050                   bits(machInst, 20, 18) == 0x4 && b12_10 == 0x6) {
1051            unsigned size = (unsigned) bits(machInst, 23, 22);
1052            IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
1053            IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
1054            if (bits(machInst, 17)) {
1055                if (bits(machInst, 16)) {
1056                    return decodeSveUnpackU<SveUunpkhi>(size, machInst,
1057                                                        zd, zn);
1058                } else {
1059                    return decodeSveUnpackU<SveUunpklo>(size, machInst,
1060                                                        zd, zn);
1061                }
1062            } else {
1063                if (bits(machInst, 16)) {
1064                    return decodeSveUnpackS<SveSunpkhi>(size, machInst,
1065                                                        zd, zn);
1066                } else {
1067                    return decodeSveUnpackS<SveSunpklo>(size, machInst,
1068                                                        zd, zn);
1069                }
1070            }
1071        }
1072        return new Unknown64(machInst);
1073    }  // decodeSvePermUnpred
1074
1075    StaticInstPtr
1076    decodeSvePermPredicates(ExtMachInst machInst)
1077    {
1078        if (bits(machInst, 20) == 0x0 && bits(machInst, 12, 11) != 0x3 &&
1079                bits(machInst, 9) == 0x0 && bits(machInst, 4) == 0x0) {
1080            IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
1081            IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
1082            IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 20, 16);
1083
1084            uint8_t size = bits(machInst, 23, 22);
1085
1086            uint8_t opc = bits(machInst, 12, 10);
1087
1088            switch (opc) {
1089              case 0x0:
1090                return decodeSveBinUnpredU<SveZip1Pred>(size,
1091                        machInst, zd, zn, zm);
1092              case 0x1:
1093                return decodeSveBinUnpredU<SveZip2Pred>(size,
1094                        machInst, zd, zn, zm);
1095              case 0x2:
1096                return decodeSveBinUnpredU<SveUzp1Pred>(size,
1097                        machInst, zd, zn, zm);
1098              case 0x3:
1099                return decodeSveBinUnpredU<SveUzp2Pred>(size,
1100                        machInst, zd, zn, zm);
1101              case 0x4:
1102                return decodeSveBinUnpredU<SveTrn1Pred>(size,
1103                        machInst, zd, zn, zm);
1104              case 0x5:
1105                return decodeSveBinUnpredU<SveTrn2Pred>(size,
1106                        machInst, zd, zn, zm);
1107            }
1108        } else if (bits(machInst, 23, 22) == 0x0 &&
1109                bits(machInst, 20, 17) == 0x8 && bits(machInst, 12, 9) == 0x0
1110                && bits(machInst, 4) == 0x0) {
1111            IntRegIndex pd = (IntRegIndex) (uint8_t) bits(machInst, 3, 0);
1112            IntRegIndex pn = (IntRegIndex) (uint8_t) bits(machInst, 8, 5);
1113            if (bits(machInst, 16)) {
1114                return new SvePunpkhi<uint8_t, uint16_t>(machInst, pd, pn);
1115            } else {
1116                return new SvePunpklo<uint8_t, uint16_t>(machInst, pd, pn);
1117            }
1118        } else if (bits(machInst, 20, 16) == 0x14 &&
1119                bits(machInst, 12, 9) == 0x00 && bits(machInst, 4) == 0) {
1120            uint8_t size = bits(machInst, 23, 22);
1121            IntRegIndex pd = (IntRegIndex) (uint8_t) bits(machInst, 3, 0);
1122            IntRegIndex pn = (IntRegIndex) (uint8_t) bits(machInst, 8, 5);
1123            return decodeSveUnaryUnpredU<SveRevp>(size, machInst, pd, pn);
1124        }
1125        return new Unknown64(machInst);
1126    }  // decodeSvePermPredicates
1127
1128    StaticInstPtr
1129    decodeSvePermIntlv(ExtMachInst machInst)
1130    {
1131        IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
1132        IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
1133        IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 20, 16);
1134
1135        uint8_t size = bits(machInst, 23, 22);
1136
1137        uint8_t opc = bits(machInst, 12, 10);
1138
1139        switch (opc) {
1140          case 0x0:
1141            return decodeSveBinUnpredU<SveZip1>(size, machInst, zd, zn, zm);
1142          case 0x1:
1143            return decodeSveBinUnpredU<SveZip2>(size, machInst, zd, zn, zm);
1144          case 0x2:
1145            return decodeSveBinUnpredU<SveUzp1>(size, machInst, zd, zn, zm);
1146          case 0x3:
1147            return decodeSveBinUnpredU<SveUzp2>(size, machInst, zd, zn, zm);
1148          case 0x4:
1149            return decodeSveBinUnpredU<SveTrn1>(size, machInst, zd, zn, zm);
1150          case 0x5:
1151            return decodeSveBinUnpredU<SveTrn2>(size, machInst, zd, zn, zm);
1152        }
1153        return new Unknown64(machInst);
1154    }  // decodeSvePermIntlv
1155
1156    StaticInstPtr
1157    decodeSvePermPred(ExtMachInst machInst)
1158    {
1159        uint8_t b13 = bits(machInst, 13);
1160        uint8_t b23 = bits(machInst, 23);
1161        switch (bits(machInst, 20, 16)) {
1162          case 0x0:
1163            if (!b13) {
1164                uint8_t size = bits(machInst, 23, 22);
1165                IntRegIndex pg = (IntRegIndex)(uint8_t) bits(machInst, 12, 10);
1166                IntRegIndex vn = (IntRegIndex)(uint8_t) bits(machInst, 9, 5);
1167                IntRegIndex zd = (IntRegIndex)(uint8_t) bits(machInst, 4, 0);
1168                return decodeSveUnaryPredU<SveCpySimdFpScalar>(size,
1169                        machInst, zd, vn, pg);
1170            }
1171            break;
1172          case 0x1:
1173            if (!b13 && b23) {
1174                // sve_int_perm_compact
1175                IntRegIndex pg = (IntRegIndex)(uint8_t) bits(machInst, 12, 10);
1176                IntRegIndex zn = (IntRegIndex)(uint8_t) bits(machInst, 9, 5);
1177                IntRegIndex zd = (IntRegIndex)(uint8_t) bits(machInst, 4, 0);
1178                if (bits(machInst, 22)) {
1179                    return new SveCompact<uint64_t>(machInst, zd, zn, pg);
1180                } else {
1181                    return new SveCompact<uint32_t>(machInst, zd, zn, pg);
1182                }
1183            }
1184            break;
1185          case 0x8:
1186            if (b13) {
1187                uint8_t size = bits(machInst, 23, 22);
1188                IntRegIndex pg = (IntRegIndex)(uint8_t) bits(machInst, 12, 10);
1189                IntRegIndex rn = makeSP(
1190                        (IntRegIndex)(uint8_t) bits(machInst, 9, 5));
1191                IntRegIndex zd = (IntRegIndex)(uint8_t) bits(machInst, 4, 0);
1192                return decodeSveUnaryPredU<SveCpyScalar>(size,
1193                        machInst, zd, rn, pg);
1194            }
1195            break;
1196          case 0xC:
1197            if (!b13) {
1198                uint8_t size = bits(machInst, 23, 22);
1199                IntRegIndex pg = (IntRegIndex)(uint8_t) bits(machInst, 12, 10);
1200                IntRegIndex zdn = (IntRegIndex)(uint8_t) bits(machInst, 4, 0);
1201                IntRegIndex zm = (IntRegIndex)(uint8_t) bits(machInst, 9, 5);
1202                return decodeSveBinDestrPredU<SveSplice>(size, machInst,
1203                        zdn, zm, pg);
1204            }
1205            break;
1206        }
1207        switch (bits(machInst, 20, 17)) {
1208          case 0x0:
1209            if (b13) {
1210                uint8_t AB = bits(machInst, 16);
1211                uint8_t size = bits(machInst, 23, 22);
1212                IntRegIndex pg = (IntRegIndex)(uint8_t) bits(machInst, 12, 10);
1213                IntRegIndex zn = (IntRegIndex)(uint8_t) bits(machInst, 9, 5);
1214                IntRegIndex rd = (IntRegIndex)(uint8_t) bits(machInst, 4, 0);
1215                if (!AB) {
1216                    return decodeSveUnaryPredU<SveLasta>(size,
1217                            machInst, rd, zn, pg);
1218                } else {
1219                    return decodeSveUnaryPredU<SveLastb>(size,
1220                            machInst, rd, zn, pg);
1221                }
1222            }
1223            break;
1224          case 0x1:
1225            if (!b13) {
1226                uint8_t AB = bits(machInst, 16);
1227                uint8_t size = bits(machInst, 23, 22);
1228                IntRegIndex pg = (IntRegIndex)(uint8_t) bits(machInst, 12, 10);
1229                IntRegIndex zn = (IntRegIndex)(uint8_t) bits(machInst, 9, 5);
1230                IntRegIndex vd = (IntRegIndex)(uint8_t) bits(machInst, 4, 0);
1231                if (!AB) {
1232                    return decodeSveUnaryPredU<SveLastaf>(size,
1233                            machInst, vd, zn, pg);
1234                } else {
1235                    return decodeSveUnaryPredU<SveLastbf>(size,
1236                            machInst, vd, zn, pg);
1237                }
1238            }
1239            break;
1240          case 0x4:
1241            if (!b13) {
1242                uint8_t AB = bits(machInst, 16);
1243                uint8_t size = bits(machInst, 23, 22);
1244                IntRegIndex pg = (IntRegIndex)(uint8_t) bits(machInst, 12, 10);
1245                IntRegIndex zm = (IntRegIndex)(uint8_t) bits(machInst, 9, 5);
1246                IntRegIndex zdn = (IntRegIndex)(uint8_t) bits(machInst, 4, 0);
1247                if (!AB) {
1248                    return decodeSveUnaryPredU<SveClastav>(size,
1249                            machInst, zdn, zm, pg);
1250                } else {
1251                    return decodeSveUnaryPredU<SveClastbv>(size,
1252                            machInst, zdn, zm, pg);
1253                }
1254            }
1255            break;
1256          case 0x5:
1257            if (!b13) {
1258                uint8_t AB = bits(machInst, 16);
1259                uint8_t size = bits(machInst, 23, 22);
1260                IntRegIndex pg = (IntRegIndex)(uint8_t) bits(machInst, 12, 10);
1261                IntRegIndex zm = (IntRegIndex)(uint8_t) bits(machInst, 9, 5);
1262                IntRegIndex zdn = (IntRegIndex)(uint8_t) bits(machInst, 4, 0);
1263                if (!AB) {
1264                    return decodeSveUnaryPredU<SveClastaf>(size,
1265                            machInst, zdn, zm, pg);
1266                } else {
1267                    return decodeSveUnaryPredU<SveClastbf>(size,
1268                            machInst, zdn, zm, pg);
1269                }
1270            }
1271            break;
1272          case 0x8:
1273            if (b13) {
1274                uint8_t AB = bits(machInst, 16);
1275                uint8_t size = bits(machInst, 23, 22);
1276                IntRegIndex pg = (IntRegIndex)(uint8_t) bits(machInst, 12, 10);
1277                IntRegIndex zm = (IntRegIndex)(uint8_t) bits(machInst, 9, 5);
1278                IntRegIndex rdn = (IntRegIndex)(uint8_t) bits(machInst, 4, 0);
1279                if (!AB) {
1280                    return decodeSveUnaryPredU<SveClasta>(size,
1281                            machInst, rdn, zm, pg);
1282                } else {
1283                    return decodeSveUnaryPredU<SveClastb>(size,
1284                            machInst, rdn, zm, pg);
1285                }
1286            }
1287            break;
1288        }
1289        if (bits(machInst, 20, 18) == 0x1 && !b13) {
1290            unsigned size = (unsigned) bits(machInst, 23, 22);
1291            IntRegIndex pg = (IntRegIndex)(uint8_t) bits(machInst, 12, 10);
1292            IntRegIndex zn = (IntRegIndex)(uint8_t) bits(machInst, 9, 5);
1293            IntRegIndex zd = (IntRegIndex)(uint8_t) bits(machInst, 4, 0);
1294            uint8_t opc17_16 = bits(machInst, 17, 16);
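            // Reverse within elements: REVB (halfword, word and doubleword
            // elements), REVH (word and doubleword), REVW (doubleword only)
            // and RBIT (any element size); other size/opc combinations fall
            // through to Unknown64.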
1295            switch (opc17_16) {
1296                case 0x00:
1297                    switch (size) {
1298                        case 1:
1299                            return new SveRevb<uint16_t>(machInst, zd, zn, pg);
1300                        case 2:
1301                            return new SveRevb<uint32_t>(machInst, zd, zn, pg);
1302                        case 3:
1303                            return new SveRevb<uint64_t>(machInst, zd, zn, pg);
1304                    }
1305                    break;
1306                case 0x01:
1307                    switch (size) {
1308                        case 2:
1309                            return new SveRevh<uint32_t>(machInst, zd, zn, pg);
1310                        case 3:
1311                            return new SveRevh<uint64_t>(machInst, zd, zn, pg);
1312                    }
1313                    break;
1314                case 0x02:
1315                    if (size == 3) {
1316                        return new SveRevw<uint64_t>(machInst, zd, zn, pg);
1317                    }
1318                    break;
1319                case 0x03:
1320                    return decodeSveUnaryPredU<SveRbit>(
1321                            size, machInst, zd, zn, pg);
1322            }
1323        }
1324        return new Unknown64(machInst);
1325    }  // decodeSvePermPred
1326
1327    StaticInstPtr
1328    decodeSveSelVec(ExtMachInst machInst)
1329    {
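        // SEL (vectors): each destination element takes the corresponding
        // element of Zn where the governing predicate is true and of Zm
        // where it is false.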
1330        IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
1331        IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
1332        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 13, 10);
1333        IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 20, 16);
1334
1335        uint8_t size = bits(machInst, 23, 22);
1336
1337        return decodeSveBinConstrPredU<SveSel>(size,
1338            machInst, zd, zn, zm, pg, SvePredType::SELECT);
1339    }  // decodeSveSelVec
1340
1341    StaticInstPtr
1342    decodeSveIntCmpVec(ExtMachInst machInst)
1343    {
1344        uint8_t size = bits(machInst, 23, 22);
1345        uint8_t b14 = bits(machInst, 14);
1346        uint8_t opc =
1347            bits(machInst, 15) << 2 |
1348            bits(machInst, 13) << 1 |
1349            bits(machInst, 4);
1350        IntRegIndex pd = (IntRegIndex) (uint8_t)bits(machInst, 3, 0);
1351        IntRegIndex pg = (IntRegIndex) (uint8_t)bits(machInst, 12, 10);
1352        IntRegIndex zn = (IntRegIndex) (uint8_t)bits(machInst, 9, 5);
1353        IntRegIndex zm = (IntRegIndex) (uint8_t)bits(machInst, 20, 16);
1354        if (b14 && size != 3) {
1355            // sve_int_cmp_1: compares against 64-bit (wide) Zm elements
1356            switch (opc) {
1357                case 0:
1358                    return decodeSveTerPredWS<SveCmpgew>(size,
1359                            machInst, pd, zn, zm, pg);
1360                case 1:
1361                    return decodeSveTerPredWS<SveCmpgtw>(size,
1362                            machInst, pd, zn, zm, pg);
1363                case 2:
1364                    return decodeSveTerPredWS<SveCmpltw>(size,
1365                            machInst, pd, zn, zm, pg);
1366                case 3:
1367                    return decodeSveTerPredWS<SveCmplew>(size,
1368                            machInst, pd, zn, zm, pg);
1369                case 4:
1370                    return decodeSveTerPredWU<SveCmphsw>(size,
1371                            machInst, pd, zn, zm, pg);
1372                case 5:
1373                    return decodeSveTerPredWU<SveCmphiw>(size,
1374                            machInst, pd, zn, zm, pg);
1375                case 6:
1376                    return decodeSveTerPredWU<SveCmplow>(size,
1377                            machInst, pd, zn, zm, pg);
1378                case 7:
1379                    return decodeSveTerPredWU<SveCmplsw>(size,
1380                            machInst, pd, zn, zm, pg);
1381            }
1382        } else if (!b14) {
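            // Same-width compares (CMPHS, CMPHI, CMPGE, CMPGT, CMPEQ,
            // CMPNE), plus CMPEQ/CMPNE against 64-bit wide Zm elements
            // (opc 2 and 3), which are not available for doubleword Zn.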
1383            switch (opc) {
1384                case 0:
1385                    return decodeSveTerPredU<SveCmphs>(size,
1386                            machInst, pd, zn, zm, pg);
1387                case 1:
1388                    return decodeSveTerPredU<SveCmphi>(size,
1389                            machInst, pd, zn, zm, pg);
1390                case 2:
1391                    if (size != 3) {
1392                        return decodeSveTerPredWU<SveCmpeqw>(size,
1393                                machInst, pd, zn, zm, pg);
1394                    }
1395                    break;
1396                case 3:
1397                    if (size != 3) {
1398                        return decodeSveTerPredWU<SveCmpnew>(size,
1399                                machInst, pd, zn, zm, pg);
1400                    }
1401                    break;
1402                case 4:
1403                    return decodeSveTerPredS<SveCmpge>(size,
1404                            machInst, pd, zn, zm, pg);
1405                case 5:
1406                    return decodeSveTerPredS<SveCmpgt>(size,
1407                            machInst, pd, zn, zm, pg);
1408                case 6:
1409                    return decodeSveTerPredU<SveCmpeq>(size,
1410                            machInst, pd, zn, zm, pg);
1411                case 7:
1412                    return decodeSveTerPredU<SveCmpne>(size,
1413                            machInst, pd, zn, zm, pg);
1414            }
1415        }
1416        return new Unknown64(machInst);
1417    }  // decodeSveIntCmpVec
1418
1419    StaticInstPtr
1420    decodeSveIntCmpUImm(ExtMachInst machInst)
1421    {
1422        uint8_t cmp = bits(machInst, 13) << 1 | bits(machInst, 4);
1423        IntRegIndex pd = (IntRegIndex) (uint8_t) bits(machInst, 3, 0);
1424        IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
1425        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10);
1426        int64_t imm = (int64_t) bits(machInst, 20, 14);
1427        uint8_t size = bits(machInst, 23, 22);
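        // Compare with a 7-bit unsigned immediate: CMPHS, CMPHI, CMPLO and
        // CMPLS, selected by bits 13 and 4.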
1428        switch (cmp) {
1429            case 0:
1430                return decodeSveTerImmPredU<SveCmphsi>(size,
1431                        machInst, pd, zn, imm, pg);
1432            case 1:
1433                return decodeSveTerImmPredU<SveCmphii>(size,
1434                        machInst, pd, zn, imm, pg);
1435            case 2:
1436                return decodeSveTerImmPredU<SveCmploi>(size,
1437                        machInst, pd, zn, imm, pg);
1438            case 3:
1439                return decodeSveTerImmPredU<SveCmplsi>(size,
1440                        machInst, pd, zn, imm, pg);
1441        }
1442        return new Unknown64(machInst);
1443    }  // decodeSveIntCmpUImm
1444
1445    StaticInstPtr
1446    decodeSveIntCmpSImm(ExtMachInst machInst)
1447    {
1448        uint8_t opc = bits(machInst, 15) << 2 | bits(machInst, 13) << 1 |
1449            bits(machInst, 4);
1450        IntRegIndex pd = (IntRegIndex) (uint8_t) bits(machInst, 3, 0);
1451        IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
1452        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10);
1453        int64_t imm = sext<5>(bits(machInst, 20, 16));
1454        uint8_t size = bits(machInst, 23, 22);
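        // Compare with a 5-bit signed immediate: CMPGE, CMPGT, CMPLT,
        // CMPLE, CMPEQ and CMPNE; opc values 6 and 7 decode to Unknown64.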
1455        switch (opc) {
1456            case 0:
1457                return decodeSveTerImmPredS<SveCmpgei>(size,
1458                        machInst, pd, zn, imm, pg);
1459            case 1:
1460                return decodeSveTerImmPredS<SveCmpgti>(size,
1461                        machInst, pd, zn, imm, pg);
1462            case 2:
1463                return decodeSveTerImmPredS<SveCmplti>(size,
1464                        machInst, pd, zn, imm, pg);
1465            case 3:
1466                return decodeSveTerImmPredS<SveCmplei>(size,
1467                        machInst, pd, zn, imm, pg);
1468            case 4:
1469                return decodeSveTerImmPredU<SveCmpeqi>(size,
1470                        machInst, pd, zn, imm, pg);
1471            case 5:
1472                return decodeSveTerImmPredU<SveCmpnei>(size,
1473                        machInst, pd, zn, imm, pg);
1474            default:
1475                return new Unknown64(machInst);
1476        }
1477        return new Unknown64(machInst);
1478    }  // decodeSveIntCmpSImm
1479
1480    StaticInstPtr
1481    decodeSvePredLogicalOps(ExtMachInst machInst)
1482    {
1483        IntRegIndex pd = (IntRegIndex) (uint8_t) bits(machInst, 3, 0);
1484        IntRegIndex pn = (IntRegIndex) (uint8_t) bits(machInst, 8, 5);
1485        IntRegIndex pm = (IntRegIndex) (uint8_t) bits(machInst, 19, 16);
1486        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 13, 10);
1487        uint8_t opc = (bits(machInst, 23, 22) << 2) |
1488                      (bits(machInst, 9) << 1) |
1489                      bits(machInst, 4);
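        // opc packs bits 23:22, 9 and 4 of the encoding; 0x7 is the only
        // value not handled and decodes to Unknown64.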
1490        switch (opc) {
1491          case 0x0:
1492            return new SvePredAnd<uint8_t>(machInst, pd, pn, pm, pg);
1493          case 0x1:
1494            return new SvePredBic<uint8_t>(machInst, pd, pn, pm, pg);
1495          case 0x2:
1496            return new SvePredEor<uint8_t>(machInst, pd, pn, pm, pg);
1497          case 0x3:
1498            return new SvePredSel<uint8_t>(machInst, pd, pn, pm, pg, true);
1499          case 0x4:
1500            return new SvePredAnds<uint8_t>(machInst, pd, pn, pm, pg);
1501          case 0x5:
1502            return new SvePredBics<uint8_t>(machInst, pd, pn, pm, pg);
1503          case 0x6:
1504            return new SvePredEors<uint8_t>(machInst, pd, pn, pm, pg);
1505          case 0x8:
1506            return new SvePredOrr<uint8_t>(machInst, pd, pn, pm, pg);
1507          case 0x9:
1508            return new SvePredOrn<uint8_t>(machInst, pd, pn, pm, pg);
1509          case 0xa:
1510            return new SvePredNor<uint8_t>(machInst, pd, pn, pm, pg);
1511          case 0xb:
1512            return new SvePredNand<uint8_t>(machInst, pd, pn, pm, pg);
1513          case 0xc:
1514            return new SvePredOrrs<uint8_t>(machInst, pd, pn, pm, pg);
1515          case 0xd:
1516            return new SvePredOrns<uint8_t>(machInst, pd, pn, pm, pg);
1517          case 0xe:
1518            return new SvePredNors<uint8_t>(machInst, pd, pn, pm, pg);
1519          case 0xf:
1520            return new SvePredNands<uint8_t>(machInst, pd, pn, pm, pg);
1521        }
1522
1523        return new Unknown64(machInst);
1524    }  // decodeSvePredLogicalOps
1525
1526    StaticInstPtr
1527    decodeSvePropBreakFromPrevPartition(ExtMachInst machInst)
1528    {
1529        if (bits(machInst, 23) == 0x0 && bits(machInst, 9) == 0x0) {
1530            uint8_t opc = (bits(machInst, 22) << 1) | bits(machInst, 4);
1531            IntRegIndex pm = (IntRegIndex)(uint8_t) bits(machInst, 19, 16);
1532            IntRegIndex pg = (IntRegIndex)(uint8_t) bits(machInst, 13, 10);
1533            IntRegIndex pn = (IntRegIndex)(uint8_t) bits(machInst, 8, 5);
1534            IntRegIndex pd = (IntRegIndex)(uint8_t) bits(machInst, 3, 0);
1535            switch (opc) {
1536              case 0x0:
1537                // BRKPA
1538                return new SveBrkpa(machInst, pd, pn, pm, pg);
1539              case 0x1:
1540                // BRKPB
1541                return new SveBrkpb(machInst, pd, pn, pm, pg);
1542              case 0x2:
1543                // BRKPAS
1544                return new SveBrkpas(machInst, pd, pn, pm, pg);
1545              case 0x3:
1546                // BRKPBS
1547                return new SveBrkpbs(machInst, pd, pn, pm, pg);
1548            }
1549        }
1550        return new Unknown64(machInst);
1551    }  // decodeSvePropBreakFromPrevPartition
1552
1553    StaticInstPtr
1554    decodeSvePartitionBreakCond(ExtMachInst machInst)
1555    {
1556        if (bits(machInst, 18, 16) == 0x0 && bits(machInst, 9) == 0x0) {
1557            bool flagset = bits(machInst, 22);
1558            bool merging = bits(machInst, 4);
1559            IntRegIndex pg = (IntRegIndex)(uint8_t) bits(machInst, 13, 10);
1560            IntRegIndex pn = (IntRegIndex)(uint8_t) bits(machInst, 8, 5);
1561            IntRegIndex pd = (IntRegIndex)(uint8_t) bits(machInst, 3, 0);
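            // Bit 23 selects BRKB* over BRKA*; the flag-setting forms
            // (bit 22) only exist zeroing, while the non-flag-setting
            // forms have both merging (bit 4) and zeroing variants.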
1562            if (bits(machInst, 23)) {
1563                if (flagset) {
1564                    if (!merging) {
1565                        return new SveBrkbs(machInst, pd, pg, pn);
1566                    }
1567                } else {
1568                    if (merging) {
1569                        return new SveBrkbm(machInst, pd, pg, pn);
1570                    } else {
1571                        return new SveBrkbz(machInst, pd, pg, pn);
1572                    }
1573                }
1574            } else {
1575                if (flagset) {
1576                    if (!merging) {
1577                        return new SveBrkas(machInst, pd, pg, pn);
1578                    }
1579                } else {
1580                    if (merging) {
1581                        return new SveBrkam(machInst, pd, pg, pn);
1582                    } else {
1583                        return new SveBrkaz(machInst, pd, pg, pn);
1584                    }
1585                }
1586            }
1587            return new Unknown64(machInst);
1588        }
1589        return new Unknown64(machInst);
1590    }  // decodeSvePartitionBreakCond
1591
1592    StaticInstPtr
1593    decodeSvePredTest(ExtMachInst machInst)
1594    {
1595        if (bits(machInst, 23, 22) == 0x1 &&
1596                bits(machInst, 18, 16) == 0x0 &&
1597                bits(machInst, 9) == 0x0) {
1598            IntRegIndex pn = (IntRegIndex) (uint8_t) bits(machInst, 8, 5);
1599            IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 13, 10);
1600            return new SvePtest(machInst, pn, pg);
1601        }
1602        return new Unknown64(machInst);
1603    }  // decodeSvePredTest
1604
1605    StaticInstPtr
1606    decodeSvePredIteration(ExtMachInst machInst)
1607    {
1608        uint8_t size = bits(machInst, 23, 22);
1609        uint8_t opc18_16 = bits(machInst, 18, 16);
1610        uint8_t opc10_9 = bits(machInst, 10, 9);
1611        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 8, 5);
1612        IntRegIndex pdn = (IntRegIndex) (uint8_t) bits(machInst, 3, 0);
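        // PNEXT is available for every element size; PFIRST requires a
        // size field of 0x1 and operates at byte granularity.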
1613        if (opc18_16 == 0x1 && opc10_9 == 0x2) {
1614            return decodeSveUnaryPredU<SvePnext>(size,
1615                    machInst, pdn, pdn, pg);
1616        } else if (size == 0x1 && opc18_16 == 0x0 && opc10_9 == 0) {
1617            return new SvePfirst<uint8_t>(machInst, pdn, pdn, pg);
1618        }
1619        return new Unknown64(machInst);
1620    }  // decodeSvePredIteration
1621
1622    StaticInstPtr
1623    decodeSveInitPred(ExtMachInst machInst)
1624    {
1625        IntRegIndex pd = (IntRegIndex) (uint8_t) bits(machInst, 3, 0);
1626        unsigned size = bits(machInst, 23, 22);
1627        uint8_t imm = bits(machInst, 9, 5);
1628
1629        if (bits(machInst, 16) == 0x0) {
1630            return decodeSvePtrue<SvePtrue>(size, machInst, pd, imm);
1631        } else {
1632            return decodeSvePtrue<SvePtrues>(size, machInst, pd, imm);
1633        }
1634        return new Unknown64(machInst);
1635    }  // decodeSveInitPred
1636
1637    StaticInstPtr
1638    decodeSveZeroPredReg(ExtMachInst machInst)
1639    {
1640        if (bits(machInst, 23, 22) == 0x0 && bits(machInst, 18, 16) == 0x0) {
1641            IntRegIndex pd = (IntRegIndex) (uint8_t) bits(machInst, 3, 0);
1642            return new SvePfalse(machInst, pd);
1643        }
1644        return new Unknown64(machInst);
1645    }  // decodeSveZeroPredReg
1646
1647    StaticInstPtr
1648    decodeSvePropBreakToNextPartition(ExtMachInst machInst)
1649    {
1650        if (bits(machInst, 23) == 0x0 &&
1651                bits(machInst, 18, 16) == 0x0 &&
1652                bits(machInst, 9) == 0x0 &&
1653                bits(machInst, 4) == 0x0) {
1654            IntRegIndex pg = (IntRegIndex)(uint8_t) bits(machInst, 13, 10);
1655            IntRegIndex pn = (IntRegIndex)(uint8_t) bits(machInst, 8, 5);
1656            IntRegIndex pdm = (IntRegIndex)(uint8_t) bits(machInst, 3, 0);
1657            if (bits(machInst, 22) == 0x0) {
1658                return new SveBrkn(machInst, pdm, pn, pdm, pg);
1659            } else {
1660                return new SveBrkns(machInst, pdm, pn, pdm, pg);
1661            }
1662            return new Unknown64(machInst);
1663        }
1664        return new Unknown64(machInst);
1665    }  // decodeSvePropBreakToNextPartition
1666
1667    StaticInstPtr
1668    decodeSveReadPredFromFFRPred(ExtMachInst machInst)
1669    {
1670        if (bits(machInst, 23)) {
1671            return new Unknown64(machInst);
1672        }
1673        IntRegIndex pd = (IntRegIndex)(uint8_t) bits(machInst, 3, 0);
1674        IntRegIndex pg = (IntRegIndex)(uint8_t) bits(machInst, 8, 5);
1675        if (bits(machInst, 22)) {
1676            return new SveRdffrsPred(machInst, pd, pg);
1677        } else {
1678            return new SveRdffrPred(machInst, pd, pg);
1679        }
1680    }  // decodeSveReadPredFromFFRPred
1681
1682    StaticInstPtr
1683    decodeSveReadPredFromFFRUnpred(ExtMachInst machInst)
1684    {
1685        if (bits(machInst, 23, 22) != 0) {
1686            return new Unknown64(machInst);
1687        }
1688        IntRegIndex pd = (IntRegIndex)(uint8_t) bits(machInst, 3, 0);
1689        return new SveRdffrUnpred(machInst, pd);
1690    }  // decodeSveReadPredFromFFRUnpred
1691
1692    StaticInstPtr
1693    decodeSvePredGen(ExtMachInst machInst)
1694    {
1695        uint8_t b_20_15 = (bits(machInst, 20) << 1) | bits(machInst, 15);
1696        switch (b_20_15) {
1697          case 0x0:
1698            return decodeSvePredLogicalOps(machInst);
1699          case 0x1:
1700            return decodeSvePropBreakFromPrevPartition(machInst);
1701          case 0x2:
1702            if (bits(machInst, 19) == 0x0) {
1703                return decodeSvePartitionBreakCond(machInst);
1704            } else {
1705                return decodeSvePropBreakToNextPartition(machInst);
1706            }
1707          case 0x3:
1708            if (bits(machInst, 19) == 0x0) {
1709                if (bits(machInst, 4, 0) == 0x0) {
1710                    return decodeSvePredTest(machInst);
1711                } else {
1712                    break;
1713                }
1714            } else {
1715                switch (bits(machInst, 13, 12)) {
1716                  case 0x0:
1717                    if (bits(machInst, 11) == 0x0 &&
1718                            bits(machInst, 4) == 0x0) {
1719                        return decodeSvePredIteration(machInst);
1720                    } else {
1721                        break;
1722                    }
1723                  case 0x1:
1724                    break;
1725                  case 0x2:
1726                    if (bits(machInst, 11, 10) == 0x0 &&
1727                            bits(machInst, 4) == 0x0) {
1728                        return decodeSveInitPred(machInst);
1729                    } else if (bits(machInst, 11, 4) == 0x40) {
1730                        return decodeSveZeroPredReg(machInst);
1731                    }
1732                    break;
1733                  case 0x3:
1734                    if (bits(machInst, 11) == 0x0) {
1735                        if (bits(machInst, 16) == 0x0) {
1736                            return decodeSveReadPredFromFFRPred(machInst);
1737                        } else if (bits(machInst, 8, 4) == 0x0) {
1738                            return decodeSveReadPredFromFFRUnpred(machInst);
1739                        }
1740                    }
1741                    break;
1742                }
1743            }
1744            break;
1745        }
1746        return new Unknown64(machInst);
1747    }  // decodeSvePredGen
1748
1749    StaticInstPtr
1750    decodeSvePredCount(ExtMachInst machInst)
1751    {
1752        uint8_t b19 = bits(machInst, 19);
1753        if (b19) {
1754            uint8_t b13_11 = bits(machInst, 13, 11);
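            // bits 13:11 select between the vector INCP/DECP forms and
            // their saturating variants (0x0), the scalar forms (0x1) and
            // the FFR writes WRFFR/SETFFR (0x2); bit 19 clear selects
            // CNTP, handled below.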
1755            switch (b13_11) {
1756              case 0x0:
1757                {
1758                    if (bits(machInst, 10, 9) != 0x0) {
1759                        return new Unknown64(machInst);
1760                    }
1761                    IntRegIndex zdn = (IntRegIndex) (uint8_t)
1762                        bits(machInst, 4, 0);
1763                    IntRegIndex pg = (IntRegIndex) (uint8_t)
1764                        bits(machInst, 8, 5);
1765                    uint8_t esize = bits(machInst, 23, 22);
1766                    if (esize == 0x0) {
1767                        return new Unknown64(machInst);
1768                    }
1769                    uint8_t opc = bits(machInst, 18, 17);
1770                    if (opc == 0x0) {
1771                        uint8_t u = bits(machInst, 16);
1772                        if (u) {
1773                            return decodeSvePredCountVU<SveUqincpv>(esize,
1774                                    machInst, zdn, pg);
1775                        } else {
1776                            return decodeSvePredCountVS<SveSqincpv>(esize,
1777                                    machInst, zdn, pg);
1778                        }
1779                    } else if (opc == 0x1) {
1780                        uint8_t u = bits(machInst, 16);
1781                        if (u) {
1782                            return decodeSvePredCountVU<SveUqdecpv>(esize,
1783                                    machInst, zdn, pg);
1784                        } else {
1785                            return decodeSvePredCountVS<SveSqdecpv>(esize,
1786                                    machInst, zdn, pg);
1787                        }
1788                    } else if (opc == 0x2) {
1789                        uint8_t d = bits(machInst, 16);
1790                        if (d) {
1791                            return decodeSvePredCountVU<SveDecpv>(esize,
1792                                    machInst, zdn, pg);
1793                        } else {
1794                            return decodeSvePredCountVU<SveIncpv>(esize,
1795                                    machInst, zdn, pg);
1796                        }
1797                    }
1798                }
1799                break;
1800              case 0x1:
1801                {
1802                    IntRegIndex rdn = (IntRegIndex) (uint8_t)
1803                        bits(machInst, 4, 0);
1804                    IntRegIndex pg = (IntRegIndex) (uint8_t)
1805                        bits(machInst, 8, 5);
1806                    uint8_t esize = bits(machInst, 23, 22);
1807                    uint8_t opc = bits(machInst, 18, 17);
1808                    uint8_t opc2 = bits(machInst, 10, 9);
1809                    if (opc == 0x0) {
1810                        uint8_t u = bits(machInst, 16);
1811                        if (opc2 == 0x0) {
1812                            if (u) {
1813                                return decodeSvePredCountU<SveUqincp32>(esize,
1814                                        machInst, rdn, pg);
1815                            } else {
1816                                return decodeSvePredCountS<SveSqincp32>(esize,
1817                                        machInst, rdn, pg);
1818                            }
1819                        } else if (opc2 == 0x2) {
1820                            if (u) {
1821                                return decodeSvePredCountU<SveUqincp64>(esize,
1822                                        machInst, rdn, pg);
1823                            } else {
1824                                return decodeSvePredCountS<SveSqincp64>(esize,
1825                                        machInst, rdn, pg);
1826                            }
1827                        }
1828                    } else if (opc == 0x1) {
1829                        uint8_t u = bits(machInst, 16);
1830                        if (opc2 == 0x0) {
1831                            if (u) {
1832                                return decodeSvePredCountU<SveUqdecp32>(esize,
1833                                        machInst, rdn, pg);
1834                            } else {
1835                                return decodeSvePredCountS<SveSqdecp32>(esize,
1836                                        machInst, rdn, pg);
1837                            }
1838                        } else if (opc2 == 0x2) {
1839                            if (u) {
1840                                return decodeSvePredCountU<SveUqdecp64>(esize,
1841                                        machInst, rdn, pg);
1842                            } else {
1843                                return decodeSvePredCountS<SveSqdecp64>(esize,
1844                                        machInst, rdn, pg);
1845                            }
1846                        }
1847                    } else if (opc == 0x2) {
1848                        if (opc2 == 0x0) {
1849                            if (bits(machInst, 16)) {
1850                                return decodeSvePredCountU<SveDecp>(esize,
1851                                        machInst, rdn, pg);
1852                            } else {
1853                                return decodeSvePredCountU<SveIncp>(esize,
1854                                        machInst, rdn, pg);
1855                            }
1856                        }
1857                    }
1858                }
1859                break;
1860              case 0x2:
1861                if (bits(machInst, 23, 22) == 0x0 &&
1862                        bits(machInst, 10, 9) == 0x0 &&
1863                        bits(machInst, 4, 0) == 0x0) {
1864                    uint8_t opc = bits(machInst, 18, 16);
1865                    if (opc == 0x0) {
1866                        IntRegIndex pn = (IntRegIndex)(uint8_t)
1867                            bits(machInst, 8, 5);
1868                        return new SveWrffr(machInst, pn);
1869                    } else if (opc == 0x4 && bits(machInst, 8, 5) == 0x0) {
1870                        return new SveSetffr(machInst);
1871                    }
1872                }
1873                break;
1874            }
1875        } else {
1876            uint8_t opc = bits(machInst, 18, 16);
1877            if (opc == 0 && bits(machInst, 9) == 0) {
1878                IntRegIndex rd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
1879                IntRegIndex pn = (IntRegIndex) (uint8_t) bits(machInst, 8, 5);
1880                IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 13,
1881                        10);
1882                uint8_t esize = bits(machInst, 23, 22);
1883                return decodeSveUnaryPredU<SveCntp>(esize,
1884                        machInst, rd, pn, pg);
1885            }
1886        }
1887        return new Unknown64(machInst);
1888    }  // decodeSvePredCount
1889
1890    StaticInstPtr
1891    decodeSveIntCmpSca(ExtMachInst machInst)
1892    {
1893        uint16_t b23_13_12_11_10_3_2_1_0 = (uint16_t)
1894            (bits(machInst, 23) << 8) | (bits(machInst, 13, 10) << 4) |
1895            bits(machInst, 3, 0);
1896        uint8_t b10 = (uint8_t) bits(machInst, 10);
1897        IntRegIndex rn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
1898        IntRegIndex rm = (IntRegIndex) (uint8_t) bits(machInst, 20, 16);
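        // The fixed pattern 0x180 (bit 23 set, bits 13:10 == 0x8 and
        // bits 3:0 clear) identifies CTERMEQ/CTERMNE; otherwise bit 10
        // distinguishes the WHILE* instructions.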
1899        if (b23_13_12_11_10_3_2_1_0 == 0x180) {
1900            uint8_t s64b = bits(machInst, 22);
1901            uint8_t ne = bits(machInst, 4);
1902            if (ne) {
1903                if (s64b) {
1904                    return new SveCtermne<uint64_t>(machInst, rn, rm);
1905                } else {
1906                    return new SveCtermne<uint32_t>(machInst, rn, rm);
1907                }
1908            } else {
1909                if (s64b) {
1910                    return new SveCtermeq<uint64_t>(machInst, rn, rm);
1911                } else {
1912                    return new SveCtermeq<uint32_t>(machInst, rn, rm);
1913                }
1914            }
1915        } else if (b10) {
1916            IntRegIndex pd = (IntRegIndex) (uint8_t) bits(machInst, 3, 0);
1917            uint8_t size = (uint8_t) bits(machInst, 23, 22);
1918            uint8_t s64b = (uint8_t) bits(machInst, 12);
1919            uint8_t opc = (uint8_t) bits(machInst, 11) << 1 |
1920                bits(machInst, 4);
1921            if (s64b) {
1922                switch (opc) {
1923                    case 0:
1924                        return decodeSveBinUnpredS<SveWhilelt64>(size,
1925                                machInst, pd, rn, rm);
1926                    case 1:
1927                        return decodeSveBinUnpredS<SveWhilele64>(size,
1928                                machInst, pd, rn, rm);
1929                    case 2:
1930                        return decodeSveBinUnpredU<SveWhilelo64>(size,
1931                                machInst, pd, rn, rm);
1932                    case 3:
1933                        return decodeSveBinUnpredU<SveWhilels64>(size,
1934                                machInst, pd, rn, rm);
1935                }
1936            } else {
1937                switch (opc) {
1938                    case 0:
1939                        return decodeSveBinUnpredS<SveWhilelt32>(size,
1940                                machInst, pd, rn, rm);
1941                    case 1:
1942                        return decodeSveBinUnpredS<SveWhilele32>(size,
1943                                machInst, pd, rn, rm);
1944                    case 2:
1945                        return decodeSveBinUnpredU<SveWhilelo32>(size,
1946                                machInst, pd, rn, rm);
1947                    case 3:
1948                        return decodeSveBinUnpredU<SveWhilels32>(size,
1949                                machInst, pd, rn, rm);
1950                }
1951            }
1952        }
1953        return new Unknown64(machInst);
1954    }  // decodeSveIntCmpSca
1955
1956    StaticInstPtr
1957    decodeSveIntWideImmUnpred0(ExtMachInst machInst)
1958    {
1959        IntRegIndex zdn = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
1960        uint64_t imm = bits(machInst, 12, 5);
1961        uint8_t sh = bits(machInst, 13);
1962        uint8_t size = bits(machInst, 23, 22);
1963
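        // When sh is set the 8-bit immediate is shifted left by 8; the
        // shifted form is not available for byte elements.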
1964        if (sh) {
1965            if (size == 0x0) {
1966                return new Unknown64(machInst);
1967            }
1968            imm <<= 8;
1969        }
1970
1971        switch (bits(machInst, 18, 16)) {
1972          case 0x0:
1973            return decodeSveWideImmUnpredU<SveAddImm>(
1974                size, machInst, zdn, imm);
1975          case 0x1:
1976            return decodeSveWideImmUnpredU<SveSubImm>(
1977                size, machInst, zdn, imm);
1978          case 0x3:
1979            return decodeSveWideImmUnpredU<SveSubrImm>(
1980                size, machInst, zdn, imm);
1981          case 0x4:
1982            return decodeSveWideImmUnpredS<SveSqaddImm>(
1983                size, machInst, zdn, imm);
1984          case 0x5:
1985            return decodeSveWideImmUnpredU<SveUqaddImm>(
1986                size, machInst, zdn, imm);
1987          case 0x6:
1988            return decodeSveWideImmUnpredS<SveSqsubImm>(
1989                size, machInst, zdn, imm);
1990          case 0x7:
1991            return decodeSveWideImmUnpredU<SveUqsubImm>(
1992                size, machInst, zdn, imm);
1993        }
1994
1995        return new Unknown64(machInst);
1996    }  // decodeSveIntWideImmUnpred0
1997
1998    StaticInstPtr
1999    decodeSveIntWideImmUnpred1(ExtMachInst machInst)
2000    {
2001        IntRegIndex zdn = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
2002        uint64_t imm = bits(machInst, 12, 5);
2003        uint8_t size = bits(machInst, 23, 22);
2004
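        // MAX/MIN with an 8-bit immediate: the signed forms sign-extend
        // the immediate, the unsigned forms use it as-is.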
2005        switch (bits(machInst, 18, 16)) {
2006          case 0x0:
2007            return decodeSveWideImmUnpredS<SveSmaxImm>(
2008                size, machInst, zdn, sext<8>(imm));
2009          case 0x1:
2010            return decodeSveWideImmUnpredU<SveUmaxImm>(
2011                size, machInst, zdn, imm);
2012          case 0x2:
2013            return decodeSveWideImmUnpredS<SveSminImm>(
2014                size, machInst, zdn, sext<8>(imm));
2015          case 0x3:
2016            return decodeSveWideImmUnpredU<SveUminImm>(
2017                size, machInst, zdn, imm);
2018        }
2019
2020        return new Unknown64(machInst);
2021    }  // decodeSveIntWideImmUnpred1
2022
2023    StaticInstPtr
2024    decodeSveIntWideImmUnpred2(ExtMachInst machInst)
2025    {
2026        IntRegIndex zdn = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
2027        uint64_t imm = bits(machInst, 12, 5);
2028        uint8_t size = bits(machInst, 23, 22);
2029
2030        if (bits(machInst, 18, 16) == 0x0) {
2031            return decodeSveWideImmUnpredU<SveMulImm>(
2032                size, machInst, zdn, sext<8>(imm));
2033        }
2034
2035        return new Unknown64(machInst);
2036    }  // decodeSveIntWideImmUnpred2
2037
2038    StaticInstPtr
2039    decodeSveIntWideImmUnpred3(ExtMachInst machInst)
2040    {
2041        IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
2042        uint64_t imm = bits(machInst, 12, 5);
2043        uint8_t sh = bits(machInst, 13);
2044        uint8_t size = bits(machInst, 23, 22);
2045
2046        if (sh) {
2047            if (size == 0x0) {
2048                return new Unknown64(machInst);
2049            }
2050            imm <<= 8;
2051        }
2052
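        // DUP (immediate): the immediate is sign-extended from 8 bits, or
        // from 16 bits once the optional shift has been applied.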
2053        if (bits(machInst, 18, 17) == 0x0) {
2054            if (sh) {
2055                return decodeSveWideImmUnpredU<SveDupImm>(
2056                    size, machInst, zd, sext<16>(imm));
2057            } else {
2058                return decodeSveWideImmUnpredU<SveDupImm>(
2059                    size, machInst, zd, sext<8>(imm));
2060            }
2061        }
2062
2063        return new Unknown64(machInst);
2064    }  // decodeSveIntWideImmUnpred3
2065
2066    StaticInstPtr
2067    decodeSveIntWideImmUnpred4(ExtMachInst machInst)
2068    {
2069        IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
2070        uint8_t size = bits(machInst, 23, 22);
2071
2072        if (bits(machInst, 18, 17) == 0x0 && size != 0x0) {
2073            uint64_t imm = vfp_modified_imm(bits(machInst, 12, 5),
2074                decode_fp_data_type(size));
2075            return decodeSveWideImmUnpredF<SveFdup>(size, machInst, zd, imm);
2076        }
2077
2078        return new Unknown64(machInst);
2079    }  // decodeSveIntWideImmUnpred4
2080
2081    StaticInstPtr
2082    decodeSveIntWideImmUnpred(ExtMachInst machInst)
2083    {
2084        switch (bits(machInst, 20, 19)) {
2085          case 0x0:
2086            if (bits(machInst, 18, 16) != 0x2) {
2087                return decodeSveIntWideImmUnpred0(machInst);
2088            }
2089            break;
2090          case 0x1:
2091            if (bits(machInst, 13) == 0x0) {
2092                return decodeSveIntWideImmUnpred1(machInst);
2093            }
2094            break;
2095          case 0x2:
2096            if (bits(machInst, 13) == 0x0) {
2097                return decodeSveIntWideImmUnpred2(machInst);
2098            }
2099            break;
2100          case 0x3:
2101            if (bits(machInst, 16) == 0x0) {
2102                return decodeSveIntWideImmUnpred3(machInst);
2103            } else if (bits(machInst, 13) == 0x0) {
2104                return decodeSveIntWideImmUnpred4(machInst);
2105            }
2106            break;
2107        }
2108        return new Unknown64(machInst);
2109    }  // decodeSveIntWideImmUnpred
2110
2111    StaticInstPtr
2112    decodeSveMultiplyAddUnpred(ExtMachInst machInst)
2113    {
2114        IntRegIndex zda = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
2115        IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
2116        IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 20, 16);
2117
2118        uint8_t size = (uint8_t) bits(machInst, 23, 22);
2119
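        // SDOT/UDOT (vectors): only size 0x2 (32-bit) and 0x3 (64-bit)
        // accumulator elements are handled; bit 10 selects the unsigned
        // form.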
2120        if (bits(machInst, 12, 11) != 0 || !(size & 0x2)) {
2121            return new Unknown64(machInst);
2122        }
2123
2124        uint8_t usig = (uint8_t) bits(machInst, 10);
2125        if (size & 0x1) {
2126            if (usig) {
2127                return new SveUdotv<uint16_t, uint64_t>(machInst,
2128                                                        zda, zn, zm);
2129            } else {
2130                return new SveSdotv<int16_t, int64_t>(machInst,
2131                                                        zda, zn, zm);
2132            }
2133        } else {
2134            if (usig) {
2135                return new SveUdotv<uint8_t, uint32_t>(machInst,
2136                                                        zda, zn, zm);
2137            } else {
2138                return new SveSdotv<int8_t, int32_t>(machInst,
2139                                                        zda, zn, zm);
2140            }
2141        }
2142
2143        return new Unknown64(machInst);
2144    }  // decodeSveMultiplyAddUnpred
2145
2146    StaticInstPtr
2147    decodeSveMultiplyIndexed(ExtMachInst machInst)
2148    {
2149        IntRegIndex zda = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
2150        IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
2151
2152        uint8_t size = (uint8_t) bits(machInst, 23, 22);
2153
2154        if (bits(machInst, 12, 11) != 0 || !(size & 0x2)) {
2155            return new Unknown64(machInst);
2156        }
2157
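        // SDOT/UDOT (indexed): the 64-bit form takes a 4-bit Zm and a
        // 1-bit index, the 32-bit form a 3-bit Zm and a 2-bit index.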
2158        uint8_t usig = (uint8_t) bits(machInst, 10);
2159        if (size & 0x1) {
2160            IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 19, 16);
2161            uint8_t i1 = (uint8_t) bits(machInst, 20);
2162            if (usig) {
2163                return new SveUdoti<uint16_t, uint64_t>(machInst,
2164                                                        zda, zn, zm, i1);
2165            } else {
2166                return new SveSdoti<int16_t, int64_t>(machInst,
2167                                                        zda, zn, zm, i1);
2168            }
2169        } else {
2170            IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 18, 16);
2171            uint8_t i2 = (uint8_t) bits(machInst, 20, 19);
2172            if (usig) {
2173                return new SveUdoti<uint8_t, uint32_t>(machInst,
2174                                                        zda, zn, zm, i2);
2175            } else {
2176                return new SveSdoti<int8_t, int32_t>(machInst,
2177                                                        zda, zn, zm, i2);
2178            }
2179        }
2180        return new Unknown64(machInst);
2181    }  // decodeSveMultiplyIndexed
2182
2183    StaticInstPtr
2184    decodeSveFpFastReduc(ExtMachInst machInst)
2185    {
2186        IntRegIndex vd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
2187        IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
2188        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10);
2189
2190        uint8_t size = bits(machInst, 23, 22);
2191
2192        if (size == 0x0) {
2193            return new Unknown64(machInst);
2194        }
2195
2196        switch (bits(machInst, 18, 16)) {
2197          case 0x0:
2198            return decodeSveUnaryPredF<SveFaddv>(size, machInst, vd, zn, pg);
2199          case 0x4:
2200            return decodeSveUnaryPredF<SveFmaxnmv>(size, machInst, vd, zn, pg);
2201          case 0x5:
2202            return decodeSveUnaryPredF<SveFminnmv>(size, machInst, vd, zn, pg);
2203          case 0x6:
2204            return decodeSveUnaryPredF<SveFmaxv>(size, machInst, vd, zn, pg);
2205          case 0x7:
2206            return decodeSveUnaryPredF<SveFminv>(size, machInst, vd, zn, pg);
2207        }
2208
2209        return new Unknown64(machInst);
2210    }  // decodeSveFpFastReduc
2211
2212    StaticInstPtr
2213    decodeSveFpUnaryUnpred(ExtMachInst machInst)
2214    {
2215        IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
2216        IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
2217
2218        uint8_t size = (uint8_t) bits(machInst, 23, 22);
2219        if (size == 0) {
2220            return new Unknown64(machInst);
2221        }
2222        uint8_t opc = (uint8_t) bits(machInst, 18, 16);
2223
2224        switch (opc) {
2225          case 0x6:
2226            return decodeSveUnaryUnpredF<SveFrecpe>(
2227                    size, machInst, zd, zn);
2228          case 0x7:
2229            return decodeSveUnaryUnpredF<SveFrsqrte>(
2230                    size, machInst, zd, zn);
2231        }
2232        return new Unknown64(machInst);
2233    }  // decodeSveFpUnaryUnpred
2234
2235    StaticInstPtr
2236    decodeSveFpCmpZero(ExtMachInst machInst)
2237    {
2238        IntRegIndex pd = (IntRegIndex) (uint8_t) bits(machInst, 3, 0);
2239        IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
2240        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10);
2241
2242        uint8_t size = bits(machInst, 23, 22);
2243        if (size == 0) {
2244            return new Unknown64(machInst);
2245        }
2246        uint8_t opc = (bits(machInst, 17, 16) << 1) | bits(machInst, 4);
2247
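        // FP compare against +0.0: FCMGE, FCMGT, FCMLT, FCMLE, FCMEQ and
        // FCMNE; opc 0x5 and 0x7 decode to Unknown64.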
2248        switch (opc) {
2249          case 0x0:
2250            return decodeSveCmpImmF<SveFcmgeZero>(
2251                size, machInst, pd, zn, 0x0, pg);
2252          case 0x1:
2253            return decodeSveCmpImmF<SveFcmgtZero>(
2254                size, machInst, pd, zn, 0x0, pg);
2255          case 0x2:
2256            return decodeSveCmpImmF<SveFcmltZero>(
2257                size, machInst, pd, zn, 0x0, pg);
2258          case 0x3:
2259            return decodeSveCmpImmF<SveFcmleZero>(
2260                size, machInst, pd, zn, 0x0, pg);
2261          case 0x4:
2262            return decodeSveCmpImmF<SveFcmeqZero>(
2263                size, machInst, pd, zn, 0x0, pg);
2264          case 0x6:
2265            return decodeSveCmpImmF<SveFcmneZero>(
2266                size, machInst, pd, zn, 0x0, pg);
2267        }
2268        return new Unknown64(machInst);
2269    }  // decodeSveFpCmpZero
2270
2271    StaticInstPtr
2272    decodeSveFpAccumReduc(ExtMachInst machInst)
2273    {
2274        uint8_t opc = bits(machInst, 18, 16);
2275        uint8_t size = bits(machInst, 23, 22);
2276        if (opc != 0 || size == 0) {
2277            return new Unknown64(machInst);
2278        }
2279
2280        IntRegIndex vdn = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
2281        IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
2282        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10);
2283
2284        return decodeSveUnaryPredF<SveFadda>(size, machInst, vdn, zm, pg);
2285    }  // decodeSveFpAccumReduc
2286
2287    StaticInstPtr
2288    decodeSveFpArithUnpred(ExtMachInst machInst)
2289    {
2290        IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
2291        IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
2292        IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 20, 16);
2293
2294        uint8_t size = bits(machInst, 23, 22);
2295        if (size == 0) {
2296            return new Unknown64(machInst);
2297        }
2298        uint8_t opc = (uint8_t) bits(machInst, 12, 10);
2299
2300        switch (opc) {
2301          case 0x0:
2302            return decodeSveBinUnpredF<SveFaddUnpred>(
2303                    size, machInst, zd, zn, zm);
2304          case 0x1:
2305            return decodeSveBinUnpredF<SveFsubUnpred>(
2306                    size, machInst, zd, zn, zm);
2307          case 0x2:
2308            return decodeSveBinUnpredF<SveFmulUnpred>(
2309                    size, machInst, zd, zn, zm);
2310          case 0x3:
2311            return decodeSveBinUnpredF<SveFtsmul>(
2312                    size, machInst, zd, zn, zm);
2313          case 0x6:
2314            return decodeSveBinUnpredF<SveFrecps>(
2315                    size, machInst, zd, zn, zm);
2316          case 0x7:
2317            return decodeSveBinUnpredF<SveFrsqrts>(
2318                    size, machInst, zd, zn, zm);
2319        }
2320        return new Unknown64(machInst);
2321    }  // decodeSveFpArithUnpred
2322
2323    StaticInstPtr
2324    decodeSveFpArithPred0(ExtMachInst machInst)
2325    {
2326        IntRegIndex zdn = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
2327        IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
2328        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10);
2329
2330        uint8_t size = (uint8_t) bits(machInst, 23, 22);
2331        if (size == 0) {
2332            return new Unknown64(machInst);
2333        }
2334        uint8_t opc = (uint8_t) bits(machInst, 19, 16);
2335
2336        switch (opc) {
2337          case 0x0:
2338            return decodeSveBinDestrPredF<SveFaddPred>(
2339                    size, machInst, zdn, zm, pg);
2340          case 0x1:
2341            return decodeSveBinDestrPredF<SveFsubPred>(
2342                    size, machInst, zdn, zm, pg);
2343          case 0x2:
2344            return decodeSveBinDestrPredF<SveFmulPred>(
2345                    size, machInst, zdn, zm, pg);
2346          case 0x3:
2347            return decodeSveBinDestrPredF<SveFsubr>(
2348                    size, machInst, zdn, zm, pg);
2349          case 0x4:
2350            return decodeSveBinDestrPredF<SveFmaxnm>(
2351                    size, machInst, zdn, zm, pg);
2352          case 0x5:
2353            return decodeSveBinDestrPredF<SveFminnm>(
2354                    size, machInst, zdn, zm, pg);
2355          case 0x6:
2356            return decodeSveBinDestrPredF<SveFmax>(
2357                    size, machInst, zdn, zm, pg);
2358          case 0x7:
2359            return decodeSveBinDestrPredF<SveFmin>(
2360                    size, machInst, zdn, zm, pg);
2361          case 0x8:
2362            return decodeSveBinDestrPredF<SveFabd>(
2363                    size, machInst, zdn, zm, pg);
2364          case 0x9:
2365            return decodeSveBinDestrPredF<SveFscale>(
2366                    size, machInst, zdn, zm, pg);
2367          case 0xa:
2368            return decodeSveBinDestrPredF<SveFmulx>(
2369                    size, machInst, zdn, zm, pg);
2370          case 0xc:
2371            return decodeSveBinDestrPredF<SveFdivr>(
2372                    size, machInst, zdn, zm, pg);
2373          case 0xd:
2374            return decodeSveBinDestrPredF<SveFdiv>(
2375                    size, machInst, zdn, zm, pg);
2376        }
2377        return new Unknown64(machInst);
2378    }  // decodeSveFpArithPred0
2379
2380    StaticInstPtr
2381    decodeSveFpTrigMAddCoeff(ExtMachInst machInst)
2382    {
2383        IntRegIndex zdn = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
2384        IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
2385        uint8_t imm = (uint8_t) bits(machInst, 18, 16);
2386
2387        uint8_t size = (uint8_t) bits(machInst, 23, 22);
2388        if (size == 0) {
2389            return new Unknown64(machInst);
2390        }
2391
2392        return decodeSveTerImmUnpredF<SveFtmad>(size, machInst, zdn, zm, imm);
2393    }  // decodeSveFpTrigMAddCoeff
2394
2395    StaticInstPtr
2396    decodeSveFpArithImmPred(ExtMachInst machInst)
2397    {
2398        IntRegIndex zdn = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
2399        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10);
2400        uint64_t imm;
2401
2402        uint8_t size = (uint8_t) bits(machInst, 23, 22);
2403        if (size == 0) {
2404            return new Unknown64(machInst);
2405        }
2406
2407        uint8_t opc = (uint8_t) bits(machInst, 18, 16);
2408
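        // Bit 5 selects between two FP constants per operation (per the
        // SVE spec: 0.5/1.0 for FADD/FSUB/FSUBR, 0.5/2.0 for FMUL and
        // 0.0/1.0 for the max/min forms); the sveExpandFpImm* helpers
        // encode that choice.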
2409        switch (opc) {
2410          case 0x0:
2411            imm = sveExpandFpImmAddSub((uint8_t) bits(machInst, 5), size);
2412            return decodeSveBinImmPredF<SveFaddImm>(
2413                    size, machInst, zdn, imm, pg);
2414          case 0x1:
2415            imm = sveExpandFpImmAddSub((uint8_t) bits(machInst, 5), size);
2416            return decodeSveBinImmPredF<SveFsubImm>(
2417                    size, machInst, zdn, imm, pg);
2418          case 0x2:
2419            imm = sveExpandFpImmMul((uint8_t) bits(machInst, 5), size);
2420            return decodeSveBinImmPredF<SveFmulImm>(
2421                    size, machInst, zdn, imm, pg);
2422          case 0x3:
2423            imm = sveExpandFpImmAddSub((uint8_t) bits(machInst, 5), size);
2424            return decodeSveBinImmPredF<SveFsubrImm>(
2425                    size, machInst, zdn, imm, pg);
2426          case 0x4:
2427            imm = sveExpandFpImmMaxMin((uint8_t) bits(machInst, 5), size);
2428            return decodeSveBinImmPredF<SveFmaxnmImm>(
2429                    size, machInst, zdn, imm, pg);
2430          case 0x5:
2431            imm = sveExpandFpImmMaxMin((uint8_t) bits(machInst, 5), size);
2432            return decodeSveBinImmPredF<SveFminnmImm>(
2433                    size, machInst, zdn, imm, pg);
2434          case 0x6:
2435            imm = sveExpandFpImmMaxMin((uint8_t) bits(machInst, 5), size);
2436            return decodeSveBinImmPredF<SveFmaxImm>(
2437                    size, machInst, zdn, imm, pg);
2438          case 0x7:
2439            imm = sveExpandFpImmMaxMin((uint8_t) bits(machInst, 5), size);
2440            return decodeSveBinImmPredF<SveFminImm>(
2441                    size, machInst, zdn, imm, pg);
2442        }
2443        return new Unknown64(machInst);
2444    }  // decodeSveFpArithImmPred
2445
2446    StaticInstPtr
2447    decodeSveFpArithPred(ExtMachInst machInst)
2448    {
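        // Bit 20 clear selects the predicated arithmetic group; otherwise
        // bit 19 distinguishes FTMAD (clear) from the immediate forms
        // (set).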
2449        if (bits(machInst, 20) == 0) {
2450            return decodeSveFpArithPred0(machInst);
2451        } else if (bits(machInst, 19) == 0) {
2452            return decodeSveFpTrigMAddCoeff(machInst);
2453        } else {
2454            return decodeSveFpArithImmPred(machInst);
2455        }
2456    }  // decodeSveFpArithPred
2457
2458    StaticInstPtr
2459    decodeSveFpUnaryPred(ExtMachInst machInst)
2460    {
2461        IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
2462        IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
2463        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10);
2464
2465        uint8_t size = (uint8_t) bits(machInst, 23, 22);
2466        if (size == 0) {
2467            return new Unknown64(machInst);
2468        }
2469
2470        uint8_t b20_19 = bits(machInst, 20, 19);
2471        switch (b20_19) {
2472          case 0x0:
2473            {
2474                if (bits(machInst, 18, 16) == 0x5) {
2475                    return new Unknown64(machInst);
2476                }
2477                // SVE floating-point round to integral value
2478                uint8_t opc = (uint8_t) bits(machInst, 18, 16);
2479                switch (opc) {
2480                  case 0x0:
2481                    return decodeSveUnaryPredF<SveFrintn>(
2482                        size, machInst, zd, zn, pg);
2483                  case 0x1:
2484                    return decodeSveUnaryPredF<SveFrintp>(
2485                        size, machInst, zd, zn, pg);
2486                  case 0x2:
2487                    return decodeSveUnaryPredF<SveFrintm>(
2488                        size, machInst, zd, zn, pg);
2489                  case 0x3:
2490                    return decodeSveUnaryPredF<SveFrintz>(
2491                        size, machInst, zd, zn, pg);
2492                  case 0x4:
2493                    return decodeSveUnaryPredF<SveFrinta>(
2494                        size, machInst, zd, zn, pg);
2495                  case 0x6:
2496                    return decodeSveUnaryPredF<SveFrintx>(
2497                        size, machInst, zd, zn, pg);
2498                  case 0x7:
2499                    return decodeSveUnaryPredF<SveFrinti>(
2500                        size, machInst, zd, zn, pg);
2501                }
2502            }
2503            break;
2504          case 0x1:
2505            {
2506                // SVE floating-point unary operations (predicated)
2507                uint8_t b18_16 = bits(machInst, 18, 16);
2508                switch (b18_16) {
2509                  case 0x0:
2510                    if (size == 0x2) {
2511                        return new SveFcvtNarrow<uint32_t, uint16_t>(
2512                            machInst, zd, zn, pg);
2513                    } else if (size == 0x3) {
2514                        return new SveFcvtNarrow<uint64_t, uint16_t>(
2515                            machInst, zd, zn, pg);
2516                    }
2517                    break;
2518                  case 0x1:
2519                    if (size == 0x2) {
2520                        return new SveFcvtWiden<uint16_t, uint32_t>(
2521                            machInst, zd, zn, pg);
2522                    } else if (size == 0x3) {
2523                        return new SveFcvtWiden<uint16_t, uint64_t>(
2524                            machInst, zd, zn, pg);
2525                    }
2526                    break;
2527                  case 0x2:
2528                    if (size == 0x3) {
2529                        return new SveFcvtNarrow<uint64_t, uint32_t>(
2530                            machInst, zd, zn, pg);
2531                    }
2532                    break;
2533                  case 0x3:
2534                    if (size == 0x3) {
2535                        return new SveFcvtWiden<uint32_t, uint64_t>(
2536                            machInst, zd, zn, pg);
2537                    }
2538                    break;
2539                  case 0x4:
2540                    if (size != 0x0) {
2541                        return decodeSveUnaryPredF<SveFrecpx>(
2542                            size, machInst, zd, zn, pg);
2543                    }
2544                    break;
2545                  case 0x5:
2546                    if (size != 0x0) {
2547                        return decodeSveUnaryPredF<SveFsqrt>(
2548                            size, machInst, zd, zn, pg);
2549                    }
2550                    break;
2551                }
2552            }
2553            break;
2554          case 0x2:
2555            {
2556                // SVE integer convert to floating-point
2557                uint8_t opc = (size << 3) | bits(machInst, 18, 16);
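                // opc combines the size field with bits 18:16;
                // combinations not listed below (including all
                // byte-element forms) decode to Unknown64.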
                switch (opc) {
                  case 0xa:
                    return new SveScvtfNarrow<uint16_t, uint16_t>(
                        machInst, zd, zn, pg);
                  case 0xb:
                    return new SveUcvtfNarrow<uint16_t, uint16_t>(
                        machInst, zd, zn, pg);
                  case 0xc:
                    return new SveScvtfNarrow<uint32_t, uint16_t>(
                        machInst, zd, zn, pg);
                  case 0xd:
                    return new SveUcvtfNarrow<uint32_t, uint16_t>(
                        machInst, zd, zn, pg);
                  case 0xe:
                    return new SveScvtfNarrow<uint64_t, uint16_t>(
                        machInst, zd, zn, pg);
                  case 0xf:
                    return new SveUcvtfNarrow<uint64_t, uint16_t>(
                        machInst, zd, zn, pg);
                  case 0x14:
                    return new SveScvtfNarrow<uint32_t, uint32_t>(
                        machInst, zd, zn, pg);
                  case 0x15:
                    return new SveUcvtfNarrow<uint32_t, uint32_t>(
                        machInst, zd, zn, pg);
                  case 0x18:
                    return new SveScvtfWiden<uint32_t, uint64_t>(
                        machInst, zd, zn, pg);
                  case 0x19:
                    return new SveUcvtfWiden<uint32_t, uint64_t>(
                        machInst, zd, zn, pg);
                  case 0x1c:
                    return new SveScvtfNarrow<uint64_t, uint32_t>(
                        machInst, zd, zn, pg);
                  case 0x1d:
                    return new SveUcvtfNarrow<uint64_t, uint32_t>(
                        machInst, zd, zn, pg);
                  case 0x1e:
                    return new SveScvtfNarrow<uint64_t, uint64_t>(
                        machInst, zd, zn, pg);
                  case 0x1f:
                    return new SveUcvtfNarrow<uint64_t, uint64_t>(
                        machInst, zd, zn, pg);
                }
            }
            break;
          case 0x3:
            {
                // SVE floating-point convert to integer
                uint8_t opc = (size << 3) | bits(machInst, 18, 16);
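                // Same opc layout as the conversion group above, but
                // mapping to FCVTZS (even values) and FCVTZU (odd
                // values); unallocated encodings fall through to the
                // final Unknown64.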
                switch (opc) {
                  case 0xa:
                    return new SveFcvtzsNarrow<uint16_t, uint16_t>(
                        machInst, zd, zn, pg);
                  case 0xb:
                    return new SveFcvtzuNarrow<uint16_t, uint16_t>(
                        machInst, zd, zn, pg);
                  case 0xc:
                    return new SveFcvtzsWiden<uint16_t, uint32_t>(
                        machInst, zd, zn, pg);
                  case 0xd:
                    return new SveFcvtzuWiden<uint16_t, uint32_t>(
                        machInst, zd, zn, pg);
                  case 0xe:
                    return new SveFcvtzsWiden<uint16_t, uint64_t>(
                        machInst, zd, zn, pg);
                  case 0xf:
                    return new SveFcvtzuWiden<uint16_t, uint64_t>(
                        machInst, zd, zn, pg);
                  case 0x14:
                    return new SveFcvtzsNarrow<uint32_t, uint32_t>(
                        machInst, zd, zn, pg);
                  case 0x15:
                    return new SveFcvtzuNarrow<uint32_t, uint32_t>(
                        machInst, zd, zn, pg);
                  case 0x18:
                    return new SveFcvtzsNarrow<uint64_t, uint32_t>(
                        machInst, zd, zn, pg);
                  case 0x19:
                    return new SveFcvtzuNarrow<uint64_t, uint32_t>(
                        machInst, zd, zn, pg);
                  case 0x1c:
                    return new SveFcvtzsWiden<uint32_t, uint64_t>(
                        machInst, zd, zn, pg);
                  case 0x1d:
                    return new SveFcvtzuWiden<uint32_t, uint64_t>(
                        machInst, zd, zn, pg);
                  case 0x1e:
                    return new SveFcvtzsNarrow<uint64_t, uint64_t>(
                        machInst, zd, zn, pg);
                  case 0x1f:
                    return new SveFcvtzuNarrow<uint64_t, uint64_t>(
                        machInst, zd, zn, pg);
                }
            }
            break;
        }
        return new Unknown64(machInst);
    }  // decodeSveFpUnaryPred

    StaticInstPtr
    decodeSveFpCmpVec(ExtMachInst machInst)
    {
        IntRegIndex pd = (IntRegIndex) (uint8_t) bits(machInst, 3, 0);
        IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
        IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 20, 16);
        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10);

        uint8_t size = bits(machInst, 23, 22);
        if (size == 0) {
            return new Unknown64(machInst);
        }
        uint8_t opc = (bits(machInst, 15) << 2) |
                      (bits(machInst, 13) << 1) |
                      bits(machInst, 4);

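        // opc packs bits 15, 13 and 4 of the encoding; value 0x6 is
        // not allocated and falls through to Unknown64.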
        switch (opc) {
          case 0x0:
            return decodeSveCmpF<SveFcmge>(size, machInst, pd, zn, zm, pg);
          case 0x1:
            return decodeSveCmpF<SveFcmgt>(size, machInst, pd, zn, zm, pg);
          case 0x2:
            return decodeSveCmpF<SveFcmeq>(size, machInst, pd, zn, zm, pg);
          case 0x3:
            return decodeSveCmpF<SveFcmne>(size, machInst, pd, zn, zm, pg);
          case 0x4:
            return decodeSveCmpF<SveFcmuo>(size, machInst, pd, zn, zm, pg);
          case 0x5:
            return decodeSveCmpF<SveFacge>(size, machInst, pd, zn, zm, pg);
          case 0x7:
            return decodeSveCmpF<SveFacgt>(size, machInst, pd, zn, zm, pg);
        }
        return new Unknown64(machInst);
    }  // decodeSveFpCmpVec

    StaticInstPtr
    decodeSveFpFusedMulAdd(ExtMachInst machInst)
    {
        IntRegIndex zda = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
        IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 20, 16);
        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10);

        uint8_t size = bits(machInst, 23, 22);
        if (size == 0) {
            return new Unknown64(machInst);
        }
        uint8_t opc = bits(machInst, 15, 13);

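        // opc<2> separates the accumulator-destructive forms
        // (FMLA/FMLS/FNMLA/FNMLS, writing zda) from the
        // multiplicand-destructive forms (FMAD/FMSB/FNMAD/FNMSB),
        // which reuse the same register fields with the roles
        // swapped, as noted in the comments below.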
        switch (opc) {
          case 0x0:
            return decodeSveTerPredF<SveFmla>(
                    size, machInst, zda, zn, zm, pg);
          case 0x1:
            return decodeSveTerPredF<SveFmls>(
                    size, machInst, zda, zn, zm, pg);
          case 0x2:
            return decodeSveTerPredF<SveFnmla>(
                    size, machInst, zda, zn, zm, pg);
          case 0x3:
            return decodeSveTerPredF<SveFnmls>(
                    size, machInst, zda, zn, zm, pg);
          case 0x4:
            return decodeSveTerPredF<SveFmad>(
                    size, machInst, zda /* zdn */, zm /* za */, zn, pg);
          case 0x5:
            return decodeSveTerPredF<SveFmsb>(
                    size, machInst, zda /* zdn */, zm /* za */, zn, pg);
          case 0x6:
            return decodeSveTerPredF<SveFnmad>(
                    size, machInst, zda /* zdn */, zm /* za */, zn, pg);
          case 0x7:
            return decodeSveTerPredF<SveFnmsb>(
                    size, machInst, zda /* zdn */, zm /* za */, zn, pg);
        }
        return new Unknown64(machInst);
    }  // decodeSveFpFusedMulAdd

    StaticInstPtr
    decodeSveFpCplxAdd(ExtMachInst machInst)
    {
        uint8_t size = bits(machInst, 23, 22);
        uint8_t rot = bits(machInst, 16) << 1 | 0x01;
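        // FCADD only supports rotations of 90 and 270 degrees, so
        // bit 16 is expanded to rot = 1 or rot = 3, presumably
        // matching the two-bit rotation encoding used by FCMLA below.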
        IntRegIndex zdn = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10);
        switch (size) {
            case 1:
                return new SveFcadd<uint16_t>(machInst,
                        zdn, zdn, zm, pg, rot);
            case 2:
                return new SveFcadd<uint32_t>(machInst,
                        zdn, zdn, zm, pg, rot);
            case 3:
                return new SveFcadd<uint64_t>(machInst,
                        zdn, zdn, zm, pg, rot);
        }
        return new Unknown64(machInst);
    }

    StaticInstPtr
    decodeSveFpCplxMulAddVec(ExtMachInst machInst)
    {
        uint8_t size = bits(machInst, 23, 22);
        if (size == 0) {
            return new Unknown64(machInst);
        }

        IntRegIndex zda = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10);
        IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 20, 16);
        uint8_t rot = bits(machInst, 14, 13);
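        // rot (bits 14-13) encodes the rotation in multiples of 90
        // degrees (0, 90, 180 or 270).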
        switch (size) {
            case 1:
                return new SveFcmlav<uint16_t>(machInst,
                        zda, zn, zm, pg, rot);
            case 2:
                return new SveFcmlav<uint32_t>(machInst,
                        zda, zn, zm, pg, rot);
            case 3:
                return new SveFcmlav<uint64_t>(machInst,
                        zda, zn, zm, pg, rot);
        }

        return new Unknown64(machInst);
    } // decodeSveFpCplxMulAddVec

    StaticInstPtr
    decodeSveFpCplxMulAddIndexed(ExtMachInst machInst)
    {
        uint8_t size = bits(machInst, 23, 22);
        if (size < 2) {
            return new Unknown64(machInst);
        }

        IntRegIndex zda = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
        IntRegIndex zm;
        uint8_t rot = bits(machInst, 11, 10);
        uint8_t imm;

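        // The zm/index split depends on the element size: single
        // precision uses a 3-bit zm (bits 18-16) with a 2-bit index
        // (bits 20-19), double precision a 4-bit zm (bits 19-16)
        // with a 1-bit index (bit 20).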
        switch (size) {
            case 2:
                zm = (IntRegIndex) (uint8_t) bits(machInst, 18, 16);
                imm = bits(machInst, 20, 19);
                return new SveFcmlai<uint32_t>(machInst,
                        zda, zn, zm, rot, imm);
            case 3:
                zm = (IntRegIndex) (uint8_t) bits(machInst, 19, 16);
                imm = bits(machInst, 20);
                return new SveFcmlai<uint64_t>(machInst,
                        zda, zn, zm, rot, imm);
        }
        return new Unknown64(machInst);
    } // decodeSveFpCplxMulAddIndexed

    StaticInstPtr
    decodeSveFpMulIndexed(ExtMachInst machInst)
    {
        IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);

        uint8_t size = bits(machInst, 23, 22);
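        // The index immediate is assembled from bits 20-19 plus bit
        // 22 for half precision, bits 20-19 for single and bit 20
        // for double, which is why sizes 0x0 and 0x1 share a case.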
        switch (size) {
          case 0x0:
          case 0x1:
            return new SveFmulIdx<uint16_t>(
                machInst, zd, zn,
                (IntRegIndex) (uint8_t) bits(machInst, 18, 16),
                bits(machInst, 20, 19) | (bits(machInst, 22) << 2));
          case 0x2:
            return new SveFmulIdx<uint32_t>(
                machInst, zd, zn,
                (IntRegIndex) (uint8_t) bits(machInst, 18, 16),
                bits(machInst, 20, 19));
          case 0x3:
            return new SveFmulIdx<uint64_t>(
                machInst, zd, zn,
                (IntRegIndex) (uint8_t) bits(machInst, 19, 16),
                bits(machInst, 20));
          default:
            return new Unknown64(machInst);
        }

    } // decodeSveFpMulIndexed

    StaticInstPtr
    decodeSveFpMulAddIndexed(ExtMachInst machInst)
    {
        IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
        const uint8_t op = bits(machInst, 10);
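        // op (bit 10) selects FMLS (1) versus FMLA (0) for every
        // element size below.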

        uint8_t size = bits(machInst, 23, 22);
        switch (size) {
          case 0x0:
          case 0x1:
            if (op) {
                return new SveFmlsIdx<uint16_t>(
                    machInst, zd, zn,
                    (IntRegIndex) (uint8_t) bits(machInst, 18, 16),
                    bits(machInst, 20, 19) | (bits(machInst, 22) << 2));
            } else {
                return new SveFmlaIdx<uint16_t>(
                    machInst, zd, zn,
                    (IntRegIndex) (uint8_t) bits(machInst, 18, 16),
                    bits(machInst, 20, 19) | (bits(machInst, 22) << 2));
            }
          case 0x2:
            if (op) {
                return new SveFmlsIdx<uint32_t>(
                    machInst, zd, zn,
                    (IntRegIndex) (uint8_t) bits(machInst, 18, 16),
                    bits(machInst, 20, 19));
            } else {
                return new SveFmlaIdx<uint32_t>(
                    machInst, zd, zn,
                    (IntRegIndex) (uint8_t) bits(machInst, 18, 16),
                    bits(machInst, 20, 19));
            }
          case 0x3:
            if (op) {
                return new SveFmlsIdx<uint64_t>(
                    machInst, zd, zn,
                    (IntRegIndex) (uint8_t) bits(machInst, 19, 16),
                    bits(machInst, 20));
            } else {
                return new SveFmlaIdx<uint64_t>(
                    machInst, zd, zn,
                    (IntRegIndex) (uint8_t) bits(machInst, 19, 16),
                    bits(machInst, 20));
            }
          default:
            return new Unknown64(machInst);
        }
    } // decodeSveFpMulAddIndexed

    StaticInstPtr
    decodeSveMemGather32(ExtMachInst machInst)
    {
        if (bits(machInst, 15)) {
            if (bits(machInst, 22)) {
                // SVE load and broadcast element
                IntRegIndex zt = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
                IntRegIndex rn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
                uint64_t imm = bits(machInst, 21, 16);
                IntRegIndex pg = (IntRegIndex) (uint8_t)
                                 bits(machInst, 12, 10);
                uint8_t dtype = (bits(machInst, 24, 23) << 2) |
                                bits(machInst, 14, 13);
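                // dtype is rebuilt from bits 24-23 and 14-13; the
                // SveLoadAndRepl template argument below selects the
                // load-and-replicate (LD1R) family of instructions.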
                return decodeSveContigLoadSIInsts<SveLoadAndRepl>(
                        dtype, machInst, zt, pg, rn, imm, false, true);
            } else {
                if (bits(machInst, 21)) {
                    // SVE 32-bit gather load (vector plus immediate)
                    IntRegIndex zt = (IntRegIndex) (uint8_t)
                                     bits(machInst, 4, 0);
                    IntRegIndex zn = (IntRegIndex) (uint8_t)
                                     bits(machInst, 9, 5);
                    uint64_t imm = bits(machInst, 20, 16);
                    IntRegIndex pg = (IntRegIndex) (uint8_t)
                                     bits(machInst, 12, 10);
                    uint8_t dtype = (bits(machInst, 24, 23) << 1) |
                                    bits(machInst, 14);
                    uint8_t ff = bits(machInst, 13);
                    if (ff) {
                        return new Unknown64(machInst);
                    }
                    return decodeSveGatherLoadVIInsts(
                        dtype, machInst, zt, pg, zn, imm, true, ff);
                } else {
                    uint8_t b14_13 = bits(machInst, 14, 13);
                    if (b14_13 == 0x2 && bits(machInst, 4) == 0) {
                        // TODO: SVE contiguous prefetch (scalar plus scalar)
                        return new WarnUnimplemented("prf[bhwd]", machInst);
                    } else if (b14_13 == 0x3 && bits(machInst, 4) == 0) {
                        // TODO: SVE 32-bit gather prefetch (vector plus
                        // immediate)
                        return new WarnUnimplemented("prf[bhwd]", machInst);
                    }
                }
            }
        } else {
            uint8_t b24_23 = bits(machInst, 24, 23);
            if (b24_23 != 0x3 && bits(machInst, 21) == 0) {
                // SVE 32-bit gather load (scalar plus 32-bit unscaled offsets)
                IntRegIndex zt = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
                IntRegIndex rn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
                IntRegIndex zm = (IntRegIndex) (uint8_t)
                         bits(machInst, 20, 16);
                IntRegIndex pg = (IntRegIndex) (uint8_t)
                         bits(machInst, 12, 10);
                uint8_t dtype = (bits(machInst, 24, 23) << 1) |
                                bits(machInst, 14);
                uint8_t xs = bits(machInst, 22);
                uint8_t ff = bits(machInst, 13);
                if (ff) {
                    return new Unknown64(machInst);
                }
                return decodeSveGatherLoadSVInsts(
                        dtype, machInst, zt, pg, rn, zm,
                        true, true, xs, false, ff);
            }
            switch (b24_23) {
              case 0x0:
                if (bits(machInst, 21) && bits(machInst, 4) == 0) {
                    // TODO: SVE 32-bit gather prefetch (vector plus immediate)
                    return new WarnUnimplemented("prf[bhwd]", machInst);
                }
                break;
              case 0x1:
                if (bits(machInst, 21)) {
                    // SVE 32-bit gather load halfwords (scalar plus 32-bit
                    // scaled offsets)
                    IntRegIndex zt = (IntRegIndex) (uint8_t)
                             bits(machInst, 4, 0);
                    IntRegIndex rn = (IntRegIndex) (uint8_t)
                             bits(machInst, 9, 5);
                    IntRegIndex zm = (IntRegIndex) (uint8_t)
                             bits(machInst, 20, 16);
                    IntRegIndex pg = (IntRegIndex) (uint8_t)
                             bits(machInst, 12, 10);
                    uint8_t xs = bits(machInst, 22);
                    uint8_t ff = bits(machInst, 13);
                    if (ff) {
                        return new Unknown64(machInst);
                    }
                    if (bits(machInst, 14)) {
                        return new SveIndexedMemSV<uint32_t, uint16_t,
                                                   SveGatherLoadSVMicroop>(
                            "ld1", machInst, MemReadOp, zt, pg, rn, zm,
                            true, xs, true);
                    } else {
                        return new SveIndexedMemSV<int32_t, int16_t,
                                                   SveGatherLoadSVMicroop>(
                            "ld1", machInst, MemReadOp, zt, pg, rn, zm,
                            true, xs, true);
                    }
                }
                break;
              case 0x2:
                if (bits(machInst, 21)) {
                    // SVE 32-bit gather load words (scalar plus 32-bit scaled
                    // offsets)
                    IntRegIndex zt = (IntRegIndex) (uint8_t)
                             bits(machInst, 4, 0);
                    IntRegIndex rn = (IntRegIndex) (uint8_t)
                             bits(machInst, 9, 5);
                    IntRegIndex zm = (IntRegIndex) (uint8_t)
                             bits(machInst, 20, 16);
                    IntRegIndex pg = (IntRegIndex) (uint8_t)
                             bits(machInst, 12, 10);
                    uint8_t xs = bits(machInst, 22);
                    uint8_t ff = bits(machInst, 13);
                    if (ff) {
                        return new Unknown64(machInst);
                    }
                    return new SveIndexedMemSV<uint32_t, uint32_t,
                                               SveGatherLoadSVMicroop>(
                        "ld1", machInst, MemReadOp, zt, pg, rn, zm,
                        true, xs, true);
                }
                break;
              case 0x3:
                if (bits(machInst, 22) == 0 && bits(machInst, 14, 13) == 0x0 &&
                        bits(machInst, 4) == 0) {
                    // SVE load predicate register
                    IntRegIndex pt = (IntRegIndex) (uint8_t)
                        bits(machInst, 3, 0);
                    IntRegIndex rn = (IntRegIndex) (uint8_t)
                        bits(machInst, 9, 5);
                    uint64_t imm = sext<9>((bits(machInst, 21, 16) << 3) |
                                           bits(machInst, 12, 10));
                    return new SveLdrPred(machInst, pt, rn, imm);
                } else if (bits(machInst, 22) == 0 &&
                           bits(machInst, 14, 13) == 0x2) {
                    // SVE load vector register
                    IntRegIndex zt = (IntRegIndex) (uint8_t)
                        bits(machInst, 4, 0);
                    IntRegIndex rn = (IntRegIndex) (uint8_t)
                        bits(machInst, 9, 5);
                    uint64_t imm = sext<9>((bits(machInst, 21, 16) << 3) |
                                           bits(machInst, 12, 10));
                    return new SveLdrVec(machInst, zt, rn, imm);
                } else if (bits(machInst, 22) == 1 &&
                           bits(machInst, 4) == 0) {
                    // TODO: SVE contiguous prefetch (scalar plus immediate)
                    return new WarnUnimplemented("prf[bhwd]", machInst);
                }
                break;
            }
        }
        return new Unknown64(machInst);
    }  // decodeSveMemGather32

    StaticInstPtr
    decodeSveLoadBcastQuadSS(ExtMachInst machInst)
    {
        return new Unknown64(machInst);
    }  // decodeSveLoadBcastQuadSS

    StaticInstPtr
    decodeSveLoadBcastQuadSI(ExtMachInst machInst)
    {
        return new Unknown64(machInst);
    }  // decodeSveLoadBcastQuadSI

    StaticInstPtr
    decodeSveContigLoadSS(ExtMachInst machInst)
    {
        IntRegIndex zt = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex rn = makeSP((IntRegIndex) (uint8_t) bits(machInst, 9, 5));
        IntRegIndex rm = makeSP(
            (IntRegIndex) (uint8_t) bits(machInst, 20, 16));
        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10);

        if (rm == 0x1f) {
            return new Unknown64(machInst);
        }

        return decodeSveContigLoadSSInsts<SveContigLoadSS>(
            bits(machInst, 24, 21), machInst, zt, pg, rn, rm, false);
    }  // decodeSveContigLoadSS

    StaticInstPtr
    decodeSveContigFFLoadSS(ExtMachInst machInst)
    {
        return new Unknown64(machInst);
    }  // decodeSveContigFFLoadSS

    StaticInstPtr
    decodeSveContigLoadSI(ExtMachInst machInst)
    {
        IntRegIndex zt = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex rn = makeSP((IntRegIndex) (uint8_t) bits(machInst, 9, 5));
        uint64_t imm = sext<4>(bits(machInst, 19, 16));
        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10);

        return decodeSveContigLoadSIInsts<SveContigLoadSI>(
            bits(machInst, 24, 21), machInst, zt, pg, rn, imm, false);
    }  // decodeSveContigLoadSI

    StaticInstPtr
    decodeSveContigNFLoadSI(ExtMachInst machInst)
    {
        return new Unknown64(machInst);
    }  // decodeSveContigNFLoadSI

    StaticInstPtr
    decodeSveContigNTLoadSS(ExtMachInst machInst)
    {
        return new Unknown64(machInst);
    }  // decodeSveContigNTLoadSS

    StaticInstPtr
    decodeSveLoadStructsSS(ExtMachInst machInst)
    {
        return new Unknown64(machInst);
    }  // decodeSveLoadStructsSS

    StaticInstPtr
    decodeSveContigNTLoadSI(ExtMachInst machInst)
    {
        return new Unknown64(machInst);
    }  // decodeSveContigNTLoadSI

    StaticInstPtr
    decodeSveLoadStructsSI(ExtMachInst machInst)
    {
        return new Unknown64(machInst);
    }  // decodeSveLoadStructsSI

    StaticInstPtr
    decodeSveMemContigLoad(ExtMachInst machInst)
    {
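        // Bits 15-13 select the contiguous-load family. Several of
        // the helpers dispatched below (quadword broadcast, first-
        // fault, non-fault, non-temporal and structure loads) are
        // stubs that currently decode to Unknown64.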
        switch (bits(machInst, 15, 13)) {
          case 0x0:
            return decodeSveLoadBcastQuadSS(machInst);
          case 0x1:
            if (bits(machInst, 20) == 0x0) {
                return decodeSveLoadBcastQuadSI(machInst);
            }
            break;
          case 0x2:
            return decodeSveContigLoadSS(machInst);
          case 0x3:
            return decodeSveContigFFLoadSS(machInst);
          case 0x5:
            if (bits(machInst, 20) == 0x0) {
                return decodeSveContigLoadSI(machInst);
            } else {
                return decodeSveContigNFLoadSI(machInst);
            }
          case 0x6:
            if (bits(machInst, 22, 21) == 0x0) {
                return decodeSveContigNTLoadSS(machInst);
            } else {
                return decodeSveLoadStructsSS(machInst);
            }
          case 0x7:
            if (bits(machInst, 20) == 0) {
                if (bits(machInst, 22, 21) == 0x0) {
                    return decodeSveContigNTLoadSI(machInst);
                } else {
                    return decodeSveLoadStructsSI(machInst);
                }
            }
            break;
        }
        return new Unknown64(machInst);
    }  // decodeSveMemContigLoad

    StaticInstPtr
    decodeSveMemGather64(ExtMachInst machInst)
    {
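        // Bits 21 and 15 distinguish the scalar-plus-vector and
        // vector-plus-immediate 64-bit gather forms handled below.
        // First-faulting variants (bit 13 set) are not implemented
        // here and decode to Unknown64.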
        switch ((bits(machInst, 21) << 1) | bits(machInst, 15)) {
          case 0x0:
            {
                // SVE 64-bit gather load (scalar plus unpacked 32-bit unscaled
                // offsets)
                IntRegIndex zt = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
                IntRegIndex rn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
                IntRegIndex zm = (IntRegIndex) (uint8_t)
                         bits(machInst, 20, 16);
                IntRegIndex pg = (IntRegIndex) (uint8_t)
                         bits(machInst, 12, 10);
                uint8_t dtype = (bits(machInst, 24, 23) << 1) |
                                bits(machInst, 14);
                uint8_t xs = bits(machInst, 22);
                uint8_t ff = bits(machInst, 13);
                if (ff) {
                    return new Unknown64(machInst);
                }
                return decodeSveGatherLoadSVInsts(
                        dtype, machInst, zt, pg, rn, zm,
                        false, true, xs, false, ff);
            }
          case 0x1:
            if (bits(machInst, 22)) {
                // SVE 64-bit gather load (scalar plus 64-bit unscaled offsets)
                IntRegIndex zt = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
                IntRegIndex rn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
                IntRegIndex zm = (IntRegIndex) (uint8_t)
                         bits(machInst, 20, 16);
                IntRegIndex pg = (IntRegIndex) (uint8_t)
                         bits(machInst, 12, 10);
                uint8_t dtype = (bits(machInst, 24, 23) << 1) |
                                bits(machInst, 14);
                uint8_t ff = bits(machInst, 13);
                if (ff) {
                    return new Unknown64(machInst);
                }
                return decodeSveGatherLoadSVInsts(
                        dtype, machInst, zt, pg, rn, zm,
                        false, false, false, false, ff);
            } else {
                if (bits(machInst, 14, 13) == 0x3 && bits(machInst, 4) == 0) {
                    // TODO: SVE 64-bit gather prefetch (vector plus immediate)
                    return new WarnUnimplemented("prf[bhwd]", machInst);
                }
            }
            break;
          case 0x2:
            if (bits(machInst, 24, 23) != 0x0) {
                //  SVE 64-bit gather load (scalar plus unpacked 32-bit scaled
                //  offsets)
                IntRegIndex zt = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
                IntRegIndex rn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
                IntRegIndex zm = (IntRegIndex) (uint8_t)
                         bits(machInst, 20, 16);
                IntRegIndex pg = (IntRegIndex) (uint8_t)
                         bits(machInst, 12, 10);
                uint8_t dtype = (bits(machInst, 24, 23) << 1) |
                                bits(machInst, 14);
                uint8_t xs = bits(machInst, 22);
                uint8_t ff = bits(machInst, 13);
                if (ff) {
                    return new Unknown64(machInst);
                }
                return decodeSveGatherLoadSVInsts(
                        dtype, machInst, zt, pg, rn, zm,
                        false, true, xs, true, ff);
            } else if (bits(machInst, 4) == 0) {
                // TODO: SVE 64-bit gather prefetch (scalar plus unpacked
                // 32-bit scaled offsets)
                return new WarnUnimplemented("prf[bhwd]", machInst);
            }
            break;
          case 0x3:
            if (bits(machInst, 22) == 0) {
                // SVE 64-bit gather load (vector plus immediate)
                IntRegIndex zt = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
                IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
                uint64_t imm = bits(machInst, 20, 16);
                IntRegIndex pg = (IntRegIndex) (uint8_t)
                                 bits(machInst, 12, 10);
                uint8_t dtype = (bits(machInst, 24, 23) << 1) |
                                bits(machInst, 14);
                uint8_t ff = bits(machInst, 13);
                if (ff) {
                    return new Unknown64(machInst);
                }
                return decodeSveGatherLoadVIInsts(
                    dtype, machInst, zt, pg, zn, imm, false, ff);
            } else {
                if (bits(machInst, 24, 23) != 0x0) {
                    // SVE 64-bit gather load (scalar plus 64-bit scaled
                    // offsets)
                    IntRegIndex zt = (IntRegIndex) (uint8_t)
                             bits(machInst, 4, 0);
                    IntRegIndex rn = (IntRegIndex) (uint8_t)
                             bits(machInst, 9, 5);
                    IntRegIndex zm = (IntRegIndex) (uint8_t)
                             bits(machInst, 20, 16);
                    IntRegIndex pg = (IntRegIndex) (uint8_t)
                             bits(machInst, 12, 10);
                    uint8_t dtype = (bits(machInst, 24, 23) << 1) |
                                    bits(machInst, 14);
                    uint8_t ff = bits(machInst, 13);
                    if (ff) {
                        return new Unknown64(machInst);
                    }
                    return decodeSveGatherLoadSVInsts(
                            dtype, machInst, zt, pg, rn, zm,
                            false, false, false, true, ff);
                } else if (bits(machInst, 4) == 0) {
                    // TODO: SVE 64-bit gather prefetch (scalar plus 64-bit
                    // scaled offsets)
                    return new WarnUnimplemented("prf[bhwd]", machInst);
                }
            }
            break;
        }
        return new Unknown64(machInst);
    }  // decodeSveMemGather64

    StaticInstPtr
    decodeSveContigStoreSS(ExtMachInst machInst)
    {
        IntRegIndex zt = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex rn = makeSP((IntRegIndex) (uint8_t) bits(machInst, 9, 5));
        IntRegIndex rm = makeSP(
            (IntRegIndex) (uint8_t) bits(machInst, 20, 16));
        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10);

        if (rm == 0x1f) {
            return new Unknown64(machInst);
        }

        return decodeSveContigStoreSSInsts<SveContigStoreSS>(
            bits(machInst, 24, 21), machInst, zt, pg, rn, rm);
    }  // decodeSveContigStoreSS

    StaticInstPtr
    decodeSveContigStoreSI(ExtMachInst machInst)
    {
        IntRegIndex zt = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex rn = makeSP((IntRegIndex) (uint8_t) bits(machInst, 9, 5));
        int8_t imm = sext<4>(bits(machInst, 19, 16));
        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10);

        return decodeSveContigStoreSIInsts<SveContigStoreSI>(
            bits(machInst, 24, 21), machInst, zt, pg, rn, imm);
    }  // decodeSveContigStoreSI

    StaticInstPtr
    decodeSveContigNTStoreSS(ExtMachInst machInst)
    {
        return new Unknown64(machInst);
    }  // decodeSveContigNTStoreSS

    StaticInstPtr
    decodeSveContigNTStoreSI(ExtMachInst machInst)
    {
        return new Unknown64(machInst);
    }  // decodeSveContigNTStoreSI

    StaticInstPtr
    decodeSveStoreStructsSS(ExtMachInst machInst)
    {
        return new Unknown64(machInst);
    }  // decodeSveStoreStructsSS

    StaticInstPtr
    decodeSveStoreStructsSI(ExtMachInst machInst)
    {
        return new Unknown64(machInst);
    }  // decodeSveStoreStructsSI

    StaticInstPtr
    decodeSveMemStore(ExtMachInst machInst)
    {
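        // Bits 15-13 split the store encodings into predicate/vector
        // register stores (STR), contiguous stores, scatter stores
        // and the (currently unimplemented) non-temporal and
        // structure stores.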
        switch (bits(machInst, 15, 13)) {
          case 0x0:
            if (bits(machInst, 24, 22) == 0x6 && bits(machInst, 4) == 0x0) {
                IntRegIndex pt = (IntRegIndex) (uint8_t) bits(machInst, 3, 0);
                IntRegIndex rn = makeSP(
                    (IntRegIndex) (uint8_t) bits(machInst, 9, 5));
                int16_t imm = sext<9>((bits(machInst, 21, 16) << 3) |
                                      bits(machInst, 12, 10));
                return new SveStrPred(machInst, pt, rn, imm);
            }
            break;
          case 0x2:
            if (bits(machInst, 24, 22) == 0x6) {
                IntRegIndex zt = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
                IntRegIndex rn = makeSP(
                    (IntRegIndex) (uint8_t) bits(machInst, 9, 5));
                int16_t imm = sext<9>((bits(machInst, 21, 16) << 3) |
                                      bits(machInst, 12, 10));
                return new SveStrVec(machInst, zt, rn, imm);
            } else {
                return decodeSveContigStoreSS(machInst);
            }
            break;
          case 0x3:
            if (bits(machInst, 22, 21) == 0x0) {
                return decodeSveContigNTStoreSS(machInst);
            } else {
                return decodeSveStoreStructsSS(machInst);
            }
          case 0x4:
          case 0x6:
            {
                IntRegIndex zt = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
                IntRegIndex rn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
                IntRegIndex zm = (IntRegIndex) (uint8_t)
                         bits(machInst, 20, 16);
                IntRegIndex pg = (IntRegIndex) (uint8_t)
                         bits(machInst, 12, 10);
                uint8_t msz = bits(machInst, 24, 23);
                uint8_t xs = bits(machInst, 22);

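                // msz (bits 24-23) gives the memory element size;
                // bits 22-21 then select among the 32-bit and 64-bit
                // scatter-store forms with unscaled or scaled offsets
                // (see the per-case comments below).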
                switch (bits(machInst, 22, 21)) {
                  case 0x0:
                    // SVE 64-bit scatter store (scalar plus unpacked 32-bit
                    // unscaled offsets)
                    return decodeSveScatterStoreSVInsts(
                            msz, machInst, zt, pg, rn, zm,
                            false, true, xs, false);
                  case 0x1:
                    if (bits(machInst, 24, 23) != 0x0) {
                        // SVE 64-bit scatter store (scalar plus unpacked
                        // 32-bit scaled offsets)
                        return decodeSveScatterStoreSVInsts(
                                msz, machInst, zt, pg, rn, zm,
                                false, true, xs, true);
                    }
                    break;
                  case 0x2:
                    if (bits(machInst, 24, 23) != 0x3) {
                        // SVE 32-bit scatter store (scalar plus 32-bit
                        // unscaled offsets)
                        return decodeSveScatterStoreSVInsts(
                                msz, machInst, zt, pg, rn, zm,
                                true, true, xs, false);
                    }
                    break;
                  case 0x3:
                    // SVE 32-bit scatter store (scalar plus 32-bit scaled
                    // offsets)
                    return decodeSveScatterStoreSVInsts(
                            msz, machInst, zt, pg, rn, zm,
                            true, true, xs, true);
                }
            }
            break;
          case 0x5:
            switch (bits(machInst, 22, 21)) {
              case 0x0:
                {
                    // SVE 64-bit scatter store (scalar plus 64-bit unscaled
                    // offsets)
                    IntRegIndex zt = (IntRegIndex) (uint8_t)
                            bits(machInst, 4, 0);
                    IntRegIndex rn = (IntRegIndex) (uint8_t)
                            bits(machInst, 9, 5);
                    IntRegIndex zm = (IntRegIndex) (uint8_t)
                            bits(machInst, 20, 16);
                    IntRegIndex pg = (IntRegIndex) (uint8_t)
                            bits(machInst, 12, 10);
                    uint8_t msz = bits(machInst, 24, 23);

                    return decodeSveScatterStoreSVInsts(
                            msz, machInst, zt, pg, rn, zm,
                            false, false, false, false);
                }
              case 0x1:
                if (bits(machInst, 24, 23) != 0x0) {
                    // SVE 64-bit scatter store (scalar plus 64-bit scaled
                    // offsets)
                    IntRegIndex zt = (IntRegIndex) (uint8_t)
                            bits(machInst, 4, 0);
                    IntRegIndex rn = (IntRegIndex) (uint8_t)
                            bits(machInst, 9, 5);
                    IntRegIndex zm = (IntRegIndex) (uint8_t)
                            bits(machInst, 20, 16);
                    IntRegIndex pg = (IntRegIndex) (uint8_t)
                            bits(machInst, 12, 10);
                    uint8_t msz = bits(machInst, 24, 23);

                    return decodeSveScatterStoreSVInsts(
                            msz, machInst, zt, pg, rn, zm,
                            false, false, false, true);
                }
                break;
              case 0x2:
                {
                    // SVE 64-bit scatter store (vector plus immediate)
                    IntRegIndex zt = (IntRegIndex) (uint8_t)
                            bits(machInst, 4, 0);
                    IntRegIndex zn = (IntRegIndex) (uint8_t)
                            bits(machInst, 9, 5);
                    uint64_t imm = bits(machInst, 20, 16);
                    IntRegIndex pg = (IntRegIndex) (uint8_t)
                            bits(machInst, 12, 10);
                    uint8_t msz = bits(machInst, 24, 23);

                    return decodeSveScatterStoreVIInsts(
                        msz, machInst, zt, pg, zn, imm, false);
                }
              case 0x3:
                if (bits(machInst, 24, 23) != 0x3) {
                    // SVE 32-bit scatter store (vector plus immediate)
                    IntRegIndex zt = (IntRegIndex) (uint8_t)
                            bits(machInst, 4, 0);
                    IntRegIndex zn = (IntRegIndex) (uint8_t)
                            bits(machInst, 9, 5);
                    uint64_t imm = bits(machInst, 20, 16);
                    IntRegIndex pg = (IntRegIndex) (uint8_t)
                            bits(machInst, 12, 10);
                    uint8_t msz = bits(machInst, 24, 23);

                    return decodeSveScatterStoreVIInsts(
                        msz, machInst, zt, pg, zn, imm, true);
                }
                break;
            }
            break;
          case 0x7:
            if (bits(machInst, 20) == 0x0) {
                return decodeSveContigStoreSI(machInst);
            } else if (bits(machInst, 22, 21) == 0x0) {
                return decodeSveContigNTStoreSI(machInst);
            } else {
                return decodeSveStoreStructsSI(machInst);
            }
        }
        return new Unknown64(machInst);
    }  // decodeSveMemStore

}  // namespace Aarch64
}};