// sve_2nd_level.isa revision 13759:9941fca869a9
// Copyright (c) 2017-2019 ARM Limited
// All rights reserved
//
// The license below extends only to copyright in the software and shall
// not be construed as granting a license to any other intellectual
// property including but not limited to intellectual property relating
// to a hardware implementation of the functionality of the software
// licensed hereunder. You may use the software subject to the license
// terms below provided that you ensure that this notice is replicated
// unmodified and in its entirety in all distributions of the software,
// modified or unmodified, in source code or in binary form.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met: redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer;
// redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution;
// neither the name of the copyright holders nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT 28// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 29// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 30// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 31// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 32// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 33// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 34// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 35// 36// Authors: Giacomo Gabrielli 37 38/// @file 39/// SVE 2nd-level decoder. 40 41output decoder {{ 42namespace Aarch64 43{ 44 45 StaticInstPtr 46 decodeSveIntArithBinPred(ExtMachInst machInst) 47 { 48 IntRegIndex zdn = (IntRegIndex) (uint8_t) bits(machInst, 4, 0); 49 IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 9, 5); 50 IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10); 51 52 switch (bits(machInst, 20, 19)) { 53 case 0x0: 54 { 55 uint8_t size = bits(machInst, 23, 22); 56 uint8_t opc = bits(machInst, 18, 16); 57 switch (opc) { 58 case 0x0: 59 return decodeSveBinDestrPredU<SveAddPred>( 60 size, machInst, zdn, zm, pg); 61 case 0x1: 62 return decodeSveBinDestrPredU<SveSubPred>( 63 size, machInst, zdn, zm, pg); 64 case 0x3: 65 return decodeSveBinDestrPredU<SveSubr>( 66 size, machInst, zdn, zm, pg); 67 default: 68 return new Unknown64(machInst); 69 } 70 } 71 case 0x1: 72 { 73 uint8_t size = bits(machInst, 23, 22); 74 uint8_t u = bits(machInst, 16); 75 uint8_t opc = bits(machInst, 18, 17); 76 switch (opc) { 77 case 0x0: 78 return decodeSveBinDestrPred<SveSmax, SveUmax>( 79 size, u, machInst, zdn, zm, pg); 80 case 0x1: 81 return decodeSveBinDestrPred<SveSmin, SveUmin>( 82 size, u, machInst, zdn, zm, pg); 83 case 0x2: 84 return decodeSveBinDestrPred<SveSabd, SveUabd>( 85 size, u, machInst, zdn, zm, pg); 86 default: 87 return new Unknown64(machInst); 88 } 89 } 90 case 0x2: 91 { 92 uint8_t size = 
bits(machInst, 23, 22); 93 uint8_t u = bits(machInst, 16); 94 uint8_t opc = bits(machInst, 18, 17); 95 switch (opc) { 96 case 0x0: 97 if (u == 0) { 98 return decodeSveBinDestrPredU<SveMul>( 99 size, machInst, zdn, zm, pg); 100 } else { 101 return new Unknown64(machInst); 102 } 103 case 0x1: 104 return decodeSveBinDestrPred<SveSmulh, SveUmulh>( 105 size, u, machInst, zdn, zm, pg); 106 case 0x2: 107 if (size == 0x2 || size == 0x3) { 108 return decodeSveBinDestrPred<SveSdiv, SveUdiv>( 109 size, u, machInst, zdn, zm, pg); 110 } else { 111 return new Unknown64(machInst); 112 } 113 case 0x3: 114 if (size == 0x2 || size == 0x3) { 115 return decodeSveBinDestrPred<SveSdivr, SveUdivr>( 116 size, u, machInst, zdn, zm, pg); 117 } else { 118 return new Unknown64(machInst); 119 } 120 } 121 } 122 case 0x3: 123 { 124 uint8_t size = bits(machInst, 23, 22); 125 uint8_t opc = bits(machInst, 18, 16); 126 127 switch (opc) { 128 case 0x0: 129 return decodeSveBinDestrPredU<SveOrrPred>( 130 size, machInst, zdn, zm, pg); 131 case 0x1: 132 return decodeSveBinDestrPredU<SveEorPred>( 133 size, machInst, zdn, zm, pg); 134 case 0x2: 135 return decodeSveBinDestrPredU<SveAndPred>( 136 size, machInst, zdn, zm, pg); 137 case 0x3: 138 return decodeSveBinDestrPredU<SveBicPred>( 139 size, machInst, zdn, zm, pg); 140 default: 141 return new Unknown64(machInst); 142 } 143 } 144 } 145 return new Unknown64(machInst); 146 } // decodeSveArithBinPred 147 148 StaticInstPtr 149 decodeSveIntReduc(ExtMachInst machInst) 150 { 151 IntRegIndex vd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0); 152 IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5); 153 IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10); 154 155 uint8_t size = bits(machInst, 23, 22); 156 157 switch (bits(machInst, 20, 19)) { 158 case 0x0: 159 { 160 uint8_t u = bits(machInst, 16); 161 uint8_t opc = bits(machInst, 18, 17); 162 if (opc != 0x0 || (!u && size == 0x3)) { 163 return new Unknown64(machInst); 164 } else { 165 return 
decodeSveWideningReduc<SveSaddv, SveUaddv>( 166 size, u, machInst, vd, zn, pg); 167 } 168 } 169 case 0x1: 170 { 171 uint8_t u = bits(machInst, 16); 172 uint8_t opc = bits(machInst, 18, 17); 173 switch (opc) { 174 case 0x0: 175 return decodeSveUnaryPred<SveSmaxv, SveUmaxv>( 176 size, u, machInst, vd, zn, pg); 177 case 0x1: 178 return decodeSveUnaryPred<SveSminv, SveUminv>( 179 size, u, machInst, vd, zn, pg); 180 default: 181 return new Unknown64(machInst); 182 } 183 } 184 case 0x2: 185 { 186 uint8_t opc = bits(machInst, 18, 17); 187 uint8_t merge = bits(machInst, 16); 188 switch (opc) { 189 case 0x0: 190 if (merge) { 191 return decodeSveUnaryPredU<SveMovprfxPredM>( 192 size, machInst, vd /* zd */, zn, pg); 193 } else { 194 return decodeSveUnaryPredU<SveMovprfxPredZ>( 195 size, machInst, vd /* zd */, zn, pg); 196 } 197 default: 198 return new Unknown64(machInst); 199 } 200 } 201 case 0x3: 202 { 203 uint8_t opc = bits(machInst, 18, 16); 204 switch (opc) { 205 case 0x0: 206 return decodeSveUnaryPredU<SveOrv>( 207 size, machInst, vd, zn, pg); 208 case 0x1: 209 return decodeSveUnaryPredU<SveEorv>( 210 size, machInst, vd, zn, pg); 211 case 0x2: 212 return decodeSveUnaryPredU<SveAndv>( 213 size, machInst, vd, zn, pg); 214 default: 215 return new Unknown64(machInst); 216 } 217 } 218 } 219 return new Unknown64(machInst); 220 } // decodeSveIntReduc 221 222 StaticInstPtr 223 decodeSveIntMulAdd(ExtMachInst machInst) 224 { 225 IntRegIndex zda = (IntRegIndex) (uint8_t) bits(machInst, 4, 0); 226 IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5); 227 IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 20, 16); 228 IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10); 229 230 uint8_t size = bits(machInst, 23, 22); 231 uint8_t opc = (bits(machInst, 15) << 1) | bits(machInst, 13); 232 switch (opc) { 233 case 0x0: 234 return decodeSveTerPredS<SveMla>( 235 size, machInst, zda, zn, zm, pg); 236 case 0x1: 237 return decodeSveTerPredS<SveMls>( 238 size, machInst, 
zda, zn, zm, pg); 239 case 0x2: 240 return decodeSveTerPredS<SveMad>( 241 size, machInst, zda /* zdn */, zn /* za */, zm, pg); 242 case 0x3: 243 return decodeSveTerPredS<SveMsb>( 244 size, machInst, zda /* zdn */, zn /* za */, zm, pg); 245 } 246 return new Unknown64(machInst); 247 } // decodeSveIntMulAdd 248 249 StaticInstPtr 250 decodeSveShiftByImmPred0(ExtMachInst machInst) 251 { 252 IntRegIndex zdn = (IntRegIndex) (uint8_t) bits(machInst, 4, 0); 253 IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10); 254 uint8_t imm3 = (uint8_t) bits(machInst, 7, 5); 255 256 uint8_t tsize = (bits(machInst, 23, 22) << 2) | bits(machInst, 9, 8); 257 uint8_t esize = 0; 258 uint8_t size = 0; 259 260 if (tsize == 0x0) { 261 return new Unknown64(machInst); 262 } else if (tsize == 0x1) { 263 esize = 8; 264 } else if ((tsize & 0x0E) == 0x2) { 265 esize = 16; 266 size = 1; 267 } else if ((tsize & 0x0C) == 0x4) { 268 esize = 32; 269 size = 2; 270 } else if ((tsize & 0x08) == 0x8) { 271 esize = 64; 272 size = 3; 273 } 274 275 uint8_t opc = bits(machInst, 18, 16); 276 switch (opc) { 277 case 0x0: 278 { 279 unsigned shiftAmt = 2 * esize - ((tsize << 3) | imm3); 280 return decodeSveBinImmPredU<SveAsrImmPred>( 281 size, machInst, zdn, shiftAmt, pg); 282 } 283 case 0x01: 284 { 285 unsigned shiftAmt = 2 * esize - ((tsize << 3) | imm3); 286 return decodeSveBinImmPredU<SveLsrImmPred>( 287 size, machInst, zdn, shiftAmt, pg); 288 } 289 case 0x03: 290 { 291 unsigned shiftAmt = ((tsize << 3) | imm3) - esize; 292 return decodeSveBinImmPredU<SveLslImmPred>( 293 size, machInst, zdn, shiftAmt, pg); 294 } 295 case 0x04: 296 { 297 unsigned shiftAmt = 2 * esize - ((tsize << 3) | imm3); 298 return decodeSveBinImmPredS<SveAsrd>( 299 size, machInst, zdn, shiftAmt, pg); 300 } 301 } 302 return new Unknown64(machInst); 303 } // decodeSveShiftByImmPred0 304 305 StaticInstPtr 306 decodeSveShiftByVectorPred(ExtMachInst machInst) 307 { 308 IntRegIndex zdn = (IntRegIndex) (uint8_t) bits(machInst, 4, 0); 
309 IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 9, 5); 310 IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10); 311 uint8_t size = bits(machInst, 23, 22); 312 uint8_t opc = bits(machInst, 18, 16); 313 switch (opc) { 314 case 0: 315 return decodeSveBinDestrPredU<SveAsrPred>( 316 size, machInst, zdn, zm, pg); 317 case 1: 318 return decodeSveBinDestrPredU<SveLsrPred>( 319 size, machInst, zdn, zm, pg); 320 case 3: 321 return decodeSveBinDestrPredU<SveLslPred>( 322 size, machInst, zdn, zm, pg); 323 case 4: 324 return decodeSveBinDestrPredU<SveAsrr>( 325 size, machInst, zdn, zm, pg); 326 case 5: 327 return decodeSveBinDestrPredU<SveLsrr>( 328 size, machInst, zdn, zm, pg); 329 case 7: 330 return decodeSveBinDestrPredU<SveLslr>( 331 size, machInst, zdn, zm, pg); 332 } 333 return new Unknown64(machInst); 334 } // decodeSveShiftByVectorPred 335 336 StaticInstPtr 337 decodeSveShiftByWideElemsPred(ExtMachInst machInst) 338 { 339 IntRegIndex zdn = (IntRegIndex) (uint8_t) bits(machInst, 4, 0); 340 IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 9, 5); 341 IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10); 342 uint8_t size = bits(machInst, 23, 22); 343 uint8_t opc = bits(machInst, 18, 16); 344 switch (opc) { 345 case 0x0: 346 return decodeSveBinDestrPredU<SveAsrWidePred>( 347 size, machInst, zdn, zm, pg); 348 case 0x1: 349 return decodeSveBinDestrPredU<SveLsrWidePred>( 350 size, machInst, zdn, zm, pg); 351 case 0x3: 352 return decodeSveBinDestrPredU<SveLslWidePred>( 353 size, machInst, zdn, zm, pg); 354 } 355 return new Unknown64(machInst); 356 } // decodeSveShiftByWideElemsPred 357 358 StaticInstPtr 359 decodeSveShiftByImmPred(ExtMachInst machInst) 360 { 361 uint8_t b20_19 = bits(machInst, 20, 19); 362 uint8_t b23_22 = bits(machInst, 23, 22); 363 364 if (b20_19 == 0x0) { 365 return decodeSveShiftByImmPred0(machInst); 366 } else if (b20_19 == 0x2) { 367 return decodeSveShiftByVectorPred(machInst); 368 } else if (b20_19 == 0x3 && 
b23_22 != 0x3) { 369 return decodeSveShiftByWideElemsPred(machInst); 370 } 371 return new Unknown64(machInst); 372 } // decodeSveShiftByImmPred 373 374 StaticInstPtr 375 decodeSveIntArithUnaryPred(ExtMachInst machInst) 376 { 377 IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0); 378 IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5); 379 IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10); 380 unsigned esize = bits(machInst, 23, 22); 381 uint8_t opg = bits(machInst, 20, 19); 382 uint8_t opc = bits(machInst, 18, 16); 383 if (opg == 0x2) { 384 bool unsig = static_cast<bool>(opc & 1); 385 switch (opc) { 386 case 0: 387 case 1: 388 if (esize == 0) break; 389 if (unsig) { 390 return decodeSveUnaryExtendFromBPredU<SveUxtb>( 391 esize, machInst, zd, zn, pg); 392 } else { 393 return decodeSveUnaryExtendFromBPredU<SveSxtb>( 394 esize, machInst, zd, zn, pg); 395 } 396 case 2: 397 case 3: 398 if (esize < 2) break; 399 if (unsig) { 400 return decodeSveUnaryExtendFromHPredU<SveUxth>( 401 esize, machInst, zd, zn, pg); 402 } else { 403 return decodeSveUnaryExtendFromHPredU<SveSxth>( 404 esize, machInst, zd, zn, pg); 405 } 406 case 4: 407 case 5: 408 if (esize != 3) break; 409 if (unsig) { 410 return new SveUxtw<uint32_t, uint64_t>( 411 machInst, zd, zn, pg); 412 } else { 413 return new SveSxtw<uint32_t, uint64_t>( 414 machInst, zd, zn, pg); 415 } 416 case 6: 417 return decodeSveUnaryPredS<SveAbs>( 418 esize, machInst, zd, zn, pg); 419 case 7: 420 return decodeSveUnaryPredS<SveNeg>( 421 esize, machInst, zd, zn, pg); 422 } 423 } else if (opg == 0x3) { 424 switch (opc) { 425 case 0: 426 return decodeSveUnaryPredS<SveCls>( 427 esize, machInst, zd, zn, pg); 428 case 1: 429 return decodeSveUnaryPredS<SveClz>( 430 esize, machInst, zd, zn, pg); 431 case 2: 432 return decodeSveUnaryPredU<SveCnt>( 433 esize, machInst, zd, zn, pg); 434 case 3: 435 return decodeSveUnaryPredU<SveCnot>( 436 esize, machInst, zd, zn, pg); 437 case 4: 438 return 
decodeSveUnaryPredF<SveFabs>( 439 esize, machInst, zd, zn, pg); 440 case 5: 441 return decodeSveUnaryPredF<SveFneg>( 442 esize, machInst, zd, zn, pg); 443 case 6: 444 return decodeSveUnaryPredU<SveNot>( 445 esize, machInst, zd, zn, pg); 446 break; 447 } 448 } 449 return new Unknown64(machInst); 450 } // decodeSveIntArithUnaryPred 451 452 StaticInstPtr 453 decodeSveIntArithUnpred(ExtMachInst machInst) 454 { 455 IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0); 456 IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5); 457 IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 20, 16); 458 459 uint8_t opc = (uint8_t) bits(machInst, 12, 10); 460 uint8_t size = (uint8_t) bits(machInst, 23, 22); 461 462 switch (opc) { 463 case 0x0: 464 return decodeSveBinUnpredU<SveAddUnpred>(size, machInst, 465 zd, zn, zm); 466 case 0x1: 467 return decodeSveBinUnpredU<SveSubUnpred>(size, machInst, 468 zd, zn, zm); 469 case 0x4: 470 return decodeSveBinUnpredS<SveSqadd>(size, machInst, 471 zd, zn, zm); 472 case 0x5: 473 return decodeSveBinUnpredU<SveUqadd>(size, machInst, 474 zd, zn, zm); 475 case 0x6: 476 return decodeSveBinUnpredS<SveSqsub>(size, machInst, 477 zd, zn, zm); 478 case 0x7: 479 return decodeSveBinUnpredU<SveUqsub>(size, machInst, 480 zd, zn, zm); 481 } 482 483 return new Unknown64(machInst); 484 } // decodeSveIntArithUnpred 485 486 StaticInstPtr 487 decodeSveIntLogUnpred(ExtMachInst machInst) 488 { 489 IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0); 490 IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5); 491 IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 20, 16); 492 uint8_t opc = (uint8_t) (bits(machInst, 23, 22) << 3 493 | bits(machInst, 12, 10)); 494 495 switch (opc) { 496 case 0x4: 497 return new SveAndUnpred<uint64_t>(machInst, zd, zn, zm); 498 case 0xc: 499 return new SveOrrUnpred<uint64_t>(machInst, zd, zn, zm); 500 case 0x14: 501 return new SveEorUnpred<uint64_t>(machInst, zd, zn, zm); 502 case 0x1c: 503 
return new SveBicUnpred<uint64_t>(machInst, zd, zn, zm); 504 } 505 506 return new Unknown64(machInst); 507 } // decodeSveIntLogUnpred 508 509 StaticInstPtr 510 decodeSveIndexGen(ExtMachInst machInst) 511 { 512 IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0); 513 uint8_t size = (uint8_t) bits(machInst, 23, 22); 514 uint8_t grp = (uint8_t) bits(machInst, 11, 10); 515 516 switch (grp) { 517 case 0: 518 { // INDEX (immediate) 519 int8_t imm5 = sext<5>(bits(machInst, 9, 5)); 520 int8_t imm5b = sext<5>(bits(machInst, 20, 16)); 521 switch (size) { 522 case 0: 523 return new SveIndexII<int8_t>(machInst, 524 zd, imm5, imm5b); 525 case 1: 526 return new SveIndexII<int16_t>(machInst, 527 zd, imm5, imm5b); 528 case 2: 529 return new SveIndexII<int32_t>(machInst, 530 zd, imm5, imm5b); 531 case 3: 532 return new SveIndexII<int64_t>(machInst, 533 zd, imm5, imm5b); 534 } 535 } 536 case 1: 537 { // INDEX (scalar, immediate) 538 int8_t imm5 = sext<5>(bits(machInst, 20, 16)); 539 IntRegIndex zn = (IntRegIndex) (uint8_t) bits( 540 machInst, 9, 5); 541 switch (size) { 542 case 0: 543 return new SveIndexRI<int8_t>(machInst, 544 zd, zn, imm5); 545 case 1: 546 return new SveIndexRI<int16_t>(machInst, 547 zd, zn, imm5); 548 case 2: 549 return new SveIndexRI<int32_t>(machInst, 550 zd, zn, imm5); 551 case 3: 552 return new SveIndexRI<int64_t>(machInst, 553 zd, zn, imm5); 554 } 555 } 556 case 2: 557 { // INDEX (immediate, scalar) 558 int8_t imm5 = sext<5>(bits(machInst, 9, 5)); 559 IntRegIndex zm = (IntRegIndex) (uint8_t) bits( 560 machInst, 20, 16); 561 switch (size) { 562 case 0: 563 return new SveIndexIR<int8_t>(machInst, 564 zd, imm5, zm); 565 case 1: 566 return new SveIndexIR<int16_t>(machInst, 567 zd, imm5, zm); 568 case 2: 569 return new SveIndexIR<int32_t>(machInst, 570 zd, imm5, zm); 571 case 3: 572 return new SveIndexIR<int64_t>(machInst, 573 zd, imm5, zm); 574 } 575 } 576 case 3: 577 { // INDEX (scalars) 578 IntRegIndex zn = (IntRegIndex) (uint8_t) bits( 579 machInst, 
9, 5); 580 IntRegIndex zm = (IntRegIndex) (uint8_t) bits( 581 machInst, 20, 16); 582 switch (size) { 583 case 0: 584 return new SveIndexRR<int8_t>(machInst, 585 zd, zn, zm); 586 case 1: 587 return new SveIndexRR<int16_t>(machInst, 588 zd, zn, zm); 589 case 2: 590 return new SveIndexRR<int32_t>(machInst, 591 zd, zn, zm); 592 case 3: 593 return new SveIndexRR<int64_t>(machInst, 594 zd, zn, zm); 595 } 596 } 597 } 598 return new Unknown64(machInst); 599 } // decodeSveIndexGen 600 601 StaticInstPtr 602 decodeSveStackAlloc(ExtMachInst machInst) 603 { 604 uint8_t b23_22 = bits(machInst, 23, 22); 605 uint8_t b11 = bits(machInst, 11); 606 if ((b23_22 & 0x2) == 0x0 && b11 == 0x0) { 607 IntRegIndex rd = makeSP( 608 (IntRegIndex) (uint8_t) bits(machInst, 4, 0)); 609 IntRegIndex rn = makeSP( 610 (IntRegIndex) (uint8_t) bits(machInst, 20, 16)); 611 uint64_t imm = sext<6>(bits(machInst, 10, 5)); 612 if ((b23_22 & 0x1) == 0x0) { 613 return new AddvlXImm(machInst, rd, rn, imm); 614 } else { 615 return new AddplXImm(machInst, rd, rn, imm); 616 } 617 } else if (b23_22 == 0x2 && b11 == 0x0) { 618 IntRegIndex rd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0); 619 uint64_t imm = sext<6>(bits(machInst, 10, 5)); 620 if (bits(machInst, 20, 16) == 0x1f) { 621 return new SveRdvl(machInst, rd, imm); 622 } 623 } 624 return new Unknown64(machInst); 625 } // decodeSveStackAlloc 626 627 StaticInstPtr 628 decodeSveShiftByWideElemsUnpred(ExtMachInst machInst) 629 { 630 IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0); 631 IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5); 632 IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 20, 16); 633 uint8_t size = bits(machInst, 23, 22); 634 uint8_t opc = (uint8_t) bits(machInst, 11, 10); 635 switch (opc) { 636 case 0x0: 637 return decodeSveBinUnpredU<SveAsrWideUnpred>( 638 size, machInst, zd, zn, zm); 639 case 0x1: 640 return decodeSveBinUnpredU<SveLsrWideUnpred>( 641 size, machInst, zd, zn, zm); 642 case 0x3: 643 return 
decodeSveBinUnpredU<SveLslWideUnpred>( 644 size, machInst, zd, zn, zm); 645 } 646 return new Unknown64(machInst); 647 } // decodeSveShiftByWideElemsUnpred 648 649 StaticInstPtr 650 decodeSveShiftByImmUnpredB(ExtMachInst machInst) 651 { 652 IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0); 653 IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5); 654 uint8_t imm3 = (uint8_t) bits(machInst, 18, 16); 655 656 uint8_t tsize = (bits(machInst, 23, 22) << 2) | bits(machInst, 20, 19); 657 uint8_t esize = 0; 658 uint8_t size = 0; 659 if (tsize == 0x0) { 660 return new Unknown64(machInst); 661 } else if (tsize == 0x1) { 662 esize = 8; 663 } else if ((tsize & 0x0E) == 0x2) { 664 esize = 16; 665 size = 1; 666 } else if ((tsize & 0x0C) == 0x4) { 667 esize = 32; 668 size = 2; 669 } else if ((tsize & 0x08) == 0x8) { 670 esize = 64; 671 size = 3; 672 } 673 674 uint8_t opc = bits(machInst, 11, 10); 675 switch (opc) { 676 case 0x00: 677 { 678 unsigned shiftAmt = 2 * esize - ((tsize << 3) | imm3); 679 return decodeSveBinImmUnpredU<SveAsrImmUnpred>( 680 size, machInst, zd, zn, shiftAmt); 681 } 682 case 0x01: 683 { 684 unsigned shiftAmt = 2 * esize - ((tsize << 3) | imm3); 685 return decodeSveBinImmUnpredU<SveLsrImmUnpred>( 686 size, machInst, zd, zn, shiftAmt); 687 } 688 case 0x03: 689 { 690 unsigned shiftAmt = ((tsize << 3) | imm3) - esize; 691 return decodeSveBinImmUnpredU<SveLslImmUnpred>( 692 size, machInst, zd, zn, shiftAmt); 693 } 694 } 695 696 return new Unknown64(machInst); 697 } // decodeSveShiftByImmUnpredB 698 699 StaticInstPtr 700 decodeSveShiftByImmUnpred(ExtMachInst machInst) 701 { 702 if (bits(machInst, 12)) { 703 return decodeSveShiftByImmUnpredB(machInst); 704 } else { 705 return decodeSveShiftByWideElemsUnpred(machInst); 706 } 707 return new Unknown64(machInst); 708 } // decodeSveShiftByImmUnpred 709 710 StaticInstPtr 711 decodeSveCompVecAddr(ExtMachInst machInst) 712 { 713 IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0); 714 
IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5); 715 IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 20, 16); 716 uint8_t mult = 1 << bits(machInst, 11, 10); 717 718 uint8_t opc = bits(machInst, 23, 22); 719 720 switch (opc) { 721 case 0x0: 722 return new SveAdr<uint64_t>(machInst, zd, zn, zm, mult, 723 SveAdr<uint64_t>::SveAdrOffsetUnpackedSigned); 724 case 0x1: 725 return new SveAdr<uint64_t>(machInst, zd, zn, zm, mult, 726 SveAdr<uint64_t>::SveAdrOffsetUnpackedUnsigned); 727 case 0x2: 728 return new SveAdr<uint32_t>(machInst, zd, zn, zm, mult, 729 SveAdr<uint32_t>::SveAdrOffsetPacked); 730 case 0x3: 731 return new SveAdr<uint64_t>(machInst, zd, zn, zm, mult, 732 SveAdr<uint64_t>::SveAdrOffsetPacked); 733 } 734 return new Unknown64(machInst); 735 } // decodeSveCompVecAddr 736 737 StaticInstPtr 738 decodeSveIntMiscUnpred(ExtMachInst machInst) 739 { 740 IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0); 741 IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5); 742 743 uint8_t size = bits(machInst, 23, 22); 744 uint8_t opc = bits(machInst, 11, 10); 745 switch (opc) { 746 case 0x0: 747 // SVE floating-point trig select coefficient 748 { 749 if (size == 0) { 750 break; 751 } 752 IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 753 20, 16); 754 return decodeSveBinUnpredF<SveFtssel>( 755 size, machInst, zd, zn, zm); 756 } 757 case 0x2: 758 // SVE floating-point exponential accelerator 759 if (size == 0) { 760 break; 761 } 762 return decodeSveUnaryUnpredF<SveFexpa>(size, machInst, zd, zn); 763 case 0x3: 764 // SVE constructive prefix (unpredicated) 765 if (size == 0x0 && bits(machInst, 20, 16) == 0x0) { 766 return new SveMovprfxUnpred<uint64_t>(machInst, zd, zn); 767 } 768 break; 769 } 770 return new Unknown64(machInst); 771 } // decodeSveIntMiscUnpred 772 773 StaticInstPtr 774 decodeSveElemCount(ExtMachInst machInst) 775 { 776 uint8_t opc20 = (uint8_t) bits(machInst, 20); 777 uint8_t b13_12 = (uint8_t) bits(machInst, 13, 
12); 778 uint8_t opc11 = (uint8_t) bits(machInst, 11); 779 uint8_t opc10 = (uint8_t) bits(machInst, 10); 780 uint8_t opc11_10 = (uint8_t) bits(machInst, 11, 10); 781 if (b13_12 == 0) { 782 uint8_t pattern = (uint8_t) bits(machInst, 9, 5); 783 uint8_t imm4 = (uint8_t) bits(machInst, 19, 16) + 1; 784 IntRegIndex zdn = (IntRegIndex) (uint8_t) bits(machInst, 4, 0); 785 unsigned size = (unsigned) bits(machInst, 23, 22); 786 if (opc20) { 787 if (opc11 == 0) { 788 if (opc10) { 789 return decodeSveElemIntCountLU<SveDecv>(size, 790 machInst, zdn, pattern, imm4); 791 } else { 792 return decodeSveElemIntCountLU<SveIncv>(size, 793 machInst, zdn, pattern, imm4); 794 } 795 } 796 } else { 797 if (opc11) { 798 if (opc10) { 799 return decodeSveElemIntCountLU<SveUqdecv>(size, 800 machInst, zdn, pattern, imm4); 801 } else { 802 return decodeSveElemIntCountLS<SveSqdecv>(size, 803 machInst, zdn, pattern, imm4); 804 } 805 } else { 806 if (opc10) { 807 return decodeSveElemIntCountLU<SveUqincv>(size, 808 machInst, zdn, pattern, imm4); 809 } else { 810 return decodeSveElemIntCountLS<SveSqincv>(size, 811 machInst, zdn, pattern, imm4); 812 } 813 } 814 } 815 } else if (b13_12 == 3) { 816 uint8_t pattern = (uint8_t) bits(machInst, 9, 5); 817 uint8_t imm4 = (uint8_t) bits(machInst, 19, 16) + 1; 818 IntRegIndex rdn = (IntRegIndex) (uint8_t) bits(machInst, 4, 0); 819 unsigned size = (unsigned) bits(machInst, 23, 22); 820 switch (opc11_10) { 821 case 0: 822 if (opc20) { 823 return decodeSveElemIntCountS<SveSqinc>(size, 824 machInst, rdn, pattern, imm4); 825 } else { 826 return decodeSveElemIntCountS<SveSqinc32>(size, 827 machInst, rdn, pattern, imm4); 828 } 829 case 1: 830 if (opc20) { 831 return decodeSveElemIntCountU<SveUqinc>(size, 832 machInst, rdn, pattern, imm4); 833 } else { 834 return decodeSveElemIntCountU<SveUqinc32>(size, 835 machInst, rdn, pattern, imm4); 836 } 837 case 2: 838 if (opc20) { 839 return decodeSveElemIntCountS<SveSqdec>(size, 840 machInst, rdn, pattern, imm4); 841 } else { 
842 return decodeSveElemIntCountS<SveSqdec32>(size, 843 machInst, rdn, pattern, imm4); 844 } 845 case 3: 846 if (opc20) { 847 return decodeSveElemIntCountU<SveUqdec>(size, 848 machInst, rdn, pattern, imm4); 849 } else { 850 return decodeSveElemIntCountU<SveUqdec32>(size, 851 machInst, rdn, pattern, imm4); 852 } 853 } 854 } else if (opc20 && b13_12 == 2 && !(opc11_10 & 0x2)) { 855 uint8_t pattern = (uint8_t) bits(machInst, 9, 5); 856 uint8_t imm4 = (uint8_t) bits(machInst, 19, 16) + 1; 857 IntRegIndex rdn = (IntRegIndex) (uint8_t) bits(machInst, 4, 0); 858 unsigned size = (unsigned) bits(machInst, 23, 22); 859 if (opc11_10 & 0x1) { 860 return decodeSveElemIntCountU<SveDec>(size, machInst, 861 rdn, pattern, imm4); 862 } else { 863 return decodeSveElemIntCountU<SveInc>(size, machInst, 864 rdn, pattern, imm4); 865 } 866 } else if (!opc20 && b13_12 == 2 && opc11_10 == 0) { 867 uint8_t pattern = (uint8_t) bits(machInst, 9, 5); 868 uint8_t imm4 = (uint8_t) bits(machInst, 19, 16) + 1; 869 IntRegIndex rd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0); 870 unsigned size = (unsigned) bits(machInst, 23, 22); 871 return decodeSveElemIntCountU<SveCntx>(size, machInst, 872 rd, pattern, imm4); 873 } 874 return new Unknown64(machInst); 875 } // decodeSveElemCount 876 877 StaticInstPtr 878 decodeSveLogMaskImm(ExtMachInst machInst) 879 { 880 IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0); 881 bool n = bits(machInst, 17); 882 uint8_t immr = bits(machInst, 16, 11); 883 uint8_t imms = bits(machInst, 10, 5); 884 885 // Decode bitmask 886 // len = MSB(n:NOT(imms)), len < 1 is undefined 887 uint8_t len = 0; 888 if (n) { 889 len = 6; 890 } else if (imms == 0x3f || imms == 0x3e) { 891 return new Unknown64(machInst); 892 } else { 893 len = findMsbSet(imms ^ 0x3f); 894 } 895 // Generate r, s, and size 896 uint64_t r = bits(immr, len - 1, 0); 897 uint64_t s = bits(imms, len - 1, 0); 898 uint8_t size = 1 << len; 899 if (s == size - 1) 900 return new Unknown64(machInst); 901 // 
Generate the pattern with s 1s, rotated by r, with size bits 902 uint64_t pattern = mask(s + 1); 903 if (r) { 904 pattern = (pattern >> r) | (pattern << (size - r)); 905 pattern &= mask(size); 906 } 907 // Replicate that to fill up the immediate 908 for (unsigned i = 1; i < (64 / size); i *= 2) 909 pattern |= (pattern << (i * size)); 910 uint64_t imm = pattern; 911 912 if (bits(machInst, 19, 18) == 0x0) { 913 if (bits(machInst, 23, 22) == 0x3) { 914 return new SveDupm<uint64_t>(machInst, zd, imm); 915 } else { 916 switch (bits(machInst, 23, 22)) { 917 case 0x0: 918 return new SveOrrImm<uint64_t>(machInst, zd, imm); 919 case 0x1: 920 return new SveEorImm<uint64_t>(machInst, zd, imm); 921 case 0x2: 922 return new SveAndImm<uint64_t>(machInst, zd, imm); 923 } 924 } 925 } 926 927 return new Unknown64(machInst); 928 } // decodeSveLogMaskImm 929 930 StaticInstPtr 931 decodeSveIntWideImmPred(ExtMachInst machInst) 932 { 933 IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0); 934 IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 19, 16); 935 uint8_t size = bits(machInst, 23, 22); 936 937 if (bits(machInst, 15) == 0x0) { 938 uint64_t imm = bits(machInst, 12, 5); 939 uint8_t sh = bits(machInst, 13); 940 uint8_t m = bits(machInst, 14); 941 if (sh) { 942 if (size == 0x0) { 943 return new Unknown64(machInst); 944 } 945 imm <<= 8; 946 } 947 if (m) { 948 if (sh) { 949 return decodeSveWideImmPredU<SveCpyImmMerge>( 950 size, machInst, zd, sext<16>(imm), pg); 951 } else { 952 return decodeSveWideImmPredU<SveCpyImmMerge>( 953 size, machInst, zd, sext<8>(imm), pg); 954 } 955 } else { 956 if (sh) { 957 return decodeSveWideImmPredU<SveCpyImmZero>( 958 size, machInst, zd, sext<16>(imm), pg, 959 false /* isMerging */); 960 } else { 961 return decodeSveWideImmPredU<SveCpyImmZero>( 962 size, machInst, zd, sext<8>(imm), pg, 963 false /* isMerging */); 964 } 965 } 966 } else if (bits(machInst, 15, 13) == 0x6 && size != 0x0) { 967 uint64_t imm = vfp_modified_imm(bits(machInst, 
12, 5), 968 decode_fp_data_type(size)); 969 return decodeSveWideImmPredF<SveFcpy>( 970 size, machInst, zd, imm, pg); 971 } 972 973 return new Unknown64(machInst); 974 } // decodeSveIntWideImmPred 975 976 StaticInstPtr 977 decodeSvePermExtract(ExtMachInst machInst) 978 { 979 uint8_t b23_22 = (unsigned) bits(machInst, 23, 22); 980 if (!b23_22) { 981 uint8_t position = 982 bits(machInst, 20, 16) << 3 | bits(machInst, 12, 10); 983 IntRegIndex zdn = (IntRegIndex) (uint8_t) bits(machInst, 4, 0); 984 IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 9, 5); 985 return new SveExt<uint8_t>(machInst, zdn, zm, position); 986 } 987 return new Unknown64(machInst); 988 } // decodeSvePermExtract 989 990 StaticInstPtr 991 decodeSvePermUnpred(ExtMachInst machInst) 992 { 993 uint8_t b12_10 = bits(machInst, 12, 10); 994 if (b12_10 == 0x4) { 995 unsigned size = (unsigned) bits(machInst, 23, 22); 996 IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0); 997 IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5); 998 IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 20, 16); 999 return decodeSveBinUnpredU<SveTbl>(size, machInst, zd, zn, zm); 1000 } else if (bits(machInst, 20, 16) == 0x0 && b12_10 == 0x6) { 1001 uint8_t size = bits(machInst, 23, 22); 1002 IntRegIndex rn = makeSP( 1003 (IntRegIndex) (uint8_t) bits(machInst, 9, 5)); 1004 IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0); 1005 return decodeSveUnaryUnpredU<SveDupScalar>(size, machInst, zd, rn); 1006 } else if (bits(machInst, 20, 16) == 0x4 && b12_10 == 0x6) { 1007 uint8_t size = bits(machInst, 23, 22); 1008 IntRegIndex zdn = (IntRegIndex) (uint8_t) bits(machInst, 4, 0); 1009 IntRegIndex rm = (IntRegIndex) (uint8_t) bits(machInst, 9, 5); 1010 return decodeSveUnaryUnpredU<SveInsr>(size, machInst, zdn, rm); 1011 } else if (bits(machInst, 20, 16) == 0x14 && b12_10 == 0x6) { 1012 uint8_t size = bits(machInst, 23, 22); 1013 IntRegIndex zdn = (IntRegIndex) (uint8_t) bits(machInst, 4, 0); 1014 
IntRegIndex vm = (IntRegIndex) (uint8_t) bits(machInst, 9, 5); 1015 return decodeSveUnaryUnpredU<SveInsrf>(size, machInst, zdn, vm); 1016 } else if (bits(machInst, 20, 16) == 0x18 && b12_10 == 0x6) { 1017 uint8_t size = bits(machInst, 23, 22); 1018 IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0); 1019 IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5); 1020 return decodeSveUnaryUnpredU<SveRevv>(size, machInst, zd, zn); 1021 } else if (b12_10 == 0x0 && bits(machInst, 20, 16) != 0x0) { 1022 uint8_t imm = 1023 bits(machInst, 23, 22) << 5 | // imm3h 1024 bits(machInst, 20) << 4 | // imm3l 1025 bits(machInst, 19, 16); // tsz 1026 IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0); 1027 IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5); 1028 if (imm & 0x1) { 1029 imm >>= 1; 1030 return new SveDupIdx<uint8_t>(machInst, zd, zn, imm); 1031 } else if (imm & 0x2) { 1032 imm >>= 2; 1033 return new SveDupIdx<uint16_t>(machInst, zd, zn, imm); 1034 } else if (imm & 0x4) { 1035 imm >>= 3; 1036 return new SveDupIdx<uint32_t>(machInst, zd, zn, imm); 1037 } else if (imm & 0x8) { 1038 imm >>= 4; 1039 return new SveDupIdx<uint64_t>(machInst, zd, zn, imm); 1040 } else if (imm & 0x10) { 1041 imm >>= 5; 1042 return new SveDupIdx<__uint128_t>(machInst, zd, zn, imm); 1043 } 1044 return new Unknown64(machInst); 1045 } else if (bits(machInst, 23, 22) != 0x0 && 1046 bits(machInst, 20, 18) == 0x4 && b12_10 == 0x6) { 1047 unsigned size = (unsigned) bits(machInst, 23, 22); 1048 IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0); 1049 IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5); 1050 if (bits(machInst, 17)) { 1051 if (bits(machInst, 16)) { 1052 return decodeSveUnpackU<SveUunpkhi>(size, machInst, 1053 zd, zn); 1054 } else { 1055 return decodeSveUnpackU<SveUunpklo>(size, machInst, 1056 zd, zn); 1057 } 1058 } else { 1059 if (bits(machInst, 16)) { 1060 return decodeSveUnpackS<SveSunpkhi>(size, machInst, 1061 zd, zn); 1062 } 
else { 1063 return decodeSveUnpackS<SveSunpklo>(size, machInst, 1064 zd, zn); 1065 } 1066 } 1067 } 1068 return new Unknown64(machInst); 1069 } // decodeSvePermUnpred 1070 1071 StaticInstPtr 1072 decodeSvePermPredicates(ExtMachInst machInst) 1073 { 1074 if (bits(machInst, 20) == 0x0 && bits(machInst, 12, 11) != 0x3 && 1075 bits(machInst, 9) == 0x0 && bits(machInst, 4) == 0x0) { 1076 IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0); 1077 IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5); 1078 IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 20, 16); 1079 1080 uint8_t size = bits(machInst, 23, 22); 1081 1082 uint8_t opc = bits(machInst, 12, 10); 1083 1084 switch (opc) { 1085 case 0x0: 1086 return decodeSveBinUnpredU<SveZip1Pred>(size, 1087 machInst, zd, zn, zm); 1088 case 0x1: 1089 return decodeSveBinUnpredU<SveZip2Pred>(size, 1090 machInst, zd, zn, zm); 1091 case 0x2: 1092 return decodeSveBinUnpredU<SveUzp1Pred>(size, 1093 machInst, zd, zn, zm); 1094 case 0x3: 1095 return decodeSveBinUnpredU<SveUzp2Pred>(size, 1096 machInst, zd, zn, zm); 1097 case 0x4: 1098 return decodeSveBinUnpredU<SveTrn1Pred>(size, 1099 machInst, zd, zn, zm); 1100 case 0x5: 1101 return decodeSveBinUnpredU<SveTrn2Pred>(size, 1102 machInst, zd, zn, zm); 1103 } 1104 } else if (bits(machInst, 23, 22) == 0x0 && 1105 bits(machInst, 20, 17) == 0x8 && bits(machInst, 12, 9) == 0x0 1106 && bits(machInst, 4) == 0x0) { 1107 IntRegIndex pd = (IntRegIndex) (uint8_t) bits(machInst, 3, 0); 1108 IntRegIndex pn = (IntRegIndex) (uint8_t) bits(machInst, 8, 5); 1109 if (bits(machInst, 16)) { 1110 return new SvePunpkhi<uint8_t, uint16_t>(machInst, pd, pn); 1111 } else { 1112 return new SvePunpklo<uint8_t, uint16_t>(machInst, pd, pn); 1113 } 1114 } else if (bits(machInst, 20, 16) == 0x14 && 1115 bits(machInst, 12, 9) == 0x00 && bits(machInst, 4) == 0) { 1116 uint8_t size = bits(machInst, 23, 22); 1117 IntRegIndex pd = (IntRegIndex) (uint8_t) bits(machInst, 3, 0); 1118 IntRegIndex pn = 
(IntRegIndex) (uint8_t) bits(machInst, 8, 5); 1119 return decodeSveUnaryUnpredU<SveRevp>(size, machInst, pd, pn); 1120 } 1121 return new Unknown64(machInst); 1122 } // decodeSvePermPredicates 1123 1124 StaticInstPtr 1125 decodeSvePermIntlv(ExtMachInst machInst) 1126 { 1127 IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0); 1128 IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5); 1129 IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 20, 16); 1130 1131 uint8_t size = bits(machInst, 23, 22); 1132 1133 uint8_t opc = bits(machInst, 12, 10); 1134 1135 switch (opc) { 1136 case 0x0: 1137 return decodeSveBinUnpredU<SveZip1>(size, machInst, zd, zn, zm); 1138 case 0x1: 1139 return decodeSveBinUnpredU<SveZip2>(size, machInst, zd, zn, zm); 1140 case 0x2: 1141 return decodeSveBinUnpredU<SveUzp1>(size, machInst, zd, zn, zm); 1142 case 0x3: 1143 return decodeSveBinUnpredU<SveUzp2>(size, machInst, zd, zn, zm); 1144 case 0x4: 1145 return decodeSveBinUnpredU<SveTrn1>(size, machInst, zd, zn, zm); 1146 case 0x5: 1147 return decodeSveBinUnpredU<SveTrn2>(size, machInst, zd, zn, zm); 1148 } 1149 return new Unknown64(machInst); 1150 } // decodeSvePermIntlv 1151 1152 StaticInstPtr 1153 decodeSvePermPred(ExtMachInst machInst) 1154 { 1155 uint8_t b13 = bits(machInst, 13); 1156 uint8_t b23 = bits(machInst, 23); 1157 switch (bits(machInst, 20, 16)) { 1158 case 0x0: 1159 if (!b13) { 1160 uint8_t size = bits(machInst, 23, 22); 1161 IntRegIndex pg = (IntRegIndex)(uint8_t) bits(machInst, 12, 10); 1162 IntRegIndex vn = (IntRegIndex)(uint8_t) bits(machInst, 9, 5); 1163 IntRegIndex zd = (IntRegIndex)(uint8_t) bits(machInst, 4, 0); 1164 return decodeSveUnaryPredU<SveCpySimdFpScalar>(size, 1165 machInst, zd, vn, pg); 1166 } 1167 break; 1168 case 0x1: 1169 if (!b13 && b23) { 1170 // sve_int_perm_compact 1171 IntRegIndex pg = (IntRegIndex)(uint8_t) bits(machInst, 12, 10); 1172 IntRegIndex zn = (IntRegIndex)(uint8_t) bits(machInst, 9, 5); 1173 IntRegIndex zd = 
(IntRegIndex)(uint8_t) bits(machInst, 4, 0); 1174 if (bits(machInst, 22)) { 1175 return new SveCompact<uint64_t>(machInst, zd, zn, pg); 1176 } else { 1177 return new SveCompact<uint32_t>(machInst, zd, zn, pg); 1178 } 1179 } 1180 break; 1181 case 0x8: 1182 if (b13) { 1183 uint8_t size = bits(machInst, 23, 22); 1184 IntRegIndex pg = (IntRegIndex)(uint8_t) bits(machInst, 12, 10); 1185 IntRegIndex rn = makeSP( 1186 (IntRegIndex)(uint8_t) bits(machInst, 9, 5)); 1187 IntRegIndex zd = (IntRegIndex)(uint8_t) bits(machInst, 4, 0); 1188 return decodeSveUnaryPredU<SveCpyScalar>(size, 1189 machInst, zd, rn, pg); 1190 } 1191 break; 1192 case 0xC: 1193 if (!b13) { 1194 uint8_t size = bits(machInst, 23, 22); 1195 IntRegIndex pg = (IntRegIndex)(uint8_t) bits(machInst, 12, 10); 1196 IntRegIndex zdn = (IntRegIndex)(uint8_t) bits(machInst, 4, 0); 1197 IntRegIndex zm = (IntRegIndex)(uint8_t) bits(machInst, 9, 5); 1198 return decodeSveBinDestrPredU<SveSplice>(size, machInst, 1199 zdn, zm, pg); 1200 } 1201 break; 1202 } 1203 switch (bits(machInst, 20, 17)) { 1204 case 0x0: 1205 if (b13) { 1206 uint8_t AB = bits(machInst, 16); 1207 uint8_t size = bits(machInst, 23, 22); 1208 IntRegIndex pg = (IntRegIndex)(uint8_t) bits(machInst, 12, 10); 1209 IntRegIndex zn = (IntRegIndex)(uint8_t) bits(machInst, 9, 5); 1210 IntRegIndex rd = (IntRegIndex)(uint8_t) bits(machInst, 4, 0); 1211 if (!AB) { 1212 return decodeSveUnaryPredU<SveLasta>(size, 1213 machInst, rd, zn, pg); 1214 } else { 1215 return decodeSveUnaryPredU<SveLastb>(size, 1216 machInst, rd, zn, pg); 1217 } 1218 } 1219 break; 1220 case 0x1: 1221 if (!b13) { 1222 uint8_t AB = bits(machInst, 16); 1223 uint8_t size = bits(machInst, 23, 22); 1224 IntRegIndex pg = (IntRegIndex)(uint8_t) bits(machInst, 12, 10); 1225 IntRegIndex zn = (IntRegIndex)(uint8_t) bits(machInst, 9, 5); 1226 IntRegIndex vd = (IntRegIndex)(uint8_t) bits(machInst, 4, 0); 1227 if (!AB) { 1228 return decodeSveUnaryPredU<SveLastaf>(size, 1229 machInst, vd, zn, pg); 1230 } else 
{ 1231 return decodeSveUnaryPredU<SveLastbf>(size, 1232 machInst, vd, zn, pg); 1233 } 1234 } 1235 break; 1236 case 0x4: 1237 if (!b13) { 1238 uint8_t AB = bits(machInst, 16); 1239 uint8_t size = bits(machInst, 23, 22); 1240 IntRegIndex pg = (IntRegIndex)(uint8_t) bits(machInst, 12, 10); 1241 IntRegIndex zm = (IntRegIndex)(uint8_t) bits(machInst, 9, 5); 1242 IntRegIndex zdn = (IntRegIndex)(uint8_t) bits(machInst, 4, 0); 1243 if (!AB) { 1244 return decodeSveUnaryPredU<SveClastav>(size, 1245 machInst, zdn, zm, pg); 1246 } else { 1247 return decodeSveUnaryPredU<SveClastbv>(size, 1248 machInst, zdn, zm, pg); 1249 } 1250 } 1251 break; 1252 case 0x5: 1253 if (!b13) { 1254 uint8_t AB = bits(machInst, 16); 1255 uint8_t size = bits(machInst, 23, 22); 1256 IntRegIndex pg = (IntRegIndex)(uint8_t) bits(machInst, 12, 10); 1257 IntRegIndex zm = (IntRegIndex)(uint8_t) bits(machInst, 9, 5); 1258 IntRegIndex zdn = (IntRegIndex)(uint8_t) bits(machInst, 4, 0); 1259 if (!AB) { 1260 return decodeSveUnaryPredU<SveClastaf>(size, 1261 machInst, zdn, zm, pg); 1262 } else { 1263 return decodeSveUnaryPredU<SveClastbf>(size, 1264 machInst, zdn, zm, pg); 1265 } 1266 } 1267 break; 1268 case 0x8: 1269 if (b13) { 1270 uint8_t AB = bits(machInst, 16); 1271 uint8_t size = bits(machInst, 23, 22); 1272 IntRegIndex pg = (IntRegIndex)(uint8_t) bits(machInst, 12, 10); 1273 IntRegIndex zm = (IntRegIndex)(uint8_t) bits(machInst, 9, 5); 1274 IntRegIndex rdn = (IntRegIndex)(uint8_t) bits(machInst, 4, 0); 1275 if (!AB) { 1276 return decodeSveUnaryPredU<SveClasta>(size, 1277 machInst, rdn, zm, pg); 1278 } else { 1279 return decodeSveUnaryPredU<SveClastb>(size, 1280 machInst, rdn, zm, pg); 1281 } 1282 } 1283 break; 1284 } 1285 if (bits(machInst, 20, 18) == 0x1 && !b13) { 1286 unsigned size = (unsigned) bits(machInst, 23, 22); 1287 IntRegIndex pg = (IntRegIndex)(uint8_t) bits(machInst, 12, 10); 1288 IntRegIndex zn = (IntRegIndex)(uint8_t) bits(machInst, 9, 5); 1289 IntRegIndex zd = (IntRegIndex)(uint8_t) 
bits(machInst, 4, 0); 1290 uint8_t opc17_16 = bits(machInst, 17, 16); 1291 switch (opc17_16) { 1292 case 0x00: 1293 switch (size) { 1294 case 1: 1295 return new SveRevb<uint16_t>(machInst, zd, zn, pg); 1296 case 2: 1297 return new SveRevb<uint32_t>(machInst, zd, zn, pg); 1298 case 3: 1299 return new SveRevb<uint64_t>(machInst, zd, zn, pg); 1300 } 1301 break; 1302 case 0x01: 1303 switch (size) { 1304 case 2: 1305 return new SveRevh<uint32_t>(machInst, zd, zn, pg); 1306 case 3: 1307 return new SveRevh<uint64_t>(machInst, zd, zn, pg); 1308 } 1309 break; 1310 case 0x02: 1311 if (size == 3) { 1312 return new SveRevw<uint64_t>(machInst, zd, zn, pg); 1313 } 1314 break; 1315 case 0x03: 1316 return decodeSveUnaryPredU<SveRbit>( 1317 size, machInst, zd, zn, pg); 1318 } 1319 } 1320 return new Unknown64(machInst); 1321 } // decodeSvePermPred 1322 1323 StaticInstPtr 1324 decodeSveSelVec(ExtMachInst machInst) 1325 { 1326 IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0); 1327 IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5); 1328 IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 13, 10); 1329 IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 20, 16); 1330 1331 uint8_t size = bits(machInst, 23, 22); 1332 1333 return decodeSveBinConstrPredU<SveSel>(size, 1334 machInst, zd, zn, zm, pg, SvePredType::SELECT); 1335 } // decodeSveSelVec 1336 1337 StaticInstPtr 1338 decodeSveIntCmpVec(ExtMachInst machInst) 1339 { 1340 uint8_t size = bits(machInst, 23, 22); 1341 uint8_t b14 = bits(machInst, 14); 1342 uint8_t opc = 1343 bits(machInst, 15) << 2 | 1344 bits(machInst, 13) << 1 | 1345 bits(machInst, 4); 1346 IntRegIndex pd = (IntRegIndex) (uint8_t)bits(machInst, 3, 0); 1347 IntRegIndex pg = (IntRegIndex) (uint8_t)bits(machInst, 12, 10); 1348 IntRegIndex zn = (IntRegIndex) (uint8_t)bits(machInst, 9, 5); 1349 IntRegIndex zm = (IntRegIndex) (uint8_t)bits(machInst, 20, 16); 1350 if (b14 && size != 3) { 1351 // sve_int_cmp_1 1352 switch (opc) { 1353 case 0: 
1354 return decodeSveTerPredWS<SveCmpgew>(size, 1355 machInst, pd, zn, zm, pg); 1356 case 1: 1357 return decodeSveTerPredWS<SveCmpgtw>(size, 1358 machInst, pd, zn, zm, pg); 1359 case 2: 1360 return decodeSveTerPredWS<SveCmpltw>(size, 1361 machInst, pd, zn, zm, pg); 1362 case 3: 1363 return decodeSveTerPredWS<SveCmplew>(size, 1364 machInst, pd, zn, zm, pg); 1365 case 4: 1366 return decodeSveTerPredWU<SveCmphsw>(size, 1367 machInst, pd, zn, zm, pg); 1368 case 5: 1369 return decodeSveTerPredWU<SveCmphiw>(size, 1370 machInst, pd, zn, zm, pg); 1371 case 6: 1372 return decodeSveTerPredWU<SveCmplow>(size, 1373 machInst, pd, zn, zm, pg); 1374 case 7: 1375 return decodeSveTerPredWU<SveCmplsw>(size, 1376 machInst, pd, zn, zm, pg); 1377 } 1378 } else if (!b14) { 1379 switch (opc) { 1380 case 0: 1381 return decodeSveTerPredU<SveCmphs>(size, 1382 machInst, pd, zn, zm, pg); 1383 case 1: 1384 return decodeSveTerPredU<SveCmphi>(size, 1385 machInst, pd, zn, zm, pg); 1386 case 2: 1387 if (size != 3) { 1388 return decodeSveTerPredWU<SveCmpeqw>(size, 1389 machInst, pd, zn, zm, pg); 1390 } 1391 break; 1392 case 3: 1393 if (size != 3) { 1394 return decodeSveTerPredWU<SveCmpnew>(size, 1395 machInst, pd, zn, zm, pg); 1396 } 1397 break; 1398 case 4: 1399 return decodeSveTerPredS<SveCmpge>(size, 1400 machInst, pd, zn, zm, pg); 1401 case 5: 1402 return decodeSveTerPredS<SveCmpgt>(size, 1403 machInst, pd, zn, zm, pg); 1404 case 6: 1405 return decodeSveTerPredU<SveCmpeq>(size, 1406 machInst, pd, zn, zm, pg); 1407 case 7: 1408 return decodeSveTerPredU<SveCmpne>(size, 1409 machInst, pd, zn, zm, pg); 1410 } 1411 } 1412 return new Unknown64(machInst); 1413 } // decodeSveIntCmpVec 1414 1415 StaticInstPtr 1416 decodeSveIntCmpUImm(ExtMachInst machInst) 1417 { 1418 uint8_t cmp = bits(machInst, 13) << 1 | bits(machInst, 4); 1419 IntRegIndex pd = (IntRegIndex) (uint8_t) bits(machInst, 3, 0); 1420 IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5); 1421 IntRegIndex pg = (IntRegIndex) (uint8_t) 
bits(machInst, 12, 10); 1422 int64_t imm = (int64_t) bits(machInst, 20, 14); 1423 uint8_t size = bits(machInst, 23, 22); 1424 switch (cmp) { 1425 case 0: 1426 return decodeSveTerImmPredU<SveCmphsi>(size, 1427 machInst, pd, zn, imm, pg); 1428 case 1: 1429 return decodeSveTerImmPredU<SveCmphii>(size, 1430 machInst, pd, zn, imm, pg); 1431 case 2: 1432 return decodeSveTerImmPredU<SveCmploi>(size, 1433 machInst, pd, zn, imm, pg); 1434 case 3: 1435 return decodeSveTerImmPredU<SveCmplsi>(size, 1436 machInst, pd, zn, imm, pg); 1437 } 1438 return new Unknown64(machInst); 1439 } // decodeSveIntCmpUImm 1440 1441 StaticInstPtr 1442 decodeSveIntCmpSImm(ExtMachInst machInst) 1443 { 1444 uint8_t opc = bits(machInst, 15) << 2 | bits(machInst, 13) << 1 | 1445 bits(machInst, 4); 1446 IntRegIndex pd = (IntRegIndex) (uint8_t) bits(machInst, 3, 0); 1447 IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5); 1448 IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10); 1449 int64_t imm = sext<5>(bits(machInst, 20, 16)); 1450 uint8_t size = bits(machInst, 23, 22); 1451 switch (opc) { 1452 case 0: 1453 return decodeSveTerImmPredS<SveCmpgei>(size, 1454 machInst, pd, zn, imm, pg); 1455 case 1: 1456 return decodeSveTerImmPredS<SveCmpgti>(size, 1457 machInst, pd, zn, imm, pg); 1458 case 2: 1459 return decodeSveTerImmPredS<SveCmplti>(size, 1460 machInst, pd, zn, imm, pg); 1461 case 3: 1462 return decodeSveTerImmPredS<SveCmplei>(size, 1463 machInst, pd, zn, imm, pg); 1464 case 4: 1465 return decodeSveTerImmPredU<SveCmpeqi>(size, 1466 machInst, pd, zn, imm, pg); 1467 case 5: 1468 return decodeSveTerImmPredU<SveCmpnei>(size, 1469 machInst, pd, zn, imm, pg); 1470 default: 1471 return new Unknown64(machInst); 1472 } 1473 return new Unknown64(machInst); 1474 } // decodeSveIntCmpSImm 1475 1476 StaticInstPtr 1477 decodeSvePredLogicalOps(ExtMachInst machInst) 1478 { 1479 IntRegIndex pd = (IntRegIndex) (uint8_t) bits(machInst, 3, 0); 1480 IntRegIndex pn = (IntRegIndex) (uint8_t) 
            bits(machInst, 8, 5);
        IntRegIndex pm = (IntRegIndex) (uint8_t) bits(machInst, 19, 16);
        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 13, 10);
        // opc = bits 23:22 : bit 9 : bit 4.  Value 0x7 has no case below
        // and therefore decodes to Unknown64.
        uint8_t opc = (bits(machInst, 23, 22) << 2) |
            (bits(machInst, 9) << 1) |
            bits(machInst, 4);
        switch (opc) {
          case 0x0:
            return new SvePredAnd<uint8_t>(machInst, pd, pn, pm, pg);
          case 0x1:
            return new SvePredBic<uint8_t>(machInst, pd, pn, pm, pg);
          case 0x2:
            return new SvePredEor<uint8_t>(machInst, pd, pn, pm, pg);
          case 0x3:
            // SEL variant of the predicate logical group.
            return new SvePredSel<uint8_t>(machInst, pd, pn, pm, pg, true);
          case 0x4:
            // Flag-setting ('S') variants follow.
            return new SvePredAnds<uint8_t>(machInst, pd, pn, pm, pg);
          case 0x5:
            return new SvePredBics<uint8_t>(machInst, pd, pn, pm, pg);
          case 0x6:
            return new SvePredEors<uint8_t>(machInst, pd, pn, pm, pg);
          case 0x8:
            return new SvePredOrr<uint8_t>(machInst, pd, pn, pm, pg);
          case 0x9:
            return new SvePredOrn<uint8_t>(machInst, pd, pn, pm, pg);
          case 0xa:
            return new SvePredNor<uint8_t>(machInst, pd, pn, pm, pg);
          case 0xb:
            return new SvePredNand<uint8_t>(machInst, pd, pn, pm, pg);
          case 0xc:
            return new SvePredOrrs<uint8_t>(machInst, pd, pn, pm, pg);
          case 0xd:
            return new SvePredOrns<uint8_t>(machInst, pd, pn, pm, pg);
          case 0xe:
            return new SvePredNors<uint8_t>(machInst, pd, pn, pm, pg);
          case 0xf:
            return new SvePredNands<uint8_t>(machInst, pd, pn, pm, pg);
        }

        return new Unknown64(machInst);
    }  // decodeSvePredLogicalOps

    // Decodes SVE propagate-break-from-previous-partition instructions
    // (BRKPA, BRKPB and their flag-setting variants).
    StaticInstPtr
    decodeSvePropBreakFromPrevPartition(ExtMachInst machInst)
    {
        // Bits 23 and 9 must be clear for a valid encoding here.
        if (bits(machInst, 23) == 0x0 && bits(machInst, 9) == 0x0) {
            // opc = bit 22 : bit 4 — selects among the four BRKP* forms.
            uint8_t opc = (bits(machInst, 22) << 1) | bits(machInst, 4);
            IntRegIndex pm = (IntRegIndex)(uint8_t) bits(machInst, 19, 16);
            IntRegIndex pg = (IntRegIndex)(uint8_t) bits(machInst, 13, 10);
            IntRegIndex pn = (IntRegIndex)(uint8_t) bits(machInst, 8, 5);
            IntRegIndex pd = (IntRegIndex)(uint8_t) bits(machInst, 3, 0);
            switch (opc) {
              case 0x0:
                // BRKPA
                return new SveBrkpa(machInst, pd, pn, pm, pg);
              case 0x1:
                // BRKPB
                return new SveBrkpb(machInst, pd, pn, pm, pg);
              case 0x2:
                // BRKPAS
                return new SveBrkpas(machInst, pd, pn, pm, pg);
              case 0x3:
                // BRKPBS
                return new SveBrkpbs(machInst, pd, pn, pm, pg);
            }
        }
        return new Unknown64(machInst);
    }  // decodeSvePropBreakFromPrevPartition

    // Decodes SVE conditional partition break instructions (BRKA / BRKB
    // families).  Bit 23 selects the BRKB* forms, bit 22 the
    // flag-setting variant, bit 4 the merging variant.  The
    // flag-setting forms have no merging variant, so flagset && merging
    // falls through to Unknown64.
    StaticInstPtr
    decodeSvePartitionBreakCond(ExtMachInst machInst)
    {
        if (bits(machInst, 18, 16) == 0x0 && bits(machInst, 9) == 0x0) {
            bool flagset = bits(machInst, 22);
            bool merging = bits(machInst, 4);
            IntRegIndex pg = (IntRegIndex)(uint8_t) bits(machInst, 13, 10);
            IntRegIndex pn = (IntRegIndex)(uint8_t) bits(machInst, 8, 5);
            IntRegIndex pd = (IntRegIndex)(uint8_t) bits(machInst, 3, 0);
            if (bits(machInst, 23)) {
                // BRKB variants.
                if (flagset) {
                    if (!merging) {
                        return new SveBrkbs(machInst, pd, pg, pn);
                    }
                } else {
                    if (merging) {
                        return new SveBrkbm(machInst, pd, pg, pn);
                    } else {
                        return new SveBrkbz(machInst, pd, pg, pn);
                    }
                }
            } else {
                // BRKA variants.
                if (flagset) {
                    if (!merging) {
                        return new SveBrkas(machInst, pd, pg, pn);
                    }
                } else {
                    if (merging) {
                        return new SveBrkam(machInst, pd, pg, pn);
                    } else {
                        return new SveBrkaz(machInst, pd, pg, pn);
                    }
                }
            }
            // Only reached for the unallocated flag-setting + merging
            // combination.
            return new Unknown64(machInst);
        }
        return new Unknown64(machInst);
    }  // decodeSvePartitionBreakCond

    // Decodes the SVE predicate test instruction (PTEST).
    StaticInstPtr
    decodeSvePredTest(ExtMachInst machInst)
    {
        if (bits(machInst, 23, 22) == 0x1 &&
            bits(machInst, 18, 16) == 0x0 &&
            bits(machInst, 9) == 0x0) {
            IntRegIndex pn = (IntRegIndex) (uint8_t) bits(machInst, 8, 5);
            IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 13, 10);
            return new SvePtest(machInst, pn, pg);
} 1598 return new Unknown64(machInst); 1599 } // decodeSvePredTest 1600 1601 StaticInstPtr 1602 decodeSvePredIteration(ExtMachInst machInst) 1603 { 1604 uint8_t size = bits(machInst, 23, 22); 1605 uint8_t opc18_16 = bits(machInst, 18, 16); 1606 uint8_t opc10_9 = bits(machInst, 10, 9); 1607 IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 8, 5); 1608 IntRegIndex pdn = (IntRegIndex) (uint8_t) bits(machInst, 3, 0); 1609 if (opc18_16 == 0x1 && opc10_9 == 0x2) { 1610 return decodeSveUnaryPredU<SvePnext>(size, 1611 machInst, pdn, pdn, pg); 1612 } else if (size == 0x1 && opc18_16 == 0x0 && opc10_9 == 0) { 1613 return new SvePfirst<uint8_t>(machInst, pdn, pdn, pg); 1614 } 1615 return new Unknown64(machInst); 1616 } // decodeSvePredIteration 1617 1618 StaticInstPtr 1619 decodeSveInitPred(ExtMachInst machInst) 1620 { 1621 IntRegIndex pd = (IntRegIndex) (uint8_t) bits(machInst, 3, 0); 1622 unsigned size = bits(machInst, 23, 22); 1623 uint8_t imm = bits(machInst, 9, 5); 1624 1625 if (bits(machInst, 16) == 0x0) { 1626 return decodeSvePtrue<SvePtrue>(size, machInst, pd, imm); 1627 } else { 1628 return decodeSvePtrue<SvePtrues>(size, machInst, pd, imm); 1629 } 1630 return new Unknown64(machInst); 1631 } // decodeSveInitPred 1632 1633 StaticInstPtr 1634 decodeSveZeroPredReg(ExtMachInst machInst) 1635 { 1636 if (bits(machInst, 23, 22) == 0x0 && bits(machInst, 18, 16) == 0x0) { 1637 IntRegIndex pd = (IntRegIndex) (uint8_t) bits(machInst, 3, 0); 1638 return new SvePfalse(machInst, pd); 1639 } 1640 return new Unknown64(machInst); 1641 } // decodeSveZeroPredReg 1642 1643 StaticInstPtr 1644 decodeSvePropBreakToNextPartition(ExtMachInst machInst) 1645 { 1646 if (bits(machInst, 23) == 0x0 && 1647 bits(machInst, 18, 16) == 0x0 && 1648 bits(machInst, 9) == 0x0 && 1649 bits(machInst, 4) == 0x0) { 1650 IntRegIndex pg = (IntRegIndex)(uint8_t) bits(machInst, 13, 10); 1651 IntRegIndex pn = (IntRegIndex)(uint8_t) bits(machInst, 8, 5); 1652 IntRegIndex pdm = (IntRegIndex)(uint8_t) 
bits(machInst, 3, 0); 1653 if (bits(machInst, 22) == 0x0) { 1654 return new SveBrkn(machInst, pdm, pn, pdm, pg); 1655 } else { 1656 return new SveBrkns(machInst, pdm, pn, pdm, pg); 1657 } 1658 return new Unknown64(machInst); 1659 } 1660 return new Unknown64(machInst); 1661 } // decodeSvePropBreakToNextPartition 1662 1663 StaticInstPtr 1664 decodeSveReadPredFromFFRPred(ExtMachInst machInst) 1665 { 1666 if (bits(machInst, 23)) { 1667 return new Unknown64(machInst); 1668 } 1669 IntRegIndex pd = (IntRegIndex)(uint8_t) bits(machInst, 3, 0); 1670 IntRegIndex pg = (IntRegIndex)(uint8_t) bits(machInst, 8, 5); 1671 if (bits(machInst, 22)) { 1672 return new SveRdffrsPred(machInst, pd, pg); 1673 } else { 1674 return new SveRdffrPred(machInst, pd, pg); 1675 } 1676 } // decodeSveReadPredFromFFRPred 1677 1678 StaticInstPtr 1679 decodeSveReadPredFromFFRUnpred(ExtMachInst machInst) 1680 { 1681 if (bits(machInst, 23, 22) != 0) { 1682 return new Unknown64(machInst); 1683 } 1684 IntRegIndex pd = (IntRegIndex)(uint8_t) bits(machInst, 3, 0); 1685 return new SveRdffrUnpred(machInst, pd); 1686 } // decodeSveReadPredFromFFRUnpred 1687 1688 StaticInstPtr 1689 decodeSvePredGen(ExtMachInst machInst) 1690 { 1691 uint8_t b_20_15 = (bits(machInst, 20) << 1) | bits(machInst, 15); 1692 switch (b_20_15) { 1693 case 0x0: 1694 return decodeSvePredLogicalOps(machInst); 1695 case 0x1: 1696 return decodeSvePropBreakFromPrevPartition(machInst); 1697 case 0x2: 1698 if (bits(machInst, 19) == 0x0) { 1699 return decodeSvePartitionBreakCond(machInst); 1700 } else { 1701 return decodeSvePropBreakToNextPartition(machInst); 1702 } 1703 case 0x3: 1704 if (bits(machInst, 19) == 0x0) { 1705 if (bits(machInst, 4, 0) == 0x0) { 1706 return decodeSvePredTest(machInst); 1707 } else { 1708 break; 1709 } 1710 } else { 1711 switch (bits(machInst, 13, 12)) { 1712 case 0x0: 1713 if (bits(machInst, 11) == 0x0 && 1714 bits(machInst, 4) == 0x0) { 1715 return decodeSvePredIteration(machInst); 1716 } else { 1717 break; 1718 } 
1719 case 0x1: 1720 break; 1721 case 0x2: 1722 if (bits(machInst, 11, 10) == 0x0 && 1723 bits(machInst, 4) == 0x0) { 1724 return decodeSveInitPred(machInst); 1725 } else if (bits(machInst, 11, 4) == 0x40) { 1726 return decodeSveZeroPredReg(machInst); 1727 } 1728 break; 1729 case 0x3: 1730 if (bits(machInst, 11) == 0x0) { 1731 if (bits(machInst, 16) == 0x0) { 1732 return decodeSveReadPredFromFFRPred(machInst); 1733 } else if (bits(machInst, 8, 4) == 0x0) { 1734 return decodeSveReadPredFromFFRUnpred(machInst); 1735 } 1736 } 1737 break; 1738 } 1739 } 1740 break; 1741 } 1742 return new Unknown64(machInst); 1743 } // decodeSvePredGen 1744 1745 StaticInstPtr 1746 decodeSvePredCount(ExtMachInst machInst) 1747 { 1748 uint8_t b19 = bits(machInst, 19); 1749 if (b19) { 1750 uint8_t b13_11 = bits(machInst, 13, 11); 1751 switch (b13_11) { 1752 case 0x0: 1753 { 1754 if (bits(machInst, 10, 9) != 0x0) { 1755 return new Unknown64(machInst); 1756 } 1757 IntRegIndex zdn = (IntRegIndex) (uint8_t) 1758 bits(machInst, 4, 0); 1759 IntRegIndex pg = (IntRegIndex) (uint8_t) 1760 bits(machInst, 8, 5); 1761 uint8_t esize = bits(machInst, 23, 22); 1762 if (esize == 0x0) { 1763 return new Unknown64(machInst); 1764 } 1765 uint8_t opc = bits(machInst, 18, 17); 1766 if (opc == 0x0) { 1767 uint8_t u = bits(machInst, 16); 1768 if (u) { 1769 return decodeSvePredCountVU<SveUqincpv>(esize, 1770 machInst, zdn, pg); 1771 } else { 1772 return decodeSvePredCountVS<SveSqincpv>(esize, 1773 machInst, zdn, pg); 1774 } 1775 } else if (opc == 0x1) { 1776 uint8_t u = bits(machInst, 16); 1777 if (u) { 1778 return decodeSvePredCountVU<SveUqdecpv>(esize, 1779 machInst, zdn, pg); 1780 } else { 1781 return decodeSvePredCountVS<SveSqdecpv>(esize, 1782 machInst, zdn, pg); 1783 } 1784 } else if (opc == 0x2) { 1785 uint8_t d = bits(machInst, 16); 1786 if (d) { 1787 return decodeSvePredCountVU<SveDecpv>(esize, 1788 machInst, zdn, pg); 1789 } else { 1790 return decodeSvePredCountVU<SveIncpv>(esize, 1791 machInst, zdn, pg); 
1792 } 1793 } 1794 } 1795 break; 1796 case 0x1: 1797 { 1798 IntRegIndex rdn = (IntRegIndex) (uint8_t) 1799 bits(machInst, 4, 0); 1800 IntRegIndex pg = (IntRegIndex) (uint8_t) 1801 bits(machInst, 8, 5); 1802 uint8_t esize = bits(machInst, 23, 22); 1803 uint8_t opc = bits(machInst, 18, 17); 1804 uint8_t opc2 = bits(machInst, 10, 9); 1805 if (opc == 0x0) { 1806 uint8_t u = bits(machInst, 16); 1807 if (opc2 == 0x0) { 1808 if (u) { 1809 return decodeSvePredCountU<SveUqincp32>(esize, 1810 machInst, rdn, pg); 1811 } else { 1812 return decodeSvePredCountS<SveSqincp32>(esize, 1813 machInst, rdn, pg); 1814 } 1815 } else if (opc2 == 0x2) { 1816 if (u) { 1817 return decodeSvePredCountU<SveUqincp64>(esize, 1818 machInst, rdn, pg); 1819 } else { 1820 return decodeSvePredCountS<SveSqincp64>(esize, 1821 machInst, rdn, pg); 1822 } 1823 } 1824 } else if (opc == 0x1) { 1825 uint8_t u = bits(machInst, 16); 1826 if (opc2 == 0x0) { 1827 if (u) { 1828 return decodeSvePredCountU<SveUqdecp32>(esize, 1829 machInst, rdn, pg); 1830 } else { 1831 return decodeSvePredCountS<SveSqdecp32>(esize, 1832 machInst, rdn, pg); 1833 } 1834 } else if (opc2 == 0x2) { 1835 if (u) { 1836 return decodeSvePredCountU<SveUqdecp64>(esize, 1837 machInst, rdn, pg); 1838 } else { 1839 return decodeSvePredCountS<SveSqdecp64>(esize, 1840 machInst, rdn, pg); 1841 } 1842 } 1843 } else if (opc == 0x2) { 1844 if (opc2 == 0x0) { 1845 if (bits(machInst, 16)) { 1846 return decodeSvePredCountU<SveDecp>(esize, 1847 machInst, rdn, pg); 1848 } else { 1849 return decodeSvePredCountU<SveIncp>(esize, 1850 machInst, rdn, pg); 1851 } 1852 } 1853 } 1854 } 1855 break; 1856 case 0x2: 1857 if (bits(machInst, 23, 22) == 0x0 && 1858 bits(machInst, 10, 9) == 0x0 && 1859 bits(machInst, 4, 0) == 0x0) { 1860 uint8_t opc = bits(machInst, 18, 16); 1861 if (opc == 0x0) { 1862 IntRegIndex pn = (IntRegIndex)(uint8_t) 1863 bits(machInst, 8, 5); 1864 return new SveWrffr(machInst, pn); 1865 } else if (opc == 0x4 && bits(machInst, 8, 5) == 0x0) { 1866 
return new SveSetffr(machInst); 1867 } 1868 } 1869 break; 1870 } 1871 } else { 1872 uint8_t opc = bits(machInst, 18, 16); 1873 if (opc == 0 && bits(machInst, 9) == 0) { 1874 IntRegIndex rd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0); 1875 IntRegIndex pn = (IntRegIndex) (uint8_t) bits(machInst, 8, 5); 1876 IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 13, 1877 10); 1878 uint8_t esize = bits(machInst, 23, 22); 1879 return decodeSveUnaryPredU<SveCntp>(esize, 1880 machInst, rd, pn, pg); 1881 } 1882 } 1883 return new Unknown64(machInst); 1884 } // decodeSvePredCount 1885 1886 StaticInstPtr 1887 decodeSveIntCmpSca(ExtMachInst machInst) 1888 { 1889 uint16_t b23_13_12_11_10_3_2_1_0 = (uint16_t) 1890 (bits(machInst, 23) << 8) | (bits(machInst, 13, 10) << 4) | 1891 bits(machInst, 3, 0); 1892 uint8_t b10 = (uint8_t) bits(machInst, 10); 1893 IntRegIndex rn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5); 1894 IntRegIndex rm = (IntRegIndex) (uint8_t) bits(machInst, 20, 16); 1895 if (b23_13_12_11_10_3_2_1_0 == 0x180) { 1896 uint8_t s64b = bits(machInst, 22); 1897 uint8_t ne = bits(machInst, 4); 1898 if (ne) { 1899 if (s64b) { 1900 return new SveCtermne<uint64_t>(machInst, rn, rm); 1901 } else { 1902 return new SveCtermne<uint32_t>(machInst, rn, rm); 1903 } 1904 } else { 1905 if (s64b) { 1906 return new SveCtermeq<uint64_t>(machInst, rn, rm); 1907 } else { 1908 return new SveCtermeq<uint32_t>(machInst, rn, rm); 1909 } 1910 } 1911 } else if (b10) { 1912 IntRegIndex pd = (IntRegIndex) (uint8_t) bits(machInst, 3, 0); 1913 uint8_t size = (uint8_t) bits(machInst, 23, 22); 1914 uint8_t s64b = (uint8_t) bits(machInst, 12); 1915 uint8_t opc = (uint8_t) bits(machInst, 11) << 1 | 1916 bits(machInst, 4); 1917 if (s64b) { 1918 switch (opc) { 1919 case 0: 1920 return decodeSveBinUnpredS<SveWhilelt64>(size, 1921 machInst, pd, rn, rm); 1922 case 1: 1923 return decodeSveBinUnpredS<SveWhilele64>(size, 1924 machInst, pd, rn, rm); 1925 case 2: 1926 return 
decodeSveBinUnpredU<SveWhilelo64>(size, 1927 machInst, pd, rn, rm); 1928 case 3: 1929 return decodeSveBinUnpredU<SveWhilels64>(size, 1930 machInst, pd, rn, rm); 1931 } 1932 } else { 1933 switch (opc) { 1934 case 0: 1935 return decodeSveBinUnpredS<SveWhilelt32>(size, 1936 machInst, pd, rn, rm); 1937 case 1: 1938 return decodeSveBinUnpredS<SveWhilele32>(size, 1939 machInst, pd, rn, rm); 1940 case 2: 1941 return decodeSveBinUnpredU<SveWhilelo32>(size, 1942 machInst, pd, rn, rm); 1943 case 3: 1944 return decodeSveBinUnpredU<SveWhilels32>(size, 1945 machInst, pd, rn, rm); 1946 } 1947 } 1948 } 1949 return new Unknown64(machInst); 1950 } // decodeSveIntCmpSca 1951 1952 StaticInstPtr 1953 decodeSveIntWideImmUnpred0(ExtMachInst machInst) 1954 { 1955 IntRegIndex zdn = (IntRegIndex) (uint8_t) bits(machInst, 4, 0); 1956 uint64_t imm = bits(machInst, 12, 5); 1957 uint8_t sh = bits(machInst, 13); 1958 uint8_t size = bits(machInst, 23, 22); 1959 1960 if (sh) { 1961 if (size == 0x0) { 1962 return new Unknown64(machInst); 1963 } 1964 imm <<= 8; 1965 } 1966 1967 switch (bits(machInst, 18, 16)) { 1968 case 0x0: 1969 return decodeSveWideImmUnpredU<SveAddImm>( 1970 size, machInst, zdn, imm); 1971 case 0x1: 1972 return decodeSveWideImmUnpredU<SveSubImm>( 1973 size, machInst, zdn, imm); 1974 case 0x3: 1975 return decodeSveWideImmUnpredU<SveSubrImm>( 1976 size, machInst, zdn, imm); 1977 case 0x4: 1978 return decodeSveWideImmUnpredS<SveSqaddImm>( 1979 size, machInst, zdn, imm); 1980 case 0x5: 1981 return decodeSveWideImmUnpredU<SveUqaddImm>( 1982 size, machInst, zdn, imm); 1983 case 0x6: 1984 return decodeSveWideImmUnpredS<SveSqsubImm>( 1985 size, machInst, zdn, imm); 1986 case 0x7: 1987 return decodeSveWideImmUnpredU<SveUqsubImm>( 1988 size, machInst, zdn, imm); 1989 } 1990 1991 return new Unknown64(machInst); 1992 } // decodeSveIntWideImmUnpred0 1993 1994 StaticInstPtr 1995 decodeSveIntWideImmUnpred1(ExtMachInst machInst) 1996 { 1997 IntRegIndex zdn = (IntRegIndex) (uint8_t) 
bits(machInst, 4, 0);
        uint64_t imm = bits(machInst, 12, 5);
        uint8_t size = bits(machInst, 23, 22);

        // SMAX/UMAX/SMIN/UMIN (immediate): signed variants sign-extend the
        // 8-bit immediate, unsigned variants use it as-is.
        switch (bits(machInst, 18, 16)) {
          case 0x0:
            return decodeSveWideImmUnpredS<SveSmaxImm>(
                size, machInst, zdn, sext<8>(imm));
          case 0x1:
            return decodeSveWideImmUnpredU<SveUmaxImm>(
                size, machInst, zdn, imm);
          case 0x2:
            return decodeSveWideImmUnpredS<SveSminImm>(
                size, machInst, zdn, sext<8>(imm));
          case 0x3:
            return decodeSveWideImmUnpredU<SveUminImm>(
                size, machInst, zdn, imm);
        }

        return new Unknown64(machInst);
    } // decodeSveIntWideImmUnpred1

    // Decodes SVE MUL (immediate): only opc 0b000 is valid; the 8-bit
    // immediate is sign-extended.
    // NOTE(review): the sign-extended immediate is routed through the
    // ...UnpredU helper (unlike SMAX above, which uses ...UnpredS) —
    // presumably the helper only selects element-type templates; confirm.
    StaticInstPtr
    decodeSveIntWideImmUnpred2(ExtMachInst machInst)
    {
        IntRegIndex zdn = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        uint64_t imm = bits(machInst, 12, 5);
        uint8_t size = bits(machInst, 23, 22);

        if (bits(machInst, 18, 16) == 0x0) {
            return decodeSveWideImmUnpredU<SveMulImm>(
                size, machInst, zdn, sext<8>(imm));
        }

        return new Unknown64(machInst);
    } // decodeSveIntWideImmUnpred2

    // Decodes SVE DUP (immediate): optional left-shift-by-8 of the 8-bit
    // immediate (invalid for byte elements), then sign-extension to 16 or
    // 8 bits depending on the shift.
    StaticInstPtr
    decodeSveIntWideImmUnpred3(ExtMachInst machInst)
    {
        IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        uint64_t imm = bits(machInst, 12, 5);
        uint8_t sh = bits(machInst, 13);
        uint8_t size = bits(machInst, 23, 22);

        if (sh) {
            // Shifted form is undefined for the byte element size.
            if (size == 0x0) {
                return new Unknown64(machInst);
            }
            imm <<= 8;
        }

        if (bits(machInst, 18, 17) == 0x0) {
            if (sh) {
                return decodeSveWideImmUnpredU<SveDupImm>(
                    size, machInst, zd, sext<16>(imm));
            } else {
                return decodeSveWideImmUnpredU<SveDupImm>(
                    size, machInst, zd, sext<8>(imm));
            }
        }

        return new Unknown64(machInst);
    } // decodeSveIntWideImmUnpred3

    // Decodes SVE FDUP: broadcasts a VFP-expanded 8-bit FP immediate;
    // byte element size (size == 0) is invalid.
    StaticInstPtr
    decodeSveIntWideImmUnpred4(ExtMachInst machInst)
    {
        IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        uint8_t size = bits(machInst, 23, 22);

        if (bits(machInst, 18, 17) == 0x0 && size != 0x0) {
            uint64_t imm = vfp_modified_imm(bits(machInst, 12, 5),
                                            decode_fp_data_type(size));
            return decodeSveWideImmUnpredF<SveFdup>(size, machInst, zd, imm);
        }

        return new Unknown64(machInst);
    } // decodeSveIntWideImmUnpred4

    // Dispatcher for the SVE integer wide-immediate (unpredicated) group,
    // keyed on bits 20:19 with extra validity checks on bits 18:16/13/16.
    StaticInstPtr
    decodeSveIntWideImmUnpred(ExtMachInst machInst)
    {
        switch (bits(machInst, 20, 19)) {
          case 0x0:
            if (bits(machInst, 18, 16) != 0x2) {
                return decodeSveIntWideImmUnpred0(machInst);
            }
            break;
          case 0x1:
            if (bits(machInst, 13) == 0x0) {
                return decodeSveIntWideImmUnpred1(machInst);
            }
            break;
          case 0x2:
            if (bits(machInst, 13) == 0x0) {
                return decodeSveIntWideImmUnpred2(machInst);
            }
            break;
          case 0x3:
            if (bits(machInst, 16) == 0x0) {
                return decodeSveIntWideImmUnpred3(machInst);
            } else if (bits(machInst, 13) == 0x0) {
                return decodeSveIntWideImmUnpred4(machInst);
            }
            break;
        }
        return new Unknown64(machInst);
    } // decodeSveIntWideImmUnpred

    // Decodes SVE SDOT/UDOT (vectors): size must have bit 1 set; bit 0
    // selects 16->64-bit vs 8->32-bit element widening, bit 10 selects
    // unsigned vs signed.
    StaticInstPtr
    decodeSveMultiplyAddUnpred(ExtMachInst machInst)
    {
        IntRegIndex zda = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
        IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 20, 16);

        uint8_t size = (uint8_t) bits(machInst, 23, 22);

        if (bits(machInst, 12, 11) != 0 || !(size & 0x2)) {
            return new Unknown64(machInst);
        }

        uint8_t usig = (uint8_t) bits(machInst, 10);
        if (size & 0x1) {
            if (usig) {
                return new SveUdotv<uint16_t, uint64_t>(machInst,
                        zda, zn, zm);
            } else {
                return new SveSdotv<int16_t, int64_t>(machInst,
                        zda, zn, zm);
            }
        } else {
            if (usig) {
                return new SveUdotv<uint8_t, uint32_t>(machInst,
                        zda, zn, zm);
            } else {
                return new SveSdotv<int8_t, int32_t>(machInst,
                        zda, zn, zm);
            }
        }

        // NOTE(review): unreachable — every branch above returns.
        return new Unknown64(machInst);
    } // decodeSveMultiplyAddUnpred

    // Decodes SVE SDOT/UDOT (indexed): the zm register field and the
    // element index width differ between the 64-bit (zm: 19..16, 1-bit
    // index) and 32-bit (zm: 18..16, 2-bit index) forms.
    StaticInstPtr
    decodeSveMultiplyIndexed(ExtMachInst machInst)
    {
        IntRegIndex zda = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);

        uint8_t size = (uint8_t) bits(machInst, 23, 22);

        if (bits(machInst, 12, 11) != 0 || !(size & 0x2)) {
            return new Unknown64(machInst);
        }

        uint8_t usig = (uint8_t) bits(machInst, 10);
        if (size & 0x1) {
            IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 19, 16);
            uint8_t i1 = (uint8_t) bits(machInst, 20);
            if (usig) {
                return new SveUdoti<uint16_t, uint64_t>(machInst,
                        zda, zn, zm, i1);
            } else {
                return new SveSdoti<int16_t, int64_t>(machInst,
                        zda, zn, zm, i1);
            }
        } else {
            IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 18, 16);
            uint8_t i2 = (uint8_t) bits(machInst, 20, 19);
            if (usig) {
                return new SveUdoti<uint8_t, uint32_t>(machInst,
                        zda, zn, zm, i2);
            } else {
                return new SveSdoti<int8_t, int32_t>(machInst,
                        zda, zn, zm, i2);
            }
        }
        // NOTE(review): unreachable — every branch above returns.
        return new Unknown64(machInst);
    } // decodeSveMultiplyIndexed

    // Decodes the SVE FP fast-reduction group (FADDV, FMAXNMV, FMINNMV,
    // FMAXV, FMINV); byte element size is invalid.
    StaticInstPtr
    decodeSveFpFastReduc(ExtMachInst machInst)
    {
        IntRegIndex vd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10);

        uint8_t size = bits(machInst, 23, 22);

        if (size == 0x0) {
            return new Unknown64(machInst);
        }

        switch (bits(machInst, 18, 16)) {
          case 0x0:
            return decodeSveUnaryPredF<SveFaddv>(size, machInst, vd, zn, pg);
          case 0x4:
            return decodeSveUnaryPredF<SveFmaxnmv>(size, machInst, vd, zn, pg);
          case 0x5:
            return decodeSveUnaryPredF<SveFminnmv>(size, machInst, vd, zn, pg);
          case 0x6:
            return decodeSveUnaryPredF<SveFmaxv>(size, machInst, vd, zn, pg);
          case 0x7:
            return decodeSveUnaryPredF<SveFminv>(size, machInst, vd, zn, pg);
        }

        return new Unknown64(machInst);
    } // decodeSveFpFastReduc

    // Decodes SVE FP unary unpredicated ops (FRECPE, FRSQRTE).
    StaticInstPtr
    decodeSveFpUnaryUnpred(ExtMachInst machInst)
    {
        IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);

        uint8_t size = (uint8_t) bits(machInst, 23, 22);
        if (size == 0) {
            return new Unknown64(machInst);
        }
        uint8_t opc = (uint8_t) bits(machInst, 18, 16);

        switch (opc) {
          case 0x6:
            return decodeSveUnaryUnpredF<SveFrecpe>(
                size, machInst, zd, zn);
          case 0x7:
            return decodeSveUnaryUnpredF<SveFrsqrte>(
                size, machInst, zd, zn);
        }
        return new Unknown64(machInst);
    } // decodeSveFpUnaryUnpred

    // Decodes SVE FP compare-with-zero: opc is {bits 17:16, bit 4} and
    // every form compares against an immediate 0.0.
    StaticInstPtr
    decodeSveFpCmpZero(ExtMachInst machInst)
    {
        IntRegIndex pd = (IntRegIndex) (uint8_t) bits(machInst, 3, 0);
        IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10);

        uint8_t size = bits(machInst, 23, 22);
        if (size == 0) {
            return new Unknown64(machInst);
        }
        uint8_t opc = (bits(machInst, 17, 16) << 1) | bits(machInst, 4);

        switch (opc) {
          case 0x0:
            return decodeSveCmpImmF<SveFcmgeZero>(
                size, machInst, pd, zn, 0x0, pg);
          case 0x1:
            return decodeSveCmpImmF<SveFcmgtZero>(
                size, machInst, pd, zn, 0x0, pg);
          case 0x2:
            return decodeSveCmpImmF<SveFcmltZero>(
                size, machInst, pd, zn, 0x0, pg);
          case 0x3:
            return decodeSveCmpImmF<SveFcmleZero>(
                size, machInst, pd, zn, 0x0, pg);
          case 0x4:
            return decodeSveCmpImmF<SveFcmeqZero>(
                size, machInst, pd, zn, 0x0, pg);
          case 0x6:
            return decodeSveCmpImmF<SveFcmneZero>(
                size, machInst, pd, zn, 0x0, pg);
        }
        return new Unknown64(machInst);
    } // decodeSveFpCmpZero

    // Decodes SVE FADDA (ordered FP accumulating reduction); only
    // opc == 0 with a non-byte element size is valid.
    StaticInstPtr
    decodeSveFpAccumReduc(ExtMachInst machInst)
    {
        uint8_t opc = bits(machInst, 18, 16);
        uint8_t size = bits(machInst, 23, 22);
        if (opc != 0 || size == 0) {
            return new Unknown64(machInst);
        }

        IntRegIndex vdn = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10);

        return decodeSveUnaryPredF<SveFadda>(size, machInst, vdn, zm, pg);
    } // decodeSveFpAccumReduc

    // Decodes SVE FP arithmetic, unpredicated (FADD, FSUB, FMUL, FTSMUL,
    // FRECPS, FRSQRTS), keyed on bits 12:10.
    StaticInstPtr
    decodeSveFpArithUnpred(ExtMachInst machInst)
    {
        IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
        IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 20, 16);

        uint8_t size = bits(machInst, 23, 22);
        if (size == 0) {
            return new Unknown64(machInst);
        }
        uint8_t opc = (uint8_t) bits(machInst, 12, 10);

        switch (opc) {
          case 0x0:
            return decodeSveBinUnpredF<SveFaddUnpred>(
                size, machInst, zd, zn, zm);
          case 0x1:
            return decodeSveBinUnpredF<SveFsubUnpred>(
                size, machInst, zd, zn, zm);
          case 0x2:
            return decodeSveBinUnpredF<SveFmulUnpred>(
                size, machInst, zd, zn, zm);
          case 0x3:
            return decodeSveBinUnpredF<SveFtsmul>(
                size, machInst, zd, zn, zm);
          case 0x6:
            return decodeSveBinUnpredF<SveFrecps>(
                size, machInst, zd, zn, zm);
          case 0x7:
            return decodeSveBinUnpredF<SveFrsqrts>(
                size, machInst, zd, zn, zm);
        }
        return new Unknown64(machInst);
    } // decodeSveFpArithUnpred

    // Decodes SVE FP arithmetic, predicated & destructive (FADD..FDIV),
    // keyed on bits 19:16.
    StaticInstPtr
    decodeSveFpArithPred0(ExtMachInst machInst)
    {
        IntRegIndex zdn = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10);

        uint8_t size = (uint8_t) bits(machInst, 23, 22);
        if (size == 0) {
            return new Unknown64(machInst);
        }
        uint8_t opc = (uint8_t) bits(machInst, 19, 16);

        switch (opc) {
          case 0x0:
            return decodeSveBinDestrPredF<SveFaddPred>(
                size, machInst, zdn, zm, pg);
          case 0x1:
            return decodeSveBinDestrPredF<SveFsubPred>(
                size, machInst, zdn, zm, pg);
          case 0x2:
            return decodeSveBinDestrPredF<SveFmulPred>(
                size, machInst, zdn, zm, pg);
          case 0x3:
            return decodeSveBinDestrPredF<SveFsubr>(
                size, machInst, zdn, zm, pg);
          case 0x4:
            return decodeSveBinDestrPredF<SveFmaxnm>(
                size, machInst, zdn, zm, pg);
          case 0x5:
            return decodeSveBinDestrPredF<SveFminnm>(
                size, machInst, zdn, zm, pg);
          case 0x6:
            return decodeSveBinDestrPredF<SveFmax>(
                size, machInst, zdn, zm, pg);
          case 0x7:
            return decodeSveBinDestrPredF<SveFmin>(
                size, machInst, zdn, zm, pg);
          case 0x8:
            return decodeSveBinDestrPredF<SveFabd>(
                size, machInst, zdn, zm, pg);
          case 0x9:
            return decodeSveBinDestrPredF<SveFscale>(
                size, machInst, zdn, zm, pg);
          case 0xa:
            return decodeSveBinDestrPredF<SveFmulx>(
                size, machInst, zdn, zm, pg);
          case 0xc:
            return decodeSveBinDestrPredF<SveFdivr>(
                size, machInst, zdn, zm, pg);
          case 0xd:
            return decodeSveBinDestrPredF<SveFdiv>(
                size, machInst, zdn, zm, pg);
        }
        return new Unknown64(machInst);
    } // decodeSveFpArithPred0

    // Decodes SVE FTMAD (FP trig multiply-add coefficient) with a 3-bit
    // immediate; byte element size is invalid.
    StaticInstPtr
    decodeSveFpTrigMAddCoeff(ExtMachInst machInst)
    {
        IntRegIndex zdn = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
        uint8_t imm = (uint8_t) bits(machInst, 18, 16);

        uint8_t size = (uint8_t) bits(machInst, 23, 22);
        if (size == 0) {
            return new Unknown64(machInst);
        }

        return decodeSveTerImmUnpredF<SveFtmad>(size, machInst, zdn, zm, imm);
    } // decodeSveFpTrigMAddCoeff

    // Decodes SVE FP arithmetic with immediate, predicated: the 1-bit
    // immediate selector (bit 5) is expanded to an FP constant by the
    // per-opcode helper (add/sub, mul, or max/min expansion).
    StaticInstPtr
    decodeSveFpArithImmPred(ExtMachInst machInst)
    {
        IntRegIndex zdn = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10);
        uint64_t imm;

        uint8_t size = (uint8_t) bits(machInst, 23, 22);
        if (size == 0) {
            return new Unknown64(machInst);
        }

        uint8_t opc = (uint8_t) bits(machInst, 18, 16);

        switch (opc) {
          case 0x0:
            imm = sveExpandFpImmAddSub((uint8_t) bits(machInst, 5), size);
            return decodeSveBinImmPredF<SveFaddImm>(
                size, machInst, zdn, imm, pg);
          case 0x1:
            imm = sveExpandFpImmAddSub((uint8_t) bits(machInst, 5), size);
            return decodeSveBinImmPredF<SveFsubImm>(
                size, machInst, zdn, imm, pg);
          case 0x2:
            imm = sveExpandFpImmMul((uint8_t) bits(machInst, 5), size);
            return decodeSveBinImmPredF<SveFmulImm>(
                size, machInst, zdn, imm, pg);
          case 0x3:
            imm = sveExpandFpImmAddSub((uint8_t) bits(machInst, 5), size);
            return decodeSveBinImmPredF<SveFsubrImm>(
                size, machInst, zdn, imm, pg);
          case 0x4:
            imm = sveExpandFpImmMaxMin((uint8_t) bits(machInst, 5), size);
            return decodeSveBinImmPredF<SveFmaxnmImm>(
                size, machInst, zdn, imm, pg);
          case 0x5:
            imm = sveExpandFpImmMaxMin((uint8_t) bits(machInst, 5), size);
            return decodeSveBinImmPredF<SveFminnmImm>(
                size, machInst, zdn, imm, pg);
          case 0x6:
            imm = sveExpandFpImmMaxMin((uint8_t) bits(machInst, 5), size);
            return decodeSveBinImmPredF<SveFmaxImm>(
                size, machInst, zdn, imm, pg);
          case 0x7:
            imm = sveExpandFpImmMaxMin((uint8_t) bits(machInst, 5), size);
            return decodeSveBinImmPredF<SveFminImm>(
                size, machInst, zdn, imm, pg);
        }
        return new Unknown64(machInst);
    } // decodeSveFpArithImmPred

    // Dispatcher for the predicated SVE FP arithmetic groups, keyed on
    // bits 20 and 19.
    StaticInstPtr
    decodeSveFpArithPred(ExtMachInst machInst)
    {
        if (bits(machInst, 20) == 0) {
            return decodeSveFpArithPred0(machInst);
        } else if (bits(machInst, 19) == 0) {
            return decodeSveFpTrigMAddCoeff(machInst);
        } else {
            return decodeSveFpArithImmPred(machInst);
        }
    } // decodeSveFpArithPred

    // Decodes SVE FP unary predicated ops, keyed on bits 20:19:
    //   0x0 - FRINT* (round to integral), 0x1 - FCVT/FRECPX/FSQRT,
    //   0x2 - SCVTF/UCVTF, 0x3 - FCVTZS/FCVTZU.
    // The cvt cases combine size and bits 18:16 into a single opc; the
    // template arguments are <source element, destination element> sizes.
    StaticInstPtr
    decodeSveFpUnaryPred(ExtMachInst machInst)
    {
        IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10);

        uint8_t size = (uint8_t) bits(machInst, 23, 22);
        if (size == 0) {
            return new Unknown64(machInst);
        }

        uint8_t b20_19 = bits(machInst, 20, 19);
        switch (b20_19) {
          case 0x0:
            {
                if (bits(machInst, 18, 16) == 0x5) {
                    return new Unknown64(machInst);
                }
                // SVE floating-point round to integral value
                uint8_t opc = (uint8_t) bits(machInst, 18, 16);
                switch (opc) {
                  case 0x0:
                    return decodeSveUnaryPredF<SveFrintn>(
                        size, machInst, zd, zn, pg);
                  case 0x1:
                    return decodeSveUnaryPredF<SveFrintp>(
                        size, machInst, zd, zn, pg);
                  case 0x2:
                    return decodeSveUnaryPredF<SveFrintm>(
                        size, machInst, zd, zn, pg);
                  case 0x3:
                    return decodeSveUnaryPredF<SveFrintz>(
                        size, machInst, zd, zn, pg);
                  case 0x4:
                    return decodeSveUnaryPredF<SveFrinta>(
                        size, machInst, zd, zn, pg);
                  case 0x6:
                    return decodeSveUnaryPredF<SveFrintx>(
                        size, machInst, zd, zn, pg);
                  case 0x7:
                    return decodeSveUnaryPredF<SveFrinti>(
                        size, machInst, zd, zn, pg);
                }
            }
            break;
          case 0x1:
            {
                // SVE floating-point unary operations (predicated)
                uint8_t b18_16 = bits(machInst, 18, 16);
                switch (b18_16) {
                  case 0x0:
                    // FCVT to half precision
                    if (size == 0x2) {
                        return new SveFcvtNarrow<uint32_t, uint16_t>(
                            machInst, zd, zn, pg);
                    } else if (size == 0x3) {
                        return new SveFcvtNarrow<uint64_t, uint16_t>(
                            machInst, zd, zn, pg);
                    }
                    break;
                  case 0x1:
                    // FCVT from half precision
                    if (size == 0x2) {
                        return new SveFcvtWiden<uint16_t, uint32_t>(
                            machInst, zd, zn, pg);
                    } else if (size == 0x3) {
                        return new SveFcvtWiden<uint16_t, uint64_t>(
                            machInst, zd, zn, pg);
                    }
                    break;
                  case 0x2:
                    // FCVT double -> single
                    if (size == 0x3) {
                        return new SveFcvtNarrow<uint64_t, uint32_t>(
                            machInst, zd, zn, pg);
                    }
                    break;
                  case 0x3:
                    // FCVT single -> double
                    if (size == 0x3) {
                        return new SveFcvtWiden<uint32_t, uint64_t>(
                            machInst, zd, zn, pg);
                    }
                    break;
                  case 0x4:
                    if (size != 0x0) {
                        return decodeSveUnaryPredF<SveFrecpx>(
                            size, machInst, zd, zn, pg);
                    }
                    break;
                  case 0x5:
                    if (size != 0x0) {
                        return decodeSveUnaryPredF<SveFsqrt>(
                            size, machInst, zd, zn, pg);
                    }
                    break;
                }
            }
            break;
          case 0x2:
            {
                // SVE integer convert to floating-point
                uint8_t opc = (size << 3) | bits(machInst, 18, 16);
                switch (opc) {
                  case 0xa:
                    return new SveScvtfNarrow<uint16_t, uint16_t>(
                        machInst, zd, zn, pg);
                  case 0xb:
                    return new SveUcvtfNarrow<uint16_t, uint16_t>(
                        machInst, zd, zn, pg);
                  case 0xc:
                    return new SveScvtfNarrow<uint32_t, uint16_t>(
                        machInst, zd, zn, pg);
                  case 0xd:
                    return new SveUcvtfNarrow<uint32_t, uint16_t>(
                        machInst, zd, zn, pg);
                  case 0xe:
                    return new SveScvtfNarrow<uint64_t, uint16_t>(
                        machInst, zd, zn, pg);
                  case 0xf:
                    return new SveUcvtfNarrow<uint64_t, uint16_t>(
                        machInst, zd, zn, pg);
                  case 0x14:
                    return new SveScvtfNarrow<uint32_t, uint32_t>(
                        machInst, zd, zn, pg);
                  case 0x15:
                    return new SveUcvtfNarrow<uint32_t, uint32_t>(
                        machInst, zd, zn, pg);
                  case 0x18:
                    return new SveScvtfWiden<uint32_t, uint64_t>(
                        machInst, zd, zn, pg);
                  case 0x19:
                    return new SveUcvtfWiden<uint32_t, uint64_t>(
                        machInst, zd, zn, pg);
                  case 0x1c:
                    return new SveScvtfNarrow<uint64_t, uint32_t>(
                        machInst, zd, zn, pg);
                  case 0x1d:
                    return new SveUcvtfNarrow<uint64_t, uint32_t>(
                        machInst, zd, zn, pg);
                  case 0x1e:
                    return new SveScvtfNarrow<uint64_t, uint64_t>(
                        machInst, zd, zn, pg);
                  case 0x1f:
                    return new SveUcvtfNarrow<uint64_t, uint64_t>(
                        machInst, zd, zn, pg);
                }
            }
            break;
          case 0x3:
            {
                // SVE floating-point convert to integer
                uint8_t opc = (size << 3) | bits(machInst, 18, 16);
                switch (opc) {
                  case 0xa:
                    return new SveFcvtzsNarrow<uint16_t, uint16_t>(
                        machInst, zd, zn, pg);
                  case 0xb:
                    return new SveFcvtzuNarrow<uint16_t, uint16_t>(
                        machInst, zd, zn, pg);
                  case 0xc:
                    return new SveFcvtzsWiden<uint16_t, uint32_t>(
                        machInst, zd, zn, pg);
                  case 0xd:
                    return new SveFcvtzuWiden<uint16_t, uint32_t>(
                        machInst, zd, zn, pg);
                  case 0xe:
                    return new SveFcvtzsWiden<uint16_t, uint64_t>(
                        machInst, zd, zn, pg);
                  case 0xf:
                    return new SveFcvtzuWiden<uint16_t, uint64_t>(
                        machInst, zd, zn, pg);
                  case 0x14:
                    return new SveFcvtzsNarrow<uint32_t, uint32_t>(
                        machInst, zd, zn, pg);
                  case 0x15:
                    return new SveFcvtzuNarrow<uint32_t, uint32_t>(
                        machInst, zd, zn, pg);
                  case 0x18:
                    return new SveFcvtzsNarrow<uint64_t, uint32_t>(
                        machInst, zd, zn, pg);
                  case 0x19:
                    return new SveFcvtzuNarrow<uint64_t, uint32_t>(
                        machInst, zd, zn, pg);
                  case 0x1c:
                    return new SveFcvtzsWiden<uint32_t, uint64_t>(
                        machInst, zd, zn, pg);
                  case 0x1d:
                    return new SveFcvtzuWiden<uint32_t, uint64_t>(
                        machInst, zd, zn, pg);
                  case 0x1e:
                    return new SveFcvtzsNarrow<uint64_t, uint64_t>(
                        machInst, zd, zn, pg);
                  case 0x1f:
                    return new SveFcvtzuNarrow<uint64_t, uint64_t>(
                        machInst, zd, zn, pg);
                }
            }
            break;
        }
        return new Unknown64(machInst);
    } // decodeSveFpUnaryPred

    // Decodes SVE FP compare (vectors): opc is {bit 15, bit 13, bit 4}
    // (FCMGE, FCMGT, FCMEQ, FCMNE, FCMUO, FACGE, FACGT).
    StaticInstPtr
    decodeSveFpCmpVec(ExtMachInst machInst)
    {
        IntRegIndex pd = (IntRegIndex) (uint8_t) bits(machInst, 3, 0);
        IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
        IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 20, 16);
        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10);

        uint8_t size = bits(machInst, 23, 22);
        if (size == 0) {
            return new Unknown64(machInst);
        }
        uint8_t opc = (bits(machInst, 15) << 2) |
                      (bits(machInst, 13) << 1) |
                      bits(machInst, 4);

        switch (opc) {
          case 0x0:
            return decodeSveCmpF<SveFcmge>(size, machInst, pd, zn, zm, pg);
          case 0x1:
            return decodeSveCmpF<SveFcmgt>(size, machInst, pd, zn, zm, pg);
          case 0x2:
            return decodeSveCmpF<SveFcmeq>(size, machInst, pd, zn, zm, pg);
          case 0x3:
            return decodeSveCmpF<SveFcmne>(size, machInst, pd, zn, zm, pg);
          case 0x4:
            return decodeSveCmpF<SveFcmuo>(size, machInst, pd, zn, zm, pg);
          case 0x5:
            return decodeSveCmpF<SveFacge>(size, machInst, pd, zn, zm, pg);
          case 0x7:
            return decodeSveCmpF<SveFacgt>(size, machInst, pd, zn, zm, pg);
        }
        return new Unknown64(machInst);
    } // decodeSveFpCmpVec

    // Decodes SVE FP fused multiply-add (predicated). For the *MAD/*MSB
    // forms the operand roles differ, hence the swapped argument order
    // (zda acts as zdn and zm as the addend) — see inline comments.
    StaticInstPtr
    decodeSveFpFusedMulAdd(ExtMachInst machInst)
    {
        IntRegIndex zda = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
        IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 20, 16);
        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10);

        uint8_t size = bits(machInst, 23, 22);
        if (size == 0) {
            return new Unknown64(machInst);
        }
        uint8_t opc = bits(machInst, 15, 13);

        switch (opc) {
          case 0x0:
            return decodeSveTerPredF<SveFmla>(
                size, machInst, zda, zn, zm, pg);
          case 0x1:
            return decodeSveTerPredF<SveFmls>(
                size, machInst, zda, zn, zm, pg);
          case 0x2:
            return decodeSveTerPredF<SveFnmla>(
                size, machInst, zda, zn, zm, pg);
          case 0x3:
            return decodeSveTerPredF<SveFnmls>(
                size, machInst, zda, zn, zm, pg);
          case 0x4:
            return decodeSveTerPredF<SveFmad>(
                size, machInst, zda /* zdn */, zm /* za */, zn, pg);
          case 0x5:
            return decodeSveTerPredF<SveFmsb>(
                size, machInst, zda /* zdn */, zm /* za */, zn, pg);
          case 0x6:
            return decodeSveTerPredF<SveFnmad>(
                size, machInst, zda /* zdn */, zm /* za */, zn, pg);
          case 0x7:
            return decodeSveTerPredF<SveFnmsb>(
                size, machInst, zda /* zdn */, zm /* za */, zn, pg);
        }
        return new Unknown64(machInst);
    } // decodeSveFpFusedMulAdd

    // Decodes SVE FCADD (complex add): rot is encoded as bit 16 shifted
    // left with bit 0 forced to 1; byte element size is invalid.
    StaticInstPtr
    decodeSveFpCplxAdd(ExtMachInst machInst)
    {
        uint8_t size = bits(machInst, 23, 22);
        uint8_t rot = bits(machInst, 16) << 1 | 0x01;
        IntRegIndex zdn = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10);
        switch (size) {
          case 1:
            return new SveFcadd<uint16_t>(machInst,
                    zdn, zdn, zm, pg, rot);
          case 2:
            return new SveFcadd<uint32_t>(machInst,
                    zdn, zdn, zm, pg, rot);
          case 3:
            return new SveFcadd<uint64_t>(machInst,
                    zdn, zdn, zm, pg, rot);
        }
        return new Unknown64(machInst);
    }

    // Decodes SVE FCMLA (vectors, predicated) with a 2-bit rotation.
    StaticInstPtr
    decodeSveFpCplxMulAddVec(ExtMachInst machInst)
    {
        uint8_t size = bits(machInst, 23, 22);
        if (size == 0) {
            return new Unknown64(machInst);
        }

        IntRegIndex zda = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
        IntRegIndex pg = (IntRegIndex) (uint8_t) bits(machInst, 12, 10);
        IntRegIndex zm = (IntRegIndex) (uint8_t) bits(machInst, 20, 16);
        uint8_t rot = bits(machInst, 14, 13);
        switch (size) {
          case 1:
            return new SveFcmlav<uint16_t>(machInst,
                    zda, zn, zm, pg, rot);
          case 2:
            return new SveFcmlav<uint32_t>(machInst,
                    zda, zn, zm, pg, rot);
          case 3:
            return new SveFcmlav<uint64_t>(machInst,
                    zda, zn, zm, pg, rot);
        }

        return new Unknown64(machInst);
    } // decodeSveFpCplxMulAddVec

    // Decodes SVE FCMLA (indexed): zm register width and index width
    // depend on the element size (single vs double precision).
    StaticInstPtr
    decodeSveFpCplxMulAddIndexed(ExtMachInst machInst)
    {
        uint8_t size = bits(machInst, 23, 22);
        if (size < 2) {
            return new Unknown64(machInst);
        }

        IntRegIndex zda = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
        IntRegIndex zm;
        uint8_t rot = bits(machInst, 11, 10);
        uint8_t imm;

        switch (size) {
          case 2:
            zm = (IntRegIndex) (uint8_t) bits(machInst, 18, 16);
            imm = bits(machInst, 20, 19);
            return new SveFcmlai<uint32_t>(machInst,
                    zda, zn, zm, rot, imm);
          case 3:
            zm = (IntRegIndex) (uint8_t) bits(machInst, 19, 16);
            imm = bits(machInst, 20);
            return new SveFcmlai<uint64_t>(machInst,
                    zda, zn, zm, rot, imm);
        }
        return new Unknown64(machInst);
    } // decodeSveFpCplxMulAddIndexed

    // Decodes SVE FMUL (indexed): for half precision the index gains a
    // third bit from bit 22; zm/index field widths vary with size.
    StaticInstPtr
    decodeSveFpMulIndexed(ExtMachInst machInst)
    {
        IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);

        uint8_t size = bits(machInst, 23, 22);
        switch (size) {
          case 0x0:
          case 0x1:
            return new SveFmulIdx<uint16_t>(
                machInst, zd, zn,
                (IntRegIndex) (uint8_t) bits(machInst, 18, 16),
                bits(machInst, 20, 19) | (bits(machInst, 22) << 2));
          case 0x2:
            return new SveFmulIdx<uint32_t>(
                machInst, zd, zn,
                (IntRegIndex) (uint8_t) bits(machInst, 18, 16),
                bits(machInst, 20, 19));
          case 0x3:
            return new SveFmulIdx<uint64_t>(
                machInst, zd, zn,
                (IntRegIndex) (uint8_t) bits(machInst, 19, 16),
                bits(machInst, 20));
          default:
            return new Unknown64(machInst);
        }

    } // decodeSveFpMulIndexed

    // Decodes SVE FMLA/FMLS (indexed): bit 10 selects FMLS; the zm and
    // index encodings match decodeSveFpMulIndexed above.
    StaticInstPtr
    decodeSveFpMulAddIndexed(ExtMachInst machInst)
    {
        IntRegIndex zd = (IntRegIndex) (uint8_t) bits(machInst, 4, 0);
        IntRegIndex zn = (IntRegIndex) (uint8_t) bits(machInst, 9, 5);
        const uint8_t op = bits(machInst, 10);

        uint8_t size = bits(machInst, 23, 22);
        switch (size) {
          case 0x0:
          case 0x1:
            if (op) {
                return new SveFmlsIdx<uint16_t>(
                    machInst, zd, zn,
                    (IntRegIndex) (uint8_t) bits(machInst, 18, 16),
                    bits(machInst, 20, 19) | (bits(machInst, 22) << 2));
            } else {
                return new SveFmlaIdx<uint16_t>(
                    machInst, zd, zn,
                    (IntRegIndex) (uint8_t) bits(machInst, 18, 16),
                    bits(machInst, 20, 19) | (bits(machInst, 22) << 2));
            }
          case 0x2:
            if (op) {
                return new SveFmlsIdx<uint32_t>(
                    machInst, zd, zn,
                    (IntRegIndex) (uint8_t) bits(machInst, 18, 16),
                    bits(machInst, 20, 19));
            } else {
                return new SveFmlaIdx<uint32_t>(
                    machInst, zd, zn,
                    (IntRegIndex) (uint8_t) bits(machInst, 18, 16),
                    bits(machInst, 20, 19));
            }
          case 0x3:
            if (op) {
                return new SveFmlsIdx<uint64_t>(
                    machInst, zd, zn,
                    (IntRegIndex) (uint8_t) bits(machInst, 19, 16),
                    bits(machInst, 20));
            } else {
                return new SveFmlaIdx<uint64_t>(
                    machInst, zd, zn,
                    (IntRegIndex) (uint8_t) bits(machInst, 19, 16),
                    bits(machInst, 20));
            }
          default:
            return new Unknown64(machInst);
        }
    } // decodeSveFpMulAddIndexed

    // SVE memory decode groups below are not implemented yet: every
    // encoding decodes to Unknown64.
    StaticInstPtr
    decodeSveMemGather32(ExtMachInst machInst)
    {
        return new Unknown64(machInst);
    } // decodeSveMemGather32

    StaticInstPtr
    decodeSveMemContigLoad(ExtMachInst machInst)
    {
        return new Unknown64(machInst);
    } // decodeSveMemContigLoad

    StaticInstPtr
    decodeSveMemGather64(ExtMachInst machInst)
    {
        return new Unknown64(machInst);
    } // decodeSveMemGather64

    StaticInstPtr
    decodeSveMemStore(ExtMachInst machInst)
    {
        return new Unknown64(machInst);
    } // decodeSveMemStore

} // namespace Aarch64
}};