// Copyright (c) 2017-2019 ARM Limited
// All rights reserved
//
// The license below extends only to copyright in the software and shall
// not be construed as granting a license to any other intellectual
// property including but not limited to intellectual property relating
// to a hardware implementation of the functionality of the software
// licensed hereunder. You may use the software subject to the license
// terms below provided that you ensure that this notice is replicated
// unmodified and in its entirety in all distributions of the software,
// modified or unmodified, in source code or in binary form.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met: redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer;
// redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution;
// neither the name of the copyright holders nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Authors: Giacomo Gabrielli

/// @file
/// SVE top-level decoder.
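///
/// The decoding functions defined below (decodeSveInt, decodeSveFp and
/// decodeSveMem) perform only the top-level dispatch: each one inspects
/// a handful of instruction bits to select an SVE encoding group and
/// then delegates to the matching second-level decoding function
/// declared in the header output block.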

output header {{
namespace Aarch64
{
    // Second-level decoding functions, one per SVE encoding group.
    // They are only declared here; their definitions live elsewhere
    // in the SVE decoder.
    StaticInstPtr decodeSveIntArithBinPred(ExtMachInst machInst);
    StaticInstPtr decodeSveIntReduc(ExtMachInst machInst);
    StaticInstPtr decodeSveShiftByImmPred(ExtMachInst machInst);
    StaticInstPtr decodeSveIntArithUnaryPred(ExtMachInst machInst);
    StaticInstPtr decodeSveIntMulAdd(ExtMachInst machInst);
    StaticInstPtr decodeSveIntArithUnpred(ExtMachInst machInst);
    StaticInstPtr decodeSveIntLogUnpred(ExtMachInst machInst);
    StaticInstPtr decodeSveIndexGen(ExtMachInst machInst);
    StaticInstPtr decodeSveStackAlloc(ExtMachInst machInst);
    StaticInstPtr decodeSveShiftByImmUnpred(ExtMachInst machInst);
    StaticInstPtr decodeSveCompVecAddr(ExtMachInst machInst);
    StaticInstPtr decodeSveIntMiscUnpred(ExtMachInst machInst);
    StaticInstPtr decodeSveElemCount(ExtMachInst machInst);
    StaticInstPtr decodeSveLogMaskImm(ExtMachInst machInst);
    StaticInstPtr decodeSveIntWideImmPred(ExtMachInst machInst);
    StaticInstPtr decodeSvePermExtract(ExtMachInst machInst);
    StaticInstPtr decodeSvePermUnpred(ExtMachInst machInst);
    StaticInstPtr decodeSvePermPredicates(ExtMachInst machInst);
    StaticInstPtr decodeSvePermIntlv(ExtMachInst machInst);
    StaticInstPtr decodeSvePermPred(ExtMachInst machInst);
    StaticInstPtr decodeSveSelVec(ExtMachInst machInst);
    StaticInstPtr decodeSveIntCmpVec(ExtMachInst machInst);
    StaticInstPtr decodeSveIntCmpUImm(ExtMachInst machInst);
    StaticInstPtr decodeSveIntCmpSImm(ExtMachInst machInst);
    StaticInstPtr decodeSvePredGen(ExtMachInst machInst);
    StaticInstPtr decodeSvePredCount(ExtMachInst machInst);
    StaticInstPtr decodeSveIntCmpSca(ExtMachInst machInst);
    StaticInstPtr decodeSveIntWideImmUnpred(ExtMachInst machInst);

    StaticInstPtr decodeSveMultiplyAddUnpred(ExtMachInst machInst);
    StaticInstPtr decodeSveMultiplyIndexed(ExtMachInst machInst);

    StaticInstPtr decodeSveFpFastReduc(ExtMachInst machInst);
    StaticInstPtr decodeSveFpUnaryUnpred(ExtMachInst machInst);
    StaticInstPtr decodeSveFpCmpZero(ExtMachInst machInst);
    StaticInstPtr decodeSveFpAccumReduc(ExtMachInst machInst);
    StaticInstPtr decodeSveFpArithUnpred(ExtMachInst machInst);
    StaticInstPtr decodeSveFpArithPred(ExtMachInst machInst);
    StaticInstPtr decodeSveFpUnaryPred(ExtMachInst machInst);
    StaticInstPtr decodeSveFpCmpVec(ExtMachInst machInst);
    StaticInstPtr decodeSveFpFusedMulAdd(ExtMachInst machInst);
    StaticInstPtr decodeSveFpCplxAdd(ExtMachInst machInst);
    StaticInstPtr decodeSveFpCplxMulAddVec(ExtMachInst machInst);
    StaticInstPtr decodeSveFpMulAddIndexed(ExtMachInst machInst);
    StaticInstPtr decodeSveFpCplxMulAddIndexed(ExtMachInst machInst);
    StaticInstPtr decodeSveFpMulIndexed(ExtMachInst machInst);

    StaticInstPtr decodeSveMemGather32(ExtMachInst machInst);
    StaticInstPtr decodeSveMemContigLoad(ExtMachInst machInst);
    StaticInstPtr decodeSveMemGather64(ExtMachInst machInst);
    StaticInstPtr decodeSveMemStore(ExtMachInst machInst);
}
}};

output decoder {{
namespace Aarch64
{

    // Decode the SVE integer encoding groups: dispatch on instruction
    // bits 29, 24 and 21, then on further bits within each group.
    StaticInstPtr
    decodeSveInt(ExtMachInst machInst)
    {
        uint8_t b_29_24_21 = (bits(machInst, 29) << 2) |
                             (bits(machInst, 24) << 1) |
                             bits(machInst, 21);
        switch (b_29_24_21) {
          case 0x0:
            {
                if (bits(machInst, 14)) {
                    return decodeSveIntMulAdd(machInst);
                } else {
                    uint8_t b_15_13 = (bits(machInst, 15) << 1) |
                                      bits(machInst, 13);
                    switch (b_15_13) {
                      case 0x0:
                        if (bits(machInst, 30)) {
                            return
                                decodeSveMultiplyAddUnpred(machInst);
                        } else {
                            return decodeSveIntArithBinPred(machInst);
                        }
                      case 0x1:
                        return decodeSveIntReduc(machInst);
                      case 0x2:
                        return decodeSveShiftByImmPred(machInst);
                      case 0x3:
                        return decodeSveIntArithUnaryPred(machInst);
                    }
                }
                break;
            }
          case 0x1:
            {
                uint8_t b_15_14 = bits(machInst, 15, 14);
                uint8_t b_13 = bits(machInst, 13);
                uint8_t b_12 = bits(machInst, 12);
                switch (b_15_14) {
                  case 0x0:
                    if (b_13) {
                        return decodeSveIntLogUnpred(machInst);
                    } else {
                        if (bits(machInst, 30)) {
                            return decodeSveMultiplyIndexed(machInst);
                        } else {
                            return decodeSveIntArithUnpred(machInst);
                        }
                    }
                  case 0x1:
                    if (b_13) {
                        return new Unknown64(machInst);
                    } else if (b_12) {
                        return decodeSveStackAlloc(machInst);
                    } else {
                        return decodeSveIndexGen(machInst);
                    }
                  case 0x2:
                    if (b_13) {
                        if (b_12) {
                            return decodeSveIntMiscUnpred(machInst);
                        } else {
                            return decodeSveCompVecAddr(machInst);
                        }
                    } else {
                        return decodeSveShiftByImmUnpred(machInst);
                    }
                  case 0x3:
                    return decodeSveElemCount(machInst);
                }
                break;
            }
          case 0x2:
            if (bits(machInst, 20)) {
                return decodeSveIntWideImmPred(machInst);
            } else {
                return decodeSveLogMaskImm(machInst);
            }
          case 0x3:
            {
                uint8_t b_15_14 = bits(machInst, 15, 14);
                uint8_t b_13 = bits(machInst, 13);
                switch (b_15_14) {
                  case 0x0:
                    if (b_13) {
                        return decodeSvePermUnpred(machInst);
                    } else {
                        return decodeSvePermExtract(machInst);
                    }
                  case 0x1:
                    if (b_13) {
                        return decodeSvePermIntlv(machInst);
                    } else {
                        return decodeSvePermPredicates(machInst);
                    }
                  case 0x2:
                    return decodeSvePermPred(machInst);
                  case 0x3:
                    return decodeSveSelVec(machInst);
                }
                break;
            }
          case 0x4:
            return decodeSveIntCmpVec(machInst);
          case 0x5:
            return decodeSveIntCmpUImm(machInst);
          case 0x6:
            if (bits(machInst, 14)) {
                return decodeSvePredGen(machInst);
            } else {
                return decodeSveIntCmpSImm(machInst);
            }
          case 0x7:
            {
                uint8_t b_15_14 = bits(machInst, 15, 14);
                switch (b_15_14) {
                  case 0x0:
                    return decodeSveIntCmpSca(machInst);
                  case 0x1:
                    return new Unknown64(machInst);
                  case 0x2:
                    return decodeSvePredCount(machInst);
                  case 0x3:
                    return decodeSveIntWideImmUnpred(machInst);
                }
            }
        }
        return new Unknown64(machInst);
    }

    // Decode the SVE floating-point encoding groups: dispatch on
    // instruction bits 24 and 21, then on further bits within each
    // group.
    StaticInstPtr
    decodeSveFp(ExtMachInst machInst)
    {
        uint8_t b_24_21 = (bits(machInst, 24) << 1) |
                          bits(machInst, 21);
        switch (b_24_21) {
          case 0x0:
            if (!bits(machInst, 15)) {
                return decodeSveFpCplxMulAddVec(machInst);
            } else if ((bits(machInst, 20, 17) |
                        bits(machInst, 14, 13)) == 0) {
                return decodeSveFpCplxAdd(machInst);
            }
            return new Unknown64(machInst);
          case 0x1:
            if (bits(machInst, 15, 12) == 1) {
                return decodeSveFpCplxMulAddIndexed(machInst);
            }
            switch (bits(machInst, 13, 11)) {
              case 0:
                return decodeSveFpMulAddIndexed(machInst);
              case 4:
                if (!bits(machInst, 10))
                    return decodeSveFpMulIndexed(machInst);
                M5_FALLTHROUGH;
              default:
                return new Unknown64(machInst);
            }
          case 0x2:
            {
                if (bits(machInst, 14)) {
                    return decodeSveFpCmpVec(machInst);
                } else {
                    uint8_t b_15_13 = (bits(machInst, 15) << 1) |
                                      bits(machInst, 13);
                    switch (b_15_13) {
                      case 0x0:
                        return decodeSveFpArithUnpred(machInst);
                      case 0x1:
                        {
                            uint8_t b_20_19 = (bits(machInst, 20) << 1) |
                                              bits(machInst, 19);
                            switch (b_20_19) {
                              case 0x0:
                                return decodeSveFpFastReduc(machInst);
                              case 0x1:
                                if (bits(machInst, 12)) {
                                    return decodeSveFpUnaryUnpred(machInst);
                                } else {
                                    return new Unknown64(machInst);
                                }
                              case 0x2:
                                return decodeSveFpCmpZero(machInst);
                              case 0x3:
                                return decodeSveFpAccumReduc(machInst);
                            }
                            break;
                        }
                      case 0x2:
                        return decodeSveFpArithPred(machInst);
                      case 0x3:
                        return decodeSveFpUnaryPred(machInst);
                    }
                }
                break;
            }
          case 0x3:
            return decodeSveFpFusedMulAdd(machInst);
        }
        return new Unknown64(machInst);
    }

    // Decode the SVE memory encoding groups: bits 30 and 29 select
    // between 32-bit gather loads, contiguous loads, 64-bit gather
    // loads and stores.
    StaticInstPtr
    decodeSveMem(ExtMachInst machInst)
    {
        uint8_t b_30_29 = bits(machInst, 30, 29);
        switch (b_30_29) {
          case 0x0:
            return decodeSveMemGather32(machInst);
          case 0x1:
            return decodeSveMemContigLoad(machInst);
          case 0x2:
            return decodeSveMemGather64(machInst);
          case 0x3:
            return decodeSveMemStore(machInst);
        }
        return new Unknown64(machInst);
    }

} // namespace Aarch64
}};