// sve_top_level.isa revision 13759:9941fca869a9
1// Copyright (c) 2017-2019 ARM Limited
2// All rights reserved
3//
4// The license below extends only to copyright in the software and shall
5// not be construed as granting a license to any other intellectual
6// property including but not limited to intellectual property relating
7// to a hardware implementation of the functionality of the software
8// licensed hereunder.  You may use the software subject to the license
9// terms below provided that you ensure that this notice is replicated
10// unmodified and in its entirety in all distributions of the software,
11// modified or unmodified, in source code or in binary form.
12//
13// Redistribution and use in source and binary forms, with or without
14// modification, are permitted provided that the following conditions are
15// met: redistributions of source code must retain the above copyright
16// notice, this list of conditions and the following disclaimer;
17// redistributions in binary form must reproduce the above copyright
18// notice, this list of conditions and the following disclaimer in the
19// documentation and/or other materials provided with the distribution;
20// neither the name of the copyright holders nor the names of its
21// contributors may be used to endorse or promote products derived from
22// this software without specific prior written permission.
23//
24// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
25// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
26// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
27// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
28// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
29// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
30// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
31// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
32// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
33// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
34// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35//
36// Authors: Giacomo Gabrielli
37
38/// @file
39/// SVE top-level decoder.
40
output header {{
namespace Aarch64
{
    // Second-level decoders for the SVE instruction groups.  Each function
    // takes the raw machine instruction and returns the decoded StaticInst
    // for its group.  Definitions live in the other SVE .isa files; the
    // top-level decoders below dispatch to them.

    // SVE integer instruction groups.
    StaticInstPtr decodeSveIntArithBinPred(ExtMachInst machInst);
    StaticInstPtr decodeSveIntReduc(ExtMachInst machInst);
    StaticInstPtr decodeSveShiftByImmPred(ExtMachInst machInst);
    StaticInstPtr decodeSveIntArithUnaryPred(ExtMachInst machInst);
    StaticInstPtr decodeSveIntMulAdd(ExtMachInst machInst);
    StaticInstPtr decodeSveIntArithUnpred(ExtMachInst machInst);
    StaticInstPtr decodeSveIntLogUnpred(ExtMachInst machInst);
    StaticInstPtr decodeSveIndexGen(ExtMachInst machInst);
    StaticInstPtr decodeSveStackAlloc(ExtMachInst machInst);
    StaticInstPtr decodeSveShiftByImmUnpred(ExtMachInst machInst);
    StaticInstPtr decodeSveCompVecAddr(ExtMachInst machInst);
    StaticInstPtr decodeSveIntMiscUnpred(ExtMachInst machInst);
    StaticInstPtr decodeSveElemCount(ExtMachInst machInst);
    StaticInstPtr decodeSveLogMaskImm(ExtMachInst machInst);
    StaticInstPtr decodeSveIntWideImmPred(ExtMachInst machInst);
    StaticInstPtr decodeSvePermExtract(ExtMachInst machInst);
    StaticInstPtr decodeSvePermUnpred(ExtMachInst machInst);
    StaticInstPtr decodeSvePermPredicates(ExtMachInst machInst);
    StaticInstPtr decodeSvePermIntlv(ExtMachInst machInst);
    StaticInstPtr decodeSvePermPred(ExtMachInst machInst);
    StaticInstPtr decodeSveSelVec(ExtMachInst machInst);
    StaticInstPtr decodeSveIntCmpVec(ExtMachInst machInst);
    StaticInstPtr decodeSveIntCmpUImm(ExtMachInst machInst);
    StaticInstPtr decodeSveIntCmpSImm(ExtMachInst machInst);
    StaticInstPtr decodeSvePredGen(ExtMachInst machInst);
    StaticInstPtr decodeSvePredCount(ExtMachInst machInst);
    StaticInstPtr decodeSveIntCmpSca(ExtMachInst machInst);
    StaticInstPtr decodeSveIntWideImmUnpred(ExtMachInst machInst);

    // SVE integer multiply(-add) groups.
    StaticInstPtr decodeSveMultiplyAddUnpred(ExtMachInst machInst);
    StaticInstPtr decodeSveMultiplyIndexed(ExtMachInst machInst);

    // SVE floating-point instruction groups.
    StaticInstPtr decodeSveFpFastReduc(ExtMachInst machInst);
    StaticInstPtr decodeSveFpUnaryUnpred(ExtMachInst machInst);
    StaticInstPtr decodeSveFpCmpZero(ExtMachInst machInst);
    StaticInstPtr decodeSveFpAccumReduc(ExtMachInst machInst);
    StaticInstPtr decodeSveFpArithUnpred(ExtMachInst machInst);
    StaticInstPtr decodeSveFpArithPred(ExtMachInst machInst);
    StaticInstPtr decodeSveFpUnaryPred(ExtMachInst machInst);
    StaticInstPtr decodeSveFpCmpVec(ExtMachInst machInst);
    StaticInstPtr decodeSveFpFusedMulAdd(ExtMachInst machInst);
    StaticInstPtr decodeSveFpCplxAdd(ExtMachInst machInst);
    StaticInstPtr decodeSveFpCplxMulAddVec(ExtMachInst machInst);
    StaticInstPtr decodeSveFpMulAddIndexed(ExtMachInst machInst);
    StaticInstPtr decodeSveFpCplxMulAddIndexed(ExtMachInst machInst);
    StaticInstPtr decodeSveFpMulIndexed(ExtMachInst machInst);

    // SVE memory instruction groups.
    StaticInstPtr decodeSveMemGather32(ExtMachInst machInst);
    StaticInstPtr decodeSveMemContigLoad(ExtMachInst machInst);
    StaticInstPtr decodeSveMemGather64(ExtMachInst machInst);
    StaticInstPtr decodeSveMemStore(ExtMachInst machInst);
}
}};
97
98output decoder {{
99namespace Aarch64
100{
101
102    StaticInstPtr
103    decodeSveInt(ExtMachInst machInst)
104    {
105        uint8_t b_29_24_21 = (bits(machInst, 29) << 2) |
106                             (bits(machInst, 24) << 1) |
107                             bits(machInst, 21);
108        switch (b_29_24_21) {
109          case 0x0:
110            {
111                if (bits(machInst, 14)) {
112                    return decodeSveIntMulAdd(machInst);
113                } else {
114                    uint8_t b_15_13 = (bits(machInst, 15) << 1) |
115                                      bits(machInst, 13);
116                    switch (b_15_13) {
117                      case 0x0:
118                        if (bits(machInst, 30)) {
119                            return decodeSveMultiplyAddUnpred(machInst);
120                        } else {
121                            return decodeSveIntArithBinPred(machInst);
122                        }
123                      case 0x1:
124                        return decodeSveIntReduc(machInst);
125                      case 0x2:
126                        return decodeSveShiftByImmPred(machInst);
127                      case 0x3:
128                        return decodeSveIntArithUnaryPred(machInst);
129                    }
130                }
131            }
132          case 0x1:
133            {
134                uint8_t b_15_14 = bits(machInst, 15, 14);
135                uint8_t b_13 = bits(machInst, 13);
136                uint8_t b_12 = bits(machInst, 12);
137                switch (b_15_14) {
138                  case 0x0:
139                    if (b_13) {
140                        return decodeSveIntLogUnpred(machInst);
141                    } else {
142                        if (bits(machInst, 30)) {
143                            return decodeSveMultiplyIndexed(machInst);
144                        } else {
145                            return decodeSveIntArithUnpred(machInst);
146                        }
147                    }
148                  case 0x1:
149                    if (b_13) {
150                        return new Unknown64(machInst);
151                    } else if (b_12) {
152                        return decodeSveStackAlloc(machInst);
153                    } else {
154                        return decodeSveIndexGen(machInst);
155                    }
156                  case 0x2:
157                    if (b_13) {
158                        if (b_12) {
159                            return decodeSveIntMiscUnpred(machInst);
160                        } else {
161                            return decodeSveCompVecAddr(machInst);
162                        }
163                    } else {
164                        return decodeSveShiftByImmUnpred(machInst);
165                    }
166                  case 0x3:
167                    return decodeSveElemCount(machInst);
168                }
169            }
170          case 0x2:
171            if (bits(machInst, 20)) {
172                return decodeSveIntWideImmPred(machInst);
173            } else {
174                return decodeSveLogMaskImm(machInst);
175            }
176          case 0x3:
177            {
178                uint8_t b_15_14 = bits(machInst, 15, 14);
179                uint8_t b_13 = bits(machInst, 13);
180                switch (b_15_14) {
181                  case 0x0:
182                    if (b_13) {
183                        return decodeSvePermUnpred(machInst);
184                    } else {
185                        return decodeSvePermExtract(machInst);
186                    }
187                  case 0x1:
188                    if (b_13) {
189                        return decodeSvePermIntlv(machInst);
190                    } else {
191                        return decodeSvePermPredicates(machInst);
192                    }
193                  case 0x2:
194                    return decodeSvePermPred(machInst);
195                  case 0x3:
196                    return decodeSveSelVec(machInst);
197                }
198            }
199          case 0x4:
200            return decodeSveIntCmpVec(machInst);
201          case 0x5:
202            return decodeSveIntCmpUImm(machInst);
203          case 0x6:
204            if (bits(machInst, 14)) {
205                return decodeSvePredGen(machInst);
206            } else {
207                return decodeSveIntCmpSImm(machInst);
208            }
209          case 0x7:
210            {
211                uint8_t b_15_14 = bits(machInst, 15, 14);
212                switch (b_15_14) {
213                  case 0x0:
214                    return decodeSveIntCmpSca(machInst);
215                  case 0x1:
216                    return new Unknown64(machInst);
217                  case 0x2:
218                    return decodeSvePredCount(machInst);
219                  case 0x3:
220                    return decodeSveIntWideImmUnpred(machInst);
221                }
222            }
223        }
224        return new Unknown64(machInst);
225    }
226
227    StaticInstPtr
228    decodeSveFp(ExtMachInst machInst)
229    {
230        uint8_t b_24_21 = (bits(machInst, 24) << 1) |
231                          bits(machInst, 21);
232        switch (b_24_21) {
233          case 0x0:
234            if (!bits(machInst, 15)) {
235                return decodeSveFpCplxMulAddVec(machInst);
236            } else if((bits(machInst, 20, 17) | bits(machInst, 14, 13)) == 0) {
237                return decodeSveFpCplxAdd(machInst);
238            }
239            return new Unknown64(machInst);
240          case 0x1:
241            if (bits(machInst, 15, 12) == 1) {
242                return decodeSveFpCplxMulAddIndexed(machInst);
243            }
244            switch (bits(machInst, 13, 11)) {
245              case 0:
246                return decodeSveFpMulAddIndexed(machInst);
247              case 4:
248                if (!bits(machInst, 10))
249                    return decodeSveFpMulIndexed(machInst);
250                M5_FALLTHROUGH;
251              default:
252                return new Unknown64(machInst);
253            }
254          case 0x2:
255            {
256                if (bits(machInst, 14)) {
257                    return decodeSveFpCmpVec(machInst);
258                } else {
259                    uint8_t b_15_13 = (bits(machInst, 15) << 1) |
260                                      bits(machInst, 13);
261                    switch (b_15_13) {
262                      case 0x0:
263                        return decodeSveFpArithUnpred(machInst);
264                      case 0x1:
265                        {
266                            uint8_t b_20_19 = (bits(machInst, 20) << 1) |
267                                              bits(machInst, 19);
268                            switch (b_20_19) {
269                              case 0x0:
270                                  return decodeSveFpFastReduc(machInst);
271                              case 0x1:
272                                  if (bits(machInst, 12)) {
273                                      return decodeSveFpUnaryUnpred(machInst);
274                                  } else {
275                                      return new Unknown64(machInst);
276                                  }
277                              case 0x2:
278                                  return decodeSveFpCmpZero(machInst);
279                              case 0x3:
280                                  return decodeSveFpAccumReduc(machInst);
281                            }
282                        }
283                      case 0x2:
284                        return decodeSveFpArithPred(machInst);
285                      case 0x3:
286                        return decodeSveFpUnaryPred(machInst);
287                    }
288                }
289            }
290          case 0x3:
291            return decodeSveFpFusedMulAdd(machInst);
292        }
293        return new Unknown64(machInst);
294    }
295
296    StaticInstPtr
297    decodeSveMem(ExtMachInst machInst)
298    {
299        uint8_t b_30_29 = bits(machInst, 30, 29);
300        switch (b_30_29) {
301          case 0x0:
302            return decodeSveMemGather32(machInst);
303          case 0x1:
304            return decodeSveMemContigLoad(machInst);
305          case 0x2:
306            return decodeSveMemGather64(machInst);
307          case 0x3:
308            return decodeSveMemStore(machInst);
309        }
310        return new Unknown64(machInst);
311    }
312
313}  // namespace Aarch64
314}};
315