// macromem.cc: diff view between revisions 9250:dab0f29394f0 and 9368:3cd40209af8d
1/*
2 * Copyright (c) 2010 ARM Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Copyright (c) 2007-2008 The Florida State University
15 * All rights reserved.
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions are
19 * met: redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer;
21 * redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution;
24 * neither the name of the copyright holders nor the names of its
25 * contributors may be used to endorse or promote products derived from
26 * this software without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 *
40 * Authors: Stephen Hines
41 */
42
43#include <sstream>
44
45#include "arch/arm/insts/macromem.hh"
46#include "arch/arm/generated/decoder.hh"
47
48using namespace std;
49using namespace ArmISAInst;
50
51namespace ArmISA
52{
53
54MacroMemOp::MacroMemOp(const char *mnem, ExtMachInst machInst,
55 OpClass __opClass, IntRegIndex rn,
56 bool index, bool up, bool user, bool writeback,
57 bool load, uint32_t reglist) :
58 PredMacroOp(mnem, machInst, __opClass)
59{
60 uint32_t regs = reglist;
61 uint32_t ones = number_of_ones(reglist);
62 // Remember that writeback adds a uop or two and the temp register adds one
63 numMicroops = ones + (writeback ? (load ? 2 : 1) : 0) + 1;
64
65 // It's technically legal to do a lot of nothing
66 if (!ones)
67 numMicroops = 1;
68
69 microOps = new StaticInstPtr[numMicroops];
70 uint32_t addr = 0;
71
72 if (!up)
73 addr = (ones << 2) - 4;
74
75 if (!index)
76 addr += 4;
77
78 StaticInstPtr *uop = microOps;
79
80 // Add 0 to Rn and stick it in ureg0.
81 // This is equivalent to a move.
82 *uop = new MicroAddiUop(machInst, INTREG_UREG0, rn, 0);
83
84 unsigned reg = 0;
85 unsigned regIdx = 0;
86 bool force_user = user & !bits(reglist, 15);
87 bool exception_ret = user & bits(reglist, 15);
88
89 for (int i = 0; i < ones; i++) {
90 // Find the next register.
91 while (!bits(regs, reg))
92 reg++;
93 replaceBits(regs, reg, 0);
94
95 regIdx = reg;
96 if (force_user) {
97 regIdx = intRegInMode(MODE_USER, regIdx);
98 }
99
100 if (load) {
101 if (writeback && i == ones - 1) {
102 // If it's a writeback and this is the last register
103 // do the load into a temporary register which we'll move
104 // into the final one later
105 *++uop = new MicroLdrUop(machInst, INTREG_UREG1, INTREG_UREG0,
106 up, addr);
107 } else {
108 // Otherwise just do it normally
109 if (reg == INTREG_PC && exception_ret) {
110 // This must be the exception return form of ldm.
111 *++uop = new MicroLdrRetUop(machInst, regIdx,
112 INTREG_UREG0, up, addr);
113 } else {
114 *++uop = new MicroLdrUop(machInst, regIdx,
115 INTREG_UREG0, up, addr);
116 if (reg == INTREG_PC) {
117 (*uop)->setFlag(StaticInst::IsControl);
118 if (!(condCode == COND_AL || condCode == COND_UC))
119 (*uop)->setFlag(StaticInst::IsCondControl);
120 else
121 (*uop)->setFlag(StaticInst::IsUncondControl);
122 (*uop)->setFlag(StaticInst::IsIndirectControl);
123 }
124 }
125 }
126 } else {
127 *++uop = new MicroStrUop(machInst, regIdx, INTREG_UREG0, up, addr);
128 }
129
130 if (up)
131 addr += 4;
132 else
133 addr -= 4;
134 }
135
136 if (writeback && ones) {
137 // put the register update after we're done all loading
138 if (up)
139 *++uop = new MicroAddiUop(machInst, rn, rn, ones * 4);
140 else
141 *++uop = new MicroSubiUop(machInst, rn, rn, ones * 4);
142
143 // If this was a load move the last temporary value into place
144 // this way we can't take an exception after we update the base
145 // register.
146 if (load && reg == INTREG_PC && exception_ret) {
147 *++uop = new MicroUopRegMovRet(machInst, 0, INTREG_UREG1);
1/*
2 * Copyright (c) 2010 ARM Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Copyright (c) 2007-2008 The Florida State University
15 * All rights reserved.
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions are
19 * met: redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer;
21 * redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution;
24 * neither the name of the copyright holders nor the names of its
25 * contributors may be used to endorse or promote products derived from
26 * this software without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 *
40 * Authors: Stephen Hines
41 */
42
43#include <sstream>
44
45#include "arch/arm/insts/macromem.hh"
46#include "arch/arm/generated/decoder.hh"
47
48using namespace std;
49using namespace ArmISAInst;
50
51namespace ArmISA
52{
53
54MacroMemOp::MacroMemOp(const char *mnem, ExtMachInst machInst,
55 OpClass __opClass, IntRegIndex rn,
56 bool index, bool up, bool user, bool writeback,
57 bool load, uint32_t reglist) :
58 PredMacroOp(mnem, machInst, __opClass)
59{
60 uint32_t regs = reglist;
61 uint32_t ones = number_of_ones(reglist);
62 // Remember that writeback adds a uop or two and the temp register adds one
63 numMicroops = ones + (writeback ? (load ? 2 : 1) : 0) + 1;
64
65 // It's technically legal to do a lot of nothing
66 if (!ones)
67 numMicroops = 1;
68
69 microOps = new StaticInstPtr[numMicroops];
70 uint32_t addr = 0;
71
72 if (!up)
73 addr = (ones << 2) - 4;
74
75 if (!index)
76 addr += 4;
77
78 StaticInstPtr *uop = microOps;
79
80 // Add 0 to Rn and stick it in ureg0.
81 // This is equivalent to a move.
82 *uop = new MicroAddiUop(machInst, INTREG_UREG0, rn, 0);
83
84 unsigned reg = 0;
85 unsigned regIdx = 0;
86 bool force_user = user & !bits(reglist, 15);
87 bool exception_ret = user & bits(reglist, 15);
88
89 for (int i = 0; i < ones; i++) {
90 // Find the next register.
91 while (!bits(regs, reg))
92 reg++;
93 replaceBits(regs, reg, 0);
94
95 regIdx = reg;
96 if (force_user) {
97 regIdx = intRegInMode(MODE_USER, regIdx);
98 }
99
100 if (load) {
101 if (writeback && i == ones - 1) {
102 // If it's a writeback and this is the last register
103 // do the load into a temporary register which we'll move
104 // into the final one later
105 *++uop = new MicroLdrUop(machInst, INTREG_UREG1, INTREG_UREG0,
106 up, addr);
107 } else {
108 // Otherwise just do it normally
109 if (reg == INTREG_PC && exception_ret) {
110 // This must be the exception return form of ldm.
111 *++uop = new MicroLdrRetUop(machInst, regIdx,
112 INTREG_UREG0, up, addr);
113 } else {
114 *++uop = new MicroLdrUop(machInst, regIdx,
115 INTREG_UREG0, up, addr);
116 if (reg == INTREG_PC) {
117 (*uop)->setFlag(StaticInst::IsControl);
118 if (!(condCode == COND_AL || condCode == COND_UC))
119 (*uop)->setFlag(StaticInst::IsCondControl);
120 else
121 (*uop)->setFlag(StaticInst::IsUncondControl);
122 (*uop)->setFlag(StaticInst::IsIndirectControl);
123 }
124 }
125 }
126 } else {
127 *++uop = new MicroStrUop(machInst, regIdx, INTREG_UREG0, up, addr);
128 }
129
130 if (up)
131 addr += 4;
132 else
133 addr -= 4;
134 }
135
136 if (writeback && ones) {
137 // put the register update after we're done all loading
138 if (up)
139 *++uop = new MicroAddiUop(machInst, rn, rn, ones * 4);
140 else
141 *++uop = new MicroSubiUop(machInst, rn, rn, ones * 4);
142
143 // If this was a load move the last temporary value into place
144 // this way we can't take an exception after we update the base
145 // register.
146 if (load && reg == INTREG_PC && exception_ret) {
147 *++uop = new MicroUopRegMovRet(machInst, 0, INTREG_UREG1);
148 if (!(condCode == COND_AL || condCode == COND_UC))
149 (*uop)->setFlag(StaticInst::IsCondControl);
150 else
151 (*uop)->setFlag(StaticInst::IsUncondControl);
148 } else if (load) {
149 *++uop = new MicroUopRegMov(machInst, regIdx, INTREG_UREG1);
150 if (reg == INTREG_PC) {
151 (*uop)->setFlag(StaticInst::IsControl);
152 (*uop)->setFlag(StaticInst::IsCondControl);
153 (*uop)->setFlag(StaticInst::IsIndirectControl);
154 // This is created as a RAS POP
155 if (rn == INTREG_SP)
156 (*uop)->setFlag(StaticInst::IsReturn);
157
158 }
159 }
160 }
161
162 (*uop)->setLastMicroop();
163
164 for (StaticInstPtr *curUop = microOps;
165 !(*curUop)->isLastMicroop(); curUop++) {
166 MicroOp * uopPtr = dynamic_cast<MicroOp *>(curUop->get());
167 assert(uopPtr);
168 uopPtr->setDelayedCommit();
169 }
170}
171
// Build the microop sequence for a NEON VLDn multiple-structure load:
// one or two bulk loads into either the architectural FP registers or the
// microcode scratch registers, an optional base writeback, and (for n > 1)
// deinterleave microops that scatter the loaded data into the destination
// registers with the requested stride.
//
// Parameters:
//   elems - number of interleaved elements per structure (n of VLDn).
//   rn    - integer base address register.
//   vd    - first destination NEON register (D-register index).
//   regs  - total number of D registers transferred.
//   inc   - register stride between elements.
//   size  - log2 of the element size in bytes.
//   align - alignment constraint bits passed to the memory microops.
//   rm    - index register; 15 means "no writeback" (see wb below).
VldMultOp::VldMultOp(const char *mnem, ExtMachInst machInst, OpClass __opClass,
                     unsigned elems, RegIndex rn, RegIndex vd, unsigned regs,
                     unsigned inc, uint32_t size, uint32_t align, RegIndex rm) :
    PredMacroOp(mnem, machInst, __opClass)
{
    assert(regs > 0 && regs <= 4);
    assert(regs % elems == 0);

    // At most 16 bytes per load microop, so > 2 D registers needs two loads.
    numMicroops = (regs > 2) ? 2 : 1;
    bool wb = (rm != 15);
    bool deinterleave = (elems > 1);

    if (wb) numMicroops++;
    if (deinterleave) numMicroops += (regs / elems);
    microOps = new StaticInstPtr[numMicroops];

    // Deinterleaved data is loaded into scratch registers first (starting at
    // NumFloatArchRegs); otherwise load straight into vd.
    RegIndex rMid = deinterleave ? NumFloatArchRegs : vd * 2;

    uint32_t noAlign = TLB::MustBeOne;

    unsigned uopIdx = 0;
    // Emit the bulk load(s); only the first load carries the alignment check.
    switch (regs) {
      case 4:
        microOps[uopIdx++] = newNeonMemInst<MicroLdrNeon16Uop>(
                size, machInst, rMid, rn, 0, align);
        microOps[uopIdx++] = newNeonMemInst<MicroLdrNeon16Uop>(
                size, machInst, rMid + 4, rn, 16, noAlign);
        break;
      case 3:
        microOps[uopIdx++] = newNeonMemInst<MicroLdrNeon16Uop>(
                size, machInst, rMid, rn, 0, align);
        microOps[uopIdx++] = newNeonMemInst<MicroLdrNeon8Uop>(
                size, machInst, rMid + 4, rn, 16, noAlign);
        break;
      case 2:
        microOps[uopIdx++] = newNeonMemInst<MicroLdrNeon16Uop>(
                size, machInst, rMid, rn, 0, align);
        break;
      case 1:
        microOps[uopIdx++] = newNeonMemInst<MicroLdrNeon8Uop>(
                size, machInst, rMid, rn, 0, align);
        break;
      default:
        // Unknown number of registers
        microOps[uopIdx++] = new Unknown(machInst);
    }
    if (wb) {
        // rm == 13 encodes "writeback by transfer size"; any other (non-15)
        // register adds rm to the base.
        if (rm != 15 && rm != 13) {
            microOps[uopIdx++] =
                new MicroAddUop(machInst, rn, rn, rm, 0, ArmISA::LSL);
        } else {
            microOps[uopIdx++] =
                new MicroAddiUop(machInst, rn, rn, regs * 8);
        }
    }
    if (deinterleave) {
        // Scatter the scratch registers into vd with the element stride.
        switch (elems) {
          case 4:
            assert(regs == 4);
            microOps[uopIdx++] = newNeonMixInst<MicroDeintNeon8Uop>(
                    size, machInst, vd * 2, rMid, inc * 2);
            break;
          case 3:
            assert(regs == 3);
            microOps[uopIdx++] = newNeonMixInst<MicroDeintNeon6Uop>(
                    size, machInst, vd * 2, rMid, inc * 2);
            break;
          case 2:
            assert(regs == 4 || regs == 2);
            if (regs == 4) {
                microOps[uopIdx++] = newNeonMixInst<MicroDeintNeon4Uop>(
                        size, machInst, vd * 2, rMid, inc * 2);
                microOps[uopIdx++] = newNeonMixInst<MicroDeintNeon4Uop>(
                        size, machInst, vd * 2 + 2, rMid + 4, inc * 2);
            } else {
                microOps[uopIdx++] = newNeonMixInst<MicroDeintNeon4Uop>(
                        size, machInst, vd * 2, rMid, inc * 2);
            }
            break;
          default:
            // Bad number of elements to deinterleave
            microOps[uopIdx++] = new Unknown(machInst);
        }
    }
    assert(uopIdx == numMicroops);

    // All but the last microop commit together with the last one.
    for (unsigned i = 0; i < numMicroops - 1; i++) {
        MicroOp * uopPtr = dynamic_cast<MicroOp *>(microOps[i].get());
        assert(uopPtr);
        uopPtr->setDelayedCommit();
    }
    microOps[numMicroops - 1]->setLastMicroop();
}
265
// Build the microop sequence for a NEON VLDn single-structure load (one
// lane, or all lanes when 'all' is set): one load of loadSize bytes into
// scratch register ufp0, optional base writeback, then unpack microops that
// replicate/insert the loaded element(s) into the destination registers.
//
// Parameters mirror VldMultOp; additionally:
//   all  - load-to-all-lanes form when true.
//   lane - destination lane for the single-lane form.
VldSingleOp::VldSingleOp(const char *mnem, ExtMachInst machInst,
                         OpClass __opClass, bool all, unsigned elems,
                         RegIndex rn, RegIndex vd, unsigned regs,
                         unsigned inc, uint32_t size, uint32_t align,
                         RegIndex rm, unsigned lane) :
    PredMacroOp(mnem, machInst, __opClass)
{
    assert(regs > 0 && regs <= 4);
    assert(regs % elems == 0);

    unsigned eBytes = (1 << size);
    unsigned loadSize = eBytes * elems;
    // Number of single-precision registers the transfer spans; only used in
    // the asserts below (hence M5_VAR_USED).
    unsigned loadRegs M5_VAR_USED = (loadSize + sizeof(FloatRegBits) - 1) /
                                    sizeof(FloatRegBits);

    assert(loadRegs > 0 && loadRegs <= 4);

    numMicroops = 1;
    bool wb = (rm != 15);

    if (wb) numMicroops++;
    numMicroops += (regs / elems);
    microOps = new StaticInstPtr[numMicroops];

    // First microcode scratch FP register.
    RegIndex ufp0 = NumFloatArchRegs;

    unsigned uopIdx = 0;
    // Emit one load microop sized to the whole structure; the element type
    // selects the access granularity for alignment/endianness purposes.
    switch (loadSize) {
      case 1:
        microOps[uopIdx++] = new MicroLdrNeon1Uop<uint8_t>(
            machInst, ufp0, rn, 0, align);
        break;
      case 2:
        if (eBytes == 2) {
            microOps[uopIdx++] = new MicroLdrNeon2Uop<uint16_t>(
                machInst, ufp0, rn, 0, align);
        } else {
            microOps[uopIdx++] = new MicroLdrNeon2Uop<uint8_t>(
                machInst, ufp0, rn, 0, align);
        }
        break;
      case 3:
        microOps[uopIdx++] = new MicroLdrNeon3Uop<uint8_t>(
            machInst, ufp0, rn, 0, align);
        break;
      case 4:
        switch (eBytes) {
          case 1:
            microOps[uopIdx++] = new MicroLdrNeon4Uop<uint8_t>(
                machInst, ufp0, rn, 0, align);
            break;
          case 2:
            microOps[uopIdx++] = new MicroLdrNeon4Uop<uint16_t>(
                machInst, ufp0, rn, 0, align);
            break;
          case 4:
            microOps[uopIdx++] = new MicroLdrNeon4Uop<uint32_t>(
                machInst, ufp0, rn, 0, align);
            break;
        }
        break;
      case 6:
        microOps[uopIdx++] = new MicroLdrNeon6Uop<uint16_t>(
            machInst, ufp0, rn, 0, align);
        break;
      case 8:
        switch (eBytes) {
          case 2:
            microOps[uopIdx++] = new MicroLdrNeon8Uop<uint16_t>(
                machInst, ufp0, rn, 0, align);
            break;
          case 4:
            microOps[uopIdx++] = new MicroLdrNeon8Uop<uint32_t>(
                machInst, ufp0, rn, 0, align);
            break;
        }
        break;
      case 12:
        microOps[uopIdx++] = new MicroLdrNeon12Uop<uint32_t>(
            machInst, ufp0, rn, 0, align);
        break;
      case 16:
        microOps[uopIdx++] = new MicroLdrNeon16Uop<uint32_t>(
            machInst, ufp0, rn, 0, align);
        break;
      default:
        // Unrecognized load size
        microOps[uopIdx++] = new Unknown(machInst);
    }
    if (wb) {
        // rm == 13 encodes "writeback by transfer size"; any other (non-15)
        // register adds rm to the base.
        if (rm != 15 && rm != 13) {
            microOps[uopIdx++] =
                new MicroAddUop(machInst, rn, rn, rm, 0, ArmISA::LSL);
        } else {
            microOps[uopIdx++] =
                new MicroAddiUop(machInst, rn, rn, loadSize);
        }
    }
    // Unpack ufp0 into the destination register(s): the *All* variants
    // replicate to every lane, the others insert into one lane.
    switch (elems) {
      case 4:
        assert(regs == 4);
        switch (size) {
          case 0:
            if (all) {
                microOps[uopIdx++] = new MicroUnpackAllNeon2to8Uop<uint8_t>(
                    machInst, vd * 2, ufp0, inc * 2);
            } else {
                microOps[uopIdx++] = new MicroUnpackNeon2to8Uop<uint8_t>(
                    machInst, vd * 2, ufp0, inc * 2, lane);
            }
            break;
          case 1:
            if (all) {
                microOps[uopIdx++] = new MicroUnpackAllNeon2to8Uop<uint16_t>(
                    machInst, vd * 2, ufp0, inc * 2);
            } else {
                microOps[uopIdx++] = new MicroUnpackNeon2to8Uop<uint16_t>(
                    machInst, vd * 2, ufp0, inc * 2, lane);
            }
            break;
          case 2:
            if (all) {
                microOps[uopIdx++] = new MicroUnpackAllNeon4to8Uop<uint32_t>(
                    machInst, vd * 2, ufp0, inc * 2);
            } else {
                microOps[uopIdx++] = new MicroUnpackNeon4to8Uop<uint32_t>(
                    machInst, vd * 2, ufp0, inc * 2, lane);
            }
            break;
          default:
            // Bad size
            microOps[uopIdx++] = new Unknown(machInst);
            break;
        }
        break;
      case 3:
        assert(regs == 3);
        switch (size) {
          case 0:
            if (all) {
                microOps[uopIdx++] = new MicroUnpackAllNeon2to6Uop<uint8_t>(
                    machInst, vd * 2, ufp0, inc * 2);
            } else {
                microOps[uopIdx++] = new MicroUnpackNeon2to6Uop<uint8_t>(
                    machInst, vd * 2, ufp0, inc * 2, lane);
            }
            break;
          case 1:
            if (all) {
                microOps[uopIdx++] = new MicroUnpackAllNeon2to6Uop<uint16_t>(
                    machInst, vd * 2, ufp0, inc * 2);
            } else {
                microOps[uopIdx++] = new MicroUnpackNeon2to6Uop<uint16_t>(
                    machInst, vd * 2, ufp0, inc * 2, lane);
            }
            break;
          case 2:
            if (all) {
                microOps[uopIdx++] = new MicroUnpackAllNeon4to6Uop<uint32_t>(
                    machInst, vd * 2, ufp0, inc * 2);
            } else {
                microOps[uopIdx++] = new MicroUnpackNeon4to6Uop<uint32_t>(
                    machInst, vd * 2, ufp0, inc * 2, lane);
            }
            break;
          default:
            // Bad size
            microOps[uopIdx++] = new Unknown(machInst);
            break;
        }
        break;
      case 2:
        assert(regs == 2);
        assert(loadRegs <= 2);
        switch (size) {
          case 0:
            if (all) {
                microOps[uopIdx++] = new MicroUnpackAllNeon2to4Uop<uint8_t>(
                    machInst, vd * 2, ufp0, inc * 2);
            } else {
                microOps[uopIdx++] = new MicroUnpackNeon2to4Uop<uint8_t>(
                    machInst, vd * 2, ufp0, inc * 2, lane);
            }
            break;
          case 1:
            if (all) {
                microOps[uopIdx++] = new MicroUnpackAllNeon2to4Uop<uint16_t>(
                    machInst, vd * 2, ufp0, inc * 2);
            } else {
                microOps[uopIdx++] = new MicroUnpackNeon2to4Uop<uint16_t>(
                    machInst, vd * 2, ufp0, inc * 2, lane);
            }
            break;
          case 2:
            if (all) {
                microOps[uopIdx++] = new MicroUnpackAllNeon2to4Uop<uint32_t>(
                    machInst, vd * 2, ufp0, inc * 2);
            } else {
                microOps[uopIdx++] = new MicroUnpackNeon2to4Uop<uint32_t>(
                    machInst, vd * 2, ufp0, inc * 2, lane);
            }
            break;
          default:
            // Bad size
            microOps[uopIdx++] = new Unknown(machInst);
            break;
        }
        break;
      case 1:
        // VLD1 to-all-lanes may write two registers; emit one unpack per
        // destination register.
        assert(regs == 1 || (all && regs == 2));
        assert(loadRegs <= 2);
        for (unsigned offset = 0; offset < regs; offset++) {
            switch (size) {
              case 0:
                if (all) {
                    microOps[uopIdx++] =
                        new MicroUnpackAllNeon2to2Uop<uint8_t>(
                            machInst, (vd + offset) * 2, ufp0, inc * 2);
                } else {
                    microOps[uopIdx++] =
                        new MicroUnpackNeon2to2Uop<uint8_t>(
                            machInst, (vd + offset) * 2, ufp0, inc * 2, lane);
                }
                break;
              case 1:
                if (all) {
                    microOps[uopIdx++] =
                        new MicroUnpackAllNeon2to2Uop<uint16_t>(
                            machInst, (vd + offset) * 2, ufp0, inc * 2);
                } else {
                    microOps[uopIdx++] =
                        new MicroUnpackNeon2to2Uop<uint16_t>(
                            machInst, (vd + offset) * 2, ufp0, inc * 2, lane);
                }
                break;
              case 2:
                if (all) {
                    microOps[uopIdx++] =
                        new MicroUnpackAllNeon2to2Uop<uint32_t>(
                            machInst, (vd + offset) * 2, ufp0, inc * 2);
                } else {
                    microOps[uopIdx++] =
                        new MicroUnpackNeon2to2Uop<uint32_t>(
                            machInst, (vd + offset) * 2, ufp0, inc * 2, lane);
                }
                break;
              default:
                // Bad size
                microOps[uopIdx++] = new Unknown(machInst);
                break;
            }
        }
        break;
      default:
        // Bad number of elements to unpack
        microOps[uopIdx++] = new Unknown(machInst);
    }
    assert(uopIdx == numMicroops);

    // All but the last microop commit together with the last one.
    for (unsigned i = 0; i < numMicroops - 1; i++) {
        MicroOp * uopPtr = dynamic_cast<MicroOp *>(microOps[i].get());
        assert(uopPtr);
        uopPtr->setDelayedCommit();
    }
    microOps[numMicroops - 1]->setLastMicroop();
}
532
// Build the microop sequence for a NEON VSTn multiple-structure store: for
// n > 1, interleave microops first gather the source registers into the
// microcode scratch registers, then one or two bulk stores write them out,
// followed by an optional base writeback. Mirror image of VldMultOp; the
// parameters have the same meaning (vd is the first *source* register here).
VstMultOp::VstMultOp(const char *mnem, ExtMachInst machInst, OpClass __opClass,
                     unsigned elems, RegIndex rn, RegIndex vd, unsigned regs,
                     unsigned inc, uint32_t size, uint32_t align, RegIndex rm) :
    PredMacroOp(mnem, machInst, __opClass)
{
    assert(regs > 0 && regs <= 4);
    assert(regs % elems == 0);

    // At most 16 bytes per store microop, so > 2 D registers needs two stores.
    numMicroops = (regs > 2) ? 2 : 1;
    bool wb = (rm != 15);
    bool interleave = (elems > 1);

    if (wb) numMicroops++;
    if (interleave) numMicroops += (regs / elems);
    microOps = new StaticInstPtr[numMicroops];

    uint32_t noAlign = TLB::MustBeOne;

    // Interleaved data is gathered into scratch registers (starting at
    // NumFloatArchRegs) before storing; otherwise store straight from vd.
    RegIndex rMid = interleave ? NumFloatArchRegs : vd * 2;

    unsigned uopIdx = 0;
    if (interleave) {
        // Gather vd..(stride inc) into the scratch registers.
        switch (elems) {
          case 4:
            assert(regs == 4);
            microOps[uopIdx++] = newNeonMixInst<MicroInterNeon8Uop>(
                    size, machInst, rMid, vd * 2, inc * 2);
            break;
          case 3:
            assert(regs == 3);
            microOps[uopIdx++] = newNeonMixInst<MicroInterNeon6Uop>(
                    size, machInst, rMid, vd * 2, inc * 2);
            break;
          case 2:
            assert(regs == 4 || regs == 2);
            if (regs == 4) {
                microOps[uopIdx++] = newNeonMixInst<MicroInterNeon4Uop>(
                        size, machInst, rMid, vd * 2, inc * 2);
                microOps[uopIdx++] = newNeonMixInst<MicroInterNeon4Uop>(
                        size, machInst, rMid + 4, vd * 2 + 2, inc * 2);
            } else {
                microOps[uopIdx++] = newNeonMixInst<MicroInterNeon4Uop>(
                        size, machInst, rMid, vd * 2, inc * 2);
            }
            break;
          default:
            // Bad number of elements to interleave
            microOps[uopIdx++] = new Unknown(machInst);
        }
    }
    // Emit the bulk store(s); only the first store carries the alignment
    // check.
    switch (regs) {
      case 4:
        microOps[uopIdx++] = newNeonMemInst<MicroStrNeon16Uop>(
                size, machInst, rMid, rn, 0, align);
        microOps[uopIdx++] = newNeonMemInst<MicroStrNeon16Uop>(
                size, machInst, rMid + 4, rn, 16, noAlign);
        break;
      case 3:
        microOps[uopIdx++] = newNeonMemInst<MicroStrNeon16Uop>(
                size, machInst, rMid, rn, 0, align);
        microOps[uopIdx++] = newNeonMemInst<MicroStrNeon8Uop>(
                size, machInst, rMid + 4, rn, 16, noAlign);
        break;
      case 2:
        microOps[uopIdx++] = newNeonMemInst<MicroStrNeon16Uop>(
                size, machInst, rMid, rn, 0, align);
        break;
      case 1:
        microOps[uopIdx++] = newNeonMemInst<MicroStrNeon8Uop>(
                size, machInst, rMid, rn, 0, align);
        break;
      default:
        // Unknown number of registers
        microOps[uopIdx++] = new Unknown(machInst);
    }
    if (wb) {
        // rm == 13 encodes "writeback by transfer size"; any other (non-15)
        // register adds rm to the base.
        if (rm != 15 && rm != 13) {
            microOps[uopIdx++] =
                new MicroAddUop(machInst, rn, rn, rm, 0, ArmISA::LSL);
        } else {
            microOps[uopIdx++] =
                new MicroAddiUop(machInst, rn, rn, regs * 8);
        }
    }
    assert(uopIdx == numMicroops);

    // All but the last microop commit together with the last one.
    for (unsigned i = 0; i < numMicroops - 1; i++) {
        MicroOp * uopPtr = dynamic_cast<MicroOp *>(microOps[i].get());
        assert(uopPtr);
        uopPtr->setDelayedCommit();
    }
    microOps[numMicroops - 1]->setLastMicroop();
}
626
// Build the microop sequence for a NEON VSTn single-structure (one lane)
// store: pack microops first extract the selected lane from the source
// register(s) into scratch register ufp0, then one store of storeSize bytes
// writes it out, followed by an optional base writeback. Mirror image of
// VldSingleOp; note the 'all' form is not valid for stores (asserted below).
VstSingleOp::VstSingleOp(const char *mnem, ExtMachInst machInst,
                         OpClass __opClass, bool all, unsigned elems,
                         RegIndex rn, RegIndex vd, unsigned regs,
                         unsigned inc, uint32_t size, uint32_t align,
                         RegIndex rm, unsigned lane) :
    PredMacroOp(mnem, machInst, __opClass)
{
    assert(!all);
    assert(regs > 0 && regs <= 4);
    assert(regs % elems == 0);

    unsigned eBytes = (1 << size);
    unsigned storeSize = eBytes * elems;
    // Number of single-precision registers the transfer spans; only used in
    // the asserts below (hence M5_VAR_USED).
    unsigned storeRegs M5_VAR_USED = (storeSize + sizeof(FloatRegBits) - 1) /
                                     sizeof(FloatRegBits);

    assert(storeRegs > 0 && storeRegs <= 4);

    numMicroops = 1;
    bool wb = (rm != 15);

    if (wb) numMicroops++;
    numMicroops += (regs / elems);
    microOps = new StaticInstPtr[numMicroops];

    // First microcode scratch FP register.
    RegIndex ufp0 = NumFloatArchRegs;

    unsigned uopIdx = 0;
    // Pack the selected lane of the source register(s) into ufp0.
    switch (elems) {
      case 4:
        assert(regs == 4);
        switch (size) {
          case 0:
            microOps[uopIdx++] = new MicroPackNeon8to2Uop<uint8_t>(
                machInst, ufp0, vd * 2, inc * 2, lane);
            break;
          case 1:
            microOps[uopIdx++] = new MicroPackNeon8to2Uop<uint16_t>(
                machInst, ufp0, vd * 2, inc * 2, lane);
            break;
          case 2:
            microOps[uopIdx++] = new MicroPackNeon8to4Uop<uint32_t>(
                machInst, ufp0, vd * 2, inc * 2, lane);
            break;
          default:
            // Bad size
            microOps[uopIdx++] = new Unknown(machInst);
            break;
        }
        break;
      case 3:
        assert(regs == 3);
        switch (size) {
          case 0:
            microOps[uopIdx++] = new MicroPackNeon6to2Uop<uint8_t>(
                machInst, ufp0, vd * 2, inc * 2, lane);
            break;
          case 1:
            microOps[uopIdx++] = new MicroPackNeon6to2Uop<uint16_t>(
                machInst, ufp0, vd * 2, inc * 2, lane);
            break;
          case 2:
            microOps[uopIdx++] = new MicroPackNeon6to4Uop<uint32_t>(
                machInst, ufp0, vd * 2, inc * 2, lane);
            break;
          default:
            // Bad size
            microOps[uopIdx++] = new Unknown(machInst);
            break;
        }
        break;
      case 2:
        assert(regs == 2);
        assert(storeRegs <= 2);
        switch (size) {
          case 0:
            microOps[uopIdx++] = new MicroPackNeon4to2Uop<uint8_t>(
                machInst, ufp0, vd * 2, inc * 2, lane);
            break;
          case 1:
            microOps[uopIdx++] = new MicroPackNeon4to2Uop<uint16_t>(
                machInst, ufp0, vd * 2, inc * 2, lane);
            break;
          case 2:
            microOps[uopIdx++] = new MicroPackNeon4to2Uop<uint32_t>(
                machInst, ufp0, vd * 2, inc * 2, lane);
            break;
          default:
            // Bad size
            microOps[uopIdx++] = new Unknown(machInst);
            break;
        }
        break;
      case 1:
        // One pack per source register (regs > 1 only reachable with 'all',
        // which is asserted false above — kept for symmetry with VldSingleOp).
        assert(regs == 1 || (all && regs == 2));
        assert(storeRegs <= 2);
        for (unsigned offset = 0; offset < regs; offset++) {
            switch (size) {
              case 0:
                microOps[uopIdx++] = new MicroPackNeon2to2Uop<uint8_t>(
                    machInst, ufp0, (vd + offset) * 2, inc * 2, lane);
                break;
              case 1:
                microOps[uopIdx++] = new MicroPackNeon2to2Uop<uint16_t>(
                    machInst, ufp0, (vd + offset) * 2, inc * 2, lane);
                break;
              case 2:
                microOps[uopIdx++] = new MicroPackNeon2to2Uop<uint32_t>(
                    machInst, ufp0, (vd + offset) * 2, inc * 2, lane);
                break;
              default:
                // Bad size
                microOps[uopIdx++] = new Unknown(machInst);
                break;
            }
        }
        break;
      default:
        // Bad number of elements to unpack
        microOps[uopIdx++] = new Unknown(machInst);
    }
    // Store the packed data; the element type selects the access granularity.
    switch (storeSize) {
      case 1:
        microOps[uopIdx++] = new MicroStrNeon1Uop<uint8_t>(
            machInst, ufp0, rn, 0, align);
        break;
      case 2:
        if (eBytes == 2) {
            microOps[uopIdx++] = new MicroStrNeon2Uop<uint16_t>(
                machInst, ufp0, rn, 0, align);
        } else {
            microOps[uopIdx++] = new MicroStrNeon2Uop<uint8_t>(
                machInst, ufp0, rn, 0, align);
        }
        break;
      case 3:
        microOps[uopIdx++] = new MicroStrNeon3Uop<uint8_t>(
            machInst, ufp0, rn, 0, align);
        break;
      case 4:
        switch (eBytes) {
          case 1:
            microOps[uopIdx++] = new MicroStrNeon4Uop<uint8_t>(
                machInst, ufp0, rn, 0, align);
            break;
          case 2:
            microOps[uopIdx++] = new MicroStrNeon4Uop<uint16_t>(
                machInst, ufp0, rn, 0, align);
            break;
          case 4:
            microOps[uopIdx++] = new MicroStrNeon4Uop<uint32_t>(
                machInst, ufp0, rn, 0, align);
            break;
        }
        break;
      case 6:
        microOps[uopIdx++] = new MicroStrNeon6Uop<uint16_t>(
            machInst, ufp0, rn, 0, align);
        break;
      case 8:
        switch (eBytes) {
          case 2:
            microOps[uopIdx++] = new MicroStrNeon8Uop<uint16_t>(
                machInst, ufp0, rn, 0, align);
            break;
          case 4:
            microOps[uopIdx++] = new MicroStrNeon8Uop<uint32_t>(
                machInst, ufp0, rn, 0, align);
            break;
        }
        break;
      case 12:
        microOps[uopIdx++] = new MicroStrNeon12Uop<uint32_t>(
            machInst, ufp0, rn, 0, align);
        break;
      case 16:
        microOps[uopIdx++] = new MicroStrNeon16Uop<uint32_t>(
            machInst, ufp0, rn, 0, align);
        break;
      default:
        // Bad store size
        microOps[uopIdx++] = new Unknown(machInst);
    }
    if (wb) {
        // rm == 13 encodes "writeback by transfer size"; any other (non-15)
        // register adds rm to the base.
        if (rm != 15 && rm != 13) {
            microOps[uopIdx++] =
                new MicroAddUop(machInst, rn, rn, rm, 0, ArmISA::LSL);
        } else {
            microOps[uopIdx++] =
                new MicroAddiUop(machInst, rn, rn, storeSize);
        }
    }
    assert(uopIdx == numMicroops);

    // All but the last microop commit together with the last one.
    for (unsigned i = 0; i < numMicroops - 1; i++) {
        MicroOp * uopPtr = dynamic_cast<MicroOp *>(microOps[i].get());
        assert(uopPtr);
        uopPtr->setDelayedCommit();
    }
    microOps[numMicroops - 1]->setLastMicroop();
}
828
// Build the microop sequence for a VFP load/store multiple (VLDM/VSTM and
// friends): one microop per single register (or two per double, split into
// bottom/top halves), plus an optional base-register writeback.
//
// Parameters:
//   rn        - integer base address register.
//   vd        - first FP register; incremented as microops are emitted.
//   single    - single-precision transfers when true, double when false.
//   up        - ascending addresses when true.
//   writeback - update rn after the transfer.
//   load      - load when true, store when false.
//   offset    - raw offset field: number of single registers (its low bit
//               also distinguishes fldmx/fldmd, see below).
MacroVFPMemOp::MacroVFPMemOp(const char *mnem, ExtMachInst machInst,
                             OpClass __opClass, IntRegIndex rn,
                             RegIndex vd, bool single, bool up,
                             bool writeback, bool load, uint32_t offset) :
    PredMacroOp(mnem, machInst, __opClass)
{
    int i = 0;

    // The lowest order bit selects fldmx (set) or fldmd (clear). These seem
    // to be functionally identical except that fldmx is deprecated. For now
    // we'll assume they're otherwise interchangable.
    int count = (single ? offset : (offset / 2));
    if (count == 0 || count > NumFloatArchRegs)
        warn_once("Bad offset field for VFP load/store multiple.\n");
    if (count == 0) {
        // Force there to be at least one microop so the macroop makes sense.
        writeback = true;
    }
    if (count > NumFloatArchRegs)
        count = NumFloatArchRegs;

    numMicroops = count * (single ? 1 : 2) + (writeback ? 1 : 0);
    microOps = new StaticInstPtr[numMicroops];

    int64_t addr = 0;

    // Descending transfers start from the top of the block.
    if (!up)
        addr = 4 * offset;

    bool tempUp = up;
    for (int j = 0; j < count; j++) {
        if (load) {
            if (single) {
                microOps[i++] = new MicroLdrFpUop(machInst, vd++, rn,
                                                  tempUp, addr);
            } else {
                // Doubles are split into bottom (DB) and top (DT) word uops.
                microOps[i++] = new MicroLdrDBFpUop(machInst, vd++, rn,
                                                    tempUp, addr);
                microOps[i++] = new MicroLdrDTFpUop(machInst, vd++, rn, tempUp,
                                                    addr + (up ? 4 : -4));
            }
        } else {
            if (single) {
                microOps[i++] = new MicroStrFpUop(machInst, vd++, rn,
                                                  tempUp, addr);
            } else {
                microOps[i++] = new MicroStrDBFpUop(machInst, vd++, rn,
                                                    tempUp, addr);
                microOps[i++] = new MicroStrDTFpUop(machInst, vd++, rn, tempUp,
                                                    addr + (up ? 4 : -4));
            }
        }
        if (!tempUp) {
            addr -= (single ? 4 : 8);
            // The microops don't handle negative displacement, so turn if we
            // hit zero, flip polarity and start adding.
            if (addr <= 0) {
                tempUp = true;
                addr = -addr;
            }
        } else {
            addr += (single ? 4 : 8);
        }
    }

    if (writeback) {
        if (up) {
            microOps[i++] =
                new MicroAddiUop(machInst, rn, rn, 4 * offset);
        } else {
            microOps[i++] =
                new MicroSubiUop(machInst, rn, rn, 4 * offset);
        }
    }

    assert(numMicroops == i);
    microOps[numMicroops - 1]->setLastMicroop();

    // All but the last microop commit together with the last one.
    for (StaticInstPtr *curUop = microOps;
            !(*curUop)->isLastMicroop(); curUop++) {
        MicroOp * uopPtr = dynamic_cast<MicroOp *>(curUop->get());
        assert(uopPtr);
        uopPtr->setDelayedCommit();
    }
}
914
915std::string
916MicroIntImmOp::generateDisassembly(Addr pc, const SymbolTable *symtab) const
917{
918 std::stringstream ss;
919 printMnemonic(ss);
920 printReg(ss, ura);
921 ss << ", ";
922 printReg(ss, urb);
923 ss << ", ";
924 ccprintf(ss, "#%d", imm);
925 return ss.str();
926}
927
928std::string
929MicroSetPCCPSR::generateDisassembly(Addr pc, const SymbolTable *symtab) const
930{
931 std::stringstream ss;
932 printMnemonic(ss);
933 ss << "[PC,CPSR]";
934 return ss.str();
935}
936
937std::string
938MicroIntMov::generateDisassembly(Addr pc, const SymbolTable *symtab) const
939{
940 std::stringstream ss;
941 printMnemonic(ss);
942 printReg(ss, ura);
943 ss << ", ";
944 printReg(ss, urb);
945 return ss.str();
946}
947
948std::string
949MicroIntOp::generateDisassembly(Addr pc, const SymbolTable *symtab) const
950{
951 std::stringstream ss;
952 printMnemonic(ss);
953 printReg(ss, ura);
954 ss << ", ";
955 printReg(ss, urb);
956 ss << ", ";
957 printReg(ss, urc);
958 return ss.str();
959}
960
961std::string
962MicroMemOp::generateDisassembly(Addr pc, const SymbolTable *symtab) const
963{
964 std::stringstream ss;
965 printMnemonic(ss);
966 printReg(ss, ura);
967 ss << ", [";
968 printReg(ss, urb);
969 ss << ", ";
970 ccprintf(ss, "#%d", imm);
971 ss << "]";
972 return ss.str();
973}
974
975}
152 } else if (load) {
153 *++uop = new MicroUopRegMov(machInst, regIdx, INTREG_UREG1);
154 if (reg == INTREG_PC) {
155 (*uop)->setFlag(StaticInst::IsControl);
156 (*uop)->setFlag(StaticInst::IsCondControl);
157 (*uop)->setFlag(StaticInst::IsIndirectControl);
158 // This is created as a RAS POP
159 if (rn == INTREG_SP)
160 (*uop)->setFlag(StaticInst::IsReturn);
161
162 }
163 }
164 }
165
166 (*uop)->setLastMicroop();
167
168 for (StaticInstPtr *curUop = microOps;
169 !(*curUop)->isLastMicroop(); curUop++) {
170 MicroOp * uopPtr = dynamic_cast<MicroOp *>(curUop->get());
171 assert(uopPtr);
172 uopPtr->setDelayedCommit();
173 }
174}
175
// NEON VLDn (multiple structures): emits one or two wide load micro-ops,
// an optional base-register writeback micro-op, and, when elems > 1,
// deinterleave micro-ops that spread the loaded data across the
// architectural destination registers.
VldMultOp::VldMultOp(const char *mnem, ExtMachInst machInst, OpClass __opClass,
                     unsigned elems, RegIndex rn, RegIndex vd, unsigned regs,
                     unsigned inc, uint32_t size, uint32_t align, RegIndex rm) :
    PredMacroOp(mnem, machInst, __opClass)
{
    assert(regs > 0 && regs <= 4);
    assert(regs % elems == 0);

    // One 16-byte load micro-op covers up to two double registers; more
    // than two registers need a second load micro-op.
    numMicroops = (regs > 2) ? 2 : 1;
    // rm == 15 encodes "no writeback"; see the writeback block below where
    // rm == 13 selects the immediate form -- NOTE(review): confirm against
    // the decoder's rm conventions.
    bool wb = (rm != 15);
    bool deinterleave = (elems > 1);

    if (wb) numMicroops++;
    if (deinterleave) numMicroops += (regs / elems);
    microOps = new StaticInstPtr[numMicroops];

    // When deinterleaving, load into scratch registers just past the
    // architectural FP registers; otherwise load straight into vd.
    RegIndex rMid = deinterleave ? NumFloatArchRegs : vd * 2;

    // Only the first memory micro-op carries the alignment check.
    uint32_t noAlign = TLB::MustBeOne;

    unsigned uopIdx = 0;
    switch (regs) {
      case 4:
        microOps[uopIdx++] = newNeonMemInst<MicroLdrNeon16Uop>(
                size, machInst, rMid, rn, 0, align);
        microOps[uopIdx++] = newNeonMemInst<MicroLdrNeon16Uop>(
                size, machInst, rMid + 4, rn, 16, noAlign);
        break;
      case 3:
        microOps[uopIdx++] = newNeonMemInst<MicroLdrNeon16Uop>(
                size, machInst, rMid, rn, 0, align);
        microOps[uopIdx++] = newNeonMemInst<MicroLdrNeon8Uop>(
                size, machInst, rMid + 4, rn, 16, noAlign);
        break;
      case 2:
        microOps[uopIdx++] = newNeonMemInst<MicroLdrNeon16Uop>(
                size, machInst, rMid, rn, 0, align);
        break;
      case 1:
        microOps[uopIdx++] = newNeonMemInst<MicroLdrNeon8Uop>(
                size, machInst, rMid, rn, 0, align);
        break;
      default:
        // Unknown number of registers
        microOps[uopIdx++] = new Unknown(machInst);
    }
    if (wb) {
        if (rm != 15 && rm != 13) {
            // Register-offset writeback.
            microOps[uopIdx++] =
                new MicroAddUop(machInst, rn, rn, rm, 0, ArmISA::LSL);
        } else {
            // Immediate writeback: 8 bytes per double register transferred.
            microOps[uopIdx++] =
                new MicroAddiUop(machInst, rn, rn, regs * 8);
        }
    }
    if (deinterleave) {
        switch (elems) {
          case 4:
            assert(regs == 4);
            microOps[uopIdx++] = newNeonMixInst<MicroDeintNeon8Uop>(
                    size, machInst, vd * 2, rMid, inc * 2);
            break;
          case 3:
            assert(regs == 3);
            microOps[uopIdx++] = newNeonMixInst<MicroDeintNeon6Uop>(
                    size, machInst, vd * 2, rMid, inc * 2);
            break;
          case 2:
            assert(regs == 4 || regs == 2);
            if (regs == 4) {
                // Two independent 2-element deinterleaves, one per
                // register pair.
                microOps[uopIdx++] = newNeonMixInst<MicroDeintNeon4Uop>(
                        size, machInst, vd * 2, rMid, inc * 2);
                microOps[uopIdx++] = newNeonMixInst<MicroDeintNeon4Uop>(
                        size, machInst, vd * 2 + 2, rMid + 4, inc * 2);
            } else {
                microOps[uopIdx++] = newNeonMixInst<MicroDeintNeon4Uop>(
                        size, machInst, vd * 2, rMid, inc * 2);
            }
            break;
          default:
            // Bad number of elements to deinterleave
            microOps[uopIdx++] = new Unknown(machInst);
        }
    }
    assert(uopIdx == numMicroops);

    // All micro-ops except the last are marked delayed-commit; the last
    // one terminates the macro-op.
    for (unsigned i = 0; i < numMicroops - 1; i++) {
        MicroOp * uopPtr = dynamic_cast<MicroOp *>(microOps[i].get());
        assert(uopPtr);
        uopPtr->setDelayedCommit();
    }
    microOps[numMicroops - 1]->setLastMicroop();
}
269
// NEON VLDn (single structure, to one or all lanes): emits one memory
// load micro-op into scratch register ufp0, an optional base-register
// writeback micro-op, then unpack micro-ops that move the loaded
// element(s) into the destination register lane(s). "all" selects the
// load-to-all-lanes form.
VldSingleOp::VldSingleOp(const char *mnem, ExtMachInst machInst,
                         OpClass __opClass, bool all, unsigned elems,
                         RegIndex rn, RegIndex vd, unsigned regs,
                         unsigned inc, uint32_t size, uint32_t align,
                         RegIndex rm, unsigned lane) :
    PredMacroOp(mnem, machInst, __opClass)
{
    assert(regs > 0 && regs <= 4);
    assert(regs % elems == 0);

    unsigned eBytes = (1 << size);
    unsigned loadSize = eBytes * elems;
    // Number of FP registers the loaded bytes occupy (used only for the
    // sanity asserts below, hence M5_VAR_USED).
    unsigned loadRegs M5_VAR_USED = (loadSize + sizeof(FloatRegBits) - 1) /
                                    sizeof(FloatRegBits);

    assert(loadRegs > 0 && loadRegs <= 4);

    numMicroops = 1;
    // rm == 15 encodes "no writeback".
    bool wb = (rm != 15);

    if (wb) numMicroops++;
    numMicroops += (regs / elems);
    microOps = new StaticInstPtr[numMicroops];

    // Scratch register just past the architectural FP registers.
    RegIndex ufp0 = NumFloatArchRegs;

    unsigned uopIdx = 0;
    // Pick a load micro-op matching the total transfer size; where several
    // element sizes share a total, the element type disambiguates.
    switch (loadSize) {
      case 1:
        microOps[uopIdx++] = new MicroLdrNeon1Uop<uint8_t>(
                machInst, ufp0, rn, 0, align);
        break;
      case 2:
        if (eBytes == 2) {
            microOps[uopIdx++] = new MicroLdrNeon2Uop<uint16_t>(
                    machInst, ufp0, rn, 0, align);
        } else {
            microOps[uopIdx++] = new MicroLdrNeon2Uop<uint8_t>(
                    machInst, ufp0, rn, 0, align);
        }
        break;
      case 3:
        microOps[uopIdx++] = new MicroLdrNeon3Uop<uint8_t>(
                machInst, ufp0, rn, 0, align);
        break;
      case 4:
        switch (eBytes) {
          case 1:
            microOps[uopIdx++] = new MicroLdrNeon4Uop<uint8_t>(
                    machInst, ufp0, rn, 0, align);
            break;
          case 2:
            microOps[uopIdx++] = new MicroLdrNeon4Uop<uint16_t>(
                    machInst, ufp0, rn, 0, align);
            break;
          case 4:
            microOps[uopIdx++] = new MicroLdrNeon4Uop<uint32_t>(
                    machInst, ufp0, rn, 0, align);
            break;
        }
        break;
      case 6:
        microOps[uopIdx++] = new MicroLdrNeon6Uop<uint16_t>(
                machInst, ufp0, rn, 0, align);
        break;
      case 8:
        switch (eBytes) {
          case 2:
            microOps[uopIdx++] = new MicroLdrNeon8Uop<uint16_t>(
                    machInst, ufp0, rn, 0, align);
            break;
          case 4:
            microOps[uopIdx++] = new MicroLdrNeon8Uop<uint32_t>(
                    machInst, ufp0, rn, 0, align);
            break;
        }
        break;
      case 12:
        microOps[uopIdx++] = new MicroLdrNeon12Uop<uint32_t>(
                machInst, ufp0, rn, 0, align);
        break;
      case 16:
        microOps[uopIdx++] = new MicroLdrNeon16Uop<uint32_t>(
                machInst, ufp0, rn, 0, align);
        break;
      default:
        // Unrecognized load size
        microOps[uopIdx++] = new Unknown(machInst);
    }
    if (wb) {
        if (rm != 15 && rm != 13) {
            // Register-offset writeback.
            microOps[uopIdx++] =
                new MicroAddUop(machInst, rn, rn, rm, 0, ArmISA::LSL);
        } else {
            // Immediate writeback by the number of bytes transferred.
            microOps[uopIdx++] =
                new MicroAddiUop(machInst, rn, rn, loadSize);
        }
    }
    // Unpack the scratch register into the destination lane(s); one
    // micro-op per (regs / elems) destination group.
    switch (elems) {
      case 4:
        assert(regs == 4);
        switch (size) {
          case 0:
            if (all) {
                microOps[uopIdx++] = new MicroUnpackAllNeon2to8Uop<uint8_t>(
                        machInst, vd * 2, ufp0, inc * 2);
            } else {
                microOps[uopIdx++] = new MicroUnpackNeon2to8Uop<uint8_t>(
                        machInst, vd * 2, ufp0, inc * 2, lane);
            }
            break;
          case 1:
            if (all) {
                microOps[uopIdx++] = new MicroUnpackAllNeon2to8Uop<uint16_t>(
                        machInst, vd * 2, ufp0, inc * 2);
            } else {
                microOps[uopIdx++] = new MicroUnpackNeon2to8Uop<uint16_t>(
                        machInst, vd * 2, ufp0, inc * 2, lane);
            }
            break;
          case 2:
            if (all) {
                microOps[uopIdx++] = new MicroUnpackAllNeon4to8Uop<uint32_t>(
                        machInst, vd * 2, ufp0, inc * 2);
            } else {
                microOps[uopIdx++] = new MicroUnpackNeon4to8Uop<uint32_t>(
                        machInst, vd * 2, ufp0, inc * 2, lane);
            }
            break;
          default:
            // Bad size
            microOps[uopIdx++] = new Unknown(machInst);
            break;
        }
        break;
      case 3:
        assert(regs == 3);
        switch (size) {
          case 0:
            if (all) {
                microOps[uopIdx++] = new MicroUnpackAllNeon2to6Uop<uint8_t>(
                        machInst, vd * 2, ufp0, inc * 2);
            } else {
                microOps[uopIdx++] = new MicroUnpackNeon2to6Uop<uint8_t>(
                        machInst, vd * 2, ufp0, inc * 2, lane);
            }
            break;
          case 1:
            if (all) {
                microOps[uopIdx++] = new MicroUnpackAllNeon2to6Uop<uint16_t>(
                        machInst, vd * 2, ufp0, inc * 2);
            } else {
                microOps[uopIdx++] = new MicroUnpackNeon2to6Uop<uint16_t>(
                        machInst, vd * 2, ufp0, inc * 2, lane);
            }
            break;
          case 2:
            if (all) {
                microOps[uopIdx++] = new MicroUnpackAllNeon4to6Uop<uint32_t>(
                        machInst, vd * 2, ufp0, inc * 2);
            } else {
                microOps[uopIdx++] = new MicroUnpackNeon4to6Uop<uint32_t>(
                        machInst, vd * 2, ufp0, inc * 2, lane);
            }
            break;
          default:
            // Bad size
            microOps[uopIdx++] = new Unknown(machInst);
            break;
        }
        break;
      case 2:
        assert(regs == 2);
        assert(loadRegs <= 2);
        switch (size) {
          case 0:
            if (all) {
                microOps[uopIdx++] = new MicroUnpackAllNeon2to4Uop<uint8_t>(
                        machInst, vd * 2, ufp0, inc * 2);
            } else {
                microOps[uopIdx++] = new MicroUnpackNeon2to4Uop<uint8_t>(
                        machInst, vd * 2, ufp0, inc * 2, lane);
            }
            break;
          case 1:
            if (all) {
                microOps[uopIdx++] = new MicroUnpackAllNeon2to4Uop<uint16_t>(
                        machInst, vd * 2, ufp0, inc * 2);
            } else {
                microOps[uopIdx++] = new MicroUnpackNeon2to4Uop<uint16_t>(
                        machInst, vd * 2, ufp0, inc * 2, lane);
            }
            break;
          case 2:
            if (all) {
                microOps[uopIdx++] = new MicroUnpackAllNeon2to4Uop<uint32_t>(
                        machInst, vd * 2, ufp0, inc * 2);
            } else {
                microOps[uopIdx++] = new MicroUnpackNeon2to4Uop<uint32_t>(
                        machInst, vd * 2, ufp0, inc * 2, lane);
            }
            break;
          default:
            // Bad size
            microOps[uopIdx++] = new Unknown(machInst);
            break;
        }
        break;
      case 1:
        // VLD1 to all lanes may target two registers; emit one unpack
        // micro-op per destination register.
        assert(regs == 1 || (all && regs == 2));
        assert(loadRegs <= 2);
        for (unsigned offset = 0; offset < regs; offset++) {
            switch (size) {
              case 0:
                if (all) {
                    microOps[uopIdx++] =
                        new MicroUnpackAllNeon2to2Uop<uint8_t>(
                            machInst, (vd + offset) * 2, ufp0, inc * 2);
                } else {
                    microOps[uopIdx++] =
                        new MicroUnpackNeon2to2Uop<uint8_t>(
                            machInst, (vd + offset) * 2, ufp0, inc * 2, lane);
                }
                break;
              case 1:
                if (all) {
                    microOps[uopIdx++] =
                        new MicroUnpackAllNeon2to2Uop<uint16_t>(
                            machInst, (vd + offset) * 2, ufp0, inc * 2);
                } else {
                    microOps[uopIdx++] =
                        new MicroUnpackNeon2to2Uop<uint16_t>(
                            machInst, (vd + offset) * 2, ufp0, inc * 2, lane);
                }
                break;
              case 2:
                if (all) {
                    microOps[uopIdx++] =
                        new MicroUnpackAllNeon2to2Uop<uint32_t>(
                            machInst, (vd + offset) * 2, ufp0, inc * 2);
                } else {
                    microOps[uopIdx++] =
                        new MicroUnpackNeon2to2Uop<uint32_t>(
                            machInst, (vd + offset) * 2, ufp0, inc * 2, lane);
                }
                break;
              default:
                // Bad size
                microOps[uopIdx++] = new Unknown(machInst);
                break;
            }
        }
        break;
      default:
        // Bad number of elements to unpack
        microOps[uopIdx++] = new Unknown(machInst);
    }
    assert(uopIdx == numMicroops);

    // All micro-ops except the last are marked delayed-commit; the last
    // one terminates the macro-op.
    for (unsigned i = 0; i < numMicroops - 1; i++) {
        MicroOp * uopPtr = dynamic_cast<MicroOp *>(microOps[i].get());
        assert(uopPtr);
        uopPtr->setDelayedCommit();
    }
    microOps[numMicroops - 1]->setLastMicroop();
}
536
// NEON VSTn (multiple structures): the mirror of VldMultOp. When
// elems > 1, first interleave the source registers into scratch
// registers, then emit one or two wide store micro-ops, then the
// optional base-register writeback micro-op.
VstMultOp::VstMultOp(const char *mnem, ExtMachInst machInst, OpClass __opClass,
                     unsigned elems, RegIndex rn, RegIndex vd, unsigned regs,
                     unsigned inc, uint32_t size, uint32_t align, RegIndex rm) :
    PredMacroOp(mnem, machInst, __opClass)
{
    assert(regs > 0 && regs <= 4);
    assert(regs % elems == 0);

    // One 16-byte store micro-op covers up to two double registers.
    numMicroops = (regs > 2) ? 2 : 1;
    // rm == 15 encodes "no writeback".
    bool wb = (rm != 15);
    bool interleave = (elems > 1);

    if (wb) numMicroops++;
    if (interleave) numMicroops += (regs / elems);
    microOps = new StaticInstPtr[numMicroops];

    // Only the first memory micro-op carries the alignment check.
    uint32_t noAlign = TLB::MustBeOne;

    // When interleaving, stores read from scratch registers just past the
    // architectural FP registers; otherwise straight from vd.
    RegIndex rMid = interleave ? NumFloatArchRegs : vd * 2;

    unsigned uopIdx = 0;
    if (interleave) {
        switch (elems) {
          case 4:
            assert(regs == 4);
            microOps[uopIdx++] = newNeonMixInst<MicroInterNeon8Uop>(
                    size, machInst, rMid, vd * 2, inc * 2);
            break;
          case 3:
            assert(regs == 3);
            microOps[uopIdx++] = newNeonMixInst<MicroInterNeon6Uop>(
                    size, machInst, rMid, vd * 2, inc * 2);
            break;
          case 2:
            assert(regs == 4 || regs == 2);
            if (regs == 4) {
                // Two independent 2-element interleaves, one per
                // register pair.
                microOps[uopIdx++] = newNeonMixInst<MicroInterNeon4Uop>(
                        size, machInst, rMid, vd * 2, inc * 2);
                microOps[uopIdx++] = newNeonMixInst<MicroInterNeon4Uop>(
                        size, machInst, rMid + 4, vd * 2 + 2, inc * 2);
            } else {
                microOps[uopIdx++] = newNeonMixInst<MicroInterNeon4Uop>(
                        size, machInst, rMid, vd * 2, inc * 2);
            }
            break;
          default:
            // Bad number of elements to interleave
            microOps[uopIdx++] = new Unknown(machInst);
        }
    }
    switch (regs) {
      case 4:
        microOps[uopIdx++] = newNeonMemInst<MicroStrNeon16Uop>(
                size, machInst, rMid, rn, 0, align);
        microOps[uopIdx++] = newNeonMemInst<MicroStrNeon16Uop>(
                size, machInst, rMid + 4, rn, 16, noAlign);
        break;
      case 3:
        microOps[uopIdx++] = newNeonMemInst<MicroStrNeon16Uop>(
                size, machInst, rMid, rn, 0, align);
        microOps[uopIdx++] = newNeonMemInst<MicroStrNeon8Uop>(
                size, machInst, rMid + 4, rn, 16, noAlign);
        break;
      case 2:
        microOps[uopIdx++] = newNeonMemInst<MicroStrNeon16Uop>(
                size, machInst, rMid, rn, 0, align);
        break;
      case 1:
        microOps[uopIdx++] = newNeonMemInst<MicroStrNeon8Uop>(
                size, machInst, rMid, rn, 0, align);
        break;
      default:
        // Unknown number of registers
        microOps[uopIdx++] = new Unknown(machInst);
    }
    if (wb) {
        if (rm != 15 && rm != 13) {
            // Register-offset writeback.
            microOps[uopIdx++] =
                new MicroAddUop(machInst, rn, rn, rm, 0, ArmISA::LSL);
        } else {
            // Immediate writeback: 8 bytes per double register transferred.
            microOps[uopIdx++] =
                new MicroAddiUop(machInst, rn, rn, regs * 8);
        }
    }
    assert(uopIdx == numMicroops);

    // All micro-ops except the last are marked delayed-commit; the last
    // one terminates the macro-op.
    for (unsigned i = 0; i < numMicroops - 1; i++) {
        MicroOp * uopPtr = dynamic_cast<MicroOp *>(microOps[i].get());
        assert(uopPtr);
        uopPtr->setDelayedCommit();
    }
    microOps[numMicroops - 1]->setLastMicroop();
}
630
// NEON VSTn (single structure, from one lane): pack the selected lane(s)
// into scratch register ufp0, emit one memory store micro-op, then the
// optional base-register writeback micro-op. Unlike loads there is no
// store-to-all-lanes form, hence assert(!all) below.
VstSingleOp::VstSingleOp(const char *mnem, ExtMachInst machInst,
                         OpClass __opClass, bool all, unsigned elems,
                         RegIndex rn, RegIndex vd, unsigned regs,
                         unsigned inc, uint32_t size, uint32_t align,
                         RegIndex rm, unsigned lane) :
    PredMacroOp(mnem, machInst, __opClass)
{
    assert(!all);
    assert(regs > 0 && regs <= 4);
    assert(regs % elems == 0);

    unsigned eBytes = (1 << size);
    unsigned storeSize = eBytes * elems;
    // Number of FP registers the stored bytes occupy (used only for the
    // sanity asserts below, hence M5_VAR_USED).
    unsigned storeRegs M5_VAR_USED = (storeSize + sizeof(FloatRegBits) - 1) /
                                     sizeof(FloatRegBits);

    assert(storeRegs > 0 && storeRegs <= 4);

    numMicroops = 1;
    // rm == 15 encodes "no writeback".
    bool wb = (rm != 15);

    if (wb) numMicroops++;
    numMicroops += (regs / elems);
    microOps = new StaticInstPtr[numMicroops];

    // Scratch register just past the architectural FP registers.
    RegIndex ufp0 = NumFloatArchRegs;

    unsigned uopIdx = 0;
    // Pack the source lane(s) into the scratch register.
    switch (elems) {
      case 4:
        assert(regs == 4);
        switch (size) {
          case 0:
            microOps[uopIdx++] = new MicroPackNeon8to2Uop<uint8_t>(
                    machInst, ufp0, vd * 2, inc * 2, lane);
            break;
          case 1:
            microOps[uopIdx++] = new MicroPackNeon8to2Uop<uint16_t>(
                    machInst, ufp0, vd * 2, inc * 2, lane);
            break;
          case 2:
            microOps[uopIdx++] = new MicroPackNeon8to4Uop<uint32_t>(
                    machInst, ufp0, vd * 2, inc * 2, lane);
            break;
          default:
            // Bad size
            microOps[uopIdx++] = new Unknown(machInst);
            break;
        }
        break;
      case 3:
        assert(regs == 3);
        switch (size) {
          case 0:
            microOps[uopIdx++] = new MicroPackNeon6to2Uop<uint8_t>(
                    machInst, ufp0, vd * 2, inc * 2, lane);
            break;
          case 1:
            microOps[uopIdx++] = new MicroPackNeon6to2Uop<uint16_t>(
                    machInst, ufp0, vd * 2, inc * 2, lane);
            break;
          case 2:
            microOps[uopIdx++] = new MicroPackNeon6to4Uop<uint32_t>(
                    machInst, ufp0, vd * 2, inc * 2, lane);
            break;
          default:
            // Bad size
            microOps[uopIdx++] = new Unknown(machInst);
            break;
        }
        break;
      case 2:
        assert(regs == 2);
        assert(storeRegs <= 2);
        switch (size) {
          case 0:
            microOps[uopIdx++] = new MicroPackNeon4to2Uop<uint8_t>(
                    machInst, ufp0, vd * 2, inc * 2, lane);
            break;
          case 1:
            microOps[uopIdx++] = new MicroPackNeon4to2Uop<uint16_t>(
                    machInst, ufp0, vd * 2, inc * 2, lane);
            break;
          case 2:
            microOps[uopIdx++] = new MicroPackNeon4to2Uop<uint32_t>(
                    machInst, ufp0, vd * 2, inc * 2, lane);
            break;
          default:
            // Bad size
            microOps[uopIdx++] = new Unknown(machInst);
            break;
        }
        break;
      case 1:
        // NOTE(review): with assert(!all) above, the "all && regs == 2"
        // arm of this assert (mirrored from the load path) is dead.
        assert(regs == 1 || (all && regs == 2));
        assert(storeRegs <= 2);
        for (unsigned offset = 0; offset < regs; offset++) {
            switch (size) {
              case 0:
                microOps[uopIdx++] = new MicroPackNeon2to2Uop<uint8_t>(
                        machInst, ufp0, (vd + offset) * 2, inc * 2, lane);
                break;
              case 1:
                microOps[uopIdx++] = new MicroPackNeon2to2Uop<uint16_t>(
                        machInst, ufp0, (vd + offset) * 2, inc * 2, lane);
                break;
              case 2:
                microOps[uopIdx++] = new MicroPackNeon2to2Uop<uint32_t>(
                        machInst, ufp0, (vd + offset) * 2, inc * 2, lane);
                break;
              default:
                // Bad size
                microOps[uopIdx++] = new Unknown(machInst);
                break;
            }
        }
        break;
      default:
        // Bad number of elements to unpack
        microOps[uopIdx++] = new Unknown(machInst);
    }
    // Pick a store micro-op matching the total transfer size; where
    // several element sizes share a total, the element type disambiguates.
    switch (storeSize) {
      case 1:
        microOps[uopIdx++] = new MicroStrNeon1Uop<uint8_t>(
                machInst, ufp0, rn, 0, align);
        break;
      case 2:
        if (eBytes == 2) {
            microOps[uopIdx++] = new MicroStrNeon2Uop<uint16_t>(
                    machInst, ufp0, rn, 0, align);
        } else {
            microOps[uopIdx++] = new MicroStrNeon2Uop<uint8_t>(
                    machInst, ufp0, rn, 0, align);
        }
        break;
      case 3:
        microOps[uopIdx++] = new MicroStrNeon3Uop<uint8_t>(
                machInst, ufp0, rn, 0, align);
        break;
      case 4:
        switch (eBytes) {
          case 1:
            microOps[uopIdx++] = new MicroStrNeon4Uop<uint8_t>(
                    machInst, ufp0, rn, 0, align);
            break;
          case 2:
            microOps[uopIdx++] = new MicroStrNeon4Uop<uint16_t>(
                    machInst, ufp0, rn, 0, align);
            break;
          case 4:
            microOps[uopIdx++] = new MicroStrNeon4Uop<uint32_t>(
                    machInst, ufp0, rn, 0, align);
            break;
        }
        break;
      case 6:
        microOps[uopIdx++] = new MicroStrNeon6Uop<uint16_t>(
                machInst, ufp0, rn, 0, align);
        break;
      case 8:
        switch (eBytes) {
          case 2:
            microOps[uopIdx++] = new MicroStrNeon8Uop<uint16_t>(
                    machInst, ufp0, rn, 0, align);
            break;
          case 4:
            microOps[uopIdx++] = new MicroStrNeon8Uop<uint32_t>(
                    machInst, ufp0, rn, 0, align);
            break;
        }
        break;
      case 12:
        microOps[uopIdx++] = new MicroStrNeon12Uop<uint32_t>(
                machInst, ufp0, rn, 0, align);
        break;
      case 16:
        microOps[uopIdx++] = new MicroStrNeon16Uop<uint32_t>(
                machInst, ufp0, rn, 0, align);
        break;
      default:
        // Bad store size
        microOps[uopIdx++] = new Unknown(machInst);
    }
    if (wb) {
        if (rm != 15 && rm != 13) {
            // Register-offset writeback.
            microOps[uopIdx++] =
                new MicroAddUop(machInst, rn, rn, rm, 0, ArmISA::LSL);
        } else {
            // Immediate writeback by the number of bytes transferred.
            microOps[uopIdx++] =
                new MicroAddiUop(machInst, rn, rn, storeSize);
        }
    }
    assert(uopIdx == numMicroops);

    // All micro-ops except the last are marked delayed-commit; the last
    // one terminates the macro-op.
    for (unsigned i = 0; i < numMicroops - 1; i++) {
        MicroOp * uopPtr = dynamic_cast<MicroOp *>(microOps[i].get());
        assert(uopPtr);
        uopPtr->setDelayedCommit();
    }
    microOps[numMicroops - 1]->setLastMicroop();
}
832
// VFP load/store multiple (VLDM/VSTM and FLDM/FSTM): one micro-op per
// single-precision register (two per double), each addressing memory at a
// fixed displacement from rn, followed by an optional writeback micro-op.
MacroVFPMemOp::MacroVFPMemOp(const char *mnem, ExtMachInst machInst,
                             OpClass __opClass, IntRegIndex rn,
                             RegIndex vd, bool single, bool up,
                             bool writeback, bool load, uint32_t offset) :
    PredMacroOp(mnem, machInst, __opClass)
{
    int i = 0;

    // The lowest order bit selects fldmx (set) or fldmd (clear). These seem
    // to be functionally identical except that fldmx is deprecated. For now
    // we'll assume they're otherwise interchangeable.
    int count = (single ? offset : (offset / 2));
    if (count == 0 || count > NumFloatArchRegs)
        warn_once("Bad offset field for VFP load/store multiple.\n");
    if (count == 0) {
        // Force there to be at least one microop so the macroop makes sense.
        writeback = true;
    }
    if (count > NumFloatArchRegs)
        count = NumFloatArchRegs;

    numMicroops = count * (single ? 1 : 2) + (writeback ? 1 : 0);
    microOps = new StaticInstPtr[numMicroops];

    int64_t addr = 0;

    // Descending transfers start at the top of the region (4 bytes per
    // word times offset) and work back down toward rn.
    if (!up)
        addr = 4 * offset;

    bool tempUp = up;
    for (int j = 0; j < count; j++) {
        if (load) {
            if (single) {
                microOps[i++] = new MicroLdrFpUop(machInst, vd++, rn,
                                                  tempUp, addr);
            } else {
                // Double registers move as a bottom-half/top-half pair of
                // micro-ops, 4 bytes apart.
                microOps[i++] = new MicroLdrDBFpUop(machInst, vd++, rn,
                                                    tempUp, addr);
                microOps[i++] = new MicroLdrDTFpUop(machInst, vd++, rn, tempUp,
                                                    addr + (up ? 4 : -4));
            }
        } else {
            if (single) {
                microOps[i++] = new MicroStrFpUop(machInst, vd++, rn,
                                                  tempUp, addr);
            } else {
                microOps[i++] = new MicroStrDBFpUop(machInst, vd++, rn,
                                                    tempUp, addr);
                microOps[i++] = new MicroStrDTFpUop(machInst, vd++, rn, tempUp,
                                                    addr + (up ? 4 : -4));
            }
        }
        if (!tempUp) {
            addr -= (single ? 4 : 8);
            // The microops don't handle negative displacement, so if we
            // hit zero, flip polarity and start adding.
            if (addr <= 0) {
                tempUp = true;
                addr = -addr;
            }
        } else {
            addr += (single ? 4 : 8);
        }
    }

    if (writeback) {
        // Adjust the base register by the full transfer size, in the
        // direction given by "up".
        if (up) {
            microOps[i++] =
                new MicroAddiUop(machInst, rn, rn, 4 * offset);
        } else {
            microOps[i++] =
                new MicroSubiUop(machInst, rn, rn, 4 * offset);
        }
    }

    assert(numMicroops == i);
    microOps[numMicroops - 1]->setLastMicroop();

    // All micro-ops except the last are marked delayed-commit; the last
    // one terminates the macro-op.
    for (StaticInstPtr *curUop = microOps;
         !(*curUop)->isLastMicroop(); curUop++) {
        MicroOp * uopPtr = dynamic_cast<MicroOp *>(curUop->get());
        assert(uopPtr);
        uopPtr->setDelayedCommit();
    }
}
918
919std::string
920MicroIntImmOp::generateDisassembly(Addr pc, const SymbolTable *symtab) const
921{
922 std::stringstream ss;
923 printMnemonic(ss);
924 printReg(ss, ura);
925 ss << ", ";
926 printReg(ss, urb);
927 ss << ", ";
928 ccprintf(ss, "#%d", imm);
929 return ss.str();
930}
931
932std::string
933MicroSetPCCPSR::generateDisassembly(Addr pc, const SymbolTable *symtab) const
934{
935 std::stringstream ss;
936 printMnemonic(ss);
937 ss << "[PC,CPSR]";
938 return ss.str();
939}
940
941std::string
942MicroIntMov::generateDisassembly(Addr pc, const SymbolTable *symtab) const
943{
944 std::stringstream ss;
945 printMnemonic(ss);
946 printReg(ss, ura);
947 ss << ", ";
948 printReg(ss, urb);
949 return ss.str();
950}
951
952std::string
953MicroIntOp::generateDisassembly(Addr pc, const SymbolTable *symtab) const
954{
955 std::stringstream ss;
956 printMnemonic(ss);
957 printReg(ss, ura);
958 ss << ", ";
959 printReg(ss, urb);
960 ss << ", ";
961 printReg(ss, urc);
962 return ss.str();
963}
964
965std::string
966MicroMemOp::generateDisassembly(Addr pc, const SymbolTable *symtab) const
967{
968 std::stringstream ss;
969 printMnemonic(ss);
970 printReg(ss, ura);
971 ss << ", [";
972 printReg(ss, urb);
973 ss << ", ";
974 ccprintf(ss, "#%d", imm);
975 ss << "]";
976 return ss.str();
977}
978
979}