// macromem.cc — extracted from a revision diff view (9368:3cd40209af8d vs 9640:35198406dd72)
1/*
2 * Copyright (c) 2010 ARM Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Copyright (c) 2007-2008 The Florida State University
15 * All rights reserved.
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions are
19 * met: redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer;
21 * redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution;
24 * neither the name of the copyright holders nor the names of its
25 * contributors may be used to endorse or promote products derived from
26 * this software without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 *
40 * Authors: Stephen Hines
41 */
42
43#include <sstream>
44
45#include "arch/arm/insts/macromem.hh"
46#include "arch/arm/generated/decoder.hh"
47
48using namespace std;
49using namespace ArmISAInst;
50
51namespace ArmISA
52{
53
54MacroMemOp::MacroMemOp(const char *mnem, ExtMachInst machInst,
55 OpClass __opClass, IntRegIndex rn,
56 bool index, bool up, bool user, bool writeback,
57 bool load, uint32_t reglist) :
58 PredMacroOp(mnem, machInst, __opClass)
59{
60 uint32_t regs = reglist;
61 uint32_t ones = number_of_ones(reglist);
62 // Remember that writeback adds a uop or two and the temp register adds one
63 numMicroops = ones + (writeback ? (load ? 2 : 1) : 0) + 1;
64
65 // It's technically legal to do a lot of nothing
66 if (!ones)
67 numMicroops = 1;
68
69 microOps = new StaticInstPtr[numMicroops];
70 uint32_t addr = 0;
71
72 if (!up)
73 addr = (ones << 2) - 4;
74
75 if (!index)
76 addr += 4;
77
78 StaticInstPtr *uop = microOps;
79
80 // Add 0 to Rn and stick it in ureg0.
81 // This is equivalent to a move.
82 *uop = new MicroAddiUop(machInst, INTREG_UREG0, rn, 0);
83
84 unsigned reg = 0;
85 unsigned regIdx = 0;
86 bool force_user = user & !bits(reglist, 15);
87 bool exception_ret = user & bits(reglist, 15);
88
89 for (int i = 0; i < ones; i++) {
90 // Find the next register.
91 while (!bits(regs, reg))
92 reg++;
93 replaceBits(regs, reg, 0);
94
95 regIdx = reg;
96 if (force_user) {
97 regIdx = intRegInMode(MODE_USER, regIdx);
98 }
99
100 if (load) {
101 if (writeback && i == ones - 1) {
102 // If it's a writeback and this is the last register
103 // do the load into a temporary register which we'll move
104 // into the final one later
105 *++uop = new MicroLdrUop(machInst, INTREG_UREG1, INTREG_UREG0,
106 up, addr);
107 } else {
108 // Otherwise just do it normally
109 if (reg == INTREG_PC && exception_ret) {
110 // This must be the exception return form of ldm.
111 *++uop = new MicroLdrRetUop(machInst, regIdx,
112 INTREG_UREG0, up, addr);
1/*
2 * Copyright (c) 2010 ARM Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Copyright (c) 2007-2008 The Florida State University
15 * All rights reserved.
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions are
19 * met: redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer;
21 * redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution;
24 * neither the name of the copyright holders nor the names of its
25 * contributors may be used to endorse or promote products derived from
26 * this software without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 *
40 * Authors: Stephen Hines
41 */
42
43#include <sstream>
44
45#include "arch/arm/insts/macromem.hh"
46#include "arch/arm/generated/decoder.hh"
47
48using namespace std;
49using namespace ArmISAInst;
50
51namespace ArmISA
52{
53
54MacroMemOp::MacroMemOp(const char *mnem, ExtMachInst machInst,
55 OpClass __opClass, IntRegIndex rn,
56 bool index, bool up, bool user, bool writeback,
57 bool load, uint32_t reglist) :
58 PredMacroOp(mnem, machInst, __opClass)
59{
60 uint32_t regs = reglist;
61 uint32_t ones = number_of_ones(reglist);
62 // Remember that writeback adds a uop or two and the temp register adds one
63 numMicroops = ones + (writeback ? (load ? 2 : 1) : 0) + 1;
64
65 // It's technically legal to do a lot of nothing
66 if (!ones)
67 numMicroops = 1;
68
69 microOps = new StaticInstPtr[numMicroops];
70 uint32_t addr = 0;
71
72 if (!up)
73 addr = (ones << 2) - 4;
74
75 if (!index)
76 addr += 4;
77
78 StaticInstPtr *uop = microOps;
79
80 // Add 0 to Rn and stick it in ureg0.
81 // This is equivalent to a move.
82 *uop = new MicroAddiUop(machInst, INTREG_UREG0, rn, 0);
83
84 unsigned reg = 0;
85 unsigned regIdx = 0;
86 bool force_user = user & !bits(reglist, 15);
87 bool exception_ret = user & bits(reglist, 15);
88
89 for (int i = 0; i < ones; i++) {
90 // Find the next register.
91 while (!bits(regs, reg))
92 reg++;
93 replaceBits(regs, reg, 0);
94
95 regIdx = reg;
96 if (force_user) {
97 regIdx = intRegInMode(MODE_USER, regIdx);
98 }
99
100 if (load) {
101 if (writeback && i == ones - 1) {
102 // If it's a writeback and this is the last register
103 // do the load into a temporary register which we'll move
104 // into the final one later
105 *++uop = new MicroLdrUop(machInst, INTREG_UREG1, INTREG_UREG0,
106 up, addr);
107 } else {
108 // Otherwise just do it normally
109 if (reg == INTREG_PC && exception_ret) {
110 // This must be the exception return form of ldm.
111 *++uop = new MicroLdrRetUop(machInst, regIdx,
112 INTREG_UREG0, up, addr);
113 if (!(condCode == COND_AL || condCode == COND_UC))
114 (*uop)->setFlag(StaticInst::IsCondControl);
115 else
116 (*uop)->setFlag(StaticInst::IsUncondControl);
113 } else {
114 *++uop = new MicroLdrUop(machInst, regIdx,
115 INTREG_UREG0, up, addr);
116 if (reg == INTREG_PC) {
117 (*uop)->setFlag(StaticInst::IsControl);
118 if (!(condCode == COND_AL || condCode == COND_UC))
119 (*uop)->setFlag(StaticInst::IsCondControl);
120 else
121 (*uop)->setFlag(StaticInst::IsUncondControl);
122 (*uop)->setFlag(StaticInst::IsIndirectControl);
123 }
124 }
125 }
126 } else {
127 *++uop = new MicroStrUop(machInst, regIdx, INTREG_UREG0, up, addr);
128 }
129
130 if (up)
131 addr += 4;
132 else
133 addr -= 4;
134 }
135
136 if (writeback && ones) {
137 // put the register update after we're done all loading
138 if (up)
139 *++uop = new MicroAddiUop(machInst, rn, rn, ones * 4);
140 else
141 *++uop = new MicroSubiUop(machInst, rn, rn, ones * 4);
142
143 // If this was a load move the last temporary value into place
144 // this way we can't take an exception after we update the base
145 // register.
146 if (load && reg == INTREG_PC && exception_ret) {
147 *++uop = new MicroUopRegMovRet(machInst, 0, INTREG_UREG1);
148 if (!(condCode == COND_AL || condCode == COND_UC))
149 (*uop)->setFlag(StaticInst::IsCondControl);
150 else
151 (*uop)->setFlag(StaticInst::IsUncondControl);
152 } else if (load) {
153 *++uop = new MicroUopRegMov(machInst, regIdx, INTREG_UREG1);
154 if (reg == INTREG_PC) {
155 (*uop)->setFlag(StaticInst::IsControl);
156 (*uop)->setFlag(StaticInst::IsCondControl);
157 (*uop)->setFlag(StaticInst::IsIndirectControl);
158 // This is created as a RAS POP
159 if (rn == INTREG_SP)
160 (*uop)->setFlag(StaticInst::IsReturn);
161
162 }
163 }
164 }
165
166 (*uop)->setLastMicroop();
167
168 for (StaticInstPtr *curUop = microOps;
169 !(*curUop)->isLastMicroop(); curUop++) {
170 MicroOp * uopPtr = dynamic_cast<MicroOp *>(curUop->get());
171 assert(uopPtr);
172 uopPtr->setDelayedCommit();
173 }
174}
175
176VldMultOp::VldMultOp(const char *mnem, ExtMachInst machInst, OpClass __opClass,
177 unsigned elems, RegIndex rn, RegIndex vd, unsigned regs,
178 unsigned inc, uint32_t size, uint32_t align, RegIndex rm) :
179 PredMacroOp(mnem, machInst, __opClass)
180{
181 assert(regs > 0 && regs <= 4);
182 assert(regs % elems == 0);
183
184 numMicroops = (regs > 2) ? 2 : 1;
185 bool wb = (rm != 15);
186 bool deinterleave = (elems > 1);
187
188 if (wb) numMicroops++;
189 if (deinterleave) numMicroops += (regs / elems);
190 microOps = new StaticInstPtr[numMicroops];
191
192 RegIndex rMid = deinterleave ? NumFloatArchRegs : vd * 2;
193
194 uint32_t noAlign = TLB::MustBeOne;
195
196 unsigned uopIdx = 0;
197 switch (regs) {
198 case 4:
199 microOps[uopIdx++] = newNeonMemInst<MicroLdrNeon16Uop>(
200 size, machInst, rMid, rn, 0, align);
201 microOps[uopIdx++] = newNeonMemInst<MicroLdrNeon16Uop>(
202 size, machInst, rMid + 4, rn, 16, noAlign);
203 break;
204 case 3:
205 microOps[uopIdx++] = newNeonMemInst<MicroLdrNeon16Uop>(
206 size, machInst, rMid, rn, 0, align);
207 microOps[uopIdx++] = newNeonMemInst<MicroLdrNeon8Uop>(
208 size, machInst, rMid + 4, rn, 16, noAlign);
209 break;
210 case 2:
211 microOps[uopIdx++] = newNeonMemInst<MicroLdrNeon16Uop>(
212 size, machInst, rMid, rn, 0, align);
213 break;
214 case 1:
215 microOps[uopIdx++] = newNeonMemInst<MicroLdrNeon8Uop>(
216 size, machInst, rMid, rn, 0, align);
217 break;
218 default:
219 // Unknown number of registers
220 microOps[uopIdx++] = new Unknown(machInst);
221 }
222 if (wb) {
223 if (rm != 15 && rm != 13) {
224 microOps[uopIdx++] =
225 new MicroAddUop(machInst, rn, rn, rm, 0, ArmISA::LSL);
226 } else {
227 microOps[uopIdx++] =
228 new MicroAddiUop(machInst, rn, rn, regs * 8);
229 }
230 }
231 if (deinterleave) {
232 switch (elems) {
233 case 4:
234 assert(regs == 4);
235 microOps[uopIdx++] = newNeonMixInst<MicroDeintNeon8Uop>(
236 size, machInst, vd * 2, rMid, inc * 2);
237 break;
238 case 3:
239 assert(regs == 3);
240 microOps[uopIdx++] = newNeonMixInst<MicroDeintNeon6Uop>(
241 size, machInst, vd * 2, rMid, inc * 2);
242 break;
243 case 2:
244 assert(regs == 4 || regs == 2);
245 if (regs == 4) {
246 microOps[uopIdx++] = newNeonMixInst<MicroDeintNeon4Uop>(
247 size, machInst, vd * 2, rMid, inc * 2);
248 microOps[uopIdx++] = newNeonMixInst<MicroDeintNeon4Uop>(
249 size, machInst, vd * 2 + 2, rMid + 4, inc * 2);
250 } else {
251 microOps[uopIdx++] = newNeonMixInst<MicroDeintNeon4Uop>(
252 size, machInst, vd * 2, rMid, inc * 2);
253 }
254 break;
255 default:
256 // Bad number of elements to deinterleave
257 microOps[uopIdx++] = new Unknown(machInst);
258 }
259 }
260 assert(uopIdx == numMicroops);
261
262 for (unsigned i = 0; i < numMicroops - 1; i++) {
263 MicroOp * uopPtr = dynamic_cast<MicroOp *>(microOps[i].get());
264 assert(uopPtr);
265 uopPtr->setDelayedCommit();
266 }
267 microOps[numMicroops - 1]->setLastMicroop();
268}
269
270VldSingleOp::VldSingleOp(const char *mnem, ExtMachInst machInst,
271 OpClass __opClass, bool all, unsigned elems,
272 RegIndex rn, RegIndex vd, unsigned regs,
273 unsigned inc, uint32_t size, uint32_t align,
274 RegIndex rm, unsigned lane) :
275 PredMacroOp(mnem, machInst, __opClass)
276{
277 assert(regs > 0 && regs <= 4);
278 assert(regs % elems == 0);
279
280 unsigned eBytes = (1 << size);
281 unsigned loadSize = eBytes * elems;
282 unsigned loadRegs M5_VAR_USED = (loadSize + sizeof(FloatRegBits) - 1) /
283 sizeof(FloatRegBits);
284
285 assert(loadRegs > 0 && loadRegs <= 4);
286
287 numMicroops = 1;
288 bool wb = (rm != 15);
289
290 if (wb) numMicroops++;
291 numMicroops += (regs / elems);
292 microOps = new StaticInstPtr[numMicroops];
293
294 RegIndex ufp0 = NumFloatArchRegs;
295
296 unsigned uopIdx = 0;
297 switch (loadSize) {
298 case 1:
299 microOps[uopIdx++] = new MicroLdrNeon1Uop<uint8_t>(
300 machInst, ufp0, rn, 0, align);
301 break;
302 case 2:
303 if (eBytes == 2) {
304 microOps[uopIdx++] = new MicroLdrNeon2Uop<uint16_t>(
305 machInst, ufp0, rn, 0, align);
306 } else {
307 microOps[uopIdx++] = new MicroLdrNeon2Uop<uint8_t>(
308 machInst, ufp0, rn, 0, align);
309 }
310 break;
311 case 3:
312 microOps[uopIdx++] = new MicroLdrNeon3Uop<uint8_t>(
313 machInst, ufp0, rn, 0, align);
314 break;
315 case 4:
316 switch (eBytes) {
317 case 1:
318 microOps[uopIdx++] = new MicroLdrNeon4Uop<uint8_t>(
319 machInst, ufp0, rn, 0, align);
320 break;
321 case 2:
322 microOps[uopIdx++] = new MicroLdrNeon4Uop<uint16_t>(
323 machInst, ufp0, rn, 0, align);
324 break;
325 case 4:
326 microOps[uopIdx++] = new MicroLdrNeon4Uop<uint32_t>(
327 machInst, ufp0, rn, 0, align);
328 break;
329 }
330 break;
331 case 6:
332 microOps[uopIdx++] = new MicroLdrNeon6Uop<uint16_t>(
333 machInst, ufp0, rn, 0, align);
334 break;
335 case 8:
336 switch (eBytes) {
337 case 2:
338 microOps[uopIdx++] = new MicroLdrNeon8Uop<uint16_t>(
339 machInst, ufp0, rn, 0, align);
340 break;
341 case 4:
342 microOps[uopIdx++] = new MicroLdrNeon8Uop<uint32_t>(
343 machInst, ufp0, rn, 0, align);
344 break;
345 }
346 break;
347 case 12:
348 microOps[uopIdx++] = new MicroLdrNeon12Uop<uint32_t>(
349 machInst, ufp0, rn, 0, align);
350 break;
351 case 16:
352 microOps[uopIdx++] = new MicroLdrNeon16Uop<uint32_t>(
353 machInst, ufp0, rn, 0, align);
354 break;
355 default:
356 // Unrecognized load size
357 microOps[uopIdx++] = new Unknown(machInst);
358 }
359 if (wb) {
360 if (rm != 15 && rm != 13) {
361 microOps[uopIdx++] =
362 new MicroAddUop(machInst, rn, rn, rm, 0, ArmISA::LSL);
363 } else {
364 microOps[uopIdx++] =
365 new MicroAddiUop(machInst, rn, rn, loadSize);
366 }
367 }
368 switch (elems) {
369 case 4:
370 assert(regs == 4);
371 switch (size) {
372 case 0:
373 if (all) {
374 microOps[uopIdx++] = new MicroUnpackAllNeon2to8Uop<uint8_t>(
375 machInst, vd * 2, ufp0, inc * 2);
376 } else {
377 microOps[uopIdx++] = new MicroUnpackNeon2to8Uop<uint8_t>(
378 machInst, vd * 2, ufp0, inc * 2, lane);
379 }
380 break;
381 case 1:
382 if (all) {
383 microOps[uopIdx++] = new MicroUnpackAllNeon2to8Uop<uint16_t>(
384 machInst, vd * 2, ufp0, inc * 2);
385 } else {
386 microOps[uopIdx++] = new MicroUnpackNeon2to8Uop<uint16_t>(
387 machInst, vd * 2, ufp0, inc * 2, lane);
388 }
389 break;
390 case 2:
391 if (all) {
392 microOps[uopIdx++] = new MicroUnpackAllNeon4to8Uop<uint32_t>(
393 machInst, vd * 2, ufp0, inc * 2);
394 } else {
395 microOps[uopIdx++] = new MicroUnpackNeon4to8Uop<uint32_t>(
396 machInst, vd * 2, ufp0, inc * 2, lane);
397 }
398 break;
399 default:
400 // Bad size
401 microOps[uopIdx++] = new Unknown(machInst);
402 break;
403 }
404 break;
405 case 3:
406 assert(regs == 3);
407 switch (size) {
408 case 0:
409 if (all) {
410 microOps[uopIdx++] = new MicroUnpackAllNeon2to6Uop<uint8_t>(
411 machInst, vd * 2, ufp0, inc * 2);
412 } else {
413 microOps[uopIdx++] = new MicroUnpackNeon2to6Uop<uint8_t>(
414 machInst, vd * 2, ufp0, inc * 2, lane);
415 }
416 break;
417 case 1:
418 if (all) {
419 microOps[uopIdx++] = new MicroUnpackAllNeon2to6Uop<uint16_t>(
420 machInst, vd * 2, ufp0, inc * 2);
421 } else {
422 microOps[uopIdx++] = new MicroUnpackNeon2to6Uop<uint16_t>(
423 machInst, vd * 2, ufp0, inc * 2, lane);
424 }
425 break;
426 case 2:
427 if (all) {
428 microOps[uopIdx++] = new MicroUnpackAllNeon4to6Uop<uint32_t>(
429 machInst, vd * 2, ufp0, inc * 2);
430 } else {
431 microOps[uopIdx++] = new MicroUnpackNeon4to6Uop<uint32_t>(
432 machInst, vd * 2, ufp0, inc * 2, lane);
433 }
434 break;
435 default:
436 // Bad size
437 microOps[uopIdx++] = new Unknown(machInst);
438 break;
439 }
440 break;
441 case 2:
442 assert(regs == 2);
443 assert(loadRegs <= 2);
444 switch (size) {
445 case 0:
446 if (all) {
447 microOps[uopIdx++] = new MicroUnpackAllNeon2to4Uop<uint8_t>(
448 machInst, vd * 2, ufp0, inc * 2);
449 } else {
450 microOps[uopIdx++] = new MicroUnpackNeon2to4Uop<uint8_t>(
451 machInst, vd * 2, ufp0, inc * 2, lane);
452 }
453 break;
454 case 1:
455 if (all) {
456 microOps[uopIdx++] = new MicroUnpackAllNeon2to4Uop<uint16_t>(
457 machInst, vd * 2, ufp0, inc * 2);
458 } else {
459 microOps[uopIdx++] = new MicroUnpackNeon2to4Uop<uint16_t>(
460 machInst, vd * 2, ufp0, inc * 2, lane);
461 }
462 break;
463 case 2:
464 if (all) {
465 microOps[uopIdx++] = new MicroUnpackAllNeon2to4Uop<uint32_t>(
466 machInst, vd * 2, ufp0, inc * 2);
467 } else {
468 microOps[uopIdx++] = new MicroUnpackNeon2to4Uop<uint32_t>(
469 machInst, vd * 2, ufp0, inc * 2, lane);
470 }
471 break;
472 default:
473 // Bad size
474 microOps[uopIdx++] = new Unknown(machInst);
475 break;
476 }
477 break;
478 case 1:
479 assert(regs == 1 || (all && regs == 2));
480 assert(loadRegs <= 2);
481 for (unsigned offset = 0; offset < regs; offset++) {
482 switch (size) {
483 case 0:
484 if (all) {
485 microOps[uopIdx++] =
486 new MicroUnpackAllNeon2to2Uop<uint8_t>(
487 machInst, (vd + offset) * 2, ufp0, inc * 2);
488 } else {
489 microOps[uopIdx++] =
490 new MicroUnpackNeon2to2Uop<uint8_t>(
491 machInst, (vd + offset) * 2, ufp0, inc * 2, lane);
492 }
493 break;
494 case 1:
495 if (all) {
496 microOps[uopIdx++] =
497 new MicroUnpackAllNeon2to2Uop<uint16_t>(
498 machInst, (vd + offset) * 2, ufp0, inc * 2);
499 } else {
500 microOps[uopIdx++] =
501 new MicroUnpackNeon2to2Uop<uint16_t>(
502 machInst, (vd + offset) * 2, ufp0, inc * 2, lane);
503 }
504 break;
505 case 2:
506 if (all) {
507 microOps[uopIdx++] =
508 new MicroUnpackAllNeon2to2Uop<uint32_t>(
509 machInst, (vd + offset) * 2, ufp0, inc * 2);
510 } else {
511 microOps[uopIdx++] =
512 new MicroUnpackNeon2to2Uop<uint32_t>(
513 machInst, (vd + offset) * 2, ufp0, inc * 2, lane);
514 }
515 break;
516 default:
517 // Bad size
518 microOps[uopIdx++] = new Unknown(machInst);
519 break;
520 }
521 }
522 break;
523 default:
524 // Bad number of elements to unpack
525 microOps[uopIdx++] = new Unknown(machInst);
526 }
527 assert(uopIdx == numMicroops);
528
529 for (unsigned i = 0; i < numMicroops - 1; i++) {
530 MicroOp * uopPtr = dynamic_cast<MicroOp *>(microOps[i].get());
531 assert(uopPtr);
532 uopPtr->setDelayedCommit();
533 }
534 microOps[numMicroops - 1]->setLastMicroop();
535}
536
537VstMultOp::VstMultOp(const char *mnem, ExtMachInst machInst, OpClass __opClass,
538 unsigned elems, RegIndex rn, RegIndex vd, unsigned regs,
539 unsigned inc, uint32_t size, uint32_t align, RegIndex rm) :
540 PredMacroOp(mnem, machInst, __opClass)
541{
542 assert(regs > 0 && regs <= 4);
543 assert(regs % elems == 0);
544
545 numMicroops = (regs > 2) ? 2 : 1;
546 bool wb = (rm != 15);
547 bool interleave = (elems > 1);
548
549 if (wb) numMicroops++;
550 if (interleave) numMicroops += (regs / elems);
551 microOps = new StaticInstPtr[numMicroops];
552
553 uint32_t noAlign = TLB::MustBeOne;
554
555 RegIndex rMid = interleave ? NumFloatArchRegs : vd * 2;
556
557 unsigned uopIdx = 0;
558 if (interleave) {
559 switch (elems) {
560 case 4:
561 assert(regs == 4);
562 microOps[uopIdx++] = newNeonMixInst<MicroInterNeon8Uop>(
563 size, machInst, rMid, vd * 2, inc * 2);
564 break;
565 case 3:
566 assert(regs == 3);
567 microOps[uopIdx++] = newNeonMixInst<MicroInterNeon6Uop>(
568 size, machInst, rMid, vd * 2, inc * 2);
569 break;
570 case 2:
571 assert(regs == 4 || regs == 2);
572 if (regs == 4) {
573 microOps[uopIdx++] = newNeonMixInst<MicroInterNeon4Uop>(
574 size, machInst, rMid, vd * 2, inc * 2);
575 microOps[uopIdx++] = newNeonMixInst<MicroInterNeon4Uop>(
576 size, machInst, rMid + 4, vd * 2 + 2, inc * 2);
577 } else {
578 microOps[uopIdx++] = newNeonMixInst<MicroInterNeon4Uop>(
579 size, machInst, rMid, vd * 2, inc * 2);
580 }
581 break;
582 default:
583 // Bad number of elements to interleave
584 microOps[uopIdx++] = new Unknown(machInst);
585 }
586 }
587 switch (regs) {
588 case 4:
589 microOps[uopIdx++] = newNeonMemInst<MicroStrNeon16Uop>(
590 size, machInst, rMid, rn, 0, align);
591 microOps[uopIdx++] = newNeonMemInst<MicroStrNeon16Uop>(
592 size, machInst, rMid + 4, rn, 16, noAlign);
593 break;
594 case 3:
595 microOps[uopIdx++] = newNeonMemInst<MicroStrNeon16Uop>(
596 size, machInst, rMid, rn, 0, align);
597 microOps[uopIdx++] = newNeonMemInst<MicroStrNeon8Uop>(
598 size, machInst, rMid + 4, rn, 16, noAlign);
599 break;
600 case 2:
601 microOps[uopIdx++] = newNeonMemInst<MicroStrNeon16Uop>(
602 size, machInst, rMid, rn, 0, align);
603 break;
604 case 1:
605 microOps[uopIdx++] = newNeonMemInst<MicroStrNeon8Uop>(
606 size, machInst, rMid, rn, 0, align);
607 break;
608 default:
609 // Unknown number of registers
610 microOps[uopIdx++] = new Unknown(machInst);
611 }
612 if (wb) {
613 if (rm != 15 && rm != 13) {
614 microOps[uopIdx++] =
615 new MicroAddUop(machInst, rn, rn, rm, 0, ArmISA::LSL);
616 } else {
617 microOps[uopIdx++] =
618 new MicroAddiUop(machInst, rn, rn, regs * 8);
619 }
620 }
621 assert(uopIdx == numMicroops);
622
623 for (unsigned i = 0; i < numMicroops - 1; i++) {
624 MicroOp * uopPtr = dynamic_cast<MicroOp *>(microOps[i].get());
625 assert(uopPtr);
626 uopPtr->setDelayedCommit();
627 }
628 microOps[numMicroops - 1]->setLastMicroop();
629}
630
631VstSingleOp::VstSingleOp(const char *mnem, ExtMachInst machInst,
632 OpClass __opClass, bool all, unsigned elems,
633 RegIndex rn, RegIndex vd, unsigned regs,
634 unsigned inc, uint32_t size, uint32_t align,
635 RegIndex rm, unsigned lane) :
636 PredMacroOp(mnem, machInst, __opClass)
637{
638 assert(!all);
639 assert(regs > 0 && regs <= 4);
640 assert(regs % elems == 0);
641
642 unsigned eBytes = (1 << size);
643 unsigned storeSize = eBytes * elems;
644 unsigned storeRegs M5_VAR_USED = (storeSize + sizeof(FloatRegBits) - 1) /
645 sizeof(FloatRegBits);
646
647 assert(storeRegs > 0 && storeRegs <= 4);
648
649 numMicroops = 1;
650 bool wb = (rm != 15);
651
652 if (wb) numMicroops++;
653 numMicroops += (regs / elems);
654 microOps = new StaticInstPtr[numMicroops];
655
656 RegIndex ufp0 = NumFloatArchRegs;
657
658 unsigned uopIdx = 0;
659 switch (elems) {
660 case 4:
661 assert(regs == 4);
662 switch (size) {
663 case 0:
664 microOps[uopIdx++] = new MicroPackNeon8to2Uop<uint8_t>(
665 machInst, ufp0, vd * 2, inc * 2, lane);
666 break;
667 case 1:
668 microOps[uopIdx++] = new MicroPackNeon8to2Uop<uint16_t>(
669 machInst, ufp0, vd * 2, inc * 2, lane);
670 break;
671 case 2:
672 microOps[uopIdx++] = new MicroPackNeon8to4Uop<uint32_t>(
673 machInst, ufp0, vd * 2, inc * 2, lane);
674 break;
675 default:
676 // Bad size
677 microOps[uopIdx++] = new Unknown(machInst);
678 break;
679 }
680 break;
681 case 3:
682 assert(regs == 3);
683 switch (size) {
684 case 0:
685 microOps[uopIdx++] = new MicroPackNeon6to2Uop<uint8_t>(
686 machInst, ufp0, vd * 2, inc * 2, lane);
687 break;
688 case 1:
689 microOps[uopIdx++] = new MicroPackNeon6to2Uop<uint16_t>(
690 machInst, ufp0, vd * 2, inc * 2, lane);
691 break;
692 case 2:
693 microOps[uopIdx++] = new MicroPackNeon6to4Uop<uint32_t>(
694 machInst, ufp0, vd * 2, inc * 2, lane);
695 break;
696 default:
697 // Bad size
698 microOps[uopIdx++] = new Unknown(machInst);
699 break;
700 }
701 break;
702 case 2:
703 assert(regs == 2);
704 assert(storeRegs <= 2);
705 switch (size) {
706 case 0:
707 microOps[uopIdx++] = new MicroPackNeon4to2Uop<uint8_t>(
708 machInst, ufp0, vd * 2, inc * 2, lane);
709 break;
710 case 1:
711 microOps[uopIdx++] = new MicroPackNeon4to2Uop<uint16_t>(
712 machInst, ufp0, vd * 2, inc * 2, lane);
713 break;
714 case 2:
715 microOps[uopIdx++] = new MicroPackNeon4to2Uop<uint32_t>(
716 machInst, ufp0, vd * 2, inc * 2, lane);
717 break;
718 default:
719 // Bad size
720 microOps[uopIdx++] = new Unknown(machInst);
721 break;
722 }
723 break;
724 case 1:
725 assert(regs == 1 || (all && regs == 2));
726 assert(storeRegs <= 2);
727 for (unsigned offset = 0; offset < regs; offset++) {
728 switch (size) {
729 case 0:
730 microOps[uopIdx++] = new MicroPackNeon2to2Uop<uint8_t>(
731 machInst, ufp0, (vd + offset) * 2, inc * 2, lane);
732 break;
733 case 1:
734 microOps[uopIdx++] = new MicroPackNeon2to2Uop<uint16_t>(
735 machInst, ufp0, (vd + offset) * 2, inc * 2, lane);
736 break;
737 case 2:
738 microOps[uopIdx++] = new MicroPackNeon2to2Uop<uint32_t>(
739 machInst, ufp0, (vd + offset) * 2, inc * 2, lane);
740 break;
741 default:
742 // Bad size
743 microOps[uopIdx++] = new Unknown(machInst);
744 break;
745 }
746 }
747 break;
748 default:
749 // Bad number of elements to unpack
750 microOps[uopIdx++] = new Unknown(machInst);
751 }
752 switch (storeSize) {
753 case 1:
754 microOps[uopIdx++] = new MicroStrNeon1Uop<uint8_t>(
755 machInst, ufp0, rn, 0, align);
756 break;
757 case 2:
758 if (eBytes == 2) {
759 microOps[uopIdx++] = new MicroStrNeon2Uop<uint16_t>(
760 machInst, ufp0, rn, 0, align);
761 } else {
762 microOps[uopIdx++] = new MicroStrNeon2Uop<uint8_t>(
763 machInst, ufp0, rn, 0, align);
764 }
765 break;
766 case 3:
767 microOps[uopIdx++] = new MicroStrNeon3Uop<uint8_t>(
768 machInst, ufp0, rn, 0, align);
769 break;
770 case 4:
771 switch (eBytes) {
772 case 1:
773 microOps[uopIdx++] = new MicroStrNeon4Uop<uint8_t>(
774 machInst, ufp0, rn, 0, align);
775 break;
776 case 2:
777 microOps[uopIdx++] = new MicroStrNeon4Uop<uint16_t>(
778 machInst, ufp0, rn, 0, align);
779 break;
780 case 4:
781 microOps[uopIdx++] = new MicroStrNeon4Uop<uint32_t>(
782 machInst, ufp0, rn, 0, align);
783 break;
784 }
785 break;
786 case 6:
787 microOps[uopIdx++] = new MicroStrNeon6Uop<uint16_t>(
788 machInst, ufp0, rn, 0, align);
789 break;
790 case 8:
791 switch (eBytes) {
792 case 2:
793 microOps[uopIdx++] = new MicroStrNeon8Uop<uint16_t>(
794 machInst, ufp0, rn, 0, align);
795 break;
796 case 4:
797 microOps[uopIdx++] = new MicroStrNeon8Uop<uint32_t>(
798 machInst, ufp0, rn, 0, align);
799 break;
800 }
801 break;
802 case 12:
803 microOps[uopIdx++] = new MicroStrNeon12Uop<uint32_t>(
804 machInst, ufp0, rn, 0, align);
805 break;
806 case 16:
807 microOps[uopIdx++] = new MicroStrNeon16Uop<uint32_t>(
808 machInst, ufp0, rn, 0, align);
809 break;
810 default:
811 // Bad store size
812 microOps[uopIdx++] = new Unknown(machInst);
813 }
814 if (wb) {
815 if (rm != 15 && rm != 13) {
816 microOps[uopIdx++] =
817 new MicroAddUop(machInst, rn, rn, rm, 0, ArmISA::LSL);
818 } else {
819 microOps[uopIdx++] =
820 new MicroAddiUop(machInst, rn, rn, storeSize);
821 }
822 }
823 assert(uopIdx == numMicroops);
824
825 for (unsigned i = 0; i < numMicroops - 1; i++) {
826 MicroOp * uopPtr = dynamic_cast<MicroOp *>(microOps[i].get());
827 assert(uopPtr);
828 uopPtr->setDelayedCommit();
829 }
830 microOps[numMicroops - 1]->setLastMicroop();
831}
832
833MacroVFPMemOp::MacroVFPMemOp(const char *mnem, ExtMachInst machInst,
834 OpClass __opClass, IntRegIndex rn,
835 RegIndex vd, bool single, bool up,
836 bool writeback, bool load, uint32_t offset) :
837 PredMacroOp(mnem, machInst, __opClass)
838{
839 int i = 0;
840
841 // The lowest order bit selects fldmx (set) or fldmd (clear). These seem
842 // to be functionally identical except that fldmx is deprecated. For now
843 // we'll assume they're otherwise interchangable.
844 int count = (single ? offset : (offset / 2));
845 if (count == 0 || count > NumFloatArchRegs)
846 warn_once("Bad offset field for VFP load/store multiple.\n");
847 if (count == 0) {
848 // Force there to be at least one microop so the macroop makes sense.
849 writeback = true;
850 }
851 if (count > NumFloatArchRegs)
852 count = NumFloatArchRegs;
853
854 numMicroops = count * (single ? 1 : 2) + (writeback ? 1 : 0);
855 microOps = new StaticInstPtr[numMicroops];
856
857 int64_t addr = 0;
858
859 if (!up)
860 addr = 4 * offset;
861
862 bool tempUp = up;
863 for (int j = 0; j < count; j++) {
864 if (load) {
865 if (single) {
866 microOps[i++] = new MicroLdrFpUop(machInst, vd++, rn,
867 tempUp, addr);
868 } else {
869 microOps[i++] = new MicroLdrDBFpUop(machInst, vd++, rn,
870 tempUp, addr);
871 microOps[i++] = new MicroLdrDTFpUop(machInst, vd++, rn, tempUp,
872 addr + (up ? 4 : -4));
873 }
874 } else {
875 if (single) {
876 microOps[i++] = new MicroStrFpUop(machInst, vd++, rn,
877 tempUp, addr);
878 } else {
879 microOps[i++] = new MicroStrDBFpUop(machInst, vd++, rn,
880 tempUp, addr);
881 microOps[i++] = new MicroStrDTFpUop(machInst, vd++, rn, tempUp,
882 addr + (up ? 4 : -4));
883 }
884 }
885 if (!tempUp) {
886 addr -= (single ? 4 : 8);
887 // The microops don't handle negative displacement, so turn if we
888 // hit zero, flip polarity and start adding.
889 if (addr <= 0) {
890 tempUp = true;
891 addr = -addr;
892 }
893 } else {
894 addr += (single ? 4 : 8);
895 }
896 }
897
898 if (writeback) {
899 if (up) {
900 microOps[i++] =
901 new MicroAddiUop(machInst, rn, rn, 4 * offset);
902 } else {
903 microOps[i++] =
904 new MicroSubiUop(machInst, rn, rn, 4 * offset);
905 }
906 }
907
908 assert(numMicroops == i);
909 microOps[numMicroops - 1]->setLastMicroop();
910
911 for (StaticInstPtr *curUop = microOps;
912 !(*curUop)->isLastMicroop(); curUop++) {
913 MicroOp * uopPtr = dynamic_cast<MicroOp *>(curUop->get());
914 assert(uopPtr);
915 uopPtr->setDelayedCommit();
916 }
917}
918
919std::string
920MicroIntImmOp::generateDisassembly(Addr pc, const SymbolTable *symtab) const
921{
922 std::stringstream ss;
923 printMnemonic(ss);
924 printReg(ss, ura);
925 ss << ", ";
926 printReg(ss, urb);
927 ss << ", ";
928 ccprintf(ss, "#%d", imm);
929 return ss.str();
930}
931
932std::string
933MicroSetPCCPSR::generateDisassembly(Addr pc, const SymbolTable *symtab) const
934{
935 std::stringstream ss;
936 printMnemonic(ss);
937 ss << "[PC,CPSR]";
938 return ss.str();
939}
940
941std::string
942MicroIntMov::generateDisassembly(Addr pc, const SymbolTable *symtab) const
943{
944 std::stringstream ss;
945 printMnemonic(ss);
946 printReg(ss, ura);
947 ss << ", ";
948 printReg(ss, urb);
949 return ss.str();
950}
951
952std::string
953MicroIntOp::generateDisassembly(Addr pc, const SymbolTable *symtab) const
954{
955 std::stringstream ss;
956 printMnemonic(ss);
957 printReg(ss, ura);
958 ss << ", ";
959 printReg(ss, urb);
960 ss << ", ";
961 printReg(ss, urc);
962 return ss.str();
963}
964
965std::string
966MicroMemOp::generateDisassembly(Addr pc, const SymbolTable *symtab) const
967{
968 std::stringstream ss;
969 printMnemonic(ss);
970 printReg(ss, ura);
971 ss << ", [";
972 printReg(ss, urb);
973 ss << ", ";
974 ccprintf(ss, "#%d", imm);
975 ss << "]";
976 return ss.str();
977}
978
979}
117 } else {
118 *++uop = new MicroLdrUop(machInst, regIdx,
119 INTREG_UREG0, up, addr);
120 if (reg == INTREG_PC) {
121 (*uop)->setFlag(StaticInst::IsControl);
122 if (!(condCode == COND_AL || condCode == COND_UC))
123 (*uop)->setFlag(StaticInst::IsCondControl);
124 else
125 (*uop)->setFlag(StaticInst::IsUncondControl);
126 (*uop)->setFlag(StaticInst::IsIndirectControl);
127 }
128 }
129 }
130 } else {
131 *++uop = new MicroStrUop(machInst, regIdx, INTREG_UREG0, up, addr);
132 }
133
134 if (up)
135 addr += 4;
136 else
137 addr -= 4;
138 }
139
140 if (writeback && ones) {
141 // put the register update after we're done all loading
142 if (up)
143 *++uop = new MicroAddiUop(machInst, rn, rn, ones * 4);
144 else
145 *++uop = new MicroSubiUop(machInst, rn, rn, ones * 4);
146
147 // If this was a load move the last temporary value into place
148 // this way we can't take an exception after we update the base
149 // register.
150 if (load && reg == INTREG_PC && exception_ret) {
151 *++uop = new MicroUopRegMovRet(machInst, 0, INTREG_UREG1);
152 if (!(condCode == COND_AL || condCode == COND_UC))
153 (*uop)->setFlag(StaticInst::IsCondControl);
154 else
155 (*uop)->setFlag(StaticInst::IsUncondControl);
156 } else if (load) {
157 *++uop = new MicroUopRegMov(machInst, regIdx, INTREG_UREG1);
158 if (reg == INTREG_PC) {
159 (*uop)->setFlag(StaticInst::IsControl);
160 (*uop)->setFlag(StaticInst::IsCondControl);
161 (*uop)->setFlag(StaticInst::IsIndirectControl);
162 // This is created as a RAS POP
163 if (rn == INTREG_SP)
164 (*uop)->setFlag(StaticInst::IsReturn);
165
166 }
167 }
168 }
169
170 (*uop)->setLastMicroop();
171
172 for (StaticInstPtr *curUop = microOps;
173 !(*curUop)->isLastMicroop(); curUop++) {
174 MicroOp * uopPtr = dynamic_cast<MicroOp *>(curUop->get());
175 assert(uopPtr);
176 uopPtr->setDelayedCommit();
177 }
178}
179
// Macro-op for a NEON multi-structure load (VLD1..VLD4, "multiple
// structures" form). The micro-op sequence is: one or two wide loads into
// either the destination registers or temporary microarchitectural FP
// registers, an optional base-register writeback, and (when elems > 1)
// deinterleave micro-ops that scatter the loaded data into the
// architectural destination registers.
VldMultOp::VldMultOp(const char *mnem, ExtMachInst machInst, OpClass __opClass,
                     unsigned elems, RegIndex rn, RegIndex vd, unsigned regs,
                     unsigned inc, uint32_t size, uint32_t align, RegIndex rm) :
    PredMacroOp(mnem, machInst, __opClass)
{
    assert(regs > 0 && regs <= 4);
    assert(regs % elems == 0);

    // One 16-byte load covers up to two registers; more than two regs
    // needs a second memory micro-op.
    numMicroops = (regs > 2) ? 2 : 1;
    // rm == 15 encodes "no writeback" in this encoding.
    bool wb = (rm != 15);
    // More than one element per structure means memory holds interleaved
    // data that must be deinterleaved after the load.
    bool deinterleave = (elems > 1);

    if (wb) numMicroops++;
    if (deinterleave) numMicroops += (regs / elems);
    microOps = new StaticInstPtr[numMicroops];

    // When deinterleaving, stage the raw data in the temp registers just
    // past the architectural FP regs; otherwise load straight into vd.
    RegIndex rMid = deinterleave ? NumFloatArchRegs : vd * 2;

    // Only the first memory access checks the encoded alignment.
    uint32_t noAlign = TLB::MustBeOne;

    unsigned uopIdx = 0;
    switch (regs) {
      case 4:
        microOps[uopIdx++] = newNeonMemInst<MicroLdrNeon16Uop>(
                size, machInst, rMid, rn, 0, align);
        microOps[uopIdx++] = newNeonMemInst<MicroLdrNeon16Uop>(
                size, machInst, rMid + 4, rn, 16, noAlign);
        break;
      case 3:
        microOps[uopIdx++] = newNeonMemInst<MicroLdrNeon16Uop>(
                size, machInst, rMid, rn, 0, align);
        microOps[uopIdx++] = newNeonMemInst<MicroLdrNeon8Uop>(
                size, machInst, rMid + 4, rn, 16, noAlign);
        break;
      case 2:
        microOps[uopIdx++] = newNeonMemInst<MicroLdrNeon16Uop>(
                size, machInst, rMid, rn, 0, align);
        break;
      case 1:
        microOps[uopIdx++] = newNeonMemInst<MicroLdrNeon8Uop>(
                size, machInst, rMid, rn, 0, align);
        break;
      default:
        // Unknown number of registers
        microOps[uopIdx++] = new Unknown(machInst);
    }
    if (wb) {
        // rm == 13 means post-increment by the transfer size; any other
        // (non-15) rm adds that register to the base.
        if (rm != 15 && rm != 13) {
            microOps[uopIdx++] =
                new MicroAddUop(machInst, rn, rn, rm, 0, ArmISA::LSL);
        } else {
            microOps[uopIdx++] =
                new MicroAddiUop(machInst, rn, rn, regs * 8);
        }
    }
    if (deinterleave) {
        switch (elems) {
          case 4:
            assert(regs == 4);
            microOps[uopIdx++] = newNeonMixInst<MicroDeintNeon8Uop>(
                    size, machInst, vd * 2, rMid, inc * 2);
            break;
          case 3:
            assert(regs == 3);
            microOps[uopIdx++] = newNeonMixInst<MicroDeintNeon6Uop>(
                    size, machInst, vd * 2, rMid, inc * 2);
            break;
          case 2:
            assert(regs == 4 || regs == 2);
            if (regs == 4) {
                microOps[uopIdx++] = newNeonMixInst<MicroDeintNeon4Uop>(
                        size, machInst, vd * 2, rMid, inc * 2);
                microOps[uopIdx++] = newNeonMixInst<MicroDeintNeon4Uop>(
                        size, machInst, vd * 2 + 2, rMid + 4, inc * 2);
            } else {
                microOps[uopIdx++] = newNeonMixInst<MicroDeintNeon4Uop>(
                        size, machInst, vd * 2, rMid, inc * 2);
            }
            break;
          default:
            // Bad number of elements to deinterleave
            microOps[uopIdx++] = new Unknown(machInst);
        }
    }
    // The construction above must have emitted exactly the number of
    // micro-ops accounted for when sizing the array.
    assert(uopIdx == numMicroops);

    // All but the last micro-op commit late so the whole macro-op can be
    // squashed as a unit; the last one is flagged as the end of the
    // sequence.
    for (unsigned i = 0; i < numMicroops - 1; i++) {
        MicroOp * uopPtr = dynamic_cast<MicroOp *>(microOps[i].get());
        assert(uopPtr);
        uopPtr->setDelayedCommit();
    }
    microOps[numMicroops - 1]->setLastMicroop();
}
273
// Macro-op for a NEON single-structure load (VLD1..VLD4 "single element to
// one lane" or, with all == true, "single element to all lanes"). The
// sequence is: one load micro-op that brings the structure into temporary
// microarchitectural FP registers, an optional base-register writeback,
// then unpack micro-ops that move the element(s) into the destination
// lane(s).
VldSingleOp::VldSingleOp(const char *mnem, ExtMachInst machInst,
                         OpClass __opClass, bool all, unsigned elems,
                         RegIndex rn, RegIndex vd, unsigned regs,
                         unsigned inc, uint32_t size, uint32_t align,
                         RegIndex rm, unsigned lane) :
    PredMacroOp(mnem, machInst, __opClass)
{
    assert(regs > 0 && regs <= 4);
    assert(regs % elems == 0);

    // Element size in bytes and total bytes transferred from memory.
    unsigned eBytes = (1 << size);
    unsigned loadSize = eBytes * elems;
    // Number of FP registers the loaded bytes occupy (assert-only).
    unsigned loadRegs M5_VAR_USED = (loadSize + sizeof(FloatRegBits) - 1) /
                                    sizeof(FloatRegBits);

    assert(loadRegs > 0 && loadRegs <= 4);

    // One load micro-op, plus optional writeback, plus one unpack per
    // structure.
    numMicroops = 1;
    bool wb = (rm != 15);

    if (wb) numMicroops++;
    numMicroops += (regs / elems);
    microOps = new StaticInstPtr[numMicroops];

    // Temporary staging registers just past the architectural FP regs.
    RegIndex ufp0 = NumFloatArchRegs;

    unsigned uopIdx = 0;
    // Pick the load micro-op by total transfer size; where several element
    // widths share a size, dispatch on eBytes as well.
    switch (loadSize) {
      case 1:
        microOps[uopIdx++] = new MicroLdrNeon1Uop<uint8_t>(
                machInst, ufp0, rn, 0, align);
        break;
      case 2:
        if (eBytes == 2) {
            microOps[uopIdx++] = new MicroLdrNeon2Uop<uint16_t>(
                    machInst, ufp0, rn, 0, align);
        } else {
            microOps[uopIdx++] = new MicroLdrNeon2Uop<uint8_t>(
                    machInst, ufp0, rn, 0, align);
        }
        break;
      case 3:
        microOps[uopIdx++] = new MicroLdrNeon3Uop<uint8_t>(
                machInst, ufp0, rn, 0, align);
        break;
      case 4:
        switch (eBytes) {
          case 1:
            microOps[uopIdx++] = new MicroLdrNeon4Uop<uint8_t>(
                    machInst, ufp0, rn, 0, align);
            break;
          case 2:
            microOps[uopIdx++] = new MicroLdrNeon4Uop<uint16_t>(
                    machInst, ufp0, rn, 0, align);
            break;
          case 4:
            microOps[uopIdx++] = new MicroLdrNeon4Uop<uint32_t>(
                    machInst, ufp0, rn, 0, align);
            break;
        }
        break;
      case 6:
        microOps[uopIdx++] = new MicroLdrNeon6Uop<uint16_t>(
                machInst, ufp0, rn, 0, align);
        break;
      case 8:
        switch (eBytes) {
          case 2:
            microOps[uopIdx++] = new MicroLdrNeon8Uop<uint16_t>(
                    machInst, ufp0, rn, 0, align);
            break;
          case 4:
            microOps[uopIdx++] = new MicroLdrNeon8Uop<uint32_t>(
                    machInst, ufp0, rn, 0, align);
            break;
        }
        break;
      case 12:
        microOps[uopIdx++] = new MicroLdrNeon12Uop<uint32_t>(
                machInst, ufp0, rn, 0, align);
        break;
      case 16:
        microOps[uopIdx++] = new MicroLdrNeon16Uop<uint32_t>(
                machInst, ufp0, rn, 0, align);
        break;
      default:
        // Unrecognized load size
        microOps[uopIdx++] = new Unknown(machInst);
    }
    if (wb) {
        // rm == 13 means post-increment by the transfer size; any other
        // (non-15) rm adds that register to the base.
        if (rm != 15 && rm != 13) {
            microOps[uopIdx++] =
                new MicroAddUop(machInst, rn, rn, rm, 0, ArmISA::LSL);
        } else {
            microOps[uopIdx++] =
                new MicroAddiUop(machInst, rn, rn, loadSize);
        }
    }
    // Unpack: dispatch on structure arity, then on element size; "all"
    // variants replicate the element into every lane, otherwise only the
    // selected lane is written.
    switch (elems) {
      case 4:
        assert(regs == 4);
        switch (size) {
          case 0:
            if (all) {
                microOps[uopIdx++] = new MicroUnpackAllNeon2to8Uop<uint8_t>(
                        machInst, vd * 2, ufp0, inc * 2);
            } else {
                microOps[uopIdx++] = new MicroUnpackNeon2to8Uop<uint8_t>(
                        machInst, vd * 2, ufp0, inc * 2, lane);
            }
            break;
          case 1:
            if (all) {
                microOps[uopIdx++] = new MicroUnpackAllNeon2to8Uop<uint16_t>(
                        machInst, vd * 2, ufp0, inc * 2);
            } else {
                microOps[uopIdx++] = new MicroUnpackNeon2to8Uop<uint16_t>(
                        machInst, vd * 2, ufp0, inc * 2, lane);
            }
            break;
          case 2:
            if (all) {
                microOps[uopIdx++] = new MicroUnpackAllNeon4to8Uop<uint32_t>(
                        machInst, vd * 2, ufp0, inc * 2);
            } else {
                microOps[uopIdx++] = new MicroUnpackNeon4to8Uop<uint32_t>(
                        machInst, vd * 2, ufp0, inc * 2, lane);
            }
            break;
          default:
            // Bad size
            microOps[uopIdx++] = new Unknown(machInst);
            break;
        }
        break;
      case 3:
        assert(regs == 3);
        switch (size) {
          case 0:
            if (all) {
                microOps[uopIdx++] = new MicroUnpackAllNeon2to6Uop<uint8_t>(
                        machInst, vd * 2, ufp0, inc * 2);
            } else {
                microOps[uopIdx++] = new MicroUnpackNeon2to6Uop<uint8_t>(
                        machInst, vd * 2, ufp0, inc * 2, lane);
            }
            break;
          case 1:
            if (all) {
                microOps[uopIdx++] = new MicroUnpackAllNeon2to6Uop<uint16_t>(
                        machInst, vd * 2, ufp0, inc * 2);
            } else {
                microOps[uopIdx++] = new MicroUnpackNeon2to6Uop<uint16_t>(
                        machInst, vd * 2, ufp0, inc * 2, lane);
            }
            break;
          case 2:
            if (all) {
                microOps[uopIdx++] = new MicroUnpackAllNeon4to6Uop<uint32_t>(
                        machInst, vd * 2, ufp0, inc * 2);
            } else {
                microOps[uopIdx++] = new MicroUnpackNeon4to6Uop<uint32_t>(
                        machInst, vd * 2, ufp0, inc * 2, lane);
            }
            break;
          default:
            // Bad size
            microOps[uopIdx++] = new Unknown(machInst);
            break;
        }
        break;
      case 2:
        assert(regs == 2);
        assert(loadRegs <= 2);
        switch (size) {
          case 0:
            if (all) {
                microOps[uopIdx++] = new MicroUnpackAllNeon2to4Uop<uint8_t>(
                        machInst, vd * 2, ufp0, inc * 2);
            } else {
                microOps[uopIdx++] = new MicroUnpackNeon2to4Uop<uint8_t>(
                        machInst, vd * 2, ufp0, inc * 2, lane);
            }
            break;
          case 1:
            if (all) {
                microOps[uopIdx++] = new MicroUnpackAllNeon2to4Uop<uint16_t>(
                        machInst, vd * 2, ufp0, inc * 2);
            } else {
                microOps[uopIdx++] = new MicroUnpackNeon2to4Uop<uint16_t>(
                        machInst, vd * 2, ufp0, inc * 2, lane);
            }
            break;
          case 2:
            if (all) {
                microOps[uopIdx++] = new MicroUnpackAllNeon2to4Uop<uint32_t>(
                        machInst, vd * 2, ufp0, inc * 2);
            } else {
                microOps[uopIdx++] = new MicroUnpackNeon2to4Uop<uint32_t>(
                        machInst, vd * 2, ufp0, inc * 2, lane);
            }
            break;
          default:
            // Bad size
            microOps[uopIdx++] = new Unknown(machInst);
            break;
        }
        break;
      case 1:
        // "To all lanes" VLD1 may still write two registers; emit one
        // unpack per destination register.
        assert(regs == 1 || (all && regs == 2));
        assert(loadRegs <= 2);
        for (unsigned offset = 0; offset < regs; offset++) {
            switch (size) {
              case 0:
                if (all) {
                    microOps[uopIdx++] =
                        new MicroUnpackAllNeon2to2Uop<uint8_t>(
                            machInst, (vd + offset) * 2, ufp0, inc * 2);
                } else {
                    microOps[uopIdx++] =
                        new MicroUnpackNeon2to2Uop<uint8_t>(
                            machInst, (vd + offset) * 2, ufp0, inc * 2, lane);
                }
                break;
              case 1:
                if (all) {
                    microOps[uopIdx++] =
                        new MicroUnpackAllNeon2to2Uop<uint16_t>(
                            machInst, (vd + offset) * 2, ufp0, inc * 2);
                } else {
                    microOps[uopIdx++] =
                        new MicroUnpackNeon2to2Uop<uint16_t>(
                            machInst, (vd + offset) * 2, ufp0, inc * 2, lane);
                }
                break;
              case 2:
                if (all) {
                    microOps[uopIdx++] =
                        new MicroUnpackAllNeon2to2Uop<uint32_t>(
                            machInst, (vd + offset) * 2, ufp0, inc * 2);
                } else {
                    microOps[uopIdx++] =
                        new MicroUnpackNeon2to2Uop<uint32_t>(
                            machInst, (vd + offset) * 2, ufp0, inc * 2, lane);
                }
                break;
              default:
                // Bad size
                microOps[uopIdx++] = new Unknown(machInst);
                break;
            }
        }
        break;
      default:
        // Bad number of elements to unpack
        microOps[uopIdx++] = new Unknown(machInst);
    }
    // Must match the count used to size the micro-op array.
    assert(uopIdx == numMicroops);

    // All but the last micro-op commit late; the last is flagged as the
    // end of the macro-op sequence.
    for (unsigned i = 0; i < numMicroops - 1; i++) {
        MicroOp * uopPtr = dynamic_cast<MicroOp *>(microOps[i].get());
        assert(uopPtr);
        uopPtr->setDelayedCommit();
    }
    microOps[numMicroops - 1]->setLastMicroop();
}
540
// Macro-op for a NEON multi-structure store (VST1..VST4, "multiple
// structures" form). Mirror image of VldMultOp: optional interleave
// micro-ops gather the source registers into temporary microarchitectural
// FP registers, then one or two wide stores write them out, then an
// optional base-register writeback.
VstMultOp::VstMultOp(const char *mnem, ExtMachInst machInst, OpClass __opClass,
                     unsigned elems, RegIndex rn, RegIndex vd, unsigned regs,
                     unsigned inc, uint32_t size, uint32_t align, RegIndex rm) :
    PredMacroOp(mnem, machInst, __opClass)
{
    assert(regs > 0 && regs <= 4);
    assert(regs % elems == 0);

    // One 16-byte store covers up to two registers; more than two regs
    // needs a second memory micro-op.
    numMicroops = (regs > 2) ? 2 : 1;
    // rm == 15 encodes "no writeback" in this encoding.
    bool wb = (rm != 15);
    // More than one element per structure means the data must be
    // interleaved before being stored.
    bool interleave = (elems > 1);

    if (wb) numMicroops++;
    if (interleave) numMicroops += (regs / elems);
    microOps = new StaticInstPtr[numMicroops];

    // Only the first memory access checks the encoded alignment.
    uint32_t noAlign = TLB::MustBeOne;

    // When interleaving, stage the data in the temp registers just past
    // the architectural FP regs; otherwise store straight from vd.
    RegIndex rMid = interleave ? NumFloatArchRegs : vd * 2;

    unsigned uopIdx = 0;
    if (interleave) {
        switch (elems) {
          case 4:
            assert(regs == 4);
            microOps[uopIdx++] = newNeonMixInst<MicroInterNeon8Uop>(
                    size, machInst, rMid, vd * 2, inc * 2);
            break;
          case 3:
            assert(regs == 3);
            microOps[uopIdx++] = newNeonMixInst<MicroInterNeon6Uop>(
                    size, machInst, rMid, vd * 2, inc * 2);
            break;
          case 2:
            assert(regs == 4 || regs == 2);
            if (regs == 4) {
                microOps[uopIdx++] = newNeonMixInst<MicroInterNeon4Uop>(
                        size, machInst, rMid, vd * 2, inc * 2);
                microOps[uopIdx++] = newNeonMixInst<MicroInterNeon4Uop>(
                        size, machInst, rMid + 4, vd * 2 + 2, inc * 2);
            } else {
                microOps[uopIdx++] = newNeonMixInst<MicroInterNeon4Uop>(
                        size, machInst, rMid, vd * 2, inc * 2);
            }
            break;
          default:
            // Bad number of elements to interleave
            microOps[uopIdx++] = new Unknown(machInst);
        }
    }
    switch (regs) {
      case 4:
        microOps[uopIdx++] = newNeonMemInst<MicroStrNeon16Uop>(
                size, machInst, rMid, rn, 0, align);
        microOps[uopIdx++] = newNeonMemInst<MicroStrNeon16Uop>(
                size, machInst, rMid + 4, rn, 16, noAlign);
        break;
      case 3:
        microOps[uopIdx++] = newNeonMemInst<MicroStrNeon16Uop>(
                size, machInst, rMid, rn, 0, align);
        microOps[uopIdx++] = newNeonMemInst<MicroStrNeon8Uop>(
                size, machInst, rMid + 4, rn, 16, noAlign);
        break;
      case 2:
        microOps[uopIdx++] = newNeonMemInst<MicroStrNeon16Uop>(
                size, machInst, rMid, rn, 0, align);
        break;
      case 1:
        microOps[uopIdx++] = newNeonMemInst<MicroStrNeon8Uop>(
                size, machInst, rMid, rn, 0, align);
        break;
      default:
        // Unknown number of registers
        microOps[uopIdx++] = new Unknown(machInst);
    }
    if (wb) {
        // rm == 13 means post-increment by the transfer size; any other
        // (non-15) rm adds that register to the base.
        if (rm != 15 && rm != 13) {
            microOps[uopIdx++] =
                new MicroAddUop(machInst, rn, rn, rm, 0, ArmISA::LSL);
        } else {
            microOps[uopIdx++] =
                new MicroAddiUop(machInst, rn, rn, regs * 8);
        }
    }
    // Must match the count used to size the micro-op array.
    assert(uopIdx == numMicroops);

    // All but the last micro-op commit late; the last is flagged as the
    // end of the macro-op sequence.
    for (unsigned i = 0; i < numMicroops - 1; i++) {
        MicroOp * uopPtr = dynamic_cast<MicroOp *>(microOps[i].get());
        assert(uopPtr);
        uopPtr->setDelayedCommit();
    }
    microOps[numMicroops - 1]->setLastMicroop();
}
634
// Macro-op for a NEON single-structure store (VST1..VST4 "single element
// from one lane"). Mirror image of VldSingleOp: pack micro-ops gather the
// selected lane(s) into temporary microarchitectural FP registers, one
// store micro-op writes them out, then an optional base-register
// writeback. Note the "all" form does not exist for stores (asserted
// below).
VstSingleOp::VstSingleOp(const char *mnem, ExtMachInst machInst,
                         OpClass __opClass, bool all, unsigned elems,
                         RegIndex rn, RegIndex vd, unsigned regs,
                         unsigned inc, uint32_t size, uint32_t align,
                         RegIndex rm, unsigned lane) :
    PredMacroOp(mnem, machInst, __opClass)
{
    assert(!all);
    assert(regs > 0 && regs <= 4);
    assert(regs % elems == 0);

    // Element size in bytes and total bytes transferred to memory.
    unsigned eBytes = (1 << size);
    unsigned storeSize = eBytes * elems;
    // Number of FP registers the stored bytes occupy (assert-only).
    unsigned storeRegs M5_VAR_USED = (storeSize + sizeof(FloatRegBits) - 1) /
                                     sizeof(FloatRegBits);

    assert(storeRegs > 0 && storeRegs <= 4);

    // One store micro-op, plus optional writeback, plus one pack per
    // structure.
    numMicroops = 1;
    bool wb = (rm != 15);

    if (wb) numMicroops++;
    numMicroops += (regs / elems);
    microOps = new StaticInstPtr[numMicroops];

    // Temporary staging registers just past the architectural FP regs.
    RegIndex ufp0 = NumFloatArchRegs;

    unsigned uopIdx = 0;
    // Pack: dispatch on structure arity, then on element size.
    switch (elems) {
      case 4:
        assert(regs == 4);
        switch (size) {
          case 0:
            microOps[uopIdx++] = new MicroPackNeon8to2Uop<uint8_t>(
                    machInst, ufp0, vd * 2, inc * 2, lane);
            break;
          case 1:
            microOps[uopIdx++] = new MicroPackNeon8to2Uop<uint16_t>(
                    machInst, ufp0, vd * 2, inc * 2, lane);
            break;
          case 2:
            microOps[uopIdx++] = new MicroPackNeon8to4Uop<uint32_t>(
                    machInst, ufp0, vd * 2, inc * 2, lane);
            break;
          default:
            // Bad size
            microOps[uopIdx++] = new Unknown(machInst);
            break;
        }
        break;
      case 3:
        assert(regs == 3);
        switch (size) {
          case 0:
            microOps[uopIdx++] = new MicroPackNeon6to2Uop<uint8_t>(
                    machInst, ufp0, vd * 2, inc * 2, lane);
            break;
          case 1:
            microOps[uopIdx++] = new MicroPackNeon6to2Uop<uint16_t>(
                    machInst, ufp0, vd * 2, inc * 2, lane);
            break;
          case 2:
            microOps[uopIdx++] = new MicroPackNeon6to4Uop<uint32_t>(
                    machInst, ufp0, vd * 2, inc * 2, lane);
            break;
          default:
            // Bad size
            microOps[uopIdx++] = new Unknown(machInst);
            break;
        }
        break;
      case 2:
        assert(regs == 2);
        assert(storeRegs <= 2);
        switch (size) {
          case 0:
            microOps[uopIdx++] = new MicroPackNeon4to2Uop<uint8_t>(
                    machInst, ufp0, vd * 2, inc * 2, lane);
            break;
          case 1:
            microOps[uopIdx++] = new MicroPackNeon4to2Uop<uint16_t>(
                    machInst, ufp0, vd * 2, inc * 2, lane);
            break;
          case 2:
            microOps[uopIdx++] = new MicroPackNeon4to2Uop<uint32_t>(
                    machInst, ufp0, vd * 2, inc * 2, lane);
            break;
          default:
            // Bad size
            microOps[uopIdx++] = new Unknown(machInst);
            break;
        }
        break;
      case 1:
        // NOTE(review): the "all && regs == 2" arm is unreachable given
        // the assert(!all) above; kept to mirror VldSingleOp.
        assert(regs == 1 || (all && regs == 2));
        assert(storeRegs <= 2);
        for (unsigned offset = 0; offset < regs; offset++) {
            switch (size) {
              case 0:
                microOps[uopIdx++] = new MicroPackNeon2to2Uop<uint8_t>(
                        machInst, ufp0, (vd + offset) * 2, inc * 2, lane);
                break;
              case 1:
                microOps[uopIdx++] = new MicroPackNeon2to2Uop<uint16_t>(
                        machInst, ufp0, (vd + offset) * 2, inc * 2, lane);
                break;
              case 2:
                microOps[uopIdx++] = new MicroPackNeon2to2Uop<uint32_t>(
                        machInst, ufp0, (vd + offset) * 2, inc * 2, lane);
                break;
              default:
                // Bad size
                microOps[uopIdx++] = new Unknown(machInst);
                break;
            }
        }
        break;
      default:
        // Bad number of elements to unpack
        microOps[uopIdx++] = new Unknown(machInst);
    }
    // Pick the store micro-op by total transfer size; where several
    // element widths share a size, dispatch on eBytes as well.
    switch (storeSize) {
      case 1:
        microOps[uopIdx++] = new MicroStrNeon1Uop<uint8_t>(
                machInst, ufp0, rn, 0, align);
        break;
      case 2:
        if (eBytes == 2) {
            microOps[uopIdx++] = new MicroStrNeon2Uop<uint16_t>(
                    machInst, ufp0, rn, 0, align);
        } else {
            microOps[uopIdx++] = new MicroStrNeon2Uop<uint8_t>(
                    machInst, ufp0, rn, 0, align);
        }
        break;
      case 3:
        microOps[uopIdx++] = new MicroStrNeon3Uop<uint8_t>(
                machInst, ufp0, rn, 0, align);
        break;
      case 4:
        switch (eBytes) {
          case 1:
            microOps[uopIdx++] = new MicroStrNeon4Uop<uint8_t>(
                    machInst, ufp0, rn, 0, align);
            break;
          case 2:
            microOps[uopIdx++] = new MicroStrNeon4Uop<uint16_t>(
                    machInst, ufp0, rn, 0, align);
            break;
          case 4:
            microOps[uopIdx++] = new MicroStrNeon4Uop<uint32_t>(
                    machInst, ufp0, rn, 0, align);
            break;
        }
        break;
      case 6:
        microOps[uopIdx++] = new MicroStrNeon6Uop<uint16_t>(
                machInst, ufp0, rn, 0, align);
        break;
      case 8:
        switch (eBytes) {
          case 2:
            microOps[uopIdx++] = new MicroStrNeon8Uop<uint16_t>(
                    machInst, ufp0, rn, 0, align);
            break;
          case 4:
            microOps[uopIdx++] = new MicroStrNeon8Uop<uint32_t>(
                    machInst, ufp0, rn, 0, align);
            break;
        }
        break;
      case 12:
        microOps[uopIdx++] = new MicroStrNeon12Uop<uint32_t>(
                machInst, ufp0, rn, 0, align);
        break;
      case 16:
        microOps[uopIdx++] = new MicroStrNeon16Uop<uint32_t>(
                machInst, ufp0, rn, 0, align);
        break;
      default:
        // Bad store size
        microOps[uopIdx++] = new Unknown(machInst);
    }
    if (wb) {
        // rm == 13 means post-increment by the transfer size; any other
        // (non-15) rm adds that register to the base.
        if (rm != 15 && rm != 13) {
            microOps[uopIdx++] =
                new MicroAddUop(machInst, rn, rn, rm, 0, ArmISA::LSL);
        } else {
            microOps[uopIdx++] =
                new MicroAddiUop(machInst, rn, rn, storeSize);
        }
    }
    // Must match the count used to size the micro-op array.
    assert(uopIdx == numMicroops);

    // All but the last micro-op commit late; the last is flagged as the
    // end of the macro-op sequence.
    for (unsigned i = 0; i < numMicroops - 1; i++) {
        MicroOp * uopPtr = dynamic_cast<MicroOp *>(microOps[i].get());
        assert(uopPtr);
        uopPtr->setDelayedCommit();
    }
    microOps[numMicroops - 1]->setLastMicroop();
}
836
// Macro-op for VFP load/store multiple (VLDM/VSTM, and the deprecated
// FLDMX/FSTMX forms). Emits one micro-op per single-precision transfer
// (two per double-precision register) plus an optional base-register
// writeback at the end.
MacroVFPMemOp::MacroVFPMemOp(const char *mnem, ExtMachInst machInst,
                             OpClass __opClass, IntRegIndex rn,
                             RegIndex vd, bool single, bool up,
                             bool writeback, bool load, uint32_t offset) :
    PredMacroOp(mnem, machInst, __opClass)
{
    int i = 0;

    // The lowest order bit selects fldmx (set) or fldmd (clear). These seem
    // to be functionally identical except that fldmx is deprecated. For now
    // we'll assume they're otherwise interchangeable.
    int count = (single ? offset : (offset / 2));
    if (count == 0 || count > NumFloatArchRegs)
        warn_once("Bad offset field for VFP load/store multiple.\n");
    if (count == 0) {
        // Force there to be at least one microop so the macroop makes sense.
        writeback = true;
    }
    if (count > NumFloatArchRegs)
        count = NumFloatArchRegs;

    numMicroops = count * (single ? 1 : 2) + (writeback ? 1 : 0);
    microOps = new StaticInstPtr[numMicroops];

    int64_t addr = 0;

    // For a decrementing transfer, start at the top of the region and
    // work down toward the base.
    if (!up)
        addr = 4 * offset;

    bool tempUp = up;
    for (int j = 0; j < count; j++) {
        if (load) {
            if (single) {
                microOps[i++] = new MicroLdrFpUop(machInst, vd++, rn,
                                                  tempUp, addr);
            } else {
                // Doubles are split into bottom/top single-word accesses.
                microOps[i++] = new MicroLdrDBFpUop(machInst, vd++, rn,
                                                    tempUp, addr);
                microOps[i++] = new MicroLdrDTFpUop(machInst, vd++, rn, tempUp,
                                                    addr + (up ? 4 : -4));
            }
        } else {
            if (single) {
                microOps[i++] = new MicroStrFpUop(machInst, vd++, rn,
                                                  tempUp, addr);
            } else {
                microOps[i++] = new MicroStrDBFpUop(machInst, vd++, rn,
                                                    tempUp, addr);
                microOps[i++] = new MicroStrDTFpUop(machInst, vd++, rn, tempUp,
                                                    addr + (up ? 4 : -4));
            }
        }
        if (!tempUp) {
            addr -= (single ? 4 : 8);
            // The microops don't handle negative displacement, so if we
            // hit zero, flip polarity and start adding.
            if (addr <= 0) {
                tempUp = true;
                addr = -addr;
            }
        } else {
            addr += (single ? 4 : 8);
        }
    }

    if (writeback) {
        // Adjust the base by the full encoded transfer size at the end.
        if (up) {
            microOps[i++] =
                new MicroAddiUop(machInst, rn, rn, 4 * offset);
        } else {
            microOps[i++] =
                new MicroSubiUop(machInst, rn, rn, 4 * offset);
        }
    }

    // Must have emitted exactly the number of micro-ops allocated above.
    assert(numMicroops == i);
    microOps[numMicroops - 1]->setLastMicroop();

    // All micro-ops before the last commit late so the macro-op can be
    // squashed as a unit.
    for (StaticInstPtr *curUop = microOps;
         !(*curUop)->isLastMicroop(); curUop++) {
        MicroOp * uopPtr = dynamic_cast<MicroOp *>(curUop->get());
        assert(uopPtr);
        uopPtr->setDelayedCommit();
    }
}
922
923std::string
924MicroIntImmOp::generateDisassembly(Addr pc, const SymbolTable *symtab) const
925{
926 std::stringstream ss;
927 printMnemonic(ss);
928 printReg(ss, ura);
929 ss << ", ";
930 printReg(ss, urb);
931 ss << ", ";
932 ccprintf(ss, "#%d", imm);
933 return ss.str();
934}
935
936std::string
937MicroSetPCCPSR::generateDisassembly(Addr pc, const SymbolTable *symtab) const
938{
939 std::stringstream ss;
940 printMnemonic(ss);
941 ss << "[PC,CPSR]";
942 return ss.str();
943}
944
945std::string
946MicroIntMov::generateDisassembly(Addr pc, const SymbolTable *symtab) const
947{
948 std::stringstream ss;
949 printMnemonic(ss);
950 printReg(ss, ura);
951 ss << ", ";
952 printReg(ss, urb);
953 return ss.str();
954}
955
956std::string
957MicroIntOp::generateDisassembly(Addr pc, const SymbolTable *symtab) const
958{
959 std::stringstream ss;
960 printMnemonic(ss);
961 printReg(ss, ura);
962 ss << ", ";
963 printReg(ss, urb);
964 ss << ", ";
965 printReg(ss, urc);
966 return ss.str();
967}
968
969std::string
970MicroMemOp::generateDisassembly(Addr pc, const SymbolTable *symtab) const
971{
972 std::stringstream ss;
973 printMnemonic(ss);
974 printReg(ss, ura);
975 ss << ", [";
976 printReg(ss, urb);
977 ss << ", ";
978 ccprintf(ss, "#%d", imm);
979 ss << "]";
980 return ss.str();
981}
982
983}