macromem.cc (9640:35198406dd72) | macromem.cc (10037:5cac77888310) |
---|---|
1/* | 1/* |
2 * Copyright (c) 2010 ARM Limited | 2 * Copyright (c) 2010-2013 ARM Limited |
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
--- 27 unchanged lines hidden (view full) ---
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Stephen Hines
 */

#include <sstream>

#include "arch/arm/insts/macromem.hh"
46 |
|
46#include "arch/arm/generated/decoder.hh" | 47#include "arch/arm/generated/decoder.hh" |
48#include "arch/arm/insts/neon64_mem.hh" |
|
47 48using namespace std; 49using namespace ArmISAInst; 50 51namespace ArmISA 52{ 53 54MacroMemOp::MacroMemOp(const char *mnem, ExtMachInst machInst, --- 117 unchanged lines hidden (view full) --- 172 for (StaticInstPtr *curUop = microOps; 173 !(*curUop)->isLastMicroop(); curUop++) { 174 MicroOp * uopPtr = dynamic_cast<MicroOp *>(curUop->get()); 175 assert(uopPtr); 176 uopPtr->setDelayedCommit(); 177 } 178} 179 | 49 50using namespace std; 51using namespace ArmISAInst; 52 53namespace ArmISA 54{ 55 56MacroMemOp::MacroMemOp(const char *mnem, ExtMachInst machInst, --- 117 unchanged lines hidden (view full) --- 174 for (StaticInstPtr *curUop = microOps; 175 !(*curUop)->isLastMicroop(); curUop++) { 176 MicroOp * uopPtr = dynamic_cast<MicroOp *>(curUop->get()); 177 assert(uopPtr); 178 uopPtr->setDelayedCommit(); 179 } 180} 181 |
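All of the macro-op constructors in this file follow the same flagging convention: every micro-op except the final one is marked "delayed commit", and the final one is marked as the last micro-op, so architectural state is only committed once the whole macro-op has executed. The following standalone sketch (illustrative only, not gem5 code; the FakeMicroOp type and flagMicroops helper are hypothetical stand-ins for the real MicroOp classes) restates that convention:

#include <cassert>
#include <cstddef>
#include <vector>

struct FakeMicroOp {              // stand-in for gem5's MicroOp
    bool delayedCommit = false;
    bool lastMicroop = false;
};

// Mirror the pattern used by MacroMemOp and the other constructors below:
// all but the last micro-op are delayed-commit, the last one ends the macro-op.
void flagMicroops(std::vector<FakeMicroOp> &uops)
{
    assert(!uops.empty());
    for (std::size_t i = 0; i + 1 < uops.size(); ++i)
        uops[i].delayedCommit = true;
    uops.back().lastMicroop = true;
}

int main()
{
    std::vector<FakeMicroOp> uops(4);
    flagMicroops(uops);
    return uops.back().lastMicroop ? 0 : 1;
}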
PairMemOp::PairMemOp(const char *mnem, ExtMachInst machInst, OpClass __opClass,
                     uint32_t size, bool fp, bool load, bool noAlloc,
                     bool signExt, bool exclusive, bool acrel,
                     int64_t imm, AddrMode mode,
                     IntRegIndex rn, IntRegIndex rt, IntRegIndex rt2) :
    PredMacroOp(mnem, machInst, __opClass)
{
    bool writeback = (mode != AddrMd_Offset);
    numMicroops = 1 + (size / 4) + (writeback ? 1 : 0);
    microOps = new StaticInstPtr[numMicroops];

    StaticInstPtr *uop = microOps;

    bool post = (mode == AddrMd_PostIndex);

    rn = makeSP(rn);

    *uop = new MicroAddXiSpAlignUop(machInst, INTREG_UREG0, rn, post ? 0 : imm);

    if (fp) {
        if (size == 16) {
            if (load) {
                *++uop = new MicroLdrQBFpXImmUop(machInst, rt,
                        INTREG_UREG0, 0, noAlloc, exclusive, acrel);
                *++uop = new MicroLdrQTFpXImmUop(machInst, rt,
                        INTREG_UREG0, 0, noAlloc, exclusive, acrel);
                *++uop = new MicroLdrQBFpXImmUop(machInst, rt2,
                        INTREG_UREG0, 16, noAlloc, exclusive, acrel);
                *++uop = new MicroLdrQTFpXImmUop(machInst, rt2,
                        INTREG_UREG0, 16, noAlloc, exclusive, acrel);
            } else {
                *++uop = new MicroStrQBFpXImmUop(machInst, rt,
                        INTREG_UREG0, 0, noAlloc, exclusive, acrel);
                *++uop = new MicroStrQTFpXImmUop(machInst, rt,
                        INTREG_UREG0, 0, noAlloc, exclusive, acrel);
                *++uop = new MicroStrQBFpXImmUop(machInst, rt2,
                        INTREG_UREG0, 16, noAlloc, exclusive, acrel);
                *++uop = new MicroStrQTFpXImmUop(machInst, rt2,
                        INTREG_UREG0, 16, noAlloc, exclusive, acrel);
            }
        } else if (size == 8) {
            if (load) {
                *++uop = new MicroLdrFpXImmUop(machInst, rt,
                        INTREG_UREG0, 0, noAlloc, exclusive, acrel);
                *++uop = new MicroLdrFpXImmUop(machInst, rt2,
                        INTREG_UREG0, 8, noAlloc, exclusive, acrel);
            } else {
                *++uop = new MicroStrFpXImmUop(machInst, rt,
                        INTREG_UREG0, 0, noAlloc, exclusive, acrel);
                *++uop = new MicroStrFpXImmUop(machInst, rt2,
                        INTREG_UREG0, 8, noAlloc, exclusive, acrel);
            }
        } else if (size == 4) {
            if (load) {
                *++uop = new MicroLdrDFpXImmUop(machInst, rt, rt2,
                        INTREG_UREG0, 0, noAlloc, exclusive, acrel);
            } else {
                *++uop = new MicroStrDFpXImmUop(machInst, rt, rt2,
                        INTREG_UREG0, 0, noAlloc, exclusive, acrel);
            }
        }
    } else {
        if (size == 8) {
            if (load) {
                *++uop = new MicroLdrXImmUop(machInst, rt, INTREG_UREG0,
                        0, noAlloc, exclusive, acrel);
                *++uop = new MicroLdrXImmUop(machInst, rt2, INTREG_UREG0,
                        size, noAlloc, exclusive, acrel);
            } else {
                *++uop = new MicroStrXImmUop(machInst, rt, INTREG_UREG0,
                        0, noAlloc, exclusive, acrel);
                *++uop = new MicroStrXImmUop(machInst, rt2, INTREG_UREG0,
                        size, noAlloc, exclusive, acrel);
            }
        } else if (size == 4) {
            if (load) {
                if (signExt) {
                    *++uop = new MicroLdrDSXImmUop(machInst, rt, rt2,
                            INTREG_UREG0, 0, noAlloc, exclusive, acrel);
                } else {
                    *++uop = new MicroLdrDUXImmUop(machInst, rt, rt2,
                            INTREG_UREG0, 0, noAlloc, exclusive, acrel);
                }
            } else {
                *++uop = new MicroStrDXImmUop(machInst, rt, rt2,
                        INTREG_UREG0, 0, noAlloc, exclusive, acrel);
            }
        }
    }

    if (writeback) {
        *++uop = new MicroAddXiUop(machInst, rn, INTREG_UREG0,
                                   post ? imm : 0);
    }

    (*uop)->setLastMicroop();

    for (StaticInstPtr *curUop = microOps;
         !(*curUop)->isLastMicroop(); curUop++) {
        (*curUop)->setDelayedCommit();
    }
}

BigFpMemImmOp::BigFpMemImmOp(const char *mnem, ExtMachInst machInst,
                             OpClass __opClass, bool load, IntRegIndex dest,
                             IntRegIndex base, int64_t imm) :
    PredMacroOp(mnem, machInst, __opClass)
{
    numMicroops = 2;
    microOps = new StaticInstPtr[numMicroops];

    if (load) {
        microOps[0] = new MicroLdrQBFpXImmUop(machInst, dest, base, imm);
        microOps[1] = new MicroLdrQTFpXImmUop(machInst, dest, base, imm);
    } else {
        microOps[0] = new MicroStrQBFpXImmUop(machInst, dest, base, imm);
        microOps[1] = new MicroStrQTFpXImmUop(machInst, dest, base, imm);
    }
    microOps[0]->setDelayedCommit();
    microOps[1]->setLastMicroop();
}

BigFpMemPostOp::BigFpMemPostOp(const char *mnem, ExtMachInst machInst,
                               OpClass __opClass, bool load, IntRegIndex dest,
                               IntRegIndex base, int64_t imm) :
    PredMacroOp(mnem, machInst, __opClass)
{
    numMicroops = 3;
    microOps = new StaticInstPtr[numMicroops];

    if (load) {
        microOps[0] = new MicroLdrQBFpXImmUop(machInst, dest, base, 0);
        microOps[1] = new MicroLdrQTFpXImmUop(machInst, dest, base, 0);
    } else {
        microOps[0] = new MicroStrQBFpXImmUop(machInst, dest, base, 0);
        microOps[1] = new MicroStrQTFpXImmUop(machInst, dest, base, 0);
    }
    microOps[2] = new MicroAddXiUop(machInst, base, base, imm);

    microOps[0]->setDelayedCommit();
    microOps[1]->setDelayedCommit();
    microOps[2]->setLastMicroop();
}

BigFpMemPreOp::BigFpMemPreOp(const char *mnem, ExtMachInst machInst,
                             OpClass __opClass, bool load, IntRegIndex dest,
                             IntRegIndex base, int64_t imm) :
    PredMacroOp(mnem, machInst, __opClass)
{
    numMicroops = 3;
    microOps = new StaticInstPtr[numMicroops];

    if (load) {
        microOps[0] = new MicroLdrQBFpXImmUop(machInst, dest, base, imm);
        microOps[1] = new MicroLdrQTFpXImmUop(machInst, dest, base, imm);
    } else {
        microOps[0] = new MicroStrQBFpXImmUop(machInst, dest, base, imm);
        microOps[1] = new MicroStrQTFpXImmUop(machInst, dest, base, imm);
    }
    microOps[2] = new MicroAddXiUop(machInst, base, base, imm);

    microOps[0]->setDelayedCommit();
    microOps[1]->setDelayedCommit();
    microOps[2]->setLastMicroop();
}

BigFpMemRegOp::BigFpMemRegOp(const char *mnem, ExtMachInst machInst,
                             OpClass __opClass, bool load, IntRegIndex dest,
                             IntRegIndex base, IntRegIndex offset,
                             ArmExtendType type, int64_t imm) :
    PredMacroOp(mnem, machInst, __opClass)
{
    numMicroops = 2;
    microOps = new StaticInstPtr[numMicroops];

    if (load) {
        microOps[0] = new MicroLdrQBFpXRegUop(machInst, dest, base,
                                              offset, type, imm);
        microOps[1] = new MicroLdrQTFpXRegUop(machInst, dest, base,
                                              offset, type, imm);
    } else {
        microOps[0] = new MicroStrQBFpXRegUop(machInst, dest, base,
                                              offset, type, imm);
        microOps[1] = new MicroStrQTFpXRegUop(machInst, dest, base,
                                              offset, type, imm);
    }

    microOps[0]->setDelayedCommit();
    microOps[1]->setLastMicroop();
}

BigFpMemLitOp::BigFpMemLitOp(const char *mnem, ExtMachInst machInst,
                             OpClass __opClass, IntRegIndex dest,
                             int64_t imm) :
    PredMacroOp(mnem, machInst, __opClass)
{
    numMicroops = 2;
    microOps = new StaticInstPtr[numMicroops];

    microOps[0] = new MicroLdrQBFpXLitUop(machInst, dest, imm);
    microOps[1] = new MicroLdrQTFpXLitUop(machInst, dest, imm);

    microOps[0]->setDelayedCommit();
    microOps[1]->setLastMicroop();
}
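As a quick illustration of the micro-op budget used by PairMemOp above: it emits one address-generation micro-op, size/4 memory micro-ops (a 16-byte access plausibly corresponds to a pair of Q registers, each split into "bottom" and "top" 64-bit halves), and one optional writeback micro-op. The standalone sketch below (illustrative only, not gem5 code; pairMemMicroopCount is a hypothetical name) restates that formula:

#include <cstdio>

// Same accounting as PairMemOp: 1 address uop + size/4 memory uops
// + 1 writeback uop when the addressing mode is not plain offset.
unsigned pairMemMicroopCount(unsigned size, bool writeback)
{
    return 1 + (size / 4) + (writeback ? 1u : 0u);
}

int main()
{
    // e.g. a pair of Q registers with writeback: 1 + 4 + 1 = 6 micro-ops
    std::printf("q-pair, writeback: %u uops\n", pairMemMicroopCount(16, true));
    // e.g. a pair of X registers, offset addressing: 1 + 2 + 0 = 3 micro-ops
    std::printf("x-pair, offset:    %u uops\n", pairMemMicroopCount(8, false));
    return 0;
}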
|
180VldMultOp::VldMultOp(const char *mnem, ExtMachInst machInst, OpClass __opClass, 181 unsigned elems, RegIndex rn, RegIndex vd, unsigned regs, 182 unsigned inc, uint32_t size, uint32_t align, RegIndex rm) : 183 PredMacroOp(mnem, machInst, __opClass) 184{ 185 assert(regs > 0 && regs <= 4); 186 assert(regs % elems == 0); 187 188 numMicroops = (regs > 2) ? 2 : 1; 189 bool wb = (rm != 15); 190 bool deinterleave = (elems > 1); 191 192 if (wb) numMicroops++; 193 if (deinterleave) numMicroops += (regs / elems); 194 microOps = new StaticInstPtr[numMicroops]; 195 | 388VldMultOp::VldMultOp(const char *mnem, ExtMachInst machInst, OpClass __opClass, 389 unsigned elems, RegIndex rn, RegIndex vd, unsigned regs, 390 unsigned inc, uint32_t size, uint32_t align, RegIndex rm) : 391 PredMacroOp(mnem, machInst, __opClass) 392{ 393 assert(regs > 0 && regs <= 4); 394 assert(regs % elems == 0); 395 396 numMicroops = (regs > 2) ? 2 : 1; 397 bool wb = (rm != 15); 398 bool deinterleave = (elems > 1); 399 400 if (wb) numMicroops++; 401 if (deinterleave) numMicroops += (regs / elems); 402 microOps = new StaticInstPtr[numMicroops]; 403 |
196 RegIndex rMid = deinterleave ? NumFloatArchRegs : vd * 2; | 404 RegIndex rMid = deinterleave ? NumFloatV7ArchRegs : vd * 2; |
197 198 uint32_t noAlign = TLB::MustBeOne; 199 200 unsigned uopIdx = 0; 201 switch (regs) { 202 case 4: 203 microOps[uopIdx++] = newNeonMemInst<MicroLdrNeon16Uop>( 204 size, machInst, rMid, rn, 0, align); --- 85 unchanged lines hidden (view full) --- 290 291 numMicroops = 1; 292 bool wb = (rm != 15); 293 294 if (wb) numMicroops++; 295 numMicroops += (regs / elems); 296 microOps = new StaticInstPtr[numMicroops]; 297 | 405 406 uint32_t noAlign = TLB::MustBeOne; 407 408 unsigned uopIdx = 0; 409 switch (regs) { 410 case 4: 411 microOps[uopIdx++] = newNeonMemInst<MicroLdrNeon16Uop>( 412 size, machInst, rMid, rn, 0, align); --- 85 unchanged lines hidden (view full) --- 498 499 numMicroops = 1; 500 bool wb = (rm != 15); 501 502 if (wb) numMicroops++; 503 numMicroops += (regs / elems); 504 microOps = new StaticInstPtr[numMicroops]; 505 |
298 RegIndex ufp0 = NumFloatArchRegs; | 506 RegIndex ufp0 = NumFloatV7ArchRegs; |
299 300 unsigned uopIdx = 0; 301 switch (loadSize) { 302 case 1: 303 microOps[uopIdx++] = new MicroLdrNeon1Uop<uint8_t>( 304 machInst, ufp0, rn, 0, align); 305 break; 306 case 2: --- 244 unchanged lines hidden (view full) --- 551 bool interleave = (elems > 1); 552 553 if (wb) numMicroops++; 554 if (interleave) numMicroops += (regs / elems); 555 microOps = new StaticInstPtr[numMicroops]; 556 557 uint32_t noAlign = TLB::MustBeOne; 558 | 507 508 unsigned uopIdx = 0; 509 switch (loadSize) { 510 case 1: 511 microOps[uopIdx++] = new MicroLdrNeon1Uop<uint8_t>( 512 machInst, ufp0, rn, 0, align); 513 break; 514 case 2: --- 244 unchanged lines hidden (view full) --- 759 bool interleave = (elems > 1); 760 761 if (wb) numMicroops++; 762 if (interleave) numMicroops += (regs / elems); 763 microOps = new StaticInstPtr[numMicroops]; 764 765 uint32_t noAlign = TLB::MustBeOne; 766 |
559 RegIndex rMid = interleave ? NumFloatArchRegs : vd * 2; | 767 RegIndex rMid = interleave ? NumFloatV7ArchRegs : vd * 2; |
560 561 unsigned uopIdx = 0; 562 if (interleave) { 563 switch (elems) { 564 case 4: 565 assert(regs == 4); 566 microOps[uopIdx++] = newNeonMixInst<MicroInterNeon8Uop>( 567 size, machInst, rMid, vd * 2, inc * 2); --- 84 unchanged lines hidden (view full) --- 652 653 numMicroops = 1; 654 bool wb = (rm != 15); 655 656 if (wb) numMicroops++; 657 numMicroops += (regs / elems); 658 microOps = new StaticInstPtr[numMicroops]; 659 | 768 769 unsigned uopIdx = 0; 770 if (interleave) { 771 switch (elems) { 772 case 4: 773 assert(regs == 4); 774 microOps[uopIdx++] = newNeonMixInst<MicroInterNeon8Uop>( 775 size, machInst, rMid, vd * 2, inc * 2); --- 84 unchanged lines hidden (view full) --- 860 861 numMicroops = 1; 862 bool wb = (rm != 15); 863 864 if (wb) numMicroops++; 865 numMicroops += (regs / elems); 866 microOps = new StaticInstPtr[numMicroops]; 867 |
660 RegIndex ufp0 = NumFloatArchRegs; | 868 RegIndex ufp0 = NumFloatV7ArchRegs; |
661 662 unsigned uopIdx = 0; 663 switch (elems) { 664 case 4: 665 assert(regs == 4); 666 switch (size) { 667 case 0: 668 microOps[uopIdx++] = new MicroPackNeon8to2Uop<uint8_t>( --- 160 unchanged lines hidden (view full) --- 829 for (unsigned i = 0; i < numMicroops - 1; i++) { 830 MicroOp * uopPtr = dynamic_cast<MicroOp *>(microOps[i].get()); 831 assert(uopPtr); 832 uopPtr->setDelayedCommit(); 833 } 834 microOps[numMicroops - 1]->setLastMicroop(); 835} 836 | 869 870 unsigned uopIdx = 0; 871 switch (elems) { 872 case 4: 873 assert(regs == 4); 874 switch (size) { 875 case 0: 876 microOps[uopIdx++] = new MicroPackNeon8to2Uop<uint8_t>( --- 160 unchanged lines hidden (view full) --- 1037 for (unsigned i = 0; i < numMicroops - 1; i++) { 1038 MicroOp * uopPtr = dynamic_cast<MicroOp *>(microOps[i].get()); 1039 assert(uopPtr); 1040 uopPtr->setDelayedCommit(); 1041 } 1042 microOps[numMicroops - 1]->setLastMicroop(); 1043} 1044 |
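Before moving on to the AArch64 variants, the following standalone sketch (illustrative only, not gem5 code; vldMultMicroopCount is a hypothetical name) restates the micro-op accounting visible in VldMultOp above: one NEON load micro-op (two when more than two registers are transferred), an optional base-register writeback micro-op when Rm != 15, and regs/elems de-interleave micro-ops when the loaded data has to be split back into element structures via the scratch registers starting at NumFloatV7ArchRegs.

#include <cassert>
#include <cstdio>

unsigned vldMultMicroopCount(unsigned regs, unsigned elems, unsigned rm)
{
    assert(regs > 0 && regs <= 4 && regs % elems == 0);
    unsigned n = (regs > 2) ? 2 : 1;  // load micro-ops
    if (rm != 15)                     // writeback requested
        ++n;
    if (elems > 1)                    // de-interleave stage
        n += regs / elems;
    return n;
}

int main()
{
    // e.g. a 4-register, 4-element load with writeback:
    // 2 loads + 1 writeback + 1 de-interleave = 4 micro-ops
    std::printf("4 regs, 4 elems, wb: %u uops\n", vldMultMicroopCount(4, 4, 1));
    // e.g. a single-register load, no writeback: 1 micro-op
    std::printf("1 reg, no wb:        %u uops\n", vldMultMicroopCount(1, 1, 15));
    return 0;
}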
VldMultOp64::VldMultOp64(const char *mnem, ExtMachInst machInst,
                         OpClass __opClass, RegIndex rn, RegIndex vd,
                         RegIndex rm, uint8_t eSize, uint8_t dataSize,
                         uint8_t numStructElems, uint8_t numRegs, bool wb) :
    PredMacroOp(mnem, machInst, __opClass)
{
    RegIndex vx = NumFloatV8ArchRegs / 4;
    RegIndex rnsp = (RegIndex) makeSP((IntRegIndex) rn);
    bool baseIsSP = isSP((IntRegIndex) rnsp);

    numMicroops = wb ? 1 : 0;

    int totNumBytes = numRegs * dataSize / 8;
    assert(totNumBytes <= 64);

    // The guiding principle here is that no more than 16 bytes can be
    // transferred at a time
    int numMemMicroops = totNumBytes / 16;
    int residuum = totNumBytes % 16;
    if (residuum)
        ++numMemMicroops;
    numMicroops += numMemMicroops;

    int numMarshalMicroops = numRegs / 2 + (numRegs % 2 ? 1 : 0);
    numMicroops += numMarshalMicroops;

    microOps = new StaticInstPtr[numMicroops];
    unsigned uopIdx = 0;
    uint32_t memaccessFlags = TLB::MustBeOne | (TLB::ArmFlags) eSize |
        TLB::AllowUnaligned;

    int i = 0;
    for (; i < numMemMicroops - 1; ++i) {
        microOps[uopIdx++] = new MicroNeonLoad64(
            machInst, vx + (RegIndex) i, rnsp, 16 * i, memaccessFlags,
            baseIsSP, 16 /* accSize */, eSize);
    }
    microOps[uopIdx++] = new MicroNeonLoad64(
        machInst, vx + (RegIndex) i, rnsp, 16 * i, memaccessFlags, baseIsSP,
        residuum ? residuum : 16 /* accSize */, eSize);

    // Writeback microop: the post-increment amount is encoded in "Rm": a
    // 64-bit general register OR as '11111' for an immediate value equal to
    // the total number of bytes transferred (i.e. 8, 16, 24, 32, 48 or 64)
    if (wb) {
        if (rm != ((RegIndex) INTREG_X31)) {
            microOps[uopIdx++] = new MicroAddXERegUop(machInst, rnsp, rnsp, rm,
                                                      UXTX, 0);
        } else {
            microOps[uopIdx++] = new MicroAddXiUop(machInst, rnsp, rnsp,
                                                   totNumBytes);
        }
    }

    for (int i = 0; i < numMarshalMicroops; ++i) {
        microOps[uopIdx++] = new MicroDeintNeon64(
            machInst, vd + (RegIndex) (2 * i), vx, eSize, dataSize,
            numStructElems, numRegs, i /* step */);
    }

    assert(uopIdx == numMicroops);

    for (int i = 0; i < numMicroops - 1; ++i) {
        microOps[i]->setDelayedCommit();
    }
    microOps[numMicroops - 1]->setLastMicroop();
}

VstMultOp64::VstMultOp64(const char *mnem, ExtMachInst machInst,
                         OpClass __opClass, RegIndex rn, RegIndex vd,
                         RegIndex rm, uint8_t eSize, uint8_t dataSize,
                         uint8_t numStructElems, uint8_t numRegs, bool wb) :
    PredMacroOp(mnem, machInst, __opClass)
{
    RegIndex vx = NumFloatV8ArchRegs / 4;
    RegIndex rnsp = (RegIndex) makeSP((IntRegIndex) rn);
    bool baseIsSP = isSP((IntRegIndex) rnsp);

    numMicroops = wb ? 1 : 0;

    int totNumBytes = numRegs * dataSize / 8;
    assert(totNumBytes <= 64);

    // The guiding principle here is that no more than 16 bytes can be
    // transferred at a time
    int numMemMicroops = totNumBytes / 16;
    int residuum = totNumBytes % 16;
    if (residuum)
        ++numMemMicroops;
    numMicroops += numMemMicroops;

    int numMarshalMicroops = totNumBytes > 32 ? 2 : 1;
    numMicroops += numMarshalMicroops;

    microOps = new StaticInstPtr[numMicroops];
    unsigned uopIdx = 0;

    for (int i = 0; i < numMarshalMicroops; ++i) {
        microOps[uopIdx++] = new MicroIntNeon64(
            machInst, vx + (RegIndex) (2 * i), vd, eSize, dataSize,
            numStructElems, numRegs, i /* step */);
    }

    uint32_t memaccessFlags = TLB::MustBeOne | (TLB::ArmFlags) eSize |
        TLB::AllowUnaligned;

    int i = 0;
    for (; i < numMemMicroops - 1; ++i) {
        microOps[uopIdx++] = new MicroNeonStore64(
            machInst, vx + (RegIndex) i, rnsp, 16 * i, memaccessFlags,
            baseIsSP, 16 /* accSize */, eSize);
    }
    microOps[uopIdx++] = new MicroNeonStore64(
        machInst, vx + (RegIndex) i, rnsp, 16 * i, memaccessFlags, baseIsSP,
        residuum ? residuum : 16 /* accSize */, eSize);

    // Writeback microop: the post-increment amount is encoded in "Rm": a
    // 64-bit general register OR as '11111' for an immediate value equal to
    // the total number of bytes transferred (i.e. 8, 16, 24, 32, 48 or 64)
    if (wb) {
        if (rm != ((RegIndex) INTREG_X31)) {
            microOps[uopIdx++] = new MicroAddXERegUop(machInst, rnsp, rnsp, rm,
                                                      UXTX, 0);
        } else {
            microOps[uopIdx++] = new MicroAddXiUop(machInst, rnsp, rnsp,
                                                   totNumBytes);
        }
    }

    assert(uopIdx == numMicroops);

    for (int i = 0; i < numMicroops - 1; i++) {
        microOps[i]->setDelayedCommit();
    }
    microOps[numMicroops - 1]->setLastMicroop();
}

VldSingleOp64::VldSingleOp64(const char *mnem, ExtMachInst machInst,
                             OpClass __opClass, RegIndex rn, RegIndex vd,
                             RegIndex rm, uint8_t eSize, uint8_t dataSize,
                             uint8_t numStructElems, uint8_t index, bool wb,
                             bool replicate) :
    PredMacroOp(mnem, machInst, __opClass)
{
    RegIndex vx = NumFloatV8ArchRegs / 4;
    RegIndex rnsp = (RegIndex) makeSP((IntRegIndex) rn);
    bool baseIsSP = isSP((IntRegIndex) rnsp);

    numMicroops = wb ? 1 : 0;

    int eSizeBytes = 1 << eSize;
    int totNumBytes = numStructElems * eSizeBytes;
    assert(totNumBytes <= 64);

    // The guiding principle here is that no more than 16 bytes can be
    // transferred at a time
    int numMemMicroops = totNumBytes / 16;
    int residuum = totNumBytes % 16;
    if (residuum)
        ++numMemMicroops;
    numMicroops += numMemMicroops;

    int numMarshalMicroops = numStructElems / 2 + (numStructElems % 2 ? 1 : 0);
    numMicroops += numMarshalMicroops;

    microOps = new StaticInstPtr[numMicroops];
    unsigned uopIdx = 0;

    uint32_t memaccessFlags = TLB::MustBeOne | (TLB::ArmFlags) eSize |
        TLB::AllowUnaligned;

    int i = 0;
    for (; i < numMemMicroops - 1; ++i) {
        microOps[uopIdx++] = new MicroNeonLoad64(
            machInst, vx + (RegIndex) i, rnsp, 16 * i, memaccessFlags,
            baseIsSP, 16 /* accSize */, eSize);
    }
    microOps[uopIdx++] = new MicroNeonLoad64(
        machInst, vx + (RegIndex) i, rnsp, 16 * i, memaccessFlags, baseIsSP,
        residuum ? residuum : 16 /* accSize */, eSize);

    // Writeback microop: the post-increment amount is encoded in "Rm": a
    // 64-bit general register OR as '11111' for an immediate value equal to
    // the total number of bytes transferred (i.e. 8, 16, 24, 32, 48 or 64)
    if (wb) {
        if (rm != ((RegIndex) INTREG_X31)) {
            microOps[uopIdx++] = new MicroAddXERegUop(machInst, rnsp, rnsp, rm,
                                                      UXTX, 0);
        } else {
            microOps[uopIdx++] = new MicroAddXiUop(machInst, rnsp, rnsp,
                                                   totNumBytes);
        }
    }

    for (int i = 0; i < numMarshalMicroops; ++i) {
        microOps[uopIdx++] = new MicroUnpackNeon64(
            machInst, vd + (RegIndex) (2 * i), vx, eSize, dataSize,
            numStructElems, index, i /* step */, replicate);
    }

    assert(uopIdx == numMicroops);

    for (int i = 0; i < numMicroops - 1; i++) {
        microOps[i]->setDelayedCommit();
    }
    microOps[numMicroops - 1]->setLastMicroop();
}

VstSingleOp64::VstSingleOp64(const char *mnem, ExtMachInst machInst,
                             OpClass __opClass, RegIndex rn, RegIndex vd,
                             RegIndex rm, uint8_t eSize, uint8_t dataSize,
                             uint8_t numStructElems, uint8_t index, bool wb,
                             bool replicate) :
    PredMacroOp(mnem, machInst, __opClass)
{
    RegIndex vx = NumFloatV8ArchRegs / 4;
    RegIndex rnsp = (RegIndex) makeSP((IntRegIndex) rn);
    bool baseIsSP = isSP((IntRegIndex) rnsp);

    numMicroops = wb ? 1 : 0;

    int eSizeBytes = 1 << eSize;
    int totNumBytes = numStructElems * eSizeBytes;
    assert(totNumBytes <= 64);

    // The guiding principle here is that no more than 16 bytes can be
    // transferred at a time
    int numMemMicroops = totNumBytes / 16;
    int residuum = totNumBytes % 16;
    if (residuum)
        ++numMemMicroops;
    numMicroops += numMemMicroops;

    int numMarshalMicroops = totNumBytes > 32 ? 2 : 1;
    numMicroops += numMarshalMicroops;

    microOps = new StaticInstPtr[numMicroops];
    unsigned uopIdx = 0;

    for (int i = 0; i < numMarshalMicroops; ++i) {
        microOps[uopIdx++] = new MicroPackNeon64(
            machInst, vx + (RegIndex) (2 * i), vd, eSize, dataSize,
            numStructElems, index, i /* step */, replicate);
    }

    uint32_t memaccessFlags = TLB::MustBeOne | (TLB::ArmFlags) eSize |
        TLB::AllowUnaligned;

    int i = 0;
    for (; i < numMemMicroops - 1; ++i) {
        microOps[uopIdx++] = new MicroNeonStore64(
            machInst, vx + (RegIndex) i, rnsp, 16 * i, memaccessFlags,
            baseIsSP, 16 /* accSize */, eSize);
    }
    microOps[uopIdx++] = new MicroNeonStore64(
        machInst, vx + (RegIndex) i, rnsp, 16 * i, memaccessFlags, baseIsSP,
        residuum ? residuum : 16 /* accSize */, eSize);

    // Writeback microop: the post-increment amount is encoded in "Rm": a
    // 64-bit general register OR as '11111' for an immediate value equal to
    // the total number of bytes transferred (i.e. 8, 16, 24, 32, 48 or 64)
    if (wb) {
        if (rm != ((RegIndex) INTREG_X31)) {
            microOps[uopIdx++] = new MicroAddXERegUop(machInst, rnsp, rnsp, rm,
                                                      UXTX, 0);
        } else {
            microOps[uopIdx++] = new MicroAddXiUop(machInst, rnsp, rnsp,
                                                   totNumBytes);
        }
    }

    assert(uopIdx == numMicroops);

    for (int i = 0; i < numMicroops - 1; i++) {
        microOps[i]->setDelayedCommit();
    }
    microOps[numMicroops - 1]->setLastMicroop();
}
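The four AArch64 constructors above all budget their memory micro-ops the same way: the total transfer is cut into at most 16-byte chunks, any remainder gets one extra, shorter access, and writeback adds either a register-indexed or an immediate post-increment micro-op depending on whether Rm names a real register or the '11111' encoding. The standalone sketch below (illustrative only, not gem5 code; MemUopPlan and planNeonMemUops are hypothetical names) restates the chunking arithmetic:

#include <cstdio>

struct MemUopPlan {
    int memUops;        // number of 16-byte (or residual) accesses
    int lastAccSize;    // size of the final access in bytes
};

// Same arithmetic as the constructors above: totNumBytes / 16 full accesses,
// plus one more for any residuum, which also sets the final access size.
MemUopPlan planNeonMemUops(int totNumBytes)
{
    MemUopPlan p;
    p.memUops = totNumBytes / 16;
    int residuum = totNumBytes % 16;
    if (residuum)
        ++p.memUops;
    p.lastAccSize = residuum ? residuum : 16;
    return p;
}

int main()
{
    // e.g. 48 bytes (three 16-byte registers): three full 16-byte accesses
    MemUopPlan a = planNeonMemUops(48);
    std::printf("48 bytes: %d accesses, last %d bytes\n", a.memUops, a.lastAccSize);
    // e.g. 24 bytes (three 8-byte registers): one 16-byte + one 8-byte access
    MemUopPlan b = planNeonMemUops(24);
    std::printf("24 bytes: %d accesses, last %d bytes\n", b.memUops, b.lastAccSize);
    return 0;
}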
|
MacroVFPMemOp::MacroVFPMemOp(const char *mnem, ExtMachInst machInst,
                             OpClass __opClass, IntRegIndex rn,
                             RegIndex vd, bool single, bool up,
                             bool writeback, bool load, uint32_t offset) :
    PredMacroOp(mnem, machInst, __opClass)
{
    int i = 0;

    // The lowest order bit selects fldmx (set) or fldmd (clear). These seem
    // to be functionally identical except that fldmx is deprecated. For now
    // we'll assume they're otherwise interchangeable.
    int count = (single ? offset : (offset / 2));
849 if (count == 0 || count > NumFloatArchRegs) | 1336 if (count == 0 || count > NumFloatV7ArchRegs) |
        warn_once("Bad offset field for VFP load/store multiple.\n");
    if (count == 0) {
        // Force there to be at least one microop so the macroop makes sense.
        writeback = true;
    }
855 if (count > NumFloatArchRegs) 856 count = NumFloatArchRegs; | 1342 if (count > NumFloatV7ArchRegs) 1343 count = NumFloatV7ArchRegs; |
857 858 numMicroops = count * (single ? 1 : 2) + (writeback ? 1 : 0); 859 microOps = new StaticInstPtr[numMicroops]; 860 861 int64_t addr = 0; 862 863 if (!up) 864 addr = 4 * offset; --- 64 unchanged lines hidden (view full) --- 929 ss << ", "; 930 printReg(ss, urb); 931 ss << ", "; 932 ccprintf(ss, "#%d", imm); 933 return ss.str(); 934} 935 936std::string | 1344 1345 numMicroops = count * (single ? 1 : 2) + (writeback ? 1 : 0); 1346 microOps = new StaticInstPtr[numMicroops]; 1347 1348 int64_t addr = 0; 1349 1350 if (!up) 1351 addr = 4 * offset; --- 64 unchanged lines hidden (view full) --- 1416 ss << ", "; 1417 printReg(ss, urb); 1418 ss << ", "; 1419 ccprintf(ss, "#%d", imm); 1420 return ss.str(); 1421} 1422 1423std::string |
MicroIntImmXOp::generateDisassembly(Addr pc, const SymbolTable *symtab) const
{
    std::stringstream ss;
    printMnemonic(ss);
    printReg(ss, ura);
    ss << ", ";
    printReg(ss, urb);
    ss << ", ";
    ccprintf(ss, "#%d", imm);
    return ss.str();
}

std::string
|
MicroSetPCCPSR::generateDisassembly(Addr pc, const SymbolTable *symtab) const
{
    std::stringstream ss;
    printMnemonic(ss);
    ss << "[PC,CPSR]";
    return ss.str();
}

std::string
MicroIntRegXOp::generateDisassembly(Addr pc, const SymbolTable *symtab) const
{
    std::stringstream ss;
    printMnemonic(ss);
    printReg(ss, ura);
    ccprintf(ss, ", ");
    printReg(ss, urb);
    printExtendOperand(false, ss, (IntRegIndex)urc, type, shiftAmt);
    return ss.str();
}

std::string
|
MicroIntMov::generateDisassembly(Addr pc, const SymbolTable *symtab) const
{
    std::stringstream ss;
    printMnemonic(ss);
    printReg(ss, ura);
    ss << ", ";
    printReg(ss, urb);
    return ss.str();
--- 30 unchanged lines hidden ---