// Copyright (c) 2007-2008 The Hewlett-Packard Development Company
// Copyright (c) 2015 Advanced Micro Devices, Inc.
// All rights reserved.
//
// The license below extends only to copyright in the software and shall
// not be construed as granting a license to any other intellectual
// property including but not limited to intellectual property relating
// to a hardware implementation of the functionality of the software
// licensed hereunder. You may use the software subject to the license
// terms below provided that you ensure that this notice is replicated
// unmodified and in its entirety in all distributions of the software,
// modified or unmodified, in source code or in binary form.
//
// Copyright (c) 2008 The Regents of The University of Michigan
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met: redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer;
// redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution;
// neither the name of the copyright holders nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Authors: Gabe Black

//////////////////////////////////////////////////////////////////////////
//
// LdStOp Microop templates
//
//////////////////////////////////////////////////////////////////////////

// LEA template

def template MicroLeaExecute {{
    Fault %(class_name)s::execute(CPU_EXEC_CONTEXT *xc,
          Trace::InstRecord *traceData) const
    {
        Fault fault = NoFault;
        Addr EA;

        %(op_decl)s;
        %(op_rd)s;
        %(ea_code)s;
        DPRINTF(X86, "%s : %s: The address is %#x\n", instMnem, mnemonic, EA);

        %(code)s;
        if(fault == NoFault)
        {
            %(op_wb)s;
        }

        return fault;
    }
}};

def template MicroLeaDeclare {{
    class %(class_name)s : public %(base_class)s
    {
      public:
        %(class_name)s(ExtMachInst _machInst,
                const char * instMnem, uint64_t setFlags,
                uint8_t _scale, InstRegIndex _index, InstRegIndex _base,
                uint64_t _disp, InstRegIndex _segment,
                InstRegIndex _data,
                uint8_t _dataSize, uint8_t _addressSize,
                Request::FlagsType _memFlags);

        %(BasicExecDeclare)s
    };
}};

// Load templates

def template MicroLoadExecute {{
    Fault %(class_name)s::execute(CPU_EXEC_CONTEXT *xc,
          Trace::InstRecord *traceData) const
    {
        Fault fault = NoFault;
        Addr EA;

        %(op_decl)s;
        %(op_rd)s;
        %(ea_code)s;
        DPRINTF(X86, "%s : %s: The address is %#x\n", instMnem, mnemonic, EA);

        fault = readMemAtomic(xc, traceData, EA, Mem,
                              %(memDataSize)s, memFlags);

        if (fault == NoFault) {
            %(code)s;
        } else if (memFlags & Request::PREFETCH) {
            // For prefetches, ignore any faults/exceptions.
            return NoFault;
        }
        if(fault == NoFault)
        {
            %(op_wb)s;
        }

        return fault;
    }
}};

def template MicroLoadInitiateAcc {{
    Fault %(class_name)s::initiateAcc(CPU_EXEC_CONTEXT * xc,
            Trace::InstRecord * traceData) const
    {
        Fault fault = NoFault;
        Addr EA;

        %(op_decl)s;
        %(op_rd)s;
        %(ea_code)s;
        DPRINTF(X86, "%s : %s: The address is %#x\n", instMnem, mnemonic, EA);

        fault = initiateMemRead(xc, traceData, EA,
                                %(memDataSize)s, memFlags);

        return fault;
    }
}};

def template MicroLoadCompleteAcc {{
    Fault %(class_name)s::completeAcc(PacketPtr pkt,
            CPU_EXEC_CONTEXT * xc,
            Trace::InstRecord * traceData) const
    {
        Fault fault = NoFault;

        %(op_decl)s;
        %(op_rd)s;

        getMem(pkt, Mem, %(memDataSize)s, traceData);

        %(code)s;

        if(fault == NoFault)
        {
            %(op_wb)s;
        }

        return fault;
    }
}};

// Store templates

def template MicroStoreExecute {{
    Fault %(class_name)s::execute(CPU_EXEC_CONTEXT * xc,
            Trace::InstRecord *traceData) const
    {
        Fault fault = NoFault;

        Addr EA;
        %(op_decl)s;
        %(op_rd)s;
        %(ea_code)s;
        DPRINTF(X86, "%s : %s: The address is %#x\n", instMnem, mnemonic, EA);

        %(code)s;

        if(fault == NoFault)
        {
            fault = writeMemAtomic(xc, traceData, Mem, %(memDataSize)s, EA,
                    memFlags, NULL);
            if(fault == NoFault)
            {
                %(op_wb)s;
            }
        }

        return fault;
    }
}};

def template MicroStoreInitiateAcc {{
    Fault %(class_name)s::initiateAcc(CPU_EXEC_CONTEXT * xc,
            Trace::InstRecord * traceData) const
    {
        Fault fault = NoFault;

        Addr EA;
        %(op_decl)s;
        %(op_rd)s;
        %(ea_code)s;
        DPRINTF(X86, "%s : %s: The address is %#x\n", instMnem, mnemonic, EA);

        %(code)s;

        if(fault == NoFault)
        {
            fault = writeMemTiming(xc, traceData, Mem, %(memDataSize)s, EA,
                    memFlags, NULL);
        }
        return fault;
    }
}};

def template MicroStoreCompleteAcc {{
    Fault %(class_name)s::completeAcc(PacketPtr pkt,
            CPU_EXEC_CONTEXT * xc, Trace::InstRecord * traceData) const
    {
        %(op_decl)s;
        %(op_rd)s;
        %(complete_code)s;
        %(op_wb)s;

        return NoFault;
    }
}};

// Common templates

//This declares the initiateAcc function in memory operations
def template InitiateAccDeclare {{
    Fault initiateAcc(%(CPU_exec_context)s *, Trace::InstRecord *) const;
}};

//This declares the completeAcc function in memory operations
def template CompleteAccDeclare {{
    Fault completeAcc(PacketPtr, %(CPU_exec_context)s *, Trace::InstRecord *) const;
}};

def template MicroLdStOpDeclare {{
    class %(class_name)s : public %(base_class)s
    {
      public:
        %(class_name)s(ExtMachInst _machInst,
                const char * instMnem, uint64_t setFlags,
                uint8_t _scale, InstRegIndex _index, InstRegIndex _base,
                uint64_t _disp, InstRegIndex _segment,
                InstRegIndex _data,
                uint8_t _dataSize, uint8_t _addressSize,
                Request::FlagsType _memFlags);

        %(BasicExecDeclare)s

        %(InitiateAccDeclare)s

        %(CompleteAccDeclare)s
    };
}};

// LdStSplitOp is a load or store that uses a pair of regs as the
// source or destination. Used for cmpxchg{8,16}b.
def template MicroLdStSplitOpDeclare {{
    class %(class_name)s : public %(base_class)s
    {
      public:
        %(class_name)s(ExtMachInst _machInst,
                const char * instMnem, uint64_t setFlags,
                uint8_t _scale, InstRegIndex _index, InstRegIndex _base,
                uint64_t _disp, InstRegIndex _segment,
                InstRegIndex _dataLow, InstRegIndex _dataHi,
                uint8_t _dataSize, uint8_t _addressSize,
                Request::FlagsType _memFlags);

        %(BasicExecDeclare)s

        %(InitiateAccDeclare)s

        %(CompleteAccDeclare)s
    };
}};

def template MicroLdStOpConstructor {{
    %(class_name)s::%(class_name)s(
            ExtMachInst machInst, const char * instMnem, uint64_t setFlags,
            uint8_t _scale, InstRegIndex _index, InstRegIndex _base,
            uint64_t _disp, InstRegIndex _segment,
            InstRegIndex _data,
            uint8_t _dataSize, uint8_t _addressSize,
            Request::FlagsType _memFlags) :
        %(base_class)s(machInst, "%(mnemonic)s", instMnem, setFlags,
                _scale, _index, _base,
                _disp, _segment, _data,
                _dataSize, _addressSize, _memFlags, %(op_class)s)
    {
        %(constructor)s;
    }
}};

def template MicroLdStSplitOpConstructor {{
    %(class_name)s::%(class_name)s(
            ExtMachInst machInst, const char * instMnem, uint64_t setFlags,
            uint8_t _scale, InstRegIndex _index, InstRegIndex _base,
            uint64_t _disp, InstRegIndex _segment,
            InstRegIndex _dataLow, InstRegIndex _dataHi,
            uint8_t _dataSize, uint8_t _addressSize,
            Request::FlagsType _memFlags) :
        %(base_class)s(machInst, "%(mnemonic)s", instMnem, setFlags,
                _scale, _index, _base,
                _disp, _segment, _dataLow, _dataHi,
                _dataSize, _addressSize, _memFlags, %(op_class)s)
    {
        %(constructor)s;
    }
}};

let {{
    # Python-side wrapper for a load/store microop: holds the addressing
    # components (scale/index/base/disp/segment), operand sizes, and the
    # memory/instruction flag expressions used by getAllocator().
    class LdStOp(X86Microop):
        def __init__(self, data, segment, addr, disp,
                dataSize, addressSize, baseFlags, atCPL0, prefetch, nonSpec,
                implicitStack):
            self.data = data
            [self.scale, self.index, self.base] = addr
            self.disp = disp
            self.segment = segment
            self.dataSize = dataSize
            self.addressSize = addressSize
            self.memFlags = baseFlags
            if atCPL0:
                self.memFlags += " | (CPL0FlagBit << FlagShift)"
            self.instFlags = ""
            if prefetch:
                self.memFlags += " | Request::PREFETCH"
                self.instFlags += " | (1ULL << StaticInst::IsDataPrefetch)"
            if nonSpec:
                self.instFlags += " | (1ULL << StaticInst::IsNonSpeculative)"
            # For implicit stack operations, we should *not* use the
            # alternative addressing mode for loads/stores if the prefix is set
            if not implicitStack:
                self.memFlags += " | (machInst.legacy.addr ? " + \
                    "(AddrSizeFlagBit << FlagShift) : 0)"

        def getAllocator(self, microFlags):
            allocator = '''new %(class_name)s(machInst, macrocodeBlock,
                    %(flags)s, %(scale)s, %(index)s, %(base)s,
                    %(disp)s, %(segment)s, %(data)s,
                    %(dataSize)s, %(addressSize)s, %(memFlags)s)''' % {
                "class_name" : self.className,
                "flags" : self.microFlagsText(microFlags) + self.instFlags,
                "scale" : self.scale, "index" : self.index,
                "base" : self.base,
                "disp" : self.disp,
                "segment" : self.segment, "data" : self.data,
                "dataSize" : self.dataSize, "addressSize" : self.addressSize,
                "memFlags" : self.memFlags}
            return allocator

    # Variant whose allocator picks the "Big" (>= 4 byte) implementation
    # of the microop at decode time based on the runtime dataSize.
    class BigLdStOp(X86Microop):
        def __init__(self, data, segment, addr, disp,
                dataSize, addressSize, baseFlags, atCPL0, prefetch, nonSpec,
                implicitStack):
            self.data = data
            [self.scale, self.index, self.base] = addr
            self.disp = disp
            self.segment = segment
            self.dataSize = dataSize
            self.addressSize = addressSize
            self.memFlags = baseFlags
            if atCPL0:
                self.memFlags += " | (CPL0FlagBit << FlagShift)"
            self.instFlags = ""
            if prefetch:
                self.memFlags += " | Request::PREFETCH"
                self.instFlags += " | (1ULL << StaticInst::IsDataPrefetch)"
            if nonSpec:
                self.instFlags += " | (1ULL << StaticInst::IsNonSpeculative)"
            # For implicit stack operations, we should *not* use the
            # alternative addressing mode for loads/stores if the prefix is set
            if not implicitStack:
                self.memFlags += " | (machInst.legacy.addr ? " + \
                    "(AddrSizeFlagBit << FlagShift) : 0)"

        def getAllocator(self, microFlags):
            allocString = '''
                (%(dataSize)s >= 4) ?
                    (StaticInstPtr)(new %(class_name)sBig(machInst,
                        macrocodeBlock, %(flags)s, %(scale)s, %(index)s,
                        %(base)s, %(disp)s, %(segment)s, %(data)s,
                        %(dataSize)s, %(addressSize)s, %(memFlags)s)) :
                    (StaticInstPtr)(new %(class_name)s(machInst,
                        macrocodeBlock, %(flags)s, %(scale)s, %(index)s,
                        %(base)s, %(disp)s, %(segment)s, %(data)s,
                        %(dataSize)s, %(addressSize)s, %(memFlags)s))
                '''
            allocator = allocString % {
                "class_name" : self.className,
                "flags" : self.microFlagsText(microFlags) + self.instFlags,
                "scale" : self.scale, "index" : self.index,
                "base" : self.base,
                "disp" : self.disp,
                "segment" : self.segment, "data" : self.data,
                "dataSize" : self.dataSize, "addressSize" : self.addressSize,
                "memFlags" : self.memFlags}
            return allocator

    # A load/store whose data operand is a pair of registers
    # (dataLow/dataHi); used for cmpxchg{8,16}b.
    class LdStSplitOp(LdStOp):
        def __init__(self, data, segment, addr, disp,
                dataSize, addressSize, baseFlags, atCPL0, prefetch, nonSpec,
                implicitStack):
            super(LdStSplitOp, self).__init__(0, segment, addr, disp,
                    dataSize, addressSize, baseFlags, atCPL0, prefetch,
                    nonSpec, implicitStack)
            (self.dataLow, self.dataHi) = data

        def getAllocator(self, microFlags):
            allocString = '''(StaticInstPtr)(new %(class_name)s(machInst,
                    macrocodeBlock, %(flags)s, %(scale)s, %(index)s,
                    %(base)s, %(disp)s, %(segment)s,
                    %(dataLow)s, %(dataHi)s,
                    %(dataSize)s, %(addressSize)s, %(memFlags)s))
                '''
            allocator = allocString % {
                "class_name" : self.className,
                "flags" : self.microFlagsText(microFlags) + self.instFlags,
                "scale" : self.scale, "index" : self.index,
                "base" : self.base,
                "disp" : self.disp,
                "segment" : self.segment,
                "dataLow" : self.dataLow, "dataHi" : self.dataHi,
                "dataSize" : self.dataSize, "addressSize" : self.addressSize,
                "memFlags" : self.memFlags}
            return allocator

}};

let {{

    # Make these empty strings so that concatenating onto
    # them will always work.
    header_output = ""
    decoder_output = ""
    exec_output = ""

    # Effective-address expression, truncated to the address size.
    segmentEAExpr = \
        'bits(scale * Index + Base + disp, addressSize * 8 - 1, 0);'

    calculateEA = 'EA = SegBase + ' + segmentEAExpr

    # Define a load microop named `mnemonic`. When `big` is true, a "Big"
    # variant using `bigCode` is also generated and selected for
    # dataSize >= 4 (see BigLdStOp.getAllocator).
    def defineMicroLoadOp(mnemonic, code, bigCode='',
                          mem_flags="0", big=True, nonSpec=False,
                          implicitStack=False):
        global header_output
        global decoder_output
        global exec_output
        global microopClasses
        Name = mnemonic
        name = mnemonic.lower()

        # Build up the all register version of this micro op
        iops = [InstObjParams(name, Name, 'X86ISA::LdStOp',
                              { "code": code,
                                "ea_code": calculateEA,
                                "memDataSize": "dataSize" })]
        if big:
            iops += [InstObjParams(name, Name + "Big", 'X86ISA::LdStOp',
                                   { "code": bigCode,
                                     "ea_code": calculateEA,
                                     "memDataSize": "dataSize" })]
        for iop in iops:
            header_output += MicroLdStOpDeclare.subst(iop)
            decoder_output += MicroLdStOpConstructor.subst(iop)
            exec_output += MicroLoadExecute.subst(iop)
            exec_output += MicroLoadInitiateAcc.subst(iop)
            exec_output += MicroLoadCompleteAcc.subst(iop)

        if implicitStack:
            # For instructions that implicitly access the stack, the address
            # size is the same as the stack segment pointer size, not the
            # address size if specified by the instruction prefix
            addressSize = "env.stackSize"
        else:
            addressSize = "env.addressSize"

        base = LdStOp
        if big:
            base = BigLdStOp
        class LoadOp(base):
            def __init__(self, data, segment, addr, disp = 0,
                    dataSize="env.dataSize",
                    addressSize=addressSize,
                    atCPL0=False, prefetch=False, nonSpec=nonSpec,
                    implicitStack=implicitStack):
                super(LoadOp, self).__init__(data, segment, addr,
                        disp, dataSize, addressSize, mem_flags,
                        atCPL0, prefetch, nonSpec, implicitStack)
                self.className = Name
                self.mnemonic = name

        microopClasses[name] = LoadOp

    defineMicroLoadOp('Ld', 'Data = merge(Data, Mem, dataSize);',
                      'Data = Mem & mask(dataSize * 8);')
    defineMicroLoadOp('Ldis', 'Data = merge(Data, Mem, dataSize);',
                      'Data = Mem & mask(dataSize * 8);',
                      implicitStack=True)
    defineMicroLoadOp('Ldst', 'Data = merge(Data, Mem, dataSize);',
                      'Data = Mem & mask(dataSize * 8);',
                      '(StoreCheck << FlagShift)')
    defineMicroLoadOp('Ldstl', 'Data = merge(Data, Mem, dataSize);',
                      'Data = Mem & mask(dataSize * 8);',
                      '(StoreCheck << FlagShift) | Request::LOCKED_RMW',
                      nonSpec=True)

    defineMicroLoadOp('Ldfp', code='FpData_uqw = Mem', big = False)

    defineMicroLoadOp('Ldfp87', code='''
        switch (dataSize)
        {
          case 4:
            FpData_df = *(float *)&Mem;
            break;
          case 8:
            FpData_df = *(double *)&Mem;
            break;
          default:
            panic("Unhandled data size in LdFp87.\\n");
        }
        ''', big = False)

    # Load integer from memory into x87 top-of-stack register.
    # Used to implement fild instruction.
    defineMicroLoadOp('Ldifp87', code='''
        switch (dataSize)
        {
          case 2:
            FpData_df = (int64_t)sext<16>(Mem);
            break;
          case 4:
            FpData_df = (int64_t)sext<32>(Mem);
            break;
          case 8:
            FpData_df = (int64_t)Mem;
            break;
          default:
            panic("Unhandled data size in LdIFp87.\\n");
        }
        ''', big = False)

    # Define a split (register-pair) load microop; memory access size is
    # twice the data size since two registers are filled.
    def defineMicroLoadSplitOp(mnemonic, code, mem_flags="0", nonSpec=False):
        global header_output
        global decoder_output
        global exec_output
        global microopClasses
        Name = mnemonic
        name = mnemonic.lower()

        iop = InstObjParams(name, Name, 'X86ISA::LdStSplitOp',
                            { "code": code,
                              "ea_code": calculateEA,
                              "memDataSize": "2 * dataSize" })

        header_output += MicroLdStSplitOpDeclare.subst(iop)
        decoder_output += MicroLdStSplitOpConstructor.subst(iop)
        exec_output += MicroLoadExecute.subst(iop)
        exec_output += MicroLoadInitiateAcc.subst(iop)
        exec_output += MicroLoadCompleteAcc.subst(iop)

        class LoadOp(LdStSplitOp):
            def __init__(self, data, segment, addr, disp = 0,
                    dataSize="env.dataSize",
                    addressSize="env.addressSize",
                    atCPL0=False, prefetch=False, nonSpec=nonSpec,
                    implicitStack=False):
                super(LoadOp, self).__init__(data, segment, addr,
                        disp, dataSize, addressSize, mem_flags,
                        atCPL0, prefetch, nonSpec, implicitStack)
                self.className = Name
                self.mnemonic = name

        microopClasses[name] = LoadOp

    code = '''
        switch (dataSize) {
          case 4:
            DataLow = bits(Mem_u2qw[0], 31, 0);
            DataHi = bits(Mem_u2qw[0], 63, 32);
            break;
          case 8:
            DataLow = Mem_u2qw[0];
            DataHi = Mem_u2qw[1];
            break;
          default:
            panic("Unhandled data size %d in LdSplit.\\n", dataSize);
        }'''

    defineMicroLoadSplitOp('LdSplit', code,
                           '(StoreCheck << FlagShift)')

    defineMicroLoadSplitOp('LdSplitl', code,
                           '(StoreCheck << FlagShift) | Request::LOCKED_RMW',
                           nonSpec=True)

    # Define a store microop named `mnemonic`. `completeCode` runs in
    # completeAcc() after the write has been performed.
    def defineMicroStoreOp(mnemonic, code, completeCode="", mem_flags="0",
                           implicitStack=False):
        global header_output
        global decoder_output
        global exec_output
        global microopClasses
        Name = mnemonic
        name = mnemonic.lower()

        # Build up the all register version of this micro op
        iop = InstObjParams(name, Name, 'X86ISA::LdStOp',
                            { "code": code,
                              "complete_code": completeCode,
                              "ea_code": calculateEA,
                              "memDataSize": "dataSize" })
        header_output += MicroLdStOpDeclare.subst(iop)
        decoder_output += MicroLdStOpConstructor.subst(iop)
        exec_output += MicroStoreExecute.subst(iop)
        exec_output += MicroStoreInitiateAcc.subst(iop)
        exec_output += MicroStoreCompleteAcc.subst(iop)

        if implicitStack:
            # For instructions that implicitly access the stack, the address
            # size is the same as the stack segment pointer size, not the
            # address size if specified by the instruction prefix
            addressSize = "env.stackSize"
        else:
            addressSize = "env.addressSize"

        class StoreOp(LdStOp):
            def __init__(self, data, segment, addr, disp = 0,
                    dataSize="env.dataSize",
                    addressSize=addressSize,
                    atCPL0=False, nonSpec=False,
                    implicitStack=implicitStack):
                super(StoreOp, self).__init__(data, segment, addr, disp,
                        dataSize, addressSize, mem_flags, atCPL0, False,
                        nonSpec, implicitStack)
                self.className = Name
                self.mnemonic = name

        microopClasses[name] = StoreOp

    defineMicroStoreOp('St', 'Mem = pick(Data, 2, dataSize);')
    defineMicroStoreOp('Stis', 'Mem = pick(Data, 2, dataSize);',
                       implicitStack=True)
    defineMicroStoreOp('Stul', 'Mem = pick(Data, 2, dataSize);',
                       mem_flags="Request::LOCKED_RMW")

    defineMicroStoreOp('Stfp', code='Mem = FpData_uqw;')

    defineMicroStoreOp('Stfp87', code='''
        switch (dataSize)
        {
          case 4: {
            float single(FpData_df);
            Mem = *(uint32_t *)&single;
          } break;
          case 8:
            Mem = *(uint64_t *)&FpData_df;
            break;
          default:
            panic("Unhandled data size in StFp87.\\n");
        }
        ''')

    defineMicroStoreOp('Cda', 'Mem = 0;', mem_flags="Request::NO_ACCESS")

    # Define a split (register-pair) store microop; memory access size is
    # twice the data size since two registers are written out.
    def defineMicroStoreSplitOp(mnemonic, code,
                                completeCode="", mem_flags="0"):
        global header_output
        global decoder_output
        global exec_output
        global microopClasses
        Name = mnemonic
        name = mnemonic.lower()

        iop = InstObjParams(name, Name, 'X86ISA::LdStSplitOp',
                            { "code": code,
                              "complete_code": completeCode,
                              "ea_code": calculateEA,
                              "memDataSize": "2 * dataSize" })

        header_output += MicroLdStSplitOpDeclare.subst(iop)
        decoder_output += MicroLdStSplitOpConstructor.subst(iop)
        exec_output += MicroStoreExecute.subst(iop)
        exec_output += MicroStoreInitiateAcc.subst(iop)
        exec_output += MicroStoreCompleteAcc.subst(iop)

        class StoreOp(LdStSplitOp):
            def __init__(self, data, segment, addr, disp = 0,
                    dataSize="env.dataSize",
                    addressSize="env.addressSize",
                    atCPL0=False, nonSpec=False, implicitStack=False):
                super(StoreOp, self).__init__(data, segment, addr, disp,
                        dataSize, addressSize, mem_flags, atCPL0, False,
                        nonSpec, implicitStack)
                self.className = Name
                self.mnemonic = name

        microopClasses[name] = StoreOp

    code = '''
        switch (dataSize) {
          case 4:
            Mem_u2qw[0] = (DataHi << 32) | DataLow;
            break;
          case 8:
            Mem_u2qw[0] = DataLow;
            Mem_u2qw[1] = DataHi;
            break;
          default:
            panic("Unhandled data size %d in StSplit.\\n", dataSize);
        }'''

    defineMicroStoreSplitOp('StSplit', code)

    defineMicroStoreSplitOp('StSplitul', code,
                            mem_flags='Request::LOCKED_RMW')

    # lea computes the effective address without a segment-base
    # contribution and writes it into the data register.
    iop = InstObjParams("lea", "Lea", 'X86ISA::LdStOp',
                        { "code": "Data = merge(Data, EA, dataSize);",
                          "ea_code": "EA = " + segmentEAExpr,
                          "memDataSize": "dataSize" })
    header_output += MicroLeaDeclare.subst(iop)
    decoder_output += MicroLdStOpConstructor.subst(iop)
    exec_output += MicroLeaExecute.subst(iop)

    class LeaOp(LdStOp):
        def __init__(self, data, segment, addr, disp = 0,
                dataSize="env.dataSize", addressSize="env.addressSize"):
            super(LeaOp, self).__init__(data, segment, addr, disp,
                    dataSize, addressSize, "0", False, False, False, False)
            self.className = "Lea"
            self.mnemonic = "lea"

    microopClasses["lea"] = LeaOp


    # tia (TLB invalidate address) demaps the page containing EA.
    iop = InstObjParams("tia", "Tia", 'X86ISA::LdStOp',
                        { "code": "xc->demapPage(EA, 0);",
                          "ea_code": calculateEA,
                          "memDataSize": "dataSize" })
    header_output += MicroLeaDeclare.subst(iop)
    decoder_output += MicroLdStOpConstructor.subst(iop)
    exec_output += MicroLeaExecute.subst(iop)

    class TiaOp(LdStOp):
        def __init__(self, segment, addr, disp = 0,
                dataSize="env.dataSize",
                addressSize="env.addressSize"):
            super(TiaOp, self).__init__("InstRegIndex(NUM_INTREGS)", segment,
                    addr, disp, dataSize, addressSize, "0", False, False,
                    False, False)
            self.className = "Tia"
            self.mnemonic = "tia"

    microopClasses["tia"] = TiaOp

    # cda (check data address) performs the address check/translation of a
    # store without actually accessing memory (Request::NO_ACCESS).
    class CdaOp(LdStOp):
        def __init__(self, segment, addr, disp = 0,
                dataSize="env.dataSize",
                addressSize="env.addressSize", atCPL0=False):
            super(CdaOp, self).__init__("InstRegIndex(NUM_INTREGS)", segment,
                    addr, disp, dataSize, addressSize, "Request::NO_ACCESS",
                    atCPL0, False, False, False)
            self.className = "Cda"
            self.mnemonic = "cda"

    microopClasses["cda"] = CdaOp
}};