ldr64.isa (12856:cca88f84cb80)
// -*- mode:c++ -*-

// Copyright (c) 2011-2014, 2017 ARM Limited
// All rights reserved
//
// The license below extends only to copyright in the software and shall
// not be construed as granting a license to any other intellectual
// property including but not limited to intellectual property relating
// to a hardware implementation of the functionality of the software
// licensed hereunder. You may use the software subject to the license
// terms below provided that you ensure that this notice is replicated
// unmodified and in its entirety in all distributions of the software,
// modified or unmodified, in source code or in binary form.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met: redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer;
// redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution;
// neither the name of the copyright holders nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Authors: Gabe Black

let {{

    header_output = ""
    decoder_output = ""
    exec_output = ""

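    # A note for readers: the "flavor" keyword selects the load variant
    # being generated. "normal" and "widen" are plain integer loads
    # (32- and 64-bit destinations respectively), "fp" targets the
    # FP/SIMD registers, "dprefetch"/"iprefetch" are prefetch hints,
    # "acquire" adds barrier semantics, and "exclusive"/"exp"/"acex"/
    # "acexp" are the (acquire-)exclusive single and pair forms.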
    class LoadInst64(LoadStoreInst):
        execBase = 'Load64'
        micro = False

        def __init__(self, mnem, Name, size=4, sign=False, user=False,
                     literal=False, flavor="normal", top=False):
            super(LoadInst64, self).__init__()

            self.name = mnem
            self.Name = Name
            self.size = size
            self.sign = sign
            self.user = user
            self.literal = literal
            self.flavor = flavor
            self.top = top

            self.memFlags = ["ArmISA::TLB::MustBeOne"]
            self.instFlags = []
            self.codeBlobs = {"postacc_code" : ""}

            # Add memory request flags where necessary
            if self.user:
                self.memFlags.append("ArmISA::TLB::UserMode")

            if self.flavor == "dprefetch":
                self.memFlags.append("Request::PREFETCH")
                self.instFlags = ['IsDataPrefetch']
            elif self.flavor == "iprefetch":
                self.memFlags.append("Request::PREFETCH")
                self.instFlags = ['IsInstPrefetch']
            if self.micro:
                self.instFlags.append("IsMicroop")

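            # The bare number appended below appears to encode
            # log2(access size) for the alignment check; pair ops use
            # log2(2 * size) since the check covers the combined access.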
            if self.flavor in ("acexp", "exp"):
                # For exclusive pair ops, the alignment check is based
                # on the total size.
                self.memFlags.append("%d" % int(math.log(self.size, 2) + 1))
            elif not (self.size == 16 and self.top):
                # Only the first microop should perform alignment checking.
                self.memFlags.append("%d" % int(math.log(self.size, 2)))

            if self.flavor not in ("acquire", "acex", "exclusive",
                                   "acexp", "exp"):
                self.memFlags.append("ArmISA::TLB::AllowUnaligned")

            if self.flavor in ("acquire", "acex", "acexp"):
                self.instFlags.extend(["IsMemBarrier",
                                       "IsWriteBarrier",
                                       "IsReadBarrier"])
            if self.flavor in ("acex", "exclusive", "exp", "acexp"):
                self.memFlags.append("Request::LLSC")

        def buildEACode(self):
            # Address computation code
            eaCode = ""
            if self.flavor == "fp":
                eaCode += vfp64EnabledCheckCode

            if self.literal:
                eaCode += "EA = RawPC"
            else:
                eaCode += SPAlignmentCheckCode + "EA = XBase"

            if self.size == 16:
                if self.top:
                    eaCode += " + (isBigEndian64(xc->tcBase()) ? 0 : 8)"
                else:
                    eaCode += " + (isBigEndian64(xc->tcBase()) ? 8 : 0)"
            if not self.post:
                eaCode += self.offset
            eaCode += ";"

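            # At this point eaCode is a C++ statement such as
            # "EA = XBase + imm;" for an immediate-offset load (after
            # the SP alignment check prologue); post-indexed forms get
            # just "EA = XBase;" and apply the offset in the writeback
            # microop instead.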
            self.codeBlobs["ea_code"] = eaCode

        def emitHelper(self, base='Memory64', wbDecl=None):
            global header_output, decoder_output, exec_output

            # If this is a microop itself, don't allow anything that would
            # require further microcoding.
            if self.micro:
                assert not wbDecl

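            # SAS, SSE, SRT, SF and AR below are the ISS fields reported
            # in the syndrome register on a data abort: access size,
            # sign extension, transfer register, 64-bit width, and
            # acquire/release semantics respectively.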
            fa_code = None
            if not self.micro and self.flavor in ("normal", "widen", "acquire"):
                fa_code = '''
                    fault->annotate(ArmFault::SAS, %s);
                    fault->annotate(ArmFault::SSE, %s);
                    fault->annotate(ArmFault::SRT, dest);
                    fault->annotate(ArmFault::SF, %s);
                    fault->annotate(ArmFault::AR, %s);
                ''' % ("0" if self.size == 1 else
                       "1" if self.size == 2 else
                       "2" if self.size == 4 else "3",
                       "true" if self.sign else "false",
                       "true" if (self.size == 8 or
                                  self.flavor == "widen") else "false",
                       "true" if self.flavor == "acquire" else "false")

            (newHeader, newDecoder, newExec) = \
                self.fillTemplates(self.name, self.Name, self.codeBlobs,
                                   self.memFlags, self.instFlags,
                                   base, wbDecl, faCode=fa_code)

            header_output += newHeader
            decoder_output += newDecoder
            exec_output += newExec

    class LoadImmInst64(LoadInst64):
        def __init__(self, *args, **kargs):
            super(LoadImmInst64, self).__init__(*args, **kargs)
            self.offset = " + imm"

            self.wbDecl = "MicroAddXiUop(machInst, base, base, imm);"

    class LoadRegInst64(LoadInst64):
        def __init__(self, *args, **kargs):
            super(LoadRegInst64, self).__init__(*args, **kargs)
            self.offset = " + extendReg64(XOffset, type, shiftAmt, 64)"

            self.wbDecl = \
                "MicroAddXERegUop(machInst, base, base, " + \
                " offset, type, shiftAmt);"

    class LoadRawRegInst64(LoadInst64):
        def __init__(self, *args, **kargs):
            super(LoadRawRegInst64, self).__init__(*args, **kargs)
            self.offset = ""

    class LoadSingle64(LoadInst64):
        def emit(self):
            self.buildEACode()

            # Code that actually handles the access
            if self.flavor in ("dprefetch", "iprefetch"):
                accCode = 'uint64_t temp M5_VAR_USED = Mem%s;'
            elif self.flavor == "fp":
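                # FP destinations are written as four 32-bit chunks
                # (P0..P3) that together cover the 128-bit vector
                # register; chunks beyond the loaded size are zeroed.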
                if self.size in (1, 2, 4):
                    accCode = '''
                        AA64FpDestP0_uw = cSwap(Mem%s,
                                                isBigEndian64(xc->tcBase()));
                        AA64FpDestP1_uw = 0;
                        AA64FpDestP2_uw = 0;
                        AA64FpDestP3_uw = 0;
                    '''
                elif self.size == 8:
                    accCode = '''
                        uint64_t data = cSwap(Mem%s,
                                              isBigEndian64(xc->tcBase()));
                        AA64FpDestP0_uw = (uint32_t)data;
                        AA64FpDestP1_uw = (data >> 32);
                        AA64FpDestP2_uw = 0;
                        AA64FpDestP3_uw = 0;
                    '''
                elif self.size == 16:
                    accCode = '''
                        auto data = cSwap(Mem%s, isBigEndian64(xc->tcBase()));
                        AA64FpDestP0_uw = (uint32_t)data[0];
                        AA64FpDestP1_uw = (data[0] >> 32);
                        AA64FpDestP2_uw = (uint32_t)data[1];
                        AA64FpDestP3_uw = (data[1] >> 32);
                    '''
            elif self.flavor == "widen" or self.size == 8:
                accCode = "XDest = cSwap(Mem%s, isBigEndian64(xc->tcBase()));"
            else:
                accCode = "WDest = cSwap(Mem%s, isBigEndian64(xc->tcBase()));"

            accCode = accCode % buildMemSuffix(self.sign, self.size)

            self.codeBlobs["memacc_code"] = accCode

            # Push it out to the output files
            wbDecl = None
            if self.writeback and not self.micro:
                wbDecl = self.wbDecl
            self.emitHelper(self.base, wbDecl)

    class LoadDouble64(LoadInst64):
        def emit(self):
            self.buildEACode()

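            # Pair loads: one memory access fills two destination
            # registers, so self.size is the per-register size. A
            # size-4 pair reads a single doubleword (Mem_ud); a size-8
            # pair reads two (Mem_tud[0] and Mem_tud[1]).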
            # Code that actually handles the access
            if self.flavor == "fp":
                if self.size == 4:
                    accCode = '''
                        uint64_t data = cSwap(Mem_ud, isBigEndian64(xc->tcBase()));
                        AA64FpDestP0_uw = isBigEndian64(xc->tcBase())
                                        ? (data >> 32)
                                        : (uint32_t)data;
                        AA64FpDestP1_uw = 0;
                        AA64FpDestP2_uw = 0;
                        AA64FpDestP3_uw = 0;
                        AA64FpDest2P0_uw = isBigEndian64(xc->tcBase())
                                         ? (uint32_t)data
                                         : (data >> 32);
                        AA64FpDest2P1_uw = 0;
                        AA64FpDest2P2_uw = 0;
                        AA64FpDest2P3_uw = 0;
                    '''
                elif self.size == 8:
                    accCode = '''
                        uint64_t data_a = cSwap(Mem_tud[0],
                                                isBigEndian64(xc->tcBase()));
                        uint64_t data_b = cSwap(Mem_tud[1],
                                                isBigEndian64(xc->tcBase()));
                        AA64FpDestP0_uw = (uint32_t)data_a;
                        AA64FpDestP1_uw = (uint32_t)(data_a >> 32);
                        AA64FpDestP2_uw = 0;
                        AA64FpDestP3_uw = 0;
                        AA64FpDest2P0_uw = (uint32_t)data_b;
                        AA64FpDest2P1_uw = (uint32_t)(data_b >> 32);
                        AA64FpDest2P2_uw = 0;
                        AA64FpDest2P3_uw = 0;
                    '''
            else:
                if self.sign:
                    if self.size == 4:
                        accCode = '''
                            uint64_t data = cSwap(Mem_ud,
                                                  isBigEndian64(xc->tcBase()));
                            XDest = isBigEndian64(xc->tcBase())
                                  ? sext<32>(data >> 32)
                                  : sext<32>((uint32_t)data);
                            XDest2 = isBigEndian64(xc->tcBase())
                                   ? sext<32>((uint32_t)data)
                                   : sext<32>(data >> 32);
                        '''
                    elif self.size == 8:
                        accCode = '''
                            XDest = cSwap(Mem_tud[0],
                                          isBigEndian64(xc->tcBase()));
                            XDest2 = cSwap(Mem_tud[1],
                                           isBigEndian64(xc->tcBase()));
                        '''
                else:
                    if self.size == 4:
                        accCode = '''
                            uint64_t data = cSwap(Mem_ud,
                                                  isBigEndian64(xc->tcBase()));
                            XDest = isBigEndian64(xc->tcBase())
                                  ? (data >> 32)
                                  : (uint32_t)data;
                            XDest2 = isBigEndian64(xc->tcBase())
                                   ? (uint32_t)data
                                   : (data >> 32);
                        '''
                    elif self.size == 8:
                        accCode = '''
                            XDest = cSwap(Mem_tud[0],
                                          isBigEndian64(xc->tcBase()));
                            XDest2 = cSwap(Mem_tud[1],
                                           isBigEndian64(xc->tcBase()));
                        '''
            self.codeBlobs["memacc_code"] = accCode

            # Push it out to the output files
            wbDecl = None
            if self.writeback and not self.micro:
                wbDecl = self.wbDecl
            self.emitHelper(self.base, wbDecl)

    class LoadImm64(LoadImmInst64, LoadSingle64):
        decConstBase = 'LoadStoreImm64'
        base = 'ArmISA::MemoryImm64'
        writeback = False
        post = False

    class LoadPre64(LoadImmInst64, LoadSingle64):
        decConstBase = 'LoadStoreImm64'
        base = 'ArmISA::MemoryPreIndex64'
        writeback = True
        post = False

    class LoadPost64(LoadImmInst64, LoadSingle64):
        decConstBase = 'LoadStoreImm64'
        base = 'ArmISA::MemoryPostIndex64'
        writeback = True
        post = True

    class LoadReg64(LoadRegInst64, LoadSingle64):
        decConstBase = 'LoadStoreReg64'
        base = 'ArmISA::MemoryReg64'
        writeback = False
        post = False

    class LoadRaw64(LoadRawRegInst64, LoadSingle64):
        decConstBase = 'LoadStoreRaw64'
        base = 'ArmISA::MemoryRaw64'
        writeback = False
        post = False

    class LoadEx64(LoadRawRegInst64, LoadSingle64):
        decConstBase = 'LoadStoreEx64'
        base = 'ArmISA::MemoryEx64'
        writeback = False
        post = False

    class LoadLit64(LoadImmInst64, LoadSingle64):
        decConstBase = 'LoadStoreLit64'
        base = 'ArmISA::MemoryLiteral64'
        writeback = False
        post = False

    def buildLoads64(mnem, NameBase, size, sign, flavor="normal"):
        LoadImm64(mnem, NameBase + "_IMM", size, sign, flavor=flavor).emit()
        LoadPre64(mnem, NameBase + "_PRE", size, sign, flavor=flavor).emit()
        LoadPost64(mnem, NameBase + "_POST", size, sign, flavor=flavor).emit()
        LoadReg64(mnem, NameBase + "_REG", size, sign, flavor=flavor).emit()

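    # For example, buildLoads64("ldrb", "LDRB64", 1, False) below emits
    # the immediate, pre-indexed, post-indexed and register-offset
    # forms of LDRB as LDRB64_IMM, LDRB64_PRE, LDRB64_POST and
    # LDRB64_REG.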
354 buildLoads64("ldrb", "LDRB64", 1, False)
355 buildLoads64("ldrsb", "LDRSBW64", 1, True)
356 buildLoads64("ldrsb", "LDRSBX64", 1, True, flavor="widen")
357 buildLoads64("ldrh", "LDRH64", 2, False)
358 buildLoads64("ldrsh", "LDRSHW64", 2, True)
359 buildLoads64("ldrsh", "LDRSHX64", 2, True, flavor="widen")
360 buildLoads64("ldrsw", "LDRSW64", 4, True, flavor="widen")
361 buildLoads64("ldr", "LDRW64", 4, False)
362 buildLoads64("ldr", "LDRX64", 8, False)
363 buildLoads64("ldr", "LDRBFP64", 1, False, flavor="fp")
364 buildLoads64("ldr", "LDRHFP64", 2, False, flavor="fp")
365 buildLoads64("ldr", "LDRSFP64", 4, False, flavor="fp")
366 buildLoads64("ldr", "LDRDFP64", 8, False, flavor="fp")
367
368 LoadImm64("prfm", "PRFM64_IMM", 8, flavor="dprefetch").emit()
369 LoadReg64("prfm", "PRFM64_REG", 8, flavor="dprefetch").emit()
370 LoadLit64("prfm", "PRFM64_LIT", 8, literal=True, flavor="dprefetch").emit()
371 LoadImm64("prfum", "PRFUM64_IMM", 8, flavor="dprefetch").emit()
372
373 LoadImm64("ldurb", "LDURB64_IMM", 1, False).emit()
374 LoadImm64("ldursb", "LDURSBW64_IMM", 1, True).emit()
375 LoadImm64("ldursb", "LDURSBX64_IMM", 1, True, flavor="widen").emit()
376 LoadImm64("ldurh", "LDURH64_IMM", 2, False).emit()
377 LoadImm64("ldursh", "LDURSHW64_IMM", 2, True).emit()
378 LoadImm64("ldursh", "LDURSHX64_IMM", 2, True, flavor="widen").emit()
379 LoadImm64("ldursw", "LDURSW64_IMM", 4, True, flavor="widen").emit()
380 LoadImm64("ldur", "LDURW64_IMM", 4, False).emit()
381 LoadImm64("ldur", "LDURX64_IMM", 8, False).emit()
382 LoadImm64("ldur", "LDURBFP64_IMM", 1, flavor="fp").emit()
383 LoadImm64("ldur", "LDURHFP64_IMM", 2, flavor="fp").emit()
384 LoadImm64("ldur", "LDURSFP64_IMM", 4, flavor="fp").emit()
385 LoadImm64("ldur", "LDURDFP64_IMM", 8, flavor="fp").emit()
386
387 LoadImm64("ldtrb", "LDTRB64_IMM", 1, False, True).emit()
388 LoadImm64("ldtrsb", "LDTRSBW64_IMM", 1, True, True).emit()
389 LoadImm64("ldtrsb", "LDTRSBX64_IMM", 1, True, True, flavor="widen").emit()
390 LoadImm64("ldtrh", "LDTRH64_IMM", 2, False, True).emit()
391 LoadImm64("ldtrsh", "LDTRSHW64_IMM", 2, True, True).emit()
392 LoadImm64("ldtrsh", "LDTRSHX64_IMM", 2, True, True, flavor="widen").emit()
393 LoadImm64("ldtrsw", "LDTRSW64_IMM", 4, True, flavor="widen").emit()
394 LoadImm64("ldtr", "LDTRW64_IMM", 4, False, True).emit()
395 LoadImm64("ldtr", "LDTRX64_IMM", 8, False, True).emit()
396
397 LoadLit64("ldrsw", "LDRSWL64_LIT", 4, True, \
398 literal=True, flavor="widen").emit()
399 LoadLit64("ldr", "LDRWL64_LIT", 4, False, literal=True).emit()
400 LoadLit64("ldr", "LDRXL64_LIT", 8, False, literal=True).emit()
401 LoadLit64("ldr", "LDRSFP64_LIT", 4, literal=True, flavor="fp").emit()
402 LoadLit64("ldr", "LDRDFP64_LIT", 8, literal=True, flavor="fp").emit()
403
404 LoadRaw64("ldar", "LDARX64", 8, flavor="acquire").emit()
405 LoadRaw64("ldar", "LDARW64", 4, flavor="acquire").emit()
406 LoadRaw64("ldarh", "LDARH64", 2, flavor="acquire").emit()
407 LoadRaw64("ldarb", "LDARB64", 1, flavor="acquire").emit()
408
409 LoadEx64("ldaxr", "LDAXRX64", 8, flavor="acex").emit()
410 LoadEx64("ldaxr", "LDAXRW64", 4, flavor="acex").emit()
411 LoadEx64("ldaxrh", "LDAXRH64", 2, flavor="acex").emit()
412 LoadEx64("ldaxrb", "LDAXRB64", 1, flavor="acex").emit()
413
414 LoadEx64("ldxr", "LDXRX64", 8, flavor="exclusive").emit()
415 LoadEx64("ldxr", "LDXRW64", 4, flavor="exclusive").emit()
416 LoadEx64("ldxrh", "LDXRH64", 2, flavor="exclusive").emit()
417 LoadEx64("ldxrb", "LDXRB64", 1, flavor="exclusive").emit()
418
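    # LDAPR (load-acquire with RCpc semantics, introduced in ARMv8.3)
    # is modelled here with the same "acquire" flavor as LDAR.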
419 LoadRaw64("ldapr", "LDAPRX64", 8, flavor="acquire").emit()
420 LoadRaw64("ldapr", "LDAPRW64", 4, flavor="acquire").emit()
421 LoadRaw64("ldaprh", "LDAPRH64", 2, flavor="acquire").emit()
422 LoadRaw64("ldaprb", "LDAPRB64", 1, flavor="acquire").emit()
423
    class LoadImmU64(LoadImm64):
        decConstBase = 'LoadStoreImmU64'
        micro = True

    class LoadImmDU64(LoadImmInst64, LoadDouble64):
        decConstBase = 'LoadStoreImmDU64'
        base = 'ArmISA::MemoryDImm64'
        micro = True
        post = False
        writeback = False

    class LoadImmDouble64(LoadImmInst64, LoadDouble64):
        decConstBase = 'LoadStoreImmDU64'
        base = 'ArmISA::MemoryDImm64'
        micro = False
        post = False
        writeback = False

    class LoadRegU64(LoadReg64):
        decConstBase = 'LoadStoreRegU64'
        micro = True

    class LoadLitU64(LoadLit64):
        decConstBase = 'LoadStoreLitU64'
        micro = True

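    # The *U64 classes are microops (micro = True): they are the
    # building blocks that macro-ops such as LDP are decomposed into,
    # and so never carry a writeback microop of their own.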
450 LoadImmDU64("ldp_uop", "MicroLdPairUop", 8).emit()
451 LoadImmDU64("ldp_fp8_uop", "MicroLdPairFp8Uop", 8, flavor="fp").emit()
452 LoadImmU64("ldfp16_uop", "MicroLdFp16Uop", 16, flavor="fp").emit()
453 LoadReg64("ldfp16reg_uop", "MicroLdFp16RegUop", 16, flavor="fp").emit()
454
455 LoadImmDouble64("ldaxp", "LDAXPW64", 4, flavor="acexp").emit()
456 LoadImmDouble64("ldaxp", "LDAXPX64", 8, flavor="acexp").emit()
457 LoadImmDouble64("ldxp", "LDXPW64", 4, flavor="exp").emit()
458 LoadImmDouble64("ldxp", "LDXPX64", 8, flavor="exp").emit()
459
460 LoadImmU64("ldrxi_uop", "MicroLdrXImmUop", 8).emit()
461 LoadRegU64("ldrxr_uop", "MicroLdrXRegUop", 8).emit()
462 LoadLitU64("ldrxl_uop", "MicroLdrXLitUop", 8, literal=True).emit()
463 LoadImmU64("ldrfpxi_uop", "MicroLdrFpXImmUop", 8, flavor="fp").emit()
464 LoadRegU64("ldrfpxr_uop", "MicroLdrFpXRegUop", 8, flavor="fp").emit()
465 LoadLitU64("ldrfpxl_uop", "MicroLdrFpXLitUop", 8, literal=True,
466 flavor="fp").emit()
467 LoadLitU64("ldfp16_lit__uop", "MicroLdFp16LitUop",
468 16, literal=True, flavor="fp").emit()
469 LoadImmDU64("ldrduxi_uop", "MicroLdrDUXImmUop", 4, sign=False).emit()
470 LoadImmDU64("ldrdsxi_uop", "MicroLdrDSXImmUop", 4, sign=True).emit()
471 LoadImmDU64("ldrdfpxi_uop", "MicroLdrDFpXImmUop", 4, flavor="fp").emit()
472}};