// NOTE(review): removed web diff-viewer navigation residue ("Deleted Added",
// changeset links, "full compact") that was accidentally pasted ahead of the
// file header.
1// -*- mode:c++ -*-
2
3// Copyright (c) 2011-2014, 2017 ARM Limited
4// All rights reserved
5//
6// The license below extends only to copyright in the software and shall
7// not be construed as granting a license to any other intellectual
8// property including but not limited to intellectual property relating
9// to a hardware implementation of the functionality of the software
10// licensed hereunder. You may use the software subject to the license
11// terms below provided that you ensure that this notice is replicated
12// unmodified and in its entirety in all distributions of the software,
13// modified or unmodified, in source code or in binary form.
14//
15// Redistribution and use in source and binary forms, with or without
16// modification, are permitted provided that the following conditions are
17// met: redistributions of source code must retain the above copyright
18// notice, this list of conditions and the following disclaimer;
19// redistributions in binary form must reproduce the above copyright
20// notice, this list of conditions and the following disclaimer in the
21// documentation and/or other materials provided with the distribution;
22// neither the name of the copyright holders nor the names of its
23// contributors may be used to endorse or promote products derived from
24// this software without specific prior written permission.
25//
26// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
27// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
28// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
29// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
30// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
31// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
32// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
33// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
34// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
35// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
36// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37//
38// Authors: Gabe Black
39
40let {{
41
42 header_output = ""
43 decoder_output = ""
44 exec_output = ""
45
46 class LoadInst64(LoadStoreInst):
47 execBase = 'Load64'
48 micro = False
49
50 def __init__(self, mnem, Name, size=4, sign=False, user=False,
51 literal=False, flavor="normal", top=False):
52 super(LoadInst64, self).__init__()
53
54 self.name = mnem
55 self.Name = Name
56 self.size = size
57 self.sign = sign
58 self.user = user
59 self.literal = literal
60 self.flavor = flavor
61 self.top = top
62
63 self.memFlags = ["ArmISA::TLB::MustBeOne"]
64 self.instFlags = []
65 self.codeBlobs = {"postacc_code" : ""}
66
67 # Add memory request flags where necessary
68 if self.user:
69 self.memFlags.append("ArmISA::TLB::UserMode")
70
71 if self.flavor == "dprefetch":
72 self.memFlags.append("Request::PREFETCH")
73 self.instFlags = ['IsDataPrefetch']
74 elif self.flavor == "iprefetch":
75 self.memFlags.append("Request::PREFETCH")
76 self.instFlags = ['IsInstPrefetch']
77 elif self.flavor == "mprefetch":
78 self.memFlags.append("((((dest>>3)&3)==2)? \
79 (Request::PF_EXCLUSIVE):(Request::PREFETCH))")
80 self.instFlags = ['IsDataPrefetch']
81 if self.micro:
82 self.instFlags.append("IsMicroop")
83
84 if self.flavor in ("acexp", "exp"):
85 # For exclusive pair ops alignment check is based on total size
86 self.memFlags.append("%d" % int(math.log(self.size, 2) + 1))
87 elif not (self.size == 16 and self.top):
88 # Only the first microop should perform alignment checking.
89 self.memFlags.append("%d" % int(math.log(self.size, 2)))
90
91 if self.flavor not in ("acquire", "acex", "exclusive",
92 "acexp", "exp"):
93 self.memFlags.append("ArmISA::TLB::AllowUnaligned")
94
95 if self.flavor in ("acquire", "acex", "acexp"):
96 self.instFlags.extend(["IsMemBarrier",
97 "IsWriteBarrier",
98 "IsReadBarrier"])
99 if self.flavor in ("acex", "exclusive", "exp", "acexp"):
100 self.memFlags.append("Request::LLSC")
101
102 def buildEACode(self):
103 # Address computation code
104 eaCode = ""
105 if self.flavor == "fp":
106 eaCode += vfp64EnabledCheckCode
107
108 if self.literal:
109 eaCode += "EA = RawPC"
110 else:
111 eaCode += SPAlignmentCheckCode + "EA = XBase"
112
113 if self.size == 16:
114 if self.top:
115 eaCode += " + (isBigEndian64(xc->tcBase()) ? 0 : 8)"
116 else:
117 eaCode += " + (isBigEndian64(xc->tcBase()) ? 8 : 0)"
118 if not self.post:
119 eaCode += self.offset
120 eaCode += ";"
121
122 self.codeBlobs["ea_code"] = eaCode
123
124 def emitHelper(self, base='Memory64', wbDecl=None):
125 global header_output, decoder_output, exec_output
126
127 # If this is a microop itself, don't allow anything that would
128 # require further microcoding.
129 if self.micro:
130 assert not wbDecl
131
132 fa_code = None
133 if not self.micro and self.flavor in ("normal", "widen", "acquire"):
134 fa_code = '''
135 fault->annotate(ArmFault::SAS, %s);
136 fault->annotate(ArmFault::SSE, %s);
137 fault->annotate(ArmFault::SRT, dest);
138 fault->annotate(ArmFault::SF, %s);
139 fault->annotate(ArmFault::AR, %s);
140 ''' % ("0" if self.size == 1 else
141 "1" if self.size == 2 else
142 "2" if self.size == 4 else "3",
143 "true" if self.sign else "false",
144 "true" if (self.size == 8 or
145 self.flavor == "widen") else "false",
146 "true" if self.flavor == "acquire" else "false")
147
148 (newHeader, newDecoder, newExec) = \
149 self.fillTemplates(self.name, self.Name, self.codeBlobs,
150 self.memFlags, self.instFlags,
151 base, wbDecl, faCode=fa_code)
152
153 header_output += newHeader
154 decoder_output += newDecoder
155 exec_output += newExec
156
157 class LoadImmInst64(LoadInst64):
158 def __init__(self, *args, **kargs):
159 super(LoadImmInst64, self).__init__(*args, **kargs)
160 self.offset = " + imm"
161
162 self.wbDecl = "MicroAddXiUop(machInst, base, base, imm);"
163
164 class LoadRegInst64(LoadInst64):
165 def __init__(self, *args, **kargs):
166 super(LoadRegInst64, self).__init__(*args, **kargs)
167 self.offset = " + extendReg64(XOffset, type, shiftAmt, 64)"
168
169 self.wbDecl = \
170 "MicroAddXERegUop(machInst, base, base, " + \
171 " offset, type, shiftAmt);"
172
173 class LoadRawRegInst64(LoadInst64):
174 def __init__(self, *args, **kargs):
175 super(LoadRawRegInst64, self).__init__(*args, **kargs)
176 self.offset = ""
177
178 class LoadSingle64(LoadInst64):
179 def emit(self):
180 self.buildEACode()
181
182 # Code that actually handles the access
183 if self.flavor in ("dprefetch", "iprefetch", "mprefetch"):
184 accCode = 'uint64_t temp M5_VAR_USED = Mem%s;'
185 elif self.flavor == "fp":
186 if self.size in (1, 2, 4):
187 accCode = '''
188 AA64FpDestP0_uw = cSwap(Mem%s,
189 isBigEndian64(xc->tcBase()));
190 AA64FpDestP1_uw = 0;
191 AA64FpDestP2_uw = 0;
192 AA64FpDestP3_uw = 0;
193 '''
194 elif self.size == 8:
195 accCode = '''
196 uint64_t data = cSwap(Mem%s,
197 isBigEndian64(xc->tcBase()));
198 AA64FpDestP0_uw = (uint32_t)data;
199 AA64FpDestP1_uw = (data >> 32);
200 AA64FpDestP2_uw = 0;
201 AA64FpDestP3_uw = 0;
202 '''
203 elif self.size == 16:
204 accCode = '''
205 auto data = cSwap(Mem%s, isBigEndian64(xc->tcBase()));
206 AA64FpDestP0_uw = (uint32_t)data[0];
207 AA64FpDestP1_uw = (data[0] >> 32);
208 AA64FpDestP2_uw = (uint32_t)data[1];
209 AA64FpDestP3_uw = (data[1] >> 32);
210 '''
211 elif self.flavor == "widen" or self.size == 8:
212 accCode = "XDest = cSwap(Mem%s, isBigEndian64(xc->tcBase()));"
213 else:
214 accCode = "WDest = cSwap(Mem%s, isBigEndian64(xc->tcBase()));"
215
216 accCode = accCode % buildMemSuffix(self.sign, self.size)
217
218 self.codeBlobs["memacc_code"] = accCode
219
220 # Push it out to the output files
221 wbDecl = None
222 if self.writeback and not self.micro:
223 wbDecl = self.wbDecl
224 self.emitHelper(self.base, wbDecl)
225
226 class LoadDouble64(LoadInst64):
227 def emit(self):
228 self.buildEACode()
229
230 # Code that actually handles the access
231 if self.flavor == "fp":
232 if self.size == 4:
233 accCode = '''
234 uint64_t data = cSwap(Mem_ud, isBigEndian64(xc->tcBase()));
235 AA64FpDestP0_uw = isBigEndian64(xc->tcBase())
236 ? (data >> 32)
237 : (uint32_t)data;
238 AA64FpDestP1_uw = 0;
239 AA64FpDestP2_uw = 0;
240 AA64FpDestP3_uw = 0;
241 AA64FpDest2P0_uw = isBigEndian64(xc->tcBase())
242 ? (uint32_t)data
243 : (data >> 32);
244 AA64FpDest2P1_uw = 0;
245 AA64FpDest2P2_uw = 0;
246 AA64FpDest2P3_uw = 0;
247 '''
248 elif self.size == 8:
249 accCode = '''
250 uint64_t data_a = cSwap(Mem_tud[0],
251 isBigEndian64(xc->tcBase()));
252 uint64_t data_b = cSwap(Mem_tud[1],
253 isBigEndian64(xc->tcBase()));
254 AA64FpDestP0_uw = (uint32_t)data_a;
255 AA64FpDestP1_uw = (uint32_t)(data_a >> 32);
256 AA64FpDestP2_uw = 0;
257 AA64FpDestP3_uw = 0;
258 AA64FpDest2P0_uw = (uint32_t)data_b;
259 AA64FpDest2P1_uw = (uint32_t)(data_b >> 32);
260 AA64FpDest2P2_uw = 0;
261 AA64FpDest2P3_uw = 0;
262 '''
263 else:
264 if self.sign:
265 if self.size == 4:
266 accCode = '''
267 uint64_t data = cSwap(Mem_ud,
268 isBigEndian64(xc->tcBase()));
269 XDest = isBigEndian64(xc->tcBase())
270 ? sext<32>(data >> 32)
271 : sext<32>((uint32_t)data);
272 XDest2 = isBigEndian64(xc->tcBase())
273 ? sext<32>((uint32_t)data)
274 : sext<32>(data >> 32);
275 '''
276 elif self.size == 8:
277 accCode = '''
278 XDest = cSwap(Mem_tud[0],
279 isBigEndian64(xc->tcBase()));
280 XDest2 = cSwap(Mem_tud[1],
281 isBigEndian64(xc->tcBase()));
282 '''
283 else:
284 if self.size == 4:
285 accCode = '''
286 uint64_t data = cSwap(Mem_ud,
287 isBigEndian64(xc->tcBase()));
288 XDest = isBigEndian64(xc->tcBase())
289 ? (data >> 32)
290 : (uint32_t)data;
291 XDest2 = isBigEndian64(xc->tcBase())
292 ? (uint32_t)data
293 : (data >> 32);
294 '''
295 elif self.size == 8:
296 accCode = '''
297 XDest = cSwap(Mem_tud[0],
298 isBigEndian64(xc->tcBase()));
299 XDest2 = cSwap(Mem_tud[1],
300 isBigEndian64(xc->tcBase()));
301 '''
302 self.codeBlobs["memacc_code"] = accCode
303
304 # Push it out to the output files
305 wbDecl = None
306 if self.writeback and not self.micro:
307 wbDecl = self.wbDecl
308 self.emitHelper(self.base, wbDecl)
309
310 class LoadImm64(LoadImmInst64, LoadSingle64):
311 decConstBase = 'LoadStoreImm64'
312 base = 'ArmISA::MemoryImm64'
313 writeback = False
314 post = False
315
316 class LoadPre64(LoadImmInst64, LoadSingle64):
317 decConstBase = 'LoadStoreImm64'
318 base = 'ArmISA::MemoryPreIndex64'
319 writeback = True
320 post = False
321
322 class LoadPost64(LoadImmInst64, LoadSingle64):
323 decConstBase = 'LoadStoreImm64'
324 base = 'ArmISA::MemoryPostIndex64'
325 writeback = True
326 post = True
327
328 class LoadReg64(LoadRegInst64, LoadSingle64):
329 decConstBase = 'LoadStoreReg64'
330 base = 'ArmISA::MemoryReg64'
331 writeback = False
332 post = False
333
334 class LoadRaw64(LoadRawRegInst64, LoadSingle64):
335 decConstBase = 'LoadStoreRaw64'
336 base = 'ArmISA::MemoryRaw64'
337 writeback = False
338 post = False
339
340 class LoadEx64(LoadRawRegInst64, LoadSingle64):
341 decConstBase = 'LoadStoreEx64'
342 base = 'ArmISA::MemoryEx64'
343 writeback = False
344 post = False
345
346 class LoadLit64(LoadImmInst64, LoadSingle64):
347 decConstBase = 'LoadStoreLit64'
348 base = 'ArmISA::MemoryLiteral64'
349 writeback = False
350 post = False
351
352 def buildLoads64(mnem, NameBase, size, sign, flavor="normal"):
353 LoadImm64(mnem, NameBase + "_IMM", size, sign, flavor=flavor).emit()
354 LoadPre64(mnem, NameBase + "_PRE", size, sign, flavor=flavor).emit()
355 LoadPost64(mnem, NameBase + "_POST", size, sign, flavor=flavor).emit()
356 LoadReg64(mnem, NameBase + "_REG", size, sign, flavor=flavor).emit()
357
358 buildLoads64("ldrb", "LDRB64", 1, False)
359 buildLoads64("ldrsb", "LDRSBW64", 1, True)
360 buildLoads64("ldrsb", "LDRSBX64", 1, True, flavor="widen")
361 buildLoads64("ldrh", "LDRH64", 2, False)
362 buildLoads64("ldrsh", "LDRSHW64", 2, True)
363 buildLoads64("ldrsh", "LDRSHX64", 2, True, flavor="widen")
364 buildLoads64("ldrsw", "LDRSW64", 4, True, flavor="widen")
365 buildLoads64("ldr", "LDRW64", 4, False)
366 buildLoads64("ldr", "LDRX64", 8, False)
367 buildLoads64("ldr", "LDRBFP64", 1, False, flavor="fp")
368 buildLoads64("ldr", "LDRHFP64", 2, False, flavor="fp")
369 buildLoads64("ldr", "LDRSFP64", 4, False, flavor="fp")
370 buildLoads64("ldr", "LDRDFP64", 8, False, flavor="fp")
371
372 LoadImm64("prfm", "PRFM64_IMM", 8, flavor="mprefetch").emit()
373 LoadReg64("prfm", "PRFM64_REG", 8, flavor="mprefetch").emit()
374 LoadLit64("prfm", "PRFM64_LIT", 8, literal=True,
375 flavor="mprefetch").emit()
376 LoadImm64("prfum", "PRFUM64_IMM", 8, flavor="mprefetch").emit()
377
378 LoadImm64("ldurb", "LDURB64_IMM", 1, False).emit()
379 LoadImm64("ldursb", "LDURSBW64_IMM", 1, True).emit()
380 LoadImm64("ldursb", "LDURSBX64_IMM", 1, True, flavor="widen").emit()
381 LoadImm64("ldurh", "LDURH64_IMM", 2, False).emit()
382 LoadImm64("ldursh", "LDURSHW64_IMM", 2, True).emit()
383 LoadImm64("ldursh", "LDURSHX64_IMM", 2, True, flavor="widen").emit()
384 LoadImm64("ldursw", "LDURSW64_IMM", 4, True, flavor="widen").emit()
385 LoadImm64("ldur", "LDURW64_IMM", 4, False).emit()
386 LoadImm64("ldur", "LDURX64_IMM", 8, False).emit()
387 LoadImm64("ldur", "LDURBFP64_IMM", 1, flavor="fp").emit()
388 LoadImm64("ldur", "LDURHFP64_IMM", 2, flavor="fp").emit()
389 LoadImm64("ldur", "LDURSFP64_IMM", 4, flavor="fp").emit()
390 LoadImm64("ldur", "LDURDFP64_IMM", 8, flavor="fp").emit()
391
392 LoadImm64("ldtrb", "LDTRB64_IMM", 1, False, True).emit()
393 LoadImm64("ldtrsb", "LDTRSBW64_IMM", 1, True, True).emit()
394 LoadImm64("ldtrsb", "LDTRSBX64_IMM", 1, True, True, flavor="widen").emit()
395 LoadImm64("ldtrh", "LDTRH64_IMM", 2, False, True).emit()
396 LoadImm64("ldtrsh", "LDTRSHW64_IMM", 2, True, True).emit()
397 LoadImm64("ldtrsh", "LDTRSHX64_IMM", 2, True, True, flavor="widen").emit()
398 LoadImm64("ldtrsw", "LDTRSW64_IMM", 4, True, flavor="widen").emit()
399 LoadImm64("ldtr", "LDTRW64_IMM", 4, False, True).emit()
400 LoadImm64("ldtr", "LDTRX64_IMM", 8, False, True).emit()
401
402 LoadLit64("ldrsw", "LDRSWL64_LIT", 4, True, \
403 literal=True, flavor="widen").emit()
404 LoadLit64("ldr", "LDRWL64_LIT", 4, False, literal=True).emit()
405 LoadLit64("ldr", "LDRXL64_LIT", 8, False, literal=True).emit()
406 LoadLit64("ldr", "LDRSFP64_LIT", 4, literal=True, flavor="fp").emit()
407 LoadLit64("ldr", "LDRDFP64_LIT", 8, literal=True, flavor="fp").emit()
408
409 LoadRaw64("ldar", "LDARX64", 8, flavor="acquire").emit()
410 LoadRaw64("ldar", "LDARW64", 4, flavor="acquire").emit()
411 LoadRaw64("ldarh", "LDARH64", 2, flavor="acquire").emit()
412 LoadRaw64("ldarb", "LDARB64", 1, flavor="acquire").emit()
413
414 LoadEx64("ldaxr", "LDAXRX64", 8, flavor="acex").emit()
415 LoadEx64("ldaxr", "LDAXRW64", 4, flavor="acex").emit()
416 LoadEx64("ldaxrh", "LDAXRH64", 2, flavor="acex").emit()
417 LoadEx64("ldaxrb", "LDAXRB64", 1, flavor="acex").emit()
418
419 LoadEx64("ldxr", "LDXRX64", 8, flavor="exclusive").emit()
420 LoadEx64("ldxr", "LDXRW64", 4, flavor="exclusive").emit()
421 LoadEx64("ldxrh", "LDXRH64", 2, flavor="exclusive").emit()
422 LoadEx64("ldxrb", "LDXRB64", 1, flavor="exclusive").emit()
423
424 LoadRaw64("ldapr", "LDAPRX64", 8, flavor="acquire").emit()
425 LoadRaw64("ldapr", "LDAPRW64", 4, flavor="acquire").emit()
426 LoadRaw64("ldaprh", "LDAPRH64", 2, flavor="acquire").emit()
427 LoadRaw64("ldaprb", "LDAPRB64", 1, flavor="acquire").emit()
428
429 class LoadImmU64(LoadImm64):
430 decConstBase = 'LoadStoreImmU64'
431 micro = True
432
433 class LoadImmDU64(LoadImmInst64, LoadDouble64):
434 decConstBase = 'LoadStoreImmDU64'
435 base = 'ArmISA::MemoryDImm64'
436 micro = True
437 post = False
438 writeback = False
439
440 class LoadImmDouble64(LoadImmInst64, LoadDouble64):
441 decConstBase = 'LoadStoreImmDU64'
442 base = 'ArmISA::MemoryDImm64'
443 micro = False
444 post = False
445 writeback = False
446
447 class LoadRegU64(LoadReg64):
448 decConstBase = 'LoadStoreRegU64'
449 micro = True
450
451 class LoadLitU64(LoadLit64):
452 decConstBase = 'LoadStoreLitU64'
453 micro = True
454
455 LoadImmDU64("ldp_uop", "MicroLdPairUop", 8).emit()
456 LoadImmDU64("ldp_fp8_uop", "MicroLdPairFp8Uop", 8, flavor="fp").emit()
457 LoadImmU64("ldfp16_uop", "MicroLdFp16Uop", 16, flavor="fp").emit()
458 LoadReg64("ldfp16reg_uop", "MicroLdFp16RegUop", 16, flavor="fp").emit()
459
460 LoadImmDouble64("ldaxp", "LDAXPW64", 4, flavor="acexp").emit()
461 LoadImmDouble64("ldaxp", "LDAXPX64", 8, flavor="acexp").emit()
462 LoadImmDouble64("ldxp", "LDXPW64", 4, flavor="exp").emit()
463 LoadImmDouble64("ldxp", "LDXPX64", 8, flavor="exp").emit()
464
465 LoadImmU64("ldrxi_uop", "MicroLdrXImmUop", 8).emit()
466 LoadRegU64("ldrxr_uop", "MicroLdrXRegUop", 8).emit()
467 LoadLitU64("ldrxl_uop", "MicroLdrXLitUop", 8, literal=True).emit()
468 LoadImmU64("ldrfpxi_uop", "MicroLdrFpXImmUop", 8, flavor="fp").emit()
469 LoadRegU64("ldrfpxr_uop", "MicroLdrFpXRegUop", 8, flavor="fp").emit()
470 LoadLitU64("ldrfpxl_uop", "MicroLdrFpXLitUop", 8, literal=True,
471 flavor="fp").emit()
472 LoadLitU64("ldfp16_lit__uop", "MicroLdFp16LitUop",
473 16, literal=True, flavor="fp").emit()
474 LoadImmDU64("ldrduxi_uop", "MicroLdrDUXImmUop", 4, sign=False).emit()
475 LoadImmDU64("ldrdsxi_uop", "MicroLdrDSXImmUop", 4, sign=True).emit()
476 LoadImmDU64("ldrdfpxi_uop", "MicroLdrDFpXImmUop", 4, flavor="fp").emit()
477}};