data64.isa revision 11582
// -*- mode:c++ -*-

// Copyright (c) 2011-2013, 2016 ARM Limited
// All rights reserved
//
// The license below extends only to copyright in the software and shall
// not be construed as granting a license to any other intellectual
// property including but not limited to intellectual property relating
// to a hardware implementation of the functionality of the software
// licensed hereunder.  You may use the software subject to the license
// terms below provided that you ensure that this notice is replicated
// unmodified and in its entirety in all distributions of the software,
// modified or unmodified, in source code or in binary form.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met: redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer;
// redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution;
// neither the name of the copyright holders nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Authors: Gabe Black

let {{

    header_output = ""
    decoder_output = ""
    exec_output = ""

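    # createCcCode64 returns a C++ snippet that updates the condition codes
    # from resTemp: N and Z are always derived from the result, while C and V
    # are filled in from the caller-supplied expressions (or skipped entirely
    # when the caller passes "none").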
    def createCcCode64(carry, overflow):
        code = ""
        code += '''
            uint16_t _iz, _in;
            _in = bits(resTemp, intWidth - 1);
            _iz = ((resTemp & mask(intWidth)) == 0);
            CondCodesNZ = (_in << 1) | _iz;
            DPRINTF(Arm, "(in, iz) = (%%d, %%d)\\n", _in, _iz);
        '''
        if overflow and overflow != "none":
            code += '''
                uint16_t _iv;
                _iv = %s & 1;
                CondCodesV = _iv;
                DPRINTF(Arm, "(iv) = (%%d)\\n", _iv);
            ''' % overflow
        if carry and carry != "none":
            code += '''
                uint16_t _ic;
                _ic = %s & 1;
                CondCodesC = _ic;
                DPRINTF(Arm, "(ic) = (%%d)\\n", _ic);
            ''' % carry
        return code

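    # The dicts below map a flagType ("none", "add", "sub", "logic") to the
    # expressions createCcCode64 uses for the carry and overflow flags.  The
    # findCarry()/findOverflow() helpers look at the result and the two
    # addends; subtraction reuses them by passing the complemented second
    # operand, since a - b is computed as a + ~b + 1 for flag purposes.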
    oldC = 'CondCodesC'
    oldV = 'CondCodesV'
    # Dict of ways to set the carry flag.
    carryCode64 = {
        "none": "none",
        "add": 'findCarry(intWidth, resTemp, Op164, secOp)',
        "sub": 'findCarry(intWidth, resTemp, Op164, ~secOp)',
        "logic": '0'
    }
    # Dict of ways to set the overflow flag.
    overflowCode64 = {
        "none": "none",
        "add": 'findOverflow(intWidth, resTemp, Op164, secOp)',
        "sub": 'findOverflow(intWidth, resTemp, Op164, ~secOp)',
        "logic": '0'
    }

    immOp2 = "uint64_t secOp M5_VAR_USED = imm;"
    sRegOp2 = "uint64_t secOp M5_VAR_USED = " + \
              "shiftReg64(Op264, shiftAmt, shiftType, intWidth);"
    eRegOp2 = "uint64_t secOp M5_VAR_USED = " + \
              "extendReg64(Op264, extendType, shiftAmt, intWidth);"

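    # buildDataWork glues an operand-fetch snippet and an operation snippet
    # into a complete instruction: it builds a plain variant and, when
    # requested, a flag-setting ("s") variant whose code is extended with the
    # condition-code update generated above.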
    def buildDataWork(mnem, code, flagType, suffix, buildCc, buildNonCc,
                      base, templateBase):
        code = '''
        uint64_t resTemp M5_VAR_USED = 0;
        ''' + code
        ccCode = createCcCode64(carryCode64[flagType], overflowCode64[flagType])
        Name = mnem.capitalize() + suffix
        iop = InstObjParams(mnem, Name, base, code)
        iopCc = InstObjParams(mnem + "s", Name + "Cc", base, code + ccCode)

        def subst(iop):
            global header_output, decoder_output, exec_output
            header_output += eval(templateBase + "Declare").subst(iop)
            decoder_output += eval(templateBase + "Constructor").subst(iop)
            exec_output += BasicExecute.subst(iop)

        if buildNonCc:
            subst(iop)
        if buildCc:
            subst(iopCc)

    def buildXImmDataInst(mnem, code, flagType = "logic", \
                          buildCc = True, buildNonCc = True, \
                          suffix = "XImm"):
        buildDataWork(mnem, immOp2 + code, flagType, suffix,
                      buildCc, buildNonCc, "DataXImmOp", "DataXImm")

    def buildXSRegDataInst(mnem, code, flagType = "logic", \
                           buildCc = True, buildNonCc = True, \
                           suffix = "XSReg"):
        buildDataWork(mnem, sRegOp2 + code, flagType, suffix,
                      buildCc, buildNonCc, "DataXSRegOp", "DataXSReg")

    def buildXERegDataInst(mnem, code, flagType = "logic", \
                           buildCc = True, buildNonCc = True, \
                           suffix = "XEReg"):
        buildDataWork(mnem, eRegOp2 + code, flagType, suffix,
                      buildCc, buildNonCc, "DataXERegOp", "DataXEReg")

    def buildDataInst(mnem, code, flagType = "logic",
                      buildCc = True, buildNonCc = True):
        buildXImmDataInst(mnem, code, flagType, buildCc, buildNonCc)
        buildXSRegDataInst(mnem, code, flagType, buildCc, buildNonCc)
        buildXERegDataInst(mnem, code, flagType, buildCc, buildNonCc)

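    # Basic ALU operations.  buildDataInst emits all three operand forms
    # (immediate, shifted register, extended register); the buildX*DataInst
    # calls below pick a single form where only one is defined.  For example,
    # buildDataInst("and", ...) generates AndXImm, AndXImmCc, AndXSReg,
    # AndXSRegCc, AndXEReg and AndXERegCc.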
    buildXImmDataInst("adr", "Dest64 = RawPC + imm", buildCc = False);
    buildXImmDataInst("adrp", "Dest64 = (RawPC & ~mask(12)) + imm",
                      buildCc = False);
    buildDataInst("and", "Dest64 = resTemp = Op164 & secOp;")
    buildDataInst("eor", "Dest64 = Op164 ^ secOp;", buildCc = False)
    buildXSRegDataInst("eon", "Dest64 = Op164 ^ ~secOp;", buildCc = False)
    buildDataInst("sub", "Dest64 = resTemp = Op164 - secOp;", "sub")
    buildDataInst("add", "Dest64 = resTemp = Op164 + secOp;", "add")
    buildXSRegDataInst("adc",
            "Dest64 = resTemp = Op164 + secOp + %s;" % oldC, "add")
    buildXSRegDataInst("sbc",
            "Dest64 = resTemp = Op164 - secOp - !%s;" % oldC, "sub")
    buildDataInst("orr", "Dest64 = Op164 | secOp;", buildCc = False)
    buildXSRegDataInst("orn", "Dest64 = Op164 | ~secOp;", buildCc = False)
    buildXSRegDataInst("bic", "Dest64 = resTemp = Op164 & ~secOp;")

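    # Generic builders for the remaining X-register data-processing formats.
    # buildDataXImmInst emits a single immediate-form instruction;
    # buildDataXRegInst emits a register-form instruction with one, two or
    # three source registers and an optional override of the op class used
    # for scheduling (e.g. IntMultOp, IntDivOp).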
    def buildDataXImmInst(mnem, code, optArgs = []):
        global header_output, decoder_output, exec_output
        classNamePrefix = mnem[0].upper() + mnem[1:]
        templateBase = "DataXImm"
        iop = InstObjParams(mnem, classNamePrefix + "64",
                            templateBase + "Op", code, optArgs)
        header_output += eval(templateBase + "Declare").subst(iop)
        decoder_output += eval(templateBase + "Constructor").subst(iop)
        exec_output += BasicExecute.subst(iop)

    def buildDataXRegInst(mnem, regOps, code, optArgs = [],
                          overrideOpClass=None):
        global header_output, decoder_output, exec_output
        templateBase = "DataX%dReg" % regOps
        classNamePrefix = mnem[0].upper() + mnem[1:]
        if overrideOpClass:
            iop = InstObjParams(mnem, classNamePrefix + "64",
                                templateBase + "Op",
                                { 'code': code, 'op_class': overrideOpClass},
                                optArgs)
        else:
            iop = InstObjParams(mnem, classNamePrefix + "64",
                                templateBase + "Op", code, optArgs)
        header_output += eval(templateBase + "Declare").subst(iop)
        decoder_output += eval(templateBase + "Constructor").subst(iop)
        exec_output += BasicExecute.subst(iop)

    buildDataXRegInst("madd", 3, "Dest64 = Op164 + Op264 * Op364",
        overrideOpClass="IntMultOp")
    buildDataXRegInst("msub", 3, "Dest64 = Op164 - Op264 * Op364",
        overrideOpClass="IntMultOp")
    buildDataXRegInst("smaddl", 3,
        "XDest = XOp1 + sext<32>(WOp2) * sext<32>(WOp3)",
        overrideOpClass="IntMultOp")
    buildDataXRegInst("smsubl", 3,
        "XDest = XOp1 - sext<32>(WOp2) * sext<32>(WOp3)",
        overrideOpClass="IntMultOp")
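    # smulh/umulh return the upper 64 bits of the full 128-bit product.  The
    # operands are split into 32-bit halves and the partial products are
    # accumulated with explicit carry handling; the signed version
    # sign-extends the high halves and the middle terms (via the ASR shifts),
    # while the unsigned version zero-extends everywhere.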
    buildDataXRegInst("smulh", 2, '''
        uint64_t op1H = (int32_t)(XOp1 >> 32);
        uint64_t op1L = (uint32_t)XOp1;
        uint64_t op2H = (int32_t)(XOp2 >> 32);
        uint64_t op2L = (uint32_t)XOp2;
        uint64_t mid1 = ((op1L * op2L) >> 32) + op1H * op2L;
        uint64_t mid2 = op1L * op2H;
        uint64_t result = ((uint64_t)(uint32_t)mid1 + (uint32_t)mid2) >> 32;
        result += shiftReg64(mid1, 32, ASR, intWidth);
        result += shiftReg64(mid2, 32, ASR, intWidth);
        XDest = result + op1H * op2H;
    ''', overrideOpClass="IntMultOp")
    buildDataXRegInst("umaddl", 3, "XDest = XOp1 + WOp2 * WOp3",
        overrideOpClass="IntMultOp")
    buildDataXRegInst("umsubl", 3, "XDest = XOp1 - WOp2 * WOp3",
        overrideOpClass="IntMultOp")
    buildDataXRegInst("umulh", 2, '''
        uint64_t op1H = (uint32_t)(XOp1 >> 32);
        uint64_t op1L = (uint32_t)XOp1;
        uint64_t op2H = (uint32_t)(XOp2 >> 32);
        uint64_t op2L = (uint32_t)XOp2;
        uint64_t mid1 = ((op1L * op2L) >> 32) + op1H * op2L;
        uint64_t mid2 = op1L * op2H;
        uint64_t result = ((uint64_t)(uint32_t)mid1 + (uint32_t)mid2) >> 32;
        result += mid1 >> 32;
        result += mid2 >> 32;
        XDest = result + op1H * op2H;
    ''', overrideOpClass="IntMultOp")

    buildDataXRegInst("asrv", 2,
        "Dest64 = shiftReg64(Op164, Op264, ASR, intWidth)")
    buildDataXRegInst("lslv", 2,
        "Dest64 = shiftReg64(Op164, Op264, LSL, intWidth)")
    buildDataXRegInst("lsrv", 2,
        "Dest64 = shiftReg64(Op164, Op264, LSR, intWidth)")
    buildDataXRegInst("rorv", 2,
        "Dest64 = shiftReg64(Op164, Op264, ROR, intWidth)")
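    # Division follows the architected corner cases: dividing by zero yields
    # zero rather than faulting, and sdiv special-cases a divisor of -1 so
    # that INT_MIN / -1 does not overflow in the host arithmetic.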
    buildDataXRegInst("sdiv", 2, '''
        int64_t op1 = Op164;
        int64_t op2 = Op264;
        if (intWidth == 32) {
            op1 = sext<32>(op1);
            op2 = sext<32>(op2);
        }
        Dest64 = op2 == -1 ? -op1 : op2 ? op1 / op2 : 0;
    ''', overrideOpClass="IntDivOp")
    buildDataXRegInst("udiv", 2, "Dest64 = Op264 ? Op164 / Op264 : 0",
        overrideOpClass="IntDivOp")

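    # cls counts leading sign bits (excluding the sign bit itself) and clz
    # counts leading zeros; both are expressed in terms of findMsbSet() on a
    # value that has been normalized so its leading run is made of zeros.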
    buildDataXRegInst("cls", 1, '''
        uint64_t op1 = Op164;
        if (bits(op1, intWidth - 1))
            op1 ^= mask(intWidth);
        Dest64 = (op1 == 0) ? intWidth - 1 : (intWidth - 2 - findMsbSet(op1));
    ''')
    buildDataXRegInst("clz", 1, '''
        Dest64 = (Op164 == 0) ? intWidth : (intWidth - 1 - findMsbSet(Op164));
    ''')
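    # rbit reverses the bit order of the operand by walking a pair of masks
    # inward from both ends and toggling each pair of bits that differ; rev,
    # rev16 and rev32 below reverse byte order within the whole register,
    # within each halfword, and within each word respectively.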
    buildDataXRegInst("rbit", 1, '''
        uint64_t result = Op164;
        uint64_t lBit = 1ULL << (intWidth - 1);
        uint64_t rBit = 1ULL;
        while (lBit > rBit) {
            uint64_t maskBits = lBit | rBit;
            uint64_t testBits = result & maskBits;
            // If these bits are different, swap them by toggling them.
            if (testBits && testBits != maskBits)
                result ^= maskBits;
            lBit >>= 1; rBit <<= 1;
        }
        Dest64 = result;
    ''')
    buildDataXRegInst("rev", 1, '''
        if (intWidth == 32)
            Dest64 = betole<uint32_t>(Op164);
        else
            Dest64 = betole<uint64_t>(Op164);
    ''')
    buildDataXRegInst("rev16", 1, '''
        int count = intWidth / 16;
        uint64_t result = 0;
        for (unsigned i = 0; i < count; i++) {
            uint16_t hw = Op164 >> (i * 16);
            result |= (uint64_t)betole<uint16_t>(hw) << (i * 16);
        }
        Dest64 = result;
    ''')
    buildDataXRegInst("rev32", 1, '''
        int count = intWidth / 32;
        uint64_t result = 0;
        for (unsigned i = 0; i < count; i++) {
            uint32_t hw = Op164 >> (i * 32);
            result |= (uint64_t)betole<uint32_t>(hw) << (i * 32);
        }
        Dest64 = result;
    ''')

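    # Shared permission/trap checking for MSR/MRS.  The snippet is expanded
    # twice with %s: first with "Read" or "Write" to select the access-right
    # check, then with "true" or "false" to tell the hypervisor-trap helper
    # whether the access is a read (mrs) or a write (msr).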
    msrMrs64EnabledCheckCode = '''
        // Check for read/write access right
        if (!can%sAArch64SysReg(flat_idx, Scr64, cpsr, xc->tcBase())) {
            if (flat_idx == MISCREG_DAIF ||
                flat_idx == MISCREG_DC_ZVA_Xt ||
                flat_idx == MISCREG_DC_CVAC_Xt ||
                flat_idx == MISCREG_DC_CIVAC_Xt
                )
                return std::make_shared<UndefinedInstruction>(
                                    machInst, 0, EC_TRAPPED_MSR_MRS_64,
                                    mnemonic);
            return std::make_shared<UndefinedInstruction>(machInst, false,
                                                          mnemonic);
        }

        // Check for traps to supervisor (FP/SIMD regs)
        if (el <= EL1 && msrMrs64TrapToSup(flat_idx, el, Cpacr64))
            return std::make_shared<SupervisorTrap>(machInst, 0x1E00000,
                                                    EC_TRAPPED_SIMD_FP);

        bool is_vfp_neon = false;

        // Check for traps to hypervisor
        if ((ArmSystem::haveVirtualization(xc->tcBase()) && el <= EL2) &&
            msrMrs64TrapToHyp(flat_idx, el, %s, CptrEl264, Hcr64, &is_vfp_neon)) {
            return std::make_shared<HypervisorTrap>(
                machInst, is_vfp_neon ? 0x1E00000 : imm,
                is_vfp_neon ? EC_TRAPPED_SIMD_FP : EC_TRAPPED_MSR_MRS_64);
        }

        // Check for traps to secure monitor
        if ((ArmSystem::haveSecurity(xc->tcBase()) && el <= EL3) &&
            msrMrs64TrapToMon(flat_idx, CptrEl364, el, &is_vfp_neon)) {
            return std::make_shared<SecureMonitorTrap>(
                machInst,
                is_vfp_neon ? 0x1E00000 : imm,
                is_vfp_neon ? EC_TRAPPED_SIMD_FP : EC_TRAPPED_MSR_MRS_64);
        }
    '''

    buildDataXImmInst("mrs", '''
        MiscRegIndex flat_idx = (MiscRegIndex) xc->tcBase()->
            flattenMiscIndex(op1);
        CPSR cpsr = Cpsr;
        ExceptionLevel el = (ExceptionLevel) (uint8_t) cpsr.el;
        %s
        XDest = MiscOp1_ud;
    ''' % (msrMrs64EnabledCheckCode % ('Read', 'true'),),
        ["IsSerializeBefore"])

    buildDataXRegInst("mrsNZCV", 1, '''
        CPSR cpsr = 0;
        cpsr.nz = CondCodesNZ;
        cpsr.c = CondCodesC;
        cpsr.v = CondCodesV;
        XDest = cpsr;
    ''')

    buildDataXImmInst("msr", '''
        MiscRegIndex flat_idx = (MiscRegIndex) xc->tcBase()->
            flattenMiscIndex(dest);
        CPSR cpsr = Cpsr;
        ExceptionLevel el = (ExceptionLevel) (uint8_t) cpsr.el;
        %s
        MiscDest_ud = XOp1;
    ''' % (msrMrs64EnabledCheckCode % ('Write', 'false'),),
        ["IsSerializeAfter", "IsNonSpeculative"])

    buildDataXRegInst("msrNZCV", 1, '''
        CPSR cpsr = XOp1;
        CondCodesNZ = cpsr.nz;
        CondCodesC = cpsr.c;
        CondCodesV = cpsr.v;
    ''')

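    # DC ZVA (zero cache block by VA) is decoded as an MSR to a special
    # register but behaves as a store: the effective address is the base
    # register rounded down to the block size advertised by DCZID
    # (2^(Dczid + 2) bytes), and the memory request carries the
    # CACHE_BLOCK_ZERO flag instead of write data.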
    msrdczva_ea_code = '''
        MiscRegIndex flat_idx = (MiscRegIndex) xc->tcBase()->flattenMiscIndex(dest);
        CPSR cpsr = Cpsr;
        ExceptionLevel el = (ExceptionLevel) (uint8_t) cpsr.el;
    '''

    msrdczva_ea_code += msrMrs64EnabledCheckCode % ('Write', 'false')
    msrdczva_ea_code += '''
        Request::Flags memAccessFlags = Request::CACHE_BLOCK_ZERO |
                                        ArmISA::TLB::MustBeOne;
        EA = XBase;
        assert(!(Dczid & 0x10));
        uint64_t op_size = power(2, Dczid + 2);
        EA &= ~(op_size - 1);
    '''

    msrDCZVAIop = InstObjParams("dczva", "Dczva", "SysDC64",
                { "ea_code" : msrdczva_ea_code,
                  "memacc_code" : ";", "use_uops" : 0,
                  "op_wb" : ";", "fa_code" : ";"}, ['IsStore', 'IsMemRef']);
    header_output += DCStore64Declare.subst(msrDCZVAIop);
    decoder_output += DCStore64Constructor.subst(msrDCZVAIop);
    exec_output += DCStore64Execute.subst(msrDCZVAIop);
    exec_output += DCStore64InitiateAcc.subst(msrDCZVAIop);
    exec_output += Store64CompleteAcc.subst(msrDCZVAIop);

    buildDataXImmInst("msrSP", '''
        if (!canWriteAArch64SysReg(
                (MiscRegIndex) xc->tcBase()->flattenMiscIndex(dest),
                Scr64, Cpsr, xc->tcBase())) {
            return std::make_shared<UndefinedInstruction>(machInst, false,
                                                          mnemonic);
        }
        MiscDest_ud = imm;
    ''', optArgs = ["IsSerializeAfter", "IsNonSpeculative"])

    buildDataXImmInst("msrDAIFSet", '''
        if (!canWriteAArch64SysReg(
                (MiscRegIndex) xc->tcBase()->flattenMiscIndex(dest),
                Scr64, Cpsr, xc->tcBase())) {
            return std::make_shared<UndefinedInstruction>(
                            machInst, 0, EC_TRAPPED_MSR_MRS_64,
                            mnemonic);
        }
        CPSR cpsr = Cpsr;
        cpsr.daif = cpsr.daif | imm;
        Cpsr = cpsr;
    ''', optArgs = ["IsSerializeAfter", "IsNonSpeculative"])

    buildDataXImmInst("msrDAIFClr", '''
        if (!canWriteAArch64SysReg(
                (MiscRegIndex) xc->tcBase()->flattenMiscIndex(dest),
                Scr64, Cpsr, xc->tcBase())) {
            return std::make_shared<UndefinedInstruction>(
                                machInst, 0, EC_TRAPPED_MSR_MRS_64,
                                mnemonic);
        }
        CPSR cpsr = Cpsr;
        cpsr.daif = cpsr.daif & ~imm;
        Cpsr = cpsr;
    ''', optArgs = ["IsSerializeAfter", "IsNonSpeculative"])

    def buildDataXCompInst(mnem, instType, suffix, code):
        global header_output, decoder_output, exec_output
        templateBase = "DataXCond%s" % instType
        iop = InstObjParams(mnem, mnem.capitalize() + suffix + "64",
                            templateBase + "Op", code)
        header_output += eval(templateBase + "Declare").subst(iop)
        decoder_output += eval(templateBase + "Constructor").subst(iop)
        exec_output += BasicExecute.subst(iop)

    def buildDataXCondImmInst(mnem, code):
        buildDataXCompInst(mnem, "CompImm", "Imm", code)
    def buildDataXCondRegInst(mnem, code):
        buildDataXCompInst(mnem, "CompReg", "Reg", code)
    def buildDataXCondSelInst(mnem, code):
        buildDataXCompInst(mnem, "Sel", "", code)

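    # Conditional compare: when the condition passes, the flags are set as if
    # the comparison (add for ccmn, subtract for ccmp) had been executed;
    # otherwise the NZCV flags are loaded from the 4-bit immediate defCc.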
    def condCompCode(flagType, op, imm):
        ccCode = createCcCode64(carryCode64[flagType], overflowCode64[flagType])
        opDecl = "uint64_t secOp M5_VAR_USED = imm;"
        if not imm:
            opDecl = "uint64_t secOp M5_VAR_USED = Op264;"
        return opDecl + '''
            if (testPredicate(CondCodesNZ, CondCodesC, CondCodesV, condCode)) {
                uint64_t resTemp = Op164 ''' + op + ''' secOp;
        ''' + ccCode + '''
            } else {
                CondCodesNZ = (defCc >> 2) & 0x3;
                CondCodesC = (defCc >> 1) & 0x1;
                CondCodesV = defCc & 0x1;
            }
        '''

    buildDataXCondImmInst("ccmn", condCompCode("add", "+", True))
    buildDataXCondImmInst("ccmp", condCompCode("sub", "-", True))
    buildDataXCondRegInst("ccmn", condCompCode("add", "+", False))
    buildDataXCondRegInst("ccmp", condCompCode("sub", "-", False))

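    # Conditional select: the result is the first operand when the condition
    # passes, otherwise the second operand, optionally incremented (csinc),
    # inverted (csinv) or negated (csneg).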
    condSelCode = '''
        if (testPredicate(CondCodesNZ, CondCodesC, CondCodesV, condCode)) {
            Dest64 = Op164;
        } else {
            Dest64 = %(altVal)s;
        }
    '''
    buildDataXCondSelInst("csel", condSelCode % {"altVal" : "Op264"})
    buildDataXCondSelInst("csinc", condSelCode % {"altVal" : "Op264 + 1"})
    buildDataXCondSelInst("csinv", condSelCode % {"altVal" : "~Op264"})
    buildDataXCondSelInst("csneg", condSelCode % {"altVal" : "-Op264"})
}};