data64.isa revision 10205:3ca67d0e0e7e
// -*- mode:c++ -*-

// Copyright (c) 2011-2013 ARM Limited
// All rights reserved
//
// The license below extends only to copyright in the software and shall
// not be construed as granting a license to any other intellectual
// property including but not limited to intellectual property relating
// to a hardware implementation of the functionality of the software
// licensed hereunder.  You may use the software subject to the license
// terms below provided that you ensure that this notice is replicated
// unmodified and in its entirety in all distributions of the software,
// modified or unmodified, in source code or in binary form.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met: redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer;
// redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution;
// neither the name of the copyright holders nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Authors: Gabe Black

let {{

    header_output = ""
    decoder_output = ""
    exec_output = ""

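    # createCcCode64 returns the C++ that updates the condition codes from
    # resTemp: N and Z are always recomputed, while C and V are only
    # emitted when the caller passes an expression other than "none" (see
    # the carryCode64/overflowCode64 tables below; their "sub" entries pass
    # ~secOp so that a - b sets flags as a + ~b + 1).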
    def createCcCode64(carry, overflow):
        code = ""
        code += '''
            uint16_t _iz, _in;
            _in = bits(resTemp, intWidth - 1);
            _iz = ((resTemp & mask(intWidth)) == 0);
            CondCodesNZ = (_in << 1) | _iz;
            DPRINTF(Arm, "(in, iz) = (%%d, %%d)\\n", _in, _iz);
        '''
        if overflow and overflow != "none":
            code += '''
                uint16_t _iv;
                _iv = %s & 1;
                CondCodesV = _iv;
                DPRINTF(Arm, "(iv) = (%%d)\\n", _iv);
            ''' % overflow
        if carry and carry != "none":
            code += '''
                uint16_t _ic;
                _ic = %s & 1;
                CondCodesC = _ic;
                DPRINTF(Arm, "(ic) = (%%d)\\n", _ic);
            ''' % carry
        return code

    oldC = 'CondCodesC'
    oldV = 'CondCodesV'
    # Dict of ways to set the carry flag.
    carryCode64 = {
        "none": "none",
        "add": 'findCarry(intWidth, resTemp, Op164, secOp)',
        "sub": 'findCarry(intWidth, resTemp, Op164, ~secOp)',
        "logic": '0'
    }
    # Dict of ways to set the overflow flag.
    overflowCode64 = {
        "none": "none",
        "add": 'findOverflow(intWidth, resTemp, Op164, secOp)',
        "sub": 'findOverflow(intWidth, resTemp, Op164, ~secOp)',
        "logic": '0'
    }

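    # The three forms of the second operand: an immediate, a shifted
    # register, or an extended (and possibly shifted) register.  The
    # shiftReg64/extendReg64 helpers come from the ARM ISA support code.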
    immOp2 = "uint64_t secOp M5_VAR_USED = imm;"
    sRegOp2 = "uint64_t secOp M5_VAR_USED = " + \
              "shiftReg64(Op264, shiftAmt, shiftType, intWidth);"
    eRegOp2 = "uint64_t secOp M5_VAR_USED = " + \
              "extendReg64(Op264, extendType, shiftAmt, intWidth);"

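    # buildDataWork creates an instruction class from the given operand
    # setup + operation code and, when requested, a flag-setting twin built
    # by appending the condition-code update (e.g. "add" yields AddXImm and
    # AddXImmCc, the latter decoding as "adds").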
    def buildDataWork(mnem, code, flagType, suffix, buildCc, buildNonCc,
                      base, templateBase):
        code = '''
        uint64_t resTemp M5_VAR_USED = 0;
        ''' + code
        ccCode = createCcCode64(carryCode64[flagType], overflowCode64[flagType])
        Name = mnem.capitalize() + suffix
        iop = InstObjParams(mnem, Name, base, code)
        iopCc = InstObjParams(mnem + "s", Name + "Cc", base, code + ccCode)

        def subst(iop):
            global header_output, decoder_output, exec_output
            header_output += eval(templateBase + "Declare").subst(iop)
            decoder_output += eval(templateBase + "Constructor").subst(iop)
            exec_output += BasicExecute.subst(iop)

        if buildNonCc:
            subst(iop)
        if buildCc:
            subst(iopCc)

    def buildXImmDataInst(mnem, code, flagType = "logic", \
                          buildCc = True, buildNonCc = True, \
                          suffix = "XImm"):
        buildDataWork(mnem, immOp2 + code, flagType, suffix,
                      buildCc, buildNonCc, "DataXImmOp", "DataXImm")

    def buildXSRegDataInst(mnem, code, flagType = "logic", \
                           buildCc = True, buildNonCc = True, \
                           suffix = "XSReg"):
        buildDataWork(mnem, sRegOp2 + code, flagType, suffix,
                      buildCc, buildNonCc, "DataXSRegOp", "DataXSReg")

    def buildXERegDataInst(mnem, code, flagType = "logic", \
                           buildCc = True, buildNonCc = True, \
                           suffix = "XEReg"):
        buildDataWork(mnem, eRegOp2 + code, flagType, suffix,
                      buildCc, buildNonCc, "DataXERegOp", "DataXEReg")

    def buildDataInst(mnem, code, flagType = "logic",
                      buildCc = True, buildNonCc = True):
        buildXImmDataInst(mnem, code, flagType, buildCc, buildNonCc)
        buildXSRegDataInst(mnem, code, flagType, buildCc, buildNonCc)
        buildXERegDataInst(mnem, code, flagType, buildCc, buildNonCc)

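    # The basic ALU instructions.  ADR/ADRP compute PC-relative addresses
    # (ADRP first masks the PC down to a 4KB page boundary); the arithmetic
    # forms pass "add"/"sub" so their "s" variants get the matching carry
    # and overflow code.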
    buildXImmDataInst("adr", "Dest64 = RawPC + imm", buildCc = False)
    buildXImmDataInst("adrp", "Dest64 = (RawPC & ~mask(12)) + imm",
                      buildCc = False)
    buildDataInst("and", "Dest64 = resTemp = Op164 & secOp;")
    buildDataInst("eor", "Dest64 = Op164 ^ secOp;", buildCc = False)
    buildXSRegDataInst("eon", "Dest64 = Op164 ^ ~secOp;", buildCc = False)
    buildDataInst("sub", "Dest64 = resTemp = Op164 - secOp;", "sub")
    buildDataInst("add", "Dest64 = resTemp = Op164 + secOp;", "add")
    buildXSRegDataInst("adc",
            "Dest64 = resTemp = Op164 + secOp + %s;" % oldC, "add")
    buildXSRegDataInst("sbc",
            "Dest64 = resTemp = Op164 - secOp - !%s;" % oldC, "sub")
    buildDataInst("orr", "Dest64 = Op164 | secOp;", buildCc = False)
    buildXSRegDataInst("orn", "Dest64 = Op164 | ~secOp;", buildCc = False)
    buildXSRegDataInst("bic", "Dest64 = resTemp = Op164 & ~secOp;")

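    # The helpers below build standalone instruction classes named <Mnem>64
    # from the DataXImm and DataX1Reg/DataX2Reg/DataX3Reg templates; an op
    # class override (IntMultOp, IntDivOp, ...) can be supplied so the
    # timing models schedule these on the right functional unit.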
    def buildDataXImmInst(mnem, code, optArgs = []):
        global header_output, decoder_output, exec_output
        classNamePrefix = mnem[0].upper() + mnem[1:]
        templateBase = "DataXImm"
        iop = InstObjParams(mnem, classNamePrefix + "64",
                            templateBase + "Op", code, optArgs)
        header_output += eval(templateBase + "Declare").subst(iop)
        decoder_output += eval(templateBase + "Constructor").subst(iop)
        exec_output += BasicExecute.subst(iop)

    def buildDataXRegInst(mnem, regOps, code, optArgs = [],
                          overrideOpClass=None):
        global header_output, decoder_output, exec_output
        templateBase = "DataX%dReg" % regOps
        classNamePrefix = mnem[0].upper() + mnem[1:]
        if overrideOpClass:
            iop = InstObjParams(mnem, classNamePrefix + "64",
                                templateBase + "Op",
                                { 'code': code, 'op_class': overrideOpClass},
                                optArgs)
        else:
            iop = InstObjParams(mnem, classNamePrefix + "64",
                                templateBase + "Op", code, optArgs)
        header_output += eval(templateBase + "Declare").subst(iop)
        decoder_output += eval(templateBase + "Constructor").subst(iop)
        exec_output += BasicExecute.subst(iop)

    buildDataXRegInst("madd", 3, "Dest64 = Op164 + Op264 * Op364",
        overrideOpClass="IntMultOp")
    buildDataXRegInst("msub", 3, "Dest64 = Op164 - Op264 * Op364",
        overrideOpClass="IntMultOp")
    buildDataXRegInst("smaddl", 3,
        "XDest = XOp1 + sext<32>(WOp2) * sext<32>(WOp3)",
        overrideOpClass="IntMultOp")
    buildDataXRegInst("smsubl", 3,
        "XDest = XOp1 - sext<32>(WOp2) * sext<32>(WOp3)",
        overrideOpClass="IntMultOp")
    buildDataXRegInst("smulh", 2, '''
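        // SMULH: high 64 bits of the signed 128-bit product.  The operands
        // are split into 32-bit halves (high halves sign-extended, low
        // halves zero-extended) and the partial products are summed with
        // explicit carry handling, since no 128-bit type is used here.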
        uint64_t op1H = (int32_t)(XOp1 >> 32);
        uint64_t op1L = (uint32_t)XOp1;
        uint64_t op2H = (int32_t)(XOp2 >> 32);
        uint64_t op2L = (uint32_t)XOp2;
        uint64_t mid1 = ((op1L * op2L) >> 32) + op1H * op2L;
        uint64_t mid2 = op1L * op2H;
        uint64_t result = ((uint64_t)(uint32_t)mid1 + (uint32_t)mid2) >> 32;
        result += shiftReg64(mid1, 32, ASR, intWidth);
        result += shiftReg64(mid2, 32, ASR, intWidth);
        XDest = result + op1H * op2H;
    ''', overrideOpClass="IntMultOp")
    buildDataXRegInst("umaddl", 3, "XDest = XOp1 + WOp2 * WOp3",
        overrideOpClass="IntMultOp")
    buildDataXRegInst("umsubl", 3, "XDest = XOp1 - WOp2 * WOp3",
        overrideOpClass="IntMultOp")
    buildDataXRegInst("umulh", 2, '''
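        // UMULH: high 64 bits of the unsigned 128-bit product, using the
        // same 32-bit partial-product scheme as SMULH but with all halves
        // zero-extended and plain logical shifts.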
        uint64_t op1H = (uint32_t)(XOp1 >> 32);
        uint64_t op1L = (uint32_t)XOp1;
        uint64_t op2H = (uint32_t)(XOp2 >> 32);
        uint64_t op2L = (uint32_t)XOp2;
        uint64_t mid1 = ((op1L * op2L) >> 32) + op1H * op2L;
        uint64_t mid2 = op1L * op2H;
        uint64_t result = ((uint64_t)(uint32_t)mid1 + (uint32_t)mid2) >> 32;
        result += mid1 >> 32;
        result += mid2 >> 32;
        XDest = result + op1H * op2H;
    ''', overrideOpClass="IntMultOp")

    buildDataXRegInst("asrv", 2,
        "Dest64 = shiftReg64(Op164, Op264, ASR, intWidth)")
    buildDataXRegInst("lslv", 2,
        "Dest64 = shiftReg64(Op164, Op264, LSL, intWidth)")
    buildDataXRegInst("lsrv", 2,
        "Dest64 = shiftReg64(Op164, Op264, LSR, intWidth)")
    buildDataXRegInst("rorv", 2,
        "Dest64 = shiftReg64(Op164, Op264, ROR, intWidth)")
    buildDataXRegInst("sdiv", 2, '''
        int64_t op1 = Op164;
        int64_t op2 = Op264;
        if (intWidth == 32) {
            op1 = sext<32>(op1);
            op2 = sext<32>(op2);
        }
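        // Division by zero yields zero.  op2 == -1 is handled as a simple
        // negation rather than going through '/', which would trap on
        // typical hosts for INT64_MIN / -1.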
        Dest64 = op2 == -1 ? -op1 : op2 ? op1 / op2 : 0;
    ''', overrideOpClass="IntDivOp")
    buildDataXRegInst("udiv", 2, "Dest64 = Op264 ? Op164 / Op264 : 0",
        overrideOpClass="IntDivOp")

    buildDataXRegInst("cls", 1, '''
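        // CLS: count leading sign bits (excluding the sign bit itself).
        // Flip the value when negative so the problem reduces to counting
        // leading zeros with findMsbSet().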
        uint64_t op1 = Op164;
        if (bits(op1, intWidth - 1))
            op1 ^= mask(intWidth);
        Dest64 = (op1 == 0) ? intWidth - 1 : (intWidth - 2 - findMsbSet(op1));
    ''')
    buildDataXRegInst("clz", 1, '''
        Dest64 = (Op164 == 0) ? intWidth : (intWidth - 1 - findMsbSet(Op164));
    ''')
    buildDataXRegInst("rbit", 1, '''
        uint64_t result = Op164;
        uint64_t lBit = 1ULL << (intWidth - 1);
        uint64_t rBit = 1ULL;
        while (lBit > rBit) {
            uint64_t maskBits = lBit | rBit;
            uint64_t testBits = result & maskBits;
            // If these bits are different, swap them by toggling them.
            if (testBits && testBits != maskBits)
                result ^= maskBits;
            lBit >>= 1; rBit <<= 1;
        }
        Dest64 = result;
    ''')
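    # REV byte-reverses the whole 32- or 64-bit register; REV16 and REV32
    # byte-reverse each 16-bit and 32-bit element respectively, with
    # betole<T>() doing the actual byte swap.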
    buildDataXRegInst("rev", 1, '''
        if (intWidth == 32)
            Dest64 = betole<uint32_t>(Op164);
        else
            Dest64 = betole<uint64_t>(Op164);
    ''')
    buildDataXRegInst("rev16", 1, '''
        int count = intWidth / 16;
        uint64_t result = 0;
        for (unsigned i = 0; i < count; i++) {
            uint16_t hw = Op164 >> (i * 16);
            result |= (uint64_t)betole<uint16_t>(hw) << (i * 16);
        }
        Dest64 = result;
    ''')
    buildDataXRegInst("rev32", 1, '''
        int count = intWidth / 32;
        uint64_t result = 0;
        for (unsigned i = 0; i < count; i++) {
            uint32_t hw = Op164 >> (i * 32);
            result |= (uint64_t)betole<uint32_t>(hw) << (i * 32);
        }
        Dest64 = result;
    ''')

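    # Shared permission/trap checks for MSR/MRS accesses.  The first %s is
    # filled with 'Read' or 'Write', selecting can{Read,Write}AArch64SysReg;
    # the second with a 'true'/'false' literal telling msrMrs64TrapToHyp
    # whether the access is a read.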
    msrMrs64EnabledCheckCode = '''
        // Check for read/write access right
        if (!can%sAArch64SysReg(flat_idx, Scr64, cpsr, xc->tcBase())) {
            if (flat_idx == MISCREG_DAIF ||
                flat_idx == MISCREG_DC_ZVA_Xt ||
                flat_idx == MISCREG_DC_CVAC_Xt ||
                flat_idx == MISCREG_DC_CIVAC_Xt
                )
                return new UndefinedInstruction(machInst, 0, EC_TRAPPED_MSR_MRS_64,
                                    mnemonic);
            return new UndefinedInstruction(machInst, false, mnemonic);
        }

        // Check for traps to supervisor (FP/SIMD regs)
        if (el <= EL1 && msrMrs64TrapToSup(flat_idx, el, Cpacr64))
            return new SupervisorTrap(machInst, 0x1E00000, EC_TRAPPED_SIMD_FP);

        bool is_vfp_neon = false;

        // Check for traps to hypervisor
        if ((ArmSystem::haveVirtualization(xc->tcBase()) && el <= EL2) &&
            msrMrs64TrapToHyp(flat_idx, %s, CptrEl264, Hcr64, &is_vfp_neon)) {
            return new HypervisorTrap(machInst, is_vfp_neon ? 0x1E00000 : imm,
                is_vfp_neon ? EC_TRAPPED_SIMD_FP : EC_TRAPPED_MSR_MRS_64);
        }

        // Check for traps to secure monitor
        if ((ArmSystem::haveSecurity(xc->tcBase()) && el <= EL3) &&
            msrMrs64TrapToMon(flat_idx, CptrEl364, el, &is_vfp_neon)) {
            return new SecureMonitorTrap(machInst,
                is_vfp_neon ? 0x1E00000 : imm,
                is_vfp_neon ? EC_TRAPPED_SIMD_FP : EC_TRAPPED_MSR_MRS_64);
        }
    '''

    buildDataXImmInst("mrs", '''
        MiscRegIndex flat_idx = (MiscRegIndex) xc->tcBase()->
            flattenMiscIndex(op1);
        CPSR cpsr = Cpsr;
        ExceptionLevel el = (ExceptionLevel) (uint8_t) cpsr.el;
        %s
        XDest = MiscOp1_ud;
    ''' % (msrMrs64EnabledCheckCode % ('Read', 'true'),),
        ["IsSerializeBefore"])

    buildDataXRegInst("mrsNZCV", 1, '''
        CPSR cpsr = 0;
        cpsr.nz = CondCodesNZ;
        cpsr.c = CondCodesC;
        cpsr.v = CondCodesV;
        XDest = cpsr;
    ''')

    buildDataXImmInst("msr", '''
        MiscRegIndex flat_idx = (MiscRegIndex) xc->tcBase()->
            flattenMiscIndex(dest);
        CPSR cpsr = Cpsr;
        ExceptionLevel el = (ExceptionLevel) (uint8_t) cpsr.el;
        %s
        MiscDest_ud = XOp1;
    ''' % (msrMrs64EnabledCheckCode % ('Write', 'false'),),
        ["IsSerializeAfter", "IsNonSpeculative"])

    buildDataXRegInst("msrNZCV", 1, '''
        CPSR cpsr = XOp1;
        CondCodesNZ = cpsr.nz;
        CondCodesC = cpsr.c;
        CondCodesV = cpsr.v;
    ''')

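    # DC ZVA (zero cache block by VA) is modelled as a store.  The EA code
    # repeats the MSR permission checks, then aligns XBase down to the zero
    # block size advertised by DCZID (2^(BS + 2) bytes); the request's
    # CACHE_BLOCK_ZERO flag lets the memory system do the zeroing, so the
    # access and writeback code below are empty.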
    msrdczva_ea_code = '''
        MiscRegIndex flat_idx = (MiscRegIndex) xc->tcBase()->
            flattenMiscIndex(dest);
        CPSR cpsr = Cpsr;
        ExceptionLevel el = (ExceptionLevel) (uint8_t) cpsr.el;
    '''

    msrdczva_ea_code += msrMrs64EnabledCheckCode % ('Write', 'false')
    msrdczva_ea_code += '''
        Request::Flags memAccessFlags = Request::CACHE_BLOCK_ZERO |
                                        ArmISA::TLB::MustBeOne;
        EA = XBase;
        assert(!(Dczid & 0x10));
        uint64_t op_size = power(2, Dczid + 2);
        EA &= ~(op_size - 1);
    '''

    msrDCZVAIop = InstObjParams("dczva", "Dczva", "SysDC64",
                { "ea_code" : msrdczva_ea_code,
                  "memacc_code" : ";", "use_uops" : 0,
                  "op_wb" : ";", "fa_code" : ";"}, ['IsStore', 'IsMemRef'])
    header_output += DCStore64Declare.subst(msrDCZVAIop)
    decoder_output += DCStore64Constructor.subst(msrDCZVAIop)
    exec_output += DCStore64Execute.subst(msrDCZVAIop)
    exec_output += DCStore64InitiateAcc.subst(msrDCZVAIop)
    exec_output += Store64CompleteAcc.subst(msrDCZVAIop)

    buildDataXImmInst("msrSP", '''
        if (!canWriteAArch64SysReg(
                (MiscRegIndex) xc->tcBase()->flattenMiscIndex(dest),
                Scr64, Cpsr, xc->tcBase())) {
            return new UndefinedInstruction(machInst, false, mnemonic);
        }
        MiscDest_ud = imm;
    ''', optArgs = ["IsSerializeAfter", "IsNonSpeculative"])

    buildDataXImmInst("msrDAIFSet", '''
        if (!canWriteAArch64SysReg(
                (MiscRegIndex) xc->tcBase()->flattenMiscIndex(dest),
                Scr64, Cpsr, xc->tcBase())) {
            return new UndefinedInstruction(machInst, 0, EC_TRAPPED_MSR_MRS_64,
                            mnemonic);
        }
        CPSR cpsr = Cpsr;
        cpsr.daif = cpsr.daif | imm;
        Cpsr = cpsr;
    ''', optArgs = ["IsSerializeAfter", "IsNonSpeculative"])

    buildDataXImmInst("msrDAIFClr", '''
        if (!canWriteAArch64SysReg(
                (MiscRegIndex) xc->tcBase()->flattenMiscIndex(dest),
                Scr64, Cpsr, xc->tcBase())) {
            return new UndefinedInstruction(machInst, 0, EC_TRAPPED_MSR_MRS_64,
                                mnemonic);
        }
        CPSR cpsr = Cpsr;
        cpsr.daif = cpsr.daif & ~imm;
        Cpsr = cpsr;
    ''', optArgs = ["IsSerializeAfter", "IsNonSpeculative"])

    def buildDataXCompInst(mnem, instType, suffix, code):
        global header_output, decoder_output, exec_output
        templateBase = "DataXCond%s" % instType
        iop = InstObjParams(mnem, mnem.capitalize() + suffix + "64",
                            templateBase + "Op", code)
        header_output += eval(templateBase + "Declare").subst(iop)
        decoder_output += eval(templateBase + "Constructor").subst(iop)
        exec_output += BasicExecute.subst(iop)

    def buildDataXCondImmInst(mnem, code):
        buildDataXCompInst(mnem, "CompImm", "Imm", code)
    def buildDataXCondRegInst(mnem, code):
        buildDataXCompInst(mnem, "CompReg", "Reg", code)
    def buildDataXCondSelInst(mnem, code):
        buildDataXCompInst(mnem, "Sel", "", code)

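    # CCMN/CCMP: when the condition passes, perform the addition or
    # subtraction purely for its flag effects; otherwise set NZCV straight
    # from the 4-bit immediate defCc.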
    def condCompCode(flagType, op, imm):
        ccCode = createCcCode64(carryCode64[flagType], overflowCode64[flagType])
        opDecl = "uint64_t secOp M5_VAR_USED = imm;"
        if not imm:
            opDecl = "uint64_t secOp M5_VAR_USED = Op264;"
        return opDecl + '''
            if (testPredicate(CondCodesNZ, CondCodesC, CondCodesV, condCode)) {
                uint64_t resTemp = Op164 ''' + op + ''' secOp;
        ''' + ccCode + '''
            } else {
                CondCodesNZ = (defCc >> 2) & 0x3;
                CondCodesC = (defCc >> 1) & 0x1;
                CondCodesV = defCc & 0x1;
            }
        '''

    buildDataXCondImmInst("ccmn", condCompCode("add", "+", True))
    buildDataXCondImmInst("ccmp", condCompCode("sub", "-", True))
    buildDataXCondRegInst("ccmn", condCompCode("add", "+", False))
    buildDataXCondRegInst("ccmp", condCompCode("sub", "-", False))

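    # Conditional selects: Dest gets Op1 when the condition passes,
    # otherwise Op2 unchanged (csel), incremented (csinc), inverted (csinv)
    # or negated (csneg).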
    condSelCode = '''
        if (testPredicate(CondCodesNZ, CondCodesC, CondCodesV, condCode)) {
            Dest64 = Op164;
        } else {
            Dest64 = %(altVal)s;
        }
    '''
    buildDataXCondSelInst("csel", condSelCode % {"altVal" : "Op264"})
    buildDataXCondSelInst("csinc", condSelCode % {"altVal" : "Op264 + 1"})
    buildDataXCondSelInst("csinv", condSelCode % {"altVal" : "~Op264"})
    buildDataXCondSelInst("csneg", condSelCode % {"altVal" : "-Op264"})
}};