// vfp.hh, revision 7381
/*
 * Copyright (c) 2010 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Gabe Black
 */

#ifndef __ARCH_ARM_INSTS_VFP_HH__
#define __ARCH_ARM_INSTS_VFP_HH__

#include <fenv.h>

#include <cmath>

#include "arch/arm/insts/misc.hh"
#include "arch/arm/miscregs.hh"

// Tags a VFP instruction's place in a micro-op sequence.
enum VfpMicroMode {
    VfpNotAMicroop,
    VfpMicroop,
    VfpFirstMicroop,
    VfpLastMicroop
};

// Set the micro-op status flags on a static instruction according to its
// position in the sequence. All but the last micro-op delay their commit.
template<class T>
static inline void
setVfpMicroFlags(VfpMicroMode mode, T &flags)
{
    switch (mode) {
      case VfpMicroop:
        flags[StaticInst::IsMicroop] = true;
        break;
      case VfpFirstMicroop:
        flags[StaticInst::IsMicroop] =
            flags[StaticInst::IsFirstMicroop] = true;
        break;
      case VfpLastMicroop:
        flags[StaticInst::IsMicroop] =
            flags[StaticInst::IsLastMicroop] = true;
        break;
      case VfpNotAMicroop:
        break;
    }
    if (mode == VfpMicroop || mode == VfpFirstMicroop) {
        flags[StaticInst::IsDelayedCommit] = true;
    }
}
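
// For example, passing VfpFirstMicroop to one of the wrapper-class
// constructors near the end of this file marks that piece as both a
// micro-op and the first in its sequence, and (like VfpMicroop) gives it
// delayed commit; only the VfpLastMicroop piece commits the sequence.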

// Host floating-point exception bits, aliased from <fenv.h>.
enum FeExceptionBit
{
    FeDivByZero = FE_DIVBYZERO,
    FeInexact = FE_INEXACT,
    FeInvalid = FE_INVALID,
    FeOverflow = FE_OVERFLOW,
    FeUnderflow = FE_UNDERFLOW,
    FeAllExceptions = FE_ALL_EXCEPT
};

// Host rounding modes, aliased from <fenv.h>.
enum FeRoundingMode
{
    FeRoundDown = FE_DOWNWARD,
    FeRoundNearest = FE_TONEAREST,
    FeRoundZero = FE_TOWARDZERO,
    FeRoundUpward = FE_UPWARD
};

// VFP rounding modes, using the encoding of the FPSCR.RMode field.
enum VfpRoundingMode
{
    VfpRoundNearest = 0,
    VfpRoundUpward = 1,
    VfpRoundDown = 2,
    VfpRoundZero = 3
};

// Convert a single-precision float to a 16- or 32-bit fixed-point value
// with "imm" fractional bits, rounding toward zero and saturating on
// overflow. Out-of-range inputs raise the host's invalid-operation flag.
static inline uint64_t
vfpFpSToFixed(float val, bool isSigned, bool half, uint8_t imm)
{
    fesetround(FeRoundZero);
    val = val * powf(2.0, imm);
    // Force the scaling multiply to complete (and post its exception
    // flags) before those flags are cleared below.
    __asm__ __volatile__("" : "=m" (val) : "m" (val));
    feclearexcept(FeAllExceptions);
    if (isSigned) {
        if (half) {
            if ((double)val < (int16_t)(1 << 15)) {
                feraiseexcept(FeInvalid);
                return (int16_t)(1 << 15);
            }
            if ((double)val > (int16_t)mask(15)) {
                feraiseexcept(FeInvalid);
                return (int16_t)mask(15);
            }
            return (int16_t)val;
        } else {
            // 1ULL avoids the undefined behavior of shifting a plain int
            // into its sign bit.
            if ((double)val < (int32_t)(1ULL << 31)) {
                feraiseexcept(FeInvalid);
                return (int32_t)(1ULL << 31);
            }
            if ((double)val > (int32_t)mask(31)) {
                feraiseexcept(FeInvalid);
                return (int32_t)mask(31);
            }
            return (int32_t)val;
        }
    } else {
        if (half) {
            if ((double)val < 0) {
                feraiseexcept(FeInvalid);
                return 0;
            }
            if ((double)val > mask(16)) {
                feraiseexcept(FeInvalid);
                return mask(16);
            }
            return (uint16_t)val;
        } else {
            if ((double)val < 0) {
                feraiseexcept(FeInvalid);
                return 0;
            }
            if ((double)val > mask(32)) {
                feraiseexcept(FeInvalid);
                return mask(32);
            }
            return (uint32_t)val;
        }
    }
}
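
// A worked example: vfpFpSToFixed(1.5f, true, false, 4) scales by 2^4 and
// truncates, returning 24 (1.5 * 16); vfpFpSToFixed(-1.0f, false, false, 0)
// saturates to 0 and raises FeInvalid.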

// Convert an unsigned 16- or 32-bit fixed-point value with "imm"
// fractional bits to a single-precision float, rounding to nearest.
static inline float
vfpUFixedToFpS(uint32_t val, bool half, uint8_t imm)
{
    fesetround(FeRoundNearest);
    if (half)
        val = (uint16_t)val;
    return val / powf(2.0, imm);
}

// Signed variant of the above; the 16-bit case is sign extended first.
static inline float
vfpSFixedToFpS(int32_t val, bool half, uint8_t imm)
{
    fesetround(FeRoundNearest);
    if (half)
        val = sext<16>(val & mask(16));
    return val / powf(2.0, imm);
}
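
// E.g. vfpUFixedToFpS(48, false, 5) returns 48 / 2^5 = 1.5f.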

// Double-precision counterpart of vfpFpSToFixed.
static inline uint64_t
vfpFpDToFixed(double val, bool isSigned, bool half, uint8_t imm)
{
    fesetround(FeRoundZero);
    val = val * pow(2.0, imm);
    // As above, keep the scaling multiply from being reordered past
    // feclearexcept.
    __asm__ __volatile__("" : "=m" (val) : "m" (val));
    feclearexcept(FeAllExceptions);
    if (isSigned) {
        if (half) {
            if (val < (int16_t)(1 << 15)) {
                feraiseexcept(FeInvalid);
                return (int16_t)(1 << 15);
            }
            if (val > (int16_t)mask(15)) {
                feraiseexcept(FeInvalid);
                return (int16_t)mask(15);
            }
            return (int16_t)val;
        } else {
            if (val < (int32_t)(1ULL << 31)) {
                feraiseexcept(FeInvalid);
                return (int32_t)(1ULL << 31);
            }
            if (val > (int32_t)mask(31)) {
                feraiseexcept(FeInvalid);
                return (int32_t)mask(31);
            }
            return (int32_t)val;
        }
    } else {
        if (half) {
            if (val < 0) {
                feraiseexcept(FeInvalid);
                return 0;
            }
            if (val > mask(16)) {
                feraiseexcept(FeInvalid);
                return mask(16);
            }
            return (uint16_t)val;
        } else {
            if (val < 0) {
                feraiseexcept(FeInvalid);
                return 0;
            }
            if (val > mask(32)) {
                feraiseexcept(FeInvalid);
                return mask(32);
            }
            return (uint32_t)val;
        }
    }
}

// Double-precision counterparts of the fixed-to-float conversions above.
static inline double
vfpUFixedToFpD(uint32_t val, bool half, uint8_t imm)
{
    fesetround(FeRoundNearest);
    if (half)
        val = (uint16_t)val;
    return val / pow(2.0, imm);
}

static inline double
vfpSFixedToFpD(int32_t val, bool half, uint8_t imm)
{
    fesetround(FeRoundNearest);
    if (half)
        val = sext<16>(val & mask(16));
    return val / pow(2.0, imm);
}
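
// For instance, vfpSFixedToFpD(-3, false, 4) yields -3 / 2^4 = -0.1875,
// and vfpFpDToFixed(-0.1875, true, false, 4) recovers -3 in the low 32
// bits of the returned value.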

// The saved host FP state is just the host rounding mode.
typedef int VfpSavedState;

// Clear the host exception flags and switch the host rounding mode to
// match FPSCR.RMode, returning the previous host mode for later restore.
static inline VfpSavedState
prepVfpFpscr(FPSCR fpscr)
{
    int roundingMode = fegetround();
    feclearexcept(FeAllExceptions);
    switch (fpscr.rMode) {
      case VfpRoundNearest:
        fesetround(FeRoundNearest);
        break;
      case VfpRoundUpward:
        fesetround(FeRoundUpward);
        break;
      case VfpRoundDown:
        fesetround(FeRoundDown);
        break;
      case VfpRoundZero:
        fesetround(FeRoundZero);
        break;
    }
    return roundingMode;
}

// Accumulate any host exceptions raised since prepVfpFpscr() into the
// FPSCR cumulative flags, then restore the saved host rounding mode.
static inline FPSCR
setVfpFpscr(FPSCR fpscr, VfpSavedState state)
{
    int exceptions = fetestexcept(FeAllExceptions);
    if (exceptions & FeInvalid) {
        fpscr.ioc = 1;
    }
    if (exceptions & FeDivByZero) {
        fpscr.dzc = 1;
    }
    if (exceptions & FeOverflow) {
        fpscr.ofc = 1;
    }
    if (exceptions & FeUnderflow) {
        fpscr.ufc = 1;
    }
    if (exceptions & FeInexact) {
        fpscr.ixc = 1;
    }
    fesetround(state);
    return fpscr;
}
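
// A typical execute() body would bracket the emulated operation with the
// pair above, roughly as follows (illustrative sketch only; "Fpscr",
// "op1", and "op2" stand for whatever the instruction actually reads):
//
//     FPSCR fpscr = Fpscr;
//     VfpSavedState state = prepVfpFpscr(fpscr);
//     float dest = op1 + op2;            // runs under FPSCR's rounding mode
//     Fpscr = setVfpFpscr(fpscr, state); // harvest flags, restore host mode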

class VfpMacroOp : public PredMacroOp
{
  public:
    // Registers s0-s7 form the scalar bank; operands there don't step
    // with the vector stride.
    static bool
    inScalarBank(IntRegIndex idx)
    {
        return (idx % 32) < 8;
    }

  protected:
    bool wide;

    VfpMacroOp(const char *mnem, ExtMachInst _machInst,
            OpClass __opClass, bool _wide) :
        PredMacroOp(mnem, _machInst, __opClass), wide(_wide)
    {}

    // Advance a register index by the vector stride (doubled for wide,
    // i.e. double-precision, operands), wrapping within its 8-register
    // bank.
    IntRegIndex
    addStride(IntRegIndex idx, unsigned stride)
    {
        if (wide) {
            stride *= 2;
        }
        unsigned offset = idx % 8;
        idx = (IntRegIndex)(idx - offset);
        offset += stride;
        idx = (IntRegIndex)(idx + (offset % 8));
        return idx;
    }
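
    // For example, with wide == false and stride 2, s10 (offset 2 in the
    // s8-s15 bank) steps to s12, while s14 (offset 6) wraps around to s8.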

    // Step the operand indices for the next micro-op of a short-vector
    // operation; an operand in the scalar bank stays fixed. An FPSCR
    // stride field of 0 encodes a step of 1, anything else a step of 2.
    void
    nextIdxs(IntRegIndex &dest, IntRegIndex &op1, IntRegIndex &op2)
    {
        unsigned stride = (machInst.fpscrStride == 0) ? 1 : 2;
        assert(!inScalarBank(dest));
        dest = addStride(dest, stride);
        op1 = addStride(op1, stride);
        if (!inScalarBank(op2)) {
            op2 = addStride(op2, stride);
        }
    }

    void
    nextIdxs(IntRegIndex &dest, IntRegIndex &op1)
    {
        unsigned stride = (machInst.fpscrStride == 0) ? 1 : 2;
        assert(!inScalarBank(dest));
        dest = addStride(dest, stride);
        if (!inScalarBank(op1)) {
            op1 = addStride(op1, stride);
        }
    }

    void
    nextIdxs(IntRegIndex &dest)
    {
        unsigned stride = (machInst.fpscrStride == 0) ? 1 : 2;
        assert(!inScalarBank(dest));
        dest = addStride(dest, stride);
    }
};
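
// A derived macro-op would typically build its micro-ops in a loop,
// advancing the operand indices between iterations (illustrative sketch
// only; "VfpAddS" is a hypothetical stand-in for a generated micro-op):
//
//     for (unsigned i = 0; i < numMicroops; i++) {
//         microOps[i] = new VfpAddS(machInst, dest, op1, op2);
//         nextIdxs(dest, op1, op2);
//     }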

// Thin wrappers around the generic operand-format base classes that also
// tag the instruction with its VFP micro-op mode.
class VfpRegRegOp : public RegRegOp
{
  protected:
    VfpRegRegOp(const char *mnem, ExtMachInst _machInst, OpClass __opClass,
                IntRegIndex _dest, IntRegIndex _op1,
                VfpMicroMode mode = VfpNotAMicroop) :
        RegRegOp(mnem, _machInst, __opClass, _dest, _op1)
    {
        setVfpMicroFlags(mode, flags);
    }
};

class VfpRegImmOp : public RegImmOp
{
  protected:
    VfpRegImmOp(const char *mnem, ExtMachInst _machInst, OpClass __opClass,
                IntRegIndex _dest, uint64_t _imm,
                VfpMicroMode mode = VfpNotAMicroop) :
        RegImmOp(mnem, _machInst, __opClass, _dest, _imm)
    {
        setVfpMicroFlags(mode, flags);
    }
};

class VfpRegRegImmOp : public RegRegImmOp
{
  protected:
    VfpRegRegImmOp(const char *mnem, ExtMachInst _machInst, OpClass __opClass,
                   IntRegIndex _dest, IntRegIndex _op1,
                   uint64_t _imm, VfpMicroMode mode = VfpNotAMicroop) :
        RegRegImmOp(mnem, _machInst, __opClass, _dest, _op1, _imm)
    {
        setVfpMicroFlags(mode, flags);
    }
};

class VfpRegRegRegOp : public RegRegRegOp
{
  protected:
    VfpRegRegRegOp(const char *mnem, ExtMachInst _machInst, OpClass __opClass,
                   IntRegIndex _dest, IntRegIndex _op1, IntRegIndex _op2,
                   VfpMicroMode mode = VfpNotAMicroop) :
        RegRegRegOp(mnem, _machInst, __opClass, _dest, _op1, _op2)
    {
        setVfpMicroFlags(mode, flags);
    }
};

#endif //__ARCH_ARM_INSTS_VFP_HH__