// vfp.hh, as of revision 7384:f12b4f28e5eb
/*
 * Copyright (c) 2010 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Gabe Black
 */

#ifndef __ARCH_ARM_INSTS_VFP_HH__
#define __ARCH_ARM_INSTS_VFP_HH__

#include <fenv.h>

#include <cmath>

#include "arch/arm/insts/misc.hh"
#include "arch/arm/miscregs.hh"
#include "base/bitfield.hh" // mask() and sext<>()

namespace ArmISA
{

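// Where a VFP instruction sits in a micro-op sequence. setVfpMicroFlags()
// translates this into StaticInst flag bits: every micro-op gets IsMicroop,
// the first and last additionally get IsFirstMicroop/IsLastMicroop, and
// every micro-op except the last is marked IsDelayedCommit.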
enum VfpMicroMode {
    VfpNotAMicroop,
    VfpMicroop,
    VfpFirstMicroop,
    VfpLastMicroop
};

template<class T>
static inline void
setVfpMicroFlags(VfpMicroMode mode, T &flags)
{
    switch (mode) {
      case VfpMicroop:
        flags[StaticInst::IsMicroop] = true;
        break;
      case VfpFirstMicroop:
        flags[StaticInst::IsMicroop] =
            flags[StaticInst::IsFirstMicroop] = true;
        break;
      case VfpLastMicroop:
        flags[StaticInst::IsMicroop] =
            flags[StaticInst::IsLastMicroop] = true;
        break;
      case VfpNotAMicroop:
        break;
    }
    if (mode == VfpMicroop || mode == VfpFirstMicroop) {
        flags[StaticInst::IsDelayedCommit] = true;
    }
}

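// Symbolic names for the host's <fenv.h> exception and rounding-mode
// constants, plus the ARM encoding of FPSCR.RMode. The host and VFP
// rounding enumerations use different encodings, so prepVfpFpscr() below
// translates between them.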
enum FeExceptionBit
{
    FeDivByZero = FE_DIVBYZERO,
    FeInexact = FE_INEXACT,
    FeInvalid = FE_INVALID,
    FeOverflow = FE_OVERFLOW,
    FeUnderflow = FE_UNDERFLOW,
    FeAllExceptions = FE_ALL_EXCEPT
};

enum FeRoundingMode
{
    FeRoundDown = FE_DOWNWARD,
    FeRoundNearest = FE_TONEAREST,
    FeRoundZero = FE_TOWARDZERO,
    FeRoundUpward = FE_UPWARD
};

enum VfpRoundingMode
{
    VfpRoundNearest = 0,
    VfpRoundUpward = 1,
    VfpRoundDown = 2,
    VfpRoundZero = 3
};

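// If flush-to-zero mode is enabled (FPSCR.FZ) and an operand is subnormal,
// replace it with zero and record the input-denormal exception by setting
// FPSCR.IDC.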
template <class fpType>
static inline void
vfpFlushToZero(uint32_t &_fpscr, fpType &op)
{
    FPSCR fpscr = _fpscr;
    if (fpscr.fz == 1 && (std::fpclassify(op) == FP_SUBNORMAL)) {
        fpscr.idc = 1;
        op = 0;
    }
    _fpscr = fpscr;
}

template <class fpType>
static inline void
vfpFlushToZero(uint32_t &fpscr, fpType &op1, fpType &op2)
{
    vfpFlushToZero(fpscr, op1);
    vfpFlushToZero(fpscr, op2);
}

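// Move values between floating-point types and their raw IEEE 754 bit
// patterns. The type punning goes through a union (the usual GCC-blessed
// idiom); the unused "junk" argument to bitsToFp() exists only to select
// the overload for the desired floating-point type.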
static inline uint32_t
fpToBits(float fp)
{
    union
    {
        float fp;
        uint32_t bits;
    } val;
    val.fp = fp;
    return val.bits;
}

static inline uint64_t
fpToBits(double fp)
{
    union
    {
        double fp;
        uint64_t bits;
    } val;
    val.fp = fp;
    return val.bits;
}

static inline float
bitsToFp(uint64_t bits, float junk)
{
    union
    {
        float fp;
        uint32_t bits;
    } val;
    val.bits = (uint32_t)bits; // explicitly truncate to the low 32 bits
    return val.fp;
}

static inline double
bitsToFp(uint64_t bits, double junk)
{
    union
    {
        double fp;
        uint64_t bits;
    } val;
    val.bits = bits;
    return val.fp;
}

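// Rewrite a NaN result according to the VFP rules: if neither operand was
// a NaN, or default-NaN mode is on (FPSCR.DN), return the default quiet
// NaN; otherwise propagate a signaling NaN (quieted) in preference to a
// quiet one, and the first operand's NaN in preference to the second's.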
template <class fpType>
static inline fpType
fixNan(FPSCR fpscr, fpType val, fpType op1, fpType op2)
{
    if (std::isnan(val)) {
        const bool single = (sizeof(val) == sizeof(float));
        const uint64_t qnan = single ? 0x7fc00000 : ULL(0x7ff8000000000000);
        const bool nan1 = std::isnan(op1);
        const bool nan2 = std::isnan(op2);
        const bool signal1 = nan1 && ((fpToBits(op1) & qnan) != qnan);
        const bool signal2 = nan2 && ((fpToBits(op2) & qnan) != qnan);
        fpType junk = 0.0;
        if ((!nan1 && !nan2) || (fpscr.dn == 1)) {
            val = bitsToFp(qnan, junk);
        } else if (signal1) {
            val = bitsToFp(fpToBits(op1) | qnan, junk);
        } else if (signal2) {
            val = bitsToFp(fpToBits(op2) | qnan, junk);
        } else if (nan1) {
            val = op1;
        } else if (nan2) {
            val = op2;
        }
    }
    return val;
}

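// Convert a single-precision value to a signed or unsigned 16/32-bit
// fixed-point value with 'imm' fraction bits: scale by 2^imm, round toward
// zero, and saturate, raising Invalid for NaNs and out-of-range values and
// Inexact when rounding changed the result. The empty asm statements force
// 'val' through memory so the compiler can't move the floating-point
// operations across the fesetround()/feclearexcept() calls. The saturated
// result is returned in the low bits of a uint64_t (sign-extended for
// signed results).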
static inline uint64_t
vfpFpSToFixed(float val, bool isSigned, bool half, uint8_t imm)
{
    fesetround(FeRoundZero);
    val = val * powf(2.0, imm);
    __asm__ __volatile__("" : "=m" (val) : "m" (val));
    feclearexcept(FeAllExceptions);
    __asm__ __volatile__("" : "=m" (val) : "m" (val));
    float origVal = val;
    val = rintf(val);
    int fpType = std::fpclassify(val);
    if (fpType == FP_SUBNORMAL || fpType == FP_NAN) {
        if (fpType == FP_NAN) {
            feraiseexcept(FeInvalid);
        }
        val = 0.0;
    } else if (origVal != val) {
        feraiseexcept(FeInexact);
    }

    if (isSigned) {
        if (half) {
            if ((double)val < (int16_t)(1 << 15)) {
                feraiseexcept(FeInvalid);
                feclearexcept(FeInexact);
                return (int16_t)(1 << 15);
            }
            if ((double)val > (int16_t)mask(15)) {
                feraiseexcept(FeInvalid);
                feclearexcept(FeInexact);
                return (int16_t)mask(15);
            }
            return (int16_t)val;
        } else {
            if ((double)val < (int32_t)(1 << 31)) {
                feraiseexcept(FeInvalid);
                feclearexcept(FeInexact);
                return (int32_t)(1 << 31);
            }
            if ((double)val > (int32_t)mask(31)) {
                feraiseexcept(FeInvalid);
                feclearexcept(FeInexact);
                return (int32_t)mask(31);
            }
            return (int32_t)val;
        }
    } else {
        if (half) {
            if ((double)val < 0) {
                feraiseexcept(FeInvalid);
                feclearexcept(FeInexact);
                return 0;
            }
            if ((double)val > (mask(16))) {
                feraiseexcept(FeInvalid);
                feclearexcept(FeInexact);
                return mask(16);
            }
            return (uint16_t)val;
        } else {
            if ((double)val < 0) {
                feraiseexcept(FeInvalid);
                feclearexcept(FeInexact);
                return 0;
            }
            if ((double)val > (mask(32))) {
                feraiseexcept(FeInvalid);
                feclearexcept(FeInexact);
                return mask(32);
            }
            return (uint32_t)val;
        }
    }
}

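// Convert unsigned and signed fixed-point values (optionally only 16 bits
// wide) to single precision by dividing out the 2^imm scale factor under
// round-to-nearest.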
static inline float
vfpUFixedToFpS(uint32_t val, bool half, uint8_t imm)
{
    fesetround(FeRoundNearest);
    if (half)
        val = (uint16_t)val;
    float scale = powf(2.0, imm);
    __asm__ __volatile__("" : "=m" (scale) : "m" (scale));
    feclearexcept(FeAllExceptions);
    __asm__ __volatile__("" : "=m" (scale) : "m" (scale));
    return val / scale;
}

static inline float
vfpSFixedToFpS(int32_t val, bool half, uint8_t imm)
{
    fesetround(FeRoundNearest);
    if (half)
        val = sext<16>(val & mask(16));
    float scale = powf(2.0, imm);
    __asm__ __volatile__("" : "=m" (scale) : "m" (scale));
    feclearexcept(FeAllExceptions);
    __asm__ __volatile__("" : "=m" (scale) : "m" (scale));
    return val / scale;
}

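// Double-precision counterparts of the conversions above. Note that
// vfpFpDToFixed() performs the 2^imm scaling under round-to-nearest and
// only then switches to round-toward-zero for the integer conversion.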
static inline uint64_t
vfpFpDToFixed(double val, bool isSigned, bool half, uint8_t imm)
{
    fesetround(FeRoundNearest);
    val = val * pow(2.0, imm);
    __asm__ __volatile__("" : "=m" (val) : "m" (val));
    fesetround(FeRoundZero);
    feclearexcept(FeAllExceptions);
    __asm__ __volatile__("" : "=m" (val) : "m" (val));
    double origVal = val;
    val = rint(val);
    int fpType = std::fpclassify(val);
    if (fpType == FP_SUBNORMAL || fpType == FP_NAN) {
        if (fpType == FP_NAN) {
            feraiseexcept(FeInvalid);
        }
        val = 0.0;
    } else if (origVal != val) {
        feraiseexcept(FeInexact);
    }
    if (isSigned) {
        if (half) {
            if (val < (int16_t)(1 << 15)) {
                feraiseexcept(FeInvalid);
                feclearexcept(FeInexact);
                return (int16_t)(1 << 15);
            }
            if (val > (int16_t)mask(15)) {
                feraiseexcept(FeInvalid);
                feclearexcept(FeInexact);
                return (int16_t)mask(15);
            }
            return (int16_t)val;
        } else {
            if (val < (int32_t)(1 << 31)) {
                feraiseexcept(FeInvalid);
                feclearexcept(FeInexact);
                return (int32_t)(1 << 31);
            }
            if (val > (int32_t)mask(31)) {
                feraiseexcept(FeInvalid);
                feclearexcept(FeInexact);
                return (int32_t)mask(31);
            }
            return (int32_t)val;
        }
    } else {
        if (half) {
            if (val < 0) {
                feraiseexcept(FeInvalid);
                feclearexcept(FeInexact);
                return 0;
            }
            if (val > mask(16)) {
                feraiseexcept(FeInvalid);
                feclearexcept(FeInexact);
                return mask(16);
            }
            return (uint16_t)val;
        } else {
            if (val < 0) {
                feraiseexcept(FeInvalid);
                feclearexcept(FeInexact);
                return 0;
            }
            if (val > mask(32)) {
                feraiseexcept(FeInvalid);
                feclearexcept(FeInexact);
                return mask(32);
            }
            return (uint32_t)val;
        }
    }
}

static inline double
vfpUFixedToFpD(uint32_t val, bool half, uint8_t imm)
{
    fesetround(FeRoundNearest);
    if (half)
        val = (uint16_t)val;
    double scale = pow(2.0, imm);
    __asm__ __volatile__("" : "=m" (scale) : "m" (scale));
    feclearexcept(FeAllExceptions);
    __asm__ __volatile__("" : "=m" (scale) : "m" (scale));
    return val / scale;
}

static inline double
vfpSFixedToFpD(int32_t val, bool half, uint8_t imm)
{
    fesetround(FeRoundNearest);
    if (half)
        val = sext<16>(val & mask(16));
    double scale = pow(2.0, imm);
    __asm__ __volatile__("" : "=m" (scale) : "m" (scale));
    feclearexcept(FeAllExceptions);
    __asm__ __volatile__("" : "=m" (scale) : "m" (scale));
    return val / scale;
}

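// Bracket a floating-point operation with the host FP environment:
// prepVfpFpscr() saves the host rounding mode, clears any accrued host
// exceptions, and installs the mode requested by FPSCR.RMode;
// setVfpFpscr() then folds the host exception flags into the FPSCR's
// cumulative bits (IOC, DZC, OFC, UFC, IXC) and restores the saved mode.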
typedef int VfpSavedState;

static inline VfpSavedState
prepVfpFpscr(FPSCR fpscr)
{
    int roundingMode = fegetround();
    feclearexcept(FeAllExceptions);
    switch (fpscr.rMode) {
      case VfpRoundNearest:
        fesetround(FeRoundNearest);
        break;
      case VfpRoundUpward:
        fesetround(FeRoundUpward);
        break;
      case VfpRoundDown:
        fesetround(FeRoundDown);
        break;
      case VfpRoundZero:
        fesetround(FeRoundZero);
        break;
    }
    return roundingMode;
}

static inline FPSCR
setVfpFpscr(FPSCR fpscr, VfpSavedState state)
{
    int exceptions = fetestexcept(FeAllExceptions);
    if (exceptions & FeInvalid) {
        fpscr.ioc = 1;
    }
    if (exceptions & FeDivByZero) {
        fpscr.dzc = 1;
    }
    if (exceptions & FeOverflow) {
        fpscr.ofc = 1;
    }
    if (exceptions & FeUnderflow) {
        fpscr.ufc = 1;
    }
    if (exceptions & FeInexact) {
        fpscr.ixc = 1;
    }
    fesetround(state);
    return fpscr;
}

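// A minimal usage sketch for the pair above (the operand names here are
// illustrative, not the actual gem5 operand names):
//
//     FPSCR fpscr = ...;                         // current FPSCR value
//     VfpSavedState state = prepVfpFpscr(fpscr);
//     dest = op1 + op2;                          // native op, VFP rounding
//     fpscr = setVfpFpscr(fpscr, state);         // harvest flags, restore

// Base class for VFP macro-ops. With a non-zero FPSCR vector length a VFP
// data-processing instruction operates on short vectors: registers in the
// first 8 of each 32-register block form the scalar bank, and
// addStride()/nextIdxs() advance the register indices for each successive
// micro-op, wrapping within an 8-register bank. 'wide' doubles the stride
// for double-precision operands.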
class VfpMacroOp : public PredMacroOp
{
  public:
    static bool
    inScalarBank(IntRegIndex idx)
    {
        return (idx % 32) < 8;
    }

  protected:
    bool wide;

    VfpMacroOp(const char *mnem, ExtMachInst _machInst,
               OpClass __opClass, bool _wide) :
        PredMacroOp(mnem, _machInst, __opClass), wide(_wide)
    {}

    IntRegIndex
    addStride(IntRegIndex idx, unsigned stride)
    {
        if (wide) {
            stride *= 2;
        }
        unsigned offset = idx % 8;
        idx = (IntRegIndex)(idx - offset);
        offset += stride;
        idx = (IntRegIndex)(idx + (offset % 8));
        return idx;
    }

    void
    nextIdxs(IntRegIndex &dest, IntRegIndex &op1, IntRegIndex &op2)
    {
        unsigned stride = (machInst.fpscrStride == 0) ? 1 : 2;
        assert(!inScalarBank(dest));
        dest = addStride(dest, stride);
        op1 = addStride(op1, stride);
        if (!inScalarBank(op2)) {
            op2 = addStride(op2, stride);
        }
    }

    void
    nextIdxs(IntRegIndex &dest, IntRegIndex &op1)
    {
        unsigned stride = (machInst.fpscrStride == 0) ? 1 : 2;
        assert(!inScalarBank(dest));
        dest = addStride(dest, stride);
        if (!inScalarBank(op1)) {
            op1 = addStride(op1, stride);
        }
    }

    void
    nextIdxs(IntRegIndex &dest)
    {
        unsigned stride = (machInst.fpscrStride == 0) ? 1 : 2;
        assert(!inScalarBank(dest));
        dest = addStride(dest, stride);
    }
};

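// Thin wrappers around the generic register/immediate operand formats
// that additionally tag the instruction with the requested VFP micro-op
// flags via setVfpMicroFlags().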
class VfpRegRegOp : public RegRegOp
{
  protected:
    VfpRegRegOp(const char *mnem, ExtMachInst _machInst, OpClass __opClass,
                IntRegIndex _dest, IntRegIndex _op1,
                VfpMicroMode mode = VfpNotAMicroop) :
        RegRegOp(mnem, _machInst, __opClass, _dest, _op1)
    {
        setVfpMicroFlags(mode, flags);
    }
};

class VfpRegImmOp : public RegImmOp
{
  protected:
    VfpRegImmOp(const char *mnem, ExtMachInst _machInst, OpClass __opClass,
                IntRegIndex _dest, uint64_t _imm,
                VfpMicroMode mode = VfpNotAMicroop) :
        RegImmOp(mnem, _machInst, __opClass, _dest, _imm)
    {
        setVfpMicroFlags(mode, flags);
    }
};

class VfpRegRegImmOp : public RegRegImmOp
{
  protected:
    VfpRegRegImmOp(const char *mnem, ExtMachInst _machInst, OpClass __opClass,
                   IntRegIndex _dest, IntRegIndex _op1,
                   uint64_t _imm, VfpMicroMode mode = VfpNotAMicroop) :
        RegRegImmOp(mnem, _machInst, __opClass, _dest, _op1, _imm)
    {
        setVfpMicroFlags(mode, flags);
    }
};

class VfpRegRegRegOp : public RegRegRegOp
{
  protected:
    VfpRegRegRegOp(const char *mnem, ExtMachInst _machInst, OpClass __opClass,
                   IntRegIndex _dest, IntRegIndex _op1, IntRegIndex _op2,
                   VfpMicroMode mode = VfpNotAMicroop) :
        RegRegRegOp(mnem, _machInst, __opClass, _dest, _op1, _op2)
    {
        setVfpMicroFlags(mode, flags);
    }
};

} // namespace ArmISA

#endif // __ARCH_ARM_INSTS_VFP_HH__