Lines matching refs:exp (references to the symbol exp), listed by source line number:

217 fp16_normalise(uint16_t mnt, int *exp)
228 *exp -= shift;
235 fp32_normalise(uint32_t mnt, int *exp)
246 *exp -= shift;
253 fp64_normalise(uint64_t mnt, int *exp)
264 *exp -= shift;
271 fp128_normalise(uint64_t *mnt0, uint64_t *mnt1, int *exp)
284 *exp -= 64;
291 *exp -= shift;
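
The *_normalise helpers shift the mantissa left until its most significant bit is set and subtract the shift count from the caller's exponent; the 128-bit variant first moves whole 64-bit words, hence the *exp -= 64 step. A minimal sketch of the 16-bit case, written as a plain loop rather than whatever shift search the library itself uses:

    #include <stdint.h>

    /* Sketch only: shift a 16-bit mantissa left until bit 15 is set,
     * decrementing the exponent once per shift. A zero mantissa is
     * returned unchanged. Mirrors the fp16_normalise interface above. */
    static uint16_t
    sketch_fp16_normalise(uint16_t mnt, int *exp)
    {
        if (!mnt)
            return mnt;
        while (!(mnt >> 15)) {
            mnt <<= 1;
            --*exp;
        }
        return mnt;
    }

For example, mnt = 0x0001 is shifted left 15 places and *exp drops by 15.
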
300 fp16_pack(uint16_t sgn, uint16_t exp, uint16_t mnt)
302 return sgn << (FP16_BITS - 1) | exp << FP16_MANT_BITS | FP16_MANT(mnt);
306 fp32_pack(uint32_t sgn, uint32_t exp, uint32_t mnt)
308 return sgn << (FP32_BITS - 1) | exp << FP32_MANT_BITS | FP32_MANT(mnt);
312 fp64_pack(uint64_t sgn, uint64_t exp, uint64_t mnt)
314 return sgn << (FP64_BITS - 1) | exp << FP64_MANT_BITS | FP64_MANT(mnt);
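
The *_pack helpers assemble a raw encoding from a sign bit, a biased exponent field and a mantissa masked to its field width. A sketch for binary16, with the field widths those macros are assumed to expand to (1 sign bit, 5 exponent bits, 10 mantissa bits):

    #include <stdint.h>

    /* Sketch only: the assumed binary16 layout behind fp16_pack.
     * sgn lands in bit 15, the biased exponent in bits 14..10 and the
     * fraction (masked to 10 bits) in bits 9..0. */
    static uint16_t
    sketch_fp16_pack(uint16_t sgn, uint16_t exp, uint16_t mnt)
    {
        return (uint16_t)(sgn << 15 | exp << 10 | (mnt & 0x3ff));
    }

For instance, sketch_fp16_pack(0, 15, 0) returns 0x3c00, the binary16 encoding of 1.0 (biased exponent 15 is an unbiased exponent of 0).
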
390 fp16_unpack(int *sgn, int *exp, uint16_t *mnt, uint16_t x, int mode,
394 *exp = FP16_EXP(x);
398 if (*exp) {
401 ++*exp;
409 fp32_unpack(int *sgn, int *exp, uint32_t *mnt, uint32_t x, int mode,
413 *exp = FP32_EXP(x);
417 if (*exp) {
420 ++*exp;
429 fp64_unpack(int *sgn, int *exp, uint64_t *mnt, uint64_t x, int mode,
433 *exp = FP64_EXP(x);
437 if (*exp) {
440 ++*exp;
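
The *_unpack helpers invert that: they split the encoding into sign, biased exponent and mantissa, restore the implicit leading 1 for normal numbers, and bump a subnormal's exponent to 1 so both cases share the same scale. The mode and flags arguments carry flush-to-zero handling that the following sketch omits:

    #include <stdint.h>

    /* Sketch only: unpack a binary16 encoding, assuming the standard
     * 1/5/10 field layout. Flush-to-zero (the mode/flags parameters in
     * the real functions) is not handled here. */
    static void
    sketch_fp16_unpack(int *sgn, int *exp, uint16_t *mnt, uint16_t x)
    {
        *sgn = x >> 15;
        *exp = (x >> 10) & 0x1f;
        *mnt = x & 0x3ff;

        if (*exp)
            *mnt |= 1 << 10;   /* normal: restore the implicit leading 1 */
        else
            ++*exp;            /* subnormal or zero: align with exp == 1 */
    }
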
449 fp16_is_NaN(int exp, uint16_t mnt)
451 return exp == FP16_EXP_INF && FP16_MANT(mnt);
455 fp32_is_NaN(int exp, uint32_t mnt)
457 return exp == FP32_EXP_INF && FP32_MANT(mnt);
461 fp64_is_NaN(int exp, uint64_t mnt)
463 return exp == FP64_EXP_INF && FP64_MANT(mnt);
467 fp16_is_signalling_NaN(int exp, uint16_t mnt)
469 return fp16_is_NaN(exp, mnt) && !(mnt >> (FP16_MANT_BITS - 1) & 1);
473 fp32_is_signalling_NaN(int exp, uint32_t mnt)
475 return fp32_is_NaN(exp, mnt) && !(mnt >> (FP32_MANT_BITS - 1) & 1);
479 fp64_is_signalling_NaN(int exp, uint64_t mnt)
481 return fp64_is_NaN(exp, mnt) && !(mnt >> (FP64_MANT_BITS - 1) & 1);
485 fp16_is_quiet_NaN(int exp, uint16_t mnt)
487 return exp == FP16_EXP_INF && (mnt >> (FP16_MANT_BITS - 1) & 1);
491 fp32_is_quiet_NaN(int exp, uint32_t mnt)
493 return exp == FP32_EXP_INF && (mnt >> (FP32_MANT_BITS - 1) & 1);
497 fp64_is_quiet_NaN(int exp, uint64_t mnt)
499 return exp == FP64_EXP_INF && (mnt >> (FP64_MANT_BITS - 1) & 1);
503 fp16_is_infinity(int exp, uint16_t mnt)
505 return exp == FP16_EXP_INF && !FP16_MANT(mnt);
509 fp32_is_infinity(int exp, uint32_t mnt)
511 return exp == FP32_EXP_INF && !FP32_MANT(mnt);
515 fp64_is_infinity(int exp, uint64_t mnt)
517 return exp == FP64_EXP_INF && !FP64_MANT(mnt);
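
The classification predicates all test the unpacked fields: an all-ones exponent with a non-zero fraction is a NaN, with a zero fraction an infinity, and the top fraction bit separates quiet NaNs (bit set) from signalling NaNs (bit clear). A binary16 sketch using the assumed field values (31 is the all-ones exponent, bit 9 the top fraction bit):

    #include <stdbool.h>
    #include <stdint.h>

    /* Sketch only: binary16 classification on unpacked exp/mnt fields. */
    static bool sketch_fp16_is_NaN(int exp, uint16_t mnt)
    {
        return exp == 31 && (mnt & 0x3ff) != 0;
    }

    static bool sketch_fp16_is_signalling_NaN(int exp, uint16_t mnt)
    {
        return sketch_fp16_is_NaN(exp, mnt) && !((mnt >> 9) & 1);
    }

    static bool sketch_fp16_is_quiet_NaN(int exp, uint16_t mnt)
    {
        return exp == 31 && ((mnt >> 9) & 1);
    }

    static bool sketch_fp16_is_infinity(int exp, uint16_t mnt)
    {
        return exp == 31 && (mnt & 0x3ff) == 0;
    }
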
707 fp16_round_(int sgn, int exp, uint16_t mnt, int rm, int mode, int *flags)
716 if ((mode & FPLIB_FZ16) && exp < 1) {
725 if (exp > 0) {
726 biased_exp = exp;
731 int_mant = lsr16(mnt, 3 - exp);
732 error = (lsr16(mnt, 1 - exp) & 3) | !!(mnt & (lsl16(1, 1 - exp) - 1));
785 fp16_round(int sgn, int exp, uint16_t mnt, int mode, int *flags)
787 return fp16_round_(sgn, exp, mnt, mode & 3, mode, flags);
791 fp32_round_(int sgn, int exp, uint32_t mnt, int rm, int mode, int *flags)
800 if ((mode & FPLIB_FZ) && exp < 1) {
809 if (exp > 0) {
810 biased_exp = exp;
815 int_mant = lsr32(mnt, 3 - exp);
816 error = (lsr32(mnt, 1 - exp) & 3) | !!(mnt & (lsl32(1, 1 - exp) - 1));
862 fp32_round(int sgn, int exp, uint32_t mnt, int mode, int *flags)
864 return fp32_round_(sgn, exp, mnt, mode & 3, mode, flags);
868 fp64_round_(int sgn, int exp, uint64_t mnt, int rm, int mode, int *flags)
877 if ((mode & FPLIB_FZ) && exp < 1) {
886 if (exp > 0) {
887 biased_exp = exp;
892 int_mant = lsr64(mnt, 3 - exp);
893 error = (lsr64(mnt, 1 - exp) & 3) | !!(mnt & (lsl64(1, 1 - exp) - 1));
939 fp64_round(int sgn, int exp, uint64_t mnt, int mode, int *flags)
941 return fp64_round_(sgn, exp, mnt, mode & 3, mode, flags);
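
The *_round_ helpers keep the result with extra low-order bits and fold everything shifted out into a two-bit error term: bit 1 is the half-ulp (guard) bit, bit 0 a sticky OR of all lower bits; the exp < 1 branches handle subnormal results and flush-to-zero. Round-to-nearest-even then needs only the error term and the parity of the integer mantissa. An illustrative fragment of that idiom, not the library's fp16_round_ (which also covers the other rounding modes and flag setting):

    #include <stdint.h>

    /* Sketch only: round-to-nearest-even with a 2-bit error term.
     * error == 3 (past halfway) always rounds up; error == 2 (exactly
     * halfway) rounds up only when int_mant is odd, keeping it even. */
    static uint16_t
    sketch_round_nearest_even(uint16_t int_mant, int error)
    {
        if (error == 3 || (error == 2 && (int_mant & 1)))
            ++int_mant;
        return int_mant;
    }
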
2617 int sgn, exp;
2622 fp32_unpack(&sgn, &exp, &mnt, op, mode, &flags);
2626 if (fp32_is_NaN(exp, mnt)) {
2637 } else if (exp == FP32_EXP_INF) {
2649 fp16_round_(sgn, exp - FP32_EXP_BIAS + FP16_EXP_BIAS,
2666 int sgn, exp;
2671 fp64_unpack(&sgn, &exp, &mnt, op, mode, &flags);
2675 if (fp64_is_NaN(exp, mnt)) {
2686 } else if (exp == FP64_EXP_INF) {
2698 fp16_round_(sgn, exp - FP64_EXP_BIAS + FP16_EXP_BIAS,
2715 int sgn, exp;
2720 fp16_unpack(&sgn, &exp, &mnt, op, mode & 0xf, &flags);
2722 if (fp16_is_NaN(exp, mnt) && !fpscr.ahp) {
2731 } else if (exp == FP16_EXP_INF && !fpscr.ahp) {
2736 mnt = fp16_normalise(mnt, &exp);
2737 result = fp32_pack(sgn, (exp - FP16_EXP_BIAS +
2753 int sgn, exp;
2758 fp64_unpack(&sgn, &exp, &mnt, op, mode, &flags);
2760 if (fp64_is_NaN(exp, mnt)) {
2769 } else if (exp == FP64_EXP_INF) {
2775 fp32_round_(sgn, exp - FP64_EXP_BIAS + FP32_EXP_BIAS,
2792 int sgn, exp;
2797 fp16_unpack(&sgn, &exp, &mnt, op, mode & 0xf, &flags);
2799 if (fp16_is_NaN(exp, mnt) && !fpscr.ahp) {
2808 } else if (exp == FP16_EXP_INF && !fpscr.ahp) {
2813 mnt = fp16_normalise(mnt, &exp);
2814 result = fp64_pack(sgn, (exp - FP16_EXP_BIAS +
2830 int sgn, exp;
2835 fp32_unpack(&sgn, &exp, &mnt, op, mode, &flags);
2837 if (fp32_is_NaN(exp, mnt)) {
2846 } else if (exp == FP32_EXP_INF) {
2851 mnt = fp32_normalise(mnt, &exp);
2852 result = fp64_pack(sgn, (exp - FP32_EXP_BIAS +
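
The width conversions re-bias the exponent by subtracting the source bias and adding the destination bias, after normalising half- or single-precision inputs that may be subnormal. A trivial sketch with the standard IEEE-754 bias values assumed here (15 for binary16, 127 for binary32, 1023 for binary64):

    /* Sketch only: exponent re-bias for a widening binary16 -> binary32
     * conversion of a normal value; e.g. 1.0 maps biased exponent 15 to 127. */
    static int
    sketch_rebias_16_to_32(int exp16)
    {
        return exp16 - 15 + 127;
    }
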
3113 fp16_repack(int sgn, int exp, uint16_t mnt)
3115 return fp16_pack(sgn, mnt >> FP16_MANT_BITS ? exp : 0, mnt);
3119 fp32_repack(int sgn, int exp, uint32_t mnt)
3121 return fp32_pack(sgn, mnt >> FP32_MANT_BITS ? exp : 0, mnt);
3125 fp64_repack(int sgn, int exp, uint64_t mnt)
3127 return fp64_pack(sgn, mnt >> FP64_MANT_BITS ? exp : 0, mnt);
3521 int sgn, exp;
3524 fp16_unpack(&sgn, &exp, &mnt, op, mode, &flags);
3526 if (fp16_is_NaN(exp, mnt)) {
3534 } else if (exp == FP16_EXP_INF) {
3537 exp += FP16_EXP_BITS;
3538 mnt = fp16_normalise(mnt, &exp);
3539 mnt = recip_sqrt_estimate[(~exp & 1) << 7 |
3541 result = fp16_pack(0, (3 * FP16_EXP_BIAS - exp - 1) >> 1,
3556 int sgn, exp;
3559 fp32_unpack(&sgn, &exp, &mnt, op, mode, &flags);
3561 if (fp32_is_NaN(exp, mnt)) {
3569 } else if (exp == FP32_EXP_INF) {
3572 exp += FP32_EXP_BITS;
3573 mnt = fp32_normalise(mnt, &exp);
3574 mnt = recip_sqrt_estimate[(~exp & 1) << 7 |
3576 result = fp32_pack(0, (3 * FP32_EXP_BIAS - exp - 1) >> 1,
3591 int sgn, exp;
3594 fp64_unpack(&sgn, &exp, &mnt, op, mode, &flags);
3596 if (fp64_is_NaN(exp, mnt)) {
3604 } else if (exp == FP64_EXP_INF) {
3607 exp += FP64_EXP_BITS;
3608 mnt = fp64_normalise(mnt, &exp);
3609 mnt = recip_sqrt_estimate[(~exp & 1) << 7 |
3611 result = fp64_pack(0, (3 * FP64_EXP_BIAS - exp - 1) >> 1,
3716 int sgn, exp;
3719 fp16_unpack(&sgn, &exp, &mnt, op, mode, &flags);
3721 if (fp16_is_NaN(exp, mnt)) {
3723 } else if (exp == FP16_EXP_INF) {
3748 } else if (fpscr.fz16 && exp >= 2 * FP16_EXP_BIAS - 1) {
3752 exp += FP16_EXP_BITS;
3753 mnt = fp16_normalise(mnt, &exp);
3754 int result_exp = 2 * FP16_EXP_BIAS - 1 - exp;
3778 int sgn, exp;
3781 fp32_unpack(&sgn, &exp, &mnt, op, mode, &flags);
3783 if (fp32_is_NaN(exp, mnt)) {
3785 } else if (exp == FP32_EXP_INF) {
3810 } else if (fpscr.fz && exp >= 2 * FP32_EXP_BIAS - 1) {
3814 exp += FP32_EXP_BITS;
3815 mnt = fp32_normalise(mnt, &exp);
3816 int result_exp = 2 * FP32_EXP_BIAS - 1 - exp;
3840 int sgn, exp;
3843 fp64_unpack(&sgn, &exp, &mnt, op, mode, &flags);
3845 if (fp64_is_NaN(exp, mnt)) {
3847 } else if (exp == FP64_EXP_INF) {
3872 } else if (fpscr.fz && exp >= 2 * FP64_EXP_BIAS - 1) {
3876 exp += FP64_EXP_BITS;
3877 mnt = fp64_normalise(mnt, &exp);
3878 int result_exp = 2 * FP64_EXP_BIAS - 1 - exp;
3992 int sgn, exp;
3995 fp16_unpack(&sgn, &exp, &mnt, op, mode, &flags);
3997 if (fp16_is_NaN(exp, mnt)) {
4004 result = fp16_pack(sgn, exp ^ FP16_EXP_INF, 0);
4019 int sgn, exp;
4022 fp32_unpack(&sgn, &exp, &mnt, op, mode, &flags);
4024 if (fp32_is_NaN(exp, mnt)) {
4031 result = fp32_pack(sgn, exp ^ FP32_EXP_INF, 0);
4046 int sgn, exp;
4049 fp64_unpack(&sgn, &exp, &mnt, op, mode, &flags);
4051 if (fp64_is_NaN(exp, mnt)) {
4058 result = fp64_pack(sgn, exp ^ FP64_EXP_INF, 0);
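
The three results around source lines 4004, 4031 and 4058 show an FRECPX-style output: keep the sign, complement the biased exponent field (XOR with the all-ones field value) and zero the fraction, which roughly negates the exponent as a cheap reciprocal scaling. A binary32 sketch of just that bit manipulation, ignoring the NaN and subnormal handling in the real code:

    #include <stdint.h>

    /* Sketch only: complement the biased exponent field of a binary32
     * encoding and clear the fraction, as in
     * fp32_pack(sgn, exp ^ FP32_EXP_INF, 0) above. */
    static uint32_t
    sketch_fp32_recpx_bits(uint32_t x)
    {
        uint32_t sgn = x & 0x80000000u;
        uint32_t exp = (x >> 23) & 0xffu;
        return sgn | ((exp ^ 0xffu) << 23);
    }
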
4074 int sgn, exp;
4078 fp16_unpack(&sgn, &exp, &mnt, op, mode, &flags);
4081 if (fp16_is_NaN(exp, mnt)) {
4083 } else if (exp == FP16_EXP_INF) {
4087 } else if (exp >= expint) {
4092 uint16_t x = expint - exp >= FP16_BITS ? 0 : mnt >> (expint - exp);
4093 int err = exp < expint - FP16_BITS ? 1 :
4094 ((mnt << 1 >> (expint - exp - 1) & 3) |
4095 ((uint16_t)(mnt << 2 << (FP16_BITS + exp - expint)) != 0));
4118 exp = expint;
4119 mnt = fp16_normalise(x, &exp);
4120 result = fp16_pack(sgn, exp + FP16_EXP_BITS, mnt >> FP16_EXP_BITS);
4139 int sgn, exp;
4143 fp32_unpack(&sgn, &exp, &mnt, op, mode, &flags);
4146 if (fp32_is_NaN(exp, mnt)) {
4148 } else if (exp == FP32_EXP_INF) {
4152 } else if (exp >= expint) {
4157 uint32_t x = expint - exp >= FP32_BITS ? 0 : mnt >> (expint - exp);
4158 int err = exp < expint - FP32_BITS ? 1 :
4159 ((mnt << 1 >> (expint - exp - 1) & 3) |
4160 ((uint32_t)(mnt << 2 << (FP32_BITS + exp - expint)) != 0));
4183 exp = expint;
4184 mnt = fp32_normalise(x, &exp);
4185 result = fp32_pack(sgn, exp + FP32_EXP_BITS, mnt >> FP32_EXP_BITS);
4204 int sgn, exp;
4208 fp64_unpack(&sgn, &exp, &mnt, op, mode, &flags);
4211 if (fp64_is_NaN(exp, mnt)) {
4213 } else if (exp == FP64_EXP_INF) {
4217 } else if (exp >= expint) {
4222 uint64_t x = expint - exp >= FP64_BITS ? 0 : mnt >> (expint - exp);
4223 int err = exp < expint - FP64_BITS ? 1 :
4224 ((mnt << 1 >> (expint - exp - 1) & 3) |
4225 ((uint64_t)(mnt << 2 << (FP64_BITS + exp - expint)) != 0));
4248 exp = expint;
4249 mnt = fp64_normalise(x, &exp);
4250 result = fp64_pack(sgn, exp + FP64_EXP_BITS, mnt >> FP64_EXP_BITS);
4459 int sgn, exp;
4466 fp16_unpack(&sgn, &exp, &mnt, result, mode, &flags);
4467 if (!fp16_is_NaN(exp, mnt)) {
4479 int sgn, exp;
4486 fp32_unpack(&sgn, &exp, &mnt, result, mode, &flags);
4487 if (!fp32_is_NaN(exp, mnt)) {
4498 int sgn, exp;
4505 fp64_unpack(&sgn, &exp, &mnt, result, mode, &flags);
4506 if (!fp64_is_NaN(exp, mnt)) {
4546 FPToFixed_64(int sgn, int exp, uint64_t mnt, bool u, FPRounding rounding,
4553 if (exp > expmax) {
4558 x = lsr64(mnt << FP64_EXP_BITS, expmax - exp);
4559 err = (exp > expmax - 2 ? 0 :
4560 (lsr64(mnt << FP64_EXP_BITS, expmax - 2 - exp) & 3) |
4561 !!(mnt << FP64_EXP_BITS & (lsl64(1, expmax - 2 - exp) - 1)));
4595 FPToFixed_32(int sgn, int exp, uint64_t mnt, bool u, FPRounding rounding,
4598 uint64_t x = FPToFixed_64(sgn, exp, mnt, u, rounding, flags);
4609 FPToFixed_16(int sgn, int exp, uint64_t mnt, bool u, FPRounding rounding,
4612 uint64_t x = FPToFixed_64(sgn, exp, mnt, u, rounding, flags);
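
FPToFixed_64 converts the unpacked value to an integer: an exponent larger than expmax cannot fit and is treated as overflow (the half-precision callers below push infinities to exp = 255 so this path saturates), otherwise the mantissa is shifted into place and the shifted-out bits are collected into the same two-bit error term used by the rounding helpers; the 32- and 16-bit wrappers then range-check the 64-bit result. A self-contained sketch of the shift-and-collect step only, not the library's function:

    #include <stdint.h>

    /* Sketch only: compute mnt / 2^shift rounded to nearest, ties to even,
     * for 0 < shift < 64. Bit 1 of error is the guard bit, bit 0 a sticky
     * OR of everything below it. */
    static uint64_t
    sketch_shift_to_int(uint64_t mnt, int shift)
    {
        uint64_t x = mnt >> shift;
        int error = (int)((mnt >> (shift - 1)) & 1) << 1 |
                    ((mnt & ((1ULL << (shift - 1)) - 1)) != 0);
        if (error == 3 || (error == 2 && (x & 1)))
            ++x;
        return x;
    }
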
4628 int sgn, exp;
4632 fp16_unpack(&sgn, &exp, &mnt, op, modeConv(fpscr), &flags);
4635 if (fp16_is_NaN(exp, mnt)) {
4642 FPToFixed_16(sgn, exp + FP64_EXP_BIAS - FP16_EXP_BIAS + fbits,
4658 int sgn, exp;
4663 fp16_unpack(&sgn, &exp, &mnt, op, modeConv(fpscr), &flags);
4666 if (fp16_is_NaN(exp, mnt)) {
4671 if (exp == FP16_EXP_INF)
4672 exp = 255; // infinity: make it big enough to saturate
4674 FPToFixed_32(sgn, exp + FP64_EXP_BIAS - FP16_EXP_BIAS + fbits,
4689 int sgn, exp;
4693 fp32_unpack(&sgn, &exp, &mnt, op, modeConv(fpscr), &flags);
4696 if (fp32_is_NaN(exp, mnt)) {
4703 FPToFixed_32(sgn, exp + FP64_EXP_BIAS - FP32_EXP_BIAS + fbits,
4718 int sgn, exp;
4723 fp64_unpack(&sgn, &exp, &mnt, op, modeConv(fpscr), &flags);
4726 if (fp64_is_NaN(exp, mnt)) {
4732 result = FPToFixed_32(sgn, exp + fbits, mnt, u, rounding, &flags);
4746 int sgn, exp;
4751 fp16_unpack(&sgn, &exp, &mnt, op, modeConv(fpscr), &flags);
4754 if (fp16_is_NaN(exp, mnt)) {
4759 if (exp == FP16_EXP_INF)
4760 exp = 255; // infinity: make it big enough to saturate
4762 FPToFixed_64(sgn, exp + FP64_EXP_BIAS - FP16_EXP_BIAS + fbits,
4777 int sgn, exp;
4782 fp32_unpack(&sgn, &exp, &mnt, op, modeConv(fpscr), &flags);
4785 if (fp32_is_NaN(exp, mnt)) {
4792 FPToFixed_64(sgn, exp + FP64_EXP_BIAS - FP32_EXP_BIAS + fbits,
4807 int sgn, exp;
4811 fp64_unpack(&sgn, &exp, &mnt, op, modeConv(fpscr), &flags);
4814 if (fp64_is_NaN(exp, mnt)) {
4820 result = FPToFixed_64(sgn, exp + fbits, mnt, u, rounding, &flags);