softfloat: Move addsub_floats to softfloat-parts.c.inc

In preparation for implementing multiple sizes.  Rename to parts_addsub,
split out parts_add/sub_normal for future reuse with muladd.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Author: Richard Henderson <richard.henderson@linaro.org>
Date:   2020-10-22 15:22:55 -07:00
Commit: da10a9074a (parent: cb3ad0365f)
3 changed files with 255 additions and 141 deletions
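
The mechanism this commit builds on is C's token-pasting "template" idiom: softfloat.c defines partsN()/FloatPartsN in terms of a size macro N, then includes the same .c.inc body once per size, so a single source of truth emits both parts64_addsub() and parts128_addsub(). The toy program below shows the pattern in isolation; all of its names (demoN, demo64_width, demo128_width) are invented for this sketch and are not from the QEMU tree.

    #include <stdio.h>

    #define xglue(a, b) a##b
    #define glue(a, b)  xglue(a, b)

    /* In QEMU the macro is defined by softfloat.c and the function bodies
     * live in softfloat-parts*.c.inc; here both are inlined to keep the
     * toy self-contained. */
    #define demoN(name) glue(glue(glue(demo, N), _), name)

    #define N 64
    static int demoN(width)(void) { return N; }   /* emits demo64_width() */
    #undef N

    #define N 128
    static int demoN(width)(void) { return N; }   /* emits demo128_width() */
    #undef N

    int main(void)
    {
        printf("%d %d\n", demo64_width(), demo128_width());  /* 64 128 */
        return 0;
    }

glue() is the usual two-level paste so that N expands to its value before ## is applied; that is exactly what the new partsN(NAME) definition in this diff does.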

--- /dev/null
+++ b/fpu/softfloat-parts-addsub.c.inc
@@ -0,0 +1,62 @@
+/*
+ * Floating point arithmetic implementation
+ *
+ * The code in this source file is derived from release 2a of the SoftFloat
+ * IEC/IEEE Floating-point Arithmetic Package. Those parts of the code (and
+ * some later contributions) are provided under that license, as detailed below.
+ * It has subsequently been modified by contributors to the QEMU Project,
+ * so some portions are provided under:
+ *  the SoftFloat-2a license
+ *  the BSD license
+ *  GPL-v2-or-later
+ *
+ * Any future contributions to this file after December 1st 2014 will be
+ * taken to be licensed under the SoftFloat-2a license unless specifically
+ * indicated otherwise.
+ */
+
+static void partsN(add_normal)(FloatPartsN *a, FloatPartsN *b)
+{
+    int exp_diff = a->exp - b->exp;
+
+    if (exp_diff > 0) {
+        frac_shrjam(b, exp_diff);
+    } else if (exp_diff < 0) {
+        frac_shrjam(a, -exp_diff);
+        a->exp = b->exp;
+    }
+
+    if (frac_add(a, a, b)) {
+        frac_shrjam(a, 1);
+        a->frac_hi |= DECOMPOSED_IMPLICIT_BIT;
+        a->exp += 1;
+    }
+}
+
+static bool partsN(sub_normal)(FloatPartsN *a, FloatPartsN *b)
+{
+    int exp_diff = a->exp - b->exp;
+    int shift;
+
+    if (exp_diff > 0) {
+        frac_shrjam(b, exp_diff);
+        frac_sub(a, a, b);
+    } else if (exp_diff < 0) {
+        a->exp = b->exp;
+        a->sign ^= 1;
+        frac_shrjam(a, -exp_diff);
+        frac_sub(a, b, a);
+    } else if (frac_sub(a, a, b)) {
+        /* Overflow means that A was less than B. */
+        frac_neg(a);
+        a->sign ^= 1;
+    }
+
+    shift = frac_normalize(a);
+    if (likely(shift < N)) {
+        a->exp -= shift;
+        return true;
+    }
+    a->cls = float_class_zero;
+    return false;
+}
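
Both helpers above align the smaller operand by shifting its fraction right while "jamming" any discarded bits into the least-significant bit, so the result still registers as inexact when it is rounded later. Here is a standalone 64-bit version of that sticky shift, a sketch under the same contract as the frac_shrjam()/shift64RightJamming() calls (shr_jam64 is an invented name):

    #include <stdint.h>

    /* Right shift with a sticky ("jam") bit: every bit shifted out is ORed
     * into bit 0 of the result, so later rounding still sees the value as
     * inexact. */
    static uint64_t shr_jam64(uint64_t x, int count)
    {
        if (count == 0) {
            return x;
        }
        if (count >= 64) {
            return x != 0;      /* everything shifted out: pure sticky bit */
        }
        return (x >> count) | ((x << (64 - count)) != 0);
    }

For example, shr_jam64(0x9, 2) yields 0x3: the two discarded bits collapse into the sticky bit. Note also how partsN(sub_normal) detects an exactly-zero difference: frac_normalize() reports a shift of N or more only when the fraction is all zeros.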

--- a/fpu/softfloat-parts.c.inc
+++ b/fpu/softfloat-parts.c.inc
@@ -281,3 +281,84 @@ static void partsN(uncanon)(FloatPartsN *p, float_status *s,
     p->exp = exp;
     float_raise(flags, s);
 }
+
+/*
+ * Returns the result of adding or subtracting the floating-point
+ * values `a' and `b'.  The operation is performed according to the
+ * IEC/IEEE Standard for Binary Floating-Point Arithmetic.
+ */
+static FloatPartsN *partsN(addsub)(FloatPartsN *a, FloatPartsN *b,
+                                   float_status *s, bool subtract)
+{
+    bool b_sign = b->sign ^ subtract;
+    int ab_mask = float_cmask(a->cls) | float_cmask(b->cls);
+
+    if (a->sign != b_sign) {
+        /* Subtraction */
+        if (likely(ab_mask == float_cmask_normal)) {
+            if (parts_sub_normal(a, b)) {
+                return a;
+            }
+            /* Subtract was exact, fall through to set sign. */
+            ab_mask = float_cmask_zero;
+        }
+
+        if (ab_mask == float_cmask_zero) {
+            a->sign = s->float_rounding_mode == float_round_down;
+            return a;
+        }
+
+        if (unlikely(ab_mask & float_cmask_anynan)) {
+            goto p_nan;
+        }
+
+        if (ab_mask & float_cmask_inf) {
+            if (a->cls != float_class_inf) {
+                /* N - Inf */
+                goto return_b;
+            }
+            if (b->cls != float_class_inf) {
+                /* Inf - N */
+                return a;
+            }
+            /* Inf - Inf */
+            float_raise(float_flag_invalid, s);
+            parts_default_nan(a, s);
+            return a;
+        }
+    } else {
+        /* Addition */
+        if (likely(ab_mask == float_cmask_normal)) {
+            parts_add_normal(a, b);
+            return a;
+        }
+
+        if (ab_mask == float_cmask_zero) {
+            return a;
+        }
+
+        if (unlikely(ab_mask & float_cmask_anynan)) {
+            goto p_nan;
+        }
+
+        if (ab_mask & float_cmask_inf) {
+            a->cls = float_class_inf;
+            return a;
+        }
+    }
+
+    if (b->cls == float_class_zero) {
+        g_assert(a->cls == float_class_normal);
+        return a;
+    }
+
+    g_assert(a->cls == float_class_zero);
+    g_assert(b->cls == float_class_normal);
+ return_b:
+    b->sign = b_sign;
+    return b;
+
+ p_nan:
+    return parts_pick_nan(a, b, s);
+}
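
parts_addsub() dispatches on a pair mask rather than testing each operand's class separately: float_cmask() gives every float_class_* value its own bit, so one OR describes both operands at once, the common both-normal case becomes a single equality test, and "either operand is a NaN" is a single AND. A minimal sketch of the trick follows; the enum values and macros are paraphrased from the QEMU sources and simplified for illustration.

    #include <stdio.h>

    enum float_class {
        float_class_zero,
        float_class_normal,
        float_class_inf,
        float_class_qnan,
        float_class_snan,
    };

    #define float_cmask(cls)    (1u << (cls))
    #define float_cmask_zero    float_cmask(float_class_zero)
    #define float_cmask_normal  float_cmask(float_class_normal)
    #define float_cmask_inf     float_cmask(float_class_inf)
    #define float_cmask_anynan  (float_cmask(float_class_qnan) | \
                                 float_cmask(float_class_snan))

    int main(void)
    {
        enum float_class a = float_class_normal, b = float_class_normal;
        unsigned ab_mask = float_cmask(a) | float_cmask(b);

        /* Both operands normal: the OR collapses to one "normal" bit. */
        printf("both normal: %d\n", ab_mask == float_cmask_normal);

        /* One test covers "either operand is any kind of NaN". */
        printf("any NaN:     %d\n", (ab_mask & float_cmask_anynan) != 0);
        return 0;
    }

Returning a FloatPartsN pointer rather than a struct lets the function hand back whichever of its two operands holds the result (or the picked NaN) without copying.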

--- a/fpu/softfloat.c
+++ b/fpu/softfloat.c
@@ -749,6 +749,26 @@ static void parts128_uncanon(FloatParts128 *p, float_status *status,
 #define parts_uncanon(A, S, F) \
     PARTS_GENERIC_64_128(uncanon, A)(A, S, F)
 
+static void parts64_add_normal(FloatParts64 *a, FloatParts64 *b);
+static void parts128_add_normal(FloatParts128 *a, FloatParts128 *b);
+
+#define parts_add_normal(A, B) \
+    PARTS_GENERIC_64_128(add_normal, A)(A, B)
+
+static bool parts64_sub_normal(FloatParts64 *a, FloatParts64 *b);
+static bool parts128_sub_normal(FloatParts128 *a, FloatParts128 *b);
+
+#define parts_sub_normal(A, B) \
+    PARTS_GENERIC_64_128(sub_normal, A)(A, B)
+
+static FloatParts64 *parts64_addsub(FloatParts64 *a, FloatParts64 *b,
+                                    float_status *s, bool subtract);
+static FloatParts128 *parts128_addsub(FloatParts128 *a, FloatParts128 *b,
+                                      float_status *s, bool subtract);
+
+#define parts_addsub(A, B, S, Z) \
+    PARTS_GENERIC_64_128(addsub, A)(A, B, S, Z)
+
 /*
  * Helper functions for softfloat-parts.c.inc, per-size operations.
  */
@@ -756,6 +776,21 @@ static void parts128_uncanon(FloatParts128 *p, float_status *status,
 #define FRAC_GENERIC_64_128(NAME, P) \
     QEMU_GENERIC(P, (FloatParts128 *, frac128_##NAME), frac64_##NAME)
 
+static bool frac64_add(FloatParts64 *r, FloatParts64 *a, FloatParts64 *b)
+{
+    return uadd64_overflow(a->frac, b->frac, &r->frac);
+}
+
+static bool frac128_add(FloatParts128 *r, FloatParts128 *a, FloatParts128 *b)
+{
+    bool c = 0;
+    r->frac_lo = uadd64_carry(a->frac_lo, b->frac_lo, &c);
+    r->frac_hi = uadd64_carry(a->frac_hi, b->frac_hi, &c);
+    return c;
+}
+
+#define frac_add(R, A, B)  FRAC_GENERIC_64_128(add, R)(R, A, B)
+
 static bool frac64_addi(FloatParts64 *r, FloatParts64 *a, uint64_t c)
 {
     return uadd64_overflow(a->frac, c, &r->frac);
@@ -824,6 +859,20 @@ static bool frac128_eqz(FloatParts128 *a)
 #define frac_eqz(A)  FRAC_GENERIC_64_128(eqz, A)(A)
 
+static void frac64_neg(FloatParts64 *a)
+{
+    a->frac = -a->frac;
+}
+
+static void frac128_neg(FloatParts128 *a)
+{
+    bool c = 0;
+    a->frac_lo = usub64_borrow(0, a->frac_lo, &c);
+    a->frac_hi = usub64_borrow(0, a->frac_hi, &c);
+}
+
+#define frac_neg(A)  FRAC_GENERIC_64_128(neg, A)(A)
+
 static int frac64_normalize(FloatParts64 *a)
 {
     if (a->frac) {
@@ -891,18 +940,36 @@ static void frac128_shrjam(FloatParts128 *a, int c)
 #define frac_shrjam(A, C)  FRAC_GENERIC_64_128(shrjam, A)(A, C)
 
-#define partsN(NAME)   parts64_##NAME
-#define FloatPartsN    FloatParts64
+static bool frac64_sub(FloatParts64 *r, FloatParts64 *a, FloatParts64 *b)
+{
+    return usub64_overflow(a->frac, b->frac, &r->frac);
+}
+
+static bool frac128_sub(FloatParts128 *r, FloatParts128 *a, FloatParts128 *b)
+{
+    bool c = 0;
+    r->frac_lo = usub64_borrow(a->frac_lo, b->frac_lo, &c);
+    r->frac_hi = usub64_borrow(a->frac_hi, b->frac_hi, &c);
+    return c;
+}
+
+#define frac_sub(R, A, B)  FRAC_GENERIC_64_128(sub, R)(R, A, B)
+
+#define partsN(NAME)   glue(glue(glue(parts,N),_),NAME)
+#define FloatPartsN    glue(FloatParts,N)
+
+#define N 64
 
+#include "softfloat-parts-addsub.c.inc"
 #include "softfloat-parts.c.inc"
 
-#undef  partsN
-#undef  FloatPartsN
-#define partsN(NAME)   parts128_##NAME
-#define FloatPartsN    FloatParts128
+#undef  N
+#define N 128
 
+#include "softfloat-parts-addsub.c.inc"
 #include "softfloat-parts.c.inc"
 
+#undef  N
 #undef  partsN
 #undef  FloatPartsN
@@ -980,165 +1047,73 @@ static float64 float64_round_pack_canonical(FloatParts64 *p,
 }
 
 /*
- * Returns the result of adding or subtracting the values of the
- * floating-point values `a' and `b'. The operation is performed
- * according to the IEC/IEEE Standard for Binary Floating-Point
- * Arithmetic.
+ * Addition and subtraction
  */
 
-static FloatParts64 addsub_floats(FloatParts64 a, FloatParts64 b, bool subtract,
-                                  float_status *s)
+static float16 QEMU_FLATTEN
+float16_addsub(float16 a, float16 b, float_status *status, bool subtract)
 {
-    bool a_sign = a.sign;
-    bool b_sign = b.sign ^ subtract;
-
-    if (a_sign != b_sign) {
-        /* Subtraction */
-        if (a.cls == float_class_normal && b.cls == float_class_normal) {
-            if (a.exp > b.exp || (a.exp == b.exp && a.frac >= b.frac)) {
-                shift64RightJamming(b.frac, a.exp - b.exp, &b.frac);
-                a.frac = a.frac - b.frac;
-            } else {
-                shift64RightJamming(a.frac, b.exp - a.exp, &a.frac);
-                a.frac = b.frac - a.frac;
-                a.exp = b.exp;
-                a_sign ^= 1;
-            }
-
-            if (a.frac == 0) {
-                a.cls = float_class_zero;
-                a.sign = s->float_rounding_mode == float_round_down;
-            } else {
-                int shift = clz64(a.frac);
-
-                a.frac = a.frac << shift;
-                a.exp = a.exp - shift;
-                a.sign = a_sign;
-            }
-            return a;
-        }
-        if (is_nan(a.cls) || is_nan(b.cls)) {
-            return *parts_pick_nan(&a, &b, s);
-        }
-        if (a.cls == float_class_inf) {
-            if (b.cls == float_class_inf) {
-                float_raise(float_flag_invalid, s);
-                parts_default_nan(&a, s);
-            }
-            return a;
-        }
-        if (a.cls == float_class_zero && b.cls == float_class_zero) {
-            a.sign = s->float_rounding_mode == float_round_down;
-            return a;
-        }
-        if (a.cls == float_class_zero || b.cls == float_class_inf) {
-            b.sign = a_sign ^ 1;
-            return b;
-        }
-        if (b.cls == float_class_zero) {
-            return a;
-        }
-    } else {
-        /* Addition */
-        if (a.cls == float_class_normal && b.cls == float_class_normal) {
-            if (a.exp > b.exp) {
-                shift64RightJamming(b.frac, a.exp - b.exp, &b.frac);
-            } else if (a.exp < b.exp) {
-                shift64RightJamming(a.frac, b.exp - a.exp, &a.frac);
-                a.exp = b.exp;
-            }
-
-            if (uadd64_overflow(a.frac, b.frac, &a.frac)) {
-                shift64RightJamming(a.frac, 1, &a.frac);
-                a.frac |= DECOMPOSED_IMPLICIT_BIT;
-                a.exp += 1;
-            }
-            return a;
-        }
-        if (is_nan(a.cls) || is_nan(b.cls)) {
-            return *parts_pick_nan(&a, &b, s);
-        }
-        if (a.cls == float_class_inf || b.cls == float_class_zero) {
-            return a;
-        }
-        if (b.cls == float_class_inf || a.cls == float_class_zero) {
-            b.sign = b_sign;
-            return b;
-        }
-    }
-    g_assert_not_reached();
-}
-
-/*
- * Returns the result of adding or subtracting the floating-point
- * values `a' and `b'. The operation is performed according to the
- * IEC/IEEE Standard for Binary Floating-Point Arithmetic.
- */
-
-float16 QEMU_FLATTEN float16_add(float16 a, float16 b, float_status *status)
-{
-    FloatParts64 pa, pb, pr;
+    FloatParts64 pa, pb, *pr;
 
     float16_unpack_canonical(&pa, a, status);
     float16_unpack_canonical(&pb, b, status);
-    pr = addsub_floats(pa, pb, false, status);
+    pr = parts_addsub(&pa, &pb, status, subtract);
 
-    return float16_round_pack_canonical(&pr, status);
+    return float16_round_pack_canonical(pr, status);
 }
 
-float16 QEMU_FLATTEN float16_sub(float16 a, float16 b, float_status *status)
+float16 float16_add(float16 a, float16 b, float_status *status)
 {
-    FloatParts64 pa, pb, pr;
+    return float16_addsub(a, b, status, false);
+}
 
-    float16_unpack_canonical(&pa, a, status);
-    float16_unpack_canonical(&pb, b, status);
-    pr = addsub_floats(pa, pb, true, status);
-
-    return float16_round_pack_canonical(&pr, status);
+float16 float16_sub(float16 a, float16 b, float_status *status)
+{
+    return float16_addsub(a, b, status, true);
 }
 
 static float32 QEMU_SOFTFLOAT_ATTR
-soft_f32_addsub(float32 a, float32 b, bool subtract, float_status *status)
+soft_f32_addsub(float32 a, float32 b, float_status *status, bool subtract)
 {
-    FloatParts64 pa, pb, pr;
+    FloatParts64 pa, pb, *pr;
 
     float32_unpack_canonical(&pa, a, status);
     float32_unpack_canonical(&pb, b, status);
-    pr = addsub_floats(pa, pb, subtract, status);
+    pr = parts_addsub(&pa, &pb, status, subtract);
 
-    return float32_round_pack_canonical(&pr, status);
+    return float32_round_pack_canonical(pr, status);
 }
 
-static inline float32 soft_f32_add(float32 a, float32 b, float_status *status)
+static float32 soft_f32_add(float32 a, float32 b, float_status *status)
 {
-    return soft_f32_addsub(a, b, false, status);
+    return soft_f32_addsub(a, b, status, false);
 }
 
-static inline float32 soft_f32_sub(float32 a, float32 b, float_status *status)
+static float32 soft_f32_sub(float32 a, float32 b, float_status *status)
 {
-    return soft_f32_addsub(a, b, true, status);
+    return soft_f32_addsub(a, b, status, true);
 }
 
 static float64 QEMU_SOFTFLOAT_ATTR
-soft_f64_addsub(float64 a, float64 b, bool subtract, float_status *status)
+soft_f64_addsub(float64 a, float64 b, float_status *status, bool subtract)
 {
-    FloatParts64 pa, pb, pr;
+    FloatParts64 pa, pb, *pr;
 
     float64_unpack_canonical(&pa, a, status);
     float64_unpack_canonical(&pb, b, status);
-    pr = addsub_floats(pa, pb, subtract, status);
+    pr = parts_addsub(&pa, &pb, status, subtract);
 
-    return float64_round_pack_canonical(&pr, status);
+    return float64_round_pack_canonical(pr, status);
 }
 
-static inline float64 soft_f64_add(float64 a, float64 b, float_status *status)
+static float64 soft_f64_add(float64 a, float64 b, float_status *status)
 {
-    return soft_f64_addsub(a, b, false, status);
+    return soft_f64_addsub(a, b, status, false);
 }
 
-static inline float64 soft_f64_sub(float64 a, float64 b, float_status *status)
+static float64 soft_f64_sub(float64 a, float64 b, float_status *status)
 {
-    return soft_f64_addsub(a, b, true, status);
+    return soft_f64_addsub(a, b, status, true);
 }
 
 static float hard_f32_add(float a, float b)
@@ -1216,30 +1191,26 @@ float64_sub(float64 a, float64 b, float_status *s)
     return float64_addsub(a, b, s, hard_f64_sub, soft_f64_sub);
 }
 
-/*
- * Returns the result of adding or subtracting the bfloat16
- * values `a' and `b'.
- */
-bfloat16 QEMU_FLATTEN bfloat16_add(bfloat16 a, bfloat16 b, float_status *status)
+static bfloat16 QEMU_FLATTEN
+bfloat16_addsub(bfloat16 a, bfloat16 b, float_status *status, bool subtract)
 {
-    FloatParts64 pa, pb, pr;
+    FloatParts64 pa, pb, *pr;
 
     bfloat16_unpack_canonical(&pa, a, status);
     bfloat16_unpack_canonical(&pb, b, status);
-    pr = addsub_floats(pa, pb, false, status);
+    pr = parts_addsub(&pa, &pb, status, subtract);
 
-    return bfloat16_round_pack_canonical(&pr, status);
+    return bfloat16_round_pack_canonical(pr, status);
 }
 
-bfloat16 QEMU_FLATTEN bfloat16_sub(bfloat16 a, bfloat16 b, float_status *status)
+bfloat16 bfloat16_add(bfloat16 a, bfloat16 b, float_status *status)
 {
-    FloatParts64 pa, pb, pr;
+    return bfloat16_addsub(a, b, status, false);
+}
 
-    bfloat16_unpack_canonical(&pa, a, status);
-    bfloat16_unpack_canonical(&pb, b, status);
-    pr = addsub_floats(pa, pb, true, status);
-
-    return bfloat16_round_pack_canonical(&pr, status);
+bfloat16 bfloat16_sub(bfloat16 a, bfloat16 b, float_status *status)
+{
+    return bfloat16_addsub(a, b, status, true);
 }
 
 /*
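
The 128-bit fraction helpers added above (frac128_add(), frac128_sub(), frac128_neg()) are two-word carry chains: each uadd64_carry()/usub64_borrow() call consumes the carry or borrow left behind by the previous word through a bool. Portable equivalents are sketched below; QEMU's real helpers live in include/qemu/host-utils.h and may map to compiler builtins, and the *_sketch names are invented here.

    #include <stdint.h>
    #include <stdbool.h>

    static uint64_t uadd64_carry_sketch(uint64_t x, uint64_t y, bool *pcarry)
    {
        uint64_t r = x + y + *pcarry;

        /* With a carry-in, wrap-around makes the sum land at or below x. */
        *pcarry = *pcarry ? r <= x : r < x;
        return r;
    }

    static uint64_t usub64_borrow_sketch(uint64_t x, uint64_t y, bool *pborrow)
    {
        uint64_t r = x - y - *pborrow;

        /* With a borrow-in, wrap-around leaves the difference at or above x. */
        *pborrow = *pborrow ? r >= x : r > x;
        return r;
    }

    int main(void)
    {
        /* 128-bit increment, low word first, mirroring frac128_add(). */
        uint64_t lo = UINT64_MAX, hi = 0;
        bool c = false;

        lo = uadd64_carry_sketch(lo, 1, &c);   /* lo wraps to 0, c becomes true */
        hi = uadd64_carry_sketch(hi, 0, &c);   /* carry propagates: hi = 1 */
        return !(lo == 0 && hi == 1);
    }

frac128_neg() is the same borrow chain computing 0 - x one word at a time, which is why it can reuse usub64_borrow() with a zero minuend.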