Update libquadmath fmaq from glibc, fix nanq issues.

This patch extends update-quadmath.py to update fmaq from glibc.

The issue in that function was that quadmath-imp.h had a struct in a
union with mant_high and mant_low fields (up to 64-bit) whereas glibc
has mantissa0, mantissa1, mantissa2 and mantissa3 (up to 32-bit).  The
patch changes those fields to be the same as in glibc, moving printf /
strtod code that also uses those fields back to closer to the glibc
form.  This allows fmaq to be updated automatically from glibc (which
brings in at least one bug fix from glibc from 2015).

nanq was also using the mant_high field name, and had other issues: it
only partly initialized the union from which a value was returned, and
setting mant_high to 1 meant a signaling NaN would be returned rather
than a quiet NaN.  This patch fixes those issues as part of updating
it to use the changed interfaces (but does not fix the issue of not
using the argument).

Bootstrapped with no regressions on x86_64-pc-linux-gnu.

	* quadmath-imp.h (ieee854_float128): Use mantissa0, mantissa1,
	mantissa2 and mantissa3 fields instead of mant_high and mant_low.
	Change nan field to ieee_nan.
	* update-quadmath.py (update_sources): Also update fmaq.c.
	* math/nanq.c (nanq): Use ieee_nan field of union.
	Zero-initialize f.  Set quiet_nan field.
	* printf/flt1282mpn.c, printf/printf_fphex.c, strtod/mpn2flt128.c,
	strtod/strtoflt128.c: Use mantissa0, mantissa1, mantissa2 and
	mantissa3 fields.  Use ieee_nan and quiet_nan field.
	* math/fmaq.c: Regenerate from glibc sources with
	update-quadmath.py.

From-SVN: r265874
This commit is contained in:
Joseph Myers 2018-11-07 13:49:03 +00:00 committed by Joseph Myers
parent 296b0b042a
commit 667b3ec15d
9 changed files with 93 additions and 91 deletions

View File

@@ -1,3 +1,17 @@
2018-11-07 Joseph Myers <joseph@codesourcery.com>
* quadmath-imp.h (ieee854_float128): Use mantissa0, mantissa1,
mantissa2 and mantissa3 fields instead of mant_high and mant_low.
Change nan field to ieee_nan.
* update-quadmath.py (update_sources): Also update fmaq.c.
* math/nanq.c (nanq): Use ieee_nan field of union.
Zero-initialize f. Set quiet_nan field.
* printf/flt1282mpn.c, printf/printf_fphex.c, strtod/mpn2flt128.c,
strtod/strtoflt128.c: Use mantissa0, mantissa1, mantissa2 and
mantissa3 fields. Use ieee_nan and quiet_nan field.
* math/fmaq.c: Regenerate from glibc sources with
update-quadmath.py.
2018-11-05 Joseph Myers <joseph@codesourcery.com>
PR libquadmath/68686

View File

@@ -1,5 +1,5 @@
/* Compute x * y + z as ternary operation.
Copyright (C) 2010-2017 Free Software Foundation, Inc.
Copyright (C) 2010-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Jakub Jelinek <jakub@redhat.com>, 2010.
@@ -18,16 +18,6 @@
<http://www.gnu.org/licenses/>. */
#include "quadmath-imp.h"
#include <math.h>
#include <float.h>
#ifdef HAVE_FENV_H
# include <fenv.h>
# if defined HAVE_FEHOLDEXCEPT && defined HAVE_FESETROUND \
&& defined HAVE_FEUPDATEENV && defined HAVE_FETESTEXCEPT \
&& defined FE_TOWARDZERO && defined FE_INEXACT
# define USE_FENV_H
# endif
#endif
/* This implementation uses rounding to odd to avoid problems with
double rounding. See a paper by Boldo and Melquiond:
@@ -73,7 +63,7 @@ fmaq (__float128 x, __float128 y, __float128 z)
if (u.ieee.exponent + v.ieee.exponent
> 0x7fff + IEEE854_FLOAT128_BIAS)
return x * y;
/* If x * y is less than 1/4 of FLT128_DENORM_MIN, neither the
/* If x * y is less than 1/4 of FLT128_TRUE_MIN, neither the
result nor whether there is underflow depends on its exact
value, only on its sign. */
if (u.ieee.exponent + v.ieee.exponent
@@ -94,8 +84,10 @@ fmaq (__float128 x, __float128 y, __float128 z)
: (w.ieee.exponent == 0
|| (w.ieee.exponent == 1
&& w.ieee.negative != neg
&& w.ieee.mant_low == 0
&& w.ieee.mant_high == 0)))
&& w.ieee.mantissa3 == 0
&& w.ieee.mantissa2 == 0
&& w.ieee.mantissa1 == 0
&& w.ieee.mantissa0 == 0)))
{
__float128 force_underflow = x * y;
math_force_eval (force_underflow);
@@ -124,7 +116,7 @@ fmaq (__float128 x, __float128 y, __float128 z)
very small, adjust them up to avoid spurious underflows,
rather than down. */
if (u.ieee.exponent + v.ieee.exponent
<= IEEE854_FLOAT128_BIAS + FLT128_MANT_DIG)
<= IEEE854_FLOAT128_BIAS + 2 * FLT128_MANT_DIG)
{
if (u.ieee.exponent > v.ieee.exponent)
u.ieee.exponent += 2 * FLT128_MANT_DIG + 2;
@@ -181,17 +173,15 @@ fmaq (__float128 x, __float128 y, __float128 z)
}
/* Ensure correct sign of exact 0 + 0. */
if (__builtin_expect ((x == 0 || y == 0) && z == 0, 0))
if (__glibc_unlikely ((x == 0 || y == 0) && z == 0))
{
x = math_opt_barrier (x);
return x * y + z;
}
#ifdef USE_FENV_H
fenv_t env;
feholdexcept (&env);
fesetround (FE_TONEAREST);
#endif
/* Multiplication m1 + m2 = x * y using Dekker's algorithm. */
#define C ((1LL << (FLT128_MANT_DIG + 1) / 2) + 1)
@@ -214,62 +204,46 @@ fmaq (__float128 x, __float128 y, __float128 z)
/* Ensure the arithmetic is not scheduled after feclearexcept call. */
math_force_eval (m2);
math_force_eval (a2);
#ifdef USE_FENV_H
feclearexcept (FE_INEXACT);
#endif
/* If the result is an exact zero, ensure it has the correct sign. */
if (a1 == 0 && m2 == 0)
{
#ifdef USE_FENV_H
feupdateenv (&env);
#endif
/* Ensure that round-to-nearest value of z + m1 is not reused. */
z = math_opt_barrier (z);
return z + m1;
}
#ifdef USE_FENV_H
fesetround (FE_TOWARDZERO);
#endif
/* Perform m2 + a2 addition with round to odd. */
u.value = a2 + m2;
if (__builtin_expect (adjust == 0, 1))
if (__glibc_likely (adjust == 0))
{
#ifdef USE_FENV_H
if ((u.ieee.mant_low & 1) == 0 && u.ieee.exponent != 0x7fff)
u.ieee.mant_low |= fetestexcept (FE_INEXACT) != 0;
if ((u.ieee.mantissa3 & 1) == 0 && u.ieee.exponent != 0x7fff)
u.ieee.mantissa3 |= fetestexcept (FE_INEXACT) != 0;
feupdateenv (&env);
#endif
/* Result is a1 + u.value. */
return a1 + u.value;
}
else if (__builtin_expect (adjust > 0, 1))
else if (__glibc_likely (adjust > 0))
{
#ifdef USE_FENV_H
if ((u.ieee.mant_low & 1) == 0 && u.ieee.exponent != 0x7fff)
u.ieee.mant_low |= fetestexcept (FE_INEXACT) != 0;
if ((u.ieee.mantissa3 & 1) == 0 && u.ieee.exponent != 0x7fff)
u.ieee.mantissa3 |= fetestexcept (FE_INEXACT) != 0;
feupdateenv (&env);
#endif
/* Result is a1 + u.value, scaled up. */
return (a1 + u.value) * 0x1p113Q;
}
else
{
#ifdef USE_FENV_H
if ((u.ieee.mant_low & 1) == 0)
u.ieee.mant_low |= fetestexcept (FE_INEXACT) != 0;
#endif
if ((u.ieee.mantissa3 & 1) == 0)
u.ieee.mantissa3 |= fetestexcept (FE_INEXACT) != 0;
v.value = a1 + u.value;
/* Ensure the addition is not scheduled after fetestexcept call. */
asm volatile ("" : : "m" (v.value));
#ifdef USE_FENV_H
math_force_eval (v.value);
int j = fetestexcept (FE_INEXACT) != 0;
feupdateenv (&env);
#else
int j = 0;
#endif
/* Ensure the following computations are performed in default rounding
mode instead of just reusing the round to zero computation. */
asm volatile ("" : "=m" (u) : "m" (u));
@@ -281,11 +255,11 @@ fmaq (__float128 x, __float128 y, __float128 z)
rounding will occur. */
if (v.ieee.exponent > 228)
return (a1 + u.value) * 0x1p-228Q;
/* If v.value * 0x1p-228Q with round to zero is a subnormal above
or equal to FLT128_MIN / 2, then v.value * 0x1p-228Q shifts mantissa
down just by 1 bit, which means v.ieee.mant_low |= j would
/* If v.value * 0x1p-228L with round to zero is a subnormal above
or equal to FLT128_MIN / 2, then v.value * 0x1p-228L shifts mantissa
down just by 1 bit, which means v.ieee.mantissa3 |= j would
change the round bit, not sticky or guard bit.
v.value * 0x1p-228Q never normalizes by shifting up,
v.value * 0x1p-228L never normalizes by shifting up,
so round bit plus sticky bit should be already enough
for proper rounding. */
if (v.ieee.exponent == 228)
@@ -301,18 +275,18 @@ fmaq (__float128 x, __float128 y, __float128 z)
if (w.ieee.exponent == 229)
return w.value * 0x1p-228Q;
}
/* v.ieee.mant_low & 2 is LSB bit of the result before rounding,
v.ieee.mant_low & 1 is the round bit and j is our sticky
bit. */
w.value = 0.0Q;
w.ieee.mant_low = ((v.ieee.mant_low & 3) << 1) | j;
/* v.ieee.mantissa3 & 2 is LSB bit of the result before rounding,
v.ieee.mantissa3 & 1 is the round bit and j is our sticky
bit. */
w.value = 0;
w.ieee.mantissa3 = ((v.ieee.mantissa3 & 3) << 1) | j;
w.ieee.negative = v.ieee.negative;
v.ieee.mant_low &= ~3U;
v.ieee.mantissa3 &= ~3U;
v.value *= 0x1p-228Q;
w.value *= 0x1p-2Q;
return v.value + w.value;
}
v.ieee.mant_low |= j;
v.ieee.mantissa3 |= j;
return v.value * 0x1p-228Q;
}
}

View File

@@ -4,8 +4,8 @@ __float128
nanq (const char *tagp __attribute__ ((unused)))
{
// FIXME -- we should use the argument
ieee854_float128 f;
f.ieee.exponent = 0x7fff;
f.ieee.mant_high = 0x1;
ieee854_float128 f = { 0 };
f.ieee_nan.exponent = 0x7fff;
f.ieee_nan.quiet_nan = 0x1;
return f.value;
}

View File

@@ -39,14 +39,14 @@ mpn_extract_flt128 (mp_ptr res_ptr, mp_size_t size,
*expt = (int) u.ieee.exponent - IEEE854_FLOAT128_BIAS;
#if BITS_PER_MP_LIMB == 32
res_ptr[0] = u.ieee.mant_low; /* Low-order 32 bits of fraction. */
res_ptr[1] = (u.ieee.mant_low >> 32);
res_ptr[2] = u.ieee.mant_high;
res_ptr[3] = (u.ieee.mant_high >> 32); /* High-order 32 bits. */
res_ptr[0] = u.ieee.mantissa3; /* Low-order 32 bits of fraction. */
res_ptr[1] = u.ieee.mantissa2;
res_ptr[2] = u.ieee.mantissa1;
res_ptr[3] = u.ieee.mantissa0; /* High-order 32 bits. */
#define N 4
#elif BITS_PER_MP_LIMB == 64
res_ptr[0] = u.ieee.mant_low;
res_ptr[1] = u.ieee.mant_high;
res_ptr[0] = ((mp_limb_t) u.ieee.mantissa2 << 32) | u.ieee.mantissa3;
res_ptr[1] = ((mp_limb_t) u.ieee.mantissa0 << 32) | u.ieee.mantissa1;
#define N 2
#else
#error "mp_limb size " BITS_PER_MP_LIMB "not accounted for"

View File

@@ -235,8 +235,10 @@ __quadmath_printf_fphex (struct __quadmath_printf_file *fp,
assert (sizeof (long double) == 16);
num0 = fpnum.ieee.mant_high;
num1 = fpnum.ieee.mant_low;
num0 = (((unsigned long long int) fpnum.ieee.mantissa0) << 32
| fpnum.ieee.mantissa1);
num1 = (((unsigned long long int) fpnum.ieee.mantissa2) << 32
| fpnum.ieee.mantissa3);
zero_mantissa = (num0|num1) == 0;

View File

@@ -96,11 +96,15 @@ typedef union
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
unsigned negative:1;
unsigned exponent:15;
uint64_t mant_high:48;
uint64_t mant_low:64;
unsigned mantissa0:16;
unsigned mantissa1:32;
unsigned mantissa2:32;
unsigned mantissa3:32;
#else
uint64_t mant_low:64;
uint64_t mant_high:48;
unsigned mantissa3:32;
unsigned mantissa2:32;
unsigned mantissa1:32;
unsigned mantissa0:16;
unsigned exponent:15;
unsigned negative:1;
#endif
@@ -142,16 +146,20 @@ typedef union
unsigned negative:1;
unsigned exponent:15;
unsigned quiet_nan:1;
uint64_t mant_high:47;
uint64_t mant_low:64;
unsigned mantissa0:15;
unsigned mantissa1:32;
unsigned mantissa2:32;
unsigned mantissa3:32;
#else
uint64_t mant_low:64;
uint64_t mant_high:47;
unsigned mantissa3:32;
unsigned mantissa2:32;
unsigned mantissa1:32;
unsigned mantissa0:15;
unsigned quiet_nan:1;
unsigned exponent:15;
unsigned negative:1;
#endif
} nan;
} ieee_nan;
} ieee854_float128;

View File

@@ -34,15 +34,17 @@ mpn_construct_float128 (mp_srcptr frac_ptr, int expt, int sign)
u.ieee.negative = sign;
u.ieee.exponent = expt + IEEE854_FLOAT128_BIAS;
#if BITS_PER_MP_LIMB == 32
u.ieee.mant_low = (((uint64_t) frac_ptr[1]) << 32)
| (frac_ptr[0] & 0xffffffff);
u.ieee.mant_high = (((uint64_t) frac_ptr[3]
& (((mp_limb_t) 1 << (FLT128_MANT_DIG - 96)) - 1))
<< 32) | (frac_ptr[2] & 0xffffffff);
u.ieee.mantissa3 = frac_ptr[0];
u.ieee.mantissa2 = frac_ptr[1];
u.ieee.mantissa1 = frac_ptr[2];
u.ieee.mantissa0 = frac_ptr[3] & (((mp_limb_t) 1
<< (FLT128_MANT_DIG - 96)) - 1);
#elif BITS_PER_MP_LIMB == 64
u.ieee.mant_low = frac_ptr[0];
u.ieee.mant_high = frac_ptr[1]
& (((mp_limb_t) 1 << (FLT128_MANT_DIG - 64)) - 1);
u.ieee.mantissa3 = frac_ptr[0] & (((mp_limb_t) 1 << 32) - 1);
u.ieee.mantissa2 = frac_ptr[0] >> 32;
u.ieee.mantissa1 = frac_ptr[1] & (((mp_limb_t) 1 << 32) - 1);
u.ieee.mantissa0 = (frac_ptr[1] >> 32) & (((mp_limb_t) 1
<< (FLT128_MANT_DIG - 96)) - 1);
#else
#error "mp_limb size " BITS_PER_MP_LIMB "not accounted for"
#endif

View File

@@ -30,12 +30,15 @@
#endif
#define MPN2FLOAT mpn_construct_float128
#define FLOAT_HUGE_VAL HUGE_VALQ
#define SET_MANTISSA(flt, mant) \
do { ieee854_float128 u; \
u.value = (flt); \
u.ieee.mant_high = 0x800000000000ULL; \
u.ieee.mant_low = mant; \
(flt) = u.value; \
#define SET_MANTISSA(flt, mant) \
do { ieee854_float128 u; \
u.value = (flt); \
u.ieee_nan.mantissa0 = 0; \
u.ieee_nan.mantissa1 = 0; \
u.ieee_nan.mantissa2 = (mant) >> 32; \
u.ieee_nan.mantissa3 = (mant); \
u.ieee_nan.quiet_nan = 1; \
(flt) = u.value; \
} while (0)
static inline __attribute__((__always_inline__))

View File

@@ -143,8 +143,7 @@ def update_sources(glibc_srcdir, quadmath_srcdir):
# Replace all #includes with a single include of quadmath-imp.h.
repl_map['(\n+#include[^\n]*)+\n+'] = '\n\n#include "quadmath-imp.h"\n\n'
# Omitted from this list because code comes from more than one
# glibc source file: rem_pio2. Omitted because of a union not
# currently provided in libquadmath: fma.
# glibc source file: rem_pio2.
ldbl_files = {
'e_acoshl.c': 'acoshq.c', 'e_acosl.c': 'acosq.c',
's_asinhl.c': 'asinhq.c', 'e_asinl.c': 'asinq.c',
@@ -155,7 +154,7 @@ def update_sources(glibc_srcdir, quadmath_srcdir):
's_erfl.c': 'erfq.c', 's_expm1l.c': 'expm1q.c', 'e_expl.c': 'expq.c',
't_expl.h': 'expq_table.h', 's_fabsl.c': 'fabsq.c',
's_finitel.c': 'finiteq.c', 's_floorl.c': 'floorq.c',
'e_fmodl.c': 'fmodq.c', 's_frexpl.c': 'frexpq.c',
's_fmal.c': 'fmaq.c', 'e_fmodl.c': 'fmodq.c', 's_frexpl.c': 'frexpq.c',
'e_lgammal_r.c': 'lgammaq.c', 'lgamma_negl.c': 'lgammaq_neg.c',
'lgamma_productl.c': 'lgammaq_product.c', 'e_hypotl.c': 'hypotq.c',
'e_ilogbl.c': 'ilogbq.c', 's_isinfl.c': 'isinfq.c',