f029f4be17
2012-11-01  Tobias Burnus  <burnus@net-b.de>

        * Makefile.am (libquadmath_la_SOURCES): Add new math/* files.
        * Makefile.in: Regenerated.
        * math/acoshq.c: Update comment.
        * math/acosq.c: Ditto.
        * math/asinhq.c: Ditto.
        * math/asinq.c: Ditto.
        * math/atan2q.c: Ditto.
        * math/atanhq.c: Ditto.
        * math/ceilq.c: Ditto.
        * math/copysignq.c: Ditto.
        * math/cosq.c: Ditto.
        * math/coshq.c: Ditto.
        * math/erfq.c: Ditto.
        * math/fabsq.c: Ditto.
        * math/finiteq.c: Ditto.
        * math/floorq.c: Ditto.
        * math/fmodq.c: Ditto.
        * math/frexpq.c: Ditto.
        * math/isnanq.c: Ditto.
        * math/j0q.c: Ditto.
        * math/j1q.c: Ditto.
        * math/ldexpq.c: Ditto.
        * math/llroundq.c: Ditto.
        * math/log10q.c: Ditto.
        * math/log1pq.c: Ditto.
        * math/log2q.c: Ditto.
        * math/logq.c: Ditto.
        * math/lroundq.c: Ditto.
        * math/modfq.c: Ditto.
        * math/nextafterq.c: Ditto.
        * math/powq.c: Ditto.
        * math/rem_pio2q.c: Ditto.
        * math/remainderq.c: Ditto.
        * math/rintq.c: Ditto.
        * math/roundq.c: Ditto.
        * math/scalblnq.c: Ditto.
        * math/scalbnq.c: Ditto.
        * math/sincosq_kernel.c: Ditto.
        * math/sinq.c: Ditto.
        * math/tanq.c: Ditto.
        * math/expq.c: Ditto.
        (__expq_table, expq): Renamed local array from __expl_table.
        * math/cosq_kernel.c (__quadmath_kernel_cosq): Fix sign handling.
        * math/cacoshq.c: Changes from GLIBC; fix returned sign.
        * math/casinhq.c: Changes from GLIBC to fix special-case.
        * math/cbrtq.c: Use modified GLIBC version.
        * math/complex.c (ccoshq, cexpq, clog10q, clogq, csinhq, csinq,
        ctanhq, ctanq): Moved to separate files.
        (mult_c128, div_c128): Removed no longer needed functions.
        (cexpiq): Call sincosq instead of sinq and cosq.
        (cosq): Call cosh(-re,im) instead of cosq/sinq/sinh/cosh.
        * math/ccoshq.c (ccoshq): New file, moved from complex.c and
        modified based on GLIBC.
        * math/cexpq.c (cexpq): Ditto.
        * math/clog10q.c (clog10q): Ditto.
        * math/clogq.c (clogq): Ditto.
        * math/csinhq.c: Ditto.
        * math/csinq.c: Ditto.
        * math/csqrtq.c: Ditto.
        * math/ctanhq.c: Ditto.
        * math/ctanq.c: Ditto.
        * math/fmaq.c (fmaq): Port TININESS_AFTER_ROUNDING handling
        from GLIBC.
        * math/ilogbq.c (ilogbq): Add errno = EDOM handling.
        * math/isinf_nsq.c (__quadmath_isinf_nsq): New file, ported
        from GLIBC.
        * math/lgammaq.c (lgammaq): Add signgam handling.
        * math/sinhq.c (sinhq): Fix sign handling.
        * math/sinq_kernel.c (__quadmath_kernel_sinq): Ditto.
        * math/tgammaq.c (tgammaq): Ditto.
        * math/x2y2m1q.c: New file.
        * quadmath-imp.h (TININESS_AFTER_ROUNDING): New define.
        (__quadmath_x2y2m1q, __quadmath_isinf_nsq): New prototypes.

From-SVN: r193063
/* Compute x * y + z as ternary operation.
   Copyright (C) 2010-2012 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Jakub Jelinek <jakub@redhat.com>, 2010.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

#include "quadmath-imp.h"
#include <math.h>
#include <float.h>
#ifdef HAVE_FENV_H
# include <fenv.h>
# if defined HAVE_FEHOLDEXCEPT && defined HAVE_FESETROUND \
     && defined HAVE_FEUPDATEENV && defined HAVE_FETESTEXCEPT \
     && defined FE_TOWARDZERO && defined FE_INEXACT
#  define USE_FENV_H
# endif
#endif

/* This implementation uses rounding to odd to avoid problems with
   double rounding.  See a paper by Boldo and Melquiond:
   http://www.lri.fr/~melquion/doc/08-tc.pdf  */

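/* Overview: x * y is first computed exactly as the sum m1 + m2 using
   Dekker's algorithm, and z + m1 exactly as a1 + a2 using Knuth's
   two-sum.  The small terms a2 + m2 are then added with the rounding
   mode forced to round-toward-zero (when the fenv.h support checked
   for above is available), and the inexact flag is folded into the
   least significant bit of that sum (rounding to odd), so that the
   final addition a1 + (a2 + m2) performed in the caller's rounding
   mode cannot be hurt by double rounding.  */
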
__float128
fmaq (__float128 x, __float128 y, __float128 z)
{
  ieee854_float128 u, v, w;
  int adjust = 0;
  u.value = x;
  v.value = y;
  w.value = z;
  if (__builtin_expect (u.ieee.exponent + v.ieee.exponent
                        >= 0x7fff + IEEE854_FLOAT128_BIAS
                           - FLT128_MANT_DIG, 0)
      || __builtin_expect (u.ieee.exponent >= 0x7fff - FLT128_MANT_DIG, 0)
      || __builtin_expect (v.ieee.exponent >= 0x7fff - FLT128_MANT_DIG, 0)
      || __builtin_expect (w.ieee.exponent >= 0x7fff - FLT128_MANT_DIG, 0)
      || __builtin_expect (u.ieee.exponent + v.ieee.exponent
                           <= IEEE854_FLOAT128_BIAS + FLT128_MANT_DIG, 0))
    {
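      /* Slow path: an operand is zero, Inf or NaN, or the exponents
         are close enough to the overflow or underflow threshold that
         the exact computation below could go out of range.  The
         exceptional inputs are handled directly; otherwise the
         operands are rescaled by powers of two, with the scaling
         recorded in ADJUST and undone before returning.  */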
      /* If z is Inf, but x and y are finite, the result should be
         z rather than NaN.  */
      if (w.ieee.exponent == 0x7fff
          && u.ieee.exponent != 0x7fff
          && v.ieee.exponent != 0x7fff)
        return (z + x) + y;
      /* If z is zero and x and y are nonzero, compute the result
         as x * y to avoid the wrong sign of a zero result if x * y
         underflows to 0.  */
      if (z == 0 && x != 0 && y != 0)
        return x * y;
      /* If x or y or z is Inf/NaN, or if fma will certainly overflow,
         or if x * y is less than half of FLT128_DENORM_MIN,
         compute as x * y + z.  */
      if (u.ieee.exponent == 0x7fff
          || v.ieee.exponent == 0x7fff
          || w.ieee.exponent == 0x7fff
          || u.ieee.exponent + v.ieee.exponent
             > 0x7fff + IEEE854_FLOAT128_BIAS
          || u.ieee.exponent + v.ieee.exponent
             < IEEE854_FLOAT128_BIAS - FLT128_MANT_DIG - 2)
        return x * y + z;
      if (u.ieee.exponent + v.ieee.exponent
          >= 0x7fff + IEEE854_FLOAT128_BIAS - FLT128_MANT_DIG)
        {
          /* Compute 1p-113 times smaller result and multiply
             at the end.  */
          if (u.ieee.exponent > v.ieee.exponent)
            u.ieee.exponent -= FLT128_MANT_DIG;
          else
            v.ieee.exponent -= FLT128_MANT_DIG;
          /* If x + y exponent is very large and z exponent is very small,
             it doesn't matter if we don't adjust it.  */
          if (w.ieee.exponent > FLT128_MANT_DIG)
            w.ieee.exponent -= FLT128_MANT_DIG;
          adjust = 1;
        }
      else if (w.ieee.exponent >= 0x7fff - FLT128_MANT_DIG)
        {
          /* Similarly.
             If z exponent is very large and x and y exponents are
             very small, it doesn't matter if we don't adjust it.  */
          if (u.ieee.exponent > v.ieee.exponent)
            {
              if (u.ieee.exponent > FLT128_MANT_DIG)
                u.ieee.exponent -= FLT128_MANT_DIG;
            }
          else if (v.ieee.exponent > FLT128_MANT_DIG)
            v.ieee.exponent -= FLT128_MANT_DIG;
          w.ieee.exponent -= FLT128_MANT_DIG;
          adjust = 1;
        }
      else if (u.ieee.exponent >= 0x7fff - FLT128_MANT_DIG)
        {
          u.ieee.exponent -= FLT128_MANT_DIG;
          if (v.ieee.exponent)
            v.ieee.exponent += FLT128_MANT_DIG;
          else
            v.value *= 0x1p113Q;
        }
      else if (v.ieee.exponent >= 0x7fff - FLT128_MANT_DIG)
        {
          v.ieee.exponent -= FLT128_MANT_DIG;
          if (u.ieee.exponent)
            u.ieee.exponent += FLT128_MANT_DIG;
          else
            u.value *= 0x1p113Q;
        }
      else /* if (u.ieee.exponent + v.ieee.exponent
                  <= IEEE854_FLOAT128_BIAS + FLT128_MANT_DIG) */
        {
          if (u.ieee.exponent > v.ieee.exponent)
            u.ieee.exponent += 2 * FLT128_MANT_DIG;
          else
            v.ieee.exponent += 2 * FLT128_MANT_DIG;
          if (w.ieee.exponent <= 4 * FLT128_MANT_DIG + 4)
            {
              if (w.ieee.exponent)
                w.ieee.exponent += 2 * FLT128_MANT_DIG;
              else
                w.value *= 0x1p226Q;
              adjust = -1;
            }
          /* Otherwise x * y should just affect inexact
             and nothing else.  */
        }
      x = u.value;
      y = v.value;
      z = w.value;
    }

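  /* From here on x, y and z are finite and any rescaling applied above
     keeps the exact product and sum computed below in range; the
     scaling is compensated for via ADJUST when returning.  */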
  /* Ensure correct sign of exact 0 + 0.  */
  if (__builtin_expect ((x == 0 || y == 0) && z == 0, 0))
    return x * y + z;

  /* Multiplication m1 + m2 = x * y using Dekker's algorithm.  */
#define C ((1LL << (FLT128_MANT_DIG + 1) / 2) + 1)
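  /* With FLT128_MANT_DIG == 113, C is 2^57 + 1.  Multiplying by C and
     cancelling (Veltkamp splitting) breaks x and y into high and low
     halves, x = x1 + x2 and y = y1 + y2, whose pairwise products are
     all exact in __float128, so m1 below is the rounded product x * y
     and m2 its exact rounding error: m1 + m2 == x * y.  */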
  __float128 x1 = x * C;
  __float128 y1 = y * C;
  __float128 m1 = x * y;
  x1 = (x - x1) + x1;
  y1 = (y - y1) + y1;
  __float128 x2 = x - x1;
  __float128 y2 = y - y1;
  __float128 m2 = (((x1 * y1 - m1) + x1 * y2) + x2 * y1) + x2 * y2;

  /* Addition a1 + a2 = z + m1 using Knuth's algorithm.  */
  __float128 a1 = z + m1;
  __float128 t1 = a1 - z;
  __float128 t2 = a1 - t1;
  t1 = m1 - t1;
  t2 = z - t2;
  __float128 a2 = t1 + t2;

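  /* a1 is the rounded sum z + m1 and a2 recovers its rounding error,
     so a1 + a2 reconstructs z + m1; Knuth's two-sum needs no test of
     which operand has the larger magnitude, unlike the faster Dekker
     variant.  */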
#ifdef USE_FENV_H
|
|
fenv_t env;
|
|
feholdexcept (&env);
|
|
fesetround (FE_TOWARDZERO);
|
|
#endif
|
|
/* Perform m2 + a2 addition with round to odd. */
|
|
u.value = a2 + m2;
|
|
|
|
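  /* When USE_FENV_H is defined, the addition above was truncated
     (FE_TOWARDZERO) and FE_INEXACT records whether any bits were
     discarded; ORing that flag into the least significant bit in the
     branches below yields the round-to-odd result that protects the
     final addition to a1 from double rounding.  */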
  if (__builtin_expect (adjust == 0, 1))
    {
#ifdef USE_FENV_H
      if ((u.ieee.mant_low & 1) == 0 && u.ieee.exponent != 0x7fff)
        u.ieee.mant_low |= fetestexcept (FE_INEXACT) != 0;
      feupdateenv (&env);
#endif
      /* Result is a1 + u.value.  */
      return a1 + u.value;
    }
  else if (__builtin_expect (adjust > 0, 1))
    {
#ifdef USE_FENV_H
      if ((u.ieee.mant_low & 1) == 0 && u.ieee.exponent != 0x7fff)
        u.ieee.mant_low |= fetestexcept (FE_INEXACT) != 0;
      feupdateenv (&env);
#endif
      /* Result is a1 + u.value, scaled up.  */
      return (a1 + u.value) * 0x1p113Q;
    }
  else
    {
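      /* adjust < 0: the operands were scaled up by 2^226 because the
         true result lies in or near the subnormal range.  The sum must
         now be scaled back down by 0x1p-226, and when the scaled
         result is subnormal that scaling rounds a second time, so the
         sticky information collected in J below is used to keep that
         second rounding from going wrong.  */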
#ifdef USE_FENV_H
      if ((u.ieee.mant_low & 1) == 0)
        u.ieee.mant_low |= fetestexcept (FE_INEXACT) != 0;
#endif
      v.value = a1 + u.value;
      /* Ensure the addition is not scheduled after fetestexcept call.  */
      asm volatile ("" : : "m" (v.value));
#ifdef USE_FENV_H
      int j = fetestexcept (FE_INEXACT) != 0;
      feupdateenv (&env);
#else
      int j = 0;
#endif
      /* Ensure the following computations are performed in default rounding
         mode instead of just reusing the round to zero computation.  */
      asm volatile ("" : "=m" (u) : "m" (u));
      /* If a1 + u.value is exact, the only rounding happens during
         scaling down.  */
      if (j == 0)
        return v.value * 0x1p-226Q;
      /* If result rounded to zero is not subnormal, no double
         rounding will occur.  */
      if (v.ieee.exponent > 226)
        return (a1 + u.value) * 0x1p-226Q;
      /* If v.value * 0x1p-226Q with round to zero is a subnormal above
         or equal to FLT128_MIN / 2, then v.value * 0x1p-226Q shifts mantissa
         down just by 1 bit, which means v.ieee.mant_low |= j would
         change the round bit, not sticky or guard bit.
         v.value * 0x1p-226Q never normalizes by shifting up,
         so round bit plus sticky bit should be already enough
         for proper rounding.  */
      if (v.ieee.exponent == 226)
        {
          /* If the exponent would be in the normal range when
             rounding to normal precision with unbounded exponent
             range, the exact result is known and spurious underflows
             must be avoided on systems detecting tininess after
             rounding.  */
          if (TININESS_AFTER_ROUNDING)
            {
              w.value = a1 + u.value;
              if (w.ieee.exponent == 227)
                return w.value * 0x1p-226L;
            }
          /* v.ieee.mant_low & 2 is LSB bit of the result before rounding,
             v.ieee.mant_low & 1 is the round bit and j is our sticky
             bit.  */
          w.value = 0.0Q;
          w.ieee.mant_low = ((v.ieee.mant_low & 3) << 1) | j;
          w.ieee.negative = v.ieee.negative;
          v.ieee.mant_low &= ~3U;
          v.value *= 0x1p-226L;
          w.value *= 0x1p-2L;
          return v.value + w.value;
        }
      v.ieee.mant_low |= j;
      return v.value * 0x1p-226Q;
    }
}