/* glibc/math/k_casinh.c  */
/* Return arc hyperbole sine for double value, with the imaginary part
of the result possibly adjusted for use in computing other
functions.
Copyright (C) 1997-2015 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
#include <complex.h>
#include <math.h>
#include <math_private.h>
#include <float.h>
/* Return the complex inverse hyperbolic sine of finite nonzero Z,
with the imaginary part of the result subtracted from pi/2 if ADJ
is nonzero. */
__complex__ double
__kernel_casinh (__complex__ double x, int adj)
{
  __complex__ double res;
  double rx, ix;
  __complex__ double y;

  /* Avoid cancellation by reducing to the first quadrant.  */
  rx = fabs (__real__ x);
  ix = fabs (__imag__ x);

  if (rx >= 1.0 / DBL_EPSILON || ix >= 1.0 / DBL_EPSILON)
    {
      /* For large x in the first quadrant, x + csqrt (1 + x * x)
	 is sufficiently close to 2 * x to make no significant
	 difference to the result; avoid possible overflow from
	 the squaring and addition.  */
      __real__ y = rx;
      __imag__ y = ix;

      if (adj)
	{
	  /* Swap real and imaginary parts; the imaginary part takes
	     the sign of the original argument's imaginary part.  */
	  double t = __real__ y;
	  __real__ y = __copysign (__imag__ y, __imag__ x);
	  __imag__ y = t;
	}

      res = __clog (y);
      __real__ res += M_LN2;
    }
  else if (rx >= 0.5 && ix < DBL_EPSILON / 8.0)
    {
      /* Tiny imaginary part, moderate-to-large real part: compute via
	 real asinh (rx), with the imaginary part from atan2.  */
      double s = __ieee754_hypot (1.0, rx);

      __real__ res = __ieee754_log (rx + s);
      if (adj)
	__imag__ res = __ieee754_atan2 (s, __imag__ x);
      else
	__imag__ res = __ieee754_atan2 (ix, s);
    }
  else if (rx < DBL_EPSILON / 8.0 && ix >= 1.5)
    {
      /* Tiny real part, imaginary part >= 1.5: compute via real
	 acosh (ix); (ix+1)(ix-1) avoids cancellation in ix*ix - 1.  */
      double s = __ieee754_sqrt ((ix + 1.0) * (ix - 1.0));

      __real__ res = __ieee754_log (ix + s);
      if (adj)
	__imag__ res = __ieee754_atan2 (rx, __copysign (s, __imag__ x));
      else
	__imag__ res = __ieee754_atan2 (s, rx);
    }
  else if (ix > 1.0 && ix < 1.5 && rx < 0.5)
    {
      if (rx < DBL_EPSILON * DBL_EPSILON)
	{
	  /* rx is negligible relative to rounding of the rest.  */
	  double ix2m1 = (ix + 1.0) * (ix - 1.0);
	  double s = __ieee754_sqrt (ix2m1);

	  __real__ res = __log1p (2.0 * (ix2m1 + ix * s)) / 2.0;
	  if (adj)
	    __imag__ res = __ieee754_atan2 (rx, __copysign (s, __imag__ x));
	  else
	    __imag__ res = __ieee754_atan2 (s, rx);
	}
      else
	{
	  /* Compute sqrt (1 + x*x) = r1 + i*r2 carefully; d is
	     |1 + x*x|, split as dp + dm around ix2m1 to avoid
	     cancellation, since ix2m1 > 0 here.  */
	  double ix2m1 = (ix + 1.0) * (ix - 1.0);
	  double rx2 = rx * rx;
	  double f = rx2 * (2.0 + rx2 + 2.0 * ix * ix);
	  double d = __ieee754_sqrt (ix2m1 * ix2m1 + f);
	  double dp = d + ix2m1;
	  double dm = f / dp;
	  double r1 = __ieee754_sqrt ((dm + rx2) / 2.0);
	  double r2 = rx * ix / r1;

	  __real__ res
	    = __log1p (rx2 + dp + 2.0 * (rx * r1 + ix * r2)) / 2.0;
	  if (adj)
	    __imag__ res = __ieee754_atan2 (rx + r1, __copysign (ix + r2,
								 __imag__ x));
	  else
	    __imag__ res = __ieee754_atan2 (ix + r2, rx + r1);
	}
    }
  else if (ix == 1.0 && rx < 0.5)
    {
      if (rx < DBL_EPSILON / 8.0)
	{
	  /* On the branch point ix == 1 with tiny rx,
	     sqrt (1 + x*x) ~= sqrt (rx) * (1 + i).  */
	  __real__ res = __log1p (2.0 * (rx + __ieee754_sqrt (rx))) / 2.0;
	  if (adj)
	    __imag__ res = __ieee754_atan2 (__ieee754_sqrt (rx),
					    __copysign (1.0, __imag__ x));
	  else
	    __imag__ res = __ieee754_atan2 (1.0, __ieee754_sqrt (rx));
	}
      else
	{
	  /* ix == 1 exactly: 1 + x*x = rx*rx + 2*i*rx, whose square
	     root is s1 + i*(1 + s2) below.  */
	  double d = rx * __ieee754_sqrt (4.0 + rx * rx);
	  double s1 = __ieee754_sqrt ((d + rx * rx) / 2.0);
	  double s2 = __ieee754_sqrt ((d - rx * rx) / 2.0);

	  __real__ res = __log1p (rx * rx + d + 2.0 * (rx * s1 + s2)) / 2.0;
	  if (adj)
	    __imag__ res = __ieee754_atan2 (rx + s1, __copysign (1.0 + s2,
								 __imag__ x));
	  else
	    __imag__ res = __ieee754_atan2 (1.0 + s2, rx + s1);
	}
    }
  else if (ix < 1.0 && rx < 0.5)
    {
      if (ix >= DBL_EPSILON)
	{
	  if (rx < DBL_EPSILON * DBL_EPSILON)
	    {
	      /* rx is negligible; 1 + x*x ~= 1 - ix*ix, computed
		 without cancellation as (1+ix)(1-ix).  */
	      double onemix2 = (1.0 + ix) * (1.0 - ix);
	      double s = __ieee754_sqrt (onemix2);

	      __real__ res = __log1p (2.0 * rx / s) / 2.0;
	      if (adj)
		__imag__ res = __ieee754_atan2 (s, __imag__ x);
	      else
		__imag__ res = __ieee754_atan2 (ix, s);
	    }
	  else
	    {
	      /* As in the ix > 1 case, but here 1 - ix*ix > 0, so the
		 real part of the log1p argument uses dm, not dp.  */
	      double onemix2 = (1.0 + ix) * (1.0 - ix);
	      double rx2 = rx * rx;
	      double f = rx2 * (2.0 + rx2 + 2.0 * ix * ix);
	      double d = __ieee754_sqrt (onemix2 * onemix2 + f);
	      double dp = d + onemix2;
	      double dm = f / dp;
	      double r1 = __ieee754_sqrt ((dp + rx2) / 2.0);
	      double r2 = rx * ix / r1;

	      __real__ res
		= __log1p (rx2 + dm + 2.0 * (rx * r1 + ix * r2)) / 2.0;
	      if (adj)
		__imag__ res = __ieee754_atan2 (rx + r1,
						__copysign (ix + r2,
							    __imag__ x));
	      else
		__imag__ res = __ieee754_atan2 (ix + r2, rx + r1);
	    }
	}
      else
	{
	  /* ix is negligible: reduce to real asinh (rx), using log1p
	     to keep accuracy for small rx.  */
	  double s = __ieee754_hypot (1.0, rx);

	  __real__ res = __log1p (2.0 * rx * (rx + s)) / 2.0;
	  if (adj)
	    __imag__ res = __ieee754_atan2 (s, __imag__ x);
	  else
	    __imag__ res = __ieee754_atan2 (ix, s);
	}
      /* The real part may be tiny and inexact here; force any
	 underflow exception that a fused computation might miss.  */
      math_check_force_underflow_nonneg (__real__ res);
    }
  else
    {
      /* General case: log (x + csqrt (1 + x * x)), with (rx-ix)(rx+ix)
	 avoiding cancellation in rx*rx - ix*ix.  */
      __real__ y = (rx - ix) * (rx + ix) + 1.0;
      __imag__ y = 2.0 * rx * ix;

      y = __csqrt (y);

      __real__ y += rx;
      __imag__ y += ix;

      if (adj)
	{
	  double t = __real__ y;
	  __real__ y = __copysign (__imag__ y, __imag__ x);
	  __imag__ y = t;
	}

      res = __clog (y);
    }

  /* Give results the correct sign for the original argument.  */
  __real__ res = __copysign (__real__ res, __real__ x);
  __imag__ res = __copysign (__imag__ res, (adj ? 1.0 : __imag__ x));

  return res;
}