/* Constant folding for calls to built-in and internal functions.
Copyright (C) 1988-2017 Free Software Foundation, Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "realmpfr.h"
#include "tree.h"
#include "stor-layout.h"
#include "options.h"
#include "fold-const.h"
#include "fold-const-call.h"
#include "case-cfn-macros.h"
#include "tm.h" /* For C[LT]Z_DEFINED_AT_ZERO. */
#include "builtins.h"
#include "gimple-expr.h"
/* Functions that test for certain constant types, abstracting away the
decision about whether to check for overflow. */
static inline bool
integer_cst_p (tree t)
{
return TREE_CODE (t) == INTEGER_CST && !TREE_OVERFLOW (t);
}
static inline bool
real_cst_p (tree t)
{
return TREE_CODE (t) == REAL_CST && !TREE_OVERFLOW (t);
}
static inline bool
complex_cst_p (tree t)
{
return TREE_CODE (t) == COMPLEX_CST;
}
/* Return true if ARG is a constant in the range of the host size_t.
Store it in *SIZE_OUT if so. */
static inline bool
host_size_t_cst_p (tree t, size_t *size_out)
{
if (types_compatible_p (size_type_node, TREE_TYPE (t))
&& integer_cst_p (t)
&& wi::min_precision (t, UNSIGNED) <= sizeof (size_t) * CHAR_BIT)
{
*size_out = tree_to_uhwi (t);
return true;
}
return false;
}
/* RES is the result of a comparison in which < 0 means "less", 0 means
"equal" and > 0 means "more". Canonicalize it to -1, 0 or 1 and
return it in type TYPE. */
tree
build_cmp_result (tree type, int res)
{
return build_int_cst (type, res < 0 ? -1 : res > 0 ? 1 : 0);
}
/* M is the result of trying to constant-fold an expression (starting
with clear MPFR flags) and INEXACT says whether the result in M is
exact or inexact. Return true if M can be used as a constant-folded
result in format FORMAT, storing the value in *RESULT if so. */
static bool
do_mpfr_ckconv (real_value *result, mpfr_srcptr m, bool inexact,
const real_format *format)
{
/* Proceed iff we get a normal number, i.e. not NaN or Inf and no
overflow/underflow occurred. If -frounding-math, proceed iff the
result of calling FUNC was exact. */
if (!mpfr_number_p (m)
|| mpfr_overflow_p ()
|| mpfr_underflow_p ()
|| (flag_rounding_math && inexact))
return false;
REAL_VALUE_TYPE tmp;
real_from_mpfr (&tmp, m, format, GMP_RNDN);
/* Proceed iff GCC's REAL_VALUE_TYPE can hold the MPFR values.
If the REAL_VALUE_TYPE is zero but the mpfr_t is not, then we
underflowed in the conversion. */
if (!real_isfinite (&tmp)
|| ((tmp.cl == rvc_zero) != (mpfr_zero_p (m) != 0)))
return false;
real_convert (result, format, &tmp);
return real_identical (result, &tmp);
}
/* Try to evaluate:
*RESULT = f (*ARG)
in format FORMAT, given that FUNC is the MPFR implementation of f.
Return true on success. */
static bool
do_mpfr_arg1 (real_value *result,
int (*func) (mpfr_ptr, mpfr_srcptr, mpfr_rnd_t),
const real_value *arg, const real_format *format)
{
/* To proceed, MPFR must exactly represent the target floating point
format, which only happens when the target base equals two. */
if (format->b != 2 || !real_isfinite (arg))
return false;
int prec = format->p;
mp_rnd_t rnd = format->round_towards_zero ? GMP_RNDZ : GMP_RNDN;
mpfr_t m;
mpfr_init2 (m, prec);
mpfr_from_real (m, arg, GMP_RNDN);
mpfr_clear_flags ();
bool inexact = func (m, m, rnd);
bool ok = do_mpfr_ckconv (result, m, inexact, format);
mpfr_clear (m);
return ok;
}
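/* Illustrative sketch (not part of the original sources): for a 53-bit
   IEEE double target, the round trip performed by do_mpfr_arg1 for
   sin (0.5) corresponds roughly to the following plain MPFR usage:

     mpfr_t m;
     mpfr_init2 (m, 53);
     mpfr_set_d (m, 0.5, GMP_RNDN);
     int inexact = mpfr_sin (m, m, GMP_RNDN);
     double d = mpfr_get_d (m, GMP_RNDN);
     mpfr_clear (m);

   The helper above differs in that it converts to and from GCC's
   real_value representation and, via do_mpfr_ckconv, refuses to fold
   when the result overflowed, underflowed, or is inexact while
   -frounding-math is in effect.  */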
/* Try to evaluate:
*RESULT_SIN = sin (*ARG);
*RESULT_COS = cos (*ARG);
for format FORMAT. Return true on success. */
static bool
do_mpfr_sincos (real_value *result_sin, real_value *result_cos,
const real_value *arg, const real_format *format)
{
/* To proceed, MPFR must exactly represent the target floating point
format, which only happens when the target base equals two. */
if (format->b != 2 || !real_isfinite (arg))
return false;
int prec = format->p;
mp_rnd_t rnd = format->round_towards_zero ? GMP_RNDZ : GMP_RNDN;
mpfr_t m, ms, mc;
mpfr_inits2 (prec, m, ms, mc, NULL);
mpfr_from_real (m, arg, GMP_RNDN);
mpfr_clear_flags ();
bool inexact = mpfr_sin_cos (ms, mc, m, rnd);
bool ok = (do_mpfr_ckconv (result_sin, ms, inexact, format)
&& do_mpfr_ckconv (result_cos, mc, inexact, format));
mpfr_clears (m, ms, mc, NULL);
return ok;
}
/* Try to evaluate:
*RESULT = f (*ARG0, *ARG1)
in format FORMAT, given that FUNC is the MPFR implementation of f.
Return true on success. */
static bool
do_mpfr_arg2 (real_value *result,
int (*func) (mpfr_ptr, mpfr_srcptr, mpfr_srcptr, mpfr_rnd_t),
const real_value *arg0, const real_value *arg1,
const real_format *format)
{
/* To proceed, MPFR must exactly represent the target floating point
format, which only happens when the target base equals two. */
if (format->b != 2 || !real_isfinite (arg0) || !real_isfinite (arg1))
return false;
int prec = format->p;
mp_rnd_t rnd = format->round_towards_zero ? GMP_RNDZ : GMP_RNDN;
mpfr_t m0, m1;
mpfr_inits2 (prec, m0, m1, NULL);
mpfr_from_real (m0, arg0, GMP_RNDN);
mpfr_from_real (m1, arg1, GMP_RNDN);
mpfr_clear_flags ();
bool inexact = func (m0, m0, m1, rnd);
bool ok = do_mpfr_ckconv (result, m0, inexact, format);
mpfr_clears (m0, m1, NULL);
return ok;
}
/* Try to evaluate:
*RESULT = f (ARG0, *ARG1)
in format FORMAT, given that FUNC is the MPFR implementation of f.
Return true on success. */
static bool
do_mpfr_arg2 (real_value *result,
int (*func) (mpfr_ptr, long, mpfr_srcptr, mp_rnd_t),
const wide_int_ref &arg0, const real_value *arg1,
const real_format *format)
{
if (format->b != 2 || !real_isfinite (arg1))
return false;
int prec = format->p;
mp_rnd_t rnd = format->round_towards_zero ? GMP_RNDZ : GMP_RNDN;
mpfr_t m;
mpfr_init2 (m, prec);
mpfr_from_real (m, arg1, GMP_RNDN);
mpfr_clear_flags ();
bool inexact = func (m, arg0.to_shwi (), m, rnd);
bool ok = do_mpfr_ckconv (result, m, inexact, format);
mpfr_clear (m);
return ok;
}
/* Try to evaluate:
*RESULT = f (*ARG0, *ARG1, *ARG2)
in format FORMAT, given that FUNC is the MPFR implementation of f.
Return true on success. */
static bool
do_mpfr_arg3 (real_value *result,
int (*func) (mpfr_ptr, mpfr_srcptr, mpfr_srcptr,
mpfr_srcptr, mpfr_rnd_t),
const real_value *arg0, const real_value *arg1,
const real_value *arg2, const real_format *format)
{
/* To proceed, MPFR must exactly represent the target floating point
format, which only happens when the target base equals two. */
if (format->b != 2
|| !real_isfinite (arg0)
|| !real_isfinite (arg1)
|| !real_isfinite (arg2))
return false;
int prec = format->p;
mp_rnd_t rnd = format->round_towards_zero ? GMP_RNDZ : GMP_RNDN;
mpfr_t m0, m1, m2;
mpfr_inits2 (prec, m0, m1, m2, NULL);
mpfr_from_real (m0, arg0, GMP_RNDN);
mpfr_from_real (m1, arg1, GMP_RNDN);
mpfr_from_real (m2, arg2, GMP_RNDN);
mpfr_clear_flags ();
bool inexact = func (m0, m0, m1, m2, rnd);
bool ok = do_mpfr_ckconv (result, m0, inexact, format);
mpfr_clears (m0, m1, m2, NULL);
return ok;
}
/* M is the result of trying to constant-fold an expression (starting
with clear MPFR flags) and INEXACT says whether the result in M is
exact or inexact. Return true if M can be used as a constant-folded
result in which the real and imaginary parts have format FORMAT.
Store those parts in *RESULT_REAL and *RESULT_IMAG if so. */
static bool
do_mpc_ckconv (real_value *result_real, real_value *result_imag,
mpc_srcptr m, bool inexact, const real_format *format)
{
/* Proceed iff we get a normal number, i.e. not NaN or Inf and no
overflow/underflow occurred. If -frounding-math, proceed iff the
result of calling FUNC was exact. */
if (!mpfr_number_p (mpc_realref (m))
|| !mpfr_number_p (mpc_imagref (m))
|| mpfr_overflow_p ()
|| mpfr_underflow_p ()
|| (flag_rounding_math && inexact))
return false;
REAL_VALUE_TYPE tmp_real, tmp_imag;
real_from_mpfr (&tmp_real, mpc_realref (m), format, GMP_RNDN);
real_from_mpfr (&tmp_imag, mpc_imagref (m), format, GMP_RNDN);
/* Proceed iff GCC's REAL_VALUE_TYPE can hold the MPFR values.
If the REAL_VALUE_TYPE is zero but the mpfr_t is not, then we
underflowed in the conversion. */
if (!real_isfinite (&tmp_real)
|| !real_isfinite (&tmp_imag)
|| (tmp_real.cl == rvc_zero) != (mpfr_zero_p (mpc_realref (m)) != 0)
|| (tmp_imag.cl == rvc_zero) != (mpfr_zero_p (mpc_imagref (m)) != 0))
return false;
real_convert (result_real, format, &tmp_real);
real_convert (result_imag, format, &tmp_imag);
return (real_identical (result_real, &tmp_real)
&& real_identical (result_imag, &tmp_imag));
}
/* Try to evaluate:
RESULT = f (ARG)
in format FORMAT, given that FUNC is the mpc implementation of f.
Return true on success. Both RESULT and ARG are represented as
real and imaginary pairs. */
static bool
do_mpc_arg1 (real_value *result_real, real_value *result_imag,
int (*func) (mpc_ptr, mpc_srcptr, mpc_rnd_t),
const real_value *arg_real, const real_value *arg_imag,
const real_format *format)
{
/* To proceed, MPFR must exactly represent the target floating point
format, which only happens when the target base equals two. */
if (format->b != 2
|| !real_isfinite (arg_real)
|| !real_isfinite (arg_imag))
return false;
int prec = format->p;
mpc_rnd_t crnd = format->round_towards_zero ? MPC_RNDZZ : MPC_RNDNN;
mpc_t m;
mpc_init2 (m, prec);
mpfr_from_real (mpc_realref (m), arg_real, GMP_RNDN);
mpfr_from_real (mpc_imagref (m), arg_imag, GMP_RNDN);
mpfr_clear_flags ();
bool inexact = func (m, m, crnd);
bool ok = do_mpc_ckconv (result_real, result_imag, m, inexact, format);
mpc_clear (m);
return ok;
}
/* Try to evaluate:
RESULT = f (ARG0, ARG1)
in format FORMAT, given that FUNC is the mpc implementation of f.
Return true on success. RESULT, ARG0 and ARG1 are represented as
real and imaginary pairs. */
static bool
do_mpc_arg2 (real_value *result_real, real_value *result_imag,
int (*func)(mpc_ptr, mpc_srcptr, mpc_srcptr, mpc_rnd_t),
const real_value *arg0_real, const real_value *arg0_imag,
const real_value *arg1_real, const real_value *arg1_imag,
const real_format *format)
{
if (!real_isfinite (arg0_real)
|| !real_isfinite (arg0_imag)
|| !real_isfinite (arg1_real)
|| !real_isfinite (arg1_imag))
return false;
int prec = format->p;
mpc_rnd_t crnd = format->round_towards_zero ? MPC_RNDZZ : MPC_RNDNN;
mpc_t m0, m1;
mpc_init2 (m0, prec);
mpc_init2 (m1, prec);
mpfr_from_real (mpc_realref (m0), arg0_real, GMP_RNDN);
mpfr_from_real (mpc_imagref (m0), arg0_imag, GMP_RNDN);
mpfr_from_real (mpc_realref (m1), arg1_real, GMP_RNDN);
mpfr_from_real (mpc_imagref (m1), arg1_imag, GMP_RNDN);
mpfr_clear_flags ();
bool inexact = func (m0, m0, m1, crnd);
bool ok = do_mpc_ckconv (result_real, result_imag, m0, inexact, format);
mpc_clear (m0);
mpc_clear (m1);
return ok;
}
/* Try to evaluate:
*RESULT = logb (*ARG)
in format FORMAT. Return true on success. */
static bool
fold_const_logb (real_value *result, const real_value *arg,
const real_format *format)
{
switch (arg->cl)
{
case rvc_nan:
/* If arg is +-NaN, then return it. */
*result = *arg;
return true;
case rvc_inf:
/* If arg is +-Inf, then return +Inf. */
*result = *arg;
result->sign = 0;
return true;
case rvc_zero:
/* Zero may set errno and/or raise an exception. */
return false;
case rvc_normal:
/* For normal numbers, proceed iff radix == 2. In GCC,
normalized significands are in the range [0.5, 1.0). We
want the exponent as if they were [1.0, 2.0) so get the
exponent and subtract 1. */
if (format->b == 2)
{
real_from_integer (result, format, REAL_EXP (arg) - 1, SIGNED);
return true;
}
return false;
}
gcc_unreachable ();
}
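/* Worked example (illustrative): for ARG == 8.0 in a radix-2 format,
   GCC normalizes the significand into [0.5, 1.0), so 8.0 is stored as
   0.5 * 2^4 and REAL_EXP (ARG) is 4; logb (8.0) therefore folds to
   4 - 1 = 3.0.  */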
/* Try to evaluate:
*RESULT = significand (*ARG)
in format FORMAT. Return true on success. */
static bool
fold_const_significand (real_value *result, const real_value *arg,
const real_format *format)
{
switch (arg->cl)
{
case rvc_zero:
case rvc_nan:
case rvc_inf:
/* If arg is +-0, +-Inf or +-NaN, then return it. */
*result = *arg;
return true;
case rvc_normal:
/* For normal numbers, proceed iff radix == 2. */
if (format->b == 2)
{
*result = *arg;
/* In GCC, normalized significands are in the range [0.5, 1.0).
We want them to be [1.0, 2.0) so set the exponent to 1. */
SET_REAL_EXP (result, 1);
return true;
}
return false;
}
gcc_unreachable ();
}
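/* Worked example (illustrative): significand (8.0) folds to 1.0,
   because forcing the exponent to 1 rewrites 0.5 * 2^4 as
   0.5 * 2^1 = 1.0, i.e. the value scaled into [1.0, 2.0).  */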
/* Try to evaluate:
*RESULT = f (*ARG)
where FORMAT is the format of *ARG and PRECISION is the number of
significant bits in the result. Return true on success. */
static bool
fold_const_conversion (wide_int *result,
void (*fn) (real_value *, format_helper,
const real_value *),
const real_value *arg, unsigned int precision,
const real_format *format)
{
if (!real_isfinite (arg))
return false;
real_value rounded;
fn (&rounded, format, arg);
bool fail = false;
*result = real_to_integer (&rounded, &fail, precision);
return !fail;
}
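/* Illustrative example: folding lround (2.6) reaches this helper with
   FN == real_round; the rounded value 3.0 converts to the integer 3
   exactly.  If the rounded value does not fit in PRECISION bits,
   real_to_integer sets FAIL and the call is left unfolded.  */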
/* Try to evaluate:
*RESULT = pow (*ARG0, *ARG1)
in format FORMAT. Return true on success. */
static bool
fold_const_pow (real_value *result, const real_value *arg0,
const real_value *arg1, const real_format *format)
{
if (do_mpfr_arg2 (result, mpfr_pow, arg0, arg1, format))
return true;
/* Check for an integer exponent. */
REAL_VALUE_TYPE cint1;
HOST_WIDE_INT n1 = real_to_integer (arg1);
real_from_integer (&cint1, VOIDmode, n1, SIGNED);
/* Attempt to evaluate pow at compile-time, unless this should
raise an exception. */
if (real_identical (arg1, &cint1)
&& (n1 > 0
|| (!flag_trapping_math && !flag_errno_math)
|| !real_equal (arg0, &dconst0)))
{
bool inexact = real_powi (result, format, arg0, n1);
/* Avoid the folding if flag_signaling_nans is on and the operand
   is a signaling NaN. */
if (flag_unsafe_math_optimizations
|| (!inexact
&& !(flag_signaling_nans
&& REAL_VALUE_ISSIGNALING_NAN (*arg0))))
return true;
}
return false;
}
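/* Illustrative examples: pow (2.0, 3.0) folds to 8.0 (either through
   MPFR or through the exact real_powi path, since 3.0 is an integer
   value), whereas pow (0.0, -1.0) is left unfolded under
   -ftrapping-math or -fmath-errno because evaluating it at run time
   may raise an exception or set errno.  */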
/* Try to evaluate:
*RESULT = ldexp (*ARG0, ARG1)
in format FORMAT. Return true on success. */
static bool
fold_const_builtin_load_exponent (real_value *result, const real_value *arg0,
const wide_int_ref &arg1,
const real_format *format)
{
/* Bound the maximum adjustment to twice the range of the
mode's valid exponents. Use abs to ensure the range is
positive as a sanity check. */
int max_exp_adj = 2 * labs (format->emax - format->emin);
/* The requested adjustment must be inside this range. This
is a preliminary cap to avoid things like overflow, we
may still fail to compute the result for other reasons. */
if (wi::les_p (arg1, -max_exp_adj) || wi::ges_p (arg1, max_exp_adj))
return false;
/* Don't perform operation if we honor signaling NaNs and
operand is a signaling NaN. */
if (!flag_unsafe_math_optimizations
&& flag_signaling_nans
&& REAL_VALUE_ISSIGNALING_NAN (*arg0))
return false;
REAL_VALUE_TYPE initial_result;
real_ldexp (&initial_result, arg0, arg1.to_shwi ());
/* Ensure we didn't overflow. */
if (real_isinf (&initial_result))
return false;
/* Only proceed if the target mode can hold the
resulting value. */
*result = real_value_truncate (format, initial_result);
return real_equal (&initial_result, result);
}
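/* Illustrative examples: ldexp (1.5, 4) folds to 24.0, and
   scalbn (1.5, 4) folds the same way when the format radix is 2.
   A request such as ldexp (1.0, 1000000) fails the exponent-range cap
   above and is left unfolded.  */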
/* Fold a call to __builtin_nan or __builtin_nans with argument ARG and
return type TYPE. QUIET is true if a quiet rather than signalling
NaN is required. */
static tree
fold_const_builtin_nan (tree type, tree arg, bool quiet)
{
REAL_VALUE_TYPE real;
const char *str = c_getstr (arg);
if (str && real_nan (&real, str, quiet, TYPE_MODE (type)))
return build_real (type, real);
return NULL_TREE;
}
/* Try to evaluate:
*RESULT = FN (*ARG)
in format FORMAT. Return true on success. */
static bool
fold_const_call_ss (real_value *result, combined_fn fn,
const real_value *arg, const real_format *format)
{
switch (fn)
{
CASE_CFN_SQRT:
return (real_compare (GE_EXPR, arg, &dconst0)
&& do_mpfr_arg1 (result, mpfr_sqrt, arg, format));
CASE_CFN_CBRT:
return do_mpfr_arg1 (result, mpfr_cbrt, arg, format);
CASE_CFN_ASIN:
return (real_compare (GE_EXPR, arg, &dconstm1)
&& real_compare (LE_EXPR, arg, &dconst1)
&& do_mpfr_arg1 (result, mpfr_asin, arg, format));
CASE_CFN_ACOS:
return (real_compare (GE_EXPR, arg, &dconstm1)
&& real_compare (LE_EXPR, arg, &dconst1)
&& do_mpfr_arg1 (result, mpfr_acos, arg, format));
CASE_CFN_ATAN:
return do_mpfr_arg1 (result, mpfr_atan, arg, format);
CASE_CFN_ASINH:
return do_mpfr_arg1 (result, mpfr_asinh, arg, format);
CASE_CFN_ACOSH:
return (real_compare (GE_EXPR, arg, &dconst1)
&& do_mpfr_arg1 (result, mpfr_acosh, arg, format));
CASE_CFN_ATANH:
return (real_compare (GE_EXPR, arg, &dconstm1)
&& real_compare (LE_EXPR, arg, &dconst1)
&& do_mpfr_arg1 (result, mpfr_atanh, arg, format));
CASE_CFN_SIN:
return do_mpfr_arg1 (result, mpfr_sin, arg, format);
CASE_CFN_COS:
return do_mpfr_arg1 (result, mpfr_cos, arg, format);
CASE_CFN_TAN:
return do_mpfr_arg1 (result, mpfr_tan, arg, format);
CASE_CFN_SINH:
return do_mpfr_arg1 (result, mpfr_sinh, arg, format);
CASE_CFN_COSH:
return do_mpfr_arg1 (result, mpfr_cosh, arg, format);
CASE_CFN_TANH:
return do_mpfr_arg1 (result, mpfr_tanh, arg, format);
CASE_CFN_ERF:
return do_mpfr_arg1 (result, mpfr_erf, arg, format);
CASE_CFN_ERFC:
return do_mpfr_arg1 (result, mpfr_erfc, arg, format);
CASE_CFN_TGAMMA:
return do_mpfr_arg1 (result, mpfr_gamma, arg, format);
CASE_CFN_EXP:
return do_mpfr_arg1 (result, mpfr_exp, arg, format);
CASE_CFN_EXP2:
return do_mpfr_arg1 (result, mpfr_exp2, arg, format);
CASE_CFN_EXP10:
CASE_CFN_POW10:
return do_mpfr_arg1 (result, mpfr_exp10, arg, format);
CASE_CFN_EXPM1:
return do_mpfr_arg1 (result, mpfr_expm1, arg, format);
CASE_CFN_LOG:
return (real_compare (GT_EXPR, arg, &dconst0)
&& do_mpfr_arg1 (result, mpfr_log, arg, format));
CASE_CFN_LOG2:
return (real_compare (GT_EXPR, arg, &dconst0)
&& do_mpfr_arg1 (result, mpfr_log2, arg, format));
CASE_CFN_LOG10:
return (real_compare (GT_EXPR, arg, &dconst0)
&& do_mpfr_arg1 (result, mpfr_log10, arg, format));
CASE_CFN_LOG1P:
return (real_compare (GT_EXPR, arg, &dconstm1)
&& do_mpfr_arg1 (result, mpfr_log1p, arg, format));
CASE_CFN_J0:
return do_mpfr_arg1 (result, mpfr_j0, arg, format);
CASE_CFN_J1:
return do_mpfr_arg1 (result, mpfr_j1, arg, format);
CASE_CFN_Y0:
return (real_compare (GT_EXPR, arg, &dconst0)
&& do_mpfr_arg1 (result, mpfr_y0, arg, format));
CASE_CFN_Y1:
return (real_compare (GT_EXPR, arg, &dconst0)
&& do_mpfr_arg1 (result, mpfr_y1, arg, format));
CASE_CFN_FLOOR:
if (!REAL_VALUE_ISNAN (*arg) || !flag_errno_math)
{
real_floor (result, format, arg);
return true;
}
return false;
CASE_CFN_CEIL:
if (!REAL_VALUE_ISNAN (*arg) || !flag_errno_math)
{
real_ceil (result, format, arg);
return true;
}
return false;
CASE_CFN_TRUNC:
real_trunc (result, format, arg);
return true;
CASE_CFN_ROUND:
if (!REAL_VALUE_ISNAN (*arg) || !flag_errno_math)
{
real_round (result, format, arg);
return true;
}
return false;
CASE_CFN_LOGB:
return fold_const_logb (result, arg, format);
CASE_CFN_SIGNIFICAND:
return fold_const_significand (result, arg, format);
default:
return false;
}
}
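/* Illustrative examples of the range checks above: sqrt (4.0) and
   log (2.0) fold through MPFR, while sqrt (-1.0) and log (0.0) are
   deliberately left unfolded, since evaluating them at run time may
   raise an exception or set errno.  */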
/* Try to evaluate:
*RESULT = FN (*ARG)
where FORMAT is the format of ARG and PRECISION is the number of
significant bits in the result. Return true on success. */
static bool
fold_const_call_ss (wide_int *result, combined_fn fn,
const real_value *arg, unsigned int precision,
const real_format *format)
{
switch (fn)
{
CASE_CFN_SIGNBIT:
if (real_isneg (arg))
*result = wi::one (precision);
else
*result = wi::zero (precision);
return true;
CASE_CFN_ILOGB:
/* For ilogb we don't know FP_ILOGB0, so only handle normal values.
Proceed iff radix == 2. In GCC, normalized significands are in
the range [0.5, 1.0). We want the exponent as if they were
[1.0, 2.0) so get the exponent and subtract 1. */
if (arg->cl == rvc_normal && format->b == 2)
{
*result = wi::shwi (REAL_EXP (arg) - 1, precision);
return true;
}
return false;
CASE_CFN_ICEIL:
CASE_CFN_LCEIL:
CASE_CFN_LLCEIL:
return fold_const_conversion (result, real_ceil, arg,
precision, format);
CASE_CFN_LFLOOR:
CASE_CFN_IFLOOR:
CASE_CFN_LLFLOOR:
return fold_const_conversion (result, real_floor, arg,
precision, format);
CASE_CFN_IROUND:
CASE_CFN_LROUND:
CASE_CFN_LLROUND:
return fold_const_conversion (result, real_round, arg,
precision, format);
CASE_CFN_IRINT:
CASE_CFN_LRINT:
CASE_CFN_LLRINT:
/* Not yet folded to a constant. */
return false;
CASE_CFN_FINITE:
case CFN_BUILT_IN_FINITED32:
case CFN_BUILT_IN_FINITED64:
case CFN_BUILT_IN_FINITED128:
case CFN_BUILT_IN_ISFINITE:
*result = wi::shwi (real_isfinite (arg) ? 1 : 0, precision);
return true;
CASE_CFN_ISINF:
case CFN_BUILT_IN_ISINFD32:
case CFN_BUILT_IN_ISINFD64:
case CFN_BUILT_IN_ISINFD128:
if (real_isinf (arg))
*result = wi::shwi (arg->sign ? -1 : 1, precision);
else
*result = wi::shwi (0, precision);
return true;
CASE_CFN_ISNAN:
case CFN_BUILT_IN_ISNAND32:
case CFN_BUILT_IN_ISNAND64:
case CFN_BUILT_IN_ISNAND128:
*result = wi::shwi (real_isnan (arg) ? 1 : 0, precision);
return true;
default:
return false;
}
}
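/* Illustrative examples: signbit (-0.0) folds to 1 because only the
   sign bit of the argument is inspected, and ilogb (8.0) folds to 3
   by the same exponent arithmetic used for logb above.  */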
/* Try to evaluate:
*RESULT = FN (ARG)
where ARG_TYPE is the type of ARG and PRECISION is the number of bits
in the result. Return true on success. */
static bool
fold_const_call_ss (wide_int *result, combined_fn fn, const wide_int_ref &arg,
unsigned int precision, tree arg_type)
{
switch (fn)
{
CASE_CFN_FFS:
*result = wi::shwi (wi::ffs (arg), precision);
return true;
CASE_CFN_CLZ:
{
int tmp;
if (wi::ne_p (arg, 0))
tmp = wi::clz (arg);
else if (! CLZ_DEFINED_VALUE_AT_ZERO (TYPE_MODE (arg_type), tmp))
tmp = TYPE_PRECISION (arg_type);
*result = wi::shwi (tmp, precision);
return true;
}
CASE_CFN_CTZ:
{
int tmp;
if (wi::ne_p (arg, 0))
tmp = wi::ctz (arg);
else if (! CTZ_DEFINED_VALUE_AT_ZERO (TYPE_MODE (arg_type), tmp))
tmp = TYPE_PRECISION (arg_type);
*result = wi::shwi (tmp, precision);
return true;
}
CASE_CFN_CLRSB:
*result = wi::shwi (wi::clrsb (arg), precision);
return true;
CASE_CFN_POPCOUNT:
*result = wi::shwi (wi::popcount (arg), precision);
return true;
CASE_CFN_PARITY:
*result = wi::shwi (wi::parity (arg), precision);
return true;
case CFN_BUILT_IN_BSWAP16:
case CFN_BUILT_IN_BSWAP32:
case CFN_BUILT_IN_BSWAP64:
*result = wide_int::from (arg, precision, TYPE_SIGN (arg_type)).bswap ();
return true;
default:
return false;
}
}
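/* Illustrative examples: __builtin_ffs (8) folds to 4 and
   __builtin_popcount (255) folds to 8.  __builtin_clz (0) folds to the
   value supplied by CLZ_DEFINED_VALUE_AT_ZERO when the target defines
   one, and to the argument precision otherwise.  */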
/* Try to evaluate:
RESULT = FN (*ARG)
where FORMAT is the format of ARG and of the real and imaginary parts
of RESULT, passed as RESULT_REAL and RESULT_IMAG respectively. Return
true on success. */
static bool
fold_const_call_cs (real_value *result_real, real_value *result_imag,
combined_fn fn, const real_value *arg,
const real_format *format)
{
switch (fn)
{
CASE_CFN_CEXPI:
/* cexpi(x) = cos(x)+sin(x)*i. */
return do_mpfr_sincos (result_imag, result_real, arg, format);
default:
return false;
}
}
/* Try to evaluate:
*RESULT = fn (ARG)
where FORMAT is the format of RESULT and of the real and imaginary parts
of ARG, passed as ARG_REAL and ARG_IMAG respectively. Return true on
success. */
static bool
fold_const_call_sc (real_value *result, combined_fn fn,
const real_value *arg_real, const real_value *arg_imag,
const real_format *format)
{
switch (fn)
{
CASE_CFN_CABS:
return do_mpfr_arg2 (result, mpfr_hypot, arg_real, arg_imag, format);
default:
return false;
}
}
/* Try to evaluate:
RESULT = fn (ARG)
where FORMAT is the format of the real and imaginary parts of RESULT
(RESULT_REAL and RESULT_IMAG) and of ARG (ARG_REAL and ARG_IMAG).
Return true on success. */
static bool
fold_const_call_cc (real_value *result_real, real_value *result_imag,
combined_fn fn, const real_value *arg_real,
const real_value *arg_imag, const real_format *format)
{
switch (fn)
{
CASE_CFN_CCOS:
return do_mpc_arg1 (result_real, result_imag, mpc_cos,
arg_real, arg_imag, format);
CASE_CFN_CCOSH:
return do_mpc_arg1 (result_real, result_imag, mpc_cosh,
arg_real, arg_imag, format);
CASE_CFN_CPROJ:
if (real_isinf (arg_real) || real_isinf (arg_imag))
{
real_inf (result_real);
*result_imag = dconst0;
result_imag->sign = arg_imag->sign;
}
else
{
*result_real = *arg_real;
*result_imag = *arg_imag;
}
return true;
CASE_CFN_CSIN:
return do_mpc_arg1 (result_real, result_imag, mpc_sin,
arg_real, arg_imag, format);
CASE_CFN_CSINH:
return do_mpc_arg1 (result_real, result_imag, mpc_sinh,
arg_real, arg_imag, format);
CASE_CFN_CTAN:
return do_mpc_arg1 (result_real, result_imag, mpc_tan,
arg_real, arg_imag, format);
CASE_CFN_CTANH:
return do_mpc_arg1 (result_real, result_imag, mpc_tanh,
arg_real, arg_imag, format);
CASE_CFN_CLOG:
return do_mpc_arg1 (result_real, result_imag, mpc_log,
arg_real, arg_imag, format);
CASE_CFN_CSQRT:
return do_mpc_arg1 (result_real, result_imag, mpc_sqrt,
arg_real, arg_imag, format);
CASE_CFN_CASIN:
return do_mpc_arg1 (result_real, result_imag, mpc_asin,
arg_real, arg_imag, format);
CASE_CFN_CACOS:
return do_mpc_arg1 (result_real, result_imag, mpc_acos,
arg_real, arg_imag, format);
CASE_CFN_CATAN:
return do_mpc_arg1 (result_real, result_imag, mpc_atan,
arg_real, arg_imag, format);
CASE_CFN_CASINH:
return do_mpc_arg1 (result_real, result_imag, mpc_asinh,
arg_real, arg_imag, format);
CASE_CFN_CACOSH:
return do_mpc_arg1 (result_real, result_imag, mpc_acosh,
arg_real, arg_imag, format);
CASE_CFN_CATANH:
return do_mpc_arg1 (result_real, result_imag, mpc_atanh,
arg_real, arg_imag, format);
CASE_CFN_CEXP:
return do_mpc_arg1 (result_real, result_imag, mpc_exp,
arg_real, arg_imag, format);
default:
return false;
}
}
/* Subroutine of fold_const_call, with the same interface. Handle cases
where the arguments and result are numerical. */
static tree
fold_const_call_1 (combined_fn fn, tree type, tree arg)
{
machine_mode mode = TYPE_MODE (type);
machine_mode arg_mode = TYPE_MODE (TREE_TYPE (arg));
if (integer_cst_p (arg))
{
if (SCALAR_INT_MODE_P (mode))
{
wide_int result;
if (fold_const_call_ss (&result, fn, arg, TYPE_PRECISION (type),
TREE_TYPE (arg)))
return wide_int_to_tree (type, result);
}
return NULL_TREE;
}
if (real_cst_p (arg))
{
gcc_checking_assert (SCALAR_FLOAT_MODE_P (arg_mode));
if (mode == arg_mode)
{
/* real -> real. */
REAL_VALUE_TYPE result;
if (fold_const_call_ss (&result, fn, TREE_REAL_CST_PTR (arg),
REAL_MODE_FORMAT (mode)))
return build_real (type, result);
}
else if (COMPLEX_MODE_P (mode)
&& GET_MODE_INNER (mode) == arg_mode)
{
/* real -> complex real. */
REAL_VALUE_TYPE result_real, result_imag;
if (fold_const_call_cs (&result_real, &result_imag, fn,
TREE_REAL_CST_PTR (arg),
REAL_MODE_FORMAT (arg_mode)))
return build_complex (type,
build_real (TREE_TYPE (type), result_real),
build_real (TREE_TYPE (type), result_imag));
}
else if (INTEGRAL_TYPE_P (type))
{
/* real -> int. */
wide_int result;
if (fold_const_call_ss (&result, fn,
TREE_REAL_CST_PTR (arg),
TYPE_PRECISION (type),
REAL_MODE_FORMAT (arg_mode)))
return wide_int_to_tree (type, result);
}
return NULL_TREE;
}
if (complex_cst_p (arg))
{
gcc_checking_assert (COMPLEX_MODE_P (arg_mode));
machine_mode inner_mode = GET_MODE_INNER (arg_mode);
tree argr = TREE_REALPART (arg);
tree argi = TREE_IMAGPART (arg);
if (mode == arg_mode
&& real_cst_p (argr)
&& real_cst_p (argi))
{
/* complex real -> complex real. */
REAL_VALUE_TYPE result_real, result_imag;
if (fold_const_call_cc (&result_real, &result_imag, fn,
TREE_REAL_CST_PTR (argr),
TREE_REAL_CST_PTR (argi),
REAL_MODE_FORMAT (inner_mode)))
return build_complex (type,
build_real (TREE_TYPE (type), result_real),
build_real (TREE_TYPE (type), result_imag));
}
if (mode == inner_mode
&& real_cst_p (argr)
&& real_cst_p (argi))
{
/* complex real -> real. */
REAL_VALUE_TYPE result;
if (fold_const_call_sc (&result, fn,
TREE_REAL_CST_PTR (argr),
TREE_REAL_CST_PTR (argi),
REAL_MODE_FORMAT (inner_mode)))
return build_real (type, result);
}
return NULL_TREE;
}
return NULL_TREE;
}
/* Try to fold FN (ARG) to a constant. Return the constant on success,
otherwise return null. TYPE is the type of the return value. */
tree
fold_const_call (combined_fn fn, tree type, tree arg)
{
switch (fn)
{
case CFN_BUILT_IN_STRLEN:
if (const char *str = c_getstr (arg))
return build_int_cst (type, strlen (str));
return NULL_TREE;
CASE_CFN_NAN:
CASE_FLT_FN_FLOATN_NX (CFN_BUILT_IN_NAN):
case CFN_BUILT_IN_NAND32:
case CFN_BUILT_IN_NAND64:
case CFN_BUILT_IN_NAND128:
return fold_const_builtin_nan (type, arg, true);
CASE_CFN_NANS:
CASE_FLT_FN_FLOATN_NX (CFN_BUILT_IN_NANS):
return fold_const_builtin_nan (type, arg, false);
default:
return fold_const_call_1 (fn, type, arg);
}
}
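/* Illustrative examples: folding CFN_BUILT_IN_STRLEN with the constant
   string argument "abc" yields the integer constant 3, and
   __builtin_nan ("") yields a quiet-NaN REAL_CST; everything else is
   dispatched to the numerical cases in fold_const_call_1.  */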
/* Try to evaluate:
*RESULT = FN (*ARG0, *ARG1)
in format FORMAT. Return true on success. */
static bool
fold_const_call_sss (real_value *result, combined_fn fn,
const real_value *arg0, const real_value *arg1,
const real_format *format)
{
switch (fn)
{
CASE_CFN_DREM:
CASE_CFN_REMAINDER:
return do_mpfr_arg2 (result, mpfr_remainder, arg0, arg1, format);
CASE_CFN_ATAN2:
return do_mpfr_arg2 (result, mpfr_atan2, arg0, arg1, format);
CASE_CFN_FDIM:
return do_mpfr_arg2 (result, mpfr_dim, arg0, arg1, format);
CASE_CFN_HYPOT:
return do_mpfr_arg2 (result, mpfr_hypot, arg0, arg1, format);
CASE_CFN_COPYSIGN:
*result = *arg0;
real_copysign (result, arg1);
return true;
CASE_CFN_FMIN:
return do_mpfr_arg2 (result, mpfr_min, arg0, arg1, format);
CASE_CFN_FMAX:
return do_mpfr_arg2 (result, mpfr_max, arg0, arg1, format);
CASE_CFN_POW:
return fold_const_pow (result, arg0, arg1, format);
default:
return false;
}
}
/* Try to evaluate:
*RESULT = FN (*ARG0, ARG1)
where FORMAT is the format of *RESULT and *ARG0. Return true on
success. */
static bool
fold_const_call_sss (real_value *result, combined_fn fn,
const real_value *arg0, const wide_int_ref &arg1,
const real_format *format)
{
switch (fn)
{
CASE_CFN_LDEXP:
return fold_const_builtin_load_exponent (result, arg0, arg1, format);
CASE_CFN_SCALBN:
CASE_CFN_SCALBLN:
return (format->b == 2
&& fold_const_builtin_load_exponent (result, arg0, arg1,
format));
CASE_CFN_POWI:
/* Avoid the folding if flag_signaling_nans is on and
operand is a signaling NaN. */
if (!flag_unsafe_math_optimizations
&& flag_signaling_nans
&& REAL_VALUE_ISSIGNALING_NAN (*arg0))
return false;
real_powi (result, format, arg0, arg1.to_shwi ());
return true;
default:
return false;
}
}
/* Try to evaluate:
*RESULT = FN (ARG0, *ARG1)
where FORMAT is the format of *RESULT and *ARG1. Return true on
success. */
static bool
fold_const_call_sss (real_value *result, combined_fn fn,
const wide_int_ref &arg0, const real_value *arg1,
const real_format *format)
{
switch (fn)
{
CASE_CFN_JN:
return do_mpfr_arg2 (result, mpfr_jn, arg0, arg1, format);
CASE_CFN_YN:
return (real_compare (GT_EXPR, arg1, &dconst0)
&& do_mpfr_arg2 (result, mpfr_yn, arg0, arg1, format));
default:
return false;
}
}
/* Try to evaluate:
RESULT = fn (ARG0, ARG1)
where FORMAT is the format of the real and imaginary parts of RESULT
(RESULT_REAL and RESULT_IMAG), of ARG0 (ARG0_REAL and ARG0_IMAG)
and of ARG1 (ARG1_REAL and ARG1_IMAG). Return true on success. */
static bool
fold_const_call_ccc (real_value *result_real, real_value *result_imag,
combined_fn fn, const real_value *arg0_real,
const real_value *arg0_imag, const real_value *arg1_real,
const real_value *arg1_imag, const real_format *format)
{
switch (fn)
{
CASE_CFN_CPOW:
return do_mpc_arg2 (result_real, result_imag, mpc_pow,
arg0_real, arg0_imag, arg1_real, arg1_imag, format);
default:
return false;
}
}
/* Subroutine of fold_const_call, with the same interface. Handle cases
where the arguments and result are numerical. */
static tree
fold_const_call_1 (combined_fn fn, tree type, tree arg0, tree arg1)
{
machine_mode mode = TYPE_MODE (type);
machine_mode arg0_mode = TYPE_MODE (TREE_TYPE (arg0));
machine_mode arg1_mode = TYPE_MODE (TREE_TYPE (arg1));
if (arg0_mode == arg1_mode
&& real_cst_p (arg0)
&& real_cst_p (arg1))
{
gcc_checking_assert (SCALAR_FLOAT_MODE_P (arg0_mode));
if (mode == arg0_mode)
{
/* real, real -> real. */
REAL_VALUE_TYPE result;
if (fold_const_call_sss (&result, fn, TREE_REAL_CST_PTR (arg0),
TREE_REAL_CST_PTR (arg1),
REAL_MODE_FORMAT (mode)))
return build_real (type, result);
}
return NULL_TREE;
}
if (real_cst_p (arg0)
&& integer_cst_p (arg1))
{
gcc_checking_assert (SCALAR_FLOAT_MODE_P (arg0_mode));
if (mode == arg0_mode)
{
/* real, int -> real. */
REAL_VALUE_TYPE result;
if (fold_const_call_sss (&result, fn, TREE_REAL_CST_PTR (arg0),
arg1, REAL_MODE_FORMAT (mode)))
return build_real (type, result);
}
return NULL_TREE;
}
if (integer_cst_p (arg0)
&& real_cst_p (arg1))
{
gcc_checking_assert (SCALAR_FLOAT_MODE_P (arg1_mode));
if (mode == arg1_mode)
{
/* int, real -> real. */
REAL_VALUE_TYPE result;
if (fold_const_call_sss (&result, fn, arg0,
TREE_REAL_CST_PTR (arg1),
REAL_MODE_FORMAT (mode)))
return build_real (type, result);
}
return NULL_TREE;
}
if (arg0_mode == arg1_mode
&& complex_cst_p (arg0)
&& complex_cst_p (arg1))
{
gcc_checking_assert (COMPLEX_MODE_P (arg0_mode));
machine_mode inner_mode = GET_MODE_INNER (arg0_mode);
tree arg0r = TREE_REALPART (arg0);
tree arg0i = TREE_IMAGPART (arg0);
tree arg1r = TREE_REALPART (arg1);
tree arg1i = TREE_IMAGPART (arg1);
if (mode == arg0_mode
&& real_cst_p (arg0r)
&& real_cst_p (arg0i)
&& real_cst_p (arg1r)
&& real_cst_p (arg1i))
{
/* complex real, complex real -> complex real. */
REAL_VALUE_TYPE result_real, result_imag;
if (fold_const_call_ccc (&result_real, &result_imag, fn,
TREE_REAL_CST_PTR (arg0r),
TREE_REAL_CST_PTR (arg0i),
TREE_REAL_CST_PTR (arg1r),
TREE_REAL_CST_PTR (arg1i),
REAL_MODE_FORMAT (inner_mode)))
return build_complex (type,
build_real (TREE_TYPE (type), result_real),
build_real (TREE_TYPE (type), result_imag));
}
return NULL_TREE;
}
return NULL_TREE;
}
/* Try to fold FN (ARG0, ARG1) to a constant. Return the constant on success,
otherwise return null. TYPE is the type of the return value. */
tree
fold_const_call (combined_fn fn, tree type, tree arg0, tree arg1)
{
const char *p0, *p1;
char c;
switch (fn)
{
case CFN_BUILT_IN_STRSPN:
if ((p0 = c_getstr (arg0)) && (p1 = c_getstr (arg1)))
return build_int_cst (type, strspn (p0, p1));
return NULL_TREE;
case CFN_BUILT_IN_STRCSPN:
if ((p0 = c_getstr (arg0)) && (p1 = c_getstr (arg1)))
return build_int_cst (type, strcspn (p0, p1));
return NULL_TREE;
case CFN_BUILT_IN_STRCMP:
if ((p0 = c_getstr (arg0)) && (p1 = c_getstr (arg1)))
return build_cmp_result (type, strcmp (p0, p1));
return NULL_TREE;
case CFN_BUILT_IN_STRCASECMP:
if ((p0 = c_getstr (arg0)) && (p1 = c_getstr (arg1)))
{
int r = strcmp (p0, p1);
if (r == 0)
return build_cmp_result (type, r);
}
return NULL_TREE;
case CFN_BUILT_IN_INDEX:
case CFN_BUILT_IN_STRCHR:
if ((p0 = c_getstr (arg0)) && target_char_cst_p (arg1, &c))
{
const char *r = strchr (p0, c);
if (r == NULL)
return build_int_cst (type, 0);
return fold_convert (type,
fold_build_pointer_plus_hwi (arg0, r - p0));
}
return NULL_TREE;
case CFN_BUILT_IN_RINDEX:
case CFN_BUILT_IN_STRRCHR:
if ((p0 = c_getstr (arg0)) && target_char_cst_p (arg1, &c))
{
const char *r = strrchr (p0, c);
if (r == NULL)
return build_int_cst (type, 0);
return fold_convert (type,
fold_build_pointer_plus_hwi (arg0, r - p0));
}
return NULL_TREE;
case CFN_BUILT_IN_STRSTR:
if ((p1 = c_getstr (arg1)))
{
if ((p0 = c_getstr (arg0)))
{
const char *r = strstr (p0, p1);
if (r == NULL)
return build_int_cst (type, 0);
return fold_convert (type,
fold_build_pointer_plus_hwi (arg0, r - p0));
}
if (*p1 == '\0')
return fold_convert (type, arg0);
}
return NULL_TREE;
default:
return fold_const_call_1 (fn, type, arg0, arg1);
}
}
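/* Illustrative examples: strcmp ("abc", "abd") with constant arguments
   folds to -1 via build_cmp_result, and strchr ("abc", 'b') folds to
   the address of the literal plus 1.  Note that strcasecmp is only
   folded when strcmp reports equality, since that outcome does not
   depend on the locale.  */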
/* Try to evaluate:
*RESULT = FN (*ARG0, *ARG1, *ARG2)
in format FORMAT. Return true on success. */
static bool
fold_const_call_ssss (real_value *result, combined_fn fn,
const real_value *arg0, const real_value *arg1,
const real_value *arg2, const real_format *format)
{
switch (fn)
{
CASE_CFN_FMA:
return do_mpfr_arg3 (result, mpfr_fma, arg0, arg1, arg2, format);
default:
return false;
}
}
/* Subroutine of fold_const_call, with the same interface. Handle cases
where the arguments and result are numerical. */
static tree
fold_const_call_1 (combined_fn fn, tree type, tree arg0, tree arg1, tree arg2)
{
machine_mode mode = TYPE_MODE (type);
machine_mode arg0_mode = TYPE_MODE (TREE_TYPE (arg0));
machine_mode arg1_mode = TYPE_MODE (TREE_TYPE (arg1));
machine_mode arg2_mode = TYPE_MODE (TREE_TYPE (arg2));
if (arg0_mode == arg1_mode
&& arg0_mode == arg2_mode
&& real_cst_p (arg0)
&& real_cst_p (arg1)
&& real_cst_p (arg2))
{
gcc_checking_assert (SCALAR_FLOAT_MODE_P (arg0_mode));
if (mode == arg0_mode)
{
/* real, real, real -> real. */
REAL_VALUE_TYPE result;
if (fold_const_call_ssss (&result, fn, TREE_REAL_CST_PTR (arg0),
TREE_REAL_CST_PTR (arg1),
TREE_REAL_CST_PTR (arg2),
REAL_MODE_FORMAT (mode)))
return build_real (type, result);
}
return NULL_TREE;
}
return NULL_TREE;
}
/* Try to fold FN (ARG0, ARG1, ARG2) to a constant. Return the constant on
success, otherwise return null. TYPE is the type of the return value. */
tree
fold_const_call (combined_fn fn, tree type, tree arg0, tree arg1, tree arg2)
{
const char *p0, *p1;
char c;
unsigned HOST_WIDE_INT s0, s1;
size_t s2 = 0;
switch (fn)
{
case CFN_BUILT_IN_STRNCMP:
if (!host_size_t_cst_p (arg2, &s2))
return NULL_TREE;
if (s2 == 0
&& !TREE_SIDE_EFFECTS (arg0)
&& !TREE_SIDE_EFFECTS (arg1))
return build_int_cst (type, 0);
else if ((p0 = c_getstr (arg0)) && (p1 = c_getstr (arg1)))
return build_int_cst (type, strncmp (p0, p1, s2));
return NULL_TREE;
case CFN_BUILT_IN_STRNCASECMP:
if (!host_size_t_cst_p (arg2, &s2))
return NULL_TREE;
if (s2 == 0
&& !TREE_SIDE_EFFECTS (arg0)
&& !TREE_SIDE_EFFECTS (arg1))
return build_int_cst (type, 0);
else if ((p0 = c_getstr (arg0))
&& (p1 = c_getstr (arg1))
&& strncmp (p0, p1, s2) == 0)
return build_int_cst (type, 0);
return NULL_TREE;
case CFN_BUILT_IN_BCMP:
case CFN_BUILT_IN_MEMCMP:
if (!host_size_t_cst_p (arg2, &s2))
return NULL_TREE;
if (s2 == 0
&& !TREE_SIDE_EFFECTS (arg0)
&& !TREE_SIDE_EFFECTS (arg1))
return build_int_cst (type, 0);
if ((p0 = c_getstr (arg0, &s0))
&& (p1 = c_getstr (arg1, &s1))
&& s2 <= s0
&& s2 <= s1)
return build_cmp_result (type, memcmp (p0, p1, s2));
return NULL_TREE;
case CFN_BUILT_IN_MEMCHR:
if (!host_size_t_cst_p (arg2, &s2))
return NULL_TREE;
if (s2 == 0
&& !TREE_SIDE_EFFECTS (arg0)
&& !TREE_SIDE_EFFECTS (arg1))
return build_int_cst (type, 0);
if ((p0 = c_getstr (arg0, &s0))
&& s2 <= s0
&& target_char_cst_p (arg1, &c))
{
const char *r = (const char *) memchr (p0, c, s2);
if (r == NULL)
return build_int_cst (type, 0);
return fold_convert (type,
fold_build_pointer_plus_hwi (arg0, r - p0));
}
return NULL_TREE;
default:
return fold_const_call_1 (fn, type, arg0, arg1, arg2);
}
}
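/* Illustrative examples: memcmp ("abc", "abd", 2) folds to 0 because
   only the first two bytes are compared and both string constants are
   long enough, while memcmp ("abc", "abd", 3) folds to -1.  A zero
   length folds to 0 without examining the pointers, provided neither
   argument has side effects.  */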
/* Fold a fma operation with arguments ARG[012]. */
tree
fold_fma (location_t, tree type, tree arg0, tree arg1, tree arg2)
{
REAL_VALUE_TYPE result;
if (real_cst_p (arg0)
&& real_cst_p (arg1)
&& real_cst_p (arg2)
&& do_mpfr_arg3 (&result, mpfr_fma, TREE_REAL_CST_PTR (arg0),
TREE_REAL_CST_PTR (arg1), TREE_REAL_CST_PTR (arg2),
REAL_MODE_FORMAT (TYPE_MODE (type))))
return build_real (type, result);
return NULL_TREE;
}