re PR rtl-optimization/2391 (Exponential compilation time explosion in combine)
PR opt/2391
* combine.c: Fix spelling in comment.
(cached_nonzero_bits): New function.
(cached_num_sign_bit_copies): New function.
(nonzero_bits_with_known): New macro.
(num_sign_bit_copies_with_known): New macro.
(nonzero_bits1): Rename from nonzero_bits.  Add three new arguments.
Change calls from nonzero_bits to nonzero_bits_with_known.
(num_sign_bit_copies1): Rename from num_sign_bit_copies.  Add three
new arguments.  Change calls from num_sign_bit_copies to
num_sign_bit_copies_with_known.
(nonzero_bits): New macro.
(num_sign_bit_copies): New macro.
(update_table_tick): Don't traverse identical subexpression more
than once.
(get_last_value_validate): Likewise.

From-SVN: r62893
parent fd295cb213
commit 8fd737543f
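For readers skimming the diff, the shape of the fix is easier to see on a small standalone model than in combine.c itself. The sketch below is not GCC code — the types and names (expr, cached_analyze, analyze1) are invented — but it mirrors the pattern the patch adds: a cached_* wrapper notices an operand shared by both sides of a binary node, computes its result once, and passes that single (known expression, known result) pair down into the real analysis, so a chain of self-referencing nodes costs a linear rather than exponential number of calls. Only the first-level check is modeled here; the patch also checks one level deeper.

/* Minimal standalone sketch (not GCC code) of the caching scheme this
   commit introduces.  All names here are hypothetical.  */

#include <stdio.h>

struct expr {
  int is_leaf;
  int leaf_value;
  struct expr *op0, *op1;       /* operands of a binary node */
};

static long calls;              /* counts invocations of the underlying analysis */

static int analyze1 (struct expr *, struct expr *, int);

/* Wrapper: if both operands of X are the same node, analyze that node once
   and hand the result to analyze1 as the "known" pair.  */
static int
cached_analyze (struct expr *x, struct expr *known_x, int known_ret)
{
  if (x == known_x)
    return known_ret;
  if (!x->is_leaf && x->op0 == x->op1)
    return analyze1 (x, x->op0, cached_analyze (x->op0, known_x, known_ret));
  return analyze1 (x, known_x, known_ret);
}

/* The underlying analysis: a stand-in that just combines operand results.  */
static int
analyze1 (struct expr *x, struct expr *known_x, int known_ret)
{
  calls++;
  if (x->is_leaf)
    return x->leaf_value;
  return cached_analyze (x->op0, known_x, known_ret)
         | cached_analyze (x->op1, known_x, known_ret);
}

int
main (void)
{
  /* Build a degenerate chain where each node's two operands are the same
     node; a plain recursion into both operands would need about 2^DEPTH
     calls here, which is the blowup reported in the PR.  */
  enum { DEPTH = 30 };
  static struct expr nodes[DEPTH + 1];
  nodes[0].is_leaf = 1;
  nodes[0].leaf_value = 1;
  for (int i = 1; i <= DEPTH; i++)
    nodes[i].op0 = nodes[i].op1 = &nodes[i - 1];

  int r = cached_analyze (&nodes[DEPTH], NULL, 0);
  printf ("result %d after %ld calls (linear in depth)\n", r, calls);
  return 0;
}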
gcc/ChangeLog
@@ -1,3 +1,22 @@
+2003-02-13  Adam Nemet  <anemet@lnxw.com>
+	PR opt/2391
+	* combine.c: Fix spelling in comment.
+	(cached_nonzero_bits): New function.
+	(cached_num_sign_bit_copies): New function.
+	(nonzero_bits_with_known): New macro.
+	(num_sign_bit_copies_with_known): New macro.
+	(nonzero_bits1): Rename from nonzero_bits.  Add three new
+	arguments.  Change calls from nonzero_bits to
+	nonzero_bits_with_known.
+	(num_sign_bit_copies1): Rename from num_sign_bit_copies.  Add
+	three new arguments.  Change calls from num_sign_bit_copies to
+	num_sign_bit_copies_with_known.
+	(nonzero_bits): New macro.
+	(num_sign_bit_copies): New macro.
+	(update_table_tick): Don't traverse identical subexpression more
+	than once.
+	(get_last_value_validate): Likewise.
+
 2003-02-13  Zack Weinberg  <zack@codesourcery.com>
 
 	* emit-rtl.c (init_emit): Use ggc_alloc for regno_reg_rtx.
gcc/combine.c (319 changed lines)

@@ -139,6 +139,12 @@ static int max_uid_cuid;
 #define UWIDE_SHIFT_LEFT_BY_BITS_PER_WORD(val) \
   (((unsigned HOST_WIDE_INT) (val) << (BITS_PER_WORD - 1)) << 1)
 
+#define nonzero_bits(X, M) \
+  cached_nonzero_bits (X, M, NULL_RTX, VOIDmode, 0)
+
+#define num_sign_bit_copies(X, M) \
+  cached_num_sign_bit_copies (X, M, NULL_RTX, VOIDmode, 0)
+
 /* Maximum register number, which is the size of the tables below. */
 
 static unsigned int combine_max_regno;

@@ -198,7 +204,7 @@ static basic_block this_basic_block;
 static sbitmap refresh_blocks;
 
 /* The next group of arrays allows the recording of the last value assigned
-   to (hard or pseudo) register n. We use this information to see if a
+   to (hard or pseudo) register n. We use this information to see if an
    operation being processed is redundant given a prior operation performed
    on the register. For example, an `and' with a constant is redundant if
    all the zero bits are already known to be turned off.

@@ -371,8 +377,20 @@ static rtx make_field_assignment PARAMS ((rtx));
 static rtx apply_distributive_law PARAMS ((rtx));
 static rtx simplify_and_const_int PARAMS ((rtx, enum machine_mode, rtx,
                                            unsigned HOST_WIDE_INT));
-static unsigned HOST_WIDE_INT nonzero_bits PARAMS ((rtx, enum machine_mode));
-static unsigned int num_sign_bit_copies PARAMS ((rtx, enum machine_mode));
+static unsigned HOST_WIDE_INT cached_nonzero_bits
+                               PARAMS ((rtx, enum machine_mode, rtx,
+                                        enum machine_mode,
+                                        unsigned HOST_WIDE_INT));
+static unsigned HOST_WIDE_INT nonzero_bits1
+                               PARAMS ((rtx, enum machine_mode, rtx,
+                                        enum machine_mode,
+                                        unsigned HOST_WIDE_INT));
+static unsigned int cached_num_sign_bit_copies
+                               PARAMS ((rtx, enum machine_mode, rtx,
+                                        enum machine_mode, unsigned int));
+static unsigned int num_sign_bit_copies1
+                               PARAMS ((rtx, enum machine_mode, rtx,
+                                        enum machine_mode, unsigned int));
 static int merge_outer_ops PARAMS ((enum rtx_code *, HOST_WIDE_INT *,
                                     enum rtx_code, HOST_WIDE_INT,
                                     enum machine_mode, int *));

@@ -8161,12 +8179,62 @@ simplify_and_const_int (x, mode, varop, constop)
   return x;
 }
 
+#define nonzero_bits_with_known(X, MODE) \
+  cached_nonzero_bits (X, MODE, known_x, known_mode, known_ret)
+
+/* The function cached_nonzero_bits is a wrapper around nonzero_bits1.
+   It avoids exponential behavior in nonzero_bits1 when X has
+   identical subexpressions on the first or the second level. */
+
+static unsigned HOST_WIDE_INT
+cached_nonzero_bits (x, mode, known_x, known_mode, known_ret)
+     rtx x;
+     enum machine_mode mode;
+     rtx known_x;
+     enum machine_mode known_mode;
+     unsigned HOST_WIDE_INT known_ret;
+{
+  if (x == known_x && mode == known_mode)
+    return known_ret;
+
+  /* Try to find identical subexpressions. If found call
+     nonzero_bits1 on X with the subexpressions as KNOWN_X and the
+     precomputed value for the subexpression as KNOWN_RET. */
+
+  if (GET_RTX_CLASS (GET_CODE (x)) == '2'
+      || GET_RTX_CLASS (GET_CODE (x)) == 'c')
+    {
+      rtx x0 = XEXP (x, 0);
+      rtx x1 = XEXP (x, 1);
+
+      /* Check the first level. */
+      if (x0 == x1)
+        return nonzero_bits1 (x, mode, x0, mode,
+                              nonzero_bits_with_known (x0, mode));
+
+      /* Check the second level. */
+      if ((GET_RTX_CLASS (GET_CODE (x0)) == '2'
+           || GET_RTX_CLASS (GET_CODE (x0)) == 'c')
+          && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
+        return nonzero_bits1 (x, mode, x1, mode,
+                              nonzero_bits_with_known (x1, mode));
+
+      if ((GET_RTX_CLASS (GET_CODE (x1)) == '2'
+           || GET_RTX_CLASS (GET_CODE (x1)) == 'c')
+          && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
+        return nonzero_bits1 (x, mode, x0, mode,
+                              nonzero_bits_with_known (x0, mode));
+    }
+
+  return nonzero_bits1 (x, mode, known_x, known_mode, known_ret);
+}
+
 /* We let num_sign_bit_copies recur into nonzero_bits as that is useful.
    We don't let nonzero_bits recur into num_sign_bit_copies, because that
    is less useful. We can't allow both, because that results in exponential
    run time recursion. There is a nullstone testcase that triggered
    this. This macro avoids accidental uses of num_sign_bit_copies. */
 #define num_sign_bit_copies()
+#define cached_num_sign_bit_copies()
 
 /* Given an expression, X, compute which bits in X can be nonzero.
    We don't care about bits outside of those defined in MODE.
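As background for the hunks below, which only reroute existing calls through nonzero_bits_with_known, it may help to recall what nonzero_bits computes: a conservative mask of bits that may be nonzero in a value. The following rough sketch is not GCC code — it works on plain unsigned ints instead of rtx and the helper names are invented — but it mirrors a few of the propagation rules visible in the AND, IOR/XOR, and ZERO_EXTEND cases that follow.

/* Rough sketch (not GCC code) of how "possibly nonzero bits" masks combine.  */

#include <stdio.h>

/* AND can only keep bits that may be set in both operands.  */
static unsigned int
nz_and (unsigned int nz0, unsigned int nz1)
{
  return nz0 & nz1;
}

/* IOR/XOR may set any bit that may be set in either operand.  */
static unsigned int
nz_ior (unsigned int nz0, unsigned int nz1)
{
  return nz0 | nz1;
}

/* ZERO_EXTEND from a narrower mode clears everything above that mode.  */
static unsigned int
nz_zero_extend (unsigned int nz_inner, unsigned int inner_mode_mask)
{
  return nz_inner & inner_mode_mask;
}

int
main (void)
{
  unsigned int a = 0x00ff;      /* a value known to fit in the low 8 bits */
  unsigned int b = 0x0f0f;      /* a value known to fit in these bits */

  printf ("and:  %#x\n", nz_and (a, b));              /* 0xf    */
  printf ("ior:  %#x\n", nz_ior (a, b));              /* 0xfff  */
  printf ("zext: %#x\n", nz_zero_extend (b, 0xff));   /* 0xf    */
  return 0;
}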
@@ -8175,9 +8243,12 @@ simplify_and_const_int (x, mode, varop, constop)
    a shift, AND, or zero_extract, we can do better. */
 
 static unsigned HOST_WIDE_INT
-nonzero_bits (x, mode)
+nonzero_bits1 (x, mode, known_x, known_mode, known_ret)
      rtx x;
      enum machine_mode mode;
+     rtx known_x;
+     enum machine_mode known_mode;
+     unsigned HOST_WIDE_INT known_ret;
 {
   unsigned HOST_WIDE_INT nonzero = GET_MODE_MASK (mode);
   unsigned HOST_WIDE_INT inner_nz;

@@ -8215,7 +8286,7 @@ nonzero_bits (x, mode)
       && GET_MODE_BITSIZE (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT
       && GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (GET_MODE (x)))
     {
-      nonzero &= nonzero_bits (x, GET_MODE (x));
+      nonzero &= nonzero_bits_with_known (x, GET_MODE (x));
      nonzero |= GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x));
      return nonzero;
    }

@@ -8297,7 +8368,7 @@ nonzero_bits (x, mode)
               | ((HOST_WIDE_INT) (-1)
                  << GET_MODE_BITSIZE (GET_MODE (x))));
 #endif
-      return nonzero_bits (tem, mode) & nonzero;
+      return nonzero_bits_with_known (tem, mode) & nonzero;
    }
  else if (nonzero_sign_valid && reg_nonzero_bits[REGNO (x)])
    {

@@ -8372,11 +8443,12 @@ nonzero_bits (x, mode)
      break;
 
    case TRUNCATE:
-      nonzero &= (nonzero_bits (XEXP (x, 0), mode) & GET_MODE_MASK (mode));
+      nonzero &= (nonzero_bits_with_known (XEXP (x, 0), mode)
+                  & GET_MODE_MASK (mode));
      break;
 
    case ZERO_EXTEND:
-      nonzero &= nonzero_bits (XEXP (x, 0), mode);
+      nonzero &= nonzero_bits_with_known (XEXP (x, 0), mode);
      if (GET_MODE (XEXP (x, 0)) != VOIDmode)
        nonzero &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
      break;

@@ -8385,7 +8457,7 @@ nonzero_bits (x, mode)
      /* If the sign bit is known clear, this is the same as ZERO_EXTEND.
         Otherwise, show all the bits in the outer mode but not the inner
         may be nonzero. */
-      inner_nz = nonzero_bits (XEXP (x, 0), mode);
+      inner_nz = nonzero_bits_with_known (XEXP (x, 0), mode);
      if (GET_MODE (XEXP (x, 0)) != VOIDmode)
        {
          inner_nz &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));

@@ -8400,19 +8472,21 @@ nonzero_bits (x, mode)
      break;
 
    case AND:
-      nonzero &= (nonzero_bits (XEXP (x, 0), mode)
-                  & nonzero_bits (XEXP (x, 1), mode));
+      nonzero &= (nonzero_bits_with_known (XEXP (x, 0), mode)
+                  & nonzero_bits_with_known (XEXP (x, 1), mode));
      break;
 
    case XOR: case IOR:
    case UMIN: case UMAX: case SMIN: case SMAX:
      {
-        unsigned HOST_WIDE_INT nonzero0 = nonzero_bits (XEXP (x, 0), mode);
+        unsigned HOST_WIDE_INT nonzero0 =
+          nonzero_bits_with_known (XEXP (x, 0), mode);
 
        /* Don't call nonzero_bits for the second time if it cannot change
           anything. */
        if ((nonzero & nonzero0) != nonzero)
-          nonzero &= (nonzero0 | nonzero_bits (XEXP (x, 1), mode));
+          nonzero &= (nonzero0
+                      | nonzero_bits_with_known (XEXP (x, 1), mode));
      }
      break;
 

@@ -8425,8 +8499,10 @@ nonzero_bits (x, mode)
         computing the width (position of the highest-order nonzero bit)
         and the number of low-order zero bits for each value. */
      {
-        unsigned HOST_WIDE_INT nz0 = nonzero_bits (XEXP (x, 0), mode);
-        unsigned HOST_WIDE_INT nz1 = nonzero_bits (XEXP (x, 1), mode);
+        unsigned HOST_WIDE_INT nz0 =
+          nonzero_bits_with_known (XEXP (x, 0), mode);
+        unsigned HOST_WIDE_INT nz1 =
+          nonzero_bits_with_known (XEXP (x, 1), mode);
        int sign_index = GET_MODE_BITSIZE (GET_MODE (x)) - 1;
        int width0 = floor_log2 (nz0) + 1;
        int width1 = floor_log2 (nz1) + 1;

@@ -8511,7 +8587,7 @@ nonzero_bits (x, mode)
 
      if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_UNSIGNED_P (x) > 0)
        nonzero = (GET_MODE_MASK (GET_MODE (x))
-                   & nonzero_bits (SUBREG_REG (x), GET_MODE (x)));
+                   & nonzero_bits_with_known (SUBREG_REG (x), GET_MODE (x)));
 
      /* If the inner mode is a single word for both the host and target
         machines, we can compute this from which bits of the inner

@@ -8520,7 +8596,7 @@ nonzero_bits (x, mode)
          && (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x)))
              <= HOST_BITS_PER_WIDE_INT))
        {
-          nonzero &= nonzero_bits (SUBREG_REG (x), mode);
+          nonzero &= nonzero_bits_with_known (SUBREG_REG (x), mode);
 
 #if defined (WORD_REGISTER_OPERATIONS) && defined (LOAD_EXTEND_OP)
          /* If this is a typical RISC machine, we only have to worry

@@ -8563,7 +8639,8 @@ nonzero_bits (x, mode)
        unsigned int width = GET_MODE_BITSIZE (inner_mode);
        int count = INTVAL (XEXP (x, 1));
        unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (inner_mode);
-        unsigned HOST_WIDE_INT op_nonzero = nonzero_bits (XEXP (x, 0), mode);
+        unsigned HOST_WIDE_INT op_nonzero =
+          nonzero_bits_with_known (XEXP (x, 0), mode);
        unsigned HOST_WIDE_INT inner = op_nonzero & mode_mask;
        unsigned HOST_WIDE_INT outer = 0;
 

@@ -8621,8 +8698,8 @@ nonzero_bits (x, mode)
      break;
 
    case IF_THEN_ELSE:
-      nonzero &= (nonzero_bits (XEXP (x, 1), mode)
-                  | nonzero_bits (XEXP (x, 2), mode));
+      nonzero &= (nonzero_bits_with_known (XEXP (x, 1), mode)
+                  | nonzero_bits_with_known (XEXP (x, 2), mode));
      break;
 
    default:

@@ -8633,17 +8710,74 @@ nonzero_bits (x, mode)
 }
 
 /* See the macro definition above. */
 #undef num_sign_bit_copies
+#undef cached_num_sign_bit_copies
 
+#define num_sign_bit_copies_with_known(X, M) \
+  cached_num_sign_bit_copies (X, M, known_x, known_mode, known_ret)
+
+/* The function cached_num_sign_bit_copies is a wrapper around
+   num_sign_bit_copies1. It avoids exponential behavior in
+   num_sign_bit_copies1 when X has identical subexpressions on the
+   first or the second level. */
+
+static unsigned int
+cached_num_sign_bit_copies (x, mode, known_x, known_mode, known_ret)
+     rtx x;
+     enum machine_mode mode;
+     rtx known_x;
+     enum machine_mode known_mode;
+     unsigned int known_ret;
+{
+  if (x == known_x && mode == known_mode)
+    return known_ret;
+
+  /* Try to find identical subexpressions. If found call
+     num_sign_bit_copies1 on X with the subexpressions as KNOWN_X and
+     the precomputed value for the subexpression as KNOWN_RET. */
+
+  if (GET_RTX_CLASS (GET_CODE (x)) == '2'
+      || GET_RTX_CLASS (GET_CODE (x)) == 'c')
+    {
+      rtx x0 = XEXP (x, 0);
+      rtx x1 = XEXP (x, 1);
+
+      /* Check the first level. */
+      if (x0 == x1)
+        return
+          num_sign_bit_copies1 (x, mode, x0, mode,
+                                num_sign_bit_copies_with_known (x0, mode));
+
+      /* Check the second level. */
+      if ((GET_RTX_CLASS (GET_CODE (x0)) == '2'
+           || GET_RTX_CLASS (GET_CODE (x0)) == 'c')
+          && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
+        return
+          num_sign_bit_copies1 (x, mode, x1, mode,
+                                num_sign_bit_copies_with_known (x1, mode));
+
+      if ((GET_RTX_CLASS (GET_CODE (x1)) == '2'
+           || GET_RTX_CLASS (GET_CODE (x1)) == 'c')
+          && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
+        return
+          num_sign_bit_copies1 (x, mode, x0, mode,
+                                num_sign_bit_copies_with_known (x0, mode));
+    }
+
+  return num_sign_bit_copies1 (x, mode, known_x, known_mode, known_ret);
+}
+
 /* Return the number of bits at the high-order end of X that are known to
    be equal to the sign bit. X will be used in mode MODE; if MODE is
    VOIDmode, X will be used in its own mode. The returned value will always
    be between 1 and the number of bits in MODE. */
 
 static unsigned int
-num_sign_bit_copies (x, mode)
+num_sign_bit_copies1 (x, mode, known_x, known_mode, known_ret)
      rtx x;
      enum machine_mode mode;
+     rtx known_x;
+     enum machine_mode known_mode;
+     unsigned int known_ret;
 {
   enum rtx_code code = GET_CODE (x);
   unsigned int bitwidth;
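The comment above defines num_sign_bit_copies. For a concrete feel of the quantity being cached in the hunks that follow, here is a small standalone helper — not GCC code, the name is invented — that counts sign-bit copies of an 8-bit constant.

/* Hedged illustration: how many high-order bits of an 8-bit value equal
   its sign bit.  */

#include <stdint.h>
#include <stdio.h>

static unsigned int
sign_bit_copies8 (int8_t v)
{
  uint8_t u = (uint8_t) v;              /* work on the raw bit pattern */
  unsigned int sign = (u >> 7) & 1;
  unsigned int copies = 1;              /* the sign bit itself counts */
  for (int bit = 6; bit >= 0; bit--)
    {
      if (((u >> bit) & 1) != sign)
        break;
      copies++;
    }
  return copies;
}

int
main (void)
{
  printf ("%u\n", sign_bit_copies8 (-1));  /* 8: 1111 1111 */
  printf ("%u\n", sign_bit_copies8 (5));   /* 5: 0000 0101 */
  printf ("%u\n", sign_bit_copies8 (-3));  /* 6: 1111 1101 */
  return 0;
}

In combine's terms, a value with N sign-bit copies in an M-bit mode behaves like a sign-extended (M - N + 1)-bit quantity, which is what makes the number worth tracking and, in this patch, worth caching.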
@@ -8666,7 +8800,7 @@ num_sign_bit_copies (x, mode)
      /* For a smaller object, just ignore the high bits. */
      if (bitwidth < GET_MODE_BITSIZE (GET_MODE (x)))
        {
-          num0 = num_sign_bit_copies (x, GET_MODE (x));
+          num0 = num_sign_bit_copies_with_known (x, GET_MODE (x));
          return MAX (1,
                      num0 - (int) (GET_MODE_BITSIZE (GET_MODE (x)) - bitwidth));
        }

@@ -8715,7 +8849,7 @@ num_sign_bit_copies (x, mode)
 
          tem = get_last_value (x);
          if (tem != 0)
-            return num_sign_bit_copies (tem, mode);
+            return num_sign_bit_copies_with_known (tem, mode);
 
          if (nonzero_sign_valid && reg_sign_bit_copies[REGNO (x)] != 0
              && GET_MODE_BITSIZE (GET_MODE (x)) == bitwidth)

@@ -8748,7 +8882,7 @@ num_sign_bit_copies (x, mode)
 
      if (SUBREG_PROMOTED_VAR_P (x) && ! SUBREG_PROMOTED_UNSIGNED_P (x))
        {
-          num0 = num_sign_bit_copies (SUBREG_REG (x), mode);
+          num0 = num_sign_bit_copies_with_known (SUBREG_REG (x), mode);
          return MAX ((int) bitwidth
                      - (int) GET_MODE_BITSIZE (GET_MODE (x)) + 1,
                      num0);

@@ -8757,7 +8891,7 @@ num_sign_bit_copies (x, mode)
      /* For a smaller object, just ignore the high bits. */
      if (bitwidth <= GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x))))
        {
-          num0 = num_sign_bit_copies (SUBREG_REG (x), VOIDmode);
+          num0 = num_sign_bit_copies_with_known (SUBREG_REG (x), VOIDmode);
          return MAX (1, (num0
                          - (int) (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x)))
                                   - bitwidth)));

@@ -8779,7 +8913,7 @@ num_sign_bit_copies (x, mode)
              > GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
          && LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (x))) == SIGN_EXTEND
          && GET_CODE (SUBREG_REG (x)) == MEM)
-        return num_sign_bit_copies (SUBREG_REG (x), mode);
+        return num_sign_bit_copies_with_known (SUBREG_REG (x), mode);
 #endif
 #endif
      break;

@@ -8791,16 +8925,16 @@ num_sign_bit_copies (x, mode)
 
    case SIGN_EXTEND:
      return (bitwidth - GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0)))
-              + num_sign_bit_copies (XEXP (x, 0), VOIDmode));
+              + num_sign_bit_copies_with_known (XEXP (x, 0), VOIDmode));
 
    case TRUNCATE:
      /* For a smaller object, just ignore the high bits. */
-      num0 = num_sign_bit_copies (XEXP (x, 0), VOIDmode);
+      num0 = num_sign_bit_copies_with_known (XEXP (x, 0), VOIDmode);
      return MAX (1, (num0 - (int) (GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0)))
                                    - bitwidth)));
 
    case NOT:
-      return num_sign_bit_copies (XEXP (x, 0), mode);
+      return num_sign_bit_copies_with_known (XEXP (x, 0), mode);
 
    case ROTATE: case ROTATERT:
      /* If we are rotating left by a number of bits less than the number

@@ -8810,7 +8944,7 @@ num_sign_bit_copies (x, mode)
          && INTVAL (XEXP (x, 1)) >= 0
          && INTVAL (XEXP (x, 1)) < (int) bitwidth)
        {
-          num0 = num_sign_bit_copies (XEXP (x, 0), mode);
+          num0 = num_sign_bit_copies_with_known (XEXP (x, 0), mode);
          return MAX (1, num0 - (code == ROTATE ? INTVAL (XEXP (x, 1))
                                 : (int) bitwidth - INTVAL (XEXP (x, 1))));
        }

@@ -8821,7 +8955,7 @@ num_sign_bit_copies (x, mode)
         is known to be positive, the number of sign bit copies is the
         same as that of the input. Finally, if the input has just one bit
         that might be nonzero, all the bits are copies of the sign bit. */
-      num0 = num_sign_bit_copies (XEXP (x, 0), mode);
+      num0 = num_sign_bit_copies_with_known (XEXP (x, 0), mode);
      if (bitwidth > HOST_BITS_PER_WIDE_INT)
        return num0 > 1 ? num0 - 1 : 1;
 

@@ -8839,8 +8973,8 @@ num_sign_bit_copies (x, mode)
    case SMIN: case SMAX: case UMIN: case UMAX:
      /* Logical operations will preserve the number of sign-bit copies.
         MIN and MAX operations always return one of the operands. */
-      num0 = num_sign_bit_copies (XEXP (x, 0), mode);
-      num1 = num_sign_bit_copies (XEXP (x, 1), mode);
+      num0 = num_sign_bit_copies_with_known (XEXP (x, 0), mode);
+      num1 = num_sign_bit_copies_with_known (XEXP (x, 1), mode);
      return MIN (num0, num1);
 
    case PLUS: case MINUS:

@@ -8858,8 +8992,8 @@ num_sign_bit_copies (x, mode)
                  : bitwidth - floor_log2 (nonzero) - 1);
        }
 
-      num0 = num_sign_bit_copies (XEXP (x, 0), mode);
-      num1 = num_sign_bit_copies (XEXP (x, 1), mode);
+      num0 = num_sign_bit_copies_with_known (XEXP (x, 0), mode);
+      num1 = num_sign_bit_copies_with_known (XEXP (x, 1), mode);
      result = MAX (1, MIN (num0, num1) - 1);
 
 #ifdef POINTERS_EXTEND_UNSIGNED

@@ -8881,8 +9015,8 @@ num_sign_bit_copies (x, mode)
         to be positive, we must allow for an additional bit since negating
         a negative number can remove one sign bit copy. */
 
-      num0 = num_sign_bit_copies (XEXP (x, 0), mode);
-      num1 = num_sign_bit_copies (XEXP (x, 1), mode);
+      num0 = num_sign_bit_copies_with_known (XEXP (x, 0), mode);
+      num1 = num_sign_bit_copies_with_known (XEXP (x, 1), mode);
 
      result = bitwidth - (bitwidth - num0) - (bitwidth - num1);
      if (result > 0

@@ -8905,17 +9039,17 @@ num_sign_bit_copies (x, mode)
           & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
        return 1;
      else
-        return num_sign_bit_copies (XEXP (x, 0), mode);
+        return num_sign_bit_copies_with_known (XEXP (x, 0), mode);
 
    case UMOD:
      /* The result must be <= the second operand. */
-      return num_sign_bit_copies (XEXP (x, 1), mode);
+      return num_sign_bit_copies_with_known (XEXP (x, 1), mode);
 
    case DIV:
      /* Similar to unsigned division, except that we have to worry about
         the case where the divisor is negative, in which case we have
         to add 1. */
-      result = num_sign_bit_copies (XEXP (x, 0), mode);
+      result = num_sign_bit_copies_with_known (XEXP (x, 0), mode);
      if (result > 1
          && (bitwidth > HOST_BITS_PER_WIDE_INT
              || (nonzero_bits (XEXP (x, 1), mode)

@@ -8925,7 +9059,7 @@ num_sign_bit_copies (x, mode)
      return result;
 
    case MOD:
-      result = num_sign_bit_copies (XEXP (x, 1), mode);
+      result = num_sign_bit_copies_with_known (XEXP (x, 1), mode);
      if (result > 1
          && (bitwidth > HOST_BITS_PER_WIDE_INT
              || (nonzero_bits (XEXP (x, 1), mode)

@@ -8937,7 +9071,7 @@ num_sign_bit_copies (x, mode)
    case ASHIFTRT:
      /* Shifts by a constant add to the number of bits equal to the
         sign bit. */
-      num0 = num_sign_bit_copies (XEXP (x, 0), mode);
+      num0 = num_sign_bit_copies_with_known (XEXP (x, 0), mode);
      if (GET_CODE (XEXP (x, 1)) == CONST_INT
          && INTVAL (XEXP (x, 1)) > 0)
        num0 = MIN ((int) bitwidth, num0 + INTVAL (XEXP (x, 1)));

@@ -8951,12 +9085,12 @@ num_sign_bit_copies (x, mode)
          || INTVAL (XEXP (x, 1)) >= (int) bitwidth)
        return 1;
 
-      num0 = num_sign_bit_copies (XEXP (x, 0), mode);
+      num0 = num_sign_bit_copies_with_known (XEXP (x, 0), mode);
      return MAX (1, num0 - INTVAL (XEXP (x, 1)));
 
    case IF_THEN_ELSE:
-      num0 = num_sign_bit_copies (XEXP (x, 1), mode);
-      num1 = num_sign_bit_copies (XEXP (x, 2), mode);
+      num0 = num_sign_bit_copies_with_known (XEXP (x, 1), mode);
+      num1 = num_sign_bit_copies_with_known (XEXP (x, 2), mode);
      return MIN (num0, num1);
 
    case EQ: case NE: case GE: case GT: case LE: case LT:

@@ -11405,7 +11539,45 @@ update_table_tick (x)
    /* Note that we can't have an "E" in values stored; see
       get_last_value_validate. */
    if (fmt[i] == 'e')
-      update_table_tick (XEXP (x, i));
+      {
+        /* Check for identical subexpressions. If x contains
+           identical subexpression we only have to traverse one of
+           them. */
+        if (i == 0
+            && (GET_RTX_CLASS (code) == '2'
+                || GET_RTX_CLASS (code) == 'c'))
+          {
+            /* Note that at this point x1 has already been
+               processed. */
+            rtx x0 = XEXP (x, 0);
+            rtx x1 = XEXP (x, 1);
+
+            /* If x0 and x1 are identical then there is no need to
+               process x0. */
+            if (x0 == x1)
+              break;
+
+            /* If x0 is identical to a subexpression of x1 then while
+               processing x1, x0 has already been processed. Thus we
+               are done with x. */
+            if ((GET_RTX_CLASS (GET_CODE (x1)) == '2'
+                 || GET_RTX_CLASS (GET_CODE (x1)) == 'c')
+                && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
+              break;
+
+            /* If x1 is identical to a subexpression of x0 then we
+               still have to process the rest of x0. */
+            if ((GET_RTX_CLASS (GET_CODE (x0)) == '2'
+                 || GET_RTX_CLASS (GET_CODE (x0)) == 'c')
+                && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
+              {
+                update_table_tick (XEXP (x0, x1 == XEXP (x0, 0) ? 1 : 0));
+                break;
+              }
+          }
+
+        update_table_tick (XEXP (x, i));
+      }
 }
 
 /* Record that REG is set to VALUE in insn INSN. If VALUE is zero, we
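The update_table_tick hunk above applies the same identical-subexpression checks to a plain traversal rather than to a cached value computation: if the operand about to be visited is the same object as one already visited, or appears directly inside it, it is simply skipped. A toy walker — not GCC code, node types and names made up — showing just that skip:

/* Toy sketch of the traversal shortcut used by update_table_tick and
   get_last_value_validate: visit a shared operand only once.  */

#include <stdio.h>

struct node {
  const char *name;
  struct node *op0, *op1;       /* NULL for leaves */
};

static void
visit (struct node *n)
{
  if (n == NULL)
    return;
  printf ("visiting %s\n", n->name);
  visit (n->op0);

  /* Skip the second operand when it is literally the first operand, or
     when it already occurs directly under the first operand.  */
  if (n->op1 == n->op0
      || (n->op0 != NULL
          && (n->op1 == n->op0->op0 || n->op1 == n->op0->op1)))
    return;
  visit (n->op1);
}

int
main (void)
{
  struct node r = { "r", NULL, NULL };
  struct node sum = { "sum", &r, &r };    /* (plus r r): r visited once */
  struct node top = { "top", &sum, &r };  /* r is also shared one level down */
  visit (&top);                           /* prints top, sum, r only */
  return 0;
}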
@@ -11758,11 +11930,52 @@ get_last_value_validate (loc, insn, tick, replace)
    }
 
  for (i = 0; i < len; i++)
-    if ((fmt[i] == 'e'
-         && get_last_value_validate (&XEXP (x, i), insn, tick, replace) == 0)
-        /* Don't bother with these. They shouldn't occur anyway. */
-        || fmt[i] == 'E')
-      return 0;
+    {
+      if (fmt[i] == 'e')
+        {
+          /* Check for identical subexpressions. If x contains
+             identical subexpression we only have to traverse one of
+             them. */
+          if (i == 1
+              && (GET_RTX_CLASS (GET_CODE (x)) == '2'
+                  || GET_RTX_CLASS (GET_CODE (x)) == 'c'))
+            {
+              /* Note that at this point x0 has already been checked
+                 and found valid. */
+              rtx x0 = XEXP (x, 0);
+              rtx x1 = XEXP (x, 1);
+
+              /* If x0 and x1 are identical then x is also valid. */
+              if (x0 == x1)
+                return 1;
+
+              /* If x1 is identical to a subexpression of x0 then
+                 while checking x0, x1 has already been checked. Thus
+                 it is valid and so as x. */
+              if ((GET_RTX_CLASS (GET_CODE (x0)) == '2'
+                   || GET_RTX_CLASS (GET_CODE (x0)) == 'c')
+                  && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
+                return 1;
+
+              /* If x0 is identical to a subexpression of x1 then x is
+                 valid iff the rest of x1 is valid. */
+              if ((GET_RTX_CLASS (GET_CODE (x1)) == '2'
+                   || GET_RTX_CLASS (GET_CODE (x1)) == 'c')
+                  && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
+                return
+                  get_last_value_validate (&XEXP (x1,
+                                                  x0 == XEXP (x1, 0) ? 1 : 0),
+                                           insn, tick, replace);
+            }
+
+          if (get_last_value_validate (&XEXP (x, i), insn, tick,
+                                       replace) == 0)
+            return 0;
+        }
+      /* Don't bother with these. They shouldn't occur anyway. */
+      else if (fmt[i] == 'E')
+        return 0;
+    }
 
  /* If we haven't found a reason for it to be invalid, it is valid. */
  return 1;