Use poly_int rtx accessors instead of hwi accessors

This patch generalises various places that used hwi rtx accessors so
that they can handle poly_ints instead.  In many cases the changes
were made by inspection rather than because something had shown them
to be necessary.
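
To illustrate the recurring transformation (a schematic sketch based on
the get_addr hunk below, not new code): a test that previously accepted
only CONST_INT offsets

    if (GET_CODE (x) == PLUS
        && GET_CODE (XEXP (x, 1)) == CONST_INT)
      return plus_constant (GET_MODE (x), op0, INTVAL (XEXP (x, 1)));

becomes one that also accepts CONST_POLY_INT offsets (used, for example,
for SVE-style runtime-sized frame offsets):

    poly_int64 c;
    if (GET_CODE (x) == PLUS
        && poly_int_rtx_p (XEXP (x, 1), &c))
      return plus_constant (GET_MODE (x), op0, c);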

2018-06-12  Richard Sandiford  <richard.sandiford@linaro.org>

gcc/
	* poly-int.h (can_div_trunc_p): Add new overload in which all values
	are poly_ints.
	* alias.c (get_addr): Extend CONST_INT handling to poly_int_rtx_p.
	(memrefs_conflict_p): Likewise.
	(init_alias_analysis): Likewise.
	* cfgexpand.c (expand_debug_expr): Likewise.
	* combine.c (combine_simplify_rtx, force_int_to_mode): Likewise.
	* cse.c (fold_rtx): Likewise.
	* explow.c (adjust_stack, anti_adjust_stack): Likewise.
	* expr.c (emit_block_move_hints): Likewise.
	(clear_storage_hints, push_block, emit_push_insn): Likewise.
	(store_expr_with_bounds, reduce_to_bit_field_precision): Likewise.
	(emit_group_load_1): Use rtx_to_poly_int64 for group offsets.
	(emit_group_store): Likewise.
	(find_args_size_adjust): Use strip_offset.  Use rtx_to_poly_int64
	to read the PRE/POST_MODIFY increment.
	* calls.c (store_one_arg): Use strip_offset.
	* rtlanal.c (rtx_addr_can_trap_p_1): Extend CONST_INT handling to
	poly_int_rtx_p.
	(set_noop_p): Use rtx_to_poly_int64 for the elements selected
	by a VEC_SELECT.
	* simplify-rtx.c (avoid_constant_pool_reference): Use strip_offset.
	(simplify_binary_operation_1): Extend CONST_INT handling to
	poly_int_rtx_p.
	* var-tracking.c (compute_cfa_pointer): Take a poly_int64 rather
	than a HOST_WIDE_INT.
	(hard_frame_pointer_adjustment): Change from HOST_WIDE_INT to
	poly_int64.
	(adjust_mems, add_stores): Update accordingly.
	(vt_canonicalize_addr): Track polynomial offsets.
	(emit_note_insn_var_location): Likewise.
	(vt_add_function_parameter): Likewise.
	(vt_initialize): Likewise.

From-SVN: r261530
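
Several hunks below also replace open-coded matching of
(plus BASE (const_int N)) addresses with the strip_offset helper.
Schematically (a sketch of the idiom, with ADDR standing in for
whatever address rtx the call site decomposes):

    poly_int64 offset;
    rtx base = strip_offset (addr, &offset);
    /* BASE is ADDR with any constant (possibly polynomial) offset
       removed; OFFSET is that offset, or 0 if there was none.  */
    if (base == crtl->args.internal_arg_pointer)
      /* ...use OFFSET...  */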

diff --git a/gcc/alias.c b/gcc/alias.c

@@ -2262,9 +2262,10 @@ get_addr (rtx x)
rtx op0 = get_addr (XEXP (x, 0));
if (op0 != XEXP (x, 0))
{
poly_int64 c;
if (GET_CODE (x) == PLUS
&& GET_CODE (XEXP (x, 1)) == CONST_INT)
return plus_constant (GET_MODE (x), op0, INTVAL (XEXP (x, 1)));
&& poly_int_rtx_p (XEXP (x, 1), &c))
return plus_constant (GET_MODE (x), op0, c);
return simplify_gen_binary (GET_CODE (x), GET_MODE (x),
op0, XEXP (x, 1));
}
@@ -2551,10 +2552,11 @@ memrefs_conflict_p (poly_int64 xsize, rtx x, poly_int64 ysize, rtx y,
return offset_overlap_p (c, xsize, ysize);
/* Can't properly adjust our sizes. */
if (!CONST_INT_P (x1)
|| !can_div_trunc_p (xsize, INTVAL (x1), &xsize)
|| !can_div_trunc_p (ysize, INTVAL (x1), &ysize)
|| !can_div_trunc_p (c, INTVAL (x1), &c))
poly_int64 c1;
if (!poly_int_rtx_p (x1, &c1)
|| !can_div_trunc_p (xsize, c1, &xsize)
|| !can_div_trunc_p (ysize, c1, &ysize)
|| !can_div_trunc_p (c, c1, &c))
return -1;
return memrefs_conflict_p (xsize, x0, ysize, y0, c);
}
@@ -3407,6 +3409,7 @@ init_alias_analysis (void)
&& DF_REG_DEF_COUNT (regno) != 1)
note = NULL_RTX;
poly_int64 offset;
if (note != NULL_RTX
&& GET_CODE (XEXP (note, 0)) != EXPR_LIST
&& ! rtx_varies_p (XEXP (note, 0), 1)
@@ -3421,10 +3424,9 @@ init_alias_analysis (void)
&& GET_CODE (src) == PLUS
&& REG_P (XEXP (src, 0))
&& (t = get_reg_known_value (REGNO (XEXP (src, 0))))
&& CONST_INT_P (XEXP (src, 1)))
&& poly_int_rtx_p (XEXP (src, 1), &offset))
{
t = plus_constant (GET_MODE (src), t,
INTVAL (XEXP (src, 1)));
t = plus_constant (GET_MODE (src), t, offset);
set_reg_known_value (regno, t);
set_reg_known_equiv_p (regno, false);
}

diff --git a/gcc/calls.c b/gcc/calls.c

@@ -5676,15 +5676,9 @@ store_one_arg (struct arg_data *arg, rtx argblock, int flags,
rtx x = arg->value;
poly_int64 i = 0;
if (XEXP (x, 0) == crtl->args.internal_arg_pointer
|| (GET_CODE (XEXP (x, 0)) == PLUS
&& XEXP (XEXP (x, 0), 0) ==
crtl->args.internal_arg_pointer
&& CONST_INT_P (XEXP (XEXP (x, 0), 1))))
if (strip_offset (XEXP (x, 0), &i)
== crtl->args.internal_arg_pointer)
{
if (XEXP (x, 0) != crtl->args.internal_arg_pointer)
i = rtx_to_poly_int64 (XEXP (XEXP (x, 0), 1));
/* arg.locate doesn't contain the pretend_args_size offset,
it's part of argblock. Ensure we don't count it in I. */
if (STACK_GROWS_DOWNWARD)

diff --git a/gcc/cfgexpand.c b/gcc/cfgexpand.c

@@ -4373,10 +4373,11 @@ expand_debug_expr (tree exp)
goto component_ref;
op1 = expand_debug_expr (TREE_OPERAND (exp, 1));
if (!op1 || !CONST_INT_P (op1))
poly_int64 offset;
if (!op1 || !poly_int_rtx_p (op1, &offset))
return NULL;
op0 = plus_constant (inner_mode, op0, INTVAL (op1));
op0 = plus_constant (inner_mode, op0, offset);
}
as = TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (TREE_OPERAND (exp, 0))));
@@ -4890,10 +4891,11 @@ expand_debug_expr (tree exp)
{
op1 = expand_debug_expr (TREE_OPERAND (TREE_OPERAND (exp, 0),
1));
if (!op1 || !CONST_INT_P (op1))
poly_int64 offset;
if (!op1 || !poly_int_rtx_p (op1, &offset))
return NULL;
return plus_constant (mode, op0, INTVAL (op1));
return plus_constant (mode, op0, offset);
}
}

diff --git a/gcc/combine.c b/gcc/combine.c

@@ -5978,8 +5978,11 @@ combine_simplify_rtx (rtx x, machine_mode op0_mode, int in_dest,
GET_MODE_MASK (mode), 0));
/* We can truncate a constant value and return it. */
if (CONST_INT_P (XEXP (x, 0)))
return gen_int_mode (INTVAL (XEXP (x, 0)), mode);
{
poly_int64 c;
if (poly_int_rtx_p (XEXP (x, 0), &c))
return gen_int_mode (c, mode);
}
/* Similarly to what we do in simplify-rtx.c, a truncate of a register
whose value is a comparison can be replaced with a subreg if
@@ -8700,6 +8703,7 @@ force_int_to_mode (rtx x, scalar_int_mode mode, scalar_int_mode xmode,
int next_select = just_select || code == XOR || code == NOT || code == NEG;
unsigned HOST_WIDE_INT fuller_mask;
rtx op0, op1, temp;
poly_int64 const_op0;
/* When we have an arithmetic operation, or a shift whose count we
do not know, we need to assume that all bits up to the highest-order
@@ -8823,8 +8827,8 @@ force_int_to_mode (rtx x, scalar_int_mode mode, scalar_int_mode xmode,
case MINUS:
/* If X is (minus C Y) where C's least set bit is larger than any bit
in the mask, then we may replace with (neg Y). */
if (CONST_INT_P (XEXP (x, 0))
&& least_bit_hwi (UINTVAL (XEXP (x, 0))) > mask)
if (poly_int_rtx_p (XEXP (x, 0), &const_op0)
&& (unsigned HOST_WIDE_INT) known_alignment (const_op0) > mask)
{
x = simplify_gen_unary (NEG, xmode, XEXP (x, 1), xmode);
return force_to_mode (x, mode, mask, next_select);

diff --git a/gcc/cse.c b/gcc/cse.c

@@ -3112,6 +3112,7 @@ fold_rtx (rtx x, rtx_insn *insn)
int i;
rtx new_rtx = 0;
int changed = 0;
poly_int64 xval;
/* Operands of X. */
/* Workaround -Wmaybe-uninitialized false positive during
@@ -3592,12 +3593,11 @@ fold_rtx (rtx x, rtx_insn *insn)
case MINUS:
/* If we have (MINUS Y C), see if Y is known to be (PLUS Z C2).
If so, produce (PLUS Z C2-C). */
if (const_arg1 != 0 && CONST_INT_P (const_arg1))
if (const_arg1 != 0 && poly_int_rtx_p (const_arg1, &xval))
{
rtx y = lookup_as_function (XEXP (x, 0), PLUS);
if (y && CONST_INT_P (XEXP (y, 1)))
return fold_rtx (plus_constant (mode, copy_rtx (y),
-INTVAL (const_arg1)),
if (y && poly_int_rtx_p (XEXP (y, 1)))
return fold_rtx (plus_constant (mode, copy_rtx (y), -xval),
NULL);
}

diff --git a/gcc/explow.c b/gcc/explow.c

@@ -955,8 +955,9 @@ adjust_stack (rtx adjust)
/* We expect all variable sized adjustments to be multiple of
PREFERRED_STACK_BOUNDARY. */
if (CONST_INT_P (adjust))
stack_pointer_delta -= INTVAL (adjust);
poly_int64 const_adjust;
if (poly_int_rtx_p (adjust, &const_adjust))
stack_pointer_delta -= const_adjust;
adjust_stack_1 (adjust, false);
}
@@ -972,8 +973,9 @@ anti_adjust_stack (rtx adjust)
/* We expect all variable sized adjustments to be multiple of
PREFERRED_STACK_BOUNDARY. */
if (CONST_INT_P (adjust))
stack_pointer_delta += INTVAL (adjust);
poly_int64 const_adjust;
if (poly_int_rtx_p (adjust, &const_adjust))
stack_pointer_delta += const_adjust;
adjust_stack_1 (adjust, true);
}

diff --git a/gcc/expr.c b/gcc/expr.c

@@ -1612,12 +1612,13 @@ emit_block_move_hints (rtx x, rtx y, rtx size, enum block_op_methods method,
/* Set MEM_SIZE as appropriate for this block copy. The main place this
can be incorrect is coming from __builtin_memcpy. */
if (CONST_INT_P (size))
poly_int64 const_size;
if (poly_int_rtx_p (size, &const_size))
{
x = shallow_copy_rtx (x);
y = shallow_copy_rtx (y);
set_mem_size (x, INTVAL (size));
set_mem_size (y, INTVAL (size));
set_mem_size (x, const_size);
set_mem_size (y, const_size);
}
if (CONST_INT_P (size) && can_move_by_pieces (INTVAL (size), align))
@@ -2146,7 +2147,7 @@ emit_group_load_1 (rtx *tmps, rtx dst, rtx orig_src, tree type,
for (i = start; i < XVECLEN (dst, 0); i++)
{
machine_mode mode = GET_MODE (XEXP (XVECEXP (dst, 0, i), 0));
poly_int64 bytepos = INTVAL (XEXP (XVECEXP (dst, 0, i), 1));
poly_int64 bytepos = rtx_to_poly_int64 (XEXP (XVECEXP (dst, 0, i), 1));
poly_int64 bytelen = GET_MODE_SIZE (mode);
poly_int64 shift = 0;
@@ -2477,7 +2478,8 @@ emit_group_store (rtx orig_dst, rtx src, tree type ATTRIBUTE_UNUSED,
{
inner = GET_MODE (tmps[start]);
bytepos = subreg_lowpart_offset (inner, outer);
if (known_eq (INTVAL (XEXP (XVECEXP (src, 0, start), 1)), bytepos))
if (known_eq (rtx_to_poly_int64 (XEXP (XVECEXP (src, 0, start), 1)),
bytepos))
{
temp = simplify_gen_subreg (outer, tmps[start],
inner, 0);
@@ -2496,7 +2498,8 @@ emit_group_store (rtx orig_dst, rtx src, tree type ATTRIBUTE_UNUSED,
{
inner = GET_MODE (tmps[finish - 1]);
bytepos = subreg_lowpart_offset (inner, outer);
if (known_eq (INTVAL (XEXP (XVECEXP (src, 0, finish - 1), 1)),
if (known_eq (rtx_to_poly_int64 (XEXP (XVECEXP (src, 0,
finish - 1), 1)),
bytepos))
{
temp = simplify_gen_subreg (outer, tmps[finish - 1],
@@ -2518,7 +2521,7 @@ emit_group_store (rtx orig_dst, rtx src, tree type ATTRIBUTE_UNUSED,
/* Process the pieces. */
for (i = start; i < finish; i++)
{
poly_int64 bytepos = INTVAL (XEXP (XVECEXP (src, 0, i), 1));
poly_int64 bytepos = rtx_to_poly_int64 (XEXP (XVECEXP (src, 0, i), 1));
machine_mode mode = GET_MODE (tmps[i]);
poly_int64 bytelen = GET_MODE_SIZE (mode);
poly_uint64 adj_bytelen;
@@ -2974,9 +2977,10 @@ clear_storage_hints (rtx object, rtx size, enum block_op_methods method,
/* If OBJECT is not BLKmode and SIZE is the same size as its mode,
just move a zero. Otherwise, do this a piece at a time. */
poly_int64 size_val;
if (mode != BLKmode
&& CONST_INT_P (size)
&& known_eq (INTVAL (size), GET_MODE_SIZE (mode)))
&& poly_int_rtx_p (size, &size_val)
&& known_eq (size_val, GET_MODE_SIZE (mode)))
{
rtx zero = CONST0_RTX (mode);
if (zero != NULL)
@@ -3912,9 +3916,10 @@ push_block (rtx size, poly_int64 extra, int below)
}
else
{
if (CONST_INT_P (size))
poly_int64 csize;
if (poly_int_rtx_p (size, &csize))
temp = plus_constant (Pmode, virtual_outgoing_args_rtx,
-INTVAL (size) - (below ? 0 : extra));
-csize - (below ? 0 : extra));
else if (maybe_ne (extra, 0) && !below)
temp = gen_rtx_PLUS (Pmode, virtual_outgoing_args_rtx,
negate_rtx (Pmode, plus_constant (Pmode, size,
@@ -4034,11 +4039,10 @@ find_args_size_adjust (rtx_insn *insn)
/* Look for a trivial adjustment, otherwise assume nothing. */
/* Note that the SPU restore_stack_block pattern refers to
the stack pointer in V4SImode. Consider that non-trivial. */
poly_int64 offset;
if (SCALAR_INT_MODE_P (GET_MODE (dest))
&& GET_CODE (SET_SRC (set)) == PLUS
&& XEXP (SET_SRC (set), 0) == stack_pointer_rtx
&& CONST_INT_P (XEXP (SET_SRC (set), 1)))
return INTVAL (XEXP (SET_SRC (set), 1));
&& strip_offset (SET_SRC (set), &offset) == stack_pointer_rtx)
return offset;
/* ??? Reload can generate no-op moves, which will be cleaned
up later. Recognize it and continue searching. */
else if (rtx_equal_p (dest, SET_SRC (set)))
@@ -4076,8 +4080,7 @@ find_args_size_adjust (rtx_insn *insn)
addr = XEXP (addr, 1);
gcc_assert (GET_CODE (addr) == PLUS);
gcc_assert (XEXP (addr, 0) == stack_pointer_rtx);
gcc_assert (CONST_INT_P (XEXP (addr, 1)));
return INTVAL (XEXP (addr, 1));
return rtx_to_poly_int64 (XEXP (addr, 1));
default:
gcc_unreachable ();
}
@@ -4419,15 +4422,16 @@ emit_push_insn (rtx x, machine_mode mode, tree type, rtx size,
/* Get the address of the stack space.
In this case, we do not deal with EXTRA separately.
A single stack adjust will do. */
poly_int64 offset;
if (! args_addr)
{
temp = push_block (size, extra, where_pad == PAD_DOWNWARD);
extra = 0;
}
else if (CONST_INT_P (args_so_far))
else if (poly_int_rtx_p (args_so_far, &offset))
temp = memory_address (BLKmode,
plus_constant (Pmode, args_addr,
skip + INTVAL (args_so_far)));
skip + offset));
else
temp = memory_address (BLKmode,
plus_constant (Pmode,
@@ -5724,12 +5728,11 @@ store_expr (tree exp, rtx target, int call_param_p,
/* Figure out how much is left in TARGET that we have to clear.
Do all calculations in pointer_mode. */
if (CONST_INT_P (copy_size_rtx))
poly_int64 const_copy_size;
if (poly_int_rtx_p (copy_size_rtx, &const_copy_size))
{
size = plus_constant (address_mode, size,
-INTVAL (copy_size_rtx));
target = adjust_address (target, BLKmode,
INTVAL (copy_size_rtx));
size = plus_constant (address_mode, size, -const_copy_size);
target = adjust_address (target, BLKmode, const_copy_size);
}
else
{
@@ -11203,10 +11206,10 @@ reduce_to_bit_field_precision (rtx exp, rtx target, tree type)
if (target && GET_MODE (target) != GET_MODE (exp))
target = 0;
/* For constant values, reduce using build_int_cst_type. */
if (CONST_INT_P (exp))
poly_int64 const_exp;
if (poly_int_rtx_p (exp, &const_exp))
{
HOST_WIDE_INT value = INTVAL (exp);
tree t = build_int_cst_type (type, value);
tree t = build_int_cst_type (type, const_exp);
return expand_expr (t, target, VOIDmode, EXPAND_NORMAL);
}
else if (TYPE_UNSIGNED (type))

diff --git a/gcc/poly-int.h b/gcc/poly-int.h

@@ -2346,6 +2346,27 @@ can_div_trunc_p (const poly_int_pod<N, Ca> &a, Cb b,
return true;
}
/* Return true if we can compute A / B at compile time, rounding towards zero.
Store the result in QUOTIENT if so.
This handles cases in which either B is constant or the result is
constant. */
template<unsigned int N, typename Ca, typename Cb, typename Cq>
inline bool
can_div_trunc_p (const poly_int_pod<N, Ca> &a,
const poly_int_pod<N, Cb> &b,
poly_int_pod<N, Cq> *quotient)
{
if (b.is_constant ())
return can_div_trunc_p (a, b.coeffs[0], quotient);
if (!can_div_trunc_p (a, b, &quotient->coeffs[0]))
return false;
for (unsigned int i = 1; i < N; ++i)
quotient->coeffs[i] = 0;
return true;
}
/* Return true if there is some constant Q and polynomial r such that:
(1) a = b * Q + r

diff --git a/gcc/rtlanal.c b/gcc/rtlanal.c

@@ -462,6 +462,7 @@ rtx_addr_can_trap_p_1 (const_rtx x, poly_int64 offset, poly_int64 size,
{
enum rtx_code code = GET_CODE (x);
gcc_checking_assert (mode == BLKmode || known_size_p (size));
poly_int64 const_x1;
/* The offset must be a multiple of the mode size if we are considering
unaligned memory references on strict alignment machines. */
@@ -653,8 +654,8 @@ rtx_addr_can_trap_p_1 (const_rtx x, poly_int64 offset, poly_int64 size,
return 0;
/* - or it is an address that can't trap plus a constant integer. */
if (CONST_INT_P (XEXP (x, 1))
&& !rtx_addr_can_trap_p_1 (XEXP (x, 0), offset + INTVAL (XEXP (x, 1)),
if (poly_int_rtx_p (XEXP (x, 1), &const_x1)
&& !rtx_addr_can_trap_p_1 (XEXP (x, 0), offset + const_x1,
size, mode, unaligned_mems))
return 0;
@@ -1613,11 +1614,11 @@ set_noop_p (const_rtx set)
int i;
rtx par = XEXP (src, 1);
rtx src0 = XEXP (src, 0);
int c0 = INTVAL (XVECEXP (par, 0, 0));
HOST_WIDE_INT offset = GET_MODE_UNIT_SIZE (GET_MODE (src0)) * c0;
poly_int64 c0 = rtx_to_poly_int64 (XVECEXP (par, 0, 0));
poly_int64 offset = GET_MODE_UNIT_SIZE (GET_MODE (src0)) * c0;
for (i = 1; i < XVECLEN (par, 0); i++)
if (INTVAL (XVECEXP (par, 0, i)) != c0 + i)
if (maybe_ne (rtx_to_poly_int64 (XVECEXP (par, 0, i)), c0 + i))
return 0;
return
simplify_subreg_regno (REGNO (src0), GET_MODE (src0),

diff --git a/gcc/simplify-rtx.c b/gcc/simplify-rtx.c

@@ -210,7 +210,7 @@ avoid_constant_pool_reference (rtx x)
{
rtx c, tmp, addr;
machine_mode cmode;
HOST_WIDE_INT offset = 0;
poly_int64 offset = 0;
switch (GET_CODE (x))
{
@@ -239,13 +239,7 @@ avoid_constant_pool_reference (rtx x)
addr = targetm.delegitimize_address (addr);
/* Split the address into a base and integer offset. */
if (GET_CODE (addr) == CONST
&& GET_CODE (XEXP (addr, 0)) == PLUS
&& CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
{
offset = INTVAL (XEXP (XEXP (addr, 0), 1));
addr = XEXP (XEXP (addr, 0), 0);
}
addr = strip_offset (addr, &offset);
if (GET_CODE (addr) == LO_SUM)
addr = XEXP (addr, 1);
@@ -261,7 +255,7 @@ avoid_constant_pool_reference (rtx x)
/* If we're accessing the constant in a different mode than it was
originally stored, attempt to fix that up via subreg simplifications.
If that fails we have no choice but to return the original memory. */
if (offset == 0 && cmode == GET_MODE (x))
if (known_eq (offset, 0) && cmode == GET_MODE (x))
return c;
else if (known_in_range_p (offset, 0, GET_MODE_SIZE (cmode)))
{
@@ -2272,13 +2266,13 @@ simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
if ((GET_CODE (op0) == CONST
|| GET_CODE (op0) == SYMBOL_REF
|| GET_CODE (op0) == LABEL_REF)
&& CONST_INT_P (op1))
return plus_constant (mode, op0, INTVAL (op1));
&& poly_int_rtx_p (op1, &offset))
return plus_constant (mode, op0, offset);
else if ((GET_CODE (op1) == CONST
|| GET_CODE (op1) == SYMBOL_REF
|| GET_CODE (op1) == LABEL_REF)
&& CONST_INT_P (op0))
return plus_constant (mode, op1, INTVAL (op0));
&& poly_int_rtx_p (op0, &offset))
return plus_constant (mode, op1, offset);
/* See if this is something like X * C - X or vice versa or
if the multiplication is written as a shift. If so, we can

diff --git a/gcc/var-tracking.c b/gcc/var-tracking.c

@@ -917,14 +917,14 @@ static HOST_WIDE_INT cfa_base_offset;
or hard_frame_pointer_rtx. */
static inline rtx
compute_cfa_pointer (HOST_WIDE_INT adjustment)
compute_cfa_pointer (poly_int64 adjustment)
{
return plus_constant (Pmode, cfa_base_rtx, adjustment + cfa_base_offset);
}
/* Adjustment for hard_frame_pointer_rtx to cfa base reg,
or -1 if the replacement shouldn't be done. */
static HOST_WIDE_INT hard_frame_pointer_adjustment = -1;
static poly_int64 hard_frame_pointer_adjustment = -1;
/* Data for adjust_mems callback. */
@@ -1030,7 +1030,7 @@ adjust_mems (rtx loc, const_rtx old_rtx, void *data)
return compute_cfa_pointer (amd->stack_adjust);
else if (loc == hard_frame_pointer_rtx
&& frame_pointer_needed
&& hard_frame_pointer_adjustment != -1
&& maybe_ne (hard_frame_pointer_adjustment, -1)
&& cfa_base_rtx)
return compute_cfa_pointer (hard_frame_pointer_adjustment);
gcc_checking_assert (loc != virtual_incoming_args_rtx);
@@ -2156,7 +2156,7 @@ get_addr_from_local_cache (dataflow_set *set, rtx const loc)
static rtx
vt_canonicalize_addr (dataflow_set *set, rtx oloc)
{
HOST_WIDE_INT ofst = 0;
poly_int64 ofst = 0, term;
machine_mode mode = GET_MODE (oloc);
rtx loc = oloc;
rtx x;
@@ -2165,9 +2165,9 @@ vt_canonicalize_addr (dataflow_set *set, rtx oloc)
while (retry)
{
while (GET_CODE (loc) == PLUS
&& GET_CODE (XEXP (loc, 1)) == CONST_INT)
&& poly_int_rtx_p (XEXP (loc, 1), &term))
{
ofst += INTVAL (XEXP (loc, 1));
ofst += term;
loc = XEXP (loc, 0);
}
@@ -2192,10 +2192,11 @@ vt_canonicalize_addr (dataflow_set *set, rtx oloc)
loc = get_addr_from_global_cache (loc);
/* Consolidate plus_constants. */
while (ofst && GET_CODE (loc) == PLUS
&& GET_CODE (XEXP (loc, 1)) == CONST_INT)
while (maybe_ne (ofst, 0)
&& GET_CODE (loc) == PLUS
&& poly_int_rtx_p (XEXP (loc, 1), &term))
{
ofst += INTVAL (XEXP (loc, 1));
ofst += term;
loc = XEXP (loc, 0);
}
@@ -2211,12 +2212,10 @@ vt_canonicalize_addr (dataflow_set *set, rtx oloc)
}
/* Add OFST back in. */
if (ofst)
if (maybe_ne (ofst, 0))
{
/* Don't build new RTL if we can help it. */
if (GET_CODE (oloc) == PLUS
&& XEXP (oloc, 0) == loc
&& INTVAL (XEXP (oloc, 1)) == ofst)
if (strip_offset (oloc, &term) == loc && known_eq (term, ofst))
return oloc;
loc = plus_constant (mode, loc, ofst);
@@ -6094,7 +6093,7 @@ add_stores (rtx loc, const_rtx expr, void *cuip)
}
if (loc == stack_pointer_rtx
&& hard_frame_pointer_adjustment != -1
&& maybe_ne (hard_frame_pointer_adjustment, -1)
&& preserve)
cselib_set_value_sp_based (v);
@@ -8765,6 +8764,7 @@ emit_note_insn_var_location (variable **varp, emit_note_data *data)
&& GET_CODE (loc[n_var_parts]) == GET_CODE (loc2))
{
rtx new_loc = NULL;
poly_int64 offset2;
if (REG_P (loc[n_var_parts])
&& hard_regno_nregs (REGNO (loc[n_var_parts]), mode) * 2
@@ -8789,18 +8789,13 @@ emit_note_insn_var_location (variable **varp, emit_note_data *data)
else if (MEM_P (loc[n_var_parts])
&& GET_CODE (XEXP (loc2, 0)) == PLUS
&& REG_P (XEXP (XEXP (loc2, 0), 0))
&& CONST_INT_P (XEXP (XEXP (loc2, 0), 1)))
&& poly_int_rtx_p (XEXP (XEXP (loc2, 0), 1), &offset2))
{
if ((REG_P (XEXP (loc[n_var_parts], 0))
&& rtx_equal_p (XEXP (loc[n_var_parts], 0),
XEXP (XEXP (loc2, 0), 0))
&& INTVAL (XEXP (XEXP (loc2, 0), 1)) == size)
|| (GET_CODE (XEXP (loc[n_var_parts], 0)) == PLUS
&& CONST_INT_P (XEXP (XEXP (loc[n_var_parts], 0), 1))
&& rtx_equal_p (XEXP (XEXP (loc[n_var_parts], 0), 0),
XEXP (XEXP (loc2, 0), 0))
&& INTVAL (XEXP (XEXP (loc[n_var_parts], 0), 1)) + size
== INTVAL (XEXP (XEXP (loc2, 0), 1))))
poly_int64 end1 = size;
rtx base1 = strip_offset_and_add (XEXP (loc[n_var_parts], 0),
&end1);
if (rtx_equal_p (base1, XEXP (XEXP (loc2, 0), 0))
&& known_eq (end1, offset2))
new_loc = adjust_address_nv (loc[n_var_parts],
wider_mode, 0);
}
@@ -9670,20 +9665,17 @@ vt_add_function_parameter (tree parm)
rewrite the incoming location of parameters passed on the stack
into MEMs based on the argument pointer, so that incoming doesn't
depend on a pseudo. */
poly_int64 incoming_offset = 0;
if (MEM_P (incoming)
&& (XEXP (incoming, 0) == crtl->args.internal_arg_pointer
|| (GET_CODE (XEXP (incoming, 0)) == PLUS
&& XEXP (XEXP (incoming, 0), 0)
== crtl->args.internal_arg_pointer
&& CONST_INT_P (XEXP (XEXP (incoming, 0), 1)))))
&& (strip_offset (XEXP (incoming, 0), &incoming_offset)
== crtl->args.internal_arg_pointer))
{
HOST_WIDE_INT off = -FIRST_PARM_OFFSET (current_function_decl);
if (GET_CODE (XEXP (incoming, 0)) == PLUS)
off += INTVAL (XEXP (XEXP (incoming, 0), 1));
incoming
= replace_equiv_address_nv (incoming,
plus_constant (Pmode,
arg_pointer_rtx, off));
arg_pointer_rtx,
off + incoming_offset));
}
#ifdef HAVE_window_save
@@ -9990,7 +9982,7 @@ static bool
vt_initialize (void)
{
basic_block bb;
HOST_WIDE_INT fp_cfa_offset = -1;
poly_int64 fp_cfa_offset = -1;
alloc_aux_for_blocks (sizeof (variable_tracking_info));
@@ -10105,7 +10097,7 @@ vt_initialize (void)
{
if (GET_CODE (elim) == PLUS)
{
fp_cfa_offset -= INTVAL (XEXP (elim, 1));
fp_cfa_offset -= rtx_to_poly_int64 (XEXP (elim, 1));
elim = XEXP (elim, 0);
}
if (elim != hard_frame_pointer_rtx)
@@ -10238,8 +10230,8 @@ vt_initialize (void)
VTI (bb)->out.stack_adjust += post;
}
if (fp_cfa_offset != -1
&& hard_frame_pointer_adjustment == -1
if (maybe_ne (fp_cfa_offset, -1)
&& known_eq (hard_frame_pointer_adjustment, -1)
&& fp_setter_insn (insn))
{
vt_init_cfa_base ();