Add a load_extend_op wrapper

LOAD_EXTEND_OP only applies to scalar integer modes that are narrower
than a word.  However, callers weren't consistent about which of those
checks they made beforehand, and also weren't consistent about whether
"narrower than a word" should be measured by (bit)size or by precision
(IMO it's the latter).  This patch adds a wrapper that makes the macro
easier to use correctly.
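
For reference, the wrapper (added to rtlanal.c by this patch) simply
centralises those two checks:

	rtx_code
	load_extend_op (machine_mode mode)
	{
	  if (SCALAR_INT_MODE_P (mode)
	      && GET_MODE_PRECISION (mode) < BITS_PER_WORD)
	    return LOAD_EXTEND_OP (mode);
	  return UNKNOWN;
	}

The size/precision distinction matters on targets with partial-integer
modes (e.g. PSImode), where the precision can be smaller than the
bitsize.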

LOAD_EXTEND_OP is often used to disable transformations that aren't
beneficial when extends from memory are free, so being stricter about
the check accidentally exposed more optimisation opportunities.

"SUBREG_BYTE (...) == 0" and subreg_lowpart_p are implied by
paradoxical_subreg_p, so the patch also removes some redundant tests.
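
For example, the guard in combine.c:simplify_set (see the hunk below)
shrinks from:

	if (GET_CODE (src) == SUBREG && subreg_lowpart_p (src)
	    && INTEGRAL_MODE_P (GET_MODE (SUBREG_REG (src)))
	    && LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (src))) != UNKNOWN
	    && SUBREG_BYTE (src) == 0
	    && paradoxical_subreg_p (src)
	    && MEM_P (SUBREG_REG (src)))

to:

	if (paradoxical_subreg_p (src)
	    && MEM_P (SUBREG_REG (src))
	    && (extend_op = load_extend_op (GET_MODE (SUBREG_REG (src)))) != UNKNOWN)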

The patch doesn't change reload, since different checks could have
unforeseen consequences.

gcc/
2016-11-15  Richard Sandiford  <richard.sandiford@arm.com>
	    Alan Hayward  <alan.hayward@arm.com>
	    David Sherwood  <david.sherwood@arm.com>

	* rtl.h (load_extend_op): Declare.
	* rtlanal.c (load_extend_op): New function.
	(nonzero_bits1): Use it.
	(num_sign_bit_copies1): Likewise.
	* cse.c (cse_insn): Likewise.
	* fold-const.c (fold_single_bit_test): Likewise.
	(fold_unary_loc): Likewise.
	* fwprop.c (free_load_extend): Likewise.
	* postreload.c (reload_cse_simplify_set): Likewise.
	(reload_cse_simplify_operands): Likewise.
	* combine.c (try_combine): Likewise.
	(simplify_set): Likewise.  Remove redundant SUBREG_BYTE and
	subreg_lowpart_p checks.

Co-Authored-By: Alan Hayward <alan.hayward@arm.com>
Co-Authored-By: David Sherwood <david.sherwood@arm.com>

From-SVN: r242444

--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,21 @@
+2016-11-15  Richard Sandiford  <richard.sandiford@arm.com>
+	    Alan Hayward  <alan.hayward@arm.com>
+	    David Sherwood  <david.sherwood@arm.com>
+
+	* rtl.h (load_extend_op): Declare.
+	* rtlanal.c (load_extend_op): New function.
+	(nonzero_bits1): Use it.
+	(num_sign_bit_copies1): Likewise.
+	* cse.c (cse_insn): Likewise.
+	* fold-const.c (fold_single_bit_test): Likewise.
+	(fold_unary_loc): Likewise.
+	* fwprop.c (free_load_extend): Likewise.
+	* postreload.c (reload_cse_simplify_set): Likewise.
+	(reload_cse_simplify_operands): Likewise.
+	* combine.c (try_combine): Likewise.
+	(simplify_set): Likewise.  Remove redundant SUBREG_BYTE and
+	subreg_lowpart_p checks.
+
 2016-11-15  Richard Sandiford  <richard.sandiford@arm.com>
 	    Alan Hayward  <alan.hayward@arm.com>
 	    David Sherwood  <david.sherwood@arm.com>

--- a/gcc/combine.c
+++ b/gcc/combine.c
@@ -3738,7 +3738,7 @@ try_combine (rtx_insn *i3, rtx_insn *i2, rtx_insn *i1, rtx_insn *i0,
 	    {
 	      /* Or as a SIGN_EXTEND if LOAD_EXTEND_OP says that that's
 		 what it really is.  */
-	      if (LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (*split)))
+	      if (load_extend_op (GET_MODE (SUBREG_REG (*split)))
 		  == SIGN_EXTEND)
 		SUBST (*split, gen_rtx_SIGN_EXTEND (split_mode,
 						    SUBREG_REG (*split)));
@@ -6785,16 +6785,13 @@ simplify_set (rtx x)
      would require a paradoxical subreg.  Replace the subreg with a
      zero_extend to avoid the reload that would otherwise be required.  */
 
-  if (GET_CODE (src) == SUBREG && subreg_lowpart_p (src)
-      && INTEGRAL_MODE_P (GET_MODE (SUBREG_REG (src)))
-      && LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (src))) != UNKNOWN
-      && SUBREG_BYTE (src) == 0
-      && paradoxical_subreg_p (src)
-      && MEM_P (SUBREG_REG (src)))
+  enum rtx_code extend_op;
+  if (paradoxical_subreg_p (src)
+      && MEM_P (SUBREG_REG (src))
+      && (extend_op = load_extend_op (GET_MODE (SUBREG_REG (src)))) != UNKNOWN)
     {
       SUBST (SET_SRC (x),
-	     gen_rtx_fmt_e (LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (src))),
-			    GET_MODE (src), SUBREG_REG (src)));
+	     gen_rtx_fmt_e (extend_op, GET_MODE (src), SUBREG_REG (src)));
 
       src = SET_SRC (x);
     }

--- a/gcc/cse.c
+++ b/gcc/cse.c
@@ -4915,11 +4915,10 @@ cse_insn (rtx_insn *insn)
 	 also have such operations, but this is only likely to be
 	 beneficial on these machines.  */
 
+      rtx_code extend_op;
       if (flag_expensive_optimizations && src_related == 0
-	  && (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
-	  && GET_MODE_CLASS (mode) == MODE_INT
 	  && MEM_P (src) && ! do_not_record
-	  && LOAD_EXTEND_OP (mode) != UNKNOWN)
+	  && (extend_op = load_extend_op (mode)) != UNKNOWN)
 	{
 	  struct rtx_def memory_extend_buf;
 	  rtx memory_extend_rtx = &memory_extend_buf;
@@ -4928,7 +4927,7 @@ cse_insn (rtx_insn *insn)
 	  /* Set what we are trying to extend and the operation it might
 	     have been extended with.  */
 	  memset (memory_extend_rtx, 0, sizeof (*memory_extend_rtx));
-	  PUT_CODE (memory_extend_rtx, LOAD_EXTEND_OP (mode));
+	  PUT_CODE (memory_extend_rtx, extend_op);
 	  XEXP (memory_extend_rtx, 0) = src;
 	  for (tmode = GET_MODE_WIDER_MODE (mode);

--- a/gcc/fold-const.c
+++ b/gcc/fold-const.c
@@ -6725,7 +6725,7 @@ fold_single_bit_test (location_t loc, enum tree_code code,
       /* If we are going to be able to omit the AND below, we must do our
 	 operations as unsigned.  If we must use the AND, we have a choice.
 	 Normally unsigned is faster, but for some machines signed is.  */
-      ops_unsigned = (LOAD_EXTEND_OP (operand_mode) == SIGN_EXTEND
+      ops_unsigned = (load_extend_op (operand_mode) == SIGN_EXTEND
 		      && !flag_syntax_only) ? 0 : 1;
 
       signed_type = lang_hooks.types.type_for_mode (operand_mode, 0);
@@ -7775,7 +7775,7 @@ fold_unary_loc (location_t loc, enum tree_code code, tree type, tree op0)
 	    change = (cst == 0);
 	  if (change
 	      && !flag_syntax_only
-	      && (LOAD_EXTEND_OP (TYPE_MODE (TREE_TYPE (and0)))
+	      && (load_extend_op (TYPE_MODE (TREE_TYPE (and0)))
 		  == ZERO_EXTEND))
 	    {
 	      tree uns = unsigned_type_for (TREE_TYPE (and0));

--- a/gcc/fwprop.c
+++ b/gcc/fwprop.c
@@ -1051,7 +1051,7 @@ free_load_extend (rtx src, rtx_insn *insn)
   df_ref def, use;
 
   reg = XEXP (src, 0);
-  if (LOAD_EXTEND_OP (GET_MODE (reg)) != GET_CODE (src))
+  if (load_extend_op (GET_MODE (reg)) != GET_CODE (src))
     return false;
 
   FOR_EACH_INSN_USE (use, insn)

--- a/gcc/postreload.c
+++ b/gcc/postreload.c
@@ -256,8 +256,7 @@ reload_cse_simplify_set (rtx set, rtx_insn *insn)
      generating an extend instruction instead of a reg->reg copy.  Thus
      the destination must be a register that we can widen.  */
   if (MEM_P (src)
-      && GET_MODE_BITSIZE (GET_MODE (src)) < BITS_PER_WORD
-      && (extend_op = LOAD_EXTEND_OP (GET_MODE (src))) != UNKNOWN
+      && (extend_op = load_extend_op (GET_MODE (src))) != UNKNOWN
       && !REG_P (SET_DEST (set)))
     return 0;
 
@@ -330,8 +329,7 @@ reload_cse_simplify_set (rtx set, rtx_insn *insn)
 	       && REG_P (this_rtx)
 	       && !REG_P (SET_SRC (set))))
 	{
-	  if (GET_MODE_BITSIZE (GET_MODE (SET_DEST (set))) < BITS_PER_WORD
-	      && extend_op != UNKNOWN
+	  if (extend_op != UNKNOWN
 #ifdef CANNOT_CHANGE_MODE_CLASS
 	      && !CANNOT_CHANGE_MODE_CLASS (GET_MODE (SET_DEST (set)),
 					    word_mode,
@@ -414,9 +412,7 @@ reload_cse_simplify_operands (rtx_insn *insn, rtx testreg)
 	continue;
 
       op = recog_data.operand[i];
-      if (MEM_P (op)
-	  && GET_MODE_BITSIZE (GET_MODE (op)) < BITS_PER_WORD
-	  && LOAD_EXTEND_OP (GET_MODE (op)) != UNKNOWN)
+      if (MEM_P (op) && load_extend_op (GET_MODE (op)) != UNKNOWN)
 	{
 	  rtx set = single_set (insn);
 
@@ -449,7 +445,7 @@ reload_cse_simplify_operands (rtx_insn *insn, rtx testreg)
 	      && SET_DEST (set) == recog_data.operand[1-i])
 	    {
 	      validate_change (insn, recog_data.operand_loc[i],
-			       gen_rtx_fmt_e (LOAD_EXTEND_OP (GET_MODE (op)),
+			       gen_rtx_fmt_e (load_extend_op (GET_MODE (op)),
 					      word_mode, op),
 			       1);
 	      validate_change (insn, recog_data.operand_loc[1-i],

--- a/gcc/rtl.h
+++ b/gcc/rtl.h
@@ -2954,6 +2954,7 @@ extern void set_insn_deleted (rtx);
 
 /* Functions in rtlanal.c */
 
+extern rtx_code load_extend_op (machine_mode);
 extern rtx single_set_2 (const rtx_insn *, const_rtx);
 extern bool contains_symbol_ref_p (const_rtx);
 extern bool contains_symbolic_reference_p (const_rtx);

--- a/gcc/rtlanal.c
+++ b/gcc/rtlanal.c
@@ -3861,6 +3861,18 @@ subreg_nregs_with_regno (unsigned int regno, const_rtx x)
   return info.nregs;
 }
 
+/* If loads from memories of mode MODE always sign or zero extend,
+   return SIGN_EXTEND or ZERO_EXTEND as appropriate.  Return UNKNOWN
+   otherwise.  */
+rtx_code
+load_extend_op (machine_mode mode)
+{
+  if (SCALAR_INT_MODE_P (mode)
+      && GET_MODE_PRECISION (mode) < BITS_PER_WORD)
+    return LOAD_EXTEND_OP (mode);
+  return UNKNOWN;
+}
+
 struct parms_set_data
 {
@@ -4378,7 +4390,7 @@ nonzero_bits1 (const_rtx x, machine_mode mode, const_rtx known_x,
       /* In many, if not most, RISC machines, reading a byte from memory
 	 zeros the rest of the register.  Noticing that fact saves a lot
 	 of extra zero-extends.  */
-      if (LOAD_EXTEND_OP (GET_MODE (x)) == ZERO_EXTEND)
+      if (load_extend_op (GET_MODE (x)) == ZERO_EXTEND)
 	nonzero &= GET_MODE_MASK (GET_MODE (x));
       break;
 
@@ -4581,12 +4593,13 @@ nonzero_bits1 (const_rtx x, machine_mode mode, const_rtx known_x,
 	  /* On many CISC machines, accessing an object in a wider mode
 	     causes the high-order bits to become undefined.  So they are
 	     not known to be zero.  */
+	  rtx_code extend_op;
 	  if ((!WORD_REGISTER_OPERATIONS
 	       /* If this is a typical RISC machine, we only have to worry
 		  about the way loads are extended.  */
-	       || (LOAD_EXTEND_OP (inner_mode) == SIGN_EXTEND
+	       || ((extend_op = load_extend_op (inner_mode)) == SIGN_EXTEND
 		   ? val_signbit_known_set_p (inner_mode, nonzero)
-		   : LOAD_EXTEND_OP (inner_mode) != ZERO_EXTEND)
+		   : extend_op != ZERO_EXTEND)
 	       || (!MEM_P (SUBREG_REG (x)) && !REG_P (SUBREG_REG (x))))
 	      && GET_MODE_PRECISION (GET_MODE (x))
 		 > GET_MODE_PRECISION (inner_mode))
@@ -4832,7 +4845,7 @@ num_sign_bit_copies1 (const_rtx x, machine_mode mode, const_rtx known_x,
 	 than a word and loads of that size don't sign extend, we can say
 	 nothing about the high order bits.  */
       if (GET_MODE_PRECISION (GET_MODE (x)) < BITS_PER_WORD
-	  && LOAD_EXTEND_OP (GET_MODE (x)) != SIGN_EXTEND)
+	  && load_extend_op (GET_MODE (x)) != SIGN_EXTEND)
 	return 1;
     }
 
@@ -4874,7 +4887,7 @@ num_sign_bit_copies1 (const_rtx x, machine_mode mode, const_rtx known_x,
 
     case MEM:
      /* Some RISC machines sign-extend all loads of smaller than a word.  */
-      if (LOAD_EXTEND_OP (GET_MODE (x)) == SIGN_EXTEND)
+      if (load_extend_op (GET_MODE (x)) == SIGN_EXTEND)
 	return MAX (1, ((int) bitwidth
 			- (int) GET_MODE_PRECISION (GET_MODE (x)) + 1));
       break;
@@ -4924,8 +4937,7 @@ num_sign_bit_copies1 (const_rtx x, machine_mode mode, const_rtx known_x,
 	 to the stack.  */
 
       if (WORD_REGISTER_OPERATIONS
-	  && GET_MODE_PRECISION (inner_mode) <= BITS_PER_WORD
-	  && LOAD_EXTEND_OP (inner_mode) == SIGN_EXTEND
+	  && load_extend_op (inner_mode) == SIGN_EXTEND
 	  && paradoxical_subreg_p (x)
 	  && (MEM_P (SUBREG_REG (x)) || REG_P (SUBREG_REG (x))))
 	return cached_num_sign_bit_copies (SUBREG_REG (x), mode,