cse.c: Use HOST_WIDE_INT_M1 instead of ~(HOST_WIDE_INT) 0.
	* cse.c: Use HOST_WIDE_INT_M1 instead of ~(HOST_WIDE_INT) 0.
	* combine.c: Use HOST_WIDE_INT_M1U instead of
	~(unsigned HOST_WIDE_INT) 0.
	* double-int.h: Ditto.
	* dse.c: Ditto.
	* dwarf2asm.c: Ditto.
	* expmed.c: Ditto.
	* genmodes.c: Ditto.
	* match.pd: Ditto.
	* read-rtl.c: Ditto.
	* tree-ssa-loop-ivopts.c: Ditto.
	* tree-ssa-loop-prefetch.c: Ditto.
	* tree-vect-generic.c: Ditto.
	* tree-vect-patterns.c: Ditto.
	* tree.c: Ditto.

From-SVN: r238529
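HOST_WIDE_INT is GCC's host-side wide integer type, and gcc/hwint.h provides named constants for the common bit patterns, so the all-ones spellings above can be written as HOST_WIDE_INT_M1 (signed) and HOST_WIDE_INT_M1U (unsigned). Below is a minimal standalone sketch of the equivalences this commit relies on, assuming a 64-bit long long stands in for HOST_WIDE_INT; the real definitions live in gcc/hwint.h.

#include <assert.h>

#define HOST_WIDE_INT      long long
#define HOST_WIDE_INT_M1   ((long long) -1)
#define HOST_WIDE_INT_M1U  ((unsigned long long) -1)

int
main (void)
{
  /* The rewrites in this commit are value-preserving: both spellings
     denote the all-ones bit pattern in their respective signedness.  */
  assert (HOST_WIDE_INT_M1 == ~(HOST_WIDE_INT) 0);
  assert (HOST_WIDE_INT_M1U == ~(unsigned HOST_WIDE_INT) 0);
  return 0;
}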
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,21 @@
+2016-07-20  Uros Bizjak  <ubizjak@gmail.com>
+
+	* cse.c: Use HOST_WIDE_INT_M1 instead of ~(HOST_WIDE_INT) 0.
+	* combine.c: Use HOST_WIDE_INT_M1U instead of
+	~(unsigned HOST_WIDE_INT) 0.
+	* double-int.h: Ditto.
+	* dse.c: Ditto.
+	* dwarf2asm.c: Ditto.
+	* expmed.c: Ditto.
+	* genmodes.c: Ditto.
+	* match.pd: Ditto.
+	* read-rtl.c: Ditto.
+	* tree-ssa-loop-ivopts.c: Ditto.
+	* tree-ssa-loop-prefetch.c: Ditto.
+	* tree-vect-generic.c: Ditto.
+	* tree-vect-patterns.c: Ditto.
+	* tree.c: Ditto.
+
 2016-07-20  Georg-Johann Lay  <avr@gjlay.de>
 
 	* gcc/config/avr.c (avr_legitimize_address) [AVR_TINY]: Force
--- a/gcc/combine.c
+++ b/gcc/combine.c
@@ -1660,7 +1660,7 @@ update_rsp_from_reg_equal (reg_stat_type *rsp, rtx_insn *insn, const_rtx set,
     }
 
   /* Don't call nonzero_bits if it cannot change anything.  */
-  if (rsp->nonzero_bits != ~(unsigned HOST_WIDE_INT) 0)
+  if (rsp->nonzero_bits != HOST_WIDE_INT_M1U)
     {
       bits = nonzero_bits (src, nonzero_bits_mode);
       if (reg_equal && bits)
@@ -6541,7 +6541,7 @@ simplify_set (rtx x)
 
   if (GET_MODE_CLASS (mode) == MODE_INT && HWI_COMPUTABLE_MODE_P (mode))
     {
-      src = force_to_mode (src, mode, ~(unsigned HOST_WIDE_INT) 0, 0);
+      src = force_to_mode (src, mode, HOST_WIDE_INT_M1U, 0);
       SUBST (SET_SRC (x), src);
     }
 
@@ -7446,7 +7446,7 @@ make_extraction (machine_mode mode, rtx inner, HOST_WIDE_INT pos,
       else
	new_rtx = force_to_mode (inner, tmode,
				 len >= HOST_BITS_PER_WIDE_INT
-				 ? ~(unsigned HOST_WIDE_INT) 0
+				 ? HOST_WIDE_INT_M1U
				 : (HOST_WIDE_INT_1U << len) - 1,
				 0);
 
@@ -7635,7 +7635,7 @@ make_extraction (machine_mode mode, rtx inner, HOST_WIDE_INT pos,
       inner = force_to_mode (inner, wanted_inner_mode,
			     pos_rtx
			     || len + orig_pos >= HOST_BITS_PER_WIDE_INT
-			     ? ~(unsigned HOST_WIDE_INT) 0
+			     ? HOST_WIDE_INT_M1U
			     : (((HOST_WIDE_INT_1U << len) - 1)
				<< orig_pos),
			     0);
@@ -8110,7 +8110,7 @@ make_compound_operation (rtx x, enum rtx_code in_code)
	  && subreg_lowpart_p (x))
	{
	  rtx newer
-	    = force_to_mode (tem, mode, ~(unsigned HOST_WIDE_INT) 0, 0);
+	    = force_to_mode (tem, mode, HOST_WIDE_INT_M1U, 0);
 
	  /* If we have something other than a SUBREG, we might have
	     done an expansion, so rerun ourselves.  */
@@ -8390,7 +8390,7 @@ force_to_mode (rtx x, machine_mode mode, unsigned HOST_WIDE_INT mask,
     do not know, we need to assume that all bits up to the highest-order
     bit in MASK will be needed.  This is how we form such a mask.  */
  if (mask & (HOST_WIDE_INT_1U << (HOST_BITS_PER_WIDE_INT - 1)))
-    fuller_mask = ~(unsigned HOST_WIDE_INT) 0;
+    fuller_mask = HOST_WIDE_INT_M1U;
  else
    fuller_mask = ((HOST_WIDE_INT_1U << (floor_log2 (mask) + 1))
		   - 1);
@@ -8733,7 +8733,7 @@ force_to_mode (rtx x, machine_mode mode, unsigned HOST_WIDE_INT mask,
 
	  if (GET_MODE_PRECISION (GET_MODE (x)) > HOST_BITS_PER_WIDE_INT)
	    {
-	      nonzero = ~(unsigned HOST_WIDE_INT) 0;
+	      nonzero = HOST_WIDE_INT_M1U;
 
	      /* GET_MODE_PRECISION (GET_MODE (x)) - INTVAL (XEXP (x, 1))
		 is the number of bits a full-width mask would have set.
@@ -9496,7 +9496,7 @@ make_field_assignment (rtx x)
			 dest);
  src = force_to_mode (src, mode,
		       GET_MODE_PRECISION (mode) >= HOST_BITS_PER_WIDE_INT
-		       ? ~(unsigned HOST_WIDE_INT) 0
+		       ? HOST_WIDE_INT_M1U
		       : (HOST_WIDE_INT_1U << len) - 1,
		       0);
 
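A pattern that recurs throughout the combine.c hunks above is the mask argument to force_to_mode: all ones when every bit may matter, otherwise a mask of the LEN low bits. The guard against LEN covering the whole word exists because a shift count equal to the type width is undefined behavior in C. A sketch under the same 64-bit assumption as the earlier example (low_bits_mask is a hypothetical name, not a GCC function):

/* Mask of the LEN low bits, falling back to all-ones when LEN spans
   the whole word; shifting 1 left by 64 would be undefined.  */
static unsigned long long
low_bits_mask (int len)
{
  return len >= 64
	 ? (unsigned long long) -1              /* HOST_WIDE_INT_M1U */
	 : ((unsigned long long) 1 << len) - 1; /* HOST_WIDE_INT_1U << len */
}

For example, low_bits_mask (8) yields 0xff, while low_bits_mask (64) yields all ones without ever performing a full-width shift.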
--- a/gcc/cse.c
+++ b/gcc/cse.c
@@ -4565,7 +4565,7 @@ cse_insn (rtx_insn *insn)
	      else
		shift = INTVAL (pos);
	      if (INTVAL (width) == HOST_BITS_PER_WIDE_INT)
-		mask = ~(HOST_WIDE_INT) 0;
+		mask = HOST_WIDE_INT_M1;
	      else
		mask = (HOST_WIDE_INT_1 << INTVAL (width)) - 1;
	      val = (val >> shift) & mask;
@@ -5233,7 +5233,7 @@ cse_insn (rtx_insn *insn)
	      else
		shift = INTVAL (pos);
	      if (INTVAL (width) == HOST_BITS_PER_WIDE_INT)
-		mask = ~(HOST_WIDE_INT) 0;
+		mask = HOST_WIDE_INT_M1;
	      else
		mask = (HOST_WIDE_INT_1 << INTVAL (width)) - 1;
	      val &= ~(mask << shift);
--- a/gcc/double-int.h
+++ b/gcc/double-int.h
@@ -365,7 +365,7 @@ double_int::operator ^ (double_int b) const
 
 void dump_double_int (FILE *, double_int, bool);
 
-#define ALL_ONES (~((unsigned HOST_WIDE_INT) 0))
+#define ALL_ONES HOST_WIDE_INT_M1U
 
 /* The operands of the following comparison functions must be processed
    with double_int_ext, if their precision is less than
--- a/gcc/dse.c
+++ b/gcc/dse.c
@@ -288,7 +288,7 @@ struct store_info
 static unsigned HOST_WIDE_INT
 lowpart_bitmask (int n)
 {
-  unsigned HOST_WIDE_INT mask = ~(unsigned HOST_WIDE_INT) 0;
+  unsigned HOST_WIDE_INT mask = HOST_WIDE_INT_M1U;
   return mask >> (HOST_BITS_PER_WIDE_INT - n);
 }
 
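lowpart_bitmask builds the same kind of low-bit mask from the opposite direction: start from all ones and shift right. That form needs no full-width-shift guard for n up to the word size, since n == HOST_BITS_PER_WIDE_INT gives a shift count of zero (n == 0, by contrast, would shift by the full width and be undefined). A standalone sketch, again assuming a 64-bit word:

/* Mask of the N low bits via a right shift of all-ones;
   valid for 1 <= n <= 64.  */
static unsigned long long
lowpart_bitmask (int n)
{
  unsigned long long mask = (unsigned long long) -1; /* HOST_WIDE_INT_M1U */
  return mask >> (64 - n);
}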
--- a/gcc/dwarf2asm.c
+++ b/gcc/dwarf2asm.c
@@ -97,7 +97,7 @@ dw2_asm_output_data (int size, unsigned HOST_WIDE_INT value,
   va_start (ap, comment);
 
   if (size * 8 < HOST_BITS_PER_WIDE_INT)
-    value &= ~(~(unsigned HOST_WIDE_INT) 0 << (size * 8));
+    value &= ~(HOST_WIDE_INT_M1U << (size * 8));
 
   if (op)
     {
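The dwarf2asm.c site composes the all-ones constant the other way around: ~(HOST_WIDE_INT_M1U << (size * 8)) keeps only the low size bytes of value, and the surrounding if is what keeps the shift count below the word width. A sketch of the same computation, with truncate_to_bytes as a hypothetical name and 64 standing in for HOST_BITS_PER_WIDE_INT:

/* Clear everything above the low SIZE bytes of VALUE; the guard keeps
   the shift count below 64, since size == 8 would shift by the full
   width.  */
static unsigned long long
truncate_to_bytes (unsigned long long value, int size)
{
  if (size * 8 < 64)
    value &= ~((unsigned long long) -1 << (size * 8));
  return value;
}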
--- a/gcc/expmed.c
+++ b/gcc/expmed.c
@@ -3513,7 +3513,7 @@ invert_mod2n (unsigned HOST_WIDE_INT x, int n)
   int nbit = 3;
 
   mask = (n == HOST_BITS_PER_WIDE_INT
-	  ? ~(unsigned HOST_WIDE_INT) 0
+	  ? HOST_WIDE_INT_M1U
	  : (HOST_WIDE_INT_1U << n) - 1);
 
   while (nbit < n)
@@ -4423,7 +4423,7 @@ expand_divmod (int rem_flag, enum tree_code code, machine_mode mode,
		    || size - 1 >= BITS_PER_WORD)
		  goto fail1;
 
-		ml |= (~(unsigned HOST_WIDE_INT) 0) << (size - 1);
+		ml |= HOST_WIDE_INT_M1U << (size - 1);
		mlr = gen_int_mode (ml, compute_mode);
		extra_cost = (shift_cost (speed, compute_mode, post_shift)
			      + shift_cost (speed, compute_mode, size - 1)
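In the expand_divmod hunk, ml |= HOST_WIDE_INT_M1U << (size - 1) sign-extends a multiplier whose low size-1 bits are already in ml. The unsigned flavor of the constant matters at sites like this: left-shifting the signed ~(HOST_WIDE_INT) 0, a negative value, would be undefined behavior in C. A self-contained sketch under the same 64-bit assumption:

#include <stdio.h>

int
main (void)
{
  int size = 5;
  unsigned long long ml = 0x0b;                 /* low size-1 bits of a multiplier */

  /* OR all-ones into bit size-1 and above; well defined because the
     shifted constant is unsigned.  */
  ml |= (unsigned long long) -1 << (size - 1);  /* HOST_WIDE_INT_M1U << (size - 1) */
  printf ("%#llx\n", ml);                       /* prints 0xfffffffffffffffb */
  return 0;
}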
--- a/gcc/genmodes.c
+++ b/gcc/genmodes.c
@@ -1409,7 +1409,7 @@ emit_mode_mask (void)
   puts ("\
 #define MODE_MASK(m) \\\n\
   ((m) >= HOST_BITS_PER_WIDE_INT) \\\n\
-   ? ~(unsigned HOST_WIDE_INT) 0 \\\n\
+   ? HOST_WIDE_INT_M1U \\\n\
    : (HOST_WIDE_INT_1U << (m)) - 1\n");
 
   for_all_modes (c, m)
--- a/gcc/match.pd
+++ b/gcc/match.pd
@@ -1487,7 +1487,7 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
	      is all ones.  */
	   }
       }
-     zerobits = ~(unsigned HOST_WIDE_INT) 0;
+     zerobits = HOST_WIDE_INT_M1U;
      if (shiftc < prec)
        {
	  zerobits >>= HOST_BITS_PER_WIDE_INT - shiftc;
@@ -1522,7 +1522,7 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
	       break;
	     }
   (if (prec < HOST_BITS_PER_WIDE_INT
-       || newmask == ~(unsigned HOST_WIDE_INT) 0)
+       || newmask == HOST_WIDE_INT_M1U)
    (with
     { tree newmaskt = build_int_cst_type (TREE_TYPE (@2), newmask); }
     (if (!tree_int_cst_equal (newmaskt, @2))
--- a/gcc/read-rtl.c
+++ b/gcc/read-rtl.c
@@ -711,7 +711,7 @@ atoll (const char *p)
       if (new_wide < tmp_wide)
	{
	  /* Return INT_MAX equiv on overflow.  */
-	  tmp_wide = (~(unsigned HOST_WIDE_INT) 0) >> 1;
+	  tmp_wide = HOST_WIDE_INT_M1U >> 1;
	  break;
	}
      tmp_wide = new_wide;
--- a/gcc/tree-ssa-loop-ivopts.c
+++ b/gcc/tree-ssa-loop-ivopts.c
@@ -4217,7 +4217,7 @@ get_address_cost (bool symbol_present, bool var_present,
     }
 
   bits = GET_MODE_BITSIZE (address_mode);
-  mask = ~(~(unsigned HOST_WIDE_INT) 0 << (bits - 1) << 1);
+  mask = ~(HOST_WIDE_INT_M1U << (bits - 1) << 1);
   offset &= mask;
   if ((offset >> (bits - 1) & 1))
     offset |= ~mask;
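The ivopts hunk (and the tree.c one at the end of this commit) uses a double shift, HOST_WIDE_INT_M1U << (bits - 1) << 1, rather than a single shift by bits: when bits equals the word width, a single shift would be undefined, while two shifts of bits-1 and 1 are always in range. A sketch of the resulting mask, with mask_of_bits as a hypothetical name and 64 standing in for HOST_BITS_PER_WIDE_INT:

/* Mask of the BITS low bits, safe even when BITS == 64: shifting by
   (bits - 1) and then by 1 never reaches a full-width shift count.  */
static unsigned long long
mask_of_bits (int bits)     /* assumes 1 <= bits <= 64 */
{
  return ~((unsigned long long) -1 << (bits - 1) << 1);
}

With the mask in hand, the code sign-extends: if bit bits-1 of offset is set, offset |= ~mask fills in the high bits.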
--- a/gcc/tree-ssa-loop-prefetch.c
+++ b/gcc/tree-ssa-loop-prefetch.c
@@ -233,7 +233,7 @@ struct mem_ref_group
 
 /* Assigned to PREFETCH_BEFORE when all iterations are to be prefetched.  */
 
-#define PREFETCH_ALL (~(unsigned HOST_WIDE_INT) 0)
+#define PREFETCH_ALL HOST_WIDE_INT_M1U
 
 /* Do not generate a prefetch if the unroll factor is significantly less
    than what is required by the prefetch.  This is to avoid redundant
--- a/gcc/tree-vect-generic.c
+++ b/gcc/tree-vect-generic.c
@@ -575,7 +575,7 @@ expand_vector_divmod (gimple_stmt_iterator *gsi, tree type, tree op0,
	      if (ml >= HOST_WIDE_INT_1U << (prec - 1))
		{
		  this_mode = 4 + (d < 0);
-		  ml |= (~(unsigned HOST_WIDE_INT) 0) << (prec - 1);
+		  ml |= HOST_WIDE_INT_M1U << (prec - 1);
		}
	      else
		this_mode = 2 + (d < 0);
--- a/gcc/tree-vect-patterns.c
+++ b/gcc/tree-vect-patterns.c
@@ -2861,7 +2861,7 @@ vect_recog_divmod_pattern (vec<gimple *> *stmts,
	  if (ml >= HOST_WIDE_INT_1U << (prec - 1))
	    {
	      add = true;
-	      ml |= (~(unsigned HOST_WIDE_INT) 0) << (prec - 1);
+	      ml |= HOST_WIDE_INT_M1U << (prec - 1);
	    }
	  if (post_shift >= prec)
	    return NULL;
--- a/gcc/tree.c
+++ b/gcc/tree.c
@@ -11338,9 +11338,9 @@ int_cst_value (const_tree x)
     {
       bool negative = ((val >> (bits - 1)) & 1) != 0;
       if (negative)
-	val |= (~(unsigned HOST_WIDE_INT) 0) << (bits - 1) << 1;
+	val |= HOST_WIDE_INT_M1U << (bits - 1) << 1;
       else
-	val &= ~((~(unsigned HOST_WIDE_INT) 0) << (bits - 1) << 1);
+	val &= ~(HOST_WIDE_INT_M1U << (bits - 1) << 1);
     }
 
   return val;