poly_int: GET_MODE_BITSIZE

This patch changes GET_MODE_BITSIZE from an unsigned short
to a poly_uint16.
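
Nearly all of the churn below is a single idiom: once GET_MODE_BITSIZE
returns a poly_uint16, plain ==, != and < comparisons of mode sizes no
longer compile, so call sites switch to the poly-int.h predicates
(known_eq, maybe_ne, maybe_lt, known_le, ...).  A minimal sketch of that
idiom, assuming the usual GCC internal headers; the helper names are
invented for illustration only:

#include "config.h"
#include "system.h"
#include "coretypes.h"  /* pulls in machmode.h and poly-int.h */

/* True only if M1 and M2 are known to have the same bit size for every
   runtime vector length (the replacement for "==").  */
static bool
same_bitsize_p (machine_mode m1, machine_mode m2)
{
  return known_eq (GET_MODE_BITSIZE (m1), GET_MODE_BITSIZE (m2));
}

/* True if the bit sizes might differ; the conservative replacement for
   "!=" when inequality has to be assumed.  */
static bool
bitsize_may_differ_p (machine_mode m1, machine_mode m2)
{
  return maybe_ne (GET_MODE_BITSIZE (m1), GET_MODE_BITSIZE (m2));
}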

2018-01-03  Richard Sandiford  <richard.sandiford@linaro.org>
	    Alan Hayward  <alan.hayward@arm.com>
	    David Sherwood  <david.sherwood@arm.com>

gcc/
	* machmode.h (mode_to_bits): Return a poly_uint16 rather than an
	unsigned short.
	(GET_MODE_BITSIZE): Return a constant if ONLY_FIXED_SIZE_MODES,
	or if measurement_type is not polynomial.
	* calls.c (shift_return_value): Treat GET_MODE_BITSIZE as polynomial.
	* combine.c (make_extraction): Likewise.
	* dse.c (find_shift_sequence): Likewise.
	* dwarf2out.c (mem_loc_descriptor): Likewise.
	* expmed.c (store_integral_bit_field, extract_bit_field_1): Likewise.
	(extract_bit_field, extract_low_bits): Likewise.
	* expr.c (convert_move, convert_modes, emit_move_insn_1): Likewise.
	(optimize_bitfield_assignment_op, expand_assignment): Likewise.
	(store_expr_with_bounds, store_field, expand_expr_real_1): Likewise.
	* fold-const.c (optimize_bit_field_compare, merge_ranges): Likewise.
	* gimple-fold.c (optimize_atomic_compare_exchange_p): Likewise.
	* reload.c (find_reloads): Likewise.
	* reload1.c (alter_reg): Likewise.
	* stor-layout.c (bitwise_mode_for_mode, compute_record_mode): Likewise.
	* targhooks.c (default_secondary_memory_needed_mode): Likewise.
	* tree-if-conv.c (predicate_mem_writes): Likewise.
	* tree-ssa-strlen.c (handle_builtin_memcmp): Likewise.
	* tree-vect-patterns.c (adjust_bool_pattern): Likewise.
	* tree-vect-stmts.c (vectorizable_simd_clone_call): Likewise.
	* valtrack.c (dead_debug_insert_temp): Likewise.
	* varasm.c (mergeable_constant_section): Likewise.
	* config/sh/sh.h (LOCAL_ALIGNMENT): Use as_a <fixed_size_mode>.

gcc/ada/
	* gcc-interface/misc.c (enumerate_modes): Treat GET_MODE_BITSIZE
	as polynomial.

gcc/c-family/
	* c-ubsan.c (ubsan_instrument_shift): Treat GET_MODE_BITSIZE
	as polynomial.

Co-Authored-By: Alan Hayward <alan.hayward@arm.com>
Co-Authored-By: David Sherwood <david.sherwood@arm.com>

From-SVN: r256200
commit 73a699ae37 (parent 79c3f1b3c7)
Authored and committed by Richard Sandiford, 2018-01-03 21:42:42 +00:00
25 changed files with 159 additions and 65 deletions

gcc/ada/gcc-interface/misc.c

@@ -1301,14 +1301,14 @@ enumerate_modes (void (*f) (const char *, int, int, int, int, int, int, int))
}
/* If no predefined C types were found, register the mode itself. */
int nunits, precision;
int nunits, precision, bitsize;
if (!skip_p
&& GET_MODE_NUNITS (i).is_constant (&nunits)
&& GET_MODE_PRECISION (i).is_constant (&precision))
&& GET_MODE_PRECISION (i).is_constant (&precision)
&& GET_MODE_BITSIZE (i).is_constant (&bitsize))
f (GET_MODE_NAME (i), digs, complex_p,
vector_p ? nunits : 0, float_rep,
precision, GET_MODE_BITSIZE (i),
GET_MODE_ALIGNMENT (i));
precision, bitsize, GET_MODE_ALIGNMENT (i));
}
}

gcc/c-family/c-ubsan.c

@@ -132,7 +132,8 @@ ubsan_instrument_shift (location_t loc, enum tree_code code,
/* If this is not a signed operation, don't perform overflow checks.
Also punt on bit-fields. */
if (TYPE_OVERFLOW_WRAPS (type0)
|| GET_MODE_BITSIZE (TYPE_MODE (type0)) != TYPE_PRECISION (type0)
|| maybe_ne (GET_MODE_BITSIZE (TYPE_MODE (type0)),
TYPE_PRECISION (type0))
|| !sanitize_flags_p (SANITIZE_SHIFT_BASE))
;

gcc/calls.c

@@ -3024,12 +3024,11 @@ check_sibcall_argument_overlap (rtx_insn *insn, struct arg_data *arg,
bool
shift_return_value (machine_mode mode, bool left_p, rtx value)
{
HOST_WIDE_INT shift;
gcc_assert (REG_P (value) && HARD_REGISTER_P (value));
machine_mode value_mode = GET_MODE (value);
shift = GET_MODE_BITSIZE (value_mode) - GET_MODE_BITSIZE (mode);
if (shift == 0)
poly_int64 shift = GET_MODE_BITSIZE (value_mode) - GET_MODE_BITSIZE (mode);
if (known_eq (shift, 0))
return false;
/* Use ashr rather than lshr for right shifts. This is for the benefit

gcc/combine.c

@@ -7707,8 +7707,9 @@ make_extraction (machine_mode mode, rtx inner, HOST_WIDE_INT pos,
are the same as for a register operation, since at present we don't
have named patterns for aligned memory structures. */
struct extraction_insn insn;
if (get_best_reg_extraction_insn (&insn, pattern,
GET_MODE_BITSIZE (inner_mode), mode))
unsigned int inner_size;
if (GET_MODE_BITSIZE (inner_mode).is_constant (&inner_size)
&& get_best_reg_extraction_insn (&insn, pattern, inner_size, mode))
{
wanted_inner_reg_mode = insn.struct_mode.require ();
pos_mode = insn.pos_mode;
@@ -7744,9 +7745,11 @@ make_extraction (machine_mode mode, rtx inner, HOST_WIDE_INT pos,
If it's a MEM we need to recompute POS relative to that.
However, if we're extracting from (or inserting into) a register,
we want to recompute POS relative to wanted_inner_mode. */
int width = (MEM_P (inner)
? GET_MODE_BITSIZE (is_mode)
: GET_MODE_BITSIZE (wanted_inner_mode));
int width;
if (!MEM_P (inner))
width = GET_MODE_BITSIZE (wanted_inner_mode);
else if (!GET_MODE_BITSIZE (is_mode).is_constant (&width))
return NULL_RTX;
if (pos_rtx == 0)
pos = width - len - pos;

gcc/config/sh/sh.h

@@ -468,7 +468,9 @@ extern const sh_atomic_model& selected_atomic_model (void);
#define LOCAL_ALIGNMENT(TYPE, ALIGN) \
((GET_MODE_CLASS (TYPE_MODE (TYPE)) == MODE_COMPLEX_INT \
|| GET_MODE_CLASS (TYPE_MODE (TYPE)) == MODE_COMPLEX_FLOAT) \
? (unsigned) MIN (BIGGEST_ALIGNMENT, GET_MODE_BITSIZE (TYPE_MODE (TYPE))) \
? (unsigned) MIN (BIGGEST_ALIGNMENT, \
GET_MODE_BITSIZE (as_a <fixed_size_mode> \
(TYPE_MODE (TYPE)))) \
: (unsigned) DATA_ALIGNMENT(TYPE, ALIGN))
/* Make arrays of chars word-aligned for the same reasons. */
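
The sh.h hunk above converts the mode rather than the comparison:
as_a <fixed_size_mode> asserts that the mode cannot be variable-sized,
and GET_MODE_BITSIZE on a fixed_size_mode still yields a plain integer
that MIN can use.  A sketch of the same pattern, in the same GCC-internal
context as the sketch near the top; the helper name is invented:

/* Bit size of MODE as an ordinary integer.  Only valid when MODE is
   known to have a fixed size, which as_a <fixed_size_mode> checks.  */
static unsigned int
fixed_bitsize (machine_mode mode)
{
  return GET_MODE_BITSIZE (as_a <fixed_size_mode> (mode));
}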

gcc/dse.c

@@ -1734,7 +1734,7 @@ find_shift_sequence (poly_int64 access_size,
/* Try a wider mode if truncating the store mode to NEW_MODE
requires a real instruction. */
if (GET_MODE_BITSIZE (new_mode) < GET_MODE_BITSIZE (store_mode)
if (maybe_lt (GET_MODE_SIZE (new_mode), GET_MODE_SIZE (store_mode))
&& !TRULY_NOOP_TRUNCATION_MODES_P (new_mode, store_mode))
continue;

gcc/dwarf2out.c

@@ -15395,7 +15395,8 @@ mem_loc_descriptor (rtx rtl, machine_mode mode,
We output CONST_DOUBLEs as blocks. */
if (mode == VOIDmode
|| (GET_MODE (rtl) == VOIDmode
&& GET_MODE_BITSIZE (mode) != HOST_BITS_PER_DOUBLE_INT))
&& maybe_ne (GET_MODE_BITSIZE (mode),
HOST_BITS_PER_DOUBLE_INT)))
break;
type_die = base_type_for_mode (mode, SCALAR_INT_MODE_P (mode));
if (type_die == NULL)

gcc/expmed.c

@@ -867,7 +867,7 @@ store_integral_bit_field (rtx op0, opt_scalar_int_mode op0_mode,
if (!MEM_P (op0)
&& !reverse
&& lowpart_bit_field_p (bitnum, bitsize, op0_mode.require ())
&& bitsize == GET_MODE_BITSIZE (fieldmode)
&& known_eq (bitsize, GET_MODE_BITSIZE (fieldmode))
&& optab_handler (movstrict_optab, fieldmode) != CODE_FOR_nothing)
{
struct expand_operand ops[2];
@@ -1638,9 +1638,10 @@ extract_bit_field_1 (rtx str_rtx, poly_uint64 bitsize, poly_uint64 bitnum,
if (GET_MODE_INNER (new_mode) != GET_MODE_INNER (tmode))
{
scalar_mode inner_mode = GET_MODE_INNER (tmode);
unsigned int nunits = (GET_MODE_BITSIZE (GET_MODE (op0))
/ GET_MODE_UNIT_BITSIZE (tmode));
if (!mode_for_vector (inner_mode, nunits).exists (&new_mode)
poly_uint64 nunits;
if (!multiple_p (GET_MODE_BITSIZE (GET_MODE (op0)),
GET_MODE_UNIT_BITSIZE (tmode), &nunits)
|| !mode_for_vector (inner_mode, nunits).exists (&new_mode)
|| !VECTOR_MODE_P (new_mode)
|| GET_MODE_SIZE (new_mode) != GET_MODE_SIZE (GET_MODE (op0))
|| GET_MODE_INNER (new_mode) != GET_MODE_INNER (tmode)
@@ -2043,9 +2044,9 @@ extract_bit_field (rtx str_rtx, poly_uint64 bitsize, poly_uint64 bitnum,
machine_mode mode1;
/* Handle -fstrict-volatile-bitfields in the cases where it applies. */
if (GET_MODE_BITSIZE (GET_MODE (str_rtx)) > 0)
if (maybe_ne (GET_MODE_BITSIZE (GET_MODE (str_rtx)), 0))
mode1 = GET_MODE (str_rtx);
else if (target && GET_MODE_BITSIZE (GET_MODE (target)) > 0)
else if (target && maybe_ne (GET_MODE_BITSIZE (GET_MODE (target)), 0))
mode1 = GET_MODE (target);
else
mode1 = tmode;
@@ -2361,7 +2362,7 @@ extract_low_bits (machine_mode mode, machine_mode src_mode, rtx src)
if (GET_MODE_CLASS (mode) == MODE_CC || GET_MODE_CLASS (src_mode) == MODE_CC)
return NULL_RTX;
if (GET_MODE_BITSIZE (mode) == GET_MODE_BITSIZE (src_mode)
if (known_eq (GET_MODE_BITSIZE (mode), GET_MODE_BITSIZE (src_mode))
&& targetm.modes_tieable_p (mode, src_mode))
{
rtx x = gen_lowpart_common (mode, src);
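
The extract_bit_field_1 hunk above replaces a plain division of two sizes
with multiple_p, which succeeds only when the division is exact for every
runtime vector length and writes the quotient through its last argument.
A sketch of that idiom, same assumed context as above, invented helper
name:

/* Number of TMODE-sized vector elements that fit in MODE, or failure if
   the bit size is not a known exact multiple of the unit size.  */
static bool
get_unit_count (machine_mode mode, machine_mode tmode, poly_uint64 *nunits)
{
  return multiple_p (GET_MODE_BITSIZE (mode),
                     GET_MODE_UNIT_BITSIZE (tmode), nunits);
}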

gcc/expr.c

@@ -246,7 +246,8 @@ convert_move (rtx to, rtx from, int unsignedp)
if (VECTOR_MODE_P (to_mode) || VECTOR_MODE_P (from_mode))
{
gcc_assert (GET_MODE_BITSIZE (from_mode) == GET_MODE_BITSIZE (to_mode));
gcc_assert (known_eq (GET_MODE_BITSIZE (from_mode),
GET_MODE_BITSIZE (to_mode)));
if (VECTOR_MODE_P (to_mode))
from = simplify_gen_subreg (to_mode, from, GET_MODE (from), 0);
@@ -699,7 +700,8 @@ convert_modes (machine_mode mode, machine_mode oldmode, rtx x, int unsignedp)
subreg operation. */
if (VECTOR_MODE_P (mode) && GET_MODE (x) == VOIDmode)
{
gcc_assert (GET_MODE_BITSIZE (mode) == GET_MODE_BITSIZE (oldmode));
gcc_assert (known_eq (GET_MODE_BITSIZE (mode),
GET_MODE_BITSIZE (oldmode)));
return simplify_gen_subreg (mode, x, oldmode, 0);
}
@@ -3680,7 +3682,8 @@ emit_move_insn_1 (rtx x, rtx y)
only safe when simplify_subreg can convert MODE constants into integer
constants. At present, it can only do this reliably if the value
fits within a HOST_WIDE_INT. */
if (!CONSTANT_P (y) || GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
if (!CONSTANT_P (y)
|| known_le (GET_MODE_BITSIZE (mode), HOST_BITS_PER_WIDE_INT))
{
rtx_insn *ret = emit_move_via_integer (mode, x, y, lra_in_progress);
@@ -4642,8 +4645,9 @@ optimize_bitfield_assignment_op (poly_uint64 pbitsize,
machine_mode mode1, rtx str_rtx,
tree to, tree src, bool reverse)
{
/* str_mode is not guaranteed to be a scalar type. */
machine_mode str_mode = GET_MODE (str_rtx);
unsigned int str_bitsize = GET_MODE_BITSIZE (str_mode);
unsigned int str_bitsize;
tree op0, op1;
rtx value, result;
optab binop;
@@ -4657,6 +4661,7 @@ optimize_bitfield_assignment_op (poly_uint64 pbitsize,
|| !pbitregion_start.is_constant (&bitregion_start)
|| !pbitregion_end.is_constant (&bitregion_end)
|| bitsize >= BITS_PER_WORD
|| !GET_MODE_BITSIZE (str_mode).is_constant (&str_bitsize)
|| str_bitsize > BITS_PER_WORD
|| TREE_SIDE_EFFECTS (to)
|| TREE_THIS_VOLATILE (to))
@@ -5208,7 +5213,7 @@ expand_assignment (tree to, tree from, bool nontemporal)
else
{
concat_store_slow:;
rtx temp = assign_stack_temp (GET_MODE (to_rtx),
rtx temp = assign_stack_temp (to_mode,
GET_MODE_SIZE (GET_MODE (to_rtx)));
write_complex_part (temp, XEXP (to_rtx, 0), false);
write_complex_part (temp, XEXP (to_rtx, 1), true);
@@ -5651,8 +5656,8 @@ store_expr_with_bounds (tree exp, rtx target, int call_param_p,
{
if (GET_MODE_CLASS (GET_MODE (target))
!= GET_MODE_CLASS (TYPE_MODE (TREE_TYPE (exp)))
&& GET_MODE_BITSIZE (GET_MODE (target))
== GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (exp))))
&& known_eq (GET_MODE_BITSIZE (GET_MODE (target)),
GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (exp)))))
{
rtx t = simplify_gen_subreg (GET_MODE (target), temp,
TYPE_MODE (TREE_TYPE (exp)), 0);
@@ -6955,7 +6960,8 @@ store_field (rtx target, poly_int64 bitsize, poly_int64 bitpos,
{
tree type = TREE_TYPE (exp);
if (INTEGRAL_TYPE_P (type)
&& TYPE_PRECISION (type) < GET_MODE_BITSIZE (TYPE_MODE (type))
&& maybe_ne (TYPE_PRECISION (type),
GET_MODE_BITSIZE (TYPE_MODE (type)))
&& known_eq (bitsize, TYPE_PRECISION (type)))
{
tree op = gimple_assign_rhs1 (nop_def);
@@ -10286,8 +10292,8 @@ expand_expr_real_1 (tree exp, rtx target, machine_mode tmode,
if (known_eq (offset, 0)
&& !reverse
&& tree_fits_uhwi_p (TYPE_SIZE (type))
&& (GET_MODE_BITSIZE (DECL_MODE (base))
== tree_to_uhwi (TYPE_SIZE (type))))
&& known_eq (GET_MODE_BITSIZE (DECL_MODE (base)),
tree_to_uhwi (TYPE_SIZE (type))))
return expand_expr (build1 (VIEW_CONVERT_EXPR, type, base),
target, tmode, modifier);
if (TYPE_MODE (type) == BLKmode)

gcc/fold-const.c

@@ -4063,7 +4063,7 @@ optimize_bit_field_compare (location_t loc, enum tree_code code,
|| !known_size_p (plbitsize)
|| !plbitsize.is_constant (&lbitsize)
|| !plbitpos.is_constant (&lbitpos)
|| lbitsize == GET_MODE_BITSIZE (lmode)
|| known_eq (lbitsize, GET_MODE_BITSIZE (lmode))
|| offset != 0
|| TREE_CODE (linner) == PLACEHOLDER_EXPR
|| lvolatilep)
@@ -5190,8 +5190,9 @@ merge_ranges (int *pin_p, tree *plow, tree *phigh, int in0_p, tree low0,
switch (TREE_CODE (TREE_TYPE (low0)))
{
case ENUMERAL_TYPE:
if (TYPE_PRECISION (TREE_TYPE (low0))
!= GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (low0))))
if (maybe_ne (TYPE_PRECISION (TREE_TYPE (low0)),
GET_MODE_BITSIZE
(TYPE_MODE (TREE_TYPE (low0)))))
break;
/* FALLTHROUGH */
case INTEGER_TYPE:
@@ -5213,8 +5214,9 @@ merge_ranges (int *pin_p, tree *plow, tree *phigh, int in0_p, tree low0,
switch (TREE_CODE (TREE_TYPE (high1)))
{
case ENUMERAL_TYPE:
if (TYPE_PRECISION (TREE_TYPE (high1))
!= GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (high1))))
if (maybe_ne (TYPE_PRECISION (TREE_TYPE (high1)),
GET_MODE_BITSIZE
(TYPE_MODE (TREE_TYPE (high1)))))
break;
/* FALLTHROUGH */
case INTEGER_TYPE:

gcc/gimple-fold.c

@@ -3786,7 +3786,8 @@ optimize_atomic_compare_exchange_p (gimple *stmt)
/* Don't optimize floating point expected vars, VIEW_CONVERT_EXPRs
might not preserve all the bits. See PR71716. */
|| SCALAR_FLOAT_TYPE_P (etype)
|| TYPE_PRECISION (etype) != GET_MODE_BITSIZE (TYPE_MODE (etype)))
|| maybe_ne (TYPE_PRECISION (etype),
GET_MODE_BITSIZE (TYPE_MODE (etype))))
return false;
tree weak = gimple_call_arg (stmt, 3);

gcc/machmode.h

@@ -527,7 +527,7 @@ mode_to_bytes (machine_mode mode)
/* Return the base GET_MODE_BITSIZE value for MODE. */
ALWAYS_INLINE unsigned short
ALWAYS_INLINE poly_uint16
mode_to_bits (machine_mode mode)
{
return mode_to_bytes (mode) * BITS_PER_UNIT;
@@ -600,7 +600,29 @@ mode_to_nunits (machine_mode mode)
/* Get the size in bits of an object of mode MODE. */
#define GET_MODE_BITSIZE(MODE) (mode_to_bits (MODE))
#if ONLY_FIXED_SIZE_MODES
#define GET_MODE_BITSIZE(MODE) ((unsigned short) mode_to_bits (MODE).coeffs[0])
#else
ALWAYS_INLINE poly_uint16
GET_MODE_BITSIZE (machine_mode mode)
{
return mode_to_bits (mode);
}
template<typename T>
ALWAYS_INLINE typename if_poly<typename T::measurement_type>::type
GET_MODE_BITSIZE (const T &mode)
{
return mode_to_bits (mode);
}
template<typename T>
ALWAYS_INLINE typename if_nonpoly<typename T::measurement_type>::type
GET_MODE_BITSIZE (const T &mode)
{
return mode_to_bits (mode).coeffs[0];
}
#endif
/* Get the number of value bits of an object of mode MODE. */
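
With the machmode.h hunk above, the result type of GET_MODE_BITSIZE
depends on its argument: mode classes whose measurement_type is not
polynomial (scalar_int_mode, fixed_size_mode, ...) still give a plain
unsigned short, while a bare machine_mode gives a poly_uint16 that has to
be decomposed explicitly.  A sketch of both sides, for a build where the
polynomial form is in use; helper names invented:

/* Scalar modes keep the old interface: ordinary integer arithmetic
   still works on the result.  */
static unsigned int
scalar_bitsize (scalar_int_mode mode)
{
  return GET_MODE_BITSIZE (mode);
}

/* For a general machine_mode the result is polynomial, so callers that
   need a compile-time value test with is_constant first.  */
static bool
constant_bitsize (machine_mode mode, unsigned int *bits)
{
  return GET_MODE_BITSIZE (mode).is_constant (bits);
}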

gcc/reload.c

@@ -3121,10 +3121,11 @@ find_reloads (rtx_insn *insn, int replace, int ind_levels, int live_known,
|| (REG_P (operand)
&& REGNO (operand) >= FIRST_PSEUDO_REGISTER))
&& (WORD_REGISTER_OPERATIONS
|| ((GET_MODE_BITSIZE (GET_MODE (operand))
< BIGGEST_ALIGNMENT)
&& paradoxical_subreg_p (operand_mode[i],
GET_MODE (operand)))
|| (((maybe_lt
(GET_MODE_BITSIZE (GET_MODE (operand)),
BIGGEST_ALIGNMENT))
&& (paradoxical_subreg_p
(operand_mode[i], GET_MODE (operand)))))
|| BYTES_BIG_ENDIAN
|| ((GET_MODE_SIZE (operand_mode[i])
<= UNITS_PER_WORD)

gcc/reload1.c

@@ -2146,7 +2146,11 @@ alter_reg (int i, int from_reg, bool dont_share_p)
unsigned int inherent_align = GET_MODE_ALIGNMENT (mode);
machine_mode wider_mode = wider_subreg_mode (mode, reg_max_ref_mode[i]);
poly_uint64 total_size = GET_MODE_SIZE (wider_mode);
unsigned int min_align = GET_MODE_BITSIZE (reg_max_ref_mode[i]);
/* ??? Seems strange to derive the minimum alignment from the size,
but that's the traditional behavior. For polynomial-size modes,
the natural extension is to use the minimum possible size. */
unsigned int min_align
= constant_lower_bound (GET_MODE_BITSIZE (reg_max_ref_mode[i]));
poly_int64 adjust = 0;
something_was_spilled = true;
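
The alter_reg hunk above uses constant_lower_bound, which returns the
constant coefficient of the polynomial, i.e. the smallest value the size
can take at runtime; that is the natural choice when a fixed conservative
bound (here an alignment) is needed.  Sketch, same assumed context,
invented helper name:

/* Smallest number of bits an object of MODE can occupy at runtime;
   usable wherever a fixed lower bound is good enough.  */
static unsigned int
min_bitsize (machine_mode mode)
{
  return constant_lower_bound (GET_MODE_BITSIZE (mode));
}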

gcc/stor-layout.c

@@ -411,7 +411,6 @@ opt_machine_mode
bitwise_mode_for_mode (machine_mode mode)
{
/* Quick exit if we already have a suitable mode. */
unsigned int bitsize = GET_MODE_BITSIZE (mode);
scalar_int_mode int_mode;
if (is_a <scalar_int_mode> (mode, &int_mode)
&& GET_MODE_BITSIZE (int_mode) <= MAX_FIXED_MODE_SIZE)
@@ -420,6 +419,8 @@ bitwise_mode_for_mode (machine_mode mode)
/* Reuse the sanity checks from int_mode_for_mode. */
gcc_checking_assert ((int_mode_for_mode (mode), true));
poly_int64 bitsize = GET_MODE_BITSIZE (mode);
/* Try to replace complex modes with complex modes. In general we
expect both components to be processed independently, so we only
care whether there is a register for the inner mode. */
@@ -434,7 +435,8 @@ bitwise_mode_for_mode (machine_mode mode)
/* Try to replace vector modes with vector modes. Also try using vector
modes if an integer mode would be too big. */
if (VECTOR_MODE_P (mode) || bitsize > MAX_FIXED_MODE_SIZE)
if (VECTOR_MODE_P (mode)
|| maybe_gt (bitsize, MAX_FIXED_MODE_SIZE))
{
machine_mode trial = mode;
if ((GET_MODE_CLASS (trial) == MODE_VECTOR_INT
@@ -1772,7 +1774,7 @@ compute_record_mode (tree type)
does not apply to unions. */
if (TREE_CODE (type) == RECORD_TYPE && mode != VOIDmode
&& tree_fits_uhwi_p (TYPE_SIZE (type))
&& GET_MODE_BITSIZE (mode) == tree_to_uhwi (TYPE_SIZE (type)))
&& known_eq (GET_MODE_BITSIZE (mode), tree_to_uhwi (TYPE_SIZE (type))))
;
else
mode = mode_for_size_tree (TYPE_SIZE (type), MODE_INT, 1).else_blk ();

gcc/targhooks.c

@@ -1179,7 +1179,7 @@ machine_mode
default_secondary_memory_needed_mode (machine_mode mode)
{
if (!targetm.lra_p ()
&& GET_MODE_BITSIZE (mode) < BITS_PER_WORD
&& known_lt (GET_MODE_BITSIZE (mode), BITS_PER_WORD)
&& INTEGRAL_MODE_P (mode))
return mode_for_size (BITS_PER_WORD, GET_MODE_CLASS (mode), 0).require ();
return mode;

gcc/tree-if-conv.c

@@ -2229,7 +2229,10 @@ predicate_mem_writes (loop_p loop)
tree ref, addr, ptr, mask;
gcall *new_stmt;
gimple_seq stmts = NULL;
int bitsize = GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (lhs)));
machine_mode mode = TYPE_MODE (TREE_TYPE (lhs));
/* We checked before setting GF_PLF_2 that an equivalent
integer mode exists. */
int bitsize = GET_MODE_BITSIZE (mode).to_constant ();
ref = TREE_CODE (lhs) == SSA_NAME ? rhs : lhs;
mark_addressable (ref);
addr = force_gimple_operand_gsi (&gsi, build_fold_addr_expr (ref),
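
predicate_mem_writes above uses to_constant () rather than is_constant ():
the check mentioned in the added comment already guarantees that an
equivalent integer mode exists, so the size cannot be variable and the
conversion is unconditional (to_constant () fails a checking assert
otherwise).  Sketch, same assumed context, invented helper name:

/* Bit size of MODE as a plain integer, for callers that have already
   established that the size is not variable.  */
static int
asserted_bitsize (machine_mode mode)
{
  return GET_MODE_BITSIZE (mode).to_constant ();
}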

gcc/tree-ssa-strlen.c

@@ -2690,7 +2690,7 @@ handle_builtin_memcmp (gimple_stmt_iterator *gsi)
location_t loc = gimple_location (stmt2);
tree type, off;
type = build_nonstandard_integer_type (leni, 1);
gcc_assert (GET_MODE_BITSIZE (TYPE_MODE (type)) == leni);
gcc_assert (known_eq (GET_MODE_BITSIZE (TYPE_MODE (type)), leni));
tree ptrtype = build_pointer_type_for_mode (char_type_node,
ptr_mode, true);
off = build_int_cst (ptrtype, 0);

gcc/tree-vect-patterns.c

@@ -3388,8 +3388,8 @@ adjust_bool_pattern (tree var, tree out_type,
gcc_assert (TREE_CODE_CLASS (rhs_code) == tcc_comparison);
if (TREE_CODE (TREE_TYPE (rhs1)) != INTEGER_TYPE
|| !TYPE_UNSIGNED (TREE_TYPE (rhs1))
|| (TYPE_PRECISION (TREE_TYPE (rhs1))
!= GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (rhs1)))))
|| maybe_ne (TYPE_PRECISION (TREE_TYPE (rhs1)),
GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (rhs1)))))
{
scalar_mode mode = SCALAR_TYPE_MODE (TREE_TYPE (rhs1));
itype

gcc/tree-vect-stmts.c

@@ -3595,7 +3595,7 @@ vectorizable_simd_clone_call (gimple *stmt, gimple_stmt_iterator *gsi,
if (simd_clone_subparts (atype)
< simd_clone_subparts (arginfo[i].vectype))
{
unsigned int prec = GET_MODE_BITSIZE (TYPE_MODE (atype));
poly_uint64 prec = GET_MODE_BITSIZE (TYPE_MODE (atype));
k = (simd_clone_subparts (arginfo[i].vectype)
/ simd_clone_subparts (atype));
gcc_assert ((k & (k - 1)) == 0);
@@ -3759,7 +3759,8 @@ vectorizable_simd_clone_call (gimple *stmt, gimple_stmt_iterator *gsi,
if (simd_clone_subparts (vectype) < nunits)
{
unsigned int k, l;
unsigned int prec = GET_MODE_BITSIZE (TYPE_MODE (vectype));
poly_uint64 prec = GET_MODE_BITSIZE (TYPE_MODE (vectype));
poly_uint64 bytes = GET_MODE_SIZE (TYPE_MODE (vectype));
k = nunits / simd_clone_subparts (vectype);
gcc_assert ((k & (k - 1)) == 0);
for (l = 0; l < k; l++)
@@ -3769,8 +3770,7 @@ vectorizable_simd_clone_call (gimple *stmt, gimple_stmt_iterator *gsi,
{
t = build_fold_addr_expr (new_temp);
t = build2 (MEM_REF, vectype, t,
build_int_cst (TREE_TYPE (t),
l * prec / BITS_PER_UNIT));
build_int_cst (TREE_TYPE (t), l * bytes));
}
else
t = build3 (BIT_FIELD_REF, vectype, new_temp,

gcc/valtrack.c

@@ -611,10 +611,13 @@ dead_debug_insert_temp (struct dead_debug_local *debug, unsigned int uregno,
usesp = &cur->next;
*tailp = cur->next;
cur->next = NULL;
/* "may" rather than "must" because we want (for example)
N V4SFs to win over plain V4SF even though N might be 1. */
rtx candidate = *DF_REF_REAL_LOC (cur->use);
if (!reg
|| (GET_MODE_BITSIZE (GET_MODE (reg))
< GET_MODE_BITSIZE (GET_MODE (*DF_REF_REAL_LOC (cur->use)))))
reg = *DF_REF_REAL_LOC (cur->use);
|| maybe_lt (GET_MODE_BITSIZE (GET_MODE (reg)),
GET_MODE_BITSIZE (GET_MODE (candidate))))
reg = candidate;
}
else
tailp = &(*tailp)->next;

gcc/varasm.c

@@ -843,12 +843,10 @@ mergeable_constant_section (machine_mode mode ATTRIBUTE_UNUSED,
unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED,
unsigned int flags ATTRIBUTE_UNUSED)
{
unsigned int modesize = GET_MODE_BITSIZE (mode);
if (HAVE_GAS_SHF_MERGE && flag_merge_constants
&& mode != VOIDmode
&& mode != BLKmode
&& modesize <= align
&& known_le (GET_MODE_BITSIZE (mode), align)
&& align >= 8
&& align <= 256
&& (align & (align - 1)) == 0)