poly_int: symbolic_number

This patch changes symbolic_number::bytepos from a HOST_WIDE_INT
to a poly_int64.  perform_symbolic_merge can cope with symbolic
offsets as long as the difference between the two offsets is
constant.  (This could happen for a constant-sized field that
occurs at a variable offset, for example.)
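
As a minimal sketch of the idea (not part of the patch; the helper name and
the example offsets are invented, and GCC's poly-int.h API is assumed), the
new check boils down to:

  /* Hypothetical helper mirroring the check added to
     perform_symbolic_merge: neither byte position has to be a
     compile-time constant, only their difference does.  */
  static bool
  offsets_mergeable_p (poly_int64 bytepos1, poly_int64 bytepos2,
                       HOST_WIDE_INT *delta)
  {
    return (bytepos2 - bytepos1).is_constant (delta);
  }

For example, byte positions 4 + 2*X and 5 + 2*X (X a runtime invariant such
as a vector length) differ by the constant 1, so the two loads can still be
merged; positions that differ by a multiple of X make is_constant fail and
the merge is rejected.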

2017-12-20  Richard Sandiford  <richard.sandiford@linaro.org>
	    Alan Hayward  <alan.hayward@arm.com>
	    David Sherwood  <david.sherwood@arm.com>

gcc/
	* gimple-ssa-store-merging.c (symbolic_number::bytepos): Change from
	HOST_WIDE_INT to poly_int64_pod.
	(perform_symbolic_merge): Update accordingly.
	(bswap_replace): Likewise.

Co-Authored-By: Alan Hayward <alan.hayward@arm.com>
Co-Authored-By: David Sherwood <david.sherwood@arm.com>

From-SVN: r255889

--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,12 @@
+2017-12-20  Richard Sandiford  <richard.sandiford@linaro.org>
+	    Alan Hayward  <alan.hayward@arm.com>
+	    David Sherwood  <david.sherwood@arm.com>
+
+	* gimple-ssa-store-merging.c (symbolic_number::bytepos): Change from
+	HOST_WIDE_INT to poly_int64_pod.
+	(perform_symbolic_merge): Update accordingly.
+	(bswap_replace): Likewise.
+
 2017-12-20  Richard Sandiford  <richard.sandiford@linaro.org>
 	    Alan Hayward  <alan.hayward@arm.com>
 	    David Sherwood  <david.sherwood@arm.com>

--- a/gcc/gimple-ssa-store-merging.c
+++ b/gcc/gimple-ssa-store-merging.c
@@ -216,7 +216,7 @@ struct symbolic_number {
   tree type;
   tree base_addr;
   tree offset;
-  HOST_WIDE_INT bytepos;
+  poly_int64_pod bytepos;
   tree src;
   tree alias_set;
   tree vuse;
@@ -452,7 +452,7 @@ perform_symbolic_merge (gimple *source_stmt1, struct symbolic_number *n1,
   if (rhs1 != rhs2)
     {
       uint64_t inc;
-      HOST_WIDE_INT start_sub, end_sub, end1, end2, end;
+      HOST_WIDE_INT start1, start2, start_sub, end_sub, end1, end2, end;
       struct symbolic_number *toinc_n_ptr, *n_end;
       basic_block bb1, bb2;
 
@@ -464,15 +464,19 @@ perform_symbolic_merge (gimple *source_stmt1, struct symbolic_number *n1,
           || (n1->offset && !operand_equal_p (n1->offset, n2->offset, 0)))
         return NULL;
 
-      if (n1->bytepos < n2->bytepos)
+      start1 = 0;
+      if (!(n2->bytepos - n1->bytepos).is_constant (&start2))
+        return NULL;
+
+      if (start1 < start2)
         {
           n_start = n1;
-          start_sub = n2->bytepos - n1->bytepos;
+          start_sub = start2 - start1;
         }
       else
         {
           n_start = n2;
-          start_sub = n1->bytepos - n2->bytepos;
+          start_sub = start1 - start2;
         }
 
       bb1 = gimple_bb (source_stmt1);
@@ -484,8 +488,8 @@ perform_symbolic_merge (gimple *source_stmt1, struct symbolic_number *n1,
 
       /* Find the highest address at which a load is performed and
          compute related info.  */
-      end1 = n1->bytepos + (n1->range - 1);
-      end2 = n2->bytepos + (n2->range - 1);
+      end1 = start1 + (n1->range - 1);
+      end2 = start2 + (n2->range - 1);
       if (end1 < end2)
         {
           end = end2;
@@ -504,7 +508,7 @@ perform_symbolic_merge (gimple *source_stmt1, struct symbolic_number *n1,
       else
         toinc_n_ptr = (n_start == n1) ? n2 : n1;
 
-      n->range = end - n_start->bytepos + 1;
+      n->range = end - MIN (start1, start2) + 1;
 
       /* Check that the range of memory covered can be represented by
          a symbolic number.  */
@@ -933,7 +937,7 @@ bswap_replace (gimple_stmt_iterator gsi, gimple *ins_stmt, tree fndecl,
       tree load_offset_ptr, aligned_load_type;
       gimple *load_stmt;
       unsigned align = get_object_alignment (src);
-      HOST_WIDE_INT load_offset = 0;
+      poly_int64 load_offset = 0;
 
       if (cur_stmt)
         {