re PR tree-optimization/37716 (ICE for legal C++ code with -O2 on 20080926)

PR tree-optimization/37716
	* tree-sra.c (sra_build_assignment): For scalar bitfield SRC construct
	all the needed operations as trees and gimplify_assign it to dst.

	* g++.dg/torture/pr37716.C: New test.

From-SVN: r142392
This commit is contained in:
Jakub Jelinek 2008-12-03 13:27:48 +01:00 committed by Jakub Jelinek
parent d5fdc62c3f
commit ff1fe45737
4 changed files with 90 additions and 61 deletions

View File

@ -1,5 +1,9 @@
2008-12-03 Jakub Jelinek <jakub@redhat.com>
PR tree-optimization/37716
* tree-sra.c (sra_build_assignment): For scalar bitfield SRC construct
all the needed operations as trees and gimplify_assign it to dst.
PR target/37610
* configure.ac (gcc_cv_readelf): Look for readelf.
(gcc_cv_as_cfi_advance_working): Check for working

View File

@ -1,3 +1,8 @@
2008-12-03 Jakub Jelinek <jakub@redhat.com>
PR tree-optimization/37716
* g++.dg/torture/pr37716.C: New test.
2008-12-03 Eric Botcazou <ebotcazou@adacore.com>
* gcc.dg/torture/pr37868.c: Skip on the SPARC.

View File

@ -0,0 +1,56 @@
// PR tree-optimization/37716
// { dg-do compile }
// Aggregate holding a pointer to a nested POD; f1 computes an address
// into B's trailing void* array.  NOTE(review): regression testcase for
// PR37716 -- keep the token sequence unchanged, it is what reproduces
// the original ICE.
struct A
{
struct B
{
int a, b, c, d;
// Declared with one element but indexed past it via f1 (common
// pre-C99 "struct hack" shape in the original failing code).
void *e[1];
};
B *d;
// Returns the address of slot (d->c + i) inside d->e.
inline void **f1 (int i) const
{
return d->e + d->c + i;
}
};
// Container-like template; operator[] returns a T& obtained by
// reinterpret_cast'ing a void** slot, which is the access pattern that
// exercised the scalar-bitfield path in tree-sra.c (PR37716).
template <typename T>
struct C
{
struct D
{
void *v;
// Reinterprets this D object as a T.
inline T & f3 ()
{
return *reinterpret_cast <T *> (this);
}
};
// Anonymous union: p and d alias the same storage.
union
{
A p;
A::B *d;
};
T & operator[](int i)
{
if (d->a != 1)
f2 ();
// Cast the void** returned by A::f1 to D* and view it as a T.
return reinterpret_cast <D *> (p.f1 (i))->f3 ();
}
void f2 ();
void f3 (int i, const T & t);
};
// Small two-int aggregate; presumably the candidate SRA scalarizes when
// foo copies it by value (NOTE(review): inferred from the PR subject --
// the visible code only shows the layout).
class E
{
int e, f;
};
// Global instance used by foo.
C <E> c;
// Copies an E out of c via operator[] and passes it back by const
// reference; compiling this at -O2 triggered the ICE fixed by this
// commit (dg-do compile -- no runtime behavior is checked).
void
foo (int x)
{
E e = c[x];
c.f3 (x, e);
}

View File

@ -2158,9 +2158,10 @@ sra_build_assignment (tree dst, tree src)
if (scalar_bitfield_p (src))
{
tree var, shift, width;
tree utype, stype, stmp, utmp, dtmp;
tree utype, stype;
bool unsignedp = (INTEGRAL_TYPE_P (TREE_TYPE (src))
? TYPE_UNSIGNED (TREE_TYPE (src)) : true);
struct gimplify_ctx gctx;
var = TREE_OPERAND (src, 0);
width = TREE_OPERAND (src, 1);
@ -2191,28 +2192,15 @@ sra_build_assignment (tree dst, tree src)
else if (!TYPE_UNSIGNED (utype))
utype = unsigned_type_for (utype);
stmp = make_rename_temp (stype, "SR");
/* Convert the base var of the BIT_FIELD_REF to the scalar type
we use for computation if we cannot use it directly. */
if (!useless_type_conversion_p (stype, TREE_TYPE (var)))
{
if (INTEGRAL_TYPE_P (TREE_TYPE (var)))
stmt = gimple_build_assign (stmp, fold_convert (stype, var));
else
stmt = gimple_build_assign (stmp, fold_build1 (VIEW_CONVERT_EXPR,
stype, var));
gimple_seq_add_stmt (&seq, stmt);
var = stmp;
}
if (INTEGRAL_TYPE_P (TREE_TYPE (var)))
var = fold_convert (stype, var);
else
var = fold_build1 (VIEW_CONVERT_EXPR, stype, var);
if (!integer_zerop (shift))
{
stmt = gimple_build_assign (stmp, fold_build2 (RSHIFT_EXPR, stype,
var, shift));
gimple_seq_add_stmt (&seq, stmt);
var = stmp;
}
var = fold_build2 (RSHIFT_EXPR, stype, var, shift);
/* If we need a masking operation, produce one. */
if (TREE_INT_CST_LOW (width) == TYPE_PRECISION (stype))
@ -2222,24 +2210,11 @@ sra_build_assignment (tree dst, tree src)
tree one = build_int_cst_wide (stype, 1, 0);
tree mask = int_const_binop (LSHIFT_EXPR, one, width, 0);
mask = int_const_binop (MINUS_EXPR, mask, one, 0);
stmt = gimple_build_assign (stmp, fold_build2 (BIT_AND_EXPR, stype,
var, mask));
gimple_seq_add_stmt (&seq, stmt);
var = stmp;
var = fold_build2 (BIT_AND_EXPR, stype, var, mask);
}
/* After shifting and masking, convert to the target type. */
utmp = stmp;
if (!useless_type_conversion_p (utype, stype))
{
utmp = make_rename_temp (utype, "SR");
stmt = gimple_build_assign (utmp, fold_convert (utype, var));
gimple_seq_add_stmt (&seq, stmt);
var = utmp;
}
var = fold_convert (utype, var);
/* Perform sign extension, if required.
??? This should never be necessary. */
@ -2250,40 +2225,29 @@ sra_build_assignment (tree dst, tree src)
size_binop (MINUS_EXPR, width,
bitsize_int (1)), 0);
stmt = gimple_build_assign (utmp, fold_build2 (BIT_XOR_EXPR, utype,
var, signbit));
gimple_seq_add_stmt (&seq, stmt);
stmt = gimple_build_assign (utmp, fold_build2 (MINUS_EXPR, utype,
utmp, signbit));
gimple_seq_add_stmt (&seq, stmt);
var = utmp;
var = fold_build2 (BIT_XOR_EXPR, utype, var, signbit);
var = fold_build2 (MINUS_EXPR, utype, var, signbit);
}
/* fold_build3 (BIT_FIELD_REF, ...) sometimes returns a cast. */
STRIP_NOPS (dst);
/* Finally, move and convert to the destination. */
if (!useless_type_conversion_p (TREE_TYPE (dst), TREE_TYPE (var)))
{
if (INTEGRAL_TYPE_P (TREE_TYPE (dst)))
var = fold_convert (TREE_TYPE (dst), var);
else
var = fold_build1 (VIEW_CONVERT_EXPR, TREE_TYPE (dst), var);
if (INTEGRAL_TYPE_P (TREE_TYPE (dst)))
var = fold_convert (TREE_TYPE (dst), var);
else
var = fold_build1 (VIEW_CONVERT_EXPR, TREE_TYPE (dst), var);
/* If the destination is not a register the conversion needs
to be a separate statement. */
if (!is_gimple_reg (dst))
{
dtmp = make_rename_temp (TREE_TYPE (dst), "SR");
stmt = gimple_build_assign (dtmp, var);
gimple_seq_add_stmt (&seq, stmt);
var = dtmp;
}
}
stmt = gimple_build_assign (dst, var);
gimple_seq_add_stmt (&seq, stmt);
push_gimplify_context (&gctx);
gctx.into_ssa = true;
gctx.allow_rhs_cond_expr = true;
gimplify_assign (dst, var, &seq);
if (gimple_referenced_vars (cfun))
for (var = gctx.temps; var; var = TREE_CHAIN (var))
add_referenced_var (var);
pop_gimplify_context (NULL);
return seq;
}