re PR rtl-optimization/36419 (Wrong unwind info with -Os -fasynchronous-unwind-tables)

PR rtl-optimization/36419
	* dwarf2out.c (barrier_args_size): New variable.
	(compute_barrier_args_size, compute_barrier_args_size_1): New
	functions.
	(dwarf2out_stack_adjust): For BARRIERs call compute_barrier_args_size
	if not called yet in the current function, use barrier_args_size
	array to find the new args_size value.
	(dwarf2out_frame_debug): Free and clear barrier_args_size.

	* g++.dg/eh/async-unwind2.C: New test.

From-SVN: r138427
Author: Jakub Jelinek <jakub@redhat.com>
Date:   2008-07-31 20:08:36 +02:00
commit 88e2c8107b (parent 41b059f3d8)
4 changed files with 439 additions and 7 deletions
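Background for the fix: dwarf2out_stack_adjust used to reset args_size to 0 at every BARRIER, on the assumption that a stack adjustment had already been emitted. As the removed comment in the second hunk below notes, at -Os the compiler does not bother popping the arguments of calls to noreturn functions, and the code following such a barrier may also be reached by jumps from points where call arguments are still pushed, so its true args_size need not be 0. A minimal sketch (hypothetical, not taken from the PR) of the code shape involved:

/* Hypothetical example: at -Os the arguments pushed for a call to a
   noreturn function are never popped, so the insn stream reaches a
   BARRIER while args_size is still nonzero.  */
extern void fail (const char *, int) __attribute__ ((noreturn));

void
f (int x)
{
  if (x < 0)
    fail ("negative", x);  /* pushed args, CALL_INSN, then BARRIER */
  /* The label here may also be reached by jumps; the unwind info for
     this code must use the args_size valid on those incoming edges,
     not an unconditional 0.  */
}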

gcc/ChangeLog

@@ -1,3 +1,14 @@
2008-07-31  Jakub Jelinek  <jakub@redhat.com>

	PR rtl-optimization/36419
	* dwarf2out.c (barrier_args_size): New variable.
	(compute_barrier_args_size, compute_barrier_args_size_1): New
	functions.
	(dwarf2out_stack_adjust): For BARRIERs call compute_barrier_args_size
	if not called yet in the current function, use barrier_args_size
	array to find the new args_size value.
	(dwarf2out_frame_debug): Free and clear barrier_args_size.

2008-07-31  H.J. Lu  <hongjiu.lu@intel.com>

	PR debug/36980

gcc/dwarf2out.c

@@ -1156,6 +1156,162 @@ stack_adjust_offset (const_rtx pattern)
  return offset;
}

/* Precomputed args_size for CODE_LABELs and BARRIERs preceding them,
   indexed by INSN_UID.  */
static HOST_WIDE_INT *barrier_args_size;

/* Helper function for compute_barrier_args_size.  Handle one insn.  */

static HOST_WIDE_INT
compute_barrier_args_size_1 (rtx insn, HOST_WIDE_INT cur_args_size,
                             VEC (rtx, heap) **next)
{
  HOST_WIDE_INT offset = 0;
  int i;

  if (! RTX_FRAME_RELATED_P (insn))
    {
      if (prologue_epilogue_contains (insn)
          || sibcall_epilogue_contains (insn))
        /* Nothing */;
      else if (GET_CODE (PATTERN (insn)) == SET)
        offset = stack_adjust_offset (PATTERN (insn));
      else if (GET_CODE (PATTERN (insn)) == PARALLEL
               || GET_CODE (PATTERN (insn)) == SEQUENCE)
        {
          /* There may be stack adjustments inside compound insns.  Search
             for them.  */
          for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
            if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
              offset += stack_adjust_offset (XVECEXP (PATTERN (insn), 0, i));
        }
    }
  else
    {
      rtx expr = find_reg_note (insn, REG_FRAME_RELATED_EXPR, NULL_RTX);

      if (expr)
        {
          expr = XEXP (expr, 0);
          if (GET_CODE (expr) == PARALLEL
              || GET_CODE (expr) == SEQUENCE)
            for (i = 1; i < XVECLEN (expr, 0); i++)
              {
                rtx elem = XVECEXP (expr, 0, i);

                if (GET_CODE (elem) == SET && !RTX_FRAME_RELATED_P (elem))
                  offset += stack_adjust_offset (elem);
              }
        }
    }

#ifndef STACK_GROWS_DOWNWARD
  offset = -offset;
#endif

  cur_args_size += offset;
  if (cur_args_size < 0)
    cur_args_size = 0;

  if (JUMP_P (insn))
    {
      rtx dest = JUMP_LABEL (insn);

      if (dest)
        {
          if (barrier_args_size[INSN_UID (dest)] < 0)
            {
              barrier_args_size[INSN_UID (dest)] = cur_args_size;
              VEC_safe_push (rtx, heap, *next, dest);
            }
          else
            gcc_assert (barrier_args_size[INSN_UID (dest)]
                        == cur_args_size);
        }
    }

  return cur_args_size;
}

/* Walk the whole function and compute args_size on BARRIERs.  */

static void
compute_barrier_args_size (void)
{
  int max_uid = get_max_uid (), i;
  rtx insn;
  VEC (rtx, heap) *worklist, *next, *tmp;

  barrier_args_size = XNEWVEC (HOST_WIDE_INT, max_uid);
  for (i = 0; i < max_uid; i++)
    barrier_args_size[i] = -1;

  worklist = VEC_alloc (rtx, heap, 20);
  next = VEC_alloc (rtx, heap, 20);
  insn = get_insns ();
  barrier_args_size[INSN_UID (insn)] = 0;
  VEC_quick_push (rtx, worklist, insn);
  for (;;)
    {
      while (!VEC_empty (rtx, worklist))
        {
          rtx prev, body;
          HOST_WIDE_INT cur_args_size;

          insn = VEC_pop (rtx, worklist);
          cur_args_size = barrier_args_size[INSN_UID (insn)];
          prev = prev_nonnote_insn (insn);
          if (prev && BARRIER_P (prev))
            barrier_args_size[INSN_UID (prev)] = cur_args_size;

          for (; insn; insn = NEXT_INSN (insn))
            {
              if (INSN_DELETED_P (insn) || NOTE_P (insn))
                continue;
              if (BARRIER_P (insn))
                break;

              if (LABEL_P (insn))
                {
                  gcc_assert (barrier_args_size[INSN_UID (insn)] < 0
                              || barrier_args_size[INSN_UID (insn)]
                                 == cur_args_size);
                  continue;
                }

              body = PATTERN (insn);
              if (GET_CODE (body) == SEQUENCE)
                {
                  for (i = 1; i < XVECLEN (body, 0); i++)
                    cur_args_size
                      = compute_barrier_args_size_1 (XVECEXP (body, 0, i),
                                                     cur_args_size, &next);
                  cur_args_size
                    = compute_barrier_args_size_1 (XVECEXP (body, 0, 0),
                                                   cur_args_size, &next);
                }
              else
                cur_args_size
                  = compute_barrier_args_size_1 (insn, cur_args_size, &next);
            }
        }

      if (VEC_empty (rtx, next))
        break;

      /* Swap WORKLIST with NEXT and truncate NEXT for next iteration.  */
      tmp = next;
      next = worklist;
      worklist = tmp;
      VEC_truncate (rtx, next, 0);
    }

  VEC_free (rtx, heap, worklist);
  VEC_free (rtx, heap, next);
}

/* Check INSN to see if it looks like a push or a stack adjustment, and
   make a note of it if it does.  EH uses this information to find out how
   much extra space it needs to pop off the stack.  */
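As a concrete illustration of the propagation above, here is a standalone toy model (the insn encoding, the sample stream, and all names are invented for illustration; these are not GCC APIs): walk each straight-line run applying stack deltas, record the running size at every jump target, require agreement whenever a target is reached again, and seed new walks from newly discovered targets.

#include <assert.h>
#include <stdio.h>
#include <string.h>

/* Toy insn kinds, loosely mirroring plain insns, JUMP_INSNs,
   BARRIERs and CODE_LABELs.  */
enum kind { PLAIN, JUMP, BARRIER, LABEL };

struct insn { enum kind kind; int delta; int target; };

static const struct insn stream[] = {
  { PLAIN,    0, -1 },  /* 0: function entry                  */
  { PLAIN,   16, -1 },  /* 1: push 16 bytes of call arguments */
  { JUMP,     0,  5 },  /* 2: unconditional jump to insn 5    */
  { BARRIER,  0, -1 },  /* 3: ends this straight-line run     */
  { PLAIN,    0, -1 },  /* 4: unreachable filler              */
  { LABEL,    0, -1 },  /* 5: jump target; args_size is 16    */
  { PLAIN,  -16, -1 },  /* 6: pop the call arguments          */
};
#define NINSNS ((int) (sizeof stream / sizeof stream[0]))

int
main (void)
{
  int size_at[NINSNS], worklist[NINSNS], n = 0, i;

  memset (size_at, -1, sizeof size_at);  /* -1 means "not yet known" */
  size_at[0] = 0;                        /* args_size is 0 on entry  */
  worklist[n++] = 0;

  while (n > 0)
    {
      int start = worklist[--n];
      int cur = size_at[start];

      for (i = start; i < NINSNS; i++)
        {
          if (stream[i].kind == BARRIER)
            break;              /* CUR is the args_size at the barrier */
          if (stream[i].kind == LABEL && i != start)
            {
              /* Falling into a label must agree with any value already
                 recorded for it.  */
              assert (size_at[i] < 0 || size_at[i] == cur);
              continue;
            }
          cur += stream[i].delta;
          if (cur < 0)
            cur = 0;            /* args_size never drops below zero */
          if (stream[i].kind == JUMP)
            {
              int t = stream[i].target;
              if (size_at[t] < 0)
                {
                  size_at[t] = cur;     /* first visit: record, queue */
                  worklist[n++] = t;
                }
              else
                assert (size_at[t] == cur);  /* all paths must agree */
            }
        }
    }

  for (i = 0; i < NINSNS; i++)
    printf ("insn %d: args_size %d\n", i, size_at[i]);
  return 0;
}

The real code additionally handles SEQUENCEs, REG_FRAME_RELATED_EXPR notes and deleted insns, and alternates between two heap VECs instead of the single fixed array used in this sketch.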
@@ -1200,13 +1356,15 @@ dwarf2out_stack_adjust (rtx insn, bool after_p)
     }
   else if (BARRIER_P (insn))
     {
-      /* When we see a BARRIER, we know to reset args_size to 0.  Usually
-         the compiler will have already emitted a stack adjustment, but
-         doesn't bother for calls to noreturn functions.  */
-#ifdef STACK_GROWS_DOWNWARD
-      offset = -args_size;
-#else
-      offset = args_size;
-#endif
+      if (barrier_args_size == NULL)
+        compute_barrier_args_size ();
+      offset = barrier_args_size[INSN_UID (insn)];
+      if (offset < 0)
+        offset = 0;
+      offset -= args_size;
+#ifndef STACK_GROWS_DOWNWARD
+      offset = -offset;
+#endif
     }
   else if (GET_CODE (PATTERN (insn)) == SET)
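A worked example of the replacement computation: suppose the label after a barrier is reachable from the middle of an argument-pushing sequence, so compute_barrier_args_size recorded 16 for it, while the running args_size at the barrier is 0. Then offset = 16 - 0 = 16 and the code after the barrier is annotated with args_size 16; the deleted code would always have forced it back to 0, which is exactly the wrong unwind info reported in the PR.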
@@ -2160,6 +2318,12 @@ dwarf2out_frame_debug (rtx insn, bool after_p)
          regs_saved_in_regs[i].saved_in_reg = NULL_RTX;
        }
      num_regs_saved_in_regs = 0;

      if (barrier_args_size)
        {
          XDELETEVEC (barrier_args_size);
          barrier_args_size = NULL;
        }
      return;
    }
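Since barrier_args_size is indexed by INSN_UID, it is only meaningful within a single function; freeing it here, in the same path that resets regs_saved_in_regs, lets dwarf2out_stack_adjust recompute it lazily the next time it meets a BARRIER.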

gcc/testsuite/ChangeLog

@@ -1,5 +1,8 @@
2008-07-31  Jakub Jelinek  <jakub@redhat.com>

	PR rtl-optimization/36419
	* g++.dg/eh/async-unwind2.C: New test.

	PR c++/36405
	* g++.dg/rtti/typeid8.C: New test.

gcc/testsuite/g++.dg/eh/async-unwind2.C

@@ -0,0 +1,254 @@
// PR rtl-optimization/36419
// { dg-do run { target { { i?86-*-* x86_64-*-* } && ilp32 } } }
// { dg-options "-Os -fasynchronous-unwind-tables -fpic -fno-inline" }

#include <stdarg.h>

extern "C" void abort ();

extern "C"
{
  struct R { int r1; unsigned short r2[1]; };
  int bar1 (unsigned short *, int, short) throw ();
  void bar2 (R *) throw ();
  void bar3 (R **, const unsigned short *, int) throw ();
  void bar4 (R **, const char *) throw ();
  void bar5 (void *, const char *, ...);
}

struct S
{
  R *s;
  struct T { };
  S (R *x, T *) { s = x; }
  ~S () { bar2 (s); }
  S &operator= (const S &x);
  S &operator+= (const S &x);
  S sfn1 (const S &x) const;
  friend S operator+ (const S &x1, const S &x2);
  static S sfn2 (int i)
  {
    unsigned short q[33];
    R *p = 0;
    bar3 (&p, q, bar1 (q, i, 10));
    return S (p, (T *) 0);
  }
  static S sfn3 (const char *x)
  {
    R *p = 0;
    bar4 (&p, x);
    return S (p, (T *) 0);
  }
};

struct U { };

template <class C> unsigned char operator >>= (const U &, C &);

struct V;

struct W
{
  V *w;
  unsigned char is () const;
};

template <class T> struct X : public W
{
  inline ~X ();
  X ();
  X (const W &);
  T *operator -> () const;
};

struct E
{
  E ();
  E (const S &, const X <V> &);
  E (E const &);
  ~E ();
  E &operator = (E const &);
};

struct V
{
  virtual void release () throw ();
};

template <class T> X <T>::~X ()
{
  if (w)
    w->release ();
}

struct Y
{
  virtual U yfn1 (const S &);
};

struct Z;

X <V> baz1 (const S &) throw (E);
X <Z> baz2 (const X <Z> &) throw (E);

template <typename T> X<T>::X ()
{
  w = __null;
}

template <typename T> X<T>::X (W const &)
{
  w = __null;
}

U Y::yfn1 (const S &)
{
  throw 12;
}

Y y;

template <typename T> T *X<T>::operator -> () const
{
  return &y;
}

X <V> baz1 (const S &) throw (E)
{
  return X<V> ();
}

E::E ()
{
}

E::~E ()
{
}

X <Z> baz2 (const X <Z> &) throw (E)
{
  throw E ();
}

int bar1 (unsigned short *, int, short) throw ()
{
  asm volatile ("" : : : "memory");
  return 0;
}

void bar2 (R *) throw ()
{
  asm volatile ("" : : : "memory");
}

void bar3 (R **, const unsigned short *, int) throw ()
{
  asm volatile ("" : : : "memory");
}

void bar4 (R **, const char *) throw ()
{
  asm volatile ("" : : : "memory");
}

int events[2];
void *sp;

void bar5 (void *p, const char *s, ...)
{
  va_list ap;
  va_start (ap, s);
  if (p)
    throw 19;
  switch (*s)
    {
    case 't':
      if (events[0] != va_arg (ap, int))
        abort ();
      events[0]++;
      break;
    case 'f':
      abort ();
    case 'c':
      if (events[1] != va_arg (ap, int))
        abort ();
      events[1]++;
      if (events[1] == 1)
        sp = va_arg (ap, void *);
      else if (sp != va_arg (ap, void *))
        abort ();
      break;
    }
}

unsigned char W::is () const
{
  return 1;
}

S &S::operator += (const S &)
{
  return *this;
}

template <class C> unsigned char operator >>= (const U &, C &)
{
  throw 1;
}

template X<Y>::X ();
template X<Z>::X ();
template unsigned char operator >>= (const U &, X<Z> &);
template X<Y>::X (W const &);
template Y *X<Y>::operator-> () const;

X <Z> foo () throw ()
{
  X <Z> a;
  X <Y> b;
  try
    {
      b = X <Y> (baz1 (S::sfn3 ("defg")));
    }
  catch (E &)
    {
    }
  if (b.is ())
    {
      for (int n = 0; n < 10; n++)
        {
          S c = S::sfn3 ("abcd");
          c += S::sfn2 (n);
          X <Z> d;
          try
            {
              bar5 ((void *) 0, "trying %d\n", n);
              if ((b->yfn1 (c) >>= d))
                if (d.is ())
                  {
                    bar5 ((void *) 0, "failure1 on %d\n", n);
                    a = baz2 (d);
                    if (a.is ())
                      break;
                  }
              bar5 ((void *) 0, "failure2 on %d\n", n);
            }
          catch (...)
            {
              void *p;
              asm volatile ("movl %%esp, %0" : "=r" (p));
              bar5 ((void *) 0, "caught %d %p\n", n, p);
            }
        }
    }
  return a;
}

int
main ()
{
  foo ();
  if (events[0] != 10 || events[1] != 10)
    abort ();
  return 0;
}
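The test mirrors the failure mode directly: bar5's 'c' ("caught") case records the stack pointer at the first catch and aborts if any later catch observes a different %esp, which is what a wrong args_size in the unwind tables produces when the unwinder restores the frame, and main verifies that all ten iterations both attempted ('t') and landed in the catch handler.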