diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index ec1458c1192..945e9046de8 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,9 @@
+2012-04-11  Richard Guenther
+
+	PR middle-end/52918
+	* except.c (sjlj_emit_dispatch_table): Properly update loop
+	structure.
+
 2012-04-11  Nick Clifton
 
 	* config/rl78/rl78.c (rl78_expand_prologue): Set stack use
diff --git a/gcc/except.c b/gcc/except.c
index e3a9ef07422..e6e7794f246 100644
--- a/gcc/except.c
+++ b/gcc/except.c
@@ -1344,6 +1344,28 @@ sjlj_emit_dispatch_table (rtx dispatch_label, int num_dispatch)
 	  e = make_edge (bb, bb->next_bb, EDGE_FALLTHRU);
 	  e->count = bb->count;
 	  e->probability = REG_BR_PROB_BASE;
+	  if (current_loops)
+	    {
+	      struct loop *loop = bb->next_bb->loop_father;
+	      /* If we created a pre-header block, add the new block to the
+		 outer loop, otherwise to the loop itself.  */
+	      if (bb->next_bb == loop->header)
+		add_bb_to_loop (bb, loop_outer (loop));
+	      else
+		add_bb_to_loop (bb, loop);
+	      /* ???  For multiple dispatches we will end up with edges
+		 from the loop tree root into this loop, making it a
+		 multiple-entry loop.  Discard all affected loops.  */
+	      if (num_dispatch > 1)
+		{
+		  for (loop = bb->loop_father;
+		       loop_outer (loop); loop = loop_outer (loop))
+		    {
+		      loop->header = NULL;
+		      loop->latch = NULL;
+		    }
+		}
+	    }
 
 	  disp_index++;
 	}
@@ -1364,6 +1386,24 @@ sjlj_emit_dispatch_table (rtx dispatch_label, int num_dispatch)
       e = make_edge (bb, bb->next_bb, EDGE_FALLTHRU);
       e->count = bb->count;
       e->probability = REG_BR_PROB_BASE;
+      if (current_loops)
+	{
+	  struct loop *loop = bb->next_bb->loop_father;
+	  /* If we created a pre-header block, add the new block to the
+	     outer loop, otherwise to the loop itself.  */
+	  if (bb->next_bb == loop->header)
+	    add_bb_to_loop (bb, loop_outer (loop));
+	  else
+	    add_bb_to_loop (bb, loop);
+	}
+    }
+  else
+    {
+      /* We are not wiring up edges here, but as the dispatcher call
+	 is at function begin simply associate the block with the
+	 outermost (non-)loop.  */
+      if (current_loops)
+	add_bb_to_loop (bb, current_loops->tree_root);
     }
 }
 
diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog
index d657e47ee60..7e2a1c30b8e 100644
--- a/gcc/testsuite/ChangeLog
+++ b/gcc/testsuite/ChangeLog
@@ -1,3 +1,9 @@
+2012-04-11  Richard Guenther
+
+	PR middle-end/52918
+	* g++.dg/torture/pr52918-1.C: New testcase.
+	* g++.dg/torture/pr52918-2.C: Likewise.
+
 2012-04-11  Tobias Burnus
 
 	PR fortran/52729
diff --git a/gcc/testsuite/g++.dg/torture/pr52918-1.C b/gcc/testsuite/g++.dg/torture/pr52918-1.C
new file mode 100644
index 00000000000..9e7b21ba6a3
--- /dev/null
+++ b/gcc/testsuite/g++.dg/torture/pr52918-1.C
@@ -0,0 +1,39 @@
+// { dg-do compile }
+
+typedef __SIZE_TYPE__ size_t;
+class bad_alloc { };
+typedef struct {
+} __gthread_mutex_t;
+int __gthread_mutex_unlock (__gthread_mutex_t *__mutex);
+class __concurrence_unlock_error {
+};
+inline void __throw_concurrence_unlock_error() {
+  throw __concurrence_unlock_error();
+}
+class __mutex {
+  __gthread_mutex_t _M_mutex;
+public:
+  void unlock() {
+    if (__gthread_mutex_unlock(&_M_mutex) != 0)
+      __throw_concurrence_unlock_error();
+  }
+};
+class free_list {
+  typedef __mutex __mutex_type;
+  __mutex_type& _M_get_mutex();
+  void _M_get(size_t __sz) throw(bad_alloc);
+};
+void free_list::_M_get(size_t __sz) throw(bad_alloc)
+{
+  __mutex_type& __bfl_mutex = _M_get_mutex();
+  __bfl_mutex.unlock();
+  int __ctr = 2;
+  while (__ctr) {
+    size_t* __ret = 0;
+    --__ctr;
+    try {
+      __ret = (size_t*) (::operator new(__sz + sizeof(size_t)));
+    }
+    catch(const bad_alloc&) { }
+  }
+}
diff --git a/gcc/testsuite/g++.dg/torture/pr52918-2.C b/gcc/testsuite/g++.dg/torture/pr52918-2.C
new file mode 100644
index 00000000000..ba31295e41e
--- /dev/null
+++ b/gcc/testsuite/g++.dg/torture/pr52918-2.C
@@ -0,0 +1,40 @@
+// { dg-do compile }
+
+typedef __SIZE_TYPE__ size_t;
+void* __cxa_allocate_exception(size_t) throw();
+typedef struct { } __gthread_mutex_t;
+extern int __gthr_win32_mutex_unlock (__gthread_mutex_t *);
+int __gthread_mutex_lock (__gthread_mutex_t *__mutex);
+int __gthread_mutex_unlock (__gthread_mutex_t *__mutex);
+void __throw_concurrence_lock_error();
+void __throw_concurrence_unlock_error();
+class __mutex {
+  __gthread_mutex_t _M_mutex;
+public:
+  void lock() {
+    if (__gthread_mutex_lock(&_M_mutex) != 0)
+      __throw_concurrence_lock_error();
+  }
+  void unlock() {
+    if (__gthread_mutex_unlock(&_M_mutex) != 0)
+      __throw_concurrence_unlock_error();
+  }
+};
+class __scoped_lock {
+  typedef __mutex __mutex_type;
+  __mutex_type& _M_device;
+public:
+  explicit __scoped_lock(__mutex_type& __name) : _M_device(__name) {
+    _M_device.lock();
+  }
+  ~__scoped_lock() throw() {
+    _M_device.unlock();
+  }
+};
+__mutex emergency_mutex;
+void * __cxa_allocate_exception(size_t thrown_size) throw()
+{
+  void *ret;
+  if (! ret)
+    __scoped_lock sentry(emergency_mutex);
+}