Add memory barriers to the double-checked locking used for static initialization.

libstdc++:
        Add memory barriers to the double-checked locking used for static
        initialization.
        * libsupc++/guard.cc (__test_and_acquire): Define default.
        (_GLIBCXX_GUARD_TEST_AND_ACQUIRE, __set_and_release)
        (_GLIBCXX_GUARD_SET_AND_RELEASE): Likewise.
        (recursion_push, recursion_pop): New abstraction functions.
        (__cxa_guard_acquire): Use _GLIBCXX_GUARD_TEST_AND_ACQUIRE.
        (__cxa_guard_release): Use _GLIBCXX_GUARD_SET_AND_RELEASE.
        * config/cpu/generic/cxxabi_tweaks.h (_GLIBCXX_GUARD_TEST): Rename
        from _GLIBCXX_GUARD_ACQUIRE and reverse sense.
        (_GLIBCXX_GUARD_SET): Rename from _GLIBCXX_GUARD_RELEASE.
        * config/cpu/arm/cxxabi_tweaks.h: Likewise.
        * config/cpu/alpha/atomic_word.h (_GLIBCXX_READ_MEM_BARRIER)
        (_GLIBCXX_WRITE_MEM_BARRIER): Define.
        * config/cpu/powerpc/atomic_word.h: Likewise.
        * config/cpu/sparc/atomic_word.h: Likewise.
        * config/cpu/generic/atomic_word.h: Define them, commented out.
        * include/bits/atomicity.h: Define defaults.
        * config/cpu/ia64/atomic_word.h (__test_and_acquire)
        (__set_and_release): New inlines.
        (_GLIBCXX_GUARD_TEST_AND_ACQUIRE): Define.
        (_GLIBCXX_GUARD_SET_AND_RELEASE): Define.

        * libsupc++/guard.cc (acquire_1): Use __builtin_trap instead of
        abort();

gcc:
        * doc/tm.texi (TARGET_RELAXED_ORDERING): Document.
        * target.h (struct gcc_target): Add relaxed_ordering field.
        * target-def.h (TARGET_RELAXED_ORDERING): Define default.
        (TARGET_INITIALIZER): Add it.
        * config/alpha/alpha.c (TARGET_RELAXED_ORDERING): Define.
        * config/ia64/ia64.c (TARGET_RELAXED_ORDERING): Define.
        * config/rs6000/rs6000.c (TARGET_RELAXED_ORDERING): Define.
        * config/sparc/sparc.c (TARGET_RELAXED_ORDERING): Define.
        * cp/decl.c (expand_static_init): Don't use shortcut if
        targetm.relaxed_ordering.

From-SVN: r92659

Author: Jason Merrill <jason@redhat.com>  2004-12-27 23:36:54 -05:00
Commit: 445cf5eb0d (parent: 1f7edb8b3d)
20 changed files with 354 additions and 27 deletions
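
For readers unfamiliar with the pattern, here is a minimal sketch (not the libsupc++ code itself; the names are illustrative) of the double-checked locking protocol this patch hardens: the guard test gains an acquire-style read barrier and the guard set gains a release-style write barrier, so that on weakly ordered targets the constructed object is visible before the guard reads as set.

    // Illustrative sketch only -- the real versions live in libsupc++/guard.cc
    // and config/cpu/*/atomic_word.h.  The empty asm is the generic fallback;
    // relaxed-ordering targets substitute a real instruction (mb, isync,
    // membar #LoadLoad, ld.acq/st.rel, ...).
    #define READ_MEM_BARRIER_SKETCH  __asm __volatile ("" ::: "memory")
    #define WRITE_MEM_BARRIER_SKETCH __asm __volatile ("" ::: "memory")

    typedef long long guard_sketch;          // stands in for the ABI __guard

    static inline bool
    guard_test_and_acquire (guard_sketch *g)
    {
      bool initialized = *(char *) g != 0;   // _GLIBCXX_GUARD_TEST
      READ_MEM_BARRIER_SKETCH;               // loads from the object may not be hoisted above the test
      return initialized;
    }

    static inline void
    guard_set_and_release (guard_sketch *g)
    {
      WRITE_MEM_BARRIER_SKETCH;              // stores that constructed the object complete first
      *(char *) g = 1;                       // _GLIBCXX_GUARD_SET
    }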

@@ -1,3 +1,14 @@
2004-12-27 Jason Merrill <jason@redhat.com>
* doc/tm.texi (TARGET_RELAXED_ORDERING): Document.
* target.h (struct gcc_target): Add relaxed_ordering field.
* target-def.h (TARGET_RELAXED_ORDERING): Define default.
(TARGET_INITIALIZER): Add it.
* config/alpha/alpha.c (TARGET_RELAXED_ORDERING): Define.
* config/ia64/ia64.c (TARGET_RELAXED_ORDERING): Define.
* config/rs6000/rs6000.c (TARGET_RELAXED_ORDERING): Define.
* config/sparc/sparc.c (TARGET_RELAXED_ORDERING): Define.
2004-12-27 Roger Sayle <roger@eyesopen.com>
PR driver/16118

@@ -9462,6 +9462,12 @@ alpha_init_libfuncs (void)
#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST alpha_build_builtin_va_list
/* The Alpha architecture does not require sequential consistency. See
http://www.cs.umd.edu/~pugh/java/memoryModel/AlphaReordering.html
for an example of how it can be violated in practice. */
#undef TARGET_RELAXED_ORDERING
#define TARGET_RELAXED_ORDERING true
struct gcc_target targetm = TARGET_INITIALIZER;

@@ -420,6 +420,11 @@ static const struct attribute_spec ia64_attribute_table[] =
#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P ia64_scalar_mode_supported_p
/* ia64 architecture manual 4.4.7: ... reads, writes, and flushes may occur
in an order different from the specified program order. */
#undef TARGET_RELAXED_ORDERING
#define TARGET_RELAXED_ORDERING true
struct gcc_target targetm = TARGET_INITIALIZER;
typedef enum

@@ -1004,6 +1004,17 @@ static const char alt_reg_names[][8] =
#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P rs6000_vector_mode_supported_p
/* MPC604EUM 3.5.2 Weak Consistency between Multiple Processors
The PowerPC architecture requires only weak consistency among
processors--that is, memory accesses between processors need not be
sequentially consistent and memory accesses among processors can occur
in any order. The ability to order memory accesses weakly provides
opportunities for more efficient use of the system bus. Unless a
dependency exists, the 604e allows read operations to precede store
operations. */
#undef TARGET_RELAXED_ORDERING
#define TARGET_RELAXED_ORDERING true
struct gcc_target targetm = TARGET_INITIALIZER;

@@ -494,6 +494,11 @@ enum processor_type sparc_cpu;
#define TARGET_ATTRIBUTE_TABLE sparc_attribute_table
#endif
/* The SPARC v9 architecture defines a relaxed memory ordering model (RMO)
which requires this if enabled, though it is never used in userspace,
and the Ultra3 processors don't implement it. */
#define TARGET_RELAXED_ORDERING TARGET_V9
struct gcc_target targetm = TARGET_INITIALIZER;
/* Validate and override various options, and do some machine dependent

@@ -1,3 +1,8 @@
2004-12-27 Jason Merrill <jason@redhat.com>
* decl.c (expand_static_init): Don't use shortcut if
targetm.relaxed_ordering.
2004-12-27 Mark Mitchell <mark@codesourcery.com>
PR c++/19149

@@ -5313,8 +5313,8 @@ expand_static_init (tree decl, tree init)
if (DECL_FUNCTION_SCOPE_P (decl))
{
/* Emit code to perform this initialization but once. */
tree if_stmt, inner_if_stmt = NULL_TREE;
tree then_clause, inner_then_clause = NULL_TREE;
tree if_stmt = NULL_TREE, inner_if_stmt = NULL_TREE;
tree then_clause = NULL_TREE, inner_then_clause = NULL_TREE;
tree guard, guard_addr, guard_addr_list;
tree acquire_fn, release_fn, abort_fn;
tree flag, begin;
@@ -5353,10 +5353,16 @@ expand_static_init (tree decl, tree init)
/* Create the guard variable. */
guard = get_guard (decl);
/* Begin the conditional initialization. */
if_stmt = begin_if_stmt ();
finish_if_stmt_cond (get_guard_cond (guard), if_stmt);
then_clause = begin_compound_stmt (BCS_NO_SCOPE);
/* This optimization isn't safe on targets with relaxed memory
consistency. On such targets we force synchronization in
__cxa_guard_acquire. */
if (!targetm.relaxed_ordering || !flag_threadsafe_statics)
{
/* Begin the conditional initialization. */
if_stmt = begin_if_stmt ();
finish_if_stmt_cond (get_guard_cond (guard), if_stmt);
then_clause = begin_compound_stmt (BCS_NO_SCOPE);
}
if (flag_threadsafe_statics)
{
@@ -5419,9 +5425,12 @@ expand_static_init (tree decl, tree init)
finish_if_stmt (inner_if_stmt);
}
finish_compound_stmt (then_clause);
finish_then_clause (if_stmt);
finish_if_stmt (if_stmt);
if (!targetm.relaxed_ordering || !flag_threadsafe_statics)
{
finish_compound_stmt (then_clause);
finish_then_clause (if_stmt);
finish_if_stmt (if_stmt);
}
}
else
static_aggregates = tree_cons (init, decl, static_aggregates);
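
To see how this change to expand_static_init plays out, here is a hand-written approximation (hypothetical names, not actual compiler output) of the code now emitted for a function-local static on a relaxed-ordering target with -fthreadsafe-statics; note the absence of the old unsynchronized shortcut test, so the first guard test happens inside __cxa_guard_acquire, behind the read barrier.

    // Hand-written approximation; `guard_t' stands in for the ABI's 64-bit
    // __guard, and the extern "C" declarations mirror the libsupc++ entry
    // points (normally obtained via <cxxabi.h>).
    typedef long long guard_t;
    extern "C" int  __cxa_guard_acquire (guard_t *);
    extern "C" void __cxa_guard_release (guard_t *);
    extern "C" void __cxa_guard_abort (guard_t *);

    struct Widget { Widget () {} };

    Widget &
    get_widget ()
    {
      // What the source says:  static Widget w;  return w;
      static guard_t guard;
      static Widget *w;                     // stands in for the in-place static object

      if (__cxa_guard_acquire (&guard))     // no bare pre-test of the guard any more
        {
          try
            {
              w = new Widget ();            // run the initializer exactly once
              __cxa_guard_release (&guard); // write barrier, then mark the guard set
            }
          catch (...)
            {
              __cxa_guard_abort (&guard);
              throw;
            }
        }
      return *w;
    }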

@@ -9574,6 +9574,16 @@ If defined, this macro is the number of entries in
@code{TARGET_FORMAT_TYPES}.
@end defmac
@deftypefn {Target Hook} bool TARGET_RELAXED_ORDERING
If set to @code{true}, means that the target's memory model does not
guarantee that loads which do not depend on one another will access
main memory in the order of the instruction stream; if ordering is
important, an explicit memory barrier must be used. This is true of
many recent processors which implement a policy of ``relaxed,''
``weak,'' or ``release'' memory consistency, such as Alpha, PowerPC,
and ia64. The default is @code{false}.
@end deftypefn
@defmac TARGET_USE_JCR_SECTION
This macro determines whether to use the JCR section to register Java
classes. By default, TARGET_USE_JCR_SECTION is defined to 1 if both
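
The hazard this hook documents can be made concrete with a small, purely hypothetical two-thread example: on a relaxed-ordering target the reader may observe flag set and still load a stale value of data unless both sides include a barrier, which is what the guard acquire/release paths below provide.

    // Hypothetical illustration, not part of the patch.  The empty asm is a
    // compiler-only barrier; on Alpha/PowerPC/ia64/SPARC-RMO a real
    // instruction (mb, lwsync/isync, ld.acq/st.rel, membar) is required.
    static int data;
    static volatile int flag;

    void
    writer (void)
    {
      data = 42;
      __asm __volatile ("" ::: "memory");     // write barrier: data must be visible before flag
      flag = 1;
    }

    int
    reader (void)
    {
      if (flag)
        {
          __asm __volatile ("" ::: "memory"); // read barrier: the load of data must not be satisfied early
          return data;                        // with real barriers on both sides, guaranteed to see 42
        }
      return -1;
    }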

@@ -394,6 +394,9 @@ Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#define TARGET_GIMPLIFY_VA_ARG_EXPR std_gimplify_va_arg_expr
#define TARGET_PASS_BY_REFERENCE hook_bool_CUMULATIVE_ARGS_mode_tree_bool_false
#define TARGET_RELAXED_ORDERING false
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size_or_pad
#define TARGET_CALLEE_COPIES hook_bool_CUMULATIVE_ARGS_mode_tree_bool_false
@@ -533,6 +536,7 @@ Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
TARGET_ASM_FILE_START_FILE_DIRECTIVE, \
TARGET_HANDLE_PRAGMA_REDEFINE_EXTNAME, \
TARGET_HANDLE_PRAGMA_EXTERN_PREFIX, \
TARGET_RELAXED_ORDERING, \
}
#include "hooks.h"

@@ -598,6 +598,10 @@ struct gcc_target
/* True if #pragma extern_prefix is to be supported. */
bool handle_pragma_extern_prefix;
/* True if the target is allowed to reorder memory accesses unless
synchronization is explicitly requested. */
bool relaxed_ordering;
/* Leave the boolean fields at the end. */
};

@@ -1,3 +1,31 @@
2004-12-27 Jason Merrill <jason@redhat.com>
Add memory barriers to the double-checked locking used for static
initialization.
* libsupc++/guard.cc (__test_and_acquire): Define default.
(_GLIBCXX_GUARD_TEST_AND_ACQUIRE, __set_and_release)
(_GLIBCXX_GUARD_SET_AND_RELEASE): Likewise.
(recursion_push, recursion_pop): New abstraction functions.
(__cxa_guard_acquire): Use _GLIBCXX_GUARD_TEST_AND_ACQUIRE.
(__cxa_guard_release): Use _GLIBCXX_GUARD_SET_AND_RELEASE.
* config/cpu/generic/cxxabi_tweaks.h (_GLIBCXX_GUARD_TEST): Rename
from _GLIBCXX_GUARD_ACQUIRE and reverse sense.
(_GLIBCXX_GUARD_SET): Rename from _GLIBCXX_GUARD_RELEASE.
* config/cpu/arm/cxxabi_tweaks.h: Likewise.
* config/cpu/alpha/atomic_word.h (_GLIBCXX_READ_MEM_BARRIER)
(_GLIBCXX_WRITE_MEM_BARRIER): Define.
* config/cpu/powerpc/atomic_word.h: Likewise.
* config/cpu/sparc/atomic_word.h: Likewise.
* config/cpu/generic/atomic_word.h: Define them, commented out.
* include/bits/atomicity.h: Define defaults.
* config/cpu/ia64/atomic_word.h (__test_and_acquire)
(__set_and_release): New inlines.
(_GLIBCXX_GUARD_TEST_AND_ACQUIRE): Define.
(_GLIBCXX_GUARD_SET_AND_RELEASE): Define.
* libsupc++/guard.cc (acquire_1): Use __builtin_trap instead of
abort();
2004-12-27 Paolo Carlini <pcarlini@suse.de>
* include/tr1/type_traits: Rework the _DEFINE_SPEC* macros.

@@ -0,0 +1,38 @@
// Low-level type for atomic operations -*- C++ -*-
// Copyright (C) 2004 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 2, or (at your option)
// any later version.
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING. If not, write to the Free
// Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307,
// USA.
// As a special exception, you may use this file as part of a free software
// library without restriction. Specifically, if other files instantiate
// templates or use macros or inline functions from this file, or you compile
// this file and link it with other files to produce an executable, this
// file does not by itself cause the resulting executable to be covered by
// the GNU General Public License. This exception does not however
// invalidate any other reasons why the executable file might be covered by
// the GNU General Public License.
#ifndef _GLIBCXX_ATOMIC_WORD_H
#define _GLIBCXX_ATOMIC_WORD_H 1
typedef int _Atomic_word;
#define _GLIBCXX_READ_MEM_BARRIER __asm __volatile ("mb":::"memory")
#define _GLIBCXX_WRITE_MEM_BARRIER __asm __volatile ("wmb":::"memory")
#endif

@@ -38,8 +38,8 @@ namespace __cxxabiv1
#ifdef __ARM_EABI__
// The ARM EABI uses the least significant bit of a 32-bit
// guard variable. */
#define _GLIBCXX_GUARD_ACQUIRE(x) (!(*(x) & 1))
#define _GLIBCXX_GUARD_RELEASE(x) *(x) = 1
#define _GLIBCXX_GUARD_TEST(x) ((*(x) & 1) != 0)
#define _GLIBCXX_GUARD_SET(x) *(x) = 1
typedef int __guard;
// We also want the element size in array cookies.
@@ -54,8 +54,8 @@ namespace __cxxabiv1
#else // __ARM_EABI__
// The generic ABI uses the first byte of a 64-bit guard variable.
#define _GLIBCXX_GUARD_ACQUIRE(x) (!*(char *) (x))
#define _GLIBCXX_GUARD_RELEASE(x) *(char *) (x) = 1
#define _GLIBCXX_GUARD_TEST(x) (*(char *) (x) != 0)
#define _GLIBCXX_GUARD_SET(x) *(char *) (x) = 1
__extension__ typedef int __guard __attribute__((mode (__DI__)));
// __cxa_vec_ctor has void return type.

@@ -32,4 +32,17 @@
typedef int _Atomic_word;
// Define these two macros using the appropriate memory barrier for the target.
// The commented out versions below are the defaults.
// See ia64/atomic_word.h for an alternative approach.
// This one prevents loads from being hoisted across the barrier;
// in other words, this is a Load-Load acquire barrier.
// This is necessary iff TARGET_RELAXED_ORDERING is defined in tm.h.
// #define _GLIBCXX_READ_MEM_BARRIER __asm __volatile ("":::"memory")
// This one prevents stores from being sunk across the barrier; in other
// words, a Store-Store release barrier.
// #define _GLIBCXX_WRITE_MEM_BARRIER __asm __volatile ("":::"memory")
#endif

@@ -36,8 +36,8 @@ namespace __cxxabiv1
#endif
// The generic ABI uses the first byte of a 64-bit guard variable.
#define _GLIBCXX_GUARD_ACQUIRE(x) (!*(char *) (x))
#define _GLIBCXX_GUARD_RELEASE(x) *(char *) (x) = 1
#define _GLIBCXX_GUARD_TEST(x) (*(char *) (x) != 0)
#define _GLIBCXX_GUARD_SET(x) *(char *) (x) = 1
__extension__ typedef int __guard __attribute__((mode (__DI__)));
// __cxa_vec_ctor has void return type.

@@ -0,0 +1,69 @@
// Low-level type for atomic operations -*- C++ -*-
// Copyright (C) 2004 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 2, or (at your option)
// any later version.
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING. If not, write to the Free
// Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307,
// USA.
// As a special exception, you may use this file as part of a free software
// library without restriction. Specifically, if other files instantiate
// templates or use macros or inline functions from this file, or you compile
// this file and link it with other files to produce an executable, this
// file does not by itself cause the resulting executable to be covered by
// the GNU General Public License. This exception does not however
// invalidate any other reasons why the executable file might be covered by
// the GNU General Public License.
#ifndef _GLIBCXX_ATOMIC_WORD_H
#define _GLIBCXX_ATOMIC_WORD_H 1
#include <cxxabi.h>
typedef int _Atomic_word;
namespace __gnu_cxx
{
// Test the first byte of __g and ensure that no loads are hoisted across
// the test.
inline bool
__test_and_acquire (__cxxabiv1::__guard *__g)
{
unsigned char __c;
unsigned char *__p = reinterpret_cast<unsigned char *>(__g);
// ldN.acq is a load with an implied hoist barrier.
// would ld8+mask be faster than just doing an ld1?
__asm __volatile ("ld1.acq %0 = %1" : "=r"(__c) : "m"(*__p) : "memory");
return __c != 0;
}
// Set the first byte of __g to 1 and ensure that no stores are sunk
// across the store.
inline void
__set_and_release (__cxxabiv1::__guard *__g)
{
unsigned char *__p = reinterpret_cast<unsigned char *>(__g);
// stN.rel is a store with an implied sink barrier.
// could load word, set flag, and CAS it back
__asm __volatile ("st1.rel %0 = %1" : "=m"(*__p) : "r"(1) : "memory");
}
// We don't define the _BARRIER macros on ia64 because the barriers are
// included in the test and set, above.
#define _GLIBCXX_GUARD_TEST_AND_ACQUIRE(G) __gnu_cxx::__test_and_acquire (G)
#define _GLIBCXX_GUARD_SET_AND_RELEASE(G) __gnu_cxx::__set_and_release (G)
}
#endif

@@ -0,0 +1,38 @@
// Low-level type for atomic operations -*- C++ -*-
// Copyright (C) 2004 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 2, or (at your option)
// any later version.
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING. If not, write to the Free
// Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307,
// USA.
// As a special exception, you may use this file as part of a free software
// library without restriction. Specifically, if other files instantiate
// templates or use macros or inline functions from this file, or you compile
// this file and link it with other files to produce an executable, this
// file does not by itself cause the resulting executable to be covered by
// the GNU General Public License. This exception does not however
// invalidate any other reasons why the executable file might be covered by
// the GNU General Public License.
#ifndef _GLIBCXX_ATOMIC_WORD_H
#define _GLIBCXX_ATOMIC_WORD_H 1
typedef int _Atomic_word;
#define _GLIBCXX_READ_MEM_BARRIER __asm __volatile ("isync":::"memory")
#define _GLIBCXX_WRITE_MEM_BARRIER __asm __volatile ("lwsync":::"memory")
#endif

@@ -36,4 +36,18 @@
typedef int _Atomic_word;
#endif
#if defined(__sparc_v9__)
// These are necessary under the V9 RMO model, though it is almost never
// used in userspace.
#define _GLIBCXX_READ_MEM_BARRIER \
__asm __volatile ("membar #LoadLoad":::"memory")
#define _GLIBCXX_WRITE_MEM_BARRIER \
__asm __volatile ("membar #StoreStore":::"memory")
#elif defined(__sparc_v8__)
// This is necessary under the PSO model.
#define _GLIBCXX_WRITE_MEM_BARRIER __asm __volatile ("stbar":::"memory")
#endif
#endif

@@ -48,4 +48,13 @@ namespace __gnu_cxx
__atomic_add(volatile _Atomic_word* __mem, int __val);
} // namespace __gnu_cxx
/* Even if the CPU doesn't need a memory barrier, we need to ensure that
the compiler doesn't reorder memory accesses across the barriers. */
#ifndef _GLIBCXX_READ_MEM_BARRIER
#define _GLIBCXX_READ_MEM_BARRIER __asm __volatile ("":::"memory")
#endif
#ifndef _GLIBCXX_WRITE_MEM_BARRIER
#define _GLIBCXX_WRITE_MEM_BARRIER __asm __volatile ("":::"memory")
#endif
#endif

@@ -27,11 +27,13 @@
// the GNU General Public License.
// Written by Mark Mitchell, CodeSourcery LLC, <mark@codesourcery.com>
// Thread support written by Jason Merrill, Red Hat Inc. <jason@redhat.com>
#include <cxxabi.h>
#include <exception>
#include <bits/c++config.h>
#include <bits/gthr.h>
#include <bits/atomicity.h>
// The IA64/generic ABI uses the first byte of the guard variable.
// The ARM EABI uses the least significant bit.
@@ -84,8 +86,36 @@ namespace
__gthread_recursive_mutex_unlock (&mutex);
}
}
#ifndef _GLIBCXX_GUARD_TEST_AND_ACQUIRE
inline bool
__test_and_acquire (__cxxabiv1::__guard *g)
{
bool b = _GLIBCXX_GUARD_TEST (g);
_GLIBCXX_READ_MEM_BARRIER;
return b;
}
#define _GLIBCXX_GUARD_TEST_AND_ACQUIRE(G) __test_and_acquire (G)
#endif
#ifndef _GLIBCXX_GUARD_SET_AND_RELEASE
inline void
__set_and_release (__cxxabiv1::__guard *g)
{
_GLIBCXX_WRITE_MEM_BARRIER;
_GLIBCXX_GUARD_SET (g);
}
#define _GLIBCXX_GUARD_SET_AND_RELEASE(G) __set_and_release (G)
#endif
#else /* !__GTHREADS */
#undef _GLIBCXX_GUARD_TEST_AND_ACQUIRE
#undef _GLIBCXX_GUARD_SET_AND_RELEASE
#define _GLIBCXX_GUARD_SET_AND_RELEASE(G) _GLIBCXX_GUARD_SET (G)
#endif /* __GTHREADS */
namespace __gnu_cxx
{
// 6.7[stmt.dcl]/4: If control re-enters the declaration (recursively)
@@ -107,28 +137,46 @@ namespace __gnu_cxx
namespace __cxxabiv1
{
static inline int
recursion_push (__guard* g)
{
return ((char *)g)[1]++;
}
static inline void
recursion_pop (__guard* g)
{
--((char *)g)[1];
}
static int
acquire_1 (__guard *g)
{
if (_GLIBCXX_GUARD_ACQUIRE (g))
if (_GLIBCXX_GUARD_TEST (g))
return 0;
if (recursion_push (g))
{
if (((char *)g)[1]++)
{
#ifdef __EXCEPTIONS
throw __gnu_cxx::recursive_init();
throw __gnu_cxx::recursive_init();
#else
abort ();
// Use __builtin_trap so we don't require abort().
__builtin_trap ();
#endif
}
return 1;
}
return 0;
return 1;
}
extern "C"
int __cxa_guard_acquire (__guard *g)
{
#ifdef __GTHREADS
// If the target can reorder loads, we need to insert a read memory
// barrier so that accesses to the guarded variable happen after the
// guard test.
if (_GLIBCXX_GUARD_TEST_AND_ACQUIRE (g))
return 0;
if (__gthread_active_p ())
{
// Simple wrapper for exception safety.
@@ -162,7 +210,7 @@ namespace __cxxabiv1
extern "C"
void __cxa_guard_abort (__guard *g)
{
((char *)g)[1]--;
recursion_pop (g);
#ifdef __GTHREADS
if (__gthread_active_p ())
static_mutex::unlock ();
@@ -172,8 +220,8 @@ namespace __cxxabiv1
extern "C"
void __cxa_guard_release (__guard *g)
{
((char *)g)[1]--;
_GLIBCXX_GUARD_RELEASE (g);
recursion_pop (g);
_GLIBCXX_GUARD_SET_AND_RELEASE (g);
#ifdef __GTHREADS
if (__gthread_active_p ())
static_mutex::unlock ();