atomic: Remove atomic_address, uplift to N3225.

2011-02-16  Benjamin Kosnik  <bkoz@redhat.com>

	* include/std/atomic: Remove atomic_address, uplift to N3225.
	* include/bits/atomic_0.h: Same.
	* include/bits/atomic_2.h: Same.
	* include/bits/atomic_base.h: Same.
	* testsuite/29_atomics/atomic_address/*: Delete.

From-SVN: r170217
Benjamin Kosnik 2011-02-16 19:01:51 +00:00
parent 3808007ca0
commit 036e0d4f8f
15 changed files with 790 additions and 1486 deletions
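For orientation only (this note and sketch are not part of the commit): with atomic_address removed, pointer atomics now go through the __atomic_base<_PTp*> partial specializations added below and the std::atomic<T*> interface built on them. A minimal, hedged sketch of the user-visible migration:

// Illustrative sketch only; not part of this commit.
#include <atomic>

void migration_example()
{
  static int data[2];

  // Before: std::atomic_address a(data); a.store(data + 1);
  // After:  the pointer partial specialization is used instead.
  std::atomic<int*> p(data);                         // typed pointer atomic
  int* old = p.exchange(data + 1,
                        std::memory_order_seq_cst);  // atomically swap in a new pointer
  p.store(old, std::memory_order_release);           // plain atomic store
}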


@@ -1,3 +1,11 @@
2011-02-16 Benjamin Kosnik <bkoz@redhat.com>
* include/std/atomic: Remove atomic_address, uplift to N3225.
* include/bits/atomic_0.h: Same.
* include/bits/atomic_2.h: Same.
* include/bits/atomic_base.h: Same.
* testsuite/29_atomics/atomic_address/*: Delete.
2011-02-14 Jonathan Wakely <jwakely.gcc@gmail.com>
* include/bits/regex.h (sub_match::sub_match): Add.
@@ -36,7 +44,7 @@
* testsuite/ext/is_heap/47709.cc: New.
2011-02-12 Jakub Jelinek <jakub@redhat.com>
Jonathan Wakely <jwakely.gcc@gmail.com>
PR libstdc++/47662
* testsuite/17_intro/headers/c++200x/operator_names.cc: New.
@@ -259,10 +267,10 @@
2011-01-31 Paolo Carlini <paolo.carlini@oracle.com>
* doc/html/ext/lwg-active.html: Update to Revision D73.
* doc/html/ext/lwg-closed.html: Likewise.
* doc/html/ext/lwg-defects.html: Likewise.
* doc/xml/manual/intro.xml: Update status of issues 408, 539, 865.
2011-01-30 Benjamin Kosnik <bkoz@redhat.com>


@@ -134,300 +134,6 @@ namespace __atomic0
};
/// atomic_address
struct atomic_address
{
private:
void* _M_i;
public:
atomic_address() = default;
~atomic_address() = default;
atomic_address(const atomic_address&) = delete;
atomic_address& operator=(const atomic_address&) = delete;
atomic_address& operator=(const atomic_address&) volatile = delete;
constexpr atomic_address(void* __v): _M_i (__v) { }
bool
is_lock_free() const { return false; }
bool
is_lock_free() const volatile { return false; }
void
store(void* __v, memory_order __m = memory_order_seq_cst)
{
__glibcxx_assert(__m != memory_order_acquire);
__glibcxx_assert(__m != memory_order_acq_rel);
__glibcxx_assert(__m != memory_order_consume);
_ATOMIC_STORE_(this, __v, __m);
}
void
store(void* __v, memory_order __m = memory_order_seq_cst) volatile
{
__glibcxx_assert(__m != memory_order_acquire);
__glibcxx_assert(__m != memory_order_acq_rel);
__glibcxx_assert(__m != memory_order_consume);
_ATOMIC_STORE_(this, __v, __m);
}
void*
load(memory_order __m = memory_order_seq_cst) const
{
__glibcxx_assert(__m != memory_order_release);
__glibcxx_assert(__m != memory_order_acq_rel);
return _ATOMIC_LOAD_(this, __m);
}
void*
load(memory_order __m = memory_order_seq_cst) const volatile
{
__glibcxx_assert(__m != memory_order_release);
__glibcxx_assert(__m != memory_order_acq_rel);
return _ATOMIC_LOAD_(this, __m);
}
void*
exchange(void* __v, memory_order __m = memory_order_seq_cst)
{ return _ATOMIC_MODIFY_(this, =, __v, __m); }
void*
exchange(void* __v, memory_order __m = memory_order_seq_cst) volatile
{ return _ATOMIC_MODIFY_(this, =, __v, __m); }
bool
compare_exchange_weak(void*& __v1, void* __v2, memory_order __m1,
memory_order __m2)
{
__glibcxx_assert(__m2 != memory_order_release);
__glibcxx_assert(__m2 != memory_order_acq_rel);
__glibcxx_assert(__m2 <= __m1);
return _ATOMIC_CMPEXCHNG_(this, &__v1, __v2, __m1);
}
bool
compare_exchange_weak(void*& __v1, void* __v2, memory_order __m1,
memory_order __m2) volatile
{
__glibcxx_assert(__m2 != memory_order_release);
__glibcxx_assert(__m2 != memory_order_acq_rel);
__glibcxx_assert(__m2 <= __m1);
return _ATOMIC_CMPEXCHNG_(this, &__v1, __v2, __m1);
}
bool
compare_exchange_weak(void*& __v1, void* __v2,
memory_order __m = memory_order_seq_cst)
{
return compare_exchange_weak(__v1, __v2, __m,
__calculate_memory_order(__m));
}
bool
compare_exchange_weak(void*& __v1, void* __v2,
memory_order __m = memory_order_seq_cst) volatile
{
return compare_exchange_weak(__v1, __v2, __m,
__calculate_memory_order(__m));
}
bool
compare_exchange_weak(const void*& __v1, const void* __v2,
memory_order __m1, memory_order __m2)
{
__glibcxx_assert(__m2 != memory_order_release);
__glibcxx_assert(__m2 != memory_order_acq_rel);
__glibcxx_assert(__m2 <= __m1);
return _ATOMIC_CMPEXCHNG_(this, &__v1, __v2, __m1);
}
bool
compare_exchange_weak(const void*& __v1, const void* __v2,
memory_order __m1, memory_order __m2) volatile
{
__glibcxx_assert(__m2 != memory_order_release);
__glibcxx_assert(__m2 != memory_order_acq_rel);
__glibcxx_assert(__m2 <= __m1);
return _ATOMIC_CMPEXCHNG_(this, &__v1, __v2, __m1);
}
bool
compare_exchange_weak(const void*& __v1, const void* __v2,
memory_order __m = memory_order_seq_cst)
{
return compare_exchange_weak(__v1, __v2, __m,
__calculate_memory_order(__m));
}
bool
compare_exchange_weak(const void*& __v1, const void* __v2,
memory_order __m = memory_order_seq_cst) volatile
{
return compare_exchange_weak(__v1, __v2, __m,
__calculate_memory_order(__m));
}
bool
compare_exchange_strong(void*& __v1, void* __v2, memory_order __m1,
memory_order __m2)
{
__glibcxx_assert(__m2 != memory_order_release);
__glibcxx_assert(__m2 != memory_order_acq_rel);
__glibcxx_assert(__m2 <= __m1);
return _ATOMIC_CMPEXCHNG_(this, &__v1, __v2, __m1);
}
bool
compare_exchange_strong(void*& __v1, void* __v2, memory_order __m1,
memory_order __m2) volatile
{
__glibcxx_assert(__m2 != memory_order_release);
__glibcxx_assert(__m2 != memory_order_acq_rel);
__glibcxx_assert(__m2 <= __m1);
return _ATOMIC_CMPEXCHNG_(this, &__v1, __v2, __m1);
}
bool
compare_exchange_strong(void*& __v1, void* __v2,
memory_order __m = memory_order_seq_cst)
{
return compare_exchange_strong(__v1, __v2, __m,
__calculate_memory_order(__m));
}
bool
compare_exchange_strong(void*& __v1, void* __v2,
memory_order __m = memory_order_seq_cst) volatile
{
return compare_exchange_strong(__v1, __v2, __m,
__calculate_memory_order(__m));
}
bool
compare_exchange_strong(const void*& __v1, const void* __v2,
memory_order __m1, memory_order __m2)
{
__glibcxx_assert(__m2 != memory_order_release);
__glibcxx_assert(__m2 != memory_order_acq_rel);
__glibcxx_assert(__m2 <= __m1);
return _ATOMIC_CMPEXCHNG_(this, &__v1, __v2, __m1);
}
bool
compare_exchange_strong(const void*& __v1, const void* __v2,
memory_order __m1, memory_order __m2) volatile
{
__glibcxx_assert(__m2 != memory_order_release);
__glibcxx_assert(__m2 != memory_order_acq_rel);
__glibcxx_assert(__m2 <= __m1);
return _ATOMIC_CMPEXCHNG_(this, &__v1, __v2, __m1);
}
bool
compare_exchange_strong(const void*& __v1, const void* __v2,
memory_order __m = memory_order_seq_cst)
{
return compare_exchange_strong(__v1, __v2, __m,
__calculate_memory_order(__m));
}
bool
compare_exchange_strong(const void*& __v1, const void* __v2,
memory_order __m = memory_order_seq_cst) volatile
{
return compare_exchange_strong(__v1, __v2, __m,
__calculate_memory_order(__m));
}
void*
fetch_add(ptrdiff_t __d, memory_order __m = memory_order_seq_cst)
{
void** __p = &(_M_i);
__atomic_flag_base* __g = __atomic_flag_for_address(__p);
__atomic_flag_wait_explicit(__g, __m);
void* __r = *__p;
*__p = (void*)((char*)(*__p) + __d);
atomic_flag_clear_explicit(__g, __m);
return __r;
}
void*
fetch_add(ptrdiff_t __d, memory_order __m = memory_order_seq_cst) volatile
{
void* volatile* __p = &(_M_i);
__atomic_flag_base* __g = __atomic_flag_for_address(__p);
__atomic_flag_wait_explicit(__g, __m);
void* __r = *__p;
*__p = (void*)((char*)(*__p) + __d);
atomic_flag_clear_explicit(__g, __m);
return __r;
}
void*
fetch_sub(ptrdiff_t __d, memory_order __m = memory_order_seq_cst)
{
void** __p = &(_M_i);
__atomic_flag_base* __g = __atomic_flag_for_address(__p);
__atomic_flag_wait_explicit(__g, __m);
void* __r = *__p;
*__p = (void*)((char*)(*__p) - __d);
atomic_flag_clear_explicit(__g, __m);
return __r;
}
void*
fetch_sub(ptrdiff_t __d, memory_order __m = memory_order_seq_cst) volatile
{
void* volatile* __p = &(_M_i);
__atomic_flag_base* __g = __atomic_flag_for_address(__p);
__atomic_flag_wait_explicit(__g, __m);
void* __r = *__p;
*__p = (void*)((char*)(*__p) - __d);
atomic_flag_clear_explicit(__g, __m);
return __r;
}
operator void*() const
{ return load(); }
operator void*() const volatile
{ return load(); }
// XXX
void*
operator=(void* __v)
{
store(__v);
return __v;
}
void*
operator=(void* __v) volatile
{
store(__v);
return __v;
}
void*
operator+=(ptrdiff_t __d)
{ return fetch_add(__d) + __d; }
void*
operator+=(ptrdiff_t __d) volatile
{ return fetch_add(__d) + __d; }
void*
operator-=(ptrdiff_t __d)
{ return fetch_sub(__d) - __d; }
void*
operator-=(ptrdiff_t __d) volatile
{ return fetch_sub(__d) - __d; }
};
/// Base class for atomic integrals.
//
// For each of the integral types, define atomic_[integral type] struct
@@ -728,6 +434,220 @@ namespace __atomic0
{ return _ATOMIC_MODIFY_(this, ^=, __i, __m); }
};
/// Partial specialization for pointer types.
template<typename _PTp>
struct __atomic_base<_PTp*>
{
private:
typedef _PTp* __return_pointer_type;
typedef void* __pointer_type;
__pointer_type _M_i;
public:
__atomic_base() = default;
~__atomic_base() = default;
__atomic_base(const __atomic_base&) = delete;
__atomic_base& operator=(const __atomic_base&) = delete;
__atomic_base& operator=(const __atomic_base&) volatile = delete;
// Requires __pointer_type convertible to _M_i.
constexpr __atomic_base(__return_pointer_type __p): _M_i (__p) { }
operator __return_pointer_type() const
{ return reinterpret_cast<__return_pointer_type>(load()); }
operator __return_pointer_type() const volatile
{ return reinterpret_cast<__return_pointer_type>(load()); }
__return_pointer_type
operator=(__pointer_type __p)
{
store(__p);
return reinterpret_cast<__return_pointer_type>(__p);
}
__return_pointer_type
operator=(__pointer_type __p) volatile
{
store(__p);
return reinterpret_cast<__return_pointer_type>(__p);
}
__return_pointer_type
operator++(int)
{ return reinterpret_cast<__return_pointer_type>(fetch_add(1)); }
__return_pointer_type
operator++(int) volatile
{ return reinterpret_cast<__return_pointer_type>(fetch_add(1)); }
__return_pointer_type
operator--(int)
{ return reinterpret_cast<__return_pointer_type>(fetch_sub(1)); }
__return_pointer_type
operator--(int) volatile
{ return reinterpret_cast<__return_pointer_type>(fetch_sub(1)); }
__return_pointer_type
operator++()
{ return reinterpret_cast<__return_pointer_type>(fetch_add(1) + 1); }
__return_pointer_type
operator++() volatile
{ return reinterpret_cast<__return_pointer_type>(fetch_add(1) + 1); }
__return_pointer_type
operator--()
{ return reinterpret_cast<__return_pointer_type>(fetch_sub(1) - 1); }
__return_pointer_type
operator--() volatile
{ return reinterpret_cast<__return_pointer_type>(fetch_sub(1) - 1); }
__return_pointer_type
operator+=(ptrdiff_t __d)
{ return reinterpret_cast<__return_pointer_type>(fetch_add(__d) + __d); }
__return_pointer_type
operator+=(ptrdiff_t __d) volatile
{ return reinterpret_cast<__return_pointer_type>(fetch_add(__d) + __d); }
__return_pointer_type
operator-=(ptrdiff_t __d)
{ return reinterpret_cast<__return_pointer_type>(fetch_sub(__d) - __d); }
__return_pointer_type
operator-=(ptrdiff_t __d) volatile
{ return reinterpret_cast<__return_pointer_type>(fetch_sub(__d) - __d); }
bool
is_lock_free() const
{ return true; }
bool
is_lock_free() const volatile
{ return true; }
void
store(__pointer_type __p, memory_order __m = memory_order_seq_cst)
{
__glibcxx_assert(__m != memory_order_acquire);
__glibcxx_assert(__m != memory_order_acq_rel);
__glibcxx_assert(__m != memory_order_consume);
_ATOMIC_STORE_(this, __p, __m);
}
void
store(__pointer_type __p,
memory_order __m = memory_order_seq_cst) volatile
{
__glibcxx_assert(__m != memory_order_acquire);
__glibcxx_assert(__m != memory_order_acq_rel);
__glibcxx_assert(__m != memory_order_consume);
volatile __pointer_type* __p2 = &_M_i;
__typeof__(__p) __w = (__p);
__atomic_flag_base* __g = __atomic_flag_for_address(__p2);
__atomic_flag_wait_explicit(__g, __m);
*__p2 = reinterpret_cast<__pointer_type>(__w);
atomic_flag_clear_explicit(__g, __m);
__w;
}
__return_pointer_type
load(memory_order __m = memory_order_seq_cst) const
{
__glibcxx_assert(__m != memory_order_release);
__glibcxx_assert(__m != memory_order_acq_rel);
void* __v = _ATOMIC_LOAD_(this, __m);
return reinterpret_cast<__return_pointer_type>(__v);
}
__return_pointer_type
load(memory_order __m = memory_order_seq_cst) const volatile
{
__glibcxx_assert(__m != memory_order_release);
__glibcxx_assert(__m != memory_order_acq_rel);
void* __v = _ATOMIC_LOAD_(this, __m);
return reinterpret_cast<__return_pointer_type>(__v);
}
__return_pointer_type
exchange(__pointer_type __p, memory_order __m = memory_order_seq_cst)
{
void* __v = _ATOMIC_MODIFY_(this, =, __p, __m);
return reinterpret_cast<__return_pointer_type>(__v);
}
__return_pointer_type
exchange(__pointer_type __p,
memory_order __m = memory_order_seq_cst) volatile
{
volatile __pointer_type* __p2 = &_M_i;
__typeof__(__p) __w = (__p);
__atomic_flag_base* __g = __atomic_flag_for_address(__p2);
__atomic_flag_wait_explicit(__g, __m);
__pointer_type __r = *__p2;
*__p2 = __w;
atomic_flag_clear_explicit(__g, __m);
__r;
return reinterpret_cast<__return_pointer_type>(_M_i);
}
bool
compare_exchange_strong(__return_pointer_type& __rp1, __pointer_type __p2,
memory_order __m1, memory_order __m2)
{
__glibcxx_assert(__m2 != memory_order_release);
__glibcxx_assert(__m2 != memory_order_acq_rel);
__glibcxx_assert(__m2 <= __m1);
__pointer_type& __p1 = reinterpret_cast<void*&>(__rp1);
return _ATOMIC_CMPEXCHNG_(this, &__p1, __p2, __m1);
}
bool
compare_exchange_strong(__return_pointer_type& __rp1, __pointer_type __p2,
memory_order __m1, memory_order __m2) volatile
{
__glibcxx_assert(__m2 != memory_order_release);
__glibcxx_assert(__m2 != memory_order_acq_rel);
__glibcxx_assert(__m2 <= __m1);
__pointer_type& __p1 = reinterpret_cast<void*&>(__rp1);
return _ATOMIC_CMPEXCHNG_(this, &__p1, __p2, __m1);
}
__return_pointer_type
fetch_add(ptrdiff_t __d, memory_order __m = memory_order_seq_cst)
{
void* __v = _ATOMIC_MODIFY_(this, +=, __d, __m);
return reinterpret_cast<__return_pointer_type>(__v);
}
__return_pointer_type
fetch_add(ptrdiff_t __d,
memory_order __m = memory_order_seq_cst) volatile
{
void* __v = _ATOMIC_MODIFY_(this, +=, __d, __m);
return reinterpret_cast<__return_pointer_type>(__v);
}
__return_pointer_type
fetch_sub(ptrdiff_t __d, memory_order __m = memory_order_seq_cst)
{
void* __v = _ATOMIC_MODIFY_(this, -=, __d, __m);
return reinterpret_cast<__return_pointer_type>(__v);
}
__return_pointer_type
fetch_sub(ptrdiff_t __d,
memory_order __m = memory_order_seq_cst) volatile
{
void* __v = _ATOMIC_MODIFY_(this, -=, __d, __m);
return reinterpret_cast<__return_pointer_type>(__v);
}
};
#undef _ATOMIC_LOAD_
#undef _ATOMIC_STORE_
#undef _ATOMIC_MODIFY_
@@ -735,6 +655,6 @@ namespace __atomic0
} // namespace __atomic0
_GLIBCXX_END_NAMESPACE_VERSION
} // namespace
} // namespace std
#endif
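The __atomic0 bodies above (store, exchange, and the _ATOMIC_MODIFY_ expansions) all funnel through a per-address flag lock. A simplified, self-contained sketch of that protocol, with hypothetical names standing in for the library's internal helpers:

// Simplified sketch of the lock-per-address protocol used by __atomic0.
// The guard flag and function names are illustrative, not library internals.
#include <atomic>
#include <cstddef>

static std::atomic_flag guard = ATOMIC_FLAG_INIT;   // the real code picks a flag keyed on the address

void* locked_fetch_add(void** addr, std::ptrdiff_t d)
{
  while (guard.test_and_set(std::memory_order_acquire))
    ;                                               // spin until the guard is ours
  void* old = *addr;                                // read the current pointer
  *addr = static_cast<char*>(old) + d;              // bump it by d bytes
  guard.clear(std::memory_order_release);           // let the next operation in
  return old;                                       // fetch_add returns the old value
}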


@@ -1,6 +1,6 @@
// -*- C++ -*- header.
// Copyright (C) 2008, 2009, 2010
// Copyright (C) 2008, 2009, 2010, 2011
// Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
@@ -23,7 +23,7 @@
// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
// <http://www.gnu.org/licenses/>.
/** @file bits/atomic_2.h
* This is an internal header file, included by other library headers.
* Do not attempt to use it directly. @headername{atomic}
*/
@@ -101,317 +101,6 @@ namespace __atomic2
};
/// atomic_address
struct atomic_address
{
private:
void* _M_i;
public:
atomic_address() = default;
~atomic_address() = default;
atomic_address(const atomic_address&) = delete;
atomic_address& operator=(const atomic_address&) = delete;
atomic_address& operator=(const atomic_address&) volatile = delete;
constexpr atomic_address(void* __v): _M_i (__v) { }
bool
is_lock_free() const { return true; }
bool
is_lock_free() const volatile { return true; }
void
store(void* __v, memory_order __m = memory_order_seq_cst)
{
__glibcxx_assert(__m != memory_order_acquire);
__glibcxx_assert(__m != memory_order_acq_rel);
__glibcxx_assert(__m != memory_order_consume);
if (__m == memory_order_relaxed)
_M_i = __v;
else
{
// write_mem_barrier();
_M_i = __v;
if (__m == memory_order_seq_cst)
__sync_synchronize();
}
}
void
store(void* __v, memory_order __m = memory_order_seq_cst) volatile
{
__glibcxx_assert(__m != memory_order_acquire);
__glibcxx_assert(__m != memory_order_acq_rel);
__glibcxx_assert(__m != memory_order_consume);
if (__m == memory_order_relaxed)
_M_i = __v;
else
{
// write_mem_barrier();
_M_i = __v;
if (__m == memory_order_seq_cst)
__sync_synchronize();
}
}
void*
load(memory_order __m = memory_order_seq_cst) const
{
__glibcxx_assert(__m != memory_order_release);
__glibcxx_assert(__m != memory_order_acq_rel);
__sync_synchronize();
void* __ret = _M_i;
__sync_synchronize();
return __ret;
}
void*
load(memory_order __m = memory_order_seq_cst) const volatile
{
__glibcxx_assert(__m != memory_order_release);
__glibcxx_assert(__m != memory_order_acq_rel);
__sync_synchronize();
void* __ret = _M_i;
__sync_synchronize();
return __ret;
}
void*
exchange(void* __v, memory_order __m = memory_order_seq_cst)
{
// XXX built-in assumes memory_order_acquire.
return __sync_lock_test_and_set(&_M_i, __v);
}
void*
exchange(void* __v, memory_order __m = memory_order_seq_cst) volatile
{
// XXX built-in assumes memory_order_acquire.
return __sync_lock_test_and_set(&_M_i, __v);
}
bool
compare_exchange_weak(void*& __v1, void* __v2, memory_order __m1,
memory_order __m2)
{ return compare_exchange_strong(__v1, __v2, __m1, __m2); }
bool
compare_exchange_weak(void*& __v1, void* __v2, memory_order __m1,
memory_order __m2) volatile
{ return compare_exchange_strong(__v1, __v2, __m1, __m2); }
bool
compare_exchange_weak(void*& __v1, void* __v2,
memory_order __m = memory_order_seq_cst)
{
return compare_exchange_weak(__v1, __v2, __m,
__calculate_memory_order(__m));
}
bool
compare_exchange_weak(void*& __v1, void* __v2,
memory_order __m = memory_order_seq_cst) volatile
{
return compare_exchange_weak(__v1, __v2, __m,
__calculate_memory_order(__m));
}
bool
compare_exchange_weak(const void*& __v1, const void* __v2,
memory_order __m1, memory_order __m2)
{ return compare_exchange_strong(__v1, __v2, __m1, __m2); }
bool
compare_exchange_weak(const void*& __v1, const void* __v2,
memory_order __m1, memory_order __m2) volatile
{ return compare_exchange_strong(__v1, __v2, __m1, __m2); }
bool
compare_exchange_weak(const void*& __v1, const void* __v2,
memory_order __m = memory_order_seq_cst)
{
return compare_exchange_weak(__v1, __v2, __m,
__calculate_memory_order(__m));
}
bool
compare_exchange_weak(const void*& __v1, const void* __v2,
memory_order __m = memory_order_seq_cst) volatile
{
return compare_exchange_weak(__v1, __v2, __m,
__calculate_memory_order(__m));
}
bool
compare_exchange_strong(void*& __v1, void* __v2, memory_order __m1,
memory_order __m2)
{
__glibcxx_assert(__m2 != memory_order_release);
__glibcxx_assert(__m2 != memory_order_acq_rel);
__glibcxx_assert(__m2 <= __m1);
void* __v1o = __v1;
void* __v1n = __sync_val_compare_and_swap(&_M_i, __v1o, __v2);
// Assume extra stores (of same value) allowed in true case.
__v1 = __v1n;
return __v1o == __v1n;
}
bool
compare_exchange_strong(void*& __v1, void* __v2, memory_order __m1,
memory_order __m2) volatile
{
__glibcxx_assert(__m2 != memory_order_release);
__glibcxx_assert(__m2 != memory_order_acq_rel);
__glibcxx_assert(__m2 <= __m1);
void* __v1o = __v1;
void* __v1n = __sync_val_compare_and_swap(&_M_i, __v1o, __v2);
// Assume extra stores (of same value) allowed in true case.
__v1 = __v1n;
return __v1o == __v1n;
}
bool
compare_exchange_strong(void*& __v1, void* __v2,
memory_order __m = memory_order_seq_cst)
{
return compare_exchange_strong(__v1, __v2, __m,
__calculate_memory_order(__m));
}
bool
compare_exchange_strong(void*& __v1, void* __v2,
memory_order __m = memory_order_seq_cst) volatile
{
return compare_exchange_strong(__v1, __v2, __m,
__calculate_memory_order(__m));
}
bool
compare_exchange_strong(const void*& __v1, const void* __v2,
memory_order __m1, memory_order __m2)
{
__glibcxx_assert(__m2 != memory_order_release);
__glibcxx_assert(__m2 != memory_order_acq_rel);
__glibcxx_assert(__m2 <= __m1);
const void* __v1o = __v1;
const void* __v1n = __sync_val_compare_and_swap(&_M_i, __v1o, __v2);
// Assume extra stores (of same value) allowed in true case.
__v1 = __v1n;
return __v1o == __v1n;
}
bool
compare_exchange_strong(const void*& __v1, const void* __v2,
memory_order __m1, memory_order __m2) volatile
{
__glibcxx_assert(__m2 != memory_order_release);
__glibcxx_assert(__m2 != memory_order_acq_rel);
__glibcxx_assert(__m2 <= __m1);
const void* __v1o = __v1;
const void* __v1n = __sync_val_compare_and_swap(&_M_i, __v1o, __v2);
// Assume extra stores (of same value) allowed in true case.
__v1 = __v1n;
return __v1o == __v1n;
}
bool
compare_exchange_strong(const void*& __v1, const void* __v2,
memory_order __m = memory_order_seq_cst)
{
return compare_exchange_strong(__v1, __v2, __m,
__calculate_memory_order(__m));
}
bool
compare_exchange_strong(const void*& __v1, const void* __v2,
memory_order __m = memory_order_seq_cst) volatile
{
return compare_exchange_strong(__v1, __v2, __m,
__calculate_memory_order(__m));
}
void*
fetch_add(ptrdiff_t __d, memory_order __m = memory_order_seq_cst)
{ return __sync_fetch_and_add(&_M_i, __d); }
void*
fetch_add(ptrdiff_t __d, memory_order __m = memory_order_seq_cst) volatile
{ return __sync_fetch_and_add(&_M_i, __d); }
void*
fetch_sub(ptrdiff_t __d, memory_order __m = memory_order_seq_cst)
{ return __sync_fetch_and_sub(&_M_i, __d); }
void*
fetch_sub(ptrdiff_t __d, memory_order __m = memory_order_seq_cst) volatile
{ return __sync_fetch_and_sub(&_M_i, __d); }
operator void*() const
{ return load(); }
operator void*() const volatile
{ return load(); }
void*
#if 0
// XXX as specified but won't compile as store takes void*,
// invalid conversion from const void* to void*
// CD1 had this signature
operator=(const void* __v)
#else
operator=(void* __v)
#endif
{
store(__v);
return __v;
}
void*
#if 0
// XXX as specified but won't compile as store takes void*,
// invalid conversion from const void* to void*
// CD1 had this signature, but store and this could both be const void*?
operator=(const void* __v) volatile
#else
operator=(void* __v) volatile
#endif
{
store(__v);
return __v;
}
void*
operator+=(ptrdiff_t __d)
{ return __sync_add_and_fetch(&_M_i, __d); }
void*
operator+=(ptrdiff_t __d) volatile
{ return __sync_add_and_fetch(&_M_i, __d); }
void*
operator-=(ptrdiff_t __d)
{ return __sync_sub_and_fetch(&_M_i, __d); }
void*
operator-=(ptrdiff_t __d) volatile
{ return __sync_sub_and_fetch(&_M_i, __d); }
};
/// Base class for atomic integrals.
//
// For each of the integral types, define atomic_[integral type] struct
@@ -747,9 +436,234 @@ namespace __atomic2
memory_order __m = memory_order_seq_cst) volatile
{ return __sync_fetch_and_xor(&_M_i, __i); }
};
/// Partial specialization for pointer types.
template<typename _PTp>
struct __atomic_base<_PTp*>
{
private:
typedef _PTp* __pointer_type;
__pointer_type _M_p;
public:
__atomic_base() = default;
~__atomic_base() = default;
__atomic_base(const __atomic_base&) = delete;
__atomic_base& operator=(const __atomic_base&) = delete;
__atomic_base& operator=(const __atomic_base&) volatile = delete;
// Requires __pointer_type convertible to _M_p.
constexpr __atomic_base(__pointer_type __p): _M_p (__p) { }
operator __pointer_type() const
{ return load(); }
operator __pointer_type() const volatile
{ return load(); }
__pointer_type
operator=(__pointer_type __p)
{
store(__p);
return __p;
}
__pointer_type
operator=(__pointer_type __p) volatile
{
store(__p);
return __p;
}
__pointer_type
operator++(int)
{ return fetch_add(1); }
__pointer_type
operator++(int) volatile
{ return fetch_add(1); }
__pointer_type
operator--(int)
{ return fetch_sub(1); }
__pointer_type
operator--(int) volatile
{ return fetch_sub(1); }
__pointer_type
operator++()
{ return fetch_add(1) + 1; }
__pointer_type
operator++() volatile
{ return fetch_add(1) + 1; }
__pointer_type
operator--()
{ return fetch_sub(1) -1; }
__pointer_type
operator--() volatile
{ return fetch_sub(1) -1; }
__pointer_type
operator+=(ptrdiff_t __d)
{ return fetch_add(__d) + __d; }
__pointer_type
operator+=(ptrdiff_t __d) volatile
{ return fetch_add(__d) + __d; }
__pointer_type
operator-=(ptrdiff_t __d)
{ return fetch_sub(__d) - __d; }
__pointer_type
operator-=(ptrdiff_t __d) volatile
{ return fetch_sub(__d) - __d; }
bool
is_lock_free() const
{ return true; }
bool
is_lock_free() const volatile
{ return true; }
void
store(__pointer_type __p, memory_order __m = memory_order_seq_cst)
{
__glibcxx_assert(__m != memory_order_acquire);
__glibcxx_assert(__m != memory_order_acq_rel);
__glibcxx_assert(__m != memory_order_consume);
if (__m == memory_order_relaxed)
_M_p = __p;
else
{
// write_mem_barrier();
_M_p = __p;
if (__m == memory_order_seq_cst)
__sync_synchronize();
}
}
void
store(__pointer_type __p,
memory_order __m = memory_order_seq_cst) volatile
{
__glibcxx_assert(__m != memory_order_acquire);
__glibcxx_assert(__m != memory_order_acq_rel);
__glibcxx_assert(__m != memory_order_consume);
if (__m == memory_order_relaxed)
_M_p = __p;
else
{
// write_mem_barrier();
_M_p = __p;
if (__m == memory_order_seq_cst)
__sync_synchronize();
}
}
__pointer_type
load(memory_order __m = memory_order_seq_cst) const
{
__glibcxx_assert(__m != memory_order_release);
__glibcxx_assert(__m != memory_order_acq_rel);
__sync_synchronize();
__pointer_type __ret = _M_p;
__sync_synchronize();
return __ret;
}
__pointer_type
load(memory_order __m = memory_order_seq_cst) const volatile
{
__glibcxx_assert(__m != memory_order_release);
__glibcxx_assert(__m != memory_order_acq_rel);
__sync_synchronize();
__pointer_type __ret = _M_p;
__sync_synchronize();
return __ret;
}
__pointer_type
exchange(__pointer_type __p, memory_order __m = memory_order_seq_cst)
{
// XXX built-in assumes memory_order_acquire.
return __sync_lock_test_and_set(&_M_p, __p);
}
__pointer_type
exchange(__pointer_type __p,
memory_order __m = memory_order_seq_cst) volatile
{
// XXX built-in assumes memory_order_acquire.
return __sync_lock_test_and_set(&_M_p, __p);
}
bool
compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
memory_order __m1, memory_order __m2)
{
__glibcxx_assert(__m2 != memory_order_release);
__glibcxx_assert(__m2 != memory_order_acq_rel);
__glibcxx_assert(__m2 <= __m1);
__pointer_type __p1o = __p1;
__pointer_type __p1n = __sync_val_compare_and_swap(&_M_p, __p1o, __p2);
// Assume extra stores (of same value) allowed in true case.
__p1 = __p1n;
return __p1o == __p1n;
}
bool
compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
memory_order __m1, memory_order __m2) volatile
{
__glibcxx_assert(__m2 != memory_order_release);
__glibcxx_assert(__m2 != memory_order_acq_rel);
__glibcxx_assert(__m2 <= __m1);
__pointer_type __p1o = __p1;
__pointer_type __p1n = __sync_val_compare_and_swap(&_M_p, __p1o, __p2);
// Assume extra stores (of same value) allowed in true case.
__p1 = __p1n;
return __p1o == __p1n;
}
__pointer_type
fetch_add(ptrdiff_t __d, memory_order __m = memory_order_seq_cst)
{ return __sync_fetch_and_add(&_M_p, __d); }
__pointer_type
fetch_add(ptrdiff_t __d,
memory_order __m = memory_order_seq_cst) volatile
{ return __sync_fetch_and_add(&_M_p, __d); }
__pointer_type
fetch_sub(ptrdiff_t __d, memory_order __m = memory_order_seq_cst)
{ return __sync_fetch_and_sub(&_M_p, __d); }
__pointer_type
fetch_sub(ptrdiff_t __d,
memory_order __m = memory_order_seq_cst) volatile
{ return __sync_fetch_and_sub(&_M_p, __d); }
};
} // namespace __atomic2
_GLIBCXX_END_NAMESPACE_VERSION
} // namespace
} // namespace std
#endif
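In the __atomic2 variant, compare_exchange_strong above wraps __sync_val_compare_and_swap. A minimal sketch of that mapping, assuming a compiler that provides GCC's legacy __sync builtins (not the library's exact code):

// Sketch of compare_exchange_strong on top of __sync_val_compare_and_swap.
template<typename T>
bool cas_strong(T** addr, T*& expected, T* desired)
{
  T* old = expected;
  // The builtin returns whatever value was actually observed in *addr.
  T* seen = __sync_val_compare_and_swap(addr, old, desired);
  expected = seen;          // on failure the caller sees the current value
  return seen == old;       // success iff the observed value matched the expected one
}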


@@ -22,7 +22,7 @@
// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
// <http://www.gnu.org/licenses/>.
/** @file bits/atomic_base.h
* This is an internal header file, included by other library headers.
* Do not attempt to use it directly. @headername{atomic}
*/
@@ -68,6 +68,12 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
return __mo2;
}
void
atomic_thread_fence(memory_order);
void
atomic_signal_fence(memory_order);
/// kill_dependency
template<typename _Tp>
inline _Tp
@@ -78,7 +84,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
}
/**
* @brief Base type for atomic_flag.
*
* Base type is POD with data, allowing atomic_flag to derive from
* it and meet the standard layout type requirement. In addition to
@@ -114,27 +120,24 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
namespace __atomic0
{
struct atomic_flag;
struct atomic_address;
template<typename _IntTp>
struct __atomic_base;
}
namespace __atomic2
{
struct atomic_flag;
struct atomic_address;
template<typename _IntTp>
struct __atomic_base;
}
namespace __atomic1
{
using __atomic2::atomic_flag;
using __atomic0::atomic_address;
using __atomic0::__atomic_base;
}
/// Lock-free Property
#if defined(_GLIBCXX_ATOMIC_BUILTINS_1) && defined(_GLIBCXX_ATOMIC_BUILTINS_2) \
@@ -157,7 +160,6 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
#define ATOMIC_INT_LOCK_FREE _GLIBCXX_ATOMIC_PROPERTY
#define ATOMIC_LONG_LOCK_FREE _GLIBCXX_ATOMIC_PROPERTY
#define ATOMIC_LLONG_LOCK_FREE _GLIBCXX_ATOMIC_PROPERTY
#define ATOMIC_ADDRESS_LOCK_FREE _GLIBCXX_ATOMIC_PROPERTY
inline namespace _GLIBCXX_ATOMIC_NAMESPACE { }
@@ -166,28 +168,28 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
typedef __atomic_base<char> atomic_char;
/// atomic_schar
typedef __atomic_base<signed char> atomic_schar;
/// atomic_uchar
typedef __atomic_base<unsigned char> atomic_uchar;
/// atomic_short
typedef __atomic_base<short> atomic_short;
/// atomic_ushort
typedef __atomic_base<unsigned short> atomic_ushort;
/// atomic_int
typedef __atomic_base<int> atomic_int;
/// atomic_uint
typedef __atomic_base<unsigned int> atomic_uint;
/// atomic_long
typedef __atomic_base<long> atomic_long;
/// atomic_ulong
typedef __atomic_base<unsigned long> atomic_ulong;
/// atomic_llong
typedef __atomic_base<long long> atomic_llong;
@@ -212,50 +214,50 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
typedef __atomic_base<int_least8_t> atomic_int_least8_t;
/// atomic_uint_least8_t
typedef __atomic_base<uint_least8_t> atomic_uint_least8_t;
/// atomic_int_least16_t
typedef __atomic_base<int_least16_t> atomic_int_least16_t;
/// atomic_uint_least16_t
typedef __atomic_base<uint_least16_t> atomic_uint_least16_t;
/// atomic_int_least32_t
typedef __atomic_base<int_least32_t> atomic_int_least32_t;
/// atomic_uint_least32_t
typedef __atomic_base<uint_least32_t> atomic_uint_least32_t;
/// atomic_int_least64_t
typedef __atomic_base<int_least64_t> atomic_int_least64_t;
/// atomic_uint_least64_t
typedef __atomic_base<uint_least64_t> atomic_uint_least64_t;
/// atomic_int_fast8_t
typedef __atomic_base<int_fast8_t> atomic_int_fast8_t;
/// atomic_uint_fast8_t
typedef __atomic_base<uint_fast8_t> atomic_uint_fast8_t;
/// atomic_int_fast16_t
typedef __atomic_base<int_fast16_t> atomic_int_fast16_t;
/// atomic_uint_fast16_t
typedef __atomic_base<uint_fast16_t> atomic_uint_fast16_t;
/// atomic_int_fast32_t
typedef __atomic_base<int_fast32_t> atomic_int_fast32_t;
/// atomic_uint_fast32_t
typedef __atomic_base<uint_fast32_t> atomic_uint_fast32_t;
/// atomic_int_fast64_t
typedef __atomic_base<int_fast64_t> atomic_int_fast64_t;
/// atomic_uint_fast64_t
typedef __atomic_base<uint_fast64_t> atomic_uint_fast64_t;
/// atomic_intptr_t
@@ -265,7 +267,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
typedef __atomic_base<uintptr_t> atomic_uintptr_t;
/// atomic_size_t
typedef __atomic_base<size_t> atomic_size_t;
/// atomic_intmax_t
typedef __atomic_base<intmax_t> atomic_intmax_t;
@@ -277,16 +279,17 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
typedef __atomic_base<ptrdiff_t> atomic_ptrdiff_t;
struct atomic_bool;
#define ATOMIC_VAR_INIT(_VI) { _VI }
template<typename _Tp>
struct atomic;
template<typename _Tp>
struct atomic<_Tp*>;
// @} group atomics
_GLIBCXX_END_NAMESPACE_VERSION
} // namespace
} // namespace std
#endif
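atomic_base.h now forward-declares atomic_thread_fence and atomic_signal_fence in the hunk above. A short illustrative use of the release/acquire fence pairing those declarations enable, assuming the usual <atomic> interface (not part of the commit):

// Illustrative release/acquire fence pairing; not part of this commit.
#include <atomic>

std::atomic<int> ready(0);
int payload;

void producer()
{
  payload = 42;                                          // ordinary write
  std::atomic_thread_fence(std::memory_order_release);   // order it before the flag store
  ready.store(1, std::memory_order_relaxed);
}

void consumer()
{
  while (ready.load(std::memory_order_relaxed) == 0)
    ;                                                    // wait for the flag
  std::atomic_thread_fence(std::memory_order_acquire);   // order the flag load before the read
  int v = payload;                                       // now guaranteed to see 42
  (void)v;
}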

File diff suppressed because it is too large.


@@ -1,28 +0,0 @@
// { dg-options "-std=gnu++0x" }
// { dg-do compile }
// Copyright (C) 2008, 2009, 2010 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING3. If not see
// <http://www.gnu.org/licenses/>.
#include <atomic>
#include <cstddef>
int main()
{
std::atomic_address a __attribute__((unused)) = { { NULL } };
return 0;
}
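This deleted test exercised aggregate initialization of atomic_address; an illustrative equivalent for the types that remain, using the ATOMIC_VAR_INIT macro visible in the atomic_base.h hunk above (a sketch, not part of the commit):

// Illustrative replacement for the deleted aggregate-init test.
#include <atomic>

std::atomic<void*> a = ATOMIC_VAR_INIT(nullptr);   // macro form defined in atomic_base.h

int main()
{
  return a.load() == nullptr ? 0 : 1;              // 0 when the initializer took effect
}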


@@ -1,31 +0,0 @@
// { dg-options "-std=gnu++0x" }
// { dg-do compile }
// Copyright (C) 2008, 2009 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING3. If not see
// <http://www.gnu.org/licenses/>.
#include <atomic>
void test01()
{
// Assign.
typedef std::atomic_address test_type;
test_type t1;
test_type t2;
t1 = t2; // { dg-error "deleted" }
}
// { dg-prune-output "include" }


@@ -1,29 +0,0 @@
// { dg-do compile }
// { dg-options "-std=gnu++0x" }
// Copyright (C) 2010 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING3. If not see
// <http://www.gnu.org/licenses/>.
#include <atomic>
#include <testsuite_common_types.h>
int main()
{
__gnu_test::constexpr_single_value_constructible test;
test.operator()<std::atomic_address, void*>();
return 0;
}


@@ -1,31 +0,0 @@
// { dg-options "-std=gnu++0x" }
// { dg-do compile }
// Copyright (C) 2008, 2009 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING3. If not see
// <http://www.gnu.org/licenses/>.
#include <atomic>
void test01()
{
// Copy.
typedef std::atomic_address test_type;
test_type t1;
test_type t2(t1); // { dg-error "deleted" }
}
// { dg-prune-output "include" }


@@ -1,27 +0,0 @@
// { dg-options "-std=gnu++0x" }
// Copyright (C) 2008, 2009 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING3. If not see
// <http://www.gnu.org/licenses/>.
#include <atomic>
int main()
{
// Default constructor.
std::atomic_address a;
return 0;
}


@@ -1,28 +0,0 @@
// { dg-options "-std=gnu++0x" }
// Copyright (C) 2008, 2009, 2010 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING3. If not see
// <http://www.gnu.org/licenses/>.
#include <atomic>
int main()
{
// Single value constructor.
void* v = 0;
std::atomic_address a(v);
return 0;
}


@@ -1,28 +0,0 @@
// { dg-options "-std=gnu++0x" }
// { dg-do compile }
// Copyright (C) 2008, 2009 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING3. If not see
// <http://www.gnu.org/licenses/>.
#include <atomic>
#include <testsuite_common_types.h>
void test01()
{
__gnu_test::standard_layout test;
test.operator()<std::atomic_address>();
}


@@ -1,28 +0,0 @@
// { dg-options "-std=gnu++0x" }
// { dg-do compile }
// Copyright (C) 2009 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING3. If not see
// <http://www.gnu.org/licenses/>.
#include <atomic>
#include <testsuite_common_types.h>
void test01()
{
__gnu_test::has_trivial_cons_dtor test;
test.operator()<std::atomic_address>();
}


@@ -94,14 +94,6 @@ namespace gnu
# endif
#endif
#ifndef ATOMIC_ADDRESS_LOCK_FREE
# error "ATOMIC_ADDRESS_LOCK_FREE must be a macro"
# if ATOMIC_ADDRESS_LOCK_FREE != 0 \
&& ATOMIC_ADDRESS_LOCK_FREE != 1 && ATOMIC_ADDRESS_LOCK_FREE != 2
# error "ATOMIC_ADDRESS_LOCK_FREE must be 0, 1, or 2"
# endif
#endif
#ifndef ATOMIC_FLAG_INIT
#error "ATOMIC_FLAG_INIT_must_be_a_macro"
#endif


@@ -1,7 +1,7 @@
// { dg-options "-std=gnu++0x" }
// { dg-do compile }
// Copyright (C) 2008, 2009, 2010 Free Software Foundation, Inc.
// Copyright (C) 2008, 2009, 2010, 2011 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
@@ -72,6 +72,4 @@ void test01()
using std::atomic_ptrdiff_t;
using std::atomic_intmax_t;
using std::atomic_uintmax_t;
using std::atomic_address;
}