d90d97ff9c
2010-01-12  Jonathan Wakely  <jwakely.gcc@gmail.com>

	PR libstdc++/24803
	PR libstdc++/35569
	PR libstdc++/42593
	* include/std/functional (bind): Forward rvalues and detect correct
	result type of bound function object.
	* include/std/mutex (call_once): Specify bind result type.
	* testsuite/20_util/reference_wrapper/invoke.cc: Remove invalid tests.
	* testsuite/20_util/reference_wrapper/24803.cc: Remove invalid tests
	and enable FIXME tests.
	* testsuite/20_util/bind/35569.cc: New.
	* testsuite/20_util/bind/ref2.cc: New.
	* testsuite/20_util/bind/38889.cc: New.
	* testsuite/20_util/bind/ref_neg.cc: New.
	* testsuite/20_util/bind/42593.cc: New.

From-SVN: r155826
// <mutex> -*- C++ -*-

// Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
// Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

/** @file mutex
 *  This is a Standard C++ Library header.
 */

#ifndef _GLIBCXX_MUTEX
#define _GLIBCXX_MUTEX 1

#pragma GCC system_header

#ifndef __GXX_EXPERIMENTAL_CXX0X__
# include <c++0x_warning.h>
#else

#include <tuple>
#include <cstddef>
#include <chrono>
#include <exception>
#include <type_traits>
#include <functional>
#include <system_error>
#include <bits/functexcept.h>
#include <bits/gthr.h>
#include <bits/move.h> // for std::swap

#if defined(_GLIBCXX_HAS_GTHREADS) && defined(_GLIBCXX_USE_C99_STDINT_TR1)

namespace std
{
  /**
   * @defgroup mutexes Mutexes
   * @ingroup concurrency
   *
   * Classes for mutex support.
   * @{
   */

  /// mutex
  class mutex
  {
    typedef __gthread_mutex_t __native_type;
    __native_type _M_mutex;

  public:
    typedef __native_type* native_handle_type;

    mutex()
    {
      // XXX EAGAIN, ENOMEM, EPERM, EBUSY(may), EINVAL(may)
#ifdef __GTHREAD_MUTEX_INIT
      __native_type __tmp = __GTHREAD_MUTEX_INIT;
      _M_mutex = __tmp;
#else
      __GTHREAD_MUTEX_INIT_FUNCTION(&_M_mutex);
#endif
    }

    mutex(const mutex&) = delete;
    mutex& operator=(const mutex&) = delete;

    void
    lock()
    {
      int __e = __gthread_mutex_lock(&_M_mutex);

      // EINVAL, EAGAIN, EBUSY, EDEADLK(may)
      if (__e)
        __throw_system_error(__e);
    }

    bool
    try_lock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      return !__gthread_mutex_trylock(&_M_mutex);
    }

    void
    unlock()
    {
      // XXX EINVAL, EAGAIN, EPERM
      __gthread_mutex_unlock(&_M_mutex);
    }

    native_handle_type
    native_handle()
    { return &_M_mutex; }
  };

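  /* A minimal usage sketch for the class above: serialising access to a
   * shared counter.  The names (g_counter, g_counter_mutex, increment) are
   * illustrative only and are not part of this header.
   *
   * @code
   * #include <mutex>
   *
   * std::mutex g_counter_mutex;
   * long g_counter = 0;
   *
   * void increment()
   * {
   *   g_counter_mutex.lock();    // blocks until ownership is acquired
   *   ++g_counter;               // critical section
   *   g_counter_mutex.unlock();  // every lock() needs a matching unlock()
   * }
   * @endcode
   *
   * Prefer the RAII wrappers (lock_guard, unique_lock) defined later in
   * this header, which call unlock() automatically.
   */
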
  /// recursive_mutex
  class recursive_mutex
  {
    typedef __gthread_recursive_mutex_t __native_type;
    __native_type _M_mutex;

  public:
    typedef __native_type* native_handle_type;

    recursive_mutex()
    {
      // XXX EAGAIN, ENOMEM, EPERM, EBUSY(may), EINVAL(may)
#ifdef __GTHREAD_RECURSIVE_MUTEX_INIT
      __native_type __tmp = __GTHREAD_RECURSIVE_MUTEX_INIT;
      _M_mutex = __tmp;
#else
      __GTHREAD_RECURSIVE_MUTEX_INIT_FUNCTION(&_M_mutex);
#endif
    }

    recursive_mutex(const recursive_mutex&) = delete;
    recursive_mutex& operator=(const recursive_mutex&) = delete;

    void
    lock()
    {
      int __e = __gthread_recursive_mutex_lock(&_M_mutex);

      // EINVAL, EAGAIN, EBUSY, EDEADLK(may)
      if (__e)
        __throw_system_error(__e);
    }

    bool
    try_lock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      return !__gthread_recursive_mutex_trylock(&_M_mutex);
    }

    void
    unlock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      __gthread_recursive_mutex_unlock(&_M_mutex);
    }

    native_handle_type
    native_handle()
    { return &_M_mutex; }
  };

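  /* A sketch of the one property that distinguishes recursive_mutex from
   * mutex: the owning thread may lock it again, and must unlock it once per
   * successful lock.  The names below are illustrative only.
   *
   * @code
   * #include <mutex>
   *
   * std::recursive_mutex g_rm;
   *
   * void helper()              // may be called with g_rm already held
   * {
   *   g_rm.lock();             // re-entrant: does not deadlock the owner
   *   // ... touch shared state ...
   *   g_rm.unlock();
   * }
   *
   * void caller()
   * {
   *   g_rm.lock();
   *   helper();                // lock count reaches 2, then drops back to 1
   *   g_rm.unlock();           // fully released here
   * }
   * @endcode
   */
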
  /// timed_mutex
  class timed_mutex
  {
    typedef __gthread_mutex_t __native_type;

#ifdef _GLIBCXX_USE_CLOCK_MONOTONIC
    typedef chrono::monotonic_clock __clock_t;
#else
    typedef chrono::high_resolution_clock __clock_t;
#endif

    __native_type _M_mutex;

  public:
    typedef __native_type* native_handle_type;

    timed_mutex()
    {
#ifdef __GTHREAD_MUTEX_INIT
      __native_type __tmp = __GTHREAD_MUTEX_INIT;
      _M_mutex = __tmp;
#else
      __GTHREAD_MUTEX_INIT_FUNCTION(&_M_mutex);
#endif
    }

    timed_mutex(const timed_mutex&) = delete;
    timed_mutex& operator=(const timed_mutex&) = delete;

    void
    lock()
    {
      int __e = __gthread_mutex_lock(&_M_mutex);

      // EINVAL, EAGAIN, EBUSY, EDEADLK(may)
      if (__e)
        __throw_system_error(__e);
    }

    bool
    try_lock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      return !__gthread_mutex_trylock(&_M_mutex);
    }

    template <class _Rep, class _Period>
      bool
      try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
      { return __try_lock_for_impl(__rtime); }

    template <class _Clock, class _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
      {
        chrono::time_point<_Clock, chrono::seconds> __s =
          chrono::time_point_cast<chrono::seconds>(__atime);

        chrono::nanoseconds __ns =
          chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

        __gthread_time_t __ts = {
          static_cast<std::time_t>(__s.time_since_epoch().count()),
          static_cast<long>(__ns.count())
        };

        return !__gthread_mutex_timedlock(&_M_mutex, &__ts);
      }

    void
    unlock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      __gthread_mutex_unlock(&_M_mutex);
    }

    native_handle_type
    native_handle()
    { return &_M_mutex; }

  private:
    // Selected when the clock tick is no coarser than _Period: convert the
    // relative time directly and defer to try_lock_until().
    template<typename _Rep, typename _Period>
      typename enable_if<
        ratio_less_equal<__clock_t::period, _Period>::value, bool>::type
      __try_lock_for_impl(const chrono::duration<_Rep, _Period>& __rtime)
      {
        __clock_t::time_point __atime = __clock_t::now()
          + chrono::duration_cast<__clock_t::duration>(__rtime);

        return try_lock_until(__atime);
      }

    // Selected when the clock tick is coarser than _Period: duration_cast
    // truncates, so add one tick to avoid waiting less than requested.
    template <typename _Rep, typename _Period>
      typename enable_if<
        !ratio_less_equal<__clock_t::period, _Period>::value, bool>::type
      __try_lock_for_impl(const chrono::duration<_Rep, _Period>& __rtime)
      {
        __clock_t::time_point __atime = __clock_t::now()
          + ++chrono::duration_cast<__clock_t::duration>(__rtime);

        return try_lock_until(__atime);
      }
  };

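  /* A sketch of the timed operations above: try_lock_for() bounds the wait
   * with a relative duration, try_lock_until() with an absolute time point,
   * and both return false instead of blocking indefinitely.  Names are
   * illustrative only.
   *
   * @code
   * #include <mutex>
   * #include <chrono>
   *
   * std::timed_mutex g_tm;
   *
   * bool update_with_timeout()
   * {
   *   // Give up if the mutex cannot be acquired within 100 milliseconds.
   *   if (!g_tm.try_lock_for(std::chrono::milliseconds(100)))
   *     return false;
   *   // ... critical section ...
   *   g_tm.unlock();
   *   return true;
   * }
   * @endcode
   */
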
  /// recursive_timed_mutex
  class recursive_timed_mutex
  {
    typedef __gthread_recursive_mutex_t __native_type;

#ifdef _GLIBCXX_USE_CLOCK_MONOTONIC
    typedef chrono::monotonic_clock __clock_t;
#else
    typedef chrono::high_resolution_clock __clock_t;
#endif

    __native_type _M_mutex;

  public:
    typedef __native_type* native_handle_type;

    recursive_timed_mutex()
    {
      // XXX EAGAIN, ENOMEM, EPERM, EBUSY(may), EINVAL(may)
#ifdef __GTHREAD_RECURSIVE_MUTEX_INIT
      __native_type __tmp = __GTHREAD_RECURSIVE_MUTEX_INIT;
      _M_mutex = __tmp;
#else
      __GTHREAD_RECURSIVE_MUTEX_INIT_FUNCTION(&_M_mutex);
#endif
    }

    recursive_timed_mutex(const recursive_timed_mutex&) = delete;
    recursive_timed_mutex& operator=(const recursive_timed_mutex&) = delete;

    void
    lock()
    {
      int __e = __gthread_recursive_mutex_lock(&_M_mutex);

      // EINVAL, EAGAIN, EBUSY, EDEADLK(may)
      if (__e)
        __throw_system_error(__e);
    }

    bool
    try_lock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      return !__gthread_recursive_mutex_trylock(&_M_mutex);
    }

    template <class _Rep, class _Period>
      bool
      try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
      { return __try_lock_for_impl(__rtime); }

    template <class _Clock, class _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
      {
        chrono::time_point<_Clock, chrono::seconds> __s =
          chrono::time_point_cast<chrono::seconds>(__atime);

        chrono::nanoseconds __ns =
          chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

        __gthread_time_t __ts = {
          static_cast<std::time_t>(__s.time_since_epoch().count()),
          static_cast<long>(__ns.count())
        };

        return !__gthread_recursive_mutex_timedlock(&_M_mutex, &__ts);
      }

    void
    unlock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      __gthread_recursive_mutex_unlock(&_M_mutex);
    }

    native_handle_type
    native_handle()
    { return &_M_mutex; }

  private:
    // Selected when the clock tick is no coarser than _Period: convert the
    // relative time directly and defer to try_lock_until().
    template<typename _Rep, typename _Period>
      typename enable_if<
        ratio_less_equal<__clock_t::period, _Period>::value, bool>::type
      __try_lock_for_impl(const chrono::duration<_Rep, _Period>& __rtime)
      {
        __clock_t::time_point __atime = __clock_t::now()
          + chrono::duration_cast<__clock_t::duration>(__rtime);

        return try_lock_until(__atime);
      }

    // Selected when the clock tick is coarser than _Period: duration_cast
    // truncates, so add one tick to avoid waiting less than requested.
    template <typename _Rep, typename _Period>
      typename enable_if<
        !ratio_less_equal<__clock_t::period, _Period>::value, bool>::type
      __try_lock_for_impl(const chrono::duration<_Rep, _Period>& __rtime)
      {
        __clock_t::time_point __atime = __clock_t::now()
          + ++chrono::duration_cast<__clock_t::duration>(__rtime);

        return try_lock_until(__atime);
      }
  };

  /// Do not acquire ownership of the mutex.
  struct defer_lock_t { };

  /// Try to acquire ownership of the mutex without blocking.
  struct try_to_lock_t { };

  /// Assume the calling thread has already obtained mutex ownership
  /// and manage it.
  struct adopt_lock_t { };

  extern const defer_lock_t  defer_lock;
  extern const try_to_lock_t try_to_lock;
  extern const adopt_lock_t  adopt_lock;

  /// @brief Scoped lock idiom.
  // Acquire the mutex here with a constructor call, then release with
  // the destructor call in accordance with RAII style.
  template<typename _Mutex>
    class lock_guard
    {
    public:
      typedef _Mutex mutex_type;

      explicit lock_guard(mutex_type& __m) : _M_device(__m)
      { _M_device.lock(); }

      lock_guard(mutex_type& __m, adopt_lock_t) : _M_device(__m)
      { } // calling thread already owns the mutex, so do not lock again

      ~lock_guard()
      { _M_device.unlock(); }

      lock_guard(const lock_guard&) = delete;
      lock_guard& operator=(const lock_guard&) = delete;

    private:
      mutex_type& _M_device;
    };

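  /* A minimal sketch of the scoped-lock idiom implemented above: the
   * constructor locks, the destructor unlocks, so the mutex is released on
   * every path out of the block, including exceptions.  Names are
   * illustrative only.
   *
   * @code
   * #include <mutex>
   *
   * std::mutex g_m;
   * int g_value = 0;
   *
   * void set_value(int v)
   * {
   *   std::lock_guard<std::mutex> guard(g_m);  // locks g_m here
   *   g_value = v;
   * }                                          // unlocks g_m here
   * @endcode
   *
   * The adopt_lock_t constructor instead takes over a mutex the calling
   * thread has already locked, and only arranges for the unlock.
   */
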
  /// unique_lock
  template<typename _Mutex>
    class unique_lock
    {
    public:
      typedef _Mutex mutex_type;

      unique_lock()
      : _M_device(0), _M_owns(false)
      { }

      explicit unique_lock(mutex_type& __m)
      : _M_device(&__m), _M_owns(false)
      {
        lock();
        _M_owns = true;
      }

      unique_lock(mutex_type& __m, defer_lock_t)
      : _M_device(&__m), _M_owns(false)
      { }

      unique_lock(mutex_type& __m, try_to_lock_t)
      : _M_device(&__m), _M_owns(_M_device->try_lock())
      { }

      unique_lock(mutex_type& __m, adopt_lock_t)
      : _M_device(&__m), _M_owns(true)
      {
        // XXX calling thread owns mutex
      }

      template<typename _Clock, typename _Duration>
        unique_lock(mutex_type& __m,
                    const chrono::time_point<_Clock, _Duration>& __atime)
        : _M_device(&__m), _M_owns(_M_device->try_lock_until(__atime))
        { }

      template<typename _Rep, typename _Period>
        unique_lock(mutex_type& __m,
                    const chrono::duration<_Rep, _Period>& __rtime)
        : _M_device(&__m), _M_owns(_M_device->try_lock_for(__rtime))
        { }

      ~unique_lock()
      {
        if (_M_owns)
          unlock();
      }

      unique_lock(const unique_lock&) = delete;
      unique_lock& operator=(const unique_lock&) = delete;

      unique_lock(unique_lock&& __u)
      : _M_device(__u._M_device), _M_owns(__u._M_owns)
      {
        __u._M_device = 0;
        __u._M_owns = false;
      }

      unique_lock& operator=(unique_lock&& __u)
      {
        if (_M_owns)
          unlock();

        unique_lock(std::move(__u)).swap(*this);

        __u._M_device = 0;
        __u._M_owns = false;

        return *this;
      }

      void
      lock()
      {
        if (!_M_device)
          __throw_system_error(int(errc::operation_not_permitted));
        else if (_M_owns)
          __throw_system_error(int(errc::resource_deadlock_would_occur));
        else
          {
            _M_device->lock();
            _M_owns = true;
          }
      }

      bool
      try_lock()
      {
        if (!_M_device)
          __throw_system_error(int(errc::operation_not_permitted));
        else if (_M_owns)
          __throw_system_error(int(errc::resource_deadlock_would_occur));
        else
          {
            _M_owns = _M_device->try_lock();
            return _M_owns;
          }
      }

      template<typename _Clock, typename _Duration>
        bool
        try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
        {
          if (!_M_device)
            __throw_system_error(int(errc::operation_not_permitted));
          else if (_M_owns)
            __throw_system_error(int(errc::resource_deadlock_would_occur));
          else
            {
              _M_owns = _M_device->try_lock_until(__atime);
              return _M_owns;
            }
        }

      template<typename _Rep, typename _Period>
        bool
        try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
        {
          if (!_M_device)
            __throw_system_error(int(errc::operation_not_permitted));
          else if (_M_owns)
            __throw_system_error(int(errc::resource_deadlock_would_occur));
          else
            {
              _M_owns = _M_device->try_lock_for(__rtime);
              return _M_owns;
            }
        }

      void
      unlock()
      {
        if (!_M_owns)
          __throw_system_error(int(errc::operation_not_permitted));
        else if (_M_device)
          {
            _M_device->unlock();
            _M_owns = false;
          }
      }

      void
      swap(unique_lock& __u)
      {
        std::swap(_M_device, __u._M_device);
        std::swap(_M_owns, __u._M_owns);
      }

      mutex_type*
      release()
      {
        mutex_type* __ret = _M_device;
        _M_device = 0;
        _M_owns = false;
        return __ret;
      }

      bool
      owns_lock() const
      { return _M_owns; }

      explicit operator bool() const
      { return owns_lock(); }

      mutex_type*
      mutex() const
      { return _M_device; }

    private:
      mutex_type* _M_device;
      bool        _M_owns; // XXX use atomic_bool
    };

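  /* A sketch showing how the tag types dispatch to the unique_lock
   * constructors above.  Unlike lock_guard, a unique_lock can be unlocked,
   * re-locked, moved, and queried via owns_lock().  Names are illustrative
   * only.
   *
   * @code
   * #include <mutex>
   *
   * std::mutex g_m;
   *
   * void examples()
   * {
   *   std::unique_lock<std::mutex> l1(g_m);                   // locks now
   *   l1.unlock();                                            // explicit unlock is allowed
   *
   *   std::unique_lock<std::mutex> l2(g_m, std::defer_lock);  // not locked yet
   *   l2.lock();                                              // lock later
   *   l2.unlock();
   *
   *   std::unique_lock<std::mutex> l3(g_m, std::try_to_lock); // non-blocking attempt
   *   if (l3.owns_lock())
   *     {
   *       // ... critical section ...
   *     }
   * }                                     // destructors unlock only what is owned
   * @endcode
   */
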
  template<typename _Mutex>
    inline void
    swap(unique_lock<_Mutex>& __x, unique_lock<_Mutex>& __y)
    { __x.swap(__y); }

  // Unlocks the lockables at indices _Idx, _Idx - 1, ..., 0 of the tuple.
  template<int _Idx>
    struct __unlock_impl
    {
      template<typename... _Lock>
        static void
        __do_unlock(tuple<_Lock&...>& __locks)
        {
          std::get<_Idx>(__locks).unlock();
          __unlock_impl<_Idx - 1>::__do_unlock(__locks);
        }
    };

  template<>
    struct __unlock_impl<-1>
    {
      template<typename... _Lock>
        static void
        __do_unlock(tuple<_Lock&...>&)
        { }
    };

  template<int _Idx, bool _Continue = true>
    struct __try_lock_impl
    {
      template<typename... _Lock>
        static int
        __do_try_lock(tuple<_Lock&...>& __locks)
        {
          if (std::get<_Idx>(__locks).try_lock())
            {
              return __try_lock_impl<_Idx + 1,
                _Idx + 2 < sizeof...(_Lock)>::__do_try_lock(__locks);
            }
          else
            {
              // try_lock() failed at _Idx: release only the lockables
              // acquired so far, i.e. indices 0 .. _Idx - 1.
              __unlock_impl<_Idx - 1>::__do_unlock(__locks);
              return _Idx;
            }
        }
    };

  template<int _Idx>
    struct __try_lock_impl<_Idx, false>
    {
      template<typename... _Lock>
        static int
        __do_try_lock(tuple<_Lock&...>& __locks)
        {
          if (std::get<_Idx>(__locks).try_lock())
            return -1;
          else
            {
              // try_lock() failed at _Idx: release only the lockables
              // acquired so far, i.e. indices 0 .. _Idx - 1.
              __unlock_impl<_Idx - 1>::__do_unlock(__locks);
              return _Idx;
            }
        }
    };

  /** @brief Generic try_lock.
   *  @param __l1 Meets Mutex requirements (try_lock() may throw).
   *  @param __l2 Meets Mutex requirements (try_lock() may throw).
   *  @param __l3 Meets Mutex requirements (try_lock() may throw).
   *  @return Returns -1 if all try_lock() calls return true. Otherwise returns
   *          a 0-based index corresponding to the argument that returned false.
   *  @post Either all arguments are locked, or none will be.
   *
   *  Sequentially calls try_lock() on each argument.
   */
  template<typename _Lock1, typename _Lock2, typename... _Lock3>
    int
    try_lock(_Lock1& __l1, _Lock2& __l2, _Lock3&... __l3)
    {
      tuple<_Lock1&, _Lock2&, _Lock3&...> __locks(__l1, __l2, __l3...);
      return __try_lock_impl<0>::__do_try_lock(__locks);
    }

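  /* A sketch of the all-or-nothing behaviour documented above: on failure
   * the 0-based index of the offending lockable is returned and everything
   * locked so far is released again.  Names are illustrative only.
   *
   * @code
   * #include <mutex>
   *
   * std::mutex g_m1;
   * std::mutex g_m2;
   *
   * bool transfer()
   * {
   *   int failed = std::try_lock(g_m1, g_m2); // -1 means both were acquired
   *   if (failed != -1)
   *     return false;                         // failed holds the failing index
   *   // ... operate on the data guarded by both mutexes ...
   *   g_m1.unlock();
   *   g_m2.unlock();
   *   return true;
   * }
   * @endcode
   */
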
  /// lock
  template<typename _L1, typename _L2, typename ..._L3>
    void
    lock(_L1&, _L2&, _L3&...);

  /// once_flag
  struct once_flag
  {
  private:
    typedef __gthread_once_t __native_type;
    __native_type _M_once;

  public:
    once_flag()
    {
      __native_type __tmp = __GTHREAD_ONCE_INIT;
      _M_once = __tmp;
    }

    once_flag(const once_flag&) = delete;
    once_flag& operator=(const once_flag&) = delete;

    template<typename _Callable, typename... _Args>
      friend void
      call_once(once_flag& __once, _Callable __f, _Args&&... __args);
  };

#ifdef _GLIBCXX_HAVE_TLS
  extern __thread void* __once_callable;
  extern __thread void (*__once_call)();

  template<typename _Callable>
    inline void
    __once_call_impl()
    {
      (*(_Callable*)__once_callable)();
    }
#else
  extern function<void()> __once_functor;

  extern void
  __set_once_functor_lock_ptr(unique_lock<mutex>*);

  extern mutex&
  __get_once_mutex();
#endif

  extern "C" void __once_proxy();

  /// call_once
  template<typename _Callable, typename... _Args>
    void
    call_once(once_flag& __once, _Callable __f, _Args&&... __args)
    {
#ifdef _GLIBCXX_HAVE_TLS
      auto __bound_functor = std::bind<void>(__f, __args...);
      __once_callable = &__bound_functor;
      __once_call = &__once_call_impl<decltype(__bound_functor)>;
#else
      unique_lock<mutex> __functor_lock(__get_once_mutex());
      __once_functor = std::bind<void>(__f, __args...);
      __set_once_functor_lock_ptr(&__functor_lock);
#endif

      int __e = __gthread_once(&(__once._M_once), &__once_proxy);

#ifndef _GLIBCXX_HAVE_TLS
      if (__functor_lock)
        __set_once_functor_lock_ptr(0);
#endif

      if (__e)
        __throw_system_error(__e);
    }

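  /* A minimal sketch of once-only initialisation with the function above:
   * any number of threads may call lazy_init(), but the initialisation body
   * runs at most once per once_flag.  Names are illustrative only.
   *
   * @code
   * #include <mutex>
   *
   * std::once_flag g_init_flag;
   * int* g_table = 0;
   *
   * void build_table()
   * { g_table = new int[256]; }
   *
   * void lazy_init()
   * {
   *   // Concurrent callers block until the chosen caller has finished.
   *   std::call_once(g_init_flag, build_table);
   * }
   * @endcode
   */
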
  // @} group mutexes
}

#endif // _GLIBCXX_HAS_GTHREADS && _GLIBCXX_USE_C99_STDINT_TR1

#endif // __GXX_EXPERIMENTAL_CXX0X__

#endif // _GLIBCXX_MUTEX