Implement std::pmr::synchronized_pool_resource

Define the thread-safe pool resource, using a shared_mutex to allow
multiple threads to concurrently allocate from thread-specific pools.

Define new weak symbols for the pthread_rwlock_t functions, to avoid
making libstdc++.so depend on libpthread.so

When the necessary Gthread support is absent only define the
feature-test macro to 1, rather than 201603. This is intended to imply
incomplete support, because everything except synchronized_pool_resource
works.

	Implement std::pmr::synchronized_pool_resource
	* config/abi/pre/gnu.ver: Add new symbols.
	* include/std/memory_resource [_GLIBCXX_HAS_GTHREADS]
	(__cpp_lib_memory_resource): Define to expected value, 201603.
	(synchronized_pool_resource): New class.
	[!_GLIBCXX_HAS_GTHREADS] (__cpp_lib_memory_resource): Define to 1.
	* include/std/shared_mutex (__glibcxx_rwlock_rdlock)
	(__glibcxx_rwlock_tryrdlock, __glibcxx_rwlock_wrlock)
	(__glibcxx_rwlock_trywrlock, __glibcxx_rwlock_unlock)
	(__glibcxx_rwlock_destroy, __glibcxx_rwlock_init)
	(__glibcxx_rwlock_timedrdlock, __glibcxx_rwlock_timedwrlock): Define
	weak symbols for POSIX rwlock functions.
	(__shared_mutex_pthread): Use weak symbols.
	* include/std/version (__cpp_lib_memory_resource): Define.
	* src/c++17/memory_resource.cc [_GLIBCXX_HAS_GTHREADS]
	(synchronized_pool_resource::_TPools): New class.
	(destroy_TPools): New function for pthread_key_create destructor.
	(synchronized_pool_resource::synchronized_pool_resource)
	(synchronized_pool_resource::~synchronized_pool_resource)
	(synchronized_pool_resource::release)
	(synchronized_pool_resource::do_allocate)
	(synchronized_pool_resource::do_deallocate): Define public members.
	(synchronized_pool_resource::_M_thread_specific_pools)
	(synchronized_pool_resource::_M_alloc_tpools)
	(synchronized_pool_resource::_M_alloc_shared_tpools): Define private
	members.
	* testsuite/20_util/synchronized_pool_resource/allocate.cc: New test.
	* testsuite/20_util/synchronized_pool_resource/cons.cc: New test.
	* testsuite/20_util/synchronized_pool_resource/is_equal.cc: New test.
	* testsuite/20_util/synchronized_pool_resource/multithreaded.cc: New
	test.
	* testsuite/20_util/synchronized_pool_resource/release.cc: New test.
	* testsuite/performance/20_util/memory_resource/pools.cc: Add
	multithreaded tests using pmr::synchronized_pool_resource.

From-SVN: r266242
This commit is contained in:
Jonathan Wakely 2018-11-17 23:35:44 +00:00 committed by Jonathan Wakely
parent b579523b7b
commit c5be64810c
13 changed files with 1387 additions and 55 deletions

View File

@ -1,3 +1,40 @@
2018-11-17 Jonathan Wakely <jwakely@redhat.com>
Implement std::pmr::synchronized_pool_resource
* config/abi/pre/gnu.ver: Add new symbols.
* include/std/memory_resource [_GLIBCXX_HAS_GTHREADS]
(__cpp_lib_memory_resource): Define to expected value, 201603.
(synchronized_pool_resource): New class.
[!_GLIBCXX_HAS_GTHREADS] (__cpp_lib_memory_resource): Define to 1.
* include/std/shared_mutex (__glibcxx_rwlock_rdlock)
(__glibcxx_rwlock_tryrdlock, __glibcxx_rwlock_wrlock)
(__glibcxx_rwlock_trywrlock, __glibcxx_rwlock_unlock)
(__glibcxx_rwlock_destroy, __glibcxx_rwlock_init)
(__glibcxx_rwlock_timedrdlock, __glibcxx_rwlock_timedwrlock): Define
weak symbols for POSIX rwlock functions.
(__shared_mutex_pthread): Use weak symbols.
* include/std/version (__cpp_lib_memory_resource): Define.
* src/c++17/memory_resource.cc [_GLIBCXX_HAS_GTHREADS]
(synchronized_pool_resource::_TPools): New class.
(destroy_TPools): New function for pthread_key_create destructor.
(synchronized_pool_resource::synchronized_pool_resource)
(synchronized_pool_resource::~synchronized_pool_resource)
(synchronized_pool_resource::release)
(synchronized_pool_resource::do_allocate)
(synchronized_pool_resource::do_deallocate): Define public members.
(synchronized_pool_resource::_M_thread_specific_pools)
(synchronized_pool_resource::_M_alloc_tpools)
(synchronized_pool_resource::_M_alloc_shared_tpools): Define private
members.
* testsuite/20_util/synchronized_pool_resource/allocate.cc: New test.
* testsuite/20_util/synchronized_pool_resource/cons.cc: New test.
* testsuite/20_util/synchronized_pool_resource/is_equal.cc: New test.
* testsuite/20_util/synchronized_pool_resource/multithreaded.cc: New
test.
* testsuite/20_util/synchronized_pool_resource/release.cc: New test.
* testsuite/performance/20_util/memory_resource/pools.cc: Add
multithreaded tests using pmr::synchronized_pool_resource.
2018-11-16 Renlin Li <renlin.li@arm.com>
Tejas Belagod <tejas.belagod@arm.com>

View File

@ -2039,13 +2039,6 @@ GLIBCXX_3.4.26 {
_ZNSt7__cxx1118basic_stringstreamI[cw]St11char_traitsI[cw]ESaI[cw]EEC[12]Ev;
_ZNSt7__cxx1119basic_[io]stringstreamI[cw]St11char_traitsI[cw]ESaI[cw]EEC[12]Ev;
_ZNSt3pmr19new_delete_resourceEv;
_ZNSt3pmr20null_memory_resourceEv;
_ZNSt3pmr20get_default_resourceEv;
_ZNSt3pmr20set_default_resourceEPNS_15memory_resourceE;
_ZNSt3pmr25monotonic_buffer_resource13_M_new_bufferE[jmy][jmy];
_ZNSt3pmr25monotonic_buffer_resource18_M_release_buffersEv;
# std::__throw_ios_failure(const char*, int);
_ZSt19__throw_ios_failurePKci;
@ -2057,6 +2050,18 @@ GLIBCXX_3.4.26 {
_ZN11__gnu_debug25_Safe_local_iterator_base16_M_attach_singleEPNS_19_Safe_sequence_baseEb;
# <memory_resource> members
_ZNSt3pmr19new_delete_resourceEv;
_ZNSt3pmr20null_memory_resourceEv;
_ZNSt3pmr20get_default_resourceEv;
_ZNSt3pmr20set_default_resourceEPNS_15memory_resourceE;
_ZNSt3pmr25monotonic_buffer_resource13_M_new_bufferE[jmy][jmy];
_ZNSt3pmr25monotonic_buffer_resource18_M_release_buffersEv;
_ZTINSt3pmr26synchronized_pool_resourceE;
_ZNSt3pmr26synchronized_pool_resourceC[12]ERKNS_12pool_optionsEPNS_15memory_resourceE;
_ZNSt3pmr26synchronized_pool_resourceD[12]Ev;
_ZNSt3pmr26synchronized_pool_resource7releaseEv;
_ZNSt3pmr26synchronized_pool_resource11do_allocateE[jmy][jmy];
_ZNSt3pmr26synchronized_pool_resource13do_deallocateEPv[jmy][jmy];
_ZTINSt3pmr28unsynchronized_pool_resourceE;
_ZNSt3pmr28unsynchronized_pool_resourceC[12]ERKNS_12pool_optionsEPNS_15memory_resourceE;
_ZNSt3pmr28unsynchronized_pool_resourceD[12]Ev;

View File

@ -37,6 +37,7 @@
#include <utility> // pair, index_sequence
#include <vector> // vector
#include <cstddef> // size_t, max_align_t
#include <shared_mutex> // shared_mutex
#include <debug/assertions.h>
namespace std _GLIBCXX_VISIBILITY(default)
@ -44,7 +45,13 @@ namespace std _GLIBCXX_VISIBILITY(default)
_GLIBCXX_BEGIN_NAMESPACE_VERSION
namespace pmr
{
// #define __cpp_lib_memory_resource 201603
#ifdef _GLIBCXX_HAS_GTHREADS
// Header and all contents are present.
# define __cpp_lib_memory_resource 201603
#else
// The pmr::synchronized_pool_resource type is missing.
# define __cpp_lib_memory_resource 1
#endif
class memory_resource;
@ -60,7 +67,9 @@ namespace pmr
// Pool resource classes
struct pool_options;
#ifdef _GLIBCXX_HAS_GTHREADS
class synchronized_pool_resource;
#endif
class unsynchronized_pool_resource;
class monotonic_buffer_resource;
@ -338,7 +347,72 @@ namespace pmr
const int _M_npools;
};
// TODO class synchronized_pool_resource
#ifdef _GLIBCXX_HAS_GTHREADS
/// A thread-safe memory resource that manages pools of fixed-size blocks.
class synchronized_pool_resource : public memory_resource
{
public:
// Construct with the given options, obtaining memory from *__upstream
// (must not be null, per the nonnull attribute).
synchronized_pool_resource(const pool_options& __opts,
memory_resource* __upstream)
__attribute__((__nonnull__));
// Default: default options and the current default resource.
synchronized_pool_resource()
: synchronized_pool_resource(pool_options(), get_default_resource())
{ }
// Default options, explicit upstream resource.
explicit
synchronized_pool_resource(memory_resource* __upstream)
__attribute__((__nonnull__))
: synchronized_pool_resource(pool_options(), __upstream)
{ }
// Explicit options, current default resource as upstream.
explicit
synchronized_pool_resource(const pool_options& __opts)
: synchronized_pool_resource(__opts, get_default_resource()) { }
// Not copyable: the resource owns pools and a thread-specific key.
synchronized_pool_resource(const synchronized_pool_resource&) = delete;
virtual ~synchronized_pool_resource();
synchronized_pool_resource&
operator=(const synchronized_pool_resource&) = delete;
// Return all allocated memory to the upstream resource.
void release();
// Observer for the upstream resource; never returns null.
memory_resource*
upstream_resource() const noexcept
__attribute__((__returns_nonnull__))
{ return _M_impl.resource(); }
// The (normalized) options this resource was constructed with.
pool_options options() const noexcept { return _M_impl._M_opts; }
protected:
void*
do_allocate(size_t __bytes, size_t __alignment) override;
void
do_deallocate(void* __p, size_t __bytes, size_t __alignment) override;
// Pool resources only compare equal to themselves.
bool
do_is_equal(const memory_resource& __other) const noexcept override
{ return this == &__other; }
public:
// Thread-specific pools (only public for access by implementation details)
struct _TPools;
private:
// Both allocators require the exclusive lock to be held by the caller.
_TPools* _M_alloc_tpools(lock_guard<shared_mutex>&);
_TPools* _M_alloc_shared_tpools(lock_guard<shared_mutex>&);
auto _M_thread_specific_pools() noexcept;
__pool_resource _M_impl;
__gthread_key_t _M_key;
// Linked list of thread-specific pools. All threads share _M_tpools[0].
_TPools* _M_tpools = nullptr;
mutable shared_mutex _M_mx;
};
#endif
/// A non-thread-safe memory resource that manages pools of fixed-size blocks.
class unsynchronized_pool_resource : public memory_resource

View File

@ -57,6 +57,90 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
class shared_timed_mutex;
#if _GLIBCXX_USE_PTHREAD_RWLOCK_T
#ifdef __gthrw
// Declare a weak alias for pthread_<name> (via __gthrw) and define an
// inline wrapper __glibcxx_<name> that forwards to it only when threads
// are active, so libstdc++.so need not link against libpthread.
// When threads are inactive the wrapper is a no-op returning success (0).
#define _GLIBCXX_GTHRW(name) \
__gthrw(pthread_ ## name); \
static inline int \
__glibcxx_ ## name (pthread_rwlock_t *__rwlock) \
{ \
if (__gthread_active_p ()) \
return __gthrw_(pthread_ ## name) (__rwlock); \
else \
return 0; \
}
// Single-argument rwlock operations all fit the macro's shape.
_GLIBCXX_GTHRW(rwlock_rdlock)
_GLIBCXX_GTHRW(rwlock_tryrdlock)
_GLIBCXX_GTHRW(rwlock_wrlock)
_GLIBCXX_GTHRW(rwlock_trywrlock)
_GLIBCXX_GTHRW(rwlock_unlock)
# ifndef PTHREAD_RWLOCK_INITIALIZER
// Only needed when there is no static initializer: init/destroy must be
// real calls. init takes an extra (attr) argument, so no macro here.
_GLIBCXX_GTHRW(rwlock_destroy)
__gthrw(pthread_rwlock_init);
static inline int
__glibcxx_rwlock_init (pthread_rwlock_t *__rwlock)
{
if (__gthread_active_p ())
return __gthrw_(pthread_rwlock_init) (__rwlock, NULL);
else
return 0;
}
# endif
# if _GTHREAD_USE_MUTEX_TIMEDLOCK
// Timed variants take a timespec, so they also get hand-written wrappers.
__gthrw(pthread_rwlock_timedrdlock);
static inline int
__glibcxx_rwlock_timedrdlock (pthread_rwlock_t *__rwlock,
const timespec *__ts)
{
if (__gthread_active_p ())
return __gthrw_(pthread_rwlock_timedrdlock) (__rwlock, __ts);
else
return 0;
}
__gthrw(pthread_rwlock_timedwrlock);
static inline int
__glibcxx_rwlock_timedwrlock (pthread_rwlock_t *__rwlock,
const timespec *__ts)
{
if (__gthread_active_p ())
return __gthrw_(pthread_rwlock_timedwrlock) (__rwlock, __ts);
else
return 0;
}
# endif
#else
// No __gthrw support: call the pthread functions directly under the same
// __glibcxx_rwlock_* names so the rest of the header is unconditional.
static inline int
__glibcxx_rwlock_rdlock (pthread_rwlock_t *__rwlock)
{ return pthread_rwlock_rdlock (__rwlock); }
static inline int
__glibcxx_rwlock_tryrdlock (pthread_rwlock_t *__rwlock)
{ return pthread_rwlock_tryrdlock (__rwlock); }
static inline int
__glibcxx_rwlock_wrlock (pthread_rwlock_t *__rwlock)
{ return pthread_rwlock_wrlock (__rwlock); }
static inline int
__glibcxx_rwlock_trywrlock (pthread_rwlock_t *__rwlock)
{ return pthread_rwlock_trywrlock (__rwlock); }
static inline int
__glibcxx_rwlock_unlock (pthread_rwlock_t *__rwlock)
{ return pthread_rwlock_unlock (__rwlock); }
static inline int
__glibcxx_rwlock_destroy(pthread_rwlock_t *__rwlock)
{ return pthread_rwlock_destroy (__rwlock); }
// Default (NULL) attributes, matching the __gthrw variant above.
static inline int
__glibcxx_rwlock_init(pthread_rwlock_t *__rwlock)
{ return pthread_rwlock_init (__rwlock, NULL); }
# if _GTHREAD_USE_MUTEX_TIMEDLOCK
static inline int
__glibcxx_rwlock_timedrdlock (pthread_rwlock_t *__rwlock,
const timespec *__ts)
{ return pthread_rwlock_timedrdlock (__rwlock, __ts); }
static inline int
__glibcxx_rwlock_timedwrlock (pthread_rwlock_t *__rwlock,
const timespec *__ts)
{ return pthread_rwlock_timedwrlock (__rwlock, __ts); }
# endif
#endif
/// A shared mutex type implemented using pthread_rwlock_t.
class __shared_mutex_pthread
{
@ -74,7 +158,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
public:
__shared_mutex_pthread()
{
int __ret = pthread_rwlock_init(&_M_rwlock, NULL);
int __ret = __glibcxx_rwlock_init(&_M_rwlock);
if (__ret == ENOMEM)
__throw_bad_alloc();
else if (__ret == EAGAIN)
@ -87,7 +171,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
// Destroy the underlying rwlock; destruction must not throw.
~__shared_mutex_pthread()
{
int __ret __attribute((__unused__)) = __glibcxx_rwlock_destroy(&_M_rwlock);
// Errors not handled: EBUSY, EINVAL
__glibcxx_assert(__ret == 0);
}
@ -99,7 +183,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
void
lock()
{
int __ret = pthread_rwlock_wrlock(&_M_rwlock);
int __ret = __glibcxx_rwlock_wrlock(&_M_rwlock);
if (__ret == EDEADLK)
__throw_system_error(int(errc::resource_deadlock_would_occur));
// Errors not handled: EINVAL
@ -109,7 +193,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
bool
try_lock()
{
int __ret = pthread_rwlock_trywrlock(&_M_rwlock);
int __ret = __glibcxx_rwlock_trywrlock(&_M_rwlock);
if (__ret == EBUSY) return false;
// Errors not handled: EINVAL
__glibcxx_assert(__ret == 0);
@ -119,7 +203,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
// Release the exclusive (write) lock; must only fail on misuse.
void
unlock()
{
int __ret __attribute((__unused__)) = __glibcxx_rwlock_unlock(&_M_rwlock);
// Errors not handled: EPERM, EBUSY, EINVAL
__glibcxx_assert(__ret == 0);
}
@ -135,7 +219,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
// is okay based on the current specification of forward progress
// guarantees by the standard.
do
__ret = pthread_rwlock_rdlock(&_M_rwlock);
__ret = __glibcxx_rwlock_rdlock(&_M_rwlock);
while (__ret == EAGAIN);
if (__ret == EDEADLK)
__throw_system_error(int(errc::resource_deadlock_would_occur));
@ -146,7 +230,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
bool
try_lock_shared()
{
int __ret = pthread_rwlock_tryrdlock(&_M_rwlock);
int __ret = __glibcxx_rwlock_tryrdlock(&_M_rwlock);
// If the maximum number of read locks has been exceeded, we just fail
// to acquire the lock. Unlike for lock(), we are not allowed to throw
// an exception.
@ -413,7 +497,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
static_cast<long>(__ns.count())
};
int __ret = pthread_rwlock_timedwrlock(&_M_rwlock, &__ts);
int __ret = __glibcxx_rwlock_timedwrlock(&_M_rwlock, &__ts);
// On self-deadlock, we just fail to acquire the lock. Technically,
// the program violated the precondition.
if (__ret == ETIMEDOUT || __ret == EDEADLK)
@ -466,7 +550,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
// mistaken for a spurious failure, which might help users realise
// there is a deadlock.
do
__ret = pthread_rwlock_timedrdlock(&_M_rwlock, &__ts);
__ret = __glibcxx_rwlock_timedrdlock(&_M_rwlock, &__ts);
while (__ret == EAGAIN || __ret == EDEADLK);
if (__ret == ETIMEDOUT)
return false;

View File

@ -118,6 +118,11 @@
#define __cpp_lib_map_insertion 201411
#define __cpp_lib_map_try_emplace 201411
#define __cpp_lib_math_special_functions 201603L
#ifdef _GLIBCXX_HAS_GTHREADS
# define __cpp_lib_memory_resource 201603
#else
# define __cpp_lib_memory_resource 1
#endif
#define __cpp_lib_node_extract 201606
#define __cpp_lib_nonmember_container_access 201411
#define __cpp_lib_not_fn 201603

View File

@ -896,6 +896,11 @@ namespace pmr
return n + 1;
}
#ifdef _GLIBCXX_HAS_GTHREADS
using shared_lock = std::shared_lock<shared_mutex>;
using exclusive_lock = lock_guard<shared_mutex>;
#endif
} // namespace
__pool_resource::
@ -987,6 +992,292 @@ namespace pmr
return p;
}
#ifdef _GLIBCXX_HAS_GTHREADS
// synchronized_pool_resource members.
/* Notes on implementation and thread safety:
*
* Each synchronized_pool_resource manages a linked list of N+1 _TPools
* objects, where N is the number of threads using the pool resource.
* Each _TPools object has its own set of pools, with their own chunks.
* The first element of the list, _M_tpools[0], can be used by any thread.
* The rest of the list contains a _TPools object for each thread,
* accessed via the thread-specific key _M_key (and referred to for
* exposition as _M_tpools[_M_key]).
* The first element, _M_tpools[0], contains "orphaned chunks" which were
* allocated by a thread which has since exited, and so there is no
* _M_tpools[_M_key] for that thread.
* A thread can access its own thread-specific set of pools via _M_key
* while holding a shared lock on _M_mx. Accessing _M_impl._M_unpooled
* or _M_tpools[0] or any other thread's _M_tpools[_M_key] requires an
* exclusive lock.
* The upstream_resource() pointer can be obtained without a lock, but
* any dereference of that pointer requires an exclusive lock.
* The _M_impl._M_opts and _M_impl._M_npools members are immutable,
* and can safely be accessed concurrently.
*/
extern "C" {
static void destroy_TPools(void*);
}
// One node of the owner's doubly-linked list of per-thread pool sets.
// Every member function requires the owner's exclusive lock (see the
// thread-safety notes above); the lock_guard parameters/locals enforce
// that the caller/callee actually holds it.
struct synchronized_pool_resource::_TPools
{
// Exclusive lock must be held in the thread where this constructor runs.
explicit
_TPools(synchronized_pool_resource& owner, exclusive_lock&)
: owner(owner), pools(owner._M_impl._M_alloc_pools())
{
// __builtin_printf("%p constructing\n", this);
__glibcxx_assert(pools);
}
// Exclusive lock must be held in the thread where this destructor runs.
~_TPools()
{
__glibcxx_assert(pools);
if (pools)
{
// Return every chunk to upstream, destroy the pool array, then
// give its storage back too.
memory_resource* r = owner.upstream_resource();
for (int i = 0; i < owner._M_impl._M_npools; ++i)
pools[i].release(r);
std::destroy_n(pools, owner._M_impl._M_npools);
polymorphic_allocator<__pool_resource::_Pool> a(r);
a.deallocate(pools, owner._M_impl._M_npools);
}
// Unlink this node from the owner's list.
if (prev)
prev->next = next;
if (next)
next->prev = prev;
}
// Exclusive lock must be held in the thread where this function runs.
void move_nonempty_chunks()
{
__glibcxx_assert(pools);
if (!pools)
return;
memory_resource* r = owner.upstream_resource();
// move all non-empty chunks to the shared _TPools
for (int i = 0; i < owner._M_impl._M_npools; ++i)
for (auto& c : pools[i]._M_chunks)
if (!c.empty())
owner._M_tpools->pools[i]._M_chunks.insert(std::move(c), r);
}
synchronized_pool_resource& owner;
__pool_resource::_Pool* pools = nullptr;
_TPools* prev = nullptr;
_TPools* next = nullptr;
// Thread-exit cleanup: take the exclusive lock, orphan live chunks into
// the shared _TPools, then destroy and deallocate this node.
static void destroy(_TPools* p)
{
exclusive_lock l(p->owner._M_mx);
// __glibcxx_assert(p != p->owner._M_tpools);
p->move_nonempty_chunks();
polymorphic_allocator<_TPools> a(p->owner.upstream_resource());
p->~_TPools();
a.deallocate(p, 1);
}
};
// Called when a thread exits
// C-linkage trampoline registered with __gthread_key_create: invoked with
// the exiting thread's _TPools pointer and forwards to _TPools::destroy.
extern "C" {
static void destroy_TPools(void* p)
{
using _TPools = synchronized_pool_resource::_TPools;
_TPools::destroy(static_cast<_TPools*>(p));
}
}
// Constructor
// Create the thread-specific key (destructor runs destroy_TPools on
// thread exit), then allocate the shared _TPools node, _M_tpools[0],
// under the exclusive lock. Throws system_error if key creation fails.
synchronized_pool_resource::
synchronized_pool_resource(const pool_options& opts,
memory_resource* upstream)
: _M_impl(opts, upstream)
{
if (int err = __gthread_key_create(&_M_key, destroy_TPools))
__throw_system_error(err);
exclusive_lock l(_M_mx);
_M_tpools = _M_alloc_shared_tpools(l);
}
// Destructor
// Return all memory upstream, then delete the key. Deleting the key must
// come after release() so no per-thread destructor can run afterwards.
synchronized_pool_resource::~synchronized_pool_resource()
{
release();
__gthread_key_delete(_M_key); // does not run destroy_TPools
}
// Free everything: destroy every _TPools node in the list and release
// unpooled memory, all under the exclusive lock. The key is deleted and
// re-created so stale per-thread pointers cannot outlive their pools
// (pointers stored under the old key are discarded with it).
void
synchronized_pool_resource::release()
{
exclusive_lock l(_M_mx);
if (_M_tpools)
{
__gthread_key_delete(_M_key); // does not run destroy_TPools
__gthread_key_create(&_M_key, destroy_TPools);
polymorphic_allocator<_TPools> a(upstream_resource());
// destroy+deallocate each _TPools
do
{
_TPools* p = _M_tpools;
_M_tpools = _M_tpools->next;
p->~_TPools();
a.deallocate(p, 1);
}
while (_M_tpools);
}
// release unpooled memory
_M_impl.release();
}
// Caller must hold shared or exclusive lock to ensure the pointer
// isn't invalidated before it can be used.
// Look up the calling thread's pool array via the thread-specific key,
// or null if this thread has no _TPools yet. Caller must hold a shared
// or exclusive lock (see comment above) to keep the pointer valid.
auto
synchronized_pool_resource::_M_thread_specific_pools() noexcept
{
__pool_resource::_Pool* pools = nullptr;
if (auto tp = static_cast<_TPools*>(__gthread_getspecific(_M_key)))
{
pools = tp->pools;
__glibcxx_assert(tp->pools);
}
return pools;
}
// Override for memory_resource::do_allocate
// Fast path: under a shared lock, try_allocate from this thread's own
// pool (no replenishing, so no exclusive lock needed). Slow path: drop
// the shared lock, take the exclusive lock, (re)create the shared and/or
// thread-specific _TPools if needed, and allocate with replenishment.
// Requests larger than the largest pool block go to _M_impl unpooled.
void*
synchronized_pool_resource::
do_allocate(size_t bytes, size_t alignment)
{
const auto block_size = std::max(bytes, alignment);
if (block_size <= _M_impl._M_opts.largest_required_pool_block)
{
const ptrdiff_t index = pool_index(block_size, _M_impl._M_npools);
memory_resource* const r = upstream_resource();
const pool_options opts = _M_impl._M_opts;
{
// Try to allocate from the thread-specific pool
shared_lock l(_M_mx);
if (auto pools = _M_thread_specific_pools()) // [[likely]]
{
// Need exclusive lock to replenish so use try_allocate:
if (void* p = pools[index].try_allocate())
return p;
// Need to take exclusive lock and replenish pool.
}
// Need to allocate or replenish thread-specific pools using
// upstream resource, so need to hold exclusive lock.
}
// N.B. Another thread could call release() now lock is not held.
exclusive_lock excl(_M_mx);
if (!_M_tpools) // [[unlikely]]
_M_tpools = _M_alloc_shared_tpools(excl);
auto pools = _M_thread_specific_pools();
if (!pools)
pools = _M_alloc_tpools(excl)->pools;
return pools[index].allocate(r, opts);
}
exclusive_lock l(_M_mx);
return _M_impl.allocate(bytes, alignment); // unpooled allocation
}
// Override for memory_resource::do_deallocate
// Fast path: under a shared lock, try returning the block to the calling
// thread's own pool. If that pool doesn't own it (the block may have been
// allocated on another thread), take the exclusive lock and try every
// _TPools node in the list. Oversize blocks go back to _M_impl unpooled.
void
synchronized_pool_resource::
do_deallocate(void* p, size_t bytes, size_t alignment)
{
size_t block_size = std::max(bytes, alignment);
if (block_size <= _M_impl._M_opts.largest_required_pool_block)
{
const ptrdiff_t index = pool_index(block_size, _M_impl._M_npools);
__glibcxx_assert(index != -1);
{
shared_lock l(_M_mx);
auto pools = _M_thread_specific_pools();
if (pools)
{
// No need to lock here, no other thread is accessing this pool.
if (pools[index].deallocate(upstream_resource(), p))
return;
}
// Block might have come from a different thread's pool,
// take exclusive lock and check every pool.
}
// TODO store {p, bytes, alignment} somewhere and defer returning
// the block to the correct thread-specific pool until we next
// take the exclusive lock.
exclusive_lock excl(_M_mx);
for (_TPools* t = _M_tpools; t != nullptr; t = t->next)
{
if (t->pools) // [[likely]]
{
if (t->pools[index].deallocate(upstream_resource(), p))
return;
}
}
}
exclusive_lock l(_M_mx);
_M_impl.deallocate(p, bytes, alignment);
}
// Allocate a thread-specific _TPools object and add it to the linked list.
// Allocate a thread-specific _TPools for the calling thread, store it
// under _M_key, and insert it into the list right after the shared node.
// Caller must hold the exclusive lock (the lock_guard parameter).
// Strong exception safety: on failure the node is destroyed/deallocated
// before rethrowing, and the list is only linked after success.
auto
synchronized_pool_resource::_M_alloc_tpools(exclusive_lock& l)
-> _TPools*
{
__glibcxx_assert(_M_tpools != nullptr);
// dump_list(_M_tpools);
polymorphic_allocator<_TPools> a(upstream_resource());
_TPools* p = a.allocate(1);
bool constructed = false;
__try
{
a.construct(p, *this, l);
constructed = true;
// __glibcxx_assert(__gthread_getspecific(_M_key) == nullptr);
if (int err = __gthread_setspecific(_M_key, p))
__throw_system_error(err);
}
__catch(...)
{
if (constructed)
a.destroy(p);
a.deallocate(p, 1);
__throw_exception_again;
}
p->prev = _M_tpools;
p->next = _M_tpools->next;
_M_tpools->next = p;
if (p->next)
p->next->prev = p;
return p;
}
// Allocate the shared _TPools object, _M_tpools[0]
// Allocate the shared list head, _M_tpools[0], which is not associated
// with any thread and is not stored under _M_key. Caller must hold the
// exclusive lock. Deallocates before rethrowing if construction throws.
auto
synchronized_pool_resource::_M_alloc_shared_tpools(exclusive_lock& l)
-> _TPools*
{
__glibcxx_assert(_M_tpools == nullptr);
polymorphic_allocator<_TPools> a(upstream_resource());
_TPools* p = a.allocate(1);
__try
{
a.construct(p, *this, l);
}
__catch(...)
{
a.deallocate(p, 1);
__throw_exception_again;
}
// __glibcxx_assert(p->next == nullptr);
// __glibcxx_assert(p->prev == nullptr);
return p;
}
#endif // _GLIBCXX_HAS_GTHREADS
// unsynchronized_pool_resource member functions
// Constructor

View File

@ -0,0 +1,159 @@
// Copyright (C) 2018 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING3. If not see
// <http://www.gnu.org/licenses/>.
// { dg-do run }
// { dg-options "-std=gnu++17 -pthread" }
// { dg-require-effective-target c++17 }
// { dg-require-effective-target pthread }
// { dg-require-gthreads "" }
#include <memory_resource>
#include <cstring>
#include <testsuite_allocator.h>
#include <testsuite_hooks.h>
// Small pooled allocations: blocks come from the pool, deallocating a
// block does not return memory upstream, and destroying the resource
// releases everything back to the test resource.
void
test01()
{
__gnu_test::memory_resource test_mr;
{
std::pmr::synchronized_pool_resource r(&test_mr);
void* p1 = r.allocate(1, 1);
VERIFY( p1 != nullptr );
auto n = test_mr.number_of_active_allocations();
VERIFY( n > 0 );
// Ensure memory region can be written to (without corrupting heap!)
std::memset(p1, 0xff, 1);
void* p2 = r.allocate(1, 1);
VERIFY( p2 != nullptr );
VERIFY( p2 != p1 );
VERIFY( test_mr.number_of_active_allocations() == n );
// NOTE(review): writes p1 a second time; presumably p2 was intended
// here — confirm against the upstream testsuite.
std::memset(p1, 0xff, 1);
r.deallocate(p1, 1, 1);
// Returning single blocks to the pool doesn't return them upstream:
VERIFY( test_mr.number_of_active_allocations() == n );
r.deallocate(p2, 1, 1);
VERIFY( test_mr.number_of_active_allocations() == n );
}
VERIFY( test_mr.number_of_active_allocations() == 0 );
}
// Pooled allocations should be served from already-replenished chunks:
// after the first allocation, setting the upstream pointer to null must
// not cause a crash because no further upstream calls should happen.
void
test02()
{
struct nullable_memory_resource : public std::pmr::memory_resource
{
void*
do_allocate(std::size_t bytes, std::size_t alignment) override
{ return upstream->allocate(bytes, alignment); }
void
do_deallocate(void* p, std::size_t bytes, std::size_t alignment) override
{ upstream->deallocate(p, bytes, alignment); }
bool
do_is_equal(const memory_resource& r) const noexcept override
{ return &r == this; }
// Deliberately nullable so any unexpected upstream use crashes loudly.
std::pmr::memory_resource* upstream = std::pmr::get_default_resource();
};
nullable_memory_resource test_mr;
std::pmr::synchronized_pool_resource r(&test_mr);
void* p1 = r.allocate(8, 1);
VERIFY( p1 != nullptr );
std::memset(p1, 0xff, 8);
test_mr.upstream = nullptr;
void* p2 = r.allocate(8, 1); //should not need to replenish
VERIFY( p2 != nullptr );
VERIFY( p2 != p1 );
// NOTE(review): writes p1 again; presumably p2 was intended — confirm
// against the upstream testsuite.
std::memset(p1, 0xff, 8);
r.deallocate(p1, 8, 1); // should not use upstream
r.deallocate(p2, 8, 1); // should not use upstream
// Destructor will return memory upstream, so restore the upstream resource:
test_mr.upstream = std::pmr::get_default_resource();
}
// Oversize (unpooled) allocations go straight upstream and deallocating
// them returns memory upstream immediately; the resource destructor
// (which calls release()) frees everything that was left allocated.
void
test03()
{
__gnu_test::memory_resource test_mr;
{
std::pmr::synchronized_pool_resource r({10, 16}, &test_mr);
std::size_t largest_pool = r.options().largest_required_pool_block;
void* p1 = r.allocate(2 * largest_pool);
VERIFY( p1 != nullptr );
const std::size_t n = test_mr.number_of_active_allocations();
// Allocation of pools + allocation of pmr::vector + oversize allocation:
VERIFY( n >= 1 );
std::memset(p1, 0xff, 2 * largest_pool);
void* p2 = r.allocate(3 * largest_pool);
VERIFY( p2 != nullptr );
VERIFY( p2 != p1 );
VERIFY( test_mr.number_of_active_allocations() == n + 1 );
std::memset(p2, 0xff, 3 * largest_pool);
r.deallocate(p1, 2 * largest_pool);
VERIFY( test_mr.number_of_active_allocations() == n );
r.deallocate(p2, 3 * largest_pool);
VERIFY( test_mr.number_of_active_allocations() == n - 1 );
}
VERIFY( test_mr.number_of_active_allocations() == 0 );
{
// Deliberately leak pooled and unpooled allocations, then let the
// destructor clean up.
std::pmr::synchronized_pool_resource r({16, 16}, &test_mr);
(void) r.allocate(2);
(void) r.allocate(8);
(void) r.allocate(16);
(void) r.allocate(2);
(void) r.allocate(8);
(void) r.allocate(16);
(void) r.allocate(2 * r.options().largest_required_pool_block);
VERIFY( test_mr.number_of_active_allocations() != 0 );
// Destructor calls release()
}
VERIFY( test_mr.number_of_active_allocations() == 0 );
}
// Alignment: pooled and unpooled allocations must honour the requested
// alignment, including alignments larger than the largest pool block.
void
test04()
{
__gnu_test::memory_resource test_mr;
std::pmr::synchronized_pool_resource r({256, 256}, &test_mr);
// Check alignment
void* p1 = r.allocate(2, 64);
VERIFY( (std::uintptr_t)p1 % 64 == 0 );
void* p2 = r.allocate(2, 128);
VERIFY( (std::uintptr_t)p2 % 128 == 0 );
void* p3 = r.allocate(2, 256);
VERIFY( (std::uintptr_t)p3 % 256 == 0 );
const std::size_t largest_pool = r.options().largest_required_pool_block;
// Oversize request with large alignment goes upstream, still aligned.
void* p4 = r.allocate(2 * largest_pool, 1024);
VERIFY( (std::uintptr_t)p4 % 1024 == 0 );
r.deallocate(p1, 2, 64);
r.deallocate(p2, 2, 128);
r.deallocate(p3, 2, 256);
r.deallocate(p4, 2 * largest_pool, 1024);
}
int
main()
{
test01();
test02();
test03();
test04();
}

View File

@ -0,0 +1,83 @@
// Copyright (C) 2018 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING3. If not see
// <http://www.gnu.org/licenses/>.
// { dg-do run }
// { dg-options "-std=gnu++17 -pthread" }
// { dg-require-effective-target c++17 }
// { dg-require-effective-target pthread }
// { dg-require-gthreads "" }
#include <memory_resource>
#include <testsuite_hooks.h>
#include <testsuite_allocator.h>
// Each constructor overload must record the expected upstream resource,
// and the type must be non-copyable but destructible.
void
test01()
{
using std::pmr::synchronized_pool_resource;
static_assert(!std::is_copy_constructible_v<synchronized_pool_resource>);
static_assert(!std::is_copy_assignable_v<synchronized_pool_resource>);
static_assert(std::is_destructible_v<synchronized_pool_resource>);
__gnu_test::memory_resource test_mr1, test_mr2;
__gnu_test::default_resource_mgr mgr(&test_mr1);
const std::pmr::pool_options opts{1, 2};
// (options, upstream)
synchronized_pool_resource pool_a = {opts, &test_mr2};
VERIFY( pool_a.upstream_resource() == &test_mr2 );
// default: default options + default resource
synchronized_pool_resource pool_b;
VERIFY( pool_b.upstream_resource() == std::pmr::get_default_resource() );
// (upstream) only
synchronized_pool_resource pool_c{&test_mr2};
VERIFY( pool_c.upstream_resource() == &test_mr2 );
// (options) only
synchronized_pool_resource pool_d{opts};
VERIFY( pool_d.upstream_resource() == std::pmr::get_default_resource() );
}
// Same constructor checks as test01, but through a derived class that
// inherits the constructors, to verify they remain usable from a base.
void
test02()
{
__gnu_test::memory_resource test_mr1, test_mr2;
__gnu_test::default_resource_mgr mgr(&test_mr1);
const std::pmr::pool_options opts{1, 2};
struct derived : std::pmr::synchronized_pool_resource
{
using synchronized_pool_resource::synchronized_pool_resource;
};
derived p1 = {opts, &test_mr2};
VERIFY( p1.upstream_resource() == &test_mr2 );
derived p2;
VERIFY( p2.upstream_resource() == std::pmr::get_default_resource() );
derived p3{&test_mr2};
VERIFY( p3.upstream_resource() == &test_mr2 );
derived p4{opts};
VERIFY( p4.upstream_resource() == std::pmr::get_default_resource() );
static_assert(!std::is_copy_constructible_v<derived>);
static_assert(!std::is_copy_assignable_v<derived>);
static_assert(std::is_destructible_v<derived>);
}
int
main()
{
test01();
test02();
}

View File

@ -0,0 +1,41 @@
// Copyright (C) 2018 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING3. If not see
// <http://www.gnu.org/licenses/>.
// { dg-do run }
// { dg-options "-std=gnu++17 -pthread" }
// { dg-require-effective-target c++17 }
// { dg-require-effective-target pthread }
// { dg-require-gthreads "" }
#include <memory_resource>
#include <testsuite_hooks.h>
// A pool resource compares equal only to itself (do_is_equal is
// identity-based), so two distinct resources are never equal.
void
test01()
{
std::pmr::synchronized_pool_resource first;
VERIFY( first == first );
std::pmr::synchronized_pool_resource second;
VERIFY( first != second );
VERIFY( second != first );
}
// Driver: run the equality test.
int
main()
{
test01();
}

View File

@ -0,0 +1,86 @@
// Copyright (C) 2018 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING3. If not see
// <http://www.gnu.org/licenses/>.
// { dg-do run }
// { dg-options "-std=gnu++17 -pthread" }
// { dg-require-effective-target c++17 }
// { dg-require-effective-target pthread }
// { dg-require-gthreads "" }
#include <memory_resource>
#include <future>
#include <testsuite_allocator.h>
#include <testsuite_hooks.h>
// Allocate and deallocate from several threads concurrently, including
// deallocating memory on a different thread from the one that allocated it.
void
test01()
{
  __gnu_test::memory_resource test_mr;
  std::pmr::synchronized_pool_resource smr(&test_mr);
  const std::size_t largest_pool = smr.options().largest_required_pool_block;

  // Allocate n bytes into out, after some alloc/dealloc churn on the
  // calling thread.
  auto alloc_fn = [&smr](void*& out, size_t n) {
    void* tmp = smr.allocate(n);
    smr.deallocate(tmp, n);
    tmp = smr.allocate(n);
    out = smr.allocate(n);
    smr.deallocate(tmp, n);
  };
  auto dealloc_fn = [&smr](void* p, size_t n) { smr.deallocate(p, n); };

  void* p1;
  void* p2;
  void* p3;
  auto fut1 = std::async(std::launch::async, alloc_fn, std::ref(p1), 8);
  auto fut2 = std::async(std::launch::async, alloc_fn, std::ref(p2), 64);
  auto fut3 = std::async(std::launch::async, alloc_fn, std::ref(p3),
			 largest_pool * 2);
  fut1.get();
  fut2.get();
  fut3.get();
  VERIFY( p1 != nullptr );
  VERIFY( p2 != nullptr );
  VERIFY( p3 != nullptr );

  size_t nallocs = test_mr.number_of_active_allocations();
  VERIFY( nallocs >= 4 );

  // Deallocate on threads other than the ones that allocated:
  fut1 = std::async(std::launch::async, dealloc_fn, p1, 8);
  fut2 = std::async(std::launch::async, dealloc_fn, p2, 64);
  fut1.get();
  fut2.get();
  // Deallocating on new threads must not allocate more upstream memory:
  VERIFY( test_mr.number_of_active_allocations() == nallocs );

  // Free the allocation that was too large for any pool:
  fut3 = std::async(std::launch::async, dealloc_fn, p3, largest_pool * 2);
  fut3.get();
  // Unpooled memory goes straight back to the upstream resource:
  VERIFY( test_mr.number_of_active_allocations() == nallocs - 1 );

  smr.release();
  VERIFY( test_mr.number_of_active_allocations() == 0 );
}
// Run all tests.
int
main()
{
  test01();
  return 0;
}

View File

@ -0,0 +1,54 @@
// Copyright (C) 2018 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING3. If not see
// <http://www.gnu.org/licenses/>.
// { dg-do run }
// { dg-options "-std=gnu++17 -pthread" }
// { dg-require-effective-target c++17 }
// { dg-require-effective-target pthread }
// { dg-require-gthreads "" }
#include <memory_resource>
#include <testsuite_hooks.h>
// Compare two pool_options objects member by member.
bool eq(const std::pmr::pool_options& lhs, const std::pmr::pool_options& rhs)
{
  if (lhs.max_blocks_per_chunk != rhs.max_blocks_per_chunk)
    return false;
  return lhs.largest_required_pool_block == rhs.largest_required_pool_block;
}
// The options() reported by a default-constructed resource are already
// normalized, so feeding them back (or passing zeroes) yields the same.
void
test01()
{
  std::pmr::synchronized_pool_resource dflt;
  const std::pmr::pool_options opts = dflt.options();
  VERIFY( opts.max_blocks_per_chunk != 0 );
  VERIFY( opts.largest_required_pool_block != 0 );

  // Passing back the reported options yields the same options:
  std::pmr::synchronized_pool_resource same(opts);
  const auto opts1 = same.options();
  VERIFY( eq(opts, opts1) );

  // Zero values are replaced by the implementation-defined defaults:
  std::pmr::synchronized_pool_resource zeros(std::pmr::pool_options{0, 0});
  const auto opts2 = zeros.options();
  VERIFY( eq(opts, opts2) );
}
// Run all tests.
int
main()
{
  test01();
  return 0;
}

View File

@ -0,0 +1,116 @@
// Copyright (C) 2018 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING3. If not see
// <http://www.gnu.org/licenses/>.
// { dg-do run }
// { dg-options "-std=gnu++17 -pthread" }
// { dg-require-effective-target c++17 }
// { dg-require-effective-target pthread }
// { dg-require-gthreads "" }
#include <memory_resource>
#include <testsuite_allocator.h>
#include <testsuite_hooks.h>
// release() is safe on an empty resource, safe to call repeatedly, and
// returns all memory to the upstream resource.
void
test01()
{
  __gnu_test::memory_resource test_mr;
  std::pmr::synchronized_pool_resource pool(&test_mr);

  // Releasing before any allocation is a no-op:
  pool.release();
  VERIFY( test_mr.number_of_active_allocations() == 0 );
  pool.release();
  VERIFY( test_mr.number_of_active_allocations() == 0 );

  // An allocation uses upstream memory, release() returns it all:
  (void) pool.allocate(1);
  VERIFY( test_mr.number_of_active_allocations() != 0 );
  pool.release();
  VERIFY( test_mr.number_of_active_allocations() == 0 );
  pool.release();
  VERIFY( test_mr.number_of_active_allocations() == 0 );
}
// After release() the resource should not need its upstream pointer again,
// verified by nulling the upstream and calling release() a second time.
void
test02()
{
  // A memory_resource whose upstream pointer can be nulled out, so any
  // unexpected call through it would crash.
  struct nullable_memory_resource : public std::pmr::memory_resource
  {
    void*
    do_allocate(std::size_t n, std::size_t align) override
    { return upstream->allocate(n, align); }

    void
    do_deallocate(void* p, std::size_t n, std::size_t align) override
    { upstream->deallocate(p, n, align); }

    bool
    do_is_equal(const memory_resource& other) const noexcept override
    { return &other == this; }

    std::pmr::memory_resource* upstream = std::pmr::get_default_resource();
  };

  nullable_memory_resource test_mr;
  std::pmr::synchronized_pool_resource pool(&test_mr);
  pool.release();
  test_mr.upstream = nullptr;
  pool.release(); // should not need to call anything through upstream pointer
}
// The destructor implicitly calls release(), whatever the resource holds.
void
test03()
{
  __gnu_test::memory_resource test_mr;

  {
    std::pmr::synchronized_pool_resource r(&test_mr);
  } // destructor calls release()
  VERIFY( test_mr.number_of_active_allocations() == 0 );

  {
    std::pmr::synchronized_pool_resource r(&test_mr);
    (void) r.allocate(1);
    VERIFY( test_mr.number_of_active_allocations() != 0 );
  } // destructor calls release()
  VERIFY( test_mr.number_of_active_allocations() == 0 );

  {
    std::pmr::synchronized_pool_resource r({10, 16}, &test_mr);
    // Too large for any pool, so allocated directly from upstream:
    (void) r.allocate(2 * r.options().largest_required_pool_block);
    VERIFY( test_mr.number_of_active_allocations() != 0 );
  } // destructor calls release()
  VERIFY( test_mr.number_of_active_allocations() == 0 );

  {
    std::pmr::synchronized_pool_resource r({16, 16}, &test_mr);
    // A mix of pooled sizes, then an unpooled allocation:
    for (std::size_t n : {2, 8, 16, 2, 8, 16})
      (void) r.allocate(n);
    (void) r.allocate(2 * r.options().largest_required_pool_block);
    VERIFY( test_mr.number_of_active_allocations() != 0 );
  } // destructor calls release()
  VERIFY( test_mr.number_of_active_allocations() == 0 );
}
// Run all tests.
int
main()
{
  test01();
  test02();
  test03();
  return 0;
}

View File

@ -15,28 +15,27 @@
// Override the -std flag in the check_performance script: STD=gnu++17
// Run the test as both single- and multi-threaded: TEST_B
#include <memory_resource>
#include <list>
#include <string>
#include <testsuite_performance.h>
struct size16 { char c[16]; };
struct size32 { char c[32]; };
struct size64 { char c[64]; };
struct size128 { char c[128]; };
const int iterations = 100;
// Insert and remove elements of various sizes in std::list containers.
// If report=true the function will measure and report the total performance
// including the time taken to destroy the lists and deallocate everything.
// If dest=false the function will measure and report the performance of
// insert/remove operations only, not the destruction of the lists.
// If timers!=nullptr the function will pause the timer while the lists
// are cleared and deallocated, so that only insertions/removals are timed.
// Otherwise, the time taken to deallocate the lists is also counted.
void
populate_lists(std::pmr::memory_resource* r, std::string name, bool dest,
int kmax = 100)
populate_lists(std::pmr::memory_resource* r, __gnu_test::time_counter* timers,
int kmax = iterations)
{
name += " std::list push/pop";
if (dest)
name += "/destroy";
struct size16 { char c[16]; };
struct size32 { char c[32]; };
struct size64 { char c[64]; };
struct size128 { char c[128]; };
std::pmr::list<int> l4(r);
std::pmr::list<size16> l16(r);
@ -44,11 +43,6 @@ populate_lists(std::pmr::memory_resource* r, std::string name, bool dest,
std::pmr::list<size64> l64(r);
std::pmr::list<size128> l128(r);
using namespace __gnu_test;
time_counter time;
resource_counter resource;
start_counters(time, resource);
const int imax = 1000;
const int jmax = 100;
for (int k = 0; k < kmax; ++k)
@ -70,8 +64,8 @@ populate_lists(std::pmr::memory_resource* r, std::string name, bool dest,
l128.pop_front();
}
if (!dest)
time.stop();
if (timers)
timers->stop();
// Deallocate everything:
l4.clear();
@ -80,35 +74,338 @@ populate_lists(std::pmr::memory_resource* r, std::string name, bool dest,
l64.clear();
l128.clear();
if (!dest)
time.restart();
if (timers)
timers->restart();
}
stop_counters(time, resource);
report_performance(__FILE__, name.c_str(), time, resource);
clear_counters(time, resource);
}
int main()
// Test allocations and deallocations of node-based containers (std::list).
// In this test pmr::unsynchronized_pool_resource should be faster than
// pmr::new_delete_resource().
void test_lists_single_thread()
{
std::pmr::memory_resource* newdel = std::pmr::new_delete_resource();
std::pmr::unsynchronized_pool_resource pool;
#ifndef NOTHREAD
std::pmr::synchronized_pool_resource syncpool;
#endif
for (auto b : { false, true })
auto run_test = [](auto* memres, std::string name, bool time_dtors) {
name += " std::list push/pop";
if (time_dtors)
name += "/destroy";
__gnu_test::time_counter time;
__gnu_test::resource_counter resource;
start_counters(time, resource);
populate_lists(memres, time_dtors ? nullptr : &time);
stop_counters(time, resource);
report_performance(__FILE__, name, time, resource);
};
for (auto time_dtors : {false, true})
{
run_test(newdel, "new-delete-1 ", time_dtors);
run_test(newdel, "new-delete-2 ", time_dtors);
run_test(newdel, "new-delete-3 ", time_dtors);
// Start with an empty set of pools:
pool.release();
populate_lists(newdel, "new_delete 1", b);
populate_lists(newdel, "new_delete 2", b);
populate_lists(newdel, "new_delete 3", b);
populate_lists(&pool, "unsync pool 1", b);
run_test(&pool, "unsync-pool-1", time_dtors);
// Destroy pools and start fresh:
pool.release();
populate_lists(&pool, "unsync pool 2", b);
run_test(&pool, "unsync-pool-2", time_dtors);
// Do not destroy pools, reuse allocated memory:
populate_lists(&pool, "unsync pool 3", b);
run_test(&pool, "unsync-pool-3", time_dtors);
#ifndef NOTHREAD
syncpool.release();
run_test(&syncpool, "sync-pool-1 ", time_dtors);
// Destroy pools and start fresh:
syncpool.release();
run_test(&syncpool, "sync-pool-2 ", time_dtors);
// Do not destroy pools, reuse allocated memory:
run_test(&syncpool, "sync-pool-3 ", time_dtors);
#endif
}
}
// TODO test non-pooled large allocations from (un)synchronized_pool_resource
#ifndef NOTHREAD
# include <thread>
# include <mutex>
# include <cassert>
// Multithreaded std::list test with each thread having its own resource.
// (pmr::new_delete vs pmr::unsynchronized_pool vs pmr::synchronized_pool)
//
// In this test both pmr::unsynchronized_pool_resource and
// pmr::synchronized_pool_resource should be faster than
// pmr::new_delete_resource().
void test_lists_resource_per_thread()
{
  // The mutex acts as a starting gate: worker threads block on it until
  // the timers have been started, so all threads begin work together.
  std::mutex mx;
  std::unique_lock<std::mutex> gate(mx, std::defer_lock);
  // Everything owned by one worker thread:
  struct state
  {
    std::thread thread;
    // Per-thread pool resources:
    std::pmr::unsynchronized_pool_resource unsync;
    std::pmr::synchronized_pool_resource sync;
    // Candidate resources, indexed by the `which` parameter below:
    std::pmr::memory_resource* memres[3] = {
      std::pmr::new_delete_resource(), &unsync, &sync
    };
  };
  state states[4];
  const std::string resnames[] = {"new-delete ", "unsync-pool", "sync-pool "};
  // Body of each worker thread: wait for the gate, then run the benchmark.
  auto run_test = [&mx] (std::pmr::memory_resource* memres,
                         __gnu_test::time_counter* timers)
  {
    std::lock_guard<std::mutex>{mx}; // block until the mutex can be locked
    populate_lists(memres, timers);
  };
  // Start one thread per state on resource `which`, time them all, and
  // report. When time_dtors is true a null timer pointer is passed so
  // populate_lists never pauses the clock while lists are destroyed.
  auto time_threads = [&] (std::string testname, bool time_dtors, int which) {
    __gnu_test::time_counter time;
    __gnu_test::resource_counter resource;
    gate.lock(); // hold the gate closed while the threads start up
    auto* time_ptr = time_dtors ? nullptr : &time;
    for (auto& s : states)
      s.thread = std::thread{ run_test, s.memres[which], time_ptr };
    start_counters(time, resource);
    gate.unlock(); // let the threads run
    for (auto& s : states)
      s.thread.join();
    stop_counters(time, resource);
    report_performance(__FILE__, resnames[which] + testname, time, resource);
  };
  // Run every resource both with and without timing the destruction phase.
  for (auto time_dtors : {false, true})
  {
    std::string testname = " resource-per-thread std::list push/pop";
    if (time_dtors)
      testname += "/destroy";
    for (int which : {0, 1, 2})
      time_threads(testname, time_dtors, which);
  }
}
// A naive memory_resource that adds a mutex to unsynchronized_pool_resource
// A naive memory_resource that adds a mutex to unsynchronized_pool_resource
struct locking_pool_resource : std::pmr::unsynchronized_pool_resource
{
  void* do_allocate(std::size_t bytes, std::size_t align) override
  {
    std::lock_guard<std::mutex> guard(m);
    return unsynchronized_pool_resource::do_allocate(bytes, align);
  }

  void do_deallocate(void* p, std::size_t bytes, std::size_t align) override
  {
    std::lock_guard<std::mutex> guard(m);
    unsynchronized_pool_resource::do_deallocate(p, bytes, align);
  }

  std::mutex m;
};
// Multithreaded std::list test with all threads sharing the same resource.
// (new_delete vs unsynchronized_pool+mutex vs synchronized_pool)
//
// pmr::synchronized_pool_resource is not expected to be anywhere near
// as fast as pmr::new_delete_resource() here, but should perform much
// better than the naive locking_pool_resource type.
void test_lists_shared_resource()
{
  // Starting gate: worker threads block on this mutex until the timers
  // have been started, so all threads begin work together.
  std::mutex mx;
  std::unique_lock<std::mutex> gate(mx, std::defer_lock);
  locking_pool_resource unsync;
  std::pmr::synchronized_pool_resource sync;
  // Candidate resources, indexed by the `which` parameter below.
  // A single instance of each is shared by all four threads.
  std::pmr::memory_resource* memres[3] = {
    std::pmr::new_delete_resource(), &unsync, &sync
  };
  std::thread threads[4];
  const std::string resnames[3] = { "new-delete", "mutex-pool", "sync-pool " };
  // Body of each worker thread: wait for the gate, then run the benchmark.
  auto run_test = [&mx] (std::pmr::memory_resource* memres,
                         __gnu_test::time_counter* timers)
  {
    std::lock_guard<std::mutex>{mx}; // block until the mutex can be locked
    populate_lists(memres, timers);
  };
  // Start the threads on resource `which`, time them all, and report.
  // When time_dtors is true a null timer pointer is passed so
  // populate_lists never pauses the clock while lists are destroyed.
  auto time_threads = [&] (std::string testname, bool time_dtors, int which) {
    __gnu_test::time_counter time;
    __gnu_test::resource_counter resource;
    gate.lock(); // hold the gate closed while the threads start up
    auto* time_ptr = time_dtors ? nullptr : &time;
    for (auto& t : threads)
      t = std::thread{ run_test, memres[which], time_ptr };
    start_counters(time, resource);
    gate.unlock(); // let the threads run
    for (auto& t : threads)
      t.join();
    stop_counters(time, resource);
    report_performance(__FILE__, resnames[which] + testname, time, resource);
  };
  // Run every resource both with and without timing the destruction phase.
  for (auto time_dtors : {false, true})
  {
    std::string testname = " shared-resource std::list push/pop";
    if (time_dtors)
      testname += "/destroy";
    for (int which : {0, 1, 2})
      time_threads(testname, time_dtors, which);
  }
}
// TODO threaded test just doing loads of allocations, no deallocs
// both with per-thread resource (unsync vs sync vs newdel)
// and shared resource (locked vs sync vs newdel)
// TODO threaded test just doing loads of deallocations, no allocs
// both with per-thread resource (unsync vs sync vs newdel)
// and shared resource (locked vs sync vs newdel)
// Multithreaded test where deallocations happen on different threads.
// (new_delete vs unsynchronized_pool+mutex vs synchronized_pool)
//
// This hits the slow path for pmr::synchronized_pool_resource, where
// an exclusive lock must be taken to access other threads' pools.
// pmr::synchronized_pool_resource is not expected to be anywhere near
// as fast as pmr::new_delete_resource() here, but should perform much
// better than the naive locking_pool_resource type.
void test_cross_thread_dealloc()
{
  const int num_threads = 4;
  // Records one allocation: its address and requested size.
  struct X {
    void* ptr;
    unsigned size;
  };
  // A buffer for each thread, and extra buffers for half of the threads:
  std::vector<X> allocs[num_threads * 3 / 2];
  for (auto& v : allocs)
    v.resize(1000 * iterations);
  // Use a few different pools
  const std::size_t sizes[] = { 8, 16, 8, 16, 32, 64, 8, 16, 32, 64 };
  // Starting gate, held locked while the worker threads are created.
  std::mutex mx;
  // Body of worker thread i. If with_exit is true, thread 0 exits after
  // allocating, without freeing anything; another thread frees its memory.
  auto run_test =
  [&, num_threads] (std::pmr::memory_resource* memres, int i, bool with_exit)
  {
    std::size_t counter = 0;
    std::lock_guard<std::mutex>{mx}; // block until the gate mutex is free
    // Fill this thread's buffer with allocations:
    for (X& x : allocs[i])
      {
        x.size = sizes[counter++ % 10];
        x.ptr = memres->allocate(x.size, 1);
      }
    if (with_exit && i == 0)
      {
        // One of the threads exits, so that its pools transfer to the
        // non-thread-specific list of pools.
        return;
      }
    else if (i < num_threads / 2)
      {
        // Other threads continue allocating, into the extra buffers:
        for (X& x : allocs[num_threads + i])
          {
            x.size = sizes[counter++ % 10];
            x.ptr = memres->allocate(x.size, 1);
          }
      }
    else
      {
        // Half of the threads start deallocating their own memory and the
        // memory belonging to another pool
        const int other = i - num_threads / 2;
        for (unsigned n = 0; n < allocs[i].size(); ++n)
          {
            // Deallocate memory allocated in this thread:
            X& x1 = allocs[i][n];
            memres->deallocate(x1.ptr, x1.size, 1);
            x1 = {};
            // Deallocate memory allocated in another thread:
            X& x2 = allocs[other][n];
            memres->deallocate(x2.ptr, x2.size, 1);
            x2 = {};
          }
      }
  };
  std::thread threads[num_threads];
  locking_pool_resource unsync;
  std::pmr::synchronized_pool_resource sync;
  // Candidate resources, indexed by the `which` parameter below:
  std::pmr::memory_resource* memres[3] = {
    std::pmr::new_delete_resource(), &unsync, &sync
  };
  const std::string resnames[3] = { "new-delete", "mutex-pool", "sync-pool " };
  // Start the threads on resource `which`, time them, report, and then
  // deallocate whatever the workers left behind.
  auto time_threads = [&] (std::string name, int which, bool with_exit)
  {
    __gnu_test::time_counter time;
    __gnu_test::resource_counter resource;
    std::unique_lock<std::mutex> gate(mx); // hold the gate closed
    for (auto& t : threads)
      t = std::thread{ run_test, memres[which], &t - threads, with_exit };
    start_counters(time, resource);
    gate.unlock(); // let the threads run
    for (auto& t : threads)
      t.join();
    stop_counters(time, resource);
    report_performance(__FILE__, resnames[which] + name, time, resource);
    // Clean up:
    for (auto& a : allocs)
      {
        const int i = (&a - allocs);
        if (i < num_threads) // These allocations were freed
          for (auto& x : a)
            {
              assert(x.ptr == nullptr);
            }
        else if (with_exit && i == num_threads)
          // Thread 0 returned early, so its extra buffer was never filled.
          ;
        else
          // Extra buffers that were filled but never freed by a worker:
          for (auto& x : a)
            {
              memres[which]->deallocate(x.ptr, x.size, 1);
              x = {};
            }
      }
  };
  for (int which : {0, 1, 2})
    time_threads(" cross-thread dealloc", which, false);
  for (int which : {0, 1, 2})
    time_threads(" cross-thread dealloc w/exit", which, true);
}
#endif
// Run all benchmarks; threaded ones only when threads are enabled.
int main()
{
  test_lists_single_thread();
#ifndef NOTHREAD
  test_lists_resource_per_thread();
  test_lists_shared_resource();
  test_cross_thread_dealloc();
#endif
  return 0;
}