re PR libstdc++/24469 (Possible race condition in mt_allocator causing SIGSEGV)
2006-09-02  Paolo Carlini  <pcarlini@suse.de>
	    Richard Guenther  <rguenther@suse.de>

	PR libstdc++/24469
	* src/mt_allocator.cc (__pool<true>::_M_reserve_block,
	__pool<true>::_M_reclaim_block): Fix the logic to avoid races,
	exploit atomic counters stored in second part of the memory
	pointed by _M_used.
	(__pool<true>::_M_initialize): Adjust _M_used allocation.
	* include/ext/mt_allocator.h (__pool<true>::_Bin_record):
	Update comment.

Co-Authored-By: Richard Guenther <rguenther@suse.de>

From-SVN: r116660
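The idea behind the fix, as the ChangeLog and the updated header comment describe it: each thread's _M_used entry stays a plain size_t written only by the owning thread, while frees performed by other threads are recorded in a separate atomic "reclaimed" counter kept in the second half of the same allocation; the owner subtracts the two to obtain its real working set. In the old code any thread decremented the owner's plain counter directly, which raced with the owner's own updates. Below is a minimal sketch of that counting scheme, not the actual GCC code: std::atomic<int> stands in for _Atomic_word/__atomic_add, and the struct and member names are hypothetical.

    #include <atomic>
    #include <cstddef>
    #include <vector>

    // Sketch only: the two-counter scheme the patch adopts.
    struct counters
    {
      std::vector<std::size_t>      used;       // written only by the owner
      std::vector<std::atomic<int>> reclaimed;  // bumped by *other* threads

      explicit counters(std::size_t max_threads)
      : used(max_threads), reclaimed(max_threads) { }   // all start at zero

      // Owner allocates a block: plain increment, it is the only writer.
      void on_allocate(std::size_t owner)
      { ++used[owner]; }

      // A block owned by `owner` is freed by thread `self`.
      void on_free(std::size_t owner, std::size_t self)
      {
        if (owner == self)
          --used[owner];                          // non-atomic fast path
        else
          reclaimed[owner].fetch_add(1, std::memory_order_relaxed);
      }

      // The owner's real "working set": blocks it allocated minus blocks
      // that other threads have already handed back on its behalf.
      std::size_t working_set(std::size_t owner) const
      {
        return used[owner]
               - static_cast<std::size_t>(
                   reclaimed[owner].load(std::memory_order_relaxed));
      }
    };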
--- a/libstdc++-v3/ChangeLog
+++ b/libstdc++-v3/ChangeLog
@@ -1,3 +1,15 @@
+2006-09-02  Paolo Carlini  <pcarlini@suse.de>
+	    Richard Guenther  <rguenther@suse.de>
+
+	PR libstdc++/24469
+	* src/mt_allocator.cc (__pool<true>::_M_reserve_block,
+	__pool<true>::_M_reclaim_block): Fix the logic to avoid
+	races, exploit atomic counters stored in second part of
+	the memory pointed by _M_used.
+	(__pool<true>::_M_initialize): Adjust _M_used allocation.
+	* include/ext/mt_allocator.h (__pool<true>::_Bin_record):
+	Update comment.
+
 2006-08-31  Benjamin Kosnik  <bkoz@redhat.com>
 
 	PR libstdc++/28671 continued
--- a/libstdc++-v3/include/ext/mt_allocator.h
+++ b/libstdc++-v3/include/ext/mt_allocator.h
@@ -298,8 +298,13 @@ _GLIBCXX_BEGIN_NAMESPACE(__gnu_cxx)
 
       // An "array" of counters used to keep track of the amount of
       // blocks that are on the freelist/used for each thread id.
-      // Memory to these "arrays" is allocated in _S_initialize() for
-      // _S_max_threads + global pool 0.
+      // - Note that the second part of the allocated _M_used "array"
+      //   actually hosts (atomic) counters of reclaimed blocks: in
+      //   _M_reserve_block and in _M_reclaim_block those numbers are
+      //   subtracted from the first ones to obtain the actual size
+      //   of the "working set" of the given thread.
+      // - Memory to these "arrays" is allocated in _S_initialize()
+      //   for _S_max_threads + global pool 0.
       size_t*                _M_free;
       size_t*                _M_used;
 
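To make the updated comment concrete, this is the layout it describes (N is __max_threads, i.e. _M_max_threads + 1); the code lines are taken from the src/mt_allocator.cc hunks below, assuming _Atomic_word is an int-like type as on most targets:

    // One allocation, two views:
    //
    //   _M_used ->  [ used[0] ... used[N-1] | reclaimed[0] ... reclaimed[N-1] ]
    //                       size_t                      _Atomic_word
    //
    // The trailing counters are reached by pointer arithmetic:
    //
    //   _Atomic_word* const __reclaimed_base =
    //     reinterpret_cast<_Atomic_word*>(__bin._M_used + __max_threads);
    //
    // and a thread's actual working set is
    //
    //   __bin._M_used[__thread_id] - __reclaimed_base[__thread_id]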
--- a/libstdc++-v3/src/mt_allocator.cc
+++ b/libstdc++-v3/src/mt_allocator.cc
@@ -34,6 +34,7 @@
 #include <bits/c++config.h>
 #include <bits/concurrence.h>
 #include <ext/mt_allocator.h>
+#include <cstring>
 
 namespace
 {
@@ -263,13 +264,33 @@ _GLIBCXX_BEGIN_NAMESPACE(__gnu_cxx)
       // number of records is "high enough".
       const size_t __thread_id = _M_get_thread_id();
       const _Tune& __options = _M_get_options();
-      const unsigned long __limit = 100 * (_M_bin_size - __which)
-                                    * __options._M_freelist_headroom;
+      const size_t __limit = (100 * (_M_bin_size - __which)
+                              * __options._M_freelist_headroom);
 
-      unsigned long __remove = __bin._M_free[__thread_id];
+      size_t __remove = __bin._M_free[__thread_id];
       __remove *= __options._M_freelist_headroom;
-      if (__remove >= __bin._M_used[__thread_id])
-        __remove -= __bin._M_used[__thread_id];
+
+      // NB: We assume that reads of _Atomic_words are atomic.
+      const size_t __max_threads = __options._M_max_threads + 1;
+      _Atomic_word* const __reclaimed_base =
+        reinterpret_cast<_Atomic_word*>(__bin._M_used + __max_threads);
+      const _Atomic_word __reclaimed = __reclaimed_base[__thread_id];
+      const size_t __used = __bin._M_used[__thread_id] - __reclaimed;
+
+      // NB: For performance sake we don't resync every time, in order
+      // to spare atomic ops.  Note that if __reclaimed increased by,
+      // say, 1024, since the last sync, it means that the other
+      // threads executed the atomic in the else below at least the
+      // same number of times (at least, because _M_reserve_block may
+      // have decreased the counter), therefore one more cannot hurt.
+      if (__reclaimed > 1024)
+        {
+          __bin._M_used[__thread_id] -= __reclaimed;
+          __atomic_add(&__reclaimed_base[__thread_id], -__reclaimed);
+        }
+
+      if (__remove >= __used)
+        __remove -= __used;
       else
         __remove = 0;
       if (__remove > __limit && __remove > __bin._M_free[__thread_id])
@@ -277,7 +298,7 @@ _GLIBCXX_BEGIN_NAMESPACE(__gnu_cxx)
           _Block_record* __first = __bin._M_first[__thread_id];
           _Block_record* __tmp = __first;
           __remove /= __options._M_freelist_headroom;
-          const unsigned long __removed = __remove;
+          const size_t __removed = __remove;
           while (--__remove > 0)
             __tmp = __tmp->_M_next;
           __bin._M_first[__thread_id] = __tmp->_M_next;
@@ -292,8 +313,11 @@ _GLIBCXX_BEGIN_NAMESPACE(__gnu_cxx)
 
       // Return this block to our list and update counters and
       // owner id as needed.
-      --__bin._M_used[__block->_M_thread_id];
+      if (__block->_M_thread_id == __thread_id)
+        --__bin._M_used[__thread_id];
+      else
+        __atomic_add(&__reclaimed_base[__block->_M_thread_id], 1);
 
       __block->_M_next = __bin._M_first[__thread_id];
       __bin._M_first[__thread_id] = __block;
 
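Around these hunks, _M_reclaim_block now makes its trimming decision against the corrected working set rather than the raw _M_used value. A simplified, hypothetical helper (not library code) capturing that decision; the default _M_freelist_headroom in the _Tune settings is 10, and the `reclaimed` argument is the value read from the atomic counter:

    #include <cstddef>

    // Returns how many blocks the calling thread should hand back to the
    // global freelist; simplified from the _M_reclaim_block logic above.
    static std::size_t
    blocks_to_trim(std::size_t free_blocks, std::size_t used,
                   std::size_t reclaimed, std::size_t limit,
                   std::size_t headroom)
    {
      std::size_t remove = free_blocks * headroom;
      const std::size_t working_set = used - reclaimed;  // corrected "used"
      if (remove >= working_set)
        remove -= working_set;
      else
        remove = 0;
      // Only trim when the surplus exceeds both the bin's limit and the
      // current free-list length; scale back down by the headroom factor.
      if (remove > limit && remove > free_blocks)
        return remove / headroom;
      return 0;
    }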
@@ -333,6 +357,14 @@ _GLIBCXX_BEGIN_NAMESPACE(__gnu_cxx)
       _Block_record* __block = NULL;
       if (__gthread_active_p())
         {
+          // Resync the _M_used counters.
+          const size_t __max_threads = __options._M_max_threads + 1;
+          _Atomic_word* const __reclaimed_base =
+            reinterpret_cast<_Atomic_word*>(__bin._M_used + __max_threads);
+          const _Atomic_word __reclaimed = __reclaimed_base[__thread_id];
+          __bin._M_used[__thread_id] -= __reclaimed;
+          __atomic_add(&__reclaimed_base[__thread_id], -__reclaimed);
+
           __gthread_mutex_lock(__bin._M_mutex);
           if (__bin._M_first[0] == NULL)
             {
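Here _M_reserve_block performs the complementary resync before taking the global mutex: the owner folds the reclaimed count back into its plain counter and atomically subtracts the same amount, so increments racing in from other threads are never lost. A sketch in terms of the stand-in counters introduced near the top (again an illustration, not the libstdc++ internals):

    #include <atomic>
    #include <cstddef>

    // Only the owning thread runs this, so used[self] needs no lock;
    // the atomic subtraction preserves any concurrent increments.
    void resync(std::size_t self,
                std::size_t* used, std::atomic<int>* reclaimed)
    {
      const int r = reclaimed[self].load(std::memory_order_relaxed);
      used[self] -= static_cast<std::size_t>(r);   // fold reclaims into used
      reclaimed[self].fetch_sub(r, std::memory_order_relaxed);
      // An increment that lands between the load and the fetch_sub simply
      // stays in reclaimed[self] and is picked up at the next resync.
    }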
@@ -533,14 +565,19 @@ _GLIBCXX_BEGIN_NAMESPACE(__gnu_cxx)
           {
             _Bin_record& __bin = _M_bin[__n];
             __v = ::operator new(sizeof(_Block_record*) * __max_threads);
+            std::memset(__v, 0, sizeof(_Block_record*) * __max_threads);
             __bin._M_first = static_cast<_Block_record**>(__v);
 
             __bin._M_address = NULL;
 
             __v = ::operator new(sizeof(size_t) * __max_threads);
+            std::memset(__v, 0, sizeof(size_t) * __max_threads);
             __bin._M_free = static_cast<size_t*>(__v);
 
-            __v = ::operator new(sizeof(size_t) * __max_threads);
+            __v = ::operator new(sizeof(size_t) * __max_threads
+                                 + sizeof(_Atomic_word) * __max_threads);
+            std::memset(__v, 0, (sizeof(size_t) * __max_threads
+                                 + sizeof(_Atomic_word) * __max_threads));
             __bin._M_used = static_cast<size_t*>(__v);
 
             __v = ::operator new(sizeof(__gthread_mutex_t));
@@ -555,12 +592,6 @@ _GLIBCXX_BEGIN_NAMESPACE(__gnu_cxx)
 #else
             { __GTHREAD_MUTEX_INIT_FUNCTION(__bin._M_mutex); }
 #endif
-            for (size_t __threadn = 0; __threadn < __max_threads; ++__threadn)
-              {
-                __bin._M_first[__threadn] = NULL;
-                __bin._M_free[__threadn] = 0;
-                __bin._M_used[__threadn] = 0;
-              }
           }
       }
     else
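Because the enlarged _M_used buffer (and the _M_first and _M_free arrays) are now zero-filled with std::memset right after allocation, which is why <cstring> is included above, the per-thread initialization loop deleted in the hunk just above becomes redundant. A hedged sketch of the new allocation step as a standalone helper (hypothetical function name, mirroring what _M_initialize now does; the int typedef is an assumption about _Atomic_word):

    #include <cstddef>
    #include <cstring>
    #include <new>

    typedef int atomic_word;   // assumption: _Atomic_word is an int-like type

    // One zeroed buffer holds the size_t counters and, right behind them,
    // the reclaim counters, so no separate per-thread init loop is needed.
    static std::size_t*
    make_used_array(std::size_t max_threads, atomic_word** reclaimed_base)
    {
      const std::size_t bytes = sizeof(std::size_t) * max_threads
                                + sizeof(atomic_word) * max_threads;
      void* v = ::operator new(bytes);
      std::memset(v, 0, bytes);                 // both halves start at zero
      std::size_t* used = static_cast<std::size_t*>(v);
      *reclaimed_base = reinterpret_cast<atomic_word*>(used + max_threads);
      return used;
    }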
@@ -729,16 +760,21 @@ _GLIBCXX_BEGIN_NAMESPACE(__gnu_cxx)
           {
             _Bin_record& __bin = _M_bin[__n];
             __v = ::operator new(sizeof(_Block_record*) * __max_threads);
+            std::memset(__v, 0, sizeof(_Block_record*) * __max_threads);
             __bin._M_first = static_cast<_Block_record**>(__v);
 
             __bin._M_address = NULL;
 
             __v = ::operator new(sizeof(size_t) * __max_threads);
+            std::memset(__v, 0, sizeof(size_t) * __max_threads);
             __bin._M_free = static_cast<size_t*>(__v);
 
-            __v = ::operator new(sizeof(size_t) * __max_threads);
+            __v = ::operator new(sizeof(size_t) * __max_threads +
+                                 sizeof(_Atomic_word) * __max_threads);
+            std::memset(__v, 0, (sizeof(size_t) * __max_threads
+                                 + sizeof(_Atomic_word) * __max_threads));
             __bin._M_used = static_cast<size_t*>(__v);
 
 
             __v = ::operator new(sizeof(__gthread_mutex_t));
             __bin._M_mutex = static_cast<__gthread_mutex_t*>(__v);
@@ -751,12 +787,6 @@ _GLIBCXX_BEGIN_NAMESPACE(__gnu_cxx)
 #else
             { __GTHREAD_MUTEX_INIT_FUNCTION(__bin._M_mutex); }
 #endif
-            for (size_t __threadn = 0; __threadn < __max_threads; ++__threadn)
-              {
-                __bin._M_first[__threadn] = NULL;
-                __bin._M_free[__threadn] = 0;
-                __bin._M_used[__threadn] = 0;
-              }
           }
       }
     else