pool_allocator.h: Convert to a global free-list, as per the original SGI/HP design...
2004-06-08  Paolo Carlini  <pcarlini@suse.de>

        * include/ext/pool_allocator.h: Convert to a global free-list,
        as per the original SGI/HP design: move the implementation
        details to struct __pool_base, from which __pool_alloc derives.
        * src/allocator.cc: Instantiate __pool_base.

From-SVN: r82794

commit 5f1a5ede6c, parent 908d0773f0
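For orientation, a minimal usage sketch (not part of this commit; assumes a mainline/3.4-era libstdc++): __pool_alloc<_Tp> is a drop-in container allocator, and after this change every instantiation draws its small blocks from the one set of free lists kept in __pool_base rather than from per-type statics.

    // usage_sketch.cc - illustrative only
    #include <vector>
    #include <ext/pool_allocator.h>

    int main()
    {
      // Requests of <= 128 bytes are served from the shared free lists in
      // 8-byte size classes; larger requests go straight to operator new.
      std::vector<int, __gnu_cxx::__pool_alloc<int> > v;
      for (int i = 0; i < 1000; ++i)
        v.push_back(i);
      return 0;
    }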
libstdc++-v3/ChangeLog

@@ -1,3 +1,10 @@
+2004-06-08  Paolo Carlini  <pcarlini@suse.de>
+
+        * include/ext/pool_allocator.h: Convert to a global free-list,
+        as per the original SGI/HP design: move the implementation
+        details to struct __pool_base, from which __pool_alloc derives.
+        * src/allocator.cc: Instantiate __pool_base.
+
 2004-06-07  Dhruv Matani  <dhruvbird@gmx.net>
             Paolo Carlini  <pcarlini@suse.de>
libstdc++-v3/include/ext/pool_allocator.h

@@ -71,11 +71,70 @@ namespace __gnu_cxx
    * information that we can return the object to the proper free list
    * without permanently losing part of the object.
    *
+   * The template parameter specifies whether more than one thread may use
+   * this allocator.  It is safe to allocate an object from one instance
+   * of the allocator and deallocate it with another one.  This effectively
+   * transfers its ownership to the second one.  This may have undesirable
+   * effects on reference locality.
+   *
    * @endif
    * (See @link Allocators allocators info @endlink for more.)
    */
+  template<bool __threads>
+    struct __pool_base
+    {
+      enum { _S_align = 8 };
+      enum { _S_max_bytes = 128 };
+      enum { _S_freelists = _S_max_bytes / _S_align };
+
+      union _Obj
+      {
+        union _Obj* _M_free_list_link;
+        char        _M_client_data[1];    // The client sees this.
+      };
+
+      static _Obj* volatile _S_free_list[_S_freelists];
+
+      // Chunk allocation state.
+      static char*  _S_start_free;
+      static char*  _S_end_free;
+      static size_t _S_heap_size;
+
+      static _STL_mutex_lock _S_lock;
+      static _Atomic_word    _S_force_new;
+
+      static size_t
+      _S_round_up(size_t __bytes)
+      { return ((__bytes + (size_t)_S_align - 1) & ~((size_t)_S_align - 1)); }
+
+      static size_t
+      _S_freelist_index(size_t __bytes)
+      { return ((__bytes + (size_t)_S_align - 1) / (size_t)_S_align - 1); }
+
+      // Returns an object of size __n, and optionally adds to size __n
+      // free list.
+      static void*
+      _S_refill(size_t __n);
+
+      // Allocates a chunk for nobjs of size size.  nobjs may be reduced
+      // if it is inconvenient to allocate the requested number.
+      static char*
+      _S_chunk_alloc(size_t __n, int& __nobjs);
+
+      // It would be nice to use _STL_auto_lock here.  But we need a
+      // test whether threads are in use.
+      struct _Lock
+      {
+        _Lock() { if (__threads) _S_lock._M_acquire_lock(); }
+        ~_Lock() { if (__threads) _S_lock._M_release_lock(); }
+      } __attribute__ ((__unused__));
+      friend struct _Lock;
+    };
+
+  typedef __pool_base<true> __pool_alloc_base;
+
   template<typename _Tp>
-    class __pool_alloc
+    class __pool_alloc : private __pool_alloc_base
     {
     public:
       typedef size_t     size_type;
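As an aside (not part of the patch): the two inline helpers above define the size-class arithmetic, rounding a request up to a multiple of _S_align (8 bytes) and mapping it to one of _S_freelists (16) buckets. A self-contained sketch of that arithmetic, with plain functions standing in for the static members:

    #include <cassert>
    #include <cstddef>

    enum { align = 8, max_bytes = 128, freelists = max_bytes / align };

    // Same expressions as _S_round_up / _S_freelist_index above.
    std::size_t round_up(std::size_t bytes)
    { return (bytes + align - 1) & ~(std::size_t(align) - 1); }

    std::size_t freelist_index(std::size_t bytes)
    { return (bytes + align - 1) / align - 1; }

    int main()
    {
      assert(round_up(1) == 8    && freelist_index(1) == 0);    // smallest class
      assert(round_up(13) == 16  && freelist_index(13) == 1);   // 9..16 -> bucket 1
      assert(round_up(128) == 128 && freelist_index(128) == freelists - 1);
      return 0;
    }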
@@ -123,54 +182,6 @@ namespace __gnu_cxx
       void
       deallocate(pointer __p, size_type __n);
-
-    private:
-      enum {_S_align = 8};
-      enum {_S_max_bytes = 128};
-      enum {_S_freelists = _S_max_bytes / _S_align};
-
-      union _Obj
-      {
-        union _Obj* _M_free_list_link;
-        char        _M_client_data[1];    // The client sees this.
-      };
-
-      static _Obj* volatile _S_free_list[_S_freelists];
-
-      // Chunk allocation state.
-      static char*  _S_start_free;
-      static char*  _S_end_free;
-      static size_t _S_heap_size;
-
-      static _STL_mutex_lock _S_lock;
-      static _Atomic_word    _S_force_new;
-
-      static size_t
-      _S_round_up(size_t __bytes)
-      { return ((__bytes + (size_t)_S_align - 1) & ~((size_t)_S_align - 1)); }
-
-      static size_t
-      _S_freelist_index(size_t __bytes)
-      { return ((__bytes + (size_t)_S_align - 1)/(size_t)_S_align - 1); }
-
-      // Returns an object of size __n, and optionally adds to size __n
-      // free list.
-      static void*
-      _S_refill(size_t __n);
-
-      // Allocates a chunk for nobjs of size size.  nobjs may be reduced
-      // if it is inconvenient to allocate the requested number.
-      static char*
-      _S_chunk_alloc(size_t __n, int& __nobjs);
-
-      // It would be nice to use _STL_auto_lock here.  But we need a
-      // test whether threads are in use.
-      struct _Lock
-      {
-        _Lock() { _S_lock._M_acquire_lock(); }
-        ~_Lock() { _S_lock._M_release_lock(); }
-      } __attribute__ ((__unused__));
-      friend struct _Lock;
     };

   template<typename _Tp>
@@ -186,82 +197,83 @@ namespace __gnu_cxx
   // Allocate memory in large chunks in order to avoid fragmenting the
   // heap too much.  Assume that __n is properly aligned.  We hold
   // the allocation lock.
-  template<typename _Tp>
+  template<bool __threads>
     char*
-    __pool_alloc<_Tp>::_S_chunk_alloc(size_t __n, int& __nobjs)
+    __pool_base<__threads>::_S_chunk_alloc(size_t __n, int& __nobjs)
     {
       char* __result;
       size_t __total_bytes = __n * __nobjs;
       size_t __bytes_left = _S_end_free - _S_start_free;

       if (__bytes_left >= __total_bytes)
-      {
-        __result = _S_start_free;
-        _S_start_free += __total_bytes;
-        return __result ;
-      }
+        {
+          __result = _S_start_free;
+          _S_start_free += __total_bytes;
+          return __result ;
+        }
       else if (__bytes_left >= __n)
-      {
-        __nobjs = (int)(__bytes_left/__n);
-        __total_bytes = __n * __nobjs;
-        __result = _S_start_free;
-        _S_start_free += __total_bytes;
-        return __result;
-      }
+        {
+          __nobjs = (int)(__bytes_left / __n);
+          __total_bytes = __n * __nobjs;
+          __result = _S_start_free;
+          _S_start_free += __total_bytes;
+          return __result;
+        }
       else
-      {
-        size_t __bytes_to_get =
-          2 * __total_bytes + _S_round_up(_S_heap_size >> 4);
-        // Try to make use of the left-over piece.
-        if (__bytes_left > 0)
-        {
-          _Obj* volatile* __free_list =
-            _S_free_list + _S_freelist_index(__bytes_left);
-
-          ((_Obj*)(void*)_S_start_free)->_M_free_list_link = *__free_list;
-          *__free_list = (_Obj*)(void*)_S_start_free;
-        }
-        _S_start_free = static_cast<char*>(::operator new(__bytes_to_get));
-        if (_S_start_free == 0)
-        {
-          size_t __i;
-          _Obj* volatile* __free_list;
-          _Obj* __p;
-          // Try to make do with what we have.  That can't hurt.  We
-          // do not try smaller requests, since that tends to result
-          // in disaster on multi-process machines.
-          __i = __n;
-          for (; __i <= (size_t) _S_max_bytes; __i += (size_t) _S_align)
-          {
-            __free_list = _S_free_list + _S_freelist_index(__i);
-            __p = *__free_list;
-            if (__p != 0)
-            {
-              *__free_list = __p -> _M_free_list_link;
-              _S_start_free = (char*)__p;
-              _S_end_free = _S_start_free + __i;
-              return _S_chunk_alloc(__n, __nobjs);
-              // Any leftover piece will eventually make it to the
-              // right free list.
-            }
-          }
-          _S_end_free = 0;        // In case of exception.
-          _S_start_free = static_cast<char*>(::operator new(__bytes_to_get));
-          // This should either throw an exception or remedy the situation.
-          // Thus we assume it succeeded.
-        }
-        _S_heap_size += __bytes_to_get;
-        _S_end_free = _S_start_free + __bytes_to_get;
-        return _S_chunk_alloc(__n, __nobjs);
-      }
+        {
+          size_t __bytes_to_get = (2 * __total_bytes
+                                   + _S_round_up(_S_heap_size >> 4));
+          // Try to make use of the left-over piece.
+          if (__bytes_left > 0)
+            {
+              _Obj* volatile* __free_list = (_S_free_list
+                                             + _S_freelist_index(__bytes_left));
+
+              ((_Obj*)(void*)_S_start_free)->_M_free_list_link = *__free_list;
+              *__free_list = (_Obj*)(void*)_S_start_free;
+            }
+
+          _S_start_free = static_cast<char*>(::operator new(__bytes_to_get));
+          if (_S_start_free == 0)
+            {
+              size_t __i;
+              _Obj* volatile* __free_list;
+              _Obj* __p;
+              // Try to make do with what we have.  That can't hurt.  We
+              // do not try smaller requests, since that tends to result
+              // in disaster on multi-process machines.
+              __i = __n;
+              for (; __i <= (size_t) _S_max_bytes; __i += (size_t) _S_align)
+                {
+                  __free_list = _S_free_list + _S_freelist_index(__i);
+                  __p = *__free_list;
+                  if (__p != 0)
+                    {
+                      *__free_list = __p -> _M_free_list_link;
+                      _S_start_free = (char*)__p;
+                      _S_end_free = _S_start_free + __i;
+                      return _S_chunk_alloc(__n, __nobjs);
+                      // Any leftover piece will eventually make it to the
+                      // right free list.
+                    }
+                }
+              _S_end_free = 0;        // In case of exception.
+              _S_start_free = static_cast<char*>(::operator new(__bytes_to_get));
+              // This should either throw an exception or remedy the situation.
+              // Thus we assume it succeeded.
+            }
+          _S_heap_size += __bytes_to_get;
+          _S_end_free = _S_start_free + __bytes_to_get;
+          return _S_chunk_alloc(__n, __nobjs);
+        }
     }

   // Returns an object of size __n, and optionally adds to "size
   // __n"'s free list.  We assume that __n is properly aligned.  We
   // hold the allocation lock.
-  template<typename _Tp>
+  template<bool __threads>
     void*
-    __pool_alloc<_Tp>::_S_refill(size_t __n)
+    __pool_base<__threads>::_S_refill(size_t __n)
     {
       int __nobjs = 20;
       char* __chunk = _S_chunk_alloc(__n, __nobjs);
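Aside (not part of the patch): the request size computed above grows with the pool, asking ::operator new for twice the bytes currently needed plus one sixteenth of the heap already obtained, rounded up to the 8-byte alignment; any left-over tail of the previous chunk is first pushed onto the free list that exactly fits it. A rough worked example of that request size, with illustrative numbers only:

    #include <cstddef>
    #include <iostream>

    std::size_t round_up(std::size_t b) { return (b + 7) & ~std::size_t(7); }

    // __bytes_to_get = 2 * __total_bytes + _S_round_up(_S_heap_size >> 4)
    std::size_t bytes_to_get(std::size_t total_bytes, std::size_t heap_size)
    { return 2 * total_bytes + round_up(heap_size >> 4); }

    int main()
    {
      // First refill of 20 objects of 16 bytes: the heap is still empty.
      std::cout << bytes_to_get(20 * 16, 0) << '\n';      // 640
      // A later refill of the same size once 64 KiB have been obtained.
      std::cout << bytes_to_get(20 * 16, 65536) << '\n';  // 640 + 4096 = 4736
      return 0;
    }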
@@ -270,18 +282,18 @@ namespace __gnu_cxx
       _Obj* __current_obj;
       _Obj* __next_obj;
       int __i;

       if (1 == __nobjs)
-        return __chunk;
+	return __chunk;
       __free_list = _S_free_list + _S_freelist_index(__n);

       // Build free list in chunk.
       __result = (_Obj*)(void*)__chunk;
       *__free_list = __next_obj = (_Obj*)(void*)(__chunk + __n);
       for (__i = 1; ; __i++)
-        {
+	{
 	  __current_obj = __next_obj;
-          __next_obj = (_Obj*)(void*)((char*)__next_obj + __n);
+	  __next_obj = (_Obj*)(void*)((char*)__next_obj + __n);
 	  if (__nobjs - 1 == __i)
 	    {
 	      __current_obj -> _M_free_list_link = 0;
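Aside (not part of the patch): _S_refill asks _S_chunk_alloc for up to 20 objects of size __n, hands the first one back to the caller and threads the remaining pieces onto the matching free list through their leading _M_free_list_link pointers, as the loop above does. A standalone sketch of that threading step, with plain names standing in for the _S_* members:

    #include <cstddef>

    union Obj { Obj* free_list_link; char client_data[1]; };

    // Link pieces 1..nobjs-1 of a raw chunk of nobjs pieces of n bytes each
    // into a singly linked list and return its head; piece 0 is the object
    // handed back to the caller (cf. __result in _S_refill above).
    Obj* thread_chunk(char* chunk, std::size_t n, int nobjs)
    {
      Obj* head = reinterpret_cast<Obj*>(chunk + n);
      Obj* cur = head;
      for (int i = 1; i < nobjs - 1; ++i)
        {
          Obj* next = reinterpret_cast<Obj*>(chunk + (i + 1) * n);
          cur->free_list_link = next;
          cur = next;
        }
      cur->free_list_link = 0;   // the last piece terminates the list
      return head;
    }

    int main()
    {
      char* buffer = new char[20 * 16];
      Obj* list = thread_chunk(buffer, 16, 20);
      int count = 0;
      for (Obj* p = list; p != 0; p = p->free_list_link)
        ++count;
      delete [] buffer;
      return count == 19 ? 0 : 1;   // 19 pieces end up on the free list
    }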
@@ -329,7 +341,7 @@ namespace __gnu_cxx
 	    __ret = static_cast<_Tp*>(_S_refill(_S_round_up(__bytes)));
 	  else
 	    {
-	      *__free_list = __result -> _M_free_list_link;
+	      *__free_list = __result->_M_free_list_link;
 	      __ret = reinterpret_cast<_Tp*>(__result);
 	    }
 	  if (__builtin_expect(__ret == 0, 0))
@@ -367,25 +379,26 @@ namespace __gnu_cxx
 	}
     }

-  template<typename _Tp>
-    typename __pool_alloc<_Tp>::_Obj* volatile
-    __pool_alloc<_Tp>::_S_free_list[_S_freelists];
+  template<bool __threads>
+    typename __pool_base<__threads>::_Obj* volatile
+    __pool_base<__threads>::_S_free_list[_S_freelists];

-  template<typename _Tp>
-    char* __pool_alloc<_Tp>::_S_start_free = 0;
+  template<bool __threads>
+    char* __pool_base<__threads>::_S_start_free = 0;

-  template<typename _Tp>
-    char* __pool_alloc<_Tp>::_S_end_free = 0;
+  template<bool __threads>
+    char* __pool_base<__threads>::_S_end_free = 0;

-  template<typename _Tp>
-    size_t __pool_alloc<_Tp>::_S_heap_size = 0;
+  template<bool __threads>
+    size_t __pool_base<__threads>::_S_heap_size = 0;

-  template<typename _Tp>
+  template<bool __threads>
     _STL_mutex_lock
-    __pool_alloc<_Tp>::_S_lock __STL_MUTEX_INITIALIZER;
+    __pool_base<__threads>::_S_lock __STL_MUTEX_INITIALIZER;

-  template<typename _Tp> _Atomic_word
-    __pool_alloc<_Tp>::_S_force_new = 0;
+  template<bool __threads>
+    _Atomic_word
+    __pool_base<__threads>::_S_force_new = 0;
 } // namespace __gnu_cxx

 #endif
|
@ -46,4 +46,6 @@ namespace __gnu_cxx
|
|||
// Static members of __pool_alloc.
|
||||
template class __pool_alloc<char>;
|
||||
template class __pool_alloc<wchar_t>;
|
||||
|
||||
template class __pool_base<true>;
|
||||
} // namespace __gnu_cxx
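Aside (not part of the patch): since the free lists, chunk pointers and lock are now static members of a class template, the explicit instantiation added above is what pins one definition of each of them into the library object file. A miniature sketch of the same mechanism, with hypothetical names:

    // pool.h - a class template whose statics should live in one library TU
    template<bool _Thr>
      struct pool
      {
        static unsigned long heap_size;
      };

    template<bool _Thr>
      unsigned long pool<_Thr>::heap_size = 0;

    // pool.cc - built into the library, like src/allocator.cc above:
    // the explicit instantiation emits pool<true>::heap_size in this TU.
    template struct pool<true>;

    // client.cc - any user of the header then shares that single definition.
    int main() { return pool<true>::heap_size == 0 ? 0 : 1; }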