allocator-inst.cc: Explicitly instantiate.

2003-06-12  Benjamin Kosnik  <bkoz@redhat.com>

	* src/allocator-inst.cc: Explicitly instantiate.
	* include/ext/pool_allocator.h: Inhibit implicit instantiations.
	Tweaks.
	* config/linker-map.gnu: Add __pool_alloc bits. Tweaks.

From-SVN: r67882
Benjamin Kosnik <bkoz@redhat.com>
2003-06-13 05:45:57 +00:00
commit c37514ff62, parent 5482aaab6f
4 changed files with 181 additions and 135 deletions

ChangeLog

@@ -1,3 +1,10 @@
2003-06-12  Benjamin Kosnik  <bkoz@redhat.com>

	* src/allocator-inst.cc: Explicitly instantiate.
	* include/ext/pool_allocator.h: Inhibit implicit instantiations.
	Tweaks.
	* config/linker-map.gnu: Add __pool_alloc bits. Tweaks.

2003-06-11  Benjamin Kosnik  <bkoz@redhat.com>

	* acinclude.m4 (GLIBCPP_ENABLE_CSTDIO): Simplify.
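
The ChangeLog entry describes the usual libstdc++ pattern for pinning a template instantiation into the shared library: the header marks the required instantiation extern (a GNU extension in 2003, later standardized in C++11) so that client translation units do not emit their own weak copy, a single .cc file in the library provides the explicit instantiation, and the linker map exports the resulting symbols. A minimal sketch of the first two pieces, using hypothetical file and class names rather than the libstdc++ sources:

// widget.h -- header side: define the template, then inhibit the
// implicit instantiation every includer would otherwise emit.
template<typename _Tp>
  struct widget
  {
    _Tp _M_value;

    _Tp
    _M_twice() const
    { return _M_value + _M_value; }
  };

#if _GLIBCPP_EXTERN_TEMPLATE
  extern template struct widget<int>;  // GNU extension in 2003
#endif

// widget.cc -- library side: the one translation unit that actually
// emits widget<int>, whose symbols the linker map can then export.
#include "widget.h"

template struct widget<int>;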

config/linker-map.gnu

@@ -55,12 +55,7 @@ GLIBCPP_3.4 {
std::__num_base::_S_format_float*;
std::__num_base::_S_format_int*;
std::__num_base::_S_atoms_in;
std::__num_base::_S_atoms_out;
# Needed only when generic cpu's atomicity.h is in use.
__gnu_cxx::_Atomic_add_mutex;
__gnu_cxx::_Atomic_add_mutex_once;
__gnu_cxx::__gthread_atomic_add_mutex_once
std::__num_base::_S_atoms_out
};
# Names not in an 'extern' block are mangled names.
@@ -78,28 +73,7 @@ GLIBCPP_3.4 {
# bool has_facet
_ZSt9has_facet*;
# operator new(size_t)
_Znw[jm];
# operator new(size_t, std::nothrow_t const&)
_Znw[jm]RKSt9nothrow_t;
# operator delete(void*)
_ZdlPv;
# operator delete(void*, std::nothrow_t const&)
_ZdlPvRKSt9nothrow_t;
# operator new[](size_t)
_Zna[jm];
# operator new[](size_t, std::nothrow_t const&)
_Zna[jm]RKSt9nothrow_t;
# operator delete[](void*)
_ZdaPv;
# operator delete[](void*, std::nothrow_t const&)
_ZdaPvRKSt9nothrow_t;
# vtable
_ZTVN9__gnu_cxx*;
# virtual table
_ZTVNSt8ios_base7failureE;
_ZTVNSt6locale5facetE;
_ZTVS[a-z];
@@ -109,18 +83,15 @@ GLIBCPP_3.4 {
_ZTVSt23__codecvt_abstract_baseI[cw]c11__mbstate_tE;
_ZTVSt21__ctype_abstract_baseI[cw]E;
# VTT structure
_ZTTS[a-z];
_ZTTSt[0-9][A-Za-z]*;
_ZTTSt[0-9][0-9][A-Za-z]*;
# typeinfo
_ZTI[a-z];
# typeinfo structure
_ZTIS[a-z];
_ZTINSt8ios_base7failureE;
_ZTINSt6locale5facetE;
_ZTIN9__gnu_cxx*;
_ZTIP[a-z];
_ZTIPK[a-z];
_ZTIS[a-z];
_ZTISt[0-9][A-Za-z]*;
_ZTISt[0-9][0-9][A-Za-z]*;
_ZTISt11__timepunctI[cw]E;
@@ -128,12 +99,9 @@ GLIBCPP_3.4 {
_ZTISt21__ctype_abstract_baseI[cw]E;
_ZTISt23__codecvt_abstract_baseI[cw]c11__mbstate_tE;
_ZTS[a-z];
# typeinfo name
_ZTSNSt8ios_base7failureE;
_ZTSNSt6locale5facetE;
_ZTSN9__gnu_cxx*;
_ZTSP[a-z];
_ZTSPK[a-z];
_ZTSS[a-z];
_ZTSSt[0-9][A-Za-z]*;
_ZTSSt[0-9][0-9][A-Za-z]*;
@@ -143,12 +111,11 @@ GLIBCPP_3.4 {
_ZTSSt23__codecvt_abstract_baseI[cw]c11__mbstate_tE;
# function-scope static objects require a guard variable.
_ZGV*;
_ZGVNSt*;
# virtual function thunks
_ZTh*;
_ZTv*;
_ZTc*;
_ZThn8_NS*;
_ZTv0_n12_NS*;
# std::__convert_to_v
_ZSt14__convert_to_v*;
@@ -187,6 +154,18 @@ GLIBCPP_3.4 {
__signbitf;
__signbitl;
# __gnu_cxx::__pool_alloc
_ZN9__gnu_cxx12__pool_allocILb1ELi0EE8allocateE[jm];
_ZN9__gnu_cxx12__pool_allocILb1ELi0EE10deallocateEPv[jm];
# __gnu_cxx::stdio_sync_filebuf
_ZTVN9__gnu_cxx18stdio_sync_filebufI[cw]St11char_traitsI[cw]EEE;
# Needed only when generic cpu's atomicity.h is in use.
_ZN9__gnu_cxx17_Atomic_add_mutexE;
_ZN9__gnu_cxx22_Atomic_add_mutex_onceE;
_ZN9__gnu_cxx31__gthread_atomic_add_mutex_onceEv;
local:
*;
};
@@ -227,16 +206,61 @@ CXXABI_1.3 {
__gxx_personality_sj0;
__dynamic_cast;
# operator new(size_t)
_Znw[jm];
# operator new(size_t, std::nothrow_t const&)
_Znw[jm]RKSt9nothrow_t;
# operator delete(void*)
_ZdlPv;
# operator delete(void*, std::nothrow_t const&)
_ZdlPvRKSt9nothrow_t;
# operator new[](size_t)
_Zna[jm];
# operator new[](size_t, std::nothrow_t const&)
_Zna[jm]RKSt9nothrow_t;
# operator delete[](void*)
_ZdaPv;
# operator delete[](void*, std::nothrow_t const&)
_ZdaPvRKSt9nothrow_t;
# virtual table
_ZTVN10__cxxabiv117__class_type_infoE;
_ZTVN10__cxxabiv120__si_class_type_infoE;
_ZTVN10__cxxabiv121__vmi_class_type_infoE;
_ZTVN10__cxxabiv123__fundamental_type_infoE;
_ZTVN10__cxxabiv117__array_type_infoE;
_ZTVN10__cxxabiv120__function_type_infoE;
_ZTVN10__cxxabiv116__enum_type_infoE;
_ZTVN10__cxxabiv117__pbase_type_infoE;
_ZTVN10__cxxabiv119__pointer_type_infoE;
_ZTVN10__cxxabiv129__pointer_to_member_type_infoE;
# typeinfo structure (and some names)
_ZTI[a-z];
_ZTIP[a-z];
_ZTIPK[a-z];
_ZTIN10__cxxabiv117__class_type_infoE;
_ZTIN10__cxxabiv120__si_class_type_infoE;
_ZTIN10__cxxabiv121__vmi_class_type_infoE;
_ZTIN10__cxxabiv123__fundamental_type_infoE;
_ZTIN10__cxxabiv117__array_type_infoE;
_ZTIN10__cxxabiv120__function_type_infoE;
_ZTIN10__cxxabiv116__enum_type_infoE;
_ZTIN10__cxxabiv117__pbase_type_infoE;
_ZTIN10__cxxabiv119__pointer_type_infoE;
_ZTIN10__cxxabiv129__pointer_to_member_type_infoE;
# typeinfo name
_ZTS[a-z];
_ZTSP[a-z];
_ZTSPK[a-z];
# __gnu_cxx::__verbose_terminate_handler()
_ZN9__gnu_cxx27__verbose_terminate_handlerEv;
# typeinfo
_ZTIN10__cxxabi*;
_ZTSN10__cxxabi*;
# vtable
_ZTVN10__cxxabi*;
local:
*;
};
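
Everything in these export lists outside the extern "C++" blocks is an Itanium C++ ABI mangled name, and the bracketed patterns are linker-script globs: _Znw[jm], for instance, matches both _Znwj (operator new(unsigned int), 32-bit size_t) and _Znwm (operator new(unsigned long), 64-bit size_t). As an illustration, such names can be decoded programmatically with abi::__cxa_demangle from <cxxabi.h>, which this same library exports:

#include <cxxabi.h>
#include <cstdio>
#include <cstdlib>

int main()
{
  int status = 0;
  // Decode one of the __pool_alloc symbols added to the map above
  // (the 'j' variant; on LP64 targets the exported name ends in 'm').
  char* name = abi::__cxa_demangle(
      "_ZN9__gnu_cxx12__pool_allocILb1ELi0EE8allocateEj", 0, 0, &status);
  if (status == 0)
    std::printf("%s\n", name);
  // Prints: __gnu_cxx::__pool_alloc<true, 0>::allocate(unsigned int)
  std::free(name);
  return 0;
}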

include/ext/pool_allocator.h

@@ -129,7 +129,7 @@ namespace __gnu_cxx
// Allocates a chunk for nobjs of size size. nobjs may be reduced
// if it is inconvenient to allocate the requested number.
static char*
_S_chunk_alloc(size_t __size, int& __nobjs);
_S_chunk_alloc(size_t __n, int& __nobjs);
// It would be nice to use _STL_auto_lock here. But we need a
// test whether threads are in use.
@@ -143,70 +143,13 @@ namespace __gnu_cxx
public:
// __n must be > 0
static void*
allocate(size_t __n)
{
void* __ret = 0;
// If there is a race through here, assume answer from getenv
// will resolve in same direction. Inspired by techniques
// to efficiently support threading found in basic_string.h.
if (_S_force_new == 0)
{
if (getenv("GLIBCPP_FORCE_NEW"))
__atomic_add(&_S_force_new, 1);
else
__atomic_add(&_S_force_new, -1);
}
if ((__n > (size_t) _S_max_bytes) || (_S_force_new > 0))
__ret = __new_alloc::allocate(__n);
else
{
_Obj* volatile* __my_free_list = _S_free_list
+ _S_freelist_index(__n);
// Acquire the lock here with a constructor call. This
// ensures that it is released in exit or during stack
// unwinding.
_Lock __lock_instance;
_Obj* __restrict__ __result = *__my_free_list;
if (__builtin_expect(__result == 0, 0))
__ret = _S_refill(_S_round_up(__n));
else
{
*__my_free_list = __result -> _M_free_list_link;
__ret = __result;
}
if (__builtin_expect(__ret == 0, 0))
__throw_bad_alloc();
}
return __ret;
}
allocate(size_t __n);
// __p may not be 0
static void
deallocate(void* __p, size_t __n)
{
if ((__n > (size_t) _S_max_bytes) || (_S_force_new > 0))
__new_alloc::deallocate(__p, __n);
else
{
_Obj* volatile* __my_free_list = _S_free_list
+ _S_freelist_index(__n);
_Obj* __q = (_Obj*)__p;
// Acquire the lock here with a constructor call. This
// ensures that it is released in exit or during stack
// unwinding.
_Lock __lock_instance;
__q -> _M_free_list_link = *__my_free_list;
*__my_free_list = __q;
}
}
deallocate(void* __p, size_t __n);
};
template<bool __threads, int __inst> _Atomic_word
__pool_alloc<__threads, __inst>::_S_force_new = 0;
template<bool __threads, int __inst>
inline bool
operator==(const __pool_alloc<__threads,__inst>&,
@@ -220,16 +163,15 @@ namespace __gnu_cxx
{ return false; }
// We allocate memory in large chunks in order to avoid fragmenting the
// heap too much. We assume that __size is properly aligned. We hold
// Allocate memory in large chunks in order to avoid fragmenting the
// heap too much. Assume that __n is properly aligned. We hold
// the allocation lock.
template<bool __threads, int __inst>
char*
__pool_alloc<__threads, __inst>::
_S_chunk_alloc(size_t __size, int& __nobjs)
__pool_alloc<__threads, __inst>::_S_chunk_alloc(size_t __n, int& __nobjs)
{
char* __result;
size_t __total_bytes = __size * __nobjs;
size_t __total_bytes = __n * __nobjs;
size_t __bytes_left = _S_end_free - _S_start_free;
if (__bytes_left >= __total_bytes)
@@ -238,10 +180,10 @@ namespace __gnu_cxx
_S_start_free += __total_bytes;
return __result ;
}
else if (__bytes_left >= __size)
else if (__bytes_left >= __n)
{
__nobjs = (int)(__bytes_left/__size);
__total_bytes = __size * __nobjs;
__nobjs = (int)(__bytes_left/__n);
__total_bytes = __n * __nobjs;
__result = _S_start_free;
_S_start_free += __total_bytes;
return __result;
@@ -253,32 +195,32 @@ namespace __gnu_cxx
// Try to make use of the left-over piece.
if (__bytes_left > 0)
{
_Obj* volatile* __my_free_list =
_Obj* volatile* __free_list =
_S_free_list + _S_freelist_index(__bytes_left);
((_Obj*)(void*)_S_start_free) -> _M_free_list_link = *__my_free_list;
*__my_free_list = (_Obj*)(void*)_S_start_free;
((_Obj*)(void*)_S_start_free)->_M_free_list_link = *__free_list;
*__free_list = (_Obj*)(void*)_S_start_free;
}
_S_start_free = (char*) __new_alloc::allocate(__bytes_to_get);
if (_S_start_free == 0)
{
size_t __i;
_Obj* volatile* __my_free_list;
_Obj* volatile* __free_list;
_Obj* __p;
// Try to make do with what we have. That can't hurt. We
// do not try smaller requests, since that tends to result
// in disaster on multi-process machines.
__i = __size;
__i = __n;
for (; __i <= (size_t) _S_max_bytes; __i += (size_t) _S_align)
{
__my_free_list = _S_free_list + _S_freelist_index(__i);
__p = *__my_free_list;
__free_list = _S_free_list + _S_freelist_index(__i);
__p = *__free_list;
if (__p != 0)
{
*__my_free_list = __p -> _M_free_list_link;
*__free_list = __p -> _M_free_list_link;
_S_start_free = (char*)__p;
_S_end_free = _S_start_free + __i;
return _S_chunk_alloc(__size, __nobjs);
return _S_chunk_alloc(__n, __nobjs);
// Any leftover piece will eventually make it to the
// right free list.
}
@@ -290,11 +232,10 @@ namespace __gnu_cxx
}
_S_heap_size += __bytes_to_get;
_S_end_free = _S_start_free + __bytes_to_get;
return _S_chunk_alloc(__size, __nobjs);
return _S_chunk_alloc(__n, __nobjs);
}
}
// Returns an object of size __n, and optionally adds to "size
// __n"'s free list. We assume that __n is properly aligned. We
// hold the allocation lock.
@@ -304,7 +245,7 @@ namespace __gnu_cxx
{
int __nobjs = 20;
char* __chunk = _S_chunk_alloc(__n, __nobjs);
_Obj* volatile* __my_free_list;
_Obj* volatile* __free_list;
_Obj* __result;
_Obj* __current_obj;
_Obj* __next_obj;
@@ -312,11 +253,11 @@ namespace __gnu_cxx
if (1 == __nobjs)
return __chunk;
__my_free_list = _S_free_list + _S_freelist_index(__n);
__free_list = _S_free_list + _S_freelist_index(__n);
// Build free list in chunk.
__result = (_Obj*)(void*)__chunk;
*__my_free_list = __next_obj = (_Obj*)(void*)(__chunk + __n);
*__free_list = __next_obj = (_Obj*)(void*)(__chunk + __n);
for (__i = 1; ; __i++)
{
__current_obj = __next_obj;
@@ -332,10 +273,69 @@ namespace __gnu_cxx
return __result;
}
template<bool __threads, int __inst>
void*
__pool_alloc<__threads, __inst>::allocate(size_t __n)
{
void* __ret = 0;
// If there is a race through here, assume answer from getenv
// will resolve in same direction. Inspired by techniques
// to efficiently support threading found in basic_string.h.
if (_S_force_new == 0)
{
if (getenv("GLIBCPP_FORCE_NEW"))
__atomic_add(&_S_force_new, 1);
else
__atomic_add(&_S_force_new, -1);
}
if ((__n > (size_t) _S_max_bytes) || (_S_force_new > 0))
__ret = __new_alloc::allocate(__n);
else
{
_Obj* volatile* __free_list = _S_free_list + _S_freelist_index(__n);
// Acquire the lock here with a constructor call. This
// ensures that it is released in exit or during stack
// unwinding.
_Lock __lock_instance;
_Obj* __restrict__ __result = *__free_list;
if (__builtin_expect(__result == 0, 0))
__ret = _S_refill(_S_round_up(__n));
else
{
*__free_list = __result -> _M_free_list_link;
__ret = __result;
}
if (__builtin_expect(__ret == 0, 0))
__throw_bad_alloc();
}
return __ret;
}
template<bool __threads, int __inst>
void
__pool_alloc<__threads, __inst>::deallocate(void* __p, size_t __n)
{
if ((__n > (size_t) _S_max_bytes) || (_S_force_new > 0))
__new_alloc::deallocate(__p, __n);
else
{
_Obj* volatile* __free_list = _S_free_list + _S_freelist_index(__n);
_Obj* __q = (_Obj*)__p;
// Acquire the lock here with a constructor call. This
// ensures that it is released in exit or during stack
// unwinding.
_Lock __lock_instance;
__q -> _M_free_list_link = *__free_list;
*__free_list = __q;
}
}
template<bool __threads, int __inst>
_STL_mutex_lock
__pool_alloc<__threads, __inst>::_S_lock __STL_MUTEX_INITIALIZER;
typename __pool_alloc<__threads, __inst>::_Obj* volatile
__pool_alloc<__threads, __inst>::_S_free_list[_S_freelists];
template<bool __threads, int __inst>
char* __pool_alloc<__threads, __inst>::_S_start_free = 0;
@@ -347,8 +347,18 @@ namespace __gnu_cxx
size_t __pool_alloc<__threads, __inst>::_S_heap_size = 0;
template<bool __threads, int __inst>
typename __pool_alloc<__threads, __inst>::_Obj* volatile
__pool_alloc<__threads, __inst>::_S_free_list[_S_freelists];
_STL_mutex_lock
__pool_alloc<__threads, __inst>::_S_lock __STL_MUTEX_INITIALIZER;
template<bool __threads, int __inst> _Atomic_word
__pool_alloc<__threads, __inst>::_S_force_new = 0;
// Inhibit implicit instantiations for required instantiations,
// which are defined via explicit instantiations elsewhere.
// NB: This syntax is a GNU extension.
#if _GLIBCPP_EXTERN_TEMPLATE
extern template class __pool_alloc<true, 0>;
#endif
} // namespace __gnu_cxx
namespace std
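
Taken together, the definitions above implement a classic size-class free-list pool: allocate() rounds the request up to an 8-byte boundary, pops a node off the matching singly-linked free list, and falls back to _S_refill()/_S_chunk_alloc(), which carve a large block out of operator new and thread the pieces onto the list. A compressed, single-threaded sketch of the same technique (simplified constants, no locking, not the libstdc++ code):

#include <cstddef>
#include <new>

class toy_pool
{
  union _Obj { _Obj* _M_next; };   // a free node overlays the user block

  enum { _S_align = 8, _S_max_bytes = 128,
         _S_freelists = _S_max_bytes / _S_align };

  static _Obj* _S_free_list[_S_freelists];

  static size_t
  _S_round_up(size_t __n)
  { return (__n + _S_align - 1) & ~size_t(_S_align - 1); }

  static size_t
  _S_index(size_t __n)
  { return (__n + _S_align - 1) / _S_align - 1; }

  // Carve one big chunk into __nobjs nodes of size __n, thread nodes
  // 1..__nobjs-1 onto the free list, and hand node 0 to the caller.
  static void*
  _S_refill(size_t __n)
  {
    const int __nobjs = 20;
    char* __chunk = static_cast<char*>(::operator new(__n * __nobjs));
    _Obj*& __list = _S_free_list[_S_index(__n)];
    for (int __i = 1; __i < __nobjs; ++__i)
      {
        _Obj* __p = reinterpret_cast<_Obj*>(__chunk + __i * __n);
        __p->_M_next = __list;
        __list = __p;
      }
    return __chunk;
  }

public:
  static void*
  allocate(size_t __n)
  {
    if (__n > size_t(_S_max_bytes))
      return ::operator new(__n);          // big requests bypass the pool
    __n = _S_round_up(__n);
    _Obj*& __list = _S_free_list[_S_index(__n)];
    if (__list == 0)
      return _S_refill(__n);
    _Obj* __result = __list;               // pop the head of the free list
    __list = __result->_M_next;
    return __result;
  }

  static void
  deallocate(void* __p, size_t __n)
  {
    if (__n > size_t(_S_max_bytes))
      { ::operator delete(__p); return; }
    _Obj* __q = static_cast<_Obj*>(__p);   // push back onto the free list
    _Obj*& __list = _S_free_list[_S_index(__n)];
    __q->_M_next = __list;
    __list = __q;
  }
};

toy_pool::_Obj* toy_pool::_S_free_list[toy_pool::_S_freelists] = { };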

src/allocator-inst.cc

@@ -39,3 +39,8 @@ namespace std
template class allocator<char>;
template class allocator<wchar_t>;
} // namespace std
namespace __gnu_cxx
{
template class __pool_alloc<true, 0>;
} // namespace __gnu_cxx
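
Moving allocate() and deallocate() out of line is what creates the need for this explicit instantiation (and the matching extern template in the header): the definitions must land in exactly one object file so the linker map can export them. The tri-state _S_force_new check inside allocate() is also worth noting: zero means undecided, and the first callers race benignly through getenv, since every racer pushes the flag the same direction. A standalone sketch of that idiom, using GCC's later __sync builtin as a stand-in (an assumption; the 2003 code used libstdc++'s internal __atomic_add on an _Atomic_word):

#include <cstdlib>

static int __force_new;   // 0: undecided, > 0: bypass pool, < 0: use pool

static bool
__use_operator_new()
{
  // Racy but convergent: every thread that still sees 0 consults the
  // same environment variable, so concurrent updates agree on the sign.
  if (__force_new == 0)
    __sync_fetch_and_add(&__force_new,
                         std::getenv("GLIBCPP_FORCE_NEW") ? 1 : -1);
  return __force_new > 0;
}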