libstdc++: Refactor _Hashtable to prepare for custom pointers

Limit usage of node pointers in _Hashtable implementation details so that when
we move to the allocator's custom pointer type we do not need to add an _Alloc
template parameter everywhere, which would impact the ABI.

This is done by reviewing node type definition. It is now based on new basic
types which are not pointer dependent. The _Hashtable helper
types are now using those new node base types and do not receive node pointers.

libstdc++-v3/ChangeLog:

	* include/bits/hashtable_policy.h
	(_Hash_node_value_base<>): Remove _Hash_node_base inheritance.
	(_Hash_node_code_cache<bool _Cache_hash_code>): New.
	(_Hash_node_value<typename _Value, bool _Cache_hash_code>): New.
	(_Hash_node<>): Inherits _Hash_node_base<> and _Hash_node_value<>.
	(_Map_base<>::__node_type): Remove.
	(_Map_base<>::iterator): Remove.
	(_Insert_base<>::__hash_cached): New.
	(_Insert_base<>::__constant_iterators): New.
	(_Insert_base<>::__hashtable_alloc): New.
	(_Insert_base<>::__node_type): Remove.
	(_Insert_base<>::__node_ptr): New.
	(_Hash_code_base<>): Remove specializations.
	(_Hash_code_base<>::__node_type): Remove.
	(_Hash_code_base<>::_M_bucket_index(const __node_type*, size_t)):
	Replace by...
	(_Hash_code_base<>::_M_bucket_index(const _Hash_node_value<>&, size_t)):
	...this.
	(_Hash_code_base<>::_M_store_code(__node_type*, __hash_code)):
	Replace by...
	(_Hash_code_base<>::_M_store_code(_Hash_node_code_cache<>&, __hash_code)):
	...this.
	(_Hash_code_base<>::_M_copy_code(__node_type*, const __node_type*)):
	Replace by...
	(_Hash_code_base<>::_M_copy_code(_Hash_node_code_cache<>&,
	const _Hash_node_code_cache<>&)): ...this.
	(_Hashtable_base<>::__constant_iterators): Remove.
	(_Hashtable_base<>::__unique_keys): Remove.
	(_Hashtable_base<>::__node_type): Remove.
	(_Hashtable_base<>::iterator): Remove.
	(_Hashtable_base<>::const_iterator): Remove.
	(_Hashtable_base<>::local_iterator): Remove.
	(_Hashtable_base<>::const_local_iterator): Remove.
	(_Hashtable_base<>::__ireturn_type): Remove.
	(_Hashtable_base<>::_Equal_hash_code<>::_S_equals): Replace by...
	(_Hashtable_base<>::_S_equals(__hash_code, const _Hash_node_code_cache<>&)):
	...this.
	(_Hashtable_base<>::_Equal_hash_code<>::_S_node_equals): Replace by...
	(_Hashtable_base<>::_S_node_equals(__hash_code,
	const _Hash_node_code_cache<>&)): ...this.
	(_Hashtable_base<>::_Equal_hash_code<>): Remove.
	(_Hashtable_base<>::_M_equals): Adapt.
	(_Hashtable_base<>::_M_node_equals): Adapt.
	(_Equality<>::_M_equal): Adapt.
	(_Hashtable_alloc<>::__node_ptr): New.
	(_Hashtable_alloc<>::__bucket_type): Rename into...
	(_Hashtable_alloc<>::__node_base_ptr): ...this.
	(_Hashtable_alloc<>::__bucket_alloc_type): Rename into...
	(_Hashtable_alloc<>::__buckets_alloc_type): ...this.
	(_Hashtable_alloc<>::__bucket_alloc_traits): Rename into...
	(_Hashtable_alloc<>::__buckets_alloc_traits): ...this.
	(_Hashtable_alloc<>::__buckets_ptr): New.
	(_Hashtable_alloc<>::_M_allocate_node): Adapt.
	(_Hashtable_alloc<>::_M_deallocate_node): Adapt.
	(_Hashtable_alloc<>::_M_deallocate_node_ptr): Adapt.
	(_Hashtable_alloc<>::_M_deallocate_nodes): Adapt.
	(_Hashtable_alloc<>::_M_allocate_buckets): Adapt.
	(_Hashtable_alloc<>::_M_deallocate_buckets): Adapt.
	* include/bits/hashtable.h (_Hashtable<>): Adapt.
This commit is contained in:
François Dumont 2020-10-04 18:06:11 +02:00
parent 15b8e2af42
commit 1b6f047683
2 changed files with 320 additions and 379 deletions

View File

@ -101,7 +101,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
* - size_type _M_bucket_count
* - size_type _M_element_count
*
* with _Bucket being _Hash_node* and _Hash_node containing:
* with _Bucket being _Hash_node_base* and _Hash_node containing:
*
* - _Hash_node* _M_next
* - Tp _M_value
@ -194,17 +194,27 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
using __traits_type = _Traits;
using __hash_cached = typename __traits_type::__hash_cached;
using __constant_iterators = typename __traits_type::__constant_iterators;
using __node_type = __detail::_Hash_node<_Value, __hash_cached::value>;
using __node_alloc_type = __alloc_rebind<_Alloc, __node_type>;
using __hashtable_alloc = __detail::_Hashtable_alloc<__node_alloc_type>;
using __node_value_type =
__detail::_Hash_node_value<_Value, __hash_cached::value>;
using __node_ptr = typename __hashtable_alloc::__node_ptr;
using __value_alloc_traits =
typename __hashtable_alloc::__value_alloc_traits;
using __node_alloc_traits =
typename __hashtable_alloc::__node_alloc_traits;
using __node_base = typename __hashtable_alloc::__node_base;
using __bucket_type = typename __hashtable_alloc::__bucket_type;
using __node_base_ptr = typename __hashtable_alloc::__node_base_ptr;
using __buckets_ptr = typename __hashtable_alloc::__buckets_ptr;
using __insert_base = __detail::_Insert<_Key, _Value, _Alloc, _ExtractKey,
_Equal, _Hash,
_RangeHash, _Unused,
_RehashPolicy, _Traits>;
public:
typedef _Key key_type;
@ -219,11 +229,24 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
typedef value_type& reference;
typedef const value_type& const_reference;
using iterator = typename __insert_base::iterator;
using const_iterator = typename __insert_base::const_iterator;
using local_iterator = __detail::_Local_iterator<key_type, _Value,
_ExtractKey, _Hash, _RangeHash, _Unused,
__constant_iterators::value,
__hash_cached::value>;
using const_local_iterator = __detail::_Local_const_iterator<
key_type, _Value,
_ExtractKey, _Hash, _RangeHash, _Unused,
__constant_iterators::value, __hash_cached::value>;
private:
using __rehash_type = _RehashPolicy;
using __rehash_state = typename __rehash_type::_State;
using __constant_iterators = typename __traits_type::__constant_iterators;
using __unique_keys = typename __traits_type::__unique_keys;
using __hashtable_base = __detail::
@ -232,7 +255,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
using __hash_code_base = typename __hashtable_base::__hash_code_base;
using __hash_code = typename __hashtable_base::__hash_code;
using __ireturn_type = typename __hashtable_base::__ireturn_type;
using __ireturn_type = typename __insert_base::__ireturn_type;
using __map_base = __detail::_Map_base<_Key, _Value, _Alloc, _ExtractKey,
_Equal, _Hash, _RangeHash, _Unused,
@ -256,7 +279,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
struct _Scoped_node
{
// Take ownership of a node with a constructed element.
_Scoped_node(__node_type* __n, __hashtable_alloc* __h)
_Scoped_node(__node_ptr __n, __hashtable_alloc* __h)
: _M_h(__h), _M_node(__n) { }
// Allocate a node and construct an element within it.
@ -273,7 +296,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
_Scoped_node& operator=(const _Scoped_node&) = delete;
__hashtable_alloc* _M_h;
__node_type* _M_node;
__node_ptr _M_node;
};
template<typename _Ht>
@ -293,8 +316,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
// Getting a bucket index from a node shall not throw because it is used
// in methods (erase, swap...) that shall not throw.
static_assert(noexcept(declval<const __hash_code_base_access&>()
._M_bucket_index((const __node_type*)nullptr,
(std::size_t)0)),
._M_bucket_index(declval<const __node_value_type&>(),
(std::size_t)0)),
"Cache the hash code or qualify your functors involved"
" in hash code and bucket index computation with noexcept");
@ -345,20 +368,13 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
using size_type = typename __hashtable_base::size_type;
using difference_type = typename __hashtable_base::difference_type;
using iterator = typename __hashtable_base::iterator;
using const_iterator = typename __hashtable_base::const_iterator;
using local_iterator = typename __hashtable_base::local_iterator;
using const_local_iterator = typename __hashtable_base::
const_local_iterator;
#if __cplusplus > 201402L
using node_type = _Node_handle<_Key, _Value, __node_alloc_type>;
using insert_return_type = _Node_insert_return<iterator, node_type>;
#endif
private:
__bucket_type* _M_buckets = &_M_single_bucket;
__buckets_ptr _M_buckets = &_M_single_bucket;
size_type _M_bucket_count = 1;
__node_base _M_before_begin;
size_type _M_element_count = 0;
@ -370,24 +386,24 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
// qualified.
// Note that we can't leave hashtable with 0 bucket without adding
// numerous checks in the code to avoid 0 modulus.
__bucket_type _M_single_bucket = nullptr;
__node_base_ptr _M_single_bucket = nullptr;
void
_M_update_bbegin()
{
if (_M_begin())
_M_buckets[_M_bucket_index(_M_begin())] = &_M_before_begin;
_M_buckets[_M_bucket_index(*_M_begin())] = &_M_before_begin;
}
void
_M_update_bbegin(__node_type* __n)
_M_update_bbegin(__node_ptr __n)
{
_M_before_begin._M_nxt = __n;
_M_update_bbegin();
}
bool
_M_uses_single_bucket(__bucket_type* __bkts) const
_M_uses_single_bucket(__buckets_ptr __bkts) const
{ return __builtin_expect(__bkts == &_M_single_bucket, false); }
bool
@ -397,7 +413,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
__hashtable_alloc&
_M_base_alloc() { return *this; }
__bucket_type*
__buckets_ptr
_M_allocate_buckets(size_type __bkt_count)
{
if (__builtin_expect(__bkt_count == 1, false))
@ -410,7 +426,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
}
void
_M_deallocate_buckets(__bucket_type* __bkts, size_type __bkt_count)
_M_deallocate_buckets(__buckets_ptr __bkts, size_type __bkt_count)
{
if (_M_uses_single_bucket(__bkts))
return;
@ -424,12 +440,12 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
// Gets bucket begin, deals with the fact that non-empty buckets contain
// their before begin node.
__node_type*
__node_ptr
_M_bucket_begin(size_type __bkt) const;
__node_type*
__node_ptr
_M_begin() const
{ return static_cast<__node_type*>(_M_before_begin._M_nxt); }
{ return static_cast<__node_ptr>(_M_before_begin._M_nxt); }
// Assign *this using another _Hashtable instance. Whether elements
// are copied or moved depends on the _Ht reference.
@ -711,7 +727,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
private:
// Bucket index computation helpers.
size_type
_M_bucket_index(__node_type* __n) const noexcept
_M_bucket_index(const __node_value_type& __n) const noexcept
{ return __hash_code_base::_M_bucket_index(__n, _M_bucket_count); }
size_type
@ -720,44 +736,44 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
// Find and insert helper functions and types
// Find the node before the one matching the criteria.
__node_base*
__node_base_ptr
_M_find_before_node(size_type, const key_type&, __hash_code) const;
__node_type*
__node_ptr
_M_find_node(size_type __bkt, const key_type& __key,
__hash_code __c) const
{
__node_base* __before_n = _M_find_before_node(__bkt, __key, __c);
__node_base_ptr __before_n = _M_find_before_node(__bkt, __key, __c);
if (__before_n)
return static_cast<__node_type*>(__before_n->_M_nxt);
return static_cast<__node_ptr>(__before_n->_M_nxt);
return nullptr;
}
// Insert a node at the beginning of a bucket.
void
_M_insert_bucket_begin(size_type, __node_type*);
_M_insert_bucket_begin(size_type, __node_ptr);
// Remove the bucket first node
void
_M_remove_bucket_begin(size_type __bkt, __node_type* __next_n,
_M_remove_bucket_begin(size_type __bkt, __node_ptr __next_n,
size_type __next_bkt);
// Get the node before __n in the bucket __bkt
__node_base*
_M_get_previous_node(size_type __bkt, __node_base* __n);
__node_base_ptr
_M_get_previous_node(size_type __bkt, __node_ptr __n);
// Insert node __n with hash code __code, in bucket __bkt if no
// rehash (assumes no element with same key already present).
// Takes ownership of __n if insertion succeeds, throws otherwise.
iterator
_M_insert_unique_node(size_type __bkt, __hash_code,
__node_type* __n, size_type __n_elt = 1);
__node_ptr __n, size_type __n_elt = 1);
// Insert node __n with key __k and hash code __code.
// Takes ownership of __n if insertion succeeds, throws otherwise.
iterator
_M_insert_multi_node(__node_type* __hint,
__hash_code __code, __node_type* __n);
_M_insert_multi_node(__node_ptr __hint,
__hash_code __code, __node_ptr __n);
template<typename... _Args>
std::pair<iterator, bool>
@ -814,7 +830,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
_M_erase(false_type __uks, const key_type&);
iterator
_M_erase(size_type __bkt, __node_base* __prev_n, __node_type* __n);
_M_erase(size_type __bkt, __node_base_ptr __prev_n, __node_ptr __n);
public:
// Emplace
@ -874,7 +890,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
const key_type& __k = __nh._M_key();
__hash_code __code = this->_M_hash_code(__k);
size_type __bkt = _M_bucket_index(__code);
if (__node_type* __n = _M_find_node(__bkt, __k, __code))
if (__node_ptr __n = _M_find_node(__bkt, __k, __code))
{
__ret.node = std::move(__nh);
__ret.position = iterator(__n);
@ -910,15 +926,15 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
private:
node_type
_M_extract_node(size_t __bkt, __node_base* __prev_n)
_M_extract_node(size_t __bkt, __node_base_ptr __prev_n)
{
__node_type* __n = static_cast<__node_type*>(__prev_n->_M_nxt);
__node_ptr __n = static_cast<__node_ptr>(__prev_n->_M_nxt);
if (__prev_n == _M_buckets[__bkt])
_M_remove_bucket_begin(__bkt, __n->_M_next(),
__n->_M_nxt ? _M_bucket_index(__n->_M_next()) : 0);
__n->_M_nxt ? _M_bucket_index(*__n->_M_next()) : 0);
else if (__n->_M_nxt)
{
size_type __next_bkt = _M_bucket_index(__n->_M_next());
size_type __next_bkt = _M_bucket_index(*__n->_M_next());
if (__next_bkt != __bkt)
_M_buckets[__next_bkt] = __prev_n;
}
@ -934,7 +950,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
node_type
extract(const_iterator __pos)
{
size_t __bkt = _M_bucket_index(__pos._M_cur);
size_t __bkt = _M_bucket_index(*__pos._M_cur);
return _M_extract_node(__bkt,
_M_get_previous_node(__bkt, __pos._M_cur));
}
@ -946,7 +962,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
node_type __nh;
__hash_code __code = this->_M_hash_code(__k);
std::size_t __bkt = _M_bucket_index(__code);
if (__node_base* __prev_node = _M_find_before_node(__bkt, __k, __code))
if (__node_base_ptr __prev_node = _M_find_before_node(__bkt, __k, __code))
__nh = _M_extract_node(__bkt, __prev_node);
return __nh;
}
@ -1016,10 +1032,10 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
_Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
_Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
_M_bucket_begin(size_type __bkt) const
-> __node_type*
-> __node_ptr
{
__node_base* __n = _M_buckets[__bkt];
return __n ? static_cast<__node_type*>(__n->_M_nxt) : nullptr;
__node_base_ptr __n = _M_buckets[__bkt];
return __n ? static_cast<__node_ptr>(__n->_M_nxt) : nullptr;
}
template<typename _Key, typename _Value, typename _Alloc,
@ -1148,7 +1164,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
_Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
_M_assign_elements(_Ht&& __ht)
{
__bucket_type* __former_buckets = nullptr;
__buckets_ptr __former_buckets = nullptr;
std::size_t __former_bucket_count = _M_bucket_count;
const __rehash_state& __former_state = _M_rehash_policy._M_state();
@ -1160,7 +1176,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
}
else
__builtin_memset(_M_buckets, 0,
_M_bucket_count * sizeof(__bucket_type));
_M_bucket_count * sizeof(__node_base_ptr));
__try
{
@ -1184,7 +1200,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
_M_bucket_count = __former_bucket_count;
}
__builtin_memset(_M_buckets, 0,
_M_bucket_count * sizeof(__bucket_type));
_M_bucket_count * sizeof(__node_base_ptr));
__throw_exception_again;
}
}
@ -1199,7 +1215,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
_Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
_M_assign(_Ht&& __ht, const _NodeGenerator& __node_gen)
{
__bucket_type* __buckets = nullptr;
__buckets_ptr __buckets = nullptr;
if (!_M_buckets)
_M_buckets = __buckets = _M_allocate_buckets(_M_bucket_count);
@ -1210,20 +1226,20 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
// First deal with the special first node pointed to by
// _M_before_begin.
__node_type* __ht_n = __ht._M_begin();
__node_type* __this_n
__node_ptr __ht_n = __ht._M_begin();
__node_ptr __this_n
= __node_gen(__fwd_value_for<_Ht>(__ht_n->_M_v()));
this->_M_copy_code(__this_n, __ht_n);
this->_M_copy_code(*__this_n, *__ht_n);
_M_update_bbegin(__this_n);
// Then deal with other nodes.
__node_base* __prev_n = __this_n;
__node_ptr __prev_n = __this_n;
for (__ht_n = __ht_n->_M_next(); __ht_n; __ht_n = __ht_n->_M_next())
{
__this_n = __node_gen(__fwd_value_for<_Ht>(__ht_n->_M_v()));
__prev_n->_M_nxt = __this_n;
this->_M_copy_code(__this_n, __ht_n);
size_type __bkt = _M_bucket_index(__this_n);
this->_M_copy_code(*__this_n, *__ht_n);
size_type __bkt = _M_bucket_index(*__this_n);
if (!_M_buckets[__bkt])
_M_buckets[__bkt] = __prev_n;
__prev_n = __this_n;
@ -1538,7 +1554,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
// find any new equivalent value.
size_type __result = 1;
for (auto __ref = __it++;
__it._M_cur && this->_M_node_equals(__ref._M_cur, __it._M_cur);
__it._M_cur && this->_M_node_equals(*__ref._M_cur, *__it._M_cur);
++__it)
++__result;
@ -1566,7 +1582,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
// All equivalent values are next to each other, if we find a
// non-equivalent value after an equivalent one it means that we won't
// find any new equivalent value.
while (__ite._M_cur && this->_M_node_equals(__beg._M_cur, __ite._M_cur))
while (__ite._M_cur && this->_M_node_equals(*__beg._M_cur, *__ite._M_cur))
++__ite;
return { __beg, __ite };
@ -1593,7 +1609,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
// All equivalent values are next to each other, if we find a
// non-equivalent value after an equivalent one it means that we won't
// find any new equivalent value.
while (__ite._M_cur && this->_M_node_equals(__beg._M_cur, __ite._M_cur))
while (__ite._M_cur && this->_M_node_equals(*__beg._M_cur, *__ite._M_cur))
++__ite;
return { __beg, __ite };
@ -1610,19 +1626,19 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
_Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
_M_find_before_node(size_type __bkt, const key_type& __k,
__hash_code __code) const
-> __node_base*
-> __node_base_ptr
{
__node_base* __prev_p = _M_buckets[__bkt];
__node_base_ptr __prev_p = _M_buckets[__bkt];
if (!__prev_p)
return nullptr;
for (__node_type* __p = static_cast<__node_type*>(__prev_p->_M_nxt);;
for (__node_ptr __p = static_cast<__node_ptr>(__prev_p->_M_nxt);;
__p = __p->_M_next())
{
if (this->_M_equals(__k, __code, __p))
if (this->_M_equals(__k, __code, *__p))
return __prev_p;
if (!__p->_M_nxt || _M_bucket_index(__p->_M_next()) != __bkt)
if (!__p->_M_nxt || _M_bucket_index(*__p->_M_next()) != __bkt)
break;
__prev_p = __p;
}
@ -1637,7 +1653,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
void
_Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
_Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
_M_insert_bucket_begin(size_type __bkt, __node_type* __node)
_M_insert_bucket_begin(size_type __bkt, __node_ptr __node)
{
if (_M_buckets[__bkt])
{
@ -1657,7 +1673,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
if (__node->_M_nxt)
// We must update former begin bucket that is pointing to
// _M_before_begin.
_M_buckets[_M_bucket_index(__node->_M_next())] = __node;
_M_buckets[_M_bucket_index(*__node->_M_next())] = __node;
_M_buckets[__bkt] = &_M_before_begin;
}
@ -1670,7 +1686,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
void
_Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
_Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
_M_remove_bucket_begin(size_type __bkt, __node_type* __next,
_M_remove_bucket_begin(size_type __bkt, __node_ptr __next,
size_type __next_bkt)
{
if (!__next || __next_bkt != __bkt)
@ -1694,10 +1710,10 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
auto
_Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
_Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
_M_get_previous_node(size_type __bkt, __node_base* __n)
-> __node_base*
_M_get_previous_node(size_type __bkt, __node_ptr __n)
-> __node_base_ptr
{
__node_base* __prev_n = _M_buckets[__bkt];
__node_base_ptr __prev_n = _M_buckets[__bkt];
while (__prev_n->_M_nxt != __n)
__prev_n = __prev_n->_M_nxt;
return __prev_n;
@ -1719,7 +1735,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
const key_type& __k = _ExtractKey{}(__node._M_node->_M_v());
__hash_code __code = this->_M_hash_code(__k);
size_type __bkt = _M_bucket_index(__code);
if (__node_type* __p = _M_find_node(__bkt, __k, __code))
if (__node_ptr __p = _M_find_node(__bkt, __k, __code))
// There is already an equivalent node, no insertion
return std::make_pair(iterator(__p), false);
@ -1760,7 +1776,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
_Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
_Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
_M_insert_unique_node(size_type __bkt, __hash_code __code,
__node_type* __node, size_type __n_elt)
__node_ptr __node, size_type __n_elt)
-> iterator
{
const __rehash_state& __saved_state = _M_rehash_policy._M_state();
@ -1774,7 +1790,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
__bkt = _M_bucket_index(__code);
}
this->_M_store_code(__node, __code);
this->_M_store_code(*__node, __code);
// Always insert at the beginning of the bucket.
_M_insert_bucket_begin(__bkt, __node);
@ -1789,8 +1805,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
auto
_Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
_Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
_M_insert_multi_node(__node_type* __hint,
__hash_code __code, __node_type* __node)
_M_insert_multi_node(__node_ptr __hint,
__hash_code __code, __node_ptr __node)
-> iterator
{
const __rehash_state& __saved_state = _M_rehash_policy._M_state();
@ -1800,17 +1816,18 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
if (__do_rehash.first)
_M_rehash(__do_rehash.second, __saved_state);
this->_M_store_code(__node, __code);
this->_M_store_code(*__node, __code);
const key_type& __k = _ExtractKey{}(__node->_M_v());
size_type __bkt = _M_bucket_index(__code);
// Find the node before an equivalent one or use hint if it exists and
// if it is equivalent.
__node_base* __prev
__node_base_ptr __prev
= __builtin_expect(__hint != nullptr, false)
&& this->_M_equals(__k, __code, __hint)
&& this->_M_equals(__k, __code, *__hint)
? __hint
: _M_find_before_node(__bkt, __k, __code);
if (__prev)
{
// Insert after the node before the equivalent one.
@ -1820,9 +1837,9 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
// hint might be the last bucket node, in this case we need to
// update next bucket.
if (__node->_M_nxt
&& !this->_M_equals(__k, __code, __node->_M_next()))
&& !this->_M_equals(__k, __code, *__node->_M_next()))
{
size_type __next_bkt = _M_bucket_index(__node->_M_next());
size_type __next_bkt = _M_bucket_index(*__node->_M_next());
if (__next_bkt != __bkt)
_M_buckets[__next_bkt] = __node;
}
@ -1853,7 +1870,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
__hash_code __code = this->_M_hash_code(__k);
size_type __bkt = _M_bucket_index(__code);
if (__node_type* __node = _M_find_node(__bkt, __k, __code))
if (__node_ptr __node = _M_find_node(__bkt, __k, __code))
return { iterator(__node), false };
_Scoped_node __node{ __node_gen(std::forward<_Arg>(__v)), this };
@ -1899,13 +1916,13 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
erase(const_iterator __it)
-> iterator
{
__node_type* __n = __it._M_cur;
std::size_t __bkt = _M_bucket_index(__n);
__node_ptr __n = __it._M_cur;
std::size_t __bkt = _M_bucket_index(*__n);
// Look for previous node to unlink it from the erased one, this
// is why we need buckets to contain the before begin to make
// this search fast.
__node_base* __prev_n = _M_get_previous_node(__bkt, __n);
__node_base_ptr __prev_n = _M_get_previous_node(__bkt, __n);
return _M_erase(__bkt, __prev_n, __n);
}
@ -1916,15 +1933,15 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
auto
_Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
_Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
_M_erase(size_type __bkt, __node_base* __prev_n, __node_type* __n)
_M_erase(size_type __bkt, __node_base_ptr __prev_n, __node_ptr __n)
-> iterator
{
if (__prev_n == _M_buckets[__bkt])
_M_remove_bucket_begin(__bkt, __n->_M_next(),
__n->_M_nxt ? _M_bucket_index(__n->_M_next()) : 0);
__n->_M_nxt ? _M_bucket_index(*__n->_M_next()) : 0);
else if (__n->_M_nxt)
{
size_type __next_bkt = _M_bucket_index(__n->_M_next());
size_type __next_bkt = _M_bucket_index(*__n->_M_next());
if (__next_bkt != __bkt)
_M_buckets[__next_bkt] = __prev_n;
}
@ -1951,12 +1968,12 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
std::size_t __bkt = _M_bucket_index(__code);
// Look for the node before the first matching node.
__node_base* __prev_n = _M_find_before_node(__bkt, __k, __code);
__node_base_ptr __prev_n = _M_find_before_node(__bkt, __k, __code);
if (!__prev_n)
return 0;
// We found a matching node, erase it.
__node_type* __n = static_cast<__node_type*>(__prev_n->_M_nxt);
__node_ptr __n = static_cast<__node_ptr>(__prev_n->_M_nxt);
_M_erase(__bkt, __prev_n, __n);
return 1;
}
@ -1975,7 +1992,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
std::size_t __bkt = _M_bucket_index(__code);
// Look for the node before the first matching node.
__node_base* __prev_n = _M_find_before_node(__bkt, __k, __code);
__node_base_ptr __prev_n = _M_find_before_node(__bkt, __k, __code);
if (!__prev_n)
return 0;
@ -1985,18 +2002,18 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
// We use one loop to find all matching nodes and another to deallocate
// them so that the key stays valid during the first loop. It might be
// invalidated indirectly when destroying nodes.
__node_type* __n = static_cast<__node_type*>(__prev_n->_M_nxt);
__node_type* __n_last = __n->_M_next();
while (__n_last && this->_M_node_equals(__n, __n_last))
__node_ptr __n = static_cast<__node_ptr>(__prev_n->_M_nxt);
__node_ptr __n_last = __n->_M_next();
while (__n_last && this->_M_node_equals(*__n, *__n_last))
__n_last = __n_last->_M_next();
std::size_t __n_last_bkt = __n_last ? _M_bucket_index(__n_last) : __bkt;
std::size_t __n_last_bkt = __n_last ? _M_bucket_index(*__n_last) : __bkt;
// Deallocate nodes.
size_type __result = 0;
do
{
__node_type* __p = __n->_M_next();
__node_ptr __p = __n->_M_next();
this->_M_deallocate_node(__n);
__n = __p;
++__result;
@ -2022,27 +2039,27 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
erase(const_iterator __first, const_iterator __last)
-> iterator
{
__node_type* __n = __first._M_cur;
__node_type* __last_n = __last._M_cur;
__node_ptr __n = __first._M_cur;
__node_ptr __last_n = __last._M_cur;
if (__n == __last_n)
return iterator(__n);
std::size_t __bkt = _M_bucket_index(__n);
std::size_t __bkt = _M_bucket_index(*__n);
__node_base* __prev_n = _M_get_previous_node(__bkt, __n);
__node_base_ptr __prev_n = _M_get_previous_node(__bkt, __n);
bool __is_bucket_begin = __n == _M_bucket_begin(__bkt);
std::size_t __n_bkt = __bkt;
for (;;)
{
do
{
__node_type* __tmp = __n;
__node_ptr __tmp = __n;
__n = __n->_M_next();
this->_M_deallocate_node(__tmp);
--_M_element_count;
if (!__n)
break;
__n_bkt = _M_bucket_index(__n);
__n_bkt = _M_bucket_index(*__n);
}
while (__n != __last_n && __n_bkt == __bkt);
if (__is_bucket_begin)
@ -2069,7 +2086,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
clear() noexcept
{
this->_M_deallocate_nodes(_M_begin());
__builtin_memset(_M_buckets, 0, _M_bucket_count * sizeof(__bucket_type));
__builtin_memset(_M_buckets, 0,
_M_bucket_count * sizeof(__node_base_ptr));
_M_element_count = 0;
_M_before_begin._M_nxt = nullptr;
}
@ -2129,15 +2147,15 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
_Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
_M_rehash_aux(size_type __bkt_count, true_type /* __uks */)
{
__bucket_type* __new_buckets = _M_allocate_buckets(__bkt_count);
__node_type* __p = _M_begin();
__buckets_ptr __new_buckets = _M_allocate_buckets(__bkt_count);
__node_ptr __p = _M_begin();
_M_before_begin._M_nxt = nullptr;
std::size_t __bbegin_bkt = 0;
while (__p)
{
__node_type* __next = __p->_M_next();
__node_ptr __next = __p->_M_next();
std::size_t __bkt
= __hash_code_base::_M_bucket_index(__p, __bkt_count);
= __hash_code_base::_M_bucket_index(*__p, __bkt_count);
if (!__new_buckets[__bkt])
{
__p->_M_nxt = _M_before_begin._M_nxt;
@ -2172,20 +2190,19 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
_Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
_M_rehash_aux(size_type __bkt_count, false_type /* __uks */)
{
__bucket_type* __new_buckets = _M_allocate_buckets(__bkt_count);
__node_type* __p = _M_begin();
__buckets_ptr __new_buckets = _M_allocate_buckets(__bkt_count);
__node_ptr __p = _M_begin();
_M_before_begin._M_nxt = nullptr;
std::size_t __bbegin_bkt = 0;
std::size_t __prev_bkt = 0;
__node_type* __prev_p = nullptr;
__node_ptr __prev_p = nullptr;
bool __check_bucket = false;
while (__p)
{
__node_type* __next = __p->_M_next();
__node_ptr __next = __p->_M_next();
std::size_t __bkt
= __hash_code_base::_M_bucket_index(__p, __bkt_count);
= __hash_code_base::_M_bucket_index(*__p, __bkt_count);
if (__prev_p && __prev_bkt == __bkt)
{
@ -2211,8 +2228,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
if (__prev_p->_M_nxt)
{
std::size_t __next_bkt
= __hash_code_base::_M_bucket_index(__prev_p->_M_next(),
__bkt_count);
= __hash_code_base::_M_bucket_index(
*__prev_p->_M_next(), __bkt_count);
if (__next_bkt != __prev_bkt)
__new_buckets[__next_bkt] = __prev_p;
}
@ -2242,7 +2259,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
if (__check_bucket && __prev_p->_M_nxt)
{
std::size_t __next_bkt
= __hash_code_base::_M_bucket_index(__prev_p->_M_next(),
= __hash_code_base::_M_bucket_index(*__prev_p->_M_next(),
__bkt_count);
if (__next_bkt != __prev_bkt)
__new_buckets[__next_bkt] = __prev_p;

View File

@ -226,7 +226,7 @@ namespace __detail
* Node type with the value to store.
*/
template<typename _Value>
struct _Hash_node_value_base : _Hash_node_base
struct _Hash_node_value_base
{
typedef _Value value_type;
@ -249,34 +249,33 @@ namespace __detail
{ return *_M_valptr(); }
};
/**
* Primary template struct _Hash_node_code_cache.
*/
template<bool _Cache_hash_code>
struct _Hash_node_code_cache
{ };
/**
* Specialization for node with cache, struct _Hash_node_code_cache.
*/
template<>
struct _Hash_node_code_cache<true>
{ std::size_t _M_hash_code; };
template<typename _Value, bool _Cache_hash_code>
struct _Hash_node_value
: _Hash_node_value_base<_Value>
, _Hash_node_code_cache<_Cache_hash_code>
{ };
/**
* Primary template struct _Hash_node.
*/
template<typename _Value, bool _Cache_hash_code>
struct _Hash_node;
/**
* Specialization for nodes with caches, struct _Hash_node.
*
* Base class is __detail::_Hash_node_value_base.
*/
template<typename _Value>
struct _Hash_node<_Value, true> : _Hash_node_value_base<_Value>
{
std::size_t _M_hash_code;
_Hash_node*
_M_next() const noexcept
{ return static_cast<_Hash_node*>(this->_M_nxt); }
};
/**
* Specialization for nodes without caches, struct _Hash_node.
*
* Base class is __detail::_Hash_node_value_base.
*/
template<typename _Value>
struct _Hash_node<_Value, false> : _Hash_node_value_base<_Value>
struct _Hash_node
: _Hash_node_base
, _Hash_node_value<_Value, _Cache_hash_code>
{
_Hash_node*
_M_next() const noexcept
@ -289,7 +288,7 @@ namespace __detail
{
using __node_type = _Hash_node<_Value, _Cache_hash_code>;
__node_type* _M_cur;
__node_type* _M_cur;
_Node_iterator_base() = default;
_Node_iterator_base(__node_type* __p) noexcept
@ -327,10 +326,10 @@ namespace __detail
typedef std::forward_iterator_tag iterator_category;
using pointer = typename std::conditional<__constant_iterators,
const _Value*, _Value*>::type;
const value_type*, value_type*>::type;
using reference = typename std::conditional<__constant_iterators,
const _Value&, _Value&>::type;
const value_type&, value_type&>::type;
_Node_iterator() noexcept
: __base_type(nullptr) { }
@ -377,8 +376,8 @@ namespace __detail
typedef std::ptrdiff_t difference_type;
typedef std::forward_iterator_tag iterator_category;
typedef const _Value* pointer;
typedef const _Value& reference;
typedef const value_type* pointer;
typedef const value_type& reference;
_Node_const_iterator() noexcept
: __base_type(nullptr) { }
@ -671,11 +670,9 @@ namespace __detail
_Unused, _RehashPolicy, _Traits>;
using __hash_code = typename __hashtable_base::__hash_code;
using __node_type = typename __hashtable_base::__node_type;
public:
using key_type = typename __hashtable_base::key_type;
using iterator = typename __hashtable_base::iterator;
using mapped_type = typename std::tuple_element<1, _Pair>::type;
mapped_type&
@ -705,7 +702,7 @@ namespace __detail
__hashtable* __h = static_cast<__hashtable*>(this);
__hash_code __code = __h->_M_hash_code(__k);
std::size_t __bkt = __h->_M_bucket_index(__code);
if (__node_type* __node = __h->_M_find_node(__bkt, __k, __code))
if (auto __node = __h->_M_find_node(__bkt, __k, __code))
return __node->_M_v().second;
typename __hashtable::_Scoped_node __node {
@ -732,7 +729,7 @@ namespace __detail
__hashtable* __h = static_cast<__hashtable*>(this);
__hash_code __code = __h->_M_hash_code(__k);
std::size_t __bkt = __h->_M_bucket_index(__code);
if (__node_type* __node = __h->_M_find_node(__bkt, __k, __code))
if (auto __node = __h->_M_find_node(__bkt, __k, __code))
return __node->_M_v().second;
typename __hashtable::_Scoped_node __node {
@ -801,15 +798,18 @@ namespace __detail
_Hash, _RangeHash,
_Unused, _RehashPolicy, _Traits>;
using __hash_cached = typename _Traits::__hash_cached;
using __constant_iterators = typename _Traits::__constant_iterators;
using __hashtable_alloc = _Hashtable_alloc<
__alloc_rebind<_Alloc, _Hash_node<_Value,
__hash_cached::value>>>;
using value_type = typename __hashtable_base::value_type;
using iterator = typename __hashtable_base::iterator;
using const_iterator = typename __hashtable_base::const_iterator;
using size_type = typename __hashtable_base::size_type;
using __unique_keys = typename __hashtable_base::__unique_keys;
using __ireturn_type = typename __hashtable_base::__ireturn_type;
using __node_type = _Hash_node<_Value, _Traits::__hash_cached::value>;
using __node_alloc_type = __alloc_rebind<_Alloc, __node_type>;
using __unique_keys = typename _Traits::__unique_keys;
using __node_alloc_type = typename __hashtable_alloc::__node_alloc_type;
using __node_gen_type = _AllocNode<__node_alloc_type>;
__hashtable&
@ -827,6 +827,16 @@ namespace __detail
const _NodeGetter&, false_type __uks);
public:
using iterator = _Node_iterator<_Value, __constant_iterators::value,
__hash_cached::value>;
using const_iterator = _Node_const_iterator<_Value, __constant_iterators::value,
__hash_cached::value>;
using __ireturn_type = typename std::conditional<__unique_keys::value,
std::pair<iterator, bool>,
iterator>::type;
__ireturn_type
insert(const value_type& __v)
{
@ -850,7 +860,7 @@ namespace __detail
__hashtable& __h = _M_conjure_hashtable();
auto __code = __h._M_hash_code(__k);
std::size_t __bkt = __h._M_bucket_index(__code);
if (__node_type* __node = __h._M_find_node(__bkt, __k, __code))
if (auto __node = __h._M_find_node(__bkt, __k, __code))
return { iterator(__node), false };
typename __hashtable::_Scoped_node __node {
@ -958,16 +968,12 @@ namespace __detail
_Equal, _Hash, _RangeHash, _Unused,
_RehashPolicy, _Traits>;
using __hashtable_base = _Hashtable_base<_Key, _Value, _ExtractKey,
_Equal, _Hash, _RangeHash,
_Unused, _Traits>;
using value_type = typename __base_type::value_type;
using iterator = typename __base_type::iterator;
using const_iterator = typename __base_type::const_iterator;
using __ireturn_type = typename __base_type::__ireturn_type;
using __unique_keys = typename __base_type::__unique_keys;
using __ireturn_type = typename __hashtable_base::__ireturn_type;
using __hashtable = typename __base_type::__hashtable;
using __node_gen_type = typename __base_type::__node_gen_type;
@ -1176,21 +1182,11 @@ namespace __detail
* is inherited in some cases by the _Local_iterator_base type used
* to implement local_iterator and const_local_iterator. As with
* any iterator type we prefer to make it as small as possible.
*
* Primary template is unused except as a hook for specializations.
*/
template<typename _Key, typename _Value, typename _ExtractKey,
typename _Hash, typename _RangeHash, typename _Unused,
bool __cache_hash_code>
struct _Hash_code_base;
/// Specialization: hash function and range-hashing function, no
/// caching of hash codes.
/// Provides typedef and accessor required by C++ 11.
template<typename _Key, typename _Value, typename _ExtractKey,
typename _Hash, typename _RangeHash, typename _Unused>
struct _Hash_code_base<_Key, _Value, _ExtractKey, _Hash, _RangeHash,
_Unused, false>
struct _Hash_code_base
: private _Hashtable_ebo_helper<1, _Hash>
{
private:
@ -1209,7 +1205,6 @@ namespace __detail
protected:
typedef std::size_t __hash_code;
typedef _Hash_node<_Value, false> __node_type;
// We need the default constructor for the local iterators and _Hashtable
// default constructor.
@ -1229,83 +1224,40 @@ namespace __detail
{ return _RangeHash{}(__c, __bkt_count); }
std::size_t
_M_bucket_index(const __node_type* __p, std::size_t __bkt_count) const
_M_bucket_index(const _Hash_node_value<_Value, false>& __n,
std::size_t __bkt_count) const
noexcept( noexcept(declval<const _Hash&>()(declval<const _Key&>()))
&& noexcept(declval<const _RangeHash&>()((__hash_code)0,
(std::size_t)0)) )
{
return _RangeHash{}(_M_hash()(_ExtractKey{}(__p->_M_v())),
return _RangeHash{}(_M_hash_code(_ExtractKey{}(__n._M_v())),
__bkt_count);
}
void
_M_store_code(__node_type*, __hash_code) const
{ }
void
_M_copy_code(__node_type*, const __node_type*) const
{ }
void
_M_swap(_Hash_code_base& __x)
{ std::swap(__ebo_hash::_M_get(), __x.__ebo_hash::_M_get()); }
const _Hash&
_M_hash() const { return __ebo_hash::_M_cget(); }
};
/// Specialization: hash function and range-hashing function,
/// caching hash codes. H is provided but ignored. Provides
/// typedef and accessor required by C++ 11.
template<typename _Key, typename _Value, typename _ExtractKey,
typename _Hash, typename _RangeHash, typename _Unused>
struct _Hash_code_base<_Key, _Value, _ExtractKey, _Hash, _RangeHash,
_Unused, true>
: private _Hashtable_ebo_helper<1, _Hash>
{
private:
using __ebo_hash = _Hashtable_ebo_helper<1, _Hash>;
public:
typedef _Hash hasher;
hasher
hash_function() const
{ return _M_hash(); }
protected:
typedef std::size_t __hash_code;
typedef _Hash_node<_Value, true> __node_type;
// We need the default constructor for _Hashtable default constructor.
_Hash_code_base() = default;
_Hash_code_base(const _Hash& __hash) : __ebo_hash(__hash) { }
__hash_code
_M_hash_code(const _Key& __k) const
{
static_assert(__is_invocable<const _Hash&, const _Key&>{},
"hash function must be invocable with an argument of key type");
return _M_hash()(__k);
}
std::size_t
_M_bucket_index(__hash_code __c, std::size_t __bkt_count) const
{ return _RangeHash{}(__c, __bkt_count); }
std::size_t
_M_bucket_index(const __node_type* __p, std::size_t __bkt_count) const
_M_bucket_index(const _Hash_node_value<_Value, true>& __n,
std::size_t __bkt_count) const
noexcept( noexcept(declval<const _RangeHash&>()((__hash_code)0,
(std::size_t)0)) )
{ return _RangeHash{}(__p->_M_hash_code, __bkt_count); }
{ return _RangeHash{}(__n._M_hash_code, __bkt_count); }
void
_M_store_code(__node_type* __n, __hash_code __c) const
{ __n->_M_hash_code = __c; }
_M_store_code(_Hash_node_code_cache<false>&, __hash_code) const
{ }
void
_M_copy_code(__node_type* __to, const __node_type* __from) const
{ __to->_M_hash_code = __from->_M_hash_code; }
_M_copy_code(_Hash_node_code_cache<false>&,
const _Hash_node_code_cache<false>&) const
{ }
void
_M_store_code(_Hash_node_code_cache<true>& __n, __hash_code __c) const
{ __n._M_hash_code = __c; }
void
_M_copy_code(_Hash_node_code_cache<true>& __to,
const _Hash_node_code_cache<true>& __from) const
{ __to._M_hash_code = __from._M_hash_code; }
void
_M_swap(_Hash_code_base& __x)
@ -1421,7 +1373,7 @@ namespace __detail
}
_Local_iterator_base(const _Local_iterator_base& __iter)
: __node_iter_base(__iter), _M_bucket(__iter._M_bucket)
: __node_iter_base(__iter._M_cur), _M_bucket(__iter._M_bucket)
, _M_bucket_count(__iter._M_bucket_count)
{
if (_M_bucket_count != -1)
@ -1447,7 +1399,7 @@ namespace __detail
__node_iter_base::_M_incr();
if (this->_M_cur)
{
std::size_t __bkt = this->_M_h()->_M_bucket_index(this->_M_cur,
std::size_t __bkt = this->_M_h()->_M_bucket_index(*this->_M_cur,
_M_bucket_count);
if (__bkt != _M_bucket)
this->_M_cur = nullptr;
@ -1485,10 +1437,10 @@ namespace __detail
public:
typedef _Value value_type;
typedef typename std::conditional<__constant_iterators,
const _Value*, _Value*>::type
const value_type*, value_type*>::type
pointer;
typedef typename std::conditional<__constant_iterators,
const _Value&, _Value&>::type
const value_type&, value_type&>::type
reference;
typedef std::ptrdiff_t difference_type;
typedef std::forward_iterator_tag iterator_category;
@ -1540,8 +1492,8 @@ namespace __detail
public:
typedef _Value value_type;
typedef const _Value* pointer;
typedef const _Value& reference;
typedef const value_type* pointer;
typedef const value_type& reference;
typedef std::ptrdiff_t difference_type;
typedef std::forward_iterator_tag iterator_category;
@ -1600,110 +1552,80 @@ namespace __detail
struct _Hashtable_base
: public _Hash_code_base<_Key, _Value, _ExtractKey, _Hash, _RangeHash,
_Unused, _Traits::__hash_cached::value>,
private _Hashtable_ebo_helper<0, _Equal>
{
public:
typedef _Key key_type;
typedef _Value value_type;
typedef _Equal key_equal;
typedef std::size_t size_type;
typedef std::ptrdiff_t difference_type;
using __traits_type = _Traits;
using __hash_cached = typename __traits_type::__hash_cached;
using __constant_iterators = typename __traits_type::__constant_iterators;
using __unique_keys = typename __traits_type::__unique_keys;
using __hash_code_base = _Hash_code_base<_Key, _Value, _ExtractKey,
_Hash, _RangeHash, _Unused,
__hash_cached::value>;
using __hash_code = typename __hash_code_base::__hash_code;
using __node_type = typename __hash_code_base::__node_type;
using iterator = _Node_iterator<value_type,
__constant_iterators::value,
__hash_cached::value>;
using const_iterator = _Node_const_iterator<value_type,
__constant_iterators::value,
__hash_cached::value>;
using local_iterator = _Local_iterator<key_type, value_type,
_ExtractKey, _Hash, _RangeHash, _Unused,
__constant_iterators::value,
__hash_cached::value>;
using const_local_iterator = _Local_const_iterator<key_type, value_type,
_ExtractKey, _Hash, _RangeHash, _Unused,
__constant_iterators::value,
__hash_cached::value>;
using __ireturn_type = typename std::conditional<__unique_keys::value,
std::pair<iterator, bool>,
iterator>::type;
private:
using _EqualEBO = _Hashtable_ebo_helper<0, _Equal>;
template<typename _NodeT>
struct _Equal_hash_code
{
static bool
_S_equals(__hash_code, const _NodeT&)
{ return true; }
static bool
_S_node_equals(const _NodeT&, const _NodeT&)
{ return true; }
};
template<typename _Ptr2>
struct _Equal_hash_code<_Hash_node<_Ptr2, true>>
{
static bool
_S_equals(__hash_code __c, const _Hash_node<_Ptr2, true>& __n)
{ return __c == __n._M_hash_code; }
static bool
_S_node_equals(const _Hash_node<_Ptr2, true>& __lhn,
const _Hash_node<_Ptr2, true>& __rhn)
{ return __lhn._M_hash_code == __rhn._M_hash_code; }
};
protected:
_Hashtable_base() = default;
_Hashtable_base(const _Hash& __hash, const _Equal& __eq)
: __hash_code_base(__hash), _EqualEBO(__eq)
{ }
bool
_M_equals(const _Key& __k, __hash_code __c, const __node_type* __n) const
private _Hashtable_ebo_helper<0, _Equal>
{
static_assert(__is_invocable<const _Equal&, const _Key&, const _Key&>{},
public:
typedef _Key key_type;
typedef _Value value_type;
typedef _Equal key_equal;
typedef std::size_t size_type;
typedef std::ptrdiff_t difference_type;
using __traits_type = _Traits;
using __hash_cached = typename __traits_type::__hash_cached;
using __hash_code_base = _Hash_code_base<_Key, _Value, _ExtractKey,
_Hash, _RangeHash, _Unused,
__hash_cached::value>;
using __hash_code = typename __hash_code_base::__hash_code;
private:
using _EqualEBO = _Hashtable_ebo_helper<0, _Equal>;
static bool
_S_equals(__hash_code, const _Hash_node_code_cache<false>&)
{ return true; }
static bool
_S_node_equals(const _Hash_node_code_cache<false>&,
const _Hash_node_code_cache<false>&)
{ return true; }
static bool
_S_equals(__hash_code __c, const _Hash_node_code_cache<true>& __n)
{ return __c == __n._M_hash_code; }
static bool
_S_node_equals(const _Hash_node_code_cache<true>& __lhn,
const _Hash_node_code_cache<true>& __rhn)
{ return __lhn._M_hash_code == __rhn._M_hash_code; }
protected:
_Hashtable_base() = default;
_Hashtable_base(const _Hash& __hash, const _Equal& __eq)
: __hash_code_base(__hash), _EqualEBO(__eq)
{ }
bool
_M_equals(const _Key& __k, __hash_code __c,
const _Hash_node_value<_Value, __hash_cached::value>& __n) const
{
static_assert(__is_invocable<const _Equal&, const _Key&, const _Key&>{},
"key equality predicate must be invocable with two arguments of "
"key type");
return _Equal_hash_code<__node_type>::_S_equals(__c, *__n)
&& _M_eq()(__k, _ExtractKey{}(__n->_M_v()));
}
return _S_equals(__c, __n) && _M_eq()(__k, _ExtractKey{}(__n._M_v()));
}
bool
_M_node_equals(const __node_type* __lhn, const __node_type* __rhn) const
{
return _Equal_hash_code<__node_type>::_S_node_equals(*__lhn, *__rhn)
&& _M_eq()(_ExtractKey{}(__lhn->_M_v()),
_ExtractKey{}(__rhn->_M_v()));
}
bool
_M_node_equals(
const _Hash_node_value<_Value, __hash_cached::value>& __lhn,
const _Hash_node_value<_Value, __hash_cached::value>& __rhn) const
{
return _S_node_equals(__lhn, __rhn)
&& _M_eq()(_ExtractKey{}(__lhn._M_v()), _ExtractKey{}(__rhn._M_v()));
}
void
_M_swap(_Hashtable_base& __x)
{
__hash_code_base::_M_swap(__x);
std::swap(_EqualEBO::_M_get(), __x._EqualEBO::_M_get());
}
void
_M_swap(_Hashtable_base& __x)
{
__hash_code_base::_M_swap(__x);
std::swap(_EqualEBO::_M_get(), __x._EqualEBO::_M_get());
}
const _Equal&
_M_eq() const { return _EqualEBO::_M_cget(); }
};
const _Equal&
_M_eq() const { return _EqualEBO::_M_cget(); }
};
/**
* Primary class template _Equality.
@ -1745,7 +1667,6 @@ namespace __detail
_Hash, _RangeHash, _Unused, _RehashPolicy, _Traits, true>::
_M_equal(const __hashtable& __other) const
{
using __node_base = typename __hashtable::__node_base;
using __node_type = typename __hashtable::__node_type;
const __hashtable* __this = static_cast<const __hashtable*>(this);
if (__this->size() != __other.size())
@ -1753,8 +1674,8 @@ namespace __detail
for (auto __itx = __this->begin(); __itx != __this->end(); ++__itx)
{
std::size_t __ybkt = __other._M_bucket_index(__itx._M_cur);
__node_base* __prev_n = __other._M_buckets[__ybkt];
std::size_t __ybkt = __other._M_bucket_index(*__itx._M_cur);
auto __prev_n = __other._M_buckets[__ybkt];
if (!__prev_n)
return false;
@ -1765,7 +1686,7 @@ namespace __detail
break;
if (!__n->_M_nxt
|| __other._M_bucket_index(__n->_M_next()) != __ybkt)
|| __other._M_bucket_index(*__n->_M_next()) != __ybkt)
return false;
}
}
@ -1798,7 +1719,6 @@ namespace __detail
_Hash, _RangeHash, _Unused, _RehashPolicy, _Traits, false>::
_M_equal(const __hashtable& __other) const
{
using __node_base = typename __hashtable::__node_base;
using __node_type = typename __hashtable::__node_type;
const __hashtable* __this = static_cast<const __hashtable*>(this);
if (__this->size() != __other.size())
@ -1814,8 +1734,8 @@ namespace __detail
++__itx_end)
++__x_count;
std::size_t __ybkt = __other._M_bucket_index(__itx._M_cur);
__node_base* __y_prev_n = __other._M_buckets[__ybkt];
std::size_t __ybkt = __other._M_bucket_index(*__itx._M_cur);
auto __y_prev_n = __other._M_buckets[__ybkt];
if (!__y_prev_n)
return false;
@ -1826,12 +1746,12 @@ namespace __detail
_ExtractKey{}(*__itx)))
break;
__node_type* __y_ref_n = __y_n;
auto __y_ref_n = __y_n;
for (__y_n = __y_n->_M_next(); __y_n; __y_n = __y_n->_M_next())
if (!__other._M_node_equals(__y_ref_n, __y_n))
if (!__other._M_node_equals(*__y_ref_n, *__y_n))
break;
if (!__y_n || __other._M_bucket_index(__y_n) != __ybkt)
if (!__y_n || __other._M_bucket_index(*__y_n) != __ybkt)
return false;
}
@ -1869,11 +1789,13 @@ namespace __detail
using __value_alloc_traits = typename __node_alloc_traits::template
rebind_traits<typename __node_type::value_type>;
using __node_base = __detail::_Hash_node_base;
using __bucket_type = __node_base*;
using __bucket_alloc_type =
__alloc_rebind<__node_alloc_type, __bucket_type>;
using __bucket_alloc_traits = std::allocator_traits<__bucket_alloc_type>;
using __node_ptr = __node_type*;
using __node_base = _Hash_node_base;
using __node_base_ptr = __node_base*;
using __buckets_alloc_type =
__alloc_rebind<__node_alloc_type, __node_base_ptr>;
using __buckets_alloc_traits = std::allocator_traits<__buckets_alloc_type>;
using __buckets_ptr = __node_base_ptr*;
_Hashtable_alloc() = default;
_Hashtable_alloc(const _Hashtable_alloc&) = default;
@ -1894,27 +1816,27 @@ namespace __detail
// Allocate a node and construct an element within it.
template<typename... _Args>
__node_type*
__node_ptr
_M_allocate_node(_Args&&... __args);
// Destroy the element within a node and deallocate the node.
void
_M_deallocate_node(__node_type* __n);
_M_deallocate_node(__node_ptr __n);
// Deallocate a node.
void
_M_deallocate_node_ptr(__node_type* __n);
_M_deallocate_node_ptr(__node_ptr __n);
// Deallocate the linked list of nodes pointed to by __n.
// The elements within the nodes are destroyed.
void
_M_deallocate_nodes(__node_type* __n);
_M_deallocate_nodes(__node_ptr __n);
__bucket_type*
__buckets_ptr
_M_allocate_buckets(std::size_t __bkt_count);
void
_M_deallocate_buckets(__bucket_type*, std::size_t __bkt_count);
_M_deallocate_buckets(__buckets_ptr, std::size_t __bkt_count);
};
// Definitions of class template _Hashtable_alloc's out-of-line member
@ -1923,10 +1845,10 @@ namespace __detail
template<typename... _Args>
auto
_Hashtable_alloc<_NodeAlloc>::_M_allocate_node(_Args&&... __args)
-> __node_type*
-> __node_ptr
{
auto __nptr = __node_alloc_traits::allocate(_M_node_allocator(), 1);
__node_type* __n = std::__to_address(__nptr);
__node_ptr __n = std::__to_address(__nptr);
__try
{
::new ((void*)__n) __node_type;
@ -1944,7 +1866,7 @@ namespace __detail
template<typename _NodeAlloc>
void
_Hashtable_alloc<_NodeAlloc>::_M_deallocate_node(__node_type* __n)
_Hashtable_alloc<_NodeAlloc>::_M_deallocate_node(__node_ptr __n)
{
__node_alloc_traits::destroy(_M_node_allocator(), __n->_M_valptr());
_M_deallocate_node_ptr(__n);
@ -1952,7 +1874,7 @@ namespace __detail
template<typename _NodeAlloc>
void
_Hashtable_alloc<_NodeAlloc>::_M_deallocate_node_ptr(__node_type* __n)
_Hashtable_alloc<_NodeAlloc>::_M_deallocate_node_ptr(__node_ptr __n)
{
typedef typename __node_alloc_traits::pointer _Ptr;
auto __ptr = std::pointer_traits<_Ptr>::pointer_to(*__n);
@ -1962,37 +1884,39 @@ namespace __detail
template<typename _NodeAlloc>
void
_Hashtable_alloc<_NodeAlloc>::_M_deallocate_nodes(__node_type* __n)
_Hashtable_alloc<_NodeAlloc>::_M_deallocate_nodes(__node_ptr __n)
{
while (__n)
{
__node_type* __tmp = __n;
__node_ptr __tmp = __n;
__n = __n->_M_next();
_M_deallocate_node(__tmp);
}
}
template<typename _NodeAlloc>
typename _Hashtable_alloc<_NodeAlloc>::__bucket_type*
auto
_Hashtable_alloc<_NodeAlloc>::_M_allocate_buckets(std::size_t __bkt_count)
-> __buckets_ptr
{
__bucket_alloc_type __alloc(_M_node_allocator());
__buckets_alloc_type __alloc(_M_node_allocator());
auto __ptr = __bucket_alloc_traits::allocate(__alloc, __bkt_count);
__bucket_type* __p = std::__to_address(__ptr);
__builtin_memset(__p, 0, __bkt_count * sizeof(__bucket_type));
auto __ptr = __buckets_alloc_traits::allocate(__alloc, __bkt_count);
__buckets_ptr __p = std::__to_address(__ptr);
__builtin_memset(__p, 0, __bkt_count * sizeof(__node_base_ptr));
return __p;
}
template<typename _NodeAlloc>
void
_Hashtable_alloc<_NodeAlloc>::_M_deallocate_buckets(__bucket_type* __bkts,
std::size_t __bkt_count)
_Hashtable_alloc<_NodeAlloc>::
_M_deallocate_buckets(__buckets_ptr __bkts,
std::size_t __bkt_count)
{
typedef typename __bucket_alloc_traits::pointer _Ptr;
typedef typename __buckets_alloc_traits::pointer _Ptr;
auto __ptr = std::pointer_traits<_Ptr>::pointer_to(*__bkts);
__bucket_alloc_type __alloc(_M_node_allocator());
__bucket_alloc_traits::deallocate(__alloc, __ptr, __bkt_count);
__buckets_alloc_type __alloc(_M_node_allocator());
__buckets_alloc_traits::deallocate(__alloc, __ptr, __bkt_count);
}
//@} hashtable-detail