hashtable_policy.h (_Local_iterator_base): Use _Hashtable_ebo_helper to embed functors into the local_iterator when necessary.

2013-01-28  François Dumont  <fdumont@gcc.gnu.org>

	* include/bits/hashtable_policy.h (_Local_iterator_base): Use
	_Hashtable_ebo_helper to embed functors into the local_iterator
	when necessary. Pass information about functors involved in hash
	code by copy.
	* include/bits/hashtable.h (__cache_default): Do not cache for
	builtin integral types unless the hash functor is not noexcept
	qualified or is not default constructible. Adapt static assertions
	and local iterator instantiations.
	* include/debug/unordered_set
	(std::__debug::unordered_set<>::erase): Detect local iterators to
	invalidate using contained node rather than generating a dummy
	local_iterator instance.
	(std::__debug::unordered_multiset<>::erase): Likewise.
	* include/debug/unordered_map
	(std::__debug::unordered_map<>::erase): Likewise.
	(std::__debug::unordered_multimap<>::erase): Likewise.
	* testsuite/performance/23_containers/insert_erase/41975.cc: Test
	std::tr1 and std versions of unordered_set regardless of any
	macro. Add test on default cache behavior.
	* testsuite/performance/23_containers/insert/54075.cc: Likewise.
	* testsuite/23_containers/unordered_set/instantiation_neg.cc:
	Adapt line number.
	* testsuite/23_containers/unordered_set/
	not_default_constructible_hash_neg.cc: New.
	* testsuite/23_containers/unordered_set/buckets/swap.cc: New.

From-SVN: r195517
François Dumont, 2013-01-28 20:52:13 +00:00; commit 5b3be7cf99, parent 5a579c3b65; 10 changed files with 444 additions and 262 deletions
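Before the per-file diffs, a minimal sketch of the empty-base-optimization trick that _Hashtable_ebo_helper relies on and that this commit extends to the local iterators. The ebo_helper and local_iterator_sketch types below are hypothetical stand-ins written for illustration, not the libstdc++ classes; the real helper additionally rejects final classes.

// Hypothetical sketch of the EBO idea behind _Hashtable_ebo_helper: embed a
// functor into an iterator without growing it when the functor is empty.
#include <functional>
#include <iostream>
#include <type_traits>

template<typename F, bool = std::is_empty<F>::value>
struct ebo_helper : private F              // empty functor: inherit, no storage
{
  ebo_helper(const F& f) : F(f) { }
  const F& get() const { return *this; }
};

template<typename F>
struct ebo_helper<F, false>                // stateful functor: keep it as a member
{
  ebo_helper(const F& f) : _M_f(f) { }
  const F& get() const { return _M_f; }
  F _M_f;
};

// An iterator-like type that carries the bucket-mapping functor by copy.
struct local_iterator_sketch : private ebo_helper<std::hash<int>>
{
  local_iterator_sketch(const std::hash<int>& h, const int* p)
  : ebo_helper<std::hash<int>>(h), _M_cur(p) { }

  const int* _M_cur;
};

int main()
{
  // std::hash<int> is empty, so the iterator stays pointer-sized.
  std::cout << sizeof(local_iterator_sketch) << " == "
            << sizeof(const int*) << '\n';
}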


@ -1,3 +1,31 @@
2013-01-28 François Dumont <fdumont@gcc.gnu.org>
* include/bits/hashtable_policy.h (_Local_iterator_base): Use
_Hashtable_ebo_helper to embed functors into the local_iterator
when necessary. Pass information about functors involved in hash
code by copy.
* include/bits/hashtable.h (__cache_default): Do not cache for
builtin integral types unless the hash functor is not noexcept
qualified or is not default constructible. Adapt static assertions
and local iterator instantiations.
* include/debug/unordered_set
(std::__debug::unordered_set<>::erase): Detect local iterators to
invalidate using contained node rather than generating a dummy
local_iterator instance.
(std::__debug::unordered_multiset<>::erase): Likewise.
* include/debug/unordered_map
(std::__debug::unordered_map<>::erase): Likewise.
(std::__debug::unordered_multimap<>::erase): Likewise.
* testsuite/performance/23_containers/insert_erase/41975.cc: Test
std::tr1 and std versions of unordered_set regardless of any
macro. Add test on default cache behavior.
* testsuite/performance/23_containers/insert/54075.cc: Likewise.
* testsuite/23_containers/unordered_set/instantiation_neg.cc:
Adapt line number.
* testsuite/23_containers/unordered_set/
not_default_constructible_hash_neg.cc: New.
* testsuite/23_containers/unordered_set/buckets/swap.cc: New.
2013-01-24 Paolo Carlini <paolo.carlini@oracle.com>
PR libstdc++/56085


@ -1,6 +1,6 @@
// hashtable.h header -*- C++ -*-
// Copyright (C) 2007-2012 Free Software Foundation, Inc.
// Copyright (C) 2007-2013 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
@ -39,10 +39,15 @@ namespace std _GLIBCXX_VISIBILITY(default)
_GLIBCXX_BEGIN_NAMESPACE_VERSION
template<typename _Tp, typename _Hash>
using __cache_default = __not_<__and_<is_integral<_Tp>,
is_empty<_Hash>,
integral_constant<bool, !__is_final(_Hash)>,
__detail::__is_noexcept_hash<_Tp, _Hash> >>;
using __cache_default
= __not_<__and_<// Do not cache for builtin integral types having trivial
// hasher.
is_integral<_Tp>,
// Mandatory to make local_iterator default
// constructible.
is_default_constructible<_Hash>,
// Mandatory to have erase not throwing.
__detail::__is_noexcept_hash<_Tp, _Hash>>>;
/**
* Primary class template _Hashtable.
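The __cache_default condition above replaces the old is_empty/__is_final test with default constructibility. A stand-alone approximation of the new rule, written for illustration only (it is not the real std::__cache_default) and assuming, as libstdc++ does, that std::hash<int> is noexcept:

// Skip caching only for integral keys whose hasher is default constructible
// (so local_iterator can be default constructed) and noexcept (so erase
// cannot throw); otherwise cache the hash code in the node.
#include <cstddef>
#include <functional>
#include <type_traits>
#include <utility>

template<typename Tp, typename Hash>
struct cache_default_sketch
: std::integral_constant<bool,
    !(std::is_integral<Tp>::value
      && std::is_default_constructible<Hash>::value
      && noexcept(std::declval<const Hash&>()(std::declval<const Tp&>())))>
{ };

struct seeded_hash                         // stateful: not default constructible
{
  seeded_hash(std::size_t seed) : _M_seed(seed) { }
  std::size_t operator()(int val) const noexcept { return val ^ _M_seed; }
  std::size_t _M_seed;
};

static_assert(!cache_default_sketch<int, std::hash<int>>::value,
              "trivial integral hashing: no caching by default");
static_assert(cache_default_sketch<int, seeded_hash>::value,
              "non-default-constructible hasher: cache by default");

int main() { }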
@ -249,21 +254,21 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
" or qualify your hash functor with noexcept");
// Following two static assertions are necessary to guarantee
// that swapping two hashtable instances won't invalidate
// associated local iterators.
// that local_iterator will be default constructible.
// When hash codes are cached local iterator only uses H2 which
// must then be empty.
static_assert(__if_hash_cached<is_empty<_H2>>::value,
// When hash codes are cached local iterator inherits from H2 functor
// which must then be default constructible.
static_assert(__if_hash_cached<is_default_constructible<_H2>>::value,
"Functor used to map hash code to bucket index"
" must be empty");
" must be default constructible");
// When hash codes are not cached local iterator is going to use
// __hash_code_base above to compute node bucket index so it has
// to be empty.
static_assert(__if_hash_not_cached<is_empty<__hash_code_base>>::value,
"Cache the hash code or make functors involved in hash code"
" and bucket index computation empty");
// When hash codes are not cached local iterator inherits from
// __hash_code_base above to compute node bucket index so it has to be
// default constructible.
static_assert(__if_hash_not_cached<
is_default_constructible<__hash_code_base>>::value,
"Cache the hash code or make functors involved in hash code"
" and bucket index computation default constructible");
public:
template<typename _Keya, typename _Valuea, typename _Alloca,
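A short user-level illustration of what the reworded assertions protect: local iterators are forward iterators, so they must be default constructible, and with this commit that only holds when the functors they embed are default constructible.

// Checks, for the default configuration, that the per-bucket iterators can be
// default constructed.
#include <type_traits>
#include <unordered_set>

int main()
{
  using lit  = std::unordered_set<int>::local_iterator;
  using clit = std::unordered_set<int>::const_local_iterator;

  static_assert(std::is_default_constructible<lit>::value,
                "local_iterator must be default constructible");
  static_assert(std::is_default_constructible<clit>::value,
                "const_local_iterator must be default constructible");

  lit it;     // singular; only assignment to it is valid before first use
  (void)it;
}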
@ -500,30 +505,37 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
local_iterator
begin(size_type __n)
{ return local_iterator(_M_bucket_begin(__n), __n, _M_bucket_count); }
{
return local_iterator(*this, _M_bucket_begin(__n),
__n, _M_bucket_count);
}
local_iterator
end(size_type __n)
{ return local_iterator(nullptr, __n, _M_bucket_count); }
{ return local_iterator(*this, nullptr, __n, _M_bucket_count); }
const_local_iterator
begin(size_type __n) const
{ return const_local_iterator(_M_bucket_begin(__n), __n,
_M_bucket_count); }
{
return const_local_iterator(*this, _M_bucket_begin(__n),
__n, _M_bucket_count);
}
const_local_iterator
end(size_type __n) const
{ return const_local_iterator(nullptr, __n, _M_bucket_count); }
{ return const_local_iterator(*this, nullptr, __n, _M_bucket_count); }
// DR 691.
const_local_iterator
cbegin(size_type __n) const
{ return const_local_iterator(_M_bucket_begin(__n), __n,
_M_bucket_count); }
{
return const_local_iterator(*this, _M_bucket_begin(__n),
__n, _M_bucket_count);
}
const_local_iterator
cend(size_type __n) const
{ return const_local_iterator(nullptr, __n, _M_bucket_count); }
{ return const_local_iterator(*this, nullptr, __n, _M_bucket_count); }
float
load_factor() const noexcept
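The begin(__n)/end(__n) overloads above now pass *this so the local iterator can copy the functors it needs; their observable behaviour is unchanged. A small usage example:

// Walking the contents of every bucket with the per-bucket overloads.
#include <cstddef>
#include <iostream>
#include <unordered_set>

int main()
{
  std::unordered_set<int> s{1, 2, 3, 4, 5, 42};
  for (std::size_t b = 0; b != s.bucket_count(); ++b)
    {
      std::cout << "bucket " << b << ':';
      for (auto it = s.begin(b); it != s.end(b); ++it)   // local_iterator
        std::cout << ' ' << *it;
      std::cout << '\n';
    }
}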
@ -1141,7 +1153,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
{
if (this->_M_equals(__k, __code, __p))
return __prev_p;
if (!(__p->_M_nxt) || _M_bucket_index(__p->_M_next()) != __n)
if (!__p->_M_nxt || _M_bucket_index(__p->_M_next()) != __n)
break;
__prev_p = __p;
}


@ -1,6 +1,6 @@
// Internal policy header for unordered_set and unordered_map -*- C++ -*-
// Copyright (C) 2010, 2011, 2012 Free Software Foundation, Inc.
// Copyright (C) 2010-2013 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
@ -202,7 +202,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
template<typename _Value, bool _Cache_hash_code>
struct _Node_iterator_base
{
typedef _Hash_node<_Value, _Cache_hash_code> __node_type;
using __node_type = _Hash_node<_Value, _Cache_hash_code>;
__node_type* _M_cur;
@ -282,7 +282,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
struct _Node_const_iterator
: public _Node_iterator_base<_Value, __cache>
{
private:
private:
using __base_type = _Node_iterator_base<_Value, __cache>;
using __node_type = typename __base_type::__node_type;
@ -940,6 +940,17 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
_Tp _M_tp;
};
/**
* Primary class template _Local_iterator_base.
*
* Base class for local iterators, used to iterate within a bucket
* but not between buckets.
*/
template<typename _Key, typename _Value, typename _ExtractKey,
typename _H1, typename _H2, typename _Hash,
bool __cache_hash_code>
struct _Local_iterator_base;
/**
* Primary class template _Hash_code_base.
*
@ -974,8 +985,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
private _Hashtable_ebo_helper<1, _Hash>
{
private:
typedef _Hashtable_ebo_helper<0, _ExtractKey> _EboExtractKey;
typedef _Hashtable_ebo_helper<1, _Hash> _EboHash;
using __ebo_extract_key = _Hashtable_ebo_helper<0, _ExtractKey>;
using __ebo_hash = _Hashtable_ebo_helper<1, _Hash>;
protected:
typedef void* __hash_code;
@ -986,7 +997,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
_Hash_code_base(const _ExtractKey& __ex, const _H1&, const _H2&,
const _Hash& __h)
: _EboExtractKey(__ex), _EboHash(__h) { }
: __ebo_extract_key(__ex), __ebo_hash(__h) { }
__hash_code
_M_hash_code(const _Key& __key) const
@ -1017,16 +1028,16 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
protected:
const _ExtractKey&
_M_extract() const { return _EboExtractKey::_S_cget(*this); }
_M_extract() const { return __ebo_extract_key::_S_cget(*this); }
_ExtractKey&
_M_extract() { return _EboExtractKey::_S_get(*this); }
_M_extract() { return __ebo_extract_key::_S_get(*this); }
const _Hash&
_M_ranged_hash() const { return _EboHash::_S_cget(*this); }
_M_ranged_hash() const { return __ebo_hash::_S_cget(*this); }
_Hash&
_M_ranged_hash() { return _EboHash::_S_get(*this); }
_M_ranged_hash() { return __ebo_hash::_S_get(*this); }
};
// No specialization for ranged hash function while caching hash codes.
@ -1041,7 +1052,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
/// Specialization: hash function and range-hashing function, no
/// caching of hash codes.
/// Provides typedef and accessor required by TR1.
/// Provides typedef and accessor required by C++ 11.
template<typename _Key, typename _Value, typename _ExtractKey,
typename _H1, typename _H2>
struct _Hash_code_base<_Key, _Value, _ExtractKey, _H1, _H2,
@ -1051,9 +1062,9 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
private _Hashtable_ebo_helper<2, _H2>
{
private:
typedef _Hashtable_ebo_helper<0, _ExtractKey> _EboExtractKey;
typedef _Hashtable_ebo_helper<1, _H1> _EboH1;
typedef _Hashtable_ebo_helper<2, _H2> _EboH2;
using __ebo_extract_key = _Hashtable_ebo_helper<0, _ExtractKey>;
using __ebo_h1 = _Hashtable_ebo_helper<1, _H1>;
using __ebo_h2 = _Hashtable_ebo_helper<2, _H2>;
public:
typedef _H1 hasher;
@ -1062,17 +1073,17 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
hash_function() const
{ return _M_h1(); }
protected:
typedef std::size_t __hash_code;
typedef _Hash_node<_Value, false> __node_type;
protected:
// We need the default constructor for the local iterators.
_Hash_code_base() = default;
_Hash_code_base(const _ExtractKey& __ex,
const _H1& __h1, const _H2& __h2,
const _Default_ranged_hash&)
: _EboExtractKey(__ex), _EboH1(__h1), _EboH2(__h2) { }
: __ebo_extract_key(__ex), __ebo_h1(__h1), __ebo_h2(__h2) { }
__hash_code
_M_hash_code(const _Key& __k) const
@ -1104,27 +1115,27 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
}
const _ExtractKey&
_M_extract() const { return _EboExtractKey::_S_cget(*this); }
_M_extract() const { return __ebo_extract_key::_S_cget(*this); }
_ExtractKey&
_M_extract() { return _EboExtractKey::_S_get(*this); }
_M_extract() { return __ebo_extract_key::_S_get(*this); }
const _H1&
_M_h1() const { return _EboH1::_S_cget(*this); }
_M_h1() const { return __ebo_h1::_S_cget(*this); }
_H1&
_M_h1() { return _EboH1::_S_get(*this); }
_M_h1() { return __ebo_h1::_S_get(*this); }
const _H2&
_M_h2() const { return _EboH2::_S_cget(*this); }
_M_h2() const { return __ebo_h2::_S_cget(*this); }
_H2&
_M_h2() { return _EboH2::_S_get(*this); }
_M_h2() { return __ebo_h2::_S_get(*this); }
};
/// Specialization: hash function and range-hashing function,
/// caching hash codes. H is provided but ignored. Provides
/// typedef and accessor required by TR1.
/// typedef and accessor required by C++ 11.
template<typename _Key, typename _Value, typename _ExtractKey,
typename _H1, typename _H2>
struct _Hash_code_base<_Key, _Value, _ExtractKey, _H1, _H2,
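For readers unfamiliar with the naming used throughout _Hash_code_base: _H1 maps a key to a hash code and _H2 maps that code to a bucket index (the default _H2, _Mod_range_hashing, is essentially a modulo). A hypothetical stand-alone illustration of the split:

// H1: key -> hash code; H2: hash code -> bucket index.
#include <cstddef>
#include <functional>
#include <initializer_list>
#include <iostream>

int main()
{
  std::hash<int> h1;                                    // role of _H1
  auto h2 = [](std::size_t code, std::size_t nbkt)      // role of _H2
            { return code % nbkt; };

  const std::size_t buckets = 11;
  for (int key : {1, 42, 1000})
    std::cout << "key " << key << " -> bucket "
              << h2(h1(key), buckets) << '\n';
}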
@ -1134,9 +1145,13 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
private _Hashtable_ebo_helper<2, _H2>
{
private:
typedef _Hashtable_ebo_helper<0, _ExtractKey> _EboExtractKey;
typedef _Hashtable_ebo_helper<1, _H1> _EboH1;
typedef _Hashtable_ebo_helper<2, _H2> _EboH2;
// Gives access to _M_h2() to the local iterator implementation.
friend struct _Local_iterator_base<_Key, _Value, _ExtractKey, _H1, _H2,
_Default_ranged_hash, true>;
using __ebo_extract_key = _Hashtable_ebo_helper<0, _ExtractKey>;
using __ebo_h1 = _Hashtable_ebo_helper<1, _H1>;
using __ebo_h2 = _Hashtable_ebo_helper<2, _H2>;
public:
typedef _H1 hasher;
@ -1145,14 +1160,14 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
hash_function() const
{ return _M_h1(); }
protected:
typedef std::size_t __hash_code;
typedef _Hash_node<_Value, true> __node_type;
protected:
_Hash_code_base(const _ExtractKey& __ex,
const _H1& __h1, const _H2& __h2,
const _Default_ranged_hash&)
: _EboExtractKey(__ex), _EboH1(__h1), _EboH2(__h2) { }
: __ebo_extract_key(__ex), __ebo_h1(__h1), __ebo_h2(__h2) { }
__hash_code
_M_hash_code(const _Key& __k) const
@ -1184,22 +1199,22 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
}
const _ExtractKey&
_M_extract() const { return _EboExtractKey::_S_cget(*this); }
_M_extract() const { return __ebo_extract_key::_S_cget(*this); }
_ExtractKey&
_M_extract() { return _EboExtractKey::_S_get(*this); }
_M_extract() { return __ebo_extract_key::_S_get(*this); }
const _H1&
_M_h1() const { return _EboH1::_S_cget(*this); }
_M_h1() const { return __ebo_h1::_S_cget(*this); }
_H1&
_M_h1() { return _EboH1::_S_get(*this); }
_M_h1() { return __ebo_h1::_S_get(*this); }
const _H2&
_M_h2() const { return _EboH2::_S_cget(*this); }
_M_h2() const { return __ebo_h2::_S_cget(*this); }
_H2&
_M_h2() { return _EboH2::_S_get(*this); }
_M_h2() { return __ebo_h2::_S_get(*this); }
};
/**
@ -1234,28 +1249,25 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
};
/**
* Primary class template _Local_iterator_base.
*
* Base class for local iterators, used to iterate within a bucket
* but not between buckets.
*/
template<typename _Key, typename _Value, typename _ExtractKey,
typename _H1, typename _H2, typename _Hash,
bool __cache_hash_code>
struct _Local_iterator_base;
/// Specialization.
template<typename _Key, typename _Value, typename _ExtractKey,
typename _H1, typename _H2, typename _Hash>
struct _Local_iterator_base<_Key, _Value, _ExtractKey,
_H1, _H2, _Hash, true>
: private _H2
: private _Hashtable_ebo_helper<0, _H2>
{
protected:
using __base_type = _Hashtable_ebo_helper<0, _H2>;
using __hash_code_base = _Hash_code_base<_Key, _Value, _ExtractKey,
_H1, _H2, _Hash, true>;
public:
_Local_iterator_base() = default;
_Local_iterator_base(_Hash_node<_Value, true>* __p,
_Local_iterator_base(const __hash_code_base& __base,
_Hash_node<_Value, true>* __p,
std::size_t __bkt, std::size_t __bkt_count)
: _M_cur(__p), _M_bucket(__bkt), _M_bucket_count(__bkt_count) { }
: __base_type(__base._M_h2()),
_M_cur(__p), _M_bucket(__bkt), _M_bucket_count(__bkt_count) { }
void
_M_incr()
@ -1263,15 +1275,14 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
_M_cur = _M_cur->_M_next();
if (_M_cur)
{
std::size_t __bkt = _M_h2()(_M_cur->_M_hash_code, _M_bucket_count);
std::size_t __bkt
= __base_type::_S_get(*this)(_M_cur->_M_hash_code,
_M_bucket_count);
if (__bkt != _M_bucket)
_M_cur = nullptr;
}
}
const _H2& _M_h2() const
{ return *this; }
_Hash_node<_Value, true>* _M_cur;
std::size_t _M_bucket;
std::size_t _M_bucket_count;
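A simplified restatement of what _M_incr above now does in the cached case, using hypothetical types rather than the real classes: the cached hash code plus the embedded copy of _H2 is enough to detect the end of the bucket, so _H1 is never invoked.

// Incrementing a local iterator when hash codes are cached in the nodes.
#include <cstddef>

struct node
{
  node*       _M_nxt;
  std::size_t _M_hash_code;   // cached, so the key never needs re-hashing here
};

struct mod_range_hash         // stand-in for the default _H2
{
  std::size_t operator()(std::size_t code, std::size_t n) const noexcept
  { return code % n; }
};

struct cached_local_iterator_sketch
{
  void incr()
  {
    _M_cur = _M_cur->_M_nxt;
    if (_M_cur
        && _M_h2(_M_cur->_M_hash_code, _M_bucket_count) != _M_bucket)
      _M_cur = nullptr;       // walked past the end of the bucket
  }

  node*          _M_cur;
  std::size_t    _M_bucket;
  std::size_t    _M_bucket_count;
  mod_range_hash _M_h2;       // stored here for clarity; the real class uses EBO
};

int main() { }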
@ -1285,10 +1296,17 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
: private _Hash_code_base<_Key, _Value, _ExtractKey,
_H1, _H2, _Hash, false>
{
protected:
using __hash_code_base = _Hash_code_base<_Key, _Value, _ExtractKey,
_H1, _H2, _Hash, false>;
public:
_Local_iterator_base() = default;
_Local_iterator_base(_Hash_node<_Value, false>* __p,
_Local_iterator_base(const __hash_code_base& __base,
_Hash_node<_Value, false>* __p,
std::size_t __bkt, std::size_t __bkt_count)
: _M_cur(__p), _M_bucket(__bkt), _M_bucket_count(__bkt_count) { }
: __hash_code_base(__base),
_M_cur(__p), _M_bucket(__bkt), _M_bucket_count(__bkt_count) { }
void
_M_incr()
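The non-cached specialization above has no stored hash code to consult, so it copies the whole __hash_code_base (key extraction, _H1 and _H2) and re-hashes the element on each step. The counterpart sketch, again with hypothetical types:

// Incrementing a local iterator when hash codes are not cached: the element
// itself must be re-hashed to decide whether it still belongs to the bucket.
#include <cstddef>
#include <functional>

struct node
{
  node* _M_nxt;
  int   _M_value;
};

struct uncached_local_iterator_sketch
{
  void incr()
  {
    _M_cur = _M_cur->_M_nxt;
    if (_M_cur
        && _M_h1(_M_cur->_M_value) % _M_bucket_count != _M_bucket)
      _M_cur = nullptr;
  }

  node*          _M_cur;
  std::size_t    _M_bucket;
  std::size_t    _M_bucket_count;
  std::hash<int> _M_h1;   // stands for the embedded __hash_code_base copy
};

int main() { }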
@ -1333,6 +1351,11 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
: public _Local_iterator_base<_Key, _Value, _ExtractKey,
_H1, _H2, _Hash, __cache>
{
private:
using __base_type = _Local_iterator_base<_Key, _Value, _ExtractKey,
_H1, _H2, _Hash, __cache>;
using __hash_code_base = typename __base_type::__hash_code_base;
public:
typedef _Value value_type;
typedef typename std::conditional<__constant_iterators,
const _Value*, _Value*>::type
@ -1345,11 +1368,10 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
_Local_iterator() = default;
explicit
_Local_iterator(_Hash_node<_Value, __cache>* __p,
_Local_iterator(const __hash_code_base& __base,
_Hash_node<_Value, __cache>* __p,
std::size_t __bkt, std::size_t __bkt_count)
: _Local_iterator_base<_Key, _Value, _ExtractKey, _H1, _H2, _Hash,
__cache>(__p, __bkt, __bkt_count)
: __base_type(__base, __p, __bkt, __bkt_count)
{ }
reference
@ -1384,6 +1406,12 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
: public _Local_iterator_base<_Key, _Value, _ExtractKey,
_H1, _H2, _Hash, __cache>
{
private:
using __base_type = _Local_iterator_base<_Key, _Value, _ExtractKey,
_H1, _H2, _Hash, __cache>;
using __hash_code_base = typename __base_type::__hash_code_base;
public:
typedef _Value value_type;
typedef const _Value* pointer;
typedef const _Value& reference;
@ -1392,20 +1420,17 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
_Local_const_iterator() = default;
explicit
_Local_const_iterator(_Hash_node<_Value, __cache>* __p,
_Local_const_iterator(const __hash_code_base& __base,
_Hash_node<_Value, __cache>* __p,
std::size_t __bkt, std::size_t __bkt_count)
: _Local_iterator_base<_Key, _Value, _ExtractKey, _H1, _H2, _Hash,
__cache>(__p, __bkt, __bkt_count)
: __base_type(__base, __p, __bkt, __bkt_count)
{ }
_Local_const_iterator(const _Local_iterator<_Key, _Value, _ExtractKey,
_H1, _H2, _Hash,
__constant_iterators,
__cache>& __x)
: _Local_iterator_base<_Key, _Value, _ExtractKey, _H1, _H2, _Hash,
__cache>(__x._M_cur, __x._M_bucket,
__x._M_bucket_count)
: __base_type(__x)
{ }
reference


@ -361,10 +361,9 @@ namespace __debug
{
this->_M_invalidate_if([__victim](_Base_const_iterator __it)
{ return __it == __victim; });
_Base_local_iterator __local_victim = _S_to_local(__victim);
this->_M_invalidate_local_if(
[__local_victim](_Base_const_local_iterator __it)
{ return __it == __local_victim; });
[__victim](_Base_const_local_iterator __it)
{ return __it._M_cur == __victim._M_cur; });
size_type __bucket_count = this->bucket_count();
_Base::erase(__victim);
_M_check_rehashed(__bucket_count);
@ -380,10 +379,9 @@ namespace __debug
_Base_const_iterator __victim = __it.base();
this->_M_invalidate_if([__victim](_Base_const_iterator __it)
{ return __it == __victim; });
_Base_const_local_iterator __local_victim = _S_to_local(__victim);
this->_M_invalidate_local_if(
[__local_victim](_Base_const_local_iterator __it)
{ return __it == __local_victim; });
[__victim](_Base_const_local_iterator __it)
{ return __it._M_cur == __victim._M_cur; });
size_type __bucket_count = this->bucket_count();
_Base_iterator __next = _Base::erase(__it.base());
_M_check_rehashed(__bucket_count);
@ -407,10 +405,9 @@ namespace __debug
._M_iterator(__last, "last"));
this->_M_invalidate_if([__tmp](_Base_const_iterator __it)
{ return __it == __tmp; });
_Base_const_local_iterator __local_tmp = _S_to_local(__tmp);
this->_M_invalidate_local_if(
[__local_tmp](_Base_const_local_iterator __it)
{ return __it == __local_tmp; });
[__tmp](_Base_const_local_iterator __it)
{ return __it._M_cur == __tmp._M_cur; });
}
size_type __bucket_count = this->bucket_count();
_Base_iterator __next = _Base::erase(__first.base(), __last.base());
@ -449,22 +446,6 @@ namespace __debug
if (__prev_count != this->bucket_count())
_M_invalidate_locals();
}
static _Base_local_iterator
_S_to_local(_Base_iterator __it)
{
// The returned local iterator will not be incremented so we don't
// need to compute __it's node bucket
return _Base_local_iterator(__it._M_cur, 0, 0);
}
static _Base_const_local_iterator
_S_to_local(_Base_const_iterator __it)
{
// The returned local iterator will not be incremented so we don't
// need to compute __it's node bucket
return _Base_const_local_iterator(__it._M_cur, 0, 0);
}
};
template<typename _Key, typename _Tp, typename _Hash,
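The debug-mode hunks above and below all make the same change: instead of manufacturing a dummy _Base_local_iterator with the now-removed _S_to_local helper just to compare it, the predicate handed to _M_invalidate_local_if compares the node pointer (_M_cur) that both iterator kinds already expose. A schematic of that idea with hypothetical, heavily simplified types:

// Invalidate every registered local iterator that refers to the erased node,
// comparing nodes directly rather than building a throwaway local iterator.
#include <vector>

struct node { };

struct debug_iterator          // stands for both base and local debug iterators
{
  node* _M_cur;
};

void
invalidate_local_if(std::vector<debug_iterator*>& registered,
                    const debug_iterator& victim)
{
  for (debug_iterator* it : registered)
    if (it->_M_cur == victim._M_cur)   // same node: the iterator is now invalid
      it->_M_cur = nullptr;            // mark it singular
}

int main() { }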
@ -807,10 +788,9 @@ namespace __debug
{
this->_M_invalidate_if([__victim](_Base_const_iterator __it)
{ return __it == __victim; });
_Base_local_iterator __local_victim = _S_to_local(__victim);
this->_M_invalidate_local_if(
[__local_victim](_Base_const_local_iterator __it)
{ return __it == __local_victim; });
[__victim](_Base_const_local_iterator __it)
{ return __it._M_cur == __victim._M_cur; });
_Base::erase(__victim++);
++__ret;
}
@ -825,10 +805,9 @@ namespace __debug
_Base_const_iterator __victim = __it.base();
this->_M_invalidate_if([__victim](_Base_const_iterator __it)
{ return __it == __victim; });
_Base_const_local_iterator __local_victim = _S_to_local(__victim);
this->_M_invalidate_local_if(
[__local_victim](_Base_const_local_iterator __it)
{ return __it == __local_victim; });
[__victim](_Base_const_local_iterator __it)
{ return __it._M_cur == __victim._M_cur; });
size_type __bucket_count = this->bucket_count();
_Base_iterator __next = _Base::erase(__it.base());
_M_check_rehashed(__bucket_count);
@ -852,10 +831,9 @@ namespace __debug
._M_iterator(__last, "last"));
this->_M_invalidate_if([__tmp](_Base_const_iterator __it)
{ return __it == __tmp; });
_Base_const_local_iterator __local_tmp = _S_to_local(__tmp);
this->_M_invalidate_local_if(
[__local_tmp](_Base_const_local_iterator __it)
{ return __it == __local_tmp; });
[__tmp](_Base_const_local_iterator __it)
{ return __it._M_cur == __tmp._M_cur; });
}
size_type __bucket_count = this->bucket_count();
_Base_iterator __next = _Base::erase(__first.base(), __last.base());
@ -894,22 +872,6 @@ namespace __debug
if (__prev_count != this->bucket_count())
_M_invalidate_locals();
}
static _Base_local_iterator
_S_to_local(_Base_iterator __it)
{
// The returned local iterator will not be incremented so we don't
// need to compute __it's node bucket
return _Base_local_iterator(__it._M_cur, 0, 0);
}
static _Base_const_local_iterator
_S_to_local(_Base_const_iterator __it)
{
// The returned local iterator will not be incremented so we don't
// need to compute __it's node bucket
return _Base_const_local_iterator(__it._M_cur, 0, 0);
}
};
template<typename _Key, typename _Tp, typename _Hash,


@ -356,10 +356,9 @@ namespace __debug
this->_M_invalidate_if(
[__victim](_Base_const_iterator __it)
{ return __it == __victim; });
_Base_local_iterator __local_victim = _S_to_local(__victim);
this->_M_invalidate_local_if(
[__local_victim](_Base_const_local_iterator __it)
{ return __it == __local_victim; });
[__victim](_Base_const_local_iterator __it)
{ return __it._M_cur == __victim._M_cur; });
size_type __bucket_count = this->bucket_count();
_Base::erase(__victim);
_M_check_rehashed(__bucket_count);
@ -376,10 +375,9 @@ namespace __debug
this->_M_invalidate_if(
[__victim](_Base_const_iterator __it)
{ return __it == __victim; });
_Base_const_local_iterator __local_victim = _S_to_local(__victim);
this->_M_invalidate_local_if(
[__local_victim](_Base_const_local_iterator __it)
{ return __it == __local_victim; });
[__victim](_Base_const_local_iterator __it)
{ return __it._M_cur == __victim._M_cur; });
size_type __bucket_count = this->bucket_count();
_Base_iterator __next = _Base::erase(__it.base());
_M_check_rehashed(__bucket_count);
@ -404,10 +402,9 @@ namespace __debug
this->_M_invalidate_if(
[__tmp](_Base_const_iterator __it)
{ return __it == __tmp; });
_Base_const_local_iterator __local_tmp = _S_to_local(__tmp);
this->_M_invalidate_local_if(
[__local_tmp](_Base_const_local_iterator __it)
{ return __it == __local_tmp; });
[__tmp](_Base_const_local_iterator __it)
{ return __it._M_cur == __tmp._M_cur; });
}
size_type __bucket_count = this->bucket_count();
_Base_iterator __next = _Base::erase(__first.base(),
@ -448,22 +445,6 @@ namespace __debug
if (__prev_count != this->bucket_count())
_M_invalidate_locals();
}
static _Base_local_iterator
_S_to_local(_Base_iterator __it)
{
// The returned local iterator will not be incremented so we don't
// need to compute __it's node bucket
return _Base_local_iterator(__it._M_cur, 0, 0);
}
static _Base_const_local_iterator
_S_to_local(_Base_const_iterator __it)
{
// The returned local iterator will not be incremented so we don't
// need to compute __it's node bucket
return _Base_const_local_iterator(__it._M_cur, 0, 0);
}
};
template<typename _Value, typename _Hash, typename _Pred, typename _Alloc>
@ -798,10 +779,9 @@ namespace __debug
{
this->_M_invalidate_if([__victim](_Base_const_iterator __it)
{ return __it == __victim; });
_Base_local_iterator __local_victim = _S_to_local(__victim);
this->_M_invalidate_local_if(
[__local_victim](_Base_const_local_iterator __it)
{ return __it == __local_victim; });
[__victim](_Base_const_local_iterator __it)
{ return __it._M_cur == __victim._M_cur; });
_Base::erase(__victim++);
++__ret;
}
@ -815,10 +795,9 @@ namespace __debug
_Base_const_iterator __victim = __it.base();
this->_M_invalidate_if([__victim](_Base_const_iterator __it)
{ return __it == __victim; });
_Base_const_local_iterator __local_victim = _S_to_local(__victim);
this->_M_invalidate_local_if(
[__local_victim](_Base_const_local_iterator __it)
{ return __it == __local_victim; });
[__victim](_Base_const_local_iterator __it)
{ return __it._M_cur == __victim._M_cur; });
return iterator(_Base::erase(__it.base()), this);
}
@ -839,10 +818,9 @@ namespace __debug
._M_iterator(__last, "last"));
this->_M_invalidate_if([__tmp](_Base_const_iterator __it)
{ return __it == __tmp; });
_Base_const_local_iterator __local_tmp = _S_to_local(__tmp);
this->_M_invalidate_local_if(
[__local_tmp](_Base_const_local_iterator __it)
{ return __it == __local_tmp; });
[__tmp](_Base_const_local_iterator __it)
{ return __it._M_cur == __tmp._M_cur; });
}
return iterator(_Base::erase(__first.base(),
__last.base()), this);
@ -879,22 +857,6 @@ namespace __debug
if (__prev_count != this->bucket_count())
_M_invalidate_locals();
}
static _Base_local_iterator
_S_to_local(_Base_iterator __it)
{
// The returned local iterator will not be incremented so we don't
// need to compute __it's node bucket
return _Base_local_iterator(__it._M_cur, 0, 0);
}
static _Base_const_local_iterator
_S_to_local(_Base_const_iterator __it)
{
// The returned local iterator will not be incremented so we don't
// need to compute __it's node bucket
return _Base_const_local_iterator(__it._M_cur, 0, 0);
}
};
template<typename _Value, typename _Hash, typename _Pred, typename _Alloc>


@ -0,0 +1,72 @@
// { dg-options "-std=c++11" }
// Copyright (C) 2013 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING3. If not see
// <http://www.gnu.org/licenses/>.
#include <testsuite_hooks.h>
#include <unordered_set>
namespace
{
struct hash
{
hash() = default;
hash(int modulo)
: _M_modulo(modulo)
{ }
std::size_t operator() (int val) const noexcept
{ return val % _M_modulo; }
int _M_modulo;
};
}
void
test01()
{
bool test __attribute__((unused)) = true;
// static_assert(std::__cache_default<int, hash>::value,
// "Unexpected default cache value");
typedef std::unordered_set<int, hash> us_t;
us_t us1(10, hash(13));
us_t us2(10, hash(7));
VERIFY( us1.hash_function()._M_modulo == 13 );
VERIFY( us2.hash_function()._M_modulo == 7 );
const int nb = 5;
for (int i = 0; i != nb * us1.hash_function()._M_modulo; ++i)
us1.insert(i);
us_t::local_iterator lit = us1.begin(12);
us_t::local_iterator litend = us1.end(12);
VERIFY( std::distance(lit, litend) == nb );
us1.swap(us2);
VERIFY( us1.hash_function()._M_modulo == 7 );
VERIFY( us2.hash_function()._M_modulo == 13 );
VERIFY( std::distance(lit, litend) == nb );
}
int main()
{
test01();
}


@ -2,7 +2,7 @@
// { dg-options "-std=gnu++0x" }
// { dg-require-normal-mode "" }
// Copyright (C) 2011, 2012 Free Software Foundation, Inc.
// Copyright (C) 2011-2013 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
@ -19,7 +19,7 @@
// with this library; see the file COPYING3. If not see
// <http://www.gnu.org/licenses/>.
// { dg-error "with noexcept" "" { target *-*-* } 247 }
// { dg-error "with noexcept" "" { target *-*-* } 252 }
#include <unordered_set>


@ -0,0 +1,51 @@
// { dg-do compile }
// { dg-options "-std=c++11" }
// { dg-require-normal-mode "" }
// Copyright (C) 2013 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING3. If not see
// <http://www.gnu.org/licenses/>.
// { dg-error "default constructible" "" { target *-*-* } 268 }
#include <unordered_set>
namespace
{
struct hash
{
hash(std::size_t seed)
: _M_seed(seed)
{ }
std::size_t operator() (int val) const noexcept
{ return val ^ _M_seed; }
private:
std::size_t _M_seed;
};
}
void
test01()
{
using traits = std::__detail::_Hashtable_traits<false, true, true>;
using hashtable = std::__uset_hashtable<int, hash,
std::equal_to<int>,
std::allocator<int>, traits>;
hashtable ht(10, hash(1));
}


@ -1,4 +1,4 @@
// Copyright (C) 2012 Free Software Foundation, Inc.
// Copyright (C) 2012-2013 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
@ -21,10 +21,14 @@
#include <random>
#include <sstream>
#include <tr1/unordered_set>
#include<unordered_set>
#include <unordered_set>
#define USE_MY_FOO 1
struct Foo
{
#if USE_MY_FOO
typedef std::random_device::result_type _Type;
_Type bar;
_Type baz;
@ -38,6 +42,18 @@ struct Foo
meh = randev();
}
#else
int bar;
int baz;
int meh;
Foo()
{ bar = random(); baz = random(); meh = random(); }
Foo(const Foo&) = default;
#endif
std::size_t
hash() const noexcept
{ return std::size_t(bar ^ baz ^ meh); }
@ -54,36 +70,30 @@ struct HashFunction
{ return t.hash(); }
};
const int sz = 300000;
template<typename _ContType>
void bench(const char* container_desc)
void
bench(const char* container_desc, const typename _ContType::value_type* foos)
{
using namespace __gnu_test;
_ContType s;
time_counter time;
resource_counter resource;
const int sz = 300000;
Foo foos[sz];
{
std::random_device randev;
for (int i = 0; i != sz; ++i)
foos[i].init(randev);
}
_ContType s;
start_counters(time, resource);
for (int i = 0; i != sz ; ++i)
s.insert(foos[i]);
s.insert(foos[i]);
stop_counters(time, resource);
std::ostringstream ostr;
ostr << container_desc << sz << " Foo insertions";
ostr << container_desc << sz << " insertion attempts, "
<< s.size() << " inserted";
report_performance(__FILE__, ostr.str().c_str(), time, resource);
// Try to insert again to check performance of collision detection
const int nb_loop = 10;
start_counters(time, resource);
@ -94,7 +104,7 @@ template<typename _ContType>
stop_counters(time, resource);
ostr.str("");
ostr << container_desc << nb_loop << " times insertion of "
<< sz << " Foo";
<< sz << " elements";
report_performance(__FILE__, ostr.str().c_str(), time, resource);
}
@ -121,12 +131,58 @@ template<bool cache>
int main()
{
bench<__tr1_uset<false>>("std::tr1::unordered_set without hash code cached ");
bench<__tr1_uset<true>>("std::tr1::unordered_set with hash code cached ");
bench<__tr1_umset<false>>("std::tr1::unordered_multiset without hash code cached ");
bench<__tr1_umset<true>>("std::tr1::unordered_multiset with hash code cached ");
bench<__uset<false>>("std::unordered_set without hash code cached ");
bench<__uset<true>>("std::unordered_set with hash code cached ");
bench<__umset<false>>("std::unordered_multiset without hash code cached ");
bench<__umset<true>>("std::unordered_multiset with hash code cached ");
using namespace __gnu_test;
{
int bars[sz];
for (int i = 0; i != sz; ++i)
bars[i] = i;
bench<std::tr1::unordered_set<int>>(
"std::tr1::unordered_set<int> ", bars);
bench<std::unordered_set<int>>(
"std::unordered_set<int> ", bars);
}
Foo foos[sz];
#if USE_MY_FOO
{
std::random_device randev;
for (int i = 0; i != sz; ++i)
foos[i].init(randev);
}
#endif
time_counter time;
resource_counter resource;
start_counters(time, resource);
bench<__tr1_uset<false>>(
"std::tr1::unordered_set without hash code cached ", foos);
bench<__tr1_uset<true>>(
"std::tr1::unordered_set with hash code cached ", foos);
bench<__tr1_umset<false>>(
"std::tr1::unordered_multiset without hash code cached ", foos);
bench<__tr1_umset<true>>(
"std::tr1::unordered_multiset with hash code cached ", foos);
stop_counters(time, resource);
report_performance(__FILE__, "tr1 benches", time, resource);
start_counters(time, resource);
bench<__uset<false>>(
"std::unordered_set without hash code cached ", foos);
bench<__uset<true>>(
"std::unordered_set with hash code cached ", foos);
bench<__umset<false>>(
"std::unordered_multiset without hash code cached ", foos);
bench<__umset<true>>(
"std::unordered_multiset with hash code cached ", foos);
stop_counters(time, resource);
report_performance(__FILE__, "std benches", time, resource);
bench<std::unordered_set<Foo, HashFunction>>(
"std::unordered_set default cache ", foos);
bench<std::unordered_multiset<Foo, HashFunction>>(
"std::unordered_multiset default cache ", foos);
}


@ -1,6 +1,6 @@
// { dg-options "-std=gnu++0x" }
// Copyright (C) 2011 Free Software Foundation, Inc.
// Copyright (C) 2011-2013 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
@ -18,25 +18,18 @@
// <http://www.gnu.org/licenses/>.
#include <sstream>
#ifdef _USE_TR1
# include <tr1/unordered_set>
#else
# include <unordered_set>
#endif
#include <tr1/unordered_set>
#include <unordered_set>
#include <testsuite_performance.h>
namespace
{
// Bench using an unordered_set<int>. Hash functor for int is quite
// predictable so it helps bench very specific use cases.
template<bool use_cache>
void bench()
template<typename _ContType>
void bench(const char* desc)
{
using namespace __gnu_test;
std::ostringstream ostr;
ostr << "unordered_set<int> " << (use_cache ? "with" : "without")
<< " cache";
const std::string desc = ostr.str();
time_counter time;
resource_counter resource;
@ -44,20 +37,12 @@ namespace
const int nb = 200000;
start_counters(time, resource);
#ifdef _USE_TR1
std::tr1::__unordered_set<int, std::hash<int>, std::equal_to<int>,
std::allocator<int>,
use_cache> us;
#else
std::__uset_hashtable<int, std::hash<int>, std::equal_to<int>,
std::allocator<int>,
std::__uset_traits<use_cache>> us;
#endif
_ContType us;
for (int i = 0; i != nb; ++i)
us.insert(i);
stop_counters(time, resource);
ostr.str("");
std::ostringstream ostr;
ostr << desc << ": first insert";
report_performance(__FILE__, ostr.str().c_str(), time, resource);
@ -110,14 +95,10 @@ namespace
// Bench using unordered_set<string> that show how important it is to cache
// hash code as computing string hash code is quite expensive compared to
// computing it for int.
template<bool use_cache>
void bench_str()
template<typename _ContType>
void bench_str(const char* desc)
{
using namespace __gnu_test;
std::ostringstream ostr;
ostr << "unordered_set<string> " << (use_cache ? "with" : "without")
<< " cache";
const std::string desc = ostr.str();
time_counter time;
resource_counter resource;
@ -125,6 +106,7 @@ namespace
const int nb = 200000;
// First generate once strings that are going to be used throughout the
// bench:
std::ostringstream ostr;
std::vector<std::string> strs;
strs.reserve(nb);
for (int i = 0; i != nb; ++i)
@ -136,17 +118,7 @@ namespace
start_counters(time, resource);
#ifdef _USE_TR1
std::tr1::__unordered_set<std::string, std::hash<std::string>,
std::equal_to<std::string>,
std::allocator<std::string>,
use_cache> us;
#else
std::__uset_hashtable<std::string, std::hash<std::string>,
std::equal_to<std::string>,
std::allocator<std::string>,
std::__uset_traits<use_cache>> us;
#endif
_ContType us;
for (int i = 0; i != nb; ++i)
us.insert(strs[i]);
@ -192,11 +164,53 @@ namespace
}
}
template<bool cache>
using __uset =
std::__uset_hashtable<int, std::hash<int>, std::equal_to<int>,
std::allocator<int>,
std::__uset_traits<cache>>;
template<bool cache>
using __tr1_uset =
std::tr1::__unordered_set<int, std::hash<int>, std::equal_to<int>,
std::allocator<int>,
cache>;
template<bool cache>
using __str_uset =
std::__uset_hashtable<std::string, std::hash<std::string>,
std::equal_to<std::string>,
std::allocator<std::string>,
std::__uset_traits<cache>>;
template<bool cache>
using __tr1_str_uset =
std::tr1::__unordered_set<std::string, std::hash<std::string>,
std::equal_to<std::string>,
std::allocator<std::string>,
cache>;
int main()
{
bench<false>();
bench<true>();
bench_str<false>();
bench_str<true>();
bench<__tr1_uset<false>>(
"std::tr1::unordered_set<int> without hash code cached");
bench<__tr1_uset<true>>(
"std::tr1::unordered_set<int> with hash code cached");
bench<__uset<false>>(
"std::unordered_set<int> without hash code cached");
bench<__uset<true>>(
"std::unordered_set<int> with hash code cached");
bench<std::unordered_set<int>>(
"std::unordered_set<int> default cache");
bench_str<__tr1_str_uset<false>>(
"std::tr1::unordered_set<string> without hash code cached");
bench_str<__tr1_str_uset<true>>(
"std::tr1::unordered_set<string> with hash code cached");
bench_str<__str_uset<false>>(
"std::unordered_set<string> without hash code cached");
bench_str<__str_uset<true>>(
"std::unordered_set<string> with hash code cached");
bench_str<std::unordered_set<std::string>>(
"std::unordered_set<string> default cache");
return 0;
}