Intel(R) Threading Building Blocks Doxygen Documentation version 4.2.3
tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator > Class Template Reference

Unordered map from Key to T. More...

#include <concurrent_hash_map.h>

Inheritance diagram for tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >:
Collaboration diagram for tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >:

Classes

class  accessor
 Allows write access to elements and combines data access, locking, and garbage collection. More...
 
struct  accessor_not_used
 
class  bucket_accessor
 bucket accessor is to find, rehash, acquire a lock, and access a bucket More...
 
struct  call_clear_on_leave
 
class  const_accessor
 Combines data access, locking, and garbage collection. More...
 
class  node
 
struct  node_scoped_guard
 

Public Types

typedef Key key_type
 
typedef T mapped_type
 
typedef std::pair< const Key, T > value_type
 
typedef hash_map_base::size_type size_type
 
typedef ptrdiff_t difference_type
 
typedef value_type * pointer
 
typedef const value_type * const_pointer
 
typedef value_type & reference
 
typedef const value_type & const_reference
 
typedef internal::hash_map_iterator< concurrent_hash_map, value_type > iterator
 
typedef internal::hash_map_iterator< concurrent_hash_map, const value_type > const_iterator
 
typedef internal::hash_map_range< iterator > range_type
 
typedef internal::hash_map_range< const_iterator > const_range_type
 
typedef Allocator allocator_type
 

Public Member Functions

 concurrent_hash_map (const allocator_type &a=allocator_type())
 Construct empty table. More...
 
 concurrent_hash_map (const HashCompare &compare, const allocator_type &a=allocator_type())
 
 concurrent_hash_map (size_type n, const allocator_type &a=allocator_type())
 Construct empty table with n preallocated buckets. This number serves also as initial concurrency level. More...
 
 concurrent_hash_map (size_type n, const HashCompare &compare, const allocator_type &a=allocator_type())
 
 concurrent_hash_map (const concurrent_hash_map &table)
 Copy constructor. More...
 
 concurrent_hash_map (const concurrent_hash_map &table, const allocator_type &a)
 
 concurrent_hash_map (concurrent_hash_map &&table)
 Move constructor. More...
 
 concurrent_hash_map (concurrent_hash_map &&table, const allocator_type &a)
 Move constructor. More...
 
template<typename I >
 concurrent_hash_map (I first, I last, const allocator_type &a=allocator_type())
 Construction with copying iteration range and given allocator instance. More...
 
template<typename I >
 concurrent_hash_map (I first, I last, const HashCompare &compare, const allocator_type &a=allocator_type())
 
 concurrent_hash_map (std::initializer_list< value_type > il, const allocator_type &a=allocator_type())
 Construct table with a copy of the elements of the initializer list. More...
 
 concurrent_hash_map (std::initializer_list< value_type > il, const HashCompare &compare, const allocator_type &a=allocator_type())
 
concurrent_hash_map & operator= (const concurrent_hash_map &table)
 Assignment. More...
 
concurrent_hash_map & operator= (concurrent_hash_map &&table)
 Move Assignment. More...
 
concurrent_hash_map & operator= (std::initializer_list< value_type > il)
 Assignment. More...
 
void rehash (size_type n=0)
 Rehashes and optionally resizes the whole table. More...
 
void clear ()
 Clear table. More...
 
 ~concurrent_hash_map ()
 Clear table and destroy it. More...
 
range_type range (size_type grainsize=1)
 
const_range_type range (size_type grainsize=1) const
 
iterator begin ()
 
iterator end ()
 
const_iterator begin () const
 
const_iterator end () const
 
std::pair< iterator, iterator > equal_range (const Key &key)
 
std::pair< const_iterator, const_iterator > equal_range (const Key &key) const
 
size_type size () const
 Number of items in table. More...
 
bool empty () const
 True if size()==0. More...
 
size_type max_size () const
 Upper bound on size. More...
 
size_type bucket_count () const
 Returns the current number of buckets. More...
 
allocator_type get_allocator () const
 return allocator object More...
 
void swap (concurrent_hash_map &table)
 swap two instances. Iterators are invalidated More...
 
size_type count (const Key &key) const
 Return count of items (0 or 1) More...
 
bool find (const_accessor &result, const Key &key) const
 Find item and acquire a read lock on the item. More...
 
bool find (accessor &result, const Key &key)
 Find item and acquire a write lock on the item. More...
 
bool insert (const_accessor &result, const Key &key)
 Insert item (if not already present) and acquire a read lock on the item. More...
 
bool insert (accessor &result, const Key &key)
 Insert item (if not already present) and acquire a write lock on the item. More...
 
bool insert (const_accessor &result, const value_type &value)
 Insert item by copying if there is no such key present already and acquire a read lock on the item. More...
 
bool insert (accessor &result, const value_type &value)
 Insert item by copying if there is no such key present already and acquire a write lock on the item. More...
 
bool insert (const value_type &value)
 Insert item by copying if there is no such key present already. More...
 
bool insert (const_accessor &result, value_type &&value)
 Insert item by copying if there is no such key present already and acquire a read lock on the item. More...
 
bool insert (accessor &result, value_type &&value)
 Insert item by copying if there is no such key present already and acquire a write lock on the item. More...
 
bool insert (value_type &&value)
 Insert item by copying if there is no such key present already. More...
 
template<typename... Args>
bool emplace (const_accessor &result, Args &&... args)
 Insert item by copying if there is no such key present already and acquire a read lock on the item. More...
 
template<typename... Args>
bool emplace (accessor &result, Args &&... args)
 Insert item by copying if there is no such key present already and acquire a write lock on the item. More...
 
template<typename... Args>
bool emplace (Args &&... args)
 Insert item by copying if there is no such key present already. More...
 
template<typename I >
void insert (I first, I last)
 Insert range [first, last) More...
 
void insert (std::initializer_list< value_type > il)
 Insert initializer list. More...
 
bool erase (const Key &key)
 Erase item. More...
 
bool erase (const_accessor &item_accessor)
 Erase item by const_accessor. More...
 
bool erase (accessor &item_accessor)
 Erase item by accessor. More...
 

Protected Types

typedef tbb::internal::allocator_rebind< Allocator, node >::type node_allocator_type
 
typedef tbb::internal::allocator_traits< node_allocator_type > node_allocator_traits
 
- Protected Types inherited from tbb::interface5::internal::hash_map_base
typedef size_t size_type
 Size type. More...
 
typedef size_t hashcode_t
 Type of a hash code. More...
 
typedef size_t segment_index_t
 Segment index type. More...
 
typedef hash_map_node_base node_base
 Node base type. More...
 
typedef bucket * segment_ptr_t
 Segment pointer. More...
 
typedef segment_ptr_t segments_table_t[pointers_per_table]
 Segment pointers table type. More...
 

Protected Member Functions

void delete_node (node_base *n)
 
node * search_bucket (const key_type &key, bucket *b) const
 
void rehash_bucket (bucket *b_new, const hashcode_t h)
 
bool lookup (bool op_insert, const Key &key, const T *t, const_accessor *result, bool write, node *(*allocate_node)(node_allocator_type &, const Key &, const T *), node *tmp_n=0)
 Insert or find item and optionally acquire a lock on the item. More...
 
template<typename Accessor >
bool generic_move_insert (Accessor &&result, value_type &&value)
 
template<typename Accessor , typename... Args>
bool generic_emplace (Accessor &&result, Args &&... args)
 
bool exclude (const_accessor &item_accessor)
 delete item by accessor More...
 
template<typename I >
std::pair< I, I > internal_equal_range (const Key &key, I end) const
 Returns an iterator for an item defined by the key, or for the next item after it (if upper==true) More...
 
void internal_copy (const concurrent_hash_map &source)
 Copy "source" to *this, where *this must start out empty. More...
 
template<typename I >
void internal_copy (I first, I last, size_type reserve_size)
 
void internal_move_assign (concurrent_hash_map &&other, tbb::internal::traits_true_type)
 
void internal_move_assign (concurrent_hash_map &&other, tbb::internal::traits_false_type)
 
const_pointer internal_fast_find (const Key &key) const
 Fast find when no concurrent erasure is used. For internal use inside TBB only! More...
 
- Protected Member Functions inherited from tbb::interface5::internal::hash_map_base
 hash_map_base ()
 Constructor. More...
 
template<typename Allocator >
void enable_segment (segment_index_t k, const Allocator &allocator, bool is_initial=false)
 Enable segment. More...
 
template<typename Allocator >
void delete_segment (segment_index_t s, const Allocator &allocator)
 
bucket * get_bucket (hashcode_t h) const throw ()
 Get bucket by (masked) hashcode. More...
 
void mark_rehashed_levels (hashcode_t h) throw ()
 
bool check_mask_race (const hashcode_t h, hashcode_t &m) const
 Check for mask race. More...
 
bool check_rehashing_collision (const hashcode_t h, hashcode_t m_old, hashcode_t m) const
 Process mask race, check for rehashing collision. More...
 
segment_index_t insert_new_node (bucket *b, node_base *n, hashcode_t mask)
 Insert a node and check for load factor. More...
 
template<typename Allocator >
void reserve (size_type buckets, const Allocator &allocator)
 Prepare enough segments for number of buckets. More...
 
void internal_swap (hash_map_base &table)
 Swap hash_map_bases. More...
 
void internal_move (hash_map_base &&other)
 

Static Protected Member Functions

template<typename... Args>
static node * create_node (node_allocator_type &allocator, Args &&... args)
 
static node * allocate_node_copy_construct (node_allocator_type &allocator, const Key &key, const T *t)
 
static node * allocate_node_move_construct (node_allocator_type &allocator, const Key &key, const T *t)
 
static node * allocate_node_default_construct (node_allocator_type &allocator, const Key &key, const T *)
 
static node * do_not_allocate_node (node_allocator_type &, const Key &, const T *)
 
- Static Protected Member Functions inherited from tbb::interface5::internal::hash_map_base
static segment_index_t segment_index_of (size_type index)
 
static segment_index_t segment_base (segment_index_t k)
 
static size_type segment_size (segment_index_t k)
 
static bool is_valid (void *ptr)
 
static void init_buckets (segment_ptr_t ptr, size_type sz, bool is_initial)
 Initialize buckets. More...
 
static void add_to_bucket (bucket *b, node_base *n)
 Add node. More...
 

Protected Attributes

node_allocator_type my_allocator
 
HashCompare my_hash_compare
 
- Protected Attributes inherited from tbb::interface5::internal::hash_map_base
atomic< hashcode_t > my_mask
 Hash mask = sum of allocated segment sizes - 1. More...
 
segments_table_t my_table
 Segment pointers table. Also prevents false sharing between my_mask and my_size. More...
 
atomic< size_type > my_size
 Size of container in stored items. More...
 
bucket my_embedded_segment [embedded_buckets]
 Zero segment. More...
 

Friends

template<typename Container , typename Value >
class internal::hash_map_iterator
 
template<typename I >
class internal::hash_map_range
 
class const_accessor
 
const_accessor * accessor_location (accessor_not_used const &)
 
const_accessor * accessor_location (const_accessor &a)
 
bool is_write_access_needed (accessor const &)
 
bool is_write_access_needed (const_accessor const &)
 
bool is_write_access_needed (accessor_not_used const &)
 

Additional Inherited Members

- Static Protected Attributes inherited from tbb::interface5::internal::hash_map_base
static size_type const embedded_block = 1
 Count of segments in the first block. More...
 
static size_type const embedded_buckets = 1<<embedded_block
 Count of segments in the first block. More...
 
static size_type const first_block = 8
 Count of segments in the first block. More...
 
static size_type const pointers_per_table = sizeof(segment_index_t) * 8
 Size of a pointer / table size. More...
 

Detailed Description

template<typename Key, typename T, typename HashCompare, typename Allocator>
class tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >

Unordered map from Key to T.

concurrent_hash_map is an associative container with concurrent access.

Compatibility
The class meets all Container Requirements from C++ Standard (See ISO/IEC 14882:2003(E), clause 23.1).
Exception Safety
  • Hash function is not permitted to throw an exception. User-defined types Key and T are forbidden from throwing an exception in destructors.
  • If exception happens during insert() operations, it has no effect (unless exception raised by HashCompare::hash() function during grow_segment).
  • If exception happens during operator=() operation, the container can have a part of source items, and methods size() and empty() can return wrong results.
Changes since TBB 2.1
  • Replaced internal algorithm and data structure. Patent is pending.
  • Added buckets number argument for constructor
Changes since TBB 2.0

Definition at line 585 of file concurrent_hash_map.h.

Member Typedef Documentation

◆ allocator_type

template<typename Key , typename T , typename HashCompare , typename Allocator >
typedef Allocator tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::allocator_type

Definition at line 606 of file concurrent_hash_map.h.

◆ const_iterator

template<typename Key , typename T , typename HashCompare , typename Allocator >
typedef internal::hash_map_iterator<concurrent_hash_map,const value_type> tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::const_iterator

Definition at line 603 of file concurrent_hash_map.h.

◆ const_pointer

template<typename Key , typename T , typename HashCompare , typename Allocator >
typedef const value_type* tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::const_pointer

Definition at line 599 of file concurrent_hash_map.h.

◆ const_range_type

template<typename Key , typename T , typename HashCompare , typename Allocator >
typedef internal::hash_map_range<const_iterator> tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::const_range_type

Definition at line 605 of file concurrent_hash_map.h.

◆ const_reference

template<typename Key , typename T , typename HashCompare , typename Allocator >
typedef const value_type& tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::const_reference

Definition at line 601 of file concurrent_hash_map.h.

◆ difference_type

template<typename Key , typename T , typename HashCompare , typename Allocator >
typedef ptrdiff_t tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::difference_type

Definition at line 597 of file concurrent_hash_map.h.

◆ iterator

template<typename Key , typename T , typename HashCompare , typename Allocator >
typedef internal::hash_map_iterator<concurrent_hash_map,value_type> tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::iterator

Definition at line 602 of file concurrent_hash_map.h.

◆ key_type

template<typename Key , typename T , typename HashCompare , typename Allocator >
typedef Key tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::key_type

Definition at line 593 of file concurrent_hash_map.h.

◆ mapped_type

template<typename Key , typename T , typename HashCompare , typename Allocator >
typedef T tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::mapped_type

Definition at line 594 of file concurrent_hash_map.h.

◆ node_allocator_traits

template<typename Key , typename T , typename HashCompare , typename Allocator >
typedef tbb::internal::allocator_traits<node_allocator_type> tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::node_allocator_traits
protected

Definition at line 612 of file concurrent_hash_map.h.

◆ node_allocator_type

template<typename Key , typename T , typename HashCompare , typename Allocator >
typedef tbb::internal::allocator_rebind<Allocator,node>::type tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::node_allocator_type
protected

Definition at line 611 of file concurrent_hash_map.h.

◆ pointer

template<typename Key , typename T , typename HashCompare , typename Allocator >
typedef value_type* tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::pointer

Definition at line 598 of file concurrent_hash_map.h.

◆ range_type

template<typename Key , typename T , typename HashCompare , typename Allocator >
typedef internal::hash_map_range<iterator> tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::range_type

Definition at line 604 of file concurrent_hash_map.h.

◆ reference

template<typename Key , typename T , typename HashCompare , typename Allocator >
typedef value_type& tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::reference

Definition at line 600 of file concurrent_hash_map.h.

◆ size_type

template<typename Key , typename T , typename HashCompare , typename Allocator >
typedef hash_map_base::size_type tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::size_type

Definition at line 596 of file concurrent_hash_map.h.

◆ value_type

template<typename Key , typename T , typename HashCompare , typename Allocator >
typedef std::pair<const Key,T> tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::value_type

Definition at line 595 of file concurrent_hash_map.h.

Constructor & Destructor Documentation

◆ concurrent_hash_map() [1/12]

template<typename Key , typename T , typename HashCompare , typename Allocator >
tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::concurrent_hash_map ( const allocator_type &  a = allocator_type())
inlineexplicit

Construct empty table.

Definition at line 830 of file concurrent_hash_map.h.

831 : internal::hash_map_base(), my_allocator(a)
832 {}

◆ concurrent_hash_map() [2/12]

template<typename Key , typename T , typename HashCompare , typename Allocator >
tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::concurrent_hash_map ( const HashCompare &  compare,
const allocator_type &  a = allocator_type() 
)
inlineexplicit

Definition at line 834 of file concurrent_hash_map.h.

835 : internal::hash_map_base(), my_allocator(a), my_hash_compare(compare)
836 {}

◆ concurrent_hash_map() [3/12]

template<typename Key , typename T , typename HashCompare , typename Allocator >
tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::concurrent_hash_map ( size_type  n,
const allocator_type &  a = allocator_type() 
)
inline

Construct empty table with n preallocated buckets. This number serves also as initial concurrency level.

Definition at line 839 of file concurrent_hash_map.h.

840 : internal::hash_map_base(), my_allocator(a)
841 {
842 reserve( n, my_allocator );
843 }
void reserve(size_type buckets, const Allocator &allocator)
Prepare enough segments for number of buckets.

References tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::my_allocator, and tbb::interface5::internal::hash_map_base::reserve().

Here is the call graph for this function:

◆ concurrent_hash_map() [4/12]

template<typename Key , typename T , typename HashCompare , typename Allocator >
tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::concurrent_hash_map ( size_type  n,
const HashCompare &  compare,
const allocator_type &  a = allocator_type() 
)
inline

Definition at line 845 of file concurrent_hash_map.h.

846 : internal::hash_map_base(), my_allocator(a), my_hash_compare(compare)
847 {
848 reserve( n, my_allocator );
849 }

References tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::my_allocator, and tbb::interface5::internal::hash_map_base::reserve().

Here is the call graph for this function:

◆ concurrent_hash_map() [5/12]

template<typename Key , typename T , typename HashCompare , typename Allocator >
tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::concurrent_hash_map ( const concurrent_hash_map< Key, T, HashCompare, Allocator > &  table)
inline

Copy constructor.

Definition at line 852 of file concurrent_hash_map.h.

853 : internal::hash_map_base(),
855 {
856 call_clear_on_leave scope_guard(this);
857 internal_copy(table);
858 scope_guard.dismiss();
859 }
void internal_copy(const concurrent_hash_map &source)
Copy "source" to *this, where *this must start out empty.
static Alloc select_on_container_copy_construction(const Alloc &a)

References tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::call_clear_on_leave::dismiss(), and tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::internal_copy().

Here is the call graph for this function:

◆ concurrent_hash_map() [6/12]

template<typename Key , typename T , typename HashCompare , typename Allocator >
tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::concurrent_hash_map ( const concurrent_hash_map< Key, T, HashCompare, Allocator > &  table,
const allocator_type &  a 
)
inline

Definition at line 861 of file concurrent_hash_map.h.

862 : internal::hash_map_base(), my_allocator(a)
863 {
864 call_clear_on_leave scope_guard(this);
865 internal_copy(table);
866 scope_guard.dismiss();
867 }

References tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::call_clear_on_leave::dismiss(), and tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::internal_copy().

Here is the call graph for this function:

◆ concurrent_hash_map() [7/12]

template<typename Key , typename T , typename HashCompare , typename Allocator >
tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::concurrent_hash_map ( concurrent_hash_map< Key, T, HashCompare, Allocator > &&  table)
inline

Move constructor.

Definition at line 871 of file concurrent_hash_map.h.

872 : internal::hash_map_base(), my_allocator(std::move(table.get_allocator()))
873 {
874 internal_move(std::move(table));
875 }
void move(tbb_thread &t1, tbb_thread &t2)
Definition: tbb_thread.h:319

References tbb::interface5::internal::hash_map_base::internal_move(), and tbb::move().

Here is the call graph for this function:

◆ concurrent_hash_map() [8/12]

template<typename Key , typename T , typename HashCompare , typename Allocator >
tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::concurrent_hash_map ( concurrent_hash_map< Key, T, HashCompare, Allocator > &&  table,
const allocator_type &  a 
)
inline

Move constructor.

Definition at line 878 of file concurrent_hash_map.h.

879 : internal::hash_map_base(), my_allocator(a)
880 {
881 if (a == table.get_allocator()){
882 internal_move(std::move(table));
883 }else{
884 call_clear_on_leave scope_guard(this);
885 internal_copy(std::make_move_iterator(table.begin()), std::make_move_iterator(table.end()), table.size());
886 scope_guard.dismiss();
887 }
888 }

References tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::call_clear_on_leave::dismiss(), tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::internal_copy(), tbb::interface5::internal::hash_map_base::internal_move(), and tbb::move().

Here is the call graph for this function:

◆ concurrent_hash_map() [9/12]

template<typename Key , typename T , typename HashCompare , typename Allocator >
template<typename I >
tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::concurrent_hash_map ( I  first,
I  last,
const allocator_type &  a = allocator_type() 
)
inline

Construction with copying iteration range and given allocator instance.

Definition at line 893 of file concurrent_hash_map.h.

894 : internal::hash_map_base(), my_allocator(a)
895 {
896 call_clear_on_leave scope_guard(this);
897 internal_copy(first, last, std::distance(first, last));
898 scope_guard.dismiss();
899 }
auto last(Container &c) -> decltype(begin(c))
auto first(Container &c) -> decltype(begin(c))

References tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::call_clear_on_leave::dismiss(), tbb::internal::first(), tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::internal_copy(), and tbb::internal::last().

Here is the call graph for this function:

◆ concurrent_hash_map() [10/12]

template<typename Key , typename T , typename HashCompare , typename Allocator >
template<typename I >
tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::concurrent_hash_map ( I  first,
I  last,
const HashCompare &  compare,
const allocator_type &  a = allocator_type() 
)
inline

Definition at line 902 of file concurrent_hash_map.h.

903 : internal::hash_map_base(), my_allocator(a), my_hash_compare(compare)
904 {
905 call_clear_on_leave scope_guard(this);
906 internal_copy(first, last, std::distance(first, last));
907 scope_guard.dismiss();
908 }

References tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::call_clear_on_leave::dismiss(), tbb::internal::first(), tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::internal_copy(), and tbb::internal::last().

Here is the call graph for this function:

◆ concurrent_hash_map() [11/12]

template<typename Key , typename T , typename HashCompare , typename Allocator >
tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::concurrent_hash_map ( std::initializer_list< value_type >  il,
const allocator_type &  a = allocator_type() 
)
inline

Construct table with a copy of the elements of the initializer list.

Definition at line 912 of file concurrent_hash_map.h.

913 : internal::hash_map_base(), my_allocator(a)
914 {
915 call_clear_on_leave scope_guard(this);
916 internal_copy(il.begin(), il.end(), il.size());
917 scope_guard.dismiss();
918 }

References tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::call_clear_on_leave::dismiss(), and tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::internal_copy().

Here is the call graph for this function:

◆ concurrent_hash_map() [12/12]

template<typename Key , typename T , typename HashCompare , typename Allocator >
tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::concurrent_hash_map ( std::initializer_list< value_type >  il,
const HashCompare &  compare,
const allocator_type &  a = allocator_type() 
)
inline

Definition at line 920 of file concurrent_hash_map.h.

921 : internal::hash_map_base(), my_allocator(a), my_hash_compare(compare)
922 {
923 call_clear_on_leave scope_guard(this);
924 internal_copy(il.begin(), il.end(), il.size());
925 scope_guard.dismiss();
926 }

References tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::call_clear_on_leave::dismiss(), and tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::internal_copy().

Here is the call graph for this function:

◆ ~concurrent_hash_map()

template<typename Key , typename T , typename HashCompare , typename Allocator >
tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::~concurrent_hash_map ( )
inline

Clear table and destroy it.

Definition at line 971 of file concurrent_hash_map.h.

References tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::clear().

Here is the call graph for this function:

Member Function Documentation

◆ allocate_node_copy_construct()

template<typename Key , typename T , typename HashCompare , typename Allocator >
static node * tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::allocate_node_copy_construct ( node_allocator_type &  allocator,
const Key &  key,
const T *  t 
)
inlinestaticprotected

Definition at line 663 of file concurrent_hash_map.h.

663 {
664 return create_node(allocator, key, *t);
665 }
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task __itt_model_task_instance ITT_FORMAT p void ITT_FORMAT p void ITT_FORMAT p void size_t ITT_FORMAT d void ITT_FORMAT p const wchar_t ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s no args void ITT_FORMAT p size_t ITT_FORMAT d no args const wchar_t const wchar_t ITT_FORMAT s __itt_heap_function void size_t int ITT_FORMAT d __itt_heap_function void ITT_FORMAT p __itt_heap_function void void size_t int ITT_FORMAT d no args no args unsigned int ITT_FORMAT u const __itt_domain __itt_id ITT_FORMAT lu const __itt_domain __itt_id __itt_id __itt_string_handle ITT_FORMAT p const __itt_domain __itt_id ITT_FORMAT p const __itt_domain __itt_id __itt_timestamp __itt_timestamp ITT_FORMAT lu const __itt_domain __itt_id __itt_id __itt_string_handle ITT_FORMAT p const __itt_domain ITT_FORMAT p const __itt_domain __itt_string_handle unsigned long long ITT_FORMAT lu const __itt_domain __itt_string_handle unsigned long long ITT_FORMAT lu const __itt_domain __itt_id __itt_string_handle * key
static node * create_node(node_allocator_type &allocator, Args &&... args)

References tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::create_node(), and key.

Referenced by tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::insert().

Here is the call graph for this function:
Here is the caller graph for this function:

◆ allocate_node_default_construct()

template<typename Key , typename T , typename HashCompare , typename Allocator >
static node * tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::allocate_node_default_construct ( node_allocator_type &  allocator,
const Key &  key,
const T *   
)
inlinestaticprotected

Definition at line 673 of file concurrent_hash_map.h.

673 {
674#if __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_TUPLE_PRESENT
675 // Emplace construct an empty T object inside the pair
676 return create_node(allocator, std::piecewise_construct,
677 std::forward_as_tuple(key), std::forward_as_tuple());
678#else
679 // Use of a temporary object is impossible, because create_node takes a non-const reference.
680 // copy-initialization is possible because T is already required to be CopyConstructible.
681 T obj = T();
682 return create_node(allocator, key, tbb::internal::move(obj));
683#endif
684 }

References tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::create_node(), key, and tbb::move().

Referenced by tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::insert().

Here is the call graph for this function:
Here is the caller graph for this function:

◆ allocate_node_move_construct()

template<typename Key , typename T , typename HashCompare , typename Allocator >
static node * tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::allocate_node_move_construct ( node_allocator_type &  allocator,
const Key &  key,
const T *  t 
)
inlinestaticprotected

Definition at line 668 of file concurrent_hash_map.h.

668 {
669 return create_node(allocator, key, std::move(*const_cast<T*>(t)));
670 }

References tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::create_node(), key, and tbb::move().

Referenced by tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::generic_move_insert().

Here is the call graph for this function:
Here is the caller graph for this function:

◆ begin() [1/2]

template<typename Key , typename T , typename HashCompare , typename Allocator >
iterator tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::begin ( )
inline

◆ begin() [2/2]

template<typename Key , typename T , typename HashCompare , typename Allocator >
const_iterator tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::begin ( ) const
inline

◆ bucket_count()

template<typename Key , typename T , typename HashCompare , typename Allocator >
size_type tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::bucket_count ( ) const
inline

Returns the current number of buckets.

Definition at line 1003 of file concurrent_hash_map.h.

1003{ return my_mask+1; }
atomic< hashcode_t > my_mask
Hash mask = sum of allocated segment sizes - 1.

References tbb::interface5::internal::hash_map_base::my_mask.

◆ clear()

template<typename Key , typename T , typename HashCompare , typename A >
void tbb::interface5::concurrent_hash_map< Key, T, HashCompare, A >::clear

Clear table.

Definition at line 1505 of file concurrent_hash_map.h.

1505 {
1506 hashcode_t m = my_mask;
1507 __TBB_ASSERT((m&(m+1))==0, "data structure is invalid");
1508#if TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
1509#if TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
1510 int current_size = int(my_size), buckets = int(m)+1, empty_buckets = 0, overpopulated_buckets = 0; // usage statistics
1511 static bool reported = false;
1512#endif
1513 bucket *bp = 0;
1514 // check consistency
1515 for( segment_index_t b = 0; b <= m; b++ ) {
1516 if( b & (b-2) ) ++bp; // not the beginning of a segment
1517 else bp = get_bucket( b );
1518 node_base *n = bp->node_list;
1519 __TBB_ASSERT( is_valid(n) || n == internal::empty_rehashed || n == internal::rehash_req, "Broken internal structure" );
1520 __TBB_ASSERT( *reinterpret_cast<intptr_t*>(&bp->mutex) == 0, "concurrent or unexpectedly terminated operation during clear() execution" );
1521#if TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
1522 if( n == internal::empty_rehashed ) empty_buckets++;
1523 else if( n == internal::rehash_req ) buckets--;
1524 else if( n->next ) overpopulated_buckets++;
1525#endif
1526#if __TBB_EXTRA_DEBUG
1527 for(; is_valid(n); n = n->next ) {
1528 hashcode_t h = my_hash_compare.hash( static_cast<node*>(n)->value().first );
1529 h &= m;
1530 __TBB_ASSERT( h == b || get_bucket(h)->node_list == internal::rehash_req, "hash() function changed for key in table or internal error" );
1531 }
1532#endif
1533 }
1534#if TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
1535#if __TBB_STATISTICS
1536 printf( "items=%d buckets: capacity=%d rehashed=%d empty=%d overpopulated=%d"
1537 " concurrent: resizes=%u rehashes=%u restarts=%u\n",
1538 current_size, int(m+1), buckets, empty_buckets, overpopulated_buckets,
1539 unsigned(my_info_resizes), unsigned(my_info_rehashes), unsigned(my_info_restarts) );
1540 my_info_resizes = 0; // concurrent ones
1541 my_info_restarts = 0; // race collisions
1542 my_info_rehashes = 0; // invocations of rehash_bucket
1543#endif
1544 if( buckets > current_size) empty_buckets -= buckets - current_size;
1545 else overpopulated_buckets -= current_size - buckets; // TODO: load_factor?
1546 if( !reported && buckets >= 512 && ( 2*empty_buckets > current_size || 2*overpopulated_buckets > current_size ) ) {
1548 "Performance is not optimal because the hash function produces bad randomness in lower bits in %s.\nSize: %d Empties: %d Overlaps: %d",
1550 typeid(*this).name(),
1551#else
1552 "concurrent_hash_map",
1553#endif
1554 current_size, empty_buckets, overpopulated_buckets );
1555 reported = true;
1556 }
1557#endif
1558#endif // TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
1559 my_size = 0;
1561 __TBB_ASSERT( s+1 == pointers_per_table || !my_table[s+1], "wrong mask or concurrent grow" );
1562 do {
1563 __TBB_ASSERT( is_valid( my_table[s] ), "wrong mask or concurrent grow" );
1564 segment_ptr_t buckets_ptr = my_table[s];
1565 size_type sz = segment_size( s ? s : 1 );
1566 for( segment_index_t i = 0; i < sz; i++ )
1567 for( node_base *n = buckets_ptr[i].node_list; is_valid(n); n = buckets_ptr[i].node_list ) {
1568 buckets_ptr[i].node_list = n->next;
1569 delete_node( n );
1570 }
1572 } while(s-- > 0);
1574}
#define __TBB_USE_OPTIONAL_RTTI
Definition: tbb_config.h:125
#define __TBB_ASSERT(predicate, comment)
No-op version of __TBB_ASSERT.
Definition: tbb_stddef.h:165
void const char const char int ITT_FORMAT __itt_group_sync s
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task __itt_model_task_instance ITT_FORMAT p void ITT_FORMAT p void ITT_FORMAT p void size_t ITT_FORMAT d void ITT_FORMAT p const wchar_t ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s no args void ITT_FORMAT p size_t ITT_FORMAT d no args const wchar_t const wchar_t ITT_FORMAT s __itt_heap_function void size_t int ITT_FORMAT d __itt_heap_function void ITT_FORMAT p __itt_heap_function void void size_t int ITT_FORMAT d no args no args unsigned int ITT_FORMAT u const __itt_domain __itt_id ITT_FORMAT lu const __itt_domain __itt_id __itt_id __itt_string_handle ITT_FORMAT p const __itt_domain __itt_id ITT_FORMAT p const __itt_domain __itt_id __itt_timestamp __itt_timestamp ITT_FORMAT lu const __itt_domain __itt_id __itt_id __itt_string_handle ITT_FORMAT p const __itt_domain ITT_FORMAT p const __itt_domain __itt_string_handle unsigned long long ITT_FORMAT lu const __itt_domain __itt_string_handle unsigned long long ITT_FORMAT lu const __itt_domain __itt_id __itt_string_handle __itt_metadata_type size_t void ITT_FORMAT p const __itt_domain __itt_id __itt_string_handle const wchar_t size_t ITT_FORMAT lu const __itt_domain __itt_id __itt_relation __itt_id ITT_FORMAT p const wchar_t int ITT_FORMAT __itt_group_mark d int
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task __itt_model_task_instance ITT_FORMAT p void ITT_FORMAT p void ITT_FORMAT p void size_t ITT_FORMAT d void ITT_FORMAT p const wchar_t ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s no args void ITT_FORMAT p size_t ITT_FORMAT d no args const wchar_t const wchar_t ITT_FORMAT s __itt_heap_function h
void __TBB_EXPORTED_FUNC runtime_warning(const char *format,...)
Report a runtime warning.
static hash_map_node_base *const empty_rehashed
Rehashed empty bucket flag.
static hash_map_node_base *const rehash_req
Incompleteness flag value.
hash_map_node_base node_base
Node base type.
bucket * get_bucket(hashcode_t h) const
Get bucket by (masked) hashcode.
segments_table_t my_table
Segment pointers table. Also prevents false sharing between my_mask and my_size.
void delete_segment(segment_index_t s, const Allocator &allocator)
static size_type const pointers_per_table
Size of a pointer / table size.
static segment_index_t segment_index_of(size_type index)
static size_type const embedded_buckets
Count of segments in the first block.
static size_type segment_size(segment_index_t k)
atomic< size_type > my_size
Size of container in stored items.

References __TBB_ASSERT, __TBB_USE_OPTIONAL_RTTI, tbb::interface5::internal::empty_rehashed, h, int, tbb::interface5::internal::hash_map_base::bucket::mutex, tbb::interface5::internal::hash_map_node_base::next, tbb::interface5::internal::hash_map_base::bucket::node_list, tbb::interface5::internal::rehash_req, tbb::internal::runtime_warning(), s, and tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::node::value().

Referenced by tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::operator=(), and tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::~concurrent_hash_map().

Here is the call graph for this function:
Here is the caller graph for this function:

◆ count()

template<typename Key , typename T , typename HashCompare , typename Allocator >
size_type tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::count ( const Key &  key) const
inline

Return count of items (0 or 1)

Definition at line 1016 of file concurrent_hash_map.h.

1016 {
1017 return const_cast<concurrent_hash_map*>(this)->lookup(/*insert*/false, key, NULL, NULL, /*write=*/false, &do_not_allocate_node );
1018 }
static node * do_not_allocate_node(node_allocator_type &, const Key &, const T *)
bool lookup(bool op_insert, const Key &key, const T *t, const_accessor *result, bool write, node *(*allocate_node)(node_allocator_type &, const Key &, const T *), node *tmp_n=0)
Insert or find item and optionally acquire a lock on the item.
concurrent_hash_map(const allocator_type &a=allocator_type())
Construct empty table.

References tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::do_not_allocate_node(), key, and tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::lookup().

Here is the call graph for this function:

◆ create_node()

template<typename Key , typename T , typename HashCompare , typename Allocator >
template<typename... Args>
static node * tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::create_node ( node_allocator_type allocator,
Args &&...  args 
)
inline static protected

Definition at line 645 of file concurrent_hash_map.h.

650 {
651 node* node_ptr = node_allocator_traits::allocate(allocator, 1);
652 node_scoped_guard guard(node_ptr, allocator);
653 node_allocator_traits::construct(allocator, node_ptr);
654#if __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
655 node_allocator_traits::construct(allocator, node_ptr->storage(), std::forward<Args>(args)...);
656#else
657 node_allocator_traits::construct(allocator, node_ptr->storage(), tbb::internal::forward<Arg1>(arg1), tbb::internal::forward<Arg2>(arg2));
658#endif
659 guard.dismiss();
660 return node_ptr;
661 }
static void construct(Alloc &, PT *p)
static pointer allocate(Alloc &a, size_type n)

References tbb::internal::allocator_traits< Alloc >::allocate(), tbb::internal::allocator_traits< Alloc >::construct(), tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::node_scoped_guard::dismiss(), and tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::node::storage().

Referenced by tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::allocate_node_copy_construct(), tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::allocate_node_default_construct(), tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::allocate_node_move_construct(), and tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::generic_emplace().

Here is the call graph for this function:
Here is the caller graph for this function:

◆ delete_node()

template<typename Key , typename T , typename HashCompare , typename Allocator >
void tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::delete_node ( node_base n)
inline protected

Definition at line 623 of file concurrent_hash_map.h.

623 {
624 node_allocator_traits::destroy(my_allocator, static_cast<node*>(n)->storage());
625 node_allocator_traits::destroy(my_allocator, static_cast<node*>(n));
626 node_allocator_traits::deallocate(my_allocator, static_cast<node*>(n), 1);
627 }
static void deallocate(Alloc &a, pointer p, size_type n)
static void destroy(Alloc &, T *p)

References tbb::internal::allocator_traits< Alloc >::deallocate(), tbb::internal::allocator_traits< Alloc >::destroy(), and tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::my_allocator.

Here is the call graph for this function:

◆ do_not_allocate_node()

template<typename Key , typename T , typename HashCompare , typename Allocator >
static node * tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::do_not_allocate_node ( node_allocator_type ,
const Key &  ,
const T *   
)
inline static protected

Definition at line 686 of file concurrent_hash_map.h.

686 {
687 __TBB_ASSERT(false,"this dummy function should not be called");
688 return NULL;
689 }

References __TBB_ASSERT.

Referenced by tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::count(), tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::find(), and tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::generic_emplace().

Here is the caller graph for this function:

◆ emplace() [1/3]

template<typename Key , typename T , typename HashCompare , typename Allocator >
template<typename... Args>
bool tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::emplace ( accessor result,
Args &&...  args 
)
inline

Insert item by constructing it in place from the given arguments if there is no such key present already, and acquire a write lock on the item.

Returns true if item is new.

Definition at line 1098 of file concurrent_hash_map.h.

1098 {
1099 return generic_emplace(result, std::forward<Args>(args)...);
1100 }
bool generic_emplace(Accessor &&result, Args &&... args)

References tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::generic_emplace().

Here is the call graph for this function:

◆ emplace() [2/3]

template<typename Key , typename T , typename HashCompare , typename Allocator >
template<typename... Args>
bool tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::emplace ( Args &&...  args)
inline

Insert item by constructing it in place from the given arguments if there is no such key present already.

Returns true if item is inserted.

Definition at line 1105 of file concurrent_hash_map.h.

1105 {
1106 return generic_emplace(accessor_not_used(), std::forward<Args>(args)...);
1107 }

References tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::generic_emplace().

Here is the call graph for this function:

◆ emplace() [3/3]

template<typename Key , typename T , typename HashCompare , typename Allocator >
template<typename... Args>
bool tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::emplace ( const_accessor result,
Args &&...  args 
)
inline

Insert item by constructing it in place from the given arguments if there is no such key present already, and acquire a read lock on the item.

Returns true if item is new.

Definition at line 1091 of file concurrent_hash_map.h.

1091 {
1092 return generic_emplace(result, std::forward<Args>(args)...);
1093 }

References tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::generic_emplace().

Here is the call graph for this function:

◆ empty()

template<typename Key , typename T , typename HashCompare , typename Allocator >
bool tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::empty ( ) const
inline

True if size()==0.

Definition at line 997 of file concurrent_hash_map.h.

997{ return my_size == 0; }

References tbb::interface5::internal::hash_map_base::my_size.

◆ end() [1/2]

template<typename Key , typename T , typename HashCompare , typename Allocator >
iterator tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::end ( )
inline

◆ end() [2/2]

template<typename Key , typename T , typename HashCompare , typename Allocator >
const_iterator tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::end ( ) const
inline

Definition at line 989 of file concurrent_hash_map.h.

989{ return const_iterator( *this, 0, 0, 0 ); }

◆ equal_range() [1/2]

template<typename Key , typename T , typename HashCompare , typename Allocator >
std::pair< iterator, iterator > tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::equal_range ( const Key &  key)
inline

Definition at line 990 of file concurrent_hash_map.h.

990{ return internal_equal_range( key, end() ); }
std::pair< I, I > internal_equal_range(const Key &key, I end) const
Returns an iterator for an item defined by the key, or for the next item after it (if upper==true)

References tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::end(), tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::internal_equal_range(), and key.

Referenced by tbb::operator==().

Here is the call graph for this function:
Here is the caller graph for this function:

◆ equal_range() [2/2]

template<typename Key , typename T , typename HashCompare , typename Allocator >
std::pair< const_iterator, const_iterator > tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::equal_range ( const Key &  key) const
inline

◆ erase() [1/3]

template<typename Key , typename T , typename HashCompare , typename Allocator >
bool tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::erase ( accessor item_accessor)
inline

Erase item by accessor.

Return true if the item was erased specifically by this call.

Definition at line 1137 of file concurrent_hash_map.h.

1137 {
1138 return exclude( item_accessor );
1139 }
bool exclude(const_accessor &item_accessor)
delete item by accessor

References tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::exclude().

Here is the call graph for this function:

◆ erase() [2/3]

template<typename Key , typename T , typename HashCompare , typename A >
bool tbb::interface5::concurrent_hash_map< Key, T, HashCompare, A >::erase ( const Key &  key)

Erase item.

Return true if the item was erased specifically by this call.

Definition at line 1386 of file concurrent_hash_map.h.

1386 {
1387 node_base *n;
1388 hashcode_t const h = my_hash_compare.hash( key );
1390restart:
1391 {//lock scope
1392 // get bucket
1393 bucket_accessor b( this, h & m );
1394 search:
1395 node_base **p = &b()->node_list;
1396 n = *p;
1397 while( is_valid(n) && !my_hash_compare.equal(key, static_cast<node*>(n)->value().first ) ) {
1398 p = &n->next;
1399 n = *p;
1400 }
1401 if( !n ) { // not found, but mask could be changed
1402 if( check_mask_race( h, m ) )
1403 goto restart;
1404 return false;
1405 }
1406 else if( !b.is_writer() && !b.upgrade_to_writer() ) {
1407 if( check_mask_race( h, m ) ) // contended upgrade, check mask
1408 goto restart;
1409 goto search;
1410 }
1411 *p = n->next;
1412 my_size--;
1413 }
1414 {
1415 typename node::scoped_t item_locker( n->mutex, /*write=*/true );
1416 }
1417 // note: there should be no threads pretending to acquire this mutex again, do not try to upgrade const_accessor!
1418 delete_node( n ); // Only one thread can delete it due to write lock on the bucket
1419 return true;
1420}
void const char const char int ITT_FORMAT __itt_group_sync p
T itt_load_word_with_acquire(const tbb::atomic< T > &src)
mutex_t::scoped_lock scoped_t
Scoped lock type for mutex.
bool check_mask_race(const hashcode_t h, hashcode_t &m) const
Check for mask race.

References h, tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::bucket_accessor::is_writer(), tbb::internal::itt_load_word_with_acquire(), key, tbb::interface5::internal::hash_map_node_base::mutex, tbb::interface5::internal::hash_map_node_base::next, p, and tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::node::value().

Here is the call graph for this function:

◆ erase() [3/3]

template<typename Key , typename T , typename HashCompare , typename Allocator >
bool tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::erase ( const_accessor item_accessor)
inline

Erase item by const_accessor.

Return true if the item was erased specifically by this call.

Definition at line 1131 of file concurrent_hash_map.h.

1131 {
1132 return exclude( item_accessor );
1133 }

References tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::exclude().

Here is the call graph for this function:

◆ exclude()

template<typename Key , typename T , typename HashCompare , typename A >
bool tbb::interface5::concurrent_hash_map< Key, T, HashCompare, A >::exclude ( const_accessor item_accessor)
protected

delete item by accessor

Definition at line 1356 of file concurrent_hash_map.h.

1356 {
1357 __TBB_ASSERT( item_accessor.my_node, NULL );
1358 node_base *const n = item_accessor.my_node;
1359 hashcode_t const h = item_accessor.my_hash;
1361 do {
1362 // get bucket
1363 bucket_accessor b( this, h & m, /*writer=*/true );
1364 node_base **p = &b()->node_list;
1365 while( *p && *p != n )
1366 p = &(*p)->next;
1367 if( !*p ) { // someone else was first
1368 if( check_mask_race( h, m ) )
1369 continue;
1370 item_accessor.release();
1371 return false;
1372 }
1373 __TBB_ASSERT( *p == n, NULL );
1374 *p = n->next; // remove from container
1375 my_size--;
1376 break;
1377 } while(true);
1378 if( !item_accessor.is_writer() ) // need to get exclusive lock
1379 item_accessor.upgrade_to_writer(); // return value means nothing here
1380 item_accessor.release();
1381 delete_node( n ); // Only one thread can delete it
1382 return true;
1383}

References __TBB_ASSERT, h, tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::const_accessor::is_writer(), tbb::internal::itt_load_word_with_acquire(), tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::const_accessor::my_hash, tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::const_accessor::my_node, tbb::interface5::internal::hash_map_node_base::next, p, and tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::const_accessor::release().

Referenced by tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::erase().

Here is the call graph for this function:
Here is the caller graph for this function:

◆ find() [1/2]

template<typename Key , typename T , typename HashCompare , typename Allocator >
bool tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::find ( accessor result,
const Key &  key 
)
inline

Find item and acquire a write lock on the item.

Return true if item is found, false otherwise.

Definition at line 1029 of file concurrent_hash_map.h.

1029 {
1030 result.release();
1031 return lookup(/*insert*/false, key, NULL, &result, /*write=*/true, &do_not_allocate_node );
1032 }

References tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::do_not_allocate_node(), key, tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::lookup(), and tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::const_accessor::release().

Here is the call graph for this function:

◆ find() [2/2]

template<typename Key , typename T , typename HashCompare , typename Allocator >
bool tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::find ( const_accessor result,
const Key &  key 
) const
inline

Find item and acquire a read lock on the item.

Return true if item is found, false otherwise.

Definition at line 1022 of file concurrent_hash_map.h.

1022 {
1023 result.release();
1024 return const_cast<concurrent_hash_map*>(this)->lookup(/*insert*/false, key, NULL, &result, /*write=*/false, &do_not_allocate_node );
1025 }

References tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::do_not_allocate_node(), key, tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::lookup(), and tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::const_accessor::release().

Here is the call graph for this function:

◆ generic_emplace()

template<typename Key , typename T , typename HashCompare , typename Allocator >
template<typename Accessor , typename... Args>
bool tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::generic_emplace ( Accessor &&  result,
Args &&...  args 
)
inline protected

◆ generic_move_insert()

template<typename Key , typename T , typename HashCompare , typename Allocator >
template<typename Accessor >
bool tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::generic_move_insert ( Accessor &&  result,
value_type &&  value 
)
inline protected

Definition at line 1155 of file concurrent_hash_map.h.

1155 {
1156 result.release();
1157 return lookup(/*insert*/true, value.first, &value.second, accessor_location(result), is_write_access_needed(result), &allocate_node_move_construct );
1158 }
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task __itt_model_task_instance ITT_FORMAT p void ITT_FORMAT p void ITT_FORMAT p void size_t ITT_FORMAT d void ITT_FORMAT p const wchar_t ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s no args void ITT_FORMAT p size_t ITT_FORMAT d no args const wchar_t const wchar_t ITT_FORMAT s __itt_heap_function void size_t int ITT_FORMAT d __itt_heap_function void ITT_FORMAT p __itt_heap_function void void size_t int ITT_FORMAT d no args no args unsigned int ITT_FORMAT u const __itt_domain __itt_id ITT_FORMAT lu const __itt_domain __itt_id __itt_id __itt_string_handle ITT_FORMAT p const __itt_domain __itt_id ITT_FORMAT p const __itt_domain __itt_id __itt_timestamp __itt_timestamp ITT_FORMAT lu const __itt_domain __itt_id __itt_id __itt_string_handle ITT_FORMAT p const __itt_domain ITT_FORMAT p const __itt_domain __itt_string_handle unsigned long long value
static node * allocate_node_move_construct(node_allocator_type &allocator, const Key &key, const T *t)

References tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::accessor_location, tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::allocate_node_move_construct(), tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::is_write_access_needed, tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::lookup(), and value.

Referenced by tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::insert().

Here is the call graph for this function:
Here is the caller graph for this function:

◆ get_allocator()

template<typename Key , typename T , typename HashCompare , typename Allocator >
allocator_type tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::get_allocator ( ) const
inline

return allocator object

Definition at line 1006 of file concurrent_hash_map.h.

1006{ return this->my_allocator; }

References tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::my_allocator.

◆ insert() [1/10]

template<typename Key , typename T , typename HashCompare , typename Allocator >
bool tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::insert ( accessor result,
const Key &  key 
)
inline

Insert item (if not already present) and acquire a write lock on the item.

Returns true if item is new.

Definition at line 1043 of file concurrent_hash_map.h.

1043 {
1044 result.release();
1045 return lookup(/*insert*/true, key, NULL, &result, /*write=*/true, &allocate_node_default_construct );
1046 }
static node * allocate_node_default_construct(node_allocator_type &allocator, const Key &key, const T *)

References tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::allocate_node_default_construct(), key, tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::lookup(), and tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::const_accessor::release().

Here is the call graph for this function:

◆ insert() [2/10]

template<typename Key , typename T , typename HashCompare , typename Allocator >
bool tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::insert ( accessor result,
const value_type value 
)
inline

Insert item by copying if there is no such key present already and acquire a write lock on the item.

Returns true if item is new.

Definition at line 1057 of file concurrent_hash_map.h.

1057 {
1058 result.release();
1059 return lookup(/*insert*/true, value.first, &value.second, &result, /*write=*/true, &allocate_node_copy_construct );
1060 }
static node * allocate_node_copy_construct(node_allocator_type &allocator, const Key &key, const T *t)

References tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::allocate_node_copy_construct(), tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::lookup(), tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::const_accessor::release(), and value.

Here is the call graph for this function:

◆ insert() [3/10]

template<typename Key , typename T , typename HashCompare , typename Allocator >
bool tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::insert ( accessor result,
value_type &&  value 
)
inline

Insert item by moving it if there is no such key present already, and acquire a write lock on the item.

Returns true if item is new.

Definition at line 1077 of file concurrent_hash_map.h.

1077 {
1078 return generic_move_insert(result, std::move(value));
1079 }
bool generic_move_insert(Accessor &&result, value_type &&value)

References tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::generic_move_insert(), tbb::move(), and value.

Here is the call graph for this function:

◆ insert() [4/10]

template<typename Key , typename T , typename HashCompare , typename Allocator >
bool tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::insert ( const value_type value)
inline

Insert item by copying if there is no such key present already.

Returns true if item is inserted.

Definition at line 1064 of file concurrent_hash_map.h.

1064 {
1065 return lookup(/*insert*/true, value.first, &value.second, NULL, /*write=*/false, &allocate_node_copy_construct );
1066 }

References tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::allocate_node_copy_construct(), tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::lookup(), and value.

Here is the call graph for this function:

◆ insert() [5/10]

template<typename Key , typename T , typename HashCompare , typename Allocator >
bool tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::insert ( const_accessor result,
const Key &  key 
)
inline

Insert item (if not already present) and acquire a read lock on the item.

Returns true if item is new.

Definition at line 1036 of file concurrent_hash_map.h.

1036 {
1037 result.release();
1038 return lookup(/*insert*/true, key, NULL, &result, /*write=*/false, &allocate_node_default_construct );
1039 }

References tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::allocate_node_default_construct(), key, tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::lookup(), and tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::const_accessor::release().

Referenced by tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::insert().

Here is the call graph for this function:
Here is the caller graph for this function:

◆ insert() [6/10]

template<typename Key , typename T , typename HashCompare , typename Allocator >
bool tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::insert ( const_accessor &  result,
const value_type &  value 
)
inline

Insert item by copying if there is no such key present already and acquire a read lock on the item.

Returns true if item is new.

Definition at line 1050 of file concurrent_hash_map.h.

1050 {
1051 result.release();
1052 return lookup(/*insert*/true, value.first, &value.second, &result, /*write=*/false, &allocate_node_copy_construct );
1053 }

References tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::allocate_node_copy_construct(), tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::lookup(), tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::const_accessor::release(), and value.

Here is the call graph for this function:

◆ insert() [7/10]

template<typename Key , typename T , typename HashCompare , typename Allocator >
bool tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::insert ( const_accessor &  result,
value_type &&  value 
)
inline

Insert item by copying if there is no such key present already and acquire a read lock on the item.

Returns true if item is new.

Definition at line 1071 of file concurrent_hash_map.h.

1071 {
1072 return generic_move_insert(result, std::move(value));
1073 }

References tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::generic_move_insert(), tbb::move(), and value.

Here is the call graph for this function:

◆ insert() [8/10]

template<typename Key , typename T , typename HashCompare , typename Allocator >
template<typename I >
void tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::insert ( I  first,
I  last 
)
inline

Insert range [first, last)

Definition at line 1113 of file concurrent_hash_map.h.

1113 {
1114 for ( ; first != last; ++first )
1115 insert( *first );
1116 }
bool insert(const_accessor &result, const Key &key)
Insert item (if not already present) and acquire a read lock on the item.

References tbb::internal::first(), tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::insert(), and tbb::internal::last().

Here is the call graph for this function:

◆ insert() [9/10]

template<typename Key , typename T , typename HashCompare , typename Allocator >
void tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::insert ( std::initializer_list< value_type >  il)
inline

Insert initializer list.

Definition at line 1120 of file concurrent_hash_map.h.

1120 {
1121 insert( il.begin(), il.end() );
1122 }

References tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::insert().

Here is the call graph for this function:

◆ insert() [10/10]

template<typename Key , typename T , typename HashCompare , typename Allocator >
bool tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::insert ( value_type &&  value)
inline

Insert item by copying if there is no such key present already.

Returns true if item is inserted.

Definition at line 1083 of file concurrent_hash_map.h.

1083 {
1084 return generic_move_insert(accessor_not_used(), std::move(value));
1085 }

References tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::generic_move_insert(), tbb::move(), and value.

Here is the call graph for this function:

◆ internal_copy() [1/2]

template<typename Key , typename T , typename HashCompare , typename A >
void tbb::interface5::concurrent_hash_map< Key, T, HashCompare, A >::internal_copy ( const concurrent_hash_map< Key, T, HashCompare, Allocator > &  source)
protected

Copy "source" to *this, where *this must start out empty.

Definition at line 1577 of file concurrent_hash_map.h.

1577 {
1578 hashcode_t mask = source.my_mask;
1579 if( my_mask == mask ) { // optimized version
1580 reserve( source.my_size, my_allocator ); // TODO: load_factor?
1581 bucket *dst = 0, *src = 0;
1582 bool rehash_required = false;
1583 for( hashcode_t k = 0; k <= mask; k++ ) {
1584 if( k & (k-2) ) ++dst,src++; // not the beginning of a segment
1585 else { dst = get_bucket( k ); src = source.get_bucket( k ); }
1586 __TBB_ASSERT( dst->node_list != internal::rehash_req, "Invalid bucket in destination table");
1587 node *n = static_cast<node*>( src->node_list );
1588 if( n == internal::rehash_req ) { // source is not rehashed, items are in previous buckets
1589 rehash_required = true;
1590 dst->node_list = internal::rehash_req;
1591 } else for(; n; n = static_cast<node*>( n->next ) ) {
1592 node* node_ptr = create_node(my_allocator, n->value().first, n->value().second);
1593 add_to_bucket( dst, node_ptr);
1594 ++my_size; // TODO: replace by non-atomic op
1595 }
1596 }
1597 if( rehash_required ) rehash();
1598 } else internal_copy( source.begin(), source.end(), source.my_size );
1599}
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int mask
void rehash(size_type n=0)
Rehashes and optionally resizes the whole table.
static void add_to_bucket(bucket *b, node_base *n)
Add node.

References __TBB_ASSERT, tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::begin(), tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::end(), tbb::interface5::internal::hash_map_base::get_bucket(), mask, tbb::interface5::internal::hash_map_base::my_mask, tbb::interface5::internal::hash_map_base::my_size, tbb::interface5::internal::hash_map_node_base::next, tbb::interface5::internal::hash_map_base::bucket::node_list, tbb::interface5::internal::rehash_req, and tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::node::value().

Referenced by tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::concurrent_hash_map(), tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::internal_move_assign(), and tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::operator=().

Here is the call graph for this function:
Here is the caller graph for this function:

◆ internal_copy() [2/2]

template<typename Key , typename T , typename HashCompare , typename A >
template<typename I >
void tbb::interface5::concurrent_hash_map< Key, T, HashCompare, A >::internal_copy ( I  first,
I  last,
size_type  reserve_size 
)
protected

Definition at line 1603 of file concurrent_hash_map.h.

1603 {
1604 reserve( reserve_size, my_allocator ); // TODO: load_factor?
1605 hashcode_t m = my_mask;
1606 for(; first != last; ++first) {
1607 hashcode_t h = my_hash_compare.hash( (*first).first );
1608 bucket *b = get_bucket( h & m );
1609 __TBB_ASSERT( b->node_list != internal::rehash_req, "Invalid bucket in destination table");
1610 node* node_ptr = create_node(my_allocator, (*first).first, (*first).second);
1611 add_to_bucket( b, node_ptr );
1612 ++my_size; // TODO: replace by non-atomic op
1613 }
1614}

References __TBB_ASSERT, tbb::internal::first(), h, tbb::internal::last(), tbb::interface5::internal::hash_map_base::bucket::node_list, and tbb::interface5::internal::rehash_req.

Here is the call graph for this function:

◆ internal_equal_range()

template<typename Key , typename T , typename HashCompare , typename A >
template<typename I >
std::pair< I, I > tbb::interface5::concurrent_hash_map< Key, T, HashCompare, A >::internal_equal_range ( const Key &  key,
I  end_ 
) const
protected

Returns an iterator for an item defined by the key, or for the next item after it (if upper==true)

Definition at line 1338 of file concurrent_hash_map.h.

1338 {
1339 hashcode_t h = my_hash_compare.hash( key );
1340 hashcode_t m = my_mask;
1341 __TBB_ASSERT((m&(m+1))==0, "data structure is invalid");
1342 h &= m;
1343 bucket *b = get_bucket( h );
1344 while( b->node_list == internal::rehash_req ) {
1345 m = ( 1u<<__TBB_Log2( h ) ) - 1; // get parent mask from the topmost bit
1346 b = get_bucket( h &= m );
1347 }
1348 node *n = search_bucket( key, b );
1349 if( !n )
1350 return std::make_pair(end_, end_);
1351 iterator lower(*this, h, b, n), upper(lower);
1352 return std::make_pair(lower, ++upper);
1353}
intptr_t __TBB_Log2(uintptr_t x)
Definition: tbb_machine.h:860
node * search_bucket(const key_type &key, bucket *b) const

References __TBB_ASSERT, __TBB_Log2(), h, key, tbb::interface5::internal::hash_map_base::bucket::node_list, and tbb::interface5::internal::rehash_req.

Referenced by tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::equal_range().

Here is the call graph for this function:
Here is the caller graph for this function:

◆ internal_fast_find()

template<typename Key , typename T , typename HashCompare , typename Allocator >
const_pointer tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::internal_fast_find ( const Key &  key) const
inlineprotected

Fast find when no concurrent erasure is used. For internal use inside TBB only!

Return pointer to item with given key, or NULL if no such item exists. Must not be called concurrently with erasure operations.

Definition at line 1203 of file concurrent_hash_map.h.

1203 {
1204 hashcode_t h = my_hash_compare.hash( key );
1205 hashcode_t m = (hashcode_t) itt_load_word_with_acquire( my_mask );
1206 node *n;
1207 restart:
1208 __TBB_ASSERT((m&(m+1))==0, "data structure is invalid");
1209 bucket *b = get_bucket( h & m );
1210 // TODO: actually, notification is unnecessary here, just hiding double-check
1211 if( itt_load_word_with_acquire(b->node_list) == internal::rehash_req )
1212 {
1213 bucket::scoped_t lock;
1214 if( lock.try_acquire( b->mutex, /*write=*/true ) ) {
1215 if( b->node_list == internal::rehash_req)
1216 const_cast<concurrent_hash_map*>(this)->rehash_bucket( b, h & m ); //recursive rehashing
1217 }
1218 else lock.acquire( b->mutex, /*write=*/false );
1219 __TBB_ASSERT(b->node_list!=internal::rehash_req,NULL);
1220 }
1221 n = search_bucket( key, b );
1222 if( n )
1223 return n->storage();
1224 else if( check_mask_race( h, m ) )
1225 goto restart;
1226 return 0;
1227 }
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task __itt_model_task_instance ITT_FORMAT p void * lock
void rehash_bucket(bucket *b_new, const hashcode_t h)
mutex_t::scoped_lock scoped_t
Scoped lock type for mutex.

References __TBB_ASSERT, tbb::interface5::internal::hash_map_base::check_mask_race(), tbb::interface5::internal::hash_map_base::get_bucket(), h, tbb::internal::itt_load_word_with_acquire(), key, lock, tbb::interface5::internal::hash_map_base::bucket::mutex, tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::my_hash_compare, tbb::interface5::internal::hash_map_base::my_mask, tbb::interface5::internal::hash_map_base::bucket::node_list, tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::rehash_bucket(), tbb::interface5::internal::rehash_req, tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::search_bucket(), and tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::node::storage().

Here is the call graph for this function:

◆ internal_move_assign() [1/2]

template<typename Key , typename T , typename HashCompare , typename Allocator >
void tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::internal_move_assign ( concurrent_hash_map< Key, T, HashCompare, Allocator > &&  other,
tbb::internal::traits_false_type   
)
inlineprotected

Definition at line 1190 of file concurrent_hash_map.h.

1190 {
1191 if (this->my_allocator == other.my_allocator) {
1192 internal_move(std::move(other));
1193 } else {
1194 //do per element move
1195 internal_copy(std::make_move_iterator(other.begin()), std::make_move_iterator(other.end()), other.size());
1196 }
1197 }

References tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::internal_copy(), tbb::interface5::internal::hash_map_base::internal_move(), and tbb::move().

Here is the call graph for this function:

◆ internal_move_assign() [2/2]

template<typename Key , typename T , typename HashCompare , typename Allocator >
void tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::internal_move_assign ( concurrent_hash_map< Key, T, HashCompare, Allocator > &&  other,
tbb::internal::traits_true_type   
)
inlineprotected

Definition at line 1185 of file concurrent_hash_map.h.

1185 {
1186 tbb::internal::allocator_move_assignment(my_allocator, other.my_allocator, tbb::internal::traits_true_type());
1187 internal_move(std::move(other));
1188 }
void allocator_move_assignment(MyAlloc &my_allocator, OtherAlloc &other_allocator, traits_true_type)

References tbb::internal::allocator_move_assignment(), tbb::interface5::internal::hash_map_base::internal_move(), tbb::move(), and tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::my_allocator.

Referenced by tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::operator=().

Here is the call graph for this function:
Here is the caller graph for this function:

◆ lookup()

template<typename Key , typename T , typename HashCompare , typename A >
bool tbb::interface5::concurrent_hash_map< Key, T, HashCompare, A >::lookup ( bool  op_insert,
const Key &  key,
const T *  t,
const_accessor result,
bool  write,
node *(*)(node_allocator_type &, const Key &, const T *)  allocate_node,
node *  tmp_n = 0 
)
protected

Insert or find item and optionally acquire a lock on the item.

Definition at line 1258 of file concurrent_hash_map.h.

1258 {
1259 __TBB_ASSERT( !result || !result->my_node, NULL );
1260 bool return_value;
1261 hashcode_t const h = my_hash_compare.hash( key );
1262 hashcode_t m = (hashcode_t) itt_load_word_with_acquire( my_mask );
1263 segment_index_t grow_segment = 0;
1264 node *n;
1265 restart:
1266 {//lock scope
1267 __TBB_ASSERT((m&(m+1))==0, "data structure is invalid");
1268 return_value = false;
1269 // get bucket
1270 bucket_accessor b( this, h & m );
1271
1272 // find a node
1273 n = search_bucket( key, b() );
1274 if( op_insert ) {
1275 // [opt] insert a key
1276 if( !n ) {
1277 if( !tmp_n ) {
1278 tmp_n = allocate_node(my_allocator, key, t);
1279 }
1280 if( !b.is_writer() && !b.upgrade_to_writer() ) { // TODO: improved insertion
1281 // Rerun search_list, in case another thread inserted the item during the upgrade.
1282 n = search_bucket( key, b() );
1283 if( is_valid(n) ) { // unfortunately, it did
1284 b.downgrade_to_reader();
1285 goto exists;
1286 }
1287 }
1288 if( check_mask_race(h, m) )
1289 goto restart; // b.release() is done in ~b().
1290 // insert and set flag to grow the container
1291 grow_segment = insert_new_node( b(), n = tmp_n, m );
1292 tmp_n = 0;
1293 return_value = true;
1294 }
1295 } else { // find or count
1296 if( !n ) {
1297 if( check_mask_race( h, m ) )
1298 goto restart; // b.release() is done in ~b(). TODO: replace by continue
1299 return false;
1300 }
1301 return_value = true;
1302 }
1303 exists:
1304 if( !result ) goto check_growth;
1305 // TODO: the following seems as generic/regular operation
1306 // acquire the item
1307 if( !result->try_acquire( n->mutex, write ) ) {
1308 for( tbb::internal::atomic_backoff backoff(true);; ) {
1309 if( result->try_acquire( n->mutex, write ) ) break;
1310 if( !backoff.bounded_pause() ) {
1311 // the wait takes really long, restart the operation
1312 b.release();
1313 __TBB_ASSERT( !op_insert || !return_value, "Can't acquire new item in locked bucket?" );
1314 __TBB_Yield();
1315 m = (hashcode_t) itt_load_word_with_acquire( my_mask );
1316 goto restart;
1317 }
1318 }
1319 }
1320 }//lock scope
1321 result->my_node = n;
1322 result->my_hash = h;
1323check_growth:
1324 // [opt] grow the container
1325 if( grow_segment ) {
1326#if __TBB_STATISTICS
1327 my_info_resizes++; // concurrent ones
1328#endif
1329 enable_segment( grow_segment, my_allocator );
1330 }
1331 if( tmp_n ) // if op_insert only
1332 delete_node( tmp_n );
1333 return return_value;
1334}
#define __TBB_Yield()
Definition: ibm_aix51.h:44
void enable_segment(segment_index_t k, const Allocator &allocator, bool is_initial=false)
Enable segment.
segment_index_t insert_new_node(bucket *b, node_base *n, hashcode_t mask)
Insert a node and check for load factor.
Class that implements exponential backoff.
Definition: tbb_machine.h:345

References __TBB_ASSERT, __TBB_Yield, h, tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::bucket_accessor::is_writer(), tbb::internal::itt_load_word_with_acquire(), key, tbb::interface5::internal::hash_map_node_base::mutex, tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::const_accessor::my_hash, and tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::const_accessor::my_node.

Referenced by tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::count(), tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::find(), tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::generic_emplace(), tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::generic_move_insert(), and tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::insert().

Here is the call graph for this function:
Here is the caller graph for this function:

◆ max_size()

template<typename Key , typename T , typename HashCompare , typename Allocator >
size_type tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::max_size ( ) const
inline

Upper bound on size.

Definition at line 1000 of file concurrent_hash_map.h.

1000{return (~size_type(0))/sizeof(node);}

◆ operator=() [1/3]

template<typename Key , typename T , typename HashCompare , typename Allocator >
concurrent_hash_map & tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::operator= ( concurrent_hash_map< Key, T, HashCompare, Allocator > &&  table)
inline

Move Assignment.

Definition at line 943 of file concurrent_hash_map.h.

943 {
944 if(this != &table) {
945 typedef typename node_allocator_traits::propagate_on_container_move_assignment pocma_type;
946 internal_move_assign(std::move(table), pocma_type());
947 }
948 return *this;
949 }
void internal_move_assign(concurrent_hash_map &&other, tbb::internal::traits_true_type)
tbb::internal::false_type propagate_on_container_move_assignment

References tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::internal_move_assign(), and tbb::move().

Here is the call graph for this function:

◆ operator=() [2/3]

template<typename Key , typename T , typename HashCompare , typename Allocator >
concurrent_hash_map & tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::operator= ( const concurrent_hash_map< Key, T, HashCompare, Allocator > &  table)
inline

Assignment.

Definition at line 931 of file concurrent_hash_map.h.

931 {
932 if( this!=&table ) {
933 typedef typename node_allocator_traits::propagate_on_container_copy_assignment pocca_type;
934 clear();
935 tbb::internal::allocator_copy_assignment(my_allocator, table.my_allocator, pocca_type());
936 internal_copy(table);
937 }
938 return *this;
939 }
void allocator_copy_assignment(MyAlloc &my_allocator, OtherAlloc &other_allocator, traits_true_type)
tbb::internal::false_type propagate_on_container_copy_assignment

References tbb::internal::allocator_copy_assignment(), tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::clear(), tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::internal_copy(), and tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::my_allocator.

Here is the call graph for this function:

◆ operator=() [3/3]

template<typename Key , typename T , typename HashCompare , typename Allocator >
concurrent_hash_map & tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::operator= ( std::initializer_list< value_type >  il)
inline

Assignment.

Definition at line 954 of file concurrent_hash_map.h.

954 {
955 clear();
956 internal_copy(il.begin(), il.end(), il.size());
957 return *this;
958 }

References tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::clear(), and tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::internal_copy().

Here is the call graph for this function:

◆ range() [1/2]

template<typename Key , typename T , typename HashCompare , typename Allocator >
range_type tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::range ( size_type  grainsize = 1)
inline

Definition at line 976 of file concurrent_hash_map.h.

976 {
977 return range_type( *this, grainsize );
978 }
internal::hash_map_range< iterator > range_type

◆ range() [2/2]

template<typename Key , typename T , typename HashCompare , typename Allocator >
const_range_type tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::range ( size_type  grainsize = 1) const
inline

Definition at line 979 of file concurrent_hash_map.h.

979 {
980 return const_range_type( *this, grainsize );
981 }
internal::hash_map_range< const_iterator > const_range_type

◆ rehash()

template<typename Key , typename T , typename HashCompare , typename A >
void tbb::interface5::concurrent_hash_map< Key, T, HashCompare, A >::rehash ( size_type  n = 0)

Rehashes and optionally resizes the whole table.

Useful to optimize performance before or after concurrent operations. Also enables using of find() and count() concurrent methods in serial context.

Definition at line 1434 of file concurrent_hash_map.h.

1434 {
1435 reserve( sz, my_allocator ); // TODO: add reduction of number of buckets as well
1436 hashcode_t mask = my_mask;
1437 hashcode_t b = (mask+1)>>1; // size or first index of the last segment
1438 __TBB_ASSERT((b&(b-1))==0, NULL); // zero or power of 2
1439 bucket *bp = get_bucket( b ); // only the last segment should be scanned for rehashing
1440 for(; b <= mask; b++, bp++ ) {
1441 node_base *n = bp->node_list;
1442 __TBB_ASSERT( is_valid(n) || n == internal::empty_rehashed || n == internal::rehash_req, "Broken internal structure" );
1443 __TBB_ASSERT( *reinterpret_cast<intptr_t*>(&bp->mutex) == 0, "concurrent or unexpectedly terminated operation during rehash() execution" );
1444 if( n == internal::rehash_req ) { // rehash bucket, conditional because rehashing of a previous bucket may affect this one
1445 hashcode_t h = b; bucket *b_old = bp;
1446 do {
1447 __TBB_ASSERT( h > 1, "The lowermost buckets can't be rehashed" );
1448 hashcode_t m = ( 1u<<__TBB_Log2( h ) ) - 1; // get parent mask from the topmost bit
1449 b_old = get_bucket( h &= m );
1450 } while( b_old->node_list == internal::rehash_req );
1451 // now h - is index of the root rehashed bucket b_old
1452 mark_rehashed_levels( h ); // mark all non-rehashed children recursively across all segments
1453 for( node_base **p = &b_old->node_list, *q = *p; is_valid(q); q = *p ) {
1454 hashcode_t c = my_hash_compare.hash( static_cast<node*>(q)->value().first );
1455 if( (c & mask) != h ) { // should be rehashed
1456 *p = q->next; // exclude from b_old
1457 bucket *b_new = get_bucket( c & mask );
1458 __TBB_ASSERT( b_new->node_list != internal::rehash_req, "hash() function changed for key in table or internal error" );
1459 add_to_bucket( b_new, q );
1460 } else p = &q->next; // iterate to next item
1461 }
1462 }
1463 }
1464#if TBB_USE_PERFORMANCE_WARNINGS
1465 int current_size = int(my_size), buckets = int(mask)+1, empty_buckets = 0, overpopulated_buckets = 0; // usage statistics
1466 static bool reported = false;
1467#endif
1468#if TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS
1469 for( b = 0; b <= mask; b++ ) {// only last segment should be scanned for rehashing
1470 if( b & (b-2) ) ++bp; // not the beginning of a segment
1471 else bp = get_bucket( b );
1472 node_base *n = bp->node_list;
1473 __TBB_ASSERT( *reinterpret_cast<intptr_t*>(&bp->mutex) == 0, "concurrent or unexpectedly terminated operation during rehash() execution" );
1474 __TBB_ASSERT( is_valid(n) || n == internal::empty_rehashed, "Broken internal structure" );
1475#if TBB_USE_PERFORMANCE_WARNINGS
1476 if( n == internal::empty_rehashed ) empty_buckets++;
1477 else if( n->next ) overpopulated_buckets++;
1478#endif
1479#if TBB_USE_ASSERT
1480 for( ; is_valid(n); n = n->next ) {
1481 hashcode_t h = my_hash_compare.hash( static_cast<node*>(n)->value().first ) & mask;
1482 __TBB_ASSERT( h == b, "hash() function changed for key in table or internal error" );
1483 }
1484#endif
1485 }
1486#endif // TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS
1487#if TBB_USE_PERFORMANCE_WARNINGS
1488 if( buckets > current_size) empty_buckets -= buckets - current_size;
1489 else overpopulated_buckets -= current_size - buckets; // TODO: load_factor?
1490 if( !reported && buckets >= 512 && ( 2*empty_buckets > current_size || 2*overpopulated_buckets > current_size ) ) {
1491 tbb::internal::runtime_warning(
1492 "Performance is not optimal because the hash function produces bad randomness in lower bits in %s.\nSize: %d Empties: %d Overlaps: %d",
1493#if __TBB_USE_OPTIONAL_RTTI
1494 typeid(*this).name(),
1495#else
1496 "concurrent_hash_map",
1497#endif
1498 current_size, empty_buckets, overpopulated_buckets );
1499 reported = true;
1500 }
1501#endif
1502}

References __TBB_ASSERT, __TBB_Log2(), __TBB_USE_OPTIONAL_RTTI, tbb::interface5::internal::empty_rehashed, tbb::internal::first(), h, int, mask, tbb::interface5::internal::hash_map_base::bucket::mutex, tbb::interface5::internal::hash_map_node_base::next, tbb::interface5::internal::hash_map_base::bucket::node_list, p, tbb::interface5::internal::rehash_req, tbb::internal::runtime_warning(), tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::node::value(), and value.

Here is the call graph for this function:

◆ rehash_bucket()

template<typename Key , typename T , typename HashCompare , typename Allocator >
void tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::rehash_bucket ( bucket *  b_new,
const hashcode_t  h 
)
inlineprotected

Definition at line 723 of file concurrent_hash_map.h.

723 {
724 __TBB_ASSERT( *(intptr_t*)(&b_new->mutex), "b_new must be locked (for write)");
725 __TBB_ASSERT( h > 1, "The lowermost buckets can't be rehashed" );
726 __TBB_store_with_release(b_new->node_list, internal::empty_rehashed); // mark rehashed
727 hashcode_t mask = ( 1u<<__TBB_Log2( h ) ) - 1; // get parent mask from the topmost bit
728#if __TBB_STATISTICS
729 my_info_rehashes++; // invocations of rehash_bucket
730#endif
731
732 bucket_accessor b_old( this, h & mask );
733
734 mask = (mask<<1) | 1; // get full mask for new bucket
735 __TBB_ASSERT( (mask&(mask+1))==0 && (h & mask) == h, NULL );
736 restart:
737 for( node_base **p = &b_old()->node_list, *n = __TBB_load_with_acquire(*p); is_valid(n); n = *p ) {
738 hashcode_t c = my_hash_compare.hash( static_cast<node*>(n)->value().first );
739#if TBB_USE_ASSERT
740 hashcode_t bmask = h & (mask>>1);
741 bmask = bmask==0? 1 : ( 1u<<(__TBB_Log2( bmask )+1 ) ) - 1; // minimal mask of parent bucket
742 __TBB_ASSERT( (c & bmask) == (h & bmask), "hash() function changed for key in table" );
743#endif
744 if( (c & mask) == h ) {
745 if( !b_old.is_writer() )
746 if( !b_old.upgrade_to_writer() ) {
747 goto restart; // node ptr can be invalid due to concurrent erase
748 }
749 *p = n->next; // exclude from b_old
750 add_to_bucket( b_new, n );
751 } else p = &n->next; // iterate to next item
752 }
753 }
T __TBB_load_with_acquire(const volatile T &location)
Definition: tbb_machine.h:709
void __TBB_store_with_release(volatile T &location, V value)
Definition: tbb_machine.h:713

References __TBB_ASSERT, tbb::internal::__TBB_load_with_acquire(), __TBB_Log2(), tbb::internal::__TBB_store_with_release(), tbb::interface5::internal::hash_map_base::add_to_bucket(), tbb::interface5::internal::empty_rehashed, h, tbb::interface5::internal::hash_map_base::is_valid(), tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::bucket_accessor::is_writer(), mask, tbb::interface5::internal::hash_map_base::bucket::mutex, tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::my_hash_compare, tbb::interface5::internal::hash_map_base::bucket::node_list, p, and tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::node::value().

Referenced by tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::bucket_accessor::acquire(), and tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::internal_fast_find().

Here is the call graph for this function:
Here is the caller graph for this function:

◆ search_bucket()

template<typename Key , typename T , typename HashCompare , typename Allocator >
node * tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::search_bucket ( const key_type &  key,
bucket *  b 
) const
inlineprotected

Definition at line 691 of file concurrent_hash_map.h.

691 {
692 node *n = static_cast<node*>( b->node_list );
693 while( is_valid(n) && !my_hash_compare.equal(key, n->value().first) )
694 n = static_cast<node*>( n->next );
695 __TBB_ASSERT(n != internal::rehash_req, "Search can be executed only for rehashed bucket");
696 return n;
697 }

References __TBB_ASSERT, tbb::interface5::internal::hash_map_base::is_valid(), key, tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::my_hash_compare, tbb::interface5::internal::hash_map_node_base::next, tbb::interface5::internal::hash_map_base::bucket::node_list, tbb::interface5::internal::rehash_req, and tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::node::value().

Referenced by tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::internal_fast_find().

Here is the call graph for this function:
Here is the caller graph for this function:

◆ size()

template<typename Key , typename T , typename HashCompare , typename Allocator >
size_type tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::size ( ) const
inline

Number of items in table.

Definition at line 994 of file concurrent_hash_map.h.

994{ return my_size; }

References tbb::interface5::internal::hash_map_base::my_size.

Referenced by tbb::operator==().

Here is the caller graph for this function:

◆ swap()

template<typename Key , typename T , typename HashCompare , typename Allocator >
void tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::swap ( concurrent_hash_map< Key, T, HashCompare, Allocator > &  table)

swap two instances. Iterators are invalidated

Definition at line 1423 of file concurrent_hash_map.h.

1423 {
1424 typedef typename node_allocator_traits::propagate_on_container_swap pocs_type;
1425 if (this != &table && (pocs_type::value || my_allocator == table.my_allocator)) {
1426 using std::swap;
1427 tbb::internal::allocator_swap(this->my_allocator, table.my_allocator, pocs_type());
1428 swap(this->my_hash_compare, table.my_hash_compare);
1429 internal_swap(table);
1430 }
1431}
void swap(concurrent_hash_map< Key, T, HashCompare, A > &a, concurrent_hash_map< Key, T, HashCompare, A > &b)
void allocator_swap(MyAlloc &my_allocator, OtherAlloc &other_allocator, traits_true_type)
void swap(concurrent_hash_map &table)
swap two instances. Iterators are invalidated
void internal_swap(hash_map_base &table)
Swap hash_map_bases.
tbb::internal::false_type propagate_on_container_swap

References tbb::internal::allocator_swap(), tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::my_allocator, tbb::interface5::concurrent_hash_map< Key, T, HashCompare, Allocator >::my_hash_compare, tbb::swap(), and value.

Referenced by tbb::swap().

Here is the call graph for this function:
Here is the caller graph for this function:

Friends And Related Function Documentation

◆ accessor_location [1/2]

template<typename Key , typename T , typename HashCompare , typename Allocator >
const_accessor * accessor_location ( accessor_not_used const &  )
friend

◆ accessor_location [2/2]

template<typename Key , typename T , typename HashCompare , typename Allocator >
const_accessor * accessor_location ( const_accessor &  a)
friend

Definition at line 1147 of file concurrent_hash_map.h.

1147{ return &a;}

◆ const_accessor

template<typename Key , typename T , typename HashCompare , typename Allocator >
friend class const_accessor
friend

Definition at line 609 of file concurrent_hash_map.h.

◆ internal::hash_map_iterator

template<typename Key , typename T , typename HashCompare , typename Allocator >
template<typename Container , typename Value >
friend class internal::hash_map_iterator
friend

Definition at line 587 of file concurrent_hash_map.h.

◆ internal::hash_map_range

template<typename Key , typename T , typename HashCompare , typename Allocator >
template<typename I >
friend class internal::hash_map_range
friend

Definition at line 590 of file concurrent_hash_map.h.

◆ is_write_access_needed [1/3]

template<typename Key , typename T , typename HashCompare , typename Allocator >
bool is_write_access_needed ( accessor const &  )
friend

◆ is_write_access_needed [2/3]

template<typename Key , typename T , typename HashCompare , typename Allocator >
bool is_write_access_needed ( accessor_not_used const &  )
friend

Definition at line 1151 of file concurrent_hash_map.h.

1151{ return false;}

◆ is_write_access_needed [3/3]

template<typename Key , typename T , typename HashCompare , typename Allocator >
bool is_write_access_needed ( const_accessor const &  )
friend

Definition at line 1150 of file concurrent_hash_map.h.

1150{ return false;}

Member Data Documentation

◆ my_allocator

◆ my_hash_compare


The documentation for this class was generated from the following file:

Copyright © 2005-2020 Intel Corporation. All Rights Reserved.

Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are registered trademarks or trademarks of Intel Corporation or its subsidiaries in the United States and other countries.

* Other names and brands may be claimed as the property of others.