#ifndef __TBB_concurrent_hash_map_H
#define __TBB_concurrent_hash_map_H

#include "tbb_stddef.h"

#if !TBB_USE_EXCEPTIONS && _MSC_VER
// Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers
#pragma warning (push)
#pragma warning (disable: 4530)
#endif

#include <iterator>
#include <utility>      // Need std::pair
#include <cstring>      // Need std::memset

#if !TBB_USE_EXCEPTIONS && _MSC_VER
#pragma warning (pop)
#endif

#include "cache_aligned_allocator.h"
#include "tbb_allocator.h"
#include "spin_rw_mutex.h"
#include "atomic.h"
#include "aligned_space.h"
#include "tbb_exception.h"
#include "_concurrent_unordered_internal.h" // Need tbb_hasher
#if TBB_USE_PERFORMANCE_WARNINGS
#include <typeinfo>
#endif

namespace tbb {

//! @cond INTERNAL
namespace internal {
    //! ITT instrumented routine that loads pointer from location pointed to by src.
    void* __TBB_EXPORTED_FUNC itt_load_pointer_with_acquire_v3( const void* src );
    //! ITT instrumented routine that stores src into location pointed to by dst.
    void __TBB_EXPORTED_FUNC itt_store_pointer_with_release_v3( void* dst, void* src );
    //! Routine that loads pointer from location pointed to by src without causing ITT to report a race.
    void* __TBB_EXPORTED_FUNC itt_load_pointer_v3( const void* src );
}
//! @endcond

//! hash_compare that is default argument for concurrent_hash_map
template<typename Key>
struct tbb_hash_compare {
    static size_t hash( const Key& a ) { return tbb_hasher(a); }
    static bool equal( const Key& a, const Key& b ) { return a == b; }
};
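
// The two members above are the whole HashCompare contract: hash() maps a key
// to a size_t and equal() tells whether two keys match. A minimal sketch of a
// user-supplied comparator for string keys (illustrative only, not part of
// this header):
//
//     struct string_hash_compare {
//         static size_t hash( const std::string& s ) { return tbb::tbb_hasher(s); }
//         static bool equal( const std::string& a, const std::string& b ) { return a == b; }
//     };
//     // tbb::concurrent_hash_map<std::string, int, string_hash_compare> table;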

namespace interface4 {

template<typename Key, typename T, typename HashCompare = tbb_hash_compare<Key>, typename A = tbb_allocator<std::pair<Key, T> > >
class concurrent_hash_map;

//! @cond INTERNAL
namespace internal {

//! Type of a hash code.
typedef size_t hashcode_t;
//! Node base type
struct hash_map_node_base : tbb::internal::no_copy {
    //! Mutex type
    typedef spin_rw_mutex mutex_t;
    //! Scoped lock type for mutex
    typedef mutex_t::scoped_lock scoped_t;
    //! Next node in chain
    hash_map_node_base *next;
    mutex_t mutex;
};
//! Incompleteness flag value
static hash_map_node_base *const rehash_req = reinterpret_cast<hash_map_node_base*>(size_t(3));
//! Rehashed empty bucket flag
static hash_map_node_base *const empty_rehashed = reinterpret_cast<hash_map_node_base*>(size_t(0));
//! base class of concurrent_hash_map
class hash_map_base {
public:
    //! Size type
    typedef size_t size_type;
    //! Type of a hash code.
    typedef size_t hashcode_t;
    //! Segment index type
    typedef size_t segment_index_t;
    //! Node base type
    typedef hash_map_node_base node_base;
    //! Bucket type
    struct bucket : tbb::internal::no_copy {
        //! Mutex type for buckets
        typedef spin_rw_mutex mutex_t;
        //! Scoped lock type for mutex
        typedef mutex_t::scoped_lock scoped_t;
        mutex_t mutex;
        node_base *node_list;
    };
    //! Count of embedded segments
    static size_type const embedded_block = 1;
    //! Count of buckets in the embedded segments
    static size_type const embedded_buckets = 1<<embedded_block;
    //! Count of segments in the first allocation block, including the embedded ones
    static size_type const first_block = 8;
    //! Size of a pointer / table size
    static size_type const pointers_per_table = sizeof(segment_index_t) * 8; // one segment per bit
    //! Segment pointer
    typedef bucket *segment_ptr_t;
    //! Segment pointers table type
    typedef segment_ptr_t segments_table_t[pointers_per_table];
    //! Hash mask = sum of allocated segment sizes - 1
    atomic<hashcode_t> my_mask;
    //! Segment pointers table. Also prevents false sharing between my_mask and my_size
    segments_table_t my_table;
    //! Size of container in stored items
    atomic<size_type> my_size; // It must be in a separate cache line from my_mask due to performance effects
    //! Zero segment
    bucket my_embedded_segment[embedded_buckets];
#if __TBB_STATISTICS
    atomic<unsigned> my_info_resizes; // concurrent ones
    mutable atomic<unsigned> my_info_restarts; // race collisions
    atomic<unsigned> my_info_rehashes; // invocations of rehash_bucket
#if !TBB_USE_PERFORMANCE_WARNINGS
#error Please enable TBB_USE_PERFORMANCE_WARNINGS as well
#endif
#endif
    //! Constructor
    hash_map_base() {
        std::memset( this, 0, pointers_per_table*sizeof(segment_ptr_t) // the segment table
            + sizeof(my_size) + sizeof(my_mask)                        // the counters
            + embedded_buckets*sizeof(bucket) );                       // the embedded buckets
        for( size_type i = 0; i < embedded_block; i++ ) // fill the embedded table slots
            my_table[i] = my_embedded_segment + segment_base(i);
        my_mask = embedded_buckets - 1;
        __TBB_ASSERT( embedded_block <= first_block, "The first block number must include embedded blocks");
#if __TBB_STATISTICS
        my_info_resizes = 0; // concurrent ones
        my_info_restarts = 0; // race collisions
        my_info_rehashes = 0; // invocations of rehash_bucket
#endif
    }

    //! @return segment index of given index in the array
    static segment_index_t segment_index_of( size_type index ) {
        return segment_index_t( __TBB_Log2( index|1 ) );
    }

    //! @return the first array index of given segment
    static segment_index_t segment_base( segment_index_t k ) {
        return (segment_index_t(1)<<k & ~segment_index_t(1));
    }

    //! @return segment size except for @arg k == 0
    static size_type segment_size( segment_index_t k ) {
        return size_type(1)<<k; // fake value for k==0
    }

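    // Worked example of the segment math above (an illustrative note, not
    // original documentation): buckets are stored in segments of sizes
    // 2,2,4,8,16,..., so segment k (k>=1) holds bucket indices
    // [2^k, 2^(k+1)). For bucket index 5: segment_index_of(5) ==
    // __TBB_Log2(5) == 2, segment_base(2) == 4, so bucket 5 lives in slot
    // 5-4 == 1 of segment 2, which spans segment_size(2) == 4 buckets.
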
    //! @return true if @arg ptr is a valid bucket list pointer (not a flag value)
    static bool is_valid( void *ptr ) {
        return reinterpret_cast<size_t>(ptr) > size_t(63);
    }

    //! Initialize buckets
    static void init_buckets( segment_ptr_t ptr, size_type sz, bool is_initial ) {
        if( is_initial ) std::memset(ptr, 0, sz*sizeof(bucket) );
        else for(size_type i = 0; i < sz; i++, ptr++) {
            *reinterpret_cast<intptr_t*>(&ptr->mutex) = 0;
            ptr->node_list = rehash_req; // mark as awaiting lazy rehash
        }
    }

    //! Add node @arg n to bucket @arg b
    static void add_to_bucket( bucket *b, node_base *n ) {
        __TBB_ASSERT(b->node_list != rehash_req, NULL);
        n->next = b->node_list;
        b->node_list = n; // bucket is under lock and the rehashed flag is set
    }

    //! Exception safety helper
    struct enable_segment_failsafe {
        segment_ptr_t *my_segment_ptr;
        enable_segment_failsafe(segments_table_t &table, segment_index_t k) : my_segment_ptr(&table[k]) {}
        ~enable_segment_failsafe() {
            if( my_segment_ptr ) *my_segment_ptr = 0; // indicate no allocation in progress
        }
    };

    //! Enable segment
    void enable_segment( segment_index_t k, bool is_initial = false ) {
        __TBB_ASSERT( k, "Zero segment must be embedded" );
        enable_segment_failsafe watchdog( my_table, k );
        cache_aligned_allocator<bucket> alloc;
        size_type sz;
        __TBB_ASSERT( !is_valid(my_table[k]), "Wrong concurrent assignment");
        if( k >= first_block ) {
            sz = segment_size( k );
            segment_ptr_t ptr = alloc.allocate( sz );
            init_buckets( ptr, sz, is_initial );
#if TBB_USE_THREADING_TOOLS
            itt_store_pointer_with_release_v3( my_table + k, ptr );
#else
            my_table[k] = ptr;
#endif
            sz <<= 1; // double it to get the entire capacity of the container
        } else { // the first allocation block
            __TBB_ASSERT( k == embedded_block, "Wrong segment index" );
            sz = segment_size( first_block );
            segment_ptr_t ptr = alloc.allocate( sz - embedded_buckets );
            init_buckets( ptr, sz - embedded_buckets, is_initial );
            ptr -= segment_base(embedded_block);
            for(segment_index_t i = embedded_block; i < first_block; i++) // calc the offsets
#if TBB_USE_THREADING_TOOLS
                itt_store_pointer_with_release_v3( my_table + i, ptr + segment_base(i) );
#else
                my_table[i] = ptr + segment_base(i);
#endif
        }
#if TBB_USE_THREADING_TOOLS
        itt_store_pointer_with_release_v3( &my_mask, (void*)(sz-1) );
#else
        my_mask = sz - 1; // publish the new mask only after the segment is ready
#endif
        watchdog.my_segment_ptr = 0; // keep the allocated segment
    }

    //! Get bucket by (masked) hashcode
    bucket *get_bucket( hashcode_t h ) const throw() {
        segment_index_t s = segment_index_of( h );
        h -= segment_base(s);
        segment_ptr_t seg = my_table[s];
        __TBB_ASSERT( is_valid(seg), "hashcode must be cut by valid mask for allocated segments" );
        return &seg[h];
    }

    //! Mark buckets in all larger segments that mirror hashcode h as rehashed
    void mark_rehashed_levels( hashcode_t h ) throw () {
        segment_index_t s = segment_index_of( h );
        while( segment_ptr_t seg = my_table[++s] )
            if( seg[h].node_list == rehash_req ) {
                seg[h].node_list = empty_rehashed;
                mark_rehashed_levels( h + segment_base(s) );
            }
    }

    //! Check for mask race
    // Splitting into two functions should help inlining
    inline bool check_mask_race( const hashcode_t h, hashcode_t &m ) const {
        hashcode_t m_now, m_old = m;
#if TBB_USE_THREADING_TOOLS
        m_now = (hashcode_t) itt_load_pointer_with_acquire_v3( &my_mask );
#else
        m_now = my_mask;
#endif
        if( m_old != m_now )
            return check_rehashing_collision( h, m_old, m = m_now );
        return false;
    }

    //! Process mask race, check for rehashing collision
    bool check_rehashing_collision( const hashcode_t h, hashcode_t m_old, hashcode_t m ) const {
        __TBB_ASSERT(m_old != m, NULL);
        if( (h & m_old) != (h & m) ) { // mask changed for this hashcode, rare event
            // condition above proves that 'h' has some other bits set besides 'm_old';
            // find the next applicable mask after m_old
            for( ++m_old; !(h & m_old); m_old <<= 1 ) // at most a few rounds depending on the first block size
                ;
            m_old = (m_old<<1) - 1; // get full mask from a bit
            __TBB_ASSERT((m_old&(m_old+1))==0 && m_old <= m, NULL);
            // check whether the parent bucket is being or has been rehashed
#if TBB_USE_THREADING_TOOLS
            if( itt_load_pointer_with_acquire_v3(&( get_bucket(h & m_old)->node_list )) != rehash_req )
#else
            if( __TBB_load_with_acquire(get_bucket( h & m_old )->node_list) != rehash_req )
#endif
            {
#if __TBB_STATISTICS
                my_info_restarts++; // race collisions
#endif
                return true;
            }
        }
        return false;
    }

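    // Illustrative note (not original documentation) on the race detected
    // above: a lookup may compute its bucket index with a stale mask, e.g.
    // h & 63 read while another thread grows the table and publishes mask
    // 127. check_mask_race() reloads the mask; when it changed for this
    // hashcode, check_rehashing_collision() asks the caller to restart only
    // if the bucket at the next-larger applicable mask was already rehashed,
    // i.e. the item could genuinely have moved out of the old bucket.
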
    //! Insert a node and check the load factor. @return segment index to enable, or 0 if no growth is needed.
    segment_index_t insert_new_node( bucket *b, node_base *n, hashcode_t mask ) {
        size_type sz = ++my_size; // prefix form is to enforce allocation after the first item inserted
        add_to_bucket( b, n );
        // check load factor
        if( sz >= mask ) {
            segment_index_t new_seg = segment_index_of( mask+1 ); // optimized segment_index_of( sz )
            __TBB_ASSERT( is_valid(my_table[new_seg-1]), "new allocations must not publish new mask until segment has allocated");
#if TBB_USE_THREADING_TOOLS
            if( !itt_load_pointer_v3(my_table+new_seg)
#else
            if( !my_table[new_seg]
#endif
              && __TBB_CompareAndSwapW(&my_table[new_seg], 2, 0) == 0 )
                return new_seg; // caller must enable the new segment
        }
        return 0;
    }

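    // Worked example of the growth trigger above (illustrative, not original
    // documentation): with mask == 63 the table has 64 buckets, so once the
    // 63rd item is inserted sz >= mask holds and segment_index_of(64) == 6 is
    // claimed via the CAS (storing the sentinel 2, which is_valid() rejects).
    // The caller then allocates segment 6, doubling capacity to 128 buckets,
    // and only afterwards publishes the new mask 127.
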
    //! Prepare enough segments for the required number of buckets
    void reserve(size_type buckets) {
        if( !buckets-- ) return;
        bool is_initial = !my_size;
        for( size_type m = my_mask; buckets > m; m = my_mask )
            enable_segment( segment_index_of( m+1 ), is_initial );
    }
    //! Swap hash_map_bases
    void internal_swap(hash_map_base &table) {
        std::swap(this->my_mask, table.my_mask);
        std::swap(this->my_size, table.my_size);
        for(size_type i = 0; i < embedded_buckets; i++)
            std::swap(this->my_embedded_segment[i].node_list, table.my_embedded_segment[i].node_list);
        for(size_type i = embedded_block; i < pointers_per_table; i++)
            std::swap(this->my_table[i], table.my_table[i]);
    }
};

template<typename Iterator>
class hash_map_range;

//! Meets requirements of a forward iterator for STL
/** Value is either the T or const T type of the container.
    @ingroup containers */
template<typename Container, typename Value>
class hash_map_iterator
    : public std::iterator<std::forward_iterator_tag,Value>
{
    typedef Container map_type;
    typedef typename Container::node node;
    typedef hash_map_base::node_base node_base;
    typedef hash_map_base::bucket bucket;

    template<typename C, typename T, typename U>
    friend bool operator==( const hash_map_iterator<C,T>& i, const hash_map_iterator<C,U>& j );

    template<typename C, typename T, typename U>
    friend bool operator!=( const hash_map_iterator<C,T>& i, const hash_map_iterator<C,U>& j );

    template<typename C, typename T, typename U>
    friend ptrdiff_t operator-( const hash_map_iterator<C,T>& i, const hash_map_iterator<C,U>& j );

    template<typename C, typename U>
    friend class hash_map_iterator;

    template<typename I>
    friend class hash_map_range;

    void advance_to_next_bucket() {
        size_t k = my_index+1;
        while( my_bucket && k <= my_map->my_mask ) {
            // Following test uses 2's-complement wizardry
            if( k & (k-2) ) // not the beginning of a segment
                ++my_bucket;
            else my_bucket = my_map->get_bucket( k );
            my_node = static_cast<node*>( my_bucket->node_list );
            if( hash_map_base::is_valid(my_node) ) {
                my_index = k; return;
            }
            ++k;
        }
        my_bucket = 0; my_node = 0; my_index = k; // the end
    }
#if !defined(_MSC_VER) || defined(__INTEL_COMPILER)
    template<typename Key, typename T, typename HashCompare, typename A>
    friend class interface4::concurrent_hash_map;
#else
public: // workaround
#endif
    //! concurrent_hash_map over which we are iterating.
    const Container *my_map;

    //! Index in hash table for current item
    size_t my_index;

    //! Pointer to bucket
    const bucket *my_bucket;

    //! Pointer to node that has current item
    node *my_node;

    hash_map_iterator( const Container &map, size_t index, const bucket *b, node_base *n );

public:
    //! Construct undefined iterator
    hash_map_iterator() {}
    hash_map_iterator( const hash_map_iterator<Container,typename Container::value_type> &other ) :
        my_map(other.my_map),
        my_index(other.my_index),
        my_bucket(other.my_bucket),
        my_node(other.my_node)
    {}
    Value& operator*() const {
        __TBB_ASSERT( hash_map_base::is_valid(my_node), "iterator uninitialized or at end of container?" );
        return my_node->item;
    }
    Value* operator->() const {return &operator*();}
    hash_map_iterator& operator++();

    //! Post increment
    hash_map_iterator operator++(int) {
        hash_map_iterator old(*this);
        operator++();
        return old;
    }
};

template<typename Container, typename Value>
hash_map_iterator<Container,Value>::hash_map_iterator( const Container &map, size_t index, const bucket *b, node_base *n ) :
    my_map(&map),
    my_index(index),
    my_bucket(b),
    my_node( static_cast<node*>(n) )
{
    if( b && !hash_map_base::is_valid(n) )
        advance_to_next_bucket();
}

template<typename Container, typename Value>
hash_map_iterator<Container,Value>& hash_map_iterator<Container,Value>::operator++() {
    my_node = static_cast<node*>( my_node->next );
    if( !my_node ) advance_to_next_bucket();
    return *this;
}

template<typename Container, typename T, typename U>
bool operator==( const hash_map_iterator<Container,T>& i, const hash_map_iterator<Container,U>& j ) {
    return i.my_node == j.my_node && i.my_map == j.my_map;
}

template<typename Container, typename T, typename U>
bool operator!=( const hash_map_iterator<Container,T>& i, const hash_map_iterator<Container,U>& j ) {
    return i.my_node != j.my_node || i.my_map != j.my_map;
}

//! Range class used with concurrent_hash_map
/** @ingroup containers */
template<typename Iterator>
class hash_map_range {
    typedef typename Iterator::map_type map_type;
    Iterator my_begin;
    Iterator my_end;
    mutable Iterator my_midpoint;
    size_t my_grainsize;
    //! Set my_midpoint to point approximately halfway between my_begin and my_end.
    void set_midpoint() const;
    template<typename U> friend class hash_map_range;
public:
    //! Type for size of a range
    typedef std::size_t size_type;
    typedef typename Iterator::value_type value_type;
    typedef typename Iterator::reference reference;
    typedef typename Iterator::difference_type difference_type;
    typedef Iterator iterator;

    //! True if range is empty.
    bool empty() const {return my_begin==my_end;}

    //! True if range can be partitioned into two subranges.
    bool is_divisible() const {
        return my_midpoint!=my_end;
    }
    //! Split range.
    hash_map_range( hash_map_range& r, split ) :
        my_end(r.my_end),
        my_grainsize(r.my_grainsize)
    {
        r.my_end = my_begin = r.my_midpoint;
        __TBB_ASSERT( !empty(), "Splitting despite the range being indivisible" );
        __TBB_ASSERT( !r.empty(), "Splitting despite the range being indivisible" );
        set_midpoint();
        r.set_midpoint();
    }
    //! Type conversion
    template<typename U>
    hash_map_range( hash_map_range<U>& r) :
        my_begin(r.my_begin),
        my_end(r.my_end),
        my_midpoint(r.my_midpoint),
        my_grainsize(r.my_grainsize)
    {}
#if TBB_DEPRECATED
    //! Init range with iterators and grainsize specified
    hash_map_range( const Iterator& begin_, const Iterator& end_, size_type grainsize_ = 1 ) :
        my_begin(begin_),
        my_end(end_),
        my_grainsize(grainsize_)
    {
        if(!my_end.my_index && !my_end.my_bucket) // end
            my_end.my_index = my_end.my_map->my_mask + 1;
        set_midpoint();
        __TBB_ASSERT( grainsize_>0, "grainsize must be positive" );
    }
#endif
    //! Init range with container and grainsize specified
    hash_map_range( const map_type &map, size_type grainsize_ = 1 ) :
        my_begin( Iterator( map, 0, map.my_embedded_segment, map.my_embedded_segment->node_list ) ),
        my_end( Iterator( map, map.my_mask + 1, 0, 0 ) ),
        my_grainsize( grainsize_ )
    {
        __TBB_ASSERT( grainsize_>0, "grainsize must be positive" );
        set_midpoint();
    }
    const Iterator& begin() const {return my_begin;}
    const Iterator& end() const {return my_end;}
    //! The grain size for this range.
    size_type grainsize() const {return my_grainsize;}
};

template<typename Iterator>
void hash_map_range<Iterator>::set_midpoint() const {
    // Split by groups of nodes
    size_t m = my_end.my_index-my_begin.my_index;
    if( m > my_grainsize ) {
        m = my_begin.my_index + m/2u;
        hash_map_base::bucket *b = my_begin.my_map->get_bucket(m);
        my_midpoint = Iterator(*my_begin.my_map,m,b,b->node_list);
    } else {
        my_midpoint = my_end;
    }
    __TBB_ASSERT( my_begin.my_index <= my_midpoint.my_index,
        "my_begin is after my_midpoint" );
    __TBB_ASSERT( my_midpoint.my_index <= my_end.my_index,
        "my_midpoint is after my_end" );
    __TBB_ASSERT( my_begin != my_midpoint || my_begin == my_end,
        "[my_begin, my_midpoint) range should not be empty" );
}

} // namespace internal
//! @endcond

//! Unordered map from Key to T.
/** concurrent_hash_map is an associative container that permits concurrent
    find, insert, and erase operations; items are reached through accessor
    objects that hold per-item read or write locks.
    @ingroup containers */
template<typename Key, typename T, typename HashCompare, typename Allocator>
class concurrent_hash_map : protected internal::hash_map_base {
    template<typename Container, typename Value>
    friend class internal::hash_map_iterator;

    template<typename I>
    friend class internal::hash_map_range;

public:
    typedef Key key_type;
    typedef T mapped_type;
    typedef std::pair<const Key,T> value_type;
    typedef hash_map_base::size_type size_type;
    typedef ptrdiff_t difference_type;
    typedef value_type *pointer;
    typedef const value_type *const_pointer;
    typedef value_type &reference;
    typedef const value_type &const_reference;
    typedef internal::hash_map_iterator<concurrent_hash_map,value_type> iterator;
    typedef internal::hash_map_iterator<concurrent_hash_map,const value_type> const_iterator;
    typedef internal::hash_map_range<iterator> range_type;
    typedef internal::hash_map_range<const_iterator> const_range_type;
    typedef Allocator allocator_type;

protected:
    friend class const_accessor;
    struct node;
    typedef typename Allocator::template rebind<node>::other node_allocator_type;
    node_allocator_type my_allocator;
    HashCompare my_hash_compare;

    struct node : public node_base {
        value_type item;
        node( const Key &key ) : item(key, T()) {}
        node( const Key &key, const T &t ) : item(key, t) {}
        // exception-safe allocation, see C++ Standard 2003, clause 5.3.4p17
        void *operator new( size_t, node_allocator_type &a ) {
            void *ptr = a.allocate(1);
            if(!ptr)
                tbb::internal::throw_exception(tbb::internal::eid_bad_alloc);
            return ptr;
        }
        // match placement-new form above to be called if exception thrown in constructor
        void operator delete( void *ptr, node_allocator_type &a ) {return a.deallocate(static_cast<node*>(ptr),1); }
    };

    void delete_node( node_base *n ) {
        my_allocator.destroy( static_cast<node*>(n) );
        my_allocator.deallocate( static_cast<node*>(n), 1);
    }

    node *search_bucket( const key_type &key, bucket *b ) const {
        node *n = static_cast<node*>( b->node_list );
        while( is_valid(n) && !my_hash_compare.equal(key, n->item.first) )
            n = static_cast<node*>( n->next );
        __TBB_ASSERT(n != internal::rehash_req, "Search can be executed only for rehashed bucket");
        return n;
    }

    //! bucket accessor is to find, rehash, acquire a lock, and access a bucket
    class bucket_accessor : public bucket::scoped_t {
        bool my_is_writer;
        bucket *my_b;
    public:
        bucket_accessor( concurrent_hash_map *base, const hashcode_t h, bool writer = false ) { acquire( base, h, writer ); }
        //! find a bucket by masked hashcode, optionally rehash it, and acquire the lock
        inline void acquire( concurrent_hash_map *base, const hashcode_t h, bool writer = false ) {
            my_b = base->get_bucket( h );
#if TBB_USE_THREADING_TOOLS
            if( itt_load_pointer_with_acquire_v3(&my_b->node_list) == internal::rehash_req
#else
            if( __TBB_load_with_acquire(my_b->node_list) == internal::rehash_req
#endif
                && try_acquire( my_b->mutex, /*write=*/true ) )
            {
                if( my_b->node_list == internal::rehash_req ) base->rehash_bucket( my_b, h ); // recursive rehashing
                my_is_writer = true;
            }
            else bucket::scoped_t::acquire( my_b->mutex, /*write=*/my_is_writer = writer );
            __TBB_ASSERT( my_b->node_list != internal::rehash_req, NULL);
        }
        //! check whether bucket is locked for write
        bool is_writer() { return my_is_writer; }
        //! get bucket pointer
        bucket *operator() () { return my_b; }

        bool upgrade_to_writer() { my_is_writer = true; return bucket::scoped_t::upgrade_to_writer(); }
    };

    //! Move nodes that now belong to bucket b_new out of its parent bucket
    void rehash_bucket( bucket *b_new, const hashcode_t h ) {
        __TBB_ASSERT( *(intptr_t*)(&b_new->mutex), "b_new must be locked (for write)");
        __TBB_ASSERT( h > 1, "The lowermost buckets can't be rehashed" );
        __TBB_store_with_release(b_new->node_list, internal::empty_rehashed); // mark rehashed
        hashcode_t mask = ( 1u<<__TBB_Log2( h ) ) - 1; // get parent mask from the topmost bit
#if __TBB_STATISTICS
        my_info_rehashes++; // invocations of rehash_bucket
#endif

        bucket_accessor b_old( this, h & mask );

        mask = (mask<<1) | 1; // get full mask for new bucket
        __TBB_ASSERT( (mask&(mask+1))==0 && (h & mask) == h, NULL );
    restart:
        for( node_base **p = &b_old()->node_list, *n = __TBB_load_with_acquire(*p); is_valid(n); n = *p ) {
            hashcode_t c = my_hash_compare.hash( static_cast<node*>(n)->item.first );
#if TBB_USE_ASSERT
            hashcode_t bmask = h & (mask>>1);
            bmask = bmask==0? 1 : ( 1u<<(__TBB_Log2( bmask )+1 ) ) - 1; // minimal mask of parent bucket
            __TBB_ASSERT( (c & bmask) == (h & bmask), "hash() function changed for key in table" );
#endif
            if( (c & mask) == h ) { // should be moved to the new bucket
                if( !b_old.is_writer() )
                    if( !b_old.upgrade_to_writer() ) {
                        goto restart; // node ptr can be invalid due to concurrent erase
                    }
                *p = n->next; // exclude from b_old
                add_to_bucket( b_new, n );
            } else p = &n->next; // iterate to next item
        }
    }

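    // Illustrative note on the lazy rehashing scheme above (not original
    // documentation): when the table grows from 64 to 128 buckets, the new
    // buckets are merely flagged rehash_req. The first operation to touch
    // new bucket 100 locks it and calls rehash_bucket(), which computes the
    // parent index 100 & 63 == 36 and moves every node from bucket 36 whose
    // hash has bit 6 set (i.e. hash & 127 == 100) into bucket 100. Work is
    // thus paid per bucket, on first use, instead of all at once at resize.
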
public:

    class accessor;
    //! Combines data access, locking, and garbage collection.
    class const_accessor {
        friend class concurrent_hash_map<Key,T,HashCompare,Allocator>;
        friend class accessor;
        void operator=( const accessor & ) const; // Deny access
        const_accessor( const accessor & );       // Deny access
    public:
        //! Type of value
        typedef const typename concurrent_hash_map::value_type value_type;

        //! True if result is empty.
        bool empty() const {return !my_node;}

        //! Set to null
        void release() {
            if( my_node ) {
                my_lock.release();
                my_node = 0;
            }
        }

        //! Return reference to associated value in hash table.
        const_reference operator*() const {
            __TBB_ASSERT( my_node, "attempt to dereference empty accessor" );
            return my_node->item;
        }

        //! Return pointer to associated value in hash table.
        const_pointer operator->() const {
            return &operator*();
        }

        //! Create empty result
        const_accessor() : my_node(NULL) {}

        //! Destroy result after releasing the underlying reference.
        ~const_accessor() {
            my_node = NULL; // my_lock.release() is called in its destructor
        }
    private:
        node *my_node;
        typename node::scoped_t my_lock;
        hashcode_t my_hash;
    };

    //! Allows write access to elements and combines data access, locking, and garbage collection.
    class accessor: public const_accessor {
    public:
        //! Type of value
        typedef typename concurrent_hash_map::value_type value_type;

        //! Return reference to associated value in hash table.
        reference operator*() const {
            __TBB_ASSERT( this->my_node, "attempt to dereference empty accessor" );
            return this->my_node->item;
        }

        //! Return pointer to associated value in hash table.
        pointer operator->() const {
            return &operator*();
        }
    };

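    // Typical accessor usage (an illustrative sketch, not part of this
    // header; `table` is a hypothetical concurrent_hash_map<std::string,int>):
    //
    //     tbb::concurrent_hash_map<std::string,int>::accessor a;
    //     table.insert( a, "apples" ); // inserts {"apples",0} if absent
    //     a->second += 1;              // write lock held until `a` is released
    //     a.release();                 // or let the destructor release it
    //
    // Holding an accessor blocks other threads that touch the same item, so
    // keep its scope as small as possible.
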
    //! Construct empty table.
    concurrent_hash_map(const allocator_type &a = allocator_type())
        : internal::hash_map_base(), my_allocator(a)
    {}

    //! Construct empty table with n preallocated buckets.
    concurrent_hash_map(size_type n, const allocator_type &a = allocator_type())
        : my_allocator(a)
    {
        reserve( n );
    }

    //! Copy constructor
    concurrent_hash_map( const concurrent_hash_map& table, const allocator_type &a = allocator_type())
        : internal::hash_map_base(), my_allocator(a)
    {
        internal_copy(table);
    }

    //! Construction with copying iteration range and given allocator instance
    template<typename I>
    concurrent_hash_map(I first, I last, const allocator_type &a = allocator_type())
        : my_allocator(a)
    {
        reserve( std::distance(first, last) );
        internal_copy(first, last);
    }

    //! Assignment
    concurrent_hash_map& operator=( const concurrent_hash_map& table ) {
        if( this!=&table ) {
            clear();
            internal_copy(table);
        }
        return *this;
    }

    //! Rehashes and optionally resizes the whole table.
    /** Useful to optimize performance before or after concurrent operations. */
    void rehash(size_type n = 0);

    //! Clear table
    void clear();

    //! Clear table and destroy it.
    ~concurrent_hash_map() { clear(); }

    //------------------------------------------------------------------------
    // Parallel algorithm support
    //------------------------------------------------------------------------
    range_type range( size_type grainsize=1 ) {
        return range_type( *this, grainsize );
    }
    const_range_type range( size_type grainsize=1 ) const {
        return const_range_type( *this, grainsize );
    }

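    // range() is the hook for TBB's parallel algorithms: the returned range
    // recursively splits at bucket-index midpoints (see
    // hash_map_range::set_midpoint above) down to the given grainsize.
    // A sketch of the intended pattern (illustrative only; `table` and
    // process() are hypothetical, and the lambda form assumes a C++11
    // compiler):
    //
    //     typedef tbb::concurrent_hash_map<std::string,int> table_t;
    //     tbb::parallel_for( table.range(),
    //         []( const table_t::range_type &r ) {
    //             for( table_t::range_type::iterator i = r.begin(); i != r.end(); ++i )
    //                 process( i->first, i->second );
    //         } );
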
    //------------------------------------------------------------------------
    // STL support - not thread-safe methods
    //------------------------------------------------------------------------
    iterator begin() {return iterator(*this,0,my_embedded_segment,my_embedded_segment->node_list);}
    iterator end() {return iterator(*this,0,0,0);}
    const_iterator begin() const {return const_iterator(*this,0,my_embedded_segment,my_embedded_segment->node_list);}
    const_iterator end() const {return const_iterator(*this,0,0,0);}
    std::pair<iterator, iterator> equal_range( const Key& key ) { return internal_equal_range(key, end()); }
    std::pair<const_iterator, const_iterator> equal_range( const Key& key ) const { return internal_equal_range(key, end()); }

    //! Number of items in table.
    size_type size() const { return my_size; }

    //! True if size()==0.
    bool empty() const { return my_size == 0; }

    //! Upper bound on size.
    size_type max_size() const {return (~size_type(0))/sizeof(node);}

    //! Returns the current number of buckets
    size_type bucket_count() const { return my_mask+1; }

    //! return allocator object
    allocator_type get_allocator() const { return this->my_allocator; }

    //! swap two instances. Iterators are invalidated
    void swap(concurrent_hash_map &table);

    //------------------------------------------------------------------------
    // concurrent map operations
    //------------------------------------------------------------------------

    //! Return count of items (0 or 1)
    size_type count( const Key &key ) const {
        return const_cast<concurrent_hash_map*>(this)->lookup(/*insert*/false, key, NULL, NULL, /*write=*/false );
    }

    //! Find item and acquire a read lock on the item.
    /** Return true if item is found, false otherwise. */
    bool find( const_accessor &result, const Key &key ) const {
        result.release();
        return const_cast<concurrent_hash_map*>(this)->lookup(/*insert*/false, key, NULL, &result, /*write=*/false );
    }

    //! Find item and acquire a write lock on the item.
    /** Return true if item is found, false otherwise. */
    bool find( accessor &result, const Key &key ) {
        result.release();
        return lookup(/*insert*/false, key, NULL, &result, /*write=*/true );
    }

    //! Insert item (if not already present) and acquire a read lock on the item.
    /** Returns true if item is new. */
    bool insert( const_accessor &result, const Key &key ) {
        result.release();
        return lookup(/*insert*/true, key, NULL, &result, /*write=*/false );
    }

    //! Insert item (if not already present) and acquire a write lock on the item.
    /** Returns true if item is new. */
    bool insert( accessor &result, const Key &key ) {
        result.release();
        return lookup(/*insert*/true, key, NULL, &result, /*write=*/true );
    }

    //! Insert item by copying if there is no such key present already and acquire a read lock on the item.
    /** Returns true if item is new. */
    bool insert( const_accessor &result, const value_type &value ) {
        result.release();
        return lookup(/*insert*/true, value.first, &value.second, &result, /*write=*/false );
    }

    //! Insert item by copying if there is no such key present already and acquire a write lock on the item.
    /** Returns true if item is new. */
    bool insert( accessor &result, const value_type &value ) {
        result.release();
        return lookup(/*insert*/true, value.first, &value.second, &result, /*write=*/true );
    }

    //! Insert item by copying if there is no such key present already
    /** Returns true if item is inserted. */
    bool insert( const value_type &value ) {
        return lookup(/*insert*/true, value.first, &value.second, NULL, /*write=*/false );
    }

    //! Insert range [first, last)
    template<typename I>
    void insert(I first, I last) {
        for(; first != last; ++first)
            insert( *first );
    }

    //! Erase item.
    /** Return true if item was erased by particularly this call. */
    bool erase( const Key& key );

    //! Erase item by const_accessor.
    /** Return true if item was erased by particularly this call. */
    bool erase( const_accessor& item_accessor ) {
        return exclude( item_accessor, /*readonly=*/true );
    }

    //! Erase item by accessor.
    /** Return true if item was erased by particularly this call. */
    bool erase( accessor& item_accessor ) {
        return exclude( item_accessor, /*readonly=*/false );
    }

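    // Erase-by-accessor pattern (an illustrative sketch; `table` and `key`
    // are hypothetical): locating the item and erasing it through the same
    // accessor avoids a second hash lookup.
    //
    //     tbb::concurrent_hash_map<std::string,int>::accessor a;
    //     if( table.find( a, key ) )
    //         table.erase( a ); // consumes the accessor: it is empty afterwards
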
protected:
    //! Insert or find item and optionally acquire a lock on the item.
    bool lookup( bool op_insert, const Key &key, const T *t, const_accessor *result, bool write );

    //! delete item by accessor
    bool exclude( const_accessor &item_accessor, bool readonly );

    //! Implementation of equal_range: find the item for the key and return a one-item iterator pair around it.
    template<typename I>
    std::pair<I, I> internal_equal_range( const Key& key, I end ) const;

    //! Copy "source" to *this, where *this must start out empty.
    void internal_copy( const concurrent_hash_map& source );

    template<typename I>
    void internal_copy(I first, I last);

    //! Fast find when no concurrent erasure is used. For internal use inside TBB only!
    /** Return pointer to item with given key, or NULL if no such item exists.
        Must not be called concurrently with erasure operations. */
    const_pointer internal_fast_find( const Key& key ) const {
        hashcode_t h = my_hash_compare.hash( key );
#if TBB_USE_THREADING_TOOLS
        hashcode_t m = (hashcode_t) itt_load_pointer_with_acquire_v3( &my_mask );
#else
        hashcode_t m = my_mask;
#endif
        node *n;
    restart:
        __TBB_ASSERT((m&(m+1))==0, NULL);
        bucket *b = get_bucket( h & m );
#if TBB_USE_THREADING_TOOLS
        if( itt_load_pointer_with_acquire_v3(&b->node_list) == internal::rehash_req )
#else
        if( __TBB_load_with_acquire(b->node_list) == internal::rehash_req )
#endif
        {
            bucket::scoped_t lock;
            if( lock.try_acquire( b->mutex, /*write=*/true ) ) {
                if( b->node_list == internal::rehash_req)
                    const_cast<concurrent_hash_map*>(this)->rehash_bucket( b, h & m ); // recursive rehashing
            }
            else lock.acquire( b->mutex, /*write=*/false );
            __TBB_ASSERT(b->node_list!=internal::rehash_req,NULL);
        }
        n = search_bucket( key, b );
        if( n )
            return &n->item;
        else if( check_mask_race( h, m ) )
            goto restart;
        return 0;
    }
};

#if _MSC_VER && !defined(__INTEL_COMPILER)
// Suppress "conditional expression is constant" warning.
#pragma warning( push )
#pragma warning( disable: 4127 )
#endif

template<typename Key, typename T, typename HashCompare, typename A>
bool concurrent_hash_map<Key,T,HashCompare,A>::lookup( bool op_insert, const Key &key, const T *t, const_accessor *result, bool write ) {
    __TBB_ASSERT( !result || !result->my_node, NULL );
    bool return_value;
    hashcode_t const h = my_hash_compare.hash( key );
#if TBB_USE_THREADING_TOOLS
    hashcode_t m = (hashcode_t) itt_load_pointer_with_acquire_v3( &my_mask );
#else
    hashcode_t m = my_mask;
#endif
    segment_index_t grow_segment = 0;
    node *n, *tmp_n = 0;
restart:
    {//lock scope
        __TBB_ASSERT((m&(m+1))==0, NULL);
        return_value = false;
        // get bucket
        bucket_accessor b( this, h & m );

        // find a node
        n = search_bucket( key, b() );
        if( op_insert ) {
            // [opt] insert a key
            if( !n ) {
                if( !tmp_n ) {
                    if(t) tmp_n = new( my_allocator ) node(key, *t);
                    else  tmp_n = new( my_allocator ) node(key);
                }
                if( !b.is_writer() && !b.upgrade_to_writer() ) {
                    // Rerun search_bucket, in case another thread inserted the item during the upgrade.
                    n = search_bucket( key, b() );
                    if( is_valid(n) ) { // unfortunately, it did
                        b.downgrade_to_reader();
                        goto exists;
                    }
                }
                if( check_mask_race(h, m) )
                    goto restart; // b.release() is done in ~b().
                // insert and set flag to grow the container
                grow_segment = insert_new_node( b(), n = tmp_n, m );
                tmp_n = 0;
                return_value = true;
            }
        } else { // find or count
            if( !n ) {
                if( check_mask_race( h, m ) )
                    goto restart; // b.release() is done in ~b().
                return false;
            }
            return_value = true;
        }
    exists:
        if( !result ) goto check_growth;
        // acquire the item
        if( !result->my_lock.try_acquire( n->mutex, write ) ) {
            // we are unlucky, prepare for longer wait
            tbb::internal::atomic_backoff trials;
            do {
                if( !trials.bounded_pause() ) {
                    // the wait takes really long, restart the operation
                    b.release();
                    __TBB_ASSERT( !op_insert || !return_value, "Can't acquire new item in locked bucket?" );
                    __TBB_Yield();
#if TBB_USE_THREADING_TOOLS
                    m = (hashcode_t) itt_load_pointer_with_acquire_v3( &my_mask );
#else
                    m = my_mask;
#endif
                    goto restart;
                }
            } while( !result->my_lock.try_acquire( n->mutex, write ) );
        }
    }//lock scope
    result->my_node = n;
    result->my_hash = h;
check_growth:
    // [opt] grow the container
    if( grow_segment ) {
#if __TBB_STATISTICS
        my_info_resizes++; // concurrent ones
#endif
        enable_segment( grow_segment );
    }
    if( tmp_n ) // if op_insert only
        delete_node( tmp_n );
    return return_value;
}

template<typename Key, typename T, typename HashCompare, typename A>
template<typename I>
std::pair<I, I> concurrent_hash_map<Key,T,HashCompare,A>::internal_equal_range( const Key& key, I end_ ) const {
    hashcode_t h = my_hash_compare.hash( key );
    hashcode_t m = my_mask;
    __TBB_ASSERT((m&(m+1))==0, NULL);
    h &= m;
    bucket *b = get_bucket( h );
    while( b->node_list == internal::rehash_req ) {
        m = ( 1u<<__TBB_Log2( h ) ) - 1; // get parent mask from the topmost bit
        b = get_bucket( h &= m );
    }
    node *n = search_bucket( key, b );
    if( !n )
        return std::make_pair(end_, end_);
    iterator lower(*this, h, b, n), upper(lower);
    return std::make_pair(lower, ++upper);
}

template<typename Key, typename T, typename HashCompare, typename A>
bool concurrent_hash_map<Key,T,HashCompare,A>::exclude( const_accessor &item_accessor, bool readonly ) {
    __TBB_ASSERT( item_accessor.my_node, NULL );
    node_base *const n = item_accessor.my_node;
    item_accessor.my_node = NULL; // we must release the accessor anyway
    hashcode_t const h = item_accessor.my_hash;
#if TBB_USE_THREADING_TOOLS
    hashcode_t m = (hashcode_t) itt_load_pointer_with_acquire_v3( &my_mask );
#else
    hashcode_t m = my_mask;
#endif
    do {
        // get and lock the bucket for writing
        bucket_accessor b( this, h & m, /*writer=*/true );
        node_base **p = &b()->node_list;
        while( *p && *p != n )
            p = &(*p)->next;
        if( !*p ) { // someone else removed the item first
            if( check_mask_race( h, m ) )
                continue;
            item_accessor.my_lock.release();
            return false;
        }
        __TBB_ASSERT( *p == n, NULL );
        *p = n->next; // remove from container
        my_size--;
        break;
    } while(true);
    if( readonly ) // upgrade to waiting for concurrent references to drain
        item_accessor.my_lock.upgrade_to_writer();
    item_accessor.my_lock.release();
    delete_node( n ); // Only one thread can delete it due to the write lock on the bucket
    return true;
}

template<typename Key, typename T, typename HashCompare, typename A>
bool concurrent_hash_map<Key,T,HashCompare,A>::erase( const Key &key ) {
    node_base *n;
    hashcode_t const h = my_hash_compare.hash( key );
#if TBB_USE_THREADING_TOOLS
    hashcode_t m = (hashcode_t) itt_load_pointer_with_acquire_v3( &my_mask );
#else
    hashcode_t m = my_mask;
#endif
restart:
    {//lock scope
        // get and lock the bucket
        bucket_accessor b( this, h & m );
    search:
        node_base **p = &b()->node_list;
        n = *p;
        while( is_valid(n) && !my_hash_compare.equal(key, static_cast<node*>(n)->item.first ) ) {
            p = &n->next;
            n = *p;
        }
        if( !n ) { // not found, but the mask could have changed
            if( check_mask_race( h, m ) )
                goto restart;
            return false;
        }
        else if( !b.is_writer() && !b.upgrade_to_writer() ) {
            if( check_mask_race( h, m ) ) // contended upgrade, check the mask
                goto restart;
            goto search;
        }
        *p = n->next;
        my_size--;
    }
    {
        // wait for concurrent references to the item to drain
        typename node::scoped_t item_locker( n->mutex, /*write=*/true );
    }
    // note: there should be no threads trying to acquire this mutex again; do not try to upgrade a const_accessor!
    delete_node( n ); // Only one thread can delete it
    return true;
}

template<typename Key, typename T, typename HashCompare, typename A>
void concurrent_hash_map<Key,T,HashCompare,A>::swap(concurrent_hash_map<Key,T,HashCompare,A> &table) {
    std::swap(this->my_allocator, table.my_allocator);
    std::swap(this->my_hash_compare, table.my_hash_compare);
    internal_swap(table);
}

template<typename Key, typename T, typename HashCompare, typename A>
void concurrent_hash_map<Key,T,HashCompare,A>::rehash(size_type sz) {
    reserve( sz );
    hashcode_t mask = my_mask;
    hashcode_t b = (mask+1)>>1; // size or first index of the last segment
    __TBB_ASSERT((b&(b-1))==0, NULL);
    bucket *bp = get_bucket( b ); // only the last segment should be scanned for rehashing
    for(; b <= mask; b++, bp++ ) {
        node_base *n = bp->node_list;
        __TBB_ASSERT( is_valid(n) || n == internal::empty_rehashed || n == internal::rehash_req, "Broken internal structure" );
        __TBB_ASSERT( *reinterpret_cast<intptr_t*>(&bp->mutex) == 0, "concurrent or unexpectedly terminated operation during rehash() execution" );
        if( n == internal::rehash_req ) { // rehash bucket; conditional because rehashing of a previous bucket may affect this one
            hashcode_t h = b; bucket *b_old = bp;
            do {
                __TBB_ASSERT( h > 1, "The lowermost buckets can't be rehashed" );
                hashcode_t m = ( 1u<<__TBB_Log2( h ) ) - 1; // get parent mask from the topmost bit
                b_old = get_bucket( h &= m );
            } while( b_old->node_list == internal::rehash_req );
            // now h is the index of the root rehashed bucket b_old
            mark_rehashed_levels( h ); // mark all non-rehashed children recursively across all segments
            for( node_base **p = &b_old->node_list, *q = *p; is_valid(q); q = *p ) {
                hashcode_t c = my_hash_compare.hash( static_cast<node*>(q)->item.first );
                if( (c & mask) != h ) { // should be rehashed
                    *p = q->next; // exclude from b_old
                    bucket *b_new = get_bucket( c & mask );
                    __TBB_ASSERT( b_new->node_list != internal::rehash_req, "hash() function changed for key in table or internal error" );
                    add_to_bucket( b_new, q );
                } else p = &q->next; // iterate to next item
            }
        }
    }
#if TBB_USE_PERFORMANCE_WARNINGS
    int current_size = int(my_size), buckets = int(mask)+1, empty_buckets = 0, overpopulated_buckets = 0;
    static bool reported = false;
#endif
#if TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS
    for( b = 0; b <= mask; b++ ) { // verify the whole table
        if( b & (b-2) ) ++bp; // not the beginning of a segment
        else bp = get_bucket( b );
        node_base *n = bp->node_list;
        __TBB_ASSERT( *reinterpret_cast<intptr_t*>(&bp->mutex) == 0, "concurrent or unexpectedly terminated operation during rehash() execution" );
        __TBB_ASSERT( is_valid(n) || n == internal::empty_rehashed, "Broken internal structure" );
#if TBB_USE_PERFORMANCE_WARNINGS
        if( n == internal::empty_rehashed ) empty_buckets++;
        else if( n->next ) overpopulated_buckets++;
#endif
#if TBB_USE_ASSERT
        for( ; is_valid(n); n = n->next ) {
            hashcode_t h = my_hash_compare.hash( static_cast<node*>(n)->item.first ) & mask;
            __TBB_ASSERT( h == b, "hash() function changed for key in table or internal error" );
        }
#endif
    }
#endif // TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS
#if TBB_USE_PERFORMANCE_WARNINGS
    if( buckets > current_size) empty_buckets -= buckets - current_size;
    else overpopulated_buckets -= current_size - buckets;
    if( !reported && buckets >= 512 && ( 2*empty_buckets > current_size || 2*overpopulated_buckets > current_size ) ) {
        tbb::internal::runtime_warning(
            "Performance is not optimal because the hash function produces bad randomness in lower bits in %s.\nSize: %d Empties: %d Overlaps: %d",
            typeid(*this).name(), current_size, empty_buckets, overpopulated_buckets );
        reported = true;
    }
#endif
}

template<typename Key, typename T, typename HashCompare, typename A>
void concurrent_hash_map<Key,T,HashCompare,A>::clear() {
    hashcode_t m = my_mask;
    __TBB_ASSERT((m&(m+1))==0, NULL);
#if TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS
#if TBB_USE_PERFORMANCE_WARNINGS
    int current_size = int(my_size), buckets = int(m)+1, empty_buckets = 0, overpopulated_buckets = 0;
    static bool reported = false;
#endif
    bucket *bp = 0;
    // check consistency
    for( segment_index_t b = 0; b <= m; b++ ) {
        if( b & (b-2) ) ++bp; // not the beginning of a segment
        else bp = get_bucket( b );
        node_base *n = bp->node_list;
        __TBB_ASSERT( is_valid(n) || n == internal::empty_rehashed || n == internal::rehash_req, "Broken internal structure" );
        __TBB_ASSERT( *reinterpret_cast<intptr_t*>(&bp->mutex) == 0, "concurrent or unexpectedly terminated operation during clear() execution" );
#if TBB_USE_PERFORMANCE_WARNINGS
        if( n == internal::empty_rehashed ) empty_buckets++;
        else if( n == internal::rehash_req ) buckets--;
        else if( n->next ) overpopulated_buckets++;
#endif
#if __TBB_EXTRA_DEBUG
        for(; is_valid(n); n = n->next ) {
            hashcode_t h = my_hash_compare.hash( static_cast<node*>(n)->item.first );
            h &= m;
            __TBB_ASSERT( h == b || get_bucket(h)->node_list == internal::rehash_req, "hash() function changed for key in table or internal error" );
        }
#endif
    }
#if TBB_USE_PERFORMANCE_WARNINGS
#if __TBB_STATISTICS
    printf( "items=%d buckets: capacity=%d rehashed=%d empty=%d overpopulated=%d"
        " concurrent: resizes=%u rehashes=%u restarts=%u\n",
        current_size, int(m+1), buckets, empty_buckets, overpopulated_buckets,
        unsigned(my_info_resizes), unsigned(my_info_rehashes), unsigned(my_info_restarts) );
    my_info_resizes = 0; // concurrent ones
    my_info_restarts = 0; // race collisions
    my_info_rehashes = 0; // invocations of rehash_bucket
#endif
    if( buckets > current_size) empty_buckets -= buckets - current_size;
    else overpopulated_buckets -= current_size - buckets;
    if( !reported && buckets >= 512 && ( 2*empty_buckets > current_size || 2*overpopulated_buckets > current_size ) ) {
        tbb::internal::runtime_warning(
            "Performance is not optimal because the hash function produces bad randomness in lower bits in %s.\nSize: %d Empties: %d Overlaps: %d",
            typeid(*this).name(), current_size, empty_buckets, overpopulated_buckets );
        reported = true;
    }
#endif
#endif//TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS
    my_size = 0;
    segment_index_t s = segment_index_of( m );
    __TBB_ASSERT( s+1 == pointers_per_table || !my_table[s+1], "wrong mask or concurrent grow" );
    cache_aligned_allocator<bucket> alloc;
    do { // delete the nodes and deallocate the segments, from last to first
        __TBB_ASSERT( is_valid( my_table[s] ), "wrong mask or concurrent grow" );
        segment_ptr_t buckets_ptr = my_table[s];
        size_type sz = segment_size( s ? s : 1 );
        for( segment_index_t i = 0; i < sz; i++ )
            for( node_base *n = buckets_ptr[i].node_list; is_valid(n); n = buckets_ptr[i].node_list ) {
                buckets_ptr[i].node_list = n->next;
                delete_node( n );
            }
        if( s >= first_block) // the segment was allocated on its own
            alloc.deallocate( buckets_ptr, sz );
        else if( s == embedded_block && embedded_block != first_block ) // the first allocation block
            alloc.deallocate( buckets_ptr, segment_size(first_block)-embedded_buckets );
        if( s >= embedded_block ) my_table[s] = 0;
    } while(s-- > 0);
    my_mask = embedded_buckets - 1;
}

template<typename Key, typename T, typename HashCompare, typename A>
void concurrent_hash_map<Key,T,HashCompare,A>::internal_copy( const concurrent_hash_map& source ) {
    reserve( source.my_size );
    hashcode_t mask = source.my_mask;
    if( my_mask == mask ) { // optimized bucket-by-bucket copy
        bucket *dst = 0, *src = 0;
        bool rehash_required = false;
        for( hashcode_t k = 0; k <= mask; k++ ) {
            if( k & (k-2) ) ++dst, ++src; // not the beginning of a segment
            else { dst = get_bucket( k ); src = source.get_bucket( k ); }
            __TBB_ASSERT( dst->node_list != internal::rehash_req, "Invalid bucket in destination table");
            node *n = static_cast<node*>( src->node_list );
            if( n == internal::rehash_req ) { // source is not rehashed, items are in previous buckets
                rehash_required = true;
                dst->node_list = internal::rehash_req;
            } else for(; n; n = static_cast<node*>( n->next ) ) {
                add_to_bucket( dst, new( my_allocator ) node(n->item.first, n->item.second) );
                ++my_size;
            }
        }
        if( rehash_required ) rehash();
    } else internal_copy( source.begin(), source.end() );
}

template<typename Key, typename T, typename HashCompare, typename A>
template<typename I>
void concurrent_hash_map<Key,T,HashCompare,A>::internal_copy(I first, I last) {
    hashcode_t m = my_mask;
    for(; first != last; ++first) {
        hashcode_t h = my_hash_compare.hash( first->first );
        bucket *b = get_bucket( h & m );
        __TBB_ASSERT( b->node_list != internal::rehash_req, "Invalid bucket in destination table");
        node *n = new( my_allocator ) node(first->first, first->second);
        add_to_bucket( b, n );
        ++my_size;
    }
}

} // namespace interface4

using interface4::concurrent_hash_map;


template<typename Key, typename T, typename HashCompare, typename A1, typename A2>
inline bool operator==(const concurrent_hash_map<Key, T, HashCompare, A1> &a, const concurrent_hash_map<Key, T, HashCompare, A2> &b) {
    if(a.size() != b.size()) return false;
    typename concurrent_hash_map<Key, T, HashCompare, A1>::const_iterator i(a.begin()), i_end(a.end());
    typename concurrent_hash_map<Key, T, HashCompare, A2>::const_iterator j, j_end(b.end());
    for(; i != i_end; ++i) {
        j = b.equal_range(i->first).first;
        if( j == j_end || !(i->second == j->second) ) return false;
    }
    return true;
}

template<typename Key, typename T, typename HashCompare, typename A1, typename A2>
inline bool operator!=(const concurrent_hash_map<Key, T, HashCompare, A1> &a, const concurrent_hash_map<Key, T, HashCompare, A2> &b)
{ return !(a == b); }

template<typename Key, typename T, typename HashCompare, typename A>
inline void swap(concurrent_hash_map<Key, T, HashCompare, A> &a, concurrent_hash_map<Key, T, HashCompare, A> &b)
{ a.swap( b ); }

#if _MSC_VER && !defined(__INTEL_COMPILER)
#pragma warning( pop )
#endif // warning 4127 is back

} // namespace tbb

#endif /* __TBB_concurrent_hash_map_H */