libstdc++
bits/hashtable.h
Go to the documentation of this file.
1 // hashtable.h header -*- C++ -*-
2 
3 // Copyright (C) 2007-2022 Free Software Foundation, Inc.
4 //
5 // This file is part of the GNU ISO C++ Library. This library is free
6 // software; you can redistribute it and/or modify it under the
7 // terms of the GNU General Public License as published by the
8 // Free Software Foundation; either version 3, or (at your option)
9 // any later version.
10 
11 // This library is distributed in the hope that it will be useful,
12 // but WITHOUT ANY WARRANTY; without even the implied warranty of
13 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 // GNU General Public License for more details.
15 
16 // Under Section 7 of GPL version 3, you are granted additional
17 // permissions described in the GCC Runtime Library Exception, version
18 // 3.1, as published by the Free Software Foundation.
19 
20 // You should have received a copy of the GNU General Public License and
21 // a copy of the GCC Runtime Library Exception along with this program;
22 // see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23 // <http://www.gnu.org/licenses/>.
24 
25 /** @file bits/hashtable.h
26  * This is an internal header file, included by other library headers.
27  * Do not attempt to use it directly. @headername{unordered_map, unordered_set}
28  */
29 
30 #ifndef _HASHTABLE_H
31 #define _HASHTABLE_H 1
32 
33 #pragma GCC system_header
34 
35 #include <bits/hashtable_policy.h>
37 #if __cplusplus > 201402L
38 # include <bits/node_handle.h>
39 #endif
40 #include <bits/functional_hash.h>
41 #include <bits/stl_function.h> // equal_to, _Identity, _Select1st
42 
43 namespace std _GLIBCXX_VISIBILITY(default)
44 {
45 _GLIBCXX_BEGIN_NAMESPACE_VERSION
46 /// @cond undocumented
47 
48  template<typename _Tp, typename _Hash>
49  using __cache_default
50  = __not_<__and_<// Do not cache for fast hasher.
51  __is_fast_hash<_Hash>,
52  // Mandatory to have erase not throwing.
53  __is_nothrow_invocable<const _Hash&, const _Tp&>>>;
54 
55  // Helper to conditionally delete the default constructor.
56  // The _Hash_node_base type is used to distinguish this specialization
57  // from any other potentially-overlapping subobjects of the hashtable.
58  template<typename _Equal, typename _Hash, typename _Allocator>
59  using _Hashtable_enable_default_ctor
60  = _Enable_default_constructor<__and_<is_default_constructible<_Equal>,
61  is_default_constructible<_Hash>,
62  is_default_constructible<_Allocator>>{},
63  __detail::_Hash_node_base>;
64 
65  /**
66  * Primary class template _Hashtable.
67  *
68  * @ingroup hashtable-detail
69  *
70  * @tparam _Value CopyConstructible type.
71  *
72  * @tparam _Key CopyConstructible type.
73  *
74  * @tparam _Alloc An allocator type
75  * ([lib.allocator.requirements]) whose _Alloc::value_type is
76  * _Value. As a conforming extension, we allow for
77  * _Alloc::value_type != _Value.
78  *
79  * @tparam _ExtractKey Function object that takes an object of type
80  * _Value and returns a value of type _Key.
81  *
82  * @tparam _Equal Function object that takes two objects of type k
83  * and returns a bool-like value that is true if the two objects
84  * are considered equal.
85  *
86  * @tparam _Hash The hash function. A unary function object with
87  * argument type _Key and result type size_t. Return values should
88  * be distributed over the entire range [0, numeric_limits<size_t>:::max()].
89  *
90  * @tparam _RangeHash The range-hashing function (in the terminology of
91  * Tavori and Dreizin). A binary function object whose argument
92  * types and result type are all size_t. Given arguments r and N,
93  * the return value is in the range [0, N).
94  *
95  * @tparam _Unused Not used.
96  *
97  * @tparam _RehashPolicy Policy class with three members, all of
98  * which govern the bucket count. _M_next_bkt(n) returns a bucket
99  * count no smaller than n. _M_bkt_for_elements(n) returns a
100  * bucket count appropriate for an element count of n.
101  * _M_need_rehash(n_bkt, n_elt, n_ins) determines whether, if the
102  * current bucket count is n_bkt and the current element count is
103  * n_elt, we need to increase the bucket count for n_ins insertions.
104  * If so, returns make_pair(true, n), where n is the new bucket count. If
105  * not, returns make_pair(false, <anything>)
106  *
107  * @tparam _Traits Compile-time class with three boolean
108  * std::integral_constant members: __cache_hash_code, __constant_iterators,
109  * __unique_keys.
110  *
111  * Each _Hashtable data structure has:
112  *
113  * - _Bucket[] _M_buckets
114  * - _Hash_node_base _M_before_begin
115  * - size_type _M_bucket_count
116  * - size_type _M_element_count
117  *
118  * with _Bucket being _Hash_node_base* and _Hash_node containing:
119  *
120  * - _Hash_node* _M_next
121  * - Tp _M_value
122  * - size_t _M_hash_code if cache_hash_code is true
123  *
124  * In terms of Standard containers the hashtable is like the aggregation of:
125  *
126  * - std::forward_list<_Node> containing the elements
127  * - std::vector<std::forward_list<_Node>::iterator> representing the buckets
128  *
129  * The non-empty buckets contain the node before the first node in the
130  * bucket. This design makes it possible to implement something like a
131  * std::forward_list::insert_after on container insertion and
132  * std::forward_list::erase_after on container erase
133  * calls. _M_before_begin is equivalent to
134  * std::forward_list::before_begin. Empty buckets contain
135  * nullptr. Note that one of the non-empty buckets contains
136  * &_M_before_begin which is not a dereferenceable node so the
137  * node pointer in a bucket shall never be dereferenced, only its
138  * next node can be.
139  *
140  * Walking through a bucket's nodes requires a check on the hash code to
141  * see if each node is still in the bucket. Such a design assumes a
142  * quite efficient hash functor and is one of the reasons it is
143  * highly advisable to set __cache_hash_code to true.
144  *
145  * The container iterators are simply built from nodes. This way
146  * incrementing the iterator is perfectly efficient independent of
147  * how many empty buckets there are in the container.
148  *
149  * On insert we compute the element's hash code and use it to find the
150  * bucket index. If the element must be inserted in an empty bucket
151  * we add it at the beginning of the singly linked list and make the
152  * bucket point to _M_before_begin. The bucket that used to point to
153  * _M_before_begin, if any, is updated to point to its new before
154  * begin node.
155  *
156  * On erase, the simple iterator design requires using the hash
157  * functor to get the index of the bucket to update. For this
158  * reason, when __cache_hash_code is set to false the hash functor must
159  * not throw and this is enforced by a static assertion.
160  *
161  * Functionality is implemented by decomposition into base classes,
162  * where the derived _Hashtable class is used in _Map_base,
163  * _Insert, _Rehash_base, and _Equality base classes to access the
164  * "this" pointer. _Hashtable_base is used in the base classes as a
165  * non-recursive, fully-completed-type so that detailed nested type
166  * information, such as iterator type and node type, can be
167  * used. This is similar to the "Curiously Recurring Template
168  * Pattern" (CRTP) technique, but uses a reconstructed, not
169  * explicitly passed, template pattern.
170  *
171  * Base class templates are:
172  * - __detail::_Hashtable_base
173  * - __detail::_Map_base
174  * - __detail::_Insert
175  * - __detail::_Rehash_base
176  * - __detail::_Equality
177  */
178  template<typename _Key, typename _Value, typename _Alloc,
179  typename _ExtractKey, typename _Equal,
180  typename _Hash, typename _RangeHash, typename _Unused,
181  typename _RehashPolicy, typename _Traits>
182  class _Hashtable
183  : public __detail::_Hashtable_base<_Key, _Value, _ExtractKey, _Equal,
184  _Hash, _RangeHash, _Unused, _Traits>,
185  public __detail::_Map_base<_Key, _Value, _Alloc, _ExtractKey, _Equal,
186  _Hash, _RangeHash, _Unused,
187  _RehashPolicy, _Traits>,
188  public __detail::_Insert<_Key, _Value, _Alloc, _ExtractKey, _Equal,
189  _Hash, _RangeHash, _Unused,
190  _RehashPolicy, _Traits>,
191  public __detail::_Rehash_base<_Key, _Value, _Alloc, _ExtractKey, _Equal,
192  _Hash, _RangeHash, _Unused,
193  _RehashPolicy, _Traits>,
194  public __detail::_Equality<_Key, _Value, _Alloc, _ExtractKey, _Equal,
195  _Hash, _RangeHash, _Unused,
196  _RehashPolicy, _Traits>,
197  private __detail::_Hashtable_alloc<
198  __alloc_rebind<_Alloc,
199  __detail::_Hash_node<_Value,
200  _Traits::__hash_cached::value>>>,
201  private _Hashtable_enable_default_ctor<_Equal, _Hash, _Alloc>
202  {
203  static_assert(is_same<typename remove_cv<_Value>::type, _Value>::value,
204  "unordered container must have a non-const, non-volatile value_type");
205 #if __cplusplus > 201703L || defined __STRICT_ANSI__
206  static_assert(is_same<typename _Alloc::value_type, _Value>{},
207  "unordered container must have the same value_type as its allocator");
208 #endif
209 
210  using __traits_type = _Traits;
211  using __hash_cached = typename __traits_type::__hash_cached;
212  using __constant_iterators = typename __traits_type::__constant_iterators;
213  using __node_type = __detail::_Hash_node<_Value, __hash_cached::value>;
214  using __node_alloc_type = __alloc_rebind<_Alloc, __node_type>;
215 
216  using __hashtable_alloc = __detail::_Hashtable_alloc<__node_alloc_type>;
217 
218  using __node_value_type =
219  __detail::_Hash_node_value<_Value, __hash_cached::value>;
220  using __node_ptr = typename __hashtable_alloc::__node_ptr;
221  using __value_alloc_traits =
222  typename __hashtable_alloc::__value_alloc_traits;
223  using __node_alloc_traits =
224  typename __hashtable_alloc::__node_alloc_traits;
225  using __node_base = typename __hashtable_alloc::__node_base;
226  using __node_base_ptr = typename __hashtable_alloc::__node_base_ptr;
227  using __buckets_ptr = typename __hashtable_alloc::__buckets_ptr;
228 
229  using __insert_base = __detail::_Insert<_Key, _Value, _Alloc, _ExtractKey,
230  _Equal, _Hash,
231  _RangeHash, _Unused,
232  _RehashPolicy, _Traits>;
233  using __enable_default_ctor
234  = _Hashtable_enable_default_ctor<_Equal, _Hash, _Alloc>;
235 
236  public:
237  typedef _Key key_type;
238  typedef _Value value_type;
239  typedef _Alloc allocator_type;
240  typedef _Equal key_equal;
241 
242  // mapped_type, if present, comes from _Map_base.
243  // hasher, if present, comes from _Hash_code_base/_Hashtable_base.
244  typedef typename __value_alloc_traits::pointer pointer;
245  typedef typename __value_alloc_traits::const_pointer const_pointer;
246  typedef value_type& reference;
247  typedef const value_type& const_reference;
248 
249  using iterator = typename __insert_base::iterator;
250 
251  using const_iterator = typename __insert_base::const_iterator;
252 
253  using local_iterator = __detail::_Local_iterator<key_type, _Value,
254  _ExtractKey, _Hash, _RangeHash, _Unused,
255  __constant_iterators::value,
256  __hash_cached::value>;
257 
258  using const_local_iterator = __detail::_Local_const_iterator<
259  key_type, _Value,
260  _ExtractKey, _Hash, _RangeHash, _Unused,
261  __constant_iterators::value, __hash_cached::value>;
262 
263  private:
264  using __rehash_type = _RehashPolicy;
265  using __rehash_state = typename __rehash_type::_State;
266 
267  using __unique_keys = typename __traits_type::__unique_keys;
268 
269  using __hashtable_base = __detail::
270  _Hashtable_base<_Key, _Value, _ExtractKey,
271  _Equal, _Hash, _RangeHash, _Unused, _Traits>;
272 
273  using __hash_code_base = typename __hashtable_base::__hash_code_base;
274  using __hash_code = typename __hashtable_base::__hash_code;
275  using __ireturn_type = typename __insert_base::__ireturn_type;
276 
277  using __map_base = __detail::_Map_base<_Key, _Value, _Alloc, _ExtractKey,
278  _Equal, _Hash, _RangeHash, _Unused,
279  _RehashPolicy, _Traits>;
280 
281  using __rehash_base = __detail::_Rehash_base<_Key, _Value, _Alloc,
282  _ExtractKey, _Equal,
283  _Hash, _RangeHash, _Unused,
284  _RehashPolicy, _Traits>;
285 
286  using __eq_base = __detail::_Equality<_Key, _Value, _Alloc, _ExtractKey,
287  _Equal, _Hash, _RangeHash, _Unused,
288  _RehashPolicy, _Traits>;
289 
290  using __reuse_or_alloc_node_gen_t =
291  __detail::_ReuseOrAllocNode<__node_alloc_type>;
292  using __alloc_node_gen_t =
293  __detail::_AllocNode<__node_alloc_type>;
294  using __node_builder_t =
295  __detail::_NodeBuilder<_ExtractKey>;
296 
297  // Simple RAII type for managing a node containing an element
298  struct _Scoped_node
299  {
300  // Take ownership of a node with a constructed element.
301  _Scoped_node(__node_ptr __n, __hashtable_alloc* __h)
302  : _M_h(__h), _M_node(__n) { }
303 
304  // Allocate a node and construct an element within it.
305  template<typename... _Args>
306  _Scoped_node(__hashtable_alloc* __h, _Args&&... __args)
307  : _M_h(__h),
308  _M_node(__h->_M_allocate_node(std::forward<_Args>(__args)...))
309  { }
310 
311  // Destroy element and deallocate node.
312  ~_Scoped_node() { if (_M_node) _M_h->_M_deallocate_node(_M_node); };
313 
314  _Scoped_node(const _Scoped_node&) = delete;
315  _Scoped_node& operator=(const _Scoped_node&) = delete;
316 
317  __hashtable_alloc* _M_h;
318  __node_ptr _M_node;
319  };
320 
321  template<typename _Ht>
322  static constexpr
323  __conditional_t<std::is_lvalue_reference<_Ht>::value,
324  const value_type&, value_type&&>
325  __fwd_value_for(value_type& __val) noexcept
326  { return std::move(__val); }
327 
328  // Compile-time diagnostics.
329 
330  // _Hash_code_base has everything protected, so use this derived type to
331  // access it.
332  struct __hash_code_base_access : __hash_code_base
333  { using __hash_code_base::_M_bucket_index; };
334 
335  // To get bucket index we need _RangeHash not to throw.
336  static_assert(is_nothrow_default_constructible<_RangeHash>::value,
337  "Functor used to map hash code to bucket index"
338  " must be nothrow default constructible");
339  static_assert(noexcept(
340  std::declval<const _RangeHash&>()((std::size_t)0, (std::size_t)0)),
341  "Functor used to map hash code to bucket index must be"
342  " noexcept");
343 
344  // To compute bucket index we also need _ExtratKey not to throw.
345  static_assert(is_nothrow_default_constructible<_ExtractKey>::value,
346  "_ExtractKey must be nothrow default constructible");
347  static_assert(noexcept(
348  std::declval<const _ExtractKey&>()(std::declval<_Value>())),
349  "_ExtractKey functor must be noexcept invocable");
350 
351  template<typename _Keya, typename _Valuea, typename _Alloca,
352  typename _ExtractKeya, typename _Equala,
353  typename _Hasha, typename _RangeHasha, typename _Unuseda,
354  typename _RehashPolicya, typename _Traitsa,
355  bool _Unique_keysa>
356  friend struct __detail::_Map_base;
357 
358  template<typename _Keya, typename _Valuea, typename _Alloca,
359  typename _ExtractKeya, typename _Equala,
360  typename _Hasha, typename _RangeHasha, typename _Unuseda,
361  typename _RehashPolicya, typename _Traitsa>
362  friend struct __detail::_Insert_base;
363 
364  template<typename _Keya, typename _Valuea, typename _Alloca,
365  typename _ExtractKeya, typename _Equala,
366  typename _Hasha, typename _RangeHasha, typename _Unuseda,
367  typename _RehashPolicya, typename _Traitsa,
368  bool _Constant_iteratorsa>
369  friend struct __detail::_Insert;
370 
371  template<typename _Keya, typename _Valuea, typename _Alloca,
372  typename _ExtractKeya, typename _Equala,
373  typename _Hasha, typename _RangeHasha, typename _Unuseda,
374  typename _RehashPolicya, typename _Traitsa,
375  bool _Unique_keysa>
376  friend struct __detail::_Equality;
377 
378  public:
379  using size_type = typename __hashtable_base::size_type;
380  using difference_type = typename __hashtable_base::difference_type;
381 
382 #if __cplusplus > 201402L
383  using node_type = _Node_handle<_Key, _Value, __node_alloc_type>;
384  using insert_return_type = _Node_insert_return<iterator, node_type>;
385 #endif
386 
387  private:
388  __buckets_ptr _M_buckets = &_M_single_bucket;
389  size_type _M_bucket_count = 1;
390  __node_base _M_before_begin;
391  size_type _M_element_count = 0;
392  _RehashPolicy _M_rehash_policy;
393 
394  // A single bucket used when only need for 1 bucket. Especially
395  // interesting in move semantic to leave hashtable with only 1 bucket
396  // which is not allocated so that we can have those operations noexcept
397  // qualified.
398  // Note that we can't leave hashtable with 0 bucket without adding
399  // numerous checks in the code to avoid 0 modulus.
400  __node_base_ptr _M_single_bucket = nullptr;
401 
402  void
403  _M_update_bbegin()
404  {
405  if (_M_begin())
406  _M_buckets[_M_bucket_index(*_M_begin())] = &_M_before_begin;
407  }
408 
409  void
410  _M_update_bbegin(__node_ptr __n)
411  {
412  _M_before_begin._M_nxt = __n;
413  _M_update_bbegin();
414  }
415 
416  bool
417  _M_uses_single_bucket(__buckets_ptr __bkts) const
418  { return __builtin_expect(__bkts == &_M_single_bucket, false); }
419 
420  bool
421  _M_uses_single_bucket() const
422  { return _M_uses_single_bucket(_M_buckets); }
423 
424  static constexpr size_t
425  __small_size_threshold() noexcept
426  {
427  return
428  __detail::_Hashtable_hash_traits<_Hash>::__small_size_threshold();
429  }
430 
431  __hashtable_alloc&
432  _M_base_alloc() { return *this; }
433 
434  __buckets_ptr
435  _M_allocate_buckets(size_type __bkt_count)
436  {
437  if (__builtin_expect(__bkt_count == 1, false))
438  {
439  _M_single_bucket = nullptr;
440  return &_M_single_bucket;
441  }
442 
443  return __hashtable_alloc::_M_allocate_buckets(__bkt_count);
444  }
445 
446  void
447  _M_deallocate_buckets(__buckets_ptr __bkts, size_type __bkt_count)
448  {
449  if (_M_uses_single_bucket(__bkts))
450  return;
451 
452  __hashtable_alloc::_M_deallocate_buckets(__bkts, __bkt_count);
453  }
454 
455  void
456  _M_deallocate_buckets()
457  { _M_deallocate_buckets(_M_buckets, _M_bucket_count); }
458 
459  // Gets bucket begin, deals with the fact that non-empty buckets contain
460  // their before begin node.
461  __node_ptr
462  _M_bucket_begin(size_type __bkt) const;
463 
464  __node_ptr
465  _M_begin() const
466  { return static_cast<__node_ptr>(_M_before_begin._M_nxt); }
467 
468  // Assign *this using another _Hashtable instance. Whether elements
469  // are copied or moved depends on the _Ht reference.
470  template<typename _Ht>
471  void
472  _M_assign_elements(_Ht&&);
473 
474  template<typename _Ht, typename _NodeGenerator>
475  void
476  _M_assign(_Ht&&, const _NodeGenerator&);
477 
478  void
479  _M_move_assign(_Hashtable&&, true_type);
480 
481  void
482  _M_move_assign(_Hashtable&&, false_type);
483 
484  void
485  _M_reset() noexcept;
486 
487  _Hashtable(const _Hash& __h, const _Equal& __eq,
488  const allocator_type& __a)
489  : __hashtable_base(__h, __eq),
490  __hashtable_alloc(__node_alloc_type(__a)),
491  __enable_default_ctor(_Enable_default_constructor_tag{})
492  { }
493 
494  template<bool _No_realloc = true>
495  static constexpr bool
496  _S_nothrow_move()
497  {
498 #if __cplusplus <= 201402L
499  return __and_<__bool_constant<_No_realloc>,
500  is_nothrow_copy_constructible<_Hash>,
501  is_nothrow_copy_constructible<_Equal>>::value;
502 #else
503  if constexpr (_No_realloc)
504  if constexpr (is_nothrow_copy_constructible<_Hash>())
505  return is_nothrow_copy_constructible<_Equal>();
506  return false;
507 #endif
508  }
509 
510  _Hashtable(_Hashtable&& __ht, __node_alloc_type&& __a,
511  true_type /* alloc always equal */)
512  noexcept(_S_nothrow_move());
513 
514  _Hashtable(_Hashtable&&, __node_alloc_type&&,
515  false_type /* alloc always equal */);
516 
517  template<typename _InputIterator>
518  _Hashtable(_InputIterator __first, _InputIterator __last,
519  size_type __bkt_count_hint,
520  const _Hash&, const _Equal&, const allocator_type&,
521  true_type __uks);
522 
523  template<typename _InputIterator>
524  _Hashtable(_InputIterator __first, _InputIterator __last,
525  size_type __bkt_count_hint,
526  const _Hash&, const _Equal&, const allocator_type&,
527  false_type __uks);
528 
529  public:
530  // Constructor, destructor, assignment, swap
531  _Hashtable() = default;
532 
533  _Hashtable(const _Hashtable&);
534 
535  _Hashtable(const _Hashtable&, const allocator_type&);
536 
537  explicit
538  _Hashtable(size_type __bkt_count_hint,
539  const _Hash& __hf = _Hash(),
540  const key_equal& __eql = key_equal(),
541  const allocator_type& __a = allocator_type());
542 
543  // Use delegating constructors.
544  _Hashtable(_Hashtable&& __ht)
545  noexcept(_S_nothrow_move())
546  : _Hashtable(std::move(__ht), std::move(__ht._M_node_allocator()),
547  true_type{})
548  { }
549 
550  _Hashtable(_Hashtable&& __ht, const allocator_type& __a)
551  noexcept(_S_nothrow_move<__node_alloc_traits::_S_always_equal()>())
552  : _Hashtable(std::move(__ht), __node_alloc_type(__a),
553  typename __node_alloc_traits::is_always_equal{})
554  { }
555 
556  explicit
557  _Hashtable(const allocator_type& __a)
558  : __hashtable_alloc(__node_alloc_type(__a)),
559  __enable_default_ctor(_Enable_default_constructor_tag{})
560  { }
561 
562  template<typename _InputIterator>
563  _Hashtable(_InputIterator __f, _InputIterator __l,
564  size_type __bkt_count_hint = 0,
565  const _Hash& __hf = _Hash(),
566  const key_equal& __eql = key_equal(),
567  const allocator_type& __a = allocator_type())
568  : _Hashtable(__f, __l, __bkt_count_hint, __hf, __eql, __a,
569  __unique_keys{})
570  { }
571 
572  _Hashtable(initializer_list<value_type> __l,
573  size_type __bkt_count_hint = 0,
574  const _Hash& __hf = _Hash(),
575  const key_equal& __eql = key_equal(),
576  const allocator_type& __a = allocator_type())
577  : _Hashtable(__l.begin(), __l.end(), __bkt_count_hint,
578  __hf, __eql, __a, __unique_keys{})
579  { }
580 
581  _Hashtable&
582  operator=(const _Hashtable& __ht);
583 
584  _Hashtable&
585  operator=(_Hashtable&& __ht)
586  noexcept(__node_alloc_traits::_S_nothrow_move()
587  && is_nothrow_move_assignable<_Hash>::value
588  && is_nothrow_move_assignable<_Equal>::value)
589  {
590  constexpr bool __move_storage =
591  __node_alloc_traits::_S_propagate_on_move_assign()
592  || __node_alloc_traits::_S_always_equal();
593  _M_move_assign(std::move(__ht), __bool_constant<__move_storage>());
594  return *this;
595  }
596 
597  _Hashtable&
598  operator=(initializer_list<value_type> __l)
599  {
600  __reuse_or_alloc_node_gen_t __roan(_M_begin(), *this);
601  _M_before_begin._M_nxt = nullptr;
602  clear();
603 
604  // We consider that all elements of __l are going to be inserted.
605  auto __l_bkt_count = _M_rehash_policy._M_bkt_for_elements(__l.size());
606 
607  // Do not shrink to keep potential user reservation.
608  if (_M_bucket_count < __l_bkt_count)
609  rehash(__l_bkt_count);
610 
611  this->_M_insert_range(__l.begin(), __l.end(), __roan, __unique_keys{});
612  return *this;
613  }
614 
615  ~_Hashtable() noexcept;
616 
617  void
618  swap(_Hashtable&)
619  noexcept(__and_<__is_nothrow_swappable<_Hash>,
620  __is_nothrow_swappable<_Equal>>::value);
621 
622  // Basic container operations
623  iterator
624  begin() noexcept
625  { return iterator(_M_begin()); }
626 
627  const_iterator
628  begin() const noexcept
629  { return const_iterator(_M_begin()); }
630 
631  iterator
632  end() noexcept
633  { return iterator(nullptr); }
634 
635  const_iterator
636  end() const noexcept
637  { return const_iterator(nullptr); }
638 
639  const_iterator
640  cbegin() const noexcept
641  { return const_iterator(_M_begin()); }
642 
643  const_iterator
644  cend() const noexcept
645  { return const_iterator(nullptr); }
646 
647  size_type
648  size() const noexcept
649  { return _M_element_count; }
650 
651  _GLIBCXX_NODISCARD bool
652  empty() const noexcept
653  { return size() == 0; }
654 
655  allocator_type
656  get_allocator() const noexcept
657  { return allocator_type(this->_M_node_allocator()); }
658 
659  size_type
660  max_size() const noexcept
661  { return __node_alloc_traits::max_size(this->_M_node_allocator()); }
662 
663  // Observers
664  key_equal
665  key_eq() const
666  { return this->_M_eq(); }
667 
668  // hash_function, if present, comes from _Hash_code_base.
669 
670  // Bucket operations
671  size_type
672  bucket_count() const noexcept
673  { return _M_bucket_count; }
674 
675  size_type
676  max_bucket_count() const noexcept
677  { return max_size(); }
678 
679  size_type
680  bucket_size(size_type __bkt) const
681  { return std::distance(begin(__bkt), end(__bkt)); }
682 
683  size_type
684  bucket(const key_type& __k) const
685  { return _M_bucket_index(this->_M_hash_code(__k)); }
686 
687  local_iterator
688  begin(size_type __bkt)
689  {
690  return local_iterator(*this, _M_bucket_begin(__bkt),
691  __bkt, _M_bucket_count);
692  }
693 
694  local_iterator
695  end(size_type __bkt)
696  { return local_iterator(*this, nullptr, __bkt, _M_bucket_count); }
697 
698  const_local_iterator
699  begin(size_type __bkt) const
700  {
701  return const_local_iterator(*this, _M_bucket_begin(__bkt),
702  __bkt, _M_bucket_count);
703  }
704 
705  const_local_iterator
706  end(size_type __bkt) const
707  { return const_local_iterator(*this, nullptr, __bkt, _M_bucket_count); }
708 
709  // DR 691.
710  const_local_iterator
711  cbegin(size_type __bkt) const
712  {
713  return const_local_iterator(*this, _M_bucket_begin(__bkt),
714  __bkt, _M_bucket_count);
715  }
716 
717  const_local_iterator
718  cend(size_type __bkt) const
719  { return const_local_iterator(*this, nullptr, __bkt, _M_bucket_count); }
720 
721  float
722  load_factor() const noexcept
723  {
724  return static_cast<float>(size()) / static_cast<float>(bucket_count());
725  }
726 
727  // max_load_factor, if present, comes from _Rehash_base.
728 
729  // Generalization of max_load_factor. Extension, not found in
730  // TR1. Only useful if _RehashPolicy is something other than
731  // the default.
732  const _RehashPolicy&
733  __rehash_policy() const
734  { return _M_rehash_policy; }
735 
736  void
737  __rehash_policy(const _RehashPolicy& __pol)
738  { _M_rehash_policy = __pol; }
739 
740  // Lookup.
741  iterator
742  find(const key_type& __k);
743 
744  const_iterator
745  find(const key_type& __k) const;
746 
747  size_type
748  count(const key_type& __k) const;
749 
751  equal_range(const key_type& __k);
752 
754  equal_range(const key_type& __k) const;
755 
756 #if __cplusplus >= 202002L
757 #define __cpp_lib_generic_unordered_lookup 201811L
758 
759  template<typename _Kt,
760  typename = __has_is_transparent_t<_Hash, _Kt>,
761  typename = __has_is_transparent_t<_Equal, _Kt>>
762  iterator
763  _M_find_tr(const _Kt& __k);
764 
765  template<typename _Kt,
766  typename = __has_is_transparent_t<_Hash, _Kt>,
767  typename = __has_is_transparent_t<_Equal, _Kt>>
768  const_iterator
769  _M_find_tr(const _Kt& __k) const;
770 
771  template<typename _Kt,
772  typename = __has_is_transparent_t<_Hash, _Kt>,
773  typename = __has_is_transparent_t<_Equal, _Kt>>
774  size_type
775  _M_count_tr(const _Kt& __k) const;
776 
777  template<typename _Kt,
778  typename = __has_is_transparent_t<_Hash, _Kt>,
779  typename = __has_is_transparent_t<_Equal, _Kt>>
780  pair<iterator, iterator>
781  _M_equal_range_tr(const _Kt& __k);
782 
783  template<typename _Kt,
784  typename = __has_is_transparent_t<_Hash, _Kt>,
785  typename = __has_is_transparent_t<_Equal, _Kt>>
786  pair<const_iterator, const_iterator>
787  _M_equal_range_tr(const _Kt& __k) const;
788 #endif // C++20
789 
790  private:
791  // Bucket index computation helpers.
792  size_type
793  _M_bucket_index(const __node_value_type& __n) const noexcept
794  { return __hash_code_base::_M_bucket_index(__n, _M_bucket_count); }
795 
796  size_type
797  _M_bucket_index(__hash_code __c) const
798  { return __hash_code_base::_M_bucket_index(__c, _M_bucket_count); }
799 
800  __node_base_ptr
801  _M_find_before_node(const key_type&);
802 
803  // Find and insert helper functions and types
804  // Find the node before the one matching the criteria.
805  __node_base_ptr
806  _M_find_before_node(size_type, const key_type&, __hash_code) const;
807 
808  template<typename _Kt>
809  __node_base_ptr
810  _M_find_before_node_tr(size_type, const _Kt&, __hash_code) const;
811 
812  __node_ptr
813  _M_find_node(size_type __bkt, const key_type& __key,
814  __hash_code __c) const
815  {
816  __node_base_ptr __before_n = _M_find_before_node(__bkt, __key, __c);
817  if (__before_n)
818  return static_cast<__node_ptr>(__before_n->_M_nxt);
819  return nullptr;
820  }
821 
822  template<typename _Kt>
823  __node_ptr
824  _M_find_node_tr(size_type __bkt, const _Kt& __key,
825  __hash_code __c) const
826  {
827  auto __before_n = _M_find_before_node_tr(__bkt, __key, __c);
828  if (__before_n)
829  return static_cast<__node_ptr>(__before_n->_M_nxt);
830  return nullptr;
831  }
832 
833  // Insert a node at the beginning of a bucket.
834  void
835  _M_insert_bucket_begin(size_type, __node_ptr);
836 
837  // Remove the bucket first node
838  void
839  _M_remove_bucket_begin(size_type __bkt, __node_ptr __next_n,
840  size_type __next_bkt);
841 
842  // Get the node before __n in the bucket __bkt
843  __node_base_ptr
844  _M_get_previous_node(size_type __bkt, __node_ptr __n);
845 
846  pair<const_iterator, __hash_code>
847  _M_compute_hash_code(const_iterator __hint, const key_type& __k) const;
848 
849  // Insert node __n with hash code __code, in bucket __bkt if no
850  // rehash (assumes no element with same key already present).
851  // Takes ownership of __n if insertion succeeds, throws otherwise.
852  iterator
853  _M_insert_unique_node(size_type __bkt, __hash_code,
854  __node_ptr __n, size_type __n_elt = 1);
855 
856  // Insert node __n with key __k and hash code __code.
857  // Takes ownership of __n if insertion succeeds, throws otherwise.
858  iterator
859  _M_insert_multi_node(__node_ptr __hint,
860  __hash_code __code, __node_ptr __n);
861 
862  template<typename... _Args>
864  _M_emplace(true_type __uks, _Args&&... __args);
865 
866  template<typename... _Args>
867  iterator
868  _M_emplace(false_type __uks, _Args&&... __args)
869  { return _M_emplace(cend(), __uks, std::forward<_Args>(__args)...); }
870 
871  // Emplace with hint, useless when keys are unique.
872  template<typename... _Args>
873  iterator
874  _M_emplace(const_iterator, true_type __uks, _Args&&... __args)
875  { return _M_emplace(__uks, std::forward<_Args>(__args)...).first; }
876 
877  template<typename... _Args>
878  iterator
879  _M_emplace(const_iterator, false_type __uks, _Args&&... __args);
880 
881  template<typename _Kt, typename _Arg, typename _NodeGenerator>
883  _M_insert_unique(_Kt&&, _Arg&&, const _NodeGenerator&);
884 
885  template<typename _Kt>
886  static __conditional_t<
887  __and_<__is_nothrow_invocable<_Hash&, const key_type&>,
888  __not_<__is_nothrow_invocable<_Hash&, _Kt>>>::value,
889  key_type, _Kt&&>
890  _S_forward_key(_Kt&& __k)
891  { return std::forward<_Kt>(__k); }
892 
893  static const key_type&
894  _S_forward_key(const key_type& __k)
895  { return __k; }
896 
897  static key_type&&
898  _S_forward_key(key_type&& __k)
899  { return std::move(__k); }
900 
901  template<typename _Arg, typename _NodeGenerator>
903  _M_insert(_Arg&& __arg, const _NodeGenerator& __node_gen,
904  true_type /* __uks */)
905  {
906  return _M_insert_unique(
907  _S_forward_key(_ExtractKey{}(std::forward<_Arg>(__arg))),
908  std::forward<_Arg>(__arg), __node_gen);
909  }
910 
911  template<typename _Arg, typename _NodeGenerator>
912  iterator
913  _M_insert(_Arg&& __arg, const _NodeGenerator& __node_gen,
914  false_type __uks)
915  {
916  return _M_insert(cend(), std::forward<_Arg>(__arg), __node_gen,
917  __uks);
918  }
919 
920  // Insert with hint, not used when keys are unique.
921  template<typename _Arg, typename _NodeGenerator>
922  iterator
923  _M_insert(const_iterator, _Arg&& __arg,
924  const _NodeGenerator& __node_gen, true_type __uks)
925  {
926  return
927  _M_insert(std::forward<_Arg>(__arg), __node_gen, __uks).first;
928  }
929 
930  // Insert with hint when keys are not unique.
931  template<typename _Arg, typename _NodeGenerator>
932  iterator
933  _M_insert(const_iterator, _Arg&&,
934  const _NodeGenerator&, false_type __uks);
935 
936  size_type
937  _M_erase(true_type __uks, const key_type&);
938 
939  size_type
940  _M_erase(false_type __uks, const key_type&);
941 
942  iterator
943  _M_erase(size_type __bkt, __node_base_ptr __prev_n, __node_ptr __n);
944 
945  public:
946  // Emplace
947  template<typename... _Args>
948  __ireturn_type
949  emplace(_Args&&... __args)
950  { return _M_emplace(__unique_keys{}, std::forward<_Args>(__args)...); }
951 
952  template<typename... _Args>
953  iterator
954  emplace_hint(const_iterator __hint, _Args&&... __args)
955  {
956  return _M_emplace(__hint, __unique_keys{},
957  std::forward<_Args>(__args)...);
958  }
959 
960  // Insert member functions via inheritance.
961 
962  // Erase
963  iterator
964  erase(const_iterator);
965 
966  // LWG 2059.
967  iterator
968  erase(iterator __it)
969  { return erase(const_iterator(__it)); }
970 
971  size_type
972  erase(const key_type& __k)
973  { return _M_erase(__unique_keys{}, __k); }
974 
975  iterator
976  erase(const_iterator, const_iterator);
977 
978  void
979  clear() noexcept;
980 
981  // Set number of buckets keeping it appropriate for container's number
982  // of elements.
983  void rehash(size_type __bkt_count);
984 
985  // DR 1189.
986  // reserve, if present, comes from _Rehash_base.
987 
988 #if __cplusplus > 201402L
989  /// Re-insert an extracted node into a container with unique keys.
990  insert_return_type
991  _M_reinsert_node(node_type&& __nh)
992  {
993  insert_return_type __ret;
994  if (__nh.empty())
995  __ret.position = end();
996  else
997  {
998  __glibcxx_assert(get_allocator() == __nh.get_allocator());
999 
1000  const key_type& __k = __nh._M_key();
1001  __hash_code __code = this->_M_hash_code(__k);
1002  size_type __bkt = _M_bucket_index(__code);
1003  if (__node_ptr __n = _M_find_node(__bkt, __k, __code))
1004  {
1005  __ret.node = std::move(__nh);
1006  __ret.position = iterator(__n);
1007  __ret.inserted = false;
1008  }
1009  else
1010  {
1011  __ret.position
1012  = _M_insert_unique_node(__bkt, __code, __nh._M_ptr);
1013  __nh._M_ptr = nullptr;
1014  __ret.inserted = true;
1015  }
1016  }
1017  return __ret;
1018  }
1019 
1020  /// Re-insert an extracted node into a container with equivalent keys.
1021  iterator
1022  _M_reinsert_node_multi(const_iterator __hint, node_type&& __nh)
1023  {
1024  if (__nh.empty())
1025  return end();
1026 
1027  __glibcxx_assert(get_allocator() == __nh.get_allocator());
1028 
1029  const key_type& __k = __nh._M_key();
1030  auto __code = this->_M_hash_code(__k);
1031  auto __ret
1032  = _M_insert_multi_node(__hint._M_cur, __code, __nh._M_ptr);
1033  __nh._M_ptr = nullptr;
1034  return __ret;
1035  }
1036 
1037  private:
1038  node_type
1039  _M_extract_node(size_t __bkt, __node_base_ptr __prev_n)
1040  {
1041  __node_ptr __n = static_cast<__node_ptr>(__prev_n->_M_nxt);
1042  if (__prev_n == _M_buckets[__bkt])
1043  _M_remove_bucket_begin(__bkt, __n->_M_next(),
1044  __n->_M_nxt ? _M_bucket_index(*__n->_M_next()) : 0);
1045  else if (__n->_M_nxt)
1046  {
1047  size_type __next_bkt = _M_bucket_index(*__n->_M_next());
1048  if (__next_bkt != __bkt)
1049  _M_buckets[__next_bkt] = __prev_n;
1050  }
1051 
1052  __prev_n->_M_nxt = __n->_M_nxt;
1053  __n->_M_nxt = nullptr;
1054  --_M_element_count;
1055  return { __n, this->_M_node_allocator() };
1056  }
1057 
1058  public:
1059  // Extract a node.
1060  node_type
1061  extract(const_iterator __pos)
1062  {
1063  size_t __bkt = _M_bucket_index(*__pos._M_cur);
1064  return _M_extract_node(__bkt,
1065  _M_get_previous_node(__bkt, __pos._M_cur));
1066  }
1067 
1068  /// Extract a node.
1069  node_type
1070  extract(const _Key& __k)
1071  {
1072  node_type __nh;
1073  __hash_code __code = this->_M_hash_code(__k);
1074  std::size_t __bkt = _M_bucket_index(__code);
1075  if (__node_base_ptr __prev_node = _M_find_before_node(__bkt, __k, __code))
1076  __nh = _M_extract_node(__bkt, __prev_node);
1077  return __nh;
1078  }
1079 
1080  /// Merge from a compatible container into one with unique keys.
1081  template<typename _Compatible_Hashtable>
1082  void
1083  _M_merge_unique(_Compatible_Hashtable& __src)
1084  {
1085  static_assert(is_same_v<typename _Compatible_Hashtable::node_type,
1086  node_type>, "Node types are compatible");
1087  __glibcxx_assert(get_allocator() == __src.get_allocator());
1088 
1089  auto __n_elt = __src.size();
1090  for (auto __i = __src.cbegin(), __end = __src.cend(); __i != __end;)
1091  {
1092  auto __pos = __i++;
1093  const key_type& __k = _ExtractKey{}(*__pos);
1094  __hash_code __code
1095  = this->_M_hash_code(__src.hash_function(), *__pos._M_cur);
1096  size_type __bkt = _M_bucket_index(__code);
1097  if (_M_find_node(__bkt, __k, __code) == nullptr)
1098  {
1099  auto __nh = __src.extract(__pos);
1100  _M_insert_unique_node(__bkt, __code, __nh._M_ptr, __n_elt);
1101  __nh._M_ptr = nullptr;
1102  __n_elt = 1;
1103  }
1104  else if (__n_elt != 1)
1105  --__n_elt;
1106  }
1107  }
1108 
1109  /// Merge from a compatible container into one with equivalent keys.
1110  template<typename _Compatible_Hashtable>
1111  void
1112  _M_merge_multi(_Compatible_Hashtable& __src)
1113  {
1114  static_assert(is_same_v<typename _Compatible_Hashtable::node_type,
1115  node_type>, "Node types are compatible");
1116  __glibcxx_assert(get_allocator() == __src.get_allocator());
1117 
1118  __node_ptr __hint = nullptr;
1119  this->reserve(size() + __src.size());
1120  for (auto __i = __src.cbegin(), __end = __src.cend(); __i != __end;)
1121  {
1122  auto __pos = __i++;
1123  __hash_code __code
1124  = this->_M_hash_code(__src.hash_function(), *__pos._M_cur);
1125  auto __nh = __src.extract(__pos);
1126  __hint = _M_insert_multi_node(__hint, __code, __nh._M_ptr)._M_cur;
1127  __nh._M_ptr = nullptr;
1128  }
1129  }
1130 #endif // C++17
1131 
1132  private:
1133  // Helper rehash method used when keys are unique.
1134  void _M_rehash_aux(size_type __bkt_count, true_type __uks);
1135 
1136  // Helper rehash method used when keys can be non-unique.
1137  void _M_rehash_aux(size_type __bkt_count, false_type __uks);
1138 
1139  // Unconditionally change size of bucket array to n, restore
1140  // hash policy state to __state on exception.
1141  void _M_rehash(size_type __bkt_count, const __rehash_state& __state);
1142  };
1143 
1144  // Definitions of class template _Hashtable's out-of-line member functions.
1145  template<typename _Key, typename _Value, typename _Alloc,
1146  typename _ExtractKey, typename _Equal,
1147  typename _Hash, typename _RangeHash, typename _Unused,
1148  typename _RehashPolicy, typename _Traits>
1149  auto
1150  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1151  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1152  _M_bucket_begin(size_type __bkt) const
1153  -> __node_ptr
1154  {
1155  __node_base_ptr __n = _M_buckets[__bkt];
1156  return __n ? static_cast<__node_ptr>(__n->_M_nxt) : nullptr;
1157  }
1158 
1159  template<typename _Key, typename _Value, typename _Alloc,
1160  typename _ExtractKey, typename _Equal,
1161  typename _Hash, typename _RangeHash, typename _Unused,
1162  typename _RehashPolicy, typename _Traits>
1163  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1164  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1165  _Hashtable(size_type __bkt_count_hint,
1166  const _Hash& __h, const _Equal& __eq, const allocator_type& __a)
1167  : _Hashtable(__h, __eq, __a)
1168  {
1169  auto __bkt_count = _M_rehash_policy._M_next_bkt(__bkt_count_hint);
1170  if (__bkt_count > _M_bucket_count)
1171  {
1172  _M_buckets = _M_allocate_buckets(__bkt_count);
1173  _M_bucket_count = __bkt_count;
1174  }
1175  }
1176 
1177  template<typename _Key, typename _Value, typename _Alloc,
1178  typename _ExtractKey, typename _Equal,
1179  typename _Hash, typename _RangeHash, typename _Unused,
1180  typename _RehashPolicy, typename _Traits>
1181  template<typename _InputIterator>
1182  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1183  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1184  _Hashtable(_InputIterator __f, _InputIterator __l,
1185  size_type __bkt_count_hint,
1186  const _Hash& __h, const _Equal& __eq,
1187  const allocator_type& __a, true_type /* __uks */)
1188  : _Hashtable(__bkt_count_hint, __h, __eq, __a)
1189  {
1190  for (; __f != __l; ++__f)
1191  this->insert(*__f);
1192  }
1193 
1194  template<typename _Key, typename _Value, typename _Alloc,
1195  typename _ExtractKey, typename _Equal,
1196  typename _Hash, typename _RangeHash, typename _Unused,
1197  typename _RehashPolicy, typename _Traits>
1198  template<typename _InputIterator>
1199  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1200  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1201  _Hashtable(_InputIterator __f, _InputIterator __l,
1202  size_type __bkt_count_hint,
1203  const _Hash& __h, const _Equal& __eq,
1204  const allocator_type& __a, false_type /* __uks */)
1205  : _Hashtable(__h, __eq, __a)
1206  {
1207  auto __nb_elems = __detail::__distance_fw(__f, __l);
1208  auto __bkt_count =
1209  _M_rehash_policy._M_next_bkt(
1210  std::max(_M_rehash_policy._M_bkt_for_elements(__nb_elems),
1211  __bkt_count_hint));
1212 
1213  if (__bkt_count > _M_bucket_count)
1214  {
1215  _M_buckets = _M_allocate_buckets(__bkt_count);
1216  _M_bucket_count = __bkt_count;
1217  }
1218 
1219  for (; __f != __l; ++__f)
1220  this->insert(*__f);
1221  }
1222 
1223  template<typename _Key, typename _Value, typename _Alloc,
1224  typename _ExtractKey, typename _Equal,
1225  typename _Hash, typename _RangeHash, typename _Unused,
1226  typename _RehashPolicy, typename _Traits>
1227  auto
1228  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1229  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1230  operator=(const _Hashtable& __ht)
1231  -> _Hashtable&
1232  {
1233  if (&__ht == this)
1234  return *this;
1235 
1236  if (__node_alloc_traits::_S_propagate_on_copy_assign())
1237  {
1238  auto& __this_alloc = this->_M_node_allocator();
1239  auto& __that_alloc = __ht._M_node_allocator();
1240  if (!__node_alloc_traits::_S_always_equal()
1241  && __this_alloc != __that_alloc)
1242  {
1243  // Replacement allocator cannot free existing storage.
1244  this->_M_deallocate_nodes(_M_begin());
1245  _M_before_begin._M_nxt = nullptr;
1246  _M_deallocate_buckets();
1247  _M_buckets = nullptr;
1248  std::__alloc_on_copy(__this_alloc, __that_alloc);
1249  __hashtable_base::operator=(__ht);
1250  _M_bucket_count = __ht._M_bucket_count;
1251  _M_element_count = __ht._M_element_count;
1252  _M_rehash_policy = __ht._M_rehash_policy;
1253  __alloc_node_gen_t __alloc_node_gen(*this);
1254  __try
1255  {
1256  _M_assign(__ht, __alloc_node_gen);
1257  }
1258  __catch(...)
1259  {
1260  // _M_assign took care of deallocating all memory. Now we
1261  // must make sure this instance remains in a usable state.
1262  _M_reset();
1263  __throw_exception_again;
1264  }
1265  return *this;
1266  }
1267  std::__alloc_on_copy(__this_alloc, __that_alloc);
1268  }
1269 
1270  // Reuse allocated buckets and nodes.
1271  _M_assign_elements(__ht);
1272  return *this;
1273  }
1274 
1275  template<typename _Key, typename _Value, typename _Alloc,
1276  typename _ExtractKey, typename _Equal,
1277  typename _Hash, typename _RangeHash, typename _Unused,
1278  typename _RehashPolicy, typename _Traits>
1279  template<typename _Ht>
1280  void
1281  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1282  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1283  _M_assign_elements(_Ht&& __ht)
1284  {
1285  __buckets_ptr __former_buckets = nullptr;
1286  std::size_t __former_bucket_count = _M_bucket_count;
1287  const __rehash_state& __former_state = _M_rehash_policy._M_state();
1288 
1289  if (_M_bucket_count != __ht._M_bucket_count)
1290  {
1291  __former_buckets = _M_buckets;
1292  _M_buckets = _M_allocate_buckets(__ht._M_bucket_count);
1293  _M_bucket_count = __ht._M_bucket_count;
1294  }
1295  else
1296  __builtin_memset(_M_buckets, 0,
1297  _M_bucket_count * sizeof(__node_base_ptr));
1298 
1299  __try
1300  {
1301  __hashtable_base::operator=(std::forward<_Ht>(__ht));
1302  _M_element_count = __ht._M_element_count;
1303  _M_rehash_policy = __ht._M_rehash_policy;
1304  __reuse_or_alloc_node_gen_t __roan(_M_begin(), *this);
1305  _M_before_begin._M_nxt = nullptr;
1306  _M_assign(std::forward<_Ht>(__ht), __roan);
1307  if (__former_buckets)
1308  _M_deallocate_buckets(__former_buckets, __former_bucket_count);
1309  }
1310  __catch(...)
1311  {
1312  if (__former_buckets)
1313  {
1314  // Restore previous buckets.
1315  _M_deallocate_buckets();
1316  _M_rehash_policy._M_reset(__former_state);
1317  _M_buckets = __former_buckets;
1318  _M_bucket_count = __former_bucket_count;
1319  }
1320  __builtin_memset(_M_buckets, 0,
1321  _M_bucket_count * sizeof(__node_base_ptr));
1322  __throw_exception_again;
1323  }
1324  }
1325 
1326  template<typename _Key, typename _Value, typename _Alloc,
1327  typename _ExtractKey, typename _Equal,
1328  typename _Hash, typename _RangeHash, typename _Unused,
1329  typename _RehashPolicy, typename _Traits>
1330  template<typename _Ht, typename _NodeGenerator>
1331  void
1332  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1333  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1334  _M_assign(_Ht&& __ht, const _NodeGenerator& __node_gen)
1335  {
1336  __buckets_ptr __buckets = nullptr;
1337  if (!_M_buckets)
1338  _M_buckets = __buckets = _M_allocate_buckets(_M_bucket_count);
1339 
1340  __try
1341  {
1342  if (!__ht._M_before_begin._M_nxt)
1343  return;
1344 
1345  // First deal with the special first node pointed to by
1346  // _M_before_begin.
1347  __node_ptr __ht_n = __ht._M_begin();
1348  __node_ptr __this_n
1349  = __node_gen(__fwd_value_for<_Ht>(__ht_n->_M_v()));
1350  this->_M_copy_code(*__this_n, *__ht_n);
1351  _M_update_bbegin(__this_n);
1352 
1353  // Then deal with other nodes.
1354  __node_ptr __prev_n = __this_n;
1355  for (__ht_n = __ht_n->_M_next(); __ht_n; __ht_n = __ht_n->_M_next())
1356  {
1357  __this_n = __node_gen(__fwd_value_for<_Ht>(__ht_n->_M_v()));
1358  __prev_n->_M_nxt = __this_n;
1359  this->_M_copy_code(*__this_n, *__ht_n);
1360  size_type __bkt = _M_bucket_index(*__this_n);
1361  if (!_M_buckets[__bkt])
1362  _M_buckets[__bkt] = __prev_n;
1363  __prev_n = __this_n;
1364  }
1365  }
1366  __catch(...)
1367  {
1368  clear();
1369  if (__buckets)
1370  _M_deallocate_buckets();
1371  __throw_exception_again;
1372  }
1373  }
1374 
1375  template<typename _Key, typename _Value, typename _Alloc,
1376  typename _ExtractKey, typename _Equal,
1377  typename _Hash, typename _RangeHash, typename _Unused,
1378  typename _RehashPolicy, typename _Traits>
1379  void
1380  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1381  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1382  _M_reset() noexcept
1383  {
1384  _M_rehash_policy._M_reset();
1385  _M_bucket_count = 1;
1386  _M_single_bucket = nullptr;
1387  _M_buckets = &_M_single_bucket;
1388  _M_before_begin._M_nxt = nullptr;
1389  _M_element_count = 0;
1390  }
1391 
1392  template<typename _Key, typename _Value, typename _Alloc,
1393  typename _ExtractKey, typename _Equal,
1394  typename _Hash, typename _RangeHash, typename _Unused,
1395  typename _RehashPolicy, typename _Traits>
1396  void
1397  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1398  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1399  _M_move_assign(_Hashtable&& __ht, true_type)
1400  {
1401  if (__builtin_expect(std::__addressof(__ht) == this, false))
1402  return;
1403 
1404  this->_M_deallocate_nodes(_M_begin());
1405  _M_deallocate_buckets();
1406  __hashtable_base::operator=(std::move(__ht));
1407  _M_rehash_policy = __ht._M_rehash_policy;
1408  if (!__ht._M_uses_single_bucket())
1409  _M_buckets = __ht._M_buckets;
1410  else
1411  {
1412  _M_buckets = &_M_single_bucket;
1413  _M_single_bucket = __ht._M_single_bucket;
1414  }
1415 
1416  _M_bucket_count = __ht._M_bucket_count;
1417  _M_before_begin._M_nxt = __ht._M_before_begin._M_nxt;
1418  _M_element_count = __ht._M_element_count;
1419  std::__alloc_on_move(this->_M_node_allocator(), __ht._M_node_allocator());
1420 
1421  // Fix bucket containing the _M_before_begin pointer that can't be moved.
1422  _M_update_bbegin();
1423  __ht._M_reset();
1424  }
1425 
1426  template<typename _Key, typename _Value, typename _Alloc,
1427  typename _ExtractKey, typename _Equal,
1428  typename _Hash, typename _RangeHash, typename _Unused,
1429  typename _RehashPolicy, typename _Traits>
1430  void
1431  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1432  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1433  _M_move_assign(_Hashtable&& __ht, false_type)
1434  {
1435  if (__ht._M_node_allocator() == this->_M_node_allocator())
1436  _M_move_assign(std::move(__ht), true_type{});
1437  else
1438  {
1439  // Can't move memory, move elements then.
1440  _M_assign_elements(std::move(__ht));
1441  __ht.clear();
1442  }
1443  }
1444 
1445  template<typename _Key, typename _Value, typename _Alloc,
1446  typename _ExtractKey, typename _Equal,
1447  typename _Hash, typename _RangeHash, typename _Unused,
1448  typename _RehashPolicy, typename _Traits>
1449  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1450  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1451  _Hashtable(const _Hashtable& __ht)
1452  : __hashtable_base(__ht),
1453  __map_base(__ht),
1454  __rehash_base(__ht),
1455  __hashtable_alloc(
1456  __node_alloc_traits::_S_select_on_copy(__ht._M_node_allocator())),
1457  __enable_default_ctor(__ht),
1458  _M_buckets(nullptr),
1459  _M_bucket_count(__ht._M_bucket_count),
1460  _M_element_count(__ht._M_element_count),
1461  _M_rehash_policy(__ht._M_rehash_policy)
1462  {
1463  __alloc_node_gen_t __alloc_node_gen(*this);
1464  _M_assign(__ht, __alloc_node_gen);
1465  }
1466 
1467  template<typename _Key, typename _Value, typename _Alloc,
1468  typename _ExtractKey, typename _Equal,
1469  typename _Hash, typename _RangeHash, typename _Unused,
1470  typename _RehashPolicy, typename _Traits>
1471  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1472  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1473  _Hashtable(_Hashtable&& __ht, __node_alloc_type&& __a,
1474  true_type /* alloc always equal */)
1475  noexcept(_S_nothrow_move())
1476  : __hashtable_base(__ht),
1477  __map_base(__ht),
1478  __rehash_base(__ht),
1479  __hashtable_alloc(std::move(__a)),
1480  __enable_default_ctor(__ht),
1481  _M_buckets(__ht._M_buckets),
1482  _M_bucket_count(__ht._M_bucket_count),
1483  _M_before_begin(__ht._M_before_begin._M_nxt),
1484  _M_element_count(__ht._M_element_count),
1485  _M_rehash_policy(__ht._M_rehash_policy)
1486  {
1487  // Update buckets if __ht is using its single bucket.
1488  if (__ht._M_uses_single_bucket())
1489  {
1490  _M_buckets = &_M_single_bucket;
1491  _M_single_bucket = __ht._M_single_bucket;
1492  }
1493 
1494  // Fix bucket containing the _M_before_begin pointer that can't be moved.
1495  _M_update_bbegin();
1496 
1497  __ht._M_reset();
1498  }
1499 
1500  template<typename _Key, typename _Value, typename _Alloc,
1501  typename _ExtractKey, typename _Equal,
1502  typename _Hash, typename _RangeHash, typename _Unused,
1503  typename _RehashPolicy, typename _Traits>
1504  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1505  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1506  _Hashtable(const _Hashtable& __ht, const allocator_type& __a)
1507  : __hashtable_base(__ht),
1508  __map_base(__ht),
1509  __rehash_base(__ht),
1510  __hashtable_alloc(__node_alloc_type(__a)),
1511  __enable_default_ctor(__ht),
1512  _M_buckets(),
1513  _M_bucket_count(__ht._M_bucket_count),
1514  _M_element_count(__ht._M_element_count),
1515  _M_rehash_policy(__ht._M_rehash_policy)
1516  {
1517  __alloc_node_gen_t __alloc_node_gen(*this);
1518  _M_assign(__ht, __alloc_node_gen);
1519  }
1520 
1521  template<typename _Key, typename _Value, typename _Alloc,
1522  typename _ExtractKey, typename _Equal,
1523  typename _Hash, typename _RangeHash, typename _Unused,
1524  typename _RehashPolicy, typename _Traits>
1525  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1526  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1527  _Hashtable(_Hashtable&& __ht, __node_alloc_type&& __a,
1528  false_type /* alloc always equal */)
1529  : __hashtable_base(__ht),
1530  __map_base(__ht),
1531  __rehash_base(__ht),
1532  __hashtable_alloc(std::move(__a)),
1533  __enable_default_ctor(__ht),
1534  _M_buckets(nullptr),
1535  _M_bucket_count(__ht._M_bucket_count),
1536  _M_element_count(__ht._M_element_count),
1537  _M_rehash_policy(__ht._M_rehash_policy)
1538  {
1539  if (__ht._M_node_allocator() == this->_M_node_allocator())
1540  {
1541  if (__ht._M_uses_single_bucket())
1542  {
1543  _M_buckets = &_M_single_bucket;
1544  _M_single_bucket = __ht._M_single_bucket;
1545  }
1546  else
1547  _M_buckets = __ht._M_buckets;
1548 
1549  // Fix bucket containing the _M_before_begin pointer that can't be
1550  // moved.
1551  _M_update_bbegin(__ht._M_begin());
1552 
1553  __ht._M_reset();
1554  }
1555  else
1556  {
1557  __alloc_node_gen_t __alloc_gen(*this);
1558 
1559  using _Fwd_Ht = __conditional_t<
1560  __move_if_noexcept_cond<value_type>::value,
1561  const _Hashtable&, _Hashtable&&>;
1562  _M_assign(std::forward<_Fwd_Ht>(__ht), __alloc_gen);
1563  __ht.clear();
1564  }
1565  }
1566 
1567  template<typename _Key, typename _Value, typename _Alloc,
1568  typename _ExtractKey, typename _Equal,
1569  typename _Hash, typename _RangeHash, typename _Unused,
1570  typename _RehashPolicy, typename _Traits>
1571  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1572  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1573  ~_Hashtable() noexcept
1574  {
1575  // Getting a bucket index from a node shall not throw because it is used
1576  // in methods (erase, swap...) that shall not throw. Need a complete
1577  // type to check this, so do it in the destructor not at class scope.
1578  static_assert(noexcept(declval<const __hash_code_base_access&>()
1579  ._M_bucket_index(declval<const __node_value_type&>(),
1580  (std::size_t)0)),
1581  "Cache the hash code or qualify your functors involved"
1582  " in hash code and bucket index computation with noexcept");
1583 
1584  clear();
1585  _M_deallocate_buckets();
1586  }
1587 
1588  template<typename _Key, typename _Value, typename _Alloc,
1589  typename _ExtractKey, typename _Equal,
1590  typename _Hash, typename _RangeHash, typename _Unused,
1591  typename _RehashPolicy, typename _Traits>
1592  void
1593  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1594  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1595  swap(_Hashtable& __x)
1596  noexcept(__and_<__is_nothrow_swappable<_Hash>,
1597  __is_nothrow_swappable<_Equal>>::value)
1598  {
1599  // The only base class with member variables is hash_code_base.
1600  // We define _Hash_code_base::_M_swap because different
1601  // specializations have different members.
1602  this->_M_swap(__x);
1603 
1604  std::__alloc_on_swap(this->_M_node_allocator(), __x._M_node_allocator());
1605  std::swap(_M_rehash_policy, __x._M_rehash_policy);
1606 
1607  // Deal properly with potentially moved instances.
1608  if (this->_M_uses_single_bucket())
1609  {
1610  if (!__x._M_uses_single_bucket())
1611  {
1612  _M_buckets = __x._M_buckets;
1613  __x._M_buckets = &__x._M_single_bucket;
1614  }
1615  }
1616  else if (__x._M_uses_single_bucket())
1617  {
1618  __x._M_buckets = _M_buckets;
1619  _M_buckets = &_M_single_bucket;
1620  }
1621  else
1622  std::swap(_M_buckets, __x._M_buckets);
1623 
1624  std::swap(_M_bucket_count, __x._M_bucket_count);
1625  std::swap(_M_before_begin._M_nxt, __x._M_before_begin._M_nxt);
1626  std::swap(_M_element_count, __x._M_element_count);
1627  std::swap(_M_single_bucket, __x._M_single_bucket);
1628 
1629  // Fix buckets containing the _M_before_begin pointers that can't be
1630  // swapped.
1631  _M_update_bbegin();
1632  __x._M_update_bbegin();
1633  }
1634 
1635  template<typename _Key, typename _Value, typename _Alloc,
1636  typename _ExtractKey, typename _Equal,
1637  typename _Hash, typename _RangeHash, typename _Unused,
1638  typename _RehashPolicy, typename _Traits>
1639  auto
1640  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1641  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1642  find(const key_type& __k)
1643  -> iterator
1644  {
1645  if (size() <= __small_size_threshold())
1646  {
1647  for (auto __it = begin(); __it != end(); ++__it)
1648  if (this->_M_key_equals(__k, *__it._M_cur))
1649  return __it;
1650  return end();
1651  }
1652 
1653  __hash_code __code = this->_M_hash_code(__k);
1654  std::size_t __bkt = _M_bucket_index(__code);
1655  return iterator(_M_find_node(__bkt, __k, __code));
1656  }
1657 
1658  template<typename _Key, typename _Value, typename _Alloc,
1659  typename _ExtractKey, typename _Equal,
1660  typename _Hash, typename _RangeHash, typename _Unused,
1661  typename _RehashPolicy, typename _Traits>
1662  auto
1663  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1664  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1665  find(const key_type& __k) const
1666  -> const_iterator
1667  {
1668  if (size() <= __small_size_threshold())
1669  {
1670  for (auto __it = begin(); __it != end(); ++__it)
1671  if (this->_M_key_equals(__k, *__it._M_cur))
1672  return __it;
1673  return end();
1674  }
1675 
1676  __hash_code __code = this->_M_hash_code(__k);
1677  std::size_t __bkt = _M_bucket_index(__code);
1678  return const_iterator(_M_find_node(__bkt, __k, __code));
1679  }
1680 
1681 #if __cplusplus > 201703L
1682  template<typename _Key, typename _Value, typename _Alloc,
1683  typename _ExtractKey, typename _Equal,
1684  typename _Hash, typename _RangeHash, typename _Unused,
1685  typename _RehashPolicy, typename _Traits>
1686  template<typename _Kt, typename, typename>
1687  auto
1688  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1689  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1690  _M_find_tr(const _Kt& __k)
1691  -> iterator
1692  {
1693  __hash_code __code = this->_M_hash_code_tr(__k);
1694  std::size_t __bkt = _M_bucket_index(__code);
1695  return iterator(_M_find_node_tr(__bkt, __k, __code));
1696  }
1697 
1698  template<typename _Key, typename _Value, typename _Alloc,
1699  typename _ExtractKey, typename _Equal,
1700  typename _Hash, typename _RangeHash, typename _Unused,
1701  typename _RehashPolicy, typename _Traits>
1702  template<typename _Kt, typename, typename>
1703  auto
1704  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1705  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1706  _M_find_tr(const _Kt& __k) const
1707  -> const_iterator
1708  {
1709  __hash_code __code = this->_M_hash_code_tr(__k);
1710  std::size_t __bkt = _M_bucket_index(__code);
1711  return const_iterator(_M_find_node_tr(__bkt, __k, __code));
1712  }
1713 #endif
1714 
1715  template<typename _Key, typename _Value, typename _Alloc,
1716  typename _ExtractKey, typename _Equal,
1717  typename _Hash, typename _RangeHash, typename _Unused,
1718  typename _RehashPolicy, typename _Traits>
1719  auto
1720  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1721  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1722  count(const key_type& __k) const
1723  -> size_type
1724  {
1725  auto __it = find(__k);
1726  if (!__it._M_cur)
1727  return 0;
1728 
1729  if (__unique_keys::value)
1730  return 1;
1731 
1732  // All equivalent values are next to each other, if we find a
1733  // non-equivalent value after an equivalent one it means that we won't
1734  // find any new equivalent value.
1735  size_type __result = 1;
1736  for (auto __ref = __it++;
1737  __it._M_cur && this->_M_node_equals(*__ref._M_cur, *__it._M_cur);
1738  ++__it)
1739  ++__result;
1740 
1741  return __result;
1742  }
1743 
1744 #if __cplusplus > 201703L
1745  template<typename _Key, typename _Value, typename _Alloc,
1746  typename _ExtractKey, typename _Equal,
1747  typename _Hash, typename _RangeHash, typename _Unused,
1748  typename _RehashPolicy, typename _Traits>
1749  template<typename _Kt, typename, typename>
1750  auto
1751  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1752  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1753  _M_count_tr(const _Kt& __k) const
1754  -> size_type
1755  {
1756  __hash_code __code = this->_M_hash_code_tr(__k);
1757  std::size_t __bkt = _M_bucket_index(__code);
1758  auto __n = _M_find_node_tr(__bkt, __k, __code);
1759  if (!__n)
1760  return 0;
1761 
1762  // All equivalent values are next to each other, if we find a
1763  // non-equivalent value after an equivalent one it means that we won't
1764  // find any new equivalent value.
1765  iterator __it(__n);
1766  size_type __result = 1;
1767  for (++__it;
1768  __it._M_cur && this->_M_equals_tr(__k, __code, *__it._M_cur);
1769  ++__it)
1770  ++__result;
1771 
1772  return __result;
1773  }
1774 #endif
1775 
1776  template<typename _Key, typename _Value, typename _Alloc,
1777  typename _ExtractKey, typename _Equal,
1778  typename _Hash, typename _RangeHash, typename _Unused,
1779  typename _RehashPolicy, typename _Traits>
1780  auto
1781  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1782  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1783  equal_range(const key_type& __k)
1784  -> pair<iterator, iterator>
1785  {
1786  auto __ite = find(__k);
1787  if (!__ite._M_cur)
1788  return { __ite, __ite };
1789 
1790  auto __beg = __ite++;
1791  if (__unique_keys::value)
1792  return { __beg, __ite };
1793 
1794  // All equivalent values are next to each other, if we find a
1795  // non-equivalent value after an equivalent one it means that we won't
1796  // find any new equivalent value.
1797  while (__ite._M_cur && this->_M_node_equals(*__beg._M_cur, *__ite._M_cur))
1798  ++__ite;
1799 
1800  return { __beg, __ite };
1801  }
1802 
1803  template<typename _Key, typename _Value, typename _Alloc,
1804  typename _ExtractKey, typename _Equal,
1805  typename _Hash, typename _RangeHash, typename _Unused,
1806  typename _RehashPolicy, typename _Traits>
1807  auto
1808  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1809  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1810  equal_range(const key_type& __k) const
1811  -> pair<const_iterator, const_iterator>
1812  {
1813  auto __ite = find(__k);
1814  if (!__ite._M_cur)
1815  return { __ite, __ite };
1816 
1817  auto __beg = __ite++;
1818  if (__unique_keys::value)
1819  return { __beg, __ite };
1820 
1821  // All equivalent values are next to each other, if we find a
1822  // non-equivalent value after an equivalent one it means that we won't
1823  // find any new equivalent value.
1824  while (__ite._M_cur && this->_M_node_equals(*__beg._M_cur, *__ite._M_cur))
1825  ++__ite;
1826 
1827  return { __beg, __ite };
1828  }
1829 
1830 #if __cplusplus > 201703L
1831  template<typename _Key, typename _Value, typename _Alloc,
1832  typename _ExtractKey, typename _Equal,
1833  typename _Hash, typename _RangeHash, typename _Unused,
1834  typename _RehashPolicy, typename _Traits>
1835  template<typename _Kt, typename, typename>
1836  auto
1837  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1838  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1839  _M_equal_range_tr(const _Kt& __k)
1840  -> pair<iterator, iterator>
1841  {
1842  __hash_code __code = this->_M_hash_code_tr(__k);
1843  std::size_t __bkt = _M_bucket_index(__code);
1844  auto __n = _M_find_node_tr(__bkt, __k, __code);
1845  iterator __ite(__n);
1846  if (!__n)
1847  return { __ite, __ite };
1848 
1849  // All equivalent values are next to each other, if we find a
1850  // non-equivalent value after an equivalent one it means that we won't
1851  // find any new equivalent value.
1852  auto __beg = __ite++;
1853  while (__ite._M_cur && this->_M_equals_tr(__k, __code, *__ite._M_cur))
1854  ++__ite;
1855 
1856  return { __beg, __ite };
1857  }
1858 
1859  template<typename _Key, typename _Value, typename _Alloc,
1860  typename _ExtractKey, typename _Equal,
1861  typename _Hash, typename _RangeHash, typename _Unused,
1862  typename _RehashPolicy, typename _Traits>
1863  template<typename _Kt, typename, typename>
1864  auto
1865  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1866  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1867  _M_equal_range_tr(const _Kt& __k) const
1868  -> pair<const_iterator, const_iterator>
1869  {
1870  __hash_code __code = this->_M_hash_code_tr(__k);
1871  std::size_t __bkt = _M_bucket_index(__code);
1872  auto __n = _M_find_node_tr(__bkt, __k, __code);
1873  const_iterator __ite(__n);
1874  if (!__n)
1875  return { __ite, __ite };
1876 
1877  // All equivalent values are next to each other, if we find a
1878  // non-equivalent value after an equivalent one it means that we won't
1879  // find any new equivalent value.
1880  auto __beg = __ite++;
1881  while (__ite._M_cur && this->_M_equals_tr(__k, __code, *__ite._M_cur))
1882  ++__ite;
1883 
1884  return { __beg, __ite };
1885  }
1886 #endif
1887 
1888  // Find the node before the one whose key compares equal to k.
1889  // Return nullptr if no node is found.
1890  template<typename _Key, typename _Value, typename _Alloc,
1891  typename _ExtractKey, typename _Equal,
1892  typename _Hash, typename _RangeHash, typename _Unused,
1893  typename _RehashPolicy, typename _Traits>
1894  auto
1895  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1896  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1897  _M_find_before_node(const key_type& __k)
1898  -> __node_base_ptr
1899  {
1900  __node_base_ptr __prev_p = &_M_before_begin;
1901  if (!__prev_p->_M_nxt)
1902  return nullptr;
1903 
1904  for (__node_ptr __p = static_cast<__node_ptr>(__prev_p->_M_nxt);
1905  __p != nullptr;
1906  __p = __p->_M_next())
1907  {
1908  if (this->_M_key_equals(__k, *__p))
1909  return __prev_p;
1910 
1911  __prev_p = __p;
1912  }
1913 
1914  return nullptr;
1915  }
1916 
1917  // Find the node before the one whose key compares equal to k in the bucket
1918  // bkt. Return nullptr if no node is found.
1919  template<typename _Key, typename _Value, typename _Alloc,
1920  typename _ExtractKey, typename _Equal,
1921  typename _Hash, typename _RangeHash, typename _Unused,
1922  typename _RehashPolicy, typename _Traits>
1923  auto
1924  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1925  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1926  _M_find_before_node(size_type __bkt, const key_type& __k,
1927  __hash_code __code) const
1928  -> __node_base_ptr
1929  {
1930  __node_base_ptr __prev_p = _M_buckets[__bkt];
1931  if (!__prev_p)
1932  return nullptr;
1933 
1934  for (__node_ptr __p = static_cast<__node_ptr>(__prev_p->_M_nxt);;
1935  __p = __p->_M_next())
1936  {
1937  if (this->_M_equals(__k, __code, *__p))
1938  return __prev_p;
1939 
1940  if (!__p->_M_nxt || _M_bucket_index(*__p->_M_next()) != __bkt)
1941  break;
1942  __prev_p = __p;
1943  }
1944 
1945  return nullptr;
1946  }
1947 
1948  template<typename _Key, typename _Value, typename _Alloc,
1949  typename _ExtractKey, typename _Equal,
1950  typename _Hash, typename _RangeHash, typename _Unused,
1951  typename _RehashPolicy, typename _Traits>
1952  template<typename _Kt>
1953  auto
1954  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1955  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1956  _M_find_before_node_tr(size_type __bkt, const _Kt& __k,
1957  __hash_code __code) const
1958  -> __node_base_ptr
1959  {
1960  __node_base_ptr __prev_p = _M_buckets[__bkt];
1961  if (!__prev_p)
1962  return nullptr;
1963 
1964  for (__node_ptr __p = static_cast<__node_ptr>(__prev_p->_M_nxt);;
1965  __p = __p->_M_next())
1966  {
1967  if (this->_M_equals_tr(__k, __code, *__p))
1968  return __prev_p;
1969 
1970  if (!__p->_M_nxt || _M_bucket_index(*__p->_M_next()) != __bkt)
1971  break;
1972  __prev_p = __p;
1973  }
1974 
1975  return nullptr;
1976  }
1977 
1978  template<typename _Key, typename _Value, typename _Alloc,
1979  typename _ExtractKey, typename _Equal,
1980  typename _Hash, typename _RangeHash, typename _Unused,
1981  typename _RehashPolicy, typename _Traits>
1982  void
1983  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1984  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1985  _M_insert_bucket_begin(size_type __bkt, __node_ptr __node)
1986  {
1987  if (_M_buckets[__bkt])
1988  {
1989  // Bucket is not empty, we just need to insert the new node
1990  // after the bucket before begin.
1991  __node->_M_nxt = _M_buckets[__bkt]->_M_nxt;
1992  _M_buckets[__bkt]->_M_nxt = __node;
1993  }
1994  else
1995  {
1996  // The bucket is empty, the new node is inserted at the
1997  // beginning of the singly-linked list and the bucket will
1998  // contain _M_before_begin pointer.
1999  __node->_M_nxt = _M_before_begin._M_nxt;
2000  _M_before_begin._M_nxt = __node;
2001 
2002  if (__node->_M_nxt)
2003  // We must update former begin bucket that is pointing to
2004  // _M_before_begin.
2005  _M_buckets[_M_bucket_index(*__node->_M_next())] = __node;
2006 
2007  _M_buckets[__bkt] = &_M_before_begin;
2008  }
2009  }
2010 
2011  template<typename _Key, typename _Value, typename _Alloc,
2012  typename _ExtractKey, typename _Equal,
2013  typename _Hash, typename _RangeHash, typename _Unused,
2014  typename _RehashPolicy, typename _Traits>
2015  void
2016  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2017  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2018  _M_remove_bucket_begin(size_type __bkt, __node_ptr __next,
2019  size_type __next_bkt)
2020  {
2021  if (!__next || __next_bkt != __bkt)
2022  {
2023  // Bucket is now empty
2024  // First update next bucket if any
2025  if (__next)
2026  _M_buckets[__next_bkt] = _M_buckets[__bkt];
2027 
2028  // Second update before begin node if necessary
2029  if (&_M_before_begin == _M_buckets[__bkt])
2030  _M_before_begin._M_nxt = __next;
2031  _M_buckets[__bkt] = nullptr;
2032  }
2033  }
2034 
2035  template<typename _Key, typename _Value, typename _Alloc,
2036  typename _ExtractKey, typename _Equal,
2037  typename _Hash, typename _RangeHash, typename _Unused,
2038  typename _RehashPolicy, typename _Traits>
2039  auto
2040  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2041  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2042  _M_get_previous_node(size_type __bkt, __node_ptr __n)
2043  -> __node_base_ptr
2044  {
2045  __node_base_ptr __prev_n = _M_buckets[__bkt];
2046  while (__prev_n->_M_nxt != __n)
2047  __prev_n = __prev_n->_M_nxt;
2048  return __prev_n;
2049  }
2050 
2051  template<typename _Key, typename _Value, typename _Alloc,
2052  typename _ExtractKey, typename _Equal,
2053  typename _Hash, typename _RangeHash, typename _Unused,
2054  typename _RehashPolicy, typename _Traits>
2055  template<typename... _Args>
2056  auto
2057  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2058  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2059  _M_emplace(true_type /* __uks */, _Args&&... __args)
2060  -> pair<iterator, bool>
2061  {
2062  // First build the node to get access to the hash code
2063  _Scoped_node __node { this, std::forward<_Args>(__args)... };
2064  const key_type& __k = _ExtractKey{}(__node._M_node->_M_v());
2065  if (size() <= __small_size_threshold())
2066  {
2067  for (auto __it = begin(); __it != end(); ++__it)
2068  if (this->_M_key_equals(__k, *__it._M_cur))
2069  // There is already an equivalent node, no insertion
2070  return { __it, false };
2071  }
2072 
2073  __hash_code __code = this->_M_hash_code(__k);
2074  size_type __bkt = _M_bucket_index(__code);
2075  if (size() > __small_size_threshold())
2076  if (__node_ptr __p = _M_find_node(__bkt, __k, __code))
2077  // There is already an equivalent node, no insertion
2078  return { iterator(__p), false };
2079 
2080  // Insert the node
2081  auto __pos = _M_insert_unique_node(__bkt, __code, __node._M_node);
2082  __node._M_node = nullptr;
2083  return { __pos, true };
2084  }
2085 
2086  template<typename _Key, typename _Value, typename _Alloc,
2087  typename _ExtractKey, typename _Equal,
2088  typename _Hash, typename _RangeHash, typename _Unused,
2089  typename _RehashPolicy, typename _Traits>
2090  template<typename... _Args>
2091  auto
2092  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2093  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2094  _M_emplace(const_iterator __hint, false_type /* __uks */,
2095  _Args&&... __args)
2096  -> iterator
2097  {
2098  // First build the node to get its hash code.
2099  _Scoped_node __node { this, std::forward<_Args>(__args)... };
2100  const key_type& __k = _ExtractKey{}(__node._M_node->_M_v());
2101 
2102  auto __res = this->_M_compute_hash_code(__hint, __k);
2103  auto __pos
2104  = _M_insert_multi_node(__res.first._M_cur, __res.second,
2105  __node._M_node);
2106  __node._M_node = nullptr;
2107  return __pos;
2108  }
2109 
2110  template<typename _Key, typename _Value, typename _Alloc,
2111  typename _ExtractKey, typename _Equal,
2112  typename _Hash, typename _RangeHash, typename _Unused,
2113  typename _RehashPolicy, typename _Traits>
2114  auto
2115  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2116  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2117  _M_compute_hash_code(const_iterator __hint, const key_type& __k) const
2118  -> pair<const_iterator, __hash_code>
2119  {
2120  if (size() <= __small_size_threshold())
2121  {
2122  if (__hint != cend())
2123  {
2124  for (auto __it = __hint; __it != cend(); ++__it)
2125  if (this->_M_key_equals(__k, *__it._M_cur))
2126  return { __it, this->_M_hash_code(*__it._M_cur) };
2127  }
2128 
2129  for (auto __it = cbegin(); __it != __hint; ++__it)
2130  if (this->_M_key_equals(__k, *__it._M_cur))
2131  return { __it, this->_M_hash_code(*__it._M_cur) };
2132  }
2133 
2134  return { __hint, this->_M_hash_code(__k) };
2135  }
2136 
2137  template<typename _Key, typename _Value, typename _Alloc,
2138  typename _ExtractKey, typename _Equal,
2139  typename _Hash, typename _RangeHash, typename _Unused,
2140  typename _RehashPolicy, typename _Traits>
2141  auto
2142  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2143  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2144  _M_insert_unique_node(size_type __bkt, __hash_code __code,
2145  __node_ptr __node, size_type __n_elt)
2146  -> iterator
2147  {
2148  const __rehash_state& __saved_state = _M_rehash_policy._M_state();
2149  std::pair<bool, std::size_t> __do_rehash
2150  = _M_rehash_policy._M_need_rehash(_M_bucket_count, _M_element_count,
2151  __n_elt);
2152 
2153  if (__do_rehash.first)
2154  {
2155  _M_rehash(__do_rehash.second, __saved_state);
2156  __bkt = _M_bucket_index(__code);
2157  }
2158 
2159  this->_M_store_code(*__node, __code);
2160 
2161  // Always insert at the beginning of the bucket.
2162  _M_insert_bucket_begin(__bkt, __node);
2163  ++_M_element_count;
2164  return iterator(__node);
2165  }
2166 
2167  template<typename _Key, typename _Value, typename _Alloc,
2168  typename _ExtractKey, typename _Equal,
2169  typename _Hash, typename _RangeHash, typename _Unused,
2170  typename _RehashPolicy, typename _Traits>
2171  auto
2172  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2173  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2174  _M_insert_multi_node(__node_ptr __hint,
2175  __hash_code __code, __node_ptr __node)
2176  -> iterator
2177  {
2178  const __rehash_state& __saved_state = _M_rehash_policy._M_state();
2179  std::pair<bool, std::size_t> __do_rehash
2180  = _M_rehash_policy._M_need_rehash(_M_bucket_count, _M_element_count, 1);
2181 
2182  if (__do_rehash.first)
2183  _M_rehash(__do_rehash.second, __saved_state);
2184 
2185  this->_M_store_code(*__node, __code);
2186  const key_type& __k = _ExtractKey{}(__node->_M_v());
2187  size_type __bkt = _M_bucket_index(__code);
2188 
2189  // Find the node before an equivalent one or use hint if it exists and
2190  // if it is equivalent.
2191  __node_base_ptr __prev
2192  = __builtin_expect(__hint != nullptr, false)
2193  && this->_M_equals(__k, __code, *__hint)
2194  ? __hint
2195  : _M_find_before_node(__bkt, __k, __code);
2196 
2197  if (__prev)
2198  {
2199  // Insert after the node before the equivalent one.
2200  __node->_M_nxt = __prev->_M_nxt;
2201  __prev->_M_nxt = __node;
2202  if (__builtin_expect(__prev == __hint, false))
2203  // hint might be the last bucket node, in this case we need to
2204  // update next bucket.
2205  if (__node->_M_nxt
2206  && !this->_M_equals(__k, __code, *__node->_M_next()))
2207  {
2208  size_type __next_bkt = _M_bucket_index(*__node->_M_next());
2209  if (__next_bkt != __bkt)
2210  _M_buckets[__next_bkt] = __node;
2211  }
2212  }
2213  else
2214  // The inserted node has no equivalent in the hashtable. We must
2215  // insert the new node at the beginning of the bucket to preserve
2216  // equivalent elements' relative positions.
2217  _M_insert_bucket_begin(__bkt, __node);
2218  ++_M_element_count;
2219  return iterator(__node);
2220  }
2221 
2222  // Insert v if no element with its key is already present.
2223  template<typename _Key, typename _Value, typename _Alloc,
2224  typename _ExtractKey, typename _Equal,
2225  typename _Hash, typename _RangeHash, typename _Unused,
2226  typename _RehashPolicy, typename _Traits>
2227  template<typename _Kt, typename _Arg, typename _NodeGenerator>
2228  auto
2229  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2230  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2231  _M_insert_unique(_Kt&& __k, _Arg&& __v,
2232  const _NodeGenerator& __node_gen)
2233  -> pair<iterator, bool>
2234  {
2235  if (size() <= __small_size_threshold())
2236  for (auto __it = begin(); __it != end(); ++__it)
2237  if (this->_M_key_equals_tr(__k, *__it._M_cur))
2238  return { __it, false };
2239 
2240  __hash_code __code = this->_M_hash_code_tr(__k);
2241  size_type __bkt = _M_bucket_index(__code);
2242 
2243  if (size() > __small_size_threshold())
2244  if (__node_ptr __node = _M_find_node_tr(__bkt, __k, __code))
2245  return { iterator(__node), false };
2246 
2247  _Scoped_node __node {
2248  __node_builder_t::_S_build(std::forward<_Kt>(__k),
2249  std::forward<_Arg>(__v),
2250  __node_gen),
2251  this
2252  };
2253  auto __pos
2254  = _M_insert_unique_node(__bkt, __code, __node._M_node);
2255  __node._M_node = nullptr;
2256  return { __pos, true };
2257  }
2258 
2259  // Insert v unconditionally.
2260  template<typename _Key, typename _Value, typename _Alloc,
2261  typename _ExtractKey, typename _Equal,
2262  typename _Hash, typename _RangeHash, typename _Unused,
2263  typename _RehashPolicy, typename _Traits>
2264  template<typename _Arg, typename _NodeGenerator>
2265  auto
2266  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2267  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2268  _M_insert(const_iterator __hint, _Arg&& __v,
2269  const _NodeGenerator& __node_gen,
2270  false_type /* __uks */)
2271  -> iterator
2272  {
2273  // First allocate new node so that we don't do anything if it throws.
2274  _Scoped_node __node{ __node_gen(std::forward<_Arg>(__v)), this };
2275 
2276  // Second compute the hash code so that we don't rehash if it throws.
2277  auto __res = this->_M_compute_hash_code(
2278  __hint, _ExtractKey{}(__node._M_node->_M_v()));
2279 
2280  auto __pos
2281  = _M_insert_multi_node(__res.first._M_cur, __res.second,
2282  __node._M_node);
2283  __node._M_node = nullptr;
2284  return __pos;
2285  }
2286 
2287  template<typename _Key, typename _Value, typename _Alloc,
2288  typename _ExtractKey, typename _Equal,
2289  typename _Hash, typename _RangeHash, typename _Unused,
2290  typename _RehashPolicy, typename _Traits>
2291  auto
2292  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2293  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2294  erase(const_iterator __it)
2295  -> iterator
2296  {
2297  __node_ptr __n = __it._M_cur;
2298  std::size_t __bkt = _M_bucket_index(*__n);
2299 
2300  // Look for previous node to unlink it from the erased one, this
2301  // is why we need buckets to contain the before begin to make
2302  // this search fast.
2303  __node_base_ptr __prev_n = _M_get_previous_node(__bkt, __n);
2304  return _M_erase(__bkt, __prev_n, __n);
2305  }
2306 
2307  template<typename _Key, typename _Value, typename _Alloc,
2308  typename _ExtractKey, typename _Equal,
2309  typename _Hash, typename _RangeHash, typename _Unused,
2310  typename _RehashPolicy, typename _Traits>
2311  auto
2312  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2313  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2314  _M_erase(size_type __bkt, __node_base_ptr __prev_n, __node_ptr __n)
2315  -> iterator
2316  {
2317  if (__prev_n == _M_buckets[__bkt])
2318  _M_remove_bucket_begin(__bkt, __n->_M_next(),
2319  __n->_M_nxt ? _M_bucket_index(*__n->_M_next()) : 0);
2320  else if (__n->_M_nxt)
2321  {
2322  size_type __next_bkt = _M_bucket_index(*__n->_M_next());
2323  if (__next_bkt != __bkt)
2324  _M_buckets[__next_bkt] = __prev_n;
2325  }
2326 
2327  __prev_n->_M_nxt = __n->_M_nxt;
2328  iterator __result(__n->_M_next());
2329  this->_M_deallocate_node(__n);
2330  --_M_element_count;
2331 
2332  return __result;
2333  }
2334 
2335  template<typename _Key, typename _Value, typename _Alloc,
2336  typename _ExtractKey, typename _Equal,
2337  typename _Hash, typename _RangeHash, typename _Unused,
2338  typename _RehashPolicy, typename _Traits>
2339  auto
2340  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2341  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2342  _M_erase(true_type /* __uks */, const key_type& __k)
2343  -> size_type
2344  {
2345  __node_base_ptr __prev_n;
2346  __node_ptr __n;
2347  std::size_t __bkt;
2348  if (size() <= __small_size_threshold())
2349  {
2350  __prev_n = _M_find_before_node(__k);
2351  if (!__prev_n)
2352  return 0;
2353 
2354  // We found a matching node, erase it.
2355  __n = static_cast<__node_ptr>(__prev_n->_M_nxt);
2356  __bkt = _M_bucket_index(*__n);
2357  }
2358  else
2359  {
2360  __hash_code __code = this->_M_hash_code(__k);
2361  __bkt = _M_bucket_index(__code);
2362 
2363  // Look for the node before the first matching node.
2364  __prev_n = _M_find_before_node(__bkt, __k, __code);
2365  if (!__prev_n)
2366  return 0;
2367 
2368  // We found a matching node, erase it.
2369  __n = static_cast<__node_ptr>(__prev_n->_M_nxt);
2370  }
2371 
2372  _M_erase(__bkt, __prev_n, __n);
2373  return 1;
2374  }
2375 
2376  template<typename _Key, typename _Value, typename _Alloc,
2377  typename _ExtractKey, typename _Equal,
2378  typename _Hash, typename _RangeHash, typename _Unused,
2379  typename _RehashPolicy, typename _Traits>
2380  auto
2381  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2382  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2383  _M_erase(false_type /* __uks */, const key_type& __k)
2384  -> size_type
2385  {
2386  std::size_t __bkt;
2387  __node_base_ptr __prev_n;
2388  __node_ptr __n;
2389  if (size() <= __small_size_threshold())
2390  {
2391  __prev_n = _M_find_before_node(__k);
2392  if (!__prev_n)
2393  return 0;
2394 
2395  // We found a matching node, erase it.
2396  __n = static_cast<__node_ptr>(__prev_n->_M_nxt);
2397  __bkt = _M_bucket_index(*__n);
2398  }
2399  else
2400  {
2401  __hash_code __code = this->_M_hash_code(__k);
2402  __bkt = _M_bucket_index(__code);
2403 
2404  // Look for the node before the first matching node.
2405  __prev_n = _M_find_before_node(__bkt, __k, __code);
2406  if (!__prev_n)
2407  return 0;
2408 
2409  __n = static_cast<__node_ptr>(__prev_n->_M_nxt);
2410  }
2411 
2412  // _GLIBCXX_RESOLVE_LIB_DEFECTS
2413  // 526. Is it undefined if a function in the standard changes
2414  // in parameters?
2415  // We use one loop to find all matching nodes and another to deallocate
2416  // them so that the key stays valid during the first loop. It might be
2417  // invalidated indirectly when destroying nodes.
2418  __node_ptr __n_last = __n->_M_next();
2419  while (__n_last && this->_M_node_equals(*__n, *__n_last))
2420  __n_last = __n_last->_M_next();
2421 
2422  std::size_t __n_last_bkt = __n_last ? _M_bucket_index(*__n_last) : __bkt;
2423 
2424  // Deallocate nodes.
2425  size_type __result = 0;
2426  do
2427  {
2428  __node_ptr __p = __n->_M_next();
2429  this->_M_deallocate_node(__n);
2430  __n = __p;
2431  ++__result;
2432  }
2433  while (__n != __n_last);
2434 
2435  _M_element_count -= __result;
2436  if (__prev_n == _M_buckets[__bkt])
2437  _M_remove_bucket_begin(__bkt, __n_last, __n_last_bkt);
2438  else if (__n_last_bkt != __bkt)
2439  _M_buckets[__n_last_bkt] = __prev_n;
2440  __prev_n->_M_nxt = __n_last;
2441  return __result;
2442  }
2443 
2444  template<typename _Key, typename _Value, typename _Alloc,
2445  typename _ExtractKey, typename _Equal,
2446  typename _Hash, typename _RangeHash, typename _Unused,
2447  typename _RehashPolicy, typename _Traits>
2448  auto
2449  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2450  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2451  erase(const_iterator __first, const_iterator __last)
2452  -> iterator
2453  {
2454  __node_ptr __n = __first._M_cur;
2455  __node_ptr __last_n = __last._M_cur;
2456  if (__n == __last_n)
2457  return iterator(__n);
2458 
2459  std::size_t __bkt = _M_bucket_index(*__n);
2460 
2461  __node_base_ptr __prev_n = _M_get_previous_node(__bkt, __n);
2462  bool __is_bucket_begin = __n == _M_bucket_begin(__bkt);
2463  std::size_t __n_bkt = __bkt;
2464  for (;;)
2465  {
2466  do
2467  {
2468  __node_ptr __tmp = __n;
2469  __n = __n->_M_next();
2470  this->_M_deallocate_node(__tmp);
2471  --_M_element_count;
2472  if (!__n)
2473  break;
2474  __n_bkt = _M_bucket_index(*__n);
2475  }
2476  while (__n != __last_n && __n_bkt == __bkt);
2477  if (__is_bucket_begin)
2478  _M_remove_bucket_begin(__bkt, __n, __n_bkt);
2479  if (__n == __last_n)
2480  break;
2481  __is_bucket_begin = true;
2482  __bkt = __n_bkt;
2483  }
2484 
2485  if (__n && (__n_bkt != __bkt || __is_bucket_begin))
2486  _M_buckets[__n_bkt] = __prev_n;
2487  __prev_n->_M_nxt = __n;
2488  return iterator(__n);
2489  }
2490 
2491  template<typename _Key, typename _Value, typename _Alloc,
2492  typename _ExtractKey, typename _Equal,
2493  typename _Hash, typename _RangeHash, typename _Unused,
2494  typename _RehashPolicy, typename _Traits>
2495  void
2496  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2497  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2498  clear() noexcept
2499  {
2500  this->_M_deallocate_nodes(_M_begin());
2501  __builtin_memset(_M_buckets, 0,
2502  _M_bucket_count * sizeof(__node_base_ptr));
2503  _M_element_count = 0;
2504  _M_before_begin._M_nxt = nullptr;
2505  }
2506 
2507  template<typename _Key, typename _Value, typename _Alloc,
2508  typename _ExtractKey, typename _Equal,
2509  typename _Hash, typename _RangeHash, typename _Unused,
2510  typename _RehashPolicy, typename _Traits>
2511  void
2512  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2513  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2514  rehash(size_type __bkt_count)
2515  {
2516  const __rehash_state& __saved_state = _M_rehash_policy._M_state();
2517  __bkt_count
2518  = std::max(_M_rehash_policy._M_bkt_for_elements(_M_element_count + 1),
2519  __bkt_count);
2520  __bkt_count = _M_rehash_policy._M_next_bkt(__bkt_count);
2521 
2522  if (__bkt_count != _M_bucket_count)
2523  _M_rehash(__bkt_count, __saved_state);
2524  else
2525  // No rehash, restore previous state to keep it consistent with
2526  // container state.
2527  _M_rehash_policy._M_reset(__saved_state);
2528  }
2529 
2530  template<typename _Key, typename _Value, typename _Alloc,
2531  typename _ExtractKey, typename _Equal,
2532  typename _Hash, typename _RangeHash, typename _Unused,
2533  typename _RehashPolicy, typename _Traits>
2534  void
2535  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2536  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2537  _M_rehash(size_type __bkt_count, const __rehash_state& __state)
2538  {
2539  __try
2540  {
2541  _M_rehash_aux(__bkt_count, __unique_keys{});
2542  }
2543  __catch(...)
2544  {
2545  // A failure here means that buckets allocation failed. We only
2546  // have to restore hash policy previous state.
2547  _M_rehash_policy._M_reset(__state);
2548  __throw_exception_again;
2549  }
2550  }
2551 
2552  // Rehash when there is no equivalent elements.
2553  template<typename _Key, typename _Value, typename _Alloc,
2554  typename _ExtractKey, typename _Equal,
2555  typename _Hash, typename _RangeHash, typename _Unused,
2556  typename _RehashPolicy, typename _Traits>
2557  void
2558  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2559  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2560  _M_rehash_aux(size_type __bkt_count, true_type /* __uks */)
2561  {
2562  __buckets_ptr __new_buckets = _M_allocate_buckets(__bkt_count);
2563  __node_ptr __p = _M_begin();
2564  _M_before_begin._M_nxt = nullptr;
2565  std::size_t __bbegin_bkt = 0;
2566  while (__p)
2567  {
2568  __node_ptr __next = __p->_M_next();
2569  std::size_t __bkt
2570  = __hash_code_base::_M_bucket_index(*__p, __bkt_count);
2571  if (!__new_buckets[__bkt])
2572  {
2573  __p->_M_nxt = _M_before_begin._M_nxt;
2574  _M_before_begin._M_nxt = __p;
2575  __new_buckets[__bkt] = &_M_before_begin;
2576  if (__p->_M_nxt)
2577  __new_buckets[__bbegin_bkt] = __p;
2578  __bbegin_bkt = __bkt;
2579  }
2580  else
2581  {
2582  __p->_M_nxt = __new_buckets[__bkt]->_M_nxt;
2583  __new_buckets[__bkt]->_M_nxt = __p;
2584  }
2585 
2586  __p = __next;
2587  }
2588 
2589  _M_deallocate_buckets();
2590  _M_bucket_count = __bkt_count;
2591  _M_buckets = __new_buckets;
2592  }
2593 
2594  // Rehash when there can be equivalent elements, preserve their relative
2595  // order.
2596  template<typename _Key, typename _Value, typename _Alloc,
2597  typename _ExtractKey, typename _Equal,
2598  typename _Hash, typename _RangeHash, typename _Unused,
2599  typename _RehashPolicy, typename _Traits>
2600  void
2601  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2602  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2603  _M_rehash_aux(size_type __bkt_count, false_type /* __uks */)
2604  {
2605  __buckets_ptr __new_buckets = _M_allocate_buckets(__bkt_count);
2606  __node_ptr __p = _M_begin();
2607  _M_before_begin._M_nxt = nullptr;
2608  std::size_t __bbegin_bkt = 0;
2609  std::size_t __prev_bkt = 0;
2610  __node_ptr __prev_p = nullptr;
2611  bool __check_bucket = false;
2612 
2613  while (__p)
2614  {
2615  __node_ptr __next = __p->_M_next();
2616  std::size_t __bkt
2617  = __hash_code_base::_M_bucket_index(*__p, __bkt_count);
2618 
2619  if (__prev_p && __prev_bkt == __bkt)
2620  {
2621  // Previous insert was already in this bucket, we insert after
2622  // the previously inserted one to preserve equivalent elements
2623  // relative order.
2624  __p->_M_nxt = __prev_p->_M_nxt;
2625  __prev_p->_M_nxt = __p;
2626 
2627  // Inserting after a node in a bucket require to check that we
2628  // haven't change the bucket last node, in this case next
2629  // bucket containing its before begin node must be updated. We
2630  // schedule a check as soon as we move out of the sequence of
2631  // equivalent nodes to limit the number of checks.
2632  __check_bucket = true;
2633  }
2634  else
2635  {
2636  if (__check_bucket)
2637  {
2638  // Check if we shall update the next bucket because of
2639  // insertions into __prev_bkt bucket.
2640  if (__prev_p->_M_nxt)
2641  {
2642  std::size_t __next_bkt
2643  = __hash_code_base::_M_bucket_index(
2644  *__prev_p->_M_next(), __bkt_count);
2645  if (__next_bkt != __prev_bkt)
2646  __new_buckets[__next_bkt] = __prev_p;
2647  }
2648  __check_bucket = false;
2649  }
2650 
2651  if (!__new_buckets[__bkt])
2652  {
2653  __p->_M_nxt = _M_before_begin._M_nxt;
2654  _M_before_begin._M_nxt = __p;
2655  __new_buckets[__bkt] = &_M_before_begin;
2656  if (__p->_M_nxt)
2657  __new_buckets[__bbegin_bkt] = __p;
2658  __bbegin_bkt = __bkt;
2659  }
2660  else
2661  {
2662  __p->_M_nxt = __new_buckets[__bkt]->_M_nxt;
2663  __new_buckets[__bkt]->_M_nxt = __p;
2664  }
2665  }
2666  __prev_p = __p;
2667  __prev_bkt = __bkt;
2668  __p = __next;
2669  }
2670 
2671  if (__check_bucket && __prev_p->_M_nxt)
2672  {
2673  std::size_t __next_bkt
2674  = __hash_code_base::_M_bucket_index(*__prev_p->_M_next(),
2675  __bkt_count);
2676  if (__next_bkt != __prev_bkt)
2677  __new_buckets[__next_bkt] = __prev_p;
2678  }
2679 
2680  _M_deallocate_buckets();
2681  _M_bucket_count = __bkt_count;
2682  _M_buckets = __new_buckets;
2683  }
2684 
2685 #if __cplusplus > 201402L
2686  template<typename, typename, typename> class _Hash_merge_helper { };
2687 #endif // C++17
2688 
2689 #if __cpp_deduction_guides >= 201606
2690  // Used to constrain deduction guides
2691  template<typename _Hash>
2692  using _RequireNotAllocatorOrIntegral
2693  = __enable_if_t<!__or_<is_integral<_Hash>, __is_allocator<_Hash>>::value>;
2694 #endif
2695 
2696 /// @endcond
2697 _GLIBCXX_END_NAMESPACE_VERSION
2698 } // namespace std
2699 
2700 #endif // _HASHTABLE_H
integral_constant< bool, true > true_type
The type used as a compile-time boolean with true value.
Definition: type_traits:82
integral_constant< bool, false > false_type
The type used as a compile-time boolean with false value.
Definition: type_traits:85
constexpr _Tp * __addressof(_Tp &__r) noexcept
Same as C++11 std::addressof.
Definition: move.h:49
constexpr std::remove_reference< _Tp >::type && move(_Tp &&__t) noexcept
Convert a value to an rvalue.
Definition: move.h:104
constexpr _Tp && forward(typename std::remove_reference< _Tp >::type &__t) noexcept
Forward an lvalue.
Definition: move.h:77
void swap(any &__x, any &__y) noexcept
Exchange the states of two any objects.
Definition: any:429
_Tp * end(valarray< _Tp > &__va) noexcept
Return an iterator pointing to one past the last element of the valarray.
Definition: valarray:1237
_Tp * begin(valarray< _Tp > &__va) noexcept
Return an iterator pointing to the first element of the valarray.
Definition: valarray:1215
constexpr const _Tp & max(const _Tp &, const _Tp &)
This does what you think it does.
Definition: stl_algobase.h:254
void swap(shared_lock< _Mutex > &__x, shared_lock< _Mutex > &__y) noexcept
Swap specialization for shared_lock.
Definition: shared_mutex:851
ISO C++ entities toplevel namespace is std.
constexpr iterator_traits< _InputIterator >::difference_type distance(_InputIterator __first, _InputIterator __last)
A generalization of pointer arithmetic.
constexpr auto cend(const _Container &__cont) noexcept(noexcept(std::end(__cont))) -> decltype(std::end(__cont))
Return an iterator pointing to one past the last element of the const container.
Definition: range_access.h:138
constexpr auto empty(const _Container &__cont) noexcept(noexcept(__cont.empty())) -> decltype(__cont.empty())
Return whether a container is empty.
Definition: range_access.h:283
constexpr auto size(const _Container &__cont) noexcept(noexcept(__cont.size())) -> decltype(__cont.size())
Return the size of a container.
Definition: range_access.h:264
constexpr auto cbegin(const _Container &__cont) noexcept(noexcept(std::begin(__cont))) -> decltype(std::begin(__cont))
Return an iterator pointing to the first element of the const container.
Definition: range_access.h:126
constexpr const _Tp * begin(initializer_list< _Tp > __ils) noexcept
Return an iterator pointing to the first element of the initializer_list.
constexpr const _Tp * end(initializer_list< _Tp > __ils) noexcept
Return an iterator pointing to one past the last element of the initializer_list.
Struct holding two objects of arbitrary type.
Definition: stl_pair.h:187
_T1 first
The first member.
Definition: stl_pair.h:191
_T2 second
The second member.
Definition: stl_pair.h:192