libstdc++
mutex
Go to the documentation of this file.
1 // <mutex> -*- C++ -*-
2 
3 // Copyright (C) 2003-2022 Free Software Foundation, Inc.
4 //
5 // This file is part of the GNU ISO C++ Library. This library is free
6 // software; you can redistribute it and/or modify it under the
7 // terms of the GNU General Public License as published by the
8 // Free Software Foundation; either version 3, or (at your option)
9 // any later version.
10 
11 // This library is distributed in the hope that it will be useful,
12 // but WITHOUT ANY WARRANTY; without even the implied warranty of
13 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 // GNU General Public License for more details.
15 
16 // Under Section 7 of GPL version 3, you are granted additional
17 // permissions described in the GCC Runtime Library Exception, version
18 // 3.1, as published by the Free Software Foundation.
19 
20 // You should have received a copy of the GNU General Public License and
21 // a copy of the GCC Runtime Library Exception along with this program;
22 // see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23 // <http://www.gnu.org/licenses/>.
24 
25 /** @file include/mutex
26  * This is a Standard C++ Library header.
27  */
28 
29 #ifndef _GLIBCXX_MUTEX
30 #define _GLIBCXX_MUTEX 1
31 
32 #pragma GCC system_header
33 
34 #if __cplusplus < 201103L
35 # include <bits/c++0x_warning.h>
36 #else
37 
38 #include <tuple>
39 #include <exception>
40 #include <type_traits>
41 #include <system_error>
42 #include <bits/chrono.h>
43 #include <bits/std_mutex.h>
44 #include <bits/unique_lock.h>
45 #if ! _GTHREAD_USE_MUTEX_TIMEDLOCK
46 # include <condition_variable>
47 # include <thread>
48 #endif
49 #include <ext/atomicity.h> // __gnu_cxx::__is_single_threaded
50 
51 #if defined _GLIBCXX_HAS_GTHREADS && ! defined _GLIBCXX_HAVE_TLS
52 # include <bits/std_function.h> // std::function
53 #endif
54 
55 namespace std _GLIBCXX_VISIBILITY(default)
56 {
57 _GLIBCXX_BEGIN_NAMESPACE_VERSION
58 
59  /**
60  * @addtogroup mutexes
61  * @{
62  */
63 
64 #ifdef _GLIBCXX_HAS_GTHREADS
65 
  // Common base class for std::recursive_mutex and std::recursive_timed_mutex
  //
  // Wraps a __gthread_recursive_mutex_t and handles the two possible
  // initialization schemes offered by the gthreads layer.
  class __recursive_mutex_base
  {
  protected:
    typedef __gthread_recursive_mutex_t   __native_type;

    // Mutexes are neither copyable nor assignable.
    __recursive_mutex_base(const __recursive_mutex_base&) = delete;
    __recursive_mutex_base& operator=(const __recursive_mutex_base&) = delete;

#ifdef __GTHREAD_RECURSIVE_MUTEX_INIT
    // Static initialization is available, so no constructor/destructor
    // bodies are needed and the type can be trivially destructible.
    __native_type  _M_mutex = __GTHREAD_RECURSIVE_MUTEX_INIT;

    __recursive_mutex_base() = default;
#else
    __native_type  _M_mutex;

    __recursive_mutex_base()
    {
      // XXX EAGAIN, ENOMEM, EPERM, EBUSY(may), EINVAL(may)
      __GTHREAD_RECURSIVE_MUTEX_INIT_FUNCTION(&_M_mutex);
    }

    // Dynamic initialization requires matching destruction.
    ~__recursive_mutex_base()
    { __gthread_recursive_mutex_destroy(&_M_mutex); }
#endif
  };
92 
  /// The standard recursive mutex type.
  class recursive_mutex : private __recursive_mutex_base
  {
  public:
    typedef __native_type*                native_handle_type;

    recursive_mutex() = default;
    ~recursive_mutex() = default;

    recursive_mutex(const recursive_mutex&) = delete;
    recursive_mutex& operator=(const recursive_mutex&) = delete;

    /// Acquire the mutex, blocking if necessary.  Throws system_error
    /// (with the gthread errno value) on failure.
    void
    lock()
    {
      int __e = __gthread_recursive_mutex_lock(&_M_mutex);

      // EINVAL, EAGAIN, EBUSY, EDEADLK(may)
      if (__e)
        __throw_system_error(__e);
    }

    /// Try to acquire the mutex without blocking; returns true on success.
    bool
    try_lock() noexcept
    {
      // XXX EINVAL, EAGAIN, EBUSY
      return !__gthread_recursive_mutex_trylock(&_M_mutex);
    }

    /// Release one level of ownership.  The gthread return value is
    /// deliberately ignored (unlock cannot usefully report failure here).
    void
    unlock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      __gthread_recursive_mutex_unlock(&_M_mutex);
    }

    /// Access the underlying gthread mutex object.
    native_handle_type
    native_handle() noexcept
    { return &_M_mutex; }
  };
133 
134 #if _GTHREAD_USE_MUTEX_TIMEDLOCK
135  template<typename _Derived>
136  class __timed_mutex_impl
137  {
138  protected:
139  template<typename _Rep, typename _Period>
140  bool
141  _M_try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
142  {
143 #if _GLIBCXX_USE_PTHREAD_MUTEX_CLOCKLOCK
144  using __clock = chrono::steady_clock;
145 #else
146  using __clock = chrono::system_clock;
147 #endif
148 
149  auto __rt = chrono::duration_cast<__clock::duration>(__rtime);
150  if (ratio_greater<__clock::period, _Period>())
151  ++__rt;
152  return _M_try_lock_until(__clock::now() + __rt);
153  }
154 
155  template<typename _Duration>
156  bool
157  _M_try_lock_until(const chrono::time_point<chrono::system_clock,
158  _Duration>& __atime)
159  {
160  auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
161  auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);
162 
163  __gthread_time_t __ts = {
164  static_cast<std::time_t>(__s.time_since_epoch().count()),
165  static_cast<long>(__ns.count())
166  };
167 
168  return static_cast<_Derived*>(this)->_M_timedlock(__ts);
169  }
170 
171 #ifdef _GLIBCXX_USE_PTHREAD_MUTEX_CLOCKLOCK
172  template<typename _Duration>
173  bool
174  _M_try_lock_until(const chrono::time_point<chrono::steady_clock,
175  _Duration>& __atime)
176  {
177  auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
178  auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);
179 
180  __gthread_time_t __ts = {
181  static_cast<std::time_t>(__s.time_since_epoch().count()),
182  static_cast<long>(__ns.count())
183  };
184 
185  return static_cast<_Derived*>(this)->_M_clocklock(CLOCK_MONOTONIC,
186  __ts);
187  }
188 #endif
189 
190  template<typename _Clock, typename _Duration>
191  bool
192  _M_try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
193  {
194 #if __cplusplus > 201703L
195  static_assert(chrono::is_clock_v<_Clock>);
196 #endif
197  // The user-supplied clock may not tick at the same rate as
198  // steady_clock, so we must loop in order to guarantee that
199  // the timeout has expired before returning false.
200  auto __now = _Clock::now();
201  do {
202  auto __rtime = __atime - __now;
203  if (_M_try_lock_for(__rtime))
204  return true;
205  __now = _Clock::now();
206  } while (__atime > __now);
207  return false;
208  }
209  };
210 
  /// The standard timed mutex type.
  class timed_mutex
  : private __mutex_base, public __timed_mutex_impl<timed_mutex>
  {
  public:
    typedef __native_type*                native_handle_type;

    timed_mutex() = default;
    ~timed_mutex() = default;

    timed_mutex(const timed_mutex&) = delete;
    timed_mutex& operator=(const timed_mutex&) = delete;

    /// Acquire the mutex, blocking until it becomes available.
    void
    lock()
    {
      int __e = __gthread_mutex_lock(&_M_mutex);

      // EINVAL, EAGAIN, EBUSY, EDEADLK(may)
      if (__e)
        __throw_system_error(__e);
    }

    /// Try to acquire the mutex without blocking; true on success.
    bool
    try_lock() noexcept
    {
      // XXX EINVAL, EAGAIN, EBUSY
      return !__gthread_mutex_trylock(&_M_mutex);
    }

    /// Try to acquire the mutex, waiting for at most @a __rtime.
    /// Implemented in __timed_mutex_impl.
    template <class _Rep, class _Period>
      bool
      try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
      { return _M_try_lock_for(__rtime); }

    /// Try to acquire the mutex, waiting until @a __atime at the latest.
    template <class _Clock, class _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
      { return _M_try_lock_until(__atime); }

    /// Release the mutex.  The gthread return value is ignored.
    void
    unlock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      __gthread_mutex_unlock(&_M_mutex);
    }

    /// Access the underlying gthread mutex object.
    native_handle_type
    native_handle() noexcept
    { return &_M_mutex; }

  private:
    friend class __timed_mutex_impl<timed_mutex>;

    // Block until the mutex is acquired or the absolute time __ts
    // (derived from system_clock in the base class) has passed.
    bool
    _M_timedlock(const __gthread_time_t& __ts)
    { return !__gthread_mutex_timedlock(&_M_mutex, &__ts); }

#if _GLIBCXX_USE_PTHREAD_MUTEX_CLOCKLOCK
    // As above, but waiting against an explicit POSIX clock.
    bool
    _M_clocklock(clockid_t clockid, const __gthread_time_t& __ts)
    { return !pthread_mutex_clocklock(&_M_mutex, clockid, &__ts); }
#endif
  };
275 
276  /// recursive_timed_mutex
277  class recursive_timed_mutex
278  : private __recursive_mutex_base,
279  public __timed_mutex_impl<recursive_timed_mutex>
280  {
281  public:
282  typedef __native_type* native_handle_type;
283 
284  recursive_timed_mutex() = default;
285  ~recursive_timed_mutex() = default;
286 
287  recursive_timed_mutex(const recursive_timed_mutex&) = delete;
288  recursive_timed_mutex& operator=(const recursive_timed_mutex&) = delete;
289 
290  void
291  lock()
292  {
293  int __e = __gthread_recursive_mutex_lock(&_M_mutex);
294 
295  // EINVAL, EAGAIN, EBUSY, EINVAL, EDEADLK(may)
296  if (__e)
297  __throw_system_error(__e);
298  }
299 
300  bool
301  try_lock() noexcept
302  {
303  // XXX EINVAL, EAGAIN, EBUSY
304  return !__gthread_recursive_mutex_trylock(&_M_mutex);
305  }
306 
307  template <class _Rep, class _Period>
308  bool
309  try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
310  { return _M_try_lock_for(__rtime); }
311 
312  template <class _Clock, class _Duration>
313  bool
314  try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
315  { return _M_try_lock_until(__atime); }
316 
317  void
318  unlock()
319  {
320  // XXX EINVAL, EAGAIN, EBUSY
321  __gthread_recursive_mutex_unlock(&_M_mutex);
322  }
323 
324  native_handle_type
325  native_handle() noexcept
326  { return &_M_mutex; }
327 
328  private:
329  friend class __timed_mutex_impl<recursive_timed_mutex>;
330 
331  bool
332  _M_timedlock(const __gthread_time_t& __ts)
333  { return !__gthread_recursive_mutex_timedlock(&_M_mutex, &__ts); }
334 
335 #ifdef _GLIBCXX_USE_PTHREAD_MUTEX_CLOCKLOCK
336  bool
337  _M_clocklock(clockid_t clockid, const __gthread_time_t& __ts)
338  { return !pthread_mutex_clocklock(&_M_mutex, clockid, &__ts); }
339 #endif
340  };
341 
342 #else // !_GTHREAD_USE_MUTEX_TIMEDLOCK
343 
  /// timed_mutex
  ///
  /// Fallback implementation used when the gthreads layer has no native
  /// timedlock: a plain mutex plus condition_variable guard a _M_locked
  /// flag that represents ownership.
  class timed_mutex
  {
    mutex               _M_mut;       // protects _M_locked
    condition_variable  _M_cv;        // signalled when _M_locked becomes false
    bool                _M_locked = false;  // true while some thread holds the lock

  public:

    timed_mutex() = default;
    // It is a bug to destroy a mutex that is still locked.
    ~timed_mutex() { __glibcxx_assert( !_M_locked ); }

    timed_mutex(const timed_mutex&) = delete;
    timed_mutex& operator=(const timed_mutex&) = delete;

    /// Block until the flag is clear, then claim ownership.
    void
    lock()
    {
      unique_lock<mutex> __lk(_M_mut);
      _M_cv.wait(__lk, [&]{ return !_M_locked; });
      _M_locked = true;
    }

    /// Claim ownership only if nobody holds it; never waits on _M_cv.
    bool
    try_lock()
    {
      lock_guard<mutex> __lk(_M_mut);
      if (_M_locked)
        return false;
      _M_locked = true;
      return true;
    }

    /// Wait up to @a __rtime for the flag to clear; false on timeout.
    template<typename _Rep, typename _Period>
      bool
      try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
      {
        unique_lock<mutex> __lk(_M_mut);
        if (!_M_cv.wait_for(__lk, __rtime, [&]{ return !_M_locked; }))
          return false;
        _M_locked = true;
        return true;
      }

    /// Wait until @a __atime for the flag to clear; false on timeout.
    template<typename _Clock, typename _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
      {
        unique_lock<mutex> __lk(_M_mut);
        if (!_M_cv.wait_until(__lk, __atime, [&]{ return !_M_locked; }))
          return false;
        _M_locked = true;
        return true;
      }

    /// Release ownership and wake one waiting thread.
    void
    unlock()
    {
      lock_guard<mutex> __lk(_M_mut);
      // Calling unlock without holding the lock is undefined; assert it.
      __glibcxx_assert( _M_locked );
      _M_locked = false;
      _M_cv.notify_one();
    }
  };
408 
  /// recursive_timed_mutex
  ///
  /// Fallback implementation used when the gthreads layer has no native
  /// timedlock: ownership is tracked explicitly by owner id and lock depth.
  class recursive_timed_mutex
  {
    mutex               _M_mut;     // protects _M_owner and _M_count
    condition_variable  _M_cv;      // signalled when the mutex becomes free
    thread::id          _M_owner;   // id of the owning thread (default if free)
    unsigned            _M_count = 0;  // lock depth; 0 means unlocked

    // Predicate type that tests whether the current thread can lock a mutex.
    struct _Can_lock
    {
      // Returns true if the mutex is unlocked or is locked by _M_caller.
      bool
      operator()() const noexcept
      { return _M_mx->_M_count == 0 || _M_mx->_M_owner == _M_caller; }

      const recursive_timed_mutex* _M_mx;
      thread::id _M_caller;
    };

  public:

    recursive_timed_mutex() = default;
    // It is a bug to destroy a mutex that is still locked.
    ~recursive_timed_mutex() { __glibcxx_assert( _M_count == 0 ); }

    recursive_timed_mutex(const recursive_timed_mutex&) = delete;
    recursive_timed_mutex& operator=(const recursive_timed_mutex&) = delete;

    /// Block until this thread may lock, then increase the lock depth.
    void
    lock()
    {
      auto __id = this_thread::get_id();
      _Can_lock __can_lock{this, __id};
      unique_lock<mutex> __lk(_M_mut);
      _M_cv.wait(__lk, __can_lock);
      // Depth would overflow the unsigned counter: report resource error.
      if (_M_count == -1u)
        __throw_system_error(EAGAIN); // [thread.timedmutex.recursive]/3
      _M_owner = __id;
      ++_M_count;
    }

    /// Lock without blocking on _M_cv; false if another thread owns it
    /// or the depth counter would overflow.
    bool
    try_lock()
    {
      auto __id = this_thread::get_id();
      _Can_lock __can_lock{this, __id};
      lock_guard<mutex> __lk(_M_mut);
      if (!__can_lock())
        return false;
      if (_M_count == -1u)
        return false;
      _M_owner = __id;
      ++_M_count;
      return true;
    }

    /// Wait up to @a __rtime to lock; false on timeout or depth overflow.
    template<typename _Rep, typename _Period>
      bool
      try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
      {
        auto __id = this_thread::get_id();
        _Can_lock __can_lock{this, __id};
        unique_lock<mutex> __lk(_M_mut);
        if (!_M_cv.wait_for(__lk, __rtime, __can_lock))
          return false;
        if (_M_count == -1u)
          return false;
        _M_owner = __id;
        ++_M_count;
        return true;
      }

    /// Wait until @a __atime to lock; false on timeout or depth overflow.
    template<typename _Clock, typename _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
      {
        auto __id = this_thread::get_id();
        _Can_lock __can_lock{this, __id};
        unique_lock<mutex> __lk(_M_mut);
        if (!_M_cv.wait_until(__lk, __atime, __can_lock))
          return false;
        if (_M_count == -1u)
          return false;
        _M_owner = __id;
        ++_M_count;
        return true;
      }

    /// Decrease the lock depth; on reaching zero, clear the owner and
    /// wake one waiting thread.
    void
    unlock()
    {
      lock_guard<mutex> __lk(_M_mut);
      __glibcxx_assert( _M_owner == this_thread::get_id() );
      __glibcxx_assert( _M_count > 0 );
      if (--_M_count == 0)
        {
          _M_owner = {};
          _M_cv.notify_one();
        }
    }
  };
510 
511 #endif
512 #endif // _GLIBCXX_HAS_GTHREADS
513 
514  /// @cond undocumented
515  namespace __detail
516  {
517  // Lock the last lockable, after all previous ones are locked.
518  template<typename _Lockable>
519  inline int
520  __try_lock_impl(_Lockable& __l)
521  {
522  if (unique_lock<_Lockable> __lock{__l, try_to_lock})
523  {
524  __lock.release();
525  return -1;
526  }
527  else
528  return 0;
529  }
530 
    // Lock each lockable in turn.
    // Use iteration if all lockables are the same type, recursion otherwise.
    // Returns -1 if every lock was acquired (ownership released to the
    // caller), otherwise the zero-based index of the first failure, with
    // all previously-acquired locks released.
    template<typename _L0, typename... _Lockables>
      inline int
      __try_lock_impl(_L0& __l0, _Lockables&... __lockables)
      {
#if __cplusplus >= 201703L
        if constexpr ((is_same_v<_L0, _Lockables> && ...))
          {
            // Homogeneous case: deferred locks in an array, tried in order.
            constexpr int _Np = 1 + sizeof...(_Lockables);
            unique_lock<_L0> __locks[_Np] = {
              {__l0, defer_lock}, {__lockables, defer_lock}...
            };
            for (int __i = 0; __i < _Np; ++__i)
              {
                if (!__locks[__i].try_lock())
                  {
                    // Roll back everything locked so far, newest first.
                    const int __failed = __i;
                    while (__i--)
                      __locks[__i].unlock();
                    return __failed;
                  }
              }
            // All locked: relinquish ownership to the caller.
            for (auto& __l : __locks)
              __l.release();
            return -1;
          }
        else
#endif
        // Heterogeneous (or pre-C++17) case: lock the first, recurse on
        // the rest, and translate the recursive index to this level.
        if (unique_lock<_L0> __lock{__l0, try_to_lock})
          {
            int __idx = __detail::__try_lock_impl(__lockables...);
            if (__idx == -1)
              {
                // The whole tail locked too: keep __l0 locked as well.
                __lock.release();
                return -1;
              }
            return __idx + 1;
          }
        else
          return 0;
      }
573 
574  } // namespace __detail
575  /// @endcond
576 
  /** @brief Generic try_lock.
   *  @param __l1 Meets Lockable requirements (try_lock() may throw).
   *  @param __l2 Meets Lockable requirements (try_lock() may throw).
   *  @param __l3 Meets Lockable requirements (try_lock() may throw).
   *  @return Returns -1 if all try_lock() calls return true. Otherwise returns
   *          a 0-based index corresponding to the argument that returned false.
   *  @post Either all arguments are locked, or none will be.
   *
   *  Sequentially calls try_lock() on each argument.
   *  The real work is done by __detail::__try_lock_impl, which also
   *  unwinds any locks taken before a failure.
   */
  template<typename _L1, typename _L2, typename... _L3>
    inline int
    try_lock(_L1& __l1, _L2& __l2, _L3&... __l3)
    {
      return __detail::__try_lock_impl(__l1, __l2, __l3...);
    }
593 
594  /// @cond undocumented
595  namespace __detail
596  {
    // This function can recurse up to N levels deep, for N = 1+sizeof...(L1).
    // On each recursion the lockables are rotated left one position,
    // e.g. depth 0: l0, l1, l2; depth 1: l1, l2, l0; depth 2: l2, l0, l1.
    // When a call to l_i.try_lock() fails it recurses/returns to depth=i
    // so that l_i is the first argument, and then blocks until l_i is locked.
    //
    // __i is both input and output: the index of the lockable to block on
    // next; it is set to -1 once every lockable has been locked.
    template<typename _L0, typename... _L1>
      void
      __lock_impl(int& __i, int __depth, _L0& __l0, _L1&... __l1)
      {
        while (__i >= __depth)
          {
            if (__i == __depth)
              {
                int __failed = 1; // index that couldn't be locked
                {
                  // Block on __l0, then try the rest without blocking.
                  unique_lock<_L0> __first(__l0);
                  __failed += __detail::__try_lock_impl(__l1...);
                  // __try_lock_impl returned -1: everything is locked.
                  if (!__failed)
                    {
                      __i = -1; // finished
                      __first.release();
                      return;
                    }
                  // Otherwise __first unlocks here and we start over,
                  // blocking on the lockable that was contended.
                }
#if defined _GLIBCXX_HAS_GTHREADS && defined _GLIBCXX_USE_SCHED_YIELD
                // Give the other thread a chance to make progress.
                __gthread_yield();
#endif
                // Translate the failed index back to the depth-0 rotation.
                constexpr auto __n = 1 + sizeof...(_L1);
                __i = (__depth + __failed) % __n;
              }
            else // rotate left until l_i is first.
              __detail::__lock_impl(__i, __depth + 1, __l1..., __l0);
          }
      }
631 
632  } // namespace __detail
633  /// @endcond
634 
  /** @brief Generic lock.
   *  @param __l1 Meets Lockable requirements (try_lock() may throw).
   *  @param __l2 Meets Lockable requirements (try_lock() may throw).
   *  @param __l3 Meets Lockable requirements (try_lock() may throw).
   *  @throw An exception thrown by an argument's lock() or try_lock() member.
   *  @post All arguments are locked.
   *
   *  All arguments are locked via a sequence of calls to lock(), try_lock()
   *  and unlock().  If this function exits via an exception any locks that
   *  were obtained will be released.
   */
  template<typename _L1, typename _L2, typename... _L3>
    void
    lock(_L1& __l1, _L2& __l2, _L3&... __l3)
    {
#if __cplusplus >= 201703L
      if constexpr (is_same_v<_L1, _L2> && (is_same_v<_L1, _L3> && ...))
        {
          // Homogeneous case, solved iteratively: block on one lock and
          // try the others; on contention, release everything and restart,
          // blocking on the lock that was contended.
          constexpr int _Np = 2 + sizeof...(_L3);
          unique_lock<_L1> __locks[] = {
            {__l1, defer_lock}, {__l2, defer_lock}, {__l3, defer_lock}...
          };
          int __first = 0;
          do {
            __locks[__first].lock();
            for (int __j = 1; __j < _Np; ++__j)
              {
                // Visit the other locks in circular order after __first.
                const int __idx = (__first + __j) % _Np;
                if (!__locks[__idx].try_lock())
                  {
                    // Unlock all currently-held locks, newest first,
                    // then restart blocking on the contended one.
                    for (int __k = __j; __k != 0; --__k)
                      __locks[(__first + __k - 1) % _Np].unlock();
                    __first = __idx;
                    break;
                  }
              }
          } while (!__locks[__first].owns_lock());

          // All locked: transfer ownership to the caller.
          for (auto& __l : __locks)
            __l.release();
        }
      else
#endif
        {
          // Heterogeneous (or pre-C++17) case: recursive rotation scheme.
          int __i = 0;
          __detail::__lock_impl(__i, 0, __l1, __l2, __l3...);
        }
    }
683 
684 #if __cplusplus >= 201703L
685 #define __cpp_lib_scoped_lock 201703L
  /** @brief A scoped lock type for multiple lockable objects.
   *
   * A scoped_lock controls mutex ownership within a scope, releasing
   * ownership in the destructor.
   */
  template<typename... _MutexTypes>
    class scoped_lock
    {
    public:
      /// Locks all mutexes via std::lock, which avoids deadlock.
      explicit scoped_lock(_MutexTypes&... __m) : _M_devices(std::tie(__m...))
      { std::lock(__m...); }

      /// Adopts mutexes the calling thread has already locked.
      explicit scoped_lock(adopt_lock_t, _MutexTypes&... __m) noexcept
      : _M_devices(std::tie(__m...))
      { } // calling thread owns mutex

      /// Unlocks every mutex (fold over the stored references).
      ~scoped_lock()
      { std::apply([](auto&... __m) { (__m.unlock(), ...); }, _M_devices); }

      scoped_lock(const scoped_lock&) = delete;
      scoped_lock& operator=(const scoped_lock&) = delete;

    private:
      // References to the locked mutexes; no copies are made.
      tuple<_MutexTypes&...> _M_devices;
    };
711 
  // Explicit specialization for an empty pack: holds and locks nothing,
  // but keeps the same non-copyable interface.
  template<>
    class scoped_lock<>
    {
    public:
      explicit scoped_lock() = default;
      explicit scoped_lock(adopt_lock_t) noexcept { }
      ~scoped_lock() = default;

      scoped_lock(const scoped_lock&) = delete;
      scoped_lock& operator=(const scoped_lock&) = delete;
    };
723 
  // Partial specialization for exactly one mutex: stores a plain reference
  // and exposes mutex_type, per the standard's scoped_lock interface.
  template<typename _Mutex>
    class scoped_lock<_Mutex>
    {
    public:
      using mutex_type = _Mutex;

      /// Locks the mutex on construction.
      explicit scoped_lock(mutex_type& __m) : _M_device(__m)
      { _M_device.lock(); }

      /// Adopts a mutex the calling thread has already locked.
      explicit scoped_lock(adopt_lock_t, mutex_type& __m) noexcept
      : _M_device(__m)
      { } // calling thread owns mutex

      /// Unlocks on destruction.
      ~scoped_lock()
      { _M_device.unlock(); }

      scoped_lock(const scoped_lock&) = delete;
      scoped_lock& operator=(const scoped_lock&) = delete;

    private:
      mutex_type& _M_device;
    };
746 #endif // C++17
747 
748 #ifdef _GLIBCXX_HAS_GTHREADS
  /// Flag type used by std::call_once
  struct once_flag
  {
    constexpr once_flag() noexcept = default;

    /// Deleted copy constructor
    once_flag(const once_flag&) = delete;
    /// Deleted assignment operator
    once_flag& operator=(const once_flag&) = delete;

  private:
    // For gthreads targets a pthread_once_t is used with pthread_once, but
    // for most targets this doesn't work correctly for exceptional executions.
    __gthread_once_t _M_once = __GTHREAD_ONCE_INIT;

    // RAII helper (defined below, differently for TLS/non-TLS targets)
    // that publishes the callable for __once_proxy to invoke.
    struct _Prepare_execution;

    // call_once needs access to _M_once and _Prepare_execution.
    template<typename _Callable, typename... _Args>
      friend void
      call_once(once_flag& __once, _Callable&& __f, _Args&&... __args);
  };
770 
  /// @cond undocumented
# ifdef _GLIBCXX_HAVE_TLS
  // If TLS is available use thread-local state for the type-erased callable
  // that is being run by std::call_once in the current thread.
  extern __thread void* __once_callable;   // address of the current closure
  extern __thread void (*__once_call)();   // trampoline invoking that closure

  // RAII type to set up state for pthread_once call.
  struct once_flag::_Prepare_execution
  {
    template<typename _Callable>
      explicit
      _Prepare_execution(_Callable& __c)
      {
        // Store address in thread-local pointer:
        __once_callable = std::__addressof(__c);
        // Trampoline function to invoke the closure via thread-local pointer:
        __once_call = [] { (*static_cast<_Callable*>(__once_callable))(); };
      }

    ~_Prepare_execution()
    {
      // PR libstdc++/82481
      // Clear the pointers so they are not reused by a nested/later call.
      __once_callable = nullptr;
      __once_call = nullptr;
    }

    _Prepare_execution(const _Prepare_execution&) = delete;
    _Prepare_execution& operator=(const _Prepare_execution&) = delete;
  };
801 
# else
  // Without TLS use a global std::mutex and store the callable in a
  // global std::function.
  extern function<void()> __once_functor;

  // Transfers the lock protecting __once_functor into library internals
  // (it is released there once pthread_once has taken over).
  extern void
  __set_once_functor_lock_ptr(unique_lock<mutex>*);

  // The global mutex guarding __once_functor.
  extern mutex&
  __get_once_mutex();

  // RAII type to set up state for pthread_once call.
  struct once_flag::_Prepare_execution
  {
    template<typename _Callable>
      explicit
      _Prepare_execution(_Callable& __c)
      {
        // Store the callable in the global std::function
        __once_functor = __c;
        __set_once_functor_lock_ptr(&_M_functor_lock);
      }

    ~_Prepare_execution()
    {
      // If the library did not consume the lock, withdraw it again.
      if (_M_functor_lock)
        __set_once_functor_lock_ptr(nullptr);
    }

  private:
    // XXX This deadlocks if used recursively (PR 97949)
    unique_lock<mutex> _M_functor_lock{__get_once_mutex()};

    _Prepare_execution(const _Prepare_execution&) = delete;
    _Prepare_execution& operator=(const _Prepare_execution&) = delete;
  };
# endif
839  /// @endcond
840 
841  // This function is passed to pthread_once by std::call_once.
842  // It runs __once_call() or __once_functor().
843  extern "C" void __once_proxy(void);
844 
  /// Invoke a callable and synchronize with other calls using the same flag
  ///
  /// Publishes a type-erased closure via once_flag::_Prepare_execution and
  /// lets __gthread_once run it through __once_proxy exactly once.
  template<typename _Callable, typename... _Args>
    void
    call_once(once_flag& __once, _Callable&& __f, _Args&&... __args)
    {
      // Closure type that runs the function
      auto __callable = [&] {
        std::__invoke(std::forward<_Callable>(__f),
                      std::forward<_Args>(__args)...);
      };

      once_flag::_Prepare_execution __exec(__callable);

      // XXX pthread_once does not reset the flag if an exception is thrown.
      if (int __e = __gthread_once(&__once._M_once, &__once_proxy))
        __throw_system_error(__e);
    }
862 
863 #else // _GLIBCXX_HAS_GTHREADS
864 
  /// Flag type used by std::call_once
  struct once_flag
  {
    constexpr once_flag() noexcept = default;

    /// Deleted copy constructor
    once_flag(const once_flag&) = delete;
    /// Deleted assignment operator
    once_flag& operator=(const once_flag&) = delete;

  private:
    // There are two different std::once_flag interfaces, abstracting four
    // different implementations.
    // The single-threaded interface uses the _M_activate() and _M_finish(bool)
    // functions, which start and finish an active execution respectively.
    // See [thread.once.callonce] in C++11 for the definition of
    // active/passive/returning/exceptional executions.
    enum _Bits : int { _Init = 0, _Active = 1, _Done = 2 };

    // Current state of the flag; starts out _Init.
    int _M_once = _Bits::_Init;

    // Check to see if all executions will be passive now.
    bool
    _M_passive() const noexcept;

    // Attempts to begin an active execution.
    bool _M_activate();

    // Must be called to complete an active execution.
    // The argument is true if the active execution was a returning execution,
    // false if it was an exceptional execution.
    void _M_finish(bool __returning) noexcept;

    // RAII helper to call _M_finish.
    struct _Active_execution
    {
      explicit _Active_execution(once_flag& __flag) : _M_flag(__flag) { }

      // Reports success only if _M_returning was set before destruction.
      ~_Active_execution() { _M_flag._M_finish(_M_returning); }

      _Active_execution(const _Active_execution&) = delete;
      _Active_execution& operator=(const _Active_execution&) = delete;

      once_flag& _M_flag;
      bool _M_returning = false;  // set to true after the callable returns
    };

    template<typename _Callable, typename... _Args>
      friend void
      call_once(once_flag& __once, _Callable&& __f, _Args&&... __args);
  };
916 
  // Inline definitions of std::once_flag members for single-threaded targets.

  // True once a returning execution has completed: all later calls with
  // this flag are passive executions.
  inline bool
  once_flag::_M_passive() const noexcept
  { return _M_once == _Bits::_Done; }
922 
923  inline bool
924  once_flag::_M_activate()
925  {
926  if (_M_once == _Bits::_Init) [[__likely__]]
927  {
928  _M_once = _Bits::_Active;
929  return true;
930  }
931  else if (_M_passive()) // Caller should have checked this already.
932  return false;
933  else
934  __throw_system_error(EDEADLK);
935  }
936 
  // End an active execution: a returning execution makes the flag _Done
  // forever, an exceptional execution resets it so another call can retry.
  inline void
  once_flag::_M_finish(bool __returning) noexcept
  { _M_once = __returning ? _Bits::_Done : _Bits::_Init; }
940 
  /// Invoke a callable and synchronize with other calls using the same flag
  ///
  /// Single-threaded implementation: drives the _Init/_Active/_Done state
  /// machine directly, with no real synchronization needed.
  template<typename _Callable, typename... _Args>
    inline void
    call_once(once_flag& __once, _Callable&& __f, _Args&&... __args)
    {
      if (__once._M_passive())
        return;
      else if (__once._M_activate())
        {
          // Ensure _M_finish runs even if the callable throws.
          once_flag::_Active_execution __exec(__once);

          // _GLIBCXX_RESOLVE_LIB_DEFECTS
          // 2442. call_once() shouldn't DECAY_COPY()
          std::__invoke(std::forward<_Callable>(__f),
                        std::forward<_Args>(__args)...);

          // __f(__args...) did not throw
          __exec._M_returning = true;
        }
    }
961 #endif // _GLIBCXX_HAS_GTHREADS
962 
963  /// @} group mutexes
964 _GLIBCXX_END_NAMESPACE_VERSION
965 } // namespace
966 
967 #endif // C++11
968 
969 #endif // _GLIBCXX_MUTEX