libstdc++: simd_fixed_size.h
1 // Simd fixed_size ABI specific implementations -*- C++ -*-
2 
3 // Copyright (C) 2020-2022 Free Software Foundation, Inc.
4 //
5 // This file is part of the GNU ISO C++ Library. This library is free
6 // software; you can redistribute it and/or modify it under the
7 // terms of the GNU General Public License as published by the
8 // Free Software Foundation; either version 3, or (at your option)
9 // any later version.
10 
11 // This library is distributed in the hope that it will be useful,
12 // but WITHOUT ANY WARRANTY; without even the implied warranty of
13 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 // GNU General Public License for more details.
15 
16 // Under Section 7 of GPL version 3, you are granted additional
17 // permissions described in the GCC Runtime Library Exception, version
18 // 3.1, as published by the Free Software Foundation.
19 
20 // You should have received a copy of the GNU General Public License and
21 // a copy of the GCC Runtime Library Exception along with this program;
22 // see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23 // <http://www.gnu.org/licenses/>.
24 
25 /*
26  * The fixed_size ABI gives the following guarantees:
27  * - simd objects are passed via the stack
28  * - memory layout of `simd<_Tp, _Np>` is equivalent to `array<_Tp, _Np>`
29  * - alignment of `simd<_Tp, _Np>` is `_Np * sizeof(_Tp)` if _Np is a
30  * power-of-2 value, otherwise `std::__bit_ceil(_Np * sizeof(_Tp))` (Note:
31  * if the alignment were to exceed the system/compiler maximum, it is capped
32  * at that maximum)
33  * - simd_mask objects are passed like bitset<_Np>
34  * - memory layout of `simd_mask<_Tp, _Np>` is equivalent to `bitset<_Np>`
35  * - alignment of `simd_mask<_Tp, _Np>` is equal to the alignment of
36  * `bitset<_Np>`
37  */
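/*
 * Illustrative example (not part of the header): with the guarantees above, a
 * fixed_size simd of 3 floats is laid out like array<float, 3> and is
 * over-aligned to the next power of two, i.e. 16 bytes, assuming 4-byte float
 * and a system/compiler maximum alignment of at least 16:
 *
 *   #include <experimental/simd>
 *   namespace stdx = std::experimental;
 *   using V3 = stdx::fixed_size_simd<float, 3>;
 *   static_assert(alignof(V3) == 16);               // __bit_ceil(3 * 4)
 *   static_assert(sizeof(V3) >= 3 * sizeof(float)); // may include padding
 */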
38 
39 #ifndef _GLIBCXX_EXPERIMENTAL_SIMD_FIXED_SIZE_H_
40 #define _GLIBCXX_EXPERIMENTAL_SIMD_FIXED_SIZE_H_
41 
42 #if __cplusplus >= 201703L
43 
44 #include <array>
45 
46 _GLIBCXX_SIMD_BEGIN_NAMESPACE
47 
48 // __simd_tuple_element {{{
49 template <size_t _I, typename _Tp>
50  struct __simd_tuple_element;
51 
52 template <typename _Tp, typename _A0, typename... _As>
53  struct __simd_tuple_element<0, _SimdTuple<_Tp, _A0, _As...>>
54  { using type = simd<_Tp, _A0>; };
55 
56 template <size_t _I, typename _Tp, typename _A0, typename... _As>
57  struct __simd_tuple_element<_I, _SimdTuple<_Tp, _A0, _As...>>
58  {
59  using type =
60  typename __simd_tuple_element<_I - 1, _SimdTuple<_Tp, _As...>>::type;
61  };
62 
63 template <size_t _I, typename _Tp>
64  using __simd_tuple_element_t = typename __simd_tuple_element<_I, _Tp>::type;
65 
66 // }}}
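// Illustrative example (the ABI tags in a real tuple are target-dependent;
// _VecBuiltin<16> is only used here for concreteness): for
//   using _Tup = _SimdTuple<float, simd_abi::_VecBuiltin<16>, simd_abi::scalar>;
// the recursion above yields
//   __simd_tuple_element_t<0, _Tup> == simd<float, simd_abi::_VecBuiltin<16>>
//   __simd_tuple_element_t<1, _Tup> == simd<float, simd_abi::scalar>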
67 // __simd_tuple_concat {{{
68 
69 template <typename _Tp, typename... _A0s, typename... _A1s>
70  _GLIBCXX_SIMD_INTRINSIC constexpr _SimdTuple<_Tp, _A0s..., _A1s...>
71  __simd_tuple_concat(const _SimdTuple<_Tp, _A0s...>& __left,
72  const _SimdTuple<_Tp, _A1s...>& __right)
73  {
74  if constexpr (sizeof...(_A0s) == 0)
75  return __right;
76  else if constexpr (sizeof...(_A1s) == 0)
77  return __left;
78  else
79  return {__left.first, __simd_tuple_concat(__left.second, __right)};
80  }
81 
82 template <typename _Tp, typename _A10, typename... _A1s>
83  _GLIBCXX_SIMD_INTRINSIC constexpr _SimdTuple<_Tp, simd_abi::scalar, _A10,
84  _A1s...>
85  __simd_tuple_concat(const _Tp& __left,
86  const _SimdTuple<_Tp, _A10, _A1s...>& __right)
87  { return {__left, __right}; }
88 
89 // }}}
90 // __simd_tuple_pop_front {{{
91 // Returns the part of __x that remains after dropping the first _Np elements.
92 // Precondition: _Np must match the number of elements in __first (recursively)
93 template <size_t _Np, typename _Tp>
94  _GLIBCXX_SIMD_INTRINSIC constexpr decltype(auto)
95  __simd_tuple_pop_front(_Tp&& __x)
96  {
97  if constexpr (_Np == 0)
98  return static_cast<_Tp&&>(__x);
99  else
100  {
101  using _Up = __remove_cvref_t<_Tp>;
102  static_assert(_Np >= _Up::_S_first_size);
103  return __simd_tuple_pop_front<_Np - _Up::_S_first_size>(__x.second);
104  }
105  }
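// Illustrative example: for a tuple __x with member sizes 4, 4, 1 (i.e. two
// 4-wide chunks followed by a scalar chunk),
//   __simd_tuple_pop_front<0>(__x) returns __x itself,
//   __simd_tuple_pop_front<4>(__x) returns __x.second, and
//   __simd_tuple_pop_front<8>(__x) returns __x.second.second.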
106 
107 // }}}
108 // __get_simd_at<_Np> {{{1
109 struct __as_simd {};
110 
111 struct __as_simd_tuple {};
112 
113 template <typename _Tp, typename _A0, typename... _Abis>
114  _GLIBCXX_SIMD_INTRINSIC constexpr simd<_Tp, _A0>
115  __simd_tuple_get_impl(__as_simd, const _SimdTuple<_Tp, _A0, _Abis...>& __t,
116  _SizeConstant<0>)
117  { return {__private_init, __t.first}; }
118 
119 template <typename _Tp, typename _A0, typename... _Abis>
120  _GLIBCXX_SIMD_INTRINSIC constexpr const auto&
121  __simd_tuple_get_impl(__as_simd_tuple,
122  const _SimdTuple<_Tp, _A0, _Abis...>& __t,
123  _SizeConstant<0>)
124  { return __t.first; }
125 
126 template <typename _Tp, typename _A0, typename... _Abis>
127  _GLIBCXX_SIMD_INTRINSIC constexpr auto&
128  __simd_tuple_get_impl(__as_simd_tuple, _SimdTuple<_Tp, _A0, _Abis...>& __t,
129  _SizeConstant<0>)
130  { return __t.first; }
131 
132 template <typename _R, size_t _Np, typename _Tp, typename... _Abis>
133  _GLIBCXX_SIMD_INTRINSIC constexpr auto
134  __simd_tuple_get_impl(_R, const _SimdTuple<_Tp, _Abis...>& __t,
135  _SizeConstant<_Np>)
136  { return __simd_tuple_get_impl(_R(), __t.second, _SizeConstant<_Np - 1>()); }
137 
138 template <size_t _Np, typename _Tp, typename... _Abis>
139  _GLIBCXX_SIMD_INTRINSIC constexpr auto&
140  __simd_tuple_get_impl(__as_simd_tuple, _SimdTuple<_Tp, _Abis...>& __t,
141  _SizeConstant<_Np>)
142  {
143  return __simd_tuple_get_impl(__as_simd_tuple(), __t.second,
144  _SizeConstant<_Np - 1>());
145  }
146 
147 template <size_t _Np, typename _Tp, typename... _Abis>
148  _GLIBCXX_SIMD_INTRINSIC constexpr auto
149  __get_simd_at(const _SimdTuple<_Tp, _Abis...>& __t)
150  { return __simd_tuple_get_impl(__as_simd(), __t, _SizeConstant<_Np>()); }
151 
152 // }}}
153 // __get_tuple_at<_Np> {{{
154 template <size_t _Np, typename _Tp, typename... _Abis>
155  _GLIBCXX_SIMD_INTRINSIC constexpr auto
156  __get_tuple_at(const _SimdTuple<_Tp, _Abis...>& __t)
157  {
158  return __simd_tuple_get_impl(__as_simd_tuple(), __t, _SizeConstant<_Np>());
159  }
160 
161 template <size_t _Np, typename _Tp, typename... _Abis>
162  _GLIBCXX_SIMD_INTRINSIC constexpr auto&
163  __get_tuple_at(_SimdTuple<_Tp, _Abis...>& __t)
164  {
165  return __simd_tuple_get_impl(__as_simd_tuple(), __t, _SizeConstant<_Np>());
166  }
167 
168 // __tuple_element_meta {{{1
169 template <typename _Tp, typename _Abi, size_t _Offset>
170  struct __tuple_element_meta : public _Abi::_SimdImpl
171  {
172  static_assert(is_same_v<typename _Abi::_SimdImpl::abi_type,
173  _Abi>); // this fails e.g. when _SimdImpl is an
174  // alias for _SimdImplBuiltin<_DifferentAbi>
175  using value_type = _Tp;
176  using abi_type = _Abi;
177  using _Traits = _SimdTraits<_Tp, _Abi>;
178  using _MaskImpl = typename _Abi::_MaskImpl;
179  using _MaskMember = typename _Traits::_MaskMember;
180  using simd_type = simd<_Tp, _Abi>;
181  static constexpr size_t _S_offset = _Offset;
182  static constexpr size_t _S_size() { return simd_size<_Tp, _Abi>::value; }
183  static constexpr _MaskImpl _S_mask_impl = {};
184 
185  template <size_t _Np, bool _Sanitized>
186  _GLIBCXX_SIMD_INTRINSIC static auto
187  _S_submask(_BitMask<_Np, _Sanitized> __bits)
188  { return __bits.template _M_extract<_Offset, _S_size()>(); }
189 
190  template <size_t _Np, bool _Sanitized>
191  _GLIBCXX_SIMD_INTRINSIC static _MaskMember
192  _S_make_mask(_BitMask<_Np, _Sanitized> __bits)
193  {
194  return _MaskImpl::template _S_convert<_Tp>(
195  __bits.template _M_extract<_Offset, _S_size()>()._M_sanitized());
196  }
197 
198  _GLIBCXX_SIMD_INTRINSIC static _ULLong
199  _S_mask_to_shifted_ullong(_MaskMember __k)
200  { return _MaskImpl::_S_to_bits(__k).to_ullong() << _Offset; }
201  };
202 
203 template <size_t _Offset, typename _Tp, typename _Abi, typename... _As>
204  _GLIBCXX_SIMD_INTRINSIC
205  __tuple_element_meta<_Tp, _Abi, _Offset>
206  __make_meta(const _SimdTuple<_Tp, _Abi, _As...>&)
207  { return {}; }
208 
209 // }}}1
210 // _WithOffset wrapper class {{{
211 template <size_t _Offset, typename _Base>
212  struct _WithOffset : public _Base
213  {
214  static inline constexpr size_t _S_offset = _Offset;
215 
216  _GLIBCXX_SIMD_INTRINSIC char* _M_as_charptr()
217  {
218  return reinterpret_cast<char*>(this)
219  + _S_offset * sizeof(typename _Base::value_type);
220  }
221 
222  _GLIBCXX_SIMD_INTRINSIC const char* _M_as_charptr() const
223  {
224  return reinterpret_cast<const char*>(this)
225  + _S_offset * sizeof(typename _Base::value_type);
226  }
227  };
228 
229 // make _WithOffset<_WithOffset> ill-formed to use:
230 template <size_t _O0, size_t _O1, typename _Base>
231  struct _WithOffset<_O0, _WithOffset<_O1, _Base>> {};
232 
233 template <size_t _Offset, typename _Tp>
234  _GLIBCXX_SIMD_INTRINSIC
235  decltype(auto)
236  __add_offset(_Tp& __base)
237  { return static_cast<_WithOffset<_Offset, __remove_cvref_t<_Tp>>&>(__base); }
238 
239 template <size_t _Offset, typename _Tp>
240  _GLIBCXX_SIMD_INTRINSIC
241  decltype(auto)
242  __add_offset(const _Tp& __base)
243  {
244  return static_cast<const _WithOffset<_Offset, __remove_cvref_t<_Tp>>&>(
245  __base);
246  }
247 
248 template <size_t _Offset, size_t _ExistingOffset, typename _Tp>
249  _GLIBCXX_SIMD_INTRINSIC
250  decltype(auto)
251  __add_offset(_WithOffset<_ExistingOffset, _Tp>& __base)
252  {
253  return static_cast<_WithOffset<_Offset + _ExistingOffset, _Tp>&>(
254  static_cast<_Tp&>(__base));
255  }
256 
257 template <size_t _Offset, size_t _ExistingOffset, typename _Tp>
258  _GLIBCXX_SIMD_INTRINSIC
259  decltype(auto)
260  __add_offset(const _WithOffset<_ExistingOffset, _Tp>& __base)
261  {
262  return static_cast<const _WithOffset<_Offset + _ExistingOffset, _Tp>&>(
263  static_cast<const _Tp&>(__base));
264  }
265 
266 template <typename _Tp>
267  constexpr inline size_t __offset = 0;
268 
269 template <size_t _Offset, typename _Tp>
270  constexpr inline size_t __offset<_WithOffset<_Offset, _Tp>>
271  = _WithOffset<_Offset, _Tp>::_S_offset;
272 
273 template <typename _Tp>
274  constexpr inline size_t __offset<const _Tp> = __offset<_Tp>;
275 
276 template <typename _Tp>
277  constexpr inline size_t __offset<_Tp&> = __offset<_Tp>;
278 
279 template <typename _Tp>
280  constexpr inline size_t __offset<_Tp&&> = __offset<_Tp>;
281 
282 // }}}
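// Illustrative example: __add_offset<4>(__tup) returns a reference to __tup
// typed as _WithOffset<4, decltype(__tup)>; __offset of that type is 4 and
// _M_as_charptr() points 4 * sizeof(value_type) bytes into the object.
// _M_skip_argument and _M_apply_per_chunk (below) rely on this to address the
// correct chunk of a caller-provided argument.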
283 // _SimdTuple specializations {{{1
284 // empty {{{2
285 template <typename _Tp>
286  struct _SimdTuple<_Tp>
287  {
288  using value_type = _Tp;
289  static constexpr size_t _S_tuple_size = 0;
290  static constexpr size_t _S_size() { return 0; }
291  };
292 
293 // _SimdTupleData {{{2
294 template <typename _FirstType, typename _SecondType>
295  struct _SimdTupleData
296  {
297  _FirstType first;
298  _SecondType second;
299 
300  _GLIBCXX_SIMD_INTRINSIC
301  constexpr bool _M_is_constprop() const
302  {
303  if constexpr (is_class_v<_FirstType>)
304  return first._M_is_constprop() && second._M_is_constprop();
305  else
306  return __builtin_constant_p(first) && second._M_is_constprop();
307  }
308  };
309 
310 template <typename _FirstType, typename _Tp>
311  struct _SimdTupleData<_FirstType, _SimdTuple<_Tp>>
312  {
313  _FirstType first;
314  static constexpr _SimdTuple<_Tp> second = {};
315 
316  _GLIBCXX_SIMD_INTRINSIC
317  constexpr bool _M_is_constprop() const
318  {
319  if constexpr (is_class_v<_FirstType>)
320  return first._M_is_constprop();
321  else
322  return __builtin_constant_p(first);
323  }
324  };
325 
326 // 1 or more {{{2
327 template <typename _Tp, typename _Abi0, typename... _Abis>
328  struct _SimdTuple<_Tp, _Abi0, _Abis...>
329  : _SimdTupleData<typename _SimdTraits<_Tp, _Abi0>::_SimdMember,
330  _SimdTuple<_Tp, _Abis...>>
331  {
332  static_assert(!__is_fixed_size_abi_v<_Abi0>);
333  using value_type = _Tp;
334  using _FirstType = typename _SimdTraits<_Tp, _Abi0>::_SimdMember;
335  using _FirstAbi = _Abi0;
336  using _SecondType = _SimdTuple<_Tp, _Abis...>;
337  static constexpr size_t _S_tuple_size = sizeof...(_Abis) + 1;
338 
339  static constexpr size_t _S_size()
340  { return simd_size_v<_Tp, _Abi0> + _SecondType::_S_size(); }
341 
342  static constexpr size_t _S_first_size = simd_size_v<_Tp, _Abi0>;
343  static constexpr bool _S_is_homogeneous = (is_same_v<_Abi0, _Abis> && ...);
344 
345  using _Base = _SimdTupleData<typename _SimdTraits<_Tp, _Abi0>::_SimdMember,
346  _SimdTuple<_Tp, _Abis...>>;
347  using _Base::first;
348  using _Base::second;
349 
350  _GLIBCXX_SIMD_INTRINSIC constexpr _SimdTuple() = default;
351  _GLIBCXX_SIMD_INTRINSIC constexpr _SimdTuple(const _SimdTuple&) = default;
352  _GLIBCXX_SIMD_INTRINSIC constexpr _SimdTuple& operator=(const _SimdTuple&)
353  = default;
354 
355  template <typename _Up>
356  _GLIBCXX_SIMD_INTRINSIC constexpr _SimdTuple(_Up&& __x)
357  : _Base{static_cast<_Up&&>(__x)} {}
358 
359  template <typename _Up, typename _Up2>
360  _GLIBCXX_SIMD_INTRINSIC constexpr _SimdTuple(_Up&& __x, _Up2&& __y)
361  : _Base{static_cast<_Up&&>(__x), static_cast<_Up2&&>(__y)} {}
362 
363  template <typename _Up>
364  _GLIBCXX_SIMD_INTRINSIC constexpr _SimdTuple(_Up&& __x, _SimdTuple<_Tp>)
365  : _Base{static_cast<_Up&&>(__x)} {}
366 
367  _GLIBCXX_SIMD_INTRINSIC char* _M_as_charptr()
368  { return reinterpret_cast<char*>(this); }
369 
370  _GLIBCXX_SIMD_INTRINSIC const char* _M_as_charptr() const
371  { return reinterpret_cast<const char*>(this); }
372 
373  template <size_t _Np>
374  _GLIBCXX_SIMD_INTRINSIC constexpr auto& _M_at()
375  {
376  if constexpr (_Np == 0)
377  return first;
378  else
379  return second.template _M_at<_Np - 1>();
380  }
381 
382  template <size_t _Np>
383  _GLIBCXX_SIMD_INTRINSIC constexpr const auto& _M_at() const
384  {
385  if constexpr (_Np == 0)
386  return first;
387  else
388  return second.template _M_at<_Np - 1>();
389  }
390 
391  template <size_t _Np>
392  _GLIBCXX_SIMD_INTRINSIC constexpr auto _M_simd_at() const
393  {
394  if constexpr (_Np == 0)
395  return simd<_Tp, _Abi0>(__private_init, first);
396  else
397  return second.template _M_simd_at<_Np - 1>();
398  }
399 
400  template <size_t _Offset = 0, typename _Fp>
401  _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdTuple
402  _S_generate(_Fp&& __gen, _SizeConstant<_Offset> = {})
403  {
404  auto&& __first = __gen(__tuple_element_meta<_Tp, _Abi0, _Offset>());
405  if constexpr (_S_tuple_size == 1)
406  return {__first};
407  else
408  return {__first,
409  _SecondType::_S_generate(
410  static_cast<_Fp&&>(__gen),
411  _SizeConstant<_Offset + simd_size_v<_Tp, _Abi0>>())};
412  }
413 
414  template <size_t _Offset = 0, typename _Fp, typename... _More>
415  _GLIBCXX_SIMD_INTRINSIC _SimdTuple
416  _M_apply_wrapped(_Fp&& __fun, const _More&... __more) const
417  {
418  auto&& __first
419  = __fun(__make_meta<_Offset>(*this), first, __more.first...);
420  if constexpr (_S_tuple_size == 1)
421  return {__first};
422  else
423  return {
424  __first,
425  second.template _M_apply_wrapped<_Offset + simd_size_v<_Tp, _Abi0>>(
426  static_cast<_Fp&&>(__fun), __more.second...)};
427  }
428 
429  template <typename _Tup>
430  _GLIBCXX_SIMD_INTRINSIC constexpr decltype(auto)
431  _M_extract_argument(_Tup&& __tup) const
432  {
433  using _TupT = typename __remove_cvref_t<_Tup>::value_type;
434  if constexpr (is_same_v<_SimdTuple, __remove_cvref_t<_Tup>>)
435  return __tup.first;
436  else if (__builtin_is_constant_evaluated())
437  return __fixed_size_storage_t<_TupT, _S_first_size>::_S_generate([&](
438  auto __meta) constexpr {
439  return __meta._S_generator(
440  [&](auto __i) constexpr { return __tup[__i]; },
441  static_cast<_TupT*>(nullptr));
442  });
443  else
444  return [&]() {
445  __fixed_size_storage_t<_TupT, _S_first_size> __r;
446  __builtin_memcpy(__r._M_as_charptr(), __tup._M_as_charptr(),
447  sizeof(__r));
448  return __r;
449  }();
450  }
451 
452  template <typename _Tup>
453  _GLIBCXX_SIMD_INTRINSIC constexpr auto&
454  _M_skip_argument(_Tup&& __tup) const
455  {
456  static_assert(_S_tuple_size > 1);
457  using _Up = __remove_cvref_t<_Tup>;
458  constexpr size_t __off = __offset<_Up>;
459  if constexpr (_S_first_size == _Up::_S_first_size && __off == 0)
460  return __tup.second;
461  else if constexpr (_S_first_size > _Up::_S_first_size
462  && _S_first_size % _Up::_S_first_size == 0
463  && __off == 0)
464  return __simd_tuple_pop_front<_S_first_size>(__tup);
465  else if constexpr (_S_first_size + __off < _Up::_S_first_size)
466  return __add_offset<_S_first_size>(__tup);
467  else if constexpr (_S_first_size + __off == _Up::_S_first_size)
468  return __tup.second;
469  else
470  __assert_unreachable<_Tup>();
471  }
472 
473  template <size_t _Offset, typename... _More>
474  _GLIBCXX_SIMD_INTRINSIC constexpr void
475  _M_assign_front(const _SimdTuple<_Tp, _Abi0, _More...>& __x) &
476  {
477  static_assert(_Offset == 0);
478  first = __x.first;
479  if constexpr (sizeof...(_More) > 0)
480  {
481  static_assert(sizeof...(_Abis) >= sizeof...(_More));
482  second.template _M_assign_front<0>(__x.second);
483  }
484  }
485 
486  template <size_t _Offset>
487  _GLIBCXX_SIMD_INTRINSIC constexpr void
488  _M_assign_front(const _FirstType& __x) &
489  {
490  static_assert(_Offset == 0);
491  first = __x;
492  }
493 
494  template <size_t _Offset, typename... _As>
495  _GLIBCXX_SIMD_INTRINSIC constexpr void
496  _M_assign_front(const _SimdTuple<_Tp, _As...>& __x) &
497  {
498  __builtin_memcpy(_M_as_charptr() + _Offset * sizeof(value_type),
499  __x._M_as_charptr(),
500  sizeof(_Tp) * _SimdTuple<_Tp, _As...>::_S_size());
501  }
502 
 503  /*
 504  * Iterate over the `first` members of this _SimdTuple (recursively), calling
 505  * __fun for each. Additional arguments passed via __more are chunked into
 506  * _SimdTuple or __vector_type_t objects holding the same number of values.
 507  */
508  template <typename _Fp, typename... _More>
509  _GLIBCXX_SIMD_INTRINSIC constexpr _SimdTuple
510  _M_apply_per_chunk(_Fp&& __fun, _More&&... __more) const
511  {
512  if constexpr ((...
513  || conjunction_v<
514  is_lvalue_reference<_More>,
515  negation<is_const<remove_reference_t<_More>>>>) )
516  {
517  // need to write back at least one of __more after calling __fun
518  auto&& __first = [&](auto... __args) constexpr
519  {
520  auto __r = __fun(__tuple_element_meta<_Tp, _Abi0, 0>(), first,
521  __args...);
522  [[maybe_unused]] auto&& __ignore_me = {(
523  [](auto&& __dst, const auto& __src) {
524  if constexpr (is_assignable_v<decltype(__dst),
525  decltype(__dst)>)
526  {
527  __dst.template _M_assign_front<__offset<decltype(__dst)>>(
528  __src);
529  }
530  }(static_cast<_More&&>(__more), __args),
531  0)...};
532  return __r;
533  }
534  (_M_extract_argument(__more)...);
535  if constexpr (_S_tuple_size == 1)
536  return {__first};
537  else
538  return {__first,
539  second._M_apply_per_chunk(static_cast<_Fp&&>(__fun),
540  _M_skip_argument(__more)...)};
541  }
542  else
543  {
544  auto&& __first = __fun(__tuple_element_meta<_Tp, _Abi0, 0>(), first,
545  _M_extract_argument(__more)...);
546  if constexpr (_S_tuple_size == 1)
547  return {__first};
548  else
549  return {__first,
550  second._M_apply_per_chunk(static_cast<_Fp&&>(__fun),
551  _M_skip_argument(__more)...)};
552  }
553  }
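// Usage sketch (mirrors _SimdImplFixedSize::_S_min further below): apply a
// binary operation chunk-wise across two tuples of identical shape:
//
//   __a._M_apply_per_chunk(
//     [](auto __impl, auto __aa, auto __bb) constexpr
//     { return __impl._S_min(__aa, __bb); },
//     __b);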
554 
555  template <typename _R = _Tp, typename _Fp, typename... _More>
556  _GLIBCXX_SIMD_INTRINSIC auto _M_apply_r(_Fp&& __fun,
557  const _More&... __more) const
558  {
559  auto&& __first = __fun(__tuple_element_meta<_Tp, _Abi0, 0>(), first,
560  __more.first...);
561  if constexpr (_S_tuple_size == 1)
562  return __first;
563  else
564  return __simd_tuple_concat<_R>(
565  __first, second.template _M_apply_r<_R>(static_cast<_Fp&&>(__fun),
566  __more.second...));
567  }
568 
569  template <typename _Fp, typename... _More>
570  _GLIBCXX_SIMD_INTRINSIC constexpr friend _SanitizedBitMask<_S_size()>
571  _M_test(const _Fp& __fun, const _SimdTuple& __x, const _More&... __more)
572  {
573  const _SanitizedBitMask<_S_first_size> __first
574  = _Abi0::_MaskImpl::_S_to_bits(
575  __fun(__tuple_element_meta<_Tp, _Abi0, 0>(), __x.first,
576  __more.first...));
577  if constexpr (_S_tuple_size == 1)
578  return __first;
579  else
580  return _M_test(__fun, __x.second, __more.second...)
581  ._M_prepend(__first);
582  }
583 
584  template <typename _Up, _Up _I>
585  _GLIBCXX_SIMD_INTRINSIC constexpr _Tp
586  operator[](integral_constant<_Up, _I>) const noexcept
587  {
588  if constexpr (_I < simd_size_v<_Tp, _Abi0>)
589  return _M_subscript_read(_I);
590  else
591  return second[integral_constant<_Up, _I - simd_size_v<_Tp, _Abi0>>()];
592  }
593 
594  _GLIBCXX_SIMD_INTRINSIC
595  _Tp operator[](size_t __i) const noexcept
596  {
597  if constexpr (_S_tuple_size == 1)
598  return _M_subscript_read(__i);
599  else
600  {
601 #ifdef _GLIBCXX_SIMD_USE_ALIASING_LOADS
602  return reinterpret_cast<const __may_alias<_Tp>*>(this)[__i];
603 #else
604  if constexpr (__is_scalar_abi<_Abi0>())
605  {
606  const _Tp* ptr = &first;
607  return ptr[__i];
608  }
609  else
610  return __i < simd_size_v<_Tp, _Abi0>
611  ? _M_subscript_read(__i)
612  : second[__i - simd_size_v<_Tp, _Abi0>];
613 #endif
614  }
615  }
616 
617  _GLIBCXX_SIMD_INTRINSIC
618  void _M_set(size_t __i, _Tp __val) noexcept
619  {
620  if constexpr (_S_tuple_size == 1)
621  return _M_subscript_write(__i, __val);
622  else
623  {
624 #ifdef _GLIBCXX_SIMD_USE_ALIASING_LOADS
625  reinterpret_cast<__may_alias<_Tp>*>(this)[__i] = __val;
626 #else
627  if (__i < simd_size_v<_Tp, _Abi0>)
628  _M_subscript_write(__i, __val);
629  else
630  second._M_set(__i - simd_size_v<_Tp, _Abi0>, __val);
631 #endif
632  }
633  }
634 
635  private:
636  // _M_subscript_read/_write {{{
637  _GLIBCXX_SIMD_INTRINSIC
638  _Tp _M_subscript_read([[maybe_unused]] size_t __i) const noexcept
639  {
640  if constexpr (__is_vectorizable_v<_FirstType>)
641  return first;
642  else
643  return first[__i];
644  }
645 
646  _GLIBCXX_SIMD_INTRINSIC
647  void _M_subscript_write([[maybe_unused]] size_t __i, _Tp __y) noexcept
648  {
649  if constexpr (__is_vectorizable_v<_FirstType>)
650  first = __y;
651  else
652  first._M_set(__i, __y);
653  }
654 
655  // }}}
656  };
657 
658 // __make_simd_tuple {{{1
659 template <typename _Tp, typename _A0>
660  _GLIBCXX_SIMD_INTRINSIC _SimdTuple<_Tp, _A0>
661  __make_simd_tuple(simd<_Tp, _A0> __x0)
662  { return {__data(__x0)}; }
663 
664 template <typename _Tp, typename _A0, typename... _As>
665  _GLIBCXX_SIMD_INTRINSIC _SimdTuple<_Tp, _A0, _As...>
666  __make_simd_tuple(const simd<_Tp, _A0>& __x0, const simd<_Tp, _As>&... __xs)
667  { return {__data(__x0), __make_simd_tuple(__xs...)}; }
668 
669 template <typename _Tp, typename _A0>
670  _GLIBCXX_SIMD_INTRINSIC _SimdTuple<_Tp, _A0>
671  __make_simd_tuple(const typename _SimdTraits<_Tp, _A0>::_SimdMember& __arg0)
672  { return {__arg0}; }
673 
674 template <typename _Tp, typename _A0, typename _A1, typename... _Abis>
675  _GLIBCXX_SIMD_INTRINSIC _SimdTuple<_Tp, _A0, _A1, _Abis...>
676  __make_simd_tuple(
677  const typename _SimdTraits<_Tp, _A0>::_SimdMember& __arg0,
678  const typename _SimdTraits<_Tp, _A1>::_SimdMember& __arg1,
679  const typename _SimdTraits<_Tp, _Abis>::_SimdMember&... __args)
680  { return {__arg0, __make_simd_tuple<_Tp, _A1, _Abis...>(__arg1, __args...)}; }
681 
682 // __to_simd_tuple {{{1
683 template <typename _Tp, size_t _Np, typename _V, size_t _NV, typename... _VX>
684  _GLIBCXX_SIMD_INTRINSIC constexpr __fixed_size_storage_t<_Tp, _Np>
685  __to_simd_tuple(const array<_V, _NV>& __from, const _VX... __fromX);
686 
687 template <typename _Tp, size_t _Np,
688  size_t _Offset = 0, // skip this many elements in __from0
689  typename _R = __fixed_size_storage_t<_Tp, _Np>, typename _V0,
690  typename _V0VT = _VectorTraits<_V0>, typename... _VX>
691  _GLIBCXX_SIMD_INTRINSIC _R constexpr __to_simd_tuple(const _V0 __from0,
692  const _VX... __fromX)
693  {
694  static_assert(is_same_v<typename _V0VT::value_type, _Tp>);
695  static_assert(_Offset < _V0VT::_S_full_size);
696  using _R0 = __vector_type_t<_Tp, _R::_S_first_size>;
697  if constexpr (_R::_S_tuple_size == 1)
698  {
699  if constexpr (_Np == 1)
700  return _R{__from0[_Offset]};
701  else if constexpr (_Offset == 0 && _V0VT::_S_full_size >= _Np)
702  return _R{__intrin_bitcast<_R0>(__from0)};
703  else if constexpr (_Offset * 2 == _V0VT::_S_full_size
704  && _V0VT::_S_full_size / 2 >= _Np)
705  return _R{__intrin_bitcast<_R0>(__extract_part<1, 2>(__from0))};
706  else if constexpr (_Offset * 4 == _V0VT::_S_full_size
707  && _V0VT::_S_full_size / 4 >= _Np)
708  return _R{__intrin_bitcast<_R0>(__extract_part<1, 4>(__from0))};
709  else
710  __assert_unreachable<_Tp>();
711  }
712  else
713  {
714  if constexpr (1 == _R::_S_first_size)
715  { // extract one scalar and recurse
716  if constexpr (_Offset + 1 < _V0VT::_S_full_size)
717  return _R{__from0[_Offset],
718  __to_simd_tuple<_Tp, _Np - 1, _Offset + 1>(__from0,
719  __fromX...)};
720  else
721  return _R{__from0[_Offset],
722  __to_simd_tuple<_Tp, _Np - 1, 0>(__fromX...)};
723  }
724 
725  // place __from0 into _R::first and recurse for __fromX -> _R::second
726  else if constexpr (_V0VT::_S_full_size == _R::_S_first_size
727  && _Offset == 0)
728  return _R{__from0,
729  __to_simd_tuple<_Tp, _Np - _R::_S_first_size>(__fromX...)};
730 
731  // place lower part of __from0 into _R::first and recurse with _Offset
732  else if constexpr (_V0VT::_S_full_size > _R::_S_first_size
733  && _Offset == 0)
734  return _R{__intrin_bitcast<_R0>(__from0),
735  __to_simd_tuple<_Tp, _Np - _R::_S_first_size,
736  _R::_S_first_size>(__from0, __fromX...)};
737 
738  // place lower part of second quarter of __from0 into _R::first and
739  // recurse with _Offset
740  else if constexpr (_Offset * 4 == _V0VT::_S_full_size
741  && _V0VT::_S_full_size >= 4 * _R::_S_first_size)
742  return _R{__intrin_bitcast<_R0>(__extract_part<2, 4>(__from0)),
743  __to_simd_tuple<_Tp, _Np - _R::_S_first_size,
744  _Offset + _R::_S_first_size>(__from0,
745  __fromX...)};
746 
747  // place lower half of high half of __from0 into _R::first and recurse
748  // with _Offset
749  else if constexpr (_Offset * 2 == _V0VT::_S_full_size
750  && _V0VT::_S_full_size >= 4 * _R::_S_first_size)
751  return _R{__intrin_bitcast<_R0>(__extract_part<2, 4>(__from0)),
752  __to_simd_tuple<_Tp, _Np - _R::_S_first_size,
753  _Offset + _R::_S_first_size>(__from0,
754  __fromX...)};
755 
756  // place high half of __from0 into _R::first and recurse with __fromX
757  else if constexpr (_Offset * 2 == _V0VT::_S_full_size
758  && _V0VT::_S_full_size / 2 >= _R::_S_first_size)
759  return _R{__intrin_bitcast<_R0>(__extract_part<1, 2>(__from0)),
760  __to_simd_tuple<_Tp, _Np - _R::_S_first_size, 0>(
761  __fromX...)};
762 
 763  // ill-formed if some unforeseen pattern is needed
764  else
765  __assert_unreachable<_Tp>();
766  }
767  }
768 
769 template <typename _Tp, size_t _Np, typename _V, size_t _NV, typename... _VX>
770  _GLIBCXX_SIMD_INTRINSIC constexpr __fixed_size_storage_t<_Tp, _Np>
771  __to_simd_tuple(const array<_V, _NV>& __from, const _VX... __fromX)
772  {
773  if constexpr (is_same_v<_Tp, _V>)
774  {
775  static_assert(
776  sizeof...(_VX) == 0,
777  "An array of scalars must be the last argument to __to_simd_tuple");
778  return __call_with_subscripts(
779  __from,
780  make_index_sequence<_NV>(), [&](const auto... __args) constexpr {
781  return __simd_tuple_concat(
782  _SimdTuple<_Tp, simd_abi::scalar>{__args}..., _SimdTuple<_Tp>());
783  });
784  }
785  else
786  return __call_with_subscripts(
787  __from,
788  make_index_sequence<_NV>(), [&](const auto... __args) constexpr {
789  return __to_simd_tuple<_Tp, _Np>(__args..., __fromX...);
790  });
791  }
792 
793 template <size_t, typename _Tp>
794  using __to_tuple_helper = _Tp;
795 
796 template <typename _Tp, typename _A0, size_t _NOut, size_t _Np,
797  size_t... _Indexes>
798  _GLIBCXX_SIMD_INTRINSIC __fixed_size_storage_t<_Tp, _NOut>
799  __to_simd_tuple_impl(index_sequence<_Indexes...>,
800  const array<__vector_type_t<_Tp, simd_size_v<_Tp, _A0>>, _Np>& __args)
801  {
802  return __make_simd_tuple<_Tp, __to_tuple_helper<_Indexes, _A0>...>(
803  __args[_Indexes]...);
804  }
805 
806 template <typename _Tp, typename _A0, size_t _NOut, size_t _Np,
807  typename _R = __fixed_size_storage_t<_Tp, _NOut>>
808  _GLIBCXX_SIMD_INTRINSIC _R
809  __to_simd_tuple_sized(
810  const array<__vector_type_t<_Tp, simd_size_v<_Tp, _A0>>, _Np>& __args)
811  {
812  static_assert(_Np * simd_size_v<_Tp, _A0> >= _NOut);
813  return __to_simd_tuple_impl<_Tp, _A0, _NOut>(
814  make_index_sequence<_R::_S_tuple_size>(), __args);
815  }
816 
817 // __optimize_simd_tuple {{{1
818 template <typename _Tp>
819  _GLIBCXX_SIMD_INTRINSIC _SimdTuple<_Tp>
820  __optimize_simd_tuple(const _SimdTuple<_Tp>)
821  { return {}; }
822 
823 template <typename _Tp, typename _Ap>
824  _GLIBCXX_SIMD_INTRINSIC const _SimdTuple<_Tp, _Ap>&
825  __optimize_simd_tuple(const _SimdTuple<_Tp, _Ap>& __x)
826  { return __x; }
827 
828 template <typename _Tp, typename _A0, typename _A1, typename... _Abis,
829  typename _R = __fixed_size_storage_t<
830  _Tp, _SimdTuple<_Tp, _A0, _A1, _Abis...>::_S_size()>>
831  _GLIBCXX_SIMD_INTRINSIC _R
832  __optimize_simd_tuple(const _SimdTuple<_Tp, _A0, _A1, _Abis...>& __x)
833  {
834  using _Tup = _SimdTuple<_Tp, _A0, _A1, _Abis...>;
835  if constexpr (is_same_v<_R, _Tup>)
836  return __x;
837  else if constexpr (is_same_v<typename _R::_FirstType,
838  typename _Tup::_FirstType>)
839  return {__x.first, __optimize_simd_tuple(__x.second)};
840  else if constexpr (__is_scalar_abi<_A0>()
841  || _A0::template _S_is_partial<_Tp>)
842  return {__generate_from_n_evaluations<_R::_S_first_size,
843  typename _R::_FirstType>(
844  [&](auto __i) { return __x[__i]; }),
845  __optimize_simd_tuple(
846  __simd_tuple_pop_front<_R::_S_first_size>(__x))};
847  else if constexpr (is_same_v<_A0, _A1>
848  && _R::_S_first_size == simd_size_v<_Tp, _A0> + simd_size_v<_Tp, _A1>)
849  return {__concat(__x.template _M_at<0>(), __x.template _M_at<1>()),
850  __optimize_simd_tuple(__x.second.second)};
851  else if constexpr (sizeof...(_Abis) >= 2
852  && _R::_S_first_size == (4 * simd_size_v<_Tp, _A0>)
853  && simd_size_v<_Tp, _A0> == __simd_tuple_element_t<
854  (sizeof...(_Abis) >= 2 ? 3 : 0), _Tup>::size())
855  return {
856  __concat(__concat(__x.template _M_at<0>(), __x.template _M_at<1>()),
857  __concat(__x.template _M_at<2>(), __x.template _M_at<3>())),
858  __optimize_simd_tuple(__x.second.second.second.second)};
859  else
860  {
861  static_assert(sizeof(_R) == sizeof(__x));
862  _R __r;
863  __builtin_memcpy(__r._M_as_charptr(), __x._M_as_charptr(),
864  sizeof(_Tp) * _R::_S_size());
865  return __r;
866  }
867  }
868 
869 // __for_each(const _SimdTuple &, Fun) {{{1
870 template <size_t _Offset = 0, typename _Tp, typename _A0, typename _Fp>
871  _GLIBCXX_SIMD_INTRINSIC constexpr void
872  __for_each(const _SimdTuple<_Tp, _A0>& __t, _Fp&& __fun)
873  { static_cast<_Fp&&>(__fun)(__make_meta<_Offset>(__t), __t.first); }
874 
875 template <size_t _Offset = 0, typename _Tp, typename _A0, typename _A1,
876  typename... _As, typename _Fp>
877  _GLIBCXX_SIMD_INTRINSIC constexpr void
878  __for_each(const _SimdTuple<_Tp, _A0, _A1, _As...>& __t, _Fp&& __fun)
879  {
880  __fun(__make_meta<_Offset>(__t), __t.first);
881  __for_each<_Offset + simd_size<_Tp, _A0>::value>(__t.second,
882  static_cast<_Fp&&>(__fun));
883  }
884 
885 // __for_each(_SimdTuple &, Fun) {{{1
886 template <size_t _Offset = 0, typename _Tp, typename _A0, typename _Fp>
887  _GLIBCXX_SIMD_INTRINSIC constexpr void
888  __for_each(_SimdTuple<_Tp, _A0>& __t, _Fp&& __fun)
889  { static_cast<_Fp&&>(__fun)(__make_meta<_Offset>(__t), __t.first); }
890 
891 template <size_t _Offset = 0, typename _Tp, typename _A0, typename _A1,
892  typename... _As, typename _Fp>
893  _GLIBCXX_SIMD_INTRINSIC constexpr void
894  __for_each(_SimdTuple<_Tp, _A0, _A1, _As...>& __t, _Fp&& __fun)
895  {
896  __fun(__make_meta<_Offset>(__t), __t.first);
897  __for_each<_Offset + simd_size<_Tp, _A0>::value>(__t.second,
898  static_cast<_Fp&&>(__fun));
899  }
900 
901 // __for_each(_SimdTuple &, const _SimdTuple &, Fun) {{{1
902 template <size_t _Offset = 0, typename _Tp, typename _A0, typename _Fp>
903  _GLIBCXX_SIMD_INTRINSIC constexpr void
904  __for_each(_SimdTuple<_Tp, _A0>& __a, const _SimdTuple<_Tp, _A0>& __b,
905  _Fp&& __fun)
906  {
907  static_cast<_Fp&&>(__fun)(__make_meta<_Offset>(__a), __a.first, __b.first);
908  }
909 
910 template <size_t _Offset = 0, typename _Tp, typename _A0, typename _A1,
911  typename... _As, typename _Fp>
912  _GLIBCXX_SIMD_INTRINSIC constexpr void
913  __for_each(_SimdTuple<_Tp, _A0, _A1, _As...>& __a,
914  const _SimdTuple<_Tp, _A0, _A1, _As...>& __b, _Fp&& __fun)
915  {
916  __fun(__make_meta<_Offset>(__a), __a.first, __b.first);
917  __for_each<_Offset + simd_size<_Tp, _A0>::value>(__a.second, __b.second,
918  static_cast<_Fp&&>(__fun));
919  }
920 
921 // __for_each(const _SimdTuple &, const _SimdTuple &, Fun) {{{1
922 template <size_t _Offset = 0, typename _Tp, typename _A0, typename _Fp>
923  _GLIBCXX_SIMD_INTRINSIC constexpr void
924  __for_each(const _SimdTuple<_Tp, _A0>& __a, const _SimdTuple<_Tp, _A0>& __b,
925  _Fp&& __fun)
926  {
927  static_cast<_Fp&&>(__fun)(__make_meta<_Offset>(__a), __a.first, __b.first);
928  }
929 
930 template <size_t _Offset = 0, typename _Tp, typename _A0, typename _A1,
931  typename... _As, typename _Fp>
932  _GLIBCXX_SIMD_INTRINSIC constexpr void
933  __for_each(const _SimdTuple<_Tp, _A0, _A1, _As...>& __a,
934  const _SimdTuple<_Tp, _A0, _A1, _As...>& __b, _Fp&& __fun)
935  {
936  __fun(__make_meta<_Offset>(__a), __a.first, __b.first);
937  __for_each<_Offset + simd_size<_Tp, _A0>::value>(__a.second, __b.second,
938  static_cast<_Fp&&>(__fun));
939  }
940 
941 // }}}1
942 // __extract_part(_SimdTuple) {{{
943 template <int _Index, int _Total, int _Combine, typename _Tp, typename _A0,
944  typename... _As>
945  _GLIBCXX_SIMD_INTRINSIC auto // __vector_type_t or _SimdTuple
946  __extract_part(const _SimdTuple<_Tp, _A0, _As...>& __x)
947  {
 948  // worst cases (tuple member sizes => requested part sizes):
949  // (a) 4, 4, 4 => 3, 3, 3, 3 (_Total = 4)
950  // (b) 2, 2, 2 => 3, 3 (_Total = 2)
951  // (c) 4, 2 => 2, 2, 2 (_Total = 3)
952  using _Tuple = _SimdTuple<_Tp, _A0, _As...>;
953  static_assert(_Index + _Combine <= _Total && _Index >= 0 && _Total >= 1);
954  constexpr size_t _Np = _Tuple::_S_size();
955  static_assert(_Np >= _Total && _Np % _Total == 0);
956  constexpr size_t __values_per_part = _Np / _Total;
957  [[maybe_unused]] constexpr size_t __values_to_skip
958  = _Index * __values_per_part;
959  constexpr size_t __return_size = __values_per_part * _Combine;
960  using _RetAbi = simd_abi::deduce_t<_Tp, __return_size>;
961 
962  // handle (optimize) the simple cases
963  if constexpr (_Index == 0 && _Tuple::_S_first_size == __return_size)
964  return __x.first._M_data;
965  else if constexpr (_Index == 0 && _Total == _Combine)
966  return __x;
967  else if constexpr (_Index == 0 && _Tuple::_S_first_size >= __return_size)
968  return __intrin_bitcast<__vector_type_t<_Tp, __return_size>>(
969  __as_vector(__x.first));
970 
971  // recurse to skip unused data members at the beginning of _SimdTuple
972  else if constexpr (__values_to_skip >= _Tuple::_S_first_size)
973  { // recurse
974  if constexpr (_Tuple::_S_first_size % __values_per_part == 0)
975  {
976  constexpr int __parts_in_first
977  = _Tuple::_S_first_size / __values_per_part;
978  return __extract_part<_Index - __parts_in_first,
979  _Total - __parts_in_first, _Combine>(
980  __x.second);
981  }
982  else
983  return __extract_part<__values_to_skip - _Tuple::_S_first_size,
984  _Np - _Tuple::_S_first_size, __return_size>(
985  __x.second);
986  }
987 
988  // extract from multiple _SimdTuple data members
989  else if constexpr (__return_size > _Tuple::_S_first_size - __values_to_skip)
990  {
991 #ifdef _GLIBCXX_SIMD_USE_ALIASING_LOADS
992  const __may_alias<_Tp>* const element_ptr
993  = reinterpret_cast<const __may_alias<_Tp>*>(&__x) + __values_to_skip;
994  return __as_vector(simd<_Tp, _RetAbi>(element_ptr, element_aligned));
995 #else
996  [[maybe_unused]] constexpr size_t __offset = __values_to_skip;
997  return __as_vector(simd<_Tp, _RetAbi>([&](auto __i) constexpr {
998  constexpr _SizeConstant<__i + __offset> __k;
999  return __x[__k];
1000  }));
1001 #endif
1002  }
1003 
1004  // all of the return values are in __x.first
1005  else if constexpr (_Tuple::_S_first_size % __values_per_part == 0)
1006  return __extract_part<_Index, _Tuple::_S_first_size / __values_per_part,
1007  _Combine>(__x.first);
1008  else
1009  return __extract_part<__values_to_skip, _Tuple::_S_first_size,
1010  _Combine * __values_per_part>(__x.first);
1011  }
1012 
1013 // }}}
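// Illustrative reading of the parameters above: for a _SimdTuple holding 8
// floats, __extract_part<1, 2>(__x) computes __values_per_part = 4,
// __values_to_skip = 4 and __return_size = 4, i.e. it returns elements [4, 8)
// of __x (as a __vector_type_t when they sit in a single tuple member,
// otherwise as a _SimdTuple).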
1014 // __fixed_size_storage_t<_Tp, _Np>{{{
1015 template <typename _Tp, int _Np, typename _Tuple,
1016  typename _Next = simd<_Tp, _AllNativeAbis::_BestAbi<_Tp, _Np>>,
1017  int _Remain = _Np - int(_Next::size())>
1018  struct __fixed_size_storage_builder;
1019 
1020 template <typename _Tp, int _Np>
1021  struct __fixed_size_storage
1022  : public __fixed_size_storage_builder<_Tp, _Np, _SimdTuple<_Tp>> {};
1023 
1024 template <typename _Tp, int _Np, typename... _As, typename _Next>
1025  struct __fixed_size_storage_builder<_Tp, _Np, _SimdTuple<_Tp, _As...>, _Next,
1026  0>
1027  { using type = _SimdTuple<_Tp, _As..., typename _Next::abi_type>; };
1028 
1029 template <typename _Tp, int _Np, typename... _As, typename _Next, int _Remain>
1030  struct __fixed_size_storage_builder<_Tp, _Np, _SimdTuple<_Tp, _As...>, _Next,
1031  _Remain>
1032  {
1033  using type = typename __fixed_size_storage_builder<
1034  _Tp, _Remain, _SimdTuple<_Tp, _As..., typename _Next::abi_type>>::type;
1035  };
1036 
1037 // }}}
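// Illustrative decomposition (target-dependent; assume the widest native ABI
// for float holds 4 elements, e.g. a 16-byte vector): the builder above peels
// off the best-fitting native chunk and recurses on the remainder until
// _Remain reaches 0, so __fixed_size_storage_t<float, 8> becomes a _SimdTuple
// of two 4-wide native members; sizes that do not divide evenly end in smaller
// native or scalar members.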
1038 // __autocvt_to_simd {{{
1039 template <typename _Tp, bool = is_arithmetic_v<__remove_cvref_t<_Tp>>>
1040  struct __autocvt_to_simd
1041  {
1042  _Tp _M_data;
1043  using _TT = __remove_cvref_t<_Tp>;
1044 
1045  _GLIBCXX_SIMD_INTRINSIC
1046  operator _TT()
1047  { return _M_data; }
1048 
1049  _GLIBCXX_SIMD_INTRINSIC
1050  operator _TT&()
1051  {
1052  static_assert(is_lvalue_reference<_Tp>::value, "");
1053  static_assert(!is_const<_Tp>::value, "");
1054  return _M_data;
1055  }
1056 
1057  _GLIBCXX_SIMD_INTRINSIC
1058  operator _TT*()
1059  {
1060  static_assert(is_lvalue_reference<_Tp>::value, "");
1061  static_assert(!is_const<_Tp>::value, "");
1062  return &_M_data;
1063  }
1064 
1065  _GLIBCXX_SIMD_INTRINSIC
1066  constexpr __autocvt_to_simd(_Tp dd) : _M_data(dd) {}
1067 
1068  template <typename _Abi>
1069  _GLIBCXX_SIMD_INTRINSIC
1070  operator simd<typename _TT::value_type, _Abi>()
1071  { return {__private_init, _M_data}; }
1072 
1073  template <typename _Abi>
1074  _GLIBCXX_SIMD_INTRINSIC
1075  operator simd<typename _TT::value_type, _Abi>&()
1076  {
1077  return *reinterpret_cast<simd<typename _TT::value_type, _Abi>*>(
1078  &_M_data);
1079  }
1080 
1081  template <typename _Abi>
1082  _GLIBCXX_SIMD_INTRINSIC
1083  operator simd<typename _TT::value_type, _Abi>*()
1084  {
1085  return reinterpret_cast<simd<typename _TT::value_type, _Abi>*>(
1086  &_M_data);
1087  }
1088  };
1089 
1090 template <typename _Tp>
1091  __autocvt_to_simd(_Tp &&) -> __autocvt_to_simd<_Tp>;
1092 
1093 template <typename _Tp>
1094  struct __autocvt_to_simd<_Tp, true>
1095  {
1096  using _TT = __remove_cvref_t<_Tp>;
1097  _Tp _M_data;
1098  fixed_size_simd<_TT, 1> _M_fd;
1099 
1100  _GLIBCXX_SIMD_INTRINSIC
1101  constexpr __autocvt_to_simd(_Tp dd) : _M_data(dd), _M_fd(_M_data) {}
1102 
1103  _GLIBCXX_SIMD_INTRINSIC
1104  ~__autocvt_to_simd()
1105  { _M_data = __data(_M_fd).first; }
1106 
1107  _GLIBCXX_SIMD_INTRINSIC
1108  operator fixed_size_simd<_TT, 1>()
1109  { return _M_fd; }
1110 
1111  _GLIBCXX_SIMD_INTRINSIC
1112  operator fixed_size_simd<_TT, 1> &()
1113  {
1114  static_assert(is_lvalue_reference<_Tp>::value, "");
1115  static_assert(!is_const<_Tp>::value, "");
1116  return _M_fd;
1117  }
1118 
1119  _GLIBCXX_SIMD_INTRINSIC
1120  operator fixed_size_simd<_TT, 1> *()
1121  {
1122  static_assert(is_lvalue_reference<_Tp>::value, "");
1123  static_assert(!is_const<_Tp>::value, "");
1124  return &_M_fd;
1125  }
1126  };
1127 
1128 // }}}
1129 
1130 struct _CommonImplFixedSize;
1131 template <int _Np, typename = __detail::__odr_helper> struct _SimdImplFixedSize;
1132 template <int _Np, typename = __detail::__odr_helper> struct _MaskImplFixedSize;
1133 // simd_abi::_Fixed {{{
1134 template <int _Np>
1135  struct simd_abi::_Fixed
1136  {
1137  template <typename _Tp> static constexpr size_t _S_size = _Np;
1138  template <typename _Tp> static constexpr size_t _S_full_size = _Np;
1139  // validity traits {{{
1140  struct _IsValidAbiTag : public __bool_constant<(_Np > 0)> {};
1141 
1142  template <typename _Tp>
1143  struct _IsValidSizeFor
1144  : __bool_constant<(_Np <= simd_abi::max_fixed_size<_Tp>)> {};
1145 
1146  template <typename _Tp>
1147  struct _IsValid : conjunction<_IsValidAbiTag, __is_vectorizable<_Tp>,
1148  _IsValidSizeFor<_Tp>> {};
1149 
1150  template <typename _Tp>
1151  static constexpr bool _S_is_valid_v = _IsValid<_Tp>::value;
1152 
1153  // }}}
1154  // _S_masked {{{
1155  _GLIBCXX_SIMD_INTRINSIC static constexpr _SanitizedBitMask<_Np>
1156  _S_masked(_BitMask<_Np> __x)
1157  { return __x._M_sanitized(); }
1158 
1159  _GLIBCXX_SIMD_INTRINSIC static constexpr _SanitizedBitMask<_Np>
1160  _S_masked(_SanitizedBitMask<_Np> __x)
1161  { return __x; }
1162 
1163  // }}}
1164  // _*Impl {{{
1165  using _CommonImpl = _CommonImplFixedSize;
1166  using _SimdImpl = _SimdImplFixedSize<_Np>;
1167  using _MaskImpl = _MaskImplFixedSize<_Np>;
1168 
1169  // }}}
1170  // __traits {{{
1171  template <typename _Tp, bool = _S_is_valid_v<_Tp>>
1172  struct __traits : _InvalidTraits {};
1173 
1174  template <typename _Tp>
1175  struct __traits<_Tp, true>
1176  {
1177  using _IsValid = true_type;
1178  using _SimdImpl = _SimdImplFixedSize<_Np>;
1179  using _MaskImpl = _MaskImplFixedSize<_Np>;
1180 
1181  // simd and simd_mask member types {{{
1182  using _SimdMember = __fixed_size_storage_t<_Tp, _Np>;
1183  using _MaskMember = _SanitizedBitMask<_Np>;
1184 
1185  static constexpr size_t _S_simd_align
1186  = std::__bit_ceil(_Np * sizeof(_Tp));
1187 
1188  static constexpr size_t _S_mask_align = alignof(_MaskMember);
1189 
1190  // }}}
1191  // _SimdBase / base class for simd, providing extra conversions {{{
1192  struct _SimdBase
1193  {
 1194  // The following ensures that function arguments are passed via the stack.
 1195  // This is important for ABI compatibility across TU boundaries.
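 // (The user-provided copy constructor makes _SimdBase, and therefore simd,
 // not trivially copyable, so the psABI passes such arguments in memory
 // rather than in registers.)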
1196  _GLIBCXX_SIMD_ALWAYS_INLINE
1197  _SimdBase(const _SimdBase&) {}
1198  _SimdBase() = default;
1199 
1200  _GLIBCXX_SIMD_ALWAYS_INLINE
1201  explicit operator const _SimdMember &() const
1202  { return static_cast<const simd<_Tp, _Fixed>*>(this)->_M_data; }
1203 
1204  _GLIBCXX_SIMD_ALWAYS_INLINE
1205  explicit operator array<_Tp, _Np>() const
1206  {
1207  array<_Tp, _Np> __r;
1208  // _SimdMember can be larger because of higher alignment
1209  static_assert(sizeof(__r) <= sizeof(_SimdMember), "");
1210  __builtin_memcpy(__r.data(), &static_cast<const _SimdMember&>(*this),
1211  sizeof(__r));
1212  return __r;
1213  }
1214  };
1215 
1216  // }}}
1217  // _MaskBase {{{
1218  // empty. The bitset interface suffices
1219  struct _MaskBase {};
1220 
1221  // }}}
1222  // _SimdCastType {{{
1223  struct _SimdCastType
1224  {
1225  _GLIBCXX_SIMD_ALWAYS_INLINE
1226  _SimdCastType(const array<_Tp, _Np>&);
1227  _GLIBCXX_SIMD_ALWAYS_INLINE
1228  _SimdCastType(const _SimdMember& dd) : _M_data(dd) {}
1229  _GLIBCXX_SIMD_ALWAYS_INLINE
1230  explicit operator const _SimdMember &() const { return _M_data; }
1231 
1232  private:
1233  const _SimdMember& _M_data;
1234  };
1235 
1236  // }}}
1237  // _MaskCastType {{{
1238  class _MaskCastType
1239  {
1240  _MaskCastType() = delete;
1241  };
1242  // }}}
1243  };
1244  // }}}
1245  };
1246 
1247 // }}}
1248 // _CommonImplFixedSize {{{
1249 struct _CommonImplFixedSize
1250 {
1251  // _S_store {{{
1252  template <typename _Tp, typename... _As>
1253  _GLIBCXX_SIMD_INTRINSIC static void
1254  _S_store(const _SimdTuple<_Tp, _As...>& __x, void* __addr)
1255  {
1256  constexpr size_t _Np = _SimdTuple<_Tp, _As...>::_S_size();
1257  __builtin_memcpy(__addr, &__x, _Np * sizeof(_Tp));
1258  }
1259 
1260  // }}}
1261 };
1262 
1263 // }}}
1264 // _SimdImplFixedSize {{{1
 1265 // fixed_size must not inherit from _SimdMathFallback, so that the math
 1266 // specializations of the ABIs used inside the _SimdTuple are picked up
1267 template <int _Np, typename>
1268  struct _SimdImplFixedSize
1269  {
1270  // member types {{{2
1271  using _MaskMember = _SanitizedBitMask<_Np>;
1272 
1273  template <typename _Tp>
1274  using _SimdMember = __fixed_size_storage_t<_Tp, _Np>;
1275 
1276  template <typename _Tp>
1277  static constexpr size_t _S_tuple_size = _SimdMember<_Tp>::_S_tuple_size;
1278 
1279  template <typename _Tp>
1280  using _Simd = simd<_Tp, simd_abi::fixed_size<_Np>>;
1281 
1282  template <typename _Tp>
1283  using _TypeTag = _Tp*;
1284 
1285  // broadcast {{{2
1286  template <typename _Tp>
1287  static constexpr inline _SimdMember<_Tp> _S_broadcast(_Tp __x) noexcept
1288  {
1289  return _SimdMember<_Tp>::_S_generate([&](auto __meta) constexpr {
1290  return __meta._S_broadcast(__x);
1291  });
1292  }
1293 
1294  // _S_generator {{{2
1295  template <typename _Fp, typename _Tp>
1296  static constexpr inline _SimdMember<_Tp> _S_generator(_Fp&& __gen,
1297  _TypeTag<_Tp>)
1298  {
1299  return _SimdMember<_Tp>::_S_generate([&__gen](auto __meta) constexpr {
1300  return __meta._S_generator(
1301  [&](auto __i) constexpr {
1302  return __i < _Np ? __gen(_SizeConstant<__meta._S_offset + __i>())
1303  : 0;
1304  },
1305  _TypeTag<_Tp>());
1306  });
1307  }
1308 
1309  // _S_load {{{2
1310  template <typename _Tp, typename _Up>
1311  static inline _SimdMember<_Tp> _S_load(const _Up* __mem,
1312  _TypeTag<_Tp>) noexcept
1313  {
1314  return _SimdMember<_Tp>::_S_generate([&](auto __meta) {
1315  return __meta._S_load(&__mem[__meta._S_offset], _TypeTag<_Tp>());
1316  });
1317  }
1318 
1319  // _S_masked_load {{{2
1320  template <typename _Tp, typename... _As, typename _Up>
1321  static inline _SimdTuple<_Tp, _As...>
1322  _S_masked_load(const _SimdTuple<_Tp, _As...>& __old,
1323  const _MaskMember __bits, const _Up* __mem) noexcept
1324  {
1325  auto __merge = __old;
1326  __for_each(__merge, [&](auto __meta, auto& __native) {
1327  if (__meta._S_submask(__bits).any())
1328 #pragma GCC diagnostic push
 1329  // __mem + __meta._S_offset could be UB ([expr.add]/4.3), but the interface
 1330  // punts the responsibility for avoiding that UB to the caller of the
 1331  // masked load via the mask. Consequently, the compiler may assume this
 1332  // branch is unreachable if the pointer arithmetic is UB.
1333 #pragma GCC diagnostic ignored "-Warray-bounds"
1334  __native
1335  = __meta._S_masked_load(__native, __meta._S_make_mask(__bits),
1336  __mem + __meta._S_offset);
1337 #pragma GCC diagnostic pop
1338  });
1339  return __merge;
1340  }
1341 
1342  // _S_store {{{2
1343  template <typename _Tp, typename _Up>
1344  static inline void _S_store(const _SimdMember<_Tp>& __v, _Up* __mem,
1345  _TypeTag<_Tp>) noexcept
1346  {
1347  __for_each(__v, [&](auto __meta, auto __native) {
1348  __meta._S_store(__native, &__mem[__meta._S_offset], _TypeTag<_Tp>());
1349  });
1350  }
1351 
1352  // _S_masked_store {{{2
1353  template <typename _Tp, typename... _As, typename _Up>
1354  static inline void _S_masked_store(const _SimdTuple<_Tp, _As...>& __v,
1355  _Up* __mem,
1356  const _MaskMember __bits) noexcept
1357  {
1358  __for_each(__v, [&](auto __meta, auto __native) {
1359  if (__meta._S_submask(__bits).any())
1360 #pragma GCC diagnostic push
 1361  // __mem + __meta._S_offset could be UB ([expr.add]/4.3), but the interface
 1362  // punts the responsibility for avoiding that UB to the caller of the
 1363  // masked store via the mask. Consequently, the compiler may assume this
 1364  // branch is unreachable if the pointer arithmetic is UB.
1365 #pragma GCC diagnostic ignored "-Warray-bounds"
1366  __meta._S_masked_store(__native, __mem + __meta._S_offset,
1367  __meta._S_make_mask(__bits));
1368 #pragma GCC diagnostic pop
1369  });
1370  }
1371 
1372  // negation {{{2
1373  template <typename _Tp, typename... _As>
1374  static inline _MaskMember
1375  _S_negate(const _SimdTuple<_Tp, _As...>& __x) noexcept
1376  {
1377  _MaskMember __bits = 0;
1378  __for_each(
1379  __x, [&__bits](auto __meta, auto __native) constexpr {
1380  __bits
1381  |= __meta._S_mask_to_shifted_ullong(__meta._S_negate(__native));
1382  });
1383  return __bits;
1384  }
1385 
1386  // reductions {{{2
1387  template <typename _Tp, typename _BinaryOperation>
1388  static constexpr inline _Tp _S_reduce(const _Simd<_Tp>& __x,
1389  const _BinaryOperation& __binary_op)
1390  {
1391  using _Tup = _SimdMember<_Tp>;
1392  const _Tup& __tup = __data(__x);
1393  if constexpr (_Tup::_S_tuple_size == 1)
1394  return _Tup::_FirstAbi::_SimdImpl::_S_reduce(
1395  __tup.template _M_simd_at<0>(), __binary_op);
1396  else if constexpr (_Tup::_S_tuple_size == 2 && _Tup::_S_size() > 2
1397  && _Tup::_SecondType::_S_size() == 1)
1398  {
1399  return __binary_op(simd<_Tp, simd_abi::scalar>(
1400  reduce(__tup.template _M_simd_at<0>(),
1401  __binary_op)),
1402  __tup.template _M_simd_at<1>())[0];
1403  }
1404  else if constexpr (_Tup::_S_tuple_size == 2 && _Tup::_S_size() > 4
1405  && _Tup::_SecondType::_S_size() == 2)
1406  {
1407  return __binary_op(
1408  simd<_Tp, simd_abi::scalar>(
1409  reduce(__tup.template _M_simd_at<0>(), __binary_op)),
1410  simd<_Tp, simd_abi::scalar>(
1411  reduce(__tup.template _M_simd_at<1>(), __binary_op)))[0];
1412  }
1413  else
1414  {
1415  const auto& __x2 = __call_with_n_evaluations<
1416  __div_roundup(_Tup::_S_tuple_size, 2)>(
1417  [](auto __first_simd, auto... __remaining) {
1418  if constexpr (sizeof...(__remaining) == 0)
1419  return __first_simd;
1420  else
1421  {
1422  using _Tup2
1423  = _SimdTuple<_Tp,
1424  typename decltype(__first_simd)::abi_type,
1425  typename decltype(__remaining)::abi_type...>;
1426  return fixed_size_simd<_Tp, _Tup2::_S_size()>(
1427  __private_init,
1428  __make_simd_tuple(__first_simd, __remaining...));
1429  }
1430  },
1431  [&](auto __i) {
1432  auto __left = __tup.template _M_simd_at<2 * __i>();
1433  if constexpr (2 * __i + 1 == _Tup::_S_tuple_size)
1434  return __left;
1435  else
1436  {
1437  auto __right = __tup.template _M_simd_at<2 * __i + 1>();
1438  using _LT = decltype(__left);
1439  using _RT = decltype(__right);
1440  if constexpr (_LT::size() == _RT::size())
1441  return __binary_op(__left, __right);
1442  else
1443  {
1444  _GLIBCXX_SIMD_USE_CONSTEXPR_API
1445  typename _LT::mask_type __k(
1446  __private_init,
1447  [](auto __j) constexpr { return __j < _RT::size(); });
1448  _LT __ext_right = __left;
1449  where(__k, __ext_right)
1450  = __proposed::resizing_simd_cast<_LT>(__right);
1451  where(__k, __left) = __binary_op(__left, __ext_right);
1452  return __left;
1453  }
1454  }
1455  });
1456  return reduce(__x2, __binary_op);
1457  }
1458  }
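 // Note on the mixed-size branch above: when __left and __right differ in
 // width, only the first _RT::size() lanes of __left are combined with
 // __right; the remaining lanes of __left pass through unchanged and are
 // folded in by the final reduce over __x2.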
1459 
1460  // _S_min, _S_max {{{2
1461  template <typename _Tp, typename... _As>
1462  static inline constexpr _SimdTuple<_Tp, _As...>
1463  _S_min(const _SimdTuple<_Tp, _As...>& __a,
1464  const _SimdTuple<_Tp, _As...>& __b)
1465  {
1466  return __a._M_apply_per_chunk(
1467  [](auto __impl, auto __aa, auto __bb) constexpr {
1468  return __impl._S_min(__aa, __bb);
1469  },
1470  __b);
1471  }
1472 
1473  template <typename _Tp, typename... _As>
1474  static inline constexpr _SimdTuple<_Tp, _As...>
1475  _S_max(const _SimdTuple<_Tp, _As...>& __a,
1476  const _SimdTuple<_Tp, _As...>& __b)
1477  {
1478  return __a._M_apply_per_chunk(
1479  [](auto __impl, auto __aa, auto __bb) constexpr {
1480  return __impl._S_max(__aa, __bb);
1481  },
1482  __b);
1483  }
1484 
1485  // _S_complement {{{2
1486  template <typename _Tp, typename... _As>
1487  static inline constexpr _SimdTuple<_Tp, _As...>
1488  _S_complement(const _SimdTuple<_Tp, _As...>& __x) noexcept
1489  {
1490  return __x._M_apply_per_chunk([](auto __impl, auto __xx) constexpr {
1491  return __impl._S_complement(__xx);
1492  });
1493  }
1494 
1495  // _S_unary_minus {{{2
1496  template <typename _Tp, typename... _As>
1497  static inline constexpr _SimdTuple<_Tp, _As...>
1498  _S_unary_minus(const _SimdTuple<_Tp, _As...>& __x) noexcept
1499  {
1500  return __x._M_apply_per_chunk([](auto __impl, auto __xx) constexpr {
1501  return __impl._S_unary_minus(__xx);
1502  });
1503  }
1504 
1505  // arithmetic operators {{{2
1506 
1507 #define _GLIBCXX_SIMD_FIXED_OP(name_, op_) \
1508  template <typename _Tp, typename... _As> \
1509  static inline constexpr _SimdTuple<_Tp, _As...> name_( \
1510  const _SimdTuple<_Tp, _As...>& __x, const _SimdTuple<_Tp, _As...>& __y)\
1511  { \
1512  return __x._M_apply_per_chunk( \
1513  [](auto __impl, auto __xx, auto __yy) constexpr { \
1514  return __impl.name_(__xx, __yy); \
1515  }, \
1516  __y); \
1517  }
1518 
1519  _GLIBCXX_SIMD_FIXED_OP(_S_plus, +)
1520  _GLIBCXX_SIMD_FIXED_OP(_S_minus, -)
1521  _GLIBCXX_SIMD_FIXED_OP(_S_multiplies, *)
1522  _GLIBCXX_SIMD_FIXED_OP(_S_divides, /)
1523  _GLIBCXX_SIMD_FIXED_OP(_S_modulus, %)
1524  _GLIBCXX_SIMD_FIXED_OP(_S_bit_and, &)
1525  _GLIBCXX_SIMD_FIXED_OP(_S_bit_or, |)
1526  _GLIBCXX_SIMD_FIXED_OP(_S_bit_xor, ^)
1527  _GLIBCXX_SIMD_FIXED_OP(_S_bit_shift_left, <<)
1528  _GLIBCXX_SIMD_FIXED_OP(_S_bit_shift_right, >>)
1529 #undef _GLIBCXX_SIMD_FIXED_OP
1530 
1531  template <typename _Tp, typename... _As>
1532  static inline constexpr _SimdTuple<_Tp, _As...>
1533  _S_bit_shift_left(const _SimdTuple<_Tp, _As...>& __x, int __y)
1534  {
1535  return __x._M_apply_per_chunk([__y](auto __impl, auto __xx) constexpr {
1536  return __impl._S_bit_shift_left(__xx, __y);
1537  });
1538  }
1539 
1540  template <typename _Tp, typename... _As>
1541  static inline constexpr _SimdTuple<_Tp, _As...>
1542  _S_bit_shift_right(const _SimdTuple<_Tp, _As...>& __x, int __y)
1543  {
1544  return __x._M_apply_per_chunk([__y](auto __impl, auto __xx) constexpr {
1545  return __impl._S_bit_shift_right(__xx, __y);
1546  });
1547  }
1548 
1549  // math {{{2
1550 #define _GLIBCXX_SIMD_APPLY_ON_TUPLE(_RetTp, __name) \
1551  template <typename _Tp, typename... _As, typename... _More> \
1552  static inline __fixed_size_storage_t<_RetTp, _Np> \
1553  _S_##__name(const _SimdTuple<_Tp, _As...>& __x, \
1554  const _More&... __more) \
1555  { \
1556  if constexpr (sizeof...(_More) == 0) \
1557  { \
1558  if constexpr (is_same_v<_Tp, _RetTp>) \
1559  return __x._M_apply_per_chunk( \
1560  [](auto __impl, auto __xx) constexpr { \
1561  using _V = typename decltype(__impl)::simd_type; \
1562  return __data(__name(_V(__private_init, __xx))); \
1563  }); \
1564  else \
1565  return __optimize_simd_tuple( \
1566  __x.template _M_apply_r<_RetTp>([](auto __impl, auto __xx) { \
1567  return __impl._S_##__name(__xx); \
1568  })); \
1569  } \
1570  else if constexpr ( \
1571  is_same_v< \
1572  _Tp, \
1573  _RetTp> && (... && is_same_v<_SimdTuple<_Tp, _As...>, _More>) ) \
1574  return __x._M_apply_per_chunk( \
1575  [](auto __impl, auto __xx, auto... __pack) constexpr { \
1576  using _V = typename decltype(__impl)::simd_type; \
1577  return __data(__name(_V(__private_init, __xx), \
1578  _V(__private_init, __pack)...)); \
1579  }, \
1580  __more...); \
1581  else if constexpr (is_same_v<_Tp, _RetTp>) \
1582  return __x._M_apply_per_chunk( \
1583  [](auto __impl, auto __xx, auto... __pack) constexpr { \
1584  using _V = typename decltype(__impl)::simd_type; \
1585  return __data(__name(_V(__private_init, __xx), \
1586  __autocvt_to_simd(__pack)...)); \
1587  }, \
1588  __more...); \
1589  else \
1590  __assert_unreachable<_Tp>(); \
1591  }
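 // The macro above dispatches a math function chunk-wise: each chunk is
 // wrapped in its native simd type and __name is applied (or the chunk's
 // _S_##__name when the return type differs); additional arguments are either
 // matching tuples, chunked alongside __x, or adapted via __autocvt_to_simd.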
1592 
1593  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, acos)
1594  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, asin)
1595  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, atan)
1596  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, atan2)
1597  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, cos)
1598  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, sin)
1599  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, tan)
1600  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, acosh)
1601  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, asinh)
1602  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, atanh)
1603  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, cosh)
1604  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, sinh)
1605  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, tanh)
1606  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, exp)
1607  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, exp2)
1608  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, expm1)
1609  _GLIBCXX_SIMD_APPLY_ON_TUPLE(int, ilogb)
1610  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, log)
1611  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, log10)
1612  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, log1p)
1613  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, log2)
1614  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, logb)
1615  // modf implemented in simd_math.h
1616  // double scalbn(double x, int exp);
1617  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, scalbn)
1618  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, scalbln)
1619  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, cbrt)
1620  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, abs)
1621  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, fabs)
1622  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, pow)
1623  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, sqrt)
1624  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, erf)
1625  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, erfc)
1626  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, lgamma)
1627  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, tgamma)
1628  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, trunc)
1629  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, ceil)
1630  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, floor)
1631  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, nearbyint)
1632 
1633  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, rint)
1634  _GLIBCXX_SIMD_APPLY_ON_TUPLE(long, lrint)
1635  _GLIBCXX_SIMD_APPLY_ON_TUPLE(long long, llrint)
1636 
1637  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, round)
1638  _GLIBCXX_SIMD_APPLY_ON_TUPLE(long, lround)
1639  _GLIBCXX_SIMD_APPLY_ON_TUPLE(long long, llround)
1640 
1641  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, ldexp)
1642  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, fmod)
1643  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, remainder)
1644  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, copysign)
1645  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, nextafter)
1646  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, fdim)
1647  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, fmax)
1648  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, fmin)
1649  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, fma)
1650  _GLIBCXX_SIMD_APPLY_ON_TUPLE(int, fpclassify)
1651 #undef _GLIBCXX_SIMD_APPLY_ON_TUPLE
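
  // Illustrative sketch of one expansion of the macro above: e.g.
  // _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, sin) defines
  //   _S_sin(const _SimdTuple<_Tp, _As...>& __x)
  // which evaluates sin chunk by chunk, so a call like
  //   simd<float, simd_abi::fixed_size<9>> __v = ...;
  //   auto __s = sin(__v);
  // is expected to dispatch to each chunk's native (or scalar) sin.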
1652 
1653  template <typename _Tp, typename... _Abis>
1654  static _SimdTuple<_Tp, _Abis...> _S_remquo(
1655  const _SimdTuple<_Tp, _Abis...>& __x,
1656  const _SimdTuple<_Tp, _Abis...>& __y,
1657  __fixed_size_storage_t<int, _SimdTuple<_Tp, _Abis...>::_S_size()>* __z)
1658  {
1659  return __x._M_apply_per_chunk(
1660  [](auto __impl, const auto __xx, const auto __yy, auto& __zz) {
1661  return __impl._S_remquo(__xx, __yy, &__zz);
1662  },
1663  __y, *__z);
1664  }
1665 
1666  template <typename _Tp, typename... _As>
1667  static inline _SimdTuple<_Tp, _As...>
1668  _S_frexp(const _SimdTuple<_Tp, _As...>& __x,
1669  __fixed_size_storage_t<int, _Np>& __exp) noexcept
1670  {
1671  return __x._M_apply_per_chunk(
1672  [](auto __impl, const auto& __a, auto& __b) {
1673  return __data(
1674  frexp(typename decltype(__impl)::simd_type(__private_init, __a),
1675  __autocvt_to_simd(__b)));
1676  },
1677  __exp);
1678  }
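
  // Illustrative usage sketch (assuming the frexp overload from simd_math.h):
  //   simd<double, simd_abi::fixed_size<5>> __v = ...;
  //   fixed_size_simd<int, 5> __e;
  //   auto __m = frexp(__v, &__e);
  // _S_frexp applies frexp per chunk and writes each chunk's exponents into
  // the matching chunk of the int storage __exp.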
1679 
1680 #define _GLIBCXX_SIMD_TEST_ON_TUPLE_(name_) \
1681  template <typename _Tp, typename... _As> \
1682  static inline _MaskMember \
1683  _S_##name_(const _SimdTuple<_Tp, _As...>& __x) noexcept \
1684  { \
1685  return _M_test([](auto __impl, \
1686  auto __xx) { return __impl._S_##name_(__xx); }, \
1687  __x); \
1688  }
1689 
1690  _GLIBCXX_SIMD_TEST_ON_TUPLE_(isinf)
1691  _GLIBCXX_SIMD_TEST_ON_TUPLE_(isfinite)
1692  _GLIBCXX_SIMD_TEST_ON_TUPLE_(isnan)
1693  _GLIBCXX_SIMD_TEST_ON_TUPLE_(isnormal)
1694  _GLIBCXX_SIMD_TEST_ON_TUPLE_(signbit)
1695 #undef _GLIBCXX_SIMD_TEST_ON_TUPLE_
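
  // Illustrative usage sketch: a classification call such as
  //   simd<float, simd_abi::fixed_size<6>> __v = ...;
  //   auto __k = isnan(__v);   // simd_mask<float, simd_abi::fixed_size<6>>
  // is expected to run the per-chunk _S_isnan above and collect the chunk
  // results into one _MaskMember bitmask via _M_test.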
1696 
1697  // _S_increment & _S_decrement{{{2
1698  template <typename... _Ts>
1699  _GLIBCXX_SIMD_INTRINSIC static constexpr void
1700  _S_increment(_SimdTuple<_Ts...>& __x)
1701  {
1702  __for_each(
1703  __x, [](auto __meta, auto& __native) constexpr {
1704  __meta._S_increment(__native);
1705  });
1706  }
1707 
1708  template <typename... _Ts>
1709  _GLIBCXX_SIMD_INTRINSIC static constexpr void
1710  _S_decrement(_SimdTuple<_Ts...>& __x)
1711  {
1712  __for_each(
1713  __x, [](auto __meta, auto& __native) constexpr {
1714  __meta._S_decrement(__native);
1715  });
1716  }
1717 
1718  // compares {{{2
1719 #define _GLIBCXX_SIMD_CMP_OPERATIONS(__cmp) \
1720  template <typename _Tp, typename... _As> \
1721  _GLIBCXX_SIMD_INTRINSIC constexpr static _MaskMember \
1722  __cmp(const _SimdTuple<_Tp, _As...>& __x, \
1723  const _SimdTuple<_Tp, _As...>& __y) \
1724  { \
1725  return _M_test( \
1726  [](auto __impl, auto __xx, auto __yy) constexpr { \
1727  return __impl.__cmp(__xx, __yy); \
1728  }, \
1729  __x, __y); \
1730  }
1731 
1732  _GLIBCXX_SIMD_CMP_OPERATIONS(_S_equal_to)
1733  _GLIBCXX_SIMD_CMP_OPERATIONS(_S_not_equal_to)
1734  _GLIBCXX_SIMD_CMP_OPERATIONS(_S_less)
1735  _GLIBCXX_SIMD_CMP_OPERATIONS(_S_less_equal)
1736  _GLIBCXX_SIMD_CMP_OPERATIONS(_S_isless)
1737  _GLIBCXX_SIMD_CMP_OPERATIONS(_S_islessequal)
1738  _GLIBCXX_SIMD_CMP_OPERATIONS(_S_isgreater)
1739  _GLIBCXX_SIMD_CMP_OPERATIONS(_S_isgreaterequal)
1740  _GLIBCXX_SIMD_CMP_OPERATIONS(_S_islessgreater)
1741  _GLIBCXX_SIMD_CMP_OPERATIONS(_S_isunordered)
1742 #undef _GLIBCXX_SIMD_CMP_OPERATIONS
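
  // Illustrative usage sketch: a comparison like __v == __w on fixed_size
  // simd objects is expected to call _S_equal_to(__data(__v), __data(__w)),
  // which compares chunk by chunk and gathers the per-chunk masks into a
  // single _MaskMember bitmask via _M_test.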
1743 
1744  // smart_reference access {{{2
1745  template <typename _Tp, typename... _As, typename _Up>
1746  _GLIBCXX_SIMD_INTRINSIC static void _S_set(_SimdTuple<_Tp, _As...>& __v,
1747  int __i, _Up&& __x) noexcept
1748  { __v._M_set(__i, static_cast<_Up&&>(__x)); }
1749 
1750  // _S_masked_assign {{{2
1751  template <typename _Tp, typename... _As>
1752  _GLIBCXX_SIMD_INTRINSIC static void
1753  _S_masked_assign(const _MaskMember __bits, _SimdTuple<_Tp, _As...>& __lhs,
1754  const __type_identity_t<_SimdTuple<_Tp, _As...>>& __rhs)
1755  {
1756  __for_each(
1757  __lhs, __rhs,
1758  [&](auto __meta, auto& __native_lhs, auto __native_rhs) constexpr {
1759  __meta._S_masked_assign(__meta._S_make_mask(__bits), __native_lhs,
1760  __native_rhs);
1761  });
1762  }
1763 
1764  // Optimization for the case where the RHS is a scalar. No need to broadcast
1765  // the scalar to a simd first.
1766  template <typename _Tp, typename... _As>
1767  _GLIBCXX_SIMD_INTRINSIC static void
1768  _S_masked_assign(const _MaskMember __bits, _SimdTuple<_Tp, _As...>& __lhs,
1769  const __type_identity_t<_Tp> __rhs)
1770  {
1771  __for_each(
1772  __lhs, [&](auto __meta, auto& __native_lhs) constexpr {
1773  __meta._S_masked_assign(__meta._S_make_mask(__bits), __native_lhs,
1774  __rhs);
1775  });
1776  }
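
  // Illustrative usage sketch: an expression such as
  //   where(__k, __v) = _Tp(1);
  // can reach the scalar overload above, so the single value is handed to
  // each chunk's masked assignment directly instead of being broadcast to a
  // full _SimdTuple first.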
1777 
1778  // _S_masked_cassign {{{2
1779  template <typename _Op, typename _Tp, typename... _As>
1780  static inline void _S_masked_cassign(const _MaskMember __bits,
1781  _SimdTuple<_Tp, _As...>& __lhs,
1782  const _SimdTuple<_Tp, _As...>& __rhs,
1783  _Op __op)
1784  {
1785  __for_each(
1786  __lhs, __rhs,
1787  [&](auto __meta, auto& __native_lhs, auto __native_rhs) constexpr {
1788  __meta.template _S_masked_cassign(__meta._S_make_mask(__bits),
1789  __native_lhs, __native_rhs, __op);
1790  });
1791  }
1792 
1793  // Optimization for the case where the RHS is a scalar. No need to broadcast
1794  // the scalar to a simd first.
1795  template <typename _Op, typename _Tp, typename... _As>
1796  static inline void _S_masked_cassign(const _MaskMember __bits,
1797  _SimdTuple<_Tp, _As...>& __lhs,
1798  const _Tp& __rhs, _Op __op)
1799  {
1800  __for_each(
1801  __lhs, [&](auto __meta, auto& __native_lhs) constexpr {
1802  __meta.template _S_masked_cassign(__meta._S_make_mask(__bits),
1803  __native_lhs, __rhs, __op);
1804  });
1805  }
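
  // Illustrative usage sketch: a compound masked assignment such as
  //   where(__k, __v) += _Tp(2);
  // can reach the scalar overload above, with the addition passed in as _Op
  // and the scalar right-hand side forwarded to every chunk unchanged.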
1806 
1807  // _S_masked_unary {{{2
1808  template <template <typename> class _Op, typename _Tp, typename... _As>
1809  static inline _SimdTuple<_Tp, _As...>
1810  _S_masked_unary(const _MaskMember __bits, const _SimdTuple<_Tp, _As...>& __v)
1811  {
1812  return __v._M_apply_wrapped([&__bits](auto __meta,
1813  auto __native) constexpr {
1814  return __meta.template _S_masked_unary<_Op>(__meta._S_make_mask(
1815  __bits),
1816  __native);
1817  });
1818  }
1819 
1820  // }}}2
1821  };
1822 
1823 // _MaskImplFixedSize {{{1
1824 template <int _Np, typename>
1825  struct _MaskImplFixedSize
1826  {
1827  static_assert(
1828  sizeof(_ULLong) * __CHAR_BIT__ >= _Np,
1829  "The fixed_size implementation relies on one _ULLong being able to store "
1830  "all boolean elements."); // required in load & store
1831 
1832  // member types {{{
1833  using _Abi = simd_abi::fixed_size<_Np>;
1834 
1835  using _MaskMember = _SanitizedBitMask<_Np>;
1836 
1837  template <typename _Tp>
1838  using _FirstAbi = typename __fixed_size_storage_t<_Tp, _Np>::_FirstAbi;
1839 
1840  template <typename _Tp>
1841  using _TypeTag = _Tp*;
1842 
1843  // }}}
1844  // _S_broadcast {{{
1845  template <typename>
1846  _GLIBCXX_SIMD_INTRINSIC static constexpr _MaskMember
1847  _S_broadcast(bool __x)
1848  { return __x ? ~_MaskMember() : _MaskMember(); }
1849 
1850  // }}}
1851  // _S_load {{{
1852  template <typename>
1853  _GLIBCXX_SIMD_INTRINSIC static constexpr _MaskMember
1854  _S_load(const bool* __mem)
1855  {
1856  using _Ip = __int_for_sizeof_t<bool>;
1857  // the following load uses element_aligned and relies on __mem already
1858  // carrying alignment information from when this load function was
1859  // called.
1860  const simd<_Ip, _Abi> __bools(reinterpret_cast<const __may_alias<_Ip>*>(
1861  __mem),
1862  element_aligned);
1863  return __data(__bools != 0);
1864  }
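
  // Illustrative usage sketch: loading a mask from a bool array, e.g.
  //   bool __init[_Np] = {...};
  //   simd_mask<float, _Abi> __k(__init, element_aligned);
  // is expected to map onto this function: the bools are loaded as integers
  // of the same size and compared against 0 to form the bitmask.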
1865 
1866  // }}}
1867  // _S_to_bits {{{
1868  template <bool _Sanitized>
1869  _GLIBCXX_SIMD_INTRINSIC static constexpr _SanitizedBitMask<_Np>
1870  _S_to_bits(_BitMask<_Np, _Sanitized> __x)
1871  {
1872  if constexpr (_Sanitized)
1873  return __x;
1874  else
1875  return __x._M_sanitized();
1876  }
1877 
1878  // }}}
1879  // _S_convert {{{
1880  template <typename _Tp, typename _Up, typename _UAbi>
1881  _GLIBCXX_SIMD_INTRINSIC static constexpr _MaskMember
1882  _S_convert(simd_mask<_Up, _UAbi> __x)
1883  {
1884  return _UAbi::_MaskImpl::_S_to_bits(__data(__x))
1885  .template _M_extract<0, _Np>();
1886  }
1887 
1888  // }}}
1889  // _S_from_bitmask {{{2
1890  template <typename _Tp>
1891  _GLIBCXX_SIMD_INTRINSIC static _MaskMember
1892  _S_from_bitmask(_MaskMember __bits, _TypeTag<_Tp>) noexcept
1893  { return __bits; }
1894 
1895  // _S_load {{{2
1896  static inline _MaskMember _S_load(const bool* __mem) noexcept
1897  {
1898  // TODO: _UChar is not necessarily the best type to use here. For smaller
1899  // _Np, _UShort, _UInt, _ULLong, float, and double can be more efficient.
1900  _ULLong __r = 0;
1901  using _Vs = __fixed_size_storage_t<_UChar, _Np>;
1902  __for_each(_Vs{}, [&](auto __meta, auto) {
1903  __r |= __meta._S_mask_to_shifted_ullong(
1904  __meta._S_mask_impl._S_load(&__mem[__meta._S_offset],
1905  _SizeConstant<__meta._S_size()>()));
1906  });
1907  return __r;
1908  }
1909 
1910  // _S_masked_load {{{2
1911  static inline _MaskMember _S_masked_load(_MaskMember __merge,
1912  _MaskMember __mask,
1913  const bool* __mem) noexcept
1914  {
1915  _BitOps::_S_bit_iteration(__mask.to_ullong(), [&](auto __i) {
1916  __merge.set(__i, __mem[__i]);
1917  });
1918  return __merge;
1919  }
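
  // Illustrative sketch: the masked load above only touches the set bits of
  // __mask. With __mask == 0b0101 only __mem[0] and __mem[2] are read and
  // copied into __merge; all other elements keep their previous value.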
1920 
1921  // _S_store {{{2
1922  static inline void _S_store(const _MaskMember __bitmask,
1923  bool* __mem) noexcept
1924  {
1925  if constexpr (_Np == 1)
1926  __mem[0] = __bitmask[0];
1927  else
1928  _FirstAbi<_UChar>::_CommonImpl::_S_store_bool_array(__bitmask, __mem);
1929  }
1930 
1931  // _S_masked_store {{{2
1932  static inline void _S_masked_store(const _MaskMember __v, bool* __mem,
1933  const _MaskMember __k) noexcept
1934  {
1935  _BitOps::_S_bit_iteration(__k, [&](auto __i) { __mem[__i] = __v[__i]; });
1936  }
1937 
1938  // logical and bitwise operators {{{2
1939  _GLIBCXX_SIMD_INTRINSIC static _MaskMember
1940  _S_logical_and(const _MaskMember& __x, const _MaskMember& __y) noexcept
1941  { return __x & __y; }
1942 
1943  _GLIBCXX_SIMD_INTRINSIC static _MaskMember
1944  _S_logical_or(const _MaskMember& __x, const _MaskMember& __y) noexcept
1945  { return __x | __y; }
1946 
1947  _GLIBCXX_SIMD_INTRINSIC static constexpr _MaskMember
1948  _S_bit_not(const _MaskMember& __x) noexcept
1949  { return ~__x; }
1950 
1951  _GLIBCXX_SIMD_INTRINSIC static _MaskMember
1952  _S_bit_and(const _MaskMember& __x, const _MaskMember& __y) noexcept
1953  { return __x & __y; }
1954 
1955  _GLIBCXX_SIMD_INTRINSIC static _MaskMember
1956  _S_bit_or(const _MaskMember& __x, const _MaskMember& __y) noexcept
1957  { return __x | __y; }
1958 
1959  _GLIBCXX_SIMD_INTRINSIC static _MaskMember
1960  _S_bit_xor(const _MaskMember& __x, const _MaskMember& __y) noexcept
1961  { return __x ^ __y; }
1962 
1963  // smart_reference access {{{2
1964  _GLIBCXX_SIMD_INTRINSIC static void _S_set(_MaskMember& __k, int __i,
1965  bool __x) noexcept
1966  { __k.set(__i, __x); }
1967 
1968  // _S_masked_assign {{{2
1969  _GLIBCXX_SIMD_INTRINSIC static void
1970  _S_masked_assign(const _MaskMember __k, _MaskMember& __lhs,
1971  const _MaskMember __rhs)
1972  { __lhs = (__lhs & ~__k) | (__rhs & __k); }
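
  // Worked example for the blend above: with __k = 0b0011, __lhs = 0b1010
  // and __rhs = 0b0101, the result is
  //   (0b1010 & 0b1100) | (0b0101 & 0b0011) == 0b1000 | 0b0001 == 0b1001.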
1973 
1974  // Optimization for the case where the RHS is a scalar.
1975  _GLIBCXX_SIMD_INTRINSIC static void _S_masked_assign(const _MaskMember __k,
1976  _MaskMember& __lhs,
1977  const bool __rhs)
1978  {
1979  if (__rhs)
1980  __lhs |= __k;
1981  else
1982  __lhs &= ~__k;
1983  }
1984 
1985  // }}}2
1986  // _S_all_of {{{
1987  template <typename _Tp>
1988  _GLIBCXX_SIMD_INTRINSIC static bool _S_all_of(simd_mask<_Tp, _Abi> __k)
1989  { return __data(__k).all(); }
1990 
1991  // }}}
1992  // _S_any_of {{{
1993  template <typename _Tp>
1994  _GLIBCXX_SIMD_INTRINSIC static bool _S_any_of(simd_mask<_Tp, _Abi> __k)
1995  { return __data(__k).any(); }
1996 
1997  // }}}
1998  // _S_none_of {{{
1999  template <typename _Tp>
2000  _GLIBCXX_SIMD_INTRINSIC static bool _S_none_of(simd_mask<_Tp, _Abi> __k)
2001  { return __data(__k).none(); }
2002 
2003  // }}}
2004  // _S_some_of {{{
2005  template <typename _Tp>
2006  _GLIBCXX_SIMD_INTRINSIC static bool
2007  _S_some_of([[maybe_unused]] simd_mask<_Tp, _Abi> __k)
2008  {
2009  if constexpr (_Np == 1)
2010  return false;
2011  else
2012  return __data(__k).any() && !__data(__k).all();
2013  }
2014 
2015  // }}}
2016  // _S_popcount {{{
2017  template <typename _Tp>
2018  _GLIBCXX_SIMD_INTRINSIC static int _S_popcount(simd_mask<_Tp, _Abi> __k)
2019  { return __data(__k).count(); }
2020 
2021  // }}}
2022  // _S_find_first_set {{{
2023  template <typename _Tp>
2024  _GLIBCXX_SIMD_INTRINSIC static int
2025  _S_find_first_set(simd_mask<_Tp, _Abi> __k)
2026  { return std::__countr_zero(__data(__k).to_ullong()); }
2027 
2028  // }}}
2029  // _S_find_last_set {{{
2030  template <typename _Tp>
2031  _GLIBCXX_SIMD_INTRINSIC static int
2032  _S_find_last_set(simd_mask<_Tp, _Abi> __k)
2033  { return std::__bit_width(__data(__k).to_ullong()) - 1; }
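
  // Worked example: for a mask whose bits are 0b0110, _S_find_first_set
  // returns __countr_zero(0b0110) == 1 and _S_find_last_set returns
  // __bit_width(0b0110) - 1 == 2.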
2034 
2035  // }}}
2036  };
2037 // }}}1
2038 
2039 _GLIBCXX_SIMD_END_NAMESPACE
2040 #endif // __cplusplus >= 201703L
2041 #endif // _GLIBCXX_EXPERIMENTAL_SIMD_FIXED_SIZE_H_
2042 
2043 // vim: foldmethod=marker sw=2 noet ts=8 sts=2 tw=80