// Definition of the public simd interfaces -*- C++ -*-

// Copyright (C) 2020-2024 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
// <http://www.gnu.org/licenses/>.

#ifndef _GLIBCXX_EXPERIMENTAL_SIMD_H
#define _GLIBCXX_EXPERIMENTAL_SIMD_H

#if __cplusplus >= 201703L

#include "simd_detail.h"
#include "numeric_traits.h"
#include <bit>
#include <bitset>
#ifdef _GLIBCXX_DEBUG_UB
#include <cstdio> // for stderr
#endif
#include <cstring>
#include <cmath>
#include <functional>
#include <iosfwd>
#include <utility>
#include <algorithm>

#if _GLIBCXX_SIMD_X86INTRIN
#include <x86intrin.h>
#elif _GLIBCXX_SIMD_HAVE_NEON
#include <arm_neon.h>
#endif
#if _GLIBCXX_SIMD_HAVE_SVE
#include <arm_sve.h>
#endif

/** @ingroup ts_simd
 * @{
 */
/* There are several closely related types, with the following naming
 * convention:
 * _Tp: vectorizable (arithmetic) type (or any type)
 * _TV: __vector_type_t<_Tp, _Np>
 * _TW: _SimdWrapper<_Tp, _Np>
 * _TI: __intrinsic_type_t<_Tp, _Np>
 * _TVT: _VectorTraits<_TV> or _VectorTraits<_TW>
 * If one additional type is needed use _U instead of _T.
 * Otherwise use _T\d, _TV\d, _TW\d, _TI\d, _TVT\d.
 *
 * More naming conventions:
 * _Ap or _Abi: An ABI tag from the simd_abi namespace
 * _Ip: often used for integer types with sizeof(_Ip) == sizeof(_Tp),
 *      _IV, _IW as for _TV, _TW
 * _Np: number of elements (not bytes)
 * _Bytes: number of bytes
 *
 * Variable names:
 * __k: mask object (vector- or bitmask)
 */
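// Editor's illustration of the naming convention above (not part of the
// original header), for _Tp = float and _Np = 4:
//   _TV  = __vector_type_t<float, 4>     // float [[gnu::vector_size(16)]]
//   _TW  = _SimdWrapper<float, 4>        // _TV plus the used element count
//   _TI  = __intrinsic_type_t<float, 4>  // e.g. __m128 on x86
//   _TVT = _VectorTraits<_TV>            // reports value_type and full size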
_GLIBCXX_SIMD_BEGIN_NAMESPACE

#if !_GLIBCXX_SIMD_X86INTRIN
using __m128 [[__gnu__::__vector_size__(16)]] = float;
using __m128d [[__gnu__::__vector_size__(16)]] = double;
using __m128i [[__gnu__::__vector_size__(16)]] = long long;
using __m256 [[__gnu__::__vector_size__(32)]] = float;
using __m256d [[__gnu__::__vector_size__(32)]] = double;
using __m256i [[__gnu__::__vector_size__(32)]] = long long;
using __m512 [[__gnu__::__vector_size__(64)]] = float;
using __m512d [[__gnu__::__vector_size__(64)]] = double;
using __m512i [[__gnu__::__vector_size__(64)]] = long long;
#endif

#if _GLIBCXX_SIMD_HAVE_SVE
constexpr inline int __sve_vectorized_size_bytes = __ARM_FEATURE_SVE_BITS / 8;
#else
constexpr inline int __sve_vectorized_size_bytes = 0;
#endif

namespace simd_abi {
// simd_abi forward declarations {{{
// implementation details:
struct _Scalar;

template <int _Np>
  struct _Fixed;

// There are two major ABIs that appear on different architectures.
// Both have non-boolean values packed into an N Byte register
// -> #elements = N / sizeof(T)
// Masks differ:
// 1. Use value vector registers for masks (all 0 or all 1)
// 2. Use bitmasks (mask registers) with one bit per value in the corresponding
//    value vector
//
// Both can be partially used, masking off the rest when doing horizontal
// operations or operations that can trap (e.g. FP_INVALID or integer division
// by 0). This is encoded as the number of used bytes.
template <int _UsedBytes>
  struct _VecBuiltin;

template <int _UsedBytes>
  struct _VecBltnBtmsk;
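// Editor's illustration (not part of the original header): with 16-byte
// registers and _Tp = float, both ABIs pack 4 values into one register. Under
// ABI 1 (_VecBuiltin<16>) a simd_mask<float> is itself a 4 x 32-bit vector
// whose lanes are all-zeros or all-ones; under ABI 2 (_VecBltnBtmsk, the
// AVX-512 style) the mask is a bitmask in a mask register, one bit per lane.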

template <int _UsedBytes, int _TotalBytes = __sve_vectorized_size_bytes>
  struct _SveAbi;

template <typename _Tp, int _Np>
  using _VecN = _VecBuiltin<sizeof(_Tp) * _Np>;

template <int _UsedBytes = 16>
  using _Sse = _VecBuiltin<_UsedBytes>;

template <int _UsedBytes = 32>
  using _Avx = _VecBuiltin<_UsedBytes>;

template <int _UsedBytes = 64>
  using _Avx512 = _VecBltnBtmsk<_UsedBytes>;

template <int _UsedBytes = 16>
  using _Neon = _VecBuiltin<_UsedBytes>;

template <int _UsedBytes = __sve_vectorized_size_bytes>
  using _Sve = _SveAbi<_UsedBytes, __sve_vectorized_size_bytes>;

// implementation-defined:
using __sse = _Sse<>;
using __avx = _Avx<>;
using __avx512 = _Avx512<>;
using __neon = _Neon<>;
using __neon128 = _Neon<16>;
using __neon64 = _Neon<8>;
using __sve = _Sve<>;

// standard:
template <typename _Tp, size_t _Np, typename...>
  struct deduce;

template <int _Np>
  using fixed_size = _Fixed<_Np>;

using scalar = _Scalar;

// }}}
} // namespace simd_abi
// forward declarations is_simd(_mask), simd(_mask), simd_size {{{
template <typename _Tp>
  struct is_simd;

template <typename _Tp>
  struct is_simd_mask;

template <typename _Tp, typename _Abi>
  class simd;

template <typename _Tp, typename _Abi>
  class simd_mask;

template <typename _Tp, typename _Abi>
  struct simd_size;

// }}}
// load/store flags {{{
struct element_aligned_tag
{
  template <typename _Tp, typename _Up = typename _Tp::value_type>
    static constexpr size_t _S_alignment = alignof(_Up);

  template <typename _Tp, typename _Up>
    _GLIBCXX_SIMD_INTRINSIC static constexpr _Up*
    _S_apply(_Up* __ptr)
    { return __ptr; }
};

struct vector_aligned_tag
{
  template <typename _Tp, typename _Up = typename _Tp::value_type>
    static constexpr size_t _S_alignment
      = std::__bit_ceil(sizeof(_Up) * _Tp::size());

  template <typename _Tp, typename _Up>
    _GLIBCXX_SIMD_INTRINSIC static constexpr _Up*
    _S_apply(_Up* __ptr)
    { return static_cast<_Up*>(__builtin_assume_aligned(__ptr, _S_alignment<_Tp, _Up>)); }
};

template <size_t _Np> struct overaligned_tag
{
  template <typename _Tp, typename _Up = typename _Tp::value_type>
    static constexpr size_t _S_alignment = _Np;

  template <typename _Tp, typename _Up>
    _GLIBCXX_SIMD_INTRINSIC static constexpr _Up*
    _S_apply(_Up* __ptr)
    { return static_cast<_Up*>(__builtin_assume_aligned(__ptr, _Np)); }
};

inline constexpr element_aligned_tag element_aligned = {};

inline constexpr vector_aligned_tag vector_aligned = {};

template <size_t _Np>
  inline constexpr overaligned_tag<_Np> overaligned = {};

// }}}
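// Editor's example (not part of the original header): how the three tags are
// used at load/store call sites. _S_apply is what passes the caller's
// alignment promise on to the compiler via __builtin_assume_aligned:
//
//   alignas(memory_alignment_v<simd<float>>) float __buf[simd<float>::size()];
//   simd<float> __v(__buf, vector_aligned);    // full vector alignment assumed
//   __v.copy_to(__buf, element_aligned);       // only alignof(float) assumed
//   simd<float> __w(__buf, overaligned<64>);   // caller guarantees 64 bytes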
template <size_t _Xp>
  using _SizeConstant = integral_constant<size_t, _Xp>;
// constexpr feature detection{{{
constexpr inline bool __have_mmx = _GLIBCXX_SIMD_HAVE_MMX;
constexpr inline bool __have_sse = _GLIBCXX_SIMD_HAVE_SSE;
constexpr inline bool __have_sse2 = _GLIBCXX_SIMD_HAVE_SSE2;
constexpr inline bool __have_sse3 = _GLIBCXX_SIMD_HAVE_SSE3;
constexpr inline bool __have_ssse3 = _GLIBCXX_SIMD_HAVE_SSSE3;
constexpr inline bool __have_sse4_1 = _GLIBCXX_SIMD_HAVE_SSE4_1;
constexpr inline bool __have_sse4_2 = _GLIBCXX_SIMD_HAVE_SSE4_2;
constexpr inline bool __have_xop = _GLIBCXX_SIMD_HAVE_XOP;
constexpr inline bool __have_avx = _GLIBCXX_SIMD_HAVE_AVX;
constexpr inline bool __have_avx2 = _GLIBCXX_SIMD_HAVE_AVX2;
constexpr inline bool __have_bmi = _GLIBCXX_SIMD_HAVE_BMI1;
constexpr inline bool __have_bmi2 = _GLIBCXX_SIMD_HAVE_BMI2;
constexpr inline bool __have_lzcnt = _GLIBCXX_SIMD_HAVE_LZCNT;
constexpr inline bool __have_sse4a = _GLIBCXX_SIMD_HAVE_SSE4A;
constexpr inline bool __have_fma = _GLIBCXX_SIMD_HAVE_FMA;
constexpr inline bool __have_fma4 = _GLIBCXX_SIMD_HAVE_FMA4;
constexpr inline bool __have_f16c = _GLIBCXX_SIMD_HAVE_F16C;
constexpr inline bool __have_popcnt = _GLIBCXX_SIMD_HAVE_POPCNT;
constexpr inline bool __have_avx512f = _GLIBCXX_SIMD_HAVE_AVX512F;
constexpr inline bool __have_avx512dq = _GLIBCXX_SIMD_HAVE_AVX512DQ;
constexpr inline bool __have_avx512vl = _GLIBCXX_SIMD_HAVE_AVX512VL;
constexpr inline bool __have_avx512bw = _GLIBCXX_SIMD_HAVE_AVX512BW;
constexpr inline bool __have_avx512dq_vl = __have_avx512dq && __have_avx512vl;
constexpr inline bool __have_avx512bw_vl = __have_avx512bw && __have_avx512vl;
constexpr inline bool __have_avx512bitalg = _GLIBCXX_SIMD_HAVE_AVX512BITALG;
constexpr inline bool __have_avx512vbmi2 = _GLIBCXX_SIMD_HAVE_AVX512VBMI2;
constexpr inline bool __have_avx512vbmi = _GLIBCXX_SIMD_HAVE_AVX512VBMI;
constexpr inline bool __have_avx512ifma = _GLIBCXX_SIMD_HAVE_AVX512IFMA;
constexpr inline bool __have_avx512cd = _GLIBCXX_SIMD_HAVE_AVX512CD;
constexpr inline bool __have_avx512vnni = _GLIBCXX_SIMD_HAVE_AVX512VNNI;
constexpr inline bool __have_avx512vpopcntdq = _GLIBCXX_SIMD_HAVE_AVX512VPOPCNTDQ;
constexpr inline bool __have_avx512vp2intersect = _GLIBCXX_SIMD_HAVE_AVX512VP2INTERSECT;

constexpr inline bool __have_neon = _GLIBCXX_SIMD_HAVE_NEON;
constexpr inline bool __have_neon_a32 = _GLIBCXX_SIMD_HAVE_NEON_A32;
constexpr inline bool __have_neon_a64 = _GLIBCXX_SIMD_HAVE_NEON_A64;
constexpr inline bool __support_neon_float =
#if defined __GCC_IEC_559
  __GCC_IEC_559 == 0;
#elif defined __FAST_MATH__
  true;
#else
  false;
#endif

constexpr inline bool __have_sve = _GLIBCXX_SIMD_HAVE_SVE;
constexpr inline bool __have_sve2 = _GLIBCXX_SIMD_HAVE_SVE2;

#ifdef _ARCH_PWR10
constexpr inline bool __have_power10vec = true;
#else
constexpr inline bool __have_power10vec = false;
#endif
#ifdef __POWER9_VECTOR__
constexpr inline bool __have_power9vec = true;
#else
constexpr inline bool __have_power9vec = false;
#endif
#if defined __POWER8_VECTOR__
constexpr inline bool __have_power8vec = true;
#else
constexpr inline bool __have_power8vec = __have_power9vec;
#endif
#if defined __VSX__
constexpr inline bool __have_power_vsx = true;
#else
constexpr inline bool __have_power_vsx = __have_power8vec;
#endif
#if defined __ALTIVEC__
constexpr inline bool __have_power_vmx = true;
#else
constexpr inline bool __have_power_vmx = __have_power_vsx;
#endif

// }}}

namespace __detail
{
#ifdef math_errhandling
  // Determines _S_handle_fpexcept from math_errhandling if it is defined and expands to a constant
  // expression. math_errhandling may expand to an extern symbol, in which case a constexpr value
  // must be guessed.
  template <int = math_errhandling>
    constexpr bool
    __handle_fpexcept_impl(int)
    { return math_errhandling & MATH_ERREXCEPT; }
#endif

  // Fallback if math_errhandling doesn't work: with fast-math assume floating-point exceptions are
  // ignored, otherwise implement correct exception behavior.
  constexpr bool
  __handle_fpexcept_impl(float)
  {
#if defined __FAST_MATH__
    return false;
#else
    return true;
#endif
  }

  /// True if math functions must raise floating-point exceptions as specified by C17.
  static constexpr bool _S_handle_fpexcept = __handle_fpexcept_impl(0);
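  // Editor's note (not part of the original header): __handle_fpexcept_impl(0)
  // relies on overload ranking. The int parameter is an exact match, so the
  // math_errhandling-based overload wins whenever it was declared; otherwise 0
  // undergoes an int -> float conversion and the fallback above is selected.
  // The same dispatch technique in isolation (hypothetical names):
  //
  //   template <int = SOME_MACRO> constexpr bool __impl(int) { return true; }
  //   constexpr bool __impl(float) { return false; }  // if SOME_MACRO is unusable
  //   constexpr bool __chosen = __impl(0);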

  constexpr std::uint_least64_t
  __floating_point_flags()
  {
    std::uint_least64_t __flags = 0;
    if constexpr (_S_handle_fpexcept)
      __flags |= 1;
#ifdef __FAST_MATH__
    __flags |= 1 << 1;
#elif __FINITE_MATH_ONLY__
    __flags |= 2 << 1;
#elif __GCC_IEC_559 < 2
    __flags |= 3 << 1;
#endif
    __flags |= (__FLT_EVAL_METHOD__ + 1) << 3;
    return __flags;
  }

  constexpr std::uint_least64_t
  __machine_flags()
  {
    if constexpr (__have_mmx || __have_sse)
      return __have_mmx
               | (__have_sse << 1)
               | (__have_sse2 << 2)
               | (__have_sse3 << 3)
               | (__have_ssse3 << 4)
               | (__have_sse4_1 << 5)
               | (__have_sse4_2 << 6)
               | (__have_xop << 7)
               | (__have_avx << 8)
               | (__have_avx2 << 9)
               | (__have_bmi << 10)
               | (__have_bmi2 << 11)
               | (__have_lzcnt << 12)
               | (__have_sse4a << 13)
               | (__have_fma << 14)
               | (__have_fma4 << 15)
               | (__have_f16c << 16)
               | (__have_popcnt << 17)
               | (__have_avx512f << 18)
               | (__have_avx512dq << 19)
               | (__have_avx512vl << 20)
               | (__have_avx512bw << 21)
               | (__have_avx512bitalg << 22)
               | (__have_avx512vbmi2 << 23)
               | (__have_avx512vbmi << 24)
               | (__have_avx512ifma << 25)
               | (__have_avx512cd << 26)
               | (__have_avx512vnni << 27)
               | (__have_avx512vpopcntdq << 28)
               | (__have_avx512vp2intersect << 29);
    else if constexpr (__have_neon || __have_sve)
      return __have_neon
               | (__have_neon_a32 << 1)
               | (__have_neon_a64 << 2)
               | (__support_neon_float << 3)
               | (__have_sve << 4)
               | (__have_sve2 << 5);
    else if constexpr (__have_power_vmx)
      return __have_power_vmx
               | (__have_power_vsx << 1)
               | (__have_power8vec << 2)
               | (__have_power9vec << 3)
               | (__have_power10vec << 4);
    else
      return 0;
  }

  namespace
  {
    struct _OdrEnforcer {};
  }

  template <std::uint_least64_t...>
    struct _MachineFlagsTemplate {};

  /**@internal
   * Use this type as default template argument to all function templates that
   * are not declared always_inline. It ensures that a function specialization,
   * which the compiler decides not to inline, has a unique symbol
   * (_OdrEnforcer) or a symbol matching the machine/architecture flags
   * (_MachineFlagsTemplate). This helps to avoid ODR violations in cases where
   * users link TUs compiled with different flags. This is especially important
   * for using simd in libraries.
   */
  using __odr_helper
    = conditional_t<__machine_flags() == 0, _OdrEnforcer,
                    _MachineFlagsTemplate<__machine_flags(), __floating_point_flags()>>;
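  // Editor's sketch (not part of the original header) of the intended use of
  // __odr_helper as a defaulted template parameter: TUs compiled with
  // different -m flags then instantiate differently mangled specializations.
  //
  //   template <typename _Tp, typename _Abi, typename = __detail::__odr_helper>
  //     void __some_non_inline_helper(const simd<_Tp, _Abi>&);  // hypothetical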

  struct _Minimum
  {
    template <typename _Tp>
      _GLIBCXX_SIMD_INTRINSIC constexpr
      _Tp
      operator()(_Tp __a, _Tp __b) const
      {
        using std::min;
        return min(__a, __b);
      }
  };

  struct _Maximum
  {
    template <typename _Tp>
      _GLIBCXX_SIMD_INTRINSIC constexpr
      _Tp
      operator()(_Tp __a, _Tp __b) const
      {
        using std::max;
        return max(__a, __b);
      }
  };
} // namespace __detail

// unrolled/pack execution helpers
// __execute_n_times{{{
template <typename _Fp, size_t... _I>
  [[__gnu__::__flatten__]] _GLIBCXX_SIMD_INTRINSIC constexpr
  void
  __execute_on_index_sequence(_Fp&& __f, index_sequence<_I...>)
  { ((void)__f(_SizeConstant<_I>()), ...); }

template <typename _Fp>
  _GLIBCXX_SIMD_INTRINSIC constexpr void
  __execute_on_index_sequence(_Fp&&, index_sequence<>)
  { }

template <size_t _Np, typename _Fp>
  _GLIBCXX_SIMD_INTRINSIC constexpr void
  __execute_n_times(_Fp&& __f)
  {
    __execute_on_index_sequence(static_cast<_Fp&&>(__f),
                                make_index_sequence<_Np>{});
  }

// }}}
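// Editor's example (not part of the original header): the callback receives
// _SizeConstant<0> ... _SizeConstant<_Np - 1>, so the index is usable as a
// constant expression inside the lambda:
//
//   int __a[4] = {};
//   __execute_n_times<4>([&](auto __i) { __a[__i] = __i; });  // {0, 1, 2, 3}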
// __generate_from_n_evaluations{{{
template <typename _R, typename _Fp, size_t... _I>
  [[__gnu__::__flatten__]] _GLIBCXX_SIMD_INTRINSIC constexpr
  _R
  __execute_on_index_sequence_with_return(_Fp&& __f, index_sequence<_I...>)
  { return _R{__f(_SizeConstant<_I>())...}; }

template <size_t _Np, typename _R, typename _Fp>
  _GLIBCXX_SIMD_INTRINSIC constexpr _R
  __generate_from_n_evaluations(_Fp&& __f)
  {
    return __execute_on_index_sequence_with_return<_R>(
             static_cast<_Fp&&>(__f), make_index_sequence<_Np>{});
  }

// }}}
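// Editor's example (not part of the original header): the result is
// list-initialized from _Np evaluations of __f, e.g. building a std::array:
//
//   auto __sq = __generate_from_n_evaluations<4, std::array<int, 4>>(
//                 [](auto __i) { return int(__i) * int(__i); });  // {0, 1, 4, 9}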
// __call_with_n_evaluations{{{
template <size_t... _I, typename _F0, typename _FArgs>
  [[__gnu__::__flatten__]] _GLIBCXX_SIMD_INTRINSIC constexpr
  auto
  __call_with_n_evaluations(index_sequence<_I...>, _F0&& __f0, _FArgs&& __fargs)
  { return __f0(__fargs(_SizeConstant<_I>())...); }

template <size_t _Np, typename _F0, typename _FArgs>
  _GLIBCXX_SIMD_INTRINSIC constexpr auto
  __call_with_n_evaluations(_F0&& __f0, _FArgs&& __fargs)
  {
    return __call_with_n_evaluations(make_index_sequence<_Np>{},
                                     static_cast<_F0&&>(__f0),
                                     static_cast<_FArgs&&>(__fargs));
  }

// }}}
// __call_with_subscripts{{{
template <size_t _First = 0, size_t... _It, typename _Tp, typename _Fp>
  [[__gnu__::__flatten__]] _GLIBCXX_SIMD_INTRINSIC constexpr
  auto
  __call_with_subscripts(_Tp&& __x, index_sequence<_It...>, _Fp&& __fun)
  { return __fun(__x[_First + _It]...); }

template <size_t _Np, size_t _First = 0, typename _Tp, typename _Fp>
  _GLIBCXX_SIMD_INTRINSIC constexpr auto
  __call_with_subscripts(_Tp&& __x, _Fp&& __fun)
  {
    return __call_with_subscripts<_First>(static_cast<_Tp&&>(__x),
                                          make_index_sequence<_Np>(),
                                          static_cast<_Fp&&>(__fun));
  }

// }}}
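// Editor's example (not part of the original header): __call_with_subscripts
// expands __x[_First], ..., __x[_First + _Np - 1] into a single call, which
// makes short horizontal reductions easy to spell:
//
//   auto __sum = __call_with_subscripts<4>(
//                  __vec, [](auto... __lanes) { return (__lanes + ...); });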

// vvv ---- type traits ---- vvv
// integer type aliases{{{
using _UChar = unsigned char;
using _SChar = signed char;
using _UShort = unsigned short;
using _UInt = unsigned int;
using _ULong = unsigned long;
using _ULLong = unsigned long long;
using _LLong = long long;

//}}}
// __first_of_pack{{{
template <typename _T0, typename...>
  struct __first_of_pack
  { using type = _T0; };

template <typename... _Ts>
  using __first_of_pack_t = typename __first_of_pack<_Ts...>::type;

//}}}
// __value_type_or_identity_t {{{
template <typename _Tp>
  typename _Tp::value_type
  __value_type_or_identity_impl(int);

template <typename _Tp>
  _Tp
  __value_type_or_identity_impl(float);

template <typename _Tp>
  using __value_type_or_identity_t
    = decltype(__value_type_or_identity_impl<_Tp>(int()));

// }}}
// __is_vectorizable {{{
template <typename _Tp>
  struct __is_vectorizable : public is_arithmetic<_Tp> {};

template <>
  struct __is_vectorizable<bool> : public false_type {};

template <typename _Tp>
  inline constexpr bool __is_vectorizable_v = __is_vectorizable<_Tp>::value;

// Deduces to a vectorizable type
template <typename _Tp, typename = enable_if_t<__is_vectorizable_v<_Tp>>>
  using _Vectorizable = _Tp;

// }}}
// _LoadStorePtr / __is_possible_loadstore_conversion {{{
template <typename _Ptr, typename _ValueType>
  struct __is_possible_loadstore_conversion
  : conjunction<__is_vectorizable<_Ptr>, __is_vectorizable<_ValueType>> {};

template <>
  struct __is_possible_loadstore_conversion<bool, bool> : true_type {};

// Deduces to a type allowed for load/store with the given value type.
template <typename _Ptr, typename _ValueType,
          typename = enable_if_t<
            __is_possible_loadstore_conversion<_Ptr, _ValueType>::value>>
  using _LoadStorePtr = _Ptr;

// }}}
// __is_bitmask{{{
template <typename _Tp, typename = void_t<>>
  struct __is_bitmask : false_type {};

template <typename _Tp>
  inline constexpr bool __is_bitmask_v = __is_bitmask<_Tp>::value;

// the __mmaskXX case:
template <typename _Tp>
  struct __is_bitmask<_Tp,
    void_t<decltype(declval<unsigned&>() = declval<_Tp>() & 1u)>>
  : true_type {};

// }}}
// __int_for_sizeof{{{
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wpedantic"
template <size_t _Bytes>
  constexpr auto
  __int_for_sizeof()
  {
    if constexpr (_Bytes == sizeof(int))
      return int();
#ifdef __clang__
    else if constexpr (_Bytes == sizeof(char))
      return char();
#else
    else if constexpr (_Bytes == sizeof(_SChar))
      return _SChar();
#endif
    else if constexpr (_Bytes == sizeof(short))
      return short();
#ifndef __clang__
    else if constexpr (_Bytes == sizeof(long))
      return long();
#endif
    else if constexpr (_Bytes == sizeof(_LLong))
      return _LLong();
#ifdef __SIZEOF_INT128__
    else if constexpr (_Bytes == sizeof(__int128))
      return __int128();
#endif // __SIZEOF_INT128__
    else if constexpr (_Bytes % sizeof(int) == 0)
      {
        constexpr size_t _Np = _Bytes / sizeof(int);
        struct _Ip
        {
          int _M_data[_Np];

          _GLIBCXX_SIMD_INTRINSIC constexpr _Ip
          operator&(_Ip __rhs) const
          {
            return __generate_from_n_evaluations<_Np, _Ip>(
                     [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
                       return __rhs._M_data[__i] & _M_data[__i];
                     });
          }

          _GLIBCXX_SIMD_INTRINSIC constexpr _Ip
          operator|(_Ip __rhs) const
          {
            return __generate_from_n_evaluations<_Np, _Ip>(
                     [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
                       return __rhs._M_data[__i] | _M_data[__i];
                     });
          }

          _GLIBCXX_SIMD_INTRINSIC constexpr _Ip
          operator^(_Ip __rhs) const
          {
            return __generate_from_n_evaluations<_Np, _Ip>(
                     [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
                       return __rhs._M_data[__i] ^ _M_data[__i];
                     });
          }

          _GLIBCXX_SIMD_INTRINSIC constexpr _Ip
          operator~() const
          {
            return __generate_from_n_evaluations<_Np, _Ip>(
                     [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA { return ~_M_data[__i]; });
          }
        };
        return _Ip{};
      }
    else
      static_assert(_Bytes != _Bytes, "this should be unreachable");
  }
#pragma GCC diagnostic pop

template <typename _Tp>
  using __int_for_sizeof_t = decltype(__int_for_sizeof<sizeof(_Tp)>());

template <size_t _Np>
  using __int_with_sizeof_t = decltype(__int_for_sizeof<_Np>());

// }}}
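// Editor's illustration (not part of the original header), assuming a typical
// LP64 target:
//   __int_for_sizeof_t<float>   -> int       (4 bytes)
//   __int_for_sizeof_t<double>  -> long      (8 bytes)
//   __int_with_sizeof_t<16>     -> __int128  (if __SIZEOF_INT128__ is defined)
// For sizes with no matching integer, the struct _Ip of ints returned above
// still provides the &, |, ^ and ~ operators the mask code needs.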
// __is_fixed_size_abi{{{
template <typename _Tp>
  struct __is_fixed_size_abi : false_type {};

template <int _Np>
  struct __is_fixed_size_abi<simd_abi::fixed_size<_Np>> : true_type {};

template <typename _Tp>
  inline constexpr bool __is_fixed_size_abi_v = __is_fixed_size_abi<_Tp>::value;

// }}}
// __is_scalar_abi {{{
template <typename _Abi>
  constexpr bool
  __is_scalar_abi()
  { return is_same_v<simd_abi::scalar, _Abi>; }

// }}}
// __abi_bytes_v {{{
template <template <int> class _Abi, int _Bytes>
  constexpr int
  __abi_bytes_impl(_Abi<_Bytes>*)
  { return _Bytes; }

template <typename _Tp>
  constexpr int
  __abi_bytes_impl(_Tp*)
  { return -1; }

template <typename _Abi>
  inline constexpr int __abi_bytes_v
    = __abi_bytes_impl(static_cast<_Abi*>(nullptr));

// }}}
// __is_builtin_bitmask_abi {{{
template <typename _Abi>
  constexpr bool
  __is_builtin_bitmask_abi()
  { return is_same_v<simd_abi::_VecBltnBtmsk<__abi_bytes_v<_Abi>>, _Abi>; }

// }}}
// __is_sse_abi {{{
template <typename _Abi>
  constexpr bool
  __is_sse_abi()
  {
    constexpr auto _Bytes = __abi_bytes_v<_Abi>;
    return _Bytes <= 16 && is_same_v<simd_abi::_VecBuiltin<_Bytes>, _Abi>;
  }

// }}}
// __is_avx_abi {{{
template <typename _Abi>
  constexpr bool
  __is_avx_abi()
  {
    constexpr auto _Bytes = __abi_bytes_v<_Abi>;
    return _Bytes > 16 && _Bytes <= 32
             && is_same_v<simd_abi::_VecBuiltin<_Bytes>, _Abi>;
  }

// }}}
// __is_avx512_abi {{{
template <typename _Abi>
  constexpr bool
  __is_avx512_abi()
  {
    constexpr auto _Bytes = __abi_bytes_v<_Abi>;
    return _Bytes <= 64 && is_same_v<simd_abi::_Avx512<_Bytes>, _Abi>;
  }

// }}}
// __is_neon_abi {{{
template <typename _Abi>
  constexpr bool
  __is_neon_abi()
  {
    constexpr auto _Bytes = __abi_bytes_v<_Abi>;
    return _Bytes <= 16 && is_same_v<simd_abi::_VecBuiltin<_Bytes>, _Abi>;
  }

// }}}
// __is_sve_abi {{{
template <typename _Abi>
  constexpr bool
  __is_sve_abi()
  {
    constexpr auto _Bytes = __abi_bytes_v<_Abi>;
    return _Bytes <= __sve_vectorized_size_bytes && is_same_v<simd_abi::_Sve<_Bytes>, _Abi>;
  }

// }}}
// __make_dependent_t {{{
template <typename, typename _Up>
  struct __make_dependent
  { using type = _Up; };

template <typename _Tp, typename _Up>
  using __make_dependent_t = typename __make_dependent<_Tp, _Up>::type;

// }}}
// ^^^ ---- type traits ---- ^^^

// __invoke_ub{{{
template <typename... _Args>
  [[noreturn]] _GLIBCXX_SIMD_ALWAYS_INLINE void
  __invoke_ub([[maybe_unused]] const char* __msg, [[maybe_unused]] const _Args&... __args)
  {
#ifdef _GLIBCXX_DEBUG_UB
    __builtin_fprintf(stderr, __msg, __args...);
    __builtin_trap();
#else
    __builtin_unreachable();
#endif
  }

// }}}
// __assert_unreachable{{{
template <typename _Tp>
  struct __assert_unreachable
  { static_assert(!is_same_v<_Tp, _Tp>, "this should be unreachable"); };

// }}}
// __size_or_zero_v {{{
template <typename _Tp, typename _Ap, size_t _Np = simd_size<_Tp, _Ap>::value>
  constexpr size_t
  __size_or_zero_dispatch(int)
  { return _Np; }

template <typename _Tp, typename _Ap>
  constexpr size_t
  __size_or_zero_dispatch(float)
  { return 0; }

template <typename _Tp, typename _Ap>
  inline constexpr size_t __size_or_zero_v
    = __size_or_zero_dispatch<_Tp, _Ap>(0);

// }}}
// __div_roundup {{{
inline constexpr size_t
__div_roundup(size_t __a, size_t __b)
{ return (__a + __b - 1) / __b; }

// }}}
// _ExactBool{{{
class _ExactBool
{
  const bool _M_data;

public:
  _GLIBCXX_SIMD_INTRINSIC constexpr
  _ExactBool(bool __b) : _M_data(__b) {}

  _ExactBool(int) = delete;

  _GLIBCXX_SIMD_INTRINSIC constexpr
  operator bool() const
  { return _M_data; }
};

// }}}
// __may_alias{{{
/**@internal
 * Helper __may_alias<_Tp> that turns _Tp into the type to be used for an
 * aliasing pointer. This adds the __may_alias attribute to _Tp (with compilers
 * that support it).
 */
template <typename _Tp>
  using __may_alias [[__gnu__::__may_alias__]] = _Tp;

// }}}
// _UnsupportedBase {{{
// simd and simd_mask base for unsupported <_Tp, _Abi>
struct _UnsupportedBase
{
  _UnsupportedBase() = delete;
  _UnsupportedBase(const _UnsupportedBase&) = delete;
  _UnsupportedBase& operator=(const _UnsupportedBase&) = delete;
  ~_UnsupportedBase() = delete;
};

// }}}
// _InvalidTraits {{{
/**
 * @internal
 * Defines the implementation of a given <_Tp, _Abi>.
 *
 * Implementations must ensure that only valid <_Tp, _Abi> instantiations are
 * possible. Static assertions in the type definition do not suffice. It is
 * important that SFINAE works.
 */
struct _InvalidTraits
{
  using _IsValid = false_type;
  using _SimdBase = _UnsupportedBase;
  using _MaskBase = _UnsupportedBase;

  static constexpr size_t _S_full_size = 0;
  static constexpr bool _S_is_partial = false;

  static constexpr size_t _S_simd_align = 1;
  struct _SimdImpl;
  struct _SimdMember {};
  struct _SimdCastType;

  static constexpr size_t _S_mask_align = 1;
  struct _MaskImpl;
  struct _MaskMember {};
  struct _MaskCastType;
};

// }}}
// _SimdTraits {{{
template <typename _Tp, typename _Abi, typename = void_t<>>
  struct _SimdTraits : _InvalidTraits {};

// }}}
// __private_init, __bitset_init{{{
/**
 * @internal
 * Tag used for private init constructor of simd and simd_mask
 */
inline constexpr struct _PrivateInit {} __private_init = {};

inline constexpr struct _BitsetInit {} __bitset_init = {};

// }}}
// __is_narrowing_conversion<_From, _To>{{{
template <typename _From, typename _To, bool = is_arithmetic_v<_From>,
          bool = is_arithmetic_v<_To>>
  struct __is_narrowing_conversion;

// ignore "signed/unsigned mismatch" in the following trait.
// The implicit conversions will do the right thing here.
template <typename _From, typename _To>
  struct __is_narrowing_conversion<_From, _To, true, true>
  : public __bool_constant<(
      __digits_v<_From> > __digits_v<_To>
        || __finite_max_v<_From> > __finite_max_v<_To>
        || __finite_min_v<_From> < __finite_min_v<_To>
        || (is_signed_v<_From> && is_unsigned_v<_To>))> {};

template <typename _Tp>
  struct __is_narrowing_conversion<_Tp, bool, true, true>
  : public true_type {};

template <>
  struct __is_narrowing_conversion<bool, bool, true, true>
  : public false_type {};

template <typename _Tp>
  struct __is_narrowing_conversion<_Tp, _Tp, true, true>
  : public false_type {};

template <typename _From, typename _To>
  struct __is_narrowing_conversion<_From, _To, false, true>
  : public negation<is_convertible<_From, _To>> {};

// }}}
// __converts_to_higher_integer_rank{{{
template <typename _From, typename _To, bool = (sizeof(_From) < sizeof(_To))>
  struct __converts_to_higher_integer_rank : public true_type {};

// this may fail for char -> short if sizeof(char) == sizeof(short)
template <typename _From, typename _To>
  struct __converts_to_higher_integer_rank<_From, _To, false>
  : public is_same<decltype(declval<_From>() + declval<_To>()), _To> {};

// }}}
// __data(simd/simd_mask) {{{
template <typename _Tp, typename _Ap>
  _GLIBCXX_SIMD_INTRINSIC constexpr const auto&
  __data(const simd<_Tp, _Ap>& __x);

template <typename _Tp, typename _Ap>
  _GLIBCXX_SIMD_INTRINSIC constexpr auto&
  __data(simd<_Tp, _Ap>& __x);

template <typename _Tp, typename _Ap>
  _GLIBCXX_SIMD_INTRINSIC constexpr const auto&
  __data(const simd_mask<_Tp, _Ap>& __x);

template <typename _Tp, typename _Ap>
  _GLIBCXX_SIMD_INTRINSIC constexpr auto&
  __data(simd_mask<_Tp, _Ap>& __x);

// }}}
// _SimdConverter {{{
template <typename _FromT, typename _FromA, typename _ToT, typename _ToA,
          typename = void>
  struct _SimdConverter;

template <typename _Tp, typename _Ap>
  struct _SimdConverter<_Tp, _Ap, _Tp, _Ap, void>
  {
    template <typename _Up>
      _GLIBCXX_SIMD_INTRINSIC const _Up&
      operator()(const _Up& __x)
      { return __x; }
  };

// }}}
// __to_value_type_or_member_type {{{
template <typename _V>
  _GLIBCXX_SIMD_INTRINSIC constexpr auto
  __to_value_type_or_member_type(const _V& __x) -> decltype(__data(__x))
  { return __data(__x); }

template <typename _V>
  _GLIBCXX_SIMD_INTRINSIC constexpr const typename _V::value_type&
  __to_value_type_or_member_type(const typename _V::value_type& __x)
  { return __x; }

// }}}
// __bool_storage_member_type{{{
template <size_t _Size>
  struct __bool_storage_member_type;

template <size_t _Size>
  using __bool_storage_member_type_t =
    typename __bool_storage_member_type<_Size>::type;

// }}}
// _SimdTuple {{{
// why not tuple?
// 1. tuple gives no guarantee about the storage order, but I require
//    storage equivalent to array<_Tp, _Np>
// 2. direct access to the element type (first template argument)
// 3. enforces equal element type, only different _Abi types are allowed
template <typename _Tp, typename... _Abis>
  struct _SimdTuple;

//}}}
// __fixed_size_storage_t {{{
template <typename _Tp, int _Np>
  struct __fixed_size_storage;

template <typename _Tp, int _Np>
  using __fixed_size_storage_t = typename __fixed_size_storage<_Tp, _Np>::type;

// }}}
// _SimdWrapper fwd decl{{{
template <typename _Tp, size_t _Size, typename = void_t<>>
  struct _SimdWrapper;

template <typename _Tp>
  using _SimdWrapper8 = _SimdWrapper<_Tp, 8 / sizeof(_Tp)>;
template <typename _Tp>
  using _SimdWrapper16 = _SimdWrapper<_Tp, 16 / sizeof(_Tp)>;
template <typename _Tp>
  using _SimdWrapper32 = _SimdWrapper<_Tp, 32 / sizeof(_Tp)>;
template <typename _Tp>
  using _SimdWrapper64 = _SimdWrapper<_Tp, 64 / sizeof(_Tp)>;

template <typename _Tp, size_t _Width>
  struct _SveSimdWrapper;

// }}}
// __is_simd_wrapper {{{
template <typename _Tp>
  struct __is_simd_wrapper : false_type {};

template <typename _Tp, size_t _Np>
  struct __is_simd_wrapper<_SimdWrapper<_Tp, _Np>> : true_type {};

template <typename _Tp>
  inline constexpr bool __is_simd_wrapper_v = __is_simd_wrapper<_Tp>::value;

// }}}
// _BitOps {{{
struct _BitOps
{
  // _S_bit_iteration {{{
  template <typename _Tp, typename _Fp>
    static void
    _S_bit_iteration(_Tp __mask, _Fp&& __f)
    {
      static_assert(sizeof(_ULLong) >= sizeof(_Tp));
      conditional_t<sizeof(_Tp) <= sizeof(_UInt), _UInt, _ULLong> __k;
      if constexpr (is_convertible_v<_Tp, decltype(__k)>)
        __k = __mask;
      else
        __k = __mask.to_ullong();
      while (__k)
        {
          __f(std::__countr_zero(__k));
          __k &= (__k - 1);
        }
    }

  //}}}
};

//}}}
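// Editor's example (not part of the original header): _S_bit_iteration calls
// __f once per set bit with the bit's index, least significant first:
//
//   _BitOps::_S_bit_iteration(0b10110u, [](auto __i) {
//     // __i takes the values 1, 2, 4
//   });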
// __increment, __decrement {{{
template <typename _Tp = void>
  struct __increment
  { constexpr _Tp operator()(_Tp __a) const { return ++__a; } };

template <>
  struct __increment<void>
  {
    template <typename _Tp>
      constexpr _Tp
      operator()(_Tp __a) const
      { return ++__a; }
  };

template <typename _Tp = void>
  struct __decrement
  { constexpr _Tp operator()(_Tp __a) const { return --__a; } };

template <>
  struct __decrement<void>
  {
    template <typename _Tp>
      constexpr _Tp
      operator()(_Tp __a) const
      { return --__a; }
  };

// }}}
// _ValuePreserving(OrInt) {{{
template <typename _From, typename _To,
          typename = enable_if_t<negation<
            __is_narrowing_conversion<__remove_cvref_t<_From>, _To>>::value>>
  using _ValuePreserving = _From;

template <typename _From, typename _To,
          typename _DecayedFrom = __remove_cvref_t<_From>,
          typename = enable_if_t<conjunction<
            is_convertible<_From, _To>,
            disjunction<
              is_same<_DecayedFrom, _To>, is_same<_DecayedFrom, int>,
              conjunction<is_same<_DecayedFrom, _UInt>, is_unsigned<_To>>,
              negation<__is_narrowing_conversion<_DecayedFrom, _To>>>>::value>>
  using _ValuePreservingOrInt = _From;

// }}}
// __intrinsic_type {{{
template <typename _Tp, size_t _Bytes, typename = void_t<>>
  struct __intrinsic_type;

template <typename _Tp, size_t _Size>
  using __intrinsic_type_t =
    typename __intrinsic_type<_Tp, _Size * sizeof(_Tp)>::type;

template <typename _Tp>
  using __intrinsic_type2_t = typename __intrinsic_type<_Tp, 2>::type;
template <typename _Tp>
  using __intrinsic_type4_t = typename __intrinsic_type<_Tp, 4>::type;
template <typename _Tp>
  using __intrinsic_type8_t = typename __intrinsic_type<_Tp, 8>::type;
template <typename _Tp>
  using __intrinsic_type16_t = typename __intrinsic_type<_Tp, 16>::type;
template <typename _Tp>
  using __intrinsic_type32_t = typename __intrinsic_type<_Tp, 32>::type;
template <typename _Tp>
  using __intrinsic_type64_t = typename __intrinsic_type<_Tp, 64>::type;

// }}}
// _BitMask {{{
template <size_t _Np, bool _Sanitized = false>
  struct _BitMask;

template <size_t _Np, bool _Sanitized>
  struct __is_bitmask<_BitMask<_Np, _Sanitized>, void> : true_type {};

template <size_t _Np>
  using _SanitizedBitMask = _BitMask<_Np, true>;

template <size_t _Np, bool _Sanitized>
  struct _BitMask
  {
    static_assert(_Np > 0);

    static constexpr size_t _NBytes = __div_roundup(_Np, __CHAR_BIT__);

    using _Tp = conditional_t<_Np == 1, bool,
                              make_unsigned_t<__int_with_sizeof_t<std::min(
                                sizeof(_ULLong), std::__bit_ceil(_NBytes))>>>;

    static constexpr int _S_array_size = __div_roundup(_NBytes, sizeof(_Tp));

    _Tp _M_bits[_S_array_size];

    static constexpr int _S_unused_bits
      = _Np == 1 ? 0 : _S_array_size * sizeof(_Tp) * __CHAR_BIT__ - _Np;

    static constexpr _Tp _S_bitmask = +_Tp(~_Tp()) >> _S_unused_bits;

    constexpr _BitMask() noexcept = default;

    constexpr _BitMask(unsigned long long __x) noexcept
    : _M_bits{static_cast<_Tp>(__x)} {}

    _BitMask(bitset<_Np> __x) noexcept : _BitMask(__x.to_ullong()) {}

    constexpr _BitMask(const _BitMask&) noexcept = default;

    template <bool _RhsSanitized, typename = enable_if_t<_RhsSanitized == false
                                                           && _Sanitized == true>>
      constexpr _BitMask(const _BitMask<_Np, _RhsSanitized>& __rhs) noexcept
      : _BitMask(__rhs._M_sanitized()) {}

    constexpr operator _SimdWrapper<bool, _Np>() const noexcept
    {
      static_assert(_S_array_size == 1);
      return _M_bits[0];
    }

    // precondition: is sanitized
    constexpr _Tp
    _M_to_bits() const noexcept
    {
      static_assert(_S_array_size == 1);
      return _M_bits[0];
    }

    // precondition: is sanitized
    constexpr unsigned long long
    to_ullong() const noexcept
    {
      static_assert(_S_array_size == 1);
      return _M_bits[0];
    }

    // precondition: is sanitized
    constexpr unsigned long
    to_ulong() const noexcept
    {
      static_assert(_S_array_size == 1);
      return _M_bits[0];
    }

    constexpr bitset<_Np>
    _M_to_bitset() const noexcept
    {
      static_assert(_S_array_size == 1);
      return _M_bits[0];
    }

    constexpr decltype(auto)
    _M_sanitized() const noexcept
    {
      if constexpr (_Sanitized)
        return *this;
      else if constexpr (_Np == 1)
        return _SanitizedBitMask<_Np>(_M_bits[0]);
      else
        {
          _SanitizedBitMask<_Np> __r = {};
          for (int __i = 0; __i < _S_array_size; ++__i)
            __r._M_bits[__i] = _M_bits[__i];
          if constexpr (_S_unused_bits > 0)
            __r._M_bits[_S_array_size - 1] &= _S_bitmask;
          return __r;
        }
    }

    template <size_t _Mp, bool _LSanitized>
      constexpr _BitMask<_Np + _Mp, _Sanitized>
      _M_prepend(_BitMask<_Mp, _LSanitized> __lsb) const noexcept
      {
        constexpr size_t _RN = _Np + _Mp;
        using _Rp = _BitMask<_RN, _Sanitized>;
        if constexpr (_Rp::_S_array_size == 1)
          {
            _Rp __r{{_M_bits[0]}};
            __r._M_bits[0] <<= _Mp;
            __r._M_bits[0] |= __lsb._M_sanitized()._M_bits[0];
            return __r;
          }
        else
          __assert_unreachable<_Rp>();
      }

    // Return a new _BitMask with size _NewSize while dropping _DropLsb least
    // significant bits. If the operation implicitly produces a sanitized bitmask,
    // the result type will have _Sanitized set.
    template <size_t _DropLsb, size_t _NewSize = _Np - _DropLsb>
      constexpr auto
      _M_extract() const noexcept
      {
        static_assert(_Np > _DropLsb);
        static_assert(_DropLsb + _NewSize <= sizeof(_ULLong) * __CHAR_BIT__,
                      "not implemented for bitmasks larger than one ullong");
        if constexpr (_NewSize == 1)
          // must sanitize because the return _Tp is bool
          return _SanitizedBitMask<1>(_M_bits[0] & (_Tp(1) << _DropLsb));
        else
          return _BitMask<_NewSize,
                          ((_NewSize + _DropLsb == sizeof(_Tp) * __CHAR_BIT__
                              && _NewSize + _DropLsb <= _Np)
                             || ((_Sanitized || _Np == sizeof(_Tp) * __CHAR_BIT__)
                                   && _NewSize + _DropLsb >= _Np))>(_M_bits[0]
                                                                      >> _DropLsb);
      }

    // True if all bits are set. Implicitly sanitizes if _Sanitized == false.
    constexpr bool
    all() const noexcept
    {
      if constexpr (_Np == 1)
        return _M_bits[0];
      else if constexpr (!_Sanitized)
        return _M_sanitized().all();
      else
        {
          constexpr _Tp __allbits = ~_Tp();
          for (int __i = 0; __i < _S_array_size - 1; ++__i)
            if (_M_bits[__i] != __allbits)
              return false;
          return _M_bits[_S_array_size - 1] == _S_bitmask;
        }
    }

    // True if at least one bit is set. Implicitly sanitizes if _Sanitized ==
    // false.
    constexpr bool
    any() const noexcept
    {
      if constexpr (_Np == 1)
        return _M_bits[0];
      else if constexpr (!_Sanitized)
        return _M_sanitized().any();
      else
        {
          for (int __i = 0; __i < _S_array_size - 1; ++__i)
            if (_M_bits[__i] != 0)
              return true;
          return _M_bits[_S_array_size - 1] != 0;
        }
    }

    // True if no bit is set. Implicitly sanitizes if _Sanitized == false.
    constexpr bool
    none() const noexcept
    {
      if constexpr (_Np == 1)
        return !_M_bits[0];
      else if constexpr (!_Sanitized)
        return _M_sanitized().none();
      else
        {
          for (int __i = 0; __i < _S_array_size - 1; ++__i)
            if (_M_bits[__i] != 0)
              return false;
          return _M_bits[_S_array_size - 1] == 0;
        }
    }

    // Returns the number of set bits. Implicitly sanitizes if _Sanitized ==
    // false.
    constexpr int
    count() const noexcept
    {
      if constexpr (_Np == 1)
        return _M_bits[0];
      else if constexpr (!_Sanitized)
        return _M_sanitized().count();
      else
        {
          int __result = __builtin_popcountll(_M_bits[0]);
          for (int __i = 1; __i < _S_array_size; ++__i)
            __result += __builtin_popcountll(_M_bits[__i]);
          return __result;
        }
    }

    // Returns the bit at offset __i as bool.
    constexpr bool
    operator[](size_t __i) const noexcept
    {
      if constexpr (_Np == 1)
        return _M_bits[0];
      else if constexpr (_S_array_size == 1)
        return (_M_bits[0] >> __i) & 1;
      else
        {
          const size_t __j = __i / (sizeof(_Tp) * __CHAR_BIT__);
          const size_t __shift = __i % (sizeof(_Tp) * __CHAR_BIT__);
          return (_M_bits[__j] >> __shift) & 1;
        }
    }

    template <size_t __i>
      constexpr bool
      operator[](_SizeConstant<__i>) const noexcept
      {
        static_assert(__i < _Np);
        constexpr size_t __j = __i / (sizeof(_Tp) * __CHAR_BIT__);
        constexpr size_t __shift = __i % (sizeof(_Tp) * __CHAR_BIT__);
        return static_cast<bool>(_M_bits[__j] & (_Tp(1) << __shift));
      }

    // Set the bit at offset __i to __x.
    constexpr void
    set(size_t __i, bool __x) noexcept
    {
      if constexpr (_Np == 1)
        _M_bits[0] = __x;
      else if constexpr (_S_array_size == 1)
        {
          _M_bits[0] &= ~_Tp(_Tp(1) << __i);
          _M_bits[0] |= _Tp(_Tp(__x) << __i);
        }
      else
        {
          const size_t __j = __i / (sizeof(_Tp) * __CHAR_BIT__);
          const size_t __shift = __i % (sizeof(_Tp) * __CHAR_BIT__);
          _M_bits[__j] &= ~_Tp(_Tp(1) << __shift);
          _M_bits[__j] |= _Tp(_Tp(__x) << __shift);
        }
    }

    template <size_t __i>
      constexpr void
      set(_SizeConstant<__i>, bool __x) noexcept
      {
        static_assert(__i < _Np);
        if constexpr (_Np == 1)
          _M_bits[0] = __x;
        else
          {
            constexpr size_t __j = __i / (sizeof(_Tp) * __CHAR_BIT__);
            constexpr size_t __shift = __i % (sizeof(_Tp) * __CHAR_BIT__);
            constexpr _Tp __mask = ~_Tp(_Tp(1) << __shift);
            _M_bits[__j] &= __mask;
            _M_bits[__j] |= _Tp(_Tp(__x) << __shift);
          }
      }

    // Inverts all bits. Sanitized input leads to sanitized output.
    constexpr _BitMask
    operator~() const noexcept
    {
      if constexpr (_Np == 1)
        return !_M_bits[0];
      else
        {
          _BitMask __result{};
          for (int __i = 0; __i < _S_array_size - 1; ++__i)
            __result._M_bits[__i] = ~_M_bits[__i];
          if constexpr (_Sanitized)
            __result._M_bits[_S_array_size - 1]
              = _M_bits[_S_array_size - 1] ^ _S_bitmask;
          else
            __result._M_bits[_S_array_size - 1] = ~_M_bits[_S_array_size - 1];
          return __result;
        }
    }

    constexpr _BitMask&
    operator^=(const _BitMask& __b) & noexcept
    {
      __execute_n_times<_S_array_size>(
        [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA { _M_bits[__i] ^= __b._M_bits[__i]; });
      return *this;
    }

    constexpr _BitMask&
    operator|=(const _BitMask& __b) & noexcept
    {
      __execute_n_times<_S_array_size>(
        [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA { _M_bits[__i] |= __b._M_bits[__i]; });
      return *this;
    }

    constexpr _BitMask&
    operator&=(const _BitMask& __b) & noexcept
    {
      __execute_n_times<_S_array_size>(
        [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA { _M_bits[__i] &= __b._M_bits[__i]; });
      return *this;
    }

    friend constexpr _BitMask
    operator^(const _BitMask& __a, const _BitMask& __b) noexcept
    {
      _BitMask __r = __a;
      __r ^= __b;
      return __r;
    }

    friend constexpr _BitMask
    operator|(const _BitMask& __a, const _BitMask& __b) noexcept
    {
      _BitMask __r = __a;
      __r |= __b;
      return __r;
    }

    friend constexpr _BitMask
    operator&(const _BitMask& __a, const _BitMask& __b) noexcept
    {
      _BitMask __r = __a;
      __r &= __b;
      return __r;
    }

    _GLIBCXX_SIMD_INTRINSIC
    constexpr bool
    _M_is_constprop() const
    {
      if constexpr (_S_array_size == 1)
        return __builtin_constant_p(_M_bits[0]);
      else
        {
          for (int __i = 0; __i < _S_array_size; ++__i)
            if (!__builtin_constant_p(_M_bits[__i]))
              return false;
          return true;
        }
    }
  };

// }}}
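// Editor's example (not part of the original header): a _BitMask is
// "sanitized" when its unused high bits are known to be zero; all()/any()/
// none()/count() sanitize on demand:
//
//   _BitMask<5> __k(0b10111);                       // not yet sanitized
//   bool __b = __k.all();                           // sanitizes, then tests: false
//   _SanitizedBitMask<5> __s = __k._M_sanitized();  // to_ullong() now valid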

// vvv ---- builtin vector types [[gnu::vector_size(N)]] and operations ---- vvv
// __min_vector_size {{{
template <typename _Tp = void>
  static inline constexpr int __min_vector_size = 2 * sizeof(_Tp);

#if _GLIBCXX_SIMD_HAVE_NEON
template <>
  inline constexpr int __min_vector_size<void> = 8;
#else
template <>
  inline constexpr int __min_vector_size<void> = 16;
#endif

// }}}
// __vector_type {{{
template <typename _Tp, size_t _Np, typename = void>
  struct __vector_type_n {};

// substitution failure for 0-element case
template <typename _Tp>
  struct __vector_type_n<_Tp, 0, void> {};

// special case 1-element to be _Tp itself
template <typename _Tp>
  struct __vector_type_n<_Tp, 1, enable_if_t<__is_vectorizable_v<_Tp>>>
  { using type = _Tp; };

// else, use GNU-style builtin vector types
template <typename _Tp, size_t _Np>
  struct __vector_type_n<_Tp, _Np, enable_if_t<__is_vectorizable_v<_Tp> && _Np >= 2>>
  {
    static constexpr size_t _S_Np2 = std::__bit_ceil(_Np * sizeof(_Tp));

    static constexpr size_t _S_Bytes =
#ifdef __i386__
      // Using [[gnu::vector_size(8)]] would wreak havoc on the FPU because
      // those objects are passed via MMX registers and nothing ever calls EMMS.
      _S_Np2 == 8 ? 16 :
#endif
      _S_Np2 < __min_vector_size<_Tp> ? __min_vector_size<_Tp>
                                      : _S_Np2;

    using type [[__gnu__::__vector_size__(_S_Bytes)]] = _Tp;
  };

template <typename _Tp, size_t _Bytes, size_t = _Bytes % sizeof(_Tp)>
  struct __vector_type;

template <typename _Tp, size_t _Bytes>
  struct __vector_type<_Tp, _Bytes, 0>
  : __vector_type_n<_Tp, _Bytes / sizeof(_Tp)> {};

template <typename _Tp, size_t _Size>
  using __vector_type_t = typename __vector_type_n<_Tp, _Size>::type;

template <typename _Tp>
  using __vector_type2_t = typename __vector_type<_Tp, 2>::type;
template <typename _Tp>
  using __vector_type4_t = typename __vector_type<_Tp, 4>::type;
template <typename _Tp>
  using __vector_type8_t = typename __vector_type<_Tp, 8>::type;
template <typename _Tp>
  using __vector_type16_t = typename __vector_type<_Tp, 16>::type;
template <typename _Tp>
  using __vector_type32_t = typename __vector_type<_Tp, 32>::type;
template <typename _Tp>
  using __vector_type64_t = typename __vector_type<_Tp, 64>::type;

// }}}
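// Editor's illustration (not part of the original header), assuming an
// x86-64 target where __min_vector_size<float> == 8:
//   __vector_type_t<float, 1>  -> float                      // scalar special case
//   __vector_type_t<float, 4>  -> float [[vector_size(16)]]
//   __vector_type_t<float, 3>  -> float [[vector_size(16)]]  // rounded up to bit_ceil
//   __vector_type16_t<int>     -> int   [[vector_size(16)]]  // fixed 16-byte variant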
// __is_vector_type {{{
template <typename _Tp, typename = void_t<>>
  struct __is_vector_type : false_type {};

template <typename _Tp>
  struct __is_vector_type<
    _Tp, void_t<typename __vector_type<
           remove_reference_t<decltype(declval<_Tp>()[0])>, sizeof(_Tp)>::type>>
  : is_same<_Tp, typename __vector_type<
                   remove_reference_t<decltype(declval<_Tp>()[0])>,
                   sizeof(_Tp)>::type> {};

template <typename _Tp>
  inline constexpr bool __is_vector_type_v = __is_vector_type<_Tp>::value;

// }}}
// __is_intrinsic_type {{{
#if _GLIBCXX_SIMD_HAVE_SSE_ABI
template <typename _Tp>
  using __is_intrinsic_type = __is_vector_type<_Tp>;
#else // not SSE (x86)
template <typename _Tp, typename = void_t<>>
  struct __is_intrinsic_type : false_type {};

template <typename _Tp>
  struct __is_intrinsic_type<
    _Tp, void_t<typename __intrinsic_type<
           remove_reference_t<decltype(declval<_Tp>()[0])>, sizeof(_Tp)>::type>>
  : is_same<_Tp, typename __intrinsic_type<
                   remove_reference_t<decltype(declval<_Tp>()[0])>,
                   sizeof(_Tp)>::type> {};
#endif

template <typename _Tp>
  inline constexpr bool __is_intrinsic_type_v = __is_intrinsic_type<_Tp>::value;

// }}}
// _VectorTraits{{{
template <typename _Tp, typename = void_t<>>
  struct _VectorTraitsImpl;

template <typename _Tp>
  struct _VectorTraitsImpl<_Tp, enable_if_t<__is_vector_type_v<_Tp>
                                              || __is_intrinsic_type_v<_Tp>>>
  {
    using type = _Tp;
    using value_type = remove_reference_t<decltype(declval<_Tp>()[0])>;
    static constexpr int _S_full_size = sizeof(_Tp) / sizeof(value_type);
    using _Wrapper = _SimdWrapper<value_type, _S_full_size>;
    template <typename _Up, int _W = _S_full_size>
      static constexpr bool _S_is
        = is_same_v<value_type, _Up> && _W == _S_full_size;
  };

template <typename _Tp, size_t _Np>
  struct _VectorTraitsImpl<_SimdWrapper<_Tp, _Np>,
                           void_t<__vector_type_t<_Tp, _Np>>>
  {
    using type = __vector_type_t<_Tp, _Np>;
    using value_type = _Tp;
    static constexpr int _S_full_size = sizeof(type) / sizeof(value_type);
    using _Wrapper = _SimdWrapper<_Tp, _Np>;
    static constexpr bool _S_is_partial = (_Np < _S_full_size);
    static constexpr int _S_partial_width = _Np;
    template <typename _Up, int _W = _S_full_size>
      static constexpr bool _S_is
        = is_same_v<value_type, _Up> && _W == _S_full_size;
  };

template <typename _Tp, typename = typename _VectorTraitsImpl<_Tp>::type>
  using _VectorTraits = _VectorTraitsImpl<_Tp>;

// }}}
// __as_vector{{{
template <typename _V>
  _GLIBCXX_SIMD_INTRINSIC constexpr auto
  __as_vector(_V __x)
  {
    if constexpr (__is_vector_type_v<_V>)
      return __x;
    else if constexpr (is_simd<_V>::value || is_simd_mask<_V>::value)
      return __data(__x)._M_data;
    else if constexpr (__is_vectorizable_v<_V>)
      return __vector_type_t<_V, 2>{__x};
    else
      return __x._M_data;
  }

// }}}
// __as_wrapper{{{
template <size_t _Np = 0, typename _V>
  _GLIBCXX_SIMD_INTRINSIC constexpr auto
  __as_wrapper(_V __x)
  {
    if constexpr (__is_vector_type_v<_V>)
      return _SimdWrapper<typename _VectorTraits<_V>::value_type,
                          (_Np > 0 ? _Np : _VectorTraits<_V>::_S_full_size)>(__x);
    else if constexpr (is_simd<_V>::value || is_simd_mask<_V>::value)
      {
        static_assert(_V::size() == _Np);
        return __data(__x);
      }
    else
      {
        static_assert(_V::_S_size == _Np);
        return __x;
      }
  }

// }}}
// __intrin_bitcast{{{
template <typename _To, typename _From>
  _GLIBCXX_SIMD_INTRINSIC constexpr _To
  __intrin_bitcast(_From __v)
  {
    static_assert((__is_vector_type_v<_From> || __is_intrinsic_type_v<_From>)
                    && (__is_vector_type_v<_To> || __is_intrinsic_type_v<_To>));
    if constexpr (sizeof(_To) == sizeof(_From))
      return reinterpret_cast<_To>(__v);
    else if constexpr (sizeof(_From) > sizeof(_To))
      if constexpr (sizeof(_To) >= 16)
        return reinterpret_cast<const __may_alias<_To>&>(__v);
      else
        {
          _To __r;
          __builtin_memcpy(&__r, &__v, sizeof(_To));
          return __r;
        }
#if _GLIBCXX_SIMD_X86INTRIN && !defined __clang__
    else if constexpr (__have_avx && sizeof(_From) == 16 && sizeof(_To) == 32)
      return reinterpret_cast<_To>(__builtin_ia32_ps256_ps(
               reinterpret_cast<__vector_type_t<float, 4>>(__v)));
    else if constexpr (__have_avx512f && sizeof(_From) == 16
                         && sizeof(_To) == 64)
      return reinterpret_cast<_To>(__builtin_ia32_ps512_ps(
               reinterpret_cast<__vector_type_t<float, 4>>(__v)));
    else if constexpr (__have_avx512f && sizeof(_From) == 32
                         && sizeof(_To) == 64)
      return reinterpret_cast<_To>(__builtin_ia32_ps512_256ps(
               reinterpret_cast<__vector_type_t<float, 8>>(__v)));
#endif // _GLIBCXX_SIMD_X86INTRIN
    else if constexpr (sizeof(__v) <= 8)
      return reinterpret_cast<_To>(
               __vector_type_t<__int_for_sizeof_t<_From>, sizeof(_To) / sizeof(_From)>{
                 reinterpret_cast<__int_for_sizeof_t<_From>>(__v)});
    else
      {
        static_assert(sizeof(_To) > sizeof(_From));
        _To __r = {};
        __builtin_memcpy(&__r, &__v, sizeof(_From));
        return __r;
      }
  }

// }}}
// __vector_bitcast{{{
template <typename _To, size_t _NN = 0, typename _From,
          typename _FromVT = _VectorTraits<_From>,
          size_t _Np = _NN == 0 ? sizeof(_From) / sizeof(_To) : _NN>
  _GLIBCXX_SIMD_INTRINSIC constexpr __vector_type_t<_To, _Np>
  __vector_bitcast(_From __x)
  {
    using _R = __vector_type_t<_To, _Np>;
    return __intrin_bitcast<_R>(__x);
  }

template <typename _To, size_t _NN = 0, typename _Tp, size_t _Nx,
          size_t _Np
            = _NN == 0 ? sizeof(_SimdWrapper<_Tp, _Nx>) / sizeof(_To) : _NN>
  _GLIBCXX_SIMD_INTRINSIC constexpr __vector_type_t<_To, _Np>
  __vector_bitcast(const _SimdWrapper<_Tp, _Nx>& __x)
  {
    static_assert(_Np > 1);
    return __intrin_bitcast<__vector_type_t<_To, _Np>>(__x._M_data);
  }

// }}}
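// Editor's example (not part of the original header): __vector_bitcast
// reinterprets the bytes of a builtin vector as another element type,
// delegating to __intrin_bitcast when the total sizes differ:
//
//   __vector_type_t<float, 4> __f = {1, 2, 3, 4};
//   auto __u = __vector_bitcast<unsigned>(__f);  // __vector_type_t<unsigned, 4>
//   // __u[0] == 0x3f800000, the bit pattern of 1.0f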
// __convert_x86 declarations {{{
#ifdef _GLIBCXX_SIMD_WORKAROUND_PR85048
template <typename _To, typename _Tp, typename _TVT = _VectorTraits<_Tp>>
  _To __convert_x86(_Tp);

template <typename _To, typename _Tp, typename _TVT = _VectorTraits<_Tp>>
  _To __convert_x86(_Tp, _Tp);

template <typename _To, typename _Tp, typename _TVT = _VectorTraits<_Tp>>
  _To __convert_x86(_Tp, _Tp, _Tp, _Tp);

template <typename _To, typename _Tp, typename _TVT = _VectorTraits<_Tp>>
  _To __convert_x86(_Tp, _Tp, _Tp, _Tp, _Tp, _Tp, _Tp, _Tp);

template <typename _To, typename _Tp, typename _TVT = _VectorTraits<_Tp>>
  _To __convert_x86(_Tp, _Tp, _Tp, _Tp, _Tp, _Tp, _Tp, _Tp, _Tp, _Tp, _Tp, _Tp,
                    _Tp, _Tp, _Tp, _Tp);
#endif // _GLIBCXX_SIMD_WORKAROUND_PR85048

//}}}
// __bit_cast {{{
template <typename _To, typename _From>
  _GLIBCXX_SIMD_INTRINSIC constexpr _To
  __bit_cast(const _From __x)
  {
#if __has_builtin(__builtin_bit_cast)
    return __builtin_bit_cast(_To, __x);
#else
    static_assert(sizeof(_To) == sizeof(_From));
    constexpr bool __to_is_vectorizable
      = is_arithmetic_v<_To> || is_enum_v<_To>;
    constexpr bool __from_is_vectorizable
      = is_arithmetic_v<_From> || is_enum_v<_From>;
    if constexpr (__is_vector_type_v<_To> && __is_vector_type_v<_From>)
      return reinterpret_cast<_To>(__x);
    else if constexpr (__is_vector_type_v<_To> && __from_is_vectorizable)
      {
        using _FV [[__gnu__::__vector_size__(sizeof(_From))]] = _From;
        return reinterpret_cast<_To>(_FV{__x});
      }
    else if constexpr (__to_is_vectorizable && __from_is_vectorizable)
      {
        using _TV [[__gnu__::__vector_size__(sizeof(_To))]] = _To;
        using _FV [[__gnu__::__vector_size__(sizeof(_From))]] = _From;
        return reinterpret_cast<_TV>(_FV{__x})[0];
      }
    else if constexpr (__to_is_vectorizable && __is_vector_type_v<_From>)
      {
        using _TV [[__gnu__::__vector_size__(sizeof(_To))]] = _To;
        return reinterpret_cast<_TV>(__x)[0];
      }
    else
      {
        _To __r;
        __builtin_memcpy(reinterpret_cast<char*>(&__r),
                         reinterpret_cast<const char*>(&__x), sizeof(_To));
        return __r;
      }
#endif
  }

// }}}
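// Editor's example (not part of the original header): unlike std::bit_cast,
// __bit_cast is usable in C++17, and it is constexpr wherever
// __builtin_bit_cast is available (GCC 11 and later):
//
//   constexpr unsigned __bits = __bit_cast<unsigned>(1.0f);  // 0x3f800000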
1806// __to_intrin {{{
1807template <typename _Tp, typename _TVT = _VectorTraits<_Tp>,
1808 typename _R = __intrinsic_type_t<typename _TVT::value_type, _TVT::_S_full_size>>
1809 _GLIBCXX_SIMD_INTRINSIC constexpr _R
1810 __to_intrin(_Tp __x)
1811 {
1812 static_assert(sizeof(__x) <= sizeof(_R),
1813 "__to_intrin may never drop values off the end");
1814 if constexpr (sizeof(__x) == sizeof(_R))
1815 return reinterpret_cast<_R>(__as_vector(__x));
1816 else
1817 {
1818 using _Up = __int_for_sizeof_t<_Tp>;
1819 return reinterpret_cast<_R>(
1820 __vector_type_t<_Up, sizeof(_R) / sizeof(_Up)>{__bit_cast<_Up>(__x)});
1821 }
1822 }
1823
1824// }}}
1825// __make_vector{{{
1826template <typename _Tp, typename... _Args>
1827 _GLIBCXX_SIMD_INTRINSIC constexpr __vector_type_t<_Tp, sizeof...(_Args)>
1828 __make_vector(const _Args&... __args)
1829 { return __vector_type_t<_Tp, sizeof...(_Args)>{static_cast<_Tp>(__args)...}; }
1830
1831// }}}
1832// __vector_broadcast{{{
1833template <size_t _Np, typename _Tp, size_t... _I>
1834 _GLIBCXX_SIMD_INTRINSIC constexpr __vector_type_t<_Tp, _Np>
1835 __vector_broadcast_impl(_Tp __x, index_sequence<_I...>)
1836 { return __vector_type_t<_Tp, _Np>{((void)_I, __x)...}; }
1837
1838template <size_t _Np, typename _Tp>
1839 _GLIBCXX_SIMD_INTRINSIC constexpr __vector_type_t<_Tp, _Np>
1840 __vector_broadcast(_Tp __x)
1841 { return __vector_broadcast_impl<_Np, _Tp>(__x, make_index_sequence<_Np>()); }
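// [editorial example] __vector_broadcast<4>(1.f) yields
// __vector_type_t<float, 4>{1, 1, 1, 1}; the ((void)_I, __x) expansion merely
// repeats __x once per index of the index_sequence.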
1842
1843// }}}
1844// __generate_vector{{{
1845template <typename _Tp, size_t _Np, typename _Gp, size_t... _I>
1846 _GLIBCXX_SIMD_INTRINSIC constexpr __vector_type_t<_Tp, _Np>
1847 __generate_vector_impl(_Gp&& __gen, index_sequence<_I...>)
1848 { return __vector_type_t<_Tp, _Np>{static_cast<_Tp>(__gen(_SizeConstant<_I>()))...}; }
1849
1850template <typename _V, typename _VVT = _VectorTraits<_V>, typename _Gp>
1851 _GLIBCXX_SIMD_INTRINSIC constexpr _V
1852 __generate_vector(_Gp&& __gen)
1853 {
1854 if constexpr (__is_vector_type_v<_V>)
1855 return __generate_vector_impl<typename _VVT::value_type,
1856 _VVT::_S_full_size>(
1857 static_cast<_Gp&&>(__gen), make_index_sequence<_VVT::_S_full_size>());
1858 else
1859 return __generate_vector_impl<typename _VVT::value_type,
1860 _VVT::_S_partial_width>(
1861 static_cast<_Gp&&>(__gen),
1862 make_index_sequence<_VVT::_S_partial_width>());
1863 }
1864
1865template <typename _Tp, size_t _Np, typename _Gp>
1866 _GLIBCXX_SIMD_INTRINSIC constexpr __vector_type_t<_Tp, _Np>
1867 __generate_vector(_Gp&& __gen)
1868 {
1869 return __generate_vector_impl<_Tp, _Np>(static_cast<_Gp&&>(__gen),
1870 make_index_sequence<_Np>());
1871 }
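// [editorial example] __generate_vector<int, 4>([](auto __i) { return int(__i); })
// yields {0, 1, 2, 3}; the generator receives each index as a _SizeConstant,
// so it can be used as a compile-time constant.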
1872
1873// }}}
1874// __xor{{{
1875template <typename _TW>
1876 _GLIBCXX_SIMD_INTRINSIC constexpr _TW
1877 __xor(_TW __a, _TW __b) noexcept
1878 {
1879 if constexpr (__is_vector_type_v<_TW> || __is_simd_wrapper_v<_TW>)
1880 {
1881 using _Tp = typename conditional_t<__is_simd_wrapper_v<_TW>, _TW,
1882 _VectorTraitsImpl<_TW>>::value_type;
1883 if constexpr (is_floating_point_v<_Tp>)
1884 {
1885 using _Ip = make_unsigned_t<__int_for_sizeof_t<_Tp>>;
1886 return __vector_bitcast<_Tp>(__vector_bitcast<_Ip>(__a)
1887 ^ __vector_bitcast<_Ip>(__b));
1888 }
1889 else if constexpr (__is_vector_type_v<_TW>)
1890 return __a ^ __b;
1891 else
1892 return __a._M_data ^ __b._M_data;
1893 }
1894 else
1895 return __a ^ __b;
1896 }
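// [editorial note] For floating-point elements the detour through
// make_unsigned_t<__int_for_sizeof_t<_Tp>> applies the XOR to the bit
// representation; e.g. assuming __sign is a float vector with 0x8000'0000 in
// every element, __xor(__v, __sign) flips the sign of every element of __v
// without any floating-point arithmetic. __or and __and below follow the same
// pattern.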
1897
1898// }}}
1899// __or{{{
1900template <typename _TW>
1901 _GLIBCXX_SIMD_INTRINSIC constexpr _TW
1902 __or(_TW __a, _TW __b) noexcept
1903 {
1904 if constexpr (__is_vector_type_v<_TW> || __is_simd_wrapper_v<_TW>)
1905 {
1906 using _Tp = typename conditional_t<__is_simd_wrapper_v<_TW>, _TW,
1907 _VectorTraitsImpl<_TW>>::value_type;
1908 if constexpr (is_floating_point_v<_Tp>)
1909 {
1910 using _Ip = make_unsigned_t<__int_for_sizeof_t<_Tp>>;
1911 return __vector_bitcast<_Tp>(__vector_bitcast<_Ip>(__a)
1912 | __vector_bitcast<_Ip>(__b));
1913 }
1914 else if constexpr (__is_vector_type_v<_TW>)
1915 return __a | __b;
1916 else
1917 return __a._M_data | __b._M_data;
1918 }
1919 else
1920 return __a | __b;
1921 }
1922
1923// }}}
1924// __and{{{
1925template <typename _TW>
1926 _GLIBCXX_SIMD_INTRINSIC constexpr _TW
1927 __and(_TW __a, _TW __b) noexcept
1928 {
1929 if constexpr (__is_vector_type_v<_TW> || __is_simd_wrapper_v<_TW>)
1930 {
1931 using _Tp = typename conditional_t<__is_simd_wrapper_v<_TW>, _TW,
1932 _VectorTraitsImpl<_TW>>::value_type;
1933 if constexpr (is_floating_point_v<_Tp>)
1934 {
1935 using _Ip = make_unsigned_t<__int_for_sizeof_t<_Tp>>;
1936 return __vector_bitcast<_Tp>(__vector_bitcast<_Ip>(__a)
1937 & __vector_bitcast<_Ip>(__b));
1938 }
1939 else if constexpr (__is_vector_type_v<_TW>)
1940 return __a & __b;
1941 else
1942 return __a._M_data & __b._M_data;
1943 }
1944 else
1945 return __a & __b;
1946 }
1947
1948// }}}
1949// __andnot{{{
1950#if _GLIBCXX_SIMD_X86INTRIN && !defined __clang__
1951static constexpr struct
1952{
1953 _GLIBCXX_SIMD_INTRINSIC __v4sf
1954 operator()(__v4sf __a, __v4sf __b) const noexcept
1955 { return __builtin_ia32_andnps(__a, __b); }
1956
1957 _GLIBCXX_SIMD_INTRINSIC __v2df
1958 operator()(__v2df __a, __v2df __b) const noexcept
1959 { return __builtin_ia32_andnpd(__a, __b); }
1960
1961 _GLIBCXX_SIMD_INTRINSIC __v2di
1962 operator()(__v2di __a, __v2di __b) const noexcept
1963 { return __builtin_ia32_pandn128(__a, __b); }
1964
1965 _GLIBCXX_SIMD_INTRINSIC __v8sf
1966 operator()(__v8sf __a, __v8sf __b) const noexcept
1967 { return __builtin_ia32_andnps256(__a, __b); }
1968
1969 _GLIBCXX_SIMD_INTRINSIC __v4df
1970 operator()(__v4df __a, __v4df __b) const noexcept
1971 { return __builtin_ia32_andnpd256(__a, __b); }
1972
1973 _GLIBCXX_SIMD_INTRINSIC __v4di
1974 operator()(__v4di __a, __v4di __b) const noexcept
1975 {
1976 if constexpr (__have_avx2)
1977 return __builtin_ia32_andnotsi256(__a, __b);
1978 else
1979 return reinterpret_cast<__v4di>(
1980 __builtin_ia32_andnpd256(reinterpret_cast<__v4df>(__a),
1981 reinterpret_cast<__v4df>(__b)));
1982 }
1983
1984 _GLIBCXX_SIMD_INTRINSIC __v16sf
1985 operator()(__v16sf __a, __v16sf __b) const noexcept
1986 {
1987 if constexpr (__have_avx512dq)
1988 return _mm512_andnot_ps(__a, __b);
1989 else
1990 return reinterpret_cast<__v16sf>(
1991 _mm512_andnot_si512(reinterpret_cast<__v8di>(__a),
1992 reinterpret_cast<__v8di>(__b)));
1993 }
1994
1995 _GLIBCXX_SIMD_INTRINSIC __v8df
1996 operator()(__v8df __a, __v8df __b) const noexcept
1997 {
1998 if constexpr (__have_avx512dq)
1999 return _mm512_andnot_pd(__a, __b);
2000 else
2001 return reinterpret_cast<__v8df>(
2002 _mm512_andnot_si512(reinterpret_cast<__v8di>(__a),
2003 reinterpret_cast<__v8di>(__b)));
2004 }
2005
2006 _GLIBCXX_SIMD_INTRINSIC __v8di
2007 operator()(__v8di __a, __v8di __b) const noexcept
2008 { return _mm512_andnot_si512(__a, __b); }
2009} _S_x86_andnot;
2010#endif // _GLIBCXX_SIMD_X86INTRIN && !__clang__
2011
2012template <typename _TW>
2013 _GLIBCXX_SIMD_INTRINSIC constexpr _TW
2014 __andnot(_TW __a, _TW __b) noexcept
2015 {
2016 if constexpr (__is_vector_type_v<_TW> || __is_simd_wrapper_v<_TW>)
2017 {
2018 using _TVT = conditional_t<__is_simd_wrapper_v<_TW>, _TW,
2019 _VectorTraitsImpl<_TW>>;
2020 using _Tp = typename _TVT::value_type;
2021#if _GLIBCXX_SIMD_X86INTRIN && !defined __clang__
2022 if constexpr (sizeof(_TW) >= 16)
2023 {
2024 const auto __ai = __to_intrin(__a);
2025 const auto __bi = __to_intrin(__b);
2026 if (!__builtin_is_constant_evaluated()
2027 && !(__builtin_constant_p(__ai) && __builtin_constant_p(__bi)))
2028 {
2029 const auto __r = _S_x86_andnot(__ai, __bi);
2030 if constexpr (is_convertible_v<decltype(__r), _TW>)
2031 return __r;
2032 else
2033 return reinterpret_cast<typename _TVT::type>(__r);
2034 }
2035 }
2036#endif // _GLIBCXX_SIMD_X86INTRIN && !__clang__
2037 using _Ip = make_unsigned_t<__int_for_sizeof_t<_Tp>>;
2038 return __vector_bitcast<_Tp>(~__vector_bitcast<_Ip>(__a)
2039 & __vector_bitcast<_Ip>(__b));
2040 }
2041 else
2042 return ~__a & __b;
2043 }
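// [editorial note] __andnot(__a, __b) computes ~__a & __b, matching the
// operand order of the x86 ANDN/ANDNPS family, so e.g. __andnot(__mask, __bits)
// clears exactly the bits set in __mask. The _S_x86_andnot path is skipped
// during constant evaluation, where only the generic ~a & b form below is
// usable.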
2044
2045// }}}
2046// __not{{{
2047template <typename _Tp, typename _TVT = _VectorTraits<_Tp>>
2048 _GLIBCXX_SIMD_INTRINSIC constexpr _Tp
2049 __not(_Tp __a) noexcept
2050 {
2051 if constexpr (is_floating_point_v<typename _TVT::value_type>)
2052 return reinterpret_cast<typename _TVT::type>(
2053 ~__vector_bitcast<unsigned>(__a));
2054 else
2055 return ~__a;
2056 }
2057
2058// }}}
2059// __concat{{{
2060template <typename _Tp, typename _TVT = _VectorTraits<_Tp>,
2061 typename _R = __vector_type_t<typename _TVT::value_type, _TVT::_S_full_size * 2>>
2062 constexpr _R
2063 __concat(_Tp a_, _Tp b_)
2064 {
2065#ifdef _GLIBCXX_SIMD_WORKAROUND_XXX_1
2066 using _W
2067 = conditional_t<is_floating_point_v<typename _TVT::value_type>, double,
2068 conditional_t<(sizeof(_Tp) >= 2 * sizeof(long long)),
2069 long long, typename _TVT::value_type>>;
2070 constexpr int input_width = sizeof(_Tp) / sizeof(_W);
2071 const auto __a = __vector_bitcast<_W>(a_);
2072 const auto __b = __vector_bitcast<_W>(b_);
2073 using _Up = __vector_type_t<_W, sizeof(_R) / sizeof(_W)>;
2074#else
2075 constexpr int input_width = _TVT::_S_full_size;
2076 const _Tp& __a = a_;
2077 const _Tp& __b = b_;
2078 using _Up = _R;
2079#endif
2080 if constexpr (input_width == 2)
2081 return reinterpret_cast<_R>(_Up{__a[0], __a[1], __b[0], __b[1]});
2082 else if constexpr (input_width == 4)
2083 return reinterpret_cast<_R>(
2084 _Up{__a[0], __a[1], __a[2], __a[3], __b[0], __b[1], __b[2], __b[3]});
2085 else if constexpr (input_width == 8)
2086 return reinterpret_cast<_R>(
2087 _Up{__a[0], __a[1], __a[2], __a[3], __a[4], __a[5], __a[6], __a[7],
2088 __b[0], __b[1], __b[2], __b[3], __b[4], __b[5], __b[6], __b[7]});
2089 else if constexpr (input_width == 16)
2090 return reinterpret_cast<_R>(
2091 _Up{__a[0], __a[1], __a[2], __a[3], __a[4], __a[5], __a[6],
2092 __a[7], __a[8], __a[9], __a[10], __a[11], __a[12], __a[13],
2093 __a[14], __a[15], __b[0], __b[1], __b[2], __b[3], __b[4],
2094 __b[5], __b[6], __b[7], __b[8], __b[9], __b[10], __b[11],
2095 __b[12], __b[13], __b[14], __b[15]});
2096 else if constexpr (input_width == 32)
2097 return reinterpret_cast<_R>(
2098 _Up{__a[0], __a[1], __a[2], __a[3], __a[4], __a[5], __a[6],
2099 __a[7], __a[8], __a[9], __a[10], __a[11], __a[12], __a[13],
2100 __a[14], __a[15], __a[16], __a[17], __a[18], __a[19], __a[20],
2101 __a[21], __a[22], __a[23], __a[24], __a[25], __a[26], __a[27],
2102 __a[28], __a[29], __a[30], __a[31], __b[0], __b[1], __b[2],
2103 __b[3], __b[4], __b[5], __b[6], __b[7], __b[8], __b[9],
2104 __b[10], __b[11], __b[12], __b[13], __b[14], __b[15], __b[16],
2105 __b[17], __b[18], __b[19], __b[20], __b[21], __b[22], __b[23],
2106 __b[24], __b[25], __b[26], __b[27], __b[28], __b[29], __b[30],
2107 __b[31]});
2108 }
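// [editorial example] Given two __vector_type_t<float, 4> values __x and __y,
// __concat(__x, __y) returns a __vector_type_t<float, 8> holding
// {__x[0..3], __y[0..3]}. The _W detour in the workaround branch appears to
// widen the element type only to reduce the number of subscript expressions.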
2109
2110// }}}
2111// __zero_extend {{{
2112template <typename _Tp, typename _TVT = _VectorTraits<_Tp>>
2113 struct _ZeroExtendProxy
2114 {
2115 using value_type = typename _TVT::value_type;
2116 static constexpr size_t _Np = _TVT::_S_full_size;
2117 const _Tp __x;
2118
2119 template <typename _To, typename _ToVT = _VectorTraits<_To>,
2120 typename
2121 = enable_if_t<is_same_v<typename _ToVT::value_type, value_type>>>
2122 _GLIBCXX_SIMD_INTRINSIC operator _To() const
2123 {
2124 constexpr size_t _ToN = _ToVT::_S_full_size;
2125 if constexpr (_ToN == _Np)
2126 return __x;
2127 else if constexpr (_ToN == 2 * _Np)
2128 {
2129#ifdef _GLIBCXX_SIMD_WORKAROUND_XXX_3
2130 if constexpr (__have_avx && _TVT::template _S_is<float, 4>)
2131 return __vector_bitcast<value_type>(
2132 _mm256_insertf128_ps(__m256(), __x, 0));
2133 else if constexpr (__have_avx && _TVT::template _S_is<double, 2>)
2134 return __vector_bitcast<value_type>(
2135 _mm256_insertf128_pd(__m256d(), __x, 0));
2136 else if constexpr (__have_avx2 && _Np * sizeof(value_type) == 16)
2137 return __vector_bitcast<value_type>(
2138 _mm256_insertf128_si256(__m256i(), __to_intrin(__x), 0));
2139 else if constexpr (__have_avx512f && _TVT::template _S_is<float, 8>)
2140 {
2141 if constexpr (__have_avx512dq)
2142 return __vector_bitcast<value_type>(
2143 _mm512_insertf32x8(__m512(), __x, 0));
2144 else
2145 return reinterpret_cast<__m512>(
2146 _mm512_insertf64x4(__m512d(),
2147 reinterpret_cast<__m256d>(__x), 0));
2148 }
2149 else if constexpr (__have_avx512f
2150 && _TVT::template _S_is<double, 4>)
2151 return __vector_bitcast<value_type>(
2152 _mm512_insertf64x4(__m512d(), __x, 0));
2153 else if constexpr (__have_avx512f && _Np * sizeof(value_type) == 32)
2154 return __vector_bitcast<value_type>(
2155 _mm512_inserti64x4(__m512i(), __to_intrin(__x), 0));
2156#endif
2157 return __concat(__x, _Tp());
2158 }
2159 else if constexpr (_ToN == 4 * _Np)
2160 {
2161#ifdef _GLIBCXX_SIMD_WORKAROUND_XXX_3
2162 if constexpr (__have_avx512dq && _TVT::template _S_is<double, 2>)
2163 {
2164 return __vector_bitcast<value_type>(
2165 _mm512_insertf64x2(__m512d(), __x, 0));
2166 }
2167 else if constexpr (__have_avx512f
2168 && is_floating_point_v<value_type>)
2169 {
2170 return __vector_bitcast<value_type>(
2171 _mm512_insertf32x4(__m512(), reinterpret_cast<__m128>(__x),
2172 0));
2173 }
2174 else if constexpr (__have_avx512f && _Np * sizeof(value_type) == 16)
2175 {
2176 return __vector_bitcast<value_type>(
2177 _mm512_inserti32x4(__m512i(), __to_intrin(__x), 0));
2178 }
2179#endif
2180 return __concat(__concat(__x, _Tp()),
2181 __vector_type_t<value_type, _Np * 2>());
2182 }
2183 else if constexpr (_ToN == 8 * _Np)
2184 return __concat(operator __vector_type_t<value_type, _Np * 4>(),
2185 __vector_type_t<value_type, _Np * 4>());
2186 else if constexpr (_ToN == 16 * _Np)
2187 return __concat(operator __vector_type_t<value_type, _Np * 8>(),
2188 __vector_type_t<value_type, _Np * 8>());
2189 else
2190 __assert_unreachable<_Tp>();
2191 }
2192 };
2193
2194template <typename _Tp, typename _TVT = _VectorTraits<_Tp>>
2195 _GLIBCXX_SIMD_INTRINSIC _ZeroExtendProxy<_Tp, _TVT>
2196 __zero_extend(_Tp __x)
2197 { return {__x}; }
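// [editorial example] __m128 __x = ...;
// __m256 __y = __zero_extend(__x); // low half == __x, high half zeroed
// The proxy's conversion operator selects the destination width and uses
// insert intrinsics where available, otherwise __concat with a zero vector.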
2198
2199// }}}
2200// __extract<_Np, By>{{{
2201template <int _Offset,
2202 int _SplitBy,
2203 typename _Tp,
2204 typename _TVT = _VectorTraits<_Tp>,
2205 typename _R = __vector_type_t<typename _TVT::value_type, _TVT::_S_full_size / _SplitBy>>
2206 _GLIBCXX_SIMD_INTRINSIC constexpr _R
2207 __extract(_Tp __in)
2208 {
2209 using value_type = typename _TVT::value_type;
2210#if _GLIBCXX_SIMD_X86INTRIN // {{{
2211 if constexpr (sizeof(_Tp) == 64 && _SplitBy == 4 && _Offset > 0)
2212 {
2213 if constexpr (__have_avx512dq && is_same_v<double, value_type>)
2214 return _mm512_extractf64x2_pd(__to_intrin(__in), _Offset);
2215 else if constexpr (is_floating_point_v<value_type>)
2216 return __vector_bitcast<value_type>(
2217 _mm512_extractf32x4_ps(__intrin_bitcast<__m512>(__in), _Offset));
2218 else
2219 return reinterpret_cast<_R>(
2220 _mm512_extracti32x4_epi32(__intrin_bitcast<__m512i>(__in),
2221 _Offset));
2222 }
2223 else
2224#endif // _GLIBCXX_SIMD_X86INTRIN }}}
2225 {
2226#ifdef _GLIBCXX_SIMD_WORKAROUND_XXX_1
2227 using _W = conditional_t<
2228 is_floating_point_v<value_type>, double,
2229 conditional_t<(sizeof(_R) >= 16), long long, value_type>>;
2230 static_assert(sizeof(_R) % sizeof(_W) == 0);
2231 constexpr int __return_width = sizeof(_R) / sizeof(_W);
2232 using _Up = __vector_type_t<_W, __return_width>;
2233 const auto __x = __vector_bitcast<_W>(__in);
2234#else
2235 constexpr int __return_width = _TVT::_S_full_size / _SplitBy;
2236 using _Up = _R;
2237 const __vector_type_t<value_type, _TVT::_S_full_size>& __x
2238 = __in; // only needed for _Tp = _SimdWrapper<value_type, _Np>
2239#endif
2240 constexpr int _O = _Offset * __return_width;
2241 return __call_with_subscripts<__return_width, _O>(
2242 __x, [](auto... __entries) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
2243 return reinterpret_cast<_R>(_Up{__entries...});
2244 });
2245 }
2246 }
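// [editorial example] For a __vector_type_t<int, 8> __v, __extract<1, 2>(__v)
// returns a __vector_type_t<int, 4> holding elements 4..7: _SplitBy divides
// the input into equal pieces and _Offset selects which piece to return.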
2247
2248// }}}
2249// __lo/__hi64[z]{{{
2250template <typename _Tp,
2251 typename _R = __vector_type8_t<typename _VectorTraits<_Tp>::value_type>>
2252 _GLIBCXX_SIMD_INTRINSIC constexpr _R
2253 __lo64(_Tp __x)
2254 {
2255 _R __r{};
2256 __builtin_memcpy(&__r, &__x, 8);
2257 return __r;
2258 }
2259
2260template <typename _Tp,
2261 typename _R = __vector_type8_t<typename _VectorTraits<_Tp>::value_type>>
2262 _GLIBCXX_SIMD_INTRINSIC constexpr _R
2263 __hi64(_Tp __x)
2264 {
2265 static_assert(sizeof(_Tp) == 16, "use __hi64z if you meant it");
2266 _R __r{};
2267 __builtin_memcpy(&__r, reinterpret_cast<const char*>(&__x) + 8, 8);
2268 return __r;
2269 }
2270
2271template <typename _Tp,
2272 typename _R = __vector_type8_t<typename _VectorTraits<_Tp>::value_type>>
2273 _GLIBCXX_SIMD_INTRINSIC constexpr _R
2274 __hi64z([[maybe_unused]] _Tp __x)
2275 {
2276 _R __r{};
2277 if constexpr (sizeof(_Tp) == 16)
2278 __builtin_memcpy(&__r, reinterpret_cast<const char*>(&__x) + 8, 8);
2279 return __r;
2280 }
2281
2282// }}}
2283// __lo/__hi128{{{
2284template <typename _Tp>
2285 _GLIBCXX_SIMD_INTRINSIC constexpr auto
2286 __lo128(_Tp __x)
2287 { return __extract<0, sizeof(_Tp) / 16>(__x); }
2288
2289template <typename _Tp>
2290 _GLIBCXX_SIMD_INTRINSIC constexpr auto
2291 __hi128(_Tp __x)
2292 {
2293 static_assert(sizeof(__x) == 32);
2294 return __extract<1, 2>(__x);
2295 }
2296
2297// }}}
2298// __lo/__hi256{{{
2299template <typename _Tp>
2300 _GLIBCXX_SIMD_INTRINSIC constexpr auto
2301 __lo256(_Tp __x)
2302 {
2303 static_assert(sizeof(__x) == 64);
2304 return __extract<0, 2>(__x);
2305 }
2306
2307template <typename _Tp>
2308 _GLIBCXX_SIMD_INTRINSIC constexpr auto
2309 __hi256(_Tp __x)
2310 {
2311 static_assert(sizeof(__x) == 64);
2312 return __extract<1, 2>(__x);
2313 }
2314
2315// }}}
2316// __auto_bitcast{{{
2317template <typename _Tp>
2318 struct _AutoCast
2319 {
2320 static_assert(__is_vector_type_v<_Tp>);
2321
2322 const _Tp __x;
2323
2324 template <typename _Up, typename _UVT = _VectorTraits<_Up>>
2325 _GLIBCXX_SIMD_INTRINSIC constexpr operator _Up() const
2326 { return __intrin_bitcast<typename _UVT::type>(__x); }
2327 };
2328
2329template <typename _Tp>
2330 _GLIBCXX_SIMD_INTRINSIC constexpr _AutoCast<_Tp>
2331 __auto_bitcast(const _Tp& __x)
2332 { return {__x}; }
2333
2334template <typename _Tp, size_t _Np>
2335 _GLIBCXX_SIMD_INTRINSIC constexpr
2336 _AutoCast<typename _SimdWrapper<_Tp, _Np>::_BuiltinType>
2337 __auto_bitcast(const _SimdWrapper<_Tp, _Np>& __x)
2338 { return {__x._M_data}; }
2339
2340// }}}
2341// ^^^ ---- builtin vector types [[gnu::vector_size(N)]] and operations ---- ^^^
2342
2343#if _GLIBCXX_SIMD_HAVE_SSE_ABI
2344// __bool_storage_member_type{{{
2345#if _GLIBCXX_SIMD_HAVE_AVX512F && _GLIBCXX_SIMD_X86INTRIN
2346template <size_t _Size>
2347 struct __bool_storage_member_type
2348 {
2349 static_assert((_Size & (_Size - 1)) != 0,
2350 "This trait may only be used for non-power-of-2 sizes. "
2351 "Power-of-2 sizes must be specialized.");
2352 using type =
2353 typename __bool_storage_member_type<std::__bit_ceil(_Size)>::type;
2354 };
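// [editorial example] __bool_storage_member_type<3>::type is __mmask8: 3 is
// rounded up to std::__bit_ceil(3) == 4, whose specialization below is used.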
2355
2356template <>
2357 struct __bool_storage_member_type<1> { using type = bool; };
2358
2359template <>
2360 struct __bool_storage_member_type<2> { using type = __mmask8; };
2361
2362template <>
2363 struct __bool_storage_member_type<4> { using type = __mmask8; };
2364
2365template <>
2366 struct __bool_storage_member_type<8> { using type = __mmask8; };
2367
2368template <>
2369 struct __bool_storage_member_type<16> { using type = __mmask16; };
2370
2371template <>
2372 struct __bool_storage_member_type<32> { using type = __mmask32; };
2373
2374template <>
2375 struct __bool_storage_member_type<64> { using type = __mmask64; };
2376#endif // _GLIBCXX_SIMD_HAVE_AVX512F
2377
2378// }}}
2379// __intrinsic_type (x86){{{
2380// the following excludes bool via __is_vectorizable
2381#if _GLIBCXX_SIMD_HAVE_SSE
2382template <typename _Tp, size_t _Bytes>
2383 struct __intrinsic_type<_Tp, _Bytes, enable_if_t<__is_vectorizable_v<_Tp> && _Bytes <= 64>>
2384 {
2385 static_assert(!is_same_v<_Tp, long double>,
2386 "no __intrinsic_type support for long double on x86");
2387
2388 static constexpr size_t _S_VBytes = _Bytes <= 16 ? 16 : _Bytes <= 32 ? 32 : 64;
2389
2390 using type [[__gnu__::__vector_size__(_S_VBytes)]]
2391 = conditional_t<is_integral_v<_Tp>, long long int, _Tp>;
2392 };
2393#endif // _GLIBCXX_SIMD_HAVE_SSE
2394
2395// }}}
2396#endif // _GLIBCXX_SIMD_HAVE_SSE_ABI
2397// __intrinsic_type (ARM){{{
2398#if _GLIBCXX_SIMD_HAVE_NEON
2399template <>
2400 struct __intrinsic_type<float, 8, void>
2401 { using type = float32x2_t; };
2402
2403template <>
2404 struct __intrinsic_type<float, 16, void>
2405 { using type = float32x4_t; };
2406
2407template <>
2408 struct __intrinsic_type<double, 8, void>
2409 {
2410#if _GLIBCXX_SIMD_HAVE_NEON_A64
2411 using type = float64x1_t;
2412#endif
2413 };
2414
2415template <>
2416 struct __intrinsic_type<double, 16, void>
2417 {
2418#if _GLIBCXX_SIMD_HAVE_NEON_A64
2419 using type = float64x2_t;
2420#endif
2421 };
2422
2423#define _GLIBCXX_SIMD_ARM_INTRIN(_Bits, _Np) \
2424template <> \
2425 struct __intrinsic_type<__int_with_sizeof_t<_Bits / 8>, \
2426 _Np * _Bits / 8, void> \
2427 { using type = int##_Bits##x##_Np##_t; }; \
2428template <> \
2429 struct __intrinsic_type<make_unsigned_t<__int_with_sizeof_t<_Bits / 8>>, \
2430 _Np * _Bits / 8, void> \
2431 { using type = uint##_Bits##x##_Np##_t; }
2432_GLIBCXX_SIMD_ARM_INTRIN(8, 8);
2433_GLIBCXX_SIMD_ARM_INTRIN(8, 16);
2434_GLIBCXX_SIMD_ARM_INTRIN(16, 4);
2435_GLIBCXX_SIMD_ARM_INTRIN(16, 8);
2436_GLIBCXX_SIMD_ARM_INTRIN(32, 2);
2437_GLIBCXX_SIMD_ARM_INTRIN(32, 4);
2438_GLIBCXX_SIMD_ARM_INTRIN(64, 1);
2439_GLIBCXX_SIMD_ARM_INTRIN(64, 2);
2440#undef _GLIBCXX_SIMD_ARM_INTRIN
2441
2442template <typename _Tp, size_t _Bytes>
2443 struct __intrinsic_type<_Tp, _Bytes, enable_if_t<__is_vectorizable_v<_Tp> && _Bytes <= 16>>
2444 {
2445 static constexpr int _SVecBytes = _Bytes <= 8 ? 8 : 16;
2446
2447 using _Ip = __int_for_sizeof_t<_Tp>;
2448
2449 using _Up = conditional_t<
2450 is_floating_point_v<_Tp>, _Tp,
2451 conditional_t<is_unsigned_v<_Tp>, make_unsigned_t<_Ip>, _Ip>>;
2452
2453 static_assert(!is_same_v<_Tp, _Up> || _SVecBytes != _Bytes,
2454 "should use explicit specialization above");
2455
2456 using type = typename __intrinsic_type<_Up, _SVecBytes>::type;
2457 };
2458#endif // _GLIBCXX_SIMD_HAVE_NEON
2459
2460// }}}
2461// __intrinsic_type (PPC){{{
2462#ifdef __ALTIVEC__
2463template <typename _Tp>
2464 struct __intrinsic_type_impl;
2465
2466#define _GLIBCXX_SIMD_PPC_INTRIN(_Tp) \
2467 template <> \
2468 struct __intrinsic_type_impl<_Tp> { using type = __vector _Tp; }
2469_GLIBCXX_SIMD_PPC_INTRIN(float);
2470#ifdef __VSX__
2471_GLIBCXX_SIMD_PPC_INTRIN(double);
2472#endif
2473_GLIBCXX_SIMD_PPC_INTRIN(signed char);
2474_GLIBCXX_SIMD_PPC_INTRIN(unsigned char);
2475_GLIBCXX_SIMD_PPC_INTRIN(signed short);
2476_GLIBCXX_SIMD_PPC_INTRIN(unsigned short);
2477_GLIBCXX_SIMD_PPC_INTRIN(signed int);
2478_GLIBCXX_SIMD_PPC_INTRIN(unsigned int);
2479#if defined __VSX__ || __SIZEOF_LONG__ == 4
2480_GLIBCXX_SIMD_PPC_INTRIN(signed long);
2481_GLIBCXX_SIMD_PPC_INTRIN(unsigned long);
2482#endif
2483#ifdef __VSX__
2484_GLIBCXX_SIMD_PPC_INTRIN(signed long long);
2485_GLIBCXX_SIMD_PPC_INTRIN(unsigned long long);
2486#endif
2487#undef _GLIBCXX_SIMD_PPC_INTRIN
2488
2489template <typename _Tp, size_t _Bytes>
2490 struct __intrinsic_type<_Tp, _Bytes, enable_if_t<__is_vectorizable_v<_Tp> && _Bytes <= 16>>
2491 {
2492 static constexpr bool _S_is_ldouble = is_same_v<_Tp, long double>;
2493
2494 // allow _Tp == long double with -mlong-double-64
2495 static_assert(!(_S_is_ldouble && sizeof(long double) > sizeof(double)),
2496 "no __intrinsic_type support for 128-bit floating point on PowerPC");
2497
2498#ifndef __VSX__
2499 static_assert(!(is_same_v<_Tp, double>
2500 || (_S_is_ldouble && sizeof(long double) == sizeof(double))),
2501 "no __intrinsic_type support for 64-bit floating point on PowerPC w/o VSX");
2502#endif
2503
2504 static constexpr auto __element_type()
2505 {
2506 if constexpr (is_floating_point_v<_Tp>)
2507 {
2508 if constexpr (_S_is_ldouble)
2509 return double {};
2510 else
2511 return _Tp {};
2512 }
2513 else if constexpr (is_signed_v<_Tp>)
2514 {
2515 if constexpr (sizeof(_Tp) == sizeof(_SChar))
2516 return _SChar {};
2517 else if constexpr (sizeof(_Tp) == sizeof(short))
2518 return short {};
2519 else if constexpr (sizeof(_Tp) == sizeof(int))
2520 return int {};
2521 else if constexpr (sizeof(_Tp) == sizeof(_LLong))
2522 return _LLong {};
2523 }
2524 else
2525 {
2526 if constexpr (sizeof(_Tp) == sizeof(_UChar))
2527 return _UChar {};
2528 else if constexpr (sizeof(_Tp) == sizeof(_UShort))
2529 return _UShort {};
2530 else if constexpr (sizeof(_Tp) == sizeof(_UInt))
2531 return _UInt {};
2532 else if constexpr (sizeof(_Tp) == sizeof(_ULLong))
2533 return _ULLong {};
2534 }
2535 }
2536
2537 using type = typename __intrinsic_type_impl<decltype(__element_type())>::type;
2538 };
2539#endif // __ALTIVEC__
2540
2541// }}}
2542// _SimdWrapper<bool>{{{1
2543template <size_t _Width>
2544 struct _SimdWrapper<bool, _Width,
2545 void_t<typename __bool_storage_member_type<_Width>::type>>
2546 {
2547 using _BuiltinType = typename __bool_storage_member_type<_Width>::type;
2548 using value_type = bool;
2549
2550 static constexpr size_t _S_full_size = sizeof(_BuiltinType) * __CHAR_BIT__;
2551
2552 _GLIBCXX_SIMD_INTRINSIC constexpr _SimdWrapper<bool, _S_full_size>
2553 __as_full_vector() const
2554 { return _M_data; }
2555
2556 _GLIBCXX_SIMD_INTRINSIC constexpr
2557 _SimdWrapper() = default;
2558
2559 _GLIBCXX_SIMD_INTRINSIC constexpr
2560 _SimdWrapper(_BuiltinType __k) : _M_data(__k) {}
2561
2562 _GLIBCXX_SIMD_INTRINSIC
2563 operator const _BuiltinType&() const
2564 { return _M_data; }
2565
2566 _GLIBCXX_SIMD_INTRINSIC
2567 operator _BuiltinType&()
2568 { return _M_data; }
2569
2570 _GLIBCXX_SIMD_INTRINSIC _BuiltinType
2571 __intrin() const
2572 { return _M_data; }
2573
2574 _GLIBCXX_SIMD_INTRINSIC constexpr value_type
2575 operator[](size_t __i) const
2576 { return _M_data & (_BuiltinType(1) << __i); }
2577
2578 template <size_t __i>
2579 _GLIBCXX_SIMD_INTRINSIC constexpr value_type
2580 operator[](_SizeConstant<__i>) const
2581 { return _M_data & (_BuiltinType(1) << __i); }
2582
2583 _GLIBCXX_SIMD_INTRINSIC constexpr void
2584 _M_set(size_t __i, value_type __x)
2585 {
2586 if (__x)
2587 _M_data |= (_BuiltinType(1) << __i);
2588 else
2589 _M_data &= ~(_BuiltinType(1) << __i);
2590 }
2591
2592 _GLIBCXX_SIMD_INTRINSIC constexpr bool
2593 _M_is_constprop() const
2594 { return __builtin_constant_p(_M_data); }
2595
2596 _GLIBCXX_SIMD_INTRINSIC constexpr bool
2597 _M_is_constprop_none_of() const
2598 {
2599 if (__builtin_constant_p(_M_data))
2600 {
2601 constexpr int __nbits = sizeof(_BuiltinType) * __CHAR_BIT__;
2602 constexpr _BuiltinType __active_mask
2603 = ~_BuiltinType() >> (__nbits - _Width);
2604 return (_M_data & __active_mask) == 0;
2605 }
2606 return false;
2607 }
2608
2609 _GLIBCXX_SIMD_INTRINSIC constexpr bool
2610 _M_is_constprop_all_of() const
2611 {
2612 if (__builtin_constant_p(_M_data))
2613 {
2614 constexpr int __nbits = sizeof(_BuiltinType) * __CHAR_BIT__;
2615 constexpr _BuiltinType __active_mask
2616 = ~_BuiltinType() >> (__nbits - _Width);
2617 return (_M_data & __active_mask) == __active_mask;
2618 }
2619 return false;
2620 }
2621
2622 _BuiltinType _M_data;
2623 };
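// [editorial example] The bool wrapper stores one element per bit: with
// _M_data == 0b0101, __k[0] and __k[2] are true, and _M_set(1, true) ORs in
// _BuiltinType(1) << 1, yielding 0b0111.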
2624
2625// _SimdWrapperBase{{{1
2626template <bool _MustZeroInitPadding, typename _BuiltinType>
2627 struct _SimdWrapperBase;
2628
2629template <typename _BuiltinType>
2630 struct _SimdWrapperBase<false, _BuiltinType> // no padding or no SNaNs
2631 {
2632 _GLIBCXX_SIMD_INTRINSIC constexpr
2633 _SimdWrapperBase() = default;
2634
2635 _GLIBCXX_SIMD_INTRINSIC constexpr
2636 _SimdWrapperBase(_BuiltinType __init) : _M_data(__init) {}
2637
2638 _BuiltinType _M_data;
2639 };
2640
2641template <typename _BuiltinType>
2642 struct _SimdWrapperBase<true, _BuiltinType> // with padding that needs to
2643 // never become SNaN
2644 {
2645 _GLIBCXX_SIMD_INTRINSIC constexpr
2646 _SimdWrapperBase() : _M_data() {}
2647
2648 _GLIBCXX_SIMD_INTRINSIC constexpr
2649 _SimdWrapperBase(_BuiltinType __init) : _M_data(__init) {}
2650
2651 _BuiltinType _M_data;
2652 };
2653
2654// }}}
2655// _SimdWrapper{{{
2656template <typename _Tp, size_t _Width>
2657 struct _SimdWrapper<
2658 _Tp, _Width,
2659 void_t<__vector_type_t<_Tp, _Width>, __intrinsic_type_t<_Tp, _Width>>>
2660 : _SimdWrapperBase<__has_iec559_behavior<__signaling_NaN, _Tp>::value
2661 && sizeof(_Tp) * _Width
2662 == sizeof(__vector_type_t<_Tp, _Width>),
2663 __vector_type_t<_Tp, _Width>>
2664 {
2665 using _Base
2666 = _SimdWrapperBase<__has_iec559_behavior<__signaling_NaN, _Tp>::value
2667 && sizeof(_Tp) * _Width
2668 == sizeof(__vector_type_t<_Tp, _Width>),
2669 __vector_type_t<_Tp, _Width>>;
2670
2671 static_assert(__is_vectorizable_v<_Tp>);
2672 static_assert(_Width >= 2); // a width of 1 makes no sense; use _Tp directly instead
2673
2674 using _BuiltinType = __vector_type_t<_Tp, _Width>;
2675 using value_type = _Tp;
2676
2677 static inline constexpr size_t _S_full_size
2678 = sizeof(_BuiltinType) / sizeof(value_type);
2679 static inline constexpr int _S_size = _Width;
2680 static inline constexpr bool _S_is_partial = _S_full_size != _S_size;
2681
2682 using _Base::_M_data;
2683
2684 _GLIBCXX_SIMD_INTRINSIC constexpr _SimdWrapper<_Tp, _S_full_size>
2685 __as_full_vector() const
2686 { return _M_data; }
2687
2688 _GLIBCXX_SIMD_INTRINSIC constexpr
2689 _SimdWrapper(initializer_list<_Tp> __init)
2690 : _Base(__generate_from_n_evaluations<_Width, _BuiltinType>(
2691 [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
2692 return __init.begin()[__i.value];
2693 })) {}
2694
2695 _GLIBCXX_SIMD_INTRINSIC constexpr
2696 _SimdWrapper() = default;
2697
2698 _GLIBCXX_SIMD_INTRINSIC constexpr
2699 _SimdWrapper(const _SimdWrapper&) = default;
2700
2701 _GLIBCXX_SIMD_INTRINSIC constexpr
2702 _SimdWrapper(_SimdWrapper&&) = default;
2703
2704 _GLIBCXX_SIMD_INTRINSIC constexpr _SimdWrapper&
2705 operator=(const _SimdWrapper&) = default;
2706
2707 _GLIBCXX_SIMD_INTRINSIC constexpr _SimdWrapper&
2708 operator=(_SimdWrapper&&) = default;
2709
2710 template <typename _V, typename = enable_if_t<disjunction_v<
2711 is_same<_V, __vector_type_t<_Tp, _Width>>,
2712 is_same<_V, __intrinsic_type_t<_Tp, _Width>>>>>
2713 _GLIBCXX_SIMD_INTRINSIC constexpr
2714 _SimdWrapper(_V __x)
2715 // __vector_bitcast can convert e.g. __m128 to __vector(2) float
2716 : _Base(__vector_bitcast<_Tp, _Width>(__x)) {}
2717
2718 template <typename... _As,
2719 typename = enable_if_t<((is_same_v<simd_abi::scalar, _As> && ...)
2720 && sizeof...(_As) <= _Width)>>
2721 _GLIBCXX_SIMD_INTRINSIC constexpr
2722 operator _SimdTuple<_Tp, _As...>() const
2723 {
2724 return __generate_from_n_evaluations<sizeof...(_As), _SimdTuple<_Tp, _As...>>(
2725 [&](auto __i) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA
2726 { return _M_data[int(__i)]; });
2727 }
2728
2729 _GLIBCXX_SIMD_INTRINSIC constexpr
2730 operator const _BuiltinType&() const
2731 { return _M_data; }
2732
2733 _GLIBCXX_SIMD_INTRINSIC constexpr
2734 operator _BuiltinType&()
2735 { return _M_data; }
2736
2737 _GLIBCXX_SIMD_INTRINSIC constexpr _Tp
2738 operator[](size_t __i) const
2739 { return _M_data[__i]; }
2740
2741 template <size_t __i>
2742 _GLIBCXX_SIMD_INTRINSIC constexpr _Tp
2743 operator[](_SizeConstant<__i>) const
2744 { return _M_data[__i]; }
2745
2746 _GLIBCXX_SIMD_INTRINSIC constexpr void
2747 _M_set(size_t __i, _Tp __x)
2748 {
2749 if (__builtin_is_constant_evaluated())
2750 _M_data = __generate_from_n_evaluations<_Width, _BuiltinType>([&](auto __j) {
2751 return __j == __i ? __x : _M_data[__j()];
2752 });
2753 else
2754 _M_data[__i] = __x;
2755 }
2756
2757 _GLIBCXX_SIMD_INTRINSIC
2758 constexpr bool
2759 _M_is_constprop() const
2760 { return __builtin_constant_p(_M_data); }
2761
2762 _GLIBCXX_SIMD_INTRINSIC constexpr bool
2763 _M_is_constprop_none_of() const
2764 {
2765 if (__builtin_constant_p(_M_data))
2766 {
2767 bool __r = true;
2768 if constexpr (is_floating_point_v<_Tp>)
2769 {
2770 using _Ip = __int_for_sizeof_t<_Tp>;
2771 const auto __intdata = __vector_bitcast<_Ip>(_M_data);
2772 __execute_n_times<_Width>(
2773 [&](auto __i) { __r &= __intdata[__i.value] == _Ip(); });
2774 }
2775 else
2776 __execute_n_times<_Width>(
2777 [&](auto __i) { __r &= _M_data[__i.value] == _Tp(); });
2778 if (__builtin_constant_p(__r))
2779 return __r;
2780 }
2781 return false;
2782 }
2783
2784 _GLIBCXX_SIMD_INTRINSIC constexpr bool
2785 _M_is_constprop_all_of() const
2786 {
2787 if (__builtin_constant_p(_M_data))
2788 {
2789 bool __r = true;
2790 if constexpr (is_floating_point_v<_Tp>)
2791 {
2792 using _Ip = __int_for_sizeof_t<_Tp>;
2793 const auto __intdata = __vector_bitcast<_Ip>(_M_data);
2794 __execute_n_times<_Width>(
2795 [&](auto __i) { __r &= __intdata[__i.value] == ~_Ip(); });
2796 }
2797 else
2798 __execute_n_times<_Width>(
2799 [&](auto __i) { __r &= _M_data[__i.value] == ~_Tp(); });
2800 if (__builtin_constant_p(__r))
2801 return __r;
2802 }
2803 return false;
2804 }
2805 };
2806
2807// }}}
2808
2809// __vectorized_sizeof {{{
2810template <typename _Tp>
2811 constexpr size_t
2812 __vectorized_sizeof()
2813 {
2814 if constexpr (!__is_vectorizable_v<_Tp>)
2815 return 0;
2816
2817 if constexpr (sizeof(_Tp) <= 8)
2818 {
2819 // X86:
2820 if constexpr (__have_avx512bw)
2821 return 64;
2822 if constexpr (__have_avx512f && sizeof(_Tp) >= 4)
2823 return 64;
2824 if constexpr (__have_avx2)
2825 return 32;
2826 if constexpr (__have_avx && is_floating_point_v<_Tp>)
2827 return 32;
2828 if constexpr (__have_sse2)
2829 return 16;
2830 if constexpr (__have_sse && is_same_v<_Tp, float>)
2831 return 16;
2832 /* The following is too much trouble because of mixed MMX and x87 code.
2833 * While nothing here explicitly uses MMX instructions or registers,
2834 * they would still be emitted, and no EMMS cleanup would be done.
2835 if constexpr (__have_mmx && sizeof(_Tp) <= 4 && is_integral_v<_Tp>)
2836 return 8;
2837 */
2838
2839 // PowerPC:
2840 if constexpr (__have_power8vec
2841 || (__have_power_vmx && (sizeof(_Tp) < 8))
2842 || (__have_power_vsx && is_floating_point_v<_Tp>))
2843 return 16;
2844
2845 // ARM:
2846 if constexpr (__have_neon_a64)
2847 return 16;
2848 if constexpr (__have_neon_a32 and (not is_floating_point_v<_Tp>
2849 or is_same_v<_Tp, float>))
2850 return 16;
2851 if constexpr (__have_neon
2852 && sizeof(_Tp) < 8
2853 // Only allow fp if the user allows non-IEC559 fp (e.g.
2854 // via -ffast-math), because ARMv7 NEON fp does not
2855 // conform to IEC559.
2856 && (__support_neon_float || !is_floating_point_v<_Tp>))
2857 return 16;
2858 }
2859
2860 return sizeof(_Tp);
2861 }
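// [editorial example] On an x86-64 target with AVX2 but no AVX-512,
// __vectorized_sizeof<float>() returns 32, so simd_abi::native<float> below
// becomes a 32-byte _VecBuiltin ABI with 8 elements; without any usable vector
// ISA the function falls back to sizeof(_Tp), i.e. the scalar ABI.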
2862
2863// }}}
2864namespace simd_abi {
2865// most of simd_abi is defined in simd_detail.h
2866template <typename _Tp>
2867 inline constexpr int max_fixed_size
2868 = ((__have_avx512bw && sizeof(_Tp) == 1)
2869 || (__have_sve && __sve_vectorized_size_bytes / sizeof(_Tp) >= 64)) ? 64 : 32;
2870
2871// compatible {{{
2872#if defined __x86_64__ || defined __aarch64__
2873template <typename _Tp>
2874 using compatible = conditional_t<(sizeof(_Tp) <= 8), _VecBuiltin<16>, scalar>;
2875#elif defined __ARM_NEON
2876// FIXME: not sure, probably needs to be scalar (or dependent on the hard-float
2877// ABI?)
2878template <typename _Tp>
2879 using compatible
2880 = conditional_t<(sizeof(_Tp) < 8
2881 && (__support_neon_float || !is_floating_point_v<_Tp>)),
2882 _VecBuiltin<16>, scalar>;
2883#else
2884template <typename>
2885 using compatible = scalar;
2886#endif
2887
2888// }}}
2889// native {{{
2890template <typename _Tp>
2891 constexpr auto
2892 __determine_native_abi()
2893 {
2894 constexpr size_t __bytes = __vectorized_sizeof<_Tp>();
2895 if constexpr (__bytes == sizeof(_Tp))
2896 return static_cast<scalar*>(nullptr);
2897 else if constexpr (__have_sve)
2898 return static_cast<_SveAbi<__sve_vectorized_size_bytes>*>(nullptr);
2899 else if constexpr (__have_avx512vl || (__have_avx512f && __bytes == 64))
2900 return static_cast<_VecBltnBtmsk<__bytes>*>(nullptr);
2901 else
2902 return static_cast<_VecBuiltin<__bytes>*>(nullptr);
2903 }
2904
2905template <typename _Tp, typename = enable_if_t<__is_vectorizable_v<_Tp>>>
2906 using native = remove_pointer_t<decltype(__determine_native_abi<_Tp>())>;
2907
2908// }}}
2909// __default_abi {{{
2910#if defined _GLIBCXX_SIMD_DEFAULT_ABI
2911template <typename _Tp>
2912 using __default_abi = _GLIBCXX_SIMD_DEFAULT_ABI<_Tp>;
2913#else
2914template <typename _Tp>
2915 using __default_abi = compatible<_Tp>;
2916#endif
2917
2918// }}}
2919} // namespace simd_abi
2920
2921// traits {{{1
2922template <typename _Tp>
2923 struct is_simd_flag_type
2924 : false_type
2925 {};
2926
2927template <>
2928 struct is_simd_flag_type<element_aligned_tag>
2929 : true_type
2930 {};
2931
2932template <>
2933 struct is_simd_flag_type<vector_aligned_tag>
2934 : true_type
2935 {};
2936
2937template <size_t _Np>
2938 struct is_simd_flag_type<overaligned_tag<_Np>>
2939 : __bool_constant<(_Np > 0) and __has_single_bit(_Np)>
2940 {};
2941
2942template <typename _Tp>
2943 inline constexpr bool is_simd_flag_type_v = is_simd_flag_type<_Tp>::value;
2944
2945template <typename _Tp, typename = enable_if_t<is_simd_flag_type_v<_Tp>>>
2946 using _IsSimdFlagType = _Tp;
2947
2948// is_abi_tag {{{2
2949template <typename _Tp, typename = void_t<>>
2950 struct is_abi_tag : false_type {};
2951
2952template <typename _Tp>
2953 struct is_abi_tag<_Tp, void_t<typename _Tp::_IsValidAbiTag>>
2954 : public _Tp::_IsValidAbiTag {};
2955
2956template <typename _Tp>
2957 inline constexpr bool is_abi_tag_v = is_abi_tag<_Tp>::value;
2958
2959// is_simd(_mask) {{{2
2960template <typename _Tp>
2961 struct is_simd : public false_type {};
2962
2963template <typename _Tp>
2964 inline constexpr bool is_simd_v = is_simd<_Tp>::value;
2965
2966template <typename _Tp>
2967 struct is_simd_mask : public false_type {};
2968
2969template <typename _Tp>
2970 inline constexpr bool is_simd_mask_v = is_simd_mask<_Tp>::value;
2971
2972// simd_size {{{2
2973template <typename _Tp, typename _Abi, typename = void>
2974 struct __simd_size_impl {};
2975
2976template <typename _Tp, typename _Abi>
2977 struct __simd_size_impl<
2978 _Tp, _Abi,
2979 enable_if_t<conjunction_v<__is_vectorizable<_Tp>, is_abi_tag<_Abi>>>>
2980 : _SizeConstant<_Abi::template _S_size<_Tp>> {};
2981
2982template <typename _Tp, typename _Abi = simd_abi::__default_abi<_Tp>>
2983 struct simd_size : __simd_size_impl<_Tp, _Abi> {};
2984
2985template <typename _Tp, typename _Abi = simd_abi::__default_abi<_Tp>>
2986 inline constexpr size_t simd_size_v = simd_size<_Tp, _Abi>::value;
2987
2988// simd_abi::deduce {{{2
2989template <typename _Tp, size_t _Np, typename = void>
2990 struct __deduce_impl;
2991
2992template <typename _Tp, size_t _Np, typename = void>
2993 struct __no_sve_deduce_impl;
2994
2995namespace simd_abi {
2996/**
2997 * @tparam _Tp The requested `value_type` for the elements.
2998 * @tparam _Np The requested number of elements.
2999 * @tparam _Abis This parameter is ignored, since this implementation cannot
3000 * make any use of it. Either a good native ABI is matched and used as `type`
3001 * alias, or the `fixed_size<_Np>` ABI is used, which internally is built from
3002 * the best matching native ABIs.
3003 */
3004template <typename _Tp, size_t _Np, typename...>
3005 struct deduce : __deduce_impl<_Tp, _Np> {};
3006
3007template <typename _Tp, size_t _Np, typename... _Abis>
3008 using deduce_t = typename deduce<_Tp, _Np, _Abis...>::type;
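// [editorial example] simd_abi::deduce_t<float, 4> names an ABI tag _Ap with
// simd_size_v<float, _Ap> == 4: a 16-byte native ABI where one is available,
// otherwise fixed_size<4>.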
3009
3010template <typename _Tp, size_t _Np, typename...>
3011 struct __no_sve_deduce : __no_sve_deduce_impl<_Tp, _Np> {};
3012
3013template <typename _Tp, size_t _Np, typename... _Abis>
3014 using __no_sve_deduce_t = typename __no_sve_deduce<_Tp, _Np, _Abis...>::type;
3015} // namespace simd_abi
3016
3017// }}}2
3018// rebind_simd {{{2
3019template <typename _Tp, typename _V, typename = void>
3020 struct rebind_simd;
3021
3022template <typename _Tp, typename _Up, typename _Abi>
3023 struct rebind_simd<_Tp, simd<_Up, _Abi>,
3024 void_t<std::conditional_t<!__is_sve_abi<_Abi>(),
3025 simd_abi::__no_sve_deduce_t<_Tp, simd_size_v<_Up, _Abi>, _Abi>,
3026 simd_abi::deduce_t<_Tp, simd_size_v<_Up, _Abi>, _Abi>>>>
3027 {
3028 using type = simd<_Tp, std::conditional_t<
3029 !__is_sve_abi<_Abi>(),
3030 simd_abi::__no_sve_deduce_t<_Tp, simd_size_v<_Up, _Abi>, _Abi>,
3031 simd_abi::deduce_t<_Tp, simd_size_v<_Up, _Abi>, _Abi>>>;
3032 };
3033
3034template <typename _Tp, typename _Up, typename _Abi>
3035 struct rebind_simd<_Tp, simd_mask<_Up, _Abi>,
3036 void_t<std::conditional_t<!__is_sve_abi<_Abi>(),
3037 simd_abi::__no_sve_deduce_t<_Tp, simd_size_v<_Up, _Abi>, _Abi>,
3038 simd_abi::deduce_t<_Tp, simd_size_v<_Up, _Abi>, _Abi>>>>
3039 {
3040 using type = simd_mask<_Tp, std::conditional_t<
3041 !__is_sve_abi<_Abi>(),
3042 simd_abi::__no_sve_deduce_t<_Tp, simd_size_v<_Up, _Abi>, _Abi>,
3043 simd_abi::deduce_t<_Tp, simd_size_v<_Up, _Abi>, _Abi>>>;
3044 };
3045
3046template <typename _Tp, typename _V>
3047 using rebind_simd_t = typename rebind_simd<_Tp, _V>::type;
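// [editorial example] rebind_simd_t<int, simd<float, _Ap>> is a simd<int> with
// simd_size_v<float, _Ap> elements; the ABI is re-deduced for the new element
// type, so the register width may change.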
3048
3049// resize_simd {{{2
3050template <int _Np, typename _V, typename = void>
3051 struct resize_simd;
3052
3053template <int _Np, typename _Tp, typename _Abi>
3054 struct resize_simd<_Np, simd<_Tp, _Abi>, void_t<simd_abi::deduce_t<_Tp, _Np, _Abi>>>
3055 { using type = simd<_Tp, simd_abi::deduce_t<_Tp, _Np, _Abi>>; };
3056
3057template <int _Np, typename _Tp, typename _Abi>
3058 struct resize_simd<_Np, simd_mask<_Tp, _Abi>, void_t<simd_abi::deduce_t<_Tp, _Np, _Abi>>>
3059 { using type = simd_mask<_Tp, simd_abi::deduce_t<_Tp, _Np, _Abi>>; };
3060
3061template <int _Np, typename _V>
3062 using resize_simd_t = typename resize_simd<_Np, _V>::type;
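// [editorial example] resize_simd_t<8, native_simd<float>> keeps the float
// element type but re-deduces an ABI for exactly 8 elements.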
3063
3064// }}}2
3065// memory_alignment {{{2
3066template <typename _Tp, typename _Up = typename _Tp::value_type>
3067 struct memory_alignment
3068 : public _SizeConstant<vector_aligned_tag::_S_alignment<_Tp, _Up>> {};
3069
3070template <typename _Tp, typename _Up = typename _Tp::value_type>
3071 inline constexpr size_t memory_alignment_v = memory_alignment<_Tp, _Up>::value;
3072
3073// class template simd [simd] {{{1
3074template <typename _Tp, typename _Abi = simd_abi::__default_abi<_Tp>>
3075 class simd;
3076
3077template <typename _Tp, typename _Abi>
3078 struct is_simd<simd<_Tp, _Abi>> : public true_type {};
3079
3080template <typename _Tp>
3081 using native_simd = simd<_Tp, simd_abi::native<_Tp>>;
3082
3083template <typename _Tp, int _Np>
3084 using fixed_size_simd = simd<_Tp, simd_abi::fixed_size<_Np>>;
3085
3086template <typename _Tp, size_t _Np>
3087 using __deduced_simd = simd<_Tp, simd_abi::deduce_t<_Tp, _Np>>;
3088
3089// class template simd_mask [simd_mask] {{{1
3090template <typename _Tp, typename _Abi = simd_abi::__default_abi<_Tp>>
3091 class simd_mask;
3092
3093template <typename _Tp, typename _Abi>
3094 struct is_simd_mask<simd_mask<_Tp, _Abi>> : public true_type {};
3095
3096template <typename _Tp>
3097 using native_simd_mask = simd_mask<_Tp, simd_abi::native<_Tp>>;
3098
3099template <typename _Tp, int _Np>
3100 using fixed_size_simd_mask = simd_mask<_Tp, simd_abi::fixed_size<_Np>>;
3101
3102template <typename _Tp, size_t _Np>
3103 using __deduced_simd_mask = simd_mask<_Tp, simd_abi::deduce_t<_Tp, _Np>>;
3104
3105// casts [simd.casts] {{{1
3106// static_simd_cast {{{2
3107template <typename _Tp, typename _Up, typename _Ap, bool = is_simd_v<_Tp>, typename = void>
3108 struct __static_simd_cast_return_type;
3109
3110template <typename _Tp, typename _A0, typename _Up, typename _Ap>
3111 struct __static_simd_cast_return_type<simd_mask<_Tp, _A0>, _Up, _Ap, false, void>
3112 : __static_simd_cast_return_type<simd<_Tp, _A0>, _Up, _Ap> {};
3113
3114template <typename _Tp, typename _Up, typename _Ap>
3115 struct __static_simd_cast_return_type<
3116 _Tp, _Up, _Ap, true, enable_if_t<_Tp::size() == simd_size_v<_Up, _Ap>>>
3117 { using type = _Tp; };
3118
3119template <typename _Tp, typename _Ap>
3120 struct __static_simd_cast_return_type<_Tp, _Tp, _Ap, false,
3121#ifdef _GLIBCXX_SIMD_FIX_P2TS_ISSUE66
3122 enable_if_t<__is_vectorizable_v<_Tp>>
3123#else
3124 void
3125#endif
3126 >
3127 { using type = simd<_Tp, _Ap>; };
3128
3129template <typename _Tp, typename = void>
3130 struct __safe_make_signed { using type = _Tp; };
3131
3132template <typename _Tp>
3133 struct __safe_make_signed<_Tp, enable_if_t<is_integral_v<_Tp>>>
3134 {
3135 // the extra make_unsigned_t is because of PR85951
3136 using type = make_signed_t<make_unsigned_t<_Tp>>;
3137 };
3138
3139template <typename _Tp>
3140 using safe_make_signed_t = typename __safe_make_signed<_Tp>::type;
3141
3142template <typename _Tp, typename _Up, typename _Ap>
3143 struct __static_simd_cast_return_type<_Tp, _Up, _Ap, false,
3144#ifdef _GLIBCXX_SIMD_FIX_P2TS_ISSUE66
3145 enable_if_t<__is_vectorizable_v<_Tp>>
3146#else
3147 void
3148#endif
3149 >
3150 {
3151 using type = conditional_t<
3152 (is_integral_v<_Up> && is_integral_v<_Tp> &&
3153#ifndef _GLIBCXX_SIMD_FIX_P2TS_ISSUE65
3154 is_signed_v<_Up> != is_signed_v<_Tp> &&
3155#endif
3156 is_same_v<safe_make_signed_t<_Up>, safe_make_signed_t<_Tp>>),
3157 simd<_Tp, _Ap>, fixed_size_simd<_Tp, simd_size_v<_Up, _Ap>>>;
3158 };
3159
3160template <typename _Tp, typename _Up, typename _Ap,
3161 typename _R
3162 = typename __static_simd_cast_return_type<_Tp, _Up, _Ap>::type>
3163 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR _R
3164 static_simd_cast(const simd<_Up, _Ap>& __x)
3165 {
3166 if constexpr (is_same<_R, simd<_Up, _Ap>>::value)
3167 return __x;
3168 else
3169 {
3170 _SimdConverter<_Up, _Ap, typename _R::value_type, typename _R::abi_type>
3171 __c;
3172 return _R(__private_init, __c(__data(__x)));
3173 }
3174 }
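// [editorial example] Given fixed_size_simd<int, 4> __x,
// static_simd_cast<float>(__x) converts element-wise to a
// fixed_size_simd<float, 4>; if the requested value_type equals _Up, __x is
// returned unchanged.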
3175
3176namespace __proposed {
3177template <typename _Tp, typename _Up, typename _Ap,
3178 typename _R
3179 = typename __static_simd_cast_return_type<_Tp, _Up, _Ap>::type>
3180 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR typename _R::mask_type
3181 static_simd_cast(const simd_mask<_Up, _Ap>& __x)
3182 {
3183 using _RM = typename _R::mask_type;
3184 return {__private_init, _RM::abi_type::_MaskImpl::template _S_convert<
3185 typename _RM::simd_type::value_type>(__x)};
3186 }
3187
3188template <typename _To, typename _Up, typename _Abi>
3189 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR
3190 _To
3191 simd_bit_cast(const simd<_Up, _Abi>& __x)
3192 {
3193 using _Tp = typename _To::value_type;
3194 using _ToMember = typename _SimdTraits<_Tp, typename _To::abi_type>::_SimdMember;
3195 using _From = simd<_Up, _Abi>;
3196 using _FromMember = typename _SimdTraits<_Up, _Abi>::_SimdMember;
3197 // with concepts, the following should be constraints
3198 static_assert(sizeof(_To) == sizeof(_From));
3199 static_assert(is_trivially_copyable_v<_Tp> && is_trivially_copyable_v<_Up>);
3200 static_assert(is_trivially_copyable_v<_ToMember> && is_trivially_copyable_v<_FromMember>);
3201#if __has_builtin(__builtin_bit_cast)
3202 return {__private_init, __builtin_bit_cast(_ToMember, __data(__x))};
3203#else
3204 return {__private_init, __bit_cast<_ToMember>(__data(__x))};
3205#endif
3206 }
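// [editorial example] Assuming equally sized types,
// simd_bit_cast<rebind_simd_t<unsigned, decltype(__v)>>(__v) reinterprets the
// bits of a float simd __v as an unsigned simd, analogous to std::bit_cast
// applied to the _SimdMember storage.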
3207
3208template <typename _To, typename _Up, typename _Abi>
3209 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR
3210 _To
3211 simd_bit_cast(const simd_mask<_Up, _Abi>& __x)
3212 {
3213 using _From = simd_mask<_Up, _Abi>;
3214 static_assert(sizeof(_To) == sizeof(_From));
3215 static_assert(is_trivially_copyable_v<_From>);
3216 // _To can be simd<T, A>, specifically simd<T, fixed_size<N>> in which case _To is not trivially
3217 // copyable.
3218 if constexpr (is_simd_v<_To>)
3219 {
3220 using _Tp = typename _To::value_type;
3221 using _ToMember = typename _SimdTraits<_Tp, typename _To::abi_type>::_SimdMember;
3222 static_assert(is_trivially_copyable_v<_ToMember>);
3223#if __has_builtin(__builtin_bit_cast)
3224 return {__private_init, __builtin_bit_cast(_ToMember, __x)};
3225#else
3226 return {__private_init, __bit_cast<_ToMember>(__x)};
3227#endif
3228 }
3229 else
3230 {
3231 static_assert(is_trivially_copyable_v<_To>);
3232#if __has_builtin(__builtin_bit_cast)
3233 return __builtin_bit_cast(_To, __x);
3234#else
3235 return __bit_cast<_To>(__x);
3236#endif
3237 }
3238 }
3239} // namespace __proposed
3240
3241// simd_cast {{{2
3242template <typename _Tp, typename _Up, typename _Ap,
3243 typename _To = __value_type_or_identity_t<_Tp>>
3244 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR auto
3245 simd_cast(const simd<_ValuePreserving<_Up, _To>, _Ap>& __x)
3246 -> decltype(static_simd_cast<_Tp>(__x))
3247 { return static_simd_cast<_Tp>(__x); }
3248
3249namespace __proposed {
3250template <typename _Tp, typename _Up, typename _Ap,
3251 typename _To = __value_type_or_identity_t<_Tp>>
3252 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR auto
3253 simd_cast(const simd_mask<_ValuePreserving<_Up, _To>, _Ap>& __x)
3254 -> decltype(static_simd_cast<_Tp>(__x))
3255 { return static_simd_cast<_Tp>(__x); }
3256} // namespace __proposed
3257
3258// }}}2
3259// resizing_simd_cast {{{
3260namespace __proposed {
3261/* Proposed spec:
3262
3263template <class T, class U, class Abi>
3264T resizing_simd_cast(const simd<U, Abi>& x)
3265
3266p1 Constraints:
3267 - is_simd_v<T> is true and
3268 - T::value_type is the same type as U
3269
3270p2 Returns:
3271 A simd object with the i^th element initialized to x[i] for all i in the
3272 range of [0, min(T::size(), simd_size_v<U, Abi>)). If T::size() is larger
3273 than simd_size_v<U, Abi>, the remaining elements are value-initialized.
3274
3275template <class T, class U, class Abi>
3276T resizing_simd_cast(const simd_mask<U, Abi>& x)
3277
3278p1 Constraints: is_simd_mask_v<T> is true
3279
3280p2 Returns:
3281 A simd_mask object with the i^th element initialized to x[i] for all i in
3282 the range of [0, min(T::size(), simd_size_v<U, Abi>)). If T::size() is larger
3283 than simd_size_v<U, Abi>, the remaining elements are initialized to false.
3284
3285 */
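// [editorial example] With a 4-element simd<int> __x,
// resizing_simd_cast<fixed_size_simd<int, 8>>(__x) copies __x[0..3] and
// value-initializes the remaining four elements.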
3286
3287template <typename _Tp, typename _Up, typename _Ap>
3288 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR enable_if_t<
3289 conjunction_v<is_simd<_Tp>, is_same<typename _Tp::value_type, _Up>>, _Tp>
3290 resizing_simd_cast(const simd<_Up, _Ap>& __x)
3291 {
3292 if constexpr (is_same_v<typename _Tp::abi_type, _Ap>)
3293 return __x;
3294 else if (__builtin_is_constant_evaluated())
3295 return _Tp([&](auto __i) constexpr {
3296 return __i < simd_size_v<_Up, _Ap> ? __x[__i] : _Up();
3297 });
3298 else if constexpr (simd_size_v<_Up, _Ap> == 1)
3299 {
3300 _Tp __r{};
3301 __r[0] = __x[0];
3302 return __r;
3303 }
3304 else if constexpr (_Tp::size() == 1)
3305 return __x[0];
3306 else if constexpr (sizeof(_Tp) == sizeof(__x)
3307 && !__is_fixed_size_abi_v<_Ap> && !__is_sve_abi<_Ap>())
3308 return {__private_init,
3309 __vector_bitcast<typename _Tp::value_type, _Tp::size()>(
3310 _Ap::_S_masked(__data(__x))._M_data)};
3311 else
3312 {
3313 _Tp __r{};
3314 __builtin_memcpy(&__data(__r), &__data(__x),
3315 sizeof(_Up)
3316 * std::min(_Tp::size(), simd_size_v<_Up, _Ap>));
3317 return __r;
3318 }
3319 }
3320
3321template <typename _Tp, typename _Up, typename _Ap>
3322 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR
3323 enable_if_t<is_simd_mask_v<_Tp>, _Tp>
3324 resizing_simd_cast(const simd_mask<_Up, _Ap>& __x)
3325 {
3326 return {__private_init, _Tp::abi_type::_MaskImpl::template _S_convert<
3327 typename _Tp::simd_type::value_type>(__x)};
3328 }
3329} // namespace __proposed
3330
3331// }}}
3332// to_fixed_size {{{2
3333template <typename _Tp, int _Np>
3334 _GLIBCXX_SIMD_INTRINSIC fixed_size_simd<_Tp, _Np>
3335 to_fixed_size(const fixed_size_simd<_Tp, _Np>& __x)
3336 { return __x; }
3337
3338template <typename _Tp, int _Np>
3339 _GLIBCXX_SIMD_INTRINSIC fixed_size_simd_mask<_Tp, _Np>
3340 to_fixed_size(const fixed_size_simd_mask<_Tp, _Np>& __x)
3341 { return __x; }
3342
3343template <typename _Tp, typename _Ap>
3344 _GLIBCXX_SIMD_INTRINSIC fixed_size_simd<_Tp, simd_size_v<_Tp, _Ap>>
3345 to_fixed_size(const simd<_Tp, _Ap>& __x)
3346 {
3347 using _Rp = fixed_size_simd<_Tp, simd_size_v<_Tp, _Ap>>;
3348 return _Rp([&__x](auto __i) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA { return __x[__i]; });
3349 }
3350
3351template <typename _Tp, typename _Ap>
3352 _GLIBCXX_SIMD_INTRINSIC fixed_size_simd_mask<_Tp, simd_size_v<_Tp, _Ap>>
3353 to_fixed_size(const simd_mask<_Tp, _Ap>& __x)
3354 {
3355 return {__private_init,
3356 [&](auto __i) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA { return __x[__i]; }};
3357 }
3358
3359// to_native {{{2
3360template <typename _Tp, int _Np>
3361 _GLIBCXX_SIMD_INTRINSIC
3362 enable_if_t<(_Np == native_simd<_Tp>::size()), native_simd<_Tp>>
3363 to_native(const fixed_size_simd<_Tp, _Np>& __x)
3364 {
3365 alignas(memory_alignment_v<native_simd<_Tp>>) _Tp __mem[_Np];
3366 __x.copy_to(__mem, vector_aligned);
3367 return {__mem, vector_aligned};
3368 }
3369
3370template <typename _Tp, int _Np>
3371 _GLIBCXX_SIMD_INTRINSIC
3372 enable_if_t<(_Np == native_simd_mask<_Tp>::size()), native_simd_mask<_Tp>>
3373 to_native(const fixed_size_simd_mask<_Tp, _Np>& __x)
3374 {
3375 return native_simd_mask<_Tp>(
3376 __private_init,
3377 [&](auto __i) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA { return __x[__i]; });
3378 }
3379
3380// to_compatible {{{2
3381template <typename _Tp, int _Np>
3382 _GLIBCXX_SIMD_INTRINSIC enable_if_t<(_Np == simd<_Tp>::size()), simd<_Tp>>
3383 to_compatible(const simd<_Tp, simd_abi::fixed_size<_Np>>& __x)
3384 {
3385 alignas(memory_alignment_v<simd<_Tp>>) _Tp __mem[_Np];
3386 __x.copy_to(__mem, vector_aligned);
3387 return {__mem, vector_aligned};
3388 }
3389
3390template <typename _Tp, int _Np>
3391 _GLIBCXX_SIMD_INTRINSIC
3392 enable_if_t<(_Np == simd_mask<_Tp>::size()), simd_mask<_Tp>>
3393 to_compatible(const simd_mask<_Tp, simd_abi::fixed_size<_Np>>& __x)
3394 {
3395 return simd_mask<_Tp>(
3396 __private_init,
3397 [&](auto __i) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA { return __x[__i]; });
3398 }
3399
3400// masked assignment [simd_mask.where] {{{1
3401
3402// where_expression {{{1
3403// const_where_expression<M, T> {{{2
3404template <typename _M, typename _Tp>
3405 class const_where_expression
3406 {
3407 using _V = _Tp;
3408 static_assert(is_same_v<_V, __remove_cvref_t<_Tp>>);
3409
3410 struct _Wrapper { using value_type = _V; };
3411
3412 protected:
3413 using _Impl = typename _V::_Impl;
3414
3415 using value_type =
3416 typename conditional_t<is_arithmetic_v<_V>, _Wrapper, _V>::value_type;
3417
3418 _GLIBCXX_SIMD_INTRINSIC friend const _M&
3419 __get_mask(const const_where_expression& __x)
3420 { return __x._M_k; }
3421
3422 _GLIBCXX_SIMD_INTRINSIC friend const _Tp&
3423 __get_lvalue(const const_where_expression& __x)
3424 { return __x._M_value; }
3425
3426 const _M& _M_k;
3427 _Tp& _M_value;
3428
3429 public:
3430 const_where_expression(const const_where_expression&) = delete;
3431
3432 const_where_expression& operator=(const const_where_expression&) = delete;
3433
3434 _GLIBCXX_SIMD_INTRINSIC constexpr
3435 const_where_expression(const _M& __kk, const _Tp& dd)
3436 : _M_k(__kk), _M_value(const_cast<_Tp&>(dd)) {}
3437
3438 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR _V
3439 operator-() const&&
3440 {
3441 return {__private_init,
3442 _Impl::template _S_masked_unary<negate>(__data(_M_k),
3443 __data(_M_value))};
3444 }
3445
3446 template <typename _Up, typename _Flags>
3447 [[nodiscard]] _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR _V
3448 copy_from(const _LoadStorePtr<_Up, value_type>* __mem, _IsSimdFlagType<_Flags>) const&&
3449 {
3450 return {__private_init,
3451 _Impl::_S_masked_load(__data(_M_value), __data(_M_k),
3452 _Flags::template _S_apply<_V>(__mem))};
3453 }
3454
3455 template <typename _Up, typename _Flags>
3456 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR void
3457 copy_to(_LoadStorePtr<_Up, value_type>* __mem, _IsSimdFlagType<_Flags>) const&&
3458 {
3459 _Impl::_S_masked_store(__data(_M_value),
3460 _Flags::template _S_apply<_V>(__mem),
3461 __data(_M_k));
3462 }
3463 };
3464
3465// const_where_expression<bool, T> {{{2
3466template <typename _Tp>
3467 class const_where_expression<bool, _Tp>
3468 {
3469 using _M = bool;
3470 using _V = _Tp;
3471
3472 static_assert(is_same_v<_V, __remove_cvref_t<_Tp>>);
3473
3474 struct _Wrapper { using value_type = _V; };
3475
3476 protected:
3477 using value_type
3478 = typename conditional_t<is_arithmetic_v<_V>, _Wrapper, _V>::value_type;
3479
3480 _GLIBCXX_SIMD_INTRINSIC friend const _M&
3481 __get_mask(const const_where_expression& __x)
3482 { return __x._M_k; }
3483
3484 _GLIBCXX_SIMD_INTRINSIC friend const _Tp&
3485 __get_lvalue(const const_where_expression& __x)
3486 { return __x._M_value; }
3487
3488 const bool _M_k;
3489 _Tp& _M_value;
3490
3491 public:
3492 const_where_expression(const const_where_expression&) = delete;
3493 const_where_expression& operator=(const const_where_expression&) = delete;
3494
3495 _GLIBCXX_SIMD_INTRINSIC constexpr
3496 const_where_expression(const bool __kk, const _Tp& dd)
3497 : _M_k(__kk), _M_value(const_cast<_Tp&>(dd)) {}
3498
3499 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR _V
3500 operator-() const&&
3501 { return _M_k ? -_M_value : _M_value; }
3502
3503 template <typename _Up, typename _Flags>
3504 [[nodiscard]] _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR _V
3505 copy_from(const _LoadStorePtr<_Up, value_type>* __mem, _IsSimdFlagType<_Flags>) const&&
3506 { return _M_k ? static_cast<_V>(__mem[0]) : _M_value; }
3507
3508 template <typename _Up, typename _Flags>
3509 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR void
3510 copy_to(_LoadStorePtr<_Up, value_type>* __mem, _IsSimdFlagType<_Flags>) const&&
3511 {
3512 if (_M_k)
3513 __mem[0] = _M_value;
3514 }
3515 };
3516
3517// where_expression<M, T> {{{2
3518template <typename _M, typename _Tp>
3519 class where_expression : public const_where_expression<_M, _Tp>
3520 {
3521 using _Impl = typename const_where_expression<_M, _Tp>::_Impl;
3522
3523    static_assert(!is_const<_Tp>::value,
3524		  "where_expression may only be instantiated with a non-const "
3525		  "_Tp parameter");
3526
3527 using typename const_where_expression<_M, _Tp>::value_type;
3528 using const_where_expression<_M, _Tp>::_M_k;
3529 using const_where_expression<_M, _Tp>::_M_value;
3530
3531 static_assert(
3532 is_same<typename _M::abi_type, typename _Tp::abi_type>::value, "");
3533 static_assert(_M::size() == _Tp::size(), "");
3534
3535 _GLIBCXX_SIMD_INTRINSIC friend constexpr _Tp&
3536 __get_lvalue(where_expression& __x)
3537 { return __x._M_value; }
3538
3539 public:
3540 where_expression(const where_expression&) = delete;
3541 where_expression& operator=(const where_expression&) = delete;
3542
3543 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR
3544    where_expression(const _M& __kk, _Tp& __dd)
3545    : const_where_expression<_M, _Tp>(__kk, __dd) {}
3546
3547 template <typename _Up>
3548 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR void
3549 operator=(_Up&& __x) &&
3550 {
3551 _Impl::_S_masked_assign(__data(_M_k), __data(_M_value),
3552 __to_value_type_or_member_type<_Tp>(
3553 static_cast<_Up&&>(__x)));
3554 }
3555
3556#define _GLIBCXX_SIMD_OP_(__op, __name) \
3557 template <typename _Up> \
3558 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR void \
3559 operator __op##=(_Up&& __x)&& \
3560 { \
3561 _Impl::template _S_masked_cassign( \
3562 __data(_M_k), __data(_M_value), \
3563 __to_value_type_or_member_type<_Tp>(static_cast<_Up&&>(__x)), \
3564 [](auto __impl, auto __lhs, auto __rhs) \
3565 constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA \
3566 { return __impl.__name(__lhs, __rhs); }); \
3567 } \
3568 static_assert(true)
3569 _GLIBCXX_SIMD_OP_(+, _S_plus);
3570 _GLIBCXX_SIMD_OP_(-, _S_minus);
3571 _GLIBCXX_SIMD_OP_(*, _S_multiplies);
3572 _GLIBCXX_SIMD_OP_(/, _S_divides);
3573 _GLIBCXX_SIMD_OP_(%, _S_modulus);
3574 _GLIBCXX_SIMD_OP_(&, _S_bit_and);
3575 _GLIBCXX_SIMD_OP_(|, _S_bit_or);
3576 _GLIBCXX_SIMD_OP_(^, _S_bit_xor);
3577 _GLIBCXX_SIMD_OP_(<<, _S_shift_left);
3578 _GLIBCXX_SIMD_OP_(>>, _S_shift_right);
3579#undef _GLIBCXX_SIMD_OP_
3580
3581 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR void
3582 operator++() &&
3583 {
3584 __data(_M_value)
3585 = _Impl::template _S_masked_unary<__increment>(__data(_M_k), __data(_M_value));
3586 }
3587
3588 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR void
3589 operator++(int) &&
3590 {
3591 __data(_M_value)
3592 = _Impl::template _S_masked_unary<__increment>(__data(_M_k), __data(_M_value));
3593 }
3594
3595 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR void
3596 operator--() &&
3597 {
3598 __data(_M_value)
3599 = _Impl::template _S_masked_unary<__decrement>(__data(_M_k), __data(_M_value));
3600 }
3601
3602 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR void
3603 operator--(int) &&
3604 {
3605 __data(_M_value)
3606 = _Impl::template _S_masked_unary<__decrement>(__data(_M_k), __data(_M_value));
3607 }
3608
3609 // intentionally hides const_where_expression::copy_from
3610 template <typename _Up, typename _Flags>
3611 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR void
3612 copy_from(const _LoadStorePtr<_Up, value_type>* __mem, _IsSimdFlagType<_Flags>) &&
3613 {
3614 __data(_M_value) = _Impl::_S_masked_load(__data(_M_value), __data(_M_k),
3615 _Flags::template _S_apply<_Tp>(__mem));
3616 }
3617 };
3618
3619// where_expression<bool, T> {{{2
3620template <typename _Tp>
3621 class where_expression<bool, _Tp>
3622 : public const_where_expression<bool, _Tp>
3623 {
3624 using _M = bool;
3625 using typename const_where_expression<_M, _Tp>::value_type;
3626 using const_where_expression<_M, _Tp>::_M_k;
3627 using const_where_expression<_M, _Tp>::_M_value;
3628
3629 public:
3630 where_expression(const where_expression&) = delete;
3631 where_expression& operator=(const where_expression&) = delete;
3632
3633 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR
3634    where_expression(const _M& __kk, _Tp& __dd)
3635    : const_where_expression<_M, _Tp>(__kk, __dd) {}
3636
3637#define _GLIBCXX_SIMD_OP_(__op) \
3638 template <typename _Up> \
3639 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR void \
3640 operator __op(_Up&& __x)&& \
3641 { if (_M_k) _M_value __op static_cast<_Up&&>(__x); }
3642
3643 _GLIBCXX_SIMD_OP_(=)
3644 _GLIBCXX_SIMD_OP_(+=)
3645 _GLIBCXX_SIMD_OP_(-=)
3646 _GLIBCXX_SIMD_OP_(*=)
3647 _GLIBCXX_SIMD_OP_(/=)
3648 _GLIBCXX_SIMD_OP_(%=)
3649 _GLIBCXX_SIMD_OP_(&=)
3650 _GLIBCXX_SIMD_OP_(|=)
3651 _GLIBCXX_SIMD_OP_(^=)
3652 _GLIBCXX_SIMD_OP_(<<=)
3653 _GLIBCXX_SIMD_OP_(>>=)
3654#undef _GLIBCXX_SIMD_OP_
3655
3656 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR void
3657 operator++() &&
3658 { if (_M_k) ++_M_value; }
3659
3660 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR void
3661 operator++(int) &&
3662 { if (_M_k) ++_M_value; }
3663
3664 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR void
3665 operator--() &&
3666 { if (_M_k) --_M_value; }
3667
3668 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR void
3669 operator--(int) &&
3670 { if (_M_k) --_M_value; }
3671
3672 // intentionally hides const_where_expression::copy_from
3673 template <typename _Up, typename _Flags>
3674 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR void
3675 copy_from(const _LoadStorePtr<_Up, value_type>* __mem, _IsSimdFlagType<_Flags>) &&
3676 { if (_M_k) _M_value = __mem[0]; }
3677 };
3678
3679// where {{{1
3680template <typename _Tp, typename _Ap>
3681 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR
3682 where_expression<simd_mask<_Tp, _Ap>, simd<_Tp, _Ap>>
3683 where(const typename simd<_Tp, _Ap>::mask_type& __k, simd<_Tp, _Ap>& __value)
3684 { return {__k, __value}; }
3685
3686template <typename _Tp, typename _Ap>
3687 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR
3688 const_where_expression<simd_mask<_Tp, _Ap>, simd<_Tp, _Ap>>
3689 where(const typename simd<_Tp, _Ap>::mask_type& __k, const simd<_Tp, _Ap>& __value)
3690 { return {__k, __value}; }
3691
3692template <typename _Tp, typename _Ap>
3693 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR
3694 where_expression<simd_mask<_Tp, _Ap>, simd_mask<_Tp, _Ap>>
3695 where(const remove_const_t<simd_mask<_Tp, _Ap>>& __k, simd_mask<_Tp, _Ap>& __value)
3696 { return {__k, __value}; }
3697
3698template <typename _Tp, typename _Ap>
3699 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR
3700 const_where_expression<simd_mask<_Tp, _Ap>, simd_mask<_Tp, _Ap>>
3701 where(const remove_const_t<simd_mask<_Tp, _Ap>>& __k, const simd_mask<_Tp, _Ap>& __value)
3702 { return {__k, __value}; }
3703
3704template <typename _Tp>
3705 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR where_expression<bool, _Tp>
3706 where(_ExactBool __k, _Tp& __value)
3707 { return {__k, __value}; }
3708
3709template <typename _Tp>
3710 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR const_where_expression<bool, _Tp>
3711 where(_ExactBool __k, const _Tp& __value)
3712 { return {__k, __value}; }
3713
3714template <typename _Tp, typename _Ap>
3715 _GLIBCXX_SIMD_CONSTEXPR void
3716 where(bool __k, simd<_Tp, _Ap>& __value) = delete;
3717
3718template <typename _Tp, typename _Ap>
3719 _GLIBCXX_SIMD_CONSTEXPR void
3720 where(bool __k, const simd<_Tp, _Ap>& __value) = delete;
3721
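// Illustrative use of where() for masked assignment (hypothetical user code,
// assuming namespace stdx = std::experimental):
//   stdx::native_simd<float> v = ...;
//   where(v < 0.f, v) = 0.f;    // assign only where the mask is true
//   where(v > 1.f, v) *= 0.5f;  // compound assignment via where_expression
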
3722// proposed mask iterations {{{1
3723namespace __proposed {
3724template <size_t _Np>
3725 class where_range
3726 {
3727 const bitset<_Np> __bits;
3728
3729 public:
3730 where_range(bitset<_Np> __b) : __bits(__b) {}
3731
3732 class iterator
3733 {
3734 size_t __mask;
3735 size_t __bit;
3736
3737 _GLIBCXX_SIMD_INTRINSIC void
3738 __next_bit()
3739	{ __bit = __mask == 0 ? 0 : __builtin_ctzl(__mask); } // guard: __builtin_ctzl(0) is UB; end() has __mask == 0
3740
3741 _GLIBCXX_SIMD_INTRINSIC void
3742 __reset_lsb()
3743 {
3744	  // clear the lowest set bit: 01100100 & (01100100 - 1 = 01100011) = 01100000
3745	  __mask &= (__mask - 1);
3746 // __asm__("btr %1,%0" : "+r"(__mask) : "r"(__bit));
3747 }
3748
3749 public:
3750 iterator(decltype(__mask) __m) : __mask(__m) { __next_bit(); }
3751 iterator(const iterator&) = default;
3752 iterator(iterator&&) = default;
3753
3754 _GLIBCXX_SIMD_ALWAYS_INLINE size_t
3755 operator->() const
3756 { return __bit; }
3757
3758 _GLIBCXX_SIMD_ALWAYS_INLINE size_t
3759 operator*() const
3760 { return __bit; }
3761
3762 _GLIBCXX_SIMD_ALWAYS_INLINE iterator&
3763 operator++()
3764 {
3765 __reset_lsb();
3766 __next_bit();
3767 return *this;
3768 }
3769
3770 _GLIBCXX_SIMD_ALWAYS_INLINE iterator
3771 operator++(int)
3772 {
3773 iterator __tmp = *this;
3774 __reset_lsb();
3775 __next_bit();
3776 return __tmp;
3777 }
3778
3779 _GLIBCXX_SIMD_ALWAYS_INLINE bool
3780 operator==(const iterator& __rhs) const
3781 { return __mask == __rhs.__mask; }
3782
3783 _GLIBCXX_SIMD_ALWAYS_INLINE bool
3784 operator!=(const iterator& __rhs) const
3785 { return __mask != __rhs.__mask; }
3786 };
3787
3788 iterator
3789 begin() const
3790 { return __bits.to_ullong(); }
3791
3792 iterator
3793 end() const
3794 { return 0; }
3795 };
3796
3797template <typename _Tp, typename _Ap>
3798 where_range<simd_size_v<_Tp, _Ap>>
3799 where(const simd_mask<_Tp, _Ap>& __k)
3800 { return __k.__to_bitset(); }
3801
3802} // namespace __proposed
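
// Illustrative iteration over the set elements of a mask (hypothetical user
// code; the range is built via to_ullong(), so it is limited to masks of at
// most 64 elements):
//   auto k = v > 0.f;
//   for (size_t i : stdx::__proposed::where(k))
//     handle(i);  // visits the index of every true mask element, lowest first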
3803
3804// }}}1
3805// reductions [simd.reductions] {{{1
3806template <typename _Tp, typename _Abi, typename _BinaryOperation = plus<>>
3807 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR _Tp
3808 reduce(const simd<_Tp, _Abi>& __v, _BinaryOperation __binary_op = _BinaryOperation())
3809 { return _Abi::_SimdImpl::_S_reduce(__v, __binary_op); }
3810
3811template <typename _M, typename _V, typename _BinaryOperation = plus<>>
3812 _GLIBCXX_SIMD_INTRINSIC typename _V::value_type
3813 reduce(const const_where_expression<_M, _V>& __x,
3814 typename _V::value_type __identity_element, _BinaryOperation __binary_op)
3815 {
3816 if (__builtin_expect(none_of(__get_mask(__x)), false))
3817 return __identity_element;
3818
3819 _V __tmp = __identity_element;
3820 _V::_Impl::_S_masked_assign(__data(__get_mask(__x)), __data(__tmp),
3821 __data(__get_lvalue(__x)));
3822 return reduce(__tmp, __binary_op);
3823 }
3824
3825template <typename _M, typename _V>
3826 _GLIBCXX_SIMD_INTRINSIC typename _V::value_type
3827 reduce(const const_where_expression<_M, _V>& __x, plus<> __binary_op = {})
3828 { return reduce(__x, 0, __binary_op); }
3829
3830template <typename _M, typename _V>
3831 _GLIBCXX_SIMD_INTRINSIC typename _V::value_type
3832 reduce(const const_where_expression<_M, _V>& __x, multiplies<> __binary_op)
3833 { return reduce(__x, 1, __binary_op); }
3834
3835template <typename _M, typename _V>
3836 _GLIBCXX_SIMD_INTRINSIC typename _V::value_type
3837 reduce(const const_where_expression<_M, _V>& __x, bit_and<> __binary_op)
3838 { return reduce(__x, ~typename _V::value_type(), __binary_op); }
3839
3840template <typename _M, typename _V>
3841 _GLIBCXX_SIMD_INTRINSIC typename _V::value_type
3842 reduce(const const_where_expression<_M, _V>& __x, bit_or<> __binary_op)
3843 { return reduce(__x, 0, __binary_op); }
3844
3845template <typename _M, typename _V>
3846 _GLIBCXX_SIMD_INTRINSIC typename _V::value_type
3847 reduce(const const_where_expression<_M, _V>& __x, bit_xor<> __binary_op)
3848 { return reduce(__x, 0, __binary_op); }
3849
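// Illustrative reductions (hypothetical user code; stdx as above):
//   float sum = stdx::reduce(v);  // plus<> is the default binary operation
//   float psum = stdx::reduce(where(v > 0.f, v), 0.f, std::plus<>());
//   // masked-off elements contribute the identity element (0.f) instead
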
3850template <typename _Tp, typename _Abi>
3851 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR _Tp
3852 hmin(const simd<_Tp, _Abi>& __v) noexcept
3853 { return _Abi::_SimdImpl::_S_reduce(__v, __detail::_Minimum()); }
3854
3855template <typename _Tp, typename _Abi>
3856 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR _Tp
3857 hmax(const simd<_Tp, _Abi>& __v) noexcept
3858 { return _Abi::_SimdImpl::_S_reduce(__v, __detail::_Maximum()); }
3859
3860template <typename _M, typename _V>
3861 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR
3862 typename _V::value_type
3863 hmin(const const_where_expression<_M, _V>& __x) noexcept
3864 {
3865 using _Tp = typename _V::value_type;
3866 constexpr _Tp __id_elem =
3867#ifdef __FINITE_MATH_ONLY__
3868 __finite_max_v<_Tp>;
3869#else
3870 __value_or<__infinity, _Tp>(__finite_max_v<_Tp>);
3871#endif
3872 _V __tmp = __id_elem;
3873 _V::_Impl::_S_masked_assign(__data(__get_mask(__x)), __data(__tmp),
3874 __data(__get_lvalue(__x)));
3875 return _V::abi_type::_SimdImpl::_S_reduce(__tmp, __detail::_Minimum());
3876 }
3877
3878template <typename _M, typename _V>
3879 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR
3880 typename _V::value_type
3881 hmax(const const_where_expression<_M, _V>& __x) noexcept
3882 {
3883 using _Tp = typename _V::value_type;
3884 constexpr _Tp __id_elem =
3885#ifdef __FINITE_MATH_ONLY__
3886 __finite_min_v<_Tp>;
3887#else
3888 [] {
3889 if constexpr (__value_exists_v<__infinity, _Tp>)
3890 return -__infinity_v<_Tp>;
3891 else
3892 return __finite_min_v<_Tp>;
3893 }();
3894#endif
3895 _V __tmp = __id_elem;
3896 _V::_Impl::_S_masked_assign(__data(__get_mask(__x)), __data(__tmp),
3897 __data(__get_lvalue(__x)));
3898 return _V::abi_type::_SimdImpl::_S_reduce(__tmp, __detail::_Maximum());
3899 }
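
// Illustrative horizontal min/max (hypothetical user code):
//   float lo = stdx::hmin(v);                  // smallest element of v
//   float hi = stdx::hmax(where(v > 0.f, v));  // masked-off lanes yield the
//                                              // identity element (-infinity)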
3900
3901// }}}1
3902// algorithms [simd.alg] {{{
3903template <typename _Tp, typename _Ap>
3904 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR simd<_Tp, _Ap>
3905 min(const simd<_Tp, _Ap>& __a, const simd<_Tp, _Ap>& __b)
3906 { return {__private_init, _Ap::_SimdImpl::_S_min(__data(__a), __data(__b))}; }
3907
3908template <typename _Tp, typename _Ap>
3909 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR simd<_Tp, _Ap>
3910 max(const simd<_Tp, _Ap>& __a, const simd<_Tp, _Ap>& __b)
3911 { return {__private_init, _Ap::_SimdImpl::_S_max(__data(__a), __data(__b))}; }
3912
3913template <typename _Tp, typename _Ap>
3914 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR
3915 pair<simd<_Tp, _Ap>, simd<_Tp, _Ap>>
3916 minmax(const simd<_Tp, _Ap>& __a, const simd<_Tp, _Ap>& __b)
3917 {
3918    const auto __pair_of_members
3919      = _Ap::_SimdImpl::_S_minmax(__data(__a), __data(__b));
3920    return {simd<_Tp, _Ap>(__private_init, __pair_of_members.first),
3921	    simd<_Tp, _Ap>(__private_init, __pair_of_members.second)};
3922 }
3923
3924template <typename _Tp, typename _Ap>
3925 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR simd<_Tp, _Ap>
3926 clamp(const simd<_Tp, _Ap>& __v, const simd<_Tp, _Ap>& __lo, const simd<_Tp, _Ap>& __hi)
3927 {
3928 using _Impl = typename _Ap::_SimdImpl;
3929 return {__private_init,
3930 _Impl::_S_min(__data(__hi),
3931 _Impl::_S_max(__data(__lo), __data(__v)))};
3932 }
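
// Illustrative element-wise algorithms (hypothetical user code):
//   auto m = stdx::min(a, b);            // per-element minimum
//   auto [lo, hi] = stdx::minmax(a, b);  // both extremes in one call
//   auto c = stdx::clamp(v, lo, hi);     // per-element clamp into [lo, hi]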
3933
3934// }}}
3935
3936template <size_t... _Sizes, typename _Tp, typename _Ap,
3937 typename = enable_if_t<((_Sizes + ...) == simd<_Tp, _Ap>::size())>>
3938 inline tuple<simd<_Tp, simd_abi::deduce_t<_Tp, _Sizes>>...>
3939 split(const simd<_Tp, _Ap>&);
3940
3941// __extract_part {{{
3942template <int _Index, int _Total, int _Combine = 1, typename _Tp, size_t _Np>
3943 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_CONST constexpr
3944 _SimdWrapper<_Tp, _Np / _Total * _Combine>
3945 __extract_part(const _SimdWrapper<_Tp, _Np> __x);
3946
3947template <int _Index, int _Parts, int _Combine = 1, typename _Tp, typename _A0, typename... _As>
3948 _GLIBCXX_SIMD_INTRINSIC constexpr auto
3949 __extract_part(const _SimdTuple<_Tp, _A0, _As...>& __x);
3950
3951// }}}
3952// _SizeList {{{
3953template <size_t _V0, size_t... _Values>
3954 struct _SizeList
3955 {
3956 template <size_t _I>
3957 static constexpr size_t
3958 _S_at(_SizeConstant<_I> = {})
3959 {
3960 if constexpr (_I == 0)
3961 return _V0;
3962 else
3963 return _SizeList<_Values...>::template _S_at<_I - 1>();
3964 }
3965
3966 template <size_t _I>
3967 static constexpr auto
3968 _S_before(_SizeConstant<_I> = {})
3969 {
3970 if constexpr (_I == 0)
3971 return _SizeConstant<0>();
3972 else
3973 return _SizeConstant<
3974 _V0 + _SizeList<_Values...>::template _S_before<_I - 1>()>();
3975 }
3976
3977 template <size_t _Np>
3978 static constexpr auto
3979 _S_pop_front(_SizeConstant<_Np> = {})
3980 {
3981 if constexpr (_Np == 0)
3982 return _SizeList();
3983 else
3984 return _SizeList<_Values...>::template _S_pop_front<_Np - 1>();
3985 }
3986 };
3987
3988// }}}
3989// __extract_center {{{
3990template <typename _Tp, size_t _Np>
3991 _GLIBCXX_SIMD_INTRINSIC _SimdWrapper<_Tp, _Np / 2>
3992 __extract_center(_SimdWrapper<_Tp, _Np> __x)
3993 {
3994 static_assert(_Np >= 4);
3995 static_assert(_Np % 4 == 0); // x0 - x1 - x2 - x3 -> return {x1, x2}
3996#if _GLIBCXX_SIMD_X86INTRIN // {{{
3997 if constexpr (__have_avx512f && sizeof(_Tp) * _Np == 64)
3998 {
3999 const auto __intrin = __to_intrin(__x);
4000 if constexpr (is_integral_v<_Tp>)
4001 return __vector_bitcast<_Tp>(_mm512_castsi512_si256(
4002 _mm512_shuffle_i32x4(__intrin, __intrin,
4003 1 + 2 * 0x4 + 2 * 0x10 + 3 * 0x40)));
4004 else if constexpr (sizeof(_Tp) == 4)
4005 return __vector_bitcast<_Tp>(_mm512_castps512_ps256(
4006 _mm512_shuffle_f32x4(__intrin, __intrin,
4007 1 + 2 * 0x4 + 2 * 0x10 + 3 * 0x40)));
4008 else if constexpr (sizeof(_Tp) == 8)
4009 return __vector_bitcast<_Tp>(_mm512_castpd512_pd256(
4010 _mm512_shuffle_f64x2(__intrin, __intrin,
4011 1 + 2 * 0x4 + 2 * 0x10 + 3 * 0x40)));
4012 else
4013 __assert_unreachable<_Tp>();
4014 }
4015 else if constexpr (sizeof(_Tp) * _Np == 32 && is_floating_point_v<_Tp>)
4016 return __vector_bitcast<_Tp>(
4017 _mm_shuffle_pd(__lo128(__vector_bitcast<double>(__x)),
4018 __hi128(__vector_bitcast<double>(__x)), 1));
4019 else if constexpr (sizeof(__x) == 32 && sizeof(_Tp) * _Np <= 32)
4020 return __vector_bitcast<_Tp>(
4021 _mm_alignr_epi8(__hi128(__vector_bitcast<_LLong>(__x)),
4022 __lo128(__vector_bitcast<_LLong>(__x)),
4023 sizeof(_Tp) * _Np / 4));
4024 else
4025#endif // _GLIBCXX_SIMD_X86INTRIN }}}
4026 {
4027 __vector_type_t<_Tp, _Np / 2> __r;
4028 __builtin_memcpy(&__r,
4029 reinterpret_cast<const char*>(&__x)
4030 + sizeof(_Tp) * _Np / 4,
4031 sizeof(_Tp) * _Np / 2);
4032 return __r;
4033 }
4034 }
4035
4036template <typename _Tp, typename _A0, typename... _As>
4037 _GLIBCXX_SIMD_INTRINSIC
4038 _SimdWrapper<_Tp, _SimdTuple<_Tp, _A0, _As...>::_S_size() / 2>
4039 __extract_center(const _SimdTuple<_Tp, _A0, _As...>& __x)
4040 {
4041 if constexpr (sizeof...(_As) == 0)
4042 return __extract_center(__x.first);
4043 else
4044 return __extract_part<1, 4, 2>(__x);
4045 }
4046
4047// }}}
4048// __split_wrapper {{{
4049template <size_t... _Sizes, typename _Tp, typename... _As>
4050 auto
4051 __split_wrapper(_SizeList<_Sizes...>, const _SimdTuple<_Tp, _As...>& __x)
4052 {
4053 return split<_Sizes...>(
4054 fixed_size_simd<_Tp, _SimdTuple<_Tp, _As...>::_S_size()>(__private_init,
4055 __x));
4056 }
4057
4058// }}}
4059
4060// split<simd>(simd) {{{
4061template <typename _V, typename _Ap,
4062 size_t _Parts = simd_size_v<typename _V::value_type, _Ap> / _V::size()>
4063 enable_if_t<simd_size_v<typename _V::value_type, _Ap> == _Parts * _V::size()
4064 && is_simd_v<_V>, array<_V, _Parts>>
4065 split(const simd<typename _V::value_type, _Ap>& __x)
4066 {
4067 using _Tp = typename _V::value_type;
4068
4069 auto __gen_fallback = [&]() constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
4070 return __generate_from_n_evaluations<_Parts, array<_V, _Parts>>(
4071 [&](auto __i) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
4072 return _V([&](auto __j) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA
4073 { return __x[__i * _V::size() + __j]; });
4074 });
4075 };
4076
4077 if constexpr (_Parts == 1)
4078 {
4079 return {simd_cast<_V>(__x)};
4080 }
4081 else if (__x._M_is_constprop())
4082 {
4083 return __gen_fallback();
4084 }
4085#if _GLIBCXX_SIMD_HAVE_SVE
4086    else if constexpr (__is_sve_abi<_Ap>())
4087 {
4088 return __gen_fallback();
4089 }
4090#endif
4091 else if constexpr (
4092 __is_fixed_size_abi_v<_Ap>
4093 && (is_same_v<typename _V::abi_type, simd_abi::scalar>
4094 || (__is_fixed_size_abi_v<typename _V::abi_type>
4095 && sizeof(_V) == sizeof(_Tp) * _V::size() // _V doesn't have padding
4096 )))
4097 {
4098 // fixed_size -> fixed_size (w/o padding) or scalar
4099#ifdef _GLIBCXX_SIMD_USE_ALIASING_LOADS
4100 const __may_alias<_Tp>* const __element_ptr
4101 = reinterpret_cast<const __may_alias<_Tp>*>(&__data(__x));
4102 return __generate_from_n_evaluations<_Parts, array<_V, _Parts>>(
4103 [&](auto __i) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA
4104 { return _V(__element_ptr + __i * _V::size(), vector_aligned); });
4105#else
4106 const auto& __xx = __data(__x);
4107 return __generate_from_n_evaluations<_Parts, array<_V, _Parts>>(
4108 [&](auto __i) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
4109 [[maybe_unused]] constexpr size_t __offset
4110 = decltype(__i)::value * _V::size();
4111 return _V([&](auto __j) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
4112 constexpr _SizeConstant<__j + __offset> __k;
4113 return __xx[__k];
4114 });
4115 });
4116#endif
4117 }
4118 else if constexpr (is_same_v<typename _V::abi_type, simd_abi::scalar>)
4119 {
4120 // normally memcpy should work here as well
4121 return __generate_from_n_evaluations<_Parts, array<_V, _Parts>>(
4122 [&](auto __i) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA { return __x[__i]; });
4123 }
4124 else
4125 {
4126 return __generate_from_n_evaluations<_Parts, array<_V, _Parts>>(
4127 [&](auto __i) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
4128 if constexpr (__is_fixed_size_abi_v<typename _V::abi_type>)
4129 return _V([&](auto __j) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
4130 return __x[__i * _V::size() + __j];
4131 });
4132 else
4133 return _V(__private_init,
4134 __extract_part<decltype(__i)::value, _Parts>(__data(__x)));
4135 });
4136 }
4137 }
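
// Illustrative split into equally sized pieces (hypothetical sizes):
//   stdx::fixed_size_simd<int, 8> x = ...;
//   auto halves = stdx::split<stdx::fixed_size_simd<int, 4>>(x);
//   // halves is a std::array<stdx::fixed_size_simd<int, 4>, 2>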
4138
4139// }}}
4140// split<simd_mask>(simd_mask) {{{
4141template <typename _V, typename _Ap,
4142 size_t _Parts = simd_size_v<typename _V::simd_type::value_type, _Ap> / _V::size()>
4143 enable_if_t<is_simd_mask_v<_V> && simd_size_v<typename
4144 _V::simd_type::value_type, _Ap> == _Parts * _V::size(), array<_V, _Parts>>
4145 split(const simd_mask<typename _V::simd_type::value_type, _Ap>& __x)
4146 {
4147 if constexpr (is_same_v<_Ap, typename _V::abi_type>)
4148 return {__x};
4149 else if constexpr (_Parts == 1)
4150 return {__proposed::static_simd_cast<_V>(__x)};
4151 else if constexpr (_Parts == 2 && __is_sse_abi<typename _V::abi_type>()
4152 && __is_avx_abi<_Ap>())
4153 return {_V(__private_init, __lo128(__data(__x))),
4154 _V(__private_init, __hi128(__data(__x)))};
4155 else if constexpr (_V::size() <= __CHAR_BIT__ * sizeof(_ULLong))
4156 {
4157 const bitset __bits = __x.__to_bitset();
4158 return __generate_from_n_evaluations<_Parts, array<_V, _Parts>>(
4159 [&](auto __i) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
4160 constexpr size_t __offset = __i * _V::size();
4161 return _V(__bitset_init, (__bits >> __offset).to_ullong());
4162 });
4163 }
4164 else
4165 {
4166 return __generate_from_n_evaluations<_Parts, array<_V, _Parts>>(
4167 [&](auto __i) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
4168 constexpr size_t __offset = __i * _V::size();
4169 return _V(__private_init,
4170 [&](auto __j) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
4171 return __x[__j + __offset];
4172 });
4173 });
4174 }
4175 }
4176
4177// }}}
4178// split<_Sizes...>(simd) {{{
4179template <size_t... _Sizes, typename _Tp, typename _Ap, typename>
4180 _GLIBCXX_SIMD_ALWAYS_INLINE
4181 tuple<simd<_Tp, simd_abi::deduce_t<_Tp, _Sizes>>...>
4182 split(const simd<_Tp, _Ap>& __x)
4183 {
4184 using _SL = _SizeList<_Sizes...>;
4185 using _Tuple = tuple<__deduced_simd<_Tp, _Sizes>...>;
4186 constexpr size_t _Np = simd_size_v<_Tp, _Ap>;
4187 constexpr size_t _N0 = _SL::template _S_at<0>();
4188 using _V = __deduced_simd<_Tp, _N0>;
4189
4190 auto __gen_fallback = [&]() constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA
4191 {
4192 return __generate_from_n_evaluations<sizeof...(_Sizes), _Tuple>(
4193 [&](auto __i) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
4194 using _Vi = __deduced_simd<_Tp, _SL::_S_at(__i)>;
4195 constexpr size_t __offset = _SL::_S_before(__i);
4196 return _Vi([&](auto __j) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
4197 return __x[__offset + __j];
4198 });
4199 });
4200 };
4201
4202 if (__x._M_is_constprop())
4203      return __gen_fallback();
4204#if _GLIBCXX_SIMD_HAVE_SVE
4205    else if constexpr (__have_sve)
4206      return __gen_fallback();
4207#endif
4208 else if constexpr (_Np == _N0)
4209 {
4210 static_assert(sizeof...(_Sizes) == 1);
4211 return {simd_cast<_V>(__x)};
4212 }
4213    else if constexpr // split from fixed_size, such that __x::first.size == _N0
4214      (__is_fixed_size_abi_v<_Ap>
4215	 && __fixed_size_storage_t<_Tp, _Np>::_S_first_size == _N0)
4216 {
4217 static_assert(
4218 !__is_fixed_size_abi_v<typename _V::abi_type>,
4219	  "How can <_Tp, _Np> be a single _SimdTuple entry but a "
4220 "fixed_size_simd "
4221 "when deduced?");
4222 // extract first and recurse (__split_wrapper is needed to deduce a new
4223 // _Sizes pack)
4224 return tuple_cat(make_tuple(_V(__private_init, __data(__x).first)),
4225 __split_wrapper(_SL::template _S_pop_front<1>(),
4226 __data(__x).second));
4227 }
4228 else if constexpr ((!is_same_v<simd_abi::scalar,
4229 simd_abi::deduce_t<_Tp, _Sizes>> && ...)
4230 && (!__is_fixed_size_abi_v<
4231 simd_abi::deduce_t<_Tp, _Sizes>> && ...))
4232 {
4233 if constexpr (((_Sizes * 2 == _Np) && ...))
4234 return {{__private_init, __extract_part<0, 2>(__data(__x))},
4235 {__private_init, __extract_part<1, 2>(__data(__x))}};
4236 else if constexpr (is_same_v<_SizeList<_Sizes...>,
4237 _SizeList<_Np / 3, _Np / 3, _Np / 3>>)
4238 return {{__private_init, __extract_part<0, 3>(__data(__x))},
4239 {__private_init, __extract_part<1, 3>(__data(__x))},
4240 {__private_init, __extract_part<2, 3>(__data(__x))}};
4241 else if constexpr (is_same_v<_SizeList<_Sizes...>,
4242 _SizeList<2 * _Np / 3, _Np / 3>>)
4243 return {{__private_init, __extract_part<0, 3, 2>(__data(__x))},
4244 {__private_init, __extract_part<2, 3>(__data(__x))}};
4245 else if constexpr (is_same_v<_SizeList<_Sizes...>,
4246 _SizeList<_Np / 3, 2 * _Np / 3>>)
4247 return {{__private_init, __extract_part<0, 3>(__data(__x))},
4248 {__private_init, __extract_part<1, 3, 2>(__data(__x))}};
4249 else if constexpr (is_same_v<_SizeList<_Sizes...>,
4250 _SizeList<_Np / 2, _Np / 4, _Np / 4>>)
4251 return {{__private_init, __extract_part<0, 2>(__data(__x))},
4252 {__private_init, __extract_part<2, 4>(__data(__x))},
4253 {__private_init, __extract_part<3, 4>(__data(__x))}};
4254 else if constexpr (is_same_v<_SizeList<_Sizes...>,
4255 _SizeList<_Np / 4, _Np / 4, _Np / 2>>)
4256 return {{__private_init, __extract_part<0, 4>(__data(__x))},
4257 {__private_init, __extract_part<1, 4>(__data(__x))},
4258 {__private_init, __extract_part<1, 2>(__data(__x))}};
4259 else if constexpr (is_same_v<_SizeList<_Sizes...>,
4260 _SizeList<_Np / 4, _Np / 2, _Np / 4>>)
4261 return {{__private_init, __extract_part<0, 4>(__data(__x))},
4262 {__private_init, __extract_center(__data(__x))},
4263 {__private_init, __extract_part<3, 4>(__data(__x))}};
4264 else if constexpr (((_Sizes * 4 == _Np) && ...))
4265 return {{__private_init, __extract_part<0, 4>(__data(__x))},
4266 {__private_init, __extract_part<1, 4>(__data(__x))},
4267 {__private_init, __extract_part<2, 4>(__data(__x))},
4268 {__private_init, __extract_part<3, 4>(__data(__x))}};
4269 // else fall through
4270 }
4271#ifdef _GLIBCXX_SIMD_USE_ALIASING_LOADS
4272 const __may_alias<_Tp>* const __element_ptr
4273 = reinterpret_cast<const __may_alias<_Tp>*>(&__x);
4274 return __generate_from_n_evaluations<sizeof...(_Sizes), _Tuple>(
4275 [&](auto __i) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
4276 using _Vi = __deduced_simd<_Tp, _SL::_S_at(__i)>;
4277 constexpr size_t __offset = _SL::_S_before(__i);
4278 constexpr size_t __base_align = alignof(simd<_Tp, _Ap>);
4279 constexpr size_t __a
4280 = __base_align - ((__offset * sizeof(_Tp)) % __base_align);
4281 constexpr size_t __b = ((__a - 1) & __a) ^ __a;
4282 constexpr size_t __alignment = __b == 0 ? __a : __b;
4283 return _Vi(__element_ptr + __offset, overaligned<__alignment>);
4284 });
4285#else
4286 return __generate_from_n_evaluations<sizeof...(_Sizes), _Tuple>(
4287 [&](auto __i) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
4288 using _Vi = __deduced_simd<_Tp, _SL::_S_at(__i)>;
4289 const auto& __xx = __data(__x);
4290 using _Offset = decltype(_SL::_S_before(__i));
4291 return _Vi([&](auto __j) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
4292 constexpr _SizeConstant<_Offset::value + __j> __k;
4293 return __xx[__k];
4294 });
4295 });
4296#endif
4297 }
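
// Illustrative split into differently sized pieces (hypothetical sizes; the
// sizes must add up to x.size()):
//   stdx::fixed_size_simd<int, 8> x = ...;
//   auto [a, b] = stdx::split<6, 2>(x);  // tuple of simd objects of size 6 and 2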
4298
4299// }}}
4300
4301// __subscript_in_pack {{{
4302template <size_t _I, typename _Tp, typename _Ap, typename... _As>
4303 _GLIBCXX_SIMD_INTRINSIC constexpr _Tp
4304 __subscript_in_pack(const simd<_Tp, _Ap>& __x, const simd<_Tp, _As>&... __xs)
4305 {
4306 if constexpr (_I < simd_size_v<_Tp, _Ap>)
4307 return __x[_I];
4308 else
4309 return __subscript_in_pack<_I - simd_size_v<_Tp, _Ap>>(__xs...);
4310 }
4311
4312// }}}
4313// __store_pack_of_simd {{{
4314template <typename _Tp, typename _A0, typename... _As>
4315 _GLIBCXX_SIMD_INTRINSIC void
4316 __store_pack_of_simd(char* __mem, const simd<_Tp, _A0>& __x0, const simd<_Tp, _As>&... __xs)
4317 {
4318 constexpr size_t __n_bytes = sizeof(_Tp) * simd_size_v<_Tp, _A0>;
4319 __builtin_memcpy(__mem, &__data(__x0), __n_bytes);
4320 if constexpr (sizeof...(__xs) > 0)
4321 __store_pack_of_simd(__mem + __n_bytes, __xs...);
4322 }
4323
4324// }}}
4325// concat(simd...) {{{
4326template <typename _Tp, typename... _As, typename = __detail::__odr_helper>
4327 inline _GLIBCXX_SIMD_CONSTEXPR
4328 simd<_Tp, simd_abi::deduce_t<_Tp, (simd_size_v<_Tp, _As> + ...)>>
4329 concat(const simd<_Tp, _As>&... __xs)
4330 {
4331 using _Rp = __deduced_simd<_Tp, (simd_size_v<_Tp, _As> + ...)>;
4332 if constexpr (sizeof...(__xs) == 1)
4333 return simd_cast<_Rp>(__xs...);
4334 else if ((... && __xs._M_is_constprop()))
4335 return simd<_Tp,
4336 simd_abi::deduce_t<_Tp, (simd_size_v<_Tp, _As> + ...)>>(
4337 [&](auto __i) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA
4338 { return __subscript_in_pack<__i>(__xs...); });
4339 else
4340 {
4341 _Rp __r{};
4342 __store_pack_of_simd(reinterpret_cast<char*>(&__data(__r)), __xs...);
4343 return __r;
4344 }
4345 }
4346
4347// }}}
4348// concat(array<simd>) {{{
4349template <typename _Tp, typename _Abi, size_t _Np>
4350 _GLIBCXX_SIMD_ALWAYS_INLINE
4351 _GLIBCXX_SIMD_CONSTEXPR __deduced_simd<_Tp, simd_size_v<_Tp, _Abi> * _Np>
4352 concat(const array<simd<_Tp, _Abi>, _Np>& __x)
4353 {
4354 return __call_with_subscripts<_Np>(
4355 __x, [](const auto&... __xs) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
4356 return concat(__xs...);
4357 });
4358 }
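
// Illustrative concatenation (hypothetical user code):
//   auto xy = stdx::concat(x, y);      // xy.size() == x.size() + y.size()
//   auto whole = stdx::concat(halves); // array<simd, N> -> one wide simd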
4359
4360// }}}
4361
4362/// @cond undocumented
4363// _SmartReference {{{
4364template <typename _Up, typename _Accessor = _Up,
4365 typename _ValueType = typename _Up::value_type>
4366 class _SmartReference
4367 {
4368 friend _Accessor;
4369 int _M_index;
4370 _Up& _M_obj;
4371
4372 _GLIBCXX_SIMD_INTRINSIC constexpr _ValueType
4373 _M_read() const noexcept
4374 {
4375 if constexpr (is_arithmetic_v<_Up>)
4376 return _M_obj;
4377 else
4378 return _M_obj[_M_index];
4379 }
4380
4381 template <typename _Tp>
4382 _GLIBCXX_SIMD_INTRINSIC constexpr void
4383 _M_write(_Tp&& __x) const
4384 { _Accessor::_S_set(_M_obj, _M_index, static_cast<_Tp&&>(__x)); }
4385
4386 public:
4387 _GLIBCXX_SIMD_INTRINSIC constexpr
4388 _SmartReference(_Up& __o, int __i) noexcept
4389 : _M_index(__i), _M_obj(__o) {}
4390
4391 using value_type = _ValueType;
4392
4393 _GLIBCXX_SIMD_INTRINSIC
4394 _SmartReference(const _SmartReference&) = delete;
4395
4396 _GLIBCXX_SIMD_INTRINSIC constexpr
4397 operator value_type() const noexcept
4398 { return _M_read(); }
4399
4400 template <typename _Tp, typename = _ValuePreservingOrInt<__remove_cvref_t<_Tp>, value_type>>
4401 _GLIBCXX_SIMD_INTRINSIC constexpr _SmartReference
4402 operator=(_Tp&& __x) &&
4403 {
4404 _M_write(static_cast<_Tp&&>(__x));
4405 return {_M_obj, _M_index};
4406 }
4407
4408#define _GLIBCXX_SIMD_OP_(__op) \
4409 template <typename _Tp, \
4410 typename _TT = decltype(declval<value_type>() __op declval<_Tp>()), \
4411 typename = _ValuePreservingOrInt<__remove_cvref_t<_Tp>, _TT>, \
4412 typename = _ValuePreservingOrInt<_TT, value_type>> \
4413 _GLIBCXX_SIMD_INTRINSIC constexpr _SmartReference \
4414 operator __op##=(_Tp&& __x) && \
4415 { \
4416 const value_type& __lhs = _M_read(); \
4417 _M_write(__lhs __op __x); \
4418 return {_M_obj, _M_index}; \
4419 }
4420 _GLIBCXX_SIMD_ALL_ARITHMETICS(_GLIBCXX_SIMD_OP_);
4421 _GLIBCXX_SIMD_ALL_SHIFTS(_GLIBCXX_SIMD_OP_);
4422 _GLIBCXX_SIMD_ALL_BINARY(_GLIBCXX_SIMD_OP_);
4423#undef _GLIBCXX_SIMD_OP_
4424
4425 template <typename _Tp = void,
4426 typename = decltype(++declval<conditional_t<true, value_type, _Tp>&>())>
4427 _GLIBCXX_SIMD_INTRINSIC constexpr _SmartReference
4428 operator++() &&
4429 {
4430 value_type __x = _M_read();
4431 _M_write(++__x);
4432 return {_M_obj, _M_index};
4433 }
4434
4435 template <typename _Tp = void,
4436 typename = decltype(declval<conditional_t<true, value_type, _Tp>&>()++)>
4437 _GLIBCXX_SIMD_INTRINSIC constexpr value_type
4438 operator++(int) &&
4439 {
4440 const value_type __r = _M_read();
4441 value_type __x = __r;
4442 _M_write(++__x);
4443 return __r;
4444 }
4445
4446 template <typename _Tp = void,
4447 typename = decltype(--declval<conditional_t<true, value_type, _Tp>&>())>
4448 _GLIBCXX_SIMD_INTRINSIC constexpr _SmartReference
4449 operator--() &&
4450 {
4451 value_type __x = _M_read();
4452 _M_write(--__x);
4453 return {_M_obj, _M_index};
4454 }
4455
4456 template <typename _Tp = void,
4457 typename = decltype(declval<conditional_t<true, value_type, _Tp>&>()--)>
4458 _GLIBCXX_SIMD_INTRINSIC constexpr value_type
4459 operator--(int) &&
4460 {
4461 const value_type __r = _M_read();
4462 value_type __x = __r;
4463 _M_write(--__x);
4464 return __r;
4465 }
4466
4467 _GLIBCXX_SIMD_INTRINSIC friend void
4468 swap(_SmartReference&& __a, _SmartReference&& __b) noexcept(
4469 conjunction<
4470 is_nothrow_constructible<value_type, _SmartReference&&>,
4471 is_nothrow_assignable<_SmartReference&&, value_type&&>>::value)
4472 {
4473 value_type __tmp = static_cast<_SmartReference&&>(__a);
4474 static_cast<_SmartReference&&>(__a) = static_cast<value_type>(__b);
4475 static_cast<_SmartReference&&>(__b) = std::move(__tmp);
4476 }
4477
4478 _GLIBCXX_SIMD_INTRINSIC friend void
4479 swap(value_type& __a, _SmartReference&& __b) noexcept(
4480 conjunction<
4481 is_nothrow_constructible<value_type, value_type&&>,
4482 is_nothrow_assignable<value_type&, value_type&&>,
4483 is_nothrow_assignable<_SmartReference&&, value_type&&>>::value)
4484 {
4485 value_type __tmp(std::move(__a));
4486 __a = static_cast<value_type>(__b);
4487 static_cast<_SmartReference&&>(__b) = std::move(__tmp);
4488 }
4489
4490 _GLIBCXX_SIMD_INTRINSIC friend void
4491 swap(_SmartReference&& __a, value_type& __b) noexcept(
4492 conjunction<
4493 is_nothrow_constructible<value_type, _SmartReference&&>,
4494 is_nothrow_assignable<value_type&, value_type&&>,
4495 is_nothrow_assignable<_SmartReference&&, value_type&&>>::value)
4496 {
4497 value_type __tmp(__a);
4498 static_cast<_SmartReference&&>(__a) = std::move(__b);
4499 __b = std::move(__tmp);
4500 }
4501 };
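
// Illustrative effect of the proxy reference (hypothetical user code):
//   v[0] = 1.f;      // operator[] yields a _SmartReference; assignment goes
//                    // through _M_write and thus _Accessor::_S_set
//   float f = v[0];  // the conversion operator reads via _M_read()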
4502
4503// }}}
4504// __scalar_abi_wrapper {{{
4505template <int _Bytes>
4506 struct __scalar_abi_wrapper
4507 {
4508 template <typename _Tp> static constexpr size_t _S_full_size = 1;
4509 template <typename _Tp> static constexpr size_t _S_size = 1;
4510 template <typename _Tp> static constexpr size_t _S_is_partial = false;
4511
4512 template <typename _Tp, typename _Abi = simd_abi::scalar>
4513 static constexpr bool _S_is_valid_v
4514 = _Abi::template _IsValid<_Tp>::value && sizeof(_Tp) == _Bytes;
4515 };
4516
4517// }}}
4518// __decay_abi metafunction {{{
4519template <typename _Tp>
4520 struct __decay_abi { using type = _Tp; };
4521
4522template <int _Bytes>
4523 struct __decay_abi<__scalar_abi_wrapper<_Bytes>>
4524 { using type = simd_abi::scalar; };
4525
4526// }}}
4527// __find_next_valid_abi metafunction {{{1
4528// Given an ABI tag A<N>, find an N2 < N such that A<N2>::_S_is_valid_v<_Tp> ==
4529// true, N2 is a power-of-2, and A<N2>::_S_is_partial<_Tp> is false. Break
4530// recursion at 2 elements in the resulting ABI tag. In this case
4531// type::_S_is_valid_v<_Tp> may be false.
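// Illustrative walk-through, assuming 16-byte vectors are valid for float on
// the target: for _Abi = _VecBuiltin, _Bytes = 24, _Tp = float the recursion
// tries __bit_ceil(24) / 2 == 16; _VecBuiltin<16> is valid and not partial,
// so type is _VecBuiltin<16>.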
4532template <template <int> class _Abi, int _Bytes, typename _Tp>
4533 struct __find_next_valid_abi
4534 {
4535 static constexpr auto
4536 _S_choose()
4537 {
4538 constexpr int _NextBytes = std::__bit_ceil(_Bytes) / 2;
4539 using _NextAbi = _Abi<_NextBytes>;
4540 if constexpr (_NextBytes < sizeof(_Tp) * 2) // break recursion
4541 return _Abi<_Bytes>();
4542 else if constexpr (_NextAbi::template _S_is_partial<_Tp> == false
4543 && _NextAbi::template _S_is_valid_v<_Tp>)
4544 return _NextAbi();
4545 else
4546 return __find_next_valid_abi<_Abi, _NextBytes, _Tp>::_S_choose();
4547 }
4548
4549 using type = decltype(_S_choose());
4550 };
4551
4552template <int _Bytes, typename _Tp>
4553 struct __find_next_valid_abi<__scalar_abi_wrapper, _Bytes, _Tp>
4554 { using type = simd_abi::scalar; };
4555
4556// _AbiList {{{1
4557template <template <int> class...>
4558 struct _AbiList
4559 {
4560 template <typename, int> static constexpr bool _S_has_valid_abi = false;
4561 template <typename, int> using _FirstValidAbi = void;
4562 template <typename, int> using _BestAbi = void;
4563 };
4564
4565template <template <int> class _A0, template <int> class... _Rest>
4566 struct _AbiList<_A0, _Rest...>
4567 {
4568 template <typename _Tp, int _Np>
4569 static constexpr bool _S_has_valid_abi
4570	= _A0<sizeof(_Tp) * _Np>::template _S_is_valid_v<_Tp>
4571	    || _AbiList<_Rest...>::template _S_has_valid_abi<_Tp, _Np>;
4572
4573 template <typename _Tp, int _Np>
4574 using _FirstValidAbi = conditional_t<
4575 _A0<sizeof(_Tp) * _Np>::template _S_is_valid_v<_Tp>,
4576 typename __decay_abi<_A0<sizeof(_Tp) * _Np>>::type,
4577 typename _AbiList<_Rest...>::template _FirstValidAbi<_Tp, _Np>>;
4578
4579 template <typename _Tp, int _Np>
4580 static constexpr auto
4581 _S_determine_best_abi()
4582 {
4583 static_assert(_Np >= 1);
4584 constexpr int _Bytes = sizeof(_Tp) * _Np;
4585 if constexpr (_Np == 1)
4586 return __make_dependent_t<_Tp, simd_abi::scalar>{};
4587 else
4588 {
4589 constexpr int __fullsize = _A0<_Bytes>::template _S_full_size<_Tp>;
4590 // _A0<_Bytes> is good if:
4591 // 1. The ABI tag is valid for _Tp
4592 // 2. The storage overhead is no more than padding to fill the next
4593 // power-of-2 number of bytes
4594 if constexpr (_A0<_Bytes>::template _S_is_valid_v<_Tp>
4595 && ((__is_sve_abi<_A0<_Bytes>>() && __have_sve
4596				   && (_Np <= __sve_vectorized_size_bytes / sizeof(_Tp)))
4597 || (__fullsize / 2 < _Np))
4598 )
4599 return typename __decay_abi<_A0<_Bytes>>::type{};
4600 else
4601 {
4602 using _Bp =
4603 typename __find_next_valid_abi<_A0, _Bytes, _Tp>::type;
4604		if constexpr (_Bp::template _S_is_valid_v<_Tp>
4605				&& _Bp::template _S_size<_Tp> <= _Np)
4606 return _Bp{};
4607 else
4608 return
4609 typename _AbiList<_Rest...>::template _BestAbi<_Tp, _Np>{};
4610 }
4611 }
4612 }
4613
4614 template <typename _Tp, int _Np>
4615 using _BestAbi = decltype(_S_determine_best_abi<_Tp, _Np>());
4616 };
4617
4618// }}}1
4619
4620// The following lists all native ABIs, which makes them accessible to
4621// simd_abi::deduce and select_best_vector_type_t (for fixed_size). Order
4622// matters: whatever comes first has higher priority.
4623using _AllNativeAbis = _AbiList<
4624#if _GLIBCXX_SIMD_HAVE_SVE
4625 simd_abi::_SveAbi,
4626#endif
4627 simd_abi::_VecBltnBtmsk, simd_abi::_VecBuiltin, __scalar_abi_wrapper>;
4628
4629using _NoSveAllNativeAbis = _AbiList<simd_abi::_VecBltnBtmsk, simd_abi::_VecBuiltin,
4630 __scalar_abi_wrapper>;
4631
4632// valid _SimdTraits specialization {{{1
4633template <typename _Tp, typename _Abi>
4634 struct _SimdTraits<_Tp, _Abi, void_t<typename _Abi::template _IsValid<_Tp>>>
4635 : _Abi::template __traits<_Tp> {};
4636
4637// __deduce_impl specializations {{{1
4638// try all native ABIs (including scalar) first
4639template <typename _Tp, size_t _Np>
4640 struct __deduce_impl<
4641 _Tp, _Np, enable_if_t<_AllNativeAbis::template _S_has_valid_abi<_Tp, _Np>>>
4642 { using type = _AllNativeAbis::_FirstValidAbi<_Tp, _Np>; };
4643
4644template <typename _Tp, size_t _Np>
4645 struct __no_sve_deduce_impl<
4646 _Tp, _Np, enable_if_t<_NoSveAllNativeAbis::template _S_has_valid_abi<_Tp, _Np>>>
4647 { using type = _NoSveAllNativeAbis::_FirstValidAbi<_Tp, _Np>; };
4648
4649// fall back to fixed_size only if scalar and native ABIs don't match
4650template <typename _Tp, size_t _Np, typename = void>
4651 struct __deduce_fixed_size_fallback {};
4652
4653template <typename _Tp, size_t _Np>
4654 struct __deduce_fixed_size_fallback<_Tp, _Np,
4655 enable_if_t<simd_abi::fixed_size<_Np>::template _S_is_valid_v<_Tp>>>
4656 { using type = simd_abi::fixed_size<_Np>; };
4657
4658template <typename _Tp, size_t _Np, typename>
4659 struct __deduce_impl : public __deduce_fixed_size_fallback<_Tp, _Np> {};
4660
4661template <typename _Tp, size_t _Np, typename>
4662 struct __no_sve_deduce_impl
4663 : public __deduce_fixed_size_fallback<_Tp, _Np>
4664 {};
4665
4666
4667//}}}1
4668/// @endcond
4669
4670// simd_mask {{{
4671template <typename _Tp, typename _Abi>
4672 class simd_mask : public _SimdTraits<_Tp, _Abi>::_MaskBase
4673 {
4674 // types, tags, and friends {{{
4675 using _Traits = _SimdTraits<_Tp, _Abi>;
4676 using _MemberType = typename _Traits::_MaskMember;
4677
4678 // We map all masks with equal element sizeof to a single integer type, the
4679 // one given by __int_for_sizeof_t<_Tp>. This is the approach
4680 // [[gnu::vector_size(N)]] types take as well and it reduces the number of
4681 // template specializations in the implementation classes.
4682 using _Ip = __int_for_sizeof_t<_Tp>;
4683 static constexpr _Ip* _S_type_tag = nullptr;
4684
4685 friend typename _Traits::_MaskBase;
4686 friend class simd<_Tp, _Abi>; // to construct masks on return
4687 friend typename _Traits::_SimdImpl; // to construct masks on return and
4688 // inspect data on masked operations
4689 public:
4690 using _Impl = typename _Traits::_MaskImpl;
4691 friend _Impl;
4692
4693 // }}}
4694 // member types {{{
4695 using value_type = bool;
4696 using reference = _SmartReference<_MemberType, _Impl, value_type>;
4697 using simd_type = simd<_Tp, _Abi>;
4698 using abi_type = _Abi;
4699
4700 // }}}
4701 static constexpr size_t size() // {{{
4702 { return __size_or_zero_v<_Tp, _Abi>; }
4703
4704 // }}}
4705 // constructors & assignment {{{
4706 simd_mask() = default;
4707 simd_mask(const simd_mask&) = default;
4708 simd_mask(simd_mask&&) = default;
4709 simd_mask& operator=(const simd_mask&) = default;
4710 simd_mask& operator=(simd_mask&&) = default;
4711
4712 // }}}
4713 // access to internal representation (optional feature) {{{
4714 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR explicit
4715 simd_mask(typename _Traits::_MaskCastType __init)
4716 : _M_data{__init} {}
4717    // conversion to the internal type is done in _MaskBase
4718
4719 // }}}
4720 // bitset interface (extension to be proposed) {{{
4721 // TS_FEEDBACK:
4722 // Conversion of simd_mask to and from bitset makes it much easier to
4723 // interface with other facilities. I suggest adding `static
4724 // simd_mask::from_bitset` and `simd_mask::to_bitset`.
4725 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR static simd_mask
4726 __from_bitset(bitset<size()> bs)
4727 { return {__bitset_init, bs}; }
4728
4729 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR bitset<size()>
4730 __to_bitset() const
4731 { return _Impl::_S_to_bits(_M_data)._M_to_bitset(); }
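
    // Illustrative round-trip through the extensions proposed above
    // (hypothetical user code):
    //   auto bits = k.__to_bitset();                 // simd_mask -> std::bitset
    //   auto k2 = decltype(k)::__from_bitset(bits);  // std::bitset -> simd_mask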
4732
4733 // }}}
4734 // explicit broadcast constructor {{{
4735 _GLIBCXX_SIMD_ALWAYS_INLINE explicit _GLIBCXX_SIMD_CONSTEXPR
4736 simd_mask(value_type __x)
4737 : _M_data(_Impl::template _S_broadcast<_Ip>(__x)) {}
4738
4739 // }}}
4740 // implicit type conversion constructor {{{
4741 #ifdef _GLIBCXX_SIMD_ENABLE_IMPLICIT_MASK_CAST
4742 // proposed improvement
4743 template <typename _Up, typename _A2,
4744 typename = enable_if_t<simd_size_v<_Up, _A2> == size()>>
4745 _GLIBCXX_SIMD_ALWAYS_INLINE explicit(sizeof(_MemberType)
4746 != sizeof(typename _SimdTraits<_Up, _A2>::_MaskMember))
4747 simd_mask(const simd_mask<_Up, _A2>& __x)
4748 : simd_mask(__proposed::static_simd_cast<simd_mask>(__x)) {}
4749 #else
4750 // conforming to ISO/IEC 19570:2018
4751 template <typename _Up, typename = enable_if_t<conjunction<
4752 is_same<abi_type, simd_abi::fixed_size<size()>>,
4753 is_same<_Up, _Up>>::value>>
4754 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR
4755 simd_mask(const simd_mask<_Up, simd_abi::fixed_size<size()>>& __x)
4756 : _M_data(_Impl::_S_from_bitmask(__data(__x), _S_type_tag)) {}
4757 #endif
4758
4759 // }}}
4760 // load constructor {{{
4761 template <typename _Flags>
4762 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR
4763 simd_mask(const value_type* __mem, _IsSimdFlagType<_Flags>)
4764 : _M_data(_Impl::template _S_load<_Ip>(_Flags::template _S_apply<simd_mask>(__mem))) {}
4765
4766 template <typename _Flags>
4767 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR
4768 simd_mask(const value_type* __mem, simd_mask __k, _IsSimdFlagType<_Flags>)
4769 : _M_data{}
4770 {
4771 _M_data = _Impl::_S_masked_load(_M_data, __k._M_data,
4772 _Flags::template _S_apply<simd_mask>(__mem));
4773 }
4774
4775 // }}}
4776 // loads [simd_mask.load] {{{
4777 template <typename _Flags>
4778 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR void
4779 copy_from(const value_type* __mem, _IsSimdFlagType<_Flags>)
4780 { _M_data = _Impl::template _S_load<_Ip>(_Flags::template _S_apply<simd_mask>(__mem)); }
4781
4782 // }}}
4783 // stores [simd_mask.store] {{{
4784 template <typename _Flags>
4785 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR void
4786 copy_to(value_type* __mem, _IsSimdFlagType<_Flags>) const
4787 { _Impl::_S_store(_M_data, _Flags::template _S_apply<simd_mask>(__mem)); }
4788
4789 // }}}
4790 // scalar access {{{
4791 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR reference
4792 operator[](size_t __i)
4793 {
4794 if (__i >= size())
4795 __invoke_ub("Subscript %d is out of range [0, %d]", __i, size() - 1);
4796 return {_M_data, int(__i)};
4797 }
4798
4799 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR value_type
4800 operator[](size_t __i) const
4801 {
4802 if (__i >= size())
4803 __invoke_ub("Subscript %d is out of range [0, %d]", __i, size() - 1);
4804 if constexpr (__is_scalar_abi<_Abi>())
4805 return _M_data;
4806 else
4807 return static_cast<bool>(_M_data[__i]);
4808 }
4809
4810 // }}}
4811 // negation {{{
4812 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR simd_mask
4813 operator!() const
4814 { return {__private_init, _Impl::_S_bit_not(_M_data)}; }
4815
4816 // }}}
4817 // simd_mask binary operators [simd_mask.binary] {{{
4818 #ifdef _GLIBCXX_SIMD_ENABLE_IMPLICIT_MASK_CAST
4819 // simd_mask<int> && simd_mask<uint> needs disambiguation
4820 template <typename _Up, typename _A2,
4821 typename = enable_if_t<is_convertible_v<simd_mask<_Up, _A2>, simd_mask>>>
4822 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend simd_mask
4823 operator&&(const simd_mask& __x, const simd_mask<_Up, _A2>& __y)
4824 {
4825 return {__private_init,
4826 _Impl::_S_logical_and(__x._M_data, simd_mask(__y)._M_data)};
4827 }
4828
4829 template <typename _Up, typename _A2,
4830 typename = enable_if_t<is_convertible_v<simd_mask<_Up, _A2>, simd_mask>>>
4831 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend simd_mask
4832 operator||(const simd_mask& __x, const simd_mask<_Up, _A2>& __y)
4833 {
4834 return {__private_init,
4835 _Impl::_S_logical_or(__x._M_data, simd_mask(__y)._M_data)};
4836 }
4837 #endif // _GLIBCXX_SIMD_ENABLE_IMPLICIT_MASK_CAST
4838
4839 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend simd_mask
4840 operator&&(const simd_mask& __x, const simd_mask& __y)
4841 { return {__private_init, _Impl::_S_logical_and(__x._M_data, __y._M_data)}; }
4842
4843 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend simd_mask
4844 operator||(const simd_mask& __x, const simd_mask& __y)
4845 { return {__private_init, _Impl::_S_logical_or(__x._M_data, __y._M_data)}; }
4846
4847 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend simd_mask
4848 operator&(const simd_mask& __x, const simd_mask& __y)
4849 { return {__private_init, _Impl::_S_bit_and(__x._M_data, __y._M_data)}; }
4850
4851 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend simd_mask
4852 operator|(const simd_mask& __x, const simd_mask& __y)
4853 { return {__private_init, _Impl::_S_bit_or(__x._M_data, __y._M_data)}; }
4854
4855 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend simd_mask
4856 operator^(const simd_mask& __x, const simd_mask& __y)
4857 { return {__private_init, _Impl::_S_bit_xor(__x._M_data, __y._M_data)}; }
4858
4859 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend simd_mask&
4860 operator&=(simd_mask& __x, const simd_mask& __y)
4861 {
4862 __x._M_data = _Impl::_S_bit_and(__x._M_data, __y._M_data);
4863 return __x;
4864 }
4865
4866 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend simd_mask&
4867 operator|=(simd_mask& __x, const simd_mask& __y)
4868 {
4869 __x._M_data = _Impl::_S_bit_or(__x._M_data, __y._M_data);
4870 return __x;
4871 }
4872
4873 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend simd_mask&
4874 operator^=(simd_mask& __x, const simd_mask& __y)
4875 {
4876 __x._M_data = _Impl::_S_bit_xor(__x._M_data, __y._M_data);
4877 return __x;
4878 }
4879
4880 // }}}
4881 // simd_mask compares [simd_mask.comparison] {{{
4882 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend simd_mask
4883 operator==(const simd_mask& __x, const simd_mask& __y)
4884 { return !operator!=(__x, __y); }
4885
4886 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend simd_mask
4887 operator!=(const simd_mask& __x, const simd_mask& __y)
4888 { return {__private_init, _Impl::_S_bit_xor(__x._M_data, __y._M_data)}; }
4889
4890 // }}}
4891 // private_init ctor {{{
4892 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR
4893 simd_mask(_PrivateInit, typename _Traits::_MaskMember __init)
4894 : _M_data(__init) {}
4895
4896 // }}}
4897 // private_init generator ctor {{{
4898 template <typename _Fp, typename = decltype(bool(declval<_Fp>()(size_t())))>
4899 _GLIBCXX_SIMD_INTRINSIC constexpr
4900 simd_mask(_PrivateInit, _Fp&& __gen)
4901 : _M_data()
4902 {
4903 __execute_n_times<size()>([&](auto __i) constexpr _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
4904 _Impl::_S_set(_M_data, __i, __gen(__i));
4905 });
4906 }
4907
4908 // }}}
4909 // bitset_init ctor {{{
4910 _GLIBCXX_SIMD_INTRINSIC constexpr
4911 simd_mask(_BitsetInit, bitset<size()> __init)
4912 : _M_data(_Impl::_S_from_bitmask(_SanitizedBitMask<size()>(__init), _S_type_tag))
4913 {}
4914
4915 // }}}
4916 // __cvt {{{
4917 // TS_FEEDBACK:
4918 // The conversion operator this implements should be a ctor on simd_mask.
4919 // Once you call .__cvt() on a simd_mask it converts conveniently.
4920 // A useful variation: add `explicit(sizeof(_Tp) != sizeof(_Up))`
4921 struct _CvtProxy
4922 {
4923 template <typename _Up, typename _A2,
4924 typename = enable_if_t<simd_size_v<_Up, _A2> == simd_size_v<_Tp, _Abi>>>
4925 _GLIBCXX_SIMD_ALWAYS_INLINE
4926 operator simd_mask<_Up, _A2>() &&
4927 {
4928 using namespace std::experimental::__proposed;
4929 return static_simd_cast<simd_mask<_Up, _A2>>(_M_data);
4930 }
4931
4932 const simd_mask<_Tp, _Abi>& _M_data;
4933 };
4934
4935 _GLIBCXX_SIMD_INTRINSIC _CvtProxy
4936 __cvt() const
4937 { return {*this}; }
4938
4939 // }}}
4940 // operator?: overloads (suggested extension) {{{
4941 #ifdef __GXX_CONDITIONAL_IS_OVERLOADABLE__
4942 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend simd_mask
4943 operator?:(const simd_mask& __k, const simd_mask& __where_true,
4944 const simd_mask& __where_false)
4945 {
4946 auto __ret = __where_false;
4947 _Impl::_S_masked_assign(__k._M_data, __ret._M_data, __where_true._M_data);
4948 return __ret;
4949 }
4950
4951 template <typename _U1, typename _U2,
4952 typename _Rp = simd<common_type_t<_U1, _U2>, _Abi>,
4953 typename = enable_if_t<conjunction_v<
4954 is_convertible<_U1, _Rp>, is_convertible<_U2, _Rp>,
4955 is_convertible<simd_mask, typename _Rp::mask_type>>>>
4956 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend _Rp
4957 operator?:(const simd_mask& __k, const _U1& __where_true,
4958 const _U2& __where_false)
4959 {
4960 _Rp __ret = __where_false;
4961 _Rp::_Impl::_S_masked_assign(
4962 __data(static_cast<typename _Rp::mask_type>(__k)), __data(__ret),
4963 __data(static_cast<_Rp>(__where_true)));
4964 return __ret;
4965 }
4966
4967 #ifdef _GLIBCXX_SIMD_ENABLE_IMPLICIT_MASK_CAST
4968 template <typename _Kp, typename _Ak, typename _Up, typename _Au,
4969 typename = enable_if_t<
4970 conjunction_v<is_convertible<simd_mask<_Kp, _Ak>, simd_mask>,
4971 is_convertible<simd_mask<_Up, _Au>, simd_mask>>>>
4972 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend simd_mask
4973 operator?:(const simd_mask<_Kp, _Ak>& __k, const simd_mask& __where_true,
4974 const simd_mask<_Up, _Au>& __where_false)
4975 {
4976 simd_mask __ret = __where_false;
4977 _Impl::_S_masked_assign(simd_mask(__k)._M_data, __ret._M_data,
4978 __where_true._M_data);
4979 return __ret;
4980 }
4981 #endif // _GLIBCXX_SIMD_ENABLE_IMPLICIT_MASK_CAST
4982 #endif // __GXX_CONDITIONAL_IS_OVERLOADABLE__
4983
4984 // }}}
4985 // _M_is_constprop {{{
4986 _GLIBCXX_SIMD_INTRINSIC constexpr bool
4987 _M_is_constprop() const
4988 {
4989 if constexpr (__is_scalar_abi<_Abi>())
4990 return __builtin_constant_p(_M_data);
4991 else
4992 return _M_data._M_is_constprop();
4993 }
4994
4995 // }}}
4996
4997 private:
4998 friend const auto& __data<_Tp, abi_type>(const simd_mask&);
4999 friend auto& __data<_Tp, abi_type>(simd_mask&);
5000 alignas(_Traits::_S_mask_align) _MemberType _M_data;
5001 };
5002
5003// }}}
5004
5005/// @cond undocumented
5006// __data(simd_mask) {{{
5007template <typename _Tp, typename _Ap>
5008 _GLIBCXX_SIMD_INTRINSIC constexpr const auto&
5009 __data(const simd_mask<_Tp, _Ap>& __x)
5010 { return __x._M_data; }
5011
5012template <typename _Tp, typename _Ap>
5013 _GLIBCXX_SIMD_INTRINSIC constexpr auto&
5014 __data(simd_mask<_Tp, _Ap>& __x)
5015 { return __x._M_data; }
5016
5017// }}}
5018/// @endcond
5019
5020// simd_mask reductions [simd_mask.reductions] {{{
5021template <typename _Tp, typename _Abi>
5022 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR bool
5023 all_of(const simd_mask<_Tp, _Abi>& __k) noexcept
5024 {
5025 if (__builtin_is_constant_evaluated() || __k._M_is_constprop())
5026 {
5027 for (size_t __i = 0; __i < simd_size_v<_Tp, _Abi>; ++__i)
5028 if (!__k[__i])
5029 return false;
5030 return true;
5031 }
5032 else
5033 return _Abi::_MaskImpl::_S_all_of(__k);
5034 }
5035
5036template <typename _Tp, typename _Abi>
5037 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR bool
5038 any_of(const simd_mask<_Tp, _Abi>& __k) noexcept
5039 {
5040 if (__builtin_is_constant_evaluated() || __k._M_is_constprop())
5041 {
5042 for (size_t __i = 0; __i < simd_size_v<_Tp, _Abi>; ++__i)
5043 if (__k[__i])
5044 return true;
5045 return false;
5046 }
5047 else
5048 return _Abi::_MaskImpl::_S_any_of(__k);
5049 }
5050
5051template <typename _Tp, typename _Abi>
5052 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR bool
5053 none_of(const simd_mask<_Tp, _Abi>& __k) noexcept
5054 {
5055 if (__builtin_is_constant_evaluated() || __k._M_is_constprop())
5056 {
5057 for (size_t __i = 0; __i < simd_size_v<_Tp, _Abi>; ++__i)
5058 if (__k[__i])
5059 return false;
5060 return true;
5061 }
5062 else
5063 return _Abi::_MaskImpl::_S_none_of(__k);
5064 }
5065
5066template <typename _Tp, typename _Abi>
5067 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR bool
5068 some_of(const simd_mask<_Tp, _Abi>& __k) noexcept
5069 {
5070 if (__builtin_is_constant_evaluated() || __k._M_is_constprop())
5071 {
5072 for (size_t __i = 1; __i < simd_size_v<_Tp, _Abi>; ++__i)
5073 if (__k[__i] != __k[__i - 1])
5074 return true;
5075 return false;
5076 }
5077 else
5078 return _Abi::_MaskImpl::_S_some_of(__k);
5079 }
5080
5081template <typename _Tp, typename _Abi>
5082 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR int
5083 popcount(const simd_mask<_Tp, _Abi>& __k) noexcept
5084 {
5085 if (__builtin_is_constant_evaluated() || __k._M_is_constprop())
5086 {
5087 const int __r = __call_with_subscripts<simd_size_v<_Tp, _Abi>>(
5088 __k, [](auto... __elements) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
5089 return ((__elements != 0) + ...);
5090 });
5091 if (__builtin_is_constant_evaluated() || __builtin_constant_p(__r))
5092 return __r;
5093 }
5094 return _Abi::_MaskImpl::_S_popcount(__k);
5095 }
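// Editor's illustration: popcount counts the true elements of a mask
// (stdx as in the all_of sketch above):
//   stdx::fixed_size_simd<int, 4> __x([](int __i) { return __i; }); // 0 1 2 3
//   int __n = stdx::popcount(__x > 1); // 2: elements 2 and 3 are > 1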
5096
5097template <typename _Tp, typename _Abi>
5098 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR int
5099 find_first_set(const simd_mask<_Tp, _Abi>& __k)
5100 {
5101 if (__builtin_is_constant_evaluated() || __k._M_is_constprop())
5102 {
5103 constexpr size_t _Np = simd_size_v<_Tp, _Abi>;
5104 const size_t _Idx = __call_with_n_evaluations<_Np>(
5105 [](auto... __indexes) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
5106 return std::min({__indexes...});
5107 }, [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
5108 return __k[__i] ? +__i : _Np;
5109 });
5110 if (_Idx >= _Np)
5111 __invoke_ub("find_first_set(empty mask) is UB");
5112 if (__builtin_constant_p(_Idx))
5113 return _Idx;
5114 }
5115 return _Abi::_MaskImpl::_S_find_first_set(__k);
5116 }
5117
5118template <typename _Tp, typename _Abi>
5119 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR int
5120 find_last_set(const simd_mask<_Tp, _Abi>& __k)
5121 {
5122 if (__builtin_is_constant_evaluated() || __k._M_is_constprop())
5123 {
5124 constexpr size_t _Np = simd_size_v<_Tp, _Abi>;
5125 const int _Idx = __call_with_n_evaluations<_Np>(
5126 [](auto... __indexes) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
5127 return std::max({__indexes...});
5128 }, [&](auto __i) _GLIBCXX_SIMD_ALWAYS_INLINE_LAMBDA {
5129 return __k[__i] ? int(__i) : -1;
5130 });
5131 if (_Idx < 0)
5132	  __invoke_ub("find_last_set(empty mask) is UB");
5133 if (__builtin_constant_p(_Idx))
5134 return _Idx;
5135 }
5136 return _Abi::_MaskImpl::_S_find_last_set(__k);
5137 }
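// Editor's note: find_first_set and find_last_set require at least one true
// element; calling them on an all-false mask is undefined behavior,
// diagnosed via __invoke_ub in _GLIBCXX_DEBUG_UB builds. Illustration
// (stdx and __x as in the popcount sketch above):
//   auto __k = __x > 1;                    // false false true true
//   int __lo = stdx::find_first_set(__k);  // 2
//   int __hi = stdx::find_last_set(__k);   // 3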
5138
5139_GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR bool
5140all_of(_ExactBool __x) noexcept
5141{ return __x; }
5142
5143_GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR bool
5144any_of(_ExactBool __x) noexcept
5145{ return __x; }
5146
5147_GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR bool
5148none_of(_ExactBool __x) noexcept
5149{ return !__x; }
5150
5151_GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR bool
5152some_of(_ExactBool) noexcept
5153{ return false; }
5154
5155_GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR int
5156popcount(_ExactBool __x) noexcept
5157{ return __x; }
5158
5159_GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR int
5160find_first_set(_ExactBool)
5161{ return 0; }
5162
5163_GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR int
5164find_last_set(_ExactBool)
5165{ return 0; }
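// Editor's note: the _ExactBool overloads above extend the reductions to
// plain bool, which is what scalar-ABI masks convert to. A single bool is
// its own reduction: popcount(true) == 1, find_first_set and find_last_set
// can only return 0, and some_of is always false because one element can
// never be partially set.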
5166
5167// }}}
5168
5169/// @cond undocumented
5170// _SimdIntOperators{{{1
5171template <typename _V, typename _Tp, typename _Abi, bool>
5172 class _SimdIntOperators {};
5173
5174template <typename _V, typename _Tp, typename _Abi>
5175 class _SimdIntOperators<_V, _Tp, _Abi, true>
5176 {
5177 using _Impl = typename _SimdTraits<_Tp, _Abi>::_SimdImpl;
5178
5179 _GLIBCXX_SIMD_INTRINSIC constexpr const _V&
5180 __derived() const
5181 { return *static_cast<const _V*>(this); }
5182
5183 template <typename _Up>
5184 _GLIBCXX_SIMD_INTRINSIC static _GLIBCXX_SIMD_CONSTEXPR _V
5185 _S_make_derived(_Up&& __d)
5186 { return {__private_init, static_cast<_Up&&>(__d)}; }
5187
5188 public:
5189 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend
5190 _V&
5191 operator%=(_V& __lhs, const _V& __x)
5192 { return __lhs = __lhs % __x; }
5193
5194 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend
5195 _V&
5196 operator&=(_V& __lhs, const _V& __x)
5197 { return __lhs = __lhs & __x; }
5198
5199 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend
5200 _V&
5201 operator|=(_V& __lhs, const _V& __x)
5202 { return __lhs = __lhs | __x; }
5203
5204 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend
5205 _V&
5206 operator^=(_V& __lhs, const _V& __x)
5207 { return __lhs = __lhs ^ __x; }
5208
5209 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend
5210 _V&
5211 operator<<=(_V& __lhs, const _V& __x)
5212 { return __lhs = __lhs << __x; }
5213
5214 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend
5215 _V&
5216 operator>>=(_V& __lhs, const _V& __x)
5217 { return __lhs = __lhs >> __x; }
5218
5219 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend
5220 _V&
5221 operator<<=(_V& __lhs, int __x)
5222 { return __lhs = __lhs << __x; }
5223
5224 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend
5225 _V&
5226 operator>>=(_V& __lhs, int __x)
5227 { return __lhs = __lhs >> __x; }
5228
5229 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend
5230 _V
5231 operator%(const _V& __x, const _V& __y)
5232 {
5233 return _SimdIntOperators::_S_make_derived(
5234 _Impl::_S_modulus(__data(__x), __data(__y)));
5235 }
5236
5237 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend
5238 _V
5239 operator&(const _V& __x, const _V& __y)
5240 {
5241 return _SimdIntOperators::_S_make_derived(
5242 _Impl::_S_bit_and(__data(__x), __data(__y)));
5243 }
5244
5245 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend
5246 _V
5247 operator|(const _V& __x, const _V& __y)
5248 {
5249 return _SimdIntOperators::_S_make_derived(
5250 _Impl::_S_bit_or(__data(__x), __data(__y)));
5251 }
5252
5253 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend
5254 _V
5255 operator^(const _V& __x, const _V& __y)
5256 {
5257 return _SimdIntOperators::_S_make_derived(
5258 _Impl::_S_bit_xor(__data(__x), __data(__y)));
5259 }
5260
5261 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend
5262 _V
5263 operator<<(const _V& __x, const _V& __y)
5264 {
5265 return _SimdIntOperators::_S_make_derived(
5266 _Impl::_S_bit_shift_left(__data(__x), __data(__y)));
5267 }
5268
5269 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend
5270 _V
5271 operator>>(const _V& __x, const _V& __y)
5272 {
5273 return _SimdIntOperators::_S_make_derived(
5274 _Impl::_S_bit_shift_right(__data(__x), __data(__y)));
5275 }
5276
5277 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend
5278 _V
5279 operator<<(const _V& __x, int __y)
5280 {
5281 if (__y < 0)
5282 __invoke_ub("The behavior is undefined if the right operand of a "
5283 "shift operation is negative. [expr.shift]\nA shift by "
5284 "%d was requested",
5285 __y);
5286 if (size_t(__y) >= sizeof(declval<_Tp>() << __y) * __CHAR_BIT__)
5287 __invoke_ub(
5288 "The behavior is undefined if the right operand of a "
5289 "shift operation is greater than or equal to the width of the "
5290 "promoted left operand. [expr.shift]\nA shift by %d was requested",
5291 __y);
5292 return _SimdIntOperators::_S_make_derived(
5293 _Impl::_S_bit_shift_left(__data(__x), __y));
5294 }
5295
5296 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend
5297 _V
5298 operator>>(const _V& __x, int __y)
5299 {
5300 if (__y < 0)
5301 __invoke_ub(
5302 "The behavior is undefined if the right operand of a shift "
5303 "operation is negative. [expr.shift]\nA shift by %d was requested",
5304 __y);
5305 if (size_t(__y) >= sizeof(declval<_Tp>() << __y) * __CHAR_BIT__)
5306 __invoke_ub(
5307 "The behavior is undefined if the right operand of a shift "
5308 "operation is greater than or equal to the width of the promoted "
5309 "left operand. [expr.shift]\nA shift by %d was requested",
5310 __y);
5311 return _SimdIntOperators::_S_make_derived(
5312 _Impl::_S_bit_shift_right(__data(__x), __y));
5313 }
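      // Editor's note: both scalar-shift overloads enforce [expr.shift]: a
      // negative count, or one >= the width of the promoted left operand, is
      // undefined and reported via __invoke_ub in _GLIBCXX_DEBUG_UB builds.
      // Illustration (assuming 32-bit int elements): __v << 31 is fine,
      // __v << 32 is diagnosed.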
5314
5315 // unary operators (for integral _Tp)
5316 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR
5317 _V
5318 operator~() const
5319 { return {__private_init, _Impl::_S_complement(__derived()._M_data)}; }
5320 };
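// Editor's illustration: the simd class below inherits these operators only
// for integral _Tp (stdx as above):
//   stdx::native_simd<int> __i = 7;
//   auto __r = __i % 4;              // 3 in every element
//   // stdx::native_simd<float> has no operator%: ill-formed if attempted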
5321
5322//}}}1
5323/// @endcond
5324
5325// simd {{{
5326template <typename _Tp, typename _Abi>
5327 class simd : public _SimdIntOperators<
5328 simd<_Tp, _Abi>, _Tp, _Abi,
5329 conjunction<is_integral<_Tp>,
5330 typename _SimdTraits<_Tp, _Abi>::_IsValid>::value>,
5331 public _SimdTraits<_Tp, _Abi>::_SimdBase
5332 {
5333 using _Traits = _SimdTraits<_Tp, _Abi>;
5334 using _MemberType = typename _Traits::_SimdMember;
5335 using _CastType = typename _Traits::_SimdCastType;
5336 static constexpr _Tp* _S_type_tag = nullptr;
5337 friend typename _Traits::_SimdBase;
5338
5339 public:
5340 using _Impl = typename _Traits::_SimdImpl;
5341 friend _Impl;
5342 friend _SimdIntOperators<simd, _Tp, _Abi, true>;
5343
5344 using value_type = _Tp;
5345 using reference = _SmartReference<_MemberType, _Impl, value_type>;
5346 using mask_type = simd_mask<_Tp, _Abi>;
5347 using abi_type = _Abi;
5348
5349 static constexpr size_t size()
5350 { return __size_or_zero_v<_Tp, _Abi>; }
5351
5352 _GLIBCXX_SIMD_CONSTEXPR simd() = default;
5353 _GLIBCXX_SIMD_CONSTEXPR simd(const simd&) = default;
5354 _GLIBCXX_SIMD_CONSTEXPR simd(simd&&) noexcept = default;
5355 _GLIBCXX_SIMD_CONSTEXPR simd& operator=(const simd&) = default;
5356 _GLIBCXX_SIMD_CONSTEXPR simd& operator=(simd&&) noexcept = default;
5357
5358 // implicit broadcast constructor
5359 template <typename _Up,
5360 typename = enable_if_t<!is_same_v<__remove_cvref_t<_Up>, bool>>>
5361 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR
5362 simd(_ValuePreservingOrInt<_Up, value_type>&& __x)
5363 : _M_data(
5364 _Impl::_S_broadcast(static_cast<value_type>(static_cast<_Up&&>(__x))))
5365 {}
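      // Editor's illustration: the broadcast accepts value-preserving
      // conversions and int (so integer literals work):
      //   stdx::native_simd<float> __a = 1;   // OK: int broadcasts
      //   stdx::native_simd<float> __b = 1.0; // ill-formed: double narrows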
5366
5367 // implicit type conversion constructor (convert from fixed_size to
5368 // fixed_size)
5369 template <typename _Up>
5370 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR
5371	simd(const simd<_Up, simd_abi::fixed_size<size()>>& __x,
5372	     enable_if_t<
5373	       conjunction<
5374 is_same<simd_abi::fixed_size<size()>, abi_type>,
5375 negation<__is_narrowing_conversion<_Up, value_type>>,
5376 __converts_to_higher_integer_rank<_Up, value_type>>::value,
5377 void*> = nullptr)
5378 : simd{static_cast<array<_Up, size()>>(__x).data(), vector_aligned} {}
5379
5380 // explicit type conversion constructor
5381#ifdef _GLIBCXX_SIMD_ENABLE_STATIC_CAST
5382 template <typename _Up, typename _A2,
5383 typename = decltype(static_simd_cast<simd>(
5384 declval<const simd<_Up, _A2>&>()))>
5385 _GLIBCXX_SIMD_ALWAYS_INLINE explicit _GLIBCXX_SIMD_CONSTEXPR
5386 simd(const simd<_Up, _A2>& __x)
5387 : simd(static_simd_cast<simd>(__x)) {}
5388#endif // _GLIBCXX_SIMD_ENABLE_STATIC_CAST
5389
5390 // generator constructor
5391 template <typename _Fp>
5392 _GLIBCXX_SIMD_ALWAYS_INLINE explicit _GLIBCXX_SIMD_CONSTEXPR
5393 simd(_Fp&& __gen, _ValuePreservingOrInt<decltype(declval<_Fp>()(
5394 declval<_SizeConstant<0>&>())),
5395 value_type>* = nullptr)
5396 : _M_data(_Impl::_S_generator(static_cast<_Fp&&>(__gen), _S_type_tag)) {}
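      // Editor's illustration: the generator is called with integral_constant
      // indices 0 ... size()-1, one per element:
      //   stdx::native_simd<int> __iota([](auto __i) { return int(__i); });
      //   // __iota holds {0, 1, 2, ...}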
5397
5398 // load constructor
5399 template <typename _Up, typename _Flags>
5400 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR
5401 simd(const _Up* __mem, _IsSimdFlagType<_Flags>)
5402 : _M_data(
5403 _Impl::_S_load(_Flags::template _S_apply<simd>(__mem), _S_type_tag))
5404 {}
5405
5406 // loads [simd.load]
5407 template <typename _Up, typename _Flags>
5408 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR void
5409 copy_from(const _Vectorizable<_Up>* __mem, _IsSimdFlagType<_Flags>)
5410 {
5411 _M_data = static_cast<decltype(_M_data)>(
5412 _Impl::_S_load(_Flags::template _S_apply<simd>(__mem), _S_type_tag));
5413 }
5414
5415 // stores [simd.store]
5416 template <typename _Up, typename _Flags>
5417 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR void
5418 copy_to(_Vectorizable<_Up>* __mem, _IsSimdFlagType<_Flags>) const
5419 {
5420 _Impl::_S_store(_M_data, _Flags::template _S_apply<simd>(__mem),
5421 _S_type_tag);
5422 }
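      // Editor's illustration of the load/store interface: element_aligned
      // imposes no alignment requirement, vector_aligned promises one.
      //   alignas(stdx::memory_alignment_v<stdx::native_simd<float>>)
      //     float __buf[stdx::native_simd<float>::size()] = {};
      //   stdx::native_simd<float> __v(__buf, stdx::vector_aligned);
      //   __v += 1.f;
      //   __v.copy_to(__buf, stdx::vector_aligned);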
5423
5424 // scalar access
5425 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR reference
5426 operator[](size_t __i)
5427 { return {_M_data, int(__i)}; }
5428
5429 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR value_type
5430 operator[]([[maybe_unused]] size_t __i) const
5431 {
5432 if constexpr (__is_scalar_abi<_Abi>())
5433 {
5434 _GLIBCXX_DEBUG_ASSERT(__i == 0);
5435 return _M_data;
5436 }
5437 else
5438 return _M_data[__i];
5439 }
5440
5441 // increment and decrement:
5442 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR simd&
5443 operator++()
5444 {
5445 _Impl::_S_increment(_M_data);
5446 return *this;
5447 }
5448
5449 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR simd
5450 operator++(int)
5451 {
5452 simd __r = *this;
5453 _Impl::_S_increment(_M_data);
5454 return __r;
5455 }
5456
5457 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR simd&
5458 operator--()
5459 {
5460 _Impl::_S_decrement(_M_data);
5461 return *this;
5462 }
5463
5464 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR simd
5465 operator--(int)
5466 {
5467 simd __r = *this;
5468 _Impl::_S_decrement(_M_data);
5469 return __r;
5470 }
5471
5472 // unary operators (for any _Tp)
5473 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR mask_type
5474 operator!() const
5475 { return {__private_init, _Impl::_S_negate(_M_data)}; }
5476
5477 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR simd
5478 operator+() const
5479 { return *this; }
5480
5481 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR simd
5482 operator-() const
5483 { return {__private_init, _Impl::_S_unary_minus(_M_data)}; }
5484
5485 // access to internal representation (suggested extension)
5486 _GLIBCXX_SIMD_ALWAYS_INLINE explicit _GLIBCXX_SIMD_CONSTEXPR
5487 simd(_CastType __init) : _M_data(__init) {}
5488
5489 // compound assignment [simd.cassign]
5490 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend simd&
5491 operator+=(simd& __lhs, const simd& __x)
5492 { return __lhs = __lhs + __x; }
5493
5494 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend simd&
5495 operator-=(simd& __lhs, const simd& __x)
5496 { return __lhs = __lhs - __x; }
5497
5498 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend simd&
5499 operator*=(simd& __lhs, const simd& __x)
5500 { return __lhs = __lhs * __x; }
5501
5502 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend simd&
5503 operator/=(simd& __lhs, const simd& __x)
5504 { return __lhs = __lhs / __x; }
5505
5506 // binary operators [simd.binary]
5507 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend simd
5508 operator+(const simd& __x, const simd& __y)
5509 { return {__private_init, _Impl::_S_plus(__x._M_data, __y._M_data)}; }
5510
5511 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend simd
5512 operator-(const simd& __x, const simd& __y)
5513 { return {__private_init, _Impl::_S_minus(__x._M_data, __y._M_data)}; }
5514
5515 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend simd
5516 operator*(const simd& __x, const simd& __y)
5517 { return {__private_init, _Impl::_S_multiplies(__x._M_data, __y._M_data)}; }
5518
5519 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend simd
5520 operator/(const simd& __x, const simd& __y)
5521 { return {__private_init, _Impl::_S_divides(__x._M_data, __y._M_data)}; }
5522
5523 // compares [simd.comparison]
5524 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend mask_type
5525 operator==(const simd& __x, const simd& __y)
5526 { return simd::_S_make_mask(_Impl::_S_equal_to(__x._M_data, __y._M_data)); }
5527
5528 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend mask_type
5529 operator!=(const simd& __x, const simd& __y)
5530 {
5531 return simd::_S_make_mask(
5532 _Impl::_S_not_equal_to(__x._M_data, __y._M_data));
5533 }
5534
5535 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend mask_type
5536 operator<(const simd& __x, const simd& __y)
5537 { return simd::_S_make_mask(_Impl::_S_less(__x._M_data, __y._M_data)); }
5538
5539 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend mask_type
5540 operator<=(const simd& __x, const simd& __y)
5541 {
5542 return simd::_S_make_mask(_Impl::_S_less_equal(__x._M_data, __y._M_data));
5543 }
5544
5545 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend mask_type
5546 operator>(const simd& __x, const simd& __y)
5547 { return simd::_S_make_mask(_Impl::_S_less(__y._M_data, __x._M_data)); }
5548
5549 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend mask_type
5550 operator>=(const simd& __x, const simd& __y)
5551 {
5552 return simd::_S_make_mask(_Impl::_S_less_equal(__y._M_data, __x._M_data));
5553 }
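      // Editor's note: comparisons return mask_type rather than bool; use the
      // mask reductions to branch, e.g. if (stdx::any_of(__x < __y)) ... .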
5554
5555 // operator?: overloads (suggested extension) {{{
5556#ifdef __GXX_CONDITIONAL_IS_OVERLOADABLE__
5557 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend simd
5558 operator?:(const mask_type& __k, const simd& __where_true,
5559 const simd& __where_false)
5560 {
5561 auto __ret = __where_false;
5562 _Impl::_S_masked_assign(__data(__k), __data(__ret), __data(__where_true));
5563 return __ret;
5564 }
5565
5566#endif // __GXX_CONDITIONAL_IS_OVERLOADABLE__
5567 // }}}
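      // Editor's note: without the ?: extension the same blend is written
      // with the TS where() facility:
      //   where(__k, __v) = __w; // copy __w into __v wherever __k is true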
5568
5569      // "private" because of the first argument's namespace
5570 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR
5571 simd(_PrivateInit, const _MemberType& __init)
5572 : _M_data(__init) {}
5573
5574      // "private" because of the first argument's namespace
5575 _GLIBCXX_SIMD_INTRINSIC
5576 simd(_BitsetInit, bitset<size()> __init) : _M_data()
5577 { where(mask_type(__bitset_init, __init), *this) = ~*this; }
5578
5579 _GLIBCXX_SIMD_INTRINSIC constexpr bool
5580 _M_is_constprop() const
5581 {
5582 if constexpr (__is_scalar_abi<_Abi>())
5583 return __builtin_constant_p(_M_data);
5584 else
5585 return _M_data._M_is_constprop();
5586 }
5587
5588 private:
5589 _GLIBCXX_SIMD_INTRINSIC static constexpr mask_type
5590 _S_make_mask(typename mask_type::_MemberType __k)
5591 { return {__private_init, __k}; }
5592
5593 friend const auto& __data<value_type, abi_type>(const simd&);
5594 friend auto& __data<value_type, abi_type>(simd&);
5595 alignas(_Traits::_S_simd_align) _MemberType _M_data;
5596 };
5597
5598// }}}
5599/// @cond undocumented
5600// __data {{{
5601template <typename _Tp, typename _Ap>
5602 _GLIBCXX_SIMD_INTRINSIC constexpr const auto&
5603 __data(const simd<_Tp, _Ap>& __x)
5604 { return __x._M_data; }
5605
5606template <typename _Tp, typename _Ap>
5607 _GLIBCXX_SIMD_INTRINSIC constexpr auto&
5608 __data(simd<_Tp, _Ap>& __x)
5609 { return __x._M_data; }
5610
5611// }}}
5612namespace __float_bitwise_operators { //{{{
5613template <typename _Tp, typename _Ap>
5614 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR simd<_Tp, _Ap>
5615 operator^(const simd<_Tp, _Ap>& __a, const simd<_Tp, _Ap>& __b)
5616 { return {__private_init, _Ap::_SimdImpl::_S_bit_xor(__data(__a), __data(__b))}; }
5617
5618template <typename _Tp, typename _Ap>
5619 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR simd<_Tp, _Ap>
5620 operator|(const simd<_Tp, _Ap>& __a, const simd<_Tp, _Ap>& __b)
5621 { return {__private_init, _Ap::_SimdImpl::_S_bit_or(__data(__a), __data(__b))}; }
5622
5623template <typename _Tp, typename _Ap>
5624 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR simd<_Tp, _Ap>
5625 operator&(const simd<_Tp, _Ap>& __a, const simd<_Tp, _Ap>& __b)
5626 { return {__private_init, _Ap::_SimdImpl::_S_bit_and(__data(__a), __data(__b))}; }
5627
5628template <typename _Tp, typename _Ap>
5629 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR
5630 enable_if_t<is_floating_point_v<_Tp>, simd<_Tp, _Ap>>
5631 operator~(const simd<_Tp, _Ap>& __a)
5632 { return {__private_init, _Ap::_SimdImpl::_S_complement(__data(__a))}; }
5633} // namespace __float_bitwise_operators }}}
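// Editor's illustration: these operators act on the value representation and
// are opted into via using-directive; XOR with -0.f flips the sign bit:
//   using namespace __float_bitwise_operators;
//   stdx::native_simd<float> __x = 2.f;
//   auto __neg = __x ^ stdx::native_simd<float>(-0.f); // -2.f per element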
5634/// @endcond
5635
5636/// @}
5637_GLIBCXX_SIMD_END_NAMESPACE
5638
5639#endif // __cplusplus >= 201703L
5640#endif // _GLIBCXX_EXPERIMENTAL_SIMD_H
5641
5642// vim: foldmethod=marker foldmarker={{{,}}}