#ifndef _GLIBCXX_EXPERIMENTAL_SIMD_ABIS_H_
#define _GLIBCXX_EXPERIMENTAL_SIMD_ABIS_H_

#if __cplusplus >= 201703L

_GLIBCXX_SIMD_BEGIN_NAMESPACE
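// Handy vector constants: _S_allbits has every bit set, _S_signmask selects
// only the sign bit of each element, and _S_absmask selects everything but
// the sign bit.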
template <typename _V>
  static inline _GLIBCXX_SIMD_USE_CONSTEXPR _V _S_allbits
    = reinterpret_cast<_V>(~__vector_type_t<char, sizeof(_V) / sizeof(char)>());

template <typename _V, typename = _VectorTraits<_V>>
  static inline _GLIBCXX_SIMD_USE_CONSTEXPR _V _S_signmask
    = __xor(_V() + 1, _V() - 1);

template <typename _V, typename = _VectorTraits<_V>>
  static inline _GLIBCXX_SIMD_USE_CONSTEXPR _V _S_absmask
    = __andnot(_S_signmask<_V>, _S_allbits<_V>);
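// __vector_permute selects elements from a single vector, __vector_shuffle
// from the concatenation of two vectors. In both, an index of -1 requests
// zeroing of the corresponding output element.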
template <int... _Indices, typename _Tp, typename _TVT = _VectorTraits<_Tp>>
  _Tp
  __vector_permute(_Tp __x)
  {
    static_assert(sizeof...(_Indices) == _TVT::_S_full_size);
    return __make_vector<typename _TVT::value_type>(
      (_Indices == -1 ? 0 : __x[_Indices == -1 ? 0 : _Indices])...);
  }

template <int... _Indices, typename _Tp, typename _TVT = _VectorTraits<_Tp>>
  _Tp
  __vector_shuffle(_Tp __x, _Tp __y)
  {
    return _Tp{(_Indices == -1 ? 0
                : _Indices < _TVT::_S_full_size
                    ? __x[_Indices]
                    : __y[_Indices - _TVT::_S_full_size])...};
  }
template <typename _Tp, typename... _Args>
  _GLIBCXX_SIMD_INTRINSIC constexpr _SimdWrapper<_Tp, sizeof...(_Args)>
  __make_wrapper(const _Args&... __args)
  { return __make_vector<_Tp>(__args...); }

template <typename _Tp, size_t _ToN = 0, typename _Up, size_t _M,
          size_t _Np = _ToN != 0 ? _ToN : sizeof(_Up) * _M / sizeof(_Tp)>
  _GLIBCXX_SIMD_INTRINSIC constexpr _SimdWrapper<_Tp, _Np>
  __wrapper_bitcast(_SimdWrapper<_Up, _M> __x)
  {
    static_assert(_Np > 1);
    return __intrin_bitcast<__vector_type_t<_Tp, _Np>>(__x._M_data);
  }
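// __shift_elements_right shifts the byte representation of __v to the right
// by __shift bytes, shifting in zeros. It prefers the SSE/AVX/AVX-512 alignr
// and byte-shift intrinsics where available; the generic fallback at the end
// recomposes the vector chunk-wise.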
template <unsigned __shift, typename _Tp, typename _TVT = _VectorTraits<_Tp>>
  _GLIBCXX_SIMD_INTRINSIC _Tp
  __shift_elements_right(_Tp __v)
  {
    [[maybe_unused]] const auto __iv = __to_intrin(__v);
    static_assert(__shift <= sizeof(_Tp));
    if constexpr (__shift == 0)
      return __v;
    else if constexpr (__shift == sizeof(_Tp))
      return _Tp();
#if _GLIBCXX_SIMD_X86INTRIN
    else if constexpr (__have_sse && __shift == 8
                       && _TVT::template _S_is<float, 4>)
      return _mm_movehl_ps(__iv, __iv);
    else if constexpr (__have_sse2 && __shift == 8
                       && _TVT::template _S_is<double, 2>)
      return _mm_unpackhi_pd(__iv, __iv);
    else if constexpr (__have_sse2 && sizeof(_Tp) == 16)
      return reinterpret_cast<typename _TVT::type>(
        _mm_srli_si128(reinterpret_cast<__m128i>(__iv), __shift));
    else if constexpr (__shift == 16 && sizeof(_Tp) == 32)
      return __zero_extend(__hi128(__v));
    else if constexpr (__have_avx2 && sizeof(_Tp) == 32 && __shift < 16)
      {
        const auto __vll = __vector_bitcast<_LLong>(__v);
        return reinterpret_cast<typename _TVT::type>(
          _mm256_alignr_epi8(_mm256_permute2x128_si256(__vll, __vll, 0x81),
                             __vll, __shift));
      }
    else if constexpr (__have_avx && sizeof(_Tp) == 32 && __shift < 16)
      {
        const auto __vll = __vector_bitcast<_LLong>(__v);
        return reinterpret_cast<typename _TVT::type>(
          __concat(_mm_alignr_epi8(__hi128(__vll), __lo128(__vll), __shift),
                   _mm_srli_si128(__hi128(__vll), __shift)));
      }
    else if constexpr (sizeof(_Tp) == 32 && __shift > 16)
      return __zero_extend(__shift_elements_right<__shift - 16>(__hi128(__v)));
    else if constexpr (sizeof(_Tp) == 64 && __shift == 32)
      return __zero_extend(__hi256(__v));
    else if constexpr (__have_avx512f && sizeof(_Tp) == 64)
      {
        if constexpr (__shift >= 48)
          return __zero_extend(
            __shift_elements_right<__shift - 48>(__extract<3, 4>(__v)));
        else if constexpr (__shift >= 32)
          return __zero_extend(
            __shift_elements_right<__shift - 32>(__hi256(__v)));
        else if constexpr (__shift % 8 == 0)
          return reinterpret_cast<typename _TVT::type>(
            _mm512_alignr_epi64(__m512i(), __intrin_bitcast<__m512i>(__v),
                                __shift / 8));
        else if constexpr (__shift % 4 == 0)
          return reinterpret_cast<typename _TVT::type>(
            _mm512_alignr_epi32(__m512i(), __intrin_bitcast<__m512i>(__v),
                                __shift / 4));
        else if constexpr (__have_avx512bw && __shift < 16)
          {
            const auto __vll = __vector_bitcast<_LLong>(__v);
            return reinterpret_cast<typename _TVT::type>(
              _mm512_alignr_epi8(_mm512_shuffle_i32x4(__vll, __vll, 0xf9),
                                 __vll, __shift));
          }
        else if constexpr (__have_avx512bw && __shift < 32)
          {
            const auto __vll = __vector_bitcast<_LLong>(__v);
            return reinterpret_cast<typename _TVT::type>(
              _mm512_alignr_epi8(_mm512_shuffle_i32x4(__vll, __m512i(), 0xee),
                                 _mm512_shuffle_i32x4(__vll, __vll, 0xf9),
                                 __shift - 16));
          }
        else
          __assert_unreachable<_Tp>();
      }
#endif // _GLIBCXX_SIMD_X86INTRIN
    else
      {
        constexpr int __chunksize = __shift % 8 == 0   ? 8
                                    : __shift % 4 == 0 ? 4
                                    : __shift % 2 == 0 ? 2
                                                       : 1;
        auto __w = __vector_bitcast<__int_with_sizeof_t<__chunksize>>(__v);
        using _Up = decltype(__w);
        return __intrin_bitcast<_Tp>(
          __call_with_n_evaluations<(sizeof(_Tp) - __shift) / __chunksize>(
            [](auto... __chunks) { return _Up{__chunks...}; },
            [&](auto __i) { return __w[__shift / __chunksize + __i]; }));
      }
  }
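// __extract_part returns a slice of _Combine adjacent parts of __x split into
// _Total parts, i.e. _Np / _Total * _Combine elements starting at part
// _Index. A second overload handles bit-mask inputs
// (_SimdWrapper<bool, _Np>) by shifting the bits instead.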
template <int _Index, int _Total, int _Combine, typename _Tp, size_t _Np>
  _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_CONST
  _SimdWrapper<_Tp, _Np / _Total * _Combine>
  __extract_part(const _SimdWrapper<_Tp, _Np> __x)
  {
    if constexpr (_Index % 2 == 0 && _Total % 2 == 0 && _Combine % 2 == 0)
      return __extract_part<_Index / 2, _Total / 2, _Combine / 2>(__x);
    else
      {
        constexpr size_t __values_per_part = _Np / _Total;
        constexpr size_t __values_to_skip = _Index * __values_per_part;
        constexpr size_t __return_size = __values_per_part * _Combine;
        using _R = __vector_type_t<_Tp, __return_size>;
        static_assert((_Index + _Combine) * __values_per_part * sizeof(_Tp)
                        <= sizeof(__x),
                      "out of bounds __extract_part");

        if (__x._M_is_constprop())
          return __generate_from_n_evaluations<__return_size, _R>(
            [&](auto __i) { return __x[__values_to_skip + __i]; });
        if constexpr (_Index == 0 && _Total == 1)
          return __x;
        else if constexpr (_Index == 0)
          return __intrin_bitcast<_R>(__as_vector(__x));
#if _GLIBCXX_SIMD_X86INTRIN
        else if constexpr (sizeof(__x) == 32
                           && __return_size * sizeof(_Tp) <= 16)
          {
            constexpr size_t __bytes_to_skip = __values_to_skip * sizeof(_Tp);
            if constexpr (__bytes_to_skip == 16)
              return __vector_bitcast<_Tp, __return_size>(
                __hi128(__as_vector(__x)));
            else
              return __vector_bitcast<_Tp, __return_size>(
                _mm_alignr_epi8(__hi128(__vector_bitcast<_LLong>(__x)),
                                __lo128(__vector_bitcast<_LLong>(__x)),
                                __bytes_to_skip));
          }
#endif // _GLIBCXX_SIMD_X86INTRIN
        else if constexpr (_Index > 0
                           && (__values_to_skip % __return_size != 0
                               || sizeof(_R) >= 8)
                           && (__values_to_skip + __return_size) * sizeof(_Tp)
                                <= 64
                           && sizeof(__x) >= 16)
          return __intrin_bitcast<_R>(
            __shift_elements_right<__values_to_skip * sizeof(_Tp)>(
              __as_vector(__x)));
        else
          {
            _R __r = {};
            __builtin_memcpy(&__r,
                             reinterpret_cast<const char*>(&__x)
                               + sizeof(_Tp) * __values_to_skip,
                             __return_size * sizeof(_Tp));
            return __r;
          }
      }
  }

template <int _Index, int _Total, int _Combine = 1, size_t _Np>
  _GLIBCXX_SIMD_INTRINSIC constexpr _SimdWrapper<bool, _Np / _Total * _Combine>
  __extract_part(const _SimdWrapper<bool, _Np> __x)
  {
    static_assert(_Combine == 1, "_Combine != 1 not implemented");
    static_assert(__have_avx512f && _Np == _Np); // dependent condition
    static_assert(_Total >= 2 && _Index + _Combine <= _Total && _Index >= 0);
    return __x._M_data >> (_Index * _Np / _Total);
  }
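// __vector_convert: element-wise static_cast of the inputs into a single
// destination vector type _To. The index_sequence parameter enumerates the
// elements read from each input; the overloads below cover 1 through 16
// input vectors.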
template <typename _To, typename _From, size_t... _I>
  _GLIBCXX_SIMD_INTRINSIC constexpr _To
  __vector_convert(_From __a, index_sequence<_I...>)
  {
    using _Tp = typename _VectorTraits<_To>::value_type;
    return _To{static_cast<_Tp>(__a[_I])...};
  }

template <typename _To, typename _From, size_t... _I>
  _GLIBCXX_SIMD_INTRINSIC constexpr _To
  __vector_convert(_From __a, _From __b, index_sequence<_I...>)
  {
    using _Tp = typename _VectorTraits<_To>::value_type;
    return _To{static_cast<_Tp>(__a[_I])..., static_cast<_Tp>(__b[_I])...};
  }

template <typename _To, typename _From, size_t... _I>
  _GLIBCXX_SIMD_INTRINSIC constexpr _To
  __vector_convert(_From __a, _From __b, _From __c, index_sequence<_I...>)
  {
    using _Tp = typename _VectorTraits<_To>::value_type;
    return _To{static_cast<_Tp>(__a[_I])..., static_cast<_Tp>(__b[_I])...,
               static_cast<_Tp>(__c[_I])...};
  }

template <typename _To, typename _From, size_t... _I>
  _GLIBCXX_SIMD_INTRINSIC constexpr _To
  __vector_convert(_From __a, _From __b, _From __c, _From __d,
                   index_sequence<_I...>)
  {
    using _Tp = typename _VectorTraits<_To>::value_type;
    return _To{static_cast<_Tp>(__a[_I])..., static_cast<_Tp>(__b[_I])...,
               static_cast<_Tp>(__c[_I])..., static_cast<_Tp>(__d[_I])...};
  }

template <typename _To, typename _From, size_t... _I>
  _GLIBCXX_SIMD_INTRINSIC constexpr _To
  __vector_convert(_From __a, _From __b, _From __c, _From __d, _From __e,
                   index_sequence<_I...>)
  {
    using _Tp = typename _VectorTraits<_To>::value_type;
    return _To{static_cast<_Tp>(__a[_I])..., static_cast<_Tp>(__b[_I])...,
               static_cast<_Tp>(__c[_I])..., static_cast<_Tp>(__d[_I])...,
               static_cast<_Tp>(__e[_I])...};
  }

template <typename _To, typename _From, size_t... _I>
  _GLIBCXX_SIMD_INTRINSIC constexpr _To
  __vector_convert(_From __a, _From __b, _From __c, _From __d, _From __e,
                   _From __f, index_sequence<_I...>)
  {
    using _Tp = typename _VectorTraits<_To>::value_type;
    return _To{static_cast<_Tp>(__a[_I])..., static_cast<_Tp>(__b[_I])...,
               static_cast<_Tp>(__c[_I])..., static_cast<_Tp>(__d[_I])...,
               static_cast<_Tp>(__e[_I])..., static_cast<_Tp>(__f[_I])...};
  }

template <typename _To, typename _From, size_t... _I>
  _GLIBCXX_SIMD_INTRINSIC constexpr _To
  __vector_convert(_From __a, _From __b, _From __c, _From __d, _From __e,
                   _From __f, _From __g, index_sequence<_I...>)
  {
    using _Tp = typename _VectorTraits<_To>::value_type;
    return _To{static_cast<_Tp>(__a[_I])..., static_cast<_Tp>(__b[_I])...,
               static_cast<_Tp>(__c[_I])..., static_cast<_Tp>(__d[_I])...,
               static_cast<_Tp>(__e[_I])..., static_cast<_Tp>(__f[_I])...,
               static_cast<_Tp>(__g[_I])...};
  }

template <typename _To, typename _From, size_t... _I>
  _GLIBCXX_SIMD_INTRINSIC constexpr _To
  __vector_convert(_From __a, _From __b, _From __c, _From __d, _From __e,
                   _From __f, _From __g, _From __h, index_sequence<_I...>)
  {
    using _Tp = typename _VectorTraits<_To>::value_type;
    return _To{static_cast<_Tp>(__a[_I])..., static_cast<_Tp>(__b[_I])...,
               static_cast<_Tp>(__c[_I])..., static_cast<_Tp>(__d[_I])...,
               static_cast<_Tp>(__e[_I])..., static_cast<_Tp>(__f[_I])...,
               static_cast<_Tp>(__g[_I])..., static_cast<_Tp>(__h[_I])...};
  }

template <typename _To, typename _From, size_t... _I>
  _GLIBCXX_SIMD_INTRINSIC constexpr _To
  __vector_convert(_From __a, _From __b, _From __c, _From __d, _From __e,
                   _From __f, _From __g, _From __h, _From __i,
                   index_sequence<_I...>)
  {
    using _Tp = typename _VectorTraits<_To>::value_type;
    return _To{static_cast<_Tp>(__a[_I])..., static_cast<_Tp>(__b[_I])...,
               static_cast<_Tp>(__c[_I])..., static_cast<_Tp>(__d[_I])...,
               static_cast<_Tp>(__e[_I])..., static_cast<_Tp>(__f[_I])...,
               static_cast<_Tp>(__g[_I])..., static_cast<_Tp>(__h[_I])...,
               static_cast<_Tp>(__i[_I])...};
  }

template <typename _To, typename _From, size_t... _I>
  _GLIBCXX_SIMD_INTRINSIC constexpr _To
  __vector_convert(_From __a, _From __b, _From __c, _From __d, _From __e,
                   _From __f, _From __g, _From __h, _From __i, _From __j,
                   index_sequence<_I...>)
  {
    using _Tp = typename _VectorTraits<_To>::value_type;
    return _To{static_cast<_Tp>(__a[_I])..., static_cast<_Tp>(__b[_I])...,
               static_cast<_Tp>(__c[_I])..., static_cast<_Tp>(__d[_I])...,
               static_cast<_Tp>(__e[_I])..., static_cast<_Tp>(__f[_I])...,
               static_cast<_Tp>(__g[_I])..., static_cast<_Tp>(__h[_I])...,
               static_cast<_Tp>(__i[_I])..., static_cast<_Tp>(__j[_I])...};
  }

template <typename _To, typename _From, size_t... _I>
  _GLIBCXX_SIMD_INTRINSIC constexpr _To
  __vector_convert(_From __a, _From __b, _From __c, _From __d, _From __e,
                   _From __f, _From __g, _From __h, _From __i, _From __j,
                   _From __k, index_sequence<_I...>)
  {
    using _Tp = typename _VectorTraits<_To>::value_type;
    return _To{static_cast<_Tp>(__a[_I])..., static_cast<_Tp>(__b[_I])...,
               static_cast<_Tp>(__c[_I])..., static_cast<_Tp>(__d[_I])...,
               static_cast<_Tp>(__e[_I])..., static_cast<_Tp>(__f[_I])...,
               static_cast<_Tp>(__g[_I])..., static_cast<_Tp>(__h[_I])...,
               static_cast<_Tp>(__i[_I])..., static_cast<_Tp>(__j[_I])...,
               static_cast<_Tp>(__k[_I])...};
  }

template <typename _To, typename _From, size_t... _I>
  _GLIBCXX_SIMD_INTRINSIC constexpr _To
  __vector_convert(_From __a, _From __b, _From __c, _From __d, _From __e,
                   _From __f, _From __g, _From __h, _From __i, _From __j,
                   _From __k, _From __l, index_sequence<_I...>)
  {
    using _Tp = typename _VectorTraits<_To>::value_type;
    return _To{static_cast<_Tp>(__a[_I])..., static_cast<_Tp>(__b[_I])...,
               static_cast<_Tp>(__c[_I])..., static_cast<_Tp>(__d[_I])...,
               static_cast<_Tp>(__e[_I])..., static_cast<_Tp>(__f[_I])...,
               static_cast<_Tp>(__g[_I])..., static_cast<_Tp>(__h[_I])...,
               static_cast<_Tp>(__i[_I])..., static_cast<_Tp>(__j[_I])...,
               static_cast<_Tp>(__k[_I])..., static_cast<_Tp>(__l[_I])...};
  }

template <typename _To, typename _From, size_t... _I>
  _GLIBCXX_SIMD_INTRINSIC constexpr _To
  __vector_convert(_From __a, _From __b, _From __c, _From __d, _From __e,
                   _From __f, _From __g, _From __h, _From __i, _From __j,
                   _From __k, _From __l, _From __m, index_sequence<_I...>)
  {
    using _Tp = typename _VectorTraits<_To>::value_type;
    return _To{static_cast<_Tp>(__a[_I])..., static_cast<_Tp>(__b[_I])...,
               static_cast<_Tp>(__c[_I])..., static_cast<_Tp>(__d[_I])...,
               static_cast<_Tp>(__e[_I])..., static_cast<_Tp>(__f[_I])...,
               static_cast<_Tp>(__g[_I])..., static_cast<_Tp>(__h[_I])...,
               static_cast<_Tp>(__i[_I])..., static_cast<_Tp>(__j[_I])...,
               static_cast<_Tp>(__k[_I])..., static_cast<_Tp>(__l[_I])...,
               static_cast<_Tp>(__m[_I])...};
  }

template <typename _To, typename _From, size_t... _I>
  _GLIBCXX_SIMD_INTRINSIC constexpr _To
  __vector_convert(_From __a, _From __b, _From __c, _From __d, _From __e,
                   _From __f, _From __g, _From __h, _From __i, _From __j,
                   _From __k, _From __l, _From __m, _From __n,
                   index_sequence<_I...>)
  {
    using _Tp = typename _VectorTraits<_To>::value_type;
    return _To{static_cast<_Tp>(__a[_I])..., static_cast<_Tp>(__b[_I])...,
               static_cast<_Tp>(__c[_I])..., static_cast<_Tp>(__d[_I])...,
               static_cast<_Tp>(__e[_I])..., static_cast<_Tp>(__f[_I])...,
               static_cast<_Tp>(__g[_I])..., static_cast<_Tp>(__h[_I])...,
               static_cast<_Tp>(__i[_I])..., static_cast<_Tp>(__j[_I])...,
               static_cast<_Tp>(__k[_I])..., static_cast<_Tp>(__l[_I])...,
               static_cast<_Tp>(__m[_I])..., static_cast<_Tp>(__n[_I])...};
  }

template <typename _To, typename _From, size_t... _I>
  _GLIBCXX_SIMD_INTRINSIC constexpr _To
  __vector_convert(_From __a, _From __b, _From __c, _From __d, _From __e,
                   _From __f, _From __g, _From __h, _From __i, _From __j,
                   _From __k, _From __l, _From __m, _From __n, _From __o,
                   index_sequence<_I...>)
  {
    using _Tp = typename _VectorTraits<_To>::value_type;
    return _To{static_cast<_Tp>(__a[_I])..., static_cast<_Tp>(__b[_I])...,
               static_cast<_Tp>(__c[_I])..., static_cast<_Tp>(__d[_I])...,
               static_cast<_Tp>(__e[_I])..., static_cast<_Tp>(__f[_I])...,
               static_cast<_Tp>(__g[_I])..., static_cast<_Tp>(__h[_I])...,
               static_cast<_Tp>(__i[_I])..., static_cast<_Tp>(__j[_I])...,
               static_cast<_Tp>(__k[_I])..., static_cast<_Tp>(__l[_I])...,
               static_cast<_Tp>(__m[_I])..., static_cast<_Tp>(__n[_I])...,
               static_cast<_Tp>(__o[_I])...};
  }

template <typename _To, typename _From, size_t... _I>
  _GLIBCXX_SIMD_INTRINSIC constexpr _To
  __vector_convert(_From __a, _From __b, _From __c, _From __d, _From __e,
                   _From __f, _From __g, _From __h, _From __i, _From __j,
                   _From __k, _From __l, _From __m, _From __n, _From __o,
                   _From __p, index_sequence<_I...>)
  {
    using _Tp = typename _VectorTraits<_To>::value_type;
    return _To{static_cast<_Tp>(__a[_I])..., static_cast<_Tp>(__b[_I])...,
               static_cast<_Tp>(__c[_I])..., static_cast<_Tp>(__d[_I])...,
               static_cast<_Tp>(__e[_I])..., static_cast<_Tp>(__f[_I])...,
               static_cast<_Tp>(__g[_I])..., static_cast<_Tp>(__h[_I])...,
               static_cast<_Tp>(__i[_I])..., static_cast<_Tp>(__j[_I])...,
               static_cast<_Tp>(__k[_I])..., static_cast<_Tp>(__l[_I])...,
               static_cast<_Tp>(__m[_I])..., static_cast<_Tp>(__n[_I])...,
               static_cast<_Tp>(__o[_I])..., static_cast<_Tp>(__p[_I])...};
  }
template <typename _To, typename... _From, size_t _FromSize>
  _GLIBCXX_SIMD_INTRINSIC constexpr _To
  __vector_convert(_SimdWrapper<_From, _FromSize>... __xs)
  {
#ifdef _GLIBCXX_SIMD_WORKAROUND_PR85048
    using _From0 = __first_of_pack_t<_From...>;
    using _FW = _SimdWrapper<_From0, _FromSize>;
    if (!_FW::_S_is_partial && !(... && __xs._M_is_constprop()))
      {
        if constexpr ((sizeof...(_From) & (sizeof...(_From) - 1))
                      == 0) // power-of-two number of arguments
          return __convert_x86<_To>(__as_vector(__xs)...);
        else // append zeros and recurse until the above branch is taken
          return __vector_convert<_To>(__xs..., _FW{});
      }
    else
#endif
      return __vector_convert<_To>(
        __as_vector(__xs)...,
        make_index_sequence<std::min(_VectorTraits<_To>::_S_full_size,
                                     int(_FromSize))>());
  }
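// __convert: converts one or more equally-typed arguments (scalars, vector
// builtins, or _SimdWrappers) into _To, concatenating the inputs.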
template <typename _To, typename _From, typename... _More>
  _GLIBCXX_SIMD_INTRINSIC constexpr auto
  __convert(_From __v0, _More... __vs)
  {
    static_assert((true && ... && is_same_v<_From, _More>));
    if constexpr (__is_vectorizable_v<_From>)
      {
        using _V = typename _VectorTraits<_To>::type;
        using _Tp = typename _VectorTraits<_To>::value_type;
        return _V{static_cast<_Tp>(__v0), static_cast<_Tp>(__vs)...};
      }
    else if constexpr (__is_vector_type_v<_From>)
      return __convert<_To>(__as_wrapper(__v0), __as_wrapper(__vs)...);
    else // _SimdWrapper arguments
      {
        constexpr size_t __input_size = _From::_S_size * (1 + sizeof...(_More));
        if constexpr (__is_vectorizable_v<_To>)
          return __convert<__vector_type_t<_To, __input_size>>(__v0, __vs...);
        else if constexpr (!__is_vector_type_v<_To>)
          return _To(__convert<typename _To::_BuiltinType>(__v0, __vs...));
        else
          {
            static_assert(
              sizeof...(_More) == 0
                || _VectorTraits<_To>::_S_full_size >= __input_size,
              "__convert(...) requires the input to fit into the output");
            return __vector_convert<_To>(__v0, __vs...);
          }
      }
  }
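// __convert_all: converts __v into an array of destination vectors, i.e. a
// conversion that may span several destination registers. _NParts limits how
// many destination values are produced (0 = as many as fit), _Offset skips
// leading input elements. The two reconstructed template parameters below
// (_NParts, _Offset, both defaulted to 0) are implied by their uses in the
// function body.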
template <typename _To,
          size_t _NParts = 0, // 0 means as many as fit the input
          size_t _Offset = 0, // skip this many input elements
          typename _From, typename _FromVT = _VectorTraits<_From>>
  _GLIBCXX_SIMD_INTRINSIC auto
  __convert_all(_From __v)
  {
    if constexpr (is_arithmetic_v<_To> && _NParts != 1)
      {
        static_assert(_Offset < _FromVT::_S_full_size);
        constexpr auto _Np
          = _NParts == 0 ? _FromVT::_S_partial_width - _Offset : _NParts;
        return __generate_from_n_evaluations<_Np, array<_To, _Np>>(
          [&](auto __i) { return static_cast<_To>(__v[__i + _Offset]); });
      }
    else
      {
        static_assert(__is_vector_type_v<_To>);
        using _ToVT = _VectorTraits<_To>;
        if constexpr (__is_vector_type_v<_From>)
          return __convert_all<_To, _NParts>(__as_wrapper(__v));
        else if constexpr (_NParts == 1)
          {
            static_assert(_Offset % _ToVT::_S_full_size == 0);
            return array<_To, 1>{__vector_convert<_To>(
              __extract_part<_Offset / _ToVT::_S_full_size,
                             __div_roundup(_FromVT::_S_partial_width,
                                           _ToVT::_S_full_size)>(__v))};
          }
#if _GLIBCXX_SIMD_X86INTRIN
        else if constexpr (!__have_sse4_1 && _Offset == 0
                           && is_integral_v<typename _FromVT::value_type>
                           && sizeof(typename _FromVT::value_type)
                                < sizeof(typename _ToVT::value_type)
                           && !(sizeof(typename _FromVT::value_type) == 4
                                && is_same_v<typename _ToVT::value_type,
                                             double>))
          {
            using _ToT = typename _ToVT::value_type;
            using _FromT = typename _FromVT::value_type;
            constexpr size_t _Np
              = _NParts != 0
                  ? _NParts
                  : (_FromVT::_S_partial_width / _ToVT::_S_full_size);
            using _R = array<_To, _Np>;
            // __adjust bitcasts __vv to a vector of __n _FromT elements
            [[maybe_unused]] auto __adjust
              = [](auto __n,
                   auto __vv) -> _SimdWrapper<_FromT, decltype(__n)::value> {
              return __vector_bitcast<_FromT, decltype(__n)::value>(__vv);
            };
            [[maybe_unused]] const auto __vi = __to_intrin(__v);
            auto&& __make_array = [](auto __x0, [[maybe_unused]] auto __x1) {
              if constexpr (_Np == 1)
                return _R{__intrin_bitcast<_To>(__x0)};
              else
                return _R{__intrin_bitcast<_To>(__x0),
                          __intrin_bitcast<_To>(__x1)};
            };

            if constexpr (_Np == 0)
              return _R{};
            else if constexpr (sizeof(_FromT) == 1 && sizeof(_ToT) == 2)
              {
                static_assert(is_integral_v<_FromT>);
                static_assert(is_integral_v<_ToT>);
                if constexpr (is_unsigned_v<_FromT>)
                  return __make_array(_mm_unpacklo_epi8(__vi, __m128i()),
                                      _mm_unpackhi_epi8(__vi, __m128i()));
                else
                  return __make_array(
                    _mm_srai_epi16(_mm_unpacklo_epi8(__vi, __vi), 8),
                    _mm_srai_epi16(_mm_unpackhi_epi8(__vi, __vi), 8));
              }
            else if constexpr (sizeof(_FromT) == 2 && sizeof(_ToT) == 4)
              {
                static_assert(is_integral_v<_FromT>);
                if constexpr (is_floating_point_v<_ToT>)
                  {
                    const auto __ints
                      = __convert_all<__vector_type16_t<int>, _Np>(
                        __adjust(_SizeConstant<_Np * 4>(), __v));
                    return __generate_from_n_evaluations<_Np, _R>(
                      [&](auto __i) {
                        return __vector_convert<_To>(
                          __as_wrapper(__ints[__i]));
                      });
                  }
                else if constexpr (is_unsigned_v<_FromT>)
                  return __make_array(_mm_unpacklo_epi16(__vi, __m128i()),
                                      _mm_unpackhi_epi16(__vi, __m128i()));
                else
                  return __make_array(
                    _mm_srai_epi32(_mm_unpacklo_epi16(__vi, __vi), 16),
                    _mm_srai_epi32(_mm_unpackhi_epi16(__vi, __vi), 16));
              }
            else if constexpr (sizeof(_FromT) == 4 && sizeof(_ToT) == 8
                               && is_integral_v<_FromT> && is_integral_v<_ToT>)
              {
                if constexpr (is_unsigned_v<_FromT>)
                  return __make_array(_mm_unpacklo_epi32(__vi, __m128i()),
                                      _mm_unpackhi_epi32(__vi, __m128i()));
                else
                  return __make_array(
                    _mm_unpacklo_epi32(__vi, _mm_srai_epi32(__vi, 31)),
                    _mm_unpackhi_epi32(__vi, _mm_srai_epi32(__vi, 31)));
              }
            else if constexpr (sizeof(_FromT) == 4 && sizeof(_ToT) == 8
                               && is_integral_v<_FromT> && is_integral_v<_ToT>)
              {
                if constexpr (is_unsigned_v<_FromT>)
                  return __make_array(_mm_unpacklo_epi32(__vi, __m128i()),
                                      _mm_unpackhi_epi32(__vi, __m128i()));
                else
                  return __make_array(
                    _mm_unpacklo_epi32(__vi, _mm_srai_epi32(__vi, 31)),
                    _mm_unpackhi_epi32(__vi, _mm_srai_epi32(__vi, 31)));
              }
            else if constexpr (sizeof(_FromT) == 1 && sizeof(_ToT) >= 4
                               && is_signed_v<_FromT>)
              {
                const __m128i __vv[2] = {_mm_unpacklo_epi8(__vi, __vi),
                                         _mm_unpackhi_epi8(__vi, __vi)};
                const __vector_type_t<int, 4> __vvvv[4] = {
                  __vector_bitcast<int>(_mm_unpacklo_epi16(__vv[0], __vv[0])),
                  __vector_bitcast<int>(_mm_unpackhi_epi16(__vv[0], __vv[0])),
                  __vector_bitcast<int>(_mm_unpacklo_epi16(__vv[1], __vv[1])),
                  __vector_bitcast<int>(_mm_unpackhi_epi16(__vv[1], __vv[1]))};
                if constexpr (sizeof(_ToT) == 4)
                  return __generate_from_n_evaluations<_Np, _R>([&](auto __i) {
                    return __vector_convert<_To>(
                      _SimdWrapper<int, 4>(__vvvv[__i] >> 24));
                  });
                else if constexpr (is_integral_v<_ToT>)
                  return __generate_from_n_evaluations<_Np, _R>([&](auto __i) {
                    const auto __signbits = __to_intrin(__vvvv[__i / 2] >> 31);
                    const auto __sx32 = __to_intrin(__vvvv[__i / 2] >> 24);
                    return __vector_bitcast<_ToT>(
                      __i % 2 == 0 ? _mm_unpacklo_epi32(__sx32, __signbits)
                                   : _mm_unpackhi_epi32(__sx32, __signbits));
                  });
                else
                  return __generate_from_n_evaluations<_Np, _R>([&](auto __i) {
                    const _SimdWrapper<int, 4> __int4 = __vvvv[__i / 2] >> 24;
                    return __vector_convert<_To>(
                      __i % 2 == 0 ? __int4
                                   : _SimdWrapper<int, 4>(
                                       _mm_unpackhi_epi64(__to_intrin(__int4),
                                                          __to_intrin(__int4))));
                  });
              }
            else if constexpr (sizeof(_FromT) == 1 && sizeof(_ToT) == 4)
              {
                const auto __shorts = __convert_all<__vector_type16_t<
                  conditional_t<is_signed_v<_FromT>, short, unsigned short>>>(
                  __adjust(_SizeConstant<(_Np + 1) / 2 * 8>(), __v));
                return __generate_from_n_evaluations<_Np, _R>([&](auto __i) {
                  return __convert_all<_To>(__shorts[__i / 2])[__i % 2];
                });
              }
            else if constexpr (sizeof(_FromT) == 2 && sizeof(_ToT) == 8
                               && is_signed_v<_FromT> && is_integral_v<_ToT>)
              {
                const __m128i __vv[2] = {_mm_unpacklo_epi16(__vi, __vi),
                                         _mm_unpackhi_epi16(__vi, __vi)};
                const __vector_type16_t<int> __vvvv[4]
                  = {__vector_bitcast<int>(
                       _mm_unpacklo_epi32(_mm_srai_epi32(__vv[0], 16),
                                          _mm_srai_epi32(__vv[0], 31))),
                     __vector_bitcast<int>(
                       _mm_unpackhi_epi32(_mm_srai_epi32(__vv[0], 16),
                                          _mm_srai_epi32(__vv[0], 31))),
                     __vector_bitcast<int>(
                       _mm_unpacklo_epi32(_mm_srai_epi32(__vv[1], 16),
                                          _mm_srai_epi32(__vv[1], 31))),
                     __vector_bitcast<int>(
                       _mm_unpackhi_epi32(_mm_srai_epi32(__vv[1], 16),
                                          _mm_srai_epi32(__vv[1], 31)))};
                return __generate_from_n_evaluations<_Np, _R>([&](auto __i) {
                  return __vector_bitcast<_ToT>(__vvvv[__i]);
                });
              }
            else if constexpr (sizeof(_FromT) <= 2 && sizeof(_ToT) == 8)
              {
                const auto __ints
                  = __convert_all<__vector_type16_t<conditional_t<
                    is_signed_v<_FromT> || is_floating_point_v<_ToT>, int,
                    unsigned int>>>(
                    __adjust(_SizeConstant<(_Np + 1) / 2 * 4>(), __v));
                return __generate_from_n_evaluations<_Np, _R>([&](auto __i) {
                  return __convert_all<_To>(__ints[__i / 2])[__i % 2];
                });
              }
            else
              __assert_unreachable<_To>();
          }
#endif // _GLIBCXX_SIMD_X86INTRIN
        else if constexpr ((_FromVT::_S_partial_width - _Offset)
                             > _ToVT::_S_full_size)
          {
            constexpr size_t _NTotal
              = (_FromVT::_S_partial_width - _Offset) / _ToVT::_S_full_size;
            constexpr size_t _Np = _NParts == 0 ? _NTotal : _NParts;
            static_assert(
              _Np <= _NTotal
              || (_Np == _NTotal + 1
                  && (_FromVT::_S_partial_width - _Offset) % _ToVT::_S_full_size
                       > 0));
            using _R = array<_To, _Np>;
            if constexpr (_Np == 1)
              return _R{__vector_convert<_To>(
                __extract_part<_Offset, _FromVT::_S_partial_width,
                               _ToVT::_S_full_size>(__v))};
            else
              return __generate_from_n_evaluations<_Np, _R>(
                [&](auto __i) constexpr {
                  auto __part
                    = __extract_part<__i * _ToVT::_S_full_size + _Offset,
                                     _FromVT::_S_partial_width,
                                     _ToVT::_S_full_size>(__v);
                  return __vector_convert<_To>(__part);
                });
          }
        else if constexpr (_Offset == 0)
          return array<_To, 1>{__vector_convert<_To>(__v)};
        else
          return array<_To, 1>{__vector_convert<_To>(
            __extract_part<_Offset, _FromVT::_S_partial_width,
                           _FromVT::_S_partial_width - _Offset>(__v))};
      }
  }
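// _GnuTraits: ABI-specific traits glue for simd and simd_mask. Besides the
// member types it provides the base classes that add the explicit conversion
// operators to the intrinsic and builtin vector types, and the cast-helper
// types used by the converting constructors.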
template <typename _Tp, typename _Mp, typename _Abi, size_t _Np>
  struct _GnuTraits
  {
    using _IsValid = true_type;
    using _SimdImpl = typename _Abi::_SimdImpl;
    using _MaskImpl = typename _Abi::_MaskImpl;

    // simd and simd_mask member types
    using _SimdMember = _SimdWrapper<_Tp, _Np>;
    using _MaskMember = _SimdWrapper<_Mp, _Np>;

    static constexpr size_t _S_simd_align = alignof(_SimdMember);
    static constexpr size_t _S_mask_align = alignof(_MaskMember);

    // size metadata
    static constexpr size_t _S_full_size = _SimdMember::_S_full_size;
    static constexpr bool _S_is_partial = _SimdMember::_S_is_partial;

    // base class for simd, providing extra conversions
    struct _SimdBase2
    {
      explicit
      operator __intrinsic_type_t<_Tp, _Np>() const
      { return __to_intrin(static_cast<const simd<_Tp, _Abi>*>(this)->_M_data); }

      explicit
      operator __vector_type_t<_Tp, _Np>() const
      { return static_cast<const simd<_Tp, _Abi>*>(this)->_M_data.__builtin(); }
    };

    struct _SimdBase1
    {
      explicit
      operator __intrinsic_type_t<_Tp, _Np>() const
      { return __data(*static_cast<const simd<_Tp, _Abi>*>(this)); }
    };

    using _SimdBase = conditional_t<
      is_same<__intrinsic_type_t<_Tp, _Np>, __vector_type_t<_Tp, _Np>>::value,
      _SimdBase1, _SimdBase2>;

    // base class for simd_mask, providing extra conversions
    struct _MaskBase2
    {
      explicit
      operator __intrinsic_type_t<_Tp, _Np>() const
      {
        return static_cast<const simd_mask<_Tp, _Abi>*>(this)
                 ->_M_data.__intrin();
      }

      explicit
      operator __vector_type_t<_Tp, _Np>() const
      { return static_cast<const simd_mask<_Tp, _Abi>*>(this)->_M_data._M_data; }
    };

    struct _MaskBase1
    {
      explicit
      operator __intrinsic_type_t<_Tp, _Np>() const
      { return __data(*static_cast<const simd_mask<_Tp, _Abi>*>(this)); }
    };

    using _MaskBase = conditional_t<
      is_same<__intrinsic_type_t<_Tp, _Np>, __vector_type_t<_Tp, _Np>>::value,
      _MaskBase1, _MaskBase2>;

    // parameter type of the converting simd_mask constructor
    class _MaskCastType
    {
      using _Up = __intrinsic_type_t<_Tp, _Np>;

      _Up _M_data;

    public:
      _MaskCastType(_Up __x) : _M_data(__x) {}

      operator _MaskMember() const { return _M_data; }
    };

    // parameter types of the converting simd constructors
    class _SimdCastType1
    {
      using _Ap = __intrinsic_type_t<_Tp, _Np>;

      _SimdMember _M_data;

    public:
      _SimdCastType1(_Ap __a) : _M_data(__vector_bitcast<_Tp>(__a)) {}

      operator _SimdMember() const { return _M_data; }
    };

    class _SimdCastType2
    {
      using _Ap = __intrinsic_type_t<_Tp, _Np>;
      using _B = __vector_type_t<_Tp, _Np>;

      _SimdMember _M_data;

    public:
      _SimdCastType2(_Ap __a) : _M_data(__vector_bitcast<_Tp>(__a)) {}

      _SimdCastType2(_B __b) : _M_data(__b) {}

      operator _SimdMember() const { return _M_data; }
    };

    using _SimdCastType = conditional_t<
      is_same<__intrinsic_type_t<_Tp, _Np>, __vector_type_t<_Tp, _Np>>::value,
      _SimdCastType1, _SimdCastType2>;
  };
struct _CommonImplX86;
struct _CommonImplNeon;
struct _CommonImplBuiltin;
template <typename _Abi> struct _SimdImplBuiltin;
template <typename _Abi> struct _MaskImplBuiltin;
template <typename _Abi> struct _SimdImplX86;
template <typename _Abi> struct _MaskImplX86;
template <typename _Abi> struct _SimdImplNeon;
template <typename _Abi> struct _MaskImplNeon;
template <typename _Abi> struct _SimdImplPpc;
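// simd_abi::_VecBuiltin: ABI tag for vector-builtin storage with a
// vector-builtin mask representation; _UsedBytes is the number of bytes
// actually used, and the mask element type mirrors the value type's size.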
template <int _UsedBytes>
  struct simd_abi::_VecBuiltin
  {
    template <typename _Tp>
      static constexpr size_t _S_size = _UsedBytes / sizeof(_Tp);

    // validity traits
    struct _IsValidAbiTag : __bool_constant<(_UsedBytes > 1)> {};

    template <typename _Tp>
      struct _IsValidSizeFor
        : __bool_constant<(_UsedBytes / sizeof(_Tp) > 1
                           && _UsedBytes % sizeof(_Tp) == 0
                           && _UsedBytes <= __vectorized_sizeof<_Tp>()
                           && (!__have_avx512f || _UsedBytes <= 32))> {};

    template <typename _Tp>
      struct _IsValid : conjunction<_IsValidAbiTag, __is_vectorizable<_Tp>,
                                    _IsValidSizeFor<_Tp>> {};

    template <typename _Tp>
      static constexpr bool _S_is_valid_v = _IsValid<_Tp>::value;

    // _SimdImpl/_MaskImpl
#if _GLIBCXX_SIMD_X86INTRIN
    using _CommonImpl = _CommonImplX86;
    using _SimdImpl = _SimdImplX86<_VecBuiltin<_UsedBytes>>;
    using _MaskImpl = _MaskImplX86<_VecBuiltin<_UsedBytes>>;
#elif _GLIBCXX_SIMD_HAVE_NEON
    using _CommonImpl = _CommonImplNeon;
    using _SimdImpl = _SimdImplNeon<_VecBuiltin<_UsedBytes>>;
    using _MaskImpl = _MaskImplNeon<_VecBuiltin<_UsedBytes>>;
#else
    using _CommonImpl = _CommonImplBuiltin;
#ifdef __ALTIVEC__
    using _SimdImpl = _SimdImplPpc<_VecBuiltin<_UsedBytes>>;
#else
    using _SimdImpl = _SimdImplBuiltin<_VecBuiltin<_UsedBytes>>;
#endif
    using _MaskImpl = _MaskImplBuiltin<_VecBuiltin<_UsedBytes>>;
#endif

    // __traits
    template <typename _Tp>
      using _MaskValueType = __int_for_sizeof_t<_Tp>;

    template <typename _Tp>
      using __traits
        = conditional_t<_S_is_valid_v<_Tp>,
                        _GnuTraits<_Tp, _MaskValueType<_Tp>,
                                   _VecBuiltin<_UsedBytes>, _S_size<_Tp>>,
                        _InvalidTraits>;

    template <typename _Tp>
      static constexpr size_t _S_full_size = __traits<_Tp>::_S_full_size;

    template <typename _Tp>
      static constexpr bool _S_is_partial = __traits<_Tp>::_S_is_partial;

    // implicit masks
    template <typename _Tp>
      using _MaskMember = _SimdWrapper<_MaskValueType<_Tp>, _S_size<_Tp>>;

    template <typename _Tp>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _MaskMember<_Tp>
      _S_implicit_mask()
      {
        using _UV = typename _MaskMember<_Tp>::_BuiltinType;
        if constexpr (!_MaskMember<_Tp>::_S_is_partial)
          return ~_UV();
        else
          {
            constexpr auto __size = _S_size<_Tp>;
            _GLIBCXX_SIMD_USE_CONSTEXPR auto __r = __generate_vector<_UV>(
              [](auto __i) constexpr { return __i < __size ? -1 : 0; });
            return __r;
          }
      }

    template <typename _Tp>
      _GLIBCXX_SIMD_INTRINSIC static constexpr __intrinsic_type_t<_Tp,
                                                                  _S_size<_Tp>>
      _S_implicit_mask_intrin()
      {
        return __to_intrin(
          __vector_bitcast<_Tp>(_S_implicit_mask<_Tp>()._M_data));
      }

    template <typename _TW, typename _TVT = _VectorTraits<_TW>>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _TW
      _S_masked(_TW __x)
      {
        using _Tp = typename _TVT::value_type;
        if constexpr (!_MaskMember<_Tp>::_S_is_partial)
          return __x;
        else
          return __and(__as_vector(__x),
                       __vector_bitcast<_Tp>(_S_implicit_mask<_Tp>()));
      }

    template <typename _TW, typename _TVT = _VectorTraits<_TW>>
      _GLIBCXX_SIMD_INTRINSIC static constexpr auto
      __make_padding_nonzero(_TW __x)
      {
        using _Tp = typename _TVT::value_type;
        if constexpr (!_S_is_partial<_Tp>)
          return __x;
        else
          {
            _GLIBCXX_SIMD_USE_CONSTEXPR auto __implicit_mask
              = __vector_bitcast<_Tp>(_S_implicit_mask<_Tp>());
            if constexpr (is_integral_v<_Tp>)
              return __or(__x, ~__implicit_mask);
            else
              {
                _GLIBCXX_SIMD_USE_CONSTEXPR auto __one
                  = __andnot(__implicit_mask,
                             __vector_broadcast<_S_full_size<_Tp>>(_Tp(1)));
                // returning `x | 1_in_padding` would not be enough: the
                // padding in __x might hold inf or NaN
                return __or(__and(__x, __implicit_mask), __one);
              }
          }
      }
  };
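// simd_abi::_VecBltnBtmsk: ABI tag for vector-builtin storage combined with
// an AVX-512 bit-mask (_SimdWrapper<bool, N>) mask representation.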
template <int _UsedBytes>
  struct simd_abi::_VecBltnBtmsk
  {
    template <typename _Tp>
      static constexpr size_t _S_size = _UsedBytes / sizeof(_Tp);

    // validity traits
    struct _IsValidAbiTag : __bool_constant<(_UsedBytes > 1)> {};

    template <typename _Tp>
      struct _IsValidSizeFor
        : __bool_constant<(_UsedBytes / sizeof(_Tp) > 1
                           && _UsedBytes % sizeof(_Tp) == 0 && _UsedBytes <= 64
                           && (_UsedBytes > 32 || __have_avx512vl))> {};

    // Bit-masks require at least AVX512F; if sizeof(_Tp) < 4, AVX512BW is
    // required as well.
    template <typename _Tp>
      struct _IsValid
        : conjunction<
            _IsValidAbiTag, __bool_constant<__have_avx512f>,
            __bool_constant<__have_avx512bw || (sizeof(_Tp) >= 4)>,
            __bool_constant<(__vectorized_sizeof<_Tp>() > sizeof(_Tp))>,
            _IsValidSizeFor<_Tp>> {};

    template <typename _Tp>
      static constexpr bool _S_is_valid_v = _IsValid<_Tp>::value;

    // _SimdImpl/_MaskImpl
#if _GLIBCXX_SIMD_X86INTRIN
    using _CommonImpl = _CommonImplX86;
    using _SimdImpl = _SimdImplX86<_VecBltnBtmsk<_UsedBytes>>;
    using _MaskImpl = _MaskImplX86<_VecBltnBtmsk<_UsedBytes>>;
#else
    template <int>
      struct _MissingImpl;

    using _CommonImpl = _MissingImpl<_UsedBytes>;
    using _SimdImpl = _MissingImpl<_UsedBytes>;
    using _MaskImpl = _MissingImpl<_UsedBytes>;
#endif

    // __traits
    template <typename _Tp>
      using _MaskMember = _SimdWrapper<bool, _S_size<_Tp>>;

    template <typename _Tp>
      using __traits = conditional_t<
        _S_is_valid_v<_Tp>,
        _GnuTraits<_Tp, bool, _VecBltnBtmsk<_UsedBytes>, _S_size<_Tp>>,
        _InvalidTraits>;

    template <typename _Tp>
      static constexpr size_t _S_full_size = __traits<_Tp>::_S_full_size;

    template <typename _Tp>
      static constexpr bool _S_is_partial = __traits<_Tp>::_S_is_partial;

    // implicit mask
    template <typename _Tp>
      using _ImplicitMask = _SimdWrapper<bool, _S_size<_Tp>>;

    template <size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr __bool_storage_member_type_t<_Np>
      __implicit_mask_n()
      {
        using _Tp = __bool_storage_member_type_t<_Np>;
        return _Np < sizeof(_Tp) * __CHAR_BIT__ ? _Tp((1ULL << _Np) - 1)
                                                : ~_Tp();
      }

    template <typename _Tp>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _ImplicitMask<_Tp>
      _S_implicit_mask()
      { return __implicit_mask_n<_S_size<_Tp>>(); }

    template <typename _Tp>
      _GLIBCXX_SIMD_INTRINSIC static constexpr __bool_storage_member_type_t<
        _S_size<_Tp>>
      _S_implicit_mask_intrin()
      { return __implicit_mask_n<_S_size<_Tp>>(); }

    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Tp, _Np>
      _S_masked(_SimdWrapper<_Tp, _Np> __x)
      {
        if constexpr (is_same_v<_Tp, bool>)
          if constexpr (_Np < 8 || (_Np & (_Np - 1)) != 0)
            return _MaskImpl::_S_bit_and(
              __x, _SimdWrapper<_Tp, _Np>(
                     __bool_storage_member_type_t<_Np>((1ULL << _Np) - 1)));
          else
            return __x;
        else
          return _S_masked(__x._M_data);
      }

    template <typename _TV>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _TV
      _S_masked(_TV __x)
      {
        using _Tp = typename _VectorTraits<_TV>::value_type;
        static_assert(
          !__is_bitmask_v<_TV>,
          "_VecBltnBtmsk::_S_masked cannot work on bitmasks, since it doesn't "
          "know the number of elements. Use _SimdWrapper<bool, N> instead.");
        if constexpr (_S_is_partial<_Tp>)
          {
            constexpr size_t _Np = _S_size<_Tp>;
            return __make_dependent_t<_TV, _CommonImpl>::_S_blend(
              _S_implicit_mask<_Tp>(), _SimdWrapper<_Tp, _Np>(),
              _SimdWrapper<_Tp, _Np>(__x));
          }
        else
          return __x;
      }

    template <typename _TV, typename _TVT = _VectorTraits<_TV>>
      _GLIBCXX_SIMD_INTRINSIC static constexpr auto
      __make_padding_nonzero(_TV __x)
      {
        using _Tp = typename _TVT::value_type;
        if constexpr (!_S_is_partial<_Tp>)
          return __x;
        else
          {
            constexpr size_t _Np = _S_size<_Tp>;
            if constexpr (is_integral_v<typename _TVT::value_type>)
              return __x
                     | __generate_vector<_Tp, _S_full_size<_Tp>>(
                         [](auto __i) -> _Tp { return __i < _Np ? 0 : 1; });
            else
              return __make_dependent_t<_TV, _CommonImpl>::_S_blend(
                       _S_implicit_mask<_Tp>(),
                       _SimdWrapper<_Tp, _Np>(
                         __vector_broadcast<_S_full_size<_Tp>>(_Tp(1))),
                       _SimdWrapper<_Tp, _Np>(__x))
                ._M_data;
          }
      }
  };
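// _CommonImplBuiltin: target-independent load/store helpers shared by the
// vector-builtin ABI implementations. __converts_via_decomposition_v flags
// conversions that would have to decompose into scalar conversions.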
struct _CommonImplBuiltin
{
  template <typename _From, typename _To, size_t _ToSize>
    static inline constexpr bool __converts_via_decomposition_v
      = sizeof(_From) != sizeof(_To);

  // _S_load
  template <typename _Tp, size_t _Np, size_t _Bytes = _Np * sizeof(_Tp)>
    _GLIBCXX_SIMD_INTRINSIC static __vector_type_t<_Tp, _Np>
    _S_load(const void* __p)
    {
      static_assert(_Np > 1);
      static_assert(_Bytes % sizeof(_Tp) == 0);
      using _Rp = __vector_type_t<_Tp, _Np>;
      if constexpr (sizeof(_Rp) == _Bytes)
        {
          _Rp __r;
          __builtin_memcpy(&__r, __p, _Bytes);
          return __r;
        }
      else
        {
#ifdef _GLIBCXX_SIMD_WORKAROUND_PR90424
          using _Up = conditional_t<
            is_integral_v<_Tp>,
            conditional_t<_Bytes % 4 == 0,
                          conditional_t<_Bytes % 8 == 0, long long, int>,
                          conditional_t<_Bytes % 2 == 0, short, signed char>>,
            conditional_t<(_Bytes < 8 || _Np % 2 == 1 || _Np == 2), _Tp,
                          double>>;
          using _V = __vector_type_t<_Up, _Np * sizeof(_Tp) / sizeof(_Up)>;
          if constexpr (sizeof(_V) != sizeof(_Rp))
            { // e.g. on i386 with 4 < _Bytes <= 8
              _Rp __r{};
              __builtin_memcpy(&__r, __p, _Bytes);
              return __r;
            }
          else
#else
          using _V = _Rp;
#endif
            {
              _V __r{};
              static_assert(_Bytes <= sizeof(_V));
              __builtin_memcpy(&__r, __p, _Bytes);
              return reinterpret_cast<_Rp>(__r);
            }
        }
    }

  // _S_store
  template <size_t _ReqBytes = 0, typename _TV>
    _GLIBCXX_SIMD_INTRINSIC static void
    _S_store(_TV __x, void* __addr)
    {
      constexpr size_t _Bytes = _ReqBytes == 0 ? sizeof(__x) : _ReqBytes;
      static_assert(sizeof(__x) >= _Bytes);

      if constexpr (__is_vector_type_v<_TV>)
        {
          using _Tp = typename _VectorTraits<_TV>::value_type;
          constexpr size_t _Np = _Bytes / sizeof(_Tp);
          static_assert(_Np * sizeof(_Tp) == _Bytes);

#ifdef _GLIBCXX_SIMD_WORKAROUND_PR90424
          using _Up = conditional_t<
            (is_integral_v<_Tp> || _Bytes < 4),
            conditional_t<(sizeof(__x) > sizeof(long long)), long long, _Tp>,
            float>;
          const auto __v = __vector_bitcast<_Up>(__x);
#else
          const __vector_type_t<_Tp, _Np> __v = __x;
#endif

          if constexpr ((_Bytes & (_Bytes - 1)) != 0)
            {
              constexpr size_t _MoreBytes = std::__bit_ceil(_Bytes);
              alignas(decltype(__v)) char __tmp[_MoreBytes];
              __builtin_memcpy(__tmp, &__v, _MoreBytes);
              __builtin_memcpy(__addr, __tmp, _Bytes);
            }
          else
            __builtin_memcpy(__addr, &__v, _Bytes);
        }
      else
        __builtin_memcpy(__addr, &__x, _Bytes);
    }

  template <typename _Tp, size_t _Np>
    _GLIBCXX_SIMD_INTRINSIC static void
    _S_store(_SimdWrapper<_Tp, _Np> __x, void* __addr)
    { _S_store<_Np * sizeof(_Tp)>(__x._M_data, __addr); }

  // _S_store_bool_array(_BitMask)
  template <size_t _Np, bool _Sanitized>
    _GLIBCXX_SIMD_INTRINSIC static constexpr void
    _S_store_bool_array(_BitMask<_Np, _Sanitized> __x, bool* __mem)
    {
      if constexpr (_Np == 1)
        __mem[0] = __x[0];
      else if constexpr (_Np == 2)
        {
          short __bool2 = (__x._M_to_bits() * 0x81) & 0x0101;
          _S_store<_Np>(__bool2, __mem);
        }
      else if constexpr (_Np == 3)
        {
          int __bool3 = (__x._M_to_bits() * 0x4081) & 0x010101;
          _S_store<_Np>(__bool3, __mem);
        }
      else
        {
          __execute_n_times<__div_roundup(_Np, 4)>([&](auto __i) {
            constexpr int __offset = __i * 4;
            constexpr int __remaining = _Np - __offset;
            if constexpr (__remaining > 4 && __remaining <= 7)
              {
                const _ULLong __bool7
                  = (__x.template _M_extract<__offset>()._M_to_bits()
                     * 0x40810204081ULL)
                    & 0x0101010101010101ULL;
                _S_store<__remaining>(__bool7, __mem + __offset);
              }
            else if constexpr (__remaining >= 4)
              {
                int __bits = __x.template _M_extract<__offset>()._M_to_bits();
                if constexpr (__remaining > 7)
                  __bits &= 0xf;
                const int __bool4 = (__bits * 0x204081) & 0x01010101;
                _S_store<4>(__bool4, __mem + __offset);
              }
          });
        }
    }

  // _S_blend
  template <typename _Tp, size_t _Np>
    _GLIBCXX_SIMD_INTRINSIC static constexpr auto
    _S_blend(_SimdWrapper<__int_for_sizeof_t<_Tp>, _Np> __k,
             _SimdWrapper<_Tp, _Np> __at0, _SimdWrapper<_Tp, _Np> __at1)
    { return __k._M_data ? __at1._M_data : __at0._M_data; }
};
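// _SimdImplBuiltin: the generic simd implementation on top of vector
// builtins. Target-specific implementations (_SimdImplX86, _SimdImplNeon,
// _SimdImplPpc) refine it; _SuperImpl dispatches to the most derived one.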
template <typename _Abi>
  struct _SimdImplBuiltin
  {
    // member types
    template <typename _Tp>
      static constexpr size_t _S_max_store_size = 16;

    using abi_type = _Abi;

    template <typename _Tp>
      using _TypeTag = _Tp*;

    template <typename _Tp>
      using _SimdMember = typename _Abi::template __traits<_Tp>::_SimdMember;

    template <typename _Tp>
      using _MaskMember = typename _Abi::template _MaskMember<_Tp>;

    template <typename _Tp>
      static constexpr size_t _S_size = _Abi::template _S_size<_Tp>;

    template <typename _Tp>
      static constexpr size_t _S_full_size = _Abi::template _S_full_size<_Tp>;

    using _CommonImpl = typename _Abi::_CommonImpl;
    using _SuperImpl = typename _Abi::_SimdImpl;
    using _MaskImpl = typename _Abi::_MaskImpl;

    // _M_make_simd(_SimdWrapper/__intrinsic_type_t)
    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static simd<_Tp, _Abi>
      _M_make_simd(_SimdWrapper<_Tp, _Np> __x)
      { return {__private_init, __x}; }

    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static simd<_Tp, _Abi>
      _M_make_simd(__intrinsic_type_t<_Tp, _Np> __x)
      { return {__private_init, __vector_bitcast<_Tp>(__x)}; }

    // _S_broadcast
    template <typename _Tp>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdMember<_Tp>
      _S_broadcast(_Tp __x) noexcept
      { return __vector_broadcast<_S_full_size<_Tp>>(__x); }

    // _S_generator
    template <typename _Fp, typename _Tp>
      inline static constexpr _SimdMember<_Tp>
      _S_generator(_Fp&& __gen, _TypeTag<_Tp>)
      {
        return __generate_vector<_Tp, _S_full_size<_Tp>>(
          [&](auto __i) constexpr {
            if constexpr (__i < _S_size<_Tp>)
              return __gen(__i);
            else
              return 0;
          });
      }
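    // _S_load chooses the widest usable hardware load for _Np elements of
    // _Up and converts to _Tp, splitting into multiple loads plus __convert
    // when the source does not fit a single register; _S_masked_load falls
    // back to element-wise loads driven by the mask bits.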
    template <typename _Tp, typename _Up>
      _GLIBCXX_SIMD_INTRINSIC static _SimdMember<_Tp>
      _S_load(const _Up* __mem, _TypeTag<_Tp>) noexcept
      {
        constexpr size_t _Np = _S_size<_Tp>;
        constexpr size_t __max_load_size
          = (sizeof(_Up) >= 4 && __have_avx512f) || __have_avx512bw  ? 64
            : (is_floating_point_v<_Up> && __have_avx) || __have_avx2 ? 32
                                                                      : 16;
        constexpr size_t __bytes_to_load = sizeof(_Up) * _Np;
        if constexpr (sizeof(_Up) > 8)
          return __generate_vector<_Tp, _SimdMember<_Tp>::_S_full_size>(
            [&](auto __i) constexpr {
              return static_cast<_Tp>(__i < _Np ? __mem[__i] : 0);
            });
        else if constexpr (is_same_v<_Up, _Tp>)
          return _CommonImpl::template _S_load<_Tp, _S_full_size<_Tp>,
                                               _Np * sizeof(_Tp)>(__mem);
        else if constexpr (__bytes_to_load <= __max_load_size)
          return __convert<_SimdMember<_Tp>>(
            _CommonImpl::template _S_load<_Up, _Np>(__mem));
        else if constexpr (__bytes_to_load % __max_load_size == 0)
          {
            constexpr size_t __n_loads = __bytes_to_load / __max_load_size;
            constexpr size_t __elements_per_load = _Np / __n_loads;
            return __call_with_n_evaluations<__n_loads>(
              [](auto... __uncvted) {
                return __convert<_SimdMember<_Tp>>(__uncvted...);
              },
              [&](auto __i) {
                return _CommonImpl::template _S_load<_Up, __elements_per_load>(
                  __mem + __i * __elements_per_load);
              });
          }
        else if constexpr (__bytes_to_load % (__max_load_size / 2) == 0
                           && __max_load_size > 16)
          {
            constexpr size_t __n_loads
              = __bytes_to_load / (__max_load_size / 2);
            constexpr size_t __elements_per_load = _Np / __n_loads;
            return __call_with_n_evaluations<__n_loads>(
              [](auto... __uncvted) {
                return __convert<_SimdMember<_Tp>>(__uncvted...);
              },
              [&](auto __i) {
                return _CommonImpl::template _S_load<_Up, __elements_per_load>(
                  __mem + __i * __elements_per_load);
              });
          }
        else
          return __call_with_subscripts(
            __mem, make_index_sequence<_Np>(), [](auto... __args) {
              return __vector_type_t<_Tp, _S_full_size<_Tp>>{
                static_cast<_Tp>(__args)...};
            });
      }

    // _S_masked_load
    template <typename _Tp, size_t _Np, typename _Up>
      static inline _SimdWrapper<_Tp, _Np>
      _S_masked_load(_SimdWrapper<_Tp, _Np> __merge, _MaskMember<_Tp> __k,
                     const _Up* __mem) noexcept
      {
        _BitOps::_S_bit_iteration(_MaskImpl::_S_to_bits(__k), [&](auto __i) {
          __merge._M_set(__i, static_cast<_Tp>(__mem[__i]));
        });
        return __merge;
      }
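    // _S_store mirrors _S_load; _S_masked_store converts register-wise when
    // the value types differ, and falls back to element-wise stores driven
    // by the mask bits when the conversion would decompose into scalars.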
    template <typename _Tp, typename _Up>
      _GLIBCXX_SIMD_INTRINSIC static void
      _S_store(_SimdMember<_Tp> __v, _Up* __mem, _TypeTag<_Tp>) noexcept
      {
        constexpr size_t _Np = _S_size<_Tp>;
        constexpr size_t __max_store_size
          = _SuperImpl::template _S_max_store_size<_Up>;
        if constexpr (sizeof(_Up) > 8)
          __execute_n_times<_Np>([&](auto __i) constexpr {
            __mem[__i] = __v[__i];
          });
        else if constexpr (is_same_v<_Up, _Tp>)
          _CommonImpl::_S_store(__v, __mem);
        else if constexpr (sizeof(_Up) * _Np <= __max_store_size)
          _CommonImpl::_S_store(_SimdWrapper<_Up, _Np>(__convert<_Up>(__v)),
                                __mem);
        else
          {
            constexpr size_t __vsize = __max_store_size / sizeof(_Up);
            constexpr size_t __stores = __div_roundup(_Np, __vsize);
            constexpr size_t __full_stores = _Np / __vsize;
            using _V = __vector_type_t<_Up, __vsize>;
            const array<_V, __stores> __converted
              = __convert_all<_V, __stores>(__v);
            __execute_n_times<__full_stores>([&](auto __i) constexpr {
              _CommonImpl::_S_store(__converted[__i], __mem + __i * __vsize);
            });
            if constexpr (__full_stores < __stores)
              _CommonImpl::template _S_store<(_Np - __full_stores * __vsize)
                                             * sizeof(_Up)>(
                __converted[__full_stores], __mem + __full_stores * __vsize);
          }
      }

    // _S_masked_store_nocvt
    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static void
      _S_masked_store_nocvt(_SimdWrapper<_Tp, _Np> __v, _Tp* __mem,
                            _MaskMember<_Tp> __k)
      {
        _BitOps::_S_bit_iteration(
          _MaskImpl::_S_to_bits(__k), [&](auto __i) constexpr {
            __mem[__i] = __v[__i];
          });
      }

    // _S_masked_store
    template <typename _TW, typename _TVT = _VectorTraits<_TW>,
              typename _Tp = typename _TVT::value_type, typename _Up>
      static inline void
      _S_masked_store(const _TW __v, _Up* __mem, const _MaskMember<_Tp> __k)
      {
        constexpr size_t _TV_size = _S_size<_Tp>;
        [[maybe_unused]] const auto __vi = __to_intrin(__v);
        constexpr size_t __max_store_size
          = _SuperImpl::template _S_max_store_size<_Up>;
        if constexpr (is_same_v<_Tp, _Up>
                      || (is_integral_v<_Tp> && is_integral_v<_Up>
                          && sizeof(_Tp) == sizeof(_Up)))
          {
            // bitwise or no conversion, reinterpret:
            const _MaskMember<_Up> __kk = [&]() {
              if constexpr (__is_bitmask_v<decltype(__k)>)
                return _MaskMember<_Up>(__k._M_data);
              else
                return __wrapper_bitcast<__int_for_sizeof_t<_Up>>(__k);
            }();
            _SuperImpl::_S_masked_store_nocvt(__wrapper_bitcast<_Up>(__v),
                                              __mem, __kk);
          }
        else if constexpr (__vectorized_sizeof<_Up>() > sizeof(_Up)
                           && !_CommonImpl::
                                template __converts_via_decomposition_v<
                                  _Tp, _Up, __max_store_size>)
          {
            constexpr size_t _UW_size
              = std::min(_TV_size, __max_store_size / sizeof(_Up));
            static_assert(_UW_size <= _TV_size);
            using _UW = _SimdWrapper<_Up, _UW_size>;
            using _UV = __vector_type_t<_Up, _UW_size>;
            using _UAbi = simd_abi::deduce_t<_Up, _UW_size>;
            if constexpr (_UW_size == _TV_size) // one convert+store
              {
                const _UW __converted = __convert<_UW>(__v);
                _SuperImpl::_S_masked_store_nocvt(
                  __converted, __mem,
                  _UAbi::_MaskImpl::template _S_convert<
                    __int_for_sizeof_t<_Up>>(__k));
              }
            else
              {
                static_assert(_UW_size * sizeof(_Up) == __max_store_size);
                constexpr size_t _NFullStores = _TV_size / _UW_size;
                constexpr size_t _NAllStores
                  = __div_roundup(_TV_size, _UW_size);
                constexpr size_t _NParts = _S_full_size<_Tp> / _UW_size;
                const array<_UV, _NAllStores> __converted
                  = __convert_all<_UV, _NAllStores>(__v);
                __execute_n_times<_NFullStores>([&](auto __i) {
                  _SuperImpl::_S_masked_store_nocvt(
                    _UW(__converted[__i]), __mem + __i * _UW_size,
                    _UAbi::_MaskImpl::template _S_convert<
                      __int_for_sizeof_t<_Up>>(
                      __extract_part<__i, _NParts>(__k.__as_full_vector())));
                });
                if constexpr (_NAllStores
                              > _NFullStores) // one partial store at the end
                  _SuperImpl::_S_masked_store_nocvt(
                    _UW(__converted[_NFullStores]),
                    __mem + _NFullStores * _UW_size,
                    _UAbi::_MaskImpl::template _S_convert<
                      __int_for_sizeof_t<_Up>>(
                      __extract_part<_NFullStores, _NParts>(
                        __k.__as_full_vector())));
              }
          }
        else
          _BitOps::_S_bit_iteration(
            _MaskImpl::_S_to_bits(__k), [&](auto __i) constexpr {
              __mem[__i] = static_cast<_Up>(__v[__i]);
            });
      }
    // _S_complement
    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Tp, _Np>
      _S_complement(_SimdWrapper<_Tp, _Np> __x) noexcept
      { return ~__x._M_data; }

    // _S_unary_minus
    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Tp, _Np>
      _S_unary_minus(_SimdWrapper<_Tp, _Np> __x) noexcept
      { return -__x._M_data; }

    // arithmetic operators
    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Tp, _Np>
      _S_plus(_SimdWrapper<_Tp, _Np> __x, _SimdWrapper<_Tp, _Np> __y)
      { return __x._M_data + __y._M_data; }

    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Tp, _Np>
      _S_minus(_SimdWrapper<_Tp, _Np> __x, _SimdWrapper<_Tp, _Np> __y)
      { return __x._M_data - __y._M_data; }

    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Tp, _Np>
      _S_multiplies(_SimdWrapper<_Tp, _Np> __x, _SimdWrapper<_Tp, _Np> __y)
      { return __x._M_data * __y._M_data; }

    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Tp, _Np>
      _S_divides(_SimdWrapper<_Tp, _Np> __x, _SimdWrapper<_Tp, _Np> __y)
      {
        // padding elements of __y must be made non-zero to avoid UB
        if constexpr (!_Abi::template _S_is_partial<_Tp>)
          return __x._M_data / __y._M_data;
        else
          return __x._M_data / _Abi::__make_padding_nonzero(__y._M_data);
      }

    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Tp, _Np>
      _S_modulus(_SimdWrapper<_Tp, _Np> __x, _SimdWrapper<_Tp, _Np> __y)
      {
        if constexpr (!_Abi::template _S_is_partial<_Tp>)
          return __x._M_data % __y._M_data;
        else
          return __as_vector(__x)
                 % _Abi::__make_padding_nonzero(__as_vector(__y));
      }

    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Tp, _Np>
      _S_bit_and(_SimdWrapper<_Tp, _Np> __x, _SimdWrapper<_Tp, _Np> __y)
      { return __and(__x, __y); }

    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Tp, _Np>
      _S_bit_or(_SimdWrapper<_Tp, _Np> __x, _SimdWrapper<_Tp, _Np> __y)
      { return __or(__x, __y); }

    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Tp, _Np>
      _S_bit_xor(_SimdWrapper<_Tp, _Np> __x, _SimdWrapper<_Tp, _Np> __y)
      { return __xor(__x, __y); }

    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static _SimdWrapper<_Tp, _Np>
      _S_bit_shift_left(_SimdWrapper<_Tp, _Np> __x, _SimdWrapper<_Tp, _Np> __y)
      { return __x._M_data << __y._M_data; }

    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static _SimdWrapper<_Tp, _Np>
      _S_bit_shift_right(_SimdWrapper<_Tp, _Np> __x, _SimdWrapper<_Tp, _Np> __y)
      { return __x._M_data >> __y._M_data; }

    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Tp, _Np>
      _S_bit_shift_left(_SimdWrapper<_Tp, _Np> __x, int __y)
      { return __x._M_data << __y; }

    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Tp, _Np>
      _S_bit_shift_right(_SimdWrapper<_Tp, _Np> __x, int __y)
      { return __x._M_data >> __y; }

    // compares
    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _MaskMember<_Tp>
      _S_equal_to(_SimdWrapper<_Tp, _Np> __x, _SimdWrapper<_Tp, _Np> __y)
      { return __x._M_data == __y._M_data; }

    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _MaskMember<_Tp>
      _S_not_equal_to(_SimdWrapper<_Tp, _Np> __x, _SimdWrapper<_Tp, _Np> __y)
      { return __x._M_data != __y._M_data; }

    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _MaskMember<_Tp>
      _S_less(_SimdWrapper<_Tp, _Np> __x, _SimdWrapper<_Tp, _Np> __y)
      { return __x._M_data < __y._M_data; }

    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _MaskMember<_Tp>
      _S_less_equal(_SimdWrapper<_Tp, _Np> __x, _SimdWrapper<_Tp, _Np> __y)
      { return __x._M_data <= __y._M_data; }

    // _S_negate
    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _MaskMember<_Tp>
      _S_negate(_SimdWrapper<_Tp, _Np> __x) noexcept
      { return !__x._M_data; }
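    // min/max/minmax and horizontal reduction. _S_reduce recursively halves
    // the vector (padding partial vectors with the operation's identity
    // where one exists) and combines the halves with __binary_op until a
    // scalar remains.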
1751 template <
typename _Tp,
size_t _Np>
1752 _GLIBCXX_SIMD_NORMAL_MATH _GLIBCXX_SIMD_INTRINSIC
static constexpr
1753 _SimdWrapper<_Tp, _Np>
1754 _S_min(_SimdWrapper<_Tp, _Np> __a, _SimdWrapper<_Tp, _Np> __b)
1755 {
return __a._M_data < __b._M_data ? __a._M_data : __b._M_data; }
1757 template <
typename _Tp,
size_t _Np>
1758 _GLIBCXX_SIMD_NORMAL_MATH _GLIBCXX_SIMD_INTRINSIC
static constexpr
1759 _SimdWrapper<_Tp, _Np>
1760 _S_max(_SimdWrapper<_Tp, _Np> __a, _SimdWrapper<_Tp, _Np> __b)
1761 {
return __a._M_data > __b._M_data ? __a._M_data : __b._M_data; }
1763 template <
typename _Tp,
size_t _Np>
1764 _GLIBCXX_SIMD_NORMAL_MATH _GLIBCXX_SIMD_INTRINSIC
static constexpr
1765 pair<_SimdWrapper<_Tp, _Np>, _SimdWrapper<_Tp, _Np>>
1766 _S_minmax(_SimdWrapper<_Tp, _Np> __a, _SimdWrapper<_Tp, _Np> __b)
1768 return {__a._M_data < __b._M_data ? __a._M_data : __b._M_data,
1769 __a._M_data < __b._M_data ? __b._M_data : __a._M_data};
1773 template <
size_t _Np,
size_t... _Is,
size_t... _Zeros,
typename _Tp,
1774 typename _BinaryOperation>
1775 _GLIBCXX_SIMD_INTRINSIC
static _Tp
1776 _S_reduce_partial(index_sequence<_Is...>, index_sequence<_Zeros...>,
1777 simd<_Tp, _Abi> __x, _BinaryOperation&& __binary_op)
1779 using _V = __vector_type_t<_Tp, _Np / 2>;
1780 static_assert(
sizeof(_V) <=
sizeof(__x));
1783 using _FullSimd = __deduced_simd<_Tp, _VectorTraits<_V>::_S_full_size>;
1784 using _HalfSimd = __deduced_simd<_Tp, _Np / 2>;
1785 const auto __xx = __as_vector(__x);
1786 return _HalfSimd::abi_type::_SimdImpl::_S_reduce(
1787 static_cast<_HalfSimd
>(__as_vector(__binary_op(
1788 static_cast<_FullSimd
>(__intrin_bitcast<_V>(__xx)),
1789 static_cast<_FullSimd
>(__intrin_bitcast<_V>(
1790 __vector_permute<(_Np / 2 + _Is)..., (
int(_Zeros * 0) - 1)...>(
1795 template <
typename _Tp,
typename _BinaryOperation>
1796 _GLIBCXX_SIMD_INTRINSIC
static constexpr _Tp
1797 _S_reduce(simd<_Tp, _Abi> __x, _BinaryOperation&& __binary_op)
1799 constexpr
size_t _Np = simd_size_v<_Tp, _Abi>;
1800 if constexpr (_Np == 1)
1802 else if constexpr (_Np == 2)
1803 return __binary_op(simd<_Tp, simd_abi::scalar>(__x[0]),
1804 simd<_Tp, simd_abi::scalar>(__x[1]))[0];
1805 else if constexpr (_Abi::template _S_is_partial<_Tp>)
1807 [[maybe_unused]] constexpr
auto __full_size
1808 = _Abi::template _S_full_size<_Tp>;
1809 if constexpr (_Np == 3)
1811 __binary_op(simd<_Tp, simd_abi::scalar>(__x[0]),
1812 simd<_Tp, simd_abi::scalar>(__x[1])),
1813 simd<_Tp, simd_abi::scalar>(__x[2]))[0];
1814 else if constexpr (is_same_v<__remove_cvref_t<_BinaryOperation>,
1817 using _Ap = simd_abi::deduce_t<_Tp, __full_size>;
1818 return _Ap::_SimdImpl::_S_reduce(
1819 simd<_Tp, _Ap>(__private_init,
1820 _Abi::_S_masked(__as_vector(__x))),
1823 else if constexpr (is_same_v<__remove_cvref_t<_BinaryOperation>,
1826 using _Ap = simd_abi::deduce_t<_Tp, __full_size>;
1827 using _TW = _SimdWrapper<_Tp, __full_size>;
1828 _GLIBCXX_SIMD_USE_CONSTEXPR
auto __implicit_mask_full
1829 = _Abi::template _S_implicit_mask<_Tp>().__as_full_vector();
1830 _GLIBCXX_SIMD_USE_CONSTEXPR _TW __one
1831 = __vector_broadcast<__full_size>(_Tp(1));
1832 const _TW __x_full = __data(__x).__as_full_vector();
1833 const _TW __x_padded_with_ones
1834 = _Ap::_CommonImpl::_S_blend(__implicit_mask_full, __one,
1836 return _Ap::_SimdImpl::_S_reduce(
1837 simd<_Tp, _Ap>(__private_init, __x_padded_with_ones),
1840 else if constexpr (_Np & 1)
1842 using _Ap = simd_abi::deduce_t<_Tp, _Np - 1>;
1844 simd<_Tp, simd_abi::scalar>(_Ap::_SimdImpl::_S_reduce(
1846 __intrin_bitcast<__vector_type_t<_Tp, _Np - 1>>(
1849 simd<_Tp, simd_abi::scalar>(__x[_Np - 1]))[0];
1852 return _S_reduce_partial<_Np>(
1853 make_index_sequence<_Np / 2>(),
1854 make_index_sequence<__full_size - _Np / 2>(), __x, __binary_op);
	else if constexpr (sizeof(__x) == 16)
	  {
	    if constexpr (_Np == 16)
	      {
		const auto __y = __data(__x);
		__x = __binary_op(
		  _M_make_simd<_Tp, _Np>(
		    __vector_permute<0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6,
				     7, 7>(__y)),
		  _M_make_simd<_Tp, _Np>(
		    __vector_permute<8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13,
				     13, 14, 14, 15, 15>(__y)));
	      }
	    if constexpr (_Np >= 8)
	      {
		const auto __y = __vector_bitcast<short>(__data(__x));
		__x = __binary_op(
		  _M_make_simd<_Tp, _Np>(__vector_bitcast<_Tp>(
		    __vector_permute<0, 0, 1, 1, 2, 2, 3, 3>(__y))),
		  _M_make_simd<_Tp, _Np>(__vector_bitcast<_Tp>(
		    __vector_permute<4, 4, 5, 5, 6, 6, 7, 7>(__y))));
	      }
	    if constexpr (_Np >= 4)
	      {
		using _Up = conditional_t<is_floating_point_v<_Tp>, float, int>;
		const auto __y = __vector_bitcast<_Up>(__data(__x));
		__x = __binary_op(__x,
				  _M_make_simd<_Tp, _Np>(__vector_bitcast<_Tp>(
				    __vector_permute<3, 2, 1, 0>(__y))));
	      }
	    using _Up = conditional_t<is_floating_point_v<_Tp>, double, _LLong>;
	    const auto __y = __vector_bitcast<_Up>(__data(__x));
	    __x = __binary_op(__x, _M_make_simd<_Tp, _Np>(__vector_bitcast<_Tp>(
				     __vector_permute<1, 1>(__y))));
	    return __x[0];
	  }
	else
	  {
	    static_assert(sizeof(__x) > __min_vector_size<_Tp>);
	    static_assert((_Np & (_Np - 1)) == 0); // _Np must be a power of 2
	    using _Ap = simd_abi::deduce_t<_Tp, _Np / 2>;
	    using _V = simd<_Tp, _Ap>;
	    return _Ap::_SimdImpl::_S_reduce(
	      __binary_op(_V(__private_init, __extract<0, 2>(__as_vector(__x))),
			  _V(__private_init,
			     __extract<1, 2>(__as_vector(__x)))),
	      static_cast<_BinaryOperation&&>(__binary_op));
	  }
      }
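    // For vectors wider than 16 bytes the else-branch above splits the
    // input into two halves, combines them with one __binary_op, and
    // recurses on the half-width ABI until the 16-byte case is reached.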
#define _GLIBCXX_SIMD_MATH_FALLBACK(__name)                                    \
    template <typename _Tp, typename... _More>                                 \
      static _Tp _S_##__name(const _Tp& __x, const _More&... __more)           \
      {                                                                        \
	return __generate_vector<_Tp>(                                         \
		 [&](auto __i) { return __name(__x[__i], __more[__i]...); });  \
      }

#define _GLIBCXX_SIMD_MATH_FALLBACK_MASKRET(__name)                            \
    template <typename _Tp, typename... _More>                                 \
      static typename _Tp::mask_type _S_##__name(const _Tp& __x,               \
						 const _More&... __more)       \
      {                                                                        \
	return __generate_vector<_Tp>(                                         \
		 [&](auto __i) { return __name(__x[__i], __more[__i]...); });  \
      }

#define _GLIBCXX_SIMD_MATH_FALLBACK_FIXEDRET(_RetTp, __name)                   \
    template <typename _Tp, typename... _More>                                 \
      static auto _S_##__name(const _Tp& __x, const _More&... __more)          \
      {                                                                        \
	return __fixed_size_storage_t<_RetTp,                                  \
				      _VectorTraits<_Tp>::_S_partial_width>::  \
	  _S_generate([&](auto __meta) constexpr {                             \
	    return __meta._S_generator(                                        \
	      [&](auto __i) {                                                  \
		return __name(__x[__meta._S_offset + __i],                     \
			      __more[__meta._S_offset + __i]...);              \
	      },                                                               \
	      static_cast<_RetTp*>(nullptr));                                  \
	  });                                                                  \
      }
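    // Illustration: _GLIBCXX_SIMD_MATH_FALLBACK(acos) below expands to
    // roughly
    //   template <typename _Tp, typename... _More>
    //     static _Tp _S_acos(const _Tp& __x, const _More&... __more)
    //     { return __generate_vector<_Tp>(
    //		    [&](auto __i) { return acos(__x[__i], __more[__i]...); }); }
    // i.e. a per-lane call into scalar libm -- correct on every target,
    // merely not vectorized.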
    _GLIBCXX_SIMD_MATH_FALLBACK(acos)
    _GLIBCXX_SIMD_MATH_FALLBACK(asin)
    _GLIBCXX_SIMD_MATH_FALLBACK(atan)
    _GLIBCXX_SIMD_MATH_FALLBACK(atan2)
    _GLIBCXX_SIMD_MATH_FALLBACK(cos)
    _GLIBCXX_SIMD_MATH_FALLBACK(sin)
    _GLIBCXX_SIMD_MATH_FALLBACK(tan)
    _GLIBCXX_SIMD_MATH_FALLBACK(acosh)
    _GLIBCXX_SIMD_MATH_FALLBACK(asinh)
    _GLIBCXX_SIMD_MATH_FALLBACK(atanh)
    _GLIBCXX_SIMD_MATH_FALLBACK(cosh)
    _GLIBCXX_SIMD_MATH_FALLBACK(sinh)
    _GLIBCXX_SIMD_MATH_FALLBACK(tanh)
    _GLIBCXX_SIMD_MATH_FALLBACK(exp)
    _GLIBCXX_SIMD_MATH_FALLBACK(exp2)
    _GLIBCXX_SIMD_MATH_FALLBACK(expm1)
    _GLIBCXX_SIMD_MATH_FALLBACK(ldexp)
    _GLIBCXX_SIMD_MATH_FALLBACK_FIXEDRET(int, ilogb)
    _GLIBCXX_SIMD_MATH_FALLBACK(log)
    _GLIBCXX_SIMD_MATH_FALLBACK(log10)
    _GLIBCXX_SIMD_MATH_FALLBACK(log1p)
    _GLIBCXX_SIMD_MATH_FALLBACK(log2)
    _GLIBCXX_SIMD_MATH_FALLBACK(logb)

    _GLIBCXX_SIMD_MATH_FALLBACK(scalbn)
    _GLIBCXX_SIMD_MATH_FALLBACK(scalbln)
    _GLIBCXX_SIMD_MATH_FALLBACK(cbrt)
    _GLIBCXX_SIMD_MATH_FALLBACK(fabs)
    _GLIBCXX_SIMD_MATH_FALLBACK(pow)
    _GLIBCXX_SIMD_MATH_FALLBACK(sqrt)
    _GLIBCXX_SIMD_MATH_FALLBACK(erf)
    _GLIBCXX_SIMD_MATH_FALLBACK(erfc)
    _GLIBCXX_SIMD_MATH_FALLBACK(lgamma)
    _GLIBCXX_SIMD_MATH_FALLBACK(tgamma)

    _GLIBCXX_SIMD_MATH_FALLBACK_FIXEDRET(long, lrint)
    _GLIBCXX_SIMD_MATH_FALLBACK_FIXEDRET(long long, llrint)

    _GLIBCXX_SIMD_MATH_FALLBACK_FIXEDRET(long, lround)
    _GLIBCXX_SIMD_MATH_FALLBACK_FIXEDRET(long long, llround)

    _GLIBCXX_SIMD_MATH_FALLBACK(fmod)
    _GLIBCXX_SIMD_MATH_FALLBACK(remainder)
    template <typename _Tp, typename _TVT = _VectorTraits<_Tp>>
      static _Tp
      _S_remquo(const _Tp __x, const _Tp __y,
		__fixed_size_storage_t<int, _TVT::_S_partial_width>* __z)
      {
	return __generate_vector<_Tp>([&](auto __i) {
	  int __tmp;
	  auto __r = remquo(__x[__i], __y[__i], &__tmp);
	  __z->_M_set(__i, __tmp);
	  return __r;
	});
      }
    _GLIBCXX_SIMD_MATH_FALLBACK(nextafter)
    _GLIBCXX_SIMD_MATH_FALLBACK(fdim)
    _GLIBCXX_SIMD_MATH_FALLBACK(fmax)
    _GLIBCXX_SIMD_MATH_FALLBACK(fmin)
    _GLIBCXX_SIMD_MATH_FALLBACK(fma)
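    // The following is*() comparisons avoid raising FP exceptions on NaN
    // inputs by comparing bit patterns instead: bitcast to the signed
    // integer of equal width and map negative values with
    // -(__xn & __finite_max_v) from IEEE sign-magnitude onto two's
    // complement. E.g. for float, -1.0f (0xBF80'0000) maps to
    // -0x3F80'0000 while +1.0f stays 0x3F80'0000, so integer < agrees
    // with the floating-point order; unordered lanes are cleared via
    // __andnot(_S_isunordered(...)).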
    template <typename _Tp, size_t _Np>
      static constexpr _MaskMember<_Tp>
      _S_isgreater(_SimdWrapper<_Tp, _Np> __x,
		   _SimdWrapper<_Tp, _Np> __y) noexcept
      {
	using _Ip = __int_for_sizeof_t<_Tp>;
	const auto __xn = __vector_bitcast<_Ip>(__x);
	const auto __yn = __vector_bitcast<_Ip>(__y);
	const auto __xp = __xn < 0 ? -(__xn & __finite_max_v<_Ip>) : __xn;
	const auto __yp = __yn < 0 ? -(__yn & __finite_max_v<_Ip>) : __yn;
	return __andnot(_SuperImpl::_S_isunordered(__x, __y)._M_data,
			__xp > __yp);
      }
    template <typename _Tp, size_t _Np>
      static constexpr _MaskMember<_Tp>
      _S_isgreaterequal(_SimdWrapper<_Tp, _Np> __x,
			_SimdWrapper<_Tp, _Np> __y) noexcept
      {
	using _Ip = __int_for_sizeof_t<_Tp>;
	const auto __xn = __vector_bitcast<_Ip>(__x);
	const auto __yn = __vector_bitcast<_Ip>(__y);
	const auto __xp = __xn < 0 ? -(__xn & __finite_max_v<_Ip>) : __xn;
	const auto __yp = __yn < 0 ? -(__yn & __finite_max_v<_Ip>) : __yn;
	return __andnot(_SuperImpl::_S_isunordered(__x, __y)._M_data,
			__xp >= __yp);
      }
    template <typename _Tp, size_t _Np>
      static constexpr _MaskMember<_Tp>
      _S_isless(_SimdWrapper<_Tp, _Np> __x, _SimdWrapper<_Tp, _Np> __y) noexcept
      {
	using _Ip = __int_for_sizeof_t<_Tp>;
	const auto __xn = __vector_bitcast<_Ip>(__x);
	const auto __yn = __vector_bitcast<_Ip>(__y);
	const auto __xp = __xn < 0 ? -(__xn & __finite_max_v<_Ip>) : __xn;
	const auto __yp = __yn < 0 ? -(__yn & __finite_max_v<_Ip>) : __yn;
	return __andnot(_SuperImpl::_S_isunordered(__x, __y)._M_data,
			__xp < __yp);
      }
    template <typename _Tp, size_t _Np>
      static constexpr _MaskMember<_Tp>
      _S_islessequal(_SimdWrapper<_Tp, _Np> __x,
		     _SimdWrapper<_Tp, _Np> __y) noexcept
      {
	using _Ip = __int_for_sizeof_t<_Tp>;
	const auto __xn = __vector_bitcast<_Ip>(__x);
	const auto __yn = __vector_bitcast<_Ip>(__y);
	const auto __xp = __xn < 0 ? -(__xn & __finite_max_v<_Ip>) : __xn;
	const auto __yp = __yn < 0 ? -(__yn & __finite_max_v<_Ip>) : __yn;
	return __andnot(_SuperImpl::_S_isunordered(__x, __y)._M_data,
			__xp <= __yp);
      }
    template <typename _Tp, size_t _Np>
      static constexpr _MaskMember<_Tp>
      _S_islessgreater(_SimdWrapper<_Tp, _Np> __x,
		       _SimdWrapper<_Tp, _Np> __y) noexcept
      {
	return __andnot(_SuperImpl::_S_isunordered(__x, __y),
			_SuperImpl::_S_not_equal_to(__x, __y));
      }
#undef _GLIBCXX_SIMD_MATH_FALLBACK
#undef _GLIBCXX_SIMD_MATH_FALLBACK_MASKRET
#undef _GLIBCXX_SIMD_MATH_FALLBACK_FIXEDRET
    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static _SimdWrapper<_Tp, _Np>
      _S_abs(_SimdWrapper<_Tp, _Np> __x) noexcept
      {
	if constexpr (is_floating_point_v<_Tp>)
	  // `__x._M_data < 0 ? -__x._M_data : __x._M_data` cannot compile
	  // to the efficient sign-bit mask, because -0 < 0 is false
	  return __and(_S_absmask<__vector_type_t<_Tp, _Np>>, __x._M_data);
	else
	  return __x._M_data < 0 ? -__x._M_data : __x._M_data;
      }
    // _S_plus_minus: returns __x + __y - __y without allowing the compiler
    // to optimize the expression back to __x. _S_nearbyint and _S_trunc
    // below rely on the double rounding this performs.
    template <typename _TV, typename _UV>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _TV
      _S_plus_minus(_TV __x, _UV __y)
      {
#if defined __i386__ && !defined __SSE_MATH__
	if constexpr (sizeof(__x) == 8)
	  { // operations on __x would use the FPU's 80-bit registers and
	    // lose the required intermediate rounding; use 4-wide SSE
	    static_assert(is_same_v<_TV, __vector_type_t<float, 2>>);
	    const auto __x4 = __vector_bitcast<float, 4>(__x);
	    if constexpr (is_same_v<_TV, _UV>)
	      return __vector_bitcast<float, 2>(
		       _S_plus_minus(__x4, __vector_bitcast<float, 4>(__y)));
	    else
	      return __vector_bitcast<float, 2>(_S_plus_minus(__x4, __y));
	  }
#endif
#if !defined __clang__ && __GCC_IEC_559 == 0
	if (__builtin_is_constant_evaluated()
	      || (__builtin_constant_p(__x) && __builtin_constant_p(__y)))
	  return (__x + __y) - __y;
	else
	  return [&] {
	    __x += __y;
	    // an empty asm with the sum as in/out operand hides the value
	    // from the optimizer, so (__x + __y) - __y cannot be folded
	    if constexpr (__have_sse)
	      {
		if constexpr (sizeof(__x) >= 16)
		  asm("" : "+x"(__x));
		else if constexpr (is_same_v<__vector_type_t<float, 2>, _TV>)
		  asm("" : "+x"(__x[0]), "+x"(__x[1]));
		else
		  __assert_unreachable<_TV>();
	      }
	    else if constexpr (__have_neon)
	      asm("" : "+w"(__x));
	    else if constexpr (__have_power_vmx)
	      {
		if constexpr (is_same_v<__vector_type_t<float, 2>, _TV>)
		  asm("" : "+fgr"(__x[0]), "+fgr"(__x[1]));
		else
		  asm("" : "+v"(__x));
	      }
	    else
	      asm("" : "+g"(__x));
	    return __x - __y;
	  }();
#else
	return (__x + __y) - __y;
#endif
      }
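    // Illustrative trace of the magic-number rounding this protects
    // (float, round-to-nearest): _S_plus_minus(2.5f, 8388608.f) computes
    // (2.5f + 0x1p23f) - 0x1p23f; the addition drops the fractional bits
    // (ties to even -> 8388610.f), the subtraction restores the magnitude,
    // yielding 2.0f. Reassociation under -fassociative-math would fold
    // the whole expression to 2.5f, defeating the rounding.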
    template <typename _Tp, typename _TVT = _VectorTraits<_Tp>>
      _GLIBCXX_SIMD_INTRINSIC static _Tp
      _S_nearbyint(_Tp __x_) noexcept
      {
	using value_type = typename _TVT::value_type;
	using _V = typename _TVT::type;
	const _V __x = __x_;
	const _V __absx = __and(__x, _S_absmask<_V>);
	static_assert(__CHAR_BIT__ * sizeof(1ull) >= __digits_v<value_type>);
	_GLIBCXX_SIMD_USE_CONSTEXPR _V __shifter_abs
	  = _V() + (1ull << (__digits_v<value_type> - 1));
	const _V __shifter = __or(__and(_S_signmask<_V>, __x), __shifter_abs);
	const _V __shifted = _S_plus_minus(__x, __shifter);
	return __absx < __shifter_abs ? __shifted : __x;
      }
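    // Two details matter above: __shifter carries the sign of __x, so
    // negative inputs are shifted by -2^(digits-1) and round correctly,
    // and lanes with |x| >= 2^(digits-1) are already integral and are
    // returned unchanged rather than shifted (which would be inexact).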
    template <typename _Tp, typename _TVT = _VectorTraits<_Tp>>
      _GLIBCXX_SIMD_INTRINSIC static _Tp
      _S_rint(_Tp __x) noexcept
      { return _SuperImpl::_S_nearbyint(__x); }
    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static _SimdWrapper<_Tp, _Np>
      _S_trunc(_SimdWrapper<_Tp, _Np> __x)
      {
	using _V = __vector_type_t<_Tp, _Np>;
	const _V __absx = __and(__x._M_data, _S_absmask<_V>);
	static_assert(__CHAR_BIT__ * sizeof(1ull) >= __digits_v<_Tp>);
	constexpr _Tp __shifter = 1ull << (__digits_v<_Tp> - 1);
	_V __truncated = _S_plus_minus(__absx, __shifter);
	// _S_plus_minus rounds to nearest; correct down if it rounded up
	__truncated -= __truncated > __absx ? _V() + 1 : _V();
	return __absx < __shifter ? __or(__xor(__absx, __x._M_data), __truncated)
				  : __x._M_data;
      }
    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static _SimdWrapper<_Tp, _Np>
      _S_round(_SimdWrapper<_Tp, _Np> __x)
      {
	const auto __abs_x = _SuperImpl::_S_abs(__x);
	const auto __t_abs = _SuperImpl::_S_trunc(__abs_x)._M_data;
	const auto __r_abs // result without sign
	  = __t_abs + (__abs_x._M_data - __t_abs >= _Tp(.5) ? _Tp(1) : 0);
	return __or(__xor(__abs_x._M_data, __x._M_data), __r_abs);
      }
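    // E.g. _S_round(-2.5): |x| = 2.5, trunc -> 2, fraction 0.5 >= .5 adds
    // 1 -> 3; or-ing the sign bit back (__abs_x ^ __x) gives -3, i.e.
    // round()'s halfway-away-from-zero semantics.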
    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static _SimdWrapper<_Tp, _Np>
      _S_floor(_SimdWrapper<_Tp, _Np> __x)
      {
	const auto __y = _SuperImpl::_S_trunc(__x)._M_data;
	const auto __negative_input
	  = __vector_bitcast<_Tp>(__x._M_data < __vector_broadcast<_Np, _Tp>(0));
	const auto __mask
	  = __andnot(__vector_bitcast<_Tp>(__y == __x._M_data),
		     __negative_input);
	return __or(__andnot(__mask, __y),
		    __and(__mask, __y - __vector_broadcast<_Np, _Tp>(1)));
      }
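    // floor == trunc adjusted down by 1 exactly on lanes that are
    // negative and not already integral: floor(-1.5) -> trunc gives -1.0,
    // the lane is selected by __mask, result -1 - 1 = -2.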
    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static _SimdWrapper<_Tp, _Np>
      _S_ceil(_SimdWrapper<_Tp, _Np> __x)
      {
	const auto __y = _SuperImpl::_S_trunc(__x)._M_data;
	const auto __negative_input
	  = __vector_bitcast<_Tp>(__x._M_data < __vector_broadcast<_Np, _Tp>(0));
	const auto __inv_mask
	  = __or(__vector_bitcast<_Tp>(__y == __x._M_data), __negative_input);
	return __or(__and(__inv_mask, __y),
		    __andnot(__inv_mask, __y + __vector_broadcast<_Np, _Tp>(1)));
      }
    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static _MaskMember<_Tp>
      _S_isnan([[maybe_unused]] _SimdWrapper<_Tp, _Np> __x)
      {
#if __FINITE_MATH_ONLY__
	return {}; // false
#elif !defined __SUPPORT_SNAN__
	return ~(__x._M_data == __x._M_data);
#elif defined __STDC_IEC_559__
	using _Ip = __int_for_sizeof_t<_Tp>;
	const auto __absn = __vector_bitcast<_Ip>(_SuperImpl::_S_abs(__x));
	const auto __infn
	  = __vector_bitcast<_Ip>(__vector_broadcast<_Np>(__infinity_v<_Tp>));
	return __infn < __absn;
#else
#error "Not implemented: how to support SNaN but non-IEC559 floating-point?"
#endif
      }
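    // The bit-pattern test works because integer order matches IEEE order
    // for non-negative values: a NaN has all exponent bits set plus a
    // non-zero mantissa, so after _S_abs its bits compare greater than
    // the bits of +infinity.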
    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static _MaskMember<_Tp>
      _S_isfinite([[maybe_unused]] _SimdWrapper<_Tp, _Np> __x)
      {
#if __FINITE_MATH_ONLY__
	using _UV = typename _MaskMember<_Tp>::_BuiltinType;
	_GLIBCXX_SIMD_USE_CONSTEXPR _UV __alltrue = ~_UV();
	return __alltrue;
#else
	// inf and NaN are the only values with |x| > finite_max
	using _Ip = __int_for_sizeof_t<_Tp>;
	const auto __absn = __vector_bitcast<_Ip>(_SuperImpl::_S_abs(__x));
	const auto __maxn
	  = __vector_bitcast<_Ip>(__vector_broadcast<_Np>(__finite_max_v<_Tp>));
	return __absn <= __maxn;
#endif
      }
    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static _MaskMember<_Tp>
      _S_isunordered(_SimdWrapper<_Tp, _Np> __x, _SimdWrapper<_Tp, _Np> __y)
      { return __or(_S_isnan(__x), _S_isnan(__y)); }
    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static _MaskMember<_Tp>
      _S_signbit(_SimdWrapper<_Tp, _Np> __x)
      {
	using _Ip = __int_for_sizeof_t<_Tp>;
	return __vector_bitcast<_Ip>(__x) < 0;
      }
    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static _MaskMember<_Tp>
      _S_isinf([[maybe_unused]] _SimdWrapper<_Tp, _Np> __x)
      {
#if __FINITE_MATH_ONLY__
	return {}; // false
#else
	return _SuperImpl::template _S_equal_to<_Tp, _Np>(
		 _SuperImpl::_S_abs(__x),
		 __vector_broadcast<_Np>(__infinity_v<_Tp>));
#endif
      }
    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static _MaskMember<_Tp>
      _S_isnormal(_SimdWrapper<_Tp, _Np> __x)
      {
	using _Ip = __int_for_sizeof_t<_Tp>;
	const auto __absn = __vector_bitcast<_Ip>(_SuperImpl::_S_abs(__x));
	const auto __minn
	  = __vector_bitcast<_Ip>(__vector_broadcast<_Np>(__norm_min_v<_Tp>));
#if __FINITE_MATH_ONLY__
	return __absn >= __minn;
#else
	const auto __maxn
	  = __vector_bitcast<_Ip>(__vector_broadcast<_Np>(__finite_max_v<_Tp>));
	return __minn <= __absn && __absn <= __maxn;
#endif
      }
    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static __fixed_size_storage_t<int, _Np>
      _S_fpclassify(_SimdWrapper<_Tp, _Np> __x)
      {
	using _I = __int_for_sizeof_t<_Tp>;
	const auto __xn
	  = __vector_bitcast<_I>(__to_intrin(_SuperImpl::_S_abs(__x)));
	constexpr size_t _NI = sizeof(__xn) / sizeof(_I);
	_GLIBCXX_SIMD_USE_CONSTEXPR auto __minn
	  = __vector_bitcast<_I>(__vector_broadcast<_NI>(__norm_min_v<_Tp>));
	_GLIBCXX_SIMD_USE_CONSTEXPR auto __infn
	  = __vector_bitcast<_I>(__vector_broadcast<_NI>(__infinity_v<_Tp>));

	_GLIBCXX_SIMD_USE_CONSTEXPR auto __fp_normal
	  = __vector_broadcast<_NI, _I>(FP_NORMAL);
#if !__FINITE_MATH_ONLY__
	_GLIBCXX_SIMD_USE_CONSTEXPR auto __fp_nan
	  = __vector_broadcast<_NI, _I>(FP_NAN);
	_GLIBCXX_SIMD_USE_CONSTEXPR auto __fp_infinite
	  = __vector_broadcast<_NI, _I>(FP_INFINITE);
#endif
#ifndef __FAST_MATH__
	_GLIBCXX_SIMD_USE_CONSTEXPR auto __fp_subnormal
	  = __vector_broadcast<_NI, _I>(FP_SUBNORMAL);
#endif
	_GLIBCXX_SIMD_USE_CONSTEXPR auto __fp_zero
	  = __vector_broadcast<_NI, _I>(FP_ZERO);

	__vector_type_t<_I, _NI>
	  __tmp = __xn < __minn
#ifdef __FAST_MATH__
		    ? __fp_zero
#else
		    ? (__xn == 0 ? __fp_zero : __fp_subnormal)
#endif
#if __FINITE_MATH_ONLY__
		    : __fp_normal;
#else
		    : (__xn < __infn
			 ? __fp_normal
			 : (__xn == __infn ? __fp_infinite : __fp_nan));
#endif

	if constexpr (sizeof(_I) == sizeof(int))
	  {
	    using _FixedInt = __fixed_size_storage_t<int, _Np>;
	    const auto __as_int = __vector_bitcast<int, _Np>(__tmp);
	    if constexpr (_FixedInt::_S_tuple_size == 1)
	      return {__as_int};
	    else if constexpr (_FixedInt::_S_tuple_size == 2
				 && is_same_v<
				      typename _FixedInt::_SecondType::_FirstAbi,
				      simd_abi::scalar>)
	      return {__extract<0, 2>(__as_int), __as_int[_Np - 1]};
	    else if constexpr (_FixedInt::_S_tuple_size == 2)
	      return {__extract<0, 2>(__as_int),
		      __auto_bitcast(__extract<1, 2>(__as_int))};
	    else
	      __assert_unreachable<_Tp>();
	  }
	else if constexpr (_Np == 2 && sizeof(_I) == 8
			     && __fixed_size_storage_t<int, _Np>::_S_tuple_size
				  == 2)
	  {
	    const auto __aslong = __vector_bitcast<_LLong>(__tmp);
	    return {int(__aslong[0]), {int(__aslong[1])}};
	  }
#if _GLIBCXX_SIMD_X86INTRIN
	else if constexpr (sizeof(_Tp) == 8 && sizeof(__tmp) == 32
			     && __fixed_size_storage_t<int, _Np>::_S_tuple_size
				  == 1)
	  return {_mm_packs_epi32(__to_intrin(__lo128(__tmp)),
				  __to_intrin(__hi128(__tmp)))};
	else if constexpr (sizeof(_Tp) == 8 && sizeof(__tmp) == 64
			     && __fixed_size_storage_t<int, _Np>::_S_tuple_size
				  == 1)
	  return {_mm512_cvtepi64_epi32(__to_intrin(__tmp))};
#endif
	else if constexpr (__fixed_size_storage_t<int, _Np>::_S_tuple_size == 1)
	  return {__call_with_subscripts<_Np>(__vector_bitcast<_LLong>(__tmp),
					      [](auto... __l) {
						return __make_wrapper<int>(__l...);
					      })};
	else
	  __assert_unreachable<_Tp>();
      }
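    // fpclassify returns int per element independent of sizeof(_Tp),
    // hence the fixed-size tuple return type. E.g. for 4 doubles in 32
    // bytes, the x86 branch above narrows the four 64-bit FP_* values to
    // four 32-bit ints with a single _mm_packs_epi32 (safe because every
    // FP_* constant fits in 16 bits).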
    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static void
      _S_increment(_SimdWrapper<_Tp, _Np>& __x)
      { __x = __x._M_data + 1; }

    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static void
      _S_decrement(_SimdWrapper<_Tp, _Np>& __x)
      { __x = __x._M_data - 1; }
    template <typename _Tp, size_t _Np, typename _Up>
      _GLIBCXX_SIMD_INTRINSIC constexpr static void
      _S_set(_SimdWrapper<_Tp, _Np>& __v, int __i, _Up&& __x) noexcept
      { __v._M_set(__i, static_cast<_Up&&>(__x)); }
    template <typename _Tp, typename _K, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static void
      _S_masked_assign(_SimdWrapper<_K, _Np> __k, _SimdWrapper<_Tp, _Np>& __lhs,
		       __type_identity_t<_SimdWrapper<_Tp, _Np>> __rhs)
      {
	if (__k._M_is_constprop_none_of())
	  return;
	else if (__k._M_is_constprop_all_of())
	  __lhs = __rhs;
	else
	  __lhs = _CommonImpl::_S_blend(__k, __lhs, __rhs);
      }
    template <typename _Tp, typename _K, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static void
      _S_masked_assign(_SimdWrapper<_K, _Np> __k, _SimdWrapper<_Tp, _Np>& __lhs,
		       __type_identity_t<_Tp> __rhs)
      {
	if (__k._M_is_constprop_none_of())
	  return;
	else if (__k._M_is_constprop_all_of())
	  __lhs = __vector_broadcast<_Np>(__rhs);
	else if (__builtin_constant_p(__rhs) && __rhs == 0)
	  {
	    if constexpr (!is_same_v<bool, _K>)
	      // the __andnot optimization only makes sense if __k._M_data
	      // is a vector register
	      __lhs._M_data
		= __andnot(__vector_bitcast<_Tp>(__k), __lhs._M_data);
	    else
	      // for bit masks a masked zero-move blend is best
	      __lhs
		= _CommonImpl::_S_blend(__k, __lhs, _SimdWrapper<_Tp, _Np>());
	  }
	else
	  __lhs = _CommonImpl::_S_blend(__k, __lhs,
					_SimdWrapper<_Tp, _Np>(
					  __vector_broadcast<_Np>(__rhs)));
      }
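    // The constant __rhs == 0 case turns `where(__k, __v) = 0` into a
    // single __andnot with the mask register instead of a blend; for
    // bit-mask ABIs (_K == bool) a masked zero-move blend is used instead.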
    template <typename _Op, typename _Tp, typename _K, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static void
      _S_masked_cassign(const _SimdWrapper<_K, _Np> __k,
			_SimdWrapper<_Tp, _Np>& __lhs,
			const __type_identity_t<_SimdWrapper<_Tp, _Np>> __rhs,
			_Op __op)
      {
	if (__k._M_is_constprop_none_of())
	  return;
	else if (__k._M_is_constprop_all_of())
	  __lhs = __op(_SuperImpl{}, __lhs, __rhs);
	else
	  __lhs = _CommonImpl::_S_blend(__k, __lhs,
					__op(_SuperImpl{}, __lhs, __rhs));
      }
    template <typename _Op, typename _Tp, typename _K, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static void
      _S_masked_cassign(const _SimdWrapper<_K, _Np> __k,
			_SimdWrapper<_Tp, _Np>& __lhs,
			const __type_identity_t<_Tp> __rhs, _Op __op)
      { _S_masked_cassign(__k, __lhs, __vector_broadcast<_Np>(__rhs), __op); }
    template <template <typename> class _Op, typename _Tp, typename _K,
	      size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static _SimdWrapper<_Tp, _Np>
      _S_masked_unary(const _SimdWrapper<_K, _Np> __k,
		      const _SimdWrapper<_Tp, _Np> __v)
      {
	if (__k._M_is_constprop_none_of())
	  return __v;
	auto __vv = _M_make_simd(__v);
	_Op<decltype(__vv)> __op;
	if (__k._M_is_constprop_all_of())
	  return __data(__op(__vv));
	else
	  return _CommonImpl::_S_blend(__k, __v, __data(__op(__vv)));
      }
  };
  struct _MaskImplBuiltinMixin
  {
    template <typename _Tp>
      using _TypeTag = _Tp*;
    template <typename _Up, size_t _ToN = 1>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Up, _ToN>
      _S_to_maskvector(bool __x)
      {
	static_assert(is_same_v<_Up, __int_for_sizeof_t<_Up>>);
	return __x ? __vector_type_t<_Up, _ToN>{~_Up()}
		   : __vector_type_t<_Up, _ToN>{};
      }
    template <typename _Up, size_t _UpN = 0, size_t _Np, bool _Sanitized,
	      size_t _ToN = _UpN == 0 ? _Np : _UpN>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Up, _ToN>
      _S_to_maskvector(_BitMask<_Np, _Sanitized> __x)
      {
	static_assert(is_same_v<_Up, __int_for_sizeof_t<_Up>>);
	return __generate_vector<__vector_type_t<_Up, _ToN>>(
		 [&](auto __i) constexpr {
		   if constexpr (__i < _Np)
		     return __x[__i] ? ~_Up() : _Up();
		   else
		     return _Up();
		 });
      }
    template <typename _Up, size_t _UpN = 0, typename _Tp, size_t _Np,
	      size_t _ToN = _UpN == 0 ? _Np : _UpN>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Up, _ToN>
      _S_to_maskvector(_SimdWrapper<_Tp, _Np> __x)
      {
	static_assert(is_same_v<_Up, __int_for_sizeof_t<_Up>>);
	using _TW = _SimdWrapper<_Tp, _Np>;
	using _UW = _SimdWrapper<_Up, _ToN>;
	if constexpr (sizeof(_Up) == sizeof(_Tp) && sizeof(_TW) == sizeof(_UW))
	  return __wrapper_bitcast<_Up, _ToN>(__x);
	else if constexpr (is_same_v<_Tp, bool>) // bits -> vector
	  return _S_to_maskvector<_Up, _ToN>(_BitMask<_Np>(__x._M_data));
	else // vector -> vector
	  return __generate_vector<__vector_type_t<_Up, _ToN>>(
		   [&](auto __i) constexpr {
		     if constexpr (__i < _Np)
		       return _Up(__x[__i.value]);
		     else
		       return _Up();
		   });
      }
    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _SanitizedBitMask<_Np>
      _S_to_bits(_SimdWrapper<_Tp, _Np> __x)
      {
	static_assert(!is_same_v<_Tp, bool>);
	static_assert(_Np <= __CHAR_BIT__ * sizeof(_ULLong));
	using _Up = make_unsigned_t<__int_for_sizeof_t<_Tp>>;
	const auto __bools
	  = __vector_bitcast<_Up>(__x) >> (sizeof(_Up) * __CHAR_BIT__ - 1);
	_ULLong __r = 0;
	__execute_n_times<_Np>(
	  [&](auto __i) { __r |= _ULLong(__bools[__i.value]) << __i; });
	return __r;
      }
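    // E.g. a 4-lane int mask {-1, 0, -1, 0}: the unsigned shift by 31
    // leaves {1, 0, 1, 0} and the loop packs that into the bitmask 0b0101.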
  };

  template <typename _Abi>
  struct _MaskImplBuiltin : _MaskImplBuiltinMixin
  {
    using _MaskImplBuiltinMixin::_S_to_bits;
    using _MaskImplBuiltinMixin::_S_to_maskvector;

    template <typename _Tp>
      using _SimdMember = typename _Abi::template __traits<_Tp>::_SimdMember;

    template <typename _Tp>
      using _MaskMember = typename _Abi::template _MaskMember<_Tp>;

    using _SuperImpl = typename _Abi::_MaskImpl;
    using _CommonImpl = typename _Abi::_CommonImpl;

    template <typename _Tp>
      static constexpr size_t _S_size = simd_size_v<_Tp, _Abi>;
    template <typename _Tp>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _MaskMember<_Tp>
      _S_broadcast(bool __x)
      {
	return __x ? _Abi::template _S_implicit_mask<_Tp>()
		   : _MaskMember<_Tp>();
      }
    template <typename _Tp>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _MaskMember<_Tp>
      _S_load(const bool* __mem)
      {
	using _I = __int_for_sizeof_t<_Tp>;
	if constexpr (sizeof(_Tp) == sizeof(bool))
	  {
	    const auto __bools
	      = _CommonImpl::template _S_load<_I, _S_size<_Tp>>(__mem);
	    // bool is {0, 1}, everything else is UB
	    return __bools > 0;
	  }
	else
	  return __generate_vector<_I, _S_size<_Tp>>([&](auto __i) constexpr {
		   return __mem[__i] ? ~_I() : _I();
		 });
      }
    template <typename _Tp, size_t _Np, bool _Sanitized>
      _GLIBCXX_SIMD_INTRINSIC static constexpr auto
      _S_convert(_BitMask<_Np, _Sanitized> __x)
      {
	if constexpr (__is_builtin_bitmask_abi<_Abi>())
	  return _SimdWrapper<bool, simd_size_v<_Tp, _Abi>>(__x._M_to_bits());
	else
	  return _SuperImpl::template _S_to_maskvector<__int_for_sizeof_t<_Tp>,
						       _S_size<_Tp>>(
		   __x._M_sanitized());
      }
    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr auto
      _S_convert(_SimdWrapper<bool, _Np> __x)
      {
	if constexpr (__is_builtin_bitmask_abi<_Abi>())
	  return _SimdWrapper<bool, simd_size_v<_Tp, _Abi>>(__x._M_data);
	else
	  return _SuperImpl::template _S_to_maskvector<__int_for_sizeof_t<_Tp>,
						       _S_size<_Tp>>(
		   _BitMask<_Np>(__x._M_data)._M_sanitized());
      }
    template <typename _Tp, typename _Up, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr auto
      _S_convert(_SimdWrapper<_Up, _Np> __x)
      {
	if constexpr (__is_builtin_bitmask_abi<_Abi>())
	  return _SimdWrapper<bool, simd_size_v<_Tp, _Abi>>(
		   _SuperImpl::_S_to_bits(__x));
	else
	  return _SuperImpl::template _S_to_maskvector<__int_for_sizeof_t<_Tp>,
						       _S_size<_Tp>>(__x);
      }
    template <typename _Tp, typename _Up, typename _UAbi>
      _GLIBCXX_SIMD_INTRINSIC static constexpr auto
      _S_convert(simd_mask<_Up, _UAbi> __x)
      {
	if constexpr (__is_builtin_bitmask_abi<_Abi>())
	  {
	    using _R = _SimdWrapper<bool, simd_size_v<_Tp, _Abi>>;
	    if constexpr (__is_builtin_bitmask_abi<_UAbi>()) // bits -> bits
	      return _R(__data(__x));
	    else if constexpr (__is_scalar_abi<_UAbi>()) // bool -> bits
	      return _R(__data(__x));
	    else if constexpr (__is_fixed_size_abi_v<_UAbi>) // bitset -> bits
	      return _R(__data(__x)._M_to_bits());
	    else // vector -> bits
	      return _R(_UAbi::_MaskImpl::_S_to_bits(__data(__x))._M_to_bits());
	  }
	else
	  return _SuperImpl::template _S_to_maskvector<__int_for_sizeof_t<_Tp>,
						       _S_size<_Tp>>(
		   __data(__x));
      }
    template <typename _Tp, size_t _Np>
      static inline _SimdWrapper<_Tp, _Np>
      _S_masked_load(_SimdWrapper<_Tp, _Np> __merge,
		     _SimdWrapper<_Tp, _Np> __mask, const bool* __mem) noexcept
      {
	auto __tmp = __wrapper_bitcast<__int_for_sizeof_t<_Tp>>(__merge);
	_BitOps::_S_bit_iteration(_SuperImpl::_S_to_bits(__mask),
				  [&](auto __i) {
				    __tmp._M_set(__i, -__mem[__i]);
				  });
	__merge = __wrapper_bitcast<_Tp>(__tmp);
	return __merge;
      }
    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static void
      _S_store(_SimdWrapper<_Tp, _Np> __v, bool* __mem) noexcept
      {
	__execute_n_times<_Np>([&](auto __i) constexpr {
	  __mem[__i] = __v[__i];
	});
      }
    template <typename _Tp, size_t _Np>
      static inline void
      _S_masked_store(const _SimdWrapper<_Tp, _Np> __v, bool* __mem,
		      const _SimdWrapper<_Tp, _Np> __k) noexcept
      {
	_BitOps::_S_bit_iteration(
	  _SuperImpl::_S_to_bits(__k), [&](auto __i) constexpr {
	    __mem[__i] = __v[__i];
	  });
      }
    template <size_t _Np, typename _Tp>
      _GLIBCXX_SIMD_INTRINSIC static _MaskMember<_Tp>
      _S_from_bitmask(_SanitizedBitMask<_Np> __bits, _TypeTag<_Tp>)
      {
	return _SuperImpl::template _S_to_maskvector<_Tp, _S_size<_Tp>>(__bits);
      }
    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Tp, _Np>
      _S_logical_and(const _SimdWrapper<_Tp, _Np>& __x,
		     const _SimdWrapper<_Tp, _Np>& __y)
      { return __and(__x._M_data, __y._M_data); }

    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Tp, _Np>
      _S_logical_or(const _SimdWrapper<_Tp, _Np>& __x,
		    const _SimdWrapper<_Tp, _Np>& __y)
      { return __or(__x._M_data, __y._M_data); }
    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Tp, _Np>
      _S_bit_not(const _SimdWrapper<_Tp, _Np>& __x)
      {
	if constexpr (_Abi::template _S_is_partial<_Tp>)
	  return __andnot(__x, __wrapper_bitcast<_Tp>(
				 _Abi::template _S_implicit_mask<_Tp>()));
	else
	  return __not(__x._M_data);
      }
    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Tp, _Np>
      _S_bit_and(const _SimdWrapper<_Tp, _Np>& __x,
		 const _SimdWrapper<_Tp, _Np>& __y)
      { return __and(__x._M_data, __y._M_data); }

    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Tp, _Np>
      _S_bit_or(const _SimdWrapper<_Tp, _Np>& __x,
		const _SimdWrapper<_Tp, _Np>& __y)
      { return __or(__x._M_data, __y._M_data); }

    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdWrapper<_Tp, _Np>
      _S_bit_xor(const _SimdWrapper<_Tp, _Np>& __x,
		 const _SimdWrapper<_Tp, _Np>& __y)
      { return __xor(__x._M_data, __y._M_data); }
    template <typename _Tp, size_t _Np>
      static constexpr void
      _S_set(_SimdWrapper<_Tp, _Np>& __k, int __i, bool __x) noexcept
      {
	if constexpr (is_same_v<_Tp, bool>)
	  __k._M_set(__i, __x);
	else
	  {
	    static_assert(is_same_v<_Tp, __int_for_sizeof_t<_Tp>>);
	    if (__builtin_is_constant_evaluated())
	      {
		__k = __generate_from_n_evaluations<_Np,
						    __vector_type_t<_Tp, _Np>>(
			[&](auto __j) {
			  if (__i == static_cast<int>(__j))
			    return _Tp(-__x);
			  else
			    return __k[+__j];
			});
	      }
	    else
	      __k._M_data[__i] = -__x;
	  }
      }
    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static void
      _S_masked_assign(_SimdWrapper<_Tp, _Np> __k,
		       _SimdWrapper<_Tp, _Np>& __lhs,
		       __type_identity_t<_SimdWrapper<_Tp, _Np>> __rhs)
      { __lhs = _CommonImpl::_S_blend(__k, __lhs, __rhs); }
    template <typename _Tp, size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC static void
      _S_masked_assign(_SimdWrapper<_Tp, _Np> __k,
		       _SimdWrapper<_Tp, _Np>& __lhs, bool __rhs)
      {
	if (__builtin_constant_p(__rhs))
	  {
	    if (__rhs == false)
	      __lhs = __andnot(__k, __lhs);
	    else
	      __lhs = __or(__k, __lhs);
	    return;
	  }
	__lhs = _CommonImpl::_S_blend(__k, __lhs,
				      __data(simd_mask<_Tp, _Abi>(__rhs)));
      }
    template <typename _Tp>
      _GLIBCXX_SIMD_INTRINSIC static bool
      _S_all_of(simd_mask<_Tp, _Abi> __k)
      {
	return __call_with_subscripts(
		 __data(__k), make_index_sequence<_S_size<_Tp>>(),
		 [](const auto... __ent) constexpr
		 { return (... && !(__ent == 0)); });
      }

    template <typename _Tp>
      _GLIBCXX_SIMD_INTRINSIC static bool
      _S_any_of(simd_mask<_Tp, _Abi> __k)
      {
	return __call_with_subscripts(
		 __data(__k), make_index_sequence<_S_size<_Tp>>(),
		 [](const auto... __ent) constexpr
		 { return (... || !(__ent == 0)); });
      }

    template <typename _Tp>
      _GLIBCXX_SIMD_INTRINSIC static bool
      _S_none_of(simd_mask<_Tp, _Abi> __k)
      {
	return __call_with_subscripts(
		 __data(__k), make_index_sequence<_S_size<_Tp>>(),
		 [](const auto... __ent) constexpr
		 { return (... && (__ent == 0)); });
      }
    template <typename _Tp>
      _GLIBCXX_SIMD_INTRINSIC static bool
      _S_some_of(simd_mask<_Tp, _Abi> __k)
      {
	const int __n_true = _S_popcount(__k);
	return __n_true > 0 && __n_true < int(_S_size<_Tp>);
      }
    template <typename _Tp>
      _GLIBCXX_SIMD_INTRINSIC static int
      _S_popcount(simd_mask<_Tp, _Abi> __k)
      {
	using _I = __int_for_sizeof_t<_Tp>;
	if constexpr (is_default_constructible_v<simd<_I, _Abi>>)
	  return -reduce(
	    simd<_I, _Abi>(__private_init, __wrapper_bitcast<_I>(__data(__k))));
	else
	  return -reduce(__bit_cast<rebind_simd_t<_I, simd<_Tp, _Abi>>>(
	    simd<_Tp, _Abi>(__private_init, __data(__k))));
      }
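    // Every true lane is the integer -1, so summing the lanes yields
    // minus the number of true entries: {-1, -1, 0, 0} reduces to -2 and
    // -reduce(...) returns 2.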
    template <typename _Tp>
      _GLIBCXX_SIMD_INTRINSIC static int
      _S_find_first_set(simd_mask<_Tp, _Abi> __k)
      {
	return std::__countr_zero(
	  _SuperImpl::_S_to_bits(__data(__k))._M_to_bits());
      }
    template <typename _Tp>
      _GLIBCXX_SIMD_INTRINSIC static int
      _S_find_last_set(simd_mask<_Tp, _Abi> __k)
      {
	return std::__bit_width(
		 _SuperImpl::_S_to_bits(__data(__k))._M_to_bits()) - 1;
      }
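    // __bit_width(__bits) - 1 is the index of the highest set bit, e.g.
    // bits 0b0110 -> __bit_width == 3 -> index 2. As with find_first_set,
    // at least one element of __k must be true.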
  };

_GLIBCXX_SIMD_END_NAMESPACE

#endif // __cplusplus >= 201703L
#endif // _GLIBCXX_EXPERIMENTAL_SIMD_ABIS_H_