AVX512/PacketMath.h
1 // This file is part of Eigen, a lightweight C++ template library
2 // for linear algebra.
3 //
4 // Copyright (C) 2016 Benoit Steiner (benoit.steiner.goog@gmail.com)
5 //
6 // This Source Code Form is subject to the terms of the Mozilla
7 // Public License v. 2.0. If a copy of the MPL was not distributed
8 // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
9 
10 #ifndef EIGEN_PACKET_MATH_AVX512_H
11 #define EIGEN_PACKET_MATH_AVX512_H
12 
13 // IWYU pragma: private
14 #include "../../InternalHeaderCheck.h"
15 
16 namespace Eigen {
17 
18 namespace internal {
19 
20 #ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD
21 #define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 8
22 #endif
23 
24 #ifndef EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS
25 #define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS 32
26 #endif
27 
28 #ifdef EIGEN_VECTORIZE_FMA
29 #ifndef EIGEN_HAS_SINGLE_INSTRUCTION_MADD
30 #define EIGEN_HAS_SINGLE_INSTRUCTION_MADD
31 #endif
32 #endif
33 
34 typedef __m512 Packet16f;
35 typedef __m512i Packet16i;
36 typedef __m512d Packet8d;
37 typedef eigen_packet_wrapper<__m512i, 1> Packet8l;
38 #ifndef EIGEN_VECTORIZE_AVX512FP16
39 typedef eigen_packet_wrapper<__m256i, 1> Packet16h;
40 #endif
41 typedef eigen_packet_wrapper<__m256i, 2> Packet16bf;
42 
43 template <>
44 struct is_arithmetic<__m512> {
45  enum { value = true };
46 };
47 template <>
48 struct is_arithmetic<__m512i> {
49  enum { value = true };
50 };
51 template <>
52 struct is_arithmetic<__m512d> {
53  enum { value = true };
54 };
55 template <>
56 struct is_arithmetic<Packet8l> {
57  enum { value = true };
58 };
59 
60 #ifndef EIGEN_VECTORIZE_AVX512FP16
61 template <>
62 struct is_arithmetic<Packet16h> {
63  enum { value = true };
64 };
65 
66 template <>
67 struct packet_traits<half> : default_packet_traits {
68  typedef Packet16h type;
69  // There is no half-size packet for Packet16h.
70  typedef Packet16h half;
71  enum {
72  Vectorizable = 1,
73  AlignedOnScalar = 1,
74  size = 16,
75 
76  HasCmp = 1,
77  HasAdd = 1,
78  HasSub = 1,
79  HasMul = 1,
80  HasDiv = 1,
81  HasNegate = 1,
82  HasAbs = 1,
83  HasAbs2 = 0,
84  HasMin = 1,
85  HasMax = 1,
86  HasConj = 1,
87  HasSetLinear = 0,
88  HasSqrt = 1,
89  HasRsqrt = 1,
90  HasLog = 1,
91  HasLog1p = 1,
92  HasExp = 1,
93  HasExpm1 = 1,
94  HasBessel = 1,
95  HasNdtri = 1,
96  HasSin = EIGEN_FAST_MATH,
97  HasCos = EIGEN_FAST_MATH,
98  HasTanh = EIGEN_FAST_MATH,
99  HasErf = EIGEN_FAST_MATH,
100  HasBlend = 0
101  };
102 };
103 #endif
104 
105 template <>
106 struct packet_traits<float> : default_packet_traits {
107  typedef Packet16f type;
108  typedef Packet8f half;
109  enum {
110  Vectorizable = 1,
111  AlignedOnScalar = 1,
112  size = 16,
113 
114  HasAbs = 1,
115  HasMin = 1,
116  HasMax = 1,
117  HasConj = 1,
118  HasBlend = 1,
119  HasSin = EIGEN_FAST_MATH,
120  HasCos = EIGEN_FAST_MATH,
121  HasACos = 1,
122  HasASin = 1,
123  HasATan = 1,
124  HasATanh = 1,
125  HasSqrt = 1,
126  HasRsqrt = 1,
127  HasLog = 1,
128  HasLog1p = 1,
129  HasExpm1 = 1,
130  HasNdtri = 1,
131  HasBessel = 1,
132  HasExp = 1,
133  HasPow = 1,
134  HasTanh = EIGEN_FAST_MATH,
135  HasErf = EIGEN_FAST_MATH,
136  HasErfc = EIGEN_FAST_MATH,
137  HasCmp = 1,
138  HasDiv = 1
139  };
140 };
141 template <>
142 struct packet_traits<double> : default_packet_traits {
143  typedef Packet8d type;
144  typedef Packet4d half;
145  enum {
146  Vectorizable = 1,
147  AlignedOnScalar = 1,
148  size = 8,
149  HasBlend = 1,
150  HasSqrt = 1,
151  HasRsqrt = 1,
152  HasSin = EIGEN_FAST_MATH,
153  HasCos = EIGEN_FAST_MATH,
154  HasLog = 1,
155  HasExp = 1,
156  HasATan = 1,
157  HasTanh = EIGEN_FAST_MATH,
158  HasErf = EIGEN_FAST_MATH,
159  HasErfc = EIGEN_FAST_MATH,
160  HasATanh = 1,
161  HasCmp = 1,
162  HasDiv = 1
163  };
164 };
165 
166 template <>
167 struct packet_traits<int> : default_packet_traits {
168  typedef Packet16i type;
169  typedef Packet8i half;
170  enum { Vectorizable = 1, AlignedOnScalar = 1, HasBlend = 0, HasCmp = 1, HasDiv = 1, size = 16 };
171 };
172 
173 template <>
174 struct packet_traits<int64_t> : default_packet_traits {
175  typedef Packet8l type;
176  typedef Packet4l half;
177  enum { Vectorizable = 1, AlignedOnScalar = 1, HasCmp = 1, size = 8 };
178 };
179 
180 template <>
181 struct unpacket_traits<Packet16f> {
182  typedef float type;
183  typedef Packet8f half;
184  typedef Packet16i integer_packet;
185  typedef uint16_t mask_t;
186  enum {
187  size = 16,
188  alignment = Aligned64,
189  vectorizable = true,
190  masked_load_available = true,
191  masked_store_available = true,
192  masked_fpops_available = true
193  };
194 };
195 template <>
196 struct unpacket_traits<Packet8d> {
197  typedef double type;
198  typedef Packet4d half;
199  typedef Packet8l integer_packet;
200  typedef uint8_t mask_t;
201  enum {
202  size = 8,
203  alignment = Aligned64,
204  vectorizable = true,
205  masked_load_available = true,
206  masked_store_available = true,
207  masked_fpops_available = true
208  };
209 };
210 template <>
211 struct unpacket_traits<Packet16i> {
212  typedef int type;
213  typedef Packet8i half;
214  enum {
215  size = 16,
216  alignment = Aligned64,
217  vectorizable = true,
218  masked_load_available = false,
219  masked_store_available = false
220  };
221 };
222 
223 template <>
224 struct unpacket_traits<Packet8l> {
225  typedef int64_t type;
226  typedef Packet4l half;
227  enum {
228  size = 8,
229  alignment = Aligned64,
230  vectorizable = true,
231  masked_load_available = false,
232  masked_store_available = false
233  };
234 };
235 
236 #ifndef EIGEN_VECTORIZE_AVX512FP16
237 template <>
238 struct unpacket_traits<Packet16h> {
239  typedef Eigen::half type;
240  typedef Packet8h half;
241  enum {
242  size = 16,
243  alignment = Aligned32,
244  vectorizable = true,
245  masked_load_available = false,
246  masked_store_available = false
247  };
248 };
249 #endif
250 
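// The unpacket_traits specializations above expose, per packet type, the
// scalar type, the half-width packet, and (for float/double) a mask_t integer
// with one bit per lane for the masked overloads defined later in this file.
// A minimal illustrative helper built only on those traits (hypothetical, not
// part of Eigen's API):
//
//   template <typename Packet>
//   Packet load_first_n(const typename unpacket_traits<Packet>::type* ptr, int n) {
//     typedef typename unpacket_traits<Packet>::mask_t mask_t;
//     // low n bits enable the first n lanes (requires n < size)
//     mask_t umask = static_cast<mask_t>((uint64_t(1) << n) - 1);
//     return ploadu<Packet>(ptr, umask);
//   }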
251 template <>
252 EIGEN_STRONG_INLINE Packet16f pset1<Packet16f>(const float& from) {
253  return _mm512_set1_ps(from);
254 }
255 template <>
256 EIGEN_STRONG_INLINE Packet8d pset1<Packet8d>(const double& from) {
257  return _mm512_set1_pd(from);
258 }
259 template <>
260 EIGEN_STRONG_INLINE Packet16i pset1<Packet16i>(const int& from) {
261  return _mm512_set1_epi32(from);
262 }
263 template <>
264 EIGEN_STRONG_INLINE Packet8l pset1<Packet8l>(const int64_t& from) {
265  return _mm512_set1_epi64(from);
266 }
267 
268 template <>
269 EIGEN_STRONG_INLINE Packet16f pset1frombits<Packet16f>(unsigned int from) {
270  return _mm512_castsi512_ps(_mm512_set1_epi32(from));
271 }
272 
273 template <>
274 EIGEN_STRONG_INLINE Packet8d pset1frombits<Packet8d>(const numext::uint64_t from) {
275  return _mm512_castsi512_pd(_mm512_set1_epi64(from));
276 }
277 
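// pset1frombits broadcasts a raw bit pattern into every lane, which is how
// floating-point masks and special constants are materialized. Illustrative
// sketch (values assumed for illustration only):
//
//   const Packet16f sign_mask = pset1frombits<Packet16f>(0x80000000u);  // -0.0f in all lanes
//   const Packet8d abs_mask =
//       pset1frombits<Packet8d>(static_cast<numext::uint64_t>(0x7fffffffffffffffull));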
278 template <>
279 EIGEN_STRONG_INLINE Packet16f pzero(const Packet16f& /*a*/) {
280  return _mm512_setzero_ps();
281 }
282 template <>
283 EIGEN_STRONG_INLINE Packet8d pzero(const Packet8d& /*a*/) {
284  return _mm512_setzero_pd();
285 }
286 template <>
287 EIGEN_STRONG_INLINE Packet16i pzero(const Packet16i& /*a*/) {
288  return _mm512_setzero_si512();
289 }
290 
291 template <>
292 EIGEN_STRONG_INLINE Packet8l pzero(const Packet8l& /*a*/) {
293  return _mm512_setzero_si512();
294 }
295 
296 template <>
297 EIGEN_STRONG_INLINE Packet16f peven_mask(const Packet16f& /*a*/) {
298  return _mm512_castsi512_ps(_mm512_set_epi32(0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1));
299 }
300 template <>
301 EIGEN_STRONG_INLINE Packet16i peven_mask(const Packet16i& /*a*/) {
302  return _mm512_set_epi32(0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1);
303 }
304 template <>
305 EIGEN_STRONG_INLINE Packet8d peven_mask(const Packet8d& /*a*/) {
306  return _mm512_castsi512_pd(_mm512_set_epi32(0, 0, -1, -1, 0, 0, -1, -1, 0, 0, -1, -1, 0, 0, -1, -1));
307 }
308 template <>
309 EIGEN_STRONG_INLINE Packet8l peven_mask(const Packet8l& /*a*/) {
310  return _mm512_set_epi32(0, 0, -1, -1, 0, 0, -1, -1, 0, 0, -1, -1, 0, 0, -1, -1);
311 }
312 
313 template <>
314 EIGEN_STRONG_INLINE Packet16f pload1<Packet16f>(const float* from) {
315 #if (EIGEN_COMP_GNUC != 0) || (EIGEN_COMP_CLANG != 0)
316  // Inline asm here helps reduce some register spilling in TRSM kernels.
317  // See note in unrolls::gemm::microKernel in TrsmKernel.h
318  Packet16f ret;
319  __asm__("vbroadcastss %[mem], %[dst]" : [dst] "=v"(ret) : [mem] "m"(*from));
320  return ret;
321 #else
322  return _mm512_broadcastss_ps(_mm_load_ps1(from));
323 #endif
324 }
325 template <>
326 EIGEN_STRONG_INLINE Packet8d pload1<Packet8d>(const double* from) {
327 #if (EIGEN_COMP_GNUC != 0) || (EIGEN_COMP_CLANG != 0)
328  Packet8d ret;
329  __asm__("vbroadcastsd %[mem], %[dst]" : [dst] "=v"(ret) : [mem] "m"(*from));
330  return ret;
331 #else
332  return _mm512_set1_pd(*from);
333 #endif
334 }
335 
336 template <>
337 EIGEN_STRONG_INLINE Packet16f plset<Packet16f>(const float& a) {
338  return _mm512_add_ps(_mm512_set1_ps(a), _mm512_set_ps(15.0f, 14.0f, 13.0f, 12.0f, 11.0f, 10.0f, 9.0f, 8.0f, 7.0f,
339  6.0f, 5.0f, 4.0f, 3.0f, 2.0f, 1.0f, 0.0f));
340 }
341 template <>
342 EIGEN_STRONG_INLINE Packet8d plset<Packet8d>(const double& a) {
343  return _mm512_add_pd(_mm512_set1_pd(a), _mm512_set_pd(7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, 0.0));
344 }
345 template <>
346 EIGEN_STRONG_INLINE Packet16i plset<Packet16i>(const int& a) {
347  return _mm512_add_epi32(_mm512_set1_epi32(a), _mm512_set_epi32(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0));
348 }
349 template <>
350 EIGEN_STRONG_INLINE Packet8l plset<Packet8l>(const int64_t& a) {
351  return _mm512_add_epi64(_mm512_set1_epi64(a), _mm512_set_epi64(7, 6, 5, 4, 3, 2, 1, 0));
352 }
353 
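// plset produces the sequence {a, a+1, ..., a+size-1}; the constant lists are
// written highest lane first because _mm512_set_* takes arguments in reverse
// lane order. E.g. plset<Packet16f>(10.0f) yields {10, 11, ..., 25}.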
354 template <>
355 EIGEN_STRONG_INLINE Packet16f padd<Packet16f>(const Packet16f& a, const Packet16f& b) {
356  return _mm512_add_ps(a, b);
357 }
358 template <>
359 EIGEN_STRONG_INLINE Packet8d padd<Packet8d>(const Packet8d& a, const Packet8d& b) {
360  return _mm512_add_pd(a, b);
361 }
362 template <>
363 EIGEN_STRONG_INLINE Packet16i padd<Packet16i>(const Packet16i& a, const Packet16i& b) {
364  return _mm512_add_epi32(a, b);
365 }
366 template <>
367 EIGEN_STRONG_INLINE Packet8l padd<Packet8l>(const Packet8l& a, const Packet8l& b) {
368  return _mm512_add_epi64(a, b);
369 }
370 
371 template <>
372 EIGEN_STRONG_INLINE Packet16f padd<Packet16f>(const Packet16f& a, const Packet16f& b, uint16_t umask) {
373  __mmask16 mask = static_cast<__mmask16>(umask);
374  return _mm512_maskz_add_ps(mask, a, b);
375 }
376 template <>
377 EIGEN_STRONG_INLINE Packet8d padd<Packet8d>(const Packet8d& a, const Packet8d& b, uint8_t umask) {
378  __mmask8 mask = static_cast<__mmask8>(umask);
379  return _mm512_maskz_add_pd(mask, a, b);
380 }
381 
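// In the masked overloads, bit i of umask enables lane i and the maskz
// intrinsics zero disabled lanes. Illustrative sketch:
//
//   Packet16f a = pset1<Packet16f>(1.0f), b = pset1<Packet16f>(2.0f);
//   Packet16f r = padd<Packet16f>(a, b, static_cast<uint16_t>(0x00FF));
//   // lanes 0-7 hold 3.0f, lanes 8-15 hold 0.0f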
382 template <>
383 EIGEN_STRONG_INLINE Packet16f psub<Packet16f>(const Packet16f& a, const Packet16f& b) {
384  return _mm512_sub_ps(a, b);
385 }
386 template <>
387 EIGEN_STRONG_INLINE Packet8d psub<Packet8d>(const Packet8d& a, const Packet8d& b) {
388  return _mm512_sub_pd(a, b);
389 }
390 template <>
391 EIGEN_STRONG_INLINE Packet16i psub<Packet16i>(const Packet16i& a, const Packet16i& b) {
392  return _mm512_sub_epi32(a, b);
393 }
394 template <>
395 EIGEN_STRONG_INLINE Packet8l psub<Packet8l>(const Packet8l& a, const Packet8l& b) {
396  return _mm512_sub_epi64(a, b);
397 }
398 
399 template <>
400 EIGEN_STRONG_INLINE Packet16f pnegate(const Packet16f& a) {
401  // NOTE: MSVC seems to struggle with _mm512_set1_epi32, leading to random results.
402  // The intel docs give it a relatively high latency as well, so we're probably
403  // better off with using _mm512_set_epi32 directly anyways.
404  const __m512i mask =
405  _mm512_set_epi32(0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000,
406  0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000);
407  return _mm512_castsi512_ps(_mm512_xor_epi32(_mm512_castps_si512(a), mask));
408 }
409 template <>
410 EIGEN_STRONG_INLINE Packet8d pnegate(const Packet8d& a) {
411  const __m512i mask =
412  _mm512_set_epi64(0x8000000000000000ULL, 0x8000000000000000ULL, 0x8000000000000000ULL, 0x8000000000000000ULL,
413  0x8000000000000000ULL, 0x8000000000000000ULL, 0x8000000000000000ULL, 0x8000000000000000ULL);
414  return _mm512_castsi512_pd(_mm512_xor_epi64(_mm512_castpd_si512(a), mask));
415 }
416 template <>
417 EIGEN_STRONG_INLINE Packet16i pnegate(const Packet16i& a) {
418  return _mm512_sub_epi32(_mm512_setzero_si512(), a);
419 }
420 template <>
421 EIGEN_STRONG_INLINE Packet8l pnegate(const Packet8l& a) {
422  return _mm512_sub_epi64(_mm512_setzero_si512(), a);
423 }
424 
425 template <>
426 EIGEN_STRONG_INLINE Packet16f pconj(const Packet16f& a) {
427  return a;
428 }
429 template <>
430 EIGEN_STRONG_INLINE Packet8d pconj(const Packet8d& a) {
431  return a;
432 }
433 template <>
434 EIGEN_STRONG_INLINE Packet16i pconj(const Packet16i& a) {
435  return a;
436 }
437 template <>
438 EIGEN_STRONG_INLINE Packet8l pconj(const Packet8l& a) {
439  return a;
440 }
441 
442 template <>
443 EIGEN_STRONG_INLINE Packet16f pmul<Packet16f>(const Packet16f& a, const Packet16f& b) {
444  return _mm512_mul_ps(a, b);
445 }
446 template <>
447 EIGEN_STRONG_INLINE Packet8d pmul<Packet8d>(const Packet8d& a, const Packet8d& b) {
448  return _mm512_mul_pd(a, b);
449 }
450 template <>
451 EIGEN_STRONG_INLINE Packet16i pmul<Packet16i>(const Packet16i& a, const Packet16i& b) {
452  return _mm512_mullo_epi32(a, b);
453 }
454 template <>
455 EIGEN_STRONG_INLINE Packet8l pmul<Packet8l>(const Packet8l& a, const Packet8l& b) {
456 #ifdef EIGEN_VECTORIZE_AVX512DQ
457  return _mm512_mullo_epi64(a, b);
458 #else
459  return _mm512_mullox_epi64(a, b);
460 #endif
461 }
462 
463 template <>
464 EIGEN_STRONG_INLINE Packet16f pdiv<Packet16f>(const Packet16f& a, const Packet16f& b) {
465  return _mm512_div_ps(a, b);
466 }
467 
468 template <>
469 EIGEN_STRONG_INLINE Packet8d pdiv<Packet8d>(const Packet8d& a, const Packet8d& b) {
470  return _mm512_div_pd(a, b);
471 }
472 
473 template <>
474 EIGEN_STRONG_INLINE Packet16i pdiv<Packet16i>(const Packet16i& a, const Packet16i& b) {
475  Packet8i q_lo = pdiv<Packet8i>(_mm512_extracti64x4_epi64(a, 0), _mm512_extracti64x4_epi64(b, 0));
476  Packet8i q_hi = pdiv<Packet8i>(_mm512_extracti64x4_epi64(a, 1), _mm512_extracti64x4_epi64(b, 1));
477  return _mm512_inserti64x4(_mm512_castsi256_si512(q_lo), q_hi, 1);
478 }
479 
480 #ifdef EIGEN_VECTORIZE_FMA
481 template <>
482 EIGEN_STRONG_INLINE Packet16f pmadd(const Packet16f& a, const Packet16f& b, const Packet16f& c) {
483  return _mm512_fmadd_ps(a, b, c);
484 }
485 template <>
486 EIGEN_STRONG_INLINE Packet8d pmadd(const Packet8d& a, const Packet8d& b, const Packet8d& c) {
487  return _mm512_fmadd_pd(a, b, c);
488 }
489 
490 template <>
491 EIGEN_STRONG_INLINE Packet16f pmsub(const Packet16f& a, const Packet16f& b, const Packet16f& c) {
492  return _mm512_fmsub_ps(a, b, c);
493 }
494 template <>
495 EIGEN_STRONG_INLINE Packet8d pmsub(const Packet8d& a, const Packet8d& b, const Packet8d& c) {
496  return _mm512_fmsub_pd(a, b, c);
497 }
498 
499 template <>
500 EIGEN_STRONG_INLINE Packet16f pnmadd(const Packet16f& a, const Packet16f& b, const Packet16f& c) {
501  return _mm512_fnmadd_ps(a, b, c);
502 }
503 template <>
504 EIGEN_STRONG_INLINE Packet8d pnmadd(const Packet8d& a, const Packet8d& b, const Packet8d& c) {
505  return _mm512_fnmadd_pd(a, b, c);
506 }
507 
508 template <>
509 EIGEN_STRONG_INLINE Packet16f pnmsub(const Packet16f& a, const Packet16f& b, const Packet16f& c) {
510  return _mm512_fnmsub_ps(a, b, c);
511 }
512 template <>
513 EIGEN_STRONG_INLINE Packet8d pnmsub(const Packet8d& a, const Packet8d& b, const Packet8d& c) {
514  return _mm512_fnmsub_pd(a, b, c);
515 }
516 #endif
517 
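// Sign conventions of the fused ops above (a single rounding per expression,
// available only when EIGEN_VECTORIZE_FMA is defined):
//   pmadd(a, b, c)  = a * b + c
//   pmsub(a, b, c)  = a * b - c
//   pnmadd(a, b, c) = c - a * b
//   pnmsub(a, b, c) = -(a * b) - c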
518 template <>
519 EIGEN_DEVICE_FUNC inline Packet16f pselect(const Packet16f& mask, const Packet16f& a, const Packet16f& b) {
520  __mmask16 mask16 = _mm512_cmpeq_epi32_mask(_mm512_castps_si512(mask), _mm512_setzero_epi32());
521  return _mm512_mask_blend_ps(mask16, a, b);
522 }
523 
524 template <>
525 EIGEN_DEVICE_FUNC inline Packet16i pselect(const Packet16i& mask, const Packet16i& a, const Packet16i& b) {
526  __mmask16 mask16 = _mm512_cmpeq_epi32_mask(mask, _mm512_setzero_epi32());
527  return _mm512_mask_blend_epi32(mask16, a, b);
528 }
529 
530 template <>
531 EIGEN_DEVICE_FUNC inline Packet8l pselect(const Packet8l& mask, const Packet8l& a, const Packet8l& b) {
532  __mmask8 mask8 = _mm512_cmpeq_epi64_mask(mask, _mm512_setzero_si512());
533  return _mm512_mask_blend_epi64(mask8, a, b);
534 }
535 
536 template <>
537 EIGEN_DEVICE_FUNC inline Packet8d pselect(const Packet8d& mask, const Packet8d& a, const Packet8d& b) {
538  __mmask8 mask8 = _mm512_cmp_epi64_mask(_mm512_castpd_si512(mask), _mm512_setzero_epi32(), _MM_CMPINT_EQ);
539  return _mm512_mask_blend_pd(mask8, a, b);
540 }
541 
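// pselect takes a full-width bit mask (as produced by the pcmp_* functions
// below): lanes where 'mask' is non-zero take 'a', zeroed lanes take 'b'.
// Illustrative absolute value built from it:
//
//   Packet16f m = pcmp_lt(x, pzero(x));       // all-ones where x < 0
//   Packet16f r = pselect(m, pnegate(x), x);  // -x where x < 0, else x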
542 template <>
543 EIGEN_STRONG_INLINE Packet16f pmin<Packet16f>(const Packet16f& a, const Packet16f& b) {
544  // Arguments are reversed to match NaN propagation behavior of std::min.
545  return _mm512_min_ps(b, a);
546 }
547 template <>
548 EIGEN_STRONG_INLINE Packet8d pmin<Packet8d>(const Packet8d& a, const Packet8d& b) {
549  // Arguments are reversed to match NaN propagation behavior of std::min.
550  return _mm512_min_pd(b, a);
551 }
552 template <>
553 EIGEN_STRONG_INLINE Packet16i pmin<Packet16i>(const Packet16i& a, const Packet16i& b) {
554  return _mm512_min_epi32(b, a);
555 }
556 template <>
557 EIGEN_STRONG_INLINE Packet8l pmin<Packet8l>(const Packet8l& a, const Packet8l& b) {
558  return _mm512_min_epi64(b, a);
559 }
560 
561 template <>
562 EIGEN_STRONG_INLINE Packet16f pmax<Packet16f>(const Packet16f& a, const Packet16f& b) {
563  // Arguments are reversed to match NaN propagation behavior of std::max.
564  return _mm512_max_ps(b, a);
565 }
566 template <>
567 EIGEN_STRONG_INLINE Packet8d pmax<Packet8d>(const Packet8d& a, const Packet8d& b) {
568  // Arguments are reversed to match NaN propagation behavior of std::max.
569  return _mm512_max_pd(b, a);
570 }
571 template <>
572 EIGEN_STRONG_INLINE Packet16i pmax<Packet16i>(const Packet16i& a, const Packet16i& b) {
573  return _mm512_max_epi32(b, a);
574 }
575 template <>
576 EIGEN_STRONG_INLINE Packet8l pmax<Packet8l>(const Packet8l& a, const Packet8l& b) {
577  return _mm512_max_epi64(b, a);
578 }
579 
580 // Add specializations for min/max with prescribed NaN propagation.
581 template <>
582 EIGEN_STRONG_INLINE Packet16f pmin<PropagateNumbers, Packet16f>(const Packet16f& a, const Packet16f& b) {
583  return pminmax_propagate_numbers(a, b, pmin<Packet16f>);
584 }
585 template <>
586 EIGEN_STRONG_INLINE Packet8d pmin<PropagateNumbers, Packet8d>(const Packet8d& a, const Packet8d& b) {
587  return pminmax_propagate_numbers(a, b, pmin<Packet8d>);
588 }
589 template <>
590 EIGEN_STRONG_INLINE Packet16f pmax<PropagateNumbers, Packet16f>(const Packet16f& a, const Packet16f& b) {
591  return pminmax_propagate_numbers(a, b, pmax<Packet16f>);
592 }
593 template <>
594 EIGEN_STRONG_INLINE Packet8d pmax<PropagateNumbers, Packet8d>(const Packet8d& a, const Packet8d& b) {
595  return pminmax_propagate_numbers(a, b, pmax<Packet8d>);
596 }
597 template <>
598 EIGEN_STRONG_INLINE Packet16f pmin<PropagateNaN, Packet16f>(const Packet16f& a, const Packet16f& b) {
599  return pminmax_propagate_nan(a, b, pmin<Packet16f>);
600 }
601 template <>
602 EIGEN_STRONG_INLINE Packet8d pmin<PropagateNaN, Packet8d>(const Packet8d& a, const Packet8d& b) {
603  return pminmax_propagate_nan(a, b, pmin<Packet8d>);
604 }
605 template <>
606 EIGEN_STRONG_INLINE Packet16f pmax<PropagateNaN, Packet16f>(const Packet16f& a, const Packet16f& b) {
607  return pminmax_propagate_nan(a, b, pmax<Packet16f>);
608 }
609 template <>
610 EIGEN_STRONG_INLINE Packet8d pmax<PropagateNaN, Packet8d>(const Packet8d& a, const Packet8d& b) {
611  return pminmax_propagate_nan(a, b, pmax<Packet8d>);
612 }
613 
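// PropagateNumbers returns the non-NaN operand when exactly one input is NaN;
// PropagateNaN returns NaN if either input is NaN. The unqualified pmin/pmax
// above instead match std::min/std::max: because the arguments are swapped,
// the hardware min/max returns 'a' whenever either operand is NaN.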
614 #ifdef EIGEN_VECTORIZE_AVX512DQ
615 template <int I_>
616 EIGEN_STRONG_INLINE Packet8f extract256(Packet16f x) {
617  return _mm512_extractf32x8_ps(x, I_);
618 }
619 template <int I_>
620 EIGEN_STRONG_INLINE Packet2d extract128(Packet8d x) {
621  return _mm512_extractf64x2_pd(x, I_);
622 }
623 EIGEN_STRONG_INLINE Packet16f cat256(Packet8f a, Packet8f b) {
624  return _mm512_insertf32x8(_mm512_castps256_ps512(a), b, 1);
625 }
626 EIGEN_STRONG_INLINE Packet16i cat256i(Packet8i a, Packet8i b) {
627  return _mm512_inserti32x8(_mm512_castsi256_si512(a), b, 1);
628 }
629 #else
630 // AVX512F does not define _mm512_extractf32x8_ps to extract __m256 from __m512
631 template <int I_>
632 EIGEN_STRONG_INLINE Packet8f extract256(Packet16f x) {
633  return _mm256_castsi256_ps(_mm512_extracti64x4_epi64(_mm512_castps_si512(x), I_));
634 }
635 
636 // AVX512F does not define _mm512_extractf64x2_pd to extract __m128 from __m512
637 template <int I_>
638 EIGEN_STRONG_INLINE Packet2d extract128(Packet8d x) {
639  return _mm_castsi128_pd(_mm512_extracti32x4_epi32(_mm512_castpd_si512(x), I_));
640 }
641 
642 EIGEN_STRONG_INLINE Packet16f cat256(Packet8f a, Packet8f b) {
643  return _mm512_castsi512_ps(
644  _mm512_inserti64x4(_mm512_castsi256_si512(_mm256_castps_si256(a)), _mm256_castps_si256(b), 1));
645 }
646 EIGEN_STRONG_INLINE Packet16i cat256i(Packet8i a, Packet8i b) {
647  return _mm512_inserti64x4(_mm512_castsi256_si512(a), b, 1);
648 }
649 #endif
650 
651 // Helper function for the bit-packing snippet of the low-precision comparisons.
652 // It packs the flags from 32x16 to 16x16.
653 EIGEN_STRONG_INLINE __m256i Pack32To16(Packet16f rf) {
654  // Split data into small pieces and handle with AVX instructions
655  // to guarantee internal order of vector.
656  // Operation:
657  // dst[15:0] := Saturate16(rf[31:0])
658  // dst[31:16] := Saturate16(rf[63:32])
659  // ...
660  // dst[255:240] := Saturate16(rf[255:224])
661  __m256i lo = _mm256_castps_si256(extract256<0>(rf));
662  __m256i hi = _mm256_castps_si256(extract256<1>(rf));
663  __m128i result_lo = _mm_packs_epi32(_mm256_extractf128_si256(lo, 0), _mm256_extractf128_si256(lo, 1));
664  __m128i result_hi = _mm_packs_epi32(_mm256_extractf128_si256(hi, 0), _mm256_extractf128_si256(hi, 1));
665  return _mm256_insertf128_si256(_mm256_castsi128_si256(result_lo), result_hi, 1);
666 }
667 
668 template <>
669 EIGEN_STRONG_INLINE Packet16f pisnan(const Packet16f& a) {
670  __mmask16 mask = _mm512_cmp_ps_mask(a, a, _CMP_UNORD_Q);
671  return _mm512_castsi512_ps(_mm512_maskz_set1_epi32(mask, int32_t(-1)));
672 }
673 
674 template <>
675 EIGEN_STRONG_INLINE Packet16f pcmp_eq(const Packet16f& a, const Packet16f& b) {
676  __mmask16 mask = _mm512_cmp_ps_mask(a, b, _CMP_EQ_OQ);
677  return _mm512_castsi512_ps(_mm512_mask_set1_epi32(_mm512_setzero_epi32(), mask, int32_t(-1)));
678 }
679 template <>
680 EIGEN_STRONG_INLINE Packet16f pcmp_le(const Packet16f& a, const Packet16f& b) {
681  __mmask16 mask = _mm512_cmp_ps_mask(a, b, _CMP_LE_OQ);
682  return _mm512_castsi512_ps(_mm512_mask_set1_epi32(_mm512_setzero_epi32(), mask, int32_t(-1)));
683 }
684 
685 template <>
686 EIGEN_STRONG_INLINE Packet16f pcmp_lt(const Packet16f& a, const Packet16f& b) {
687  __mmask16 mask = _mm512_cmp_ps_mask(a, b, _CMP_LT_OQ);
688  return _mm512_castsi512_ps(_mm512_mask_set1_epi32(_mm512_setzero_epi32(), mask, int32_t(-1)));
689 }
690 
691 template <>
692 EIGEN_STRONG_INLINE Packet16f pcmp_lt_or_nan(const Packet16f& a, const Packet16f& b) {
693  __mmask16 mask = _mm512_cmp_ps_mask(a, b, _CMP_NGE_UQ);
694  return _mm512_castsi512_ps(_mm512_mask_set1_epi32(_mm512_setzero_epi32(), mask, int32_t(-1)));
695 }
696 
697 template <>
698 EIGEN_STRONG_INLINE Packet16i pcmp_eq(const Packet16i& a, const Packet16i& b) {
699  __mmask16 mask = _mm512_cmp_epi32_mask(a, b, _MM_CMPINT_EQ);
700  return _mm512_mask_set1_epi32(_mm512_setzero_epi32(), mask, int32_t(-1));
701 }
702 template <>
703 EIGEN_STRONG_INLINE Packet16i pcmp_le(const Packet16i& a, const Packet16i& b) {
704  __mmask16 mask = _mm512_cmp_epi32_mask(a, b, _MM_CMPINT_LE);
705  return _mm512_mask_set1_epi32(_mm512_setzero_epi32(), mask, int32_t(-1));
706 }
707 template <>
708 EIGEN_STRONG_INLINE Packet16i pcmp_lt(const Packet16i& a, const Packet16i& b) {
709  __mmask16 mask = _mm512_cmp_epi32_mask(a, b, _MM_CMPINT_LT);
710  return _mm512_mask_set1_epi32(_mm512_setzero_epi32(), mask, int32_t(-1));
711 }
712 
713 template <>
714 EIGEN_STRONG_INLINE Packet8l pcmp_eq(const Packet8l& a, const Packet8l& b) {
715  __mmask8 mask = _mm512_cmp_epi64_mask(a, b, _MM_CMPINT_EQ);
716  return _mm512_mask_set1_epi64(_mm512_setzero_si512(), mask, int64_t(-1));
717 }
718 template <>
719 EIGEN_STRONG_INLINE Packet8l pcmp_le(const Packet8l& a, const Packet8l& b) {
720  __mmask8 mask = _mm512_cmp_epi64_mask(a, b, _MM_CMPINT_LE);
721  return _mm512_mask_set1_epi64(_mm512_setzero_si512(), mask, int64_t(-1));
722 }
723 template <>
724 EIGEN_STRONG_INLINE Packet8l pcmp_lt(const Packet8l& a, const Packet8l& b) {
725  __mmask8 mask = _mm512_cmp_epi64_mask(a, b, _MM_CMPINT_LT);
726  return _mm512_mask_set1_epi64(_mm512_setzero_si512(), mask, int64_t(-1));
727 }
728 
729 template <>
730 EIGEN_STRONG_INLINE Packet8d pcmp_eq(const Packet8d& a, const Packet8d& b) {
731  __mmask8 mask = _mm512_cmp_pd_mask(a, b, _CMP_EQ_OQ);
732  return _mm512_castsi512_pd(_mm512_mask_set1_epi64(_mm512_setzero_epi32(), mask, 0xffffffffffffffffu));
733 }
734 template <>
735 EIGEN_STRONG_INLINE Packet8d pcmp_le(const Packet8d& a, const Packet8d& b) {
736  __mmask8 mask = _mm512_cmp_pd_mask(a, b, _CMP_LE_OQ);
737  return _mm512_castsi512_pd(_mm512_mask_set1_epi64(_mm512_setzero_epi32(), mask, 0xffffffffffffffffu));
738 }
739 template <>
740 EIGEN_STRONG_INLINE Packet8d pcmp_lt(const Packet8d& a, const Packet8d& b) {
741  __mmask8 mask = _mm512_cmp_pd_mask(a, b, _CMP_LT_OQ);
742  return _mm512_castsi512_pd(_mm512_mask_set1_epi64(_mm512_setzero_epi32(), mask, 0xffffffffffffffffu));
743 }
744 template <>
745 EIGEN_STRONG_INLINE Packet8d pcmp_lt_or_nan(const Packet8d& a, const Packet8d& b) {
746  __mmask8 mask = _mm512_cmp_pd_mask(a, b, _CMP_NGE_UQ);
747  return _mm512_castsi512_pd(_mm512_mask_set1_epi64(_mm512_setzero_epi32(), mask, 0xffffffffffffffffu));
748 }
749 
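// All pcmp_* above return per-lane bit masks rather than AVX512 kmask
// registers: lanes satisfying the predicate are all-ones, others all-zeros,
// so the results feed directly into pand/por/pselect. The *_OQ predicates are
// "ordered, quiet" (false if an input is NaN), while pcmp_lt_or_nan uses
// _CMP_NGE_UQ and is true when either input is NaN.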
750 template <>
751 EIGEN_STRONG_INLINE Packet16f print<Packet16f>(const Packet16f& a) {
752  return _mm512_roundscale_ps(a, _MM_FROUND_CUR_DIRECTION);
753 }
754 template <>
755 EIGEN_STRONG_INLINE Packet8d print<Packet8d>(const Packet8d& a) {
756  return _mm512_roundscale_pd(a, _MM_FROUND_CUR_DIRECTION);
757 }
758 
759 template <>
760 EIGEN_STRONG_INLINE Packet16f pceil<Packet16f>(const Packet16f& a) {
761  return _mm512_roundscale_ps(a, _MM_FROUND_TO_POS_INF);
762 }
763 template <>
764 EIGEN_STRONG_INLINE Packet8d pceil<Packet8d>(const Packet8d& a) {
765  return _mm512_roundscale_pd(a, _MM_FROUND_TO_POS_INF);
766 }
767 
768 template <>
769 EIGEN_STRONG_INLINE Packet16f pfloor<Packet16f>(const Packet16f& a) {
770  return _mm512_roundscale_ps(a, _MM_FROUND_TO_NEG_INF);
771 }
772 template <>
773 EIGEN_STRONG_INLINE Packet8d pfloor<Packet8d>(const Packet8d& a) {
774  return _mm512_roundscale_pd(a, _MM_FROUND_TO_NEG_INF);
775 }
776 
777 template <>
778 EIGEN_STRONG_INLINE Packet16f ptrunc<Packet16f>(const Packet16f& a) {
779  return _mm512_roundscale_ps(a, _MM_FROUND_TO_ZERO);
780 }
781 template <>
782 EIGEN_STRONG_INLINE Packet8d ptrunc<Packet8d>(const Packet8d& a) {
783  return _mm512_roundscale_pd(a, _MM_FROUND_TO_ZERO);
784 }
785 
786 template <>
787 EIGEN_STRONG_INLINE Packet16i ptrue<Packet16i>(const Packet16i& /*a*/) {
788  return _mm512_set1_epi32(int32_t(-1));
789 }
790 
791 template <>
792 EIGEN_STRONG_INLINE Packet8l ptrue<Packet8l>(const Packet8l& /*a*/) {
793  return _mm512_set1_epi64(int64_t(-1));
794 }
795 
796 template <>
797 EIGEN_STRONG_INLINE Packet16f ptrue<Packet16f>(const Packet16f& a) {
798  return _mm512_castsi512_ps(ptrue<Packet16i>(_mm512_castps_si512(a)));
799 }
800 
801 template <>
802 EIGEN_STRONG_INLINE Packet8d ptrue<Packet8d>(const Packet8d& a) {
803  return _mm512_castsi512_pd(ptrue<Packet16i>(_mm512_castpd_si512(a)));
804 }
805 
806 template <>
807 EIGEN_STRONG_INLINE Packet16i pand<Packet16i>(const Packet16i& a, const Packet16i& b) {
808  return _mm512_and_si512(a, b);
809 }
810 
811 template <>
812 EIGEN_STRONG_INLINE Packet8l pand<Packet8l>(const Packet8l& a, const Packet8l& b) {
813  return _mm512_and_si512(a, b);
814 }
815 
816 template <>
817 EIGEN_STRONG_INLINE Packet16f pand<Packet16f>(const Packet16f& a, const Packet16f& b) {
818 #ifdef EIGEN_VECTORIZE_AVX512DQ
819  return _mm512_and_ps(a, b);
820 #else
821  return _mm512_castsi512_ps(pand(_mm512_castps_si512(a), _mm512_castps_si512(b)));
822 #endif
823 }
824 template <>
825 EIGEN_STRONG_INLINE Packet8d pand<Packet8d>(const Packet8d& a, const Packet8d& b) {
826 #ifdef EIGEN_VECTORIZE_AVX512DQ
827  return _mm512_and_pd(a, b);
828 #else
829  Packet8d res = _mm512_undefined_pd();
830  Packet4d lane0_a = _mm512_extractf64x4_pd(a, 0);
831  Packet4d lane0_b = _mm512_extractf64x4_pd(b, 0);
832  res = _mm512_insertf64x4(res, _mm256_and_pd(lane0_a, lane0_b), 0);
833 
834  Packet4d lane1_a = _mm512_extractf64x4_pd(a, 1);
835  Packet4d lane1_b = _mm512_extractf64x4_pd(b, 1);
836  return _mm512_insertf64x4(res, _mm256_and_pd(lane1_a, lane1_b), 1);
837 #endif
838 }
839 
840 template <>
841 EIGEN_STRONG_INLINE Packet16i por<Packet16i>(const Packet16i& a, const Packet16i& b) {
842  return _mm512_or_si512(a, b);
843 }
844 
845 template <>
846 EIGEN_STRONG_INLINE Packet8l por<Packet8l>(const Packet8l& a, const Packet8l& b) {
847  return _mm512_or_si512(a, b);
848 }
849 
850 template <>
851 EIGEN_STRONG_INLINE Packet16f por<Packet16f>(const Packet16f& a, const Packet16f& b) {
852 #ifdef EIGEN_VECTORIZE_AVX512DQ
853  return _mm512_or_ps(a, b);
854 #else
855  return _mm512_castsi512_ps(por(_mm512_castps_si512(a), _mm512_castps_si512(b)));
856 #endif
857 }
858 
859 template <>
860 EIGEN_STRONG_INLINE Packet8d por<Packet8d>(const Packet8d& a, const Packet8d& b) {
861 #ifdef EIGEN_VECTORIZE_AVX512DQ
862  return _mm512_or_pd(a, b);
863 #else
864  return _mm512_castsi512_pd(por(_mm512_castpd_si512(a), _mm512_castpd_si512(b)));
865 #endif
866 }
867 
868 template <>
869 EIGEN_STRONG_INLINE Packet16i pxor<Packet16i>(const Packet16i& a, const Packet16i& b) {
870  return _mm512_xor_si512(a, b);
871 }
872 
873 template <>
874 EIGEN_STRONG_INLINE Packet8l pxor<Packet8l>(const Packet8l& a, const Packet8l& b) {
875  return _mm512_xor_si512(a, b);
876 }
877 
878 template <>
879 EIGEN_STRONG_INLINE Packet16f pxor<Packet16f>(const Packet16f& a, const Packet16f& b) {
880 #ifdef EIGEN_VECTORIZE_AVX512DQ
881  return _mm512_xor_ps(a, b);
882 #else
883  return _mm512_castsi512_ps(pxor(_mm512_castps_si512(a), _mm512_castps_si512(b)));
884 #endif
885 }
886 
887 template <>
888 EIGEN_STRONG_INLINE Packet8d pxor<Packet8d>(const Packet8d& a, const Packet8d& b) {
889 #ifdef EIGEN_VECTORIZE_AVX512DQ
890  return _mm512_xor_pd(a, b);
891 #else
892  return _mm512_castsi512_pd(pxor(_mm512_castpd_si512(a), _mm512_castpd_si512(b)));
893 #endif
894 }
895 
896 template <>
897 EIGEN_STRONG_INLINE Packet16i pandnot<Packet16i>(const Packet16i& a, const Packet16i& b) {
898  return _mm512_andnot_si512(b, a);
899 }
900 
901 template <>
902 EIGEN_STRONG_INLINE Packet8l pandnot<Packet8l>(const Packet8l& a, const Packet8l& b) {
903  return _mm512_andnot_si512(b, a);
904 }
905 
906 template <>
907 EIGEN_STRONG_INLINE Packet16f pandnot<Packet16f>(const Packet16f& a, const Packet16f& b) {
908 #ifdef EIGEN_VECTORIZE_AVX512DQ
909  return _mm512_andnot_ps(b, a);
910 #else
911  return _mm512_castsi512_ps(pandnot(_mm512_castps_si512(a), _mm512_castps_si512(b)));
912 #endif
913 }
914 template <>
915 EIGEN_STRONG_INLINE Packet8d pandnot<Packet8d>(const Packet8d& a, const Packet8d& b) {
916 #ifdef EIGEN_VECTORIZE_AVX512DQ
917  return _mm512_andnot_pd(b, a);
918 #else
919  return _mm512_castsi512_pd(pandnot(_mm512_castpd_si512(a), _mm512_castpd_si512(b)));
920 #endif
921 }
922 
923 template <>
924 EIGEN_STRONG_INLINE Packet16f pround<Packet16f>(const Packet16f& a) {
925  // Work-around for default std::round rounding mode.
926  const Packet16f mask = pset1frombits<Packet16f>(static_cast<numext::uint32_t>(0x80000000u));
927  const Packet16f prev0dot5 = pset1frombits<Packet16f>(static_cast<numext::uint32_t>(0x3EFFFFFFu));
928  return _mm512_roundscale_ps(padd(por(pand(a, mask), prev0dot5), a), _MM_FROUND_TO_ZERO);
929 }
930 template <>
931 EIGEN_STRONG_INLINE Packet8d pround<Packet8d>(const Packet8d& a) {
932  // Work-around for default std::round rounding mode.
933  const Packet8d mask = pset1frombits<Packet8d>(static_cast<numext::uint64_t>(0x8000000000000000ull));
934  const Packet8d prev0dot5 = pset1frombits<Packet8d>(static_cast<numext::uint64_t>(0x3FDFFFFFFFFFFFFFull));
935  return _mm512_roundscale_pd(padd(por(pand(a, mask), prev0dot5), a), _MM_FROUND_TO_ZERO);
936 }
937 
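// How the pround work-around achieves round-half-away-from-zero: 0x3EFFFFFF
// (resp. 0x3FDFFFFFFFFFFFFF) is the largest float (double) strictly below
// 0.5; it is OR'ed with the sign of 'a' and added before truncating. E.g.
// 2.5f + 0.49999997f rounds (in float) to 3.0f, which truncates to 3, while
// 2.4f + 0.49999997f stays below 2.9f and truncates to 2.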
938 template <int N>
939 EIGEN_STRONG_INLINE Packet16i parithmetic_shift_right(Packet16i a) {
940  return _mm512_srai_epi32(a, N);
941 }
942 
943 template <int N>
944 EIGEN_STRONG_INLINE Packet16i plogical_shift_right(Packet16i a) {
945  return _mm512_srli_epi32(a, N);
946 }
947 
948 template <int N>
949 EIGEN_STRONG_INLINE Packet16i plogical_shift_left(Packet16i a) {
950  return _mm512_slli_epi32(a, N);
951 }
952 
953 template <int N>
954 EIGEN_STRONG_INLINE Packet8l parithmetic_shift_right(Packet8l a) {
955  return _mm512_srai_epi64(a, N);
956 }
957 
958 template <int N>
959 EIGEN_STRONG_INLINE Packet8l plogical_shift_right(Packet8l a) {
960  return _mm512_srli_epi64(a, N);
961 }
962 
963 template <int N>
964 EIGEN_STRONG_INLINE Packet8l plogical_shift_left(Packet8l a) {
965  return _mm512_slli_epi64(a, N);
966 }
967 
968 template <>
969 EIGEN_STRONG_INLINE Packet16f pload<Packet16f>(const float* from) {
970  EIGEN_DEBUG_ALIGNED_LOAD return _mm512_load_ps(from);
971 }
972 template <>
973 EIGEN_STRONG_INLINE Packet8d pload<Packet8d>(const double* from) {
974  EIGEN_DEBUG_ALIGNED_LOAD return _mm512_load_pd(from);
975 }
976 template <>
977 EIGEN_STRONG_INLINE Packet16i pload<Packet16i>(const int* from) {
978  EIGEN_DEBUG_ALIGNED_LOAD return _mm512_load_epi64(from);
979 }
980 template <>
981 EIGEN_STRONG_INLINE Packet8l pload<Packet8l>(const int64_t* from) {
982  EIGEN_DEBUG_ALIGNED_LOAD return _mm512_load_epi64(from);
983 }
984 
985 template <>
986 EIGEN_STRONG_INLINE Packet16f ploadu<Packet16f>(const float* from) {
987  EIGEN_DEBUG_UNALIGNED_LOAD return _mm512_loadu_ps(from);
988 }
989 template <>
990 EIGEN_STRONG_INLINE Packet8d ploadu<Packet8d>(const double* from) {
991  EIGEN_DEBUG_UNALIGNED_LOAD return _mm512_loadu_pd(from);
992 }
993 template <>
994 EIGEN_STRONG_INLINE Packet16i ploadu<Packet16i>(const int* from) {
995  EIGEN_DEBUG_UNALIGNED_LOAD return _mm512_loadu_epi32(from);
996 }
997 template <>
998 EIGEN_STRONG_INLINE Packet8l ploadu<Packet8l>(const int64_t* from) {
999  EIGEN_DEBUG_UNALIGNED_LOAD return _mm512_loadu_epi64(from);
1000 }
1001 
1002 template <>
1003 EIGEN_STRONG_INLINE Packet16f ploadu<Packet16f>(const float* from, uint16_t umask) {
1004  __mmask16 mask = static_cast<__mmask16>(umask);
1005  EIGEN_DEBUG_UNALIGNED_LOAD return _mm512_maskz_loadu_ps(mask, from);
1006 }
1007 template <>
1008 EIGEN_STRONG_INLINE Packet8d ploadu<Packet8d>(const double* from, uint8_t umask) {
1009  __mmask8 mask = static_cast<__mmask8>(umask);
1010  EIGEN_DEBUG_UNALIGNED_LOAD return _mm512_maskz_loadu_pd(mask, from);
1011 }
1012 
1013 // Loads 8 floats from memory and returns the packet
1014 // {a0, a0, a1, a1, a2, a2, a3, a3, a4, a4, a5, a5, a6, a6, a7, a7}
1015 template <>
1016 EIGEN_STRONG_INLINE Packet16f ploaddup<Packet16f>(const float* from) {
1017  // an unaligned load is required here as there is no requirement
1018  // on the alignment of input pointer 'from'
1019  __m256i low_half = _mm256_castps_si256(_mm256_loadu_ps(from));
1020  __m512 even_elements = _mm512_castsi512_ps(_mm512_cvtepu32_epi64(low_half));
1021  __m512 pairs = _mm512_permute_ps(even_elements, _MM_SHUFFLE(2, 2, 0, 0));
1022  return pairs;
1023 }
1024 
1025 // Loads 4 doubles from memory and returns the packet {a0, a0, a1, a1, a2, a2, a3,
1026 // a3}
1027 template <>
1028 EIGEN_STRONG_INLINE Packet8d ploaddup<Packet8d>(const double* from) {
1029  Packet8d tmp = _mm512_castpd256_pd512(ploadu<Packet4d>(from));
1030  const Packet8l scatter_mask = _mm512_set_epi64(3, 3, 2, 2, 1, 1, 0, 0);
1031  return _mm512_permutexvar_pd(scatter_mask, tmp);
1032 }
1033 
1034 // Loads 4 int64_t from memory and returns the packet {a0, a0, a1, a1, a2, a2, a3,
1035 // a3}
1036 template <>
1037 EIGEN_STRONG_INLINE Packet8l ploaddup<Packet8l>(const int64_t* from) {
1038  Packet8l tmp = _mm512_castsi256_si512(ploadu<Packet4l>(from));
1039  const Packet8l scatter_mask = _mm512_set_epi64(3, 3, 2, 2, 1, 1, 0, 0);
1040  return _mm512_permutexvar_epi64(scatter_mask, tmp);
1041 }
1042 
1043 // Loads 8 integers from memory and returns the packet
1044 // {a0, a0, a1, a1, a2, a2, a3, a3, a4, a4, a5, a5, a6, a6, a7, a7}
1045 template <>
1046 EIGEN_STRONG_INLINE Packet16i ploaddup<Packet16i>(const int* from) {
1047  __m256i low_half = _mm256_load_si256(reinterpret_cast<const __m256i*>(from));
1048  __m512 even_elements = _mm512_castsi512_ps(_mm512_cvtepu32_epi64(low_half));
1049  __m512 pairs = _mm512_permute_ps(even_elements, _MM_SHUFFLE(2, 2, 0, 0));
1050  return _mm512_castps_si512(pairs);
1051 }
1052 
1053 // Loads 4 floats from memory and returns the packet
1054 // {a0, a0, a0, a0, a1, a1, a1, a1, a2, a2, a2, a2, a3, a3, a3, a3}
1055 template <>
1056 EIGEN_STRONG_INLINE Packet16f ploadquad<Packet16f>(const float* from) {
1057  Packet16f tmp = _mm512_castps128_ps512(ploadu<Packet4f>(from));
1058  const Packet16i scatter_mask = _mm512_set_epi32(3, 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0);
1059  return _mm512_permutexvar_ps(scatter_mask, tmp);
1060 }
1061 
1062 // Loads 2 doubles from memory and returns the packet
1063 // {a0, a0, a0, a0, a1, a1, a1, a1}
1064 template <>
1065 EIGEN_STRONG_INLINE Packet8d ploadquad<Packet8d>(const double* from) {
1066  __m256d lane0 = _mm256_set1_pd(*from);
1067  __m256d lane1 = _mm256_set1_pd(*(from + 1));
1068  __m512d tmp = _mm512_undefined_pd();
1069  tmp = _mm512_insertf64x4(tmp, lane0, 0);
1070  return _mm512_insertf64x4(tmp, lane1, 1);
1071 }
1072 
1073 // Loads 2 int64_t from memory and returns the packet
1074 // {a0, a0, a0, a0, a1, a1, a1, a1}
1075 template <>
1076 EIGEN_STRONG_INLINE Packet8l ploadquad<Packet8l>(const int64_t* from) {
1077  __m256i lane0 = _mm256_set1_epi64x(*from);
1078  __m256i lane1 = _mm256_set1_epi64x(*(from + 1));
1079  __m512i tmp = _mm512_undefined_epi32();
1080  tmp = _mm512_inserti64x4(tmp, lane0, 0);
1081  return _mm512_inserti64x4(tmp, lane1, 1);
1082 }
1083 
1084 // Loads 4 integers from memory and returns the packet
1085 // {a0, a0, a0, a0, a1, a1, a1, a1, a2, a2, a2, a2, a3, a3, a3, a3}
1086 template <>
1087 EIGEN_STRONG_INLINE Packet16i ploadquad<Packet16i>(const int* from) {
1088  Packet16i tmp = _mm512_castsi128_si512(ploadu<Packet4i>(from));
1089  const Packet16i scatter_mask = _mm512_set_epi32(3, 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0);
1090  return _mm512_permutexvar_epi32(scatter_mask, tmp);
1091 }
1092 
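// Summary of the duplicating loads above: ploaddup reads size/2 scalars and
// repeats each twice; ploadquad reads size/4 scalars and repeats each four
// times. Sketch (illustrative values):
//
//   float src[8] = {0, 1, 2, 3, 4, 5, 6, 7};
//   ploaddup<Packet16f>(src);   // {0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7}
//   ploadquad<Packet16f>(src);  // {0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3}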
1093 template <>
1094 EIGEN_STRONG_INLINE void pstore<float>(float* to, const Packet16f& from) {
1095  EIGEN_DEBUG_ALIGNED_STORE _mm512_store_ps(to, from);
1096 }
1097 template <>
1098 EIGEN_STRONG_INLINE void pstore<double>(double* to, const Packet8d& from) {
1099  EIGEN_DEBUG_ALIGNED_STORE _mm512_store_pd(to, from);
1100 }
1101 template <>
1102 EIGEN_STRONG_INLINE void pstore<int>(int* to, const Packet16i& from) {
1103  EIGEN_DEBUG_ALIGNED_STORE _mm512_store_epi32(to, from);
1104 }
1105 template <>
1106 EIGEN_STRONG_INLINE void pstore<int64_t>(int64_t* to, const Packet8l& from) {
1107  EIGEN_DEBUG_ALIGNED_STORE _mm512_store_epi64(to, from);
1108 }
1109 
1110 template <>
1111 EIGEN_STRONG_INLINE void pstoreu<float>(float* to, const Packet16f& from) {
1112  EIGEN_DEBUG_UNALIGNED_STORE _mm512_storeu_ps(to, from);
1113 }
1114 template <>
1115 EIGEN_STRONG_INLINE void pstoreu<double>(double* to, const Packet8d& from) {
1116  EIGEN_DEBUG_UNALIGNED_STORE _mm512_storeu_pd(to, from);
1117 }
1118 template <>
1119 EIGEN_STRONG_INLINE void pstoreu<int>(int* to, const Packet16i& from) {
1120  EIGEN_DEBUG_UNALIGNED_STORE _mm512_storeu_epi32(to, from);
1121 }
1122 template <>
1123 EIGEN_STRONG_INLINE void pstoreu<int64_t>(int64_t* to, const Packet8l& from) {
1124  EIGEN_DEBUG_UNALIGNED_STORE _mm512_storeu_epi64(to, from);
1125 }
1126 template <>
1127 EIGEN_STRONG_INLINE void pstoreu<float>(float* to, const Packet16f& from, uint16_t umask) {
1128  __mmask16 mask = static_cast<__mmask16>(umask);
1129  EIGEN_DEBUG_UNALIGNED_STORE return _mm512_mask_storeu_ps(to, mask, from);
1130 }
1131 template <>
1132 EIGEN_STRONG_INLINE void pstoreu<double>(double* to, const Packet8d& from, uint8_t umask) {
1133  __mmask8 mask = static_cast<__mmask8>(umask);
1134  EIGEN_DEBUG_UNALIGNED_STORE return _mm512_mask_storeu_pd(to, mask, from);
1135 }
1136 
1137 template <typename Scalar, typename Packet>
1138 EIGEN_DEVICE_FUNC inline Packet pgather(const Packet& src, const Scalar* from, Index stride,
1139  typename unpacket_traits<Packet>::mask_t umask);
1140 template <>
1141 EIGEN_DEVICE_FUNC inline Packet16f pgather<float, Packet16f>(const Packet16f& src, const float* from, Index stride,
1142  uint16_t umask) {
1143  Packet16i stride_vector = _mm512_set1_epi32(convert_index<int>(stride));
1144  Packet16i stride_multiplier = _mm512_set_epi32(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
1145  Packet16i indices = _mm512_mullo_epi32(stride_vector, stride_multiplier);
1146  __mmask16 mask = static_cast<__mmask16>(umask);
1147 
1148  return _mm512_mask_i32gather_ps(src, mask, indices, from, 4);
1149 }
1150 template <>
1151 EIGEN_DEVICE_FUNC inline Packet8d pgather<double, Packet8d>(const Packet8d& src, const double* from, Index stride,
1152  uint8_t umask) {
1153  Packet8i stride_vector = _mm256_set1_epi32(convert_index<int>(stride));
1154  Packet8i stride_multiplier = _mm256_set_epi32(7, 6, 5, 4, 3, 2, 1, 0);
1155  Packet8i indices = _mm256_mullo_epi32(stride_vector, stride_multiplier);
1156  __mmask8 mask = static_cast<__mmask8>(umask);
1157 
1158  return _mm512_mask_i32gather_pd(src, mask, indices, from, 8);
1159 }
1160 
1161 template <>
1162 EIGEN_DEVICE_FUNC inline Packet16f pgather<float, Packet16f>(const float* from, Index stride) {
1163  Packet16i stride_vector = _mm512_set1_epi32(convert_index<int>(stride));
1164  Packet16i stride_multiplier = _mm512_set_epi32(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
1165  Packet16i indices = _mm512_mullo_epi32(stride_vector, stride_multiplier);
1166 
1167  return _mm512_i32gather_ps(indices, from, 4);
1168 }
1169 template <>
1170 EIGEN_DEVICE_FUNC inline Packet8d pgather<double, Packet8d>(const double* from, Index stride) {
1171  Packet8i stride_vector = _mm256_set1_epi32(convert_index<int>(stride));
1172  Packet8i stride_multiplier = _mm256_set_epi32(7, 6, 5, 4, 3, 2, 1, 0);
1173  Packet8i indices = _mm256_mullo_epi32(stride_vector, stride_multiplier);
1174 
1175  return _mm512_i32gather_pd(indices, from, 8);
1176 }
1177 template <>
1178 EIGEN_DEVICE_FUNC inline Packet8l pgather<int64_t, Packet8l>(const int64_t* from, Index stride) {
1179  Packet8i stride_vector = _mm256_set1_epi32(convert_index<int>(stride));
1180  Packet8i stride_multiplier = _mm256_set_epi32(7, 6, 5, 4, 3, 2, 1, 0);
1181  Packet8i indices = _mm256_mullo_epi32(stride_vector, stride_multiplier);
1182 
1183  return _mm512_i32gather_epi64(indices, from, 8);
1184 }
1185 template <>
1186 EIGEN_DEVICE_FUNC inline Packet16i pgather<int, Packet16i>(const int* from, Index stride) {
1187  Packet16i stride_vector = _mm512_set1_epi32(convert_index<int>(stride));
1188  Packet16i stride_multiplier = _mm512_set_epi32(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
1189  Packet16i indices = _mm512_mullo_epi32(stride_vector, stride_multiplier);
1190  return _mm512_i32gather_epi32(indices, from, 4);
1191 }
1192 
1193 template <typename Scalar, typename Packet>
1194 EIGEN_DEVICE_FUNC inline void pscatter(Scalar* to, const Packet& from, Index stride,
1195  typename unpacket_traits<Packet>::mask_t umask);
1196 template <>
1197 EIGEN_DEVICE_FUNC inline void pscatter<float, Packet16f>(float* to, const Packet16f& from, Index stride,
1198  uint16_t umask) {
1199  Packet16i stride_vector = _mm512_set1_epi32(convert_index<int>(stride));
1200  Packet16i stride_multiplier = _mm512_set_epi32(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
1201  Packet16i indices = _mm512_mullo_epi32(stride_vector, stride_multiplier);
1202  __mmask16 mask = static_cast<__mmask16>(umask);
1203  _mm512_mask_i32scatter_ps(to, mask, indices, from, 4);
1204 }
1205 template <>
1206 EIGEN_DEVICE_FUNC inline void pscatter<double, Packet8d>(double* to, const Packet8d& from, Index stride,
1207  uint8_t umask) {
1208  Packet8i stride_vector = _mm256_set1_epi32(convert_index<int>(stride));
1209  Packet8i stride_multiplier = _mm256_set_epi32(7, 6, 5, 4, 3, 2, 1, 0);
1210  Packet8i indices = _mm256_mullo_epi32(stride_vector, stride_multiplier);
1211  __mmask8 mask = static_cast<__mmask8>(umask);
1212  _mm512_mask_i32scatter_pd(to, mask, indices, from, 8);
1213 }
1214 template <>
1215 EIGEN_DEVICE_FUNC inline void pscatter<float, Packet16f>(float* to, const Packet16f& from, Index stride) {
1216  Packet16i stride_vector = _mm512_set1_epi32(convert_index<int>(stride));
1217  Packet16i stride_multiplier = _mm512_set_epi32(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
1218  Packet16i indices = _mm512_mullo_epi32(stride_vector, stride_multiplier);
1219  _mm512_i32scatter_ps(to, indices, from, 4);
1220 }
1221 template <>
1222 EIGEN_DEVICE_FUNC inline void pscatter<double, Packet8d>(double* to, const Packet8d& from, Index stride) {
1223  Packet8i stride_vector = _mm256_set1_epi32(convert_index<int>(stride));
1224  Packet8i stride_multiplier = _mm256_set_epi32(7, 6, 5, 4, 3, 2, 1, 0);
1225  Packet8i indices = _mm256_mullo_epi32(stride_vector, stride_multiplier);
1226  _mm512_i32scatter_pd(to, indices, from, 8);
1227 }
1228 template <>
1229 EIGEN_DEVICE_FUNC inline void pscatter<int64_t, Packet8l>(int64_t* to, const Packet8l& from, Index stride) {
1230  Packet8i stride_vector = _mm256_set1_epi32(convert_index<int>(stride));
1231  Packet8i stride_multiplier = _mm256_set_epi32(7, 6, 5, 4, 3, 2, 1, 0);
1232  Packet8i indices = _mm256_mullo_epi32(stride_vector, stride_multiplier);
1233  _mm512_i32scatter_epi64(to, indices, from, 8);
1234 }
1235 template <>
1236 EIGEN_DEVICE_FUNC inline void pscatter<int, Packet16i>(int* to, const Packet16i& from, Index stride) {
1237  Packet16i stride_vector = _mm512_set1_epi32(convert_index<int>(stride));
1238  Packet16i stride_multiplier = _mm512_set_epi32(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
1239  Packet16i indices = _mm512_mullo_epi32(stride_vector, stride_multiplier);
1240  _mm512_i32scatter_epi32(to, indices, from, 4);
1241 }
1242 
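// pgather/pscatter turn an element stride into a vector of lane indices for
// the hardware gather/scatter instructions; the trailing argument is the
// scale in bytes (4 for 32-bit, 8 for 64-bit elements). Illustrative use,
// reading one row of a column-major rows x cols matrix (variables assumed):
//
//   Packet16f row = pgather<float, Packet16f>(data, rows);  // data[0], data[rows], ...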
1243 template <>
1244 EIGEN_STRONG_INLINE void pstore1<Packet16f>(float* to, const float& a) {
1245  Packet16f pa = pset1<Packet16f>(a);
1246  pstore(to, pa);
1247 }
1248 template <>
1249 EIGEN_STRONG_INLINE void pstore1<Packet8d>(double* to, const double& a) {
1250  Packet8d pa = pset1<Packet8d>(a);
1251  pstore(to, pa);
1252 }
1253 template <>
1254 EIGEN_STRONG_INLINE void pstore1<Packet16i>(int* to, const int& a) {
1255  Packet16i pa = pset1<Packet16i>(a);
1256  pstore(to, pa);
1257 }
1258 template <>
1259 EIGEN_STRONG_INLINE void pstore1<Packet8l>(int64_t* to, const int64_t& a) {
1260  Packet8l pa = pset1<Packet8l>(a);
1261  pstore(to, pa);
1262 }
1263 
1264 template <>
1265 EIGEN_STRONG_INLINE void prefetch<float>(const float* addr) {
1266  _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0);
1267 }
1268 template <>
1269 EIGEN_STRONG_INLINE void prefetch<double>(const double* addr) {
1270  _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0);
1271 }
1272 template <>
1273 EIGEN_STRONG_INLINE void prefetch<int>(const int* addr) {
1274  _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0);
1275 }
1276 
1277 template <>
1278 EIGEN_STRONG_INLINE float pfirst<Packet16f>(const Packet16f& a) {
1279  return _mm512_cvtss_f32(a);
1280 }
1281 template <>
1282 EIGEN_STRONG_INLINE double pfirst<Packet8d>(const Packet8d& a) {
1283  return _mm512_cvtsd_f64(a);
1284 }
1285 template <>
1286 EIGEN_STRONG_INLINE int64_t pfirst<Packet8l>(const Packet8l& a) {
1287  int64_t x = _mm_extract_epi64_0(_mm512_extracti32x4_epi32(a, 0));
1288  return x;
1289 }
1290 template <>
1291 EIGEN_STRONG_INLINE int pfirst<Packet16i>(const Packet16i& a) {
1292 #if EIGEN_GNUC_STRICT_LESS_THAN(11, 0, 0)
1293  return _mm_cvtsi128_si32(_mm512_castsi512_si128(a));
1294 #else
1295  return _mm512_cvtsi512_si32(a);
1296 #endif
1297 }
1298 
1299 template <>
1300 EIGEN_STRONG_INLINE Packet16f preverse(const Packet16f& a) {
1301  return _mm512_permutexvar_ps(_mm512_set_epi32(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15), a);
1302 }
1303 
1304 template <>
1305 EIGEN_STRONG_INLINE Packet8d preverse(const Packet8d& a) {
1306  return _mm512_permutexvar_pd(_mm512_set_epi32(0, 0, 0, 1, 0, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7), a);
1307 }
1308 
1309 template <>
1310 EIGEN_STRONG_INLINE Packet16i preverse(const Packet16i& a) {
1311  return _mm512_permutexvar_epi32(_mm512_set_epi32(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15), a);
1312 }
1313 
1314 template <>
1315 EIGEN_STRONG_INLINE Packet8l preverse(const Packet8l& a) {
1316  return _mm512_permutexvar_epi64(_mm512_set_epi64(0, 1, 2, 3, 4, 5, 6, 7), a);
1317 }
1318 
1319 template <>
1320 EIGEN_STRONG_INLINE Packet16f pabs(const Packet16f& a) {
1321  // _mm512_abs_ps intrinsic not found, so hack around it
1322  return _mm512_castsi512_ps(_mm512_and_si512(_mm512_castps_si512(a), _mm512_set1_epi32(0x7fffffff)));
1323 }
1324 template <>
1325 EIGEN_STRONG_INLINE Packet8d pabs(const Packet8d& a) {
1326  // _mm512_abs_pd intrinsic not found, so hack around it
1327  return _mm512_castsi512_pd(_mm512_and_si512(_mm512_castpd_si512(a), _mm512_set1_epi64(0x7fffffffffffffff)));
1328 }
1329 template <>
1330 EIGEN_STRONG_INLINE Packet16i pabs(const Packet16i& a) {
1331  return _mm512_abs_epi32(a);
1332 }
1333 template <>
1334 EIGEN_STRONG_INLINE Packet8l pabs(const Packet8l& a) {
1335  return _mm512_abs_epi64(a);
1336 }
1337 
1338 template <>
1339 EIGEN_STRONG_INLINE Packet16h psignbit(const Packet16h& a) {
1340  return _mm256_srai_epi16(a, 15);
1341 }
1342 template <>
1343 EIGEN_STRONG_INLINE Packet16bf psignbit(const Packet16bf& a) {
1344  return _mm256_srai_epi16(a, 15);
1345 }
1346 template <>
1347 EIGEN_STRONG_INLINE Packet16f psignbit(const Packet16f& a) {
1348  return _mm512_castsi512_ps(_mm512_srai_epi32(_mm512_castps_si512(a), 31));
1349 }
1350 template <>
1351 EIGEN_STRONG_INLINE Packet8d psignbit(const Packet8d& a) {
1352  return _mm512_castsi512_pd(_mm512_srai_epi64(_mm512_castpd_si512(a), 63));
1353 }
1354 
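// psignbit broadcasts the sign bit across each lane with an arithmetic right
// shift: the result is all-ones for negative values (including -0.0 and
// negative NaNs) and all-zeros otherwise.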
1355 template <>
1356 EIGEN_STRONG_INLINE Packet16f pfrexp<Packet16f>(const Packet16f& a, Packet16f& exponent) {
1357  return pfrexp_generic(a, exponent);
1358 }
1359 
1360 // Extract exponent without existence of Packet8l.
1361 template <>
1362 EIGEN_STRONG_INLINE Packet8d pfrexp_generic_get_biased_exponent(const Packet8d& a) {
1363  const Packet8d cst_exp_mask = pset1frombits<Packet8d>(static_cast<uint64_t>(0x7ff0000000000000ull));
1364 #ifdef EIGEN_VECTORIZE_AVX512DQ
1365  return _mm512_cvtepi64_pd(_mm512_srli_epi64(_mm512_castpd_si512(pand(a, cst_exp_mask)), 52));
1366 #else
1367  return _mm512_cvtepi32_pd(_mm512_cvtepi64_epi32(_mm512_srli_epi64(_mm512_castpd_si512(pand(a, cst_exp_mask)), 52)));
1368 #endif
1369 }
1370 
1371 template <>
1372 EIGEN_STRONG_INLINE Packet8d pfrexp<Packet8d>(const Packet8d& a, Packet8d& exponent) {
1373  return pfrexp_generic(a, exponent);
1374 }
1375 
1376 template <>
1377 EIGEN_STRONG_INLINE Packet16f pldexp<Packet16f>(const Packet16f& a, const Packet16f& exponent) {
1378  return pldexp_generic(a, exponent);
1379 }
1380 
1381 template <>
1382 EIGEN_STRONG_INLINE Packet8d pldexp<Packet8d>(const Packet8d& a, const Packet8d& exponent) {
1383  // Clamp exponent to [-2099, 2099]
1384  const Packet8d max_exponent = pset1<Packet8d>(2099.0);
1385  const Packet8i e = _mm512_cvtpd_epi32(pmin(pmax(exponent, pnegate(max_exponent)), max_exponent));
1386 
1387  // Split 2^e into four factors and multiply.
1388  const Packet8i bias = pset1<Packet8i>(1023);
1389  Packet8i b = parithmetic_shift_right<2>(e); // floor(e/4)
1390 
1391  // 2^b
1392  const Packet8i permute_idx = _mm256_setr_epi32(0, 4, 1, 5, 2, 6, 3, 7);
1393  Packet8i hi = _mm256_permutevar8x32_epi32(padd(b, bias), permute_idx);
1394  Packet8i lo = _mm256_slli_epi64(hi, 52);
1395  hi = _mm256_slli_epi64(_mm256_srli_epi64(hi, 32), 52);
1396  Packet8d c = _mm512_castsi512_pd(_mm512_inserti64x4(_mm512_castsi256_si512(lo), hi, 1));
1397  Packet8d out = pmul(pmul(pmul(a, c), c), c); // a * 2^(3b)
1398 
1399  // 2^(e - 3b)
1400  b = psub(psub(psub(e, b), b), b); // e - 3b
1401  hi = _mm256_permutevar8x32_epi32(padd(b, bias), permute_idx);
1402  lo = _mm256_slli_epi64(hi, 52);
1403  hi = _mm256_slli_epi64(_mm256_srli_epi64(hi, 32), 52);
1404  c = _mm512_castsi512_pd(_mm512_inserti64x4(_mm512_castsi256_si512(lo), hi, 1));
1405  out = pmul(out, c); // a * 2^e
1406  return out;
1407 }
1408 
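// Rationale for the Packet8d pldexp above: a single factor 2^b built from the
// exponent field needs b in [-1022, 1023], while the clamped e may reach
// +/-2099, so 2^e is applied as four factors. With b = e/4 rounded toward
// negative infinity, out = a * 2^b * 2^b * 2^b * 2^(e - 3b); every factor
// stays representable, e.g. e = 2099 gives b = 524 and a final factor 2^527.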
1409 #ifdef EIGEN_VECTORIZE_AVX512DQ
1410 // AVX512F does not define _mm512_extractf32x8_ps to extract __m256 from __m512
1411 #define EIGEN_EXTRACT_8f_FROM_16f(INPUT, OUTPUT) \
1412  __m256 OUTPUT##_0 = _mm512_extractf32x8_ps(INPUT, 0); \
1413  __m256 OUTPUT##_1 = _mm512_extractf32x8_ps(INPUT, 1)
1414 
1415 // AVX512F does not define _mm512_extracti32x8_epi32 to extract __m256i from __m512i
1416 #define EIGEN_EXTRACT_8i_FROM_16i(INPUT, OUTPUT) \
1417  __m256i OUTPUT##_0 = _mm512_extracti32x8_epi32(INPUT, 0); \
1418  __m256i OUTPUT##_1 = _mm512_extracti32x8_epi32(INPUT, 1)
1419 #else
1420 #define EIGEN_EXTRACT_8f_FROM_16f(INPUT, OUTPUT) \
1421  __m256 OUTPUT##_0 = _mm256_insertf128_ps(_mm256_castps128_ps256(_mm512_extractf32x4_ps(INPUT, 0)), \
1422  _mm512_extractf32x4_ps(INPUT, 1), 1); \
1423  __m256 OUTPUT##_1 = _mm256_insertf128_ps(_mm256_castps128_ps256(_mm512_extractf32x4_ps(INPUT, 2)), \
1424  _mm512_extractf32x4_ps(INPUT, 3), 1)
1425 
1426 #define EIGEN_EXTRACT_8i_FROM_16i(INPUT, OUTPUT) \
1427  __m256i OUTPUT##_0 = _mm256_insertf128_si256(_mm256_castsi128_si256(_mm512_extracti32x4_epi32(INPUT, 0)), \
1428  _mm512_extracti32x4_epi32(INPUT, 1), 1); \
1429  __m256i OUTPUT##_1 = _mm256_insertf128_si256(_mm256_castsi128_si256(_mm512_extracti32x4_epi32(INPUT, 2)), \
1430  _mm512_extracti32x4_epi32(INPUT, 3), 1)
1431 #endif
1432 
1433 #ifdef EIGEN_VECTORIZE_AVX512DQ
1434 #define EIGEN_INSERT_8f_INTO_16f(OUTPUT, INPUTA, INPUTB) \
1435  OUTPUT = _mm512_insertf32x8(_mm512_castps256_ps512(INPUTA), INPUTB, 1);
1436 
1437 #define EIGEN_INSERT_8i_INTO_16i(OUTPUT, INPUTA, INPUTB) \
1438  OUTPUT = _mm512_inserti32x8(_mm512_castsi256_si512(INPUTA), INPUTB, 1);
1439 #else
1440 #define EIGEN_INSERT_8f_INTO_16f(OUTPUT, INPUTA, INPUTB) \
1441  OUTPUT = _mm512_undefined_ps(); \
1442  OUTPUT = _mm512_insertf32x4(OUTPUT, _mm256_extractf128_ps(INPUTA, 0), 0); \
1443  OUTPUT = _mm512_insertf32x4(OUTPUT, _mm256_extractf128_ps(INPUTA, 1), 1); \
1444  OUTPUT = _mm512_insertf32x4(OUTPUT, _mm256_extractf128_ps(INPUTB, 0), 2); \
1445  OUTPUT = _mm512_insertf32x4(OUTPUT, _mm256_extractf128_ps(INPUTB, 1), 3);
1446 
1447 #define EIGEN_INSERT_8i_INTO_16i(OUTPUT, INPUTA, INPUTB) \
1448  OUTPUT = _mm512_undefined_epi32(); \
1449  OUTPUT = _mm512_inserti32x4(OUTPUT, _mm256_extractf128_si256(INPUTA, 0), 0); \
1450  OUTPUT = _mm512_inserti32x4(OUTPUT, _mm256_extractf128_si256(INPUTA, 1), 1); \
1451  OUTPUT = _mm512_inserti32x4(OUTPUT, _mm256_extractf128_si256(INPUTB, 0), 2); \
1452  OUTPUT = _mm512_inserti32x4(OUTPUT, _mm256_extractf128_si256(INPUTB, 1), 3);
1453 #endif
1454 
1455 template <>
1456 EIGEN_STRONG_INLINE float predux<Packet16f>(const Packet16f& a) {
1457 #ifdef EIGEN_VECTORIZE_AVX512DQ
1458  __m256 lane0 = _mm512_extractf32x8_ps(a, 0);
1459  __m256 lane1 = _mm512_extractf32x8_ps(a, 1);
1460  Packet8f x = _mm256_add_ps(lane0, lane1);
1461  return predux<Packet8f>(x);
1462 #else
1463  __m128 lane0 = _mm512_extractf32x4_ps(a, 0);
1464  __m128 lane1 = _mm512_extractf32x4_ps(a, 1);
1465  __m128 lane2 = _mm512_extractf32x4_ps(a, 2);
1466  __m128 lane3 = _mm512_extractf32x4_ps(a, 3);
1467  __m128 sum = _mm_add_ps(_mm_add_ps(lane0, lane1), _mm_add_ps(lane2, lane3));
1468  return predux<Packet4f>(sum);
1469 #endif
1470 }
1471 template <>
1472 EIGEN_STRONG_INLINE double predux<Packet8d>(const Packet8d& a) {
1473  __m256d lane0 = _mm512_extractf64x4_pd(a, 0);
1474  __m256d lane1 = _mm512_extractf64x4_pd(a, 1);
1475  __m256d sum = _mm256_add_pd(lane0, lane1);
1476  return predux<Packet4d>(sum);
1477 }
1478 
1479 template <>
1480 EIGEN_STRONG_INLINE int64_t predux<Packet8l>(const Packet8l& a) {
1481  return _mm512_reduce_add_epi64(a);
1482 }
1483 
1484 template <>
1485 EIGEN_STRONG_INLINE int predux<Packet16i>(const Packet16i& a) {
1486  return _mm512_reduce_add_epi32(a);
1487 }
1488 
1489 template <>
1490 EIGEN_STRONG_INLINE Packet8f predux_half_dowto4<Packet16f>(const Packet16f& a) {
1491 #ifdef EIGEN_VECTORIZE_AVX512DQ
1492  __m256 lane0 = _mm512_extractf32x8_ps(a, 0);
1493  __m256 lane1 = _mm512_extractf32x8_ps(a, 1);
1494  return _mm256_add_ps(lane0, lane1);
1495 #else
1496  __m128 lane0 = _mm512_extractf32x4_ps(a, 0);
1497  __m128 lane1 = _mm512_extractf32x4_ps(a, 1);
1498  __m128 lane2 = _mm512_extractf32x4_ps(a, 2);
1499  __m128 lane3 = _mm512_extractf32x4_ps(a, 3);
1500  __m128 sum0 = _mm_add_ps(lane0, lane2);
1501  __m128 sum1 = _mm_add_ps(lane1, lane3);
1502  return _mm256_insertf128_ps(_mm256_castps128_ps256(sum0), sum1, 1);
1503 #endif
1504 }
1505 template <>
1506 EIGEN_STRONG_INLINE Packet4d predux_half_dowto4<Packet8d>(const Packet8d& a) {
1507  __m256d lane0 = _mm512_extractf64x4_pd(a, 0);
1508  __m256d lane1 = _mm512_extractf64x4_pd(a, 1);
1509  return _mm256_add_pd(lane0, lane1);
1510 }
1511 template <>
1512 EIGEN_STRONG_INLINE Packet8i predux_half_dowto4<Packet16i>(const Packet16i& a) {
1513 #ifdef EIGEN_VECTORIZE_AVX512DQ
1514  __m256i lane0 = _mm512_extracti32x8_epi32(a, 0);
1515  __m256i lane1 = _mm512_extracti32x8_epi32(a, 1);
1516  return _mm256_add_epi32(lane0, lane1);
1517 #else
1518  __m128i lane0 = _mm512_extracti32x4_epi32(a, 0);
1519  __m128i lane1 = _mm512_extracti32x4_epi32(a, 1);
1520  __m128i lane2 = _mm512_extracti32x4_epi32(a, 2);
1521  __m128i lane3 = _mm512_extracti32x4_epi32(a, 3);
1522  __m128i sum0 = _mm_add_epi32(lane0, lane2);
1523  __m128i sum1 = _mm_add_epi32(lane1, lane3);
1524  return _mm256_inserti128_si256(_mm256_castsi128_si256(sum0), sum1, 1);
1525 #endif
1526 }
1527 
1528 template <>
1529 EIGEN_STRONG_INLINE Packet4l predux_half_dowto4<Packet8l>(const Packet8l& a) {
1530  __m256i lane0 = _mm512_extracti64x4_epi64(a, 0);
1531  __m256i lane1 = _mm512_extracti64x4_epi64(a, 1);
1532  return _mm256_add_epi64(lane0, lane1);
1533 }
1534 
1535 template <>
1536 EIGEN_STRONG_INLINE float predux_mul<Packet16f>(const Packet16f& a) {
1537 // #ifdef EIGEN_VECTORIZE_AVX512DQ
1538 #if 0
1539  Packet8f lane0 = _mm512_extractf32x8_ps(a, 0);
1540  Packet8f lane1 = _mm512_extractf32x8_ps(a, 1);
1541  Packet8f res = pmul(lane0, lane1);
1542  res = pmul(res, _mm256_permute2f128_ps(res, res, 1));
1543  res = pmul(res, _mm_permute_ps(res, _MM_SHUFFLE(0, 0, 3, 2)));
1544  return pfirst(pmul(res, _mm_permute_ps(res, _MM_SHUFFLE(0, 0, 0, 1))));
1545 #else
1546  __m128 lane0 = _mm512_extractf32x4_ps(a, 0);
1547  __m128 lane1 = _mm512_extractf32x4_ps(a, 1);
1548  __m128 lane2 = _mm512_extractf32x4_ps(a, 2);
1549  __m128 lane3 = _mm512_extractf32x4_ps(a, 3);
1550  __m128 res = pmul(pmul(lane0, lane1), pmul(lane2, lane3));
1551  res = pmul(res, _mm_permute_ps(res, _MM_SHUFFLE(0, 0, 3, 2)));
1552  return pfirst(pmul(res, _mm_permute_ps(res, _MM_SHUFFLE(0, 0, 0, 1))));
1553 #endif
1554 }
1555 template <>
1556 EIGEN_STRONG_INLINE double predux_mul<Packet8d>(const Packet8d& a) {
1557  __m256d lane0 = _mm512_extractf64x4_pd(a, 0);
1558  __m256d lane1 = _mm512_extractf64x4_pd(a, 1);
1559  __m256d res = pmul(lane0, lane1);
1560  res = pmul(res, _mm256_permute2f128_pd(res, res, 1));
1561  return pfirst(pmul(res, _mm256_shuffle_pd(res, res, 1)));
1562 }
1563 template <>
1564 EIGEN_STRONG_INLINE int predux_mul<Packet16i>(const Packet16i& a) {
1565  return _mm512_reduce_mul_epi32(a);
1566 }
1567 
1568 #if EIGEN_COMP_MSVC
1569 // MSVC's _mm512_reduce_mul_epi64 is borked, at least up to and including 1939.
1570 // alignas(64) int64_t data[] = { 1,1,-1,-1,1,-1,-1,-1 };
1571 // int64_t out = _mm512_reduce_mul_epi64(_mm512_load_epi64(data));
1572 // produces garbage: 4294967295. It seems to happen whenever the output is supposed to be negative.
1573 // Fall back to a manual approach:
1574 template <>
1575 EIGEN_STRONG_INLINE int64_t predux_mul<Packet8l>(const Packet8l& a) {
1576  Packet4l lane0 = _mm512_extracti64x4_epi64(a, 0);
1577  Packet4l lane1 = _mm512_extracti64x4_epi64(a, 1);
1578  Packet4l res = pmul(lane0, lane1);
1579  res = pmul(res, Packet4l(_mm256_permute2x128_si256(res, res, 1)));
1580  res = pmul(res, Packet4l(_mm256_shuffle_epi32(res, 0xE)));
1581  return pfirst(res);
1582 }
1583 #else
1584 template <>
1585 EIGEN_STRONG_INLINE int64_t predux_mul<Packet8l>(const Packet8l& a) {
1586  return _mm512_reduce_mul_epi64(a);
1587 }
1588 #endif
1589 
1590 template <>
1591 EIGEN_STRONG_INLINE float predux_min<Packet16f>(const Packet16f& a) {
1592  __m128 lane0 = _mm512_extractf32x4_ps(a, 0);
1593  __m128 lane1 = _mm512_extractf32x4_ps(a, 1);
1594  __m128 lane2 = _mm512_extractf32x4_ps(a, 2);
1595  __m128 lane3 = _mm512_extractf32x4_ps(a, 3);
1596  __m128 res = _mm_min_ps(_mm_min_ps(lane0, lane1), _mm_min_ps(lane2, lane3));
1597  res = _mm_min_ps(res, _mm_permute_ps(res, _MM_SHUFFLE(0, 0, 3, 2)));
1598  return pfirst(_mm_min_ps(res, _mm_permute_ps(res, _MM_SHUFFLE(0, 0, 0, 1))));
1599 }
1600 template <>
1601 EIGEN_STRONG_INLINE double predux_min<Packet8d>(const Packet8d& a) {
1602  __m256d lane0 = _mm512_extractf64x4_pd(a, 0);
1603  __m256d lane1 = _mm512_extractf64x4_pd(a, 1);
1604  __m256d res = _mm256_min_pd(lane0, lane1);
1605  res = _mm256_min_pd(res, _mm256_permute2f128_pd(res, res, 1));
1606  return pfirst(_mm256_min_pd(res, _mm256_shuffle_pd(res, res, 1)));
1607 }
1608 template <>
1609 EIGEN_STRONG_INLINE int predux_min<Packet16i>(const Packet16i& a) {
1610  return _mm512_reduce_min_epi32(a);
1611 }
1612 template <>
1613 EIGEN_STRONG_INLINE int64_t predux_min<Packet8l>(const Packet8l& a) {
1614  return _mm512_reduce_min_epi64(a);
1615 }
1616 
1617 template <>
1618 EIGEN_STRONG_INLINE float predux_max<Packet16f>(const Packet16f& a) {
1619  __m128 lane0 = _mm512_extractf32x4_ps(a, 0);
1620  __m128 lane1 = _mm512_extractf32x4_ps(a, 1);
1621  __m128 lane2 = _mm512_extractf32x4_ps(a, 2);
1622  __m128 lane3 = _mm512_extractf32x4_ps(a, 3);
1623  __m128 res = _mm_max_ps(_mm_max_ps(lane0, lane1), _mm_max_ps(lane2, lane3));
1624  res = _mm_max_ps(res, _mm_permute_ps(res, _MM_SHUFFLE(0, 0, 3, 2)));
1625  return pfirst(_mm_max_ps(res, _mm_permute_ps(res, _MM_SHUFFLE(0, 0, 0, 1))));
1626 }
1627 
1628 template <>
1629 EIGEN_STRONG_INLINE double predux_max<Packet8d>(const Packet8d& a) {
1630  __m256d lane0 = _mm512_extractf64x4_pd(a, 0);
1631  __m256d lane1 = _mm512_extractf64x4_pd(a, 1);
1632  __m256d res = _mm256_max_pd(lane0, lane1);
1633  res = _mm256_max_pd(res, _mm256_permute2f128_pd(res, res, 1));
1634  return pfirst(_mm256_max_pd(res, _mm256_shuffle_pd(res, res, 1)));
1635 }
1636 template <>
1637 EIGEN_STRONG_INLINE int predux_max<Packet16i>(const Packet16i& a) {
1638  return _mm512_reduce_max_epi32(a);
1639 }
1640 template <>
1641 EIGEN_STRONG_INLINE int64_t predux_max<Packet8l>(const Packet8l& a) {
1642  return _mm512_reduce_max_epi64(a);
1643 }
1644 
1645 template <>
1646 EIGEN_STRONG_INLINE bool predux_any(const Packet16f& a) {
1647  return _mm512_reduce_or_epi32(_mm512_castps_si512(a)) != 0;
1648 }
1649 
1650 template <>
1651 EIGEN_STRONG_INLINE bool predux_any(const Packet16i& a) {
1652  return _mm512_reduce_or_epi32(a) != 0;
1653 }
1654 
1655 template <>
1656 EIGEN_STRONG_INLINE bool predux_any(const Packet8d& a) {
1657  return _mm512_reduce_or_epi64(_mm512_castpd_si512(a)) != 0;
1658 }
1659 
1660 template <>
1661 EIGEN_STRONG_INLINE bool predux_any(const Packet8l& a) {
1662  return _mm512_reduce_or_epi64(a) != 0;
1663 }
1664 
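// Reduction conventions used above: predux sums all lanes, predux_mul/min/max
// fold the corresponding binary operation, and predux_any reports whether any
// lane has a non-zero bit (its float/double arguments are expected to be
// pcmp_* style masks). E.g. predux(plset<Packet16i>(0)) == 0 + 1 + ... + 15 == 120.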
1665 #define PACK_OUTPUT(OUTPUT, INPUT, INDEX, STRIDE) \
1666  EIGEN_INSERT_8f_INTO_16f(OUTPUT[INDEX], INPUT[INDEX], INPUT[INDEX + STRIDE]);
1667 
1668 EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet16f, 16>& kernel) {
1669  __m512 T0 = _mm512_unpacklo_ps(kernel.packet[0], kernel.packet[1]);
1670  __m512 T1 = _mm512_unpackhi_ps(kernel.packet[0], kernel.packet[1]);
1671  __m512 T2 = _mm512_unpacklo_ps(kernel.packet[2], kernel.packet[3]);
1672  __m512 T3 = _mm512_unpackhi_ps(kernel.packet[2], kernel.packet[3]);
1673  __m512 T4 = _mm512_unpacklo_ps(kernel.packet[4], kernel.packet[5]);
1674  __m512 T5 = _mm512_unpackhi_ps(kernel.packet[4], kernel.packet[5]);
1675  __m512 T6 = _mm512_unpacklo_ps(kernel.packet[6], kernel.packet[7]);
1676  __m512 T7 = _mm512_unpackhi_ps(kernel.packet[6], kernel.packet[7]);
1677  __m512 T8 = _mm512_unpacklo_ps(kernel.packet[8], kernel.packet[9]);
1678  __m512 T9 = _mm512_unpackhi_ps(kernel.packet[8], kernel.packet[9]);
1679  __m512 T10 = _mm512_unpacklo_ps(kernel.packet[10], kernel.packet[11]);
1680  __m512 T11 = _mm512_unpackhi_ps(kernel.packet[10], kernel.packet[11]);
1681  __m512 T12 = _mm512_unpacklo_ps(kernel.packet[12], kernel.packet[13]);
1682  __m512 T13 = _mm512_unpackhi_ps(kernel.packet[12], kernel.packet[13]);
1683  __m512 T14 = _mm512_unpacklo_ps(kernel.packet[14], kernel.packet[15]);
1684  __m512 T15 = _mm512_unpackhi_ps(kernel.packet[14], kernel.packet[15]);
1685  __m512 S0 = _mm512_shuffle_ps(T0, T2, _MM_SHUFFLE(1, 0, 1, 0));
1686  __m512 S1 = _mm512_shuffle_ps(T0, T2, _MM_SHUFFLE(3, 2, 3, 2));
1687  __m512 S2 = _mm512_shuffle_ps(T1, T3, _MM_SHUFFLE(1, 0, 1, 0));
1688  __m512 S3 = _mm512_shuffle_ps(T1, T3, _MM_SHUFFLE(3, 2, 3, 2));
1689  __m512 S4 = _mm512_shuffle_ps(T4, T6, _MM_SHUFFLE(1, 0, 1, 0));
1690  __m512 S5 = _mm512_shuffle_ps(T4, T6, _MM_SHUFFLE(3, 2, 3, 2));
1691  __m512 S6 = _mm512_shuffle_ps(T5, T7, _MM_SHUFFLE(1, 0, 1, 0));
1692  __m512 S7 = _mm512_shuffle_ps(T5, T7, _MM_SHUFFLE(3, 2, 3, 2));
1693  __m512 S8 = _mm512_shuffle_ps(T8, T10, _MM_SHUFFLE(1, 0, 1, 0));
1694  __m512 S9 = _mm512_shuffle_ps(T8, T10, _MM_SHUFFLE(3, 2, 3, 2));
1695  __m512 S10 = _mm512_shuffle_ps(T9, T11, _MM_SHUFFLE(1, 0, 1, 0));
1696  __m512 S11 = _mm512_shuffle_ps(T9, T11, _MM_SHUFFLE(3, 2, 3, 2));
1697  __m512 S12 = _mm512_shuffle_ps(T12, T14, _MM_SHUFFLE(1, 0, 1, 0));
1698  __m512 S13 = _mm512_shuffle_ps(T12, T14, _MM_SHUFFLE(3, 2, 3, 2));
1699  __m512 S14 = _mm512_shuffle_ps(T13, T15, _MM_SHUFFLE(1, 0, 1, 0));
1700  __m512 S15 = _mm512_shuffle_ps(T13, T15, _MM_SHUFFLE(3, 2, 3, 2));
1701 
1702  EIGEN_EXTRACT_8f_FROM_16f(S0, S0);
1703  EIGEN_EXTRACT_8f_FROM_16f(S1, S1);
1704  EIGEN_EXTRACT_8f_FROM_16f(S2, S2);
1705  EIGEN_EXTRACT_8f_FROM_16f(S3, S3);
1706  EIGEN_EXTRACT_8f_FROM_16f(S4, S4);
1707  EIGEN_EXTRACT_8f_FROM_16f(S5, S5);
1708  EIGEN_EXTRACT_8f_FROM_16f(S6, S6);
1709  EIGEN_EXTRACT_8f_FROM_16f(S7, S7);
1710  EIGEN_EXTRACT_8f_FROM_16f(S8, S8);
1711  EIGEN_EXTRACT_8f_FROM_16f(S9, S9);
1712  EIGEN_EXTRACT_8f_FROM_16f(S10, S10);
1713  EIGEN_EXTRACT_8f_FROM_16f(S11, S11);
1714  EIGEN_EXTRACT_8f_FROM_16f(S12, S12);
1715  EIGEN_EXTRACT_8f_FROM_16f(S13, S13);
1716  EIGEN_EXTRACT_8f_FROM_16f(S14, S14);
1717  EIGEN_EXTRACT_8f_FROM_16f(S15, S15);
1718 
1719  PacketBlock<Packet8f, 32> tmp;
1720 
1721  tmp.packet[0] = _mm256_permute2f128_ps(S0_0, S4_0, 0x20);
1722  tmp.packet[1] = _mm256_permute2f128_ps(S1_0, S5_0, 0x20);
1723  tmp.packet[2] = _mm256_permute2f128_ps(S2_0, S6_0, 0x20);
1724  tmp.packet[3] = _mm256_permute2f128_ps(S3_0, S7_0, 0x20);
1725  tmp.packet[4] = _mm256_permute2f128_ps(S0_0, S4_0, 0x31);
1726  tmp.packet[5] = _mm256_permute2f128_ps(S1_0, S5_0, 0x31);
1727  tmp.packet[6] = _mm256_permute2f128_ps(S2_0, S6_0, 0x31);
1728  tmp.packet[7] = _mm256_permute2f128_ps(S3_0, S7_0, 0x31);
1729 
1730  tmp.packet[8] = _mm256_permute2f128_ps(S0_1, S4_1, 0x20);
1731  tmp.packet[9] = _mm256_permute2f128_ps(S1_1, S5_1, 0x20);
1732  tmp.packet[10] = _mm256_permute2f128_ps(S2_1, S6_1, 0x20);
1733  tmp.packet[11] = _mm256_permute2f128_ps(S3_1, S7_1, 0x20);
1734  tmp.packet[12] = _mm256_permute2f128_ps(S0_1, S4_1, 0x31);
1735  tmp.packet[13] = _mm256_permute2f128_ps(S1_1, S5_1, 0x31);
1736  tmp.packet[14] = _mm256_permute2f128_ps(S2_1, S6_1, 0x31);
1737  tmp.packet[15] = _mm256_permute2f128_ps(S3_1, S7_1, 0x31);
1738 
1739  // Second set of _m256 outputs
1740  tmp.packet[16] = _mm256_permute2f128_ps(S8_0, S12_0, 0x20);
1741  tmp.packet[17] = _mm256_permute2f128_ps(S9_0, S13_0, 0x20);
1742  tmp.packet[18] = _mm256_permute2f128_ps(S10_0, S14_0, 0x20);
1743  tmp.packet[19] = _mm256_permute2f128_ps(S11_0, S15_0, 0x20);
1744  tmp.packet[20] = _mm256_permute2f128_ps(S8_0, S12_0, 0x31);
1745  tmp.packet[21] = _mm256_permute2f128_ps(S9_0, S13_0, 0x31);
1746  tmp.packet[22] = _mm256_permute2f128_ps(S10_0, S14_0, 0x31);
1747  tmp.packet[23] = _mm256_permute2f128_ps(S11_0, S15_0, 0x31);
1748 
1749  tmp.packet[24] = _mm256_permute2f128_ps(S8_1, S12_1, 0x20);
1750  tmp.packet[25] = _mm256_permute2f128_ps(S9_1, S13_1, 0x20);
1751  tmp.packet[26] = _mm256_permute2f128_ps(S10_1, S14_1, 0x20);
1752  tmp.packet[27] = _mm256_permute2f128_ps(S11_1, S15_1, 0x20);
1753  tmp.packet[28] = _mm256_permute2f128_ps(S8_1, S12_1, 0x31);
1754  tmp.packet[29] = _mm256_permute2f128_ps(S9_1, S13_1, 0x31);
1755  tmp.packet[30] = _mm256_permute2f128_ps(S10_1, S14_1, 0x31);
1756  tmp.packet[31] = _mm256_permute2f128_ps(S11_1, S15_1, 0x31);
1757 
1758  // Pack them into the output
1759  PACK_OUTPUT(kernel.packet, tmp.packet, 0, 16);
1760  PACK_OUTPUT(kernel.packet, tmp.packet, 1, 16);
1761  PACK_OUTPUT(kernel.packet, tmp.packet, 2, 16);
1762  PACK_OUTPUT(kernel.packet, tmp.packet, 3, 16);
1763 
1764  PACK_OUTPUT(kernel.packet, tmp.packet, 4, 16);
1765  PACK_OUTPUT(kernel.packet, tmp.packet, 5, 16);
1766  PACK_OUTPUT(kernel.packet, tmp.packet, 6, 16);
1767  PACK_OUTPUT(kernel.packet, tmp.packet, 7, 16);
1768 
1769  PACK_OUTPUT(kernel.packet, tmp.packet, 8, 16);
1770  PACK_OUTPUT(kernel.packet, tmp.packet, 9, 16);
1771  PACK_OUTPUT(kernel.packet, tmp.packet, 10, 16);
1772  PACK_OUTPUT(kernel.packet, tmp.packet, 11, 16);
1773 
1774  PACK_OUTPUT(kernel.packet, tmp.packet, 12, 16);
1775  PACK_OUTPUT(kernel.packet, tmp.packet, 13, 16);
1776  PACK_OUTPUT(kernel.packet, tmp.packet, 14, 16);
1777  PACK_OUTPUT(kernel.packet, tmp.packet, 15, 16);
1778 }
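// The 16x16 transpose above proceeds in stages: _mm512_unpack{lo,hi}_ps
// interleaves row pairs, _mm512_shuffle_ps assembles 4x4 blocks, and the
// 256-bit extract/permute/insert macros move whole blocks into place. The
// narrower kernels below follow the same pattern with fewer stages.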
1779 #define PACK_OUTPUT_2(OUTPUT, INPUT, INDEX, STRIDE) \
1780  EIGEN_INSERT_8f_INTO_16f(OUTPUT[INDEX], INPUT[2 * INDEX], INPUT[2 * INDEX + STRIDE]);
1781 
1782 EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet16f, 8>& kernel) {
1783  __m512 T0 = _mm512_unpacklo_ps(kernel.packet[0], kernel.packet[1]);
1784  __m512 T1 = _mm512_unpackhi_ps(kernel.packet[0], kernel.packet[1]);
1785  __m512 T2 = _mm512_unpacklo_ps(kernel.packet[2], kernel.packet[3]);
1786  __m512 T3 = _mm512_unpackhi_ps(kernel.packet[2], kernel.packet[3]);
1787  __m512 T4 = _mm512_unpacklo_ps(kernel.packet[4], kernel.packet[5]);
1788  __m512 T5 = _mm512_unpackhi_ps(kernel.packet[4], kernel.packet[5]);
1789  __m512 T6 = _mm512_unpacklo_ps(kernel.packet[6], kernel.packet[7]);
1790  __m512 T7 = _mm512_unpackhi_ps(kernel.packet[6], kernel.packet[7]);
1791 
1792  kernel.packet[0] = _mm512_castpd_ps(_mm512_unpacklo_pd(_mm512_castps_pd(T0), _mm512_castps_pd(T2)));
1793  kernel.packet[1] = _mm512_castpd_ps(_mm512_unpackhi_pd(_mm512_castps_pd(T0), _mm512_castps_pd(T2)));
1794  kernel.packet[2] = _mm512_castpd_ps(_mm512_unpacklo_pd(_mm512_castps_pd(T1), _mm512_castps_pd(T3)));
1795  kernel.packet[3] = _mm512_castpd_ps(_mm512_unpackhi_pd(_mm512_castps_pd(T1), _mm512_castps_pd(T3)));
1796  kernel.packet[4] = _mm512_castpd_ps(_mm512_unpacklo_pd(_mm512_castps_pd(T4), _mm512_castps_pd(T6)));
1797  kernel.packet[5] = _mm512_castpd_ps(_mm512_unpackhi_pd(_mm512_castps_pd(T4), _mm512_castps_pd(T6)));
1798  kernel.packet[6] = _mm512_castpd_ps(_mm512_unpacklo_pd(_mm512_castps_pd(T5), _mm512_castps_pd(T7)));
1799  kernel.packet[7] = _mm512_castpd_ps(_mm512_unpackhi_pd(_mm512_castps_pd(T5), _mm512_castps_pd(T7)));
1800 
1801  T0 = _mm512_shuffle_f32x4(kernel.packet[0], kernel.packet[4], 0x44);
1802  T1 = _mm512_shuffle_f32x4(kernel.packet[0], kernel.packet[4], 0xee);
1803  T2 = _mm512_shuffle_f32x4(kernel.packet[1], kernel.packet[5], 0x44);
1804  T3 = _mm512_shuffle_f32x4(kernel.packet[1], kernel.packet[5], 0xee);
1805  T4 = _mm512_shuffle_f32x4(kernel.packet[2], kernel.packet[6], 0x44);
1806  T5 = _mm512_shuffle_f32x4(kernel.packet[2], kernel.packet[6], 0xee);
1807  T6 = _mm512_shuffle_f32x4(kernel.packet[3], kernel.packet[7], 0x44);
1808  T7 = _mm512_shuffle_f32x4(kernel.packet[3], kernel.packet[7], 0xee);
1809 
1810  kernel.packet[0] = _mm512_shuffle_f32x4(T0, T2, 0x88);
1811  kernel.packet[2] = _mm512_shuffle_f32x4(T0, T2, 0xdd);
1812  kernel.packet[1] = _mm512_shuffle_f32x4(T4, T6, 0x88);
1813  kernel.packet[3] = _mm512_shuffle_f32x4(T4, T6, 0xdd);
1814  kernel.packet[4] = _mm512_shuffle_f32x4(T1, T3, 0x88);
1815  kernel.packet[6] = _mm512_shuffle_f32x4(T1, T3, 0xdd);
1816  kernel.packet[5] = _mm512_shuffle_f32x4(T5, T7, 0x88);
1817  kernel.packet[7] = _mm512_shuffle_f32x4(T5, T7, 0xdd);
1818 }
1819 
1820 EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet16f, 4>& kernel) {
1821  __m512 T0 = _mm512_unpacklo_ps(kernel.packet[0], kernel.packet[1]);
1822  __m512 T1 = _mm512_unpackhi_ps(kernel.packet[0], kernel.packet[1]);
1823  __m512 T2 = _mm512_unpacklo_ps(kernel.packet[2], kernel.packet[3]);
1824  __m512 T3 = _mm512_unpackhi_ps(kernel.packet[2], kernel.packet[3]);
1825 
1826  __m512 S0 = _mm512_shuffle_ps(T0, T2, _MM_SHUFFLE(1, 0, 1, 0));
1827  __m512 S1 = _mm512_shuffle_ps(T0, T2, _MM_SHUFFLE(3, 2, 3, 2));
1828  __m512 S2 = _mm512_shuffle_ps(T1, T3, _MM_SHUFFLE(1, 0, 1, 0));
1829  __m512 S3 = _mm512_shuffle_ps(T1, T3, _MM_SHUFFLE(3, 2, 3, 2));
1830 
1831  EIGEN_EXTRACT_8f_FROM_16f(S0, S0);
1832  EIGEN_EXTRACT_8f_FROM_16f(S1, S1);
1833  EIGEN_EXTRACT_8f_FROM_16f(S2, S2);
1834  EIGEN_EXTRACT_8f_FROM_16f(S3, S3);
1835 
1836  PacketBlock<Packet8f, 8> tmp;
1837 
1838  tmp.packet[0] = _mm256_permute2f128_ps(S0_0, S1_0, 0x20);
1839  tmp.packet[1] = _mm256_permute2f128_ps(S2_0, S3_0, 0x20);
1840  tmp.packet[2] = _mm256_permute2f128_ps(S0_0, S1_0, 0x31);
1841  tmp.packet[3] = _mm256_permute2f128_ps(S2_0, S3_0, 0x31);
1842 
1843  tmp.packet[4] = _mm256_permute2f128_ps(S0_1, S1_1, 0x20);
1844  tmp.packet[5] = _mm256_permute2f128_ps(S2_1, S3_1, 0x20);
1845  tmp.packet[6] = _mm256_permute2f128_ps(S0_1, S1_1, 0x31);
1846  tmp.packet[7] = _mm256_permute2f128_ps(S2_1, S3_1, 0x31);
1847 
1848  PACK_OUTPUT_2(kernel.packet, tmp.packet, 0, 1);
1849  PACK_OUTPUT_2(kernel.packet, tmp.packet, 1, 1);
1850  PACK_OUTPUT_2(kernel.packet, tmp.packet, 2, 1);
1851  PACK_OUTPUT_2(kernel.packet, tmp.packet, 3, 1);
1852 }
1853 
1854 #define PACK_OUTPUT_SQ_D(OUTPUT, INPUT, INDEX, STRIDE) \
1855  OUTPUT[INDEX] = _mm512_insertf64x4(OUTPUT[INDEX], INPUT[INDEX], 0); \
1856  OUTPUT[INDEX] = _mm512_insertf64x4(OUTPUT[INDEX], INPUT[INDEX + STRIDE], 1);
1857 
1858 #define PACK_OUTPUT_D(OUTPUT, INPUT, INDEX, STRIDE) \
1859  OUTPUT[INDEX] = _mm512_insertf64x4(OUTPUT[INDEX], INPUT[(2 * INDEX)], 0); \
1860  OUTPUT[INDEX] = _mm512_insertf64x4(OUTPUT[INDEX], INPUT[(2 * INDEX) + STRIDE], 1);
1861 
1862 #define PACK_OUTPUT_L(OUTPUT, INPUT, INDEX, STRIDE) \
1863  OUTPUT[INDEX] = _mm512_inserti64x4(OUTPUT[INDEX], INPUT[(2 * INDEX)], 0); \
1864  OUTPUT[INDEX] = _mm512_inserti64x4(OUTPUT[INDEX], INPUT[(2 * INDEX) + STRIDE], 1);
1865 
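// Each PACK_OUTPUT_* helper above rebuilds one 512-bit output from two
// 256-bit intermediates: the INPUT entry at index 2*INDEX fills bits [255:0]
// and the entry STRIDE slots later fills bits [511:256]. Illustrative
// expansion with hypothetical operands:
//   PACK_OUTPUT_D(out, in, 1, 1)
//     ==> out[1] = _mm512_insertf64x4(out[1], in[2], 0);
//         out[1] = _mm512_insertf64x4(out[1], in[3], 1);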
1866 EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet8d, 4>& kernel) {
1867  __m512d T0 = _mm512_shuffle_pd(kernel.packet[0], kernel.packet[1], 0);
1868  __m512d T1 = _mm512_shuffle_pd(kernel.packet[0], kernel.packet[1], 0xff);
1869  __m512d T2 = _mm512_shuffle_pd(kernel.packet[2], kernel.packet[3], 0);
1870  __m512d T3 = _mm512_shuffle_pd(kernel.packet[2], kernel.packet[3], 0xff);
1871 
1872  PacketBlock<Packet4d, 8> tmp;
1873 
1874  tmp.packet[0] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T0, 0), _mm512_extractf64x4_pd(T2, 0), 0x20);
1875  tmp.packet[1] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T1, 0), _mm512_extractf64x4_pd(T3, 0), 0x20);
1876  tmp.packet[2] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T0, 0), _mm512_extractf64x4_pd(T2, 0), 0x31);
1877  tmp.packet[3] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T1, 0), _mm512_extractf64x4_pd(T3, 0), 0x31);
1878 
1879  tmp.packet[4] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T0, 1), _mm512_extractf64x4_pd(T2, 1), 0x20);
1880  tmp.packet[5] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T1, 1), _mm512_extractf64x4_pd(T3, 1), 0x20);
1881  tmp.packet[6] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T0, 1), _mm512_extractf64x4_pd(T2, 1), 0x31);
1882  tmp.packet[7] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T1, 1), _mm512_extractf64x4_pd(T3, 1), 0x31);
1883 
1884  PACK_OUTPUT_D(kernel.packet, tmp.packet, 0, 1);
1885  PACK_OUTPUT_D(kernel.packet, tmp.packet, 1, 1);
1886  PACK_OUTPUT_D(kernel.packet, tmp.packet, 2, 1);
1887  PACK_OUTPUT_D(kernel.packet, tmp.packet, 3, 1);
1888 }
1889 
1890 EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet8d, 8>& kernel) {
1891  __m512d T0 = _mm512_unpacklo_pd(kernel.packet[0], kernel.packet[1]);
1892  __m512d T1 = _mm512_unpackhi_pd(kernel.packet[0], kernel.packet[1]);
1893  __m512d T2 = _mm512_unpacklo_pd(kernel.packet[2], kernel.packet[3]);
1894  __m512d T3 = _mm512_unpackhi_pd(kernel.packet[2], kernel.packet[3]);
1895  __m512d T4 = _mm512_unpacklo_pd(kernel.packet[4], kernel.packet[5]);
1896  __m512d T5 = _mm512_unpackhi_pd(kernel.packet[4], kernel.packet[5]);
1897  __m512d T6 = _mm512_unpacklo_pd(kernel.packet[6], kernel.packet[7]);
1898  __m512d T7 = _mm512_unpackhi_pd(kernel.packet[6], kernel.packet[7]);
1899 
1900  kernel.packet[0] = _mm512_permutex_pd(T2, 0x4E);
1901  kernel.packet[0] = _mm512_mask_blend_pd(0xCC, T0, kernel.packet[0]);
1902  kernel.packet[2] = _mm512_permutex_pd(T0, 0x4E);
1903  kernel.packet[2] = _mm512_mask_blend_pd(0xCC, kernel.packet[2], T2);
1904  kernel.packet[1] = _mm512_permutex_pd(T3, 0x4E);
1905  kernel.packet[1] = _mm512_mask_blend_pd(0xCC, T1, kernel.packet[1]);
1906  kernel.packet[3] = _mm512_permutex_pd(T1, 0x4E);
1907  kernel.packet[3] = _mm512_mask_blend_pd(0xCC, kernel.packet[3], T3);
1908  kernel.packet[4] = _mm512_permutex_pd(T6, 0x4E);
1909  kernel.packet[4] = _mm512_mask_blend_pd(0xCC, T4, kernel.packet[4]);
1910  kernel.packet[6] = _mm512_permutex_pd(T4, 0x4E);
1911  kernel.packet[6] = _mm512_mask_blend_pd(0xCC, kernel.packet[6], T6);
1912  kernel.packet[5] = _mm512_permutex_pd(T7, 0x4E);
1913  kernel.packet[5] = _mm512_mask_blend_pd(0xCC, T5, kernel.packet[5]);
1914  kernel.packet[7] = _mm512_permutex_pd(T5, 0x4E);
1915  kernel.packet[7] = _mm512_mask_blend_pd(0xCC, kernel.packet[7], T7);
1916 
1917  T0 = _mm512_shuffle_f64x2(kernel.packet[4], kernel.packet[4], 0x4E);
1918  T0 = _mm512_mask_blend_pd(0xF0, kernel.packet[0], T0);
1919  T4 = _mm512_shuffle_f64x2(kernel.packet[0], kernel.packet[0], 0x4E);
1920  T4 = _mm512_mask_blend_pd(0xF0, T4, kernel.packet[4]);
1921  T1 = _mm512_shuffle_f64x2(kernel.packet[5], kernel.packet[5], 0x4E);
1922  T1 = _mm512_mask_blend_pd(0xF0, kernel.packet[1], T1);
1923  T5 = _mm512_shuffle_f64x2(kernel.packet[1], kernel.packet[1], 0x4E);
1924  T5 = _mm512_mask_blend_pd(0xF0, T5, kernel.packet[5]);
1925  T2 = _mm512_shuffle_f64x2(kernel.packet[6], kernel.packet[6], 0x4E);
1926  T2 = _mm512_mask_blend_pd(0xF0, kernel.packet[2], T2);
1927  T6 = _mm512_shuffle_f64x2(kernel.packet[2], kernel.packet[2], 0x4E);
1928  T6 = _mm512_mask_blend_pd(0xF0, T6, kernel.packet[6]);
1929  T3 = _mm512_shuffle_f64x2(kernel.packet[7], kernel.packet[7], 0x4E);
1930  T3 = _mm512_mask_blend_pd(0xF0, kernel.packet[3], T3);
1931  T7 = _mm512_shuffle_f64x2(kernel.packet[3], kernel.packet[3], 0x4E);
1932  T7 = _mm512_mask_blend_pd(0xF0, T7, kernel.packet[7]);
1933 
1934  kernel.packet[0] = T0;
1935  kernel.packet[1] = T1;
1936  kernel.packet[2] = T2;
1937  kernel.packet[3] = T3;
1938  kernel.packet[4] = T4;
1939  kernel.packet[5] = T5;
1940  kernel.packet[6] = T6;
1941  kernel.packet[7] = T7;
1942 }
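// This 8x8 double kernel never leaves 512-bit registers: _mm512_permutex_pd
// with 0x4E swaps the two 128-bit pairs inside each 256-bit half,
// _mm512_shuffle_f64x2 with 0x4E swaps the 256-bit halves, and the mask
// blends (0xCC selecting alternating element pairs, 0xF0 selecting the upper
// half) stitch the swapped and unswapped registers together.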
1943 
1944 EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet8l, 4>& kernel) {
1945  __m512i T0 = _mm512_castpd_si512(
1946  _mm512_shuffle_pd(_mm512_castsi512_pd(kernel.packet[0]), _mm512_castsi512_pd(kernel.packet[1]), 0));
1947  __m512i T1 = _mm512_castpd_si512(
1948  _mm512_shuffle_pd(_mm512_castsi512_pd(kernel.packet[0]), _mm512_castsi512_pd(kernel.packet[1]), 0xff));
1949  __m512i T2 = _mm512_castpd_si512(
1950  _mm512_shuffle_pd(_mm512_castsi512_pd(kernel.packet[2]), _mm512_castsi512_pd(kernel.packet[3]), 0));
1951  __m512i T3 = _mm512_castpd_si512(
1952  _mm512_shuffle_pd(_mm512_castsi512_pd(kernel.packet[2]), _mm512_castsi512_pd(kernel.packet[3]), 0xff));
1953 
1954  PacketBlock<Packet4l, 8> tmp;
1955 
1956  tmp.packet[0] = _mm256_permute2x128_si256(_mm512_extracti64x4_epi64(T0, 0), _mm512_extracti64x4_epi64(T2, 0), 0x20);
1957  tmp.packet[1] = _mm256_permute2x128_si256(_mm512_extracti64x4_epi64(T1, 0), _mm512_extracti64x4_epi64(T3, 0), 0x20);
1958  tmp.packet[2] = _mm256_permute2x128_si256(_mm512_extracti64x4_epi64(T0, 0), _mm512_extracti64x4_epi64(T2, 0), 0x31);
1959  tmp.packet[3] = _mm256_permute2x128_si256(_mm512_extracti64x4_epi64(T1, 0), _mm512_extracti64x4_epi64(T3, 0), 0x31);
1960 
1961  tmp.packet[4] = _mm256_permute2x128_si256(_mm512_extracti64x4_epi64(T0, 1), _mm512_extracti64x4_epi64(T2, 1), 0x20);
1962  tmp.packet[5] = _mm256_permute2x128_si256(_mm512_extracti64x4_epi64(T1, 1), _mm512_extracti64x4_epi64(T3, 1), 0x20);
1963  tmp.packet[6] = _mm256_permute2x128_si256(_mm512_extracti64x4_epi64(T0, 1), _mm512_extracti64x4_epi64(T2, 1), 0x31);
1964  tmp.packet[7] = _mm256_permute2x128_si256(_mm512_extracti64x4_epi64(T1, 1), _mm512_extracti64x4_epi64(T3, 1), 0x31);
1965 
1966  PACK_OUTPUT_L(kernel.packet, tmp.packet, 0, 1);
1967  PACK_OUTPUT_L(kernel.packet, tmp.packet, 1, 1);
1968  PACK_OUTPUT_L(kernel.packet, tmp.packet, 2, 1);
1969  PACK_OUTPUT_L(kernel.packet, tmp.packet, 3, 1);
1970 }
1971 
1972 EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet8l, 8>& kernel) {
1973  __m512i T0 = _mm512_unpacklo_epi64(kernel.packet[0], kernel.packet[1]);
1974  __m512i T1 = _mm512_unpackhi_epi64(kernel.packet[0], kernel.packet[1]);
1975  __m512i T2 = _mm512_unpacklo_epi64(kernel.packet[2], kernel.packet[3]);
1976  __m512i T3 = _mm512_unpackhi_epi64(kernel.packet[2], kernel.packet[3]);
1977  __m512i T4 = _mm512_unpacklo_epi64(kernel.packet[4], kernel.packet[5]);
1978  __m512i T5 = _mm512_unpackhi_epi64(kernel.packet[4], kernel.packet[5]);
1979  __m512i T6 = _mm512_unpacklo_epi64(kernel.packet[6], kernel.packet[7]);
1980  __m512i T7 = _mm512_unpackhi_epi64(kernel.packet[6], kernel.packet[7]);
1981 
1982  kernel.packet[0] = _mm512_permutex_epi64(T2, 0x4E);
1983  kernel.packet[0] = _mm512_mask_blend_epi64(0xCC, T0, kernel.packet[0]);
1984  kernel.packet[2] = _mm512_permutex_epi64(T0, 0x4E);
1985  kernel.packet[2] = _mm512_mask_blend_epi64(0xCC, kernel.packet[2], T2);
1986  kernel.packet[1] = _mm512_permutex_epi64(T3, 0x4E);
1987  kernel.packet[1] = _mm512_mask_blend_epi64(0xCC, T1, kernel.packet[1]);
1988  kernel.packet[3] = _mm512_permutex_epi64(T1, 0x4E);
1989  kernel.packet[3] = _mm512_mask_blend_epi64(0xCC, kernel.packet[3], T3);
1990  kernel.packet[4] = _mm512_permutex_epi64(T6, 0x4E);
1991  kernel.packet[4] = _mm512_mask_blend_epi64(0xCC, T4, kernel.packet[4]);
1992  kernel.packet[6] = _mm512_permutex_epi64(T4, 0x4E);
1993  kernel.packet[6] = _mm512_mask_blend_epi64(0xCC, kernel.packet[6], T6);
1994  kernel.packet[5] = _mm512_permutex_epi64(T7, 0x4E);
1995  kernel.packet[5] = _mm512_mask_blend_epi64(0xCC, T5, kernel.packet[5]);
1996  kernel.packet[7] = _mm512_permutex_epi64(T5, 0x4E);
1997  kernel.packet[7] = _mm512_mask_blend_epi64(0xCC, kernel.packet[7], T7);
1998 
1999  T0 = _mm512_shuffle_i64x2(kernel.packet[4], kernel.packet[4], 0x4E);
2000  T0 = _mm512_mask_blend_epi64(0xF0, kernel.packet[0], T0);
2001  T4 = _mm512_shuffle_i64x2(kernel.packet[0], kernel.packet[0], 0x4E);
2002  T4 = _mm512_mask_blend_epi64(0xF0, T4, kernel.packet[4]);
2003  T1 = _mm512_shuffle_i64x2(kernel.packet[5], kernel.packet[5], 0x4E);
2004  T1 = _mm512_mask_blend_epi64(0xF0, kernel.packet[1], T1);
2005  T5 = _mm512_shuffle_i64x2(kernel.packet[1], kernel.packet[1], 0x4E);
2006  T5 = _mm512_mask_blend_epi64(0xF0, T5, kernel.packet[5]);
2007  T2 = _mm512_shuffle_i64x2(kernel.packet[6], kernel.packet[6], 0x4E);
2008  T2 = _mm512_mask_blend_epi64(0xF0, kernel.packet[2], T2);
2009  T6 = _mm512_shuffle_i64x2(kernel.packet[2], kernel.packet[2], 0x4E);
2010  T6 = _mm512_mask_blend_epi64(0xF0, T6, kernel.packet[6]);
2011  T3 = _mm512_shuffle_i64x2(kernel.packet[7], kernel.packet[7], 0x4E);
2012  T3 = _mm512_mask_blend_epi64(0xF0, kernel.packet[3], T3);
2013  T7 = _mm512_shuffle_i64x2(kernel.packet[3], kernel.packet[3], 0x4E);
2014  T7 = _mm512_mask_blend_epi64(0xF0, T7, kernel.packet[7]);
2015 
2016  kernel.packet[0] = T0;
2017  kernel.packet[1] = T1;
2018  kernel.packet[2] = T2;
2019  kernel.packet[3] = T3;
2020  kernel.packet[4] = T4;
2021  kernel.packet[5] = T5;
2022  kernel.packet[6] = T6;
2023  kernel.packet[7] = T7;
2024 }
2025 
2026 #define PACK_OUTPUT_I32(OUTPUT, INPUT, INDEX, STRIDE) \
2027  EIGEN_INSERT_8i_INTO_16i(OUTPUT[INDEX], INPUT[INDEX], INPUT[INDEX + STRIDE]);
2028 
2029 #define PACK_OUTPUT_I32_2(OUTPUT, INPUT, INDEX, STRIDE) \
2030  EIGEN_INSERT_8i_INTO_16i(OUTPUT[INDEX], INPUT[2 * INDEX], INPUT[2 * INDEX + STRIDE]);
2031 
2032 #define SHUFFLE_EPI32(A, B, M) _mm512_castps_si512(_mm512_shuffle_ps(_mm512_castsi512_ps(A), _mm512_castsi512_ps(B), M))
2033 
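// SHUFFLE_EPI32 exists because AVX-512 has no two-operand 32-bit integer
// shuffle: the operands are bit-cast to float, run through
// _mm512_shuffle_ps, and cast back; the casts compile to nothing, so only
// the shuffle remains.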
2034 EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet16i, 16>& kernel) {
2035  __m512i T0 = _mm512_unpacklo_epi32(kernel.packet[0], kernel.packet[1]);
2036  __m512i T1 = _mm512_unpackhi_epi32(kernel.packet[0], kernel.packet[1]);
2037  __m512i T2 = _mm512_unpacklo_epi32(kernel.packet[2], kernel.packet[3]);
2038  __m512i T3 = _mm512_unpackhi_epi32(kernel.packet[2], kernel.packet[3]);
2039  __m512i T4 = _mm512_unpacklo_epi32(kernel.packet[4], kernel.packet[5]);
2040  __m512i T5 = _mm512_unpackhi_epi32(kernel.packet[4], kernel.packet[5]);
2041  __m512i T6 = _mm512_unpacklo_epi32(kernel.packet[6], kernel.packet[7]);
2042  __m512i T7 = _mm512_unpackhi_epi32(kernel.packet[6], kernel.packet[7]);
2043  __m512i T8 = _mm512_unpacklo_epi32(kernel.packet[8], kernel.packet[9]);
2044  __m512i T9 = _mm512_unpackhi_epi32(kernel.packet[8], kernel.packet[9]);
2045  __m512i T10 = _mm512_unpacklo_epi32(kernel.packet[10], kernel.packet[11]);
2046  __m512i T11 = _mm512_unpackhi_epi32(kernel.packet[10], kernel.packet[11]);
2047  __m512i T12 = _mm512_unpacklo_epi32(kernel.packet[12], kernel.packet[13]);
2048  __m512i T13 = _mm512_unpackhi_epi32(kernel.packet[12], kernel.packet[13]);
2049  __m512i T14 = _mm512_unpacklo_epi32(kernel.packet[14], kernel.packet[15]);
2050  __m512i T15 = _mm512_unpackhi_epi32(kernel.packet[14], kernel.packet[15]);
2051  __m512i S0 = SHUFFLE_EPI32(T0, T2, _MM_SHUFFLE(1, 0, 1, 0));
2052  __m512i S1 = SHUFFLE_EPI32(T0, T2, _MM_SHUFFLE(3, 2, 3, 2));
2053  __m512i S2 = SHUFFLE_EPI32(T1, T3, _MM_SHUFFLE(1, 0, 1, 0));
2054  __m512i S3 = SHUFFLE_EPI32(T1, T3, _MM_SHUFFLE(3, 2, 3, 2));
2055  __m512i S4 = SHUFFLE_EPI32(T4, T6, _MM_SHUFFLE(1, 0, 1, 0));
2056  __m512i S5 = SHUFFLE_EPI32(T4, T6, _MM_SHUFFLE(3, 2, 3, 2));
2057  __m512i S6 = SHUFFLE_EPI32(T5, T7, _MM_SHUFFLE(1, 0, 1, 0));
2058  __m512i S7 = SHUFFLE_EPI32(T5, T7, _MM_SHUFFLE(3, 2, 3, 2));
2059  __m512i S8 = SHUFFLE_EPI32(T8, T10, _MM_SHUFFLE(1, 0, 1, 0));
2060  __m512i S9 = SHUFFLE_EPI32(T8, T10, _MM_SHUFFLE(3, 2, 3, 2));
2061  __m512i S10 = SHUFFLE_EPI32(T9, T11, _MM_SHUFFLE(1, 0, 1, 0));
2062  __m512i S11 = SHUFFLE_EPI32(T9, T11, _MM_SHUFFLE(3, 2, 3, 2));
2063  __m512i S12 = SHUFFLE_EPI32(T12, T14, _MM_SHUFFLE(1, 0, 1, 0));
2064  __m512i S13 = SHUFFLE_EPI32(T12, T14, _MM_SHUFFLE(3, 2, 3, 2));
2065  __m512i S14 = SHUFFLE_EPI32(T13, T15, _MM_SHUFFLE(1, 0, 1, 0));
2066  __m512i S15 = SHUFFLE_EPI32(T13, T15, _MM_SHUFFLE(3, 2, 3, 2));
2067 
2068  EIGEN_EXTRACT_8i_FROM_16i(S0, S0);
2069  EIGEN_EXTRACT_8i_FROM_16i(S1, S1);
2070  EIGEN_EXTRACT_8i_FROM_16i(S2, S2);
2071  EIGEN_EXTRACT_8i_FROM_16i(S3, S3);
2072  EIGEN_EXTRACT_8i_FROM_16i(S4, S4);
2073  EIGEN_EXTRACT_8i_FROM_16i(S5, S5);
2074  EIGEN_EXTRACT_8i_FROM_16i(S6, S6);
2075  EIGEN_EXTRACT_8i_FROM_16i(S7, S7);
2076  EIGEN_EXTRACT_8i_FROM_16i(S8, S8);
2077  EIGEN_EXTRACT_8i_FROM_16i(S9, S9);
2078  EIGEN_EXTRACT_8i_FROM_16i(S10, S10);
2079  EIGEN_EXTRACT_8i_FROM_16i(S11, S11);
2080  EIGEN_EXTRACT_8i_FROM_16i(S12, S12);
2081  EIGEN_EXTRACT_8i_FROM_16i(S13, S13);
2082  EIGEN_EXTRACT_8i_FROM_16i(S14, S14);
2083  EIGEN_EXTRACT_8i_FROM_16i(S15, S15);
2084 
2085  PacketBlock<Packet8i, 32> tmp;
2086 
2087  tmp.packet[0] = _mm256_permute2f128_si256(S0_0, S4_0, 0x20);
2088  tmp.packet[1] = _mm256_permute2f128_si256(S1_0, S5_0, 0x20);
2089  tmp.packet[2] = _mm256_permute2f128_si256(S2_0, S6_0, 0x20);
2090  tmp.packet[3] = _mm256_permute2f128_si256(S3_0, S7_0, 0x20);
2091  tmp.packet[4] = _mm256_permute2f128_si256(S0_0, S4_0, 0x31);
2092  tmp.packet[5] = _mm256_permute2f128_si256(S1_0, S5_0, 0x31);
2093  tmp.packet[6] = _mm256_permute2f128_si256(S2_0, S6_0, 0x31);
2094  tmp.packet[7] = _mm256_permute2f128_si256(S3_0, S7_0, 0x31);
2095 
2096  tmp.packet[8] = _mm256_permute2f128_si256(S0_1, S4_1, 0x20);
2097  tmp.packet[9] = _mm256_permute2f128_si256(S1_1, S5_1, 0x20);
2098  tmp.packet[10] = _mm256_permute2f128_si256(S2_1, S6_1, 0x20);
2099  tmp.packet[11] = _mm256_permute2f128_si256(S3_1, S7_1, 0x20);
2100  tmp.packet[12] = _mm256_permute2f128_si256(S0_1, S4_1, 0x31);
2101  tmp.packet[13] = _mm256_permute2f128_si256(S1_1, S5_1, 0x31);
2102  tmp.packet[14] = _mm256_permute2f128_si256(S2_1, S6_1, 0x31);
2103  tmp.packet[15] = _mm256_permute2f128_si256(S3_1, S7_1, 0x31);
2104 
2105  // Second set of __m256i outputs.
2106  tmp.packet[16] = _mm256_permute2f128_si256(S8_0, S12_0, 0x20);
2107  tmp.packet[17] = _mm256_permute2f128_si256(S9_0, S13_0, 0x20);
2108  tmp.packet[18] = _mm256_permute2f128_si256(S10_0, S14_0, 0x20);
2109  tmp.packet[19] = _mm256_permute2f128_si256(S11_0, S15_0, 0x20);
2110  tmp.packet[20] = _mm256_permute2f128_si256(S8_0, S12_0, 0x31);
2111  tmp.packet[21] = _mm256_permute2f128_si256(S9_0, S13_0, 0x31);
2112  tmp.packet[22] = _mm256_permute2f128_si256(S10_0, S14_0, 0x31);
2113  tmp.packet[23] = _mm256_permute2f128_si256(S11_0, S15_0, 0x31);
2114 
2115  tmp.packet[24] = _mm256_permute2f128_si256(S8_1, S12_1, 0x20);
2116  tmp.packet[25] = _mm256_permute2f128_si256(S9_1, S13_1, 0x20);
2117  tmp.packet[26] = _mm256_permute2f128_si256(S10_1, S14_1, 0x20);
2118  tmp.packet[27] = _mm256_permute2f128_si256(S11_1, S15_1, 0x20);
2119  tmp.packet[28] = _mm256_permute2f128_si256(S8_1, S12_1, 0x31);
2120  tmp.packet[29] = _mm256_permute2f128_si256(S9_1, S13_1, 0x31);
2121  tmp.packet[30] = _mm256_permute2f128_si256(S10_1, S14_1, 0x31);
2122  tmp.packet[31] = _mm256_permute2f128_si256(S11_1, S15_1, 0x31);
2123 
2124  // Pack them into the output
2125  PACK_OUTPUT_I32(kernel.packet, tmp.packet, 0, 16);
2126  PACK_OUTPUT_I32(kernel.packet, tmp.packet, 1, 16);
2127  PACK_OUTPUT_I32(kernel.packet, tmp.packet, 2, 16);
2128  PACK_OUTPUT_I32(kernel.packet, tmp.packet, 3, 16);
2129 
2130  PACK_OUTPUT_I32(kernel.packet, tmp.packet, 4, 16);
2131  PACK_OUTPUT_I32(kernel.packet, tmp.packet, 5, 16);
2132  PACK_OUTPUT_I32(kernel.packet, tmp.packet, 6, 16);
2133  PACK_OUTPUT_I32(kernel.packet, tmp.packet, 7, 16);
2134 
2135  PACK_OUTPUT_I32(kernel.packet, tmp.packet, 8, 16);
2136  PACK_OUTPUT_I32(kernel.packet, tmp.packet, 9, 16);
2137  PACK_OUTPUT_I32(kernel.packet, tmp.packet, 10, 16);
2138  PACK_OUTPUT_I32(kernel.packet, tmp.packet, 11, 16);
2139 
2140  PACK_OUTPUT_I32(kernel.packet, tmp.packet, 12, 16);
2141  PACK_OUTPUT_I32(kernel.packet, tmp.packet, 13, 16);
2142  PACK_OUTPUT_I32(kernel.packet, tmp.packet, 14, 16);
2143  PACK_OUTPUT_I32(kernel.packet, tmp.packet, 15, 16);
2144 }
2145 
2146 EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet16i, 4>& kernel) {
2147  __m512i T0 = _mm512_unpacklo_epi32(kernel.packet[0], kernel.packet[1]);
2148  __m512i T1 = _mm512_unpackhi_epi32(kernel.packet[0], kernel.packet[1]);
2149  __m512i T2 = _mm512_unpacklo_epi32(kernel.packet[2], kernel.packet[3]);
2150  __m512i T3 = _mm512_unpackhi_epi32(kernel.packet[2], kernel.packet[3]);
2151 
2152  __m512i S0 = SHUFFLE_EPI32(T0, T2, _MM_SHUFFLE(1, 0, 1, 0));
2153  __m512i S1 = SHUFFLE_EPI32(T0, T2, _MM_SHUFFLE(3, 2, 3, 2));
2154  __m512i S2 = SHUFFLE_EPI32(T1, T3, _MM_SHUFFLE(1, 0, 1, 0));
2155  __m512i S3 = SHUFFLE_EPI32(T1, T3, _MM_SHUFFLE(3, 2, 3, 2));
2156 
2157  EIGEN_EXTRACT_8i_FROM_16i(S0, S0);
2158  EIGEN_EXTRACT_8i_FROM_16i(S1, S1);
2159  EIGEN_EXTRACT_8i_FROM_16i(S2, S2);
2160  EIGEN_EXTRACT_8i_FROM_16i(S3, S3);
2161 
2162  PacketBlock<Packet8i, 8> tmp;
2163 
2164  tmp.packet[0] = _mm256_permute2f128_si256(S0_0, S1_0, 0x20);
2165  tmp.packet[1] = _mm256_permute2f128_si256(S2_0, S3_0, 0x20);
2166  tmp.packet[2] = _mm256_permute2f128_si256(S0_0, S1_0, 0x31);
2167  tmp.packet[3] = _mm256_permute2f128_si256(S2_0, S3_0, 0x31);
2168 
2169  tmp.packet[4] = _mm256_permute2f128_si256(S0_1, S1_1, 0x20);
2170  tmp.packet[5] = _mm256_permute2f128_si256(S2_1, S3_1, 0x20);
2171  tmp.packet[6] = _mm256_permute2f128_si256(S0_1, S1_1, 0x31);
2172  tmp.packet[7] = _mm256_permute2f128_si256(S2_1, S3_1, 0x31);
2173 
2174  PACK_OUTPUT_I32_2(kernel.packet, tmp.packet, 0, 1);
2175  PACK_OUTPUT_I32_2(kernel.packet, tmp.packet, 1, 1);
2176  PACK_OUTPUT_I32_2(kernel.packet, tmp.packet, 2, 1);
2177  PACK_OUTPUT_I32_2(kernel.packet, tmp.packet, 3, 1);
2178 }
2179 
2180 template <size_t N>
2181 EIGEN_STRONG_INLINE int avx512_blend_mask(const Selector<N>& ifPacket) {
2182  alignas(__m128i) uint8_t aux[sizeof(__m128i)];
2183  for (size_t i = 0; i < N; i++) aux[i] = static_cast<uint8_t>(ifPacket.select[i]);
2184  __m128i paux = _mm_sub_epi8(_mm_setzero_si128(), _mm_load_si128(reinterpret_cast<const __m128i*>(aux)));
2185  return _mm_movemask_epi8(paux);
2186 }
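// Sketch of the trick above (values illustrative): each Selector entry is 0
// or 1; subtracting from zero turns 1 into 0xFF, and _mm_movemask_epi8
// collects the byte sign bits, so select = {1, 0, 1, 1} sets mask bits 0, 2
// and 3 (value 0b1101), directly usable as an AVX-512 __mmask16/__mmask8.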
2187 
2188 template <>
2189 EIGEN_STRONG_INLINE Packet16f pblend(const Selector<16>& ifPacket, const Packet16f& thenPacket,
2190  const Packet16f& elsePacket) {
2191  __mmask16 m = avx512_blend_mask(ifPacket);
2192  return _mm512_mask_blend_ps(m, elsePacket, thenPacket);
2193 }
2194 template <>
2195 EIGEN_STRONG_INLINE Packet8d pblend(const Selector<8>& ifPacket, const Packet8d& thenPacket,
2196  const Packet8d& elsePacket) {
2197  __mmask8 m = avx512_blend_mask(ifPacket);
2198  return _mm512_mask_blend_pd(m, elsePacket, thenPacket);
2199 }
2200 
2201 // Packet math for Eigen::half
2202 template <>
2203 EIGEN_STRONG_INLINE Packet16h pset1<Packet16h>(const Eigen::half& from) {
2204  return _mm256_set1_epi16(from.x);
2205 }
2206 
2207 template <>
2208 EIGEN_STRONG_INLINE Eigen::half pfirst<Packet16h>(const Packet16h& from) {
2209  return half_impl::raw_uint16_to_half(static_cast<unsigned short>(_mm256_extract_epi16(from, 0)));
2210 }
2211 
2212 template <>
2213 EIGEN_STRONG_INLINE Packet16h pload<Packet16h>(const Eigen::half* from) {
2214  return _mm256_load_si256(reinterpret_cast<const __m256i*>(from));
2215 }
2216 
2217 template <>
2218 EIGEN_STRONG_INLINE Packet16h ploadu<Packet16h>(const Eigen::half* from) {
2219  return _mm256_loadu_si256(reinterpret_cast<const __m256i*>(from));
2220 }
2221 
2222 template <>
2223 EIGEN_STRONG_INLINE void pstore<half>(Eigen::half* to, const Packet16h& from) {
2224  // (void*) -> workaround clang warning:
2225  // cast from 'Eigen::half *' to '__m256i *' increases required alignment from 2 to 32
2226  _mm256_store_si256((__m256i*)(void*)to, from);
2227 }
2228 
2229 template <>
2230 EIGEN_STRONG_INLINE void pstoreu<half>(Eigen::half* to, const Packet16h& from) {
2231  // (void*) -> workaround clang warning:
2232  // cast from 'Eigen::half *' to '__m256i *' increases required alignment from 2 to 32
2233  _mm256_storeu_si256((__m256i*)(void*)to, from);
2234 }
2235 
2236 template <>
2237 EIGEN_STRONG_INLINE Packet16h ploaddup<Packet16h>(const Eigen::half* from) {
2238  unsigned short a = from[0].x;
2239  unsigned short b = from[1].x;
2240  unsigned short c = from[2].x;
2241  unsigned short d = from[3].x;
2242  unsigned short e = from[4].x;
2243  unsigned short f = from[5].x;
2244  unsigned short g = from[6].x;
2245  unsigned short h = from[7].x;
2246  return _mm256_set_epi16(h, h, g, g, f, f, e, e, d, d, c, c, b, b, a, a);
2247 }
2248 
2249 template <>
2250 EIGEN_STRONG_INLINE Packet16h ploadquad(const Eigen::half* from) {
2251  unsigned short a = from[0].x;
2252  unsigned short b = from[1].x;
2253  unsigned short c = from[2].x;
2254  unsigned short d = from[3].x;
2255  return _mm256_set_epi16(d, d, d, d, c, c, c, c, b, b, b, b, a, a, a, a);
2256 }
2257 
2258 EIGEN_STRONG_INLINE Packet16f half2float(const Packet16h& a) { return _mm512_cvtph_ps(a); }
2259 
2260 EIGEN_STRONG_INLINE Packet16h float2half(const Packet16f& a) {
2261  return _mm512_cvtps_ph(a, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
2262 }
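// half2float/float2half above use the hardware conversion instructions;
// since every fp16 value is exactly representable in fp32, the round trip
// float2half(half2float(x)) is value-preserving, and rounding only matters
// in the narrowing direction.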
2263 
2264 template <>
2265 EIGEN_STRONG_INLINE Packet16h ptrue(const Packet16h& a) {
2266  return Packet16h(ptrue(Packet8i(a)));
2267 }
2268 
2269 template <>
2270 EIGEN_STRONG_INLINE Packet16h pabs(const Packet16h& a) {
2271  const __m256i sign_mask = _mm256_set1_epi16(static_cast<numext::uint16_t>(0x8000));
2272  return _mm256_andnot_si256(sign_mask, a);
2273 }
2274 
2275 template <>
2276 EIGEN_STRONG_INLINE Packet16h pmin<Packet16h>(const Packet16h& a, const Packet16h& b) {
2277  return float2half(pmin<Packet16f>(half2float(a), half2float(b)));
2278 }
2279 
2280 template <>
2281 EIGEN_STRONG_INLINE Packet16h pmax<Packet16h>(const Packet16h& a, const Packet16h& b) {
2282  return float2half(pmax<Packet16f>(half2float(a), half2float(b)));
2283 }
2284 
2285 template <>
2286 EIGEN_STRONG_INLINE Packet16h plset<Packet16h>(const half& a) {
2287  return float2half(plset<Packet16f>(static_cast<float>(a)));
2288 }
2289 
2290 template <>
2291 EIGEN_STRONG_INLINE Packet16h por(const Packet16h& a, const Packet16h& b) {
2292  // In some cases Packet8i is a wrapper around __m256i, so we need to
2293  // cast to Packet8i to call the correct overload.
2294  return Packet16h(por(Packet8i(a), Packet8i(b)));
2295 }
2296 template <>
2297 EIGEN_STRONG_INLINE Packet16h pxor(const Packet16h& a, const Packet16h& b) {
2298  return Packet16h(pxor(Packet8i(a), Packet8i(b)));
2299 }
2300 template <>
2301 EIGEN_STRONG_INLINE Packet16h pand(const Packet16h& a, const Packet16h& b) {
2302  return Packet16h(pand(Packet8i(a), Packet8i(b)));
2303 }
2304 template <>
2305 EIGEN_STRONG_INLINE Packet16h pandnot(const Packet16h& a, const Packet16h& b) {
2306  return Packet16h(pandnot(Packet8i(a), Packet8i(b)));
2307 }
2308 
2309 template <>
2310 EIGEN_STRONG_INLINE Packet16h pselect(const Packet16h& mask, const Packet16h& a, const Packet16h& b) {
2311  return _mm256_blendv_epi8(b, a, mask);
2312 }
2313 
2314 template <>
2315 EIGEN_STRONG_INLINE Packet16h pround<Packet16h>(const Packet16h& a) {
2316  return float2half(pround<Packet16f>(half2float(a)));
2317 }
2318 
2319 template <>
2320 EIGEN_STRONG_INLINE Packet16h print<Packet16h>(const Packet16h& a) {
2321  return float2half(print<Packet16f>(half2float(a)));
2322 }
2323 
2324 template <>
2325 EIGEN_STRONG_INLINE Packet16h pceil<Packet16h>(const Packet16h& a) {
2326  return float2half(pceil<Packet16f>(half2float(a)));
2327 }
2328 
2329 template <>
2330 EIGEN_STRONG_INLINE Packet16h pfloor<Packet16h>(const Packet16h& a) {
2331  return float2half(pfloor<Packet16f>(half2float(a)));
2332 }
2333 
2334 template <>
2335 EIGEN_STRONG_INLINE Packet16h ptrunc<Packet16h>(const Packet16h& a) {
2336  return float2half(ptrunc<Packet16f>(half2float(a)));
2337 }
2338 
2339 template <>
2340 EIGEN_STRONG_INLINE Packet16h pcmp_eq(const Packet16h& a, const Packet16h& b) {
2341  Packet16f af = half2float(a);
2342  Packet16f bf = half2float(b);
2343  return Pack32To16(pcmp_eq(af, bf));
2344 }
2345 
2346 template <>
2347 EIGEN_STRONG_INLINE Packet16h pcmp_le(const Packet16h& a, const Packet16h& b) {
2348  return Pack32To16(pcmp_le(half2float(a), half2float(b)));
2349 }
2350 
2351 template <>
2352 EIGEN_STRONG_INLINE Packet16h pcmp_lt(const Packet16h& a, const Packet16h& b) {
2353  return Pack32To16(pcmp_lt(half2float(a), half2float(b)));
2354 }
2355 
2356 template <>
2357 EIGEN_STRONG_INLINE Packet16h pcmp_lt_or_nan(const Packet16h& a, const Packet16h& b) {
2358  return Pack32To16(pcmp_lt_or_nan(half2float(a), half2float(b)));
2359 }
2360 
2361 template <>
2362 EIGEN_STRONG_INLINE Packet16h pconj(const Packet16h& a) {
2363  return a;
2364 }
2365 
2366 template <>
2367 EIGEN_STRONG_INLINE Packet16h pnegate(const Packet16h& a) {
2368  Packet16h sign_mask = _mm256_set1_epi16(static_cast<unsigned short>(0x8000));
2369  return _mm256_xor_si256(a, sign_mask);
2370 }
2371 
2372 #ifndef EIGEN_VECTORIZE_AVX512FP16
2373 template <>
2374 EIGEN_STRONG_INLINE Packet16h padd<Packet16h>(const Packet16h& a, const Packet16h& b) {
2375  Packet16f af = half2float(a);
2376  Packet16f bf = half2float(b);
2377  Packet16f rf = padd(af, bf);
2378  return float2half(rf);
2379 }
2380 
2381 template <>
2382 EIGEN_STRONG_INLINE Packet16h psub<Packet16h>(const Packet16h& a, const Packet16h& b) {
2383  Packet16f af = half2float(a);
2384  Packet16f bf = half2float(b);
2385  Packet16f rf = psub(af, bf);
2386  return float2half(rf);
2387 }
2388 
2389 template <>
2390 EIGEN_STRONG_INLINE Packet16h pmul<Packet16h>(const Packet16h& a, const Packet16h& b) {
2391  Packet16f af = half2float(a);
2392  Packet16f bf = half2float(b);
2393  Packet16f rf = pmul(af, bf);
2394  return float2half(rf);
2395 }
2396 
2397 template <>
2398 EIGEN_STRONG_INLINE Packet16h pdiv<Packet16h>(const Packet16h& a, const Packet16h& b) {
2399  Packet16f af = half2float(a);
2400  Packet16f bf = half2float(b);
2401  Packet16f rf = pdiv(af, bf);
2402  return float2half(rf);
2403 }
2404 
2405 template <>
2406 EIGEN_STRONG_INLINE half predux<Packet16h>(const Packet16h& from) {
2407  Packet16f from_float = half2float(from);
2408  return half(predux(from_float));
2409 }
2410 
2411 #endif
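// Without AVX512FP16, Packet16h arithmetic above is emulated by widening to
// Packet16f, computing in fp32, and narrowing back. A minimal sketch of the
// same pattern for a user-level helper (the name is hypothetical):
//   Packet16h pmadd_sketch(Packet16h a, Packet16h b, Packet16h c) {
//     return float2half(pmadd(half2float(a), half2float(b), half2float(c)));
//   }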
2412 
2413 template <>
2414 EIGEN_STRONG_INLINE Packet8h predux_half_dowto4<Packet16h>(const Packet16h& a) {
2415  Packet8h lane0 = _mm256_extractf128_si256(a, 0);
2416  Packet8h lane1 = _mm256_extractf128_si256(a, 1);
2417  return padd<Packet8h>(lane0, lane1);
2418 }
2419 
2420 template <>
2421 EIGEN_STRONG_INLINE Eigen::half predux_max<Packet16h>(const Packet16h& a) {
2422  Packet16f af = half2float(a);
2423  float reduced = predux_max<Packet16f>(af);
2424  return Eigen::half(reduced);
2425 }
2426 
2427 template <>
2428 EIGEN_STRONG_INLINE Eigen::half predux_min<Packet16h>(const Packet16h& a) {
2429  Packet16f af = half2float(a);
2430  float reduced = predux_min<Packet16f>(af);
2431  return Eigen::half(reduced);
2432 }
2433 
2434 template <>
2435 EIGEN_STRONG_INLINE half predux_mul<Packet16h>(const Packet16h& from) {
2436  Packet16f from_float = half2float(from);
2437  return half(predux_mul(from_float));
2438 }
2439 
2440 template <>
2441 EIGEN_STRONG_INLINE Packet16h preverse(const Packet16h& a) {
2442  __m128i m = _mm_setr_epi8(14, 15, 12, 13, 10, 11, 8, 9, 6, 7, 4, 5, 2, 3, 0, 1);
2443  return _mm256_insertf128_si256(_mm256_castsi128_si256(_mm_shuffle_epi8(_mm256_extractf128_si256(a, 1), m)),
2444  _mm_shuffle_epi8(_mm256_extractf128_si256(a, 0), m), 1);
2445 }
2446 
2447 template <>
2448 EIGEN_STRONG_INLINE Packet16h pgather<Eigen::half, Packet16h>(const Eigen::half* from, Index stride) {
2449  return _mm256_set_epi16(from[15 * stride].x, from[14 * stride].x, from[13 * stride].x, from[12 * stride].x,
2450  from[11 * stride].x, from[10 * stride].x, from[9 * stride].x, from[8 * stride].x,
2451  from[7 * stride].x, from[6 * stride].x, from[5 * stride].x, from[4 * stride].x,
2452  from[3 * stride].x, from[2 * stride].x, from[1 * stride].x, from[0 * stride].x);
2453 }
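// Usage sketch (names assumed): gathering column j of a row-major
// Eigen::half matrix `mat` with `cols` elements per row:
//   Packet16h column = pgather<Eigen::half, Packet16h>(&mat[j], cols);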
2454 
2455 template <>
2456 EIGEN_STRONG_INLINE void pscatter<half, Packet16h>(half* to, const Packet16h& from, Index stride) {
2457  EIGEN_ALIGN64 half aux[16];
2458  pstore(aux, from);
2459  to[stride * 0] = aux[0];
2460  to[stride * 1] = aux[1];
2461  to[stride * 2] = aux[2];
2462  to[stride * 3] = aux[3];
2463  to[stride * 4] = aux[4];
2464  to[stride * 5] = aux[5];
2465  to[stride * 6] = aux[6];
2466  to[stride * 7] = aux[7];
2467  to[stride * 8] = aux[8];
2468  to[stride * 9] = aux[9];
2469  to[stride * 10] = aux[10];
2470  to[stride * 11] = aux[11];
2471  to[stride * 12] = aux[12];
2472  to[stride * 13] = aux[13];
2473  to[stride * 14] = aux[14];
2474  to[stride * 15] = aux[15];
2475 }
2476 
2477 EIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet16h, 16>& kernel) {
2478  __m256i a = kernel.packet[0];
2479  __m256i b = kernel.packet[1];
2480  __m256i c = kernel.packet[2];
2481  __m256i d = kernel.packet[3];
2482  __m256i e = kernel.packet[4];
2483  __m256i f = kernel.packet[5];
2484  __m256i g = kernel.packet[6];
2485  __m256i h = kernel.packet[7];
2486  __m256i i = kernel.packet[8];
2487  __m256i j = kernel.packet[9];
2488  __m256i k = kernel.packet[10];
2489  __m256i l = kernel.packet[11];
2490  __m256i m = kernel.packet[12];
2491  __m256i n = kernel.packet[13];
2492  __m256i o = kernel.packet[14];
2493  __m256i p = kernel.packet[15];
2494 
2495  __m256i ab_07 = _mm256_unpacklo_epi16(a, b);
2496  __m256i cd_07 = _mm256_unpacklo_epi16(c, d);
2497  __m256i ef_07 = _mm256_unpacklo_epi16(e, f);
2498  __m256i gh_07 = _mm256_unpacklo_epi16(g, h);
2499  __m256i ij_07 = _mm256_unpacklo_epi16(i, j);
2500  __m256i kl_07 = _mm256_unpacklo_epi16(k, l);
2501  __m256i mn_07 = _mm256_unpacklo_epi16(m, n);
2502  __m256i op_07 = _mm256_unpacklo_epi16(o, p);
2503 
2504  __m256i ab_8f = _mm256_unpackhi_epi16(a, b);
2505  __m256i cd_8f = _mm256_unpackhi_epi16(c, d);
2506  __m256i ef_8f = _mm256_unpackhi_epi16(e, f);
2507  __m256i gh_8f = _mm256_unpackhi_epi16(g, h);
2508  __m256i ij_8f = _mm256_unpackhi_epi16(i, j);
2509  __m256i kl_8f = _mm256_unpackhi_epi16(k, l);
2510  __m256i mn_8f = _mm256_unpackhi_epi16(m, n);
2511  __m256i op_8f = _mm256_unpackhi_epi16(o, p);
2512 
2513  __m256i abcd_03 = _mm256_unpacklo_epi32(ab_07, cd_07);
2514  __m256i abcd_47 = _mm256_unpackhi_epi32(ab_07, cd_07);
2515  __m256i efgh_03 = _mm256_unpacklo_epi32(ef_07, gh_07);
2516  __m256i efgh_47 = _mm256_unpackhi_epi32(ef_07, gh_07);
2517  __m256i ijkl_03 = _mm256_unpacklo_epi32(ij_07, kl_07);
2518  __m256i ijkl_47 = _mm256_unpackhi_epi32(ij_07, kl_07);
2519  __m256i mnop_03 = _mm256_unpacklo_epi32(mn_07, op_07);
2520  __m256i mnop_47 = _mm256_unpackhi_epi32(mn_07, op_07);
2521 
2522  __m256i abcd_8b = _mm256_unpacklo_epi32(ab_8f, cd_8f);
2523  __m256i abcd_cf = _mm256_unpackhi_epi32(ab_8f, cd_8f);
2524  __m256i efgh_8b = _mm256_unpacklo_epi32(ef_8f, gh_8f);
2525  __m256i efgh_cf = _mm256_unpackhi_epi32(ef_8f, gh_8f);
2526  __m256i ijkl_8b = _mm256_unpacklo_epi32(ij_8f, kl_8f);
2527  __m256i ijkl_cf = _mm256_unpackhi_epi32(ij_8f, kl_8f);
2528  __m256i mnop_8b = _mm256_unpacklo_epi32(mn_8f, op_8f);
2529  __m256i mnop_cf = _mm256_unpackhi_epi32(mn_8f, op_8f);
2530 
2531  __m256i abcdefgh_01 = _mm256_unpacklo_epi64(abcd_03, efgh_03);
2532  __m256i abcdefgh_23 = _mm256_unpackhi_epi64(abcd_03, efgh_03);
2533  __m256i ijklmnop_01 = _mm256_unpacklo_epi64(ijkl_03, mnop_03);
2534  __m256i ijklmnop_23 = _mm256_unpackhi_epi64(ijkl_03, mnop_03);
2535  __m256i abcdefgh_45 = _mm256_unpacklo_epi64(abcd_47, efgh_47);
2536  __m256i abcdefgh_67 = _mm256_unpackhi_epi64(abcd_47, efgh_47);
2537  __m256i ijklmnop_45 = _mm256_unpacklo_epi64(ijkl_47, mnop_47);
2538  __m256i ijklmnop_67 = _mm256_unpackhi_epi64(ijkl_47, mnop_47);
2539  __m256i abcdefgh_89 = _mm256_unpacklo_epi64(abcd_8b, efgh_8b);
2540  __m256i abcdefgh_ab = _mm256_unpackhi_epi64(abcd_8b, efgh_8b);
2541  __m256i ijklmnop_89 = _mm256_unpacklo_epi64(ijkl_8b, mnop_8b);
2542  __m256i ijklmnop_ab = _mm256_unpackhi_epi64(ijkl_8b, mnop_8b);
2543  __m256i abcdefgh_cd = _mm256_unpacklo_epi64(abcd_cf, efgh_cf);
2544  __m256i abcdefgh_ef = _mm256_unpackhi_epi64(abcd_cf, efgh_cf);
2545  __m256i ijklmnop_cd = _mm256_unpacklo_epi64(ijkl_cf, mnop_cf);
2546  __m256i ijklmnop_ef = _mm256_unpackhi_epi64(ijkl_cf, mnop_cf);
2547 
2548  // NOTE: there is no unpacklo/hi instruction for 128-bit lanes, so use permutes instead.
2549  __m256i a_p_0 = _mm256_permute2x128_si256(abcdefgh_01, ijklmnop_01, 0x20);
2550  __m256i a_p_1 = _mm256_permute2x128_si256(abcdefgh_23, ijklmnop_23, 0x20);
2551  __m256i a_p_2 = _mm256_permute2x128_si256(abcdefgh_45, ijklmnop_45, 0x20);
2552  __m256i a_p_3 = _mm256_permute2x128_si256(abcdefgh_67, ijklmnop_67, 0x20);
2553  __m256i a_p_4 = _mm256_permute2x128_si256(abcdefgh_89, ijklmnop_89, 0x20);
2554  __m256i a_p_5 = _mm256_permute2x128_si256(abcdefgh_ab, ijklmnop_ab, 0x20);
2555  __m256i a_p_6 = _mm256_permute2x128_si256(abcdefgh_cd, ijklmnop_cd, 0x20);
2556  __m256i a_p_7 = _mm256_permute2x128_si256(abcdefgh_ef, ijklmnop_ef, 0x20);
2557  __m256i a_p_8 = _mm256_permute2x128_si256(abcdefgh_01, ijklmnop_01, 0x31);
2558  __m256i a_p_9 = _mm256_permute2x128_si256(abcdefgh_23, ijklmnop_23, 0x31);
2559  __m256i a_p_a = _mm256_permute2x128_si256(abcdefgh_45, ijklmnop_45, 0x31);
2560  __m256i a_p_b = _mm256_permute2x128_si256(abcdefgh_67, ijklmnop_67, 0x31);
2561  __m256i a_p_c = _mm256_permute2x128_si256(abcdefgh_89, ijklmnop_89, 0x31);
2562  __m256i a_p_d = _mm256_permute2x128_si256(abcdefgh_ab, ijklmnop_ab, 0x31);
2563  __m256i a_p_e = _mm256_permute2x128_si256(abcdefgh_cd, ijklmnop_cd, 0x31);
2564  __m256i a_p_f = _mm256_permute2x128_si256(abcdefgh_ef, ijklmnop_ef, 0x31);
2565 
2566  kernel.packet[0] = a_p_0;
2567  kernel.packet[1] = a_p_1;
2568  kernel.packet[2] = a_p_2;
2569  kernel.packet[3] = a_p_3;
2570  kernel.packet[4] = a_p_4;
2571  kernel.packet[5] = a_p_5;
2572  kernel.packet[6] = a_p_6;
2573  kernel.packet[7] = a_p_7;
2574  kernel.packet[8] = a_p_8;
2575  kernel.packet[9] = a_p_9;
2576  kernel.packet[10] = a_p_a;
2577  kernel.packet[11] = a_p_b;
2578  kernel.packet[12] = a_p_c;
2579  kernel.packet[13] = a_p_d;
2580  kernel.packet[14] = a_p_e;
2581  kernel.packet[15] = a_p_f;
2582 }
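// The 16x16 half transpose mirrors the float kernel one element size down:
// epi16 unpacks merge row pairs, epi32/epi64 unpacks widen the interleave,
// and _mm256_permute2x128_si256 swaps the 128-bit halves that the unpack
// instructions cannot reach.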
2583 
2584 EIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet16h, 8>& kernel) {
2585  EIGEN_ALIGN64 half in[8][16];
2586  pstore<half>(in[0], kernel.packet[0]);
2587  pstore<half>(in[1], kernel.packet[1]);
2588  pstore<half>(in[2], kernel.packet[2]);
2589  pstore<half>(in[3], kernel.packet[3]);
2590  pstore<half>(in[4], kernel.packet[4]);
2591  pstore<half>(in[5], kernel.packet[5]);
2592  pstore<half>(in[6], kernel.packet[6]);
2593  pstore<half>(in[7], kernel.packet[7]);
2594 
2595  EIGEN_ALIGN64 half out[8][16];
2596 
2597  for (int i = 0; i < 8; ++i) {
2598  for (int j = 0; j < 8; ++j) {
2599  out[i][j] = in[j][2 * i];
2600  }
2601  for (int j = 0; j < 8; ++j) {
2602  out[i][j + 8] = in[j][2 * i + 1];
2603  }
2604  }
2605 
2606  kernel.packet[0] = pload<Packet16h>(out[0]);
2607  kernel.packet[1] = pload<Packet16h>(out[1]);
2608  kernel.packet[2] = pload<Packet16h>(out[2]);
2609  kernel.packet[3] = pload<Packet16h>(out[3]);
2610  kernel.packet[4] = pload<Packet16h>(out[4]);
2611  kernel.packet[5] = pload<Packet16h>(out[5]);
2612  kernel.packet[6] = pload<Packet16h>(out[6]);
2613  kernel.packet[7] = pload<Packet16h>(out[7]);
2614 }
2615 
2616 EIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet16h, 4>& kernel) {
2617  EIGEN_ALIGN64 half in[4][16];
2618  pstore<half>(in[0], kernel.packet[0]);
2619  pstore<half>(in[1], kernel.packet[1]);
2620  pstore<half>(in[2], kernel.packet[2]);
2621  pstore<half>(in[3], kernel.packet[3]);
2622 
2623  EIGEN_ALIGN64 half out[4][16];
2624 
2625  for (int i = 0; i < 4; ++i) {
2626  for (int j = 0; j < 4; ++j) {
2627  out[i][j] = in[j][4 * i];
2628  }
2629  for (int j = 0; j < 4; ++j) {
2630  out[i][j + 4] = in[j][4 * i + 1];
2631  }
2632  for (int j = 0; j < 4; ++j) {
2633  out[i][j + 8] = in[j][4 * i + 2];
2634  }
2635  for (int j = 0; j < 4; ++j) {
2636  out[i][j + 12] = in[j][4 * i + 3];
2637  }
2638  }
2639 
2640  kernel.packet[0] = pload<Packet16h>(out[0]);
2641  kernel.packet[1] = pload<Packet16h>(out[1]);
2642  kernel.packet[2] = pload<Packet16h>(out[2]);
2643  kernel.packet[3] = pload<Packet16h>(out[3]);
2644 }
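// Unlike the 16-row kernel, the 4- and 8-row half transposes above go
// through aligned scratch memory (pstore, permuted reads, pload); with so
// few rows this is likely cheaper, and certainly simpler, than a full
// shuffle chain.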
2645 
2646 template <>
2647 struct is_arithmetic<Packet16bf> {
2648  enum { value = true };
2649 };
2650 
2651 template <>
2652 struct packet_traits<bfloat16> : default_packet_traits {
2653  typedef Packet16bf type;
2654  typedef Packet8bf half;
2655  enum {
2656  Vectorizable = 1,
2657  AlignedOnScalar = 1,
2658  size = 16,
2659  HasBlend = 0,
2660  HasInsert = 1,
2661  HasSin = EIGEN_FAST_MATH,
2662  HasCos = EIGEN_FAST_MATH,
2663  HasSqrt = 1,
2664  HasRsqrt = 1,
2665 #ifdef EIGEN_VECTORIZE_AVX512DQ
2666  HasLog = 1, // Currently fails tests due to insufficient accuracy.
2667  HasLog1p = 1,
2668  HasExpm1 = 1,
2669  HasNdtri = 1,
2670  HasBessel = 1,
2671 #endif
2672  HasExp = 1,
2673  HasTanh = EIGEN_FAST_MATH,
2674  HasErf = EIGEN_FAST_MATH,
2675  HasCmp = 1,
2676  HasDiv = 1
2677  };
2678 };
2679 
2680 template <>
2681 struct unpacket_traits<Packet16bf> {
2682  typedef bfloat16 type;
2683  enum {
2684  size = 16,
2685  alignment = Aligned32,
2686  vectorizable = true,
2687  masked_load_available = false,
2688  masked_store_available = false
2689  };
2690  typedef Packet8bf half;
2691 };
2692 
2693 template <>
2694 EIGEN_STRONG_INLINE Packet16bf pset1<Packet16bf>(const bfloat16& from) {
2695  return _mm256_set1_epi16(from.value);
2696 }
2697 
2698 template <>
2699 EIGEN_STRONG_INLINE bfloat16 pfirst<Packet16bf>(const Packet16bf& from) {
2700  bfloat16 t;
2701  t.value = static_cast<unsigned short>(_mm256_extract_epi16(from, 0));
2702  return t;
2703 }
2704 
2705 template <>
2706 EIGEN_STRONG_INLINE Packet16bf pload<Packet16bf>(const bfloat16* from) {
2707  return _mm256_load_si256(reinterpret_cast<const __m256i*>(from));
2708 }
2709 
2710 template <>
2711 EIGEN_STRONG_INLINE Packet16bf ploadu<Packet16bf>(const bfloat16* from) {
2712  return _mm256_loadu_si256(reinterpret_cast<const __m256i*>(from));
2713 }
2714 
2715 template <>
2716 EIGEN_STRONG_INLINE void pstore<bfloat16>(bfloat16* to, const Packet16bf& from) {
2717  _mm256_store_si256(reinterpret_cast<__m256i*>(to), from);
2718 }
2719 
2720 template <>
2721 EIGEN_STRONG_INLINE void pstoreu<bfloat16>(bfloat16* to, const Packet16bf& from) {
2722  _mm256_storeu_si256(reinterpret_cast<__m256i*>(to), from);
2723 }
2724 
2725 template <>
2726 EIGEN_STRONG_INLINE Packet16bf ploaddup<Packet16bf>(const bfloat16* from) {
2727  unsigned short a = from[0].value;
2728  unsigned short b = from[1].value;
2729  unsigned short c = from[2].value;
2730  unsigned short d = from[3].value;
2731  unsigned short e = from[4].value;
2732  unsigned short f = from[5].value;
2733  unsigned short g = from[6].value;
2734  unsigned short h = from[7].value;
2735  return _mm256_set_epi16(h, h, g, g, f, f, e, e, d, d, c, c, b, b, a, a);
2736 }
2737 
2738 template <>
2739 EIGEN_STRONG_INLINE Packet16bf ploadquad(const bfloat16* from) {
2740  unsigned short a = from[0].value;
2741  unsigned short b = from[1].value;
2742  unsigned short c = from[2].value;
2743  unsigned short d = from[3].value;
2744  return _mm256_set_epi16(d, d, d, d, c, c, c, c, b, b, b, b, a, a, a, a);
2745 }
2746 
2747 EIGEN_STRONG_INLINE Packet16f Bf16ToF32(const Packet16bf& a) {
2748  return _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_cvtepu16_epi32(a), 16));
2749 }
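// bfloat16 is the upper half of an IEEE-754 binary32, so widening is exact:
// zero-extend each 16-bit value and shift it into the high bits, e.g.
// bf16 0x3F80 becomes float bits 0x3F800000 == 1.0f.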
2750 
2751 // Convert float to bfloat16 using the round-to-nearest-even algorithm (also correct for denormals).
2752 EIGEN_STRONG_INLINE Packet16bf F32ToBf16(const Packet16f& a) {
2753  Packet16bf r;
2754 
2755 #if defined(EIGEN_VECTORIZE_AVX512BF16) && EIGEN_GNUC_STRICT_AT_LEAST(10, 1, 0)
2756  // GCC 10.1 and later support avx512bf16 and a C-style explicit cast
2757  // (C++ static_cast is not supported yet), so do the conversion via the
2758  // intrinsic and stay on the register path for performance.
2759  r = (__m256i)(_mm512_cvtneps_pbh(a));
2760 
2761 #else
2762  __m512i t;
2763  __m512i input = _mm512_castps_si512(a);
2764  __m512i nan = _mm512_set1_epi32(0x7fc0);
2765 
2766  // uint32_t lsb = (input >> 16) & 1;
2767  t = _mm512_and_si512(_mm512_srli_epi32(input, 16), _mm512_set1_epi32(1));
2768  // uint32_t rounding_bias = 0x7fff + lsb;
2769  t = _mm512_add_epi32(t, _mm512_set1_epi32(0x7fff));
2770  // input += rounding_bias;
2771  t = _mm512_add_epi32(t, input);
2772  // input = input >> 16;
2773  t = _mm512_srli_epi32(t, 16);
2774 
2775  // Check NaN before converting back to bf16
2776  __mmask16 mask = _mm512_cmp_ps_mask(a, a, _CMP_ORD_Q);
2777 
2778  t = _mm512_mask_blend_epi32(mask, nan, t);
2779  // output.value = static_cast<uint16_t>(input);
2780  r = _mm512_cvtepi32_epi16(t);
2781 #endif // EIGEN_VECTORIZE_AVX512BF16
2782 
2783  return r;
2784 }
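// Worked example of the rounding bias above (values illustrative):
// a = 1.00390625f has bits 0x3F808000, exactly halfway between bf16 0x3F80
// and 0x3F81. lsb = (0x3F80 & 1) = 0, so bias = 0x7fff and
// 0x3F808000 + 0x7fff = 0x3F80FFFF, which shifted right by 16 gives 0x3F80:
// the tie rounds to the even neighbor, anything past halfway carries to 0x3F81.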
2785 
2786 template <>
2787 EIGEN_STRONG_INLINE Packet16bf ptrue(const Packet16bf& a) {
2788  return Packet16bf(ptrue(Packet8i(a)));
2789 }
2790 
2791 template <>
2792 EIGEN_STRONG_INLINE Packet16bf por(const Packet16bf& a, const Packet16bf& b) {
2793  return Packet16bf(por(Packet8i(a), Packet8i(b)));
2794 }
2795 
2796 template <>
2797 EIGEN_STRONG_INLINE Packet16bf pxor(const Packet16bf& a, const Packet16bf& b) {
2798  return Packet16bf(pxor(Packet8i(a), Packet8i(b)));
2799 }
2800 
2801 template <>
2802 EIGEN_STRONG_INLINE Packet16bf pand(const Packet16bf& a, const Packet16bf& b) {
2803  return Packet16bf(pand(Packet8i(a), Packet8i(b)));
2804 }
2805 
2806 template <>
2807 EIGEN_STRONG_INLINE Packet16bf pandnot(const Packet16bf& a, const Packet16bf& b) {
2808  return Packet16bf(pandnot(Packet8i(a), Packet8i(b)));
2809 }
2810 
2811 template <>
2812 EIGEN_STRONG_INLINE Packet16bf pselect(const Packet16bf& mask, const Packet16bf& a, const Packet16bf& b) {
2813  // The mask is expected to be all-zeros or all-ones per element, so an
2814  // 8-bit blend intrinsic handles it efficiently.
2815  return _mm256_blendv_epi8(b, a, mask);
2816 }
2817 
2818 template <>
2819 EIGEN_STRONG_INLINE Packet16bf pround<Packet16bf>(const Packet16bf& a) {
2820  return F32ToBf16(pround<Packet16f>(Bf16ToF32(a)));
2821 }
2822 
2823 template <>
2824 EIGEN_STRONG_INLINE Packet16bf print<Packet16bf>(const Packet16bf& a) {
2825  return F32ToBf16(print<Packet16f>(Bf16ToF32(a)));
2826 }
2827 
2828 template <>
2829 EIGEN_STRONG_INLINE Packet16bf pceil<Packet16bf>(const Packet16bf& a) {
2830  return F32ToBf16(pceil<Packet16f>(Bf16ToF32(a)));
2831 }
2832 
2833 template <>
2834 EIGEN_STRONG_INLINE Packet16bf pfloor<Packet16bf>(const Packet16bf& a) {
2835  return F32ToBf16(pfloor<Packet16f>(Bf16ToF32(a)));
2836 }
2837 
2838 template <>
2839 EIGEN_STRONG_INLINE Packet16bf ptrunc<Packet16bf>(const Packet16bf& a) {
2840  return F32ToBf16(ptrunc<Packet16f>(Bf16ToF32(a)));
2841 }
2842 
2843 template <>
2844 EIGEN_STRONG_INLINE Packet16bf pcmp_eq(const Packet16bf& a, const Packet16bf& b) {
2845  return Pack32To16(pcmp_eq(Bf16ToF32(a), Bf16ToF32(b)));
2846 }
2847 
2848 template <>
2849 EIGEN_STRONG_INLINE Packet16bf pcmp_le(const Packet16bf& a, const Packet16bf& b) {
2850  return Pack32To16(pcmp_le(Bf16ToF32(a), Bf16ToF32(b)));
2851 }
2852 
2853 template <>
2854 EIGEN_STRONG_INLINE Packet16bf pcmp_lt(const Packet16bf& a, const Packet16bf& b) {
2855  return Pack32To16(pcmp_lt(Bf16ToF32(a), Bf16ToF32(b)));
2856 }
2857 
2858 template <>
2859 EIGEN_STRONG_INLINE Packet16bf pcmp_lt_or_nan(const Packet16bf& a, const Packet16bf& b) {
2860  return Pack32To16(pcmp_lt_or_nan(Bf16ToF32(a), Bf16ToF32(b)));
2861 }
2862 
2863 template <>
2864 EIGEN_STRONG_INLINE Packet16bf pnegate(const Packet16bf& a) {
2865  Packet16bf sign_mask = _mm256_set1_epi16(static_cast<unsigned short>(0x8000));
2866  return _mm256_xor_si256(a, sign_mask);
2867 }
2868 
2869 template <>
2870 EIGEN_STRONG_INLINE Packet16bf pconj(const Packet16bf& a) {
2871  return a;
2872 }
2873 
2874 template <>
2875 EIGEN_STRONG_INLINE Packet16bf pabs(const Packet16bf& a) {
2876  const __m256i sign_mask = _mm256_set1_epi16(static_cast<numext::uint16_t>(0x8000));
2877  return _mm256_andnot_si256(sign_mask, a);
2878 }
2879 
2880 template <>
2881 EIGEN_STRONG_INLINE Packet16bf padd<Packet16bf>(const Packet16bf& a, const Packet16bf& b) {
2882  return F32ToBf16(padd<Packet16f>(Bf16ToF32(a), Bf16ToF32(b)));
2883 }
2884 
2885 template <>
2886 EIGEN_STRONG_INLINE Packet16bf psub<Packet16bf>(const Packet16bf& a, const Packet16bf& b) {
2887  return F32ToBf16(psub<Packet16f>(Bf16ToF32(a), Bf16ToF32(b)));
2888 }
2889 
2890 template <>
2891 EIGEN_STRONG_INLINE Packet16bf pmul<Packet16bf>(const Packet16bf& a, const Packet16bf& b) {
2892  return F32ToBf16(pmul<Packet16f>(Bf16ToF32(a), Bf16ToF32(b)));
2893 }
2894 
2895 template <>
2896 EIGEN_STRONG_INLINE Packet16bf pdiv<Packet16bf>(const Packet16bf& a, const Packet16bf& b) {
2897  return F32ToBf16(pdiv<Packet16f>(Bf16ToF32(a), Bf16ToF32(b)));
2898 }
2899 
2900 template <>
2901 EIGEN_STRONG_INLINE Packet16bf pmin<Packet16bf>(const Packet16bf& a, const Packet16bf& b) {
2902  return F32ToBf16(pmin<Packet16f>(Bf16ToF32(a), Bf16ToF32(b)));
2903 }
2904 
2905 template <>
2906 EIGEN_STRONG_INLINE Packet16bf pmax<Packet16bf>(const Packet16bf& a, const Packet16bf& b) {
2907  return F32ToBf16(pmax<Packet16f>(Bf16ToF32(a), Bf16ToF32(b)));
2908 }
2909 
2910 template <>
2911 EIGEN_STRONG_INLINE Packet16bf plset<Packet16bf>(const bfloat16& a) {
2912  return F32ToBf16(plset<Packet16f>(static_cast<float>(a)));
2913 }
2914 
2915 template <>
2916 EIGEN_STRONG_INLINE Packet8bf predux_half_dowto4<Packet16bf>(const Packet16bf& a) {
2917  Packet8bf lane0 = _mm256_extractf128_si256(a, 0);
2918  Packet8bf lane1 = _mm256_extractf128_si256(a, 1);
2919  return padd<Packet8bf>(lane0, lane1);
2920 }
2921 
2922 template <>
2923 EIGEN_STRONG_INLINE bfloat16 predux<Packet16bf>(const Packet16bf& p) {
2924  return static_cast<bfloat16>(predux<Packet16f>(Bf16ToF32(p)));
2925 }
2926 
2927 template <>
2928 EIGEN_STRONG_INLINE bfloat16 predux_mul<Packet16bf>(const Packet16bf& from) {
2929  return static_cast<bfloat16>(predux_mul<Packet16f>(Bf16ToF32(from)));
2930 }
2931 
2932 template <>
2933 EIGEN_STRONG_INLINE bfloat16 predux_min<Packet16bf>(const Packet16bf& from) {
2934  return static_cast<bfloat16>(predux_min<Packet16f>(Bf16ToF32(from)));
2935 }
2936 
2937 template <>
2938 EIGEN_STRONG_INLINE bfloat16 predux_max<Packet16bf>(const Packet16bf& from) {
2939  return static_cast<bfloat16>(predux_max<Packet16f>(Bf16ToF32(from)));
2940 }
2941 
2942 template <>
2943 EIGEN_STRONG_INLINE Packet16bf preverse(const Packet16bf& a) {
2944  __m256i m = _mm256_setr_epi8(14, 15, 12, 13, 10, 11, 8, 9, 6, 7, 4, 5, 2, 3, 0, 1, 14, 15, 12, 13, 10, 11, 8, 9, 6, 7,
2945  4, 5, 2, 3, 0, 1);
2946 
2947  Packet16bf res;
2948  // Swap hi and lo first because shuffle is in 128-bit lanes.
2949  res = _mm256_permute2x128_si256(a, a, 1);
2950  // Shuffle 8-bit values in src within 2*128-bit lanes.
2951  return _mm256_shuffle_epi8(res, m);
2952 }
2953 
2954 template <>
2955 EIGEN_STRONG_INLINE Packet16bf pgather<bfloat16, Packet16bf>(const bfloat16* from, Index stride) {
2956  return _mm256_set_epi16(
2957  from[15 * stride].value, from[14 * stride].value, from[13 * stride].value, from[12 * stride].value,
2958  from[11 * stride].value, from[10 * stride].value, from[9 * stride].value, from[8 * stride].value,
2959  from[7 * stride].value, from[6 * stride].value, from[5 * stride].value, from[4 * stride].value,
2960  from[3 * stride].value, from[2 * stride].value, from[1 * stride].value, from[0 * stride].value);
2961 }
2962 
2963 template <>
2964 EIGEN_STRONG_INLINE void pscatter<bfloat16, Packet16bf>(bfloat16* to, const Packet16bf& from, Index stride) {
2965  EIGEN_ALIGN64 bfloat16 aux[16];
2966  pstore(aux, from);
2967  to[stride * 0] = aux[0];
2968  to[stride * 1] = aux[1];
2969  to[stride * 2] = aux[2];
2970  to[stride * 3] = aux[3];
2971  to[stride * 4] = aux[4];
2972  to[stride * 5] = aux[5];
2973  to[stride * 6] = aux[6];
2974  to[stride * 7] = aux[7];
2975  to[stride * 8] = aux[8];
2976  to[stride * 9] = aux[9];
2977  to[stride * 10] = aux[10];
2978  to[stride * 11] = aux[11];
2979  to[stride * 12] = aux[12];
2980  to[stride * 13] = aux[13];
2981  to[stride * 14] = aux[14];
2982  to[stride * 15] = aux[15];
2983 }
2984 
2985 EIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet16bf, 16>& kernel) {
2986  __m256i a = kernel.packet[0];
2987  __m256i b = kernel.packet[1];
2988  __m256i c = kernel.packet[2];
2989  __m256i d = kernel.packet[3];
2990  __m256i e = kernel.packet[4];
2991  __m256i f = kernel.packet[5];
2992  __m256i g = kernel.packet[6];
2993  __m256i h = kernel.packet[7];
2994  __m256i i = kernel.packet[8];
2995  __m256i j = kernel.packet[9];
2996  __m256i k = kernel.packet[10];
2997  __m256i l = kernel.packet[11];
2998  __m256i m = kernel.packet[12];
2999  __m256i n = kernel.packet[13];
3000  __m256i o = kernel.packet[14];
3001  __m256i p = kernel.packet[15];
3002 
3003  __m256i ab_07 = _mm256_unpacklo_epi16(a, b);
3004  __m256i cd_07 = _mm256_unpacklo_epi16(c, d);
3005  __m256i ef_07 = _mm256_unpacklo_epi16(e, f);
3006  __m256i gh_07 = _mm256_unpacklo_epi16(g, h);
3007  __m256i ij_07 = _mm256_unpacklo_epi16(i, j);
3008  __m256i kl_07 = _mm256_unpacklo_epi16(k, l);
3009  __m256i mn_07 = _mm256_unpacklo_epi16(m, n);
3010  __m256i op_07 = _mm256_unpacklo_epi16(o, p);
3011 
3012  __m256i ab_8f = _mm256_unpackhi_epi16(a, b);
3013  __m256i cd_8f = _mm256_unpackhi_epi16(c, d);
3014  __m256i ef_8f = _mm256_unpackhi_epi16(e, f);
3015  __m256i gh_8f = _mm256_unpackhi_epi16(g, h);
3016  __m256i ij_8f = _mm256_unpackhi_epi16(i, j);
3017  __m256i kl_8f = _mm256_unpackhi_epi16(k, l);
3018  __m256i mn_8f = _mm256_unpackhi_epi16(m, n);
3019  __m256i op_8f = _mm256_unpackhi_epi16(o, p);
3020 
3021  __m256i abcd_03 = _mm256_unpacklo_epi32(ab_07, cd_07);
3022  __m256i abcd_47 = _mm256_unpackhi_epi32(ab_07, cd_07);
3023  __m256i efgh_03 = _mm256_unpacklo_epi32(ef_07, gh_07);
3024  __m256i efgh_47 = _mm256_unpackhi_epi32(ef_07, gh_07);
3025  __m256i ijkl_03 = _mm256_unpacklo_epi32(ij_07, kl_07);
3026  __m256i ijkl_47 = _mm256_unpackhi_epi32(ij_07, kl_07);
3027  __m256i mnop_03 = _mm256_unpacklo_epi32(mn_07, op_07);
3028  __m256i mnop_47 = _mm256_unpackhi_epi32(mn_07, op_07);
3029 
3030  __m256i abcd_8b = _mm256_unpacklo_epi32(ab_8f, cd_8f);
3031  __m256i abcd_cf = _mm256_unpackhi_epi32(ab_8f, cd_8f);
3032  __m256i efgh_8b = _mm256_unpacklo_epi32(ef_8f, gh_8f);
3033  __m256i efgh_cf = _mm256_unpackhi_epi32(ef_8f, gh_8f);
3034  __m256i ijkl_8b = _mm256_unpacklo_epi32(ij_8f, kl_8f);
3035  __m256i ijkl_cf = _mm256_unpackhi_epi32(ij_8f, kl_8f);
3036  __m256i mnop_8b = _mm256_unpacklo_epi32(mn_8f, op_8f);
3037  __m256i mnop_cf = _mm256_unpackhi_epi32(mn_8f, op_8f);
3038 
3039  __m256i abcdefgh_01 = _mm256_unpacklo_epi64(abcd_03, efgh_03);
3040  __m256i abcdefgh_23 = _mm256_unpackhi_epi64(abcd_03, efgh_03);
3041  __m256i ijklmnop_01 = _mm256_unpacklo_epi64(ijkl_03, mnop_03);
3042  __m256i ijklmnop_23 = _mm256_unpackhi_epi64(ijkl_03, mnop_03);
3043  __m256i abcdefgh_45 = _mm256_unpacklo_epi64(abcd_47, efgh_47);
3044  __m256i abcdefgh_67 = _mm256_unpackhi_epi64(abcd_47, efgh_47);
3045  __m256i ijklmnop_45 = _mm256_unpacklo_epi64(ijkl_47, mnop_47);
3046  __m256i ijklmnop_67 = _mm256_unpackhi_epi64(ijkl_47, mnop_47);
3047  __m256i abcdefgh_89 = _mm256_unpacklo_epi64(abcd_8b, efgh_8b);
3048  __m256i abcdefgh_ab = _mm256_unpackhi_epi64(abcd_8b, efgh_8b);
3049  __m256i ijklmnop_89 = _mm256_unpacklo_epi64(ijkl_8b, mnop_8b);
3050  __m256i ijklmnop_ab = _mm256_unpackhi_epi64(ijkl_8b, mnop_8b);
3051  __m256i abcdefgh_cd = _mm256_unpacklo_epi64(abcd_cf, efgh_cf);
3052  __m256i abcdefgh_ef = _mm256_unpackhi_epi64(abcd_cf, efgh_cf);
3053  __m256i ijklmnop_cd = _mm256_unpacklo_epi64(ijkl_cf, mnop_cf);
3054  __m256i ijklmnop_ef = _mm256_unpackhi_epi64(ijkl_cf, mnop_cf);
3055 
3056  // NOTE: there is no unpacklo/hi instruction for 128-bit lanes, so use permutes instead.
3057  kernel.packet[0] = _mm256_permute2x128_si256(abcdefgh_01, ijklmnop_01, 0x20);
3058  kernel.packet[1] = _mm256_permute2x128_si256(abcdefgh_23, ijklmnop_23, 0x20);
3059  kernel.packet[2] = _mm256_permute2x128_si256(abcdefgh_45, ijklmnop_45, 0x20);
3060  kernel.packet[3] = _mm256_permute2x128_si256(abcdefgh_67, ijklmnop_67, 0x20);
3061  kernel.packet[4] = _mm256_permute2x128_si256(abcdefgh_89, ijklmnop_89, 0x20);
3062  kernel.packet[5] = _mm256_permute2x128_si256(abcdefgh_ab, ijklmnop_ab, 0x20);
3063  kernel.packet[6] = _mm256_permute2x128_si256(abcdefgh_cd, ijklmnop_cd, 0x20);
3064  kernel.packet[7] = _mm256_permute2x128_si256(abcdefgh_ef, ijklmnop_ef, 0x20);
3065  kernel.packet[8] = _mm256_permute2x128_si256(abcdefgh_01, ijklmnop_01, 0x31);
3066  kernel.packet[9] = _mm256_permute2x128_si256(abcdefgh_23, ijklmnop_23, 0x31);
3067  kernel.packet[10] = _mm256_permute2x128_si256(abcdefgh_45, ijklmnop_45, 0x31);
3068  kernel.packet[11] = _mm256_permute2x128_si256(abcdefgh_67, ijklmnop_67, 0x31);
3069  kernel.packet[12] = _mm256_permute2x128_si256(abcdefgh_89, ijklmnop_89, 0x31);
3070  kernel.packet[13] = _mm256_permute2x128_si256(abcdefgh_ab, ijklmnop_ab, 0x31);
3071  kernel.packet[14] = _mm256_permute2x128_si256(abcdefgh_cd, ijklmnop_cd, 0x31);
3072  kernel.packet[15] = _mm256_permute2x128_si256(abcdefgh_ef, ijklmnop_ef, 0x31);
3073 }
3074 
3075 EIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet16bf, 4>& kernel) {
3076  __m256i a = kernel.packet[0];
3077  __m256i b = kernel.packet[1];
3078  __m256i c = kernel.packet[2];
3079  __m256i d = kernel.packet[3];
3080 
3081  __m256i ab_07 = _mm256_unpacklo_epi16(a, b);
3082  __m256i cd_07 = _mm256_unpacklo_epi16(c, d);
3083  __m256i ab_8f = _mm256_unpackhi_epi16(a, b);
3084  __m256i cd_8f = _mm256_unpackhi_epi16(c, d);
3085 
3086  __m256i abcd_03 = _mm256_unpacklo_epi32(ab_07, cd_07);
3087  __m256i abcd_47 = _mm256_unpackhi_epi32(ab_07, cd_07);
3088  __m256i abcd_8b = _mm256_unpacklo_epi32(ab_8f, cd_8f);
3089  __m256i abcd_cf = _mm256_unpackhi_epi32(ab_8f, cd_8f);
3090 
3091  // NOTE: there is no unpacklo/hi instruction for 128-bit lanes, so use permutes instead.
3092  kernel.packet[0] = _mm256_permute2x128_si256(abcd_03, abcd_47, 0x20);
3093  kernel.packet[1] = _mm256_permute2x128_si256(abcd_8b, abcd_cf, 0x20);
3094  kernel.packet[2] = _mm256_permute2x128_si256(abcd_03, abcd_47, 0x31);
3095  kernel.packet[3] = _mm256_permute2x128_si256(abcd_8b, abcd_cf, 0x31);
3096 }
3097 
3098 } // end namespace internal
3099 
3100 } // end namespace Eigen
3101 
3102 #endif // EIGEN_PACKET_MATH_AVX512_H
#define EIGEN_EXTRACT_8i_FROM_16i(INPUT, OUTPUT)
Definition: AVX512/PacketMath.h:1426
#define PACK_OUTPUT_2(OUTPUT, INPUT, INDEX, STRIDE)
Definition: AVX512/PacketMath.h:1779
#define SHUFFLE_EPI32(A, B, M)
Definition: AVX512/PacketMath.h:2032
#define PACK_OUTPUT_D(OUTPUT, INPUT, INDEX, STRIDE)
Definition: AVX512/PacketMath.h:1858
#define PACK_OUTPUT_L(OUTPUT, INPUT, INDEX, STRIDE)
Definition: AVX512/PacketMath.h:1862
#define PACK_OUTPUT_I32_2(OUTPUT, INPUT, INDEX, STRIDE)
Definition: AVX512/PacketMath.h:2029
#define EIGEN_EXTRACT_8f_FROM_16f(INPUT, OUTPUT)
Definition: AVX512/PacketMath.h:1420
#define PACK_OUTPUT(OUTPUT, INPUT, INDEX, STRIDE)
Definition: AVX512/PacketMath.h:1665
#define PACK_OUTPUT_I32(OUTPUT, INPUT, INDEX, STRIDE)
Definition: AVX512/PacketMath.h:2026
int i
Definition: BiCGSTAB_step_by_step.cpp:9
const unsigned n
Definition: CG3DPackingUnitTest.cpp:11
#define EIGEN_ALIGN64
Definition: ConfigureVectorization.h:144
Array< double, 1, 3 > e(1./3., 0.5, 2.)
#define EIGEN_DEBUG_ALIGNED_STORE
Definition: GenericPacketMath.h:38
#define EIGEN_DEBUG_ALIGNED_LOAD
Definition: GenericPacketMath.h:30
#define EIGEN_DEBUG_UNALIGNED_STORE
Definition: GenericPacketMath.h:42
#define EIGEN_DEBUG_UNALIGNED_LOAD
Definition: GenericPacketMath.h:34
#define EIGEN_DEVICE_FUNC
Definition: Macros.h:892
#define EIGEN_FAST_MATH
Definition: Macros.h:51
#define EIGEN_STRONG_INLINE
Definition: Macros.h:834
cout<< "Here is the matrix m:"<< endl<< m<< endl;Matrix< ptrdiff_t, 3, 1 > res
Definition: PartialRedux_count.cpp:3
float * p
Definition: Tutorial_Map_using.cpp:9
Scalar * b
Definition: benchVecAdd.cpp:17
SCALAR Scalar
Definition: bench_gemm.cpp:45
EIGEN_STRONG_INLINE PacketScalar packet(Index rowId, Index colId) const
Definition: PlainObjectBase.h:247
@ N
Definition: constructor.cpp:22
static int f(const TensorMap< Tensor< int, 3 > > &tensor)
Definition: cxx11_tensor_map.cpp:237
@ Aligned64
Definition: Constants.h:239
@ Aligned32
Definition: Constants.h:238
return int(ret)+1
Eigen::DenseIndex ret
Definition: level1_cplx_impl.h:43
const Scalar * a
Definition: level2_cplx_impl.h:32
const char const int const RealScalar const RealScalar * pa
Definition: level2_cplx_impl.h:20
int * m
Definition: level2_cplx_impl.h:294
char char char int int * k
Definition: level2_impl.h:374
Eigen::Matrix< Scalar, Dynamic, Dynamic, ColMajor > tmp
Definition: level3_impl.h:365
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR __half_raw raw_uint16_to_half(numext::uint16_t x)
Definition: Half.h:496
EIGEN_STRONG_INLINE Packet8d pandnot< Packet8d >(const Packet8d &a, const Packet8d &b)
Definition: AVX512/PacketMath.h:915
EIGEN_STRONG_INLINE Packet16i ptrue< Packet16i >(const Packet16i &)
Definition: AVX512/PacketMath.h:787
EIGEN_STRONG_INLINE Packet8l ploaddup< Packet8l >(const int64_t *from)
Definition: AVX512/PacketMath.h:1037
EIGEN_STRONG_INLINE Packet16i pdiv< Packet16i >(const Packet16i &a, const Packet16i &b)
Definition: AVX512/PacketMath.h:474
__m128d Packet2d
Definition: LSX/PacketMath.h:36
EIGEN_STRONG_INLINE void pstoreu< double >(double *to, const Packet4d &from)
Definition: AVX/PacketMath.h:1628
EIGEN_STRONG_INLINE Packet pminmax_propagate_numbers(const Packet &a, const Packet &b, Op op)
Definition: SSE/PacketMath.h:1118
EIGEN_STRONG_INLINE Packet16bf ploaddup< Packet16bf >(const bfloat16 *from)
Definition: AVX512/PacketMath.h:2726
EIGEN_STRONG_INLINE Packet2cf pconj(const Packet2cf &a)
Definition: AltiVec/Complex.h:268
EIGEN_STRONG_INLINE Packet8d pload< Packet8d >(const double *from)
Definition: AVX512/PacketMath.h:973
EIGEN_STRONG_INLINE Packet8d ploadquad< Packet8d >(const double *from)
Definition: AVX512/PacketMath.h:1065
EIGEN_STRONG_INLINE Packet8d psub< Packet8d >(const Packet8d &a, const Packet8d &b)
Definition: AVX512/PacketMath.h:387
EIGEN_STRONG_INLINE Packet8d ploadu< Packet8d >(const double *from)
Definition: AVX512/PacketMath.h:990
EIGEN_STRONG_INLINE Packet8d pset1frombits< Packet8d >(const numext::uint64_t from)
Definition: AVX512/PacketMath.h:274
EIGEN_DEVICE_FUNC Packet16f pgather< float, Packet16f >(const Packet16f &src, const float *from, Index stride, uint16_t umask)
Definition: AVX512/PacketMath.h:1141
EIGEN_STRONG_INLINE Packet16f pceil< Packet16f >(const Packet16f &a)
Definition: AVX512/PacketMath.h:760
EIGEN_STRONG_INLINE Packet8d plset< Packet8d >(const double &a)
Definition: AVX512/PacketMath.h:342
EIGEN_DEVICE_FUNC Packet padd(const Packet &a, const Packet &b)
Definition: GenericPacketMath.h:318
EIGEN_STRONG_INLINE Packet16i pmax< Packet16i >(const Packet16i &a, const Packet16i &b)
Definition: AVX512/PacketMath.h:572
EIGEN_STRONG_INLINE bfloat16 predux_max< Packet16bf >(const Packet16bf &from)
Definition: AVX512/PacketMath.h:2938
EIGEN_STRONG_INLINE Packet8h float2half(const Packet8f &a)
Definition: AVX/PacketMath.h:2283
EIGEN_STRONG_INLINE Packet8f Bf16ToF32(const Packet8bf &a)
Definition: AVX/PacketMath.h:2558
EIGEN_STRONG_INLINE Packet8f pzero(const Packet8f &)
Definition: AVX/PacketMath.h:774
EIGEN_STRONG_INLINE Packet8d pmul< Packet8d >(const Packet8d &a, const Packet8d &b)
Definition: AVX512/PacketMath.h:447
EIGEN_STRONG_INLINE Packet8f predux_half_dowto4< Packet16f >(const Packet16f &a)
Definition: AVX512/PacketMath.h:1490
EIGEN_STRONG_INLINE Packet8d pmin< PropagateNaN, Packet8d >(const Packet8d &a, const Packet8d &b)
Definition: AVX512/PacketMath.h:602
EIGEN_STRONG_INLINE Packet16bf pmax< Packet16bf >(const Packet16bf &a, const Packet16bf &b)
Definition: AVX512/PacketMath.h:2906
EIGEN_STRONG_INLINE Packet16f pfrexp< Packet16f >(const Packet16f &a, Packet16f &exponent)
Definition: AVX512/PacketMath.h:1356
EIGEN_ALWAYS_INLINE int64_t _mm_extract_epi64_0(const __m128i &a)
Definition: SSE/PacketMath.h:161
EIGEN_STRONG_INLINE Packet8f pisnan(const Packet8f &a)
Definition: AVX/PacketMath.h:1034
EIGEN_STRONG_INLINE Packet16f padd< Packet16f >(const Packet16f &a, const Packet16f &b)
Definition: AVX512/PacketMath.h:355
EIGEN_STRONG_INLINE Packet16f cat256(Packet8f a, Packet8f b)
Definition: AVX512/PacketMath.h:642
EIGEN_DEVICE_FUNC Packet8d pgather< double, Packet8d >(const Packet8d &src, const double *from, Index stride, uint8_t umask)
Definition: AVX512/PacketMath.h:1151
EIGEN_STRONG_INLINE Packet16bf pmul< Packet16bf >(const Packet16bf &a, const Packet16bf &b)
Definition: AVX512/PacketMath.h:2891
EIGEN_STRONG_INLINE Packet2d extract128(Packet8d x)
Definition: AVX512/PacketMath.h:638
EIGEN_STRONG_INLINE Packet16bf pset1< Packet16bf >(const bfloat16 &from)
Definition: AVX512/PacketMath.h:2694
EIGEN_DEVICE_FUNC Packet8l pgather< int64_t, Packet8l >(const int64_t *from, Index stride)
Definition: AVX512/PacketMath.h:1178
EIGEN_STRONG_INLINE Packet8i pset1< Packet8i >(const int &from)
Definition: AVX/PacketMath.h:756
EIGEN_STRONG_INLINE Packet16i por< Packet16i >(const Packet16i &a, const Packet16i &b)
Definition: AVX512/PacketMath.h:841
EIGEN_STRONG_INLINE Packet16h ploadquad(const Eigen::half *from)
Definition: AVX512/PacketMath.h:2250
EIGEN_STRONG_INLINE Packet16f ploaddup< Packet16f >(const float *from)
Definition: AVX512/PacketMath.h:1016
EIGEN_STRONG_INLINE Packet16h ptrunc< Packet16h >(const Packet16h &a)
Definition: AVX512/PacketMath.h:2335
EIGEN_STRONG_INLINE Packet16f pxor< Packet16f >(const Packet16f &a, const Packet16f &b)
Definition: AVX512/PacketMath.h:879
EIGEN_STRONG_INLINE Packet16f por< Packet16f >(const Packet16f &a, const Packet16f &b)
Definition: AVX512/PacketMath.h:851
EIGEN_STRONG_INLINE float predux_max< Packet16f >(const Packet16f &a)
Definition: AVX512/PacketMath.h:1618
EIGEN_STRONG_INLINE int predux< Packet16i >(const Packet16i &a)
Definition: AVX512/PacketMath.h:1485
EIGEN_STRONG_INLINE Packet16h pround< Packet16h >(const Packet16h &a)
Definition: AVX512/PacketMath.h:2315
EIGEN_STRONG_INLINE Eigen::half pfirst< Packet16h >(const Packet16h &from)
Definition: AVX512/PacketMath.h:2208
EIGEN_STRONG_INLINE Packet4d predux_half_dowto4< Packet8d >(const Packet8d &a)
Definition: AVX512/PacketMath.h:1506
EIGEN_STRONG_INLINE Packet8d print< Packet8d >(const Packet8d &a)
Definition: AVX512/PacketMath.h:755
EIGEN_STRONG_INLINE Packet8l pand< Packet8l >(const Packet8l &a, const Packet8l &b)
Definition: AVX512/PacketMath.h:812
EIGEN_STRONG_INLINE Packet16i pset1< Packet16i >(const int &from)
Definition: AVX512/PacketMath.h:260
EIGEN_STRONG_INLINE Packet16h pfloor< Packet16h >(const Packet16h &a)
Definition: AVX512/PacketMath.h:2330
EIGEN_STRONG_INLINE void ptranspose(PacketBlock< Packet2cf, 2 > &kernel)
Definition: AltiVec/Complex.h:339
EIGEN_DEVICE_FUNC void pscatter< int, Packet16i >(int *to, const Packet16i &from, Index stride)
Definition: AVX512/PacketMath.h:1236
EIGEN_STRONG_INLINE Packet16f print< Packet16f >(const Packet16f &a)
Definition: AVX512/PacketMath.h:751
EIGEN_STRONG_INLINE Packet16i ploadquad< Packet16i >(const int *from)
Definition: AVX512/PacketMath.h:1087
EIGEN_STRONG_INLINE Packet16h ploadu< Packet16h >(const Eigen::half *from)
Definition: AVX512/PacketMath.h:2218
__m512d Packet8d
Definition: AVX512/PacketMath.h:36
EIGEN_STRONG_INLINE int avx512_blend_mask(const Selector< N > &ifPacket)
Definition: AVX512/PacketMath.h:2181
EIGEN_STRONG_INLINE Packet8l ploadquad< Packet8l >(const int64_t *from)
Definition: AVX512/PacketMath.h:1076
EIGEN_STRONG_INLINE int64_t predux_min< Packet8l >(const Packet8l &a)
Definition: AVX512/PacketMath.h:1613
EIGEN_STRONG_INLINE bool predux_any(const Packet4f &x)
Definition: AltiVec/PacketMath.h:2751
EIGEN_STRONG_INLINE Packet8i por< Packet8i >(const Packet8i &a, const Packet8i &b)
Definition: AVX/PacketMath.h:1335
EIGEN_STRONG_INLINE Packet16f ploadu< Packet16f >(const float *from)
Definition: AVX512/PacketMath.h:986
EIGEN_STRONG_INLINE Packet16bf pceil< Packet16bf >(const Packet16bf &a)
Definition: AVX512/PacketMath.h:2829
EIGEN_DEVICE_FUNC Packet pdiv(const Packet &a, const Packet &b)
Definition: GenericPacketMath.h:368
EIGEN_STRONG_INLINE void pstore< bfloat16 >(bfloat16 *to, const Packet8bf &from)
Definition: AltiVec/PacketMath.h:662
EIGEN_STRONG_INLINE Packet16f psub< Packet16f >(const Packet16f &a, const Packet16f &b)
Definition: AVX512/PacketMath.h:383
EIGEN_STRONG_INLINE int pfirst< Packet16i >(const Packet16i &a)
Definition: AVX512/PacketMath.h:1291
EIGEN_STRONG_INLINE Packet8l pmul< Packet8l >(const Packet8l &a, const Packet8l &b)
Definition: AVX512/PacketMath.h:455
EIGEN_STRONG_INLINE int predux_mul< Packet16i >(const Packet16i &a)
Definition: AVX512/PacketMath.h:1564
EIGEN_STRONG_INLINE Packet16h pceil< Packet16h >(const Packet16h &a)
Definition: AVX512/PacketMath.h:2325
EIGEN_STRONG_INLINE Packet16h pload< Packet16h >(const Eigen::half *from)
Definition: AVX512/PacketMath.h:2213
EIGEN_STRONG_INLINE Packet16i pandnot< Packet16i >(const Packet16i &a, const Packet16i &b)
Definition: AVX512/PacketMath.h:897
EIGEN_DEVICE_FUNC Packet16i pgather< int, Packet16i >(const int *from, Index stride)
Definition: AVX512/PacketMath.h:1186
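pgather and pscatter move lanes to and from memory at a constant element stride. A sketch of the int specializations, same assumptions as above:

#include <Eigen/Core>
using namespace Eigen::internal;

int main() {
  int src[64];
  for (int i = 0; i < 64; ++i) src[i] = i;
  // Gather 16 ints at stride 4: lanes become {0, 4, 8, ..., 60}.
  Packet16i g = pgather<int, Packet16i>(src, 4);
  int dst[32] = {};
  // Scatter the 16 lanes at stride 2: writes dst[0], dst[2], ..., dst[30].
  pscatter<int, Packet16i>(dst, g, 2);
}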
EIGEN_STRONG_INLINE Packet4i plogical_shift_left(const Packet4i &a)
Definition: AltiVec/PacketMath.h:1983
EIGEN_STRONG_INLINE Eigen::half predux_min< Packet16h >(const Packet16h &a)
Definition: AVX512/PacketMath.h:2428
EIGEN_STRONG_INLINE Packet8f extract256(Packet16f x)
Definition: AVX512/PacketMath.h:632
EIGEN_STRONG_INLINE Packet16i pmul< Packet16i >(const Packet16i &a, const Packet16i &b)
Definition: AVX512/PacketMath.h:451
EIGEN_STRONG_INLINE Packet16bf pgather< bfloat16, Packet16bf >(const bfloat16 *from, Index stride)
Definition: AVX512/PacketMath.h:2955
EIGEN_STRONG_INLINE Packet16f plset< Packet16f >(const float &a)
Definition: AVX512/PacketMath.h:337
EIGEN_STRONG_INLINE Packet16i pxor< Packet16i >(const Packet16i &a, const Packet16i &b)
Definition: AVX512/PacketMath.h:869
EIGEN_STRONG_INLINE Packet16f pmax< PropagateNumbers, Packet16f >(const Packet16f &a, const Packet16f &b)
Definition: AVX512/PacketMath.h:590
EIGEN_DEVICE_FUNC Packet pmax(const Packet &a, const Packet &b)
Definition: GenericPacketMath.h:663
EIGEN_STRONG_INLINE Packet16i psub< Packet16i >(const Packet16i &a, const Packet16i &b)
Definition: AVX512/PacketMath.h:391
EIGEN_STRONG_INLINE int64_t predux_max< Packet8l >(const Packet8l &a)
Definition: AVX512/PacketMath.h:1641
EIGEN_STRONG_INLINE Packet4i pblend(const Selector< 4 > &ifPacket, const Packet4i &thenPacket, const Packet4i &elsePacket)
Definition: AltiVec/PacketMath.h:3075
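pblend picks each lane from thenPacket where the Selector entry is true and from elsePacket otherwise; the avx512_blend_mask helper above packs the Selector into an integer mask. A sketch, same assumptions:

#include <Eigen/Core>
using namespace Eigen::internal;

int main() {
  Selector<16> sel;
  for (int i = 0; i < 16; ++i) sel.select[i] = (i % 2 == 0);
  Packet16f t = pset1<Packet16f>(1.0f);  // taken where select[i] is true
  Packet16f e = pset1<Packet16f>(2.0f);  // taken otherwise
  Packet16f r = pblend(sel, t, e);       // lanes: {1,2,1,2,...}
  alignas(64) float out[16];
  pstore(out, r);
}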
EIGEN_STRONG_INLINE double predux< Packet4d >(const Packet4d &a)
Definition: AVX/PacketMath.h:1958
EIGEN_STRONG_INLINE Packet8bf padd< Packet8bf >(const Packet8bf &a, const Packet8bf &b)
Definition: AltiVec/PacketMath.h:2283
EIGEN_STRONG_INLINE Packet4f pcmp_le(const Packet4f &a, const Packet4f &b)
Definition: AltiVec/PacketMath.h:1314
EIGEN_STRONG_INLINE Packet4i plogical_shift_right(const Packet4i &a)
Definition: AltiVec/PacketMath.h:1979
EIGEN_STRONG_INLINE Packet16f pset1frombits< Packet16f >(unsigned int from)
Definition: AVX512/PacketMath.h:269
EIGEN_STRONG_INLINE Packet8d ptrue< Packet8d >(const Packet8d &a)
Definition: AVX512/PacketMath.h:802
EIGEN_STRONG_INLINE Packet pminmax_propagate_nan(const Packet &a, const Packet &b, Op op)
Definition: SSE/PacketMath.h:1127
EIGEN_STRONG_INLINE half predux_mul< Packet16h >(const Packet16h &from)
Definition: AVX512/PacketMath.h:2435
EIGEN_STRONG_INLINE Packet16bf plset< Packet16bf >(const bfloat16 &a)
Definition: AVX512/PacketMath.h:2911
eigen_packet_wrapper< __m256i, 2 > Packet16bf
Definition: AVX512/PacketMath.h:41
EIGEN_STRONG_INLINE Packet8l plset< Packet8l >(const int64_t &a)
Definition: AVX512/PacketMath.h:350
EIGEN_STRONG_INLINE Packet16i pmin< Packet16i >(const Packet16i &a, const Packet16i &b)
Definition: AVX512/PacketMath.h:553
EIGEN_STRONG_INLINE void pstore1< Packet16f >(float *to, const float &a)
Definition: AVX512/PacketMath.h:1244
EIGEN_STRONG_INLINE void pstore< int >(int *to, const Packet4i &from)
Definition: AltiVec/PacketMath.h:647
EIGEN_STRONG_INLINE Packet16f ptrunc< Packet16f >(const Packet16f &a)
Definition: AVX512/PacketMath.h:778
EIGEN_STRONG_INLINE Packet8l pmax< Packet8l >(const Packet8l &a, const Packet8l &b)
Definition: AVX512/PacketMath.h:576
EIGEN_STRONG_INLINE Packet8h por(const Packet8h &a, const Packet8h &b)
Definition: AVX/PacketMath.h:2309
EIGEN_STRONG_INLINE Packet8d pfloor< Packet8d >(const Packet8d &a)
Definition: AVX512/PacketMath.h:773
EIGEN_STRONG_INLINE float predux_mul< Packet16f >(const Packet16f &a)
Definition: AVX512/PacketMath.h:1536
EIGEN_STRONG_INLINE Packet4i pcmp_lt(const Packet4i &a, const Packet4i &b)
Definition: AltiVec/PacketMath.h:1341
EIGEN_DEVICE_FUNC void pscatter< double, Packet8d >(double *to, const Packet8d &from, Index stride, uint8_t umask)
Definition: AVX512/PacketMath.h:1206
EIGEN_STRONG_INLINE Packet16i pand< Packet16i >(const Packet16i &a, const Packet16i &b)
Definition: AVX512/PacketMath.h:807
EIGEN_STRONG_INLINE double predux< Packet8d >(const Packet8d &a)
Definition: AVX512/PacketMath.h:1472
EIGEN_STRONG_INLINE Packet8d pload1< Packet8d >(const double *from)
Definition: AVX512/PacketMath.h:326
EIGEN_STRONG_INLINE Packet16i padd< Packet16i >(const Packet16i &a, const Packet16i &b)
Definition: AVX512/PacketMath.h:363
EIGEN_STRONG_INLINE float predux< Packet8f >(const Packet8f &a)
Definition: AVX/PacketMath.h:1954
EIGEN_STRONG_INLINE Packet16i ploadu< Packet16i >(const int *from)
Definition: AVX512/PacketMath.h:994
EIGEN_STRONG_INLINE Packet2cf preverse(const Packet2cf &a)
Definition: AltiVec/Complex.h:303
EIGEN_STRONG_INLINE Packet8i pdiv< Packet8i >(const Packet8i &a, const Packet8i &b)
Definition: AVX/PacketMath.h:965
EIGEN_STRONG_INLINE Packet8l padd< Packet8l >(const Packet8l &a, const Packet8l &b)
Definition: AVX512/PacketMath.h:367
EIGEN_STRONG_INLINE Packet16f pround< Packet16f >(const Packet16f &a)
Definition: AVX512/PacketMath.h:924
EIGEN_STRONG_INLINE Packet8l pset1< Packet8l >(const int64_t &from)
Definition: AVX512/PacketMath.h:264
EIGEN_STRONG_INLINE Packet8d por< Packet8d >(const Packet8d &a, const Packet8d &b)
Definition: AVX512/PacketMath.h:860
EIGEN_STRONG_INLINE Packet8f half2float(const Packet8h &a)
Definition: AVX/PacketMath.h:2273
EIGEN_STRONG_INLINE void pstore< double >(double *to, const Packet4d &from)
Definition: AVX/PacketMath.h:1611
EIGEN_STRONG_INLINE float predux< Packet16f >(const Packet16f &a)
Definition: AVX512/PacketMath.h:1456
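The predux family reduces horizontally across a packet's lanes (sum, min, max, product). A sketch, same assumptions:

#include <Eigen/Core>
#include <iostream>
using namespace Eigen::internal;

int main() {
  Packet16f v = plset<Packet16f>(1.0f);  // lanes: {1, 2, ..., 16}
  std::cout << predux(v) << '\n';        // horizontal sum: 136
  std::cout << predux_min(v) << '\n';    // 1
  std::cout << predux_max(v) << '\n';    // 16
}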
EIGEN_STRONG_INLINE Packet4f pmadd(const Packet4f &a, const Packet4f &b, const Packet4f &c)
Definition: AltiVec/PacketMath.h:1218
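pmadd and its siblings pmsub, pnmadd and pnmsub follow the x86 FMA sign conventions; with EIGEN_VECTORIZE_FMA they lower to single fused instructions, otherwise generic mul/add fallbacks apply. A sketch, same assumptions:

#include <Eigen/Core>
using namespace Eigen::internal;

int main() {
  Packet16f a = pset1<Packet16f>(2.0f);
  Packet16f b = pset1<Packet16f>(3.0f);
  Packet16f c = pset1<Packet16f>(1.0f);
  Packet16f r0 = pmadd(a, b, c);   //  a*b + c =  7 in every lane
  Packet16f r1 = pmsub(a, b, c);   //  a*b - c =  5
  Packet16f r2 = pnmadd(a, b, c);  // -a*b + c = -5
  Packet16f r3 = pnmsub(a, b, c);  // -a*b - c = -7
  (void)r0; (void)r1; (void)r2; (void)r3;
}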
EIGEN_STRONG_INLINE Packet16f pset1< Packet16f >(const float &from)
Definition: AVX512/PacketMath.h:252
EIGEN_STRONG_INLINE Packet4cf pmul(const Packet4cf &a, const Packet4cf &b)
Definition: AVX/Complex.h:88
EIGEN_STRONG_INLINE Packet8d ploaddup< Packet8d >(const double *from)
Definition: AVX512/PacketMath.h:1028
EIGEN_STRONG_INLINE Packet16f pmax< Packet16f >(const Packet16f &a, const Packet16f &b)
Definition: AVX512/PacketMath.h:562
EIGEN_STRONG_INLINE Packet8h ptrue(const Packet8h &a)
Definition: AVX/PacketMath.h:2263
EIGEN_DEVICE_FUNC Packet pgather(const Packet &src, const Scalar *from, Index stride, typename unpacket_traits< Packet >::mask_t umask)
EIGEN_DEVICE_FUNC Packet pmin(const Packet &a, const Packet &b)
Definition: GenericPacketMath.h:649
EIGEN_STRONG_INLINE Packet8h pandnot(const Packet8h &a, const Packet8h &b)
Definition: AVX/PacketMath.h:2323
EIGEN_STRONG_INLINE Packet16i cat256i(Packet8i a, Packet8i b)
Definition: AVX512/PacketMath.h:646
EIGEN_STRONG_INLINE Packet2cf pnegate(const Packet2cf &a)
Definition: AltiVec/Complex.h:264
EIGEN_STRONG_INLINE Packet8h predux_half_dowto4< Packet16h >(const Packet16h &a)
Definition: AVX512/PacketMath.h:2414
EIGEN_STRONG_INLINE void prefetch< float >(const float *addr)
Definition: AltiVec/PacketMath.h:1854
EIGEN_STRONG_INLINE Packet4d ploadu< Packet4d >(const double *from)
Definition: AVX/PacketMath.h:1511
EIGEN_STRONG_INLINE void pstoreu< bfloat16 >(bfloat16 *to, const Packet8bf &from)
Definition: AltiVec/PacketMath.h:1772
EIGEN_STRONG_INLINE Packet4i parithmetic_shift_right(const Packet4i &a)
Definition: AltiVec/PacketMath.h:1975
EIGEN_STRONG_INLINE Packet8d pmax< Packet8d >(const Packet8d &a, const Packet8d &b)
Definition: AVX512/PacketMath.h:567
EIGEN_STRONG_INLINE Packet16f pmul< Packet16f >(const Packet16f &a, const Packet16f &b)
Definition: AVX512/PacketMath.h:443
EIGEN_STRONG_INLINE Packet4d pfrexp_generic_get_biased_exponent(const Packet4d &a)
Definition: AVX/PacketMath.h:1880
EIGEN_STRONG_INLINE void pscatter< half, Packet16h >(half *to, const Packet16h &from, Index stride)
Definition: AVX512/PacketMath.h:2456
EIGEN_STRONG_INLINE Packet8l ptrue< Packet8l >(const Packet8l &)
Definition: AVX512/PacketMath.h:792
EIGEN_STRONG_INLINE void pstoreu< half >(Eigen::half *to, const Packet16h &from)
Definition: AVX512/PacketMath.h:2230
EIGEN_STRONG_INLINE Packet16i ploaddup< Packet16i >(const int *from)
Definition: AVX512/PacketMath.h:1046
EIGEN_STRONG_INLINE Packet16bf ploadu< Packet16bf >(const bfloat16 *from)
Definition: AVX512/PacketMath.h:2711
EIGEN_STRONG_INLINE int64_t predux_mul< Packet8l >(const Packet8l &a)
Definition: AVX512/PacketMath.h:1585
EIGEN_STRONG_INLINE Packet8i pxor< Packet8i >(const Packet8i &a, const Packet8i &b)
Definition: AVX/PacketMath.h:1360
EIGEN_STRONG_INLINE double predux_max< Packet8d >(const Packet8d &a)
Definition: AVX512/PacketMath.h:1629
EIGEN_DEVICE_FUNC void pscatter< float, Packet16f >(float *to, const Packet16f &from, Index stride, uint16_t umask)
Definition: AVX512/PacketMath.h:1197
EIGEN_STRONG_INLINE Packet4i ploadu< Packet4i >(const int *from)
Definition: AltiVec/PacketMath.h:1537
EIGEN_STRONG_INLINE Packet8bf psignbit(const Packet8bf &a)
Definition: AltiVec/PacketMath.h:1966
EIGEN_STRONG_INLINE Packet16f ptrue< Packet16f >(const Packet16f &a)
Definition: AVX512/PacketMath.h:797
EIGEN_STRONG_INLINE Packet8d pmax< PropagateNumbers, Packet8d >(const Packet8d &a, const Packet8d &b)
Definition: AVX512/PacketMath.h:594
EIGEN_STRONG_INLINE double predux_min< Packet8d >(const Packet8d &a)
Definition: AVX512/PacketMath.h:1601
EIGEN_STRONG_INLINE bfloat16 predux_min< Packet16bf >(const Packet16bf &from)
Definition: AVX512/PacketMath.h:2933
EIGEN_STRONG_INLINE Packet16f pmin< PropagateNumbers, Packet16f >(const Packet16f &a, const Packet16f &b)
Definition: AVX512/PacketMath.h:582
EIGEN_STRONG_INLINE Packet16f pload1< Packet16f >(const float *from)
Definition: AVX512/PacketMath.h:314
EIGEN_STRONG_INLINE Packet16bf psub< Packet16bf >(const Packet16bf &a, const Packet16bf &b)
Definition: AVX512/PacketMath.h:2886
EIGEN_STRONG_INLINE Packet16f pandnot< Packet16f >(const Packet16f &a, const Packet16f &b)
Definition: AVX512/PacketMath.h:907
EIGEN_STRONG_INLINE Packet8d pdiv< Packet8d >(const Packet8d &a, const Packet8d &b)
Definition: AVX512/PacketMath.h:469
EIGEN_STRONG_INLINE Packet16f pdiv< Packet16f >(const Packet16f &a, const Packet16f &b)
Definition: AVX512/PacketMath.h:464
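The unqualified generic overloads (pmul, padd, pdiv, ...) dispatch to the per-type specializations indexed here, so elementwise kernels compose naturally. A sketch, same assumptions:

#include <Eigen/Core>
using namespace Eigen::internal;

int main() {
  Packet16f a = pset1<Packet16f>(3.0f);        // broadcast
  Packet16f b = plset<Packet16f>(1.0f);        // lanes: {1, ..., 16}
  Packet16f r = pdiv(padd(pmul(a, b), b), a);  // (3*b + b) / 3, elementwise
  alignas(64) float out[16];
  pstore(out, r);
}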
EIGEN_STRONG_INLINE Packet16f pload< Packet16f >(const float *from)
Definition: AVX512/PacketMath.h:969
EIGEN_STRONG_INLINE Packet16f pldexp< Packet16f >(const Packet16f &a, const Packet16f &exponent)
Definition: AVX512/PacketMath.h:1377
EIGEN_STRONG_INLINE Packet8d pldexp< Packet8d >(const Packet8d &a, const Packet8d &exponent)
Definition: AVX512/PacketMath.h:1382
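pfrexp splits a value into a mantissa in [0.5, 1) and an exponent carried in a packet of the same floating-point type; pldexp is its inverse. A round-trip sketch, same assumptions:

#include <Eigen/Core>
using namespace Eigen::internal;

int main() {
  Packet8d x = pset1<Packet8d>(24.0);  // 24 = 0.75 * 2^5
  Packet8d e;
  Packet8d m = pfrexp(x, e);           // m = 0.75, e = 5.0 in every lane
  Packet8d y = pldexp(m, e);           // m * 2^e reconstructs 24.0
  alignas(64) double out[8];
  pstore(out, y);
}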
EIGEN_STRONG_INLINE Packet16bf pfloor< Packet16bf >(const Packet16bf &a)
Definition: AVX512/PacketMath.h:2834
const char * SsePrefetchPtrType
Definition: SSE/PacketMath.h:1719
EIGEN_STRONG_INLINE void pstore< float >(float *to, const Packet4f &from)
Definition: AltiVec/PacketMath.h:642
__m512i Packet16i
Definition: AVX512/PacketMath.h:35
EIGEN_STRONG_INLINE Packet4f pabs(const Packet4f &a)
Definition: AltiVec/PacketMath.h:1936
EIGEN_STRONG_INLINE void pstore1< Packet8d >(double *to, const double &a)
Definition: AVX512/PacketMath.h:1249
EIGEN_STRONG_INLINE Packet8bf predux_half_dowto4< Packet16bf >(const Packet16bf &a)
Definition: AVX512/PacketMath.h:2916
EIGEN_STRONG_INLINE Packet16h padd< Packet16h >(const Packet16h &a, const Packet16h &b)
Definition: AVX512/PacketMath.h:2374
EIGEN_STRONG_INLINE Packet8f peven_mask(const Packet8f &)
Definition: AVX/PacketMath.h:791
EIGEN_STRONG_INLINE Packet8i pandnot< Packet8i >(const Packet8i &a, const Packet8i &b)
Definition: AVX/PacketMath.h:1385
EIGEN_STRONG_INLINE bfloat16 pfirst(const Packet8bf &a)
Definition: AltiVec/PacketMath.h:2418
EIGEN_STRONG_INLINE Packet16i plset< Packet16i >(const int &a)
Definition: AVX512/PacketMath.h:346
EIGEN_STRONG_INLINE Packet16f pand< Packet16f >(const Packet16f &a, const Packet16f &b)
Definition: AVX512/PacketMath.h:817
eigen_packet_wrapper< __m256i, 0 > Packet8i
Definition: AVX/PacketMath.h:35
EIGEN_STRONG_INLINE __m256i Pack32To16(Packet16f rf)
Definition: AVX512/PacketMath.h:653
EIGEN_DEVICE_FUNC void pscatter(Scalar *to, const Packet &from, Index stride, typename unpacket_traits< Packet >::mask_t umask)
EIGEN_DEVICE_FUNC void pstore(Scalar *to, const Packet &from)
Definition: GenericPacketMath.h:891
EIGEN_STRONG_INLINE Packet16h plset< Packet16h >(const half &a)
Definition: AVX512/PacketMath.h:2286
EIGEN_STRONG_INLINE Packet16h pset1< Packet16h >(const Eigen::half &from)
Definition: AVX512/PacketMath.h:2203
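Packet16h holds sixteen Eigen::half values in a __m256i; without EIGEN_VECTORIZE_AVX512FP16 the arithmetic widens to float internally and narrows back. A sketch, same assumptions:

#include <Eigen/Core>
using namespace Eigen::internal;

int main() {
  Eigen::half buf[16];
  for (int i = 0; i < 16; ++i) buf[i] = Eigen::half(float(i));
  Packet16h p = ploadu<Packet16h>(buf);
  p = padd(p, pset1<Packet16h>(Eigen::half(1.0f)));  // lanes: {1, ..., 16}
  Eigen::half s = predux(p);  // 1 + 2 + ... + 16 = 136, exact in half
  pstoreu(buf, p);
  (void)s;
}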
EIGEN_STRONG_INLINE Packet4f pnmsub(const Packet4f &a, const Packet4f &b, const Packet4f &c)
Definition: LSX/PacketMath.h:835
EIGEN_STRONG_INLINE double predux_mul< Packet8d >(const Packet8d &a)
Definition: AVX512/PacketMath.h:1556
EIGEN_STRONG_INLINE Packet8i predux_half_dowto4< Packet16i >(const Packet16i &a)
Definition: AVX512/PacketMath.h:1512
EIGEN_STRONG_INLINE Packet8l por< Packet8l >(const Packet8l &a, const Packet8l &b)
Definition: AVX512/PacketMath.h:846
EIGEN_DEVICE_FUNC unpacket_traits< Packet >::type predux(const Packet &a)
Definition: GenericPacketMath.h:1232
EIGEN_STRONG_INLINE Packet16bf print< Packet16bf >(const Packet16bf &a)
Definition: AVX512/PacketMath.h:2824
EIGEN_STRONG_INLINE Packet8d pfrexp< Packet8d >(const Packet8d &a, Packet8d &exponent)
Definition: AVX512/PacketMath.h:1372
EIGEN_STRONG_INLINE Eigen::half predux_max< Packet16h >(const Packet16h &a)
Definition: AVX512/PacketMath.h:2421
EIGEN_STRONG_INLINE Packet2cf pcmp_eq(const Packet2cf &a, const Packet2cf &b)
Definition: AltiVec/Complex.h:353
EIGEN_STRONG_INLINE bfloat16 pfirst< Packet16bf >(const Packet16bf &from)
Definition: AVX512/PacketMath.h:2699
EIGEN_STRONG_INLINE bfloat16 predux< Packet16bf >(const Packet16bf &p)
Definition: AVX512/PacketMath.h:2923
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Packet pldexp_generic(const Packet &a, const Packet &exponent)
Definition: GenericPacketMathFunctions.h:226
EIGEN_STRONG_INLINE int predux_max< Packet16i >(const Packet16i &a)
Definition: AVX512/PacketMath.h:1637
EIGEN_STRONG_INLINE Packet4f pmsub(const Packet4f &a, const Packet4f &b, const Packet4f &c)
Definition: LSX/PacketMath.h:819
EIGEN_DEVICE_FUNC void pscatter< int64_t, Packet8l >(int64_t *to, const Packet8l &from, Index stride)
Definition: AVX512/PacketMath.h:1229
EIGEN_STRONG_INLINE void pstoreu< int >(int *to, const Packet4i &from)
Definition: AltiVec/PacketMath.h:1760
EIGEN_STRONG_INLINE Packet8h pand(const Packet8h &a, const Packet8h &b)
Definition: AVX/PacketMath.h:2319
EIGEN_STRONG_INLINE Packet16bf pround< Packet16bf >(const Packet16bf &a)
Definition: AVX512/PacketMath.h:2819
EIGEN_STRONG_INLINE Packet8h pxor(const Packet8h &a, const Packet8h &b)
Definition: AVX/PacketMath.h:2315
EIGEN_STRONG_INLINE float predux< Packet4f >(const Packet4f &a)
Definition: AltiVec/PacketMath.h:2435
EIGEN_STRONG_INLINE int predux_min< Packet16i >(const Packet16i &a)
Definition: AVX512/PacketMath.h:1609
EIGEN_STRONG_INLINE Packet4f pnmadd(const Packet4f &a, const Packet4f &b, const Packet4f &c)
Definition: LSX/PacketMath.h:827
EIGEN_STRONG_INLINE half predux< Packet16h >(const Packet16h &from)
Definition: AVX512/PacketMath.h:2406
EIGEN_STRONG_INLINE Packet4f ploadu< Packet4f >(const float *from)
Definition: AltiVec/PacketMath.h:1533
EIGEN_STRONG_INLINE Packet8l pandnot< Packet8l >(const Packet8l &a, const Packet8l &b)
Definition: AVX512/PacketMath.h:902
EIGEN_STRONG_INLINE Packet16h print< Packet16h >(const Packet16h &a)
Definition: AVX512/PacketMath.h:2320
EIGEN_STRONG_INLINE Packet4f pselect(const Packet4f &mask, const Packet4f &a, const Packet4f &b)
Definition: AltiVec/PacketMath.h:1474
EIGEN_STRONG_INLINE void pscatter< bfloat16, Packet16bf >(bfloat16 *to, const Packet16bf &from, Index stride)
Definition: AVX512/PacketMath.h:2964
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Packet pfrexp_generic(const Packet &a, Packet &exponent)
Definition: GenericPacketMathFunctions.h:184
EIGEN_DEVICE_FUNC Packet psub(const Packet &a, const Packet &b)
Definition: GenericPacketMath.h:337
EIGEN_STRONG_INLINE Packet16i pload< Packet16i >(const int *from)
Definition: AVX512/PacketMath.h:977
EIGEN_STRONG_INLINE Packet16bf pload< Packet16bf >(const bfloat16 *from)
Definition: AVX512/PacketMath.h:2706
EIGEN_STRONG_INLINE Packet8l pmin< Packet8l >(const Packet8l &a, const Packet8l &b)
Definition: AVX512/PacketMath.h:557
EIGEN_STRONG_INLINE Packet16f pmin< PropagateNaN, Packet16f >(const Packet16f &a, const Packet16f &b)
Definition: AVX512/PacketMath.h:598
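Plain pmin/pmax leave the result for NaN operands implementation defined; the PropagateNumbers and PropagateNaN variants pin the semantics down. A sketch, same assumptions:

#include <Eigen/Core>
#include <limits>
using namespace Eigen::internal;

int main() {
  Packet16f a = pset1<Packet16f>(std::numeric_limits<float>::quiet_NaN());
  Packet16f b = pset1<Packet16f>(2.0f);
  Packet16f num  = pmin<Eigen::PropagateNumbers>(a, b);  // 2.0: NaN lanes ignored
  Packet16f nans = pmin<Eigen::PropagateNaN>(a, b);      // NaN: NaN wins
  (void)num; (void)nans;
}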
EIGEN_STRONG_INLINE Packet8l pload< Packet8l >(const int64_t *from)
Definition: AVX512/PacketMath.h:981
eigen_packet_wrapper< __m512i, 1 > Packet8l
Definition: AVX512/PacketMath.h:37
EIGEN_STRONG_INLINE double pfirst< Packet8d >(const Packet8d &a)
Definition: AVX512/PacketMath.h:1282
EIGEN_STRONG_INLINE Packet8d pand< Packet8d >(const Packet8d &a, const Packet8d &b)
Definition: AVX512/PacketMath.h:825
EIGEN_STRONG_INLINE void pstore< half >(Eigen::half *to, const Packet16h &from)
Definition: AVX512/PacketMath.h:2223
EIGEN_STRONG_INLINE Packet16f pfloor< Packet16f >(const Packet16f &a)
Definition: AVX512/PacketMath.h:769
EIGEN_STRONG_INLINE Packet8i ptrue< Packet8i >(const Packet8i &a)
Definition: AVX/PacketMath.h:1269
EIGEN_STRONG_INLINE Packet8d pset1< Packet8d >(const double &from)
Definition: AVX512/PacketMath.h:256
EIGEN_STRONG_INLINE Packet8l pxor< Packet8l >(const Packet8l &a, const Packet8l &b)
Definition: AVX512/PacketMath.h:874
EIGEN_STRONG_INLINE float predux_min< Packet16f >(const Packet16f &a)
Definition: AVX512/PacketMath.h:1591
EIGEN_STRONG_INLINE Packet16h pmul< Packet16h >(const Packet16h &a, const Packet16h &b)
Definition: AVX512/PacketMath.h:2390
EIGEN_STRONG_INLINE Packet16bf pdiv< Packet16bf >(const Packet16bf &a, const Packet16bf &b)
Definition: AVX512/PacketMath.h:2896
EIGEN_STRONG_INLINE void pstore1< Packet8l >(int64_t *to, const int64_t &a)
Definition: AVX512/PacketMath.h:1259
EIGEN_STRONG_INLINE Packet16f ploadquad< Packet16f >(const float *from)
Definition: AVX512/PacketMath.h:1056
EIGEN_STRONG_INLINE Packet8l psub< Packet8l >(const Packet8l &a, const Packet8l &b)
Definition: AVX512/PacketMath.h:395
EIGEN_STRONG_INLINE Packet8l ploadu< Packet8l >(const int64_t *from)
Definition: AVX512/PacketMath.h:998
EIGEN_STRONG_INLINE Packet8d ptrunc< Packet8d >(const Packet8d &a)
Definition: AVX512/PacketMath.h:782
EIGEN_STRONG_INLINE Packet8d pround< Packet8d >(const Packet8d &a)
Definition: AVX512/PacketMath.h:931
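The rounding family covers the usual modes: pround rounds half away from zero, print rounds to nearest (ties to even under the default rounding mode), plus pfloor, pceil and ptrunc. A sketch, same assumptions:

#include <Eigen/Core>
using namespace Eigen::internal;

int main() {
  Packet8d x = pset1<Packet8d>(2.5);
  Packet8d r0 = pround(x);  // 3.0: half away from zero
  Packet8d r1 = print(x);   // 2.0: nearest, ties to even
  Packet8d r2 = pfloor(x);  // 2.0
  Packet8d r3 = pceil(x);   // 3.0
  Packet8d r4 = ptrunc(x);  // 2.0: toward zero
  (void)r0; (void)r1; (void)r2; (void)r3; (void)r4;
}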
EIGEN_STRONG_INLINE Packet16f pmin< Packet16f >(const Packet16f &a, const Packet16f &b)
Definition: AVX512/PacketMath.h:543
EIGEN_STRONG_INLINE Packet4l predux_half_dowto4< Packet8l >(const Packet8l &a)
Definition: AVX512/PacketMath.h:1529
EIGEN_STRONG_INLINE Packet8h padd< Packet8h >(const Packet8h &a, const Packet8h &b)
Definition: AVX/PacketMath.h:2390
EIGEN_STRONG_INLINE Packet16h pdiv< Packet16h >(const Packet16h &a, const Packet16h &b)
Definition: AVX512/PacketMath.h:2398
EIGEN_STRONG_INLINE int64_t pfirst< Packet8l >(const Packet8l &a)
Definition: AVX512/PacketMath.h:1286
__m256 Packet8f
Definition: AVX/PacketMath.h:34
EIGEN_STRONG_INLINE Packet16h pmax< Packet16h >(const Packet16h &a, const Packet16h &b)
Definition: AVX512/PacketMath.h:2281
EIGEN_STRONG_INLINE Packet8d pxor< Packet8d >(const Packet8d &a, const Packet8d &b)
Definition: AVX512/PacketMath.h:888
eigen_packet_wrapper< __m256i, 1 > Packet16h
Definition: AVX512/PacketMath.h:39
EIGEN_STRONG_INLINE Packet8d pceil< Packet8d >(const Packet8d &a)
Definition: AVX512/PacketMath.h:764
EIGEN_STRONG_INLINE Packet8bf F32ToBf16(Packet4f p4f)
Definition: AltiVec/PacketMath.h:2059
EIGEN_STRONG_INLINE Packet8d pmin< PropagateNumbers, Packet8d >(const Packet8d &a, const Packet8d &b)
Definition: AVX512/PacketMath.h:586
EIGEN_STRONG_INLINE void pstoreu< int64_t >(int64_t *to, const Packet8l &from)
Definition: AVX512/PacketMath.h:1123
EIGEN_STRONG_INLINE void pstoreu< float >(float *to, const Packet4f &from)
Definition: AltiVec/PacketMath.h:1756
EIGEN_STRONG_INLINE Packet4f pcmp_lt_or_nan(const Packet4f &a, const Packet4f &b)
Definition: AltiVec/PacketMath.h:1329
EIGEN_STRONG_INLINE Packet8i pand< Packet8i >(const Packet8i &a, const Packet8i &b)
Definition: AVX/PacketMath.h:1310
EIGEN_STRONG_INLINE Packet16bf ptrunc< Packet16bf >(const Packet16bf &a)
Definition: AVX512/PacketMath.h:2839
EIGEN_STRONG_INLINE Packet16f pmax< PropagateNaN, Packet16f >(const Packet16f &a, const Packet16f &b)
Definition: AVX512/PacketMath.h:606
EIGEN_STRONG_INLINE Packet16bf padd< Packet16bf >(const Packet16bf &a, const Packet16bf &b)
Definition: AVX512/PacketMath.h:2881
EIGEN_STRONG_INLINE void prefetch< int >(const int *addr)
Definition: AltiVec/PacketMath.h:1858
__m256d Packet4d
Definition: AVX/PacketMath.h:36
EIGEN_STRONG_INLINE Packet16h ploaddup< Packet16h >(const Eigen::half *from)
Definition: AVX512/PacketMath.h:2237
EIGEN_STRONG_INLINE Packet16bf pmin< Packet16bf >(const Packet16bf &a, const Packet16bf &b)
Definition: AVX512/PacketMath.h:2901
EIGEN_STRONG_INLINE int64_t predux< Packet8l >(const Packet8l &a)
Definition: AVX512/PacketMath.h:1480
EIGEN_STRONG_INLINE bfloat16 predux_mul< Packet16bf >(const Packet16bf &from)
Definition: AVX512/PacketMath.h:2928
EIGEN_STRONG_INLINE Packet8d pmin< Packet8d >(const Packet8d &a, const Packet8d &b)
Definition: AVX512/PacketMath.h:548
__m512 Packet16f
Definition: AVX512/PacketMath.h:34
EIGEN_STRONG_INLINE Packet8d pmax< PropagateNaN, Packet8d >(const Packet8d &a, const Packet8d &b)
Definition: AVX512/PacketMath.h:610
EIGEN_STRONG_INLINE Packet16h psub< Packet16h >(const Packet16h &a, const Packet16h &b)
Definition: AVX512/PacketMath.h:2382
EIGEN_STRONG_INLINE void pstore1< Packet16i >(int *to, const int &a)
Definition: AVX512/PacketMath.h:1254
EIGEN_STRONG_INLINE float pfirst< Packet16f >(const Packet16f &a)
Definition: AVX512/PacketMath.h:1278
EIGEN_STRONG_INLINE void pstore< int64_t >(int64_t *to, const Packet8l &from)
Definition: AVX512/PacketMath.h:1106
EIGEN_DEVICE_FUNC unpacket_traits< Packet >::type predux_mul(const Packet &a)
Definition: GenericPacketMath.h:1238
EIGEN_STRONG_INLINE void prefetch< double >(const double *addr)
Definition: AVX/PacketMath.h:1750
EIGEN_STRONG_INLINE Packet16h pmin< Packet16h >(const Packet16h &a, const Packet16h &b)
Definition: AVX512/PacketMath.h:2276
EIGEN_STRONG_INLINE Packet8d padd< Packet8d >(const Packet8d &a, const Packet8d &b)
Definition: AVX512/PacketMath.h:359
std::int32_t int32_t
Definition: Meta.h:41
std::uint8_t uint8_t
Definition: Meta.h:36
std::int64_t int64_t
Definition: Meta.h:43
std::uint16_t uint16_t
Definition: Meta.h:38
std::uint32_t uint32_t
Definition: Meta.h:40
std::uint64_t uint64_t
Definition: Meta.h:42
Namespace containing all symbols from the Eigen library.
Definition: bench_norm.cpp:70
squared absolute value
Definition: GlobalFunctions.h:87
EIGEN_DEFAULT_DENSE_INDEX_TYPE Index
The Index type as used for the API.
Definition: Meta.h:83
unsigned short value
Definition: BFloat16.h:77
bfloat16
Definition: BFloat16.h:101
numext::uint16_t x
Definition: Half.h:101
half
Definition: Half.h:139
PacketBlock
Definition: GenericPacketMath.h:1407
Packet packet[N]
Definition: GenericPacketMath.h:1408
Selector
Definition: GenericPacketMath.h:1421
bool select[N]
Definition: GenericPacketMath.h:1422
default_packet_traits
Definition: GenericPacketMath.h:45
@ HasASin
Definition: GenericPacketMath.h:84
@ HasATanh
Definition: GenericPacketMath.h:87
@ HasRsqrt
Definition: GenericPacketMath.h:74
@ HasSin
Definition: GenericPacketMath.h:81
@ HasBlend
Definition: GenericPacketMath.h:66
@ HasErfc
Definition: GenericPacketMath.h:96
@ HasACos
Definition: GenericPacketMath.h:85
@ HasNdtri
Definition: GenericPacketMath.h:97
@ HasCos
Definition: GenericPacketMath.h:82
@ HasCmp
Definition: GenericPacketMath.h:69
@ HasReciprocal
Definition: GenericPacketMath.h:72
@ HasLog1p
Definition: GenericPacketMath.h:78
@ HasExp
Definition: GenericPacketMath.h:75
@ HasSqrt
Definition: GenericPacketMath.h:73
@ HasErf
Definition: GenericPacketMath.h:95
@ HasBessel
Definition: GenericPacketMath.h:98
@ HasExpm1
Definition: GenericPacketMath.h:76
@ HasLog
Definition: GenericPacketMath.h:77
@ HasTanh
Definition: GenericPacketMath.h:90
@ HasATan
Definition: GenericPacketMath.h:86
@ HasDiv
Definition: GenericPacketMath.h:71
eigen_packet_wrapper
Definition: GenericPacketMath.h:225
is_arithmetic
Definition: Meta.h:145
@ value
Definition: Meta.h:146
Packet8bf half
Definition: AVX512/PacketMath.h:2654
Packet16bf type
Definition: AVX512/PacketMath.h:2653
Packet4d half
Definition: AVX512/PacketMath.h:144
Packet8d type
Definition: AVX512/PacketMath.h:143
Packet8f half
Definition: AVX512/PacketMath.h:108
Packet16f type
Definition: AVX512/PacketMath.h:107
Packet16h half
Definition: AVX512/PacketMath.h:70
Packet16h type
Definition: AVX512/PacketMath.h:68
Packet4l half
Definition: AVX512/PacketMath.h:176
Packet8l type
Definition: AVX512/PacketMath.h:175
Packet8i half
Definition: AVX512/PacketMath.h:169
Packet16i type
Definition: AVX512/PacketMath.h:168
packet_traits
Definition: GenericPacketMath.h:108
@ size
Definition: GenericPacketMath.h:113
@ AlignedOnScalar
Definition: GenericPacketMath.h:114
@ Vectorizable
Definition: GenericPacketMath.h:112
@ HasSub
Definition: GenericPacketMath.h:118
@ HasMax
Definition: GenericPacketMath.h:124
@ HasNegate
Definition: GenericPacketMath.h:120
@ HasMul
Definition: GenericPacketMath.h:119
@ HasAdd
Definition: GenericPacketMath.h:117
@ HasSetLinear
Definition: GenericPacketMath.h:126
@ HasMin
Definition: GenericPacketMath.h:123
@ HasConj
Definition: GenericPacketMath.h:125
@ HasAbs2
Definition: GenericPacketMath.h:122
@ HasAbs
Definition: GenericPacketMath.h:121
Packet8bf half
Definition: AVX512/PacketMath.h:2690
bfloat16 type
Definition: AVX512/PacketMath.h:2682
uint16_t mask_t
Definition: AVX512/PacketMath.h:185
float type
Definition: AVX512/PacketMath.h:182
Packet8f half
Definition: AVX512/PacketMath.h:183
Packet16i integer_packet
Definition: AVX512/PacketMath.h:184
Packet8h half
Definition: AVX512/PacketMath.h:240
Eigen::half type
Definition: AVX512/PacketMath.h:239
Packet8i half
Definition: AVX512/PacketMath.h:213
int type
Definition: AVX512/PacketMath.h:212
uint8_t mask_t
Definition: AVX512/PacketMath.h:200
Packet8l integer_packet
Definition: AVX512/PacketMath.h:199
Packet4d half
Definition: AVX512/PacketMath.h:198
double type
Definition: AVX512/PacketMath.h:197
int64_t type
Definition: AVX512/PacketMath.h:225
Packet4l half
Definition: AVX512/PacketMath.h:226
unpacket_traits
Definition: GenericPacketMath.h:134
@ masked_load_available
Definition: GenericPacketMath.h:142
@ size
Definition: GenericPacketMath.h:139
@ masked_store_available
Definition: GenericPacketMath.h:143
@ vectorizable
Definition: GenericPacketMath.h:141
@ alignment
Definition: GenericPacketMath.h:140