45#ifndef INCLUDED_volk_32fc_x2_dot_prod_32fc_u_H
46#define INCLUDED_volk_32fc_x2_dot_prod_32fc_u_H
/* Forward declaration of a SiFive U74 (RISC-V) tuned kernel; its definition
 * lives in a separate translation unit.
 * NOTE(review): the input/taps parameter lines are missing from this
 * extracted view -- presumably the full prototype matches the other kernels
 * (result, input, taps, num_points); confirm against the original file. */
55extern void volk_32fc_x2_dot_prod_32fc_sifive_u74(
lv_32fc_t* result,
58 unsigned int num_points)
/* Portable C complex dot-product kernel body (presumably the _generic
 * variant -- the signature lines are missing from this extracted view).
 * Two independent partial accumulators (sum0/sum1) process two complex
 * samples per iteration to expose instruction-level parallelism.
 * NOTE(review): the in/tp pointer-advance statements and the loop's closing
 * brace are also missing here; do not edit without the complete original. */
67 unsigned int num_points)
70 float* res = (
float*)result;
71 float* in = (
float*)input;
72 float* tp = (
float*)taps;
/* Each iteration consumes two complex samples (4 floats). */
73 unsigned int n_2_ccomplex_blocks = num_points / 2;
75 float sum0[2] = { 0, 0 };
76 float sum1[2] = { 0, 0 };
79 for (i = 0; i < n_2_ccomplex_blocks; ++i) {
/* (a+bi)(c+di): real = ac - bd, imag = ad + bc */
80 sum0[0] += in[0] * tp[0] - in[1] * tp[1];
81 sum0[1] += in[0] * tp[1] + in[1] * tp[0];
82 sum1[0] += in[2] * tp[2] - in[3] * tp[3];
83 sum1[1] += in[2] * tp[3] + in[3] * tp[2];
/* Combine the two partial accumulators into the complex result. */
89 res[0] = sum0[0] + sum1[0];
90 res[1] = sum0[1] + sum1[1];
/* Odd num_points: fold in the final complex sample (guarding `if` not
 * visible in this extract). */
94 *result += input[num_points - 1] * taps[num_points - 1];
/* SSE + x86-64 inline-asm kernel, unaligned variant (movups loads).
 * Processes 4 complex samples per loop iteration in xmm0-xmm5, accumulating
 * into xmm6/xmm7, then does a horizontal pfpnacc-style reduction and stores
 * the complex result with movlps.
 * NOTE(review): several asm lines are missing from this extracted view
 * (loop label, decrement/branch, r9 pointer advance, the odd-count test and
 * its label) -- treat this listing as incomplete. */
101#if LV_HAVE_SSE && LV_HAVE_64
103static inline void volk_32fc_x2_dot_prod_32fc_u_sse_64(
lv_32fc_t* result,
106 unsigned int num_points)
/* 8 bytes per lv_32fc_t (two floats); the asm consumes num_bytes via rcx. */
109 const unsigned int num_bytes = num_points * 8;
110 unsigned int isodd = num_points & 1;
/* The leading "#"-prefixed strings below are assembler comments giving the
 * reference C algorithm; live instructions start at the xor pair. */
113 "# ccomplex_dotprod_generic (float* result, const float *input,\n\t"
114 "# const float *taps, unsigned num_bytes)\n\t"
115 "# float sum0 = 0;\n\t"
116 "# float sum1 = 0;\n\t"
117 "# float sum2 = 0;\n\t"
118 "# float sum3 = 0;\n\t"
120 "# sum0 += input[0] * taps[0] - input[1] * taps[1];\n\t"
121 "# sum1 += input[0] * taps[1] + input[1] * taps[0];\n\t"
122 "# sum2 += input[2] * taps[2] - input[3] * taps[3];\n\t"
123 "# sum3 += input[2] * taps[3] + input[3] * taps[2];\n\t"
126 "# } while (--n_2_ccomplex_blocks != 0);\n\t"
127 "# result[0] = sum0 + sum2;\n\t"
128 "# result[1] = sum1 + sum3;\n\t"
129 "# TODO: prefetch and better scheduling\n\t"
130 " xor %%r9, %%r9\n\t"
131 " xor %%r10, %%r10\n\t"
132 " movq %%rcx, %%rax\n\t"
133 " movq %%rcx, %%r8\n\t"
134 " movq %[rsi], %%r9\n\t"
135 " movq %[rdx], %%r10\n\t"
136 " xorps %%xmm6, %%xmm6 # zero accumulators\n\t"
137 " xorps %%xmm7, %%xmm7 # zero accumulators\n\t"
138 " shr $5, %%rax # rax = n_2_ccomplex_blocks / 2\n\t"
140 " jmp .%=L1_test\n\t"
141 " # 4 taps / loop\n\t"
142 " # something like ?? cycles / loop\n\t"
144 "# complex prod: C += A * B, w/ temp Z & Y (or B), xmmPN=$0x8000000080000000\n\t"
145 "# movups (%%r9), %%xmmA\n\t"
146 "# movups (%%r10), %%xmmB\n\t"
147 "# movups %%xmmA, %%xmmZ\n\t"
148 "# shufps $0xb1, %%xmmZ, %%xmmZ # swap internals\n\t"
149 "# mulps %%xmmB, %%xmmA\n\t"
150 "# mulps %%xmmZ, %%xmmB\n\t"
151 "# # SSE replacement for: pfpnacc %%xmmB, %%xmmA\n\t"
152 "# xorps %%xmmPN, %%xmmA\n\t"
153 "# movups %%xmmA, %%xmmZ\n\t"
154 "# unpcklps %%xmmB, %%xmmA\n\t"
155 "# unpckhps %%xmmB, %%xmmZ\n\t"
156 "# movups %%xmmZ, %%xmmY\n\t"
157 "# shufps $0x44, %%xmmA, %%xmmZ # b01000100\n\t"
158 "# shufps $0xee, %%xmmY, %%xmmA # b11101110\n\t"
159 "# addps %%xmmZ, %%xmmA\n\t"
160 "# addps %%xmmA, %%xmmC\n\t"
161 "# A=xmm0, B=xmm2, Z=xmm4\n\t"
162 "# A'=xmm1, B'=xmm3, Z'=xmm5\n\t"
/* Main loop body: two pairs of complex samples per pass. */
163 " movups 0(%%r9), %%xmm0\n\t"
164 " movups 16(%%r9), %%xmm1\n\t"
165 " movups %%xmm0, %%xmm4\n\t"
166 " movups 0(%%r10), %%xmm2\n\t"
167 " mulps %%xmm2, %%xmm0\n\t"
168 " shufps $0xb1, %%xmm4, %%xmm4 # swap internals\n\t"
169 " movups 16(%%r10), %%xmm3\n\t"
170 " movups %%xmm1, %%xmm5\n\t"
171 " addps %%xmm0, %%xmm6\n\t"
172 " mulps %%xmm3, %%xmm1\n\t"
173 " shufps $0xb1, %%xmm5, %%xmm5 # swap internals\n\t"
174 " addps %%xmm1, %%xmm6\n\t"
175 " mulps %%xmm4, %%xmm2\n\t"
176 " addps %%xmm2, %%xmm7\n\t"
177 " mulps %%xmm5, %%xmm3\n\t"
179 " addps %%xmm3, %%xmm7\n\t"
180 " add $32, %%r10\n\t"
/* NOTE: "sse" in the asm comment below is a typo for "see"; kept verbatim
 * because it is part of the emitted asm string. */
184 " # We've handled the bulk of multiplies up to here.\n\t"
185 " # Let's sse if original n_2_ccomplex_blocks was odd.\n\t"
186 " # If so, we've got 2 more taps to do.\n\t"
189 " # The count was odd, do 2 more taps.\n\t"
190 " movups 0(%%r9), %%xmm0\n\t"
191 " movups %%xmm0, %%xmm4\n\t"
192 " movups 0(%%r10), %%xmm2\n\t"
193 " mulps %%xmm2, %%xmm0\n\t"
194 " shufps $0xb1, %%xmm4, %%xmm4 # swap internals\n\t"
195 " addps %%xmm0, %%xmm6\n\t"
196 " mulps %%xmm4, %%xmm2\n\t"
197 " addps %%xmm2, %%xmm7\n\t"
/* Build a {0,-0,0,-0} sign mask in xmm1, then horizontally reduce
 * xmm6/xmm7 into a single complex value. */
199 " # neg inversor\n\t"
200 " xorps %%xmm1, %%xmm1\n\t"
201 " mov $0x80000000, %%r9\n\t"
202 " movd %%r9, %%xmm1\n\t"
203 " shufps $0x11, %%xmm1, %%xmm1 # b00010001 # 0 -0 0 -0\n\t"
205 " xorps %%xmm1, %%xmm6\n\t"
206 " movups %%xmm6, %%xmm2\n\t"
207 " unpcklps %%xmm7, %%xmm6\n\t"
208 " unpckhps %%xmm7, %%xmm2\n\t"
209 " movups %%xmm2, %%xmm3\n\t"
210 " shufps $0x44, %%xmm6, %%xmm2 # b01000100\n\t"
211 " shufps $0xee, %%xmm3, %%xmm6 # b11101110\n\t"
212 " addps %%xmm2, %%xmm6\n\t"
213 " # xmm6 = r1 i2 r3 i4\n\t"
214 " movhlps %%xmm6, %%xmm4 # xmm4 = r3 i4 ?? ??\n\t"
215 " addps %%xmm4, %%xmm6 # xmm6 = r1+r3 i2+i4 ?? ??\n\t"
216 " movlps %%xmm6, (%[rdi]) # store low 2x32 bits (complex) "
/* Operand constraints: input/taps/result pointers plus num_bytes in rcx;
 * clobber list covers the scratch GPRs used above. */
219 : [rsi]
"r"(input), [rdx]
"r"(taps),
"c"(num_bytes), [rdi]
"r"(result)
220 :
"rax",
"r8",
"r9",
"r10");
/* Odd num_points: the asm handled an even count, so fold in the last
 * complex sample (the guarding `if (isodd)` is not visible in this extract). */
224 *result += input[num_points - 1] * taps[num_points - 1];
/* SSE3 unaligned-variant kernel fragment (presumably _u_sse3 -- the
 * signature is missing from this extracted view). Processes two complex
 * samples per iteration; the SIMD loop body and the reduction store are
 * also missing here, so only the scaffolding is visible. */
235#include <pmmintrin.h>
240 unsigned int num_points)
244 memset(&dotProduct, 0x0, 2 *
sizeof(
float));
246 unsigned int number = 0;
/* Two complex samples per __m128, hence num_points / 2 iterations. */
247 const unsigned int halfPoints = num_points / 2;
248 unsigned int isodd = num_points & 1;
250 __m128 x, y, yl, yh, z, tmp1, tmp2, dotProdVal;
251 for (; number < halfPoints; number++) {
/* Horizontal reduction of the two complex lanes, then the odd tail. */
286 dotProduct += (dotProductVector[0] + dotProductVector[1]);
289 dotProduct += input[num_points - 1] * taps[num_points - 1];
292 *result = dotProduct;
/* AVX unaligned-variant kernel fragment (presumably _u_avx -- the signature
 * is missing from this extracted view). Uses the classic
 * moveldup/movehdup/shuffle/addsub complex-multiply idiom on 4 complex
 * samples per __m256.
 * NOTE(review): the second arguments of _mm256_addsub_ps/_mm256_add_ps and
 * several closing lines are missing here; listing is incomplete. */
383#include <immintrin.h>
388 unsigned int num_points)
391 unsigned int isodd = num_points & 3;
394 memset(&dotProduct, 0x0, 2 *
sizeof(
float));
396 unsigned int number = 0;
/* 4 complex samples per __m256 register. */
397 const unsigned int quarterPoints = num_points / 4;
399 __m256 x, y, yl, yh, z, tmp1, tmp2, dotProdVal;
404 dotProdVal = _mm256_setzero_ps();
406 for (; number < quarterPoints; number++) {
407 x = _mm256_loadu_ps((
float*)a);
408 y = _mm256_loadu_ps((
float*)b);
/* yl = duplicated reals of taps, yh = duplicated imaginaries. */
410 yl = _mm256_moveldup_ps(y);
411 yh = _mm256_movehdup_ps(y);
413 tmp1 = _mm256_mul_ps(x, yl);
/* Swap re/im within each complex lane of x. */
415 x = _mm256_shuffle_ps(x, x, 0xB1);
417 tmp2 = _mm256_mul_ps(x, yh);
419 z = _mm256_addsub_ps(tmp1,
422 dotProdVal = _mm256_add_ps(dotProdVal,
431 _mm256_storeu_ps((
float*)dotProductVector,
/* Sum the four complex lanes, then handle the 0-3 leftover samples. */
434 dotProduct += (dotProductVector[0] + dotProductVector[1] + dotProductVector[2] +
435 dotProductVector[3]);
437 for (i = num_points - isodd; i < num_points; i++) {
438 dotProduct += input[i] * taps[i];
441 *result = dotProduct;
/* AVX+FMA unaligned-variant kernel. Same complex-multiply idiom as the AVX
 * version but fuses multiply and add/sub via _mm256_fmaddsub_ps.
 * NOTE(review): the fmaddsub argument lines and several closing lines are
 * missing from this extracted view; listing is incomplete. */
446#if LV_HAVE_AVX && LV_HAVE_FMA
447#include <immintrin.h>
449static inline void volk_32fc_x2_dot_prod_32fc_u_avx_fma(
lv_32fc_t* result,
452 unsigned int num_points)
455 unsigned int isodd = num_points & 3;
458 memset(&dotProduct, 0x0, 2 *
sizeof(
float));
460 unsigned int number = 0;
/* 4 complex samples per __m256 register. */
461 const unsigned int quarterPoints = num_points / 4;
463 __m256 x, y, yl, yh, z, tmp1, tmp2, dotProdVal;
468 dotProdVal = _mm256_setzero_ps();
470 for (; number < quarterPoints; number++) {
472 x = _mm256_loadu_ps((
float*)a);
473 y = _mm256_loadu_ps((
float*)b);
/* yl = duplicated reals of taps, yh = duplicated imaginaries. */
475 yl = _mm256_moveldup_ps(y);
476 yh = _mm256_movehdup_ps(y);
/* Swap re/im within each complex lane of x. */
480 x = _mm256_shuffle_ps(x, x, 0xB1);
482 tmp2 = _mm256_mul_ps(x, yh);
484 z = _mm256_fmaddsub_ps(
487 dotProdVal = _mm256_add_ps(dotProdVal,
496 _mm256_storeu_ps((
float*)dotProductVector,
/* Sum the four complex lanes, then handle the 0-3 leftover samples. */
499 dotProduct += (dotProductVector[0] + dotProductVector[1] + dotProductVector[2] +
500 dotProductVector[3]);
502 for (i = num_points - isodd; i < num_points; i++) {
503 dotProduct += input[i] * taps[i];
506 *result = dotProduct;
513#ifndef INCLUDED_volk_32fc_x2_dot_prod_32fc_a_H
514#define INCLUDED_volk_32fc_x2_dot_prod_32fc_a_H
/* SSE + x86-64 inline-asm kernel, ALIGNED variant: identical algorithm to
 * the _u_sse_64 kernel above but uses movaps (requires 16-byte aligned
 * input/taps).
 * NOTE(review): as with the unaligned twin, several asm lines (loop label,
 * decrement/branch, r9 advance, odd-count test) are missing from this
 * extracted view. */
522#if LV_HAVE_SSE && LV_HAVE_64
525static inline void volk_32fc_x2_dot_prod_32fc_a_sse_64(
lv_32fc_t* result,
528 unsigned int num_points)
/* 8 bytes per lv_32fc_t; the asm consumes num_bytes via rcx. */
531 const unsigned int num_bytes = num_points * 8;
532 unsigned int isodd = num_points & 1;
/* "#"-prefixed strings are assembler comments (reference C algorithm);
 * live instructions start at the xor pair. */
535 "# ccomplex_dotprod_generic (float* result, const float *input,\n\t"
536 "# const float *taps, unsigned num_bytes)\n\t"
537 "# float sum0 = 0;\n\t"
538 "# float sum1 = 0;\n\t"
539 "# float sum2 = 0;\n\t"
540 "# float sum3 = 0;\n\t"
542 "# sum0 += input[0] * taps[0] - input[1] * taps[1];\n\t"
543 "# sum1 += input[0] * taps[1] + input[1] * taps[0];\n\t"
544 "# sum2 += input[2] * taps[2] - input[3] * taps[3];\n\t"
545 "# sum3 += input[2] * taps[3] + input[3] * taps[2];\n\t"
548 "# } while (--n_2_ccomplex_blocks != 0);\n\t"
549 "# result[0] = sum0 + sum2;\n\t"
550 "# result[1] = sum1 + sum3;\n\t"
551 "# TODO: prefetch and better scheduling\n\t"
552 " xor %%r9, %%r9\n\t"
553 " xor %%r10, %%r10\n\t"
554 " movq %%rcx, %%rax\n\t"
555 " movq %%rcx, %%r8\n\t"
556 " movq %[rsi], %%r9\n\t"
557 " movq %[rdx], %%r10\n\t"
558 " xorps %%xmm6, %%xmm6 # zero accumulators\n\t"
559 " xorps %%xmm7, %%xmm7 # zero accumulators\n\t"
560 " shr $5, %%rax # rax = n_2_ccomplex_blocks / 2\n\t"
562 " jmp .%=L1_test\n\t"
563 " # 4 taps / loop\n\t"
564 " # something like ?? cycles / loop\n\t"
566 "# complex prod: C += A * B, w/ temp Z & Y (or B), xmmPN=$0x8000000080000000\n\t"
567 "# movaps (%%r9), %%xmmA\n\t"
568 "# movaps (%%r10), %%xmmB\n\t"
569 "# movaps %%xmmA, %%xmmZ\n\t"
570 "# shufps $0xb1, %%xmmZ, %%xmmZ # swap internals\n\t"
571 "# mulps %%xmmB, %%xmmA\n\t"
572 "# mulps %%xmmZ, %%xmmB\n\t"
573 "# # SSE replacement for: pfpnacc %%xmmB, %%xmmA\n\t"
574 "# xorps %%xmmPN, %%xmmA\n\t"
575 "# movaps %%xmmA, %%xmmZ\n\t"
576 "# unpcklps %%xmmB, %%xmmA\n\t"
577 "# unpckhps %%xmmB, %%xmmZ\n\t"
578 "# movaps %%xmmZ, %%xmmY\n\t"
579 "# shufps $0x44, %%xmmA, %%xmmZ # b01000100\n\t"
580 "# shufps $0xee, %%xmmY, %%xmmA # b11101110\n\t"
581 "# addps %%xmmZ, %%xmmA\n\t"
582 "# addps %%xmmA, %%xmmC\n\t"
583 "# A=xmm0, B=xmm2, Z=xmm4\n\t"
584 "# A'=xmm1, B'=xmm3, Z'=xmm5\n\t"
/* Main loop body: two pairs of complex samples per pass (aligned loads). */
585 " movaps 0(%%r9), %%xmm0\n\t"
586 " movaps 16(%%r9), %%xmm1\n\t"
587 " movaps %%xmm0, %%xmm4\n\t"
588 " movaps 0(%%r10), %%xmm2\n\t"
589 " mulps %%xmm2, %%xmm0\n\t"
590 " shufps $0xb1, %%xmm4, %%xmm4 # swap internals\n\t"
591 " movaps 16(%%r10), %%xmm3\n\t"
592 " movaps %%xmm1, %%xmm5\n\t"
593 " addps %%xmm0, %%xmm6\n\t"
594 " mulps %%xmm3, %%xmm1\n\t"
595 " shufps $0xb1, %%xmm5, %%xmm5 # swap internals\n\t"
596 " addps %%xmm1, %%xmm6\n\t"
597 " mulps %%xmm4, %%xmm2\n\t"
598 " addps %%xmm2, %%xmm7\n\t"
599 " mulps %%xmm5, %%xmm3\n\t"
601 " addps %%xmm3, %%xmm7\n\t"
602 " add $32, %%r10\n\t"
/* NOTE: "sse" in the asm comment below is a typo for "see"; kept verbatim
 * because it is part of the emitted asm string. */
606 " # We've handled the bulk of multiplies up to here.\n\t"
607 " # Let's sse if original n_2_ccomplex_blocks was odd.\n\t"
608 " # If so, we've got 2 more taps to do.\n\t"
611 " # The count was odd, do 2 more taps.\n\t"
612 " movaps 0(%%r9), %%xmm0\n\t"
613 " movaps %%xmm0, %%xmm4\n\t"
614 " movaps 0(%%r10), %%xmm2\n\t"
615 " mulps %%xmm2, %%xmm0\n\t"
616 " shufps $0xb1, %%xmm4, %%xmm4 # swap internals\n\t"
617 " addps %%xmm0, %%xmm6\n\t"
618 " mulps %%xmm4, %%xmm2\n\t"
619 " addps %%xmm2, %%xmm7\n\t"
/* Build a {0,-0,0,-0} sign mask in xmm1, then horizontally reduce
 * xmm6/xmm7 into a single complex value. */
621 " # neg inversor\n\t"
622 " xorps %%xmm1, %%xmm1\n\t"
623 " mov $0x80000000, %%r9\n\t"
624 " movd %%r9, %%xmm1\n\t"
625 " shufps $0x11, %%xmm1, %%xmm1 # b00010001 # 0 -0 0 -0\n\t"
627 " xorps %%xmm1, %%xmm6\n\t"
628 " movaps %%xmm6, %%xmm2\n\t"
629 " unpcklps %%xmm7, %%xmm6\n\t"
630 " unpckhps %%xmm7, %%xmm2\n\t"
631 " movaps %%xmm2, %%xmm3\n\t"
632 " shufps $0x44, %%xmm6, %%xmm2 # b01000100\n\t"
633 " shufps $0xee, %%xmm3, %%xmm6 # b11101110\n\t"
634 " addps %%xmm2, %%xmm6\n\t"
635 " # xmm6 = r1 i2 r3 i4\n\t"
636 " movhlps %%xmm6, %%xmm4 # xmm4 = r3 i4 ?? ??\n\t"
637 " addps %%xmm4, %%xmm6 # xmm6 = r1+r3 i2+i4 ?? ??\n\t"
638 " movlps %%xmm6, (%[rdi]) # store low 2x32 bits (complex) "
/* Operand constraints and clobbered scratch GPRs. */
641 : [rsi]
"r"(input), [rdx]
"r"(taps),
"c"(num_bytes), [rdi]
"r"(result)
642 :
"rax",
"r8",
"r9",
"r10");
/* Odd num_points: fold in the final complex sample (guarding `if (isodd)`
 * not visible in this extract). */
646 *result += input[num_points - 1] * taps[num_points - 1];
/* SSE3 aligned-variant kernel fragment (presumably _a_sse3 -- the signature
 * is missing from this extracted view). Note it derives the iteration count
 * from num_bytes >> 4 (16 bytes = two complex floats per __m128) rather than
 * num_points / 2 as the unaligned twin does; both are equivalent.
 * The SIMD loop body and reduction store are missing from this extract. */
656#include <pmmintrin.h>
661 unsigned int num_points)
664 const unsigned int num_bytes = num_points * 8;
665 unsigned int isodd = num_points & 1;
668 memset(&dotProduct, 0x0, 2 *
sizeof(
float));
670 unsigned int number = 0;
/* num_bytes >> 4 == num_points / 2: two complex samples per iteration. */
671 const unsigned int halfPoints = num_bytes >> 4;
673 __m128 x, y, yl, yh, z, tmp1, tmp2, dotProdVal;
680 for (; number < halfPoints; number++) {
/* Horizontal reduction of the two complex lanes, then the odd tail. */
709 dotProduct += (dotProductVector[0] + dotProductVector[1]);
712 dotProduct += input[num_points - 1] * taps[num_points - 1];
715 *result = dotProduct;
/* NEON kernel fragment (signature missing from this extracted view;
 * presumably the plain _neon variant). vld2q_f32 de-interleaves 4 complex
 * samples into separate real (val[0]) and imaginary (val[1]) vectors; the
 * four cross products are computed explicitly and combined per iteration.
 * NOTE(review): the a_ptr/b_ptr advance lines and loop close are missing. */
811 unsigned int num_points)
814 unsigned int quarter_points = num_points / 4;
821 float32x4x2_t a_val, b_val, c_val, accumulator;
822 float32x4x2_t tmp_real, tmp_imag;
823 accumulator.val[0] = vdupq_n_f32(0);
824 accumulator.val[1] = vdupq_n_f32(0);
826 for (number = 0; number < quarter_points; ++number) {
827 a_val = vld2q_f32((
float*)a_ptr);
828 b_val = vld2q_f32((
float*)b_ptr);
/* real = ac - bd, imag = ad + bc, computed from the four partials. */
834 tmp_real.val[0] = vmulq_f32(a_val.val[0], b_val.val[0]);
836 tmp_real.val[1] = vmulq_f32(a_val.val[1], b_val.val[1]);
840 tmp_imag.val[0] = vmulq_f32(a_val.val[0], b_val.val[1]);
842 tmp_imag.val[1] = vmulq_f32(a_val.val[1], b_val.val[0]);
844 c_val.val[0] = vsubq_f32(tmp_real.val[0], tmp_real.val[1]);
845 c_val.val[1] = vaddq_f32(tmp_imag.val[0], tmp_imag.val[1]);
847 accumulator.val[0] = vaddq_f32(accumulator.val[0], c_val.val[0]);
848 accumulator.val[1] = vaddq_f32(accumulator.val[1], c_val.val[1]);
/* Store the accumulator re-interleaved and sum its four complex lanes. */
854 vst2q_f32((
float*)accum_result, accumulator);
855 *result = accum_result[0] + accum_result[1] + accum_result[2] + accum_result[3];
/* Scalar tail for the 0-3 leftover samples. */
858 for (number = quarter_points * 4; number < num_points; ++number) {
859 *result += (*a_ptr++) * (*b_ptr++);
/* NEON kernel fragment (signature missing; presumably the _neon_opttests
 * variant). Same math as the plain NEON kernel but uses fused
 * multiply-accumulate (vmlaq/vmlsq) to form re/im in two ops each instead
 * of four multiplies plus add/sub.
 * NOTE(review): pointer-advance lines and loop close are missing. */
869 unsigned int num_points)
872 unsigned int quarter_points = num_points / 4;
879 float32x4x2_t a_val, b_val, accumulator;
880 float32x4x2_t tmp_imag;
881 accumulator.val[0] = vdupq_n_f32(0);
882 accumulator.val[1] = vdupq_n_f32(0);
884 for (number = 0; number < quarter_points; ++number) {
885 a_val = vld2q_f32((
float*)a_ptr);
886 b_val = vld2q_f32((
float*)b_ptr);
/* Start with bc (imag) and ac (real)... */
891 tmp_imag.val[1] = vmulq_f32(a_val.val[1], b_val.val[0]);
892 tmp_imag.val[0] = vmulq_f32(a_val.val[0], b_val.val[0]);
/* ...then fuse in +ad (imag) and -bd (real). */
895 tmp_imag.val[1] = vmlaq_f32(tmp_imag.val[1], a_val.val[0], b_val.val[1]);
896 tmp_imag.val[0] = vmlsq_f32(tmp_imag.val[0], a_val.val[1], b_val.val[1]);
898 accumulator.val[0] = vaddq_f32(accumulator.val[0], tmp_imag.val[0]);
899 accumulator.val[1] = vaddq_f32(accumulator.val[1], tmp_imag.val[1]);
/* Re-interleave and horizontally sum the four complex lanes. */
906 vst2q_f32((
float*)accum_result, accumulator);
907 *result = accum_result[0] + accum_result[1] + accum_result[2] + accum_result[3];
/* Scalar tail for the 0-3 leftover samples. */
910 for (number = quarter_points * 4; number < num_points; ++number) {
911 *result += (*a_ptr++) * (*b_ptr++);
/* NEON kernel fragment (signature missing; presumably the _neon_optfma
 * variant). Accumulates the four cross products directly into two
 * accumulator pairs via vmlaq/vmlsq, deferring the real-part subtraction to
 * a single combine after the loop.
 * NOTE(review): pointer-advance lines and loop close are missing. */
920 unsigned int num_points)
923 unsigned int quarter_points = num_points / 4;
930 float32x4x2_t a_val, b_val, accumulator1, accumulator2;
931 accumulator1.val[0] = vdupq_n_f32(0);
932 accumulator1.val[1] = vdupq_n_f32(0);
933 accumulator2.val[0] = vdupq_n_f32(0);
934 accumulator2.val[1] = vdupq_n_f32(0);
936 for (number = 0; number < quarter_points; ++number) {
937 a_val = vld2q_f32((
float*)a_ptr);
938 b_val = vld2q_f32((
float*)b_ptr);
/* acc1 gathers ac and ad; acc2 gathers -bd and bc. */
943 accumulator1.val[0] = vmlaq_f32(accumulator1.val[0], a_val.val[0], b_val.val[0]);
944 accumulator1.val[1] = vmlaq_f32(accumulator1.val[1], a_val.val[0], b_val.val[1]);
945 accumulator2.val[0] = vmlsq_f32(accumulator2.val[0], a_val.val[1], b_val.val[1]);
946 accumulator2.val[1] = vmlaq_f32(accumulator2.val[1], a_val.val[1], b_val.val[0]);
/* Combine the two accumulators into the final re/im vectors. */
951 accumulator1.val[0] = vaddq_f32(accumulator1.val[0], accumulator2.val[0]);
952 accumulator1.val[1] = vaddq_f32(accumulator1.val[1], accumulator2.val[1]);
954 vst2q_f32((
float*)accum_result, accumulator1);
955 *result = accum_result[0] + accum_result[1] + accum_result[2] + accum_result[3];
/* Scalar tail for the 0-3 leftover samples. */
958 for (number = quarter_points * 4; number < num_points; ++number) {
959 *result += (*a_ptr++) * (*b_ptr++);
/* NEON kernel fragment (signature missing; presumably _neon_optfmaunroll).
 * vld4q_f32 pulls in 8 complex samples split across four vectors
 * (re0,im0,re1,im1) and maintains 8 accumulators to hide FMA latency.
 * CAUTION: despite the name, quarter_points here is num_points / 8 (see the
 * * 8 in the tail loop below).
 * NOTE(review): pointer-advance lines and loop close are missing. */
968 unsigned int num_points)
973 unsigned int quarter_points = num_points / 8;
980 float32x4x4_t a_val, b_val, accumulator1, accumulator2;
981 float32x4x2_t reduced_accumulator;
982 accumulator1.val[0] = vdupq_n_f32(0);
983 accumulator1.val[1] = vdupq_n_f32(0);
984 accumulator1.val[2] = vdupq_n_f32(0);
985 accumulator1.val[3] = vdupq_n_f32(0);
986 accumulator2.val[0] = vdupq_n_f32(0);
987 accumulator2.val[1] = vdupq_n_f32(0);
988 accumulator2.val[2] = vdupq_n_f32(0);
989 accumulator2.val[3] = vdupq_n_f32(0);
992 for (number = 0; number < quarter_points; ++number) {
993 a_val = vld4q_f32((
float*)a_ptr);
994 b_val = vld4q_f32((
float*)b_ptr);
/* First 4 samples: ac, ad into acc1[0..1]; second 4 into acc1[2..3]. */
999 accumulator1.val[0] = vmlaq_f32(accumulator1.val[0], a_val.val[0], b_val.val[0]);
1000 accumulator1.val[1] = vmlaq_f32(accumulator1.val[1], a_val.val[0], b_val.val[1]);
1002 accumulator1.val[2] = vmlaq_f32(accumulator1.val[2], a_val.val[2], b_val.val[2]);
1003 accumulator1.val[3] = vmlaq_f32(accumulator1.val[3], a_val.val[2], b_val.val[3]);
/* -bd, bc partials for both halves into acc2. */
1005 accumulator2.val[0] = vmlsq_f32(accumulator2.val[0], a_val.val[1], b_val.val[1]);
1006 accumulator2.val[1] = vmlaq_f32(accumulator2.val[1], a_val.val[1], b_val.val[0]);
1008 accumulator2.val[2] = vmlsq_f32(accumulator2.val[2], a_val.val[3], b_val.val[3]);
1009 accumulator2.val[3] = vmlaq_f32(accumulator2.val[3], a_val.val[3], b_val.val[2]);
/* Tree-reduce the 8 accumulators down to one re/im pair. */
1015 accumulator1.val[0] = vaddq_f32(accumulator1.val[0], accumulator1.val[2]);
1016 accumulator1.val[1] = vaddq_f32(accumulator1.val[1], accumulator1.val[3]);
1017 accumulator2.val[0] = vaddq_f32(accumulator2.val[0], accumulator2.val[2]);
1018 accumulator2.val[1] = vaddq_f32(accumulator2.val[1], accumulator2.val[3]);
1019 reduced_accumulator.val[0] = vaddq_f32(accumulator1.val[0], accumulator2.val[0]);
1020 reduced_accumulator.val[1] = vaddq_f32(accumulator1.val[1], accumulator2.val[1]);
1023 vst2q_f32((
float*)accum_result, reduced_accumulator);
1024 *result = accum_result[0] + accum_result[1] + accum_result[2] + accum_result[3];
/* Scalar tail for the 0-7 leftover samples. */
1027 for (number = quarter_points * 8; number < num_points; ++number) {
1028 *result += (*a_ptr++) * (*b_ptr++);
/* AVX aligned-variant kernel fragment (signature missing; presumably
 * _a_avx). Identical algorithm to _u_avx but uses aligned load/store
 * (_mm256_load_ps / _mm256_store_ps, requires 32-byte alignment).
 * NOTE(review): the second arguments of addsub/add and several closing
 * lines are missing from this extracted view. */
1036#include <immintrin.h>
1041 unsigned int num_points)
1044 unsigned int isodd = num_points & 3;
1047 memset(&dotProduct, 0x0, 2 *
sizeof(
float));
1049 unsigned int number = 0;
/* 4 complex samples per __m256 register. */
1050 const unsigned int quarterPoints = num_points / 4;
1052 __m256 x, y, yl, yh, z, tmp1, tmp2, dotProdVal;
1057 dotProdVal = _mm256_setzero_ps();
1059 for (; number < quarterPoints; number++) {
1061 x = _mm256_load_ps((
float*)a);
1062 y = _mm256_load_ps((
float*)b);
/* yl = duplicated reals of taps, yh = duplicated imaginaries. */
1064 yl = _mm256_moveldup_ps(y);
1065 yh = _mm256_movehdup_ps(y);
1067 tmp1 = _mm256_mul_ps(x, yl);
/* Swap re/im within each complex lane of x. */
1069 x = _mm256_shuffle_ps(x, x, 0xB1);
1071 tmp2 = _mm256_mul_ps(x, yh);
1073 z = _mm256_addsub_ps(tmp1,
1076 dotProdVal = _mm256_add_ps(dotProdVal,
1085 _mm256_store_ps((
float*)dotProductVector,
/* Sum the four complex lanes, then handle the 0-3 leftover samples. */
1088 dotProduct += (dotProductVector[0] + dotProductVector[1] + dotProductVector[2] +
1089 dotProductVector[3]);
1091 for (i = num_points - isodd; i < num_points; i++) {
1092 dotProduct += input[i] * taps[i];
1095 *result = dotProduct;
/* AVX+FMA aligned-variant kernel: identical algorithm to _u_avx_fma but
 * with aligned load/store (requires 32-byte alignment).
 * NOTE(review): the fmaddsub argument lines and several closing lines are
 * missing from this extracted view; listing is incomplete. */
1100#if LV_HAVE_AVX && LV_HAVE_FMA
1101#include <immintrin.h>
1103static inline void volk_32fc_x2_dot_prod_32fc_a_avx_fma(
lv_32fc_t* result,
1106 unsigned int num_points)
1109 unsigned int isodd = num_points & 3;
1112 memset(&dotProduct, 0x0, 2 *
sizeof(
float));
1114 unsigned int number = 0;
/* 4 complex samples per __m256 register. */
1115 const unsigned int quarterPoints = num_points / 4;
1117 __m256 x, y, yl, yh, z, tmp1, tmp2, dotProdVal;
1122 dotProdVal = _mm256_setzero_ps();
1124 for (; number < quarterPoints; number++) {
1126 x = _mm256_load_ps((
float*)a);
1127 y = _mm256_load_ps((
float*)b);
/* yl = duplicated reals of taps, yh = duplicated imaginaries. */
1129 yl = _mm256_moveldup_ps(y);
1130 yh = _mm256_movehdup_ps(y);
/* Swap re/im within each complex lane of x. */
1134 x = _mm256_shuffle_ps(x, x, 0xB1);
1136 tmp2 = _mm256_mul_ps(x, yh);
1138 z = _mm256_fmaddsub_ps(
1141 dotProdVal = _mm256_add_ps(dotProdVal,
1150 _mm256_store_ps((
float*)dotProductVector,
/* Sum the four complex lanes, then handle the 0-3 leftover samples. */
1153 dotProduct += (dotProductVector[0] + dotProductVector[1] + dotProductVector[2] +
1154 dotProductVector[3]);
1156 for (i = num_points - isodd; i < num_points; i++) {
1157 dotProduct += input[i] * taps[i];
1160 *result = dotProduct;