volk_32fc_x2_multiply_32fc.h
/* -*- c++ -*- */
/*
 * Copyright 2012, 2014 Free Software Foundation, Inc.
 *
 * This file is part of VOLK
 *
 * SPDX-License-Identifier: LGPL-3.0-or-later
 */

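/*!
 * \b Overview
 *
 * Multiplies the two input complex vectors element by element and stores the
 * results in the third vector. The dispatcher prototype follows directly from
 * the kernel signatures below:
 *
 * <b>Dispatcher Prototype</b>
 * \code
 * void volk_32fc_x2_multiply_32fc(lv_32fc_t* cVector,
 *                                 const lv_32fc_t* aVector,
 *                                 const lv_32fc_t* bVector,
 *                                 unsigned int num_points);
 * \endcode
 *
 * \b Inputs
 * \li aVector: first input vector of complex floats.
 * \li bVector: second input vector of complex floats.
 * \li num_points: number of complex values in each input vector.
 *
 * \b Outputs
 * \li cVector: the elementwise products.
 *
 * \b Example
 * A minimal usage sketch (volk_get_alignment, volk_malloc and volk_free are
 * the standard VOLK buffer-management API):
 * \code
 * unsigned int N = 1000;
 * size_t alignment = volk_get_alignment();
 * lv_32fc_t* a = (lv_32fc_t*)volk_malloc(sizeof(lv_32fc_t) * N, alignment);
 * lv_32fc_t* b = (lv_32fc_t*)volk_malloc(sizeof(lv_32fc_t) * N, alignment);
 * lv_32fc_t* c = (lv_32fc_t*)volk_malloc(sizeof(lv_32fc_t) * N, alignment);
 *
 * for (unsigned int ii = 0; ii < N; ++ii) {
 *     a[ii] = lv_cmake((float)ii, (float)-ii); // arbitrary test data
 *     b[ii] = lv_cmake(0.5f, 0.5f);
 * }
 *
 * volk_32fc_x2_multiply_32fc(c, a, b, N);
 *
 * volk_free(a);
 * volk_free(b);
 * volk_free(c);
 * \endcode
 */
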
#ifndef INCLUDED_volk_32fc_x2_multiply_32fc_u_H
#define INCLUDED_volk_32fc_x2_multiply_32fc_u_H

#include <float.h>
#include <inttypes.h>
#include <stdio.h>
#include <volk/volk_complex.h>

#if LV_HAVE_AVX2 && LV_HAVE_FMA
#include <immintrin.h>

static inline void volk_32fc_x2_multiply_32fc_u_avx2_fma(lv_32fc_t* cVector,
                                                         const lv_32fc_t* aVector,
                                                         const lv_32fc_t* bVector,
                                                         unsigned int num_points)
{
    unsigned int number = 0;
    const unsigned int quarterPoints = num_points / 4;

    lv_32fc_t* c = cVector;
    const lv_32fc_t* a = aVector;
    const lv_32fc_t* b = bVector;

    for (; number < quarterPoints; number++) {

        const __m256 x =
            _mm256_loadu_ps((float*)a); // Load the ar + ai, br + bi as ar,ai,br,bi
        const __m256 y =
            _mm256_loadu_ps((float*)b); // Load the cr + ci, dr + di as cr,ci,dr,di

        const __m256 yl = _mm256_moveldup_ps(y); // Load yl with cr,cr,dr,dr
        const __m256 yh = _mm256_movehdup_ps(y); // Load yh with ci,ci,di,di

        const __m256 tmp2x = _mm256_permute_ps(x, 0xB1); // Re-arrange x to be ai,ar,bi,br

        const __m256 tmp2 = _mm256_mul_ps(tmp2x, yh); // tmp2 = ai*ci,ar*ci,bi*di,br*di

        const __m256 z = _mm256_fmaddsub_ps(
            x, yl, tmp2); // ar*cr-ai*ci, ai*cr+ar*ci, br*dr-bi*di, bi*dr+br*di

        _mm256_storeu_ps((float*)c, z); // Store the results back into the C container

        a += 4;
        b += 4;
        c += 4;
    }

    number = quarterPoints * 4;
    for (; number < num_points; number++) {
        *c++ = (*a++) * (*b++);
    }
}
#endif /* LV_HAVE_AVX2 && LV_HAVE_FMA */
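
/* For reference, each complex pair in the fmaddsub sequence above computes the
 * textbook product (ar + i*ai) * (br + i*bi) = (ar*br - ai*bi) + i*(ar*bi + ai*br).
 * A minimal scalar sketch of that arithmetic; this helper is illustrative only
 * and not part of the VOLK API:
 */
static inline lv_32fc_t volk_32fc_x2_multiply_32fc_scalar_sketch(lv_32fc_t a, lv_32fc_t b)
{
    // Real part: product of the reals minus product of the imaginaries
    const float re = lv_creal(a) * lv_creal(b) - lv_cimag(a) * lv_cimag(b);
    // Imaginary part: the two cross terms summed
    const float im = lv_creal(a) * lv_cimag(b) + lv_cimag(a) * lv_creal(b);
    return lv_cmake(re, im);
}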


#ifdef LV_HAVE_AVX
#include <immintrin.h>
#include <volk/volk_avx_intrinsics.h>

static inline void volk_32fc_x2_multiply_32fc_u_avx(lv_32fc_t* cVector,
                                                    const lv_32fc_t* aVector,
                                                    const lv_32fc_t* bVector,
                                                    unsigned int num_points)
{
    unsigned int number = 0;
    const unsigned int quarterPoints = num_points / 4;

    __m256 x, y, z;
    lv_32fc_t* c = cVector;
    const lv_32fc_t* a = aVector;
    const lv_32fc_t* b = bVector;

    for (; number < quarterPoints; number++) {
        x = _mm256_loadu_ps(
            (float*)a); // Load the ar + ai, br + bi ... as ar,ai,br,bi ...
        y = _mm256_loadu_ps(
            (float*)b); // Load the cr + ci, dr + di ... as cr,ci,dr,di ...
        z = _mm256_complexmul_ps(x, y);
        _mm256_storeu_ps((float*)c, z); // Store the results back into the C container

        a += 4;
        b += 4;
        c += 4;
    }

    number = quarterPoints * 4;

    for (; number < num_points; number++) {
        *c++ = (*a++) * (*b++);
    }
}
#endif /* LV_HAVE_AVX */
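
/* _mm256_complexmul_ps() comes from volk/volk_avx_intrinsics.h and applies the
 * same moveldup/movehdup swizzle as the FMA kernel above, with an explicit
 * multiply followed by addsub. Roughly (a sketch of the helper's shape, not a
 * verbatim copy):
 *
 *   yl = _mm256_moveldup_ps(y);          // cr,cr,dr,dr ...
 *   yh = _mm256_movehdup_ps(y);          // ci,ci,di,di ...
 *   tmp1 = _mm256_mul_ps(x, yl);         // ar*cr, ai*cr ...
 *   x = _mm256_permute_ps(x, 0xB1);      // ai,ar,bi,br ...
 *   tmp2 = _mm256_mul_ps(x, yh);         // ai*ci, ar*ci ...
 *   return _mm256_addsub_ps(tmp1, tmp2); // ar*cr-ai*ci, ai*cr+ar*ci ...
 */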


#ifdef LV_HAVE_SSE3
#include <pmmintrin.h>
#include <volk/volk_sse3_intrinsics.h>

static inline void volk_32fc_x2_multiply_32fc_u_sse3(lv_32fc_t* cVector,
                                                     const lv_32fc_t* aVector,
                                                     const lv_32fc_t* bVector,
                                                     unsigned int num_points)
{
    unsigned int number = 0;
    const unsigned int halfPoints = num_points / 2;

    __m128 x, y, z;
    lv_32fc_t* c = cVector;
    const lv_32fc_t* a = aVector;
    const lv_32fc_t* b = bVector;

    for (; number < halfPoints; number++) {
        x = _mm_loadu_ps((float*)a); // Load the ar + ai, br + bi as ar,ai,br,bi
        y = _mm_loadu_ps((float*)b); // Load the cr + ci, dr + di as cr,ci,dr,di
        z = _mm_complexmul_ps(x, y);
        _mm_storeu_ps((float*)c, z); // Store the results back into the C container

        a += 2;
        b += 2;
        c += 2;
    }

    if ((num_points % 2) != 0) {
        *c = (*a) * (*b);
    }
}
#endif /* LV_HAVE_SSE3 */


#ifdef LV_HAVE_GENERIC

static inline void volk_32fc_x2_multiply_32fc_generic(lv_32fc_t* cVector,
                                                      const lv_32fc_t* aVector,
                                                      const lv_32fc_t* bVector,
                                                      unsigned int num_points)
{
    lv_32fc_t* cPtr = cVector;
    const lv_32fc_t* aPtr = aVector;
    const lv_32fc_t* bPtr = bVector;
    unsigned int number = 0;

    for (number = 0; number < num_points; number++) {
        *cPtr++ = (*aPtr++) * (*bPtr++);
    }
}
#endif /* LV_HAVE_GENERIC */
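
/* Note: lv_32fc_t (from volk/volk_complex.h) is VOLK's 32-bit complex float
 * type, so the `*` in the generic kernel above is the language's native
 * complex multiplication (C99 _Complex float, or std::complex<float> when the
 * header is compiled as C++).
 */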


#endif /* INCLUDED_volk_32fc_x2_multiply_32fc_u_H */
#ifndef INCLUDED_volk_32fc_x2_multiply_32fc_a_H
#define INCLUDED_volk_32fc_x2_multiply_32fc_a_H

#include <float.h>
#include <inttypes.h>
#include <stdio.h>
#include <volk/volk_complex.h>

#if LV_HAVE_AVX2 && LV_HAVE_FMA
#include <immintrin.h>

static inline void volk_32fc_x2_multiply_32fc_a_avx2_fma(lv_32fc_t* cVector,
                                                         const lv_32fc_t* aVector,
                                                         const lv_32fc_t* bVector,
                                                         unsigned int num_points)
{
    unsigned int number = 0;
    const unsigned int quarterPoints = num_points / 4;

    lv_32fc_t* c = cVector;
    const lv_32fc_t* a = aVector;
    const lv_32fc_t* b = bVector;

    for (; number < quarterPoints; number++) {

        const __m256 x =
            _mm256_load_ps((float*)a); // Load the ar + ai, br + bi as ar,ai,br,bi
        const __m256 y =
            _mm256_load_ps((float*)b); // Load the cr + ci, dr + di as cr,ci,dr,di

        const __m256 yl = _mm256_moveldup_ps(y); // Load yl with cr,cr,dr,dr
        const __m256 yh = _mm256_movehdup_ps(y); // Load yh with ci,ci,di,di

        const __m256 tmp2x = _mm256_permute_ps(x, 0xB1); // Re-arrange x to be ai,ar,bi,br

        const __m256 tmp2 = _mm256_mul_ps(tmp2x, yh); // tmp2 = ai*ci,ar*ci,bi*di,br*di

        const __m256 z = _mm256_fmaddsub_ps(
            x, yl, tmp2); // ar*cr-ai*ci, ai*cr+ar*ci, br*dr-bi*di, bi*dr+br*di

        _mm256_store_ps((float*)c, z); // Store the results back into the C container

        a += 4;
        b += 4;
        c += 4;
    }

    number = quarterPoints * 4;
    for (; number < num_points; number++) {
        *c++ = (*a++) * (*b++);
    }
}
#endif /* LV_HAVE_AVX2 && LV_HAVE_FMA */


#ifdef LV_HAVE_AVX
#include <immintrin.h>
#include <volk/volk_avx_intrinsics.h>

static inline void volk_32fc_x2_multiply_32fc_a_avx(lv_32fc_t* cVector,
                                                    const lv_32fc_t* aVector,
                                                    const lv_32fc_t* bVector,
                                                    unsigned int num_points)
{
    unsigned int number = 0;
    const unsigned int quarterPoints = num_points / 4;

    __m256 x, y, z;
    lv_32fc_t* c = cVector;
    const lv_32fc_t* a = aVector;
    const lv_32fc_t* b = bVector;

    for (; number < quarterPoints; number++) {
        x = _mm256_load_ps((float*)a); // Load the ar + ai, br + bi ... as ar,ai,br,bi ...
        y = _mm256_load_ps((float*)b); // Load the cr + ci, dr + di ... as cr,ci,dr,di ...
        z = _mm256_complexmul_ps(x, y);
        _mm256_store_ps((float*)c, z); // Store the results back into the C container

        a += 4;
        b += 4;
        c += 4;
    }

    number = quarterPoints * 4;

    for (; number < num_points; number++) {
        *c++ = (*a++) * (*b++);
    }
}
#endif /* LV_HAVE_AVX */

#ifdef LV_HAVE_SSE3
#include <pmmintrin.h>
#include <volk/volk_sse3_intrinsics.h>

static inline void volk_32fc_x2_multiply_32fc_a_sse3(lv_32fc_t* cVector,
                                                     const lv_32fc_t* aVector,
                                                     const lv_32fc_t* bVector,
                                                     unsigned int num_points)
{
    unsigned int number = 0;
    const unsigned int halfPoints = num_points / 2;

    __m128 x, y, z;
    lv_32fc_t* c = cVector;
    const lv_32fc_t* a = aVector;
    const lv_32fc_t* b = bVector;

    for (; number < halfPoints; number++) {
        x = _mm_load_ps((float*)a); // Load the ar + ai, br + bi as ar,ai,br,bi
        y = _mm_load_ps((float*)b); // Load the cr + ci, dr + di as cr,ci,dr,di
        z = _mm_complexmul_ps(x, y);
        _mm_store_ps((float*)c, z); // Store the results back into the C container

        a += 2;
        b += 2;
        c += 2;
    }

    if ((num_points % 2) != 0) {
        *c = (*a) * (*b);
    }
}
#endif /* LV_HAVE_SSE3 */


#ifdef LV_HAVE_NEON
#include <arm_neon.h>

static inline void volk_32fc_x2_multiply_32fc_neon(lv_32fc_t* cVector,
                                                   const lv_32fc_t* aVector,
                                                   const lv_32fc_t* bVector,
                                                   unsigned int num_points)
{
    lv_32fc_t* a_ptr = (lv_32fc_t*)aVector;
    lv_32fc_t* b_ptr = (lv_32fc_t*)bVector;
    unsigned int quarter_points = num_points / 4;
    float32x4x2_t a_val, b_val, c_val;
    float32x4x2_t tmp_real, tmp_imag;
    unsigned int number = 0;

    for (number = 0; number < quarter_points; ++number) {
        a_val = vld2q_f32((float*)a_ptr); // a0r|a1r|a2r|a3r || a0i|a1i|a2i|a3i
        b_val = vld2q_f32((float*)b_ptr); // b0r|b1r|b2r|b3r || b0i|b1i|b2i|b3i
        __VOLK_PREFETCH(a_ptr + 4);
        __VOLK_PREFETCH(b_ptr + 4);

        // multiply the real*real and imag*imag to get the real result
        // a0r*b0r|a1r*b1r|a2r*b2r|a3r*b3r
        tmp_real.val[0] = vmulq_f32(a_val.val[0], b_val.val[0]);
        // a0i*b0i|a1i*b1i|a2i*b2i|a3i*b3i
        tmp_real.val[1] = vmulq_f32(a_val.val[1], b_val.val[1]);

        // multiply the cross terms to get the imaginary result
        // a0r*b0i|a1r*b1i|a2r*b2i|a3r*b3i
        tmp_imag.val[0] = vmulq_f32(a_val.val[0], b_val.val[1]);
        // a0i*b0r|a1i*b1r|a2i*b2r|a3i*b3r
        tmp_imag.val[1] = vmulq_f32(a_val.val[1], b_val.val[0]);

        // combine the partial products into real and imaginary planes and store
        c_val.val[0] = vsubq_f32(tmp_real.val[0], tmp_real.val[1]);
        c_val.val[1] = vaddq_f32(tmp_imag.val[0], tmp_imag.val[1]);
        vst2q_f32((float*)cVector, c_val);

        a_ptr += 4;
        b_ptr += 4;
        cVector += 4;
    }

    for (number = quarter_points * 4; number < num_points; number++) {
        *cVector++ = (*a_ptr++) * (*b_ptr++);
    }
}
#endif /* LV_HAVE_NEON */


#ifdef LV_HAVE_NEON

static inline void volk_32fc_x2_multiply_32fc_neon_opttests(lv_32fc_t* cVector,
                                                            const lv_32fc_t* aVector,
                                                            const lv_32fc_t* bVector,
                                                            unsigned int num_points)
{
    lv_32fc_t* a_ptr = (lv_32fc_t*)aVector;
    lv_32fc_t* b_ptr = (lv_32fc_t*)bVector;
    unsigned int quarter_points = num_points / 4;
    float32x4x2_t a_val, b_val;
    float32x4x2_t tmp_imag;
    unsigned int number = 0;

    for (number = 0; number < quarter_points; ++number) {
        a_val = vld2q_f32((float*)a_ptr); // a0r|a1r|a2r|a3r || a0i|a1i|a2i|a3i
        b_val = vld2q_f32((float*)b_ptr); // b0r|b1r|b2r|b3r || b0i|b1i|b2i|b3i
        __VOLK_PREFETCH(a_ptr + 4);
        __VOLK_PREFETCH(b_ptr + 4);

        // do the first multiply
        tmp_imag.val[1] = vmulq_f32(a_val.val[1], b_val.val[0]);
        tmp_imag.val[0] = vmulq_f32(a_val.val[0], b_val.val[0]);

        // use multiply-accumulate/subtract to get the result
        tmp_imag.val[1] = vmlaq_f32(tmp_imag.val[1], a_val.val[0], b_val.val[1]);
        tmp_imag.val[0] = vmlsq_f32(tmp_imag.val[0], a_val.val[1], b_val.val[1]);

        // store
        vst2q_f32((float*)cVector, tmp_imag);
        // increment pointers
        a_ptr += 4;
        b_ptr += 4;
        cVector += 4;
    }

    for (number = quarter_points * 4; number < num_points; number++) {
        *cVector++ = (*a_ptr++) * (*b_ptr++);
    }
}
#endif /* LV_HAVE_NEON */
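
/* The two NEON kernels compute identical results; the variant above folds the
 * final add/subtract into fused multiply-accumulate (vmlaq_f32) and
 * multiply-subtract (vmlsq_f32) instructions, trading the separate
 * vaddq_f32/vsubq_f32 of the first kernel for two fewer vector operations per
 * iteration.
 */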


#ifdef LV_HAVE_NEONV7

extern void volk_32fc_x2_multiply_32fc_a_neonasm(lv_32fc_t* cVector,
                                                 const lv_32fc_t* aVector,
                                                 const lv_32fc_t* bVector,
                                                 unsigned int num_points);
#endif /* LV_HAVE_NEONV7 */


#ifdef LV_HAVE_ORC

extern void volk_32fc_x2_multiply_32fc_a_orc_impl(lv_32fc_t* cVector,
                                                  const lv_32fc_t* aVector,
                                                  const lv_32fc_t* bVector,
                                                  unsigned int num_points);

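/* The unaligned entry point below simply forwards to the ORC implementation;
 * presumably the ORC-generated code uses alignment-safe loads and stores, so a
 * separate unaligned variant is unnecessary.
 */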
static inline void volk_32fc_x2_multiply_32fc_u_orc(lv_32fc_t* cVector,
                                                    const lv_32fc_t* aVector,
                                                    const lv_32fc_t* bVector,
                                                    unsigned int num_points)
{
    volk_32fc_x2_multiply_32fc_a_orc_impl(cVector, aVector, bVector, num_points);
}

#endif /* LV_HAVE_ORC */

#endif /* INCLUDED_volk_32fc_x2_multiply_32fc_a_H */