Vector Optimized Library of Kernels 3.1.0
Architecture-tuned implementations of math kernels
volk_32f_s32f_add_32f.h
/* -*- c++ -*- */
/*
 * Copyright 2020 Free Software Foundation, Inc.
 *
 * This file is part of VOLK
 *
 * SPDX-License-Identifier: LGPL-3.0-or-later
 */
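/*
 * Adds a scalar constant to every entry of a float vector: out[i] = in[i] + scalar.
 * A minimal usage sketch follows; it assumes the standard VOLK dispatcher name
 * volk_32f_s32f_add_32f and the volk_malloc()/volk_free()/volk_get_alignment()
 * helpers declared in <volk/volk.h>, none of which are defined in this file.
 *
 * \code
 *   unsigned int N = 1 << 10;
 *   size_t alignment = volk_get_alignment();
 *   float* in = (float*)volk_malloc(sizeof(float) * N, alignment);
 *   float* out = (float*)volk_malloc(sizeof(float) * N, alignment);
 *
 *   for (unsigned int i = 0; i < N; i++) {
 *       in[i] = (float)i;
 *   }
 *
 *   // out[i] = in[i] + 2.5f for every point
 *   volk_32f_s32f_add_32f(out, in, 2.5f, N);
 *
 *   volk_free(in);
 *   volk_free(out);
 * \endcode
 */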
#include <inttypes.h>
#include <stdio.h>

#ifndef INCLUDED_volk_32f_s32f_add_32f_u_H
#define INCLUDED_volk_32f_s32f_add_32f_u_H

#ifdef LV_HAVE_GENERIC
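/* Portable C fallback: adds the scalar to one point per loop iteration. */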
static inline void volk_32f_s32f_add_32f_generic(float* cVector,
                                                 const float* aVector,
                                                 const float scalar,
                                                 unsigned int num_points)
{
    unsigned int number = 0;
    const float* inputPtr = aVector;
    float* outputPtr = cVector;
    for (number = 0; number < num_points; number++) {
        *outputPtr = (*inputPtr) + scalar;
        inputPtr++;
        outputPtr++;
    }
}

#endif /* LV_HAVE_GENERIC */

#ifdef LV_HAVE_SSE
#include <xmmintrin.h>
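/* SSE version for unaligned buffers: processes four floats per iteration
 * using unaligned loads and stores. */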
static inline void volk_32f_s32f_add_32f_u_sse(float* cVector,
                                               const float* aVector,
                                               const float scalar,
                                               unsigned int num_points)
{
    unsigned int number = 0;
    const unsigned int quarterPoints = num_points / 4;

    float* cPtr = cVector;
    const float* aPtr = aVector;

    __m128 aVal, bVal, cVal;
    bVal = _mm_set_ps1(scalar);
    for (; number < quarterPoints; number++) {
        aVal = _mm_loadu_ps(aPtr);

        cVal = _mm_add_ps(aVal, bVal);

        _mm_storeu_ps(cPtr, cVal); // Store the results back into the C container

        aPtr += 4;
        cPtr += 4;
    }
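    // Finish the remaining (num_points % 4) points with the generic kernel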
    number = quarterPoints * 4;
    volk_32f_s32f_add_32f_generic(cPtr, aPtr, scalar, num_points - number);
}
#endif /* LV_HAVE_SSE */

#ifdef LV_HAVE_AVX
#include <immintrin.h>
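/* AVX version for unaligned buffers: processes eight floats per iteration. */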
static inline void volk_32f_s32f_add_32f_u_avx(float* cVector,
                                               const float* aVector,
                                               const float scalar,
                                               unsigned int num_points)
{
    unsigned int number = 0;
    const unsigned int eighthPoints = num_points / 8;

    float* cPtr = cVector;
    const float* aPtr = aVector;

    __m256 aVal, bVal, cVal;
    bVal = _mm256_set1_ps(scalar);
    for (; number < eighthPoints; number++) {

        aVal = _mm256_loadu_ps(aPtr);

        cVal = _mm256_add_ps(aVal, bVal);

        _mm256_storeu_ps(cPtr, cVal); // Store the results back into the C container

        aPtr += 8;
        cPtr += 8;
    }

    number = eighthPoints * 8;
    volk_32f_s32f_add_32f_generic(cPtr, aPtr, scalar, num_points - number);
}
#endif /* LV_HAVE_AVX */

#ifdef LV_HAVE_NEON
#include <arm_neon.h>
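/* NEON version: processes four floats per iteration in 128-bit NEON registers. */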
static inline void volk_32f_s32f_add_32f_u_neon(float* cVector,
                                                const float* aVector,
                                                const float scalar,
                                                unsigned int num_points)
{
    unsigned int number = 0;
    const float* inputPtr = aVector;
    float* outputPtr = cVector;
    const unsigned int quarterPoints = num_points / 4;

    float32x4_t aVal, cVal, scalarvec;

    scalarvec = vdupq_n_f32(scalar);

    for (number = 0; number < quarterPoints; number++) {
        aVal = vld1q_f32(inputPtr);        // Load into NEON regs
        cVal = vaddq_f32(aVal, scalarvec); // Do the add
        vst1q_f32(outputPtr, cVal);        // Store results back to output
        inputPtr += 4;
        outputPtr += 4;
    }

    number = quarterPoints * 4;
    volk_32f_s32f_add_32f_generic(outputPtr, inputPtr, scalar, num_points - number);
}
#endif /* LV_HAVE_NEON */


#endif /* INCLUDED_volk_32f_s32f_add_32f_u_H */


#ifndef INCLUDED_volk_32f_s32f_add_32f_a_H
#define INCLUDED_volk_32f_s32f_add_32f_a_H
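/* The _a_ kernels below assume that cVector and aVector are aligned to the
 * platform's SIMD alignment (see volk_get_alignment()); use the _u_ kernels
 * above when alignment cannot be guaranteed. */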
#ifdef LV_HAVE_SSE
#include <xmmintrin.h>

static inline void volk_32f_s32f_add_32f_a_sse(float* cVector,
                                               const float* aVector,
                                               const float scalar,
                                               unsigned int num_points)
{
    unsigned int number = 0;
    const unsigned int quarterPoints = num_points / 4;

    float* cPtr = cVector;
    const float* aPtr = aVector;

    __m128 aVal, bVal, cVal;
    bVal = _mm_set_ps1(scalar);
    for (; number < quarterPoints; number++) {
        aVal = _mm_load_ps(aPtr);

        cVal = _mm_add_ps(aVal, bVal);

        _mm_store_ps(cPtr, cVal); // Store the results back into the C container

        aPtr += 4;
        cPtr += 4;
    }

    number = quarterPoints * 4;
    volk_32f_s32f_add_32f_generic(cPtr, aPtr, scalar, num_points - number);
}
#endif /* LV_HAVE_SSE */

#ifdef LV_HAVE_AVX
#include <immintrin.h>

static inline void volk_32f_s32f_add_32f_a_avx(float* cVector,
                                               const float* aVector,
                                               const float scalar,
                                               unsigned int num_points)
{
    unsigned int number = 0;
    const unsigned int eighthPoints = num_points / 8;

    float* cPtr = cVector;
    const float* aPtr = aVector;

    __m256 aVal, bVal, cVal;
    bVal = _mm256_set1_ps(scalar);
    for (; number < eighthPoints; number++) {
        aVal = _mm256_load_ps(aPtr);

        cVal = _mm256_add_ps(aVal, bVal);

        _mm256_store_ps(cPtr, cVal); // Store the results back into the C container

        aPtr += 8;
        cPtr += 8;
    }

    number = eighthPoints * 8;
    volk_32f_s32f_add_32f_generic(cPtr, aPtr, scalar, num_points - number);
}
#endif /* LV_HAVE_AVX */

#ifdef LV_HAVE_ORC
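/* The ORC implementation is compiled separately (typically generated from the
 * kernel's .orc source at build time) and only declared here; the unaligned
 * entry point simply forwards to it. */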
extern void volk_32f_s32f_add_32f_a_orc_impl(float* dst,
                                             const float* src,
                                             const float scalar,
                                             unsigned int num_points);

static inline void volk_32f_s32f_add_32f_u_orc(float* cVector,
                                               const float* aVector,
                                               const float scalar,
                                               unsigned int num_points)
{
    volk_32f_s32f_add_32f_a_orc_impl(cVector, aVector, scalar, num_points);
}
#endif /* LV_HAVE_ORC */

#endif /* INCLUDED_volk_32f_s32f_add_32f_a_H */