Vector Optimized Library of Kernels 2.2
Architecture-tuned implementations of math kernels
volk_32fc_x2_dot_prod_32fc.h
/* -*- c++ -*- */
/*
 * Copyright 2012, 2013, 2014 Free Software Foundation, Inc.
 *
 * This file is part of GNU Radio
 *
 * GNU Radio is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 3, or (at your option)
 * any later version.
 *
 * GNU Radio is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU Radio; see the file COPYING. If not, write to
 * the Free Software Foundation, Inc., 51 Franklin Street,
 * Boston, MA 02110-1301, USA.
 */

#ifndef INCLUDED_volk_32fc_x2_dot_prod_32fc_u_H
#define INCLUDED_volk_32fc_x2_dot_prod_32fc_u_H

#include <volk/volk_common.h>
#include <volk/volk_complex.h>
#include <stdio.h>
#include <string.h>


#ifdef LV_HAVE_GENERIC


static inline void volk_32fc_x2_dot_prod_32fc_generic(lv_32fc_t* result, const lv_32fc_t* input, const lv_32fc_t* taps, unsigned int num_points) {

  float * res = (float*) result;
  float * in = (float*) input;
  float * tp = (float*) taps;
  unsigned int n_2_ccomplex_blocks = num_points/2;

  float sum0[2] = {0,0};
  float sum1[2] = {0,0};
  unsigned int i = 0;

  for(i = 0; i < n_2_ccomplex_blocks; ++i) {
    sum0[0] += in[0] * tp[0] - in[1] * tp[1];
    sum0[1] += in[0] * tp[1] + in[1] * tp[0];
    sum1[0] += in[2] * tp[2] - in[3] * tp[3];
    sum1[1] += in[2] * tp[3] + in[3] * tp[2];

    in += 4;
    tp += 4;
  }

  res[0] = sum0[0] + sum1[0];
  res[1] = sum0[1] + sum1[1];

  // Cleanup if we had an odd number of points
  if (num_points & 1) {
    *result += input[num_points - 1] * taps[num_points - 1];
  }
}

#endif /*LV_HAVE_GENERIC*/
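
/*!
 * Usage sketch (editor's illustration, not part of the upstream header):
 * a 4-point complex dot product computed with the generic kernel. Real code
 * would normally call the volk_32fc_x2_dot_prod_32fc() dispatcher from
 * <volk/volk.h>, which selects the fastest implementation at runtime; the
 * include path and buffer values below are assumptions for the example.
 *
 * \code
 * #include <stdio.h>
 * #include <volk/volk_32fc_x2_dot_prod_32fc.h>
 *
 * int main(void)
 * {
 *   lv_32fc_t input[4] = { lv_cmake(1.f, 0.f), lv_cmake(0.f, 1.f),
 *                          lv_cmake(1.f, 1.f), lv_cmake(2.f, 0.f) };
 *   lv_32fc_t taps[4]  = { lv_cmake(1.f, 0.f), lv_cmake(1.f, 0.f),
 *                          lv_cmake(0.f, 1.f), lv_cmake(.5f, 0.f) };
 *   lv_32fc_t result;
 *   volk_32fc_x2_dot_prod_32fc_generic(&result, input, taps, 4);
 *   printf("%f %+fi\n", lv_creal(result), lv_cimag(result));
 *   return 0;
 * }
 * \endcode
 */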



#if LV_HAVE_SSE && LV_HAVE_64

static inline void volk_32fc_x2_dot_prod_32fc_u_sse_64(lv_32fc_t* result, const lv_32fc_t* input, const lv_32fc_t* taps, unsigned int num_points) {

  const unsigned int num_bytes = num_points*8;
  unsigned int isodd = num_points & 1;

  __VOLK_ASM
  (
   "# ccomplex_dotprod_generic (float* result, const float *input,\n\t"
   "# const float *taps, unsigned num_bytes)\n\t"
   "# float sum0 = 0;\n\t"
   "# float sum1 = 0;\n\t"
   "# float sum2 = 0;\n\t"
   "# float sum3 = 0;\n\t"
   "# do {\n\t"
   "# sum0 += input[0] * taps[0] - input[1] * taps[1];\n\t"
   "# sum1 += input[0] * taps[1] + input[1] * taps[0];\n\t"
   "# sum2 += input[2] * taps[2] - input[3] * taps[3];\n\t"
   "# sum3 += input[2] * taps[3] + input[3] * taps[2];\n\t"
   "# input += 4;\n\t"
   "# taps += 4; \n\t"
   "# } while (--n_2_ccomplex_blocks != 0);\n\t"
   "# result[0] = sum0 + sum2;\n\t"
   "# result[1] = sum1 + sum3;\n\t"
   "# TODO: prefetch and better scheduling\n\t"
   " xor %%r9, %%r9\n\t"
   " xor %%r10, %%r10\n\t"
   " movq %%rcx, %%rax\n\t"
   " movq %%rcx, %%r8\n\t"
   " movq %[rsi], %%r9\n\t"
   " movq %[rdx], %%r10\n\t"
   " xorps %%xmm6, %%xmm6 # zero accumulators\n\t"
   " movups 0(%%r9), %%xmm0\n\t"
   " xorps %%xmm7, %%xmm7 # zero accumulators\n\t"
   " movups 0(%%r10), %%xmm2\n\t"
   " shr $5, %%rax # rax = n_2_ccomplex_blocks / 2\n\t"
   " shr $4, %%r8\n\t"
   " jmp .%=L1_test\n\t"
   " # 4 taps / loop\n\t"
   " # something like ?? cycles / loop\n\t"
   ".%=Loop1: \n\t"
   "# complex prod: C += A * B, w/ temp Z & Y (or B), xmmPN=$0x8000000080000000\n\t"
   "# movups (%%r9), %%xmmA\n\t"
   "# movups (%%r10), %%xmmB\n\t"
   "# movups %%xmmA, %%xmmZ\n\t"
   "# shufps $0xb1, %%xmmZ, %%xmmZ # swap internals\n\t"
   "# mulps %%xmmB, %%xmmA\n\t"
   "# mulps %%xmmZ, %%xmmB\n\t"
   "# # SSE replacement for: pfpnacc %%xmmB, %%xmmA\n\t"
   "# xorps %%xmmPN, %%xmmA\n\t"
   "# movups %%xmmA, %%xmmZ\n\t"
   "# unpcklps %%xmmB, %%xmmA\n\t"
   "# unpckhps %%xmmB, %%xmmZ\n\t"
   "# movups %%xmmZ, %%xmmY\n\t"
   "# shufps $0x44, %%xmmA, %%xmmZ # b01000100\n\t"
   "# shufps $0xee, %%xmmY, %%xmmA # b11101110\n\t"
   "# addps %%xmmZ, %%xmmA\n\t"
   "# addps %%xmmA, %%xmmC\n\t"
   "# A=xmm0, B=xmm2, Z=xmm4\n\t"
   "# A'=xmm1, B'=xmm3, Z'=xmm5\n\t"
   " movups 16(%%r9), %%xmm1\n\t"
   " movups %%xmm0, %%xmm4\n\t"
   " mulps %%xmm2, %%xmm0\n\t"
   " shufps $0xb1, %%xmm4, %%xmm4 # swap internals\n\t"
   " movups 16(%%r10), %%xmm3\n\t"
   " movups %%xmm1, %%xmm5\n\t"
   " addps %%xmm0, %%xmm6\n\t"
   " mulps %%xmm3, %%xmm1\n\t"
   " shufps $0xb1, %%xmm5, %%xmm5 # swap internals\n\t"
   " addps %%xmm1, %%xmm6\n\t"
   " mulps %%xmm4, %%xmm2\n\t"
   " movups 32(%%r9), %%xmm0\n\t"
   " addps %%xmm2, %%xmm7\n\t"
   " mulps %%xmm5, %%xmm3\n\t"
   " add $32, %%r9\n\t"
   " movups 32(%%r10), %%xmm2\n\t"
   " addps %%xmm3, %%xmm7\n\t"
   " add $32, %%r10\n\t"
   ".%=L1_test:\n\t"
   " dec %%rax\n\t"
   " jge .%=Loop1\n\t"
   " # We've handled the bulk of multiplies up to here.\n\t"
   " # Let's see if original n_2_ccomplex_blocks was odd.\n\t"
   " # If so, we've got 2 more taps to do.\n\t"
   " and $1, %%r8\n\t"
   " je .%=Leven\n\t"
   " # The count was odd, do 2 more taps.\n\t"
   " # Note that we've already got mm0/mm2 preloaded\n\t"
   " # from the main loop.\n\t"
   " movups %%xmm0, %%xmm4\n\t"
   " mulps %%xmm2, %%xmm0\n\t"
   " shufps $0xb1, %%xmm4, %%xmm4 # swap internals\n\t"
   " addps %%xmm0, %%xmm6\n\t"
   " mulps %%xmm4, %%xmm2\n\t"
   " addps %%xmm2, %%xmm7\n\t"
   ".%=Leven:\n\t"
   " # neg inversor\n\t"
   " xorps %%xmm1, %%xmm1\n\t"
   " mov $0x80000000, %%r9\n\t"
   " movd %%r9, %%xmm1\n\t"
   " shufps $0x11, %%xmm1, %%xmm1 # b00010001 # 0 -0 0 -0\n\t"
   " # pfpnacc\n\t"
   " xorps %%xmm1, %%xmm6\n\t"
   " movups %%xmm6, %%xmm2\n\t"
   " unpcklps %%xmm7, %%xmm6\n\t"
   " unpckhps %%xmm7, %%xmm2\n\t"
   " movups %%xmm2, %%xmm3\n\t"
   " shufps $0x44, %%xmm6, %%xmm2 # b01000100\n\t"
   " shufps $0xee, %%xmm3, %%xmm6 # b11101110\n\t"
   " addps %%xmm2, %%xmm6\n\t"
   " # xmm6 = r1 i2 r3 i4\n\t"
   " movhlps %%xmm6, %%xmm4 # xmm4 = r3 i4 ?? ??\n\t"
   " addps %%xmm4, %%xmm6 # xmm6 = r1+r3 i2+i4 ?? ??\n\t"
   " movlps %%xmm6, (%[rdi]) # store low 2x32 bits (complex) to memory\n\t"
   :
   :[rsi] "r" (input), [rdx] "r" (taps), "c" (num_bytes), [rdi] "r" (result)
   :"rax", "r8", "r9", "r10"
   );


  if(isodd) {
    *result += input[num_points - 1] * taps[num_points - 1];
  }

  return;

}

#endif /* LV_HAVE_SSE && LV_HAVE_64 */
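
/*!
 * What the inline assembly above accumulates, restated in scalar C (editor's
 * sketch for readability, not a drop-in replacement): xmm6 collects the
 * element-wise products in*tp, xmm7 collects the "swap internals" products,
 * and the epilogue's sign flip plus pairwise adds turn those lanes into the
 * real and imaginary sums.
 *
 * \code
 * #include <complex.h>
 * // scalar model of one complex pair a = (ar, ai), b = (br, bi)
 * static float complex ccomplex_dotprod_step(float ar, float ai,
 *                                            float br, float bi)
 * {
 *   float xmm6_lo = ar * br, xmm6_hi = ai * bi; // element-wise products
 *   float xmm7_lo = ai * br, xmm7_hi = ar * bi; // "swap internals" products
 *   // epilogue: flip the sign of the ai*bi lane, then pairwise-add
 *   return (xmm6_lo - xmm6_hi) + (xmm7_lo + xmm7_hi) * I;
 * }
 * \endcode
 */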




#ifdef LV_HAVE_SSE3

#include <pmmintrin.h>

static inline void volk_32fc_x2_dot_prod_32fc_u_sse3(lv_32fc_t* result, const lv_32fc_t* input, const lv_32fc_t* taps, unsigned int num_points) {

  lv_32fc_t dotProduct;
  memset(&dotProduct, 0x0, 2*sizeof(float));

  unsigned int number = 0;
  const unsigned int halfPoints = num_points/2;
  unsigned int isodd = num_points & 1;

  __m128 x, y, yl, yh, z, tmp1, tmp2, dotProdVal;

  const lv_32fc_t* a = input;
  const lv_32fc_t* b = taps;

  dotProdVal = _mm_setzero_ps();

  for(;number < halfPoints; number++){

    x = _mm_loadu_ps((float*)a); // Load the ar + ai, br + bi as ar,ai,br,bi
    y = _mm_loadu_ps((float*)b); // Load the cr + ci, dr + di as cr,ci,dr,di

    yl = _mm_moveldup_ps(y); // Load yl with cr,cr,dr,dr
    yh = _mm_movehdup_ps(y); // Load yh with ci,ci,di,di

    tmp1 = _mm_mul_ps(x,yl); // tmp1 = ar*cr,ai*cr,br*dr,bi*dr

    x = _mm_shuffle_ps(x,x,0xB1); // Re-arrange x to be ai,ar,bi,br

    tmp2 = _mm_mul_ps(x,yh); // tmp2 = ai*ci,ar*ci,bi*di,br*di

    z = _mm_addsub_ps(tmp1,tmp2); // ar*cr-ai*ci, ai*cr+ar*ci, br*dr-bi*di, bi*dr+br*di

    dotProdVal = _mm_add_ps(dotProdVal, z); // Add the complex multiplication results together

    a += 2;
    b += 2;
  }

  __VOLK_ATTR_ALIGNED(16) lv_32fc_t dotProductVector[2];

  _mm_storeu_ps((float*)dotProductVector,dotProdVal); // Store the results back into the dot product vector

  dotProduct += ( dotProductVector[0] + dotProductVector[1] );

  if(isodd) {
    dotProduct += input[num_points - 1] * taps[num_points - 1];
  }

  *result = dotProduct;
}

#endif /*LV_HAVE_SSE3*/
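
/*!
 * The SSE3 complex multiply used above, modeled for one pair in scalar C
 * (editor's sketch): _mm_moveldup_ps/_mm_movehdup_ps duplicate the real and
 * imaginary parts of y, and _mm_addsub_ps merges the two partial products
 * (subtract in even lanes, add in odd lanes) in a single instruction.
 *
 * \code
 * #include <complex.h>
 * static float complex sse3_cmul_model(float complex x, float complex y)
 * {
 *   float ar = crealf(x), ai = cimagf(x);
 *   float br = crealf(y), bi = cimagf(y);
 *   float tmp1_re = ar * br, tmp1_im = ai * br; // x * moveldup(y)
 *   float tmp2_re = ai * bi, tmp2_im = ar * bi; // swap(x) * movehdup(y)
 *   // addsub: even lane subtracts, odd lane adds
 *   return (tmp1_re - tmp2_re) + (tmp1_im + tmp2_im) * I;
 * }
 * \endcode
 */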

#ifdef LV_HAVE_SSE4_1

#include <smmintrin.h>

static inline void volk_32fc_x2_dot_prod_32fc_u_sse4_1(lv_32fc_t* result, const lv_32fc_t* input, const lv_32fc_t* taps, unsigned int num_points) {

  unsigned int i = 0;
  const unsigned int qtr_points = num_points/4;
  const unsigned int isodd = num_points & 3;

  __m128 xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, real0, real1, im0, im1;
  float *p_input, *p_taps;
  __m64 *p_result;

  p_result = (__m64*)result;
  p_input = (float*)input;
  p_taps = (float*)taps;

  // sign bit in the low float lane only; written as two 64-bit lanes because
  // a single 128-bit integer literal would overflow
  static const __m128i neg = {0x0000000080000000LL, 0};

  real0 = _mm_setzero_ps();
  real1 = _mm_setzero_ps();
  im0 = _mm_setzero_ps();
  im1 = _mm_setzero_ps();

  for(; i < qtr_points; ++i) {
    xmm0 = _mm_loadu_ps(p_input);
    xmm1 = _mm_loadu_ps(p_taps);

    p_input += 4;
    p_taps += 4;

    xmm2 = _mm_loadu_ps(p_input);
    xmm3 = _mm_loadu_ps(p_taps);

    p_input += 4;
    p_taps += 4;

    xmm4 = _mm_unpackhi_ps(xmm0, xmm2);
    xmm5 = _mm_unpackhi_ps(xmm1, xmm3);
    xmm0 = _mm_unpacklo_ps(xmm0, xmm2);
    xmm2 = _mm_unpacklo_ps(xmm1, xmm3);

    //imaginary vector from input
    xmm1 = _mm_unpackhi_ps(xmm0, xmm4);
    //real vector from input
    xmm3 = _mm_unpacklo_ps(xmm0, xmm4);
    //imaginary vector from taps
    xmm0 = _mm_unpackhi_ps(xmm2, xmm5);
    //real vector from taps
    xmm2 = _mm_unpacklo_ps(xmm2, xmm5);

    xmm4 = _mm_dp_ps(xmm3, xmm2, 0xf1);
    xmm5 = _mm_dp_ps(xmm1, xmm0, 0xf1);

    xmm6 = _mm_dp_ps(xmm3, xmm0, 0xf2);
    xmm7 = _mm_dp_ps(xmm1, xmm2, 0xf2);

    real0 = _mm_add_ps(xmm4, real0);
    real1 = _mm_add_ps(xmm5, real1);
    im0 = _mm_add_ps(xmm6, im0);
    im1 = _mm_add_ps(xmm7, im1);
  }

  real1 = _mm_xor_ps(real1, bit128_p(&neg)->float_vec);

  im0 = _mm_add_ps(im0, im1);
  real0 = _mm_add_ps(real0, real1);

  im0 = _mm_add_ps(im0, real0);

  _mm_storel_pi(p_result, im0);

  for(i = num_points-isodd; i < num_points; i++) {
    *result += input[i] * taps[i];
  }
}

#endif /*LV_HAVE_SSE4_1*/
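
/*!
 * A note on the _mm_dp_ps masks above (editor's explanation): the high
 * nibble of the immediate selects which lanes enter the dot product and the
 * low nibble selects which output lanes receive the sum. 0xf1 sums all four
 * lanes into lane 0 (the real accumulators), 0xf2 sums them into lane 1 (the
 * imaginary accumulators); real1 then has its sign flipped because it holds
 * the ai*bi sum that must be subtracted. _mm_storel_pi() finally writes
 * lanes 0 and 1, i.e. one complex number.
 *
 * \code
 * // scalar model of _mm_dp_ps(a, b, 0xf1): sum all lanes into lane 0
 * static void dp_ps_model(const float a[4], const float b[4], float out[4])
 * {
 *   float s = a[0]*b[0] + a[1]*b[1] + a[2]*b[2] + a[3]*b[3];
 *   out[0] = s; out[1] = 0.f; out[2] = 0.f; out[3] = 0.f;
 *   // with mask 0xf2 the sum would land in out[1] instead
 * }
 * \endcode
 */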

#ifdef LV_HAVE_AVX

#include <immintrin.h>

static inline void volk_32fc_x2_dot_prod_32fc_u_avx(lv_32fc_t* result, const lv_32fc_t* input, const lv_32fc_t* taps, unsigned int num_points) {

  unsigned int isodd = num_points & 3;
  unsigned int i = 0;
  lv_32fc_t dotProduct;
  memset(&dotProduct, 0x0, 2*sizeof(float));

  unsigned int number = 0;
  const unsigned int quarterPoints = num_points / 4;

  __m256 x, y, yl, yh, z, tmp1, tmp2, dotProdVal;

  const lv_32fc_t* a = input;
  const lv_32fc_t* b = taps;

  dotProdVal = _mm256_setzero_ps();

  for(;number < quarterPoints; number++){
    x = _mm256_loadu_ps((float*)a); // Load a,b,e,f as ar,ai,br,bi,er,ei,fr,fi
    y = _mm256_loadu_ps((float*)b); // Load c,d,g,h as cr,ci,dr,di,gr,gi,hr,hi

    yl = _mm256_moveldup_ps(y); // Load yl with cr,cr,dr,dr,gr,gr,hr,hr
    yh = _mm256_movehdup_ps(y); // Load yh with ci,ci,di,di,gi,gi,hi,hi

    tmp1 = _mm256_mul_ps(x,yl); // tmp1 = ar*cr,ai*cr,br*dr,bi*dr ...

    x = _mm256_shuffle_ps(x,x,0xB1); // Re-arrange x to be ai,ar,bi,br,ei,er,fi,fr

    tmp2 = _mm256_mul_ps(x,yh); // tmp2 = ai*ci,ar*ci,bi*di,br*di ...

    z = _mm256_addsub_ps(tmp1,tmp2); // ar*cr-ai*ci, ai*cr+ar*ci, br*dr-bi*di, bi*dr+br*di

    dotProdVal = _mm256_add_ps(dotProdVal, z); // Add the complex multiplication results together

    a += 4;
    b += 4;
  }

  __VOLK_ATTR_ALIGNED(32) lv_32fc_t dotProductVector[4];

  _mm256_storeu_ps((float*)dotProductVector,dotProdVal); // Store the results back into the dot product vector

  dotProduct += ( dotProductVector[0] + dotProductVector[1] + dotProductVector[2] + dotProductVector[3]);

  for(i = num_points-isodd; i < num_points; i++) {
    dotProduct += input[i] * taps[i];
  }

  *result = dotProduct;
}

#endif /*LV_HAVE_AVX*/

#if LV_HAVE_AVX && LV_HAVE_FMA
#include <immintrin.h>

static inline void volk_32fc_x2_dot_prod_32fc_u_avx_fma(lv_32fc_t* result, const lv_32fc_t* input, const lv_32fc_t* taps, unsigned int num_points) {

  unsigned int isodd = num_points & 3;
  unsigned int i = 0;
  lv_32fc_t dotProduct;
  memset(&dotProduct, 0x0, 2*sizeof(float));

  unsigned int number = 0;
  const unsigned int quarterPoints = num_points / 4;

  __m256 x, y, yl, yh, z, tmp1, tmp2, dotProdVal;

  const lv_32fc_t* a = input;
  const lv_32fc_t* b = taps;

  dotProdVal = _mm256_setzero_ps();

  for(;number < quarterPoints; number++){

    x = _mm256_loadu_ps((float*)a); // Load a,b,e,f as ar,ai,br,bi,er,ei,fr,fi
    y = _mm256_loadu_ps((float*)b); // Load c,d,g,h as cr,ci,dr,di,gr,gi,hr,hi

    yl = _mm256_moveldup_ps(y); // Load yl with cr,cr,dr,dr,gr,gr,hr,hr
    yh = _mm256_movehdup_ps(y); // Load yh with ci,ci,di,di,gi,gi,hi,hi

    tmp1 = x;

    x = _mm256_shuffle_ps(x,x,0xB1); // Re-arrange x to be ai,ar,bi,br,ei,er,fi,fr

    tmp2 = _mm256_mul_ps(x,yh); // tmp2 = ai*ci,ar*ci,bi*di,br*di ...

    z = _mm256_fmaddsub_ps(tmp1, yl, tmp2); // ar*cr-ai*ci, ai*cr+ar*ci, br*dr-bi*di, bi*dr+br*di

    dotProdVal = _mm256_add_ps(dotProdVal, z); // Add the complex multiplication results together

    a += 4;
    b += 4;
  }

  __VOLK_ATTR_ALIGNED(32) lv_32fc_t dotProductVector[4];

  _mm256_storeu_ps((float*)dotProductVector,dotProdVal); // Store the results back into the dot product vector

  dotProduct += ( dotProductVector[0] + dotProductVector[1] + dotProductVector[2] + dotProductVector[3]);

  for(i = num_points-isodd; i < num_points; i++) {
    dotProduct += input[i] * taps[i];
  }

  *result = dotProduct;
}

#endif /*LV_HAVE_AVX && LV_HAVE_FMA*/
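
/*!
 * The FMA variant folds the separate multiply and add/subtract of the plain
 * AVX kernel into one _mm256_fmaddsub_ps (editor's note). Per lane k it
 * behaves as modeled below, which is exactly the complex-multiply merge:
 *
 * \code
 * // scalar model of z = _mm256_fmaddsub_ps(tmp1, yl, tmp2)
 * static void fmaddsub_model(const float tmp1[8], const float yl[8],
 *                            const float tmp2[8], float z[8])
 * {
 *   for (int k = 0; k < 8; k++)
 *     z[k] = (k & 1) ? tmp1[k] * yl[k] + tmp2[k]   // odd lanes add
 *                    : tmp1[k] * yl[k] - tmp2[k];  // even lanes subtract
 * }
 * \endcode
 */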

#endif /*INCLUDED_volk_32fc_x2_dot_prod_32fc_u_H*/

#ifndef INCLUDED_volk_32fc_x2_dot_prod_32fc_a_H
#define INCLUDED_volk_32fc_x2_dot_prod_32fc_a_H

#include <volk/volk_common.h>
#include <volk/volk_complex.h>
#include <stdio.h>
#include <string.h>


#ifdef LV_HAVE_GENERIC


static inline void volk_32fc_x2_dot_prod_32fc_a_generic(lv_32fc_t* result, const lv_32fc_t* input, const lv_32fc_t* taps, unsigned int num_points) {

  const unsigned int num_bytes = num_points*8;

  float * res = (float*) result;
  float * in = (float*) input;
  float * tp = (float*) taps;
  unsigned int n_2_ccomplex_blocks = num_bytes >> 4;

  float sum0[2] = {0,0};
  float sum1[2] = {0,0};
  unsigned int i = 0;

  for(i = 0; i < n_2_ccomplex_blocks; ++i) {
    sum0[0] += in[0] * tp[0] - in[1] * tp[1];
    sum0[1] += in[0] * tp[1] + in[1] * tp[0];
    sum1[0] += in[2] * tp[2] - in[3] * tp[3];
    sum1[1] += in[2] * tp[3] + in[3] * tp[2];

    in += 4;
    tp += 4;
  }

  res[0] = sum0[0] + sum1[0];
  res[1] = sum0[1] + sum1[1];

  if (num_points & 1) {
    *result += input[num_points - 1] * taps[num_points - 1];
  }
}

#endif /*LV_HAVE_GENERIC*/


#if LV_HAVE_SSE && LV_HAVE_64


static inline void volk_32fc_x2_dot_prod_32fc_a_sse_64(lv_32fc_t* result, const lv_32fc_t* input, const lv_32fc_t* taps, unsigned int num_points) {

  const unsigned int num_bytes = num_points*8;
  unsigned int isodd = num_points & 1;

  __VOLK_ASM
  (
   "# ccomplex_dotprod_generic (float* result, const float *input,\n\t"
   "# const float *taps, unsigned num_bytes)\n\t"
   "# float sum0 = 0;\n\t"
   "# float sum1 = 0;\n\t"
   "# float sum2 = 0;\n\t"
   "# float sum3 = 0;\n\t"
   "# do {\n\t"
   "# sum0 += input[0] * taps[0] - input[1] * taps[1];\n\t"
   "# sum1 += input[0] * taps[1] + input[1] * taps[0];\n\t"
   "# sum2 += input[2] * taps[2] - input[3] * taps[3];\n\t"
   "# sum3 += input[2] * taps[3] + input[3] * taps[2];\n\t"
   "# input += 4;\n\t"
   "# taps += 4; \n\t"
   "# } while (--n_2_ccomplex_blocks != 0);\n\t"
   "# result[0] = sum0 + sum2;\n\t"
   "# result[1] = sum1 + sum3;\n\t"
   "# TODO: prefetch and better scheduling\n\t"
   " xor %%r9, %%r9\n\t"
   " xor %%r10, %%r10\n\t"
   " movq %%rcx, %%rax\n\t"
   " movq %%rcx, %%r8\n\t"
   " movq %[rsi], %%r9\n\t"
   " movq %[rdx], %%r10\n\t"
   " xorps %%xmm6, %%xmm6 # zero accumulators\n\t"
   " movaps 0(%%r9), %%xmm0\n\t"
   " xorps %%xmm7, %%xmm7 # zero accumulators\n\t"
   " movaps 0(%%r10), %%xmm2\n\t"
   " shr $5, %%rax # rax = n_2_ccomplex_blocks / 2\n\t"
   " shr $4, %%r8\n\t"
   " jmp .%=L1_test\n\t"
   " # 4 taps / loop\n\t"
   " # something like ?? cycles / loop\n\t"
   ".%=Loop1: \n\t"
   "# complex prod: C += A * B, w/ temp Z & Y (or B), xmmPN=$0x8000000080000000\n\t"
   "# movaps (%%r9), %%xmmA\n\t"
   "# movaps (%%r10), %%xmmB\n\t"
   "# movaps %%xmmA, %%xmmZ\n\t"
   "# shufps $0xb1, %%xmmZ, %%xmmZ # swap internals\n\t"
   "# mulps %%xmmB, %%xmmA\n\t"
   "# mulps %%xmmZ, %%xmmB\n\t"
   "# # SSE replacement for: pfpnacc %%xmmB, %%xmmA\n\t"
   "# xorps %%xmmPN, %%xmmA\n\t"
   "# movaps %%xmmA, %%xmmZ\n\t"
   "# unpcklps %%xmmB, %%xmmA\n\t"
   "# unpckhps %%xmmB, %%xmmZ\n\t"
   "# movaps %%xmmZ, %%xmmY\n\t"
   "# shufps $0x44, %%xmmA, %%xmmZ # b01000100\n\t"
   "# shufps $0xee, %%xmmY, %%xmmA # b11101110\n\t"
   "# addps %%xmmZ, %%xmmA\n\t"
   "# addps %%xmmA, %%xmmC\n\t"
   "# A=xmm0, B=xmm2, Z=xmm4\n\t"
   "# A'=xmm1, B'=xmm3, Z'=xmm5\n\t"
   " movaps 16(%%r9), %%xmm1\n\t"
   " movaps %%xmm0, %%xmm4\n\t"
   " mulps %%xmm2, %%xmm0\n\t"
   " shufps $0xb1, %%xmm4, %%xmm4 # swap internals\n\t"
   " movaps 16(%%r10), %%xmm3\n\t"
   " movaps %%xmm1, %%xmm5\n\t"
   " addps %%xmm0, %%xmm6\n\t"
   " mulps %%xmm3, %%xmm1\n\t"
   " shufps $0xb1, %%xmm5, %%xmm5 # swap internals\n\t"
   " addps %%xmm1, %%xmm6\n\t"
   " mulps %%xmm4, %%xmm2\n\t"
   " movaps 32(%%r9), %%xmm0\n\t"
   " addps %%xmm2, %%xmm7\n\t"
   " mulps %%xmm5, %%xmm3\n\t"
   " add $32, %%r9\n\t"
   " movaps 32(%%r10), %%xmm2\n\t"
   " addps %%xmm3, %%xmm7\n\t"
   " add $32, %%r10\n\t"
   ".%=L1_test:\n\t"
   " dec %%rax\n\t"
   " jge .%=Loop1\n\t"
   " # We've handled the bulk of multiplies up to here.\n\t"
   " # Let's see if original n_2_ccomplex_blocks was odd.\n\t"
   " # If so, we've got 2 more taps to do.\n\t"
   " and $1, %%r8\n\t"
   " je .%=Leven\n\t"
   " # The count was odd, do 2 more taps.\n\t"
   " # Note that we've already got mm0/mm2 preloaded\n\t"
   " # from the main loop.\n\t"
   " movaps %%xmm0, %%xmm4\n\t"
   " mulps %%xmm2, %%xmm0\n\t"
   " shufps $0xb1, %%xmm4, %%xmm4 # swap internals\n\t"
   " addps %%xmm0, %%xmm6\n\t"
   " mulps %%xmm4, %%xmm2\n\t"
   " addps %%xmm2, %%xmm7\n\t"
   ".%=Leven:\n\t"
   " # neg inversor\n\t"
   " xorps %%xmm1, %%xmm1\n\t"
   " mov $0x80000000, %%r9\n\t"
   " movd %%r9, %%xmm1\n\t"
   " shufps $0x11, %%xmm1, %%xmm1 # b00010001 # 0 -0 0 -0\n\t"
   " # pfpnacc\n\t"
   " xorps %%xmm1, %%xmm6\n\t"
   " movaps %%xmm6, %%xmm2\n\t"
   " unpcklps %%xmm7, %%xmm6\n\t"
   " unpckhps %%xmm7, %%xmm2\n\t"
   " movaps %%xmm2, %%xmm3\n\t"
   " shufps $0x44, %%xmm6, %%xmm2 # b01000100\n\t"
   " shufps $0xee, %%xmm3, %%xmm6 # b11101110\n\t"
   " addps %%xmm2, %%xmm6\n\t"
   " # xmm6 = r1 i2 r3 i4\n\t"
   " movhlps %%xmm6, %%xmm4 # xmm4 = r3 i4 ?? ??\n\t"
   " addps %%xmm4, %%xmm6 # xmm6 = r1+r3 i2+i4 ?? ??\n\t"
   " movlps %%xmm6, (%[rdi]) # store low 2x32 bits (complex) to memory\n\t"
   :
   :[rsi] "r" (input), [rdx] "r" (taps), "c" (num_bytes), [rdi] "r" (result)
   :"rax", "r8", "r9", "r10"
   );


  if(isodd) {
    *result += input[num_points - 1] * taps[num_points - 1];
  }

  return;

}

#endif /*LV_HAVE_SSE && LV_HAVE_64*/

#if LV_HAVE_SSE && LV_HAVE_32

static inline void volk_32fc_x2_dot_prod_32fc_a_sse_32(lv_32fc_t* result, const lv_32fc_t* input, const lv_32fc_t* taps, unsigned int num_points) {

  volk_32fc_x2_dot_prod_32fc_a_generic(result, input, taps, num_points);

#if 0
  const unsigned int num_bytes = num_points*8;
  unsigned int isodd = num_points & 1;

  __VOLK_ASM __VOLK_VOLATILE
  (
   " #pushl %%ebp\n\t"
   " #movl %%esp, %%ebp\n\t"
   " movl 12(%%ebp), %%eax # input\n\t"
   " movl 16(%%ebp), %%edx # taps\n\t"
   " movl 20(%%ebp), %%ecx # n_bytes\n\t"
   " xorps %%xmm6, %%xmm6 # zero accumulators\n\t"
   " movaps 0(%%eax), %%xmm0\n\t"
   " xorps %%xmm7, %%xmm7 # zero accumulators\n\t"
   " movaps 0(%%edx), %%xmm2\n\t"
   " shrl $5, %%ecx # ecx = n_2_ccomplex_blocks / 2\n\t"
   " jmp .%=L1_test\n\t"
   " # 4 taps / loop\n\t"
   " # something like ?? cycles / loop\n\t"
   ".%=Loop1: \n\t"
   "# complex prod: C += A * B, w/ temp Z & Y (or B), xmmPN=$0x8000000080000000\n\t"
   "# movaps (%%eax), %%xmmA\n\t"
   "# movaps (%%edx), %%xmmB\n\t"
   "# movaps %%xmmA, %%xmmZ\n\t"
   "# shufps $0xb1, %%xmmZ, %%xmmZ # swap internals\n\t"
   "# mulps %%xmmB, %%xmmA\n\t"
   "# mulps %%xmmZ, %%xmmB\n\t"
   "# # SSE replacement for: pfpnacc %%xmmB, %%xmmA\n\t"
   "# xorps %%xmmPN, %%xmmA\n\t"
   "# movaps %%xmmA, %%xmmZ\n\t"
   "# unpcklps %%xmmB, %%xmmA\n\t"
   "# unpckhps %%xmmB, %%xmmZ\n\t"
   "# movaps %%xmmZ, %%xmmY\n\t"
   "# shufps $0x44, %%xmmA, %%xmmZ # b01000100\n\t"
   "# shufps $0xee, %%xmmY, %%xmmA # b11101110\n\t"
   "# addps %%xmmZ, %%xmmA\n\t"
   "# addps %%xmmA, %%xmmC\n\t"
   "# A=xmm0, B=xmm2, Z=xmm4\n\t"
   "# A'=xmm1, B'=xmm3, Z'=xmm5\n\t"
   " movaps 16(%%eax), %%xmm1\n\t"
   " movaps %%xmm0, %%xmm4\n\t"
   " mulps %%xmm2, %%xmm0\n\t"
   " shufps $0xb1, %%xmm4, %%xmm4 # swap internals\n\t"
   " movaps 16(%%edx), %%xmm3\n\t"
   " movaps %%xmm1, %%xmm5\n\t"
   " addps %%xmm0, %%xmm6\n\t"
   " mulps %%xmm3, %%xmm1\n\t"
   " shufps $0xb1, %%xmm5, %%xmm5 # swap internals\n\t"
   " addps %%xmm1, %%xmm6\n\t"
   " mulps %%xmm4, %%xmm2\n\t"
   " movaps 32(%%eax), %%xmm0\n\t"
   " addps %%xmm2, %%xmm7\n\t"
   " mulps %%xmm5, %%xmm3\n\t"
   " addl $32, %%eax\n\t"
   " movaps 32(%%edx), %%xmm2\n\t"
   " addps %%xmm3, %%xmm7\n\t"
   " addl $32, %%edx\n\t"
   ".%=L1_test:\n\t"
   " decl %%ecx\n\t"
   " jge .%=Loop1\n\t"
   " # We've handled the bulk of multiplies up to here.\n\t"
   " # Let's see if original n_2_ccomplex_blocks was odd.\n\t"
   " # If so, we've got 2 more taps to do.\n\t"
   " movl 20(%%ebp), %%ecx # n_2_ccomplex_blocks\n\t"
   " shrl $4, %%ecx\n\t"
   " andl $1, %%ecx\n\t"
   " je .%=Leven\n\t"
   " # The count was odd, do 2 more taps.\n\t"
   " # Note that we've already got mm0/mm2 preloaded\n\t"
   " # from the main loop.\n\t"
   " movaps %%xmm0, %%xmm4\n\t"
   " mulps %%xmm2, %%xmm0\n\t"
   " shufps $0xb1, %%xmm4, %%xmm4 # swap internals\n\t"
   " addps %%xmm0, %%xmm6\n\t"
   " mulps %%xmm4, %%xmm2\n\t"
   " addps %%xmm2, %%xmm7\n\t"
   ".%=Leven:\n\t"
   " # neg inversor\n\t"
   " movl 8(%%ebp), %%eax \n\t"
   " xorps %%xmm1, %%xmm1\n\t"
   " movl $0x80000000, (%%eax)\n\t"
   " movss (%%eax), %%xmm1\n\t"
   " shufps $0x11, %%xmm1, %%xmm1 # b00010001 # 0 -0 0 -0\n\t"
   " # pfpnacc\n\t"
   " xorps %%xmm1, %%xmm6\n\t"
   " movaps %%xmm6, %%xmm2\n\t"
   " unpcklps %%xmm7, %%xmm6\n\t"
   " unpckhps %%xmm7, %%xmm2\n\t"
   " movaps %%xmm2, %%xmm3\n\t"
   " shufps $0x44, %%xmm6, %%xmm2 # b01000100\n\t"
   " shufps $0xee, %%xmm3, %%xmm6 # b11101110\n\t"
   " addps %%xmm2, %%xmm6\n\t"
   " # xmm6 = r1 i2 r3 i4\n\t"
   " #movl 8(%%ebp), %%eax # @result\n\t"
   " movhlps %%xmm6, %%xmm4 # xmm4 = r3 i4 ?? ??\n\t"
   " addps %%xmm4, %%xmm6 # xmm6 = r1+r3 i2+i4 ?? ??\n\t"
   " movlps %%xmm6, (%%eax) # store low 2x32 bits (complex) to memory\n\t"
   " #popl %%ebp\n\t"
   :
   :
   : "eax", "ecx", "edx"
   );


  int getem = num_bytes % 16;

  if(isodd) {
    *result += (input[num_points - 1] * taps[num_points - 1]);
  }

  return;
#endif
}

#endif /*LV_HAVE_SSE && LV_HAVE_32*/

#ifdef LV_HAVE_SSE3

#include <pmmintrin.h>

static inline void volk_32fc_x2_dot_prod_32fc_a_sse3(lv_32fc_t* result, const lv_32fc_t* input, const lv_32fc_t* taps, unsigned int num_points) {

  const unsigned int num_bytes = num_points*8;
  unsigned int isodd = num_points & 1;

  lv_32fc_t dotProduct;
  memset(&dotProduct, 0x0, 2*sizeof(float));

  unsigned int number = 0;
  const unsigned int halfPoints = num_bytes >> 4;

  __m128 x, y, yl, yh, z, tmp1, tmp2, dotProdVal;

  const lv_32fc_t* a = input;
  const lv_32fc_t* b = taps;

  dotProdVal = _mm_setzero_ps();

  for(;number < halfPoints; number++){

    x = _mm_load_ps((float*)a); // Load the ar + ai, br + bi as ar,ai,br,bi
    y = _mm_load_ps((float*)b); // Load the cr + ci, dr + di as cr,ci,dr,di

    yl = _mm_moveldup_ps(y); // Load yl with cr,cr,dr,dr
    yh = _mm_movehdup_ps(y); // Load yh with ci,ci,di,di

    tmp1 = _mm_mul_ps(x,yl); // tmp1 = ar*cr,ai*cr,br*dr,bi*dr

    x = _mm_shuffle_ps(x,x,0xB1); // Re-arrange x to be ai,ar,bi,br

    tmp2 = _mm_mul_ps(x,yh); // tmp2 = ai*ci,ar*ci,bi*di,br*di

    z = _mm_addsub_ps(tmp1,tmp2); // ar*cr-ai*ci, ai*cr+ar*ci, br*dr-bi*di, bi*dr+br*di

    dotProdVal = _mm_add_ps(dotProdVal, z); // Add the complex multiplication results together

    a += 2;
    b += 2;
  }

  __VOLK_ATTR_ALIGNED(16) lv_32fc_t dotProductVector[2];

  _mm_store_ps((float*)dotProductVector,dotProdVal); // Store the results back into the dot product vector

  dotProduct += ( dotProductVector[0] + dotProductVector[1] );

  if(isodd) {
    dotProduct += input[num_points - 1] * taps[num_points - 1];
  }

  *result = dotProduct;
}

#endif /*LV_HAVE_SSE3*/


#ifdef LV_HAVE_SSE4_1

#include <smmintrin.h>

static inline void volk_32fc_x2_dot_prod_32fc_a_sse4_1(lv_32fc_t* result, const lv_32fc_t* input, const lv_32fc_t* taps, unsigned int num_points) {

  unsigned int i = 0;
  const unsigned int qtr_points = num_points/4;
  const unsigned int isodd = num_points & 3;

  __m128 xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, real0, real1, im0, im1;
  float *p_input, *p_taps;
  __m64 *p_result;

  // sign bit in the low float lane only; written as two 64-bit lanes because
  // a single 128-bit integer literal would overflow
  static const __m128i neg = {0x0000000080000000LL, 0};

  p_result = (__m64*)result;
  p_input = (float*)input;
  p_taps = (float*)taps;

  real0 = _mm_setzero_ps();
  real1 = _mm_setzero_ps();
  im0 = _mm_setzero_ps();
  im1 = _mm_setzero_ps();

  for(; i < qtr_points; ++i) {
    xmm0 = _mm_load_ps(p_input);
    xmm1 = _mm_load_ps(p_taps);

    p_input += 4;
    p_taps += 4;

    xmm2 = _mm_load_ps(p_input);
    xmm3 = _mm_load_ps(p_taps);

    p_input += 4;
    p_taps += 4;

    xmm4 = _mm_unpackhi_ps(xmm0, xmm2);
    xmm5 = _mm_unpackhi_ps(xmm1, xmm3);
    xmm0 = _mm_unpacklo_ps(xmm0, xmm2);
    xmm2 = _mm_unpacklo_ps(xmm1, xmm3);

    //imaginary vector from input
    xmm1 = _mm_unpackhi_ps(xmm0, xmm4);
    //real vector from input
    xmm3 = _mm_unpacklo_ps(xmm0, xmm4);
    //imaginary vector from taps
    xmm0 = _mm_unpackhi_ps(xmm2, xmm5);
    //real vector from taps
    xmm2 = _mm_unpacklo_ps(xmm2, xmm5);

    xmm4 = _mm_dp_ps(xmm3, xmm2, 0xf1);
    xmm5 = _mm_dp_ps(xmm1, xmm0, 0xf1);

    xmm6 = _mm_dp_ps(xmm3, xmm0, 0xf2);
    xmm7 = _mm_dp_ps(xmm1, xmm2, 0xf2);

    real0 = _mm_add_ps(xmm4, real0);
    real1 = _mm_add_ps(xmm5, real1);
    im0 = _mm_add_ps(xmm6, im0);
    im1 = _mm_add_ps(xmm7, im1);
  }

  real1 = _mm_xor_ps(real1, bit128_p(&neg)->float_vec);

  im0 = _mm_add_ps(im0, im1);
  real0 = _mm_add_ps(real0, real1);

  im0 = _mm_add_ps(im0, real0);

  _mm_storel_pi(p_result, im0);

  for(i = num_points-isodd; i < num_points; i++) {
    *result += input[i] * taps[i];
  }
}

#endif /*LV_HAVE_SSE4_1*/

#ifdef LV_HAVE_NEON
#include <arm_neon.h>

static inline void volk_32fc_x2_dot_prod_32fc_neon(lv_32fc_t* result, const lv_32fc_t* input, const lv_32fc_t* taps, unsigned int num_points) {

  unsigned int quarter_points = num_points / 4;
  unsigned int number;

  lv_32fc_t* a_ptr = (lv_32fc_t*) taps;
  lv_32fc_t* b_ptr = (lv_32fc_t*) input;
  // for 2-lane vectors, 1st lane holds the real part,
  // 2nd lane holds the imaginary part
  float32x4x2_t a_val, b_val, c_val, accumulator;
  float32x4x2_t tmp_real, tmp_imag;
  accumulator.val[0] = vdupq_n_f32(0);
  accumulator.val[1] = vdupq_n_f32(0);

  for(number = 0; number < quarter_points; ++number) {
    a_val = vld2q_f32((float*)a_ptr); // a0r|a1r|a2r|a3r || a0i|a1i|a2i|a3i
    b_val = vld2q_f32((float*)b_ptr); // b0r|b1r|b2r|b3r || b0i|b1i|b2i|b3i
    __VOLK_PREFETCH(a_ptr+8);
    __VOLK_PREFETCH(b_ptr+8);

    // multiply the real*real and imag*imag to get real result
    // a0r*b0r|a1r*b1r|a2r*b2r|a3r*b3r
    tmp_real.val[0] = vmulq_f32(a_val.val[0], b_val.val[0]);
    // a0i*b0i|a1i*b1i|a2i*b2i|a3i*b3i
    tmp_real.val[1] = vmulq_f32(a_val.val[1], b_val.val[1]);

    // Multiply cross terms to get the imaginary result
    // a0r*b0i|a1r*b1i|a2r*b2i|a3r*b3i
    tmp_imag.val[0] = vmulq_f32(a_val.val[0], b_val.val[1]);
    // a0i*b0r|a1i*b1r|a2i*b2r|a3i*b3r
    tmp_imag.val[1] = vmulq_f32(a_val.val[1], b_val.val[0]);

    c_val.val[0] = vsubq_f32(tmp_real.val[0], tmp_real.val[1]);
    c_val.val[1] = vaddq_f32(tmp_imag.val[0], tmp_imag.val[1]);

    accumulator.val[0] = vaddq_f32(accumulator.val[0], c_val.val[0]);
    accumulator.val[1] = vaddq_f32(accumulator.val[1], c_val.val[1]);

    a_ptr += 4;
    b_ptr += 4;
  }
  lv_32fc_t accum_result[4];
  vst2q_f32((float*)accum_result, accumulator);
  *result = accum_result[0] + accum_result[1] + accum_result[2] + accum_result[3];

  // tail case
  for(number = quarter_points*4; number < num_points; ++number) {
    *result += (*a_ptr++) * (*b_ptr++);
  }

}
#endif /*LV_HAVE_NEON*/
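
/*!
 * vld2q_f32() de-interleaves on load (editor's note): eight consecutive
 * floats, i.e. four complex values, come back as one vector of the real
 * parts and one vector of the imaginary parts, so the kernel above works on
 * split (planar) data with plain vector arithmetic.
 *
 * \code
 * // scalar model of float32x4x2_t v = vld2q_f32(p)
 * static void vld2q_f32_model(const float p[8], float re[4], float im[4])
 * {
 *   for (int k = 0; k < 4; k++) {
 *     re[k] = p[2*k];     // v.val[0]: r0, r1, r2, r3
 *     im[k] = p[2*k + 1]; // v.val[1]: i0, i1, i2, i3
 *   }
 * }
 * \endcode
 */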

#ifdef LV_HAVE_NEON
#include <arm_neon.h>
static inline void volk_32fc_x2_dot_prod_32fc_neon_opttests(lv_32fc_t* result, const lv_32fc_t* input, const lv_32fc_t* taps, unsigned int num_points) {

  unsigned int quarter_points = num_points / 4;
  unsigned int number;

  lv_32fc_t* a_ptr = (lv_32fc_t*) taps;
  lv_32fc_t* b_ptr = (lv_32fc_t*) input;
  // for 2-lane vectors, 1st lane holds the real part,
  // 2nd lane holds the imaginary part
  float32x4x2_t a_val, b_val, accumulator;
  float32x4x2_t tmp_imag;
  accumulator.val[0] = vdupq_n_f32(0);
  accumulator.val[1] = vdupq_n_f32(0);

  for(number = 0; number < quarter_points; ++number) {
    a_val = vld2q_f32((float*)a_ptr); // a0r|a1r|a2r|a3r || a0i|a1i|a2i|a3i
    b_val = vld2q_f32((float*)b_ptr); // b0r|b1r|b2r|b3r || b0i|b1i|b2i|b3i
    __VOLK_PREFETCH(a_ptr+8);
    __VOLK_PREFETCH(b_ptr+8);

    // do the first multiply
    tmp_imag.val[1] = vmulq_f32(a_val.val[1], b_val.val[0]);
    tmp_imag.val[0] = vmulq_f32(a_val.val[0], b_val.val[0]);

    // use multiply accumulate/subtract to get result
    tmp_imag.val[1] = vmlaq_f32(tmp_imag.val[1], a_val.val[0], b_val.val[1]);
    tmp_imag.val[0] = vmlsq_f32(tmp_imag.val[0], a_val.val[1], b_val.val[1]);

    accumulator.val[0] = vaddq_f32(accumulator.val[0], tmp_imag.val[0]);
    accumulator.val[1] = vaddq_f32(accumulator.val[1], tmp_imag.val[1]);

    // increment pointers
    a_ptr += 4;
    b_ptr += 4;
  }
  lv_32fc_t accum_result[4];
  vst2q_f32((float*)accum_result, accumulator);
  *result = accum_result[0] + accum_result[1] + accum_result[2] + accum_result[3];

  // tail case
  for(number = quarter_points*4; number < num_points; ++number) {
    *result += (*a_ptr++) * (*b_ptr++);
  }

}
#endif /*LV_HAVE_NEON*/
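
/*!
 * The _opttests variant replaces two of the four multiplies of the plain
 * NEON kernel with fused multiply-accumulate/-subtract (editor's sketch of
 * the per-lane arithmetic; the helper name below is hypothetical):
 *
 * \code
 * // scalar model of one lane: a = (ar, ai), b = (br, bi)
 * static void neon_mla_mls_model(float ar, float ai, float br, float bi,
 *                                float* re, float* im)
 * {
 *   float t_re = ar * br;   // vmulq_f32
 *   float t_im = ai * br;   // vmulq_f32
 *   t_re = t_re - ai * bi;  // vmlsq_f32: t_re -= ai*bi
 *   t_im = t_im + ar * bi;  // vmlaq_f32: t_im += ar*bi
 *   *re = t_re;             // = ar*br - ai*bi
 *   *im = t_im;             // = ai*br + ar*bi
 * }
 * \endcode
 */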

#ifdef LV_HAVE_NEON
static inline void volk_32fc_x2_dot_prod_32fc_neon_optfma(lv_32fc_t* result, const lv_32fc_t* input, const lv_32fc_t* taps, unsigned int num_points) {

  unsigned int quarter_points = num_points / 4;
  unsigned int number;

  lv_32fc_t* a_ptr = (lv_32fc_t*) taps;
  lv_32fc_t* b_ptr = (lv_32fc_t*) input;
  // for 2-lane vectors, 1st lane holds the real part,
  // 2nd lane holds the imaginary part
  float32x4x2_t a_val, b_val, accumulator1, accumulator2;
  accumulator1.val[0] = vdupq_n_f32(0);
  accumulator1.val[1] = vdupq_n_f32(0);
  accumulator2.val[0] = vdupq_n_f32(0);
  accumulator2.val[1] = vdupq_n_f32(0);

  for(number = 0; number < quarter_points; ++number) {
    a_val = vld2q_f32((float*)a_ptr); // a0r|a1r|a2r|a3r || a0i|a1i|a2i|a3i
    b_val = vld2q_f32((float*)b_ptr); // b0r|b1r|b2r|b3r || b0i|b1i|b2i|b3i
    __VOLK_PREFETCH(a_ptr+8);
    __VOLK_PREFETCH(b_ptr+8);

    // use 2 accumulators to remove inter-instruction data dependencies
    accumulator1.val[0] = vmlaq_f32(accumulator1.val[0], a_val.val[0], b_val.val[0]);
    accumulator1.val[1] = vmlaq_f32(accumulator1.val[1], a_val.val[0], b_val.val[1]);
    accumulator2.val[0] = vmlsq_f32(accumulator2.val[0], a_val.val[1], b_val.val[1]);
    accumulator2.val[1] = vmlaq_f32(accumulator2.val[1], a_val.val[1], b_val.val[0]);
    // increment pointers
    a_ptr += 4;
    b_ptr += 4;
  }
  accumulator1.val[0] = vaddq_f32(accumulator1.val[0], accumulator2.val[0]);
  accumulator1.val[1] = vaddq_f32(accumulator1.val[1], accumulator2.val[1]);
  lv_32fc_t accum_result[4];
  vst2q_f32((float*)accum_result, accumulator1);
  *result = accum_result[0] + accum_result[1] + accum_result[2] + accum_result[3];

  // tail case
  for(number = quarter_points*4; number < num_points; ++number) {
    *result += (*a_ptr++) * (*b_ptr++);
  }

}
#endif /*LV_HAVE_NEON*/

#ifdef LV_HAVE_NEON
static inline void volk_32fc_x2_dot_prod_32fc_neon_optfmaunroll(lv_32fc_t* result, const lv_32fc_t* input, const lv_32fc_t* taps, unsigned int num_points) {
// NOTE: GCC does a poor job with this kernel, but the equivalent ASM code is very fast

  unsigned int quarter_points = num_points / 8; // processes eight points per iteration, despite the name
  unsigned int number;

  lv_32fc_t* a_ptr = (lv_32fc_t*) taps;
  lv_32fc_t* b_ptr = (lv_32fc_t*) input;
  // for 2-lane vectors, 1st lane holds the real part,
  // 2nd lane holds the imaginary part
  float32x4x4_t a_val, b_val, accumulator1, accumulator2;
  float32x4x2_t reduced_accumulator;
  accumulator1.val[0] = vdupq_n_f32(0);
  accumulator1.val[1] = vdupq_n_f32(0);
  accumulator1.val[2] = vdupq_n_f32(0);
  accumulator1.val[3] = vdupq_n_f32(0);
  accumulator2.val[0] = vdupq_n_f32(0);
  accumulator2.val[1] = vdupq_n_f32(0);
  accumulator2.val[2] = vdupq_n_f32(0);
  accumulator2.val[3] = vdupq_n_f32(0);

  // 8 input regs, 8 accumulators -> 16/16 neon regs are used
  for(number = 0; number < quarter_points; ++number) {
    // vld4q de-interleaves with stride 4: val[0]/val[1] hold the re/im parts
    // of the even-indexed complexes, val[2]/val[3] those of the odd-indexed
    a_val = vld4q_f32((float*)a_ptr);
    b_val = vld4q_f32((float*)b_ptr);
    __VOLK_PREFETCH(a_ptr+8);
    __VOLK_PREFETCH(b_ptr+8);

    // use 2 accumulators to remove inter-instruction data dependencies
    accumulator1.val[0] = vmlaq_f32(accumulator1.val[0], a_val.val[0], b_val.val[0]);
    accumulator1.val[1] = vmlaq_f32(accumulator1.val[1], a_val.val[0], b_val.val[1]);

    accumulator1.val[2] = vmlaq_f32(accumulator1.val[2], a_val.val[2], b_val.val[2]);
    accumulator1.val[3] = vmlaq_f32(accumulator1.val[3], a_val.val[2], b_val.val[3]);

    accumulator2.val[0] = vmlsq_f32(accumulator2.val[0], a_val.val[1], b_val.val[1]);
    accumulator2.val[1] = vmlaq_f32(accumulator2.val[1], a_val.val[1], b_val.val[0]);

    accumulator2.val[2] = vmlsq_f32(accumulator2.val[2], a_val.val[3], b_val.val[3]);
    accumulator2.val[3] = vmlaq_f32(accumulator2.val[3], a_val.val[3], b_val.val[2]);
    // increment pointers
    a_ptr += 8;
    b_ptr += 8;
  }
  // reduce 8 accumulator lanes down to 2 (1 real and 1 imag)
  accumulator1.val[0] = vaddq_f32(accumulator1.val[0], accumulator1.val[2]);
  accumulator1.val[1] = vaddq_f32(accumulator1.val[1], accumulator1.val[3]);
  accumulator2.val[0] = vaddq_f32(accumulator2.val[0], accumulator2.val[2]);
  accumulator2.val[1] = vaddq_f32(accumulator2.val[1], accumulator2.val[3]);
  reduced_accumulator.val[0] = vaddq_f32(accumulator1.val[0], accumulator2.val[0]);
  reduced_accumulator.val[1] = vaddq_f32(accumulator1.val[1], accumulator2.val[1]);
  // now reduce accumulators to scalars
  lv_32fc_t accum_result[4];
  vst2q_f32((float*)accum_result, reduced_accumulator);
  *result = accum_result[0] + accum_result[1] + accum_result[2] + accum_result[3];

  // tail case
  for(number = quarter_points*8; number < num_points; ++number) {
    *result += (*a_ptr++) * (*b_ptr++);
  }

}
#endif /*LV_HAVE_NEON*/


#ifdef LV_HAVE_AVX

#include <immintrin.h>

static inline void volk_32fc_x2_dot_prod_32fc_a_avx(lv_32fc_t* result, const lv_32fc_t* input, const lv_32fc_t* taps, unsigned int num_points) {

  unsigned int isodd = num_points & 3;
  unsigned int i = 0;
  lv_32fc_t dotProduct;
  memset(&dotProduct, 0x0, 2*sizeof(float));

  unsigned int number = 0;
  const unsigned int quarterPoints = num_points / 4;

  __m256 x, y, yl, yh, z, tmp1, tmp2, dotProdVal;

  const lv_32fc_t* a = input;
  const lv_32fc_t* b = taps;

  dotProdVal = _mm256_setzero_ps();

  for(;number < quarterPoints; number++){

    x = _mm256_load_ps((float*)a); // Load a,b,e,f as ar,ai,br,bi,er,ei,fr,fi
    y = _mm256_load_ps((float*)b); // Load c,d,g,h as cr,ci,dr,di,gr,gi,hr,hi

    yl = _mm256_moveldup_ps(y); // Load yl with cr,cr,dr,dr,gr,gr,hr,hr
    yh = _mm256_movehdup_ps(y); // Load yh with ci,ci,di,di,gi,gi,hi,hi

    tmp1 = _mm256_mul_ps(x,yl); // tmp1 = ar*cr,ai*cr,br*dr,bi*dr ...

    x = _mm256_shuffle_ps(x,x,0xB1); // Re-arrange x to be ai,ar,bi,br,ei,er,fi,fr

    tmp2 = _mm256_mul_ps(x,yh); // tmp2 = ai*ci,ar*ci,bi*di,br*di ...

    z = _mm256_addsub_ps(tmp1,tmp2); // ar*cr-ai*ci, ai*cr+ar*ci, br*dr-bi*di, bi*dr+br*di

    dotProdVal = _mm256_add_ps(dotProdVal, z); // Add the complex multiplication results together

    a += 4;
    b += 4;
  }

  __VOLK_ATTR_ALIGNED(32) lv_32fc_t dotProductVector[4];

  _mm256_store_ps((float*)dotProductVector,dotProdVal); // Store the results back into the dot product vector

  dotProduct += ( dotProductVector[0] + dotProductVector[1] + dotProductVector[2] + dotProductVector[3]);

  for(i = num_points-isodd; i < num_points; i++) {
    dotProduct += input[i] * taps[i];
  }

  *result = dotProduct;
}

#endif /*LV_HAVE_AVX*/

#if LV_HAVE_AVX && LV_HAVE_FMA
#include <immintrin.h>

static inline void volk_32fc_x2_dot_prod_32fc_a_avx_fma(lv_32fc_t* result, const lv_32fc_t* input, const lv_32fc_t* taps, unsigned int num_points) {

  unsigned int isodd = num_points & 3;
  unsigned int i = 0;
  lv_32fc_t dotProduct;
  memset(&dotProduct, 0x0, 2*sizeof(float));

  unsigned int number = 0;
  const unsigned int quarterPoints = num_points / 4;

  __m256 x, y, yl, yh, z, tmp1, tmp2, dotProdVal;

  const lv_32fc_t* a = input;
  const lv_32fc_t* b = taps;

  dotProdVal = _mm256_setzero_ps();

  for(;number < quarterPoints; number++){

    x = _mm256_load_ps((float*)a); // Load a,b,e,f as ar,ai,br,bi,er,ei,fr,fi
    y = _mm256_load_ps((float*)b); // Load c,d,g,h as cr,ci,dr,di,gr,gi,hr,hi

    yl = _mm256_moveldup_ps(y); // Load yl with cr,cr,dr,dr,gr,gr,hr,hr
    yh = _mm256_movehdup_ps(y); // Load yh with ci,ci,di,di,gi,gi,hi,hi

    tmp1 = x;

    x = _mm256_shuffle_ps(x,x,0xB1); // Re-arrange x to be ai,ar,bi,br,ei,er,fi,fr

    tmp2 = _mm256_mul_ps(x,yh); // tmp2 = ai*ci,ar*ci,bi*di,br*di ...

    z = _mm256_fmaddsub_ps(tmp1, yl, tmp2); // ar*cr-ai*ci, ai*cr+ar*ci, br*dr-bi*di, bi*dr+br*di

    dotProdVal = _mm256_add_ps(dotProdVal, z); // Add the complex multiplication results together

    a += 4;
    b += 4;
  }

  __VOLK_ATTR_ALIGNED(32) lv_32fc_t dotProductVector[4];

  _mm256_store_ps((float*)dotProductVector,dotProdVal); // Store the results back into the dot product vector

  dotProduct += ( dotProductVector[0] + dotProductVector[1] + dotProductVector[2] + dotProductVector[3]);

  for(i = num_points-isodd; i < num_points; i++) {
    dotProduct += input[i] * taps[i];
  }

  *result = dotProduct;
}

#endif /*LV_HAVE_AVX && LV_HAVE_FMA*/


#endif /*INCLUDED_volk_32fc_x2_dot_prod_32fc_a_H*/