59 #ifndef INCLUDED_volk_32fc_x2_conjugate_dot_prod_32fc_u_H 60 #define INCLUDED_volk_32fc_x2_conjugate_dot_prod_32fc_u_H 66 #ifdef LV_HAVE_GENERIC 70 const unsigned int num_bytes = num_points*8;
72 float * res = (
float*) result;
73 float * in = (
float*) input;
74 float * tp = (
float*) taps;
75 unsigned int n_2_ccomplex_blocks = num_bytes >> 4;
77 float sum0[2] = {0,0};
78 float sum1[2] = {0,0};
81 for(i = 0; i < n_2_ccomplex_blocks; ++
i) {
82 sum0[0] += in[0] * tp[0] + in[1] * tp[1];
83 sum0[1] += (-in[0] * tp[1]) + in[1] * tp[0];
84 sum1[0] += in[2] * tp[2] + in[3] * tp[3];
85 sum1[1] += (-in[2] * tp[3]) + in[3] * tp[2];
91 res[0] = sum0[0] + sum1[0];
92 res[1] = sum0[1] + sum1[1];
94 if (num_bytes >> 3 & 1) {
95 *result += input[(num_bytes >> 3) - 1] *
lv_conj(taps[(num_bytes >> 3) - 1]);
/* AVX kernel (unaligned loads) for the conjugate dot product:
 *   *result = sum_i input[i] * conj(taps[i])
 * NOTE(review): this extract is garbled — the enclosing signature
 * (volk_32fc_x2_conjugate_dot_prod_32fc_u_avx per the index below), the
 * scalar tail-loop body and closing braces were lost; code kept verbatim. */
103 #include <immintrin.h> 109 __m256 sum_a_mult_b_real = _mm256_setzero_ps();
/* Separate accumulators for products against re(taps) and im(taps). */
110 __m256 sum_a_mult_b_imag = _mm256_setzero_ps();
/* Main loop: 4 complex samples (8 floats) per iteration; num_points & ~3u
 * rounds the count down to a multiple of 4. */
112 for (
long unsigned i = 0;
i < (num_points & ~3u);
i += 4) {
124 __m256 a = _mm256_loadu_ps((
const float *) &input[i]);
125 __m256 b = _mm256_loadu_ps((
const float *) &taps[i]);
/* moveldup duplicates even (real) lanes of b; movehdup the odd (imag) lanes. */
126 __m256 b_real = _mm256_moveldup_ps(b);
127 __m256 b_imag = _mm256_movehdup_ps(b);
128 sum_a_mult_b_real = _mm256_add_ps(sum_a_mult_b_real, _mm256_mul_ps(a, b_real));
/* addsub alternately subtracts (even lanes) / adds (odd lanes): exactly the
 * sign pattern needed because taps is conjugated. */
132 sum_a_mult_b_imag = _mm256_addsub_ps(sum_a_mult_b_imag, _mm256_mul_ps(a, b_imag));
/* Swap re/im within each complex pair so both accumulators line up. */
136 sum_a_mult_b_imag = _mm256_permute_ps(sum_a_mult_b_imag, _MM_SHUFFLE(2, 3, 0, 1));
138 __m256 sum = _mm256_add_ps(sum_a_mult_b_real, sum_a_mult_b_imag);
/* Horizontal reduction: fold upper 128-bit lane onto the lower... */
142 sum = _mm256_add_ps(sum, _mm256_permute2f128_ps(sum, sum, 0x01));
/* ...then fold the upper complex pair onto the lower one. */
144 sum = _mm256_add_ps(sum, _mm256_permute_ps(sum, _MM_SHUFFLE(1, 0, 3, 2)));
146 __m128 lower = _mm256_extractf128_ps(sum, 0);
/* Store the low two floats (one complex value) into *result. */
147 _mm_storel_pi((__m64 *) result, lower);
/* Scalar tail for the 0-3 leftover samples (body lost in this extract;
 * presumably *result += input[i] * lv_conj(taps[i]) — confirm upstream). */
150 for (
long unsigned i = num_points & ~3u; i < num_points; ++
i) {
/* SSE3 kernel (unaligned loads): same moveldup/movehdup/addsub scheme as the
 * AVX version but 2 complex samples (4 floats) per iteration.
 * NOTE(review): enclosing signature (volk_32fc_x2_conjugate_dot_prod_32fc_u_sse3
 * per the index below) and the odd-sample tail body were lost in this extract. */
161 #include <xmmintrin.h> 162 #include <pmmintrin.h> 168 __m128 sum_a_mult_b_real = _mm_setzero_ps();
169 __m128 sum_a_mult_b_imag = _mm_setzero_ps();
/* num_points & ~1u rounds the count down to a multiple of 2. */
171 for (
long unsigned i = 0;
i < (num_points & ~1u);
i += 2) {
183 __m128 a = _mm_loadu_ps((
const float *) &input[i]);
184 __m128 b = _mm_loadu_ps((
const float *) &taps[i]);
/* Duplicate real lanes / imag lanes of taps. */
185 __m128 b_real = _mm_moveldup_ps(b);
186 __m128 b_imag = _mm_movehdup_ps(b);
189 sum_a_mult_b_real = _mm_add_ps(sum_a_mult_b_real, _mm_mul_ps(a, b_real));
/* addsub gives the conjugate sign pattern (sub even lanes, add odd lanes). */
191 sum_a_mult_b_imag = _mm_addsub_ps(sum_a_mult_b_imag, _mm_mul_ps(a, b_imag));
/* Swap re/im within each complex pair to align the two accumulators. */
195 sum_a_mult_b_imag = _mm_shuffle_ps(sum_a_mult_b_imag, sum_a_mult_b_imag,
196 _MM_SHUFFLE(2, 3, 0, 1));
198 __m128 sum = _mm_add_ps(sum_a_mult_b_real, sum_a_mult_b_imag);
/* Fold the upper complex pair onto the lower one. */
200 sum = _mm_add_ps(sum, _mm_shuffle_ps(sum, sum, _MM_SHUFFLE(1, 0, 3, 2)));
/* Store the low two floats (one complex value) into *result. */
202 _mm_storel_pi((__m64 *) result, sum);
/* Tail: handle the final sample when num_points is odd (body lost here). */
205 if (num_points & 1u) {
/* NEON kernel: de-interleaved loads (vld2q) put 4 real parts in val[0] and
 * 4 imag parts in val[1]; 4 complex samples per iteration.
 * NOTE(review): extraction loss — the signature, the declarations of number /
 * a_ptr / b_ptr / accum_result, and (apparently) the per-iteration pointer
 * advances (a_ptr += 4; b_ptr += 4;) are missing here; confirm upstream. */
217 #include <arm_neon.h> 220 unsigned int quarter_points = num_points / 4;
227 float32x4x2_t a_val, b_val, accumulator;
228 float32x4x2_t tmp_imag;
/* Zero the real (val[0]) and imag (val[1]) accumulators. */
229 accumulator.val[0] = vdupq_n_f32(0);
230 accumulator.val[1] = vdupq_n_f32(0);
232 for(number = 0; number < quarter_points; ++number) {
233 a_val = vld2q_f32((
float*)a_ptr);
234 b_val = vld2q_f32((
float*)b_ptr);
/* Partial products: im(a)*re(b) and re(a)*re(b). */
239 tmp_imag.val[1] = vmulq_f32(a_val.val[1], b_val.val[0]);
240 tmp_imag.val[0] = vmulq_f32(a_val.val[0], b_val.val[0]);
/* Conjugate multiply: im = im(a)*re(b) - re(a)*im(b); re = re(a)*re(b) + im(a)*im(b). */
243 tmp_imag.val[1] = vmlsq_f32(tmp_imag.val[1], a_val.val[0], b_val.val[1]);
244 tmp_imag.val[0] = vmlaq_f32(tmp_imag.val[0], a_val.val[1], b_val.val[1]);
246 accumulator.val[0] = vaddq_f32(accumulator.val[0], tmp_imag.val[0]);
247 accumulator.val[1] = vaddq_f32(accumulator.val[1], tmp_imag.val[1]);
/* Re-interleave the accumulator lanes out to memory, then reduce the four
 * partial complex sums with scalar complex additions. */
254 vst2q_f32((
float*)accum_result, accumulator);
255 *result = accum_result[0] + accum_result[1] + accum_result[2] + accum_result[3];
/* Scalar tail for the 0-3 leftover samples. */
258 for(number = quarter_points*4; number < num_points; ++number) {
259 *result += (*a_ptr++) *
lv_conj(*b_ptr++);
/* Aligned-memory half of the header (..._a_H guard) begins here.
 * AVX kernel, identical algorithm to the unaligned variant above but using
 * aligned loads (_mm256_load_ps) — input/taps must be 32-byte aligned.
 * NOTE(review): signature (volk_32fc_x2_conjugate_dot_prod_32fc_a_avx per the
 * index below) and the scalar tail body were lost in this extract. */
268 #ifndef INCLUDED_volk_32fc_x2_conjugate_dot_prod_32fc_a_H 269 #define INCLUDED_volk_32fc_x2_conjugate_dot_prod_32fc_a_H 277 #include <immintrin.h> 283 __m256 sum_a_mult_b_real = _mm256_setzero_ps();
284 __m256 sum_a_mult_b_imag = _mm256_setzero_ps();
/* 4 complex samples (8 floats) per iteration. */
286 for (
long unsigned i = 0;
i < (num_points & ~3u);
i += 4) {
298 __m256 a = _mm256_load_ps((
const float *) &input[i]);
299 __m256 b = _mm256_load_ps((
const float *) &taps[i]);
/* Duplicate real / imag lanes of taps. */
300 __m256 b_real = _mm256_moveldup_ps(b);
301 __m256 b_imag = _mm256_movehdup_ps(b);
304 sum_a_mult_b_real = _mm256_add_ps(sum_a_mult_b_real, _mm256_mul_ps(a, b_real));
/* addsub implements the conjugation sign pattern. */
306 sum_a_mult_b_imag = _mm256_addsub_ps(sum_a_mult_b_imag, _mm256_mul_ps(a, b_imag));
310 sum_a_mult_b_imag = _mm256_permute_ps(sum_a_mult_b_imag, _MM_SHUFFLE(2, 3, 0, 1));
312 __m256 sum = _mm256_add_ps(sum_a_mult_b_real, sum_a_mult_b_imag);
/* Horizontal reduction: cross-lane fold, then in-lane pair fold. */
316 sum = _mm256_add_ps(sum, _mm256_permute2f128_ps(sum, sum, 0x01));
318 sum = _mm256_add_ps(sum, _mm256_permute_ps(sum, _MM_SHUFFLE(1, 0, 3, 2)));
320 __m128 lower = _mm256_extractf128_ps(sum, 0);
321 _mm_storel_pi((__m64 *) result, lower);
/* Scalar tail for leftover samples (body lost in this extract). */
324 for (
long unsigned i = num_points & ~3u; i < num_points; ++
i) {
/* SSE3 kernel, aligned loads (_mm_load_ps; input/taps must be 16-byte
 * aligned); otherwise identical to the unaligned SSE3 variant above.
 * NOTE(review): signature (volk_32fc_x2_conjugate_dot_prod_32fc_a_sse3 per
 * the index below) and the odd-sample tail body were lost in this extract. */
334 #include <xmmintrin.h> 335 #include <pmmintrin.h> 341 __m128 sum_a_mult_b_real = _mm_setzero_ps();
342 __m128 sum_a_mult_b_imag = _mm_setzero_ps();
/* 2 complex samples (4 floats) per iteration. */
344 for (
long unsigned i = 0;
i < (num_points & ~1u);
i += 2) {
356 __m128 a = _mm_load_ps((
const float *) &input[i]);
357 __m128 b = _mm_load_ps((
const float *) &taps[i]);
/* Duplicate real / imag lanes of taps. */
358 __m128 b_real = _mm_moveldup_ps(b);
359 __m128 b_imag = _mm_movehdup_ps(b);
362 sum_a_mult_b_real = _mm_add_ps(sum_a_mult_b_real, _mm_mul_ps(a, b_real));
/* addsub implements the conjugation sign pattern. */
364 sum_a_mult_b_imag = _mm_addsub_ps(sum_a_mult_b_imag, _mm_mul_ps(a, b_imag));
/* Swap re/im pairs, combine accumulators, fold the two complex sums. */
368 sum_a_mult_b_imag = _mm_shuffle_ps(sum_a_mult_b_imag, sum_a_mult_b_imag,
369 _MM_SHUFFLE(2, 3, 0, 1));
371 __m128 sum = _mm_add_ps(sum_a_mult_b_real, sum_a_mult_b_imag);
373 sum = _mm_add_ps(sum, _mm_shuffle_ps(sum, sum, _MM_SHUFFLE(1, 0, 3, 2)));
375 _mm_storel_pi((__m64 *) result, sum);
/* Tail: handle the final sample when num_points is odd (body lost here). */
378 if (num_points & 1u) {
#ifdef LV_HAVE_GENERIC
/*!
 * \brief Portable conjugate dot product (aligned-header copy):
 *        *result = sum_i input[i] * conj(taps[i]).
 *
 * \param result     Output: single complex accumulator (overwritten).
 * \param input      First input vector of complex floats.
 * \param taps       Second input vector; each element is conjugated.
 * \param num_points Number of complex samples in each input vector.
 */
static inline void
volk_32fc_x2_conjugate_dot_prod_32fc_a_generic(lv_32fc_t* result,
                                               const lv_32fc_t* input,
                                               const lv_32fc_t* taps,
                                               unsigned int num_points)
{
  /* Each lv_32fc_t is two floats, i.e. 8 bytes. */
  const unsigned int num_bytes = num_points * 8;

  float* res = (float*)result;
  float* in = (float*)input;
  float* tp = (float*)taps;

  /* Number of 16-byte (two-complex) blocks the unrolled loop consumes. */
  unsigned int n_2_ccomplex_blocks = num_bytes >> 4;
  unsigned int i;

  /* Two independent accumulators ({re, im}) to break the dependency chain. */
  float sum0[2] = { 0, 0 };
  float sum1[2] = { 0, 0 };

  for (i = 0; i < n_2_ccomplex_blocks; ++i) {
    /* (a + jb) * conj(c + jd) = (a*c + b*d) + j(b*c - a*d) */
    sum0[0] += in[0] * tp[0] + in[1] * tp[1];
    sum0[1] += (-in[0] * tp[1]) + in[1] * tp[0];
    sum1[0] += in[2] * tp[2] + in[3] * tp[3];
    sum1[1] += (-in[2] * tp[3]) + in[3] * tp[2];

    /* FIX: advance both data pointers past the two complex samples just
     * consumed; the previous code re-read the first two samples on every
     * iteration, producing n_2_ccomplex_blocks copies of the same term. */
    in += 4;
    tp += 4;
  }

  res[0] = sum0[0] + sum1[0];
  res[1] = sum0[1] + sum1[1];

  /* Odd number of points: fold in the final leftover sample. */
  if (num_bytes >> 3 & 1) {
    *result += input[(num_bytes >> 3) - 1] * lv_conj(taps[(num_bytes >> 3) - 1]);
  }
}
#endif /* LV_HAVE_GENERIC */
/* x86-64 SSE inline-asm kernel. The commented "#" asm lines are the author's
 * pseudo-code for the algorithm; the live instructions process 4 taps per
 * loop with two accumulators (xmm6/xmm7), conjugating taps by XOR-ing the
 * sign mask in `conjugator` onto the imaginary lanes, then reduce and store
 * the complex result via movlps. Code kept byte-identical: the asm is
 * order-sensitive and this extract has fused/dropped lines (e.g. the
 * __VOLK_ASM/__VOLK_VOLATILE wrapper and loop labels — confirm upstream). */
427 #if LV_HAVE_SSE && LV_HAVE_64 429 static inline void volk_32fc_x2_conjugate_dot_prod_32fc_a_sse(
lv_32fc_t* result,
const lv_32fc_t* input,
const lv_32fc_t* taps,
unsigned int num_points) {
431 const unsigned int num_bytes = num_points*8;
/* 16-byte-aligned sign mask: flips the imaginary lane signs (conjugation). */
433 __VOLK_ATTR_ALIGNED(16)
static const uint32_t conjugator[4]= {0x00000000, 0x80000000, 0x00000000, 0x80000000};
437 "# ccomplex_conjugate_dotprod_generic (float* result, const float *input,\n\t" 438 "# const float *taps, unsigned num_bytes)\n\t" 439 "# float sum0 = 0;\n\t" 440 "# float sum1 = 0;\n\t" 441 "# float sum2 = 0;\n\t" 442 "# float sum3 = 0;\n\t" 444 "# sum0 += input[0] * taps[0] - input[1] * taps[1];\n\t" 445 "# sum1 += input[0] * taps[1] + input[1] * taps[0];\n\t" 446 "# sum2 += input[2] * taps[2] - input[3] * taps[3];\n\t" 447 "# sum3 += input[2] * taps[3] + input[3] * taps[2];\n\t" 450 "# } while (--n_2_ccomplex_blocks != 0);\n\t" 451 "# result[0] = sum0 + sum2;\n\t" 452 "# result[1] = sum1 + sum3;\n\t" 453 "# TODO: prefetch and better scheduling\n\t" 454 " xor %%r9, %%r9\n\t" 455 " xor %%r10, %%r10\n\t" 456 " movq %[conjugator], %%r9\n\t" 457 " movq %%rcx, %%rax\n\t" 458 " movaps 0(%%r9), %%xmm8\n\t" 459 " movq %%rcx, %%r8\n\t" 460 " movq %[rsi], %%r9\n\t" 461 " movq %[rdx], %%r10\n\t" 462 " xorps %%xmm6, %%xmm6 # zero accumulators\n\t" 463 " movaps 0(%%r9), %%xmm0\n\t" 464 " xorps %%xmm7, %%xmm7 # zero accumulators\n\t" 465 " movups 0(%%r10), %%xmm2\n\t" 466 " shr $5, %%rax # rax = n_2_ccomplex_blocks / 2\n\t" 468 " xorps %%xmm8, %%xmm2\n\t" 469 " jmp .%=L1_test\n\t" 470 " # 4 taps / loop\n\t" 471 " # something like ?? 
cycles / loop\n\t" 473 "# complex prod: C += A * B, w/ temp Z & Y (or B), xmmPN=$0x8000000080000000\n\t" 474 "# movaps (%%r9), %%xmmA\n\t" 475 "# movaps (%%r10), %%xmmB\n\t" 476 "# movaps %%xmmA, %%xmmZ\n\t" 477 "# shufps $0xb1, %%xmmZ, %%xmmZ # swap internals\n\t" 478 "# mulps %%xmmB, %%xmmA\n\t" 479 "# mulps %%xmmZ, %%xmmB\n\t" 480 "# # SSE replacement for: pfpnacc %%xmmB, %%xmmA\n\t" 481 "# xorps %%xmmPN, %%xmmA\n\t" 482 "# movaps %%xmmA, %%xmmZ\n\t" 483 "# unpcklps %%xmmB, %%xmmA\n\t" 484 "# unpckhps %%xmmB, %%xmmZ\n\t" 485 "# movaps %%xmmZ, %%xmmY\n\t" 486 "# shufps $0x44, %%xmmA, %%xmmZ # b01000100\n\t" 487 "# shufps $0xee, %%xmmY, %%xmmA # b11101110\n\t" 488 "# addps %%xmmZ, %%xmmA\n\t" 489 "# addps %%xmmA, %%xmmC\n\t" 490 "# A=xmm0, B=xmm2, Z=xmm4\n\t" 491 "# A'=xmm1, B'=xmm3, Z'=xmm5\n\t" 492 " movaps 16(%%r9), %%xmm1\n\t" 493 " movaps %%xmm0, %%xmm4\n\t" 494 " mulps %%xmm2, %%xmm0\n\t" 495 " shufps $0xb1, %%xmm4, %%xmm4 # swap internals\n\t" 496 " movaps 16(%%r10), %%xmm3\n\t" 497 " movaps %%xmm1, %%xmm5\n\t" 498 " xorps %%xmm8, %%xmm3\n\t" 499 " addps %%xmm0, %%xmm6\n\t" 500 " mulps %%xmm3, %%xmm1\n\t" 501 " shufps $0xb1, %%xmm5, %%xmm5 # swap internals\n\t" 502 " addps %%xmm1, %%xmm6\n\t" 503 " mulps %%xmm4, %%xmm2\n\t" 504 " movaps 32(%%r9), %%xmm0\n\t" 505 " addps %%xmm2, %%xmm7\n\t" 506 " mulps %%xmm5, %%xmm3\n\t" 508 " movaps 32(%%r10), %%xmm2\n\t" 509 " addps %%xmm3, %%xmm7\n\t" 510 " add $32, %%r10\n\t" 511 " xorps %%xmm8, %%xmm2\n\t" 515 " # We've handled the bulk of multiplies up to here.\n\t" 516 " # Let's sse if original n_2_ccomplex_blocks was odd.\n\t" 517 " # If so, we've got 2 more taps to do.\n\t" 520 " # The count was odd, do 2 more taps.\n\t" 521 " # Note that we've already got mm0/mm2 preloaded\n\t" 522 " # from the main loop.\n\t" 523 " movaps %%xmm0, %%xmm4\n\t" 524 " mulps %%xmm2, %%xmm0\n\t" 525 " shufps $0xb1, %%xmm4, %%xmm4 # swap internals\n\t" 526 " addps %%xmm0, %%xmm6\n\t" 527 " mulps %%xmm4, %%xmm2\n\t" 528 " addps %%xmm2, 
%%xmm7\n\t" 530 " # neg inversor\n\t" 531 " xorps %%xmm1, %%xmm1\n\t" 532 " mov $0x80000000, %%r9\n\t" 533 " movd %%r9, %%xmm1\n\t" 534 " shufps $0x11, %%xmm1, %%xmm1 # b00010001 # 0 -0 0 -0\n\t" 536 " xorps %%xmm1, %%xmm6\n\t" 537 " movaps %%xmm6, %%xmm2\n\t" 538 " unpcklps %%xmm7, %%xmm6\n\t" 539 " unpckhps %%xmm7, %%xmm2\n\t" 540 " movaps %%xmm2, %%xmm3\n\t" 541 " shufps $0x44, %%xmm6, %%xmm2 # b01000100\n\t" 542 " shufps $0xee, %%xmm3, %%xmm6 # b11101110\n\t" 543 " addps %%xmm2, %%xmm6\n\t" 544 " # xmm6 = r1 i2 r3 i4\n\t" 545 " movhlps %%xmm6, %%xmm4 # xmm4 = r3 i4 ?? ??\n\t" 546 " addps %%xmm4, %%xmm6 # xmm6 = r1+r3 i2+i4 ?? ??\n\t" 547 " movlps %%xmm6, (%[rdi]) # store low 2x32 bits (complex) to memory\n\t" 549 :[rsi]
/* Input operands and register clobbers of the asm statement. */
"r" (input), [rdx]
"r" (taps),
"c" (num_bytes), [rdi]
"r" (result), [conjugator]
"r" (conjugator)
550 :
"rax",
"r8",
"r9",
"r10" 553 int getem = num_bytes % 16;
/* Scalar cleanup for the leftover (non-16-byte-multiple) samples. */
555 for(; getem > 0; getem -= 8) {
556 *result += (input[(num_bytes >> 3) - 1] *
lv_conj(taps[(num_bytes >> 3) - 1]));
/* x86-32 SSE inline-asm kernel — same algorithm as the 64-bit version but
 * register-starved: named operands ([eax]/[edx]/[ecx]/[out]) replace the
 * hard-coded r8-r10, the conjugator mask is reloaded into xmm1 each loop,
 * and *out doubles as scratch memory for the count and sign constant.
 * Code kept byte-identical: the asm is order-sensitive and this extract has
 * fused/dropped lines (loop labels, asm wrapper — confirm upstream). */
561 #if LV_HAVE_SSE && LV_HAVE_32 562 static inline void volk_32fc_x2_conjugate_dot_prod_32fc_a_sse_32(
lv_32fc_t* result,
const lv_32fc_t* input,
const lv_32fc_t* taps,
unsigned int num_points) {
564 const unsigned int num_bytes = num_points*8;
/* 16-byte-aligned sign mask: flips the imaginary lane signs (conjugation). */
566 __VOLK_ATTR_ALIGNED(16)
static const uint32_t conjugator[4]= {0x00000000, 0x80000000, 0x00000000, 0x80000000};
/* bound = number of 2-complex blocks; leftovers = trailing bytes. */
568 int bound = num_bytes >> 4;
569 int leftovers = num_bytes % 16;
574 " #movl %%esp, %%ebp\n\t" 575 " #movl 12(%%ebp), %%eax # input\n\t" 576 " #movl 16(%%ebp), %%edx # taps\n\t" 577 " #movl 20(%%ebp), %%ecx # n_bytes\n\t" 578 " movaps 0(%[conjugator]), %%xmm1\n\t" 579 " xorps %%xmm6, %%xmm6 # zero accumulators\n\t" 580 " movaps 0(%[eax]), %%xmm0\n\t" 581 " xorps %%xmm7, %%xmm7 # zero accumulators\n\t" 582 " movaps 0(%[edx]), %%xmm2\n\t" 583 " movl %[ecx], (%[out])\n\t" 584 " shrl $5, %[ecx] # ecx = n_2_ccomplex_blocks / 2\n\t" 586 " xorps %%xmm1, %%xmm2\n\t" 587 " jmp .%=L1_test\n\t" 588 " # 4 taps / loop\n\t" 589 " # something like ?? cycles / loop\n\t" 591 "# complex prod: C += A * B, w/ temp Z & Y (or B), xmmPN=$0x8000000080000000\n\t" 592 "# movaps (%[eax]), %%xmmA\n\t" 593 "# movaps (%[edx]), %%xmmB\n\t" 594 "# movaps %%xmmA, %%xmmZ\n\t" 595 "# shufps $0xb1, %%xmmZ, %%xmmZ # swap internals\n\t" 596 "# mulps %%xmmB, %%xmmA\n\t" 597 "# mulps %%xmmZ, %%xmmB\n\t" 598 "# # SSE replacement for: pfpnacc %%xmmB, %%xmmA\n\t" 599 "# xorps %%xmmPN, %%xmmA\n\t" 600 "# movaps %%xmmA, %%xmmZ\n\t" 601 "# unpcklps %%xmmB, %%xmmA\n\t" 602 "# unpckhps %%xmmB, %%xmmZ\n\t" 603 "# movaps %%xmmZ, %%xmmY\n\t" 604 "# shufps $0x44, %%xmmA, %%xmmZ # b01000100\n\t" 605 "# shufps $0xee, %%xmmY, %%xmmA # b11101110\n\t" 606 "# addps %%xmmZ, %%xmmA\n\t" 607 "# addps %%xmmA, %%xmmC\n\t" 608 "# A=xmm0, B=xmm2, Z=xmm4\n\t" 609 "# A'=xmm1, B'=xmm3, Z'=xmm5\n\t" 610 " movaps 16(%[edx]), %%xmm3\n\t" 611 " movaps %%xmm0, %%xmm4\n\t" 612 " xorps %%xmm1, %%xmm3\n\t" 613 " mulps %%xmm2, %%xmm0\n\t" 614 " movaps 16(%[eax]), %%xmm1\n\t" 615 " shufps $0xb1, %%xmm4, %%xmm4 # swap internals\n\t" 616 " movaps %%xmm1, %%xmm5\n\t" 617 " addps %%xmm0, %%xmm6\n\t" 618 " mulps %%xmm3, %%xmm1\n\t" 619 " shufps $0xb1, %%xmm5, %%xmm5 # swap internals\n\t" 620 " addps %%xmm1, %%xmm6\n\t" 621 " movaps 0(%[conjugator]), %%xmm1\n\t" 622 " mulps %%xmm4, %%xmm2\n\t" 623 " movaps 32(%[eax]), %%xmm0\n\t" 624 " addps %%xmm2, %%xmm7\n\t" 625 " mulps %%xmm5, %%xmm3\n\t" 626 " addl $32, 
%[eax]\n\t" 627 " movaps 32(%[edx]), %%xmm2\n\t" 628 " addps %%xmm3, %%xmm7\n\t" 629 " xorps %%xmm1, %%xmm2\n\t" 630 " addl $32, %[edx]\n\t" 634 " # We've handled the bulk of multiplies up to here.\n\t" 635 " # Let's sse if original n_2_ccomplex_blocks was odd.\n\t" 636 " # If so, we've got 2 more taps to do.\n\t" 637 " movl 0(%[out]), %[ecx] # n_2_ccomplex_blocks\n\t" 638 " shrl $4, %[ecx]\n\t" 639 " andl $1, %[ecx]\n\t" 641 " # The count was odd, do 2 more taps.\n\t" 642 " # Note that we've already got mm0/mm2 preloaded\n\t" 643 " # from the main loop.\n\t" 644 " movaps %%xmm0, %%xmm4\n\t" 645 " mulps %%xmm2, %%xmm0\n\t" 646 " shufps $0xb1, %%xmm4, %%xmm4 # swap internals\n\t" 647 " addps %%xmm0, %%xmm6\n\t" 648 " mulps %%xmm4, %%xmm2\n\t" 649 " addps %%xmm2, %%xmm7\n\t" 651 " # neg inversor\n\t" 652 " #movl 8(%%ebp), %[eax] \n\t" 653 " xorps %%xmm1, %%xmm1\n\t" 654 " movl $0x80000000, (%[out])\n\t" 655 " movss (%[out]), %%xmm1\n\t" 656 " shufps $0x11, %%xmm1, %%xmm1 # b00010001 # 0 -0 0 -0\n\t" 658 " xorps %%xmm1, %%xmm6\n\t" 659 " movaps %%xmm6, %%xmm2\n\t" 660 " unpcklps %%xmm7, %%xmm6\n\t" 661 " unpckhps %%xmm7, %%xmm2\n\t" 662 " movaps %%xmm2, %%xmm3\n\t" 663 " shufps $0x44, %%xmm6, %%xmm2 # b01000100\n\t" 664 " shufps $0xee, %%xmm3, %%xmm6 # b11101110\n\t" 665 " addps %%xmm2, %%xmm6\n\t" 666 " # xmm6 = r1 i2 r3 i4\n\t" 667 " #movl 8(%%ebp), %[eax] # @result\n\t" 668 " movhlps %%xmm6, %%xmm4 # xmm4 = r3 i4 ?? ??\n\t" 669 " addps %%xmm4, %%xmm6 # xmm6 = r1+r3 i2+i4 ?? ??\n\t" 670 " movlps %%xmm6, (%[out]) # store low 2x32 bits (complex) to memory\n\t" 673 : [eax]
/* Input operands of the asm statement. */
"r" (input), [edx]
"r" (taps), [ecx]
"r" (num_bytes), [out]
"r" (result), [conjugator]
"r" (conjugator)
/* Scalar cleanup: fold leftover samples past the 2-complex blocks into *result. */
676 for(; leftovers > 0; leftovers -= 8) {
677 *result += (input[(bound << 1)] *
lv_conj(taps[(bound << 1)]));
#define __VOLK_ASM
Definition: volk_common.h:54
#define __VOLK_VOLATILE
Definition: volk_common.h:55
static void volk_32fc_x2_conjugate_dot_prod_32fc_neon(lv_32fc_t *result, const lv_32fc_t *input, const lv_32fc_t *taps, unsigned int num_points)
Definition: volk_32fc_x2_conjugate_dot_prod_32fc.h:218
#define lv_conj(x)
Definition: volk_complex.h:87
static void volk_32fc_x2_conjugate_dot_prod_32fc_a_sse3(lv_32fc_t *result, const lv_32fc_t *input, const lv_32fc_t *taps, unsigned int num_points)
Definition: volk_32fc_x2_conjugate_dot_prod_32fc.h:337
#define lv_cmake(r, i)
Definition: volk_complex.h:64
static void volk_32fc_x2_conjugate_dot_prod_32fc_a_avx(lv_32fc_t *result, const lv_32fc_t *input, const lv_32fc_t *taps, unsigned int num_points)
Definition: volk_32fc_x2_conjugate_dot_prod_32fc.h:279
static void volk_32fc_x2_conjugate_dot_prod_32fc_generic(lv_32fc_t *result, const lv_32fc_t *input, const lv_32fc_t *taps, unsigned int num_points)
Definition: volk_32fc_x2_conjugate_dot_prod_32fc.h:68
#define __VOLK_PREFETCH(addr)
Definition: volk_common.h:53
static void volk_32fc_x2_conjugate_dot_prod_32fc_a_generic(lv_32fc_t *result, const lv_32fc_t *input, const lv_32fc_t *taps, unsigned int num_points)
Definition: volk_32fc_x2_conjugate_dot_prod_32fc.h:393
for i
Definition: volk_config_fixed.tmpl.h:25
#define __VOLK_ATTR_ALIGNED(x)
Definition: volk_common.h:47
float complex lv_32fc_t
Definition: volk_complex.h:61
static void volk_32fc_x2_conjugate_dot_prod_32fc_u_avx(lv_32fc_t *result, const lv_32fc_t *input, const lv_32fc_t *taps, unsigned int num_points)
Definition: volk_32fc_x2_conjugate_dot_prod_32fc.h:105
static void volk_32fc_x2_conjugate_dot_prod_32fc_u_sse3(lv_32fc_t *result, const lv_32fc_t *input, const lv_32fc_t *taps, unsigned int num_points)
Definition: volk_32fc_x2_conjugate_dot_prod_32fc.h:164
#define lv_creal(x)
Definition: volk_complex.h:83
#define lv_cimag(x)
Definition: volk_complex.h:85