67#ifndef INCLUDE_VOLK_VOLK_NEON_INTRINSICS_H_
68#define INCLUDE_VOLK_VOLK_NEON_INTRINSICS_H_
/* Fragment of a complex magnitude-squared helper (enclosing signature is in
 * an elided part of the file). Computes |z|^2 = re^2 + im^2 per lane, with
 * val[0] = real parts and val[1] = imaginary parts of 4 complex floats.
 * NOTE(review): the leading integers on these lines appear to be line-number
 * residue from extraction, not program tokens; left untouched here. */
76 float32x4_t iValue, qValue, result;
77 iValue = vmulq_f32(cmplxValue.val[0], cmplxValue.val[0]); /* re * re */
78 qValue = vmulq_f32(cmplxValue.val[1], cmplxValue.val[1]); /* im * im */
79 result = vaddq_f32(iValue, qValue); /* re^2 + im^2 */
/* Fragment of a reciprocal-square-root helper: approximates 1/sqrt(x) per
 * lane. Starts from the hardware estimate (vrsqrteq_f32, ~8-bit accuracy)
 * and refines it with two Newton-Raphson steps; vrsqrtsq_f32(a, b) computes
 * (3 - a*b)/2, so each vmulq_f32(vrsqrtsq_f32(x*r, r), r) roughly doubles
 * the number of accurate bits. Enclosing signature not visible here. */
86 float32x4_t sqrt_reciprocal = vrsqrteq_f32(x);
87 sqrt_reciprocal = vmulq_f32(
88 vrsqrtsq_f32(vmulq_f32(x, sqrt_reciprocal), sqrt_reciprocal), sqrt_reciprocal);
/* Second refinement iteration — identical form to the first. */
89 sqrt_reciprocal = vmulq_f32(
90 vrsqrtsq_f32(vmulq_f32(x, sqrt_reciprocal), sqrt_reciprocal), sqrt_reciprocal);
92 return sqrt_reciprocal;
/* Fragment of a reciprocal helper: approximates 1/x per lane. Hardware
 * estimate (vrecpeq_f32) refined by two Newton-Raphson steps;
 * vrecpsq_f32(x, r) computes (2 - x*r), so r *= (2 - x*r) converges toward
 * 1/x. The function's signature and return statement are elided from this
 * chunk (the return of `recip` presumably follows). */
99 float32x4_t recip = vrecpeq_f32(x);
100 recip = vmulq_f32(vrecpsq_f32(x, recip), recip);
101 recip = vmulq_f32(vrecpsq_f32(x, recip), recip);
/* Fragment of a 4-wide complex multiply: c = a * b with de-interleaved
 * storage (val[0] = real lanes, val[1] = imaginary lanes).
 *   real(c) = ar*br - ai*bi
 *   imag(c) = ar*bi + ai*br
 * Enclosing signature and return are elided from this chunk. */
109 float32x4x2_t tmp_real;
110 float32x4x2_t tmp_imag;
/* Partial products for the real part. */
115 tmp_real.val[0] = vmulq_f32(a_val.val[0], b_val.val[0]); /* ar*br */
117 tmp_real.val[1] = vmulq_f32(a_val.val[1], b_val.val[1]); /* ai*bi */
/* Partial products for the imaginary part. */
120 tmp_imag.val[0] = vmulq_f32(a_val.val[0], b_val.val[1]); /* ar*bi */
122 tmp_imag.val[1] = vmulq_f32(a_val.val[1], b_val.val[0]); /* ai*br */
124 c_val.val[0] = vsubq_f32(tmp_real.val[0], tmp_real.val[1]);
125 c_val.val[1] = vaddq_f32(tmp_imag.val[0], tmp_imag.val[1]);
/* Fragment of a degree-7 polynomial evaluator (Estrin-style scheme).
 * vmlaq_f32(a, b, c) = a + b*c. With the interleaved coefficient layout
 * used here, the value computed is:
 *   coeffs[0] + coeffs[4]*x + coeffs[2]*x^2 + coeffs[6]*x^3
 *   + (coeffs[1] + coeffs[5]*x + coeffs[3]*x^2 + coeffs[7]*x^3) * x^4
 * i.e. coefficient order {c0, c4, c2, c6, c1, c5, c3, c7} by power of x —
 * callers must supply the table in that interleaved order. Enclosing
 * signature and return are elided from this chunk. */
132 float32x4_t cA = vmlaq_f32(coeffs[0], coeffs[4], x); /* c0 + c4*x  */
133 float32x4_t cB = vmlaq_f32(coeffs[2], coeffs[6], x); /* c2 + c6*x  */
134 float32x4_t cC = vmlaq_f32(coeffs[1], coeffs[5], x); /* c1 + c5*x  */
135 float32x4_t cD = vmlaq_f32(coeffs[3], coeffs[7], x); /* c3 + c7*x  */
136 float32x4_t x2 = vmulq_f32(x, x);
137 float32x4_t x4 = vmulq_f32(x2, x2);
138 float32x4_t res = vmlaq_f32(vmlaq_f32(cA, cB, x2), vmlaq_f32(cC, cD, x2), x4);
/* Fragment of a natural-log approximation (the constants match the
 * polynomial-table approach used by ARM's Compute Library vlogq):
 * decompose x = m' * 2^m, evaluate a degree-7 polynomial on the reduced
 * mantissa, then add m * ln(2). The polynomial-evaluation lines (presumably
 * a call to the Taylor-poly helper, original lines ~161-165) and the
 * enclosing signature/return are elided from this chunk. */
146 const float32x4_t log_tab[8] = {
/* Polynomial coefficients in the interleaved order the evaluator expects. */
147 vdupq_n_f32(-2.29561495781f), vdupq_n_f32(-2.47071170807f),
148 vdupq_n_f32(-5.68692588806f), vdupq_n_f32(-0.165253549814f),
149 vdupq_n_f32(5.17591238022f), vdupq_n_f32(0.844007015228f),
150 vdupq_n_f32(4.58445882797f), vdupq_n_f32(0.0141278216615f),
153 const int32x4_t CONST_127 = vdupq_n_s32(127); /* IEEE-754 exponent bias */
154 const float32x4_t CONST_LN2 = vdupq_n_f32(0.6931471805f); /* ln(2) */
/* Extract unbiased exponent m from the float bit pattern (bits 23..30). */
157 int32x4_t m = vsubq_s32(
158 vreinterpretq_s32_u32(vshrq_n_u32(vreinterpretq_u32_f32(x), 23)), CONST_127);
/* Subtract m<<23 from the bit pattern to normalize the mantissa. */
160 vreinterpretq_f32_s32(vsubq_s32(vreinterpretq_s32_f32(x), vshlq_n_s32(m, 23)));
/* Reassemble: log(x) = poly(mantissa) + m * ln(2). */
166 poly = vmlaq_f32(poly, vcvtq_f32_s32(m), CONST_LN2);
/* Fragment of a simultaneous sin/cos evaluator. The constants and the
 * quadrant/sign bookkeeping match the Cephes-derived SIMD sincos widely
 * used in NEON math libraries (cf. Julien Pommier's neon_mathfun):
 * range-reduce x by multiples of pi/4, pick the sine or cosine minimax
 * polynomial per quadrant, then patch signs. Enclosing signature, the
 * |x| step (original ~197-198), and the final return are elided here. */
/* Extended-precision -pi/4 split across three constants so the
 * range reduction x -= y*(pi/4) loses minimal precision. */
176 const float32x4_t c_minus_cephes_DP1 = vdupq_n_f32(-0.78515625);
177 const float32x4_t c_minus_cephes_DP2 = vdupq_n_f32(-2.4187564849853515625e-4);
178 const float32x4_t c_minus_cephes_DP3 = vdupq_n_f32(-3.77489497744594108e-8);
/* Minimax polynomial coefficients for sin (odd part) and cos (even part). */
179 const float32x4_t c_sincof_p0 = vdupq_n_f32(-1.9515295891e-4);
180 const float32x4_t c_sincof_p1 = vdupq_n_f32(8.3321608736e-3);
181 const float32x4_t c_sincof_p2 = vdupq_n_f32(-1.6666654611e-1);
182 const float32x4_t c_coscof_p0 = vdupq_n_f32(2.443315711809948e-005);
183 const float32x4_t c_coscof_p1 = vdupq_n_f32(-1.388731625493765e-003);
184 const float32x4_t c_coscof_p2 = vdupq_n_f32(4.166664568298827e-002);
185 const float32x4_t c_cephes_FOPI = vdupq_n_f32(1.27323954473516); /* 4/pi */
187 const float32x4_t CONST_1 = vdupq_n_f32(1.f);
188 const float32x4_t CONST_1_2 = vdupq_n_f32(0.5f);
189 const float32x4_t CONST_0 = vdupq_n_f32(0.f);
190 const uint32x4_t CONST_2 = vdupq_n_u32(2);
191 const uint32x4_t CONST_4 = vdupq_n_u32(4);
195 uint32x4_t sign_mask_sin, sign_mask_cos;
/* sin is odd: remember where x was negative (x is presumably replaced by
 * |x| in an elided line before the scaling below — TODO confirm). */
196 sign_mask_sin = vcltq_f32(x, CONST_0);
/* Scale by 4/pi so the integer part of y indexes the octant. */
199 float32x4_t y = vmulq_f32(x, c_cephes_FOPI);
202 emm2 = vcvtq_u32_f32(y);
/* j = (j+1) & ~1: round the octant index up to an even value. */
204 emm2 = vaddq_u32(emm2, vdupq_n_u32(1));
205 emm2 = vandq_u32(emm2, vdupq_n_u32(~1));
206 y = vcvtq_f32_u32(emm2);
/* Bit 1 of the octant selects which polynomial (sin vs cos) applies. */
212 const uint32x4_t poly_mask = vtstq_u32(emm2, CONST_2);
/* Extended-precision range reduction: x = x - y*pi/4 in three steps. */
215 x = vmlaq_f32(x, y, c_minus_cephes_DP1);
216 x = vmlaq_f32(x, y, c_minus_cephes_DP2);
217 x = vmlaq_f32(x, y, c_minus_cephes_DP3);
/* Quadrant-dependent sign flips for the two results. */
219 sign_mask_sin = veorq_u32(sign_mask_sin, vtstq_u32(emm2, CONST_4));
220 sign_mask_cos = vtstq_u32(vsubq_u32(emm2, CONST_2), CONST_4);
225 float32x4_t z = vmulq_f32(x, x);
/* Cosine polynomial: y1 = 1 - z/2 + z^2*(p2 + z*(p1 + z*p0)). */
227 y1 = vmlaq_f32(c_coscof_p1, z, c_coscof_p0);
228 y1 = vmlaq_f32(c_coscof_p2, z, y1);
229 y1 = vmulq_f32(y1, z);
230 y1 = vmulq_f32(y1, z);
231 y1 = vmlsq_f32(y1, z, CONST_1_2);
232 y1 = vaddq_f32(y1, CONST_1);
/* Sine polynomial: y2 = x + x*z*(p2 + z*(p1 + z*p0)). */
234 y2 = vmlaq_f32(c_sincof_p1, z, c_sincof_p0);
235 y2 = vmlaq_f32(c_sincof_p2, z, y2);
236 y2 = vmulq_f32(y2, z);
237 y2 = vmlaq_f32(x, x, y2);
/* Select polynomial per lane, then apply the sign masks. */
240 const float32x4_t ys = vbslq_f32(poly_mask, y1, y2);
241 const float32x4_t yc = vbslq_f32(poly_mask, y2, y1);
243 float32x4x2_t sincos;
244 sincos.val[0] = vbslq_f32(sign_mask_sin, vnegq_f32(ys), ys); /* sin */
245 sincos.val[1] = vbslq_f32(sign_mask_cos, yc, vnegq_f32(yc)); /* cos */
/* Tail of a sin wrapper: the elided lines presumably call the sincos helper
 * and this returns its sine half (val[0]) — enclosing function not visible. */
253 return sincos.val[0];
/* Tail of a cos wrapper: returns the cosine half (val[1]) of a sincos
 * result computed in elided lines — enclosing function not visible. */
259 return sincos.val[1];
/* Tail of a tan wrapper: tan = sin * (1/cos), using the reciprocal helper
 * rather than a division (NEON has no vector float divide pre-ARMv8). */
265 return vmulq_f32(sincos.val[0],
_vinvq_f32(sincos.val[1]));
/* Fragment of a squared-deviation accumulator: computes (aux*val - acc)^2
 * and folds it, scaled by `rec`, into sq_acc. Two alternative return paths
 * are visible — presumably selected by elided preprocessor conditionals
 * (vfmaq_f32 requires ARMv8/AArch64; the mul+add pair is the fallback) —
 * TODO confirm against the elided #if/#else lines. */
274 aux = vmulq_f32(aux,
val);
275 aux = vsubq_f32(aux, acc); /* deviation */
276 aux = vmulq_f32(aux, aux); /* deviation squared */
/* ARMv8 path: fused multiply-add, sq_acc + aux*rec in one op. */
278 return vfmaq_f32(sq_acc, aux, rec);
/* Fallback path: separate multiply and add. */
280 aux = vmulq_f32(aux, rec);
281 return vaddq_f32(sq_acc, aux);