/** Logarithm polynomial coefficients, broadcast to all four lanes.
 *  Consumed by vtaylor_polyq_f32() inside vlogq_f32(); order matches the
 *  even/odd coefficient split that routine expects. */
const std::array<float32x4_t, 8> log_tab = {{
    vdupq_n_f32(-2.29561495781f),
    vdupq_n_f32(-2.47071170807f),
    vdupq_n_f32(-5.68692588806f),
    vdupq_n_f32(-0.165253549814f),
    vdupq_n_f32(5.17591238022f),
    vdupq_n_f32(0.844007015228f),
    vdupq_n_f32(4.58445882797f),
    vdupq_n_f32(0.0141278216615f),
}};
#ifndef DOXYGEN_SKIP_THIS
/** Fused multiply-add a + b * c, using a real FMA instruction when the
 *  target advertises one and falling back to multiply-accumulate otherwise.
 *
 *  @param a Addend.
 *  @param b First multiplicand.
 *  @param c Second multiplicand.
 *  @return a + b * c per lane.
 */
inline float32x4_t prefer_vfmaq_f32(float32x4_t a, float32x4_t b, float32x4_t c)
{
#ifdef __ARM_FEATURE_FMA
    return vfmaq_f32(a, b, c); // single-rounding fused op
#else  // __ARM_FEATURE_FMA
    return vmlaq_f32(a, b, c); // separate multiply + add
#endif // __ARM_FEATURE_FMA
}
/** Per-lane floor of four fp32 values.
 *
 *  Truncates toward zero via an int round-trip, then subtracts 1 from lanes
 *  where truncation rounded up (i.e. negative non-integers).
 *
 *  @param val Input vector.
 *  @return floor(val) per lane.
 */
inline float32x4_t vfloorq_f32(float32x4_t val)
{
    static const float32x4_t CONST_1 = vdupq_n_f32(1.f);

    const int32x4_t   z = vcvtq_s32_f32(val); // truncate toward zero
    const float32x4_t r = vcvtq_f32_s32(z);

    // If r > val the truncation went up (negative input): step down by 1.
    return vbslq_f32(vcgtq_f32(r, val), vsubq_f32(r, CONST_1), r);
}
/** Per-lane round-to-nearest-even (RTE) of four fp32 values.
 *
 *  On AArch64 this is the single FRINTN instruction; otherwise it is emulated
 *  with floor + tie-to-even fix-up.
 *
 *  @param val Input vector.
 *  @return val rounded to nearest, ties to even, per lane.
 */
inline float32x4_t vroundq_rte_f32(float32x4_t val)
{
#ifdef __aarch64__
    return vrndnq_f32(val);
#else  // __aarch64__
    static const float32x4_t CONST_HALF_FLOAT = vdupq_n_f32(0.5f);
    static const float32x4_t CONST_1_FLOAT    = vdupq_n_f32(1.f);
    static const int32x4_t   CONST_1_INT      = vdupq_n_s32(1);

    const float32x4_t floor_val = vfloorq_f32(val);
    const float32x4_t diff      = vsubq_f32(val, floor_val);
    // 2^23: at or beyond this magnitude every fp32 is already an integer.
    const float32x4_t fp32_upper_limit = vreinterpretq_f32_u32(vdupq_n_u32(0x4B000000));

    /*
     * Select floor_val when the fraction is below 0.5, or when it is exactly
     * 0.5 and floor_val is even (ties-to-even); otherwise floor_val + 1.
     */
    float32x4_t rounded_val = vbslq_f32(
        vorrq_u32(vcltq_f32(diff, CONST_HALF_FLOAT),
                  vandq_u32(vceqq_f32(diff, CONST_HALF_FLOAT),
                            vmvnq_u32(vtstq_s32(vandq_s32(vcvtq_s32_f32(floor_val), CONST_1_INT), CONST_1_INT)))),
        floor_val, vaddq_f32(floor_val, CONST_1_FLOAT));

    // Values >= 2^23 in magnitude are passed through unchanged.
    float32x4_t result = vbslq_f32(vcgeq_f32(vabsq_f32(val), fp32_upper_limit), val, rounded_val);
    return result;
#endif // __aarch64__
}
/** Reciprocal square root of two fp32 lanes.
 *
 *  Hardware estimate refined by two Newton-Raphson steps via vrsqrts.
 *
 *  @param x Input vector (lanes must be > 0 for a finite result).
 *  @return 1 / sqrt(x) per lane.
 */
inline float32x2_t vinvsqrt_f32(float32x2_t x)
{
    float32x2_t sqrt_reciprocal = vrsqrte_f32(x);
    sqrt_reciprocal = vmul_f32(vrsqrts_f32(vmul_f32(x, sqrt_reciprocal), sqrt_reciprocal), sqrt_reciprocal);
    sqrt_reciprocal = vmul_f32(vrsqrts_f32(vmul_f32(x, sqrt_reciprocal), sqrt_reciprocal), sqrt_reciprocal);
    return sqrt_reciprocal;
}
/** Reciprocal square root of four fp32 lanes.
 *
 *  Hardware estimate refined by two Newton-Raphson steps via vrsqrts.
 *
 *  @param x Input vector (lanes must be > 0 for a finite result).
 *  @return 1 / sqrt(x) per lane.
 */
inline float32x4_t vinvsqrtq_f32(float32x4_t x)
{
    float32x4_t sqrt_reciprocal = vrsqrteq_f32(x);
    sqrt_reciprocal = vmulq_f32(vrsqrtsq_f32(vmulq_f32(x, sqrt_reciprocal), sqrt_reciprocal), sqrt_reciprocal);
    sqrt_reciprocal = vmulq_f32(vrsqrtsq_f32(vmulq_f32(x, sqrt_reciprocal), sqrt_reciprocal), sqrt_reciprocal);
    return sqrt_reciprocal;
}
/** Reciprocal of two fp32 lanes.
 *
 *  Hardware estimate refined by two Newton-Raphson steps via vrecps.
 *
 *  @param x Input vector (non-zero lanes for a finite result).
 *  @return 1 / x per lane.
 */
inline float32x2_t vinv_f32(float32x2_t x)
{
    float32x2_t recip = vrecpe_f32(x);
    recip = vmul_f32(vrecps_f32(x, recip), recip);
    recip = vmul_f32(vrecps_f32(x, recip), recip);
    return recip;
}
/** Reciprocal of four fp32 lanes.
 *
 *  Hardware estimate refined by two Newton-Raphson steps via vrecps.
 *
 *  @param x Input vector (non-zero lanes for a finite result).
 *  @return 1 / x per lane.
 */
inline float32x4_t vinvq_f32(float32x4_t x)
{
    float32x4_t recip = vrecpeq_f32(x);
    recip = vmulq_f32(vrecpsq_f32(x, recip), recip);
    recip = vmulq_f32(vrecpsq_f32(x, recip), recip);
    return recip;
}
/** Evaluate a degree-7 polynomial at x using Estrin's scheme.
 *
 *  Coefficients are laid out so that coeffs[i] multiplies x^i after the
 *  even/odd regrouping below (A,B,C,D each pair one low with one high term).
 *
 *  @param x      Evaluation point per lane.
 *  @param coeffs Eight broadcast coefficient vectors.
 *  @return Polynomial value per lane.
 */
inline float32x4_t vtaylor_polyq_f32(float32x4_t x, const std::array<float32x4_t, 8> &coeffs)
{
    float32x4_t A   = vmlaq_f32(coeffs[0], coeffs[4], x);
    float32x4_t B   = vmlaq_f32(coeffs[2], coeffs[6], x);
    float32x4_t C   = vmlaq_f32(coeffs[1], coeffs[5], x);
    float32x4_t D   = vmlaq_f32(coeffs[3], coeffs[7], x);
    float32x4_t x2  = vmulq_f32(x, x);
    float32x4_t x4  = vmulq_f32(x2, x2);
    // (A + B*x^2) + (C + D*x^2) * x^4
    float32x4_t res = vmlaq_f32(vmlaq_f32(A, B, x2), vmlaq_f32(C, D, x2), x4);
    return res;
}
/** Degree-5 minimax polynomial coefficients for e^r on the reduced range,
 *  stored as fp32 bit patterns and reinterpreted in vexpq_f32().
 *  NOTE(review): values reconstructed from the upstream source — verify. */
static const uint32_t exp_f32_coeff[] = {
    0x3f7ffff6, // x^1: 0x1.ffffecp-1f
    0x3efffedb, // x^2: 0x1.fffdb6p-2f
    0x3e2aaf33, // x^3: 0x1.555e66p-3f
    0x3d2b9f17, // x^4: 0x1.573e2ep-5f
    0x3c072010, // x^5: 0x1.0e4020p-7f
};
/** Per-lane exponential e^x of four fp32 values.
 *
 *  Range reduction: x = n*ln(2) + r, so e^x = 2^n * e^r, with e^r evaluated
 *  by the degree-5 polynomial in exp_f32_coeff. Inputs below min_input
 *  flush to 0 and above max_input saturate to +inf.
 *
 *  @param x Input vector.
 *  @return e^x per lane.
 */
inline float32x4_t vexpq_f32(float32x4_t x)
{
    const auto c1 = vreinterpretq_f32_u32(vdupq_n_u32(exp_f32_coeff[0]));
    const auto c2 = vreinterpretq_f32_u32(vdupq_n_u32(exp_f32_coeff[1]));
    const auto c3 = vreinterpretq_f32_u32(vdupq_n_u32(exp_f32_coeff[2]));
    const auto c4 = vreinterpretq_f32_u32(vdupq_n_u32(exp_f32_coeff[3]));
    const auto c5 = vreinterpretq_f32_u32(vdupq_n_u32(exp_f32_coeff[4]));

    const auto shift      = vreinterpretq_f32_u32(vdupq_n_u32(0x4b00007f)); // 2^23 + 127 = 0x1.0000fep23f
    const auto inv_ln2    = vreinterpretq_f32_u32(vdupq_n_u32(0x3fb8aa3b)); // 1 / ln(2)
    const auto neg_ln2_hi = vreinterpretq_f32_u32(vdupq_n_u32(0xbf317200)); // -ln(2) high bits
    const auto neg_ln2_lo = vreinterpretq_f32_u32(vdupq_n_u32(0xb5bfbe8e)); // -ln(2) low bits

    const auto inf       = vdupq_n_f32(std::numeric_limits<float>::infinity());
    const auto max_input = vdupq_n_f32(88.37f);  // approx ln(2^127.5)
    const auto zero      = vdupq_n_f32(0.f);
    const auto min_input = vdupq_n_f32(-86.64f); // approx ln(2^-125)

    // n = round(x / ln2); the shift trick leaves n+127 in the low mantissa
    // bits of z, so shifting left by 23 builds the 2^n scale directly.
    const auto z     = prefer_vfmaq_f32(shift, x, inv_ln2);
    const auto n     = z - shift;
    const auto scale = vreinterpretq_f32_u32(vreinterpretq_u32_f32(z) << 23);

    // r = x - n * ln(2), computed in two pieces for extra precision.
    const auto r_hi = prefer_vfmaq_f32(x, n, neg_ln2_hi);
    const auto r    = prefer_vfmaq_f32(r_hi, n, neg_ln2_lo);

    // Evaluate the degree-5 polynomial for e^r - 1 (Estrin pairing).
    const auto r2 = r * r;

    const auto p1     = c1 * r;
    const auto p23    = prefer_vfmaq_f32(c2, c3, r);
    const auto p45    = prefer_vfmaq_f32(c4, c5, r);
    const auto p2345  = prefer_vfmaq_f32(p23, p45, r2);
    const auto p12345 = prefer_vfmaq_f32(p1, p2345, r2);

    auto poly = prefer_vfmaq_f32(scale, p12345, scale); // scale * (1 + poly)

    // Handle underflow and overflow.
    poly = vbslq_f32(vcltq_f32(x, min_input), zero, poly);
    poly = vbslq_f32(vcgtq_f32(x, max_input), inf, poly);

    return poly;
}
#ifdef __aarch64__
/** Per-lane error function erf(x) of four fp32 values.
 *
 *  Uses a 513-entry lookup table (erf_f32_lut, defined elsewhere in this
 *  file) indexed by |x| quantized to 1/128 steps, plus a cubic correction:
 *  erf(x) ~= erf(r) + scale(r) * [d - d^2 * (r + d/3)] with d = |x| - r.
 *  |x| > 3.9375 saturates to +/-1; the sign of x is reapplied at the end.
 *
 *  @param x Input vector.
 *  @return erf(x) per lane.
 */
inline float32x4_t verfq_f32(float32x4_t x)
{
    const float32x4_t max_value = vdupq_n_f32(3.9375);       // largest LUT-covered |x|
    const float32x4_t shift     = vdupq_n_f32(65536);        // 2^16: quantization shifter
    const float32x4_t third     = vdupq_n_f32(0.3333333333); // 1/3
    const float32x4_t one       = vdupq_n_f32(1.f);
    const uint32x4_t  max_index = vdupq_n_u32(512);
    const uint32x4_t  sign_mask = vdupq_n_u32(0x7fffffff);

    const float32x4_t x_abs = vabsq_f32(x);

    // Adding then subtracting 2^16 rounds |x| to the LUT grid point r;
    // the integer difference of the bit patterns is the LUT index.
    const float32x4_t z = x_abs + shift;
    const float32x4_t r = z - shift;

    uint32x4_t index = vreinterpretq_u32_f32(z) - vreinterpretq_u32_f32(shift);
    index            = vminq_u32(index, max_index);

    // Gather the four (erf(r), scale(r)) pairs as 64-bit loads.
    const float64_t entry_0 = *reinterpret_cast<const float64_t *>(&erf_f32_lut[index[0]]);
    const float64_t entry_1 = *reinterpret_cast<const float64_t *>(&erf_f32_lut[index[1]]);
    const float64_t entry_2 = *reinterpret_cast<const float64_t *>(&erf_f32_lut[index[2]]);
    const float64_t entry_3 = *reinterpret_cast<const float64_t *>(&erf_f32_lut[index[3]]);

    const float32x4_t entry_01 = vreinterpretq_f32_f64(float64x2_t{entry_0, entry_1});
    const float32x4_t entry_23 = vreinterpretq_f32_f64(float64x2_t{entry_2, entry_3});

    // De-interleave into erf(r) and scale(r) vectors.
    const float32x4_t erf_r   = vuzp1q_f32(entry_01, entry_23);
    const float32x4_t scale_r = vuzp2q_f32(entry_01, entry_23);

    // Cubic correction term around the table point.
    const float32x4_t d  = x_abs - r;
    const float32x4_t d2 = d * d;

    const float32x4_t t0    = vfmaq_f32(r, third, d);     // r + d/3
    const float32x4_t t1    = vfmsq_f32(d, d2, t0);       // d - d^2 * (r + d/3)
    const float32x4_t erf_x = vfmaq_f32(erf_r, scale_r, t1);

    // Saturate to 1 beyond the table range, then restore the sign of x.
    const float32x4_t clamped = vbslq_f32(x_abs > max_value, one, erf_x);
    const float32x4_t result  = vbslq_f32(sign_mask, clamped, x);

    return result;
}
#endif // #ifdef __aarch64__
/** Per-lane natural logarithm of four fp32 values.
 *
 *  Splits x into mantissa and exponent m, evaluates a polynomial (log_tab)
 *  on the mantissa, then reconstructs log(x) = poly + m * ln(2).
 *  Behavior for non-positive or non-finite lanes is undefined.
 *
 *  @param x Input vector.
 *  @return ln(x) per lane.
 */
inline float32x4_t vlogq_f32(float32x4_t x)
{
    static const int32x4_t   CONST_127 = vdupq_n_s32(127);          // fp32 exponent bias
    static const float32x4_t CONST_LN2 = vdupq_n_f32(0.6931471805f); // ln(2)

    // Extract the unbiased exponent and strip it from x, leaving the mantissa.
    int32x4_t   m   = vsubq_s32(vreinterpretq_s32_u32(vshrq_n_u32(vreinterpretq_u32_f32(x), 23)), CONST_127);
    float32x4_t val = vreinterpretq_f32_s32(vsubq_s32(vreinterpretq_s32_f32(x), vshlq_n_s32(m, 23)));

    // Polynomial approximation of log on the mantissa.
    float32x4_t poly = vtaylor_polyq_f32(val, log_tab);

    // Reconstruct: log(x) = poly + m * ln(2).
    poly = vmlaq_f32(poly, vcvtq_f32_s32(m), CONST_LN2);

    return poly;
}
/** Per-lane hyperbolic tangent of four fp32 values.
 *
 *  Input is clamped to [-10, 10] (tanh saturates well before that). For
 *  |x| > 5e-3 it uses tanh(x) = (e^2x - 1) / (e^2x + 1); for tiny |x| it
 *  uses the series x * (1 - x^2/3) to avoid precision loss.
 *
 *  @param val Input vector.
 *  @return tanh(val) per lane.
 */
inline float32x4_t vtanhq_f32(float32x4_t val)
{
    static const float32x4_t CONST_1        = vdupq_n_f32(1.f);
    static const float32x4_t CONST_2        = vdupq_n_f32(2.f);
    static const float32x4_t CONST_MIN_TANH = vdupq_n_f32(-10.f);
    static const float32x4_t CONST_MAX_TANH = vdupq_n_f32(10.f);
    static const float32x4_t CONST_THR      = vdupq_n_f32(5.e-3);
    static const float32x4_t CONST_1_3      = vdupq_n_f32(0.3333333f);

    float32x4_t x = vminq_f32(vmaxq_f32(val, CONST_MIN_TANH), CONST_MAX_TANH);

    // Large-|x| path carries e^2x; small-|x| path carries x^2 instead.
    float32x4_t exp2x =
        vbslq_f32(vcgtq_f32(vabsq_f32(x), CONST_THR), vexpq_f32(vmulq_f32(CONST_2, x)), vmulq_f32(x, x));
    float32x4_t num =
        vbslq_f32(vcgtq_f32(vabsq_f32(x), CONST_THR), vsubq_f32(exp2x, CONST_1), vmulq_f32(CONST_1_3, exp2x));
    float32x4_t den =
        vbslq_f32(vcgtq_f32(vabsq_f32(x), CONST_THR), vaddq_f32(exp2x, CONST_1), vsubq_f32(CONST_1, num));
    float32x4_t tanh =
        vbslq_f32(vcgtq_f32(vabsq_f32(x), CONST_THR), vmulq_f32(num, vinvq_f32(den)), vmulq_f32(x, den));

    return tanh;
}
/** Per-lane power val^n of four fp32 values.
 *
 *  Computed as e^(n * ln(val)); val lanes must be positive.
 *
 *  @param val Base vector (positive lanes).
 *  @param n   Exponent vector.
 *  @return val^n per lane.
 */
inline float32x4_t vpowq_f32(float32x4_t val, float32x4_t n)
{
    return vexpq_f32(vmulq_f32(n, vlogq_f32(val)));
}
/** Per-lane sine of four fp32 values (radians).
 *
 *  Reduces the argument to [0, pi/2] tracking the result sign, then
 *  evaluates a Taylor series whose term ratios (te_sin_coeff2..5, defined
 *  elsewhere in this file) are applied incrementally.
 *
 *  @param val Input vector in radians.
 *  @return sin(val) per lane.
 */
inline float32x4_t vsinq_f32(float32x4_t val)
{
    const float32x4_t pi_v   = vdupq_n_f32(M_PI);
    const float32x4_t pio2_v = vdupq_n_f32(M_PI / 2);
    const float32x4_t ipi_v  = vdupq_n_f32(1 / M_PI);

    // Quadrant bookkeeping: c = |val / pi|, odd multiples flip the sign.
    const int32x4_t  c_v    = vabsq_s32(vcvtq_s32_f32(vmulq_f32(val, ipi_v)));
    const uint32x4_t sign_v = vcleq_f32(val, vdupq_n_f32(0));
    const uint32x4_t odd_v  = vandq_u32(vreinterpretq_u32_s32(c_v), vdupq_n_u32(1));

    uint32x4_t neg_v = veorq_u32(odd_v, sign_v);

    // Reduce the angle modulo pi, then mirror into [0, pi/2].
    float32x4_t      ma    = vsubq_f32(vabsq_f32(val), vmulq_f32(pi_v, vcvtq_f32_s32(c_v)));
    const uint32x4_t reb_v = vcgeq_f32(ma, pio2_v);

    ma = vbslq_f32(reb_v, vsubq_f32(pi_v, ma), ma);

    const float32x4_t ma2 = vmulq_f32(ma, ma);

    // Taylor series: each term reuses the previous one scaled by ma2.
    float32x4_t elem = vmulq_f32(vmulq_f32(ma, ma2), vdupq_n_f32(te_sin_coeff2));
    float32x4_t res  = vsubq_f32(ma, elem);

    elem = vmulq_f32(vmulq_f32(elem, ma2), vdupq_n_f32(te_sin_coeff3));
    res  = vaddq_f32(res, elem);

    elem = vmulq_f32(vmulq_f32(elem, ma2), vdupq_n_f32(te_sin_coeff4));
    res  = vsubq_f32(res, elem);

    elem = vmulq_f32(vmulq_f32(elem, ma2), vdupq_n_f32(te_sin_coeff5));
    res  = vaddq_f32(res, elem);

    // Apply the accumulated sign via the fp32 sign bit.
    neg_v = vshlq_n_u32(neg_v, 31);
    res   = vreinterpretq_f32_u32(veorq_u32(vreinterpretq_u32_f32(res), neg_v));

    return res;
}
/** Per-lane sine of two fp32 values (radians) — 64-bit variant of vsinq_f32.
 *
 *  Same algorithm: reduce to [0, pi/2] tracking the sign, then an
 *  incremental Taylor series using te_sin_coeff2..5.
 *
 *  @param val Input vector in radians.
 *  @return sin(val) per lane.
 */
inline float32x2_t vsin_f32(float32x2_t val)
{
    const float32x2_t pi_v   = vdup_n_f32(M_PI);
    const float32x2_t pio2_v = vdup_n_f32(M_PI / 2);
    const float32x2_t ipi_v  = vdup_n_f32(1 / M_PI);

    // Quadrant bookkeeping: c = |val / pi|, odd multiples flip the sign.
    const int32x2_t  c_v    = vabs_s32(vcvt_s32_f32(vmul_f32(val, ipi_v)));
    const uint32x2_t sign_v = vcle_f32(val, vdup_n_f32(0));
    const uint32x2_t odd_v  = vand_u32(vreinterpret_u32_s32(c_v), vdup_n_u32(1));

    uint32x2_t neg_v = veor_u32(odd_v, sign_v);

    // Reduce the angle modulo pi, then mirror into [0, pi/2].
    float32x2_t      ma    = vsub_f32(vabs_f32(val), vmul_f32(pi_v, vcvt_f32_s32(c_v)));
    const uint32x2_t reb_v = vcge_f32(ma, pio2_v);

    ma = vbsl_f32(reb_v, vsub_f32(pi_v, ma), ma);

    const float32x2_t ma2 = vmul_f32(ma, ma);

    // Taylor series: each term reuses the previous one scaled by ma2.
    float32x2_t elem = vmul_f32(vmul_f32(ma, ma2), vdup_n_f32(te_sin_coeff2));
    float32x2_t res  = vsub_f32(ma, elem);

    elem = vmul_f32(vmul_f32(elem, ma2), vdup_n_f32(te_sin_coeff3));
    res  = vadd_f32(res, elem);

    elem = vmul_f32(vmul_f32(elem, ma2), vdup_n_f32(te_sin_coeff4));
    res  = vsub_f32(res, elem);

    elem = vmul_f32(vmul_f32(elem, ma2), vdup_n_f32(te_sin_coeff5));
    res  = vadd_f32(res, elem);

    // Apply the accumulated sign via the fp32 sign bit.
    neg_v = vshl_n_u32(neg_v, 31);
    res   = vreinterpret_f32_u32(veor_u32(vreinterpret_u32_f32(res), neg_v));

    return res;
}
/** Per-lane rounding division of x by 2^exponent (round half away from zero).
 *
 *  The fix-up term compensates vrshl's round-half-up so negative values
 *  round symmetrically.
 *
 *  @param x        Dividend lanes.
 *  @param exponent Per-lane power-of-two exponents (>= 0).
 *  @return x / 2^exponent per lane, rounded.
 */
inline int32x4_t rounding_divide_by_pow2(int32x4_t x, int32x4_t exponent)
{
    const int32x4_t shift_vec  = vnegq_s32(exponent);
    const int32x4_t fixup      = vshrq_n_s32(vandq_s32(x, shift_vec), 31);
    const int32x4_t fixed_up_x = vqaddq_s32(x, fixup);
    return vrshlq_s32(fixed_up_x, shift_vec);
}
/** Per-lane rounding division of x by 2^exponent, scalar exponent overload.
 *
 *  @param x        Dividend lanes.
 *  @param exponent Power-of-two exponent (>= 0) applied to all lanes.
 *  @return x / 2^exponent per lane, rounded.
 */
inline int32x4_t rounding_divide_by_pow2(int32x4_t x, int exponent)
{
    const int32x4_t shift_vec  = vdupq_n_s32(-exponent);
    const int32x4_t fixup      = vshrq_n_s32(vandq_s32(x, shift_vec), 31);
    const int32x4_t fixed_up_x = vqaddq_s32(x, fixup);
    return vrshlq_s32(fixed_up_x, shift_vec);
}
/** Scalar rounding division of x by 2^exponent (round half away from zero).
 *
 *  @param x        Dividend.
 *  @param exponent Power-of-two exponent (>= 0).
 *  @return x / 2^exponent, rounded.
 */
inline int32_t rounding_divide_by_pow2(int32_t x, int exponent)
{
    const int32_t mask      = (1 << exponent) - 1;
    const int32_t threshold = (mask >> 1) + (x < 0 ? 1 : 0); // asymmetric tie threshold for negatives
    return (x >> exponent) + ((x & mask) > threshold ? 1 : 0);
}
/** Widen 16 u8 values into four fp32 vectors (lanes preserved in order).
 *
 *  @param in 16 unsigned bytes.
 *  @return Four float32x4_t vectors covering in[0..15].
 */
inline float32x4x4_t convert_uint8x16_to_float32x4x4(const uint8x16_t &in)
{
    float32x4x4_t out;

    const auto tmp1 = vmovl_u8(vget_low_u8(in));  // u8 -> u16, low 8 lanes
    out.val[0]      = vcvtq_f32_u32(vmovl_u16(vget_low_u16(tmp1)));
    out.val[1]      = vcvtq_f32_u32(vmovl_u16(vget_high_u16(tmp1)));

    const auto tmp2 = vmovl_u8(vget_high_u8(in)); // u8 -> u16, high 8 lanes
    out.val[2]      = vcvtq_f32_u32(vmovl_u16(vget_low_u16(tmp2)));
    out.val[3]      = vcvtq_f32_u32(vmovl_u16(vget_high_u16(tmp2)));
    return out;
}
/** Widen 16 s8 values into four fp32 vectors (lanes preserved in order).
 *
 *  @param in 16 signed bytes.
 *  @return Four float32x4_t vectors covering in[0..15].
 */
inline float32x4x4_t convert_int8x16_to_float32x4x4(const int8x16_t &in)
{
    float32x4x4_t out;

    const auto tmp1 = vmovl_s8(vget_low_s8(in));  // s8 -> s16, low 8 lanes
    out.val[0]      = vcvtq_f32_s32(vmovl_s16(vget_low_s16(tmp1)));
    out.val[1]      = vcvtq_f32_s32(vmovl_s16(vget_high_s16(tmp1)));

    const auto tmp2 = vmovl_s8(vget_high_s8(in)); // s8 -> s16, high 8 lanes
    out.val[2]      = vcvtq_f32_s32(vmovl_s16(vget_low_s16(tmp2)));
    out.val[3]      = vcvtq_f32_s32(vmovl_s16(vget_high_s16(tmp2)));
    return out;
}
/** Narrow three pairs of fp32 vectors into three u8x8 vectors with
 *  saturation (in1 supplies the low halves, in2 the high halves).
 *
 *  @param in1 First set of three fp32 vectors.
 *  @param in2 Second set of three fp32 vectors.
 *  @param out Destination: three saturated u8x8 vectors.
 */
inline void convert_float32x4x3_to_uint8x8x3(const float32x4x3_t &in1, const float32x4x3_t &in2, uint8x8x3_t &out)
{
    out.val[0] = vqmovn_u16(vcombine_u16(vqmovn_u32(vcvtq_u32_f32(in1.val[0])), vqmovn_u32(vcvtq_u32_f32(in2.val[0]))));
    out.val[1] = vqmovn_u16(vcombine_u16(vqmovn_u32(vcvtq_u32_f32(in1.val[1])), vqmovn_u32(vcvtq_u32_f32(in2.val[1]))));
    out.val[2] = vqmovn_u16(vcombine_u16(vqmovn_u32(vcvtq_u32_f32(in1.val[2])), vqmovn_u32(vcvtq_u32_f32(in2.val[2]))));
}
/** Narrow four fp32 vectors into one u8x16 vector with saturation.
 *
 *  @param in  Four fp32 vectors (16 values total, lane order preserved).
 *  @param out Destination: 16 saturated unsigned bytes.
 */
inline void convert_float32x4x4_to_uint8x16(const float32x4x4_t &in, uint8x16_t &out)
{
    const auto low  = vcombine_u16(vqmovn_u32(vcvtq_u32_f32(in.val[0])), vqmovn_u32(vcvtq_u32_f32(in.val[1])));
    const auto high = vcombine_u16(vqmovn_u32(vcvtq_u32_f32(in.val[2])), vqmovn_u32(vcvtq_u32_f32(in.val[3])));
    out             = vcombine_u8(vqmovn_u16(low), vqmovn_u16(high));
}
/** Narrow four fp32 vectors into one s8x16 vector with saturation.
 *
 *  @param in  Four fp32 vectors (16 values total, lane order preserved).
 *  @param out Destination: 16 saturated signed bytes.
 */
inline void convert_float32x4x4_to_int8x16(const float32x4x4_t &in, int8x16_t &out)
{
    const auto low  = vcombine_s16(vqmovn_s32(vcvtq_s32_f32(in.val[0])), vqmovn_s32(vcvtq_s32_f32(in.val[1])));
    const auto high = vcombine_s16(vqmovn_s32(vcvtq_s32_f32(in.val[2])), vqmovn_s32(vcvtq_s32_f32(in.val[3])));
    out             = vcombine_s8(vqmovn_s16(low), vqmovn_s16(high));
}
/** Horizontal sum of the four fp32 lanes of v.
 *
 *  @param v Input vector.
 *  @return v[0] + v[1] + v[2] + v[3].
 */
inline float vreduce(const float32x4_t &v)
{
    // Fold high half onto low half, then sum the remaining two lanes.
    const float32x2_t v0    = vget_high_f32(v);
    const float32x2_t v1    = vget_low_f32(v);
    const float32x2_t v_out = vadd_f32(v0, v1);

    const float a = vget_lane_f32(v_out, 0);
    const float b = vget_lane_f32(v_out, 1);

    return a + b;
}
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
#ifndef DOXYGEN_SKIP_THIS
/** Per-lane floor of eight fp16 values (same scheme as vfloorq_f32).
 *
 *  @param val Input vector.
 *  @return floor(val) per lane.
 */
inline float16x8_t vfloorq_f16(float16x8_t val)
{
    static const float16x8_t CONST_1 = vdupq_n_f16(1.f);

    const int16x8_t   z = vcvtq_s16_f16(val); // truncate toward zero
    const float16x8_t r = vcvtq_f16_s16(z);

    // Step down by 1 where truncation rounded up (negative non-integers).
    return vbslq_f16(vcgtq_f16(r, val), vsubq_f16(r, CONST_1), r);
}
/** Per-lane round-to-nearest-even of eight fp16 values.
 *
 *  @param val Input vector.
 *  @return val rounded to nearest, ties to even, per lane.
 */
inline float16x8_t vroundq_rte_f16(float16x8_t val)
{
    return vrndnq_f16(val);
}
/** Reciprocal square root of four fp16 lanes.
 *
 *  Hardware estimate refined by two Newton-Raphson steps (mirrors the
 *  fp32 vinvsqrt_f32 implementation).
 *
 *  @param x Input vector (lanes must be > 0 for a finite result).
 *  @return 1 / sqrt(x) per lane.
 */
inline float16x4_t vinvsqrt_f16(float16x4_t x)
{
    float16x4_t sqrt_reciprocal = vrsqrte_f16(x);
    sqrt_reciprocal = vmul_f16(vrsqrts_f16(vmul_f16(x, sqrt_reciprocal), sqrt_reciprocal), sqrt_reciprocal);
    sqrt_reciprocal = vmul_f16(vrsqrts_f16(vmul_f16(x, sqrt_reciprocal), sqrt_reciprocal), sqrt_reciprocal);
    return sqrt_reciprocal;
}
/** Reciprocal square root of eight fp16 lanes.
 *
 *  Hardware estimate refined by two Newton-Raphson steps (mirrors the
 *  fp32 vinvsqrtq_f32 implementation).
 *
 *  @param x Input vector (lanes must be > 0 for a finite result).
 *  @return 1 / sqrt(x) per lane.
 */
inline float16x8_t vinvsqrtq_f16(float16x8_t x)
{
    float16x8_t sqrt_reciprocal = vrsqrteq_f16(x);
    sqrt_reciprocal = vmulq_f16(vrsqrtsq_f16(vmulq_f16(x, sqrt_reciprocal), sqrt_reciprocal), sqrt_reciprocal);
    sqrt_reciprocal = vmulq_f16(vrsqrtsq_f16(vmulq_f16(x, sqrt_reciprocal), sqrt_reciprocal), sqrt_reciprocal);
    return sqrt_reciprocal;
}
/** Reciprocal of four fp16 lanes.
 *
 *  Hardware estimate refined by two Newton-Raphson steps (mirrors the
 *  fp32 vinv_f32 implementation).
 *
 *  @param x Input vector (non-zero lanes for a finite result).
 *  @return 1 / x per lane.
 */
inline float16x4_t vinv_f16(float16x4_t x)
{
    float16x4_t recip = vrecpe_f16(x);
    recip = vmul_f16(vrecps_f16(x, recip), recip);
    recip = vmul_f16(vrecps_f16(x, recip), recip);
    return recip;
}
/** Reciprocal of eight fp16 lanes.
 *
 *  Hardware estimate refined by two Newton-Raphson steps (mirrors the
 *  fp32 vinvq_f32 implementation).
 *
 *  @param x Input vector (non-zero lanes for a finite result).
 *  @return 1 / x per lane.
 */
inline float16x8_t vinvq_f16(float16x8_t x)
{
    float16x8_t recip = vrecpeq_f16(x);
    recip = vmulq_f16(vrecpsq_f16(x, recip), recip);
    recip = vmulq_f16(vrecpsq_f16(x, recip), recip);
    return recip;
}
/** Rational tanh approximation for four fp16 lanes, computed in fp32.
 *
 *  tanh(x) ~= x * (C2 + C4*x^2 adjusted) / (1 + x^2*(C1 + C3*x^2 adjusted)):
 *  numerator and denominator are each a nested polynomial in x^2.
 *  Only accurate for |x| below the saturation threshold used by vtanhq_f16.
 *
 *  @param x16 Input vector.
 *  @return Approximate tanh(x16) per lane, narrowed back to fp16.
 */
inline float16x4_t vtanh_rational_approx_f16(float16x4_t x16)
{
    // Widen to fp32 so intermediate products keep precision.
    const float32x4_t x = vcvt_f32_f16(x16);

    const float32x4_t ONE = vdupq_n_f32(1.0f);
    const float32x4_t C1  = vdupq_n_f32(0.43760237f);
    const float32x4_t C2  = vdupq_n_f32(0.104402f);
    const float32x4_t C3  = vdupq_n_f32(0.013442706f);
    const float32x4_t C4  = vdupq_n_f32(0.00073561433f);

    const float32x4_t x2 = vmulq_f32(x, x);

    // Denominator: 1 + x^2 * (C1 + C3 * x^2)
    float32x4_t denom = vfmaq_f32(C1, C3, x2);
    denom             = vfmaq_f32(ONE, x2, denom);

    // Numerator: x * (1 + x^2 * (C2 + C4 * x^2))
    float32x4_t numer = vfmaq_f32(C2, C4, x2);
    numer             = vfmaq_f32(ONE, x2, numer);
    numer             = vmulq_f32(numer, x);

    return vcvt_f16_f32(vdivq_f32(numer, denom));
}
/** Per-lane hyperbolic tangent of eight fp16 values.
 *
 *  Applies the rational approximation to each half, then saturates lanes
 *  with |x| >= 4.508 to +/-1 (tanh is 1 to fp16 precision there).
 *
 *  @param x Input vector.
 *  @return tanh(x) per lane.
 */
inline float16x8_t vtanhq_f16(float16x8_t x)
{
    const float16x8_t tanh =
        vcombine_f16(vtanh_rational_approx_f16(vget_low_f16(x)), vtanh_rational_approx_f16(vget_high_f16(x)));

    // Beyond |x| = 4.508 the approximation diverges: clamp to signed one.
    const float16x8_t ONE    = vdupq_n_f16(1.0f);
    const float16x8_t MAX_X  = vdupq_n_f16(4.508f);
    const auto        at_limit = vcageq_f16(x, MAX_X); // |x| >= |MAX_X|
    const float16x8_t sign_x = vbslq_f16(vclezq_f16(x), -ONE, ONE);
    return vbslq_f16(at_limit, sign_x, tanh);
}
/** Evaluate a degree-7 polynomial at x using Estrin's scheme (fp16 variant
 *  of vtaylor_polyq_f32; coefficient layout is identical).
 *  NOTE(review): interior statements reconstructed from the fp32 twin —
 *  verify against the original source.
 *
 *  @param x      Evaluation point per lane.
 *  @param coeffs Eight broadcast coefficient vectors.
 *  @return Polynomial value per lane.
 */
inline float16x8_t vtaylor_polyq_f16(float16x8_t x, const std::array<float16x8_t, 8> &coeffs)
{
    const float16x8_t A   = vaddq_f16(coeffs[0], vmulq_f16(coeffs[4], x));
    const float16x8_t B   = vaddq_f16(coeffs[2], vmulq_f16(coeffs[6], x));
    const float16x8_t C   = vaddq_f16(coeffs[1], vmulq_f16(coeffs[5], x));
    const float16x8_t D   = vaddq_f16(coeffs[3], vmulq_f16(coeffs[7], x));
    const float16x8_t x2  = vmulq_f16(x, x);
    const float16x8_t x4  = vmulq_f16(x2, x2);
    // (A + B*x^2) + (C + D*x^2) * x^4
    const float16x8_t res = vaddq_f16(vaddq_f16(A, vmulq_f16(B, x2)), vmulq_f16(vaddq_f16(C, vmulq_f16(D, x2)), x4));
    return res;
}
/** Per-lane exponential of eight fp16 values, computed via the fp32 kernel
 *  on each half for accuracy.
 *
 *  @param x Input vector.
 *  @return e^x per lane.
 */
inline float16x8_t vexpq_f16(float16x8_t x)
{
    const float32x4_t x_high = vcvt_f32_f16(vget_high_f16(x));
    const float32x4_t x_low  = vcvt_f32_f16(vget_low_f16(x));

    const float16x8_t res = vcombine_f16(vcvt_f16_f32(vexpq_f32(x_low)), vcvt_f16_f32(vexpq_f32(x_high)));
    return res;
}
#ifdef __aarch64__
/** Per-lane error function of eight fp16 values, computed via the fp32
 *  kernel on each half for accuracy.
 *
 *  @param x Input vector.
 *  @return erf(x) per lane.
 */
inline float16x8_t verfq_f16(float16x8_t x)
{
    const float32x4_t x_high = vcvt_f32_f16(vget_high_f16(x));
    const float32x4_t x_low  = vcvt_f32_f16(vget_low_f16(x));

    const float16x8_t res = vcombine_f16(vcvt_f16_f32(verfq_f32(x_low)), vcvt_f16_f32(verfq_f32(x_high)));
    return res;
}
#endif // #ifdef __aarch64__
/** Per-lane natural logarithm of eight fp16 values, computed via the fp32
 *  kernel on each half for accuracy.
 *
 *  @param x Input vector (positive lanes).
 *  @return ln(x) per lane.
 */
inline float16x8_t vlogq_f16(float16x8_t x)
{
    const float32x4_t x_high = vcvt_f32_f16(vget_high_f16(x));
    const float32x4_t x_low  = vcvt_f32_f16(vget_low_f16(x));

    const float16x8_t res = vcombine_f16(vcvt_f16_f32(vlogq_f32(x_low)), vcvt_f16_f32(vlogq_f32(x_high)));
    return res;
}
/** Per-lane power val^n of eight fp16 values.
 *
 *  Computed in fp32 as e^(n * ln(val)) on each half; val lanes must be
 *  positive.
 *
 *  @param val Base vector (positive lanes).
 *  @param n   Exponent vector.
 *  @return val^n per lane.
 */
inline float16x8_t vpowq_f16(float16x8_t val, float16x8_t n)
{
    float32x4_t n0_f32   = vcvt_f32_f16(vget_low_f16(n));
    float32x4_t n1_f32   = vcvt_f32_f16(vget_high_f16(n));
    float32x4_t val0_f32 = vcvt_f32_f16(vget_low_f16(val));
    float32x4_t val1_f32 = vcvt_f32_f16(vget_high_f16(val));

    float32x4_t res0_f32 = vexpq_f32(vmulq_f32(n0_f32, vlogq_f32(val0_f32)));
    float32x4_t res1_f32 = vexpq_f32(vmulq_f32(n1_f32, vlogq_f32(val1_f32)));

    return vcombine_f16(vcvt_f16_f32(res0_f32), vcvt_f16_f32(res1_f32));
}
/** Per-lane sine of eight fp16 values (radians), computed via the fp32
 *  kernel on each half for accuracy.
 *
 *  @param val Input vector in radians.
 *  @return sin(val) per lane.
 */
inline float16x8_t vsinq_f16(float16x8_t val)
{
    const float32x4_t val_high = vcvt_f32_f16(vget_high_f16(val));
    const float32x4_t val_low  = vcvt_f32_f16(vget_low_f16(val));

    const float32x4_t res_high = vsinq_f32(val_high);
    const float32x4_t res_low  = vsinq_f32(val_low);

    return vcombine_f16(vcvt_f16_f32(res_low), vcvt_f16_f32(res_high));
}
/** Per-lane sine of four fp16 values (radians), widened to fp32 and split
 *  into two-lane halves for the fp32 kernel.
 *
 *  @param val Input vector in radians.
 *  @return sin(val) per lane.
 */
inline float16x4_t vsin_f16(float16x4_t val)
{
    const float32x4_t val_f32  = vcvt_f32_f16(val);
    const float32x2_t val_high = vget_high_f32(val_f32);
    const float32x2_t val_low  = vget_low_f32(val_f32);

    const float32x2_t res_high = vsin_f32(val_high);
    const float32x2_t res_low  = vsin_f32(val_low);

    return vcvt_f16_f32(vcombine_f32(res_low, res_high));
}
/** Horizontal sum of the eight fp16 lanes of v.
 *
 *  @param v Input vector.
 *  @return Sum of all eight lanes (accumulated in fp16).
 */
inline float16_t vreduce(const float16x8_t &v)
{
    // Fold high half onto low half, then sum the remaining four lanes.
    const float16x4_t v0    = vget_high_f16(v);
    const float16x4_t v1    = vget_low_f16(v);
    const float16x4_t v_out = vadd_f16(v0, v1);

    const float16_t a = vget_lane_f16(v_out, 0);
    const float16_t b = vget_lane_f16(v_out, 1);
    const float16_t c = vget_lane_f16(v_out, 2);
    const float16_t d = vget_lane_f16(v_out, 3);

    return a + b + c + d;
}