pl/math: Cleanup Neon expf

Constants moved to a static const volatile struct
Fix signatures - inconsistent __v_expf prototypes removed from callers
No more implicit vector operations - replaced with intrinsics (see
example below)
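
For example, in the reduction step (compare the deleted v_expf.c with
the new v_expf_inline.h below):

    n = z - Shift;                  /* implicit subtract, macro constant */

becomes

    n = vsubq_f32 (z, data.shift);  /* explicit intrinsic, struct member */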

Neon expf is a loose copy of the AOR/math version and is only used as
a helper routine in pl/math. Inline the expf helper and remove its
special-case handling - calling routines are now expected to handle
special values themselves.
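
A minimal sketch of a caller (foo is hypothetical, not part of this
patch), assuming inputs needing special-case handling have already
been filtered out:

    #include "v_expf_inline.h"

    static inline float32x4_t
    foo (float32x4_t x)
    {
      /* x must be clear of lanes requiring special-case handling.  */
      return v_expf_inline (x);
    }
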
diff --git a/pl/math/v_coshf_2u4.c b/pl/math/v_coshf_2u4.c
index 393467f..a9fcc44 100644
--- a/pl/math/v_coshf_2u4.c
+++ b/pl/math/v_coshf_2u4.c
@@ -5,6 +5,7 @@
  * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
  */
 
+#include "v_expf_inline.h"
 #include "v_math.h"
 #include "mathlib.h"
 #include "pl_sig.h"
@@ -17,8 +18,6 @@
 		special case.  */
 #define Half v_f32 (0.5)
 
-float32x4_t __v_expf (float32x4_t);
-
 /* Single-precision vector cosh, using vector expf.
    Maximum error is 2.38 ULP:
    __v_coshf(0x1.e8001ep+1) got 0x1.6a491ep+4 want 0x1.6a4922p+4.  */
@@ -45,7 +44,7 @@
 #endif
 
   /* Calculate cosh by exp(x) / 2 + exp(-x) / 2.  */
-  float32x4_t t = __v_expf (ax);
+  float32x4_t t = v_expf_inline (ax);
   float32x4_t y = t * Half + Half / t;
 
 #if WANT_SIMD_EXCEPT
diff --git a/pl/math/v_erff_1u5.c b/pl/math/v_erff_1u5.c
index c82abb2..3263baa 100644
--- a/pl/math/v_erff_1u5.c
+++ b/pl/math/v_erff_1u5.c
@@ -5,14 +5,12 @@
  * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
  */
 
+#include "v_expf_inline.h"
 #include "v_math.h"
-#include "include/mathlib.h"
 #include "math_config.h"
 #include "pl_sig.h"
 #include "pl_test.h"
 
-VPCS_ATTR float32x4_t __v_expf (float32x4_t);
-
 #define AbsMask v_u32 (0x7fffffff)
 
 /* Special cases (fall back to scalar calls).  */
@@ -83,7 +81,7 @@
 
   /* y = |x| + |x|*P(|x|)        if |x| < 0.921875
      1 - exp (-(|x|+|x|*P(x^2))) otherwise.  */
-  float32x4_t y = vbslq_f32 (red, r, v_f32 (1.0f) - __v_expf (-r));
+  float32x4_t y = vbslq_f32 (red, r, v_f32 (1.0f) - v_expf_inline (-r));
 
   /* Boring domain (absolute value is required to get the sign of erf(-nan)
      right).  */
diff --git a/pl/math/v_expf.c b/pl/math/v_expf.c
deleted file mode 100644
index ee03d44..0000000
--- a/pl/math/v_expf.c
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * Single-precision vector e^x function.
- *
- * Copyright (c) 2019-2023, Arm Limited.
- * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
- */
-
-#include "v_math.h"
-#include "mathlib.h"
-static const float Poly[] = {
-  /* maxerr: 1.45358 +0.5 ulp.  */
-  0x1.0e4020p-7f,
-  0x1.573e2ep-5f,
-  0x1.555e66p-3f,
-  0x1.fffdb6p-2f,
-  0x1.ffffecp-1f,
-};
-#define C0 v_f32 (Poly[0])
-#define C1 v_f32 (Poly[1])
-#define C2 v_f32 (Poly[2])
-#define C3 v_f32 (Poly[3])
-#define C4 v_f32 (Poly[4])
-
-#define Shift v_f32 (0x1.8p23f)
-#define InvLn2 v_f32 (0x1.715476p+0f)
-#define Ln2hi v_f32 (0x1.62e4p-1f)
-#define Ln2lo v_f32 (0x1.7f7d1cp-20f)
-
-VPCS_ATTR
-static float32x4_t
-specialcase (float32x4_t poly, float32x4_t n, uint32x4_t e, float32x4_t absn,
-	     uint32x4_t cmp1, float32x4_t scale)
-{
-  /* 2^n may overflow, break it up into s1*s2.  */
-  uint32x4_t b = (n <= v_f32 (0.0f)) & v_u32 (0x82000000);
-  float32x4_t s1 = vreinterpretq_f32_u32 (v_u32 (0x7f000000) + b);
-  float32x4_t s2 = vreinterpretq_f32_u32 (e - b);
-  uint32x4_t cmp2 = absn > v_f32 (192.0f);
-  uint32x4_t r2 = vreinterpretq_u32_f32 (s1 * s1);
-  uint32x4_t r1 = vreinterpretq_u32_f32 (vfmaq_f32 (s2, poly, s2) * s1);
-  /* Similar to r1 but avoids double rounding in the subnormal range.  */
-  uint32x4_t r0 = vreinterpretq_u32_f32 (vfmaq_f32 (scale, poly, scale));
-  return vreinterpretq_f32_u32 ((cmp2 & r2) | (~cmp2 & cmp1 & r1)
-				| (~cmp1 & r0));
-}
-
-VPCS_ATTR
-float32x4_t
-__v_expf (float32x4_t x)
-{
-  float32x4_t n, r, r2, scale, p, q, poly, absn, z;
-  uint32x4_t cmp, e;
-
-  /* exp(x) = 2^n (1 + poly(r)), with 1 + poly(r) in [1/sqrt(2),sqrt(2)]
-     x = ln2*n + r, with r in [-ln2/2, ln2/2].  */
-#if 1
-  z = vfmaq_f32 (Shift, x, InvLn2);
-  n = z - Shift;
-  r = vfmaq_f32 (x, n, -Ln2hi);
-  r = vfmaq_f32 (r, n, -Ln2lo);
-  e = vreinterpretq_u32_f32 (z) << 23;
-#else
-  z = x * InvLn2;
-  n = vrndaq_f32 (z);
-  r = vfmaq_f32 (x, n, -Ln2hi);
-  r = vfmaq_f32 (r, n, -Ln2lo);
-  e = vreinterpretq_u32_s32 (vcvtaq_s32_f32 (z)) << 23;
-#endif
-  scale = vreinterpretq_f32_u32 (e + v_u32 (0x3f800000));
-  absn = vabsq_f32 (n);
-  cmp = absn > v_f32 (126.0f);
-  r2 = r * r;
-  p = vfmaq_f32 (C1, C0, r);
-  q = vfmaq_f32 (C3, C2, r);
-  q = vfmaq_f32 (q, p, r2);
-  p = C4 * r;
-  poly = vfmaq_f32 (p, q, r2);
-  if (unlikely (v_any_u32 (cmp)))
-    return specialcase (poly, n, e, absn, cmp, scale);
-  return vfmaq_f32 (scale, poly, scale);
-}
diff --git a/pl/math/v_expf_inline.h b/pl/math/v_expf_inline.h
new file mode 100644
index 0000000..d3995b5
--- /dev/null
+++ b/pl/math/v_expf_inline.h
@@ -0,0 +1,62 @@
+/*
+ * Helper for single-precision routines which calculate exp(x) and do not
+ * need special-case handling
+ *
+ * Copyright (c) 2019-2023, Arm Limited.
+ * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef PL_MATH_V_EXPF_INLINE_H
+#define PL_MATH_V_EXPF_INLINE_H
+
+#include "v_math.h"
+
+#define V_EXPF_INLINE_POLY_ORDER 4
+
+struct v_expf_inline_data
+{
+  float32x4_t poly[V_EXPF_INLINE_POLY_ORDER + 1];
+  float32x4_t shift, invln2, ln2_hi, ln2_lo;
+};
+
+static const volatile struct v_expf_inline_data data
+  = {.shift = V4 (0x1.8p23f),
+     .invln2 = V4 (0x1.715476p+0f),
+     .ln2_hi = V4 (0x1.62e4p-1f),
+     .ln2_lo = V4 (0x1.7f7d1cp-20f),
+     .poly = {/* maxerr: 1.45358 +0.5 ulp.  */
+	      V4 (0x1.0e4020p-7f), V4 (0x1.573e2ep-5f), V4 (0x1.555e66p-3f),
+	      V4 (0x1.fffdb6p-2f), V4 (0x1.ffffecp-1f)}};
+
+#define ExponentBias v_u32 (0x3f800000) /* asuint(1.0f).  */
+#define C(i) data.poly[i]
+
+static inline float32x4_t
+v_expf_inline (float32x4_t x)
+{
+  /* Helper routine for calculating exp(x).
+     Copied from v_expf.c, with all special-case handling removed - the
+     calling routine should handle special values if required.  */
+
+  /* exp(x) = 2^n (1 + poly(r)), with 1 + poly(r) in [1/sqrt(2),sqrt(2)]
+     x = ln2*n + r, with r in [-ln2/2, ln2/2].  */
+  float32x4_t n, r, z;
+  z = vfmaq_f32 (data.shift, x, data.invln2);
+  n = vsubq_f32 (z, data.shift);
+  r = vfmsq_f32 (x, n, data.ln2_hi);
+  r = vfmsq_f32 (r, n, data.ln2_lo);
+  uint32x4_t e = vshlq_n_u32 (vreinterpretq_u32_f32 (z), 23);
+  float32x4_t scale = vreinterpretq_f32_u32 (vaddq_u32 (e, ExponentBias));
+
+  /* Custom order-4 Estrin avoids building high order monomial.  */
+  float32x4_t r2 = vmulq_f32 (r, r);
+  float32x4_t p, q, poly;
+  p = vfmaq_f32 (C (1), C (0), r);
+  q = vfmaq_f32 (C (3), C (2), r);
+  q = vfmaq_f32 (q, p, r2);
+  p = vmulq_f32 (C (4), r);
+  poly = vfmaq_f32 (p, q, r2);
+  return vfmaq_f32 (scale, poly, scale);
+}
+
+#endif // PL_MATH_V_EXPF_INLINE_H