
; Input program: atanh(x) evaluated in IEEE-754 binary32 (single precision).
(FPCore (x) :precision binary32 (atanh x))
float code(float x) {
return atanhf(x);
}
# Reference implementation: inverse hyperbolic tangent.
code(x) = atanh(x)
% Reference implementation: inverse hyperbolic tangent.
function tmp = code(x)
    tmp = atanh(x);
end
\begin{array}{l}
\\
\tanh^{-1} x
\end{array}
Sampling outcomes in binary32 precision:
Herbie found 3 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
; Alternative 1: atanh(x) rewritten as 0.5 * log1p(2x / (1 - x)),
; using log1p to avoid cancellation for small |x|.
(FPCore (x) :precision binary32 (* 0.5 (log1p (/ (* 2.0 x) (- 1.0 x)))))
float code(float x) {
return 0.5f * log1pf(((2.0f * x) / (1.0f - x)));
}
# Alternative 1 (Julia): 0.5 * log1p(2x / (1 - x)) with every intermediate
# rounded to Float32 to mirror binary32 evaluation exactly.
function code(x) return Float32(Float32(0.5) * log1p(Float32(Float32(Float32(2.0) * x) / Float32(Float32(1.0) - x)))) end
\begin{array}{l}
\\
0.5 \cdot \mathsf{log1p}\left(\frac{2 \cdot x}{1 - x}\right)
\end{array}
; Alternative 2: same 0.5 * log1p(2x / D) shape, but the denominator D
; is expanded through t_0 = 1 - (-1 * x); algebraically this is another
; spelling of 1 - x produced by Herbie's rewrite rules.
(FPCore (x)
:precision binary32
(let* ((t_0 (- 1.0 (* -1.0 x))))
(*
0.5
(log1p (/ (* 2.0 x) (- (/ 1.0 t_0) (/ (* (* -1.0 x) x) (* -1.0 t_0))))))))
/* Alternative 2: denominator of 2x/(1-x) expanded via t_0 = 1 - (-1*x).
 * The exact operation order is deliberate (machine-generated by Herbie);
 * do not re-associate these float expressions. */
float code(float x) {
float t_0 = 1.0f - (-1.0f * x);
return 0.5f * log1pf(((2.0f * x) / ((1.0f / t_0) - (((-1.0f * x) * x) / (-1.0f * t_0)))));
}
# Alternative 2 (Julia): same expansion as the C version above; every
# intermediate is rounded to Float32 to reproduce binary32 semantics.
function code(x) t_0 = Float32(Float32(1.0) - Float32(Float32(-1.0) * x)) return Float32(Float32(0.5) * log1p(Float32(Float32(Float32(2.0) * x) / Float32(Float32(Float32(1.0) / t_0) - Float32(Float32(Float32(Float32(-1.0) * x) * x) / Float32(Float32(-1.0) * t_0)))))) end
\begin{array}{l}
\\
\begin{array}{l}
t_0 := 1 - -1 \cdot x\\
0.5 \cdot \mathsf{log1p}\left(\frac{2 \cdot x}{\frac{1}{t\_0} - \frac{\left(-1 \cdot x\right) \cdot x}{-1 \cdot t\_0}}\right)
\end{array}
\end{array}
Initial program 99.8%
lift--.f32: N/A
flip--: N/A
metadata-eval: N/A
unpow2: N/A
div-sub: N/A
lower--.f32: N/A
lower-/.f32: N/A
*-lft-identity: N/A
metadata-eval: N/A
fp-cancel-sub-sign-inv: N/A
lower--.f32: N/A
lower-*.f32: N/A
lower-/.f32: N/A
unpow2: N/A
lower-*.f32: N/A
*-lft-identity: N/A
metadata-eval: N/A
fp-cancel-sub-sign-inv: N/A
lower--.f32: N/A
lower-*.f32: 99.8
Applied rewrites: 99.8%
Final simplification: 99.8%
; Final program selected by Herbie: 0.5 * log1p(2x / (1 - x)).
(FPCore (x) :precision binary32 (* 0.5 (log1p (/ (* 2.0 x) (- 1.0 x)))))
/* Final program: atanh(x) ~= 0.5 * log1p(2x / (1 - x)) in single precision. */
float code(float x) {
return 0.5f * log1pf(((2.0f * x) / (1.0f - x)));
}
# Final program (Julia): Float32-rounded evaluation of 0.5 * log1p(2x / (1 - x)).
function code(x) return Float32(Float32(0.5) * log1p(Float32(Float32(Float32(2.0) * x) / Float32(Float32(1.0) - x)))) end
\begin{array}{l}
\\
0.5 \cdot \mathsf{log1p}\left(\frac{2 \cdot x}{1 - x}\right)
\end{array}
Initial program 99.8%
; Alternative 3: derived from a Taylor expansion of atanh around x = 0
; (see the "Taylor expanded in x around 0" step in the log below), with
; fma used to evaluate the polynomial pieces. t_0 is an odd-polynomial
; factor in x; t_1 and t_2 are reused subterms of the rational form.
(FPCore (x)
:precision binary32
(let* ((t_0
(*
(fma
(- (* (* x x) 0.2857142857142857) -0.4)
(* x x)
0.6666666666666666)
x))
(t_1 (* t_0 x))
(t_2 (* -1.0 t_1)))
(*
0.5
(*
(/
(fma (pow t_0 3.0) (* (* x x) x) 8.0)
(fma t_2 t_2 (- 4.0 (* t_1 2.0))))
x))))
/* Alternative 3: Taylor-expansion-based rational approximation of atanh
 * near x = 0, evaluated with fmaf for fused rounding. Machine-generated:
 * the exact fma/pow structure determines the reported accuracy, so the
 * expression must not be re-associated. */
float code(float x) {
float t_0 = fmaf((((x * x) * 0.2857142857142857f) - -0.4f), (x * x), 0.6666666666666666f) * x;
float t_1 = t_0 * x;
float t_2 = -1.0f * t_1;
return 0.5f * ((fmaf(powf(t_0, 3.0f), ((x * x) * x), 8.0f) / fmaf(t_2, t_2, (4.0f - (t_1 * 2.0f)))) * x);
}
# Alternative 3 (Julia): fma-based Taylor form matching the C version above,
# with Float32 rounding applied to every intermediate.
function code(x) t_0 = Float32(fma(Float32(Float32(Float32(x * x) * Float32(0.2857142857142857)) - Float32(-0.4)), Float32(x * x), Float32(0.6666666666666666)) * x) t_1 = Float32(t_0 * x) t_2 = Float32(Float32(-1.0) * t_1) return Float32(Float32(0.5) * Float32(Float32(fma((t_0 ^ Float32(3.0)), Float32(Float32(x * x) * x), Float32(8.0)) / fma(t_2, t_2, Float32(Float32(4.0) - Float32(t_1 * Float32(2.0))))) * x)) end
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \mathsf{fma}\left(\left(x \cdot x\right) \cdot 0.2857142857142857 - -0.4, x \cdot x, 0.6666666666666666\right) \cdot x\\
t_1 := t\_0 \cdot x\\
t_2 := -1 \cdot t\_1\\
0.5 \cdot \left(\frac{\mathsf{fma}\left({t\_0}^{3}, \left(x \cdot x\right) \cdot x, 8\right)}{\mathsf{fma}\left(t\_2, t\_2, 4 - t\_1 \cdot 2\right)} \cdot x\right)
\end{array}
\end{array}
Initial program 99.8%
Taylor expanded in x around 0
*-commutative: N/A
lower-*.f32: N/A
Applied rewrites: 99.1%
Applied rewrites: 99.1%
Final simplification: 99.1%
herbie shell --seed 2025065
; Named core for the report ("Rust f32::atanh"): the selected
; 0.5 * log1p(2x / (1 - x)) rewrite in binary32.
(FPCore (x)
:name "Rust f32::atanh"
:precision binary32
(* 0.5 (log1p (/ (* 2.0 x) (- 1.0 x)))))