
; Original program: atanh(x), evaluated in binary32.
(FPCore (x) :precision binary32 (atanh x))
float code(float x) {
return atanhf(x);
}
# Inverse hyperbolic tangent of x (reference implementation).
code(x) = atanh(x)
% Inverse hyperbolic tangent of x (reference implementation).
function tmp = code(x)
  tmp = atanh(x);
end
\begin{array}{l}
\\
\tanh^{-1} x
\end{array}
Sampling outcomes in binary32 precision:
Herbie found 3 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
; Alternative 1: atanh(x) rewritten as 0.5 * log1p((2*x) / (1 - x)).
(FPCore (x) :precision binary32 (* 0.5 (log1p (/ (* 2.0 x) (- 1.0 x)))))
float code(float x) {
return 0.5f * log1pf(((2.0f * x) / (1.0f - x)));
}
# 0.5 * log1p((2*x) / (1 - x)), rounding each intermediate to Float32
# exactly as the original single-expression form did.
function code(x)
    two_x = Float32(Float32(2.0) * x)
    denom = Float32(Float32(1.0) - x)
    ratio = Float32(two_x / denom)
    return Float32(Float32(0.5) * log1p(ratio))
end
\begin{array}{l}
\\
0.5 \cdot \mathsf{log1p}\left(\frac{2 \cdot x}{1 - x}\right)
\end{array}
; Alternative 2 (same expression as alternative 1): 0.5 * log1p((2*x) / (1 - x)).
(FPCore (x) :precision binary32 (* 0.5 (log1p (/ (* 2.0 x) (- 1.0 x)))))
float code(float x) {
return 0.5f * log1pf(((2.0f * x) / (1.0f - x)));
}
# Computes 0.5 * log1p((2*x) / (1 - x)) with explicit Float32 rounding
# after every operation, matching the generated single-line form.
function code(x)
    num = Float32(Float32(2.0) * x)
    den = Float32(Float32(1.0) - x)
    Float32(Float32(0.5) * log1p(Float32(num / den)))
end
\begin{array}{l}
\\
0.5 \cdot \mathsf{log1p}\left(\frac{2 \cdot x}{1 - x}\right)
\end{array}
Initial program 99.7%
Final simplification 99.7%
; Alternative 3: reassociated form 0.5 * log1p(x * (2 / (1 - x))).
(FPCore (x) :precision binary32 (* 0.5 (log1p (* x (/ 2.0 (- 1.0 x))))))
float code(float x) {
return 0.5f * log1pf((x * (2.0f / (1.0f - x))));
}
# 0.5 * log1p(x * (2 / (1 - x))), each intermediate rounded to Float32
# in the same order as the original single-expression form.
function code(x)
    inv_term = Float32(Float32(2.0) / Float32(Float32(1.0) - x))
    arg = Float32(x * inv_term)
    return Float32(Float32(0.5) * log1p(arg))
end
\begin{array}{l}
\\
0.5 \cdot \mathsf{log1p}\left(x \cdot \frac{2}{1 - x}\right)
\end{array}
Initial program 99.7%
associate-/l* 99.4%
Simplified 99.4%
associate-/r/ 99.7%
Applied egg-rr 99.7%
Final simplification 99.7%
; Taylor expansion of atanh around 0, after simplification: 0.5 * (2 * x).
(FPCore (x) :precision binary32 (* 0.5 (* 2.0 x)))
/* Taylor-expanded form near 0: 0.5 * (2 * x) (algebraically x). */
float code(float x) {
    const float doubled = 2.0f * x;
    return 0.5f * doubled;
}
! Taylor-expanded form of atanh near 0: 0.5 * (2.0 * x), single precision.
real(4) function code(x)
real(4), intent (in) :: x
code = 0.5e0 * (2.0e0 * x)
end function
# Taylor-expanded form near zero: 0.5 * (2 * x), rounded to Float32 per step.
code(x) = Float32(Float32(0.5) * Float32(Float32(2.0) * x))
% Taylor-expanded form near zero: 0.5 * (2 * x) in single precision.
function tmp = code(x)
  doubled = single(2.0) * x;
  tmp = single(0.5) * doubled;
end
\begin{array}{l}
\\
0.5 \cdot \left(2 \cdot x\right)
\end{array}
Initial program 99.7%
associate-/l* 99.4%
Simplified 99.4%
log1p-udef 23.6%
associate-/l* 23.6%
+-commutative 23.6%
associate-*l/ 23.6%
*-commutative 23.6%
fma-def 23.6%
Applied egg-rr 23.6%
add-cube-cbrt 23.6%
pow2 23.6%
Applied egg-rr 23.6%
Taylor expanded in x around 0 97.0%
Final simplification 97.0%
herbie shell --seed 2023174
; Herbie input: the log1p-based atanh used by Rust's f32::atanh.
(FPCore (x)
:name "Rust f32::atanh"
:precision binary32
(* 0.5 (log1p (/ (* 2.0 x) (- 1.0 x)))))