
(FPCore (x) :precision binary64 (atanh x))
double code(double x) {
    return atanh(x);
}
def code(x): return math.atanh(x)
function code(x) return atanh(x) end
function tmp = code(x) tmp = atanh(x); end
code[x_] := N[ArcTanh[x], $MachinePrecision]
\begin{array}{l}
\tanh^{-1} x
\end{array}
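Every alternative below starts from the standard logarithmic identity for the inverse hyperbolic tangent, rearranged so that the logarithm's argument has the form 1 + t. For reference (this identity is standard mathematics, not part of the report itself):

\begin{array}{l}
\tanh^{-1} x = \frac{1}{2} \cdot \log\left(\frac{1 + x}{1 - x}\right) = \frac{1}{2} \cdot \log\left(1 + \frac{2 \cdot x}{1 - x}\right)
\end{array}

The second step uses (1 + x)/(1 - x) = 1 + 2x/(1 - x), which is exactly the shape that log1p evaluates accurately when 2x/(1 - x) is small.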
Sampling outcomes in binary64 precision:
Herbie found 4 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
Alternative 1:
(FPCore (x) :precision binary64 (* 0.5 (log1p (/ (* 2.0 x) (- 1.0 x)))))
double code(double x) {
    return 0.5 * log1p(((2.0 * x) / (1.0 - x)));
}
public static double code(double x) {
    return 0.5 * Math.log1p(((2.0 * x) / (1.0 - x)));
}
def code(x): return 0.5 * math.log1p(((2.0 * x) / (1.0 - x)))
function code(x) return Float64(0.5 * log1p(Float64(Float64(2.0 * x) / Float64(1.0 - x)))) end
code[x_] := N[(0.5 * N[Log[1 + N[(N[(2.0 * x), $MachinePrecision] / N[(1.0 - x), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
0.5 \cdot \mathsf{log1p}\left(\frac{2 \cdot x}{1 - x}\right)
\end{array}
Derivation:
Initial program 100.0%
(This alternative keeps the initial program unchanged, so its derivation has only the initial step.)
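As a quick sanity check (a standalone sketch, not Herbie output; the function names here are illustrative), the log1p form can be compared against a naive evaluation of the identity and against the library atanh:

import math

def atanh_naive(x):
    # 0.5 * log((1 + x) / (1 - x)): for |x| below roughly 1e-16,
    # 1.0 + x and 1.0 - x both round to 1.0, so this collapses to
    # log(1.0) == 0.0 and all accuracy is lost.
    return 0.5 * math.log((1.0 + x) / (1.0 - x))

def atanh_log1p(x):
    # The form used by this alternative: 0.5 * log1p(2x / (1 - x)).
    return 0.5 * math.log1p((2.0 * x) / (1.0 - x))

for x in [1e-3, 1e-8, 1e-300]:
    ref = math.atanh(x)  # library reference value
    print(x, atanh_naive(x) - ref, atanh_log1p(x) - ref)

For x = 1e-300 the naive form returns 0.0 exactly while the log1p form still returns x to full precision, which illustrates the cancellation problem the log1p shape avoids.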
Alternative 2:
(FPCore (x) :precision binary64 (* 0.5 (log1p (/ 1.0 (+ (/ 0.5 x) -0.5)))))
double code(double x) {
    return 0.5 * log1p((1.0 / ((0.5 / x) + -0.5)));
}
public static double code(double x) {
    return 0.5 * Math.log1p((1.0 / ((0.5 / x) + -0.5)));
}
def code(x): return 0.5 * math.log1p((1.0 / ((0.5 / x) + -0.5)))
function code(x) return Float64(0.5 * log1p(Float64(1.0 / Float64(Float64(0.5 / x) + -0.5)))) end
code[x_] := N[(0.5 * N[Log[1 + N[(1.0 / N[(N[(0.5 / x), $MachinePrecision] + -0.5), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
0.5 \cdot \mathsf{log1p}\left(\frac{1}{\frac{0.5}{x} + -0.5}\right)
\end{array}
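In exact real arithmetic this argument is the same quantity as in Alternative 1; dividing the numerator and denominator of 2x/(1 - x) by 2x shows the equivalence:

\begin{array}{l}
\frac{2 \cdot x}{1 - x} = \frac{1}{\frac{1 - x}{2 \cdot x}} = \frac{1}{\frac{0.5}{x} - 0.5}
\end{array}

In binary64 the two forms round differently (this one replaces a multiplication and a division with two divisions), which is consistent with the slightly lower 99.8% accuracy reported after simplification in the derivation below.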
Derivation:
Initial program 100.0%
add-log-exp 8.7%
*-un-lft-identity 8.7%
log-prod 8.7%
metadata-eval 8.7%
add-log-exp 100.0%
*-commutative 100.0%
associate-/l* 100.0%
Applied egg-rr 100.0%
+-lft-identity 100.0%
associate-*r/ 100.0%
*-commutative 100.0%
*-lft-identity 100.0%
associate-*l/ 100.0%
associate-/r/ 99.8%
div-sub 99.8%
sub-neg 99.8%
associate-/r* 99.8%
metadata-eval 99.8%
*-commutative 99.8%
associate-/r* 99.8%
*-inverses 99.8%
metadata-eval 99.8%
metadata-eval 99.8%
Simplified 99.8%
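Each line above names a rewrite rule together with the measured accuracy after applying it; "Applied egg-rr" marks a batch of rewrites found by Herbie's e-graph rewriting pass (built on the egg library), and "Simplified" is the final cleanup that yields the expression shown for this alternative.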
Alternative 3:
(FPCore (x) :precision binary64 (* 0.5 (log1p (* 2.0 x))))
double code(double x) {
    return 0.5 * log1p((2.0 * x));
}
public static double code(double x) {
    return 0.5 * Math.log1p((2.0 * x));
}
def code(x): return 0.5 * math.log1p((2.0 * x))
function code(x) return Float64(0.5 * log1p(Float64(2.0 * x))) end
code[x_] := N[(0.5 * N[Log[1 + N[(2.0 * x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
0.5 \cdot \mathsf{log1p}\left(2 \cdot x\right)
\end{array}
Derivation:
Initial program 100.0%
Taylor expanded in x around 0 97.7%
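The expansion in effect replaces the log1p argument with the leading term of its series at x = 0 (a standard geometric-series expansion, shown here for reference):

\begin{array}{l}
\frac{2 \cdot x}{1 - x} = 2 \cdot x \cdot \left(1 + x + x^{2} + \cdots\right) = 2 \cdot x + 2 \cdot x^{2} + \cdots
\end{array}

Keeping only the 2x term gives the cheaper 0.5 · log1p(2x), which is accurate near 0 and degrades for larger |x|, matching the 97.7% figure.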
Alternative 4:
(FPCore (x) :precision binary64 (* 0.5 (log1p -2.0)))
double code(double x) {
    return 0.5 * log1p(-2.0);
}
public static double code(double x) {
    return 0.5 * Math.log1p(-2.0);
}
def code(x): return 0.5 * math.log1p(-2.0)
function code(x) return Float64(0.5 * log1p(-2.0)) end
code[x_] := N[(0.5 * N[Log[1 + -2.0], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
0.5 \cdot \mathsf{log1p}\left(-2\right)
\end{array}
Derivation:
Initial program 100.0%
Taylor expanded in x around inf 0.0%
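Here the expansion point lies outside atanh's domain (-1, 1): as x → ∞ the argument 2x/(1 - x) tends to -2, and 0.5 · log1p(-2) = 0.5 · log(-1) is a domain error (NaN in binary64), so the 0.0% accuracy is expected.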
Reproduce:
herbie shell --seed 2024089
(FPCore (x)
:name "Rust f64::atanh"
:precision binary64
(* 0.5 (log1p (/ (* 2.0 x) (- 1.0 x)))))
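To reproduce this report, start the shell with the seed shown and enter the FPCore above: herbie shell reads FPCore expressions from standard input and prints an improved program for each, and the --seed argument pins Herbie's random input sampling so the accuracy numbers should match those reported here.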