
Specification:
(FPCore (x) :precision binary64 (atanh x))
double code(double x) {
return atanh(x);
}
def code(x): return math.atanh(x)
function code(x) return atanh(x) end
function tmp = code(x) tmp = atanh(x); end
code[x_] := N[ArcTanh[x], $MachinePrecision]
\begin{array}{l}
\tanh^{-1} x
\end{array}
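For reference, the specification and the log1p-based initial program later in this report (the input named "Rust f64::atanh" below) agree by the identity

\begin{array}{l}
\tanh^{-1} x = \frac{1}{2}\ln\frac{1+x}{1-x} = \frac{1}{2}\ln\left(1 + \frac{2 \cdot x}{1 - x}\right) = 0.5 \cdot \mathsf{log1p}\left(\frac{2 \cdot x}{1 - x}\right)
\end{array}

valid for |x| < 1, where log1p(t) computes ln(1 + t) accurately for small t.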
Sampling outcomes in binary64 precision.
Herbie found 4 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
Alternative 1:
(FPCore (x) :precision binary64 (* 0.5 (log1p (/ (* 2.0 x) (- 1.0 x)))))
double code(double x) {
return 0.5 * log1p(((2.0 * x) / (1.0 - x)));
}
public static double code(double x) {
return 0.5 * Math.log1p(((2.0 * x) / (1.0 - x)));
}
def code(x): return 0.5 * math.log1p(((2.0 * x) / (1.0 - x)))
function code(x) return Float64(0.5 * log1p(Float64(Float64(2.0 * x) / Float64(1.0 - x)))) end
code[x_] := N[(0.5 * N[Log[1 + N[(N[(2.0 * x), $MachinePrecision] / N[(1.0 - x), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
0.5 \cdot \mathsf{log1p}\left(\frac{2 \cdot x}{1 - x}\right)
\end{array}
Derivation:
Initial program 100.0%
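Alternative 1 is the initial program itself, so it serves as the 100% accuracy baseline. A minimal Python spot-check against the library atanh; the helper name and sample points are illustrative, not taken from the report:

```python
import math

def alt1(x):
    # Alternative 1 / initial program: 0.5 * log1p(2x / (1 - x))
    return 0.5 * math.log1p((2.0 * x) / (1.0 - x))

# Compare against the library atanh at a few points in (-1, 1).
for x in (1e-300, 1e-8, 0.5, 0.999999):
    print(f"{x:9.3e}  alt1={alt1(x):.17g}  math.atanh={math.atanh(x):.17g}")
```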
Alternative 2:
(FPCore (x) :precision binary64 (* 0.5 (log1p (/ 2.0 (+ (/ 1.0 x) -1.0)))))
double code(double x) {
return 0.5 * log1p((2.0 / ((1.0 / x) + -1.0)));
}
public static double code(double x) {
return 0.5 * Math.log1p((2.0 / ((1.0 / x) + -1.0)));
}
def code(x): return 0.5 * math.log1p((2.0 / ((1.0 / x) + -1.0)))
function code(x) return Float64(0.5 * log1p(Float64(2.0 / Float64(Float64(1.0 / x) + -1.0)))) end
code[x_] := N[(0.5 * N[Log[1 + N[(2.0 / N[(N[(1.0 / x), $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
0.5 \cdot \mathsf{log1p}\left(\frac{2}{\frac{1}{x} - 1}\right)
\end{array}
Derivation:
Initial program 100.0%
add-log-exp 8.1%
*-un-lft-identity 8.1%
log-prod 8.1%
metadata-eval 8.1%
add-log-exp 100.0%
Applied egg-rr 100.0%
+-lft-identity 100.0%
associate-*l/ 100.0%
associate-/r/ 99.8%
div-sub 99.8%
sub-neg 99.8%
*-inverses 99.8%
metadata-eval 99.8%
Simplified 99.8%
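The derivation rewrites the argument of log1p: dividing the numerator and denominator of 2x/(1 - x) by x gives 2/(1/x - 1), which is what (+ (/ 1.0 x) -1.0) computes. The two forms are algebraically identical for x ≠ 0; a quick check (helper names and sample points are mine, standard math module only):

```python
import math

def alt1(x):
    # Argument written as 2x / (1 - x).
    return 0.5 * math.log1p((2.0 * x) / (1.0 - x))

def alt2(x):
    # Same argument rewritten as 2 / (1/x - 1); identical for x != 0.
    return 0.5 * math.log1p(2.0 / ((1.0 / x) + -1.0))

for x in (1e-12, 0.25, 0.75, 0.9999999):
    print(x, alt1(x), alt2(x), math.atanh(x))
```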
Alternative 3:
(FPCore (x) :precision binary64 (+ x (* (pow x 3.0) 0.3333333333333333)))
double code(double x) {
return x + (pow(x, 3.0) * 0.3333333333333333);
}
real(8) function code(x)
real(8), intent (in) :: x
code = x + ((x ** 3.0d0) * 0.3333333333333333d0)
end function
public static double code(double x) {
return x + (Math.pow(x, 3.0) * 0.3333333333333333);
}
def code(x): return x + (math.pow(x, 3.0) * 0.3333333333333333)
function code(x) return Float64(x + Float64((x ^ 3.0) * 0.3333333333333333)) end
function tmp = code(x) tmp = x + ((x ^ 3.0) * 0.3333333333333333); end
code[x_] := N[(x + N[(N[Power[x, 3.0], $MachinePrecision] * 0.3333333333333333), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
x + {x}^{3} \cdot 0.3333333333333333
\end{array}
Derivation:
Initial program 100.0%
Taylor expanded in x around 0 99.6%
distribute-lft-in 99.7%
*-rgt-identity 99.7%
*-commutative 99.7%
associate-*r* 99.7%
unpow2 99.7%
cube-mult 99.7%
Simplified 99.7%
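Alternative 3 truncates the Taylor series atanh x = x + x³/3 + x⁵/5 + ⋯ after the cubic term (0.3333333333333333 is the nearest binary64 value to 1/3). The truncation error grows like x⁵/5, so the polynomial is only accurate near 0; a sketch of the relative error at a few illustrative points:

```python
import math

def alt3(x):
    # Two-term Taylor polynomial of atanh around 0: x + x**3 / 3.
    return x + (x ** 3.0) * 0.3333333333333333

for x in (1e-4, 0.1, 0.5, 0.9):
    exact = math.atanh(x)
    rel = abs(alt3(x) - exact) / abs(exact)
    print(f"x={x:<6}  alt3={alt3(x):.12g}  relative error={rel:.2e}")
```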
Alternative 4:
(FPCore (x) :precision binary64 x)
double code(double x) {
return x;
}
real(8) function code(x)
real(8), intent (in) :: x
code = x
end function
public static double code(double x) {
return x;
}
def code(x): return x
function code(x) return x end
function tmp = code(x) tmp = x; end
code[x_] := x
\begin{array}{l}
x
\end{array}
Derivation:
Initial program 100.0%
Taylor expanded in x around 0 99.4%
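Alternative 4 keeps only the linear term of the same series:

\begin{array}{l}
\tanh^{-1} x = x + \frac{x^{3}}{3} + \frac{x^{5}}{5} + \cdots
\end{array}

so approximating atanh x by x carries a relative error of roughly x²/3, which falls below the binary64 machine epsilon (2⁻⁵² ≈ 2.2 × 10⁻¹⁶) once |x| is below about 2.6 × 10⁻⁸. In that regime, returning x is both the cheapest and an essentially exact answer.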
Reproduce:
herbie shell --seed 2024113
(FPCore (x)
:name "Rust f64::atanh"
:precision binary64
(* 0.5 (log1p (/ (* 2.0 x) (- 1.0 x)))))
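To reproduce this report, run the command above and enter the FPCore expression at the shell prompt; herbie shell reads FPCore input and prints the alternatives it finds. The --seed flag fixes the random sampling, though results for a given seed may still differ across Herbie versions.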