
Rust f64::atanh

(FPCore (x) :precision binary64 (atanh x))
double code(double x) {
return atanh(x);
}
def code(x): return math.atanh(x)
function code(x) return atanh(x) end
function tmp = code(x) tmp = atanh(x); end
code[x_] := N[ArcTanh[x], $MachinePrecision]
\begin{array}{l}
\tanh^{-1} x
\end{array}
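For reference, the hyperbolic arctangent has the closed form atanh(x) = 0.5 * ln((1 + x) / (1 - x)) for |x| < 1, which is the identity every alternative below rearranges. A minimal stdlib-only Python check (the helper name `atanh_ref` is ours, not part of the report):

```python
import math

def atanh_ref(x):
    # Closed form: atanh(x) = 0.5 * ln((1 + x) / (1 - x)), valid for |x| < 1.
    return 0.5 * math.log((1.0 + x) / (1.0 - x))

for x in (1e-8, 0.5, 0.999999):
    print(x, math.atanh(x), atanh_ref(x))
```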
Sampling outcomes in binary64 precision:
Herbie found 8 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
Alternative 1:

(FPCore (x) :precision binary64 (* 0.5 (log1p (/ (* 2.0 x) (- 1.0 x)))))
double code(double x) {
return 0.5 * log1p(((2.0 * x) / (1.0 - x)));
}
public static double code(double x) {
return 0.5 * Math.log1p(((2.0 * x) / (1.0 - x)));
}
def code(x): return 0.5 * math.log1p(((2.0 * x) / (1.0 - x)))
function code(x) return Float64(0.5 * log1p(Float64(Float64(2.0 * x) / Float64(1.0 - x)))) end
code[x_] := N[(0.5 * N[Log[1 + N[(N[(2.0 * x), $MachinePrecision] / N[(1.0 - x), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
0.5 \cdot \mathsf{log1p}\left(\frac{2 \cdot x}{1 - x}\right)
\end{array}
Initial program 100.0%
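Alternative 1 is exactly the input program (compare the FPCore in the Reproduce section at the end). It applies the identity (1 + x)/(1 - x) = 1 + 2x/(1 - x), so atanh(x) = 0.5 * log1p(2x/(1 - x)). The point of log1p is that for tiny |x| the small quantity 2x/(1 - x) reaches the logarithm directly instead of being rounded into 1 + x first. A sketch of the difference (plain Python, helper names ours):

```python
import math

def atanh_log(x):
    # Naive closed form; loses accuracy for tiny |x| because (1+x)/(1-x)
    # rounds to a value indistinguishable from 1 before the log is taken.
    return 0.5 * math.log((1.0 + x) / (1.0 - x))

def atanh_log1p(x):
    # Alternative 1: the small argument 2x/(1-x) is never rounded into 1.
    return 0.5 * math.log1p((2.0 * x) / (1.0 - x))

x = 1e-17
print(atanh_log(x))    # 0.0: 1 + x rounded to 1.0, all information lost
print(atanh_log1p(x))  # ~1e-17, matching math.atanh(x)
```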
Alternative 2:

(FPCore (x) :precision binary64 (* 0.5 (log1p (* (/ -2.0 (- x 1.0)) x))))
double code(double x) {
return 0.5 * log1p(((-2.0 / (x - 1.0)) * x));
}
public static double code(double x) {
return 0.5 * Math.log1p(((-2.0 / (x - 1.0)) * x));
}
def code(x): return 0.5 * math.log1p(((-2.0 / (x - 1.0)) * x))
function code(x) return Float64(0.5 * log1p(Float64(Float64(-2.0 / Float64(x - 1.0)) * x))) end
code[x_] := N[(0.5 * N[Log[1 + N[(N[(-2.0 / N[(x - 1.0), $MachinePrecision]), $MachinePrecision] * x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
0.5 \cdot \mathsf{log1p}\left(\frac{-2}{x - 1} \cdot x\right)
\end{array}
Initial program 100.0%
lift-/.f64 N/A
lift-*.f64 N/A
*-commutative N/A
associate-/l* N/A
*-commutative N/A
lower-*.f64 N/A
frac-2neg N/A
lower-/.f64 N/A
metadata-eval N/A
neg-sub0 N/A
lift--.f64 N/A
sub-neg N/A
+-commutative N/A
associate--r+ N/A
neg-sub0 N/A
remove-double-neg N/A
lower--.f64 100.0
Applied rewrites 100.0%
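Alternative 2 only reshuffles signs in the quotient: 2x/(1 - x) = (-2/(x - 1)) * x, which is what the frac-2neg and sub-neg steps above accomplish. The two forms are algebraically identical but round in a different order, so they can disagree in the last ulp. A quick check (plain Python, names ours):

```python
import math

def alt1(x):
    return 0.5 * math.log1p((2.0 * x) / (1.0 - x))

def alt2(x):
    # Same quotient with the signs moved: 2x/(1-x) == (-2/(x-1))*x.
    # The division and multiplication round in a different order, so the
    # result may differ from alt1 by an ulp on some inputs.
    return 0.5 * math.log1p((-2.0 / (x - 1.0)) * x)

for x in (-0.75, 1e-12, 0.5):
    print(x, alt1(x), alt2(x), alt1(x) == alt2(x))
```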
Alternative 3:

(FPCore (x)
:precision binary64
(*
(/
x
(fma
(fma
(fma -0.02328042328042328 (* x x) -0.044444444444444446)
(* x x)
-0.16666666666666666)
(* x x)
0.5))
0.5))
double code(double x) {
return (x / fma(fma(fma(-0.02328042328042328, (x * x), -0.044444444444444446), (x * x), -0.16666666666666666), (x * x), 0.5)) * 0.5;
}
function code(x) return Float64(Float64(x / fma(fma(fma(-0.02328042328042328, Float64(x * x), -0.044444444444444446), Float64(x * x), -0.16666666666666666), Float64(x * x), 0.5)) * 0.5) end
code[x_] := N[(N[(x / N[(N[(N[(-0.02328042328042328 * N[(x * x), $MachinePrecision] + -0.044444444444444446), $MachinePrecision] * N[(x * x), $MachinePrecision] + -0.16666666666666666), $MachinePrecision] * N[(x * x), $MachinePrecision] + 0.5), $MachinePrecision]), $MachinePrecision] * 0.5), $MachinePrecision]
\begin{array}{l}
\frac{x}{\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(-0.02328042328042328, x \cdot x, -0.044444444444444446\right), x \cdot x, -0.16666666666666666\right), x \cdot x, 0.5\right)} \cdot 0.5
\end{array}
Initial program 100.0%
Taylor expanded in x around 0
*-commutative N/A
lower-*.f64 N/A
+-commutative N/A
*-commutative N/A
lower-fma.f64 N/A
+-commutative N/A
*-commutative N/A
lower-fma.f64 N/A
+-commutative N/A
lower-fma.f64 N/A
unpow2 N/A
lower-*.f64 N/A
unpow2 N/A
lower-*.f64 N/A
unpow2 N/A
lower-*.f64 99.4
Applied rewrites 99.4%
Applied rewrites 99.4%
Taylor expanded in x around 0
Applied rewrites 99.4%
lift-*.f64 N/A
*-commutative N/A
lower-*.f64 99.4
Applied rewrites 99.4%
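The constants in alternative 3 are the Taylor coefficients of x/(2 atanh x) around 0: x/(2 atanh x) = 1/2 - x^2/6 - 2x^4/45 - 22x^6/945 + O(x^8), and indeed 1/6 = 0.1666..., 2/45 = 0.04444..., 22/945 = 0.0232804.... Herbie evaluates that polynomial Horner-style with fused multiply-adds and divides. A sketch of the same evaluation; note that math.fma requires Python 3.13+, and a plain a*b + c fallback rounds twice per step:

```python
import math  # math.fma is available from Python 3.13

def alt3(x):
    x2 = x * x
    # Degree-6 Taylor polynomial of x / (2 * atanh(x)) around 0:
    #   0.5 - x^2/6 - 2x^4/45 - 22x^6/945
    # evaluated innermost-first; each fma incurs a single rounding.
    p = math.fma(
        math.fma(
            math.fma(-0.02328042328042328, x2, -0.044444444444444446),
            x2, -0.16666666666666666),
        x2, 0.5)
    return (x / p) * 0.5

print(alt3(0.1), math.atanh(0.1))  # truncation error is O(x^8), tiny near 0
```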
Alternative 4:

(FPCore (x) :precision binary64 (* (/ x (fma (fma (* x x) -0.044444444444444446 -0.16666666666666666) (* x x) 0.5)) 0.5))
double code(double x) {
return (x / fma(fma((x * x), -0.044444444444444446, -0.16666666666666666), (x * x), 0.5)) * 0.5;
}
function code(x) return Float64(Float64(x / fma(fma(Float64(x * x), -0.044444444444444446, -0.16666666666666666), Float64(x * x), 0.5)) * 0.5) end
code[x_] := N[(N[(x / N[(N[(N[(x * x), $MachinePrecision] * -0.044444444444444446 + -0.16666666666666666), $MachinePrecision] * N[(x * x), $MachinePrecision] + 0.5), $MachinePrecision]), $MachinePrecision] * 0.5), $MachinePrecision]
\begin{array}{l}
\frac{x}{\mathsf{fma}\left(\mathsf{fma}\left(x \cdot x, -0.044444444444444446, -0.16666666666666666\right), x \cdot x, 0.5\right)} \cdot 0.5
\end{array}
Initial program 100.0%
Taylor expanded in x around 0
*-commutative N/A
lower-*.f64 N/A
+-commutative N/A
*-commutative N/A
lower-fma.f64 N/A
+-commutative N/A
*-commutative N/A
lower-fma.f64 N/A
+-commutative N/A
lower-fma.f64 N/A
unpow2 N/A
lower-*.f64 N/A
unpow2 N/A
lower-*.f64 N/A
unpow2 N/A
lower-*.f64 99.4
Applied rewrites 99.4%
Applied rewrites 99.4%
Taylor expanded in x around 0
Applied rewrites 99.4%
lift-*.f64 N/A
*-commutative N/A
lower-*.f64 99.4
Applied rewrites 99.4%
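Alternative 4 is the same denominator polynomial truncated after the x^4 term, trading one fma for a slightly larger truncation error away from 0. A sketch under the same Python 3.13+ math.fma assumption:

```python
import math  # math.fma requires Python 3.13+

def alt4(x):
    x2 = x * x
    # Alternative 3's polynomial with the x^6 term dropped:
    #   0.5 - x^2/6 - 2x^4/45
    p = math.fma(math.fma(x2, -0.044444444444444446, -0.16666666666666666),
                 x2, 0.5)
    return (x / p) * 0.5

for x in (0.1, 0.3, 0.6):
    exact = math.atanh(x)
    print(x, abs(alt4(x) - exact) / exact)  # error grows roughly as x^6
```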
Alternative 5:

(FPCore (x) :precision binary64 (* 0.5 (* (fma (fma 0.4 (* x x) 0.6666666666666666) (* x x) 2.0) x)))
double code(double x) {
return 0.5 * (fma(fma(0.4, (x * x), 0.6666666666666666), (x * x), 2.0) * x);
}
function code(x) return Float64(0.5 * Float64(fma(fma(0.4, Float64(x * x), 0.6666666666666666), Float64(x * x), 2.0) * x)) end
code[x_] := N[(0.5 * N[(N[(N[(0.4 * N[(x * x), $MachinePrecision] + 0.6666666666666666), $MachinePrecision] * N[(x * x), $MachinePrecision] + 2.0), $MachinePrecision] * x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
0.5 \cdot \left(\mathsf{fma}\left(\mathsf{fma}\left(0.4, x \cdot x, 0.6666666666666666\right), x \cdot x, 2\right) \cdot x\right)
\end{array}
Initial program 100.0%
Taylor expanded in x around 0
*-commutative N/A
lower-*.f64 N/A
+-commutative N/A
*-commutative N/A
lower-fma.f64 N/A
+-commutative N/A
lower-fma.f64 N/A
unpow2 N/A
lower-*.f64 N/A
unpow2 N/A
lower-*.f64 99.4
Applied rewrites 99.4%
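Alternatives 5 through 7 switch to the direct Taylor series atanh(x) = x + x^3/3 + x^5/5 + O(x^7): alternative 5 keeps three terms, while the next two listings keep only x + x^3/3 in two different fma arrangements, and alternative 8 at the end keeps just the leading term. A sketch of alternative 5 (same Python 3.13+ math.fma assumption as above):

```python
import math  # math.fma requires Python 3.13+

def alt5(x):
    x2 = x * x
    # First three terms of atanh(x) = x + x^3/3 + x^5/5 + O(x^7),
    # factored as 0.5 * ((0.4*x^2 + 2/3)*x^2 + 2) * x.
    return 0.5 * (math.fma(math.fma(0.4, x2, 0.6666666666666666), x2, 2.0) * x)

x = 0.05
print(alt5(x), math.atanh(x))  # truncation error is O(x^7)
```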
Alternative 6:

(FPCore (x) :precision binary64 (* 0.5 (fma (* (* x x) 0.6666666666666666) x (* 2.0 x))))
double code(double x) {
return 0.5 * fma(((x * x) * 0.6666666666666666), x, (2.0 * x));
}
function code(x) return Float64(0.5 * fma(Float64(Float64(x * x) * 0.6666666666666666), x, Float64(2.0 * x))) end
code[x_] := N[(0.5 * N[(N[(N[(x * x), $MachinePrecision] * 0.6666666666666666), $MachinePrecision] * x + N[(2.0 * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
0.5 \cdot \mathsf{fma}\left(\left(x \cdot x\right) \cdot 0.6666666666666666, x, 2 \cdot x\right)
\end{array}
Initial program 100.0%
Taylor expanded in x around 0
*-commutative N/A
lower-*.f64 N/A
+-commutative N/A
lower-fma.f64 N/A
unpow2 N/A
lower-*.f64 99.3
Applied rewrites 99.3%
Applied rewrites 99.3%
Alternative 7:

(FPCore (x) :precision binary64 (* 0.5 (* (fma 0.6666666666666666 (* x x) 2.0) x)))
double code(double x) {
return 0.5 * (fma(0.6666666666666666, (x * x), 2.0) * x);
}
function code(x) return Float64(0.5 * Float64(fma(0.6666666666666666, Float64(x * x), 2.0) * x)) end
code[x_] := N[(0.5 * N[(N[(0.6666666666666666 * N[(x * x), $MachinePrecision] + 2.0), $MachinePrecision] * x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
0.5 \cdot \left(\mathsf{fma}\left(0.6666666666666666, x \cdot x, 2\right) \cdot x\right)
\end{array}
Initial program 100.0%
Taylor expanded in x around 0
*-commutative N/A
lower-*.f64 N/A
+-commutative N/A
lower-fma.f64 N/A
unpow2 N/A
lower-*.f64 99.3
Applied rewrites 99.3%
Alternative 8:

(FPCore (x) :precision binary64 (* 0.5 (* 2.0 x)))
double code(double x) {
return 0.5 * (2.0 * x);
}
real(8) function code(x)
real(8), intent (in) :: x
code = 0.5d0 * (2.0d0 * x)
end function
public static double code(double x) {
return 0.5 * (2.0 * x);
}
def code(x): return 0.5 * (2.0 * x)
function code(x) return Float64(0.5 * Float64(2.0 * x)) end
function tmp = code(x) tmp = 0.5 * (2.0 * x); end
code[x_] := N[(0.5 * N[(2.0 * x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
0.5 \cdot \left(2 \cdot x\right)
\end{array}
Initial program 100.0%
Taylor expanded in x around 0
lower-*.f64 99.0
Applied rewrites 99.0%
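Alternative 8 collapses to the leading term alone: 0.5 * (2.0 * x) is exactly x in binary64 (both factors are powers of two, so neither operation rounds), making it the small-angle approximation atanh(x) ~ x with relative error about x^2/3. A tiny check:

```python
import math

# 0.5 * (2.0 * x) is exactly x in binary64 (power-of-two scalings are exact),
# so alternative 8 is the small-angle approximation atanh(x) ~= x.
for x in (1e-9, 0.01, 0.3):
    print(x, abs(x - math.atanh(x)) / math.atanh(x))  # relative error ~ x^2/3
```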
Reproduce:

herbie shell --seed 2024313
(FPCore (x)
:name "Rust f64::atanh"
:precision binary64
(* 0.5 (log1p (/ (* 2.0 x) (- 1.0 x)))))
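To reproduce this report, run the herbie shell command above (the seed fixes the random input sampling) and paste in this FPCore. The :name annotation identifies the benchmark as Rust's f64::atanh implementation, which is why alternative 1 matches the input program exactly.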