
(FPCore (x) :precision binary64 (atanh x))
double code(double x) {
return atanh(x);
}
def code(x):
    """Reference implementation: atanh(x) via the math module."""
    return math.atanh(x)
-- Reference implementation (Lua); assumes an atanh function is in scope
-- (standard Lua's math library has no atanh -- TODO confirm the host provides one).
function code(x) return atanh(x) end
% Reference implementation (MATLAB/Octave): built-in atanh.
function tmp = code(x) tmp = atanh(x); end
(* Reference implementation (Mathematica): ArcTanh rounded to machine precision. *)
code[x_] := N[ArcTanh[x], $MachinePrecision]
\begin{array}{l}
\\
\tanh^{-1} x
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 9 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x) :precision binary64 (* 0.5 (log1p (/ (* 2.0 x) (- 1.0 x)))))
double code(double x) {
return 0.5 * log1p(((2.0 * x) / (1.0 - x)));
}
// Herbie alternative: atanh(x) = 0.5*log1p(2x/(1-x)); Math.log1p
// avoids cancellation for arguments near zero.
public static double code(double x) {
return 0.5 * Math.log1p(((2.0 * x) / (1.0 - x)));
}
def code(x):
    """Herbie alternative: atanh(x) as 0.5*log1p(2x/(1-x)).

    Uses math.log1p to avoid cancellation near zero. The floating-point
    operation order matches the original one-liner, so results are
    bit-identical.
    """
    numerator = 2.0 * x
    denominator = 1.0 - x
    return 0.5 * math.log1p(numerator / denominator)
# Herbie alternative (Julia): atanh(x) as 0.5*log1p(2x/(1-x)), every step rounded to Float64.
function code(x) return Float64(0.5 * log1p(Float64(Float64(2.0 * x) / Float64(1.0 - x)))) end
(* Herbie alternative (Mathematica): atanh(x) as 0.5*Log[1 + 2x/(1-x)], each step rounded to machine precision. *)
code[x_] := N[(0.5 * N[Log[1 + N[(N[(2.0 * x), $MachinePrecision] / N[(1.0 - x), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
0.5 \cdot \mathsf{log1p}\left(\frac{2 \cdot x}{1 - x}\right)
\end{array}
(FPCore (x) :precision binary64 (* 0.5 (log1p (/ (* 2.0 x) (- 1.0 x)))))
// Duplicate listing of the 0.5*log1p(2x/(1-x)) alternative for atanh(x).
double code(double x) {
return 0.5 * log1p(((2.0 * x) / (1.0 - x)));
}
// Duplicate listing of the 0.5*Math.log1p(2x/(1-x)) alternative for atanh(x).
public static double code(double x) {
return 0.5 * Math.log1p(((2.0 * x) / (1.0 - x)));
}
# Duplicate listing of the 0.5*log1p(2x/(1-x)) alternative for atanh(x).
def code(x): return 0.5 * math.log1p(((2.0 * x) / (1.0 - x)))
# Duplicate listing (Julia) of the 0.5*log1p(2x/(1-x)) alternative for atanh(x).
function code(x) return Float64(0.5 * log1p(Float64(Float64(2.0 * x) / Float64(1.0 - x)))) end
(* Duplicate listing (Mathematica) of the 0.5*Log[1 + 2x/(1-x)] alternative for atanh(x). *)
code[x_] := N[(0.5 * N[Log[1 + N[(N[(2.0 * x), $MachinePrecision] / N[(1.0 - x), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
0.5 \cdot \mathsf{log1p}\left(\frac{2 \cdot x}{1 - x}\right)
\end{array}
Initial program 100.0%
(FPCore (x)
:precision binary64
(/
(*
x
(-
1.0
(*
(* x (* x (* x x)))
(+ 0.1111111111111111 (* x (* x 0.13333333333333333))))))
(-
1.0
(*
(* x x)
(+
0.3333333333333333
(* x (* x (+ 0.2 (* x (* x 0.14285714285714285))))))))))
// Herbie alternative: rational approximation of atanh(x) derived from its
// Taylor expansion about x = 0 (odd numerator over even denominator).
// The association/order of the floating-point operations is intentional.
double code(double x) {
return (x * (1.0 - ((x * (x * (x * x))) * (0.1111111111111111 + (x * (x * 0.13333333333333333)))))) / (1.0 - ((x * x) * (0.3333333333333333 + (x * (x * (0.2 + (x * (x * 0.14285714285714285))))))));
}
! Herbie alternative: rational approximation of atanh(x) derived from its
! Taylor expansion about x = 0. Fix: added implicit none (was missing).
! The expression is kept byte-identical to the generated FPCore form so
! the floating-point behavior is unchanged; do not reassociate.
real(8) function code(x)
    implicit none
    real(8), intent (in) :: x
    code = (x * (1.0d0 - ((x * (x * (x * x))) * (0.1111111111111111d0 + (x * (x * 0.13333333333333333d0)))))) / (1.0d0 - ((x * x) * (0.3333333333333333d0 + (x * (x * (0.2d0 + (x * (x * 0.14285714285714285d0))))))))
end function
// Herbie alternative: rational approximation of atanh(x) about x = 0;
// the association/order of the floating-point operations is intentional.
public static double code(double x) {
return (x * (1.0 - ((x * (x * (x * x))) * (0.1111111111111111 + (x * (x * 0.13333333333333333)))))) / (1.0 - ((x * x) * (0.3333333333333333 + (x * (x * (0.2 + (x * (x * 0.14285714285714285))))))));
}
# Herbie alternative: rational approximation of atanh(x) about x = 0; operation order is intentional.
def code(x): return (x * (1.0 - ((x * (x * (x * x))) * (0.1111111111111111 + (x * (x * 0.13333333333333333)))))) / (1.0 - ((x * x) * (0.3333333333333333 + (x * (x * (0.2 + (x * (x * 0.14285714285714285))))))))
function code(x) return Float64(Float64(x * Float64(1.0 - Float64(Float64(x * Float64(x * Float64(x * x))) * Float64(0.1111111111111111 + Float64(x * Float64(x * 0.13333333333333333)))))) / Float64(1.0 - Float64(Float64(x * x) * Float64(0.3333333333333333 + Float64(x * Float64(x * Float64(0.2 + Float64(x * Float64(x * 0.14285714285714285))))))))) end
function tmp = code(x) tmp = (x * (1.0 - ((x * (x * (x * x))) * (0.1111111111111111 + (x * (x * 0.13333333333333333)))))) / (1.0 - ((x * x) * (0.3333333333333333 + (x * (x * (0.2 + (x * (x * 0.14285714285714285)))))))); end
code[x_] := N[(N[(x * N[(1.0 - N[(N[(x * N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(0.1111111111111111 + N[(x * N[(x * 0.13333333333333333), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(1.0 - N[(N[(x * x), $MachinePrecision] * N[(0.3333333333333333 + N[(x * N[(x * N[(0.2 + N[(x * N[(x * 0.14285714285714285), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{x \cdot \left(1 - \left(x \cdot \left(x \cdot \left(x \cdot x\right)\right)\right) \cdot \left(0.1111111111111111 + x \cdot \left(x \cdot 0.13333333333333333\right)\right)\right)}{1 - \left(x \cdot x\right) \cdot \left(0.3333333333333333 + x \cdot \left(x \cdot \left(0.2 + x \cdot \left(x \cdot 0.14285714285714285\right)\right)\right)\right)}
\end{array}
Initial program 100.0%
Taylor expanded in x around 0
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
unpow2N/A
associate-*l*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6499.5%
Simplified99.5%
*-commutativeN/A
flip-+N/A
associate-*l/N/A
/-lowering-/.f64N/A
Applied egg-rr99.5%
Taylor expanded in x around 0
*-lowering-*.f64N/A
metadata-evalN/A
pow-sqrN/A
unpow2N/A
associate-*l*N/A
unpow2N/A
cube-multN/A
*-lowering-*.f64N/A
cube-multN/A
unpow2N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
unpow2N/A
associate-*r*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f6499.6%
Simplified99.6%
Final simplification99.6%
(FPCore (x)
:precision binary64
(*
(/
x
(-
1.0
(*
(* x x)
(+
0.3333333333333333
(* (* x x) (+ 0.2 (* (* x x) 0.14285714285714285)))))))
(-
1.0
(*
x
(*
(* x (* x x))
(+ 0.1111111111111111 (* x (* x 0.13333333333333333))))))))
// Herbie alternative: the same rational approximation of atanh(x),
// factored as (x / even-denominator) * odd-numerator correction.
// The association/order of the floating-point operations is intentional.
double code(double x) {
return (x / (1.0 - ((x * x) * (0.3333333333333333 + ((x * x) * (0.2 + ((x * x) * 0.14285714285714285))))))) * (1.0 - (x * ((x * (x * x)) * (0.1111111111111111 + (x * (x * 0.13333333333333333))))));
}
! Herbie alternative: rational approximation of atanh(x) about x = 0,
! factored as (x / denominator) * numerator-correction. Fix: added
! implicit none. Expression kept byte-identical; do not reassociate.
real(8) function code(x)
    implicit none
    real(8), intent (in) :: x
    code = (x / (1.0d0 - ((x * x) * (0.3333333333333333d0 + ((x * x) * (0.2d0 + ((x * x) * 0.14285714285714285d0))))))) * (1.0d0 - (x * ((x * (x * x)) * (0.1111111111111111d0 + (x * (x * 0.13333333333333333d0))))))
end function
// Herbie alternative: rational approximation of atanh(x) about x = 0,
// factored as (x / denominator) * numerator-correction; order intentional.
public static double code(double x) {
return (x / (1.0 - ((x * x) * (0.3333333333333333 + ((x * x) * (0.2 + ((x * x) * 0.14285714285714285))))))) * (1.0 - (x * ((x * (x * x)) * (0.1111111111111111 + (x * (x * 0.13333333333333333))))));
}
# Herbie alternative: rational approximation of atanh(x) factored as (x/denominator)*numerator; order intentional.
def code(x): return (x / (1.0 - ((x * x) * (0.3333333333333333 + ((x * x) * (0.2 + ((x * x) * 0.14285714285714285))))))) * (1.0 - (x * ((x * (x * x)) * (0.1111111111111111 + (x * (x * 0.13333333333333333))))))
function code(x) return Float64(Float64(x / Float64(1.0 - Float64(Float64(x * x) * Float64(0.3333333333333333 + Float64(Float64(x * x) * Float64(0.2 + Float64(Float64(x * x) * 0.14285714285714285))))))) * Float64(1.0 - Float64(x * Float64(Float64(x * Float64(x * x)) * Float64(0.1111111111111111 + Float64(x * Float64(x * 0.13333333333333333))))))) end
function tmp = code(x) tmp = (x / (1.0 - ((x * x) * (0.3333333333333333 + ((x * x) * (0.2 + ((x * x) * 0.14285714285714285))))))) * (1.0 - (x * ((x * (x * x)) * (0.1111111111111111 + (x * (x * 0.13333333333333333)))))); end
code[x_] := N[(N[(x / N[(1.0 - N[(N[(x * x), $MachinePrecision] * N[(0.3333333333333333 + N[(N[(x * x), $MachinePrecision] * N[(0.2 + N[(N[(x * x), $MachinePrecision] * 0.14285714285714285), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(1.0 - N[(x * N[(N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision] * N[(0.1111111111111111 + N[(x * N[(x * 0.13333333333333333), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{x}{1 - \left(x \cdot x\right) \cdot \left(0.3333333333333333 + \left(x \cdot x\right) \cdot \left(0.2 + \left(x \cdot x\right) \cdot 0.14285714285714285\right)\right)} \cdot \left(1 - x \cdot \left(\left(x \cdot \left(x \cdot x\right)\right) \cdot \left(0.1111111111111111 + x \cdot \left(x \cdot 0.13333333333333333\right)\right)\right)\right)
\end{array}
Initial program 100.0%
Taylor expanded in x around 0
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
unpow2N/A
associate-*l*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6499.5%
Simplified99.5%
*-commutativeN/A
flip-+N/A
associate-*l/N/A
/-lowering-/.f64N/A
Applied egg-rr99.5%
Taylor expanded in x around 0
*-lowering-*.f64N/A
metadata-evalN/A
pow-sqrN/A
unpow2N/A
associate-*l*N/A
unpow2N/A
cube-multN/A
*-lowering-*.f64N/A
cube-multN/A
unpow2N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
unpow2N/A
associate-*r*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f6499.6%
Simplified99.6%
associate-/l*N/A
*-commutativeN/A
*-lowering-*.f64N/A
Applied egg-rr99.6%
(FPCore (x)
:precision binary64
(+
x
(*
(* x (* x x))
(+
0.3333333333333333
(* x (* x (+ 0.2 (* x (* x 0.14285714285714285)))))))))
// Herbie alternative: degree-7 odd Taylor polynomial of atanh(x) about 0,
// x + x^3/3 + x^5/5 + x^7/7, in nested (Horner-like) form.
double code(double x) {
return x + ((x * (x * x)) * (0.3333333333333333 + (x * (x * (0.2 + (x * (x * 0.14285714285714285)))))));
}
! Herbie alternative: degree-7 odd Taylor polynomial of atanh(x) about 0
! (x + x^3/3 + x^5/5 + x^7/7). Fix: added implicit none. Expression kept
! byte-identical to the generated form; do not reassociate.
real(8) function code(x)
    implicit none
    real(8), intent (in) :: x
    code = x + ((x * (x * x)) * (0.3333333333333333d0 + (x * (x * (0.2d0 + (x * (x * 0.14285714285714285d0)))))))
end function
// Herbie alternative: degree-7 odd Taylor polynomial of atanh(x) about 0,
// x + x^3/3 + x^5/5 + x^7/7, in nested (Horner-like) form.
public static double code(double x) {
return x + ((x * (x * x)) * (0.3333333333333333 + (x * (x * (0.2 + (x * (x * 0.14285714285714285)))))));
}
# Herbie alternative: degree-7 odd Taylor polynomial of atanh(x) about 0 (x + x^3/3 + x^5/5 + x^7/7).
def code(x): return x + ((x * (x * x)) * (0.3333333333333333 + (x * (x * (0.2 + (x * (x * 0.14285714285714285)))))))
function code(x) return Float64(x + Float64(Float64(x * Float64(x * x)) * Float64(0.3333333333333333 + Float64(x * Float64(x * Float64(0.2 + Float64(x * Float64(x * 0.14285714285714285)))))))) end
function tmp = code(x) tmp = x + ((x * (x * x)) * (0.3333333333333333 + (x * (x * (0.2 + (x * (x * 0.14285714285714285))))))); end
code[x_] := N[(x + N[(N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision] * N[(0.3333333333333333 + N[(x * N[(x * N[(0.2 + N[(x * N[(x * 0.14285714285714285), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + \left(x \cdot \left(x \cdot x\right)\right) \cdot \left(0.3333333333333333 + x \cdot \left(x \cdot \left(0.2 + x \cdot \left(x \cdot 0.14285714285714285\right)\right)\right)\right)
\end{array}
Initial program 100.0%
Taylor expanded in x around 0
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
unpow2N/A
associate-*l*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6499.5%
Simplified99.5%
+-commutativeN/A
distribute-rgt-inN/A
*-lft-identityN/A
+-lowering-+.f64N/A
Applied egg-rr99.5%
Final simplification99.5%
(FPCore (x)
:precision binary64
(*
x
(+
1.0
(*
(* x x)
(+
0.3333333333333333
(* x (* x (+ 0.2 (* (* x x) 0.14285714285714285)))))))))
// Herbie alternative: degree-7 Taylor polynomial of atanh(x) about 0
// with the leading x factored out: x*(1 + x^2/3 + x^4/5 + x^6/7).
double code(double x) {
return x * (1.0 + ((x * x) * (0.3333333333333333 + (x * (x * (0.2 + ((x * x) * 0.14285714285714285)))))));
}
! Herbie alternative: degree-7 Taylor polynomial of atanh(x) about 0 with
! the leading x factored out. Fix: added implicit none. Expression kept
! byte-identical to the generated form; do not reassociate.
real(8) function code(x)
    implicit none
    real(8), intent (in) :: x
    code = x * (1.0d0 + ((x * x) * (0.3333333333333333d0 + (x * (x * (0.2d0 + ((x * x) * 0.14285714285714285d0)))))))
end function
// Herbie alternative: degree-7 Taylor polynomial of atanh(x) about 0
// with the leading x factored out: x*(1 + x^2/3 + x^4/5 + x^6/7).
public static double code(double x) {
return x * (1.0 + ((x * x) * (0.3333333333333333 + (x * (x * (0.2 + ((x * x) * 0.14285714285714285)))))));
}
# Herbie alternative: degree-7 Taylor polynomial of atanh(x) about 0 with x factored out.
def code(x): return x * (1.0 + ((x * x) * (0.3333333333333333 + (x * (x * (0.2 + ((x * x) * 0.14285714285714285)))))))
function code(x) return Float64(x * Float64(1.0 + Float64(Float64(x * x) * Float64(0.3333333333333333 + Float64(x * Float64(x * Float64(0.2 + Float64(Float64(x * x) * 0.14285714285714285)))))))) end
function tmp = code(x) tmp = x * (1.0 + ((x * x) * (0.3333333333333333 + (x * (x * (0.2 + ((x * x) * 0.14285714285714285))))))); end
code[x_] := N[(x * N[(1.0 + N[(N[(x * x), $MachinePrecision] * N[(0.3333333333333333 + N[(x * N[(x * N[(0.2 + N[(N[(x * x), $MachinePrecision] * 0.14285714285714285), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x \cdot \left(1 + \left(x \cdot x\right) \cdot \left(0.3333333333333333 + x \cdot \left(x \cdot \left(0.2 + \left(x \cdot x\right) \cdot 0.14285714285714285\right)\right)\right)\right)
\end{array}
Initial program 100.0%
Taylor expanded in x around 0
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
unpow2N/A
associate-*l*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6499.5%
Simplified99.5%
(FPCore (x) :precision binary64 (+ x (* x (* (* x x) (+ 0.3333333333333333 (* (* x x) 0.2))))))
// Herbie alternative: degree-5 odd Taylor polynomial of atanh(x) about 0,
// x + x^3/3 + x^5/5.
double code(double x) {
return x + (x * ((x * x) * (0.3333333333333333 + ((x * x) * 0.2))));
}
! Herbie alternative: degree-5 odd Taylor polynomial of atanh(x) about 0
! (x + x^3/3 + x^5/5). Fix: added implicit none. Expression kept
! byte-identical to the generated form; do not reassociate.
real(8) function code(x)
    implicit none
    real(8), intent (in) :: x
    code = x + (x * ((x * x) * (0.3333333333333333d0 + ((x * x) * 0.2d0))))
end function
// Herbie alternative: degree-5 odd Taylor polynomial of atanh(x) about 0,
// x + x^3/3 + x^5/5.
public static double code(double x) {
return x + (x * ((x * x) * (0.3333333333333333 + ((x * x) * 0.2))));
}
# Herbie alternative: degree-5 odd Taylor polynomial of atanh(x) about 0 (x + x^3/3 + x^5/5).
def code(x): return x + (x * ((x * x) * (0.3333333333333333 + ((x * x) * 0.2))))
function code(x) return Float64(x + Float64(x * Float64(Float64(x * x) * Float64(0.3333333333333333 + Float64(Float64(x * x) * 0.2))))) end
function tmp = code(x) tmp = x + (x * ((x * x) * (0.3333333333333333 + ((x * x) * 0.2)))); end
code[x_] := N[(x + N[(x * N[(N[(x * x), $MachinePrecision] * N[(0.3333333333333333 + N[(N[(x * x), $MachinePrecision] * 0.2), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + x \cdot \left(\left(x \cdot x\right) \cdot \left(0.3333333333333333 + \left(x \cdot x\right) \cdot 0.2\right)\right)
\end{array}
Initial program 100.0%
Taylor expanded in x around 0
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6499.3%
Simplified99.3%
+-commutativeN/A
distribute-rgt-inN/A
*-lft-identityN/A
+-lowering-+.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f6499.3%
Applied egg-rr99.3%
Final simplification99.3%
(FPCore (x) :precision binary64 (* x (+ 1.0 (* (* x x) (+ 0.3333333333333333 (* (* x x) 0.2))))))
// Herbie alternative: degree-5 Taylor polynomial of atanh(x) about 0
// with the leading x factored out: x*(1 + x^2/3 + x^4/5).
double code(double x) {
return x * (1.0 + ((x * x) * (0.3333333333333333 + ((x * x) * 0.2))));
}
! Herbie alternative: degree-5 Taylor polynomial of atanh(x) about 0 with
! the leading x factored out. Fix: added implicit none. Expression kept
! byte-identical to the generated form; do not reassociate.
real(8) function code(x)
    implicit none
    real(8), intent (in) :: x
    code = x * (1.0d0 + ((x * x) * (0.3333333333333333d0 + ((x * x) * 0.2d0))))
end function
// Herbie alternative: degree-5 Taylor polynomial of atanh(x) about 0
// with the leading x factored out: x*(1 + x^2/3 + x^4/5).
public static double code(double x) {
return x * (1.0 + ((x * x) * (0.3333333333333333 + ((x * x) * 0.2))));
}
# Herbie alternative: degree-5 Taylor polynomial of atanh(x) about 0 with x factored out.
def code(x): return x * (1.0 + ((x * x) * (0.3333333333333333 + ((x * x) * 0.2))))
function code(x) return Float64(x * Float64(1.0 + Float64(Float64(x * x) * Float64(0.3333333333333333 + Float64(Float64(x * x) * 0.2))))) end
function tmp = code(x) tmp = x * (1.0 + ((x * x) * (0.3333333333333333 + ((x * x) * 0.2)))); end
code[x_] := N[(x * N[(1.0 + N[(N[(x * x), $MachinePrecision] * N[(0.3333333333333333 + N[(N[(x * x), $MachinePrecision] * 0.2), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x \cdot \left(1 + \left(x \cdot x\right) \cdot \left(0.3333333333333333 + \left(x \cdot x\right) \cdot 0.2\right)\right)
\end{array}
Initial program 100.0%
Taylor expanded in x around 0
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6499.3%
Simplified99.3%
(FPCore (x) :precision binary64 (* x (+ 1.0 (* (* x x) 0.3333333333333333))))
// Herbie alternative: degree-3 Taylor polynomial of atanh(x) about 0,
// x*(1 + x^2/3) = x + x^3/3.
double code(double x) {
return x * (1.0 + ((x * x) * 0.3333333333333333));
}
! Herbie alternative: degree-3 Taylor polynomial of atanh(x) about 0
! (x + x^3/3). Fix: added implicit none. Expression kept byte-identical
! to the generated form; do not reassociate.
real(8) function code(x)
    implicit none
    real(8), intent (in) :: x
    code = x * (1.0d0 + ((x * x) * 0.3333333333333333d0))
end function
// Herbie alternative: degree-3 Taylor polynomial of atanh(x) about 0,
// x*(1 + x^2/3) = x + x^3/3.
public static double code(double x) {
return x * (1.0 + ((x * x) * 0.3333333333333333));
}
# Herbie alternative: degree-3 Taylor polynomial of atanh(x) about 0 (x + x^3/3).
def code(x): return x * (1.0 + ((x * x) * 0.3333333333333333))
function code(x) return Float64(x * Float64(1.0 + Float64(Float64(x * x) * 0.3333333333333333))) end
function tmp = code(x) tmp = x * (1.0 + ((x * x) * 0.3333333333333333)); end
code[x_] := N[(x * N[(1.0 + N[(N[(x * x), $MachinePrecision] * 0.3333333333333333), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x \cdot \left(1 + \left(x \cdot x\right) \cdot 0.3333333333333333\right)
\end{array}
Initial program 100.0%
Taylor expanded in x around 0
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6499.0%
Simplified99.0%
Final simplification99.0%
(FPCore (x) :precision binary64 x)
// Herbie alternative: first-order approximation atanh(x) ~ x near 0.
double code(double x) {
    double y = x;
    return y;
}
! Herbie alternative: first-order approximation atanh(x) ~ x about 0.
! Fix: added implicit none (was missing).
real(8) function code(x)
    implicit none
    real(8), intent (in) :: x
    code = x
end function
// Herbie alternative: first-order approximation atanh(x) ~ x near 0.
public static double code(double x) {
return x;
}
# Herbie alternative: first-order approximation atanh(x) ~ x near 0.
def code(x): return x
function code(x) return x end
function tmp = code(x) tmp = x; end
code[x_] := x
\begin{array}{l}
\\
x
\end{array}
Initial program 100.0%
Taylor expanded in x around 0
Simplified98.6%
herbie shell --seed 2024161
(FPCore (x)
:name "Rust f64::atanh"
:precision binary64
(* 0.5 (log1p (/ (* 2.0 x) (- 1.0 x)))))