
FPCore:

```lisp
(FPCore (x) :precision binary64 (acosh x))
```

C:

```c
#include <math.h>

double code(double x) {
    return acosh(x);
}
```

Python:

```python
import math

def code(x):
    return math.acosh(x)
```

Julia:

```julia
function code(x)
    return acosh(x)
end
```

MATLAB:

```matlab
function tmp = code(x)
    tmp = acosh(x);
end
```

Wolfram:

```mathematica
code[x_] := N[ArcCosh[x], $MachinePrecision]
```

TeX:

```latex
\cosh^{-1} x
```
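For reference, the function being approximated satisfies the standard identity (not stated in the report itself):

```latex
\cosh^{-1} x = \log\left(x + \sqrt{x^2 - 1}\right), \qquad x \ge 1,
```

which is exactly the form the initial program below transcribes.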
Sampling outcomes in binary64 precision.
Herbie found 4 alternatives:

| Alternative | Accuracy | Speedup |
|---|---|---|
| Alternative 1 | 99.3% | |
| Alternative 2 | 99.3% | |
| Alternative 3 | 99.1% | |
| Alternative 4 | 98.6% | |
Initial program: 52.7% accurate

FPCore:

```lisp
(FPCore (x) :precision binary64 (log (+ x (sqrt (- (* x x) 1.0)))))
```

C:

```c
#include <math.h>

double code(double x) {
    return log((x + sqrt(((x * x) - 1.0))));
}
```

Fortran:

```fortran
real(8) function code(x)
    real(8), intent (in) :: x
    code = log((x + sqrt(((x * x) - 1.0d0))))
end function
```

Java:

```java
public static double code(double x) {
    return Math.log((x + Math.sqrt(((x * x) - 1.0))));
}
```

Python:

```python
import math

def code(x):
    return math.log((x + math.sqrt(((x * x) - 1.0))))
```

Julia:

```julia
function code(x)
    return log(Float64(x + sqrt(Float64(Float64(x * x) - 1.0))))
end
```

MATLAB:

```matlab
function tmp = code(x)
    tmp = log((x + sqrt(((x * x) - 1.0))));
end
```

Wolfram:

```mathematica
code[x_] := N[Log[N[(x + N[Sqrt[N[(N[(x * x), $MachinePrecision] - 1.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
```

TeX:

```latex
\log \left(x + \sqrt{x \cdot x - 1}\right)
```
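Two failure modes explain the low accuracy of this direct transcription: for x above roughly 1.34e154, x * x overflows to infinity even though acosh(x) is an ordinary double, and for x just above 1 the subtraction x * x - 1.0 cancels away most significant bits. A minimal Python check of the overflow case (the helper name `naive` is mine, not the report's):

```python
import math

def naive(x):
    # Direct transcription of the initial program.
    return math.log(x + math.sqrt(x * x - 1.0))

x = 1e200
print(math.acosh(x))  # ~461.2102, i.e. log(2) + 200*log(10)
print(naive(x))       # inf: x*x overflowed, and the error never recovers
```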
Alternative 1: 99.3% accurate

FPCore:

```lisp
(FPCore (x) :precision binary64 (let* ((t_0 (/ (+ 0.5 (/ 0.125 (* x x))) (* x x)))) (log (/ (* x (- 4.0 (* t_0 t_0))) (+ t_0 2.0)))))
```

C:

```c
#include <math.h>

double code(double x) {
    double t_0 = (0.5 + (0.125 / (x * x))) / (x * x);
    return log(((x * (4.0 - (t_0 * t_0))) / (t_0 + 2.0)));
}
```

Fortran:

```fortran
real(8) function code(x)
    real(8), intent (in) :: x
    real(8) :: t_0
    t_0 = (0.5d0 + (0.125d0 / (x * x))) / (x * x)
    code = log(((x * (4.0d0 - (t_0 * t_0))) / (t_0 + 2.0d0)))
end function
```

Java:

```java
public static double code(double x) {
    double t_0 = (0.5 + (0.125 / (x * x))) / (x * x);
    return Math.log(((x * (4.0 - (t_0 * t_0))) / (t_0 + 2.0)));
}
```

Python:

```python
import math

def code(x):
    t_0 = (0.5 + (0.125 / (x * x))) / (x * x)
    return math.log(((x * (4.0 - (t_0 * t_0))) / (t_0 + 2.0)))
```

Julia:

```julia
function code(x)
    t_0 = Float64(Float64(0.5 + Float64(0.125 / Float64(x * x))) / Float64(x * x))
    return log(Float64(Float64(x * Float64(4.0 - Float64(t_0 * t_0))) / Float64(t_0 + 2.0)))
end
```

MATLAB:

```matlab
function tmp = code(x)
    t_0 = (0.5 + (0.125 / (x * x))) / (x * x);
    tmp = log(((x * (4.0 - (t_0 * t_0))) / (t_0 + 2.0)));
end
```

Wolfram:

```mathematica
code[x_] := Block[{t$95$0 = N[(N[(0.5 + N[(0.125 / N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(x * x), $MachinePrecision]), $MachinePrecision]}, N[Log[N[(N[(x * N[(4.0 - N[(t$95$0 * t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(t$95$0 + 2.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]]
```

TeX:

```latex
\begin{array}{l}
t_0 := \frac{0.5 + \frac{0.125}{x \cdot x}}{x \cdot x}\\
\log \left(\frac{x \cdot \left(4 - t_0 \cdot t_0\right)}{t_0 + 2}\right)
\end{array}
```
Derivation:

| Step | Accuracy |
|---|---|
| Initial program | 52.7% |
| Taylor expanded in x around inf | 99.3% |
| mul-1-neg | 99.3% |
| unsub-neg | 99.3% |
| unpow2 | 99.3% |
| associate-*r/ | 99.3% |
| metadata-eval | 99.3% |
| unpow2 | 99.3% |
| Simplified | 99.3% |
| *-commutative | 99.3% |
| flip-- | 99.3% |
| associate-*l/ | 99.3% |
| metadata-eval | 99.3% |
| Applied egg-rr | 99.3% |
| Final simplification | 99.3% |
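The step labeled "Taylor expanded in x around inf" matches the standard asymptotic series for the argument of the logarithm; the algebra below is my sketch, not part of the report:

```latex
x + \sqrt{x^2 - 1}
  = x\left(1 + \sqrt{1 - x^{-2}}\right)
  = x\left(2 - \frac{1}{2x^2} - \frac{1}{8x^4} - \cdots\right)
  = x\,(2 - t_0) - O\!\left(x^{-5}\right),
\qquad t_0 = \frac{0.5 + 0.125/x^2}{x^2}.
```

The rewrite steps then restructure $x\,(2 - t_0)$ into the equivalent $x \cdot (4 - t_0^2)/(t_0 + 2)$ form shown above.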
Alternative 2: 99.3% accurate

FPCore:

```lisp
(FPCore (x) :precision binary64 (log (* x (- 2.0 (/ (+ 0.5 (/ 0.125 (* x x))) (* x x))))))
```

C:

```c
#include <math.h>

double code(double x) {
    return log((x * (2.0 - ((0.5 + (0.125 / (x * x))) / (x * x)))));
}
```

Fortran:

```fortran
real(8) function code(x)
    real(8), intent (in) :: x
    code = log((x * (2.0d0 - ((0.5d0 + (0.125d0 / (x * x))) / (x * x)))))
end function
```

Java:

```java
public static double code(double x) {
    return Math.log((x * (2.0 - ((0.5 + (0.125 / (x * x))) / (x * x)))));
}
```

Python:

```python
import math

def code(x):
    return math.log((x * (2.0 - ((0.5 + (0.125 / (x * x))) / (x * x)))))
```

Julia:

```julia
function code(x)
    return log(Float64(x * Float64(2.0 - Float64(Float64(0.5 + Float64(0.125 / Float64(x * x))) / Float64(x * x)))))
end
```

MATLAB:

```matlab
function tmp = code(x)
    tmp = log((x * (2.0 - ((0.5 + (0.125 / (x * x))) / (x * x)))));
end
```

Wolfram:

```mathematica
code[x_] := N[Log[N[(x * N[(2.0 - N[(N[(0.5 + N[(0.125 / N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
```

TeX:

```latex
\log \left(x \cdot \left(2 - \frac{0.5 + \frac{0.125}{x \cdot x}}{x \cdot x}\right)\right)
```
Derivation:

| Step | Accuracy |
|---|---|
| Initial program | 52.7% |
| Taylor expanded in x around inf | 99.3% |
| mul-1-neg | 99.3% |
| unsub-neg | 99.3% |
| unpow2 | 99.3% |
| associate-*r/ | 99.3% |
| metadata-eval | 99.3% |
| unpow2 | 99.3% |
| Simplified | 99.3% |
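Alternative 2 is the same approximation as Alternative 1 with the final factoring undone; as real expressions the two bodies agree, because (one line of algebra, mine):

```latex
\frac{4 - t_0^2}{t_0 + 2} = \frac{(2 - t_0)(2 + t_0)}{t_0 + 2} = 2 - t_0.
```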
Alternative 3: 99.1% accurate

FPCore:

```lisp
(FPCore (x) :precision binary64 (log (* x (+ 2.0 (/ -0.5 (* x x))))))
```

C:

```c
#include <math.h>

double code(double x) {
    return log((x * (2.0 + (-0.5 / (x * x)))));
}
```

Fortran:

```fortran
real(8) function code(x)
    real(8), intent (in) :: x
    code = log((x * (2.0d0 + ((-0.5d0) / (x * x)))))
end function
```

Java:

```java
public static double code(double x) {
    return Math.log((x * (2.0 + (-0.5 / (x * x)))));
}
```

Python:

```python
import math

def code(x):
    return math.log((x * (2.0 + (-0.5 / (x * x)))))
```

Julia:

```julia
function code(x)
    return log(Float64(x * Float64(2.0 + Float64(-0.5 / Float64(x * x)))))
end
```

MATLAB:

```matlab
function tmp = code(x)
    tmp = log((x * (2.0 + (-0.5 / (x * x)))));
end
```

Wolfram:

```mathematica
code[x_] := N[Log[N[(x * N[(2.0 + N[(-0.5 / N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
```

TeX:

```latex
\log \left(x \cdot \left(2 + \frac{-0.5}{x \cdot x}\right)\right)
```
Derivation:

| Step | Accuracy |
|---|---|
| Initial program | 52.7% |
| Taylor expanded in x around inf | 99.1% |
| sub-neg | 99.1% |
| unpow2 | 99.1% |
| associate-*r/ | 99.1% |
| metadata-eval | 99.1% |
| distribute-neg-frac | 99.1% |
| metadata-eval | 99.1% |
| Simplified | 99.1% |
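Alternative 3 appears to truncate the same series one term earlier, keeping only the leading correction:

```latex
x + \sqrt{x^2 - 1} \approx x\left(2 - \frac{1}{2x^2}\right) = x\left(2 + \frac{-0.5}{x^2}\right),
```

which is consistent with the slightly lower reported accuracy (99.1% rather than 99.3%).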
Alternative 4: 98.6% accurate

FPCore:

```lisp
(FPCore (x) :precision binary64 (log (+ x x)))
```

C:

```c
#include <math.h>

double code(double x) {
    return log((x + x));
}
```

Fortran:

```fortran
real(8) function code(x)
    real(8), intent (in) :: x
    code = log((x + x))
end function
```

Java:

```java
public static double code(double x) {
    return Math.log((x + x));
}
```

Python:

```python
import math

def code(x):
    return math.log((x + x))
```

Julia:

```julia
function code(x)
    return log(Float64(x + x))
end
```

MATLAB:

```matlab
function tmp = code(x)
    tmp = log((x + x));
end
```

Wolfram:

```mathematica
code[x_] := N[Log[N[(x + x), $MachinePrecision]], $MachinePrecision]
```

TeX:

```latex
\log \left(x + x\right)
```
Derivation:

| Step | Accuracy |
|---|---|
| Initial program | 52.7% |
| Taylor expanded in x around inf | 98.6% |
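Alternative 4 keeps only the leading term of the expansion. For large x (a standard identity, not taken from the report):

```latex
\cosh^{-1} x = \log(2x) - \frac{1}{4x^2} - O\!\left(x^{-4}\right),
```

and since x + x doubles exactly in binary64 (absent overflow), log(x + x) computes log(2x) without ever squaring x.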
Developer target (the :alt given in the input):

FPCore:

```lisp
(FPCore (x) :precision binary64 (log (+ x (* (sqrt (- x 1.0)) (sqrt (+ x 1.0))))))
```

C:

```c
#include <math.h>

double code(double x) {
    return log((x + (sqrt((x - 1.0)) * sqrt((x + 1.0)))));
}
```

Fortran:

```fortran
real(8) function code(x)
    real(8), intent (in) :: x
    code = log((x + (sqrt((x - 1.0d0)) * sqrt((x + 1.0d0)))))
end function
```

Java:

```java
public static double code(double x) {
    return Math.log((x + (Math.sqrt((x - 1.0)) * Math.sqrt((x + 1.0)))));
}
```

Python:

```python
import math

def code(x):
    return math.log((x + (math.sqrt((x - 1.0)) * math.sqrt((x + 1.0)))))
```

Julia:

```julia
function code(x)
    return log(Float64(x + Float64(sqrt(Float64(x - 1.0)) * sqrt(Float64(x + 1.0)))))
end
```

MATLAB:

```matlab
function tmp = code(x)
    tmp = log((x + (sqrt((x - 1.0)) * sqrt((x + 1.0)))));
end
```

Wolfram:

```mathematica
code[x_] := N[Log[N[(x + N[(N[Sqrt[N[(x - 1.0), $MachinePrecision]], $MachinePrecision] * N[Sqrt[N[(x + 1.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
```

TeX:

```latex
\log \left(x + \sqrt{x - 1} \cdot \sqrt{x + 1}\right)
```
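The target form never squares x, so no intermediate grows much beyond x itself, and for x near 1 the subtraction x - 1.0 is exact (Sterbenz lemma) where x * x - 1.0 cancels. A quick Python comparison (the helper names `naive` and `target` are mine):

```python
import math

def naive(x):
    # The initial program: overflows in x*x, cancels in x*x - 1.0 near 1.
    return math.log(x + math.sqrt(x * x - 1.0))

def target(x):
    # The developer target: factors x^2 - 1 as (x - 1)(x + 1).
    return math.log(x + math.sqrt(x - 1.0) * math.sqrt(x + 1.0))

x = 1e200             # naive overflows; target stays finite
print(naive(x), target(x), math.acosh(x))

x = 1.0 + 2.0**-30    # near 1: x - 1.0 is exact, x*x - 1.0 cancels
print(naive(x), target(x), math.acosh(x))  # target tracks acosh more closely
```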
Reproduce:

```
herbie shell --seed 2024097
```

```lisp
(FPCore (x)
  :name "Rust f64::acosh"
  :precision binary64
  :pre (>= x 1.0)
  :alt
  (log (+ x (* (sqrt (- x 1.0)) (sqrt (+ x 1.0)))))
  (log (+ x (sqrt (- (* x x) 1.0)))))
```
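To reproduce this result, run the command above with the same seed and paste the FPCore at the resulting prompt: Herbie's shell reads FPCore expressions from standard input and prints its improved versions back.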