
(FPCore (x) :precision binary64 (log (+ x (sqrt (+ (* x x) 1.0)))))
double code(double x) {
return log((x + sqrt(((x * x) + 1.0))));
}
! asinh(x) computed directly as log(x + sqrt(x*x + 1)).
! NOTE(review): cancellation for negative x — the report lists a
! branch-rewritten alternative that avoids it.
real(8) function code(x)
real(8), intent (in) :: x
code = log((x + sqrt(((x * x) + 1.0d0))))
end function
// asinh(x) computed directly as log(x + sqrt(x*x + 1)).
public static double code(double x) {
return Math.log((x + Math.sqrt(((x * x) + 1.0))));
}
def code(x):
    """asinh(x) computed directly as log(x + sqrt(x*x + 1))."""
    inner = (x * x) + 1.0
    return math.log(x + math.sqrt(inner))
# asinh(x) computed directly as log(x + sqrt(x*x + 1)).
function code(x) return log(Float64(x + sqrt(Float64(Float64(x * x) + 1.0)))) end
% asinh(x) computed directly as log(x + sqrt(x*x + 1)).
function tmp = code(x) tmp = log((x + sqrt(((x * x) + 1.0)))); end
(* asinh(x) computed directly as log(x + sqrt(x*x + 1)), rounded at machine precision. *)
code[x_] := N[Log[N[(x + N[Sqrt[N[(N[(x * x), $MachinePrecision] + 1.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\log \left(x + \sqrt{x \cdot x + 1}\right)
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 4 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x) :precision binary64 (log (+ x (sqrt (+ (* x x) 1.0)))))
/* First alternative: identical to the input program,
 * asinh(x) = log(x + sqrt(x*x + 1)). */
double code(double x) {
return log((x + sqrt(((x * x) + 1.0))));
}
! asinh(x) = log(x + sqrt(x*x + 1)); same form as the input program.
real(8) function code(x)
real(8), intent (in) :: x
code = log((x + sqrt(((x * x) + 1.0d0))))
end function
// asinh(x) = log(x + sqrt(x*x + 1)); same form as the input program.
public static double code(double x) {
return Math.log((x + Math.sqrt(((x * x) + 1.0))));
}
def code(x):
    """asinh(x) = log(x + sqrt(x^2 + 1)); same form as the input program."""
    s = x * x + 1.0
    r = math.sqrt(s)
    return math.log(x + r)
# asinh(x) = log(x + sqrt(x*x + 1)); same form as the input program.
function code(x) return log(Float64(x + sqrt(Float64(Float64(x * x) + 1.0)))) end
% asinh(x) = log(x + sqrt(x*x + 1)); same form as the input program.
function tmp = code(x) tmp = log((x + sqrt(((x * x) + 1.0)))); end
(* asinh(x) = log(x + sqrt(x*x + 1)); same form as the input program. *)
code[x_] := N[Log[N[(x + N[Sqrt[N[(N[(x * x), $MachinePrecision] + 1.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\log \left(x + \sqrt{x \cdot x + 1}\right)
\end{array}
(FPCore (x) :precision binary64 (asinh x))
/* Best alternative in the report (99.9% accuracy): delegate to the
 * C99 library asinh(), which handles the negative-x cancellation. */
double code(double x) {
return asinh(x);
}
def code(x):
    """Hyperbolic arcsine via the standard-library implementation."""
    result = math.asinh(x)
    return result
# Hyperbolic arcsine via the built-in asinh.
function code(x) return asinh(x) end
% Hyperbolic arcsine via the built-in asinh.
function tmp = code(x) tmp = asinh(x); end
(* Hyperbolic arcsine via the built-in ArcSinh. *)
code[x_] := N[ArcSinh[x], $MachinePrecision]
\begin{array}{l}
\\
\sinh^{-1} x
\end{array}
Initial program 20.9%
lift-log.f64 N/A
lift-+.f64 N/A
lift-sqrt.f64 N/A
lift-+.f64 N/A
lift-*.f64 N/A
asinh-def-rev N/A
lower-asinh.f64 99.9
Applied rewrites 99.9%
(FPCore (x) :precision binary64 (fma (* 0.16666666666666666 x) (* x x) x))
/* Cubic polynomial variant from the report's Taylor expansion around 0:
 * x + x^3/6 evaluated as fma(x/6, x*x, x).
 * NOTE(review): the cubic term's sign matches sinh's series, not asinh's
 * (asinh ~ x - x^3/6) — reproduced as emitted; report accuracy ~53%. */
double code(double x) {
return fma((0.16666666666666666 * x), (x * x), x);
}
# Cubic polynomial variant x + x^3/6 via fma; see report notes (~53% accuracy).
function code(x) return fma(Float64(0.16666666666666666 * x), Float64(x * x), x) end
(* Cubic polynomial variant x + x^3/6 in fused multiply-add form. *)
code[x_] := N[(N[(0.16666666666666666 * x), $MachinePrecision] * N[(x * x), $MachinePrecision] + x), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(0.16666666666666666 \cdot x, x \cdot x, x\right)
\end{array}
Initial program 20.9%
Taylor expanded in x around 0
distribute-rgt-in N/A
*-lft-identity N/A
+-commutative N/A
associate-*l* N/A
*-commutative N/A
lower-fma.f64 N/A
pow-plus N/A
lower-pow.f64 N/A
metadata-eval 52.1
Applied rewrites 52.1%
Applied rewrites 53.1%
(FPCore (x) :precision binary64 (* (fma (* x x) 0.16666666666666666 -1.0) x))
/* Polynomial variant (x^3/6 - x) via fma.
 * NOTE(review): this equals the NEGATION of the usual asinh Taylor
 * approximation x - x^3/6; the report rates it only 4.0% accurate —
 * reproduced here exactly as the tool emitted it. */
double code(double x) {
return fma((x * x), 0.16666666666666666, -1.0) * x;
}
# Polynomial variant (x^3/6 - x) via fma; report rates it 4.0% accurate.
function code(x) return Float64(fma(Float64(x * x), 0.16666666666666666, -1.0) * x) end
(* Polynomial variant (x^3/6 - x) in fused multiply-add form. *)
code[x_] := N[(N[(N[(x * x), $MachinePrecision] * 0.16666666666666666 + -1.0), $MachinePrecision] * x), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(x \cdot x, 0.16666666666666666, -1\right) \cdot x
\end{array}
Initial program 20.9%
Taylor expanded in x around 0
distribute-rgt-in N/A
*-lft-identity N/A
+-commutative N/A
associate-*l* N/A
*-commutative N/A
lower-fma.f64 N/A
pow-plus N/A
lower-pow.f64 N/A
metadata-eval 52.1
Applied rewrites 52.1%
Applied rewrites 53.1%
Applied rewrites 4.0%
Final simplification 4.0%
(FPCore (x) :precision binary64 (- x))
/* Degenerate alternative: just -x (first-order Taylor artifact).
 * Report rates it 2.5% accurate; kept only as a report entry. */
double code(double x) {
return -x;
}
! Degenerate alternative: just -x (first-order Taylor artifact).
real(8) function code(x)
real(8), intent (in) :: x
code = -x
end function
// Degenerate alternative: just -x (first-order Taylor artifact).
public static double code(double x) {
return -x;
}
def code(x):
    """Degenerate report alternative: negation of the input."""
    value = -x
    return value
# Degenerate alternative: just -x.
function code(x) return Float64(-x) end
% Degenerate alternative: just -x.
function tmp = code(x) tmp = -x; end
(* Degenerate alternative: just -x. *)
code[x_] := (-x)
\begin{array}{l}
\\
-x
\end{array}
Initial program 20.9%
Taylor expanded in x around 0
distribute-rgt-in N/A
*-lft-identity N/A
+-commutative N/A
associate-*l* N/A
*-commutative N/A
lower-fma.f64 N/A
pow-plus N/A
lower-pow.f64 N/A
metadata-eval 52.1
Applied rewrites 52.1%
Applied rewrites 4.0%
Taylor expanded in x around 0
Applied rewrites 2.5%
(FPCore (x) :precision binary64 (let* ((t_0 (sqrt (+ (* x x) 1.0)))) (if (< x 0.0) (log (/ -1.0 (- x t_0))) (log (+ x t_0)))))
/* asinh(x) with a sign branch: for x < 0 the direct sum
 * x + sqrt(x^2+1) cancels, so the algebraically-equivalent form
 * log(-1 / (x - sqrt(x^2+1))) is used instead. */
double code(double x) {
double t_0 = sqrt(((x * x) + 1.0));
double tmp;
if (x < 0.0) {
tmp = log((-1.0 / (x - t_0)));
} else {
tmp = log((x + t_0));
}
return tmp;
}
! asinh(x) with a sign branch: the x < 0 case uses the equivalent
! form log(-1 / (x - sqrt(x^2+1))) to avoid cancellation.
real(8) function code(x)
real(8), intent (in) :: x
real(8) :: t_0
real(8) :: tmp
t_0 = sqrt(((x * x) + 1.0d0))
if (x < 0.0d0) then
tmp = log(((-1.0d0) / (x - t_0)))
else
tmp = log((x + t_0))
end if
code = tmp
end function
// asinh(x) with a sign branch: the x < 0 case uses the equivalent
// form log(-1 / (x - sqrt(x^2+1))) to avoid cancellation.
public static double code(double x) {
double t_0 = Math.sqrt(((x * x) + 1.0));
double tmp;
if (x < 0.0) {
tmp = Math.log((-1.0 / (x - t_0)));
} else {
tmp = Math.log((x + t_0));
}
return tmp;
}
def code(x):
    """asinh(x) with a sign branch to avoid cancellation.

    For x < 0, log(x + sqrt(x^2 + 1)) loses precision because the two
    terms cancel; the algebraically-equivalent log(-1 / (x - sqrt(x^2 + 1)))
    does not.  (The original line jammed all statements and the if/else
    onto one line, which is not valid Python syntax.)
    """
    t_0 = math.sqrt((x * x) + 1.0)
    if x < 0.0:
        tmp = math.log(-1.0 / (x - t_0))
    else:
        tmp = math.log(x + t_0)
    return tmp
# asinh(x) with a sign branch: x < 0 uses log(-1 / (x - sqrt(x^2+1))) to avoid cancellation.
function code(x) t_0 = sqrt(Float64(Float64(x * x) + 1.0)) tmp = 0.0 if (x < 0.0) tmp = log(Float64(-1.0 / Float64(x - t_0))); else tmp = log(Float64(x + t_0)); end return tmp end
% asinh(x) with a sign branch: x < 0 uses log(-1 / (x - sqrt(x^2+1))) to avoid cancellation.
function tmp_2 = code(x) t_0 = sqrt(((x * x) + 1.0)); tmp = 0.0; if (x < 0.0) tmp = log((-1.0 / (x - t_0))); else tmp = log((x + t_0)); end tmp_2 = tmp; end
(* asinh(x) with a sign branch: x < 0 uses Log[-1 / (x - Sqrt[x^2+1])] to avoid cancellation. *)
code[x_] := Block[{t$95$0 = N[Sqrt[N[(N[(x * x), $MachinePrecision] + 1.0), $MachinePrecision]], $MachinePrecision]}, If[Less[x, 0.0], N[Log[N[(-1.0 / N[(x - t$95$0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision], N[Log[N[(x + t$95$0), $MachinePrecision]], $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \sqrt{x \cdot x + 1}\\
\mathbf{if}\;x < 0:\\
\;\;\;\;\log \left(\frac{-1}{x - t_0}\right)\\
\mathbf{else}:\\
\;\;\;\;\log \left(x + t_0\right)\\
\end{array}
\end{array}
herbie shell --seed 2024312
(FPCore (x)
:name "Hyperbolic arcsine"
:precision binary64
:alt
(! :herbie-platform default (if (< x 0) (log (/ -1 (- x (sqrt (+ (* x x) 1))))) (log (+ x (sqrt (+ (* x x) 1))))))
(log (+ x (sqrt (+ (* x x) 1.0)))))