
(FPCore (x) :precision binary64 (log (+ x (sqrt (+ (* x x) 1.0)))))
double code(double x) {
return log((x + sqrt(((x * x) + 1.0))));
}
! Hyperbolic arcsine: log(x + sqrt(x*x + 1)).
! Computed over |x| and sign-transferred, because the direct formula
! cancels catastrophically when x is large and negative.
! NOTE(review): x*x can still overflow for |x| > ~1.3e154, unchanged
! from the original.
real(8) function code(x)
real(8), intent (in) :: x
code = sign(log(abs(x) + sqrt((x * x) + 1.0d0)), x)
end function
/**
 * Hyperbolic arcsine: asinh(x) == log(x + sqrt(x*x + 1)).
 * Rewritten over |x| with log1p, using
 * sqrt(a*a+1) - 1 == a*a / (1 + sqrt(a*a+1)),
 * to avoid the catastrophic cancellation of the naive formula for
 * x << 0; the sign is restored at the end since asinh is odd.
 *
 * @param x any finite double
 * @return asinh(x)
 */
public static double code(double x) {
    double a = Math.abs(x);
    double r = Math.log1p(a + a * a / (1.0 + Math.sqrt(a * a + 1.0)));
    return x < 0.0 ? -r : r;
}
def code(x: float) -> float:
    # asinh(x) == log(x + sqrt(x*x + 1)); math.asinh avoids the
    # catastrophic cancellation of the naive formula for x << 0 and
    # the overflow of x*x for |x| > ~1.3e154.
    return math.asinh(x)
# Hyperbolic arcsine: Base.asinh is accurate across the full Float64
# range, unlike log(x + sqrt(x^2 + 1)) which cancels for x << 0.
function code(x)
    return asinh(Float64(x))
end
% Hyperbolic arcsine.  Uses the builtin asinh, which stays accurate for
% large negative x where log(x + sqrt(x*x + 1)) cancels.
function tmp = code(x) tmp = asinh(x); end
(* Hyperbolic arcsine: ArcSinh avoids the cancellation of
   Log[x + Sqrt[x^2 + 1]] for large negative x. *)
code[x_] := N[ArcSinh[x], $MachinePrecision]
\begin{array}{l}
\\
\log \left(x + \sqrt{x \cdot x + 1}\right)
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 9 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x) :precision binary64 (log (+ x (sqrt (+ (* x x) 1.0)))))
/* Alternative 1 (C): the initial program verbatim — log(x + sqrt(x*x + 1)).
 * NOTE(review): loses accuracy for x << 0 (cancellation; the report's
 * own sampling records ~1.8% accuracy there) and x*x can overflow for
 * huge |x|.  Kept byte-identical so the table's accuracy/speedup row
 * still describes this exact code. */
double code(double x) {
return log((x + sqrt(((x * x) + 1.0))));
}
! Alternative 1 (Fortran): initial program verbatim; same accuracy caveats.
real(8) function code(x)
real(8), intent (in) :: x
code = log((x + sqrt(((x * x) + 1.0d0))))
end function
// Alternative 1 (Java): initial program verbatim; same accuracy caveats.
public static double code(double x) {
return Math.log((x + Math.sqrt(((x * x) + 1.0))));
}
# Alternative 1 (Python): initial program verbatim; same accuracy caveats.
def code(x): return math.log((x + math.sqrt(((x * x) + 1.0))))
# Alternative 1 (Julia): initial program verbatim; same accuracy caveats.
function code(x) return log(Float64(x + sqrt(Float64(Float64(x * x) + 1.0)))) end
% Alternative 1 (MATLAB): initial program verbatim; same accuracy caveats.
function tmp = code(x) tmp = log((x + sqrt(((x * x) + 1.0)))); end
(* Alternative 1 (Mathematica): initial program verbatim; same accuracy caveats. *)
code[x_] := N[Log[N[(x + N[Sqrt[N[(N[(x * x), $MachinePrecision] + 1.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\log \left(x + \sqrt{x \cdot x + 1}\right)
\end{array}
(FPCore (x)
:precision binary64
(if (<= x -1.25)
(log (/ -0.5 x))
(if (<= x 0.96)
(fma (* x x) (* x -0.16666666666666666) x)
(log (+ x (- x (/ -0.5 x)))))))
/* Herbie alternative: piecewise approximation of asinh(x).
 *   x <= -1.25 : log(-0.5/x)           — expansion about -inf
 *   x <= 0.96  : x - x^3/6 (via fma)   — cubic Taylor about 0
 *               (-0.16666666666666666 == -1/6)
 *   otherwise  : log(x + (x + 0.5/x))  — expansion about +inf,
 *               since sqrt(x*x + 1) ~ x + 1/(2x) for large x.
 * FP operation order is exactly what the report measured; do not
 * reassociate. */
double code(double x) {
double tmp;
if (x <= -1.25) {
tmp = log((-0.5 / x));
} else if (x <= 0.96) {
tmp = fma((x * x), (x * -0.16666666666666666), x);
} else {
tmp = log((x + (x - (-0.5 / x)))); /* x - (-0.5/x) == x + 0.5/x */
}
return tmp;
}
# Julia rendering of the same piecewise approximation (identical branches).
function code(x) tmp = 0.0 if (x <= -1.25) tmp = log(Float64(-0.5 / x)); elseif (x <= 0.96) tmp = fma(Float64(x * x), Float64(x * -0.16666666666666666), x); else tmp = log(Float64(x + Float64(x - Float64(-0.5 / x)))); end return tmp end
(* Mathematica rendering of the same piecewise approximation. *)
code[x_] := If[LessEqual[x, -1.25], N[Log[N[(-0.5 / x), $MachinePrecision]], $MachinePrecision], If[LessEqual[x, 0.96], N[(N[(x * x), $MachinePrecision] * N[(x * -0.16666666666666666), $MachinePrecision] + x), $MachinePrecision], N[Log[N[(x + N[(x - N[(-0.5 / x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -1.25:\\
\;\;\;\;\log \left(\frac{-0.5}{x}\right)\\
\mathbf{elif}\;x \leq 0.96:\\
\;\;\;\;\mathsf{fma}\left(x \cdot x, x \cdot -0.16666666666666666, x\right)\\
\mathbf{else}:\\
\;\;\;\;\log \left(x + \left(x - \frac{-0.5}{x}\right)\right)\\
\end{array}
\end{array}
if x < -1.25: Initial program 1.8%
Taylor expanded in x around -inf
lower-/.f64100.0
Applied rewrites100.0%
if -1.25 < x < 0.95999999999999996: Initial program 6.7%
Taylor expanded in x around 0
+-commutativeN/A
distribute-lft-inN/A
associate-*r*N/A
*-commutativeN/A
*-rgt-identityN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
lower-*.f64100.0
Applied rewrites100.0%
if 0.95999999999999996 < x: Initial program 52.8%
Taylor expanded in x around inf
distribute-rgt-inN/A
*-lft-identityN/A
cancel-sign-subN/A
distribute-lft-neg-inN/A
lower--.f64N/A
associate-*l*N/A
distribute-lft-neg-inN/A
metadata-evalN/A
unpow2N/A
associate-/r*N/A
associate-*l/N/A
lft-mult-inverseN/A
associate-*r/N/A
metadata-evalN/A
lower-/.f64100.0
Applied rewrites100.0%
(FPCore (x)
:precision binary64
(if (<= x -1.25)
(log (/ -0.5 x))
(if (<= x 1.26)
(fma (* x x) (* x -0.16666666666666666) x)
(log (* x 2.0)))))
/* Herbie alternative: piecewise approximation of asinh(x).
 *   x <= -1.25 : log(-0.5/x)           — expansion about -inf
 *   x <= 1.26  : x - x^3/6 (via fma)   — cubic Taylor about 0
 *   otherwise  : log(2x)               — expansion about +inf
 * Kept byte-identical: the report's accuracy figures refer to this
 * exact operation order. */
double code(double x) {
double tmp;
if (x <= -1.25) {
tmp = log((-0.5 / x));
} else if (x <= 1.26) {
tmp = fma((x * x), (x * -0.16666666666666666), x);
} else {
tmp = log((x * 2.0));
}
return tmp;
}
# Julia rendering of the same piecewise approximation (identical branches).
function code(x) tmp = 0.0 if (x <= -1.25) tmp = log(Float64(-0.5 / x)); elseif (x <= 1.26) tmp = fma(Float64(x * x), Float64(x * -0.16666666666666666), x); else tmp = log(Float64(x * 2.0)); end return tmp end
(* Mathematica rendering of the same piecewise approximation. *)
code[x_] := If[LessEqual[x, -1.25], N[Log[N[(-0.5 / x), $MachinePrecision]], $MachinePrecision], If[LessEqual[x, 1.26], N[(N[(x * x), $MachinePrecision] * N[(x * -0.16666666666666666), $MachinePrecision] + x), $MachinePrecision], N[Log[N[(x * 2.0), $MachinePrecision]], $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -1.25:\\
\;\;\;\;\log \left(\frac{-0.5}{x}\right)\\
\mathbf{elif}\;x \leq 1.26:\\
\;\;\;\;\mathsf{fma}\left(x \cdot x, x \cdot -0.16666666666666666, x\right)\\
\mathbf{else}:\\
\;\;\;\;\log \left(x \cdot 2\right)\\
\end{array}
\end{array}
if x < -1.25: Initial program 1.8%
Taylor expanded in x around -inf
lower-/.f64100.0
Applied rewrites100.0%
if -1.25 < x < 1.26000000000000001: Initial program 6.7%
Taylor expanded in x around 0
+-commutativeN/A
distribute-lft-inN/A
associate-*r*N/A
*-commutativeN/A
*-rgt-identityN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
lower-*.f64100.0
Applied rewrites100.0%
if 1.26000000000000001 < x: Initial program 52.8%
Taylor expanded in x around inf
*-commutativeN/A
lower-*.f6499.6
Applied rewrites99.6%
(FPCore (x)
:precision binary64
(if (<= x 1.86)
(fma
(*
x
(/ (* x -0.027777777777777776) (fma x (* x 0.075) 0.16666666666666666)))
x
x)
(log (* x 2.0))))
/* Herbie alternative: for x <= 1.86, a rational regrouping of the
 * Taylor series about 0:
 *   x - x^3 / (36 * (0.075*x^2 + 1/6))
 * (-0.027777777777777776 == -1/36, 0.075 == 3/40); reduces to
 * x - x^3/6 for small x.  For x > 1.86, log(2x) — expansion about
 * +inf.  Exact fma/operation order matches the report's measurement. */
double code(double x) {
double tmp;
if (x <= 1.86) {
tmp = fma((x * ((x * -0.027777777777777776) / fma(x, (x * 0.075), 0.16666666666666666))), x, x);
} else {
tmp = log((x * 2.0));
}
return tmp;
}
# Julia rendering of the same rational/log(2x) piecewise approximation.
function code(x) tmp = 0.0 if (x <= 1.86) tmp = fma(Float64(x * Float64(Float64(x * -0.027777777777777776) / fma(x, Float64(x * 0.075), 0.16666666666666666))), x, x); else tmp = log(Float64(x * 2.0)); end return tmp end
(* Mathematica rendering of the same piecewise approximation. *)
code[x_] := If[LessEqual[x, 1.86], N[(N[(x * N[(N[(x * -0.027777777777777776), $MachinePrecision] / N[(x * N[(x * 0.075), $MachinePrecision] + 0.16666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * x + x), $MachinePrecision], N[Log[N[(x * 2.0), $MachinePrecision]], $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq 1.86:\\
\;\;\;\;\mathsf{fma}\left(x \cdot \frac{x \cdot -0.027777777777777776}{\mathsf{fma}\left(x, x \cdot 0.075, 0.16666666666666666\right)}, x, x\right)\\
\mathbf{else}:\\
\;\;\;\;\log \left(x \cdot 2\right)\\
\end{array}
\end{array}
if x < 1.8600000000000001: Initial program 4.9%
Taylor expanded in x around 0
*-commutativeN/A
+-commutativeN/A
distribute-lft1-inN/A
*-commutativeN/A
associate-*r*N/A
*-commutativeN/A
lower-fma.f64N/A
sub-negN/A
unpow2N/A
associate-*r*N/A
*-commutativeN/A
metadata-evalN/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f64N/A
lower-*.f64N/A
unpow2N/A
lower-*.f6465.1
Applied rewrites65.1%
Applied rewrites64.5%
Taylor expanded in x around 0
Applied rewrites64.7%
Applied rewrites65.7%
if 1.8600000000000001 < x: Initial program 52.8%
Taylor expanded in x around inf
*-commutativeN/A
lower-*.f6499.6
Applied rewrites99.6%
Final simplification75.8%
(FPCore (x)
:precision binary64
(if (<= x 4.5)
(fma
(*
x
(/ (* x -0.027777777777777776) (fma x (* x 0.075) 0.16666666666666666)))
x
x)
(log (+ x 1.0))))
/* Herbie alternative: same rational regrouping of the Taylor series
 * about 0 as above, but used up to x <= 4.5; for x > 4.5 it falls
 * back to log(x + 1), which the report's derivation log records as
 * notably less accurate (~31.4%) on that branch. */
double code(double x) {
double tmp;
if (x <= 4.5) {
tmp = fma((x * ((x * -0.027777777777777776) / fma(x, (x * 0.075), 0.16666666666666666))), x, x);
} else {
tmp = log((x + 1.0));
}
return tmp;
}
# Julia rendering of the same rational/log(x+1) piecewise approximation.
function code(x) tmp = 0.0 if (x <= 4.5) tmp = fma(Float64(x * Float64(Float64(x * -0.027777777777777776) / fma(x, Float64(x * 0.075), 0.16666666666666666))), x, x); else tmp = log(Float64(x + 1.0)); end return tmp end
(* Mathematica rendering of the same piecewise approximation. *)
code[x_] := If[LessEqual[x, 4.5], N[(N[(x * N[(N[(x * -0.027777777777777776), $MachinePrecision] / N[(x * N[(x * 0.075), $MachinePrecision] + 0.16666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * x + x), $MachinePrecision], N[Log[N[(x + 1.0), $MachinePrecision]], $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq 4.5:\\
\;\;\;\;\mathsf{fma}\left(x \cdot \frac{x \cdot -0.027777777777777776}{\mathsf{fma}\left(x, x \cdot 0.075, 0.16666666666666666\right)}, x, x\right)\\
\mathbf{else}:\\
\;\;\;\;\log \left(x + 1\right)\\
\end{array}
\end{array}
if x < 4.5: Initial program 4.9%
Taylor expanded in x around 0
*-commutativeN/A
+-commutativeN/A
distribute-lft1-inN/A
*-commutativeN/A
associate-*r*N/A
*-commutativeN/A
lower-fma.f64N/A
sub-negN/A
unpow2N/A
associate-*r*N/A
*-commutativeN/A
metadata-evalN/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f64N/A
lower-*.f64N/A
unpow2N/A
lower-*.f6465.1
Applied rewrites65.1%
Applied rewrites64.5%
Taylor expanded in x around 0
Applied rewrites64.7%
Applied rewrites65.7%
if 4.5 < x: Initial program 52.8%
Taylor expanded in x around 0
Applied rewrites31.4%
Final simplification55.5%
(FPCore (x) :precision binary64 (fma (* x (/ (* x -0.027777777777777776) (fma x (* x 0.075) 0.16666666666666666))) x x))
/* Herbie alternative: the rational regrouping of asinh's Taylor
 * series about 0, x - x^3/(36*(0.075*x^2 + 1/6)), applied over the
 * whole domain (no branches).  Operation order kept exactly as
 * measured. */
double code(double x) {
return fma((x * ((x * -0.027777777777777776) / fma(x, (x * 0.075), 0.16666666666666666))), x, x);
}
# Julia rendering of the same branch-free rational approximation.
function code(x) return fma(Float64(x * Float64(Float64(x * -0.027777777777777776) / fma(x, Float64(x * 0.075), 0.16666666666666666))), x, x) end
(* Mathematica rendering of the same approximation. *)
code[x_] := N[(N[(x * N[(N[(x * -0.027777777777777776), $MachinePrecision] / N[(x * N[(x * 0.075), $MachinePrecision] + 0.16666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * x + x), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(x \cdot \frac{x \cdot -0.027777777777777776}{\mathsf{fma}\left(x, x \cdot 0.075, 0.16666666666666666\right)}, x, x\right)
\end{array}
Initial program 19.2%
Taylor expanded in x around 0
*-commutativeN/A
+-commutativeN/A
distribute-lft1-inN/A
*-commutativeN/A
associate-*r*N/A
*-commutativeN/A
lower-fma.f64N/A
sub-negN/A
unpow2N/A
associate-*r*N/A
*-commutativeN/A
metadata-evalN/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f64N/A
lower-*.f64N/A
unpow2N/A
lower-*.f6446.8
Applied rewrites46.8%
Applied rewrites46.0%
Taylor expanded in x around 0
Applied rewrites46.3%
Applied rewrites47.8%
Final simplification47.8%
(FPCore (x) :precision binary64 (fma (* x (fma x (* x 0.075) -0.16666666666666666)) (* x x) x))
/* Herbie alternative: quintic Taylor polynomial of asinh about 0,
 * x - x^3/6 + (3/40)*x^5, evaluated in nested-fma form
 * (0.075 == 3/40, -0.16666666666666666 == -1/6). */
double code(double x) {
return fma((x * fma(x, (x * 0.075), -0.16666666666666666)), (x * x), x);
}
# Julia rendering of the same quintic Taylor polynomial.
function code(x) return fma(Float64(x * fma(x, Float64(x * 0.075), -0.16666666666666666)), Float64(x * x), x) end
(* Mathematica rendering of the same polynomial. *)
code[x_] := N[(N[(x * N[(x * N[(x * 0.075), $MachinePrecision] + -0.16666666666666666), $MachinePrecision]), $MachinePrecision] * N[(x * x), $MachinePrecision] + x), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(x \cdot \mathsf{fma}\left(x, x \cdot 0.075, -0.16666666666666666\right), x \cdot x, x\right)
\end{array}
Initial program 19.2%
Taylor expanded in x around 0
*-commutativeN/A
+-commutativeN/A
distribute-lft1-inN/A
*-commutativeN/A
associate-*r*N/A
*-commutativeN/A
lower-fma.f64N/A
sub-negN/A
unpow2N/A
associate-*r*N/A
*-commutativeN/A
metadata-evalN/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f64N/A
lower-*.f64N/A
unpow2N/A
lower-*.f6446.8
Applied rewrites46.8%
Applied rewrites46.8%
(FPCore (x) :precision binary64 (fma (* (* x x) 0.075) (* x (* x x)) x))
/* Herbie alternative: x + 0.075*x^5 — keeps only the linear and
 * quintic terms of the series (the cubic -x^3/6 term is dropped by
 * the recorded rewrite sequence). */
double code(double x) {
return fma(((x * x) * 0.075), (x * (x * x)), x);
}
# Julia rendering of the same x + 0.075*x^5 approximation.
function code(x) return fma(Float64(Float64(x * x) * 0.075), Float64(x * Float64(x * x)), x) end
(* Mathematica rendering of the same approximation. *)
code[x_] := N[(N[(N[(x * x), $MachinePrecision] * 0.075), $MachinePrecision] * N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision] + x), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\left(x \cdot x\right) \cdot 0.075, x \cdot \left(x \cdot x\right), x\right)
\end{array}
Initial program 19.2%
Taylor expanded in x around 0
*-commutativeN/A
+-commutativeN/A
distribute-lft1-inN/A
*-commutativeN/A
associate-*r*N/A
*-commutativeN/A
lower-fma.f64N/A
sub-negN/A
unpow2N/A
associate-*r*N/A
*-commutativeN/A
metadata-evalN/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f64N/A
lower-*.f64N/A
unpow2N/A
lower-*.f6446.8
Applied rewrites46.8%
Taylor expanded in x around inf
Applied rewrites46.8%
(FPCore (x) :precision binary64 (fma (* x x) (* x -0.16666666666666666) x))
/* Herbie alternative: cubic Taylor polynomial of asinh about 0,
 * x - x^3/6, in fma form (-0.16666666666666666 == -1/6), applied
 * over the whole domain. */
double code(double x) {
return fma((x * x), (x * -0.16666666666666666), x);
}
# Julia rendering of the same cubic Taylor polynomial.
function code(x) return fma(Float64(x * x), Float64(x * -0.16666666666666666), x) end
(* Mathematica rendering of the same polynomial. *)
code[x_] := N[(N[(x * x), $MachinePrecision] * N[(x * -0.16666666666666666), $MachinePrecision] + x), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(x \cdot x, x \cdot -0.16666666666666666, x\right)
\end{array}
Initial program 19.2%
Taylor expanded in x around 0
+-commutativeN/A
distribute-lft-inN/A
associate-*r*N/A
*-commutativeN/A
*-rgt-identityN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
lower-*.f6445.3
Applied rewrites45.3%
(FPCore (x) :precision binary64 (* x (* (* x x) -0.16666666666666666)))
/* Herbie alternative: -x^3/6 — only the cubic correction term of
 * the series, without the leading x.  The report's own log records
 * very low final accuracy (~2.7%) for this rewrite; listed for
 * completeness. */
double code(double x) {
return x * ((x * x) * -0.16666666666666666);
}
! Fortran rendering of the same -x^3/6 term (see accuracy caveat above the C version).
real(8) function code(x)
real(8), intent (in) :: x
code = x * ((x * x) * (-0.16666666666666666d0))
end function
// Java rendering of the same -x^3/6 term.
public static double code(double x) {
return x * ((x * x) * -0.16666666666666666);
}
# Python rendering of the same -x^3/6 term.
def code(x): return x * ((x * x) * -0.16666666666666666)
# Julia rendering of the same -x^3/6 term.
function code(x) return Float64(x * Float64(Float64(x * x) * -0.16666666666666666)) end
% MATLAB rendering of the same -x^3/6 term.
function tmp = code(x) tmp = x * ((x * x) * -0.16666666666666666); end
(* Mathematica rendering of the same -x^3/6 term. *)
code[x_] := N[(x * N[(N[(x * x), $MachinePrecision] * -0.16666666666666666), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x \cdot \left(\left(x \cdot x\right) \cdot -0.16666666666666666\right)
\end{array}
Initial program 19.2%
Taylor expanded in x around 0
+-commutativeN/A
distribute-lft-inN/A
associate-*r*N/A
*-commutativeN/A
*-rgt-identityN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
lower-*.f6445.3
Applied rewrites45.3%
Taylor expanded in x around inf
Applied rewrites2.7%
Final simplification2.7%
(FPCore (x) :precision binary64 (let* ((t_0 (sqrt (+ (* x x) 1.0)))) (if (< x 0.0) (log (/ -1.0 (- x t_0))) (log (+ x t_0)))))
/* Herbie alternative: exact asinh with a sign branch.  With
 * t_0 = sqrt(x*x + 1), the identity x + t_0 == -1/(x - t_0) turns
 * the cancelling sum (for x < 0, x and t_0 nearly cancel) into a
 * well-conditioned difference. */
double code(double x) {
double t_0 = sqrt(((x * x) + 1.0));
double tmp;
if (x < 0.0) {
tmp = log((-1.0 / (x - t_0)));
} else {
tmp = log((x + t_0));
}
return tmp;
}
! Fortran rendering of the same sign-branched formulation
! (x + t_0 == -1/(x - t_0) avoids cancellation for x < 0).
real(8) function code(x)
real(8), intent (in) :: x
real(8) :: t_0
real(8) :: tmp
t_0 = sqrt(((x * x) + 1.0d0))
if (x < 0.0d0) then
tmp = log(((-1.0d0) / (x - t_0)))
else
tmp = log((x + t_0))
end if
code = tmp
end function
// Java rendering of the same sign-branched formulation.
public static double code(double x) {
double t_0 = Math.sqrt(((x * x) + 1.0));
double tmp;
if (x < 0.0) {
tmp = Math.log((-1.0 / (x - t_0)));
} else {
tmp = Math.log((x + t_0));
}
return tmp;
}
def code(x: float) -> float:
    """Hyperbolic arcsine, log(x + sqrt(x*x + 1)).

    For x < 0 the equivalent identity x + t_0 == -1/(x - t_0), with
    t_0 = sqrt(x*x + 1), is used so the nearly-cancelling sum becomes
    a well-conditioned difference.  (The original listing had been
    collapsed onto one line and was not valid Python; this restores
    the structure without changing the computation.)
    """
    t_0 = math.sqrt(((x * x) + 1.0))
    tmp = 0
    if x < 0.0:
        tmp = math.log((-1.0 / (x - t_0)))
    else:
        tmp = math.log((x + t_0))
    return tmp
# Julia rendering of the sign-branched asinh: with t_0 = sqrt(x^2 + 1),
# x + t_0 == -1/(x - t_0), which avoids cancellation for x < 0.
function code(x) t_0 = sqrt(Float64(Float64(x * x) + 1.0)) tmp = 0.0 if (x < 0.0) tmp = log(Float64(-1.0 / Float64(x - t_0))); else tmp = log(Float64(x + t_0)); end return tmp end
% MATLAB rendering of the same sign-branched formulation.
function tmp_2 = code(x) t_0 = sqrt(((x * x) + 1.0)); tmp = 0.0; if (x < 0.0) tmp = log((-1.0 / (x - t_0))); else tmp = log((x + t_0)); end tmp_2 = tmp; end
(* Mathematica rendering of the same sign-branched formulation. *)
code[x_] := Block[{t$95$0 = N[Sqrt[N[(N[(x * x), $MachinePrecision] + 1.0), $MachinePrecision]], $MachinePrecision]}, If[Less[x, 0.0], N[Log[N[(-1.0 / N[(x - t$95$0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision], N[Log[N[(x + t$95$0), $MachinePrecision]], $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \sqrt{x \cdot x + 1}\\
\mathbf{if}\;x < 0:\\
\;\;\;\;\log \left(\frac{-1}{x - t\_0}\right)\\
\mathbf{else}:\\
\;\;\;\;\log \left(x + t\_0\right)\\
\end{array}
\end{array}
herbie shell --seed 2024226
(FPCore (x)
:name "Hyperbolic arcsine"
:precision binary64
:alt
(! :herbie-platform default (if (< x 0) (log (/ -1 (- x (sqrt (+ (* x x) 1))))) (log (+ x (sqrt (+ (* x x) 1))))))
(log (+ x (sqrt (+ (* x x) 1.0)))))