
(FPCore (x) :precision binary64 (log (+ x (sqrt (+ (* x x) 1.0)))))
double code(double x) {
return log((x + sqrt(((x * x) + 1.0))));
}
! asinh(x) via the identity log(x + sqrt(x**2 + 1)).
! NOTE(review): x + sqrt(x*x + 1) cancels for large negative x; the
! rewritten alternatives later in this report address that range.
real(8) function code(x)
real(8), intent (in) :: x
code = log((x + sqrt(((x * x) + 1.0d0))))
end function
/**
 * asinh(x), mathematically log(x + sqrt(x*x + 1)).
 *
 * The naive form cancels catastrophically for x &lt;&lt; 0 (the sum rounds
 * to 0 and the log overflows to -infinity). Java's Math has no asinh, so
 * exploit that asinh is odd: evaluate on |x|, where the sum is safe, and
 * restore the sign.
 */
public static double code(double x) {
    double ax = Math.abs(x);
    double r = Math.log(ax + Math.sqrt(ax * ax + 1.0));
    return x < 0.0 ? -r : r;
}
def code(x):
    """asinh(x), mathematically log(x + sqrt(x*x + 1)).

    The naive expression is broken for large negative x: sqrt(x*x + 1)
    rounds to -x, the sum rounds to 0.0, and math.log raises a domain
    error (e.g. code(-1e8) crashed). math.asinh computes the same
    function accurately everywhere.
    """
    return math.asinh(x)
function code(x)
    # asinh(x) == log(x + sqrt(x^2 + 1)), but the naive sum cancels to 0
    # for x << 0 and the log blows up to -Inf; Base.asinh is accurate there.
    return asinh(Float64(x))
end
% asinh(x) via log(x + sqrt(x^2 + 1)); the sum cancels for large negative x
% (see the rewritten alternatives later in this report).
function tmp = code(x) tmp = log((x + sqrt(((x * x) + 1.0)))); end
(* asinh: Log[x + Sqrt[x^2 + 1]], with each intermediate rounded to $MachinePrecision. *)
code[x_] := N[Log[N[(x + N[Sqrt[N[(N[(x * x), $MachinePrecision] + 1.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\log \left(x + \sqrt{x \cdot x + 1}\right)
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 9 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x) :precision binary64 (log (+ x (sqrt (+ (* x x) 1.0)))))
double code(double x) {
return log((x + sqrt(((x * x) + 1.0))));
}
! asinh(x) via the identity log(x + sqrt(x**2 + 1)).
real(8) function code(x)
real(8), intent (in) :: x
code = log((x + sqrt(((x * x) + 1.0d0))))
end function
/** asinh(x) via the identity log(x + sqrt(x*x + 1)). */
public static double code(double x) {
    double hyp = Math.sqrt(x * x + 1.0);
    return Math.log(x + hyp);
}
def code(x):
    """asinh(x) via the identity log(x + sqrt(x*x + 1))."""
    hyp = math.sqrt(x * x + 1.0)
    return math.log(x + hyp)
# asinh(x) via log(x + sqrt(x^2 + 1)), with Float64 rounding at each step.
function code(x) return log(Float64(x + sqrt(Float64(Float64(x * x) + 1.0)))) end
% asinh(x) via log(x + sqrt(x^2 + 1)).
function tmp = code(x) tmp = log((x + sqrt(((x * x) + 1.0)))); end
(* asinh: Log[x + Sqrt[x^2 + 1]], rounded to $MachinePrecision at each step. *)
code[x_] := N[Log[N[(x + N[Sqrt[N[(N[(x * x), $MachinePrecision] + 1.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\log \left(x + \sqrt{x \cdot x + 1}\right)
\end{array}
(FPCore (x)
:precision binary64
(if (<= x -1.26)
(log (/ -0.5 x))
(if (<= x 1.05)
(fma
(fma
(* x x)
(fma x (* x -0.044642857142857144) 0.075)
-0.16666666666666666)
(* x (* x x))
x)
(log (+ x (- x (/ -0.5 x)))))))
double code(double x) {
double tmp;
if (x <= -1.26) {
tmp = log((-0.5 / x));
} else if (x <= 1.05) {
tmp = fma(fma((x * x), fma(x, (x * -0.044642857142857144), 0.075), -0.16666666666666666), (x * (x * x)), x);
} else {
tmp = log((x + (x - (-0.5 / x))));
}
return tmp;
}
# Piecewise asinh approximation: log branches outside [-1.26, 1.05], nested-fma polynomial inside.
function code(x) tmp = 0.0 if (x <= -1.26) tmp = log(Float64(-0.5 / x)); elseif (x <= 1.05) tmp = fma(fma(Float64(x * x), fma(x, Float64(x * -0.044642857142857144), 0.075), -0.16666666666666666), Float64(x * Float64(x * x)), x); else tmp = log(Float64(x + Float64(x - Float64(-0.5 / x)))); end return tmp end
(* Piecewise asinh approximation: log branches outside [-1.26, 1.05], polynomial inside. *)
code[x_] := If[LessEqual[x, -1.26], N[Log[N[(-0.5 / x), $MachinePrecision]], $MachinePrecision], If[LessEqual[x, 1.05], N[(N[(N[(x * x), $MachinePrecision] * N[(x * N[(x * -0.044642857142857144), $MachinePrecision] + 0.075), $MachinePrecision] + -0.16666666666666666), $MachinePrecision] * N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision] + x), $MachinePrecision], N[Log[N[(x + N[(x - N[(-0.5 / x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -1.26:\\
\;\;\;\;\log \left(\frac{-0.5}{x}\right)\\
\mathbf{elif}\;x \leq 1.05:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x, x \cdot -0.044642857142857144, 0.075\right), -0.16666666666666666\right), x \cdot \left(x \cdot x\right), x\right)\\
\mathbf{else}:\\
\;\;\;\;\log \left(x + \left(x - \frac{-0.5}{x}\right)\right)\\
\end{array}
\end{array}
if x < -1.26000000000000001Initial program 1.8%
Taylor expanded in x around -inf
lower-/.f64100.0
Applied rewrites100.0%
if -1.26000000000000001 < x < 1.05000000000000004Initial program 8.1%
Taylor expanded in x around 0
+-commutativeN/A
distribute-rgt-inN/A
*-commutativeN/A
associate-*r*N/A
*-commutativeN/A
*-lft-identityN/A
lower-fma.f64N/A
Applied rewrites100.0%
if 1.05000000000000004 < x Initial program 58.9%
Taylor expanded in x around inf
distribute-rgt-inN/A
*-lft-identityN/A
cancel-sign-subN/A
distribute-lft-neg-inN/A
lower--.f64N/A
associate-*l*N/A
distribute-lft-neg-inN/A
metadata-evalN/A
unpow2N/A
associate-/r*N/A
associate-*l/N/A
lft-mult-inverseN/A
associate-*r/N/A
metadata-evalN/A
lower-/.f6498.9
Applied rewrites98.9%
(FPCore (x)
:precision binary64
(if (<= x -1.26)
(log (/ -0.5 x))
(if (<= x 1.3)
(fma
(fma
(* x x)
(fma x (* x -0.044642857142857144) 0.075)
-0.16666666666666666)
(* x (* x x))
x)
(log (* x 2.0)))))
double code(double x) {
double tmp;
if (x <= -1.26) {
tmp = log((-0.5 / x));
} else if (x <= 1.3) {
tmp = fma(fma((x * x), fma(x, (x * -0.044642857142857144), 0.075), -0.16666666666666666), (x * (x * x)), x);
} else {
tmp = log((x * 2.0));
}
return tmp;
}
# Piecewise asinh approximation: log(-0.5/x) below -1.26, fma polynomial to 1.3, log(2x) above.
function code(x) tmp = 0.0 if (x <= -1.26) tmp = log(Float64(-0.5 / x)); elseif (x <= 1.3) tmp = fma(fma(Float64(x * x), fma(x, Float64(x * -0.044642857142857144), 0.075), -0.16666666666666666), Float64(x * Float64(x * x)), x); else tmp = log(Float64(x * 2.0)); end return tmp end
(* Piecewise asinh approximation: log(-0.5/x) below -1.26, polynomial to 1.3, Log[2 x] above. *)
code[x_] := If[LessEqual[x, -1.26], N[Log[N[(-0.5 / x), $MachinePrecision]], $MachinePrecision], If[LessEqual[x, 1.3], N[(N[(N[(x * x), $MachinePrecision] * N[(x * N[(x * -0.044642857142857144), $MachinePrecision] + 0.075), $MachinePrecision] + -0.16666666666666666), $MachinePrecision] * N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision] + x), $MachinePrecision], N[Log[N[(x * 2.0), $MachinePrecision]], $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -1.26:\\
\;\;\;\;\log \left(\frac{-0.5}{x}\right)\\
\mathbf{elif}\;x \leq 1.3:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x, x \cdot -0.044642857142857144, 0.075\right), -0.16666666666666666\right), x \cdot \left(x \cdot x\right), x\right)\\
\mathbf{else}:\\
\;\;\;\;\log \left(x \cdot 2\right)\\
\end{array}
\end{array}
if x < -1.26000000000000001Initial program 1.8%
Taylor expanded in x around -inf
lower-/.f64100.0
Applied rewrites100.0%
if -1.26000000000000001 < x < 1.30000000000000004Initial program 8.1%
Taylor expanded in x around 0
+-commutativeN/A
distribute-rgt-inN/A
*-commutativeN/A
associate-*r*N/A
*-commutativeN/A
*-lft-identityN/A
lower-fma.f64N/A
Applied rewrites100.0%
if 1.30000000000000004 < x Initial program 58.9%
Taylor expanded in x around inf
*-commutativeN/A
lower-*.f6498.3
Applied rewrites98.3%
(FPCore (x) :precision binary64 (if (<= x 1.46) (/ 1.0 (fma x 0.16666666666666666 (/ 1.0 x))) (log (* x 2.0))))
double code(double x) {
double tmp;
if (x <= 1.46) {
tmp = 1.0 / fma(x, 0.16666666666666666, (1.0 / x));
} else {
tmp = log((x * 2.0));
}
return tmp;
}
# Rational approximation 1/(x/6 + 1/x) for x <= 1.46, log(2x) above.
function code(x) tmp = 0.0 if (x <= 1.46) tmp = Float64(1.0 / fma(x, 0.16666666666666666, Float64(1.0 / x))); else tmp = log(Float64(x * 2.0)); end return tmp end
(* Rational approximation 1/(x/6 + 1/x) for x <= 1.46, else Log[2 x]. *)
code[x_] := If[LessEqual[x, 1.46], N[(1.0 / N[(x * 0.16666666666666666 + N[(1.0 / x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[Log[N[(x * 2.0), $MachinePrecision]], $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq 1.46:\\
\;\;\;\;\frac{1}{\mathsf{fma}\left(x, 0.16666666666666666, \frac{1}{x}\right)}\\
\mathbf{else}:\\
\;\;\;\;\log \left(x \cdot 2\right)\\
\end{array}
\end{array}
if x < 1.46Initial program 6.2%
Taylor expanded in x around 0
*-commutativeN/A
+-commutativeN/A
distribute-lft1-inN/A
*-commutativeN/A
associate-*r*N/A
*-commutativeN/A
lower-fma.f64N/A
sub-negN/A
unpow2N/A
associate-*r*N/A
*-commutativeN/A
metadata-evalN/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f64N/A
lower-*.f64N/A
unpow2N/A
lower-*.f6470.5
Applied rewrites70.5%
Applied rewrites70.4%
Taylor expanded in x around 0
Applied rewrites70.7%
Taylor expanded in x around inf
Applied rewrites70.8%
if 1.46 < x Initial program 58.9%
Taylor expanded in x around inf
*-commutativeN/A
lower-*.f6498.3
Applied rewrites98.3%
(FPCore (x) :precision binary64 (if (<= x 2.4) (/ 1.0 (fma x 0.16666666666666666 (/ 1.0 x))) (log (+ x 1.0))))
double code(double x) {
double tmp;
if (x <= 2.4) {
tmp = 1.0 / fma(x, 0.16666666666666666, (1.0 / x));
} else {
tmp = log((x + 1.0));
}
return tmp;
}
# Rational approximation 1/(x/6 + 1/x) for x <= 2.4, log(x + 1) above.
function code(x) tmp = 0.0 if (x <= 2.4) tmp = Float64(1.0 / fma(x, 0.16666666666666666, Float64(1.0 / x))); else tmp = log(Float64(x + 1.0)); end return tmp end
(* Rational approximation 1/(x/6 + 1/x) for x <= 2.4, else Log[x + 1]. *)
code[x_] := If[LessEqual[x, 2.4], N[(1.0 / N[(x * 0.16666666666666666 + N[(1.0 / x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[Log[N[(x + 1.0), $MachinePrecision]], $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq 2.4:\\
\;\;\;\;\frac{1}{\mathsf{fma}\left(x, 0.16666666666666666, \frac{1}{x}\right)}\\
\mathbf{else}:\\
\;\;\;\;\log \left(x + 1\right)\\
\end{array}
\end{array}
if x < 2.39999999999999991Initial program 6.7%
Taylor expanded in x around 0
*-commutativeN/A
+-commutativeN/A
distribute-lft1-inN/A
*-commutativeN/A
associate-*r*N/A
*-commutativeN/A
lower-fma.f64N/A
sub-negN/A
unpow2N/A
associate-*r*N/A
*-commutativeN/A
metadata-evalN/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f64N/A
lower-*.f64N/A
unpow2N/A
lower-*.f6470.2
Applied rewrites70.2%
Applied rewrites70.1%
Taylor expanded in x around 0
Applied rewrites70.5%
Taylor expanded in x around inf
Applied rewrites70.6%
if 2.39999999999999991 < x Initial program 58.3%
Taylor expanded in x around 0
Applied rewrites31.1%
(FPCore (x) :precision binary64 (if (<= (+ x (sqrt (+ 1.0 (* x x)))) 5.0) (fma (* x x) (* x -0.16666666666666666) x) (/ 1.0 (* x 0.16666666666666666))))
double code(double x) {
double tmp;
if ((x + sqrt((1.0 + (x * x)))) <= 5.0) {
tmp = fma((x * x), (x * -0.16666666666666666), x);
} else {
tmp = 1.0 / (x * 0.16666666666666666);
}
return tmp;
}
# Cubic series x - x^3/6 when x + sqrt(1 + x^2) <= 5, else 6/x.
function code(x) tmp = 0.0 if (Float64(x + sqrt(Float64(1.0 + Float64(x * x)))) <= 5.0) tmp = fma(Float64(x * x), Float64(x * -0.16666666666666666), x); else tmp = Float64(1.0 / Float64(x * 0.16666666666666666)); end return tmp end
(* Cubic series x - x^3/6 when x + Sqrt[1 + x^2] <= 5, else 6/x. *)
code[x_] := If[LessEqual[N[(x + N[Sqrt[N[(1.0 + N[(x * x), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision], 5.0], N[(N[(x * x), $MachinePrecision] * N[(x * -0.16666666666666666), $MachinePrecision] + x), $MachinePrecision], N[(1.0 / N[(x * 0.16666666666666666), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x + \sqrt{1 + x \cdot x} \leq 5:\\
\;\;\;\;\mathsf{fma}\left(x \cdot x, x \cdot -0.16666666666666666, x\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{1}{x \cdot 0.16666666666666666}\\
\end{array}
\end{array}
if (+.f64 x (sqrt.f64 (+.f64 (*.f64 x x) #s(literal 1 binary64)))) < 5Initial program 7.8%
Taylor expanded in x around 0
+-commutativeN/A
distribute-lft-inN/A
associate-*r*N/A
*-commutativeN/A
*-rgt-identityN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
lower-*.f6482.1
Applied rewrites82.1%
if 5 < (+.f64 x (sqrt.f64 (+.f64 (*.f64 x x) #s(literal 1 binary64)))) Initial program 40.1%
Taylor expanded in x around 0
*-commutativeN/A
+-commutativeN/A
distribute-lft1-inN/A
*-commutativeN/A
associate-*r*N/A
*-commutativeN/A
lower-fma.f64N/A
sub-negN/A
unpow2N/A
associate-*r*N/A
*-commutativeN/A
metadata-evalN/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f64N/A
lower-*.f64N/A
unpow2N/A
lower-*.f643.5
Applied rewrites3.5%
Applied rewrites3.5%
Taylor expanded in x around 0
Applied rewrites4.6%
Taylor expanded in x around inf
Applied rewrites5.0%
Final simplification53.5%
(FPCore (x) :precision binary64 (/ 1.0 (fma x 0.16666666666666666 (/ 1.0 x))))
double code(double x) {
return 1.0 / fma(x, 0.16666666666666666, (1.0 / x));
}
# Global rational approximation 1/(x/6 + 1/x).
function code(x) return Float64(1.0 / fma(x, 0.16666666666666666, Float64(1.0 / x))) end
(* Global rational approximation 1/(x/6 + 1/x). *)
code[x_] := N[(1.0 / N[(x * 0.16666666666666666 + N[(1.0 / x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{\mathsf{fma}\left(x, 0.16666666666666666, \frac{1}{x}\right)}
\end{array}
Initial program 19.8%
Taylor expanded in x around 0
*-commutativeN/A
+-commutativeN/A
distribute-lft1-inN/A
*-commutativeN/A
associate-*r*N/A
*-commutativeN/A
lower-fma.f64N/A
sub-negN/A
unpow2N/A
associate-*r*N/A
*-commutativeN/A
metadata-evalN/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f64N/A
lower-*.f64N/A
unpow2N/A
lower-*.f6453.3
Applied rewrites53.3%
Applied rewrites53.3%
Taylor expanded in x around 0
Applied rewrites53.9%
Taylor expanded in x around inf
Applied rewrites54.1%
(FPCore (x) :precision binary64 (/ 1.0 (/ 1.0 x)))
/* Double reciprocal 1/(1/x): equals x up to two rounding steps. */
double code(double x) {
    double inv = 1.0 / x;
    return 1.0 / inv;
}
! Double reciprocal 1/(1/x): equals x up to two rounding steps.
real(8) function code(x)
real(8), intent (in) :: x
code = 1.0d0 / (1.0d0 / x)
end function
/** Double reciprocal 1/(1/x): equals x up to two rounding steps. */
public static double code(double x) {
    double inv = 1.0 / x;
    return 1.0 / inv;
}
def code(x):
    """Double reciprocal 1/(1/x): equals x up to two rounding steps."""
    inv = 1.0 / x
    return 1.0 / inv
# Double reciprocal 1/(1/x).
function code(x) return Float64(1.0 / Float64(1.0 / x)) end
% Double reciprocal 1/(1/x).
function tmp = code(x) tmp = 1.0 / (1.0 / x); end
(* Double reciprocal 1/(1/x). *)
code[x_] := N[(1.0 / N[(1.0 / x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{\frac{1}{x}}
\end{array}
Initial program 19.8%
Taylor expanded in x around 0
*-commutativeN/A
+-commutativeN/A
distribute-lft1-inN/A
*-commutativeN/A
associate-*r*N/A
*-commutativeN/A
lower-fma.f64N/A
sub-negN/A
unpow2N/A
associate-*r*N/A
*-commutativeN/A
metadata-evalN/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f64N/A
lower-*.f64N/A
unpow2N/A
lower-*.f6453.3
Applied rewrites53.3%
Applied rewrites53.3%
Taylor expanded in x around 0
Applied rewrites54.0%
(FPCore (x) :precision binary64 (/ 1.0 (* x 0.16666666666666666)))
/* Reciprocal of x/6, i.e. approximately 6/x. */
double code(double x) {
    double scaled = x * 0.16666666666666666;
    return 1.0 / scaled;
}
! Reciprocal of x/6, i.e. approximately 6/x.
real(8) function code(x)
real(8), intent (in) :: x
code = 1.0d0 / (x * 0.16666666666666666d0)
end function
/** Reciprocal of x/6, i.e. approximately 6/x. */
public static double code(double x) {
    double scaled = x * 0.16666666666666666;
    return 1.0 / scaled;
}
def code(x):
    """Reciprocal of x/6, i.e. approximately 6/x."""
    scaled = x * 0.16666666666666666
    return 1.0 / scaled
# Reciprocal of x/6, i.e. approximately 6/x.
function code(x) return Float64(1.0 / Float64(x * 0.16666666666666666)) end
% Reciprocal of x/6, i.e. approximately 6/x.
function tmp = code(x) tmp = 1.0 / (x * 0.16666666666666666); end
(* Reciprocal of x/6, i.e. approximately 6/x. *)
code[x_] := N[(1.0 / N[(x * 0.16666666666666666), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{x \cdot 0.16666666666666666}
\end{array}
Initial program 19.8%
Taylor expanded in x around 0
*-commutativeN/A
+-commutativeN/A
distribute-lft1-inN/A
*-commutativeN/A
associate-*r*N/A
*-commutativeN/A
lower-fma.f64N/A
sub-negN/A
unpow2N/A
associate-*r*N/A
*-commutativeN/A
metadata-evalN/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f64N/A
lower-*.f64N/A
unpow2N/A
lower-*.f6453.3
Applied rewrites53.3%
Applied rewrites53.3%
Taylor expanded in x around 0
Applied rewrites53.9%
Taylor expanded in x around inf
Applied rewrites4.5%
(FPCore (x) :precision binary64 (* x (* x (* x -0.16666666666666666))))
/* Cubic term -x^3/6, evaluated as the nested product x*(x*(x * -1/6)). */
double code(double x) {
    double t = x * -0.16666666666666666;
    t = x * t;
    return x * t;
}
! Cubic term -x**3/6, evaluated as the nested product x*(x*(x * -1/6)).
real(8) function code(x)
real(8), intent (in) :: x
code = x * (x * (x * (-0.16666666666666666d0)))
end function
/** Cubic term -x^3/6, evaluated as the nested product x*(x*(x * -1/6)). */
public static double code(double x) {
    double t = x * -0.16666666666666666;
    t = x * t;
    return x * t;
}
def code(x):
    """Cubic term -x**3/6, evaluated as x*(x*(x * -0.16666666666666666))."""
    t = x * -0.16666666666666666
    t = x * t
    return x * t
# Cubic term -x^3/6 as nested products.
function code(x) return Float64(x * Float64(x * Float64(x * -0.16666666666666666))) end
% Cubic term -x^3/6 as nested products.
function tmp = code(x) tmp = x * (x * (x * -0.16666666666666666)); end
(* Cubic term -x^3/6 as nested products. *)
code[x_] := N[(x * N[(x * N[(x * -0.16666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x \cdot \left(x \cdot \left(x \cdot -0.16666666666666666\right)\right)
\end{array}
Initial program 19.8%
Taylor expanded in x around 0
+-commutativeN/A
distribute-lft-inN/A
associate-*r*N/A
*-commutativeN/A
*-rgt-identityN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
lower-*.f6451.9
Applied rewrites51.9%
Taylor expanded in x around inf
Applied rewrites3.1%
(FPCore (x) :precision binary64 (let* ((t_0 (sqrt (+ (* x x) 1.0)))) (if (< x 0.0) (log (/ -1.0 (- x t_0))) (log (+ x t_0)))))
double code(double x) {
double t_0 = sqrt(((x * x) + 1.0));
double tmp;
if (x < 0.0) {
tmp = log((-1.0 / (x - t_0)));
} else {
tmp = log((x + t_0));
}
return tmp;
}
! asinh(x) with a sign split: the x < 0 branch uses the identity
! log(x + t_0) == log(-1/(x - t_0)) to avoid cancellation between x
! and t_0 = sqrt(x**2 + 1).
real(8) function code(x)
real(8), intent (in) :: x
real(8) :: t_0
real(8) :: tmp
t_0 = sqrt(((x * x) + 1.0d0))
if (x < 0.0d0) then
tmp = log(((-1.0d0) / (x - t_0)))
else
tmp = log((x + t_0))
end if
code = tmp
end function
/**
 * asinh(x) with a sign split: for x &lt; 0 the identity
 * log(x + t0) == log(-1/(x - t0)) avoids cancellation between x and
 * t0 = sqrt(x*x + 1).
 */
public static double code(double x) {
    double t_0 = Math.sqrt((x * x) + 1.0);
    if (x < 0.0) {
        return Math.log(-1.0 / (x - t_0));
    }
    return Math.log(x + t_0);
}
def code(x):
    """asinh(x) with a sign split: for x < 0 the identity
    log(x + t_0) == log(-1/(x - t_0)) avoids cancellation between x and
    t_0 = sqrt(x*x + 1).

    Fix: the generated one-line form (multiple statements plus an ``if:``
    on a single line) was a SyntaxError; this is the same algorithm as
    valid Python.
    """
    t_0 = math.sqrt((x * x) + 1.0)
    if x < 0.0:
        return math.log(-1.0 / (x - t_0))
    return math.log(x + t_0)
# asinh with sign split: x < 0 uses log(-1/(x - t_0)) to avoid cancellation.
function code(x) t_0 = sqrt(Float64(Float64(x * x) + 1.0)) tmp = 0.0 if (x < 0.0) tmp = log(Float64(-1.0 / Float64(x - t_0))); else tmp = log(Float64(x + t_0)); end return tmp end
% asinh with sign split: x < 0 uses log(-1/(x - t_0)) to avoid cancellation.
function tmp_2 = code(x) t_0 = sqrt(((x * x) + 1.0)); tmp = 0.0; if (x < 0.0) tmp = log((-1.0 / (x - t_0))); else tmp = log((x + t_0)); end tmp_2 = tmp; end
(* asinh with sign split: x < 0 uses Log[-1/(x - t0)] to avoid cancellation. *)
code[x_] := Block[{t$95$0 = N[Sqrt[N[(N[(x * x), $MachinePrecision] + 1.0), $MachinePrecision]], $MachinePrecision]}, If[Less[x, 0.0], N[Log[N[(-1.0 / N[(x - t$95$0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision], N[Log[N[(x + t$95$0), $MachinePrecision]], $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \sqrt{x \cdot x + 1}\\
\mathbf{if}\;x < 0:\\
\;\;\;\;\log \left(\frac{-1}{x - t\_0}\right)\\
\mathbf{else}:\\
\;\;\;\;\log \left(x + t\_0\right)\\
\end{array}
\end{array}
herbie shell --seed 2024233
(FPCore (x)
:name "Hyperbolic arcsine"
:precision binary64
:alt
(! :herbie-platform default (if (< x 0) (log (/ -1 (- x (sqrt (+ (* x x) 1))))) (log (+ x (sqrt (+ (* x x) 1))))))
(log (+ x (sqrt (+ (* x x) 1.0)))))