
(FPCore (x) :precision binary64 (log (+ x (sqrt (+ (* x x) 1.0)))))
double code(double x) {
return log((x + sqrt(((x * x) + 1.0))));
}
! Hyperbolic arcsine: code(x) = log(x + sqrt(x**2 + 1)) in double precision.
! NOTE(review): the direct form loses accuracy for large negative x
! (cancellation in x + sqrt(x*x + 1)); the piecewise alternatives later in
! this report exist to address that.
real(8) function code(x)
real(8), intent (in) :: x
code = log((x + sqrt(((x * x) + 1.0d0))))
end function
/** Hyperbolic arcsine of x, computed directly as log(x + sqrt(x*x + 1)). */
public static double code(double x) {
    double radicand = (x * x) + 1.0;
    return Math.log(x + Math.sqrt(radicand));
}
def code(x):
    """Hyperbolic arcsine of x: log(x + sqrt(x*x + 1))."""
    radicand = (x * x) + 1.0
    return math.log(x + math.sqrt(radicand))
# Hyperbolic arcsine: log(x + sqrt(x^2 + 1)), forced to Float64 at each step.
function code(x) return log(Float64(x + sqrt(Float64(Float64(x * x) + 1.0)))) end
% Hyperbolic arcsine: log(x + sqrt(x^2 + 1)).
% NOTE(review): header and body share one line; confirm this parses in MATLAB.
function tmp = code(x) tmp = log((x + sqrt(((x * x) + 1.0)))); end
(* Hyperbolic arcsine: Log[x + Sqrt[x^2 + 1]], rounded to machine precision at each step. *)
code[x_] := N[Log[N[(x + N[Sqrt[N[(N[(x * x), $MachinePrecision] + 1.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\log \left(x + \sqrt{x \cdot x + 1}\right)
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 7 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x) :precision binary64 (log (+ x (sqrt (+ (* x x) 1.0)))))
double code(double x) {
return log((x + sqrt(((x * x) + 1.0))));
}
! Hyperbolic arcsine: code(x) = log(x + sqrt(x**2 + 1)) in double precision.
! NOTE(review): loses accuracy for large negative x due to cancellation in
! x + sqrt(x*x + 1).
real(8) function code(x)
real(8), intent (in) :: x
code = log((x + sqrt(((x * x) + 1.0d0))))
end function
/** Hyperbolic arcsine of x: log(x + sqrt(x*x + 1)). */
public static double code(double x) {
return Math.log((x + Math.sqrt(((x * x) + 1.0))));
}
def code(x):
    """Return the hyperbolic arcsine of x via log(x + sqrt(x*x + 1))."""
    return math.log(x + math.sqrt(x * x + 1.0))
# Hyperbolic arcsine: log(x + sqrt(x^2 + 1)), forced to Float64 at each step.
function code(x) return log(Float64(x + sqrt(Float64(Float64(x * x) + 1.0)))) end
% Hyperbolic arcsine: log(x + sqrt(x^2 + 1)).
% NOTE(review): header and body share one line; confirm this parses in MATLAB.
function tmp = code(x) tmp = log((x + sqrt(((x * x) + 1.0)))); end
(* Hyperbolic arcsine: Log[x + Sqrt[x^2 + 1]], rounded to machine precision at each step. *)
code[x_] := N[Log[N[(x + N[Sqrt[N[(N[(x * x), $MachinePrecision] + 1.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\log \left(x + \sqrt{x \cdot x + 1}\right)
\end{array}
(FPCore (x)
:precision binary64
(if (<= x -10000.0)
(log (/ -0.5 x))
(if (<= x 5000000.0)
(-
(log (fma (* x x) x (pow (fma x x 1.0) 1.5)))
(log1p (* (- (+ x x) (sqrt (fma x x 1.0))) x)))
(log (+ x x)))))
/*
 * Herbie alternative: asinh(x) = log(x + sqrt(x^2 + 1)) rewritten piecewise
 * for accuracy (derivation log below reports ~98-100% per regime).
 */
double code(double x) {
double tmp;
if (x <= -10000.0) {
/* x -> -inf: Taylor expansion gives asinh(x) ~ log(-1/(2x)). */
tmp = log((-0.5 / x));
} else if (x <= 5000000.0) {
/* Mid range: with s = sqrt(x^2+1), (x^3 + s^3)/(1 + (2x - s)x) == x + s,
 * so this fma/log1p form equals the original expression while avoiding the
 * cancellation in x + sqrt(x*x + 1) for negative x. */
tmp = log(fma((x * x), x, pow(fma(x, x, 1.0), 1.5))) - log1p((((x + x) - sqrt(fma(x, x, 1.0))) * x));
} else {
/* x -> +inf: asinh(x) ~ log(2x). */
tmp = log((x + x));
}
return tmp;
}
# Herbie alternative: piecewise asinh(x) -- log(-1/(2x)) for x <= -1e4, an
# exact fma/log1p rearrangement mid-range, log(2x) for large x.
# (Reformatted onto multiple lines: the original single-line form does not parse.)
function code(x)
    tmp = 0.0
    if x <= -10000.0
        tmp = log(Float64(-0.5 / x))
    elseif x <= 5000000.0
        tmp = Float64(log(fma(Float64(x * x), x, (fma(x, x, 1.0) ^ 1.5))) -
                      log1p(Float64(Float64(Float64(x + x) - sqrt(fma(x, x, 1.0))) * x)))
    else
        tmp = log(Float64(x + x))
    end
    return tmp
end
(* Herbie alternative: piecewise asinh -- Log[-1/(2x)] below -1e4, an fma/log1p rearrangement mid-range, Log[2x] above 5e6. *)
code[x_] := If[LessEqual[x, -10000.0], N[Log[N[(-0.5 / x), $MachinePrecision]], $MachinePrecision], If[LessEqual[x, 5000000.0], N[(N[Log[N[(N[(x * x), $MachinePrecision] * x + N[Power[N[(x * x + 1.0), $MachinePrecision], 1.5], $MachinePrecision]), $MachinePrecision]], $MachinePrecision] - N[Log[1 + N[(N[(N[(x + x), $MachinePrecision] - N[Sqrt[N[(x * x + 1.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] * x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision], N[Log[N[(x + x), $MachinePrecision]], $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -10000:\\
\;\;\;\;\log \left(\frac{-0.5}{x}\right)\\
\mathbf{elif}\;x \leq 5000000:\\
\;\;\;\;\log \left(\mathsf{fma}\left(x \cdot x, x, {\left(\mathsf{fma}\left(x, x, 1\right)\right)}^{1.5}\right)\right) - \mathsf{log1p}\left(\left(\left(x + x\right) - \sqrt{\mathsf{fma}\left(x, x, 1\right)}\right) \cdot x\right)\\
\mathbf{else}:\\
\;\;\;\;\log \left(x + x\right)\\
\end{array}
\end{array}
if x < -1e4: Initial program 1.8%
Taylor expanded in x around -inf
lower-/.f64100.0
Applied rewrites100.0%
if -1e4 < x < 5e6: Initial program 11.9%
lift-log.f64N/A
lift-+.f64N/A
flip3-+N/A
log-divN/A
lower--.f64N/A
Applied rewrites98.3%
lift-+.f64N/A
+-commutativeN/A
lift-pow.f64N/A
unpow3N/A
lower-fma.f64N/A
lower-*.f6498.3
Applied rewrites98.3%
lift-fma.f64N/A
lift-*.f64N/A
distribute-lft-outN/A
*-commutativeN/A
lower-*.f64N/A
lift--.f64N/A
lift-sqrt.f64N/A
lift-fma.f64N/A
associate-+r-N/A
lower--.f64N/A
lower-+.f64N/A
lift-fma.f64N/A
lift-sqrt.f6498.3
Applied rewrites98.3%
if 5e6 < x: Initial program 49.6%
Taylor expanded in x around inf
lower-*.f64100.0
Applied rewrites100.0%
Applied rewrites100.0%
(FPCore (x)
:precision binary64
(if (<= x -1.3)
(log (/ -0.5 x))
(if (<= x 1.0)
(fma (* (* x x) x) (fma 0.075 (* x x) -0.16666666666666666) x)
(log (+ x (- x (/ -0.5 x)))))))
/*
 * Herbie alternative: asinh(x) via log(-1/(2x)) for x <= -1.3, a degree-5
 * odd Taylor polynomial about 0 mid-range, and log(2x + 1/(2x)) for x > 1.
 */
double code(double x) {
double tmp;
if (x <= -1.3) {
/* Large negative x: asinh(x) ~ log(-1/(2x)). */
tmp = log((-0.5 / x));
} else if (x <= 1.0) {
/* x - x^3/6 + (3/40) x^5  (0.075 = 3/40, -0.1666... = -1/6). */
tmp = fma(((x * x) * x), fma(0.075, (x * x), -0.16666666666666666), x);
} else {
/* Large positive x: asinh(x) ~ log(2x + 1/(2x)). */
tmp = log((x + (x - (-0.5 / x))));
}
return tmp;
}
# Herbie alternative: log(-1/(2x)) for x <= -1.3, odd quintic Taylor
# polynomial for -1.3 < x <= 1, log(2x + 1/(2x)) above.
# (Reformatted onto multiple lines: the original single-line form does not parse.)
function code(x)
    tmp = 0.0
    if x <= -1.3
        tmp = log(Float64(-0.5 / x))
    elseif x <= 1.0
        tmp = fma(Float64(Float64(x * x) * x), fma(0.075, Float64(x * x), -0.16666666666666666), x)
    else
        tmp = log(Float64(x + Float64(x - Float64(-0.5 / x))))
    end
    return tmp
end
(* Herbie alternative: Log[-1/(2x)] below -1.3, odd quintic Taylor polynomial up to 1, Log[2x + 1/(2x)] above. *)
code[x_] := If[LessEqual[x, -1.3], N[Log[N[(-0.5 / x), $MachinePrecision]], $MachinePrecision], If[LessEqual[x, 1.0], N[(N[(N[(x * x), $MachinePrecision] * x), $MachinePrecision] * N[(0.075 * N[(x * x), $MachinePrecision] + -0.16666666666666666), $MachinePrecision] + x), $MachinePrecision], N[Log[N[(x + N[(x - N[(-0.5 / x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -1.3:\\
\;\;\;\;\log \left(\frac{-0.5}{x}\right)\\
\mathbf{elif}\;x \leq 1:\\
\;\;\;\;\mathsf{fma}\left(\left(x \cdot x\right) \cdot x, \mathsf{fma}\left(0.075, x \cdot x, -0.16666666666666666\right), x\right)\\
\mathbf{else}:\\
\;\;\;\;\log \left(x + \left(x - \frac{-0.5}{x}\right)\right)\\
\end{array}
\end{array}
if x < -1.30000000000000004Initial program 4.9%
Taylor expanded in x around -inf
lower-/.f6497.8
Applied rewrites97.8%
if -1.30000000000000004 < x < 1Initial program 9.5%
lift-log.f64N/A
lift-+.f64N/A
flip3-+N/A
log-divN/A
lower--.f64N/A
Applied rewrites98.4%
Taylor expanded in x around 0
+-commutativeN/A
distribute-lft-inN/A
associate-*r*N/A
unpow2N/A
cube-multN/A
*-rgt-identityN/A
lower-fma.f64N/A
lower-pow.f64N/A
sub-negN/A
metadata-evalN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6499.4
Applied rewrites99.4%
Applied rewrites99.4%
if 1 < x Initial program 51.6%
Taylor expanded in x around inf
distribute-lft-inN/A
*-rgt-identityN/A
cancel-sign-subN/A
mul-1-negN/A
lower--.f64N/A
mul-1-negN/A
distribute-lft-neg-inN/A
*-commutativeN/A
associate-*r*N/A
distribute-rgt-neg-inN/A
metadata-evalN/A
unpow2N/A
associate-/r*N/A
associate-*r/N/A
rgt-mult-inverseN/A
associate-*l/N/A
metadata-evalN/A
lower-/.f6499.1
Applied rewrites99.1%
(FPCore (x)
:precision binary64
(if (<= x -1.3)
(log (/ -0.5 x))
(if (<= x 1.3)
(fma (* (* x x) x) (fma 0.075 (* x x) -0.16666666666666666) x)
(log (+ x x)))))
/*
 * Herbie alternative: symmetric thresholds at +/-1.3 -- log(-1/(2x)) below,
 * odd quintic Taylor polynomial in the middle, log(2x) above.
 */
double code(double x) {
double tmp;
if (x <= -1.3) {
/* asinh(x) ~ log(-1/(2x)) for large negative x. */
tmp = log((-0.5 / x));
} else if (x <= 1.3) {
/* x - x^3/6 + (3/40) x^5. */
tmp = fma(((x * x) * x), fma(0.075, (x * x), -0.16666666666666666), x);
} else {
/* asinh(x) ~ log(2x) for large positive x. */
tmp = log((x + x));
}
return tmp;
}
# Herbie alternative: log(-1/(2x)) below -1.3, odd quintic Taylor polynomial
# for |x| <= 1.3, log(2x) above.
# (Reformatted onto multiple lines: the original single-line form does not parse.)
function code(x)
    tmp = 0.0
    if x <= -1.3
        tmp = log(Float64(-0.5 / x))
    elseif x <= 1.3
        tmp = fma(Float64(Float64(x * x) * x), fma(0.075, Float64(x * x), -0.16666666666666666), x)
    else
        tmp = log(Float64(x + x))
    end
    return tmp
end
(* Herbie alternative: Log[-1/(2x)] below -1.3, odd quintic Taylor polynomial for |x| <= 1.3, Log[2x] above. *)
code[x_] := If[LessEqual[x, -1.3], N[Log[N[(-0.5 / x), $MachinePrecision]], $MachinePrecision], If[LessEqual[x, 1.3], N[(N[(N[(x * x), $MachinePrecision] * x), $MachinePrecision] * N[(0.075 * N[(x * x), $MachinePrecision] + -0.16666666666666666), $MachinePrecision] + x), $MachinePrecision], N[Log[N[(x + x), $MachinePrecision]], $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -1.3:\\
\;\;\;\;\log \left(\frac{-0.5}{x}\right)\\
\mathbf{elif}\;x \leq 1.3:\\
\;\;\;\;\mathsf{fma}\left(\left(x \cdot x\right) \cdot x, \mathsf{fma}\left(0.075, x \cdot x, -0.16666666666666666\right), x\right)\\
\mathbf{else}:\\
\;\;\;\;\log \left(x + x\right)\\
\end{array}
\end{array}
if x < -1.30000000000000004Initial program 4.9%
Taylor expanded in x around -inf
lower-/.f6497.8
Applied rewrites97.8%
if -1.30000000000000004 < x < 1.30000000000000004Initial program 9.5%
lift-log.f64N/A
lift-+.f64N/A
flip3-+N/A
log-divN/A
lower--.f64N/A
Applied rewrites98.4%
Taylor expanded in x around 0
+-commutativeN/A
distribute-lft-inN/A
associate-*r*N/A
unpow2N/A
cube-multN/A
*-rgt-identityN/A
lower-fma.f64N/A
lower-pow.f64N/A
sub-negN/A
metadata-evalN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6499.4
Applied rewrites99.4%
Applied rewrites99.4%
if 1.30000000000000004 < x Initial program 51.6%
Taylor expanded in x around inf
lower-*.f6498.0
Applied rewrites98.0%
Applied rewrites98.0%
(FPCore (x) :precision binary64 (if (<= x 1.3) (fma (* (* x x) x) (fma 0.075 (* x x) -0.16666666666666666) x) (log (+ x x))))
double code(double x) {
double tmp;
if (x <= 1.3) {
tmp = fma(((x * x) * x), fma(0.075, (x * x), -0.16666666666666666), x);
} else {
tmp = log((x + x));
}
return tmp;
}
# Herbie alternative: odd quintic Taylor polynomial for x <= 1.3, log(2x) above.
# (Reformatted onto multiple lines: the original single-line form does not parse.)
function code(x)
    tmp = 0.0
    if x <= 1.3
        tmp = fma(Float64(Float64(x * x) * x), fma(0.075, Float64(x * x), -0.16666666666666666), x)
    else
        tmp = log(Float64(x + x))
    end
    return tmp
end
(* Herbie alternative: odd quintic Taylor polynomial for x <= 1.3, Log[2x] above. *)
code[x_] := If[LessEqual[x, 1.3], N[(N[(N[(x * x), $MachinePrecision] * x), $MachinePrecision] * N[(0.075 * N[(x * x), $MachinePrecision] + -0.16666666666666666), $MachinePrecision] + x), $MachinePrecision], N[Log[N[(x + x), $MachinePrecision]], $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq 1.3:\\
\;\;\;\;\mathsf{fma}\left(\left(x \cdot x\right) \cdot x, \mathsf{fma}\left(0.075, x \cdot x, -0.16666666666666666\right), x\right)\\
\mathbf{else}:\\
\;\;\;\;\log \left(x + x\right)\\
\end{array}
\end{array}
if x < 1.30000000000000004Initial program 8.2%
lift-log.f64N/A
lift-+.f64N/A
flip3-+N/A
log-divN/A
lower--.f64N/A
Applied rewrites71.1%
Taylor expanded in x around 0
+-commutativeN/A
distribute-lft-inN/A
associate-*r*N/A
unpow2N/A
cube-multN/A
*-rgt-identityN/A
lower-fma.f64N/A
lower-pow.f64N/A
sub-negN/A
metadata-evalN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6471.7
Applied rewrites71.7%
Applied rewrites71.7%
if 1.30000000000000004 < x Initial program 51.6%
Taylor expanded in x around inf
lower-*.f6498.0
Applied rewrites98.0%
Applied rewrites98.0%
(FPCore (x) :precision binary64 (if (<= x 1.52) (fma (* (* x x) x) (fma 0.075 (* x x) -0.16666666666666666) x) (log (+ 1.0 x))))
/*
 * Herbie alternative: odd quintic Taylor polynomial for x <= 1.52,
 * log(1 + x) otherwise.
 * NOTE(review): the derivation log reports only ~31% accuracy for the
 * log(1 + x) branch -- this alternative trades accuracy for speed.
 */
double code(double x) {
double tmp;
if (x <= 1.52) {
/* x - x^3/6 + (3/40) x^5. */
tmp = fma(((x * x) * x), fma(0.075, (x * x), -0.16666666666666666), x);
} else {
tmp = log((1.0 + x));
}
return tmp;
}
# Herbie alternative: odd quintic Taylor polynomial for x <= 1.52, log(1 + x) above.
# (Reformatted onto multiple lines: the original single-line form does not parse.)
function code(x)
    tmp = 0.0
    if x <= 1.52
        tmp = fma(Float64(Float64(x * x) * x), fma(0.075, Float64(x * x), -0.16666666666666666), x)
    else
        tmp = log(Float64(1.0 + x))
    end
    return tmp
end
(* Herbie alternative: odd quintic Taylor polynomial for x <= 1.52, Log[1 + x] above. *)
code[x_] := If[LessEqual[x, 1.52], N[(N[(N[(x * x), $MachinePrecision] * x), $MachinePrecision] * N[(0.075 * N[(x * x), $MachinePrecision] + -0.16666666666666666), $MachinePrecision] + x), $MachinePrecision], N[Log[N[(1.0 + x), $MachinePrecision]], $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq 1.52:\\
\;\;\;\;\mathsf{fma}\left(\left(x \cdot x\right) \cdot x, \mathsf{fma}\left(0.075, x \cdot x, -0.16666666666666666\right), x\right)\\
\mathbf{else}:\\
\;\;\;\;\log \left(1 + x\right)\\
\end{array}
\end{array}
if x < 1.52Initial program 8.2%
lift-log.f64N/A
lift-+.f64N/A
flip3-+N/A
log-divN/A
lower--.f64N/A
Applied rewrites71.1%
Taylor expanded in x around 0
+-commutativeN/A
distribute-lft-inN/A
associate-*r*N/A
unpow2N/A
cube-multN/A
*-rgt-identityN/A
lower-fma.f64N/A
lower-pow.f64N/A
sub-negN/A
metadata-evalN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6471.7
Applied rewrites71.7%
Applied rewrites71.7%
if 1.52 < x Initial program 51.6%
Taylor expanded in x around 0
lower-+.f6431.0
Applied rewrites31.0%
(FPCore (x) :precision binary64 (fma (* (* x x) x) (fma 0.075 (* x x) -0.16666666666666666) x))
double code(double x) {
return fma(((x * x) * x), fma(0.075, (x * x), -0.16666666666666666), x);
}
# Odd quintic Taylor approximation of asinh: x - x^3/6 + (3/40) x^5.
function code(x) return fma(Float64(Float64(x * x) * x), fma(0.075, Float64(x * x), -0.16666666666666666), x) end
(* Odd quintic Taylor approximation of asinh: x - x^3/6 + (3/40) x^5. *)
code[x_] := N[(N[(N[(x * x), $MachinePrecision] * x), $MachinePrecision] * N[(0.075 * N[(x * x), $MachinePrecision] + -0.16666666666666666), $MachinePrecision] + x), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\left(x \cdot x\right) \cdot x, \mathsf{fma}\left(0.075, x \cdot x, -0.16666666666666666\right), x\right)
\end{array}
Initial program 17.0%
lift-log.f64N/A
lift-+.f64N/A
flip3-+N/A
log-divN/A
lower--.f64N/A
Applied rewrites63.7%
Taylor expanded in x around 0
+-commutativeN/A
distribute-lft-inN/A
associate-*r*N/A
unpow2N/A
cube-multN/A
*-rgt-identityN/A
lower-fma.f64N/A
lower-pow.f64N/A
sub-negN/A
metadata-evalN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6457.9
Applied rewrites57.9%
Applied rewrites57.9%
(FPCore (x) :precision binary64 (fma (* -0.16666666666666666 (* x x)) x x))
double code(double x) {
return fma((-0.16666666666666666 * (x * x)), x, x);
}
# Cubic Taylor approximation of asinh: x - x^3/6, via a single fma.
function code(x) return fma(Float64(-0.16666666666666666 * Float64(x * x)), x, x) end
(* Cubic Taylor approximation of asinh: x - x^3/6. *)
code[x_] := N[(N[(-0.16666666666666666 * N[(x * x), $MachinePrecision]), $MachinePrecision] * x + x), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(-0.16666666666666666 \cdot \left(x \cdot x\right), x, x\right)
\end{array}
Initial program 17.0%
Taylor expanded in x around 0
+-commutativeN/A
distribute-lft-inN/A
*-commutativeN/A
associate-*r*N/A
*-rgt-identityN/A
lower-fma.f64N/A
*-commutativeN/A
pow-plusN/A
lower-pow.f64N/A
metadata-eval56.6
Applied rewrites56.6%
Applied rewrites56.6%
(FPCore (x) :precision binary64 (let* ((t_0 (sqrt (+ (* x x) 1.0)))) (if (< x 0.0) (log (/ -1.0 (- x t_0))) (log (+ x t_0)))))
/*
 * asinh(x) = log(x + sqrt(x^2 + 1)) with a cancellation-free negative branch:
 * for x < 0, x + t_0 is rewritten as -1/(x - t_0) -- the same value, since
 * (x + t_0)(x - t_0) = x^2 - (x^2 + 1) = -1 -- but without the cancellation.
 */
double code(double x) {
double t_0 = sqrt(((x * x) + 1.0));
double tmp;
if (x < 0.0) {
tmp = log((-1.0 / (x - t_0)));
} else {
tmp = log((x + t_0));
}
return tmp;
}
! asinh(x) = log(x + sqrt(x**2 + 1)) with a cancellation-free branch:
! for x < 0, x + t_0 is rewritten as -1/(x - t_0) (identical value, since
! (x + t_0)*(x - t_0) = -1).
real(8) function code(x)
real(8), intent (in) :: x
real(8) :: t_0
real(8) :: tmp
t_0 = sqrt(((x * x) + 1.0d0))
if (x < 0.0d0) then
tmp = log(((-1.0d0) / (x - t_0)))
else
tmp = log((x + t_0))
end if
code = tmp
end function
/**
 * Hyperbolic arcsine of x.  For x < 0 the sum x + t_0 is rewritten as
 * -1/(x - t_0) -- the same value (since (x + t_0)(x - t_0) = -1), but free
 * of catastrophic cancellation.
 */
public static double code(double x) {
double t_0 = Math.sqrt(((x * x) + 1.0));
double tmp;
if (x < 0.0) {
tmp = Math.log((-1.0 / (x - t_0)));
} else {
tmp = Math.log((x + t_0));
}
return tmp;
}
def code(x):
    """Hyperbolic arcsine of x.

    For x < 0, log(x + t_0) is evaluated as log(-1/(x - t_0)) -- the same
    value, but without the catastrophic cancellation of x + sqrt(x*x + 1).
    (Reformatted onto multiple lines: the original single-line form is not
    valid Python.)
    """
    t_0 = math.sqrt((x * x) + 1.0)
    if x < 0.0:
        tmp = math.log(-1.0 / (x - t_0))
    else:
        tmp = math.log(x + t_0)
    return tmp
# asinh(x) with a cancellation-free negative branch: for x < 0,
# log(x + t_0) is evaluated as log(-1/(x - t_0)).
# (Reformatted onto multiple lines: the original single-line form does not parse.)
function code(x)
    t_0 = sqrt(Float64(Float64(x * x) + 1.0))
    tmp = 0.0
    if x < 0.0
        tmp = log(Float64(-1.0 / Float64(x - t_0)))
    else
        tmp = log(Float64(x + t_0))
    end
    return tmp
end
% asinh(x) with a cancellation-free negative branch: for x < 0,
% log(x + t_0) is evaluated as log(-1/(x - t_0)).
% (Reformatted onto multiple lines from the original single-line form.)
function tmp_2 = code(x)
  t_0 = sqrt(((x * x) + 1.0));
  tmp = 0.0;
  if (x < 0.0)
    tmp = log((-1.0 / (x - t_0)));
  else
    tmp = log((x + t_0));
  end
  tmp_2 = tmp;
end
(* asinh(x) with a cancellation-free negative branch: for x < 0, Log[x + t0] is evaluated as Log[-1/(x - t0)]. *)
code[x_] := Block[{t$95$0 = N[Sqrt[N[(N[(x * x), $MachinePrecision] + 1.0), $MachinePrecision]], $MachinePrecision]}, If[Less[x, 0.0], N[Log[N[(-1.0 / N[(x - t$95$0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision], N[Log[N[(x + t$95$0), $MachinePrecision]], $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \sqrt{x \cdot x + 1}\\
\mathbf{if}\;x < 0:\\
\;\;\;\;\log \left(\frac{-1}{x - t\_0}\right)\\
\mathbf{else}:\\
\;\;\;\;\log \left(x + t\_0\right)\\
\end{array}
\end{array}
herbie shell --seed 2024307
(FPCore (x)
:name "Hyperbolic arcsine"
:precision binary64
:alt
(! :herbie-platform default (if (< x 0) (log (/ -1 (- x (sqrt (+ (* x x) 1))))) (log (+ x (sqrt (+ (* x x) 1))))))
(log (+ x (sqrt (+ (* x x) 1.0)))))