
(FPCore (x) :precision binary64 (/ x (+ 1.0 (sqrt (+ x 1.0)))))
double code(double x) {
return x / (1.0 + sqrt((x + 1.0)));
}
real(8) function code(x)
    implicit none
    real(8), intent (in) :: x
    real(8) :: denom
    ! x / (1 + sqrt(x + 1))
    denom = 1.0d0 + sqrt(x + 1.0d0)
    code = x / denom
end function
public static double code(double x) {
    // x / (1 + sqrt(x + 1))
    final double denom = 1.0 + Math.sqrt(x + 1.0);
    return x / denom;
}
def code(x):
    """Evaluate x / (1 + sqrt(x + 1)) in double precision."""
    denom = 1.0 + math.sqrt(x + 1.0)
    return x / denom
function code(x)
    # x / (1 + sqrt(x + 1)), every step rounded to Float64
    denom = Float64(1.0 + sqrt(Float64(x + 1.0)))
    return Float64(x / denom)
end
function tmp = code(x)
    % x / (1 + sqrt(x + 1))
    denom = 1.0 + sqrt((x + 1.0));
    tmp = x / denom;
end
(* x / (1 + Sqrt[x + 1]); each N[..., $MachinePrecision] rounds an intermediate to machine precision *)
code[x_] := N[(x / N[(1.0 + N[Sqrt[N[(x + 1.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{x}{1 + \sqrt{x + 1}}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 4 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x) :precision binary64 (/ x (+ 1.0 (sqrt (+ x 1.0)))))
double code(double x) {
return x / (1.0 + sqrt((x + 1.0)));
}
real(8) function code(x)
    implicit none
    real(8), intent (in) :: x
    real(8) :: denom
    ! x / (1 + sqrt(x + 1))
    denom = 1.0d0 + sqrt(x + 1.0d0)
    code = x / denom
end function
public static double code(double x) {
    // x / (1 + sqrt(x + 1))
    final double denom = 1.0 + Math.sqrt(x + 1.0);
    return x / denom;
}
def code(x):
    """Evaluate x / (1 + sqrt(x + 1)) in double precision."""
    denom = 1.0 + math.sqrt(x + 1.0)
    return x / denom
function code(x)
    # x / (1 + sqrt(x + 1)), every step rounded to Float64
    denom = Float64(1.0 + sqrt(Float64(x + 1.0)))
    return Float64(x / denom)
end
function tmp = code(x)
    % x / (1 + sqrt(x + 1))
    denom = 1.0 + sqrt((x + 1.0));
    tmp = x / denom;
end
(* x / (1 + Sqrt[x + 1]); each N[..., $MachinePrecision] rounds an intermediate to machine precision *)
code[x_] := N[(x / N[(1.0 + N[Sqrt[N[(x + 1.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{x}{1 + \sqrt{x + 1}}
\end{array}
(FPCore (x) :precision binary64 (if (<= x 0.000136) (* x (+ 0.5 (+ (* x (* x 0.0625)) (* x -0.125)))) (+ (sqrt (+ x 1.0)) -1.0)))
double code(double x) {
double tmp;
if (x <= 0.000136) {
tmp = x * (0.5 + ((x * (x * 0.0625)) + (x * -0.125)));
} else {
tmp = sqrt((x + 1.0)) + -1.0;
}
return tmp;
}
real(8) function code(x)
    implicit none
    real(8), intent (in) :: x
    real(8) :: res
    if (x <= 0.000136d0) then
        ! cubic Taylor polynomial of sqrt(x+1) - 1 about x = 0
        res = x * (0.5d0 + ((x * (x * 0.0625d0)) + (x * (-0.125d0))))
    else
        res = sqrt((x + 1.0d0)) + (-1.0d0)
    end if
    code = res
end function
public static double code(double x) {
    // Small-x regime: cubic Taylor polynomial of sqrt(x+1) - 1.
    if (x <= 0.000136) {
        return x * (0.5 + ((x * (x * 0.0625)) + (x * -0.125)));
    }
    // Otherwise the rewritten closed form.
    return Math.sqrt((x + 1.0)) + -1.0;
}
def code(x):
    """sqrt(x+1) - 1: Taylor cubic for small x, closed form otherwise.

    The original line was collapsed onto one physical line, which is not
    valid Python syntax; this restores the intended control flow unchanged.
    """
    if x <= 0.000136:
        # 0.5*x - 0.125*x**2 + 0.0625*x**3, evaluated as written by Herbie
        tmp = x * (0.5 + ((x * (x * 0.0625)) + (x * -0.125)))
    else:
        tmp = math.sqrt((x + 1.0)) + -1.0
    return tmp
function code(x)
    # sqrt(x+1) - 1: Taylor cubic for small x, closed form otherwise.
    # (Original line had fused statements with no separators -- invalid syntax.)
    tmp = 0.0
    if (x <= 0.000136)
        tmp = Float64(x * Float64(0.5 + Float64(Float64(x * Float64(x * 0.0625)) + Float64(x * -0.125))))
    else
        tmp = Float64(sqrt(Float64(x + 1.0)) + -1.0)
    end
    return tmp
end
function tmp_2 = code(x)
    % sqrt(x+1) - 1: Taylor cubic for small x, closed form otherwise.
    % (Original had body statements fused onto the function line.)
    tmp = 0.0;
    if (x <= 0.000136)
        tmp = x * (0.5 + ((x * (x * 0.0625)) + (x * -0.125)));
    else
        tmp = sqrt((x + 1.0)) + -1.0;
    end
    tmp_2 = tmp;
end
(* sqrt(x+1) - 1: Taylor cubic for x <= 0.000136, closed form otherwise; N[...] rounds each step to machine precision *)
code[x_] := If[LessEqual[x, 0.000136], N[(x * N[(0.5 + N[(N[(x * N[(x * 0.0625), $MachinePrecision]), $MachinePrecision] + N[(x * -0.125), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[Sqrt[N[(x + 1.0), $MachinePrecision]], $MachinePrecision] + -1.0), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq 0.000136:\\
\;\;\;\;x \cdot \left(0.5 + \left(x \cdot \left(x \cdot 0.0625\right) + x \cdot -0.125\right)\right)\\
\mathbf{else}:\\
\;\;\;\;\sqrt{x + 1} + -1\\
\end{array}
\end{array}
if x < 1.36e-4: Initial program 100.0%
flip-+ 3.2%
metadata-eval 3.2%
add-sqr-sqrt 3.2%
+-commutative 3.2%
associate--r+ 8.2%
metadata-eval 8.2%
neg-sub0 8.2%
associate-/r/ 8.2%
Applied egg-rr 8.2%
remove-double-neg 8.2%
distribute-frac-neg 8.2%
*-inverses 8.2%
metadata-eval 8.2%
neg-mul-1 8.2%
sub-neg 8.2%
+-commutative 8.2%
distribute-neg-in 8.2%
remove-double-neg 8.2%
metadata-eval 8.2%
Simplified 8.2%
Taylor expanded in x around 0 99.6%
unpow2 99.6%
*-commutative 99.6%
associate-*r* 99.6%
+-commutative 99.6%
associate-+l+ 99.6%
*-commutative 99.6%
*-commutative 99.6%
unpow3 99.6%
associate-*r* 99.6%
associate-*r* 99.6%
distribute-lft-in 99.6%
fma-udef 99.6%
associate-*l* 99.6%
distribute-lft-out 99.6%
Simplified 99.6%
fma-udef 99.6%
distribute-rgt-in 99.6%
Applied egg-rr 99.6%
if 1.36e-4 < x: Initial program 99.2%
flip-+ 99.2%
metadata-eval 99.2%
add-sqr-sqrt 100.0%
+-commutative 100.0%
associate--r+ 100.0%
metadata-eval 100.0%
neg-sub0 100.0%
associate-/r/ 100.0%
Applied egg-rr 100.0%
remove-double-neg 100.0%
distribute-frac-neg 100.0%
*-inverses 100.0%
metadata-eval 100.0%
neg-mul-1 100.0%
sub-neg 100.0%
+-commutative 100.0%
distribute-neg-in 100.0%
remove-double-neg 100.0%
metadata-eval 100.0%
Simplified 100.0%
Final simplification 99.7%
(FPCore (x) :precision binary64 (/ x (+ 1.0 (sqrt (+ x 1.0)))))
double code(double x) {
return x / (1.0 + sqrt((x + 1.0)));
}
real(8) function code(x)
    implicit none
    real(8), intent (in) :: x
    real(8) :: denom
    ! x / (1 + sqrt(x + 1))
    denom = 1.0d0 + sqrt(x + 1.0d0)
    code = x / denom
end function
public static double code(double x) {
    // x / (1 + sqrt(x + 1))
    final double denom = 1.0 + Math.sqrt(x + 1.0);
    return x / denom;
}
def code(x):
    """Evaluate x / (1 + sqrt(x + 1)) in double precision."""
    denom = 1.0 + math.sqrt(x + 1.0)
    return x / denom
function code(x)
    # x / (1 + sqrt(x + 1)), every step rounded to Float64
    denom = Float64(1.0 + sqrt(Float64(x + 1.0)))
    return Float64(x / denom)
end
function tmp = code(x)
    % x / (1 + sqrt(x + 1))
    denom = 1.0 + sqrt((x + 1.0));
    tmp = x / denom;
end
(* x / (1 + Sqrt[x + 1]); each N[..., $MachinePrecision] rounds an intermediate to machine precision *)
code[x_] := N[(x / N[(1.0 + N[Sqrt[N[(x + 1.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{x}{1 + \sqrt{x + 1}}
\end{array}
Initial program 99.7%
Final simplification 99.7%
(FPCore (x) :precision binary64 (/ x (+ (* x 0.5) 2.0)))
double code(double x) {
    /* x / (x/2 + 2): rational approximation with a linear denominator */
    double denom = (x * 0.5) + 2.0;
    return x / denom;
}
real(8) function code(x)
    implicit none
    real(8), intent (in) :: x
    real(8) :: denom
    ! linear denominator: x/2 + 2
    denom = (x * 0.5d0) + 2.0d0
    code = x / denom
end function
public static double code(double x) {
    // x / (x*0.5 + 2.0)
    final double denom = (x * 0.5) + 2.0;
    return x / denom;
}
def code(x):
    """Evaluate x / (x*0.5 + 2.0) in double precision."""
    denom = (x * 0.5) + 2.0
    return x / denom
function code(x)
    # x / (x/2 + 2), every step rounded to Float64
    denom = Float64(Float64(x * 0.5) + 2.0)
    return Float64(x / denom)
end
function tmp = code(x)
    % x / (x/2 + 2)
    denom = (x * 0.5) + 2.0;
    tmp = x / denom;
end
(* x / (x*0.5 + 2); N[..., $MachinePrecision] rounds each intermediate to machine precision *)
code[x_] := N[(x / N[(N[(x * 0.5), $MachinePrecision] + 2.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{x}{x \cdot 0.5 + 2}
\end{array}
Initial program 99.7%
Taylor expanded in x around 0 65.6%
Final simplification 65.6%
(FPCore (x) :precision binary64 (/ x 2.0))
double code(double x) {
    /* x / 2; scaling by 0.5 is exact for binary doubles */
    return 0.5 * x;
}
real(8) function code(x)
    implicit none
    real(8), intent (in) :: x
    ! x / 2; multiplying by 0.5d0 is exact and equivalent
    code = 0.5d0 * x
end function
public static double code(double x) {
    // x / 2; multiplying by 0.5 is exact for doubles
    return 0.5 * x;
}
def code(x):
    """Return x / 2 (multiplying by 0.5 is exact for floats)."""
    return 0.5 * x
function code(x)
    # x / 2 in Float64; multiplying by 0.5 is exact
    return Float64(0.5 * x)
end
function tmp = code(x)
    % x / 2
    tmp = 0.5 * x;
end
(* x / 2 at machine precision *)
code[x_] := N[(x / 2.0), $MachinePrecision]
\begin{array}{l}
\\
\frac{x}{2}
\end{array}
Initial program 99.7%
Taylor expanded in x around 0 64.7%
Final simplification 64.7%
herbie shell --seed 2023217
(FPCore (x)
:name "Numeric.Log:$clog1p from log-domain-0.10.2.1, B"
:precision binary64
(/ x (+ 1.0 (sqrt (+ x 1.0)))))