
(FPCore (x) :precision binary64 (/ x (+ 1.0 (sqrt (+ x 1.0)))))
double code(double x) {
return x / (1.0 + sqrt((x + 1.0)));
}
! Computes x / (1.0 + sqrt(x + 1.0)) in double precision (real(8)).
real(8) function code(x)
real(8), intent (in) :: x
code = x / (1.0d0 + sqrt((x + 1.0d0)))
end function
public static double code(double x) {
    // x / (1 + sqrt(x + 1)) in double precision.
    double denom = 1.0 + Math.sqrt(x + 1.0);
    return x / denom;
}
def code(x):
    # x / (1 + sqrt(x + 1)), evaluated in double precision.
    denom = 1.0 + math.sqrt(x + 1.0)
    return x / denom
# x / (1 + sqrt(x + 1)); the explicit Float64() calls pin every intermediate to binary64.
function code(x) return Float64(x / Float64(1.0 + sqrt(Float64(x + 1.0)))) end
% Computes x / (1 + sqrt(x + 1)) in double precision.
function tmp = code(x) tmp = x / (1.0 + sqrt((x + 1.0))); end
(* x / (1 + Sqrt[x + 1]); each intermediate is rounded to $MachinePrecision to mimic binary64. *)
code[x_] := N[(x / N[(1.0 + N[Sqrt[N[(x + 1.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{x}{1 + \sqrt{x + 1}}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 7 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x) :precision binary64 (/ x (+ 1.0 (sqrt (+ x 1.0)))))
double code(double x) {
return x / (1.0 + sqrt((x + 1.0)));
}
! x / (1.0 + sqrt(x + 1.0)) in real(8); identical to the initial program above.
real(8) function code(x)
real(8), intent (in) :: x
code = x / (1.0d0 + sqrt((x + 1.0d0)))
end function
public static double code(double x) {
    // Divide x by one plus the square root of (x + 1).
    double root = Math.sqrt(x + 1.0);
    return x / (1.0 + root);
}
def code(x):
    """Return x / (1 + sqrt(x + 1))."""
    root = math.sqrt(x + 1.0)
    return x / (1.0 + root)
# x / (1 + sqrt(x + 1)); Float64() wrappers force binary64 rounding at each step.
function code(x) return Float64(x / Float64(1.0 + sqrt(Float64(x + 1.0)))) end
% x / (1 + sqrt(x + 1)) in double precision.
function tmp = code(x) tmp = x / (1.0 + sqrt((x + 1.0))); end
(* x / (1 + Sqrt[x + 1]) with $MachinePrecision rounding at each intermediate. *)
code[x_] := N[(x / N[(1.0 + N[Sqrt[N[(x + 1.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{x}{1 + \sqrt{x + 1}}
\end{array}
(FPCore (x) :precision binary64 (/ x (+ 1.0 (sqrt (+ x 1.0)))))
double code(double x) {
return x / (1.0 + sqrt((x + 1.0)));
}
! x / (1.0 + sqrt(x + 1.0)) in real(8).
real(8) function code(x)
real(8), intent (in) :: x
code = x / (1.0d0 + sqrt((x + 1.0d0)))
end function
public static double code(double x) {
    // x / (sqrt(x + 1) + 1); addition order is irrelevant to the rounded result.
    return x / (Math.sqrt(x + 1.0) + 1.0);
}
def code(x):
    # Divide x by one plus the square root of (x + 1).
    return x / (math.sqrt(x + 1.0) + 1.0)
# x / (1 + sqrt(x + 1)) with explicit binary64 rounding of intermediates.
function code(x) return Float64(x / Float64(1.0 + sqrt(Float64(x + 1.0)))) end
% x / (1 + sqrt(x + 1)) in double precision.
function tmp = code(x) tmp = x / (1.0 + sqrt((x + 1.0))); end
(* x / (1 + Sqrt[x + 1]) rounded to $MachinePrecision at every step. *)
code[x_] := N[(x / N[(1.0 + N[Sqrt[N[(x + 1.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{x}{1 + \sqrt{x + 1}}
\end{array}
Initial program 99.8%
Final simplification 99.8%
(FPCore (x) :precision binary64 (if (<= x 0.000205) (/ x (* (- 4.0 (* x (* x 0.25))) (+ (* x 0.125) 0.5))) (+ (sqrt (+ x 1.0)) -1.0)))
double code(double x) {
double tmp;
if (x <= 0.000205) {
tmp = x / ((4.0 - (x * (x * 0.25))) * ((x * 0.125) + 0.5));
} else {
tmp = sqrt((x + 1.0)) + -1.0;
}
return tmp;
}
! Piecewise evaluation of x / (1 + sqrt(x + 1)):
! the x <= 0.000205 branch uses a series-derived rational form,
! otherwise the algebraically equivalent sqrt(x + 1) - 1.
real(8) function code(x)
real(8), intent (in) :: x
real(8) :: tmp
if (x <= 0.000205d0) then
tmp = x / ((4.0d0 - (x * (x * 0.25d0))) * ((x * 0.125d0) + 0.5d0))
else
tmp = sqrt((x + 1.0d0)) + (-1.0d0)
end if
code = tmp
end function
public static double code(double x) {
    // Piecewise form of x / (1 + sqrt(x + 1)): a series-derived rational
    // expression for small x, sqrt(x + 1) - 1 otherwise.
    if (x <= 0.000205) {
        double quad = 4.0 - (x * (x * 0.25));
        double lin = (x * 0.125) + 0.5;
        return x / (quad * lin);
    }
    return Math.sqrt(x + 1.0) + -1.0;
}
def code(x):
    """Piecewise approximation of x / (1 + sqrt(x + 1)).

    For x <= 0.000205 a series-derived rational form is used;
    otherwise the algebraically equivalent sqrt(x + 1) - 1.
    """
    # The generated one-line def was invalid Python (if/else statements
    # cannot share a line with the def); reformatted with identical arithmetic.
    if x <= 0.000205:
        tmp = x / ((4.0 - (x * (x * 0.25))) * ((x * 0.125) + 0.5))
    else:
        tmp = math.sqrt(x + 1.0) + -1.0
    return tmp
# Piecewise x / (1 + sqrt(x + 1)): rational form for x <= 0.000205, sqrt(x + 1) - 1 otherwise.
function code(x) tmp = 0.0 if (x <= 0.000205) tmp = Float64(x / Float64(Float64(4.0 - Float64(x * Float64(x * 0.25))) * Float64(Float64(x * 0.125) + 0.5))); else tmp = Float64(sqrt(Float64(x + 1.0)) + -1.0); end return tmp end
% Piecewise x / (1 + sqrt(x + 1)): rational form for x <= 0.000205, sqrt(x + 1) - 1 otherwise.
function tmp_2 = code(x) tmp = 0.0; if (x <= 0.000205) tmp = x / ((4.0 - (x * (x * 0.25))) * ((x * 0.125) + 0.5)); else tmp = sqrt((x + 1.0)) + -1.0; end tmp_2 = tmp; end
(* Piecewise x / (1 + Sqrt[x + 1]): rational form for x <= 0.000205, Sqrt[x + 1] - 1 otherwise. *)
code[x_] := If[LessEqual[x, 0.000205], N[(x / N[(N[(4.0 - N[(x * N[(x * 0.25), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(N[(x * 0.125), $MachinePrecision] + 0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[Sqrt[N[(x + 1.0), $MachinePrecision]], $MachinePrecision] + -1.0), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq 0.000205:\\
\;\;\;\;\frac{x}{\left(4 - x \cdot \left(x \cdot 0.25\right)\right) \cdot \left(x \cdot 0.125 + 0.5\right)}\\
\mathbf{else}:\\
\;\;\;\;\sqrt{x + 1} + -1\\
\end{array}
\end{array}
if x < 2.05e-4
Initial program 100.0%
Taylor expanded in x around 0 99.3%
+-commutative 99.3%
associate-+r+ 99.3%
metadata-eval 99.3%
flip-+ 99.3%
metadata-eval 99.3%
*-commutative 99.3%
*-commutative 99.3%
swap-sqr 99.3%
metadata-eval 99.3%
*-commutative 99.3%
Applied egg-rr 99.3%
div-inv 99.3%
associate-*l* 99.3%
sub-neg 99.3%
distribute-rgt-neg-in 99.3%
metadata-eval 99.3%
Applied egg-rr 99.3%
Taylor expanded in x around 0 99.6%
if 2.05e-4 < x
Initial program 99.3%
flip-+ 98.9%
metadata-eval 98.9%
add-sqr-sqrt 99.6%
+-commutative 99.6%
associate--r+ 99.6%
metadata-eval 99.6%
neg-sub0 99.6%
associate-/r/ 99.6%
Applied egg-rr 99.6%
remove-double-neg 99.6%
distribute-frac-neg 99.6%
*-inverses 99.6%
metadata-eval 99.6%
neg-mul-1 99.6%
sub-neg 99.6%
+-commutative 99.6%
distribute-neg-in 99.6%
remove-double-neg 99.6%
metadata-eval 99.6%
Simplified 99.6%
Final simplification 99.6%
(FPCore (x) :precision binary64 (+ (* (/ x (- 4.0 (* x (* x 0.25)))) 2.0) (* (/ x (- (/ 4.0 x) (* x 0.25))) -0.5)))
double code(double x) {
    /* Sum of two rational terms approximating x / (1 + sqrt(x + 1)).
     * NOTE: contains 4.0 / x, so x == 0 divides by zero (IEEE Inf). */
    const double t1 = (x / (4.0 - (x * (x * 0.25)))) * 2.0;
    const double t2 = (x / ((4.0 / x) - (x * 0.25))) * -0.5;
    return t1 + t2;
}
! Two-term rational approximation of x / (1 + sqrt(x + 1)).
! Note: the 4.0d0 / x term divides by zero when x = 0.
real(8) function code(x)
real(8), intent (in) :: x
code = ((x / (4.0d0 - (x * (x * 0.25d0)))) * 2.0d0) + ((x / ((4.0d0 / x) - (x * 0.25d0))) * (-0.5d0))
end function
public static double code(double x) {
    // Two-term rational approximation; note 4.0 / x is Inf at x == 0.
    double t1 = (x / (4.0 - (x * (x * 0.25)))) * 2.0;
    double t2 = (x / ((4.0 / x) - (x * 0.25))) * -0.5;
    return t1 + t2;
}
def code(x):
    # Two-term rational approximation of x / (1 + sqrt(x + 1)).
    # Note: 4.0 / x raises ZeroDivisionError at x == 0.
    term1 = (x / (4.0 - (x * (x * 0.25)))) * 2.0
    term2 = (x / ((4.0 / x) - (x * 0.25))) * -0.5
    return term1 + term2
# Two-term rational approximation of x / (1 + sqrt(x + 1)); contains 4.0 / x.
function code(x) return Float64(Float64(Float64(x / Float64(4.0 - Float64(x * Float64(x * 0.25)))) * 2.0) + Float64(Float64(x / Float64(Float64(4.0 / x) - Float64(x * 0.25))) * -0.5)) end
% Two-term rational approximation of x / (1 + sqrt(x + 1)); contains 4.0 / x.
function tmp = code(x) tmp = ((x / (4.0 - (x * (x * 0.25)))) * 2.0) + ((x / ((4.0 / x) - (x * 0.25))) * -0.5); end
(* Two-term rational approximation of x / (1 + Sqrt[x + 1]); contains 4.0 / x. *)
code[x_] := N[(N[(N[(x / N[(4.0 - N[(x * N[(x * 0.25), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * 2.0), $MachinePrecision] + N[(N[(x / N[(N[(4.0 / x), $MachinePrecision] - N[(x * 0.25), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * -0.5), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{x}{4 - x \cdot \left(x \cdot 0.25\right)} \cdot 2 + \frac{x}{\frac{4}{x} - x \cdot 0.25} \cdot -0.5
\end{array}
Initial program 99.8%
Taylor expanded in x around 0 72.2%
+-commutative 72.2%
associate-+r+ 72.2%
metadata-eval 72.2%
flip-+ 71.8%
metadata-eval 71.8%
*-commutative 71.8%
*-commutative 71.8%
swap-sqr 71.8%
metadata-eval 71.8%
*-commutative 71.8%
Applied egg-rr 71.8%
associate-/r/ 71.8%
sub-neg 71.8%
distribute-lft-in 71.8%
associate-*l* 71.8%
associate-*l* 71.8%
distribute-rgt-neg-in 71.8%
metadata-eval 71.8%
Applied egg-rr 71.8%
expm1-log1p-u 71.8%
expm1-udef 71.4%
associate-*l/ 71.0%
Applied egg-rr 71.0%
expm1-def 71.4%
expm1-log1p 71.4%
associate-*r* 71.4%
unpow2 71.4%
associate-*l/ 71.4%
unpow2 71.4%
associate-/l* 71.8%
div-sub 71.8%
*-commutative 71.8%
associate-/l* 72.2%
*-commutative 72.2%
*-inverses 72.2%
associate-*r/ 72.2%
/-rgt-identity 72.2%
Simplified 72.2%
Final simplification 72.2%
(FPCore (x) :precision binary64 (/ x (+ 1.0 (+ 1.0 (* x 0.5)))))
double code(double x) {
    /* x / (1 + (1 + x/2)); nested grouping kept for identical rounding. */
    double inner = 1.0 + (x * 0.5);
    return x / (1.0 + inner);
}
! x / (1 + (1 + x/2)) -- series-based approximation of x / (1 + sqrt(x + 1)).
real(8) function code(x)
real(8), intent (in) :: x
code = x / (1.0d0 + (1.0d0 + (x * 0.5d0)))
end function
public static double code(double x) {
    // x / (1 + (1 + x/2)); grouping preserved from the FPCore form.
    double inner = 1.0 + (x * 0.5);
    return x / (1.0 + inner);
}
def code(x):
    # x / (1 + (1 + x/2)); nested grouping preserved for identical rounding.
    inner = 1.0 + (x * 0.5)
    return x / (1.0 + inner)
# x / (1 + (1 + x/2)) with explicit binary64 rounding of intermediates.
function code(x) return Float64(x / Float64(1.0 + Float64(1.0 + Float64(x * 0.5)))) end
% x / (1 + (1 + x/2)) in double precision.
function tmp = code(x) tmp = x / (1.0 + (1.0 + (x * 0.5))); end
(* x / (1 + (1 + x/2)) rounded to $MachinePrecision at every step. *)
code[x_] := N[(x / N[(1.0 + N[(1.0 + N[(x * 0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{x}{1 + \left(1 + x \cdot 0.5\right)}
\end{array}
Initial program 99.8%
Taylor expanded in x around 0 72.2%
Final simplification 72.2%
(FPCore (x) :precision binary64 (/ x (+ 2.0 (* x 0.5))))
double code(double x) {
    /* x / (2 + x/2). */
    double denom = 2.0 + (x * 0.5);
    return x / denom;
}
! x / (2 + x/2) -- simplified series approximation.
real(8) function code(x)
real(8), intent (in) :: x
code = x / (2.0d0 + (x * 0.5d0))
end function
public static double code(double x) {
    // x / (2 + x/2).
    double denom = 2.0 + (x * 0.5);
    return x / denom;
}
def code(x):
    # x / (2 + x/2).
    half = x * 0.5
    return x / (2.0 + half)
# x / (2 + x/2) with explicit binary64 rounding.
function code(x) return Float64(x / Float64(2.0 + Float64(x * 0.5))) end
% x / (2 + x/2) in double precision.
function tmp = code(x) tmp = x / (2.0 + (x * 0.5)); end
(* x / (2 + x/2) rounded to $MachinePrecision at every step. *)
code[x_] := N[(x / N[(2.0 + N[(x * 0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{x}{2 + x \cdot 0.5}
\end{array}
Initial program 99.8%
Taylor expanded in x around 0 72.2%
Final simplification 72.2%
(FPCore (x) :precision binary64 (/ x 2.0))
double code(double x) {
    /* Leading-order approximation: half of x. */
    const double half = x / 2.0;
    return half;
}
! Leading-order approximation: x / 2.
real(8) function code(x)
real(8), intent (in) :: x
code = x / 2.0d0
end function
public static double code(double x) {
    // Leading-order approximation: x / 2.
    double half = x / 2.0;
    return half;
}
def code(x):
    # Leading-order approximation: half of x.
    half = x / 2.0
    return half
# Leading-order approximation: x / 2.
function code(x) return Float64(x / 2.0) end
% Leading-order approximation: x / 2.
function tmp = code(x) tmp = x / 2.0; end
(* Leading-order approximation: x / 2. *)
code[x_] := N[(x / 2.0), $MachinePrecision]
\begin{array}{l}
\\
\frac{x}{2}
\end{array}
Initial program 99.8%
Taylor expanded in x around 0 71.4%
Final simplification 71.4%
(FPCore (x) :precision binary64 2.0)
double code(double x) {
    /* Constant approximation from the expansion around infinity. */
    (void)x; /* parameter intentionally unused */
    return 2.0;
}
! Constant approximation (2.0) from the expansion around infinity; x is unused.
real(8) function code(x)
real(8), intent (in) :: x
code = 2.0d0
end function
public static double code(double x) {
    // Constant approximation; x is intentionally ignored.
    double result = 2.0;
    return result;
}
def code(x):
    # Constant approximation; x is intentionally ignored.
    result = 2.0
    return result
# Constant approximation; x is unused.
function code(x) return 2.0 end
% Constant approximation; x is unused.
function tmp = code(x) tmp = 2.0; end
(* Constant approximation; x is unused. *)
code[x_] := 2.0
\begin{array}{l}
\\
2
\end{array}
Initial program 99.8%
Taylor expanded in x around 0 72.2%
Taylor expanded in x around inf 4.6%
Final simplification 4.6%
herbie shell --seed 2023252
(FPCore (x)
:name "Numeric.Log:$clog1p from log-domain-0.10.2.1, B"
:precision binary64
(/ x (+ 1.0 (sqrt (+ x 1.0)))))