
(FPCore (x) :precision binary64 (- (sqrt (+ x 1.0)) (sqrt x)))
// Direct evaluation of sqrt(x + 1) - sqrt(x); for large x the two roots are
// nearly equal, so the subtraction cancels most significant digits.
double code(double x) {
return sqrt((x + 1.0)) - sqrt(x);
}
! Direct evaluation of sqrt(x + 1) - sqrt(x) in double precision;
! the subtraction cancels for large x where the two roots are nearly equal.
real(8) function code(x)
real(8), intent (in) :: x
code = sqrt((x + 1.0d0)) - sqrt(x)
end function
// Direct evaluation of sqrt(x + 1) - sqrt(x); cancels for large x.
public static double code(double x) {
return Math.sqrt((x + 1.0)) - Math.sqrt(x);
}
# Direct evaluation of sqrt(x + 1) - sqrt(x); cancels for large x.
def code(x): return math.sqrt((x + 1.0)) - math.sqrt(x)
# Direct evaluation of sqrt(x + 1) - sqrt(x); cancels for large x.
function code(x) return Float64(sqrt(Float64(x + 1.0)) - sqrt(x)) end
% Direct evaluation of sqrt(x + 1) - sqrt(x); cancels for large x.
function tmp = code(x) tmp = sqrt((x + 1.0)) - sqrt(x); end
(* Direct evaluation of Sqrt[x + 1] - Sqrt[x] at machine precision; cancels for large x. *)
code[x_] := N[(N[Sqrt[N[(x + 1.0), $MachinePrecision]], $MachinePrecision] - N[Sqrt[x], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\sqrt{x + 1} - \sqrt{x}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 5 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x) :precision binary64 (- (sqrt (+ x 1.0)) (sqrt x)))
// Direct evaluation of sqrt(x + 1) - sqrt(x); cancels for large x.
double code(double x) {
return sqrt((x + 1.0)) - sqrt(x);
}
! Direct evaluation of sqrt(x + 1) - sqrt(x) in double precision; cancels for large x.
real(8) function code(x)
real(8), intent (in) :: x
code = sqrt((x + 1.0d0)) - sqrt(x)
end function
// Direct evaluation of sqrt(x + 1) - sqrt(x); cancels for large x.
public static double code(double x) {
return Math.sqrt((x + 1.0)) - Math.sqrt(x);
}
# Direct evaluation of sqrt(x + 1) - sqrt(x); cancels for large x.
def code(x): return math.sqrt((x + 1.0)) - math.sqrt(x)
# Direct evaluation of sqrt(x + 1) - sqrt(x); cancels for large x.
function code(x) return Float64(sqrt(Float64(x + 1.0)) - sqrt(x)) end
% Direct evaluation of sqrt(x + 1) - sqrt(x); cancels for large x.
function tmp = code(x) tmp = sqrt((x + 1.0)) - sqrt(x); end
(* Direct evaluation of Sqrt[x + 1] - Sqrt[x] at machine precision; cancels for large x. *)
code[x_] := N[(N[Sqrt[N[(x + 1.0), $MachinePrecision]], $MachinePrecision] - N[Sqrt[x], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\sqrt{x + 1} - \sqrt{x}
\end{array}
(FPCore (x) :precision binary64 (/ 1.0 (+ (sqrt x) (sqrt (+ 1.0 x)))))
// Conjugate form: 1 / (sqrt(x) + sqrt(1 + x)) is algebraically equal to
// sqrt(x + 1) - sqrt(x) but replaces the cancelling subtraction with an addition.
double code(double x) {
return 1.0 / (sqrt(x) + sqrt((1.0 + x)));
}
! Conjugate form: 1 / (sqrt(x) + sqrt(1 + x)) equals sqrt(x + 1) - sqrt(x)
! algebraically, but avoids the cancelling subtraction.
real(8) function code(x)
real(8), intent (in) :: x
code = 1.0d0 / (sqrt(x) + sqrt((1.0d0 + x)))
end function
// Conjugate form of sqrt(x + 1) - sqrt(x); avoids the cancelling subtraction.
public static double code(double x) {
return 1.0 / (Math.sqrt(x) + Math.sqrt((1.0 + x)));
}
# Conjugate form of sqrt(x + 1) - sqrt(x); avoids the cancelling subtraction.
def code(x): return 1.0 / (math.sqrt(x) + math.sqrt((1.0 + x)))
# Conjugate form of sqrt(x + 1) - sqrt(x); avoids the cancelling subtraction.
function code(x) return Float64(1.0 / Float64(sqrt(x) + sqrt(Float64(1.0 + x)))) end
% Conjugate form of sqrt(x + 1) - sqrt(x); avoids the cancelling subtraction.
function tmp = code(x) tmp = 1.0 / (sqrt(x) + sqrt((1.0 + x))); end
(* Conjugate form of Sqrt[x + 1] - Sqrt[x]; avoids the cancelling subtraction. *)
code[x_] := N[(1.0 / N[(N[Sqrt[x], $MachinePrecision] + N[Sqrt[N[(1.0 + x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{\sqrt{x} + \sqrt{1 + x}}
\end{array}
Initial program 8.4%
flip--9.5%
div-inv9.5%
add-sqr-sqrt9.7%
add-sqr-sqrt10.6%
associate--l+10.6%
Applied egg-rr10.6%
+-commutative10.6%
associate-+l-99.6%
+-inverses99.6%
metadata-eval99.6%
associate-*r/99.6%
metadata-eval99.6%
+-commutative99.6%
+-commutative99.6%
Simplified99.6%
Final simplification99.6%
(FPCore (x) :precision binary64 (if (<= x 32500000.0) (- (sqrt (+ 1.0 x)) (sqrt x)) (* 0.5 (pow x -0.5))))
// Piecewise evaluation: direct difference sqrt(1 + x) - sqrt(x) for
// x <= 3.25e7, asymptotic approximation 0.5 * x^-0.5 for larger x.
double code(double x) {
double tmp;
if (x <= 32500000.0) {
tmp = sqrt((1.0 + x)) - sqrt(x);
} else {
tmp = 0.5 * pow(x, -0.5);
}
return tmp;
}
! Piecewise evaluation: direct difference sqrt(1 + x) - sqrt(x) for
! x <= 3.25e7, asymptotic approximation 0.5 * x**(-0.5) for larger x.
real(8) function code(x)
real(8), intent (in) :: x
real(8) :: tmp
if (x <= 32500000.0d0) then
tmp = sqrt((1.0d0 + x)) - sqrt(x)
else
tmp = 0.5d0 * (x ** (-0.5d0))
end if
code = tmp
end function
// Piecewise evaluation: direct difference for x <= 3.25e7,
// asymptotic approximation 0.5 * x^-0.5 for larger x.
public static double code(double x) {
double tmp;
if (x <= 32500000.0) {
tmp = Math.sqrt((1.0 + x)) - Math.sqrt(x);
} else {
tmp = 0.5 * Math.pow(x, -0.5);
}
return tmp;
}
def code(x): tmp = 0 if x <= 32500000.0: tmp = math.sqrt((1.0 + x)) - math.sqrt(x) else: tmp = 0.5 * math.pow(x, -0.5) return tmp
# Piecewise evaluation of sqrt(x + 1) - sqrt(x): direct difference for
# x <= 3.25e7, asymptotic approximation 0.5 * x^-0.5 for larger x.
# (Restored from a flattened one-liner that lacked statement separators.)
function code(x)
	if (x <= 32500000.0)
		tmp = Float64(sqrt(Float64(1.0 + x)) - sqrt(x))
	else
		tmp = Float64(0.5 * (x ^ -0.5))
	end
	return tmp
end
% Piecewise: direct difference for x <= 3.25e7, asymptotic 0.5 * x^-0.5 above.
function tmp_2 = code(x) tmp = 0.0; if (x <= 32500000.0) tmp = sqrt((1.0 + x)) - sqrt(x); else tmp = 0.5 * (x ^ -0.5); end tmp_2 = tmp; end
(* Piecewise: direct difference for x <= 3.25e7, asymptotic 0.5 * x^-0.5 above. *)
code[x_] := If[LessEqual[x, 32500000.0], N[(N[Sqrt[N[(1.0 + x), $MachinePrecision]], $MachinePrecision] - N[Sqrt[x], $MachinePrecision]), $MachinePrecision], N[(0.5 * N[Power[x, -0.5], $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq 32500000:\\
\;\;\;\;\sqrt{1 + x} - \sqrt{x}\\
\mathbf{else}:\\
\;\;\;\;0.5 \cdot {x}^{-0.5}\\
\end{array}
\end{array}
if x < 3.25e7 Initial program 90.0%
if 3.25e7 < x Initial program 5.5%
Taylor expanded in x around inf 99.0%
*-un-lft-identity99.0%
inv-pow99.0%
sqrt-pow199.2%
metadata-eval99.2%
Applied egg-rr99.2%
*-lft-identity99.2%
Simplified99.2%
Final simplification98.8%
(FPCore (x) :precision binary64 (* 0.5 (pow x -0.5)))
// Asymptotic approximation of sqrt(x + 1) - sqrt(x): 0.5 * x^-0.5
// (series expansion about x = infinity).
double code(double x) {
return 0.5 * pow(x, -0.5);
}
! Asymptotic approximation of sqrt(x + 1) - sqrt(x): 0.5 * x**(-0.5)
! (series expansion about x = infinity).
real(8) function code(x)
real(8), intent (in) :: x
code = 0.5d0 * (x ** (-0.5d0))
end function
// Asymptotic approximation of sqrt(x + 1) - sqrt(x): 0.5 * x^-0.5.
public static double code(double x) {
return 0.5 * Math.pow(x, -0.5);
}
# Asymptotic approximation of sqrt(x + 1) - sqrt(x): 0.5 * x**-0.5.
def code(x): return 0.5 * math.pow(x, -0.5)
# Asymptotic approximation of sqrt(x + 1) - sqrt(x): 0.5 * x^-0.5.
function code(x) return Float64(0.5 * (x ^ -0.5)) end
% Asymptotic approximation of sqrt(x + 1) - sqrt(x): 0.5 * x^-0.5.
function tmp = code(x) tmp = 0.5 * (x ^ -0.5); end
(* Asymptotic approximation of Sqrt[x + 1] - Sqrt[x]: 0.5 * x^-0.5. *)
code[x_] := N[(0.5 * N[Power[x, -0.5], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
0.5 \cdot {x}^{-0.5}
\end{array}
Initial program 8.4%
Taylor expanded in x around inf 96.6%
*-un-lft-identity96.6%
inv-pow96.6%
sqrt-pow196.8%
metadata-eval96.8%
Applied egg-rr96.8%
*-lft-identity96.8%
Simplified96.8%
Final simplification96.8%
(FPCore (x) :precision binary64 (sqrt (/ 0.25 x)))
// Asymptotic approximation written as sqrt(0.25 / x), i.e. 0.5 / sqrt(x).
double code(double x) {
return sqrt((0.25 / x));
}
! Asymptotic approximation written as sqrt(0.25 / x), i.e. 0.5 / sqrt(x).
real(8) function code(x)
real(8), intent (in) :: x
code = sqrt((0.25d0 / x))
end function
// Asymptotic approximation written as sqrt(0.25 / x), i.e. 0.5 / sqrt(x).
public static double code(double x) {
return Math.sqrt((0.25 / x));
}
# Asymptotic approximation written as sqrt(0.25 / x), i.e. 0.5 / sqrt(x).
def code(x): return math.sqrt((0.25 / x))
# Asymptotic approximation written as sqrt(0.25 / x), i.e. 0.5 / sqrt(x).
function code(x) return sqrt(Float64(0.25 / x)) end
% Asymptotic approximation written as sqrt(0.25 / x), i.e. 0.5 / sqrt(x).
function tmp = code(x) tmp = sqrt((0.25 / x)); end
(* Asymptotic approximation written as Sqrt[0.25 / x], i.e. 0.5 / Sqrt[x]. *)
code[x_] := N[Sqrt[N[(0.25 / x), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\sqrt{\frac{0.25}{x}}
\end{array}
Initial program 8.4%
flip--9.5%
div-inv9.5%
add-sqr-sqrt9.7%
add-sqr-sqrt10.6%
associate--l+10.6%
Applied egg-rr10.6%
+-commutative10.6%
associate-+l-99.6%
+-inverses99.6%
metadata-eval99.6%
associate-*r/99.6%
metadata-eval99.6%
+-commutative99.6%
+-commutative99.6%
Simplified99.6%
Taylor expanded in x around inf 96.4%
*-commutative96.4%
Simplified96.4%
add-sqr-sqrt96.0%
sqrt-unprod96.4%
frac-times96.0%
metadata-eval96.0%
swap-sqr96.0%
add-sqr-sqrt96.2%
metadata-eval96.2%
Applied egg-rr96.2%
*-commutative96.2%
associate-/r*96.6%
metadata-eval96.6%
Simplified96.6%
Final simplification96.6%
(FPCore (x) :precision binary64 (- (sqrt x)))
// Returns -sqrt(x); a low-accuracy alternative produced by series expansion.
double code(double x) {
return -sqrt(x);
}
! Returns -sqrt(x); a low-accuracy alternative produced by series expansion.
real(8) function code(x)
real(8), intent (in) :: x
code = -sqrt(x)
end function
// Returns -sqrt(x); a low-accuracy alternative produced by series expansion.
public static double code(double x) {
return -Math.sqrt(x);
}
# Returns -sqrt(x); a low-accuracy alternative produced by series expansion.
def code(x): return -math.sqrt(x)
# Returns -sqrt(x); a low-accuracy alternative produced by series expansion.
function code(x) return Float64(-sqrt(x)) end
% Returns -sqrt(x); a low-accuracy alternative produced by series expansion.
function tmp = code(x) tmp = -sqrt(x); end
(* Returns -Sqrt[x]; a low-accuracy alternative produced by series expansion. *)
code[x_] := (-N[Sqrt[x], $MachinePrecision])
\begin{array}{l}
\\
-\sqrt{x}
\end{array}
Initial program 8.4%
Taylor expanded in x around 0 1.6%
Taylor expanded in x around inf 1.6%
neg-mul-11.6%
Simplified1.6%
Final simplification1.6%
(FPCore (x) :precision binary64 (/ 1.0 (+ (sqrt (+ x 1.0)) (sqrt x))))
// Conjugate form: 1 / (sqrt(x + 1) + sqrt(x)) equals sqrt(x + 1) - sqrt(x)
// algebraically, but avoids the cancelling subtraction.
double code(double x) {
return 1.0 / (sqrt((x + 1.0)) + sqrt(x));
}
! Conjugate form: 1 / (sqrt(x + 1) + sqrt(x)) equals sqrt(x + 1) - sqrt(x)
! algebraically, but avoids the cancelling subtraction.
real(8) function code(x)
real(8), intent (in) :: x
code = 1.0d0 / (sqrt((x + 1.0d0)) + sqrt(x))
end function
// Conjugate form of sqrt(x + 1) - sqrt(x); avoids the cancelling subtraction.
public static double code(double x) {
return 1.0 / (Math.sqrt((x + 1.0)) + Math.sqrt(x));
}
# Conjugate form of sqrt(x + 1) - sqrt(x); avoids the cancelling subtraction.
def code(x): return 1.0 / (math.sqrt((x + 1.0)) + math.sqrt(x))
# Conjugate form of sqrt(x + 1) - sqrt(x); avoids the cancelling subtraction.
function code(x) return Float64(1.0 / Float64(sqrt(Float64(x + 1.0)) + sqrt(x))) end
% Conjugate form of sqrt(x + 1) - sqrt(x); avoids the cancelling subtraction.
function tmp = code(x) tmp = 1.0 / (sqrt((x + 1.0)) + sqrt(x)); end
(* Conjugate form of Sqrt[x + 1] - Sqrt[x]; avoids the cancelling subtraction. *)
code[x_] := N[(1.0 / N[(N[Sqrt[N[(x + 1.0), $MachinePrecision]], $MachinePrecision] + N[Sqrt[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{\sqrt{x + 1} + \sqrt{x}}
\end{array}
herbie shell --seed 2024077
(FPCore (x)
:name "2sqrt (example 3.1)"
:precision binary64
:pre (and (> x 1.0) (< x 1e+308))
:alt
(/ 1.0 (+ (sqrt (+ x 1.0)) (sqrt x)))
(- (sqrt (+ x 1.0)) (sqrt x)))