
; Input program: 1/sqrt(x) - 1/sqrt(x+1) (cancellation-prone for large x).
(FPCore (x) :precision binary64 (- (/ 1.0 (sqrt x)) (/ 1.0 (sqrt (+ x 1.0)))))
double code(double x) {
return (1.0 / sqrt(x)) - (1.0 / sqrt((x + 1.0)));
}
! 1/sqrt(x) - 1/sqrt(x+1), computed naively.
! NOTE(review): the subtraction cancels catastrophically for large x.
real(8) function code(x)
real(8), intent (in) :: x
code = (1.0d0 / sqrt(x)) - (1.0d0 / sqrt((x + 1.0d0)))
end function
/**
 * Computes 1/sqrt(x) - 1/sqrt(x + 1) for x > 0.
 *
 * <p>The naive difference cancels catastrophically for large x, since both
 * terms approach the same value. Multiplying through by the conjugate gives
 * the algebraically equivalent, cancellation-free form
 * 1 / ((x + 1)*sqrt(x) + x*sqrt(x + 1)).
 */
public static double code(double x) {
    double sx = Math.sqrt(x);
    double sx1 = Math.sqrt(x + 1.0);
    return 1.0 / ((x + 1.0) * sx + x * sx1);
}
def code(x):
    """Return 1/sqrt(x) - 1/sqrt(x + 1) without catastrophic cancellation.

    For large x the two reciprocal square roots are nearly equal and the
    naive subtraction loses almost all significant digits.  Multiplying
    through by the conjugate yields the equivalent, stable form
    1 / ((x + 1)*sqrt(x) + x*sqrt(x + 1)).
    """
    sx = math.sqrt(x)
    sx1 = math.sqrt(x + 1.0)
    return 1.0 / ((x + 1.0) * sx + x * sx1)
# Naive 1/sqrt(x) - 1/sqrt(x + 1); cancels catastrophically for large x.
function code(x) return Float64(Float64(1.0 / sqrt(x)) - Float64(1.0 / sqrt(Float64(x + 1.0)))) end
% Naive 1/sqrt(x) - 1/sqrt(x + 1); cancels catastrophically for large x.
function tmp = code(x) tmp = (1.0 / sqrt(x)) - (1.0 / sqrt((x + 1.0))); end
(* Naive 1/Sqrt[x] - 1/Sqrt[x + 1]; cancels catastrophically for large x. *)
code[x_] := N[(N[(1.0 / N[Sqrt[x], $MachinePrecision]), $MachinePrecision] - N[(1.0 / N[Sqrt[N[(x + 1.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{\sqrt{x}} - \frac{1}{\sqrt{x + 1}}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 4 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
; Alternative 1: identical to the input program.
(FPCore (x) :precision binary64 (- (/ 1.0 (sqrt x)) (/ 1.0 (sqrt (+ x 1.0)))))
double code(double x) {
return (1.0 / sqrt(x)) - (1.0 / sqrt((x + 1.0)));
}
! Herbie alternative 1: identical to the input program (naive, cancellation-prone).
real(8) function code(x)
real(8), intent (in) :: x
code = (1.0d0 / sqrt(x)) - (1.0d0 / sqrt((x + 1.0d0)))
end function
/** Herbie alternative 1: the unchanged naive form (cancellation-prone for large x). */
public static double code(double x) {
    double left = 1.0 / Math.sqrt(x);
    double right = 1.0 / Math.sqrt(x + 1.0);
    return left - right;
}
def code(x):
    """Herbie alternative 1: the unchanged naive form (cancellation-prone)."""
    left = 1.0 / math.sqrt(x)
    right = 1.0 / math.sqrt(x + 1.0)
    return left - right
# Herbie alternative 1: identical to the input (naive, cancellation-prone).
function code(x) return Float64(Float64(1.0 / sqrt(x)) - Float64(1.0 / sqrt(Float64(x + 1.0)))) end
% Herbie alternative 1: identical to the input (naive, cancellation-prone).
function tmp = code(x) tmp = (1.0 / sqrt(x)) - (1.0 / sqrt((x + 1.0))); end
(* Herbie alternative 1: identical to the input (naive, cancellation-prone). *)
code[x_] := N[(N[(1.0 / N[Sqrt[x], $MachinePrecision]), $MachinePrecision] - N[(1.0 / N[Sqrt[N[(x + 1.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{\sqrt{x}} - \frac{1}{\sqrt{x + 1}}
\end{array}
; Alternative: two-term asymptotic series, 0.5*x^-1.5 - 0.375*x^-2.5.
(FPCore (x) :precision binary64 (- (/ (* 0.5 (pow x -0.5)) x) (/ (* (pow x -0.5) (/ 0.375 x)) x)))
double code(double x) {
return ((0.5 * pow(x, -0.5)) / x) - ((pow(x, -0.5) * (0.375 / x)) / x);
}
! Herbie alternative: two-term asymptotic series, 0.5*x**(-1.5) - 0.375*x**(-2.5).
real(8) function code(x)
real(8), intent (in) :: x
code = ((0.5d0 * (x ** (-0.5d0))) / x) - (((x ** (-0.5d0)) * (0.375d0 / x)) / x)
end function
/** Herbie alternative: two-term asymptotic series (accurate for large x). */
public static double code(double x) {
    double rsqrt = Math.pow(x, -0.5);
    double lead = (0.5 * rsqrt) / x;
    double correction = (rsqrt * (0.375 / x)) / x;
    return lead - correction;
}
def code(x):
    """Herbie alternative: two-term asymptotic series (accurate for large x)."""
    rsqrt = math.pow(x, -0.5)
    lead = (0.5 * rsqrt) / x
    correction = (rsqrt * (0.375 / x)) / x
    return lead - correction
# Herbie alternative: two-term asymptotic series 0.5x^-1.5 - 0.375x^-2.5.
function code(x) return Float64(Float64(Float64(0.5 * (x ^ -0.5)) / x) - Float64(Float64((x ^ -0.5) * Float64(0.375 / x)) / x)) end
% Herbie alternative: two-term asymptotic series 0.5*x^-1.5 - 0.375*x^-2.5.
function tmp = code(x) tmp = ((0.5 * (x ^ -0.5)) / x) - (((x ^ -0.5) * (0.375 / x)) / x); end
(* Herbie alternative: two-term asymptotic series 0.5 x^-1.5 - 0.375 x^-2.5. *)
code[x_] := N[(N[(N[(0.5 * N[Power[x, -0.5], $MachinePrecision]), $MachinePrecision] / x), $MachinePrecision] - N[(N[(N[Power[x, -0.5], $MachinePrecision] * N[(0.375 / x), $MachinePrecision]), $MachinePrecision] / x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{0.5 \cdot {x}^{-0.5}}{x} - \frac{{x}^{-0.5} \cdot \frac{0.375}{x}}{x}
\end{array}
Initial program — 42.3%
Taylor expanded in x around inf — 83.6%
*-un-lft-identity — 83.6%
pow1/2 — 83.6%
pow-flip — 83.6%
pow-pow — 83.6%
metadata-eval — 83.6%
metadata-eval — 83.6%
Applied egg-rr — 83.6%
*-lft-identity — 83.6%
Simplified — 83.6%
Taylor expanded in x around inf — 98.2%
+-commutative — 98.2%
mul-1-neg — 98.2%
unsub-neg — 98.2%
distribute-rgt-out — 98.2%
metadata-eval — 98.2%
Simplified — 98.2%
div-sub — 98.2%
inv-pow — 98.2%
sqrt-pow1 — 98.2%
metadata-eval — 98.2%
associate-/l* — 98.2%
inv-pow — 98.2%
sqrt-pow1 — 98.2%
metadata-eval — 98.2%
Applied egg-rr — 98.2%
Final simplification — 98.2%
; Alternative: factored two-term series, x^-0.5 * (0.5 - 0.375/x) / x.
(FPCore (x) :precision binary64 (/ (* (pow x -0.5) (- 0.5 (/ 0.375 x))) x))
double code(double x) {
return (pow(x, -0.5) * (0.5 - (0.375 / x))) / x;
}
! Herbie alternative: factored series form, x**(-0.5) * (0.5 - 0.375/x) / x.
real(8) function code(x)
real(8), intent (in) :: x
code = ((x ** (-0.5d0)) * (0.5d0 - (0.375d0 / x))) / x
end function
/** Herbie alternative: factored series form, x^-0.5 * (0.5 - 0.375/x) / x. */
public static double code(double x) {
    double rsqrt = Math.pow(x, -0.5);
    double poly = 0.5 - 0.375 / x;
    return rsqrt * poly / x;
}
def code(x):
    """Herbie alternative: factored series form, x**-0.5 * (0.5 - 0.375/x) / x."""
    rsqrt = math.pow(x, -0.5)
    poly = 0.5 - 0.375 / x
    return rsqrt * poly / x
# Herbie alternative: factored series x^-0.5 * (0.5 - 0.375/x) / x.
function code(x) return Float64(Float64((x ^ -0.5) * Float64(0.5 - Float64(0.375 / x))) / x) end
% Herbie alternative: factored series x^-0.5 * (0.5 - 0.375/x) / x.
function tmp = code(x) tmp = ((x ^ -0.5) * (0.5 - (0.375 / x))) / x; end
(* Herbie alternative: factored series x^-0.5 (0.5 - 0.375/x) / x. *)
code[x_] := N[(N[(N[Power[x, -0.5], $MachinePrecision] * N[(0.5 - N[(0.375 / x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / x), $MachinePrecision]
\begin{array}{l}
\\
\frac{{x}^{-0.5} \cdot \left(0.5 - \frac{0.375}{x}\right)}{x}
\end{array}
Initial program — 42.3%
Taylor expanded in x around inf — 83.6%
*-un-lft-identity — 83.6%
pow1/2 — 83.6%
pow-flip — 83.6%
pow-pow — 83.6%
metadata-eval — 83.6%
metadata-eval — 83.6%
Applied egg-rr — 83.6%
*-lft-identity — 83.6%
Simplified — 83.6%
Taylor expanded in x around inf — 98.2%
+-commutative — 98.2%
mul-1-neg — 98.2%
unsub-neg — 98.2%
distribute-rgt-out — 98.2%
metadata-eval — 98.2%
Simplified — 98.2%
div-sub — 98.2%
inv-pow — 98.2%
sqrt-pow1 — 98.2%
metadata-eval — 98.2%
associate-/l* — 98.2%
inv-pow — 98.2%
sqrt-pow1 — 98.2%
metadata-eval — 98.2%
Applied egg-rr — 98.2%
div-sub — 98.2%
*-commutative — 98.2%
distribute-lft-out-- — 98.2%
Simplified — 98.2%
Final simplification — 98.2%
; Alternative: leading asymptotic term, 0.5 * x^-1.5.
(FPCore (x) :precision binary64 (* 0.5 (pow x -1.5)))
double code(double x) {
return 0.5 * pow(x, -1.5);
}
! Herbie alternative: leading asymptotic term, 0.5 * x**(-1.5).
real(8) function code(x)
real(8), intent (in) :: x
code = 0.5d0 * (x ** (-1.5d0))
end function
/** Herbie alternative: leading asymptotic term, x^-1.5 / 2. */
public static double code(double x) {
    double p = Math.pow(x, -1.5);
    return p * 0.5;
}
def code(x):
    """Herbie alternative: leading asymptotic term, 0.5 * x**-1.5."""
    p = math.pow(x, -1.5)
    return p * 0.5
# Herbie alternative: leading asymptotic term, 0.5 * x^-1.5.
function code(x) return Float64(0.5 * (x ^ -1.5)) end
% Herbie alternative: leading asymptotic term, 0.5 * x^-1.5.
function tmp = code(x) tmp = 0.5 * (x ^ -1.5); end
(* Herbie alternative: leading asymptotic term, 0.5 x^-1.5. *)
code[x_] := N[(0.5 * N[Power[x, -1.5], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
0.5 \cdot {x}^{-1.5}
\end{array}
Initial program — 42.3%
Taylor expanded in x around inf — 67.7%
pow1 — 67.7%
pow1/2 — 67.7%
pow-flip — 68.5%
pow-pow — 97.5%
metadata-eval — 97.5%
metadata-eval — 97.5%
Applied egg-rr — 97.5%
unpow1 — 97.5%
Simplified — 97.5%
Final simplification — 97.5%
; Alternative: constant zero (degenerate approximation of the difference).
(FPCore (x) :precision binary64 0.0)
/* Herbie alternative: approximates the difference as the constant 0. */
double code(double x) {
    (void)x; /* input is unused in this degenerate alternative */
    return 0.0;
}
! Herbie alternative: constant zero (degenerate approximation; ignores x).
real(8) function code(x)
real(8), intent (in) :: x
code = 0.0d0
end function
/** Herbie alternative: constant-zero approximation of the difference. */
public static double code(double x) {
    final double zero = 0.0;
    return zero;
}
def code(x):
    """Herbie alternative: constant-zero approximation (ignores x)."""
    return 0.0
# Herbie alternative: constant zero (ignores x).
function code(x) return 0.0 end
% Herbie alternative: constant zero (ignores x).
function tmp = code(x) tmp = 0.0; end
(* Herbie alternative: constant zero (ignores x). *)
code[x_] := 0.0
\begin{array}{l}
\\
0
\end{array}
Initial program — 42.3%
sub-neg — 42.3%
+-commutative — 42.3%
add-sqr-sqrt — 24.5%
distribute-rgt-neg-in — 24.5%
fma-define — 7.7%
pow1/2 — 7.7%
pow-flip — 7.7%
+-commutative — 7.7%
metadata-eval — 7.7%
pow1/2 — 7.7%
pow-flip — 7.7%
+-commutative — 7.7%
metadata-eval — 7.7%
inv-pow — 7.7%
sqrt-pow2 — 8.0%
metadata-eval — 8.0%
Applied egg-rr — 8.0%
Taylor expanded in x around inf — 39.0%
distribute-rgt1-in — 39.0%
metadata-eval — 39.0%
mul0-lft — 39.0%
Simplified — 39.0%
Final simplification — 39.0%
; Alternative: conjugate rewrite — algebraically exact and cancellation-free.
(FPCore (x) :precision binary64 (/ 1.0 (+ (* (+ x 1.0) (sqrt x)) (* x (sqrt (+ x 1.0))))))
double code(double x) {
return 1.0 / (((x + 1.0) * sqrt(x)) + (x * sqrt((x + 1.0))));
}
! Herbie's best alternative: conjugate form, free of cancellation.
real(8) function code(x)
real(8), intent (in) :: x
code = 1.0d0 / (((x + 1.0d0) * sqrt(x)) + (x * sqrt((x + 1.0d0))))
end function
/** Herbie's best alternative: conjugate form, free of cancellation. */
public static double code(double x) {
    double sx = Math.sqrt(x);
    double sx1 = Math.sqrt(x + 1.0);
    double denom = (x + 1.0) * sx + x * sx1;
    return 1.0 / denom;
}
def code(x):
    """Herbie's best alternative: conjugate form, free of cancellation."""
    sx = math.sqrt(x)
    sx1 = math.sqrt(x + 1.0)
    return 1.0 / ((x + 1.0) * sx + x * sx1)
# Herbie's best alternative: conjugate form, free of cancellation.
function code(x) return Float64(1.0 / Float64(Float64(Float64(x + 1.0) * sqrt(x)) + Float64(x * sqrt(Float64(x + 1.0))))) end
% Herbie's best alternative: conjugate form, free of cancellation.
function tmp = code(x) tmp = 1.0 / (((x + 1.0) * sqrt(x)) + (x * sqrt((x + 1.0)))); end
(* Herbie's best alternative: conjugate form, free of cancellation. *)
code[x_] := N[(1.0 / N[(N[(N[(x + 1.0), $MachinePrecision] * N[Sqrt[x], $MachinePrecision]), $MachinePrecision] + N[(x * N[Sqrt[N[(x + 1.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{\left(x + 1\right) \cdot \sqrt{x} + x \cdot \sqrt{x + 1}}
\end{array}
herbie shell --seed 2024085
; Herbie shell input "2isqrt (example 3.6)": precondition 1 < x < 1e+308;
; the conjugate rewrite is supplied via :alt as the known-good target.
(FPCore (x)
:name "2isqrt (example 3.6)"
:precision binary64
:pre (and (> x 1.0) (< x 1e+308))
:alt
(/ 1.0 (+ (* (+ x 1.0) (sqrt x)) (* x (sqrt (+ x 1.0)))))
(- (/ 1.0 (sqrt x)) (/ 1.0 (sqrt (+ x 1.0)))))