
(FPCore (x) :precision binary64 (- (/ 1.0 (sqrt x)) (/ 1.0 (sqrt (+ x 1.0)))))
/* Initial program: 1/sqrt(x) - 1/sqrt(x+1), evaluated directly.
   The two terms are nearly equal for large x, so this form loses
   accuracy to catastrophic cancellation (see the accuracy log below). */
double code(double x) {
return (1.0 / sqrt(x)) - (1.0 / sqrt((x + 1.0)));
}
! Initial program: 1/sqrt(x) - 1/sqrt(x+1), evaluated directly.
! Cancellation-prone when x is large (the two terms become nearly equal).
real(8) function code(x)
real(8), intent (in) :: x
code = (1.0d0 / sqrt(x)) - (1.0d0 / sqrt((x + 1.0d0)))
end function
// Initial program: 1/sqrt(x) - 1/sqrt(x+1), evaluated directly.
// Cancellation-prone when x is large (the two terms become nearly equal).
public static double code(double x) {
return (1.0 / Math.sqrt(x)) - (1.0 / Math.sqrt((x + 1.0)));
}
def code(x):
    """Initial program: 1/sqrt(x) - 1/sqrt(x+1), evaluated directly.

    Cancellation-prone for large x, where the two terms are nearly equal.
    """
    inv_root_x = 1.0 / math.sqrt(x)
    inv_root_x1 = 1.0 / math.sqrt(x + 1.0)
    return inv_root_x - inv_root_x1
# Initial program: 1/sqrt(x) - 1/sqrt(x+1), evaluated directly (cancellation-prone for large x).
function code(x) return Float64(Float64(1.0 / sqrt(x)) - Float64(1.0 / sqrt(Float64(x + 1.0)))) end
% Initial program: 1/sqrt(x) - 1/sqrt(x+1), evaluated directly (cancellation-prone for large x).
function tmp = code(x) tmp = (1.0 / sqrt(x)) - (1.0 / sqrt((x + 1.0))); end
(* Initial program: 1/sqrt(x) - 1/sqrt(x+1), evaluated directly (cancellation-prone for large x). *)
code[x_] := N[(N[(1.0 / N[Sqrt[x], $MachinePrecision]), $MachinePrecision] - N[(1.0 / N[Sqrt[N[(x + 1.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{\sqrt{x}} - \frac{1}{\sqrt{x + 1}}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 6 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x) :precision binary64 (- (/ 1.0 (sqrt x)) (/ 1.0 (sqrt (+ x 1.0)))))
/* 1/sqrt(x) - 1/sqrt(x+1), evaluated directly. Cancellation-prone for
   large x, where the two square roots are nearly equal. */
double code(double x) {
return (1.0 / sqrt(x)) - (1.0 / sqrt((x + 1.0)));
}
real(8) function code(x)
real(8), intent (in) :: x
code = (1.0d0 / sqrt(x)) - (1.0d0 / sqrt((x + 1.0d0)))
end function
public static double code(double x) {
return (1.0 / Math.sqrt(x)) - (1.0 / Math.sqrt((x + 1.0)));
}
def code(x):
    """Direct evaluation of 1/sqrt(x) - 1/sqrt(x+1) (cancellation-prone for large x)."""
    def inv_sqrt(v):
        return 1.0 / math.sqrt(v)
    return inv_sqrt(x) - inv_sqrt(x + 1.0)
function code(x) return Float64(Float64(1.0 / sqrt(x)) - Float64(1.0 / sqrt(Float64(x + 1.0)))) end
function tmp = code(x) tmp = (1.0 / sqrt(x)) - (1.0 / sqrt((x + 1.0))); end
code[x_] := N[(N[(1.0 / N[Sqrt[x], $MachinePrecision]), $MachinePrecision] - N[(1.0 / N[Sqrt[N[(x + 1.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{\sqrt{x}} - \frac{1}{\sqrt{x + 1}}
\end{array}
(FPCore (x) :precision binary64 (/ (- (* 0.5 (pow x -0.5)) (* (pow x -0.5) (/ 0.375 x))) x))
/* Series rewrite of 1/sqrt(x) - 1/sqrt(x+1): per the derivation log,
   a Taylor expansion about x -> +inf, truncated after the 3/8 term:
   (0.5*x^(-1/2) - x^(-1/2)*(3/8)/x) / x. Avoids the cancellation of
   the direct form. */
double code(double x) {
return ((0.5 * pow(x, -0.5)) - (pow(x, -0.5) * (0.375 / x))) / x;
}
real(8) function code(x)
real(8), intent (in) :: x
code = ((0.5d0 * (x ** (-0.5d0))) - ((x ** (-0.5d0)) * (0.375d0 / x))) / x
end function
public static double code(double x) {
return ((0.5 * Math.pow(x, -0.5)) - (Math.pow(x, -0.5) * (0.375 / x))) / x;
}
def code(x):
    """Truncated series (expansion about x -> +inf) for 1/sqrt(x) - 1/sqrt(x+1)."""
    inv_root = math.pow(x, -0.5)
    correction = inv_root * (0.375 / x)
    return ((0.5 * inv_root) - correction) / x
function code(x) return Float64(Float64(Float64(0.5 * (x ^ -0.5)) - Float64((x ^ -0.5) * Float64(0.375 / x))) / x) end
function tmp = code(x) tmp = ((0.5 * (x ^ -0.5)) - ((x ^ -0.5) * (0.375 / x))) / x; end
code[x_] := N[(N[(N[(0.5 * N[Power[x, -0.5], $MachinePrecision]), $MachinePrecision] - N[(N[Power[x, -0.5], $MachinePrecision] * N[(0.375 / x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / x), $MachinePrecision]
\begin{array}{l}
\\
\frac{0.5 \cdot {x}^{-0.5} - {x}^{-0.5} \cdot \frac{0.375}{x}}{x}
\end{array}
Initial program 38.6%
Taylor expanded in x around inf 85.2%
distribute-rgt-in 85.2%
*-un-lft-identity 85.2%
distribute-lft-in 85.2%
pow-flip 85.2%
sqrt-pow1 85.2%
metadata-eval 85.2%
metadata-eval 85.2%
*-commutative 85.2%
pow-flip 85.2%
sqrt-pow1 85.2%
metadata-eval 85.2%
metadata-eval 85.2%
Applied egg-rr 85.2%
associate-*r* 85.2%
distribute-rgt-out 85.2%
*-commutative 85.2%
associate-*r* 85.2%
metadata-eval 85.2%
*-commutative 85.2%
Simplified 85.2%
Taylor expanded in x around inf 98.8%
Simplified98.8%
(FPCore (x) :precision binary64 (/ (* (pow x -0.5) (- 0.5 (/ 0.375 x))) x))
/* Same truncated series as the previous alternative, with x^(-1/2)
   factored out: x^(-1/2) * (0.5 - 0.375/x) / x. */
double code(double x) {
return (pow(x, -0.5) * (0.5 - (0.375 / x))) / x;
}
real(8) function code(x)
real(8), intent (in) :: x
code = ((x ** (-0.5d0)) * (0.5d0 - (0.375d0 / x))) / x
end function
public static double code(double x) {
return (Math.pow(x, -0.5) * (0.5 - (0.375 / x))) / x;
}
def code(x):
    """Truncated series for 1/sqrt(x) - 1/sqrt(x+1) with x^(-1/2) factored out."""
    scale = 0.5 - (0.375 / x)
    return (math.pow(x, -0.5) * scale) / x
function code(x) return Float64(Float64((x ^ -0.5) * Float64(0.5 - Float64(0.375 / x))) / x) end
function tmp = code(x) tmp = ((x ^ -0.5) * (0.5 - (0.375 / x))) / x; end
code[x_] := N[(N[(N[Power[x, -0.5], $MachinePrecision] * N[(0.5 - N[(0.375 / x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / x), $MachinePrecision]
\begin{array}{l}
\\
\frac{{x}^{-0.5} \cdot \left(0.5 - \frac{0.375}{x}\right)}{x}
\end{array}
Initial program 38.6%
Taylor expanded in x around inf 85.2%
distribute-rgt-in 85.2%
*-un-lft-identity 85.2%
distribute-lft-in 85.2%
pow-flip 85.2%
sqrt-pow1 85.2%
metadata-eval 85.2%
metadata-eval 85.2%
*-commutative 85.2%
pow-flip 85.2%
sqrt-pow1 85.2%
metadata-eval 85.2%
metadata-eval 85.2%
Applied egg-rr 85.2%
associate-*r* 85.2%
distribute-rgt-out 85.2%
*-commutative 85.2%
associate-*r* 85.2%
metadata-eval 85.2%
*-commutative 85.2%
Simplified 85.2%
Taylor expanded in x around inf 98.8%
Simplified 98.8%
div-inv 98.7%
*-commutative 98.7%
distribute-lft-out-- 98.7%
Applied egg-rr 98.7%
associate-*r/ 98.8%
*-rgt-identity 98.8%
Simplified 98.8%
(FPCore (x) :precision binary64 (* (pow x -0.5) (/ (+ 0.5 (/ -0.375 x)) x)))
/* Same truncated series, with the polynomial factor divided by x before
   the final multiply: x^(-1/2) * ((0.5 - 0.375/x) / x). */
double code(double x) {
return pow(x, -0.5) * ((0.5 + (-0.375 / x)) / x);
}
real(8) function code(x)
real(8), intent (in) :: x
code = (x ** (-0.5d0)) * ((0.5d0 + ((-0.375d0) / x)) / x)
end function
public static double code(double x) {
return Math.pow(x, -0.5) * ((0.5 + (-0.375 / x)) / x);
}
def code(x):
    """Truncated series for 1/sqrt(x) - 1/sqrt(x+1), polynomial part pre-divided by x."""
    tail = (0.5 + (-0.375 / x)) / x
    return math.pow(x, -0.5) * tail
function code(x) return Float64((x ^ -0.5) * Float64(Float64(0.5 + Float64(-0.375 / x)) / x)) end
function tmp = code(x) tmp = (x ^ -0.5) * ((0.5 + (-0.375 / x)) / x); end
code[x_] := N[(N[Power[x, -0.5], $MachinePrecision] * N[(N[(0.5 + N[(-0.375 / x), $MachinePrecision]), $MachinePrecision] / x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
{x}^{-0.5} \cdot \frac{0.5 + \frac{-0.375}{x}}{x}
\end{array}
Initial program 38.6%
Taylor expanded in x around inf 85.2%
distribute-rgt-in 85.2%
*-un-lft-identity 85.2%
distribute-lft-in 85.2%
pow-flip 85.2%
sqrt-pow1 85.2%
metadata-eval 85.2%
metadata-eval 85.2%
*-commutative 85.2%
pow-flip 85.2%
sqrt-pow1 85.2%
metadata-eval 85.2%
metadata-eval 85.2%
Applied egg-rr 85.2%
associate-*r* 85.2%
distribute-rgt-out 85.2%
*-commutative 85.2%
associate-*r* 85.2%
metadata-eval 85.2%
*-commutative 85.2%
Simplified 85.2%
Taylor expanded in x around inf 98.8%
Simplified 98.8%
div-inv 98.7%
*-commutative 98.7%
distribute-lft-out-- 98.7%
Applied egg-rr 98.7%
associate-*l* 98.7%
associate-*r/ 98.7%
*-rgt-identity 98.7%
sub-neg 98.7%
distribute-neg-frac 98.7%
metadata-eval 98.7%
Simplified 98.7%
(FPCore (x) :precision binary64 (/ (* 0.5 (sqrt (/ 1.0 x))) x))
/* Leading series term only: 0.5 * x^(-3/2), written as 0.5*sqrt(1/x)/x. */
double code(double x) {
return (0.5 * sqrt((1.0 / x))) / x;
}
real(8) function code(x)
real(8), intent (in) :: x
code = (0.5d0 * sqrt((1.0d0 / x))) / x
end function
public static double code(double x) {
return (0.5 * Math.sqrt((1.0 / x))) / x;
}
def code(x):
    """Leading series term of 1/sqrt(x) - 1/sqrt(x+1): 0.5 * x^(-3/2)."""
    inv = 1.0 / x
    return (0.5 * math.sqrt(inv)) / x
function code(x) return Float64(Float64(0.5 * sqrt(Float64(1.0 / x))) / x) end
function tmp = code(x) tmp = (0.5 * sqrt((1.0 / x))) / x; end
code[x_] := N[(N[(0.5 * N[Sqrt[N[(1.0 / x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / x), $MachinePrecision]
\begin{array}{l}
\\
\frac{0.5 \cdot \sqrt{\frac{1}{x}}}{x}
\end{array}
Initial program 38.6%
Taylor expanded in x around inf 85.2%
distribute-rgt-in 85.2%
*-un-lft-identity 85.2%
distribute-lft-in 85.2%
pow-flip 85.2%
sqrt-pow1 85.2%
metadata-eval 85.2%
metadata-eval 85.2%
*-commutative 85.2%
pow-flip 85.2%
sqrt-pow1 85.2%
metadata-eval 85.2%
metadata-eval 85.2%
Applied egg-rr 85.2%
associate-*r* 85.2%
distribute-rgt-out 85.2%
*-commutative 85.2%
associate-*r* 85.2%
metadata-eval 85.2%
*-commutative 85.2%
Simplified 85.2%
Taylor expanded in x around inf 98.8%
Simplified 98.8%
Taylor expanded in x around inf 97.9%
(FPCore (x) :precision binary64 (pow (* x x) -0.25))
/* (x*x)^(-1/4), i.e. |x|^(-1/2): per the derivation log (Taylor expansion
   around 0), this keeps only the 1/sqrt(x) term of the expression. */
double code(double x) {
return pow((x * x), -0.25);
}
real(8) function code(x)
real(8), intent (in) :: x
code = (x * x) ** (-0.25d0)
end function
public static double code(double x) {
return Math.pow((x * x), -0.25);
}
def code(x):
    """(x*x)^(-1/4) — equal to |x|^(-1/2) in exact arithmetic."""
    squared = x * x
    return math.pow(squared, -0.25)
function code(x) return Float64(x * x) ^ -0.25 end
function tmp = code(x) tmp = (x * x) ^ -0.25; end
code[x_] := N[Power[N[(x * x), $MachinePrecision], -0.25], $MachinePrecision]
\begin{array}{l}
\\
{\left(x \cdot x\right)}^{-0.25}
\end{array}
Initial program 38.6%
Taylor expanded in x around 0 5.6%
inv-pow 5.6%
sqrt-pow1 5.6%
metadata-eval 5.6%
sqr-pow 5.6%
pow-prod-down 37.2%
pow2 37.2%
metadata-eval 37.2%
Applied egg-rr 37.2%
unpow2 37.2%
Applied egg-rr 37.2%
(FPCore (x) :precision binary64 (pow x -0.5))
/* x^(-1/2): keeps only the 1/sqrt(x) term of the expression. */
double code(double x) {
return pow(x, -0.5);
}
real(8) function code(x)
real(8), intent (in) :: x
code = x ** (-0.5d0)
end function
public static double code(double x) {
return Math.pow(x, -0.5);
}
def code(x):
    """x^(-1/2): the 1/sqrt(x) term of the expression alone."""
    exponent = -0.5
    return math.pow(x, exponent)
function code(x) return x ^ -0.5 end
function tmp = code(x) tmp = x ^ -0.5; end
code[x_] := N[Power[x, -0.5], $MachinePrecision]
\begin{array}{l}
\\
{x}^{-0.5}
\end{array}
Initial program 38.6%
sub-neg 38.6%
inv-pow 38.6%
sqrt-pow2 27.4%
metadata-eval 27.4%
distribute-neg-frac 27.4%
metadata-eval 27.4%
+-commutative 27.4%
Applied egg-rr 27.4%
*-rgt-identity 27.4%
metadata-eval 27.4%
distribute-rgt-neg-in 27.4%
sub-neg 27.4%
associate-*l/ 27.4%
metadata-eval 27.4%
unpow1/2 27.4%
exp-to-pow 6.9%
log1p-undefine 6.9%
*-commutative 6.9%
exp-neg 6.9%
*-commutative 6.9%
distribute-rgt-neg-in 6.9%
log1p-undefine 6.9%
metadata-eval 6.9%
exp-to-pow 38.6%
Simplified 38.6%
Taylor expanded in x around 0 5.6%
unpow-1 5.6%
metadata-eval 5.6%
pow-sqr 5.6%
rem-sqrt-square 5.6%
metadata-eval 5.6%
pow-sqr 5.6%
fabs-sqr 5.6%
pow-sqr 5.6%
metadata-eval 5.6%
Simplified 5.6%
(FPCore (x) :precision binary64 (/ 1.0 (+ (* (+ x 1.0) (sqrt x)) (* x (sqrt (+ x 1.0))))))
/* Algebraically equivalent rewrite of 1/sqrt(x) - 1/sqrt(x+1):
   multiplying by the conjugate gives 1/((x+1)*sqrt(x) + x*sqrt(x+1)),
   which replaces the subtraction of nearly equal terms with an addition. */
double code(double x) {
return 1.0 / (((x + 1.0) * sqrt(x)) + (x * sqrt((x + 1.0))));
}
real(8) function code(x)
real(8), intent (in) :: x
code = 1.0d0 / (((x + 1.0d0) * sqrt(x)) + (x * sqrt((x + 1.0d0))))
end function
public static double code(double x) {
return 1.0 / (((x + 1.0) * Math.sqrt(x)) + (x * Math.sqrt((x + 1.0))));
}
def code(x):
    """Conjugate rewrite of 1/sqrt(x) - 1/sqrt(x+1): 1/((x+1)*sqrt(x) + x*sqrt(x+1))."""
    root_x = math.sqrt(x)
    root_x1 = math.sqrt(x + 1.0)
    return 1.0 / (((x + 1.0) * root_x) + (x * root_x1))
function code(x) return Float64(1.0 / Float64(Float64(Float64(x + 1.0) * sqrt(x)) + Float64(x * sqrt(Float64(x + 1.0))))) end
function tmp = code(x) tmp = 1.0 / (((x + 1.0) * sqrt(x)) + (x * sqrt((x + 1.0)))); end
code[x_] := N[(1.0 / N[(N[(N[(x + 1.0), $MachinePrecision] * N[Sqrt[x], $MachinePrecision]), $MachinePrecision] + N[(x * N[Sqrt[N[(x + 1.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{\left(x + 1\right) \cdot \sqrt{x} + x \cdot \sqrt{x + 1}}
\end{array}
(FPCore (x) :precision binary64 (- (pow x -0.5) (pow (+ x 1.0) -0.5)))
/* Direct form with the two 1/sqrt divisions spelled as pow(., -0.5);
   same cancellation behaviour as the initial program for large x. */
double code(double x) {
return pow(x, -0.5) - pow((x + 1.0), -0.5);
}
real(8) function code(x)
real(8), intent (in) :: x
code = (x ** (-0.5d0)) - ((x + 1.0d0) ** (-0.5d0))
end function
public static double code(double x) {
return Math.pow(x, -0.5) - Math.pow((x + 1.0), -0.5);
}
def code(x):
    """Direct form via pow: x^(-1/2) - (x+1)^(-1/2) (cancellation-prone for large x)."""
    left = math.pow(x, -0.5)
    right = math.pow(x + 1.0, -0.5)
    return left - right
function code(x) return Float64((x ^ -0.5) - (Float64(x + 1.0) ^ -0.5)) end
function tmp = code(x) tmp = (x ^ -0.5) - ((x + 1.0) ^ -0.5); end
code[x_] := N[(N[Power[x, -0.5], $MachinePrecision] - N[Power[N[(x + 1.0), $MachinePrecision], -0.5], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
{x}^{-0.5} - {\left(x + 1\right)}^{-0.5}
\end{array}
herbie shell --seed 2024181
(FPCore (x)
:name "2isqrt (example 3.6)"
:precision binary64
:pre (and (> x 1.0) (< x 1e+308))
:alt
(! :herbie-platform default (/ 1 (+ (* (+ x 1) (sqrt x)) (* x (sqrt (+ x 1))))))
:alt
(! :herbie-platform default (- (pow x -1/2) (pow (+ x 1) -1/2)))
(- (/ 1.0 (sqrt x)) (/ 1.0 (sqrt (+ x 1.0)))))