
; Herbie input: f(x) = 1/sqrt(x) - 1/sqrt(x+1), evaluated in IEEE binary64.
(FPCore (x) :precision binary64 (- (/ 1.0 (sqrt x)) (/ 1.0 (sqrt (+ x 1.0)))))
// C rendering of the FPCore expression above.
// NOTE(review): for large x the two terms are nearly equal, so the final
// subtraction cancels most significant bits; this baseline is what the
// alternatives below try to improve — confirm against the accuracy log.
double code(double x) {
return (1.0 / sqrt(x)) - (1.0 / sqrt((x + 1.0)));
}
! Fortran rendering (real(8) = binary64).
real(8) function code(x)
real(8), intent (in) :: x
code = (1.0d0 / sqrt(x)) - (1.0d0 / sqrt((x + 1.0d0)))
end function
// Java rendering.
public static double code(double x) {
return (1.0 / Math.sqrt(x)) - (1.0 / Math.sqrt((x + 1.0)));
}
# Python rendering (requires `import math` at module level).
def code(x): return (1.0 / math.sqrt(x)) - (1.0 / math.sqrt((x + 1.0)))
# Julia rendering; Float64(...) pins every intermediate to binary64.
function code(x) return Float64(Float64(1.0 / sqrt(x)) - Float64(1.0 / sqrt(Float64(x + 1.0)))) end
% MATLAB rendering.
function tmp = code(x) tmp = (1.0 / sqrt(x)) - (1.0 / sqrt((x + 1.0))); end
(* Wolfram rendering; N[..., $MachinePrecision] emulates machine floats. *)
code[x_] := N[(N[(1.0 / N[Sqrt[x], $MachinePrecision]), $MachinePrecision] - N[(1.0 / N[Sqrt[N[(x + 1.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{\sqrt{x}} - \frac{1}{\sqrt{x + 1}}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 7 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
; Alternative: identical to the input program (Herbie keeps the original
; expression as one of its candidates). Renderings mirror the input above.
(FPCore (x) :precision binary64 (- (/ 1.0 (sqrt x)) (/ 1.0 (sqrt (+ x 1.0)))))
// C
double code(double x) {
return (1.0 / sqrt(x)) - (1.0 / sqrt((x + 1.0)));
}
! Fortran
real(8) function code(x)
real(8), intent (in) :: x
code = (1.0d0 / sqrt(x)) - (1.0d0 / sqrt((x + 1.0d0)))
end function
// Java
public static double code(double x) {
return (1.0 / Math.sqrt(x)) - (1.0 / Math.sqrt((x + 1.0)));
}
# Python
def code(x): return (1.0 / math.sqrt(x)) - (1.0 / math.sqrt((x + 1.0)))
# Julia
function code(x) return Float64(Float64(1.0 / sqrt(x)) - Float64(1.0 / sqrt(Float64(x + 1.0)))) end
% MATLAB
function tmp = code(x) tmp = (1.0 / sqrt(x)) - (1.0 / sqrt((x + 1.0))); end
(* Wolfram *)
code[x_] := N[(N[(1.0 / N[Sqrt[x], $MachinePrecision]), $MachinePrecision] - N[(1.0 / N[Sqrt[N[(x + 1.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{\sqrt{x}} - \frac{1}{\sqrt{x + 1}}
\end{array}
; Alternative: (1 / (2x + 0.5 - 0.125/x)) / sqrt(1+x), built with fma.
; No subtraction of nearly equal terms appears in this form.
(FPCore (x) :precision binary64 (/ (/ 1.0 (+ (fma x 2.0 0.5) (* x (/ -0.125 (* x x))))) (sqrt (+ 1.0 x))))
// C (fma comes from <math.h>).
double code(double x) {
return (1.0 / (fma(x, 2.0, 0.5) + (x * (-0.125 / (x * x))))) / sqrt((1.0 + x));
}
# Julia
function code(x) return Float64(Float64(1.0 / Float64(fma(x, 2.0, 0.5) + Float64(x * Float64(-0.125 / Float64(x * x))))) / sqrt(Float64(1.0 + x))) end
(* Wolfram; the fma is written out as x*2 + 0.5. *)
code[x_] := N[(N[(1.0 / N[(N[(x * 2.0 + 0.5), $MachinePrecision] + N[(x * N[(-0.125 / N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[Sqrt[N[(1.0 + x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\frac{1}{\mathsf{fma}\left(x, 2, 0.5\right) + x \cdot \frac{-0.125}{x \cdot x}}}{\sqrt{1 + x}}
\end{array}
Initial program 41.1%
Applied rewrites 43.7%
Taylor expanded in x around inf
+-commutative N/A
distribute-rgt-in N/A
associate-*l* N/A
lft-mult-inverse N/A
metadata-eval N/A
*-lft-identity N/A
lower-+.f64 42.4
Applied rewrites 42.4%
Taylor expanded in x around 0
Applied rewrites 98.3%
Taylor expanded in x around inf
sub-neg N/A
distribute-rgt-in N/A
*-commutative N/A
lower-+.f64 N/A
distribute-lft-in N/A
*-commutative N/A
associate-*l* N/A
lft-mult-inverse N/A
metadata-eval N/A
lower-fma.f64 N/A
distribute-neg-frac N/A
metadata-eval N/A
associate-*l/ N/A
*-commutative N/A
associate-*r/ N/A
metadata-eval N/A
distribute-neg-frac N/A
lower-*.f64 N/A
distribute-neg-frac N/A
metadata-eval N/A
lower-/.f64 N/A
unpow2 N/A
lower-*.f64 98.9
Applied rewrites 98.9%
; Alternative: (1 / (x + (x + 0.5))) / sqrt(1+x) — denominator 2x + 0.5
; written as repeated addition; no subtraction of nearly equal terms.
(FPCore (x) :precision binary64 (/ (/ 1.0 (+ x (+ x 0.5))) (sqrt (+ 1.0 x))))
// C
double code(double x) {
return (1.0 / (x + (x + 0.5))) / sqrt((1.0 + x));
}
! Fortran
real(8) function code(x)
real(8), intent (in) :: x
code = (1.0d0 / (x + (x + 0.5d0))) / sqrt((1.0d0 + x))
end function
// Java
public static double code(double x) {
return (1.0 / (x + (x + 0.5))) / Math.sqrt((1.0 + x));
}
# Python
def code(x): return (1.0 / (x + (x + 0.5))) / math.sqrt((1.0 + x))
# Julia
function code(x) return Float64(Float64(1.0 / Float64(x + Float64(x + 0.5))) / sqrt(Float64(1.0 + x))) end
% MATLAB
function tmp = code(x) tmp = (1.0 / (x + (x + 0.5))) / sqrt((1.0 + x)); end
(* Wolfram *)
code[x_] := N[(N[(1.0 / N[(x + N[(x + 0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[Sqrt[N[(1.0 + x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\frac{1}{x + \left(x + 0.5\right)}}{\sqrt{1 + x}}
\end{array}
Initial program 41.1%
Applied rewrites 43.7%
Taylor expanded in x around inf
+-commutative N/A
distribute-rgt-in N/A
associate-*l* N/A
lft-mult-inverse N/A
metadata-eval N/A
*-lft-identity N/A
lower-+.f64 42.4
Applied rewrites 42.4%
Taylor expanded in x around 0
Applied rewrites 98.3%
Final simplification 98.3%
; Alternative: 1 / (sqrt(1+x) * (x + (x + 0.5))) — the previous candidate
; rearranged into a single reciprocal.
(FPCore (x) :precision binary64 (/ 1.0 (* (sqrt (+ 1.0 x)) (+ x (+ x 0.5)))))
// C
double code(double x) {
return 1.0 / (sqrt((1.0 + x)) * (x + (x + 0.5)));
}
! Fortran
real(8) function code(x)
real(8), intent (in) :: x
code = 1.0d0 / (sqrt((1.0d0 + x)) * (x + (x + 0.5d0)))
end function
// Java
public static double code(double x) {
return 1.0 / (Math.sqrt((1.0 + x)) * (x + (x + 0.5)));
}
# Python
def code(x): return 1.0 / (math.sqrt((1.0 + x)) * (x + (x + 0.5)))
# Julia
function code(x) return Float64(1.0 / Float64(sqrt(Float64(1.0 + x)) * Float64(x + Float64(x + 0.5)))) end
% MATLAB
function tmp = code(x) tmp = 1.0 / (sqrt((1.0 + x)) * (x + (x + 0.5))); end
(* Wolfram *)
code[x_] := N[(1.0 / N[(N[Sqrt[N[(1.0 + x), $MachinePrecision]], $MachinePrecision] * N[(x + N[(x + 0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{\sqrt{1 + x} \cdot \left(x + \left(x + 0.5\right)\right)}
\end{array}
Initial program 41.1%
Applied rewrites 43.7%
Taylor expanded in x around inf
lower-/.f64 96.8
Applied rewrites 96.8%
lift-+.f64 N/A
lift-fma.f64 N/A
lift-sqrt.f64 N/A
lift-+.f64 N/A
lift--.f64 N/A
lift-+.f64 N/A
lift-sqrt.f64 N/A
associate-/l/ N/A
lift--.f64 N/A
lift-+.f64 N/A
associate--l+ N/A
+-inverses N/A
metadata-eval N/A
lower-/.f64 N/A
lower-*.f64 84.8
Applied rewrites 97.2%
; Alternative: (0.5/x)/sqrt(x) = 0.5 * x^(-3/2), the leading term of the
; series expansion at infinity (see the "Taylor expanded ... around inf"
; log entries below).
(FPCore (x) :precision binary64 (/ (/ 0.5 x) (sqrt x)))
// C
double code(double x) {
return (0.5 / x) / sqrt(x);
}
! Fortran
real(8) function code(x)
real(8), intent (in) :: x
code = (0.5d0 / x) / sqrt(x)
end function
// Java
public static double code(double x) {
return (0.5 / x) / Math.sqrt(x);
}
# Python
def code(x): return (0.5 / x) / math.sqrt(x)
# Julia
function code(x) return Float64(Float64(0.5 / x) / sqrt(x)) end
% MATLAB
function tmp = code(x) tmp = (0.5 / x) / sqrt(x); end
(* Wolfram *)
code[x_] := N[(N[(0.5 / x), $MachinePrecision] / N[Sqrt[x], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\frac{0.5}{x}}{\sqrt{x}}
\end{array}
Initial program 41.1%
Applied rewrites 43.7%
Taylor expanded in x around inf
lower-/.f64 96.8
Applied rewrites 96.8%
Taylor expanded in x around inf
lower-sqrt.f64 96.6
Applied rewrites 96.6%
; Alternative: (0.5/x) / fma(x, 0.5, 1.0) — fma-based denominator.
; Per the log below this candidate scores only 39.1% accuracy.
(FPCore (x) :precision binary64 (/ (/ 0.5 x) (fma x 0.5 1.0)))
// C (fma comes from <math.h>).
double code(double x) {
return (0.5 / x) / fma(x, 0.5, 1.0);
}
# Julia
function code(x) return Float64(Float64(0.5 / x) / fma(x, 0.5, 1.0)) end
(* Wolfram; the fma is written out as x*0.5 + 1. *)
code[x_] := N[(N[(0.5 / x), $MachinePrecision] / N[(x * 0.5 + 1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\frac{0.5}{x}}{\mathsf{fma}\left(x, 0.5, 1\right)}
\end{array}
Initial program 41.1%
Applied rewrites 43.7%
Taylor expanded in x around inf
lower-/.f64 96.8
Applied rewrites 96.8%
Taylor expanded in x around 0
+-commutative N/A
*-commutative N/A
lower-fma.f64 39.1
Applied rewrites 39.1%
; Alternative: 1 / (1 * (x * 2)).  The redundant multiply by 1.0 is emitted
; by the generator and is kept verbatim so the renderings match the FPCore.
(FPCore (x) :precision binary64 (/ 1.0 (* 1.0 (* x 2.0))))
// C
double code(double x) {
return 1.0 / (1.0 * (x * 2.0));
}
! Fortran
real(8) function code(x)
real(8), intent (in) :: x
code = 1.0d0 / (1.0d0 * (x * 2.0d0))
end function
// Java
public static double code(double x) {
return 1.0 / (1.0 * (x * 2.0));
}
# Python
def code(x): return 1.0 / (1.0 * (x * 2.0))
# Julia
function code(x) return Float64(1.0 / Float64(1.0 * Float64(x * 2.0))) end
% MATLAB
function tmp = code(x) tmp = 1.0 / (1.0 * (x * 2.0)); end
(* Wolfram *)
code[x_] := N[(1.0 / N[(1.0 * N[(x * 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{1 \cdot \left(x \cdot 2\right)}
\end{array}
Initial program 41.1%
Applied rewrites 43.7%
Taylor expanded in x around inf
+-commutative N/A
distribute-rgt-in N/A
associate-*l* N/A
lft-mult-inverse N/A
metadata-eval N/A
*-lft-identity N/A
lower-+.f64 42.4
Applied rewrites 42.4%
Taylor expanded in x around 0
Applied rewrites 98.3%
lift-+.f64 N/A
lift-sqrt.f64 N/A
associate-/l/ N/A
associate--l+ N/A
+-inverses N/A
metadata-eval N/A
lift-+.f64 N/A
lift-approx N/A
lift-*.f64 N/A
lower-/.f64 97.2
lift-sqrt.f64 N/A
lift-+.f64 N/A
lift-approx 7.8
lift-approx N/A
lift-+.f64 N/A
lift-approx 7.8
Applied rewrites 7.8%
; Alternative: sqrt(1/x), from the series expansion around 0 (only the
; dominant 1/sqrt(x) term survives); scores 5.7% per the log below.
(FPCore (x) :precision binary64 (sqrt (/ 1.0 x)))
// C
double code(double x) {
return sqrt((1.0 / x));
}
! Fortran
real(8) function code(x)
real(8), intent (in) :: x
code = sqrt((1.0d0 / x))
end function
// Java
public static double code(double x) {
return Math.sqrt((1.0 / x));
}
# Python
def code(x): return math.sqrt((1.0 / x))
# Julia
function code(x) return sqrt(Float64(1.0 / x)) end
% MATLAB
function tmp = code(x) tmp = sqrt((1.0 / x)); end
(* Wolfram *)
code[x_] := N[Sqrt[N[(1.0 / x), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\sqrt{\frac{1}{x}}
\end{array}
Initial program 41.1%
Taylor expanded in x around 0
lower-sqrt.f64 N/A
lower-/.f64 5.7
Applied rewrites 5.7%
; Alternative: exact conjugate rewrite.  Algebraically,
;   1/sqrt(x) - 1/sqrt(x+1) = 1 / ((x+1)*sqrt(x) + x*sqrt(x+1)),
; so this form removes the cancelling subtraction entirely.
(FPCore (x) :precision binary64 (/ 1.0 (+ (* (+ x 1.0) (sqrt x)) (* x (sqrt (+ x 1.0))))))
// C
double code(double x) {
return 1.0 / (((x + 1.0) * sqrt(x)) + (x * sqrt((x + 1.0))));
}
! Fortran
real(8) function code(x)
real(8), intent (in) :: x
code = 1.0d0 / (((x + 1.0d0) * sqrt(x)) + (x * sqrt((x + 1.0d0))))
end function
// Java
public static double code(double x) {
return 1.0 / (((x + 1.0) * Math.sqrt(x)) + (x * Math.sqrt((x + 1.0))));
}
# Python
def code(x): return 1.0 / (((x + 1.0) * math.sqrt(x)) + (x * math.sqrt((x + 1.0))))
# Julia
function code(x) return Float64(1.0 / Float64(Float64(Float64(x + 1.0) * sqrt(x)) + Float64(x * sqrt(Float64(x + 1.0))))) end
% MATLAB
function tmp = code(x) tmp = 1.0 / (((x + 1.0) * sqrt(x)) + (x * sqrt((x + 1.0)))); end
(* Wolfram *)
code[x_] := N[(1.0 / N[(N[(N[(x + 1.0), $MachinePrecision] * N[Sqrt[x], $MachinePrecision]), $MachinePrecision] + N[(x * N[Sqrt[N[(x + 1.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{\left(x + 1\right) \cdot \sqrt{x} + x \cdot \sqrt{x + 1}}
\end{array}
; Alternative: the input expression rewritten with pow: x^(-1/2) - (x+1)^(-1/2).
; NOTE(review): mathematically identical to the input, so it presumably shares
; the same cancellation behaviour — confirm against the report's accuracy table.
(FPCore (x) :precision binary64 (- (pow x -0.5) (pow (+ x 1.0) -0.5)))
// C
double code(double x) {
return pow(x, -0.5) - pow((x + 1.0), -0.5);
}
! Fortran
real(8) function code(x)
real(8), intent (in) :: x
code = (x ** (-0.5d0)) - ((x + 1.0d0) ** (-0.5d0))
end function
// Java
public static double code(double x) {
return Math.pow(x, -0.5) - Math.pow((x + 1.0), -0.5);
}
# Python
def code(x): return math.pow(x, -0.5) - math.pow((x + 1.0), -0.5)
# Julia
function code(x) return Float64((x ^ -0.5) - (Float64(x + 1.0) ^ -0.5)) end
% MATLAB
function tmp = code(x) tmp = (x ^ -0.5) - ((x + 1.0) ^ -0.5); end
(* Wolfram *)
code[x_] := N[(N[Power[x, -0.5], $MachinePrecision] - N[Power[N[(x + 1.0), $MachinePrecision], -0.5], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
{x}^{-0.5} - {\left(x + 1\right)}^{-0.5}
\end{array}
herbie shell --seed 2024212
; Reproduction input: the FPCore fed to the `herbie shell` invocation above.
(FPCore (x)
:name "2isqrt (example 3.6)"
:precision binary64
; Sampling precondition: x is drawn from (1, 1e308) only.
:pre (and (> x 1.0) (< x 1e+308))
:alt
; User-supplied hint: cancellation-free conjugate form.
(! :herbie-platform default (/ 1 (+ (* (+ x 1) (sqrt x)) (* x (sqrt (+ x 1))))))
:alt
; User-supplied hint: same function written with pow.
(! :herbie-platform default (- (pow x -1/2) (pow (+ x 1) -1/2)))
(- (/ 1.0 (sqrt x)) (/ 1.0 (sqrt (+ x 1.0)))))