
;; Input program: sqrt(1+x) - sqrt(1-x); the subtraction cancels catastrophically near x = 0.
(FPCore (x) :precision binary64 (- (sqrt (+ 1.0 x)) (sqrt (- 1.0 x))))
double code(double x) {
return sqrt((1.0 + x)) - sqrt((1.0 - x));
}
real(8) function code(x)
    ! sqrt(1+x) - sqrt(1-x); loses accuracy to cancellation near x = 0.
    real(8), intent (in) :: x
    real(8) :: root_plus, root_minus
    root_plus = sqrt(1.0d0 + x)
    root_minus = sqrt(1.0d0 - x)
    code = root_plus - root_minus
end function
public static double code(double x) {
    // sqrt(1+x) - sqrt(1-x); subtraction of nearly equal roots cancels near x = 0.
    final double rootPlus = Math.sqrt(1.0 + x);
    final double rootMinus = Math.sqrt(1.0 - x);
    return rootPlus - rootMinus;
}
def code(x):
    # sqrt(1+x) - sqrt(1-x); subtraction of nearly equal roots cancels near x = 0.
    root_plus = math.sqrt(1.0 + x)
    root_minus = math.sqrt(1.0 - x)
    return root_plus - root_minus
function code(x)
    # sqrt(1+x) - sqrt(1-x); subtraction of nearly equal roots cancels near x = 0.
    root_plus = sqrt(Float64(1.0 + x))
    root_minus = sqrt(Float64(1.0 - x))
    return Float64(root_plus - root_minus)
end
function tmp = code(x)
% sqrt(1+x) - sqrt(1-x); subtraction of nearly equal roots cancels near x = 0.
	root_plus = sqrt(1.0 + x);
	root_minus = sqrt(1.0 - x);
	tmp = root_plus - root_minus;
end
(* sqrt(1+x) - sqrt(1-x), rounded to machine precision after every operation; cancellation-prone near x = 0. *)
code[x_] := N[(N[Sqrt[N[(1.0 + x), $MachinePrecision]], $MachinePrecision] - N[Sqrt[N[(1.0 - x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\sqrt{1 + x} - \sqrt{1 - x}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 5 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
;; Repeats the input program sqrt(1+x) - sqrt(1-x) in the report body.
(FPCore (x) :precision binary64 (- (sqrt (+ 1.0 x)) (sqrt (- 1.0 x))))
// Identical to the input program sqrt(1+x) - sqrt(1-x); cancellation-prone near x = 0.
double code(double x) {
return sqrt((1.0 + x)) - sqrt((1.0 - x));
}
! Identical to the input program sqrt(1+x) - sqrt(1-x); cancellation-prone near x = 0.
real(8) function code(x)
real(8), intent (in) :: x
code = sqrt((1.0d0 + x)) - sqrt((1.0d0 - x))
end function
// Identical to the input program sqrt(1+x) - sqrt(1-x); cancellation-prone near x = 0.
public static double code(double x) {
return Math.sqrt((1.0 + x)) - Math.sqrt((1.0 - x));
}
# Identical to the input program sqrt(1+x) - sqrt(1-x); cancellation-prone near x = 0.
def code(x): return math.sqrt((1.0 + x)) - math.sqrt((1.0 - x))
# Identical to the input program sqrt(1+x) - sqrt(1-x); cancellation-prone near x = 0.
function code(x) return Float64(sqrt(Float64(1.0 + x)) - sqrt(Float64(1.0 - x))) end
% Identical to the input program sqrt(1+x) - sqrt(1-x); cancellation-prone near x = 0.
function tmp = code(x) tmp = sqrt((1.0 + x)) - sqrt((1.0 - x)); end
(* Identical to the input program sqrt(1+x) - sqrt(1-x); cancellation-prone near x = 0. *)
code[x_] := N[(N[Sqrt[N[(1.0 + x), $MachinePrecision]], $MachinePrecision] - N[Sqrt[N[(1.0 - x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\sqrt{1 + x} - \sqrt{1 - x}
\end{array}
;; Odd degree-7 polynomial (Taylor expansion of the input about x = 0), evaluated with fused multiply-adds.
(FPCore (x) :precision binary64 (fma (* x x) (* x (fma x (* x (fma (* x x) 0.0322265625 0.0546875)) 0.125)) x))
double code(double x) {
return fma((x * x), (x * fma(x, (x * fma((x * x), 0.0322265625, 0.0546875)), 0.125)), x);
}
function code(x)
    # Odd degree-7 polynomial approximation, evaluated innermost-first with fma.
    x2 = Float64(x * x)
    inner = fma(x2, 0.0322265625, 0.0546875)
    mid = fma(x, Float64(x * inner), 0.125)
    return fma(x2, Float64(x * mid), x)
end
(* Odd degree-7 polynomial approximation of sqrt(1+x) - sqrt(1-x), machine precision at each step. *)
code[x_] := N[(N[(x * x), $MachinePrecision] * N[(x * N[(x * N[(x * N[(N[(x * x), $MachinePrecision] * 0.0322265625 + 0.0546875), $MachinePrecision]), $MachinePrecision] + 0.125), $MachinePrecision]), $MachinePrecision] + x), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(x \cdot x, x \cdot \mathsf{fma}\left(x, x \cdot \mathsf{fma}\left(x \cdot x, 0.0322265625, 0.0546875\right), 0.125\right), x\right)
\end{array}
Initial program 8.1%
Taylor expanded in x around 0
+-commutative N/A
distribute-rgt-in N/A
associate-*l* N/A
*-lft-identity N/A
lower-fma.f64 N/A
Applied rewrites 100.0%
;; Odd degree-5 polynomial approximation of the input, evaluated with fused multiply-adds.
(FPCore (x) :precision binary64 (fma (* x x) (* x (fma x (* x 0.0546875) 0.125)) x))
double code(double x) {
return fma((x * x), (x * fma(x, (x * 0.0546875), 0.125)), x);
}
function code(x)
    # Odd degree-5 polynomial approximation, evaluated with fma.
    x2 = Float64(x * x)
    mid = fma(x, Float64(x * 0.0546875), 0.125)
    return fma(x2, Float64(x * mid), x)
end
(* Odd degree-5 polynomial approximation of sqrt(1+x) - sqrt(1-x), machine precision at each step. *)
code[x_] := N[(N[(x * x), $MachinePrecision] * N[(x * N[(x * N[(x * 0.0546875), $MachinePrecision] + 0.125), $MachinePrecision]), $MachinePrecision] + x), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(x \cdot x, x \cdot \mathsf{fma}\left(x, x \cdot 0.0546875, 0.125\right), x\right)
\end{array}
Initial program 8.1%
Taylor expanded in x around 0
+-commutative N/A
distribute-rgt-in N/A
associate-*l* N/A
*-lft-identity N/A
lower-fma.f64 N/A
unpow2 N/A
lower-*.f64 N/A
*-commutative N/A
lower-*.f64 N/A
+-commutative N/A
unpow2 N/A
associate-*r* N/A
*-commutative N/A
lower-fma.f64 N/A
*-commutative N/A
lower-*.f64 100.0
Applied rewrites 100.0%
;; Cubic approximation x + x^3/8 of the input, evaluated with a fused multiply-add.
(FPCore (x) :precision binary64 (fma x (* x (* x 0.125)) x))
double code(double x) {
return fma(x, (x * (x * 0.125)), x);
}
function code(x)
    # Cubic approximation x + x^3/8, with the final add fused into the multiply.
    quadratic = Float64(x * Float64(x * 0.125))
    return fma(x, quadratic, x)
end
(* Cubic approximation x + x^3/8 of sqrt(1+x) - sqrt(1-x), machine precision at each step. *)
code[x_] := N[(x * N[(x * N[(x * 0.125), $MachinePrecision]), $MachinePrecision] + x), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(x, x \cdot \left(x \cdot 0.125\right), x\right)
\end{array}
Initial program 8.1%
Taylor expanded in x around 0
+-commutative N/A
distribute-lft-in N/A
*-rgt-identity N/A
lower-fma.f64 N/A
unpow2 N/A
associate-*r* N/A
*-commutative N/A
lower-*.f64 N/A
*-commutative N/A
lower-*.f64 99.9
Applied rewrites 99.9%
;; Cubic approximation in factored form x * (1 + x^2/8), evaluated with a fused multiply-add.
(FPCore (x) :precision binary64 (* x (fma x (* x 0.125) 1.0)))
double code(double x) {
return x * fma(x, (x * 0.125), 1.0);
}
function code(x)
    # Cubic approximation in factored form: x * (1 + x^2/8).
    factor = fma(x, Float64(x * 0.125), 1.0)
    return Float64(x * factor)
end
(* Cubic approximation in factored form x * (1 + x^2/8), machine precision at each step. *)
code[x_] := N[(x * N[(x * N[(x * 0.125), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x \cdot \mathsf{fma}\left(x, x \cdot 0.125, 1\right)
\end{array}
Initial program 8.1%
Taylor expanded in x around 0
+-commutative N/A
distribute-lft-in N/A
*-rgt-identity N/A
lower-fma.f64 N/A
unpow2 N/A
associate-*r* N/A
*-commutative N/A
lower-*.f64 N/A
*-commutative N/A
lower-*.f64 99.9
Applied rewrites 99.9%
lift-*.f64 N/A
lift-*.f64 N/A
*-commutative N/A
distribute-lft1-in N/A
lower-*.f64 N/A
lift-*.f64 N/A
lower-fma.f64 99.8
Applied rewrites 99.8%
Final simplification 99.8%
;; Degenerate alternative: only the leading Taylor term x.
(FPCore (x) :precision binary64 x)
// Degenerate alternative: only the leading Taylor term x of sqrt(1+x) - sqrt(1-x).
double code(double x) {
return x;
}
! Degenerate alternative: only the leading Taylor term x.
real(8) function code(x)
real(8), intent (in) :: x
code = x
end function
// Degenerate alternative: only the leading Taylor term x.
public static double code(double x) {
return x;
}
# Degenerate alternative: only the leading Taylor term x.
def code(x): return x
# Degenerate alternative: only the leading Taylor term x.
function code(x) return x end
% Degenerate alternative: only the leading Taylor term x.
function tmp = code(x) tmp = x; end
(* Degenerate alternative: only the leading Taylor term x. *)
code[x_] := x
\begin{array}{l}
\\
x
\end{array}
Initial program 8.1%
Taylor expanded in x around 0
+-commutative N/A
distribute-lft-in N/A
*-rgt-identity N/A
lower-fma.f64 N/A
unpow2 N/A
associate-*r* N/A
*-commutative N/A
lower-*.f64 N/A
*-commutative N/A
lower-*.f64 99.9
Applied rewrites 99.9%
lift-*.f64 N/A
lift-*.f64 N/A
*-commutative N/A
distribute-lft1-in N/A
lower-*.f64 N/A
lift-*.f64 N/A
lower-fma.f64 99.8
Applied rewrites 99.8%
Taylor expanded in x around 0
Applied rewrites 99.2%
*-lft-identity 99.2
Applied rewrites 99.2%
;; Exact rewriting 2x / (sqrt(1+x) + sqrt(1-x)); the denominator adds, so no cancellation near x = 0.
(FPCore (x) :precision binary64 (/ (* 2.0 x) (+ (sqrt (+ 1.0 x)) (sqrt (- 1.0 x)))))
double code(double x) {
return (2.0 * x) / (sqrt((1.0 + x)) + sqrt((1.0 - x)));
}
real(8) function code(x)
    ! 2x / (sqrt(1+x) + sqrt(1-x)): cancellation-free form of sqrt(1+x) - sqrt(1-x).
    real(8), intent (in) :: x
    real(8) :: denom
    denom = sqrt(1.0d0 + x) + sqrt(1.0d0 - x)
    code = (2.0d0 * x) / denom
end function
public static double code(double x) {
    // 2x / (sqrt(1+x) + sqrt(1-x)): cancellation-free form of sqrt(1+x) - sqrt(1-x).
    final double denom = Math.sqrt(1.0 + x) + Math.sqrt(1.0 - x);
    return (2.0 * x) / denom;
}
def code(x):
    # 2x / (sqrt(1+x) + sqrt(1-x)): cancellation-free form of sqrt(1+x) - sqrt(1-x).
    denom = math.sqrt(1.0 + x) + math.sqrt(1.0 - x)
    return (2.0 * x) / denom
function code(x)
    # 2x / (sqrt(1+x) + sqrt(1-x)): cancellation-free form of sqrt(1+x) - sqrt(1-x).
    denom = Float64(sqrt(Float64(1.0 + x)) + sqrt(Float64(1.0 - x)))
    return Float64(Float64(2.0 * x) / denom)
end
function tmp = code(x)
% 2x / (sqrt(1+x) + sqrt(1-x)): cancellation-free form of sqrt(1+x) - sqrt(1-x).
	denom = sqrt(1.0 + x) + sqrt(1.0 - x);
	tmp = (2.0 * x) / denom;
end
(* 2x / (sqrt(1+x) + sqrt(1-x)): cancellation-free form of sqrt(1+x) - sqrt(1-x), machine precision at each step. *)
code[x_] := N[(N[(2.0 * x), $MachinePrecision] / N[(N[Sqrt[N[(1.0 + x), $MachinePrecision]], $MachinePrecision] + N[Sqrt[N[(1.0 - x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{2 \cdot x}{\sqrt{1 + x} + \sqrt{1 - x}}
\end{array}
herbie shell --seed 2024216
;; Job specification: the input expression, domain [-1, 1], and the target
;; alternative 2x / (sqrt(1+x) + sqrt(1-x)) supplied via :alt.
(FPCore (x)
:name "bug333 (missed optimization)"
:precision binary64
:pre (and (<= -1.0 x) (<= x 1.0))
:alt
(! :herbie-platform default (/ (* 2 x) (+ (sqrt (+ 1 x)) (sqrt (- 1 x)))))
(- (sqrt (+ 1.0 x)) (sqrt (- 1.0 x))))