
(FPCore (x) :precision binary64 (- 1.0 (sqrt (* 0.5 (+ 1.0 (/ 1.0 (hypot 1.0 x)))))))
double code(double x) {
return 1.0 - sqrt((0.5 * (1.0 + (1.0 / hypot(1.0, x)))));
}
public static double code(double x) {
    // Original program: 1 - sqrt(0.5 * (1 + 1/hypot(1, x))), binary64.
    final double r = Math.hypot(1.0, x);
    return 1.0 - Math.sqrt(0.5 * (1.0 + 1.0 / r));
}
def code(x):
    """Original program: 1 - sqrt(0.5 * (1 + 1/hypot(1, x))) in binary64."""
    r = math.hypot(1.0, x)
    return 1.0 - math.sqrt(0.5 * (1.0 + 1.0 / r))
function code(x) return Float64(1.0 - sqrt(Float64(0.5 * Float64(1.0 + Float64(1.0 / hypot(1.0, x)))))) end
function tmp = code(x) tmp = 1.0 - sqrt((0.5 * (1.0 + (1.0 / hypot(1.0, x))))); end
code[x_] := N[(1.0 - N[Sqrt[N[(0.5 * N[(1.0 + N[(1.0 / N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
1 - \sqrt{0.5 \cdot \left(1 + \frac{1}{\mathsf{hypot}\left(1, x\right)}\right)}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 13 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x) :precision binary64 (- 1.0 (sqrt (* 0.5 (+ 1.0 (/ 1.0 (hypot 1.0 x)))))))
double code(double x) {
return 1.0 - sqrt((0.5 * (1.0 + (1.0 / hypot(1.0, x)))));
}
public static double code(double x) {
return 1.0 - Math.sqrt((0.5 * (1.0 + (1.0 / Math.hypot(1.0, x)))));
}
def code(x): return 1.0 - math.sqrt((0.5 * (1.0 + (1.0 / math.hypot(1.0, x)))))
function code(x) return Float64(1.0 - sqrt(Float64(0.5 * Float64(1.0 + Float64(1.0 / hypot(1.0, x)))))) end
function tmp = code(x) tmp = 1.0 - sqrt((0.5 * (1.0 + (1.0 / hypot(1.0, x))))); end
code[x_] := N[(1.0 - N[Sqrt[N[(0.5 * N[(1.0 + N[(1.0 / N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
1 - \sqrt{0.5 \cdot \left(1 + \frac{1}{\mathsf{hypot}\left(1, x\right)}\right)}
\end{array}
(FPCore (x)
:precision binary64
(let* ((t_0 (/ 0.5 (hypot 1.0 x))))
(/
1.0
(/
(+ 1.0 (sqrt (+ 0.5 t_0)))
(/
(- 1.0 (pow (+ 0.5 (sqrt (/ 0.25 (fma x x 1.0)))) 2.0))
(+ t_0 1.5))))))
double code(double x) {
double t_0 = 0.5 / hypot(1.0, x);
return 1.0 / ((1.0 + sqrt((0.5 + t_0))) / ((1.0 - pow((0.5 + sqrt((0.25 / fma(x, x, 1.0)))), 2.0)) / (t_0 + 1.5)));
}
function code(x) t_0 = Float64(0.5 / hypot(1.0, x)) return Float64(1.0 / Float64(Float64(1.0 + sqrt(Float64(0.5 + t_0))) / Float64(Float64(1.0 - (Float64(0.5 + sqrt(Float64(0.25 / fma(x, x, 1.0)))) ^ 2.0)) / Float64(t_0 + 1.5)))) end
code[x_] := Block[{t$95$0 = N[(0.5 / N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision]), $MachinePrecision]}, N[(1.0 / N[(N[(1.0 + N[Sqrt[N[(0.5 + t$95$0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(N[(1.0 - N[Power[N[(0.5 + N[Sqrt[N[(0.25 / N[(x * x + 1.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision] / N[(t$95$0 + 1.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{0.5}{\mathsf{hypot}\left(1, x\right)}\\
\frac{1}{\frac{1 + \sqrt{0.5 + t\_0}}{\frac{1 - {\left(0.5 + \sqrt{\frac{0.25}{\mathsf{fma}\left(x, x, 1\right)}}\right)}^{2}}{t\_0 + 1.5}}}
\end{array}
\end{array}
Initial program 98.4%
distribute-lft-in 98.4%
metadata-eval 98.4%
associate-*r/ 98.4%
metadata-eval 98.4%
Simplified 98.4%
flip-- 98.4%
div-inv 98.4%
metadata-eval 98.4%
add-sqr-sqrt 99.8%
associate--r+ 99.8%
metadata-eval 99.8%
Applied egg-rr 99.8%
*-commutative 99.8%
associate-/r/ 99.9%
Simplified 99.9%
metadata-eval 99.9%
associate--r+ 99.9%
flip-- 99.9%
metadata-eval 99.9%
pow2 99.9%
Applied egg-rr 99.9%
associate-+r+ 99.9%
metadata-eval 99.9%
+-commutative 99.9%
Simplified 99.9%
add-sqr-sqrt 99.9%
sqrt-unprod 99.9%
frac-times 99.9%
metadata-eval 99.9%
hypot-undefine 99.9%
hypot-undefine 99.9%
rem-square-sqrt 99.9%
metadata-eval 99.9%
+-commutative 99.9%
fma-define 99.9%
Applied egg-rr 99.9%
(FPCore (x) :precision binary64 (/ 1.0 (/ (+ 1.0 (sqrt (+ 0.5 (/ 0.5 (hypot 1.0 x))))) (log (exp (+ 0.5 (/ -0.5 (hypot 1.0 x))))))))
double code(double x) {
return 1.0 / ((1.0 + sqrt((0.5 + (0.5 / hypot(1.0, x))))) / log(exp((0.5 + (-0.5 / hypot(1.0, x))))));
}
public static double code(double x) {
return 1.0 / ((1.0 + Math.sqrt((0.5 + (0.5 / Math.hypot(1.0, x))))) / Math.log(Math.exp((0.5 + (-0.5 / Math.hypot(1.0, x))))));
}
def code(x): return 1.0 / ((1.0 + math.sqrt((0.5 + (0.5 / math.hypot(1.0, x))))) / math.log(math.exp((0.5 + (-0.5 / math.hypot(1.0, x))))))
function code(x) return Float64(1.0 / Float64(Float64(1.0 + sqrt(Float64(0.5 + Float64(0.5 / hypot(1.0, x))))) / log(exp(Float64(0.5 + Float64(-0.5 / hypot(1.0, x))))))) end
function tmp = code(x) tmp = 1.0 / ((1.0 + sqrt((0.5 + (0.5 / hypot(1.0, x))))) / log(exp((0.5 + (-0.5 / hypot(1.0, x)))))); end
code[x_] := N[(1.0 / N[(N[(1.0 + N[Sqrt[N[(0.5 + N[(0.5 / N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[Log[N[Exp[N[(0.5 + N[(-0.5 / N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{\frac{1 + \sqrt{0.5 + \frac{0.5}{\mathsf{hypot}\left(1, x\right)}}}{\log \left(e^{0.5 + \frac{-0.5}{\mathsf{hypot}\left(1, x\right)}}\right)}}
\end{array}
Initial program 98.4%
distribute-lft-in98.4%
metadata-eval98.4%
associate-*r/98.4%
metadata-eval98.4%
Simplified98.4%
flip--98.4%
div-inv98.4%
metadata-eval98.4%
add-sqr-sqrt99.8%
associate--r+99.8%
metadata-eval99.8%
Applied egg-rr99.8%
*-commutative99.8%
associate-/r/99.9%
Simplified99.9%
metadata-eval99.9%
associate--r+99.9%
add-log-exp99.9%
associate--r+99.9%
metadata-eval99.9%
sub-neg99.9%
distribute-neg-frac99.9%
metadata-eval99.9%
Applied egg-rr99.9%
(FPCore (x) :precision binary64 (let* ((t_0 (+ 0.5 (/ 0.5 (hypot 1.0 x))))) (* (+ 0.25 (/ -0.25 (fma x x 1.0))) (/ 1.0 (* t_0 (+ 1.0 (sqrt t_0)))))))
double code(double x) {
double t_0 = 0.5 + (0.5 / hypot(1.0, x));
return (0.25 + (-0.25 / fma(x, x, 1.0))) * (1.0 / (t_0 * (1.0 + sqrt(t_0))));
}
function code(x) t_0 = Float64(0.5 + Float64(0.5 / hypot(1.0, x))) return Float64(Float64(0.25 + Float64(-0.25 / fma(x, x, 1.0))) * Float64(1.0 / Float64(t_0 * Float64(1.0 + sqrt(t_0))))) end
code[x_] := Block[{t$95$0 = N[(0.5 + N[(0.5 / N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, N[(N[(0.25 + N[(-0.25 / N[(x * x + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(1.0 / N[(t$95$0 * N[(1.0 + N[Sqrt[t$95$0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := 0.5 + \frac{0.5}{\mathsf{hypot}\left(1, x\right)}\\
\left(0.25 + \frac{-0.25}{\mathsf{fma}\left(x, x, 1\right)}\right) \cdot \frac{1}{t\_0 \cdot \left(1 + \sqrt{t\_0}\right)}
\end{array}
\end{array}
Initial program 98.4%
distribute-lft-in98.4%
metadata-eval98.4%
associate-*r/98.4%
metadata-eval98.4%
Simplified98.4%
flip--98.4%
metadata-eval98.4%
add-sqr-sqrt99.8%
associate--r+99.8%
metadata-eval99.8%
Applied egg-rr99.8%
flip--99.8%
div-inv99.8%
metadata-eval99.8%
frac-times99.8%
metadata-eval99.8%
hypot-undefine99.8%
hypot-undefine99.8%
rem-square-sqrt99.9%
metadata-eval99.9%
unpow299.9%
Applied egg-rr99.9%
associate-*r/99.9%
*-rgt-identity99.9%
/-rgt-identity99.9%
Simplified99.9%
associate-/l/99.9%
div-inv99.9%
Applied egg-rr99.9%
Final simplification99.9%
(FPCore (x) :precision binary64 (let* ((t_0 (/ 0.5 (hypot 1.0 x)))) (/ 1.0 (/ (+ 1.0 (sqrt (+ 0.5 t_0))) (- 0.5 t_0)))))
double code(double x) {
double t_0 = 0.5 / hypot(1.0, x);
return 1.0 / ((1.0 + sqrt((0.5 + t_0))) / (0.5 - t_0));
}
public static double code(double x) {
double t_0 = 0.5 / Math.hypot(1.0, x);
return 1.0 / ((1.0 + Math.sqrt((0.5 + t_0))) / (0.5 - t_0));
}
def code(x):
    """Herbie alternative #4 for 1 - sqrt(0.5 * (1 + 1/hypot(1, x))).

    Note: raises ZeroDivisionError at x == 0, where 0.5 - t_0 == 0.
    """
    # Fix: the original one-liner placed an assignment and a `return` on
    # the same line with no separator, which is a Python SyntaxError.
    t_0 = 0.5 / math.hypot(1.0, x)
    return 1.0 / ((1.0 + math.sqrt(0.5 + t_0)) / (0.5 - t_0))
function code(x) t_0 = Float64(0.5 / hypot(1.0, x)) return Float64(1.0 / Float64(Float64(1.0 + sqrt(Float64(0.5 + t_0))) / Float64(0.5 - t_0))) end
function tmp = code(x) t_0 = 0.5 / hypot(1.0, x); tmp = 1.0 / ((1.0 + sqrt((0.5 + t_0))) / (0.5 - t_0)); end
code[x_] := Block[{t$95$0 = N[(0.5 / N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision]), $MachinePrecision]}, N[(1.0 / N[(N[(1.0 + N[Sqrt[N[(0.5 + t$95$0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(0.5 - t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{0.5}{\mathsf{hypot}\left(1, x\right)}\\
\frac{1}{\frac{1 + \sqrt{0.5 + t\_0}}{0.5 - t\_0}}
\end{array}
\end{array}
Initial program 98.4%
distribute-lft-in98.4%
metadata-eval98.4%
associate-*r/98.4%
metadata-eval98.4%
Simplified98.4%
flip--98.4%
div-inv98.4%
metadata-eval98.4%
add-sqr-sqrt99.8%
associate--r+99.8%
metadata-eval99.8%
Applied egg-rr99.8%
*-commutative99.8%
associate-/r/99.9%
Simplified99.9%
(FPCore (x) :precision binary64 (let* ((t_0 (/ 0.5 (hypot 1.0 x)))) (/ (- 0.5 t_0) (+ 1.0 (sqrt (+ 0.5 t_0))))))
double code(double x) {
double t_0 = 0.5 / hypot(1.0, x);
return (0.5 - t_0) / (1.0 + sqrt((0.5 + t_0)));
}
public static double code(double x) {
double t_0 = 0.5 / Math.hypot(1.0, x);
return (0.5 - t_0) / (1.0 + Math.sqrt((0.5 + t_0)));
}
def code(x):
    """Herbie alternative #5 for 1 - sqrt(0.5 * (1 + 1/hypot(1, x)))."""
    # Fix: the original one-liner placed an assignment and a `return` on
    # the same line with no separator, which is a Python SyntaxError.
    t_0 = 0.5 / math.hypot(1.0, x)
    return (0.5 - t_0) / (1.0 + math.sqrt(0.5 + t_0))
function code(x) t_0 = Float64(0.5 / hypot(1.0, x)) return Float64(Float64(0.5 - t_0) / Float64(1.0 + sqrt(Float64(0.5 + t_0)))) end
function tmp = code(x) t_0 = 0.5 / hypot(1.0, x); tmp = (0.5 - t_0) / (1.0 + sqrt((0.5 + t_0))); end
code[x_] := Block[{t$95$0 = N[(0.5 / N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision]), $MachinePrecision]}, N[(N[(0.5 - t$95$0), $MachinePrecision] / N[(1.0 + N[Sqrt[N[(0.5 + t$95$0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{0.5}{\mathsf{hypot}\left(1, x\right)}\\
\frac{0.5 - t\_0}{1 + \sqrt{0.5 + t\_0}}
\end{array}
\end{array}
Initial program 98.4%
distribute-lft-in98.4%
metadata-eval98.4%
associate-*r/98.4%
metadata-eval98.4%
Simplified98.4%
flip--98.4%
metadata-eval98.4%
add-sqr-sqrt99.8%
associate--r+99.8%
metadata-eval99.8%
Applied egg-rr99.8%
(FPCore (x) :precision binary64 (/ 1.0 (/ (+ 1.0 (sqrt (+ 0.5 (/ 0.5 (hypot 1.0 x))))) (- 0.5 (/ 0.5 x)))))
double code(double x) {
return 1.0 / ((1.0 + sqrt((0.5 + (0.5 / hypot(1.0, x))))) / (0.5 - (0.5 / x)));
}
public static double code(double x) {
return 1.0 / ((1.0 + Math.sqrt((0.5 + (0.5 / Math.hypot(1.0, x))))) / (0.5 - (0.5 / x)));
}
def code(x): return 1.0 / ((1.0 + math.sqrt((0.5 + (0.5 / math.hypot(1.0, x))))) / (0.5 - (0.5 / x)))
function code(x) return Float64(1.0 / Float64(Float64(1.0 + sqrt(Float64(0.5 + Float64(0.5 / hypot(1.0, x))))) / Float64(0.5 - Float64(0.5 / x)))) end
function tmp = code(x) tmp = 1.0 / ((1.0 + sqrt((0.5 + (0.5 / hypot(1.0, x))))) / (0.5 - (0.5 / x))); end
code[x_] := N[(1.0 / N[(N[(1.0 + N[Sqrt[N[(0.5 + N[(0.5 / N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(0.5 - N[(0.5 / x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{\frac{1 + \sqrt{0.5 + \frac{0.5}{\mathsf{hypot}\left(1, x\right)}}}{0.5 - \frac{0.5}{x}}}
\end{array}
Initial program 98.4%
distribute-lft-in98.4%
metadata-eval98.4%
associate-*r/98.4%
metadata-eval98.4%
Simplified98.4%
flip--98.4%
div-inv98.4%
metadata-eval98.4%
add-sqr-sqrt99.8%
associate--r+99.8%
metadata-eval99.8%
Applied egg-rr99.8%
*-commutative99.8%
associate-/r/99.9%
Simplified99.9%
Taylor expanded in x around inf 98.0%
associate-*r/98.0%
metadata-eval98.0%
Simplified98.0%
(FPCore (x) :precision binary64 (/ (- 0.5 (/ 0.5 x)) (+ 1.0 (sqrt (+ 0.5 (/ 0.5 (hypot 1.0 x)))))))
double code(double x) {
return (0.5 - (0.5 / x)) / (1.0 + sqrt((0.5 + (0.5 / hypot(1.0, x)))));
}
public static double code(double x) {
return (0.5 - (0.5 / x)) / (1.0 + Math.sqrt((0.5 + (0.5 / Math.hypot(1.0, x)))));
}
def code(x): return (0.5 - (0.5 / x)) / (1.0 + math.sqrt((0.5 + (0.5 / math.hypot(1.0, x)))))
function code(x) return Float64(Float64(0.5 - Float64(0.5 / x)) / Float64(1.0 + sqrt(Float64(0.5 + Float64(0.5 / hypot(1.0, x)))))) end
function tmp = code(x) tmp = (0.5 - (0.5 / x)) / (1.0 + sqrt((0.5 + (0.5 / hypot(1.0, x))))); end
code[x_] := N[(N[(0.5 - N[(0.5 / x), $MachinePrecision]), $MachinePrecision] / N[(1.0 + N[Sqrt[N[(0.5 + N[(0.5 / N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{0.5 - \frac{0.5}{x}}{1 + \sqrt{0.5 + \frac{0.5}{\mathsf{hypot}\left(1, x\right)}}}
\end{array}
Initial program 98.4%
distribute-lft-in98.4%
metadata-eval98.4%
associate-*r/98.4%
metadata-eval98.4%
Simplified98.4%
flip--98.4%
metadata-eval98.4%
add-sqr-sqrt99.8%
associate--r+99.8%
metadata-eval99.8%
Applied egg-rr99.8%
Taylor expanded in x around inf 98.0%
associate-*r/98.0%
metadata-eval98.0%
Simplified98.0%
(FPCore (x) :precision binary64 (- 1.0 (sqrt (+ 0.5 (/ 0.5 (hypot 1.0 x))))))
double code(double x) {
return 1.0 - sqrt((0.5 + (0.5 / hypot(1.0, x))));
}
public static double code(double x) {
return 1.0 - Math.sqrt((0.5 + (0.5 / Math.hypot(1.0, x))));
}
def code(x): return 1.0 - math.sqrt((0.5 + (0.5 / math.hypot(1.0, x))))
function code(x) return Float64(1.0 - sqrt(Float64(0.5 + Float64(0.5 / hypot(1.0, x))))) end
function tmp = code(x) tmp = 1.0 - sqrt((0.5 + (0.5 / hypot(1.0, x)))); end
code[x_] := N[(1.0 - N[Sqrt[N[(0.5 + N[(0.5 / N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
1 - \sqrt{0.5 + \frac{0.5}{\mathsf{hypot}\left(1, x\right)}}
\end{array}
Initial program 98.4%
distribute-lft-in98.4%
metadata-eval98.4%
associate-*r/98.4%
metadata-eval98.4%
Simplified98.4%
(FPCore (x) :precision binary64 (- 1.0 (sqrt (+ 0.5 (/ 0.5 x)))))
double code(double x) {
return 1.0 - sqrt((0.5 + (0.5 / x)));
}
real(8) function code(x)
real(8), intent (in) :: x
code = 1.0d0 - sqrt((0.5d0 + (0.5d0 / x)))
end function
public static double code(double x) {
return 1.0 - Math.sqrt((0.5 + (0.5 / x)));
}
def code(x): return 1.0 - math.sqrt((0.5 + (0.5 / x)))
function code(x) return Float64(1.0 - sqrt(Float64(0.5 + Float64(0.5 / x)))) end
function tmp = code(x) tmp = 1.0 - sqrt((0.5 + (0.5 / x))); end
code[x_] := N[(1.0 - N[Sqrt[N[(0.5 + N[(0.5 / x), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
1 - \sqrt{0.5 + \frac{0.5}{x}}
\end{array}
Initial program 98.4%
distribute-lft-in98.4%
metadata-eval98.4%
associate-*r/98.4%
metadata-eval98.4%
Simplified98.4%
Taylor expanded in x around inf 96.4%
(FPCore (x) :precision binary64 (/ 0.5 (+ 1.0 (sqrt 0.5))))
double code(double x) {
return 0.5 / (1.0 + sqrt(0.5));
}
real(8) function code(x)
real(8), intent (in) :: x
code = 0.5d0 / (1.0d0 + sqrt(0.5d0))
end function
public static double code(double x) {
return 0.5 / (1.0 + Math.sqrt(0.5));
}
def code(x): return 0.5 / (1.0 + math.sqrt(0.5))
function code(x) return Float64(0.5 / Float64(1.0 + sqrt(0.5))) end
function tmp = code(x) tmp = 0.5 / (1.0 + sqrt(0.5)); end
code[x_] := N[(0.5 / N[(1.0 + N[Sqrt[0.5], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{0.5}{1 + \sqrt{0.5}}
\end{array}
Initial program 98.4%
distribute-lft-in98.4%
metadata-eval98.4%
associate-*r/98.4%
metadata-eval98.4%
Simplified98.4%
flip--98.4%
metadata-eval98.4%
add-sqr-sqrt99.8%
associate--r+99.8%
metadata-eval99.8%
Applied egg-rr99.8%
Taylor expanded in x around inf 96.2%
(FPCore (x) :precision binary64 (- 1.0 (sqrt 0.5)))
double code(double x) {
return 1.0 - sqrt(0.5);
}
real(8) function code(x)
real(8), intent (in) :: x
code = 1.0d0 - sqrt(0.5d0)
end function
public static double code(double x) {
return 1.0 - Math.sqrt(0.5);
}
def code(x): return 1.0 - math.sqrt(0.5)
function code(x) return Float64(1.0 - sqrt(0.5)) end
function tmp = code(x) tmp = 1.0 - sqrt(0.5); end
code[x_] := N[(1.0 - N[Sqrt[0.5], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
1 - \sqrt{0.5}
\end{array}
Initial program 98.4%
distribute-lft-in98.4%
metadata-eval98.4%
associate-*r/98.4%
metadata-eval98.4%
Simplified98.4%
Taylor expanded in x around inf 94.8%
(FPCore (x) :precision binary64 0.25)
double code(double x) {
return 0.25;
}
real(8) function code(x)
real(8), intent (in) :: x
code = 0.25d0
end function
public static double code(double x) {
return 0.25;
}
def code(x): return 0.25
function code(x) return 0.25 end
function tmp = code(x) tmp = 0.25; end
code[x_] := 0.25
\begin{array}{l}
\\
0.25
\end{array}
Initial program 98.4%
distribute-lft-in98.4%
metadata-eval98.4%
associate-*r/98.4%
metadata-eval98.4%
Simplified98.4%
flip--98.4%
metadata-eval98.4%
add-sqr-sqrt99.8%
associate--r+99.8%
metadata-eval99.8%
Applied egg-rr99.8%
Taylor expanded in x around 0 22.9%
Taylor expanded in x around inf 22.7%
Final simplification22.7%
(FPCore (x) :precision binary64 0.0)
double code(double x) {
return 0.0;
}
real(8) function code(x)
real(8), intent (in) :: x
code = 0.0d0
end function
public static double code(double x) {
return 0.0;
}
def code(x): return 0.0
function code(x) return 0.0 end
function tmp = code(x) tmp = 0.0; end
code[x_] := 0.0
\begin{array}{l}
\\
0
\end{array}
Initial program 98.4%
distribute-lft-in98.4%
metadata-eval98.4%
associate-*r/98.4%
metadata-eval98.4%
Simplified98.4%
Taylor expanded in x around 0 3.1%
Final simplification3.1%
herbie shell --seed 2024087
(FPCore (x)
:name "Given's Rotation SVD example, simplified"
:precision binary64
(- 1.0 (sqrt (* 0.5 (+ 1.0 (/ 1.0 (hypot 1.0 x)))))))