
(FPCore (x) :precision binary64 (- 1.0 (sqrt (* 0.5 (+ 1.0 (/ 1.0 (hypot 1.0 x)))))))
double code(double x) {
return 1.0 - sqrt((0.5 * (1.0 + (1.0 / hypot(1.0, x)))));
}
// Herbie input program: 1 - sqrt(0.5 * (1 + 1/hypot(1, x))) in binary64.
public static double code(double x) {
    final double invNorm = 1.0 / Math.hypot(1.0, x);   // 1 / sqrt(1 + x^2)
    final double radicand = 0.5 * (1.0 + invNorm);     // midpoint of 1 and invNorm
    return 1.0 - Math.sqrt(radicand);
}
def code(x):
    """Herbie input program: 1 - sqrt(0.5 * (1 + 1/hypot(1, x)))."""
    inv_norm = 1.0 / math.hypot(1.0, x)   # 1 / sqrt(1 + x^2)
    radicand = 0.5 * (1.0 + inv_norm)     # midpoint of 1 and inv_norm
    return 1.0 - math.sqrt(radicand)
# Herbie input program: 1 - sqrt(0.5 * (1 + 1/hypot(1, x))) in Float64.
function code(x) return Float64(1.0 - sqrt(Float64(0.5 * Float64(1.0 + Float64(1.0 / hypot(1.0, x)))))) end
% Herbie input program: 1 - sqrt(0.5 * (1 + 1/hypot(1, x))).
function tmp = code(x) tmp = 1.0 - sqrt((0.5 * (1.0 + (1.0 / hypot(1.0, x))))); end
(* Herbie input program: 1 - sqrt(0.5*(1 + 1/hypot(1, x))); note hypot is expanded to Sqrt[1^2 + x^2] here. *)
code[x_] := N[(1.0 - N[Sqrt[N[(0.5 * N[(1.0 + N[(1.0 / N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
1 - \sqrt{0.5 \cdot \left(1 + \frac{1}{\mathsf{hypot}\left(1, x\right)}\right)}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 10 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x) :precision binary64 (- 1.0 (sqrt (* 0.5 (+ 1.0 (/ 1.0 (hypot 1.0 x)))))))
/* Herbie input program (repeated listing): 1 - sqrt(0.5 * (1 + 1/hypot(1, x))). */
double code(double x) {
return 1.0 - sqrt((0.5 * (1.0 + (1.0 / hypot(1.0, x)))));
}
// Herbie input program (repeated listing): 1 - sqrt(0.5 * (1 + 1/hypot(1, x))).
public static double code(double x) {
return 1.0 - Math.sqrt((0.5 * (1.0 + (1.0 / Math.hypot(1.0, x)))));
}
# Herbie input program (repeated listing): 1 - sqrt(0.5 * (1 + 1/hypot(1, x))).
def code(x): return 1.0 - math.sqrt((0.5 * (1.0 + (1.0 / math.hypot(1.0, x)))))
# Herbie input program (repeated listing): 1 - sqrt(0.5 * (1 + 1/hypot(1, x))) in Float64.
function code(x) return Float64(1.0 - sqrt(Float64(0.5 * Float64(1.0 + Float64(1.0 / hypot(1.0, x)))))) end
% Herbie input program (repeated listing): 1 - sqrt(0.5 * (1 + 1/hypot(1, x))).
function tmp = code(x) tmp = 1.0 - sqrt((0.5 * (1.0 + (1.0 / hypot(1.0, x))))); end
(* Herbie input program (repeated listing): 1 - sqrt(0.5*(1 + 1/hypot(1, x))); hypot expanded to Sqrt[1^2 + x^2]. *)
code[x_] := N[(1.0 - N[Sqrt[N[(0.5 * N[(1.0 + N[(1.0 / N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
1 - \sqrt{0.5 \cdot \left(1 + \frac{1}{\mathsf{hypot}\left(1, x\right)}\right)}
\end{array}
(FPCore (x)
:precision binary64
(let* ((t_0 (/ 0.5 (hypot 1.0 x)))
(t_1 (+ 0.5 t_0))
(t_2 (+ (pow t_1 0.5) (+ t_0 1.5)))
(t_3 (/ (pow t_1 1.5) t_2))
(t_4 (/ 1.0 t_2)))
(/
(- (pow t_4 3.0) (pow t_3 3.0))
(+ (* t_4 t_4) (+ (* t_3 t_3) (* t_4 t_3))))))
/* Herbie alternative: the input 1 - sqrt(0.5*(1 + 1/hypot(1, x))) rewritten as a
 * difference-of-cubes ratio (t_4^3 - t_3^3) / (t_4^2 + t_3^2 + t_4*t_3) over the
 * intermediates defined in the FPCore let* above. */
double code(double x) {
double t_0 = 0.5 / hypot(1.0, x);
double t_1 = 0.5 + t_0;
double t_2 = pow(t_1, 0.5) + (t_0 + 1.5);
double t_3 = pow(t_1, 1.5) / t_2;
double t_4 = 1.0 / t_2;
return (pow(t_4, 3.0) - pow(t_3, 3.0)) / ((t_4 * t_4) + ((t_3 * t_3) + (t_4 * t_3)));
}
// Herbie alternative: input rewritten as a difference-of-cubes ratio
// (t_4^3 - t_3^3) / (t_4^2 + t_3^2 + t_4*t_3); see the FPCore let* above.
public static double code(double x) {
double t_0 = 0.5 / Math.hypot(1.0, x);
double t_1 = 0.5 + t_0;
double t_2 = Math.pow(t_1, 0.5) + (t_0 + 1.5);
double t_3 = Math.pow(t_1, 1.5) / t_2;
double t_4 = 1.0 / t_2;
return (Math.pow(t_4, 3.0) - Math.pow(t_3, 3.0)) / ((t_4 * t_4) + ((t_3 * t_3) + (t_4 * t_3)));
}
def code(x):
    """Herbie alternative: difference-of-cubes form of 1 - sqrt(0.5*(1 + 1/hypot(1, x))).

    The original report collapsed this body onto one line, which is invalid
    Python; the statements are restored here one per line, unchanged.
    """
    t_0 = 0.5 / math.hypot(1.0, x)
    t_1 = 0.5 + t_0
    t_2 = math.pow(t_1, 0.5) + (t_0 + 1.5)
    t_3 = math.pow(t_1, 1.5) / t_2
    t_4 = 1.0 / t_2
    return (math.pow(t_4, 3.0) - math.pow(t_3, 3.0)) / ((t_4 * t_4) + ((t_3 * t_3) + (t_4 * t_3)))
# Herbie alternative: difference-of-cubes form of 1 - sqrt(0.5*(1 + 1/hypot(1, x))).
# The report collapsed the body onto one line without statement separators, which
# Julia cannot parse; statements are restored one per line, unchanged.
function code(x)
    t_0 = Float64(0.5 / hypot(1.0, x))
    t_1 = Float64(0.5 + t_0)
    t_2 = Float64((t_1 ^ 0.5) + Float64(t_0 + 1.5))
    t_3 = Float64((t_1 ^ 1.5) / t_2)
    t_4 = Float64(1.0 / t_2)
    return Float64(Float64((t_4 ^ 3.0) - (t_3 ^ 3.0)) / Float64(Float64(t_4 * t_4) + Float64(Float64(t_3 * t_3) + Float64(t_4 * t_3))))
end
% Herbie alternative: difference-of-cubes form of 1 - sqrt(0.5*(1 + 1/hypot(1, x))).
function tmp = code(x) t_0 = 0.5 / hypot(1.0, x); t_1 = 0.5 + t_0; t_2 = (t_1 ^ 0.5) + (t_0 + 1.5); t_3 = (t_1 ^ 1.5) / t_2; t_4 = 1.0 / t_2; tmp = ((t_4 ^ 3.0) - (t_3 ^ 3.0)) / ((t_4 * t_4) + ((t_3 * t_3) + (t_4 * t_3))); end
(* Herbie alternative: difference-of-cubes form of 1 - sqrt(0.5*(1 + 1/hypot(1, x))); hypot expanded to Sqrt[1^2 + x^2]. *)
code[x_] := Block[{t$95$0 = N[(0.5 / N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(0.5 + t$95$0), $MachinePrecision]}, Block[{t$95$2 = N[(N[Power[t$95$1, 0.5], $MachinePrecision] + N[(t$95$0 + 1.5), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$3 = N[(N[Power[t$95$1, 1.5], $MachinePrecision] / t$95$2), $MachinePrecision]}, Block[{t$95$4 = N[(1.0 / t$95$2), $MachinePrecision]}, N[(N[(N[Power[t$95$4, 3.0], $MachinePrecision] - N[Power[t$95$3, 3.0], $MachinePrecision]), $MachinePrecision] / N[(N[(t$95$4 * t$95$4), $MachinePrecision] + N[(N[(t$95$3 * t$95$3), $MachinePrecision] + N[(t$95$4 * t$95$3), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{0.5}{\mathsf{hypot}\left(1, x\right)}\\
t_1 := 0.5 + t\_0\\
t_2 := {t\_1}^{0.5} + \left(t\_0 + 1.5\right)\\
t_3 := \frac{{t\_1}^{1.5}}{t\_2}\\
t_4 := \frac{1}{t\_2}\\
\frac{{t\_4}^{3} - {t\_3}^{3}}{t\_4 \cdot t\_4 + \left(t\_3 \cdot t\_3 + t\_4 \cdot t\_3\right)}
\end{array}
\end{array}
Initial program: 98.4%
--lowering--.f64: N/A
sqrt-lowering-sqrt.f64: N/A
distribute-rgt-in: N/A
metadata-eval: N/A
+-lowering-+.f64: N/A
associate-*l/: N/A
metadata-eval: N/A
/-lowering-/.f64: N/A
hypot-undefine: N/A
hypot-lowering-hypot.f64: 98.4%
Simplified: 98.4%
Applied egg-rr: 99.9%
Final simplification: 99.9%
(FPCore (x) :precision binary64 (let* ((t_0 (/ 0.5 (hypot 1.0 x))) (t_1 (+ 0.5 t_0))) (/ (- 1.0 (pow t_1 1.5)) (+ (sqrt t_1) (+ t_0 1.5)))))
/* Herbie alternative: (1 - t_1^1.5) / (sqrt(t_1) + t_0 + 1.5)
 * with t_0 = 0.5/hypot(1, x) and t_1 = 0.5 + t_0. */
double code(double x) {
double t_0 = 0.5 / hypot(1.0, x);
double t_1 = 0.5 + t_0;
return (1.0 - pow(t_1, 1.5)) / (sqrt(t_1) + (t_0 + 1.5));
}
// Herbie alternative: (1 - t_1^1.5) / (sqrt(t_1) + t_0 + 1.5)
// with t_0 = 0.5/hypot(1, x) and t_1 = 0.5 + t_0.
public static double code(double x) {
double t_0 = 0.5 / Math.hypot(1.0, x);
double t_1 = 0.5 + t_0;
return (1.0 - Math.pow(t_1, 1.5)) / (Math.sqrt(t_1) + (t_0 + 1.5));
}
def code(x):
    """Herbie alternative: (1 - t_1**1.5) / (sqrt(t_1) + t_0 + 1.5).

    t_0 = 0.5/hypot(1, x), t_1 = 0.5 + t_0.  The report collapsed this body
    onto one line, which is invalid Python; statements restored unchanged.
    """
    t_0 = 0.5 / math.hypot(1.0, x)
    t_1 = 0.5 + t_0
    return (1.0 - math.pow(t_1, 1.5)) / (math.sqrt(t_1) + (t_0 + 1.5))
# Herbie alternative: (1 - t_1^1.5) / (sqrt(t_1) + t_0 + 1.5) with
# t_0 = 0.5/hypot(1, x), t_1 = 0.5 + t_0.  The report collapsed the body onto
# one line without separators, which Julia cannot parse; restored unchanged.
function code(x)
    t_0 = Float64(0.5 / hypot(1.0, x))
    t_1 = Float64(0.5 + t_0)
    return Float64(Float64(1.0 - (t_1 ^ 1.5)) / Float64(sqrt(t_1) + Float64(t_0 + 1.5)))
end
% Herbie alternative: (1 - t_1^1.5) / (sqrt(t_1) + t_0 + 1.5), t_0 = 0.5/hypot(1, x), t_1 = 0.5 + t_0.
function tmp = code(x) t_0 = 0.5 / hypot(1.0, x); t_1 = 0.5 + t_0; tmp = (1.0 - (t_1 ^ 1.5)) / (sqrt(t_1) + (t_0 + 1.5)); end
(* Herbie alternative: (1 - t_1^1.5) / (Sqrt[t_1] + t_0 + 1.5); hypot expanded to Sqrt[1^2 + x^2]. *)
code[x_] := Block[{t$95$0 = N[(0.5 / N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(0.5 + t$95$0), $MachinePrecision]}, N[(N[(1.0 - N[Power[t$95$1, 1.5], $MachinePrecision]), $MachinePrecision] / N[(N[Sqrt[t$95$1], $MachinePrecision] + N[(t$95$0 + 1.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{0.5}{\mathsf{hypot}\left(1, x\right)}\\
t_1 := 0.5 + t\_0\\
\frac{1 - {t\_1}^{1.5}}{\sqrt{t\_1} + \left(t\_0 + 1.5\right)}
\end{array}
\end{array}
Initial program 98.4%
--lowering--.f64N/A
sqrt-lowering-sqrt.f64N/A
distribute-rgt-inN/A
metadata-evalN/A
+-lowering-+.f64N/A
associate-*l/N/A
metadata-evalN/A
/-lowering-/.f64N/A
hypot-undefineN/A
hypot-lowering-hypot.f6498.4%
Simplified98.4%
flip3--N/A
/-lowering-/.f64N/A
Applied egg-rr99.9%
/-lowering-/.f64N/A
--lowering--.f64N/A
pow-lowering-pow.f64N/A
+-lowering-+.f64N/A
/-lowering-/.f64N/A
hypot-undefineN/A
hypot-lowering-hypot.f64N/A
+-lowering-+.f64N/A
Applied egg-rr99.9%
(FPCore (x) :precision binary64 (let* ((t_0 (/ 0.5 (hypot 1.0 x)))) (/ (- 0.5 t_0) (+ 1.0 (pow (+ 0.5 t_0) 0.5)))))
/* Herbie alternative: (0.5 - t_0) / (1 + sqrt(0.5 + t_0)) with t_0 = 0.5/hypot(1, x). */
double code(double x) {
double t_0 = 0.5 / hypot(1.0, x);
return (0.5 - t_0) / (1.0 + pow((0.5 + t_0), 0.5));
}
// Herbie alternative: (0.5 - t_0) / (1 + sqrt(0.5 + t_0)) with t_0 = 0.5/hypot(1, x).
public static double code(double x) {
double t_0 = 0.5 / Math.hypot(1.0, x);
return (0.5 - t_0) / (1.0 + Math.pow((0.5 + t_0), 0.5));
}
def code(x):
    """Herbie alternative: (0.5 - t_0) / (1 + sqrt(0.5 + t_0)), t_0 = 0.5/hypot(1, x).

    The report collapsed this body onto one line, which is invalid Python;
    statements restored one per line, unchanged.
    """
    t_0 = 0.5 / math.hypot(1.0, x)
    return (0.5 - t_0) / (1.0 + math.pow((0.5 + t_0), 0.5))
# Herbie alternative: (0.5 - t_0) / (1 + (0.5 + t_0)^0.5), t_0 = 0.5/hypot(1, x).
# The report collapsed the body onto one line without separators, which Julia
# cannot parse; restored unchanged.
function code(x)
    t_0 = Float64(0.5 / hypot(1.0, x))
    return Float64(Float64(0.5 - t_0) / Float64(1.0 + (Float64(0.5 + t_0) ^ 0.5)))
end
% Herbie alternative: (0.5 - t_0) / (1 + (0.5 + t_0)^0.5), t_0 = 0.5/hypot(1, x).
function tmp = code(x) t_0 = 0.5 / hypot(1.0, x); tmp = (0.5 - t_0) / (1.0 + ((0.5 + t_0) ^ 0.5)); end
(* Herbie alternative: (0.5 - t_0) / (1 + (0.5 + t_0)^0.5); hypot expanded to Sqrt[1^2 + x^2]. *)
code[x_] := Block[{t$95$0 = N[(0.5 / N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision]), $MachinePrecision]}, N[(N[(0.5 - t$95$0), $MachinePrecision] / N[(1.0 + N[Power[N[(0.5 + t$95$0), $MachinePrecision], 0.5], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{0.5}{\mathsf{hypot}\left(1, x\right)}\\
\frac{0.5 - t\_0}{1 + {\left(0.5 + t\_0\right)}^{0.5}}
\end{array}
\end{array}
Initial program 98.4%
--lowering--.f64N/A
sqrt-lowering-sqrt.f64N/A
distribute-rgt-inN/A
metadata-evalN/A
+-lowering-+.f64N/A
associate-*l/N/A
metadata-evalN/A
/-lowering-/.f64N/A
hypot-undefineN/A
hypot-lowering-hypot.f6498.4%
Simplified98.4%
flip--N/A
metadata-evalN/A
rem-square-sqrtN/A
associate--r+N/A
metadata-evalN/A
/-lowering-/.f64N/A
--lowering--.f64N/A
/-lowering-/.f64N/A
hypot-undefineN/A
hypot-lowering-hypot.f64N/A
+-lowering-+.f64N/A
Applied egg-rr99.9%
(FPCore (x) :precision binary64 (- 1.0 (sqrt (+ 0.5 (/ 0.5 (hypot 1.0 x))))))
/* Herbie alternative: 1 - sqrt(0.5 + 0.5/hypot(1, x)) — the 0.5 factor distributed into the sum. */
double code(double x) {
return 1.0 - sqrt((0.5 + (0.5 / hypot(1.0, x))));
}
// Herbie alternative: 1 - sqrt(0.5 + 0.5/hypot(1, x)) — the 0.5 factor distributed into the sum.
public static double code(double x) {
return 1.0 - Math.sqrt((0.5 + (0.5 / Math.hypot(1.0, x))));
}
# Herbie alternative: 1 - sqrt(0.5 + 0.5/hypot(1, x)) — the 0.5 factor distributed into the sum.
def code(x): return 1.0 - math.sqrt((0.5 + (0.5 / math.hypot(1.0, x))))
# Herbie alternative: 1 - sqrt(0.5 + 0.5/hypot(1, x)) in Float64.
function code(x) return Float64(1.0 - sqrt(Float64(0.5 + Float64(0.5 / hypot(1.0, x))))) end
% Herbie alternative: 1 - sqrt(0.5 + 0.5/hypot(1, x)).
function tmp = code(x) tmp = 1.0 - sqrt((0.5 + (0.5 / hypot(1.0, x)))); end
(* Herbie alternative: 1 - Sqrt[0.5 + 0.5/hypot(1, x)]; hypot expanded to Sqrt[1^2 + x^2]. *)
code[x_] := N[(1.0 - N[Sqrt[N[(0.5 + N[(0.5 / N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
1 - \sqrt{0.5 + \frac{0.5}{\mathsf{hypot}\left(1, x\right)}}
\end{array}
Initial program 98.4%
--lowering--.f64N/A
sqrt-lowering-sqrt.f64N/A
distribute-rgt-inN/A
metadata-evalN/A
+-lowering-+.f64N/A
associate-*l/N/A
metadata-evalN/A
/-lowering-/.f64N/A
hypot-undefineN/A
hypot-lowering-hypot.f6498.4%
Simplified98.4%
(FPCore (x) :precision binary64 (/ 0.5 (+ 1.0 (sqrt 0.5))))
/* Herbie alternative from Taylor expansion in x around inf (per the log below):
 * the constant 0.5 / (1 + sqrt(0.5)); the parameter x is unused. */
double code(double x) {
return 0.5 / (1.0 + sqrt(0.5));
}
! Herbie alternative from Taylor expansion in x around inf:
! the constant 0.5 / (1 + sqrt(0.5)); the argument x is unused.
real(8) function code(x)
real(8), intent (in) :: x
code = 0.5d0 / (1.0d0 + sqrt(0.5d0))
end function
// Herbie alternative from Taylor expansion in x around inf:
// the constant 0.5 / (1 + sqrt(0.5)); the parameter x is unused.
public static double code(double x) {
return 0.5 / (1.0 + Math.sqrt(0.5));
}
# Herbie alternative from Taylor expansion in x around inf: constant 0.5/(1 + sqrt(0.5)); x is unused.
def code(x): return 0.5 / (1.0 + math.sqrt(0.5))
# Herbie alternative from Taylor expansion in x around inf: constant 0.5/(1 + sqrt(0.5)); x is unused.
function code(x) return Float64(0.5 / Float64(1.0 + sqrt(0.5))) end
% Herbie alternative from Taylor expansion in x around inf: constant 0.5/(1 + sqrt(0.5)); x is unused.
function tmp = code(x) tmp = 0.5 / (1.0 + sqrt(0.5)); end
(* Herbie alternative from Taylor expansion in x around inf: constant 0.5/(1 + Sqrt[0.5]); x is unused. *)
code[x_] := N[(0.5 / N[(1.0 + N[Sqrt[0.5], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{0.5}{1 + \sqrt{0.5}}
\end{array}
Initial program 98.4%
--lowering--.f64N/A
sqrt-lowering-sqrt.f64N/A
distribute-rgt-inN/A
metadata-evalN/A
+-lowering-+.f64N/A
associate-*l/N/A
metadata-evalN/A
/-lowering-/.f64N/A
hypot-undefineN/A
hypot-lowering-hypot.f6498.4%
Simplified98.4%
Taylor expanded in x around inf
--lowering--.f64N/A
sqrt-lowering-sqrt.f6495.9%
Simplified95.9%
flip--N/A
metadata-evalN/A
rem-square-sqrtN/A
metadata-evalN/A
/-lowering-/.f64N/A
+-lowering-+.f64N/A
sqrt-lowering-sqrt.f6497.3%
Applied egg-rr97.3%
(FPCore (x) :precision binary64 (- 1.0 (sqrt 0.5)))
/* Herbie alternative from Taylor expansion in x around inf:
 * the constant 1 - sqrt(0.5); the parameter x is unused. */
double code(double x) {
return 1.0 - sqrt(0.5);
}
! Herbie alternative from Taylor expansion in x around inf:
! the constant 1 - sqrt(0.5); the argument x is unused.
real(8) function code(x)
real(8), intent (in) :: x
code = 1.0d0 - sqrt(0.5d0)
end function
// Herbie alternative from Taylor expansion in x around inf:
// the constant 1 - sqrt(0.5); the parameter x is unused.
public static double code(double x) {
return 1.0 - Math.sqrt(0.5);
}
# Herbie alternative from Taylor expansion in x around inf: constant 1 - sqrt(0.5); x is unused.
def code(x): return 1.0 - math.sqrt(0.5)
# Herbie alternative from Taylor expansion in x around inf: constant 1 - sqrt(0.5); x is unused.
function code(x) return Float64(1.0 - sqrt(0.5)) end
% Herbie alternative from Taylor expansion in x around inf: constant 1 - sqrt(0.5); x is unused.
function tmp = code(x) tmp = 1.0 - sqrt(0.5); end
(* Herbie alternative from Taylor expansion in x around inf: constant 1 - Sqrt[0.5]; x is unused. *)
code[x_] := N[(1.0 - N[Sqrt[0.5], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
1 - \sqrt{0.5}
\end{array}
Initial program 98.4%
--lowering--.f64N/A
sqrt-lowering-sqrt.f64N/A
distribute-rgt-inN/A
metadata-evalN/A
+-lowering-+.f64N/A
associate-*l/N/A
metadata-evalN/A
/-lowering-/.f64N/A
hypot-undefineN/A
hypot-lowering-hypot.f6498.4%
Simplified98.4%
Taylor expanded in x around inf
--lowering--.f64N/A
sqrt-lowering-sqrt.f6495.9%
Simplified95.9%
(FPCore (x) :precision binary64 (let* ((t_0 (* x (* x 0.125)))) (/ (+ 1.0 (/ 1.0 (* (+ 1.0 t_0) (- -1.0 t_0)))) (- 1.0 -1.0))))
/* Herbie alternative from Taylor expansion in x around 0:
 * t_0 = x^2/8; value (1 + 1/((1 + t_0)*(-1 - t_0))) / (1 - -1). */
double code(double x) {
double t_0 = x * (x * 0.125);
return (1.0 + (1.0 / ((1.0 + t_0) * (-1.0 - t_0)))) / (1.0 - -1.0);
}
! Herbie alternative from Taylor expansion in x around 0:
! t_0 = x^2/8; value (1 + 1/((1 + t_0)*(-1 - t_0))) / (1 - (-1)).
real(8) function code(x)
real(8), intent (in) :: x
real(8) :: t_0
t_0 = x * (x * 0.125d0)
code = (1.0d0 + (1.0d0 / ((1.0d0 + t_0) * ((-1.0d0) - t_0)))) / (1.0d0 - (-1.0d0))
end function
// Herbie alternative from Taylor expansion in x around 0:
// t_0 = x^2/8; value (1 + 1/((1 + t_0)*(-1 - t_0))) / (1 - -1).
public static double code(double x) {
double t_0 = x * (x * 0.125);
return (1.0 + (1.0 / ((1.0 + t_0) * (-1.0 - t_0)))) / (1.0 - -1.0);
}
def code(x):
    """Herbie alternative from Taylor expansion in x around 0.

    t_0 = x**2/8; value (1 + 1/((1 + t_0)*(-1 - t_0))) / (1 - -1).
    The report collapsed this body onto one line, which is invalid Python;
    statements restored one per line, unchanged.
    """
    t_0 = x * (x * 0.125)
    return (1.0 + (1.0 / ((1.0 + t_0) * (-1.0 - t_0)))) / (1.0 - -1.0)
# Herbie alternative from Taylor expansion in x around 0:
# t_0 = x^2/8; value (1 + 1/((1 + t_0)*(-1 - t_0))) / (1 - -1).
# The report collapsed the body onto one line without separators, which Julia
# cannot parse; restored unchanged.
function code(x)
    t_0 = Float64(x * Float64(x * 0.125))
    return Float64(Float64(1.0 + Float64(1.0 / Float64(Float64(1.0 + t_0) * Float64(-1.0 - t_0)))) / Float64(1.0 - -1.0))
end
% Herbie alternative from Taylor expansion in x around 0: t_0 = x^2/8; (1 + 1/((1 + t_0)*(-1 - t_0))) / (1 - -1).
function tmp = code(x) t_0 = x * (x * 0.125); tmp = (1.0 + (1.0 / ((1.0 + t_0) * (-1.0 - t_0)))) / (1.0 - -1.0); end
(* Herbie alternative from Taylor expansion in x around 0: t_0 = x^2/8; (1 + 1/((1 + t_0)(-1 - t_0))) / (1 - -1). *)
code[x_] := Block[{t$95$0 = N[(x * N[(x * 0.125), $MachinePrecision]), $MachinePrecision]}, N[(N[(1.0 + N[(1.0 / N[(N[(1.0 + t$95$0), $MachinePrecision] * N[(-1.0 - t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(1.0 - -1.0), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := x \cdot \left(x \cdot 0.125\right)\\
\frac{1 + \frac{1}{\left(1 + t\_0\right) \cdot \left(-1 - t\_0\right)}}{1 - -1}
\end{array}
\end{array}
Initial program 98.4%
--lowering--.f64N/A
sqrt-lowering-sqrt.f64N/A
distribute-rgt-inN/A
metadata-evalN/A
+-lowering-+.f64N/A
associate-*l/N/A
metadata-evalN/A
/-lowering-/.f64N/A
hypot-undefineN/A
hypot-lowering-hypot.f6498.4%
Simplified98.4%
flip3-+N/A
clear-numN/A
sqrt-divN/A
metadata-evalN/A
/-lowering-/.f64N/A
Applied egg-rr98.4%
Taylor expanded in x around 0
+-lowering-+.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6417.5%
Simplified17.5%
sub-negN/A
flip-+N/A
sqr-negN/A
/-lowering-/.f64N/A
Applied egg-rr17.5%
Taylor expanded in x around 0
Simplified19.3%
Final simplification19.3%
(FPCore (x) :precision binary64 (+ 1.0 (/ 1.0 (- -1.0 (* 0.125 (* x x))))))
/* Herbie alternative from Taylor expansion in x around 0: 1 + 1/(-1 - x^2/8). */
double code(double x) {
return 1.0 + (1.0 / (-1.0 - (0.125 * (x * x))));
}
! Herbie alternative from Taylor expansion in x around 0: 1 + 1/(-1 - x^2/8).
real(8) function code(x)
real(8), intent (in) :: x
code = 1.0d0 + (1.0d0 / ((-1.0d0) - (0.125d0 * (x * x))))
end function
// Herbie alternative from Taylor expansion in x around 0: 1 + 1/(-1 - x^2/8).
public static double code(double x) {
return 1.0 + (1.0 / (-1.0 - (0.125 * (x * x))));
}
# Herbie alternative from Taylor expansion in x around 0: 1 + 1/(-1 - x**2/8).
def code(x): return 1.0 + (1.0 / (-1.0 - (0.125 * (x * x))))
# Herbie alternative from Taylor expansion in x around 0: 1 + 1/(-1 - x^2/8).
function code(x) return Float64(1.0 + Float64(1.0 / Float64(-1.0 - Float64(0.125 * Float64(x * x))))) end
% Herbie alternative from Taylor expansion in x around 0: 1 + 1/(-1 - x^2/8).
function tmp = code(x) tmp = 1.0 + (1.0 / (-1.0 - (0.125 * (x * x)))); end
(* Herbie alternative from Taylor expansion in x around 0: 1 + 1/(-1 - x^2/8). *)
code[x_] := N[(1.0 + N[(1.0 / N[(-1.0 - N[(0.125 * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
1 + \frac{1}{-1 - 0.125 \cdot \left(x \cdot x\right)}
\end{array}
Initial program 98.4%
--lowering--.f64N/A
sqrt-lowering-sqrt.f64N/A
distribute-rgt-inN/A
metadata-evalN/A
+-lowering-+.f64N/A
associate-*l/N/A
metadata-evalN/A
/-lowering-/.f64N/A
hypot-undefineN/A
hypot-lowering-hypot.f6498.4%
Simplified98.4%
flip3-+N/A
clear-numN/A
sqrt-divN/A
metadata-evalN/A
/-lowering-/.f64N/A
Applied egg-rr98.4%
Taylor expanded in x around 0
+-lowering-+.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6417.5%
Simplified17.5%
Final simplification17.5%
(FPCore (x) :precision binary64 1.0)
/* Degenerate Herbie alternative: the constant 1.0; the parameter x is unused. */
double code(double x) {
return 1.0;
}
! Degenerate Herbie alternative: the constant 1.0; the argument x is unused.
real(8) function code(x)
real(8), intent (in) :: x
code = 1.0d0
end function
// Degenerate Herbie alternative: the constant 1.0; the parameter x is unused.
public static double code(double x) {
return 1.0;
}
# Degenerate Herbie alternative: the constant 1.0; x is unused.
def code(x): return 1.0
# Degenerate Herbie alternative: the constant 1.0; x is unused.
function code(x) return 1.0 end
% Degenerate Herbie alternative: the constant 1.0; x is unused.
function tmp = code(x) tmp = 1.0; end
(* Degenerate Herbie alternative: the constant 1.0; x is unused. *)
code[x_] := 1.0
\begin{array}{l}
\\
1
\end{array}
Initial program 98.4%
--lowering--.f64N/A
sqrt-lowering-sqrt.f64N/A
distribute-rgt-inN/A
metadata-evalN/A
+-lowering-+.f64N/A
associate-*l/N/A
metadata-evalN/A
/-lowering-/.f64N/A
hypot-undefineN/A
hypot-lowering-hypot.f6498.4%
Simplified98.4%
flip3-+N/A
clear-numN/A
sqrt-divN/A
metadata-evalN/A
/-lowering-/.f64N/A
Applied egg-rr98.4%
Taylor expanded in x around 0
+-lowering-+.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6417.5%
Simplified17.5%
Taylor expanded in x around inf
Simplified17.4%
(FPCore (x) :precision binary64 0.0)
/* Degenerate Herbie alternative: the constant 0.0; the parameter x is unused. */
double code(double x) {
return 0.0;
}
! Degenerate Herbie alternative: the constant 0.0; the argument x is unused.
real(8) function code(x)
real(8), intent (in) :: x
code = 0.0d0
end function
// Degenerate Herbie alternative: the constant 0.0; the parameter x is unused.
public static double code(double x) {
return 0.0;
}
# Degenerate Herbie alternative: the constant 0.0; x is unused.
def code(x): return 0.0
# Degenerate Herbie alternative: the constant 0.0; x is unused.
function code(x) return 0.0 end
% Degenerate Herbie alternative: the constant 0.0; x is unused.
function tmp = code(x) tmp = 0.0; end
(* Degenerate Herbie alternative: the constant 0.0; x is unused. *)
code[x_] := 0.0
\begin{array}{l}
\\
0
\end{array}
Initial program 98.4%
--lowering--.f64N/A
sqrt-lowering-sqrt.f64N/A
distribute-rgt-inN/A
metadata-evalN/A
+-lowering-+.f64N/A
associate-*l/N/A
metadata-evalN/A
/-lowering-/.f64N/A
hypot-undefineN/A
hypot-lowering-hypot.f6498.4%
Simplified98.4%
Taylor expanded in x around 0
Simplified3.1%
metadata-eval3.1%
Applied egg-rr3.1%
herbie shell --seed 2024161
(FPCore (x)
:name "Given's Rotation SVD example, simplified"
:precision binary64
(- 1.0 (sqrt (* 0.5 (+ 1.0 (/ 1.0 (hypot 1.0 x)))))))