
(FPCore (x) :precision binary64 (- 1.0 (sqrt (* 0.5 (+ 1.0 (/ 1.0 (hypot 1.0 x)))))))
double code(double x) {
return 1.0 - sqrt((0.5 * (1.0 + (1.0 / hypot(1.0, x)))));
}
/** Naive binary64 evaluation of 1 - sqrt(0.5 * (1 + 1/hypot(1, x))). */
public static double code(double x) {
    double r = Math.hypot(1.0, x);
    double radicand = 0.5 * (1.0 + (1.0 / r));
    return 1.0 - Math.sqrt(radicand);
}
def code(x):
    """Naive binary64 evaluation of 1 - sqrt(0.5 * (1 + 1/hypot(1, x)))."""
    r = math.hypot(1.0, x)
    radicand = 0.5 * (1.0 + (1.0 / r))
    return 1.0 - math.sqrt(radicand)
# Naive evaluation of 1 - sqrt(0.5 * (1 + 1/hypot(1, x))),
# with every intermediate explicitly rounded to Float64.
function code(x)
	r = hypot(1.0, x)
	inner = Float64(1.0 + Float64(1.0 / r))
	return Float64(1.0 - sqrt(Float64(0.5 * inner)))
end
% Naive evaluation of 1 - sqrt(0.5 * (1 + 1/hypot(1, x))).
% NOTE: the original was collapsed onto one line, which is invalid MATLAB
% (the function declaration must end at a newline); reformatted here.
function tmp = code(x)
	tmp = 1.0 - sqrt((0.5 * (1.0 + (1.0 / hypot(1.0, x)))));
end
code[x_] := N[(1.0 - N[Sqrt[N[(0.5 * N[(1.0 + N[(1.0 / N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
1 - \sqrt{0.5 \cdot \left(1 + \frac{1}{\mathsf{hypot}\left(1, x\right)}\right)}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 10 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x) :precision binary64 (- 1.0 (sqrt (* 0.5 (+ 1.0 (/ 1.0 (hypot 1.0 x)))))))
double code(double x) {
return 1.0 - sqrt((0.5 * (1.0 + (1.0 / hypot(1.0, x)))));
}
/** Reference implementation: 1 - sqrt(0.5 * (1 + 1/hypot(1, x))) in binary64. */
public static double code(double x) {
    double recip = 1.0 / Math.hypot(1.0, x);
    return 1.0 - Math.sqrt(0.5 * (1.0 + recip));
}
def code(x):
    """Reference implementation: 1 - sqrt(0.5 * (1 + 1/hypot(1, x)))."""
    recip = 1.0 / math.hypot(1.0, x)
    return 1.0 - math.sqrt(0.5 * (1.0 + recip))
# Reference implementation: 1 - sqrt(0.5 * (1 + 1/hypot(1, x))),
# each intermediate rounded to Float64.
function code(x)
	recip = Float64(1.0 / hypot(1.0, x))
	scaled = Float64(0.5 * Float64(1.0 + recip))
	return Float64(1.0 - sqrt(scaled))
end
% 1 - sqrt(0.5 * (1 + 1/hypot(1, x))).
% Reformatted: the collapsed one-line form is invalid MATLAB syntax.
function tmp = code(x)
	tmp = 1.0 - sqrt((0.5 * (1.0 + (1.0 / hypot(1.0, x)))));
end
code[x_] := N[(1.0 - N[Sqrt[N[(0.5 * N[(1.0 + N[(1.0 / N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
1 - \sqrt{0.5 \cdot \left(1 + \frac{1}{\mathsf{hypot}\left(1, x\right)}\right)}
\end{array}
(FPCore (x)
:precision binary64
(if (<= (hypot 1.0 x) 1.001)
(* (* x x) (fma x (* x (fma x (* x 0.0673828125) -0.0859375)) 0.125))
(/
(fma (sqrt (/ 1.0 (fma x x 1.0))) 0.5 -0.5)
(- -1.0 (sqrt (+ 0.5 (/ 0.5 (sqrt (fma x x 1.0)))))))))
double code(double x) {
double tmp;
if (hypot(1.0, x) <= 1.001) {
tmp = (x * x) * fma(x, (x * fma(x, (x * 0.0673828125), -0.0859375)), 0.125);
} else {
tmp = fma(sqrt((1.0 / fma(x, x, 1.0))), 0.5, -0.5) / (-1.0 - sqrt((0.5 + (0.5 / sqrt(fma(x, x, 1.0))))));
}
return tmp;
}
# Herbie rewrite: Taylor polynomial near x = 0, rearranged quotient elsewhere.
# Reformatted: the collapsed one-line form was not valid Julia syntax
# (statements require newlines or semicolons between them).
function code(x)
	tmp = 0.0
	if (hypot(1.0, x) <= 1.001)
		tmp = Float64(Float64(x * x) * fma(x, Float64(x * fma(x, Float64(x * 0.0673828125), -0.0859375)), 0.125))
	else
		tmp = Float64(fma(sqrt(Float64(1.0 / fma(x, x, 1.0))), 0.5, -0.5) / Float64(-1.0 - sqrt(Float64(0.5 + Float64(0.5 / sqrt(fma(x, x, 1.0)))))))
	end
	return tmp
end
code[x_] := If[LessEqual[N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision], 1.001], N[(N[(x * x), $MachinePrecision] * N[(x * N[(x * N[(x * N[(x * 0.0673828125), $MachinePrecision] + -0.0859375), $MachinePrecision]), $MachinePrecision] + 0.125), $MachinePrecision]), $MachinePrecision], N[(N[(N[Sqrt[N[(1.0 / N[(x * x + 1.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] * 0.5 + -0.5), $MachinePrecision] / N[(-1.0 - N[Sqrt[N[(0.5 + N[(0.5 / N[Sqrt[N[(x * x + 1.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\mathsf{hypot}\left(1, x\right) \leq 1.001:\\
\;\;\;\;\left(x \cdot x\right) \cdot \mathsf{fma}\left(x, x \cdot \mathsf{fma}\left(x, x \cdot 0.0673828125, -0.0859375\right), 0.125\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{\mathsf{fma}\left(\sqrt{\frac{1}{\mathsf{fma}\left(x, x, 1\right)}}, 0.5, -0.5\right)}{-1 - \sqrt{0.5 + \frac{0.5}{\sqrt{\mathsf{fma}\left(x, x, 1\right)}}}}\\
\end{array}
\end{array}
if (hypot.f64 #s(literal 1 binary64) x) < 1.0009999999999999Initial program 47.4%
Applied egg-rr47.4%
Taylor expanded in x around 0
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
+-commutativeN/A
unpow2N/A
associate-*l*N/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f64N/A
sub-negN/A
*-commutativeN/A
unpow2N/A
associate-*l*N/A
metadata-evalN/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f64100.0
Simplified100.0%
if 1.0009999999999999 < (hypot.f64 #s(literal 1 binary64) x) Initial program 98.4%
Applied egg-rr99.9%
clear-numN/A
associate-/r/N/A
metadata-evalN/A
sqrt-divN/A
accelerator-lowering-fma.f64N/A
sqrt-lowering-sqrt.f64N/A
/-lowering-/.f64N/A
accelerator-lowering-fma.f6499.9
Applied egg-rr99.9%
unsub-negN/A
+-commutativeN/A
--lowering--.f64N/A
+-commutativeN/A
sqrt-lowering-sqrt.f64N/A
+-lowering-+.f64N/A
/-lowering-/.f64N/A
sqrt-lowering-sqrt.f64N/A
accelerator-lowering-fma.f6499.9
Applied egg-rr99.9%
(FPCore (x)
:precision binary64
(let* ((t_0 (/ 0.5 (sqrt (fma x x 1.0)))))
(if (<= (hypot 1.0 x) 1.001)
(* (* x x) (fma x (* x (fma x (* x 0.0673828125) -0.0859375)) 0.125))
(/ (+ -0.5 t_0) (- -1.0 (sqrt (+ 0.5 t_0)))))))
double code(double x) {
double t_0 = 0.5 / sqrt(fma(x, x, 1.0));
double tmp;
if (hypot(1.0, x) <= 1.001) {
tmp = (x * x) * fma(x, (x * fma(x, (x * 0.0673828125), -0.0859375)), 0.125);
} else {
tmp = (-0.5 + t_0) / (-1.0 - sqrt((0.5 + t_0)));
}
return tmp;
}
# Herbie rewrite with shared subterm t_0 = 0.5/sqrt(1 + x^2).
# Reformatted: the collapsed one-line form was not valid Julia syntax.
function code(x)
	t_0 = Float64(0.5 / sqrt(fma(x, x, 1.0)))
	tmp = 0.0
	if (hypot(1.0, x) <= 1.001)
		tmp = Float64(Float64(x * x) * fma(x, Float64(x * fma(x, Float64(x * 0.0673828125), -0.0859375)), 0.125))
	else
		tmp = Float64(Float64(-0.5 + t_0) / Float64(-1.0 - sqrt(Float64(0.5 + t_0))))
	end
	return tmp
end
code[x_] := Block[{t$95$0 = N[(0.5 / N[Sqrt[N[(x * x + 1.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]}, If[LessEqual[N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision], 1.001], N[(N[(x * x), $MachinePrecision] * N[(x * N[(x * N[(x * N[(x * 0.0673828125), $MachinePrecision] + -0.0859375), $MachinePrecision]), $MachinePrecision] + 0.125), $MachinePrecision]), $MachinePrecision], N[(N[(-0.5 + t$95$0), $MachinePrecision] / N[(-1.0 - N[Sqrt[N[(0.5 + t$95$0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{0.5}{\sqrt{\mathsf{fma}\left(x, x, 1\right)}}\\
\mathbf{if}\;\mathsf{hypot}\left(1, x\right) \leq 1.001:\\
\;\;\;\;\left(x \cdot x\right) \cdot \mathsf{fma}\left(x, x \cdot \mathsf{fma}\left(x, x \cdot 0.0673828125, -0.0859375\right), 0.125\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{-0.5 + t\_0}{-1 - \sqrt{0.5 + t\_0}}\\
\end{array}
\end{array}
if (hypot.f64 #s(literal 1 binary64) x) < 1.0009999999999999Initial program 47.4%
Applied egg-rr47.4%
Taylor expanded in x around 0
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
+-commutativeN/A
unpow2N/A
associate-*l*N/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f64N/A
sub-negN/A
*-commutativeN/A
unpow2N/A
associate-*l*N/A
metadata-evalN/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f64100.0
Simplified100.0%
if 1.0009999999999999 < (hypot.f64 #s(literal 1 binary64) x) Initial program 98.4%
Applied egg-rr99.9%
unsub-negN/A
--lowering--.f64N/A
+-commutativeN/A
metadata-evalN/A
sub-negN/A
rem-square-sqrtN/A
sub-negN/A
metadata-evalN/A
+-commutativeN/A
sub-negN/A
metadata-evalN/A
+-commutativeN/A
sqr-negN/A
Applied egg-rr99.9%
Final simplification99.9%
(FPCore (x) :precision binary64 (if (<= (hypot 1.0 x) 1.001) (* (* x x) (fma x (* x (fma x (* x 0.0673828125) -0.0859375)) 0.125)) (- 1.0 (sqrt (fma (sqrt (/ 1.0 (fma x x 1.0))) 0.5 0.5)))))
double code(double x) {
double tmp;
if (hypot(1.0, x) <= 1.001) {
tmp = (x * x) * fma(x, (x * fma(x, (x * 0.0673828125), -0.0859375)), 0.125);
} else {
tmp = 1.0 - sqrt(fma(sqrt((1.0 / fma(x, x, 1.0))), 0.5, 0.5));
}
return tmp;
}
# Herbie rewrite: polynomial near x = 0, fma-based direct form elsewhere.
# Reformatted: the collapsed one-line form was not valid Julia syntax.
function code(x)
	tmp = 0.0
	if (hypot(1.0, x) <= 1.001)
		tmp = Float64(Float64(x * x) * fma(x, Float64(x * fma(x, Float64(x * 0.0673828125), -0.0859375)), 0.125))
	else
		tmp = Float64(1.0 - sqrt(fma(sqrt(Float64(1.0 / fma(x, x, 1.0))), 0.5, 0.5)))
	end
	return tmp
end
code[x_] := If[LessEqual[N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision], 1.001], N[(N[(x * x), $MachinePrecision] * N[(x * N[(x * N[(x * N[(x * 0.0673828125), $MachinePrecision] + -0.0859375), $MachinePrecision]), $MachinePrecision] + 0.125), $MachinePrecision]), $MachinePrecision], N[(1.0 - N[Sqrt[N[(N[Sqrt[N[(1.0 / N[(x * x + 1.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] * 0.5 + 0.5), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\mathsf{hypot}\left(1, x\right) \leq 1.001:\\
\;\;\;\;\left(x \cdot x\right) \cdot \mathsf{fma}\left(x, x \cdot \mathsf{fma}\left(x, x \cdot 0.0673828125, -0.0859375\right), 0.125\right)\\
\mathbf{else}:\\
\;\;\;\;1 - \sqrt{\mathsf{fma}\left(\sqrt{\frac{1}{\mathsf{fma}\left(x, x, 1\right)}}, 0.5, 0.5\right)}\\
\end{array}
\end{array}
if (hypot.f64 #s(literal 1 binary64) x) < 1.0009999999999999Initial program 47.4%
Applied egg-rr47.4%
Taylor expanded in x around 0
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
+-commutativeN/A
unpow2N/A
associate-*l*N/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f64N/A
sub-negN/A
*-commutativeN/A
unpow2N/A
associate-*l*N/A
metadata-evalN/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f64100.0
Simplified100.0%
if 1.0009999999999999 < (hypot.f64 #s(literal 1 binary64) x) Initial program 98.4%
+-commutativeN/A
distribute-rgt-inN/A
metadata-evalN/A
accelerator-lowering-fma.f64N/A
Applied egg-rr98.4%
(FPCore (x) :precision binary64 (if (<= (hypot 1.0 x) 1.001) (* (* x x) (fma x (* x (fma x (* x 0.0673828125) -0.0859375)) 0.125)) (- 1.0 (sqrt (+ 0.5 (/ 0.5 (sqrt (fma x x 1.0))))))))
double code(double x) {
double tmp;
if (hypot(1.0, x) <= 1.001) {
tmp = (x * x) * fma(x, (x * fma(x, (x * 0.0673828125), -0.0859375)), 0.125);
} else {
tmp = 1.0 - sqrt((0.5 + (0.5 / sqrt(fma(x, x, 1.0)))));
}
return tmp;
}
# Herbie rewrite: polynomial near x = 0, direct sqrt form elsewhere.
# Reformatted: the collapsed one-line form was not valid Julia syntax.
function code(x)
	tmp = 0.0
	if (hypot(1.0, x) <= 1.001)
		tmp = Float64(Float64(x * x) * fma(x, Float64(x * fma(x, Float64(x * 0.0673828125), -0.0859375)), 0.125))
	else
		tmp = Float64(1.0 - sqrt(Float64(0.5 + Float64(0.5 / sqrt(fma(x, x, 1.0))))))
	end
	return tmp
end
code[x_] := If[LessEqual[N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision], 1.001], N[(N[(x * x), $MachinePrecision] * N[(x * N[(x * N[(x * N[(x * 0.0673828125), $MachinePrecision] + -0.0859375), $MachinePrecision]), $MachinePrecision] + 0.125), $MachinePrecision]), $MachinePrecision], N[(1.0 - N[Sqrt[N[(0.5 + N[(0.5 / N[Sqrt[N[(x * x + 1.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\mathsf{hypot}\left(1, x\right) \leq 1.001:\\
\;\;\;\;\left(x \cdot x\right) \cdot \mathsf{fma}\left(x, x \cdot \mathsf{fma}\left(x, x \cdot 0.0673828125, -0.0859375\right), 0.125\right)\\
\mathbf{else}:\\
\;\;\;\;1 - \sqrt{0.5 + \frac{0.5}{\sqrt{\mathsf{fma}\left(x, x, 1\right)}}}\\
\end{array}
\end{array}
if (hypot.f64 #s(literal 1 binary64) x) < 1.0009999999999999Initial program 47.4%
Applied egg-rr47.4%
Taylor expanded in x around 0
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
+-commutativeN/A
unpow2N/A
associate-*l*N/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f64N/A
sub-negN/A
*-commutativeN/A
unpow2N/A
associate-*l*N/A
metadata-evalN/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f64100.0
Simplified100.0%
if 1.0009999999999999 < (hypot.f64 #s(literal 1 binary64) x) Initial program 98.4%
+-commutativeN/A
distribute-rgt-inN/A
metadata-evalN/A
+-lowering-+.f64N/A
associate-*l/N/A
metadata-evalN/A
/-lowering-/.f64N/A
rem-square-sqrtN/A
sqrt-lowering-sqrt.f64N/A
rem-square-sqrtN/A
metadata-evalN/A
+-commutativeN/A
accelerator-lowering-fma.f6498.4
Applied egg-rr98.4%
Final simplification99.1%
(FPCore (x) :precision binary64 (if (<= (hypot 1.0 x) 2.0) (* (* x x) (fma x (* x (fma x (* x 0.0673828125) -0.0859375)) 0.125)) (/ 0.5 (+ 1.0 (sqrt 0.5)))))
double code(double x) {
double tmp;
if (hypot(1.0, x) <= 2.0) {
tmp = (x * x) * fma(x, (x * fma(x, (x * 0.0673828125), -0.0859375)), 0.125);
} else {
tmp = 0.5 / (1.0 + sqrt(0.5));
}
return tmp;
}
# Coarser alternative: polynomial for hypot(1,x) <= 2, constant limit beyond.
# Reformatted: the collapsed one-line form was not valid Julia syntax.
function code(x)
	tmp = 0.0
	if (hypot(1.0, x) <= 2.0)
		tmp = Float64(Float64(x * x) * fma(x, Float64(x * fma(x, Float64(x * 0.0673828125), -0.0859375)), 0.125))
	else
		tmp = Float64(0.5 / Float64(1.0 + sqrt(0.5)))
	end
	return tmp
end
code[x_] := If[LessEqual[N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision], 2.0], N[(N[(x * x), $MachinePrecision] * N[(x * N[(x * N[(x * N[(x * 0.0673828125), $MachinePrecision] + -0.0859375), $MachinePrecision]), $MachinePrecision] + 0.125), $MachinePrecision]), $MachinePrecision], N[(0.5 / N[(1.0 + N[Sqrt[0.5], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\mathsf{hypot}\left(1, x\right) \leq 2:\\
\;\;\;\;\left(x \cdot x\right) \cdot \mathsf{fma}\left(x, x \cdot \mathsf{fma}\left(x, x \cdot 0.0673828125, -0.0859375\right), 0.125\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{0.5}{1 + \sqrt{0.5}}\\
\end{array}
\end{array}
if (hypot.f64 #s(literal 1 binary64) x) < 2Initial program 48.2%
Applied egg-rr48.2%
Taylor expanded in x around 0
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
+-commutativeN/A
unpow2N/A
associate-*l*N/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f64N/A
sub-negN/A
*-commutativeN/A
unpow2N/A
associate-*l*N/A
metadata-evalN/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f6498.9
Simplified98.9%
if 2 < (hypot.f64 #s(literal 1 binary64) x) Initial program 98.5%
Taylor expanded in x around inf
Simplified95.8%
flip--N/A
metadata-evalN/A
rem-square-sqrtN/A
metadata-evalN/A
/-lowering-/.f64N/A
+-lowering-+.f64N/A
sqrt-lowering-sqrt.f6497.3
Applied egg-rr97.3%
(FPCore (x) :precision binary64 (if (<= (hypot 1.0 x) 2.0) (* (* x x) (fma x (* x -0.0859375) 0.125)) (/ 0.5 (+ 1.0 (sqrt 0.5)))))
double code(double x) {
double tmp;
if (hypot(1.0, x) <= 2.0) {
tmp = (x * x) * fma(x, (x * -0.0859375), 0.125);
} else {
tmp = 0.5 / (1.0 + sqrt(0.5));
}
return tmp;
}
# Cheaper alternative: truncated polynomial for hypot(1,x) <= 2, constant beyond.
# Reformatted: the collapsed one-line form was not valid Julia syntax.
function code(x)
	tmp = 0.0
	if (hypot(1.0, x) <= 2.0)
		tmp = Float64(Float64(x * x) * fma(x, Float64(x * -0.0859375), 0.125))
	else
		tmp = Float64(0.5 / Float64(1.0 + sqrt(0.5)))
	end
	return tmp
end
code[x_] := If[LessEqual[N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision], 2.0], N[(N[(x * x), $MachinePrecision] * N[(x * N[(x * -0.0859375), $MachinePrecision] + 0.125), $MachinePrecision]), $MachinePrecision], N[(0.5 / N[(1.0 + N[Sqrt[0.5], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\mathsf{hypot}\left(1, x\right) \leq 2:\\
\;\;\;\;\left(x \cdot x\right) \cdot \mathsf{fma}\left(x, x \cdot -0.0859375, 0.125\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{0.5}{1 + \sqrt{0.5}}\\
\end{array}
\end{array}
if (hypot.f64 #s(literal 1 binary64) x) < 2Initial program 48.2%
Applied egg-rr48.2%
Taylor expanded in x around 0
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
+-commutativeN/A
*-commutativeN/A
unpow2N/A
associate-*l*N/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f6498.8
Simplified98.8%
if 2 < (hypot.f64 #s(literal 1 binary64) x) Initial program 98.5%
Taylor expanded in x around inf
Simplified95.8%
flip--N/A
metadata-evalN/A
rem-square-sqrtN/A
metadata-evalN/A
/-lowering-/.f64N/A
+-lowering-+.f64N/A
sqrt-lowering-sqrt.f6497.3
Applied egg-rr97.3%
(FPCore (x) :precision binary64 (if (<= (hypot 1.0 x) 2.0) (* (* x x) (fma x (* x -0.0859375) 0.125)) (- 1.0 (sqrt 0.5))))
double code(double x) {
double tmp;
if (hypot(1.0, x) <= 2.0) {
tmp = (x * x) * fma(x, (x * -0.0859375), 0.125);
} else {
tmp = 1.0 - sqrt(0.5);
}
return tmp;
}
# Alternative: truncated polynomial for hypot(1,x) <= 2, constant 1 - sqrt(0.5) beyond.
# Reformatted: the collapsed one-line form was not valid Julia syntax.
function code(x)
	tmp = 0.0
	if (hypot(1.0, x) <= 2.0)
		tmp = Float64(Float64(x * x) * fma(x, Float64(x * -0.0859375), 0.125))
	else
		tmp = Float64(1.0 - sqrt(0.5))
	end
	return tmp
end
code[x_] := If[LessEqual[N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision], 2.0], N[(N[(x * x), $MachinePrecision] * N[(x * N[(x * -0.0859375), $MachinePrecision] + 0.125), $MachinePrecision]), $MachinePrecision], N[(1.0 - N[Sqrt[0.5], $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\mathsf{hypot}\left(1, x\right) \leq 2:\\
\;\;\;\;\left(x \cdot x\right) \cdot \mathsf{fma}\left(x, x \cdot -0.0859375, 0.125\right)\\
\mathbf{else}:\\
\;\;\;\;1 - \sqrt{0.5}\\
\end{array}
\end{array}
if (hypot.f64 #s(literal 1 binary64) x) < 2Initial program 48.2%
Applied egg-rr48.2%
Taylor expanded in x around 0
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
+-commutativeN/A
*-commutativeN/A
unpow2N/A
associate-*l*N/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f6498.8
Simplified98.8%
if 2 < (hypot.f64 #s(literal 1 binary64) x) Initial program 98.5%
Taylor expanded in x around inf
Simplified95.8%
(FPCore (x) :precision binary64 (if (<= (hypot 1.0 x) 2.0) (* (* x x) 0.125) (- 1.0 (sqrt 0.5))))
double code(double x) {
double tmp;
if (hypot(1.0, x) <= 2.0) {
tmp = (x * x) * 0.125;
} else {
tmp = 1.0 - sqrt(0.5);
}
return tmp;
}
/** Simplest branchy alternative: x^2/8 for hypot(1,x) <= 2, constant 1 - sqrt(0.5) beyond. */
public static double code(double x) {
    if (Math.hypot(1.0, x) <= 2.0) {
        return (x * x) * 0.125;
    }
    return 1.0 - Math.sqrt(0.5);
}
def code(x):
    """Simplest branchy alternative: x**2/8 for hypot(1, x) <= 2,
    the constant 1 - sqrt(0.5) beyond.

    NOTE: the original was collapsed onto one line, which is invalid
    Python (compound if/else statements cannot follow `def ...:` inline);
    reformatted with the identical arithmetic.
    """
    if math.hypot(1.0, x) <= 2.0:
        tmp = (x * x) * 0.125
    else:
        tmp = 1.0 - math.sqrt(0.5)
    return tmp
# Simplest branchy alternative: x^2/8 for hypot(1,x) <= 2, constant beyond.
# Reformatted: the collapsed one-line form was not valid Julia syntax.
function code(x)
	tmp = 0.0
	if (hypot(1.0, x) <= 2.0)
		tmp = Float64(Float64(x * x) * 0.125)
	else
		tmp = Float64(1.0 - sqrt(0.5))
	end
	return tmp
end
% Simplest branchy alternative: x^2/8 for hypot(1,x) <= 2, else 1 - sqrt(1/2).
% Reformatted: the collapsed one-line form is invalid MATLAB syntax.
function tmp_2 = code(x)
	tmp = 0.0;
	if (hypot(1.0, x) <= 2.0)
		tmp = (x * x) * 0.125;
	else
		tmp = 1.0 - sqrt(0.5);
	end
	tmp_2 = tmp;
end
code[x_] := If[LessEqual[N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision], 2.0], N[(N[(x * x), $MachinePrecision] * 0.125), $MachinePrecision], N[(1.0 - N[Sqrt[0.5], $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\mathsf{hypot}\left(1, x\right) \leq 2:\\
\;\;\;\;\left(x \cdot x\right) \cdot 0.125\\
\mathbf{else}:\\
\;\;\;\;1 - \sqrt{0.5}\\
\end{array}
\end{array}
if (hypot.f64 #s(literal 1 binary64) x) < 2Initial program 48.2%
Applied egg-rr48.2%
Taylor expanded in x around 0
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6498.1
Simplified98.1%
if 2 < (hypot.f64 #s(literal 1 binary64) x) Initial program 98.5%
Taylor expanded in x around inf
Simplified95.8%
Final simplification96.9%
(FPCore (x) :precision binary64 (* (* x x) 0.125))
/* Leading Taylor term of 1 - sqrt(0.5*(1 + 1/hypot(1,x))) about x = 0: x^2/8. */
double code(double x) {
    const double sq = x * x;
    return sq * 0.125;
}
! Leading Taylor term of 1 - sqrt(0.5*(1 + 1/hypot(1,x))) about x = 0:
! code(x) = x**2 / 8 in double precision.
real(8) function code(x)
real(8), intent (in) :: x
code = (x * x) * 0.125d0
end function
/** Leading Taylor term about x = 0: x^2 / 8. */
public static double code(double x) {
    double sq = x * x;
    return sq * 0.125;
}
def code(x):
    """Leading Taylor term about x = 0: one-eighth of x squared."""
    sq = x * x
    return sq * 0.125
# Leading Taylor term about x = 0: x^2 / 8 in Float64.
function code(x)
	sq = Float64(x * x)
	return Float64(sq * 0.125)
end
% Leading Taylor term about x = 0: x^2 / 8.
% Reformatted: the collapsed one-line form is invalid MATLAB syntax.
function tmp = code(x)
	tmp = (x * x) * 0.125;
end
(* Leading Taylor term about x = 0: x^2/8, each step rounded via N[..., $MachinePrecision]. *)
code[x_] := N[(N[(x * x), $MachinePrecision] * 0.125), $MachinePrecision]
\begin{array}{l}
\\
\left(x \cdot x\right) \cdot 0.125
\end{array}
Initial program 75.3%
Applied egg-rr76.1%
Taylor expanded in x around 0
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6447.4
Simplified47.4%
Final simplification47.4%
(FPCore (x) :precision binary64 0.0)
/* Fully simplified (degenerate) alternative: identically zero. */
double code(double x) {
    (void)x; /* input intentionally unused */
    return 0.0;
}
! Fully simplified (degenerate) alternative: returns 0 for every input.
real(8) function code(x)
real(8), intent (in) :: x
code = 0.0d0
end function
/** Fully simplified (degenerate) alternative: identically zero. */
public static double code(double x) {
    final double result = 0.0;
    return result;
}
def code(x):
    """Fully simplified (degenerate) alternative: always 0.0, for any x."""
    result = 0.0
    return result
# Fully simplified (degenerate) alternative: identically zero.
function code(x)
	return 0.0
end
% Fully simplified (degenerate) alternative: identically zero.
% Reformatted: the collapsed one-line form is invalid MATLAB syntax.
function tmp = code(x)
	tmp = 0.0;
end
(* Fully simplified (degenerate) alternative: the constant 0. *)
code[x_] := 0.0
\begin{array}{l}
\\
0
\end{array}
Initial program 75.3%
+-commutativeN/A
distribute-rgt-inN/A
metadata-evalN/A
+-lowering-+.f64N/A
associate-*l/N/A
metadata-evalN/A
/-lowering-/.f64N/A
rem-square-sqrtN/A
sqrt-lowering-sqrt.f64N/A
rem-square-sqrtN/A
metadata-evalN/A
+-commutativeN/A
accelerator-lowering-fma.f6475.3
Applied egg-rr75.3%
Taylor expanded in x around 0
Simplified22.8%
metadata-eval22.8
Applied egg-rr22.8%
herbie shell --seed 2024204
(FPCore (x)
:name "Given's Rotation SVD example, simplified"
:precision binary64
(- 1.0 (sqrt (* 0.5 (+ 1.0 (/ 1.0 (hypot 1.0 x)))))))