
(FPCore (x) :precision binary64 (- 1.0 (sqrt (* 0.5 (+ 1.0 (/ 1.0 (hypot 1.0 x)))))))
double code(double x) {
return 1.0 - sqrt((0.5 * (1.0 + (1.0 / hypot(1.0, x)))));
}
/**
 * Evaluates 1 - sqrt((1 + 1/hypot(1, x)) / 2) in double precision.
 * Direct rendering of the FPCore specification; loses accuracy near
 * x = 0 where the outer subtraction cancels.
 */
public static double code(double x) {
    final double invHypot = 1.0 / Math.hypot(1.0, x);
    final double radicand = 0.5 * (1.0 + invHypot);
    return 1.0 - Math.sqrt(radicand);
}
def code(x):
    """Return 1 - sqrt((1 + 1/hypot(1, x)) / 2) in float arithmetic.

    Direct rendering of the FPCore specification; accuracy degrades near
    x = 0 where the outer subtraction cancels.
    """
    inv_hypot = 1.0 / math.hypot(1.0, x)
    return 1.0 - math.sqrt(0.5 * (1.0 + inv_hypot))
# Direct Float64 rendering of the FPCore spec: 1 - sqrt((1 + 1/hypot(1, x)) / 2).
function code(x) return Float64(1.0 - sqrt(Float64(0.5 * Float64(1.0 + Float64(1.0 / hypot(1.0, x)))))) end
% Direct rendering of the FPCore spec: 1 - sqrt((1 + 1/hypot(1, x)) / 2).
function tmp = code(x) tmp = 1.0 - sqrt((0.5 * (1.0 + (1.0 / hypot(1.0, x))))); end
(* Direct rendering of the FPCore spec at $MachinePrecision; hypot(1, x) is expanded to Sqrt[1^2 + x^2]. *)
code[x_] := N[(1.0 - N[Sqrt[N[(0.5 * N[(1.0 + N[(1.0 / N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
1 - \sqrt{0.5 \cdot \left(1 + \frac{1}{\mathsf{hypot}\left(1, x\right)}\right)}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 18 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x) :precision binary64 (- 1.0 (sqrt (* 0.5 (+ 1.0 (/ 1.0 (hypot 1.0 x)))))))
/* Verbatim restatement of the initial program (same as the C rendering at
 * the top of the report): 1 - sqrt((1 + 1/hypot(1, x)) / 2). */
double code(double x) {
return 1.0 - sqrt((0.5 * (1.0 + (1.0 / hypot(1.0, x)))));
}
/** Verbatim restatement of the initial program: 1 - sqrt((1 + 1/hypot(1, x)) / 2). */
public static double code(double x) {
return 1.0 - Math.sqrt((0.5 * (1.0 + (1.0 / Math.hypot(1.0, x)))));
}
# Verbatim restatement of the initial program: 1 - sqrt((1 + 1/hypot(1, x)) / 2).
def code(x): return 1.0 - math.sqrt((0.5 * (1.0 + (1.0 / math.hypot(1.0, x)))))
function code(x) return Float64(1.0 - sqrt(Float64(0.5 * Float64(1.0 + Float64(1.0 / hypot(1.0, x)))))) end
function tmp = code(x) tmp = 1.0 - sqrt((0.5 * (1.0 + (1.0 / hypot(1.0, x))))); end
code[x_] := N[(1.0 - N[Sqrt[N[(0.5 * N[(1.0 + N[(1.0 / N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
1 - \sqrt{0.5 \cdot \left(1 + \frac{1}{\mathsf{hypot}\left(1, x\right)}\right)}
\end{array}
(FPCore (x)
:precision binary64
(let* ((t_0 (/ -0.5 (hypot 1.0 x))) (t_1 (- 0.5 t_0)))
(if (<= (hypot 1.0 x) 1.0005)
(* (* (fma (fma 0.0673828125 (* x x) -0.0859375) (* x x) 0.125) x) x)
(/
1.0
(/
(fma
(/ (- (/ 0.25 (fma x x 1.0)) 0.25) (- (pow t_1 2.0) 1.0))
(- 1.5 t_0)
(+ (sqrt t_1) 1.0))
(- 1.0 (pow t_1 1.5)))))))
/* Herbie alternative for 1 - sqrt((1 + 1/hypot(1, x)) / 2).
 * t_0 = -1/(2*hypot(1,x)), so t_1 = 1/2 - t_0 is the radicand of the
 * original expression. Piecewise split at hypot(1,x) <= 1.0005 (|x| small).
 * NOTE(review): expression order is significant for rounding; do not
 * re-associate. */
double code(double x) {
double t_0 = -0.5 / hypot(1.0, x);
double t_1 = 0.5 - t_0;
double tmp;
if (hypot(1.0, x) <= 1.0005) {
/* Near x = 0: even polynomial in x (Taylor expansion around 0) avoids the
 * catastrophic cancellation in 1 - sqrt(...). */
tmp = (fma(fma(0.0673828125, (x * x), -0.0859375), (x * x), 0.125) * x) * x;
} else {
/* Away from 0: algebraically rearranged quotient form of the original. */
tmp = 1.0 / (fma((((0.25 / fma(x, x, 1.0)) - 0.25) / (pow(t_1, 2.0) - 1.0)), (1.5 - t_0), (sqrt(t_1) + 1.0)) / (1.0 - pow(t_1, 1.5)));
}
return tmp;
}
function code(x) t_0 = Float64(-0.5 / hypot(1.0, x)) t_1 = Float64(0.5 - t_0) tmp = 0.0 if (hypot(1.0, x) <= 1.0005) tmp = Float64(Float64(fma(fma(0.0673828125, Float64(x * x), -0.0859375), Float64(x * x), 0.125) * x) * x); else tmp = Float64(1.0 / Float64(fma(Float64(Float64(Float64(0.25 / fma(x, x, 1.0)) - 0.25) / Float64((t_1 ^ 2.0) - 1.0)), Float64(1.5 - t_0), Float64(sqrt(t_1) + 1.0)) / Float64(1.0 - (t_1 ^ 1.5)))); end return tmp end
code[x_] := Block[{t$95$0 = N[(-0.5 / N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(0.5 - t$95$0), $MachinePrecision]}, If[LessEqual[N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision], 1.0005], N[(N[(N[(N[(0.0673828125 * N[(x * x), $MachinePrecision] + -0.0859375), $MachinePrecision] * N[(x * x), $MachinePrecision] + 0.125), $MachinePrecision] * x), $MachinePrecision] * x), $MachinePrecision], N[(1.0 / N[(N[(N[(N[(N[(0.25 / N[(x * x + 1.0), $MachinePrecision]), $MachinePrecision] - 0.25), $MachinePrecision] / N[(N[Power[t$95$1, 2.0], $MachinePrecision] - 1.0), $MachinePrecision]), $MachinePrecision] * N[(1.5 - t$95$0), $MachinePrecision] + N[(N[Sqrt[t$95$1], $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision] / N[(1.0 - N[Power[t$95$1, 1.5], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{-0.5}{\mathsf{hypot}\left(1, x\right)}\\
t_1 := 0.5 - t\_0\\
\mathbf{if}\;\mathsf{hypot}\left(1, x\right) \leq 1.0005:\\
\;\;\;\;\left(\mathsf{fma}\left(\mathsf{fma}\left(0.0673828125, x \cdot x, -0.0859375\right), x \cdot x, 0.125\right) \cdot x\right) \cdot x\\
\mathbf{else}:\\
\;\;\;\;\frac{1}{\frac{\mathsf{fma}\left(\frac{\frac{0.25}{\mathsf{fma}\left(x, x, 1\right)} - 0.25}{{t\_1}^{2} - 1}, 1.5 - t\_0, \sqrt{t\_1} + 1\right)}{1 - {t\_1}^{1.5}}}\\
\end{array}
\end{array}
if (hypot.f64 #s(literal 1 binary64) x) < 1.00049999999999994Initial program 47.8%
Applied rewrites47.8%
Taylor expanded in x around 0
*-commutativeN/A
unpow2N/A
associate-*r*N/A
lower-*.f64N/A
lower-*.f64N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
sub-negN/A
metadata-evalN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
unpow2N/A
lower-*.f64100.0
Applied rewrites100.0%
if 1.00049999999999994 < (hypot.f64 #s(literal 1 binary64) x) Initial program 98.3%
Applied rewrites99.8%
lift-/.f64N/A
lift--.f64N/A
rem-square-sqrtN/A
lift-sqrt.f64N/A
lift-sqrt.f64N/A
sqr-negN/A
lift-neg.f64N/A
lift-neg.f64N/A
metadata-evalN/A
lift--.f64N/A
flip-+N/A
+-commutativeN/A
lift-neg.f64N/A
sub-negN/A
Applied rewrites99.8%
lift-+.f64N/A
lift-+.f64N/A
associate-+l+N/A
lift--.f64N/A
flip--N/A
metadata-evalN/A
associate--r-N/A
lift--.f64N/A
flip--N/A
+-commutativeN/A
lift-+.f64N/A
associate-/r/N/A
+-commutativeN/A
lift-+.f64N/A
Applied rewrites99.8%
Final simplification99.9%
(FPCore (x)
:precision binary64
(let* ((t_0 (/ -0.5 (hypot 1.0 x))) (t_1 (- 0.5 t_0)))
(if (<= (hypot 1.0 x) 1.0005)
(* (* (fma (fma 0.0673828125 (* x x) -0.0859375) (* x x) 0.125) x) x)
(/ (- 1.0 (pow t_1 1.5)) (+ (sqrt t_1) (- 1.5 t_0))))))
/* Herbie alternative (final simplification of the previous form).
 * t_1 = 1/2 + 1/(2*hypot(1,x)) is the radicand of the original expression;
 * the else branch is a rationalized quotient replacing 1 - sqrt(t_1). */
double code(double x) {
double t_0 = -0.5 / hypot(1.0, x);
double t_1 = 0.5 - t_0;
double tmp;
if (hypot(1.0, x) <= 1.0005) {
/* Near x = 0: even Taylor polynomial avoids cancellation. */
tmp = (fma(fma(0.0673828125, (x * x), -0.0859375), (x * x), 0.125) * x) * x;
} else {
tmp = (1.0 - pow(t_1, 1.5)) / (sqrt(t_1) + (1.5 - t_0));
}
return tmp;
}
function code(x) t_0 = Float64(-0.5 / hypot(1.0, x)) t_1 = Float64(0.5 - t_0) tmp = 0.0 if (hypot(1.0, x) <= 1.0005) tmp = Float64(Float64(fma(fma(0.0673828125, Float64(x * x), -0.0859375), Float64(x * x), 0.125) * x) * x); else tmp = Float64(Float64(1.0 - (t_1 ^ 1.5)) / Float64(sqrt(t_1) + Float64(1.5 - t_0))); end return tmp end
code[x_] := Block[{t$95$0 = N[(-0.5 / N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(0.5 - t$95$0), $MachinePrecision]}, If[LessEqual[N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision], 1.0005], N[(N[(N[(N[(0.0673828125 * N[(x * x), $MachinePrecision] + -0.0859375), $MachinePrecision] * N[(x * x), $MachinePrecision] + 0.125), $MachinePrecision] * x), $MachinePrecision] * x), $MachinePrecision], N[(N[(1.0 - N[Power[t$95$1, 1.5], $MachinePrecision]), $MachinePrecision] / N[(N[Sqrt[t$95$1], $MachinePrecision] + N[(1.5 - t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{-0.5}{\mathsf{hypot}\left(1, x\right)}\\
t_1 := 0.5 - t\_0\\
\mathbf{if}\;\mathsf{hypot}\left(1, x\right) \leq 1.0005:\\
\;\;\;\;\left(\mathsf{fma}\left(\mathsf{fma}\left(0.0673828125, x \cdot x, -0.0859375\right), x \cdot x, 0.125\right) \cdot x\right) \cdot x\\
\mathbf{else}:\\
\;\;\;\;\frac{1 - {t\_1}^{1.5}}{\sqrt{t\_1} + \left(1.5 - t\_0\right)}\\
\end{array}
\end{array}
if (hypot.f64 #s(literal 1 binary64) x) < 1.00049999999999994Initial program 47.8%
Applied rewrites47.8%
Taylor expanded in x around 0
*-commutativeN/A
unpow2N/A
associate-*r*N/A
lower-*.f64N/A
lower-*.f64N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
sub-negN/A
metadata-evalN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
unpow2N/A
lower-*.f64100.0
Applied rewrites100.0%
if 1.00049999999999994 < (hypot.f64 #s(literal 1 binary64) x) Initial program 98.3%
Applied rewrites99.8%
lift-/.f64N/A
lift--.f64N/A
rem-square-sqrtN/A
lift-sqrt.f64N/A
lift-sqrt.f64N/A
sqr-negN/A
lift-neg.f64N/A
lift-neg.f64N/A
metadata-evalN/A
lift--.f64N/A
flip-+N/A
+-commutativeN/A
lift-neg.f64N/A
sub-negN/A
Applied rewrites99.8%
Applied rewrites99.8%
(FPCore (x)
:precision binary64
(let* ((t_0 (/ -0.5 (hypot 1.0 x))) (t_1 (- t_0 0.5)))
(if (<= (hypot 1.0 x) 1.0005)
(* (* (fma (fma 0.0673828125 (* x x) -0.0859375) (* x x) 0.125) x) x)
(/
(- (/ 0.25 t_1) (/ (/ 0.25 (fma x x 1.0)) t_1))
(- -1.0 (sqrt (- 0.5 t_0)))))))
/* Herbie alternative. Here t_1 = t_0 - 0.5 is the NEGATED radicand
 * (-(1/2 + 1/(2*hypot(1,x)))); the else branch divides two differences
 * whose signs cancel. Expression order is significant for rounding. */
double code(double x) {
double t_0 = -0.5 / hypot(1.0, x);
double t_1 = t_0 - 0.5;
double tmp;
if (hypot(1.0, x) <= 1.0005) {
/* Near x = 0: even Taylor polynomial avoids cancellation. */
tmp = (fma(fma(0.0673828125, (x * x), -0.0859375), (x * x), 0.125) * x) * x;
} else {
tmp = ((0.25 / t_1) - ((0.25 / fma(x, x, 1.0)) / t_1)) / (-1.0 - sqrt((0.5 - t_0)));
}
return tmp;
}
function code(x) t_0 = Float64(-0.5 / hypot(1.0, x)) t_1 = Float64(t_0 - 0.5) tmp = 0.0 if (hypot(1.0, x) <= 1.0005) tmp = Float64(Float64(fma(fma(0.0673828125, Float64(x * x), -0.0859375), Float64(x * x), 0.125) * x) * x); else tmp = Float64(Float64(Float64(0.25 / t_1) - Float64(Float64(0.25 / fma(x, x, 1.0)) / t_1)) / Float64(-1.0 - sqrt(Float64(0.5 - t_0)))); end return tmp end
code[x_] := Block[{t$95$0 = N[(-0.5 / N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(t$95$0 - 0.5), $MachinePrecision]}, If[LessEqual[N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision], 1.0005], N[(N[(N[(N[(0.0673828125 * N[(x * x), $MachinePrecision] + -0.0859375), $MachinePrecision] * N[(x * x), $MachinePrecision] + 0.125), $MachinePrecision] * x), $MachinePrecision] * x), $MachinePrecision], N[(N[(N[(0.25 / t$95$1), $MachinePrecision] - N[(N[(0.25 / N[(x * x + 1.0), $MachinePrecision]), $MachinePrecision] / t$95$1), $MachinePrecision]), $MachinePrecision] / N[(-1.0 - N[Sqrt[N[(0.5 - t$95$0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{-0.5}{\mathsf{hypot}\left(1, x\right)}\\
t_1 := t\_0 - 0.5\\
\mathbf{if}\;\mathsf{hypot}\left(1, x\right) \leq 1.0005:\\
\;\;\;\;\left(\mathsf{fma}\left(\mathsf{fma}\left(0.0673828125, x \cdot x, -0.0859375\right), x \cdot x, 0.125\right) \cdot x\right) \cdot x\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{0.25}{t\_1} - \frac{\frac{0.25}{\mathsf{fma}\left(x, x, 1\right)}}{t\_1}}{-1 - \sqrt{0.5 - t\_0}}\\
\end{array}
\end{array}
if (hypot.f64 #s(literal 1 binary64) x) < 1.00049999999999994Initial program 47.8%
Applied rewrites47.8%
Taylor expanded in x around 0
*-commutativeN/A
unpow2N/A
associate-*r*N/A
lower-*.f64N/A
lower-*.f64N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
sub-negN/A
metadata-evalN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
unpow2N/A
lower-*.f64100.0
Applied rewrites100.0%
if 1.00049999999999994 < (hypot.f64 #s(literal 1 binary64) x) Initial program 98.3%
Applied rewrites99.8%
lift-/.f64N/A
lift--.f64N/A
rem-square-sqrtN/A
lift-sqrt.f64N/A
lift-sqrt.f64N/A
sqr-negN/A
lift-neg.f64N/A
lift-neg.f64N/A
metadata-evalN/A
lift--.f64N/A
flip-+N/A
+-commutativeN/A
lift-neg.f64N/A
sub-negN/A
Applied rewrites99.8%
lift--.f64N/A
lift--.f64N/A
associate--r-N/A
metadata-evalN/A
flip-+N/A
metadata-evalN/A
lift--.f64N/A
div-subN/A
lower--.f64N/A
lower-/.f64N/A
lower-/.f64N/A
Applied rewrites99.8%
Final simplification99.9%
(FPCore (x)
:precision binary64
(let* ((t_0 (/ -0.5 (hypot 1.0 x))))
(if (<= (hypot 1.0 x) 1.0005)
(* (* (fma (fma 0.0673828125 (* x x) -0.0859375) (* x x) 0.125) x) x)
(/
(/ (- (/ 0.25 (fma x x 1.0)) 0.25) (- t_0 0.5))
(+ (sqrt (- 0.5 t_0)) 1.0)))))
/* Herbie alternative. fma(x, x, 1.0) = 1 + x^2 = hypot(1,x)^2, so
 * 0.25/fma(x,x,1.0) - 0.25 is a cancellation-friendly form of
 * (1/hypot^2 - 1)/4; dividing by (t_0 - 0.5) and by (sqrt(0.5 - t_0) + 1)
 * rationalizes 1 - sqrt(radicand). */
double code(double x) {
double t_0 = -0.5 / hypot(1.0, x);
double tmp;
if (hypot(1.0, x) <= 1.0005) {
/* Near x = 0: even Taylor polynomial avoids cancellation. */
tmp = (fma(fma(0.0673828125, (x * x), -0.0859375), (x * x), 0.125) * x) * x;
} else {
tmp = (((0.25 / fma(x, x, 1.0)) - 0.25) / (t_0 - 0.5)) / (sqrt((0.5 - t_0)) + 1.0);
}
return tmp;
}
function code(x) t_0 = Float64(-0.5 / hypot(1.0, x)) tmp = 0.0 if (hypot(1.0, x) <= 1.0005) tmp = Float64(Float64(fma(fma(0.0673828125, Float64(x * x), -0.0859375), Float64(x * x), 0.125) * x) * x); else tmp = Float64(Float64(Float64(Float64(0.25 / fma(x, x, 1.0)) - 0.25) / Float64(t_0 - 0.5)) / Float64(sqrt(Float64(0.5 - t_0)) + 1.0)); end return tmp end
code[x_] := Block[{t$95$0 = N[(-0.5 / N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision]), $MachinePrecision]}, If[LessEqual[N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision], 1.0005], N[(N[(N[(N[(0.0673828125 * N[(x * x), $MachinePrecision] + -0.0859375), $MachinePrecision] * N[(x * x), $MachinePrecision] + 0.125), $MachinePrecision] * x), $MachinePrecision] * x), $MachinePrecision], N[(N[(N[(N[(0.25 / N[(x * x + 1.0), $MachinePrecision]), $MachinePrecision] - 0.25), $MachinePrecision] / N[(t$95$0 - 0.5), $MachinePrecision]), $MachinePrecision] / N[(N[Sqrt[N[(0.5 - t$95$0), $MachinePrecision]], $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{-0.5}{\mathsf{hypot}\left(1, x\right)}\\
\mathbf{if}\;\mathsf{hypot}\left(1, x\right) \leq 1.0005:\\
\;\;\;\;\left(\mathsf{fma}\left(\mathsf{fma}\left(0.0673828125, x \cdot x, -0.0859375\right), x \cdot x, 0.125\right) \cdot x\right) \cdot x\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{\frac{0.25}{\mathsf{fma}\left(x, x, 1\right)} - 0.25}{t\_0 - 0.5}}{\sqrt{0.5 - t\_0} + 1}\\
\end{array}
\end{array}
if (hypot.f64 #s(literal 1 binary64) x) < 1.00049999999999994Initial program 47.8%
Applied rewrites47.8%
Taylor expanded in x around 0
*-commutativeN/A
unpow2N/A
associate-*r*N/A
lower-*.f64N/A
lower-*.f64N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
sub-negN/A
metadata-evalN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
unpow2N/A
lower-*.f64100.0
Applied rewrites100.0%
if 1.00049999999999994 < (hypot.f64 #s(literal 1 binary64) x) Initial program 98.3%
Applied rewrites99.8%
lift-/.f64N/A
lift--.f64N/A
rem-square-sqrtN/A
lift-sqrt.f64N/A
lift-sqrt.f64N/A
sqr-negN/A
lift-neg.f64N/A
lift-neg.f64N/A
metadata-evalN/A
lift--.f64N/A
flip-+N/A
+-commutativeN/A
lift-neg.f64N/A
sub-negN/A
Applied rewrites99.8%
lift--.f64N/A
lift--.f64N/A
associate--r-N/A
metadata-evalN/A
flip-+N/A
lift--.f64N/A
lower-/.f64N/A
Applied rewrites99.8%
Final simplification99.9%
(FPCore (x)
:precision binary64
(let* ((t_0 (- 0.5 (/ -0.5 (hypot 1.0 x)))))
(if (<= (hypot 1.0 x) 1.0005)
(* (* (fma (fma 0.0673828125 (* x x) -0.0859375) (* x x) 0.125) x) x)
(/ (- (/ 0.25 (fma x x 1.0)) 0.25) (* (- -1.0 (sqrt t_0)) t_0)))))
/* Herbie alternative. t_0 = 1/2 + 1/(2*hypot(1,x)) is the radicand itself;
 * the else branch folds the two divisions of the previous variant into one
 * product in the denominator. */
double code(double x) {
double t_0 = 0.5 - (-0.5 / hypot(1.0, x));
double tmp;
if (hypot(1.0, x) <= 1.0005) {
/* Near x = 0: even Taylor polynomial avoids cancellation. */
tmp = (fma(fma(0.0673828125, (x * x), -0.0859375), (x * x), 0.125) * x) * x;
} else {
tmp = ((0.25 / fma(x, x, 1.0)) - 0.25) / ((-1.0 - sqrt(t_0)) * t_0);
}
return tmp;
}
function code(x) t_0 = Float64(0.5 - Float64(-0.5 / hypot(1.0, x))) tmp = 0.0 if (hypot(1.0, x) <= 1.0005) tmp = Float64(Float64(fma(fma(0.0673828125, Float64(x * x), -0.0859375), Float64(x * x), 0.125) * x) * x); else tmp = Float64(Float64(Float64(0.25 / fma(x, x, 1.0)) - 0.25) / Float64(Float64(-1.0 - sqrt(t_0)) * t_0)); end return tmp end
code[x_] := Block[{t$95$0 = N[(0.5 - N[(-0.5 / N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision], 1.0005], N[(N[(N[(N[(0.0673828125 * N[(x * x), $MachinePrecision] + -0.0859375), $MachinePrecision] * N[(x * x), $MachinePrecision] + 0.125), $MachinePrecision] * x), $MachinePrecision] * x), $MachinePrecision], N[(N[(N[(0.25 / N[(x * x + 1.0), $MachinePrecision]), $MachinePrecision] - 0.25), $MachinePrecision] / N[(N[(-1.0 - N[Sqrt[t$95$0], $MachinePrecision]), $MachinePrecision] * t$95$0), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := 0.5 - \frac{-0.5}{\mathsf{hypot}\left(1, x\right)}\\
\mathbf{if}\;\mathsf{hypot}\left(1, x\right) \leq 1.0005:\\
\;\;\;\;\left(\mathsf{fma}\left(\mathsf{fma}\left(0.0673828125, x \cdot x, -0.0859375\right), x \cdot x, 0.125\right) \cdot x\right) \cdot x\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{0.25}{\mathsf{fma}\left(x, x, 1\right)} - 0.25}{\left(-1 - \sqrt{t\_0}\right) \cdot t\_0}\\
\end{array}
\end{array}
if (hypot.f64 #s(literal 1 binary64) x) < 1.00049999999999994Initial program 47.8%
Applied rewrites47.8%
Taylor expanded in x around 0
*-commutativeN/A
unpow2N/A
associate-*r*N/A
lower-*.f64N/A
lower-*.f64N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
sub-negN/A
metadata-evalN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
unpow2N/A
lower-*.f64100.0
Applied rewrites100.0%
if 1.00049999999999994 < (hypot.f64 #s(literal 1 binary64) x) Initial program 98.3%
Applied rewrites99.8%
lift-/.f64N/A
lift--.f64N/A
rem-square-sqrtN/A
lift-sqrt.f64N/A
lift-sqrt.f64N/A
sqr-negN/A
lift-neg.f64N/A
lift-neg.f64N/A
metadata-evalN/A
lift--.f64N/A
flip-+N/A
+-commutativeN/A
lift-neg.f64N/A
sub-negN/A
Applied rewrites99.8%
lift--.f64N/A
lift--.f64N/A
associate--r-N/A
metadata-evalN/A
flip-+N/A
metadata-evalN/A
lift--.f64N/A
div-subN/A
lower--.f64N/A
lower-/.f64N/A
lower-/.f64N/A
Applied rewrites99.8%
lift-/.f64N/A
lift--.f64N/A
lift-/.f64N/A
lift-/.f64N/A
sub-divN/A
associate-/l/N/A
Applied rewrites99.8%
Final simplification99.9%
(FPCore (x)
:precision binary64
(let* ((t_0 (/ -0.5 (hypot 1.0 x))))
(if (<= (hypot 1.0 x) 1.0005)
(* (* (fma (fma 0.0673828125 (* x x) -0.0859375) (* x x) 0.125) x) x)
(/ (- -1.0 (- t_0 0.5)) (- -1.0 (sqrt (- 0.5 t_0)))))))
/* Herbie alternative. Numerator -1 - (t_0 - 0.5) = -(radicand); denominator
 * -1 - sqrt(radicand); their quotient equals the rationalized
 * (1 - radicand-free) form of 1 - sqrt(radicand) up to the sampled accuracy. */
double code(double x) {
double t_0 = -0.5 / hypot(1.0, x);
double tmp;
if (hypot(1.0, x) <= 1.0005) {
/* Near x = 0: even Taylor polynomial avoids cancellation. */
tmp = (fma(fma(0.0673828125, (x * x), -0.0859375), (x * x), 0.125) * x) * x;
} else {
tmp = (-1.0 - (t_0 - 0.5)) / (-1.0 - sqrt((0.5 - t_0)));
}
return tmp;
}
function code(x) t_0 = Float64(-0.5 / hypot(1.0, x)) tmp = 0.0 if (hypot(1.0, x) <= 1.0005) tmp = Float64(Float64(fma(fma(0.0673828125, Float64(x * x), -0.0859375), Float64(x * x), 0.125) * x) * x); else tmp = Float64(Float64(-1.0 - Float64(t_0 - 0.5)) / Float64(-1.0 - sqrt(Float64(0.5 - t_0)))); end return tmp end
code[x_] := Block[{t$95$0 = N[(-0.5 / N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision]), $MachinePrecision]}, If[LessEqual[N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision], 1.0005], N[(N[(N[(N[(0.0673828125 * N[(x * x), $MachinePrecision] + -0.0859375), $MachinePrecision] * N[(x * x), $MachinePrecision] + 0.125), $MachinePrecision] * x), $MachinePrecision] * x), $MachinePrecision], N[(N[(-1.0 - N[(t$95$0 - 0.5), $MachinePrecision]), $MachinePrecision] / N[(-1.0 - N[Sqrt[N[(0.5 - t$95$0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{-0.5}{\mathsf{hypot}\left(1, x\right)}\\
\mathbf{if}\;\mathsf{hypot}\left(1, x\right) \leq 1.0005:\\
\;\;\;\;\left(\mathsf{fma}\left(\mathsf{fma}\left(0.0673828125, x \cdot x, -0.0859375\right), x \cdot x, 0.125\right) \cdot x\right) \cdot x\\
\mathbf{else}:\\
\;\;\;\;\frac{-1 - \left(t\_0 - 0.5\right)}{-1 - \sqrt{0.5 - t\_0}}\\
\end{array}
\end{array}
if (hypot.f64 #s(literal 1 binary64) x) < 1.00049999999999994Initial program 47.8%
Applied rewrites47.8%
Taylor expanded in x around 0
*-commutativeN/A
unpow2N/A
associate-*r*N/A
lower-*.f64N/A
lower-*.f64N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
sub-negN/A
metadata-evalN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
unpow2N/A
lower-*.f64100.0
Applied rewrites100.0%
if 1.00049999999999994 < (hypot.f64 #s(literal 1 binary64) x) Initial program 98.3%
Applied rewrites99.8%
lift-/.f64N/A
lift--.f64N/A
rem-square-sqrtN/A
lift-sqrt.f64N/A
lift-sqrt.f64N/A
sqr-negN/A
lift-neg.f64N/A
lift-neg.f64N/A
metadata-evalN/A
lift--.f64N/A
flip-+N/A
+-commutativeN/A
lift-neg.f64N/A
sub-negN/A
Applied rewrites99.8%
Final simplification99.9%
(FPCore (x)
:precision binary64
(if (<= (hypot 1.0 x) 1.0005)
(* (* (fma (fma 0.0673828125 (* x x) -0.0859375) (* x x) 0.125) x) x)
(/
(- (/ 0.5 (hypot 1.0 x)) 0.5)
(- -1.0 (sqrt (- 0.5 (/ -0.5 (hypot 1.0 x))))))))
/* Herbie alternative with t_0 inlined: numerator 0.5/hypot - 0.5 is
 * (1/hypot - 1)/2; dividing by -1 - sqrt(radicand) rationalizes the
 * original subtraction. hypot(1,x) is computed twice in this branch. */
double code(double x) {
double tmp;
if (hypot(1.0, x) <= 1.0005) {
/* Near x = 0: even Taylor polynomial avoids cancellation. */
tmp = (fma(fma(0.0673828125, (x * x), -0.0859375), (x * x), 0.125) * x) * x;
} else {
tmp = ((0.5 / hypot(1.0, x)) - 0.5) / (-1.0 - sqrt((0.5 - (-0.5 / hypot(1.0, x)))));
}
return tmp;
}
function code(x) tmp = 0.0 if (hypot(1.0, x) <= 1.0005) tmp = Float64(Float64(fma(fma(0.0673828125, Float64(x * x), -0.0859375), Float64(x * x), 0.125) * x) * x); else tmp = Float64(Float64(Float64(0.5 / hypot(1.0, x)) - 0.5) / Float64(-1.0 - sqrt(Float64(0.5 - Float64(-0.5 / hypot(1.0, x)))))); end return tmp end
code[x_] := If[LessEqual[N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision], 1.0005], N[(N[(N[(N[(0.0673828125 * N[(x * x), $MachinePrecision] + -0.0859375), $MachinePrecision] * N[(x * x), $MachinePrecision] + 0.125), $MachinePrecision] * x), $MachinePrecision] * x), $MachinePrecision], N[(N[(N[(0.5 / N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision]), $MachinePrecision] - 0.5), $MachinePrecision] / N[(-1.0 - N[Sqrt[N[(0.5 - N[(-0.5 / N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\mathsf{hypot}\left(1, x\right) \leq 1.0005:\\
\;\;\;\;\left(\mathsf{fma}\left(\mathsf{fma}\left(0.0673828125, x \cdot x, -0.0859375\right), x \cdot x, 0.125\right) \cdot x\right) \cdot x\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{0.5}{\mathsf{hypot}\left(1, x\right)} - 0.5}{-1 - \sqrt{0.5 - \frac{-0.5}{\mathsf{hypot}\left(1, x\right)}}}\\
\end{array}
\end{array}
if (hypot.f64 #s(literal 1 binary64) x) < 1.00049999999999994Initial program 47.8%
Applied rewrites47.8%
Taylor expanded in x around 0
*-commutativeN/A
unpow2N/A
associate-*r*N/A
lower-*.f64N/A
lower-*.f64N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
sub-negN/A
metadata-evalN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
unpow2N/A
lower-*.f64100.0
Applied rewrites100.0%
if 1.00049999999999994 < (hypot.f64 #s(literal 1 binary64) x) Initial program 98.3%
Applied rewrites99.8%
lift-/.f64N/A
lift--.f64N/A
rem-square-sqrtN/A
lift-sqrt.f64N/A
lift-sqrt.f64N/A
sqr-negN/A
lift-neg.f64N/A
lift-neg.f64N/A
metadata-evalN/A
lift--.f64N/A
flip-+N/A
+-commutativeN/A
lift-neg.f64N/A
sub-negN/A
Applied rewrites99.8%
lift--.f64N/A
lift--.f64N/A
sub-negN/A
associate--r+N/A
metadata-evalN/A
lower--.f64N/A
lift-/.f64N/A
distribute-neg-fracN/A
metadata-evalN/A
lower-/.f6499.7
Applied rewrites99.7%
Final simplification99.9%
(FPCore (x) :precision binary64 (if (<= (hypot 1.0 x) 1.0005) (* (* (fma (fma 0.0673828125 (* x x) -0.0859375) (* x x) 0.125) x) x) (- 1.0 (sqrt (- 0.5 (/ -0.5 (hypot 1.0 x)))))))
/* Herbie alternative: away from 0 the radicand is rewritten as
 * 0.5 - (-0.5/hypot) instead of 0.5 * (1 + 1/hypot) — same value, fewer
 * rounding steps — and the direct 1 - sqrt(...) form is kept since there
 * is no cancellation when hypot(1,x) > 1.0005. */
double code(double x) {
double tmp;
if (hypot(1.0, x) <= 1.0005) {
/* Near x = 0: even Taylor polynomial avoids cancellation. */
tmp = (fma(fma(0.0673828125, (x * x), -0.0859375), (x * x), 0.125) * x) * x;
} else {
tmp = 1.0 - sqrt((0.5 - (-0.5 / hypot(1.0, x))));
}
return tmp;
}
function code(x) tmp = 0.0 if (hypot(1.0, x) <= 1.0005) tmp = Float64(Float64(fma(fma(0.0673828125, Float64(x * x), -0.0859375), Float64(x * x), 0.125) * x) * x); else tmp = Float64(1.0 - sqrt(Float64(0.5 - Float64(-0.5 / hypot(1.0, x))))); end return tmp end
code[x_] := If[LessEqual[N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision], 1.0005], N[(N[(N[(N[(0.0673828125 * N[(x * x), $MachinePrecision] + -0.0859375), $MachinePrecision] * N[(x * x), $MachinePrecision] + 0.125), $MachinePrecision] * x), $MachinePrecision] * x), $MachinePrecision], N[(1.0 - N[Sqrt[N[(0.5 - N[(-0.5 / N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\mathsf{hypot}\left(1, x\right) \leq 1.0005:\\
\;\;\;\;\left(\mathsf{fma}\left(\mathsf{fma}\left(0.0673828125, x \cdot x, -0.0859375\right), x \cdot x, 0.125\right) \cdot x\right) \cdot x\\
\mathbf{else}:\\
\;\;\;\;1 - \sqrt{0.5 - \frac{-0.5}{\mathsf{hypot}\left(1, x\right)}}\\
\end{array}
\end{array}
if (hypot.f64 #s(literal 1 binary64) x) < 1.00049999999999994Initial program 47.8%
Applied rewrites47.8%
Taylor expanded in x around 0
*-commutativeN/A
unpow2N/A
associate-*r*N/A
lower-*.f64N/A
lower-*.f64N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
sub-negN/A
metadata-evalN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
unpow2N/A
lower-*.f64100.0
Applied rewrites100.0%
if 1.00049999999999994 < (hypot.f64 #s(literal 1 binary64) x) Initial program 98.3%
lift-*.f64N/A
lift-+.f64N/A
distribute-lft-inN/A
metadata-evalN/A
lift-/.f64N/A
frac-2negN/A
metadata-evalN/A
associate-*r/N/A
div-invN/A
metadata-evalN/A
metadata-evalN/A
inv-powN/A
metadata-evalN/A
metadata-evalN/A
pow-powN/A
pow2N/A
sqr-negN/A
pow-prod-downN/A
pow-sqrN/A
metadata-evalN/A
metadata-evalN/A
inv-powN/A
lift-/.f64N/A
Applied rewrites98.3%
(FPCore (x)
:precision binary64
(let* ((t_0 (+ (/ 0.5 x) 0.5)))
(if (<= (hypot 1.0 x) 2.0)
(*
(*
(fma
(fma (fma -0.056243896484375 (* x x) 0.0673828125) (* x x) -0.0859375)
(* x x)
0.125)
x)
x)
(/ (- 1.0 t_0) (+ (sqrt t_0) 1.0)))))
/* Herbie alternative with a wider small-|x| region (hypot <= 2) covered by a
 * degree-8 even Taylor polynomial, and a large-|x| branch using the Taylor
 * expansion of the radicand around infinity: t_0 = 0.5/x + 0.5.
 * NOTE(review): 0.5/x is odd in x while the exact result is even — the
 * derivation log says "Taylor expanded in x around inf", so this branch
 * presumably targets x > 0; confirm against the sampled domain. */
double code(double x) {
double t_0 = (0.5 / x) + 0.5;
double tmp;
if (hypot(1.0, x) <= 2.0) {
tmp = (fma(fma(fma(-0.056243896484375, (x * x), 0.0673828125), (x * x), -0.0859375), (x * x), 0.125) * x) * x;
} else {
/* Rationalized form of 1 - sqrt(t_0). */
tmp = (1.0 - t_0) / (sqrt(t_0) + 1.0);
}
return tmp;
}
function code(x) t_0 = Float64(Float64(0.5 / x) + 0.5) tmp = 0.0 if (hypot(1.0, x) <= 2.0) tmp = Float64(Float64(fma(fma(fma(-0.056243896484375, Float64(x * x), 0.0673828125), Float64(x * x), -0.0859375), Float64(x * x), 0.125) * x) * x); else tmp = Float64(Float64(1.0 - t_0) / Float64(sqrt(t_0) + 1.0)); end return tmp end
code[x_] := Block[{t$95$0 = N[(N[(0.5 / x), $MachinePrecision] + 0.5), $MachinePrecision]}, If[LessEqual[N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision], 2.0], N[(N[(N[(N[(N[(-0.056243896484375 * N[(x * x), $MachinePrecision] + 0.0673828125), $MachinePrecision] * N[(x * x), $MachinePrecision] + -0.0859375), $MachinePrecision] * N[(x * x), $MachinePrecision] + 0.125), $MachinePrecision] * x), $MachinePrecision] * x), $MachinePrecision], N[(N[(1.0 - t$95$0), $MachinePrecision] / N[(N[Sqrt[t$95$0], $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{0.5}{x} + 0.5\\
\mathbf{if}\;\mathsf{hypot}\left(1, x\right) \leq 2:\\
\;\;\;\;\left(\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(-0.056243896484375, x \cdot x, 0.0673828125\right), x \cdot x, -0.0859375\right), x \cdot x, 0.125\right) \cdot x\right) \cdot x\\
\mathbf{else}:\\
\;\;\;\;\frac{1 - t\_0}{\sqrt{t\_0} + 1}\\
\end{array}
\end{array}
if (hypot.f64 #s(literal 1 binary64) x) < 2Initial program 48.8%
Applied rewrites48.9%
Taylor expanded in x around 0
*-commutativeN/A
unpow2N/A
associate-*r*N/A
lower-*.f64N/A
Applied rewrites99.0%
if 2 < (hypot.f64 #s(literal 1 binary64) x) Initial program 98.5%
Taylor expanded in x around inf
+-commutativeN/A
lower-+.f64N/A
associate-*r/N/A
metadata-evalN/A
lower-/.f6496.4
Applied rewrites96.4%
lift--.f64N/A
flip--N/A
lower-/.f64N/A
Applied rewrites97.9%
(FPCore (x)
:precision binary64
(if (<= (hypot 1.0 x) 2.0)
(*
(*
(fma
(fma (fma -0.056243896484375 (* x x) 0.0673828125) (* x x) -0.0859375)
(* x x)
0.125)
x)
x)
(- 1.0 (sqrt (- 0.5 (/ (- (/ 0.25 (* x x)) 0.5) x))))))
/* Herbie alternative: large-|x| branch keeps the direct 1 - sqrt(...) form
 * with the radicand Taylor-expanded around infinity as
 * 0.5 - (0.25/x^2 - 0.5)/x.
 * NOTE(review): the expansion contains odd powers of x while the exact
 * result is even — presumably intended for x > 0; confirm against the
 * sampled domain. */
double code(double x) {
double tmp;
if (hypot(1.0, x) <= 2.0) {
/* Near 0: degree-8 even Taylor polynomial. */
tmp = (fma(fma(fma(-0.056243896484375, (x * x), 0.0673828125), (x * x), -0.0859375), (x * x), 0.125) * x) * x;
} else {
tmp = 1.0 - sqrt((0.5 - (((0.25 / (x * x)) - 0.5) / x)));
}
return tmp;
}
function code(x) tmp = 0.0 if (hypot(1.0, x) <= 2.0) tmp = Float64(Float64(fma(fma(fma(-0.056243896484375, Float64(x * x), 0.0673828125), Float64(x * x), -0.0859375), Float64(x * x), 0.125) * x) * x); else tmp = Float64(1.0 - sqrt(Float64(0.5 - Float64(Float64(Float64(0.25 / Float64(x * x)) - 0.5) / x)))); end return tmp end
code[x_] := If[LessEqual[N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision], 2.0], N[(N[(N[(N[(N[(-0.056243896484375 * N[(x * x), $MachinePrecision] + 0.0673828125), $MachinePrecision] * N[(x * x), $MachinePrecision] + -0.0859375), $MachinePrecision] * N[(x * x), $MachinePrecision] + 0.125), $MachinePrecision] * x), $MachinePrecision] * x), $MachinePrecision], N[(1.0 - N[Sqrt[N[(0.5 - N[(N[(N[(0.25 / N[(x * x), $MachinePrecision]), $MachinePrecision] - 0.5), $MachinePrecision] / x), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\mathsf{hypot}\left(1, x\right) \leq 2:\\
\;\;\;\;\left(\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(-0.056243896484375, x \cdot x, 0.0673828125\right), x \cdot x, -0.0859375\right), x \cdot x, 0.125\right) \cdot x\right) \cdot x\\
\mathbf{else}:\\
\;\;\;\;1 - \sqrt{0.5 - \frac{\frac{0.25}{x \cdot x} - 0.5}{x}}\\
\end{array}
\end{array}
if (hypot.f64 #s(literal 1 binary64) x) < 2Initial program 48.8%
Applied rewrites48.9%
Taylor expanded in x around 0
*-commutativeN/A
unpow2N/A
associate-*r*N/A
lower-*.f64N/A
Applied rewrites99.0%
if 2 < (hypot.f64 #s(literal 1 binary64) x) Initial program 98.5%
lift-*.f64N/A
lift-+.f64N/A
distribute-lft-inN/A
metadata-evalN/A
lift-/.f64N/A
frac-2negN/A
metadata-evalN/A
associate-*r/N/A
div-invN/A
metadata-evalN/A
metadata-evalN/A
inv-powN/A
metadata-evalN/A
metadata-evalN/A
pow-powN/A
pow2N/A
sqr-negN/A
pow-prod-downN/A
pow-sqrN/A
metadata-evalN/A
metadata-evalN/A
inv-powN/A
lift-/.f64N/A
Applied rewrites98.5%
Taylor expanded in x around inf
lower-/.f64N/A
lower--.f64N/A
associate-*r/N/A
metadata-evalN/A
lower-/.f64N/A
unpow2N/A
lower-*.f6496.5
Applied rewrites96.5%
(FPCore (x)
:precision binary64
(if (<= (hypot 1.0 x) 2.0)
(*
(*
(fma
(fma (fma -0.056243896484375 (* x x) 0.0673828125) (* x x) -0.0859375)
(* x x)
0.125)
x)
x)
(- 1.0 (sqrt (+ (/ 0.5 x) 0.5)))))
/* Herbie alternative: large-|x| branch uses the first-order expansion of the
 * radicand around infinity, 0.5/x + 0.5, inside the direct 1 - sqrt(...).
 * NOTE(review): 0.5/x is odd in x while the exact result is even —
 * presumably intended for x > 0; confirm against the sampled domain. */
double code(double x) {
double tmp;
if (hypot(1.0, x) <= 2.0) {
/* Near 0: degree-8 even Taylor polynomial. */
tmp = (fma(fma(fma(-0.056243896484375, (x * x), 0.0673828125), (x * x), -0.0859375), (x * x), 0.125) * x) * x;
} else {
tmp = 1.0 - sqrt(((0.5 / x) + 0.5));
}
return tmp;
}
function code(x) tmp = 0.0 if (hypot(1.0, x) <= 2.0) tmp = Float64(Float64(fma(fma(fma(-0.056243896484375, Float64(x * x), 0.0673828125), Float64(x * x), -0.0859375), Float64(x * x), 0.125) * x) * x); else tmp = Float64(1.0 - sqrt(Float64(Float64(0.5 / x) + 0.5))); end return tmp end
code[x_] := If[LessEqual[N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision], 2.0], N[(N[(N[(N[(N[(-0.056243896484375 * N[(x * x), $MachinePrecision] + 0.0673828125), $MachinePrecision] * N[(x * x), $MachinePrecision] + -0.0859375), $MachinePrecision] * N[(x * x), $MachinePrecision] + 0.125), $MachinePrecision] * x), $MachinePrecision] * x), $MachinePrecision], N[(1.0 - N[Sqrt[N[(N[(0.5 / x), $MachinePrecision] + 0.5), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\mathsf{hypot}\left(1, x\right) \leq 2:\\
\;\;\;\;\left(\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(-0.056243896484375, x \cdot x, 0.0673828125\right), x \cdot x, -0.0859375\right), x \cdot x, 0.125\right) \cdot x\right) \cdot x\\
\mathbf{else}:\\
\;\;\;\;1 - \sqrt{\frac{0.5}{x} + 0.5}\\
\end{array}
\end{array}
if (hypot.f64 #s(literal 1 binary64) x) < 2Initial program 48.8%
Applied rewrites48.9%
Taylor expanded in x around 0
*-commutativeN/A
unpow2N/A
associate-*r*N/A
lower-*.f64N/A
Applied rewrites99.0%
if 2 < (hypot.f64 #s(literal 1 binary64) x) Initial program 98.5%
Taylor expanded in x around inf
+-commutativeN/A
lower-+.f64N/A
associate-*r/N/A
metadata-evalN/A
lower-/.f6496.4
Applied rewrites96.4%
(FPCore (x) :precision binary64 (if (<= (hypot 1.0 x) 2.0) (* (* (fma (fma 0.0673828125 (* x x) -0.0859375) (* x x) 0.125) x) x) (- 1.0 (sqrt (+ (/ 0.5 x) 0.5)))))
/* Herbie alternative: same as the previous variant but with the cheaper
 * degree-6 even Taylor polynomial in the small-|x| branch.
 * NOTE(review): 0.5/x in the else branch is odd in x while the exact result
 * is even — presumably intended for x > 0; confirm against the sampled
 * domain. */
double code(double x) {
double tmp;
if (hypot(1.0, x) <= 2.0) {
tmp = (fma(fma(0.0673828125, (x * x), -0.0859375), (x * x), 0.125) * x) * x;
} else {
tmp = 1.0 - sqrt(((0.5 / x) + 0.5));
}
return tmp;
}
# Herbie alternative: degree-6 fma polynomial near zero, 1 - sqrt(0.5/x + 0.5) far field.
# (Reformatted from an invalid single-line rendering; logic unchanged.)
function code(x)
    tmp = 0.0
    if (hypot(1.0, x) <= 2.0)
        tmp = Float64(Float64(fma(fma(0.0673828125, Float64(x * x), -0.0859375), Float64(x * x), 0.125) * x) * x)
    else
        tmp = Float64(1.0 - sqrt(Float64(Float64(0.5 / x) + 0.5)))
    end
    return tmp
end
code[x_] := If[LessEqual[N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision], 2.0], N[(N[(N[(N[(0.0673828125 * N[(x * x), $MachinePrecision] + -0.0859375), $MachinePrecision] * N[(x * x), $MachinePrecision] + 0.125), $MachinePrecision] * x), $MachinePrecision] * x), $MachinePrecision], N[(1.0 - N[Sqrt[N[(N[(0.5 / x), $MachinePrecision] + 0.5), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\mathsf{hypot}\left(1, x\right) \leq 2:\\
\;\;\;\;\left(\mathsf{fma}\left(\mathsf{fma}\left(0.0673828125, x \cdot x, -0.0859375\right), x \cdot x, 0.125\right) \cdot x\right) \cdot x\\
\mathbf{else}:\\
\;\;\;\;1 - \sqrt{\frac{0.5}{x} + 0.5}\\
\end{array}
\end{array}
if (hypot.f64 #s(literal 1 binary64) x) < 2Initial program 48.8%
Applied rewrites48.9%
Taylor expanded in x around 0
*-commutativeN/A
unpow2N/A
associate-*r*N/A
lower-*.f64N/A
lower-*.f64N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
sub-negN/A
metadata-evalN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
unpow2N/A
lower-*.f6498.8
Applied rewrites98.8%
if 2 < (hypot.f64 #s(literal 1 binary64) x) Initial program 98.5%
Taylor expanded in x around inf
+-commutativeN/A
lower-+.f64N/A
associate-*r/N/A
metadata-evalN/A
lower-/.f6496.4
Applied rewrites96.4%
(FPCore (x) :precision binary64 (if (<= (/ 1.0 (hypot 1.0 x)) 0.2) (- 1.0 (sqrt 0.5)) (* (* (fma -0.0859375 (* x x) 0.125) x) x)))
double code(double x) {
double tmp;
if ((1.0 / hypot(1.0, x)) <= 0.2) {
tmp = 1.0 - sqrt(0.5);
} else {
tmp = (fma(-0.0859375, (x * x), 0.125) * x) * x;
}
return tmp;
}
# Herbie alternative: far field (1/hypot(1,x) <= 0.2) -> constant 1 - sqrt(0.5);
# near field -> degree-4 fma polynomial.
# (Reformatted from an invalid single-line rendering; logic unchanged.)
function code(x)
    tmp = 0.0
    if (Float64(1.0 / hypot(1.0, x)) <= 0.2)
        tmp = Float64(1.0 - sqrt(0.5))
    else
        tmp = Float64(Float64(fma(-0.0859375, Float64(x * x), 0.125) * x) * x)
    end
    return tmp
end
code[x_] := If[LessEqual[N[(1.0 / N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision]), $MachinePrecision], 0.2], N[(1.0 - N[Sqrt[0.5], $MachinePrecision]), $MachinePrecision], N[(N[(N[(-0.0859375 * N[(x * x), $MachinePrecision] + 0.125), $MachinePrecision] * x), $MachinePrecision] * x), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\frac{1}{\mathsf{hypot}\left(1, x\right)} \leq 0.2:\\
\;\;\;\;1 - \sqrt{0.5}\\
\mathbf{else}:\\
\;\;\;\;\left(\mathsf{fma}\left(-0.0859375, x \cdot x, 0.125\right) \cdot x\right) \cdot x\\
\end{array}
\end{array}
if (/.f64 #s(literal 1 binary64) (hypot.f64 #s(literal 1 binary64) x)) < 0.20000000000000001Initial program 98.5%
Taylor expanded in x around inf
Applied rewrites95.5%
if 0.20000000000000001 < (/.f64 #s(literal 1 binary64) (hypot.f64 #s(literal 1 binary64) x)) Initial program 48.8%
Applied rewrites48.9%
Taylor expanded in x around 0
*-commutativeN/A
unpow2N/A
associate-*r*N/A
lower-*.f64N/A
lower-*.f64N/A
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6498.3
Applied rewrites98.3%
(FPCore (x) :precision binary64 (if (<= (hypot 1.0 x) 2.0) (* (* (fma -0.0859375 (* x x) 0.125) x) x) (- 1.0 (sqrt (+ (/ 0.5 x) 0.5)))))
double code(double x) {
double tmp;
if (hypot(1.0, x) <= 2.0) {
tmp = (fma(-0.0859375, (x * x), 0.125) * x) * x;
} else {
tmp = 1.0 - sqrt(((0.5 / x) + 0.5));
}
return tmp;
}
# Herbie alternative: degree-4 fma polynomial near zero, 1 - sqrt(0.5/x + 0.5) far field.
# (Reformatted from an invalid single-line rendering; logic unchanged.)
function code(x)
    tmp = 0.0
    if (hypot(1.0, x) <= 2.0)
        tmp = Float64(Float64(fma(-0.0859375, Float64(x * x), 0.125) * x) * x)
    else
        tmp = Float64(1.0 - sqrt(Float64(Float64(0.5 / x) + 0.5)))
    end
    return tmp
end
code[x_] := If[LessEqual[N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision], 2.0], N[(N[(N[(-0.0859375 * N[(x * x), $MachinePrecision] + 0.125), $MachinePrecision] * x), $MachinePrecision] * x), $MachinePrecision], N[(1.0 - N[Sqrt[N[(N[(0.5 / x), $MachinePrecision] + 0.5), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\mathsf{hypot}\left(1, x\right) \leq 2:\\
\;\;\;\;\left(\mathsf{fma}\left(-0.0859375, x \cdot x, 0.125\right) \cdot x\right) \cdot x\\
\mathbf{else}:\\
\;\;\;\;1 - \sqrt{\frac{0.5}{x} + 0.5}\\
\end{array}
\end{array}
if (hypot.f64 #s(literal 1 binary64) x) < 2Initial program 48.8%
Applied rewrites48.9%
Taylor expanded in x around 0
*-commutativeN/A
unpow2N/A
associate-*r*N/A
lower-*.f64N/A
lower-*.f64N/A
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6498.3
Applied rewrites98.3%
if 2 < (hypot.f64 #s(literal 1 binary64) x) Initial program 98.5%
Taylor expanded in x around inf
+-commutativeN/A
lower-+.f64N/A
associate-*r/N/A
metadata-evalN/A
lower-/.f6496.4
Applied rewrites96.4%
(FPCore (x) :precision binary64 (if (<= (hypot 1.0 x) 2.0) (* (* (fma -0.0859375 (* x x) 0.125) x) x) (/ 0.5 (+ (sqrt 0.5) 1.0))))
double code(double x) {
double tmp;
if (hypot(1.0, x) <= 2.0) {
tmp = (fma(-0.0859375, (x * x), 0.125) * x) * x;
} else {
tmp = 0.5 / (sqrt(0.5) + 1.0);
}
return tmp;
}
# Herbie alternative: degree-4 fma polynomial near zero; constant limit
# 0.5 / (sqrt(0.5) + 1) in the far field.
# (Reformatted from an invalid single-line rendering; logic unchanged.)
function code(x)
    tmp = 0.0
    if (hypot(1.0, x) <= 2.0)
        tmp = Float64(Float64(fma(-0.0859375, Float64(x * x), 0.125) * x) * x)
    else
        tmp = Float64(0.5 / Float64(sqrt(0.5) + 1.0))
    end
    return tmp
end
code[x_] := If[LessEqual[N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision], 2.0], N[(N[(N[(-0.0859375 * N[(x * x), $MachinePrecision] + 0.125), $MachinePrecision] * x), $MachinePrecision] * x), $MachinePrecision], N[(0.5 / N[(N[Sqrt[0.5], $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\mathsf{hypot}\left(1, x\right) \leq 2:\\
\;\;\;\;\left(\mathsf{fma}\left(-0.0859375, x \cdot x, 0.125\right) \cdot x\right) \cdot x\\
\mathbf{else}:\\
\;\;\;\;\frac{0.5}{\sqrt{0.5} + 1}\\
\end{array}
\end{array}
if (hypot.f64 #s(literal 1 binary64) x) < 2Initial program 48.8%
Applied rewrites48.9%
Taylor expanded in x around 0
*-commutativeN/A
unpow2N/A
associate-*r*N/A
lower-*.f64N/A
lower-*.f64N/A
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6498.3
Applied rewrites98.3%
if 2 < (hypot.f64 #s(literal 1 binary64) x) Initial program 98.5%
Applied rewrites100.0%
Taylor expanded in x around inf
lower-/.f64N/A
+-commutativeN/A
lower-+.f64N/A
lower-sqrt.f6497.0
Applied rewrites97.0%
(FPCore (x) :precision binary64 (if (<= (/ 1.0 (hypot 1.0 x)) 0.2) (- 1.0 (sqrt 0.5)) (* 0.125 (* x x))))
double code(double x) {
double tmp;
if ((1.0 / hypot(1.0, x)) <= 0.2) {
tmp = 1.0 - sqrt(0.5);
} else {
tmp = 0.125 * (x * x);
}
return tmp;
}
/** Herbie alternative: far field (1/hypot(1,x) <= 0.2) returns the constant
 *  limit 1 - sqrt(0.5); near field returns the leading Taylor term x*x / 8. */
public static double code(double x) {
    final boolean farField = (1.0 / Math.hypot(1.0, x)) <= 0.2;
    return farField ? (1.0 - Math.sqrt(0.5)) : (0.125 * (x * x));
}
def code(x):
    """Herbie alternative for 1 - sqrt(0.5*(1 + 1/hypot(1, x))).

    Far field (1/hypot(1, x) <= 0.2): constant limit 1 - sqrt(0.5).
    Near field: leading Taylor term x*x / 8.
    (Restored from an invalid single-line rendering; logic unchanged.)
    """
    if (1.0 / math.hypot(1.0, x)) <= 0.2:
        return 1.0 - math.sqrt(0.5)
    return 0.125 * (x * x)
# Herbie alternative: far field (1/hypot(1,x) <= 0.2) -> constant 1 - sqrt(0.5);
# near field -> leading Taylor term x^2 / 8.
# (Reformatted from an invalid single-line rendering; logic unchanged.)
function code(x)
    tmp = 0.0
    if (Float64(1.0 / hypot(1.0, x)) <= 0.2)
        tmp = Float64(1.0 - sqrt(0.5))
    else
        tmp = Float64(0.125 * Float64(x * x))
    end
    return tmp
end
% Herbie alternative: far field (1/hypot(1,x) <= 0.2) -> constant 1 - sqrt(0.5); near field -> x^2/8.
function tmp_2 = code(x) tmp = 0.0; if ((1.0 / hypot(1.0, x)) <= 0.2) tmp = 1.0 - sqrt(0.5); else tmp = 0.125 * (x * x); end tmp_2 = tmp; end
code[x_] := If[LessEqual[N[(1.0 / N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision]), $MachinePrecision], 0.2], N[(1.0 - N[Sqrt[0.5], $MachinePrecision]), $MachinePrecision], N[(0.125 * N[(x * x), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\frac{1}{\mathsf{hypot}\left(1, x\right)} \leq 0.2:\\
\;\;\;\;1 - \sqrt{0.5}\\
\mathbf{else}:\\
\;\;\;\;0.125 \cdot \left(x \cdot x\right)\\
\end{array}
\end{array}
if (/.f64 #s(literal 1 binary64) (hypot.f64 #s(literal 1 binary64) x)) < 0.20000000000000001Initial program 98.5%
Taylor expanded in x around inf
Applied rewrites95.5%
if 0.20000000000000001 < (/.f64 #s(literal 1 binary64) (hypot.f64 #s(literal 1 binary64) x)) Initial program 48.8%
Applied rewrites48.9%
Taylor expanded in x around 0
lower-*.f64N/A
unpow2N/A
lower-*.f6497.5
Applied rewrites97.5%
(FPCore (x) :precision binary64 (* 0.125 (* x x)))
/* Leading Taylor term x*x / 8 of 1 - sqrt(0.5*(1 + 1/hypot(1,x))) around x = 0. */
double code(double x) {
    const double square = x * x;
    return 0.125 * square;
}
! Leading Taylor term x*x / 8 of 1 - sqrt(0.5*(1 + 1/hypot(1,x))) around x = 0 (Herbie-generated).
real(8) function code(x)
! x: input value of the simplified Givens-rotation expression
real(8), intent (in) :: x
code = 0.125d0 * (x * x)
end function
/** Leading Taylor term x*x / 8 of 1 - sqrt(0.5*(1 + 1/hypot(1,x))) around x = 0. */
public static double code(double x) {
    final double square = x * x;
    return 0.125 * square;
}
def code(x):
    """Leading Taylor term x*x / 8 of the original expression around x = 0."""
    square = x * x
    return 0.125 * square
# Leading Taylor term x^2 / 8 of the original expression around x = 0.
function code(x)
    square = Float64(x * x)
    return Float64(0.125 * square)
end
% Leading Taylor term x^2 / 8 of the original expression around x = 0 (Herbie-generated).
function tmp = code(x) tmp = 0.125 * (x * x); end
(* Leading Taylor term x^2 / 8 of the original expression around x = 0 (Herbie-generated). *)
code[x_] := N[(0.125 * N[(x * x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
0.125 \cdot \left(x \cdot x\right)
\end{array}
Initial program 75.0%
Applied rewrites75.8%
Taylor expanded in x around 0
lower-*.f64N/A
unpow2N/A
lower-*.f6448.4
Applied rewrites48.4%
(FPCore (x) :precision binary64 (- 1.0 1.0))
/* Degenerate Herbie alternative: 1 - 1 == 0 for every input; x is ignored. */
double code(double x) {
    (void)x;  /* input intentionally unused */
    return 1.0 - 1.0;
}
! Degenerate Herbie alternative: 1 - 1 == 0 for every input (x is unused).
real(8) function code(x)
real(8), intent (in) :: x
! result is identically zero
code = 1.0d0 - 1.0d0
end function
/** Degenerate Herbie alternative: 1 - 1 == 0 for every input; x is ignored. */
public static double code(double x) {
    final double zero = 1.0 - 1.0;
    return zero;
}
def code(x):
    """Degenerate Herbie alternative: always 0.0 (1 - 1); x is ignored."""
    zero = 1.0 - 1.0
    return zero
# Degenerate Herbie alternative: always 0.0 (1 - 1); x is ignored.
function code(x)
    return Float64(1.0 - 1.0)
end
% Degenerate Herbie alternative: 1 - 1 == 0 for every input; x is unused (Herbie-generated).
function tmp = code(x) tmp = 1.0 - 1.0; end
(* Degenerate Herbie alternative: 1 - 1 == 0 for every input; x is unused (Herbie-generated). *)
code[x_] := N[(1.0 - 1.0), $MachinePrecision]
\begin{array}{l}
\\
1 - 1
\end{array}
Initial program 75.0%
lift-*.f64N/A
lift-+.f64N/A
distribute-lft-inN/A
metadata-evalN/A
lift-/.f64N/A
frac-2negN/A
metadata-evalN/A
associate-*r/N/A
div-invN/A
metadata-evalN/A
metadata-evalN/A
inv-powN/A
metadata-evalN/A
metadata-evalN/A
pow-powN/A
pow2N/A
sqr-negN/A
pow-prod-downN/A
pow-sqrN/A
metadata-evalN/A
metadata-evalN/A
inv-powN/A
lift-/.f64N/A
Applied rewrites75.0%
Taylor expanded in x around 0
Applied rewrites23.2%
herbie shell --seed 2024250
(FPCore (x)
:name "Given's Rotation SVD example, simplified"
:precision binary64
(- 1.0 (sqrt (* 0.5 (+ 1.0 (/ 1.0 (hypot 1.0 x)))))))