
(FPCore (x) :precision binary64 (- 1.0 (sqrt (* 0.5 (+ 1.0 (/ 1.0 (hypot 1.0 x)))))))
double code(double x) {
return 1.0 - sqrt((0.5 * (1.0 + (1.0 / hypot(1.0, x)))));
}
/** Computes 1 - sqrt(0.5 * (1 + 1/hypot(1, x))) in double precision. */
public static double code(double x) {
    final double dist = Math.hypot(1.0, x);
    final double radicand = 0.5 * (1.0 + 1.0 / dist);
    return 1.0 - Math.sqrt(radicand);
}
def code(x):
    """Return 1 - sqrt(0.5 * (1 + 1/hypot(1, x))), the direct formula."""
    dist = math.hypot(1.0, x)
    return 1.0 - math.sqrt(0.5 * (1.0 + (1.0 / dist)))
# 1 - sqrt(0.5 * (1 + 1/hypot(1, x))), evaluated in Float64 step by step.
function code(x)
    recip = Float64(1.0 / hypot(1.0, x))
    summed = Float64(1.0 + recip)
    halved = Float64(0.5 * summed)
    return Float64(1.0 - sqrt(halved))
end
% Direct formula: 1 - sqrt(0.5 * (1 + 1/hypot(1, x))).
function tmp = code(x)
    dist = hypot(1.0, x);
    tmp = 1.0 - sqrt((0.5 * (1.0 + (1.0 / dist))));
end
(* 1 - Sqrt[0.5 (1 + 1/hypot(1, x))] at $MachinePrecision; hypot is expanded as Sqrt[1^2 + x^2].  Machine-generated: every operation is wrapped in N[..., $MachinePrecision]. *)
code[x_] := N[(1.0 - N[Sqrt[N[(0.5 * N[(1.0 + N[(1.0 / N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
1 - \sqrt{0.5 \cdot \left(1 + \frac{1}{\mathsf{hypot}\left(1, x\right)}\right)}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 9 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x) :precision binary64 (- 1.0 (sqrt (* 0.5 (+ 1.0 (/ 1.0 (hypot 1.0 x)))))))
double code(double x) {
return 1.0 - sqrt((0.5 * (1.0 + (1.0 / hypot(1.0, x)))));
}
/** Reference implementation: 1 - sqrt(0.5 * (1 + 1/hypot(1, x))). */
public static double code(double x) {
    double inv = 1.0 / Math.hypot(1.0, x);
    return 1.0 - Math.sqrt(0.5 * (1.0 + inv));
}
def code(x):
    """Direct formula 1 - sqrt(0.5 * (1 + 1/hypot(1, x)))."""
    inv = 1.0 / math.hypot(1.0, x)
    return 1.0 - math.sqrt(0.5 * (1.0 + inv))
function code(x) return Float64(1.0 - sqrt(Float64(0.5 * Float64(1.0 + Float64(1.0 / hypot(1.0, x)))))) end
function tmp = code(x) tmp = 1.0 - sqrt((0.5 * (1.0 + (1.0 / hypot(1.0, x))))); end
(* 1 - Sqrt[0.5 (1 + 1/hypot(1, x))] at $MachinePrecision; hypot is expanded as Sqrt[1^2 + x^2].  Machine-generated: every operation is wrapped in N[..., $MachinePrecision]. *)
code[x_] := N[(1.0 - N[Sqrt[N[(0.5 * N[(1.0 + N[(1.0 / N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
1 - \sqrt{0.5 \cdot \left(1 + \frac{1}{\mathsf{hypot}\left(1, x\right)}\right)}
\end{array}
x_m = (fabs.f64 x)
(FPCore (x_m)
:precision binary64
(if (<= (hypot 1.0 x_m) 2.0)
(*
(* (fma (fma 0.0673828125 (* x_m x_m) -0.0859375) (* x_m x_m) 0.125) x_m)
x_m)
(/
(- (- (/ 0.5 x_m) 0.5))
(+ (sqrt (- 0.5 (/ -0.5 (hypot 1.0 x_m)))) 1.0))))x_m = fabs(x);
double code(double x_m) {
double tmp;
if (hypot(1.0, x_m) <= 2.0) {
tmp = (fma(fma(0.0673828125, (x_m * x_m), -0.0859375), (x_m * x_m), 0.125) * x_m) * x_m;
} else {
tmp = -((0.5 / x_m) - 0.5) / (sqrt((0.5 - (-0.5 / hypot(1.0, x_m)))) + 1.0);
}
return tmp;
}
x_m = abs(x) function code(x_m) tmp = 0.0 if (hypot(1.0, x_m) <= 2.0) tmp = Float64(Float64(fma(fma(0.0673828125, Float64(x_m * x_m), -0.0859375), Float64(x_m * x_m), 0.125) * x_m) * x_m); else tmp = Float64(Float64(-Float64(Float64(0.5 / x_m) - 0.5)) / Float64(sqrt(Float64(0.5 - Float64(-0.5 / hypot(1.0, x_m)))) + 1.0)); end return tmp end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := If[LessEqual[N[Sqrt[1.0 ^ 2 + x$95$m ^ 2], $MachinePrecision], 2.0], N[(N[(N[(N[(0.0673828125 * N[(x$95$m * x$95$m), $MachinePrecision] + -0.0859375), $MachinePrecision] * N[(x$95$m * x$95$m), $MachinePrecision] + 0.125), $MachinePrecision] * x$95$m), $MachinePrecision] * x$95$m), $MachinePrecision], N[((-N[(N[(0.5 / x$95$m), $MachinePrecision] - 0.5), $MachinePrecision]) / N[(N[Sqrt[N[(0.5 - N[(-0.5 / N[Sqrt[1.0 ^ 2 + x$95$m ^ 2], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
\mathbf{if}\;\mathsf{hypot}\left(1, x\_m\right) \leq 2:\\
\;\;\;\;\left(\mathsf{fma}\left(\mathsf{fma}\left(0.0673828125, x\_m \cdot x\_m, -0.0859375\right), x\_m \cdot x\_m, 0.125\right) \cdot x\_m\right) \cdot x\_m\\
\mathbf{else}:\\
\;\;\;\;\frac{-\left(\frac{0.5}{x\_m} - 0.5\right)}{\sqrt{0.5 - \frac{-0.5}{\mathsf{hypot}\left(1, x\_m\right)}} + 1}\\
\end{array}
\end{array}
if (hypot.f64 #s(literal 1 binary64) x) < 2Initial program 59.4%
Applied rewrites59.5%
Taylor expanded in x around 0
*-commutativeN/A
unpow2N/A
associate-*r*N/A
lower-*.f64N/A
lower-*.f64N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
sub-negN/A
metadata-evalN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
unpow2N/A
lower-*.f64100.0
Applied rewrites100.0%
if 2 < (hypot.f64 #s(literal 1 binary64) x) Initial program 98.5%
Applied rewrites100.0%
Taylor expanded in x around inf
lower--.f64N/A
associate-*r/N/A
metadata-evalN/A
lower-/.f6499.8
Applied rewrites99.8%
Final simplification99.9%
x_m = (fabs.f64 x)
(FPCore (x_m)
:precision binary64
(if (<= (hypot 1.0 x_m) 2.0)
(*
(* (fma (fma 0.0673828125 (* x_m x_m) -0.0859375) (* x_m x_m) 0.125) x_m)
x_m)
(/ (- (/ 0.5 x_m) 0.5) (- -1.0 (sqrt 0.5)))))x_m = fabs(x);
double code(double x_m) {
double tmp;
if (hypot(1.0, x_m) <= 2.0) {
tmp = (fma(fma(0.0673828125, (x_m * x_m), -0.0859375), (x_m * x_m), 0.125) * x_m) * x_m;
} else {
tmp = ((0.5 / x_m) - 0.5) / (-1.0 - sqrt(0.5));
}
return tmp;
}
x_m = abs(x) function code(x_m) tmp = 0.0 if (hypot(1.0, x_m) <= 2.0) tmp = Float64(Float64(fma(fma(0.0673828125, Float64(x_m * x_m), -0.0859375), Float64(x_m * x_m), 0.125) * x_m) * x_m); else tmp = Float64(Float64(Float64(0.5 / x_m) - 0.5) / Float64(-1.0 - sqrt(0.5))); end return tmp end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := If[LessEqual[N[Sqrt[1.0 ^ 2 + x$95$m ^ 2], $MachinePrecision], 2.0], N[(N[(N[(N[(0.0673828125 * N[(x$95$m * x$95$m), $MachinePrecision] + -0.0859375), $MachinePrecision] * N[(x$95$m * x$95$m), $MachinePrecision] + 0.125), $MachinePrecision] * x$95$m), $MachinePrecision] * x$95$m), $MachinePrecision], N[(N[(N[(0.5 / x$95$m), $MachinePrecision] - 0.5), $MachinePrecision] / N[(-1.0 - N[Sqrt[0.5], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
\mathbf{if}\;\mathsf{hypot}\left(1, x\_m\right) \leq 2:\\
\;\;\;\;\left(\mathsf{fma}\left(\mathsf{fma}\left(0.0673828125, x\_m \cdot x\_m, -0.0859375\right), x\_m \cdot x\_m, 0.125\right) \cdot x\_m\right) \cdot x\_m\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{0.5}{x\_m} - 0.5}{-1 - \sqrt{0.5}}\\
\end{array}
\end{array}
if (hypot.f64 #s(literal 1 binary64) x) < 2Initial program 59.4%
Applied rewrites59.5%
Taylor expanded in x around 0
*-commutativeN/A
unpow2N/A
associate-*r*N/A
lower-*.f64N/A
lower-*.f64N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
sub-negN/A
metadata-evalN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
unpow2N/A
lower-*.f64100.0
Applied rewrites100.0%
if 2 < (hypot.f64 #s(literal 1 binary64) x) Initial program 98.5%
Applied rewrites100.0%
Taylor expanded in x around inf
lower--.f64N/A
associate-*r/N/A
metadata-evalN/A
lower-/.f6499.8
Applied rewrites99.8%
Taylor expanded in x around inf
distribute-lft-inN/A
metadata-evalN/A
mul-1-negN/A
unsub-negN/A
lower--.f64N/A
lower-sqrt.f6499.4
Applied rewrites99.4%
x_m = (fabs.f64 x)
(FPCore (x_m)
:precision binary64
(if (<= (hypot 1.0 x_m) 2.0)
(*
(* (fma (fma 0.0673828125 (* x_m x_m) -0.0859375) (* x_m x_m) 0.125) x_m)
x_m)
(/ 0.5 (+ (sqrt 0.5) 1.0))))x_m = fabs(x);
double code(double x_m) {
double tmp;
if (hypot(1.0, x_m) <= 2.0) {
tmp = (fma(fma(0.0673828125, (x_m * x_m), -0.0859375), (x_m * x_m), 0.125) * x_m) * x_m;
} else {
tmp = 0.5 / (sqrt(0.5) + 1.0);
}
return tmp;
}
x_m = abs(x) function code(x_m) tmp = 0.0 if (hypot(1.0, x_m) <= 2.0) tmp = Float64(Float64(fma(fma(0.0673828125, Float64(x_m * x_m), -0.0859375), Float64(x_m * x_m), 0.125) * x_m) * x_m); else tmp = Float64(0.5 / Float64(sqrt(0.5) + 1.0)); end return tmp end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := If[LessEqual[N[Sqrt[1.0 ^ 2 + x$95$m ^ 2], $MachinePrecision], 2.0], N[(N[(N[(N[(0.0673828125 * N[(x$95$m * x$95$m), $MachinePrecision] + -0.0859375), $MachinePrecision] * N[(x$95$m * x$95$m), $MachinePrecision] + 0.125), $MachinePrecision] * x$95$m), $MachinePrecision] * x$95$m), $MachinePrecision], N[(0.5 / N[(N[Sqrt[0.5], $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
\mathbf{if}\;\mathsf{hypot}\left(1, x\_m\right) \leq 2:\\
\;\;\;\;\left(\mathsf{fma}\left(\mathsf{fma}\left(0.0673828125, x\_m \cdot x\_m, -0.0859375\right), x\_m \cdot x\_m, 0.125\right) \cdot x\_m\right) \cdot x\_m\\
\mathbf{else}:\\
\;\;\;\;\frac{0.5}{\sqrt{0.5} + 1}\\
\end{array}
\end{array}
if (hypot.f64 #s(literal 1 binary64) x) < 2Initial program 59.4%
Applied rewrites59.5%
Taylor expanded in x around 0
*-commutativeN/A
unpow2N/A
associate-*r*N/A
lower-*.f64N/A
lower-*.f64N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
sub-negN/A
metadata-evalN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
unpow2N/A
lower-*.f64100.0
Applied rewrites100.0%
if 2 < (hypot.f64 #s(literal 1 binary64) x) Initial program 98.5%
Applied rewrites100.0%
Taylor expanded in x around inf
lower-/.f64N/A
+-commutativeN/A
lower-+.f64N/A
lower-sqrt.f6499.4
Applied rewrites99.4%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (if (<= (hypot 1.0 x_m) 2.0) (* (fma -0.0859375 (* x_m x_m) 0.125) (* x_m x_m)) (/ 0.5 (+ (sqrt 0.5) 1.0))))
x_m = fabs(x);
double code(double x_m) {
double tmp;
if (hypot(1.0, x_m) <= 2.0) {
tmp = fma(-0.0859375, (x_m * x_m), 0.125) * (x_m * x_m);
} else {
tmp = 0.5 / (sqrt(0.5) + 1.0);
}
return tmp;
}
x_m = abs(x) function code(x_m) tmp = 0.0 if (hypot(1.0, x_m) <= 2.0) tmp = Float64(fma(-0.0859375, Float64(x_m * x_m), 0.125) * Float64(x_m * x_m)); else tmp = Float64(0.5 / Float64(sqrt(0.5) + 1.0)); end return tmp end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := If[LessEqual[N[Sqrt[1.0 ^ 2 + x$95$m ^ 2], $MachinePrecision], 2.0], N[(N[(-0.0859375 * N[(x$95$m * x$95$m), $MachinePrecision] + 0.125), $MachinePrecision] * N[(x$95$m * x$95$m), $MachinePrecision]), $MachinePrecision], N[(0.5 / N[(N[Sqrt[0.5], $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
\mathbf{if}\;\mathsf{hypot}\left(1, x\_m\right) \leq 2:\\
\;\;\;\;\mathsf{fma}\left(-0.0859375, x\_m \cdot x\_m, 0.125\right) \cdot \left(x\_m \cdot x\_m\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{0.5}{\sqrt{0.5} + 1}\\
\end{array}
\end{array}
if (hypot.f64 #s(literal 1 binary64) x) < 2Initial program 59.4%
Applied rewrites59.5%
Taylor expanded in x around 0
*-commutativeN/A
unpow2N/A
associate-*r*N/A
lower-*.f64N/A
lower-*.f64N/A
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6499.7
Applied rewrites99.7%
Applied rewrites99.7%
if 2 < (hypot.f64 #s(literal 1 binary64) x) Initial program 98.5%
Applied rewrites100.0%
Taylor expanded in x around inf
lower-/.f64N/A
+-commutativeN/A
lower-+.f64N/A
lower-sqrt.f6499.4
Applied rewrites99.4%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (if (<= (hypot 1.0 x_m) 2.0) (* (fma -0.0859375 (* x_m x_m) 0.125) (* x_m x_m)) (- 1.0 (sqrt 0.5))))
x_m = fabs(x);
double code(double x_m) {
double tmp;
if (hypot(1.0, x_m) <= 2.0) {
tmp = fma(-0.0859375, (x_m * x_m), 0.125) * (x_m * x_m);
} else {
tmp = 1.0 - sqrt(0.5);
}
return tmp;
}
x_m = abs(x) function code(x_m) tmp = 0.0 if (hypot(1.0, x_m) <= 2.0) tmp = Float64(fma(-0.0859375, Float64(x_m * x_m), 0.125) * Float64(x_m * x_m)); else tmp = Float64(1.0 - sqrt(0.5)); end return tmp end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := If[LessEqual[N[Sqrt[1.0 ^ 2 + x$95$m ^ 2], $MachinePrecision], 2.0], N[(N[(-0.0859375 * N[(x$95$m * x$95$m), $MachinePrecision] + 0.125), $MachinePrecision] * N[(x$95$m * x$95$m), $MachinePrecision]), $MachinePrecision], N[(1.0 - N[Sqrt[0.5], $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
\mathbf{if}\;\mathsf{hypot}\left(1, x\_m\right) \leq 2:\\
\;\;\;\;\mathsf{fma}\left(-0.0859375, x\_m \cdot x\_m, 0.125\right) \cdot \left(x\_m \cdot x\_m\right)\\
\mathbf{else}:\\
\;\;\;\;1 - \sqrt{0.5}\\
\end{array}
\end{array}
if (hypot.f64 #s(literal 1 binary64) x) < 2Initial program 59.4%
Applied rewrites59.5%
Taylor expanded in x around 0
*-commutativeN/A
unpow2N/A
associate-*r*N/A
lower-*.f64N/A
lower-*.f64N/A
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6499.7
Applied rewrites99.7%
Applied rewrites99.7%
if 2 < (hypot.f64 #s(literal 1 binary64) x) Initial program 98.5%
Taylor expanded in x around inf
Applied rewrites97.9%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (if (<= (hypot 1.0 x_m) 2.0) (* (* (fma -0.0859375 (* x_m x_m) 0.125) x_m) x_m) (- 1.0 (sqrt 0.5))))
x_m = fabs(x);
double code(double x_m) {
double tmp;
if (hypot(1.0, x_m) <= 2.0) {
tmp = (fma(-0.0859375, (x_m * x_m), 0.125) * x_m) * x_m;
} else {
tmp = 1.0 - sqrt(0.5);
}
return tmp;
}
x_m = abs(x) function code(x_m) tmp = 0.0 if (hypot(1.0, x_m) <= 2.0) tmp = Float64(Float64(fma(-0.0859375, Float64(x_m * x_m), 0.125) * x_m) * x_m); else tmp = Float64(1.0 - sqrt(0.5)); end return tmp end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := If[LessEqual[N[Sqrt[1.0 ^ 2 + x$95$m ^ 2], $MachinePrecision], 2.0], N[(N[(N[(-0.0859375 * N[(x$95$m * x$95$m), $MachinePrecision] + 0.125), $MachinePrecision] * x$95$m), $MachinePrecision] * x$95$m), $MachinePrecision], N[(1.0 - N[Sqrt[0.5], $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
\mathbf{if}\;\mathsf{hypot}\left(1, x\_m\right) \leq 2:\\
\;\;\;\;\left(\mathsf{fma}\left(-0.0859375, x\_m \cdot x\_m, 0.125\right) \cdot x\_m\right) \cdot x\_m\\
\mathbf{else}:\\
\;\;\;\;1 - \sqrt{0.5}\\
\end{array}
\end{array}
if (hypot.f64 #s(literal 1 binary64) x) < 2Initial program 59.4%
Applied rewrites59.5%
Taylor expanded in x around 0
*-commutativeN/A
unpow2N/A
associate-*r*N/A
lower-*.f64N/A
lower-*.f64N/A
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6499.7
Applied rewrites99.7%
if 2 < (hypot.f64 #s(literal 1 binary64) x) Initial program 98.5%
Taylor expanded in x around inf
Applied rewrites97.9%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (if (<= (hypot 1.0 x_m) 2.0) (* 0.125 (* x_m x_m)) (- 1.0 (sqrt 0.5))))
x_m = fabs(x);
double code(double x_m) {
double tmp;
if (hypot(1.0, x_m) <= 2.0) {
tmp = 0.125 * (x_m * x_m);
} else {
tmp = 1.0 - sqrt(0.5);
}
return tmp;
}
x_m = Math.abs(x);
/**
 * Piecewise alternative: 0.125 * x_m^2 when hypot(1, x_m) <= 2,
 * otherwise the constant 1 - sqrt(0.5).
 * Expects x_m = Math.abs(x) (see the preprocessing line above).
 */
public static double code(double x_m) {
    if (Math.hypot(1.0, x_m) <= 2.0) {
        return 0.125 * (x_m * x_m);
    }
    return 1.0 - Math.sqrt(0.5);
}
# Preprocessing step from the report: x_m = math.fabs(x).  Recorded as a
# comment because `x` is not defined at module scope in this extract, and
# the original fused this statement and the def onto one invalid line.
def code(x_m):
    """Piecewise: 0.125*x_m**2 when hypot(1, x_m) <= 2, else 1 - sqrt(0.5).

    Expects x_m >= 0 (the absolute value of the original input).
    """
    if math.hypot(1.0, x_m) <= 2.0:
        tmp = 0.125 * (x_m * x_m)
    else:
        tmp = 1.0 - math.sqrt(0.5)
    return tmp
x_m = abs(x) function code(x_m) tmp = 0.0 if (hypot(1.0, x_m) <= 2.0) tmp = Float64(0.125 * Float64(x_m * x_m)); else tmp = Float64(1.0 - sqrt(0.5)); end return tmp end
x_m = abs(x); function tmp_2 = code(x_m) tmp = 0.0; if (hypot(1.0, x_m) <= 2.0) tmp = 0.125 * (x_m * x_m); else tmp = 1.0 - sqrt(0.5); end tmp_2 = tmp; end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := If[LessEqual[N[Sqrt[1.0 ^ 2 + x$95$m ^ 2], $MachinePrecision], 2.0], N[(0.125 * N[(x$95$m * x$95$m), $MachinePrecision]), $MachinePrecision], N[(1.0 - N[Sqrt[0.5], $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
\mathbf{if}\;\mathsf{hypot}\left(1, x\_m\right) \leq 2:\\
\;\;\;\;0.125 \cdot \left(x\_m \cdot x\_m\right)\\
\mathbf{else}:\\
\;\;\;\;1 - \sqrt{0.5}\\
\end{array}
\end{array}
if (hypot.f64 #s(literal 1 binary64) x) < 2Initial program 59.4%
Applied rewrites59.5%
Taylor expanded in x around 0
lower-*.f64N/A
unpow2N/A
lower-*.f6499.3
Applied rewrites99.3%
if 2 < (hypot.f64 #s(literal 1 binary64) x) Initial program 98.5%
Taylor expanded in x around inf
Applied rewrites97.9%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (* (* 0.125 x_m) x_m))
x_m = fabs(x);
/* Final alternative, near-zero term only: (0.125 * x_m) * x_m.
 * Expects x_m = fabs(x); rounding order preserved from the original. */
double code(double x_m) {
    double scaled = 0.125 * x_m;
    return scaled * x_m;
}
x_m = abs(x)
! Quadratic Taylor term (0.125d0 * x_m) * x_m, keeping the original
! left-to-right rounding order.  Expects x_m = abs(x).
real(8) function code(x_m)
real(8), intent (in) :: x_m
real(8) :: scaled
scaled = 0.125d0 * x_m
code = scaled * x_m
end function
x_m = Math.abs(x);
/** Quadratic term (0.125 * x_m) * x_m; expects x_m = Math.abs(x). */
public static double code(double x_m) {
    double scaled = 0.125 * x_m;
    return scaled * x_m;
}
# Preprocessing step from the report: x_m = math.fabs(x).  Recorded as a
# comment because `x` is undefined here and the original fused this
# statement with the def on one invalid line.
def code(x_m):
    """Quadratic Taylor term (0.125 * x_m) * x_m, original rounding order."""
    return (0.125 * x_m) * x_m
x_m = abs(x) function code(x_m) return Float64(Float64(0.125 * x_m) * x_m) end
x_m = abs(x); function tmp = code(x_m) tmp = (0.125 * x_m) * x_m; end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := N[(N[(0.125 * x$95$m), $MachinePrecision] * x$95$m), $MachinePrecision]
\begin{array}{l}
x_m = \left|x\right|
\\
\left(0.125 \cdot x\_m\right) \cdot x\_m
\end{array}
Initial program 77.7%
Applied rewrites78.4%
Taylor expanded in x around 0
*-commutativeN/A
unpow2N/A
associate-*r*N/A
lower-*.f64N/A
lower-*.f64N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
sub-negN/A
metadata-evalN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
unpow2N/A
lower-*.f6454.7
Applied rewrites54.7%
Taylor expanded in x around 0
Applied rewrites54.7%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (* 0.125 (* x_m x_m)))
x_m = fabs(x);
/* Quadratic term 0.125 * (x_m * x_m); squares first, then scales,
 * matching the original rounding order.  Expects x_m = fabs(x). */
double code(double x_m) {
    double sq = x_m * x_m;
    return 0.125 * sq;
}
x_m = abs(x)
! Quadratic Taylor term 0.125d0 * (x_m * x_m): square first, then scale,
! matching the original rounding order.  Expects x_m = abs(x).
real(8) function code(x_m)
real(8), intent (in) :: x_m
real(8) :: sq
sq = x_m * x_m
code = 0.125d0 * sq
end function
x_m = Math.abs(x);
/** Quadratic term 0.125 * (x_m * x_m); expects x_m = Math.abs(x). */
public static double code(double x_m) {
    double sq = x_m * x_m;
    return 0.125 * sq;
}
# Preprocessing step from the report: x_m = math.fabs(x).  Recorded as a
# comment because `x` is undefined here and the original fused this
# statement with the def on one invalid line.
def code(x_m):
    """Quadratic Taylor term 0.125 * (x_m * x_m), original rounding order."""
    return 0.125 * (x_m * x_m)
x_m = abs(x) function code(x_m) return Float64(0.125 * Float64(x_m * x_m)) end
x_m = abs(x); function tmp = code(x_m) tmp = 0.125 * (x_m * x_m); end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := N[(0.125 * N[(x$95$m * x$95$m), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
x_m = \left|x\right|
\\
0.125 \cdot \left(x\_m \cdot x\_m\right)
\end{array}
Initial program 77.7%
Applied rewrites78.4%
Taylor expanded in x around 0
lower-*.f64N/A
unpow2N/A
lower-*.f6454.7
Applied rewrites54.7%
herbie shell --seed 2024322
(FPCore (x)
:name "Given's Rotation SVD example, simplified"
:precision binary64
(- 1.0 (sqrt (* 0.5 (+ 1.0 (/ 1.0 (hypot 1.0 x)))))))