
; Original FPCore expression: 1 - sqrt(0.5 * (1 + 1/hypot(1, x))) in binary64.
(FPCore (x) :precision binary64 (- 1.0 (sqrt (* 0.5 (+ 1.0 (/ 1.0 (hypot 1.0 x)))))))
double code(double x) {
return 1.0 - sqrt((0.5 * (1.0 + (1.0 / hypot(1.0, x)))));
}
/**
 * Computes 1 - sqrt(0.5 * (1 + 1/hypot(1, x))).
 * Direct translation of the FPCore expression; loses accuracy near x = 0
 * where the result cancels against 1.0.
 */
public static double code(double x) {
    double h = Math.hypot(1.0, x);
    double radicand = 0.5 * (1.0 + 1.0 / h);
    return 1.0 - Math.sqrt(radicand);
}
def code(x):
    # 1 - sqrt(0.5 * (1 + 1/hypot(1, x))); cancels against 1.0 near x = 0.
    h = math.hypot(1.0, x)
    return 1.0 - math.sqrt(0.5 * (1.0 + 1.0 / h))
# Direct translation: 1 - sqrt(0.5*(1 + 1/hypot(1, x))); cancels against 1.0 near x = 0.
function code(x) return Float64(1.0 - sqrt(Float64(0.5 * Float64(1.0 + Float64(1.0 / hypot(1.0, x)))))) end
% Direct translation: 1 - sqrt(0.5*(1 + 1/hypot(1, x))); cancels against 1.0 near x = 0.
function tmp = code(x) tmp = 1.0 - sqrt((0.5 * (1.0 + (1.0 / hypot(1.0, x))))); end
(* Direct translation: 1 - Sqrt[0.5 (1 + 1/Sqrt[1 + x^2])], evaluated at machine precision. *)
code[x_] := N[(1.0 - N[Sqrt[N[(0.5 * N[(1.0 + N[(1.0 / N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
1 - \sqrt{0.5 \cdot \left(1 + \frac{1}{\mathsf{hypot}\left(1, x\right)}\right)}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 8 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x) :precision binary64 (- 1.0 (sqrt (* 0.5 (+ 1.0 (/ 1.0 (hypot 1.0 x)))))))
/* Duplicate listing of the original translation:
 * 1 - sqrt(0.5 * (1 + 1/hypot(1, x))); loses accuracy near x = 0. */
double code(double x) {
return 1.0 - sqrt((0.5 * (1.0 + (1.0 / hypot(1.0, x)))));
}
/** Duplicate listing of the original translation: 1 - sqrt(0.5 * (1 + 1/hypot(1, x))). */
public static double code(double x) {
return 1.0 - Math.sqrt((0.5 * (1.0 + (1.0 / Math.hypot(1.0, x)))));
}
# Duplicate listing of the original translation; cancels against 1.0 near x = 0.
def code(x): return 1.0 - math.sqrt((0.5 * (1.0 + (1.0 / math.hypot(1.0, x)))))
function code(x) return Float64(1.0 - sqrt(Float64(0.5 * Float64(1.0 + Float64(1.0 / hypot(1.0, x)))))) end
function tmp = code(x) tmp = 1.0 - sqrt((0.5 * (1.0 + (1.0 / hypot(1.0, x))))); end
code[x_] := N[(1.0 - N[Sqrt[N[(0.5 * N[(1.0 + N[(1.0 / N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
1 - \sqrt{0.5 \cdot \left(1 + \frac{1}{\mathsf{hypot}\left(1, x\right)}\right)}
\end{array}
(FPCore (x)
:precision binary64
(let* ((t_0 (+ (sqrt 0.5) 1.0)) (t_1 (pow t_0 2.0)) (t_2 (/ 0.5 t_0)))
(if (<= (hypot 1.0 x) 2.0)
(*
(*
(fma
(fma (fma -0.17724609375 (* x x) 0.1953125) (* x x) -0.21875)
(* x x)
0.25)
x)
x)
(-
(+ (/ t_2 (pow x 4.0)) t_2)
(fma
(/ (/ -0.25 (pow x 4.0)) (sqrt 0.5))
(+ (/ 0.625 t_1) (/ (+ (/ (/ 0.125 (+ (sqrt 0.5) 0.5)) t_0) t_2) t_0))
(+
(/ (/ 0.125 t_1) (* (* (sqrt 0.5) x) x))
(/ (/ 0.5 (fma (sqrt 0.5) x x)) x)))))))
/* Piecewise evaluation of 1 - sqrt(0.5 * (1 + 1/hypot(1, x))).
 * Per the rewrite log: for hypot(1, x) <= 2 an odd polynomial (Taylor
 * series around x = 0, evaluated with fused multiply-adds) avoids the
 * catastrophic cancellation of the direct form; otherwise a series
 * expanded around x = infinity in powers of 1/x is used. */
double code(double x) {
double t_0 = sqrt(0.5) + 1.0;  /* shared subterm of the large-|x| series */
double t_1 = pow(t_0, 2.0);
double t_2 = 0.5 / t_0;        /* limit value of the expression as |x| -> inf */
double tmp;
if (hypot(1.0, x) <= 2.0) {
tmp = (fma(fma(fma(-0.17724609375, (x * x), 0.1953125), (x * x), -0.21875), (x * x), 0.25) * x) * x;
} else {
tmp = ((t_2 / pow(x, 4.0)) + t_2) - fma(((-0.25 / pow(x, 4.0)) / sqrt(0.5)), ((0.625 / t_1) + ((((0.125 / (sqrt(0.5) + 0.5)) / t_0) + t_2) / t_0)), (((0.125 / t_1) / ((sqrt(0.5) * x) * x)) + ((0.5 / fma(sqrt(0.5), x, x)) / x)));
}
return tmp;
}
function code(x) t_0 = Float64(sqrt(0.5) + 1.0) t_1 = t_0 ^ 2.0 t_2 = Float64(0.5 / t_0) tmp = 0.0 if (hypot(1.0, x) <= 2.0) tmp = Float64(Float64(fma(fma(fma(-0.17724609375, Float64(x * x), 0.1953125), Float64(x * x), -0.21875), Float64(x * x), 0.25) * x) * x); else tmp = Float64(Float64(Float64(t_2 / (x ^ 4.0)) + t_2) - fma(Float64(Float64(-0.25 / (x ^ 4.0)) / sqrt(0.5)), Float64(Float64(0.625 / t_1) + Float64(Float64(Float64(Float64(0.125 / Float64(sqrt(0.5) + 0.5)) / t_0) + t_2) / t_0)), Float64(Float64(Float64(0.125 / t_1) / Float64(Float64(sqrt(0.5) * x) * x)) + Float64(Float64(0.5 / fma(sqrt(0.5), x, x)) / x)))); end return tmp end
code[x_] := Block[{t$95$0 = N[(N[Sqrt[0.5], $MachinePrecision] + 1.0), $MachinePrecision]}, Block[{t$95$1 = N[Power[t$95$0, 2.0], $MachinePrecision]}, Block[{t$95$2 = N[(0.5 / t$95$0), $MachinePrecision]}, If[LessEqual[N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision], 2.0], N[(N[(N[(N[(N[(-0.17724609375 * N[(x * x), $MachinePrecision] + 0.1953125), $MachinePrecision] * N[(x * x), $MachinePrecision] + -0.21875), $MachinePrecision] * N[(x * x), $MachinePrecision] + 0.25), $MachinePrecision] * x), $MachinePrecision] * x), $MachinePrecision], N[(N[(N[(t$95$2 / N[Power[x, 4.0], $MachinePrecision]), $MachinePrecision] + t$95$2), $MachinePrecision] - N[(N[(N[(-0.25 / N[Power[x, 4.0], $MachinePrecision]), $MachinePrecision] / N[Sqrt[0.5], $MachinePrecision]), $MachinePrecision] * N[(N[(0.625 / t$95$1), $MachinePrecision] + N[(N[(N[(N[(0.125 / N[(N[Sqrt[0.5], $MachinePrecision] + 0.5), $MachinePrecision]), $MachinePrecision] / t$95$0), $MachinePrecision] + t$95$2), $MachinePrecision] / t$95$0), $MachinePrecision]), $MachinePrecision] + N[(N[(N[(0.125 / t$95$1), $MachinePrecision] / N[(N[(N[Sqrt[0.5], $MachinePrecision] * x), $MachinePrecision] * x), $MachinePrecision]), $MachinePrecision] + N[(N[(0.5 / N[(N[Sqrt[0.5], $MachinePrecision] * x + x), $MachinePrecision]), $MachinePrecision] / x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \sqrt{0.5} + 1\\
t_1 := {t\_0}^{2}\\
t_2 := \frac{0.5}{t\_0}\\
\mathbf{if}\;\mathsf{hypot}\left(1, x\right) \leq 2:\\
\;\;\;\;\left(\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(-0.17724609375, x \cdot x, 0.1953125\right), x \cdot x, -0.21875\right), x \cdot x, 0.25\right) \cdot x\right) \cdot x\\
\mathbf{else}:\\
\;\;\;\;\left(\frac{t\_2}{{x}^{4}} + t\_2\right) - \mathsf{fma}\left(\frac{\frac{-0.25}{{x}^{4}}}{\sqrt{0.5}}, \frac{0.625}{t\_1} + \frac{\frac{\frac{0.125}{\sqrt{0.5} + 0.5}}{t\_0} + t\_2}{t\_0}, \frac{\frac{0.125}{t\_1}}{\left(\sqrt{0.5} \cdot x\right) \cdot x} + \frac{\frac{0.5}{\mathsf{fma}\left(\sqrt{0.5}, x, x\right)}}{x}\right)\\
\end{array}
\end{array}
if (hypot.f64 #s(literal 1 binary64) x) < 2Initial program 52.8%
lift--.f64N/A
flip--N/A
metadata-evalN/A
lift-sqrt.f64N/A
lift-sqrt.f64N/A
rem-square-sqrtN/A
lift-*.f64N/A
lift-+.f64N/A
distribute-rgt-inN/A
metadata-evalN/A
associate--r+N/A
metadata-evalN/A
lower-/.f64N/A
Applied rewrites52.8%
Taylor expanded in x around 0
*-commutativeN/A
unpow2N/A
associate-*r*N/A
lower-*.f64N/A
Applied rewrites99.9%
if 2 < (hypot.f64 #s(literal 1 binary64) x) Initial program 97.6%
lift--.f64N/A
flip--N/A
metadata-evalN/A
lift-sqrt.f64N/A
lift-sqrt.f64N/A
rem-square-sqrtN/A
lift-*.f64N/A
lift-+.f64N/A
distribute-rgt-inN/A
metadata-evalN/A
associate--r+N/A
metadata-evalN/A
lower-/.f64N/A
Applied rewrites99.1%
Taylor expanded in x around inf
Applied rewrites100.0%
Final simplification99.9%
(FPCore (x)
:precision binary64
(if (<= (hypot 1.0 x) 2.0)
(*
(*
(fma
(fma (fma -0.17724609375 (* x x) 0.1953125) (* x x) -0.21875)
(* x x)
0.25)
x)
x)
(/ 0.5 (+ (sqrt 0.5) 1.0))))
double code(double x) {
double tmp;
if (hypot(1.0, x) <= 2.0) {
tmp = (fma(fma(fma(-0.17724609375, (x * x), 0.1953125), (x * x), -0.21875), (x * x), 0.25) * x) * x;
} else {
tmp = 0.5 / (sqrt(0.5) + 1.0);
}
return tmp;
}
function code(x) tmp = 0.0 if (hypot(1.0, x) <= 2.0) tmp = Float64(Float64(fma(fma(fma(-0.17724609375, Float64(x * x), 0.1953125), Float64(x * x), -0.21875), Float64(x * x), 0.25) * x) * x); else tmp = Float64(0.5 / Float64(sqrt(0.5) + 1.0)); end return tmp end
code[x_] := If[LessEqual[N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision], 2.0], N[(N[(N[(N[(N[(-0.17724609375 * N[(x * x), $MachinePrecision] + 0.1953125), $MachinePrecision] * N[(x * x), $MachinePrecision] + -0.21875), $MachinePrecision] * N[(x * x), $MachinePrecision] + 0.25), $MachinePrecision] * x), $MachinePrecision] * x), $MachinePrecision], N[(0.5 / N[(N[Sqrt[0.5], $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\mathsf{hypot}\left(1, x\right) \leq 2:\\
\;\;\;\;\left(\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(-0.17724609375, x \cdot x, 0.1953125\right), x \cdot x, -0.21875\right), x \cdot x, 0.25\right) \cdot x\right) \cdot x\\
\mathbf{else}:\\
\;\;\;\;\frac{0.5}{\sqrt{0.5} + 1}\\
\end{array}
\end{array}
if (hypot.f64 #s(literal 1 binary64) x) < 2Initial program 52.8%
lift--.f64N/A
flip--N/A
metadata-evalN/A
lift-sqrt.f64N/A
lift-sqrt.f64N/A
rem-square-sqrtN/A
lift-*.f64N/A
lift-+.f64N/A
distribute-rgt-inN/A
metadata-evalN/A
associate--r+N/A
metadata-evalN/A
lower-/.f64N/A
Applied rewrites52.8%
Taylor expanded in x around 0
*-commutativeN/A
unpow2N/A
associate-*r*N/A
lower-*.f64N/A
Applied rewrites99.9%
if 2 < (hypot.f64 #s(literal 1 binary64) x) Initial program 97.6%
lift--.f64N/A
flip--N/A
metadata-evalN/A
lift-sqrt.f64N/A
lift-sqrt.f64N/A
rem-square-sqrtN/A
lift-*.f64N/A
lift-+.f64N/A
distribute-rgt-inN/A
metadata-evalN/A
associate--r+N/A
metadata-evalN/A
lower-/.f64N/A
Applied rewrites99.1%
Taylor expanded in x around inf
lower-/.f64N/A
+-commutativeN/A
lower-+.f64N/A
lower-sqrt.f6499.6
Applied rewrites99.6%
(FPCore (x) :precision binary64 (if (<= (hypot 1.0 x) 2.0) (* (* (fma (fma 0.1953125 (* x x) -0.21875) (* x x) 0.25) x) x) (/ 0.5 (+ (sqrt 0.5) 1.0))))
double code(double x) {
double tmp;
if (hypot(1.0, x) <= 2.0) {
tmp = (fma(fma(0.1953125, (x * x), -0.21875), (x * x), 0.25) * x) * x;
} else {
tmp = 0.5 / (sqrt(0.5) + 1.0);
}
return tmp;
}
function code(x) tmp = 0.0 if (hypot(1.0, x) <= 2.0) tmp = Float64(Float64(fma(fma(0.1953125, Float64(x * x), -0.21875), Float64(x * x), 0.25) * x) * x); else tmp = Float64(0.5 / Float64(sqrt(0.5) + 1.0)); end return tmp end
code[x_] := If[LessEqual[N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision], 2.0], N[(N[(N[(N[(0.1953125 * N[(x * x), $MachinePrecision] + -0.21875), $MachinePrecision] * N[(x * x), $MachinePrecision] + 0.25), $MachinePrecision] * x), $MachinePrecision] * x), $MachinePrecision], N[(0.5 / N[(N[Sqrt[0.5], $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\mathsf{hypot}\left(1, x\right) \leq 2:\\
\;\;\;\;\left(\mathsf{fma}\left(\mathsf{fma}\left(0.1953125, x \cdot x, -0.21875\right), x \cdot x, 0.25\right) \cdot x\right) \cdot x\\
\mathbf{else}:\\
\;\;\;\;\frac{0.5}{\sqrt{0.5} + 1}\\
\end{array}
\end{array}
if (hypot.f64 #s(literal 1 binary64) x) < 2Initial program 52.8%
lift--.f64N/A
flip--N/A
metadata-evalN/A
lift-sqrt.f64N/A
lift-sqrt.f64N/A
rem-square-sqrtN/A
lift-*.f64N/A
lift-+.f64N/A
distribute-rgt-inN/A
metadata-evalN/A
associate--r+N/A
metadata-evalN/A
lower-/.f64N/A
Applied rewrites52.8%
Taylor expanded in x around 0
*-commutativeN/A
unpow2N/A
associate-*r*N/A
lower-*.f64N/A
lower-*.f64N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
sub-negN/A
metadata-evalN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
unpow2N/A
lower-*.f6499.7
Applied rewrites99.7%
if 2 < (hypot.f64 #s(literal 1 binary64) x) Initial program 97.6%
lift--.f64N/A
flip--N/A
metadata-evalN/A
lift-sqrt.f64N/A
lift-sqrt.f64N/A
rem-square-sqrtN/A
lift-*.f64N/A
lift-+.f64N/A
distribute-rgt-inN/A
metadata-evalN/A
associate--r+N/A
metadata-evalN/A
lower-/.f64N/A
Applied rewrites99.1%
Taylor expanded in x around inf
lower-/.f64N/A
+-commutativeN/A
lower-+.f64N/A
lower-sqrt.f6499.6
Applied rewrites99.6%
(FPCore (x) :precision binary64 (if (<= (hypot 1.0 x) 2.0) (* (* (fma -0.21875 (* x x) 0.25) x) x) (/ 0.5 (+ (sqrt 0.5) 1.0))))
double code(double x) {
double tmp;
if (hypot(1.0, x) <= 2.0) {
tmp = (fma(-0.21875, (x * x), 0.25) * x) * x;
} else {
tmp = 0.5 / (sqrt(0.5) + 1.0);
}
return tmp;
}
function code(x) tmp = 0.0 if (hypot(1.0, x) <= 2.0) tmp = Float64(Float64(fma(-0.21875, Float64(x * x), 0.25) * x) * x); else tmp = Float64(0.5 / Float64(sqrt(0.5) + 1.0)); end return tmp end
code[x_] := If[LessEqual[N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision], 2.0], N[(N[(N[(-0.21875 * N[(x * x), $MachinePrecision] + 0.25), $MachinePrecision] * x), $MachinePrecision] * x), $MachinePrecision], N[(0.5 / N[(N[Sqrt[0.5], $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\mathsf{hypot}\left(1, x\right) \leq 2:\\
\;\;\;\;\left(\mathsf{fma}\left(-0.21875, x \cdot x, 0.25\right) \cdot x\right) \cdot x\\
\mathbf{else}:\\
\;\;\;\;\frac{0.5}{\sqrt{0.5} + 1}\\
\end{array}
\end{array}
if (hypot.f64 #s(literal 1 binary64) x) < 2Initial program 52.8%
lift--.f64N/A
flip--N/A
metadata-evalN/A
lift-sqrt.f64N/A
lift-sqrt.f64N/A
rem-square-sqrtN/A
lift-*.f64N/A
lift-+.f64N/A
distribute-rgt-inN/A
metadata-evalN/A
associate--r+N/A
metadata-evalN/A
lower-/.f64N/A
Applied rewrites52.8%
Taylor expanded in x around 0
*-commutativeN/A
unpow2N/A
associate-*r*N/A
lower-*.f64N/A
lower-*.f64N/A
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6499.6
Applied rewrites99.6%
if 2 < (hypot.f64 #s(literal 1 binary64) x) Initial program 97.6%
lift--.f64N/A
flip--N/A
metadata-evalN/A
lift-sqrt.f64N/A
lift-sqrt.f64N/A
rem-square-sqrtN/A
lift-*.f64N/A
lift-+.f64N/A
distribute-rgt-inN/A
metadata-evalN/A
associate--r+N/A
metadata-evalN/A
lower-/.f64N/A
Applied rewrites99.1%
Taylor expanded in x around inf
lower-/.f64N/A
+-commutativeN/A
lower-+.f64N/A
lower-sqrt.f6499.6
Applied rewrites99.6%
(FPCore (x) :precision binary64 (if (<= (hypot 1.0 x) 2.0) (* (* (fma -0.21875 (* x x) 0.25) x) x) (- 1.0 (sqrt 0.5))))
double code(double x) {
double tmp;
if (hypot(1.0, x) <= 2.0) {
tmp = (fma(-0.21875, (x * x), 0.25) * x) * x;
} else {
tmp = 1.0 - sqrt(0.5);
}
return tmp;
}
function code(x) tmp = 0.0 if (hypot(1.0, x) <= 2.0) tmp = Float64(Float64(fma(-0.21875, Float64(x * x), 0.25) * x) * x); else tmp = Float64(1.0 - sqrt(0.5)); end return tmp end
code[x_] := If[LessEqual[N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision], 2.0], N[(N[(N[(-0.21875 * N[(x * x), $MachinePrecision] + 0.25), $MachinePrecision] * x), $MachinePrecision] * x), $MachinePrecision], N[(1.0 - N[Sqrt[0.5], $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\mathsf{hypot}\left(1, x\right) \leq 2:\\
\;\;\;\;\left(\mathsf{fma}\left(-0.21875, x \cdot x, 0.25\right) \cdot x\right) \cdot x\\
\mathbf{else}:\\
\;\;\;\;1 - \sqrt{0.5}\\
\end{array}
\end{array}
if (hypot.f64 #s(literal 1 binary64) x) < 2Initial program 52.8%
lift--.f64N/A
flip--N/A
metadata-evalN/A
lift-sqrt.f64N/A
lift-sqrt.f64N/A
rem-square-sqrtN/A
lift-*.f64N/A
lift-+.f64N/A
distribute-rgt-inN/A
metadata-evalN/A
associate--r+N/A
metadata-evalN/A
lower-/.f64N/A
Applied rewrites52.8%
Taylor expanded in x around 0
*-commutativeN/A
unpow2N/A
associate-*r*N/A
lower-*.f64N/A
lower-*.f64N/A
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6499.6
Applied rewrites99.6%
if 2 < (hypot.f64 #s(literal 1 binary64) x) Initial program 97.6%
Taylor expanded in x around inf
Applied rewrites98.1%
(FPCore (x) :precision binary64 (if (<= (hypot 1.0 x) 2.0) (* (* 0.25 x) x) (- 1.0 (sqrt 0.5))))
double code(double x) {
double tmp;
if (hypot(1.0, x) <= 2.0) {
tmp = (0.25 * x) * x;
} else {
tmp = 1.0 - sqrt(0.5);
}
return tmp;
}
/**
 * Coarsest piecewise form of 1 - sqrt(0.5 * (1 + 1/hypot(1, x))):
 * x*x/4 when hypot(1, x) <= 2, otherwise the constant 1 - sqrt(0.5).
 */
public static double code(double x) {
    if (Math.hypot(1.0, x) <= 2.0) {
        double scaled = 0.25 * x;
        return scaled * x;
    }
    return 1.0 - Math.sqrt(0.5);
}
def code(x):
    """Piecewise approximation of 1 - sqrt(0.5 * (1 + 1/hypot(1, x))).

    When hypot(1, x) <= 2 the leading Taylor term (0.25 * x) * x is used;
    otherwise the large-|x| constant 1 - sqrt(0.5).
    """
    # NOTE(review): the original report line had this whole function collapsed
    # onto one physical line, which is a Python SyntaxError; reformatted here.
    tmp = 0
    if math.hypot(1.0, x) <= 2.0:
        tmp = (0.25 * x) * x
    else:
        tmp = 1.0 - math.sqrt(0.5)
    return tmp
# Piecewise: quarter-square (0.25*x)*x near the origin, constant 1 - sqrt(0.5) far away.
# NOTE(review): statements appear collapsed onto one line by the report renderer;
# Julia normally needs newlines or semicolons between them — verify before reuse.
function code(x) tmp = 0.0 if (hypot(1.0, x) <= 2.0) tmp = Float64(Float64(0.25 * x) * x); else tmp = Float64(1.0 - sqrt(0.5)); end return tmp end
% Piecewise: quarter-square (0.25*x)*x near the origin, constant 1 - sqrt(0.5) far away.
% NOTE(review): collapsed onto one line by the report renderer; verify syntax before reuse.
function tmp_2 = code(x) tmp = 0.0; if (hypot(1.0, x) <= 2.0) tmp = (0.25 * x) * x; else tmp = 1.0 - sqrt(0.5); end tmp_2 = tmp; end
code[x_] := If[LessEqual[N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision], 2.0], N[(N[(0.25 * x), $MachinePrecision] * x), $MachinePrecision], N[(1.0 - N[Sqrt[0.5], $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\mathsf{hypot}\left(1, x\right) \leq 2:\\
\;\;\;\;\left(0.25 \cdot x\right) \cdot x\\
\mathbf{else}:\\
\;\;\;\;1 - \sqrt{0.5}\\
\end{array}
\end{array}
if (hypot.f64 #s(literal 1 binary64) x) < 2Initial program 52.8%
lift--.f64N/A
flip--N/A
metadata-evalN/A
lift-sqrt.f64N/A
lift-sqrt.f64N/A
rem-square-sqrtN/A
lift-*.f64N/A
lift-+.f64N/A
distribute-rgt-inN/A
metadata-evalN/A
associate--r+N/A
metadata-evalN/A
lower-/.f64N/A
Applied rewrites52.8%
Taylor expanded in x around 0
*-commutativeN/A
unpow2N/A
associate-*r*N/A
lower-*.f64N/A
lower-*.f64N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
sub-negN/A
metadata-evalN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
unpow2N/A
lower-*.f6499.7
Applied rewrites99.7%
Taylor expanded in x around 0
Applied rewrites99.0%
if 2 < (hypot.f64 #s(literal 1 binary64) x) Initial program 97.6%
Taylor expanded in x around inf
Applied rewrites98.1%
(FPCore (x) :precision binary64 (* (* 0.25 x) x))
/* Leading Taylor term of the original expression around x = 0: x*x/4. */
double code(double x) {
    double scaled = 0.25 * x;
    return scaled * x;
}
! Leading Taylor term of the original expression around x = 0: x**2 / 4.
real(8) function code(x)
real(8), intent (in) :: x
code = (0.25d0 * x) * x
end function
/** Leading Taylor term of the original expression around x = 0: x*x/4. */
public static double code(double x) {
    double scaled = 0.25 * x;
    return scaled * x;
}
def code(x):
    # Leading Taylor term around x = 0, kept in the (0.25 * x) * x association.
    scaled = 0.25 * x
    return scaled * x
# Leading Taylor term around x = 0: (0.25 * x) * x.
function code(x) return Float64(Float64(0.25 * x) * x) end
% Leading Taylor term around x = 0: (0.25 * x) * x.
function tmp = code(x) tmp = (0.25 * x) * x; end
code[x_] := N[(N[(0.25 * x), $MachinePrecision] * x), $MachinePrecision]
\begin{array}{l}
\\
\left(0.25 \cdot x\right) \cdot x
\end{array}
Initial program 75.7%
lift--.f64N/A
flip--N/A
metadata-evalN/A
lift-sqrt.f64N/A
lift-sqrt.f64N/A
rem-square-sqrtN/A
lift-*.f64N/A
lift-+.f64N/A
distribute-rgt-inN/A
metadata-evalN/A
associate--r+N/A
metadata-evalN/A
lower-/.f64N/A
Applied rewrites76.5%
Taylor expanded in x around 0
*-commutativeN/A
unpow2N/A
associate-*r*N/A
lower-*.f64N/A
lower-*.f64N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
sub-negN/A
metadata-evalN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
unpow2N/A
lower-*.f6450.4
Applied rewrites50.4%
Taylor expanded in x around 0
Applied rewrites50.4%
(FPCore (x) :precision binary64 (* 0.25 (* x x)))
/* Same quadratic Taylor term, with the other association: 0.25 * (x * x). */
double code(double x) {
    double sq = x * x;
    return 0.25 * sq;
}
! Same quadratic Taylor term, with the other association: 0.25 * (x * x).
real(8) function code(x)
real(8), intent (in) :: x
code = 0.25d0 * (x * x)
end function
/** Same quadratic Taylor term, with the other association: 0.25 * (x * x). */
public static double code(double x) {
    double sq = x * x;
    return 0.25 * sq;
}
def code(x):
    # Quadratic Taylor term, associated as 0.25 * (x * x).
    sq = x * x
    return 0.25 * sq
# Quadratic Taylor term, associated as 0.25 * (x * x).
function code(x) return Float64(0.25 * Float64(x * x)) end
% Quadratic Taylor term, associated as 0.25 * (x * x).
function tmp = code(x) tmp = 0.25 * (x * x); end
code[x_] := N[(0.25 * N[(x * x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
0.25 \cdot \left(x \cdot x\right)
\end{array}
Initial program 75.7%
lift--.f64N/A
flip--N/A
metadata-evalN/A
lift-sqrt.f64N/A
lift-sqrt.f64N/A
rem-square-sqrtN/A
lift-*.f64N/A
lift-+.f64N/A
distribute-rgt-inN/A
metadata-evalN/A
associate--r+N/A
metadata-evalN/A
lower-/.f64N/A
Applied rewrites76.5%
Taylor expanded in x around 0
lower-*.f64N/A
unpow2N/A
lower-*.f6450.4
Applied rewrites50.4%
herbie shell --seed 2024249
; Original benchmark fed to Herbie (seed on the line above):
; simplified Givens-rotation SVD expression, binary64.
(FPCore (x)
:name "Given's Rotation SVD example, simplified"
:precision binary64
(- 1.0 (sqrt (* 0.5 (+ 1.0 (/ 1.0 (hypot 1.0 x)))))))