
(FPCore (x) :precision binary64 (- 1.0 (sqrt (* 0.5 (+ 1.0 (/ 1.0 (hypot 1.0 x)))))))
/* Reference implementation of the FPCore spec above:
   1 - sqrt(0.5 * (1 + 1/hypot(1, x))).
   NOTE(review): the report's accuracy figure (75.8%) and the Taylor-based
   alternatives below suggest this form loses accuracy for small |x|,
   where the subtraction 1 - sqrt(~1) cancels. */
double code(double x) {
return 1.0 - sqrt((0.5 * (1.0 + (1.0 / hypot(1.0, x)))));
}
/** Reference implementation of 1 - sqrt(0.5 * (1 + 1/hypot(1, x)))
 *  in double precision; direct transliteration of the FPCore spec. */
public static double code(double x) {
return 1.0 - Math.sqrt((0.5 * (1.0 + (1.0 / Math.hypot(1.0, x)))));
}
def code(x):
    """Evaluate 1 - sqrt(0.5 * (1 + 1/hypot(1, x))) in double precision.

    The operations are performed in exactly the same order as the
    FPCore specification, so results are bit-identical to it.
    """
    inv_hyp = 1.0 / math.hypot(1.0, x)
    radicand = 0.5 * (1.0 + inv_hyp)
    return 1.0 - math.sqrt(radicand)
function code(x) return Float64(1.0 - sqrt(Float64(0.5 * Float64(1.0 + Float64(1.0 / hypot(1.0, x)))))) end
function tmp = code(x) tmp = 1.0 - sqrt((0.5 * (1.0 + (1.0 / hypot(1.0, x))))); end
code[x_] := N[(1.0 - N[Sqrt[N[(0.5 * N[(1.0 + N[(1.0 / N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
1 - \sqrt{0.5 \cdot \left(1 + \frac{1}{\mathsf{hypot}\left(1, x\right)}\right)}
\end{array}
Herbie found 15 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x) :precision binary64 (- 1.0 (sqrt (* 0.5 (+ 1.0 (/ 1.0 (hypot 1.0 x)))))))
double code(double x) {
return 1.0 - sqrt((0.5 * (1.0 + (1.0 / hypot(1.0, x)))));
}
public static double code(double x) {
return 1.0 - Math.sqrt((0.5 * (1.0 + (1.0 / Math.hypot(1.0, x)))));
}
def code(x): return 1.0 - math.sqrt((0.5 * (1.0 + (1.0 / math.hypot(1.0, x)))))
function code(x) return Float64(1.0 - sqrt(Float64(0.5 * Float64(1.0 + Float64(1.0 / hypot(1.0, x)))))) end
function tmp = code(x) tmp = 1.0 - sqrt((0.5 * (1.0 + (1.0 / hypot(1.0, x))))); end
code[x_] := N[(1.0 - N[Sqrt[N[(0.5 * N[(1.0 + N[(1.0 / N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
1 - \sqrt{0.5 \cdot \left(1 + \frac{1}{\mathsf{hypot}\left(1, x\right)}\right)}
\end{array}
x_m = (fabs.f64 x)
(FPCore (x_m)
:precision binary64
(let* ((t_0 (* (- (/ 1.0 (sqrt (fma x_m x_m 1.0))) -1.0) 0.5))
(t_1 (- (sqrt t_0) -1.0)))
(if (<= x_m 0.0029)
(* (fma -0.0859375 (* x_m x_m) 0.125) (* x_m x_m))
(- (/ 1.0 t_1) (/ t_0 t_1)))))x_m = fabs(x);
/* Herbie alternative 1. Precondition: x_m = fabs(x) (computed by caller).
   Small branch (x_m <= 0.0029): degree-4 even Taylor polynomial, avoiding
   the cancellation in 1 - sqrt(~1).
   Large branch: 1 - sqrt(t_0) rewritten via the conjugate as
   (1 - t_0)/(sqrt(t_0) + 1), here split into two divisions; the
   "- -1.0" spellings are Herbie's literal form of "+ 1.0". */
double code(double x_m) {
double t_0 = ((1.0 / sqrt(fma(x_m, x_m, 1.0))) - -1.0) * 0.5;
double t_1 = sqrt(t_0) - -1.0;
double tmp;
if (x_m <= 0.0029) {
tmp = fma(-0.0859375, (x_m * x_m), 0.125) * (x_m * x_m);
} else {
tmp = (1.0 / t_1) - (t_0 / t_1);
}
return tmp;
}
x_m = abs(x) function code(x_m) t_0 = Float64(Float64(Float64(1.0 / sqrt(fma(x_m, x_m, 1.0))) - -1.0) * 0.5) t_1 = Float64(sqrt(t_0) - -1.0) tmp = 0.0 if (x_m <= 0.0029) tmp = Float64(fma(-0.0859375, Float64(x_m * x_m), 0.125) * Float64(x_m * x_m)); else tmp = Float64(Float64(1.0 / t_1) - Float64(t_0 / t_1)); end return tmp end
x_m = N[Abs[x], $MachinePrecision]
code[x$95$m_] := Block[{t$95$0 = N[(N[(N[(1.0 / N[Sqrt[N[(x$95$m * x$95$m + 1.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] - -1.0), $MachinePrecision] * 0.5), $MachinePrecision]}, Block[{t$95$1 = N[(N[Sqrt[t$95$0], $MachinePrecision] - -1.0), $MachinePrecision]}, If[LessEqual[x$95$m, 0.0029], N[(N[(-0.0859375 * N[(x$95$m * x$95$m), $MachinePrecision] + 0.125), $MachinePrecision] * N[(x$95$m * x$95$m), $MachinePrecision]), $MachinePrecision], N[(N[(1.0 / t$95$1), $MachinePrecision] - N[(t$95$0 / t$95$1), $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
t_0 := \left(\frac{1}{\sqrt{\mathsf{fma}\left(x\_m, x\_m, 1\right)}} - -1\right) \cdot 0.5\\
t_1 := \sqrt{t\_0} - -1\\
\mathbf{if}\;x\_m \leq 0.0029:\\
\;\;\;\;\mathsf{fma}\left(-0.0859375, x\_m \cdot x\_m, 0.125\right) \cdot \left(x\_m \cdot x\_m\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{1}{t\_1} - \frac{t\_0}{t\_1}\\
\end{array}
\end{array}
if x < 0.0029Initial program 75.8%
Taylor expanded in x around 0
*-commutativeN/A
lower-*.f64N/A
+-commutativeN/A
lower-fma.f64N/A
pow2N/A
lower-*.f64N/A
pow2N/A
lower-*.f6450.2
Applied rewrites50.2%
if 0.0029 < x Initial program 75.8%
lift--.f64N/A
lift-sqrt.f64N/A
lift-*.f64N/A
lift-+.f64N/A
lift-/.f64N/A
lift-hypot.f64N/A
metadata-evalN/A
flip--N/A
lower-/.f64N/A
Applied rewrites76.6%
lift-/.f64N/A
lift--.f64N/A
lift-*.f64N/A
lift-+.f64N/A
lift-/.f64N/A
lift-fma.f64N/A
lift-sqrt.f64N/A
lift-+.f64N/A
lift-sqrt.f64N/A
lift-*.f64N/A
Applied rewrites76.5%
Applied rewrites76.5%
x_m = (fabs.f64 x)
(FPCore (x_m)
:precision binary64
(let* ((t_0 (* (- (/ 1.0 (sqrt (fma x_m x_m 1.0))) -1.0) 0.5)))
(if (<= x_m 0.0315)
(*
(fma
(fma
(fma -0.056243896484375 (* x_m x_m) 0.0673828125)
(* x_m x_m)
-0.0859375)
(* x_m x_m)
0.125)
(* x_m x_m))
(* (- 1.0 t_0) (/ 1.0 (- (sqrt t_0) -1.0))))))x_m = fabs(x);
double code(double x_m) {
double t_0 = ((1.0 / sqrt(fma(x_m, x_m, 1.0))) - -1.0) * 0.5;
double tmp;
if (x_m <= 0.0315) {
tmp = fma(fma(fma(-0.056243896484375, (x_m * x_m), 0.0673828125), (x_m * x_m), -0.0859375), (x_m * x_m), 0.125) * (x_m * x_m);
} else {
tmp = (1.0 - t_0) * (1.0 / (sqrt(t_0) - -1.0));
}
return tmp;
}
x_m = abs(x) function code(x_m) t_0 = Float64(Float64(Float64(1.0 / sqrt(fma(x_m, x_m, 1.0))) - -1.0) * 0.5) tmp = 0.0 if (x_m <= 0.0315) tmp = Float64(fma(fma(fma(-0.056243896484375, Float64(x_m * x_m), 0.0673828125), Float64(x_m * x_m), -0.0859375), Float64(x_m * x_m), 0.125) * Float64(x_m * x_m)); else tmp = Float64(Float64(1.0 - t_0) * Float64(1.0 / Float64(sqrt(t_0) - -1.0))); end return tmp end
x_m = N[Abs[x], $MachinePrecision]
code[x$95$m_] := Block[{t$95$0 = N[(N[(N[(1.0 / N[Sqrt[N[(x$95$m * x$95$m + 1.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] - -1.0), $MachinePrecision] * 0.5), $MachinePrecision]}, If[LessEqual[x$95$m, 0.0315], N[(N[(N[(N[(-0.056243896484375 * N[(x$95$m * x$95$m), $MachinePrecision] + 0.0673828125), $MachinePrecision] * N[(x$95$m * x$95$m), $MachinePrecision] + -0.0859375), $MachinePrecision] * N[(x$95$m * x$95$m), $MachinePrecision] + 0.125), $MachinePrecision] * N[(x$95$m * x$95$m), $MachinePrecision]), $MachinePrecision], N[(N[(1.0 - t$95$0), $MachinePrecision] * N[(1.0 / N[(N[Sqrt[t$95$0], $MachinePrecision] - -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
t_0 := \left(\frac{1}{\sqrt{\mathsf{fma}\left(x\_m, x\_m, 1\right)}} - -1\right) \cdot 0.5\\
\mathbf{if}\;x\_m \leq 0.0315:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(-0.056243896484375, x\_m \cdot x\_m, 0.0673828125\right), x\_m \cdot x\_m, -0.0859375\right), x\_m \cdot x\_m, 0.125\right) \cdot \left(x\_m \cdot x\_m\right)\\
\mathbf{else}:\\
\;\;\;\;\left(1 - t\_0\right) \cdot \frac{1}{\sqrt{t\_0} - -1}\\
\end{array}
\end{array}
if x < 0.0315Initial program 75.8%
Taylor expanded in x around 0
*-commutativeN/A
lower-*.f64N/A
Applied rewrites50.3%
if 0.0315 < x Initial program 75.8%
lift--.f64N/A
lift-sqrt.f64N/A
lift-*.f64N/A
lift-+.f64N/A
lift-/.f64N/A
lift-hypot.f64N/A
metadata-evalN/A
flip--N/A
lower-/.f64N/A
Applied rewrites76.6%
lift-/.f64N/A
lift--.f64N/A
lift-*.f64N/A
lift-+.f64N/A
lift-/.f64N/A
lift-fma.f64N/A
lift-sqrt.f64N/A
lift-+.f64N/A
lift-sqrt.f64N/A
lift-*.f64N/A
Applied rewrites76.5%
Applied rewrites76.6%
x_m = (fabs.f64 x)
(FPCore (x_m)
:precision binary64
(let* ((t_0 (/ 1.0 (sqrt (fma x_m x_m 1.0)))))
(if (<= (sqrt (* 0.5 (+ 1.0 (/ 1.0 (hypot 1.0 x_m))))) 0.9995)
(/ (- 1.0 (* (+ t_0 1.0) 0.5)) (fma (sqrt (- t_0 -1.0)) (sqrt 0.5) 1.0))
(*
(fma
(fma
(fma -0.056243896484375 (* x_m x_m) 0.0673828125)
(* x_m x_m)
-0.0859375)
(* x_m x_m)
0.125)
(* x_m x_m)))))x_m = fabs(x);
/* Herbie alternative 3. Precondition: x_m = fabs(x).
   The branch test evaluates the ORIGINAL expression's square root and
   compares it against 0.9995 (values near 1 mean x_m is tiny).
   Near-1 case: Taylor polynomial; otherwise a conjugate form with the
   denominator built as fma(sqrt(t_0 + 1), sqrt(0.5), 1). */
double code(double x_m) {
double t_0 = 1.0 / sqrt(fma(x_m, x_m, 1.0));
double tmp;
if (sqrt((0.5 * (1.0 + (1.0 / hypot(1.0, x_m))))) <= 0.9995) {
tmp = (1.0 - ((t_0 + 1.0) * 0.5)) / fma(sqrt((t_0 - -1.0)), sqrt(0.5), 1.0);
} else {
tmp = fma(fma(fma(-0.056243896484375, (x_m * x_m), 0.0673828125), (x_m * x_m), -0.0859375), (x_m * x_m), 0.125) * (x_m * x_m);
}
return tmp;
}
x_m = abs(x) function code(x_m) t_0 = Float64(1.0 / sqrt(fma(x_m, x_m, 1.0))) tmp = 0.0 if (sqrt(Float64(0.5 * Float64(1.0 + Float64(1.0 / hypot(1.0, x_m))))) <= 0.9995) tmp = Float64(Float64(1.0 - Float64(Float64(t_0 + 1.0) * 0.5)) / fma(sqrt(Float64(t_0 - -1.0)), sqrt(0.5), 1.0)); else tmp = Float64(fma(fma(fma(-0.056243896484375, Float64(x_m * x_m), 0.0673828125), Float64(x_m * x_m), -0.0859375), Float64(x_m * x_m), 0.125) * Float64(x_m * x_m)); end return tmp end
x_m = N[Abs[x], $MachinePrecision]
code[x$95$m_] := Block[{t$95$0 = N[(1.0 / N[Sqrt[N[(x$95$m * x$95$m + 1.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]}, If[LessEqual[N[Sqrt[N[(0.5 * N[(1.0 + N[(1.0 / N[Sqrt[1.0 ^ 2 + x$95$m ^ 2], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision], 0.9995], N[(N[(1.0 - N[(N[(t$95$0 + 1.0), $MachinePrecision] * 0.5), $MachinePrecision]), $MachinePrecision] / N[(N[Sqrt[N[(t$95$0 - -1.0), $MachinePrecision]], $MachinePrecision] * N[Sqrt[0.5], $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision], N[(N[(N[(N[(-0.056243896484375 * N[(x$95$m * x$95$m), $MachinePrecision] + 0.0673828125), $MachinePrecision] * N[(x$95$m * x$95$m), $MachinePrecision] + -0.0859375), $MachinePrecision] * N[(x$95$m * x$95$m), $MachinePrecision] + 0.125), $MachinePrecision] * N[(x$95$m * x$95$m), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
t_0 := \frac{1}{\sqrt{\mathsf{fma}\left(x\_m, x\_m, 1\right)}}\\
\mathbf{if}\;\sqrt{0.5 \cdot \left(1 + \frac{1}{\mathsf{hypot}\left(1, x\_m\right)}\right)} \leq 0.9995:\\
\;\;\;\;\frac{1 - \left(t\_0 + 1\right) \cdot 0.5}{\mathsf{fma}\left(\sqrt{t\_0 - -1}, \sqrt{0.5}, 1\right)}\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(-0.056243896484375, x\_m \cdot x\_m, 0.0673828125\right), x\_m \cdot x\_m, -0.0859375\right), x\_m \cdot x\_m, 0.125\right) \cdot \left(x\_m \cdot x\_m\right)\\
\end{array}
\end{array}
if (sqrt.f64 (*.f64 #s(literal 1/2 binary64) (+.f64 #s(literal 1 binary64) (/.f64 #s(literal 1 binary64) (hypot.f64 #s(literal 1 binary64) x))))) < 0.99950000000000006Initial program 75.8%
lift--.f64N/A
lift-sqrt.f64N/A
lift-*.f64N/A
lift-+.f64N/A
lift-/.f64N/A
lift-hypot.f64N/A
metadata-evalN/A
flip--N/A
lower-/.f64N/A
Applied rewrites76.6%
lift-+.f64N/A
lift-sqrt.f64N/A
lift-*.f64N/A
lift-+.f64N/A
lift-/.f64N/A
lift-fma.f64N/A
lift-sqrt.f64N/A
+-commutativeN/A
sqrt-prodN/A
lower-fma.f64N/A
Applied rewrites76.6%
if 0.99950000000000006 < (sqrt.f64 (*.f64 #s(literal 1/2 binary64) (+.f64 #s(literal 1 binary64) (/.f64 #s(literal 1 binary64) (hypot.f64 #s(literal 1 binary64) x))))) Initial program 75.8%
Taylor expanded in x around 0
*-commutativeN/A
lower-*.f64N/A
Applied rewrites50.3%
x_m = (fabs.f64 x)
(FPCore (x_m)
:precision binary64
(let* ((t_0 (* (- (/ 1.0 (sqrt (fma x_m x_m 1.0))) -1.0) 0.5)))
(if (<= (sqrt (* 0.5 (+ 1.0 (/ 1.0 (hypot 1.0 x_m))))) 0.9995)
(/ (- 1.0 t_0) (- (sqrt t_0) -1.0))
(*
(fma
(fma
(fma -0.056243896484375 (* x_m x_m) 0.0673828125)
(* x_m x_m)
-0.0859375)
(* x_m x_m)
0.125)
(* x_m x_m)))))x_m = fabs(x);
/* Herbie alternative 4. Precondition: x_m = fabs(x).
   Same branch predicate as alternative 3 (sqrt of the original
   expression vs 0.9995). The <= branch uses the plain conjugate
   (1 - t_0)/(sqrt(t_0) + 1); the > branch (tiny x_m) uses the
   degree-8 Taylor polynomial. */
double code(double x_m) {
double t_0 = ((1.0 / sqrt(fma(x_m, x_m, 1.0))) - -1.0) * 0.5;
double tmp;
if (sqrt((0.5 * (1.0 + (1.0 / hypot(1.0, x_m))))) <= 0.9995) {
tmp = (1.0 - t_0) / (sqrt(t_0) - -1.0);
} else {
tmp = fma(fma(fma(-0.056243896484375, (x_m * x_m), 0.0673828125), (x_m * x_m), -0.0859375), (x_m * x_m), 0.125) * (x_m * x_m);
}
return tmp;
}
x_m = abs(x) function code(x_m) t_0 = Float64(Float64(Float64(1.0 / sqrt(fma(x_m, x_m, 1.0))) - -1.0) * 0.5) tmp = 0.0 if (sqrt(Float64(0.5 * Float64(1.0 + Float64(1.0 / hypot(1.0, x_m))))) <= 0.9995) tmp = Float64(Float64(1.0 - t_0) / Float64(sqrt(t_0) - -1.0)); else tmp = Float64(fma(fma(fma(-0.056243896484375, Float64(x_m * x_m), 0.0673828125), Float64(x_m * x_m), -0.0859375), Float64(x_m * x_m), 0.125) * Float64(x_m * x_m)); end return tmp end
x_m = N[Abs[x], $MachinePrecision]
code[x$95$m_] := Block[{t$95$0 = N[(N[(N[(1.0 / N[Sqrt[N[(x$95$m * x$95$m + 1.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] - -1.0), $MachinePrecision] * 0.5), $MachinePrecision]}, If[LessEqual[N[Sqrt[N[(0.5 * N[(1.0 + N[(1.0 / N[Sqrt[1.0 ^ 2 + x$95$m ^ 2], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision], 0.9995], N[(N[(1.0 - t$95$0), $MachinePrecision] / N[(N[Sqrt[t$95$0], $MachinePrecision] - -1.0), $MachinePrecision]), $MachinePrecision], N[(N[(N[(N[(-0.056243896484375 * N[(x$95$m * x$95$m), $MachinePrecision] + 0.0673828125), $MachinePrecision] * N[(x$95$m * x$95$m), $MachinePrecision] + -0.0859375), $MachinePrecision] * N[(x$95$m * x$95$m), $MachinePrecision] + 0.125), $MachinePrecision] * N[(x$95$m * x$95$m), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
t_0 := \left(\frac{1}{\sqrt{\mathsf{fma}\left(x\_m, x\_m, 1\right)}} - -1\right) \cdot 0.5\\
\mathbf{if}\;\sqrt{0.5 \cdot \left(1 + \frac{1}{\mathsf{hypot}\left(1, x\_m\right)}\right)} \leq 0.9995:\\
\;\;\;\;\frac{1 - t\_0}{\sqrt{t\_0} - -1}\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(-0.056243896484375, x\_m \cdot x\_m, 0.0673828125\right), x\_m \cdot x\_m, -0.0859375\right), x\_m \cdot x\_m, 0.125\right) \cdot \left(x\_m \cdot x\_m\right)\\
\end{array}
\end{array}
if (sqrt.f64 (*.f64 #s(literal 1/2 binary64) (+.f64 #s(literal 1 binary64) (/.f64 #s(literal 1 binary64) (hypot.f64 #s(literal 1 binary64) x))))) < 0.99950000000000006Initial program 75.8%
lift--.f64N/A
lift-sqrt.f64N/A
lift-*.f64N/A
lift-+.f64N/A
lift-/.f64N/A
lift-hypot.f64N/A
metadata-evalN/A
flip--N/A
lower-/.f64N/A
Applied rewrites76.6%
lift-/.f64N/A
lift--.f64N/A
lift-*.f64N/A
lift-+.f64N/A
lift-/.f64N/A
lift-fma.f64N/A
lift-sqrt.f64N/A
lift-+.f64N/A
lift-sqrt.f64N/A
lift-*.f64N/A
Applied rewrites76.5%
lift--.f64N/A
lift-/.f64N/A
lift-+.f64N/A
lift-sqrt.f64N/A
lift-*.f64N/A
lift--.f64N/A
lift-/.f64N/A
lift-fma.f64N/A
lift-sqrt.f64N/A
Applied rewrites76.6%
if 0.99950000000000006 < (sqrt.f64 (*.f64 #s(literal 1/2 binary64) (+.f64 #s(literal 1 binary64) (/.f64 #s(literal 1 binary64) (hypot.f64 #s(literal 1 binary64) x))))) Initial program 75.8%
Taylor expanded in x around 0
*-commutativeN/A
lower-*.f64N/A
Applied rewrites50.3%
x_m = (fabs.f64 x)
(FPCore (x_m)
:precision binary64
(let* ((t_0 (- (/ 0.5 x_m) -0.5)) (t_1 (+ (sqrt t_0) 1.0)))
(if (<= (sqrt (* 0.5 (+ 1.0 (/ 1.0 (hypot 1.0 x_m))))) 0.8)
(- (/ 1.0 t_1) (/ t_0 t_1))
(*
(fma
(fma
(fma -0.056243896484375 (* x_m x_m) 0.0673828125)
(* x_m x_m)
-0.0859375)
(* x_m x_m)
0.125)
(* x_m x_m)))))x_m = fabs(x);
/* Herbie alternative 5. Precondition: x_m = fabs(x).
   t_0 = 0.5/x_m + 0.5 comes from a Taylor expansion of the radicand
   around x = inf, so the <= 0.8 branch (large x_m) is only an
   approximation of the original radicand; the other branch is the
   small-x_m Taylor polynomial. */
double code(double x_m) {
double t_0 = (0.5 / x_m) - -0.5;
double t_1 = sqrt(t_0) + 1.0;
double tmp;
if (sqrt((0.5 * (1.0 + (1.0 / hypot(1.0, x_m))))) <= 0.8) {
tmp = (1.0 / t_1) - (t_0 / t_1);
} else {
tmp = fma(fma(fma(-0.056243896484375, (x_m * x_m), 0.0673828125), (x_m * x_m), -0.0859375), (x_m * x_m), 0.125) * (x_m * x_m);
}
return tmp;
}
x_m = abs(x) function code(x_m) t_0 = Float64(Float64(0.5 / x_m) - -0.5) t_1 = Float64(sqrt(t_0) + 1.0) tmp = 0.0 if (sqrt(Float64(0.5 * Float64(1.0 + Float64(1.0 / hypot(1.0, x_m))))) <= 0.8) tmp = Float64(Float64(1.0 / t_1) - Float64(t_0 / t_1)); else tmp = Float64(fma(fma(fma(-0.056243896484375, Float64(x_m * x_m), 0.0673828125), Float64(x_m * x_m), -0.0859375), Float64(x_m * x_m), 0.125) * Float64(x_m * x_m)); end return tmp end
x_m = N[Abs[x], $MachinePrecision]
code[x$95$m_] := Block[{t$95$0 = N[(N[(0.5 / x$95$m), $MachinePrecision] - -0.5), $MachinePrecision]}, Block[{t$95$1 = N[(N[Sqrt[t$95$0], $MachinePrecision] + 1.0), $MachinePrecision]}, If[LessEqual[N[Sqrt[N[(0.5 * N[(1.0 + N[(1.0 / N[Sqrt[1.0 ^ 2 + x$95$m ^ 2], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision], 0.8], N[(N[(1.0 / t$95$1), $MachinePrecision] - N[(t$95$0 / t$95$1), $MachinePrecision]), $MachinePrecision], N[(N[(N[(N[(-0.056243896484375 * N[(x$95$m * x$95$m), $MachinePrecision] + 0.0673828125), $MachinePrecision] * N[(x$95$m * x$95$m), $MachinePrecision] + -0.0859375), $MachinePrecision] * N[(x$95$m * x$95$m), $MachinePrecision] + 0.125), $MachinePrecision] * N[(x$95$m * x$95$m), $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
t_0 := \frac{0.5}{x\_m} - -0.5\\
t_1 := \sqrt{t\_0} + 1\\
\mathbf{if}\;\sqrt{0.5 \cdot \left(1 + \frac{1}{\mathsf{hypot}\left(1, x\_m\right)}\right)} \leq 0.8:\\
\;\;\;\;\frac{1}{t\_1} - \frac{t\_0}{t\_1}\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(-0.056243896484375, x\_m \cdot x\_m, 0.0673828125\right), x\_m \cdot x\_m, -0.0859375\right), x\_m \cdot x\_m, 0.125\right) \cdot \left(x\_m \cdot x\_m\right)\\
\end{array}
\end{array}
if (sqrt.f64 (*.f64 #s(literal 1/2 binary64) (+.f64 #s(literal 1 binary64) (/.f64 #s(literal 1 binary64) (hypot.f64 #s(literal 1 binary64) x))))) < 0.80000000000000004Initial program 75.8%
lift--.f64N/A
lift-sqrt.f64N/A
lift-*.f64N/A
lift-+.f64N/A
lift-/.f64N/A
lift-hypot.f64N/A
metadata-evalN/A
flip--N/A
lower-/.f64N/A
Applied rewrites76.6%
Taylor expanded in x around inf
+-commutativeN/A
lower-+.f64N/A
mult-flip-revN/A
lower-/.f6450.5
Applied rewrites50.5%
Taylor expanded in x around inf
+-commutativeN/A
lower-+.f64N/A
mult-flip-revN/A
lower-/.f6450.7
Applied rewrites50.7%
lift-/.f64N/A
lift--.f64N/A
div-subN/A
lower--.f64N/A
Applied rewrites50.7%
if 0.80000000000000004 < (sqrt.f64 (*.f64 #s(literal 1/2 binary64) (+.f64 #s(literal 1 binary64) (/.f64 #s(literal 1 binary64) (hypot.f64 #s(literal 1 binary64) x))))) Initial program 75.8%
Taylor expanded in x around 0
*-commutativeN/A
lower-*.f64N/A
Applied rewrites50.3%
x_m = (fabs.f64 x)
(FPCore (x_m)
:precision binary64
(if (<= x_m 1.1)
(*
(fma
(fma
(fma -0.056243896484375 (* x_m x_m) 0.0673828125)
(* x_m x_m)
-0.0859375)
(* x_m x_m)
0.125)
(* x_m x_m))
(/ (- 0.5 (/ 0.5 x_m)) (+ 1.0 (sqrt (* (+ (/ 1.0 x_m) 1.0) 0.5))))))x_m = fabs(x);
/* Herbie alternative 6. Precondition: x_m = fabs(x).
   Split at x_m = 1.1: degree-8 Taylor polynomial below, and above an
   infinity-expansion form (0.5 - 0.5/x_m) / (1 + sqrt((1/x_m + 1)*0.5))
   that approximates the radicand by 0.5*(1 + 1/x_m). */
double code(double x_m) {
double tmp;
if (x_m <= 1.1) {
tmp = fma(fma(fma(-0.056243896484375, (x_m * x_m), 0.0673828125), (x_m * x_m), -0.0859375), (x_m * x_m), 0.125) * (x_m * x_m);
} else {
tmp = (0.5 - (0.5 / x_m)) / (1.0 + sqrt((((1.0 / x_m) + 1.0) * 0.5)));
}
return tmp;
}
x_m = abs(x) function code(x_m) tmp = 0.0 if (x_m <= 1.1) tmp = Float64(fma(fma(fma(-0.056243896484375, Float64(x_m * x_m), 0.0673828125), Float64(x_m * x_m), -0.0859375), Float64(x_m * x_m), 0.125) * Float64(x_m * x_m)); else tmp = Float64(Float64(0.5 - Float64(0.5 / x_m)) / Float64(1.0 + sqrt(Float64(Float64(Float64(1.0 / x_m) + 1.0) * 0.5)))); end return tmp end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := If[LessEqual[x$95$m, 1.1], N[(N[(N[(N[(-0.056243896484375 * N[(x$95$m * x$95$m), $MachinePrecision] + 0.0673828125), $MachinePrecision] * N[(x$95$m * x$95$m), $MachinePrecision] + -0.0859375), $MachinePrecision] * N[(x$95$m * x$95$m), $MachinePrecision] + 0.125), $MachinePrecision] * N[(x$95$m * x$95$m), $MachinePrecision]), $MachinePrecision], N[(N[(0.5 - N[(0.5 / x$95$m), $MachinePrecision]), $MachinePrecision] / N[(1.0 + N[Sqrt[N[(N[(N[(1.0 / x$95$m), $MachinePrecision] + 1.0), $MachinePrecision] * 0.5), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
\mathbf{if}\;x\_m \leq 1.1:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(-0.056243896484375, x\_m \cdot x\_m, 0.0673828125\right), x\_m \cdot x\_m, -0.0859375\right), x\_m \cdot x\_m, 0.125\right) \cdot \left(x\_m \cdot x\_m\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{0.5 - \frac{0.5}{x\_m}}{1 + \sqrt{\left(\frac{1}{x\_m} + 1\right) \cdot 0.5}}\\
\end{array}
\end{array}
if x < 1.1000000000000001Initial program 75.8%
Taylor expanded in x around 0
*-commutativeN/A
lower-*.f64N/A
Applied rewrites50.3%
if 1.1000000000000001 < x Initial program 75.8%
lift--.f64N/A
lift-sqrt.f64N/A
lift-*.f64N/A
lift-+.f64N/A
lift-/.f64N/A
lift-hypot.f64N/A
metadata-evalN/A
flip--N/A
lower-/.f64N/A
Applied rewrites76.6%
Taylor expanded in x around inf
lower--.f64N/A
mult-flip-revN/A
lower-/.f6450.5
Applied rewrites50.5%
Taylor expanded in x around inf
lower-/.f6450.7
Applied rewrites50.7%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (if (<= x_m 1.1) (* (* (fma (* x_m x_m) -0.0859375 0.125) x_m) x_m) (/ (- 0.5 (/ 0.5 x_m)) (+ 1.0 (sqrt (* (+ (/ 1.0 x_m) 1.0) 0.5))))))
x_m = fabs(x);
/* Herbie alternative 7. Precondition: x_m = fabs(x).
   Like alternative 6 but the small branch is only the degree-4
   polynomial (fma(x_m^2, -0.0859375, 0.125) * x_m) * x_m; the large
   branch is the same infinity-expansion conjugate form. */
double code(double x_m) {
double tmp;
if (x_m <= 1.1) {
tmp = (fma((x_m * x_m), -0.0859375, 0.125) * x_m) * x_m;
} else {
tmp = (0.5 - (0.5 / x_m)) / (1.0 + sqrt((((1.0 / x_m) + 1.0) * 0.5)));
}
return tmp;
}
x_m = abs(x) function code(x_m) tmp = 0.0 if (x_m <= 1.1) tmp = Float64(Float64(fma(Float64(x_m * x_m), -0.0859375, 0.125) * x_m) * x_m); else tmp = Float64(Float64(0.5 - Float64(0.5 / x_m)) / Float64(1.0 + sqrt(Float64(Float64(Float64(1.0 / x_m) + 1.0) * 0.5)))); end return tmp end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := If[LessEqual[x$95$m, 1.1], N[(N[(N[(N[(x$95$m * x$95$m), $MachinePrecision] * -0.0859375 + 0.125), $MachinePrecision] * x$95$m), $MachinePrecision] * x$95$m), $MachinePrecision], N[(N[(0.5 - N[(0.5 / x$95$m), $MachinePrecision]), $MachinePrecision] / N[(1.0 + N[Sqrt[N[(N[(N[(1.0 / x$95$m), $MachinePrecision] + 1.0), $MachinePrecision] * 0.5), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
\mathbf{if}\;x\_m \leq 1.1:\\
\;\;\;\;\left(\mathsf{fma}\left(x\_m \cdot x\_m, -0.0859375, 0.125\right) \cdot x\_m\right) \cdot x\_m\\
\mathbf{else}:\\
\;\;\;\;\frac{0.5 - \frac{0.5}{x\_m}}{1 + \sqrt{\left(\frac{1}{x\_m} + 1\right) \cdot 0.5}}\\
\end{array}
\end{array}
if x < 1.1000000000000001Initial program 75.8%
lift--.f64N/A
lift-sqrt.f64N/A
lift-*.f64N/A
lift-+.f64N/A
lift-/.f64N/A
lift-hypot.f64N/A
metadata-evalN/A
flip--N/A
lower-/.f64N/A
Applied rewrites76.6%
Taylor expanded in x around 0
*-commutativeN/A
lower-*.f64N/A
+-commutativeN/A
lower-fma.f64N/A
pow2N/A
lift-*.f64N/A
pow2N/A
lift-*.f6450.2
Applied rewrites50.2%
lift-*.f64N/A
lift-*.f64N/A
lift-fma.f64N/A
lift-*.f64N/A
associate-*r*N/A
lower-*.f64N/A
lower-*.f64N/A
pow2N/A
*-commutativeN/A
lower-fma.f64N/A
pow2N/A
lift-*.f6450.2
Applied rewrites50.2%
if 1.1000000000000001 < x Initial program 75.8%
lift--.f64N/A
lift-sqrt.f64N/A
lift-*.f64N/A
lift-+.f64N/A
lift-/.f64N/A
lift-hypot.f64N/A
metadata-evalN/A
flip--N/A
lower-/.f64N/A
Applied rewrites76.6%
Taylor expanded in x around inf
lower--.f64N/A
mult-flip-revN/A
lower-/.f6450.5
Applied rewrites50.5%
Taylor expanded in x around inf
lower-/.f6450.7
Applied rewrites50.7%
x_m = (fabs.f64 x)
(FPCore (x_m)
:precision binary64
(let* ((t_0 (- (/ 0.5 x_m) -0.5)))
(if (<= x_m 1.1)
(* (* (fma (* x_m x_m) -0.0859375 0.125) x_m) x_m)
(/ (- 1.0 t_0) (+ (sqrt t_0) 1.0)))))x_m = fabs(x);
/* Herbie alternative 8. Precondition: x_m = fabs(x).
   t_0 = 0.5/x_m + 0.5 is hoisted (computed even on the Taylor path);
   large branch uses the conjugate (1 - t_0)/(sqrt(t_0) + 1) over the
   infinity-expansion radicand. */
double code(double x_m) {
double t_0 = (0.5 / x_m) - -0.5;
double tmp;
if (x_m <= 1.1) {
tmp = (fma((x_m * x_m), -0.0859375, 0.125) * x_m) * x_m;
} else {
tmp = (1.0 - t_0) / (sqrt(t_0) + 1.0);
}
return tmp;
}
x_m = abs(x) function code(x_m) t_0 = Float64(Float64(0.5 / x_m) - -0.5) tmp = 0.0 if (x_m <= 1.1) tmp = Float64(Float64(fma(Float64(x_m * x_m), -0.0859375, 0.125) * x_m) * x_m); else tmp = Float64(Float64(1.0 - t_0) / Float64(sqrt(t_0) + 1.0)); end return tmp end
x_m = N[Abs[x], $MachinePrecision]
code[x$95$m_] := Block[{t$95$0 = N[(N[(0.5 / x$95$m), $MachinePrecision] - -0.5), $MachinePrecision]}, If[LessEqual[x$95$m, 1.1], N[(N[(N[(N[(x$95$m * x$95$m), $MachinePrecision] * -0.0859375 + 0.125), $MachinePrecision] * x$95$m), $MachinePrecision] * x$95$m), $MachinePrecision], N[(N[(1.0 - t$95$0), $MachinePrecision] / N[(N[Sqrt[t$95$0], $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
t_0 := \frac{0.5}{x\_m} - -0.5\\
\mathbf{if}\;x\_m \leq 1.1:\\
\;\;\;\;\left(\mathsf{fma}\left(x\_m \cdot x\_m, -0.0859375, 0.125\right) \cdot x\_m\right) \cdot x\_m\\
\mathbf{else}:\\
\;\;\;\;\frac{1 - t\_0}{\sqrt{t\_0} + 1}\\
\end{array}
\end{array}
if x < 1.1000000000000001Initial program 75.8%
lift--.f64N/A
lift-sqrt.f64N/A
lift-*.f64N/A
lift-+.f64N/A
lift-/.f64N/A
lift-hypot.f64N/A
metadata-evalN/A
flip--N/A
lower-/.f64N/A
Applied rewrites76.6%
Taylor expanded in x around 0
*-commutativeN/A
lower-*.f64N/A
+-commutativeN/A
lower-fma.f64N/A
pow2N/A
lift-*.f64N/A
pow2N/A
lift-*.f6450.2
Applied rewrites50.2%
lift-*.f64N/A
lift-*.f64N/A
lift-fma.f64N/A
lift-*.f64N/A
associate-*r*N/A
lower-*.f64N/A
lower-*.f64N/A
pow2N/A
*-commutativeN/A
lower-fma.f64N/A
pow2N/A
lift-*.f6450.2
Applied rewrites50.2%
if 1.1000000000000001 < x Initial program 75.8%
lift--.f64N/A
lift-sqrt.f64N/A
lift-*.f64N/A
lift-+.f64N/A
lift-/.f64N/A
lift-hypot.f64N/A
metadata-evalN/A
flip--N/A
lower-/.f64N/A
Applied rewrites76.6%
Taylor expanded in x around inf
+-commutativeN/A
lower-+.f64N/A
mult-flip-revN/A
lower-/.f6450.5
Applied rewrites50.5%
Taylor expanded in x around inf
+-commutativeN/A
lower-+.f64N/A
mult-flip-revN/A
lower-/.f6450.7
Applied rewrites50.7%
lift-+.f64N/A
add-flipN/A
metadata-evalN/A
lower--.f6450.7
lower--.f64N/A
lower--.f64N/A
lower--.f64N/A
lower--.f64N/A
lower--.f64N/A
lower--.f64N/A
lower--.f64N/A
Applied rewrites50.7%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (if (<= x_m 0.0024) (* (fma -0.0859375 (* x_m x_m) 0.125) (* x_m x_m)) (- 1.0 (sqrt (* (- (/ 1.0 (sqrt (fma x_m x_m 1.0))) -1.0) 0.5)))))
x_m = fabs(x);
/* Herbie alternative 9. Precondition: x_m = fabs(x).
   Only the tiny range (x_m <= 0.0024) is patched with the degree-4
   polynomial; elsewhere the original expression is kept, with hypot
   replaced by sqrt(fma(x_m, x_m, 1)). */
double code(double x_m) {
double tmp;
if (x_m <= 0.0024) {
tmp = fma(-0.0859375, (x_m * x_m), 0.125) * (x_m * x_m);
} else {
tmp = 1.0 - sqrt((((1.0 / sqrt(fma(x_m, x_m, 1.0))) - -1.0) * 0.5));
}
return tmp;
}
x_m = abs(x) function code(x_m) tmp = 0.0 if (x_m <= 0.0024) tmp = Float64(fma(-0.0859375, Float64(x_m * x_m), 0.125) * Float64(x_m * x_m)); else tmp = Float64(1.0 - sqrt(Float64(Float64(Float64(1.0 / sqrt(fma(x_m, x_m, 1.0))) - -1.0) * 0.5))); end return tmp end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := If[LessEqual[x$95$m, 0.0024], N[(N[(-0.0859375 * N[(x$95$m * x$95$m), $MachinePrecision] + 0.125), $MachinePrecision] * N[(x$95$m * x$95$m), $MachinePrecision]), $MachinePrecision], N[(1.0 - N[Sqrt[N[(N[(N[(1.0 / N[Sqrt[N[(x$95$m * x$95$m + 1.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] - -1.0), $MachinePrecision] * 0.5), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
\mathbf{if}\;x\_m \leq 0.0024:\\
\;\;\;\;\mathsf{fma}\left(-0.0859375, x\_m \cdot x\_m, 0.125\right) \cdot \left(x\_m \cdot x\_m\right)\\
\mathbf{else}:\\
\;\;\;\;1 - \sqrt{\left(\frac{1}{\sqrt{\mathsf{fma}\left(x\_m, x\_m, 1\right)}} - -1\right) \cdot 0.5}\\
\end{array}
\end{array}
if x < 0.00239999999999999979Initial program 75.8%
Taylor expanded in x around 0
*-commutativeN/A
lower-*.f64N/A
+-commutativeN/A
lower-fma.f64N/A
pow2N/A
lower-*.f64N/A
pow2N/A
lower-*.f6450.2
Applied rewrites50.2%
if 0.00239999999999999979 < x Initial program 75.8%
lift--.f64N/A
lift-sqrt.f64N/A
lift-*.f64N/A
lift-+.f64N/A
lift-/.f64N/A
lift-hypot.f64N/A
metadata-evalN/A
flip--N/A
lower-/.f64N/A
Applied rewrites76.6%
Applied rewrites75.8%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (if (<= x_m 1.1) (* (* (fma (* x_m x_m) -0.0859375 0.125) x_m) x_m) (- 1.0 (sqrt (- (/ 0.5 x_m) -0.5)))))
x_m = fabs(x);
/* Herbie alternative 10. Precondition: x_m = fabs(x).
   Small branch: degree-4 polynomial. Large branch: direct subtraction
   over the infinity-expansion radicand 0.5/x_m + 0.5 (spelled
   "- -0.5" by the generator). */
double code(double x_m) {
double tmp;
if (x_m <= 1.1) {
tmp = (fma((x_m * x_m), -0.0859375, 0.125) * x_m) * x_m;
} else {
tmp = 1.0 - sqrt(((0.5 / x_m) - -0.5));
}
return tmp;
}
x_m = abs(x) function code(x_m) tmp = 0.0 if (x_m <= 1.1) tmp = Float64(Float64(fma(Float64(x_m * x_m), -0.0859375, 0.125) * x_m) * x_m); else tmp = Float64(1.0 - sqrt(Float64(Float64(0.5 / x_m) - -0.5))); end return tmp end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := If[LessEqual[x$95$m, 1.1], N[(N[(N[(N[(x$95$m * x$95$m), $MachinePrecision] * -0.0859375 + 0.125), $MachinePrecision] * x$95$m), $MachinePrecision] * x$95$m), $MachinePrecision], N[(1.0 - N[Sqrt[N[(N[(0.5 / x$95$m), $MachinePrecision] - -0.5), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
\mathbf{if}\;x\_m \leq 1.1:\\
\;\;\;\;\left(\mathsf{fma}\left(x\_m \cdot x\_m, -0.0859375, 0.125\right) \cdot x\_m\right) \cdot x\_m\\
\mathbf{else}:\\
\;\;\;\;1 - \sqrt{\frac{0.5}{x\_m} - -0.5}\\
\end{array}
\end{array}
if x < 1.1000000000000001Initial program 75.8%
lift--.f64N/A
lift-sqrt.f64N/A
lift-*.f64N/A
lift-+.f64N/A
lift-/.f64N/A
lift-hypot.f64N/A
metadata-evalN/A
flip--N/A
lower-/.f64N/A
Applied rewrites76.6%
Taylor expanded in x around 0
*-commutativeN/A
lower-*.f64N/A
+-commutativeN/A
lower-fma.f64N/A
pow2N/A
lift-*.f64N/A
pow2N/A
lift-*.f6450.2
Applied rewrites50.2%
lift-*.f64N/A
lift-*.f64N/A
lift-fma.f64N/A
lift-*.f64N/A
associate-*r*N/A
lower-*.f64N/A
lower-*.f64N/A
pow2N/A
*-commutativeN/A
lower-fma.f64N/A
pow2N/A
lift-*.f6450.2
Applied rewrites50.2%
if 1.1000000000000001 < x Initial program 75.8%
Taylor expanded in x around inf
+-commutativeN/A
lower-+.f64N/A
mult-flip-revN/A
lower-/.f6450.0
Applied rewrites50.0%
metadata-eval50.0
metadata-eval50.0
lift-+.f64N/A
lift-/.f64N/A
mult-flip-revN/A
add-flipN/A
metadata-evalN/A
lower--.f64N/A
mult-flip-revN/A
lift-/.f6450.0
Applied rewrites50.0%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (if (<= x_m 1.1) (* (fma -0.0859375 (* x_m x_m) 0.125) (* x_m x_m)) (- 1.0 (sqrt (- (/ 0.5 x_m) -0.5)))))
x_m = fabs(x);
/* Herbie alternative 11. Precondition: x_m = fabs(x).
   Same as alternative 10 with the polynomial's fma arguments in the
   other association: fma(-0.0859375, x_m^2, 0.125) * x_m^2. */
double code(double x_m) {
double tmp;
if (x_m <= 1.1) {
tmp = fma(-0.0859375, (x_m * x_m), 0.125) * (x_m * x_m);
} else {
tmp = 1.0 - sqrt(((0.5 / x_m) - -0.5));
}
return tmp;
}
x_m = abs(x) function code(x_m) tmp = 0.0 if (x_m <= 1.1) tmp = Float64(fma(-0.0859375, Float64(x_m * x_m), 0.125) * Float64(x_m * x_m)); else tmp = Float64(1.0 - sqrt(Float64(Float64(0.5 / x_m) - -0.5))); end return tmp end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := If[LessEqual[x$95$m, 1.1], N[(N[(-0.0859375 * N[(x$95$m * x$95$m), $MachinePrecision] + 0.125), $MachinePrecision] * N[(x$95$m * x$95$m), $MachinePrecision]), $MachinePrecision], N[(1.0 - N[Sqrt[N[(N[(0.5 / x$95$m), $MachinePrecision] - -0.5), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
\mathbf{if}\;x\_m \leq 1.1:\\
\;\;\;\;\mathsf{fma}\left(-0.0859375, x\_m \cdot x\_m, 0.125\right) \cdot \left(x\_m \cdot x\_m\right)\\
\mathbf{else}:\\
\;\;\;\;1 - \sqrt{\frac{0.5}{x\_m} - -0.5}\\
\end{array}
\end{array}
if x < 1.1000000000000001Initial program 75.8%
Taylor expanded in x around 0
*-commutativeN/A
lower-*.f64N/A
+-commutativeN/A
lower-fma.f64N/A
pow2N/A
lower-*.f64N/A
pow2N/A
lower-*.f6450.2
Applied rewrites50.2%
if 1.1000000000000001 < x Initial program 75.8%
Taylor expanded in x around inf
+-commutativeN/A
lower-+.f64N/A
mult-flip-revN/A
lower-/.f6450.0
Applied rewrites50.0%
metadata-eval50.0
metadata-eval50.0
lift-+.f64N/A
lift-/.f64N/A
mult-flip-revN/A
add-flipN/A
metadata-evalN/A
lower--.f64N/A
mult-flip-revN/A
lift-/.f6450.0
Applied rewrites50.0%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (if (<= x_m 1.25) (* (* x_m x_m) 0.125) (- 1.0 (sqrt (- (/ 0.5 x_m) -0.5)))))
x_m = fabs(x);
/* Herbie alternative 12 (simplest/fastest). Precondition: x_m = fabs(x).
   Small branch: leading Taylor term x_m^2 / 8 only.
   Large branch: 1 - sqrt(0.5/x_m + 0.5), the infinity expansion of
   the radicand. */
double code(double x_m) {
double tmp;
if (x_m <= 1.25) {
tmp = (x_m * x_m) * 0.125;
} else {
tmp = 1.0 - sqrt(((0.5 / x_m) - -0.5));
}
return tmp;
}
x_m = private
! NaN-aware maximum/minimum helpers reproducing C's fmax/fmin semantics:
! when exactly one argument is NaN the other argument is returned, and the
! intrinsic max/min is consulted only when neither argument is NaN.
! The test (v /= v) is true precisely when v is a NaN.
module fmin_fmax_functions
  implicit none
  private
  public fmax
  public fmin

  ! Generic interfaces dispatch on the kind combination of the arguments.
  interface fmax
    module procedure fmax88
    module procedure fmax44
    module procedure fmax84
    module procedure fmax48
  end interface
  interface fmin
    module procedure fmin88
    module procedure fmin44
    module procedure fmin84
    module procedure fmin48
  end interface

contains

  ! double/double maximum
  real(8) function fmax88(x, y) result (res)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    if (x /= x) then
      res = y
    else if (y /= y) then
      res = x
    else
      res = max(x, y)
    end if
  end function

  ! single/single maximum
  real(4) function fmax44(x, y) result (res)
    real(4), intent (in) :: x
    real(4), intent (in) :: y
    if (x /= x) then
      res = y
    else if (y /= y) then
      res = x
    else
      res = max(x, y)
    end if
  end function

  ! double/single maximum; the single argument is widened to double
  real(8) function fmax84(x, y) result(res)
    real(8), intent (in) :: x
    real(4), intent (in) :: y
    if (x /= x) then
      res = dble(y)
    else if (y /= y) then
      res = x
    else
      res = max(x, dble(y))
    end if
  end function

  ! single/double maximum; the single argument is widened to double
  real(8) function fmax48(x, y) result(res)
    real(4), intent (in) :: x
    real(8), intent (in) :: y
    if (x /= x) then
      res = y
    else if (y /= y) then
      res = dble(x)
    else
      res = max(dble(x), y)
    end if
  end function

  ! double/double minimum
  real(8) function fmin88(x, y) result (res)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    if (x /= x) then
      res = y
    else if (y /= y) then
      res = x
    else
      res = min(x, y)
    end if
  end function

  ! single/single minimum
  real(4) function fmin44(x, y) result (res)
    real(4), intent (in) :: x
    real(4), intent (in) :: y
    if (x /= x) then
      res = y
    else if (y /= y) then
      res = x
    else
      res = min(x, y)
    end if
  end function

  ! double/single minimum; the single argument is widened to double
  real(8) function fmin84(x, y) result(res)
    real(8), intent (in) :: x
    real(4), intent (in) :: y
    if (x /= x) then
      res = dble(y)
    else if (y /= y) then
      res = x
    else
      res = min(x, dble(y))
    end if
  end function

  ! single/double minimum; the single argument is widened to double
  real(8) function fmin48(x, y) result(res)
    real(4), intent (in) :: x
    real(8), intent (in) :: y
    if (x /= x) then
      res = y
    else if (y /= y) then
      res = dble(x)
    else
      res = min(dble(x), y)
    end if
  end function

end module
! Piecewise approximation of 1 - sqrt(0.5*(1 + 1/hypot(1, x_m))):
! quadratic Taylor term for |x| <= 1.25, asymptotic form otherwise.
! The <= predicate keeps NaN inputs on the else branch, as before.
real(8) function code(x_m)
use fmin_fmax_functions
real(8), intent (in) :: x_m
if (x_m <= 1.25d0) then
  ! Taylor expansion about 0: leading term x^2/8.
  code = 0.125d0 * (x_m * x_m)
else
  ! Expansion about infinity: 1 - sqrt(0.5/x + 0.5).
  code = 1.0d0 - sqrt((0.5d0 / x_m) + 0.5d0)
end if
end function
x_m = Math.abs(x);
/**
 * Piecewise approximation of 1 - sqrt(0.5 * (1 + 1/hypot(1, x_m))).
 * For |x| <= 1.25 uses the leading Taylor term x^2/8; otherwise the
 * expansion about infinity, 1 - sqrt(0.5/x + 0.5).  The original
 * {@code <=} predicate is preserved so NaN takes the else branch.
 */
public static double code(double x_m) {
    return (x_m <= 1.25)
        ? 0.125 * (x_m * x_m)
        : 1.0 - Math.sqrt((0.5 / x_m) - -0.5);
}
x_m = math.fabs(x)


def code(x_m):
    """x^2/8 for |x| <= 1.25, else 1 - sqrt(0.5/x + 0.5)."""
    if x_m <= 1.25:
        return (x_m * x_m) * 0.125
    return 1.0 - math.sqrt((0.5 / x_m) - -0.5)
x_m = abs(x)

function code(x_m)
    # x^2/8 on the small branch; 1 - sqrt(0.5/x + 0.5) on the large branch.
    if x_m <= 1.25
        return Float64(Float64(x_m * x_m) * 0.125)
    else
        return Float64(1.0 - sqrt(Float64(Float64(0.5 / x_m) - -0.5)))
    end
end
x_m = abs(x);

function tmp_2 = code(x_m)
    % x^2/8 on the small branch; 1 - sqrt(0.5/x + 0.5) on the large branch.
    if (x_m <= 1.25)
        tmp = (x_m * x_m) * 0.125;
    else
        tmp = 1.0 - sqrt(((0.5 / x_m) - -0.5));
    end
    tmp_2 = tmp;
end
(* Branch at |x| = 1.25: quadratic Taylor term vs. asymptotic 1 - sqrt(0.5/x + 0.5). *)
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := If[LessEqual[x$95$m, 1.25], N[(N[(x$95$m * x$95$m), $MachinePrecision] * 0.125), $MachinePrecision], N[(1.0 - N[Sqrt[N[(N[(0.5 / x$95$m), $MachinePrecision] - -0.5), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
\mathbf{if}\;x\_m \leq 1.25:\\
\;\;\;\;\left(x\_m \cdot x\_m\right) \cdot 0.125\\
\mathbf{else}:\\
\;\;\;\;1 - \sqrt{\frac{0.5}{x\_m} - -0.5}\\
\end{array}
\end{array}
if x < 1.25: Initial program 75.8%
Taylor expanded in x around 0
*-commutativeN/A
lower-*.f64N/A
pow2N/A
lower-*.f6451.7
Applied rewrites51.7%
if 1.25 < x: Initial program 75.8%
Taylor expanded in x around inf
+-commutativeN/A
lower-+.f64N/A
mult-flip-revN/A
lower-/.f6450.0
Applied rewrites50.0%
metadata-eval50.0
metadata-eval50.0
lift-+.f64N/A
lift-/.f64N/A
mult-flip-revN/A
add-flipN/A
metadata-evalN/A
lower--.f64N/A
mult-flip-revN/A
lift-/.f6450.0
Applied rewrites50.0%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (if (<= x_m 1.5) (* (* x_m x_m) 0.125) (- 1.0 (sqrt 0.5))))
x_m = fabs(x);
/* Piecewise approximation: leading Taylor term x^2/8 for |x| <= 1.5,
 * otherwise the constant limit 1 - sqrt(0.5).  The <= predicate is
 * kept so NaN inputs take the else branch, as before. */
double code(double x_m) {
    return (x_m <= 1.5)
        ? 0.125 * (x_m * x_m)
        : 1.0 - sqrt(0.5);
}
x_m = abs(x)
! NaN-aware min/max helpers reproducing the semantics of C99 fmax/fmin:
! when exactly one argument is NaN, the other (non-NaN) argument is
! returned.  The Fortran intrinsics max/min make no such promise, so the
! merge expressions test each argument with (v /= v), which is true only
! for NaN.  Generic interfaces dispatch over all real(4)/real(8)
! combinations; mixed-kind variants promote the real(4) side with dble().
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
! fmax, both arguments real(8).
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! fmax, both arguments real(4).
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! fmax, real(8)/real(4): y is promoted to real(8).
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
! fmax, real(4)/real(8): x is promoted to real(8).
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
! fmin, both arguments real(8).
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! fmin, both arguments real(4).
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! fmin, real(8)/real(4): y is promoted to real(8).
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
! fmin, real(4)/real(8): x is promoted to real(8).
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
! Piecewise approximation: leading Taylor term x^2/8 for |x| <= 1.5,
! otherwise the constant limit 1 - sqrt(0.5).
! The <= predicate keeps NaN inputs on the else branch, as before.
real(8) function code(x_m)
use fmin_fmax_functions
real(8), intent (in) :: x_m
if (x_m <= 1.5d0) then
  code = 0.125d0 * (x_m * x_m)
else
  code = 1.0d0 - sqrt(0.5d0)
end if
end function
x_m = Math.abs(x);
/**
 * Piecewise approximation: leading Taylor term x^2/8 for |x| <= 1.5,
 * otherwise the constant limit 1 - sqrt(0.5).  The original {@code <=}
 * predicate is preserved so NaN takes the else branch.
 */
public static double code(double x_m) {
    return (x_m <= 1.5)
        ? 0.125 * (x_m * x_m)
        : 1.0 - Math.sqrt(0.5);
}
x_m = math.fabs(x)


def code(x_m):
    """x^2/8 for |x| <= 1.5, else the constant 1 - sqrt(0.5)."""
    if x_m <= 1.5:
        return (x_m * x_m) * 0.125
    return 1.0 - math.sqrt(0.5)
x_m = abs(x)

function code(x_m)
    # x^2/8 on the small branch; constant 1 - sqrt(0.5) otherwise.
    if x_m <= 1.5
        return Float64(Float64(x_m * x_m) * 0.125)
    else
        return Float64(1.0 - sqrt(0.5))
    end
end
x_m = abs(x);

function tmp_2 = code(x_m)
    % x^2/8 on the small branch; constant 1 - sqrt(0.5) otherwise.
    if (x_m <= 1.5)
        tmp = (x_m * x_m) * 0.125;
    else
        tmp = 1.0 - sqrt(0.5);
    end
    tmp_2 = tmp;
end
(* Branch at |x| = 1.5: quadratic Taylor term vs. constant limit 1 - Sqrt[0.5]. *)
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := If[LessEqual[x$95$m, 1.5], N[(N[(x$95$m * x$95$m), $MachinePrecision] * 0.125), $MachinePrecision], N[(1.0 - N[Sqrt[0.5], $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
\mathbf{if}\;x\_m \leq 1.5:\\
\;\;\;\;\left(x\_m \cdot x\_m\right) \cdot 0.125\\
\mathbf{else}:\\
\;\;\;\;1 - \sqrt{0.5}\\
\end{array}
\end{array}
if x < 1.5: Initial program 75.8%
Taylor expanded in x around 0
*-commutativeN/A
lower-*.f64N/A
pow2N/A
lower-*.f6451.7
Applied rewrites51.7%
if 1.5 < x: Initial program 75.8%
Taylor expanded in x around inf
Applied rewrites50.3%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (if (<= x_m 2.2e-77) (- 1.0 1.0) (- 1.0 (sqrt 0.5))))
x_m = fabs(x);
/* Two-constant approximation: exactly 0 below the tiny-|x| cutoff,
 * otherwise the limit 1 - sqrt(0.5).  The <= predicate is kept so
 * NaN inputs take the else branch, as before. */
double code(double x_m) {
    return (x_m <= 2.2e-77)
        ? (1.0 - 1.0)
        : (1.0 - sqrt(0.5));
}
x_m = abs(x)
! NaN-aware min/max helpers reproducing the semantics of C99 fmax/fmin:
! when exactly one argument is NaN, the other (non-NaN) argument is
! returned.  The Fortran intrinsics max/min make no such promise, so the
! merge expressions test each argument with (v /= v), which is true only
! for NaN.  Generic interfaces dispatch over all real(4)/real(8)
! combinations; mixed-kind variants promote the real(4) side with dble().
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
! fmax, both arguments real(8).
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! fmax, both arguments real(4).
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! fmax, real(8)/real(4): y is promoted to real(8).
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
! fmax, real(4)/real(8): x is promoted to real(8).
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
! fmin, both arguments real(8).
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! fmin, both arguments real(4).
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! fmin, real(8)/real(4): y is promoted to real(8).
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
! fmin, real(4)/real(8): x is promoted to real(8).
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
! Two-constant approximation: exactly 0 below the tiny-|x| cutoff,
! otherwise the limit 1 - sqrt(0.5).
! The <= predicate keeps NaN inputs on the else branch, as before.
real(8) function code(x_m)
use fmin_fmax_functions
real(8), intent (in) :: x_m
if (x_m <= 2.2d-77) then
  code = 1.0d0 - 1.0d0
else
  code = 1.0d0 - sqrt(0.5d0)
end if
end function
x_m = Math.abs(x);
/**
 * Two-constant approximation: exactly 0 below the tiny-|x| cutoff,
 * otherwise the limit 1 - sqrt(0.5).  The original {@code <=}
 * predicate is preserved so NaN takes the else branch.
 */
public static double code(double x_m) {
    return (x_m <= 2.2e-77)
        ? (1.0 - 1.0)
        : (1.0 - Math.sqrt(0.5));
}
x_m = math.fabs(x)


def code(x_m):
    """0.0 below the tiny-|x| cutoff, else the constant 1 - sqrt(0.5)."""
    if x_m <= 2.2e-77:
        return 1.0 - 1.0
    return 1.0 - math.sqrt(0.5)
x_m = abs(x)

function code(x_m)
    # Zero below the tiny cutoff; constant 1 - sqrt(0.5) otherwise.
    if x_m <= 2.2e-77
        return Float64(1.0 - 1.0)
    else
        return Float64(1.0 - sqrt(0.5))
    end
end
x_m = abs(x);

function tmp_2 = code(x_m)
    % Zero below the tiny cutoff; constant 1 - sqrt(0.5) otherwise.
    if (x_m <= 2.2e-77)
        tmp = 1.0 - 1.0;
    else
        tmp = 1.0 - sqrt(0.5);
    end
    tmp_2 = tmp;
end
(* Branch at |x| = 2.2e-77: exact zero vs. constant limit 1 - Sqrt[0.5]. *)
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := If[LessEqual[x$95$m, 2.2e-77], N[(1.0 - 1.0), $MachinePrecision], N[(1.0 - N[Sqrt[0.5], $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
\mathbf{if}\;x\_m \leq 2.2 \cdot 10^{-77}:\\
\;\;\;\;1 - 1\\
\mathbf{else}:\\
\;\;\;\;1 - \sqrt{0.5}\\
\end{array}
\end{array}
if x < 2.20000000000000007e-77: Initial program 75.8%
Taylor expanded in x around 0
Applied rewrites27.5%
if 2.20000000000000007e-77 < x: Initial program 75.8%
Taylor expanded in x around inf
Applied rewrites50.3%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (- 1.0 1.0))
x_m = fabs(x);
/* Degenerate constant approximation: always returns exactly 0. */
double code(double x_m) {
    (void)x_m;  /* argument intentionally unused */
    return 1.0 - 1.0;
}
x_m = abs(x)
! NaN-aware min/max helpers reproducing the semantics of C99 fmax/fmin:
! when exactly one argument is NaN, the other (non-NaN) argument is
! returned.  The Fortran intrinsics max/min make no such promise, so the
! merge expressions test each argument with (v /= v), which is true only
! for NaN.  Generic interfaces dispatch over all real(4)/real(8)
! combinations; mixed-kind variants promote the real(4) side with dble().
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
! fmax, both arguments real(8).
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! fmax, both arguments real(4).
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! fmax, real(8)/real(4): y is promoted to real(8).
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
! fmax, real(4)/real(8): x is promoted to real(8).
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
! fmin, both arguments real(8).
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! fmin, both arguments real(4).
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! fmin, real(8)/real(4): y is promoted to real(8).
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
! fmin, real(4)/real(8): x is promoted to real(8).
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
! Degenerate constant approximation: the Taylor expansion around 0
! truncated to order zero, i.e. exactly 0 for every input.
real(8) function code(x_m)
use fmin_fmax_functions
real(8), intent (in) :: x_m
code = 1.0d0 - 1.0d0
end function
x_m = Math.abs(x);
/** Degenerate constant approximation: exactly 0.0 for every input. */
public static double code(double x_m) {
    final double result = 1.0 - 1.0;
    return result;
}
x_m = math.fabs(x)


def code(x_m):
    """Degenerate constant approximation: exactly 0.0 for every input."""
    return 1.0 - 1.0
x_m = abs(x)

function code(x_m)
    # Degenerate constant approximation: exactly 0.0 for every input.
    return Float64(1.0 - 1.0)
end
x_m = abs(x);

function tmp = code(x_m)
    % Degenerate constant approximation: exactly zero for every input.
    tmp = 1.0 - 1.0;
end
(* Degenerate constant approximation: exactly 0 for every input. *)
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := N[(1.0 - 1.0), $MachinePrecision]
\begin{array}{l}
x_m = \left|x\right|
\\
1 - 1
\end{array}
Initial program 75.8%
Taylor expanded in x around 0
Applied rewrites27.5%
herbie shell --seed 2025140
(FPCore (x)
:name "Given's Rotation SVD example, simplified"
:precision binary64
(- 1.0 (sqrt (* 0.5 (+ 1.0 (/ 1.0 (hypot 1.0 x)))))))