
(FPCore (x) :precision binary64 (- 1.0 (sqrt (* 0.5 (+ 1.0 (/ 1.0 (hypot 1.0 x)))))))
double code(double x) {
return 1.0 - sqrt((0.5 * (1.0 + (1.0 / hypot(1.0, x)))));
}
public static double code(double x) {
    // Reference formula: 1 - sqrt((1 + 1/hypot(1, x)) / 2).
    // Identical floating-point operation order to the single-expression form.
    final double hyp = Math.hypot(1.0, x);
    final double radicand = 0.5 * (1.0 + (1.0 / hyp));
    return 1.0 - Math.sqrt(radicand);
}
def code(x):
    """Return 1 - sqrt((1 + 1/hypot(1, x)) / 2).

    Same floating-point operations in the same order as the original
    one-liner, with the hypot result bound to a local for readability.
    """
    h = math.hypot(1.0, x)
    return 1.0 - math.sqrt(0.5 * (1.0 + (1.0 / h)))
function code(x)
    # Reference formula: 1 - sqrt((1 + 1/hypot(1, x)) / 2).
    # Same Float64 operations in the same order as the one-line original.
    h = hypot(1.0, x)
    inner = Float64(1.0 + Float64(1.0 / h))
    return Float64(1.0 - sqrt(Float64(0.5 * inner)))
end
function tmp = code(x)
    % Reference formula: 1 - sqrt((1 + 1/hypot(1, x)) / 2).
    % Identical operation order to the single-line form.
    h = hypot(1.0, x);
    tmp = 1.0 - sqrt(0.5 * (1.0 + (1.0 / h)));
end
(* Reference formula 1 - Sqrt[(1 + 1/Sqrt[1 + x^2])/2]; every intermediate is rounded to $MachinePrecision to mirror binary64 evaluation (hypot spelled out as Sqrt[1^2 + x^2]). *)
code[x_] := N[(1.0 - N[Sqrt[N[(0.5 * N[(1.0 + N[(1.0 / N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
1 - \sqrt{0.5 \cdot \left(1 + \frac{1}{\mathsf{hypot}\left(1, x\right)}\right)}
Herbie found 15 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x) :precision binary64 (- 1.0 (sqrt (* 0.5 (+ 1.0 (/ 1.0 (hypot 1.0 x)))))))
double code(double x) {
/* Reference implementation: 1 - sqrt(0.5 * (1 + 1/hypot(1, x))).
   The report's accuracy figure (76.2%) indicates it loses accuracy for some
   inputs — the alternatives below branch on |x| to address this. */
return 1.0 - sqrt((0.5 * (1.0 + (1.0 / hypot(1.0, x)))));
}
public static double code(double x) {
// Reference implementation: 1 - sqrt(0.5 * (1 + 1/hypot(1, x))).
// Java port of the C reference; Math.hypot/Math.sqrt mirror the C calls.
return 1.0 - Math.sqrt((0.5 * (1.0 + (1.0 / Math.hypot(1.0, x)))));
}
# Reference implementation: 1 - sqrt(0.5 * (1 + 1/hypot(1, x))).
def code(x): return 1.0 - math.sqrt((0.5 * (1.0 + (1.0 / math.hypot(1.0, x)))))
# Reference implementation: 1 - sqrt(0.5 * (1 + 1/hypot(1, x))), with each step rounded to Float64.
function code(x) return Float64(1.0 - sqrt(Float64(0.5 * Float64(1.0 + Float64(1.0 / hypot(1.0, x)))))) end
% Reference implementation: 1 - sqrt(0.5 * (1 + 1/hypot(1, x))).
function tmp = code(x) tmp = 1.0 - sqrt((0.5 * (1.0 + (1.0 / hypot(1.0, x))))); end
(* Reference formula 1 - Sqrt[(1 + 1/Sqrt[1 + x^2])/2], rounded to $MachinePrecision at every step to mirror binary64 evaluation. *)
code[x_] := N[(1.0 - N[Sqrt[N[(0.5 * N[(1.0 + N[(1.0 / N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
1 - \sqrt{0.5 \cdot \left(1 + \frac{1}{\mathsf{hypot}\left(1, x\right)}\right)}
(FPCore (x)
:precision binary64
(let* ((t_0 (/ 0.5 (sqrt (fma (fabs x) (fabs x) 1.0))))
(t_1 (- 1.0 t_0))
(t_2 (pow (fabs x) 2.0))
(t_3 (* t_1 (- t_0 1.0))))
(if (<= (fabs x) 0.0152)
(*
t_2
(+
0.125
(*
t_2
(- (* t_2 (+ 0.0673828125 (* -0.056243896484375 t_2))) 0.0859375))))
(/
(* (/ (- t_3 (* t_1 -0.5)) t_3) t_1)
(+ 1.0 (sqrt (/ (- (* t_0 t_0) (* 0.5 0.5)) (- t_0 0.5))))))))double code(double x) {
/* Herbie alternative for 1 - sqrt(0.5*(1 + 1/hypot(1, x))).
   t_0 = 0.5 / hypot(1, x), with hypot(1, x) computed as sqrt(fma(|x|, |x|, 1)). */
double t_0 = 0.5 / sqrt(fma(fabs(x), fabs(x), 1.0));
double t_1 = 1.0 - t_0;
/* t_2 = x^2, used only by the small-|x| polynomial branch. */
double t_2 = pow(fabs(x), 2.0);
double t_3 = t_1 * (t_0 - 1.0);
double tmp;
if (fabs(x) <= 0.0152) {
/* Small |x|: even polynomial in x (per the derivation log, a Taylor expansion
   about 0), sidestepping the subtraction 1 - sqrt(...) when the result is near 0. */
tmp = t_2 * (0.125 + (t_2 * ((t_2 * (0.0673828125 + (-0.056243896484375 * t_2))) - 0.0859375)));
} else {
/* General branch: algebraically rearranged form of the original expression. */
tmp = (((t_3 - (t_1 * -0.5)) / t_3) * t_1) / (1.0 + sqrt((((t_0 * t_0) - (0.5 * 0.5)) / (t_0 - 0.5))));
}
return tmp;
}
function code(x) t_0 = Float64(0.5 / sqrt(fma(abs(x), abs(x), 1.0))) t_1 = Float64(1.0 - t_0) t_2 = abs(x) ^ 2.0 t_3 = Float64(t_1 * Float64(t_0 - 1.0)) tmp = 0.0 if (abs(x) <= 0.0152) tmp = Float64(t_2 * Float64(0.125 + Float64(t_2 * Float64(Float64(t_2 * Float64(0.0673828125 + Float64(-0.056243896484375 * t_2))) - 0.0859375)))); else tmp = Float64(Float64(Float64(Float64(t_3 - Float64(t_1 * -0.5)) / t_3) * t_1) / Float64(1.0 + sqrt(Float64(Float64(Float64(t_0 * t_0) - Float64(0.5 * 0.5)) / Float64(t_0 - 0.5))))); end return tmp end
code[x_] := Block[{t$95$0 = N[(0.5 / N[Sqrt[N[(N[Abs[x], $MachinePrecision] * N[Abs[x], $MachinePrecision] + 1.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(1.0 - t$95$0), $MachinePrecision]}, Block[{t$95$2 = N[Power[N[Abs[x], $MachinePrecision], 2.0], $MachinePrecision]}, Block[{t$95$3 = N[(t$95$1 * N[(t$95$0 - 1.0), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[N[Abs[x], $MachinePrecision], 0.0152], N[(t$95$2 * N[(0.125 + N[(t$95$2 * N[(N[(t$95$2 * N[(0.0673828125 + N[(-0.056243896484375 * t$95$2), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - 0.0859375), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[(N[(t$95$3 - N[(t$95$1 * -0.5), $MachinePrecision]), $MachinePrecision] / t$95$3), $MachinePrecision] * t$95$1), $MachinePrecision] / N[(1.0 + N[Sqrt[N[(N[(N[(t$95$0 * t$95$0), $MachinePrecision] - N[(0.5 * 0.5), $MachinePrecision]), $MachinePrecision] / N[(t$95$0 - 0.5), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]]]]
\begin{array}{l}
t_0 := \frac{0.5}{\sqrt{\mathsf{fma}\left(\left|x\right|, \left|x\right|, 1\right)}}\\
t_1 := 1 - t\_0\\
t_2 := {\left(\left|x\right|\right)}^{2}\\
t_3 := t\_1 \cdot \left(t\_0 - 1\right)\\
\mathbf{if}\;\left|x\right| \leq 0.0152:\\
\;\;\;\;t\_2 \cdot \left(0.125 + t\_2 \cdot \left(t\_2 \cdot \left(0.0673828125 + -0.056243896484375 \cdot t\_2\right) - 0.0859375\right)\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{t\_3 - t\_1 \cdot -0.5}{t\_3} \cdot t\_1}{1 + \sqrt{\frac{t\_0 \cdot t\_0 - 0.5 \cdot 0.5}{t\_0 - 0.5}}}\\
\end{array}
if x < 0.0152Initial program 76.2%
Taylor expanded in x around 0
lower-*.f64N/A
lower-pow.f64N/A
lower-+.f64N/A
lower-*.f64N/A
lower-pow.f64N/A
lower--.f64N/A
lower-*.f64N/A
lower-pow.f64N/A
lower-+.f64N/A
lower-*.f64N/A
lower-pow.f6449.3%
Applied rewrites49.3%
if 0.0152 < x Initial program 76.2%
lift--.f64N/A
flip--N/A
lower-unsound-/.f64N/A
Applied rewrites77.0%
lift--.f64N/A
lift-*.f64N/A
metadata-evalN/A
lift--.f64N/A
associate--r-N/A
add-flipN/A
metadata-evalN/A
sub-to-multN/A
lower-unsound-*.f64N/A
lower-unsound--.f64N/A
lower-unsound-/.f64N/A
lower--.f64N/A
lower--.f6477.0%
Applied rewrites77.0%
lift--.f64N/A
lift-/.f64N/A
sub-to-fractionN/A
div-subN/A
*-lft-identityN/A
frac-2negN/A
metadata-evalN/A
frac-subN/A
lower-/.f64N/A
Applied rewrites77.0%
lift--.f64N/A
sub-flipN/A
metadata-evalN/A
flip-+N/A
lower-unsound--.f32N/A
lower--.f32N/A
metadata-evalN/A
associate--l-N/A
lift--.f64N/A
sub-negate-revN/A
lift--.f64N/A
lift-/.f64N/A
lift-sqrt.f64N/A
lift-fma.f64N/A
lower-unsound-/.f64N/A
Applied rewrites51.6%
(FPCore (x)
:precision binary64
(let* ((t_0 (sqrt (fma (fabs x) (fabs x) 1.0)))
(t_1 (/ 0.5 t_0))
(t_2 (- 1.0 t_1))
(t_3 (pow (fabs x) 2.0))
(t_4 (* t_2 (- t_1 1.0))))
(if (<= (fabs x) 0.0152)
(*
t_3
(+
0.125
(*
t_3
(- (* t_3 (+ 0.0673828125 (* -0.056243896484375 t_3))) 0.0859375))))
(/
(* (/ (- t_4 (* t_2 -0.5)) t_4) t_2)
(fma (sqrt (- (/ 1.0 t_0) -1.0)) (sqrt 0.5) 1.0)))))double code(double x) {
/* Herbie alternative: t_0 = hypot(1, x) via sqrt(fma(|x|, |x|, 1)); t_1 = 0.5/t_0. */
double t_0 = sqrt(fma(fabs(x), fabs(x), 1.0));
double t_1 = 0.5 / t_0;
double t_2 = 1.0 - t_1;
/* t_3 = x^2 for the small-|x| polynomial branch. */
double t_3 = pow(fabs(x), 2.0);
double t_4 = t_2 * (t_1 - 1.0);
double tmp;
if (fabs(x) <= 0.0152) {
/* Small |x|: even polynomial (Taylor expansion about 0, per the derivation log). */
tmp = t_3 * (0.125 + (t_3 * ((t_3 * (0.0673828125 + (-0.056243896484375 * t_3))) - 0.0859375)));
} else {
/* General branch; denominator restructured as fma(sqrt(1/t_0 + 1), sqrt(0.5), 1). */
tmp = (((t_4 - (t_2 * -0.5)) / t_4) * t_2) / fma(sqrt(((1.0 / t_0) - -1.0)), sqrt(0.5), 1.0);
}
return tmp;
}
function code(x) t_0 = sqrt(fma(abs(x), abs(x), 1.0)) t_1 = Float64(0.5 / t_0) t_2 = Float64(1.0 - t_1) t_3 = abs(x) ^ 2.0 t_4 = Float64(t_2 * Float64(t_1 - 1.0)) tmp = 0.0 if (abs(x) <= 0.0152) tmp = Float64(t_3 * Float64(0.125 + Float64(t_3 * Float64(Float64(t_3 * Float64(0.0673828125 + Float64(-0.056243896484375 * t_3))) - 0.0859375)))); else tmp = Float64(Float64(Float64(Float64(t_4 - Float64(t_2 * -0.5)) / t_4) * t_2) / fma(sqrt(Float64(Float64(1.0 / t_0) - -1.0)), sqrt(0.5), 1.0)); end return tmp end
code[x_] := Block[{t$95$0 = N[Sqrt[N[(N[Abs[x], $MachinePrecision] * N[Abs[x], $MachinePrecision] + 1.0), $MachinePrecision]], $MachinePrecision]}, Block[{t$95$1 = N[(0.5 / t$95$0), $MachinePrecision]}, Block[{t$95$2 = N[(1.0 - t$95$1), $MachinePrecision]}, Block[{t$95$3 = N[Power[N[Abs[x], $MachinePrecision], 2.0], $MachinePrecision]}, Block[{t$95$4 = N[(t$95$2 * N[(t$95$1 - 1.0), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[N[Abs[x], $MachinePrecision], 0.0152], N[(t$95$3 * N[(0.125 + N[(t$95$3 * N[(N[(t$95$3 * N[(0.0673828125 + N[(-0.056243896484375 * t$95$3), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - 0.0859375), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[(N[(t$95$4 - N[(t$95$2 * -0.5), $MachinePrecision]), $MachinePrecision] / t$95$4), $MachinePrecision] * t$95$2), $MachinePrecision] / N[(N[Sqrt[N[(N[(1.0 / t$95$0), $MachinePrecision] - -1.0), $MachinePrecision]], $MachinePrecision] * N[Sqrt[0.5], $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]]]]]]]
\begin{array}{l}
t_0 := \sqrt{\mathsf{fma}\left(\left|x\right|, \left|x\right|, 1\right)}\\
t_1 := \frac{0.5}{t\_0}\\
t_2 := 1 - t\_1\\
t_3 := {\left(\left|x\right|\right)}^{2}\\
t_4 := t\_2 \cdot \left(t\_1 - 1\right)\\
\mathbf{if}\;\left|x\right| \leq 0.0152:\\
\;\;\;\;t\_3 \cdot \left(0.125 + t\_3 \cdot \left(t\_3 \cdot \left(0.0673828125 + -0.056243896484375 \cdot t\_3\right) - 0.0859375\right)\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{t\_4 - t\_2 \cdot -0.5}{t\_4} \cdot t\_2}{\mathsf{fma}\left(\sqrt{\frac{1}{t\_0} - -1}, \sqrt{0.5}, 1\right)}\\
\end{array}
if x < 0.0152Initial program 76.2%
Taylor expanded in x around 0
lower-*.f64N/A
lower-pow.f64N/A
lower-+.f64N/A
lower-*.f64N/A
lower-pow.f64N/A
lower--.f64N/A
lower-*.f64N/A
lower-pow.f64N/A
lower-+.f64N/A
lower-*.f64N/A
lower-pow.f6449.3%
Applied rewrites49.3%
if 0.0152 < x Initial program 76.2%
lift--.f64N/A
flip--N/A
lower-unsound-/.f64N/A
Applied rewrites77.0%
lift--.f64N/A
lift-*.f64N/A
metadata-evalN/A
lift--.f64N/A
associate--r-N/A
add-flipN/A
metadata-evalN/A
sub-to-multN/A
lower-unsound-*.f64N/A
lower-unsound--.f64N/A
lower-unsound-/.f64N/A
lower--.f64N/A
lower--.f6477.0%
Applied rewrites77.0%
lift--.f64N/A
lift-/.f64N/A
sub-to-fractionN/A
div-subN/A
*-lft-identityN/A
frac-2negN/A
metadata-evalN/A
frac-subN/A
lower-/.f64N/A
Applied rewrites77.0%
lift-+.f64N/A
+-commutativeN/A
lower-+.f6477.0%
lower-unsound-+.f64N/A
Applied rewrites77.0%
(FPCore (x)
:precision binary64
(let* ((t_0 (/ 0.5 (sqrt (fma (fabs x) (fabs x) 1.0))))
(t_1 (- 1.0 t_0))
(t_2 (pow (fabs x) 2.0))
(t_3 (- t_0 1.0)))
(if (<= (fabs x) 0.0152)
(*
t_2
(+
0.125
(*
t_2
(- (* t_2 (+ 0.0673828125 (* -0.056243896484375 t_2))) 0.0859375))))
(/
(* (/ (fma t_3 -0.5 (* t_3 t_1)) (* t_1 t_3)) t_1)
(+ 1.0 (sqrt (- t_0 -0.5)))))))double code(double x) {
/* Herbie alternative: t_0 = 0.5 / hypot(1, x), hypot via sqrt(fma(|x|, |x|, 1)). */
double t_0 = 0.5 / sqrt(fma(fabs(x), fabs(x), 1.0));
double t_1 = 1.0 - t_0;
/* t_2 = x^2 for the small-|x| polynomial branch; note t_3 == -t_1. */
double t_2 = pow(fabs(x), 2.0);
double t_3 = t_0 - 1.0;
double tmp;
if (fabs(x) <= 0.0152) {
/* Small |x|: even polynomial (Taylor expansion about 0, per the derivation log). */
tmp = t_2 * (0.125 + (t_2 * ((t_2 * (0.0673828125 + (-0.056243896484375 * t_2))) - 0.0859375)));
} else {
/* General branch: numerator fused with fma; denominator uses sqrt(t_0 + 0.5). */
tmp = ((fma(t_3, -0.5, (t_3 * t_1)) / (t_1 * t_3)) * t_1) / (1.0 + sqrt((t_0 - -0.5)));
}
return tmp;
}
function code(x) t_0 = Float64(0.5 / sqrt(fma(abs(x), abs(x), 1.0))) t_1 = Float64(1.0 - t_0) t_2 = abs(x) ^ 2.0 t_3 = Float64(t_0 - 1.0) tmp = 0.0 if (abs(x) <= 0.0152) tmp = Float64(t_2 * Float64(0.125 + Float64(t_2 * Float64(Float64(t_2 * Float64(0.0673828125 + Float64(-0.056243896484375 * t_2))) - 0.0859375)))); else tmp = Float64(Float64(Float64(fma(t_3, -0.5, Float64(t_3 * t_1)) / Float64(t_1 * t_3)) * t_1) / Float64(1.0 + sqrt(Float64(t_0 - -0.5)))); end return tmp end
code[x_] := Block[{t$95$0 = N[(0.5 / N[Sqrt[N[(N[Abs[x], $MachinePrecision] * N[Abs[x], $MachinePrecision] + 1.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(1.0 - t$95$0), $MachinePrecision]}, Block[{t$95$2 = N[Power[N[Abs[x], $MachinePrecision], 2.0], $MachinePrecision]}, Block[{t$95$3 = N[(t$95$0 - 1.0), $MachinePrecision]}, If[LessEqual[N[Abs[x], $MachinePrecision], 0.0152], N[(t$95$2 * N[(0.125 + N[(t$95$2 * N[(N[(t$95$2 * N[(0.0673828125 + N[(-0.056243896484375 * t$95$2), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - 0.0859375), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[(N[(t$95$3 * -0.5 + N[(t$95$3 * t$95$1), $MachinePrecision]), $MachinePrecision] / N[(t$95$1 * t$95$3), $MachinePrecision]), $MachinePrecision] * t$95$1), $MachinePrecision] / N[(1.0 + N[Sqrt[N[(t$95$0 - -0.5), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]]]]
\begin{array}{l}
t_0 := \frac{0.5}{\sqrt{\mathsf{fma}\left(\left|x\right|, \left|x\right|, 1\right)}}\\
t_1 := 1 - t\_0\\
t_2 := {\left(\left|x\right|\right)}^{2}\\
t_3 := t\_0 - 1\\
\mathbf{if}\;\left|x\right| \leq 0.0152:\\
\;\;\;\;t\_2 \cdot \left(0.125 + t\_2 \cdot \left(t\_2 \cdot \left(0.0673828125 + -0.056243896484375 \cdot t\_2\right) - 0.0859375\right)\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{\mathsf{fma}\left(t\_3, -0.5, t\_3 \cdot t\_1\right)}{t\_1 \cdot t\_3} \cdot t\_1}{1 + \sqrt{t\_0 - -0.5}}\\
\end{array}
if x < 0.0152Initial program 76.2%
Taylor expanded in x around 0
lower-*.f64N/A
lower-pow.f64N/A
lower-+.f64N/A
lower-*.f64N/A
lower-pow.f64N/A
lower--.f64N/A
lower-*.f64N/A
lower-pow.f64N/A
lower-+.f64N/A
lower-*.f64N/A
lower-pow.f6449.3%
Applied rewrites49.3%
if 0.0152 < x Initial program 76.2%
lift--.f64N/A
flip--N/A
lower-unsound-/.f64N/A
Applied rewrites77.0%
lift--.f64N/A
lift-*.f64N/A
metadata-evalN/A
lift--.f64N/A
associate--r-N/A
add-flipN/A
metadata-evalN/A
sub-to-multN/A
lower-unsound-*.f64N/A
lower-unsound--.f64N/A
lower-unsound-/.f64N/A
lower--.f64N/A
lower--.f6477.0%
Applied rewrites77.0%
lift--.f64N/A
lift-/.f64N/A
sub-to-fractionN/A
div-subN/A
*-lft-identityN/A
frac-2negN/A
metadata-evalN/A
frac-subN/A
lower-/.f64N/A
Applied rewrites77.0%
lift--.f64N/A
sub-flipN/A
+-commutativeN/A
lift-*.f64N/A
distribute-lft-neg-outN/A
lift--.f64N/A
sub-negate-revN/A
lift--.f64N/A
lower-fma.f6477.0%
lift-*.f64N/A
*-commutativeN/A
Applied rewrites77.0%
(FPCore (x)
:precision binary64
(let* ((t_0 (pow (fabs x) 2.0))
(t_1 (/ 0.5 (sqrt (fma (fabs x) (fabs x) 1.0))))
(t_2 (- (sqrt (- t_1 -0.5)) -1.0)))
(if (<= (fabs x) 0.0152)
(*
t_0
(+
0.125
(*
t_0
(- (* t_0 (+ 0.0673828125 (* -0.056243896484375 t_0))) 0.0859375))))
(/ (fma (/ (- 1.0 t_1) t_2) t_2 -0.5) t_2))))double code(double x) {
/* Herbie alternative: t_0 = x^2; t_1 = 0.5 / hypot(1, x); t_2 = sqrt(t_1 + 0.5) + 1. */
double t_0 = pow(fabs(x), 2.0);
double t_1 = 0.5 / sqrt(fma(fabs(x), fabs(x), 1.0));
double t_2 = sqrt((t_1 - -0.5)) - -1.0;
double tmp;
if (fabs(x) <= 0.0152) {
/* Small |x|: even polynomial (Taylor expansion about 0, per the derivation log). */
tmp = t_0 * (0.125 + (t_0 * ((t_0 * (0.0673828125 + (-0.056243896484375 * t_0))) - 0.0859375)));
} else {
/* General branch: single fma form built around the shared denominator t_2. */
tmp = fma(((1.0 - t_1) / t_2), t_2, -0.5) / t_2;
}
return tmp;
}
function code(x) t_0 = abs(x) ^ 2.0 t_1 = Float64(0.5 / sqrt(fma(abs(x), abs(x), 1.0))) t_2 = Float64(sqrt(Float64(t_1 - -0.5)) - -1.0) tmp = 0.0 if (abs(x) <= 0.0152) tmp = Float64(t_0 * Float64(0.125 + Float64(t_0 * Float64(Float64(t_0 * Float64(0.0673828125 + Float64(-0.056243896484375 * t_0))) - 0.0859375)))); else tmp = Float64(fma(Float64(Float64(1.0 - t_1) / t_2), t_2, -0.5) / t_2); end return tmp end
code[x_] := Block[{t$95$0 = N[Power[N[Abs[x], $MachinePrecision], 2.0], $MachinePrecision]}, Block[{t$95$1 = N[(0.5 / N[Sqrt[N[(N[Abs[x], $MachinePrecision] * N[Abs[x], $MachinePrecision] + 1.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]}, Block[{t$95$2 = N[(N[Sqrt[N[(t$95$1 - -0.5), $MachinePrecision]], $MachinePrecision] - -1.0), $MachinePrecision]}, If[LessEqual[N[Abs[x], $MachinePrecision], 0.0152], N[(t$95$0 * N[(0.125 + N[(t$95$0 * N[(N[(t$95$0 * N[(0.0673828125 + N[(-0.056243896484375 * t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - 0.0859375), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[(N[(1.0 - t$95$1), $MachinePrecision] / t$95$2), $MachinePrecision] * t$95$2 + -0.5), $MachinePrecision] / t$95$2), $MachinePrecision]]]]]
\begin{array}{l}
t_0 := {\left(\left|x\right|\right)}^{2}\\
t_1 := \frac{0.5}{\sqrt{\mathsf{fma}\left(\left|x\right|, \left|x\right|, 1\right)}}\\
t_2 := \sqrt{t\_1 - -0.5} - -1\\
\mathbf{if}\;\left|x\right| \leq 0.0152:\\
\;\;\;\;t\_0 \cdot \left(0.125 + t\_0 \cdot \left(t\_0 \cdot \left(0.0673828125 + -0.056243896484375 \cdot t\_0\right) - 0.0859375\right)\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{\mathsf{fma}\left(\frac{1 - t\_1}{t\_2}, t\_2, -0.5\right)}{t\_2}\\
\end{array}
if x < 0.0152Initial program 76.2%
Taylor expanded in x around 0
lower-*.f64N/A
lower-pow.f64N/A
lower-+.f64N/A
lower-*.f64N/A
lower-pow.f64N/A
lower--.f64N/A
lower-*.f64N/A
lower-pow.f64N/A
lower-+.f64N/A
lower-*.f64N/A
lower-pow.f6449.3%
Applied rewrites49.3%
if 0.0152 < x Initial program 76.2%
lift--.f64N/A
flip--N/A
lower-unsound-/.f64N/A
Applied rewrites77.0%
lift-/.f64N/A
lift--.f64N/A
lift-*.f64N/A
metadata-evalN/A
lift--.f64N/A
associate--r-N/A
div-addN/A
add-to-fractionN/A
lower-/.f64N/A
Applied rewrites77.0%
(FPCore (x)
:precision binary64
(let* ((t_0 (/ 0.5 (sqrt (fma (fabs x) (fabs x) 1.0))))
(t_1 (- (sqrt (- t_0 -0.5)) -1.0)))
(if (<= (fabs x) 5e-12)
(* (* 0.125 (fabs x)) (fabs x))
(/ (fma (/ (- 1.0 t_0) t_1) t_1 -0.5) t_1))))double code(double x) {
/* Herbie alternative: t_0 = 0.5 / hypot(1, x); t_1 = sqrt(t_0 + 0.5) + 1. */
double t_0 = 0.5 / sqrt(fma(fabs(x), fabs(x), 1.0));
double t_1 = sqrt((t_0 - -0.5)) - -1.0;
double tmp;
if (fabs(x) <= 5e-12) {
/* Tiny |x|: leading Taylor term x^2 / 8 only (per the derivation log). */
tmp = (0.125 * fabs(x)) * fabs(x);
} else {
/* General branch: fma form sharing the denominator t_1. */
tmp = fma(((1.0 - t_0) / t_1), t_1, -0.5) / t_1;
}
return tmp;
}
function code(x) t_0 = Float64(0.5 / sqrt(fma(abs(x), abs(x), 1.0))) t_1 = Float64(sqrt(Float64(t_0 - -0.5)) - -1.0) tmp = 0.0 if (abs(x) <= 5e-12) tmp = Float64(Float64(0.125 * abs(x)) * abs(x)); else tmp = Float64(fma(Float64(Float64(1.0 - t_0) / t_1), t_1, -0.5) / t_1); end return tmp end
code[x_] := Block[{t$95$0 = N[(0.5 / N[Sqrt[N[(N[Abs[x], $MachinePrecision] * N[Abs[x], $MachinePrecision] + 1.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(N[Sqrt[N[(t$95$0 - -0.5), $MachinePrecision]], $MachinePrecision] - -1.0), $MachinePrecision]}, If[LessEqual[N[Abs[x], $MachinePrecision], 5e-12], N[(N[(0.125 * N[Abs[x], $MachinePrecision]), $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision], N[(N[(N[(N[(1.0 - t$95$0), $MachinePrecision] / t$95$1), $MachinePrecision] * t$95$1 + -0.5), $MachinePrecision] / t$95$1), $MachinePrecision]]]]
\begin{array}{l}
t_0 := \frac{0.5}{\sqrt{\mathsf{fma}\left(\left|x\right|, \left|x\right|, 1\right)}}\\
t_1 := \sqrt{t\_0 - -0.5} - -1\\
\mathbf{if}\;\left|x\right| \leq 5 \cdot 10^{-12}:\\
\;\;\;\;\left(0.125 \cdot \left|x\right|\right) \cdot \left|x\right|\\
\mathbf{else}:\\
\;\;\;\;\frac{\mathsf{fma}\left(\frac{1 - t\_0}{t\_1}, t\_1, -0.5\right)}{t\_1}\\
\end{array}
if x < 4.9999999999999997e-12Initial program 76.2%
Taylor expanded in x around 0
lower-*.f64N/A
lower-pow.f64N/A
lower-+.f64N/A
lower-*.f64N/A
lower-pow.f64N/A
lower--.f64N/A
lower-*.f64N/A
lower-pow.f6450.7%
Applied rewrites50.7%
Taylor expanded in x around 0
Applied rewrites50.8%
lift-*.f64N/A
*-commutativeN/A
lift-pow.f64N/A
pow2N/A
associate-*r*N/A
lower-*.f64N/A
lower-*.f6450.8%
Applied rewrites50.8%
if 4.9999999999999997e-12 < x Initial program 76.2%
lift--.f64N/A
flip--N/A
lower-unsound-/.f64N/A
Applied rewrites77.0%
lift-/.f64N/A
lift--.f64N/A
lift-*.f64N/A
metadata-evalN/A
lift--.f64N/A
associate--r-N/A
div-addN/A
add-to-fractionN/A
lower-/.f64N/A
Applied rewrites77.0%
(FPCore (x)
:precision binary64
(let* ((t_0 (/ 0.5 (sqrt (fma (fabs x) (fabs x) 1.0))))
(t_1 (- 1.0 t_0))
(t_2 (* (fabs x) (fabs x))))
(if (<= (fabs x) 0.0215)
(*
(* (fma (fma 0.0673828125 t_2 -0.0859375) t_2 0.125) (fabs x))
(fabs x))
(/ (* (- 1.0 (/ 0.5 t_1)) t_1) (- (sqrt (- t_0 -0.5)) -1.0)))))double code(double x) {
/* Herbie alternative: t_0 = 0.5 / hypot(1, x); polynomial branch widened to |x| <= 0.0215. */
double t_0 = 0.5 / sqrt(fma(fabs(x), fabs(x), 1.0));
double t_1 = 1.0 - t_0;
double t_2 = fabs(x) * fabs(x);
double tmp;
if (fabs(x) <= 0.0215) {
/* Small |x|: nested-fma evaluation of the even Taylor polynomial about 0. */
tmp = (fma(fma(0.0673828125, t_2, -0.0859375), t_2, 0.125) * fabs(x)) * fabs(x);
} else {
/* General branch: (1 - 0.5/t_1) * t_1 over sqrt(t_0 + 0.5) + 1. */
tmp = ((1.0 - (0.5 / t_1)) * t_1) / (sqrt((t_0 - -0.5)) - -1.0);
}
return tmp;
}
function code(x) t_0 = Float64(0.5 / sqrt(fma(abs(x), abs(x), 1.0))) t_1 = Float64(1.0 - t_0) t_2 = Float64(abs(x) * abs(x)) tmp = 0.0 if (abs(x) <= 0.0215) tmp = Float64(Float64(fma(fma(0.0673828125, t_2, -0.0859375), t_2, 0.125) * abs(x)) * abs(x)); else tmp = Float64(Float64(Float64(1.0 - Float64(0.5 / t_1)) * t_1) / Float64(sqrt(Float64(t_0 - -0.5)) - -1.0)); end return tmp end
code[x_] := Block[{t$95$0 = N[(0.5 / N[Sqrt[N[(N[Abs[x], $MachinePrecision] * N[Abs[x], $MachinePrecision] + 1.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(1.0 - t$95$0), $MachinePrecision]}, Block[{t$95$2 = N[(N[Abs[x], $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision]}, If[LessEqual[N[Abs[x], $MachinePrecision], 0.0215], N[(N[(N[(N[(0.0673828125 * t$95$2 + -0.0859375), $MachinePrecision] * t$95$2 + 0.125), $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision], N[(N[(N[(1.0 - N[(0.5 / t$95$1), $MachinePrecision]), $MachinePrecision] * t$95$1), $MachinePrecision] / N[(N[Sqrt[N[(t$95$0 - -0.5), $MachinePrecision]], $MachinePrecision] - -1.0), $MachinePrecision]), $MachinePrecision]]]]]
\begin{array}{l}
t_0 := \frac{0.5}{\sqrt{\mathsf{fma}\left(\left|x\right|, \left|x\right|, 1\right)}}\\
t_1 := 1 - t\_0\\
t_2 := \left|x\right| \cdot \left|x\right|\\
\mathbf{if}\;\left|x\right| \leq 0.0215:\\
\;\;\;\;\left(\mathsf{fma}\left(\mathsf{fma}\left(0.0673828125, t\_2, -0.0859375\right), t\_2, 0.125\right) \cdot \left|x\right|\right) \cdot \left|x\right|\\
\mathbf{else}:\\
\;\;\;\;\frac{\left(1 - \frac{0.5}{t\_1}\right) \cdot t\_1}{\sqrt{t\_0 - -0.5} - -1}\\
\end{array}
if x < 0.021499999999999998Initial program 76.2%
Taylor expanded in x around 0
lower-*.f64N/A
lower-pow.f64N/A
lower-+.f64N/A
lower-*.f64N/A
lower-pow.f64N/A
lower--.f64N/A
lower-*.f64N/A
lower-pow.f6450.7%
Applied rewrites50.7%
lift-*.f64N/A
*-commutativeN/A
lift-pow.f64N/A
pow2N/A
associate-*r*N/A
lower-*.f64N/A
Applied rewrites50.7%
if 0.021499999999999998 < x Initial program 76.2%
lift--.f64N/A
flip--N/A
lower-unsound-/.f64N/A
Applied rewrites77.0%
lift-/.f64N/A
lift--.f64N/A
lift-*.f64N/A
metadata-evalN/A
lift--.f64N/A
associate--r-N/A
div-addN/A
add-to-fractionN/A
lower-/.f64N/A
Applied rewrites77.0%
lift-fma.f64N/A
add-flipN/A
Applied rewrites77.0%
(FPCore (x)
:precision binary64
(let* ((t_0 (sqrt (fma (fabs x) (fabs x) 1.0))) (t_1 (* (fabs x) (fabs x))))
(if (<= (fabs x) 0.0215)
(*
(* (fma (fma 0.0673828125 t_1 -0.0859375) t_1 0.125) (fabs x))
(fabs x))
(/
(- (/ 0.5 t_0) 0.5)
(- -1.0 (* (sqrt (- (/ 1.0 t_0) -1.0)) (sqrt 0.5)))))))double code(double x) {
/* Herbie alternative: t_0 = hypot(1, x) via sqrt(fma(|x|, |x|, 1)); t_1 = x^2. */
double t_0 = sqrt(fma(fabs(x), fabs(x), 1.0));
double t_1 = fabs(x) * fabs(x);
double tmp;
if (fabs(x) <= 0.0215) {
/* Small |x|: nested-fma evaluation of the even Taylor polynomial about 0. */
tmp = (fma(fma(0.0673828125, t_1, -0.0859375), t_1, 0.125) * fabs(x)) * fabs(x);
} else {
/* General branch: sign-flipped quotient; denominator splits sqrt via sqrt(a)*sqrt(0.5). */
tmp = ((0.5 / t_0) - 0.5) / (-1.0 - (sqrt(((1.0 / t_0) - -1.0)) * sqrt(0.5)));
}
return tmp;
}
function code(x) t_0 = sqrt(fma(abs(x), abs(x), 1.0)) t_1 = Float64(abs(x) * abs(x)) tmp = 0.0 if (abs(x) <= 0.0215) tmp = Float64(Float64(fma(fma(0.0673828125, t_1, -0.0859375), t_1, 0.125) * abs(x)) * abs(x)); else tmp = Float64(Float64(Float64(0.5 / t_0) - 0.5) / Float64(-1.0 - Float64(sqrt(Float64(Float64(1.0 / t_0) - -1.0)) * sqrt(0.5)))); end return tmp end
code[x_] := Block[{t$95$0 = N[Sqrt[N[(N[Abs[x], $MachinePrecision] * N[Abs[x], $MachinePrecision] + 1.0), $MachinePrecision]], $MachinePrecision]}, Block[{t$95$1 = N[(N[Abs[x], $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision]}, If[LessEqual[N[Abs[x], $MachinePrecision], 0.0215], N[(N[(N[(N[(0.0673828125 * t$95$1 + -0.0859375), $MachinePrecision] * t$95$1 + 0.125), $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision], N[(N[(N[(0.5 / t$95$0), $MachinePrecision] - 0.5), $MachinePrecision] / N[(-1.0 - N[(N[Sqrt[N[(N[(1.0 / t$95$0), $MachinePrecision] - -1.0), $MachinePrecision]], $MachinePrecision] * N[Sqrt[0.5], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}
t_0 := \sqrt{\mathsf{fma}\left(\left|x\right|, \left|x\right|, 1\right)}\\
t_1 := \left|x\right| \cdot \left|x\right|\\
\mathbf{if}\;\left|x\right| \leq 0.0215:\\
\;\;\;\;\left(\mathsf{fma}\left(\mathsf{fma}\left(0.0673828125, t\_1, -0.0859375\right), t\_1, 0.125\right) \cdot \left|x\right|\right) \cdot \left|x\right|\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{0.5}{t\_0} - 0.5}{-1 - \sqrt{\frac{1}{t\_0} - -1} \cdot \sqrt{0.5}}\\
\end{array}
if x < 0.021499999999999998Initial program 76.2%
Taylor expanded in x around 0
lower-*.f64N/A
lower-pow.f64N/A
lower-+.f64N/A
lower-*.f64N/A
lower-pow.f64N/A
lower--.f64N/A
lower-*.f64N/A
lower-pow.f6450.7%
Applied rewrites50.7%
lift-*.f64N/A
*-commutativeN/A
lift-pow.f64N/A
pow2N/A
associate-*r*N/A
lower-*.f64N/A
Applied rewrites50.7%
if 0.021499999999999998 < x Initial program 76.2%
lift--.f64N/A
flip--N/A
lower-unsound-/.f64N/A
Applied rewrites77.0%
lift-/.f64N/A
frac-2negN/A
lower-/.f64N/A
lift--.f64N/A
lift-*.f64N/A
metadata-evalN/A
sub-negate-revN/A
lift--.f64N/A
associate--l-N/A
metadata-evalN/A
lower--.f64N/A
lift-+.f64N/A
distribute-neg-inN/A
metadata-evalN/A
sub-flip-reverseN/A
Applied rewrites77.0%
lift-sqrt.f64N/A
lift--.f64N/A
sub-flipN/A
lift-/.f64N/A
mult-flipN/A
lift-sqrt.f64N/A
lift-fma.f64N/A
pow2N/A
lift-pow.f64N/A
+-commutativeN/A
lift-pow.f64N/A
pow2N/A
metadata-evalN/A
metadata-evalN/A
distribute-lft-outN/A
+-commutativeN/A
*-commutativeN/A
sqrt-prodN/A
Applied rewrites77.0%
(FPCore (x)
:precision binary64
(let* ((t_0 (/ 0.5 (sqrt (fma (fabs x) (fabs x) 1.0))))
(t_1 (* (fabs x) (fabs x))))
(if (<= (fabs x) 0.0215)
(*
(* (fma (fma 0.0673828125 t_1 -0.0859375) t_1 0.125) (fabs x))
(fabs x))
(/ (- t_0 0.5) (- -1.0 (sqrt (- t_0 -0.5)))))))double code(double x) {
/* Herbie alternative: t_0 = 0.5 / hypot(1, x); t_1 = x^2. */
double t_0 = 0.5 / sqrt(fma(fabs(x), fabs(x), 1.0));
double t_1 = fabs(x) * fabs(x);
double tmp;
if (fabs(x) <= 0.0215) {
/* Small |x|: nested-fma evaluation of the even Taylor polynomial about 0. */
tmp = (fma(fma(0.0673828125, t_1, -0.0859375), t_1, 0.125) * fabs(x)) * fabs(x);
} else {
/* General branch: sign-flipped quotient (t_0 - 0.5) / (-1 - sqrt(t_0 + 0.5)). */
tmp = (t_0 - 0.5) / (-1.0 - sqrt((t_0 - -0.5)));
}
return tmp;
}
function code(x) t_0 = Float64(0.5 / sqrt(fma(abs(x), abs(x), 1.0))) t_1 = Float64(abs(x) * abs(x)) tmp = 0.0 if (abs(x) <= 0.0215) tmp = Float64(Float64(fma(fma(0.0673828125, t_1, -0.0859375), t_1, 0.125) * abs(x)) * abs(x)); else tmp = Float64(Float64(t_0 - 0.5) / Float64(-1.0 - sqrt(Float64(t_0 - -0.5)))); end return tmp end
code[x_] := Block[{t$95$0 = N[(0.5 / N[Sqrt[N[(N[Abs[x], $MachinePrecision] * N[Abs[x], $MachinePrecision] + 1.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(N[Abs[x], $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision]}, If[LessEqual[N[Abs[x], $MachinePrecision], 0.0215], N[(N[(N[(N[(0.0673828125 * t$95$1 + -0.0859375), $MachinePrecision] * t$95$1 + 0.125), $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision], N[(N[(t$95$0 - 0.5), $MachinePrecision] / N[(-1.0 - N[Sqrt[N[(t$95$0 - -0.5), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}
t_0 := \frac{0.5}{\sqrt{\mathsf{fma}\left(\left|x\right|, \left|x\right|, 1\right)}}\\
t_1 := \left|x\right| \cdot \left|x\right|\\
\mathbf{if}\;\left|x\right| \leq 0.0215:\\
\;\;\;\;\left(\mathsf{fma}\left(\mathsf{fma}\left(0.0673828125, t\_1, -0.0859375\right), t\_1, 0.125\right) \cdot \left|x\right|\right) \cdot \left|x\right|\\
\mathbf{else}:\\
\;\;\;\;\frac{t\_0 - 0.5}{-1 - \sqrt{t\_0 - -0.5}}\\
\end{array}
if x < 0.021499999999999998Initial program 76.2%
Taylor expanded in x around 0
lower-*.f64N/A
lower-pow.f64N/A
lower-+.f64N/A
lower-*.f64N/A
lower-pow.f64N/A
lower--.f64N/A
lower-*.f64N/A
lower-pow.f6450.7%
Applied rewrites50.7%
lift-*.f64N/A
*-commutativeN/A
lift-pow.f64N/A
pow2N/A
associate-*r*N/A
lower-*.f64N/A
Applied rewrites50.7%
if 0.021499999999999998 < x Initial program 76.2%
lift--.f64N/A
flip--N/A
lower-unsound-/.f64N/A
Applied rewrites77.0%
lift-/.f64N/A
frac-2negN/A
lower-/.f64N/A
lift--.f64N/A
lift-*.f64N/A
metadata-evalN/A
sub-negate-revN/A
lift--.f64N/A
associate--l-N/A
metadata-evalN/A
lower--.f64N/A
lift-+.f64N/A
distribute-neg-inN/A
metadata-evalN/A
sub-flip-reverseN/A
Applied rewrites77.0%
(FPCore (x)
:precision binary64
(let* ((t_0 (- (/ 0.5 (fabs x)) -0.5)) (t_1 (* (fabs x) (fabs x))))
(if (<= (fabs x) 1.6)
(*
(* (fma (fma 0.0673828125 t_1 -0.0859375) t_1 0.125) (fabs x))
(fabs x))
(/ (- (* 1.0 1.0) t_0) (+ 1.0 (sqrt t_0))))))double code(double x) {
/* Herbie alternative: hypot(1, x) replaced by |x| in t_0 (per the derivation log,
   a Taylor expansion about infinity) — only used on the |x| > 1.6 branch. */
double t_0 = (0.5 / fabs(x)) - -0.5;
double t_1 = fabs(x) * fabs(x);
double tmp;
if (fabs(x) <= 1.6) {
/* |x| <= 1.6: nested-fma evaluation of the even Taylor polynomial about 0. */
tmp = (fma(fma(0.0673828125, t_1, -0.0859375), t_1, 0.125) * fabs(x)) * fabs(x);
} else {
/* Large |x|: (1 - t_0) / (1 + sqrt(t_0)); the 1.0 * 1.0 literal is generator output. */
tmp = ((1.0 * 1.0) - t_0) / (1.0 + sqrt(t_0));
}
return tmp;
}
function code(x) t_0 = Float64(Float64(0.5 / abs(x)) - -0.5) t_1 = Float64(abs(x) * abs(x)) tmp = 0.0 if (abs(x) <= 1.6) tmp = Float64(Float64(fma(fma(0.0673828125, t_1, -0.0859375), t_1, 0.125) * abs(x)) * abs(x)); else tmp = Float64(Float64(Float64(1.0 * 1.0) - t_0) / Float64(1.0 + sqrt(t_0))); end return tmp end
code[x_] := Block[{t$95$0 = N[(N[(0.5 / N[Abs[x], $MachinePrecision]), $MachinePrecision] - -0.5), $MachinePrecision]}, Block[{t$95$1 = N[(N[Abs[x], $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision]}, If[LessEqual[N[Abs[x], $MachinePrecision], 1.6], N[(N[(N[(N[(0.0673828125 * t$95$1 + -0.0859375), $MachinePrecision] * t$95$1 + 0.125), $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision], N[(N[(N[(1.0 * 1.0), $MachinePrecision] - t$95$0), $MachinePrecision] / N[(1.0 + N[Sqrt[t$95$0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}
t_0 := \frac{0.5}{\left|x\right|} - -0.5\\
t_1 := \left|x\right| \cdot \left|x\right|\\
\mathbf{if}\;\left|x\right| \leq 1.6:\\
\;\;\;\;\left(\mathsf{fma}\left(\mathsf{fma}\left(0.0673828125, t\_1, -0.0859375\right), t\_1, 0.125\right) \cdot \left|x\right|\right) \cdot \left|x\right|\\
\mathbf{else}:\\
\;\;\;\;\frac{1 \cdot 1 - t\_0}{1 + \sqrt{t\_0}}\\
\end{array}
if x < 1.6000000000000001Initial program 76.2%
Taylor expanded in x around 0
lower-*.f64N/A
lower-pow.f64N/A
lower-+.f64N/A
lower-*.f64N/A
lower-pow.f64N/A
lower--.f64N/A
lower-*.f64N/A
lower-pow.f6450.7%
Applied rewrites50.7%
lift-*.f64N/A
*-commutativeN/A
lift-pow.f64N/A
pow2N/A
associate-*r*N/A
lower-*.f64N/A
Applied rewrites50.7%
if 1.6000000000000001 < x Initial program 76.2%
lift--.f64N/A
flip--N/A
lower-unsound-/.f64N/A
Applied rewrites77.0%
Taylor expanded in x around inf
lower-/.f6451.4%
Applied rewrites51.4%
Taylor expanded in x around inf
lower-/.f6450.7%
Applied rewrites50.7%
(FPCore (x)
:precision binary64
(let* ((t_0 (/ 0.5 (fabs x))) (t_1 (* (fabs x) (fabs x))))
(if (<= (fabs x) 1.6)
(*
(* (fma (fma 0.0673828125 t_1 -0.0859375) t_1 0.125) (fabs x))
(fabs x))
(/ (- t_0 0.5) (- -1.0 (sqrt (- t_0 -0.5)))))))double code(double x) {
/* Herbie alternative: hypot(1, x) approximated by |x| in t_0 (per the derivation
   log, a Taylor expansion about infinity) — only used on the |x| > 1.6 branch. */
double t_0 = 0.5 / fabs(x);
double t_1 = fabs(x) * fabs(x);
double tmp;
if (fabs(x) <= 1.6) {
/* |x| <= 1.6: nested-fma evaluation of the even Taylor polynomial about 0. */
tmp = (fma(fma(0.0673828125, t_1, -0.0859375), t_1, 0.125) * fabs(x)) * fabs(x);
} else {
/* Large |x|: sign-flipped quotient (t_0 - 0.5) / (-1 - sqrt(t_0 + 0.5)). */
tmp = (t_0 - 0.5) / (-1.0 - sqrt((t_0 - -0.5)));
}
return tmp;
}
# Julia form of the |x| <= 1.6 piecewise alternative.
# NOTE(review): the report generator collapsed the statements onto one line;
# this likely needs newlines or semicolons between statements to parse — verify.
function code(x) t_0 = Float64(0.5 / abs(x)) t_1 = Float64(abs(x) * abs(x)) tmp = 0.0 if (abs(x) <= 1.6) tmp = Float64(Float64(fma(fma(0.0673828125, t_1, -0.0859375), t_1, 0.125) * abs(x)) * abs(x)); else tmp = Float64(Float64(t_0 - 0.5) / Float64(-1.0 - sqrt(Float64(t_0 - -0.5)))); end return tmp end
(* Mathematica form of the |x| <= 1.6 alternative with t$95$0 = 0.5/|x| and t$95$1 = x^2. *)
code[x_] := Block[{t$95$0 = N[(0.5 / N[Abs[x], $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(N[Abs[x], $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision]}, If[LessEqual[N[Abs[x], $MachinePrecision], 1.6], N[(N[(N[(N[(0.0673828125 * t$95$1 + -0.0859375), $MachinePrecision] * t$95$1 + 0.125), $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision], N[(N[(t$95$0 - 0.5), $MachinePrecision] / N[(-1.0 - N[Sqrt[N[(t$95$0 - -0.5), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}
t_0 := \frac{0.5}{\left|x\right|}\\
t_1 := \left|x\right| \cdot \left|x\right|\\
\mathbf{if}\;\left|x\right| \leq 1.6:\\
\;\;\;\;\left(\mathsf{fma}\left(\mathsf{fma}\left(0.0673828125, t_1, -0.0859375\right), t_1, 0.125\right) \cdot \left|x\right|\right) \cdot \left|x\right|\\
\mathbf{else}:\\
\;\;\;\;\frac{t_0 - 0.5}{-1 - \sqrt{t_0 - -0.5}}\\
\end{array}
if x < 1.6000000000000001: Initial program 76.2%
Taylor expanded in x around 0
lower-*.f64N/A
lower-pow.f64N/A
lower-+.f64N/A
lower-*.f64N/A
lower-pow.f64N/A
lower--.f64N/A
lower-*.f64N/A
lower-pow.f6450.7%
Applied rewrites50.7%
lift-*.f64N/A
*-commutativeN/A
lift-pow.f64N/A
pow2N/A
associate-*r*N/A
lower-*.f64N/A
Applied rewrites50.7%
if 1.6000000000000001 < x Initial program 76.2%
lift--.f64N/A
flip--N/A
lower-unsound-/.f64N/A
Applied rewrites77.0%
lift-/.f64N/A
frac-2negN/A
lower-/.f64N/A
lift--.f64N/A
lift-*.f64N/A
metadata-evalN/A
sub-negate-revN/A
lift--.f64N/A
associate--l-N/A
metadata-evalN/A
lower--.f64N/A
lift-+.f64N/A
distribute-neg-inN/A
metadata-evalN/A
sub-flip-reverseN/A
Applied rewrites77.0%
Taylor expanded in x around inf
lower-/.f6451.4%
Applied rewrites51.4%
Taylor expanded in x around inf
lower-/.f6450.7%
Applied rewrites50.7%
(FPCore (x)
:precision binary64
(let* ((t_0 (/ 0.5 (fabs x))))
(if (<= (fabs x) 2.2)
(* (* 0.125 (fabs x)) (fabs x))
(/ (- t_0 0.5) (- -1.0 (sqrt (- t_0 -0.5)))))))double code(double x) {
/* Rearranged form of 1 - sqrt(0.5*(1 + 1/hypot(1, x))); the polynomial
   branch is reduced to its leading Taylor term x*x/8.
   NOTE(review): t_0 is computed unconditionally, so x == 0 divides by
   zero even though that branch's value is unused. */
double t_0 = 0.5 / fabs(x);
double tmp;
if (fabs(x) <= 2.2) {
/* Leading Taylor term about x = 0. */
tmp = (0.125 * fabs(x)) * fabs(x);
} else {
/* Rearranged quotient; avoids cancellation in 1 - sqrt(value near 1). */
tmp = (t_0 - 0.5) / (-1.0 - sqrt((t_0 - -0.5)));
}
return tmp;
}
module fmin_fmax_functions
! NaN-aware fmax/fmin helpers emitted alongside the generated code.
! Unlike the plain max/min intrinsics, these return the other operand
! when exactly one argument is NaN; the test `v /= v` is true only for
! NaN values (NaN compares unequal to itself).
implicit none
private
public fmax
public fmin
! Generic fmax over every real(4)/real(8) argument combination.
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
! Generic fmin over the same combinations.
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
! fmax of two real(8): if x is NaN take y, else if y is NaN take x,
! otherwise the ordinary maximum.
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! fmax of two real(4); same NaN handling as fmax88.
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! Mixed-kind fmax; the real(4) operand is widened with dble().
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
! Mixed-kind fmax with the real(4) operand first.
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
! fmin of two real(8); mirror image of fmax88.
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! fmin of two real(4).
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! Mixed-kind fmin; real(4) operand widened with dble().
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
! Mixed-kind fmin with the real(4) operand first.
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
real(8) function code(x)
  ! Herbie-generated approximation of
  !   1 - sqrt(0.5 * (1 + 1/hypot(1, x)))
  ! |x| <= 2.2: leading Taylor term x**2 / 8 about x = 0.
  ! otherwise:  algebraic rearrangement that avoids the catastrophic
  !             cancellation in 1 - sqrt(value near 1).
  ! Fixes vs. the generated original: implicit none added, the unused
  ! `use fmin_fmax_functions` removed, and the division moved into the
  ! branch that uses it so x == 0 no longer raises a spurious
  ! divide-by-zero.
  implicit none
  real(8), intent (in) :: x
  real(8) :: t_0
  real(8) :: tmp
  if (abs(x) <= 2.2d0) then
    tmp = (0.125d0 * abs(x)) * abs(x)
  else
    t_0 = 0.5d0 / abs(x)
    tmp = (t_0 - 0.5d0) / ((-1.0d0) - sqrt((t_0 - (-0.5d0))))
  end if
  code = tmp
end function code
public static double code(double x) {
    // Herbie rewrite of 1 - sqrt(0.5*(1 + 1/hypot(1, x))):
    // leading Taylor term x*x/8 near zero, rearranged quotient otherwise.
    double ax = Math.abs(x);
    double halfOverR = 0.5 / ax; // evaluated unconditionally, as in the original
    double result;
    if (ax <= 2.2) {
        result = (0.125 * ax) * ax;
    } else {
        result = (halfOverR - 0.5) / (-1.0 - Math.sqrt(halfOverR - -0.5));
    }
    return result;
}
def code(x):
    """Herbie-rewritten approximation of 1 - sqrt(0.5*(1 + 1/hypot(1, x))).

    Piecewise: the leading Taylor term x*x/8 for |x| <= 2.2, otherwise an
    algebraic rearrangement that avoids cancellation in 1 - sqrt(~1).

    Fixes vs. the generated original: the statements were collapsed onto a
    single (syntactically invalid) line, and the division was performed
    unconditionally so x == 0 raised ZeroDivisionError.
    """
    ax = math.fabs(x)
    if ax <= 2.2:
        return (0.125 * ax) * ax
    # Computed only on this branch so x == 0 cannot divide by zero.
    t_0 = 0.5 / ax
    return (t_0 - 0.5) / (-1.0 - math.sqrt(t_0 - -0.5))
# Julia form of the |x| <= 2.2 piecewise alternative.
# NOTE(review): statements collapsed onto one line by the report generator;
# likely needs newlines or semicolons to parse — verify.
function code(x) t_0 = Float64(0.5 / abs(x)) tmp = 0.0 if (abs(x) <= 2.2) tmp = Float64(Float64(0.125 * abs(x)) * abs(x)); else tmp = Float64(Float64(t_0 - 0.5) / Float64(-1.0 - sqrt(Float64(t_0 - -0.5)))); end return tmp end
% MATLAB form of the |x| <= 2.2 piecewise alternative (collapsed to one line
% by the report generator; needs line breaks to run as an m-file).
function tmp_2 = code(x) t_0 = 0.5 / abs(x); tmp = 0.0; if (abs(x) <= 2.2) tmp = (0.125 * abs(x)) * abs(x); else tmp = (t_0 - 0.5) / (-1.0 - sqrt((t_0 - -0.5))); end tmp_2 = tmp; end
(* Mathematica form of the |x| <= 2.2 alternative with t$95$0 = 0.5/|x|. *)
code[x_] := Block[{t$95$0 = N[(0.5 / N[Abs[x], $MachinePrecision]), $MachinePrecision]}, If[LessEqual[N[Abs[x], $MachinePrecision], 2.2], N[(N[(0.125 * N[Abs[x], $MachinePrecision]), $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision], N[(N[(t$95$0 - 0.5), $MachinePrecision] / N[(-1.0 - N[Sqrt[N[(t$95$0 - -0.5), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
t_0 := \frac{0.5}{\left|x\right|}\\
\mathbf{if}\;\left|x\right| \leq 2.2:\\
\;\;\;\;\left(0.125 \cdot \left|x\right|\right) \cdot \left|x\right|\\
\mathbf{else}:\\
\;\;\;\;\frac{t\_0 - 0.5}{-1 - \sqrt{t\_0 - -0.5}}\\
\end{array}
if x < 2.2000000000000002: Initial program 76.2%
Taylor expanded in x around 0
lower-*.f64N/A
lower-pow.f64N/A
lower-+.f64N/A
lower-*.f64N/A
lower-pow.f64N/A
lower--.f64N/A
lower-*.f64N/A
lower-pow.f6450.7%
Applied rewrites50.7%
Taylor expanded in x around 0
Applied rewrites50.8%
lift-*.f64N/A
*-commutativeN/A
lift-pow.f64N/A
pow2N/A
associate-*r*N/A
lower-*.f64N/A
lower-*.f6450.8%
Applied rewrites50.8%
if 2.2000000000000002 < x Initial program 76.2%
lift--.f64N/A
flip--N/A
lower-unsound-/.f64N/A
Applied rewrites77.0%
lift-/.f64N/A
frac-2negN/A
lower-/.f64N/A
lift--.f64N/A
lift-*.f64N/A
metadata-evalN/A
sub-negate-revN/A
lift--.f64N/A
associate--l-N/A
metadata-evalN/A
lower--.f64N/A
lift-+.f64N/A
distribute-neg-inN/A
metadata-evalN/A
sub-flip-reverseN/A
Applied rewrites77.0%
Taylor expanded in x around inf
lower-/.f6451.4%
Applied rewrites51.4%
Taylor expanded in x around inf
lower-/.f6450.7%
Applied rewrites50.7%
;; Piecewise: x^2/8 for |x| <= 0.0105, else the exact rearrangement using fma.
(FPCore (x) :precision binary64 (if (<= (fabs x) 0.0105) (* (* 0.125 (fabs x)) (fabs x)) (- 1.0 (sqrt (- (/ 0.5 (sqrt (fma (fabs x) (fabs x) 1.0))) -0.5)))))
double code(double x) {
double tmp;
if (fabs(x) <= 0.0105) {
tmp = (0.125 * fabs(x)) * fabs(x);
} else {
tmp = 1.0 - sqrt(((0.5 / sqrt(fma(fabs(x), fabs(x), 1.0))) - -0.5));
}
return tmp;
}
# Julia form of the |x| <= 0.0105 alternative (fma-based rearrangement).
# NOTE(review): statements collapsed onto one line; likely needs separators to parse.
function code(x) tmp = 0.0 if (abs(x) <= 0.0105) tmp = Float64(Float64(0.125 * abs(x)) * abs(x)); else tmp = Float64(1.0 - sqrt(Float64(Float64(0.5 / sqrt(fma(abs(x), abs(x), 1.0))) - -0.5))); end return tmp end
(* Mathematica form of the |x| <= 0.0105 alternative. *)
code[x_] := If[LessEqual[N[Abs[x], $MachinePrecision], 0.0105], N[(N[(0.125 * N[Abs[x], $MachinePrecision]), $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision], N[(1.0 - N[Sqrt[N[(N[(0.5 / N[Sqrt[N[(N[Abs[x], $MachinePrecision] * N[Abs[x], $MachinePrecision] + 1.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] - -0.5), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\mathbf{if}\;\left|x\right| \leq 0.0105:\\
\;\;\;\;\left(0.125 \cdot \left|x\right|\right) \cdot \left|x\right|\\
\mathbf{else}:\\
\;\;\;\;1 - \sqrt{\frac{0.5}{\sqrt{\mathsf{fma}\left(\left|x\right|, \left|x\right|, 1\right)}} - -0.5}\\
\end{array}
if x < 0.0105000000000000007: Initial program 76.2%
Taylor expanded in x around 0
lower-*.f64N/A
lower-pow.f64N/A
lower-+.f64N/A
lower-*.f64N/A
lower-pow.f64N/A
lower--.f64N/A
lower-*.f64N/A
lower-pow.f6450.7%
Applied rewrites50.7%
Taylor expanded in x around 0
Applied rewrites50.8%
lift-*.f64N/A
*-commutativeN/A
lift-pow.f64N/A
pow2N/A
associate-*r*N/A
lower-*.f64N/A
lower-*.f6450.8%
Applied rewrites50.8%
if 0.0105000000000000007 < x Initial program 76.2%
lift-*.f64N/A
lift-+.f64N/A
+-commutativeN/A
distribute-rgt-inN/A
metadata-evalN/A
add-flipN/A
lower--.f64N/A
lift-/.f64N/A
associate-*l/N/A
metadata-evalN/A
lower-/.f64N/A
lift-hypot.f64N/A
lower-sqrt.f64N/A
metadata-evalN/A
+-commutativeN/A
lower-fma.f64N/A
metadata-eval76.2%
Applied rewrites76.2%
;; Piecewise: x^2/8 for |x| <= 0.024, else the constant 1 - sqrt(1/2).
(FPCore (x) :precision binary64 (if (<= (fabs x) 0.024) (* (* 0.125 (fabs x)) (fabs x)) 0.2928932188134525))
/* Piecewise approximation of 1 - sqrt(0.5*(1 + 1/hypot(1, x))):
   leading Taylor term near zero, constant tail elsewhere. */
double code(double x) {
    double ax = fabs(x);
    if (ax <= 0.024) {
        return (0.125 * ax) * ax;  /* Taylor about 0: x*x / 8 */
    }
    return 0.2928932188134525;     /* constant tail, 1 - sqrt(0.5) */
}
module fmin_fmax_functions
! NaN-aware fmax/fmin helpers emitted alongside the generated code.
! Unlike the plain max/min intrinsics, these return the other operand
! when exactly one argument is NaN; the test `v /= v` is true only for
! NaN values (NaN compares unequal to itself).
implicit none
private
public fmax
public fmin
! Generic fmax over every real(4)/real(8) argument combination.
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
! Generic fmin over the same combinations.
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
! fmax of two real(8): if x is NaN take y, else if y is NaN take x,
! otherwise the ordinary maximum.
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! fmax of two real(4); same NaN handling as fmax88.
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! Mixed-kind fmax; the real(4) operand is widened with dble().
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
! Mixed-kind fmax with the real(4) operand first.
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
! fmin of two real(8); mirror image of fmax88.
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! fmin of two real(4).
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! Mixed-kind fmin; real(4) operand widened with dble().
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
! Mixed-kind fmin with the real(4) operand first.
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
real(8) function code(x)
  ! Piecewise approximation of 1 - sqrt(0.5 * (1 + 1/hypot(1, x))):
  ! the leading Taylor term x**2 / 8 for |x| <= 0.024, and the constant
  ! tail 0.2928932188134525 (1 - sqrt(0.5)) otherwise.
  ! Fixes vs. the generated original: implicit none added and the
  ! unused `use fmin_fmax_functions` removed.
  implicit none
  real(8), intent (in) :: x
  real(8) :: tmp
  if (abs(x) <= 0.024d0) then
    tmp = (0.125d0 * abs(x)) * abs(x)
  else
    tmp = 0.2928932188134525d0
  end if
  code = tmp
end function code
public static double code(double x) {
    // Piecewise: leading Taylor term x*x/8 near zero, otherwise the
    // constant tail 1 - sqrt(0.5).
    double ax = Math.abs(x);
    if (ax <= 0.024) {
        return (0.125 * ax) * ax;
    }
    return 0.2928932188134525;
}
def code(x):
    """Piecewise approximation of 1 - sqrt(0.5*(1 + 1/hypot(1, x))).

    Returns x*x/8 for |x| <= 0.024 and the constant tail
    0.2928932188134525 (1 - sqrt(0.5)) otherwise.

    Fix vs. the generated original: the statements were collapsed onto a
    single line, which is not valid Python syntax.
    """
    ax = math.fabs(x)
    if ax <= 0.024:
        return (0.125 * ax) * ax
    return 0.2928932188134525
# Julia form of the |x| <= 0.024 alternative (quadratic / constant).
# NOTE(review): statements collapsed onto one line; likely needs separators to parse.
function code(x) tmp = 0.0 if (abs(x) <= 0.024) tmp = Float64(Float64(0.125 * abs(x)) * abs(x)); else tmp = 0.2928932188134525; end return tmp end
% MATLAB form of the |x| <= 0.024 alternative (quadratic / constant);
% collapsed to one line by the report generator.
function tmp_2 = code(x) tmp = 0.0; if (abs(x) <= 0.024) tmp = (0.125 * abs(x)) * abs(x); else tmp = 0.2928932188134525; end tmp_2 = tmp; end
(* Mathematica form of the |x| <= 0.024 alternative. *)
code[x_] := If[LessEqual[N[Abs[x], $MachinePrecision], 0.024], N[(N[(0.125 * N[Abs[x], $MachinePrecision]), $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision], 0.2928932188134525]
\begin{array}{l}
\mathbf{if}\;\left|x\right| \leq 0.024:\\
\;\;\;\;\left(0.125 \cdot \left|x\right|\right) \cdot \left|x\right|\\
\mathbf{else}:\\
\;\;\;\;0.2928932188134525\\
\end{array}
if x < 0.024: Initial program 76.2%
Taylor expanded in x around 0
lower-*.f64N/A
lower-pow.f64N/A
lower-+.f64N/A
lower-*.f64N/A
lower-pow.f64N/A
lower--.f64N/A
lower-*.f64N/A
lower-pow.f6450.7%
Applied rewrites50.7%
Taylor expanded in x around 0
Applied rewrites50.8%
lift-*.f64N/A
*-commutativeN/A
lift-pow.f64N/A
pow2N/A
associate-*r*N/A
lower-*.f64N/A
lower-*.f6450.8%
Applied rewrites50.8%
if 0.024 < x Initial program 76.2%
lift--.f64N/A
flip--N/A
lower-unsound-/.f64N/A
Applied rewrites77.0%
Taylor expanded in x around inf
lower-/.f64N/A
lower-+.f64N/A
lower-sqrt.f6451.9%
Applied rewrites51.9%
Evaluated real constant51.9%
;; Degenerate piecewise: exactly 0 below the tiny threshold, else 1 - sqrt(1/2).
(FPCore (x) :precision binary64 (if (<= (fabs x) 7.5e-80) (- 1.0 1.0) 0.2928932188134525))
/* Degenerate piecewise form: the branch below the tiny threshold is
   literally 1.0 - 1.0 == 0.0; otherwise the constant 1 - sqrt(0.5). */
double code(double x) {
    if (fabs(x) <= 7.5e-80) {
        return 1.0 - 1.0;
    }
    return 0.2928932188134525;
}
module fmin_fmax_functions
! NaN-aware fmax/fmin helpers emitted alongside the generated code.
! Unlike the plain max/min intrinsics, these return the other operand
! when exactly one argument is NaN; the test `v /= v` is true only for
! NaN values (NaN compares unequal to itself).
implicit none
private
public fmax
public fmin
! Generic fmax over every real(4)/real(8) argument combination.
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
! Generic fmin over the same combinations.
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
! fmax of two real(8): if x is NaN take y, else if y is NaN take x,
! otherwise the ordinary maximum.
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! fmax of two real(4); same NaN handling as fmax88.
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! Mixed-kind fmax; the real(4) operand is widened with dble().
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
! Mixed-kind fmax with the real(4) operand first.
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
! fmin of two real(8); mirror image of fmax88.
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! fmin of two real(4).
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! Mixed-kind fmin; real(4) operand widened with dble().
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
! Mixed-kind fmin with the real(4) operand first.
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
real(8) function code(x)
  ! Degenerate piecewise approximation: below the tiny threshold the
  ! branch is literally 1 - 1 = 0; otherwise the constant tail
  ! 0.2928932188134525 (1 - sqrt(0.5)).
  ! Fixes vs. the generated original: implicit none added and the
  ! unused `use fmin_fmax_functions` removed.
  implicit none
  real(8), intent (in) :: x
  real(8) :: tmp
  if (abs(x) <= 7.5d-80) then
    tmp = 1.0d0 - 1.0d0
  else
    tmp = 0.2928932188134525d0
  end if
  code = tmp
end function code
public static double code(double x) {
    // Below the tiny threshold the result is literally 1.0 - 1.0 == 0.0;
    // otherwise the constant tail 1 - sqrt(0.5).
    if (Math.abs(x) <= 7.5e-80) {
        return 1.0 - 1.0;
    }
    return 0.2928932188134525;
}
def code(x):
    """Degenerate piecewise form: exactly 0.0 for |x| <= 7.5e-80, else
    the constant 0.2928932188134525 (1 - sqrt(0.5)).

    Fix vs. the generated original: the statements were collapsed onto a
    single line, which is not valid Python syntax.
    """
    if math.fabs(x) <= 7.5e-80:
        return 1.0 - 1.0
    return 0.2928932188134525
# Julia form of the degenerate (zero / constant) alternative.
# NOTE(review): statements collapsed onto one line; likely needs separators to parse.
function code(x) tmp = 0.0 if (abs(x) <= 7.5e-80) tmp = Float64(1.0 - 1.0); else tmp = 0.2928932188134525; end return tmp end
% MATLAB form of the degenerate (zero / constant) alternative;
% collapsed to one line by the report generator.
function tmp_2 = code(x) tmp = 0.0; if (abs(x) <= 7.5e-80) tmp = 1.0 - 1.0; else tmp = 0.2928932188134525; end tmp_2 = tmp; end
(* Mathematica form of the degenerate (zero / constant) alternative. *)
code[x_] := If[LessEqual[N[Abs[x], $MachinePrecision], 7.5e-80], N[(1.0 - 1.0), $MachinePrecision], 0.2928932188134525]
\begin{array}{l}
\mathbf{if}\;\left|x\right| \leq 7.5 \cdot 10^{-80}:\\
\;\;\;\;1 - 1\\
\mathbf{else}:\\
\;\;\;\;0.2928932188134525\\
\end{array}
if x < 7.49999999999999999e-80: Initial program 76.2%
Taylor expanded in x around 0
Applied rewrites27.0%
if 7.49999999999999999e-80 < x Initial program 76.2%
lift--.f64N/A
flip--N/A
lower-unsound-/.f64N/A
Applied rewrites77.0%
Taylor expanded in x around inf
lower-/.f64N/A
lower-+.f64N/A
lower-sqrt.f6451.9%
Applied rewrites51.9%
Evaluated real constant51.9%
;; Fully constant-folded alternative: 1 - sqrt(1/2).
(FPCore (x) :precision binary64 0.2928932188134525)
/* Fully constant-folded approximation: 1 - sqrt(0.5).
   The argument is accepted for interface compatibility but unused. */
double code(double x) {
    (void)x;
    return 0.2928932188134525;
}
module fmin_fmax_functions
! NaN-aware fmax/fmin helpers emitted alongside the generated code.
! Unlike the plain max/min intrinsics, these return the other operand
! when exactly one argument is NaN; the test `v /= v` is true only for
! NaN values (NaN compares unequal to itself).
implicit none
private
public fmax
public fmin
! Generic fmax over every real(4)/real(8) argument combination.
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
! Generic fmin over the same combinations.
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
! fmax of two real(8): if x is NaN take y, else if y is NaN take x,
! otherwise the ordinary maximum.
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! fmax of two real(4); same NaN handling as fmax88.
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! Mixed-kind fmax; the real(4) operand is widened with dble().
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
! Mixed-kind fmax with the real(4) operand first.
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
! fmin of two real(8); mirror image of fmax88.
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! fmin of two real(4).
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! Mixed-kind fmin; real(4) operand widened with dble().
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
! Mixed-kind fmin with the real(4) operand first.
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
real(8) function code(x)
  ! Fully constant-folded approximation: 1 - sqrt(0.5).
  ! x is accepted for interface compatibility but unused.
  ! Fixes vs. the generated original: implicit none added and the
  ! unused `use fmin_fmax_functions` removed.
  implicit none
  real(8), intent (in) :: x
  code = 0.2928932188134525d0
end function code
public static double code(double x) {
    // Constant fold of the whole expression: 1 - sqrt(0.5).
    return 0.2928932188134525;
}
def code(x):
    """Constant approximation 1 - sqrt(0.5) of the original expression."""
    return 0.2928932188134525
# Julia form of the fully constant-folded alternative: 1 - sqrt(0.5).
function code(x) return 0.2928932188134525 end
% MATLAB form of the fully constant-folded alternative: 1 - sqrt(0.5).
function tmp = code(x) tmp = 0.2928932188134525; end
(* Mathematica form of the fully constant-folded alternative. *)
code[x_] := 0.2928932188134525
0.2928932188134525
Initial program 76.2%
lift--.f64N/A
flip--N/A
lower-unsound-/.f64N/A
Applied rewrites77.0%
Taylor expanded in x around inf
lower-/.f64N/A
lower-+.f64N/A
lower-sqrt.f6451.9%
Applied rewrites51.9%
Evaluated real constant51.9%
herbie shell --seed 2025182
;; Original input program for this Herbie report.
(FPCore (x)
:name "Given's Rotation SVD example, simplified"
:precision binary64
(- 1.0 (sqrt (* 0.5 (+ 1.0 (/ 1.0 (hypot 1.0 x)))))))