
(FPCore (v w r) :precision binary64 (- (- (+ 3.0 (/ 2.0 (* r r))) (/ (* (* 0.125 (- 3.0 (* 2.0 v))) (* (* (* w w) r) r)) (- 1.0 v))) 4.5))
/* Double-precision evaluation of
 *   ((3 + 2/r^2) - (0.125*(3 - 2v) * (w^2 * r^2)) / (1 - v)) - 4.5
 * with the exact association order of the reference FPCore expression. */
double code(double v, double w, double r) {
    double head = 3.0 + (2.0 / (r * r));           /* 3 + 2/r^2 */
    double scale = 0.125 * (3.0 - (2.0 * v));      /* 0.125*(3 - 2v) */
    double wwrr = ((w * w) * r) * r;               /* w^2 * r^2, left-associated */
    return (head - (scale * wwrr) / (1.0 - v)) - 4.5;
}
! Double-precision evaluation of
!   ((3 + 2/r**2) - (0.125*(3 - 2*v) * w**2 * r**2)/(1 - v)) - 4.5
! matching the reference FPCore expression term for term.
real(8) function code(v, w, r)
real(8), intent (in) :: v
real(8), intent (in) :: w
real(8), intent (in) :: r
code = ((3.0d0 + (2.0d0 / (r * r))) - (((0.125d0 * (3.0d0 - (2.0d0 * v))) * (((w * w) * r) * r)) / (1.0d0 - v))) - 4.5d0
end function
/**
 * Evaluates ((3 + 2/r^2) - (0.125*(3 - 2v) * w^2 * r^2)/(1 - v)) - 4.5
 * in double precision, preserving the reference association order.
 */
public static double code(double v, double w, double r) {
    final double head = 3.0 + (2.0 / (r * r));
    final double scale = 0.125 * (3.0 - (2.0 * v));
    final double wwrr = ((w * w) * r) * r;
    return (head - (scale * wwrr) / (1.0 - v)) - 4.5;
}
def code(v, w, r):
    """Evaluate ((3 + 2/r**2) - (0.125*(3 - 2*v) * w**2 * r**2)/(1 - v)) - 4.5.

    Mirrors the reference FPCore expression's association order exactly.
    """
    head = 3.0 + (2.0 / (r * r))
    numer = (0.125 * (3.0 - (2.0 * v))) * (((w * w) * r) * r)
    return (head - numer / (1.0 - v)) - 4.5
# Evaluate ((3 + 2/r^2) - (0.125*(3 - 2v) * w^2 * r^2)/(1 - v)) - 4.5,
# coercing every intermediate to Float64 exactly as the generated original does.
function code(v, w, r)
    head = Float64(3.0 + Float64(2.0 / Float64(r * r)))
    numer = Float64(Float64(0.125 * Float64(3.0 - Float64(2.0 * v))) * Float64(Float64(Float64(w * w) * r) * r))
    frac = Float64(numer / Float64(1.0 - v))
    return Float64(Float64(head - frac) - 4.5)
end
% Double-precision evaluation of ((3 + 2/r^2) - (0.125*(3 - 2*v)*w^2*r^2)/(1 - v)) - 4.5.
function tmp = code(v, w, r) tmp = ((3.0 + (2.0 / (r * r))) - (((0.125 * (3.0 - (2.0 * v))) * (((w * w) * r) * r)) / (1.0 - v))) - 4.5; end
(* Machine-precision evaluation of ((3 + 2/r^2) - (0.125*(3 - 2 v) w^2 r^2)/(1 - v)) - 4.5;
   each intermediate is wrapped in N[..., $MachinePrecision] to mimic binary64 rounding. *)
code[v_, w_, r_] := N[(N[(N[(3.0 + N[(2.0 / N[(r * r), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(N[(N[(0.125 * N[(3.0 - N[(2.0 * v), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(N[(N[(w * w), $MachinePrecision] * r), $MachinePrecision] * r), $MachinePrecision]), $MachinePrecision] / N[(1.0 - v), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - 4.5), $MachinePrecision]
\begin{array}{l}
\\
\left(\left(3 + \frac{2}{r \cdot r}\right) - \frac{\left(0.125 \cdot \left(3 - 2 \cdot v\right)\right) \cdot \left(\left(\left(w \cdot w\right) \cdot r\right) \cdot r\right)}{1 - v}\right) - 4.5
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 15 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (v w r) :precision binary64 (- (- (+ 3.0 (/ 2.0 (* r r))) (/ (* (* 0.125 (- 3.0 (* 2.0 v))) (* (* (* w w) r) r)) (- 1.0 v))) 4.5))
double code(double v, double w, double r) {
return ((3.0 + (2.0 / (r * r))) - (((0.125 * (3.0 - (2.0 * v))) * (((w * w) * r) * r)) / (1.0 - v))) - 4.5;
}
real(8) function code(v, w, r)
real(8), intent (in) :: v
real(8), intent (in) :: w
real(8), intent (in) :: r
code = ((3.0d0 + (2.0d0 / (r * r))) - (((0.125d0 * (3.0d0 - (2.0d0 * v))) * (((w * w) * r) * r)) / (1.0d0 - v))) - 4.5d0
end function
public static double code(double v, double w, double r) {
return ((3.0 + (2.0 / (r * r))) - (((0.125 * (3.0 - (2.0 * v))) * (((w * w) * r) * r)) / (1.0 - v))) - 4.5;
}
def code(v, w, r): return ((3.0 + (2.0 / (r * r))) - (((0.125 * (3.0 - (2.0 * v))) * (((w * w) * r) * r)) / (1.0 - v))) - 4.5
function code(v, w, r) return Float64(Float64(Float64(3.0 + Float64(2.0 / Float64(r * r))) - Float64(Float64(Float64(0.125 * Float64(3.0 - Float64(2.0 * v))) * Float64(Float64(Float64(w * w) * r) * r)) / Float64(1.0 - v))) - 4.5) end
function tmp = code(v, w, r) tmp = ((3.0 + (2.0 / (r * r))) - (((0.125 * (3.0 - (2.0 * v))) * (((w * w) * r) * r)) / (1.0 - v))) - 4.5; end
code[v_, w_, r_] := N[(N[(N[(3.0 + N[(2.0 / N[(r * r), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(N[(N[(0.125 * N[(3.0 - N[(2.0 * v), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(N[(N[(w * w), $MachinePrecision] * r), $MachinePrecision] * r), $MachinePrecision]), $MachinePrecision] / N[(1.0 - v), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - 4.5), $MachinePrecision]
\begin{array}{l}
\\
\left(\left(3 + \frac{2}{r \cdot r}\right) - \frac{\left(0.125 \cdot \left(3 - 2 \cdot v\right)\right) \cdot \left(\left(\left(w \cdot w\right) \cdot r\right) \cdot r\right)}{1 - v}\right) - 4.5
\end{array}
r_m = (fabs.f64 r)
(FPCore (v w r_m)
:precision binary64
(if (<= r_m 3.9e-46)
(fma (* r_m w) (* (* r_m w) -0.375) (+ -1.5 (/ 2.0 (* r_m r_m))))
(fma
(/ 2.0 r_m)
(/ 1.0 r_m)
(-
3.0
(fma
(* 0.125 (fma v -2.0 3.0))
(* (* w (* r_m w)) (/ r_m (- 1.0 v)))
4.5)))))r_m = fabs(r);
/* Herbie-rewritten alternative of
 *   ((3 + 2/r^2) - (0.125*(3 - 2v)*w^2*r^2)/(1 - v)) - 4.5,
 * where r_m = fabs(r) is precomputed by the caller.
 * The branch threshold 3.9e-46 is Herbie-derived from sampling (see the
 * derivation log in this report) — TODO confirm against the report's error plot. */
double code(double v, double w, double r_m) {
double tmp;
/* Tiny |r|: form obtained by Taylor expansion in v around 0 (per the log above). */
if (r_m <= 3.9e-46) {
tmp = fma((r_m * w), ((r_m * w) * -0.375), (-1.5 + (2.0 / (r_m * r_m))));
} else {
/* General regime: nested fma calls fuse the dominant products to cut rounding error. */
tmp = fma((2.0 / r_m), (1.0 / r_m), (3.0 - fma((0.125 * fma(v, -2.0, 3.0)), ((w * (r_m * w)) * (r_m / (1.0 - v))), 4.5)));
}
return tmp;
}
r_m = abs(r) function code(v, w, r_m) tmp = 0.0 if (r_m <= 3.9e-46) tmp = fma(Float64(r_m * w), Float64(Float64(r_m * w) * -0.375), Float64(-1.5 + Float64(2.0 / Float64(r_m * r_m)))); else tmp = fma(Float64(2.0 / r_m), Float64(1.0 / r_m), Float64(3.0 - fma(Float64(0.125 * fma(v, -2.0, 3.0)), Float64(Float64(w * Float64(r_m * w)) * Float64(r_m / Float64(1.0 - v))), 4.5))); end return tmp end
r_m = N[Abs[r], $MachinePrecision] code[v_, w_, r$95$m_] := If[LessEqual[r$95$m, 3.9e-46], N[(N[(r$95$m * w), $MachinePrecision] * N[(N[(r$95$m * w), $MachinePrecision] * -0.375), $MachinePrecision] + N[(-1.5 + N[(2.0 / N[(r$95$m * r$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(2.0 / r$95$m), $MachinePrecision] * N[(1.0 / r$95$m), $MachinePrecision] + N[(3.0 - N[(N[(0.125 * N[(v * -2.0 + 3.0), $MachinePrecision]), $MachinePrecision] * N[(N[(w * N[(r$95$m * w), $MachinePrecision]), $MachinePrecision] * N[(r$95$m / N[(1.0 - v), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + 4.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
r_m = \left|r\right|
\\
\begin{array}{l}
\mathbf{if}\;r\_m \leq 3.9 \cdot 10^{-46}:\\
\;\;\;\;\mathsf{fma}\left(r\_m \cdot w, \left(r\_m \cdot w\right) \cdot -0.375, -1.5 + \frac{2}{r\_m \cdot r\_m}\right)\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(\frac{2}{r\_m}, \frac{1}{r\_m}, 3 - \mathsf{fma}\left(0.125 \cdot \mathsf{fma}\left(v, -2, 3\right), \left(w \cdot \left(r\_m \cdot w\right)\right) \cdot \frac{r\_m}{1 - v}, 4.5\right)\right)\\
\end{array}
\end{array}
if r < 3.9000000000000003e-46Initial program 87.3%
Taylor expanded in v around 0
sub-negN/A
lower-+.f64N/A
associate-*r/N/A
metadata-evalN/A
lower-/.f64N/A
unpow2N/A
lower-*.f64N/A
+-commutativeN/A
distribute-neg-inN/A
associate-*r*N/A
*-commutativeN/A
distribute-rgt-neg-inN/A
distribute-lft-neg-inN/A
metadata-evalN/A
metadata-evalN/A
lower-fma.f64N/A
Applied rewrites80.0%
Applied rewrites94.2%
if 3.9000000000000003e-46 < r Initial program 85.5%
lift--.f64N/A
lift--.f64N/A
associate--l-N/A
lift-+.f64N/A
+-commutativeN/A
associate--l+N/A
lift-/.f64N/A
lift-*.f64N/A
associate-/r*N/A
div-invN/A
lower-fma.f64N/A
lower-/.f64N/A
lower-/.f64N/A
Applied rewrites99.7%
r_m = (fabs.f64 r)
(FPCore (v w r_m)
:precision binary64
(let* ((t_0 (/ 2.0 (* r_m r_m)))
(t_1
(+
(+ 3.0 t_0)
(/
(* (* 0.125 (- 3.0 (* 2.0 v))) (* r_m (* r_m (* w w))))
(+ v -1.0)))))
(if (<= t_1 -20000000000000.0)
(- 3.0 (fma w (* (* w (* r_m r_m)) 0.25) 4.5))
(if (<= t_1 3.1)
(fma (* (* r_m w) -0.375) (* r_m w) -1.5)
(+ -1.5 t_0)))))r_m = fabs(r);
/* Herbie alternative: evaluates the full expression once as t_1, then selects a
 * simplified formulation by the value of t_1 itself. r_m = fabs(r) is supplied
 * by the caller. Thresholds -2e13 and 3.1 are Herbie-derived from sampling. */
double code(double v, double w, double r_m) {
double t_0 = 2.0 / (r_m * r_m);  /* shared 2/r^2 term */
double t_1 = (3.0 + t_0) + (((0.125 * (3.0 - (2.0 * v))) * (r_m * (r_m * (w * w)))) / (v + -1.0));
double tmp;
/* Very negative result: the w^2 r^2 term dominates (Taylor expansion in v around inf). */
if (t_1 <= -20000000000000.0) {
tmp = 3.0 - fma(w, ((w * (r_m * r_m)) * 0.25), 4.5);
} else if (t_1 <= 3.1) {
/* Mid range: fused quadratic in (r_m * w). */
tmp = fma(((r_m * w) * -0.375), (r_m * w), -1.5);
} else {
/* Large positive result: 2/r^2 dominates. */
tmp = -1.5 + t_0;
}
return tmp;
}
r_m = abs(r) function code(v, w, r_m) t_0 = Float64(2.0 / Float64(r_m * r_m)) t_1 = Float64(Float64(3.0 + t_0) + Float64(Float64(Float64(0.125 * Float64(3.0 - Float64(2.0 * v))) * Float64(r_m * Float64(r_m * Float64(w * w)))) / Float64(v + -1.0))) tmp = 0.0 if (t_1 <= -20000000000000.0) tmp = Float64(3.0 - fma(w, Float64(Float64(w * Float64(r_m * r_m)) * 0.25), 4.5)); elseif (t_1 <= 3.1) tmp = fma(Float64(Float64(r_m * w) * -0.375), Float64(r_m * w), -1.5); else tmp = Float64(-1.5 + t_0); end return tmp end
r_m = N[Abs[r], $MachinePrecision]
code[v_, w_, r$95$m_] := Block[{t$95$0 = N[(2.0 / N[(r$95$m * r$95$m), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(N[(3.0 + t$95$0), $MachinePrecision] + N[(N[(N[(0.125 * N[(3.0 - N[(2.0 * v), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(r$95$m * N[(r$95$m * N[(w * w), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(v + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[t$95$1, -20000000000000.0], N[(3.0 - N[(w * N[(N[(w * N[(r$95$m * r$95$m), $MachinePrecision]), $MachinePrecision] * 0.25), $MachinePrecision] + 4.5), $MachinePrecision]), $MachinePrecision], If[LessEqual[t$95$1, 3.1], N[(N[(N[(r$95$m * w), $MachinePrecision] * -0.375), $MachinePrecision] * N[(r$95$m * w), $MachinePrecision] + -1.5), $MachinePrecision], N[(-1.5 + t$95$0), $MachinePrecision]]]]]
\begin{array}{l}
r_m = \left|r\right|
\\
\begin{array}{l}
t_0 := \frac{2}{r\_m \cdot r\_m}\\
t_1 := \left(3 + t\_0\right) + \frac{\left(0.125 \cdot \left(3 - 2 \cdot v\right)\right) \cdot \left(r\_m \cdot \left(r\_m \cdot \left(w \cdot w\right)\right)\right)}{v + -1}\\
\mathbf{if}\;t\_1 \leq -20000000000000:\\
\;\;\;\;3 - \mathsf{fma}\left(w, \left(w \cdot \left(r\_m \cdot r\_m\right)\right) \cdot 0.25, 4.5\right)\\
\mathbf{elif}\;t\_1 \leq 3.1:\\
\;\;\;\;\mathsf{fma}\left(\left(r\_m \cdot w\right) \cdot -0.375, r\_m \cdot w, -1.5\right)\\
\mathbf{else}:\\
\;\;\;\;-1.5 + t\_0\\
\end{array}
\end{array}
if (-.f64 (+.f64 #s(literal 3 binary64) (/.f64 #s(literal 2 binary64) (*.f64 r r))) (/.f64 (*.f64 (*.f64 #s(literal 1/8 binary64) (-.f64 #s(literal 3 binary64) (*.f64 #s(literal 2 binary64) v))) (*.f64 (*.f64 (*.f64 w w) r) r)) (-.f64 #s(literal 1 binary64) v))) < -2e13Initial program 85.1%
lift--.f64N/A
lift--.f64N/A
associate--l-N/A
lower--.f64N/A
lift-/.f64N/A
lift-*.f64N/A
associate-/l*N/A
lower-fma.f64N/A
Applied rewrites97.0%
lift-fma.f64N/A
*-commutativeN/A
lift-*.f64N/A
lift-*.f64N/A
associate-*l*N/A
associate-*l*N/A
lower-fma.f64N/A
Applied rewrites92.1%
Taylor expanded in r around inf
Applied rewrites87.6%
Taylor expanded in v around inf
lower-*.f64N/A
lower-*.f64N/A
unpow2N/A
lower-*.f6482.9
Applied rewrites82.9%
if -2e13 < (-.f64 (+.f64 #s(literal 3 binary64) (/.f64 #s(literal 2 binary64) (*.f64 r r))) (/.f64 (*.f64 (*.f64 #s(literal 1/8 binary64) (-.f64 #s(literal 3 binary64) (*.f64 #s(literal 2 binary64) v))) (*.f64 (*.f64 (*.f64 w w) r) r)) (-.f64 #s(literal 1 binary64) v))) < 3.10000000000000009Initial program 86.7%
Taylor expanded in v around 0
sub-negN/A
lower-+.f64N/A
associate-*r/N/A
metadata-evalN/A
lower-/.f64N/A
unpow2N/A
lower-*.f64N/A
+-commutativeN/A
distribute-neg-inN/A
associate-*r*N/A
*-commutativeN/A
distribute-rgt-neg-inN/A
distribute-lft-neg-inN/A
metadata-evalN/A
metadata-evalN/A
lower-fma.f64N/A
Applied rewrites54.1%
Taylor expanded in w around 0
Applied rewrites83.5%
Taylor expanded in r around inf
Applied rewrites54.1%
Applied rewrites93.5%
if 3.10000000000000009 < (-.f64 (+.f64 #s(literal 3 binary64) (/.f64 #s(literal 2 binary64) (*.f64 r r))) (/.f64 (*.f64 (*.f64 #s(literal 1/8 binary64) (-.f64 #s(literal 3 binary64) (*.f64 #s(literal 2 binary64) v))) (*.f64 (*.f64 (*.f64 w w) r) r)) (-.f64 #s(literal 1 binary64) v))) Initial program 88.5%
Taylor expanded in w around 0
sub-negN/A
metadata-evalN/A
+-commutativeN/A
lower-+.f64N/A
associate-*r/N/A
metadata-evalN/A
lower-/.f64N/A
unpow2N/A
lower-*.f6499.8
Applied rewrites99.8%
Final simplification91.4%
r_m = (fabs.f64 r)
(FPCore (v w r_m)
:precision binary64
(let* ((t_0 (/ 2.0 (* r_m r_m)))
(t_1
(+
(+ 3.0 t_0)
(/
(* (* 0.125 (- 3.0 (* 2.0 v))) (* r_m (* r_m (* w w))))
(+ v -1.0)))))
(if (<= t_1 (- INFINITY))
(* -0.25 (* w (* w (* r_m r_m))))
(if (<= t_1 3.1)
(fma (* (* r_m w) -0.375) (* r_m w) -1.5)
(+ -1.5 t_0)))))r_m = fabs(r);
/* Herbie alternative branching on the value of the full expression t_1.
 * NOTE(review): the first guard compares against -INFINITY with <=, so it fires
 * only when t_1 is exactly -inf — effectively an overflow-only branch. */
double code(double v, double w, double r_m) {
double t_0 = 2.0 / (r_m * r_m);  /* shared 2/r^2 term */
double t_1 = (3.0 + t_0) + (((0.125 * (3.0 - (2.0 * v))) * (r_m * (r_m * (w * w)))) / (v + -1.0));
double tmp;
if (t_1 <= -((double) INFINITY)) {
/* Overflowed to -inf: asymptotic form -0.25 * w^2 * r^2 (Taylor in v around inf). */
tmp = -0.25 * (w * (w * (r_m * r_m)));
} else if (t_1 <= 3.1) {
/* Mid range: fused quadratic in (r_m * w). */
tmp = fma(((r_m * w) * -0.375), (r_m * w), -1.5);
} else {
/* Large positive result: 2/r^2 dominates. */
tmp = -1.5 + t_0;
}
return tmp;
}
r_m = abs(r) function code(v, w, r_m) t_0 = Float64(2.0 / Float64(r_m * r_m)) t_1 = Float64(Float64(3.0 + t_0) + Float64(Float64(Float64(0.125 * Float64(3.0 - Float64(2.0 * v))) * Float64(r_m * Float64(r_m * Float64(w * w)))) / Float64(v + -1.0))) tmp = 0.0 if (t_1 <= Float64(-Inf)) tmp = Float64(-0.25 * Float64(w * Float64(w * Float64(r_m * r_m)))); elseif (t_1 <= 3.1) tmp = fma(Float64(Float64(r_m * w) * -0.375), Float64(r_m * w), -1.5); else tmp = Float64(-1.5 + t_0); end return tmp end
r_m = N[Abs[r], $MachinePrecision]
code[v_, w_, r$95$m_] := Block[{t$95$0 = N[(2.0 / N[(r$95$m * r$95$m), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(N[(3.0 + t$95$0), $MachinePrecision] + N[(N[(N[(0.125 * N[(3.0 - N[(2.0 * v), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(r$95$m * N[(r$95$m * N[(w * w), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(v + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[t$95$1, (-Infinity)], N[(-0.25 * N[(w * N[(w * N[(r$95$m * r$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], If[LessEqual[t$95$1, 3.1], N[(N[(N[(r$95$m * w), $MachinePrecision] * -0.375), $MachinePrecision] * N[(r$95$m * w), $MachinePrecision] + -1.5), $MachinePrecision], N[(-1.5 + t$95$0), $MachinePrecision]]]]]
\begin{array}{l}
r_m = \left|r\right|
\\
\begin{array}{l}
t_0 := \frac{2}{r\_m \cdot r\_m}\\
t_1 := \left(3 + t\_0\right) + \frac{\left(0.125 \cdot \left(3 - 2 \cdot v\right)\right) \cdot \left(r\_m \cdot \left(r\_m \cdot \left(w \cdot w\right)\right)\right)}{v + -1}\\
\mathbf{if}\;t\_1 \leq -\infty:\\
\;\;\;\;-0.25 \cdot \left(w \cdot \left(w \cdot \left(r\_m \cdot r\_m\right)\right)\right)\\
\mathbf{elif}\;t\_1 \leq 3.1:\\
\;\;\;\;\mathsf{fma}\left(\left(r\_m \cdot w\right) \cdot -0.375, r\_m \cdot w, -1.5\right)\\
\mathbf{else}:\\
\;\;\;\;-1.5 + t\_0\\
\end{array}
\end{array}
if (-.f64 (+.f64 #s(literal 3 binary64) (/.f64 #s(literal 2 binary64) (*.f64 r r))) (/.f64 (*.f64 (*.f64 #s(literal 1/8 binary64) (-.f64 #s(literal 3 binary64) (*.f64 #s(literal 2 binary64) v))) (*.f64 (*.f64 (*.f64 w w) r) r)) (-.f64 #s(literal 1 binary64) v))) < -inf.0Initial program 81.9%
Taylor expanded in v around inf
Applied rewrites66.8%
Taylor expanded in r around inf
Applied rewrites61.1%
Taylor expanded in v around inf
Applied rewrites89.2%
if -inf.0 < (-.f64 (+.f64 #s(literal 3 binary64) (/.f64 #s(literal 2 binary64) (*.f64 r r))) (/.f64 (*.f64 (*.f64 #s(literal 1/8 binary64) (-.f64 #s(literal 3 binary64) (*.f64 #s(literal 2 binary64) v))) (*.f64 (*.f64 (*.f64 w w) r) r)) (-.f64 #s(literal 1 binary64) v))) < 3.10000000000000009Initial program 90.4%
Taylor expanded in v around 0
sub-negN/A
lower-+.f64N/A
associate-*r/N/A
metadata-evalN/A
lower-/.f64N/A
unpow2N/A
lower-*.f64N/A
+-commutativeN/A
distribute-neg-inN/A
associate-*r*N/A
*-commutativeN/A
distribute-rgt-neg-inN/A
distribute-lft-neg-inN/A
metadata-evalN/A
metadata-evalN/A
lower-fma.f64N/A
Applied rewrites47.9%
Taylor expanded in w around 0
Applied rewrites61.1%
Taylor expanded in r around inf
Applied rewrites47.9%
Applied rewrites82.3%
if 3.10000000000000009 < (-.f64 (+.f64 #s(literal 3 binary64) (/.f64 #s(literal 2 binary64) (*.f64 r r))) (/.f64 (*.f64 (*.f64 #s(literal 1/8 binary64) (-.f64 #s(literal 3 binary64) (*.f64 #s(literal 2 binary64) v))) (*.f64 (*.f64 (*.f64 w w) r) r)) (-.f64 #s(literal 1 binary64) v))) Initial program 88.5%
Taylor expanded in w around 0
sub-negN/A
metadata-evalN/A
+-commutativeN/A
lower-+.f64N/A
associate-*r/N/A
metadata-evalN/A
lower-/.f64N/A
unpow2N/A
lower-*.f6499.8
Applied rewrites99.8%
Final simplification91.3%
r_m = (fabs.f64 r)
(FPCore (v w r_m)
:precision binary64
(let* ((t_0 (/ 2.0 (* r_m r_m))))
(if (<=
(+
(+ 3.0 t_0)
(/
(* (* 0.125 (- 3.0 (* 2.0 v))) (* r_m (* r_m (* w w))))
(+ v -1.0)))
-2000000000000.0)
(* -0.25 (* w (* w (* r_m r_m))))
(+ -1.5 t_0))))r_m = fabs(r);
/* Herbie alternative: two regimes chosen by the value of the full expression.
 * r_m = fabs(r) is supplied by the caller; the -2e12 cutoff is Herbie-derived. */
double code(double v, double w, double r_m) {
    double t_0 = 2.0 / (r_m * r_m);
    double series = ((0.125 * (3.0 - (2.0 * v))) * (r_m * (r_m * (w * w)))) / (v + -1.0);
    if ((3.0 + t_0) + series <= -2000000000000.0) {
        /* Very negative: asymptotic -0.25 * w^2 * r^2. */
        return -0.25 * (w * (w * (r_m * r_m)));
    }
    /* Otherwise 2/r^2 dominates. */
    return -1.5 + t_0;
}
r_m = abs(r)
! Herbie alternative of the original expression; the caller passes r_m = |r|.
! Below the sampled cutoff -2d12 the expression is replaced by its asymptotic
! form -0.25*w**2*r_m**2 (per the "Taylor expanded in v around inf" log entry).
real(8) function code(v, w, r_m)
real(8), intent (in) :: v
real(8), intent (in) :: w
real(8), intent (in) :: r_m
real(8) :: t_0
real(8) :: tmp
! Shared 2/r**2 term.
t_0 = 2.0d0 / (r_m * r_m)
if (((3.0d0 + t_0) + (((0.125d0 * (3.0d0 - (2.0d0 * v))) * (r_m * (r_m * (w * w)))) / (v + (-1.0d0)))) <= (-2000000000000.0d0)) then
tmp = (-0.25d0) * (w * (w * (r_m * r_m)))
else
tmp = (-1.5d0) + t_0
end if
code = tmp
end function
r_m = Math.abs(r);
/**
 * Herbie alternative: two regimes chosen by the value of the full expression.
 * The caller supplies r_m = Math.abs(r); the -2e12 cutoff is Herbie-derived.
 */
public static double code(double v, double w, double r_m) {
    final double t_0 = 2.0 / (r_m * r_m);
    final double series = ((0.125 * (3.0 - (2.0 * v))) * (r_m * (r_m * (w * w)))) / (v + -1.0);
    if ((3.0 + t_0) + series <= -2000000000000.0) {
        // Very negative: asymptotic -0.25 * w^2 * r^2.
        return -0.25 * (w * (w * (r_m * r_m)));
    }
    // Otherwise 2/r^2 dominates.
    return -1.5 + t_0;
}
r_m = math.fabs(r) def code(v, w, r_m): t_0 = 2.0 / (r_m * r_m) tmp = 0 if ((3.0 + t_0) + (((0.125 * (3.0 - (2.0 * v))) * (r_m * (r_m * (w * w)))) / (v + -1.0))) <= -2000000000000.0: tmp = -0.25 * (w * (w * (r_m * r_m))) else: tmp = -1.5 + t_0 return tmp
r_m = abs(r) function code(v, w, r_m) t_0 = Float64(2.0 / Float64(r_m * r_m)) tmp = 0.0 if (Float64(Float64(3.0 + t_0) + Float64(Float64(Float64(0.125 * Float64(3.0 - Float64(2.0 * v))) * Float64(r_m * Float64(r_m * Float64(w * w)))) / Float64(v + -1.0))) <= -2000000000000.0) tmp = Float64(-0.25 * Float64(w * Float64(w * Float64(r_m * r_m)))); else tmp = Float64(-1.5 + t_0); end return tmp end
r_m = abs(r); function tmp_2 = code(v, w, r_m) t_0 = 2.0 / (r_m * r_m); tmp = 0.0; if (((3.0 + t_0) + (((0.125 * (3.0 - (2.0 * v))) * (r_m * (r_m * (w * w)))) / (v + -1.0))) <= -2000000000000.0) tmp = -0.25 * (w * (w * (r_m * r_m))); else tmp = -1.5 + t_0; end tmp_2 = tmp; end
r_m = N[Abs[r], $MachinePrecision]
code[v_, w_, r$95$m_] := Block[{t$95$0 = N[(2.0 / N[(r$95$m * r$95$m), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[N[(N[(3.0 + t$95$0), $MachinePrecision] + N[(N[(N[(0.125 * N[(3.0 - N[(2.0 * v), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(r$95$m * N[(r$95$m * N[(w * w), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(v + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], -2000000000000.0], N[(-0.25 * N[(w * N[(w * N[(r$95$m * r$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(-1.5 + t$95$0), $MachinePrecision]]]
\begin{array}{l}
r_m = \left|r\right|
\\
\begin{array}{l}
t_0 := \frac{2}{r\_m \cdot r\_m}\\
\mathbf{if}\;\left(3 + t\_0\right) + \frac{\left(0.125 \cdot \left(3 - 2 \cdot v\right)\right) \cdot \left(r\_m \cdot \left(r\_m \cdot \left(w \cdot w\right)\right)\right)}{v + -1} \leq -2000000000000:\\
\;\;\;\;-0.25 \cdot \left(w \cdot \left(w \cdot \left(r\_m \cdot r\_m\right)\right)\right)\\
\mathbf{else}:\\
\;\;\;\;-1.5 + t\_0\\
\end{array}
\end{array}
if (-.f64 (+.f64 #s(literal 3 binary64) (/.f64 #s(literal 2 binary64) (*.f64 r r))) (/.f64 (*.f64 (*.f64 #s(literal 1/8 binary64) (-.f64 #s(literal 3 binary64) (*.f64 #s(literal 2 binary64) v))) (*.f64 (*.f64 (*.f64 w w) r) r)) (-.f64 #s(literal 1 binary64) v))) < -2e12Initial program 85.3%
Taylor expanded in v around inf
Applied rewrites64.3%
Taylor expanded in r around inf
Applied rewrites59.5%
Taylor expanded in v around inf
Applied rewrites82.2%
if -2e12 < (-.f64 (+.f64 #s(literal 3 binary64) (/.f64 #s(literal 2 binary64) (*.f64 r r))) (/.f64 (*.f64 (*.f64 #s(literal 1/8 binary64) (-.f64 #s(literal 3 binary64) (*.f64 #s(literal 2 binary64) v))) (*.f64 (*.f64 (*.f64 w w) r) r)) (-.f64 #s(literal 1 binary64) v))) Initial program 87.8%
Taylor expanded in w around 0
sub-negN/A
metadata-evalN/A
+-commutativeN/A
lower-+.f64N/A
associate-*r/N/A
metadata-evalN/A
lower-/.f64N/A
unpow2N/A
lower-*.f6494.8
Applied rewrites94.8%
Final simplification89.4%
r_m = (fabs.f64 r)
(FPCore (v w r_m)
:precision binary64
(let* ((t_0 (/ 2.0 (* r_m r_m))))
(if (<= r_m 3.9e-46)
(fma (* r_m w) (* (* r_m w) -0.375) (+ -1.5 t_0))
(-
(+ 3.0 t_0)
(fma
(* 0.125 (fma v -2.0 3.0))
(* (* w (* r_m w)) (/ r_m (- 1.0 v)))
4.5)))))r_m = fabs(r);
/* Herbie alternative with r_m = fabs(r) supplied by the caller.
 * Shares the 2/r^2 term between both branches; the 3.9e-46 cutoff is
 * Herbie-derived from sampling. */
double code(double v, double w, double r_m) {
double t_0 = 2.0 / (r_m * r_m);  /* shared 2/r^2 term */
double tmp;
/* Tiny |r|: Taylor-expanded-in-v form with a fused quadratic in (r_m * w). */
if (r_m <= 3.9e-46) {
tmp = fma((r_m * w), ((r_m * w) * -0.375), (-1.5 + t_0));
} else {
/* General regime: fma fuses the v-dependent correction to cut rounding error. */
tmp = (3.0 + t_0) - fma((0.125 * fma(v, -2.0, 3.0)), ((w * (r_m * w)) * (r_m / (1.0 - v))), 4.5);
}
return tmp;
}
r_m = abs(r) function code(v, w, r_m) t_0 = Float64(2.0 / Float64(r_m * r_m)) tmp = 0.0 if (r_m <= 3.9e-46) tmp = fma(Float64(r_m * w), Float64(Float64(r_m * w) * -0.375), Float64(-1.5 + t_0)); else tmp = Float64(Float64(3.0 + t_0) - fma(Float64(0.125 * fma(v, -2.0, 3.0)), Float64(Float64(w * Float64(r_m * w)) * Float64(r_m / Float64(1.0 - v))), 4.5)); end return tmp end
r_m = N[Abs[r], $MachinePrecision]
code[v_, w_, r$95$m_] := Block[{t$95$0 = N[(2.0 / N[(r$95$m * r$95$m), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[r$95$m, 3.9e-46], N[(N[(r$95$m * w), $MachinePrecision] * N[(N[(r$95$m * w), $MachinePrecision] * -0.375), $MachinePrecision] + N[(-1.5 + t$95$0), $MachinePrecision]), $MachinePrecision], N[(N[(3.0 + t$95$0), $MachinePrecision] - N[(N[(0.125 * N[(v * -2.0 + 3.0), $MachinePrecision]), $MachinePrecision] * N[(N[(w * N[(r$95$m * w), $MachinePrecision]), $MachinePrecision] * N[(r$95$m / N[(1.0 - v), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + 4.5), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
r_m = \left|r\right|
\\
\begin{array}{l}
t_0 := \frac{2}{r\_m \cdot r\_m}\\
\mathbf{if}\;r\_m \leq 3.9 \cdot 10^{-46}:\\
\;\;\;\;\mathsf{fma}\left(r\_m \cdot w, \left(r\_m \cdot w\right) \cdot -0.375, -1.5 + t\_0\right)\\
\mathbf{else}:\\
\;\;\;\;\left(3 + t\_0\right) - \mathsf{fma}\left(0.125 \cdot \mathsf{fma}\left(v, -2, 3\right), \left(w \cdot \left(r\_m \cdot w\right)\right) \cdot \frac{r\_m}{1 - v}, 4.5\right)\\
\end{array}
\end{array}
if r < 3.9000000000000003e-46Initial program 87.3%
Taylor expanded in v around 0
sub-negN/A
lower-+.f64N/A
associate-*r/N/A
metadata-evalN/A
lower-/.f64N/A
unpow2N/A
lower-*.f64N/A
+-commutativeN/A
distribute-neg-inN/A
associate-*r*N/A
*-commutativeN/A
distribute-rgt-neg-inN/A
distribute-lft-neg-inN/A
metadata-evalN/A
metadata-evalN/A
lower-fma.f64N/A
Applied rewrites80.0%
Applied rewrites94.2%
if 3.9000000000000003e-46 < r Initial program 85.5%
lift--.f64N/A
lift--.f64N/A
associate--l-N/A
lower--.f64N/A
lift-/.f64N/A
lift-*.f64N/A
associate-/l*N/A
lower-fma.f64N/A
Applied rewrites99.7%
r_m = (fabs.f64 r)
(FPCore (v w r_m)
:precision binary64
(if (<= r_m 195.0)
(+ -1.5 (fma (* w (* (* r_m r_m) -0.25)) w (/ 2.0 (* r_m r_m))))
(-
3.0
(fma
(* 0.125 (fma v -2.0 3.0))
(* (* w (* r_m w)) (/ r_m (- 1.0 v)))
4.5))))r_m = fabs(r);
double code(double v, double w, double r_m) {
double tmp;
if (r_m <= 195.0) {
tmp = -1.5 + fma((w * ((r_m * r_m) * -0.25)), w, (2.0 / (r_m * r_m)));
} else {
tmp = 3.0 - fma((0.125 * fma(v, -2.0, 3.0)), ((w * (r_m * w)) * (r_m / (1.0 - v))), 4.5);
}
return tmp;
}
r_m = abs(r) function code(v, w, r_m) tmp = 0.0 if (r_m <= 195.0) tmp = Float64(-1.5 + fma(Float64(w * Float64(Float64(r_m * r_m) * -0.25)), w, Float64(2.0 / Float64(r_m * r_m)))); else tmp = Float64(3.0 - fma(Float64(0.125 * fma(v, -2.0, 3.0)), Float64(Float64(w * Float64(r_m * w)) * Float64(r_m / Float64(1.0 - v))), 4.5)); end return tmp end
r_m = N[Abs[r], $MachinePrecision] code[v_, w_, r$95$m_] := If[LessEqual[r$95$m, 195.0], N[(-1.5 + N[(N[(w * N[(N[(r$95$m * r$95$m), $MachinePrecision] * -0.25), $MachinePrecision]), $MachinePrecision] * w + N[(2.0 / N[(r$95$m * r$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(3.0 - N[(N[(0.125 * N[(v * -2.0 + 3.0), $MachinePrecision]), $MachinePrecision] * N[(N[(w * N[(r$95$m * w), $MachinePrecision]), $MachinePrecision] * N[(r$95$m / N[(1.0 - v), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + 4.5), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
r_m = \left|r\right|
\\
\begin{array}{l}
\mathbf{if}\;r\_m \leq 195:\\
\;\;\;\;-1.5 + \mathsf{fma}\left(w \cdot \left(\left(r\_m \cdot r\_m\right) \cdot -0.25\right), w, \frac{2}{r\_m \cdot r\_m}\right)\\
\mathbf{else}:\\
\;\;\;\;3 - \mathsf{fma}\left(0.125 \cdot \mathsf{fma}\left(v, -2, 3\right), \left(w \cdot \left(r\_m \cdot w\right)\right) \cdot \frac{r\_m}{1 - v}, 4.5\right)\\
\end{array}
\end{array}
if r < 195Initial program 86.5%
Taylor expanded in v around inf
sub-negN/A
+-commutativeN/A
distribute-neg-inN/A
metadata-evalN/A
distribute-lft-neg-inN/A
metadata-evalN/A
associate-+l+N/A
lower-+.f64N/A
associate-*r*N/A
unpow2N/A
associate-*r*N/A
lower-fma.f64N/A
lower-*.f64N/A
lower-*.f64N/A
unpow2N/A
lower-*.f64N/A
associate-*r/N/A
Applied rewrites90.7%
if 195 < r Initial program 87.1%
lift--.f64N/A
lift--.f64N/A
associate--l-N/A
lower--.f64N/A
lift-/.f64N/A
lift-*.f64N/A
associate-/l*N/A
lower-fma.f64N/A
Applied rewrites99.8%
Taylor expanded in r around inf
Applied rewrites99.8%
Final simplification93.3%
r_m = (fabs.f64 r)
(FPCore (v w r_m)
:precision binary64
(if (<= r_m 3.9e-130)
(/ (/ 2.0 r_m) r_m)
(if (<= r_m 0.43)
(+ (/ 2.0 (* r_m r_m)) (* (* r_m r_m) (* -0.375 (* w w))))
(if (<= r_m 2.25e+148)
(- 3.0 (fma w (* (* w (* r_m r_m)) 0.25) 4.5))
(fma (* (* r_m w) -0.375) (* r_m w) -1.5)))))r_m = fabs(r);
double code(double v, double w, double r_m) {
double tmp;
if (r_m <= 3.9e-130) {
tmp = (2.0 / r_m) / r_m;
} else if (r_m <= 0.43) {
tmp = (2.0 / (r_m * r_m)) + ((r_m * r_m) * (-0.375 * (w * w)));
} else if (r_m <= 2.25e+148) {
tmp = 3.0 - fma(w, ((w * (r_m * r_m)) * 0.25), 4.5);
} else {
tmp = fma(((r_m * w) * -0.375), (r_m * w), -1.5);
}
return tmp;
}
r_m = abs(r) function code(v, w, r_m) tmp = 0.0 if (r_m <= 3.9e-130) tmp = Float64(Float64(2.0 / r_m) / r_m); elseif (r_m <= 0.43) tmp = Float64(Float64(2.0 / Float64(r_m * r_m)) + Float64(Float64(r_m * r_m) * Float64(-0.375 * Float64(w * w)))); elseif (r_m <= 2.25e+148) tmp = Float64(3.0 - fma(w, Float64(Float64(w * Float64(r_m * r_m)) * 0.25), 4.5)); else tmp = fma(Float64(Float64(r_m * w) * -0.375), Float64(r_m * w), -1.5); end return tmp end
r_m = N[Abs[r], $MachinePrecision] code[v_, w_, r$95$m_] := If[LessEqual[r$95$m, 3.9e-130], N[(N[(2.0 / r$95$m), $MachinePrecision] / r$95$m), $MachinePrecision], If[LessEqual[r$95$m, 0.43], N[(N[(2.0 / N[(r$95$m * r$95$m), $MachinePrecision]), $MachinePrecision] + N[(N[(r$95$m * r$95$m), $MachinePrecision] * N[(-0.375 * N[(w * w), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], If[LessEqual[r$95$m, 2.25e+148], N[(3.0 - N[(w * N[(N[(w * N[(r$95$m * r$95$m), $MachinePrecision]), $MachinePrecision] * 0.25), $MachinePrecision] + 4.5), $MachinePrecision]), $MachinePrecision], N[(N[(N[(r$95$m * w), $MachinePrecision] * -0.375), $MachinePrecision] * N[(r$95$m * w), $MachinePrecision] + -1.5), $MachinePrecision]]]]
\begin{array}{l}
r_m = \left|r\right|
\\
\begin{array}{l}
\mathbf{if}\;r\_m \leq 3.9 \cdot 10^{-130}:\\
\;\;\;\;\frac{\frac{2}{r\_m}}{r\_m}\\
\mathbf{elif}\;r\_m \leq 0.43:\\
\;\;\;\;\frac{2}{r\_m \cdot r\_m} + \left(r\_m \cdot r\_m\right) \cdot \left(-0.375 \cdot \left(w \cdot w\right)\right)\\
\mathbf{elif}\;r\_m \leq 2.25 \cdot 10^{+148}:\\
\;\;\;\;3 - \mathsf{fma}\left(w, \left(w \cdot \left(r\_m \cdot r\_m\right)\right) \cdot 0.25, 4.5\right)\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(\left(r\_m \cdot w\right) \cdot -0.375, r\_m \cdot w, -1.5\right)\\
\end{array}
\end{array}
if r < 3.9000000000000001e-130Initial program 86.4%
Taylor expanded in r around 0
lower-/.f64N/A
unpow2N/A
lower-*.f6454.2
Applied rewrites54.2%
Applied rewrites54.2%
if 3.9000000000000001e-130 < r < 0.429999999999999993Initial program 90.3%
Taylor expanded in v around 0
sub-negN/A
lower-+.f64N/A
associate-*r/N/A
metadata-evalN/A
lower-/.f64N/A
unpow2N/A
lower-*.f64N/A
+-commutativeN/A
distribute-neg-inN/A
associate-*r*N/A
*-commutativeN/A
distribute-rgt-neg-inN/A
distribute-lft-neg-inN/A
metadata-evalN/A
metadata-evalN/A
lower-fma.f64N/A
Applied rewrites93.9%
Taylor expanded in w around inf
Applied rewrites93.9%
if 0.429999999999999993 < r < 2.24999999999999997e148Initial program 87.9%
lift--.f64N/A
lift--.f64N/A
associate--l-N/A
lower--.f64N/A
lift-/.f64N/A
lift-*.f64N/A
associate-/l*N/A
lower-fma.f64N/A
Applied rewrites99.6%
lift-fma.f64N/A
*-commutativeN/A
lift-*.f64N/A
lift-*.f64N/A
associate-*l*N/A
associate-*l*N/A
lower-fma.f64N/A
Applied rewrites99.9%
Taylor expanded in r around inf
Applied rewrites99.9%
Taylor expanded in v around inf
lower-*.f64N/A
lower-*.f64N/A
unpow2N/A
lower-*.f6497.5
Applied rewrites97.5%
if 2.24999999999999997e148 < r Initial program 85.1%
Taylor expanded in v around 0
sub-negN/A
lower-+.f64N/A
associate-*r/N/A
metadata-evalN/A
lower-/.f64N/A
unpow2N/A
lower-*.f64N/A
+-commutativeN/A
distribute-neg-inN/A
associate-*r*N/A
*-commutativeN/A
distribute-rgt-neg-inN/A
distribute-lft-neg-inN/A
metadata-evalN/A
metadata-evalN/A
lower-fma.f64N/A
Applied rewrites55.5%
Taylor expanded in w around 0
Applied rewrites25.6%
Taylor expanded in r around inf
Applied rewrites55.5%
Applied rewrites90.2%
Final simplification68.9%
r_m = (fabs.f64 r)
(FPCore (v w r_m)
:precision binary64
(if (<= r_m 195.0)
(+ -1.5 (fma (* w (* (* r_m r_m) -0.25)) w (/ 2.0 (* r_m r_m))))
(-
3.0
(fma w (* (* r_m (* w (/ r_m (- 1.0 v)))) (fma v -0.25 0.375)) 4.5))))r_m = fabs(r);
/* Herbie alternative with r_m = fabs(r) supplied by the caller.
 * Same small-r branch as the 195-cutoff variant above; the large-r branch
 * refactors the v-dependent coefficient into fma(v, -0.25, 0.375). */
double code(double v, double w, double r_m) {
double tmp;
if (r_m <= 195.0) {
/* v-independent form: -1.5 - 0.25*w^2*r^2 + 2/r^2, fused via fma. */
tmp = -1.5 + fma((w * ((r_m * r_m) * -0.25)), w, (2.0 / (r_m * r_m)));
} else {
/* Large |r|: 2/r^2 dropped; coefficient 0.125*(3 - 2v) rewritten as fma(v,-0.25,0.375). */
tmp = 3.0 - fma(w, ((r_m * (w * (r_m / (1.0 - v)))) * fma(v, -0.25, 0.375)), 4.5);
}
return tmp;
}
r_m = abs(r) function code(v, w, r_m) tmp = 0.0 if (r_m <= 195.0) tmp = Float64(-1.5 + fma(Float64(w * Float64(Float64(r_m * r_m) * -0.25)), w, Float64(2.0 / Float64(r_m * r_m)))); else tmp = Float64(3.0 - fma(w, Float64(Float64(r_m * Float64(w * Float64(r_m / Float64(1.0 - v)))) * fma(v, -0.25, 0.375)), 4.5)); end return tmp end
r_m = N[Abs[r], $MachinePrecision] code[v_, w_, r$95$m_] := If[LessEqual[r$95$m, 195.0], N[(-1.5 + N[(N[(w * N[(N[(r$95$m * r$95$m), $MachinePrecision] * -0.25), $MachinePrecision]), $MachinePrecision] * w + N[(2.0 / N[(r$95$m * r$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(3.0 - N[(w * N[(N[(r$95$m * N[(w * N[(r$95$m / N[(1.0 - v), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(v * -0.25 + 0.375), $MachinePrecision]), $MachinePrecision] + 4.5), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
r_m = \left|r\right|
\\
\begin{array}{l}
\mathbf{if}\;r\_m \leq 195:\\
\;\;\;\;-1.5 + \mathsf{fma}\left(w \cdot \left(\left(r\_m \cdot r\_m\right) \cdot -0.25\right), w, \frac{2}{r\_m \cdot r\_m}\right)\\
\mathbf{else}:\\
\;\;\;\;3 - \mathsf{fma}\left(w, \left(r\_m \cdot \left(w \cdot \frac{r\_m}{1 - v}\right)\right) \cdot \mathsf{fma}\left(v, -0.25, 0.375\right), 4.5\right)\\
\end{array}
\end{array}
if r < 195Initial program 86.5%
Taylor expanded in v around inf
sub-negN/A
+-commutativeN/A
distribute-neg-inN/A
metadata-evalN/A
distribute-lft-neg-inN/A
metadata-evalN/A
associate-+l+N/A
lower-+.f64N/A
associate-*r*N/A
unpow2N/A
associate-*r*N/A
lower-fma.f64N/A
lower-*.f64N/A
lower-*.f64N/A
unpow2N/A
lower-*.f64N/A
associate-*r/N/A
Applied rewrites90.7%
if 195 < r Initial program 87.1%
lift--.f64N/A
lift--.f64N/A
associate--l-N/A
lower--.f64N/A
lift-/.f64N/A
lift-*.f64N/A
associate-/l*N/A
lower-fma.f64N/A
Applied rewrites99.8%
lift-fma.f64N/A
*-commutativeN/A
lift-*.f64N/A
lift-*.f64N/A
associate-*l*N/A
associate-*l*N/A
lower-fma.f64N/A
Applied rewrites93.6%
Taylor expanded in r around inf
Applied rewrites93.6%
Final simplification91.5%
r_m = (fabs.f64 r)
(FPCore (v w r_m)
:precision binary64
(let* ((t_0 (/ 2.0 (* r_m r_m))))
(if (<= r_m 2.25e+148)
(+ -1.5 (fma (* w (* (* r_m r_m) -0.25)) w t_0))
(fma (* r_m (* w (* r_m w))) -0.375 (+ -1.5 t_0)))))r_m = fabs(r);
double code(double v, double w, double r_m) {
double t_0 = 2.0 / (r_m * r_m);
double tmp;
if (r_m <= 2.25e+148) {
tmp = -1.5 + fma((w * ((r_m * r_m) * -0.25)), w, t_0);
} else {
tmp = fma((r_m * (w * (r_m * w))), -0.375, (-1.5 + t_0));
}
return tmp;
}
r_m = abs(r) function code(v, w, r_m) t_0 = Float64(2.0 / Float64(r_m * r_m)) tmp = 0.0 if (r_m <= 2.25e+148) tmp = Float64(-1.5 + fma(Float64(w * Float64(Float64(r_m * r_m) * -0.25)), w, t_0)); else tmp = fma(Float64(r_m * Float64(w * Float64(r_m * w))), -0.375, Float64(-1.5 + t_0)); end return tmp end
r_m = N[Abs[r], $MachinePrecision]
code[v_, w_, r$95$m_] := Block[{t$95$0 = N[(2.0 / N[(r$95$m * r$95$m), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[r$95$m, 2.25e+148], N[(-1.5 + N[(N[(w * N[(N[(r$95$m * r$95$m), $MachinePrecision] * -0.25), $MachinePrecision]), $MachinePrecision] * w + t$95$0), $MachinePrecision]), $MachinePrecision], N[(N[(r$95$m * N[(w * N[(r$95$m * w), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * -0.375 + N[(-1.5 + t$95$0), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
r_m = \left|r\right|
\\
\begin{array}{l}
t_0 := \frac{2}{r\_m \cdot r\_m}\\
\mathbf{if}\;r\_m \leq 2.25 \cdot 10^{+148}:\\
\;\;\;\;-1.5 + \mathsf{fma}\left(w \cdot \left(\left(r\_m \cdot r\_m\right) \cdot -0.25\right), w, t\_0\right)\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(r\_m \cdot \left(w \cdot \left(r\_m \cdot w\right)\right), -0.375, -1.5 + t\_0\right)\\
\end{array}
\end{array}
if r < 2.24999999999999997e148Initial program 87.0%
Taylor expanded in v around inf
sub-negN/A
+-commutativeN/A
distribute-neg-inN/A
metadata-evalN/A
distribute-lft-neg-inN/A
metadata-evalN/A
associate-+l+N/A
lower-+.f64N/A
associate-*r*N/A
unpow2N/A
associate-*r*N/A
lower-fma.f64N/A
lower-*.f64N/A
lower-*.f64N/A
unpow2N/A
lower-*.f64N/A
associate-*r/N/A
Applied rewrites91.6%
if 2.24999999999999997e148 < r Initial program 85.1%
Taylor expanded in v around 0
sub-negN/A
lower-+.f64N/A
associate-*r/N/A
metadata-evalN/A
lower-/.f64N/A
unpow2N/A
lower-*.f64N/A
+-commutativeN/A
distribute-neg-inN/A
associate-*r*N/A
*-commutativeN/A
distribute-rgt-neg-inN/A
distribute-lft-neg-inN/A
metadata-evalN/A
metadata-evalN/A
lower-fma.f64N/A
Applied rewrites55.5%
Applied rewrites85.5%
Applied rewrites90.2%
Final simplification91.3%
r_m = (fabs.f64 r)
(FPCore (v w r_m)
:precision binary64
(let* ((t_0 (/ 2.0 (* r_m r_m))))
(if (<= r_m 2.25e+148)
(+ -1.5 (fma (* w (* (* r_m r_m) -0.25)) w t_0))
      (fma (* r_m w) (* (* r_m w) -0.375) (+ -1.5 t_0)))))
r_m = fabs(r);
double code(double v, double w, double r_m) {
double t_0 = 2.0 / (r_m * r_m);
double tmp;
if (r_m <= 2.25e+148) {
tmp = -1.5 + fma((w * ((r_m * r_m) * -0.25)), w, t_0);
} else {
tmp = fma((r_m * w), ((r_m * w) * -0.375), (-1.5 + t_0));
}
return tmp;
}
r_m = abs(r) function code(v, w, r_m) t_0 = Float64(2.0 / Float64(r_m * r_m)) tmp = 0.0 if (r_m <= 2.25e+148) tmp = Float64(-1.5 + fma(Float64(w * Float64(Float64(r_m * r_m) * -0.25)), w, t_0)); else tmp = fma(Float64(r_m * w), Float64(Float64(r_m * w) * -0.375), Float64(-1.5 + t_0)); end return tmp end
r_m = N[Abs[r], $MachinePrecision]
code[v_, w_, r$95$m_] := Block[{t$95$0 = N[(2.0 / N[(r$95$m * r$95$m), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[r$95$m, 2.25e+148], N[(-1.5 + N[(N[(w * N[(N[(r$95$m * r$95$m), $MachinePrecision] * -0.25), $MachinePrecision]), $MachinePrecision] * w + t$95$0), $MachinePrecision]), $MachinePrecision], N[(N[(r$95$m * w), $MachinePrecision] * N[(N[(r$95$m * w), $MachinePrecision] * -0.375), $MachinePrecision] + N[(-1.5 + t$95$0), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
r_m = \left|r\right|
\\
\begin{array}{l}
t_0 := \frac{2}{r\_m \cdot r\_m}\\
\mathbf{if}\;r\_m \leq 2.25 \cdot 10^{+148}:\\
\;\;\;\;-1.5 + \mathsf{fma}\left(w \cdot \left(\left(r\_m \cdot r\_m\right) \cdot -0.25\right), w, t\_0\right)\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(r\_m \cdot w, \left(r\_m \cdot w\right) \cdot -0.375, -1.5 + t\_0\right)\\
\end{array}
\end{array}
if r < 2.24999999999999997e148Initial program 87.0%
Taylor expanded in v around inf
sub-negN/A
+-commutativeN/A
distribute-neg-inN/A
metadata-evalN/A
distribute-lft-neg-inN/A
metadata-evalN/A
associate-+l+N/A
lower-+.f64N/A
associate-*r*N/A
unpow2N/A
associate-*r*N/A
lower-fma.f64N/A
lower-*.f64N/A
lower-*.f64N/A
unpow2N/A
lower-*.f64N/A
associate-*r/N/A
Applied rewrites91.6%
if 2.24999999999999997e148 < r Initial program 85.1%
Taylor expanded in v around 0
sub-negN/A
lower-+.f64N/A
associate-*r/N/A
metadata-evalN/A
lower-/.f64N/A
unpow2N/A
lower-*.f64N/A
+-commutativeN/A
distribute-neg-inN/A
associate-*r*N/A
*-commutativeN/A
distribute-rgt-neg-inN/A
distribute-lft-neg-inN/A
metadata-evalN/A
metadata-evalN/A
lower-fma.f64N/A
Applied rewrites55.5%
Applied rewrites90.2%
Final simplification91.3%
r_m = (fabs.f64 r) (FPCore (v w r_m) :precision binary64 (if (<= r_m 2.25e+148) (+ -1.5 (fma (* w (* (* r_m r_m) -0.25)) w (/ 2.0 (* r_m r_m)))) (fma (* (* r_m w) -0.375) (* r_m w) -1.5)))
r_m = fabs(r);
double code(double v, double w, double r_m) {
double tmp;
if (r_m <= 2.25e+148) {
tmp = -1.5 + fma((w * ((r_m * r_m) * -0.25)), w, (2.0 / (r_m * r_m)));
} else {
tmp = fma(((r_m * w) * -0.375), (r_m * w), -1.5);
}
return tmp;
}
r_m = abs(r) function code(v, w, r_m) tmp = 0.0 if (r_m <= 2.25e+148) tmp = Float64(-1.5 + fma(Float64(w * Float64(Float64(r_m * r_m) * -0.25)), w, Float64(2.0 / Float64(r_m * r_m)))); else tmp = fma(Float64(Float64(r_m * w) * -0.375), Float64(r_m * w), -1.5); end return tmp end
r_m = N[Abs[r], $MachinePrecision] code[v_, w_, r$95$m_] := If[LessEqual[r$95$m, 2.25e+148], N[(-1.5 + N[(N[(w * N[(N[(r$95$m * r$95$m), $MachinePrecision] * -0.25), $MachinePrecision]), $MachinePrecision] * w + N[(2.0 / N[(r$95$m * r$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[(r$95$m * w), $MachinePrecision] * -0.375), $MachinePrecision] * N[(r$95$m * w), $MachinePrecision] + -1.5), $MachinePrecision]]
\begin{array}{l}
r_m = \left|r\right|
\\
\begin{array}{l}
\mathbf{if}\;r\_m \leq 2.25 \cdot 10^{+148}:\\
\;\;\;\;-1.5 + \mathsf{fma}\left(w \cdot \left(\left(r\_m \cdot r\_m\right) \cdot -0.25\right), w, \frac{2}{r\_m \cdot r\_m}\right)\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(\left(r\_m \cdot w\right) \cdot -0.375, r\_m \cdot w, -1.5\right)\\
\end{array}
\end{array}
if r < 2.24999999999999997e148Initial program 87.0%
Taylor expanded in v around inf
sub-negN/A
+-commutativeN/A
distribute-neg-inN/A
metadata-evalN/A
distribute-lft-neg-inN/A
metadata-evalN/A
associate-+l+N/A
lower-+.f64N/A
associate-*r*N/A
unpow2N/A
associate-*r*N/A
lower-fma.f64N/A
lower-*.f64N/A
lower-*.f64N/A
unpow2N/A
lower-*.f64N/A
associate-*r/N/A
Applied rewrites91.6%
if 2.24999999999999997e148 < r Initial program 85.1%
Taylor expanded in v around 0
sub-negN/A
lower-+.f64N/A
associate-*r/N/A
metadata-evalN/A
lower-/.f64N/A
unpow2N/A
lower-*.f64N/A
+-commutativeN/A
distribute-neg-inN/A
associate-*r*N/A
*-commutativeN/A
distribute-rgt-neg-inN/A
distribute-lft-neg-inN/A
metadata-evalN/A
metadata-evalN/A
lower-fma.f64N/A
Applied rewrites55.5%
Taylor expanded in w around 0
Applied rewrites25.6%
Taylor expanded in r around inf
Applied rewrites55.5%
Applied rewrites90.2%
Final simplification91.3%
r_m = (fabs.f64 r)
(FPCore (v w r_m)
:precision binary64
(let* ((t_0 (* w (* r_m r_m))))
(if (<= r_m 0.43)
(+ (/ 2.0 (* r_m r_m)) (* -0.375 (* w t_0)))
(if (<= r_m 2.25e+148)
(- 3.0 (fma w (* t_0 0.25) 4.5))
      (fma (* (* r_m w) -0.375) (* r_m w) -1.5)))))
r_m = fabs(r);
double code(double v, double w, double r_m) {
double t_0 = w * (r_m * r_m);
double tmp;
if (r_m <= 0.43) {
tmp = (2.0 / (r_m * r_m)) + (-0.375 * (w * t_0));
} else if (r_m <= 2.25e+148) {
tmp = 3.0 - fma(w, (t_0 * 0.25), 4.5);
} else {
tmp = fma(((r_m * w) * -0.375), (r_m * w), -1.5);
}
return tmp;
}
r_m = abs(r) function code(v, w, r_m) t_0 = Float64(w * Float64(r_m * r_m)) tmp = 0.0 if (r_m <= 0.43) tmp = Float64(Float64(2.0 / Float64(r_m * r_m)) + Float64(-0.375 * Float64(w * t_0))); elseif (r_m <= 2.25e+148) tmp = Float64(3.0 - fma(w, Float64(t_0 * 0.25), 4.5)); else tmp = fma(Float64(Float64(r_m * w) * -0.375), Float64(r_m * w), -1.5); end return tmp end
r_m = N[Abs[r], $MachinePrecision]
code[v_, w_, r$95$m_] := Block[{t$95$0 = N[(w * N[(r$95$m * r$95$m), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[r$95$m, 0.43], N[(N[(2.0 / N[(r$95$m * r$95$m), $MachinePrecision]), $MachinePrecision] + N[(-0.375 * N[(w * t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], If[LessEqual[r$95$m, 2.25e+148], N[(3.0 - N[(w * N[(t$95$0 * 0.25), $MachinePrecision] + 4.5), $MachinePrecision]), $MachinePrecision], N[(N[(N[(r$95$m * w), $MachinePrecision] * -0.375), $MachinePrecision] * N[(r$95$m * w), $MachinePrecision] + -1.5), $MachinePrecision]]]]
\begin{array}{l}
r_m = \left|r\right|
\\
\begin{array}{l}
t_0 := w \cdot \left(r\_m \cdot r\_m\right)\\
\mathbf{if}\;r\_m \leq 0.43:\\
\;\;\;\;\frac{2}{r\_m \cdot r\_m} + -0.375 \cdot \left(w \cdot t\_0\right)\\
\mathbf{elif}\;r\_m \leq 2.25 \cdot 10^{+148}:\\
\;\;\;\;3 - \mathsf{fma}\left(w, t\_0 \cdot 0.25, 4.5\right)\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(\left(r\_m \cdot w\right) \cdot -0.375, r\_m \cdot w, -1.5\right)\\
\end{array}
\end{array}
if r < 0.429999999999999993Initial program 86.9%
Taylor expanded in v around 0
sub-negN/A
lower-+.f64N/A
associate-*r/N/A
metadata-evalN/A
lower-/.f64N/A
unpow2N/A
lower-*.f64N/A
+-commutativeN/A
distribute-neg-inN/A
associate-*r*N/A
*-commutativeN/A
distribute-rgt-neg-inN/A
distribute-lft-neg-inN/A
metadata-evalN/A
metadata-evalN/A
lower-fma.f64N/A
Applied rewrites80.4%
Taylor expanded in w around 0
Applied rewrites67.0%
Taylor expanded in w around inf
Applied rewrites80.4%
if 0.429999999999999993 < r < 2.24999999999999997e148Initial program 87.9%
lift--.f64N/A
lift--.f64N/A
associate--l-N/A
lower--.f64N/A
lift-/.f64N/A
lift-*.f64N/A
associate-/l*N/A
lower-fma.f64N/A
Applied rewrites99.6%
lift-fma.f64N/A
*-commutativeN/A
lift-*.f64N/A
lift-*.f64N/A
associate-*l*N/A
associate-*l*N/A
lower-fma.f64N/A
Applied rewrites99.9%
Taylor expanded in r around inf
Applied rewrites99.9%
Taylor expanded in v around inf
lower-*.f64N/A
lower-*.f64N/A
unpow2N/A
lower-*.f6497.5
Applied rewrites97.5%
if 2.24999999999999997e148 < r Initial program 85.1%
Taylor expanded in v around 0
sub-negN/A
lower-+.f64N/A
associate-*r/N/A
metadata-evalN/A
lower-/.f64N/A
unpow2N/A
lower-*.f64N/A
+-commutativeN/A
distribute-neg-inN/A
associate-*r*N/A
*-commutativeN/A
distribute-rgt-neg-inN/A
distribute-lft-neg-inN/A
metadata-evalN/A
metadata-evalN/A
lower-fma.f64N/A
Applied rewrites55.5%
Taylor expanded in w around 0
Applied rewrites25.6%
Taylor expanded in r around inf
Applied rewrites55.5%
Applied rewrites90.2%
Final simplification84.2%
r_m = (fabs.f64 r) (FPCore (v w r_m) :precision binary64 (if (<= r_m 1.15) (/ 2.0 (* r_m r_m)) -1.5))
r_m = fabs(r);
/* Heavily simplified Herbie alternative: keeps only the 2/r^2 term for
 * small |r| and the constant tail otherwise. v and w are unused;
 * r_m is expected to be |r| (caller applies fabs). */
double code(double v, double w, double r_m) {
    return (r_m <= 1.15) ? 2.0 / (r_m * r_m) : -1.5;
}
r_m = abs(r)
! Herbie alternative: keeps only the 2/r**2 term for small |r| and the
! constant tail otherwise. v and w are unused; r_m is expected to be |r|
! (the caller computes r_m = abs(r)).
real(8) function code(v, w, r_m)
real(8), intent (in) :: v
real(8), intent (in) :: w
real(8), intent (in) :: r_m
real(8) :: tmp
! Branch point 1.15 chosen by Herbie's regime inference.
if (r_m <= 1.15d0) then
tmp = 2.0d0 / (r_m * r_m)
else
tmp = -1.5d0
end if
code = tmp
end function
r_m = Math.abs(r);
/**
 * Heavily simplified Herbie alternative: only the 2/r^2 term for small
 * |r|, a constant tail otherwise. v and w are unused; r_m is expected
 * to be |r| (the caller computes Math.abs(r)).
 */
public static double code(double v, double w, double r_m) {
    return (r_m <= 1.15) ? 2.0 / (r_m * r_m) : -1.5;
}
r_m = math.fabs(r) def code(v, w, r_m): tmp = 0 if r_m <= 1.15: tmp = 2.0 / (r_m * r_m) else: tmp = -1.5 return tmp
r_m = abs(r) function code(v, w, r_m) tmp = 0.0 if (r_m <= 1.15) tmp = Float64(2.0 / Float64(r_m * r_m)); else tmp = -1.5; end return tmp end
r_m = abs(r); function tmp_2 = code(v, w, r_m) tmp = 0.0; if (r_m <= 1.15) tmp = 2.0 / (r_m * r_m); else tmp = -1.5; end tmp_2 = tmp; end
r_m = N[Abs[r], $MachinePrecision] code[v_, w_, r$95$m_] := If[LessEqual[r$95$m, 1.15], N[(2.0 / N[(r$95$m * r$95$m), $MachinePrecision]), $MachinePrecision], -1.5]
\begin{array}{l}
r_m = \left|r\right|
\\
\begin{array}{l}
\mathbf{if}\;r\_m \leq 1.15:\\
\;\;\;\;\frac{2}{r\_m \cdot r\_m}\\
\mathbf{else}:\\
\;\;\;\;-1.5\\
\end{array}
\end{array}
if r < 1.1499999999999999Initial program 86.9%
Taylor expanded in r around 0
lower-/.f64N/A
unpow2N/A
lower-*.f6455.4
Applied rewrites55.4%
if 1.1499999999999999 < r Initial program 86.2%
Taylor expanded in v around 0
sub-negN/A
lower-+.f64N/A
associate-*r/N/A
metadata-evalN/A
lower-/.f64N/A
unpow2N/A
lower-*.f64N/A
+-commutativeN/A
distribute-neg-inN/A
associate-*r*N/A
*-commutativeN/A
distribute-rgt-neg-inN/A
distribute-lft-neg-inN/A
metadata-evalN/A
metadata-evalN/A
lower-fma.f64N/A
Applied rewrites64.3%
Taylor expanded in w around 0
Applied rewrites33.4%
Taylor expanded in r around inf
Applied rewrites64.3%
Taylor expanded in w around 0
Applied rewrites33.4%
r_m = (fabs.f64 r) (FPCore (v w r_m) :precision binary64 (+ -1.5 (/ 2.0 (* r_m r_m))))
r_m = fabs(r);
/* Herbie alternative: constant tail plus the 2/r^2 correction (no branch).
 * v and w are unused; r_m is expected to be |r| (caller applies fabs).
 * (x - 1.5 is bit-identical to -1.5 + x.) */
double code(double v, double w, double r_m) {
    double correction = 2.0 / (r_m * r_m);
    return correction - 1.5;
}
r_m = abs(r)
! Herbie alternative: constant tail plus the 2/r**2 correction (no branch).
! v and w are unused; r_m is expected to be |r| (the caller computes
! r_m = abs(r)).
real(8) function code(v, w, r_m)
real(8), intent (in) :: v
real(8), intent (in) :: w
real(8), intent (in) :: r_m
code = (-1.5d0) + (2.0d0 / (r_m * r_m))
end function
r_m = Math.abs(r);
/**
 * Herbie alternative: constant tail plus the 2/r^2 correction (no branch).
 * v and w are unused; r_m is expected to be |r| (Math.abs(r) at the caller).
 * correction - 1.5 is bit-identical to -1.5 + correction.
 */
public static double code(double v, double w, double r_m) {
    double correction = 2.0 / (r_m * r_m);
    return correction - 1.5;
}
r_m = math.fabs(r) def code(v, w, r_m): return -1.5 + (2.0 / (r_m * r_m))
r_m = abs(r) function code(v, w, r_m) return Float64(-1.5 + Float64(2.0 / Float64(r_m * r_m))) end
r_m = abs(r); function tmp = code(v, w, r_m) tmp = -1.5 + (2.0 / (r_m * r_m)); end
r_m = N[Abs[r], $MachinePrecision] code[v_, w_, r$95$m_] := N[(-1.5 + N[(2.0 / N[(r$95$m * r$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
r_m = \left|r\right|
\\
-1.5 + \frac{2}{r\_m \cdot r\_m}
\end{array}
Initial program 86.7%
Taylor expanded in w around 0
sub-negN/A
metadata-evalN/A
+-commutativeN/A
lower-+.f64N/A
associate-*r/N/A
metadata-evalN/A
lower-/.f64N/A
unpow2N/A
lower-*.f6457.2
Applied rewrites57.2%
r_m = (fabs.f64 r) (FPCore (v w r_m) :precision binary64 -1.5)
r_m = fabs(r);
/* Fully collapsed Herbie alternative: every non-constant term has been
 * Taylor-expanded away, so all three arguments are ignored. */
double code(double v, double w, double r_m) {
    (void) v;
    (void) w;
    (void) r_m;
    return -1.5;
}
r_m = abs(r)
! Fully collapsed Herbie alternative: every non-constant term has been
! Taylor-expanded away, so all three arguments are ignored.
real(8) function code(v, w, r_m)
real(8), intent (in) :: v
real(8), intent (in) :: w
real(8), intent (in) :: r_m
code = -1.5d0
end function
r_m = Math.abs(r);
/**
 * Fully collapsed Herbie alternative: all inputs are ignored and the
 * expression reduces to a constant.
 */
public static double code(double v, double w, double r_m) {
    final double CONSTANT_TAIL = -1.5;
    return CONSTANT_TAIL;
}
r_m = math.fabs(r) def code(v, w, r_m): return -1.5
r_m = abs(r) function code(v, w, r_m) return -1.5 end
r_m = abs(r); function tmp = code(v, w, r_m) tmp = -1.5; end
r_m = N[Abs[r], $MachinePrecision] code[v_, w_, r$95$m_] := -1.5
\begin{array}{l}
r_m = \left|r\right|
\\
-1.5
\end{array}
Initial program 86.7%
Taylor expanded in v around 0
sub-negN/A
lower-+.f64N/A
associate-*r/N/A
metadata-evalN/A
lower-/.f64N/A
unpow2N/A
lower-*.f64N/A
+-commutativeN/A
distribute-neg-inN/A
associate-*r*N/A
*-commutativeN/A
distribute-rgt-neg-inN/A
distribute-lft-neg-inN/A
metadata-evalN/A
metadata-evalN/A
lower-fma.f64N/A
Applied rewrites75.7%
Taylor expanded in w around 0
Applied rewrites57.2%
Taylor expanded in r around inf
Applied rewrites42.3%
Taylor expanded in w around 0
Applied rewrites18.2%
herbie shell --seed 2024226
(FPCore (v w r)
:name "Rosa's TurbineBenchmark"
:precision binary64
(- (- (+ 3.0 (/ 2.0 (* r r))) (/ (* (* 0.125 (- 3.0 (* 2.0 v))) (* (* (* w w) r) r)) (- 1.0 v))) 4.5))