
(FPCore (v w r) :precision binary64 (- (- (+ 3.0 (/ 2.0 (* r r))) (/ (* (* 0.125 (- 3.0 (* 2.0 v))) (* (* (* w w) r) r)) (- 1.0 v))) 4.5))
double code(double v, double w, double r) {
    /* Binary64 evaluation of ((3 + 2/r^2) - 0.125*(3-2v)*w^2*r^2/(1-v)) - 4.5.
       Association order mirrors the FPCore program above exactly. */
    double head = 3.0 + (2.0 / (r * r));
    double coeff = 0.125 * (3.0 - (2.0 * v));
    double w2r2 = ((w * w) * r) * r;
    double quotient = (coeff * w2r2) / (1.0 - v);
    return (head - quotient) - 4.5;
}
! Binary64 evaluation of ((3 + 2/r**2) - 0.125*(3 - 2*v)*w**2*r**2/(1 - v)) - 4.5.
! Parenthesization mirrors the generating FPCore program exactly.
real(8) function code(v, w, r)
real(8), intent (in) :: v
real(8), intent (in) :: w
real(8), intent (in) :: r
! Single expression; d0 suffixes keep every constant in double precision.
code = ((3.0d0 + (2.0d0 / (r * r))) - (((0.125d0 * (3.0d0 - (2.0d0 * v))) * (((w * w) * r) * r)) / (1.0d0 - v))) - 4.5d0
end function
public static double code(double v, double w, double r) {
    // ((3 + 2/r^2) - 0.125*(3-2v)*w^2*r^2/(1-v)) - 4.5, association preserved.
    double head = 3.0 + (2.0 / (r * r));
    double numerator = (0.125 * (3.0 - (2.0 * v))) * (((w * w) * r) * r);
    return (head - (numerator / (1.0 - v))) - 4.5;
}
def code(v, w, r):
    """Binary64 evaluation of ((3 + 2/r^2) - 0.125*(3-2v)*w^2*r^2/(1-v)) - 4.5.

    Association order matches the generating FPCore program exactly.
    """
    head = 3.0 + (2.0 / (r * r))
    num = (0.125 * (3.0 - (2.0 * v))) * (((w * w) * r) * r)
    return (head - num / (1.0 - v)) - 4.5
function code(v, w, r) return Float64(Float64(Float64(3.0 + Float64(2.0 / Float64(r * r))) - Float64(Float64(Float64(0.125 * Float64(3.0 - Float64(2.0 * v))) * Float64(Float64(Float64(w * w) * r) * r)) / Float64(1.0 - v))) - 4.5) end
function tmp = code(v, w, r) tmp = ((3.0 + (2.0 / (r * r))) - (((0.125 * (3.0 - (2.0 * v))) * (((w * w) * r) * r)) / (1.0 - v))) - 4.5; end
code[v_, w_, r_] := N[(N[(N[(3.0 + N[(2.0 / N[(r * r), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(N[(N[(0.125 * N[(3.0 - N[(2.0 * v), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(N[(N[(w * w), $MachinePrecision] * r), $MachinePrecision] * r), $MachinePrecision]), $MachinePrecision] / N[(1.0 - v), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - 4.5), $MachinePrecision]
\begin{array}{l}
\\
\left(\left(3 + \frac{2}{r \cdot r}\right) - \frac{\left(0.125 \cdot \left(3 - 2 \cdot v\right)\right) \cdot \left(\left(\left(w \cdot w\right) \cdot r\right) \cdot r\right)}{1 - v}\right) - 4.5
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 16 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (v w r) :precision binary64 (- (- (+ 3.0 (/ 2.0 (* r r))) (/ (* (* 0.125 (- 3.0 (* 2.0 v))) (* (* (* w w) r) r)) (- 1.0 v))) 4.5))
double code(double v, double w, double r) {
    /* ((3 + 2/r^2) - 0.125*(3-2v)*w^2*r^2/(1-v)) - 4.5 with the
       FPCore association order kept intact. */
    double base = 3.0 + (2.0 / (r * r));
    double numerator = (0.125 * (3.0 - (2.0 * v))) * (((w * w) * r) * r);
    double correction = numerator / (1.0 - v);
    return (base - correction) - 4.5;
}
! Direct binary64 translation of the original FPCore expression:
! ((3 + 2/r**2) - 0.125*(3 - 2*v)*w**2*r**2/(1 - v)) - 4.5
real(8) function code(v, w, r)
real(8), intent (in) :: v
real(8), intent (in) :: w
real(8), intent (in) :: r
! Evaluation order (parentheses) mirrors the FPCore program exactly.
code = ((3.0d0 + (2.0d0 / (r * r))) - (((0.125d0 * (3.0d0 - (2.0d0 * v))) * (((w * w) * r) * r)) / (1.0d0 - v))) - 4.5d0
end function
public static double code(double v, double w, double r) {
    // Same expression as the FPCore program; parenthesization preserved.
    double base = 3.0 + (2.0 / (r * r));
    double scale = 0.125 * (3.0 - (2.0 * v));
    double w2r2 = ((w * w) * r) * r;
    return (base - ((scale * w2r2) / (1.0 - v))) - 4.5;
}
def code(v, w, r):
    # Same expression, spelled with intermediate names for readability;
    # floating-point association order is unchanged.
    base = 3.0 + (2.0 / (r * r))
    coeff = 0.125 * (3.0 - (2.0 * v))
    w2r2 = ((w * w) * r) * r
    return (base - (coeff * w2r2) / (1.0 - v)) - 4.5
function code(v, w, r) return Float64(Float64(Float64(3.0 + Float64(2.0 / Float64(r * r))) - Float64(Float64(Float64(0.125 * Float64(3.0 - Float64(2.0 * v))) * Float64(Float64(Float64(w * w) * r) * r)) / Float64(1.0 - v))) - 4.5) end
function tmp = code(v, w, r) tmp = ((3.0 + (2.0 / (r * r))) - (((0.125 * (3.0 - (2.0 * v))) * (((w * w) * r) * r)) / (1.0 - v))) - 4.5; end
code[v_, w_, r_] := N[(N[(N[(3.0 + N[(2.0 / N[(r * r), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(N[(N[(0.125 * N[(3.0 - N[(2.0 * v), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(N[(N[(w * w), $MachinePrecision] * r), $MachinePrecision] * r), $MachinePrecision]), $MachinePrecision] / N[(1.0 - v), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - 4.5), $MachinePrecision]
\begin{array}{l}
\\
\left(\left(3 + \frac{2}{r \cdot r}\right) - \frac{\left(0.125 \cdot \left(3 - 2 \cdot v\right)\right) \cdot \left(\left(\left(w \cdot w\right) \cdot r\right) \cdot r\right)}{1 - v}\right) - 4.5
\end{array}
;; Herbie alternative: same expression restructured around shared subterms.
;; The (<= ... (- INFINITY)) guard selects a Taylor-expanded fallback only
;; when the main expression reaches -inf; otherwise the rewritten form runs.
(FPCore (v w r)
:precision binary64
(let* ((t_0 (/ 2.0 (* r r)))
(t_1 (+ 3.0 t_0))
(t_2 (* 0.125 (- 3.0 (* 2.0 v)))))
(if (<= (+ t_1 (/ (* t_2 (* r (* r (* w w)))) (+ v -1.0))) (- INFINITY))
(- (- t_0 (* (* r (* r w)) (* w 0.25))) 4.5)
(- (+ t_1 (/ (* t_2 (* (* r w) (* r w))) (+ v -1.0))) 4.5))))
double code(double v, double w, double r) {
    /* Shared subterms of both branches. */
    double recip = 2.0 / (r * r);
    double base = 3.0 + recip;
    double scale = 0.125 * (3.0 - (2.0 * v));
    double denom = v + -1.0;
    /* Guard: the fallback form is used only when the main expression is -inf. */
    double guard = base + ((scale * (r * (r * (w * w)))) / denom);
    if (guard <= -((double) INFINITY)) {
        return (recip - ((r * (r * w)) * (w * 0.25))) - 4.5;
    }
    return (base + ((scale * ((r * w) * (r * w))) / denom)) - 4.5;
}
public static double code(double v, double w, double r) {
    // Shared subterms of both branches.
    double recip = 2.0 / (r * r);
    double base = 3.0 + recip;
    double scale = 0.125 * (3.0 - (2.0 * v));
    // Guard: fallback form is used only when the main expression is -infinity.
    double guard = base + ((scale * (r * (r * (w * w)))) / (v + -1.0));
    if (guard <= Double.NEGATIVE_INFINITY) {
        return (recip - ((r * (r * w)) * (w * 0.25))) - 4.5;
    }
    return (base + ((scale * ((r * w) * (r * w))) / (v + -1.0))) - 4.5;
}
def code(v, w, r):
    """Herbie alternative of the original expression.

    Bug fix: the generated one-liner mashed several statements onto a single
    line (``t_0 = ... t_1 = ... if ...:``), which is a SyntaxError in Python.
    Rewritten as valid multi-line code; arithmetic and branch structure are
    byte-for-byte the same operations in the same order.
    """
    import math  # the original referenced math.inf without a visible import

    t_0 = 2.0 / (r * r)
    t_1 = 3.0 + t_0
    t_2 = 0.125 * (3.0 - (2.0 * v))
    # Fallback form is used only when the main expression reaches -inf.
    if (t_1 + ((t_2 * (r * (r * (w * w)))) / (v + -1.0))) <= -math.inf:
        tmp = (t_0 - ((r * (r * w)) * (w * 0.25))) - 4.5
    else:
        tmp = (t_1 + ((t_2 * ((r * w) * (r * w))) / (v + -1.0))) - 4.5
    return tmp
function code(v, w, r) t_0 = Float64(2.0 / Float64(r * r)) t_1 = Float64(3.0 + t_0) t_2 = Float64(0.125 * Float64(3.0 - Float64(2.0 * v))) tmp = 0.0 if (Float64(t_1 + Float64(Float64(t_2 * Float64(r * Float64(r * Float64(w * w)))) / Float64(v + -1.0))) <= Float64(-Inf)) tmp = Float64(Float64(t_0 - Float64(Float64(r * Float64(r * w)) * Float64(w * 0.25))) - 4.5); else tmp = Float64(Float64(t_1 + Float64(Float64(t_2 * Float64(Float64(r * w) * Float64(r * w))) / Float64(v + -1.0))) - 4.5); end return tmp end
function tmp_2 = code(v, w, r) t_0 = 2.0 / (r * r); t_1 = 3.0 + t_0; t_2 = 0.125 * (3.0 - (2.0 * v)); tmp = 0.0; if ((t_1 + ((t_2 * (r * (r * (w * w)))) / (v + -1.0))) <= -Inf) tmp = (t_0 - ((r * (r * w)) * (w * 0.25))) - 4.5; else tmp = (t_1 + ((t_2 * ((r * w) * (r * w))) / (v + -1.0))) - 4.5; end tmp_2 = tmp; end
code[v_, w_, r_] := Block[{t$95$0 = N[(2.0 / N[(r * r), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(3.0 + t$95$0), $MachinePrecision]}, Block[{t$95$2 = N[(0.125 * N[(3.0 - N[(2.0 * v), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[N[(t$95$1 + N[(N[(t$95$2 * N[(r * N[(r * N[(w * w), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(v + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], (-Infinity)], N[(N[(t$95$0 - N[(N[(r * N[(r * w), $MachinePrecision]), $MachinePrecision] * N[(w * 0.25), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - 4.5), $MachinePrecision], N[(N[(t$95$1 + N[(N[(t$95$2 * N[(N[(r * w), $MachinePrecision] * N[(r * w), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(v + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - 4.5), $MachinePrecision]]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{2}{r \cdot r}\\
t_1 := 3 + t\_0\\
t_2 := 0.125 \cdot \left(3 - 2 \cdot v\right)\\
\mathbf{if}\;t\_1 + \frac{t\_2 \cdot \left(r \cdot \left(r \cdot \left(w \cdot w\right)\right)\right)}{v + -1} \leq -\infty:\\
\;\;\;\;\left(t\_0 - \left(r \cdot \left(r \cdot w\right)\right) \cdot \left(w \cdot 0.25\right)\right) - 4.5\\
\mathbf{else}:\\
\;\;\;\;\left(t\_1 + \frac{t\_2 \cdot \left(\left(r \cdot w\right) \cdot \left(r \cdot w\right)\right)}{v + -1}\right) - 4.5\\
\end{array}
\end{array}
if (-.f64 (+.f64 #s(literal 3 binary64) (/.f64 #s(literal 2 binary64) (*.f64 r r))) (/.f64 (*.f64 (*.f64 #s(literal 1/8 binary64) (-.f64 #s(literal 3 binary64) (*.f64 #s(literal 2 binary64) v))) (*.f64 (*.f64 (*.f64 w w) r) r)) (-.f64 #s(literal 1 binary64) v))) < -inf.0Initial program 77.1%
Taylor expanded in v around inf
*-commutativeN/A
associate-*l*N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6485.4%
Simplified85.4%
associate-*l*N/A
associate-*r*N/A
*-lowering-*.f64N/A
associate-*l*N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f6498.8%
Applied egg-rr98.8%
Taylor expanded in r around 0
/-lowering-/.f64N/A
unpow2N/A
*-lowering-*.f6498.8%
Simplified98.8%
if -inf.0 < (-.f64 (+.f64 #s(literal 3 binary64) (/.f64 #s(literal 2 binary64) (*.f64 r r))) (/.f64 (*.f64 (*.f64 #s(literal 1/8 binary64) (-.f64 #s(literal 3 binary64) (*.f64 #s(literal 2 binary64) v))) (*.f64 (*.f64 (*.f64 w w) r) r)) (-.f64 #s(literal 1 binary64) v))) Initial program 85.6%
associate-*l*N/A
unswap-sqrN/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f6499.2%
Applied egg-rr99.2%
Final simplification99.1%
(FPCore (v w r) :precision binary64 (- (+ 3.0 (/ 2.0 (* r r))) (fma (* (* r w) (fma v -0.25 0.375)) (/ (* r w) (- 1.0 v)) 4.5)))
double code(double v, double w, double r) {
return (3.0 + (2.0 / (r * r))) - fma(((r * w) * fma(v, -0.25, 0.375)), ((r * w) / (1.0 - v)), 4.5);
}
function code(v, w, r) return Float64(Float64(3.0 + Float64(2.0 / Float64(r * r))) - fma(Float64(Float64(r * w) * fma(v, -0.25, 0.375)), Float64(Float64(r * w) / Float64(1.0 - v)), 4.5)) end
code[v_, w_, r_] := N[(N[(3.0 + N[(2.0 / N[(r * r), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(N[(N[(r * w), $MachinePrecision] * N[(v * -0.25 + 0.375), $MachinePrecision]), $MachinePrecision] * N[(N[(r * w), $MachinePrecision] / N[(1.0 - v), $MachinePrecision]), $MachinePrecision] + 4.5), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(3 + \frac{2}{r \cdot r}\right) - \mathsf{fma}\left(\left(r \cdot w\right) \cdot \mathsf{fma}\left(v, -0.25, 0.375\right), \frac{r \cdot w}{1 - v}, 4.5\right)
\end{array}
Initial program 83.0%
associate--l-N/A
--lowering--.f64N/A
+-lowering-+.f64N/A
/-lowering-/.f64N/A
*-lowering-*.f64N/A
associate-*r*N/A
associate-/l*N/A
accelerator-lowering-fma.f64N/A
Applied egg-rr80.8%
associate-*r/N/A
associate-*l*N/A
associate-*l*N/A
*-commutativeN/A
associate-*l*N/A
swap-sqrN/A
+-commutativeN/A
*-commutativeN/A
metadata-evalN/A
cancel-sign-sub-invN/A
associate-*r*N/A
associate-/l*N/A
Applied egg-rr97.1%
;; Herbie alternative: piecewise in r with fma forms on the two lower ranges
;; and a v-free Taylor-expanded form for very large r.
(FPCore (v w r)
:precision binary64
(if (<= r 460000.0)
(+ -1.5 (fma (* w (* (* r r) -0.25)) w (/ 2.0 (* r r))))
(if (<= r 1e+147)
(fma (* (* w (fma -0.25 v 0.375)) (- 0.0 w)) (/ (* r r) (- 1.0 v)) -1.5)
(- (- 3.0 (* (* r w) (* r (* w 0.25)))) 4.5))))
double code(double v, double w, double r) {
double tmp;
if (r <= 460000.0) {
tmp = -1.5 + fma((w * ((r * r) * -0.25)), w, (2.0 / (r * r)));
} else if (r <= 1e+147) {
tmp = fma(((w * fma(-0.25, v, 0.375)) * (0.0 - w)), ((r * r) / (1.0 - v)), -1.5);
} else {
tmp = (3.0 - ((r * w) * (r * (w * 0.25)))) - 4.5;
}
return tmp;
}
function code(v, w, r) tmp = 0.0 if (r <= 460000.0) tmp = Float64(-1.5 + fma(Float64(w * Float64(Float64(r * r) * -0.25)), w, Float64(2.0 / Float64(r * r)))); elseif (r <= 1e+147) tmp = fma(Float64(Float64(w * fma(-0.25, v, 0.375)) * Float64(0.0 - w)), Float64(Float64(r * r) / Float64(1.0 - v)), -1.5); else tmp = Float64(Float64(3.0 - Float64(Float64(r * w) * Float64(r * Float64(w * 0.25)))) - 4.5); end return tmp end
code[v_, w_, r_] := If[LessEqual[r, 460000.0], N[(-1.5 + N[(N[(w * N[(N[(r * r), $MachinePrecision] * -0.25), $MachinePrecision]), $MachinePrecision] * w + N[(2.0 / N[(r * r), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], If[LessEqual[r, 1e+147], N[(N[(N[(w * N[(-0.25 * v + 0.375), $MachinePrecision]), $MachinePrecision] * N[(0.0 - w), $MachinePrecision]), $MachinePrecision] * N[(N[(r * r), $MachinePrecision] / N[(1.0 - v), $MachinePrecision]), $MachinePrecision] + -1.5), $MachinePrecision], N[(N[(3.0 - N[(N[(r * w), $MachinePrecision] * N[(r * N[(w * 0.25), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - 4.5), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;r \leq 460000:\\
\;\;\;\;-1.5 + \mathsf{fma}\left(w \cdot \left(\left(r \cdot r\right) \cdot -0.25\right), w, \frac{2}{r \cdot r}\right)\\
\mathbf{elif}\;r \leq 10^{+147}:\\
\;\;\;\;\mathsf{fma}\left(\left(w \cdot \mathsf{fma}\left(-0.25, v, 0.375\right)\right) \cdot \left(0 - w\right), \frac{r \cdot r}{1 - v}, -1.5\right)\\
\mathbf{else}:\\
\;\;\;\;\left(3 - \left(r \cdot w\right) \cdot \left(r \cdot \left(w \cdot 0.25\right)\right)\right) - 4.5\\
\end{array}
\end{array}
if r < 4.6e5Initial program 81.6%
Taylor expanded in v around inf
sub-negN/A
+-commutativeN/A
distribute-neg-inN/A
metadata-evalN/A
distribute-lft-neg-inN/A
metadata-evalN/A
associate-+l+N/A
+-lowering-+.f64N/A
associate-*r*N/A
unpow2N/A
associate-*r*N/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
associate-*r/N/A
Simplified88.9%
if 4.6e5 < r < 9.9999999999999998e146Initial program 92.9%
associate--l-N/A
--lowering--.f64N/A
+-lowering-+.f64N/A
/-lowering-/.f64N/A
*-lowering-*.f64N/A
associate-*r*N/A
associate-/l*N/A
accelerator-lowering-fma.f64N/A
Applied egg-rr85.5%
associate-*r/N/A
associate-*l*N/A
associate-*l*N/A
*-commutativeN/A
associate-*l*N/A
swap-sqrN/A
+-commutativeN/A
*-commutativeN/A
metadata-evalN/A
cancel-sign-sub-invN/A
associate-*r*N/A
associate-/l*N/A
Applied egg-rr99.6%
Taylor expanded in r around inf
distribute-lft-inN/A
associate-/l*N/A
distribute-lft-inN/A
*-commutativeN/A
associate-*l*N/A
lft-mult-inverseN/A
metadata-evalN/A
metadata-evalN/A
+-commutativeN/A
Simplified96.3%
if 9.9999999999999998e146 < r Initial program 83.4%
Taylor expanded in v around inf
*-commutativeN/A
associate-*l*N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6461.4%
Simplified61.4%
Taylor expanded in r around inf
Simplified61.4%
associate-*l*N/A
associate-*r*N/A
associate-*r*N/A
*-commutativeN/A
associate-*r*N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f6485.6%
Applied egg-rr85.6%
Final simplification89.3%
;; Herbie alternative: shared t_0 = 2/r^2 and a three-way split on r;
;; all three branches are v-free Taylor-expanded forms.
(FPCore (v w r)
:precision binary64
(let* ((t_0 (/ 2.0 (* r r))))
(if (<= r 7.6)
(+ -1.5 (fma (* w (* (* r r) -0.25)) w t_0))
(if (<= r 1.65e+81)
(+ t_0 (fma (* r (* r (* w w))) -0.375 -1.5))
(- (- 3.0 (* (* r w) (* r (* w 0.25)))) 4.5)))))
double code(double v, double w, double r) {
double t_0 = 2.0 / (r * r);
double tmp;
if (r <= 7.6) {
tmp = -1.5 + fma((w * ((r * r) * -0.25)), w, t_0);
} else if (r <= 1.65e+81) {
tmp = t_0 + fma((r * (r * (w * w))), -0.375, -1.5);
} else {
tmp = (3.0 - ((r * w) * (r * (w * 0.25)))) - 4.5;
}
return tmp;
}
function code(v, w, r) t_0 = Float64(2.0 / Float64(r * r)) tmp = 0.0 if (r <= 7.6) tmp = Float64(-1.5 + fma(Float64(w * Float64(Float64(r * r) * -0.25)), w, t_0)); elseif (r <= 1.65e+81) tmp = Float64(t_0 + fma(Float64(r * Float64(r * Float64(w * w))), -0.375, -1.5)); else tmp = Float64(Float64(3.0 - Float64(Float64(r * w) * Float64(r * Float64(w * 0.25)))) - 4.5); end return tmp end
code[v_, w_, r_] := Block[{t$95$0 = N[(2.0 / N[(r * r), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[r, 7.6], N[(-1.5 + N[(N[(w * N[(N[(r * r), $MachinePrecision] * -0.25), $MachinePrecision]), $MachinePrecision] * w + t$95$0), $MachinePrecision]), $MachinePrecision], If[LessEqual[r, 1.65e+81], N[(t$95$0 + N[(N[(r * N[(r * N[(w * w), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * -0.375 + -1.5), $MachinePrecision]), $MachinePrecision], N[(N[(3.0 - N[(N[(r * w), $MachinePrecision] * N[(r * N[(w * 0.25), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - 4.5), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{2}{r \cdot r}\\
\mathbf{if}\;r \leq 7.6:\\
\;\;\;\;-1.5 + \mathsf{fma}\left(w \cdot \left(\left(r \cdot r\right) \cdot -0.25\right), w, t\_0\right)\\
\mathbf{elif}\;r \leq 1.65 \cdot 10^{+81}:\\
\;\;\;\;t\_0 + \mathsf{fma}\left(r \cdot \left(r \cdot \left(w \cdot w\right)\right), -0.375, -1.5\right)\\
\mathbf{else}:\\
\;\;\;\;\left(3 - \left(r \cdot w\right) \cdot \left(r \cdot \left(w \cdot 0.25\right)\right)\right) - 4.5\\
\end{array}
\end{array}
if r < 7.5999999999999996Initial program 81.5%
Taylor expanded in v around inf
sub-negN/A
+-commutativeN/A
distribute-neg-inN/A
metadata-evalN/A
distribute-lft-neg-inN/A
metadata-evalN/A
associate-+l+N/A
+-lowering-+.f64N/A
associate-*r*N/A
unpow2N/A
associate-*r*N/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
associate-*r/N/A
Simplified88.9%
if 7.5999999999999996 < r < 1.65e81Initial program 88.8%
associate-*l*N/A
unswap-sqrN/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f6488.9%
Applied egg-rr88.9%
Taylor expanded in v around 0
sub-negN/A
+-lowering-+.f64N/A
associate-*r/N/A
metadata-evalN/A
/-lowering-/.f64N/A
unpow2N/A
*-lowering-*.f64N/A
+-commutativeN/A
distribute-neg-inN/A
*-commutativeN/A
distribute-rgt-neg-inN/A
metadata-evalN/A
metadata-evalN/A
accelerator-lowering-fma.f64N/A
unpow2N/A
associate-*l*N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6486.0%
Simplified86.0%
if 1.65e81 < r Initial program 88.1%
Taylor expanded in v around inf
*-commutativeN/A
associate-*l*N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6470.9%
Simplified70.9%
Taylor expanded in r around inf
Simplified70.9%
associate-*l*N/A
associate-*r*N/A
associate-*r*N/A
*-commutativeN/A
associate-*r*N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f6488.2%
Applied egg-rr88.2%
Final simplification88.6%
(FPCore (v w r) :precision binary64 (if (<= r 1100.0) (+ -1.5 (fma (* w (* (* r r) -0.25)) w (/ 2.0 (* r r)))) (- 3.0 (fma (* (* r w) (fma v -0.25 0.375)) (/ (* r w) (- 1.0 v)) 4.5))))
double code(double v, double w, double r) {
double tmp;
if (r <= 1100.0) {
tmp = -1.5 + fma((w * ((r * r) * -0.25)), w, (2.0 / (r * r)));
} else {
tmp = 3.0 - fma(((r * w) * fma(v, -0.25, 0.375)), ((r * w) / (1.0 - v)), 4.5);
}
return tmp;
}
function code(v, w, r) tmp = 0.0 if (r <= 1100.0) tmp = Float64(-1.5 + fma(Float64(w * Float64(Float64(r * r) * -0.25)), w, Float64(2.0 / Float64(r * r)))); else tmp = Float64(3.0 - fma(Float64(Float64(r * w) * fma(v, -0.25, 0.375)), Float64(Float64(r * w) / Float64(1.0 - v)), 4.5)); end return tmp end
code[v_, w_, r_] := If[LessEqual[r, 1100.0], N[(-1.5 + N[(N[(w * N[(N[(r * r), $MachinePrecision] * -0.25), $MachinePrecision]), $MachinePrecision] * w + N[(2.0 / N[(r * r), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(3.0 - N[(N[(N[(r * w), $MachinePrecision] * N[(v * -0.25 + 0.375), $MachinePrecision]), $MachinePrecision] * N[(N[(r * w), $MachinePrecision] / N[(1.0 - v), $MachinePrecision]), $MachinePrecision] + 4.5), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;r \leq 1100:\\
\;\;\;\;-1.5 + \mathsf{fma}\left(w \cdot \left(\left(r \cdot r\right) \cdot -0.25\right), w, \frac{2}{r \cdot r}\right)\\
\mathbf{else}:\\
\;\;\;\;3 - \mathsf{fma}\left(\left(r \cdot w\right) \cdot \mathsf{fma}\left(v, -0.25, 0.375\right), \frac{r \cdot w}{1 - v}, 4.5\right)\\
\end{array}
\end{array}
if r < 1100Initial program 81.6%
Taylor expanded in v around inf
sub-negN/A
+-commutativeN/A
distribute-neg-inN/A
metadata-evalN/A
distribute-lft-neg-inN/A
metadata-evalN/A
associate-+l+N/A
+-lowering-+.f64N/A
associate-*r*N/A
unpow2N/A
associate-*r*N/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
associate-*r/N/A
Simplified88.9%
if 1100 < r Initial program 88.1%
associate--l-N/A
--lowering--.f64N/A
+-lowering-+.f64N/A
/-lowering-/.f64N/A
*-lowering-*.f64N/A
associate-*r*N/A
associate-/l*N/A
accelerator-lowering-fma.f64N/A
Applied egg-rr80.1%
associate-*r/N/A
associate-*l*N/A
associate-*l*N/A
*-commutativeN/A
associate-*l*N/A
swap-sqrN/A
+-commutativeN/A
*-commutativeN/A
metadata-evalN/A
cancel-sign-sub-invN/A
associate-*r*N/A
associate-/l*N/A
Applied egg-rr99.6%
Taylor expanded in r around inf
Simplified99.6%
Final simplification91.2%
;; Herbie alternative: shared t_0 = 2/r^2 with a single split at r = 0.75;
;; both branches are v-free fma forms.
(FPCore (v w r)
:precision binary64
(let* ((t_0 (/ 2.0 (* r r))))
(if (<= r 0.75)
(+ -1.5 (fma (* w (* (* r r) -0.25)) w t_0))
(fma (* r (* w -0.375)) (* r w) (+ t_0 -1.5)))))
double code(double v, double w, double r) {
double t_0 = 2.0 / (r * r);
double tmp;
if (r <= 0.75) {
tmp = -1.5 + fma((w * ((r * r) * -0.25)), w, t_0);
} else {
tmp = fma((r * (w * -0.375)), (r * w), (t_0 + -1.5));
}
return tmp;
}
function code(v, w, r) t_0 = Float64(2.0 / Float64(r * r)) tmp = 0.0 if (r <= 0.75) tmp = Float64(-1.5 + fma(Float64(w * Float64(Float64(r * r) * -0.25)), w, t_0)); else tmp = fma(Float64(r * Float64(w * -0.375)), Float64(r * w), Float64(t_0 + -1.5)); end return tmp end
code[v_, w_, r_] := Block[{t$95$0 = N[(2.0 / N[(r * r), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[r, 0.75], N[(-1.5 + N[(N[(w * N[(N[(r * r), $MachinePrecision] * -0.25), $MachinePrecision]), $MachinePrecision] * w + t$95$0), $MachinePrecision]), $MachinePrecision], N[(N[(r * N[(w * -0.375), $MachinePrecision]), $MachinePrecision] * N[(r * w), $MachinePrecision] + N[(t$95$0 + -1.5), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{2}{r \cdot r}\\
\mathbf{if}\;r \leq 0.75:\\
\;\;\;\;-1.5 + \mathsf{fma}\left(w \cdot \left(\left(r \cdot r\right) \cdot -0.25\right), w, t\_0\right)\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(r \cdot \left(w \cdot -0.375\right), r \cdot w, t\_0 + -1.5\right)\\
\end{array}
\end{array}
if r < 0.75Initial program 81.5%
Taylor expanded in v around inf
sub-negN/A
+-commutativeN/A
distribute-neg-inN/A
metadata-evalN/A
distribute-lft-neg-inN/A
metadata-evalN/A
associate-+l+N/A
+-lowering-+.f64N/A
associate-*r*N/A
unpow2N/A
associate-*r*N/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
associate-*r/N/A
Simplified88.9%
if 0.75 < r Initial program 88.3%
+-commutativeN/A
associate--l+N/A
associate-/r*N/A
div-invN/A
accelerator-lowering-fma.f64N/A
/-lowering-/.f64N/A
/-lowering-/.f64N/A
--lowering--.f64N/A
/-lowering-/.f64N/A
Applied egg-rr77.3%
Taylor expanded in v around 0
cancel-sign-sub-invN/A
+-commutativeN/A
distribute-lft-neg-inN/A
associate-*r*N/A
*-commutativeN/A
distribute-rgt-neg-inN/A
distribute-lft-neg-inN/A
metadata-evalN/A
accelerator-lowering-fma.f64N/A
unpow2N/A
*-lowering-*.f64N/A
metadata-evalN/A
distribute-lft-neg-inN/A
*-commutativeN/A
distribute-rgt-neg-inN/A
metadata-evalN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6470.7%
Simplified70.7%
frac-timesN/A
metadata-evalN/A
associate-+r+N/A
associate--l+N/A
metadata-evalN/A
+-lowering-+.f64N/A
Applied egg-rr79.8%
associate-+l+N/A
*-commutativeN/A
associate-*r*N/A
associate-*l*N/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
associate-*l*N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
/-lowering-/.f64N/A
*-lowering-*.f6486.0%
Applied egg-rr86.0%
Final simplification88.2%
;; Herbie alternative: shared t_0 = 2/r^2 with a split at r = 0.41;
;; the branches differ only in how the quadratic fma term is associated.
(FPCore (v w r)
:precision binary64
(let* ((t_0 (/ 2.0 (* r r))))
(if (<= r 0.41)
(+ -1.5 (fma (* w (* (* r r) -0.25)) w t_0))
(+ -1.5 (fma r (* w (* r (* w -0.375))) t_0)))))
double code(double v, double w, double r) {
double t_0 = 2.0 / (r * r);
double tmp;
if (r <= 0.41) {
tmp = -1.5 + fma((w * ((r * r) * -0.25)), w, t_0);
} else {
tmp = -1.5 + fma(r, (w * (r * (w * -0.375))), t_0);
}
return tmp;
}
function code(v, w, r) t_0 = Float64(2.0 / Float64(r * r)) tmp = 0.0 if (r <= 0.41) tmp = Float64(-1.5 + fma(Float64(w * Float64(Float64(r * r) * -0.25)), w, t_0)); else tmp = Float64(-1.5 + fma(r, Float64(w * Float64(r * Float64(w * -0.375))), t_0)); end return tmp end
code[v_, w_, r_] := Block[{t$95$0 = N[(2.0 / N[(r * r), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[r, 0.41], N[(-1.5 + N[(N[(w * N[(N[(r * r), $MachinePrecision] * -0.25), $MachinePrecision]), $MachinePrecision] * w + t$95$0), $MachinePrecision]), $MachinePrecision], N[(-1.5 + N[(r * N[(w * N[(r * N[(w * -0.375), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + t$95$0), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{2}{r \cdot r}\\
\mathbf{if}\;r \leq 0.41:\\
\;\;\;\;-1.5 + \mathsf{fma}\left(w \cdot \left(\left(r \cdot r\right) \cdot -0.25\right), w, t\_0\right)\\
\mathbf{else}:\\
\;\;\;\;-1.5 + \mathsf{fma}\left(r, w \cdot \left(r \cdot \left(w \cdot -0.375\right)\right), t\_0\right)\\
\end{array}
\end{array}
if r < 0.409999999999999976Initial program 81.5%
Taylor expanded in v around inf
sub-negN/A
+-commutativeN/A
distribute-neg-inN/A
metadata-evalN/A
distribute-lft-neg-inN/A
metadata-evalN/A
associate-+l+N/A
+-lowering-+.f64N/A
associate-*r*N/A
unpow2N/A
associate-*r*N/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
associate-*r/N/A
Simplified88.9%
if 0.409999999999999976 < r Initial program 88.3%
+-commutativeN/A
associate--l+N/A
associate-/r*N/A
div-invN/A
accelerator-lowering-fma.f64N/A
/-lowering-/.f64N/A
/-lowering-/.f64N/A
--lowering--.f64N/A
/-lowering-/.f64N/A
Applied egg-rr77.3%
Taylor expanded in v around 0
cancel-sign-sub-invN/A
+-commutativeN/A
distribute-lft-neg-inN/A
associate-*r*N/A
*-commutativeN/A
distribute-rgt-neg-inN/A
distribute-lft-neg-inN/A
metadata-evalN/A
accelerator-lowering-fma.f64N/A
unpow2N/A
*-lowering-*.f64N/A
metadata-evalN/A
distribute-lft-neg-inN/A
*-commutativeN/A
distribute-rgt-neg-inN/A
metadata-evalN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6470.7%
Simplified70.7%
frac-timesN/A
metadata-evalN/A
associate-+r+N/A
associate--l+N/A
metadata-evalN/A
+-lowering-+.f64N/A
Applied egg-rr79.8%
associate-*r*N/A
*-lowering-*.f64N/A
associate-*l*N/A
*-lowering-*.f64N/A
*-lowering-*.f6486.0%
Applied egg-rr86.0%
Final simplification88.2%
;; Herbie alternative: three-way split on r, all branches v-free;
;; middle range uses a single fma, large r drops the 2/r^2 term.
(FPCore (v w r)
:precision binary64
(if (<= r 460000.0)
(+ -1.5 (fma (* w (* (* r r) -0.25)) w (/ 2.0 (* r r))))
(if (<= r 3.7e+81)
(fma (* r r) (* (* w w) -0.375) -1.5)
(- (- 3.0 (* (* r w) (* r (* w 0.25)))) 4.5))))
double code(double v, double w, double r) {
double tmp;
if (r <= 460000.0) {
tmp = -1.5 + fma((w * ((r * r) * -0.25)), w, (2.0 / (r * r)));
} else if (r <= 3.7e+81) {
tmp = fma((r * r), ((w * w) * -0.375), -1.5);
} else {
tmp = (3.0 - ((r * w) * (r * (w * 0.25)))) - 4.5;
}
return tmp;
}
function code(v, w, r) tmp = 0.0 if (r <= 460000.0) tmp = Float64(-1.5 + fma(Float64(w * Float64(Float64(r * r) * -0.25)), w, Float64(2.0 / Float64(r * r)))); elseif (r <= 3.7e+81) tmp = fma(Float64(r * r), Float64(Float64(w * w) * -0.375), -1.5); else tmp = Float64(Float64(3.0 - Float64(Float64(r * w) * Float64(r * Float64(w * 0.25)))) - 4.5); end return tmp end
code[v_, w_, r_] := If[LessEqual[r, 460000.0], N[(-1.5 + N[(N[(w * N[(N[(r * r), $MachinePrecision] * -0.25), $MachinePrecision]), $MachinePrecision] * w + N[(2.0 / N[(r * r), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], If[LessEqual[r, 3.7e+81], N[(N[(r * r), $MachinePrecision] * N[(N[(w * w), $MachinePrecision] * -0.375), $MachinePrecision] + -1.5), $MachinePrecision], N[(N[(3.0 - N[(N[(r * w), $MachinePrecision] * N[(r * N[(w * 0.25), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - 4.5), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;r \leq 460000:\\
\;\;\;\;-1.5 + \mathsf{fma}\left(w \cdot \left(\left(r \cdot r\right) \cdot -0.25\right), w, \frac{2}{r \cdot r}\right)\\
\mathbf{elif}\;r \leq 3.7 \cdot 10^{+81}:\\
\;\;\;\;\mathsf{fma}\left(r \cdot r, \left(w \cdot w\right) \cdot -0.375, -1.5\right)\\
\mathbf{else}:\\
\;\;\;\;\left(3 - \left(r \cdot w\right) \cdot \left(r \cdot \left(w \cdot 0.25\right)\right)\right) - 4.5\\
\end{array}
\end{array}
if r < 4.6e5Initial program 81.6%
Taylor expanded in v around inf
sub-negN/A
+-commutativeN/A
distribute-neg-inN/A
metadata-evalN/A
distribute-lft-neg-inN/A
metadata-evalN/A
associate-+l+N/A
+-lowering-+.f64N/A
associate-*r*N/A
unpow2N/A
associate-*r*N/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
associate-*r/N/A
Simplified88.9%
if 4.6e5 < r < 3.7000000000000001e81Initial program 88.2%
+-commutativeN/A
associate--l+N/A
associate-/r*N/A
div-invN/A
accelerator-lowering-fma.f64N/A
/-lowering-/.f64N/A
/-lowering-/.f64N/A
--lowering--.f64N/A
/-lowering-/.f64N/A
Applied egg-rr88.2%
Taylor expanded in v around 0
cancel-sign-sub-invN/A
+-commutativeN/A
distribute-lft-neg-inN/A
associate-*r*N/A
*-commutativeN/A
distribute-rgt-neg-inN/A
distribute-lft-neg-inN/A
metadata-evalN/A
accelerator-lowering-fma.f64N/A
unpow2N/A
*-lowering-*.f64N/A
metadata-evalN/A
distribute-lft-neg-inN/A
*-commutativeN/A
distribute-rgt-neg-inN/A
metadata-evalN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6485.0%
Simplified85.0%
Taylor expanded in r around inf
sub-negN/A
distribute-lft-inN/A
distribute-rgt-neg-inN/A
*-commutativeN/A
associate-*l*N/A
lft-mult-inverseN/A
metadata-evalN/A
metadata-evalN/A
accelerator-lowering-fma.f64N/A
unpow2N/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6485.0%
Simplified85.0%
if 3.7000000000000001e81 < r Initial program 88.1%
Taylor expanded in v around inf
*-commutativeN/A
associate-*l*N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6470.9%
Simplified70.9%
Taylor expanded in r around inf
Simplified70.9%
associate-*l*N/A
associate-*r*N/A
associate-*r*N/A
*-commutativeN/A
associate-*r*N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f6488.2%
Applied egg-rr88.2%
Final simplification88.6%
(FPCore (v w r) :precision binary64 (if (<= r 8.8e-18) (/ (/ 2.0 r) r) (- (- 3.0 (* (* r w) (* r (* w 0.25)))) 4.5)))
double code(double v, double w, double r) {
    /* Tiny r: only the 2/r^2 term matters; otherwise a v-free quadratic. */
    if (r <= 8.8e-18)
        return (2.0 / r) / r;
    return (3.0 - ((r * w) * (r * (w * 0.25)))) - 4.5;
}
! Herbie alternative: for tiny r only the 2/r**2 term matters; otherwise a
! v-free quadratic approximation (3 - 0.25*r**2*w**2) - 4.5 is used.
real(8) function code(v, w, r)
real(8), intent (in) :: v
real(8), intent (in) :: w
real(8), intent (in) :: r
real(8) :: tmp
if (r <= 8.8d-18) then
! Divide twice instead of forming r*r, which would underflow.
tmp = (2.0d0 / r) / r
else
tmp = (3.0d0 - ((r * w) * (r * (w * 0.25d0)))) - 4.5d0
end if
code = tmp
end function
public static double code(double v, double w, double r) {
    // Tiny r: divide twice so r*r cannot underflow; else v-free quadratic.
    if (r <= 8.8e-18) {
        return (2.0 / r) / r;
    }
    return (3.0 - ((r * w) * (r * (w * 0.25)))) - 4.5;
}
def code(v, w, r):
    """Herbie alternative of the original expression.

    Bug fix: the generated one-liner mashed statements and an if/else onto a
    single line (``tmp = 0 if r <= ...: tmp = ...``), which is a SyntaxError.
    Rewritten as valid multi-line Python; the arithmetic is byte-for-byte the
    same operations in the same order.
    """
    if r <= 8.8e-18:
        # Divide twice instead of forming r*r, which would underflow.
        return (2.0 / r) / r
    return (3.0 - ((r * w) * (r * (w * 0.25)))) - 4.5
function code(v, w, r) tmp = 0.0 if (r <= 8.8e-18) tmp = Float64(Float64(2.0 / r) / r); else tmp = Float64(Float64(3.0 - Float64(Float64(r * w) * Float64(r * Float64(w * 0.25)))) - 4.5); end return tmp end
function tmp_2 = code(v, w, r) tmp = 0.0; if (r <= 8.8e-18) tmp = (2.0 / r) / r; else tmp = (3.0 - ((r * w) * (r * (w * 0.25)))) - 4.5; end tmp_2 = tmp; end
code[v_, w_, r_] := If[LessEqual[r, 8.8e-18], N[(N[(2.0 / r), $MachinePrecision] / r), $MachinePrecision], N[(N[(3.0 - N[(N[(r * w), $MachinePrecision] * N[(r * N[(w * 0.25), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - 4.5), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;r \leq 8.8 \cdot 10^{-18}:\\
\;\;\;\;\frac{\frac{2}{r}}{r}\\
\mathbf{else}:\\
\;\;\;\;\left(3 - \left(r \cdot w\right) \cdot \left(r \cdot \left(w \cdot 0.25\right)\right)\right) - 4.5\\
\end{array}
\end{array}
if r < 8.7999999999999994e-18Initial program 81.8%
Taylor expanded in r around 0
/-lowering-/.f64N/A
unpow2N/A
*-lowering-*.f6461.5%
Simplified61.5%
associate-/r*N/A
/-lowering-/.f64N/A
/-lowering-/.f6461.5%
Applied egg-rr61.5%
if 8.7999999999999994e-18 < r Initial program 87.1%
Taylor expanded in v around inf
*-commutativeN/A
associate-*l*N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6474.9%
Simplified74.9%
Taylor expanded in r around inf
Simplified72.0%
associate-*l*N/A
associate-*r*N/A
associate-*r*N/A
*-commutativeN/A
associate-*r*N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f6483.6%
Applied egg-rr83.6%
Final simplification66.5%
(FPCore (v w r) :precision binary64 (if (<= r 1.95e-23) (/ (/ 2.0 r) r) (- (fma (* w (* r (* r w))) -0.25 3.0) 4.5)))
double code(double v, double w, double r) {
double tmp;
if (r <= 1.95e-23) {
tmp = (2.0 / r) / r;
} else {
tmp = fma((w * (r * (r * w))), -0.25, 3.0) - 4.5;
}
return tmp;
}
function code(v, w, r)
    # Herbie alternative (fma form); v is unused.
    if (r <= 1.95e-23)
        tmp = Float64(Float64(2.0 / r) / r)
    else
        tmp = Float64(fma(Float64(w * Float64(r * Float64(r * w))), -0.25, 3.0) - 4.5)
    end
    return tmp
end
code[v_, w_, r_] := If[LessEqual[r, 1.95e-23], N[(N[(2.0 / r), $MachinePrecision] / r), $MachinePrecision], N[(N[(N[(w * N[(r * N[(r * w), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * -0.25 + 3.0), $MachinePrecision] - 4.5), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;r \leq 1.95 \cdot 10^{-23}:\\
\;\;\;\;\frac{\frac{2}{r}}{r}\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(w \cdot \left(r \cdot \left(r \cdot w\right)\right), -0.25, 3\right) - 4.5\\
\end{array}
\end{array}
if r < 1.95e-23: Initial program 82.1%
Taylor expanded in r around 0
/-lowering-/.f64N/A
unpow2N/A
*-lowering-*.f6461.6%
Simplified61.6%
associate-/r*N/A
/-lowering-/.f64N/A
/-lowering-/.f6461.6%
Applied egg-rr61.6%
if 1.95e-23 < r Initial program 85.9%
Taylor expanded in v around inf
*-commutativeN/A
associate-*l*N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6475.7%
Simplified75.7%
Taylor expanded in r around inf
Simplified71.3%
--lowering--.f64N/A
Applied egg-rr82.1%
(FPCore (v w r) :precision binary64 (if (<= r 1.7e-23) (/ (/ 2.0 r) r) (fma -0.25 (* r (* r (* w w))) -1.5)))
double code(double v, double w, double r) {
double tmp;
if (r <= 1.7e-23) {
tmp = (2.0 / r) / r;
} else {
tmp = fma(-0.25, (r * (r * (w * w))), -1.5);
}
return tmp;
}
function code(v, w, r)
    # Herbie alternative (fma form); v is unused.
    if (r <= 1.7e-23)
        tmp = Float64(Float64(2.0 / r) / r)
    else
        tmp = fma(-0.25, Float64(r * Float64(r * Float64(w * w))), -1.5)
    end
    return tmp
end
code[v_, w_, r_] := If[LessEqual[r, 1.7e-23], N[(N[(2.0 / r), $MachinePrecision] / r), $MachinePrecision], N[(-0.25 * N[(r * N[(r * N[(w * w), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + -1.5), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;r \leq 1.7 \cdot 10^{-23}:\\
\;\;\;\;\frac{\frac{2}{r}}{r}\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(-0.25, r \cdot \left(r \cdot \left(w \cdot w\right)\right), -1.5\right)\\
\end{array}
\end{array}
if r < 1.7e-23: Initial program 82.1%
Taylor expanded in r around 0
/-lowering-/.f64N/A
unpow2N/A
*-lowering-*.f6461.6%
Simplified61.6%
associate-/r*N/A
/-lowering-/.f64N/A
/-lowering-/.f6461.6%
Applied egg-rr61.6%
if 1.7e-23 < r Initial program 85.9%
Taylor expanded in v around inf
*-commutativeN/A
associate-*l*N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6475.7%
Simplified75.7%
Taylor expanded in r around inf
mul-1-negN/A
distribute-rgt-inN/A
distribute-neg-inN/A
associate-*r*N/A
*-commutativeN/A
distribute-lft-neg-inN/A
metadata-evalN/A
unsub-negN/A
associate-*l*N/A
lft-mult-inverseN/A
metadata-evalN/A
sub-negN/A
metadata-evalN/A
accelerator-lowering-fma.f64N/A
Simplified80.0%
(FPCore (v w r) :precision binary64 (if (<= r 2.3e-23) (/ 2.0 (* r r)) (fma -0.25 (* r (* r (* w w))) -1.5)))
double code(double v, double w, double r) {
double tmp;
if (r <= 2.3e-23) {
tmp = 2.0 / (r * r);
} else {
tmp = fma(-0.25, (r * (r * (w * w))), -1.5);
}
return tmp;
}
function code(v, w, r)
    # Herbie alternative (fma form); v is unused.
    if (r <= 2.3e-23)
        tmp = Float64(2.0 / Float64(r * r))
    else
        tmp = fma(-0.25, Float64(r * Float64(r * Float64(w * w))), -1.5)
    end
    return tmp
end
code[v_, w_, r_] := If[LessEqual[r, 2.3e-23], N[(2.0 / N[(r * r), $MachinePrecision]), $MachinePrecision], N[(-0.25 * N[(r * N[(r * N[(w * w), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + -1.5), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;r \leq 2.3 \cdot 10^{-23}:\\
\;\;\;\;\frac{2}{r \cdot r}\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(-0.25, r \cdot \left(r \cdot \left(w \cdot w\right)\right), -1.5\right)\\
\end{array}
\end{array}
if r < 2.3000000000000001e-23: Initial program 82.1%
Taylor expanded in r around 0
/-lowering-/.f64N/A
unpow2N/A
*-lowering-*.f6461.6%
Simplified61.6%
if 2.3000000000000001e-23 < r Initial program 85.9%
Taylor expanded in v around inf
*-commutativeN/A
associate-*l*N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6475.7%
Simplified75.7%
Taylor expanded in r around inf
mul-1-negN/A
distribute-rgt-inN/A
distribute-neg-inN/A
associate-*r*N/A
*-commutativeN/A
distribute-lft-neg-inN/A
metadata-evalN/A
unsub-negN/A
associate-*l*N/A
lft-mult-inverseN/A
metadata-evalN/A
sub-negN/A
metadata-evalN/A
accelerator-lowering-fma.f64N/A
Simplified80.0%
(FPCore (v w r) :precision binary64 (if (<= r 2.15e-23) (/ 2.0 (* r r)) (* -0.25 (* r (* r (* w w))))))
/* Herbie alternative; v is unused. Small r: 2/r^2 dominates;
 * otherwise -r^2 w^2 / 4. */
double code(double v, double w, double r) {
    return (r <= 2.15e-23)
        ? 2.0 / (r * r)
        : -0.25 * (r * (r * (w * w)));
}
! Herbie alternative for Rosa's turbine benchmark.
! v is unused in this approximation (kept for interface compatibility).
real(8) function code(v, w, r)
real(8), intent (in) :: v
real(8), intent (in) :: w
real(8), intent (in) :: r
real(8) :: tmp
! For tiny r the 2/r**2 term dominates the original expression.
if (r <= 2.15d-23) then
tmp = 2.0d0 / (r * r)
else
! Taylor-expanded form for larger r: -r**2 * w**2 / 4.
tmp = (-0.25d0) * (r * (r * (w * w)))
end if
code = tmp
end function
/**
 * Herbie alternative for Rosa's turbine benchmark; v is unused.
 * Small r: the 2/r^2 term dominates; otherwise -r^2 w^2 / 4.
 */
public static double code(double v, double w, double r) {
    return (r <= 2.15e-23)
            ? 2.0 / (r * r)
            : -0.25 * (r * (r * (w * w)));
}
def code(v, w, r):
    """Herbie alternative; v is unused.

    For tiny r the 2/r**2 term dominates; otherwise -r**2 * w**2 / 4.
    """
    if r <= 2.15e-23:
        tmp = 2.0 / (r * r)
    else:
        tmp = -0.25 * (r * (r * (w * w)))
    return tmp
function code(v, w, r)
    # Herbie alternative; v is unused.
    if (r <= 2.15e-23)
        tmp = Float64(2.0 / Float64(r * r))
    else
        tmp = Float64(-0.25 * Float64(r * Float64(r * Float64(w * w))))
    end
    return tmp
end
function tmp_2 = code(v, w, r)
	% Herbie alternative; v is unused.
	tmp = 0.0;
	if (r <= 2.15e-23)
		tmp = 2.0 / (r * r);
	else
		tmp = -0.25 * (r * (r * (w * w)));
	end
	tmp_2 = tmp;
end
code[v_, w_, r_] := If[LessEqual[r, 2.15e-23], N[(2.0 / N[(r * r), $MachinePrecision]), $MachinePrecision], N[(-0.25 * N[(r * N[(r * N[(w * w), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;r \leq 2.15 \cdot 10^{-23}:\\
\;\;\;\;\frac{2}{r \cdot r}\\
\mathbf{else}:\\
\;\;\;\;-0.25 \cdot \left(r \cdot \left(r \cdot \left(w \cdot w\right)\right)\right)\\
\end{array}
\end{array}
if r < 2.15000000000000001e-23: Initial program 82.1%
Taylor expanded in r around 0
/-lowering-/.f64N/A
unpow2N/A
*-lowering-*.f6461.6%
Simplified61.6%
if 2.15000000000000001e-23 < r Initial program 85.9%
Taylor expanded in v around inf
*-commutativeN/A
associate-*l*N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6475.7%
Simplified75.7%
Taylor expanded in r around inf
*-lowering-*.f64N/A
unpow2N/A
associate-*l*N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6459.6%
Simplified59.6%
(FPCore (v w r) :precision binary64 (if (<= r 0.055) (/ 2.0 (* r r)) -1.5))
/* Herbie alternative; v and w are unused. For r above 0.055 the
 * expression is approximated by the constant -1.5. */
double code(double v, double w, double r) {
    return (r <= 0.055) ? 2.0 / (r * r) : -1.5;
}
! Herbie alternative; v and w are unused in this approximation.
real(8) function code(v, w, r)
real(8), intent (in) :: v
real(8), intent (in) :: w
real(8), intent (in) :: r
real(8) :: tmp
! For r above 0.055 the expression is approximated by the constant -1.5.
if (r <= 0.055d0) then
tmp = 2.0d0 / (r * r)
else
tmp = -1.5d0
end if
code = tmp
end function
/**
 * Herbie alternative; v and w are unused. For r above 0.055 the
 * expression is approximated by the constant -1.5.
 */
public static double code(double v, double w, double r) {
    return (r <= 0.055) ? 2.0 / (r * r) : -1.5;
}
def code(v, w, r):
    """Herbie alternative; v and w are unused.

    For r above 0.055 the expression is approximated by the constant -1.5.
    """
    if r <= 0.055:
        tmp = 2.0 / (r * r)
    else:
        tmp = -1.5
    return tmp
function code(v, w, r)
    # Herbie alternative; v and w are unused.
    if (r <= 0.055)
        tmp = Float64(2.0 / Float64(r * r))
    else
        tmp = -1.5
    end
    return tmp
end
function tmp_2 = code(v, w, r)
	% Herbie alternative; v and w are unused.
	tmp = 0.0;
	if (r <= 0.055)
		tmp = 2.0 / (r * r);
	else
		tmp = -1.5;
	end
	tmp_2 = tmp;
end
(* Herbie alternative; v and w are unused. For r above 0.055 the
   expression is approximated by the constant -1.5. *)
code[v_, w_, r_] := If[LessEqual[r, 0.055], N[(2.0 / N[(r * r), $MachinePrecision]), $MachinePrecision], -1.5]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;r \leq 0.055:\\
\;\;\;\;\frac{2}{r \cdot r}\\
\mathbf{else}:\\
\;\;\;\;-1.5\\
\end{array}
\end{array}
if r < 0.0550000000000000003: Initial program 81.5%
Taylor expanded in r around 0
/-lowering-/.f64N/A
unpow2N/A
*-lowering-*.f6461.2%
Simplified61.2%
if 0.0550000000000000003 < r Initial program 88.3%
Taylor expanded in w around 0
sub-negN/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f64N/A
associate-*r/N/A
metadata-evalN/A
/-lowering-/.f64N/A
unpow2N/A
*-lowering-*.f6427.0%
Simplified27.0%
Taylor expanded in r around inf
Simplified25.7%
(FPCore (v w r) :precision binary64 (+ (/ 2.0 (* r r)) -1.5))
/* Herbie alternative: 2/r^2 - 3/2; v and w are unused. */
double code(double v, double w, double r) {
    const double r_sq = r * r;
    return 2.0 / r_sq + -1.5;
}
! Herbie alternative: 2/r**2 - 3/2; v and w are unused.
real(8) function code(v, w, r)
real(8), intent (in) :: v
real(8), intent (in) :: w
real(8), intent (in) :: r
code = (2.0d0 / (r * r)) + (-1.5d0)
end function
/** Herbie alternative: 2/r^2 - 3/2; v and w are unused. */
public static double code(double v, double w, double r) {
    final double rSquared = r * r;
    return 2.0 / rSquared + -1.5;
}
def code(v, w, r):
    """Herbie alternative: 2/r**2 - 3/2; v and w are unused."""
    r_sq = r * r
    return 2.0 / r_sq + -1.5
function code(v, w, r)
    # Herbie alternative: 2/r^2 - 3/2; v and w are unused.
    r_sq = Float64(r * r)
    return Float64(Float64(2.0 / r_sq) + -1.5)
end
function tmp = code(v, w, r)
	% Herbie alternative: 2/r^2 - 3/2; v and w are unused.
	tmp = (2.0 / (r * r)) + -1.5;
end
(* Herbie alternative: 2/r^2 - 3/2; v and w are unused. *)
code[v_, w_, r_] := N[(N[(2.0 / N[(r * r), $MachinePrecision]), $MachinePrecision] + -1.5), $MachinePrecision]
\begin{array}{l}
\\
\frac{2}{r \cdot r} + -1.5
\end{array}
Initial program 83.0%
Taylor expanded in w around 0
sub-negN/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f64N/A
associate-*r/N/A
metadata-evalN/A
/-lowering-/.f64N/A
unpow2N/A
*-lowering-*.f6459.6%
Simplified59.6%
Final simplification59.6%
(FPCore (v w r) :precision binary64 -1.5)
/* Degenerate Herbie alternative: after Taylor expansion in w and r the
 * whole expression collapses to the constant -1.5; all parameters unused. */
double code(double v, double w, double r) {
return -1.5;
}
! Degenerate Herbie alternative: the expression collapses to the constant
! -1.5; all parameters are unused (kept for interface compatibility).
real(8) function code(v, w, r)
real(8), intent (in) :: v
real(8), intent (in) :: w
real(8), intent (in) :: r
code = -1.5d0
end function
/** Degenerate Herbie alternative: constant -1.5; all parameters unused. */
public static double code(double v, double w, double r) {
return -1.5;
}
# Degenerate Herbie alternative: the expression collapses to the constant
# -1.5; all parameters are unused (kept for interface compatibility).
def code(v, w, r): return -1.5
# Degenerate Herbie alternative: constant -1.5; all parameters unused.
function code(v, w, r) return -1.5 end
function tmp = code(v, w, r)
	% Degenerate Herbie alternative: constant -1.5; all parameters unused.
	tmp = -1.5;
end
(* Degenerate Herbie alternative: constant -1.5; all parameters unused. *)
code[v_, w_, r_] := -1.5
\begin{array}{l}
\\
-1.5
\end{array}
Initial program 83.0%
Taylor expanded in w around 0
sub-negN/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f64N/A
associate-*r/N/A
metadata-evalN/A
/-lowering-/.f64N/A
unpow2N/A
*-lowering-*.f6459.6%
Simplified59.6%
Taylor expanded in r around inf
Simplified12.5%
herbie shell --seed 2024193
(FPCore (v w r)
:name "Rosa's TurbineBenchmark"
:precision binary64
(- (- (+ 3.0 (/ 2.0 (* r r))) (/ (* (* 0.125 (- 3.0 (* 2.0 v))) (* (* (* w w) r) r)) (- 1.0 v))) 4.5))