
(FPCore (v w r) :precision binary64 (- (- (+ 3.0 (/ 2.0 (* r r))) (/ (* (* 0.125 (- 3.0 (* 2.0 v))) (* (* (* w w) r) r)) (- 1.0 v))) 4.5))
/* Reference expression from the FPCore spec above:
 *   (3 + 2/r^2) - (0.125*(3 - 2v) * w^2 * r^2)/(1 - v) - 4.5
 * evaluated in binary64. The operation order is reproduced exactly so the
 * rounding matches the spec; only the code shape differs. */
double code(double v, double w, double r) {
  const double recip = 2.0 / (r * r);                 /* 2/r^2 term */
  const double coeff = 0.125 * (3.0 - (2.0 * v));     /* 0.125*(3 - 2v) */
  const double wwrr = ((w * w) * r) * r;              /* w^2 * r^2, left-assoc */
  return ((3.0 + recip) - ((coeff * wwrr) / (1.0 - v))) - 4.5;
}
! Reference expression (3 + 2/r**2) - (0.125*(3 - 2*v)*w**2*r**2)/(1 - v) - 4.5
! evaluated in real(8). Generated from the FPCore spec above; the exact
! operation order matters for binary64 reproducibility, so do not refactor.
real(8) function code(v, w, r)
real(8), intent (in) :: v
real(8), intent (in) :: w
real(8), intent (in) :: r
code = ((3.0d0 + (2.0d0 / (r * r))) - (((0.125d0 * (3.0d0 - (2.0d0 * v))) * (((w * w) * r) * r)) / (1.0d0 - v))) - 4.5d0
end function
/**
 * Reference expression (3 + 2/r^2) - (0.125*(3 - 2v) * w^2 * r^2)/(1 - v) - 4.5
 * evaluated in binary64. Operation order mirrors the FPCore spec exactly so
 * rounding is unchanged; only the code shape differs.
 */
public static double code(double v, double w, double r) {
  double recip = 2.0 / (r * r);               // 2/r^2 term
  double coeff = 0.125 * (3.0 - (2.0 * v));   // 0.125*(3 - 2v)
  double wwrr = ((w * w) * r) * r;            // w^2 * r^2, left-associated
  return ((3.0 + recip) - ((coeff * wwrr) / (1.0 - v))) - 4.5;
}
def code(v, w, r): return ((3.0 + (2.0 / (r * r))) - (((0.125 * (3.0 - (2.0 * v))) * (((w * w) * r) * r)) / (1.0 - v))) - 4.5
function code(v, w, r) return Float64(Float64(Float64(3.0 + Float64(2.0 / Float64(r * r))) - Float64(Float64(Float64(0.125 * Float64(3.0 - Float64(2.0 * v))) * Float64(Float64(Float64(w * w) * r) * r)) / Float64(1.0 - v))) - 4.5) end
function tmp = code(v, w, r) tmp = ((3.0 + (2.0 / (r * r))) - (((0.125 * (3.0 - (2.0 * v))) * (((w * w) * r) * r)) / (1.0 - v))) - 4.5; end
code[v_, w_, r_] := N[(N[(N[(3.0 + N[(2.0 / N[(r * r), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(N[(N[(0.125 * N[(3.0 - N[(2.0 * v), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(N[(N[(w * w), $MachinePrecision] * r), $MachinePrecision] * r), $MachinePrecision]), $MachinePrecision] / N[(1.0 - v), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - 4.5), $MachinePrecision]
\begin{array}{l}
\\
\left(\left(3 + \frac{2}{r \cdot r}\right) - \frac{\left(0.125 \cdot \left(3 - 2 \cdot v\right)\right) \cdot \left(\left(\left(w \cdot w\right) \cdot r\right) \cdot r\right)}{1 - v}\right) - 4.5
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 24 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (v w r) :precision binary64 (- (- (+ 3.0 (/ 2.0 (* r r))) (/ (* (* 0.125 (- 3.0 (* 2.0 v))) (* (* (* w w) r) r)) (- 1.0 v))) 4.5))
/* Duplicate of the reference implementation above (the report repeats the
 * original program before listing alternatives). Evaluates
 * (3 + 2/r^2) - (0.125*(3 - 2v)*w^2*r^2)/(1 - v) - 4.5 in binary64. */
double code(double v, double w, double r) {
return ((3.0 + (2.0 / (r * r))) - (((0.125 * (3.0 - (2.0 * v))) * (((w * w) * r) * r)) / (1.0 - v))) - 4.5;
}
real(8) function code(v, w, r)
real(8), intent (in) :: v
real(8), intent (in) :: w
real(8), intent (in) :: r
code = ((3.0d0 + (2.0d0 / (r * r))) - (((0.125d0 * (3.0d0 - (2.0d0 * v))) * (((w * w) * r) * r)) / (1.0d0 - v))) - 4.5d0
end function
/**
 * Duplicate of the reference implementation above (the report repeats the
 * original program before listing alternatives). Evaluates
 * (3 + 2/r^2) - (0.125*(3 - 2v)*w^2*r^2)/(1 - v) - 4.5 in binary64.
 */
public static double code(double v, double w, double r) {
return ((3.0 + (2.0 / (r * r))) - (((0.125 * (3.0 - (2.0 * v))) * (((w * w) * r) * r)) / (1.0 - v))) - 4.5;
}
def code(v, w, r): return ((3.0 + (2.0 / (r * r))) - (((0.125 * (3.0 - (2.0 * v))) * (((w * w) * r) * r)) / (1.0 - v))) - 4.5
function code(v, w, r) return Float64(Float64(Float64(3.0 + Float64(2.0 / Float64(r * r))) - Float64(Float64(Float64(0.125 * Float64(3.0 - Float64(2.0 * v))) * Float64(Float64(Float64(w * w) * r) * r)) / Float64(1.0 - v))) - 4.5) end
function tmp = code(v, w, r) tmp = ((3.0 + (2.0 / (r * r))) - (((0.125 * (3.0 - (2.0 * v))) * (((w * w) * r) * r)) / (1.0 - v))) - 4.5; end
code[v_, w_, r_] := N[(N[(N[(3.0 + N[(2.0 / N[(r * r), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(N[(N[(0.125 * N[(3.0 - N[(2.0 * v), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(N[(N[(w * w), $MachinePrecision] * r), $MachinePrecision] * r), $MachinePrecision]), $MachinePrecision] / N[(1.0 - v), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - 4.5), $MachinePrecision]
\begin{array}{l}
\\
\left(\left(3 + \frac{2}{r \cdot r}\right) - \frac{\left(0.125 \cdot \left(3 - 2 \cdot v\right)\right) \cdot \left(\left(\left(w \cdot w\right) \cdot r\right) \cdot r\right)}{1 - v}\right) - 4.5
\end{array}
r_m = (fabs.f64 r)
(FPCore (v w r_m)
:precision binary64
(let* ((t_0 (+ 0.375 (* v -0.25))))
(if (<= r_m 1.4e-10)
(+
(/ 2.0 (* r_m r_m))
(+ -1.5 (* t_0 (/ (* w (* (* r_m r_m) w)) (+ v -1.0)))))
(- (+ 3.0 (* t_0 (/ (* (* r_m w) (* r_m w)) (+ v -1.0)))) 4.5))))r_m = fabs(r);
/* Herbie rewrite of the reference expression, split on r_m = |r| (computed by
 * the caller). The 1.4e-10 threshold comes from Herbie's sampled accuracy
 * analysis (see derivation log). Arithmetic is reproduced operation-for-
 * operation; only the control-flow shape differs. */
double code(double v, double w, double r_m) {
  double t_0 = 0.375 + (v * -0.25);  /* 0.125*(3 - 2v) folded */
  if (r_m <= 1.4e-10) {
    /* tiny-|r| regime: the dominant 2/r^2 term is kept separate */
    return (2.0 / (r_m * r_m)) + (-1.5 + (t_0 * ((w * ((r_m * r_m) * w)) / (v + -1.0))));
  }
  return (3.0 + (t_0 * (((r_m * w) * (r_m * w)) / (v + -1.0)))) - 4.5;
}
r_m = abs(r)
real(8) function code(v, w, r_m)
real(8), intent (in) :: v
real(8), intent (in) :: w
real(8), intent (in) :: r_m
real(8) :: t_0
real(8) :: tmp
t_0 = 0.375d0 + (v * (-0.25d0))
if (r_m <= 1.4d-10) then
tmp = (2.0d0 / (r_m * r_m)) + ((-1.5d0) + (t_0 * ((w * ((r_m * r_m) * w)) / (v + (-1.0d0)))))
else
tmp = (3.0d0 + (t_0 * (((r_m * w) * (r_m * w)) / (v + (-1.0d0))))) - 4.5d0
end if
code = tmp
end function
r_m = Math.abs(r);
/**
 * Herbie rewrite of the reference expression, split on r_m = |r| (computed by
 * the caller). t_0 folds 0.125*(3 - 2v); the 1.4e-10 threshold comes from
 * Herbie's sampled accuracy analysis (see derivation log below). In the
 * tiny-|r| regime the dominant 2/r^2 term is kept separate.
 */
public static double code(double v, double w, double r_m) {
double t_0 = 0.375 + (v * -0.25);
double tmp;
if (r_m <= 1.4e-10) {
tmp = (2.0 / (r_m * r_m)) + (-1.5 + (t_0 * ((w * ((r_m * r_m) * w)) / (v + -1.0))));
} else {
tmp = (3.0 + (t_0 * (((r_m * w) * (r_m * w)) / (v + -1.0)))) - 4.5;
}
return tmp;
}
r_m = math.fabs(r) def code(v, w, r_m): t_0 = 0.375 + (v * -0.25) tmp = 0 if r_m <= 1.4e-10: tmp = (2.0 / (r_m * r_m)) + (-1.5 + (t_0 * ((w * ((r_m * r_m) * w)) / (v + -1.0)))) else: tmp = (3.0 + (t_0 * (((r_m * w) * (r_m * w)) / (v + -1.0)))) - 4.5 return tmp
r_m = abs(r) function code(v, w, r_m) t_0 = Float64(0.375 + Float64(v * -0.25)) tmp = 0.0 if (r_m <= 1.4e-10) tmp = Float64(Float64(2.0 / Float64(r_m * r_m)) + Float64(-1.5 + Float64(t_0 * Float64(Float64(w * Float64(Float64(r_m * r_m) * w)) / Float64(v + -1.0))))); else tmp = Float64(Float64(3.0 + Float64(t_0 * Float64(Float64(Float64(r_m * w) * Float64(r_m * w)) / Float64(v + -1.0)))) - 4.5); end return tmp end
r_m = abs(r); function tmp_2 = code(v, w, r_m) t_0 = 0.375 + (v * -0.25); tmp = 0.0; if (r_m <= 1.4e-10) tmp = (2.0 / (r_m * r_m)) + (-1.5 + (t_0 * ((w * ((r_m * r_m) * w)) / (v + -1.0)))); else tmp = (3.0 + (t_0 * (((r_m * w) * (r_m * w)) / (v + -1.0)))) - 4.5; end tmp_2 = tmp; end
r_m = N[Abs[r], $MachinePrecision]
code[v_, w_, r$95$m_] := Block[{t$95$0 = N[(0.375 + N[(v * -0.25), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[r$95$m, 1.4e-10], N[(N[(2.0 / N[(r$95$m * r$95$m), $MachinePrecision]), $MachinePrecision] + N[(-1.5 + N[(t$95$0 * N[(N[(w * N[(N[(r$95$m * r$95$m), $MachinePrecision] * w), $MachinePrecision]), $MachinePrecision] / N[(v + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(3.0 + N[(t$95$0 * N[(N[(N[(r$95$m * w), $MachinePrecision] * N[(r$95$m * w), $MachinePrecision]), $MachinePrecision] / N[(v + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - 4.5), $MachinePrecision]]]
\begin{array}{l}
r_m = \left|r\right|
\\
\begin{array}{l}
t_0 := 0.375 + v \cdot -0.25\\
\mathbf{if}\;r\_m \leq 1.4 \cdot 10^{-10}:\\
\;\;\;\;\frac{2}{r\_m \cdot r\_m} + \left(-1.5 + t\_0 \cdot \frac{w \cdot \left(\left(r\_m \cdot r\_m\right) \cdot w\right)}{v + -1}\right)\\
\mathbf{else}:\\
\;\;\;\;\left(3 + t\_0 \cdot \frac{\left(r\_m \cdot w\right) \cdot \left(r\_m \cdot w\right)}{v + -1}\right) - 4.5\\
\end{array}
\end{array}
if r < 1.40000000000000008e-10: Initial program 86.0%
associate--l-N/A
+-commutativeN/A
associate--l+N/A
+-lowering-+.f64N/A
/-lowering-/.f64N/A
*-lowering-*.f64N/A
associate--r+N/A
sub-negN/A
+-commutativeN/A
associate--l+N/A
metadata-evalN/A
metadata-evalN/A
metadata-evalN/A
Simplified97.1%
associate-*r*N/A
swap-sqrN/A
*-commutativeN/A
associate-*l*N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f6495.6%
Applied egg-rr95.6%
if 1.40000000000000008e-10 < r Initial program 91.3%
Taylor expanded in r around inf
Simplified91.3%
associate-/l*N/A
*-commutativeN/A
*-lowering-*.f64N/A
associate-*l*N/A
*-commutativeN/A
swap-sqrN/A
/-lowering-/.f64N/A
associate-*l*N/A
*-commutativeN/A
associate-*l*N/A
*-lowering-*.f64N/A
associate-*l*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
--lowering--.f64N/A
sub-negN/A
distribute-lft-inN/A
metadata-evalN/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
Applied egg-rr99.8%
associate-*r*N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f6499.8%
Applied egg-rr99.8%
*-commutativeN/A
associate-*l*N/A
metadata-evalN/A
metadata-evalN/A
*-lowering-*.f64N/A
metadata-eval99.8%
Applied egg-rr99.8%
Final simplification96.8%
r_m = (fabs.f64 r)
(FPCore (v w r_m)
:precision binary64
(if (<= r_m 1.4e-10)
(+ (/ (/ 2.0 r_m) r_m) (+ -1.5 (* (* r_m w) (* (* r_m w) -0.375))))
(if (<= r_m 1.38e+165)
(-
(+ 3.0 (* (* r_m (+ 0.375 (* v -0.25))) (/ (* r_m (* w w)) (+ v -1.0))))
4.5)
(+ -1.5 (* r_m (* (* r_m w) (* -0.25 w)))))))r_m = fabs(r);
/* Herbie rewrite with three |r| regimes (r_m = |r|, computed by the caller);
 * thresholds come from Herbie's sampled accuracy analysis. Arithmetic is
 * preserved op-for-op; only the control-flow shape differs. Note v is used
 * only in the middle regime. */
double code(double v, double w, double r_m) {
  if (r_m <= 1.4e-10) {
    return ((2.0 / r_m) / r_m) + (-1.5 + ((r_m * w) * ((r_m * w) * -0.375)));
  }
  if (r_m <= 1.38e+165) {
    return (3.0 + ((r_m * (0.375 + (v * -0.25))) * ((r_m * (w * w)) / (v + -1.0)))) - 4.5;
  }
  return -1.5 + (r_m * ((r_m * w) * (-0.25 * w)));
}
r_m = abs(r)
real(8) function code(v, w, r_m)
real(8), intent (in) :: v
real(8), intent (in) :: w
real(8), intent (in) :: r_m
real(8) :: tmp
if (r_m <= 1.4d-10) then
tmp = ((2.0d0 / r_m) / r_m) + ((-1.5d0) + ((r_m * w) * ((r_m * w) * (-0.375d0))))
else if (r_m <= 1.38d+165) then
tmp = (3.0d0 + ((r_m * (0.375d0 + (v * (-0.25d0)))) * ((r_m * (w * w)) / (v + (-1.0d0))))) - 4.5d0
else
tmp = (-1.5d0) + (r_m * ((r_m * w) * ((-0.25d0) * w)))
end if
code = tmp
end function
r_m = Math.abs(r);
/**
 * Herbie rewrite with three |r| regimes (r_m = |r|, computed by the caller);
 * the 1.4e-10 and 1.38e165 thresholds come from Herbie's sampled accuracy
 * analysis (see derivation log below). Note v is only used in the middle
 * regime.
 */
public static double code(double v, double w, double r_m) {
double tmp;
if (r_m <= 1.4e-10) {
tmp = ((2.0 / r_m) / r_m) + (-1.5 + ((r_m * w) * ((r_m * w) * -0.375)));
} else if (r_m <= 1.38e+165) {
tmp = (3.0 + ((r_m * (0.375 + (v * -0.25))) * ((r_m * (w * w)) / (v + -1.0)))) - 4.5;
} else {
tmp = -1.5 + (r_m * ((r_m * w) * (-0.25 * w)));
}
return tmp;
}
r_m = math.fabs(r) def code(v, w, r_m): tmp = 0 if r_m <= 1.4e-10: tmp = ((2.0 / r_m) / r_m) + (-1.5 + ((r_m * w) * ((r_m * w) * -0.375))) elif r_m <= 1.38e+165: tmp = (3.0 + ((r_m * (0.375 + (v * -0.25))) * ((r_m * (w * w)) / (v + -1.0)))) - 4.5 else: tmp = -1.5 + (r_m * ((r_m * w) * (-0.25 * w))) return tmp
r_m = abs(r) function code(v, w, r_m) tmp = 0.0 if (r_m <= 1.4e-10) tmp = Float64(Float64(Float64(2.0 / r_m) / r_m) + Float64(-1.5 + Float64(Float64(r_m * w) * Float64(Float64(r_m * w) * -0.375)))); elseif (r_m <= 1.38e+165) tmp = Float64(Float64(3.0 + Float64(Float64(r_m * Float64(0.375 + Float64(v * -0.25))) * Float64(Float64(r_m * Float64(w * w)) / Float64(v + -1.0)))) - 4.5); else tmp = Float64(-1.5 + Float64(r_m * Float64(Float64(r_m * w) * Float64(-0.25 * w)))); end return tmp end
r_m = abs(r); function tmp_2 = code(v, w, r_m) tmp = 0.0; if (r_m <= 1.4e-10) tmp = ((2.0 / r_m) / r_m) + (-1.5 + ((r_m * w) * ((r_m * w) * -0.375))); elseif (r_m <= 1.38e+165) tmp = (3.0 + ((r_m * (0.375 + (v * -0.25))) * ((r_m * (w * w)) / (v + -1.0)))) - 4.5; else tmp = -1.5 + (r_m * ((r_m * w) * (-0.25 * w))); end tmp_2 = tmp; end
r_m = N[Abs[r], $MachinePrecision] code[v_, w_, r$95$m_] := If[LessEqual[r$95$m, 1.4e-10], N[(N[(N[(2.0 / r$95$m), $MachinePrecision] / r$95$m), $MachinePrecision] + N[(-1.5 + N[(N[(r$95$m * w), $MachinePrecision] * N[(N[(r$95$m * w), $MachinePrecision] * -0.375), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], If[LessEqual[r$95$m, 1.38e+165], N[(N[(3.0 + N[(N[(r$95$m * N[(0.375 + N[(v * -0.25), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(N[(r$95$m * N[(w * w), $MachinePrecision]), $MachinePrecision] / N[(v + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - 4.5), $MachinePrecision], N[(-1.5 + N[(r$95$m * N[(N[(r$95$m * w), $MachinePrecision] * N[(-0.25 * w), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
r_m = \left|r\right|
\\
\begin{array}{l}
\mathbf{if}\;r\_m \leq 1.4 \cdot 10^{-10}:\\
\;\;\;\;\frac{\frac{2}{r\_m}}{r\_m} + \left(-1.5 + \left(r\_m \cdot w\right) \cdot \left(\left(r\_m \cdot w\right) \cdot -0.375\right)\right)\\
\mathbf{elif}\;r\_m \leq 1.38 \cdot 10^{+165}:\\
\;\;\;\;\left(3 + \left(r\_m \cdot \left(0.375 + v \cdot -0.25\right)\right) \cdot \frac{r\_m \cdot \left(w \cdot w\right)}{v + -1}\right) - 4.5\\
\mathbf{else}:\\
\;\;\;\;-1.5 + r\_m \cdot \left(\left(r\_m \cdot w\right) \cdot \left(-0.25 \cdot w\right)\right)\\
\end{array}
\end{array}
if r < 1.40000000000000008e-10Initial program 86.0%
associate--l-N/A
+-commutativeN/A
associate--l+N/A
+-lowering-+.f64N/A
/-lowering-/.f64N/A
*-lowering-*.f64N/A
associate--r+N/A
sub-negN/A
+-commutativeN/A
associate--l+N/A
metadata-evalN/A
metadata-evalN/A
metadata-evalN/A
Simplified97.1%
Taylor expanded in v around 0
*-commutativeN/A
associate-*l*N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6481.5%
Simplified81.5%
associate-*r*N/A
swap-sqrN/A
*-lowering-*.f64N/A
associate-*l*N/A
*-commutativeN/A
associate-*l*N/A
*-lowering-*.f64N/A
associate-*l*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-lowering-*.f6492.0%
Applied egg-rr92.0%
+-lowering-+.f64N/A
associate-/r*N/A
/-lowering-/.f64N/A
/-lowering-/.f64N/A
+-commutativeN/A
+-lowering-+.f64N/A
associate-*r*N/A
associate-*l*N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f6494.1%
Applied egg-rr94.1%
if 1.40000000000000008e-10 < r < 1.37999999999999997e165Initial program 99.8%
Taylor expanded in r around inf
Simplified99.8%
associate-/l*N/A
*-commutativeN/A
*-lowering-*.f64N/A
associate-*l*N/A
*-commutativeN/A
swap-sqrN/A
/-lowering-/.f64N/A
associate-*l*N/A
*-commutativeN/A
associate-*l*N/A
*-lowering-*.f64N/A
associate-*l*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
--lowering--.f64N/A
sub-negN/A
distribute-lft-inN/A
metadata-evalN/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
Applied egg-rr99.8%
*-commutativeN/A
associate-/l*N/A
associate-*r*N/A
*-commutativeN/A
associate-*l*N/A
metadata-evalN/A
*-lowering-*.f64N/A
*-commutativeN/A
metadata-evalN/A
associate-*l*N/A
*-commutativeN/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-commutativeN/A
associate-*l*N/A
metadata-evalN/A
*-lowering-*.f64N/A
/-lowering-/.f64N/A
Applied egg-rr94.2%
if 1.37999999999999997e165 < r Initial program 82.8%
Taylor expanded in r around inf
Simplified82.8%
Taylor expanded in v around inf
mul-1-negN/A
distribute-neg-inN/A
metadata-evalN/A
distribute-lft-neg-inN/A
metadata-evalN/A
+-lowering-+.f64N/A
*-commutativeN/A
associate-*l*N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6470.3%
Simplified70.3%
associate-*l*N/A
*-commutativeN/A
*-lowering-*.f64N/A
associate-*l*N/A
associate-*r*N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f6493.2%
Applied egg-rr93.2%
Final simplification94.0%
r_m = (fabs.f64 r)
(FPCore (v w r_m)
:precision binary64
(if (<= r_m 1.15e-100)
(/ (/ 2.0 r_m) r_m)
(if (<= r_m 1.6e+100)
(+ (/ 2.0 (* r_m r_m)) (+ -1.5 (* (* r_m r_m) (* -0.375 (* w w)))))
(+ -1.5 (* r_m (* (* r_m w) (* -0.25 w)))))))r_m = fabs(r);
/* Herbie rewrite with three |r| regimes (r_m = |r|, computed by the caller);
 * thresholds from Herbie's sampled accuracy analysis. This alternative drops
 * the dependence on v entirely (Taylor expansion in v — see derivation log);
 * the parameter is kept for interface compatibility. Arithmetic preserved
 * op-for-op; only the control-flow shape differs. */
double code(double v, double w, double r_m) {
  if (r_m <= 1.15e-100) {
    return (2.0 / r_m) / r_m;  /* division split to avoid overflow in r_m*r_m */
  }
  if (r_m <= 1.6e+100) {
    return (2.0 / (r_m * r_m)) + (-1.5 + ((r_m * r_m) * (-0.375 * (w * w))));
  }
  return -1.5 + (r_m * ((r_m * w) * (-0.25 * w)));
}
r_m = abs(r)
real(8) function code(v, w, r_m)
real(8), intent (in) :: v
real(8), intent (in) :: w
real(8), intent (in) :: r_m
real(8) :: tmp
if (r_m <= 1.15d-100) then
tmp = (2.0d0 / r_m) / r_m
else if (r_m <= 1.6d+100) then
tmp = (2.0d0 / (r_m * r_m)) + ((-1.5d0) + ((r_m * r_m) * ((-0.375d0) * (w * w))))
else
tmp = (-1.5d0) + (r_m * ((r_m * w) * ((-0.25d0) * w)))
end if
code = tmp
end function
r_m = Math.abs(r);
/**
 * Herbie rewrite with three |r| regimes (r_m = |r|, computed by the caller);
 * thresholds from Herbie's sampled accuracy analysis. This alternative does
 * not use v at all (Taylor expansion in v — see derivation log below); the
 * parameter is kept for interface compatibility. In the tiny regime the
 * division is split as (2/r)/r, which avoids overflow in r_m*r_m.
 */
public static double code(double v, double w, double r_m) {
double tmp;
if (r_m <= 1.15e-100) {
tmp = (2.0 / r_m) / r_m;
} else if (r_m <= 1.6e+100) {
tmp = (2.0 / (r_m * r_m)) + (-1.5 + ((r_m * r_m) * (-0.375 * (w * w))));
} else {
tmp = -1.5 + (r_m * ((r_m * w) * (-0.25 * w)));
}
return tmp;
}
r_m = math.fabs(r) def code(v, w, r_m): tmp = 0 if r_m <= 1.15e-100: tmp = (2.0 / r_m) / r_m elif r_m <= 1.6e+100: tmp = (2.0 / (r_m * r_m)) + (-1.5 + ((r_m * r_m) * (-0.375 * (w * w)))) else: tmp = -1.5 + (r_m * ((r_m * w) * (-0.25 * w))) return tmp
r_m = abs(r) function code(v, w, r_m) tmp = 0.0 if (r_m <= 1.15e-100) tmp = Float64(Float64(2.0 / r_m) / r_m); elseif (r_m <= 1.6e+100) tmp = Float64(Float64(2.0 / Float64(r_m * r_m)) + Float64(-1.5 + Float64(Float64(r_m * r_m) * Float64(-0.375 * Float64(w * w))))); else tmp = Float64(-1.5 + Float64(r_m * Float64(Float64(r_m * w) * Float64(-0.25 * w)))); end return tmp end
r_m = abs(r); function tmp_2 = code(v, w, r_m) tmp = 0.0; if (r_m <= 1.15e-100) tmp = (2.0 / r_m) / r_m; elseif (r_m <= 1.6e+100) tmp = (2.0 / (r_m * r_m)) + (-1.5 + ((r_m * r_m) * (-0.375 * (w * w)))); else tmp = -1.5 + (r_m * ((r_m * w) * (-0.25 * w))); end tmp_2 = tmp; end
r_m = N[Abs[r], $MachinePrecision] code[v_, w_, r$95$m_] := If[LessEqual[r$95$m, 1.15e-100], N[(N[(2.0 / r$95$m), $MachinePrecision] / r$95$m), $MachinePrecision], If[LessEqual[r$95$m, 1.6e+100], N[(N[(2.0 / N[(r$95$m * r$95$m), $MachinePrecision]), $MachinePrecision] + N[(-1.5 + N[(N[(r$95$m * r$95$m), $MachinePrecision] * N[(-0.375 * N[(w * w), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(-1.5 + N[(r$95$m * N[(N[(r$95$m * w), $MachinePrecision] * N[(-0.25 * w), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
r_m = \left|r\right|
\\
\begin{array}{l}
\mathbf{if}\;r\_m \leq 1.15 \cdot 10^{-100}:\\
\;\;\;\;\frac{\frac{2}{r\_m}}{r\_m}\\
\mathbf{elif}\;r\_m \leq 1.6 \cdot 10^{+100}:\\
\;\;\;\;\frac{2}{r\_m \cdot r\_m} + \left(-1.5 + \left(r\_m \cdot r\_m\right) \cdot \left(-0.375 \cdot \left(w \cdot w\right)\right)\right)\\
\mathbf{else}:\\
\;\;\;\;-1.5 + r\_m \cdot \left(\left(r\_m \cdot w\right) \cdot \left(-0.25 \cdot w\right)\right)\\
\end{array}
\end{array}
if r < 1.14999999999999997e-100Initial program 85.6%
associate--l-N/A
+-commutativeN/A
associate--l+N/A
+-lowering-+.f64N/A
/-lowering-/.f64N/A
*-lowering-*.f64N/A
associate--r+N/A
sub-negN/A
+-commutativeN/A
associate--l+N/A
metadata-evalN/A
metadata-evalN/A
metadata-evalN/A
Simplified97.0%
Taylor expanded in r around 0
/-lowering-/.f64N/A
unpow2N/A
*-lowering-*.f6457.6%
Simplified57.6%
associate-/r*N/A
/-lowering-/.f64N/A
/-lowering-/.f6457.7%
Applied egg-rr57.7%
if 1.14999999999999997e-100 < r < 1.5999999999999999e100Initial program 96.9%
associate--l-N/A
+-commutativeN/A
associate--l+N/A
+-lowering-+.f64N/A
/-lowering-/.f64N/A
*-lowering-*.f64N/A
associate--r+N/A
sub-negN/A
+-commutativeN/A
associate--l+N/A
metadata-evalN/A
metadata-evalN/A
metadata-evalN/A
Simplified99.6%
Taylor expanded in v around 0
*-commutativeN/A
associate-*l*N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6490.9%
Simplified90.9%
if 1.5999999999999999e100 < r Initial program 87.6%
Taylor expanded in r around inf
Simplified87.6%
Taylor expanded in v around inf
mul-1-negN/A
distribute-neg-inN/A
metadata-evalN/A
distribute-lft-neg-inN/A
metadata-evalN/A
+-lowering-+.f64N/A
*-commutativeN/A
associate-*l*N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6474.8%
Simplified74.8%
associate-*l*N/A
*-commutativeN/A
*-lowering-*.f64N/A
associate-*l*N/A
associate-*r*N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f6491.9%
Applied egg-rr91.9%
Final simplification68.8%
r_m = (fabs.f64 r)
(FPCore (v w r_m)
:precision binary64
(if (<= r_m 9.2e-11)
(+ (/ (/ 2.0 r_m) r_m) (+ -1.5 (* (* r_m w) (* (* r_m w) -0.375))))
(-
(+ 3.0 (* (+ 0.375 (* v -0.25)) (/ (* (* r_m w) (* r_m w)) (+ v -1.0))))
4.5)))r_m = fabs(r);
/* Herbie rewrite split on r_m = |r| (computed by the caller); the 9.2e-11
 * threshold comes from Herbie's sampled accuracy analysis. Arithmetic is
 * preserved op-for-op; only the control-flow shape differs. */
double code(double v, double w, double r_m) {
  if (r_m <= 9.2e-11) {
    /* tiny-|r| regime: (2/r)/r keeps the dominant term overflow-safe */
    return ((2.0 / r_m) / r_m) + (-1.5 + ((r_m * w) * ((r_m * w) * -0.375)));
  }
  return (3.0 + ((0.375 + (v * -0.25)) * (((r_m * w) * (r_m * w)) / (v + -1.0)))) - 4.5;
}
r_m = abs(r)
real(8) function code(v, w, r_m)
real(8), intent (in) :: v
real(8), intent (in) :: w
real(8), intent (in) :: r_m
real(8) :: tmp
if (r_m <= 9.2d-11) then
tmp = ((2.0d0 / r_m) / r_m) + ((-1.5d0) + ((r_m * w) * ((r_m * w) * (-0.375d0))))
else
tmp = (3.0d0 + ((0.375d0 + (v * (-0.25d0))) * (((r_m * w) * (r_m * w)) / (v + (-1.0d0))))) - 4.5d0
end if
code = tmp
end function
r_m = Math.abs(r);
public static double code(double v, double w, double r_m) {
double tmp;
if (r_m <= 9.2e-11) {
tmp = ((2.0 / r_m) / r_m) + (-1.5 + ((r_m * w) * ((r_m * w) * -0.375)));
} else {
tmp = (3.0 + ((0.375 + (v * -0.25)) * (((r_m * w) * (r_m * w)) / (v + -1.0)))) - 4.5;
}
return tmp;
}
r_m = math.fabs(r) def code(v, w, r_m): tmp = 0 if r_m <= 9.2e-11: tmp = ((2.0 / r_m) / r_m) + (-1.5 + ((r_m * w) * ((r_m * w) * -0.375))) else: tmp = (3.0 + ((0.375 + (v * -0.25)) * (((r_m * w) * (r_m * w)) / (v + -1.0)))) - 4.5 return tmp
r_m = abs(r) function code(v, w, r_m) tmp = 0.0 if (r_m <= 9.2e-11) tmp = Float64(Float64(Float64(2.0 / r_m) / r_m) + Float64(-1.5 + Float64(Float64(r_m * w) * Float64(Float64(r_m * w) * -0.375)))); else tmp = Float64(Float64(3.0 + Float64(Float64(0.375 + Float64(v * -0.25)) * Float64(Float64(Float64(r_m * w) * Float64(r_m * w)) / Float64(v + -1.0)))) - 4.5); end return tmp end
r_m = abs(r); function tmp_2 = code(v, w, r_m) tmp = 0.0; if (r_m <= 9.2e-11) tmp = ((2.0 / r_m) / r_m) + (-1.5 + ((r_m * w) * ((r_m * w) * -0.375))); else tmp = (3.0 + ((0.375 + (v * -0.25)) * (((r_m * w) * (r_m * w)) / (v + -1.0)))) - 4.5; end tmp_2 = tmp; end
r_m = N[Abs[r], $MachinePrecision] code[v_, w_, r$95$m_] := If[LessEqual[r$95$m, 9.2e-11], N[(N[(N[(2.0 / r$95$m), $MachinePrecision] / r$95$m), $MachinePrecision] + N[(-1.5 + N[(N[(r$95$m * w), $MachinePrecision] * N[(N[(r$95$m * w), $MachinePrecision] * -0.375), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(3.0 + N[(N[(0.375 + N[(v * -0.25), $MachinePrecision]), $MachinePrecision] * N[(N[(N[(r$95$m * w), $MachinePrecision] * N[(r$95$m * w), $MachinePrecision]), $MachinePrecision] / N[(v + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - 4.5), $MachinePrecision]]
\begin{array}{l}
r_m = \left|r\right|
\\
\begin{array}{l}
\mathbf{if}\;r\_m \leq 9.2 \cdot 10^{-11}:\\
\;\;\;\;\frac{\frac{2}{r\_m}}{r\_m} + \left(-1.5 + \left(r\_m \cdot w\right) \cdot \left(\left(r\_m \cdot w\right) \cdot -0.375\right)\right)\\
\mathbf{else}:\\
\;\;\;\;\left(3 + \left(0.375 + v \cdot -0.25\right) \cdot \frac{\left(r\_m \cdot w\right) \cdot \left(r\_m \cdot w\right)}{v + -1}\right) - 4.5\\
\end{array}
\end{array}
if r < 9.20000000000000054e-11Initial program 86.0%
associate--l-N/A
+-commutativeN/A
associate--l+N/A
+-lowering-+.f64N/A
/-lowering-/.f64N/A
*-lowering-*.f64N/A
associate--r+N/A
sub-negN/A
+-commutativeN/A
associate--l+N/A
metadata-evalN/A
metadata-evalN/A
metadata-evalN/A
Simplified97.1%
Taylor expanded in v around 0
*-commutativeN/A
associate-*l*N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6481.5%
Simplified81.5%
associate-*r*N/A
swap-sqrN/A
*-lowering-*.f64N/A
associate-*l*N/A
*-commutativeN/A
associate-*l*N/A
*-lowering-*.f64N/A
associate-*l*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-lowering-*.f6492.0%
Applied egg-rr92.0%
+-lowering-+.f64N/A
associate-/r*N/A
/-lowering-/.f64N/A
/-lowering-/.f64N/A
+-commutativeN/A
+-lowering-+.f64N/A
associate-*r*N/A
associate-*l*N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f6494.1%
Applied egg-rr94.1%
if 9.20000000000000054e-11 < r Initial program 91.3%
Taylor expanded in r around inf
Simplified91.3%
associate-/l*N/A
*-commutativeN/A
*-lowering-*.f64N/A
associate-*l*N/A
*-commutativeN/A
swap-sqrN/A
/-lowering-/.f64N/A
associate-*l*N/A
*-commutativeN/A
associate-*l*N/A
*-lowering-*.f64N/A
associate-*l*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
--lowering--.f64N/A
sub-negN/A
distribute-lft-inN/A
metadata-evalN/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
Applied egg-rr99.8%
associate-*r*N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f6499.8%
Applied egg-rr99.8%
*-commutativeN/A
associate-*l*N/A
metadata-evalN/A
metadata-evalN/A
*-lowering-*.f64N/A
metadata-eval99.8%
Applied egg-rr99.8%
Final simplification95.7%
r_m = (fabs.f64 r)
(FPCore (v w r_m)
:precision binary64
(if (<= r_m 1.4e-10)
(+ (/ (/ 2.0 r_m) r_m) (+ -1.5 (* (* r_m w) (* (* r_m w) -0.375))))
(-
(+ 3.0 (* (+ 0.375 (* v -0.25)) (/ (* r_m (* w (* r_m w))) (+ v -1.0))))
4.5)))r_m = fabs(r);
/* Herbie rewrite split on r_m = |r| (computed by the caller); the 1.4e-10
 * threshold comes from Herbie's sampled accuracy analysis. Differs from the
 * previous alternative only in the grouping r_m*(w*(r_m*w)) of the large-|r|
 * branch. Arithmetic preserved op-for-op; only the control-flow shape differs. */
double code(double v, double w, double r_m) {
  if (r_m <= 1.4e-10) {
    return ((2.0 / r_m) / r_m) + (-1.5 + ((r_m * w) * ((r_m * w) * -0.375)));
  }
  return (3.0 + ((0.375 + (v * -0.25)) * ((r_m * (w * (r_m * w))) / (v + -1.0)))) - 4.5;
}
r_m = abs(r)
real(8) function code(v, w, r_m)
real(8), intent (in) :: v
real(8), intent (in) :: w
real(8), intent (in) :: r_m
real(8) :: tmp
if (r_m <= 1.4d-10) then
tmp = ((2.0d0 / r_m) / r_m) + ((-1.5d0) + ((r_m * w) * ((r_m * w) * (-0.375d0))))
else
tmp = (3.0d0 + ((0.375d0 + (v * (-0.25d0))) * ((r_m * (w * (r_m * w))) / (v + (-1.0d0))))) - 4.5d0
end if
code = tmp
end function
r_m = Math.abs(r);
public static double code(double v, double w, double r_m) {
double tmp;
if (r_m <= 1.4e-10) {
tmp = ((2.0 / r_m) / r_m) + (-1.5 + ((r_m * w) * ((r_m * w) * -0.375)));
} else {
tmp = (3.0 + ((0.375 + (v * -0.25)) * ((r_m * (w * (r_m * w))) / (v + -1.0)))) - 4.5;
}
return tmp;
}
r_m = math.fabs(r) def code(v, w, r_m): tmp = 0 if r_m <= 1.4e-10: tmp = ((2.0 / r_m) / r_m) + (-1.5 + ((r_m * w) * ((r_m * w) * -0.375))) else: tmp = (3.0 + ((0.375 + (v * -0.25)) * ((r_m * (w * (r_m * w))) / (v + -1.0)))) - 4.5 return tmp
r_m = abs(r) function code(v, w, r_m) tmp = 0.0 if (r_m <= 1.4e-10) tmp = Float64(Float64(Float64(2.0 / r_m) / r_m) + Float64(-1.5 + Float64(Float64(r_m * w) * Float64(Float64(r_m * w) * -0.375)))); else tmp = Float64(Float64(3.0 + Float64(Float64(0.375 + Float64(v * -0.25)) * Float64(Float64(r_m * Float64(w * Float64(r_m * w))) / Float64(v + -1.0)))) - 4.5); end return tmp end
r_m = abs(r); function tmp_2 = code(v, w, r_m) tmp = 0.0; if (r_m <= 1.4e-10) tmp = ((2.0 / r_m) / r_m) + (-1.5 + ((r_m * w) * ((r_m * w) * -0.375))); else tmp = (3.0 + ((0.375 + (v * -0.25)) * ((r_m * (w * (r_m * w))) / (v + -1.0)))) - 4.5; end tmp_2 = tmp; end
r_m = N[Abs[r], $MachinePrecision] code[v_, w_, r$95$m_] := If[LessEqual[r$95$m, 1.4e-10], N[(N[(N[(2.0 / r$95$m), $MachinePrecision] / r$95$m), $MachinePrecision] + N[(-1.5 + N[(N[(r$95$m * w), $MachinePrecision] * N[(N[(r$95$m * w), $MachinePrecision] * -0.375), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(3.0 + N[(N[(0.375 + N[(v * -0.25), $MachinePrecision]), $MachinePrecision] * N[(N[(r$95$m * N[(w * N[(r$95$m * w), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(v + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - 4.5), $MachinePrecision]]
\begin{array}{l}
r_m = \left|r\right|
\\
\begin{array}{l}
\mathbf{if}\;r\_m \leq 1.4 \cdot 10^{-10}:\\
\;\;\;\;\frac{\frac{2}{r\_m}}{r\_m} + \left(-1.5 + \left(r\_m \cdot w\right) \cdot \left(\left(r\_m \cdot w\right) \cdot -0.375\right)\right)\\
\mathbf{else}:\\
\;\;\;\;\left(3 + \left(0.375 + v \cdot -0.25\right) \cdot \frac{r\_m \cdot \left(w \cdot \left(r\_m \cdot w\right)\right)}{v + -1}\right) - 4.5\\
\end{array}
\end{array}
if r < 1.40000000000000008e-10Initial program 86.0%
associate--l-N/A
+-commutativeN/A
associate--l+N/A
+-lowering-+.f64N/A
/-lowering-/.f64N/A
*-lowering-*.f64N/A
associate--r+N/A
sub-negN/A
+-commutativeN/A
associate--l+N/A
metadata-evalN/A
metadata-evalN/A
metadata-evalN/A
Simplified97.1%
Taylor expanded in v around 0
*-commutativeN/A
associate-*l*N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6481.5%
Simplified81.5%
associate-*r*N/A
swap-sqrN/A
*-lowering-*.f64N/A
associate-*l*N/A
*-commutativeN/A
associate-*l*N/A
*-lowering-*.f64N/A
associate-*l*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-lowering-*.f6492.0%
Applied egg-rr92.0%
+-lowering-+.f64N/A
associate-/r*N/A
/-lowering-/.f64N/A
/-lowering-/.f64N/A
+-commutativeN/A
+-lowering-+.f64N/A
associate-*r*N/A
associate-*l*N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f6494.1%
Applied egg-rr94.1%
if 1.40000000000000008e-10 < r Initial program 91.3%
Taylor expanded in r around inf
Simplified91.3%
associate-/l*N/A
*-commutativeN/A
*-lowering-*.f64N/A
associate-*l*N/A
*-commutativeN/A
swap-sqrN/A
/-lowering-/.f64N/A
associate-*l*N/A
*-commutativeN/A
associate-*l*N/A
*-lowering-*.f64N/A
associate-*l*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
--lowering--.f64N/A
sub-negN/A
distribute-lft-inN/A
metadata-evalN/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
Applied egg-rr99.8%
*-commutativeN/A
associate-*l*N/A
metadata-evalN/A
*-lowering-*.f6499.8%
Applied egg-rr99.8%
Final simplification95.7%
r_m = (fabs.f64 r)
(FPCore (v w r_m)
:precision binary64
(let* ((t_0 (/ (/ 2.0 r_m) r_m)))
(if (<= r_m 5e-101)
t_0
(if (<= r_m 5600000.0)
(+ t_0 (* (* r_m r_m) (* -0.375 (* w w))))
(+ -1.5 (* r_m (* (* r_m w) (* -0.25 w))))))))r_m = fabs(r);
/* Herbie rewrite with three |r| regimes (r_m = |r|, computed by the caller);
 * thresholds from Herbie's sampled accuracy analysis. t_0 = (2/r)/r is shared
 * by the first two regimes. v is unused in this alternative (kept for
 * interface compatibility). Arithmetic preserved op-for-op. */
double code(double v, double w, double r_m) {
  double t_0 = (2.0 / r_m) / r_m;  /* overflow-safe 2/r^2 */
  if (r_m <= 5e-101) {
    return t_0;
  }
  if (r_m <= 5600000.0) {
    return t_0 + ((r_m * r_m) * (-0.375 * (w * w)));
  }
  return -1.5 + (r_m * ((r_m * w) * (-0.25 * w)));
}
r_m = abs(r)
real(8) function code(v, w, r_m)
real(8), intent (in) :: v
real(8), intent (in) :: w
real(8), intent (in) :: r_m
real(8) :: t_0
real(8) :: tmp
t_0 = (2.0d0 / r_m) / r_m
if (r_m <= 5d-101) then
tmp = t_0
else if (r_m <= 5600000.0d0) then
tmp = t_0 + ((r_m * r_m) * ((-0.375d0) * (w * w)))
else
tmp = (-1.5d0) + (r_m * ((r_m * w) * ((-0.25d0) * w)))
end if
code = tmp
end function
r_m = Math.abs(r);
/**
 * Herbie-generated binary64 approximation of
 * ((3 + 2/r^2) - 0.125*(3 - 2v)*w^2*r^2/(1 - v)) - 4.5,
 * split over three regimes of r_m = |r| (caller supplies the absolute value).
 *
 * @param v   unused; the series expansions that produced these branches dropped it
 * @param w   second operand of the original expression
 * @param r_m absolute value of r
 * @return approximated value of the original expression
 */
public static double code(double v, double w, double r_m) {
// 2/r_m^2 as two divisions — presumably avoids underflow of r_m*r_m
// for the tiny-r_m branch; confirm against the Herbie report
double t_0 = (2.0 / r_m) / r_m;
double tmp;
if (r_m <= 5e-101) {
// r tiny: only the 2/r^2 term survives
tmp = t_0;
} else if (r_m <= 5600000.0) {
// mid range: 2/r^2 plus the -0.375*w^2*r^2 term
tmp = t_0 + ((r_m * r_m) * (-0.375 * (w * w)));
} else {
// r huge: constant plus the dominant -0.25*w^2*r^2 term
tmp = -1.5 + (r_m * ((r_m * w) * (-0.25 * w)));
}
return tmp;
}
# r_m = math.fabs(r) -- the caller computes the absolute value of r and
# passes it in; kept as a comment because `r` is undefined at module scope.
def code(v, w, r_m):
    """Herbie-generated binary64 approximation of
    ((3 + 2/r**2) - 0.125*(3 - 2*v)*w**2*r**2/(1 - v)) - 4.5,
    split over three regimes of r_m = abs(r).

    v is unused: the series expansions behind these branches dropped it.
    """
    # 2/r_m**2 as two divisions; presumably avoids underflow of r_m*r_m
    # in the tiny-r_m branch -- confirm against the Herbie report
    t_0 = (2.0 / r_m) / r_m
    if r_m <= 5e-101:
        # r tiny: only the 2/r**2 term survives
        tmp = t_0
    elif r_m <= 5600000.0:
        # mid range: 2/r**2 plus the -0.375*w**2*r**2 term
        tmp = t_0 + ((r_m * r_m) * (-0.375 * (w * w)))
    else:
        # r huge: constant plus the dominant -0.25*w**2*r**2 term
        tmp = -1.5 + (r_m * ((r_m * w) * (-0.25 * w)))
    return tmp
r_m = abs(r) function code(v, w, r_m) t_0 = Float64(Float64(2.0 / r_m) / r_m) tmp = 0.0 if (r_m <= 5e-101) tmp = t_0; elseif (r_m <= 5600000.0) tmp = Float64(t_0 + Float64(Float64(r_m * r_m) * Float64(-0.375 * Float64(w * w)))); else tmp = Float64(-1.5 + Float64(r_m * Float64(Float64(r_m * w) * Float64(-0.25 * w)))); end return tmp end
r_m = abs(r); function tmp_2 = code(v, w, r_m) t_0 = (2.0 / r_m) / r_m; tmp = 0.0; if (r_m <= 5e-101) tmp = t_0; elseif (r_m <= 5600000.0) tmp = t_0 + ((r_m * r_m) * (-0.375 * (w * w))); else tmp = -1.5 + (r_m * ((r_m * w) * (-0.25 * w))); end tmp_2 = tmp; end
r_m = N[Abs[r], $MachinePrecision]
code[v_, w_, r$95$m_] := Block[{t$95$0 = N[(N[(2.0 / r$95$m), $MachinePrecision] / r$95$m), $MachinePrecision]}, If[LessEqual[r$95$m, 5e-101], t$95$0, If[LessEqual[r$95$m, 5600000.0], N[(t$95$0 + N[(N[(r$95$m * r$95$m), $MachinePrecision] * N[(-0.375 * N[(w * w), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(-1.5 + N[(r$95$m * N[(N[(r$95$m * w), $MachinePrecision] * N[(-0.25 * w), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}
r_m = \left|r\right|
\\
\begin{array}{l}
t_0 := \frac{\frac{2}{r\_m}}{r\_m}\\
\mathbf{if}\;r\_m \leq 5 \cdot 10^{-101}:\\
\;\;\;\;t\_0\\
\mathbf{elif}\;r\_m \leq 5600000:\\
\;\;\;\;t\_0 + \left(r\_m \cdot r\_m\right) \cdot \left(-0.375 \cdot \left(w \cdot w\right)\right)\\
\mathbf{else}:\\
\;\;\;\;-1.5 + r\_m \cdot \left(\left(r\_m \cdot w\right) \cdot \left(-0.25 \cdot w\right)\right)\\
\end{array}
\end{array}
if r < 5.0000000000000001e-101Initial program 85.6%
associate--l-N/A
+-commutativeN/A
associate--l+N/A
+-lowering-+.f64N/A
/-lowering-/.f64N/A
*-lowering-*.f64N/A
associate--r+N/A
sub-negN/A
+-commutativeN/A
associate--l+N/A
metadata-evalN/A
metadata-evalN/A
metadata-evalN/A
Simplified97.0%
Taylor expanded in r around 0
/-lowering-/.f64N/A
unpow2N/A
*-lowering-*.f6457.6%
Simplified57.6%
associate-/r*N/A
/-lowering-/.f64N/A
/-lowering-/.f6457.7%
Applied egg-rr57.7%
if 5.0000000000000001e-101 < r < 5.6e6Initial program 91.7%
associate--l-N/A
+-commutativeN/A
associate--l+N/A
+-lowering-+.f64N/A
/-lowering-/.f64N/A
*-lowering-*.f64N/A
associate--r+N/A
sub-negN/A
+-commutativeN/A
associate--l+N/A
metadata-evalN/A
metadata-evalN/A
metadata-evalN/A
Simplified99.5%
Taylor expanded in v around 0
*-commutativeN/A
associate-*l*N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6492.9%
Simplified92.9%
associate-*r*N/A
swap-sqrN/A
*-lowering-*.f64N/A
associate-*l*N/A
*-commutativeN/A
associate-*l*N/A
*-lowering-*.f64N/A
associate-*l*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-lowering-*.f6492.9%
Applied egg-rr92.9%
+-lowering-+.f64N/A
associate-/r*N/A
/-lowering-/.f64N/A
/-lowering-/.f64N/A
+-commutativeN/A
+-lowering-+.f64N/A
associate-*r*N/A
associate-*l*N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f6492.6%
Applied egg-rr92.6%
Taylor expanded in r around inf
*-commutativeN/A
associate-*l*N/A
*-commutativeN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6492.6%
Simplified92.6%
if 5.6e6 < r Initial program 91.3%
Taylor expanded in r around inf
Simplified91.3%
Taylor expanded in v around inf
mul-1-negN/A
distribute-neg-inN/A
metadata-evalN/A
distribute-lft-neg-inN/A
metadata-evalN/A
+-lowering-+.f64N/A
*-commutativeN/A
associate-*l*N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6475.6%
Simplified75.6%
associate-*l*N/A
*-commutativeN/A
*-lowering-*.f64N/A
associate-*l*N/A
associate-*r*N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f6487.4%
Applied egg-rr87.4%
Final simplification67.7%
r_m = (fabs.f64 r)
(FPCore (v w r_m)
:precision binary64
(if (<= r_m 7.8e-101)
(/ (/ 2.0 r_m) r_m)
(if (<= r_m 5600000.0)
(+ (/ 2.0 (* r_m r_m)) (* (* r_m r_m) (* -0.375 (* w w))))
(+ -1.5 (* r_m (* (* r_m w) (* -0.25 w)))))))
r_m = fabs(r);
/* Herbie-generated binary64 approximation of
 *   ((3 + 2/r^2) - 0.125*(3 - 2v)*w^2*r^2/(1 - v)) - 4.5
 * over three regimes of r_m = |r| (computed by the caller).
 * `v` is unused: the series expansions behind these branches dropped it. */
double code(double v, double w, double r_m) {
double tmp;
if (r_m <= 7.8e-101) {
/* r tiny: only 2/r^2 survives; two divisions presumably avoid
 * underflow of r_m*r_m — confirm against the Herbie report */
tmp = (2.0 / r_m) / r_m;
} else if (r_m <= 5600000.0) {
/* mid range: 2/r^2 plus the -0.375*w^2*r^2 term */
tmp = (2.0 / (r_m * r_m)) + ((r_m * r_m) * (-0.375 * (w * w)));
} else {
/* r huge: constant plus the dominant -0.25*w^2*r^2 term */
tmp = -1.5 + (r_m * ((r_m * w) * (-0.25 * w)));
}
return tmp;
}
r_m = abs(r)
real(8) function code(v, w, r_m)
real(8), intent (in) :: v
real(8), intent (in) :: w
real(8), intent (in) :: r_m
real(8) :: tmp
if (r_m <= 7.8d-101) then
tmp = (2.0d0 / r_m) / r_m
else if (r_m <= 5600000.0d0) then
tmp = (2.0d0 / (r_m * r_m)) + ((r_m * r_m) * ((-0.375d0) * (w * w)))
else
tmp = (-1.5d0) + (r_m * ((r_m * w) * ((-0.25d0) * w)))
end if
code = tmp
end function
r_m = Math.abs(r);
public static double code(double v, double w, double r_m) {
double tmp;
if (r_m <= 7.8e-101) {
tmp = (2.0 / r_m) / r_m;
} else if (r_m <= 5600000.0) {
tmp = (2.0 / (r_m * r_m)) + ((r_m * r_m) * (-0.375 * (w * w)));
} else {
tmp = -1.5 + (r_m * ((r_m * w) * (-0.25 * w)));
}
return tmp;
}
# r_m = math.fabs(r) -- the caller computes the absolute value of r and
# passes it in; kept as a comment because `r` is undefined at module scope.
def code(v, w, r_m):
    """Herbie-generated binary64 approximation of
    ((3 + 2/r**2) - 0.125*(3 - 2*v)*w**2*r**2/(1 - v)) - 4.5,
    split over three regimes of r_m = abs(r).

    v is unused: the series expansions behind these branches dropped it.
    """
    if r_m <= 7.8e-101:
        # r tiny: only the 2/r**2 term survives; two divisions
        # presumably avoid underflow of r_m*r_m -- confirm
        tmp = (2.0 / r_m) / r_m
    elif r_m <= 5600000.0:
        # mid range: 2/r**2 plus the -0.375*w**2*r**2 term
        tmp = (2.0 / (r_m * r_m)) + ((r_m * r_m) * (-0.375 * (w * w)))
    else:
        # r huge: constant plus the dominant -0.25*w**2*r**2 term
        tmp = -1.5 + (r_m * ((r_m * w) * (-0.25 * w)))
    return tmp
r_m = abs(r) function code(v, w, r_m) tmp = 0.0 if (r_m <= 7.8e-101) tmp = Float64(Float64(2.0 / r_m) / r_m); elseif (r_m <= 5600000.0) tmp = Float64(Float64(2.0 / Float64(r_m * r_m)) + Float64(Float64(r_m * r_m) * Float64(-0.375 * Float64(w * w)))); else tmp = Float64(-1.5 + Float64(r_m * Float64(Float64(r_m * w) * Float64(-0.25 * w)))); end return tmp end
r_m = abs(r); function tmp_2 = code(v, w, r_m) tmp = 0.0; if (r_m <= 7.8e-101) tmp = (2.0 / r_m) / r_m; elseif (r_m <= 5600000.0) tmp = (2.0 / (r_m * r_m)) + ((r_m * r_m) * (-0.375 * (w * w))); else tmp = -1.5 + (r_m * ((r_m * w) * (-0.25 * w))); end tmp_2 = tmp; end
r_m = N[Abs[r], $MachinePrecision] code[v_, w_, r$95$m_] := If[LessEqual[r$95$m, 7.8e-101], N[(N[(2.0 / r$95$m), $MachinePrecision] / r$95$m), $MachinePrecision], If[LessEqual[r$95$m, 5600000.0], N[(N[(2.0 / N[(r$95$m * r$95$m), $MachinePrecision]), $MachinePrecision] + N[(N[(r$95$m * r$95$m), $MachinePrecision] * N[(-0.375 * N[(w * w), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(-1.5 + N[(r$95$m * N[(N[(r$95$m * w), $MachinePrecision] * N[(-0.25 * w), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
r_m = \left|r\right|
\\
\begin{array}{l}
\mathbf{if}\;r\_m \leq 7.8 \cdot 10^{-101}:\\
\;\;\;\;\frac{\frac{2}{r\_m}}{r\_m}\\
\mathbf{elif}\;r\_m \leq 5600000:\\
\;\;\;\;\frac{2}{r\_m \cdot r\_m} + \left(r\_m \cdot r\_m\right) \cdot \left(-0.375 \cdot \left(w \cdot w\right)\right)\\
\mathbf{else}:\\
\;\;\;\;-1.5 + r\_m \cdot \left(\left(r\_m \cdot w\right) \cdot \left(-0.25 \cdot w\right)\right)\\
\end{array}
\end{array}
if r < 7.80000000000000031e-101Initial program 85.6%
associate--l-N/A
+-commutativeN/A
associate--l+N/A
+-lowering-+.f64N/A
/-lowering-/.f64N/A
*-lowering-*.f64N/A
associate--r+N/A
sub-negN/A
+-commutativeN/A
associate--l+N/A
metadata-evalN/A
metadata-evalN/A
metadata-evalN/A
Simplified97.0%
Taylor expanded in r around 0
/-lowering-/.f64N/A
unpow2N/A
*-lowering-*.f6457.6%
Simplified57.6%
associate-/r*N/A
/-lowering-/.f64N/A
/-lowering-/.f6457.7%
Applied egg-rr57.7%
if 7.80000000000000031e-101 < r < 5.6e6Initial program 91.7%
associate--l-N/A
+-commutativeN/A
associate--l+N/A
+-lowering-+.f64N/A
/-lowering-/.f64N/A
*-lowering-*.f64N/A
associate--r+N/A
sub-negN/A
+-commutativeN/A
associate--l+N/A
metadata-evalN/A
metadata-evalN/A
metadata-evalN/A
Simplified99.5%
Taylor expanded in v around 0
*-commutativeN/A
associate-*l*N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6492.9%
Simplified92.9%
Taylor expanded in r around inf
*-commutativeN/A
associate-*l*N/A
*-commutativeN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6492.9%
Simplified92.9%
if 5.6e6 < r Initial program 91.3%
Taylor expanded in r around inf
Simplified91.3%
Taylor expanded in v around inf
mul-1-negN/A
distribute-neg-inN/A
metadata-evalN/A
distribute-lft-neg-inN/A
metadata-evalN/A
+-lowering-+.f64N/A
*-commutativeN/A
associate-*l*N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6475.6%
Simplified75.6%
associate-*l*N/A
*-commutativeN/A
*-lowering-*.f64N/A
associate-*l*N/A
associate-*r*N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f6487.4%
Applied egg-rr87.4%
Final simplification67.7%
r_m = (fabs.f64 r) (FPCore (v w r_m) :precision binary64 (+ (/ 2.0 (* r_m r_m)) (+ -1.5 (* (+ 0.375 (* v -0.25)) (/ (* (* r_m w) (* r_m w)) (+ v -1.0))))))
r_m = fabs(r);
/* Herbie-generated binary64 approximation of
 *   ((3 + 2/r^2) - 0.125*(3 - 2v)*w^2*r^2/(1 - v)) - 4.5
 * with r_m = |r| supplied by the caller. Evaluation order (and therefore
 * every rounding step) matches the generated expression exactly. */
double code(double v, double w, double r_m) {
double inv_r2 = 2.0 / (r_m * r_m);           /* 2/r^2 term */
double coeff = 0.375 + (v * -0.25);          /* 0.125*(3 - 2v) */
double prod = (r_m * w) * (r_m * w);         /* (r*w)^2, grouped as in the report */
double frac = prod / (v + -1.0);             /* divide by v - 1 */
return inv_r2 + (-1.5 + (coeff * frac));
}
r_m = abs(r)
real(8) function code(v, w, r_m)
real(8), intent (in) :: v
real(8), intent (in) :: w
real(8), intent (in) :: r_m
code = (2.0d0 / (r_m * r_m)) + ((-1.5d0) + ((0.375d0 + (v * (-0.25d0))) * (((r_m * w) * (r_m * w)) / (v + (-1.0d0)))))
end function
r_m = Math.abs(r);
public static double code(double v, double w, double r_m) {
return (2.0 / (r_m * r_m)) + (-1.5 + ((0.375 + (v * -0.25)) * (((r_m * w) * (r_m * w)) / (v + -1.0))));
}
r_m = math.fabs(r) def code(v, w, r_m): return (2.0 / (r_m * r_m)) + (-1.5 + ((0.375 + (v * -0.25)) * (((r_m * w) * (r_m * w)) / (v + -1.0))))
r_m = abs(r) function code(v, w, r_m) return Float64(Float64(2.0 / Float64(r_m * r_m)) + Float64(-1.5 + Float64(Float64(0.375 + Float64(v * -0.25)) * Float64(Float64(Float64(r_m * w) * Float64(r_m * w)) / Float64(v + -1.0))))) end
r_m = abs(r); function tmp = code(v, w, r_m) tmp = (2.0 / (r_m * r_m)) + (-1.5 + ((0.375 + (v * -0.25)) * (((r_m * w) * (r_m * w)) / (v + -1.0)))); end
r_m = N[Abs[r], $MachinePrecision] code[v_, w_, r$95$m_] := N[(N[(2.0 / N[(r$95$m * r$95$m), $MachinePrecision]), $MachinePrecision] + N[(-1.5 + N[(N[(0.375 + N[(v * -0.25), $MachinePrecision]), $MachinePrecision] * N[(N[(N[(r$95$m * w), $MachinePrecision] * N[(r$95$m * w), $MachinePrecision]), $MachinePrecision] / N[(v + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
r_m = \left|r\right|
\\
\frac{2}{r\_m \cdot r\_m} + \left(-1.5 + \left(0.375 + v \cdot -0.25\right) \cdot \frac{\left(r\_m \cdot w\right) \cdot \left(r\_m \cdot w\right)}{v + -1}\right)
\end{array}
Initial program 87.5%
associate--l-N/A
+-commutativeN/A
associate--l+N/A
+-lowering-+.f64N/A
/-lowering-/.f64N/A
*-lowering-*.f64N/A
associate--r+N/A
sub-negN/A
+-commutativeN/A
associate--l+N/A
metadata-evalN/A
metadata-evalN/A
metadata-evalN/A
Simplified97.9%
associate-*r*N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f6499.7%
Applied egg-rr99.7%
Final simplification99.7%
r_m = (fabs.f64 r) (FPCore (v w r_m) :precision binary64 (+ (/ 2.0 (* r_m r_m)) (+ -1.5 (* (+ 0.375 (* v -0.25)) (/ (* r_m (* w (* r_m w))) (+ v -1.0))))))
r_m = fabs(r);
/* Herbie-generated binary64 approximation of
 *   ((3 + 2/r^2) - 0.125*(3 - 2v)*w^2*r^2/(1 - v)) - 4.5
 * with r_m = |r| supplied by the caller. Named intermediates only;
 * the arithmetic tree (hence rounding) is identical to the original. */
double code(double v, double w, double r_m) {
double inv_r2 = 2.0 / (r_m * r_m);           /* 2/r^2 term */
double coeff = 0.375 + (v * -0.25);          /* 0.125*(3 - 2v) */
double num = r_m * (w * (r_m * w));          /* (r*w)^2, grouped as in the report */
double frac = num / (v + -1.0);              /* divide by v - 1 */
return inv_r2 + (-1.5 + (coeff * frac));
}
r_m = abs(r)
real(8) function code(v, w, r_m)
real(8), intent (in) :: v
real(8), intent (in) :: w
real(8), intent (in) :: r_m
code = (2.0d0 / (r_m * r_m)) + ((-1.5d0) + ((0.375d0 + (v * (-0.25d0))) * ((r_m * (w * (r_m * w))) / (v + (-1.0d0)))))
end function
r_m = Math.abs(r);
public static double code(double v, double w, double r_m) {
return (2.0 / (r_m * r_m)) + (-1.5 + ((0.375 + (v * -0.25)) * ((r_m * (w * (r_m * w))) / (v + -1.0))));
}
r_m = math.fabs(r) def code(v, w, r_m): return (2.0 / (r_m * r_m)) + (-1.5 + ((0.375 + (v * -0.25)) * ((r_m * (w * (r_m * w))) / (v + -1.0))))
r_m = abs(r) function code(v, w, r_m) return Float64(Float64(2.0 / Float64(r_m * r_m)) + Float64(-1.5 + Float64(Float64(0.375 + Float64(v * -0.25)) * Float64(Float64(r_m * Float64(w * Float64(r_m * w))) / Float64(v + -1.0))))) end
r_m = abs(r); function tmp = code(v, w, r_m) tmp = (2.0 / (r_m * r_m)) + (-1.5 + ((0.375 + (v * -0.25)) * ((r_m * (w * (r_m * w))) / (v + -1.0)))); end
r_m = N[Abs[r], $MachinePrecision] code[v_, w_, r$95$m_] := N[(N[(2.0 / N[(r$95$m * r$95$m), $MachinePrecision]), $MachinePrecision] + N[(-1.5 + N[(N[(0.375 + N[(v * -0.25), $MachinePrecision]), $MachinePrecision] * N[(N[(r$95$m * N[(w * N[(r$95$m * w), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(v + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
r_m = \left|r\right|
\\
\frac{2}{r\_m \cdot r\_m} + \left(-1.5 + \left(0.375 + v \cdot -0.25\right) \cdot \frac{r\_m \cdot \left(w \cdot \left(r\_m \cdot w\right)\right)}{v + -1}\right)
\end{array}
Initial program 87.5%
associate--l-N/A
+-commutativeN/A
associate--l+N/A
+-lowering-+.f64N/A
/-lowering-/.f64N/A
*-lowering-*.f64N/A
associate--r+N/A
sub-negN/A
+-commutativeN/A
associate--l+N/A
metadata-evalN/A
metadata-evalN/A
metadata-evalN/A
Simplified97.9%
Final simplification97.9%
r_m = (fabs.f64 r) (FPCore (v w r_m) :precision binary64 (if (<= r_m 5.7e+163) (+ (/ 2.0 (* r_m r_m)) (+ -1.5 (* w (* -0.375 (* r_m (* r_m w)))))) (+ -1.5 (* r_m (* (* r_m w) (* -0.25 w))))))
r_m = fabs(r);
/* Herbie-generated binary64 approximation of
 *   ((3 + 2/r^2) - 0.125*(3 - 2v)*w^2*r^2/(1 - v)) - 4.5
 * over two regimes of r_m = |r| (computed by the caller).
 * `v` is unused: the series expansions behind these branches dropped it. */
double code(double v, double w, double r_m) {
double tmp;
if (r_m <= 5.7e+163) {
/* moderate r: 2/r^2 plus the -0.375*w^2*r^2 term */
tmp = (2.0 / (r_m * r_m)) + (-1.5 + (w * (-0.375 * (r_m * (r_m * w)))));
} else {
/* r huge: constant plus the dominant -0.25*w^2*r^2 term */
tmp = -1.5 + (r_m * ((r_m * w) * (-0.25 * w)));
}
return tmp;
}
r_m = abs(r)
real(8) function code(v, w, r_m)
real(8), intent (in) :: v
real(8), intent (in) :: w
real(8), intent (in) :: r_m
real(8) :: tmp
if (r_m <= 5.7d+163) then
tmp = (2.0d0 / (r_m * r_m)) + ((-1.5d0) + (w * ((-0.375d0) * (r_m * (r_m * w)))))
else
tmp = (-1.5d0) + (r_m * ((r_m * w) * ((-0.25d0) * w)))
end if
code = tmp
end function
r_m = Math.abs(r);
public static double code(double v, double w, double r_m) {
double tmp;
if (r_m <= 5.7e+163) {
tmp = (2.0 / (r_m * r_m)) + (-1.5 + (w * (-0.375 * (r_m * (r_m * w)))));
} else {
tmp = -1.5 + (r_m * ((r_m * w) * (-0.25 * w)));
}
return tmp;
}
r_m = math.fabs(r) def code(v, w, r_m): tmp = 0 if r_m <= 5.7e+163: tmp = (2.0 / (r_m * r_m)) + (-1.5 + (w * (-0.375 * (r_m * (r_m * w))))) else: tmp = -1.5 + (r_m * ((r_m * w) * (-0.25 * w))) return tmp
r_m = abs(r) function code(v, w, r_m) tmp = 0.0 if (r_m <= 5.7e+163) tmp = Float64(Float64(2.0 / Float64(r_m * r_m)) + Float64(-1.5 + Float64(w * Float64(-0.375 * Float64(r_m * Float64(r_m * w)))))); else tmp = Float64(-1.5 + Float64(r_m * Float64(Float64(r_m * w) * Float64(-0.25 * w)))); end return tmp end
r_m = abs(r); function tmp_2 = code(v, w, r_m) tmp = 0.0; if (r_m <= 5.7e+163) tmp = (2.0 / (r_m * r_m)) + (-1.5 + (w * (-0.375 * (r_m * (r_m * w))))); else tmp = -1.5 + (r_m * ((r_m * w) * (-0.25 * w))); end tmp_2 = tmp; end
r_m = N[Abs[r], $MachinePrecision] code[v_, w_, r$95$m_] := If[LessEqual[r$95$m, 5.7e+163], N[(N[(2.0 / N[(r$95$m * r$95$m), $MachinePrecision]), $MachinePrecision] + N[(-1.5 + N[(w * N[(-0.375 * N[(r$95$m * N[(r$95$m * w), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(-1.5 + N[(r$95$m * N[(N[(r$95$m * w), $MachinePrecision] * N[(-0.25 * w), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
r_m = \left|r\right|
\\
\begin{array}{l}
\mathbf{if}\;r\_m \leq 5.7 \cdot 10^{+163}:\\
\;\;\;\;\frac{2}{r\_m \cdot r\_m} + \left(-1.5 + w \cdot \left(-0.375 \cdot \left(r\_m \cdot \left(r\_m \cdot w\right)\right)\right)\right)\\
\mathbf{else}:\\
\;\;\;\;-1.5 + r\_m \cdot \left(\left(r\_m \cdot w\right) \cdot \left(-0.25 \cdot w\right)\right)\\
\end{array}
\end{array}
if r < 5.6999999999999998e163Initial program 88.2%
associate--l-N/A
+-commutativeN/A
associate--l+N/A
+-lowering-+.f64N/A
/-lowering-/.f64N/A
*-lowering-*.f64N/A
associate--r+N/A
sub-negN/A
+-commutativeN/A
associate--l+N/A
metadata-evalN/A
metadata-evalN/A
metadata-evalN/A
Simplified97.6%
Taylor expanded in v around 0
*-commutativeN/A
associate-*l*N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6482.6%
Simplified82.6%
associate-*r*N/A
swap-sqrN/A
associate-*r*N/A
associate-*l*N/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f6493.6%
Applied egg-rr93.6%
*-commutativeN/A
associate-*r*N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f6493.6%
Applied egg-rr93.6%
if 5.6999999999999998e163 < r Initial program 82.8%
Taylor expanded in r around inf
Simplified82.8%
Taylor expanded in v around inf
mul-1-negN/A
distribute-neg-inN/A
metadata-evalN/A
distribute-lft-neg-inN/A
metadata-evalN/A
+-lowering-+.f64N/A
*-commutativeN/A
associate-*l*N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6470.3%
Simplified70.3%
associate-*l*N/A
*-commutativeN/A
*-lowering-*.f64N/A
associate-*l*N/A
associate-*r*N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f6493.2%
Applied egg-rr93.2%
Final simplification93.5%
r_m = (fabs.f64 r) (FPCore (v w r_m) :precision binary64 (if (<= r_m 7.2e+166) (+ (/ 2.0 (* r_m r_m)) (+ -1.5 (* (* r_m (* r_m w)) (* w -0.375)))) (+ -1.5 (* r_m (* (* r_m w) (* -0.25 w))))))
r_m = fabs(r);
/* Herbie-generated binary64 approximation of
 *   ((3 + 2/r^2) - 0.125*(3 - 2v)*w^2*r^2/(1 - v)) - 4.5
 * over two regimes of r_m = |r| (computed by the caller).
 * `v` is unused: the series expansions behind these branches dropped it. */
double code(double v, double w, double r_m) {
double tmp;
if (r_m <= 7.2e+166) {
/* moderate r: 2/r^2 plus the -0.375*w^2*r^2 term */
tmp = (2.0 / (r_m * r_m)) + (-1.5 + ((r_m * (r_m * w)) * (w * -0.375)));
} else {
/* r huge: constant plus the dominant -0.25*w^2*r^2 term */
tmp = -1.5 + (r_m * ((r_m * w) * (-0.25 * w)));
}
return tmp;
}
r_m = abs(r)
real(8) function code(v, w, r_m)
real(8), intent (in) :: v
real(8), intent (in) :: w
real(8), intent (in) :: r_m
real(8) :: tmp
if (r_m <= 7.2d+166) then
tmp = (2.0d0 / (r_m * r_m)) + ((-1.5d0) + ((r_m * (r_m * w)) * (w * (-0.375d0))))
else
tmp = (-1.5d0) + (r_m * ((r_m * w) * ((-0.25d0) * w)))
end if
code = tmp
end function
r_m = Math.abs(r);
public static double code(double v, double w, double r_m) {
double tmp;
if (r_m <= 7.2e+166) {
tmp = (2.0 / (r_m * r_m)) + (-1.5 + ((r_m * (r_m * w)) * (w * -0.375)));
} else {
tmp = -1.5 + (r_m * ((r_m * w) * (-0.25 * w)));
}
return tmp;
}
r_m = math.fabs(r) def code(v, w, r_m): tmp = 0 if r_m <= 7.2e+166: tmp = (2.0 / (r_m * r_m)) + (-1.5 + ((r_m * (r_m * w)) * (w * -0.375))) else: tmp = -1.5 + (r_m * ((r_m * w) * (-0.25 * w))) return tmp
r_m = abs(r) function code(v, w, r_m) tmp = 0.0 if (r_m <= 7.2e+166) tmp = Float64(Float64(2.0 / Float64(r_m * r_m)) + Float64(-1.5 + Float64(Float64(r_m * Float64(r_m * w)) * Float64(w * -0.375)))); else tmp = Float64(-1.5 + Float64(r_m * Float64(Float64(r_m * w) * Float64(-0.25 * w)))); end return tmp end
r_m = abs(r); function tmp_2 = code(v, w, r_m) tmp = 0.0; if (r_m <= 7.2e+166) tmp = (2.0 / (r_m * r_m)) + (-1.5 + ((r_m * (r_m * w)) * (w * -0.375))); else tmp = -1.5 + (r_m * ((r_m * w) * (-0.25 * w))); end tmp_2 = tmp; end
r_m = N[Abs[r], $MachinePrecision] code[v_, w_, r$95$m_] := If[LessEqual[r$95$m, 7.2e+166], N[(N[(2.0 / N[(r$95$m * r$95$m), $MachinePrecision]), $MachinePrecision] + N[(-1.5 + N[(N[(r$95$m * N[(r$95$m * w), $MachinePrecision]), $MachinePrecision] * N[(w * -0.375), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(-1.5 + N[(r$95$m * N[(N[(r$95$m * w), $MachinePrecision] * N[(-0.25 * w), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
r_m = \left|r\right|
\\
\begin{array}{l}
\mathbf{if}\;r\_m \leq 7.2 \cdot 10^{+166}:\\
\;\;\;\;\frac{2}{r\_m \cdot r\_m} + \left(-1.5 + \left(r\_m \cdot \left(r\_m \cdot w\right)\right) \cdot \left(w \cdot -0.375\right)\right)\\
\mathbf{else}:\\
\;\;\;\;-1.5 + r\_m \cdot \left(\left(r\_m \cdot w\right) \cdot \left(-0.25 \cdot w\right)\right)\\
\end{array}
\end{array}
if r < 7.1999999999999994e166Initial program 88.2%
associate--l-N/A
+-commutativeN/A
associate--l+N/A
+-lowering-+.f64N/A
/-lowering-/.f64N/A
*-lowering-*.f64N/A
associate--r+N/A
sub-negN/A
+-commutativeN/A
associate--l+N/A
metadata-evalN/A
metadata-evalN/A
metadata-evalN/A
Simplified97.6%
Taylor expanded in v around 0
*-commutativeN/A
associate-*l*N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6482.6%
Simplified82.6%
associate-*r*N/A
swap-sqrN/A
associate-*r*N/A
associate-*l*N/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f6493.6%
Applied egg-rr93.6%
if 7.1999999999999994e166 < r Initial program 82.8%
Taylor expanded in r around inf
Simplified82.8%
Taylor expanded in v around inf
mul-1-negN/A
distribute-neg-inN/A
metadata-evalN/A
distribute-lft-neg-inN/A
metadata-evalN/A
+-lowering-+.f64N/A
*-commutativeN/A
associate-*l*N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6470.3%
Simplified70.3%
associate-*l*N/A
*-commutativeN/A
*-lowering-*.f64N/A
associate-*l*N/A
associate-*r*N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f6493.2%
Applied egg-rr93.2%
Final simplification93.5%
r_m = (fabs.f64 r) (FPCore (v w r_m) :precision binary64 (+ (/ (/ 2.0 r_m) r_m) (+ -1.5 (* (* r_m w) (* (* r_m w) -0.375)))))
r_m = fabs(r);
/* Herbie-generated binary64 approximation of
 *   ((3 + 2/r^2) - 0.125*(3 - 2v)*w^2*r^2/(1 - v)) - 4.5
 * with r_m = |r| supplied by the caller; `v` is unused in this variant.
 * Named intermediates only; left-associative division keeps the
 * arithmetic tree (hence rounding) identical to the original. */
double code(double v, double w, double r_m) {
double inv_r2 = 2.0 / r_m / r_m;       /* same tree as (2.0 / r_m) / r_m */
double rw = r_m * w;
double quad = rw * (rw * -0.375);      /* -0.375*(r*w)^2, grouped as in the report */
return inv_r2 + (-1.5 + quad);
}
r_m = abs(r)
real(8) function code(v, w, r_m)
real(8), intent (in) :: v
real(8), intent (in) :: w
real(8), intent (in) :: r_m
code = ((2.0d0 / r_m) / r_m) + ((-1.5d0) + ((r_m * w) * ((r_m * w) * (-0.375d0))))
end function
r_m = Math.abs(r);
public static double code(double v, double w, double r_m) {
return ((2.0 / r_m) / r_m) + (-1.5 + ((r_m * w) * ((r_m * w) * -0.375)));
}
r_m = math.fabs(r) def code(v, w, r_m): return ((2.0 / r_m) / r_m) + (-1.5 + ((r_m * w) * ((r_m * w) * -0.375)))
r_m = abs(r) function code(v, w, r_m) return Float64(Float64(Float64(2.0 / r_m) / r_m) + Float64(-1.5 + Float64(Float64(r_m * w) * Float64(Float64(r_m * w) * -0.375)))) end
r_m = abs(r); function tmp = code(v, w, r_m) tmp = ((2.0 / r_m) / r_m) + (-1.5 + ((r_m * w) * ((r_m * w) * -0.375))); end
r_m = N[Abs[r], $MachinePrecision] code[v_, w_, r$95$m_] := N[(N[(N[(2.0 / r$95$m), $MachinePrecision] / r$95$m), $MachinePrecision] + N[(-1.5 + N[(N[(r$95$m * w), $MachinePrecision] * N[(N[(r$95$m * w), $MachinePrecision] * -0.375), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
r_m = \left|r\right|
\\
\frac{\frac{2}{r\_m}}{r\_m} + \left(-1.5 + \left(r\_m \cdot w\right) \cdot \left(\left(r\_m \cdot w\right) \cdot -0.375\right)\right)
\end{array}
Initial program 87.5%
associate--l-N/A
+-commutativeN/A
associate--l+N/A
+-lowering-+.f64N/A
/-lowering-/.f64N/A
*-lowering-*.f64N/A
associate--r+N/A
sub-negN/A
+-commutativeN/A
associate--l+N/A
metadata-evalN/A
metadata-evalN/A
metadata-evalN/A
Simplified97.9%
Taylor expanded in v around 0
*-commutativeN/A
associate-*l*N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6480.9%
Simplified80.9%
associate-*r*N/A
swap-sqrN/A
*-lowering-*.f64N/A
associate-*l*N/A
*-commutativeN/A
associate-*l*N/A
*-lowering-*.f64N/A
associate-*l*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-lowering-*.f6491.3%
Applied egg-rr91.3%
+-lowering-+.f64N/A
associate-/r*N/A
/-lowering-/.f64N/A
/-lowering-/.f64N/A
+-commutativeN/A
+-lowering-+.f64N/A
associate-*r*N/A
associate-*l*N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f6492.8%
Applied egg-rr92.8%
r_m = (fabs.f64 r) (FPCore (v w r_m) :precision binary64 (if (<= r_m 1.3e-11) (/ (/ 2.0 r_m) r_m) (+ -1.5 (* r_m (* (* r_m w) (* -0.25 w))))))
r_m = fabs(r);
/* Herbie-generated binary64 approximation of
 *   ((3 + 2/r^2) - 0.125*(3 - 2v)*w^2*r^2/(1 - v)) - 4.5
 * over two regimes of r_m = |r| (computed by the caller).
 * `v` is unused: the series expansions behind these branches dropped it. */
double code(double v, double w, double r_m) {
double tmp;
if (r_m <= 1.3e-11) {
/* r tiny: only 2/r^2 survives; two divisions presumably avoid
 * underflow of r_m*r_m — confirm against the Herbie report */
tmp = (2.0 / r_m) / r_m;
} else {
/* otherwise: constant plus the dominant -0.25*w^2*r^2 term */
tmp = -1.5 + (r_m * ((r_m * w) * (-0.25 * w)));
}
return tmp;
}
r_m = abs(r)
real(8) function code(v, w, r_m)
real(8), intent (in) :: v
real(8), intent (in) :: w
real(8), intent (in) :: r_m
real(8) :: tmp
if (r_m <= 1.3d-11) then
tmp = (2.0d0 / r_m) / r_m
else
tmp = (-1.5d0) + (r_m * ((r_m * w) * ((-0.25d0) * w)))
end if
code = tmp
end function
r_m = Math.abs(r);
public static double code(double v, double w, double r_m) {
double tmp;
if (r_m <= 1.3e-11) {
tmp = (2.0 / r_m) / r_m;
} else {
tmp = -1.5 + (r_m * ((r_m * w) * (-0.25 * w)));
}
return tmp;
}
# r_m = math.fabs(r) -- the caller computes the absolute value of r and
# passes it in; kept as a comment because `r` is undefined at module scope.
def code(v, w, r_m):
    """Herbie-generated binary64 approximation of
    ((3 + 2/r**2) - 0.125*(3 - 2*v)*w**2*r**2/(1 - v)) - 4.5,
    split over two regimes of r_m = abs(r).

    v is unused: the series expansions behind these branches dropped it.
    """
    if r_m <= 1.3e-11:
        # r tiny: only the 2/r**2 term survives; two divisions
        # presumably avoid underflow of r_m*r_m -- confirm
        tmp = (2.0 / r_m) / r_m
    else:
        # otherwise: constant plus the dominant -0.25*w**2*r**2 term
        tmp = -1.5 + (r_m * ((r_m * w) * (-0.25 * w)))
    return tmp
r_m = abs(r) function code(v, w, r_m) tmp = 0.0 if (r_m <= 1.3e-11) tmp = Float64(Float64(2.0 / r_m) / r_m); else tmp = Float64(-1.5 + Float64(r_m * Float64(Float64(r_m * w) * Float64(-0.25 * w)))); end return tmp end
r_m = abs(r); function tmp_2 = code(v, w, r_m) tmp = 0.0; if (r_m <= 1.3e-11) tmp = (2.0 / r_m) / r_m; else tmp = -1.5 + (r_m * ((r_m * w) * (-0.25 * w))); end tmp_2 = tmp; end
r_m = N[Abs[r], $MachinePrecision] code[v_, w_, r$95$m_] := If[LessEqual[r$95$m, 1.3e-11], N[(N[(2.0 / r$95$m), $MachinePrecision] / r$95$m), $MachinePrecision], N[(-1.5 + N[(r$95$m * N[(N[(r$95$m * w), $MachinePrecision] * N[(-0.25 * w), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
r_m = \left|r\right|
\\
\begin{array}{l}
\mathbf{if}\;r\_m \leq 1.3 \cdot 10^{-11}:\\
\;\;\;\;\frac{\frac{2}{r\_m}}{r\_m}\\
\mathbf{else}:\\
\;\;\;\;-1.5 + r\_m \cdot \left(\left(r\_m \cdot w\right) \cdot \left(-0.25 \cdot w\right)\right)\\
\end{array}
\end{array}
if r < 1.3e-11Initial program 86.0%
associate--l-N/A
+-commutativeN/A
associate--l+N/A
+-lowering-+.f64N/A
/-lowering-/.f64N/A
*-lowering-*.f64N/A
associate--r+N/A
sub-negN/A
+-commutativeN/A
associate--l+N/A
metadata-evalN/A
metadata-evalN/A
metadata-evalN/A
Simplified97.1%
Taylor expanded in r around 0
/-lowering-/.f64N/A
unpow2N/A
*-lowering-*.f6459.2%
Simplified59.2%
associate-/r*N/A
/-lowering-/.f64N/A
/-lowering-/.f6459.3%
Applied egg-rr59.3%
if 1.3e-11 < r Initial program 91.3%
Taylor expanded in r around inf
Simplified91.3%
Taylor expanded in v around inf
mul-1-negN/A
distribute-neg-inN/A
metadata-evalN/A
distribute-lft-neg-inN/A
metadata-evalN/A
+-lowering-+.f64N/A
*-commutativeN/A
associate-*l*N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6475.6%
Simplified75.6%
associate-*l*N/A
*-commutativeN/A
*-lowering-*.f64N/A
associate-*l*N/A
associate-*r*N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f6487.4%
Applied egg-rr87.4%
Final simplification67.2%
r_m = (fabs.f64 r) (FPCore (v w r_m) :precision binary64 (if (<= r_m 1.4e-10) (/ (/ 2.0 r_m) r_m) (+ -1.5 (* (* r_m w) (* (* r_m w) -0.375)))))
r_m = fabs(r);
/* Herbie-generated binary64 approximation of
 *   ((3 + 2/r^2) - 0.125*(3 - 2v)*w^2*r^2/(1 - v)) - 4.5
 * over two regimes of r_m = |r| (computed by the caller).
 * `v` is unused: the series expansions behind these branches dropped it. */
double code(double v, double w, double r_m) {
double tmp;
if (r_m <= 1.4e-10) {
/* r tiny: only 2/r^2 survives; two divisions presumably avoid
 * underflow of r_m*r_m — confirm against the Herbie report */
tmp = (2.0 / r_m) / r_m;
} else {
/* otherwise: constant plus the -0.375*w^2*r^2 term */
tmp = -1.5 + ((r_m * w) * ((r_m * w) * -0.375));
}
return tmp;
}
r_m = abs(r)
real(8) function code(v, w, r_m)
real(8), intent (in) :: v
real(8), intent (in) :: w
real(8), intent (in) :: r_m
real(8) :: tmp
if (r_m <= 1.4d-10) then
tmp = (2.0d0 / r_m) / r_m
else
tmp = (-1.5d0) + ((r_m * w) * ((r_m * w) * (-0.375d0)))
end if
code = tmp
end function
r_m = Math.abs(r);
public static double code(double v, double w, double r_m) {
double tmp;
if (r_m <= 1.4e-10) {
tmp = (2.0 / r_m) / r_m;
} else {
tmp = -1.5 + ((r_m * w) * ((r_m * w) * -0.375));
}
return tmp;
}
r_m = math.fabs(r) def code(v, w, r_m): tmp = 0 if r_m <= 1.4e-10: tmp = (2.0 / r_m) / r_m else: tmp = -1.5 + ((r_m * w) * ((r_m * w) * -0.375)) return tmp
r_m = abs(r) function code(v, w, r_m) tmp = 0.0 if (r_m <= 1.4e-10) tmp = Float64(Float64(2.0 / r_m) / r_m); else tmp = Float64(-1.5 + Float64(Float64(r_m * w) * Float64(Float64(r_m * w) * -0.375))); end return tmp end
r_m = abs(r); function tmp_2 = code(v, w, r_m) tmp = 0.0; if (r_m <= 1.4e-10) tmp = (2.0 / r_m) / r_m; else tmp = -1.5 + ((r_m * w) * ((r_m * w) * -0.375)); end tmp_2 = tmp; end
r_m = N[Abs[r], $MachinePrecision] code[v_, w_, r$95$m_] := If[LessEqual[r$95$m, 1.4e-10], N[(N[(2.0 / r$95$m), $MachinePrecision] / r$95$m), $MachinePrecision], N[(-1.5 + N[(N[(r$95$m * w), $MachinePrecision] * N[(N[(r$95$m * w), $MachinePrecision] * -0.375), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
r_m = \left|r\right|
\\
\begin{array}{l}
\mathbf{if}\;r\_m \leq 1.4 \cdot 10^{-10}:\\
\;\;\;\;\frac{\frac{2}{r\_m}}{r\_m}\\
\mathbf{else}:\\
\;\;\;\;-1.5 + \left(r\_m \cdot w\right) \cdot \left(\left(r\_m \cdot w\right) \cdot -0.375\right)\\
\end{array}
\end{array}
if r < 1.40000000000000008e-10Initial program 86.0%
associate--l-N/A
+-commutativeN/A
associate--l+N/A
+-lowering-+.f64N/A
/-lowering-/.f64N/A
*-lowering-*.f64N/A
associate--r+N/A
sub-negN/A
+-commutativeN/A
associate--l+N/A
metadata-evalN/A
metadata-evalN/A
metadata-evalN/A
Simplified97.1%
Taylor expanded in r around 0
/-lowering-/.f64N/A
unpow2N/A
*-lowering-*.f6459.2%
Simplified59.2%
associate-/r*N/A
/-lowering-/.f64N/A
/-lowering-/.f6459.3%
Applied egg-rr59.3%
if 1.40000000000000008e-10 < r Initial program 91.3%
Taylor expanded in r around inf
Simplified91.3%
Taylor expanded in v around 0
mul-1-negN/A
distribute-neg-inN/A
metadata-evalN/A
distribute-lft-neg-inN/A
metadata-evalN/A
+-lowering-+.f64N/A
*-commutativeN/A
associate-*l*N/A
*-commutativeN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6479.4%
Simplified79.4%
associate-*r*N/A
swap-sqrN/A
associate-*r*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f6489.5%
Applied egg-rr89.5%
Final simplification67.8%
r_m = (fabs.f64 r) (FPCore (v w r_m) :precision binary64 (if (<= r_m 9.8e-11) (/ (/ 2.0 r_m) r_m) (+ -1.5 (* -0.375 (* w (* r_m (* r_m w)))))))
r_m = fabs(r);
/* Herbie-generated binary64 approximation of
 *   ((3 + 2/r^2) - 0.125*(3 - 2v)*w^2*r^2/(1 - v)) - 4.5
 * over two regimes of r_m = |r| (computed by the caller).
 * `v` is unused: the series expansions behind these branches dropped it. */
double code(double v, double w, double r_m) {
double tmp;
if (r_m <= 9.8e-11) {
/* r tiny: only 2/r^2 survives; two divisions presumably avoid
 * underflow of r_m*r_m — confirm against the Herbie report */
tmp = (2.0 / r_m) / r_m;
} else {
/* otherwise: constant plus the -0.375*w^2*r^2 term */
tmp = -1.5 + (-0.375 * (w * (r_m * (r_m * w))));
}
return tmp;
}
r_m = abs(r)
real(8) function code(v, w, r_m)
real(8), intent (in) :: v
real(8), intent (in) :: w
real(8), intent (in) :: r_m
real(8) :: tmp
if (r_m <= 9.8d-11) then
tmp = (2.0d0 / r_m) / r_m
else
tmp = (-1.5d0) + ((-0.375d0) * (w * (r_m * (r_m * w))))
end if
code = tmp
end function
r_m = Math.abs(r);
/**
 * Herbie alternative (threshold 9.8e-11) for the turbine expression.
 *
 * @param v   unused in this alternative; kept for interface compatibility
 * @param w   second input of the original expression
 * @param r_m magnitude |r| supplied by the caller
 * @return piecewise approximation value
 */
public static double code(double v, double w, double r_m) {
    if (r_m <= 9.8e-11) {
        // tiny |r|: singular 2/r^2 term, evaluated as two divisions
        return (2.0 / r_m) / r_m;
    }
    // series form; multiplication tree preserved (operands only commuted)
    // so the rounding behaviour matches the generated code exactly
    double prod = w * (r_m * (r_m * w));
    return (-0.375 * prod) + -1.5;
}
r_m = math.fabs(r) def code(v, w, r_m): tmp = 0 if r_m <= 9.8e-11: tmp = (2.0 / r_m) / r_m else: tmp = -1.5 + (-0.375 * (w * (r_m * (r_m * w)))) return tmp
r_m = abs(r) function code(v, w, r_m) tmp = 0.0 if (r_m <= 9.8e-11) tmp = Float64(Float64(2.0 / r_m) / r_m); else tmp = Float64(-1.5 + Float64(-0.375 * Float64(w * Float64(r_m * Float64(r_m * w))))); end return tmp end
r_m = abs(r); function tmp_2 = code(v, w, r_m) tmp = 0.0; if (r_m <= 9.8e-11) tmp = (2.0 / r_m) / r_m; else tmp = -1.5 + (-0.375 * (w * (r_m * (r_m * w)))); end tmp_2 = tmp; end
r_m = N[Abs[r], $MachinePrecision] code[v_, w_, r$95$m_] := If[LessEqual[r$95$m, 9.8e-11], N[(N[(2.0 / r$95$m), $MachinePrecision] / r$95$m), $MachinePrecision], N[(-1.5 + N[(-0.375 * N[(w * N[(r$95$m * N[(r$95$m * w), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
r_m = \left|r\right|
\\
\begin{array}{l}
\mathbf{if}\;r\_m \leq 9.8 \cdot 10^{-11}:\\
\;\;\;\;\frac{\frac{2}{r\_m}}{r\_m}\\
\mathbf{else}:\\
\;\;\;\;-1.5 + -0.375 \cdot \left(w \cdot \left(r\_m \cdot \left(r\_m \cdot w\right)\right)\right)\\
\end{array}
\end{array}
if r < 9.7999999999999998e-11Initial program 86.0%
associate--l-N/A
+-commutativeN/A
associate--l+N/A
+-lowering-+.f64N/A
/-lowering-/.f64N/A
*-lowering-*.f64N/A
associate--r+N/A
sub-negN/A
+-commutativeN/A
associate--l+N/A
metadata-evalN/A
metadata-evalN/A
metadata-evalN/A
Simplified97.1%
Taylor expanded in r around 0
/-lowering-/.f64N/A
unpow2N/A
*-lowering-*.f6459.2%
Simplified59.2%
associate-/r*N/A
/-lowering-/.f64N/A
/-lowering-/.f6459.3%
Applied egg-rr59.3%
if 9.7999999999999998e-11 < r Initial program 91.3%
Taylor expanded in r around inf
Simplified91.3%
Taylor expanded in v around 0
mul-1-negN/A
distribute-neg-inN/A
metadata-evalN/A
distribute-lft-neg-inN/A
metadata-evalN/A
+-lowering-+.f64N/A
*-commutativeN/A
associate-*l*N/A
*-commutativeN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6479.4%
Simplified79.4%
associate-*r*N/A
swap-sqrN/A
*-lowering-*.f64N/A
*-commutativeN/A
associate-*l*N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f6487.8%
Applied egg-rr87.8%
Final simplification67.3%
r_m = (fabs.f64 r) (FPCore (v w r_m) :precision binary64 (if (<= r_m 1.05e-10) (/ (/ 2.0 r_m) r_m) (+ -1.5 (* (* r_m r_m) (* -0.25 (* w w))))))
r_m = fabs(r);
/* Herbie alternative (threshold 1.05e-10): piecewise turbine approximation.
 * v is unused; r_m is the magnitude |r| supplied by the caller. */
double code(double v, double w, double r_m) {
    (void) v;  /* interface compatibility only */
    if (r_m <= 1.05e-10) {
        /* dominant singular term 2/r^2, evaluated as two divisions */
        return (2.0 / r_m) / r_m;
    }
    /* series form -1.5 - 0.25*w^2*r^2; operand grouping preserved */
    double r2 = r_m * r_m;
    double coeff = -0.25 * (w * w);
    return (r2 * coeff) + -1.5;
}
r_m = abs(r)
real(8) function code(v, w, r_m)
real(8), intent (in) :: v
real(8), intent (in) :: w
real(8), intent (in) :: r_m
real(8) :: tmp
if (r_m <= 1.05d-10) then
tmp = (2.0d0 / r_m) / r_m
else
tmp = (-1.5d0) + ((r_m * r_m) * ((-0.25d0) * (w * w)))
end if
code = tmp
end function
r_m = Math.abs(r);
public static double code(double v, double w, double r_m) {
double tmp;
if (r_m <= 1.05e-10) {
tmp = (2.0 / r_m) / r_m;
} else {
tmp = -1.5 + ((r_m * r_m) * (-0.25 * (w * w)));
}
return tmp;
}
r_m = math.fabs(r) def code(v, w, r_m): tmp = 0 if r_m <= 1.05e-10: tmp = (2.0 / r_m) / r_m else: tmp = -1.5 + ((r_m * r_m) * (-0.25 * (w * w))) return tmp
r_m = abs(r) function code(v, w, r_m) tmp = 0.0 if (r_m <= 1.05e-10) tmp = Float64(Float64(2.0 / r_m) / r_m); else tmp = Float64(-1.5 + Float64(Float64(r_m * r_m) * Float64(-0.25 * Float64(w * w)))); end return tmp end
r_m = abs(r); function tmp_2 = code(v, w, r_m) tmp = 0.0; if (r_m <= 1.05e-10) tmp = (2.0 / r_m) / r_m; else tmp = -1.5 + ((r_m * r_m) * (-0.25 * (w * w))); end tmp_2 = tmp; end
r_m = N[Abs[r], $MachinePrecision] code[v_, w_, r$95$m_] := If[LessEqual[r$95$m, 1.05e-10], N[(N[(2.0 / r$95$m), $MachinePrecision] / r$95$m), $MachinePrecision], N[(-1.5 + N[(N[(r$95$m * r$95$m), $MachinePrecision] * N[(-0.25 * N[(w * w), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
r_m = \left|r\right|
\\
\begin{array}{l}
\mathbf{if}\;r\_m \leq 1.05 \cdot 10^{-10}:\\
\;\;\;\;\frac{\frac{2}{r\_m}}{r\_m}\\
\mathbf{else}:\\
\;\;\;\;-1.5 + \left(r\_m \cdot r\_m\right) \cdot \left(-0.25 \cdot \left(w \cdot w\right)\right)\\
\end{array}
\end{array}
if r < 1.05e-10Initial program 86.0%
associate--l-N/A
+-commutativeN/A
associate--l+N/A
+-lowering-+.f64N/A
/-lowering-/.f64N/A
*-lowering-*.f64N/A
associate--r+N/A
sub-negN/A
+-commutativeN/A
associate--l+N/A
metadata-evalN/A
metadata-evalN/A
metadata-evalN/A
Simplified97.1%
Taylor expanded in r around 0
/-lowering-/.f64N/A
unpow2N/A
*-lowering-*.f6459.2%
Simplified59.2%
associate-/r*N/A
/-lowering-/.f64N/A
/-lowering-/.f6459.3%
Applied egg-rr59.3%
if 1.05e-10 < r Initial program 91.3%
Taylor expanded in r around inf
Simplified91.3%
Taylor expanded in v around inf
mul-1-negN/A
distribute-neg-inN/A
metadata-evalN/A
distribute-lft-neg-inN/A
metadata-evalN/A
+-lowering-+.f64N/A
*-commutativeN/A
associate-*l*N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6475.6%
Simplified75.6%
Final simplification63.9%
r_m = (fabs.f64 r) (FPCore (v w r_m) :precision binary64 (if (<= r_m 4.5e-11) (/ (/ 2.0 r_m) r_m) (+ -1.5 (* (* r_m r_m) (* -0.375 (* w w))))))
r_m = fabs(r);
/* Herbie alternative (threshold 4.5e-11): piecewise turbine approximation.
 * v is unused; r_m is the magnitude |r| supplied by the caller. */
double code(double v, double w, double r_m) {
    (void) v;  /* interface compatibility only */
    if (r_m <= 4.5e-11) {
        /* singular 2/r^2 term, evaluated as two divisions */
        return (2.0 / r_m) / r_m;
    }
    /* series form -1.5 - 0.375*w^2*r^2; operand grouping preserved */
    double r2 = r_m * r_m;
    double coeff = -0.375 * (w * w);
    return (r2 * coeff) + -1.5;
}
r_m = abs(r)
real(8) function code(v, w, r_m)
real(8), intent (in) :: v
real(8), intent (in) :: w
real(8), intent (in) :: r_m
real(8) :: tmp
if (r_m <= 4.5d-11) then
tmp = (2.0d0 / r_m) / r_m
else
tmp = (-1.5d0) + ((r_m * r_m) * ((-0.375d0) * (w * w)))
end if
code = tmp
end function
r_m = Math.abs(r);
public static double code(double v, double w, double r_m) {
double tmp;
if (r_m <= 4.5e-11) {
tmp = (2.0 / r_m) / r_m;
} else {
tmp = -1.5 + ((r_m * r_m) * (-0.375 * (w * w)));
}
return tmp;
}
r_m = math.fabs(r) def code(v, w, r_m): tmp = 0 if r_m <= 4.5e-11: tmp = (2.0 / r_m) / r_m else: tmp = -1.5 + ((r_m * r_m) * (-0.375 * (w * w))) return tmp
r_m = abs(r) function code(v, w, r_m) tmp = 0.0 if (r_m <= 4.5e-11) tmp = Float64(Float64(2.0 / r_m) / r_m); else tmp = Float64(-1.5 + Float64(Float64(r_m * r_m) * Float64(-0.375 * Float64(w * w)))); end return tmp end
r_m = abs(r); function tmp_2 = code(v, w, r_m) tmp = 0.0; if (r_m <= 4.5e-11) tmp = (2.0 / r_m) / r_m; else tmp = -1.5 + ((r_m * r_m) * (-0.375 * (w * w))); end tmp_2 = tmp; end
r_m = N[Abs[r], $MachinePrecision] code[v_, w_, r$95$m_] := If[LessEqual[r$95$m, 4.5e-11], N[(N[(2.0 / r$95$m), $MachinePrecision] / r$95$m), $MachinePrecision], N[(-1.5 + N[(N[(r$95$m * r$95$m), $MachinePrecision] * N[(-0.375 * N[(w * w), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
r_m = \left|r\right|
\\
\begin{array}{l}
\mathbf{if}\;r\_m \leq 4.5 \cdot 10^{-11}:\\
\;\;\;\;\frac{\frac{2}{r\_m}}{r\_m}\\
\mathbf{else}:\\
\;\;\;\;-1.5 + \left(r\_m \cdot r\_m\right) \cdot \left(-0.375 \cdot \left(w \cdot w\right)\right)\\
\end{array}
\end{array}
if r < 4.5e-11Initial program 86.0%
associate--l-N/A
+-commutativeN/A
associate--l+N/A
+-lowering-+.f64N/A
/-lowering-/.f64N/A
*-lowering-*.f64N/A
associate--r+N/A
sub-negN/A
+-commutativeN/A
associate--l+N/A
metadata-evalN/A
metadata-evalN/A
metadata-evalN/A
Simplified97.1%
Taylor expanded in r around 0
/-lowering-/.f64N/A
unpow2N/A
*-lowering-*.f6459.2%
Simplified59.2%
associate-/r*N/A
/-lowering-/.f64N/A
/-lowering-/.f6459.3%
Applied egg-rr59.3%
if 4.5e-11 < r Initial program 91.3%
Taylor expanded in r around inf
Simplified91.3%
Taylor expanded in v around 0
mul-1-negN/A
distribute-neg-inN/A
metadata-evalN/A
distribute-lft-neg-inN/A
metadata-evalN/A
+-lowering-+.f64N/A
*-commutativeN/A
associate-*l*N/A
*-commutativeN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6479.4%
Simplified79.4%
Final simplification65.0%
r_m = (fabs.f64 r) (FPCore (v w r_m) :precision binary64 (if (<= r_m 8e+18) (+ (/ 2.0 (* r_m r_m)) -1.5) (* (* r_m r_m) (* -0.25 (* w w)))))
r_m = fabs(r);
/* Herbie alternative: up to |r| = 8e18 use the 2/r^2 - 1.5 form; beyond
 * that the -(1/4)*w^2*r^2 term dominates. v is unused. */
double code(double v, double w, double r_m) {
    (void) v;
    double r2 = r_m * r_m;  /* r_m^2 is needed on both paths */
    return (r_m <= 8e+18)
        ? (2.0 / r2) + -1.5
        : r2 * (-0.25 * (w * w));
}
r_m = abs(r)
real(8) function code(v, w, r_m)
real(8), intent (in) :: v
real(8), intent (in) :: w
real(8), intent (in) :: r_m
real(8) :: tmp
if (r_m <= 8d+18) then
tmp = (2.0d0 / (r_m * r_m)) + (-1.5d0)
else
tmp = (r_m * r_m) * ((-0.25d0) * (w * w))
end if
code = tmp
end function
r_m = Math.abs(r);
public static double code(double v, double w, double r_m) {
double tmp;
if (r_m <= 8e+18) {
tmp = (2.0 / (r_m * r_m)) + -1.5;
} else {
tmp = (r_m * r_m) * (-0.25 * (w * w));
}
return tmp;
}
r_m = math.fabs(r) def code(v, w, r_m): tmp = 0 if r_m <= 8e+18: tmp = (2.0 / (r_m * r_m)) + -1.5 else: tmp = (r_m * r_m) * (-0.25 * (w * w)) return tmp
r_m = abs(r) function code(v, w, r_m) tmp = 0.0 if (r_m <= 8e+18) tmp = Float64(Float64(2.0 / Float64(r_m * r_m)) + -1.5); else tmp = Float64(Float64(r_m * r_m) * Float64(-0.25 * Float64(w * w))); end return tmp end
r_m = abs(r); function tmp_2 = code(v, w, r_m) tmp = 0.0; if (r_m <= 8e+18) tmp = (2.0 / (r_m * r_m)) + -1.5; else tmp = (r_m * r_m) * (-0.25 * (w * w)); end tmp_2 = tmp; end
r_m = N[Abs[r], $MachinePrecision] code[v_, w_, r$95$m_] := If[LessEqual[r$95$m, 8e+18], N[(N[(2.0 / N[(r$95$m * r$95$m), $MachinePrecision]), $MachinePrecision] + -1.5), $MachinePrecision], N[(N[(r$95$m * r$95$m), $MachinePrecision] * N[(-0.25 * N[(w * w), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
r_m = \left|r\right|
\\
\begin{array}{l}
\mathbf{if}\;r\_m \leq 8 \cdot 10^{+18}:\\
\;\;\;\;\frac{2}{r\_m \cdot r\_m} + -1.5\\
\mathbf{else}:\\
\;\;\;\;\left(r\_m \cdot r\_m\right) \cdot \left(-0.25 \cdot \left(w \cdot w\right)\right)\\
\end{array}
\end{array}
if r < 8e18Initial program 86.0%
associate--l-N/A
+-commutativeN/A
associate--l+N/A
+-lowering-+.f64N/A
/-lowering-/.f64N/A
*-lowering-*.f64N/A
associate--r+N/A
sub-negN/A
+-commutativeN/A
associate--l+N/A
metadata-evalN/A
metadata-evalN/A
metadata-evalN/A
Simplified97.1%
Taylor expanded in r around 0
Simplified64.8%
if 8e18 < r Initial program 91.2%
Taylor expanded in r around inf
Simplified91.2%
Taylor expanded in v around inf
mul-1-negN/A
distribute-neg-inN/A
metadata-evalN/A
distribute-lft-neg-inN/A
metadata-evalN/A
+-lowering-+.f64N/A
*-commutativeN/A
associate-*l*N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6475.3%
Simplified75.3%
Taylor expanded in r around inf
*-commutativeN/A
associate-*l*N/A
*-commutativeN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6461.8%
Simplified61.8%
Final simplification64.0%
r_m = (fabs.f64 r) (FPCore (v w r_m) :precision binary64 (if (<= r_m 3.7e-12) (/ (/ 2.0 r_m) r_m) (* (* r_m r_m) (* -0.375 (* w w)))))
r_m = fabs(r);
/* Herbie alternative (threshold 3.7e-12): v is unused; r_m is |r|. */
double code(double v, double w, double r_m) {
    (void) v;
    if (r_m > 3.7e-12) {
        /* quadratic term -0.375*w^2*r^2 dominates */
        return (r_m * r_m) * (-0.375 * (w * w));
    }
    /* tiny |r|: singular 2/r^2 term, evaluated as two divisions */
    return (2.0 / r_m) / r_m;
}
r_m = abs(r)
real(8) function code(v, w, r_m)
real(8), intent (in) :: v
real(8), intent (in) :: w
real(8), intent (in) :: r_m
real(8) :: tmp
if (r_m <= 3.7d-12) then
tmp = (2.0d0 / r_m) / r_m
else
tmp = (r_m * r_m) * ((-0.375d0) * (w * w))
end if
code = tmp
end function
r_m = Math.abs(r);
public static double code(double v, double w, double r_m) {
double tmp;
if (r_m <= 3.7e-12) {
tmp = (2.0 / r_m) / r_m;
} else {
tmp = (r_m * r_m) * (-0.375 * (w * w));
}
return tmp;
}
r_m = math.fabs(r) def code(v, w, r_m): tmp = 0 if r_m <= 3.7e-12: tmp = (2.0 / r_m) / r_m else: tmp = (r_m * r_m) * (-0.375 * (w * w)) return tmp
r_m = abs(r) function code(v, w, r_m) tmp = 0.0 if (r_m <= 3.7e-12) tmp = Float64(Float64(2.0 / r_m) / r_m); else tmp = Float64(Float64(r_m * r_m) * Float64(-0.375 * Float64(w * w))); end return tmp end
r_m = abs(r); function tmp_2 = code(v, w, r_m) tmp = 0.0; if (r_m <= 3.7e-12) tmp = (2.0 / r_m) / r_m; else tmp = (r_m * r_m) * (-0.375 * (w * w)); end tmp_2 = tmp; end
r_m = N[Abs[r], $MachinePrecision] code[v_, w_, r$95$m_] := If[LessEqual[r$95$m, 3.7e-12], N[(N[(2.0 / r$95$m), $MachinePrecision] / r$95$m), $MachinePrecision], N[(N[(r$95$m * r$95$m), $MachinePrecision] * N[(-0.375 * N[(w * w), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
r_m = \left|r\right|
\\
\begin{array}{l}
\mathbf{if}\;r\_m \leq 3.7 \cdot 10^{-12}:\\
\;\;\;\;\frac{\frac{2}{r\_m}}{r\_m}\\
\mathbf{else}:\\
\;\;\;\;\left(r\_m \cdot r\_m\right) \cdot \left(-0.375 \cdot \left(w \cdot w\right)\right)\\
\end{array}
\end{array}
if r < 3.69999999999999999e-12Initial program 86.0%
associate--l-N/A
+-commutativeN/A
associate--l+N/A
+-lowering-+.f64N/A
/-lowering-/.f64N/A
*-lowering-*.f64N/A
associate--r+N/A
sub-negN/A
+-commutativeN/A
associate--l+N/A
metadata-evalN/A
metadata-evalN/A
metadata-evalN/A
Simplified97.1%
Taylor expanded in r around 0
/-lowering-/.f64N/A
unpow2N/A
*-lowering-*.f6459.2%
Simplified59.2%
associate-/r*N/A
/-lowering-/.f64N/A
/-lowering-/.f6459.3%
Applied egg-rr59.3%
if 3.69999999999999999e-12 < r Initial program 91.3%
associate--l-N/A
+-commutativeN/A
associate--l+N/A
+-lowering-+.f64N/A
/-lowering-/.f64N/A
*-lowering-*.f64N/A
associate--r+N/A
sub-negN/A
+-commutativeN/A
associate--l+N/A
metadata-evalN/A
metadata-evalN/A
metadata-evalN/A
Simplified99.8%
Taylor expanded in v around 0
*-commutativeN/A
associate-*l*N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6479.4%
Simplified79.4%
Taylor expanded in r around inf
*-commutativeN/A
associate-*l*N/A
*-commutativeN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6464.8%
Simplified64.8%
Final simplification60.8%
r_m = (fabs.f64 r) (FPCore (v w r_m) :precision binary64 (if (<= r_m 4.5e-11) (/ (/ 2.0 r_m) r_m) (* (* r_m r_m) (* w (* w -0.375)))))
r_m = fabs(r);
/* Herbie alternative (threshold 4.5e-11): v is unused; r_m is |r|. */
double code(double v, double w, double r_m) {
    (void) v;
    if (r_m <= 4.5e-11)
        return (2.0 / r_m) / r_m;    /* singular 2/r^2 term */
    /* quadratic term; grouping w*(w*-0.375) preserved for rounding */
    double sq = r_m * r_m;
    return sq * (w * (w * -0.375));
}
r_m = abs(r)
real(8) function code(v, w, r_m)
real(8), intent (in) :: v
real(8), intent (in) :: w
real(8), intent (in) :: r_m
real(8) :: tmp
if (r_m <= 4.5d-11) then
tmp = (2.0d0 / r_m) / r_m
else
tmp = (r_m * r_m) * (w * (w * (-0.375d0)))
end if
code = tmp
end function
r_m = Math.abs(r);
public static double code(double v, double w, double r_m) {
double tmp;
if (r_m <= 4.5e-11) {
tmp = (2.0 / r_m) / r_m;
} else {
tmp = (r_m * r_m) * (w * (w * -0.375));
}
return tmp;
}
r_m = math.fabs(r) def code(v, w, r_m): tmp = 0 if r_m <= 4.5e-11: tmp = (2.0 / r_m) / r_m else: tmp = (r_m * r_m) * (w * (w * -0.375)) return tmp
r_m = abs(r) function code(v, w, r_m) tmp = 0.0 if (r_m <= 4.5e-11) tmp = Float64(Float64(2.0 / r_m) / r_m); else tmp = Float64(Float64(r_m * r_m) * Float64(w * Float64(w * -0.375))); end return tmp end
r_m = abs(r); function tmp_2 = code(v, w, r_m) tmp = 0.0; if (r_m <= 4.5e-11) tmp = (2.0 / r_m) / r_m; else tmp = (r_m * r_m) * (w * (w * -0.375)); end tmp_2 = tmp; end
r_m = N[Abs[r], $MachinePrecision] code[v_, w_, r$95$m_] := If[LessEqual[r$95$m, 4.5e-11], N[(N[(2.0 / r$95$m), $MachinePrecision] / r$95$m), $MachinePrecision], N[(N[(r$95$m * r$95$m), $MachinePrecision] * N[(w * N[(w * -0.375), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
r_m = \left|r\right|
\\
\begin{array}{l}
\mathbf{if}\;r\_m \leq 4.5 \cdot 10^{-11}:\\
\;\;\;\;\frac{\frac{2}{r\_m}}{r\_m}\\
\mathbf{else}:\\
\;\;\;\;\left(r\_m \cdot r\_m\right) \cdot \left(w \cdot \left(w \cdot -0.375\right)\right)\\
\end{array}
\end{array}
if r < 4.5e-11Initial program 86.0%
associate--l-N/A
+-commutativeN/A
associate--l+N/A
+-lowering-+.f64N/A
/-lowering-/.f64N/A
*-lowering-*.f64N/A
associate--r+N/A
sub-negN/A
+-commutativeN/A
associate--l+N/A
metadata-evalN/A
metadata-evalN/A
metadata-evalN/A
Simplified97.1%
Taylor expanded in r around 0
/-lowering-/.f64N/A
unpow2N/A
*-lowering-*.f6459.2%
Simplified59.2%
associate-/r*N/A
/-lowering-/.f64N/A
/-lowering-/.f6459.3%
Applied egg-rr59.3%
if 4.5e-11 < r Initial program 91.3%
associate--l-N/A
+-commutativeN/A
associate--l+N/A
+-lowering-+.f64N/A
/-lowering-/.f64N/A
*-lowering-*.f64N/A
associate--r+N/A
sub-negN/A
+-commutativeN/A
associate--l+N/A
metadata-evalN/A
metadata-evalN/A
metadata-evalN/A
Simplified99.8%
Taylor expanded in v around 0
*-commutativeN/A
associate-*l*N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6479.4%
Simplified79.4%
associate-*r*N/A
swap-sqrN/A
*-lowering-*.f64N/A
associate-*l*N/A
*-commutativeN/A
associate-*l*N/A
*-lowering-*.f64N/A
associate-*l*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-lowering-*.f6489.5%
Applied egg-rr89.5%
Taylor expanded in r around inf
*-commutativeN/A
associate-*l*N/A
*-commutativeN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
*-commutativeN/A
unpow2N/A
associate-*l*N/A
*-lowering-*.f64N/A
*-lowering-*.f6464.8%
Simplified64.8%
r_m = (fabs.f64 r) (FPCore (v w r_m) :precision binary64 (if (<= r_m 1.4e-10) (/ (/ 2.0 r_m) r_m) -1.5))
r_m = fabs(r);
/* Herbie alternative (threshold 1.4e-10): the large-|r| limit collapses to
 * the constant -1.5; both v and w are unused. */
double code(double v, double w, double r_m) {
    (void) v;
    (void) w;
    return (r_m <= 1.4e-10) ? (2.0 / r_m) / r_m : -1.5;
}
r_m = abs(r)
real(8) function code(v, w, r_m)
real(8), intent (in) :: v
real(8), intent (in) :: w
real(8), intent (in) :: r_m
real(8) :: tmp
if (r_m <= 1.4d-10) then
tmp = (2.0d0 / r_m) / r_m
else
tmp = -1.5d0
end if
code = tmp
end function
r_m = Math.abs(r);
public static double code(double v, double w, double r_m) {
double tmp;
if (r_m <= 1.4e-10) {
tmp = (2.0 / r_m) / r_m;
} else {
tmp = -1.5;
}
return tmp;
}
r_m = math.fabs(r) def code(v, w, r_m): tmp = 0 if r_m <= 1.4e-10: tmp = (2.0 / r_m) / r_m else: tmp = -1.5 return tmp
r_m = abs(r) function code(v, w, r_m) tmp = 0.0 if (r_m <= 1.4e-10) tmp = Float64(Float64(2.0 / r_m) / r_m); else tmp = -1.5; end return tmp end
r_m = abs(r); function tmp_2 = code(v, w, r_m) tmp = 0.0; if (r_m <= 1.4e-10) tmp = (2.0 / r_m) / r_m; else tmp = -1.5; end tmp_2 = tmp; end
r_m = N[Abs[r], $MachinePrecision] code[v_, w_, r$95$m_] := If[LessEqual[r$95$m, 1.4e-10], N[(N[(2.0 / r$95$m), $MachinePrecision] / r$95$m), $MachinePrecision], -1.5]
\begin{array}{l}
r_m = \left|r\right|
\\
\begin{array}{l}
\mathbf{if}\;r\_m \leq 1.4 \cdot 10^{-10}:\\
\;\;\;\;\frac{\frac{2}{r\_m}}{r\_m}\\
\mathbf{else}:\\
\;\;\;\;-1.5\\
\end{array}
\end{array}
if r < 1.40000000000000008e-10Initial program 86.0%
associate--l-N/A
+-commutativeN/A
associate--l+N/A
+-lowering-+.f64N/A
/-lowering-/.f64N/A
*-lowering-*.f64N/A
associate--r+N/A
sub-negN/A
+-commutativeN/A
associate--l+N/A
metadata-evalN/A
metadata-evalN/A
metadata-evalN/A
Simplified97.1%
Taylor expanded in r around 0
/-lowering-/.f64N/A
unpow2N/A
*-lowering-*.f6459.2%
Simplified59.2%
associate-/r*N/A
/-lowering-/.f64N/A
/-lowering-/.f6459.3%
Applied egg-rr59.3%
if 1.40000000000000008e-10 < r Initial program 91.3%
Taylor expanded in r around inf
Simplified91.3%
Taylor expanded in w around 0
Simplified20.4%
r_m = (fabs.f64 r) (FPCore (v w r_m) :precision binary64 (if (<= r_m 1.4e-10) (/ 2.0 (* r_m r_m)) -1.5))
r_m = fabs(r);
/* Herbie alternative (threshold 1.4e-10): tiny |r| uses 2/r^2 with a single
 * division; otherwise the constant limit -1.5. v and w are unused. */
double code(double v, double w, double r_m) {
    (void) v;
    (void) w;
    return (r_m <= 1.4e-10) ? 2.0 / (r_m * r_m) : -1.5;
}
r_m = abs(r)
real(8) function code(v, w, r_m)
real(8), intent (in) :: v
real(8), intent (in) :: w
real(8), intent (in) :: r_m
real(8) :: tmp
if (r_m <= 1.4d-10) then
tmp = 2.0d0 / (r_m * r_m)
else
tmp = -1.5d0
end if
code = tmp
end function
r_m = Math.abs(r);
public static double code(double v, double w, double r_m) {
double tmp;
if (r_m <= 1.4e-10) {
tmp = 2.0 / (r_m * r_m);
} else {
tmp = -1.5;
}
return tmp;
}
r_m = math.fabs(r) def code(v, w, r_m): tmp = 0 if r_m <= 1.4e-10: tmp = 2.0 / (r_m * r_m) else: tmp = -1.5 return tmp
r_m = abs(r) function code(v, w, r_m) tmp = 0.0 if (r_m <= 1.4e-10) tmp = Float64(2.0 / Float64(r_m * r_m)); else tmp = -1.5; end return tmp end
r_m = abs(r); function tmp_2 = code(v, w, r_m) tmp = 0.0; if (r_m <= 1.4e-10) tmp = 2.0 / (r_m * r_m); else tmp = -1.5; end tmp_2 = tmp; end
r_m = N[Abs[r], $MachinePrecision] code[v_, w_, r$95$m_] := If[LessEqual[r$95$m, 1.4e-10], N[(2.0 / N[(r$95$m * r$95$m), $MachinePrecision]), $MachinePrecision], -1.5]
\begin{array}{l}
r_m = \left|r\right|
\\
\begin{array}{l}
\mathbf{if}\;r\_m \leq 1.4 \cdot 10^{-10}:\\
\;\;\;\;\frac{2}{r\_m \cdot r\_m}\\
\mathbf{else}:\\
\;\;\;\;-1.5\\
\end{array}
\end{array}
if r < 1.40000000000000008e-10Initial program 86.0%
associate--l-N/A
+-commutativeN/A
associate--l+N/A
+-lowering-+.f64N/A
/-lowering-/.f64N/A
*-lowering-*.f64N/A
associate--r+N/A
sub-negN/A
+-commutativeN/A
associate--l+N/A
metadata-evalN/A
metadata-evalN/A
metadata-evalN/A
Simplified97.1%
Taylor expanded in r around 0
/-lowering-/.f64N/A
unpow2N/A
*-lowering-*.f6459.2%
Simplified59.2%
if 1.40000000000000008e-10 < r Initial program 91.3%
Taylor expanded in r around inf
Simplified91.3%
Taylor expanded in w around 0
Simplified20.4%
r_m = (fabs.f64 r) (FPCore (v w r_m) :precision binary64 (+ (/ 2.0 (* r_m r_m)) -1.5))
r_m = fabs(r);
/* Herbie alternative: unconditional 2/r^2 - 1.5 form; v and w are unused. */
double code(double v, double w, double r_m) {
    (void) v;
    (void) w;
    double singular = 2.0 / (r_m * r_m);  /* dominant 2/r^2 term */
    return singular - 1.5;                /* identical to singular + -1.5 */
}
r_m = abs(r)
real(8) function code(v, w, r_m)
real(8), intent (in) :: v
real(8), intent (in) :: w
real(8), intent (in) :: r_m
code = (2.0d0 / (r_m * r_m)) + (-1.5d0)
end function
r_m = Math.abs(r);
public static double code(double v, double w, double r_m) {
return (2.0 / (r_m * r_m)) + -1.5;
}
r_m = math.fabs(r) def code(v, w, r_m): return (2.0 / (r_m * r_m)) + -1.5
r_m = abs(r) function code(v, w, r_m) return Float64(Float64(2.0 / Float64(r_m * r_m)) + -1.5) end
r_m = abs(r); function tmp = code(v, w, r_m) tmp = (2.0 / (r_m * r_m)) + -1.5; end
r_m = N[Abs[r], $MachinePrecision] code[v_, w_, r$95$m_] := N[(N[(2.0 / N[(r$95$m * r$95$m), $MachinePrecision]), $MachinePrecision] + -1.5), $MachinePrecision]
\begin{array}{l}
r_m = \left|r\right|
\\
\frac{2}{r\_m \cdot r\_m} + -1.5
\end{array}
Initial program 87.5%
associate--l-N/A
+-commutativeN/A
associate--l+N/A
+-lowering-+.f64N/A
/-lowering-/.f64N/A
*-lowering-*.f64N/A
associate--r+N/A
sub-negN/A
+-commutativeN/A
associate--l+N/A
metadata-evalN/A
metadata-evalN/A
metadata-evalN/A
Simplified97.9%
Taylor expanded in r around 0
Simplified52.2%
r_m = (fabs.f64 r) (FPCore (v w r_m) :precision binary64 -1.5)
r_m = fabs(r);
/* Herbie alternative fully collapsed by Taylor expansion: the expression
 * reduces to the constant -1.5 regardless of inputs. */
double code(double v, double w, double r_m) {
    (void) v;
    (void) w;
    (void) r_m;
    return -1.5;
}
r_m = abs(r)
real(8) function code(v, w, r_m)
real(8), intent (in) :: v
real(8), intent (in) :: w
real(8), intent (in) :: r_m
code = -1.5d0
end function
r_m = Math.abs(r);
public static double code(double v, double w, double r_m) {
return -1.5;
}
r_m = math.fabs(r) def code(v, w, r_m): return -1.5
r_m = abs(r) function code(v, w, r_m) return -1.5 end
r_m = abs(r); function tmp = code(v, w, r_m) tmp = -1.5; end
r_m = N[Abs[r], $MachinePrecision] code[v_, w_, r$95$m_] := -1.5
\begin{array}{l}
r_m = \left|r\right|
\\
-1.5
\end{array}
Initial program 87.5%
Taylor expanded in r around inf
Simplified51.6%
Taylor expanded in w around 0
Simplified10.5%
herbie shell --seed 2024163
(FPCore (v w r)
:name "Rosa's TurbineBenchmark"
:precision binary64
(- (- (+ 3.0 (/ 2.0 (* r r))) (/ (* (* 0.125 (- 3.0 (* 2.0 v))) (* (* (* w w) r) r)) (- 1.0 v))) 4.5))