
(FPCore (v w r) :precision binary64 (- (- (+ 3.0 (/ 2.0 (* r r))) (/ (* (* 0.125 (- 3.0 (* 2.0 v))) (* (* (* w w) r) r)) (- 1.0 v))) 4.5))
/*
 * Baseline Herbie program, binary64:
 *   ((3 + 2/r^2) - (0.125*(3 - 2v) * w^2 * r^2) / (1 - v)) - 4.5
 */
double code(double v, double w, double r) {
    double lead = 3.0 + (2.0 / (r * r));
    double numer = (0.125 * (3.0 - (2.0 * v))) * (((w * w) * r) * r);
    return (lead - (numer / (1.0 - v))) - 4.5;
}
! Baseline Herbie program (binary64):
!   ((3 + 2/r**2) - (0.125*(3 - 2*v) * w**2 * r**2) / (1 - v)) - 4.5
real(8) function code(v, w, r)
real(8), intent (in) :: v
real(8), intent (in) :: w
real(8), intent (in) :: r
code = ((3.0d0 + (2.0d0 / (r * r))) - (((0.125d0 * (3.0d0 - (2.0d0 * v))) * (((w * w) * r) * r)) / (1.0d0 - v))) - 4.5d0
end function
/** Baseline Herbie program: ((3 + 2/r^2) - (0.125*(3 - 2v)*w^2*r^2)/(1 - v)) - 4.5. */
public static double code(double v, double w, double r) {
    double lead = 3.0 + (2.0 / (r * r));
    double numer = (0.125 * (3.0 - (2.0 * v))) * (((w * w) * r) * r);
    return (lead - (numer / (1.0 - v))) - 4.5;
}
def code(v, w, r):
    """Baseline Herbie program: ((3 + 2/r^2) - (0.125*(3 - 2v)*w^2*r^2)/(1 - v)) - 4.5."""
    lead = 3.0 + (2.0 / (r * r))
    numer = (0.125 * (3.0 - (2.0 * v))) * (((w * w) * r) * r)
    return (lead - (numer / (1.0 - v))) - 4.5
# Baseline Herbie program; the explicit Float64() wrappers pin binary64 rounding at every step.
function code(v, w, r) return Float64(Float64(Float64(3.0 + Float64(2.0 / Float64(r * r))) - Float64(Float64(Float64(0.125 * Float64(3.0 - Float64(2.0 * v))) * Float64(Float64(Float64(w * w) * r) * r)) / Float64(1.0 - v))) - 4.5) end
% Baseline Herbie program (double precision); same expression as the FPCore above.
function tmp = code(v, w, r) tmp = ((3.0 + (2.0 / (r * r))) - (((0.125 * (3.0 - (2.0 * v))) * (((w * w) * r) * r)) / (1.0 - v))) - 4.5; end
(* Baseline Herbie program; each N[..., $MachinePrecision] rounds that subexpression to machine precision. *)
code[v_, w_, r_] := N[(N[(N[(3.0 + N[(2.0 / N[(r * r), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(N[(N[(0.125 * N[(3.0 - N[(2.0 * v), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(N[(N[(w * w), $MachinePrecision] * r), $MachinePrecision] * r), $MachinePrecision]), $MachinePrecision] / N[(1.0 - v), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - 4.5), $MachinePrecision]
\begin{array}{l}
\\
\left(\left(3 + \frac{2}{r \cdot r}\right) - \frac{\left(0.125 \cdot \left(3 - 2 \cdot v\right)\right) \cdot \left(\left(\left(w \cdot w\right) \cdot r\right) \cdot r\right)}{1 - v}\right) - 4.5
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 7 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (v w r) :precision binary64 (- (- (+ 3.0 (/ 2.0 (* r r))) (/ (* (* 0.125 (- 3.0 (* 2.0 v))) (* (* (* w w) r) r)) (- 1.0 v))) 4.5))
/* Baseline Herbie program (binary64); repeated rendering of the same FPCore as above. */
double code(double v, double w, double r) {
return ((3.0 + (2.0 / (r * r))) - (((0.125 * (3.0 - (2.0 * v))) * (((w * w) * r) * r)) / (1.0 - v))) - 4.5;
}
! Baseline Herbie program (binary64); repeated rendering of the same expression as above.
real(8) function code(v, w, r)
real(8), intent (in) :: v
real(8), intent (in) :: w
real(8), intent (in) :: r
code = ((3.0d0 + (2.0d0 / (r * r))) - (((0.125d0 * (3.0d0 - (2.0d0 * v))) * (((w * w) * r) * r)) / (1.0d0 - v))) - 4.5d0
end function
/** Baseline Herbie program (binary64); repeated rendering of the same expression as above. */
public static double code(double v, double w, double r) {
return ((3.0 + (2.0 / (r * r))) - (((0.125 * (3.0 - (2.0 * v))) * (((w * w) * r) * r)) / (1.0 - v))) - 4.5;
}
def code(v, w, r):
    # Baseline Herbie program (binary64); repeated rendering of the FPCore listing above.
    r_sq = r * r
    damping = 0.125 * (3.0 - (2.0 * v))
    profile = ((w * w) * r) * r
    return ((3.0 + (2.0 / r_sq)) - ((damping * profile) / (1.0 - v))) - 4.5
# Baseline Herbie program; Float64() wrappers pin binary64 rounding (repeated rendering).
function code(v, w, r) return Float64(Float64(Float64(3.0 + Float64(2.0 / Float64(r * r))) - Float64(Float64(Float64(0.125 * Float64(3.0 - Float64(2.0 * v))) * Float64(Float64(Float64(w * w) * r) * r)) / Float64(1.0 - v))) - 4.5) end
% Baseline Herbie program (double precision); repeated rendering of the expression above.
function tmp = code(v, w, r) tmp = ((3.0 + (2.0 / (r * r))) - (((0.125 * (3.0 - (2.0 * v))) * (((w * w) * r) * r)) / (1.0 - v))) - 4.5; end
(* Baseline Herbie program; repeated rendering, machine-precision rounding at each step. *)
code[v_, w_, r_] := N[(N[(N[(3.0 + N[(2.0 / N[(r * r), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(N[(N[(0.125 * N[(3.0 - N[(2.0 * v), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(N[(N[(w * w), $MachinePrecision] * r), $MachinePrecision] * r), $MachinePrecision]), $MachinePrecision] / N[(1.0 - v), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - 4.5), $MachinePrecision]
\begin{array}{l}
\\
\left(\left(3 + \frac{2}{r \cdot r}\right) - \frac{\left(0.125 \cdot \left(3 - 2 \cdot v\right)\right) \cdot \left(\left(\left(w \cdot w\right) \cdot r\right) \cdot r\right)}{1 - v}\right) - 4.5
\end{array}
(FPCore (v w r) :precision binary64 (+ (/ (/ 2.0 r) r) (+ -1.5 (* (fma v -0.25 0.375) (* (* r w) (* (* r w) (/ -1.0 (- 1.0 v))))))))
double code(double v, double w, double r) {
return ((2.0 / r) / r) + (-1.5 + (fma(v, -0.25, 0.375) * ((r * w) * ((r * w) * (-1.0 / (1.0 - v))))));
}
# Herbie alternative using fma(v, -0.25, 0.375); Float64() wrappers pin binary64 rounding.
function code(v, w, r) return Float64(Float64(Float64(2.0 / r) / r) + Float64(-1.5 + Float64(fma(v, -0.25, 0.375) * Float64(Float64(r * w) * Float64(Float64(r * w) * Float64(-1.0 / Float64(1.0 - v))))))) end
(* Herbie alternative; v * -0.25 + 0.375 models the fma of the FPCore form above. *)
code[v_, w_, r_] := N[(N[(N[(2.0 / r), $MachinePrecision] / r), $MachinePrecision] + N[(-1.5 + N[(N[(v * -0.25 + 0.375), $MachinePrecision] * N[(N[(r * w), $MachinePrecision] * N[(N[(r * w), $MachinePrecision] * N[(-1.0 / N[(1.0 - v), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\frac{2}{r}}{r} + \left(-1.5 + \mathsf{fma}\left(v, -0.25, 0.375\right) \cdot \left(\left(r \cdot w\right) \cdot \left(\left(r \cdot w\right) \cdot \frac{-1}{1 - v}\right)\right)\right)
\end{array}
Initial program 84.2%
Simplified 96.4%
div-inv 96.4%
associate-*r* 99.8%
associate-*l* 99.8%
Applied egg-rr 99.8%
Final simplification 99.8%
(FPCore (v w r)
:precision binary64
(+
-4.5
(+
3.0
(-
(/ 2.0 (* r r))
(/
(* 0.125 (+ 3.0 (* v -2.0)))
(* (/ (- 1.0 v) (* r w)) (/ 1.0 (* r w))))))))
/* Herbie alternative: divisor regrouped as ((1 - v)/(r*w)) * (1/(r*w)). */
double code(double v, double w, double r) {
    double rw = r * w;
    double divisor = ((1.0 - v) / rw) * (1.0 / rw);
    double numer = 0.125 * (3.0 + (v * -2.0));
    return -4.5 + (3.0 + ((2.0 / (r * r)) - (numer / divisor)));
}
! Herbie alternative (binary64): divisor regrouped as ((1 - v)/(r*w)) * (1/(r*w)).
real(8) function code(v, w, r)
real(8), intent (in) :: v
real(8), intent (in) :: w
real(8), intent (in) :: r
code = (-4.5d0) + (3.0d0 + ((2.0d0 / (r * r)) - ((0.125d0 * (3.0d0 + (v * (-2.0d0)))) / (((1.0d0 - v) / (r * w)) * (1.0d0 / (r * w))))))
end function
/** Herbie alternative: divisor regrouped as ((1 - v)/(r*w)) * (1/(r*w)). */
public static double code(double v, double w, double r) {
return -4.5 + (3.0 + ((2.0 / (r * r)) - ((0.125 * (3.0 + (v * -2.0))) / (((1.0 - v) / (r * w)) * (1.0 / (r * w))))));
}
def code(v, w, r):
    """Herbie alternative: divisor regrouped as ((1 - v)/(r*w)) * (1/(r*w))."""
    rw = r * w
    divisor = ((1.0 - v) / rw) * (1.0 / rw)
    numerator = 0.125 * (3.0 + (v * -2.0))
    return -4.5 + (3.0 + ((2.0 / (r * r)) - (numerator / divisor)))
# Herbie alternative with regrouped divisor; Float64() wrappers pin binary64 rounding.
function code(v, w, r) return Float64(-4.5 + Float64(3.0 + Float64(Float64(2.0 / Float64(r * r)) - Float64(Float64(0.125 * Float64(3.0 + Float64(v * -2.0))) / Float64(Float64(Float64(1.0 - v) / Float64(r * w)) * Float64(1.0 / Float64(r * w))))))) end
% Herbie alternative (double precision) with regrouped divisor; see FPCore above.
function tmp = code(v, w, r) tmp = -4.5 + (3.0 + ((2.0 / (r * r)) - ((0.125 * (3.0 + (v * -2.0))) / (((1.0 - v) / (r * w)) * (1.0 / (r * w)))))); end
(* Herbie alternative with regrouped divisor; machine-precision rounding at each step. *)
code[v_, w_, r_] := N[(-4.5 + N[(3.0 + N[(N[(2.0 / N[(r * r), $MachinePrecision]), $MachinePrecision] - N[(N[(0.125 * N[(3.0 + N[(v * -2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(N[(N[(1.0 - v), $MachinePrecision] / N[(r * w), $MachinePrecision]), $MachinePrecision] * N[(1.0 / N[(r * w), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
-4.5 + \left(3 + \left(\frac{2}{r \cdot r} - \frac{0.125 \cdot \left(3 + v \cdot -2\right)}{\frac{1 - v}{r \cdot w} \cdot \frac{1}{r \cdot w}}\right)\right)
\end{array}
Initial program 84.2%
Simplified 85.6%
associate-*r* 96.0%
*-commutative 96.0%
*-un-lft-identity 96.0%
associate-*r* 99.4%
times-frac 99.4%
Applied egg-rr 99.4%
Final simplification 99.4%
(FPCore (v w r)
:precision binary64
(+
(+
3.0
(-
(/ 2.0 (* r r))
(/
(* 0.125 (+ 3.0 (* v -2.0)))
(* (/ (/ 1.0 w) r) (/ (- 1.0 v) (* r w))))))
-4.5))
/* Herbie alternative: divisor split as ((1/w)/r) * ((1 - v)/(r*w)). */
double code(double v, double w, double r) {
return (3.0 + ((2.0 / (r * r)) - ((0.125 * (3.0 + (v * -2.0))) / (((1.0 / w) / r) * ((1.0 - v) / (r * w)))))) + -4.5;
}
! Herbie alternative (binary64): divisor split as ((1/w)/r) * ((1 - v)/(r*w)).
real(8) function code(v, w, r)
real(8), intent (in) :: v
real(8), intent (in) :: w
real(8), intent (in) :: r
code = (3.0d0 + ((2.0d0 / (r * r)) - ((0.125d0 * (3.0d0 + (v * (-2.0d0)))) / (((1.0d0 / w) / r) * ((1.0d0 - v) / (r * w)))))) + (-4.5d0)
end function
/** Herbie alternative: divisor split as ((1/w)/r) * ((1 - v)/(r*w)). */
public static double code(double v, double w, double r) {
return (3.0 + ((2.0 / (r * r)) - ((0.125 * (3.0 + (v * -2.0))) / (((1.0 / w) / r) * ((1.0 - v) / (r * w)))))) + -4.5;
}
def code(v, w, r):
    # Herbie alternative: divisor split as ((1/w)/r) * ((1 - v)/(r*w)).
    left = (1.0 / w) / r
    right = (1.0 - v) / (r * w)
    num = 0.125 * (3.0 + (v * -2.0))
    return (3.0 + ((2.0 / (r * r)) - (num / (left * right)))) + -4.5
# Herbie alternative with split divisor; Float64() wrappers pin binary64 rounding.
function code(v, w, r) return Float64(Float64(3.0 + Float64(Float64(2.0 / Float64(r * r)) - Float64(Float64(0.125 * Float64(3.0 + Float64(v * -2.0))) / Float64(Float64(Float64(1.0 / w) / r) * Float64(Float64(1.0 - v) / Float64(r * w)))))) + -4.5) end
% Herbie alternative (double precision) with split divisor; see FPCore above.
function tmp = code(v, w, r) tmp = (3.0 + ((2.0 / (r * r)) - ((0.125 * (3.0 + (v * -2.0))) / (((1.0 / w) / r) * ((1.0 - v) / (r * w)))))) + -4.5; end
(* Herbie alternative with split divisor; machine-precision rounding at each step. *)
code[v_, w_, r_] := N[(N[(3.0 + N[(N[(2.0 / N[(r * r), $MachinePrecision]), $MachinePrecision] - N[(N[(0.125 * N[(3.0 + N[(v * -2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(N[(N[(1.0 / w), $MachinePrecision] / r), $MachinePrecision] * N[(N[(1.0 - v), $MachinePrecision] / N[(r * w), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + -4.5), $MachinePrecision]
\begin{array}{l}
\\
\left(3 + \left(\frac{2}{r \cdot r} - \frac{0.125 \cdot \left(3 + v \cdot -2\right)}{\frac{\frac{1}{w}}{r} \cdot \frac{1 - v}{r \cdot w}}\right)\right) + -4.5
\end{array}
Initial program 84.2%
Simplified85.6%
associate-*r*96.0%
*-commutative96.0%
*-un-lft-identity96.0%
associate-*r*99.4%
times-frac99.4%
Applied egg-rr99.4%
clear-num99.4%
associate-/r/99.4%
*-commutative99.4%
associate-/r*99.4%
Applied egg-rr99.4%
Final simplification99.4%
(FPCore (v w r)
:precision binary64
(+
-4.5
(+
3.0
(+
(/ 2.0 (* r r))
(/ (* (* r w) (- (* v -0.25) -0.375)) (/ (+ v -1.0) (* r w)))))))
/* Herbie alternative: numerator scaled by (r*w), divided by (v - 1)/(r*w). */
double code(double v, double w, double r) {
return -4.5 + (3.0 + ((2.0 / (r * r)) + (((r * w) * ((v * -0.25) - -0.375)) / ((v + -1.0) / (r * w)))));
}
! Herbie alternative (binary64): numerator scaled by (r*w), divided by (v - 1)/(r*w).
real(8) function code(v, w, r)
real(8), intent (in) :: v
real(8), intent (in) :: w
real(8), intent (in) :: r
code = (-4.5d0) + (3.0d0 + ((2.0d0 / (r * r)) + (((r * w) * ((v * (-0.25d0)) - (-0.375d0))) / ((v + (-1.0d0)) / (r * w)))))
end function
/** Herbie alternative: numerator scaled by (r*w), divided by (v - 1)/(r*w). */
public static double code(double v, double w, double r) {
return -4.5 + (3.0 + ((2.0 / (r * r)) + (((r * w) * ((v * -0.25) - -0.375)) / ((v + -1.0) / (r * w)))));
}
def code(v, w, r):
    """Herbie alternative: numerator scaled by (r*w), divided by (v - 1)/(r*w)."""
    rw = r * w
    scaled = rw * ((v * -0.25) - -0.375)
    return -4.5 + (3.0 + ((2.0 / (r * r)) + (scaled / ((v + -1.0) / rw))))
# Herbie alternative with (r*w)-scaled numerator; Float64() wrappers pin binary64 rounding.
function code(v, w, r) return Float64(-4.5 + Float64(3.0 + Float64(Float64(2.0 / Float64(r * r)) + Float64(Float64(Float64(r * w) * Float64(Float64(v * -0.25) - -0.375)) / Float64(Float64(v + -1.0) / Float64(r * w)))))) end
% Herbie alternative (double precision) with (r*w)-scaled numerator; see FPCore above.
function tmp = code(v, w, r) tmp = -4.5 + (3.0 + ((2.0 / (r * r)) + (((r * w) * ((v * -0.25) - -0.375)) / ((v + -1.0) / (r * w))))); end
(* Herbie alternative with (r*w)-scaled numerator; machine-precision rounding at each step. *)
code[v_, w_, r_] := N[(-4.5 + N[(3.0 + N[(N[(2.0 / N[(r * r), $MachinePrecision]), $MachinePrecision] + N[(N[(N[(r * w), $MachinePrecision] * N[(N[(v * -0.25), $MachinePrecision] - -0.375), $MachinePrecision]), $MachinePrecision] / N[(N[(v + -1.0), $MachinePrecision] / N[(r * w), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
-4.5 + \left(3 + \left(\frac{2}{r \cdot r} + \frac{\left(r \cdot w\right) \cdot \left(v \cdot -0.25 - -0.375\right)}{\frac{v + -1}{r \cdot w}}\right)\right)
\end{array}
Initial program 84.2%
Simplified85.6%
associate-*r*96.0%
*-commutative96.0%
*-un-lft-identity96.0%
associate-*r*99.4%
times-frac99.4%
Applied egg-rr99.4%
associate-/r*98.7%
frac-2neg98.7%
associate-/r/98.7%
div-inv98.7%
+-commutative98.7%
distribute-rgt-in98.7%
*-commutative98.7%
associate-*l*99.1%
metadata-eval99.1%
metadata-eval99.1%
fma-udef99.1%
clear-num99.1%
/-rgt-identity99.1%
distribute-rgt-neg-in99.1%
Applied egg-rr99.1%
associate-/l*99.8%
distribute-frac-neg99.8%
associate-/r*99.4%
associate-*l/98.7%
distribute-rgt-neg-out98.7%
distribute-rgt-neg-in98.7%
*-commutative98.7%
distribute-rgt-neg-in98.7%
neg-sub098.7%
fma-udef98.7%
*-commutative98.7%
+-commutative98.7%
*-commutative98.7%
associate--r+98.7%
metadata-eval98.7%
associate-/r*99.1%
distribute-frac-neg99.1%
Simplified99.1%
Final simplification99.1%
(FPCore (v w r) :precision binary64 (if (<= v 40000000000.0) (+ -1.5 (+ (/ 2.0 (* r r)) (* -0.375 (* (* r w) (* r w))))) (+ (/ (/ 2.0 r) r) (- -1.5 (* 0.25 (* w (* r (* r w))))))))
/*
 * Herbie alternative (branching): regime split at v <= 4e10; both branches
 * come from Taylor expansions in v (around 0 and around infinity).
 */
double code(double v, double w, double r) {
    double rw = r * w;
    if (v <= 40000000000.0) {
        return -1.5 + ((2.0 / (r * r)) + (-0.375 * (rw * rw)));
    }
    return ((2.0 / r) / r) + (-1.5 - (0.25 * (w * (r * (r * w)))));
}
! Herbie alternative (binary64, branching): regime split at v <= 4e10,
! with Taylor-expanded forms on each side.
real(8) function code(v, w, r)
real(8), intent (in) :: v
real(8), intent (in) :: w
real(8), intent (in) :: r
real(8) :: tmp
if (v <= 40000000000.0d0) then
tmp = (-1.5d0) + ((2.0d0 / (r * r)) + ((-0.375d0) * ((r * w) * (r * w))))
else
tmp = ((2.0d0 / r) / r) + ((-1.5d0) - (0.25d0 * (w * (r * (r * w)))))
end if
code = tmp
end function
/** Herbie alternative (branching): regime split at v <= 4e10, Taylor-expanded forms. */
public static double code(double v, double w, double r) {
double tmp;
if (v <= 40000000000.0) {
tmp = -1.5 + ((2.0 / (r * r)) + (-0.375 * ((r * w) * (r * w))));
} else {
tmp = ((2.0 / r) / r) + (-1.5 - (0.25 * (w * (r * (r * w)))));
}
return tmp;
}
def code(v, w, r):
    """Herbie alternative (branching): regime split at v <= 4e10.

    The collapsed one-line rendering was invalid Python (the if/else
    statements were fused onto a single line); this is the same program
    laid out as valid multi-line code, behavior unchanged.
    """
    if v <= 40000000000.0:
        tmp = -1.5 + ((2.0 / (r * r)) + (-0.375 * ((r * w) * (r * w))))
    else:
        tmp = ((2.0 / r) / r) + (-1.5 - (0.25 * (w * (r * (r * w)))))
    return tmp
# Herbie alternative (branching at v <= 4e10); Float64() wrappers pin binary64 rounding.
function code(v, w, r) tmp = 0.0 if (v <= 40000000000.0) tmp = Float64(-1.5 + Float64(Float64(2.0 / Float64(r * r)) + Float64(-0.375 * Float64(Float64(r * w) * Float64(r * w))))); else tmp = Float64(Float64(Float64(2.0 / r) / r) + Float64(-1.5 - Float64(0.25 * Float64(w * Float64(r * Float64(r * w)))))); end return tmp end
% Herbie alternative (double precision, branching at v <= 4e10); see FPCore above.
function tmp_2 = code(v, w, r) tmp = 0.0; if (v <= 40000000000.0) tmp = -1.5 + ((2.0 / (r * r)) + (-0.375 * ((r * w) * (r * w)))); else tmp = ((2.0 / r) / r) + (-1.5 - (0.25 * (w * (r * (r * w))))); end tmp_2 = tmp; end
(* Herbie alternative (branching at v <= 4e10); machine-precision rounding at each step. *)
code[v_, w_, r_] := If[LessEqual[v, 40000000000.0], N[(-1.5 + N[(N[(2.0 / N[(r * r), $MachinePrecision]), $MachinePrecision] + N[(-0.375 * N[(N[(r * w), $MachinePrecision] * N[(r * w), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[(2.0 / r), $MachinePrecision] / r), $MachinePrecision] + N[(-1.5 - N[(0.25 * N[(w * N[(r * N[(r * w), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;v \leq 40000000000:\\
\;\;\;\;-1.5 + \left(\frac{2}{r \cdot r} + -0.375 \cdot \left(\left(r \cdot w\right) \cdot \left(r \cdot w\right)\right)\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{2}{r}}{r} + \left(-1.5 - 0.25 \cdot \left(w \cdot \left(r \cdot \left(r \cdot w\right)\right)\right)\right)\\
\end{array}
\end{array}
if v < 4e10: Initial program 87.6%
Simplified88.1%
Taylor expanded in v around 0 83.9%
*-commutative83.9%
unpow283.9%
unpow283.9%
swap-sqr97.5%
unpow297.5%
Simplified97.5%
unpow297.5%
Applied egg-rr97.5%
if 4e10 < v Initial program 71.8%
Simplified96.4%
div-inv96.3%
associate-*r*99.7%
associate-*l*99.8%
Applied egg-rr99.8%
Taylor expanded in v around inf 65.9%
unpow265.9%
unpow265.9%
swap-sqr99.7%
unpow299.7%
Simplified99.7%
unpow299.7%
associate-*r*94.7%
Applied egg-rr94.7%
Final simplification96.9%
(FPCore (v w r) :precision binary64 (if (<= v 190000000000.0) (+ -1.5 (+ (/ 2.0 (* r r)) (* -0.375 (* (* r w) (* r w))))) (+ (/ (/ 2.0 r) r) (- -1.5 (* 0.25 (* r (* w (* r w))))))))
/* Herbie alternative (branching): regime split at v <= 1.9e11, Taylor-expanded forms. */
double code(double v, double w, double r) {
double tmp;
if (v <= 190000000000.0) {
tmp = -1.5 + ((2.0 / (r * r)) + (-0.375 * ((r * w) * (r * w))));
} else {
tmp = ((2.0 / r) / r) + (-1.5 - (0.25 * (r * (w * (r * w)))));
}
return tmp;
}
! Herbie alternative (binary64, branching): regime split at v <= 1.9e11,
! with Taylor-expanded forms on each side.
real(8) function code(v, w, r)
real(8), intent (in) :: v
real(8), intent (in) :: w
real(8), intent (in) :: r
real(8) :: tmp
if (v <= 190000000000.0d0) then
tmp = (-1.5d0) + ((2.0d0 / (r * r)) + ((-0.375d0) * ((r * w) * (r * w))))
else
tmp = ((2.0d0 / r) / r) + ((-1.5d0) - (0.25d0 * (r * (w * (r * w)))))
end if
code = tmp
end function
/** Herbie alternative (branching): regime split at v <= 1.9e11, Taylor-expanded forms. */
public static double code(double v, double w, double r) {
double tmp;
if (v <= 190000000000.0) {
tmp = -1.5 + ((2.0 / (r * r)) + (-0.375 * ((r * w) * (r * w))));
} else {
tmp = ((2.0 / r) / r) + (-1.5 - (0.25 * (r * (w * (r * w)))));
}
return tmp;
}
def code(v, w, r):
    """Herbie alternative (branching): regime split at v <= 1.9e11.

    The collapsed one-line rendering was invalid Python (the if/else
    statements were fused onto a single line); this is the same program
    laid out as valid multi-line code, behavior unchanged.
    """
    if v <= 190000000000.0:
        tmp = -1.5 + ((2.0 / (r * r)) + (-0.375 * ((r * w) * (r * w))))
    else:
        tmp = ((2.0 / r) / r) + (-1.5 - (0.25 * (r * (w * (r * w)))))
    return tmp
# Herbie alternative (branching at v <= 1.9e11); Float64() wrappers pin binary64 rounding.
function code(v, w, r) tmp = 0.0 if (v <= 190000000000.0) tmp = Float64(-1.5 + Float64(Float64(2.0 / Float64(r * r)) + Float64(-0.375 * Float64(Float64(r * w) * Float64(r * w))))); else tmp = Float64(Float64(Float64(2.0 / r) / r) + Float64(-1.5 - Float64(0.25 * Float64(r * Float64(w * Float64(r * w)))))); end return tmp end
% Herbie alternative (double precision, branching at v <= 1.9e11); see FPCore above.
function tmp_2 = code(v, w, r) tmp = 0.0; if (v <= 190000000000.0) tmp = -1.5 + ((2.0 / (r * r)) + (-0.375 * ((r * w) * (r * w)))); else tmp = ((2.0 / r) / r) + (-1.5 - (0.25 * (r * (w * (r * w))))); end tmp_2 = tmp; end
(* Herbie alternative (branching at v <= 1.9e11); machine-precision rounding at each step. *)
code[v_, w_, r_] := If[LessEqual[v, 190000000000.0], N[(-1.5 + N[(N[(2.0 / N[(r * r), $MachinePrecision]), $MachinePrecision] + N[(-0.375 * N[(N[(r * w), $MachinePrecision] * N[(r * w), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[(2.0 / r), $MachinePrecision] / r), $MachinePrecision] + N[(-1.5 - N[(0.25 * N[(r * N[(w * N[(r * w), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;v \leq 190000000000:\\
\;\;\;\;-1.5 + \left(\frac{2}{r \cdot r} + -0.375 \cdot \left(\left(r \cdot w\right) \cdot \left(r \cdot w\right)\right)\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{2}{r}}{r} + \left(-1.5 - 0.25 \cdot \left(r \cdot \left(w \cdot \left(r \cdot w\right)\right)\right)\right)\\
\end{array}
\end{array}
if v < 1.9e11: Initial program 87.6%
Simplified88.1%
Taylor expanded in v around 0 83.9%
*-commutative83.9%
unpow283.9%
unpow283.9%
swap-sqr97.5%
unpow297.5%
Simplified97.5%
unpow297.5%
Applied egg-rr97.5%
if 1.9e11 < v Initial program 71.8%
Simplified96.4%
div-inv96.3%
associate-*r*99.7%
associate-*l*99.8%
Applied egg-rr99.8%
Taylor expanded in v around inf 65.9%
unpow265.9%
unpow265.9%
swap-sqr99.7%
unpow299.7%
Simplified99.7%
unpow299.7%
*-commutative99.7%
associate-*r*96.3%
Applied egg-rr96.3%
Final simplification97.2%
(FPCore (v w r) :precision binary64 (+ -1.5 (+ (/ 2.0 (* r r)) (* -0.375 (* (* r w) (* r w))))))
/* Herbie alternative: v-free Taylor form, -1.5 + 2/r^2 - 0.375*(r*w)^2. */
double code(double v, double w, double r) {
    double rw = r * w;
    (void)v; /* v is eliminated by the Taylor expansion around v = 0 */
    return -1.5 + ((2.0 / (r * r)) + (-0.375 * (rw * rw)));
}
! Herbie alternative (binary64): v-free Taylor form, -1.5 + 2/r**2 - 0.375*(r*w)**2.
real(8) function code(v, w, r)
real(8), intent (in) :: v
real(8), intent (in) :: w
real(8), intent (in) :: r
code = (-1.5d0) + ((2.0d0 / (r * r)) + ((-0.375d0) * ((r * w) * (r * w))))
end function
/** Herbie alternative: v-free Taylor form, -1.5 + 2/r^2 - 0.375*(r*w)^2. */
public static double code(double v, double w, double r) {
return -1.5 + ((2.0 / (r * r)) + (-0.375 * ((r * w) * (r * w))));
}
def code(v, w, r):
    # Herbie alternative: v-free Taylor form, -1.5 + 2/r^2 - 0.375*(r*w)^2.
    quad = (r * w) * (r * w)
    return -1.5 + ((2.0 / (r * r)) + (-0.375 * quad))
# Herbie alternative, v-free Taylor form; Float64() wrappers pin binary64 rounding.
function code(v, w, r) return Float64(-1.5 + Float64(Float64(2.0 / Float64(r * r)) + Float64(-0.375 * Float64(Float64(r * w) * Float64(r * w))))) end
% Herbie alternative (double precision), v-free Taylor form; see FPCore above.
function tmp = code(v, w, r) tmp = -1.5 + ((2.0 / (r * r)) + (-0.375 * ((r * w) * (r * w)))); end
(* Herbie alternative, v-free Taylor form; machine-precision rounding at each step. *)
code[v_, w_, r_] := N[(-1.5 + N[(N[(2.0 / N[(r * r), $MachinePrecision]), $MachinePrecision] + N[(-0.375 * N[(N[(r * w), $MachinePrecision] * N[(r * w), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
-1.5 + \left(\frac{2}{r \cdot r} + -0.375 \cdot \left(\left(r \cdot w\right) \cdot \left(r \cdot w\right)\right)\right)
\end{array}
Initial program 84.2%
Simplified 86.0%
Taylor expanded in v around 0 79.3%
*-commutative 79.3%
unpow2 79.3%
unpow2 79.3%
swap-sqr 94.4%
unpow2 94.4%
Simplified 94.4%
unpow2 94.4%
Applied egg-rr 94.4%
Final simplification 94.4%
herbie shell --seed 2024011
(FPCore (v w r)
:name "Rosa's TurbineBenchmark"
:precision binary64
(- (- (+ 3.0 (/ 2.0 (* r r))) (/ (* (* 0.125 (- 3.0 (* 2.0 v))) (* (* (* w w) r) r)) (- 1.0 v))) 4.5))