
(FPCore (v w r) :precision binary64 (- (- (+ 3.0 (/ 2.0 (* r r))) (/ (* (* 0.125 (- 3.0 (* 2.0 v))) (* (* (* w w) r) r)) (- 1.0 v))) 4.5))
/* Rosa's TurbineBenchmark, initial program in binary64:
 *   3 + 2/r^2 - 0.125*(3 - 2v) * w^2 r^2 / (1 - v) - 4.5
 * The operation tree matches the FPCore above exactly, so rounding
 * behavior is identical to the original one-liner. */
double code(double v, double w, double r) {
    double quad_term = 2.0 / (r * r);              /* 2/r^2 */
    double coeff = 0.125 * (3.0 - (2.0 * v));      /* 0.125*(3 - 2v) */
    double wr_sq = ((w * w) * r) * r;              /* w^2 * r^2 */
    double quotient = (coeff * wr_sq) / (1.0 - v);
    return ((3.0 + quad_term) - quotient) - 4.5;
}
! Initial program in double precision:
!   (3 + 2/r**2 - 0.125*(3 - 2*v)*w**2*r**2/(1 - v)) - 4.5
! d0 suffixes keep every literal in real(8) so evaluation stays binary64.
real(8) function code(v, w, r)
real(8), intent (in) :: v
real(8), intent (in) :: w
real(8), intent (in) :: r
code = ((3.0d0 + (2.0d0 / (r * r))) - (((0.125d0 * (3.0d0 - (2.0d0 * v))) * (((w * w) * r) * r)) / (1.0d0 - v))) - 4.5d0
end function
/**
 * Rosa's TurbineBenchmark, initial program:
 * 3 + 2/r^2 - 0.125*(3 - 2v) * w^2 r^2 / (1 - v) - 4.5.
 * Subexpressions are named but evaluated in the same order as the
 * generated one-liner, so the double rounding is unchanged.
 */
public static double code(double v, double w, double r) {
    final double quadTerm = 2.0 / (r * r);
    final double numerator = (0.125 * (3.0 - (2.0 * v))) * (((w * w) * r) * r);
    return ((3.0 + quadTerm) - (numerator / (1.0 - v))) - 4.5;
}
def code(v, w, r):
    """Rosa's TurbineBenchmark, initial program.

    Computes 3 + 2/r**2 - 0.125*(3 - 2v) * w**2 * r**2 / (1 - v) - 4.5
    with the same operation order as the generated one-liner.
    """
    quad_term = 3.0 + (2.0 / (r * r))
    numerator = (0.125 * (3.0 - (2.0 * v))) * (((w * w) * r) * r)
    return (quad_term - (numerator / (1.0 - v))) - 4.5
# Initial program in Julia; the explicit Float64() wrapper around every
# intermediate forces binary64 rounding at each step, matching the FPCore
# :precision binary64 annotation.
function code(v, w, r) return Float64(Float64(Float64(3.0 + Float64(2.0 / Float64(r * r))) - Float64(Float64(Float64(0.125 * Float64(3.0 - Float64(2.0 * v))) * Float64(Float64(Float64(w * w) * r) * r)) / Float64(1.0 - v))) - 4.5) end
% Initial program in MATLAB/Octave:
%   (3 + 2/r^2 - 0.125*(3 - 2*v)*w^2*r^2/(1 - v)) - 4.5
function tmp = code(v, w, r) tmp = ((3.0 + (2.0 / (r * r))) - (((0.125 * (3.0 - (2.0 * v))) * (((w * w) * r) * r)) / (1.0 - v))) - 4.5; end
(* Initial program in Wolfram Language; each N[..., $MachinePrecision]
   wrapper rounds the intermediate to machine precision, modeling the
   binary64 evaluation of the FPCore above. *)
code[v_, w_, r_] := N[(N[(N[(3.0 + N[(2.0 / N[(r * r), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(N[(N[(0.125 * N[(3.0 - N[(2.0 * v), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(N[(N[(w * w), $MachinePrecision] * r), $MachinePrecision] * r), $MachinePrecision]), $MachinePrecision] / N[(1.0 - v), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - 4.5), $MachinePrecision]
\begin{array}{l}
\\
\left(\left(3 + \frac{2}{r \cdot r}\right) - \frac{\left(0.125 \cdot \left(3 - 2 \cdot v\right)\right) \cdot \left(\left(\left(w \cdot w\right) \cdot r\right) \cdot r\right)}{1 - v}\right) - 4.5
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 9 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (v w r) :precision binary64 (- (- (+ 3.0 (/ 2.0 (* r r))) (/ (* (* 0.125 (- 3.0 (* 2.0 v))) (* (* (* w w) r) r)) (- 1.0 v))) 4.5))
/* Duplicate of the initial-program C translation above; Herbie reprints
 * the starting expression at the head of its alternatives table. */
double code(double v, double w, double r) {
return ((3.0 + (2.0 / (r * r))) - (((0.125 * (3.0 - (2.0 * v))) * (((w * w) * r) * r)) / (1.0 - v))) - 4.5;
}
real(8) function code(v, w, r)
real(8), intent (in) :: v
real(8), intent (in) :: w
real(8), intent (in) :: r
code = ((3.0d0 + (2.0d0 / (r * r))) - (((0.125d0 * (3.0d0 - (2.0d0 * v))) * (((w * w) * r) * r)) / (1.0d0 - v))) - 4.5d0
end function
public static double code(double v, double w, double r) {
return ((3.0 + (2.0 / (r * r))) - (((0.125 * (3.0 - (2.0 * v))) * (((w * w) * r) * r)) / (1.0 - v))) - 4.5;
}
def code(v, w, r): return ((3.0 + (2.0 / (r * r))) - (((0.125 * (3.0 - (2.0 * v))) * (((w * w) * r) * r)) / (1.0 - v))) - 4.5
function code(v, w, r) return Float64(Float64(Float64(3.0 + Float64(2.0 / Float64(r * r))) - Float64(Float64(Float64(0.125 * Float64(3.0 - Float64(2.0 * v))) * Float64(Float64(Float64(w * w) * r) * r)) / Float64(1.0 - v))) - 4.5) end
function tmp = code(v, w, r) tmp = ((3.0 + (2.0 / (r * r))) - (((0.125 * (3.0 - (2.0 * v))) * (((w * w) * r) * r)) / (1.0 - v))) - 4.5; end
code[v_, w_, r_] := N[(N[(N[(3.0 + N[(2.0 / N[(r * r), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(N[(N[(0.125 * N[(3.0 - N[(2.0 * v), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(N[(N[(w * w), $MachinePrecision] * r), $MachinePrecision] * r), $MachinePrecision]), $MachinePrecision] / N[(1.0 - v), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - 4.5), $MachinePrecision]
\begin{array}{l}
\\
\left(\left(3 + \frac{2}{r \cdot r}\right) - \frac{\left(0.125 \cdot \left(3 - 2 \cdot v\right)\right) \cdot \left(\left(\left(w \cdot w\right) \cdot r\right) \cdot r\right)}{1 - v}\right) - 4.5
\end{array}
(FPCore (v w r) :precision binary64 (+ (/ (/ 2.0 r) r) (- -1.5 (/ (fma v -0.25 0.375) (/ (/ (- 1.0 v) (* r w)) (* r w))))))
/* Herbie alternative: fma(v, -0.25, 0.375) computes -0.25*v + 0.375,
 * i.e. 0.125*(3 - 2v), with a single rounding; the denominator is the
 * original (1 - v)/(w^2 r^2) split into two divisions. Needs <math.h>. */
double code(double v, double w, double r) {
return ((2.0 / r) / r) + (-1.5 - (fma(v, -0.25, 0.375) / (((1.0 - v) / (r * w)) / (r * w))));
}
function code(v, w, r) return Float64(Float64(Float64(2.0 / r) / r) + Float64(-1.5 - Float64(fma(v, -0.25, 0.375) / Float64(Float64(Float64(1.0 - v) / Float64(r * w)) / Float64(r * w))))) end
code[v_, w_, r_] := N[(N[(N[(2.0 / r), $MachinePrecision] / r), $MachinePrecision] + N[(-1.5 - N[(N[(v * -0.25 + 0.375), $MachinePrecision] / N[(N[(N[(1.0 - v), $MachinePrecision] / N[(r * w), $MachinePrecision]), $MachinePrecision] / N[(r * w), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\frac{2}{r}}{r} + \left(-1.5 - \frac{\mathsf{fma}\left(v, -0.25, 0.375\right)}{\frac{\frac{1 - v}{r \cdot w}}{r \cdot w}}\right)
\end{array}
Initial program 84.2%
Simplified 96.9%
*-un-lft-identity 96.9%
associate-*r* 99.7%
times-frac 99.7%
*-commutative 99.7%
*-commutative 99.7%
Applied egg-rr 99.7%
associate-*l/ 99.7%
*-un-lft-identity 99.7%
associate-/r* 98.3%
Applied egg-rr 98.3%
Taylor expanded in v around 0 91.9%
+-commutative 91.9%
mul-1-neg 91.9%
sub-neg 91.9%
div-sub 99.7%
Simplified 99.7%
Final simplification 99.7%
(FPCore (v w r) :precision binary64 (+ (/ (/ 2.0 r) r) (+ -1.5 (/ (/ (- (* v -0.25) -0.375) (+ v -1.0)) (pow (* r w) -2.0)))))
/* Herbie alternative: multiplies by (r*w)^2 via division by
 * pow(r*w, -2.0); the fraction (0.375 - 0.25 v)/(v - 1) carries the
 * v-dependence. Needs <math.h> for pow. */
double code(double v, double w, double r) {
return ((2.0 / r) / r) + (-1.5 + ((((v * -0.25) - -0.375) / (v + -1.0)) / pow((r * w), -2.0)));
}
real(8) function code(v, w, r)
real(8), intent (in) :: v
real(8), intent (in) :: w
real(8), intent (in) :: r
code = ((2.0d0 / r) / r) + ((-1.5d0) + ((((v * (-0.25d0)) - (-0.375d0)) / (v + (-1.0d0))) / ((r * w) ** (-2.0d0))))
end function
public static double code(double v, double w, double r) {
return ((2.0 / r) / r) + (-1.5 + ((((v * -0.25) - -0.375) / (v + -1.0)) / Math.pow((r * w), -2.0)));
}
def code(v, w, r): return ((2.0 / r) / r) + (-1.5 + ((((v * -0.25) - -0.375) / (v + -1.0)) / math.pow((r * w), -2.0)))
function code(v, w, r) return Float64(Float64(Float64(2.0 / r) / r) + Float64(-1.5 + Float64(Float64(Float64(Float64(v * -0.25) - -0.375) / Float64(v + -1.0)) / (Float64(r * w) ^ -2.0)))) end
function tmp = code(v, w, r) tmp = ((2.0 / r) / r) + (-1.5 + ((((v * -0.25) - -0.375) / (v + -1.0)) / ((r * w) ^ -2.0))); end
code[v_, w_, r_] := N[(N[(N[(2.0 / r), $MachinePrecision] / r), $MachinePrecision] + N[(-1.5 + N[(N[(N[(N[(v * -0.25), $MachinePrecision] - -0.375), $MachinePrecision] / N[(v + -1.0), $MachinePrecision]), $MachinePrecision] / N[Power[N[(r * w), $MachinePrecision], -2.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\frac{2}{r}}{r} + \left(-1.5 + \frac{\frac{v \cdot -0.25 - -0.375}{v + -1}}{{\left(r \cdot w\right)}^{-2}}\right)
\end{array}
Initial program 84.2%
Simplified96.9%
*-un-lft-identity96.9%
div-inv96.9%
times-frac92.5%
associate-*r*94.2%
pow294.2%
*-commutative94.2%
Applied egg-rr94.2%
frac-2neg94.2%
metadata-eval94.2%
clear-num94.2%
frac-times94.9%
metadata-eval94.9%
pow-flip94.9%
metadata-eval94.9%
Applied egg-rr94.9%
associate-*r/99.7%
distribute-lft-neg-in99.7%
associate-/l*99.7%
neg-mul-199.7%
distribute-lft-neg-in99.7%
associate-/r*99.7%
neg-sub099.7%
fma-udef99.7%
*-commutative99.7%
+-commutative99.7%
associate--r+99.7%
metadata-eval99.7%
*-commutative99.7%
neg-sub099.7%
associate--r-99.7%
metadata-eval99.7%
*-commutative99.7%
Simplified99.7%
Final simplification99.7%
(FPCore (v w r)
:precision binary64
(let* ((t_0 (/ (/ 2.0 r) r)))
(if (or (<= v -12500000.0) (not (<= v 0.15)))
(+
t_0
(+ -1.5 (* (/ r (/ v w)) (/ (- (* v -0.25) -0.375) (/ 1.0 (* r w))))))
(+ t_0 (- -1.5 (* 0.375 (* (* r w) (* r w))))))))
/* Herbie regime-split alternative: outside v in (-1.25e7, 0.15] use the
 * rearranged exact form; inside, use the Taylor truncation in v around 0,
 * which drops the v-dependent quotient entirely. */
double code(double v, double w, double r) {
double t_0 = (2.0 / r) / r; /* 2/r^2, shared by both branches */
double tmp;
if ((v <= -12500000.0) || !(v <= 0.15)) {
tmp = t_0 + (-1.5 + ((r / (v / w)) * (((v * -0.25) - -0.375) / (1.0 / (r * w)))));
} else {
tmp = t_0 + (-1.5 - (0.375 * ((r * w) * (r * w)))); /* series at v = 0 */
}
return tmp;
}
real(8) function code(v, w, r)
real(8), intent (in) :: v
real(8), intent (in) :: w
real(8), intent (in) :: r
real(8) :: t_0
real(8) :: tmp
t_0 = (2.0d0 / r) / r
if ((v <= (-12500000.0d0)) .or. (.not. (v <= 0.15d0))) then
tmp = t_0 + ((-1.5d0) + ((r / (v / w)) * (((v * (-0.25d0)) - (-0.375d0)) / (1.0d0 / (r * w)))))
else
tmp = t_0 + ((-1.5d0) - (0.375d0 * ((r * w) * (r * w))))
end if
code = tmp
end function
public static double code(double v, double w, double r) {
double t_0 = (2.0 / r) / r;
double tmp;
if ((v <= -12500000.0) || !(v <= 0.15)) {
tmp = t_0 + (-1.5 + ((r / (v / w)) * (((v * -0.25) - -0.375) / (1.0 / (r * w)))));
} else {
tmp = t_0 + (-1.5 - (0.375 * ((r * w) * (r * w))));
}
return tmp;
}
def code(v, w, r):
    """Herbie regime-split alternative (reconstructed).

    The generated source was collapsed onto a single line and was not
    valid Python; this restores the intended statement structure without
    changing any operation. Outside v in (-1.25e7, 0.15] the rearranged
    exact form is used; inside, the Taylor truncation in v around 0.
    """
    t_0 = (2.0 / r) / r  # 2/r^2, shared by both branches
    tmp = 0
    if (v <= -12500000.0) or not (v <= 0.15):
        tmp = t_0 + (-1.5 + ((r / (v / w)) * (((v * -0.25) - -0.375) / (1.0 / (r * w)))))
    else:
        tmp = t_0 + (-1.5 - (0.375 * ((r * w) * (r * w))))
    return tmp
function code(v, w, r) t_0 = Float64(Float64(2.0 / r) / r) tmp = 0.0 if ((v <= -12500000.0) || !(v <= 0.15)) tmp = Float64(t_0 + Float64(-1.5 + Float64(Float64(r / Float64(v / w)) * Float64(Float64(Float64(v * -0.25) - -0.375) / Float64(1.0 / Float64(r * w)))))); else tmp = Float64(t_0 + Float64(-1.5 - Float64(0.375 * Float64(Float64(r * w) * Float64(r * w))))); end return tmp end
function tmp_2 = code(v, w, r) t_0 = (2.0 / r) / r; tmp = 0.0; if ((v <= -12500000.0) || ~((v <= 0.15))) tmp = t_0 + (-1.5 + ((r / (v / w)) * (((v * -0.25) - -0.375) / (1.0 / (r * w))))); else tmp = t_0 + (-1.5 - (0.375 * ((r * w) * (r * w)))); end tmp_2 = tmp; end
code[v_, w_, r_] := Block[{t$95$0 = N[(N[(2.0 / r), $MachinePrecision] / r), $MachinePrecision]}, If[Or[LessEqual[v, -12500000.0], N[Not[LessEqual[v, 0.15]], $MachinePrecision]], N[(t$95$0 + N[(-1.5 + N[(N[(r / N[(v / w), $MachinePrecision]), $MachinePrecision] * N[(N[(N[(v * -0.25), $MachinePrecision] - -0.375), $MachinePrecision] / N[(1.0 / N[(r * w), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(t$95$0 + N[(-1.5 - N[(0.375 * N[(N[(r * w), $MachinePrecision] * N[(r * w), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{\frac{2}{r}}{r}\\
\mathbf{if}\;v \leq -12500000 \lor \neg \left(v \leq 0.15\right):\\
\;\;\;\;t_0 + \left(-1.5 + \frac{r}{\frac{v}{w}} \cdot \frac{v \cdot -0.25 - -0.375}{\frac{1}{r \cdot w}}\right)\\
\mathbf{else}:\\
\;\;\;\;t_0 + \left(-1.5 - 0.375 \cdot \left(\left(r \cdot w\right) \cdot \left(r \cdot w\right)\right)\right)\\
\end{array}
\end{array}
if v < -1.25e7 or 0.149999999999999994 < v Initial program 83.2%
Simplified96.6%
*-un-lft-identity96.6%
div-inv96.6%
times-frac89.0%
associate-*r*90.2%
pow290.2%
*-commutative90.2%
Applied egg-rr90.2%
frac-2neg90.2%
metadata-eval90.2%
clear-num90.2%
frac-times91.4%
metadata-eval91.4%
pow-flip91.4%
metadata-eval91.4%
Applied egg-rr91.4%
associate-*r/99.7%
distribute-lft-neg-in99.7%
associate-/l*99.7%
neg-mul-199.7%
distribute-lft-neg-in99.7%
associate-/r*99.7%
neg-sub099.7%
fma-udef99.7%
*-commutative99.7%
+-commutative99.7%
associate--r+99.7%
metadata-eval99.7%
*-commutative99.7%
neg-sub099.7%
associate--r-99.7%
metadata-eval99.7%
*-commutative99.7%
Simplified99.7%
div-inv99.7%
*-commutative99.7%
sqr-pow99.7%
times-frac97.8%
metadata-eval97.8%
inv-pow97.8%
*-commutative97.8%
+-commutative97.8%
metadata-eval97.8%
inv-pow97.8%
*-commutative97.8%
Applied egg-rr97.8%
Taylor expanded in v around inf 97.6%
associate-/l*95.1%
Simplified95.1%
if -1.25e7 < v < 0.149999999999999994Initial program 85.6%
Simplified97.3%
Taylor expanded in v around 0 75.7%
*-commutative75.7%
*-commutative75.7%
unpow275.7%
unpow275.7%
swap-sqr98.6%
unpow298.6%
*-commutative98.6%
Simplified98.6%
*-commutative98.6%
unpow298.6%
Applied egg-rr98.6%
Final simplification96.6%
(FPCore (v w r)
:precision binary64
(let* ((t_0 (/ (/ 2.0 r) r)))
(if (or (<= v -12500000.0) (not (<= v 0.15)))
(+
t_0
(+ -1.5 (* (/ (* r w) v) (/ (- (* v -0.25) -0.375) (/ 1.0 (* r w))))))
(+ t_0 (- -1.5 (* 0.375 (* (* r w) (* r w))))))))
/* Herbie regime-split alternative, variant of the previous one: the
 * prefactor is written (r*w)/v instead of r/(v/w); the Taylor-truncated
 * branch for v in (-1.25e7, 0.15] is identical. */
double code(double v, double w, double r) {
double t_0 = (2.0 / r) / r; /* 2/r^2, shared by both branches */
double tmp;
if ((v <= -12500000.0) || !(v <= 0.15)) {
tmp = t_0 + (-1.5 + (((r * w) / v) * (((v * -0.25) - -0.375) / (1.0 / (r * w)))));
} else {
tmp = t_0 + (-1.5 - (0.375 * ((r * w) * (r * w)))); /* series at v = 0 */
}
return tmp;
}
real(8) function code(v, w, r)
real(8), intent (in) :: v
real(8), intent (in) :: w
real(8), intent (in) :: r
real(8) :: t_0
real(8) :: tmp
t_0 = (2.0d0 / r) / r
if ((v <= (-12500000.0d0)) .or. (.not. (v <= 0.15d0))) then
tmp = t_0 + ((-1.5d0) + (((r * w) / v) * (((v * (-0.25d0)) - (-0.375d0)) / (1.0d0 / (r * w)))))
else
tmp = t_0 + ((-1.5d0) - (0.375d0 * ((r * w) * (r * w))))
end if
code = tmp
end function
public static double code(double v, double w, double r) {
double t_0 = (2.0 / r) / r;
double tmp;
if ((v <= -12500000.0) || !(v <= 0.15)) {
tmp = t_0 + (-1.5 + (((r * w) / v) * (((v * -0.25) - -0.375) / (1.0 / (r * w)))));
} else {
tmp = t_0 + (-1.5 - (0.375 * ((r * w) * (r * w))));
}
return tmp;
}
def code(v, w, r):
    """Herbie regime-split alternative (reconstructed).

    The generated source was collapsed onto a single line and was not
    valid Python; this restores the intended statement structure without
    changing any operation. Same as the earlier split but the prefactor
    is (r*w)/v instead of r/(v/w).
    """
    t_0 = (2.0 / r) / r  # 2/r^2, shared by both branches
    tmp = 0
    if (v <= -12500000.0) or not (v <= 0.15):
        tmp = t_0 + (-1.5 + (((r * w) / v) * (((v * -0.25) - -0.375) / (1.0 / (r * w)))))
    else:
        tmp = t_0 + (-1.5 - (0.375 * ((r * w) * (r * w))))
    return tmp
function code(v, w, r) t_0 = Float64(Float64(2.0 / r) / r) tmp = 0.0 if ((v <= -12500000.0) || !(v <= 0.15)) tmp = Float64(t_0 + Float64(-1.5 + Float64(Float64(Float64(r * w) / v) * Float64(Float64(Float64(v * -0.25) - -0.375) / Float64(1.0 / Float64(r * w)))))); else tmp = Float64(t_0 + Float64(-1.5 - Float64(0.375 * Float64(Float64(r * w) * Float64(r * w))))); end return tmp end
function tmp_2 = code(v, w, r) t_0 = (2.0 / r) / r; tmp = 0.0; if ((v <= -12500000.0) || ~((v <= 0.15))) tmp = t_0 + (-1.5 + (((r * w) / v) * (((v * -0.25) - -0.375) / (1.0 / (r * w))))); else tmp = t_0 + (-1.5 - (0.375 * ((r * w) * (r * w)))); end tmp_2 = tmp; end
code[v_, w_, r_] := Block[{t$95$0 = N[(N[(2.0 / r), $MachinePrecision] / r), $MachinePrecision]}, If[Or[LessEqual[v, -12500000.0], N[Not[LessEqual[v, 0.15]], $MachinePrecision]], N[(t$95$0 + N[(-1.5 + N[(N[(N[(r * w), $MachinePrecision] / v), $MachinePrecision] * N[(N[(N[(v * -0.25), $MachinePrecision] - -0.375), $MachinePrecision] / N[(1.0 / N[(r * w), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(t$95$0 + N[(-1.5 - N[(0.375 * N[(N[(r * w), $MachinePrecision] * N[(r * w), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{\frac{2}{r}}{r}\\
\mathbf{if}\;v \leq -12500000 \lor \neg \left(v \leq 0.15\right):\\
\;\;\;\;t_0 + \left(-1.5 + \frac{r \cdot w}{v} \cdot \frac{v \cdot -0.25 - -0.375}{\frac{1}{r \cdot w}}\right)\\
\mathbf{else}:\\
\;\;\;\;t_0 + \left(-1.5 - 0.375 \cdot \left(\left(r \cdot w\right) \cdot \left(r \cdot w\right)\right)\right)\\
\end{array}
\end{array}
if v < -1.25e7 or 0.149999999999999994 < v Initial program 83.2%
Simplified96.6%
*-un-lft-identity96.6%
div-inv96.6%
times-frac89.0%
associate-*r*90.2%
pow290.2%
*-commutative90.2%
Applied egg-rr90.2%
frac-2neg90.2%
metadata-eval90.2%
clear-num90.2%
frac-times91.4%
metadata-eval91.4%
pow-flip91.4%
metadata-eval91.4%
Applied egg-rr91.4%
associate-*r/99.7%
distribute-lft-neg-in99.7%
associate-/l*99.7%
neg-mul-199.7%
distribute-lft-neg-in99.7%
associate-/r*99.7%
neg-sub099.7%
fma-udef99.7%
*-commutative99.7%
+-commutative99.7%
associate--r+99.7%
metadata-eval99.7%
*-commutative99.7%
neg-sub099.7%
associate--r-99.7%
metadata-eval99.7%
*-commutative99.7%
Simplified99.7%
div-inv99.7%
*-commutative99.7%
sqr-pow99.7%
times-frac97.8%
metadata-eval97.8%
inv-pow97.8%
*-commutative97.8%
+-commutative97.8%
metadata-eval97.8%
inv-pow97.8%
*-commutative97.8%
Applied egg-rr97.8%
Taylor expanded in v around inf 97.6%
if -1.25e7 < v < 0.149999999999999994Initial program 85.6%
Simplified97.3%
Taylor expanded in v around 0 75.7%
*-commutative75.7%
*-commutative75.7%
unpow275.7%
unpow275.7%
swap-sqr98.6%
unpow298.6%
*-commutative98.6%
Simplified98.6%
*-commutative98.6%
unpow298.6%
Applied egg-rr98.6%
Final simplification98.0%
(FPCore (v w r)
:precision binary64
(if (<= r 2e-39)
(+ (- -1.5 (* 0.375 (* (* r w) (* r w)))) (/ 1.0 (* r (/ r 2.0))))
(if (<= r 2.7e+142)
(+
-1.5
(+
(/ 2.0 (* r r))
(* (* r r) (/ (+ -0.375 (* v 0.25)) (/ (- 1.0 v) (* w w))))))
(+ (/ (/ 2.0 r) r) (- -1.5 (* 0.375 (* r (* w (* r w)))))))))
/* Herbie three-regime alternative split on r: tiny r keeps the 1/r^2 term
 * dominant (2/r^2 written as 1/(r*(r/2))); the middle regime keeps the
 * full expression regrouped; huge r uses the Taylor truncation in v. */
double code(double v, double w, double r) {
double tmp;
if (r <= 2e-39) {
tmp = (-1.5 - (0.375 * ((r * w) * (r * w)))) + (1.0 / (r * (r / 2.0)));
} else if (r <= 2.7e+142) {
tmp = -1.5 + ((2.0 / (r * r)) + ((r * r) * ((-0.375 + (v * 0.25)) / ((1.0 - v) / (w * w)))));
} else {
tmp = ((2.0 / r) / r) + (-1.5 - (0.375 * (r * (w * (r * w)))));
}
return tmp;
}
real(8) function code(v, w, r)
real(8), intent (in) :: v
real(8), intent (in) :: w
real(8), intent (in) :: r
real(8) :: tmp
if (r <= 2d-39) then
tmp = ((-1.5d0) - (0.375d0 * ((r * w) * (r * w)))) + (1.0d0 / (r * (r / 2.0d0)))
else if (r <= 2.7d+142) then
tmp = (-1.5d0) + ((2.0d0 / (r * r)) + ((r * r) * (((-0.375d0) + (v * 0.25d0)) / ((1.0d0 - v) / (w * w)))))
else
tmp = ((2.0d0 / r) / r) + ((-1.5d0) - (0.375d0 * (r * (w * (r * w)))))
end if
code = tmp
end function
public static double code(double v, double w, double r) {
double tmp;
if (r <= 2e-39) {
tmp = (-1.5 - (0.375 * ((r * w) * (r * w)))) + (1.0 / (r * (r / 2.0)));
} else if (r <= 2.7e+142) {
tmp = -1.5 + ((2.0 / (r * r)) + ((r * r) * ((-0.375 + (v * 0.25)) / ((1.0 - v) / (w * w)))));
} else {
tmp = ((2.0 / r) / r) + (-1.5 - (0.375 * (r * (w * (r * w)))));
}
return tmp;
}
def code(v, w, r):
    """Herbie three-regime alternative split on r (reconstructed).

    The generated source was collapsed onto a single line and was not
    valid Python; this restores the intended statement structure without
    changing any operation. Tiny r keeps the 1/r^2 term dominant, the
    middle regime keeps the full regrouped expression, and huge r uses
    the Taylor truncation in v.
    """
    tmp = 0
    if r <= 2e-39:
        tmp = (-1.5 - (0.375 * ((r * w) * (r * w)))) + (1.0 / (r * (r / 2.0)))
    elif r <= 2.7e+142:
        tmp = -1.5 + ((2.0 / (r * r)) + ((r * r) * ((-0.375 + (v * 0.25)) / ((1.0 - v) / (w * w)))))
    else:
        tmp = ((2.0 / r) / r) + (-1.5 - (0.375 * (r * (w * (r * w)))))
    return tmp
function code(v, w, r) tmp = 0.0 if (r <= 2e-39) tmp = Float64(Float64(-1.5 - Float64(0.375 * Float64(Float64(r * w) * Float64(r * w)))) + Float64(1.0 / Float64(r * Float64(r / 2.0)))); elseif (r <= 2.7e+142) tmp = Float64(-1.5 + Float64(Float64(2.0 / Float64(r * r)) + Float64(Float64(r * r) * Float64(Float64(-0.375 + Float64(v * 0.25)) / Float64(Float64(1.0 - v) / Float64(w * w)))))); else tmp = Float64(Float64(Float64(2.0 / r) / r) + Float64(-1.5 - Float64(0.375 * Float64(r * Float64(w * Float64(r * w)))))); end return tmp end
function tmp_2 = code(v, w, r) tmp = 0.0; if (r <= 2e-39) tmp = (-1.5 - (0.375 * ((r * w) * (r * w)))) + (1.0 / (r * (r / 2.0))); elseif (r <= 2.7e+142) tmp = -1.5 + ((2.0 / (r * r)) + ((r * r) * ((-0.375 + (v * 0.25)) / ((1.0 - v) / (w * w))))); else tmp = ((2.0 / r) / r) + (-1.5 - (0.375 * (r * (w * (r * w))))); end tmp_2 = tmp; end
code[v_, w_, r_] := If[LessEqual[r, 2e-39], N[(N[(-1.5 - N[(0.375 * N[(N[(r * w), $MachinePrecision] * N[(r * w), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(1.0 / N[(r * N[(r / 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], If[LessEqual[r, 2.7e+142], N[(-1.5 + N[(N[(2.0 / N[(r * r), $MachinePrecision]), $MachinePrecision] + N[(N[(r * r), $MachinePrecision] * N[(N[(-0.375 + N[(v * 0.25), $MachinePrecision]), $MachinePrecision] / N[(N[(1.0 - v), $MachinePrecision] / N[(w * w), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[(2.0 / r), $MachinePrecision] / r), $MachinePrecision] + N[(-1.5 - N[(0.375 * N[(r * N[(w * N[(r * w), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;r \leq 2 \cdot 10^{-39}:\\
\;\;\;\;\left(-1.5 - 0.375 \cdot \left(\left(r \cdot w\right) \cdot \left(r \cdot w\right)\right)\right) + \frac{1}{r \cdot \frac{r}{2}}\\
\mathbf{elif}\;r \leq 2.7 \cdot 10^{+142}:\\
\;\;\;\;-1.5 + \left(\frac{2}{r \cdot r} + \left(r \cdot r\right) \cdot \frac{-0.375 + v \cdot 0.25}{\frac{1 - v}{w \cdot w}}\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{2}{r}}{r} + \left(-1.5 - 0.375 \cdot \left(r \cdot \left(w \cdot \left(r \cdot w\right)\right)\right)\right)\\
\end{array}
\end{array}
if r < 1.99999999999999986e-39Initial program 82.4%
Simplified95.6%
Taylor expanded in v around 0 75.5%
*-commutative75.5%
*-commutative75.5%
unpow275.5%
unpow275.5%
swap-sqr92.1%
unpow292.1%
*-commutative92.1%
Simplified92.1%
*-commutative92.1%
unpow292.1%
Applied egg-rr92.1%
clear-num92.0%
inv-pow92.0%
Applied egg-rr92.0%
unpow-192.0%
associate-/r/92.1%
Simplified92.1%
if 1.99999999999999986e-39 < r < 2.69999999999999983e142Initial program 92.6%
Simplified99.7%
if 2.69999999999999983e142 < r Initial program 84.5%
Simplified99.9%
Taylor expanded in v around 0 60.6%
*-commutative60.6%
*-commutative60.6%
unpow260.6%
unpow260.6%
swap-sqr84.1%
unpow284.1%
*-commutative84.1%
Simplified84.1%
*-commutative84.1%
metadata-eval84.1%
metadata-eval84.1%
pow-flip84.2%
metadata-eval84.2%
Applied egg-rr84.2%
pow-flip84.1%
metadata-eval84.1%
pow284.1%
associate-*r*84.2%
*-commutative84.2%
Applied egg-rr84.2%
Final simplification91.9%
(FPCore (v w r) :precision binary64 (+ (/ (/ 2.0 r) r) (- -1.5 (* (* r w) (/ (/ (- -0.375 (* v -0.25)) (+ v -1.0)) (/ 1.0 (* r w)))))))
/* Herbie alternative: the quotient term is factored as
 * (r*w) * ((0.25 v - 0.375)/(v - 1)) / (1/(r*w)), avoiding the
 * explicit (r*w)^2 product. */
double code(double v, double w, double r) {
return ((2.0 / r) / r) + (-1.5 - ((r * w) * (((-0.375 - (v * -0.25)) / (v + -1.0)) / (1.0 / (r * w)))));
}
real(8) function code(v, w, r)
real(8), intent (in) :: v
real(8), intent (in) :: w
real(8), intent (in) :: r
code = ((2.0d0 / r) / r) + ((-1.5d0) - ((r * w) * ((((-0.375d0) - (v * (-0.25d0))) / (v + (-1.0d0))) / (1.0d0 / (r * w)))))
end function
public static double code(double v, double w, double r) {
return ((2.0 / r) / r) + (-1.5 - ((r * w) * (((-0.375 - (v * -0.25)) / (v + -1.0)) / (1.0 / (r * w)))));
}
def code(v, w, r): return ((2.0 / r) / r) + (-1.5 - ((r * w) * (((-0.375 - (v * -0.25)) / (v + -1.0)) / (1.0 / (r * w)))))
function code(v, w, r) return Float64(Float64(Float64(2.0 / r) / r) + Float64(-1.5 - Float64(Float64(r * w) * Float64(Float64(Float64(-0.375 - Float64(v * -0.25)) / Float64(v + -1.0)) / Float64(1.0 / Float64(r * w)))))) end
function tmp = code(v, w, r) tmp = ((2.0 / r) / r) + (-1.5 - ((r * w) * (((-0.375 - (v * -0.25)) / (v + -1.0)) / (1.0 / (r * w))))); end
code[v_, w_, r_] := N[(N[(N[(2.0 / r), $MachinePrecision] / r), $MachinePrecision] + N[(-1.5 - N[(N[(r * w), $MachinePrecision] * N[(N[(N[(-0.375 - N[(v * -0.25), $MachinePrecision]), $MachinePrecision] / N[(v + -1.0), $MachinePrecision]), $MachinePrecision] / N[(1.0 / N[(r * w), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\frac{2}{r}}{r} + \left(-1.5 - \left(r \cdot w\right) \cdot \frac{\frac{-0.375 - v \cdot -0.25}{v + -1}}{\frac{1}{r \cdot w}}\right)
\end{array}
Initial program 84.2%
Simplified96.9%
*-un-lft-identity96.9%
div-inv96.9%
times-frac92.5%
associate-*r*94.2%
pow294.2%
*-commutative94.2%
Applied egg-rr94.2%
frac-2neg94.2%
metadata-eval94.2%
clear-num94.2%
frac-times94.9%
metadata-eval94.9%
pow-flip94.9%
metadata-eval94.9%
Applied egg-rr94.9%
associate-*r/99.7%
distribute-lft-neg-in99.7%
associate-/l*99.7%
neg-mul-199.7%
distribute-lft-neg-in99.7%
associate-/r*99.7%
neg-sub099.7%
fma-udef99.7%
*-commutative99.7%
+-commutative99.7%
associate--r+99.7%
metadata-eval99.7%
*-commutative99.7%
neg-sub099.7%
associate--r-99.7%
metadata-eval99.7%
*-commutative99.7%
Simplified99.7%
*-un-lft-identity99.7%
*-commutative99.7%
sqr-pow99.7%
times-frac99.7%
metadata-eval99.7%
sqrt-pow175.8%
sqrt-div75.8%
pow-flip75.8%
metadata-eval75.8%
pow275.8%
sqrt-prod55.2%
add-sqr-sqrt99.7%
*-commutative99.7%
+-commutative99.7%
metadata-eval99.7%
inv-pow99.7%
*-commutative99.7%
Applied egg-rr99.7%
Final simplification99.7%
(FPCore (v w r) :precision binary64 (+ (- -1.5 (* 0.375 (* (* r w) (* r w)))) (/ 1.0 (* r (/ r 2.0)))))
/* Herbie alternative: Taylor truncation in v around 0, dropping the
 * v-dependence entirely; 2/r^2 is written as 1/(r*(r/2)). */
double code(double v, double w, double r) {
return (-1.5 - (0.375 * ((r * w) * (r * w)))) + (1.0 / (r * (r / 2.0)));
}
real(8) function code(v, w, r)
real(8), intent (in) :: v
real(8), intent (in) :: w
real(8), intent (in) :: r
code = ((-1.5d0) - (0.375d0 * ((r * w) * (r * w)))) + (1.0d0 / (r * (r / 2.0d0)))
end function
public static double code(double v, double w, double r) {
return (-1.5 - (0.375 * ((r * w) * (r * w)))) + (1.0 / (r * (r / 2.0)));
}
def code(v, w, r): return (-1.5 - (0.375 * ((r * w) * (r * w)))) + (1.0 / (r * (r / 2.0)))
function code(v, w, r) return Float64(Float64(-1.5 - Float64(0.375 * Float64(Float64(r * w) * Float64(r * w)))) + Float64(1.0 / Float64(r * Float64(r / 2.0)))) end
function tmp = code(v, w, r) tmp = (-1.5 - (0.375 * ((r * w) * (r * w)))) + (1.0 / (r * (r / 2.0))); end
code[v_, w_, r_] := N[(N[(-1.5 - N[(0.375 * N[(N[(r * w), $MachinePrecision] * N[(r * w), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(1.0 / N[(r * N[(r / 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(-1.5 - 0.375 \cdot \left(\left(r \cdot w\right) \cdot \left(r \cdot w\right)\right)\right) + \frac{1}{r \cdot \frac{r}{2}}
\end{array}
Initial program 84.2%
Simplified96.9%
Taylor expanded in v around 0 74.2%
*-commutative74.2%
*-commutative74.2%
unpow274.2%
unpow274.2%
swap-sqr89.4%
unpow289.4%
*-commutative89.4%
Simplified89.4%
*-commutative89.4%
unpow289.4%
Applied egg-rr89.4%
clear-num89.4%
inv-pow89.4%
Applied egg-rr89.4%
unpow-189.4%
associate-/r/89.4%
Simplified89.4%
Final simplification89.4%
(FPCore (v w r) :precision binary64 (+ (/ (/ 2.0 r) r) (- -1.5 (* 0.375 (* w (* r (* r w)))))))
/* Herbie alternative: Taylor truncation in v around 0 with the quartic
 * term associated as w*(r*(r*w)). */
double code(double v, double w, double r) {
return ((2.0 / r) / r) + (-1.5 - (0.375 * (w * (r * (r * w)))));
}
real(8) function code(v, w, r)
real(8), intent (in) :: v
real(8), intent (in) :: w
real(8), intent (in) :: r
code = ((2.0d0 / r) / r) + ((-1.5d0) - (0.375d0 * (w * (r * (r * w)))))
end function
public static double code(double v, double w, double r) {
return ((2.0 / r) / r) + (-1.5 - (0.375 * (w * (r * (r * w)))));
}
def code(v, w, r): return ((2.0 / r) / r) + (-1.5 - (0.375 * (w * (r * (r * w)))))
function code(v, w, r) return Float64(Float64(Float64(2.0 / r) / r) + Float64(-1.5 - Float64(0.375 * Float64(w * Float64(r * Float64(r * w)))))) end
function tmp = code(v, w, r) tmp = ((2.0 / r) / r) + (-1.5 - (0.375 * (w * (r * (r * w))))); end
code[v_, w_, r_] := N[(N[(N[(2.0 / r), $MachinePrecision] / r), $MachinePrecision] + N[(-1.5 - N[(0.375 * N[(w * N[(r * N[(r * w), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\frac{2}{r}}{r} + \left(-1.5 - 0.375 \cdot \left(w \cdot \left(r \cdot \left(r \cdot w\right)\right)\right)\right)
\end{array}
Initial program 84.2%
Simplified96.9%
Taylor expanded in v around 0 74.2%
*-commutative74.2%
*-commutative74.2%
unpow274.2%
unpow274.2%
swap-sqr89.4%
unpow289.4%
*-commutative89.4%
Simplified89.4%
*-commutative89.4%
metadata-eval89.4%
metadata-eval89.4%
pow-flip89.4%
metadata-eval89.4%
Applied egg-rr89.4%
pow-flip89.4%
metadata-eval89.4%
pow289.4%
associate-*l*87.3%
*-commutative87.3%
Applied egg-rr87.3%
Final simplification87.3%
(FPCore (v w r) :precision binary64 (+ (/ (/ 2.0 r) r) (- -1.5 (* 0.375 (* (* r w) (* r w))))))
/* Herbie alternative: Taylor truncation in v around 0 with the quartic
 * term associated as (r*w)*(r*w). */
double code(double v, double w, double r) {
return ((2.0 / r) / r) + (-1.5 - (0.375 * ((r * w) * (r * w))));
}
real(8) function code(v, w, r)
real(8), intent (in) :: v
real(8), intent (in) :: w
real(8), intent (in) :: r
code = ((2.0d0 / r) / r) + ((-1.5d0) - (0.375d0 * ((r * w) * (r * w))))
end function
public static double code(double v, double w, double r) {
return ((2.0 / r) / r) + (-1.5 - (0.375 * ((r * w) * (r * w))));
}
def code(v, w, r): return ((2.0 / r) / r) + (-1.5 - (0.375 * ((r * w) * (r * w))))
function code(v, w, r) return Float64(Float64(Float64(2.0 / r) / r) + Float64(-1.5 - Float64(0.375 * Float64(Float64(r * w) * Float64(r * w))))) end
function tmp = code(v, w, r) tmp = ((2.0 / r) / r) + (-1.5 - (0.375 * ((r * w) * (r * w)))); end
code[v_, w_, r_] := N[(N[(N[(2.0 / r), $MachinePrecision] / r), $MachinePrecision] + N[(-1.5 - N[(0.375 * N[(N[(r * w), $MachinePrecision] * N[(r * w), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\frac{2}{r}}{r} + \left(-1.5 - 0.375 \cdot \left(\left(r \cdot w\right) \cdot \left(r \cdot w\right)\right)\right)
\end{array}
Initial program 84.2%
Simplified96.9%
Taylor expanded in v around 0 74.2%
*-commutative74.2%
*-commutative74.2%
unpow274.2%
unpow274.2%
swap-sqr89.4%
unpow289.4%
*-commutative89.4%
Simplified89.4%
*-commutative89.4%
unpow289.4%
Applied egg-rr89.4%
Final simplification89.4%
herbie shell --seed 2024019
(FPCore (v w r)
:name "Rosa's TurbineBenchmark"
:precision binary64
(- (- (+ 3.0 (/ 2.0 (* r r))) (/ (* (* 0.125 (- 3.0 (* 2.0 v))) (* (* (* w w) r) r)) (- 1.0 v))) 4.5))