
(FPCore (v w r) :precision binary64 (- (- (+ 3.0 (/ 2.0 (* r r))) (/ (* (* 0.125 (- 3.0 (* 2.0 v))) (* (* (* w w) r) r)) (- 1.0 v))) 4.5))
/* Evaluates ((3 + 2/r^2) - (0.125*(3 - 2v) * w^2 * r^2) / (1 - v)) - 4.5
 * in binary64, preserving the exact operation order of the FPCore spec above. */
double code(double v, double w, double r) {
    /* 3 + 2/r^2 */
    double head = 3.0 + (2.0 / (r * r));
    /* 0.125 * (3 - 2v), the scalar factor of the numerator */
    double coeff = 0.125 * (3.0 - (2.0 * v));
    /* ((w*w)*r)*r, kept left-associated as in the spec */
    double wwrr = ((w * w) * r) * r;
    double quot = (coeff * wwrr) / (1.0 - v);
    return (head - quot) - 4.5;
}
! Evaluates ((3 + 2/r**2) - (0.125*(3 - 2*v) * w**2 * r**2)/(1 - v)) - 4.5
! in double precision, preserving the exact association of the FPCore spec.
real(8) function code(v, w, r)
real(8), intent (in) :: v
real(8), intent (in) :: w
real(8), intent (in) :: r
code = ((3.0d0 + (2.0d0 / (r * r))) - (((0.125d0 * (3.0d0 - (2.0d0 * v))) * (((w * w) * r) * r)) / (1.0d0 - v))) - 4.5d0
end function
/**
 * Evaluates ((3 + 2/r^2) - (0.125*(3 - 2*v) * w^2 * r^2)/(1 - v)) - 4.5 in
 * binary64, preserving the exact operation order of the FPCore program
 * ("Rosa's TurbineBenchmark").
 *
 * @param v input variable (the expression divides by 1 - v)
 * @param w input variable
 * @param r input variable (the expression divides by r * r)
 * @return the expression value in binary64
 */
public static double code(double v, double w, double r) {
return ((3.0 + (2.0 / (r * r))) - (((0.125 * (3.0 - (2.0 * v))) * (((w * w) * r) * r)) / (1.0 - v))) - 4.5;
}
def code(v, w, r):
    """Binary64 evaluation of ((3 + 2/r**2) - (0.125*(3 - 2*v)*w**2*r**2)/(1 - v)) - 4.5.

    The association of every operation matches the FPCore spec exactly.
    """
    head = 3.0 + (2.0 / (r * r))
    coeff = 0.125 * (3.0 - (2.0 * v))
    wwrr = ((w * w) * r) * r
    quot = (coeff * wwrr) / (1.0 - v)
    return (head - quot) - 4.5
# Evaluates ((3 + 2/r^2) - (0.125*(3 - 2v) * w^2 * r^2)/(1 - v)) - 4.5 with every
# intermediate explicitly rounded to Float64, matching the FPCore spec's association.
function code(v, w, r) return Float64(Float64(Float64(3.0 + Float64(2.0 / Float64(r * r))) - Float64(Float64(Float64(0.125 * Float64(3.0 - Float64(2.0 * v))) * Float64(Float64(Float64(w * w) * r) * r)) / Float64(1.0 - v))) - 4.5) end
% Evaluates ((3 + 2/r^2) - (0.125*(3 - 2*v) * w^2 * r^2)/(1 - v)) - 4.5
% in double precision, preserving the FPCore spec's association.
function tmp = code(v, w, r) tmp = ((3.0 + (2.0 / (r * r))) - (((0.125 * (3.0 - (2.0 * v))) * (((w * w) * r) * r)) / (1.0 - v))) - 4.5; end
(* Evaluates ((3 + 2/r^2) - (0.125*(3 - 2v) w^2 r^2)/(1 - v)) - 4.5 with every
   intermediate rounded via N[..., $MachinePrecision], mirroring binary64 evaluation. *)
code[v_, w_, r_] := N[(N[(N[(3.0 + N[(2.0 / N[(r * r), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(N[(N[(0.125 * N[(3.0 - N[(2.0 * v), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(N[(N[(w * w), $MachinePrecision] * r), $MachinePrecision] * r), $MachinePrecision]), $MachinePrecision] / N[(1.0 - v), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - 4.5), $MachinePrecision]
\begin{array}{l}
\\
\left(\left(3 + \frac{2}{r \cdot r}\right) - \frac{\left(0.125 \cdot \left(3 - 2 \cdot v\right)\right) \cdot \left(\left(\left(w \cdot w\right) \cdot r\right) \cdot r\right)}{1 - v}\right) - 4.5
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 6 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (v w r) :precision binary64 (- (- (+ 3.0 (/ 2.0 (* r r))) (/ (* (* 0.125 (- 3.0 (* 2.0 v))) (* (* (* w w) r) r)) (- 1.0 v))) 4.5))
double code(double v, double w, double r) {
return ((3.0 + (2.0 / (r * r))) - (((0.125 * (3.0 - (2.0 * v))) * (((w * w) * r) * r)) / (1.0 - v))) - 4.5;
}
real(8) function code(v, w, r)
real(8), intent (in) :: v
real(8), intent (in) :: w
real(8), intent (in) :: r
code = ((3.0d0 + (2.0d0 / (r * r))) - (((0.125d0 * (3.0d0 - (2.0d0 * v))) * (((w * w) * r) * r)) / (1.0d0 - v))) - 4.5d0
end function
public static double code(double v, double w, double r) {
return ((3.0 + (2.0 / (r * r))) - (((0.125 * (3.0 - (2.0 * v))) * (((w * w) * r) * r)) / (1.0 - v))) - 4.5;
}
def code(v, w, r): return ((3.0 + (2.0 / (r * r))) - (((0.125 * (3.0 - (2.0 * v))) * (((w * w) * r) * r)) / (1.0 - v))) - 4.5
function code(v, w, r) return Float64(Float64(Float64(3.0 + Float64(2.0 / Float64(r * r))) - Float64(Float64(Float64(0.125 * Float64(3.0 - Float64(2.0 * v))) * Float64(Float64(Float64(w * w) * r) * r)) / Float64(1.0 - v))) - 4.5) end
function tmp = code(v, w, r) tmp = ((3.0 + (2.0 / (r * r))) - (((0.125 * (3.0 - (2.0 * v))) * (((w * w) * r) * r)) / (1.0 - v))) - 4.5; end
code[v_, w_, r_] := N[(N[(N[(3.0 + N[(2.0 / N[(r * r), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(N[(N[(0.125 * N[(3.0 - N[(2.0 * v), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(N[(N[(w * w), $MachinePrecision] * r), $MachinePrecision] * r), $MachinePrecision]), $MachinePrecision] / N[(1.0 - v), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - 4.5), $MachinePrecision]
\begin{array}{l}
\\
\left(\left(3 + \frac{2}{r \cdot r}\right) - \frac{\left(0.125 \cdot \left(3 - 2 \cdot v\right)\right) \cdot \left(\left(\left(w \cdot w\right) \cdot r\right) \cdot r\right)}{1 - v}\right) - 4.5
\end{array}
(FPCore (v w r) :precision binary64 (+ (/ 2.0 (* r r)) (+ -1.5 (/ (+ 0.375 (* -0.25 v)) (/ (+ v -1.0) (* (* r w) (* r w)))))))
/* Herbie-rearranged alternative of the turbine expression:
 * 2/r^2 + (-1.5 + (0.375 - 0.25*v) / ((v - 1) / (r*w)^2)).
 * Algebraically equivalent to the original (0.375 - 0.25*v = 0.125*(3 - 2*v),
 * and v - 1 = -(1 - v)); the association is the one Herbie found more accurate. */
double code(double v, double w, double r) {
return (2.0 / (r * r)) + (-1.5 + ((0.375 + (-0.25 * v)) / ((v + -1.0) / ((r * w) * (r * w)))));
}
real(8) function code(v, w, r)
real(8), intent (in) :: v
real(8), intent (in) :: w
real(8), intent (in) :: r
code = (2.0d0 / (r * r)) + ((-1.5d0) + ((0.375d0 + ((-0.25d0) * v)) / ((v + (-1.0d0)) / ((r * w) * (r * w)))))
end function
public static double code(double v, double w, double r) {
return (2.0 / (r * r)) + (-1.5 + ((0.375 + (-0.25 * v)) / ((v + -1.0) / ((r * w) * (r * w)))));
}
def code(v, w, r):
    """Herbie-rearranged turbine expression.

    Computes 2/r**2 + (-1.5 + (0.375 - 0.25*v) / ((v - 1) / (r*w)**2)),
    algebraically equivalent to the original program.
    """
    recip = 2.0 / (r * r)
    rw_sq = (r * w) * (r * w)
    numer = 0.375 + (-0.25 * v)
    denom = (v + -1.0) / rw_sq
    return recip + (-1.5 + (numer / denom))
function code(v, w, r) return Float64(Float64(2.0 / Float64(r * r)) + Float64(-1.5 + Float64(Float64(0.375 + Float64(-0.25 * v)) / Float64(Float64(v + -1.0) / Float64(Float64(r * w) * Float64(r * w)))))) end
function tmp = code(v, w, r) tmp = (2.0 / (r * r)) + (-1.5 + ((0.375 + (-0.25 * v)) / ((v + -1.0) / ((r * w) * (r * w))))); end
code[v_, w_, r_] := N[(N[(2.0 / N[(r * r), $MachinePrecision]), $MachinePrecision] + N[(-1.5 + N[(N[(0.375 + N[(-0.25 * v), $MachinePrecision]), $MachinePrecision] / N[(N[(v + -1.0), $MachinePrecision] / N[(N[(r * w), $MachinePrecision] * N[(r * w), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{2}{r \cdot r} + \left(-1.5 + \frac{0.375 + -0.25 \cdot v}{\frac{v + -1}{\left(r \cdot w\right) \cdot \left(r \cdot w\right)}}\right)
\end{array}
Initial program 84.0%
Simplified 86.2%
associate-*l* 86.2%
fma-undefine 86.2%
*-commutative 86.2%
+-commutative 86.2%
metadata-eval 86.2%
cancel-sign-sub-inv 86.2%
associate-*r/ 86.6%
*-commutative 86.6%
associate-/l* 86.6%
associate-*l* 86.6%
clear-num 86.6%
un-div-inv 86.7%
Applied egg-rr 99.8%
unpow2 99.8%
Applied egg-rr 99.8%
Final simplification 99.8%
(FPCore (v w r)
:precision binary64
(let* ((t_0 (/ 2.0 (* r r))))
(if (or (<= v -1e+34) (not (<= v 1.0)))
(+ t_0 (+ -1.5 (/ (+ 0.375 (* -0.25 v)) (/ v (* (* r w) (* r w))))))
(- (- (+ t_0 3.0) (* (* r w) (* r (* 0.375 w)))) 4.5))))
/* Branchwise Herbie alternative of the turbine expression.  The guard picks a
 * rearranged quotient form when v lies outside (-1e34, 1] (per the report's
 * "Taylor expanded in v around inf" derivation) and a simplified polynomial
 * form otherwise.  Operation order is accuracy-sensitive; do not reassociate. */
double code(double v, double w, double r) {
/* Shared reciprocal term 2/r^2. */
double t_0 = 2.0 / (r * r);
double tmp;
if ((v <= -1e+34) || !(v <= 1.0)) {
/* Quotient form used for v outside (-1e34, 1]. */
tmp = t_0 + (-1.5 + ((0.375 + (-0.25 * v)) / (v / ((r * w) * (r * w)))));
} else {
/* Polynomial form (degree 4 in r, w) used for moderate v. */
tmp = ((t_0 + 3.0) - ((r * w) * (r * (0.375 * w)))) - 4.5;
}
return tmp;
}
real(8) function code(v, w, r)
real(8), intent (in) :: v
real(8), intent (in) :: w
real(8), intent (in) :: r
real(8) :: t_0
real(8) :: tmp
t_0 = 2.0d0 / (r * r)
if ((v <= (-1d+34)) .or. (.not. (v <= 1.0d0))) then
tmp = t_0 + ((-1.5d0) + ((0.375d0 + ((-0.25d0) * v)) / (v / ((r * w) * (r * w)))))
else
tmp = ((t_0 + 3.0d0) - ((r * w) * (r * (0.375d0 * w)))) - 4.5d0
end if
code = tmp
end function
/**
 * Branchwise Herbie alternative of the turbine expression.  The guard selects
 * a rearranged quotient form when v lies outside (-1e34, 1] and a simplified
 * polynomial form otherwise.  Operation order is accuracy-sensitive.
 *
 * @param v input; decides the branch via (v &lt;= -1e34) || !(v &lt;= 1.0)
 * @param w input variable
 * @param r input variable (both branches divide by r * r)
 * @return the rewritten expression value in binary64
 */
public static double code(double v, double w, double r) {
// Shared reciprocal term 2/r^2.
double t_0 = 2.0 / (r * r);
double tmp;
if ((v <= -1e+34) || !(v <= 1.0)) {
// Quotient form used for v outside (-1e34, 1].
tmp = t_0 + (-1.5 + ((0.375 + (-0.25 * v)) / (v / ((r * w) * (r * w)))));
} else {
// Polynomial form used for moderate v.
tmp = ((t_0 + 3.0) - ((r * w) * (r * (0.375 * w)))) - 4.5;
}
return tmp;
}
def code(v, w, r):
    """Branchwise Herbie alternative of the turbine expression.

    The original report line fused all statements onto one line, which is not
    valid Python; this restores the multi-line form of the FPCore program.
    The guard picks a rearranged quotient form when v lies outside (-1e34, 1]
    and a simplified polynomial form otherwise.
    """
    # Shared reciprocal term 2/r**2.
    t_0 = 2.0 / (r * r)
    tmp = 0
    if (v <= -1e+34) or not (v <= 1.0):
        # Quotient form used for v outside (-1e34, 1].
        tmp = t_0 + (-1.5 + ((0.375 + (-0.25 * v)) / (v / ((r * w) * (r * w)))))
    else:
        # Polynomial form used for moderate v.
        tmp = ((t_0 + 3.0) - ((r * w) * (r * (0.375 * w)))) - 4.5
    return tmp
function code(v, w, r) t_0 = Float64(2.0 / Float64(r * r)) tmp = 0.0 if ((v <= -1e+34) || !(v <= 1.0)) tmp = Float64(t_0 + Float64(-1.5 + Float64(Float64(0.375 + Float64(-0.25 * v)) / Float64(v / Float64(Float64(r * w) * Float64(r * w)))))); else tmp = Float64(Float64(Float64(t_0 + 3.0) - Float64(Float64(r * w) * Float64(r * Float64(0.375 * w)))) - 4.5); end return tmp end
function tmp_2 = code(v, w, r) t_0 = 2.0 / (r * r); tmp = 0.0; if ((v <= -1e+34) || ~((v <= 1.0))) tmp = t_0 + (-1.5 + ((0.375 + (-0.25 * v)) / (v / ((r * w) * (r * w))))); else tmp = ((t_0 + 3.0) - ((r * w) * (r * (0.375 * w)))) - 4.5; end tmp_2 = tmp; end
code[v_, w_, r_] := Block[{t$95$0 = N[(2.0 / N[(r * r), $MachinePrecision]), $MachinePrecision]}, If[Or[LessEqual[v, -1e+34], N[Not[LessEqual[v, 1.0]], $MachinePrecision]], N[(t$95$0 + N[(-1.5 + N[(N[(0.375 + N[(-0.25 * v), $MachinePrecision]), $MachinePrecision] / N[(v / N[(N[(r * w), $MachinePrecision] * N[(r * w), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[(t$95$0 + 3.0), $MachinePrecision] - N[(N[(r * w), $MachinePrecision] * N[(r * N[(0.375 * w), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - 4.5), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{2}{r \cdot r}\\
\mathbf{if}\;v \leq -1 \cdot 10^{+34} \lor \neg \left(v \leq 1\right):\\
\;\;\;\;t\_0 + \left(-1.5 + \frac{0.375 + -0.25 \cdot v}{\frac{v}{\left(r \cdot w\right) \cdot \left(r \cdot w\right)}}\right)\\
\mathbf{else}:\\
\;\;\;\;\left(\left(t\_0 + 3\right) - \left(r \cdot w\right) \cdot \left(r \cdot \left(0.375 \cdot w\right)\right)\right) - 4.5\\
\end{array}
\end{array}
if v < -9.99999999999999946e33 or 1 < v:
Initial program 77.1%
Simplified 81.9%
associate-*l* 81.9%
fma-undefine 81.9%
*-commutative 81.9%
+-commutative 81.9%
metadata-eval 81.9%
cancel-sign-sub-inv 81.9%
associate-*r/ 82.7%
*-commutative 82.7%
associate-/l* 82.7%
associate-*l* 82.7%
clear-num 82.7%
un-div-inv 82.7%
Applied egg-rr 99.8%
unpow2 99.8%
Applied egg-rr 99.8%
Taylor expanded in v around inf 99.2%
neg-mul-1 99.2%
Simplified 99.2%
if -9.99999999999999946e33 < v < 1:
Initial program 90.0%
associate-/l* 90.0%
cancel-sign-sub-inv 90.0%
metadata-eval 90.0%
+-commutative 90.0%
*-commutative 90.0%
fma-undefine 90.0%
*-commutative 90.0%
*-commutative 90.0%
associate-/l* 90.0%
*-commutative 90.0%
associate-*r/ 90.0%
associate-*r* 90.0%
associate-*l* 98.4%
associate-*r* 99.8%
Applied egg-rr 99.8%
Taylor expanded in v around 0 99.4%
*-commutative 99.4%
*-commutative 99.4%
*-commutative 99.4%
associate-*l* 99.5%
Simplified 99.5%
Taylor expanded in v around 0 99.5%
Final simplification 99.4%
(FPCore (v w r) :precision binary64 (- (- (+ (/ 2.0 (* r r)) 3.0) (* (* r w) (* r (* 0.375 w)))) 4.5))
/* v-independent Herbie alternative: (2/r^2 + 3) - (r*w)*(r*(0.375*w)) - 4.5,
 * evaluated in binary64 with the spec's exact association. */
double code(double v, double w, double r) {
    (void) v;  /* v was eliminated by the rewrite; parameter kept for the shared signature */
    double head = (2.0 / (r * r)) + 3.0;
    double tail = (r * w) * (r * (0.375 * w));
    return (head - tail) - 4.5;
}
real(8) function code(v, w, r)
real(8), intent (in) :: v
real(8), intent (in) :: w
real(8), intent (in) :: r
code = (((2.0d0 / (r * r)) + 3.0d0) - ((r * w) * (r * (0.375d0 * w)))) - 4.5d0
end function
public static double code(double v, double w, double r) {
return (((2.0 / (r * r)) + 3.0) - ((r * w) * (r * (0.375 * w)))) - 4.5;
}
def code(v, w, r):
    """v-independent Herbie alternative: (2/r**2 + 3) - (r*w)*(r*(0.375*w)) - 4.5.

    v was eliminated by the rewrite; the parameter is kept for the shared signature.
    """
    head = (2.0 / (r * r)) + 3.0
    tail = (r * w) * (r * (0.375 * w))
    return (head - tail) - 4.5
function code(v, w, r) return Float64(Float64(Float64(Float64(2.0 / Float64(r * r)) + 3.0) - Float64(Float64(r * w) * Float64(r * Float64(0.375 * w)))) - 4.5) end
function tmp = code(v, w, r) tmp = (((2.0 / (r * r)) + 3.0) - ((r * w) * (r * (0.375 * w)))) - 4.5; end
code[v_, w_, r_] := N[(N[(N[(N[(2.0 / N[(r * r), $MachinePrecision]), $MachinePrecision] + 3.0), $MachinePrecision] - N[(N[(r * w), $MachinePrecision] * N[(r * N[(0.375 * w), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - 4.5), $MachinePrecision]
\begin{array}{l}
\\
\left(\left(\frac{2}{r \cdot r} + 3\right) - \left(r \cdot w\right) \cdot \left(r \cdot \left(0.375 \cdot w\right)\right)\right) - 4.5
\end{array}
Initial program 84.0%
associate-/l* 86.6%
cancel-sign-sub-inv 86.6%
metadata-eval 86.6%
+-commutative 86.6%
*-commutative 86.6%
fma-undefine 86.6%
*-commutative 86.6%
*-commutative 86.6%
associate-/l* 86.6%
*-commutative 86.6%
associate-*r/ 86.3%
associate-*r* 83.8%
associate-*l* 93.8%
associate-*r* 93.1%
Applied egg-rr 93.1%
Taylor expanded in v around 0 84.8%
*-commutative 84.8%
*-commutative 84.8%
*-commutative 84.8%
associate-*l* 84.9%
Simplified 84.9%
Taylor expanded in v around 0 94.1%
Final simplification 94.1%
(FPCore (v w r) :precision binary64 (if (<= r 9.5) (- (+ (/ 2.0 (* r r)) 3.0) 4.5) (- (- 3.0 (* (* r w) (* r (* 0.375 w)))) 4.5)))
/* Herbie alternative split on r: for r <= 9.5 only the reciprocal term 2/r^2
 * is kept; for larger r only the 0.375*(r*w)^2 term is kept (per the report's
 * Taylor expansions in r around 0 and inf).  v is unused in both branches. */
double code(double v, double w, double r) {
double tmp;
if (r <= 9.5) {
/* Small r: reciprocal-dominated form. */
tmp = ((2.0 / (r * r)) + 3.0) - 4.5;
} else {
/* Large r: polynomial-dominated form. */
tmp = (3.0 - ((r * w) * (r * (0.375 * w)))) - 4.5;
}
return tmp;
}
real(8) function code(v, w, r)
real(8), intent (in) :: v
real(8), intent (in) :: w
real(8), intent (in) :: r
real(8) :: tmp
if (r <= 9.5d0) then
tmp = ((2.0d0 / (r * r)) + 3.0d0) - 4.5d0
else
tmp = (3.0d0 - ((r * w) * (r * (0.375d0 * w)))) - 4.5d0
end if
code = tmp
end function
public static double code(double v, double w, double r) {
double tmp;
if (r <= 9.5) {
tmp = ((2.0 / (r * r)) + 3.0) - 4.5;
} else {
tmp = (3.0 - ((r * w) * (r * (0.375 * w)))) - 4.5;
}
return tmp;
}
def code(v, w, r):
    """Herbie alternative split on r.

    The original report line fused all statements onto one line, which is not
    valid Python; this restores the multi-line form of the FPCore program.
    For r <= 9.5 only the reciprocal term 2/r**2 is kept; for larger r only
    the 0.375*(r*w)**2 term is kept.  v is unused in both branches.
    """
    tmp = 0
    if r <= 9.5:
        # Small r: reciprocal-dominated form.
        tmp = ((2.0 / (r * r)) + 3.0) - 4.5
    else:
        # Large r: polynomial-dominated form.
        tmp = (3.0 - ((r * w) * (r * (0.375 * w)))) - 4.5
    return tmp
function code(v, w, r) tmp = 0.0 if (r <= 9.5) tmp = Float64(Float64(Float64(2.0 / Float64(r * r)) + 3.0) - 4.5); else tmp = Float64(Float64(3.0 - Float64(Float64(r * w) * Float64(r * Float64(0.375 * w)))) - 4.5); end return tmp end
function tmp_2 = code(v, w, r) tmp = 0.0; if (r <= 9.5) tmp = ((2.0 / (r * r)) + 3.0) - 4.5; else tmp = (3.0 - ((r * w) * (r * (0.375 * w)))) - 4.5; end tmp_2 = tmp; end
code[v_, w_, r_] := If[LessEqual[r, 9.5], N[(N[(N[(2.0 / N[(r * r), $MachinePrecision]), $MachinePrecision] + 3.0), $MachinePrecision] - 4.5), $MachinePrecision], N[(N[(3.0 - N[(N[(r * w), $MachinePrecision] * N[(r * N[(0.375 * w), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - 4.5), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;r \leq 9.5:\\
\;\;\;\;\left(\frac{2}{r \cdot r} + 3\right) - 4.5\\
\mathbf{else}:\\
\;\;\;\;\left(3 - \left(r \cdot w\right) \cdot \left(r \cdot \left(0.375 \cdot w\right)\right)\right) - 4.5\\
\end{array}
\end{array}
if r < 9.5:
Initial program 81.3%
Simplified 79.1%
Taylor expanded in r around 0 68.6%
if 9.5 < r:
Initial program 94.5%
associate-/l* 94.5%
cancel-sign-sub-inv 94.5%
metadata-eval 94.5%
+-commutative 94.5%
*-commutative 94.5%
fma-undefine 94.5%
*-commutative 94.5%
*-commutative 94.5%
associate-/l* 94.4%
*-commutative 94.4%
associate-*r/ 94.5%
associate-*r* 90.5%
associate-*l* 90.8%
associate-*r* 90.7%
Applied egg-rr 90.7%
Taylor expanded in r around inf 90.7%
Taylor expanded in v around 0 79.1%
Taylor expanded in v around 0 91.4%
*-commutative 78.9%
*-commutative 78.9%
*-commutative 78.9%
associate-*l* 79.0%
Simplified 91.5%
Final simplification 73.3%
(FPCore (v w r) :precision binary64 (- (+ (/ 2.0 (* r r)) 3.0) 4.5))
/* Herbie alternative keeping only the reciprocal term: (2/r^2 + 3) - 4.5.
 * v and w were eliminated by the rewrite; parameters kept for the shared signature. */
double code(double v, double w, double r) {
    (void) v;
    (void) w;
    double recip = 2.0 / (r * r);
    return (recip + 3.0) - 4.5;
}
real(8) function code(v, w, r)
real(8), intent (in) :: v
real(8), intent (in) :: w
real(8), intent (in) :: r
code = ((2.0d0 / (r * r)) + 3.0d0) - 4.5d0
end function
public static double code(double v, double w, double r) {
return ((2.0 / (r * r)) + 3.0) - 4.5;
}
def code(v, w, r):
    """Herbie alternative keeping only the reciprocal term: (2/r**2 + 3) - 4.5.

    v and w were eliminated by the rewrite; parameters kept for the shared signature.
    """
    recip = 2.0 / (r * r)
    return (recip + 3.0) - 4.5
function code(v, w, r) return Float64(Float64(Float64(2.0 / Float64(r * r)) + 3.0) - 4.5) end
function tmp = code(v, w, r) tmp = ((2.0 / (r * r)) + 3.0) - 4.5; end
code[v_, w_, r_] := N[(N[(N[(2.0 / N[(r * r), $MachinePrecision]), $MachinePrecision] + 3.0), $MachinePrecision] - 4.5), $MachinePrecision]
\begin{array}{l}
\\
\left(\frac{2}{r \cdot r} + 3\right) - 4.5
\end{array}
Initial program 84.0%
Simplified 79.2%
Taylor expanded in r around 0 61.2%
Final simplification 61.2%
(FPCore (v w r) :precision binary64 -1.5)
/* Fully collapsed Herbie alternative: the whole expression reduced to the
 * constant -1.5 (per the report's successive Taylor expansions in r).
 * All parameters are unused; the signature is kept to match the other variants. */
double code(double v, double w, double r) {
return -1.5;
}
real(8) function code(v, w, r)
real(8), intent (in) :: v
real(8), intent (in) :: w
real(8), intent (in) :: r
code = -1.5d0
end function
public static double code(double v, double w, double r) {
return -1.5;
}
def code(v, w, r):
    """Fully collapsed Herbie alternative: the constant -1.5.

    All parameters are unused; the signature matches the other variants.
    """
    return -1.5
function code(v, w, r) return -1.5 end
function tmp = code(v, w, r) tmp = -1.5; end
code[v_, w_, r_] := -1.5
\begin{array}{l}
\\
-1.5
\end{array}
Initial program 84.0%
Simplified 79.2%
Taylor expanded in r around 0 61.2%
Taylor expanded in r around inf 14.9%
herbie shell --seed 2024116
(FPCore (v w r)
:name "Rosa's TurbineBenchmark"
:precision binary64
(- (- (+ 3.0 (/ 2.0 (* r r))) (/ (* (* 0.125 (- 3.0 (* 2.0 v))) (* (* (* w w) r) r)) (- 1.0 v))) 4.5))