
(FPCore (v w r) :precision binary64 (- (- (+ 3.0 (/ 2.0 (* r r))) (/ (* (* 0.125 (- 3.0 (* 2.0 v))) (* (* (* w w) r) r)) (- 1.0 v))) 4.5))
/* Herbie initial program, binary64:
 * ((3 + 2/r^2) - (0.125*(3 - 2v) * w^2*r^2)/(1 - v)) - 4.5.
 * Temporaries only; the floating-point operation order of the FPCore
 * spec above is preserved exactly — do not reassociate. */
double code(double v, double w, double r) {
    double base = 3.0 + (2.0 / (r * r));
    double num = (0.125 * (3.0 - (2.0 * v))) * (((w * w) * r) * r);
    return (base - (num / (1.0 - v))) - 4.5;
}
! Herbie initial program evaluated in real(8)/binary64.
! Operation order follows the generated FPCore spec; left byte-identical
! because the parenthesization is numerically significant.
real(8) function code(v, w, r)
real(8), intent (in) :: v
real(8), intent (in) :: w
real(8), intent (in) :: r
code = ((3.0d0 + (2.0d0 / (r * r))) - (((0.125d0 * (3.0d0 - (2.0d0 * v))) * (((w * w) * r) * r)) / (1.0d0 - v))) - 4.5d0
end function
/**
 * Herbie initial program in binary64:
 * ((3 + 2/r^2) - (0.125*(3 - 2v) * w^2*r^2)/(1 - v)) - 4.5.
 * Evaluation order matches the FPCore spec exactly.
 */
public static double code(double v, double w, double r) {
    final double base = 3.0 + (2.0 / (r * r));
    final double num = (0.125 * (3.0 - (2.0 * v))) * (((w * w) * r) * r);
    return (base - (num / (1.0 - v))) - 4.5;
}
def code(v, w, r):
    """Herbie initial program: ((3 + 2/r^2) - (0.125*(3-2v)*w^2*r^2)/(1-v)) - 4.5.

    Operation order matches the FPCore spec exactly (binary64 semantics).
    """
    base = 3.0 + (2.0 / (r * r))
    num = (0.125 * (3.0 - (2.0 * v))) * (((w * w) * r) * r)
    return (base - (num / (1.0 - v))) - 4.5
function code(v, w, r)
    # Herbie initial program; Float64() forces binary64 rounding after every operation.
    base = Float64(3.0 + Float64(2.0 / Float64(r * r)))
    num = Float64(Float64(0.125 * Float64(3.0 - Float64(2.0 * v))) * Float64(Float64(Float64(w * w) * r) * r))
    frac = Float64(num / Float64(1.0 - v))
    return Float64(Float64(base - frac) - 4.5)
end
function tmp = code(v, w, r)
    % Herbie initial program: ((3 + 2/r^2) - (0.125*(3-2v)*w^2*r^2)/(1-v)) - 4.5.
    % Operation order matches the FPCore spec exactly.
    base = 3.0 + (2.0 / (r * r));
    num = (0.125 * (3.0 - (2.0 * v))) * (((w * w) * r) * r);
    tmp = (base - (num / (1.0 - v))) - 4.5;
end
(* Herbie initial program; each N[..., $MachinePrecision] rounds after every operation, so the nesting is numerically significant. *)
code[v_, w_, r_] := N[(N[(N[(3.0 + N[(2.0 / N[(r * r), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(N[(N[(0.125 * N[(3.0 - N[(2.0 * v), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(N[(N[(w * w), $MachinePrecision] * r), $MachinePrecision] * r), $MachinePrecision]), $MachinePrecision] / N[(1.0 - v), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - 4.5), $MachinePrecision]
\begin{array}{l}
\\
\left(\left(3 + \frac{2}{r \cdot r}\right) - \frac{\left(0.125 \cdot \left(3 - 2 \cdot v\right)\right) \cdot \left(\left(\left(w \cdot w\right) \cdot r\right) \cdot r\right)}{1 - v}\right) - 4.5
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 6 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (v w r) :precision binary64 (- (- (+ 3.0 (/ 2.0 (* r r))) (/ (* (* 0.125 (- 3.0 (* 2.0 v))) (* (* (* w w) r) r)) (- 1.0 v))) 4.5))
/* Alternative 1: byte-identical to the initial program listing (baseline ordering). */
double code(double v, double w, double r) {
return ((3.0 + (2.0 / (r * r))) - (((0.125 * (3.0 - (2.0 * v))) * (((w * w) * r) * r)) / (1.0 - v))) - 4.5;
}
! Alternative 1: byte-identical to the initial program listing (baseline ordering).
real(8) function code(v, w, r)
real(8), intent (in) :: v
real(8), intent (in) :: w
real(8), intent (in) :: r
code = ((3.0d0 + (2.0d0 / (r * r))) - (((0.125d0 * (3.0d0 - (2.0d0 * v))) * (((w * w) * r) * r)) / (1.0d0 - v))) - 4.5d0
end function
/** Alternative 1: byte-identical to the initial program listing (baseline ordering). */
public static double code(double v, double w, double r) {
return ((3.0 + (2.0 / (r * r))) - (((0.125 * (3.0 - (2.0 * v))) * (((w * w) * r) * r)) / (1.0 - v))) - 4.5;
}
# Alternative 1: byte-identical to the initial program listing (baseline ordering).
def code(v, w, r): return ((3.0 + (2.0 / (r * r))) - (((0.125 * (3.0 - (2.0 * v))) * (((w * w) * r) * r)) / (1.0 - v))) - 4.5
# Alternative 1: byte-identical to the initial program listing; Float64() forces binary64 rounding per operation.
function code(v, w, r) return Float64(Float64(Float64(3.0 + Float64(2.0 / Float64(r * r))) - Float64(Float64(Float64(0.125 * Float64(3.0 - Float64(2.0 * v))) * Float64(Float64(Float64(w * w) * r) * r)) / Float64(1.0 - v))) - 4.5) end
% Alternative 1: byte-identical to the initial program listing (baseline ordering).
function tmp = code(v, w, r) tmp = ((3.0 + (2.0 / (r * r))) - (((0.125 * (3.0 - (2.0 * v))) * (((w * w) * r) * r)) / (1.0 - v))) - 4.5; end
(* Alternative 1: byte-identical to the initial program listing; N[..., $MachinePrecision] rounds per operation. *)
code[v_, w_, r_] := N[(N[(N[(3.0 + N[(2.0 / N[(r * r), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(N[(N[(0.125 * N[(3.0 - N[(2.0 * v), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(N[(N[(w * w), $MachinePrecision] * r), $MachinePrecision] * r), $MachinePrecision]), $MachinePrecision] / N[(1.0 - v), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - 4.5), $MachinePrecision]
\begin{array}{l}
\\
\left(\left(3 + \frac{2}{r \cdot r}\right) - \frac{\left(0.125 \cdot \left(3 - 2 \cdot v\right)\right) \cdot \left(\left(\left(w \cdot w\right) \cdot r\right) \cdot r\right)}{1 - v}\right) - 4.5
\end{array}
(FPCore (v w r) :precision binary64 (- (+ 3.0 (/ 2.0 (* r r))) (+ (* (* 0.125 (+ 3.0 (* -2.0 v))) (* (* r w) (/ (* r w) (- 1.0 v)))) 4.5)))
/* Alternative 2: (3 + 2/r^2) - (0.125*(3 - 2v) * (rw)*((rw)/(1-v)) + 4.5).
 * Herbie's rearrangement; operation order preserved exactly. */
double code(double v, double w, double r) {
    double base = 3.0 + (2.0 / (r * r));
    double coeff = 0.125 * (3.0 + (-2.0 * v));
    double prod = (r * w) * ((r * w) / (1.0 - v));
    return base - ((coeff * prod) + 4.5);
}
! Alternative 2: (3 + 2/r^2) - (0.125*(3 - 2v) * (r*w)*((r*w)/(1-v)) + 4.5) in real(8).
! Left byte-identical; the parenthesization is numerically significant.
real(8) function code(v, w, r)
real(8), intent (in) :: v
real(8), intent (in) :: w
real(8), intent (in) :: r
code = (3.0d0 + (2.0d0 / (r * r))) - (((0.125d0 * (3.0d0 + ((-2.0d0) * v))) * ((r * w) * ((r * w) / (1.0d0 - v)))) + 4.5d0)
end function
/**
 * Alternative 2: (3 + 2/r^2) - (0.125*(3 - 2v) * (rw)*((rw)/(1-v)) + 4.5).
 * Operation order preserved exactly from the FPCore spec.
 */
public static double code(double v, double w, double r) {
    final double base = 3.0 + (2.0 / (r * r));
    final double coeff = 0.125 * (3.0 + (-2.0 * v));
    final double prod = (r * w) * ((r * w) / (1.0 - v));
    return base - ((coeff * prod) + 4.5);
}
def code(v, w, r):
    """Alternative 2: (3 + 2/r^2) - (0.125*(3-2v) * (rw)*((rw)/(1-v)) + 4.5).

    Operation order preserved exactly from the FPCore spec.
    """
    base = 3.0 + (2.0 / (r * r))
    coeff = 0.125 * (3.0 + (-2.0 * v))
    prod = (r * w) * ((r * w) / (1.0 - v))
    return base - ((coeff * prod) + 4.5)
function code(v, w, r)
    # Alternative 2; Float64() forces binary64 rounding after every operation.
    base = Float64(3.0 + Float64(2.0 / Float64(r * r)))
    coeff = Float64(0.125 * Float64(3.0 + Float64(-2.0 * v)))
    rw = Float64(r * w)
    prod = Float64(rw * Float64(rw / Float64(1.0 - v)))
    return Float64(base - Float64(Float64(coeff * prod) + 4.5))
end
function tmp = code(v, w, r)
    % Alternative 2: (3 + 2/r^2) - (0.125*(3-2v) * (rw)*((rw)/(1-v)) + 4.5).
    base = 3.0 + (2.0 / (r * r));
    coeff = 0.125 * (3.0 + (-2.0 * v));
    prod = (r * w) * ((r * w) / (1.0 - v));
    tmp = base - ((coeff * prod) + 4.5);
end
(* Alternative 2; each N[..., $MachinePrecision] rounds after every operation. *)
code[v_, w_, r_] := N[(N[(3.0 + N[(2.0 / N[(r * r), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(N[(N[(0.125 * N[(3.0 + N[(-2.0 * v), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(N[(r * w), $MachinePrecision] * N[(N[(r * w), $MachinePrecision] / N[(1.0 - v), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + 4.5), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(3 + \frac{2}{r \cdot r}\right) - \left(\left(0.125 \cdot \left(3 + -2 \cdot v\right)\right) \cdot \left(\left(r \cdot w\right) \cdot \frac{r \cdot w}{1 - v}\right) + 4.5\right)
\end{array}
Initial program 85.8%
associate--l- 85.8%
associate-*l* 79.7%
sqr-neg 79.7%
associate-*l* 85.8%
associate-/l* 88.3%
fma-define 88.4%
Simplified 88.3%
add-sqr-sqrt 88.3%
*-un-lft-identity 88.3%
times-frac 88.3%
*-commutative 88.3%
sqrt-prod 46.2%
*-commutative 46.2%
sqrt-prod 46.2%
sqrt-prod 23.9%
add-sqr-sqrt 38.4%
associate-*r* 38.4%
add-sqr-sqrt 69.9%
Applied egg-rr 99.8%
Final simplification 99.8%
(FPCore (v w r) :precision binary64 (+ (/ 2.0 (* r r)) (+ -1.5 (/ (+ 0.375 (* v -0.25)) (/ (+ v -1.0) (* (* r w) (* r w)))))))
/* Alternative 3: 2/r^2 + (-1.5 + (0.375 - 0.25v) / ((v - 1)/((rw)^2))).
 * Operation order preserved exactly from the FPCore spec. */
double code(double v, double w, double r) {
    double num = 0.375 + (v * -0.25);
    double den = (v + -1.0) / ((r * w) * (r * w));
    return (2.0 / (r * r)) + (-1.5 + (num / den));
}
! Alternative 3: 2/r^2 + (-1.5 + (0.375 - 0.25v)/((v - 1)/((r*w)**2))) in real(8).
! Left byte-identical; the parenthesization is numerically significant.
real(8) function code(v, w, r)
real(8), intent (in) :: v
real(8), intent (in) :: w
real(8), intent (in) :: r
code = (2.0d0 / (r * r)) + ((-1.5d0) + ((0.375d0 + (v * (-0.25d0))) / ((v + (-1.0d0)) / ((r * w) * (r * w)))))
end function
/**
 * Alternative 3: 2/r^2 + (-1.5 + (0.375 - 0.25v) / ((v - 1)/((rw)^2))).
 * Operation order preserved exactly from the FPCore spec.
 */
public static double code(double v, double w, double r) {
    final double num = 0.375 + (v * -0.25);
    final double den = (v + -1.0) / ((r * w) * (r * w));
    return (2.0 / (r * r)) + (-1.5 + (num / den));
}
def code(v, w, r):
    """Alternative 3: 2/r^2 + (-1.5 + (0.375 - 0.25v) / ((v - 1)/((rw)^2))).

    Operation order preserved exactly from the FPCore spec.
    """
    num = 0.375 + (v * -0.25)
    den = (v + -1.0) / ((r * w) * (r * w))
    return (2.0 / (r * r)) + (-1.5 + (num / den))
function code(v, w, r)
    # Alternative 3; Float64() forces binary64 rounding after every operation.
    rw = Float64(r * w)
    num = Float64(0.375 + Float64(v * -0.25))
    den = Float64(Float64(v + -1.0) / Float64(rw * rw))
    return Float64(Float64(2.0 / Float64(r * r)) + Float64(-1.5 + Float64(num / den)))
end
function tmp = code(v, w, r)
    % Alternative 3: 2/r^2 + (-1.5 + (0.375 - 0.25v) / ((v - 1)/((r*w)^2))).
    num = 0.375 + (v * -0.25);
    den = (v + -1.0) / ((r * w) * (r * w));
    tmp = (2.0 / (r * r)) + (-1.5 + (num / den));
end
(* Alternative 3; each N[..., $MachinePrecision] rounds after every operation. *)
code[v_, w_, r_] := N[(N[(2.0 / N[(r * r), $MachinePrecision]), $MachinePrecision] + N[(-1.5 + N[(N[(0.375 + N[(v * -0.25), $MachinePrecision]), $MachinePrecision] / N[(N[(v + -1.0), $MachinePrecision] / N[(N[(r * w), $MachinePrecision] * N[(r * w), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{2}{r \cdot r} + \left(-1.5 + \frac{0.375 + v \cdot -0.25}{\frac{v + -1}{\left(r \cdot w\right) \cdot \left(r \cdot w\right)}}\right)
\end{array}
Initial program 85.8%
Simplified 87.8%
fma-undefine 87.8%
*-commutative 87.8%
+-commutative 87.8%
metadata-eval 87.8%
cancel-sign-sub-inv 87.8%
associate-*r/ 88.3%
*-commutative 88.3%
associate-/l* 88.3%
clear-num 88.3%
un-div-inv 88.3%
cancel-sign-sub-inv 88.3%
metadata-eval 88.3%
distribute-rgt-in 88.3%
metadata-eval 88.3%
*-commutative 88.3%
associate-*l* 88.3%
metadata-eval 88.3%
*-commutative 88.3%
Applied egg-rr 99.7%
unpow2 99.7%
Applied egg-rr 99.7%
Final simplification 99.7%
(FPCore (v w r) :precision binary64 (+ (/ 2.0 (* r r)) (+ -1.5 (/ (+ 0.375 (* v -0.25)) (/ (/ (+ v -1.0) (* r w)) (* r w))))))
/* Alternative 4: like alternative 3, but the denominator divides by (r*w) twice
 * instead of by (r*w)^2. Operation order preserved exactly. */
double code(double v, double w, double r) {
    double num = 0.375 + (v * -0.25);
    double den = ((v + -1.0) / (r * w)) / (r * w);
    return (2.0 / (r * r)) + (-1.5 + (num / den));
}
! Alternative 4: 2/r^2 + (-1.5 + (0.375 - 0.25v)/(((v - 1)/(r*w))/(r*w))) in real(8).
! Left byte-identical; the parenthesization is numerically significant.
real(8) function code(v, w, r)
real(8), intent (in) :: v
real(8), intent (in) :: w
real(8), intent (in) :: r
code = (2.0d0 / (r * r)) + ((-1.5d0) + ((0.375d0 + (v * (-0.25d0))) / (((v + (-1.0d0)) / (r * w)) / (r * w))))
end function
/**
 * Alternative 4: like alternative 3, but the denominator divides by (r*w)
 * twice instead of by (r*w)^2. Operation order preserved exactly.
 */
public static double code(double v, double w, double r) {
    final double num = 0.375 + (v * -0.25);
    final double den = ((v + -1.0) / (r * w)) / (r * w);
    return (2.0 / (r * r)) + (-1.5 + (num / den));
}
def code(v, w, r):
    """Alternative 4: 2/r^2 + (-1.5 + (0.375 - 0.25v) / (((v-1)/(rw))/(rw))).

    Operation order preserved exactly from the FPCore spec.
    """
    num = 0.375 + (v * -0.25)
    den = ((v + -1.0) / (r * w)) / (r * w)
    return (2.0 / (r * r)) + (-1.5 + (num / den))
function code(v, w, r)
    # Alternative 4; Float64() forces binary64 rounding after every operation.
    rw = Float64(r * w)
    num = Float64(0.375 + Float64(v * -0.25))
    den = Float64(Float64(Float64(v + -1.0) / rw) / rw)
    return Float64(Float64(2.0 / Float64(r * r)) + Float64(-1.5 + Float64(num / den)))
end
function tmp = code(v, w, r)
    % Alternative 4: 2/r^2 + (-1.5 + (0.375 - 0.25v) / (((v-1)/(r*w))/(r*w))).
    num = 0.375 + (v * -0.25);
    den = ((v + -1.0) / (r * w)) / (r * w);
    tmp = (2.0 / (r * r)) + (-1.5 + (num / den));
end
(* Alternative 4; each N[..., $MachinePrecision] rounds after every operation. *)
code[v_, w_, r_] := N[(N[(2.0 / N[(r * r), $MachinePrecision]), $MachinePrecision] + N[(-1.5 + N[(N[(0.375 + N[(v * -0.25), $MachinePrecision]), $MachinePrecision] / N[(N[(N[(v + -1.0), $MachinePrecision] / N[(r * w), $MachinePrecision]), $MachinePrecision] / N[(r * w), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{2}{r \cdot r} + \left(-1.5 + \frac{0.375 + v \cdot -0.25}{\frac{\frac{v + -1}{r \cdot w}}{r \cdot w}}\right)
\end{array}
Initial program 85.8%
Simplified 87.8%
fma-undefine 87.8%
*-commutative 87.8%
+-commutative 87.8%
metadata-eval 87.8%
cancel-sign-sub-inv 87.8%
associate-*r/ 88.3%
*-commutative 88.3%
associate-/l* 88.3%
clear-num 88.3%
un-div-inv 88.3%
cancel-sign-sub-inv 88.3%
metadata-eval 88.3%
distribute-rgt-in 88.3%
metadata-eval 88.3%
*-commutative 88.3%
associate-*l* 88.3%
metadata-eval 88.3%
*-commutative 88.3%
Applied egg-rr 99.7%
unpow2 99.7%
Applied egg-rr 99.7%
associate-/r* 99.8%
*-un-lft-identity 99.8%
*-commutative 99.8%
times-frac 96.8%
associate-/r* 96.4%
Applied egg-rr 96.4%
frac-times 98.4%
*-un-lft-identity 98.4%
associate-/l/ 99.8%
*-commutative 99.8%
*-commutative 99.8%
Applied egg-rr 99.8%
Final simplification 99.8%
(FPCore (v w r) :precision binary64 (- (- (+ 3.0 (/ 2.0 (* r r))) (* (* r w) (* (* r w) 0.375))) 4.5))
/* Alternative 5: ((3 + 2/r^2) - (rw)*((rw)*0.375)) - 4.5; the (1-v) factor
 * was removed by Taylor expansion in v around 0 (per the derivation log).
 * Operation order preserved exactly. */
double code(double v, double w, double r) {
    double base = 3.0 + (2.0 / (r * r));
    double quad = (r * w) * ((r * w) * 0.375);
    return (base - quad) - 4.5;
}
! Alternative 5: ((3 + 2/r^2) - (r*w)*((r*w)*0.375d0)) - 4.5 in real(8); v is unused
! (eliminated by Taylor expansion in v around 0 per the derivation log).
real(8) function code(v, w, r)
real(8), intent (in) :: v
real(8), intent (in) :: w
real(8), intent (in) :: r
code = ((3.0d0 + (2.0d0 / (r * r))) - ((r * w) * ((r * w) * 0.375d0))) - 4.5d0
end function
/**
 * Alternative 5: ((3 + 2/r^2) - (rw)*((rw)*0.375)) - 4.5. The v parameter is
 * unused (eliminated by Taylor expansion per the derivation log); kept for
 * interface compatibility. Operation order preserved exactly.
 */
public static double code(double v, double w, double r) {
    final double base = 3.0 + (2.0 / (r * r));
    final double quad = (r * w) * ((r * w) * 0.375);
    return (base - quad) - 4.5;
}
def code(v, w, r):
    """Alternative 5: ((3 + 2/r^2) - (rw)*((rw)*0.375)) - 4.5.

    v is unused (eliminated by Taylor expansion per the derivation log);
    kept for interface compatibility.
    """
    base = 3.0 + (2.0 / (r * r))
    quad = (r * w) * ((r * w) * 0.375)
    return (base - quad) - 4.5
function code(v, w, r)
    # Alternative 5; v is unused. Float64() forces binary64 rounding per operation.
    rw = Float64(r * w)
    base = Float64(3.0 + Float64(2.0 / Float64(r * r)))
    quad = Float64(rw * Float64(rw * 0.375))
    return Float64(Float64(base - quad) - 4.5)
end
function tmp = code(v, w, r)
    % Alternative 5: ((3 + 2/r^2) - (r*w)*((r*w)*0.375)) - 4.5; v is unused.
    base = 3.0 + (2.0 / (r * r));
    quad = (r * w) * ((r * w) * 0.375);
    tmp = (base - quad) - 4.5;
end
(* Alternative 5; v is unused. Each N[..., $MachinePrecision] rounds after every operation. *)
code[v_, w_, r_] := N[(N[(N[(3.0 + N[(2.0 / N[(r * r), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(N[(r * w), $MachinePrecision] * N[(N[(r * w), $MachinePrecision] * 0.375), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - 4.5), $MachinePrecision]
\begin{array}{l}
\\
\left(\left(3 + \frac{2}{r \cdot r}\right) - \left(r \cdot w\right) \cdot \left(\left(r \cdot w\right) \cdot 0.375\right)\right) - 4.5
\end{array}
Initial program 85.8%
associate-/l* 88.3%
cancel-sign-sub-inv 88.3%
metadata-eval 88.3%
+-commutative 88.3%
*-commutative 88.3%
fma-undefine 88.3%
*-commutative 88.3%
*-commutative 88.3%
associate-/l* 88.3%
*-commutative 88.3%
associate-*r/ 87.8%
associate-*r* 83.9%
associate-*l* 91.3%
associate-*r* 93.3%
Applied egg-rr 93.3%
Taylor expanded in v around 0 81.3%
Taylor expanded in v around 0 94.6%
Final simplification 94.6%
(FPCore (v w r) :precision binary64 (- (+ 3.0 (/ 2.0 (* r r))) 4.5))
/* Alternative 6: (3 + 2/r^2) - 4.5; v and w are unused (eliminated by
 * Taylor expansion in r around 0 per the derivation log). */
double code(double v, double w, double r) {
    double base = 3.0 + (2.0 / (r * r));
    return base - 4.5;
}
! Alternative 6: (3 + 2/r^2) - 4.5 in real(8); v and w are unused
! (eliminated by Taylor expansion in r around 0 per the derivation log).
real(8) function code(v, w, r)
real(8), intent (in) :: v
real(8), intent (in) :: w
real(8), intent (in) :: r
code = (3.0d0 + (2.0d0 / (r * r))) - 4.5d0
end function
/**
 * Alternative 6: (3 + 2/r^2) - 4.5. v and w are unused (eliminated by
 * Taylor expansion per the derivation log); kept for interface compatibility.
 */
public static double code(double v, double w, double r) {
    final double base = 3.0 + (2.0 / (r * r));
    return base - 4.5;
}
def code(v, w, r):
    """Alternative 6: (3 + 2/r^2) - 4.5.

    v and w are unused (eliminated by Taylor expansion per the derivation
    log); kept for interface compatibility.
    """
    base = 3.0 + (2.0 / (r * r))
    return base - 4.5
function code(v, w, r)
    # Alternative 6; v and w are unused. Float64() forces binary64 rounding per operation.
    base = Float64(3.0 + Float64(2.0 / Float64(r * r)))
    return Float64(base - 4.5)
end
function tmp = code(v, w, r)
    % Alternative 6: (3 + 2/r^2) - 4.5; v and w are unused.
    base = 3.0 + (2.0 / (r * r));
    tmp = base - 4.5;
end
(* Alternative 6; v and w are unused. N[..., $MachinePrecision] rounds per operation. *)
code[v_, w_, r_] := N[(N[(3.0 + N[(2.0 / N[(r * r), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - 4.5), $MachinePrecision]
\begin{array}{l}
\\
\left(3 + \frac{2}{r \cdot r}\right) - 4.5
\end{array}
Initial program 85.8%
Simplified 81.0%
Taylor expanded in r around 0 55.4%
Final simplification 55.4%
(FPCore (v w r) :precision binary64 -1.5)
/* Alternative 7: constant -1.5; all inputs eliminated by Taylor expansion in r
 * around inf per the derivation log (accuracy 12.9%). Parameters kept for
 * interface compatibility. */
double code(double v, double w, double r) {
return -1.5;
}
! Alternative 7: constant -1.5; all inputs eliminated by Taylor expansion in r
! around inf per the derivation log. Parameters kept for interface compatibility.
real(8) function code(v, w, r)
real(8), intent (in) :: v
real(8), intent (in) :: w
real(8), intent (in) :: r
code = -1.5d0
end function
/** Alternative 7: constant -1.5; all inputs eliminated by Taylor expansion per the derivation log. */
public static double code(double v, double w, double r) {
return -1.5;
}
# Alternative 7: constant -1.5; all inputs eliminated by Taylor expansion per the derivation log.
def code(v, w, r): return -1.5
# Alternative 7: constant -1.5; all inputs eliminated by Taylor expansion per the derivation log.
function code(v, w, r) return -1.5 end
% Alternative 7: constant -1.5; all inputs eliminated by Taylor expansion per the derivation log.
function tmp = code(v, w, r) tmp = -1.5; end
(* Alternative 7: constant -1.5; all inputs eliminated by Taylor expansion per the derivation log. *)
code[v_, w_, r_] := -1.5
\begin{array}{l}
\\
-1.5
\end{array}
Initial program 85.8%
Simplified 81.0%
Taylor expanded in r around 0 55.4%
Taylor expanded in r around inf 12.9%
Final simplification 12.9%
herbie shell --seed 2024058
(FPCore (v w r)
:name "Rosa's TurbineBenchmark"
:precision binary64
(- (- (+ 3.0 (/ 2.0 (* r r))) (/ (* (* 0.125 (- 3.0 (* 2.0 v))) (* (* (* w w) r) r)) (- 1.0 v))) 4.5))