
;; Original expression (binary64): (3 + 2/r^2) - 0.125*(3 - 2v)*w^2*r^2/(1 - v) - 4.5
(FPCore (v w r) :precision binary64 (- (- (+ 3.0 (/ 2.0 (* r r))) (/ (* (* 0.125 (- 3.0 (* 2.0 v))) (* (* (* w w) r) r)) (- 1.0 v))) 4.5))
/* Original expression (binary64): (3 + 2/r^2) - 0.125*(3 - 2v)*w^2*r^2/(1 - v) - 4.5.
   Evaluation order matches the FPCore spec exactly; do not re-associate. */
double code(double v, double w, double r) {
    double head = 3.0 + (2.0 / (r * r));
    double numer = (0.125 * (3.0 - (2.0 * v))) * (((w * w) * r) * r);
    return (head - numer / (1.0 - v)) - 4.5;
}
! Original expression (binary64):
!   (3 + 2/r**2) - 0.125*(3 - 2*v)*w**2*r**2/(1 - v) - 4.5
! Operation order is numerically significant; keep as generated.
real(8) function code(v, w, r)
real(8), intent (in) :: v
real(8), intent (in) :: w
real(8), intent (in) :: r
code = ((3.0d0 + (2.0d0 / (r * r))) - (((0.125d0 * (3.0d0 - (2.0d0 * v))) * (((w * w) * r) * r)) / (1.0d0 - v))) - 4.5d0
end function
/** Original expression (binary64): (3 + 2/r^2) - 0.125*(3 - 2v)*w^2*r^2/(1 - v) - 4.5.
 *  Evaluation order matches the FPCore spec exactly. */
public static double code(double v, double w, double r) {
    double head = 3.0 + (2.0 / (r * r));
    double numer = (0.125 * (3.0 - (2.0 * v))) * (((w * w) * r) * r);
    return (head - numer / (1.0 - v)) - 4.5;
}
def code(v, w, r):
    """Original expression (binary64): (3 + 2/r^2) - 0.125*(3 - 2v)*w^2*r^2/(1 - v) - 4.5.

    Evaluation order matches the FPCore spec exactly; do not re-associate.
    """
    head = 3.0 + (2.0 / (r * r))
    numer = (0.125 * (3.0 - (2.0 * v))) * (((w * w) * r) * r)
    return (head - numer / (1.0 - v)) - 4.5
# Original expression; the Float64() wrappers pin binary64 rounding at every step.
function code(v, w, r) return Float64(Float64(Float64(3.0 + Float64(2.0 / Float64(r * r))) - Float64(Float64(Float64(0.125 * Float64(3.0 - Float64(2.0 * v))) * Float64(Float64(Float64(w * w) * r) * r)) / Float64(1.0 - v))) - 4.5) end
function tmp = code(v, w, r)
    % Original expression (binary64): (3 + 2/r^2) - 0.125*(3 - 2v)*w^2*r^2/(1 - v) - 4.5.
    % Restored line structure: the declaration and body were collapsed onto one
    % line, which MATLAB cannot parse. Statements are otherwise unchanged.
    tmp = ((3.0 + (2.0 / (r * r))) - (((0.125 * (3.0 - (2.0 * v))) * (((w * w) * r) * r)) / (1.0 - v))) - 4.5;
end
(* Original expression; every N[..., $MachinePrecision] pins machine-precision rounding per operation. *)
code[v_, w_, r_] := N[(N[(N[(3.0 + N[(2.0 / N[(r * r), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(N[(N[(0.125 * N[(3.0 - N[(2.0 * v), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(N[(N[(w * w), $MachinePrecision] * r), $MachinePrecision] * r), $MachinePrecision]), $MachinePrecision] / N[(1.0 - v), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - 4.5), $MachinePrecision]
\begin{array}{l}
\\
\left(\left(3 + \frac{2}{r \cdot r}\right) - \frac{\left(0.125 \cdot \left(3 - 2 \cdot v\right)\right) \cdot \left(\left(\left(w \cdot w\right) \cdot r\right) \cdot r\right)}{1 - v}\right) - 4.5
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 6 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
;; Alternative 1: identical to the input expression (the report lists the original first).
(FPCore (v w r) :precision binary64 (- (- (+ 3.0 (/ 2.0 (* r r))) (/ (* (* 0.125 (- 3.0 (* 2.0 v))) (* (* (* w w) r) r)) (- 1.0 v))) 4.5))
/* Alternative 1 (identical to the input expression), binary64:
   (3 + 2/r^2) - 0.125*(3 - 2v)*w^2*r^2/(1 - v) - 4.5. Order preserved. */
double code(double v, double w, double r) {
    double head = 3.0 + (2.0 / (r * r));
    double numer = (0.125 * (3.0 - (2.0 * v))) * (((w * w) * r) * r);
    return (head - numer / (1.0 - v)) - 4.5;
}
! Alternative 1 (identical to the input expression), binary64:
!   (3 + 2/r**2) - 0.125*(3 - 2*v)*w**2*r**2/(1 - v) - 4.5
real(8) function code(v, w, r)
real(8), intent (in) :: v
real(8), intent (in) :: w
real(8), intent (in) :: r
code = ((3.0d0 + (2.0d0 / (r * r))) - (((0.125d0 * (3.0d0 - (2.0d0 * v))) * (((w * w) * r) * r)) / (1.0d0 - v))) - 4.5d0
end function
/** Alternative 1 (identical to the input expression), binary64. */
public static double code(double v, double w, double r) {
    double head = 3.0 + (2.0 / (r * r));
    double numer = (0.125 * (3.0 - (2.0 * v))) * (((w * w) * r) * r);
    return (head - numer / (1.0 - v)) - 4.5;
}
def code(v, w, r):
    """Alternative 1 (identical to the input expression), binary64.

    (3 + 2/r^2) - 0.125*(3 - 2v)*w^2*r^2/(1 - v) - 4.5, order preserved.
    """
    head = 3.0 + (2.0 / (r * r))
    numer = (0.125 * (3.0 - (2.0 * v))) * (((w * w) * r) * r)
    return (head - numer / (1.0 - v)) - 4.5
# Alternative 1 (identical to the input); Float64() pins binary64 rounding per step.
function code(v, w, r) return Float64(Float64(Float64(3.0 + Float64(2.0 / Float64(r * r))) - Float64(Float64(Float64(0.125 * Float64(3.0 - Float64(2.0 * v))) * Float64(Float64(Float64(w * w) * r) * r)) / Float64(1.0 - v))) - 4.5) end
function tmp = code(v, w, r)
    % Alternative 1 (identical to the input expression), binary64.
    % Restored line structure: the one-line collapsed form does not parse in MATLAB.
    tmp = ((3.0 + (2.0 / (r * r))) - (((0.125 * (3.0 - (2.0 * v))) * (((w * w) * r) * r)) / (1.0 - v))) - 4.5;
end
(* Alternative 1 (identical to the input expression); machine-precision rounding per operation. *)
code[v_, w_, r_] := N[(N[(N[(3.0 + N[(2.0 / N[(r * r), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(N[(N[(0.125 * N[(3.0 - N[(2.0 * v), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(N[(N[(w * w), $MachinePrecision] * r), $MachinePrecision] * r), $MachinePrecision]), $MachinePrecision] / N[(1.0 - v), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - 4.5), $MachinePrecision]
\begin{array}{l}
\\
\left(\left(3 + \frac{2}{r \cdot r}\right) - \frac{\left(0.125 \cdot \left(3 - 2 \cdot v\right)\right) \cdot \left(\left(\left(w \cdot w\right) \cdot r\right) \cdot r\right)}{1 - v}\right) - 4.5
\end{array}
;; Alternative 2: rearranged quotient form, 3 + 2/r^2 + ((0.375 - 0.25v) / (((v-1)/(rw))/(rw)) - 4.5)
(FPCore (v w r) :precision binary64 (+ (+ 3.0 (/ 2.0 (* r r))) (- (/ (+ 0.375 (* -0.25 v)) (/ (/ (+ v -1.0) (* r w)) (* r w))) 4.5)))
/* Alternative 2: rearranged quotient form,
   3 + 2/r^2 + ((0.375 - 0.25v) / (((v-1)/(rw))/(rw)) - 4.5). Order preserved. */
double code(double v, double w, double r) {
    double rw = r * w;                      /* same value both times in the original */
    double denom = ((v + -1.0) / rw) / rw;
    return (3.0 + (2.0 / (r * r))) + (((0.375 + (-0.25 * v)) / denom) - 4.5);
}
! Alternative 2: rearranged quotient form,
!   3 + 2/r**2 + ((0.375 - 0.25*v) / (((v-1)/(r*w))/(r*w)) - 4.5)
real(8) function code(v, w, r)
real(8), intent (in) :: v
real(8), intent (in) :: w
real(8), intent (in) :: r
code = (3.0d0 + (2.0d0 / (r * r))) + (((0.375d0 + ((-0.25d0) * v)) / (((v + (-1.0d0)) / (r * w)) / (r * w))) - 4.5d0)
end function
/** Alternative 2: rearranged quotient form,
 *  3 + 2/r^2 + ((0.375 - 0.25v) / (((v-1)/(rw))/(rw)) - 4.5). */
public static double code(double v, double w, double r) {
    double rw = r * w;
    double denom = ((v + -1.0) / rw) / rw;
    return (3.0 + (2.0 / (r * r))) + (((0.375 + (-0.25 * v)) / denom) - 4.5);
}
def code(v, w, r):
    """Alternative 2: rearranged quotient form.

    3 + 2/r^2 + ((0.375 - 0.25v) / (((v-1)/(rw))/(rw)) - 4.5); order preserved.
    """
    rw = r * w  # same value both times in the original expression
    denom = ((v + -1.0) / rw) / rw
    return (3.0 + (2.0 / (r * r))) + (((0.375 + (-0.25 * v)) / denom) - 4.5)
# Alternative 2: rearranged quotient form; Float64() pins binary64 rounding per step.
function code(v, w, r) return Float64(Float64(3.0 + Float64(2.0 / Float64(r * r))) + Float64(Float64(Float64(0.375 + Float64(-0.25 * v)) / Float64(Float64(Float64(v + -1.0) / Float64(r * w)) / Float64(r * w))) - 4.5)) end
function tmp = code(v, w, r)
    % Alternative 2: rearranged quotient form,
    % 3 + 2/r^2 + ((0.375 - 0.25v) / (((v-1)/(rw))/(rw)) - 4.5).
    % Restored line structure: the one-line collapsed form does not parse in MATLAB.
    tmp = (3.0 + (2.0 / (r * r))) + (((0.375 + (-0.25 * v)) / (((v + -1.0) / (r * w)) / (r * w))) - 4.5);
end
(* Alternative 2: rearranged quotient form; machine-precision rounding per operation. *)
code[v_, w_, r_] := N[(N[(3.0 + N[(2.0 / N[(r * r), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(N[(N[(0.375 + N[(-0.25 * v), $MachinePrecision]), $MachinePrecision] / N[(N[(N[(v + -1.0), $MachinePrecision] / N[(r * w), $MachinePrecision]), $MachinePrecision] / N[(r * w), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - 4.5), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(3 + \frac{2}{r \cdot r}\right) + \left(\frac{0.375 + -0.25 \cdot v}{\frac{\frac{v + -1}{r \cdot w}}{r \cdot w}} - 4.5\right)
\end{array}
Initial program 85.5%
associate--l-85.5%
associate-*l*80.1%
sqr-neg80.1%
associate-*l*85.5%
associate-/l*88.4%
fma-define88.4%
Simplified88.4%
associate-/l*88.4%
*-commutative88.4%
associate-*r/87.6%
associate-*l*97.2%
associate-*r*98.7%
add-sqr-sqrt47.9%
associate-*l*47.9%
add-sqr-sqrt24.6%
sqrt-prod34.4%
sqrt-prod34.4%
sqrt-prod68.4%
*-commutative68.4%
sqrt-prod34.4%
*-commutative34.4%
sqrt-prod34.4%
sqrt-prod24.6%
add-sqr-sqrt47.9%
associate-*r*47.9%
add-sqr-sqrt98.7%
clear-num98.6%
un-div-inv98.7%
Applied egg-rr98.7%
clear-num98.7%
un-div-inv98.7%
Applied egg-rr98.7%
clear-num98.7%
un-div-inv98.7%
distribute-lft-in98.7%
metadata-eval98.7%
associate-*r*98.7%
metadata-eval98.7%
associate-/l/99.8%
Applied egg-rr99.8%
Final simplification99.8%
;; Alternative 3: regime split. For v <= -5 or v > 1 use the rearranged quotient
;; form; otherwise use the v-free Taylor series 2/r^2 - 1.5 - 0.375*(r*w)^2.
(FPCore (v w r)
:precision binary64
(let* ((t_0 (/ 2.0 (* r r))))
(if (or (<= v -5.0) (not (<= v 1.0)))
(-
(+ 3.0 t_0)
(- 4.5 (/ (+ 0.375 (* -0.25 v)) (/ (/ v (* r w)) (* r w)))))
(+ t_0 (- -1.5 (* 0.375 (* (* r w) (* r w))))))))
/* Alternative 3: regime split. Tail regime (v <= -5 or v > 1) keeps the full
   rearranged quotient; central regime uses the v-free series. Order preserved. */
double code(double v, double w, double r) {
    double t_0 = 2.0 / (r * r);
    if (v <= -5.0 || !(v <= 1.0)) {
        /* tail regime */
        return (3.0 + t_0) - (4.5 - ((0.375 + (-0.25 * v)) / ((v / (r * w)) / (r * w))));
    }
    /* central regime: -5 < v <= 1 */
    double rw2 = (r * w) * (r * w);
    return t_0 + (-1.5 - (0.375 * rw2));
}
! Alternative 3: regime split. For v <= -5 or v > 1 use the rearranged quotient
! form; for -5 < v <= 1 use the v-free series 2/r**2 - 1.5 - 0.375*(r*w)**2.
real(8) function code(v, w, r)
real(8), intent (in) :: v
real(8), intent (in) :: w
real(8), intent (in) :: r
real(8) :: t_0
real(8) :: tmp
t_0 = 2.0d0 / (r * r)
if ((v <= (-5.0d0)) .or. (.not. (v <= 1.0d0))) then
tmp = (3.0d0 + t_0) - (4.5d0 - ((0.375d0 + ((-0.25d0) * v)) / ((v / (r * w)) / (r * w))))
else
tmp = t_0 + ((-1.5d0) - (0.375d0 * ((r * w) * (r * w))))
end if
code = tmp
end function
/** Alternative 3: regime split — tail regime (v <= -5 or v > 1) keeps the full
 *  rearranged quotient; central regime uses the v-free series. */
public static double code(double v, double w, double r) {
    double t_0 = 2.0 / (r * r);
    if (v <= -5.0 || !(v <= 1.0)) {
        // tail regime
        return (3.0 + t_0) - (4.5 - ((0.375 + (-0.25 * v)) / ((v / (r * w)) / (r * w))));
    }
    // central regime: -5 < v <= 1
    double rw2 = (r * w) * (r * w);
    return t_0 + (-1.5 - (0.375 * rw2));
}
def code(v, w, r):
    """Alternative 3: regime split (binary64).

    For v <= -5 or v > 1, use the rearranged quotient form; for -5 < v <= 1,
    use the v-free series 2/r^2 - 1.5 - 0.375*(r*w)^2.

    NOTE: the source had every statement collapsed onto one line, which is a
    Python syntax error; the statements themselves are unchanged.
    """
    t_0 = 2.0 / (r * r)
    tmp = 0
    if (v <= -5.0) or not (v <= 1.0):
        tmp = (3.0 + t_0) - (4.5 - ((0.375 + (-0.25 * v)) / ((v / (r * w)) / (r * w))))
    else:
        tmp = t_0 + (-1.5 - (0.375 * ((r * w) * (r * w))))
    return tmp
# Alternative 3: regime split. For v <= -5 or v > 1 use the rearranged quotient
# form; otherwise the v-free series. Restored line structure: the source had all
# statements collapsed onto one line without separators, which Julia cannot parse.
function code(v, w, r)
    t_0 = Float64(2.0 / Float64(r * r))
    tmp = 0.0
    if ((v <= -5.0) || !(v <= 1.0))
        tmp = Float64(Float64(3.0 + t_0) - Float64(4.5 - Float64(Float64(0.375 + Float64(-0.25 * v)) / Float64(Float64(v / Float64(r * w)) / Float64(r * w)))))
    else
        tmp = Float64(t_0 + Float64(-1.5 - Float64(0.375 * Float64(Float64(r * w) * Float64(r * w)))))
    end
    return tmp
end
function tmp_2 = code(v, w, r)
    % Alternative 3: regime split. For v <= -5 or v > 1 use the rearranged
    % quotient form; otherwise the v-free series.
    % Restored line structure: the one-line collapsed form does not parse in MATLAB.
    t_0 = 2.0 / (r * r);
    tmp = 0.0;
    if ((v <= -5.0) || ~((v <= 1.0)))
        tmp = (3.0 + t_0) - (4.5 - ((0.375 + (-0.25 * v)) / ((v / (r * w)) / (r * w))));
    else
        tmp = t_0 + (-1.5 - (0.375 * ((r * w) * (r * w))));
    end
    tmp_2 = tmp;
end
(* Alternative 3: regime split (tail regime: v <= -5 or v > 1); machine-precision rounding per operation. *)
code[v_, w_, r_] := Block[{t$95$0 = N[(2.0 / N[(r * r), $MachinePrecision]), $MachinePrecision]}, If[Or[LessEqual[v, -5.0], N[Not[LessEqual[v, 1.0]], $MachinePrecision]], N[(N[(3.0 + t$95$0), $MachinePrecision] - N[(4.5 - N[(N[(0.375 + N[(-0.25 * v), $MachinePrecision]), $MachinePrecision] / N[(N[(v / N[(r * w), $MachinePrecision]), $MachinePrecision] / N[(r * w), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(t$95$0 + N[(-1.5 - N[(0.375 * N[(N[(r * w), $MachinePrecision] * N[(r * w), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{2}{r \cdot r}\\
\mathbf{if}\;v \leq -5 \lor \neg \left(v \leq 1\right):\\
\;\;\;\;\left(3 + t\_0\right) - \left(4.5 - \frac{0.375 + -0.25 \cdot v}{\frac{\frac{v}{r \cdot w}}{r \cdot w}}\right)\\
\mathbf{else}:\\
\;\;\;\;t\_0 + \left(-1.5 - 0.375 \cdot \left(\left(r \cdot w\right) \cdot \left(r \cdot w\right)\right)\right)\\
\end{array}
\end{array}
if v < -5 or 1 < v Initial program 83.0%
associate--l-83.0%
associate-*l*78.0%
sqr-neg78.0%
associate-*l*83.0%
associate-/l*88.1%
fma-define88.1%
Simplified88.1%
associate-/l*88.1%
*-commutative88.1%
associate-*r/86.7%
associate-*l*95.8%
associate-*r*97.7%
add-sqr-sqrt43.5%
associate-*l*43.6%
add-sqr-sqrt18.3%
sqrt-prod31.0%
sqrt-prod31.0%
sqrt-prod65.1%
*-commutative65.1%
sqrt-prod31.0%
*-commutative31.0%
sqrt-prod31.0%
sqrt-prod18.3%
add-sqr-sqrt43.6%
associate-*r*43.5%
add-sqr-sqrt97.7%
clear-num97.7%
un-div-inv97.7%
Applied egg-rr97.7%
clear-num97.7%
un-div-inv97.7%
Applied egg-rr97.7%
clear-num97.7%
un-div-inv97.7%
distribute-lft-in97.7%
metadata-eval97.7%
associate-*r*97.7%
metadata-eval97.7%
associate-/l/99.8%
Applied egg-rr99.8%
Taylor expanded in v around inf 98.7%
mul-1-neg98.7%
*-commutative98.7%
distribute-neg-frac298.7%
*-commutative98.7%
distribute-rgt-neg-in98.7%
Simplified98.7%
if -5 < v < 1 Initial program 88.7%
Simplified88.7%
Taylor expanded in v around 0 82.1%
*-commutative82.1%
unpow282.1%
unpow282.1%
swap-sqr99.3%
unpow299.3%
*-commutative99.3%
Simplified99.3%
*-commutative99.3%
pow299.3%
Applied egg-rr99.3%
Final simplification99.0%
;; Alternative 4: product form, 3 + 2/r^2 + ((0.375 - 0.25v) * w * (rw)/((v-1)/r) - 4.5)
(FPCore (v w r) :precision binary64 (+ (+ 3.0 (/ 2.0 (* r r))) (- (* (+ 0.375 (* -0.25 v)) (* w (/ (* r w) (/ (+ v -1.0) r)))) 4.5)))
/* Alternative 4: product form,
   3 + 2/r^2 + ((0.375 - 0.25v) * w * (rw)/((v-1)/r) - 4.5). Order preserved. */
double code(double v, double w, double r) {
    double base = 3.0 + (2.0 / (r * r));
    double scale = w * ((r * w) / ((v + -1.0) / r));
    return base + (((0.375 + (-0.25 * v)) * scale) - 4.5);
}
! Alternative 4: product form,
!   3 + 2/r**2 + ((0.375 - 0.25*v) * w * (r*w)/((v-1)/r) - 4.5)
real(8) function code(v, w, r)
real(8), intent (in) :: v
real(8), intent (in) :: w
real(8), intent (in) :: r
code = (3.0d0 + (2.0d0 / (r * r))) + (((0.375d0 + ((-0.25d0) * v)) * (w * ((r * w) / ((v + (-1.0d0)) / r)))) - 4.5d0)
end function
/** Alternative 4: product form,
 *  3 + 2/r^2 + ((0.375 - 0.25v) * w * (rw)/((v-1)/r) - 4.5). */
public static double code(double v, double w, double r) {
    double base = 3.0 + (2.0 / (r * r));
    double scale = w * ((r * w) / ((v + -1.0) / r));
    return base + (((0.375 + (-0.25 * v)) * scale) - 4.5);
}
def code(v, w, r):
    """Alternative 4: product form.

    3 + 2/r^2 + ((0.375 - 0.25v) * w * (rw)/((v-1)/r) - 4.5); order preserved.
    """
    base = 3.0 + (2.0 / (r * r))
    scale = w * ((r * w) / ((v + -1.0) / r))
    return base + (((0.375 + (-0.25 * v)) * scale) - 4.5)
# Alternative 4: product form; Float64() pins binary64 rounding per step.
function code(v, w, r) return Float64(Float64(3.0 + Float64(2.0 / Float64(r * r))) + Float64(Float64(Float64(0.375 + Float64(-0.25 * v)) * Float64(w * Float64(Float64(r * w) / Float64(Float64(v + -1.0) / r)))) - 4.5)) end
function tmp = code(v, w, r)
    % Alternative 4: product form,
    % 3 + 2/r^2 + ((0.375 - 0.25v) * w * (rw)/((v-1)/r) - 4.5).
    % Restored line structure: the one-line collapsed form does not parse in MATLAB.
    tmp = (3.0 + (2.0 / (r * r))) + (((0.375 + (-0.25 * v)) * (w * ((r * w) / ((v + -1.0) / r)))) - 4.5);
end
(* Alternative 4: product form; machine-precision rounding per operation. *)
code[v_, w_, r_] := N[(N[(3.0 + N[(2.0 / N[(r * r), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(N[(N[(0.375 + N[(-0.25 * v), $MachinePrecision]), $MachinePrecision] * N[(w * N[(N[(r * w), $MachinePrecision] / N[(N[(v + -1.0), $MachinePrecision] / r), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - 4.5), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(3 + \frac{2}{r \cdot r}\right) + \left(\left(0.375 + -0.25 \cdot v\right) \cdot \left(w \cdot \frac{r \cdot w}{\frac{v + -1}{r}}\right) - 4.5\right)
\end{array}
Initial program 85.5%
associate--l-85.5%
associate-*l*80.1%
sqr-neg80.1%
associate-*l*85.5%
associate-/l*88.4%
fma-define88.4%
Simplified88.4%
associate-/l*88.4%
*-commutative88.4%
associate-*r/87.6%
associate-*l*97.2%
associate-*r*98.7%
add-sqr-sqrt47.9%
associate-*l*47.9%
add-sqr-sqrt24.6%
sqrt-prod34.4%
sqrt-prod34.4%
sqrt-prod68.4%
*-commutative68.4%
sqrt-prod34.4%
*-commutative34.4%
sqrt-prod34.4%
sqrt-prod24.6%
add-sqr-sqrt47.9%
associate-*r*47.9%
add-sqr-sqrt98.7%
clear-num98.6%
un-div-inv98.7%
Applied egg-rr98.7%
clear-num98.7%
un-div-inv98.7%
Applied egg-rr98.7%
*-commutative98.7%
distribute-lft-in98.7%
metadata-eval98.7%
distribute-lft-in86.9%
associate-/r/85.1%
associate-/r/85.1%
associate-*r*85.1%
metadata-eval85.1%
Applied egg-rr85.1%
distribute-lft-out97.2%
metadata-eval97.2%
associate-*r*97.2%
*-commutative97.2%
*-commutative97.2%
associate-/l*92.4%
*-commutative92.4%
associate-*l*92.4%
metadata-eval92.4%
Simplified92.4%
associate-*r/97.2%
Applied egg-rr97.2%
Final simplification97.2%
;; Alternative 5: v-free form (Taylor expansion in v around 0): 2/r^2 - 1.5 - 0.375*(r*w)^2
(FPCore (v w r) :precision binary64 (+ (/ 2.0 (* r r)) (- -1.5 (* 0.375 (* (* r w) (* r w))))))
/* Alternative 5: v-free form (Taylor expansion in v around 0):
   2/r^2 - 1.5 - 0.375*(r*w)^2. Order preserved. */
double code(double v, double w, double r) {
    (void) v; /* eliminated by the expansion; kept for interface compatibility */
    double rw = r * w;
    return (2.0 / (r * r)) + (-1.5 - (0.375 * (rw * rw)));
}
! Alternative 5: v-free form (Taylor expansion in v around 0):
!   2/r**2 - 1.5 - 0.375*(r*w)**2   (v is unused but kept in the interface)
real(8) function code(v, w, r)
real(8), intent (in) :: v
real(8), intent (in) :: w
real(8), intent (in) :: r
code = (2.0d0 / (r * r)) + ((-1.5d0) - (0.375d0 * ((r * w) * (r * w))))
end function
/** Alternative 5: v-free form (Taylor expansion in v around 0):
 *  2/r^2 - 1.5 - 0.375*(r*w)^2. The v parameter is unused but kept. */
public static double code(double v, double w, double r) {
    double rw = r * w;
    return (2.0 / (r * r)) + (-1.5 - (0.375 * (rw * rw)));
}
def code(v, w, r):
    """Alternative 5: v-free form (Taylor expansion in v around 0).

    2/r^2 - 1.5 - 0.375*(r*w)^2; v is unused but kept in the interface.
    """
    rw = r * w
    return (2.0 / (r * r)) + (-1.5 - (0.375 * (rw * rw)))
# Alternative 5: v-free form, 2/r^2 - 1.5 - 0.375*(r*w)^2; v is unused but kept.
function code(v, w, r) return Float64(Float64(2.0 / Float64(r * r)) + Float64(-1.5 - Float64(0.375 * Float64(Float64(r * w) * Float64(r * w))))) end
function tmp = code(v, w, r)
    % Alternative 5: v-free form, 2/r^2 - 1.5 - 0.375*(r*w)^2 (v unused but kept).
    % Restored line structure: the one-line collapsed form does not parse in MATLAB.
    tmp = (2.0 / (r * r)) + (-1.5 - (0.375 * ((r * w) * (r * w))));
end
(* Alternative 5: v-free form, 2/r^2 - 1.5 - 0.375*(r*w)^2; v is unused but kept. *)
code[v_, w_, r_] := N[(N[(2.0 / N[(r * r), $MachinePrecision]), $MachinePrecision] + N[(-1.5 - N[(0.375 * N[(N[(r * w), $MachinePrecision] * N[(r * w), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{2}{r \cdot r} + \left(-1.5 - 0.375 \cdot \left(\left(r \cdot w\right) \cdot \left(r \cdot w\right)\right)\right)
\end{array}
Initial program 85.5%
Simplified87.6%
Taylor expanded in v around 0 77.4%
*-commutative77.4%
unpow277.4%
unpow277.4%
swap-sqr91.4%
unpow291.4%
*-commutative91.4%
Simplified91.4%
*-commutative91.4%
pow291.4%
Applied egg-rr91.4%
Final simplification91.4%
;; Alternative 6: r-only form (Taylor expansion in r around 0): (3 + 2/r^2) - 4.5
(FPCore (v w r) :precision binary64 (- (+ 3.0 (/ 2.0 (* r r))) 4.5))
/* Alternative 6: r-only form (Taylor expansion in r around 0): (3 + 2/r^2) - 4.5. */
double code(double v, double w, double r) {
    (void) v; (void) w; /* eliminated by the expansion; kept for interface compatibility */
    double inv_sq = 2.0 / (r * r);
    return (3.0 + inv_sq) - 4.5;
}
! Alternative 6: r-only form (Taylor expansion in r around 0):
!   (3 + 2/r**2) - 4.5   (v and w are unused but kept in the interface)
real(8) function code(v, w, r)
real(8), intent (in) :: v
real(8), intent (in) :: w
real(8), intent (in) :: r
code = (3.0d0 + (2.0d0 / (r * r))) - 4.5d0
end function
/** Alternative 6: r-only form (Taylor expansion in r around 0): (3 + 2/r^2) - 4.5.
 *  v and w are unused but kept for interface compatibility. */
public static double code(double v, double w, double r) {
    double invSq = 2.0 / (r * r);
    return (3.0 + invSq) - 4.5;
}
def code(v, w, r):
    """Alternative 6: r-only form (Taylor expansion in r around 0).

    (3 + 2/r^2) - 4.5; v and w are unused but kept in the interface.
    """
    inv_sq = 2.0 / (r * r)
    return (3.0 + inv_sq) - 4.5
# Alternative 6: r-only form, (3 + 2/r^2) - 4.5; v and w are unused but kept.
function code(v, w, r) return Float64(Float64(3.0 + Float64(2.0 / Float64(r * r))) - 4.5) end
function tmp = code(v, w, r)
    % Alternative 6: r-only form, (3 + 2/r^2) - 4.5 (v and w unused but kept).
    % Restored line structure: the one-line collapsed form does not parse in MATLAB.
    tmp = (3.0 + (2.0 / (r * r))) - 4.5;
end
(* Alternative 6: r-only form, (3 + 2/r^2) - 4.5; v and w are unused but kept. *)
code[v_, w_, r_] := N[(N[(3.0 + N[(2.0 / N[(r * r), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - 4.5), $MachinePrecision]
\begin{array}{l}
\\
\left(3 + \frac{2}{r \cdot r}\right) - 4.5
\end{array}
Initial program 85.5%
Simplified79.2%
Taylor expanded in r around 0 58.7%
Final simplification58.7%
;; Alternative 7: constant limit (Taylor expansion in r around inf): -1.5
(FPCore (v w r) :precision binary64 -1.5)
/* Alternative 7: constant limit (Taylor expansion in r around inf). */
double code(double v, double w, double r) {
    (void) v; (void) w; (void) r; /* all inputs eliminated; interface kept */
    return -1.5;
}
! Alternative 7: constant limit (Taylor expansion in r around inf).
! All inputs are eliminated; the interface is kept for compatibility.
real(8) function code(v, w, r)
real(8), intent (in) :: v
real(8), intent (in) :: w
real(8), intent (in) :: r
code = -1.5d0
end function
/** Alternative 7: constant limit (Taylor expansion in r around inf).
 *  All parameters are unused but kept for interface compatibility. */
public static double code(double v, double w, double r) {
    return -1.5;
}
def code(v, w, r):
    """Alternative 7: constant limit (Taylor expansion in r around inf).

    All parameters are unused but kept for interface compatibility.
    """
    return -1.5
# Alternative 7: constant limit; all parameters are unused but kept.
function code(v, w, r) return -1.5 end
function tmp = code(v, w, r)
    % Alternative 7: constant limit; all parameters are unused but kept.
    % Restored line structure: the one-line collapsed form does not parse in MATLAB.
    tmp = -1.5;
end
(* Alternative 7: constant limit; all parameters are unused but kept. *)
code[v_, w_, r_] := -1.5
\begin{array}{l}
\\
-1.5
\end{array}
Initial program 85.5%
Simplified79.2%
Taylor expanded in r around 0 58.7%
Taylor expanded in r around inf 16.5%
Final simplification16.5%
herbie shell --seed 2024047
;; Input program as given to the tool (seed 2024047 per the shell invocation above).
(FPCore (v w r)
:name "Rosa's TurbineBenchmark"
:precision binary64
(- (- (+ 3.0 (/ 2.0 (* r r))) (/ (* (* 0.125 (- 3.0 (* 2.0 v))) (* (* (* w w) r) r)) (- 1.0 v))) 4.5))