
(FPCore (v w r) :precision binary64 (- (- (+ 3.0 (/ 2.0 (* r r))) (/ (* (* 0.125 (- 3.0 (* 2.0 v))) (* (* (* w w) r) r)) (- 1.0 v))) 4.5))
/* Baseline turbine expression, evaluated in binary64 exactly as the FPCore above:
 *   (3 + 2/r^2) - (0.125*(3 - 2v) * w^2 * r^2)/(1 - v) - 4.5
 * Grouping/association is preserved so results are bit-identical to the report. */
double code(double v, double w, double r) {
    double head = 3.0 + (2.0 / (r * r));
    double numerator = (0.125 * (3.0 - (2.0 * v))) * (((w * w) * r) * r);
    double fraction = numerator / (1.0 - v);
    return (head - fraction) - 4.5;
}
! Baseline turbine expression in double precision (direct FPCore transcription):
!   (3 + 2/r**2) - (0.125*(3 - 2*v) * w**2 * r**2) / (1 - v) - 4.5
real(8) function code(v, w, r)
real(8), intent (in) :: v
real(8), intent (in) :: w
real(8), intent (in) :: r
! Grouping matches the FPCore above exactly; no reassociation.
code = ((3.0d0 + (2.0d0 / (r * r))) - (((0.125d0 * (3.0d0 - (2.0d0 * v))) * (((w * w) * r) * r)) / (1.0d0 - v))) - 4.5d0
end function
/**
 * Baseline turbine expression in binary64:
 * (3 + 2/r^2) - (0.125*(3 - 2v) * w^2 * r^2)/(1 - v) - 4.5.
 * Operation order matches the generated report, so results are bit-identical.
 */
public static double code(double v, double w, double r) {
    double head = 3.0 + (2.0 / (r * r));
    double numerator = (0.125 * (3.0 - (2.0 * v))) * (((w * w) * r) * r);
    double fraction = numerator / (1.0 - v);
    return (head - fraction) - 4.5;
}
def code(v, w, r): return ((3.0 + (2.0 / (r * r))) - (((0.125 * (3.0 - (2.0 * v))) * (((w * w) * r) * r)) / (1.0 - v))) - 4.5
function code(v, w, r) return Float64(Float64(Float64(3.0 + Float64(2.0 / Float64(r * r))) - Float64(Float64(Float64(0.125 * Float64(3.0 - Float64(2.0 * v))) * Float64(Float64(Float64(w * w) * r) * r)) / Float64(1.0 - v))) - 4.5) end
function tmp = code(v, w, r) tmp = ((3.0 + (2.0 / (r * r))) - (((0.125 * (3.0 - (2.0 * v))) * (((w * w) * r) * r)) / (1.0 - v))) - 4.5; end
code[v_, w_, r_] := N[(N[(N[(3.0 + N[(2.0 / N[(r * r), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(N[(N[(0.125 * N[(3.0 - N[(2.0 * v), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(N[(N[(w * w), $MachinePrecision] * r), $MachinePrecision] * r), $MachinePrecision]), $MachinePrecision] / N[(1.0 - v), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - 4.5), $MachinePrecision]
\begin{array}{l}
\\
\left(\left(3 + \frac{2}{r \cdot r}\right) - \frac{\left(0.125 \cdot \left(3 - 2 \cdot v\right)\right) \cdot \left(\left(\left(w \cdot w\right) \cdot r\right) \cdot r\right)}{1 - v}\right) - 4.5
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 7 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (v w r) :precision binary64 (- (- (+ 3.0 (/ 2.0 (* r r))) (/ (* (* 0.125 (- 3.0 (* 2.0 v))) (* (* (* w w) r) r)) (- 1.0 v))) 4.5))
/* Alternative 1: byte-identical to the initial program above (kept as the baseline). */
double code(double v, double w, double r) {
return ((3.0 + (2.0 / (r * r))) - (((0.125 * (3.0 - (2.0 * v))) * (((w * w) * r) * r)) / (1.0 - v))) - 4.5;
}
real(8) function code(v, w, r)
real(8), intent (in) :: v
real(8), intent (in) :: w
real(8), intent (in) :: r
code = ((3.0d0 + (2.0d0 / (r * r))) - (((0.125d0 * (3.0d0 - (2.0d0 * v))) * (((w * w) * r) * r)) / (1.0d0 - v))) - 4.5d0
end function
public static double code(double v, double w, double r) {
return ((3.0 + (2.0 / (r * r))) - (((0.125 * (3.0 - (2.0 * v))) * (((w * w) * r) * r)) / (1.0 - v))) - 4.5;
}
def code(v, w, r): return ((3.0 + (2.0 / (r * r))) - (((0.125 * (3.0 - (2.0 * v))) * (((w * w) * r) * r)) / (1.0 - v))) - 4.5
function code(v, w, r) return Float64(Float64(Float64(3.0 + Float64(2.0 / Float64(r * r))) - Float64(Float64(Float64(0.125 * Float64(3.0 - Float64(2.0 * v))) * Float64(Float64(Float64(w * w) * r) * r)) / Float64(1.0 - v))) - 4.5) end
function tmp = code(v, w, r) tmp = ((3.0 + (2.0 / (r * r))) - (((0.125 * (3.0 - (2.0 * v))) * (((w * w) * r) * r)) / (1.0 - v))) - 4.5; end
code[v_, w_, r_] := N[(N[(N[(3.0 + N[(2.0 / N[(r * r), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(N[(N[(0.125 * N[(3.0 - N[(2.0 * v), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(N[(N[(w * w), $MachinePrecision] * r), $MachinePrecision] * r), $MachinePrecision]), $MachinePrecision] / N[(1.0 - v), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - 4.5), $MachinePrecision]
\begin{array}{l}
\\
\left(\left(3 + \frac{2}{r \cdot r}\right) - \frac{\left(0.125 \cdot \left(3 - 2 \cdot v\right)\right) \cdot \left(\left(\left(w \cdot w\right) \cdot r\right) \cdot r\right)}{1 - v}\right) - 4.5
\end{array}
(FPCore (v w r) :precision binary64 (+ (+ 3.0 (/ 2.0 (* r r))) (- (* (* 0.125 (+ 3.0 (* -2.0 v))) (/ (* r w) (/ (+ v -1.0) (* r w)))) 4.5)))
/* Alternative: the quotient is restructured as (r*w) / ((v - 1)/(r*w)), i.e.
 * (r*w)^2/(v - 1); using (v - 1) instead of -(1 - v) flips the outer sign to +. */
double code(double v, double w, double r) {
return (3.0 + (2.0 / (r * r))) + (((0.125 * (3.0 + (-2.0 * v))) * ((r * w) / ((v + -1.0) / (r * w)))) - 4.5);
}
real(8) function code(v, w, r)
real(8), intent (in) :: v
real(8), intent (in) :: w
real(8), intent (in) :: r
code = (3.0d0 + (2.0d0 / (r * r))) + (((0.125d0 * (3.0d0 + ((-2.0d0) * v))) * ((r * w) / ((v + (-1.0d0)) / (r * w)))) - 4.5d0)
end function
public static double code(double v, double w, double r) {
return (3.0 + (2.0 / (r * r))) + (((0.125 * (3.0 + (-2.0 * v))) * ((r * w) / ((v + -1.0) / (r * w)))) - 4.5);
}
def code(v, w, r): return (3.0 + (2.0 / (r * r))) + (((0.125 * (3.0 + (-2.0 * v))) * ((r * w) / ((v + -1.0) / (r * w)))) - 4.5)
function code(v, w, r) return Float64(Float64(3.0 + Float64(2.0 / Float64(r * r))) + Float64(Float64(Float64(0.125 * Float64(3.0 + Float64(-2.0 * v))) * Float64(Float64(r * w) / Float64(Float64(v + -1.0) / Float64(r * w)))) - 4.5)) end
function tmp = code(v, w, r) tmp = (3.0 + (2.0 / (r * r))) + (((0.125 * (3.0 + (-2.0 * v))) * ((r * w) / ((v + -1.0) / (r * w)))) - 4.5); end
code[v_, w_, r_] := N[(N[(3.0 + N[(2.0 / N[(r * r), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(N[(N[(0.125 * N[(3.0 + N[(-2.0 * v), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(N[(r * w), $MachinePrecision] / N[(N[(v + -1.0), $MachinePrecision] / N[(r * w), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - 4.5), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(3 + \frac{2}{r \cdot r}\right) + \left(\left(0.125 \cdot \left(3 + -2 \cdot v\right)\right) \cdot \frac{r \cdot w}{\frac{v + -1}{r \cdot w}} - 4.5\right)
\end{array}
Initial program 83.7%
associate--l-83.6%
associate-*l*78.5%
sqr-neg78.5%
associate-*l*83.6%
associate-/l*88.4%
fma-define88.4%
Simplified88.4%
add-sqr-sqrt88.3%
associate-/l*88.3%
sqrt-prod48.0%
sqrt-prod48.0%
sqrt-prod23.4%
add-sqr-sqrt37.5%
associate-*l*37.5%
add-sqr-sqrt67.4%
sqrt-prod37.5%
sqrt-prod37.5%
sqrt-prod26.9%
add-sqr-sqrt53.0%
associate-*l*53.0%
add-sqr-sqrt99.8%
Applied egg-rr99.8%
clear-num99.8%
un-div-inv99.8%
Applied egg-rr99.8%
Final simplification99.8%
(FPCore (v w r)
:precision binary64
(let* ((t_0 (+ 3.0 (/ 2.0 (* r r)))))
(if (or (<= v -11500000000000.0) (not (<= v 6e-16)))
(- t_0 (+ 4.5 (/ (* -0.25 (* r (* v w))) (/ (- 1.0 v) (* r w)))))
(+ t_0 (- (/ (* (* r w) 0.375) (/ (+ v -1.0) (* r w))) 4.5)))))
/* Regime-split turbine expression (binary64), from the FPCore above:
 *   head = 3 + 2/r^2
 *   first branch  (v <= -1.15e13, or NOT v <= 6e-16 — this also routes NaN v here):
 *       head - (4.5 + (-0.25 * r*v*w) / ((1 - v)/(r*w)))
 *   otherwise: head + ((r*w)*0.375 / ((v - 1)/(r*w)) - 4.5)
 * Arithmetic grouping in each branch matches the report exactly. */
double code(double v, double w, double r) {
    double head = 3.0 + (2.0 / (r * r));
    if ((v <= -11500000000000.0) || !(v <= 6e-16)) {
        double term = (-0.25 * (r * (v * w))) / ((1.0 - v) / (r * w));
        return head - (4.5 + term);
    }
    double term = ((r * w) * 0.375) / ((v + -1.0) / (r * w));
    return head + (term - 4.5);
}
! Regime-split turbine expression in double precision (see the FPCore above).
! t_0 = 3 + 2/r**2; the first branch is used for large-magnitude v.
real(8) function code(v, w, r)
real(8), intent (in) :: v
real(8), intent (in) :: w
real(8), intent (in) :: r
real(8) :: t_0
real(8) :: tmp
t_0 = 3.0d0 + (2.0d0 / (r * r))
! First branch: v <= -1.15e13 or not (v <= 6e-16); per the log this variant
! came from a Taylor expansion in v around inf.
if ((v <= (-11500000000000.0d0)) .or. (.not. (v <= 6d-16))) then
tmp = t_0 - (4.5d0 + (((-0.25d0) * (r * (v * w))) / ((1.0d0 - v) / (r * w))))
else
tmp = t_0 + ((((r * w) * 0.375d0) / ((v + (-1.0d0)) / (r * w))) - 4.5d0)
end if
code = tmp
end function
/**
 * Regime-split turbine expression (binary64), matching the FPCore above.
 * The first branch handles v <= -1.15e13 or !(v <= 6e-16) (NaN v lands here too);
 * the second branch covers the remaining small-v regime. Arithmetic grouping in
 * each branch is identical to the generated report.
 */
public static double code(double v, double w, double r) {
    double head = 3.0 + (2.0 / (r * r));
    boolean largeV = (v <= -11500000000000.0) || !(v <= 6e-16);
    if (largeV) {
        double term = (-0.25 * (r * (v * w))) / ((1.0 - v) / (r * w));
        return head - (4.5 + term);
    }
    double term = ((r * w) * 0.375) / ((v + -1.0) / (r * w));
    return head + (term - 4.5);
}
def code(v, w, r): t_0 = 3.0 + (2.0 / (r * r)) tmp = 0 if (v <= -11500000000000.0) or not (v <= 6e-16): tmp = t_0 - (4.5 + ((-0.25 * (r * (v * w))) / ((1.0 - v) / (r * w)))) else: tmp = t_0 + ((((r * w) * 0.375) / ((v + -1.0) / (r * w))) - 4.5) return tmp
function code(v, w, r) t_0 = Float64(3.0 + Float64(2.0 / Float64(r * r))) tmp = 0.0 if ((v <= -11500000000000.0) || !(v <= 6e-16)) tmp = Float64(t_0 - Float64(4.5 + Float64(Float64(-0.25 * Float64(r * Float64(v * w))) / Float64(Float64(1.0 - v) / Float64(r * w))))); else tmp = Float64(t_0 + Float64(Float64(Float64(Float64(r * w) * 0.375) / Float64(Float64(v + -1.0) / Float64(r * w))) - 4.5)); end return tmp end
function tmp_2 = code(v, w, r) t_0 = 3.0 + (2.0 / (r * r)); tmp = 0.0; if ((v <= -11500000000000.0) || ~((v <= 6e-16))) tmp = t_0 - (4.5 + ((-0.25 * (r * (v * w))) / ((1.0 - v) / (r * w)))); else tmp = t_0 + ((((r * w) * 0.375) / ((v + -1.0) / (r * w))) - 4.5); end tmp_2 = tmp; end
code[v_, w_, r_] := Block[{t$95$0 = N[(3.0 + N[(2.0 / N[(r * r), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, If[Or[LessEqual[v, -11500000000000.0], N[Not[LessEqual[v, 6e-16]], $MachinePrecision]], N[(t$95$0 - N[(4.5 + N[(N[(-0.25 * N[(r * N[(v * w), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(N[(1.0 - v), $MachinePrecision] / N[(r * w), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(t$95$0 + N[(N[(N[(N[(r * w), $MachinePrecision] * 0.375), $MachinePrecision] / N[(N[(v + -1.0), $MachinePrecision] / N[(r * w), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - 4.5), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := 3 + \frac{2}{r \cdot r}\\
\mathbf{if}\;v \leq -11500000000000 \lor \neg \left(v \leq 6 \cdot 10^{-16}\right):\\
\;\;\;\;t_0 - \left(4.5 + \frac{-0.25 \cdot \left(r \cdot \left(v \cdot w\right)\right)}{\frac{1 - v}{r \cdot w}}\right)\\
\mathbf{else}:\\
\;\;\;\;t_0 + \left(\frac{\left(r \cdot w\right) \cdot 0.375}{\frac{v + -1}{r \cdot w}} - 4.5\right)\\
\end{array}
\end{array}
if v < -1.15e13 or 5.99999999999999987e-16 < v

Initial program 77.8%
associate--l-77.8%
associate-*l*73.1%
sqr-neg73.1%
associate-*l*77.8%
associate-/l*87.2%
fma-define87.2%
Simplified87.2%
add-sqr-sqrt87.0%
associate-/l*87.0%
sqrt-prod46.5%
sqrt-prod46.5%
sqrt-prod22.0%
add-sqr-sqrt34.4%
associate-*l*34.4%
add-sqr-sqrt64.9%
sqrt-prod34.3%
sqrt-prod34.3%
sqrt-prod28.1%
add-sqr-sqrt54.0%
associate-*l*54.0%
add-sqr-sqrt99.7%
Applied egg-rr99.7%
associate-*r*95.5%
clear-num95.5%
un-div-inv95.5%
+-commutative95.5%
fma-define95.5%
Applied egg-rr95.5%
Taylor expanded in v around inf 85.6%
if -1.15e13 < v < 5.99999999999999987e-16

Initial program 89.8%
associate--l-89.7%
associate-*l*84.1%
sqr-neg84.1%
associate-*l*89.7%
associate-/l*89.7%
fma-define89.8%
Simplified89.7%
add-sqr-sqrt89.7%
associate-/l*89.7%
sqrt-prod49.6%
sqrt-prod49.6%
sqrt-prod24.8%
add-sqr-sqrt40.8%
associate-*l*40.8%
add-sqr-sqrt70.1%
sqrt-prod40.8%
sqrt-prod40.8%
sqrt-prod25.6%
add-sqr-sqrt51.9%
associate-*l*51.9%
add-sqr-sqrt99.8%
Applied egg-rr99.8%
associate-*r*99.8%
clear-num99.8%
un-div-inv99.8%
+-commutative99.8%
fma-define99.8%
Applied egg-rr99.8%
Taylor expanded in v around 0 99.8%
Final simplification92.6%
(FPCore (v w r) :precision binary64 (+ (+ 3.0 (/ 2.0 (* r r))) (- (* (* 0.125 (+ 3.0 (* -2.0 v))) (* (* r w) (/ (* r w) (+ v -1.0)))) 4.5)))
/* Alternative: same 0.125*(3 - 2v) factor, but w^2*r^2/(1 - v) is formed as
 * (r*w) * ((r*w)/(v - 1)); the (v - 1) denominator absorbs the sign flip. */
double code(double v, double w, double r) {
return (3.0 + (2.0 / (r * r))) + (((0.125 * (3.0 + (-2.0 * v))) * ((r * w) * ((r * w) / (v + -1.0)))) - 4.5);
}
real(8) function code(v, w, r)
real(8), intent (in) :: v
real(8), intent (in) :: w
real(8), intent (in) :: r
code = (3.0d0 + (2.0d0 / (r * r))) + (((0.125d0 * (3.0d0 + ((-2.0d0) * v))) * ((r * w) * ((r * w) / (v + (-1.0d0))))) - 4.5d0)
end function
public static double code(double v, double w, double r) {
return (3.0 + (2.0 / (r * r))) + (((0.125 * (3.0 + (-2.0 * v))) * ((r * w) * ((r * w) / (v + -1.0)))) - 4.5);
}
def code(v, w, r): return (3.0 + (2.0 / (r * r))) + (((0.125 * (3.0 + (-2.0 * v))) * ((r * w) * ((r * w) / (v + -1.0)))) - 4.5)
function code(v, w, r) return Float64(Float64(3.0 + Float64(2.0 / Float64(r * r))) + Float64(Float64(Float64(0.125 * Float64(3.0 + Float64(-2.0 * v))) * Float64(Float64(r * w) * Float64(Float64(r * w) / Float64(v + -1.0)))) - 4.5)) end
function tmp = code(v, w, r) tmp = (3.0 + (2.0 / (r * r))) + (((0.125 * (3.0 + (-2.0 * v))) * ((r * w) * ((r * w) / (v + -1.0)))) - 4.5); end
code[v_, w_, r_] := N[(N[(3.0 + N[(2.0 / N[(r * r), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(N[(N[(0.125 * N[(3.0 + N[(-2.0 * v), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(N[(r * w), $MachinePrecision] * N[(N[(r * w), $MachinePrecision] / N[(v + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - 4.5), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(3 + \frac{2}{r \cdot r}\right) + \left(\left(0.125 \cdot \left(3 + -2 \cdot v\right)\right) \cdot \left(\left(r \cdot w\right) \cdot \frac{r \cdot w}{v + -1}\right) - 4.5\right)
\end{array}
Initial program 83.7%
associate--l-83.6%
associate-*l*78.5%
sqr-neg78.5%
associate-*l*83.6%
associate-/l*88.4%
fma-define88.4%
Simplified88.4%
add-sqr-sqrt88.3%
associate-/l*88.3%
sqrt-prod48.0%
sqrt-prod48.0%
sqrt-prod23.4%
add-sqr-sqrt37.5%
associate-*l*37.5%
add-sqr-sqrt67.4%
sqrt-prod37.5%
sqrt-prod37.5%
sqrt-prod26.9%
add-sqr-sqrt53.0%
associate-*l*53.0%
add-sqr-sqrt99.8%
Applied egg-rr99.8%
Final simplification99.8%
(FPCore (v w r) :precision binary64 (+ (+ 3.0 (/ 2.0 (* r r))) (- (/ (+ 0.375 (* v -0.25)) (/ (+ v -1.0) (* (* r w) (* r w)))) 4.5)))
/* Alternative: numerator folded to (0.375 + v*-0.25), i.e. 0.125*(3 - 2v),
 * divided by (v - 1)/((r*w)*(r*w)); the (v - 1) denominator carries the sign. */
double code(double v, double w, double r) {
return (3.0 + (2.0 / (r * r))) + (((0.375 + (v * -0.25)) / ((v + -1.0) / ((r * w) * (r * w)))) - 4.5);
}
real(8) function code(v, w, r)
real(8), intent (in) :: v
real(8), intent (in) :: w
real(8), intent (in) :: r
code = (3.0d0 + (2.0d0 / (r * r))) + (((0.375d0 + (v * (-0.25d0))) / ((v + (-1.0d0)) / ((r * w) * (r * w)))) - 4.5d0)
end function
public static double code(double v, double w, double r) {
return (3.0 + (2.0 / (r * r))) + (((0.375 + (v * -0.25)) / ((v + -1.0) / ((r * w) * (r * w)))) - 4.5);
}
def code(v, w, r): return (3.0 + (2.0 / (r * r))) + (((0.375 + (v * -0.25)) / ((v + -1.0) / ((r * w) * (r * w)))) - 4.5)
function code(v, w, r) return Float64(Float64(3.0 + Float64(2.0 / Float64(r * r))) + Float64(Float64(Float64(0.375 + Float64(v * -0.25)) / Float64(Float64(v + -1.0) / Float64(Float64(r * w) * Float64(r * w)))) - 4.5)) end
function tmp = code(v, w, r) tmp = (3.0 + (2.0 / (r * r))) + (((0.375 + (v * -0.25)) / ((v + -1.0) / ((r * w) * (r * w)))) - 4.5); end
code[v_, w_, r_] := N[(N[(3.0 + N[(2.0 / N[(r * r), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(N[(N[(0.375 + N[(v * -0.25), $MachinePrecision]), $MachinePrecision] / N[(N[(v + -1.0), $MachinePrecision] / N[(N[(r * w), $MachinePrecision] * N[(r * w), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - 4.5), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(3 + \frac{2}{r \cdot r}\right) + \left(\frac{0.375 + v \cdot -0.25}{\frac{v + -1}{\left(r \cdot w\right) \cdot \left(r \cdot w\right)}} - 4.5\right)
\end{array}
Initial program 83.7%
associate--l-83.6%
associate-*l*78.5%
sqr-neg78.5%
associate-*l*83.6%
associate-/l*88.4%
fma-define88.4%
Simplified88.4%
clear-num88.4%
un-div-inv88.4%
distribute-rgt-in88.4%
metadata-eval88.4%
*-commutative88.4%
associate-*l*88.4%
metadata-eval88.4%
associate-*r*80.7%
pow280.7%
pow280.7%
pow-prod-down99.8%
Applied egg-rr99.8%
unpow299.8%
Applied egg-rr99.8%
Final simplification99.8%
(FPCore (v w r) :precision binary64 (+ (+ 3.0 (/ 2.0 (* r r))) (- (/ (* (* r w) 0.375) (/ (+ v -1.0) (* r w))) 4.5)))
/* Alternative: coefficient reduced to 0.375; per the log below this variant came
 * from a Taylor expansion in v around 0, dropping the v-dependence of the factor. */
double code(double v, double w, double r) {
return (3.0 + (2.0 / (r * r))) + ((((r * w) * 0.375) / ((v + -1.0) / (r * w))) - 4.5);
}
real(8) function code(v, w, r)
real(8), intent (in) :: v
real(8), intent (in) :: w
real(8), intent (in) :: r
code = (3.0d0 + (2.0d0 / (r * r))) + ((((r * w) * 0.375d0) / ((v + (-1.0d0)) / (r * w))) - 4.5d0)
end function
public static double code(double v, double w, double r) {
return (3.0 + (2.0 / (r * r))) + ((((r * w) * 0.375) / ((v + -1.0) / (r * w))) - 4.5);
}
def code(v, w, r): return (3.0 + (2.0 / (r * r))) + ((((r * w) * 0.375) / ((v + -1.0) / (r * w))) - 4.5)
function code(v, w, r) return Float64(Float64(3.0 + Float64(2.0 / Float64(r * r))) + Float64(Float64(Float64(Float64(r * w) * 0.375) / Float64(Float64(v + -1.0) / Float64(r * w))) - 4.5)) end
function tmp = code(v, w, r) tmp = (3.0 + (2.0 / (r * r))) + ((((r * w) * 0.375) / ((v + -1.0) / (r * w))) - 4.5); end
code[v_, w_, r_] := N[(N[(3.0 + N[(2.0 / N[(r * r), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(N[(N[(N[(r * w), $MachinePrecision] * 0.375), $MachinePrecision] / N[(N[(v + -1.0), $MachinePrecision] / N[(r * w), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - 4.5), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(3 + \frac{2}{r \cdot r}\right) + \left(\frac{\left(r \cdot w\right) \cdot 0.375}{\frac{v + -1}{r \cdot w}} - 4.5\right)
\end{array}
Initial program 83.7%
associate--l-83.6%
associate-*l*78.5%
sqr-neg78.5%
associate-*l*83.6%
associate-/l*88.4%
fma-define88.4%
Simplified88.4%
add-sqr-sqrt88.3%
associate-/l*88.3%
sqrt-prod48.0%
sqrt-prod48.0%
sqrt-prod23.4%
add-sqr-sqrt37.5%
associate-*l*37.5%
add-sqr-sqrt67.4%
sqrt-prod37.5%
sqrt-prod37.5%
sqrt-prod26.9%
add-sqr-sqrt53.0%
associate-*l*53.0%
add-sqr-sqrt99.8%
Applied egg-rr99.8%
associate-*r*97.6%
clear-num97.6%
un-div-inv97.6%
+-commutative97.6%
fma-define97.6%
Applied egg-rr97.6%
Taylor expanded in v around 0 81.7%
Final simplification81.7%
(FPCore (v w r) :precision binary64 (- (+ 3.0 (/ 2.0 (* r r))) 4.5))
/* Alternative: the whole (v,w)-dependent term is dropped, leaving 3 + 2/r^2 - 4.5
 * (per the "Taylor expanded in r around 0" step in the log below). */
double code(double v, double w, double r) {
return (3.0 + (2.0 / (r * r))) - 4.5;
}
real(8) function code(v, w, r)
real(8), intent (in) :: v
real(8), intent (in) :: w
real(8), intent (in) :: r
code = (3.0d0 + (2.0d0 / (r * r))) - 4.5d0
end function
public static double code(double v, double w, double r) {
return (3.0 + (2.0 / (r * r))) - 4.5;
}
def code(v, w, r): return (3.0 + (2.0 / (r * r))) - 4.5
function code(v, w, r) return Float64(Float64(3.0 + Float64(2.0 / Float64(r * r))) - 4.5) end
function tmp = code(v, w, r) tmp = (3.0 + (2.0 / (r * r))) - 4.5; end
code[v_, w_, r_] := N[(N[(3.0 + N[(2.0 / N[(r * r), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - 4.5), $MachinePrecision]
\begin{array}{l}
\\
\left(3 + \frac{2}{r \cdot r}\right) - 4.5
\end{array}
Initial program 83.7%
Simplified78.6%
Taylor expanded in r around 0 54.4%
Final simplification54.4%
(FPCore (v w r) :precision binary64 -1.5)
/* Alternative: constant fold to -1.5 (= 3.0 - 4.5) after the Taylor expansions
 * in r noted in the log below dropped all variable terms. */
double code(double v, double w, double r) {
return -1.5;
}
real(8) function code(v, w, r)
real(8), intent (in) :: v
real(8), intent (in) :: w
real(8), intent (in) :: r
code = -1.5d0
end function
public static double code(double v, double w, double r) {
return -1.5;
}
def code(v, w, r): return -1.5
function code(v, w, r) return -1.5 end
function tmp = code(v, w, r) tmp = -1.5; end
code[v_, w_, r_] := -1.5
\begin{array}{l}
\\
-1.5
\end{array}
Initial program 83.7%
Simplified78.6%
Taylor expanded in r around 0 54.4%
Taylor expanded in r around inf 16.8%
Final simplification16.8%
herbie shell --seed 2024080
(FPCore (v w r)
:name "Rosa's TurbineBenchmark"
:precision binary64
(- (- (+ 3.0 (/ 2.0 (* r r))) (/ (* (* 0.125 (- 3.0 (* 2.0 v))) (* (* (* w w) r) r)) (- 1.0 v))) 4.5))