
(FPCore (u v t1) :precision binary64 (/ (* (- t1) v) (* (+ t1 u) (+ t1 u))))
/* Doppler expression (-t1 * v) / (t1 + u)^2, evaluated in binary64. */
double code(double u, double v, double t1) {
    double denom = (t1 + u) * (t1 + u);
    return (-t1 * v) / denom;
}
real(8) function code(u, v, t1)
    ! Doppler expression (-t1 * v) / (t1 + u)**2 in double precision.
    ! May overflow or divide by zero when t1 + u == 0, mirroring the
    ! underlying IEEE expression.
    implicit none
    real(8), intent(in) :: u
    real(8), intent(in) :: v
    real(8), intent(in) :: t1
    code = (-t1 * v) / ((t1 + u) * (t1 + u))
end function
/** Doppler expression (-t1 * v) / (t1 + u)^2 in binary64. */
public static double code(double u, double v, double t1) {
    final double s = t1 + u;
    return (-t1 * v) / (s * s);
}
def code(u, v, t1): return (-t1 * v) / ((t1 + u) * (t1 + u))
function code(u, v, t1) return Float64(Float64(Float64(-t1) * v) / Float64(Float64(t1 + u) * Float64(t1 + u))) end
function tmp = code(u, v, t1) tmp = (-t1 * v) / ((t1 + u) * (t1 + u)); end
code[u_, v_, t1_] := N[(N[((-t1) * v), $MachinePrecision] / N[(N[(t1 + u), $MachinePrecision] * N[(t1 + u), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(-t1\right) \cdot v}{\left(t1 + u\right) \cdot \left(t1 + u\right)}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 11 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (u v t1) :precision binary64 (/ (* (- t1) v) (* (+ t1 u) (+ t1 u))))
/* Alternative 1: identical to the input program, (-t1 * v) / (t1 + u)^2 in binary64. */
double code(double u, double v, double t1) {
return (-t1 * v) / ((t1 + u) * (t1 + u));
}
real(8) function code(u, v, t1)
    ! Alternative 1: (-t1 * v) / (t1 + u)**2, identical to the input program.
    implicit none
    real(8), intent(in) :: u
    real(8), intent(in) :: v
    real(8), intent(in) :: t1
    code = (-t1 * v) / ((t1 + u) * (t1 + u))
end function
/** Alternative 1: (-t1 * v) / (t1 + u)^2, identical to the input program. */
public static double code(double u, double v, double t1) {
    double denom = (t1 + u) * (t1 + u);
    return (-t1 * v) / denom;
}
def code(u, v, t1): return (-t1 * v) / ((t1 + u) * (t1 + u))
function code(u, v, t1) return Float64(Float64(Float64(-t1) * v) / Float64(Float64(t1 + u) * Float64(t1 + u))) end
function tmp = code(u, v, t1) tmp = (-t1 * v) / ((t1 + u) * (t1 + u)); end
code[u_, v_, t1_] := N[(N[((-t1) * v), $MachinePrecision] / N[(N[(t1 + u), $MachinePrecision] * N[(t1 + u), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(-t1\right) \cdot v}{\left(t1 + u\right) \cdot \left(t1 + u\right)}
\end{array}
(FPCore (u v t1) :precision binary64 (/ (* (/ t1 (+ t1 u)) v) (- (- u) t1)))
/* Rearranged form: ((t1 / (t1 + u)) * v) / (-u - t1). */
double code(double u, double v, double t1) {
    double ratio = t1 / (t1 + u);
    return (ratio * v) / (-u - t1);
}
real(8) function code(u, v, t1)
    ! Rearranged form ((t1 / (t1 + u)) * v) / (-u - t1) of the Doppler
    ! expression, evaluated in double precision.
    implicit none
    real(8), intent(in) :: u
    real(8), intent(in) :: v
    real(8), intent(in) :: t1
    code = ((t1 / (t1 + u)) * v) / (-u - t1)
end function
/** Rearranged form ((t1 / (t1 + u)) * v) / (-u - t1). */
public static double code(double u, double v, double t1) {
    final double ratio = t1 / (t1 + u);
    return (ratio * v) / (-u - t1);
}
def code(u, v, t1): return ((t1 / (t1 + u)) * v) / (-u - t1)
function code(u, v, t1) return Float64(Float64(Float64(t1 / Float64(t1 + u)) * v) / Float64(Float64(-u) - t1)) end
function tmp = code(u, v, t1) tmp = ((t1 / (t1 + u)) * v) / (-u - t1); end
code[u_, v_, t1_] := N[(N[(N[(t1 / N[(t1 + u), $MachinePrecision]), $MachinePrecision] * v), $MachinePrecision] / N[((-u) - t1), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\frac{t1}{t1 + u} \cdot v}{\left(-u\right) - t1}
\end{array}
Initial program 68.3%
lift-/.f64N/A
lift-*.f64N/A
lift-*.f64N/A
times-fracN/A
frac-2negN/A
associate-*r/N/A
lower-/.f64N/A
lower-*.f64N/A
lift-neg.f64N/A
distribute-frac-negN/A
lower-neg.f64N/A
lower-/.f64N/A
lower-neg.f64N/A
lower-neg.f6498.4
Applied rewrites98.4%
lift-*.f64N/A
lift-neg.f64N/A
distribute-lft-neg-outN/A
distribute-rgt-neg-inN/A
lift-neg.f64N/A
remove-double-negN/A
lower-*.f6498.4
Applied rewrites98.4%
Final simplification98.4%
(FPCore (u v t1)
:precision binary64
(if (<= t1 -9.2e+156)
(* v (/ (+ -1.0 (/ u t1)) (+ t1 u)))
(if (<= t1 1.5e+108)
(* v (/ (- t1) (* (+ t1 u) (+ t1 u))))
(/ (* v -1.0) (+ t1 u)))))
/* Piecewise evaluation of -t1 * v / (t1 + u)^2.  The branch thresholds and
 * rearranged forms follow the accompanying derivation log (Taylor expansions
 * of the expression around t1 -> inf). */
double code(double u, double v, double t1) {
    if (t1 <= -9.2e+156) {
        return v * ((-1.0 + (u / t1)) / (t1 + u));
    }
    if (t1 <= 1.5e+108) {
        return v * (-t1 / ((t1 + u) * (t1 + u)));
    }
    return (v * -1.0) / (t1 + u);
}
real(8) function code(u, v, t1)
    ! Piecewise evaluation of (-t1 * v) / (t1 + u)**2; the extreme-t1
    ! branches use rearranged forms (thresholds from the derivation log).
    implicit none
    real(8), intent(in) :: u
    real(8), intent(in) :: v
    real(8), intent(in) :: t1
    real(8) :: tmp
    if (t1 <= (-9.2d+156)) then
        tmp = v * (((-1.0d0) + (u / t1)) / (t1 + u))
    else if (t1 <= 1.5d+108) then
        tmp = v * (-t1 / ((t1 + u) * (t1 + u)))
    else
        tmp = (v * (-1.0d0)) / (t1 + u)
    end if
    code = tmp
end function
/** Piecewise form of -t1 * v / (t1 + u)^2; thresholds follow the derivation log. */
public static double code(double u, double v, double t1) {
    if (t1 <= -9.2e+156) {
        return v * ((-1.0 + (u / t1)) / (t1 + u));
    } else if (t1 <= 1.5e+108) {
        return v * (-t1 / ((t1 + u) * (t1 + u)));
    }
    return (v * -1.0) / (t1 + u);
}
def code(u, v, t1): tmp = 0 if t1 <= -9.2e+156: tmp = v * ((-1.0 + (u / t1)) / (t1 + u)) elif t1 <= 1.5e+108: tmp = v * (-t1 / ((t1 + u) * (t1 + u))) else: tmp = (v * -1.0) / (t1 + u) return tmp
function code(u, v, t1) tmp = 0.0 if (t1 <= -9.2e+156) tmp = Float64(v * Float64(Float64(-1.0 + Float64(u / t1)) / Float64(t1 + u))); elseif (t1 <= 1.5e+108) tmp = Float64(v * Float64(Float64(-t1) / Float64(Float64(t1 + u) * Float64(t1 + u)))); else tmp = Float64(Float64(v * -1.0) / Float64(t1 + u)); end return tmp end
function tmp_2 = code(u, v, t1) tmp = 0.0; if (t1 <= -9.2e+156) tmp = v * ((-1.0 + (u / t1)) / (t1 + u)); elseif (t1 <= 1.5e+108) tmp = v * (-t1 / ((t1 + u) * (t1 + u))); else tmp = (v * -1.0) / (t1 + u); end tmp_2 = tmp; end
code[u_, v_, t1_] := If[LessEqual[t1, -9.2e+156], N[(v * N[(N[(-1.0 + N[(u / t1), $MachinePrecision]), $MachinePrecision] / N[(t1 + u), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], If[LessEqual[t1, 1.5e+108], N[(v * N[((-t1) / N[(N[(t1 + u), $MachinePrecision] * N[(t1 + u), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(v * -1.0), $MachinePrecision] / N[(t1 + u), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;t1 \leq -9.2 \cdot 10^{+156}:\\
\;\;\;\;v \cdot \frac{-1 + \frac{u}{t1}}{t1 + u}\\
\mathbf{elif}\;t1 \leq 1.5 \cdot 10^{+108}:\\
\;\;\;\;v \cdot \frac{-t1}{\left(t1 + u\right) \cdot \left(t1 + u\right)}\\
\mathbf{else}:\\
\;\;\;\;\frac{v \cdot -1}{t1 + u}\\
\end{array}
\end{array}
if t1 < -9.1999999999999995e156
Initial program 38.5%
lift-/.f64N/A
lift-*.f64N/A
*-commutativeN/A
associate-/l*N/A
*-commutativeN/A
lower-*.f64N/A
lower-/.f6440.1
Applied rewrites40.1%
lift-/.f64N/A
lift-*.f64N/A
associate-/r*N/A
lift-neg.f64N/A
distribute-neg-fracN/A
lift-/.f64N/A
lift-neg.f64N/A
lower-/.f6496.9
lift-neg.f64N/A
lift-/.f64N/A
distribute-neg-frac2N/A
lift-neg.f64N/A
lower-/.f6496.9
lift-neg.f64N/A
lift-+.f64N/A
distribute-neg-inN/A
lift-neg.f64N/A
unsub-negN/A
lower--.f6496.9
Applied rewrites96.9%
Taylor expanded in t1 around inf
sub-negN/A
metadata-evalN/A
+-commutativeN/A
lower-+.f64N/A
lower-/.f6490.3
Applied rewrites90.3%
if -9.1999999999999995e156 < t1 < 1.49999999999999992e108
Initial program 83.4%
lift-/.f64N/A
lift-*.f64N/A
*-commutativeN/A
associate-/l*N/A
*-commutativeN/A
lower-*.f64N/A
lower-/.f6487.3
Applied rewrites87.3%
if 1.49999999999999992e108 < t1
Initial program 47.3%
lift-/.f64N/A
lift-*.f64N/A
lift-*.f64N/A
times-fracN/A
frac-2negN/A
associate-*r/N/A
lower-/.f64N/A
lower-*.f64N/A
lift-neg.f64N/A
distribute-frac-negN/A
lower-neg.f64N/A
lower-/.f64N/A
lower-neg.f64N/A
lower-neg.f6499.9
Applied rewrites99.9%
Taylor expanded in t1 around inf
Applied rewrites91.4%
lift-/.f64N/A
lift-neg.f64N/A
distribute-frac-neg2N/A
distribute-frac-negN/A
lower-/.f64N/A
lift-*.f64N/A
lift-neg.f64N/A
distribute-rgt-neg-outN/A
remove-double-negN/A
*-commutativeN/A
lower-*.f6491.4
Applied rewrites91.4%
Final simplification88.3%
herbie shell --seed 2024225
(FPCore (u v t1)
:name "Rosa's DopplerBench"
:precision binary64
(/ (* (- t1) v) (* (+ t1 u) (+ t1 u))))