
(FPCore (m v) :precision binary64 (* (- (/ (* m (- 1.0 m)) v) 1.0) (- 1.0 m)))
/* b parameter of the renormalized beta distribution (see the FPCore
 * :name at the end of this report): ((m*(1-m)/v) - 1) * (1-m),
 * evaluated naively in binary64. The operation order is deliberate —
 * the Herbie alternatives below rewrite exactly this association. */
double code(double m, double v) {
return (((m * (1.0 - m)) / v) - 1.0) * (1.0 - m);
}
! b parameter of the renormalized beta distribution:
! ((m*(1-m)/v) - 1) * (1-m), evaluated naively in real(8).
! Generated port of the FPCore above; the d0 suffixes pin every
! literal to double precision so rounding matches the C version.
real(8) function code(m, v)
real(8), intent (in) :: m
real(8), intent (in) :: v
code = (((m * (1.0d0 - m)) / v) - 1.0d0) * (1.0d0 - m)
end function
// Java port of the FPCore above: ((m*(1-m)/v) - 1) * (1-m) in binary64.
// Operation order mirrors the C/Fortran versions bit-for-bit.
public static double code(double m, double v) {
return (((m * (1.0 - m)) / v) - 1.0) * (1.0 - m);
}
def code(m, v):
    """((m*(1-m)/v) - 1) * (1-m) evaluated in double precision.

    The floating-point association order is preserved exactly as in the
    FPCore above; only the source layout differs from the generated form.
    """
    return (((m * (1.0 - m)) / v) - 1.0) * (1.0 - m)
# Julia port: every intermediate is wrapped in Float64() so each step
# rounds to binary64, matching the C/Fortran versions exactly.
function code(m, v) return Float64(Float64(Float64(Float64(m * Float64(1.0 - m)) / v) - 1.0) * Float64(1.0 - m)) end
% MATLAB port of the FPCore above: ((m*(1-m)/v) - 1) * (1-m) in double.
function tmp = code(m, v) tmp = (((m * (1.0 - m)) / v) - 1.0) * (1.0 - m); end
(* Mathematica port: N[..., $MachinePrecision] wraps every intermediate
   so each step rounds at machine precision, mimicking binary64. *)
code[m_, v_] := N[(N[(N[(N[(m * N[(1.0 - m), $MachinePrecision]), $MachinePrecision] / v), $MachinePrecision] - 1.0), $MachinePrecision] * N[(1.0 - m), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(\frac{m \cdot \left(1 - m\right)}{v} - 1\right) \cdot \left(1 - m\right)
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 9 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (m v) :precision binary64 (* (- (/ (* m (- 1.0 m)) v) 1.0) (- 1.0 m)))
double code(double m, double v) {
return (((m * (1.0 - m)) / v) - 1.0) * (1.0 - m);
}
real(8) function code(m, v)
real(8), intent (in) :: m
real(8), intent (in) :: v
code = (((m * (1.0d0 - m)) / v) - 1.0d0) * (1.0d0 - m)
end function
public static double code(double m, double v) {
return (((m * (1.0 - m)) / v) - 1.0) * (1.0 - m);
}
def code(m, v): return (((m * (1.0 - m)) / v) - 1.0) * (1.0 - m)
function code(m, v) return Float64(Float64(Float64(Float64(m * Float64(1.0 - m)) / v) - 1.0) * Float64(1.0 - m)) end
function tmp = code(m, v) tmp = (((m * (1.0 - m)) / v) - 1.0) * (1.0 - m); end
code[m_, v_] := N[(N[(N[(N[(m * N[(1.0 - m), $MachinePrecision]), $MachinePrecision] / v), $MachinePrecision] - 1.0), $MachinePrecision] * N[(1.0 - m), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(\frac{m \cdot \left(1 - m\right)}{v} - 1\right) \cdot \left(1 - m\right)
\end{array}
(FPCore (m v) :precision binary64 (* (- 1.0 m) (+ (* (- 1.0 m) (/ m v)) -1.0)))
/* Herbie alternative: same expression regrouped — the division by v is
 * applied to m first, then scaled by (1-m): (1-m) * ((1-m)*(m/v) - 1).
 * Algebraically equal to the original; rounding behavior differs. */
double code(double m, double v) {
return (1.0 - m) * (((1.0 - m) * (m / v)) + -1.0);
}
real(8) function code(m, v)
real(8), intent (in) :: m
real(8), intent (in) :: v
code = (1.0d0 - m) * (((1.0d0 - m) * (m / v)) + (-1.0d0))
end function
public static double code(double m, double v) {
return (1.0 - m) * (((1.0 - m) * (m / v)) + -1.0);
}
def code(m, v): return (1.0 - m) * (((1.0 - m) * (m / v)) + -1.0)
function code(m, v) return Float64(Float64(1.0 - m) * Float64(Float64(Float64(1.0 - m) * Float64(m / v)) + -1.0)) end
function tmp = code(m, v) tmp = (1.0 - m) * (((1.0 - m) * (m / v)) + -1.0); end
code[m_, v_] := N[(N[(1.0 - m), $MachinePrecision] * N[(N[(N[(1.0 - m), $MachinePrecision] * N[(m / v), $MachinePrecision]), $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(1 - m\right) \cdot \left(\left(1 - m\right) \cdot \frac{m}{v} + -1\right)
\end{array}
Initial program 99.9%
*-commutative 99.9%
associate-*r/ 99.9%
Applied egg-rr 99.9%
Final simplification 99.9%
(FPCore (m v) :precision binary64 (if (<= m 1.0) (+ -1.0 (+ m (/ (* m (- 1.0 m)) v))) (* (- 1.0 m) (- -1.0 (/ m v)))))
/* Herbie alternative: piecewise evaluation split at m = 1.
 * Both branches come from Herbie rewrites (including Taylor expansions,
 * per the derivation trace below), so they are accuracy-tuned
 * approximations of the original product, not exact regroupings. */
double code(double m, double v) {
double tmp;
if (m <= 1.0) {
tmp = -1.0 + (m + ((m * (1.0 - m)) / v));
} else {
tmp = (1.0 - m) * (-1.0 - (m / v));
}
return tmp;
}
! Herbie alternative (Fortran port): piecewise evaluation split at m = 1.
! Branch bodies are Herbie-derived approximations (see derivation trace
! below), mirroring the C version above bit-for-bit.
real(8) function code(m, v)
real(8), intent (in) :: m
real(8), intent (in) :: v
real(8) :: tmp
if (m <= 1.0d0) then
tmp = (-1.0d0) + (m + ((m * (1.0d0 - m)) / v))
else
tmp = (1.0d0 - m) * ((-1.0d0) - (m / v))
end if
code = tmp
end function
public static double code(double m, double v) {
double tmp;
if (m <= 1.0) {
tmp = -1.0 + (m + ((m * (1.0 - m)) / v));
} else {
tmp = (1.0 - m) * (-1.0 - (m / v));
}
return tmp;
}
def code(m, v):
    """Piecewise Herbie alternative, split at m = 1.

    Reconstructed as valid Python: the generated one-liner had the
    if/else flattened onto a single line, which is a syntax error.
    Operation order in each branch matches the C/Julia versions exactly.
    """
    tmp = 0
    if m <= 1.0:
        tmp = -1.0 + (m + ((m * (1.0 - m)) / v))
    else:
        tmp = (1.0 - m) * (-1.0 - (m / v))
    return tmp
function code(m, v) tmp = 0.0 if (m <= 1.0) tmp = Float64(-1.0 + Float64(m + Float64(Float64(m * Float64(1.0 - m)) / v))); else tmp = Float64(Float64(1.0 - m) * Float64(-1.0 - Float64(m / v))); end return tmp end
function tmp_2 = code(m, v) tmp = 0.0; if (m <= 1.0) tmp = -1.0 + (m + ((m * (1.0 - m)) / v)); else tmp = (1.0 - m) * (-1.0 - (m / v)); end tmp_2 = tmp; end
code[m_, v_] := If[LessEqual[m, 1.0], N[(-1.0 + N[(m + N[(N[(m * N[(1.0 - m), $MachinePrecision]), $MachinePrecision] / v), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(1.0 - m), $MachinePrecision] * N[(-1.0 - N[(m / v), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;m \leq 1:\\
\;\;\;\;-1 + \left(m + \frac{m \cdot \left(1 - m\right)}{v}\right)\\
\mathbf{else}:\\
\;\;\;\;\left(1 - m\right) \cdot \left(-1 - \frac{m}{v}\right)\\
\end{array}
\end{array}
if m < 1: Initial program 100.0%
*-commutative100.0%
sub-neg100.0%
associate-/l*99.7%
metadata-eval99.7%
Simplified99.7%
distribute-rgt-in99.7%
associate-*r/100.0%
clear-num99.8%
associate-*l/99.8%
*-un-lft-identity99.8%
associate-/r*99.8%
neg-mul-199.8%
Applied egg-rr99.8%
Taylor expanded in m around 0 97.4%
Taylor expanded in v around inf 97.6%
if 1 < m Initial program 99.9%
*-commutative99.9%
associate-*r/99.9%
Applied egg-rr99.9%
associate-*r/99.9%
clear-num99.9%
associate-/l/99.9%
associate-/l/99.9%
add-sqr-sqrt99.8%
sqrt-unprod95.2%
sqr-neg95.2%
sqrt-unprod0.0%
add-sqr-sqrt0.1%
distribute-neg-frac0.1%
associate-/l/0.1%
associate-/l/0.1%
distribute-neg-frac0.1%
add-sqr-sqrt0.0%
sqrt-unprod95.2%
sqr-neg95.2%
sqrt-unprod99.8%
add-sqr-sqrt99.9%
*-commutative99.9%
Applied egg-rr99.9%
Taylor expanded in m around 0 0.1%
clear-num0.1%
add-sqr-sqrt0.1%
sqrt-unprod0.1%
sqr-neg0.1%
sqrt-unprod0.0%
add-sqr-sqrt77.8%
distribute-frac-neg277.8%
clear-num77.8%
neg-sub077.8%
clear-num77.8%
Applied egg-rr77.8%
neg-sub077.8%
distribute-neg-frac277.8%
Simplified77.8%
Final simplification88.4%
(FPCore (m v) :precision binary64 (if (<= m 1.0) (+ -1.0 (+ m (/ (* m (- 1.0 m)) v))) (* (+ m -1.0) (- (* m (/ m v)) -1.0))))
/* Herbie alternative: piecewise at m = 1. The m > 1 branch
 * (m - 1) * (m^2/v + 1) comes from a Taylor expansion in m around inf
 * (see trace below) — an approximation, not an exact regrouping. */
double code(double m, double v) {
double tmp;
if (m <= 1.0) {
tmp = -1.0 + (m + ((m * (1.0 - m)) / v));
} else {
tmp = (m + -1.0) * ((m * (m / v)) - -1.0);
}
return tmp;
}
real(8) function code(m, v)
real(8), intent (in) :: m
real(8), intent (in) :: v
real(8) :: tmp
if (m <= 1.0d0) then
tmp = (-1.0d0) + (m + ((m * (1.0d0 - m)) / v))
else
tmp = (m + (-1.0d0)) * ((m * (m / v)) - (-1.0d0))
end if
code = tmp
end function
public static double code(double m, double v) {
double tmp;
if (m <= 1.0) {
tmp = -1.0 + (m + ((m * (1.0 - m)) / v));
} else {
tmp = (m + -1.0) * ((m * (m / v)) - -1.0);
}
return tmp;
}
def code(m, v):
    """Piecewise Herbie alternative, split at m = 1.

    Reconstructed as valid Python from the flattened generated
    one-liner (a syntax error as emitted). Each branch mirrors the
    C/Julia versions' operation order exactly.
    """
    tmp = 0
    if m <= 1.0:
        tmp = -1.0 + (m + ((m * (1.0 - m)) / v))
    else:
        tmp = (m + -1.0) * ((m * (m / v)) - -1.0)
    return tmp
function code(m, v) tmp = 0.0 if (m <= 1.0) tmp = Float64(-1.0 + Float64(m + Float64(Float64(m * Float64(1.0 - m)) / v))); else tmp = Float64(Float64(m + -1.0) * Float64(Float64(m * Float64(m / v)) - -1.0)); end return tmp end
function tmp_2 = code(m, v) tmp = 0.0; if (m <= 1.0) tmp = -1.0 + (m + ((m * (1.0 - m)) / v)); else tmp = (m + -1.0) * ((m * (m / v)) - -1.0); end tmp_2 = tmp; end
code[m_, v_] := If[LessEqual[m, 1.0], N[(-1.0 + N[(m + N[(N[(m * N[(1.0 - m), $MachinePrecision]), $MachinePrecision] / v), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(m + -1.0), $MachinePrecision] * N[(N[(m * N[(m / v), $MachinePrecision]), $MachinePrecision] - -1.0), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;m \leq 1:\\
\;\;\;\;-1 + \left(m + \frac{m \cdot \left(1 - m\right)}{v}\right)\\
\mathbf{else}:\\
\;\;\;\;\left(m + -1\right) \cdot \left(m \cdot \frac{m}{v} - -1\right)\\
\end{array}
\end{array}
if m < 1: Initial program 100.0%
*-commutative100.0%
sub-neg100.0%
associate-/l*99.7%
metadata-eval99.7%
Simplified99.7%
distribute-rgt-in99.7%
associate-*r/100.0%
clear-num99.8%
associate-*l/99.8%
*-un-lft-identity99.8%
associate-/r*99.8%
neg-mul-199.8%
Applied egg-rr99.8%
Taylor expanded in m around 0 97.4%
Taylor expanded in v around inf 97.6%
if 1 < m Initial program 99.9%
*-commutative99.9%
sub-neg99.9%
associate-/l*99.9%
metadata-eval99.9%
Simplified99.9%
Taylor expanded in m around inf 96.4%
neg-mul-196.4%
distribute-neg-frac296.4%
Simplified96.4%
Final simplification97.0%
(FPCore (m v) :precision binary64 (if (<= m 1.0) (* (- 1.0 m) (+ (/ m v) -1.0)) (* (- 1.0 m) (- -1.0 (/ m v)))))
/* Herbie alternative: piecewise at m = 1; both branches are
 * Taylor-derived approximations (see trace below). The m <= 1 branch
 * drops higher-order terms relative to the exact product. */
double code(double m, double v) {
double tmp;
if (m <= 1.0) {
tmp = (1.0 - m) * ((m / v) + -1.0);
} else {
tmp = (1.0 - m) * (-1.0 - (m / v));
}
return tmp;
}
real(8) function code(m, v)
real(8), intent (in) :: m
real(8), intent (in) :: v
real(8) :: tmp
if (m <= 1.0d0) then
tmp = (1.0d0 - m) * ((m / v) + (-1.0d0))
else
tmp = (1.0d0 - m) * ((-1.0d0) - (m / v))
end if
code = tmp
end function
public static double code(double m, double v) {
double tmp;
if (m <= 1.0) {
tmp = (1.0 - m) * ((m / v) + -1.0);
} else {
tmp = (1.0 - m) * (-1.0 - (m / v));
}
return tmp;
}
def code(m, v):
    """Piecewise Herbie alternative, split at m = 1.

    Reconstructed as valid Python from the flattened generated
    one-liner (a syntax error as emitted). Each branch mirrors the
    C/Julia versions' operation order exactly.
    """
    tmp = 0
    if m <= 1.0:
        tmp = (1.0 - m) * ((m / v) + -1.0)
    else:
        tmp = (1.0 - m) * (-1.0 - (m / v))
    return tmp
function code(m, v) tmp = 0.0 if (m <= 1.0) tmp = Float64(Float64(1.0 - m) * Float64(Float64(m / v) + -1.0)); else tmp = Float64(Float64(1.0 - m) * Float64(-1.0 - Float64(m / v))); end return tmp end
function tmp_2 = code(m, v) tmp = 0.0; if (m <= 1.0) tmp = (1.0 - m) * ((m / v) + -1.0); else tmp = (1.0 - m) * (-1.0 - (m / v)); end tmp_2 = tmp; end
code[m_, v_] := If[LessEqual[m, 1.0], N[(N[(1.0 - m), $MachinePrecision] * N[(N[(m / v), $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision], N[(N[(1.0 - m), $MachinePrecision] * N[(-1.0 - N[(m / v), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;m \leq 1:\\
\;\;\;\;\left(1 - m\right) \cdot \left(\frac{m}{v} + -1\right)\\
\mathbf{else}:\\
\;\;\;\;\left(1 - m\right) \cdot \left(-1 - \frac{m}{v}\right)\\
\end{array}
\end{array}
if m < 1: Initial program 100.0%
Taylor expanded in m around 0 97.6%
if 1 < m Initial program 99.9%
*-commutative99.9%
associate-*r/99.9%
Applied egg-rr99.9%
associate-*r/99.9%
clear-num99.9%
associate-/l/99.9%
associate-/l/99.9%
add-sqr-sqrt99.8%
sqrt-unprod95.2%
sqr-neg95.2%
sqrt-unprod0.0%
add-sqr-sqrt0.1%
distribute-neg-frac0.1%
associate-/l/0.1%
associate-/l/0.1%
distribute-neg-frac0.1%
add-sqr-sqrt0.0%
sqrt-unprod95.2%
sqr-neg95.2%
sqrt-unprod99.8%
add-sqr-sqrt99.9%
*-commutative99.9%
Applied egg-rr99.9%
Taylor expanded in m around 0 0.1%
clear-num0.1%
add-sqr-sqrt0.1%
sqrt-unprod0.1%
sqr-neg0.1%
sqrt-unprod0.0%
add-sqr-sqrt77.8%
distribute-frac-neg277.8%
clear-num77.8%
neg-sub077.8%
clear-num77.8%
Applied egg-rr77.8%
neg-sub077.8%
distribute-neg-frac277.8%
Simplified77.8%
Final simplification88.4%
(FPCore (m v) :precision binary64 (* (- 1.0 m) (+ -1.0 (* m (/ (- 1.0 m) v)))))
/* Herbie alternative: exact regrouping (1-m) * (-1 + m*((1-m)/v)) —
 * the (1-m)/v quotient is formed before multiplying by m. */
double code(double m, double v) {
return (1.0 - m) * (-1.0 + (m * ((1.0 - m) / v)));
}
real(8) function code(m, v)
real(8), intent (in) :: m
real(8), intent (in) :: v
code = (1.0d0 - m) * ((-1.0d0) + (m * ((1.0d0 - m) / v)))
end function
public static double code(double m, double v) {
return (1.0 - m) * (-1.0 + (m * ((1.0 - m) / v)));
}
def code(m, v):
    """(1-m) * (-1 + m*((1-m)/v)): exact regrouping of the original.

    Floating-point operation order is identical to the generated
    one-liner; only the layout and documentation differ.
    """
    return (1.0 - m) * (-1.0 + (m * ((1.0 - m) / v)))
function code(m, v) return Float64(Float64(1.0 - m) * Float64(-1.0 + Float64(m * Float64(Float64(1.0 - m) / v)))) end
function tmp = code(m, v) tmp = (1.0 - m) * (-1.0 + (m * ((1.0 - m) / v))); end
code[m_, v_] := N[(N[(1.0 - m), $MachinePrecision] * N[(-1.0 + N[(m * N[(N[(1.0 - m), $MachinePrecision] / v), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(1 - m\right) \cdot \left(-1 + m \cdot \frac{1 - m}{v}\right)
\end{array}
Initial program 99.9%
*-commutative99.9%
sub-neg99.9%
associate-/l*99.8%
metadata-eval99.8%
Simplified99.8%
Final simplification99.8%
(FPCore (m v) :precision binary64 (* (- 1.0 m) (+ (/ (- 1.0 m) (/ v m)) -1.0)))
/* Herbie alternative: the m/v factor is expressed as division by the
 * reciprocal quotient v/m: (1-m) * ((1-m)/(v/m) - 1). Algebraically
 * equal to the original; rounding (and m = 0 handling) differs. */
double code(double m, double v) {
return (1.0 - m) * (((1.0 - m) / (v / m)) + -1.0);
}
real(8) function code(m, v)
real(8), intent (in) :: m
real(8), intent (in) :: v
code = (1.0d0 - m) * (((1.0d0 - m) / (v / m)) + (-1.0d0))
end function
public static double code(double m, double v) {
return (1.0 - m) * (((1.0 - m) / (v / m)) + -1.0);
}
def code(m, v): return (1.0 - m) * (((1.0 - m) / (v / m)) + -1.0)
function code(m, v) return Float64(Float64(1.0 - m) * Float64(Float64(Float64(1.0 - m) / Float64(v / m)) + -1.0)) end
function tmp = code(m, v) tmp = (1.0 - m) * (((1.0 - m) / (v / m)) + -1.0); end
code[m_, v_] := N[(N[(1.0 - m), $MachinePrecision] * N[(N[(N[(1.0 - m), $MachinePrecision] / N[(v / m), $MachinePrecision]), $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(1 - m\right) \cdot \left(\frac{1 - m}{\frac{v}{m}} + -1\right)
\end{array}
Initial program 99.9%
*-commutative99.9%
sub-neg99.9%
associate-/l*99.8%
metadata-eval99.8%
Simplified99.8%
*-commutative99.8%
div-inv99.8%
associate-*l*99.8%
associate-/r/99.8%
un-div-inv99.8%
Applied egg-rr99.8%
Final simplification99.8%
(FPCore (m v) :precision binary64 (+ -1.0 (+ m (/ m v))))
/* Herbie alternative: low-order Taylor approximation in m around 0
 * (see trace below): -1 + m + m/v. Cheap, but only ~77% accurate on
 * the sampled inputs per the table above. */
double code(double m, double v) {
return -1.0 + (m + (m / v));
}
real(8) function code(m, v)
real(8), intent (in) :: m
real(8), intent (in) :: v
code = (-1.0d0) + (m + (m / v))
end function
public static double code(double m, double v) {
return -1.0 + (m + (m / v));
}
def code(m, v): return -1.0 + (m + (m / v))
function code(m, v) return Float64(-1.0 + Float64(m + Float64(m / v))) end
function tmp = code(m, v) tmp = -1.0 + (m + (m / v)); end
code[m_, v_] := N[(-1.0 + N[(m + N[(m / v), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
-1 + \left(m + \frac{m}{v}\right)
\end{array}
Initial program 99.9%
*-commutative99.9%
sub-neg99.9%
associate-/l*99.8%
metadata-eval99.8%
Simplified99.8%
Taylor expanded in m around 0 76.9%
+-commutative76.9%
distribute-lft-in76.9%
div-inv77.1%
*-rgt-identity77.1%
Applied egg-rr77.1%
Final simplification77.1%
(FPCore (m v) :precision binary64 (+ m -1.0))
/* Herbie alternative: Taylor expansion in v around infinity collapses
 * the expression to m - 1 (v unused). ~31% accuracy — listed as a
 * speed-over-accuracy extreme. */
double code(double m, double v) {
return m + -1.0;
}
real(8) function code(m, v)
real(8), intent (in) :: m
real(8), intent (in) :: v
code = m + (-1.0d0)
end function
public static double code(double m, double v) {
return m + -1.0;
}
def code(m, v): return m + -1.0
function code(m, v) return Float64(m + -1.0) end
function tmp = code(m, v) tmp = m + -1.0; end
code[m_, v_] := N[(m + -1.0), $MachinePrecision]
\begin{array}{l}
\\
m + -1
\end{array}
Initial program 99.9%
*-commutative99.9%
sub-neg99.9%
associate-/l*99.8%
metadata-eval99.8%
Simplified99.8%
Taylor expanded in v around inf 31.2%
neg-mul-131.2%
sub-neg31.2%
+-commutative31.2%
distribute-neg-in31.2%
remove-double-neg31.2%
metadata-eval31.2%
Simplified31.2%
Final simplification31.2%
(FPCore (m v) :precision binary64 -1.0)
/* Herbie alternative: zeroth-order Taylor expansion in m around 0 —
 * the constant -1 (both arguments unused). ~29% accuracy; the
 * cheapest entry in the accuracy/speed trade-off table. */
double code(double m, double v) {
return -1.0;
}
real(8) function code(m, v)
real(8), intent (in) :: m
real(8), intent (in) :: v
code = -1.0d0
end function
public static double code(double m, double v) {
return -1.0;
}
def code(m, v): return -1.0
function code(m, v) return -1.0 end
function tmp = code(m, v) tmp = -1.0; end
code[m_, v_] := -1.0
\begin{array}{l}
\\
-1
\end{array}
Initial program 99.9%
*-commutative99.9%
sub-neg99.9%
associate-/l*99.8%
metadata-eval99.8%
Simplified99.8%
Taylor expanded in m around 0 28.9%
Final simplification28.9%
herbie shell --seed 2024085
;; Original problem statement as passed to the Herbie shell above:
;; b = ((m*(1-m)/v) - 1) * (1-m) in binary64, with the precondition
;; 0 < m, 0 < v < 0.25 restricting the sampled input domain.
(FPCore (m v)
 :name "b parameter of renormalized beta distribution"
 :precision binary64
 :pre (and (and (< 0.0 m) (< 0.0 v)) (< v 0.25))
 (* (- (/ (* m (- 1.0 m)) v) 1.0) (- 1.0 m)))