
;; Initial program: ((m*(1-m))/v - 1) * (1-m) in binary64.
(FPCore (m v) :precision binary64 (* (- (/ (* m (- 1.0 m)) v) 1.0) (- 1.0 m)))
double code(double m, double v) {
    /* ((m*(1-m))/v - 1) * (1-m), evaluated step-by-step in IEEE binary64. */
    double rest = 1.0 - m;
    double ratio = (m * rest) / v;
    return (ratio - 1.0) * rest;
}
! ((m*(1-m))/v - 1) * (1-m), evaluated in double precision (binary64).
real(8) function code(m, v)
real(8), intent (in) :: m
real(8), intent (in) :: v
code = (((m * (1.0d0 - m)) / v) - 1.0d0) * (1.0d0 - m)
end function
// ((m*(1-m))/v - 1) * (1-m), evaluated in IEEE binary64.
public static double code(double m, double v) {
return (((m * (1.0 - m)) / v) - 1.0) * (1.0 - m);
}
def code(m, v):
    """((m*(1-m))/v - 1) * (1-m), evaluated in IEEE binary64."""
    rest = 1.0 - m
    return ((m * rest) / v - 1.0) * rest
# ((m*(1-m))/v - 1) * (1-m); explicit Float64() calls pin every step to binary64.
function code(m, v) return Float64(Float64(Float64(Float64(m * Float64(1.0 - m)) / v) - 1.0) * Float64(1.0 - m)) end
% ((m*(1-m))/v - 1) * (1-m), evaluated in double precision.
function tmp = code(m, v) tmp = (((m * (1.0 - m)) / v) - 1.0) * (1.0 - m); end
(* ((m*(1-m))/v - 1) * (1-m); each step rounded to $MachinePrecision. *)
code[m_, v_] := N[(N[(N[(N[(m * N[(1.0 - m), $MachinePrecision]), $MachinePrecision] / v), $MachinePrecision] - 1.0), $MachinePrecision] * N[(1.0 - m), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(\frac{m \cdot \left(1 - m\right)}{v} - 1\right) \cdot \left(1 - m\right)
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 7 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
;; Alternative 1: ((m*(1-m))/v - 1) * (1-m) in binary64 (unchanged form).
(FPCore (m v) :precision binary64 (* (- (/ (* m (- 1.0 m)) v) 1.0) (- 1.0 m)))
/* ((m*(1-m))/v - 1) * (1-m), evaluated in IEEE binary64. */
double code(double m, double v) {
return (((m * (1.0 - m)) / v) - 1.0) * (1.0 - m);
}
! ((m*(1-m))/v - 1) * (1-m), evaluated in double precision (binary64).
real(8) function code(m, v)
real(8), intent (in) :: m
real(8), intent (in) :: v
code = (((m * (1.0d0 - m)) / v) - 1.0d0) * (1.0d0 - m)
end function
// ((m*(1-m))/v - 1) * (1-m), evaluated in IEEE binary64.
public static double code(double m, double v) {
return (((m * (1.0 - m)) / v) - 1.0) * (1.0 - m);
}
# ((m*(1-m))/v - 1) * (1-m), evaluated in IEEE binary64.
def code(m, v): return (((m * (1.0 - m)) / v) - 1.0) * (1.0 - m)
# ((m*(1-m))/v - 1) * (1-m); explicit Float64() calls pin every step to binary64.
function code(m, v) return Float64(Float64(Float64(Float64(m * Float64(1.0 - m)) / v) - 1.0) * Float64(1.0 - m)) end
% ((m*(1-m))/v - 1) * (1-m), evaluated in double precision.
function tmp = code(m, v) tmp = (((m * (1.0 - m)) / v) - 1.0) * (1.0 - m); end
(* ((m*(1-m))/v - 1) * (1-m); each step rounded to $MachinePrecision. *)
code[m_, v_] := N[(N[(N[(N[(m * N[(1.0 - m), $MachinePrecision]), $MachinePrecision] / v), $MachinePrecision] - 1.0), $MachinePrecision] * N[(1.0 - m), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(\frac{m \cdot \left(1 - m\right)}{v} - 1\right) \cdot \left(1 - m\right)
\end{array}
;; Alternative 2: (1-m) * (m/(v/(1-m)) + -1) in binary64.
(FPCore (m v) :precision binary64 (* (- 1.0 m) (+ (/ m (/ v (- 1.0 m))) -1.0)))
double code(double m, double v) {
    /* (1-m) * (m/(v/(1-m)) - 1), evaluated step-by-step in IEEE binary64. */
    double rest = 1.0 - m;
    double quotient = m / (v / rest);
    return rest * (quotient + -1.0);
}
! (1-m) * (m/(v/(1-m)) - 1), evaluated in double precision (binary64).
real(8) function code(m, v)
real(8), intent (in) :: m
real(8), intent (in) :: v
code = (1.0d0 - m) * ((m / (v / (1.0d0 - m))) + (-1.0d0))
end function
// (1-m) * (m/(v/(1-m)) - 1), evaluated in IEEE binary64.
public static double code(double m, double v) {
return (1.0 - m) * ((m / (v / (1.0 - m))) + -1.0);
}
def code(m, v):
    """(1-m) * (m/(v/(1-m)) - 1), evaluated in IEEE binary64."""
    rest = 1.0 - m
    quotient = m / (v / rest)
    return rest * (quotient + -1.0)
# (1-m) * (m/(v/(1-m)) - 1); explicit Float64() calls pin every step to binary64.
function code(m, v) return Float64(Float64(1.0 - m) * Float64(Float64(m / Float64(v / Float64(1.0 - m))) + -1.0)) end
% (1-m) * (m/(v/(1-m)) - 1), evaluated in double precision.
function tmp = code(m, v) tmp = (1.0 - m) * ((m / (v / (1.0 - m))) + -1.0); end
(* (1-m) * (m/(v/(1-m)) - 1); each step rounded to $MachinePrecision. *)
code[m_, v_] := N[(N[(1.0 - m), $MachinePrecision] * N[(N[(m / N[(v / N[(1.0 - m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(1 - m\right) \cdot \left(\frac{m}{\frac{v}{1 - m}} + -1\right)
\end{array}
Initial program 99.9%
*-commutative 99.9%
sub-neg 99.9%
associate-/l* 100.0%
metadata-eval 100.0%
Simplified 100.0%
Final simplification 100.0%
;; Alternative 3 (piecewise): m <= 1 uses (1-m)*(-1 + m/v); otherwise (1-m)*(-1 - m*(m/v)).
(FPCore (m v) :precision binary64 (if (<= m 1.0) (* (- 1.0 m) (+ -1.0 (/ m v))) (* (- 1.0 m) (- -1.0 (* m (/ m v))))))
double code(double m, double v) {
    /* Piecewise rewrite: for m <= 1 use (1-m)*(-1 + m/v);
       otherwise (1-m)*(-1 - m*(m/v)).  Both in IEEE binary64. */
    double rest = 1.0 - m;
    double ratio = m / v;
    return (m <= 1.0) ? rest * (-1.0 + ratio)
                      : rest * (-1.0 - m * ratio);
}
! Piecewise: m <= 1 uses (1-m)*(-1 + m/v); otherwise (1-m)*(-1 - m*(m/v)).
real(8) function code(m, v)
real(8), intent (in) :: m
real(8), intent (in) :: v
real(8) :: tmp
if (m <= 1.0d0) then
tmp = (1.0d0 - m) * ((-1.0d0) + (m / v))
else
tmp = (1.0d0 - m) * ((-1.0d0) - (m * (m / v)))
end if
code = tmp
end function
// Piecewise: m <= 1 uses (1-m)*(-1 + m/v); otherwise (1-m)*(-1 - m*(m/v)).
public static double code(double m, double v) {
double tmp;
if (m <= 1.0) {
tmp = (1.0 - m) * (-1.0 + (m / v));
} else {
tmp = (1.0 - m) * (-1.0 - (m * (m / v)));
}
return tmp;
}
def code(m, v):
    """Piecewise rewrite: m <= 1 uses (1-m)*(-1 + m/v); otherwise
    (1-m)*(-1 - m*(m/v)).

    The report's one-line rendering collapsed the newlines and was not
    valid Python; this restores the intended multi-line body.
    """
    tmp = 0
    if m <= 1.0:
        tmp = (1.0 - m) * (-1.0 + (m / v))
    else:
        tmp = (1.0 - m) * (-1.0 - (m * (m / v)))
    return tmp
# Piecewise rewrite: m <= 1 uses (1-m)*(-1 + m/v); otherwise (1-m)*(-1 - m*(m/v)).
# Restored from the report's collapsed one-line rendering, which dropped the
# newlines required between statements and was not valid Julia.
function code(m, v)
    tmp = 0.0
    if (m <= 1.0)
        tmp = Float64(Float64(1.0 - m) * Float64(-1.0 + Float64(m / v)))
    else
        tmp = Float64(Float64(1.0 - m) * Float64(-1.0 - Float64(m * Float64(m / v))))
    end
    return tmp
end
% Piecewise rewrite: m <= 1 uses (1-m)*(-1 + m/v); otherwise (1-m)*(-1 - m*(m/v)).
% Restored from the report's collapsed one-line rendering, which omitted the
% statement separators MATLAB needs after the if-condition.
function tmp_2 = code(m, v)
    tmp = 0.0;
    if (m <= 1.0)
        tmp = (1.0 - m) * (-1.0 + (m / v));
    else
        tmp = (1.0 - m) * (-1.0 - (m * (m / v)));
    end
    tmp_2 = tmp;
end
(* Piecewise: m <= 1 uses (1-m)*(-1 + m/v); otherwise (1-m)*(-1 - m*(m/v)). *)
code[m_, v_] := If[LessEqual[m, 1.0], N[(N[(1.0 - m), $MachinePrecision] * N[(-1.0 + N[(m / v), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(1.0 - m), $MachinePrecision] * N[(-1.0 - N[(m * N[(m / v), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;m \leq 1:\\
\;\;\;\;\left(1 - m\right) \cdot \left(-1 + \frac{m}{v}\right)\\
\mathbf{else}:\\
\;\;\;\;\left(1 - m\right) \cdot \left(-1 - m \cdot \frac{m}{v}\right)\\
\end{array}
\end{array}
if m < 1: Initial program 100.0%
Taylor expanded in m around 0 99.6%
if 1 < m: Initial program 99.9%
*-commutative 99.9%
sub-neg 99.9%
associate-/l* 99.9%
metadata-eval 99.9%
Simplified 99.9%
Taylor expanded in m around inf 99.0%
associate-*r/ 99.0%
neg-mul-1 99.0%
Simplified 99.0%
frac-2neg 99.0%
remove-double-neg 99.0%
associate-/r/ 99.0%
Applied egg-rr 99.0%
Final simplification 99.3%
;; Alternative 4: (1-m) * (-1 + (1-m)*(m/v)) in binary64.
(FPCore (m v) :precision binary64 (* (- 1.0 m) (+ -1.0 (* (- 1.0 m) (/ m v)))))
double code(double m, double v) {
    /* (1-m) * (-1 + (1-m)*(m/v)), evaluated step-by-step in IEEE binary64. */
    double rest = 1.0 - m;
    return rest * (-1.0 + rest * (m / v));
}
! (1-m) * (-1 + (1-m)*(m/v)), evaluated in double precision (binary64).
real(8) function code(m, v)
real(8), intent (in) :: m
real(8), intent (in) :: v
code = (1.0d0 - m) * ((-1.0d0) + ((1.0d0 - m) * (m / v)))
end function
// (1-m) * (-1 + (1-m)*(m/v)), evaluated in IEEE binary64.
public static double code(double m, double v) {
return (1.0 - m) * (-1.0 + ((1.0 - m) * (m / v)));
}
def code(m, v):
    """(1-m) * (-1 + (1-m)*(m/v)), evaluated in IEEE binary64."""
    rest = 1.0 - m
    return rest * (-1.0 + rest * (m / v))
# (1-m) * (-1 + (1-m)*(m/v)); explicit Float64() calls pin every step to binary64.
function code(m, v) return Float64(Float64(1.0 - m) * Float64(-1.0 + Float64(Float64(1.0 - m) * Float64(m / v)))) end
% (1-m) * (-1 + (1-m)*(m/v)), evaluated in double precision.
function tmp = code(m, v) tmp = (1.0 - m) * (-1.0 + ((1.0 - m) * (m / v))); end
(* (1-m) * (-1 + (1-m)*(m/v)); each step rounded to $MachinePrecision. *)
code[m_, v_] := N[(N[(1.0 - m), $MachinePrecision] * N[(-1.0 + N[(N[(1.0 - m), $MachinePrecision] * N[(m / v), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(1 - m\right) \cdot \left(-1 + \left(1 - m\right) \cdot \frac{m}{v}\right)
\end{array}
Initial program 99.9%
*-commutative 99.9%
sub-neg 99.9%
associate-*l/ 100.0%
metadata-eval 100.0%
Simplified 100.0%
Final simplification 100.0%
;; Alternative 5: -1 + (m + m/v) in binary64.
(FPCore (m v) :precision binary64 (+ -1.0 (+ m (/ m v))))
double code(double m, double v) {
    /* -1 + (m + m/v), evaluated step-by-step in IEEE binary64. */
    double total = m + (m / v);
    return -1.0 + total;
}
! -1 + (m + m/v), evaluated in double precision (binary64).
real(8) function code(m, v)
real(8), intent (in) :: m
real(8), intent (in) :: v
code = (-1.0d0) + (m + (m / v))
end function
// -1 + (m + m/v), evaluated in IEEE binary64.
public static double code(double m, double v) {
return -1.0 + (m + (m / v));
}
def code(m, v):
    """-1 + (m + m/v), evaluated in IEEE binary64."""
    total = m + (m / v)
    return -1.0 + total
# -1 + (m + m/v); explicit Float64() calls pin every step to binary64.
function code(m, v) return Float64(-1.0 + Float64(m + Float64(m / v))) end
% -1 + (m + m/v), evaluated in double precision.
function tmp = code(m, v) tmp = -1.0 + (m + (m / v)); end
(* -1 + (m + m/v); each step rounded to $MachinePrecision. *)
code[m_, v_] := N[(-1.0 + N[(m + N[(m / v), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
-1 + \left(m + \frac{m}{v}\right)
\end{array}
Initial program 99.9%
*-commutative 99.9%
sub-neg 99.9%
associate-*l/ 100.0%
metadata-eval 100.0%
Simplified 100.0%
Taylor expanded in m around 0 79.3%
+-commutative 79.3%
distribute-lft-in 79.3%
div-inv 79.4%
*-rgt-identity 79.4%
Applied egg-rr 79.4%
Final simplification 79.4%
;; Alternative 6: -1 + m/v in binary64.
(FPCore (m v) :precision binary64 (+ -1.0 (/ m v)))
double code(double m, double v) {
    /* -1 + m/v, evaluated in IEEE binary64. */
    double ratio = m / v;
    return ratio - 1.0;
}
! -1 + m/v, evaluated in double precision (binary64).
real(8) function code(m, v)
real(8), intent (in) :: m
real(8), intent (in) :: v
code = (-1.0d0) + (m / v)
end function
// -1 + m/v, evaluated in IEEE binary64.
public static double code(double m, double v) {
return -1.0 + (m / v);
}
def code(m, v):
    """-1 + m/v, evaluated in IEEE binary64."""
    ratio = m / v
    return ratio - 1.0
# -1 + m/v; explicit Float64() calls pin every step to binary64.
function code(m, v) return Float64(-1.0 + Float64(m / v)) end
% -1 + m/v, evaluated in double precision.
function tmp = code(m, v) tmp = -1.0 + (m / v); end
(* -1 + m/v; each step rounded to $MachinePrecision. *)
code[m_, v_] := N[(-1.0 + N[(m / v), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
-1 + \frac{m}{v}
\end{array}
Initial program 99.9%
*-commutative 99.9%
sub-neg 99.9%
associate-*l/ 100.0%
metadata-eval 100.0%
Simplified 100.0%
Taylor expanded in m around 0 79.3%
+-commutative 79.3%
distribute-lft-in 79.3%
div-inv 79.4%
*-rgt-identity 79.4%
Applied egg-rr 79.4%
Taylor expanded in v around 0 79.4%
Final simplification 79.4%
;; Alternative 7: m - 1 in binary64 (v eliminated).
(FPCore (m v) :precision binary64 (+ m -1.0))
double code(double m, double v) {
    /* m - 1; v is unused (eliminated by the Taylor expansion in v). */
    (void) v;
    return m - 1.0;
}
! m - 1 in double precision; v is unused (eliminated by simplification).
real(8) function code(m, v)
real(8), intent (in) :: m
real(8), intent (in) :: v
code = m + (-1.0d0)
end function
// m - 1 in binary64; v is unused (eliminated by simplification).
public static double code(double m, double v) {
return m + -1.0;
}
def code(m, v):
    """m - 1 in binary64; v is unused (eliminated by simplification)."""
    return m - 1.0
# m - 1 in binary64; v is unused (eliminated by simplification).
function code(m, v) return Float64(m + -1.0) end
% m - 1 in double precision; v is unused (eliminated by simplification).
function tmp = code(m, v) tmp = m + -1.0; end
(* m - 1 rounded to $MachinePrecision; v is unused. *)
code[m_, v_] := N[(m + -1.0), $MachinePrecision]
\begin{array}{l}
\\
m + -1
\end{array}
Initial program 99.9%
*-commutative 99.9%
sub-neg 99.9%
associate-*l/ 100.0%
metadata-eval 100.0%
Simplified 100.0%
Taylor expanded in v around inf 26.9%
neg-mul-1 26.9%
neg-sub0 26.9%
associate--r- 26.9%
metadata-eval 26.9%
Simplified 26.9%
Final simplification 26.9%
;; Alternative 8 (degenerate): constant -1; both arguments eliminated.
(FPCore (m v) :precision binary64 -1.0)
double code(double m, double v) {
    /* Constant -1; both arguments unused (degenerate approximation). */
    (void) m;
    (void) v;
    return -1.0;
}
! Constant -1; both arguments unused (degenerate approximation).
real(8) function code(m, v)
real(8), intent (in) :: m
real(8), intent (in) :: v
code = -1.0d0
end function
// Constant -1; both arguments unused (degenerate approximation).
public static double code(double m, double v) {
return -1.0;
}
def code(m, v):
    """Constant -1; both arguments unused (degenerate approximation)."""
    return -1.0
# Constant -1; both arguments unused (degenerate approximation).
function code(m, v) return -1.0 end
% Constant -1; both arguments unused (degenerate approximation).
function tmp = code(m, v) tmp = -1.0; end
(* Constant -1; both arguments unused (degenerate approximation). *)
code[m_, v_] := -1.0
\begin{array}{l}
\\
-1
\end{array}
Initial program 99.9%
*-commutative 99.9%
sub-neg 99.9%
associate-*l/ 100.0%
metadata-eval 100.0%
Simplified 100.0%
Taylor expanded in m around 0 24.4%
Final simplification 24.4%
herbie shell --seed 2023334
;; Herbie job specification: binary64, preconditions 0 < m and 0 < v < 0.25.
(FPCore (m v)
:name "b parameter of renormalized beta distribution"
:precision binary64
:pre (and (and (< 0.0 m) (< 0.0 v)) (< v 0.25))
(* (- (/ (* m (- 1.0 m)) v) 1.0) (- 1.0 m)))