
(FPCore (m v) :precision binary64 (* (- (/ (* m (- 1.0 m)) v) 1.0) m))
/* Herbie input program: ((m * (1.0 - m)) / v - 1.0) * m in binary64.
   Evaluation order fixes the rounding; do not re-associate. */
double code(double m, double v) {
return (((m * (1.0 - m)) / v) - 1.0) * m;
}
! Herbie input program: ((m*(1-m))/v - 1)*m in binary64.
! Evaluation order fixes the rounding; do not re-associate.
real(8) function code(m, v)
real(8), intent (in) :: m
real(8), intent (in) :: v
code = (((m * (1.0d0 - m)) / v) - 1.0d0) * m
end function
// Herbie input program: ((m * (1.0 - m)) / v - 1.0) * m in binary64.
// Evaluation order fixes the rounding; do not re-associate.
public static double code(double m, double v) {
return (((m * (1.0 - m)) / v) - 1.0) * m;
}
def code(m, v):
    """Herbie input program: ((m * (1 - m)) / v - 1) * m.

    Same operation order as the generated one-liner, so every
    intermediate rounds identically in binary64.
    """
    ratio = (m * (1.0 - m)) / v
    return (ratio - 1.0) * m
# Herbie input program; Float64(...) makes each intermediate rounding explicit.
function code(m, v) return Float64(Float64(Float64(Float64(m * Float64(1.0 - m)) / v) - 1.0) * m) end
% Herbie input program: ((m*(1-m))/v - 1)*m.
% NOTE(review): statements on the function-declaration line may not parse in all MATLAB versions - confirm.
function tmp = code(m, v) tmp = (((m * (1.0 - m)) / v) - 1.0) * m; end
(* Herbie input program; each N[..., $MachinePrecision] rounds an intermediate. *)
code[m_, v_] := N[(N[(N[(N[(m * N[(1.0 - m), $MachinePrecision]), $MachinePrecision] / v), $MachinePrecision] - 1.0), $MachinePrecision] * m), $MachinePrecision]
\begin{array}{l}
\\
\left(\frac{m \cdot \left(1 - m\right)}{v} - 1\right) \cdot m
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 10 alternatives:
| Alternative | Accuracy | Speedup |
| --- | --- | --- |
(FPCore (m v) :precision binary64 (* (- (/ (* m (- 1.0 m)) v) 1.0) m))
/* Herbie input program (repeated in report body): ((m*(1-m))/v - 1)*m.
   Evaluation order fixes the rounding; do not re-associate. */
double code(double m, double v) {
return (((m * (1.0 - m)) / v) - 1.0) * m;
}
! Herbie input program (repeated in report body): ((m*(1-m))/v - 1)*m.
! Evaluation order fixes the rounding; do not re-associate.
real(8) function code(m, v)
real(8), intent (in) :: m
real(8), intent (in) :: v
code = (((m * (1.0d0 - m)) / v) - 1.0d0) * m
end function
// Herbie input program (repeated in report body): ((m*(1-m))/v - 1)*m.
// Evaluation order fixes the rounding; do not re-associate.
public static double code(double m, double v) {
return (((m * (1.0 - m)) / v) - 1.0) * m;
}
def code(m, v):
    """Herbie input program (repeated): ((m * (1 - m)) / v - 1) * m.

    Operation order matches the generated one-liner exactly, so the
    result is bit-for-bit identical.
    """
    variance_term = m * (1.0 - m)
    return ((variance_term / v) - 1.0) * m
# Herbie input program (repeated); Float64(...) makes each rounding explicit.
function code(m, v) return Float64(Float64(Float64(Float64(m * Float64(1.0 - m)) / v) - 1.0) * m) end
% Herbie input program (repeated): ((m*(1-m))/v - 1)*m.
% NOTE(review): statements on the function-declaration line may not parse in all MATLAB versions - confirm.
function tmp = code(m, v) tmp = (((m * (1.0 - m)) / v) - 1.0) * m; end
(* Herbie input program (repeated); N[..., $MachinePrecision] rounds each step. *)
code[m_, v_] := N[(N[(N[(N[(m * N[(1.0 - m), $MachinePrecision]), $MachinePrecision] / v), $MachinePrecision] - 1.0), $MachinePrecision] * m), $MachinePrecision]
\begin{array}{l}
\\
\left(\frac{m \cdot \left(1 - m\right)}{v} - 1\right) \cdot m
\end{array}
(FPCore (m v) :precision binary64 (fma (* (- 1.0 m) (/ m v)) m (- m)))
/* Herbie alternative: fma((1-m)*(m/v), m, -m) - the final fused multiply-add
   rounds only once. Requires fma from <math.h> (C99). */
double code(double m, double v) {
return fma(((1.0 - m) * (m / v)), m, -m);
}
# Herbie alternative: fma((1-m)*(m/v), m, -m); fma rounds only once.
function code(m, v) return fma(Float64(Float64(1.0 - m) * Float64(m / v)), m, Float64(-m)) end
(* Herbie alternative; the fma collapses to an unfused multiply-add here. *)
code[m_, v_] := N[(N[(N[(1.0 - m), $MachinePrecision] * N[(m / v), $MachinePrecision]), $MachinePrecision] * m + (-m)), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\left(1 - m\right) \cdot \frac{m}{v}, m, -m\right)
\end{array}
Initial program 99.8%
*-commutative 99.8%
sub-neg 99.8%
associate-/l* 99.8%
metadata-eval 99.8%
Simplified 99.8%
distribute-rgt-in 99.8%
fma-define 99.8%
associate-*r/ 99.8%
*-commutative 99.8%
associate-*r/ 99.9%
neg-mul-1 99.9%
Applied egg-rr 99.9%
(FPCore (m v) :precision binary64 (* m (fma (- 1.0 m) (/ m v) -1.0)))
/* Herbie alternative: m * fma(1-m, m/v, -1). Requires fma from <math.h> (C99). */
double code(double m, double v) {
return m * fma((1.0 - m), (m / v), -1.0);
}
# Herbie alternative: m * fma(1-m, m/v, -1); fma rounds only once.
function code(m, v) return Float64(m * fma(Float64(1.0 - m), Float64(m / v), -1.0)) end
(* Herbie alternative; the fma collapses to an unfused multiply-add here. *)
code[m_, v_] := N[(m * N[(N[(1.0 - m), $MachinePrecision] * N[(m / v), $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
m \cdot \mathsf{fma}\left(1 - m, \frac{m}{v}, -1\right)
\end{array}
Initial program 99.8%
*-commutative 99.8%
*-commutative 99.8%
associate-/l* 99.9%
fma-neg 99.9%
metadata-eval 99.9%
Simplified 99.9%
(FPCore (m v) :precision binary64 (if (<= m 1.0) (* m (+ -1.0 (/ 1.0 (/ v m)))) (* m (- -1.0 (* m (/ m v))))))
/* Herbie alternative, piecewise: m*(-1 + 1/(v/m)) for m <= 1,
   else m*(-1 - m*(m/v)) (Taylor expansion around m = inf). */
double code(double m, double v) {
double tmp;
if (m <= 1.0) {
tmp = m * (-1.0 + (1.0 / (v / m)));
} else {
tmp = m * (-1.0 - (m * (m / v)));
}
return tmp;
}
! Herbie alternative, piecewise: m*(-1 + 1/(v/m)) for m <= 1,
! otherwise m*(-1 - m*(m/v)).
real(8) function code(m, v)
real(8), intent (in) :: m
real(8), intent (in) :: v
real(8) :: tmp
if (m <= 1.0d0) then
tmp = m * ((-1.0d0) + (1.0d0 / (v / m)))
else
tmp = m * ((-1.0d0) - (m * (m / v)))
end if
code = tmp
end function
// Herbie alternative, piecewise: m*(-1 + 1/(v/m)) for m <= 1,
// else m*(-1 - m*(m/v)).
public static double code(double m, double v) {
double tmp;
if (m <= 1.0) {
tmp = m * (-1.0 + (1.0 / (v / m)));
} else {
tmp = m * (-1.0 - (m * (m / v)));
}
return tmp;
}
def code(m, v):
    """Herbie piecewise alternative: m*(-1 + 1/(v/m)) for m <= 1,
    else m*(-1 - m*(m/v)).

    Restored from a newline-stripped one-liner that was not valid
    Python syntax (matches the C/Fortran translations of the same
    FPCore expression).
    """
    if m <= 1.0:
        tmp = m * (-1.0 + (1.0 / (v / m)))
    else:
        tmp = m * (-1.0 - (m * (m / v)))
    return tmp
function code(m, v)
    # Herbie piecewise alternative. Restored from a newline-stripped
    # one-liner (`tmp = 0.0 if (...)`) that is not valid Julia syntax.
    tmp = 0.0
    if (m <= 1.0)
        tmp = Float64(m * Float64(-1.0 + Float64(1.0 / Float64(v / m))))
    else
        tmp = Float64(m * Float64(-1.0 - Float64(m * Float64(m / v))))
    end
    return tmp
end
% Herbie piecewise alternative: m*(-1 + 1/(v/m)) for m <= 1, else m*(-1 - m*(m/v)).
% NOTE(review): statements on the function-declaration line may not parse in all MATLAB versions - confirm.
function tmp_2 = code(m, v) tmp = 0.0; if (m <= 1.0) tmp = m * (-1.0 + (1.0 / (v / m))); else tmp = m * (-1.0 - (m * (m / v))); end tmp_2 = tmp; end
(* Herbie piecewise alternative; If[] selects by m <= 1 as in the other translations. *)
code[m_, v_] := If[LessEqual[m, 1.0], N[(m * N[(-1.0 + N[(1.0 / N[(v / m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(m * N[(-1.0 - N[(m * N[(m / v), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;m \leq 1:\\
\;\;\;\;m \cdot \left(-1 + \frac{1}{\frac{v}{m}}\right)\\
\mathbf{else}:\\
\;\;\;\;m \cdot \left(-1 - m \cdot \frac{m}{v}\right)\\
\end{array}
\end{array}
if m < 1: Initial program 99.7%
*-commutative 99.7%
sub-neg 99.7%
associate-/l* 99.7%
metadata-eval 99.7%
Simplified 99.7%
Taylor expanded in m around 0 97.8%
div-inv 97.8%
clear-num 97.8%
Applied egg-rr 97.8%
if 1 < m: Initial program 99.9%
*-commutative 99.9%
sub-neg 99.9%
associate-/l* 99.9%
metadata-eval 99.9%
Simplified 99.9%
Taylor expanded in m around inf 98.3%
neg-mul-1 98.3%
distribute-neg-frac2 98.3%
Simplified 98.3%
Final simplification 98.0%
(FPCore (m v) :precision binary64 (if (<= m 1.0) (* m (+ -1.0 (/ 1.0 (/ v m)))) (* m (- -1.0 (/ m v)))))
/* Herbie alternative, piecewise: m*(-1 + 1/(v/m)) for m <= 1,
   else m*(-1 - m/v). */
double code(double m, double v) {
double tmp;
if (m <= 1.0) {
tmp = m * (-1.0 + (1.0 / (v / m)));
} else {
tmp = m * (-1.0 - (m / v));
}
return tmp;
}
! Herbie alternative, piecewise: m*(-1 + 1/(v/m)) for m <= 1,
! otherwise m*(-1 - m/v).
real(8) function code(m, v)
real(8), intent (in) :: m
real(8), intent (in) :: v
real(8) :: tmp
if (m <= 1.0d0) then
tmp = m * ((-1.0d0) + (1.0d0 / (v / m)))
else
tmp = m * ((-1.0d0) - (m / v))
end if
code = tmp
end function
// Herbie alternative, piecewise: m*(-1 + 1/(v/m)) for m <= 1,
// else m*(-1 - m/v).
public static double code(double m, double v) {
double tmp;
if (m <= 1.0) {
tmp = m * (-1.0 + (1.0 / (v / m)));
} else {
tmp = m * (-1.0 - (m / v));
}
return tmp;
}
def code(m, v):
    """Herbie piecewise alternative: m*(-1 + 1/(v/m)) for m <= 1,
    else m*(-1 - m/v).

    Restored from a newline-stripped one-liner that was not valid
    Python syntax (matches the C/Fortran translations of the same
    FPCore expression).
    """
    if m <= 1.0:
        tmp = m * (-1.0 + (1.0 / (v / m)))
    else:
        tmp = m * (-1.0 - (m / v))
    return tmp
function code(m, v)
    # Herbie piecewise alternative. Restored from a newline-stripped
    # one-liner (`tmp = 0.0 if (...)`) that is not valid Julia syntax.
    tmp = 0.0
    if (m <= 1.0)
        tmp = Float64(m * Float64(-1.0 + Float64(1.0 / Float64(v / m))))
    else
        tmp = Float64(m * Float64(-1.0 - Float64(m / v)))
    end
    return tmp
end
% Herbie piecewise alternative: m*(-1 + 1/(v/m)) for m <= 1, else m*(-1 - m/v).
% NOTE(review): statements on the function-declaration line may not parse in all MATLAB versions - confirm.
function tmp_2 = code(m, v) tmp = 0.0; if (m <= 1.0) tmp = m * (-1.0 + (1.0 / (v / m))); else tmp = m * (-1.0 - (m / v)); end tmp_2 = tmp; end
(* Herbie piecewise alternative; If[] selects by m <= 1 as in the other translations. *)
code[m_, v_] := If[LessEqual[m, 1.0], N[(m * N[(-1.0 + N[(1.0 / N[(v / m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(m * N[(-1.0 - N[(m / v), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;m \leq 1:\\
\;\;\;\;m \cdot \left(-1 + \frac{1}{\frac{v}{m}}\right)\\
\mathbf{else}:\\
\;\;\;\;m \cdot \left(-1 - \frac{m}{v}\right)\\
\end{array}
\end{array}
if m < 1: Initial program 99.7%
*-commutative 99.7%
sub-neg 99.7%
associate-/l* 99.7%
metadata-eval 99.7%
Simplified 99.7%
Taylor expanded in m around 0 97.8%
div-inv 97.8%
clear-num 97.8%
Applied egg-rr 97.8%
if 1 < m: Initial program 99.9%
*-commutative 99.9%
sub-neg 99.9%
associate-/l* 99.9%
metadata-eval 99.9%
Simplified 99.9%
Taylor expanded in m around 0 0.1%
div-inv 0.1%
frac-2neg 0.1%
neg-sub0 0.1%
div-sub 0.1%
add-sqr-sqrt 0.1%
sqrt-prod 0.1%
sqr-neg 0.1%
sqrt-unprod 0.0%
add-sqr-sqrt 80.0%
frac-2neg 80.0%
Applied egg-rr 80.0%
div0 80.0%
neg-sub0 80.0%
distribute-frac-neg2 80.0%
Simplified 80.0%
Final simplification 89.9%
(FPCore (m v) :precision binary64 (if (<= m 1.0) (* m (+ (/ m v) -1.0)) (* m (- -1.0 (/ m v)))))
/* Herbie alternative, piecewise: m*((m/v) - 1) for m <= 1,
   else m*(-1 - m/v). */
double code(double m, double v) {
double tmp;
if (m <= 1.0) {
tmp = m * ((m / v) + -1.0);
} else {
tmp = m * (-1.0 - (m / v));
}
return tmp;
}
! Herbie alternative, piecewise: m*((m/v) - 1) for m <= 1,
! otherwise m*(-1 - m/v).
real(8) function code(m, v)
real(8), intent (in) :: m
real(8), intent (in) :: v
real(8) :: tmp
if (m <= 1.0d0) then
tmp = m * ((m / v) + (-1.0d0))
else
tmp = m * ((-1.0d0) - (m / v))
end if
code = tmp
end function
// Herbie alternative, piecewise: m*((m/v) - 1) for m <= 1,
// else m*(-1 - m/v).
public static double code(double m, double v) {
double tmp;
if (m <= 1.0) {
tmp = m * ((m / v) + -1.0);
} else {
tmp = m * (-1.0 - (m / v));
}
return tmp;
}
def code(m, v):
    """Herbie piecewise alternative: m*((m/v) - 1) for m <= 1,
    else m*(-1 - m/v).

    Restored from a newline-stripped one-liner that was not valid
    Python syntax (matches the C/Fortran translations of the same
    FPCore expression). The `+ -1.0` spelling is kept from FPCore.
    """
    if m <= 1.0:
        tmp = m * ((m / v) + -1.0)
    else:
        tmp = m * (-1.0 - (m / v))
    return tmp
function code(m, v)
    # Herbie piecewise alternative. Restored from a newline-stripped
    # one-liner (`tmp = 0.0 if (...)`) that is not valid Julia syntax.
    tmp = 0.0
    if (m <= 1.0)
        tmp = Float64(m * Float64(Float64(m / v) + -1.0))
    else
        tmp = Float64(m * Float64(-1.0 - Float64(m / v)))
    end
    return tmp
end
% Herbie piecewise alternative: m*((m/v) - 1) for m <= 1, else m*(-1 - m/v).
% NOTE(review): statements on the function-declaration line may not parse in all MATLAB versions - confirm.
function tmp_2 = code(m, v) tmp = 0.0; if (m <= 1.0) tmp = m * ((m / v) + -1.0); else tmp = m * (-1.0 - (m / v)); end tmp_2 = tmp; end
(* Herbie piecewise alternative; If[] selects by m <= 1 as in the other translations. *)
code[m_, v_] := If[LessEqual[m, 1.0], N[(m * N[(N[(m / v), $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision], N[(m * N[(-1.0 - N[(m / v), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;m \leq 1:\\
\;\;\;\;m \cdot \left(\frac{m}{v} + -1\right)\\
\mathbf{else}:\\
\;\;\;\;m \cdot \left(-1 - \frac{m}{v}\right)\\
\end{array}
\end{array}
if m < 1: Initial program 99.7%
Taylor expanded in m around 0 97.8%
if 1 < m: Initial program 99.9%
*-commutative 99.9%
sub-neg 99.9%
associate-/l* 99.9%
metadata-eval 99.9%
Simplified 99.9%
Taylor expanded in m around 0 0.1%
div-inv 0.1%
frac-2neg 0.1%
neg-sub0 0.1%
div-sub 0.1%
add-sqr-sqrt 0.1%
sqrt-prod 0.1%
sqr-neg 0.1%
sqrt-unprod 0.0%
add-sqr-sqrt 80.0%
frac-2neg 80.0%
Applied egg-rr 80.0%
div0 80.0%
neg-sub0 80.0%
distribute-frac-neg2 80.0%
Simplified 80.0%
Final simplification 89.9%
(FPCore (m v) :precision binary64 (if (<= m 1.0) (* m (+ (/ m v) -1.0)) (- m)))
/* Herbie alternative, piecewise: m*((m/v) - 1) for m <= 1,
   else the Taylor limit -m. */
double code(double m, double v) {
double tmp;
if (m <= 1.0) {
tmp = m * ((m / v) + -1.0);
} else {
tmp = -m;
}
return tmp;
}
! Herbie alternative, piecewise: m*((m/v) - 1) for m <= 1,
! otherwise the Taylor limit -m.
real(8) function code(m, v)
real(8), intent (in) :: m
real(8), intent (in) :: v
real(8) :: tmp
if (m <= 1.0d0) then
tmp = m * ((m / v) + (-1.0d0))
else
tmp = -m
end if
code = tmp
end function
// Herbie alternative, piecewise: m*((m/v) - 1) for m <= 1,
// else the Taylor limit -m.
public static double code(double m, double v) {
double tmp;
if (m <= 1.0) {
tmp = m * ((m / v) + -1.0);
} else {
tmp = -m;
}
return tmp;
}
def code(m, v):
    """Herbie piecewise alternative: m*((m/v) - 1) for m <= 1, else -m.

    Restored from a newline-stripped one-liner that was not valid
    Python syntax (matches the C/Fortran translations of the same
    FPCore expression).
    """
    if m <= 1.0:
        tmp = m * ((m / v) + -1.0)
    else:
        tmp = -m
    return tmp
function code(m, v)
    # Herbie piecewise alternative. Restored from a newline-stripped
    # one-liner (`tmp = 0.0 if (...)`) that is not valid Julia syntax.
    tmp = 0.0
    if (m <= 1.0)
        tmp = Float64(m * Float64(Float64(m / v) + -1.0))
    else
        tmp = Float64(-m)
    end
    return tmp
end
% Herbie piecewise alternative: m*((m/v) - 1) for m <= 1, else -m.
% NOTE(review): statements on the function-declaration line may not parse in all MATLAB versions - confirm.
function tmp_2 = code(m, v) tmp = 0.0; if (m <= 1.0) tmp = m * ((m / v) + -1.0); else tmp = -m; end tmp_2 = tmp; end
(* Herbie piecewise alternative; the m > 1 branch reduces to -m. *)
code[m_, v_] := If[LessEqual[m, 1.0], N[(m * N[(N[(m / v), $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision], (-m)]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;m \leq 1:\\
\;\;\;\;m \cdot \left(\frac{m}{v} + -1\right)\\
\mathbf{else}:\\
\;\;\;\;-m\\
\end{array}
\end{array}
if m < 1: Initial program 99.7%
Taylor expanded in m around 0 97.8%
if 1 < m: Initial program 99.9%
*-commutative 99.9%
sub-neg 99.9%
associate-/l* 99.9%
metadata-eval 99.9%
Simplified 99.9%
Taylor expanded in m around 0 5.9%
neg-mul-1 5.9%
Simplified 5.9%
Final simplification 56.9%
(FPCore (m v) :precision binary64 (* m (+ -1.0 (/ (- 1.0 m) (/ v m)))))
/* Herbie alternative: m * (-1 + (1 - m)/(v/m)); keep evaluation order. */
double code(double m, double v) {
return m * (-1.0 + ((1.0 - m) / (v / m)));
}
! Herbie alternative: m * (-1 + (1 - m)/(v/m)); keep evaluation order.
real(8) function code(m, v)
real(8), intent (in) :: m
real(8), intent (in) :: v
code = m * ((-1.0d0) + ((1.0d0 - m) / (v / m)))
end function
// Herbie alternative: m * (-1 + (1 - m)/(v/m)); keep evaluation order.
public static double code(double m, double v) {
return m * (-1.0 + ((1.0 - m) / (v / m)));
}
def code(m, v):
    """Herbie alternative: m * (-1 + (1 - m) / (v / m)).

    Operation order matches the generated one-liner exactly, so the
    result is bit-for-bit identical.
    """
    scaled_v = v / m
    correction = (1.0 - m) / scaled_v
    return m * (-1.0 + correction)
# Herbie alternative: m * (-1 + (1 - m)/(v/m)); Float64(...) rounds each step.
function code(m, v) return Float64(m * Float64(-1.0 + Float64(Float64(1.0 - m) / Float64(v / m)))) end
% Herbie alternative: m * (-1 + (1 - m)/(v/m)).
% NOTE(review): statements on the function-declaration line may not parse in all MATLAB versions - confirm.
function tmp = code(m, v) tmp = m * (-1.0 + ((1.0 - m) / (v / m))); end
(* Herbie alternative: m * (-1 + (1 - m)/(v/m)); N[...] rounds each step. *)
code[m_, v_] := N[(m * N[(-1.0 + N[(N[(1.0 - m), $MachinePrecision] / N[(v / m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
m \cdot \left(-1 + \frac{1 - m}{\frac{v}{m}}\right)
\end{array}
Initial program 99.8%
*-commutative 99.8%
sub-neg 99.8%
associate-/l* 99.8%
metadata-eval 99.8%
Simplified 99.8%
associate-*r/ 99.8%
*-commutative 99.8%
associate-*r/ 99.9%
clear-num 99.9%
un-div-inv 99.8%
Applied egg-rr 99.8%
Final simplification 99.8%
(FPCore (m v) :precision binary64 (* m (+ -1.0 (* m (/ (- 1.0 m) v)))))
/* Herbie alternative: m * (-1 + m*((1 - m)/v)); keep evaluation order. */
double code(double m, double v) {
return m * (-1.0 + (m * ((1.0 - m) / v)));
}
! Herbie alternative: m * (-1 + m*((1 - m)/v)); keep evaluation order.
real(8) function code(m, v)
real(8), intent (in) :: m
real(8), intent (in) :: v
code = m * ((-1.0d0) + (m * ((1.0d0 - m) / v)))
end function
// Herbie alternative: m * (-1 + m*((1 - m)/v)); keep evaluation order.
public static double code(double m, double v) {
return m * (-1.0 + (m * ((1.0 - m) / v)));
}
def code(m, v):
    """Herbie alternative: m * (-1 + m * ((1 - m) / v)).

    Operation order matches the generated one-liner exactly, so the
    result is bit-for-bit identical.
    """
    slope = (1.0 - m) / v
    inner = m * slope
    return m * (-1.0 + inner)
# Herbie alternative: m * (-1 + m*((1 - m)/v)); Float64(...) rounds each step.
function code(m, v) return Float64(m * Float64(-1.0 + Float64(m * Float64(Float64(1.0 - m) / v)))) end
% Herbie alternative: m * (-1 + m*((1 - m)/v)).
% NOTE(review): statements on the function-declaration line may not parse in all MATLAB versions - confirm.
function tmp = code(m, v) tmp = m * (-1.0 + (m * ((1.0 - m) / v))); end
(* Herbie alternative: m * (-1 + m*((1 - m)/v)); N[...] rounds each step. *)
code[m_, v_] := N[(m * N[(-1.0 + N[(m * N[(N[(1.0 - m), $MachinePrecision] / v), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
m \cdot \left(-1 + m \cdot \frac{1 - m}{v}\right)
\end{array}
Initial program 99.8%
*-commutative 99.8%
sub-neg 99.8%
associate-/l* 99.8%
metadata-eval 99.8%
Simplified 99.8%
Final simplification 99.8%
(FPCore (m v) :precision binary64 (- m))
/* Herbie alternative (Taylor in m around 0): just -m; v is unused. */
double code(double m, double v) {
return -m;
}
! Herbie alternative (Taylor in m around 0): just -m; v is unused.
real(8) function code(m, v)
real(8), intent (in) :: m
real(8), intent (in) :: v
code = -m
end function
// Herbie alternative (Taylor in m around 0): just -m; v is unused.
public static double code(double m, double v) {
return -m;
}
def code(m, v):
    """Herbie alternative (Taylor in m around 0): the negation of m.

    v is unused; it is kept so the signature matches the other
    alternatives in this report.
    """
    result = -m
    return result
# Herbie alternative (Taylor in m around 0): just -m; v is unused.
function code(m, v) return Float64(-m) end
% Herbie alternative (Taylor in m around 0): just -m; v is unused.
% NOTE(review): statements on the function-declaration line may not parse in all MATLAB versions - confirm.
function tmp = code(m, v) tmp = -m; end
(* Herbie alternative (Taylor in m around 0): just -m; v is unused. *)
code[m_, v_] := (-m)
\begin{array}{l}
\\
-m
\end{array}
Initial program 99.8%
*-commutative 99.8%
sub-neg 99.8%
associate-/l* 99.8%
metadata-eval 99.8%
Simplified 99.8%
Taylor expanded in m around 0 30.3%
neg-mul-1 30.3%
Simplified 30.3%
(FPCore (m v) :precision binary64 m)
/* Herbie alternative (Taylor expansion): identity in m; v is unused. */
double code(double m, double v) {
return m;
}
! Herbie alternative (Taylor expansion): identity in m; v is unused.
real(8) function code(m, v)
real(8), intent (in) :: m
real(8), intent (in) :: v
code = m
end function
// Herbie alternative (Taylor expansion): identity in m; v is unused.
public static double code(double m, double v) {
return m;
}
def code(m, v):
    """Herbie alternative (Taylor expansion): returns m unchanged.

    v is unused; it is kept so the signature matches the other
    alternatives in this report.
    """
    result = m
    return result
# Herbie alternative (Taylor expansion): identity in m; v is unused.
function code(m, v) return m end
% Herbie alternative (Taylor expansion): identity in m; v is unused.
% NOTE(review): statements on the function-declaration line may not parse in all MATLAB versions - confirm.
function tmp = code(m, v) tmp = m; end
(* Herbie alternative (Taylor expansion): identity in m; v is unused. *)
code[m_, v_] := m
\begin{array}{l}
\\
m
\end{array}
Initial program 99.8%
*-commutative 99.8%
sub-neg 99.8%
associate-/l* 99.8%
metadata-eval 99.8%
Simplified 99.8%
Taylor expanded in m around 0 54.3%
Taylor expanded in v around 0 37.3%
+-commutative 37.3%
mul-1-neg 37.3%
unpow2 37.3%
*-commutative 37.3%
distribute-lft-neg-out 37.3%
distribute-rgt-out 37.3%
Simplified 37.3%
Taylor expanded in m around 0 20.6%
associate-*r* 20.6%
neg-mul-1 20.6%
Simplified 20.6%
associate-/l* 30.3%
*-inverses 30.3%
distribute-lft-neg-out 30.3%
metadata-eval 30.3%
div-inv 30.3%
/-rgt-identity 30.3%
neg-sub0 30.3%
sub-neg 30.3%
add-sqr-sqrt 0.0%
sqrt-unprod 3.3%
sqr-neg 3.3%
sqrt-unprod 3.3%
add-sqr-sqrt 3.3%
Applied egg-rr 3.3%
+-lft-identity 3.3%
Simplified 3.3%
herbie shell --seed 2024108
(FPCore (m v)
:name "a parameter of renormalized beta distribution"
:precision binary64
:pre (and (and (< 0.0 m) (< 0.0 v)) (< v 0.25))
(* (- (/ (* m (- 1.0 m)) v) 1.0) m))