
;; Initial program: the "GTR1 distribution" (see the named FPCore at the end
;; of this report), evaluated in binary32:
;;   (alpha^2 - 1) / (pi * log(alpha^2) * (1 + (alpha^2 - 1) * cosTheta^2))
;; t_0 caches alpha^2 - 1, which appears twice.
(FPCore (cosTheta alpha)
:precision binary32
(let* ((t_0 (- (* alpha alpha) 1.0)))
(/
t_0
(* (* PI (log (* alpha alpha))) (+ 1.0 (* (* t_0 cosTheta) cosTheta))))))
/* Initial program in C (binary32):
 *   (alpha^2 - 1) / (pi * log(alpha^2) * (1 + (alpha^2 - 1) * cosTheta^2))
 * t_0 caches alpha^2 - 1. Keep the exact operation order: float rounding
 * at each step is part of the accuracy contract. Needs <math.h> (M_PI, logf). */
float code(float cosTheta, float alpha) {
float t_0 = (alpha * alpha) - 1.0f;
return t_0 / ((((float) M_PI) * logf((alpha * alpha))) * (1.0f + ((t_0 * cosTheta) * cosTheta)));
}
# Julia port of the initial program; explicit Float32() on every intermediate
# reproduces binary32 rounding step-for-step.
function code(cosTheta, alpha) t_0 = Float32(Float32(alpha * alpha) - Float32(1.0)) return Float32(t_0 / Float32(Float32(Float32(pi) * log(Float32(alpha * alpha))) * Float32(Float32(1.0) + Float32(Float32(t_0 * cosTheta) * cosTheta)))) end
% MATLAB/Octave port of the initial program; single() casts keep binary32 rounding.
function tmp = code(cosTheta, alpha) t_0 = (alpha * alpha) - single(1.0); tmp = t_0 / ((single(pi) * log((alpha * alpha))) * (single(1.0) + ((t_0 * cosTheta) * cosTheta))); end
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \alpha \cdot \alpha - 1\\
\frac{t_0}{\left(\pi \cdot \log \left(\alpha \cdot \alpha\right)\right) \cdot \left(1 + \left(t_0 \cdot cosTheta\right) \cdot cosTheta\right)}
\end{array}
\end{array}
Sampling outcomes in binary32 precision:
Herbie found 9 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
;; Alternative 1: identical to the initial program (the report lists the
;; starting expression as the first alternative).
(FPCore (cosTheta alpha)
:precision binary32
(let* ((t_0 (- (* alpha alpha) 1.0)))
(/
t_0
(* (* PI (log (* alpha alpha))) (+ 1.0 (* (* t_0 cosTheta) cosTheta))))))
/* Alternative 1 in C — byte-identical to the initial program above.
 * Preserve the operation order; binary32 rounding is the contract. */
float code(float cosTheta, float alpha) {
float t_0 = (alpha * alpha) - 1.0f;
return t_0 / ((((float) M_PI) * logf((alpha * alpha))) * (1.0f + ((t_0 * cosTheta) * cosTheta)));
}
# Alternative 1 in Julia — same as the initial program; Float32() forces binary32 rounding.
function code(cosTheta, alpha) t_0 = Float32(Float32(alpha * alpha) - Float32(1.0)) return Float32(t_0 / Float32(Float32(Float32(pi) * log(Float32(alpha * alpha))) * Float32(Float32(1.0) + Float32(Float32(t_0 * cosTheta) * cosTheta)))) end
% Alternative 1 in MATLAB/Octave — same as the initial program (single precision).
function tmp = code(cosTheta, alpha) t_0 = (alpha * alpha) - single(1.0); tmp = t_0 / ((single(pi) * log((alpha * alpha))) * (single(1.0) + ((t_0 * cosTheta) * cosTheta))); end
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \alpha \cdot \alpha - 1\\
\frac{t_0}{\left(\pi \cdot \log \left(\alpha \cdot \alpha\right)\right) \cdot \left(1 + \left(t_0 \cdot cosTheta\right) \cdot cosTheta\right)}
\end{array}
\end{array}
;; Alternative 2: pi * log(alpha^2) rewritten as log(alpha^(2*pi)) (the
;; derivation trace below shows the pow2/log-pow/exp-to-pow steps), and
;; t_0 formed with (+ ... -1.0) instead of (- ... 1.0).
(FPCore (cosTheta alpha)
:precision binary32
(let* ((t_0 (+ (* alpha alpha) -1.0)))
(/
t_0
(* (log (pow alpha (* PI 2.0))) (+ 1.0 (* cosTheta (* t_0 cosTheta)))))))
/* Alternative 2 in C: denominator uses logf(powf(alpha, 2*pi)) in place of
 * pi*logf(alpha^2). Operation order must match the FPCore above exactly. */
float code(float cosTheta, float alpha) {
float t_0 = (alpha * alpha) + -1.0f;
return t_0 / (logf(powf(alpha, (((float) M_PI) * 2.0f))) * (1.0f + (cosTheta * (t_0 * cosTheta))));
}
# Alternative 2 in Julia (log(alpha^(2*pi)) form); Float32() forces binary32 rounding.
function code(cosTheta, alpha) t_0 = Float32(Float32(alpha * alpha) + Float32(-1.0)) return Float32(t_0 / Float32(log((alpha ^ Float32(Float32(pi) * Float32(2.0)))) * Float32(Float32(1.0) + Float32(cosTheta * Float32(t_0 * cosTheta))))) end
% Alternative 2 in MATLAB/Octave (log(alpha^(2*pi)) form, single precision).
function tmp = code(cosTheta, alpha) t_0 = (alpha * alpha) + single(-1.0); tmp = t_0 / (log((alpha ^ (single(pi) * single(2.0)))) * (single(1.0) + (cosTheta * (t_0 * cosTheta)))); end
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \alpha \cdot \alpha + -1\\
\frac{t_0}{\log \left({\alpha}^{\left(\pi \cdot 2\right)}\right) \cdot \left(1 + cosTheta \cdot \left(t_0 \cdot cosTheta\right)\right)}
\end{array}
\end{array}
Initial program 98.6%
pow2 98.6%
log-pow 98.5%
associate-*l* 98.5%
add-log-exp 98.5%
*-commutative 98.5%
exp-to-pow 98.7%
Applied egg-rr 98.7%
Final simplification 98.7%
;; Alternative 3: pi * log(alpha^2) collapsed to 2 * (pi * log(alpha)),
;; and the two denominator factors swapped.
(FPCore (cosTheta alpha)
:precision binary32
(let* ((t_0 (+ (* alpha alpha) -1.0)))
(/
t_0
(* (+ 1.0 (* cosTheta (* t_0 cosTheta))) (* 2.0 (* PI (log alpha)))))))
/* Alternative 3 in C: denominator (1 + cosTheta*(t_0*cosTheta)) * (2*pi*logf(alpha)).
 * Uses log(alpha) once instead of log(alpha*alpha); keep operation order. */
float code(float cosTheta, float alpha) {
float t_0 = (alpha * alpha) + -1.0f;
return t_0 / ((1.0f + (cosTheta * (t_0 * cosTheta))) * (2.0f * (((float) M_PI) * logf(alpha))));
}
# Alternative 3 in Julia (2*pi*log(alpha) form); Float32() forces binary32 rounding.
function code(cosTheta, alpha) t_0 = Float32(Float32(alpha * alpha) + Float32(-1.0)) return Float32(t_0 / Float32(Float32(Float32(1.0) + Float32(cosTheta * Float32(t_0 * cosTheta))) * Float32(Float32(2.0) * Float32(Float32(pi) * log(alpha))))) end
% Alternative 3 in MATLAB/Octave (2*pi*log(alpha) form, single precision).
function tmp = code(cosTheta, alpha) t_0 = (alpha * alpha) + single(-1.0); tmp = t_0 / ((single(1.0) + (cosTheta * (t_0 * cosTheta))) * (single(2.0) * (single(pi) * log(alpha)))); end
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \alpha \cdot \alpha + -1\\
\frac{t_0}{\left(1 + cosTheta \cdot \left(t_0 \cdot cosTheta\right)\right) \cdot \left(2 \cdot \left(\pi \cdot \log \alpha\right)\right)}
\end{array}
\end{array}
Initial program 98.6%
Taylor expanded in alpha around 0 98.5%
Final simplification 98.5%
;; Alternative 4: same terms as the initial program with the denominator
;; factors commuted — (1 + cosTheta*(t_0*cosTheta)) first, pi*log(alpha^2) second.
(FPCore (cosTheta alpha)
:precision binary32
(let* ((t_0 (+ (* alpha alpha) -1.0)))
(/
t_0
(* (+ 1.0 (* cosTheta (* t_0 cosTheta))) (* PI (log (* alpha alpha)))))))
/* Alternative 4 in C: initial program with the denominator factors reordered.
 * Keep the operation order to match the FPCore above. */
float code(float cosTheta, float alpha) {
float t_0 = (alpha * alpha) + -1.0f;
return t_0 / ((1.0f + (cosTheta * (t_0 * cosTheta))) * (((float) M_PI) * logf((alpha * alpha))));
}
# Alternative 4 in Julia (commuted denominator factors); Float32() forces binary32 rounding.
function code(cosTheta, alpha) t_0 = Float32(Float32(alpha * alpha) + Float32(-1.0)) return Float32(t_0 / Float32(Float32(Float32(1.0) + Float32(cosTheta * Float32(t_0 * cosTheta))) * Float32(Float32(pi) * log(Float32(alpha * alpha))))) end
% Alternative 4 in MATLAB/Octave (commuted denominator factors, single precision).
function tmp = code(cosTheta, alpha) t_0 = (alpha * alpha) + single(-1.0); tmp = t_0 / ((single(1.0) + (cosTheta * (t_0 * cosTheta))) * (single(pi) * log((alpha * alpha)))); end
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \alpha \cdot \alpha + -1\\
\frac{t_0}{\left(1 + cosTheta \cdot \left(t_0 \cdot cosTheta\right)\right) \cdot \left(\pi \cdot \log \left(\alpha \cdot \alpha\right)\right)}
\end{array}
\end{array}
Initial program 98.6%
Final simplification 98.6%
;; Alternative 5: Taylor expansion in alpha around 0 replaces the
;; (alpha^2 - 1) inside the cos^2 term by -1, giving (1 - cosTheta^2).
(FPCore (cosTheta alpha) :precision binary32 (/ (+ (* alpha alpha) -1.0) (* (* PI (log (* alpha alpha))) (- 1.0 (* cosTheta cosTheta)))))
/* Alternative 5 in C: approximation — the cos^2 term uses (1 - cosTheta^2)
 * instead of (1 + (alpha^2-1)*cosTheta^2) (Taylor in alpha around 0). */
float code(float cosTheta, float alpha) {
return ((alpha * alpha) + -1.0f) / ((((float) M_PI) * logf((alpha * alpha))) * (1.0f - (cosTheta * cosTheta)));
}
# Alternative 5 in Julia ((1 - cosTheta^2) approximation); Float32() forces binary32 rounding.
function code(cosTheta, alpha) return Float32(Float32(Float32(alpha * alpha) + Float32(-1.0)) / Float32(Float32(Float32(pi) * log(Float32(alpha * alpha))) * Float32(Float32(1.0) - Float32(cosTheta * cosTheta)))) end
% Alternative 5 in MATLAB/Octave ((1 - cosTheta^2) approximation, single precision).
function tmp = code(cosTheta, alpha) tmp = ((alpha * alpha) + single(-1.0)) / ((single(pi) * log((alpha * alpha))) * (single(1.0) - (cosTheta * cosTheta))); end
\begin{array}{l}
\\
\frac{\alpha \cdot \alpha + -1}{\left(\pi \cdot \log \left(\alpha \cdot \alpha\right)\right) \cdot \left(1 - cosTheta \cdot cosTheta\right)}
\end{array}
Initial program 98.6%
Taylor expanded in alpha around 0 97.1%
mul-1-neg 97.3%
Simplified 97.1%
Final simplification 97.1%
;; Alternative 6: drops the cosTheta-dependent factor entirely —
;; (alpha^2 - 1) / (pi * 2 * log(alpha)). cosTheta is unused but kept
;; in the argument list so the interface matches the other alternatives.
(FPCore (cosTheta alpha) :precision binary32 (/ (+ (* alpha alpha) -1.0) (* PI (* 2.0 (log alpha)))))
/* Alternative 6 in C: (alpha^2 - 1) / (pi * 2 * logf(alpha)).
 * cosTheta is intentionally unused; signature kept for interface parity. */
float code(float cosTheta, float alpha) {
return ((alpha * alpha) + -1.0f) / (((float) M_PI) * (2.0f * logf(alpha)));
}
# Alternative 6 in Julia (cosTheta unused, kept for interface parity).
function code(cosTheta, alpha) return Float32(Float32(Float32(alpha * alpha) + Float32(-1.0)) / Float32(Float32(pi) * Float32(Float32(2.0) * log(alpha)))) end
% Alternative 6 in MATLAB/Octave (cosTheta unused, kept for interface parity).
function tmp = code(cosTheta, alpha) tmp = ((alpha * alpha) + single(-1.0)) / (single(pi) * (single(2.0) * log(alpha))); end
\begin{array}{l}
\\
\frac{\alpha \cdot \alpha + -1}{\pi \cdot \left(2 \cdot \log \alpha\right)}
\end{array}
Initial program 98.6%
pow2 98.6%
log-pow 98.5%
associate-*l* 98.5%
add-log-exp 98.5%
*-commutative 98.5%
exp-to-pow 98.7%
Applied egg-rr 98.7%
Taylor expanded in alpha around 0 97.3%
mul-1-neg 97.3%
Simplified 97.3%
Taylor expanded in cosTheta around 0 94.2%
associate-*r* 94.2%
*-commutative 94.2%
associate-*l* 94.2%
Simplified 94.2%
Final simplification 94.2%
;; Alternative 7: constant numerator -0.5; the 3 * (log(alpha) * 1/3) pair is a
;; residue of the cube/cbrt rewrites in the trace below and kept verbatim,
;; since each rounding step is part of the reported behavior.
(FPCore (cosTheta alpha) :precision binary32 (/ -0.5 (* PI (* 3.0 (* (log alpha) 0.3333333333333333)))))
/* Alternative 7 in C: -0.5 / (pi * 3 * (logf(alpha) * 1/3)). The 3 * (1/3)
 * pair looks redundant but is kept: each float rounding step is deliberate.
 * Both parameters' role in the original is collapsed; cosTheta is unused. */
float code(float cosTheta, float alpha) {
return -0.5f / (((float) M_PI) * (3.0f * (logf(alpha) * 0.3333333333333333f)));
}
# Alternative 7 in Julia (the 3 * (1/3) pair is intentional; cosTheta unused).
function code(cosTheta, alpha) return Float32(Float32(-0.5) / Float32(Float32(pi) * Float32(Float32(3.0) * Float32(log(alpha) * Float32(0.3333333333333333))))) end
% Alternative 7 in MATLAB/Octave (the 3 * (1/3) pair is intentional; cosTheta unused).
function tmp = code(cosTheta, alpha) tmp = single(-0.5) / (single(pi) * (single(3.0) * (log(alpha) * single(0.3333333333333333)))); end
\begin{array}{l}
\\
\frac{-0.5}{\pi \cdot \left(3 \cdot \left(\log \alpha \cdot 0.3333333333333333\right)\right)}
\end{array}
Initial program 98.6%
Taylor expanded in alpha around 0 66.7%
associate-*r* 66.7%
mul-1-neg 66.7%
unsub-neg 66.7%
Simplified 66.7%
Taylor expanded in cosTheta around 0 65.0%
add-cube-cbrt 65.0%
log-prod 65.0%
pow2 65.0%
Applied egg-rr 65.0%
log-pow 65.0%
distribute-lft1-in 65.0%
metadata-eval 65.0%
Simplified 65.0%
add-sqr-sqrt 65.0%
log-prod 65.0%
pow1/3 65.1%
sqrt-pow1 65.1%
metadata-eval 65.1%
pow1/3 65.1%
sqrt-pow1 65.1%
metadata-eval 65.1%
Applied egg-rr 65.1%
log-pow 65.1%
log-pow 65.1%
distribute-rgt-out 65.1%
metadata-eval 65.1%
Simplified 65.1%
Final simplification 65.1%
;; Alternative 8: fully collapsed form (0.5/pi) * (-1/log(alpha));
;; cosTheta no longer appears.
(FPCore (cosTheta alpha) :precision binary32 (* (/ 0.5 PI) (/ -1.0 (log alpha))))
/* Alternative 8 in C: (0.5/pi) * (-1/logf(alpha)); cosTheta unused,
 * signature kept for interface parity with the other alternatives. */
float code(float cosTheta, float alpha) {
return (0.5f / ((float) M_PI)) * (-1.0f / logf(alpha));
}
# Alternative 8 in Julia ((0.5/pi) * (-1/log(alpha)); cosTheta unused).
function code(cosTheta, alpha) return Float32(Float32(Float32(0.5) / Float32(pi)) * Float32(Float32(-1.0) / log(alpha))) end
% Alternative 8 in MATLAB/Octave ((0.5/pi) * (-1/log(alpha)); cosTheta unused).
function tmp = code(cosTheta, alpha) tmp = (single(0.5) / single(pi)) * (single(-1.0) / log(alpha)); end
\begin{array}{l}
\\
\frac{0.5}{\pi} \cdot \frac{-1}{\log \alpha}
\end{array}
Initial program 98.6%
Taylor expanded in cosTheta around 0 94.3%
associate-/r* 94.1%
unpow2 94.1%
fma-neg 94.0%
metadata-eval 94.0%
*-lft-identity 94.0%
log-pow 94.1%
times-frac 94.1%
metadata-eval 94.1%
metadata-eval 94.1%
fma-neg 94.1%
unpow2 94.1%
associate-/r* 94.2%
associate-*r/ 94.2%
unpow2 94.2%
fma-neg 94.2%
metadata-eval 94.2%
times-frac 93.9%
Simplified 93.9%
Taylor expanded in alpha around 0 65.1%
Final simplification 65.1%
;; Alternative 9: -0.5 / (pi * log(alpha)); cosTheta no longer appears.
(FPCore (cosTheta alpha) :precision binary32 (/ -0.5 (* PI (log alpha))))
/* Alternative 9 in C: -0.5 / (pi * logf(alpha)); cosTheta unused,
 * signature kept for interface parity. */
float code(float cosTheta, float alpha) {
return -0.5f / (((float) M_PI) * logf(alpha));
}
# Alternative 9 in Julia (-0.5 / (pi * log(alpha)); cosTheta unused).
function code(cosTheta, alpha) return Float32(Float32(-0.5) / Float32(Float32(pi) * log(alpha))) end
% Alternative 9 in MATLAB/Octave (-0.5 / (pi * log(alpha)); cosTheta unused).
function tmp = code(cosTheta, alpha) tmp = single(-0.5) / (single(pi) * log(alpha)); end
\begin{array}{l}
\\
\frac{-0.5}{\pi \cdot \log \alpha}
\end{array}
Initial program 98.6%
Taylor expanded in alpha around 0 66.7%
associate-*r* 66.7%
mul-1-neg 66.7%
unsub-neg 66.7%
Simplified 66.7%
Taylor expanded in cosTheta around 0 65.0%
Final simplification 65.0%
;; Alternative 10: (-0.5/pi) / log(alpha) — alternative 9 with the
;; division re-associated; cosTheta no longer appears.
(FPCore (cosTheta alpha) :precision binary32 (/ (/ -0.5 PI) (log alpha)))
/* Alternative 10 in C: (-0.5/pi) / logf(alpha); cosTheta unused,
 * signature kept for interface parity. */
float code(float cosTheta, float alpha) {
return (-0.5f / ((float) M_PI)) / logf(alpha);
}
# Alternative 10 in Julia ((-0.5/pi) / log(alpha); cosTheta unused).
function code(cosTheta, alpha) return Float32(Float32(Float32(-0.5) / Float32(pi)) / log(alpha)) end
% Alternative 10 in MATLAB/Octave ((-0.5/pi) / log(alpha); cosTheta unused).
function tmp = code(cosTheta, alpha) tmp = (single(-0.5) / single(pi)) / log(alpha); end
\begin{array}{l}
\\
\frac{\frac{-0.5}{\pi}}{\log \alpha}
\end{array}
Initial program 98.6%
Taylor expanded in cosTheta around 0 94.3%
associate-/r* 94.1%
unpow2 94.1%
fma-neg 94.0%
metadata-eval 94.0%
*-lft-identity 94.0%
log-pow 94.1%
times-frac 94.1%
metadata-eval 94.1%
metadata-eval 94.1%
fma-neg 94.1%
unpow2 94.1%
associate-/r* 94.2%
associate-*r/ 94.2%
unpow2 94.2%
fma-neg 94.2%
metadata-eval 94.2%
times-frac 93.9%
Simplified 93.9%
Taylor expanded in alpha around 0 65.1%
*-commutative 65.1%
frac-times 65.0%
metadata-eval 65.0%
add-cube-cbrt 65.0%
pow3 65.0%
exp-to-pow 65.0%
add-log-exp 65.0%
*-commutative 65.0%
*-commutative 65.0%
associate-/r* 65.0%
*-commutative 65.0%
add-log-exp 65.0%
exp-to-pow 65.0%
pow3 65.1%
add-cube-cbrt 65.0%
Applied egg-rr 65.0%
Final simplification 65.0%
herbie shell --seed 2023313
;; Original problem statement as given to Herbie: the "GTR1 distribution",
;; binary32, with the sampled input domain restricted by :pre to
;; 0 <= cosTheta <= 1 and 0.0001 <= alpha <= 1 (so log(alpha^2) <= 0 and
;; the expression stays finite away from alpha = 1).
(FPCore (cosTheta alpha)
:name "GTR1 distribution"
:precision binary32
:pre (and (and (<= 0.0 cosTheta) (<= cosTheta 1.0)) (and (<= 0.0001 alpha) (<= alpha 1.0)))
(/ (- (* alpha alpha) 1.0) (* (* PI (log (* alpha alpha))) (+ 1.0 (* (* (- (* alpha alpha) 1.0) cosTheta) cosTheta)))))