
; GTR1 distribution, original program in binary32.
; t_0 := alpha^2 - 1; result = t_0 / (pi * log(alpha^2) * (1 + t_0 * cosTheta^2)).
(FPCore (cosTheta alpha)
:precision binary32
(let* ((t_0 (- (* alpha alpha) 1.0)))
(/
t_0
(* (* PI (log (* alpha alpha))) (+ 1.0 (* (* t_0 cosTheta) cosTheta))))))
/* GTR1 distribution, original program (binary32).
 * Numerator: t_0 = alpha^2 - 1.
 * Denominator: pi * log(alpha^2) times (1 + t_0 * cosTheta^2).
 * Operation order matches the FPCore form exactly so the float
 * rounding behavior is unchanged. */
float code(float cosTheta, float alpha) {
    float alphaSq = alpha * alpha;            /* alpha^2, reused below */
    float t_0 = alphaSq - 1.0f;
    float norm = ((float) M_PI) * logf(alphaSq);
    float angular = 1.0f + ((t_0 * cosTheta) * cosTheta);
    return t_0 / (norm * angular);
}
# Julia port of the original program; Float32(...) wrappers force binary32 rounding at each step.
function code(cosTheta, alpha) t_0 = Float32(Float32(alpha * alpha) - Float32(1.0)) return Float32(t_0 / Float32(Float32(Float32(pi) * log(Float32(alpha * alpha))) * Float32(Float32(1.0) + Float32(Float32(t_0 * cosTheta) * cosTheta)))) end
% MATLAB port of the original program; single(...) casts keep evaluation in binary32.
function tmp = code(cosTheta, alpha) t_0 = (alpha * alpha) - single(1.0); tmp = t_0 / ((single(pi) * log((alpha * alpha))) * (single(1.0) + ((t_0 * cosTheta) * cosTheta))); end
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \alpha \cdot \alpha - 1\\
\frac{t_0}{\left(\pi \cdot \log \left(\alpha \cdot \alpha\right)\right) \cdot \left(1 + \left(t_0 \cdot cosTheta\right) \cdot cosTheta\right)}
\end{array}
\end{array}
Sampling outcomes in binary32 precision:
Herbie found 6 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
; Alternative listing: identical to the initial program (Herbie's first
; "alternative" is the unchanged input expression).
(FPCore (cosTheta alpha)
:precision binary32
(let* ((t_0 (- (* alpha alpha) 1.0)))
(/
t_0
(* (* PI (log (* alpha alpha))) (+ 1.0 (* (* t_0 cosTheta) cosTheta))))))
/* C rendering of the unchanged initial program (binary32).
 * t_0 = alpha^2 - 1; denominator is pi * log(alpha^2) * (1 + t_0 * cosTheta^2). */
float code(float cosTheta, float alpha) {
float t_0 = (alpha * alpha) - 1.0f;
return t_0 / ((((float) M_PI) * logf((alpha * alpha))) * (1.0f + ((t_0 * cosTheta) * cosTheta)));
}
# Julia rendering of the unchanged initial program (binary32 via Float32 wrappers).
function code(cosTheta, alpha) t_0 = Float32(Float32(alpha * alpha) - Float32(1.0)) return Float32(t_0 / Float32(Float32(Float32(pi) * log(Float32(alpha * alpha))) * Float32(Float32(1.0) + Float32(Float32(t_0 * cosTheta) * cosTheta)))) end
% MATLAB rendering of the unchanged initial program (binary32 via single casts).
function tmp = code(cosTheta, alpha) t_0 = (alpha * alpha) - single(1.0); tmp = t_0 / ((single(pi) * log((alpha * alpha))) * (single(1.0) + ((t_0 * cosTheta) * cosTheta))); end
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \alpha \cdot \alpha - 1\\
\frac{t_0}{\left(\pi \cdot \log \left(\alpha \cdot \alpha\right)\right) \cdot \left(1 + \left(t_0 \cdot cosTheta\right) \cdot cosTheta\right)}
\end{array}
\end{array}
; Alternative 1: pi * log(alpha^2) rewritten as log((alpha^2)^pi)
; (steps below: add-log-exp, exp-to-pow, pow2); same accuracy, 98.6%.
(FPCore (cosTheta alpha)
:precision binary32
(let* ((t_0 (+ (* alpha alpha) -1.0)))
(/
t_0
(* (log (pow (pow alpha 2.0) PI)) (+ 1.0 (* cosTheta (* t_0 cosTheta)))))))
/* Herbie alternative 1 (binary32): the normalization factor pi * log(alpha^2)
 * is expressed as log((alpha^2)^pi) instead. Same operation order as the
 * FPCore form, so float rounding is unchanged. */
float code(float cosTheta, float alpha) {
    /* t_0 = alpha^2 - 1, written as addition of -1. */
    float t_0 = (alpha * alpha) + -1.0f;
    /* log((alpha^2)^pi) == pi * log(alpha^2) in exact arithmetic. */
    float logTerm = logf(powf(powf(alpha, 2.0f), ((float) M_PI)));
    float angular = 1.0f + (cosTheta * (t_0 * cosTheta));
    return t_0 / (logTerm * angular);
}
# Julia rendering of alternative 1: normalization written as log((alpha^2)^pi).
function code(cosTheta, alpha) t_0 = Float32(Float32(alpha * alpha) + Float32(-1.0)) return Float32(t_0 / Float32(log(((alpha ^ Float32(2.0)) ^ Float32(pi))) * Float32(Float32(1.0) + Float32(cosTheta * Float32(t_0 * cosTheta))))) end
% MATLAB rendering of alternative 1: normalization written as log((alpha^2)^pi).
function tmp = code(cosTheta, alpha) t_0 = (alpha * alpha) + single(-1.0); tmp = t_0 / (log(((alpha ^ single(2.0)) ^ single(pi))) * (single(1.0) + (cosTheta * (t_0 * cosTheta)))); end
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \alpha \cdot \alpha + -1\\
\frac{t_0}{\log \left({\left({\alpha}^{2}\right)}^{\pi}\right) \cdot \left(1 + cosTheta \cdot \left(t_0 \cdot cosTheta\right)\right)}
\end{array}
\end{array}
Initial program 98.6%
add-log-exp 98.6%
*-commutative 98.6%
exp-to-pow 98.6%
pow2 98.6%
Applied egg-rr 98.6%
Final simplification 98.6%
; Alternative 2: same terms as the original with the two denominator
; factors commuted; accuracy unchanged at 98.6%.
(FPCore (cosTheta alpha)
:precision binary32
(let* ((t_0 (+ (* alpha alpha) -1.0)))
(/
t_0
(* (+ 1.0 (* cosTheta (* t_0 cosTheta))) (* PI (log (* alpha alpha)))))))
/* Herbie alternative 2 (binary32): identical terms to the original, with the
 * two denominator factors commuted. Operand order within each product is
 * preserved so float results match the report. */
float code(float cosTheta, float alpha) {
    float t_0 = (alpha * alpha) + -1.0f;        /* alpha^2 - 1 */
    float angular = 1.0f + (cosTheta * (t_0 * cosTheta));
    float norm = ((float) M_PI) * logf((alpha * alpha));
    return t_0 / (angular * norm);
}
# Julia rendering of alternative 2: denominator factors commuted relative to the original.
function code(cosTheta, alpha) t_0 = Float32(Float32(alpha * alpha) + Float32(-1.0)) return Float32(t_0 / Float32(Float32(Float32(1.0) + Float32(cosTheta * Float32(t_0 * cosTheta))) * Float32(Float32(pi) * log(Float32(alpha * alpha))))) end
% MATLAB rendering of alternative 2: denominator factors commuted relative to the original.
function tmp = code(cosTheta, alpha) t_0 = (alpha * alpha) + single(-1.0); tmp = t_0 / ((single(1.0) + (cosTheta * (t_0 * cosTheta))) * (single(pi) * log((alpha * alpha)))); end
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \alpha \cdot \alpha + -1\\
\frac{t_0}{\left(1 + cosTheta \cdot \left(t_0 \cdot cosTheta\right)\right) \cdot \left(\pi \cdot \log \left(\alpha \cdot \alpha\right)\right)}
\end{array}
\end{array}
Initial program 98.6%
Final simplification 98.6%
; Alternative 3: 1 + (alpha^2 - 1) * cosTheta^2 simplified to 1 - cosTheta^2
; after Taylor expansion in alpha around 0; accuracy 97.3%.
(FPCore (cosTheta alpha) :precision binary32 (/ (+ (* alpha alpha) -1.0) (* (* PI (log (* alpha alpha))) (- 1.0 (* cosTheta cosTheta)))))
/* Herbie alternative 3 (binary32): the (1 + t_0 * cosTheta^2) factor is
 * replaced by (1 - cosTheta^2) via Taylor expansion in alpha around 0.
 * Operand order preserved for bit-identical float results. */
float code(float cosTheta, float alpha) {
    float numer = (alpha * alpha) + -1.0f;      /* alpha^2 - 1 */
    float norm = ((float) M_PI) * logf((alpha * alpha));
    float sinSq = 1.0f - (cosTheta * cosTheta); /* sin^2(theta) */
    return numer / (norm * sinSq);
}
# Julia rendering of alternative 3: angular factor simplified to 1 - cosTheta^2.
function code(cosTheta, alpha) return Float32(Float32(Float32(alpha * alpha) + Float32(-1.0)) / Float32(Float32(Float32(pi) * log(Float32(alpha * alpha))) * Float32(Float32(1.0) - Float32(cosTheta * cosTheta)))) end
% MATLAB rendering of alternative 3: angular factor simplified to 1 - cosTheta^2.
function tmp = code(cosTheta, alpha) tmp = ((alpha * alpha) + single(-1.0)) / ((single(pi) * log((alpha * alpha))) * (single(1.0) - (cosTheta * cosTheta))); end
\begin{array}{l}
\\
\frac{\alpha \cdot \alpha + -1}{\left(\pi \cdot \log \left(\alpha \cdot \alpha\right)\right) \cdot \left(1 - cosTheta \cdot cosTheta\right)}
\end{array}
Initial program 98.6%
Taylor expanded in alpha around 0 97.3%
Simplified 97.3%
Final simplification 97.3%
; Alternative 4: numerator alpha^2 - 1 further Taylor-expanded to -1
; around alpha = 0; accuracy drops to 97.3%.
(FPCore (cosTheta alpha) :precision binary32 (/ -1.0 (* (* PI (log (* alpha alpha))) (- 1.0 (* cosTheta cosTheta)))))
/* Herbie alternative 4 (binary32): the numerator alpha^2 - 1 is
 * Taylor-expanded to the constant -1 around alpha = 0. */
float code(float cosTheta, float alpha) {
    float denom = (((float) M_PI) * logf((alpha * alpha))) * (1.0f - (cosTheta * cosTheta));
    return -1.0f / denom;
}
# Julia rendering of alternative 4: numerator replaced by the constant -1.
function code(cosTheta, alpha) return Float32(Float32(-1.0) / Float32(Float32(Float32(pi) * log(Float32(alpha * alpha))) * Float32(Float32(1.0) - Float32(cosTheta * cosTheta)))) end
% MATLAB rendering of alternative 4: numerator replaced by the constant -1.
function tmp = code(cosTheta, alpha) tmp = single(-1.0) / ((single(pi) * log((alpha * alpha))) * (single(1.0) - (cosTheta * cosTheta))); end
\begin{array}{l}
\\
\frac{-1}{\left(\pi \cdot \log \left(\alpha \cdot \alpha\right)\right) \cdot \left(1 - cosTheta \cdot cosTheta\right)}
\end{array}
Initial program 98.6%
Taylor expanded in alpha around 0 97.3%
Simplified 97.3%
Taylor expanded in alpha around 0 67.8%
Final simplification 67.8%
; Alternative 5: cosTheta dependence dropped entirely after a further
; Taylor expansion; -0.5 / (pi * log alpha). Accuracy falls to 67.8%.
(FPCore (cosTheta alpha) :precision binary32 (/ -0.5 (* PI (log alpha))))
/* Herbie alternative 5 (binary32): after Taylor expansion the result no
 * longer depends on cosTheta; it collapses to -0.5 / (pi * log(alpha)).
 * The cosTheta parameter is kept so the signature matches the other
 * alternatives. */
float code(float cosTheta, float alpha) {
    (void) cosTheta; /* unused in this approximation */
    return -0.5f / (((float) M_PI) * logf(alpha));
}
# Julia rendering of alternative 5: cosTheta is unused in this approximation.
function code(cosTheta, alpha) return Float32(Float32(-0.5) / Float32(Float32(pi) * log(alpha))) end
% MATLAB rendering of alternative 5: cosTheta is unused in this approximation.
function tmp = code(cosTheta, alpha) tmp = single(-0.5) / (single(pi) * log(alpha)); end
\begin{array}{l}
\\
\frac{-0.5}{\pi \cdot \log \alpha}
\end{array}
Initial program 98.6%
Taylor expanded in alpha around 0 67.8%
Simplified 67.8%
Taylor expanded in cosTheta around 0 65.8%
Final simplification 65.8%
; Alternative 6: a degenerate rewrite — the denominator contains (/ 0.0 0.0),
; which is NaN, so the whole expression is NaN; reported accuracy is -0.0%.
(FPCore (cosTheta alpha) :precision binary32 (/ (+ (* alpha alpha) -1.0) (* (- 1.0 (* cosTheta cosTheta)) (* PI (/ 0.0 0.0)))))
/* Herbie alternative 6 (binary32): the denominator contains 0.0f / 0.0f,
 * which is NaN under IEEE 754; NaN propagates through * and /, so this
 * function always returns NaN. Kept verbatim from the report (its accuracy
 * is listed as -0.0%) — not usable in practice. */
float code(float cosTheta, float alpha) {
return ((alpha * alpha) + -1.0f) / ((1.0f - (cosTheta * cosTheta)) * (((float) M_PI) * (0.0f / 0.0f)));
}
# Julia rendering of alternative 6: contains 0.0/0.0 (NaN), so the result is always NaN.
function code(cosTheta, alpha) return Float32(Float32(Float32(alpha * alpha) + Float32(-1.0)) / Float32(Float32(Float32(1.0) - Float32(cosTheta * cosTheta)) * Float32(Float32(pi) * Float32(Float32(0.0) / Float32(0.0))))) end
% MATLAB rendering of alternative 6: contains 0.0/0.0 (NaN), so the result is always NaN.
function tmp = code(cosTheta, alpha) tmp = ((alpha * alpha) + single(-1.0)) / ((single(1.0) - (cosTheta * cosTheta)) * (single(pi) * (single(0.0) / single(0.0)))); end
\begin{array}{l}
\\
\frac{\alpha \cdot \alpha + -1}{\left(1 - cosTheta \cdot cosTheta\right) \cdot \left(\pi \cdot \frac{0}{0}\right)}
\end{array}
Initial program 98.6%
Taylor expanded in alpha around 0 97.3%
Simplified97.3%
log-prod 97.1%
flip-+ -0.0%
pow2 -0.0%
pow2 -0.0%
Applied egg-rr -0.0%
Simplified -0.0%
Final simplification -0.0%
herbie shell --seed 2024021
; Full problem statement as given to Herbie: the GTR1 distribution in
; binary32, sampled over 0 <= cosTheta <= 1 and 0.0001 <= alpha <= 1.
(FPCore (cosTheta alpha)
:name "GTR1 distribution"
:precision binary32
:pre (and (and (<= 0.0 cosTheta) (<= cosTheta 1.0)) (and (<= 0.0001 alpha) (<= alpha 1.0)))
(/ (- (* alpha alpha) 1.0) (* (* PI (log (* alpha alpha))) (+ 1.0 (* (* (- (* alpha alpha) 1.0) cosTheta) cosTheta)))))