
; GTR1 distribution, initial program (98.6% accuracy per report):
;   (alpha^2 - 1) / (pi * ln(alpha^2) * (1 + (alpha^2 - 1) * cosTheta^2))
(FPCore (cosTheta alpha)
:precision binary32
(let* ((t_0 (- (* alpha alpha) 1.0)))
(/
t_0
(* (* PI (log (* alpha alpha))) (+ 1.0 (* (* t_0 cosTheta) cosTheta))))))
/* GTR1 distribution, initial program (binary32, 98.6% accuracy per report):
 *   (alpha^2 - 1) / (pi * ln(alpha^2) * (1 + (alpha^2 - 1) * cosTheta^2))
 * Intermediates are named for readability; the float operation order is
 * exactly that of the original single-expression form. */
float code(float cosTheta, float alpha) {
    float a2 = alpha * alpha;                 /* alpha^2 */
    float t_0 = a2 - 1.0f;                    /* alpha^2 - 1 */
    float scale = ((float) M_PI) * logf(a2);  /* pi * ln(alpha^2) */
    float shape = 1.0f + ((t_0 * cosTheta) * cosTheta);
    return t_0 / (scale * shape);
}
# GTR1 distribution, initial program (98.6% accuracy per report).
# Computes (alpha^2 - 1) / (pi * log(alpha^2) * (1 + (alpha^2 - 1) * cosTheta^2));
# every intermediate is explicitly rounded to Float32 (binary32), in exactly
# the same order as the original one-line form.
function code(cosTheta, alpha)
    a2 = Float32(alpha * alpha)
    t_0 = Float32(a2 - Float32(1.0))
    scale = Float32(Float32(pi) * log(a2))
    shape = Float32(Float32(1.0) + Float32(Float32(t_0 * cosTheta) * cosTheta))
    return Float32(t_0 / Float32(scale * shape))
end
% GTR1 distribution, initial program (98.6% accuracy per report):
%   (alpha^2 - 1) / (pi * log(alpha^2) * (1 + (alpha^2 - 1)*cosTheta^2))
% single() pins the computation to binary32; log is the natural log.
function tmp = code(cosTheta, alpha) t_0 = (alpha * alpha) - single(1.0); tmp = t_0 / ((single(pi) * log((alpha * alpha))) * (single(1.0) + ((t_0 * cosTheta) * cosTheta))); end
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \alpha \cdot \alpha - 1\\
\frac{t\_0}{\left(\pi \cdot \log \left(\alpha \cdot \alpha\right)\right) \cdot \left(1 + \left(t\_0 \cdot cosTheta\right) \cdot cosTheta\right)}
\end{array}
\end{array}
Sampling outcomes in binary32 precision:
Herbie found 9 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
; Herbie alternative: identical to the initial program (98.6% accuracy):
;   (alpha^2 - 1) / (pi * ln(alpha^2) * (1 + (alpha^2 - 1) * cosTheta^2))
(FPCore (cosTheta alpha)
:precision binary32
(let* ((t_0 (- (* alpha alpha) 1.0)))
(/
t_0
(* (* PI (log (* alpha alpha))) (+ 1.0 (* (* t_0 cosTheta) cosTheta))))))
/* GTR1 distribution alternative — identical to the initial program
 * (binary32, 98.6% accuracy per report):
 *   (alpha^2 - 1) / (pi * ln(alpha^2) * (1 + (alpha^2 - 1) * cosTheta^2))
 * Intermediates are named; the float operation order is unchanged. */
float code(float cosTheta, float alpha) {
    float a2 = alpha * alpha;                 /* alpha^2 */
    float t_0 = a2 - 1.0f;                    /* alpha^2 - 1 */
    float scale = ((float) M_PI) * logf(a2);  /* pi * ln(alpha^2) */
    float shape = 1.0f + ((t_0 * cosTheta) * cosTheta);
    return t_0 / (scale * shape);
}
# GTR1 distribution alternative — identical to the initial program
# (98.6% accuracy per report):
#   (alpha^2 - 1) / (pi * log(alpha^2) * (1 + (alpha^2 - 1) * cosTheta^2))
# Every intermediate is explicitly rounded to Float32, in the original order.
function code(cosTheta, alpha)
    a2 = Float32(alpha * alpha)
    t_0 = Float32(a2 - Float32(1.0))
    scale = Float32(Float32(pi) * log(a2))
    shape = Float32(Float32(1.0) + Float32(Float32(t_0 * cosTheta) * cosTheta))
    return Float32(t_0 / Float32(scale * shape))
end
% GTR1 distribution alternative — identical to the initial program
% (98.6% accuracy per report); single() pins the math to binary32.
function tmp = code(cosTheta, alpha) t_0 = (alpha * alpha) - single(1.0); tmp = t_0 / ((single(pi) * log((alpha * alpha))) * (single(1.0) + ((t_0 * cosTheta) * cosTheta))); end
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \alpha \cdot \alpha - 1\\
\frac{t\_0}{\left(\pi \cdot \log \left(\alpha \cdot \alpha\right)\right) \cdot \left(1 + \left(t\_0 \cdot cosTheta\right) \cdot cosTheta\right)}
\end{array}
\end{array}
; Herbie alternative (98.6% accuracy): same value, with the denominator's
; (1 + (alpha^2 - 1)*cosTheta^2) fused into nested fma operations.
(FPCore (cosTheta alpha) :precision binary32 (/ (+ (* alpha alpha) -1.0) (* (* PI (log (* alpha alpha))) (fma (fma alpha alpha -1.0) (* cosTheta cosTheta) 1.0))))
/* GTR1 alternative (98.6% accuracy per report):
 *   (alpha^2 - 1) / (pi * ln(alpha^2) * fma(alpha^2 - 1, cosTheta^2, 1))
 * fmaf(a, b, c) computes a*b + c with a single rounding. */
float code(float cosTheta, float alpha) {
return ((alpha * alpha) + -1.0f) / ((((float) M_PI) * logf((alpha * alpha))) * fmaf(fmaf(alpha, alpha, -1.0f), (cosTheta * cosTheta), 1.0f));
}
# GTR1 alternative (98.6% accuracy per report):
#   (alpha^2 - 1) / (pi * log(alpha^2) * fma(alpha^2 - 1, cosTheta^2, 1))
# fma(x, y, z) evaluates x*y + z with a single rounding.
function code(cosTheta, alpha) return Float32(Float32(Float32(alpha * alpha) + Float32(-1.0)) / Float32(Float32(Float32(pi) * log(Float32(alpha * alpha))) * fma(fma(alpha, alpha, Float32(-1.0)), Float32(cosTheta * cosTheta), Float32(1.0)))) end
\begin{array}{l}
\\
\frac{\alpha \cdot \alpha + -1}{\left(\pi \cdot \log \left(\alpha \cdot \alpha\right)\right) \cdot \mathsf{fma}\left(\mathsf{fma}\left(\alpha, \alpha, -1\right), cosTheta \cdot cosTheta, 1\right)}
\end{array}
Initial program: 98.6%
  +-commutative: N/A
  associate-*l*: N/A
  accelerator-lowering-fma.f32: N/A
  sub-neg: N/A
  accelerator-lowering-fma.f32: N/A
  metadata-eval: N/A
  *-lowering-*.f32: 98.6%
Applied egg-rr: 98.6%
Final simplification: 98.6%
; Herbie alternative (98.5% accuracy): as above, with the numerator
; (alpha^2 - 1) also fused into an fma.
(FPCore (cosTheta alpha) :precision binary32 (/ (fma alpha alpha -1.0) (* (* PI (log (* alpha alpha))) (fma (fma alpha alpha -1.0) (* cosTheta cosTheta) 1.0))))
/* GTR1 alternative (98.5% accuracy per report):
 *   fma(alpha, alpha, -1) / (pi * ln(alpha^2) * fma(alpha^2 - 1, cosTheta^2, 1))
 * Both numerator and denominator use single-rounding fmaf. */
float code(float cosTheta, float alpha) {
return fmaf(alpha, alpha, -1.0f) / ((((float) M_PI) * logf((alpha * alpha))) * fmaf(fmaf(alpha, alpha, -1.0f), (cosTheta * cosTheta), 1.0f));
}
# GTR1 alternative (98.5% accuracy per report): numerator and denominator
# inner terms computed with single-rounding fma.
function code(cosTheta, alpha) return Float32(fma(alpha, alpha, Float32(-1.0)) / Float32(Float32(Float32(pi) * log(Float32(alpha * alpha))) * fma(fma(alpha, alpha, Float32(-1.0)), Float32(cosTheta * cosTheta), Float32(1.0)))) end
\begin{array}{l}
\\
\frac{\mathsf{fma}\left(\alpha, \alpha, -1\right)}{\left(\pi \cdot \log \left(\alpha \cdot \alpha\right)\right) \cdot \mathsf{fma}\left(\mathsf{fma}\left(\alpha, \alpha, -1\right), cosTheta \cdot cosTheta, 1\right)}
\end{array}
Initial program 98.6%
+-commutativeN/A
associate-*l*N/A
accelerator-lowering-fma.f32N/A
sub-negN/A
accelerator-lowering-fma.f32N/A
metadata-evalN/A
*-lowering-*.f3298.6
Applied egg-rr98.6%
Taylor expanded in alpha around 0
sub-negN/A
unpow2N/A
metadata-evalN/A
accelerator-lowering-fma.f3298.5
Simplified98.5%
; Herbie alternative (97.6% accuracy, Taylor-expanded in alpha around 0):
; approximates the denominator's (1 + (alpha^2 - 1)*cosTheta^2) by (1 - cosTheta^2).
(FPCore (cosTheta alpha) :precision binary32 (/ (+ (* alpha alpha) -1.0) (* (* PI (log (* alpha alpha))) (- 1.0 (* cosTheta cosTheta)))))
/* GTR1 alternative (97.6% accuracy per report; Taylor expansion in alpha):
 *   (alpha^2 - 1) / (pi * ln(alpha^2) * (1 - cosTheta^2))
 * Approximation: the (alpha^2 - 1) factor inside the denominator is dropped. */
float code(float cosTheta, float alpha) {
return ((alpha * alpha) + -1.0f) / ((((float) M_PI) * logf((alpha * alpha))) * (1.0f - (cosTheta * cosTheta)));
}
# GTR1 alternative (97.6% accuracy per report; Taylor expansion in alpha):
#   (alpha^2 - 1) / (pi * log(alpha^2) * (1 - cosTheta^2))
function code(cosTheta, alpha) return Float32(Float32(Float32(alpha * alpha) + Float32(-1.0)) / Float32(Float32(Float32(pi) * log(Float32(alpha * alpha))) * Float32(Float32(1.0) - Float32(cosTheta * cosTheta)))) end
% GTR1 alternative (97.6% accuracy per report; Taylor expansion in alpha):
%   (alpha^2 - 1) / (pi * log(alpha^2) * (1 - cosTheta^2))
function tmp = code(cosTheta, alpha) tmp = ((alpha * alpha) + single(-1.0)) / ((single(pi) * log((alpha * alpha))) * (single(1.0) - (cosTheta * cosTheta))); end
\begin{array}{l}
\\
\frac{\alpha \cdot \alpha + -1}{\left(\pi \cdot \log \left(\alpha \cdot \alpha\right)\right) \cdot \left(1 - cosTheta \cdot cosTheta\right)}
\end{array}
Initial program 98.6%
Taylor expanded in alpha around 0
mul-1-negN/A
unsub-negN/A
--lowering--.f32N/A
unpow2N/A
*-lowering-*.f3297.6
Simplified97.6%
Final simplification97.6%
; Herbie alternative (97.6% accuracy): uses ln(alpha^2) = 2*ln(alpha)
; (valid since the precondition requires alpha >= 0.0001) and
; fma(cosTheta, -cosTheta, 1) for (1 - cosTheta^2).
(FPCore (cosTheta alpha) :precision binary32 (/ (fma alpha alpha -1.0) (* (* (* PI 2.0) (fma cosTheta (- cosTheta) 1.0)) (log alpha))))
/* GTR1 alternative (97.6% accuracy per report):
 *   fma(alpha, alpha, -1) / (2*pi * fma(cosTheta, -cosTheta, 1) * ln(alpha))
 * Rewrites ln(alpha^2) as 2*ln(alpha); valid because the input precondition
 * guarantees alpha >= 0.0001. */
float code(float cosTheta, float alpha) {
return fmaf(alpha, alpha, -1.0f) / (((((float) M_PI) * 2.0f) * fmaf(cosTheta, -cosTheta, 1.0f)) * logf(alpha));
}
# GTR1 alternative (97.6% accuracy per report):
#   fma(alpha, alpha, -1) / (2*pi * fma(cosTheta, -cosTheta, 1) * log(alpha))
# Uses log(alpha^2) = 2*log(alpha); the precondition guarantees alpha >= 0.0001.
function code(cosTheta, alpha) return Float32(fma(alpha, alpha, Float32(-1.0)) / Float32(Float32(Float32(Float32(pi) * Float32(2.0)) * fma(cosTheta, Float32(-cosTheta), Float32(1.0))) * log(alpha))) end
\begin{array}{l}
\\
\frac{\mathsf{fma}\left(\alpha, \alpha, -1\right)}{\left(\left(\pi \cdot 2\right) \cdot \mathsf{fma}\left(cosTheta, -cosTheta, 1\right)\right) \cdot \log \alpha}
\end{array}
Initial program 98.6%
+-commutativeN/A
associate-*l*N/A
accelerator-lowering-fma.f32N/A
sub-negN/A
accelerator-lowering-fma.f32N/A
metadata-evalN/A
*-lowering-*.f3298.6
Applied egg-rr98.6%
Taylor expanded in alpha around 0
associate-*r*N/A
*-commutativeN/A
associate-*r*N/A
*-lowering-*.f32N/A
*-lowering-*.f32N/A
*-lowering-*.f32N/A
PI-lowering-PI.f32N/A
+-commutativeN/A
mul-1-negN/A
unpow2N/A
distribute-rgt-neg-inN/A
accelerator-lowering-fma.f32N/A
neg-lowering-neg.f32N/A
log-lowering-log.f3297.6
Simplified97.6%
Taylor expanded in alpha around 0
sub-negN/A
unpow2N/A
metadata-evalN/A
accelerator-lowering-fma.f3297.6
Simplified97.6%
Final simplification97.6%
; Herbie alternative (97.5% accuracy): factor of 2 from ln(alpha^2) = 2*ln(alpha)
; folded into the fma term: fma(cosTheta, cosTheta*-2, 2) = 2 - 2*cosTheta^2.
(FPCore (cosTheta alpha) :precision binary32 (/ (fma alpha alpha -1.0) (* (* PI (log alpha)) (fma cosTheta (* cosTheta -2.0) 2.0))))
/* GTR1 alternative (97.5% accuracy per report):
 *   fma(alpha, alpha, -1) / (pi * ln(alpha) * (2 - 2*cosTheta^2))
 * The factor 2 from ln(alpha^2) = 2*ln(alpha) is folded into the fmaf term. */
float code(float cosTheta, float alpha) {
return fmaf(alpha, alpha, -1.0f) / ((((float) M_PI) * logf(alpha)) * fmaf(cosTheta, (cosTheta * -2.0f), 2.0f));
}
# GTR1 alternative (97.5% accuracy per report):
#   fma(alpha, alpha, -1) / (pi * log(alpha) * (2 - 2*cosTheta^2))
function code(cosTheta, alpha) return Float32(fma(alpha, alpha, Float32(-1.0)) / Float32(Float32(Float32(pi) * log(alpha)) * fma(cosTheta, Float32(cosTheta * Float32(-2.0)), Float32(2.0)))) end
\begin{array}{l}
\\
\frac{\mathsf{fma}\left(\alpha, \alpha, -1\right)}{\left(\pi \cdot \log \alpha\right) \cdot \mathsf{fma}\left(cosTheta, cosTheta \cdot -2, 2\right)}
\end{array}
Initial program 98.6%
+-commutativeN/A
associate-*l*N/A
accelerator-lowering-fma.f32N/A
sub-negN/A
accelerator-lowering-fma.f32N/A
metadata-evalN/A
*-lowering-*.f3298.6
Applied egg-rr98.6%
Taylor expanded in alpha around 0
associate-*r*N/A
*-commutativeN/A
associate-*r*N/A
*-lowering-*.f32N/A
*-lowering-*.f32N/A
*-lowering-*.f32N/A
PI-lowering-PI.f32N/A
+-commutativeN/A
mul-1-negN/A
unpow2N/A
distribute-rgt-neg-inN/A
accelerator-lowering-fma.f32N/A
neg-lowering-neg.f32N/A
log-lowering-log.f3297.6
Simplified97.6%
Taylor expanded in cosTheta around 0
associate-*r*N/A
distribute-rgt-outN/A
*-lowering-*.f32N/A
*-lowering-*.f32N/A
PI-lowering-PI.f32N/A
log-lowering-log.f32N/A
*-commutativeN/A
unpow2N/A
associate-*l*N/A
accelerator-lowering-fma.f32N/A
*-lowering-*.f3297.5
Simplified97.5%
Taylor expanded in alpha around 0
sub-negN/A
unpow2N/A
metadata-evalN/A
accelerator-lowering-fma.f3297.5
Simplified97.5%
; Herbie alternative (96.8% accuracy, Taylor-expanded in cosTheta):
;   0.5 * (cosTheta^2 + 1) * (alpha^2 - 1) / (pi * ln(alpha))
(FPCore (cosTheta alpha) :precision binary32 (* 0.5 (* (fma cosTheta cosTheta 1.0) (/ (fma alpha alpha -1.0) (* PI (log alpha))))))
/* GTR1 alternative (96.8% accuracy per report; Taylor expansion in cosTheta):
 *   0.5 * (cosTheta^2 + 1) * fma(alpha, alpha, -1) / (pi * ln(alpha)) */
float code(float cosTheta, float alpha) {
return 0.5f * (fmaf(cosTheta, cosTheta, 1.0f) * (fmaf(alpha, alpha, -1.0f) / (((float) M_PI) * logf(alpha))));
}
# GTR1 alternative (96.8% accuracy per report; Taylor expansion in cosTheta):
#   0.5 * (cosTheta^2 + 1) * fma(alpha, alpha, -1) / (pi * log(alpha))
function code(cosTheta, alpha) return Float32(Float32(0.5) * Float32(fma(cosTheta, cosTheta, Float32(1.0)) * Float32(fma(alpha, alpha, Float32(-1.0)) / Float32(Float32(pi) * log(alpha))))) end
\begin{array}{l}
\\
0.5 \cdot \left(\mathsf{fma}\left(cosTheta, cosTheta, 1\right) \cdot \frac{\mathsf{fma}\left(\alpha, \alpha, -1\right)}{\pi \cdot \log \alpha}\right)
\end{array}
Initial program 98.6%
+-commutativeN/A
associate-*l*N/A
accelerator-lowering-fma.f32N/A
sub-negN/A
accelerator-lowering-fma.f32N/A
metadata-evalN/A
*-lowering-*.f3298.6
Applied egg-rr98.6%
Taylor expanded in alpha around 0
associate-*r*N/A
*-commutativeN/A
associate-*r*N/A
*-lowering-*.f32N/A
*-lowering-*.f32N/A
*-lowering-*.f32N/A
PI-lowering-PI.f32N/A
+-commutativeN/A
mul-1-negN/A
unpow2N/A
distribute-rgt-neg-inN/A
accelerator-lowering-fma.f32N/A
neg-lowering-neg.f32N/A
log-lowering-log.f3297.6
Simplified97.6%
Taylor expanded in cosTheta around 0
distribute-lft-outN/A
*-lowering-*.f32N/A
associate-/l*N/A
distribute-lft1-inN/A
*-lowering-*.f32N/A
unpow2N/A
accelerator-lowering-fma.f32N/A
/-lowering-/.f32N/A
sub-negN/A
unpow2N/A
metadata-evalN/A
accelerator-lowering-fma.f32N/A
*-lowering-*.f32N/A
PI-lowering-PI.f32N/A
log-lowering-log.f3296.8
Simplified96.8%
; Herbie alternative (95.2% accuracy, Taylor-expanded in cosTheta around 0):
; drops cosTheta entirely: (alpha^2 - 1) / (pi * ln(alpha^2)).
(FPCore (cosTheta alpha) :precision binary32 (/ (+ (* alpha alpha) -1.0) (* PI (log (* alpha alpha)))))
/* GTR1 alternative (95.2% accuracy per report; Taylor expansion in cosTheta):
 *   (alpha^2 - 1) / (pi * ln(alpha^2)) — the cosTheta parameter is unused. */
float code(float cosTheta, float alpha) {
return ((alpha * alpha) + -1.0f) / (((float) M_PI) * logf((alpha * alpha)));
}
# GTR1 alternative (95.2% accuracy per report; Taylor expansion in cosTheta):
#   (alpha^2 - 1) / (pi * log(alpha^2)) — cosTheta is unused.
function code(cosTheta, alpha) return Float32(Float32(Float32(alpha * alpha) + Float32(-1.0)) / Float32(Float32(pi) * log(Float32(alpha * alpha)))) end
% GTR1 alternative (95.2% accuracy per report; Taylor expansion in cosTheta):
%   (alpha^2 - 1) / (pi * log(alpha^2)) — cosTheta is unused.
function tmp = code(cosTheta, alpha) tmp = ((alpha * alpha) + single(-1.0)) / (single(pi) * log((alpha * alpha))); end
\begin{array}{l}
\\
\frac{\alpha \cdot \alpha + -1}{\pi \cdot \log \left(\alpha \cdot \alpha\right)}
\end{array}
Initial program 98.6%
+-commutativeN/A
associate-*l*N/A
accelerator-lowering-fma.f32N/A
sub-negN/A
accelerator-lowering-fma.f32N/A
metadata-evalN/A
*-lowering-*.f3298.6
Applied egg-rr98.6%
Taylor expanded in cosTheta around 0
*-lowering-*.f32N/A
PI-lowering-PI.f32N/A
log-lowering-log.f32N/A
unpow2N/A
*-lowering-*.f3295.2
Simplified95.2%
Final simplification95.2%
; Herbie alternative (95.1% accuracy): as above with the numerator fused
; into fma(alpha, alpha, -1); cosTheta is unused.
(FPCore (cosTheta alpha) :precision binary32 (/ (fma alpha alpha -1.0) (* PI (log (* alpha alpha)))))
/* GTR1 alternative (95.1% accuracy per report):
 *   fma(alpha, alpha, -1) / (pi * ln(alpha^2)) — cosTheta is unused. */
float code(float cosTheta, float alpha) {
return fmaf(alpha, alpha, -1.0f) / (((float) M_PI) * logf((alpha * alpha)));
}
# GTR1 alternative (95.1% accuracy per report):
#   fma(alpha, alpha, -1) / (pi * log(alpha^2)) — cosTheta is unused.
function code(cosTheta, alpha) return Float32(fma(alpha, alpha, Float32(-1.0)) / Float32(Float32(pi) * log(Float32(alpha * alpha)))) end
\begin{array}{l}
\\
\frac{\mathsf{fma}\left(\alpha, \alpha, -1\right)}{\pi \cdot \log \left(\alpha \cdot \alpha\right)}
\end{array}
Initial program 98.6%
Taylor expanded in cosTheta around 0
/-lowering-/.f32N/A
sub-negN/A
unpow2N/A
metadata-evalN/A
accelerator-lowering-fma.f32N/A
*-lowering-*.f32N/A
PI-lowering-PI.f32N/A
log-lowering-log.f32N/A
unpow2N/A
*-lowering-*.f3295.1
Simplified95.1%
; Degenerate Herbie alternative (-0.0% accuracy): -1 / (pi * (0/0)).
; 0/0 is NaN under IEEE 754, so this evaluates to NaN for every input.
(FPCore (cosTheta alpha) :precision binary32 (/ -1.0 (* PI (/ 0.0 0.0))))
/* Degenerate GTR1 alternative (-0.0% accuracy per report):
 * 0.0f/0.0f is NaN under IEEE 754, so the result is NaN for every input.
 * Kept verbatim as a report artifact — do not "fix". */
float code(float cosTheta, float alpha) {
return -1.0f / (((float) M_PI) * (0.0f / 0.0f));
}
# Degenerate GTR1 alternative (-0.0% accuracy per report): 0/0 is NaN,
# so this returns NaN32 for every input. Kept verbatim as a report artifact.
function code(cosTheta, alpha) return Float32(Float32(-1.0) / Float32(Float32(pi) * Float32(Float32(0.0) / Float32(0.0)))) end
% Degenerate GTR1 alternative (-0.0% accuracy per report): 0/0 is NaN,
% so tmp is NaN for every input. Kept verbatim as a report artifact.
function tmp = code(cosTheta, alpha) tmp = single(-1.0) / (single(pi) * (single(0.0) / single(0.0))); end
\begin{array}{l}
\\
\frac{-1}{\pi \cdot \frac{0}{0}}
\end{array}
Initial program 98.6%
/-lowering-/.f32N/A
sub-negN/A
accelerator-lowering-fma.f32N/A
metadata-evalN/A
*-commutativeN/A
*-lowering-*.f32N/A
+-commutativeN/A
associate-*l*N/A
accelerator-lowering-fma.f32N/A
sub-negN/A
accelerator-lowering-fma.f32N/A
metadata-evalN/A
*-lowering-*.f32N/A
*-lowering-*.f32N/A
PI-lowering-PI.f32N/A
log-prodN/A
flip-+N/A
+-inversesN/A
+-inversesN/A
/-lowering-/.f32-0.0
Applied egg-rr-0.0%
Taylor expanded in cosTheta around 0
Simplified-0.0%
Taylor expanded in alpha around 0
Simplified-0.0%
Final simplification-0.0%
herbie shell --seed 2024204
; Original Herbie input: "GTR1 distribution",
;   (alpha^2 - 1) / (pi * ln(alpha^2) * (1 + (alpha^2 - 1) * cosTheta^2))
; Precondition restricts sampling to 0 <= cosTheta <= 1 and 0.0001 <= alpha <= 1.
(FPCore (cosTheta alpha)
:name "GTR1 distribution"
:precision binary32
:pre (and (and (<= 0.0 cosTheta) (<= cosTheta 1.0)) (and (<= 0.0001 alpha) (<= alpha 1.0)))
(/ (- (* alpha alpha) 1.0) (* (* PI (log (* alpha alpha))) (+ 1.0 (* (* (- (* alpha alpha) 1.0) cosTheta) cosTheta)))))