
;; Herbie input: 1 + v * log(u + (1 - u) * exp(-2/v)), evaluated in binary32.
;; NOTE(review): operation order is significant for float rounding; do not reassociate.
(FPCore (u v) :precision binary32 (+ 1.0 (* v (log (+ u (* (- 1.0 u) (exp (/ -2.0 v))))))))
// Computes 1 + v*log(u + (1 - u)*exp(-2/v)) in single precision.
// Generated numeric kernel: the exact expression shape fixes the rounding
// behavior, so it must not be reassociated or split into temporaries
// (assignments would force intermediate rounding under excess-precision modes).
float code(float u, float v) {
return 1.0f + (v * logf((u + ((1.0f - u) * expf((-2.0f / v))))));
}
! Computes 1 + v*log(u + (1 - u)*exp(-2/v)) in single precision (binary32).
! Generated numeric kernel: the operation order is significant for rounding,
! so the expression itself is kept exactly as emitted.
real(4) function code(u, v)
implicit none
real(4), intent (in) :: u
real(4), intent (in) :: v
code = 1.0e0 + (v * log((u + ((1.0e0 - u) * exp(((-2.0e0) / v))))))
end function code
# Computes 1 + v*log(u + (1 - u)*exp(-2/v)); every intermediate is explicitly
# rounded to Float32 to reproduce binary32 semantics exactly — do not simplify.
function code(u, v) return Float32(Float32(1.0) + Float32(v * log(Float32(u + Float32(Float32(Float32(1.0) - u) * exp(Float32(Float32(-2.0) / v))))))) end
% Computes 1 + v*log(u + (1 - u)*exp(-2/v)) using single-precision constants
% so the expression evaluates in binary32. Expression shape fixes rounding.
function tmp = code(u, v) tmp = single(1.0) + (v * log((u + ((single(1.0) - u) * exp((single(-2.0) / v)))))); end
\begin{array}{l}
\\
1 + v \cdot \log \left(u + \left(1 - u\right) \cdot e^{\frac{-2}{v}}\right)
\end{array}
Sampling outcomes in binary32 precision:
Herbie found 17 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
;; Alternative: identical to the input expression (baseline candidate).
(FPCore (u v) :precision binary32 (+ 1.0 (* v (log (+ u (* (- 1.0 u) (exp (/ -2.0 v))))))))
// Baseline alternative: 1 + v*log(u + (1 - u)*exp(-2/v)) in single precision.
// Keep the expression exactly as emitted; reordering changes float rounding.
float code(float u, float v) {
return 1.0f + (v * logf((u + ((1.0f - u) * expf((-2.0f / v))))));
}
! Baseline alternative: 1 + v*log(u + (1 - u)*exp(-2/v)) in single precision.
! Expression kept exactly as emitted; operation order fixes the rounding.
real(4) function code(u, v)
implicit none
real(4), intent (in) :: u
real(4), intent (in) :: v
code = 1.0e0 + (v * log((u + ((1.0e0 - u) * exp(((-2.0e0) / v))))))
end function code
# Baseline alternative; every intermediate explicitly rounded to Float32
# to reproduce binary32 semantics — do not simplify or reassociate.
function code(u, v) return Float32(Float32(1.0) + Float32(v * log(Float32(u + Float32(Float32(Float32(1.0) - u) * exp(Float32(Float32(-2.0) / v))))))) end
% Baseline alternative using single-precision constants (binary32 evaluation).
function tmp = code(u, v) tmp = single(1.0) + (v * log((u + ((single(1.0) - u) * exp((single(-2.0) / v)))))); end
\begin{array}{l}
\\
1 + v \cdot \log \left(u + \left(1 - u\right) \cdot e^{\frac{-2}{v}}\right)
\end{array}
;; Rewrite: the inner sum u + (1-u)*exp(-2/v) is refactored as
;; fma(exp(-2/v) * (1 - u*u), 1/(1 + u), u), using (1-u) = (1-u^2)/(1+u);
;; the fused multiply-add avoids one intermediate rounding.
(FPCore (u v) :precision binary32 (+ 1.0 (* v (log (fma (* (exp (/ -2.0 v)) (- 1.0 (* u u))) (/ 1.0 (+ 1.0 u)) u)))))
// fma-based alternative: the inner sum is computed as
// fmaf(expf(-2/v) * (1 - u*u), 1/(1 + u), u), which equals
// u + (1-u)*exp(-2/v) algebraically but rounds differently (one fused step).
float code(float u, float v) {
return 1.0f + (v * logf(fmaf((expf((-2.0f / v)) * (1.0f - (u * u))), (1.0f / (1.0f + u)), u)));
}
# fma-based alternative; explicit Float32 rounding at each step reproduces
# binary32 semantics — the fma fuses multiply and add with a single rounding.
function code(u, v) return Float32(Float32(1.0) + Float32(v * log(fma(Float32(exp(Float32(Float32(-2.0) / v)) * Float32(Float32(1.0) - Float32(u * u))), Float32(Float32(1.0) / Float32(Float32(1.0) + u)), u)))) end
\begin{array}{l}
\\
1 + v \cdot \log \left(\mathsf{fma}\left(e^{\frac{-2}{v}} \cdot \left(1 - u \cdot u\right), \frac{1}{1 + u}, u\right)\right)
\end{array}
Initial program 99.4%
Applied rewrites 99.5%
;; Rewrite: outermost add fused as fma(v, log(...), 1); inside,
;; fma(u, -u, 1) computes 1 - u^2 with one rounding, and (1-u^2)/(1+u) = 1-u.
(FPCore (u v) :precision binary32 (fma v (log (fma (exp (/ -2.0 v)) (/ (fma u (- u) 1.0) (+ 1.0 u)) u)) 1.0))
// Fully fma-fused alternative: fmaf(v, logf(...), 1) for the outer step and
// fmaf(u, -u, 1) == 1 - u*u inside; algebraically equal to the original,
// with fewer intermediate roundings. Keep the expression exactly as emitted.
float code(float u, float v) {
return fmaf(v, logf(fmaf(expf((-2.0f / v)), (fmaf(u, -u, 1.0f) / (1.0f + u)), u)), 1.0f);
}
# Fully fma-fused alternative with explicit Float32 rounding at each step;
# fma(u, -u, 1) computes 1 - u^2 with a single rounding.
function code(u, v) return fma(v, log(fma(exp(Float32(Float32(-2.0) / v)), Float32(fma(u, Float32(-u), Float32(1.0)) / Float32(Float32(1.0) + u)), u)), Float32(1.0)) end
\begin{array}{l}
\\
\mathsf{fma}\left(v, \log \left(\mathsf{fma}\left(e^{\frac{-2}{v}}, \frac{\mathsf{fma}\left(u, -u, 1\right)}{1 + u}, u\right)\right), 1\right)
\end{array}
Initial program 99.5%
Applied rewrites 99.4%
Taylor expanded in v around 0
Applied rewrites 86.6%
Taylor expanded in v around 0
+-commutative N/A
lower-fma.f32 N/A
Applied rewrites 99.5%
herbie shell --seed 2024219
;; Original job specification: expression name, working precision, and the
;; sampled input domain (1e-5 <= u <= 1, 0 <= v <= 109.746574).
(FPCore (u v)
:name "HairBSDF, sample_f, cosTheta"
:precision binary32
:pre (and (and (<= 1e-5 u) (<= u 1.0)) (and (<= 0.0 v) (<= v 109.746574)))
(+ 1.0 (* v (log (+ u (* (- 1.0 u) (exp (/ -2.0 v))))))))