
(FPCore (u v) :precision binary32 (+ 1.0 (* v (log (+ u (* (- 1.0 u) (exp (/ -2.0 v))))))))
float code(float u, float v) {
return 1.0f + (v * logf((u + ((1.0f - u) * expf((-2.0f / v))))));
}
! Evaluates 1 + v*log(u + (1-u)*exp(-2/v)) in single precision (binary32).
! Interface unchanged from the generated version; adds implicit none and a
! named end per modern Fortran practice.
real(4) function code(u, v)
implicit none
real(4), intent (in) :: u
real(4), intent (in) :: v
! NOTE(review): assumes v /= 0 — exp(-2/v) is singular at v = 0 (the report's
! precondition below restricts v to [0, 109.746574]; confirm v > 0 at call sites).
code = 1.0e0 + (v * log((u + ((1.0e0 - u) * exp(((-2.0e0) / v))))))
end function code
function code(u, v) return Float32(Float32(1.0) + Float32(v * log(Float32(u + Float32(Float32(Float32(1.0) - u) * exp(Float32(Float32(-2.0) / v))))))) end
function tmp = code(u, v) tmp = single(1.0) + (v * log((u + ((single(1.0) - u) * exp((single(-2.0) / v)))))); end
\begin{array}{l}
\\
1 + v \cdot \log \left(u + \left(1 - u\right) \cdot e^{\frac{-2}{v}}\right)
\end{array}
Sampling outcomes in binary32 precision:
Herbie found 14 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (u v) :precision binary32 (+ 1.0 (* v (log (+ u (* (- 1.0 u) (exp (/ -2.0 v))))))))
/* Alternative 1: 1 + v*log(u + (1-u)*exp(-2/v)) in binary32.
 * NOTE(review): byte-identical to the initial program above — Herbie reports
 * the unchanged expression as its first alternative. */
float code(float u, float v) {
return 1.0f + (v * logf((u + ((1.0f - u) * expf((-2.0f / v))))));
}
! Alternative 1 (Fortran rendering): same expression as the initial program,
! single precision (real(4) == binary32).
real(4) function code(u, v)
real(4), intent (in) :: u
real(4), intent (in) :: v
code = 1.0e0 + (v * log((u + ((1.0e0 - u) * exp(((-2.0e0) / v))))))
end function
function code(u, v) return Float32(Float32(1.0) + Float32(v * log(Float32(u + Float32(Float32(Float32(1.0) - u) * exp(Float32(Float32(-2.0) / v))))))) end
function tmp = code(u, v) tmp = single(1.0) + (v * log((u + ((single(1.0) - u) * exp((single(-2.0) / v)))))); end
\begin{array}{l}
\\
1 + v \cdot \log \left(u + \left(1 - u\right) \cdot e^{\frac{-2}{v}}\right)
\end{array}
(FPCore (u v) :precision binary32 (fma v (log (+ u (/ (- 1.0 u) (exp (/ 2.0 v))))) 1.0))
float code(float u, float v) {
return fmaf(v, logf((u + ((1.0f - u) / expf((2.0f / v))))), 1.0f);
}
function code(u, v) return fma(v, log(Float32(u + Float32(Float32(Float32(1.0) - u) / exp(Float32(Float32(2.0) / v))))), Float32(1.0)) end
\begin{array}{l}
\\
\mathsf{fma}\left(v, \log \left(u + \frac{1 - u}{e^{\frac{2}{v}}}\right), 1\right)
\end{array}
Initial program 99.6%
lift-exp.f32 N/A
lift-/.f32 N/A
frac-2neg N/A
distribute-frac-neg2 N/A
exp-neg N/A
lower-/.f32 N/A
lower-exp.f32 N/A
lower-/.f32 N/A
metadata-eval 99.6
Applied rewrites99.6%
Taylor expanded in v around 0
+-commutative N/A
lower-fma.f32 N/A
Applied rewrites99.6%
Taylor expanded in v around 0
+-commutative N/A
lower-fma.f32 N/A
Applied rewrites99.6%
(FPCore (u v) :precision binary32 (+ 1.0 (* v (log (+ u (* (- 1.0 u) (pow E (/ -2.0 v))))))))
/* pow variant: writes exp(-2/v) as E^(-2/v) via powf with base M_E.
 * NOTE(review): M_E is POSIX/glibc, not strict ISO C — confirm the build
 * does not use a strict-conformance mode. */
float code(float u, float v) {
    float base = (float) M_E;                 /* Euler's number, rounded to float */
    float expo = -2.0f / v;
    float decay = powf(base, expo);
    return 1.0f + (v * logf(u + (1.0f - u) * decay));
}
function code(u, v) return Float32(Float32(1.0) + Float32(v * log(Float32(u + Float32(Float32(Float32(1.0) - u) * (Float32(exp(1)) ^ Float32(Float32(-2.0) / v))))))) end
function tmp = code(u, v) tmp = single(1.0) + (v * log((u + ((single(1.0) - u) * (single(2.71828182845904523536) ^ (single(-2.0) / v)))))); end
\begin{array}{l}
\\
1 + v \cdot \log \left(u + \left(1 - u\right) \cdot {e}^{\left(\frac{-2}{v}\right)}\right)
\end{array}
Initial program 99.5%
lift-exp.f32 N/A
*-lft-identity N/A
exp-prod N/A
lower-pow.f32 N/A
exp-1-e N/A
lower-E.f32 99.5
Applied rewrites99.5%
herbie shell --seed 2024230
(FPCore (u v)
:name "HairBSDF, sample_f, cosTheta"
:precision binary32
:pre (and (and (<= 1e-5 u) (<= u 1.0)) (and (<= 0.0 v) (<= v 109.746574)))
(+ 1.0 (* v (log (+ u (* (- 1.0 u) (exp (/ -2.0 v))))))))