
;; Herbie input (binary32): 1 + v*log(u + (1-u)*exp(-2/v))
(FPCore (u v) :precision binary32 (+ 1.0 (* v (log (+ u (* (- 1.0 u) (exp (/ -2.0 v))))))))
/* Herbie-generated binary32 translation of the initial program:
 * 1 + v*log(u + (1-u)*exp(-2/v)).
 * NOTE(review): evaluation order is accuracy-sensitive; do not reassociate. */
float code(float u, float v) {
return 1.0f + (v * logf((u + ((1.0f - u) * expf((-2.0f / v))))));
}
! Herbie-generated binary32 translation of the initial program:
! 1 + v*log(u + (1-u)*exp(-2/v)).
! NOTE(review): parenthesization is accuracy-sensitive; do not reassociate.
real(4) function code(u, v)
real(4), intent (in) :: u
real(4), intent (in) :: v
code = 1.0e0 + (v * log((u + ((1.0e0 - u) * exp(((-2.0e0) / v))))))
end function
# Herbie-generated Julia translation; Float32() wrappers force binary32 rounding after every operation.
function code(u, v) return Float32(Float32(1.0) + Float32(v * log(Float32(u + Float32(Float32(Float32(1.0) - u) * exp(Float32(Float32(-2.0) / v))))))) end
% Herbie-generated MATLAB translation; single() pins literals to binary32.
function tmp = code(u, v) tmp = single(1.0) + (v * log((u + ((single(1.0) - u) * exp((single(-2.0) / v)))))); end
\begin{array}{l}
\\
1 + v \cdot \log \left(u + \left(1 - u\right) \cdot e^{\frac{-2}{v}}\right)
\end{array}
Sampling outcomes in binary32 precision:
Herbie found 6 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
;; Alternative 1: identical to the initial program (baseline entry in the report).
(FPCore (u v) :precision binary32 (+ 1.0 (* v (log (+ u (* (- 1.0 u) (exp (/ -2.0 v))))))))
/* Alternative 1 (C): same expression as the initial program.
 * Do not reassociate — FP evaluation order is the point of this report. */
float code(float u, float v) {
return 1.0f + (v * logf((u + ((1.0f - u) * expf((-2.0f / v))))));
}
! Alternative 1 (Fortran): same expression as the initial program.
! Keep the parenthesization exactly as emitted by Herbie.
real(4) function code(u, v)
real(4), intent (in) :: u
real(4), intent (in) :: v
code = 1.0e0 + (v * log((u + ((1.0e0 - u) * exp(((-2.0e0) / v))))))
end function
# Alternative 1 (Julia): per-operation Float32() rounding, same expression as the initial program.
function code(u, v) return Float32(Float32(1.0) + Float32(v * log(Float32(u + Float32(Float32(Float32(1.0) - u) * exp(Float32(Float32(-2.0) / v))))))) end
% Alternative 1 (MATLAB): same expression as the initial program, literals pinned to single.
function tmp = code(u, v) tmp = single(1.0) + (v * log((u + ((single(1.0) - u) * exp((single(-2.0) / v)))))); end
\begin{array}{l}
\\
1 + v \cdot \log \left(u + \left(1 - u\right) \cdot e^{\frac{-2}{v}}\right)
\end{array}
;; Alternative 2: log argument rewritten as u - (u-1)*exp(-2/v), i.e. (1-u) re-expressed as -(u-1).
(FPCore (u v) :precision binary32 (+ (* (log (- u (* (- u 1.0) (exp (/ -2.0 v))))) v) 1.0))
/* Alternative 2 (C): log(u - (u-1)*exp(-2/v))*v + 1.
 * Algebraically equal to the initial program; different FP rounding behavior. */
float code(float u, float v) {
return (logf((u - ((u - 1.0f) * expf((-2.0f / v))))) * v) + 1.0f;
}
! Alternative 2 (Fortran): log(u - (u-1)*exp(-2/v))*v + 1.
! Keep the emitted parenthesization; rounding order is intentional.
real(4) function code(u, v)
real(4), intent (in) :: u
real(4), intent (in) :: v
code = (log((u - ((u - 1.0e0) * exp(((-2.0e0) / v))))) * v) + 1.0e0
end function
# Alternative 2 (Julia): log(u - (u-1)*exp(-2/v))*v + 1 with per-step Float32 rounding.
function code(u, v) return Float32(Float32(log(Float32(u - Float32(Float32(u - Float32(1.0)) * exp(Float32(Float32(-2.0) / v))))) * v) + Float32(1.0)) end
% Alternative 2 (MATLAB): log(u - (u-1)*exp(-2/v))*v + 1.
function tmp = code(u, v) tmp = (log((u - ((u - single(1.0)) * exp((single(-2.0) / v))))) * v) + single(1.0); end
\begin{array}{l}
\\
\log \left(u - \left(u - 1\right) \cdot e^{\frac{-2}{v}}\right) \cdot v + 1
\end{array}
Initial program: 99.7%
Final simplification: 99.7%
;; Alternative 3: exp(-2/v) replaced by a nested-division expansion
;; (Taylor expansion in v around -inf, per the derivation log below); 95.7% accuracy.
(FPCore (u v)
:precision binary32
(+
(*
(log
(-
u
(*
(/ -1.0 (- 1.0 (/ (- (/ (+ (/ -1.3333333333333333 v) -2.0) v) 2.0) v)))
(- 1.0 u))))
v)
1.0))
/* Alternative 3 (C): exp(-2/v) replaced by -1/(1 - ((-4/3/v - 2)/v - 2)/v),
 * a series expansion in 1/v (see derivation log). 95.7% accuracy per report. */
float code(float u, float v) {
return (logf((u - ((-1.0f / (1.0f - (((((-1.3333333333333333f / v) + -2.0f) / v) - 2.0f) / v))) * (1.0f - u)))) * v) + 1.0f;
}
! Alternative 3 (Fortran): exp(-2/v) replaced by a nested 1/v expansion.
! NOTE(review): the double-precision-looking literal -1.3333333333333333e0 is
! default real here, matching Herbie's emitted code — leave as-is.
real(4) function code(u, v)
real(4), intent (in) :: u
real(4), intent (in) :: v
code = (log((u - (((-1.0e0) / (1.0e0 - ((((((-1.3333333333333333e0) / v) + (-2.0e0)) / v) - 2.0e0) / v))) * (1.0e0 - u)))) * v) + 1.0e0
end function
# Alternative 3 (Julia): series replacement for exp(-2/v); per-step Float32 rounding.
function code(u, v) return Float32(Float32(log(Float32(u - Float32(Float32(Float32(-1.0) / Float32(Float32(1.0) - Float32(Float32(Float32(Float32(Float32(Float32(-1.3333333333333333) / v) + Float32(-2.0)) / v) - Float32(2.0)) / v))) * Float32(Float32(1.0) - u)))) * v) + Float32(1.0)) end
% Alternative 3 (MATLAB): series replacement for exp(-2/v).
function tmp = code(u, v) tmp = (log((u - ((single(-1.0) / (single(1.0) - (((((single(-1.3333333333333333) / v) + single(-2.0)) / v) - single(2.0)) / v))) * (single(1.0) - u)))) * v) + single(1.0); end
\begin{array}{l}
\\
\log \left(u - \frac{-1}{1 - \frac{\frac{\frac{-1.3333333333333333}{v} + -2}{v} - 2}{v}} \cdot \left(1 - u\right)\right) \cdot v + 1
\end{array}
Initial program: 99.7%
lift-exp.f32: N/A
sinh-+-cosh-rev: N/A
flip-+: N/A
sinh-cosh: N/A
sinh---cosh-rev: N/A
lower-/.f32: N/A
lift-/.f32: N/A
distribute-neg-frac: N/A
lower-exp.f32: N/A
lower-/.f32: N/A
metadata-eval: 99.6
Applied rewrites: 99.6%
Taylor expanded in v around -inf
mul-1-neg: N/A
unsub-neg: N/A
lower--.f32: N/A
lower-/.f32: N/A
Applied rewrites: 95.7%
Final simplification: 95.7%
;; Alternative 4: exp(-2/v) replaced by 1/(1 + 2/v + 2/v^2) (Taylor in v around inf); 94.7% accuracy.
(FPCore (u v) :precision binary32 (+ (* (log (- u (* (/ -1.0 (+ (/ 2.0 v) (+ (/ 2.0 (* v v)) 1.0))) (- 1.0 u)))) v) 1.0))
/* Alternative 4 (C): exp(-2/v) approximated by 1/(2/v + 2/(v*v) + 1).
 * 94.7% accuracy per report; evaluation order intentional. */
float code(float u, float v) {
return (logf((u - ((-1.0f / ((2.0f / v) + ((2.0f / (v * v)) + 1.0f))) * (1.0f - u)))) * v) + 1.0f;
}
! Alternative 4 (Fortran): exp(-2/v) approximated by 1/(2/v + 2/(v*v) + 1).
real(4) function code(u, v)
real(4), intent (in) :: u
real(4), intent (in) :: v
code = (log((u - (((-1.0e0) / ((2.0e0 / v) + ((2.0e0 / (v * v)) + 1.0e0))) * (1.0e0 - u)))) * v) + 1.0e0
end function
# Alternative 4 (Julia): exp(-2/v) approximated by 1/(2/v + 2/v^2 + 1); per-step Float32 rounding.
function code(u, v) return Float32(Float32(log(Float32(u - Float32(Float32(Float32(-1.0) / Float32(Float32(Float32(2.0) / v) + Float32(Float32(Float32(2.0) / Float32(v * v)) + Float32(1.0)))) * Float32(Float32(1.0) - u)))) * v) + Float32(1.0)) end
% Alternative 4 (MATLAB): exp(-2/v) approximated by 1/(2/v + 2/(v*v) + 1).
function tmp = code(u, v) tmp = (log((u - ((single(-1.0) / ((single(2.0) / v) + ((single(2.0) / (v * v)) + single(1.0)))) * (single(1.0) - u)))) * v) + single(1.0); end
\begin{array}{l}
\\
\log \left(u - \frac{-1}{\frac{2}{v} + \left(\frac{2}{v \cdot v} + 1\right)} \cdot \left(1 - u\right)\right) \cdot v + 1
\end{array}
Initial program: 99.7%
lift-exp.f32: N/A
sinh-+-cosh-rev: N/A
flip-+: N/A
sinh-cosh: N/A
sinh---cosh-rev: N/A
lower-/.f32: N/A
lift-/.f32: N/A
distribute-neg-frac: N/A
lower-exp.f32: N/A
lower-/.f32: N/A
metadata-eval: 99.6
Applied rewrites: 99.6%
Taylor expanded in v around inf
associate-*r/: N/A
metadata-eval: N/A
+-commutative: N/A
associate-+r+: N/A
lower-+.f32: N/A
+-commutative: N/A
lower-+.f32: N/A
lower-/.f32: N/A
unpow2: N/A
lower-*.f32: N/A
lower-/.f32: 94.7
Applied rewrites: 94.7%
Final simplification: 94.7%
;; Alternative 5: exp(-2/v) replaced by 1/(1 + 2/v) (first-order expansion in 1/v); 92.6% accuracy.
(FPCore (u v) :precision binary32 (+ (* (log (- u (* (/ -1.0 (- 1.0 (/ -2.0 v))) (- 1.0 u)))) v) 1.0))
/* Alternative 5 (C): exp(-2/v) approximated by -1/(1 - (-2/v)) = 1/(1 + 2/v).
 * 92.6% accuracy per report. */
float code(float u, float v) {
return (logf((u - ((-1.0f / (1.0f - (-2.0f / v))) * (1.0f - u)))) * v) + 1.0f;
}
! Alternative 5 (Fortran): exp(-2/v) approximated by 1/(1 + 2/v).
real(4) function code(u, v)
real(4), intent (in) :: u
real(4), intent (in) :: v
code = (log((u - (((-1.0e0) / (1.0e0 - ((-2.0e0) / v))) * (1.0e0 - u)))) * v) + 1.0e0
end function
# Alternative 5 (Julia): exp(-2/v) approximated by 1/(1 + 2/v); per-step Float32 rounding.
function code(u, v) return Float32(Float32(log(Float32(u - Float32(Float32(Float32(-1.0) / Float32(Float32(1.0) - Float32(Float32(-2.0) / v))) * Float32(Float32(1.0) - u)))) * v) + Float32(1.0)) end
% Alternative 5 (MATLAB): exp(-2/v) approximated by 1/(1 + 2/v).
function tmp = code(u, v) tmp = (log((u - ((single(-1.0) / (single(1.0) - (single(-2.0) / v))) * (single(1.0) - u)))) * v) + single(1.0); end
\begin{array}{l}
\\
\log \left(u - \frac{-1}{1 - \frac{-2}{v}} \cdot \left(1 - u\right)\right) \cdot v + 1
\end{array}
Initial program: 99.7%
lift-exp.f32: N/A
sinh-+-cosh-rev: N/A
flip-+: N/A
sinh-cosh: N/A
sinh---cosh-rev: N/A
lower-/.f32: N/A
lift-/.f32: N/A
distribute-neg-frac: N/A
lower-exp.f32: N/A
lower-/.f32: N/A
metadata-eval: 99.6
Applied rewrites: 99.6%
Taylor expanded in v around inf
cancel-sign-sub-inv: N/A
distribute-lft-neg-in: N/A
rem-log-exp: N/A
associate-*r/: N/A
metadata-eval: N/A
rec-exp: N/A
lower--.f32: N/A
log-rec: N/A
rem-log-exp: N/A
distribute-neg-frac: N/A
metadata-eval: N/A
lower-/.f32: 92.6
Applied rewrites: 92.6%
Final simplification: 92.6%
;; Alternative 6: whole expression collapsed to the constant 1 (Taylor in v around 0); 89.1% accuracy.
(FPCore (u v) :precision binary32 1.0)
/* Alternative 6 (C): constant 1.0 — the v->0 limit of the expression
 * (Taylor expansion in v around 0 per the derivation log). */
float code(float u, float v) {
return 1.0f;
}
! Alternative 6 (Fortran): constant 1.0 (v->0 limit; arguments intentionally unused).
real(4) function code(u, v)
real(4), intent (in) :: u
real(4), intent (in) :: v
code = 1.0e0
end function
# Alternative 6 (Julia): constant 1.0f0 (arguments intentionally unused).
function code(u, v) return Float32(1.0) end
% Alternative 6 (MATLAB): constant single(1.0) (arguments intentionally unused).
function tmp = code(u, v) tmp = single(1.0); end
\begin{array}{l}
\\
1
\end{array}
Initial program: 99.7%
Taylor expanded in v around 0
Applied rewrites: 89.1%
;; Alternative 7: constant -1 (Taylor in u around 0); only 5.4% accuracy — lowest-accuracy entry.
(FPCore (u v) :precision binary32 -1.0)
/* Alternative 7 (C): constant -1.0 (Taylor in u around 0).
 * Only 5.4% accuracy per report — fastest but least accurate. */
float code(float u, float v) {
return -1.0f;
}
! Alternative 7 (Fortran): constant -1.0 (Taylor in u around 0; 5.4% accuracy).
real(4) function code(u, v)
real(4), intent (in) :: u
real(4), intent (in) :: v
code = -1.0e0
end function
# Alternative 7 (Julia): constant -1.0f0.
function code(u, v) return Float32(-1.0) end
% Alternative 7 (MATLAB): constant single(-1.0).
function tmp = code(u, v) tmp = single(-1.0); end
\begin{array}{l}
\\
-1
\end{array}
Initial program: 99.7%
Taylor expanded in u around 0
Applied rewrites: 5.4%
herbie shell --seed 2024312
;; Original FPCore specification as given to Herbie, including the sampling
;; precondition: 1e-5 <= u <= 1 and 0 <= v <= 109.746574.
(FPCore (u v)
:name "HairBSDF, sample_f, cosTheta"
:precision binary32
:pre (and (and (<= 1e-5 u) (<= u 1.0)) (and (<= 0.0 v) (<= v 109.746574)))
(+ 1.0 (* v (log (+ u (* (- 1.0 u) (exp (/ -2.0 v))))))))