
(FPCore (s u) :precision binary32 (* (* 3.0 s) (log (/ 1.0 (- 1.0 (/ (- u 0.25) 0.75))))))
float code(float s, float u) {
return (3.0f * s) * logf((1.0f / (1.0f - ((u - 0.25f) / 0.75f))));
}
! Disney BSSRDF scattering-profile sample (upper branch), binary32.
! Evaluates 3*s * log(1 / (1 - (u - 0.25)/0.75)); Herbie baseline form.
real(4) function code(s, u)
real(4), intent (in) :: s
real(4), intent (in) :: u
real(4) :: remaining
! Fraction of the profile not yet consumed by the sample u.
remaining = 1.0e0 - ((u - 0.25e0) / 0.75e0)
code = (3.0e0 * s) * log(1.0e0 / remaining)
end function
function code(s, u) return Float32(Float32(Float32(3.0) * s) * log(Float32(Float32(1.0) / Float32(Float32(1.0) - Float32(Float32(u - Float32(0.25)) / Float32(0.75)))))) end
function tmp = code(s, u) tmp = (single(3.0) * s) * log((single(1.0) / (single(1.0) - ((u - single(0.25)) / single(0.75))))); end
\begin{array}{l}
\\
\left(3 \cdot s\right) \cdot \log \left(\frac{1}{1 - \frac{u - 0.25}{0.75}}\right)
\end{array}
Sampling outcomes in binary32 precision:
Herbie found 6 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (s u) :precision binary32 (* (* 3.0 s) (log (/ 1.0 (- 1.0 (/ (- u 0.25) 0.75))))))
float code(float s, float u) {
return (3.0f * s) * logf((1.0f / (1.0f - ((u - 0.25f) / 0.75f))));
}
! Herbie-generated binary32 baseline: 3*s * log(1 / (1 - (u - 0.25)/0.75)).
! Report precondition: 0 <= s <= 256, 0.25 <= u <= 1.
real(4) function code(s, u)
real(4), intent (in) :: s
real(4), intent (in) :: u
code = (3.0e0 * s) * log((1.0e0 / (1.0e0 - ((u - 0.25e0) / 0.75e0))))
end function
function code(s, u) return Float32(Float32(Float32(3.0) * s) * log(Float32(Float32(1.0) / Float32(Float32(1.0) - Float32(Float32(u - Float32(0.25)) / Float32(0.75)))))) end
function tmp = code(s, u) tmp = (single(3.0) * s) * log((single(1.0) / (single(1.0) - ((u - single(0.25)) / single(0.75))))); end
\begin{array}{l}
\\
\left(3 \cdot s\right) \cdot \log \left(\frac{1}{1 - \frac{u - 0.25}{0.75}}\right)
\end{array}
(FPCore (s u) :precision binary32 (* (* 3.0 s) (log (/ 1.0 (- 1.0 (/ (- u 0.25) 0.75))))))
/* Herbie-generated binary32 baseline: 3*s * log(1 / (1 - (u - 0.25)/0.75)).
 * Report precondition: 0 <= s <= 256, 0.25 <= u <= 1. */
float code(float s, float u) {
return (3.0f * s) * logf((1.0f / (1.0f - ((u - 0.25f) / 0.75f))));
}
! Herbie-generated binary32 baseline: 3*s * log(1 / (1 - (u - 0.25)/0.75)).
real(4) function code(s, u)
real(4), intent (in) :: s
real(4), intent (in) :: u
code = (3.0e0 * s) * log((1.0e0 / (1.0e0 - ((u - 0.25e0) / 0.75e0))))
end function
function code(s, u) return Float32(Float32(Float32(3.0) * s) * log(Float32(Float32(1.0) / Float32(Float32(1.0) - Float32(Float32(u - Float32(0.25)) / Float32(0.75)))))) end
function tmp = code(s, u) tmp = (single(3.0) * s) * log((single(1.0) / (single(1.0) - ((u - single(0.25)) / single(0.75))))); end
\begin{array}{l}
\\
\left(3 \cdot s\right) \cdot \log \left(\frac{1}{1 - \frac{u - 0.25}{0.75}}\right)
\end{array}
Initial program 95.9%
(FPCore (s u) :precision binary32 (* (* 3.0 s) (log (/ 1.0 (+ (* -1.3333333333333333 (- u 0.25)) 1.0)))))
float code(float s, float u) {
return (3.0f * s) * logf((1.0f / ((-1.3333333333333333f * (u - 0.25f)) + 1.0f)));
}
! Herbie alternative: denominator rewritten as -4/3 * (u - 0.25) + 1,
! equivalent to 1 - (u - 0.25)/0.75 (division replaced by a multiply).
real(4) function code(s, u)
real(4), intent (in) :: s
real(4), intent (in) :: u
code = (3.0e0 * s) * log((1.0e0 / (((-1.3333333333333333e0) * (u - 0.25e0)) + 1.0e0)))
end function
function code(s, u) return Float32(Float32(Float32(3.0) * s) * log(Float32(Float32(1.0) / Float32(Float32(Float32(-1.3333333333333333) * Float32(u - Float32(0.25))) + Float32(1.0))))) end
function tmp = code(s, u) tmp = (single(3.0) * s) * log((single(1.0) / ((single(-1.3333333333333333) * (u - single(0.25))) + single(1.0)))); end
\begin{array}{l}
\\
\left(3 \cdot s\right) \cdot \log \left(\frac{1}{-1.3333333333333333 \cdot \left(u - 0.25\right) + 1}\right)
\end{array}
Initial program 95.9%
lift--.f32N/A
sub-negN/A
+-commutativeN/A
lower-+.f32N/A
lift-/.f32N/A
distribute-neg-frac2N/A
div-invN/A
*-commutativeN/A
lower-*.f32N/A
metadata-evalN/A
metadata-eval95.6
Applied rewrites95.6%
(FPCore (s u) :precision binary32 (* (* 3.0 s) (log (/ 1.0 (+ 1.3333333333333333 (* -1.3333333333333333 u))))))
float code(float s, float u) {
return (3.0f * s) * logf((1.0f / (1.3333333333333333f + (-1.3333333333333333f * u))));
}
! Herbie alternative: constants distributed, denominator = 4/3 - (4/3)*u.
real(4) function code(s, u)
real(4), intent (in) :: s
real(4), intent (in) :: u
code = (3.0e0 * s) * log((1.0e0 / (1.3333333333333333e0 + ((-1.3333333333333333e0) * u))))
end function
function code(s, u) return Float32(Float32(Float32(3.0) * s) * log(Float32(Float32(1.0) / Float32(Float32(1.3333333333333333) + Float32(Float32(-1.3333333333333333) * u))))) end
function tmp = code(s, u) tmp = (single(3.0) * s) * log((single(1.0) / (single(1.3333333333333333) + (single(-1.3333333333333333) * u)))); end
\begin{array}{l}
\\
\left(3 \cdot s\right) \cdot \log \left(\frac{1}{1.3333333333333333 + -1.3333333333333333 \cdot u}\right)
\end{array}
Initial program 95.9%
lift--.f32N/A
sub-negN/A
+-commutativeN/A
lower-+.f32N/A
lift-/.f32N/A
distribute-neg-frac2N/A
div-invN/A
*-commutativeN/A
lower-*.f32N/A
metadata-evalN/A
metadata-eval95.6
Applied rewrites95.6%
lift-*.f32N/A
*-commutativeN/A
metadata-evalN/A
metadata-evalN/A
distribute-rgt-neg-inN/A
div-invN/A
clear-numN/A
distribute-neg-fracN/A
metadata-evalN/A
lower-/.f32N/A
lower-/.f3295.6
Applied rewrites95.6%
lift-+.f32N/A
+-commutativeN/A
lift-/.f32N/A
lift-/.f32N/A
associate-/r/N/A
metadata-evalN/A
lift--.f32N/A
sub-negN/A
distribute-rgt-inN/A
metadata-evalN/A
metadata-evalN/A
+-commutativeN/A
associate-+r+N/A
metadata-evalN/A
metadata-evalN/A
lower-+.f32N/A
metadata-evalN/A
*-commutativeN/A
lower-*.f3295.3
Applied rewrites95.3%
(FPCore (s u) :precision binary32 (* (* (* (fma 0.5 u 1.0) u) 3.0) s))
float code(float s, float u) {
return ((fmaf(0.5f, u, 1.0f) * u) * 3.0f) * s;
}
function code(s, u) return Float32(Float32(Float32(fma(Float32(0.5), u, Float32(1.0)) * u) * Float32(3.0)) * s) end
\begin{array}{l}
\\
\left(\left(\mathsf{fma}\left(0.5, u, 1\right) \cdot u\right) \cdot 3\right) \cdot s
\end{array}
Initial program 95.9%
Taylor expanded in u around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f32N/A
+-commutativeN/A
lower-fma.f32N/A
lower-log.f3211.1
Applied rewrites11.1%
Taylor expanded in u around inf
Applied rewrites26.3%
lift-*.f32N/A
*-commutativeN/A
lift-*.f32N/A
associate-*r*N/A
lower-*.f32N/A
lower-*.f3226.3
Applied rewrites26.3%
Taylor expanded in u around inf
Applied rewrites29.9%
(FPCore (s u) :precision binary32 (* (* 3.0 s) (* (fma 0.5 u 1.0) u)))
float code(float s, float u) {
return (3.0f * s) * (fmaf(0.5f, u, 1.0f) * u);
}
function code(s, u) return Float32(Float32(Float32(3.0) * s) * Float32(fma(Float32(0.5), u, Float32(1.0)) * u)) end
\begin{array}{l}
\\
\left(3 \cdot s\right) \cdot \left(\mathsf{fma}\left(0.5, u, 1\right) \cdot u\right)
\end{array}
Initial program 95.9%
Taylor expanded in u around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f32N/A
+-commutativeN/A
lower-fma.f32N/A
lower-log.f3210.9
Applied rewrites11.0%
Taylor expanded in u around inf
Applied rewrites26.3%
Taylor expanded in u around inf
Applied rewrites29.9%
(FPCore (s u) :precision binary32 (* (* (* (+ 1.5 u) u) u) s))
/* Herbie alternative: cubic polynomial ((1.5 + u)*u)*u * s = s*(1.5*u^2 + u^3),
 * derived by Taylor expansion; the log call is gone entirely. */
float code(float s, float u) {
    float acc = 1.5f + u;
    acc *= u;
    acc *= u;
    return acc * s;
}
! Herbie alternative (Taylor series): s * (1.5*u**2 + u**3); no log call.
real(4) function code(s, u)
real(4), intent (in) :: s
real(4), intent (in) :: u
code = (((1.5e0 + u) * u) * u) * s
end function
function code(s, u) return Float32(Float32(Float32(Float32(Float32(1.5) + u) * u) * u) * s) end
function tmp = code(s, u) tmp = (((single(1.5) + u) * u) * u) * s; end
\begin{array}{l}
\\
\left(\left(\left(1.5 + u\right) \cdot u\right) \cdot u\right) \cdot s
\end{array}
Initial program 95.9%
Taylor expanded in u around 0
distribute-rgt-inN/A
associate-+r+N/A
+-commutativeN/A
associate-*r*N/A
distribute-lft-outN/A
*-commutativeN/A
distribute-lft-outN/A
associate-*l*N/A
*-commutativeN/A
associate-*l*N/A
*-commutativeN/A
distribute-lft-outN/A
unpow2N/A
associate-*l*N/A
distribute-lft-outN/A
Applied rewrites14.5%
Applied rewrites15.7%
Taylor expanded in u around inf
Applied rewrites27.6%
herbie shell --seed 2024313
(FPCore (s u)
:name "Disney BSSRDF, sample scattering profile, upper"
:precision binary32
:pre (and (and (<= 0.0 s) (<= s 256.0)) (and (<= 0.25 u) (<= u 1.0)))
(* (* 3.0 s) (log (/ 1.0 (- 1.0 (/ (- u 0.25) 0.75))))))