Disney BSSRDF, sample scattering profile, lower

Percentage Accurate: 61.4% → 99.4%
Time: 10.7s
Alternatives: 12
Speedup: 11.4×

Specification

?
\[\left(0 \leq s \land s \leq 256\right) \land \left(2.328306437 \cdot 10^{-10} \leq u \land u \leq 0.25\right)\]
\[\begin{array}{l} \\ s \cdot \log \left(\frac{1}{1 - 4 \cdot u}\right) \end{array} \]
(FPCore (s u) :precision binary32 (* s (log (/ 1.0 (- 1.0 (* 4.0 u))))))
float code(float s, float u) {
	return s * logf((1.0f / (1.0f - (4.0f * u))));
}
real(4) function code(s, u)
    ! Initial program: s * log(1 / (1 - 4*u)), evaluated in single precision.
    real(4), intent (in) :: s
    real(4), intent (in) :: u
    code = s * log((1.0e0 / (1.0e0 - (4.0e0 * u))))
end function
# Initial program: s * log(1/(1 - 4u)); each operation explicitly rounded to Float32.
function code(s, u)
	return Float32(s * log(Float32(Float32(1.0) / Float32(Float32(1.0) - Float32(Float32(4.0) * u)))))
end
% Initial program: s * log(1/(1 - 4*u)) with single-precision constants.
function tmp = code(s, u)
	tmp = s * log((single(1.0) / (single(1.0) - (single(4.0) * u))));
end
\begin{array}{l}

\\
s \cdot \log \left(\frac{1}{1 - 4 \cdot u}\right)
\end{array}

Sampling outcomes in binary32 precision:

Local Percentage Accuracy vs ?

The average percentage accuracy by input value. The horizontal axis shows the value of an input variable; the variable is chosen in the title. The vertical axis is accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion. These can be toggled with buttons below the plot. The line is an average, while dots represent individual samples.

Accuracy vs Speed?

Herbie found 12 alternatives:

AlternativeAccuracySpeedup
The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed–accuracy tradeoffs.

Initial Program: 61.4% accurate, 1.0× speedup?

\[\begin{array}{l} \\ s \cdot \log \left(\frac{1}{1 - 4 \cdot u}\right) \end{array} \]
(FPCore (s u) :precision binary32 (* s (log (/ 1.0 (- 1.0 (* 4.0 u))))))
float code(float s, float u) {
	return s * logf((1.0f / (1.0f - (4.0f * u))));
}
real(4) function code(s, u)
    ! Initial program: s * log(1 / (1 - 4*u)), evaluated in single precision.
    real(4), intent (in) :: s
    real(4), intent (in) :: u
    code = s * log((1.0e0 / (1.0e0 - (4.0e0 * u))))
end function
# Initial program: s * log(1/(1 - 4u)); each operation explicitly rounded to Float32.
function code(s, u)
	return Float32(s * log(Float32(Float32(1.0) / Float32(Float32(1.0) - Float32(Float32(4.0) * u)))))
end
% Initial program: s * log(1/(1 - 4*u)) with single-precision constants.
function tmp = code(s, u)
	tmp = s * log((single(1.0) / (single(1.0) - (single(4.0) * u))));
end
\begin{array}{l}

\\
s \cdot \log \left(\frac{1}{1 - 4 \cdot u}\right)
\end{array}

Alternative 1: 99.4% accurate, 1.1× speedup?

\[\begin{array}{l} \\ \mathsf{log1p}\left(u \cdot -4\right) \cdot \left(-s\right) \end{array} \]
(FPCore (s u) :precision binary32 (* (log1p (* u -4.0)) (- s)))
float code(float s, float u) {
	return log1pf((u * -4.0f)) * -s;
}
# Alternative 1: -s * log1p(-4u); Herbie reports 99.4% accuracy for this form.
function code(s, u)
	return Float32(log1p(Float32(u * Float32(-4.0))) * Float32(-s))
end
\begin{array}{l}

\\
\mathsf{log1p}\left(u \cdot -4\right) \cdot \left(-s\right)
\end{array}
Derivation
  1. Initial program 62.1%

    \[s \cdot \log \left(\frac{1}{1 - 4 \cdot u}\right) \]
  2. Add Preprocessing
  3. Taylor expanded in s around 0

    \[\leadsto \color{blue}{s \cdot \log \left(\frac{1}{1 - 4 \cdot u}\right)} \]
  4. Step-by-step derivation
    1. *-commutativeN/A

      \[\leadsto \color{blue}{\log \left(\frac{1}{1 - 4 \cdot u}\right) \cdot s} \]
    2. log-recN/A

      \[\leadsto \color{blue}{\left(\mathsf{neg}\left(\log \left(1 - 4 \cdot u\right)\right)\right)} \cdot s \]
    3. distribute-lft-neg-outN/A

      \[\leadsto \color{blue}{\mathsf{neg}\left(\log \left(1 - 4 \cdot u\right) \cdot s\right)} \]
    4. distribute-rgt-neg-inN/A

      \[\leadsto \color{blue}{\log \left(1 - 4 \cdot u\right) \cdot \left(\mathsf{neg}\left(s\right)\right)} \]
    5. *-lowering-*.f32N/A

      \[\leadsto \color{blue}{\log \left(1 - 4 \cdot u\right) \cdot \left(\mathsf{neg}\left(s\right)\right)} \]
    6. cancel-sign-sub-invN/A

      \[\leadsto \log \color{blue}{\left(1 + \left(\mathsf{neg}\left(4\right)\right) \cdot u\right)} \cdot \left(\mathsf{neg}\left(s\right)\right) \]
    7. metadata-evalN/A

      \[\leadsto \log \left(1 + \color{blue}{-4} \cdot u\right) \cdot \left(\mathsf{neg}\left(s\right)\right) \]
    8. accelerator-lowering-log1p.f32N/A

      \[\leadsto \color{blue}{\mathsf{log1p}\left(-4 \cdot u\right)} \cdot \left(\mathsf{neg}\left(s\right)\right) \]
    9. *-commutativeN/A

      \[\leadsto \mathsf{log1p}\left(\color{blue}{u \cdot -4}\right) \cdot \left(\mathsf{neg}\left(s\right)\right) \]
    10. *-lowering-*.f32N/A

      \[\leadsto \mathsf{log1p}\left(\color{blue}{u \cdot -4}\right) \cdot \left(\mathsf{neg}\left(s\right)\right) \]
    11. neg-lowering-neg.f3299.4

      \[\leadsto \mathsf{log1p}\left(u \cdot -4\right) \cdot \color{blue}{\left(-s\right)} \]
  5. Simplified99.4%

    \[\leadsto \color{blue}{\mathsf{log1p}\left(u \cdot -4\right) \cdot \left(-s\right)} \]
  6. Add Preprocessing

Alternative 2: 93.5% accurate, 3.7× speedup?

\[\begin{array}{l} \\ s \cdot \mathsf{fma}\left(\mathsf{fma}\left(u, \mathsf{fma}\left(u, 64, 21.333333333333332\right), 8\right), u \cdot u, u \cdot 4\right) \end{array} \]
(FPCore (s u)
 :precision binary32
 (* s (fma (fma u (fma u 64.0 21.333333333333332) 8.0) (* u u) (* u 4.0))))
float code(float s, float u) {
	return s * fmaf(fmaf(u, fmaf(u, 64.0f, 21.333333333333332f), 8.0f), (u * u), (u * 4.0f));
}
# Alternative 2: degree-4 Taylor polynomial of the log term, evaluated with fused multiply-adds.
function code(s, u)
	return Float32(s * fma(fma(u, fma(u, Float32(64.0), Float32(21.333333333333332)), Float32(8.0)), Float32(u * u), Float32(u * Float32(4.0))))
end
\begin{array}{l}

\\
s \cdot \mathsf{fma}\left(\mathsf{fma}\left(u, \mathsf{fma}\left(u, 64, 21.333333333333332\right), 8\right), u \cdot u, u \cdot 4\right)
\end{array}
Derivation
  1. Initial program 62.1%

    \[s \cdot \log \left(\frac{1}{1 - 4 \cdot u}\right) \]
  2. Add Preprocessing
  3. Taylor expanded in u around 0

    \[\leadsto s \cdot \color{blue}{\left(u \cdot \left(4 + u \cdot \left(8 + u \cdot \left(\frac{64}{3} + 64 \cdot u\right)\right)\right)\right)} \]
  4. Step-by-step derivation
    1. *-lowering-*.f32N/A

      \[\leadsto s \cdot \color{blue}{\left(u \cdot \left(4 + u \cdot \left(8 + u \cdot \left(\frac{64}{3} + 64 \cdot u\right)\right)\right)\right)} \]
    2. +-commutativeN/A

      \[\leadsto s \cdot \left(u \cdot \color{blue}{\left(u \cdot \left(8 + u \cdot \left(\frac{64}{3} + 64 \cdot u\right)\right) + 4\right)}\right) \]
    3. accelerator-lowering-fma.f32N/A

      \[\leadsto s \cdot \left(u \cdot \color{blue}{\mathsf{fma}\left(u, 8 + u \cdot \left(\frac{64}{3} + 64 \cdot u\right), 4\right)}\right) \]
    4. +-commutativeN/A

      \[\leadsto s \cdot \left(u \cdot \mathsf{fma}\left(u, \color{blue}{u \cdot \left(\frac{64}{3} + 64 \cdot u\right) + 8}, 4\right)\right) \]
    5. accelerator-lowering-fma.f32N/A

      \[\leadsto s \cdot \left(u \cdot \mathsf{fma}\left(u, \color{blue}{\mathsf{fma}\left(u, \frac{64}{3} + 64 \cdot u, 8\right)}, 4\right)\right) \]
    6. +-commutativeN/A

      \[\leadsto s \cdot \left(u \cdot \mathsf{fma}\left(u, \mathsf{fma}\left(u, \color{blue}{64 \cdot u + \frac{64}{3}}, 8\right), 4\right)\right) \]
    7. *-commutativeN/A

      \[\leadsto s \cdot \left(u \cdot \mathsf{fma}\left(u, \mathsf{fma}\left(u, \color{blue}{u \cdot 64} + \frac{64}{3}, 8\right), 4\right)\right) \]
    8. accelerator-lowering-fma.f3292.9

      \[\leadsto s \cdot \left(u \cdot \mathsf{fma}\left(u, \mathsf{fma}\left(u, \color{blue}{\mathsf{fma}\left(u, 64, 21.333333333333332\right)}, 8\right), 4\right)\right) \]
  5. Simplified92.9%

    \[\leadsto s \cdot \color{blue}{\left(u \cdot \mathsf{fma}\left(u, \mathsf{fma}\left(u, \mathsf{fma}\left(u, 64, 21.333333333333332\right), 8\right), 4\right)\right)} \]
  6. Step-by-step derivation
    1. distribute-rgt-inN/A

      \[\leadsto s \cdot \color{blue}{\left(\left(u \cdot \left(u \cdot \left(u \cdot 64 + \frac{64}{3}\right) + 8\right)\right) \cdot u + 4 \cdot u\right)} \]
    2. *-commutativeN/A

      \[\leadsto s \cdot \left(\color{blue}{\left(\left(u \cdot \left(u \cdot 64 + \frac{64}{3}\right) + 8\right) \cdot u\right)} \cdot u + 4 \cdot u\right) \]
    3. associate-*l*N/A

      \[\leadsto s \cdot \left(\color{blue}{\left(u \cdot \left(u \cdot 64 + \frac{64}{3}\right) + 8\right) \cdot \left(u \cdot u\right)} + 4 \cdot u\right) \]
    4. *-commutativeN/A

      \[\leadsto s \cdot \left(\left(u \cdot \left(u \cdot 64 + \frac{64}{3}\right) + 8\right) \cdot \left(u \cdot u\right) + \color{blue}{u \cdot 4}\right) \]
    5. accelerator-lowering-fma.f32N/A

      \[\leadsto s \cdot \color{blue}{\mathsf{fma}\left(u \cdot \left(u \cdot 64 + \frac{64}{3}\right) + 8, u \cdot u, u \cdot 4\right)} \]
    6. accelerator-lowering-fma.f32N/A

      \[\leadsto s \cdot \mathsf{fma}\left(\color{blue}{\mathsf{fma}\left(u, u \cdot 64 + \frac{64}{3}, 8\right)}, u \cdot u, u \cdot 4\right) \]
    7. accelerator-lowering-fma.f32N/A

      \[\leadsto s \cdot \mathsf{fma}\left(\mathsf{fma}\left(u, \color{blue}{\mathsf{fma}\left(u, 64, \frac{64}{3}\right)}, 8\right), u \cdot u, u \cdot 4\right) \]
    8. *-lowering-*.f32N/A

      \[\leadsto s \cdot \mathsf{fma}\left(\mathsf{fma}\left(u, \mathsf{fma}\left(u, 64, \frac{64}{3}\right), 8\right), \color{blue}{u \cdot u}, u \cdot 4\right) \]
    9. *-lowering-*.f3293.3

      \[\leadsto s \cdot \mathsf{fma}\left(\mathsf{fma}\left(u, \mathsf{fma}\left(u, 64, 21.333333333333332\right), 8\right), u \cdot u, \color{blue}{u \cdot 4}\right) \]
  7. Applied egg-rr93.3%

    \[\leadsto s \cdot \color{blue}{\mathsf{fma}\left(\mathsf{fma}\left(u, \mathsf{fma}\left(u, 64, 21.333333333333332\right), 8\right), u \cdot u, u \cdot 4\right)} \]
  8. Add Preprocessing

Alternative 3: 93.2% accurate, 4.3× speedup?

\[\begin{array}{l} \\ u \cdot \left(s \cdot \mathsf{fma}\left(u, \mathsf{fma}\left(u, \mathsf{fma}\left(u, 64, 21.333333333333332\right), 8\right), 4\right)\right) \end{array} \]
(FPCore (s u)
 :precision binary32
 (* u (* s (fma u (fma u (fma u 64.0 21.333333333333332) 8.0) 4.0))))
float code(float s, float u) {
	return u * (s * fmaf(u, fmaf(u, fmaf(u, 64.0f, 21.333333333333332f), 8.0f), 4.0f));
}
# Alternative 3: u * (s * p(u)) with p a nested-fma Taylor polynomial of the log term.
function code(s, u)
	return Float32(u * Float32(s * fma(u, fma(u, fma(u, Float32(64.0), Float32(21.333333333333332)), Float32(8.0)), Float32(4.0))))
end
\begin{array}{l}

\\
u \cdot \left(s \cdot \mathsf{fma}\left(u, \mathsf{fma}\left(u, \mathsf{fma}\left(u, 64, 21.333333333333332\right), 8\right), 4\right)\right)
\end{array}
Derivation
  1. Initial program 62.1%

    \[s \cdot \log \left(\frac{1}{1 - 4 \cdot u}\right) \]
  2. Add Preprocessing
  3. Taylor expanded in u around 0

    \[\leadsto s \cdot \color{blue}{\left(u \cdot \left(4 + u \cdot \left(8 + u \cdot \left(\frac{64}{3} + 64 \cdot u\right)\right)\right)\right)} \]
  4. Step-by-step derivation
    1. *-lowering-*.f32N/A

      \[\leadsto s \cdot \color{blue}{\left(u \cdot \left(4 + u \cdot \left(8 + u \cdot \left(\frac{64}{3} + 64 \cdot u\right)\right)\right)\right)} \]
    2. +-commutativeN/A

      \[\leadsto s \cdot \left(u \cdot \color{blue}{\left(u \cdot \left(8 + u \cdot \left(\frac{64}{3} + 64 \cdot u\right)\right) + 4\right)}\right) \]
    3. accelerator-lowering-fma.f32N/A

      \[\leadsto s \cdot \left(u \cdot \color{blue}{\mathsf{fma}\left(u, 8 + u \cdot \left(\frac{64}{3} + 64 \cdot u\right), 4\right)}\right) \]
    4. +-commutativeN/A

      \[\leadsto s \cdot \left(u \cdot \mathsf{fma}\left(u, \color{blue}{u \cdot \left(\frac{64}{3} + 64 \cdot u\right) + 8}, 4\right)\right) \]
    5. accelerator-lowering-fma.f32N/A

      \[\leadsto s \cdot \left(u \cdot \mathsf{fma}\left(u, \color{blue}{\mathsf{fma}\left(u, \frac{64}{3} + 64 \cdot u, 8\right)}, 4\right)\right) \]
    6. +-commutativeN/A

      \[\leadsto s \cdot \left(u \cdot \mathsf{fma}\left(u, \mathsf{fma}\left(u, \color{blue}{64 \cdot u + \frac{64}{3}}, 8\right), 4\right)\right) \]
    7. *-commutativeN/A

      \[\leadsto s \cdot \left(u \cdot \mathsf{fma}\left(u, \mathsf{fma}\left(u, \color{blue}{u \cdot 64} + \frac{64}{3}, 8\right), 4\right)\right) \]
    8. accelerator-lowering-fma.f3292.9

      \[\leadsto s \cdot \left(u \cdot \mathsf{fma}\left(u, \mathsf{fma}\left(u, \color{blue}{\mathsf{fma}\left(u, 64, 21.333333333333332\right)}, 8\right), 4\right)\right) \]
  5. Simplified92.9%

    \[\leadsto s \cdot \color{blue}{\left(u \cdot \mathsf{fma}\left(u, \mathsf{fma}\left(u, \mathsf{fma}\left(u, 64, 21.333333333333332\right), 8\right), 4\right)\right)} \]
  6. Step-by-step derivation
    1. *-commutativeN/A

      \[\leadsto s \cdot \color{blue}{\left(\left(u \cdot \left(u \cdot \left(u \cdot 64 + \frac{64}{3}\right) + 8\right) + 4\right) \cdot u\right)} \]
    2. associate-*r*N/A

      \[\leadsto \color{blue}{\left(s \cdot \left(u \cdot \left(u \cdot \left(u \cdot 64 + \frac{64}{3}\right) + 8\right) + 4\right)\right) \cdot u} \]
    3. *-lowering-*.f32N/A

      \[\leadsto \color{blue}{\left(s \cdot \left(u \cdot \left(u \cdot \left(u \cdot 64 + \frac{64}{3}\right) + 8\right) + 4\right)\right) \cdot u} \]
    4. *-lowering-*.f32N/A

      \[\leadsto \color{blue}{\left(s \cdot \left(u \cdot \left(u \cdot \left(u \cdot 64 + \frac{64}{3}\right) + 8\right) + 4\right)\right)} \cdot u \]
    5. accelerator-lowering-fma.f32N/A

      \[\leadsto \left(s \cdot \color{blue}{\mathsf{fma}\left(u, u \cdot \left(u \cdot 64 + \frac{64}{3}\right) + 8, 4\right)}\right) \cdot u \]
    6. accelerator-lowering-fma.f32N/A

      \[\leadsto \left(s \cdot \mathsf{fma}\left(u, \color{blue}{\mathsf{fma}\left(u, u \cdot 64 + \frac{64}{3}, 8\right)}, 4\right)\right) \cdot u \]
    7. accelerator-lowering-fma.f3292.9

      \[\leadsto \left(s \cdot \mathsf{fma}\left(u, \mathsf{fma}\left(u, \color{blue}{\mathsf{fma}\left(u, 64, 21.333333333333332\right)}, 8\right), 4\right)\right) \cdot u \]
  7. Applied egg-rr92.9%

    \[\leadsto \color{blue}{\left(s \cdot \mathsf{fma}\left(u, \mathsf{fma}\left(u, \mathsf{fma}\left(u, 64, 21.333333333333332\right), 8\right), 4\right)\right) \cdot u} \]
  8. Final simplification92.9%

    \[\leadsto u \cdot \left(s \cdot \mathsf{fma}\left(u, \mathsf{fma}\left(u, \mathsf{fma}\left(u, 64, 21.333333333333332\right), 8\right), 4\right)\right) \]
  9. Add Preprocessing

Alternative 4: 93.2% accurate, 4.3× speedup?

\[\begin{array}{l} \\ s \cdot \left(u \cdot \mathsf{fma}\left(u, \mathsf{fma}\left(u, \mathsf{fma}\left(u, 64, 21.333333333333332\right), 8\right), 4\right)\right) \end{array} \]
(FPCore (s u)
 :precision binary32
 (* s (* u (fma u (fma u (fma u 64.0 21.333333333333332) 8.0) 4.0))))
float code(float s, float u) {
	return s * (u * fmaf(u, fmaf(u, fmaf(u, 64.0f, 21.333333333333332f), 8.0f), 4.0f));
}
# Alternative 4: s * (u * p(u)) with p a nested-fma Taylor polynomial of the log term.
function code(s, u)
	return Float32(s * Float32(u * fma(u, fma(u, fma(u, Float32(64.0), Float32(21.333333333333332)), Float32(8.0)), Float32(4.0))))
end
\begin{array}{l}

\\
s \cdot \left(u \cdot \mathsf{fma}\left(u, \mathsf{fma}\left(u, \mathsf{fma}\left(u, 64, 21.333333333333332\right), 8\right), 4\right)\right)
\end{array}
Derivation
  1. Initial program 62.1%

    \[s \cdot \log \left(\frac{1}{1 - 4 \cdot u}\right) \]
  2. Add Preprocessing
  3. Taylor expanded in u around 0

    \[\leadsto s \cdot \color{blue}{\left(u \cdot \left(4 + u \cdot \left(8 + u \cdot \left(\frac{64}{3} + 64 \cdot u\right)\right)\right)\right)} \]
  4. Step-by-step derivation
    1. *-lowering-*.f32N/A

      \[\leadsto s \cdot \color{blue}{\left(u \cdot \left(4 + u \cdot \left(8 + u \cdot \left(\frac{64}{3} + 64 \cdot u\right)\right)\right)\right)} \]
    2. +-commutativeN/A

      \[\leadsto s \cdot \left(u \cdot \color{blue}{\left(u \cdot \left(8 + u \cdot \left(\frac{64}{3} + 64 \cdot u\right)\right) + 4\right)}\right) \]
    3. accelerator-lowering-fma.f32N/A

      \[\leadsto s \cdot \left(u \cdot \color{blue}{\mathsf{fma}\left(u, 8 + u \cdot \left(\frac{64}{3} + 64 \cdot u\right), 4\right)}\right) \]
    4. +-commutativeN/A

      \[\leadsto s \cdot \left(u \cdot \mathsf{fma}\left(u, \color{blue}{u \cdot \left(\frac{64}{3} + 64 \cdot u\right) + 8}, 4\right)\right) \]
    5. accelerator-lowering-fma.f32N/A

      \[\leadsto s \cdot \left(u \cdot \mathsf{fma}\left(u, \color{blue}{\mathsf{fma}\left(u, \frac{64}{3} + 64 \cdot u, 8\right)}, 4\right)\right) \]
    6. +-commutativeN/A

      \[\leadsto s \cdot \left(u \cdot \mathsf{fma}\left(u, \mathsf{fma}\left(u, \color{blue}{64 \cdot u + \frac{64}{3}}, 8\right), 4\right)\right) \]
    7. *-commutativeN/A

      \[\leadsto s \cdot \left(u \cdot \mathsf{fma}\left(u, \mathsf{fma}\left(u, \color{blue}{u \cdot 64} + \frac{64}{3}, 8\right), 4\right)\right) \]
    8. accelerator-lowering-fma.f3292.9

      \[\leadsto s \cdot \left(u \cdot \mathsf{fma}\left(u, \mathsf{fma}\left(u, \color{blue}{\mathsf{fma}\left(u, 64, 21.333333333333332\right)}, 8\right), 4\right)\right) \]
  5. Simplified92.9%

    \[\leadsto s \cdot \color{blue}{\left(u \cdot \mathsf{fma}\left(u, \mathsf{fma}\left(u, \mathsf{fma}\left(u, 64, 21.333333333333332\right), 8\right), 4\right)\right)} \]
  6. Add Preprocessing

Alternative 5: 91.3% accurate, 4.5× speedup?

\[\begin{array}{l} \\ s \cdot \mathsf{fma}\left(\mathsf{fma}\left(u, 21.333333333333332, 8\right), u \cdot u, u \cdot 4\right) \end{array} \]
(FPCore (s u)
 :precision binary32
 (* s (fma (fma u 21.333333333333332 8.0) (* u u) (* u 4.0))))
float code(float s, float u) {
	return s * fmaf(fmaf(u, 21.333333333333332f, 8.0f), (u * u), (u * 4.0f));
}
# Alternative 5: cubic Taylor approximation s*(21.33u^3 + 8u^2 + 4u), fma form.
function code(s, u)
	return Float32(s * fma(fma(u, Float32(21.333333333333332), Float32(8.0)), Float32(u * u), Float32(u * Float32(4.0))))
end
\begin{array}{l}

\\
s \cdot \mathsf{fma}\left(\mathsf{fma}\left(u, 21.333333333333332, 8\right), u \cdot u, u \cdot 4\right)
\end{array}
Derivation
  1. Initial program 62.1%

    \[s \cdot \log \left(\frac{1}{1 - 4 \cdot u}\right) \]
  2. Add Preprocessing
  3. Taylor expanded in u around 0

    \[\leadsto s \cdot \color{blue}{\left(u \cdot \left(4 + u \cdot \left(8 + \frac{64}{3} \cdot u\right)\right)\right)} \]
  4. Step-by-step derivation
    1. *-lowering-*.f32N/A

      \[\leadsto s \cdot \color{blue}{\left(u \cdot \left(4 + u \cdot \left(8 + \frac{64}{3} \cdot u\right)\right)\right)} \]
    2. +-commutativeN/A

      \[\leadsto s \cdot \left(u \cdot \color{blue}{\left(u \cdot \left(8 + \frac{64}{3} \cdot u\right) + 4\right)}\right) \]
    3. accelerator-lowering-fma.f32N/A

      \[\leadsto s \cdot \left(u \cdot \color{blue}{\mathsf{fma}\left(u, 8 + \frac{64}{3} \cdot u, 4\right)}\right) \]
    4. +-commutativeN/A

      \[\leadsto s \cdot \left(u \cdot \mathsf{fma}\left(u, \color{blue}{\frac{64}{3} \cdot u + 8}, 4\right)\right) \]
    5. *-commutativeN/A

      \[\leadsto s \cdot \left(u \cdot \mathsf{fma}\left(u, \color{blue}{u \cdot \frac{64}{3}} + 8, 4\right)\right) \]
    6. accelerator-lowering-fma.f3291.1

      \[\leadsto s \cdot \left(u \cdot \mathsf{fma}\left(u, \color{blue}{\mathsf{fma}\left(u, 21.333333333333332, 8\right)}, 4\right)\right) \]
  5. Simplified91.1%

    \[\leadsto s \cdot \color{blue}{\left(u \cdot \mathsf{fma}\left(u, \mathsf{fma}\left(u, 21.333333333333332, 8\right), 4\right)\right)} \]
  6. Step-by-step derivation
    1. distribute-rgt-inN/A

      \[\leadsto s \cdot \color{blue}{\left(\left(u \cdot \left(u \cdot \frac{64}{3} + 8\right)\right) \cdot u + 4 \cdot u\right)} \]
    2. *-commutativeN/A

      \[\leadsto s \cdot \left(\color{blue}{\left(\left(u \cdot \frac{64}{3} + 8\right) \cdot u\right)} \cdot u + 4 \cdot u\right) \]
    3. associate-*l*N/A

      \[\leadsto s \cdot \left(\color{blue}{\left(u \cdot \frac{64}{3} + 8\right) \cdot \left(u \cdot u\right)} + 4 \cdot u\right) \]
    4. accelerator-lowering-fma.f32N/A

      \[\leadsto s \cdot \color{blue}{\mathsf{fma}\left(u \cdot \frac{64}{3} + 8, u \cdot u, 4 \cdot u\right)} \]
    5. accelerator-lowering-fma.f32N/A

      \[\leadsto s \cdot \mathsf{fma}\left(\color{blue}{\mathsf{fma}\left(u, \frac{64}{3}, 8\right)}, u \cdot u, 4 \cdot u\right) \]
    6. *-lowering-*.f32N/A

      \[\leadsto s \cdot \mathsf{fma}\left(\mathsf{fma}\left(u, \frac{64}{3}, 8\right), \color{blue}{u \cdot u}, 4 \cdot u\right) \]
    7. *-commutativeN/A

      \[\leadsto s \cdot \mathsf{fma}\left(\mathsf{fma}\left(u, \frac{64}{3}, 8\right), u \cdot u, \color{blue}{u \cdot 4}\right) \]
    8. *-lowering-*.f3291.4

      \[\leadsto s \cdot \mathsf{fma}\left(\mathsf{fma}\left(u, 21.333333333333332, 8\right), u \cdot u, \color{blue}{u \cdot 4}\right) \]
  7. Applied egg-rr91.4%

    \[\leadsto s \cdot \color{blue}{\mathsf{fma}\left(\mathsf{fma}\left(u, 21.333333333333332, 8\right), u \cdot u, u \cdot 4\right)} \]
  8. Add Preprocessing

Alternative 6: 91.1% accurate, 5.4× speedup?

\[\begin{array}{l} \\ s \cdot \left(u \cdot \mathsf{fma}\left(u, \mathsf{fma}\left(u, 21.333333333333332, 8\right), 4\right)\right) \end{array} \]
(FPCore (s u)
 :precision binary32
 (* s (* u (fma u (fma u 21.333333333333332 8.0) 4.0))))
float code(float s, float u) {
	return s * (u * fmaf(u, fmaf(u, 21.333333333333332f, 8.0f), 4.0f));
}
# Alternative 6: s * (u * q(u)) with q the cubic Taylor polynomial, nested fma form.
function code(s, u)
	return Float32(s * Float32(u * fma(u, fma(u, Float32(21.333333333333332), Float32(8.0)), Float32(4.0))))
end
\begin{array}{l}

\\
s \cdot \left(u \cdot \mathsf{fma}\left(u, \mathsf{fma}\left(u, 21.333333333333332, 8\right), 4\right)\right)
\end{array}
Derivation
  1. Initial program 62.1%

    \[s \cdot \log \left(\frac{1}{1 - 4 \cdot u}\right) \]
  2. Add Preprocessing
  3. Taylor expanded in u around 0

    \[\leadsto s \cdot \color{blue}{\left(u \cdot \left(4 + u \cdot \left(8 + \frac{64}{3} \cdot u\right)\right)\right)} \]
  4. Step-by-step derivation
    1. *-lowering-*.f32N/A

      \[\leadsto s \cdot \color{blue}{\left(u \cdot \left(4 + u \cdot \left(8 + \frac{64}{3} \cdot u\right)\right)\right)} \]
    2. +-commutativeN/A

      \[\leadsto s \cdot \left(u \cdot \color{blue}{\left(u \cdot \left(8 + \frac{64}{3} \cdot u\right) + 4\right)}\right) \]
    3. accelerator-lowering-fma.f32N/A

      \[\leadsto s \cdot \left(u \cdot \color{blue}{\mathsf{fma}\left(u, 8 + \frac{64}{3} \cdot u, 4\right)}\right) \]
    4. +-commutativeN/A

      \[\leadsto s \cdot \left(u \cdot \mathsf{fma}\left(u, \color{blue}{\frac{64}{3} \cdot u + 8}, 4\right)\right) \]
    5. *-commutativeN/A

      \[\leadsto s \cdot \left(u \cdot \mathsf{fma}\left(u, \color{blue}{u \cdot \frac{64}{3}} + 8, 4\right)\right) \]
    6. accelerator-lowering-fma.f3291.1

      \[\leadsto s \cdot \left(u \cdot \mathsf{fma}\left(u, \color{blue}{\mathsf{fma}\left(u, 21.333333333333332, 8\right)}, 4\right)\right) \]
  5. Simplified91.1%

    \[\leadsto s \cdot \color{blue}{\left(u \cdot \mathsf{fma}\left(u, \mathsf{fma}\left(u, 21.333333333333332, 8\right), 4\right)\right)} \]
  6. Add Preprocessing

Alternative 7: 86.9% accurate, 5.7× speedup?

\[\begin{array}{l} \\ s \cdot \mathsf{fma}\left(u \cdot 8, u, u \cdot 4\right) \end{array} \]
(FPCore (s u) :precision binary32 (* s (fma (* u 8.0) u (* u 4.0))))
float code(float s, float u) {
	return s * fmaf((u * 8.0f), u, (u * 4.0f));
}
# Alternative 7: quadratic Taylor approximation s*(8u^2 + 4u) via one fma.
function code(s, u)
	return Float32(s * fma(Float32(u * Float32(8.0)), u, Float32(u * Float32(4.0))))
end
\begin{array}{l}

\\
s \cdot \mathsf{fma}\left(u \cdot 8, u, u \cdot 4\right)
\end{array}
Derivation
  1. Initial program 62.1%

    \[s \cdot \log \left(\frac{1}{1 - 4 \cdot u}\right) \]
  2. Add Preprocessing
  3. Taylor expanded in u around 0

    \[\leadsto s \cdot \color{blue}{\left(u \cdot \left(4 + 8 \cdot u\right)\right)} \]
  4. Step-by-step derivation
    1. *-lowering-*.f32N/A

      \[\leadsto s \cdot \color{blue}{\left(u \cdot \left(4 + 8 \cdot u\right)\right)} \]
    2. +-commutativeN/A

      \[\leadsto s \cdot \left(u \cdot \color{blue}{\left(8 \cdot u + 4\right)}\right) \]
    3. *-commutativeN/A

      \[\leadsto s \cdot \left(u \cdot \left(\color{blue}{u \cdot 8} + 4\right)\right) \]
    4. accelerator-lowering-fma.f3287.0

      \[\leadsto s \cdot \left(u \cdot \color{blue}{\mathsf{fma}\left(u, 8, 4\right)}\right) \]
  5. Simplified87.0%

    \[\leadsto s \cdot \color{blue}{\left(u \cdot \mathsf{fma}\left(u, 8, 4\right)\right)} \]
  6. Step-by-step derivation
    1. distribute-rgt-inN/A

      \[\leadsto s \cdot \color{blue}{\left(\left(u \cdot 8\right) \cdot u + 4 \cdot u\right)} \]
    2. *-commutativeN/A

      \[\leadsto s \cdot \left(\left(u \cdot 8\right) \cdot u + \color{blue}{u \cdot 4}\right) \]
    3. accelerator-lowering-fma.f32N/A

      \[\leadsto s \cdot \color{blue}{\mathsf{fma}\left(u \cdot 8, u, u \cdot 4\right)} \]
    4. *-lowering-*.f32N/A

      \[\leadsto s \cdot \mathsf{fma}\left(\color{blue}{u \cdot 8}, u, u \cdot 4\right) \]
    5. *-lowering-*.f3287.2

      \[\leadsto s \cdot \mathsf{fma}\left(u \cdot 8, u, \color{blue}{u \cdot 4}\right) \]
  7. Applied egg-rr87.2%

    \[\leadsto s \cdot \color{blue}{\mathsf{fma}\left(u \cdot 8, u, u \cdot 4\right)} \]
  8. Add Preprocessing

Alternative 8: 86.9% accurate, 5.7× speedup?

\[\begin{array}{l} \\ s \cdot \mathsf{fma}\left(8, u \cdot u, u \cdot 4\right) \end{array} \]
(FPCore (s u) :precision binary32 (* s (fma 8.0 (* u u) (* u 4.0))))
float code(float s, float u) {
	return s * fmaf(8.0f, (u * u), (u * 4.0f));
}
# Alternative 8: quadratic Taylor approximation s*(8u^2 + 4u), constant-first fma.
function code(s, u)
	return Float32(s * fma(Float32(8.0), Float32(u * u), Float32(u * Float32(4.0))))
end
\begin{array}{l}

\\
s \cdot \mathsf{fma}\left(8, u \cdot u, u \cdot 4\right)
\end{array}
Derivation
  1. Initial program 62.1%

    \[s \cdot \log \left(\frac{1}{1 - 4 \cdot u}\right) \]
  2. Add Preprocessing
  3. Taylor expanded in u around 0

    \[\leadsto s \cdot \color{blue}{\left(u \cdot \left(4 + u \cdot \left(8 + \frac{64}{3} \cdot u\right)\right)\right)} \]
  4. Step-by-step derivation
    1. *-lowering-*.f32N/A

      \[\leadsto s \cdot \color{blue}{\left(u \cdot \left(4 + u \cdot \left(8 + \frac{64}{3} \cdot u\right)\right)\right)} \]
    2. +-commutativeN/A

      \[\leadsto s \cdot \left(u \cdot \color{blue}{\left(u \cdot \left(8 + \frac{64}{3} \cdot u\right) + 4\right)}\right) \]
    3. accelerator-lowering-fma.f32N/A

      \[\leadsto s \cdot \left(u \cdot \color{blue}{\mathsf{fma}\left(u, 8 + \frac{64}{3} \cdot u, 4\right)}\right) \]
    4. +-commutativeN/A

      \[\leadsto s \cdot \left(u \cdot \mathsf{fma}\left(u, \color{blue}{\frac{64}{3} \cdot u + 8}, 4\right)\right) \]
    5. *-commutativeN/A

      \[\leadsto s \cdot \left(u \cdot \mathsf{fma}\left(u, \color{blue}{u \cdot \frac{64}{3}} + 8, 4\right)\right) \]
    6. accelerator-lowering-fma.f3291.1

      \[\leadsto s \cdot \left(u \cdot \mathsf{fma}\left(u, \color{blue}{\mathsf{fma}\left(u, 21.333333333333332, 8\right)}, 4\right)\right) \]
  5. Simplified91.1%

    \[\leadsto s \cdot \color{blue}{\left(u \cdot \mathsf{fma}\left(u, \mathsf{fma}\left(u, 21.333333333333332, 8\right), 4\right)\right)} \]
  6. Step-by-step derivation
    1. distribute-rgt-inN/A

      \[\leadsto s \cdot \color{blue}{\left(\left(u \cdot \left(u \cdot \frac{64}{3} + 8\right)\right) \cdot u + 4 \cdot u\right)} \]
    2. *-commutativeN/A

      \[\leadsto s \cdot \left(\color{blue}{\left(\left(u \cdot \frac{64}{3} + 8\right) \cdot u\right)} \cdot u + 4 \cdot u\right) \]
    3. associate-*l*N/A

      \[\leadsto s \cdot \left(\color{blue}{\left(u \cdot \frac{64}{3} + 8\right) \cdot \left(u \cdot u\right)} + 4 \cdot u\right) \]
    4. accelerator-lowering-fma.f32N/A

      \[\leadsto s \cdot \color{blue}{\mathsf{fma}\left(u \cdot \frac{64}{3} + 8, u \cdot u, 4 \cdot u\right)} \]
    5. accelerator-lowering-fma.f32N/A

      \[\leadsto s \cdot \mathsf{fma}\left(\color{blue}{\mathsf{fma}\left(u, \frac{64}{3}, 8\right)}, u \cdot u, 4 \cdot u\right) \]
    6. *-lowering-*.f32N/A

      \[\leadsto s \cdot \mathsf{fma}\left(\mathsf{fma}\left(u, \frac{64}{3}, 8\right), \color{blue}{u \cdot u}, 4 \cdot u\right) \]
    7. *-commutativeN/A

      \[\leadsto s \cdot \mathsf{fma}\left(\mathsf{fma}\left(u, \frac{64}{3}, 8\right), u \cdot u, \color{blue}{u \cdot 4}\right) \]
    8. *-lowering-*.f3291.4

      \[\leadsto s \cdot \mathsf{fma}\left(\mathsf{fma}\left(u, 21.333333333333332, 8\right), u \cdot u, \color{blue}{u \cdot 4}\right) \]
  7. Applied egg-rr91.4%

    \[\leadsto s \cdot \color{blue}{\mathsf{fma}\left(\mathsf{fma}\left(u, 21.333333333333332, 8\right), u \cdot u, u \cdot 4\right)} \]
  8. Taylor expanded in u around 0

    \[\leadsto s \cdot \mathsf{fma}\left(\color{blue}{8}, u \cdot u, u \cdot 4\right) \]
  9. Step-by-step derivation
    1. Simplified87.2%

      \[\leadsto s \cdot \mathsf{fma}\left(\color{blue}{8}, u \cdot u, u \cdot 4\right) \]
    2. Add Preprocessing

    Alternative 9: 86.7% accurate, 7.4× speedup?

    \[\begin{array}{l} \\ u \cdot \left(s \cdot \mathsf{fma}\left(u, 8, 4\right)\right) \end{array} \]
    (FPCore (s u) :precision binary32 (* u (* s (fma u 8.0 4.0))))
    float code(float s, float u) {
    	return u * (s * fmaf(u, 8.0f, 4.0f));
    }
    
    # Alternative 9: u * (s * (8u + 4)) via one fma.
    function code(s, u)
    	return Float32(u * Float32(s * fma(u, Float32(8.0), Float32(4.0))))
    end
    
    \begin{array}{l}
    
    \\
    u \cdot \left(s \cdot \mathsf{fma}\left(u, 8, 4\right)\right)
    \end{array}
    
    Derivation
    1. Initial program 62.1%

      \[s \cdot \log \left(\frac{1}{1 - 4 \cdot u}\right) \]
    2. Add Preprocessing
    3. Taylor expanded in s around 0

      \[\leadsto \color{blue}{s \cdot \log \left(\frac{1}{1 - 4 \cdot u}\right)} \]
    4. Step-by-step derivation
      1. *-commutativeN/A

        \[\leadsto \color{blue}{\log \left(\frac{1}{1 - 4 \cdot u}\right) \cdot s} \]
      2. log-recN/A

        \[\leadsto \color{blue}{\left(\mathsf{neg}\left(\log \left(1 - 4 \cdot u\right)\right)\right)} \cdot s \]
      3. distribute-lft-neg-outN/A

        \[\leadsto \color{blue}{\mathsf{neg}\left(\log \left(1 - 4 \cdot u\right) \cdot s\right)} \]
      4. distribute-rgt-neg-inN/A

        \[\leadsto \color{blue}{\log \left(1 - 4 \cdot u\right) \cdot \left(\mathsf{neg}\left(s\right)\right)} \]
      5. *-lowering-*.f32N/A

        \[\leadsto \color{blue}{\log \left(1 - 4 \cdot u\right) \cdot \left(\mathsf{neg}\left(s\right)\right)} \]
      6. cancel-sign-sub-invN/A

        \[\leadsto \log \color{blue}{\left(1 + \left(\mathsf{neg}\left(4\right)\right) \cdot u\right)} \cdot \left(\mathsf{neg}\left(s\right)\right) \]
      7. metadata-evalN/A

        \[\leadsto \log \left(1 + \color{blue}{-4} \cdot u\right) \cdot \left(\mathsf{neg}\left(s\right)\right) \]
      8. accelerator-lowering-log1p.f32N/A

        \[\leadsto \color{blue}{\mathsf{log1p}\left(-4 \cdot u\right)} \cdot \left(\mathsf{neg}\left(s\right)\right) \]
      9. *-commutativeN/A

        \[\leadsto \mathsf{log1p}\left(\color{blue}{u \cdot -4}\right) \cdot \left(\mathsf{neg}\left(s\right)\right) \]
      10. *-lowering-*.f32N/A

        \[\leadsto \mathsf{log1p}\left(\color{blue}{u \cdot -4}\right) \cdot \left(\mathsf{neg}\left(s\right)\right) \]
      11. neg-lowering-neg.f3299.4

        \[\leadsto \mathsf{log1p}\left(u \cdot -4\right) \cdot \color{blue}{\left(-s\right)} \]
    5. Simplified99.4%

      \[\leadsto \color{blue}{\mathsf{log1p}\left(u \cdot -4\right) \cdot \left(-s\right)} \]
    6. Taylor expanded in u around 0

      \[\leadsto \color{blue}{u \cdot \left(4 \cdot s + 8 \cdot \left(s \cdot u\right)\right)} \]
    7. Step-by-step derivation
      1. *-lowering-*.f32 — accuracy N/A

        \[\leadsto \color{blue}{u \cdot \left(4 \cdot s + 8 \cdot \left(s \cdot u\right)\right)} \]
      2. *-commutative — accuracy N/A

        \[\leadsto u \cdot \left(\color{blue}{s \cdot 4} + 8 \cdot \left(s \cdot u\right)\right) \]
      3. *-commutative — accuracy N/A

        \[\leadsto u \cdot \left(s \cdot 4 + \color{blue}{\left(s \cdot u\right) \cdot 8}\right) \]
      4. associate-*l* — accuracy N/A

        \[\leadsto u \cdot \left(s \cdot 4 + \color{blue}{s \cdot \left(u \cdot 8\right)}\right) \]
      5. *-commutative — accuracy N/A

        \[\leadsto u \cdot \left(s \cdot 4 + s \cdot \color{blue}{\left(8 \cdot u\right)}\right) \]
      6. distribute-lft-out — accuracy N/A

        \[\leadsto u \cdot \color{blue}{\left(s \cdot \left(4 + 8 \cdot u\right)\right)} \]
      7. *-lowering-*.f32 — accuracy N/A

        \[\leadsto u \cdot \color{blue}{\left(s \cdot \left(4 + 8 \cdot u\right)\right)} \]
      8. +-commutative — accuracy N/A

        \[\leadsto u \cdot \left(s \cdot \color{blue}{\left(8 \cdot u + 4\right)}\right) \]
      9. *-commutative — accuracy N/A

        \[\leadsto u \cdot \left(s \cdot \left(\color{blue}{u \cdot 8} + 4\right)\right) \]
      10. accelerator-lowering-fma.f32 — accuracy 87.0%

        \[\leadsto u \cdot \left(s \cdot \color{blue}{\mathsf{fma}\left(u, 8, 4\right)}\right) \]
    8. Simplified — 87.0% accurate

      \[\leadsto \color{blue}{u \cdot \left(s \cdot \mathsf{fma}\left(u, 8, 4\right)\right)} \]
    9. Add Preprocessing

    Alternative 10: 86.7% accurate, 7.4× speedup?

    \[\begin{array}{l} \\ s \cdot \left(u \cdot \mathsf{fma}\left(u, 8, 4\right)\right) \end{array} \]
    (FPCore (s u) :precision binary32 (* s (* u (fma u 8.0 4.0))))
    float code(float s, float u) {
    	return s * (u * fmaf(u, 8.0f, 4.0f));
    }
    
    function code(s, u)
    	# Degree-2 Taylor polynomial s * (u * (8u + 4)); the inner term uses a
    	# fused multiply-add, with every intermediate rounded to Float32.
    	return Float32(s * Float32(u * fma(u, Float32(8.0), Float32(4.0))))
    end
    
    \begin{array}{l}
    
    \\
    s \cdot \left(u \cdot \mathsf{fma}\left(u, 8, 4\right)\right)
    \end{array}
    
    Derivation
    1. Initial program 62.1%

      \[s \cdot \log \left(\frac{1}{1 - 4 \cdot u}\right) \]
    2. Add Preprocessing
    3. Taylor expanded in u around 0

      \[\leadsto s \cdot \color{blue}{\left(u \cdot \left(4 + 8 \cdot u\right)\right)} \]
    4. Step-by-step derivation
      1. *-lowering-*.f32 — accuracy N/A

        \[\leadsto s \cdot \color{blue}{\left(u \cdot \left(4 + 8 \cdot u\right)\right)} \]
      2. +-commutative — accuracy N/A

        \[\leadsto s \cdot \left(u \cdot \color{blue}{\left(8 \cdot u + 4\right)}\right) \]
      3. *-commutative — accuracy N/A

        \[\leadsto s \cdot \left(u \cdot \left(\color{blue}{u \cdot 8} + 4\right)\right) \]
      4. accelerator-lowering-fma.f32 — accuracy 87.0%

        \[\leadsto s \cdot \left(u \cdot \color{blue}{\mathsf{fma}\left(u, 8, 4\right)}\right) \]
    5. Simplified — 87.0% accurate

      \[\leadsto s \cdot \color{blue}{\left(u \cdot \mathsf{fma}\left(u, 8, 4\right)\right)} \]
    6. Add Preprocessing

    Alternative 11: 73.7% accurate, 11.4× speedup?

    \[\begin{array}{l} \\ s \cdot \left(u \cdot 4\right) \end{array} \]
    (FPCore (s u) :precision binary32 (* s (* u 4.0)))
    float code(float s, float u) {
    	/* First-order Taylor approximation of s * log(1/(1 - 4u)): s * 4u. */
    	const float four_u = u * 4.0f;
    	return s * four_u;
    }
    
    real(4) function code(s, u)
        ! First-order Taylor approximation of s * log(1/(1 - 4u)) about u = 0.
        real(4), intent (in) :: s
        real(4), intent (in) :: u
        code = s * (u * 4.0e0)
    end function
    
    function code(s, u)
    	# First-order Taylor approximation s * 4u, rounded to Float32 at each step.
    	return Float32(s * Float32(u * Float32(4.0)))
    end
    
    function tmp = code(s, u)
    	% First-order Taylor approximation s * 4u in single precision.
    	tmp = s * (u * single(4.0));
    end
    
    \begin{array}{l}
    
    \\
    s \cdot \left(u \cdot 4\right)
    \end{array}
    
    Derivation
    1. Initial program 62.1%

      \[s \cdot \log \left(\frac{1}{1 - 4 \cdot u}\right) \]
    2. Add Preprocessing
    3. Taylor expanded in u around 0

      \[\leadsto s \cdot \color{blue}{\left(4 \cdot u\right)} \]
    4. Step-by-step derivation
      1. *-lowering-*.f32 — accuracy 72.7%

        \[\leadsto s \cdot \color{blue}{\left(4 \cdot u\right)} \]
    5. Simplified — 72.7% accurate

      \[\leadsto s \cdot \color{blue}{\left(4 \cdot u\right)} \]
    6. Final simplification — 72.7% accurate

      \[\leadsto s \cdot \left(u \cdot 4\right) \]
    7. Add Preprocessing

    Alternative 12: 73.5% accurate, 11.4× speedup?

    \[\begin{array}{l} \\ 4 \cdot \left(u \cdot s\right) \end{array} \]
    (FPCore (s u) :precision binary32 (* 4.0 (* u s)))
    float code(float s, float u) {
    	/* Leading-order Taylor term of s * log(1/(1 - 4u)): 4 * u * s. */
    	const float us = u * s;
    	return 4.0f * us;
    }
    
    real(4) function code(s, u)
        ! Leading-order Taylor term of s * log(1/(1 - 4u)): 4 * (u * s).
        real(4), intent (in) :: s
        real(4), intent (in) :: u
        code = 4.0e0 * (u * s)
    end function
    
    function code(s, u)
    	# Leading-order Taylor term 4 * (u * s), rounded to Float32 at each step.
    	return Float32(Float32(4.0) * Float32(u * s))
    end
    
    function tmp = code(s, u)
    	% Leading-order Taylor term 4 * (u * s) in single precision.
    	tmp = single(4.0) * (u * s);
    end
    
    \begin{array}{l}
    
    \\
    4 \cdot \left(u \cdot s\right)
    \end{array}
    
    Derivation
    1. Initial program 62.1%

      \[s \cdot \log \left(\frac{1}{1 - 4 \cdot u}\right) \]
    2. Add Preprocessing
    3. Taylor expanded in u around 0

      \[\leadsto \color{blue}{4 \cdot \left(s \cdot u\right)} \]
    4. Step-by-step derivation
      1. *-lowering-*.f32 — accuracy N/A

        \[\leadsto \color{blue}{4 \cdot \left(s \cdot u\right)} \]
      2. *-commutative — accuracy N/A

        \[\leadsto 4 \cdot \color{blue}{\left(u \cdot s\right)} \]
      3. *-lowering-*.f32 — accuracy 72.5%

        \[\leadsto 4 \cdot \color{blue}{\left(u \cdot s\right)} \]
    5. Simplified — 72.5% accurate

      \[\leadsto \color{blue}{4 \cdot \left(u \cdot s\right)} \]
    6. Add Preprocessing

    Reproduce

    ?
    herbie shell --seed 2024204 
    (FPCore (s u)
      :name "Disney BSSRDF, sample scattering profile, lower"
      :precision binary32
      :pre (and (and (<= 0.0 s) (<= s 256.0)) (and (<= 2.328306437e-10 u) (<= u 0.25)))
      (* s (log (/ 1.0 (- 1.0 (* 4.0 u))))))