Beckmann Distribution sample, tan2theta, alphax == alphay

Percentage Accurate: 56.3% → 97.9%
Time: 6.8s
Alternatives: 9
Speedup: 10.5×

Specification

?
\[\left(0.0001 \leq \alpha \land \alpha \leq 1\right) \land \left(2.328306437 \cdot 10^{-10} \leq u0 \land u0 \leq 1\right)\]
\[\begin{array}{l} \\ \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \log \left(1 - u0\right) \end{array} \]
(FPCore (alpha u0)
 :precision binary32
 (* (* (- alpha) alpha) (log (- 1.0 u0))))
float code(float alpha, float u0) {
	return (-alpha * alpha) * logf((1.0f - u0));
}
! Computes (-alpha * alpha) * log(1 - u0) in single precision (binary32).
! Beckmann tan^2(theta) sampling term; alpha is roughness, u0 a uniform sample.
real(4) function code(alpha, u0)
    real(4), intent (in) :: alpha
    real(4), intent (in) :: u0
    code = (-alpha * alpha) * log((1.0e0 - u0))
end function
# Computes (-alpha) * alpha * log(1 - u0), with each intermediate explicitly
# rounded to Float32 to model binary32 arithmetic.
function code(alpha, u0)
	return Float32(Float32(Float32(-alpha) * alpha) * log(Float32(Float32(1.0) - u0)))
end
function tmp = code(alpha, u0)
	% Computes (-alpha * alpha) * log(1 - u0); the 1.0 literal is cast to
	% single (assumes alpha and u0 are passed as single — TODO confirm caller).
	tmp = (-alpha * alpha) * log((single(1.0) - u0));
end
\begin{array}{l}

\\
\left(\left(-\alpha\right) \cdot \alpha\right) \cdot \log \left(1 - u0\right)
\end{array}

Sampling outcomes in binary32 precision:

Local Percentage Accuracy vs ?

The average percentage accuracy by input value. Horizontal axis shows value of an input variable; the variable is chosen in the title. Vertical axis is accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion. These can be toggled with buttons below the plot. The line is an average while dots represent individual samples.

Accuracy vs Speed?

Herbie found 9 alternatives:

Alternative | Accuracy | Speedup
The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed-accuracy tradeoffs.

Initial Program: 56.3% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \log \left(1 - u0\right) \end{array} \]
(FPCore (alpha u0)
 :precision binary32
 (* (* (- alpha) alpha) (log (- 1.0 u0))))
// Initial program (56.3% accurate): (-alpha * alpha) * log(1 - u0) in binary32.
float code(float alpha, float u0) {
	return (-alpha * alpha) * logf((1.0f - u0));
}
! Initial program (56.3% accurate): (-alpha * alpha) * log(1 - u0), binary32.
real(4) function code(alpha, u0)
    real(4), intent (in) :: alpha
    real(4), intent (in) :: u0
    code = (-alpha * alpha) * log((1.0e0 - u0))
end function
# Initial program: (-alpha) * alpha * log(1 - u0), with explicit Float32
# rounding of every intermediate to model binary32 arithmetic.
function code(alpha, u0)
	return Float32(Float32(Float32(-alpha) * alpha) * log(Float32(Float32(1.0) - u0)))
end
function tmp = code(alpha, u0)
	% Initial program: (-alpha * alpha) * log(1 - u0); only the 1.0 literal is
	% cast to single (assumes inputs arrive as single — TODO confirm caller).
	tmp = (-alpha * alpha) * log((single(1.0) - u0));
end
\begin{array}{l}

\\
\left(\left(-\alpha\right) \cdot \alpha\right) \cdot \log \left(1 - u0\right)
\end{array}

Alternative 1: 97.9% accurate, 0.3× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;1 - u0 \leq 0.9880499839782715:\\ \;\;\;\;\frac{{\alpha}^{4}}{\frac{-1}{\frac{\alpha \cdot \alpha}{{\alpha}^{4}}}} \cdot \log \left(1 - u0\right)\\ \mathbf{else}:\\ \;\;\;\;\left(\left(-\alpha\right) \cdot \alpha\right) \cdot \left(\left(\left(-0.3333333333333333 \cdot u0 + -0.5\right) \cdot u0 - 1\right) \cdot u0\right)\\ \end{array} \end{array} \]
(FPCore (alpha u0)
 :precision binary32
 (if (<= (- 1.0 u0) 0.9880499839782715)
   (*
    (/ (pow alpha 4.0) (/ -1.0 (/ (* alpha alpha) (pow alpha 4.0))))
    (log (- 1.0 u0)))
   (*
    (* (- alpha) alpha)
    (* (- (* (+ (* -0.3333333333333333 u0) -0.5) u0) 1.0) u0))))
float code(float alpha, float u0) {
	float tmp;
	if ((1.0f - u0) <= 0.9880499839782715f) {
		tmp = (powf(alpha, 4.0f) / (-1.0f / ((alpha * alpha) / powf(alpha, 4.0f)))) * logf((1.0f - u0));
	} else {
		tmp = (-alpha * alpha) * (((((-0.3333333333333333f * u0) + -0.5f) * u0) - 1.0f) * u0);
	}
	return tmp;
}
! Herbie Alternative 1 (97.9% accurate): piecewise form of
! (-alpha * alpha) * log(1 - u0). The first branch keeps the log with an
! algebraically rearranged coefficient; the second replaces log(1 - u0) by
! its cubic Taylor polynomial about u0 = 0 (valid for small u0).
real(4) function code(alpha, u0)
    real(4), intent (in) :: alpha
    real(4), intent (in) :: u0
    real(4) :: tmp
    if ((1.0e0 - u0) <= 0.9880499839782715e0) then
        tmp = ((alpha ** 4.0e0) / ((-1.0e0) / ((alpha * alpha) / (alpha ** 4.0e0)))) * log((1.0e0 - u0))
    else
        tmp = (-alpha * alpha) * ((((((-0.3333333333333333e0) * u0) + (-0.5e0)) * u0) - 1.0e0) * u0)
    end if
    code = tmp
end function
# Herbie Alternative 1: piecewise form of (-alpha) * alpha * log(1 - u0) with
# explicit Float32 rounding. The else branch (small u0) uses the cubic Taylor
# polynomial of log(1 - u0) about u0 = 0.
function code(alpha, u0)
	tmp = Float32(0.0)
	if (Float32(Float32(1.0) - u0) <= Float32(0.9880499839782715))
		tmp = Float32(Float32((alpha ^ Float32(4.0)) / Float32(Float32(-1.0) / Float32(Float32(alpha * alpha) / (alpha ^ Float32(4.0))))) * log(Float32(Float32(1.0) - u0)));
	else
		tmp = Float32(Float32(Float32(-alpha) * alpha) * Float32(Float32(Float32(Float32(Float32(Float32(-0.3333333333333333) * u0) + Float32(-0.5)) * u0) - Float32(1.0)) * u0));
	end
	return tmp
end
function tmp_2 = code(alpha, u0)
	% Herbie Alternative 1: piecewise form of (-alpha * alpha) * log(1 - u0).
	% The else branch (small u0) uses a cubic Taylor polynomial of log(1 - u0).
	tmp = single(0.0);
	if ((single(1.0) - u0) <= single(0.9880499839782715))
		tmp = ((alpha ^ single(4.0)) / (single(-1.0) / ((alpha * alpha) / (alpha ^ single(4.0))))) * log((single(1.0) - u0));
	else
		tmp = (-alpha * alpha) * (((((single(-0.3333333333333333) * u0) + single(-0.5)) * u0) - single(1.0)) * u0);
	end
	tmp_2 = tmp;
end
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;1 - u0 \leq 0.9880499839782715:\\
\;\;\;\;\frac{{\alpha}^{4}}{\frac{-1}{\frac{\alpha \cdot \alpha}{{\alpha}^{4}}}} \cdot \log \left(1 - u0\right)\\

\mathbf{else}:\\
\;\;\;\;\left(\left(-\alpha\right) \cdot \alpha\right) \cdot \left(\left(\left(-0.3333333333333333 \cdot u0 + -0.5\right) \cdot u0 - 1\right) \cdot u0\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if (-.f32 #s(literal 1 binary32) u0) < 0.988049984

    1. Initial program 94.7%

      \[\left(\left(-\alpha\right) \cdot \alpha\right) \cdot \log \left(1 - u0\right) \]
    2. Add Preprocessing
    3. Step-by-step derivation
      1. lift-*.f32N/A

        \[\leadsto \color{blue}{\left(\left(-\alpha\right) \cdot \alpha\right)} \cdot \log \left(1 - u0\right) \]
      2. lift-neg.f32N/A

        \[\leadsto \left(\color{blue}{\left(\mathsf{neg}\left(\alpha\right)\right)} \cdot \alpha\right) \cdot \log \left(1 - u0\right) \]
      3. distribute-lft-neg-outN/A

        \[\leadsto \color{blue}{\left(\mathsf{neg}\left(\alpha \cdot \alpha\right)\right)} \cdot \log \left(1 - u0\right) \]
      4. neg-sub0N/A

        \[\leadsto \color{blue}{\left(0 - \alpha \cdot \alpha\right)} \cdot \log \left(1 - u0\right) \]
      5. flip--N/A

        \[\leadsto \color{blue}{\frac{0 \cdot 0 - \left(\alpha \cdot \alpha\right) \cdot \left(\alpha \cdot \alpha\right)}{0 + \alpha \cdot \alpha}} \cdot \log \left(1 - u0\right) \]
      6. +-lft-identityN/A

        \[\leadsto \frac{0 \cdot 0 - \left(\alpha \cdot \alpha\right) \cdot \left(\alpha \cdot \alpha\right)}{\color{blue}{\alpha \cdot \alpha}} \cdot \log \left(1 - u0\right) \]
      7. lower-/.f32N/A

        \[\leadsto \color{blue}{\frac{0 \cdot 0 - \left(\alpha \cdot \alpha\right) \cdot \left(\alpha \cdot \alpha\right)}{\alpha \cdot \alpha}} \cdot \log \left(1 - u0\right) \]
      8. metadata-evalN/A

        \[\leadsto \frac{\color{blue}{0} - \left(\alpha \cdot \alpha\right) \cdot \left(\alpha \cdot \alpha\right)}{\alpha \cdot \alpha} \cdot \log \left(1 - u0\right) \]
      9. sub0-negN/A

        \[\leadsto \frac{\color{blue}{\mathsf{neg}\left(\left(\alpha \cdot \alpha\right) \cdot \left(\alpha \cdot \alpha\right)\right)}}{\alpha \cdot \alpha} \cdot \log \left(1 - u0\right) \]
      10. lower-neg.f32N/A

        \[\leadsto \frac{\color{blue}{-\left(\alpha \cdot \alpha\right) \cdot \left(\alpha \cdot \alpha\right)}}{\alpha \cdot \alpha} \cdot \log \left(1 - u0\right) \]
      11. pow2N/A

        \[\leadsto \frac{-\color{blue}{{\alpha}^{2}} \cdot \left(\alpha \cdot \alpha\right)}{\alpha \cdot \alpha} \cdot \log \left(1 - u0\right) \]
      12. pow2N/A

        \[\leadsto \frac{-{\alpha}^{2} \cdot \color{blue}{{\alpha}^{2}}}{\alpha \cdot \alpha} \cdot \log \left(1 - u0\right) \]
      13. pow-prod-upN/A

        \[\leadsto \frac{-\color{blue}{{\alpha}^{\left(2 + 2\right)}}}{\alpha \cdot \alpha} \cdot \log \left(1 - u0\right) \]
      14. lower-pow.f32N/A

        \[\leadsto \frac{-\color{blue}{{\alpha}^{\left(2 + 2\right)}}}{\alpha \cdot \alpha} \cdot \log \left(1 - u0\right) \]
      15. metadata-evalN/A

        \[\leadsto \frac{-{\alpha}^{\color{blue}{4}}}{\alpha \cdot \alpha} \cdot \log \left(1 - u0\right) \]
      16. lower-*.f3294.8

        \[\leadsto \frac{-{\alpha}^{4}}{\color{blue}{\alpha \cdot \alpha}} \cdot \log \left(1 - u0\right) \]
    4. Applied rewrites94.8%

      \[\leadsto \color{blue}{\frac{-{\alpha}^{4}}{\alpha \cdot \alpha}} \cdot \log \left(1 - u0\right) \]
    5. Step-by-step derivation
      1. lift-*.f32N/A

        \[\leadsto \frac{-{\alpha}^{4}}{\color{blue}{\alpha \cdot \alpha}} \cdot \log \left(1 - u0\right) \]
      2. pow2N/A

        \[\leadsto \frac{-{\alpha}^{4}}{\color{blue}{{\alpha}^{2}}} \cdot \log \left(1 - u0\right) \]
      3. metadata-evalN/A

        \[\leadsto \frac{-{\alpha}^{4}}{{\alpha}^{\color{blue}{\left(4 - 2\right)}}} \cdot \log \left(1 - u0\right) \]
      4. pow-divN/A

        \[\leadsto \frac{-{\alpha}^{4}}{\color{blue}{\frac{{\alpha}^{4}}{{\alpha}^{2}}}} \cdot \log \left(1 - u0\right) \]
      5. metadata-evalN/A

        \[\leadsto \frac{-{\alpha}^{4}}{\frac{{\alpha}^{\color{blue}{\left(2 + 2\right)}}}{{\alpha}^{2}}} \cdot \log \left(1 - u0\right) \]
      6. pow-prod-upN/A

        \[\leadsto \frac{-{\alpha}^{4}}{\frac{\color{blue}{{\alpha}^{2} \cdot {\alpha}^{2}}}{{\alpha}^{2}}} \cdot \log \left(1 - u0\right) \]
      7. pow-prod-downN/A

        \[\leadsto \frac{-{\alpha}^{4}}{\frac{\color{blue}{{\left(\alpha \cdot \alpha\right)}^{2}}}{{\alpha}^{2}}} \cdot \log \left(1 - u0\right) \]
      8. lift-*.f32N/A

        \[\leadsto \frac{-{\alpha}^{4}}{\frac{{\color{blue}{\left(\alpha \cdot \alpha\right)}}^{2}}{{\alpha}^{2}}} \cdot \log \left(1 - u0\right) \]
      9. pow2N/A

        \[\leadsto \frac{-{\alpha}^{4}}{\frac{\color{blue}{\left(\alpha \cdot \alpha\right) \cdot \left(\alpha \cdot \alpha\right)}}{{\alpha}^{2}}} \cdot \log \left(1 - u0\right) \]
      10. pow2N/A

        \[\leadsto \frac{-{\alpha}^{4}}{\frac{\left(\alpha \cdot \alpha\right) \cdot \left(\alpha \cdot \alpha\right)}{\color{blue}{\alpha \cdot \alpha}}} \cdot \log \left(1 - u0\right) \]
      11. lift-*.f32N/A

        \[\leadsto \frac{-{\alpha}^{4}}{\frac{\color{blue}{\left(\alpha \cdot \alpha\right)} \cdot \left(\alpha \cdot \alpha\right)}{\alpha \cdot \alpha}} \cdot \log \left(1 - u0\right) \]
      12. lift-*.f32N/A

        \[\leadsto \frac{-{\alpha}^{4}}{\frac{\left(\alpha \cdot \alpha\right) \cdot \color{blue}{\left(\alpha \cdot \alpha\right)}}{\alpha \cdot \alpha}} \cdot \log \left(1 - u0\right) \]
      13. sqr-negN/A

        \[\leadsto \frac{-{\alpha}^{4}}{\frac{\color{blue}{\left(\left(\mathsf{neg}\left(\alpha\right)\right) \cdot \left(\mathsf{neg}\left(\alpha\right)\right)\right)} \cdot \left(\alpha \cdot \alpha\right)}{\alpha \cdot \alpha}} \cdot \log \left(1 - u0\right) \]
      14. lift-neg.f32N/A

        \[\leadsto \frac{-{\alpha}^{4}}{\frac{\left(\color{blue}{\left(-\alpha\right)} \cdot \left(\mathsf{neg}\left(\alpha\right)\right)\right) \cdot \left(\alpha \cdot \alpha\right)}{\alpha \cdot \alpha}} \cdot \log \left(1 - u0\right) \]
      15. lift-neg.f32N/A

        \[\leadsto \frac{-{\alpha}^{4}}{\frac{\left(\left(-\alpha\right) \cdot \color{blue}{\left(-\alpha\right)}\right) \cdot \left(\alpha \cdot \alpha\right)}{\alpha \cdot \alpha}} \cdot \log \left(1 - u0\right) \]
      16. swap-sqrN/A

        \[\leadsto \frac{-{\alpha}^{4}}{\frac{\color{blue}{\left(\left(-\alpha\right) \cdot \alpha\right) \cdot \left(\left(-\alpha\right) \cdot \alpha\right)}}{\alpha \cdot \alpha}} \cdot \log \left(1 - u0\right) \]
      17. lift-*.f32N/A

        \[\leadsto \frac{-{\alpha}^{4}}{\frac{\color{blue}{\left(\left(-\alpha\right) \cdot \alpha\right)} \cdot \left(\left(-\alpha\right) \cdot \alpha\right)}{\alpha \cdot \alpha}} \cdot \log \left(1 - u0\right) \]
      18. lift-*.f32N/A

        \[\leadsto \frac{-{\alpha}^{4}}{\frac{\left(\left(-\alpha\right) \cdot \alpha\right) \cdot \color{blue}{\left(\left(-\alpha\right) \cdot \alpha\right)}}{\alpha \cdot \alpha}} \cdot \log \left(1 - u0\right) \]
      19. lift-*.f32N/A

        \[\leadsto \frac{-{\alpha}^{4}}{\frac{\color{blue}{\left(\left(-\alpha\right) \cdot \alpha\right) \cdot \left(\left(-\alpha\right) \cdot \alpha\right)}}{\alpha \cdot \alpha}} \cdot \log \left(1 - u0\right) \]
      20. lift-*.f32N/A

        \[\leadsto \frac{-{\alpha}^{4}}{\frac{\left(\left(-\alpha\right) \cdot \alpha\right) \cdot \left(\left(-\alpha\right) \cdot \alpha\right)}{\color{blue}{\alpha \cdot \alpha}}} \cdot \log \left(1 - u0\right) \]
      21. clear-numN/A

        \[\leadsto \frac{-{\alpha}^{4}}{\color{blue}{\frac{1}{\frac{\alpha \cdot \alpha}{\left(\left(-\alpha\right) \cdot \alpha\right) \cdot \left(\left(-\alpha\right) \cdot \alpha\right)}}}} \cdot \log \left(1 - u0\right) \]
      22. lower-/.f32N/A

        \[\leadsto \frac{-{\alpha}^{4}}{\color{blue}{\frac{1}{\frac{\alpha \cdot \alpha}{\left(\left(-\alpha\right) \cdot \alpha\right) \cdot \left(\left(-\alpha\right) \cdot \alpha\right)}}}} \cdot \log \left(1 - u0\right) \]
      23. lower-/.f3294.9

        \[\leadsto \frac{-{\alpha}^{4}}{\frac{1}{\color{blue}{\frac{\alpha \cdot \alpha}{\left(\left(-\alpha\right) \cdot \alpha\right) \cdot \left(\left(-\alpha\right) \cdot \alpha\right)}}}} \cdot \log \left(1 - u0\right) \]
      24. lift-*.f32N/A

        \[\leadsto \frac{-{\alpha}^{4}}{\frac{1}{\frac{\alpha \cdot \alpha}{\color{blue}{\left(\left(-\alpha\right) \cdot \alpha\right) \cdot \left(\left(-\alpha\right) \cdot \alpha\right)}}}} \cdot \log \left(1 - u0\right) \]
    6. Applied rewrites95.0%

      \[\leadsto \frac{-{\alpha}^{4}}{\color{blue}{\frac{1}{\frac{\alpha \cdot \alpha}{{\alpha}^{4}}}}} \cdot \log \left(1 - u0\right) \]

    if 0.988049984 < (-.f32 #s(literal 1 binary32) u0)

    1. Initial program 45.1%

      \[\left(\left(-\alpha\right) \cdot \alpha\right) \cdot \log \left(1 - u0\right) \]
    2. Add Preprocessing
    3. Taylor expanded in u0 around 0

      \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \color{blue}{\left(u0 \cdot \left(u0 \cdot \left(\frac{-1}{3} \cdot u0 - \frac{1}{2}\right) - 1\right)\right)} \]
    4. Step-by-step derivation
      1. *-commutativeN/A

        \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \color{blue}{\left(\left(u0 \cdot \left(\frac{-1}{3} \cdot u0 - \frac{1}{2}\right) - 1\right) \cdot u0\right)} \]
      2. lower-*.f32N/A

        \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \color{blue}{\left(\left(u0 \cdot \left(\frac{-1}{3} \cdot u0 - \frac{1}{2}\right) - 1\right) \cdot u0\right)} \]
      3. sub-negN/A

        \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \left(\color{blue}{\left(u0 \cdot \left(\frac{-1}{3} \cdot u0 - \frac{1}{2}\right) + \left(\mathsf{neg}\left(1\right)\right)\right)} \cdot u0\right) \]
      4. *-commutativeN/A

        \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \left(\left(\color{blue}{\left(\frac{-1}{3} \cdot u0 - \frac{1}{2}\right) \cdot u0} + \left(\mathsf{neg}\left(1\right)\right)\right) \cdot u0\right) \]
      5. metadata-evalN/A

        \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \left(\left(\left(\frac{-1}{3} \cdot u0 - \frac{1}{2}\right) \cdot u0 + \color{blue}{-1}\right) \cdot u0\right) \]
      6. lower-fma.f32N/A

        \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \left(\color{blue}{\mathsf{fma}\left(\frac{-1}{3} \cdot u0 - \frac{1}{2}, u0, -1\right)} \cdot u0\right) \]
      7. sub-negN/A

        \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \left(\mathsf{fma}\left(\color{blue}{\frac{-1}{3} \cdot u0 + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)}, u0, -1\right) \cdot u0\right) \]
      8. metadata-evalN/A

        \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \left(\mathsf{fma}\left(\frac{-1}{3} \cdot u0 + \color{blue}{\frac{-1}{2}}, u0, -1\right) \cdot u0\right) \]
      9. lower-fma.f3283.2

        \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \left(\mathsf{fma}\left(\color{blue}{\mathsf{fma}\left(-0.3333333333333333, u0, -0.5\right)}, u0, -1\right) \cdot u0\right) \]
    5. Applied rewrites82.8%

      \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \color{blue}{\left(\mathsf{fma}\left(\mathsf{fma}\left(-0.3333333333333333, u0, -0.5\right), u0, -1\right) \cdot u0\right)} \]
    6. Step-by-step derivation
      1. Applied rewrites95.0%

        \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \left(\left(\mathsf{fma}\left(-0.3333333333333333, u0, -0.5\right) \cdot u0 - 1\right) \cdot u0\right) \]
      2. Step-by-step derivation
        1. Applied rewrites98.6%

          \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \left(\left(\left(-0.3333333333333333 \cdot u0 + -0.5\right) \cdot u0 - 1\right) \cdot u0\right) \]
      3. Recombined 2 regimes into one program.
      4. Final simplification97.9%

        \[\leadsto \begin{array}{l} \mathbf{if}\;1 - u0 \leq 0.9880499839782715:\\ \;\;\;\;\frac{{\alpha}^{4}}{\frac{-1}{\frac{\alpha \cdot \alpha}{{\alpha}^{4}}}} \cdot \log \left(1 - u0\right)\\ \mathbf{else}:\\ \;\;\;\;\left(\left(-\alpha\right) \cdot \alpha\right) \cdot \left(\left(\left(-0.3333333333333333 \cdot u0 + -0.5\right) \cdot u0 - 1\right) \cdot u0\right)\\ \end{array} \]
      5. Add Preprocessing

      Alternative 2: 97.8% accurate, 0.5× speedup?

      \[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;1 - u0 \leq 0.9880499839782715:\\ \;\;\;\;\left({\alpha}^{4} \cdot \frac{-1}{\alpha \cdot \alpha}\right) \cdot \log \left(1 - u0\right)\\ \mathbf{else}:\\ \;\;\;\;\left(\left(-\alpha\right) \cdot \alpha\right) \cdot \left(\left(\left(-0.3333333333333333 \cdot u0 + -0.5\right) \cdot u0 - 1\right) \cdot u0\right)\\ \end{array} \end{array} \]
      (FPCore (alpha u0)
       :precision binary32
       (if (<= (- 1.0 u0) 0.9880499839782715)
         (* (* (pow alpha 4.0) (/ -1.0 (* alpha alpha))) (log (- 1.0 u0)))
         (*
          (* (- alpha) alpha)
          (* (- (* (+ (* -0.3333333333333333 u0) -0.5) u0) 1.0) u0))))
      // Herbie Alternative 2 (97.8% accurate): same piecewise split as
      // Alternative 1, but the log-branch coefficient is the simpler product
      // alpha^4 * (-1 / (alpha * alpha)); the small-u0 branch is the cubic
      // Taylor polynomial of log(1 - u0) about u0 = 0.
      float code(float alpha, float u0) {
      	float tmp;
      	if ((1.0f - u0) <= 0.9880499839782715f) {
      		tmp = (powf(alpha, 4.0f) * (-1.0f / (alpha * alpha))) * logf((1.0f - u0));
      	} else {
      		tmp = (-alpha * alpha) * (((((-0.3333333333333333f * u0) + -0.5f) * u0) - 1.0f) * u0);
      	}
      	return tmp;
      }
      
      ! Herbie Alternative 2 (97.8% accurate): piecewise form of
      ! (-alpha * alpha) * log(1 - u0); log branch uses the coefficient
      ! alpha**4 * (-1 / (alpha * alpha)), else branch a cubic Taylor polynomial.
      real(4) function code(alpha, u0)
          real(4), intent (in) :: alpha
          real(4), intent (in) :: u0
          real(4) :: tmp
          if ((1.0e0 - u0) <= 0.9880499839782715e0) then
              tmp = ((alpha ** 4.0e0) * ((-1.0e0) / (alpha * alpha))) * log((1.0e0 - u0))
          else
              tmp = (-alpha * alpha) * ((((((-0.3333333333333333e0) * u0) + (-0.5e0)) * u0) - 1.0e0) * u0)
          end if
          code = tmp
      end function
      
      # Herbie Alternative 2: piecewise form with explicit Float32 rounding;
      # log branch coefficient is alpha^4 * (-1 / (alpha * alpha)), else branch
      # is the cubic Taylor polynomial of log(1 - u0) about u0 = 0.
      function code(alpha, u0)
      	tmp = Float32(0.0)
      	if (Float32(Float32(1.0) - u0) <= Float32(0.9880499839782715))
      		tmp = Float32(Float32((alpha ^ Float32(4.0)) * Float32(Float32(-1.0) / Float32(alpha * alpha))) * log(Float32(Float32(1.0) - u0)));
      	else
      		tmp = Float32(Float32(Float32(-alpha) * alpha) * Float32(Float32(Float32(Float32(Float32(Float32(-0.3333333333333333) * u0) + Float32(-0.5)) * u0) - Float32(1.0)) * u0));
      	end
      	return tmp
      end
      
      function tmp_2 = code(alpha, u0)
      	% Herbie Alternative 2: log branch uses alpha^4 * (-1 / (alpha * alpha));
      	% else branch is a cubic Taylor polynomial of log(1 - u0) for small u0.
      	tmp = single(0.0);
      	if ((single(1.0) - u0) <= single(0.9880499839782715))
      		tmp = ((alpha ^ single(4.0)) * (single(-1.0) / (alpha * alpha))) * log((single(1.0) - u0));
      	else
      		tmp = (-alpha * alpha) * (((((single(-0.3333333333333333) * u0) + single(-0.5)) * u0) - single(1.0)) * u0);
      	end
      	tmp_2 = tmp;
      end
      
      \begin{array}{l}
      
      \\
      \begin{array}{l}
      \mathbf{if}\;1 - u0 \leq 0.9880499839782715:\\
      \;\;\;\;\left({\alpha}^{4} \cdot \frac{-1}{\alpha \cdot \alpha}\right) \cdot \log \left(1 - u0\right)\\
      
      \mathbf{else}:\\
      \;\;\;\;\left(\left(-\alpha\right) \cdot \alpha\right) \cdot \left(\left(\left(-0.3333333333333333 \cdot u0 + -0.5\right) \cdot u0 - 1\right) \cdot u0\right)\\
      
      
      \end{array}
      \end{array}
      
      Derivation
      1. Split input into 2 regimes
      2. if (-.f32 #s(literal 1 binary32) u0) < 0.988049984

        1. Initial program 94.7%

          \[\left(\left(-\alpha\right) \cdot \alpha\right) \cdot \log \left(1 - u0\right) \]
        2. Add Preprocessing
        3. Step-by-step derivation
          1. lift-neg.f32N/A

            \[\leadsto \left(\color{blue}{\left(\mathsf{neg}\left(\alpha\right)\right)} \cdot \alpha\right) \cdot \log \left(1 - u0\right) \]
          2. neg-sub0N/A

            \[\leadsto \left(\color{blue}{\left(0 - \alpha\right)} \cdot \alpha\right) \cdot \log \left(1 - u0\right) \]
          3. flip--N/A

            \[\leadsto \left(\color{blue}{\frac{0 \cdot 0 - \alpha \cdot \alpha}{0 + \alpha}} \cdot \alpha\right) \cdot \log \left(1 - u0\right) \]
          4. metadata-evalN/A

            \[\leadsto \left(\frac{\color{blue}{0} - \alpha \cdot \alpha}{0 + \alpha} \cdot \alpha\right) \cdot \log \left(1 - u0\right) \]
          5. neg-sub0N/A

            \[\leadsto \left(\frac{\color{blue}{\mathsf{neg}\left(\alpha \cdot \alpha\right)}}{0 + \alpha} \cdot \alpha\right) \cdot \log \left(1 - u0\right) \]
          6. distribute-lft-neg-outN/A

            \[\leadsto \left(\frac{\color{blue}{\left(\mathsf{neg}\left(\alpha\right)\right) \cdot \alpha}}{0 + \alpha} \cdot \alpha\right) \cdot \log \left(1 - u0\right) \]
          7. lift-neg.f32N/A

            \[\leadsto \left(\frac{\color{blue}{\left(-\alpha\right)} \cdot \alpha}{0 + \alpha} \cdot \alpha\right) \cdot \log \left(1 - u0\right) \]
          8. lift-*.f32N/A

            \[\leadsto \left(\frac{\color{blue}{\left(-\alpha\right) \cdot \alpha}}{0 + \alpha} \cdot \alpha\right) \cdot \log \left(1 - u0\right) \]
          9. div-invN/A

            \[\leadsto \left(\color{blue}{\left(\left(\left(-\alpha\right) \cdot \alpha\right) \cdot \frac{1}{0 + \alpha}\right)} \cdot \alpha\right) \cdot \log \left(1 - u0\right) \]
          10. lower-*.f32N/A

            \[\leadsto \left(\color{blue}{\left(\left(\left(-\alpha\right) \cdot \alpha\right) \cdot \frac{1}{0 + \alpha}\right)} \cdot \alpha\right) \cdot \log \left(1 - u0\right) \]
          11. +-lft-identityN/A

            \[\leadsto \left(\left(\left(\left(-\alpha\right) \cdot \alpha\right) \cdot \frac{1}{\color{blue}{\alpha}}\right) \cdot \alpha\right) \cdot \log \left(1 - u0\right) \]
          12. lower-/.f3294.7

            \[\leadsto \left(\left(\left(\left(-\alpha\right) \cdot \alpha\right) \cdot \color{blue}{\frac{1}{\alpha}}\right) \cdot \alpha\right) \cdot \log \left(1 - u0\right) \]
        4. Applied rewrites94.7%

          \[\leadsto \left(\color{blue}{\left(\left(\left(-\alpha\right) \cdot \alpha\right) \cdot \frac{1}{\alpha}\right)} \cdot \alpha\right) \cdot \log \left(1 - u0\right) \]
        5. Applied rewrites94.9%

          \[\leadsto \color{blue}{\left({\alpha}^{4} \cdot \frac{1}{\left(-\alpha\right) \cdot \alpha}\right)} \cdot \log \left(1 - u0\right) \]

        if 0.988049984 < (-.f32 #s(literal 1 binary32) u0)

        1. Initial program 45.1%

          \[\left(\left(-\alpha\right) \cdot \alpha\right) \cdot \log \left(1 - u0\right) \]
        2. Add Preprocessing
        3. Taylor expanded in u0 around 0

          \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \color{blue}{\left(u0 \cdot \left(u0 \cdot \left(\frac{-1}{3} \cdot u0 - \frac{1}{2}\right) - 1\right)\right)} \]
        4. Step-by-step derivation
          1. *-commutativeN/A

            \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \color{blue}{\left(\left(u0 \cdot \left(\frac{-1}{3} \cdot u0 - \frac{1}{2}\right) - 1\right) \cdot u0\right)} \]
          2. lower-*.f32N/A

            \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \color{blue}{\left(\left(u0 \cdot \left(\frac{-1}{3} \cdot u0 - \frac{1}{2}\right) - 1\right) \cdot u0\right)} \]
          3. sub-negN/A

            \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \left(\color{blue}{\left(u0 \cdot \left(\frac{-1}{3} \cdot u0 - \frac{1}{2}\right) + \left(\mathsf{neg}\left(1\right)\right)\right)} \cdot u0\right) \]
          4. *-commutativeN/A

            \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \left(\left(\color{blue}{\left(\frac{-1}{3} \cdot u0 - \frac{1}{2}\right) \cdot u0} + \left(\mathsf{neg}\left(1\right)\right)\right) \cdot u0\right) \]
          5. metadata-evalN/A

            \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \left(\left(\left(\frac{-1}{3} \cdot u0 - \frac{1}{2}\right) \cdot u0 + \color{blue}{-1}\right) \cdot u0\right) \]
          6. lower-fma.f32N/A

            \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \left(\color{blue}{\mathsf{fma}\left(\frac{-1}{3} \cdot u0 - \frac{1}{2}, u0, -1\right)} \cdot u0\right) \]
          7. sub-negN/A

            \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \left(\mathsf{fma}\left(\color{blue}{\frac{-1}{3} \cdot u0 + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)}, u0, -1\right) \cdot u0\right) \]
          8. metadata-evalN/A

            \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \left(\mathsf{fma}\left(\frac{-1}{3} \cdot u0 + \color{blue}{\frac{-1}{2}}, u0, -1\right) \cdot u0\right) \]
          9. lower-fma.f3283.2

            \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \left(\mathsf{fma}\left(\color{blue}{\mathsf{fma}\left(-0.3333333333333333, u0, -0.5\right)}, u0, -1\right) \cdot u0\right) \]
        5. Applied rewrites82.8%

          \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \color{blue}{\left(\mathsf{fma}\left(\mathsf{fma}\left(-0.3333333333333333, u0, -0.5\right), u0, -1\right) \cdot u0\right)} \]
        6. Step-by-step derivation
          1. Applied rewrites95.0%

            \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \left(\left(\mathsf{fma}\left(-0.3333333333333333, u0, -0.5\right) \cdot u0 - 1\right) \cdot u0\right) \]
          2. Step-by-step derivation
            1. Applied rewrites98.6%

              \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \left(\left(\left(-0.3333333333333333 \cdot u0 + -0.5\right) \cdot u0 - 1\right) \cdot u0\right) \]
          3. Recombined 2 regimes into one program.
          4. Final simplification97.9%

            \[\leadsto \begin{array}{l} \mathbf{if}\;1 - u0 \leq 0.9880499839782715:\\ \;\;\;\;\left({\alpha}^{4} \cdot \frac{-1}{\alpha \cdot \alpha}\right) \cdot \log \left(1 - u0\right)\\ \mathbf{else}:\\ \;\;\;\;\left(\left(-\alpha\right) \cdot \alpha\right) \cdot \left(\left(\left(-0.3333333333333333 \cdot u0 + -0.5\right) \cdot u0 - 1\right) \cdot u0\right)\\ \end{array} \]
          5. Add Preprocessing

          Alternative 3: 97.9% accurate, 0.8× speedup?

          \[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;1 - u0 \leq 0.9860000014305115:\\ \;\;\;\;\frac{-1}{\frac{\alpha}{\left(\alpha \cdot \alpha\right) \cdot \alpha}} \cdot \log \left(1 - u0\right)\\ \mathbf{else}:\\ \;\;\;\;\left(\left(-\alpha\right) \cdot \alpha\right) \cdot \left(\left(\left(-0.3333333333333333 \cdot u0 + -0.5\right) \cdot u0 - 1\right) \cdot u0\right)\\ \end{array} \end{array} \]
          (FPCore (alpha u0)
           :precision binary32
           (if (<= (- 1.0 u0) 0.9860000014305115)
             (* (/ -1.0 (/ alpha (* (* alpha alpha) alpha))) (log (- 1.0 u0)))
             (*
              (* (- alpha) alpha)
              (* (- (* (+ (* -0.3333333333333333 u0) -0.5) u0) 1.0) u0))))
          // Herbie Alternative 3 (97.9% accurate): regime threshold moved to
          // 0.986...; log-branch coefficient rewritten as
          // -1 / (alpha / (alpha^3)); else branch is the cubic Taylor
          // polynomial of log(1 - u0) about u0 = 0.
          float code(float alpha, float u0) {
          	float tmp;
          	if ((1.0f - u0) <= 0.9860000014305115f) {
          		tmp = (-1.0f / (alpha / ((alpha * alpha) * alpha))) * logf((1.0f - u0));
          	} else {
          		tmp = (-alpha * alpha) * (((((-0.3333333333333333f * u0) + -0.5f) * u0) - 1.0f) * u0);
          	}
          	return tmp;
          }
          
          ! Herbie Alternative 3 (97.9% accurate): threshold 0.986...; log-branch
          ! coefficient is -1 / (alpha / (alpha**3)); else branch is a cubic
          ! Taylor polynomial of log(1 - u0) about u0 = 0.
          real(4) function code(alpha, u0)
              real(4), intent (in) :: alpha
              real(4), intent (in) :: u0
              real(4) :: tmp
              if ((1.0e0 - u0) <= 0.9860000014305115e0) then
                  tmp = ((-1.0e0) / (alpha / ((alpha * alpha) * alpha))) * log((1.0e0 - u0))
              else
                  tmp = (-alpha * alpha) * ((((((-0.3333333333333333e0) * u0) + (-0.5e0)) * u0) - 1.0e0) * u0)
              end if
              code = tmp
          end function
          
          # Herbie Alternative 3: threshold 0.986... with explicit Float32
          # rounding; log-branch coefficient is -1 / (alpha / (alpha^3));
          # else branch is the cubic Taylor polynomial of log(1 - u0).
          function code(alpha, u0)
          	tmp = Float32(0.0)
          	if (Float32(Float32(1.0) - u0) <= Float32(0.9860000014305115))
          		tmp = Float32(Float32(Float32(-1.0) / Float32(alpha / Float32(Float32(alpha * alpha) * alpha))) * log(Float32(Float32(1.0) - u0)));
          	else
          		tmp = Float32(Float32(Float32(-alpha) * alpha) * Float32(Float32(Float32(Float32(Float32(Float32(-0.3333333333333333) * u0) + Float32(-0.5)) * u0) - Float32(1.0)) * u0));
          	end
          	return tmp
          end
          
          function tmp_2 = code(alpha, u0)
          	% Herbie Alternative 3: threshold 0.986...; log-branch coefficient is
          	% -1 / (alpha / (alpha^3)); else branch is a cubic Taylor polynomial.
          	tmp = single(0.0);
          	if ((single(1.0) - u0) <= single(0.9860000014305115))
          		tmp = (single(-1.0) / (alpha / ((alpha * alpha) * alpha))) * log((single(1.0) - u0));
          	else
          		tmp = (-alpha * alpha) * (((((single(-0.3333333333333333) * u0) + single(-0.5)) * u0) - single(1.0)) * u0);
          	end
          	tmp_2 = tmp;
          end
          
          \begin{array}{l}
          
          \\
          \begin{array}{l}
          \mathbf{if}\;1 - u0 \leq 0.9860000014305115:\\
          \;\;\;\;\frac{-1}{\frac{\alpha}{\left(\alpha \cdot \alpha\right) \cdot \alpha}} \cdot \log \left(1 - u0\right)\\
          
          \mathbf{else}:\\
          \;\;\;\;\left(\left(-\alpha\right) \cdot \alpha\right) \cdot \left(\left(\left(-0.3333333333333333 \cdot u0 + -0.5\right) \cdot u0 - 1\right) \cdot u0\right)\\
          
          
          \end{array}
          \end{array}
          
          Derivation
          1. Split input into 2 regimes
          2. if (-.f32 #s(literal 1 binary32) u0) < 0.986000001

            1. Initial program 95.1%

              \[\left(\left(-\alpha\right) \cdot \alpha\right) \cdot \log \left(1 - u0\right) \]
            2. Add Preprocessing
            3. Step-by-step derivation
              1. lift-*.f32N/A

                \[\leadsto \color{blue}{\left(\left(-\alpha\right) \cdot \alpha\right)} \cdot \log \left(1 - u0\right) \]
              2. lift-neg.f32N/A

                \[\leadsto \left(\color{blue}{\left(\mathsf{neg}\left(\alpha\right)\right)} \cdot \alpha\right) \cdot \log \left(1 - u0\right) \]
              3. neg-sub0N/A

                \[\leadsto \left(\color{blue}{\left(0 - \alpha\right)} \cdot \alpha\right) \cdot \log \left(1 - u0\right) \]
              4. flip--N/A

                \[\leadsto \left(\color{blue}{\frac{0 \cdot 0 - \alpha \cdot \alpha}{0 + \alpha}} \cdot \alpha\right) \cdot \log \left(1 - u0\right) \]
              5. metadata-evalN/A

                \[\leadsto \left(\frac{\color{blue}{0} - \alpha \cdot \alpha}{0 + \alpha} \cdot \alpha\right) \cdot \log \left(1 - u0\right) \]
              6. neg-sub0N/A

                \[\leadsto \left(\frac{\color{blue}{\mathsf{neg}\left(\alpha \cdot \alpha\right)}}{0 + \alpha} \cdot \alpha\right) \cdot \log \left(1 - u0\right) \]
              7. distribute-lft-neg-outN/A

                \[\leadsto \left(\frac{\color{blue}{\left(\mathsf{neg}\left(\alpha\right)\right) \cdot \alpha}}{0 + \alpha} \cdot \alpha\right) \cdot \log \left(1 - u0\right) \]
              8. lift-neg.f32N/A

                \[\leadsto \left(\frac{\color{blue}{\left(-\alpha\right)} \cdot \alpha}{0 + \alpha} \cdot \alpha\right) \cdot \log \left(1 - u0\right) \]
              9. lift-*.f32N/A

                \[\leadsto \left(\frac{\color{blue}{\left(-\alpha\right) \cdot \alpha}}{0 + \alpha} \cdot \alpha\right) \cdot \log \left(1 - u0\right) \]
              10. +-lft-identityN/A

                \[\leadsto \left(\frac{\left(-\alpha\right) \cdot \alpha}{\color{blue}{\alpha}} \cdot \alpha\right) \cdot \log \left(1 - u0\right) \]
              11. associate-*l/N/A

                \[\leadsto \color{blue}{\frac{\left(\left(-\alpha\right) \cdot \alpha\right) \cdot \alpha}{\alpha}} \cdot \log \left(1 - u0\right) \]
              12. clear-numN/A

                \[\leadsto \color{blue}{\frac{1}{\frac{\alpha}{\left(\left(-\alpha\right) \cdot \alpha\right) \cdot \alpha}}} \cdot \log \left(1 - u0\right) \]
              13. lower-/.f32N/A

                \[\leadsto \color{blue}{\frac{1}{\frac{\alpha}{\left(\left(-\alpha\right) \cdot \alpha\right) \cdot \alpha}}} \cdot \log \left(1 - u0\right) \]
              14. lower-/.f32N/A

                \[\leadsto \frac{1}{\color{blue}{\frac{\alpha}{\left(\left(-\alpha\right) \cdot \alpha\right) \cdot \alpha}}} \cdot \log \left(1 - u0\right) \]
              15. lower-*.f32 — 95.2

                \[\leadsto \frac{1}{\frac{\alpha}{\color{blue}{\left(\left(-\alpha\right) \cdot \alpha\right) \cdot \alpha}}} \cdot \log \left(1 - u0\right) \]
            4. Applied rewrites95.2%

              \[\leadsto \color{blue}{\frac{1}{\frac{\alpha}{\left(\left(-\alpha\right) \cdot \alpha\right) \cdot \alpha}}} \cdot \log \left(1 - u0\right) \]

            if 0.986000001 < (-.f32 #s(literal 1 binary32) u0)

            1. Initial program 46.0%

              \[\left(\left(-\alpha\right) \cdot \alpha\right) \cdot \log \left(1 - u0\right) \]
            2. Add Preprocessing
            3. Taylor expanded in u0 around 0

              \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \color{blue}{\left(u0 \cdot \left(u0 \cdot \left(\frac{-1}{3} \cdot u0 - \frac{1}{2}\right) - 1\right)\right)} \]
            4. Step-by-step derivation
              1. *-commutativeN/A

                \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \color{blue}{\left(\left(u0 \cdot \left(\frac{-1}{3} \cdot u0 - \frac{1}{2}\right) - 1\right) \cdot u0\right)} \]
              2. lower-*.f32N/A

                \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \color{blue}{\left(\left(u0 \cdot \left(\frac{-1}{3} \cdot u0 - \frac{1}{2}\right) - 1\right) \cdot u0\right)} \]
              3. sub-negN/A

                \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \left(\color{blue}{\left(u0 \cdot \left(\frac{-1}{3} \cdot u0 - \frac{1}{2}\right) + \left(\mathsf{neg}\left(1\right)\right)\right)} \cdot u0\right) \]
              4. *-commutativeN/A

                \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \left(\left(\color{blue}{\left(\frac{-1}{3} \cdot u0 - \frac{1}{2}\right) \cdot u0} + \left(\mathsf{neg}\left(1\right)\right)\right) \cdot u0\right) \]
              5. metadata-evalN/A

                \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \left(\left(\left(\frac{-1}{3} \cdot u0 - \frac{1}{2}\right) \cdot u0 + \color{blue}{-1}\right) \cdot u0\right) \]
              6. lower-fma.f32N/A

                \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \left(\color{blue}{\mathsf{fma}\left(\frac{-1}{3} \cdot u0 - \frac{1}{2}, u0, -1\right)} \cdot u0\right) \]
              7. sub-negN/A

                \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \left(\mathsf{fma}\left(\color{blue}{\frac{-1}{3} \cdot u0 + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)}, u0, -1\right) \cdot u0\right) \]
              8. metadata-evalN/A

                \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \left(\mathsf{fma}\left(\frac{-1}{3} \cdot u0 + \color{blue}{\frac{-1}{2}}, u0, -1\right) \cdot u0\right) \]
              9. lower-fma.f32 — 82.6

                \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \left(\mathsf{fma}\left(\color{blue}{\mathsf{fma}\left(-0.3333333333333333, u0, -0.5\right)}, u0, -1\right) \cdot u0\right) \]
            5. Applied rewrites82.6%

              \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \color{blue}{\left(\mathsf{fma}\left(\mathsf{fma}\left(-0.3333333333333333, u0, -0.5\right), u0, -1\right) \cdot u0\right)} \]
            6. Step-by-step derivation
              1. Applied rewrites95.4%

                \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \left(\left(\mathsf{fma}\left(-0.3333333333333333, u0, -0.5\right) \cdot u0 - 1\right) \cdot u0\right) \]
              2. Step-by-step derivation
                1. Applied rewrites98.4%

                  \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \left(\left(\left(-0.3333333333333333 \cdot u0 + -0.5\right) \cdot u0 - 1\right) \cdot u0\right) \]
              3. Recombined 2 regimes into one program.
              4. Final simplification97.9%

                \[\leadsto \begin{array}{l} \mathbf{if}\;1 - u0 \leq 0.9860000014305115:\\ \;\;\;\;\frac{-1}{\frac{\alpha}{\left(\alpha \cdot \alpha\right) \cdot \alpha}} \cdot \log \left(1 - u0\right)\\ \mathbf{else}:\\ \;\;\;\;\left(\left(-\alpha\right) \cdot \alpha\right) \cdot \left(\left(\left(-0.3333333333333333 \cdot u0 + -0.5\right) \cdot u0 - 1\right) \cdot u0\right)\\ \end{array} \]
              5. Add Preprocessing

              Alternative 4: 97.9% accurate, 0.8× speedup?

              \[\begin{array}{l} \\ \begin{array}{l} t_0 := \left(-\alpha\right) \cdot \alpha\\ \mathbf{if}\;1 - u0 \leq 0.9860000014305115:\\ \;\;\;\;\frac{t\_0 \cdot \alpha}{\alpha} \cdot \log \left(1 - u0\right)\\ \mathbf{else}:\\ \;\;\;\;t\_0 \cdot \left(\left(\left(-0.3333333333333333 \cdot u0 + -0.5\right) \cdot u0 - 1\right) \cdot u0\right)\\ \end{array} \end{array} \]
               ;; Herbie alternative 4: -alpha^2 * log(1 - u0) with a regime split at
               ;; 1 - u0 = 0.986; the log branch keeps the (t_0*alpha)/alpha rewrite,
               ;; the other branch uses a cubic Taylor polynomial in u0.
               (FPCore (alpha u0)
                :precision binary32
                (let* ((t_0 (* (- alpha) alpha)))
                  (if (<= (- 1.0 u0) 0.9860000014305115)
                    (* (/ (* t_0 alpha) alpha) (log (- 1.0 u0)))
                    (* t_0 (* (- (* (+ (* -0.3333333333333333 u0) -0.5) u0) 1.0) u0)))))
              float code(float alpha, float u0) {
              	float t_0 = -alpha * alpha;
              	float tmp;
              	if ((1.0f - u0) <= 0.9860000014305115f) {
              		tmp = ((t_0 * alpha) / alpha) * logf((1.0f - u0));
              	} else {
              		tmp = t_0 * (((((-0.3333333333333333f * u0) + -0.5f) * u0) - 1.0f) * u0);
              	}
              	return tmp;
              }
              
               real(4) function code(alpha, u0)
                   ! -alpha**2 * log(1 - u0), split at 1 - u0 = 0.986: exact log branch
                   ! vs. a cubic Taylor polynomial in u0 (Herbie alternative 4).
                   real(4), intent (in) :: alpha
                   real(4), intent (in) :: u0
                   real(4) :: t_0
                   real(4) :: tmp
                   ! t_0 = -alpha**2; (t_0*alpha)/alpha below mirrors the report's rewrite
                   t_0 = -alpha * alpha
                   if ((1.0e0 - u0) <= 0.9860000014305115e0) then
                       tmp = ((t_0 * alpha) / alpha) * log((1.0e0 - u0))
                   else
                       ! series branch: t_0 * (-u0 - u0**2/2 - u0**3/3)
                       tmp = t_0 * ((((((-0.3333333333333333e0) * u0) + (-0.5e0)) * u0) - 1.0e0) * u0)
                   end if
                   code = tmp
               end function
              
               # -alpha^2 * log(1 - u0), split at 1 - u0 = 0.986 between the exact log
               # form and a cubic Taylor polynomial in u0 (Herbie alternative 4).
               # Every operation is explicitly rounded to Float32.
               function code(alpha, u0)
               	t_0 = Float32(Float32(-alpha) * alpha)
               	tmp = Float32(0.0)
               	if (Float32(Float32(1.0) - u0) <= Float32(0.9860000014305115))
               		tmp = Float32(Float32(Float32(t_0 * alpha) / alpha) * log(Float32(Float32(1.0) - u0)));
               	else
               		# series branch: t_0 * (-u0 - u0^2/2 - u0^3/3)
               		tmp = Float32(t_0 * Float32(Float32(Float32(Float32(Float32(Float32(-0.3333333333333333) * u0) + Float32(-0.5)) * u0) - Float32(1.0)) * u0));
               	end
               	return tmp
               end
              
               % -alpha^2 * log(1 - u0) in single precision, split at 1 - u0 = 0.986
               % between the exact log form and a cubic Taylor polynomial in u0
               % (Herbie alternative 4).
               function tmp_2 = code(alpha, u0)
               	t_0 = -alpha * alpha;
               	tmp = single(0.0);
               	if ((single(1.0) - u0) <= single(0.9860000014305115))
               		tmp = ((t_0 * alpha) / alpha) * log((single(1.0) - u0));
               	else
               		% series branch: t_0 * (-u0 - u0^2/2 - u0^3/3)
               		tmp = t_0 * (((((single(-0.3333333333333333) * u0) + single(-0.5)) * u0) - single(1.0)) * u0);
               	end
               	tmp_2 = tmp;
               end
              
              \begin{array}{l}
              
              \\
              \begin{array}{l}
              t_0 := \left(-\alpha\right) \cdot \alpha\\
              \mathbf{if}\;1 - u0 \leq 0.9860000014305115:\\
              \;\;\;\;\frac{t\_0 \cdot \alpha}{\alpha} \cdot \log \left(1 - u0\right)\\
              
              \mathbf{else}:\\
              \;\;\;\;t\_0 \cdot \left(\left(\left(-0.3333333333333333 \cdot u0 + -0.5\right) \cdot u0 - 1\right) \cdot u0\right)\\
              
              
              \end{array}
              \end{array}
              
              Derivation
              1. Split input into 2 regimes
              2. if (-.f32 #s(literal 1 binary32) u0) < 0.986000001

                1. Initial program 95.1%

                  \[\left(\left(-\alpha\right) \cdot \alpha\right) \cdot \log \left(1 - u0\right) \]
                2. Add Preprocessing
                3. Step-by-step derivation
                  1. lift-*.f32N/A

                    \[\leadsto \color{blue}{\left(\left(-\alpha\right) \cdot \alpha\right)} \cdot \log \left(1 - u0\right) \]
                  2. lift-neg.f32N/A

                    \[\leadsto \left(\color{blue}{\left(\mathsf{neg}\left(\alpha\right)\right)} \cdot \alpha\right) \cdot \log \left(1 - u0\right) \]
                  3. neg-sub0N/A

                    \[\leadsto \left(\color{blue}{\left(0 - \alpha\right)} \cdot \alpha\right) \cdot \log \left(1 - u0\right) \]
                  4. flip--N/A

                    \[\leadsto \left(\color{blue}{\frac{0 \cdot 0 - \alpha \cdot \alpha}{0 + \alpha}} \cdot \alpha\right) \cdot \log \left(1 - u0\right) \]
                  5. metadata-evalN/A

                    \[\leadsto \left(\frac{\color{blue}{0} - \alpha \cdot \alpha}{0 + \alpha} \cdot \alpha\right) \cdot \log \left(1 - u0\right) \]
                  6. neg-sub0N/A

                    \[\leadsto \left(\frac{\color{blue}{\mathsf{neg}\left(\alpha \cdot \alpha\right)}}{0 + \alpha} \cdot \alpha\right) \cdot \log \left(1 - u0\right) \]
                  7. distribute-lft-neg-outN/A

                    \[\leadsto \left(\frac{\color{blue}{\left(\mathsf{neg}\left(\alpha\right)\right) \cdot \alpha}}{0 + \alpha} \cdot \alpha\right) \cdot \log \left(1 - u0\right) \]
                  8. lift-neg.f32N/A

                    \[\leadsto \left(\frac{\color{blue}{\left(-\alpha\right)} \cdot \alpha}{0 + \alpha} \cdot \alpha\right) \cdot \log \left(1 - u0\right) \]
                  9. lift-*.f32N/A

                    \[\leadsto \left(\frac{\color{blue}{\left(-\alpha\right) \cdot \alpha}}{0 + \alpha} \cdot \alpha\right) \cdot \log \left(1 - u0\right) \]
                  10. +-lft-identityN/A

                    \[\leadsto \left(\frac{\left(-\alpha\right) \cdot \alpha}{\color{blue}{\alpha}} \cdot \alpha\right) \cdot \log \left(1 - u0\right) \]
                  11. associate-*l/N/A

                    \[\leadsto \color{blue}{\frac{\left(\left(-\alpha\right) \cdot \alpha\right) \cdot \alpha}{\alpha}} \cdot \log \left(1 - u0\right) \]
                  12. lower-/.f32N/A

                    \[\leadsto \color{blue}{\frac{\left(\left(-\alpha\right) \cdot \alpha\right) \cdot \alpha}{\alpha}} \cdot \log \left(1 - u0\right) \]
                  13. lower-*.f32 — 95.1

                    \[\leadsto \frac{\color{blue}{\left(\left(-\alpha\right) \cdot \alpha\right) \cdot \alpha}}{\alpha} \cdot \log \left(1 - u0\right) \]
                4. Applied rewrites95.1%

                  \[\leadsto \color{blue}{\frac{\left(\left(-\alpha\right) \cdot \alpha\right) \cdot \alpha}{\alpha}} \cdot \log \left(1 - u0\right) \]

                if 0.986000001 < (-.f32 #s(literal 1 binary32) u0)

                1. Initial program 46.0%

                  \[\left(\left(-\alpha\right) \cdot \alpha\right) \cdot \log \left(1 - u0\right) \]
                2. Add Preprocessing
                3. Taylor expanded in u0 around 0

                  \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \color{blue}{\left(u0 \cdot \left(u0 \cdot \left(\frac{-1}{3} \cdot u0 - \frac{1}{2}\right) - 1\right)\right)} \]
                4. Step-by-step derivation
                  1. *-commutativeN/A

                    \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \color{blue}{\left(\left(u0 \cdot \left(\frac{-1}{3} \cdot u0 - \frac{1}{2}\right) - 1\right) \cdot u0\right)} \]
                  2. lower-*.f32N/A

                    \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \color{blue}{\left(\left(u0 \cdot \left(\frac{-1}{3} \cdot u0 - \frac{1}{2}\right) - 1\right) \cdot u0\right)} \]
                  3. sub-negN/A

                    \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \left(\color{blue}{\left(u0 \cdot \left(\frac{-1}{3} \cdot u0 - \frac{1}{2}\right) + \left(\mathsf{neg}\left(1\right)\right)\right)} \cdot u0\right) \]
                  4. *-commutativeN/A

                    \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \left(\left(\color{blue}{\left(\frac{-1}{3} \cdot u0 - \frac{1}{2}\right) \cdot u0} + \left(\mathsf{neg}\left(1\right)\right)\right) \cdot u0\right) \]
                  5. metadata-evalN/A

                    \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \left(\left(\left(\frac{-1}{3} \cdot u0 - \frac{1}{2}\right) \cdot u0 + \color{blue}{-1}\right) \cdot u0\right) \]
                  6. lower-fma.f32N/A

                    \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \left(\color{blue}{\mathsf{fma}\left(\frac{-1}{3} \cdot u0 - \frac{1}{2}, u0, -1\right)} \cdot u0\right) \]
                  7. sub-negN/A

                    \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \left(\mathsf{fma}\left(\color{blue}{\frac{-1}{3} \cdot u0 + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)}, u0, -1\right) \cdot u0\right) \]
                  8. metadata-evalN/A

                    \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \left(\mathsf{fma}\left(\frac{-1}{3} \cdot u0 + \color{blue}{\frac{-1}{2}}, u0, -1\right) \cdot u0\right) \]
                  9. lower-fma.f32 — 82.6

                    \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \left(\mathsf{fma}\left(\color{blue}{\mathsf{fma}\left(-0.3333333333333333, u0, -0.5\right)}, u0, -1\right) \cdot u0\right) \]
                5. Applied rewrites82.6%

                  \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \color{blue}{\left(\mathsf{fma}\left(\mathsf{fma}\left(-0.3333333333333333, u0, -0.5\right), u0, -1\right) \cdot u0\right)} \]
                6. Step-by-step derivation
                  1. Applied rewrites94.6%

                    \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \left(\left(\mathsf{fma}\left(-0.3333333333333333, u0, -0.5\right) \cdot u0 - 1\right) \cdot u0\right) \]
                  2. Step-by-step derivation
                    1. Applied rewrites98.4%

                      \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \left(\left(\left(-0.3333333333333333 \cdot u0 + -0.5\right) \cdot u0 - 1\right) \cdot u0\right) \]
                  3. Recombined 2 regimes into one program.
                  4. Add Preprocessing

                  Alternative 5: 97.9% accurate, 0.9× speedup?

                  \[\begin{array}{l} \\ \begin{array}{l} t_0 := \left(-\alpha\right) \cdot \alpha\\ \mathbf{if}\;1 - u0 \leq 0.9860000014305115:\\ \;\;\;\;t\_0 \cdot \log \left(1 - u0\right)\\ \mathbf{else}:\\ \;\;\;\;t\_0 \cdot \left(\left(\left(-0.3333333333333333 \cdot u0 + -0.5\right) \cdot u0 - 1\right) \cdot u0\right)\\ \end{array} \end{array} \]
                   ;; Herbie alternative 5: same regime split at 1 - u0 = 0.986 as
                   ;; alternative 4, but the log branch multiplies by t_0 directly
                   ;; (no (* t_0 alpha)/alpha rewrite).
                   (FPCore (alpha u0)
                    :precision binary32
                    (let* ((t_0 (* (- alpha) alpha)))
                      (if (<= (- 1.0 u0) 0.9860000014305115)
                        (* t_0 (log (- 1.0 u0)))
                        (* t_0 (* (- (* (+ (* -0.3333333333333333 u0) -0.5) u0) 1.0) u0)))))
                  float code(float alpha, float u0) {
                  	float t_0 = -alpha * alpha;
                  	float tmp;
                  	if ((1.0f - u0) <= 0.9860000014305115f) {
                  		tmp = t_0 * logf((1.0f - u0));
                  	} else {
                  		tmp = t_0 * (((((-0.3333333333333333f * u0) + -0.5f) * u0) - 1.0f) * u0);
                  	}
                  	return tmp;
                  }
                  
                   real(4) function code(alpha, u0)
                       ! -alpha**2 * log(1 - u0); cubic Taylor polynomial fallback in u0
                       ! when 1 - u0 > 0.986 (Herbie alternative 5).
                       real(4), intent (in) :: alpha
                       real(4), intent (in) :: u0
                       real(4) :: t_0
                       real(4) :: tmp
                       t_0 = -alpha * alpha
                       if ((1.0e0 - u0) <= 0.9860000014305115e0) then
                           tmp = t_0 * log((1.0e0 - u0))
                       else
                           ! series branch: t_0 * (-u0 - u0**2/2 - u0**3/3)
                           tmp = t_0 * ((((((-0.3333333333333333e0) * u0) + (-0.5e0)) * u0) - 1.0e0) * u0)
                       end if
                       code = tmp
                   end function
                  
                   # -alpha^2 * log(1 - u0) in Float32, with a cubic Taylor polynomial
                   # fallback when 1 - u0 > 0.986 (Herbie alternative 5).
                   function code(alpha, u0)
                   	t_0 = Float32(Float32(-alpha) * alpha)
                   	tmp = Float32(0.0)
                   	if (Float32(Float32(1.0) - u0) <= Float32(0.9860000014305115))
                   		tmp = Float32(t_0 * log(Float32(Float32(1.0) - u0)));
                   	else
                   		# series branch: t_0 * (-u0 - u0^2/2 - u0^3/3)
                   		tmp = Float32(t_0 * Float32(Float32(Float32(Float32(Float32(Float32(-0.3333333333333333) * u0) + Float32(-0.5)) * u0) - Float32(1.0)) * u0));
                   	end
                   	return tmp
                   end
                  
                   % -alpha^2 * log(1 - u0) in single precision; cubic Taylor polynomial
                   % fallback when 1 - u0 > 0.986 (Herbie alternative 5).
                   function tmp_2 = code(alpha, u0)
                   	t_0 = -alpha * alpha;
                   	tmp = single(0.0);
                   	if ((single(1.0) - u0) <= single(0.9860000014305115))
                   		tmp = t_0 * log((single(1.0) - u0));
                   	else
                   		% series branch: t_0 * (-u0 - u0^2/2 - u0^3/3)
                   		tmp = t_0 * (((((single(-0.3333333333333333) * u0) + single(-0.5)) * u0) - single(1.0)) * u0);
                   	end
                   	tmp_2 = tmp;
                   end
                  
                  \begin{array}{l}
                  
                  \\
                  \begin{array}{l}
                  t_0 := \left(-\alpha\right) \cdot \alpha\\
                  \mathbf{if}\;1 - u0 \leq 0.9860000014305115:\\
                  \;\;\;\;t\_0 \cdot \log \left(1 - u0\right)\\
                  
                  \mathbf{else}:\\
                  \;\;\;\;t\_0 \cdot \left(\left(\left(-0.3333333333333333 \cdot u0 + -0.5\right) \cdot u0 - 1\right) \cdot u0\right)\\
                  
                  
                  \end{array}
                  \end{array}
                  
                  Derivation
                  1. Split input into 2 regimes
                  2. if (-.f32 #s(literal 1 binary32) u0) < 0.986000001

                    1. Initial program 95.1%

                      \[\left(\left(-\alpha\right) \cdot \alpha\right) \cdot \log \left(1 - u0\right) \]
                    2. Add Preprocessing

                    if 0.986000001 < (-.f32 #s(literal 1 binary32) u0)

                    1. Initial program 46.0%

                      \[\left(\left(-\alpha\right) \cdot \alpha\right) \cdot \log \left(1 - u0\right) \]
                    2. Add Preprocessing
                    3. Taylor expanded in u0 around 0

                      \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \color{blue}{\left(u0 \cdot \left(u0 \cdot \left(\frac{-1}{3} \cdot u0 - \frac{1}{2}\right) - 1\right)\right)} \]
                    4. Step-by-step derivation
                      1. *-commutativeN/A

                        \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \color{blue}{\left(\left(u0 \cdot \left(\frac{-1}{3} \cdot u0 - \frac{1}{2}\right) - 1\right) \cdot u0\right)} \]
                      2. lower-*.f32N/A

                        \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \color{blue}{\left(\left(u0 \cdot \left(\frac{-1}{3} \cdot u0 - \frac{1}{2}\right) - 1\right) \cdot u0\right)} \]
                      3. sub-negN/A

                        \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \left(\color{blue}{\left(u0 \cdot \left(\frac{-1}{3} \cdot u0 - \frac{1}{2}\right) + \left(\mathsf{neg}\left(1\right)\right)\right)} \cdot u0\right) \]
                      4. *-commutativeN/A

                        \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \left(\left(\color{blue}{\left(\frac{-1}{3} \cdot u0 - \frac{1}{2}\right) \cdot u0} + \left(\mathsf{neg}\left(1\right)\right)\right) \cdot u0\right) \]
                      5. metadata-evalN/A

                        \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \left(\left(\left(\frac{-1}{3} \cdot u0 - \frac{1}{2}\right) \cdot u0 + \color{blue}{-1}\right) \cdot u0\right) \]
                      6. lower-fma.f32N/A

                        \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \left(\color{blue}{\mathsf{fma}\left(\frac{-1}{3} \cdot u0 - \frac{1}{2}, u0, -1\right)} \cdot u0\right) \]
                      7. sub-negN/A

                        \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \left(\mathsf{fma}\left(\color{blue}{\frac{-1}{3} \cdot u0 + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)}, u0, -1\right) \cdot u0\right) \]
                      8. metadata-evalN/A

                        \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \left(\mathsf{fma}\left(\frac{-1}{3} \cdot u0 + \color{blue}{\frac{-1}{2}}, u0, -1\right) \cdot u0\right) \]
                      9. lower-fma.f32 — 82.6

                        \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \left(\mathsf{fma}\left(\color{blue}{\mathsf{fma}\left(-0.3333333333333333, u0, -0.5\right)}, u0, -1\right) \cdot u0\right) \]
                    5. Applied rewrites82.1%

                      \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \color{blue}{\left(\mathsf{fma}\left(\mathsf{fma}\left(-0.3333333333333333, u0, -0.5\right), u0, -1\right) \cdot u0\right)} \]
                    6. Step-by-step derivation
                      1. Applied rewrites94.6%

                        \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \left(\left(\mathsf{fma}\left(-0.3333333333333333, u0, -0.5\right) \cdot u0 - 1\right) \cdot u0\right) \]
                      2. Step-by-step derivation
                        1. Applied rewrites98.4%

                          \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \left(\left(\left(-0.3333333333333333 \cdot u0 + -0.5\right) \cdot u0 - 1\right) \cdot u0\right) \]
                      3. Recombined 2 regimes into one program.
                      4. Add Preprocessing

                      Alternative 6: 91.4% accurate, 3.4× speedup?

                      \[\begin{array}{l} \\ \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \left(\left(\left(-0.3333333333333333 \cdot u0 + -0.5\right) \cdot u0 - 1\right) \cdot u0\right) \end{array} \]
                       ;; Herbie alternative 6: branch-free cubic Taylor expansion of
                       ;; (* (* (- alpha) alpha) (log (- 1.0 u0))) about u0 = 0.
                       (FPCore (alpha u0)
                        :precision binary32
                        (*
                         (* (- alpha) alpha)
                         (* (- (* (+ (* -0.3333333333333333 u0) -0.5) u0) 1.0) u0)))
                      float code(float alpha, float u0) {
                      	return (-alpha * alpha) * (((((-0.3333333333333333f * u0) + -0.5f) * u0) - 1.0f) * u0);
                      }
                      
                       real(4) function code(alpha, u0)
                           ! -alpha**2 * (-u0 - u0**2/2 - u0**3/3): cubic Taylor expansion
                           ! of -alpha**2 * log(1 - u0) about u0 = 0 (alternative 6).
                           real(4), intent (in) :: alpha
                           real(4), intent (in) :: u0
                           code = (-alpha * alpha) * ((((((-0.3333333333333333e0) * u0) + (-0.5e0)) * u0) - 1.0e0) * u0)
                       end function
                      
                       # -alpha^2 times the cubic Taylor expansion of log(1 - u0)
                       # about u0 = 0 (Herbie alternative 6, branch-free; all Float32).
                       function code(alpha, u0)
                       	return Float32(Float32(Float32(-alpha) * alpha) * Float32(Float32(Float32(Float32(Float32(Float32(-0.3333333333333333) * u0) + Float32(-0.5)) * u0) - Float32(1.0)) * u0))
                       end
                      
                       % -alpha^2 times the cubic Taylor expansion of log(1 - u0)
                       % about u0 = 0 (Herbie alternative 6, branch-free; single precision).
                       function tmp = code(alpha, u0)
                       	tmp = (-alpha * alpha) * (((((single(-0.3333333333333333) * u0) + single(-0.5)) * u0) - single(1.0)) * u0);
                       end
                      
                      \begin{array}{l}
                      
                      \\
                      \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \left(\left(\left(-0.3333333333333333 \cdot u0 + -0.5\right) \cdot u0 - 1\right) \cdot u0\right)
                      \end{array}
                      
                      Derivation
                      1. Initial program 54.8%

                        \[\left(\left(-\alpha\right) \cdot \alpha\right) \cdot \log \left(1 - u0\right) \]
                      2. Add Preprocessing
                      3. Taylor expanded in u0 around 0

                        \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \color{blue}{\left(u0 \cdot \left(u0 \cdot \left(\frac{-1}{3} \cdot u0 - \frac{1}{2}\right) - 1\right)\right)} \]
                      4. Step-by-step derivation
                        1. *-commutativeN/A

                          \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \color{blue}{\left(\left(u0 \cdot \left(\frac{-1}{3} \cdot u0 - \frac{1}{2}\right) - 1\right) \cdot u0\right)} \]
                        2. lower-*.f32N/A

                          \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \color{blue}{\left(\left(u0 \cdot \left(\frac{-1}{3} \cdot u0 - \frac{1}{2}\right) - 1\right) \cdot u0\right)} \]
                        3. sub-negN/A

                          \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \left(\color{blue}{\left(u0 \cdot \left(\frac{-1}{3} \cdot u0 - \frac{1}{2}\right) + \left(\mathsf{neg}\left(1\right)\right)\right)} \cdot u0\right) \]
                        4. *-commutativeN/A

                          \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \left(\left(\color{blue}{\left(\frac{-1}{3} \cdot u0 - \frac{1}{2}\right) \cdot u0} + \left(\mathsf{neg}\left(1\right)\right)\right) \cdot u0\right) \]
                        5. metadata-evalN/A

                          \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \left(\left(\left(\frac{-1}{3} \cdot u0 - \frac{1}{2}\right) \cdot u0 + \color{blue}{-1}\right) \cdot u0\right) \]
                        6. lower-fma.f32N/A

                          \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \left(\color{blue}{\mathsf{fma}\left(\frac{-1}{3} \cdot u0 - \frac{1}{2}, u0, -1\right)} \cdot u0\right) \]
                        7. sub-negN/A

                          \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \left(\mathsf{fma}\left(\color{blue}{\frac{-1}{3} \cdot u0 + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)}, u0, -1\right) \cdot u0\right) \]
                        8. metadata-evalN/A

                          \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \left(\mathsf{fma}\left(\frac{-1}{3} \cdot u0 + \color{blue}{\frac{-1}{2}}, u0, -1\right) \cdot u0\right) \]
                         9. lower-fma.f32 — 74.8

                          \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \left(\mathsf{fma}\left(\color{blue}{\mathsf{fma}\left(-0.3333333333333333, u0, -0.5\right)}, u0, -1\right) \cdot u0\right) \]
                      5. Applied rewrites74.5%

                        \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \color{blue}{\left(\mathsf{fma}\left(\mathsf{fma}\left(-0.3333333333333333, u0, -0.5\right), u0, -1\right) \cdot u0\right)} \]
                      6. Step-by-step derivation
                        1. Applied rewrites87.6%

                          \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \left(\left(\mathsf{fma}\left(-0.3333333333333333, u0, -0.5\right) \cdot u0 - 1\right) \cdot u0\right) \]
                        2. Step-by-step derivation
                          1. Applied rewrites92.2%

                            \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \left(\left(\left(-0.3333333333333333 \cdot u0 + -0.5\right) \cdot u0 - 1\right) \cdot u0\right) \]
                          2. Add Preprocessing

                          Alternative 7: 87.2% accurate, 4.5× speedup?

                          \[\begin{array}{l} \\ \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \left(\left(-0.5 \cdot u0 - 1\right) \cdot u0\right) \end{array} \]
                           ;; Herbie alternative 7: quadratic Taylor expansion of
                           ;; (* (* (- alpha) alpha) (log (- 1.0 u0))) about u0 = 0.
                           (FPCore (alpha u0)
                            :precision binary32
                            (* (* (- alpha) alpha) (* (- (* -0.5 u0) 1.0) u0)))
                          float code(float alpha, float u0) {
                          	return (-alpha * alpha) * (((-0.5f * u0) - 1.0f) * u0);
                          }
                          
                           real(4) function code(alpha, u0)
                               ! -alpha**2 * (-u0**2/2 - u0): quadratic Taylor expansion
                               ! of -alpha**2 * log(1 - u0) about u0 = 0 (alternative 7).
                               real(4), intent (in) :: alpha
                               real(4), intent (in) :: u0
                               code = (-alpha * alpha) * ((((-0.5e0) * u0) - 1.0e0) * u0)
                           end function
                          
                           # -alpha^2 * (-u0^2/2 - u0): quadratic Taylor expansion of
                           # -alpha^2 * log(1 - u0) about u0 = 0 (alternative 7; all Float32).
                           function code(alpha, u0)
                           	return Float32(Float32(Float32(-alpha) * alpha) * Float32(Float32(Float32(Float32(-0.5) * u0) - Float32(1.0)) * u0))
                           end
                          
                           % -alpha^2 * (-u0^2/2 - u0): quadratic Taylor expansion of
                           % -alpha^2 * log(1 - u0) about u0 = 0 (alternative 7; single precision).
                           function tmp = code(alpha, u0)
                           	tmp = (-alpha * alpha) * (((single(-0.5) * u0) - single(1.0)) * u0);
                           end
                          
                          \begin{array}{l}
                          
                          \\
                          \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \left(\left(-0.5 \cdot u0 - 1\right) \cdot u0\right)
                          \end{array}
                          
                          Derivation
                          1. Initial program 54.8%

                            \[\left(\left(-\alpha\right) \cdot \alpha\right) \cdot \log \left(1 - u0\right) \]
                          2. Add Preprocessing
                          3. Taylor expanded in u0 around 0

                            \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \color{blue}{\left(u0 \cdot \left(u0 \cdot \left(\frac{-1}{3} \cdot u0 - \frac{1}{2}\right) - 1\right)\right)} \]
                          4. Step-by-step derivation
                            1. *-commutativeN/A

                              \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \color{blue}{\left(\left(u0 \cdot \left(\frac{-1}{3} \cdot u0 - \frac{1}{2}\right) - 1\right) \cdot u0\right)} \]
                            2. lower-*.f32N/A

                              \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \color{blue}{\left(\left(u0 \cdot \left(\frac{-1}{3} \cdot u0 - \frac{1}{2}\right) - 1\right) \cdot u0\right)} \]
                            3. sub-negN/A

                              \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \left(\color{blue}{\left(u0 \cdot \left(\frac{-1}{3} \cdot u0 - \frac{1}{2}\right) + \left(\mathsf{neg}\left(1\right)\right)\right)} \cdot u0\right) \]
                              4. *-commutative — N/A

                              \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \left(\left(\color{blue}{\left(\frac{-1}{3} \cdot u0 - \frac{1}{2}\right) \cdot u0} + \left(\mathsf{neg}\left(1\right)\right)\right) \cdot u0\right) \]
                              5. metadata-eval — N/A

                              \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \left(\left(\left(\frac{-1}{3} \cdot u0 - \frac{1}{2}\right) \cdot u0 + \color{blue}{-1}\right) \cdot u0\right) \]
                              6. lower-fma.f32 — N/A

                              \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \left(\color{blue}{\mathsf{fma}\left(\frac{-1}{3} \cdot u0 - \frac{1}{2}, u0, -1\right)} \cdot u0\right) \]
                              7. sub-neg — N/A

                              \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \left(\mathsf{fma}\left(\color{blue}{\frac{-1}{3} \cdot u0 + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)}, u0, -1\right) \cdot u0\right) \]
                              8. metadata-eval — N/A

                              \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \left(\mathsf{fma}\left(\frac{-1}{3} \cdot u0 + \color{blue}{\frac{-1}{2}}, u0, -1\right) \cdot u0\right) \]
                              9. lower-fma.f32 — 74.8

                              \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \left(\mathsf{fma}\left(\color{blue}{\mathsf{fma}\left(-0.3333333333333333, u0, -0.5\right)}, u0, -1\right) \cdot u0\right) \]
                          5. Applied rewrites — 74.5%

                            \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \color{blue}{\left(\mathsf{fma}\left(\mathsf{fma}\left(-0.3333333333333333, u0, -0.5\right), u0, -1\right) \cdot u0\right)} \]
                          6. Step-by-step derivation
                            1. Applied rewrites — 87.2%

                              \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \left(\left(\mathsf{fma}\left(-0.3333333333333333, u0, -0.5\right) \cdot u0 - 1\right) \cdot u0\right) \]
                            2. Taylor expanded in u0 around 0

                              \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \left(\left(\frac{-1}{2} \cdot u0 - 1\right) \cdot u0\right) \]
                            3. Step-by-step derivation
                              1. Applied rewrites — 87.6%

                                \[\leadsto \left(\left(-\alpha\right) \cdot \alpha\right) \cdot \left(\left(-0.5 \cdot u0 - 1\right) \cdot u0\right) \]
                              2. Add Preprocessing

                              Alternative 8: 74.4% accurate, 10.5× speedup?

                              \[\begin{array}{l} \\ \left(u0 \cdot \alpha\right) \cdot \alpha \end{array} \]
                              (FPCore (alpha u0) :precision binary32 (* (* u0 alpha) alpha))
                              float code(float alpha, float u0) {
                              	/* alpha^2 * u0: first-order Taylor model of -alpha^2 * log(1 - u0). */
                              	const float t = u0 * alpha;
                              	return t * alpha;
                              }
                              
                              real(4) function code(alpha, u0)
                                  real(4), intent (in) :: alpha
                                  real(4), intent (in) :: u0
                                  ! alpha**2 * u0: first-order Taylor model of -alpha**2 * log(1 - u0)
                                  code = u0 * alpha * alpha
                              end function
                              
                              function code(alpha, u0)
                              	# alpha^2 * u0 — first-order Taylor model of -alpha^2 * log(1 - u0)
                              	t = Float32(u0 * alpha)
                              	return Float32(t * alpha)
                              end
                              
                              function tmp = code(alpha, u0)
                              	% alpha^2 * u0: first-order Taylor model of -alpha^2 * log(1 - u0)
                              	t = u0 * alpha;
                              	tmp = t * alpha;
                              end
                              
                              \begin{array}{l}
                              
                              \\
                              \left(u0 \cdot \alpha\right) \cdot \alpha
                              \end{array}
                              
                              Derivation
                              1. Initial program 54.8%

                                \[\left(\left(-\alpha\right) \cdot \alpha\right) \cdot \log \left(1 - u0\right) \]
                              2. Add Preprocessing
                              3. Taylor expanded in u0 around 0

                                \[\leadsto \color{blue}{{\alpha}^{2} \cdot u0} \]
                              4. Step-by-step derivation
                                  1. lower-*.f32 — N/A

                                  \[\leadsto \color{blue}{{\alpha}^{2} \cdot u0} \]
                                  2. unpow2 — N/A

                                  \[\leadsto \color{blue}{\left(\alpha \cdot \alpha\right)} \cdot u0 \]
                                  3. lower-*.f32 — 74.8

                                  \[\leadsto \color{blue}{\left(\alpha \cdot \alpha\right)} \cdot u0 \]
                              5. Applied rewrites — 74.8%

                                \[\leadsto \color{blue}{\left(\alpha \cdot \alpha\right) \cdot u0} \]
                              6. Step-by-step derivation
                                  1. Applied rewrites — 74.8%

                                  \[\leadsto \left(u0 \cdot \alpha\right) \cdot \color{blue}{\alpha} \]
                                2. Add Preprocessing

                                Alternative 9: 74.5% accurate, 10.5× speedup?

                                \[\begin{array}{l} \\ \left(\alpha \cdot \alpha\right) \cdot u0 \end{array} \]
                                (FPCore (alpha u0) :precision binary32 (* (* alpha alpha) u0))
                                float code(float alpha, float u0) {
                                	/* alpha^2 * u0, squaring alpha first. */
                                	const float a2 = alpha * alpha;
                                	return a2 * u0;
                                }
                                
                                real(4) function code(alpha, u0)
                                    real(4), intent (in) :: alpha
                                    real(4), intent (in) :: u0
                                    ! alpha**2 * u0, squaring alpha first (left-to-right evaluation)
                                    code = alpha * alpha * u0
                                end function
                                
                                function code(alpha, u0)
                                	# alpha^2 * u0, squaring alpha first
                                	a2 = Float32(alpha * alpha)
                                	return Float32(a2 * u0)
                                end
                                
                                function tmp = code(alpha, u0)
                                	% alpha^2 * u0, squaring alpha first
                                	a2 = alpha * alpha;
                                	tmp = a2 * u0;
                                end
                                
                                \begin{array}{l}
                                
                                \\
                                \left(\alpha \cdot \alpha\right) \cdot u0
                                \end{array}
                                
                                Derivation
                                1. Initial program 54.8%

                                  \[\left(\left(-\alpha\right) \cdot \alpha\right) \cdot \log \left(1 - u0\right) \]
                                2. Add Preprocessing
                                3. Taylor expanded in u0 around 0

                                  \[\leadsto \color{blue}{{\alpha}^{2} \cdot u0} \]
                                4. Step-by-step derivation
                                    1. lower-*.f32 — N/A

                                    \[\leadsto \color{blue}{{\alpha}^{2} \cdot u0} \]
                                    2. unpow2 — N/A

                                    \[\leadsto \color{blue}{\left(\alpha \cdot \alpha\right)} \cdot u0 \]
                                    3. lower-*.f32 — 74.8

                                    \[\leadsto \color{blue}{\left(\alpha \cdot \alpha\right)} \cdot u0 \]
                                5. Applied rewrites — 74.8%

                                  \[\leadsto \color{blue}{\left(\alpha \cdot \alpha\right) \cdot u0} \]
                                6. Add Preprocessing

                                Reproduce

                                ?
                                herbie shell --seed 2024313 
                                (FPCore (alpha u0)
                                  :name "Beckmann Distribution sample, tan2theta, alphax == alphay"
                                  :precision binary32
                                  :pre (and (and (<= 0.0001 alpha) (<= alpha 1.0)) (and (<= 2.328306437e-10 u0) (<= u0 1.0)))
                                  (* (* (- alpha) alpha) (log (- 1.0 u0))))