HairBSDF, sample_f, cosTheta

Percentage Accurate: 99.5% → 99.5%
Time: 19.9s
Alternatives: 12
Speedup: 1.0×

Specification

?
\[\left(10^{-5} \leq u \land u \leq 1\right) \land \left(0 \leq v \land v \leq 109.746574\right)\]
\[\begin{array}{l} \\ 1 + v \cdot \log \left(u + \left(1 - u\right) \cdot e^{\frac{-2}{v}}\right) \end{array} \]
(FPCore (u v)
 :precision binary32
 (+ 1.0 (* v (log (+ u (* (- 1.0 u) (exp (/ -2.0 v))))))))
float code(float u, float v) {
	return 1.0f + (v * logf((u + ((1.0f - u) * expf((-2.0f / v))))));
}
! HairBSDF sample_f cosTheta: 1 + v*log(u + (1-u)*exp(-2/v)).
! real(4) keeps every operation in binary32, matching the FPCore spec.
real(4) function code(u, v)
    real(4), intent (in) :: u
    real(4), intent (in) :: v
    code = 1.0e0 + (v * log((u + ((1.0e0 - u) * exp(((-2.0e0) / v))))))
end function
# HairBSDF sample_f cosTheta: 1 + v*log(u + (1-u)*exp(-2/v)).
# The explicit Float32() wrappers force binary32 rounding after each
# operation, so results match the C/binary32 reference exactly.
function code(u, v)
	return Float32(Float32(1.0) + Float32(v * log(Float32(u + Float32(Float32(Float32(1.0) - u) * exp(Float32(Float32(-2.0) / v)))))))
end
function tmp = code(u, v)
	% HairBSDF sample_f cosTheta: 1 + v*log(u + (1-u)*exp(-2/v)).
	% single() literals keep the arithmetic in binary32 precision.
	tmp = single(1.0) + (v * log((u + ((single(1.0) - u) * exp((single(-2.0) / v))))));
end
\begin{array}{l}

\\
1 + v \cdot \log \left(u + \left(1 - u\right) \cdot e^{\frac{-2}{v}}\right)
\end{array}

Sampling outcomes in binary32 precision:

Local Percentage Accuracy vs ?

The average percentage accuracy by input value. The horizontal axis shows the value of one input variable (named in the title); the vertical axis is accuracy, where higher is better. Red represents the original program, while blue represents Herbie's suggestion. These can be toggled with the buttons below the plot. The line is an average, while dots represent individual samples.

Accuracy vs Speed?

Herbie found 12 alternatives:

Alternative — Accuracy — Speedup
The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed-accuracy tradeoffs.

Initial Program: 99.5% accurate, 1.0× speedup?

\[\begin{array}{l} \\ 1 + v \cdot \log \left(u + \left(1 - u\right) \cdot e^{\frac{-2}{v}}\right) \end{array} \]
(FPCore (u v)
 :precision binary32
 (+ 1.0 (* v (log (+ u (* (- 1.0 u) (exp (/ -2.0 v))))))))
float code(float u, float v) {
	return 1.0f + (v * logf((u + ((1.0f - u) * expf((-2.0f / v))))));
}
! HairBSDF sample_f cosTheta: 1 + v*log(u + (1-u)*exp(-2/v)).
! real(4) keeps every operation in binary32, matching the FPCore spec.
real(4) function code(u, v)
    real(4), intent (in) :: u
    real(4), intent (in) :: v
    code = 1.0e0 + (v * log((u + ((1.0e0 - u) * exp(((-2.0e0) / v))))))
end function
# HairBSDF sample_f cosTheta: 1 + v*log(u + (1-u)*exp(-2/v)).
# Float32() wrappers force binary32 rounding after each operation.
function code(u, v)
	return Float32(Float32(1.0) + Float32(v * log(Float32(u + Float32(Float32(Float32(1.0) - u) * exp(Float32(Float32(-2.0) / v)))))))
end
function tmp = code(u, v)
	% HairBSDF sample_f cosTheta: 1 + v*log(u + (1-u)*exp(-2/v)).
	% single() literals keep the arithmetic in binary32 precision.
	tmp = single(1.0) + (v * log((u + ((single(1.0) - u) * exp((single(-2.0) / v))))));
end
\begin{array}{l}

\\
1 + v \cdot \log \left(u + \left(1 - u\right) \cdot e^{\frac{-2}{v}}\right)
\end{array}

Alternative 1: 99.5% accurate, 0.5× speedup?

\[\begin{array}{l} \\ \mathsf{fma}\left(v, \log \left(u + e^{\frac{-2}{v} + \mathsf{log1p}\left(-u\right)}\right), 1\right) \end{array} \]
(FPCore (u v)
 :precision binary32
 (fma v (log (+ u (exp (+ (/ -2.0 v) (log1p (- u)))))) 1.0))
float code(float u, float v) {
	return fmaf(v, logf((u + expf(((-2.0f / v) + log1pf(-u))))), 1.0f);
}
# Alternative 1: fma(v, log(u + exp(-2/v + log1p(-u))), 1).
# Float32() wrappers force binary32 rounding after each operation.
function code(u, v)
	return fma(v, log(Float32(u + exp(Float32(Float32(Float32(-2.0) / v) + log1p(Float32(-u)))))), Float32(1.0))
end
\begin{array}{l}

\\
\mathsf{fma}\left(v, \log \left(u + e^{\frac{-2}{v} + \mathsf{log1p}\left(-u\right)}\right), 1\right)
\end{array}
Derivation
  1. Initial program 99.6%

    \[1 + v \cdot \log \left(u + \left(1 - u\right) \cdot e^{\frac{-2}{v}}\right) \]
  2. Step-by-step derivation
    1. +-commutative99.6%

      \[\leadsto \color{blue}{v \cdot \log \left(u + \left(1 - u\right) \cdot e^{\frac{-2}{v}}\right) + 1} \]
    2. fma-def99.6%

      \[\leadsto \color{blue}{\mathsf{fma}\left(v, \log \left(u + \left(1 - u\right) \cdot e^{\frac{-2}{v}}\right), 1\right)} \]
    3. +-commutative99.6%

      \[\leadsto \mathsf{fma}\left(v, \log \color{blue}{\left(\left(1 - u\right) \cdot e^{\frac{-2}{v}} + u\right)}, 1\right) \]
    4. fma-def99.6%

      \[\leadsto \mathsf{fma}\left(v, \log \color{blue}{\left(\mathsf{fma}\left(1 - u, e^{\frac{-2}{v}}, u\right)\right)}, 1\right) \]
  3. Simplified99.6%

    \[\leadsto \color{blue}{\mathsf{fma}\left(v, \log \left(\mathsf{fma}\left(1 - u, e^{\frac{-2}{v}}, u\right)\right), 1\right)} \]
  4. Step-by-step derivation
    1. fma-udef99.6%

      \[\leadsto \mathsf{fma}\left(v, \log \color{blue}{\left(\left(1 - u\right) \cdot e^{\frac{-2}{v}} + u\right)}, 1\right) \]
  5. Applied egg-rr99.6%

    \[\leadsto \mathsf{fma}\left(v, \log \color{blue}{\left(\left(1 - u\right) \cdot e^{\frac{-2}{v}} + u\right)}, 1\right) \]
  6. Step-by-step derivation
    1. add-exp-log99.6%

      \[\leadsto \mathsf{fma}\left(v, \log \left(\color{blue}{e^{\log \left(\left(1 - u\right) \cdot e^{\frac{-2}{v}}\right)}} + u\right), 1\right) \]
    2. *-commutative99.6%

      \[\leadsto \mathsf{fma}\left(v, \log \left(e^{\log \color{blue}{\left(e^{\frac{-2}{v}} \cdot \left(1 - u\right)\right)}} + u\right), 1\right) \]
    3. log-prod99.6%

      \[\leadsto \mathsf{fma}\left(v, \log \left(e^{\color{blue}{\log \left(e^{\frac{-2}{v}}\right) + \log \left(1 - u\right)}} + u\right), 1\right) \]
    4. add-log-exp99.6%

      \[\leadsto \mathsf{fma}\left(v, \log \left(e^{\color{blue}{\frac{-2}{v}} + \log \left(1 - u\right)} + u\right), 1\right) \]
    5. sub-neg99.6%

      \[\leadsto \mathsf{fma}\left(v, \log \left(e^{\frac{-2}{v} + \log \color{blue}{\left(1 + \left(-u\right)\right)}} + u\right), 1\right) \]
    6. log1p-def99.6%

      \[\leadsto \mathsf{fma}\left(v, \log \left(e^{\frac{-2}{v} + \color{blue}{\mathsf{log1p}\left(-u\right)}} + u\right), 1\right) \]
  7. Applied egg-rr99.6%

    \[\leadsto \mathsf{fma}\left(v, \log \left(\color{blue}{e^{\frac{-2}{v} + \mathsf{log1p}\left(-u\right)}} + u\right), 1\right) \]
  8. Final simplification99.6%

    \[\leadsto \mathsf{fma}\left(v, \log \left(u + e^{\frac{-2}{v} + \mathsf{log1p}\left(-u\right)}\right), 1\right) \]

Alternative 2: 99.5% accurate, 0.7× speedup?

\[\begin{array}{l} \\ \mathsf{fma}\left(v, \log \left(u + \left(1 - u\right) \cdot e^{\frac{-2}{v}}\right), 1\right) \end{array} \]
(FPCore (u v)
 :precision binary32
 (fma v (log (+ u (* (- 1.0 u) (exp (/ -2.0 v))))) 1.0))
float code(float u, float v) {
	return fmaf(v, logf((u + ((1.0f - u) * expf((-2.0f / v))))), 1.0f);
}
# Alternative 2: fma(v, log(u + (1-u)*exp(-2/v)), 1).
# Float32() wrappers force binary32 rounding after each operation.
function code(u, v)
	return fma(v, log(Float32(u + Float32(Float32(Float32(1.0) - u) * exp(Float32(Float32(-2.0) / v))))), Float32(1.0))
end
\begin{array}{l}

\\
\mathsf{fma}\left(v, \log \left(u + \left(1 - u\right) \cdot e^{\frac{-2}{v}}\right), 1\right)
\end{array}
Derivation
  1. Initial program 99.6%

    \[1 + v \cdot \log \left(u + \left(1 - u\right) \cdot e^{\frac{-2}{v}}\right) \]
  2. Step-by-step derivation
    1. +-commutative99.6%

      \[\leadsto \color{blue}{v \cdot \log \left(u + \left(1 - u\right) \cdot e^{\frac{-2}{v}}\right) + 1} \]
    2. fma-def99.6%

      \[\leadsto \color{blue}{\mathsf{fma}\left(v, \log \left(u + \left(1 - u\right) \cdot e^{\frac{-2}{v}}\right), 1\right)} \]
    3. +-commutative99.6%

      \[\leadsto \mathsf{fma}\left(v, \log \color{blue}{\left(\left(1 - u\right) \cdot e^{\frac{-2}{v}} + u\right)}, 1\right) \]
    4. fma-def99.6%

      \[\leadsto \mathsf{fma}\left(v, \log \color{blue}{\left(\mathsf{fma}\left(1 - u, e^{\frac{-2}{v}}, u\right)\right)}, 1\right) \]
  3. Simplified99.6%

    \[\leadsto \color{blue}{\mathsf{fma}\left(v, \log \left(\mathsf{fma}\left(1 - u, e^{\frac{-2}{v}}, u\right)\right), 1\right)} \]
  4. Step-by-step derivation
    1. fma-udef99.6%

      \[\leadsto \mathsf{fma}\left(v, \log \color{blue}{\left(\left(1 - u\right) \cdot e^{\frac{-2}{v}} + u\right)}, 1\right) \]
  5. Applied egg-rr99.6%

    \[\leadsto \mathsf{fma}\left(v, \log \color{blue}{\left(\left(1 - u\right) \cdot e^{\frac{-2}{v}} + u\right)}, 1\right) \]
  6. Final simplification99.6%

    \[\leadsto \mathsf{fma}\left(v, \log \left(u + \left(1 - u\right) \cdot e^{\frac{-2}{v}}\right), 1\right) \]

Alternative 3: 99.5% accurate, 1.0× speedup?

\[\begin{array}{l} \\ 1 + v \cdot \log \left(u + \left(1 - u\right) \cdot e^{\frac{-2}{v}}\right) \end{array} \]
(FPCore (u v)
 :precision binary32
 (+ 1.0 (* v (log (+ u (* (- 1.0 u) (exp (/ -2.0 v))))))))
float code(float u, float v) {
	return 1.0f + (v * logf((u + ((1.0f - u) * expf((-2.0f / v))))));
}
! Alternative 3 (identical to the initial program):
! 1 + v*log(u + (1-u)*exp(-2/v)) in binary32.
real(4) function code(u, v)
    real(4), intent (in) :: u
    real(4), intent (in) :: v
    code = 1.0e0 + (v * log((u + ((1.0e0 - u) * exp(((-2.0e0) / v))))))
end function
# Alternative 3 (identical to the initial program):
# 1 + v*log(u + (1-u)*exp(-2/v)), with Float32() forcing binary32.
function code(u, v)
	return Float32(Float32(1.0) + Float32(v * log(Float32(u + Float32(Float32(Float32(1.0) - u) * exp(Float32(Float32(-2.0) / v)))))))
end
function tmp = code(u, v)
	% Alternative 3 (identical to the initial program):
	% 1 + v*log(u + (1-u)*exp(-2/v)) in single precision.
	tmp = single(1.0) + (v * log((u + ((single(1.0) - u) * exp((single(-2.0) / v))))));
end
\begin{array}{l}

\\
1 + v \cdot \log \left(u + \left(1 - u\right) \cdot e^{\frac{-2}{v}}\right)
\end{array}
Derivation
  1. Initial program 99.6%

    \[1 + v \cdot \log \left(u + \left(1 - u\right) \cdot e^{\frac{-2}{v}}\right) \]
  2. Final simplification99.6%

    \[\leadsto 1 + v \cdot \log \left(u + \left(1 - u\right) \cdot e^{\frac{-2}{v}}\right) \]

Alternative 4: 96.2% accurate, 1.0× speedup?

\[\begin{array}{l} \\ 1 + v \cdot \log \left(u + e^{\frac{-2}{v}}\right) \end{array} \]
(FPCore (u v) :precision binary32 (+ 1.0 (* v (log (+ u (exp (/ -2.0 v)))))))
float code(float u, float v) {
	return 1.0f + (v * logf((u + expf((-2.0f / v)))));
}
! Alternative 4: 1 + v*log(u + exp(-2/v)); the (1-u) factor is
! dropped (Taylor expansion in u around 0), trading accuracy near u=1.
real(4) function code(u, v)
    real(4), intent (in) :: u
    real(4), intent (in) :: v
    code = 1.0e0 + (v * log((u + exp(((-2.0e0) / v)))))
end function
# Alternative 4: 1 + v*log(u + exp(-2/v)); the (1-u) factor is
# dropped (Taylor expansion in u around 0). Float32() forces binary32.
function code(u, v)
	return Float32(Float32(1.0) + Float32(v * log(Float32(u + exp(Float32(Float32(-2.0) / v))))))
end
function tmp = code(u, v)
	% Alternative 4: 1 + v*log(u + exp(-2/v)); the (1-u) factor is
	% dropped (Taylor expansion in u around 0), in single precision.
	tmp = single(1.0) + (v * log((u + exp((single(-2.0) / v)))));
end
\begin{array}{l}

\\
1 + v \cdot \log \left(u + e^{\frac{-2}{v}}\right)
\end{array}
Derivation
  1. Initial program 99.6%

    \[1 + v \cdot \log \left(u + \left(1 - u\right) \cdot e^{\frac{-2}{v}}\right) \]
  2. Taylor expanded in u around 0 96.7%

    \[\leadsto 1 + v \cdot \log \left(u + \color{blue}{e^{\frac{-2}{v}}}\right) \]
  3. Final simplification96.7%

    \[\leadsto 1 + v \cdot \log \left(u + e^{\frac{-2}{v}}\right) \]

Alternative 5: 97.5% accurate, 1.9× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;v \leq 0.20000000298023224:\\ \;\;\;\;1 + v \cdot \log u\\ \mathbf{else}:\\ \;\;\;\;u \cdot \left(v \cdot \mathsf{expm1}\left(\frac{2}{v}\right)\right) + -1\\ \end{array} \end{array} \]
(FPCore (u v)
 :precision binary32
 (if (<= v 0.20000000298023224)
   (+ 1.0 (* v (log u)))
   (+ (* u (* v (expm1 (/ 2.0 v)))) -1.0)))
float code(float u, float v) {
	float tmp;
	if (v <= 0.20000000298023224f) {
		tmp = 1.0f + (v * logf(u));
	} else {
		tmp = (u * (v * expm1f((2.0f / v)))) + -1.0f;
	}
	return tmp;
}
# Alternative 5, two regimes split at v = 0.2. Float32() wrappers
# force binary32 rounding after each operation.
function code(u, v)
	tmp = Float32(0.0)
	if (v <= Float32(0.20000000298023224))
		# small-v regime: 1 + v*log(u)
		tmp = Float32(Float32(1.0) + Float32(v * log(u)));
	else
		# large-v regime: u*v*expm1(2/v) - 1
		tmp = Float32(Float32(u * Float32(v * expm1(Float32(Float32(2.0) / v)))) + Float32(-1.0));
	end
	return tmp
end
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;v \leq 0.20000000298023224:\\
\;\;\;\;1 + v \cdot \log u\\

\mathbf{else}:\\
\;\;\;\;u \cdot \left(v \cdot \mathsf{expm1}\left(\frac{2}{v}\right)\right) + -1\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if v < 0.200000003

    1. Initial program 100.0%

      \[1 + v \cdot \log \left(u + \left(1 - u\right) \cdot e^{\frac{-2}{v}}\right) \]
    2. Step-by-step derivation
      1. +-commutative100.0%

        \[\leadsto \color{blue}{v \cdot \log \left(u + \left(1 - u\right) \cdot e^{\frac{-2}{v}}\right) + 1} \]
      2. fma-def100.0%

        \[\leadsto \color{blue}{\mathsf{fma}\left(v, \log \left(u + \left(1 - u\right) \cdot e^{\frac{-2}{v}}\right), 1\right)} \]
      3. +-commutative100.0%

        \[\leadsto \mathsf{fma}\left(v, \log \color{blue}{\left(\left(1 - u\right) \cdot e^{\frac{-2}{v}} + u\right)}, 1\right) \]
      4. fma-def100.0%

        \[\leadsto \mathsf{fma}\left(v, \log \color{blue}{\left(\mathsf{fma}\left(1 - u, e^{\frac{-2}{v}}, u\right)\right)}, 1\right) \]
    3. Simplified100.0%

      \[\leadsto \color{blue}{\mathsf{fma}\left(v, \log \left(\mathsf{fma}\left(1 - u, e^{\frac{-2}{v}}, u\right)\right), 1\right)} \]
    4. Step-by-step derivation
      1. fma-udef100.0%

        \[\leadsto \mathsf{fma}\left(v, \log \color{blue}{\left(\left(1 - u\right) \cdot e^{\frac{-2}{v}} + u\right)}, 1\right) \]
    5. Applied egg-rr100.0%

      \[\leadsto \mathsf{fma}\left(v, \log \color{blue}{\left(\left(1 - u\right) \cdot e^{\frac{-2}{v}} + u\right)}, 1\right) \]
    6. Step-by-step derivation
      1. add-exp-log100.0%

        \[\leadsto \mathsf{fma}\left(v, \log \left(\color{blue}{e^{\log \left(\left(1 - u\right) \cdot e^{\frac{-2}{v}}\right)}} + u\right), 1\right) \]
      2. *-commutative100.0%

        \[\leadsto \mathsf{fma}\left(v, \log \left(e^{\log \color{blue}{\left(e^{\frac{-2}{v}} \cdot \left(1 - u\right)\right)}} + u\right), 1\right) \]
      3. log-prod100.0%

        \[\leadsto \mathsf{fma}\left(v, \log \left(e^{\color{blue}{\log \left(e^{\frac{-2}{v}}\right) + \log \left(1 - u\right)}} + u\right), 1\right) \]
      4. add-log-exp100.0%

        \[\leadsto \mathsf{fma}\left(v, \log \left(e^{\color{blue}{\frac{-2}{v}} + \log \left(1 - u\right)} + u\right), 1\right) \]
      5. sub-neg100.0%

        \[\leadsto \mathsf{fma}\left(v, \log \left(e^{\frac{-2}{v} + \log \color{blue}{\left(1 + \left(-u\right)\right)}} + u\right), 1\right) \]
      6. log1p-def100.0%

        \[\leadsto \mathsf{fma}\left(v, \log \left(e^{\frac{-2}{v} + \color{blue}{\mathsf{log1p}\left(-u\right)}} + u\right), 1\right) \]
    7. Applied egg-rr100.0%

      \[\leadsto \mathsf{fma}\left(v, \log \left(\color{blue}{e^{\frac{-2}{v} + \mathsf{log1p}\left(-u\right)}} + u\right), 1\right) \]
    8. Taylor expanded in u around inf 99.8%

      \[\leadsto \color{blue}{1 + -1 \cdot \left(v \cdot \log \left(\frac{1}{u}\right)\right)} \]
    9. Step-by-step derivation
      1. mul-1-neg99.8%

        \[\leadsto 1 + \color{blue}{\left(-v \cdot \log \left(\frac{1}{u}\right)\right)} \]
      2. distribute-rgt-neg-in99.8%

        \[\leadsto 1 + \color{blue}{v \cdot \left(-\log \left(\frac{1}{u}\right)\right)} \]
      3. log-rec99.8%

        \[\leadsto 1 + v \cdot \left(-\color{blue}{\left(-\log u\right)}\right) \]
      4. remove-double-neg99.8%

        \[\leadsto 1 + v \cdot \color{blue}{\log u} \]
    10. Simplified99.8%

      \[\leadsto \color{blue}{1 + v \cdot \log u} \]

    if 0.200000003 < v

    1. Initial program 93.0%

      \[1 + v \cdot \log \left(u + \left(1 - u\right) \cdot e^{\frac{-2}{v}}\right) \]
    2. Taylor expanded in u around 0 72.5%

      \[\leadsto 1 + v \cdot \color{blue}{\left(u \cdot \left(\frac{1}{e^{\frac{-2}{v}}} - 1\right) - 2 \cdot \frac{1}{v}\right)} \]
    3. Step-by-step derivation
      1. fma-neg72.5%

        \[\leadsto 1 + v \cdot \color{blue}{\mathsf{fma}\left(u, \frac{1}{e^{\frac{-2}{v}}} - 1, -2 \cdot \frac{1}{v}\right)} \]
      2. rec-exp72.5%

        \[\leadsto 1 + v \cdot \mathsf{fma}\left(u, \color{blue}{e^{-\frac{-2}{v}}} - 1, -2 \cdot \frac{1}{v}\right) \]
      3. expm1-def72.5%

        \[\leadsto 1 + v \cdot \mathsf{fma}\left(u, \color{blue}{\mathsf{expm1}\left(-\frac{-2}{v}\right)}, -2 \cdot \frac{1}{v}\right) \]
      4. distribute-neg-frac72.5%

        \[\leadsto 1 + v \cdot \mathsf{fma}\left(u, \mathsf{expm1}\left(\color{blue}{\frac{--2}{v}}\right), -2 \cdot \frac{1}{v}\right) \]
      5. metadata-eval72.5%

        \[\leadsto 1 + v \cdot \mathsf{fma}\left(u, \mathsf{expm1}\left(\frac{\color{blue}{2}}{v}\right), -2 \cdot \frac{1}{v}\right) \]
      6. associate-*r/72.5%

        \[\leadsto 1 + v \cdot \mathsf{fma}\left(u, \mathsf{expm1}\left(\frac{2}{v}\right), -\color{blue}{\frac{2 \cdot 1}{v}}\right) \]
      7. metadata-eval72.5%

        \[\leadsto 1 + v \cdot \mathsf{fma}\left(u, \mathsf{expm1}\left(\frac{2}{v}\right), -\frac{\color{blue}{2}}{v}\right) \]
      8. distribute-neg-frac72.5%

        \[\leadsto 1 + v \cdot \mathsf{fma}\left(u, \mathsf{expm1}\left(\frac{2}{v}\right), \color{blue}{\frac{-2}{v}}\right) \]
      9. metadata-eval72.5%

        \[\leadsto 1 + v \cdot \mathsf{fma}\left(u, \mathsf{expm1}\left(\frac{2}{v}\right), \frac{\color{blue}{-2}}{v}\right) \]
    4. Simplified72.5%

      \[\leadsto 1 + v \cdot \color{blue}{\mathsf{fma}\left(u, \mathsf{expm1}\left(\frac{2}{v}\right), \frac{-2}{v}\right)} \]
    5. Taylor expanded in v around 0 73.9%

      \[\leadsto \color{blue}{u \cdot \left(v \cdot \left(e^{\frac{2}{v}} - 1\right)\right) - 1} \]
    6. Step-by-step derivation
      1. sub-neg73.9%

        \[\leadsto u \cdot \left(v \cdot \color{blue}{\left(e^{\frac{2}{v}} + \left(-1\right)\right)}\right) - 1 \]
      2. metadata-eval73.9%

        \[\leadsto u \cdot \left(v \cdot \left(e^{\frac{2}{v}} + \color{blue}{-1}\right)\right) - 1 \]
    7. Applied egg-rr73.9%

      \[\leadsto u \cdot \left(v \cdot \color{blue}{\left(e^{\frac{2}{v}} + -1\right)}\right) - 1 \]
    8. Step-by-step derivation
      1. remove-double-div73.9%

        \[\leadsto u \cdot \left(v \cdot \left(\color{blue}{\frac{1}{\frac{1}{e^{\frac{2}{v}}}}} + -1\right)\right) - 1 \]
      2. metadata-eval73.9%

        \[\leadsto u \cdot \left(v \cdot \left(\frac{1}{\frac{1}{e^{\frac{\color{blue}{2 \cdot 1}}{v}}}} + -1\right)\right) - 1 \]
      3. associate-*r/73.9%

        \[\leadsto u \cdot \left(v \cdot \left(\frac{1}{\frac{1}{e^{\color{blue}{2 \cdot \frac{1}{v}}}}} + -1\right)\right) - 1 \]
      4. exp-neg73.9%

        \[\leadsto u \cdot \left(v \cdot \left(\frac{1}{\color{blue}{e^{-2 \cdot \frac{1}{v}}}} + -1\right)\right) - 1 \]
      5. metadata-eval73.9%

        \[\leadsto u \cdot \left(v \cdot \left(\frac{1}{e^{-2 \cdot \frac{1}{v}}} + \color{blue}{\left(-1\right)}\right)\right) - 1 \]
      6. sub-neg73.9%

        \[\leadsto u \cdot \left(v \cdot \color{blue}{\left(\frac{1}{e^{-2 \cdot \frac{1}{v}}} - 1\right)}\right) - 1 \]
      7. rec-exp73.9%

        \[\leadsto u \cdot \left(v \cdot \left(\color{blue}{e^{-\left(-2 \cdot \frac{1}{v}\right)}} - 1\right)\right) - 1 \]
      8. expm1-def73.9%

        \[\leadsto u \cdot \left(v \cdot \color{blue}{\mathsf{expm1}\left(-\left(-2 \cdot \frac{1}{v}\right)\right)}\right) - 1 \]
      9. remove-double-neg73.9%

        \[\leadsto u \cdot \left(v \cdot \mathsf{expm1}\left(\color{blue}{2 \cdot \frac{1}{v}}\right)\right) - 1 \]
      10. associate-*r/73.9%

        \[\leadsto u \cdot \left(v \cdot \mathsf{expm1}\left(\color{blue}{\frac{2 \cdot 1}{v}}\right)\right) - 1 \]
      11. metadata-eval73.9%

        \[\leadsto u \cdot \left(v \cdot \mathsf{expm1}\left(\frac{\color{blue}{2}}{v}\right)\right) - 1 \]
    9. Simplified73.9%

      \[\leadsto u \cdot \left(v \cdot \color{blue}{\mathsf{expm1}\left(\frac{2}{v}\right)}\right) - 1 \]
  3. Recombined 2 regimes into one program.
  4. Final simplification98.2%

    \[\leadsto \begin{array}{l} \mathbf{if}\;v \leq 0.20000000298023224:\\ \;\;\;\;1 + v \cdot \log u\\ \mathbf{else}:\\ \;\;\;\;u \cdot \left(v \cdot \mathsf{expm1}\left(\frac{2}{v}\right)\right) + -1\\ \end{array} \]

Alternative 6: 94.6% accurate, 2.0× speedup?

\[\begin{array}{l} \\ 1 + v \cdot \log u \end{array} \]
(FPCore (u v) :precision binary32 (+ 1.0 (* v (log u))))
float code(float u, float v) {
	return 1.0f + (v * logf(u));
}
! Alternative 6: 1 + v*log(u); the exp(-2/v) term is dropped
! entirely (Taylor expansion), in binary32.
real(4) function code(u, v)
    real(4), intent (in) :: u
    real(4), intent (in) :: v
    code = 1.0e0 + (v * log(u))
end function
# Alternative 6: 1 + v*log(u); the exp(-2/v) term is dropped
# entirely (Taylor expansion). Float32() forces binary32 rounding.
function code(u, v)
	return Float32(Float32(1.0) + Float32(v * log(u)))
end
function tmp = code(u, v)
	% Alternative 6: 1 + v*log(u); the exp(-2/v) term is dropped
	% entirely (Taylor expansion), in single precision.
	tmp = single(1.0) + (v * log(u));
end
\begin{array}{l}

\\
1 + v \cdot \log u
\end{array}
Derivation
  1. Initial program 99.6%

    \[1 + v \cdot \log \left(u + \left(1 - u\right) \cdot e^{\frac{-2}{v}}\right) \]
  2. Step-by-step derivation
    1. +-commutative99.6%

      \[\leadsto \color{blue}{v \cdot \log \left(u + \left(1 - u\right) \cdot e^{\frac{-2}{v}}\right) + 1} \]
    2. fma-def99.6%

      \[\leadsto \color{blue}{\mathsf{fma}\left(v, \log \left(u + \left(1 - u\right) \cdot e^{\frac{-2}{v}}\right), 1\right)} \]
    3. +-commutative99.6%

      \[\leadsto \mathsf{fma}\left(v, \log \color{blue}{\left(\left(1 - u\right) \cdot e^{\frac{-2}{v}} + u\right)}, 1\right) \]
    4. fma-def99.6%

      \[\leadsto \mathsf{fma}\left(v, \log \color{blue}{\left(\mathsf{fma}\left(1 - u, e^{\frac{-2}{v}}, u\right)\right)}, 1\right) \]
  3. Simplified99.6%

    \[\leadsto \color{blue}{\mathsf{fma}\left(v, \log \left(\mathsf{fma}\left(1 - u, e^{\frac{-2}{v}}, u\right)\right), 1\right)} \]
  4. Step-by-step derivation
    1. fma-udef99.6%

      \[\leadsto \mathsf{fma}\left(v, \log \color{blue}{\left(\left(1 - u\right) \cdot e^{\frac{-2}{v}} + u\right)}, 1\right) \]
  5. Applied egg-rr99.6%

    \[\leadsto \mathsf{fma}\left(v, \log \color{blue}{\left(\left(1 - u\right) \cdot e^{\frac{-2}{v}} + u\right)}, 1\right) \]
  6. Step-by-step derivation
    1. add-exp-log99.6%

      \[\leadsto \mathsf{fma}\left(v, \log \left(\color{blue}{e^{\log \left(\left(1 - u\right) \cdot e^{\frac{-2}{v}}\right)}} + u\right), 1\right) \]
    2. *-commutative99.6%

      \[\leadsto \mathsf{fma}\left(v, \log \left(e^{\log \color{blue}{\left(e^{\frac{-2}{v}} \cdot \left(1 - u\right)\right)}} + u\right), 1\right) \]
    3. log-prod99.6%

      \[\leadsto \mathsf{fma}\left(v, \log \left(e^{\color{blue}{\log \left(e^{\frac{-2}{v}}\right) + \log \left(1 - u\right)}} + u\right), 1\right) \]
    4. add-log-exp99.6%

      \[\leadsto \mathsf{fma}\left(v, \log \left(e^{\color{blue}{\frac{-2}{v}} + \log \left(1 - u\right)} + u\right), 1\right) \]
    5. sub-neg99.6%

      \[\leadsto \mathsf{fma}\left(v, \log \left(e^{\frac{-2}{v} + \log \color{blue}{\left(1 + \left(-u\right)\right)}} + u\right), 1\right) \]
    6. log1p-def99.6%

      \[\leadsto \mathsf{fma}\left(v, \log \left(e^{\frac{-2}{v} + \color{blue}{\mathsf{log1p}\left(-u\right)}} + u\right), 1\right) \]
  7. Applied egg-rr99.6%

    \[\leadsto \mathsf{fma}\left(v, \log \left(\color{blue}{e^{\frac{-2}{v} + \mathsf{log1p}\left(-u\right)}} + u\right), 1\right) \]
  8. Taylor expanded in u around inf 95.0%

    \[\leadsto \color{blue}{1 + -1 \cdot \left(v \cdot \log \left(\frac{1}{u}\right)\right)} \]
  9. Step-by-step derivation
    1. mul-1-neg95.0%

      \[\leadsto 1 + \color{blue}{\left(-v \cdot \log \left(\frac{1}{u}\right)\right)} \]
    2. distribute-rgt-neg-in95.0%

      \[\leadsto 1 + \color{blue}{v \cdot \left(-\log \left(\frac{1}{u}\right)\right)} \]
    3. log-rec95.0%

      \[\leadsto 1 + v \cdot \left(-\color{blue}{\left(-\log u\right)}\right) \]
    4. remove-double-neg95.0%

      \[\leadsto 1 + v \cdot \color{blue}{\log u} \]
  10. Simplified95.0%

    \[\leadsto \color{blue}{1 + v \cdot \log u} \]
  11. Final simplification95.0%

    \[\leadsto 1 + v \cdot \log u \]

Alternative 7: 90.7% accurate, 12.4× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;v \leq 0.10000000149011612:\\ \;\;\;\;1\\ \mathbf{else}:\\ \;\;\;\;u \cdot \left(2 + \left(\frac{2}{v} + \frac{1.3333333333333333}{v \cdot v}\right)\right) + -1\\ \end{array} \end{array} \]
(FPCore (u v)
 :precision binary32
 (if (<= v 0.10000000149011612)
   1.0
   (+ (* u (+ 2.0 (+ (/ 2.0 v) (/ 1.3333333333333333 (* v v))))) -1.0)))
/* Alternative 7, two regimes split at v = 0.1:
 *   small v: constant 1
 *   large v: u*(2 + 2/v + (4/3)/v^2) - 1  (series in 1/v)
 * Early returns replace the original tmp variable; the computed
 * expressions and their operation order are unchanged. */
float code(float u, float v) {
	if (v <= 0.10000000149011612f) {
		return 1.0f;
	}
	const float series = 2.0f + ((2.0f / v) + (1.3333333333333333f / (v * v)));
	return (u * series) + -1.0f;
}
! Alternative 7, two regimes split at v = 0.1: constant 1 for small
! v; a series in 1/v, u*(2 + 2/v + (4/3)/v**2) - 1, otherwise.
real(4) function code(u, v)
    real(4), intent (in) :: u
    real(4), intent (in) :: v
    real(4) :: tmp
    if (v <= 0.10000000149011612e0) then
        ! small-v regime
        tmp = 1.0e0
    else
        ! large-v series regime
        tmp = (u * (2.0e0 + ((2.0e0 / v) + (1.3333333333333333e0 / (v * v))))) + (-1.0e0)
    end if
    code = tmp
end function
# Alternative 7, two regimes split at v = 0.1: constant 1 for small
# v; a series in 1/v otherwise. Float32() forces binary32 rounding.
function code(u, v)
	tmp = Float32(0.0)
	if (v <= Float32(0.10000000149011612))
		# small-v regime
		tmp = Float32(1.0);
	else
		# large-v series regime: u*(2 + 2/v + (4/3)/v^2) - 1
		tmp = Float32(Float32(u * Float32(Float32(2.0) + Float32(Float32(Float32(2.0) / v) + Float32(Float32(1.3333333333333333) / Float32(v * v))))) + Float32(-1.0));
	end
	return tmp
end
function tmp_2 = code(u, v)
	% Alternative 7, two regimes split at v = 0.1: constant 1 for
	% small v; a series in 1/v otherwise, in single precision.
	tmp = single(0.0);
	if (v <= single(0.10000000149011612))
		% small-v regime
		tmp = single(1.0);
	else
		% large-v series regime: u*(2 + 2/v + (4/3)/v^2) - 1
		tmp = (u * (single(2.0) + ((single(2.0) / v) + (single(1.3333333333333333) / (v * v))))) + single(-1.0);
	end
	tmp_2 = tmp;
end
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;v \leq 0.10000000149011612:\\
\;\;\;\;1\\

\mathbf{else}:\\
\;\;\;\;u \cdot \left(2 + \left(\frac{2}{v} + \frac{1.3333333333333333}{v \cdot v}\right)\right) + -1\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if v < 0.100000001

    1. Initial program 100.0%

      \[1 + v \cdot \log \left(u + \left(1 - u\right) \cdot e^{\frac{-2}{v}}\right) \]
    2. Step-by-step derivation
      1. +-commutative100.0%

        \[\leadsto \color{blue}{v \cdot \log \left(u + \left(1 - u\right) \cdot e^{\frac{-2}{v}}\right) + 1} \]
      2. fma-def100.0%

        \[\leadsto \color{blue}{\mathsf{fma}\left(v, \log \left(u + \left(1 - u\right) \cdot e^{\frac{-2}{v}}\right), 1\right)} \]
      3. +-commutative100.0%

        \[\leadsto \mathsf{fma}\left(v, \log \color{blue}{\left(\left(1 - u\right) \cdot e^{\frac{-2}{v}} + u\right)}, 1\right) \]
      4. fma-def100.0%

        \[\leadsto \mathsf{fma}\left(v, \log \color{blue}{\left(\mathsf{fma}\left(1 - u, e^{\frac{-2}{v}}, u\right)\right)}, 1\right) \]
    3. Simplified100.0%

      \[\leadsto \color{blue}{\mathsf{fma}\left(v, \log \left(\mathsf{fma}\left(1 - u, e^{\frac{-2}{v}}, u\right)\right), 1\right)} \]
    4. Step-by-step derivation
      1. fma-udef100.0%

        \[\leadsto \mathsf{fma}\left(v, \log \color{blue}{\left(\left(1 - u\right) \cdot e^{\frac{-2}{v}} + u\right)}, 1\right) \]
    5. Applied egg-rr100.0%

      \[\leadsto \mathsf{fma}\left(v, \log \color{blue}{\left(\left(1 - u\right) \cdot e^{\frac{-2}{v}} + u\right)}, 1\right) \]
    6. Taylor expanded in v around 0 93.5%

      \[\leadsto \color{blue}{1} \]

    if 0.100000001 < v

    1. Initial program 93.5%

      \[1 + v \cdot \log \left(u + \left(1 - u\right) \cdot e^{\frac{-2}{v}}\right) \]
    2. Taylor expanded in u around 0 68.1%

      \[\leadsto 1 + v \cdot \color{blue}{\left(u \cdot \left(\frac{1}{e^{\frac{-2}{v}}} - 1\right) - 2 \cdot \frac{1}{v}\right)} \]
    3. Step-by-step derivation
      1. fma-neg68.2%

        \[\leadsto 1 + v \cdot \color{blue}{\mathsf{fma}\left(u, \frac{1}{e^{\frac{-2}{v}}} - 1, -2 \cdot \frac{1}{v}\right)} \]
      2. rec-exp68.2%

        \[\leadsto 1 + v \cdot \mathsf{fma}\left(u, \color{blue}{e^{-\frac{-2}{v}}} - 1, -2 \cdot \frac{1}{v}\right) \]
      3. expm1-def68.2%

        \[\leadsto 1 + v \cdot \mathsf{fma}\left(u, \color{blue}{\mathsf{expm1}\left(-\frac{-2}{v}\right)}, -2 \cdot \frac{1}{v}\right) \]
      4. distribute-neg-frac68.2%

        \[\leadsto 1 + v \cdot \mathsf{fma}\left(u, \mathsf{expm1}\left(\color{blue}{\frac{--2}{v}}\right), -2 \cdot \frac{1}{v}\right) \]
      5. metadata-eval68.2%

        \[\leadsto 1 + v \cdot \mathsf{fma}\left(u, \mathsf{expm1}\left(\frac{\color{blue}{2}}{v}\right), -2 \cdot \frac{1}{v}\right) \]
      6. associate-*r/68.2%

        \[\leadsto 1 + v \cdot \mathsf{fma}\left(u, \mathsf{expm1}\left(\frac{2}{v}\right), -\color{blue}{\frac{2 \cdot 1}{v}}\right) \]
      7. metadata-eval68.2%

        \[\leadsto 1 + v \cdot \mathsf{fma}\left(u, \mathsf{expm1}\left(\frac{2}{v}\right), -\frac{\color{blue}{2}}{v}\right) \]
      8. distribute-neg-frac68.2%

        \[\leadsto 1 + v \cdot \mathsf{fma}\left(u, \mathsf{expm1}\left(\frac{2}{v}\right), \color{blue}{\frac{-2}{v}}\right) \]
      9. metadata-eval68.2%

        \[\leadsto 1 + v \cdot \mathsf{fma}\left(u, \mathsf{expm1}\left(\frac{2}{v}\right), \frac{\color{blue}{-2}}{v}\right) \]
    4. Simplified68.2%

      \[\leadsto 1 + v \cdot \color{blue}{\mathsf{fma}\left(u, \mathsf{expm1}\left(\frac{2}{v}\right), \frac{-2}{v}\right)} \]
    5. Taylor expanded in v around 0 69.4%

      \[\leadsto \color{blue}{u \cdot \left(v \cdot \left(e^{\frac{2}{v}} - 1\right)\right) - 1} \]
    6. Taylor expanded in v around inf 64.1%

      \[\leadsto u \cdot \color{blue}{\left(2 + \left(1.3333333333333333 \cdot \frac{1}{{v}^{2}} + 2 \cdot \frac{1}{v}\right)\right)} - 1 \]
    7. Step-by-step derivation
      1. +-commutative64.1%

        \[\leadsto u \cdot \left(2 + \color{blue}{\left(2 \cdot \frac{1}{v} + 1.3333333333333333 \cdot \frac{1}{{v}^{2}}\right)}\right) - 1 \]
      2. associate-*r/64.1%

        \[\leadsto u \cdot \left(2 + \left(\color{blue}{\frac{2 \cdot 1}{v}} + 1.3333333333333333 \cdot \frac{1}{{v}^{2}}\right)\right) - 1 \]
      3. metadata-eval64.1%

        \[\leadsto u \cdot \left(2 + \left(\frac{\color{blue}{2}}{v} + 1.3333333333333333 \cdot \frac{1}{{v}^{2}}\right)\right) - 1 \]
      4. associate-*r/64.1%

        \[\leadsto u \cdot \left(2 + \left(\frac{2}{v} + \color{blue}{\frac{1.3333333333333333 \cdot 1}{{v}^{2}}}\right)\right) - 1 \]
      5. metadata-eval64.1%

        \[\leadsto u \cdot \left(2 + \left(\frac{2}{v} + \frac{\color{blue}{1.3333333333333333}}{{v}^{2}}\right)\right) - 1 \]
      6. unpow264.1%

        \[\leadsto u \cdot \left(2 + \left(\frac{2}{v} + \frac{1.3333333333333333}{\color{blue}{v \cdot v}}\right)\right) - 1 \]
    8. Simplified64.1%

      \[\leadsto u \cdot \color{blue}{\left(2 + \left(\frac{2}{v} + \frac{1.3333333333333333}{v \cdot v}\right)\right)} - 1 \]
  3. Recombined 2 regimes into one program.
  4. Final simplification91.7%

    \[\leadsto \begin{array}{l} \mathbf{if}\;v \leq 0.10000000149011612:\\ \;\;\;\;1\\ \mathbf{else}:\\ \;\;\;\;u \cdot \left(2 + \left(\frac{2}{v} + \frac{1.3333333333333333}{v \cdot v}\right)\right) + -1\\ \end{array} \]

Alternative 8: 90.4% accurate, 14.1× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;v \leq 0.10000000149011612:\\ \;\;\;\;1\\ \mathbf{else}:\\ \;\;\;\;1 + \left(2 \cdot \frac{u}{v} - \left(2 + -2 \cdot u\right)\right)\\ \end{array} \end{array} \]
(FPCore (u v)
 :precision binary32
 (if (<= v 0.10000000149011612)
   1.0
   (+ 1.0 (- (* 2.0 (/ u v)) (+ 2.0 (* -2.0 u))))))
/* Alternative 8, two regimes split at v = 0.1:
 *   small v: constant 1
 *   large v: 1 + (2*u/v - (2 - 2*u))  (linear approximation)
 * Early returns replace the original tmp variable; the computed
 * expressions and their operation order are unchanged. */
float code(float u, float v) {
	if (v <= 0.10000000149011612f) {
		return 1.0f;
	}
	const float linear = (2.0f * (u / v)) - (2.0f + (-2.0f * u));
	return 1.0f + linear;
}
! Alternative 8, two regimes split at v = 0.1: constant 1 for small
! v; the linear approximation 1 + (2*u/v - (2 - 2*u)) otherwise.
real(4) function code(u, v)
    real(4), intent (in) :: u
    real(4), intent (in) :: v
    real(4) :: tmp
    if (v <= 0.10000000149011612e0) then
        ! small-v regime
        tmp = 1.0e0
    else
        ! large-v linear regime
        tmp = 1.0e0 + ((2.0e0 * (u / v)) - (2.0e0 + ((-2.0e0) * u)))
    end if
    code = tmp
end function
# Alternative 8, two regimes split at v = 0.1: constant 1 for small
# v; a linear approximation otherwise. Float32() forces binary32.
function code(u, v)
	tmp = Float32(0.0)
	if (v <= Float32(0.10000000149011612))
		# small-v regime
		tmp = Float32(1.0);
	else
		# large-v linear regime: 1 + (2*u/v - (2 - 2*u))
		tmp = Float32(Float32(1.0) + Float32(Float32(Float32(2.0) * Float32(u / v)) - Float32(Float32(2.0) + Float32(Float32(-2.0) * u))));
	end
	return tmp
end
% Herbie alternative 8: single-precision approximation of
% 1 + v*log(u + (1-u)*exp(-2/v)), split into two regimes on v.
function tmp_2 = code(u, v)
	tmp = single(0.0);
	% Small-v regime: the exact expression tends to 1 as v -> 0.
	if (v <= single(0.10000000149011612))
		tmp = single(1.0);
	else
		% Large-v regime: series expansion of the log term.
		tmp = single(1.0) + ((single(2.0) * (u / v)) - (single(2.0) + (single(-2.0) * u)));
	end
	tmp_2 = tmp;
end
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;v \leq 0.10000000149011612:\\
\;\;\;\;1\\

\mathbf{else}:\\
\;\;\;\;1 + \left(2 \cdot \frac{u}{v} - \left(2 + -2 \cdot u\right)\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if v < 0.100000001

    1. Initial program 100.0%

      \[1 + v \cdot \log \left(u + \left(1 - u\right) \cdot e^{\frac{-2}{v}}\right) \]
    2. Step-by-step derivation
      1. +-commutative100.0%

        \[\leadsto \color{blue}{v \cdot \log \left(u + \left(1 - u\right) \cdot e^{\frac{-2}{v}}\right) + 1} \]
      2. fma-def100.0%

        \[\leadsto \color{blue}{\mathsf{fma}\left(v, \log \left(u + \left(1 - u\right) \cdot e^{\frac{-2}{v}}\right), 1\right)} \]
      3. +-commutative100.0%

        \[\leadsto \mathsf{fma}\left(v, \log \color{blue}{\left(\left(1 - u\right) \cdot e^{\frac{-2}{v}} + u\right)}, 1\right) \]
      4. fma-def100.0%

        \[\leadsto \mathsf{fma}\left(v, \log \color{blue}{\left(\mathsf{fma}\left(1 - u, e^{\frac{-2}{v}}, u\right)\right)}, 1\right) \]
    3. Simplified100.0%

      \[\leadsto \color{blue}{\mathsf{fma}\left(v, \log \left(\mathsf{fma}\left(1 - u, e^{\frac{-2}{v}}, u\right)\right), 1\right)} \]
    4. Step-by-step derivation
      1. fma-udef100.0%

        \[\leadsto \mathsf{fma}\left(v, \log \color{blue}{\left(\left(1 - u\right) \cdot e^{\frac{-2}{v}} + u\right)}, 1\right) \]
    5. Applied egg-rr100.0%

      \[\leadsto \mathsf{fma}\left(v, \log \color{blue}{\left(\left(1 - u\right) \cdot e^{\frac{-2}{v}} + u\right)}, 1\right) \]
    6. Taylor expanded in v around 0 93.5%

      \[\leadsto \color{blue}{1} \]

    if 0.100000001 < v

    1. Initial program 93.5%

      \[1 + v \cdot \log \left(u + \left(1 - u\right) \cdot e^{\frac{-2}{v}}\right) \]
    2. Taylor expanded in u around 0 68.1%

      \[\leadsto 1 + v \cdot \color{blue}{\left(u \cdot \left(\frac{1}{e^{\frac{-2}{v}}} - 1\right) - 2 \cdot \frac{1}{v}\right)} \]
    3. Step-by-step derivation
      1. fma-neg68.2%

        \[\leadsto 1 + v \cdot \color{blue}{\mathsf{fma}\left(u, \frac{1}{e^{\frac{-2}{v}}} - 1, -2 \cdot \frac{1}{v}\right)} \]
      2. rec-exp68.2%

        \[\leadsto 1 + v \cdot \mathsf{fma}\left(u, \color{blue}{e^{-\frac{-2}{v}}} - 1, -2 \cdot \frac{1}{v}\right) \]
      3. expm1-def68.2%

        \[\leadsto 1 + v \cdot \mathsf{fma}\left(u, \color{blue}{\mathsf{expm1}\left(-\frac{-2}{v}\right)}, -2 \cdot \frac{1}{v}\right) \]
      4. distribute-neg-frac68.2%

        \[\leadsto 1 + v \cdot \mathsf{fma}\left(u, \mathsf{expm1}\left(\color{blue}{\frac{--2}{v}}\right), -2 \cdot \frac{1}{v}\right) \]
      5. metadata-eval68.2%

        \[\leadsto 1 + v \cdot \mathsf{fma}\left(u, \mathsf{expm1}\left(\frac{\color{blue}{2}}{v}\right), -2 \cdot \frac{1}{v}\right) \]
      6. associate-*r/68.2%

        \[\leadsto 1 + v \cdot \mathsf{fma}\left(u, \mathsf{expm1}\left(\frac{2}{v}\right), -\color{blue}{\frac{2 \cdot 1}{v}}\right) \]
      7. metadata-eval68.2%

        \[\leadsto 1 + v \cdot \mathsf{fma}\left(u, \mathsf{expm1}\left(\frac{2}{v}\right), -\frac{\color{blue}{2}}{v}\right) \]
      8. distribute-neg-frac68.2%

        \[\leadsto 1 + v \cdot \mathsf{fma}\left(u, \mathsf{expm1}\left(\frac{2}{v}\right), \color{blue}{\frac{-2}{v}}\right) \]
      9. metadata-eval68.2%

        \[\leadsto 1 + v \cdot \mathsf{fma}\left(u, \mathsf{expm1}\left(\frac{2}{v}\right), \frac{\color{blue}{-2}}{v}\right) \]
    4. Simplified68.2%

      \[\leadsto 1 + v \cdot \color{blue}{\mathsf{fma}\left(u, \mathsf{expm1}\left(\frac{2}{v}\right), \frac{-2}{v}\right)} \]
    5. Taylor expanded in v around -inf 62.4%

      \[\leadsto 1 + \color{blue}{\left(-1 \cdot \left(2 + -2 \cdot u\right) + 2 \cdot \frac{u}{v}\right)} \]
    6. Step-by-step derivation
      1. +-commutative62.4%

        \[\leadsto 1 + \color{blue}{\left(2 \cdot \frac{u}{v} + -1 \cdot \left(2 + -2 \cdot u\right)\right)} \]
      2. mul-1-neg62.4%

        \[\leadsto 1 + \left(2 \cdot \frac{u}{v} + \color{blue}{\left(-\left(2 + -2 \cdot u\right)\right)}\right) \]
      3. unsub-neg62.4%

        \[\leadsto 1 + \color{blue}{\left(2 \cdot \frac{u}{v} - \left(2 + -2 \cdot u\right)\right)} \]
    7. Simplified62.4%

      \[\leadsto 1 + \color{blue}{\left(2 \cdot \frac{u}{v} - \left(2 + -2 \cdot u\right)\right)} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification91.6%

    \[\leadsto \begin{array}{l} \mathbf{if}\;v \leq 0.10000000149011612:\\ \;\;\;\;1\\ \mathbf{else}:\\ \;\;\;\;1 + \left(2 \cdot \frac{u}{v} - \left(2 + -2 \cdot u\right)\right)\\ \end{array} \]

Alternative 9: 90.4% accurate, 16.2× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;v \leq 0.10000000149011612:\\ \;\;\;\;1\\ \mathbf{else}:\\ \;\;\;\;1 + \left(-2 + 2 \cdot \left(u + \frac{u}{v}\right)\right)\\ \end{array} \end{array} \]
; Herbie alternative 9 (binary32): piecewise approximation of
; 1 + v*log(u + (1-u)*exp(-2/v)); returns 1 in the small-v regime.
(FPCore (u v)
 :precision binary32
 (if (<= v 0.10000000149011612) 1.0 (+ 1.0 (+ -2.0 (* 2.0 (+ u (/ u v)))))))
/* Herbie alternative 9: binary32 approximation of
 * 1 + v*log(u + (1-u)*exp(-2/v)), split into two regimes on v.
 * Below v ~= 0.1 the exact expression tends to 1; above it a
 * series expansion of the log term is evaluated. */
float code(float u, float v) {
	/* Small-v regime: constant 1. */
	if (v <= 0.10000000149011612f)
		return 1.0f;
	/* Large-v regime: 1 + (-2 + 2*(u + u/v)). */
	return 1.0f + (-2.0f + (2.0f * (u + (u / v))));
}
! Herbie alternative 9: binary32 approximation of
! 1 + v*log(u + (1-u)*exp(-2/v)), split into two regimes on v.
real(4) function code(u, v)
    real(4), intent (in) :: u
    real(4), intent (in) :: v
    real(4) :: tmp
    ! Small-v regime: the exact expression tends to 1 as v -> 0.
    if (v <= 0.10000000149011612e0) then
        tmp = 1.0e0
    else
        ! Large-v regime: series expansion of the log term.
        tmp = 1.0e0 + ((-2.0e0) + (2.0e0 * (u + (u / v))))
    end if
    code = tmp
end function
# Herbie alternative 9: binary32 approximation of
# 1 + v*log(u + (1-u)*exp(-2/v)), split into two regimes on v.
function code(u, v)
	tmp = Float32(0.0)
	# Small-v regime: the exact expression tends to 1 as v -> 0.
	if (v <= Float32(0.10000000149011612))
		tmp = Float32(1.0);
	else
		# Large-v regime: series expansion of the log term.
		tmp = Float32(Float32(1.0) + Float32(Float32(-2.0) + Float32(Float32(2.0) * Float32(u + Float32(u / v)))));
	end
	return tmp
end
% Herbie alternative 9: single-precision approximation of
% 1 + v*log(u + (1-u)*exp(-2/v)), split into two regimes on v.
function tmp_2 = code(u, v)
	tmp = single(0.0);
	% Small-v regime: the exact expression tends to 1 as v -> 0.
	if (v <= single(0.10000000149011612))
		tmp = single(1.0);
	else
		% Large-v regime: series expansion of the log term.
		tmp = single(1.0) + (single(-2.0) + (single(2.0) * (u + (u / v))));
	end
	tmp_2 = tmp;
end
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;v \leq 0.10000000149011612:\\
\;\;\;\;1\\

\mathbf{else}:\\
\;\;\;\;1 + \left(-2 + 2 \cdot \left(u + \frac{u}{v}\right)\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if v < 0.100000001

    1. Initial program 100.0%

      \[1 + v \cdot \log \left(u + \left(1 - u\right) \cdot e^{\frac{-2}{v}}\right) \]
    2. Step-by-step derivation
      1. +-commutative100.0%

        \[\leadsto \color{blue}{v \cdot \log \left(u + \left(1 - u\right) \cdot e^{\frac{-2}{v}}\right) + 1} \]
      2. fma-def100.0%

        \[\leadsto \color{blue}{\mathsf{fma}\left(v, \log \left(u + \left(1 - u\right) \cdot e^{\frac{-2}{v}}\right), 1\right)} \]
      3. +-commutative100.0%

        \[\leadsto \mathsf{fma}\left(v, \log \color{blue}{\left(\left(1 - u\right) \cdot e^{\frac{-2}{v}} + u\right)}, 1\right) \]
      4. fma-def100.0%

        \[\leadsto \mathsf{fma}\left(v, \log \color{blue}{\left(\mathsf{fma}\left(1 - u, e^{\frac{-2}{v}}, u\right)\right)}, 1\right) \]
    3. Simplified100.0%

      \[\leadsto \color{blue}{\mathsf{fma}\left(v, \log \left(\mathsf{fma}\left(1 - u, e^{\frac{-2}{v}}, u\right)\right), 1\right)} \]
    4. Step-by-step derivation
      1. fma-udef100.0%

        \[\leadsto \mathsf{fma}\left(v, \log \color{blue}{\left(\left(1 - u\right) \cdot e^{\frac{-2}{v}} + u\right)}, 1\right) \]
    5. Applied egg-rr100.0%

      \[\leadsto \mathsf{fma}\left(v, \log \color{blue}{\left(\left(1 - u\right) \cdot e^{\frac{-2}{v}} + u\right)}, 1\right) \]
    6. Taylor expanded in v around 0 93.5%

      \[\leadsto \color{blue}{1} \]

    if 0.100000001 < v

    1. Initial program 93.5%

      \[1 + v \cdot \log \left(u + \left(1 - u\right) \cdot e^{\frac{-2}{v}}\right) \]
    2. Taylor expanded in u around 0 68.1%

      \[\leadsto 1 + v \cdot \color{blue}{\left(u \cdot \left(\frac{1}{e^{\frac{-2}{v}}} - 1\right) - 2 \cdot \frac{1}{v}\right)} \]
    3. Step-by-step derivation
      1. fma-neg68.2%

        \[\leadsto 1 + v \cdot \color{blue}{\mathsf{fma}\left(u, \frac{1}{e^{\frac{-2}{v}}} - 1, -2 \cdot \frac{1}{v}\right)} \]
      2. rec-exp68.2%

        \[\leadsto 1 + v \cdot \mathsf{fma}\left(u, \color{blue}{e^{-\frac{-2}{v}}} - 1, -2 \cdot \frac{1}{v}\right) \]
      3. expm1-def68.2%

        \[\leadsto 1 + v \cdot \mathsf{fma}\left(u, \color{blue}{\mathsf{expm1}\left(-\frac{-2}{v}\right)}, -2 \cdot \frac{1}{v}\right) \]
      4. distribute-neg-frac68.2%

        \[\leadsto 1 + v \cdot \mathsf{fma}\left(u, \mathsf{expm1}\left(\color{blue}{\frac{--2}{v}}\right), -2 \cdot \frac{1}{v}\right) \]
      5. metadata-eval68.2%

        \[\leadsto 1 + v \cdot \mathsf{fma}\left(u, \mathsf{expm1}\left(\frac{\color{blue}{2}}{v}\right), -2 \cdot \frac{1}{v}\right) \]
      6. associate-*r/68.2%

        \[\leadsto 1 + v \cdot \mathsf{fma}\left(u, \mathsf{expm1}\left(\frac{2}{v}\right), -\color{blue}{\frac{2 \cdot 1}{v}}\right) \]
      7. metadata-eval68.2%

        \[\leadsto 1 + v \cdot \mathsf{fma}\left(u, \mathsf{expm1}\left(\frac{2}{v}\right), -\frac{\color{blue}{2}}{v}\right) \]
      8. distribute-neg-frac68.2%

        \[\leadsto 1 + v \cdot \mathsf{fma}\left(u, \mathsf{expm1}\left(\frac{2}{v}\right), \color{blue}{\frac{-2}{v}}\right) \]
      9. metadata-eval68.2%

        \[\leadsto 1 + v \cdot \mathsf{fma}\left(u, \mathsf{expm1}\left(\frac{2}{v}\right), \frac{\color{blue}{-2}}{v}\right) \]
    4. Simplified68.2%

      \[\leadsto 1 + v \cdot \color{blue}{\mathsf{fma}\left(u, \mathsf{expm1}\left(\frac{2}{v}\right), \frac{-2}{v}\right)} \]
    5. Taylor expanded in v around inf 62.1%

      \[\leadsto 1 + \color{blue}{\left(\left(2 \cdot u + 2 \cdot \frac{u}{v}\right) - 2\right)} \]
    6. Step-by-step derivation
      1. sub-neg62.1%

        \[\leadsto 1 + \color{blue}{\left(\left(2 \cdot u + 2 \cdot \frac{u}{v}\right) + \left(-2\right)\right)} \]
      2. distribute-lft-out62.1%

        \[\leadsto 1 + \left(\color{blue}{2 \cdot \left(u + \frac{u}{v}\right)} + \left(-2\right)\right) \]
      3. metadata-eval62.1%

        \[\leadsto 1 + \left(2 \cdot \left(u + \frac{u}{v}\right) + \color{blue}{-2}\right) \]
    7. Simplified62.1%

      \[\leadsto 1 + \color{blue}{\left(2 \cdot \left(u + \frac{u}{v}\right) + -2\right)} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification91.6%

    \[\leadsto \begin{array}{l} \mathbf{if}\;v \leq 0.10000000149011612:\\ \;\;\;\;1\\ \mathbf{else}:\\ \;\;\;\;1 + \left(-2 + 2 \cdot \left(u + \frac{u}{v}\right)\right)\\ \end{array} \]

Alternative 10: 90.4% accurate, 19.1× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;v \leq 0.10000000149011612:\\ \;\;\;\;1\\ \mathbf{else}:\\ \;\;\;\;2 \cdot \left(u + \frac{u}{v}\right) + -1\\ \end{array} \end{array} \]
; Herbie alternative 10 (binary32): piecewise approximation of
; 1 + v*log(u + (1-u)*exp(-2/v)); returns 1 in the small-v regime.
(FPCore (u v)
 :precision binary32
 (if (<= v 0.10000000149011612) 1.0 (+ (* 2.0 (+ u (/ u v))) -1.0)))
/* Herbie alternative 10: binary32 approximation of
 * 1 + v*log(u + (1-u)*exp(-2/v)), split into two regimes on v.
 * Below v ~= 0.1 the exact expression tends to 1; above it the
 * series-expanded form 2*(u + u/v) - 1 is evaluated. */
float code(float u, float v) {
	/* Small-v regime: constant 1. */
	if (v <= 0.10000000149011612f)
		return 1.0f;
	/* Large-v regime. */
	return (2.0f * (u + (u / v))) + -1.0f;
}
! Herbie alternative 10: binary32 approximation of
! 1 + v*log(u + (1-u)*exp(-2/v)), split into two regimes on v.
real(4) function code(u, v)
    real(4), intent (in) :: u
    real(4), intent (in) :: v
    real(4) :: tmp
    ! Small-v regime: the exact expression tends to 1 as v -> 0.
    if (v <= 0.10000000149011612e0) then
        tmp = 1.0e0
    else
        ! Large-v regime: series expansion of the log term.
        tmp = (2.0e0 * (u + (u / v))) + (-1.0e0)
    end if
    code = tmp
end function
# Herbie alternative 10: binary32 approximation of
# 1 + v*log(u + (1-u)*exp(-2/v)), split into two regimes on v.
function code(u, v)
	tmp = Float32(0.0)
	# Small-v regime: the exact expression tends to 1 as v -> 0.
	if (v <= Float32(0.10000000149011612))
		tmp = Float32(1.0);
	else
		# Large-v regime: series expansion of the log term.
		tmp = Float32(Float32(Float32(2.0) * Float32(u + Float32(u / v))) + Float32(-1.0));
	end
	return tmp
end
% Herbie alternative 10: single-precision approximation of
% 1 + v*log(u + (1-u)*exp(-2/v)), split into two regimes on v.
function tmp_2 = code(u, v)
	tmp = single(0.0);
	% Small-v regime: the exact expression tends to 1 as v -> 0.
	if (v <= single(0.10000000149011612))
		tmp = single(1.0);
	else
		% Large-v regime: series expansion of the log term.
		tmp = (single(2.0) * (u + (u / v))) + single(-1.0);
	end
	tmp_2 = tmp;
end
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;v \leq 0.10000000149011612:\\
\;\;\;\;1\\

\mathbf{else}:\\
\;\;\;\;2 \cdot \left(u + \frac{u}{v}\right) + -1\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if v < 0.100000001

    1. Initial program 100.0%

      \[1 + v \cdot \log \left(u + \left(1 - u\right) \cdot e^{\frac{-2}{v}}\right) \]
    2. Step-by-step derivation
      1. +-commutative100.0%

        \[\leadsto \color{blue}{v \cdot \log \left(u + \left(1 - u\right) \cdot e^{\frac{-2}{v}}\right) + 1} \]
      2. fma-def100.0%

        \[\leadsto \color{blue}{\mathsf{fma}\left(v, \log \left(u + \left(1 - u\right) \cdot e^{\frac{-2}{v}}\right), 1\right)} \]
      3. +-commutative100.0%

        \[\leadsto \mathsf{fma}\left(v, \log \color{blue}{\left(\left(1 - u\right) \cdot e^{\frac{-2}{v}} + u\right)}, 1\right) \]
      4. fma-def100.0%

        \[\leadsto \mathsf{fma}\left(v, \log \color{blue}{\left(\mathsf{fma}\left(1 - u, e^{\frac{-2}{v}}, u\right)\right)}, 1\right) \]
    3. Simplified100.0%

      \[\leadsto \color{blue}{\mathsf{fma}\left(v, \log \left(\mathsf{fma}\left(1 - u, e^{\frac{-2}{v}}, u\right)\right), 1\right)} \]
    4. Step-by-step derivation
      1. fma-udef100.0%

        \[\leadsto \mathsf{fma}\left(v, \log \color{blue}{\left(\left(1 - u\right) \cdot e^{\frac{-2}{v}} + u\right)}, 1\right) \]
    5. Applied egg-rr100.0%

      \[\leadsto \mathsf{fma}\left(v, \log \color{blue}{\left(\left(1 - u\right) \cdot e^{\frac{-2}{v}} + u\right)}, 1\right) \]
    6. Taylor expanded in v around 0 93.5%

      \[\leadsto \color{blue}{1} \]

    if 0.100000001 < v

    1. Initial program 93.5%

      \[1 + v \cdot \log \left(u + \left(1 - u\right) \cdot e^{\frac{-2}{v}}\right) \]
    2. Taylor expanded in u around 0 68.1%

      \[\leadsto 1 + v \cdot \color{blue}{\left(u \cdot \left(\frac{1}{e^{\frac{-2}{v}}} - 1\right) - 2 \cdot \frac{1}{v}\right)} \]
    3. Taylor expanded in v around inf 62.0%

      \[\leadsto \color{blue}{\left(2 \cdot u + 2 \cdot \frac{u}{v}\right) - 1} \]
    4. Step-by-step derivation
      1. sub-neg62.0%

        \[\leadsto \color{blue}{\left(2 \cdot u + 2 \cdot \frac{u}{v}\right) + \left(-1\right)} \]
      2. distribute-lft-out62.0%

        \[\leadsto \color{blue}{2 \cdot \left(u + \frac{u}{v}\right)} + \left(-1\right) \]
      3. metadata-eval62.0%

        \[\leadsto 2 \cdot \left(u + \frac{u}{v}\right) + \color{blue}{-1} \]
    5. Simplified62.0%

      \[\leadsto \color{blue}{2 \cdot \left(u + \frac{u}{v}\right) + -1} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification91.6%

    \[\leadsto \begin{array}{l} \mathbf{if}\;v \leq 0.10000000149011612:\\ \;\;\;\;1\\ \mathbf{else}:\\ \;\;\;\;2 \cdot \left(u + \frac{u}{v}\right) + -1\\ \end{array} \]

Alternative 11: 5.8% accurate, 213.0× speedup?

\[\begin{array}{l} \\ -1 \end{array} \]
; Herbie alternative 11: constant -1 approximation (5.8% accurate).
(FPCore (u v) :precision binary32 -1.0)
/* Herbie alternative 11: constant approximation obtained by Taylor
 * expansion in u around 0; only 5.8% accurate per the report. */
float code(float u, float v) {
	(void)u; /* inputs intentionally unused */
	(void)v;
	const float result = -1.0f;
	return result;
}
! Herbie alternative 11: constant -1 approximation (5.8% accurate).
real(4) function code(u, v)
    real(4), intent (in) :: u
    real(4), intent (in) :: v
    code = -1.0e0
end function
# Herbie alternative 11: constant -1 approximation (5.8% accurate).
function code(u, v)
	return Float32(-1.0)
end
% Herbie alternative 11: constant -1 approximation (5.8% accurate).
function tmp = code(u, v)
	tmp = single(-1.0);
end
\begin{array}{l}

\\
-1
\end{array}
Derivation
  1. Initial program 99.6%

    \[1 + v \cdot \log \left(u + \left(1 - u\right) \cdot e^{\frac{-2}{v}}\right) \]
  2. Step-by-step derivation
    1. +-commutative99.6%

      \[\leadsto \color{blue}{v \cdot \log \left(u + \left(1 - u\right) \cdot e^{\frac{-2}{v}}\right) + 1} \]
    2. fma-def99.6%

      \[\leadsto \color{blue}{\mathsf{fma}\left(v, \log \left(u + \left(1 - u\right) \cdot e^{\frac{-2}{v}}\right), 1\right)} \]
    3. +-commutative99.6%

      \[\leadsto \mathsf{fma}\left(v, \log \color{blue}{\left(\left(1 - u\right) \cdot e^{\frac{-2}{v}} + u\right)}, 1\right) \]
    4. fma-def99.6%

      \[\leadsto \mathsf{fma}\left(v, \log \color{blue}{\left(\mathsf{fma}\left(1 - u, e^{\frac{-2}{v}}, u\right)\right)}, 1\right) \]
  3. Simplified99.6%

    \[\leadsto \color{blue}{\mathsf{fma}\left(v, \log \left(\mathsf{fma}\left(1 - u, e^{\frac{-2}{v}}, u\right)\right), 1\right)} \]
  4. Taylor expanded in u around 0 5.7%

    \[\leadsto \color{blue}{-1} \]
  5. Final simplification5.7%

    \[\leadsto -1 \]

Alternative 12: 86.8% accurate, 213.0× speedup?

\[\begin{array}{l} \\ 1 \end{array} \]
; Herbie alternative 12: constant 1 approximation (86.8% accurate).
(FPCore (u v) :precision binary32 1.0)
/* Herbie alternative 12: constant approximation obtained by Taylor
 * expansion in v around 0; 86.8% accurate per the report. */
float code(float u, float v) {
	(void)u; /* inputs intentionally unused */
	(void)v;
	const float result = 1.0f;
	return result;
}
! Herbie alternative 12: constant 1 approximation (86.8% accurate).
real(4) function code(u, v)
    real(4), intent (in) :: u
    real(4), intent (in) :: v
    code = 1.0e0
end function
# Herbie alternative 12: constant 1 approximation (86.8% accurate).
function code(u, v)
	return Float32(1.0)
end
% Herbie alternative 12: constant 1 approximation (86.8% accurate).
function tmp = code(u, v)
	tmp = single(1.0);
end
\begin{array}{l}

\\
1
\end{array}
Derivation
  1. Initial program 99.6%

    \[1 + v \cdot \log \left(u + \left(1 - u\right) \cdot e^{\frac{-2}{v}}\right) \]
  2. Step-by-step derivation
    1. +-commutative99.6%

      \[\leadsto \color{blue}{v \cdot \log \left(u + \left(1 - u\right) \cdot e^{\frac{-2}{v}}\right) + 1} \]
    2. fma-def99.6%

      \[\leadsto \color{blue}{\mathsf{fma}\left(v, \log \left(u + \left(1 - u\right) \cdot e^{\frac{-2}{v}}\right), 1\right)} \]
    3. +-commutative99.6%

      \[\leadsto \mathsf{fma}\left(v, \log \color{blue}{\left(\left(1 - u\right) \cdot e^{\frac{-2}{v}} + u\right)}, 1\right) \]
    4. fma-def99.6%

      \[\leadsto \mathsf{fma}\left(v, \log \color{blue}{\left(\mathsf{fma}\left(1 - u, e^{\frac{-2}{v}}, u\right)\right)}, 1\right) \]
  3. Simplified99.6%

    \[\leadsto \color{blue}{\mathsf{fma}\left(v, \log \left(\mathsf{fma}\left(1 - u, e^{\frac{-2}{v}}, u\right)\right), 1\right)} \]
  4. Step-by-step derivation
    1. fma-udef99.6%

      \[\leadsto \mathsf{fma}\left(v, \log \color{blue}{\left(\left(1 - u\right) \cdot e^{\frac{-2}{v}} + u\right)}, 1\right) \]
  5. Applied egg-rr99.6%

    \[\leadsto \mathsf{fma}\left(v, \log \color{blue}{\left(\left(1 - u\right) \cdot e^{\frac{-2}{v}} + u\right)}, 1\right) \]
  6. Taylor expanded in v around 0 88.1%

    \[\leadsto \color{blue}{1} \]
  7. Final simplification88.1%

    \[\leadsto 1 \]

Reproduce

herbie shell --seed 2023285 
(FPCore (u v)
  :name "HairBSDF, sample_f, cosTheta"
  :precision binary32
  :pre (and (and (<= 1e-5 u) (<= u 1.0)) (and (<= 0.0 v) (<= v 109.746574)))
  (+ 1.0 (* v (log (+ u (* (- 1.0 u) (exp (/ -2.0 v))))))))