Numeric.SpecFunctions.Extra:bd0 from math-functions-0.1.5.2

Percentage Accurate: 77.5% → 99.3%
Time: 15.7s
Alternatives: 11
Speedup: 0.5×

Specification

?
\[\begin{array}{l} \\ x \cdot \log \left(\frac{x}{y}\right) - z \end{array} \]
(FPCore (x y z) :precision binary64 (- (* x (log (/ x y))) z))
double code(double x, double y, double z) {
	return (x * log((x / y))) - z;
}
! Computes x * log(x / y) - z in double precision (real(8)).
real(8) function code(x, y, z)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    code = (x * log((x / y))) - z
end function
// Specification of bd0: computes x * log(x / y) - z in double precision.
public static double code(double x, double y, double z) {
	final double ratio = x / y;
	return x * Math.log(ratio) - z;
}
def code(x, y, z):
	"""Compute x * log(x / y) - z (bd0 specification) in binary64."""
	ratio = x / y
	return x * math.log(ratio) - z
# Computes x * log(x / y) - z, rounding each intermediate to Float64.
function code(x, y, z)
	return Float64(Float64(x * log(Float64(x / y))) - z)
end
% Computes tmp = x * log(x / y) - z.
function tmp = code(x, y, z)
	tmp = (x * log((x / y))) - z;
end
code[x_, y_, z_] := N[(N[(x * N[Log[N[(x / y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] - z), $MachinePrecision]
\begin{array}{l}

\\
x \cdot \log \left(\frac{x}{y}\right) - z
\end{array}

Sampling outcomes in binary64 precision:

Local Percentage Accuracy vs ?

The average percentage accuracy by input value. The horizontal axis shows the value of an input variable; the variable is chosen in the title. The vertical axis is accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion. These can be toggled with buttons below the plot. The line is an average, while dots represent individual samples.

Accuracy vs Speed?

Herbie found 11 alternatives:

Alternative | Accuracy | Speedup
The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed-accuracy tradeoffs.

Initial Program: 77.5% accurate, 1.0× speedup?

\[\begin{array}{l} \\ x \cdot \log \left(\frac{x}{y}\right) - z \end{array} \]
(FPCore (x y z) :precision binary64 (- (* x (log (/ x y))) z))
// Initial program: x * log(x / y) - z (77.5% accurate per the report).
double code(double x, double y, double z) {
	return (x * log((x / y))) - z;
}
! Initial program: x * log(x / y) - z in double precision.
real(8) function code(x, y, z)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    code = (x * log((x / y))) - z
end function
// Initial program: x * Math.log(x / y) - z.
public static double code(double x, double y, double z) {
	return (x * Math.log((x / y))) - z;
}
def code(x, y, z):
	"""Initial program: x * math.log(x / y) - z."""
	return (x * math.log((x / y))) - z
# Initial program: x * log(x / y) - z, each step rounded to Float64.
function code(x, y, z)
	return Float64(Float64(x * log(Float64(x / y))) - z)
end
% Initial program: tmp = x * log(x / y) - z.
function tmp = code(x, y, z)
	tmp = (x * log((x / y))) - z;
end
code[x_, y_, z_] := N[(N[(x * N[Log[N[(x / y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] - z), $MachinePrecision]
\begin{array}{l}

\\
x \cdot \log \left(\frac{x}{y}\right) - z
\end{array}

Alternative 1: 99.3% accurate, 0.5× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;y \leq -5 \cdot 10^{-310}:\\ \;\;\;\;x \cdot \left(\log \left(0 - x\right) - \log \left(0 - y\right)\right) - z\\ \mathbf{else}:\\ \;\;\;\;\mathsf{fma}\left(\log x, x, \left(0 - x\right) \cdot \log y\right) - z\\ \end{array} \end{array} \]
(FPCore (x y z)
 :precision binary64
 (if (<= y -5e-310)
   (- (* x (- (log (- 0.0 x)) (log (- 0.0 y)))) z)
   (- (fma (log x) x (* (- 0.0 x) (log y))) z)))
// Alternative 1: split on y. For y <= -5e-310 the quotient x/y is rewritten
// as (-x)/(-y), so log(x/y) becomes log(-x) - log(-y) (valid when x and y
// are both negative). Otherwise an fma fuses x*log(x) with -x*log(y).
double code(double x, double y, double z) {
	double tmp;
	if (y <= -5e-310) {
		tmp = (x * (log((0.0 - x)) - log((0.0 - y)))) - z;
	} else {
		tmp = fma(log(x), x, ((0.0 - x) * log(y))) - z;
	}
	return tmp;
}
# Alternative 1: for y <= -5e-310 rewrite log(x/y) as log(-x) - log(-y);
# otherwise use fma(log(x), x, -x * log(y)). Each step rounds to Float64.
function code(x, y, z)
	tmp = 0.0
	if (y <= -5e-310)
		tmp = Float64(Float64(x * Float64(log(Float64(0.0 - x)) - log(Float64(0.0 - y)))) - z);
	else
		tmp = Float64(fma(log(x), x, Float64(Float64(0.0 - x) * log(y))) - z);
	end
	return tmp
end
code[x_, y_, z_] := If[LessEqual[y, -5e-310], N[(N[(x * N[(N[Log[N[(0.0 - x), $MachinePrecision]], $MachinePrecision] - N[Log[N[(0.0 - y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - z), $MachinePrecision], N[(N[(N[Log[x], $MachinePrecision] * x + N[(N[(0.0 - x), $MachinePrecision] * N[Log[y], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - z), $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;y \leq -5 \cdot 10^{-310}:\\
\;\;\;\;x \cdot \left(\log \left(0 - x\right) - \log \left(0 - y\right)\right) - z\\

\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(\log x, x, \left(0 - x\right) \cdot \log y\right) - z\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if y < -4.999999999999985e-310

    1. Initial program 78.3%

      \[x \cdot \log \left(\frac{x}{y}\right) - z \]
    2. Add Preprocessing
    3. Step-by-step derivation
      1. frac-2negN/A

        \[\leadsto x \cdot \log \color{blue}{\left(\frac{\mathsf{neg}\left(x\right)}{\mathsf{neg}\left(y\right)}\right)} - z \]
      2. log-divN/A

        \[\leadsto x \cdot \color{blue}{\left(\log \left(\mathsf{neg}\left(x\right)\right) - \log \left(\mathsf{neg}\left(y\right)\right)\right)} - z \]
      3. --lowering--.f64N/A

        \[\leadsto x \cdot \color{blue}{\left(\log \left(\mathsf{neg}\left(x\right)\right) - \log \left(\mathsf{neg}\left(y\right)\right)\right)} - z \]
      4. log-lowering-log.f64N/A

        \[\leadsto x \cdot \left(\color{blue}{\log \left(\mathsf{neg}\left(x\right)\right)} - \log \left(\mathsf{neg}\left(y\right)\right)\right) - z \]
      5. neg-sub0N/A

        \[\leadsto x \cdot \left(\log \color{blue}{\left(0 - x\right)} - \log \left(\mathsf{neg}\left(y\right)\right)\right) - z \]
      6. metadata-evalN/A

        \[\leadsto x \cdot \left(\log \left(\color{blue}{\log 1} - x\right) - \log \left(\mathsf{neg}\left(y\right)\right)\right) - z \]
      7. --lowering--.f64N/A

        \[\leadsto x \cdot \left(\log \color{blue}{\left(\log 1 - x\right)} - \log \left(\mathsf{neg}\left(y\right)\right)\right) - z \]
      8. metadata-evalN/A

        \[\leadsto x \cdot \left(\log \left(\color{blue}{0} - x\right) - \log \left(\mathsf{neg}\left(y\right)\right)\right) - z \]
      9. log-lowering-log.f64N/A

        \[\leadsto x \cdot \left(\log \left(0 - x\right) - \color{blue}{\log \left(\mathsf{neg}\left(y\right)\right)}\right) - z \]
      10. neg-sub0N/A

        \[\leadsto x \cdot \left(\log \left(0 - x\right) - \log \color{blue}{\left(0 - y\right)}\right) - z \]
      11. metadata-evalN/A

        \[\leadsto x \cdot \left(\log \left(0 - x\right) - \log \left(\color{blue}{\log 1} - y\right)\right) - z \]
      12. --lowering--.f64N/A

        \[\leadsto x \cdot \left(\log \left(0 - x\right) - \log \color{blue}{\left(\log 1 - y\right)}\right) - z \]
      13. metadata-eval99.4

        \[\leadsto x \cdot \left(\log \left(0 - x\right) - \log \left(\color{blue}{0} - y\right)\right) - z \]
    4. Applied egg-rr99.4%

      \[\leadsto x \cdot \color{blue}{\left(\log \left(0 - x\right) - \log \left(0 - y\right)\right)} - z \]

    if -4.999999999999985e-310 < y

    1. Initial program 79.7%

      \[x \cdot \log \left(\frac{x}{y}\right) - z \]
    2. Add Preprocessing
    3. Step-by-step derivation
      1. log-divN/A

        \[\leadsto x \cdot \color{blue}{\left(\log x - \log y\right)} - z \]
      2. sub-negN/A

        \[\leadsto x \cdot \color{blue}{\left(\log x + \left(\mathsf{neg}\left(\log y\right)\right)\right)} - z \]
      3. distribute-rgt-inN/A

        \[\leadsto \color{blue}{\left(\log x \cdot x + \left(\mathsf{neg}\left(\log y\right)\right) \cdot x\right)} - z \]
      4. accelerator-lowering-fma.f64N/A

        \[\leadsto \color{blue}{\mathsf{fma}\left(\log x, x, \left(\mathsf{neg}\left(\log y\right)\right) \cdot x\right)} - z \]
      5. log-lowering-log.f64N/A

        \[\leadsto \mathsf{fma}\left(\color{blue}{\log x}, x, \left(\mathsf{neg}\left(\log y\right)\right) \cdot x\right) - z \]
      6. *-lowering-*.f64N/A

        \[\leadsto \mathsf{fma}\left(\log x, x, \color{blue}{\left(\mathsf{neg}\left(\log y\right)\right) \cdot x}\right) - z \]
      7. neg-logN/A

        \[\leadsto \mathsf{fma}\left(\log x, x, \color{blue}{\log \left(\frac{1}{y}\right)} \cdot x\right) - z \]
      8. log-divN/A

        \[\leadsto \mathsf{fma}\left(\log x, x, \color{blue}{\left(\log 1 - \log y\right)} \cdot x\right) - z \]
      9. --lowering--.f64N/A

        \[\leadsto \mathsf{fma}\left(\log x, x, \color{blue}{\left(\log 1 - \log y\right)} \cdot x\right) - z \]
      10. metadata-evalN/A

        \[\leadsto \mathsf{fma}\left(\log x, x, \left(\color{blue}{0} - \log y\right) \cdot x\right) - z \]
      11. log-lowering-log.f6499.4

        \[\leadsto \mathsf{fma}\left(\log x, x, \left(0 - \color{blue}{\log y}\right) \cdot x\right) - z \]
    4. Applied egg-rr99.4%

      \[\leadsto \color{blue}{\mathsf{fma}\left(\log x, x, \left(0 - \log y\right) \cdot x\right)} - z \]
  3. Recombined 2 regimes into one program.
  4. Final simplification99.4%

    \[\leadsto \begin{array}{l} \mathbf{if}\;y \leq -5 \cdot 10^{-310}:\\ \;\;\;\;x \cdot \left(\log \left(0 - x\right) - \log \left(0 - y\right)\right) - z\\ \mathbf{else}:\\ \;\;\;\;\mathsf{fma}\left(\log x, x, \left(0 - x\right) \cdot \log y\right) - z\\ \end{array} \]
  5. Add Preprocessing

Alternative 2: 86.6% accurate, 0.3× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_0 := x \cdot \log \left(\frac{x}{y}\right)\\ \mathbf{if}\;t\_0 \leq -\infty:\\ \;\;\;\;\mathsf{fma}\left(x, \log \left(0 - x\right) - \log \left(0 - y\right), 0\right)\\ \mathbf{elif}\;t\_0 \leq 5 \cdot 10^{+302}:\\ \;\;\;\;t\_0 - z\\ \mathbf{else}:\\ \;\;\;\;\mathsf{fma}\left(x, \log x, \left(0 - x\right) \cdot \log y\right)\\ \end{array} \end{array} \]
(FPCore (x y z)
 :precision binary64
 (let* ((t_0 (* x (log (/ x y)))))
   (if (<= t_0 (- INFINITY))
     (fma x (- (log (- 0.0 x)) (log (- 0.0 y))) 0.0)
     (if (<= t_0 5e+302) (- t_0 z) (fma x (log x) (* (- 0.0 x) (log y)))))))
// Alternative 2: three regimes keyed on t_0 = x * log(x / y).
//   t_0 <= -inf    -> fma(x, log(-x) - log(-y), 0); z dropped by the Taylor expansion
//   t_0 <= 5e+302  -> direct evaluation t_0 - z
//   otherwise      -> fma(x, log(x), -x * log(y)); z dropped
double code(double x, double y, double z) {
	double t_0 = x * log((x / y));
	double tmp;
	if (t_0 <= -((double) INFINITY)) {
		tmp = fma(x, (log((0.0 - x)) - log((0.0 - y))), 0.0);
	} else if (t_0 <= 5e+302) {
		tmp = t_0 - z;
	} else {
		tmp = fma(x, log(x), ((0.0 - x) * log(y)));
	}
	return tmp;
}
# Alternative 2: regime split on t_0 = x * log(x / y); see the C version above
# for the per-branch rationale. Each step rounds to Float64.
function code(x, y, z)
	t_0 = Float64(x * log(Float64(x / y)))
	tmp = 0.0
	if (t_0 <= Float64(-Inf))
		tmp = fma(x, Float64(log(Float64(0.0 - x)) - log(Float64(0.0 - y))), 0.0);
	elseif (t_0 <= 5e+302)
		tmp = Float64(t_0 - z);
	else
		tmp = fma(x, log(x), Float64(Float64(0.0 - x) * log(y)));
	end
	return tmp
end
code[x_, y_, z_] := Block[{t$95$0 = N[(x * N[Log[N[(x / y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]}, If[LessEqual[t$95$0, (-Infinity)], N[(x * N[(N[Log[N[(0.0 - x), $MachinePrecision]], $MachinePrecision] - N[Log[N[(0.0 - y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] + 0.0), $MachinePrecision], If[LessEqual[t$95$0, 5e+302], N[(t$95$0 - z), $MachinePrecision], N[(x * N[Log[x], $MachinePrecision] + N[(N[(0.0 - x), $MachinePrecision] * N[Log[y], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}

\\
\begin{array}{l}
t_0 := x \cdot \log \left(\frac{x}{y}\right)\\
\mathbf{if}\;t\_0 \leq -\infty:\\
\;\;\;\;\mathsf{fma}\left(x, \log \left(0 - x\right) - \log \left(0 - y\right), 0\right)\\

\mathbf{elif}\;t\_0 \leq 5 \cdot 10^{+302}:\\
\;\;\;\;t\_0 - z\\

\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(x, \log x, \left(0 - x\right) \cdot \log y\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if (*.f64 x (log.f64 (/.f64 x y))) < -inf.0

    1. Initial program 11.3%

      \[x \cdot \log \left(\frac{x}{y}\right) - z \]
    2. Add Preprocessing
    3. Taylor expanded in x around inf

      \[\leadsto \color{blue}{x \cdot \left(\log \left(\frac{1}{y}\right) + -1 \cdot \log \left(\frac{1}{x}\right)\right)} \]
    4. Step-by-step derivation
      1. distribute-rgt-inN/A

        \[\leadsto \color{blue}{\log \left(\frac{1}{y}\right) \cdot x + \left(-1 \cdot \log \left(\frac{1}{x}\right)\right) \cdot x} \]
      2. mul-1-negN/A

        \[\leadsto \log \left(\frac{1}{y}\right) \cdot x + \color{blue}{\left(\mathsf{neg}\left(\log \left(\frac{1}{x}\right)\right)\right)} \cdot x \]
      3. log-recN/A

        \[\leadsto \log \left(\frac{1}{y}\right) \cdot x + \left(\mathsf{neg}\left(\color{blue}{\left(\mathsf{neg}\left(\log x\right)\right)}\right)\right) \cdot x \]
      4. remove-double-negN/A

        \[\leadsto \log \left(\frac{1}{y}\right) \cdot x + \color{blue}{\log x} \cdot x \]
      5. distribute-rgt-inN/A

        \[\leadsto \color{blue}{x \cdot \left(\log \left(\frac{1}{y}\right) + \log x\right)} \]
      6. +-commutativeN/A

        \[\leadsto x \cdot \color{blue}{\left(\log x + \log \left(\frac{1}{y}\right)\right)} \]
      7. +-rgt-identityN/A

        \[\leadsto \color{blue}{x \cdot \left(\log x + \log \left(\frac{1}{y}\right)\right) + 0} \]
      8. accelerator-lowering-fma.f64N/A

        \[\leadsto \color{blue}{\mathsf{fma}\left(x, \log x + \log \left(\frac{1}{y}\right), 0\right)} \]
      9. log-recN/A

        \[\leadsto \mathsf{fma}\left(x, \log x + \color{blue}{\left(\mathsf{neg}\left(\log y\right)\right)}, 0\right) \]
      10. unsub-negN/A

        \[\leadsto \mathsf{fma}\left(x, \color{blue}{\log x - \log y}, 0\right) \]
      11. --lowering--.f64N/A

        \[\leadsto \mathsf{fma}\left(x, \color{blue}{\log x - \log y}, 0\right) \]
      12. log-lowering-log.f64N/A

        \[\leadsto \mathsf{fma}\left(x, \color{blue}{\log x} - \log y, 0\right) \]
      13. log-lowering-log.f644.9

        \[\leadsto \mathsf{fma}\left(x, \log x - \color{blue}{\log y}, 0\right) \]
    5. Simplified4.9%

      \[\leadsto \color{blue}{\mathsf{fma}\left(x, \log x - \log y, 0\right)} \]
    6. Step-by-step derivation
      1. diff-logN/A

        \[\leadsto \mathsf{fma}\left(x, \color{blue}{\log \left(\frac{x}{y}\right)}, 0\right) \]
      2. frac-2negN/A

        \[\leadsto \mathsf{fma}\left(x, \log \color{blue}{\left(\frac{\mathsf{neg}\left(x\right)}{\mathsf{neg}\left(y\right)}\right)}, 0\right) \]
      3. log-divN/A

        \[\leadsto \mathsf{fma}\left(x, \color{blue}{\log \left(\mathsf{neg}\left(x\right)\right) - \log \left(\mathsf{neg}\left(y\right)\right)}, 0\right) \]
      4. --lowering--.f64N/A

        \[\leadsto \mathsf{fma}\left(x, \color{blue}{\log \left(\mathsf{neg}\left(x\right)\right) - \log \left(\mathsf{neg}\left(y\right)\right)}, 0\right) \]
      5. log-lowering-log.f64N/A

        \[\leadsto \mathsf{fma}\left(x, \color{blue}{\log \left(\mathsf{neg}\left(x\right)\right)} - \log \left(\mathsf{neg}\left(y\right)\right), 0\right) \]
      6. neg-sub0N/A

        \[\leadsto \mathsf{fma}\left(x, \log \color{blue}{\left(0 - x\right)} - \log \left(\mathsf{neg}\left(y\right)\right), 0\right) \]
      7. --lowering--.f64N/A

        \[\leadsto \mathsf{fma}\left(x, \log \color{blue}{\left(0 - x\right)} - \log \left(\mathsf{neg}\left(y\right)\right), 0\right) \]
      8. log-lowering-log.f64N/A

        \[\leadsto \mathsf{fma}\left(x, \log \left(0 - x\right) - \color{blue}{\log \left(\mathsf{neg}\left(y\right)\right)}, 0\right) \]
      9. neg-sub0N/A

        \[\leadsto \mathsf{fma}\left(x, \log \left(0 - x\right) - \log \color{blue}{\left(0 - y\right)}, 0\right) \]
      10. --lowering--.f6453.6

        \[\leadsto \mathsf{fma}\left(x, \log \left(0 - x\right) - \log \color{blue}{\left(0 - y\right)}, 0\right) \]
    7. Applied egg-rr53.6%

      \[\leadsto \mathsf{fma}\left(x, \color{blue}{\log \left(0 - x\right) - \log \left(0 - y\right)}, 0\right) \]

    if -inf.0 < (*.f64 x (log.f64 (/.f64 x y))) < 5e302

    1. Initial program 99.7%

      \[x \cdot \log \left(\frac{x}{y}\right) - z \]
    2. Add Preprocessing

    if 5e302 < (*.f64 x (log.f64 (/.f64 x y)))

    1. Initial program 5.2%

      \[x \cdot \log \left(\frac{x}{y}\right) - z \]
    2. Add Preprocessing
    3. Taylor expanded in x around inf

      \[\leadsto \color{blue}{x \cdot \left(\log \left(\frac{1}{y}\right) + -1 \cdot \log \left(\frac{1}{x}\right)\right)} \]
    4. Step-by-step derivation
      1. distribute-rgt-inN/A

        \[\leadsto \color{blue}{\log \left(\frac{1}{y}\right) \cdot x + \left(-1 \cdot \log \left(\frac{1}{x}\right)\right) \cdot x} \]
      2. mul-1-negN/A

        \[\leadsto \log \left(\frac{1}{y}\right) \cdot x + \color{blue}{\left(\mathsf{neg}\left(\log \left(\frac{1}{x}\right)\right)\right)} \cdot x \]
      3. log-recN/A

        \[\leadsto \log \left(\frac{1}{y}\right) \cdot x + \left(\mathsf{neg}\left(\color{blue}{\left(\mathsf{neg}\left(\log x\right)\right)}\right)\right) \cdot x \]
      4. remove-double-negN/A

        \[\leadsto \log \left(\frac{1}{y}\right) \cdot x + \color{blue}{\log x} \cdot x \]
      5. distribute-rgt-inN/A

        \[\leadsto \color{blue}{x \cdot \left(\log \left(\frac{1}{y}\right) + \log x\right)} \]
      6. +-commutativeN/A

        \[\leadsto x \cdot \color{blue}{\left(\log x + \log \left(\frac{1}{y}\right)\right)} \]
      7. +-rgt-identityN/A

        \[\leadsto \color{blue}{x \cdot \left(\log x + \log \left(\frac{1}{y}\right)\right) + 0} \]
      8. accelerator-lowering-fma.f64N/A

        \[\leadsto \color{blue}{\mathsf{fma}\left(x, \log x + \log \left(\frac{1}{y}\right), 0\right)} \]
      9. log-recN/A

        \[\leadsto \mathsf{fma}\left(x, \log x + \color{blue}{\left(\mathsf{neg}\left(\log y\right)\right)}, 0\right) \]
      10. unsub-negN/A

        \[\leadsto \mathsf{fma}\left(x, \color{blue}{\log x - \log y}, 0\right) \]
      11. --lowering--.f64N/A

        \[\leadsto \mathsf{fma}\left(x, \color{blue}{\log x - \log y}, 0\right) \]
      12. log-lowering-log.f64N/A

        \[\leadsto \mathsf{fma}\left(x, \color{blue}{\log x} - \log y, 0\right) \]
      13. log-lowering-log.f6449.6

        \[\leadsto \mathsf{fma}\left(x, \log x - \color{blue}{\log y}, 0\right) \]
    5. Simplified49.6%

      \[\leadsto \color{blue}{\mathsf{fma}\left(x, \log x - \log y, 0\right)} \]
    6. Step-by-step derivation
      1. +-rgt-identityN/A

        \[\leadsto \color{blue}{x \cdot \left(\log x - \log y\right)} \]
      2. sub-negN/A

        \[\leadsto x \cdot \color{blue}{\left(\log x + \left(\mathsf{neg}\left(\log y\right)\right)\right)} \]
      3. distribute-lft-inN/A

        \[\leadsto \color{blue}{x \cdot \log x + x \cdot \left(\mathsf{neg}\left(\log y\right)\right)} \]
      4. accelerator-lowering-fma.f64N/A

        \[\leadsto \color{blue}{\mathsf{fma}\left(x, \log x, x \cdot \left(\mathsf{neg}\left(\log y\right)\right)\right)} \]
      5. log-lowering-log.f64N/A

        \[\leadsto \mathsf{fma}\left(x, \color{blue}{\log x}, x \cdot \left(\mathsf{neg}\left(\log y\right)\right)\right) \]
      6. distribute-rgt-neg-outN/A

        \[\leadsto \mathsf{fma}\left(x, \log x, \color{blue}{\mathsf{neg}\left(x \cdot \log y\right)}\right) \]
      7. neg-lowering-neg.f64N/A

        \[\leadsto \mathsf{fma}\left(x, \log x, \color{blue}{\mathsf{neg}\left(x \cdot \log y\right)}\right) \]
      8. *-lowering-*.f64N/A

        \[\leadsto \mathsf{fma}\left(x, \log x, \mathsf{neg}\left(\color{blue}{x \cdot \log y}\right)\right) \]
      9. log-lowering-log.f6449.8

        \[\leadsto \mathsf{fma}\left(x, \log x, -x \cdot \color{blue}{\log y}\right) \]
    7. Applied egg-rr49.8%

      \[\leadsto \color{blue}{\mathsf{fma}\left(x, \log x, -x \cdot \log y\right)} \]
  3. Recombined 3 regimes into one program.
  4. Final simplification88.8%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \cdot \log \left(\frac{x}{y}\right) \leq -\infty:\\ \;\;\;\;\mathsf{fma}\left(x, \log \left(0 - x\right) - \log \left(0 - y\right), 0\right)\\ \mathbf{elif}\;x \cdot \log \left(\frac{x}{y}\right) \leq 5 \cdot 10^{+302}:\\ \;\;\;\;x \cdot \log \left(\frac{x}{y}\right) - z\\ \mathbf{else}:\\ \;\;\;\;\mathsf{fma}\left(x, \log x, \left(0 - x\right) \cdot \log y\right)\\ \end{array} \]
  5. Add Preprocessing

Alternative 3: 86.8% accurate, 0.3× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_0 := x \cdot \log \left(\frac{x}{y}\right)\\ \mathbf{if}\;t\_0 \leq -\infty:\\ \;\;\;\;0 - z\\ \mathbf{elif}\;t\_0 \leq 5 \cdot 10^{+302}:\\ \;\;\;\;t\_0 - z\\ \mathbf{else}:\\ \;\;\;\;\mathsf{fma}\left(x, \log x, \left(0 - x\right) \cdot \log y\right)\\ \end{array} \end{array} \]
(FPCore (x y z)
 :precision binary64
 (let* ((t_0 (* x (log (/ x y)))))
   (if (<= t_0 (- INFINITY))
     (- 0.0 z)
     (if (<= t_0 5e+302) (- t_0 z) (fma x (log x) (* (- 0.0 x) (log y)))))))
// Alternative 3: three regimes keyed on t_0 = x * log(x / y).
//   t_0 <= -inf    -> 0 - z (Taylor expansion in x around 0 leaves only -z)
//   t_0 <= 5e+302  -> direct evaluation t_0 - z
//   otherwise      -> fma(x, log(x), -x * log(y)); z dropped
double code(double x, double y, double z) {
	double t_0 = x * log((x / y));
	double tmp;
	if (t_0 <= -((double) INFINITY)) {
		tmp = 0.0 - z;
	} else if (t_0 <= 5e+302) {
		tmp = t_0 - z;
	} else {
		tmp = fma(x, log(x), ((0.0 - x) * log(y)));
	}
	return tmp;
}
# Alternative 3: regime split on t_0 = x * log(x / y); -inf regime returns -z,
# huge regime uses fma(x, log(x), -x * log(y)). Each step rounds to Float64.
function code(x, y, z)
	t_0 = Float64(x * log(Float64(x / y)))
	tmp = 0.0
	if (t_0 <= Float64(-Inf))
		tmp = Float64(0.0 - z);
	elseif (t_0 <= 5e+302)
		tmp = Float64(t_0 - z);
	else
		tmp = fma(x, log(x), Float64(Float64(0.0 - x) * log(y)));
	end
	return tmp
end
code[x_, y_, z_] := Block[{t$95$0 = N[(x * N[Log[N[(x / y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]}, If[LessEqual[t$95$0, (-Infinity)], N[(0.0 - z), $MachinePrecision], If[LessEqual[t$95$0, 5e+302], N[(t$95$0 - z), $MachinePrecision], N[(x * N[Log[x], $MachinePrecision] + N[(N[(0.0 - x), $MachinePrecision] * N[Log[y], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}

\\
\begin{array}{l}
t_0 := x \cdot \log \left(\frac{x}{y}\right)\\
\mathbf{if}\;t\_0 \leq -\infty:\\
\;\;\;\;0 - z\\

\mathbf{elif}\;t\_0 \leq 5 \cdot 10^{+302}:\\
\;\;\;\;t\_0 - z\\

\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(x, \log x, \left(0 - x\right) \cdot \log y\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if (*.f64 x (log.f64 (/.f64 x y))) < -inf.0

    1. Initial program 11.3%

      \[x \cdot \log \left(\frac{x}{y}\right) - z \]
    2. Add Preprocessing
    3. Taylor expanded in x around 0

      \[\leadsto \color{blue}{-1 \cdot z} \]
    4. Step-by-step derivation
      1. mul-1-negN/A

        \[\leadsto \color{blue}{\mathsf{neg}\left(z\right)} \]
      2. neg-sub0N/A

        \[\leadsto \color{blue}{0 - z} \]
      3. --lowering--.f6444.6

        \[\leadsto \color{blue}{0 - z} \]
    5. Simplified44.6%

      \[\leadsto \color{blue}{0 - z} \]
    6. Step-by-step derivation
      1. sub0-negN/A

        \[\leadsto \color{blue}{\mathsf{neg}\left(z\right)} \]
      2. +-lft-identityN/A

        \[\leadsto \mathsf{neg}\left(\color{blue}{\left(0 + z\right)}\right) \]
      3. neg-lowering-neg.f64N/A

        \[\leadsto \color{blue}{\mathsf{neg}\left(\left(0 + z\right)\right)} \]
      4. +-lft-identity44.6

        \[\leadsto -\color{blue}{z} \]
    7. Applied egg-rr44.6%

      \[\leadsto \color{blue}{-z} \]

    if -inf.0 < (*.f64 x (log.f64 (/.f64 x y))) < 5e302

    1. Initial program 99.7%

      \[x \cdot \log \left(\frac{x}{y}\right) - z \]
    2. Add Preprocessing

    if 5e302 < (*.f64 x (log.f64 (/.f64 x y)))

    1. Initial program 5.2%

      \[x \cdot \log \left(\frac{x}{y}\right) - z \]
    2. Add Preprocessing
    3. Taylor expanded in x around inf

      \[\leadsto \color{blue}{x \cdot \left(\log \left(\frac{1}{y}\right) + -1 \cdot \log \left(\frac{1}{x}\right)\right)} \]
    4. Step-by-step derivation
      1. distribute-rgt-inN/A

        \[\leadsto \color{blue}{\log \left(\frac{1}{y}\right) \cdot x + \left(-1 \cdot \log \left(\frac{1}{x}\right)\right) \cdot x} \]
      2. mul-1-negN/A

        \[\leadsto \log \left(\frac{1}{y}\right) \cdot x + \color{blue}{\left(\mathsf{neg}\left(\log \left(\frac{1}{x}\right)\right)\right)} \cdot x \]
      3. log-recN/A

        \[\leadsto \log \left(\frac{1}{y}\right) \cdot x + \left(\mathsf{neg}\left(\color{blue}{\left(\mathsf{neg}\left(\log x\right)\right)}\right)\right) \cdot x \]
      4. remove-double-negN/A

        \[\leadsto \log \left(\frac{1}{y}\right) \cdot x + \color{blue}{\log x} \cdot x \]
      5. distribute-rgt-inN/A

        \[\leadsto \color{blue}{x \cdot \left(\log \left(\frac{1}{y}\right) + \log x\right)} \]
      6. +-commutativeN/A

        \[\leadsto x \cdot \color{blue}{\left(\log x + \log \left(\frac{1}{y}\right)\right)} \]
      7. +-rgt-identityN/A

        \[\leadsto \color{blue}{x \cdot \left(\log x + \log \left(\frac{1}{y}\right)\right) + 0} \]
      8. accelerator-lowering-fma.f64N/A

        \[\leadsto \color{blue}{\mathsf{fma}\left(x, \log x + \log \left(\frac{1}{y}\right), 0\right)} \]
      9. log-recN/A

        \[\leadsto \mathsf{fma}\left(x, \log x + \color{blue}{\left(\mathsf{neg}\left(\log y\right)\right)}, 0\right) \]
      10. unsub-negN/A

        \[\leadsto \mathsf{fma}\left(x, \color{blue}{\log x - \log y}, 0\right) \]
      11. --lowering--.f64N/A

        \[\leadsto \mathsf{fma}\left(x, \color{blue}{\log x - \log y}, 0\right) \]
      12. log-lowering-log.f64N/A

        \[\leadsto \mathsf{fma}\left(x, \color{blue}{\log x} - \log y, 0\right) \]
      13. log-lowering-log.f6449.6

        \[\leadsto \mathsf{fma}\left(x, \log x - \color{blue}{\log y}, 0\right) \]
    5. Simplified49.6%

      \[\leadsto \color{blue}{\mathsf{fma}\left(x, \log x - \log y, 0\right)} \]
    6. Step-by-step derivation
      1. +-rgt-identityN/A

        \[\leadsto \color{blue}{x \cdot \left(\log x - \log y\right)} \]
      2. sub-negN/A

        \[\leadsto x \cdot \color{blue}{\left(\log x + \left(\mathsf{neg}\left(\log y\right)\right)\right)} \]
      3. distribute-lft-inN/A

        \[\leadsto \color{blue}{x \cdot \log x + x \cdot \left(\mathsf{neg}\left(\log y\right)\right)} \]
      4. accelerator-lowering-fma.f64N/A

        \[\leadsto \color{blue}{\mathsf{fma}\left(x, \log x, x \cdot \left(\mathsf{neg}\left(\log y\right)\right)\right)} \]
      5. log-lowering-log.f64N/A

        \[\leadsto \mathsf{fma}\left(x, \color{blue}{\log x}, x \cdot \left(\mathsf{neg}\left(\log y\right)\right)\right) \]
      6. distribute-rgt-neg-outN/A

        \[\leadsto \mathsf{fma}\left(x, \log x, \color{blue}{\mathsf{neg}\left(x \cdot \log y\right)}\right) \]
      7. neg-lowering-neg.f64N/A

        \[\leadsto \mathsf{fma}\left(x, \log x, \color{blue}{\mathsf{neg}\left(x \cdot \log y\right)}\right) \]
      8. *-lowering-*.f64N/A

        \[\leadsto \mathsf{fma}\left(x, \log x, \mathsf{neg}\left(\color{blue}{x \cdot \log y}\right)\right) \]
      9. log-lowering-log.f6449.8

        \[\leadsto \mathsf{fma}\left(x, \log x, -x \cdot \color{blue}{\log y}\right) \]
    7. Applied egg-rr49.8%

      \[\leadsto \color{blue}{\mathsf{fma}\left(x, \log x, -x \cdot \log y\right)} \]
  3. Recombined 3 regimes into one program.
  4. Final simplification87.8%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \cdot \log \left(\frac{x}{y}\right) \leq -\infty:\\ \;\;\;\;0 - z\\ \mathbf{elif}\;x \cdot \log \left(\frac{x}{y}\right) \leq 5 \cdot 10^{+302}:\\ \;\;\;\;x \cdot \log \left(\frac{x}{y}\right) - z\\ \mathbf{else}:\\ \;\;\;\;\mathsf{fma}\left(x, \log x, \left(0 - x\right) \cdot \log y\right)\\ \end{array} \]
  5. Add Preprocessing

Alternative 4: 86.8% accurate, 0.3× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_0 := x \cdot \log \left(\frac{x}{y}\right)\\ \mathbf{if}\;t\_0 \leq -\infty:\\ \;\;\;\;0 - z\\ \mathbf{elif}\;t\_0 \leq 5 \cdot 10^{+302}:\\ \;\;\;\;t\_0 - z\\ \mathbf{else}:\\ \;\;\;\;x \cdot \log x - x \cdot \log y\\ \end{array} \end{array} \]
(FPCore (x y z)
 :precision binary64
 (let* ((t_0 (* x (log (/ x y)))))
   (if (<= t_0 (- INFINITY))
     (- 0.0 z)
     (if (<= t_0 5e+302) (- t_0 z) (- (* x (log x)) (* x (log y)))))))
// Alternative 4: three regimes keyed on t_0 = x * log(x / y).
//   t_0 <= -inf    -> 0 - z
//   t_0 <= 5e+302  -> direct evaluation t_0 - z
//   otherwise      -> distribute the log: x*log(x) - x*log(y); z dropped
double code(double x, double y, double z) {
	double t_0 = x * log((x / y));
	double tmp;
	if (t_0 <= -((double) INFINITY)) {
		tmp = 0.0 - z;
	} else if (t_0 <= 5e+302) {
		tmp = t_0 - z;
	} else {
		tmp = (x * log(x)) - (x * log(y));
	}
	return tmp;
}
// Alternative 4: regime split on t_0 = x * Math.log(x / y); the huge regime
// distributes the log as x*log(x) - x*log(y).
public static double code(double x, double y, double z) {
	double t_0 = x * Math.log((x / y));
	double tmp;
	if (t_0 <= -Double.POSITIVE_INFINITY) {
		tmp = 0.0 - z;
	} else if (t_0 <= 5e+302) {
		tmp = t_0 - z;
	} else {
		tmp = (x * Math.log(x)) - (x * Math.log(y));
	}
	return tmp;
}
def code(x, y, z):
	"""Regime-split evaluation of x * log(x / y) - z (Alternative 4)."""
	t_0 = x * math.log(x / y)
	if t_0 <= -math.inf:
		# Product collapsed to -inf: only -z survives the expansion.
		return 0.0 - z
	if t_0 <= 5e+302:
		# Common regime: direct evaluation is accurate.
		return t_0 - z
	# Huge-product regime: distribute the log of the quotient.
	return (x * math.log(x)) - (x * math.log(y))
# Alternative 4: regime split on t_0 = x * log(x / y); the huge regime
# distributes the log as x*log(x) - x*log(y). Each step rounds to Float64.
function code(x, y, z)
	t_0 = Float64(x * log(Float64(x / y)))
	tmp = 0.0
	if (t_0 <= Float64(-Inf))
		tmp = Float64(0.0 - z);
	elseif (t_0 <= 5e+302)
		tmp = Float64(t_0 - z);
	else
		tmp = Float64(Float64(x * log(x)) - Float64(x * log(y)));
	end
	return tmp
end
% Alternative 4: regime split on t_0 = x * log(x / y); the huge regime
% distributes the log as x*log(x) - x*log(y).
function tmp_2 = code(x, y, z)
	t_0 = x * log((x / y));
	tmp = 0.0;
	if (t_0 <= -Inf)
		tmp = 0.0 - z;
	elseif (t_0 <= 5e+302)
		tmp = t_0 - z;
	else
		tmp = (x * log(x)) - (x * log(y));
	end
	tmp_2 = tmp;
end
code[x_, y_, z_] := Block[{t$95$0 = N[(x * N[Log[N[(x / y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]}, If[LessEqual[t$95$0, (-Infinity)], N[(0.0 - z), $MachinePrecision], If[LessEqual[t$95$0, 5e+302], N[(t$95$0 - z), $MachinePrecision], N[(N[(x * N[Log[x], $MachinePrecision]), $MachinePrecision] - N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}

\\
\begin{array}{l}
t_0 := x \cdot \log \left(\frac{x}{y}\right)\\
\mathbf{if}\;t\_0 \leq -\infty:\\
\;\;\;\;0 - z\\

\mathbf{elif}\;t\_0 \leq 5 \cdot 10^{+302}:\\
\;\;\;\;t\_0 - z\\

\mathbf{else}:\\
\;\;\;\;x \cdot \log x - x \cdot \log y\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if (*.f64 x (log.f64 (/.f64 x y))) < -inf.0

    1. Initial program 11.3%

      \[x \cdot \log \left(\frac{x}{y}\right) - z \]
    2. Add Preprocessing
    3. Taylor expanded in x around 0

      \[\leadsto \color{blue}{-1 \cdot z} \]
    4. Step-by-step derivation
      1. mul-1-negN/A

        \[\leadsto \color{blue}{\mathsf{neg}\left(z\right)} \]
      2. neg-sub0N/A

        \[\leadsto \color{blue}{0 - z} \]
      3. --lowering--.f6444.6

        \[\leadsto \color{blue}{0 - z} \]
    5. Simplified44.6%

      \[\leadsto \color{blue}{0 - z} \]
    6. Step-by-step derivation
      1. sub0-negN/A

        \[\leadsto \color{blue}{\mathsf{neg}\left(z\right)} \]
      2. +-lft-identityN/A

        \[\leadsto \mathsf{neg}\left(\color{blue}{\left(0 + z\right)}\right) \]
      3. neg-lowering-neg.f64N/A

        \[\leadsto \color{blue}{\mathsf{neg}\left(\left(0 + z\right)\right)} \]
      4. +-lft-identity44.6

        \[\leadsto -\color{blue}{z} \]
    7. Applied egg-rr44.6%

      \[\leadsto \color{blue}{-z} \]

    if -inf.0 < (*.f64 x (log.f64 (/.f64 x y))) < 5e302

    1. Initial program 99.7%

      \[x \cdot \log \left(\frac{x}{y}\right) - z \]
    2. Add Preprocessing

    if 5e302 < (*.f64 x (log.f64 (/.f64 x y)))

    1. Initial program 5.2%

      \[x \cdot \log \left(\frac{x}{y}\right) - z \]
    2. Add Preprocessing
    3. Taylor expanded in x around inf

      \[\leadsto \color{blue}{x \cdot \left(\log \left(\frac{1}{y}\right) + -1 \cdot \log \left(\frac{1}{x}\right)\right)} \]
    4. Step-by-step derivation
      1. distribute-rgt-inN/A

        \[\leadsto \color{blue}{\log \left(\frac{1}{y}\right) \cdot x + \left(-1 \cdot \log \left(\frac{1}{x}\right)\right) \cdot x} \]
      2. mul-1-negN/A

        \[\leadsto \log \left(\frac{1}{y}\right) \cdot x + \color{blue}{\left(\mathsf{neg}\left(\log \left(\frac{1}{x}\right)\right)\right)} \cdot x \]
      3. log-recN/A

        \[\leadsto \log \left(\frac{1}{y}\right) \cdot x + \left(\mathsf{neg}\left(\color{blue}{\left(\mathsf{neg}\left(\log x\right)\right)}\right)\right) \cdot x \]
      4. remove-double-negN/A

        \[\leadsto \log \left(\frac{1}{y}\right) \cdot x + \color{blue}{\log x} \cdot x \]
      5. distribute-rgt-inN/A

        \[\leadsto \color{blue}{x \cdot \left(\log \left(\frac{1}{y}\right) + \log x\right)} \]
      6. +-commutativeN/A

        \[\leadsto x \cdot \color{blue}{\left(\log x + \log \left(\frac{1}{y}\right)\right)} \]
      7. +-rgt-identityN/A

        \[\leadsto \color{blue}{x \cdot \left(\log x + \log \left(\frac{1}{y}\right)\right) + 0} \]
      8. accelerator-lowering-fma.f64N/A

        \[\leadsto \color{blue}{\mathsf{fma}\left(x, \log x + \log \left(\frac{1}{y}\right), 0\right)} \]
      9. log-recN/A

        \[\leadsto \mathsf{fma}\left(x, \log x + \color{blue}{\left(\mathsf{neg}\left(\log y\right)\right)}, 0\right) \]
      10. unsub-negN/A

        \[\leadsto \mathsf{fma}\left(x, \color{blue}{\log x - \log y}, 0\right) \]
      11. --lowering--.f64N/A

        \[\leadsto \mathsf{fma}\left(x, \color{blue}{\log x - \log y}, 0\right) \]
      12. log-lowering-log.f64N/A

        \[\leadsto \mathsf{fma}\left(x, \color{blue}{\log x} - \log y, 0\right) \]
      13. log-lowering-log.f6449.6

        \[\leadsto \mathsf{fma}\left(x, \log x - \color{blue}{\log y}, 0\right) \]
    5. Simplified49.6%

      \[\leadsto \color{blue}{\mathsf{fma}\left(x, \log x - \log y, 0\right)} \]
    6. Step-by-step derivation
      1. +-rgt-identityN/A

        \[\leadsto \color{blue}{x \cdot \left(\log x - \log y\right)} \]
      2. diff-logN/A

        \[\leadsto x \cdot \color{blue}{\log \left(\frac{x}{y}\right)} \]
      3. *-commutativeN/A

        \[\leadsto \color{blue}{\log \left(\frac{x}{y}\right) \cdot x} \]
      4. remove-double-divN/A

        \[\leadsto \log \left(\frac{x}{y}\right) \cdot \color{blue}{\frac{1}{\frac{1}{x}}} \]
      5. un-div-invN/A

        \[\leadsto \color{blue}{\frac{\log \left(\frac{x}{y}\right)}{\frac{1}{x}}} \]
      6. diff-logN/A

        \[\leadsto \frac{\color{blue}{\log x - \log y}}{\frac{1}{x}} \]
      7. div-subN/A

        \[\leadsto \color{blue}{\frac{\log x}{\frac{1}{x}} - \frac{\log y}{\frac{1}{x}}} \]
      8. --lowering--.f64N/A

        \[\leadsto \color{blue}{\frac{\log x}{\frac{1}{x}} - \frac{\log y}{\frac{1}{x}}} \]
      9. div-invN/A

        \[\leadsto \color{blue}{\log x \cdot \frac{1}{\frac{1}{x}}} - \frac{\log y}{\frac{1}{x}} \]
      10. remove-double-divN/A

        \[\leadsto \log x \cdot \color{blue}{x} - \frac{\log y}{\frac{1}{x}} \]
      11. *-lowering-*.f64N/A

        \[\leadsto \color{blue}{\log x \cdot x} - \frac{\log y}{\frac{1}{x}} \]
      12. log-lowering-log.f64N/A

        \[\leadsto \color{blue}{\log x} \cdot x - \frac{\log y}{\frac{1}{x}} \]
      13. div-invN/A

        \[\leadsto \log x \cdot x - \color{blue}{\log y \cdot \frac{1}{\frac{1}{x}}} \]
      14. remove-double-divN/A

        \[\leadsto \log x \cdot x - \log y \cdot \color{blue}{x} \]
      15. *-lowering-*.f64N/A

        \[\leadsto \log x \cdot x - \color{blue}{\log y \cdot x} \]
      16. log-lowering-log.f6449.7

        \[\leadsto \log x \cdot x - \color{blue}{\log y} \cdot x \]
    7. Applied egg-rr49.7%

      \[\leadsto \color{blue}{\log x \cdot x - \log y \cdot x} \]
  3. Recombined 3 regimes into one program.
  4. Final simplification87.8%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \cdot \log \left(\frac{x}{y}\right) \leq -\infty:\\ \;\;\;\;0 - z\\ \mathbf{elif}\;x \cdot \log \left(\frac{x}{y}\right) \leq 5 \cdot 10^{+302}:\\ \;\;\;\;x \cdot \log \left(\frac{x}{y}\right) - z\\ \mathbf{else}:\\ \;\;\;\;x \cdot \log x - x \cdot \log y\\ \end{array} \]
  5. Add Preprocessing

Alternative 5: 86.8% accurate, 0.3× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_0 := x \cdot \log \left(\frac{x}{y}\right)\\ \mathbf{if}\;t\_0 \leq -\infty:\\ \;\;\;\;0 - z\\ \mathbf{elif}\;t\_0 \leq 10^{+267}:\\ \;\;\;\;t\_0 - z\\ \mathbf{else}:\\ \;\;\;\;x \cdot \left(\log x - \log y\right)\\ \end{array} \end{array} \]
(FPCore (x y z)
 :precision binary64
 (let* ((t_0 (* x (log (/ x y)))))
   (if (<= t_0 (- INFINITY))
     (- 0.0 z)
     (if (<= t_0 1e+267) (- t_0 z) (* x (- (log x) (log y)))))))
double code(double x, double y, double z) {
	double t_0 = x * log((x / y));
	double tmp;
	if (t_0 <= -((double) INFINITY)) {
		tmp = 0.0 - z;
	} else if (t_0 <= 1e+267) {
		tmp = t_0 - z;
	} else {
		tmp = x * (log(x) - log(y));
	}
	return tmp;
}
/** Regime-split evaluation of x*log(x/y) - z (Herbie alternative 5). */
public static double code(double x, double y, double z) {
	double t0 = x * Math.log(x / y);
	// -inf regime: the Taylor expansion around x = 0 reduces to -z.
	if (t0 <= Double.NEGATIVE_INFINITY) {
		return 0.0 - z;
	}
	// Well-conditioned regime: evaluate the original expression.
	if (t0 <= 1e+267) {
		return t0 - z;
	}
	// Huge product (or NaN): z is negligible; split the log to avoid x/y overflow.
	return x * (Math.log(x) - Math.log(y));
}
def code(x, y, z):
	# Regime-split evaluation of x*log(x/y) - z (Herbie alternative 5).
	t_0 = x * math.log(x / y)
	if t_0 <= -math.inf:
		# Product underflowed to -inf: the Taylor expansion around x=0 gives -z.
		return 0.0 - z
	if t_0 <= 1e+267:
		# Well-conditioned region: evaluate the original expression directly.
		return t_0 - z
	# Huge product (or NaN): z is negligible; split the log to avoid x/y overflow.
	return x * (math.log(x) - math.log(y))
# Regime-split evaluation of x*log(x/y) - z (Herbie alternative 5).
function code(x, y, z)
	t0 = Float64(x * log(Float64(x / y)))
	if t0 <= -Inf
		# Product underflowed to -inf: the answer reduces to -z.
		return Float64(0.0 - z)
	elseif t0 <= 1e+267
		# Well-conditioned region: evaluate the original expression.
		return Float64(t0 - z)
	else
		# Huge product (or NaN): z is negligible; split the log to avoid x/y overflow.
		return Float64(x * Float64(log(x) - log(y)))
	end
end
% Regime-split evaluation of x*log(x/y) - z (Herbie alternative 5).
function tmp_2 = code(x, y, z)
	t_0 = x * log((x / y));
	tmp = 0.0;
	if (t_0 <= -Inf)
		% Product underflowed to -inf: answer reduces to -z.
		tmp = 0.0 - z;
	elseif (t_0 <= 1e+267)
		% Well-conditioned region: original expression.
		tmp = t_0 - z;
	else
		% Huge product: z negligible; split the log to avoid x/y overflow.
		tmp = x * (log(x) - log(y));
	end
	tmp_2 = tmp;
end
(* Herbie alternative 5: regime-split form of x*log(x/y) - z at machine precision. *)
code[x_, y_, z_] := Block[{t$95$0 = N[(x * N[Log[N[(x / y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]}, If[LessEqual[t$95$0, (-Infinity)], N[(0.0 - z), $MachinePrecision], If[LessEqual[t$95$0, 1e+267], N[(t$95$0 - z), $MachinePrecision], N[(x * N[(N[Log[x], $MachinePrecision] - N[Log[y], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}

\\
\begin{array}{l}
t_0 := x \cdot \log \left(\frac{x}{y}\right)\\
\mathbf{if}\;t\_0 \leq -\infty:\\
\;\;\;\;0 - z\\

\mathbf{elif}\;t\_0 \leq 10^{+267}:\\
\;\;\;\;t\_0 - z\\

\mathbf{else}:\\
\;\;\;\;x \cdot \left(\log x - \log y\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if (*.f64 x (log.f64 (/.f64 x y))) < -inf.0

    1. Initial program 11.3%

      \[x \cdot \log \left(\frac{x}{y}\right) - z \]
    2. Add Preprocessing
    3. Taylor expanded in x around 0

      \[\leadsto \color{blue}{-1 \cdot z} \]
    4. Step-by-step derivation
      1. mul-1-negN/A

        \[\leadsto \color{blue}{\mathsf{neg}\left(z\right)} \]
      2. neg-sub0N/A

        \[\leadsto \color{blue}{0 - z} \]
      3. --lowering--.f6444.6

        \[\leadsto \color{blue}{0 - z} \]
    5. Simplified — 44.6%

      \[\leadsto \color{blue}{0 - z} \]
    6. Step-by-step derivation
      1. sub0-negN/A

        \[\leadsto \color{blue}{\mathsf{neg}\left(z\right)} \]
      2. +-lft-identityN/A

        \[\leadsto \mathsf{neg}\left(\color{blue}{\left(0 + z\right)}\right) \]
      3. neg-lowering-neg.f64N/A

        \[\leadsto \color{blue}{\mathsf{neg}\left(\left(0 + z\right)\right)} \]
      4. +-lft-identity44.6

        \[\leadsto -\color{blue}{z} \]
    7. Applied egg-rr — 44.6%

      \[\leadsto \color{blue}{-z} \]

    if -inf.0 < (*.f64 x (log.f64 (/.f64 x y))) < 9.9999999999999997e266

    1. Initial program 99.7%

      \[x \cdot \log \left(\frac{x}{y}\right) - z \]
    2. Add Preprocessing

    if 9.9999999999999997e266 < (*.f64 x (log.f64 (/.f64 x y)))

    1. Initial program 16.3%

      \[x \cdot \log \left(\frac{x}{y}\right) - z \]
    2. Add Preprocessing
    3. Step-by-step derivation
      1. sub-negN/A

        \[\leadsto \color{blue}{x \cdot \log \left(\frac{x}{y}\right) + \left(\mathsf{neg}\left(z\right)\right)} \]
      2. *-commutativeN/A

        \[\leadsto \color{blue}{\log \left(\frac{x}{y}\right) \cdot x} + \left(\mathsf{neg}\left(z\right)\right) \]
      3. accelerator-lowering-fma.f64N/A

        \[\leadsto \color{blue}{\mathsf{fma}\left(\log \left(\frac{x}{y}\right), x, \mathsf{neg}\left(z\right)\right)} \]
      4. log-lowering-log.f64N/A

        \[\leadsto \mathsf{fma}\left(\color{blue}{\log \left(\frac{x}{y}\right)}, x, \mathsf{neg}\left(z\right)\right) \]
      5. /-lowering-/.f64N/A

        \[\leadsto \mathsf{fma}\left(\log \color{blue}{\left(\frac{x}{y}\right)}, x, \mathsf{neg}\left(z\right)\right) \]
      6. neg-sub0N/A

        \[\leadsto \mathsf{fma}\left(\log \left(\frac{x}{y}\right), x, \color{blue}{0 - z}\right) \]
      7. metadata-evalN/A

        \[\leadsto \mathsf{fma}\left(\log \left(\frac{x}{y}\right), x, \color{blue}{\log 1} - z\right) \]
      8. --lowering--.f64N/A

        \[\leadsto \mathsf{fma}\left(\log \left(\frac{x}{y}\right), x, \color{blue}{\log 1 - z}\right) \]
      9. metadata-eval16.3

        \[\leadsto \mathsf{fma}\left(\log \left(\frac{x}{y}\right), x, \color{blue}{0} - z\right) \]
    4. Applied egg-rr16.3%

      \[\leadsto \color{blue}{\mathsf{fma}\left(\log \left(\frac{x}{y}\right), x, 0 - z\right)} \]
    5. Taylor expanded in x around inf

      \[\leadsto \color{blue}{x \cdot \left(\log \left(\frac{1}{y}\right) + -1 \cdot \log \left(\frac{1}{x}\right)\right)} \]
    6. Step-by-step derivation
      1. distribute-lft-inN/A

        \[\leadsto \color{blue}{x \cdot \log \left(\frac{1}{y}\right) + x \cdot \left(-1 \cdot \log \left(\frac{1}{x}\right)\right)} \]
      2. +-commutativeN/A

        \[\leadsto \color{blue}{x \cdot \left(-1 \cdot \log \left(\frac{1}{x}\right)\right) + x \cdot \log \left(\frac{1}{y}\right)} \]
      3. mul-1-negN/A

        \[\leadsto x \cdot \color{blue}{\left(\mathsf{neg}\left(\log \left(\frac{1}{x}\right)\right)\right)} + x \cdot \log \left(\frac{1}{y}\right) \]
      4. log-recN/A

        \[\leadsto x \cdot \left(\mathsf{neg}\left(\color{blue}{\left(\mathsf{neg}\left(\log x\right)\right)}\right)\right) + x \cdot \log \left(\frac{1}{y}\right) \]
      5. remove-double-negN/A

        \[\leadsto x \cdot \color{blue}{\log x} + x \cdot \log \left(\frac{1}{y}\right) \]
      6. distribute-lft-inN/A

        \[\leadsto \color{blue}{x \cdot \left(\log x + \log \left(\frac{1}{y}\right)\right)} \]
      7. *-lowering-*.f64N/A

        \[\leadsto \color{blue}{x \cdot \left(\log x + \log \left(\frac{1}{y}\right)\right)} \]
      8. log-recN/A

        \[\leadsto x \cdot \left(\log x + \color{blue}{\left(\mathsf{neg}\left(\log y\right)\right)}\right) \]
      9. sub-negN/A

        \[\leadsto x \cdot \color{blue}{\left(\log x - \log y\right)} \]
      10. --lowering--.f64N/A

        \[\leadsto x \cdot \color{blue}{\left(\log x - \log y\right)} \]
      11. log-lowering-log.f64N/A

        \[\leadsto x \cdot \left(\color{blue}{\log x} - \log y\right) \]
      12. log-lowering-log.f6455.5

        \[\leadsto x \cdot \left(\log x - \color{blue}{\log y}\right) \]
    7. Simplified55.5%

      \[\leadsto \color{blue}{x \cdot \left(\log x - \log y\right)} \]
  3. Recombined 3 regimes into one program.
  4. Final simplification87.8%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \cdot \log \left(\frac{x}{y}\right) \leq -\infty:\\ \;\;\;\;0 - z\\ \mathbf{elif}\;x \cdot \log \left(\frac{x}{y}\right) \leq 10^{+267}:\\ \;\;\;\;x \cdot \log \left(\frac{x}{y}\right) - z\\ \mathbf{else}:\\ \;\;\;\;x \cdot \left(\log x - \log y\right)\\ \end{array} \]
  5. Add Preprocessing

Alternative 6: 86.9% accurate, 0.3× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_0 := x \cdot \log \left(\frac{x}{y}\right)\\ \mathbf{if}\;t\_0 \leq -\infty:\\ \;\;\;\;0 - z\\ \mathbf{elif}\;t\_0 \leq 5 \cdot 10^{+302}:\\ \;\;\;\;t\_0 - z\\ \mathbf{else}:\\ \;\;\;\;0 - z\\ \end{array} \end{array} \]
;; Herbie alternative 6: x*log(x/y) - z with -z fallbacks at both extremes
;; (product at -inf, or product above 5e302 where the subtraction overflows).
(FPCore (x y z)
 :precision binary64
 (let* ((t_0 (* x (log (/ x y)))))
   (if (<= t_0 (- INFINITY))
     (- 0.0 z)
     (if (<= t_0 5e+302) (- t_0 z) (- 0.0 z)))))
double code(double x, double y, double z) {
	double t_0 = x * log((x / y));
	double tmp;
	if (t_0 <= -((double) INFINITY)) {
		tmp = 0.0 - z;
	} else if (t_0 <= 5e+302) {
		tmp = t_0 - z;
	} else {
		tmp = 0.0 - z;
	}
	return tmp;
}
/** Piecewise x*log(x/y) - z with an overflow fallback of -z (Herbie alternative 6). */
public static double code(double x, double y, double z) {
	double t0 = x * Math.log(x / y);
	double result;
	if (t0 <= Double.NEGATIVE_INFINITY) {
		result = 0.0 - z;   // product underflowed to -inf: use -z
	} else if (t0 <= 5e+302) {
		result = t0 - z;    // well-conditioned region
	} else {
		result = 0.0 - z;   // overflow region (or NaN): fall back to -z
	}
	return result;
}
def code(x, y, z):
	# Piecewise x*log(x/y) - z with an overflow fallback of -z (Herbie alternative 6).
	t_0 = x * math.log(x / y)
	if t_0 <= -math.inf:
		# Product underflowed to -inf: use -z.
		result = 0.0 - z
	elif t_0 <= 5e+302:
		# Well-conditioned region.
		result = t_0 - z
	else:
		# Overflow region (or NaN): fall back to -z.
		result = 0.0 - z
	return result
# Piecewise x*log(x/y) - z with an overflow fallback of -z (Herbie alternative 6).
function code(x, y, z)
	t0 = Float64(x * log(Float64(x / y)))
	if t0 <= -Inf
		# Product underflowed to -inf: use -z.
		return Float64(0.0 - z)
	elseif t0 <= 5e+302
		# Well-conditioned region.
		return Float64(t0 - z)
	else
		# Overflow region (or NaN): fall back to -z.
		return Float64(0.0 - z)
	end
end
% Piecewise x*log(x/y) - z with an overflow fallback of -z (Herbie alternative 6).
function tmp_2 = code(x, y, z)
	t_0 = x * log((x / y));
	tmp = 0.0;
	if (t_0 <= -Inf)
		% Product underflowed to -inf: use -z.
		tmp = 0.0 - z;
	elseif (t_0 <= 5e+302)
		% Well-conditioned region.
		tmp = t_0 - z;
	else
		% Overflow region: fall back to -z.
		tmp = 0.0 - z;
	end
	tmp_2 = tmp;
end
(* Herbie alternative 6: x*log(x/y) - z with -z fallbacks at both extremes. *)
code[x_, y_, z_] := Block[{t$95$0 = N[(x * N[Log[N[(x / y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]}, If[LessEqual[t$95$0, (-Infinity)], N[(0.0 - z), $MachinePrecision], If[LessEqual[t$95$0, 5e+302], N[(t$95$0 - z), $MachinePrecision], N[(0.0 - z), $MachinePrecision]]]]
\begin{array}{l}

\\
\begin{array}{l}
t_0 := x \cdot \log \left(\frac{x}{y}\right)\\
\mathbf{if}\;t\_0 \leq -\infty:\\
\;\;\;\;0 - z\\

\mathbf{elif}\;t\_0 \leq 5 \cdot 10^{+302}:\\
\;\;\;\;t\_0 - z\\

\mathbf{else}:\\
\;\;\;\;0 - z\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if (*.f64 x (log.f64 (/.f64 x y))) < -inf.0 or 5e302 < (*.f64 x (log.f64 (/.f64 x y)))

    1. Initial program 8.2%

      \[x \cdot \log \left(\frac{x}{y}\right) - z \]
    2. Add Preprocessing
    3. Taylor expanded in x around 0

      \[\leadsto \color{blue}{-1 \cdot z} \]
    4. Step-by-step derivation
      1. mul-1-negN/A

        \[\leadsto \color{blue}{\mathsf{neg}\left(z\right)} \]
      2. neg-sub0N/A

        \[\leadsto \color{blue}{0 - z} \]
      3. --lowering--.f6442.1

        \[\leadsto \color{blue}{0 - z} \]
    5. Simplified42.1%

      \[\leadsto \color{blue}{0 - z} \]
    6. Step-by-step derivation
      1. sub0-negN/A

        \[\leadsto \color{blue}{\mathsf{neg}\left(z\right)} \]
      2. +-lft-identityN/A

        \[\leadsto \mathsf{neg}\left(\color{blue}{\left(0 + z\right)}\right) \]
      3. neg-lowering-neg.f64N/A

        \[\leadsto \color{blue}{\mathsf{neg}\left(\left(0 + z\right)\right)} \]
      4. +-lft-identity42.1

        \[\leadsto -\color{blue}{z} \]
    7. Applied egg-rr42.1%

      \[\leadsto \color{blue}{-z} \]

    if -inf.0 < (*.f64 x (log.f64 (/.f64 x y))) < 5e302

    1. Initial program 99.7%

      \[x \cdot \log \left(\frac{x}{y}\right) - z \]
    2. Add Preprocessing
  3. Recombined 2 regimes into one program.
  4. Final simplification86.7%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \cdot \log \left(\frac{x}{y}\right) \leq -\infty:\\ \;\;\;\;0 - z\\ \mathbf{elif}\;x \cdot \log \left(\frac{x}{y}\right) \leq 5 \cdot 10^{+302}:\\ \;\;\;\;x \cdot \log \left(\frac{x}{y}\right) - z\\ \mathbf{else}:\\ \;\;\;\;0 - z\\ \end{array} \]
  5. Add Preprocessing

Alternative 7: 93.6% accurate, 0.5× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq -2 \cdot 10^{-256}:\\ \;\;\;\;x \cdot \left(\log \left(0 - x\right) - \log \left(0 - y\right)\right) - z\\ \mathbf{elif}\;x \leq 7 \cdot 10^{-159}:\\ \;\;\;\;0 - z\\ \mathbf{elif}\;x \leq 3.3 \cdot 10^{+163}:\\ \;\;\;\;x \cdot \log \left(\frac{x}{y}\right) - z\\ \mathbf{else}:\\ \;\;\;\;x \cdot \left(\log x - \log y\right)\\ \end{array} \end{array} \]
;; Herbie alternative 7: four regimes split on x — negated-log form for
;; negative x, -z for tiny |x|, the original expression for moderate x,
;; and the split-log form for huge x.
(FPCore (x y z)
 :precision binary64
 (if (<= x -2e-256)
   (- (* x (- (log (- 0.0 x)) (log (- 0.0 y)))) z)
   (if (<= x 7e-159)
     (- 0.0 z)
     (if (<= x 3.3e+163)
       (- (* x (log (/ x y))) z)
       (* x (- (log x) (log y)))))))
double code(double x, double y, double z) {
	double tmp;
	if (x <= -2e-256) {
		tmp = (x * (log((0.0 - x)) - log((0.0 - y)))) - z;
	} else if (x <= 7e-159) {
		tmp = 0.0 - z;
	} else if (x <= 3.3e+163) {
		tmp = (x * log((x / y))) - z;
	} else {
		tmp = x * (log(x) - log(y));
	}
	return tmp;
}
! Four-regime evaluation of x*log(x/y) - z, split on x (Herbie alternative 7).
real(8) function code(x, y, z)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8) :: tmp
    if (x <= (-2d-256)) then
        ! Negative regime: negate both log arguments so they are positive.
        tmp = (x * (log((0.0d0 - x)) - log((0.0d0 - y)))) - z
    else if (x <= 7d-159) then
        ! |x| tiny: the x*log(x/y) term is negligible next to z.
        tmp = 0.0d0 - z
    else if (x <= 3.3d+163) then
        ! Moderate x: the original expression is accurate enough.
        tmp = (x * log((x / y))) - z
    else
        ! Huge x: drop z and split the log to avoid overflow in x/y.
        tmp = x * (log(x) - log(y))
    end if
    code = tmp
end function
/** Four-regime evaluation of x*log(x/y) - z, split on x (Herbie alternative 7). */
public static double code(double x, double y, double z) {
	if (x <= -2e-256) {
		// Negative regime: negate both log arguments so they are positive.
		return x * (Math.log(0.0 - x) - Math.log(0.0 - y)) - z;
	}
	if (x <= 7e-159) {
		// |x| tiny: the x*log(x/y) term is negligible next to z.
		return 0.0 - z;
	}
	if (x <= 3.3e+163) {
		// Moderate x: the original expression is accurate enough.
		return x * Math.log(x / y) - z;
	}
	// Huge x (or NaN): drop z and split the log to avoid overflow in x/y.
	return x * (Math.log(x) - Math.log(y));
}
def code(x, y, z):
	# Four-regime evaluation of x*log(x/y) - z, split on x (Herbie alternative 7).
	if x <= -2e-256:
		# Negative regime: negate both log arguments so they are positive.
		return x * (math.log(0.0 - x) - math.log(0.0 - y)) - z
	if x <= 7e-159:
		# |x| tiny: the x*log(x/y) term is negligible next to z.
		return 0.0 - z
	if x <= 3.3e+163:
		# Moderate x: the original expression is accurate enough.
		return x * math.log(x / y) - z
	# Huge x (or NaN): drop z and split the log to avoid overflow in x/y.
	return x * (math.log(x) - math.log(y))
# Four-regime evaluation of x*log(x/y) - z, split on x (Herbie alternative 7).
function code(x, y, z)
	tmp = 0.0
	if (x <= -2e-256)
		# Negative regime: negate both log arguments so they are positive.
		tmp = Float64(Float64(x * Float64(log(Float64(0.0 - x)) - log(Float64(0.0 - y)))) - z);
	elseif (x <= 7e-159)
		# |x| tiny: the x*log(x/y) term is negligible next to z.
		tmp = Float64(0.0 - z);
	elseif (x <= 3.3e+163)
		# Moderate x: the original expression is accurate enough.
		tmp = Float64(Float64(x * log(Float64(x / y))) - z);
	else
		# Huge x: drop z and split the log to avoid overflow in x/y.
		tmp = Float64(x * Float64(log(x) - log(y)));
	end
	return tmp
end
% Four-regime evaluation of x*log(x/y) - z, split on x (Herbie alternative 7).
function tmp_2 = code(x, y, z)
	tmp = 0.0;
	if (x <= -2e-256)
		% Negative regime: negate both log arguments so they are positive.
		tmp = (x * (log((0.0 - x)) - log((0.0 - y)))) - z;
	elseif (x <= 7e-159)
		% |x| tiny: x*log(x/y) is negligible next to z.
		tmp = 0.0 - z;
	elseif (x <= 3.3e+163)
		% Moderate x: original expression is accurate enough.
		tmp = (x * log((x / y))) - z;
	else
		% Huge x: drop z and split the log to avoid overflow in x/y.
		tmp = x * (log(x) - log(y));
	end
	tmp_2 = tmp;
end
(* Herbie alternative 7: four-regime form of x*log(x/y) - z, split on x. *)
code[x_, y_, z_] := If[LessEqual[x, -2e-256], N[(N[(x * N[(N[Log[N[(0.0 - x), $MachinePrecision]], $MachinePrecision] - N[Log[N[(0.0 - y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - z), $MachinePrecision], If[LessEqual[x, 7e-159], N[(0.0 - z), $MachinePrecision], If[LessEqual[x, 3.3e+163], N[(N[(x * N[Log[N[(x / y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] - z), $MachinePrecision], N[(x * N[(N[Log[x], $MachinePrecision] - N[Log[y], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x \leq -2 \cdot 10^{-256}:\\
\;\;\;\;x \cdot \left(\log \left(0 - x\right) - \log \left(0 - y\right)\right) - z\\

\mathbf{elif}\;x \leq 7 \cdot 10^{-159}:\\
\;\;\;\;0 - z\\

\mathbf{elif}\;x \leq 3.3 \cdot 10^{+163}:\\
\;\;\;\;x \cdot \log \left(\frac{x}{y}\right) - z\\

\mathbf{else}:\\
\;\;\;\;x \cdot \left(\log x - \log y\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 4 regimes
  2. if x < -1.99999999999999995e-256

    1. Initial program 79.1%

      \[x \cdot \log \left(\frac{x}{y}\right) - z \]
    2. Add Preprocessing
    3. Step-by-step derivation
      1. frac-2negN/A

        \[\leadsto x \cdot \log \color{blue}{\left(\frac{\mathsf{neg}\left(x\right)}{\mathsf{neg}\left(y\right)}\right)} - z \]
      2. log-divN/A

        \[\leadsto x \cdot \color{blue}{\left(\log \left(\mathsf{neg}\left(x\right)\right) - \log \left(\mathsf{neg}\left(y\right)\right)\right)} - z \]
      3. --lowering--.f64N/A

        \[\leadsto x \cdot \color{blue}{\left(\log \left(\mathsf{neg}\left(x\right)\right) - \log \left(\mathsf{neg}\left(y\right)\right)\right)} - z \]
      4. log-lowering-log.f64N/A

        \[\leadsto x \cdot \left(\color{blue}{\log \left(\mathsf{neg}\left(x\right)\right)} - \log \left(\mathsf{neg}\left(y\right)\right)\right) - z \]
      5. neg-sub0N/A

        \[\leadsto x \cdot \left(\log \color{blue}{\left(0 - x\right)} - \log \left(\mathsf{neg}\left(y\right)\right)\right) - z \]
      6. metadata-evalN/A

        \[\leadsto x \cdot \left(\log \left(\color{blue}{\log 1} - x\right) - \log \left(\mathsf{neg}\left(y\right)\right)\right) - z \]
      7. --lowering--.f64N/A

        \[\leadsto x \cdot \left(\log \color{blue}{\left(\log 1 - x\right)} - \log \left(\mathsf{neg}\left(y\right)\right)\right) - z \]
      8. metadata-evalN/A

        \[\leadsto x \cdot \left(\log \left(\color{blue}{0} - x\right) - \log \left(\mathsf{neg}\left(y\right)\right)\right) - z \]
      9. log-lowering-log.f64N/A

        \[\leadsto x \cdot \left(\log \left(0 - x\right) - \color{blue}{\log \left(\mathsf{neg}\left(y\right)\right)}\right) - z \]
      10. neg-sub0N/A

        \[\leadsto x \cdot \left(\log \left(0 - x\right) - \log \color{blue}{\left(0 - y\right)}\right) - z \]
      11. metadata-evalN/A

        \[\leadsto x \cdot \left(\log \left(0 - x\right) - \log \left(\color{blue}{\log 1} - y\right)\right) - z \]
      12. --lowering--.f64N/A

        \[\leadsto x \cdot \left(\log \left(0 - x\right) - \log \color{blue}{\left(\log 1 - y\right)}\right) - z \]
      13. metadata-eval99.4

        \[\leadsto x \cdot \left(\log \left(0 - x\right) - \log \left(\color{blue}{0} - y\right)\right) - z \]
    4. Applied egg-rr99.4%

      \[\leadsto x \cdot \color{blue}{\left(\log \left(0 - x\right) - \log \left(0 - y\right)\right)} - z \]

    if -1.99999999999999995e-256 < x < 7.00000000000000005e-159

    1. Initial program 63.6%

      \[x \cdot \log \left(\frac{x}{y}\right) - z \]
    2. Add Preprocessing
    3. Taylor expanded in x around 0

      \[\leadsto \color{blue}{-1 \cdot z} \]
    4. Step-by-step derivation
      1. mul-1-negN/A

        \[\leadsto \color{blue}{\mathsf{neg}\left(z\right)} \]
      2. neg-sub0N/A

        \[\leadsto \color{blue}{0 - z} \]
      3. --lowering--.f6487.8

        \[\leadsto \color{blue}{0 - z} \]
    5. Simplified87.8%

      \[\leadsto \color{blue}{0 - z} \]
    6. Step-by-step derivation
      1. sub0-negN/A

        \[\leadsto \color{blue}{\mathsf{neg}\left(z\right)} \]
      2. +-lft-identityN/A

        \[\leadsto \mathsf{neg}\left(\color{blue}{\left(0 + z\right)}\right) \]
      3. neg-lowering-neg.f64N/A

        \[\leadsto \color{blue}{\mathsf{neg}\left(\left(0 + z\right)\right)} \]
      4. +-lft-identity87.8

        \[\leadsto -\color{blue}{z} \]
    7. Applied egg-rr87.8%

      \[\leadsto \color{blue}{-z} \]

    if 7.00000000000000005e-159 < x < 3.3e163

    1. Initial program 96.7%

      \[x \cdot \log \left(\frac{x}{y}\right) - z \]
    2. Add Preprocessing

    if 3.3e163 < x

    1. Initial program 57.8%

      \[x \cdot \log \left(\frac{x}{y}\right) - z \]
    2. Add Preprocessing
    3. Step-by-step derivation
      1. sub-negN/A

        \[\leadsto \color{blue}{x \cdot \log \left(\frac{x}{y}\right) + \left(\mathsf{neg}\left(z\right)\right)} \]
      2. *-commutativeN/A

        \[\leadsto \color{blue}{\log \left(\frac{x}{y}\right) \cdot x} + \left(\mathsf{neg}\left(z\right)\right) \]
      3. accelerator-lowering-fma.f64N/A

        \[\leadsto \color{blue}{\mathsf{fma}\left(\log \left(\frac{x}{y}\right), x, \mathsf{neg}\left(z\right)\right)} \]
      4. log-lowering-log.f64N/A

        \[\leadsto \mathsf{fma}\left(\color{blue}{\log \left(\frac{x}{y}\right)}, x, \mathsf{neg}\left(z\right)\right) \]
      5. /-lowering-/.f64N/A

        \[\leadsto \mathsf{fma}\left(\log \color{blue}{\left(\frac{x}{y}\right)}, x, \mathsf{neg}\left(z\right)\right) \]
      6. neg-sub0N/A

        \[\leadsto \mathsf{fma}\left(\log \left(\frac{x}{y}\right), x, \color{blue}{0 - z}\right) \]
      7. metadata-evalN/A

        \[\leadsto \mathsf{fma}\left(\log \left(\frac{x}{y}\right), x, \color{blue}{\log 1} - z\right) \]
      8. --lowering--.f64N/A

        \[\leadsto \mathsf{fma}\left(\log \left(\frac{x}{y}\right), x, \color{blue}{\log 1 - z}\right) \]
      9. metadata-eval57.7

        \[\leadsto \mathsf{fma}\left(\log \left(\frac{x}{y}\right), x, \color{blue}{0} - z\right) \]
    4. Applied egg-rr57.7%

      \[\leadsto \color{blue}{\mathsf{fma}\left(\log \left(\frac{x}{y}\right), x, 0 - z\right)} \]
    5. Taylor expanded in x around inf

      \[\leadsto \color{blue}{x \cdot \left(\log \left(\frac{1}{y}\right) + -1 \cdot \log \left(\frac{1}{x}\right)\right)} \]
    6. Step-by-step derivation
      1. distribute-lft-inN/A

        \[\leadsto \color{blue}{x \cdot \log \left(\frac{1}{y}\right) + x \cdot \left(-1 \cdot \log \left(\frac{1}{x}\right)\right)} \]
      2. +-commutativeN/A

        \[\leadsto \color{blue}{x \cdot \left(-1 \cdot \log \left(\frac{1}{x}\right)\right) + x \cdot \log \left(\frac{1}{y}\right)} \]
      3. mul-1-negN/A

        \[\leadsto x \cdot \color{blue}{\left(\mathsf{neg}\left(\log \left(\frac{1}{x}\right)\right)\right)} + x \cdot \log \left(\frac{1}{y}\right) \]
      4. log-recN/A

        \[\leadsto x \cdot \left(\mathsf{neg}\left(\color{blue}{\left(\mathsf{neg}\left(\log x\right)\right)}\right)\right) + x \cdot \log \left(\frac{1}{y}\right) \]
      5. remove-double-negN/A

        \[\leadsto x \cdot \color{blue}{\log x} + x \cdot \log \left(\frac{1}{y}\right) \]
      6. distribute-lft-inN/A

        \[\leadsto \color{blue}{x \cdot \left(\log x + \log \left(\frac{1}{y}\right)\right)} \]
      7. *-lowering-*.f64N/A

        \[\leadsto \color{blue}{x \cdot \left(\log x + \log \left(\frac{1}{y}\right)\right)} \]
      8. log-recN/A

        \[\leadsto x \cdot \left(\log x + \color{blue}{\left(\mathsf{neg}\left(\log y\right)\right)}\right) \]
      9. sub-negN/A

        \[\leadsto x \cdot \color{blue}{\left(\log x - \log y\right)} \]
      10. --lowering--.f64N/A

        \[\leadsto x \cdot \color{blue}{\left(\log x - \log y\right)} \]
      11. log-lowering-log.f64N/A

        \[\leadsto x \cdot \left(\color{blue}{\log x} - \log y\right) \]
      12. log-lowering-log.f6493.0

        \[\leadsto x \cdot \left(\log x - \color{blue}{\log y}\right) \]
    7. Simplified93.0%

      \[\leadsto \color{blue}{x \cdot \left(\log x - \log y\right)} \]
  3. Recombined 4 regimes into one program.
  4. Final simplification96.5%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -2 \cdot 10^{-256}:\\ \;\;\;\;x \cdot \left(\log \left(0 - x\right) - \log \left(0 - y\right)\right) - z\\ \mathbf{elif}\;x \leq 7 \cdot 10^{-159}:\\ \;\;\;\;0 - z\\ \mathbf{elif}\;x \leq 3.3 \cdot 10^{+163}:\\ \;\;\;\;x \cdot \log \left(\frac{x}{y}\right) - z\\ \mathbf{else}:\\ \;\;\;\;x \cdot \left(\log x - \log y\right)\\ \end{array} \]
  5. Add Preprocessing

Alternative 8: 65.6% accurate, 0.9× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;z \leq -4.8 \cdot 10^{-64}:\\ \;\;\;\;0 - z\\ \mathbf{elif}\;z \leq 7.8 \cdot 10^{+89}:\\ \;\;\;\;\left(0 - x\right) \cdot \log \left(\frac{y}{x}\right)\\ \mathbf{else}:\\ \;\;\;\;0 - z\\ \end{array} \end{array} \]
;; Herbie alternative 8: regimes split on z — -z when z dominates at either
;; extreme, otherwise the rewritten product -x*log(y/x) (== x*log(x/y)).
(FPCore (x y z)
 :precision binary64
 (if (<= z -4.8e-64)
   (- 0.0 z)
   (if (<= z 7.8e+89) (* (- 0.0 x) (log (/ y x))) (- 0.0 z))))
double code(double x, double y, double z) {
	double tmp;
	if (z <= -4.8e-64) {
		tmp = 0.0 - z;
	} else if (z <= 7.8e+89) {
		tmp = (0.0 - x) * log((y / x));
	} else {
		tmp = 0.0 - z;
	}
	return tmp;
}
! Regime split on z for x*log(x/y) - z (Herbie alternative 8).
real(8) function code(x, y, z)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8) :: tmp
    if (z <= (-4.8d-64)) then
        ! Large negative z dominates the subtraction.
        tmp = 0.0d0 - z
    else if (z <= 7.8d+89) then
        ! Middle regime: z negligible; -x*log(y/x) == x*log(x/y).
        tmp = (0.0d0 - x) * log((y / x))
    else
        ! Huge positive z dominates again.
        tmp = 0.0d0 - z
    end if
    code = tmp
end function
/** Regime split on z for x*log(x/y) - z (Herbie alternative 8). */
public static double code(double x, double y, double z) {
	if (z <= -4.8e-64) {
		// Large negative z dominates the subtraction.
		return 0.0 - z;
	}
	if (z <= 7.8e+89) {
		// Middle regime: z negligible; -x*log(y/x) == x*log(x/y).
		return (0.0 - x) * Math.log(y / x);
	}
	// Huge positive z (or NaN) dominates again.
	return 0.0 - z;
}
def code(x, y, z):
	# Regime split on z for x*log(x/y) - z (Herbie alternative 8).
	if z <= -4.8e-64:
		# Large negative z dominates the subtraction.
		return 0.0 - z
	if z <= 7.8e+89:
		# Middle regime: z negligible; -x*log(y/x) == x*log(x/y).
		return (0.0 - x) * math.log(y / x)
	# Huge positive z (or NaN) dominates again.
	return 0.0 - z
# Regime split on z for x*log(x/y) - z (Herbie alternative 8).
function code(x, y, z)
	tmp = 0.0
	if (z <= -4.8e-64)
		# Large negative z dominates the subtraction.
		tmp = Float64(0.0 - z);
	elseif (z <= 7.8e+89)
		# Middle regime: z negligible; -x*log(y/x) == x*log(x/y).
		tmp = Float64(Float64(0.0 - x) * log(Float64(y / x)));
	else
		# Huge positive z dominates again.
		tmp = Float64(0.0 - z);
	end
	return tmp
end
% Regime split on z for x*log(x/y) - z (Herbie alternative 8).
function tmp_2 = code(x, y, z)
	tmp = 0.0;
	if (z <= -4.8e-64)
		% Large negative z dominates the subtraction.
		tmp = 0.0 - z;
	elseif (z <= 7.8e+89)
		% Middle regime: z negligible; -x*log(y/x) == x*log(x/y).
		tmp = (0.0 - x) * log((y / x));
	else
		% Huge positive z dominates again.
		tmp = 0.0 - z;
	end
	tmp_2 = tmp;
end
(* Herbie alternative 8: regime split on z; middle regime uses -x*log(y/x). *)
code[x_, y_, z_] := If[LessEqual[z, -4.8e-64], N[(0.0 - z), $MachinePrecision], If[LessEqual[z, 7.8e+89], N[(N[(0.0 - x), $MachinePrecision] * N[Log[N[(y / x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision], N[(0.0 - z), $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;z \leq -4.8 \cdot 10^{-64}:\\
\;\;\;\;0 - z\\

\mathbf{elif}\;z \leq 7.8 \cdot 10^{+89}:\\
\;\;\;\;\left(0 - x\right) \cdot \log \left(\frac{y}{x}\right)\\

\mathbf{else}:\\
\;\;\;\;0 - z\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if z < -4.79999999999999997e-64 or 7.80000000000000021e89 < z

    1. Initial program 76.1%

      \[x \cdot \log \left(\frac{x}{y}\right) - z \]
    2. Add Preprocessing
    3. Taylor expanded in x around 0

      \[\leadsto \color{blue}{-1 \cdot z} \]
    4. Step-by-step derivation
      1. mul-1-negN/A

        \[\leadsto \color{blue}{\mathsf{neg}\left(z\right)} \]
      2. neg-sub0N/A

        \[\leadsto \color{blue}{0 - z} \]
      3. --lowering--.f6471.1

        \[\leadsto \color{blue}{0 - z} \]
    5. Simplified71.1%

      \[\leadsto \color{blue}{0 - z} \]
    6. Step-by-step derivation
      1. sub0-negN/A

        \[\leadsto \color{blue}{\mathsf{neg}\left(z\right)} \]
      2. +-lft-identityN/A

        \[\leadsto \mathsf{neg}\left(\color{blue}{\left(0 + z\right)}\right) \]
      3. neg-lowering-neg.f64N/A

        \[\leadsto \color{blue}{\mathsf{neg}\left(\left(0 + z\right)\right)} \]
      4. +-lft-identity71.1

        \[\leadsto -\color{blue}{z} \]
    7. Applied egg-rr71.1%

      \[\leadsto \color{blue}{-z} \]

    if -4.79999999999999997e-64 < z < 7.80000000000000021e89

    1. Initial program 81.2%

      \[x \cdot \log \left(\frac{x}{y}\right) - z \]
    2. Add Preprocessing
    3. Taylor expanded in x around inf

      \[\leadsto \color{blue}{x \cdot \left(\log \left(\frac{1}{y}\right) + -1 \cdot \log \left(\frac{1}{x}\right)\right)} \]
    4. Step-by-step derivation
      1. distribute-rgt-inN/A

        \[\leadsto \color{blue}{\log \left(\frac{1}{y}\right) \cdot x + \left(-1 \cdot \log \left(\frac{1}{x}\right)\right) \cdot x} \]
      2. mul-1-negN/A

        \[\leadsto \log \left(\frac{1}{y}\right) \cdot x + \color{blue}{\left(\mathsf{neg}\left(\log \left(\frac{1}{x}\right)\right)\right)} \cdot x \]
      3. log-recN/A

        \[\leadsto \log \left(\frac{1}{y}\right) \cdot x + \left(\mathsf{neg}\left(\color{blue}{\left(\mathsf{neg}\left(\log x\right)\right)}\right)\right) \cdot x \]
      4. remove-double-negN/A

        \[\leadsto \log \left(\frac{1}{y}\right) \cdot x + \color{blue}{\log x} \cdot x \]
      5. distribute-rgt-inN/A

        \[\leadsto \color{blue}{x \cdot \left(\log \left(\frac{1}{y}\right) + \log x\right)} \]
      6. +-commutativeN/A

        \[\leadsto x \cdot \color{blue}{\left(\log x + \log \left(\frac{1}{y}\right)\right)} \]
      7. +-rgt-identityN/A

        \[\leadsto \color{blue}{x \cdot \left(\log x + \log \left(\frac{1}{y}\right)\right) + 0} \]
      8. accelerator-lowering-fma.f64N/A

        \[\leadsto \color{blue}{\mathsf{fma}\left(x, \log x + \log \left(\frac{1}{y}\right), 0\right)} \]
      9. log-recN/A

        \[\leadsto \mathsf{fma}\left(x, \log x + \color{blue}{\left(\mathsf{neg}\left(\log y\right)\right)}, 0\right) \]
      10. unsub-negN/A

        \[\leadsto \mathsf{fma}\left(x, \color{blue}{\log x - \log y}, 0\right) \]
      11. --lowering--.f64N/A

        \[\leadsto \mathsf{fma}\left(x, \color{blue}{\log x - \log y}, 0\right) \]
      12. log-lowering-log.f64N/A

        \[\leadsto \mathsf{fma}\left(x, \color{blue}{\log x} - \log y, 0\right) \]
      13. log-lowering-log.f6435.7

        \[\leadsto \mathsf{fma}\left(x, \log x - \color{blue}{\log y}, 0\right) \]
    5. Simplified35.7%

      \[\leadsto \color{blue}{\mathsf{fma}\left(x, \log x - \log y, 0\right)} \]
    6. Step-by-step derivation
      1. +-rgt-identityN/A

        \[\leadsto \color{blue}{x \cdot \left(\log x - \log y\right)} \]
      2. diff-logN/A

        \[\leadsto x \cdot \color{blue}{\log \left(\frac{x}{y}\right)} \]
      3. clear-numN/A

        \[\leadsto x \cdot \log \color{blue}{\left(\frac{1}{\frac{y}{x}}\right)} \]
      4. log-recN/A

        \[\leadsto x \cdot \color{blue}{\left(\mathsf{neg}\left(\log \left(\frac{y}{x}\right)\right)\right)} \]
      5. clear-numN/A

        \[\leadsto x \cdot \left(\mathsf{neg}\left(\log \color{blue}{\left(\frac{1}{\frac{x}{y}}\right)}\right)\right) \]
      6. neg-logN/A

        \[\leadsto x \cdot \left(\mathsf{neg}\left(\color{blue}{\left(\mathsf{neg}\left(\log \left(\frac{x}{y}\right)\right)\right)}\right)\right) \]
      7. distribute-rgt-neg-outN/A

        \[\leadsto \color{blue}{\mathsf{neg}\left(x \cdot \left(\mathsf{neg}\left(\log \left(\frac{x}{y}\right)\right)\right)\right)} \]
      8. distribute-rgt-neg-inN/A

        \[\leadsto \mathsf{neg}\left(\color{blue}{\left(\mathsf{neg}\left(x \cdot \log \left(\frac{x}{y}\right)\right)\right)}\right) \]
      9. neg-lowering-neg.f64N/A

        \[\leadsto \color{blue}{\mathsf{neg}\left(\left(\mathsf{neg}\left(x \cdot \log \left(\frac{x}{y}\right)\right)\right)\right)} \]
      10. distribute-rgt-neg-inN/A

        \[\leadsto \mathsf{neg}\left(\color{blue}{x \cdot \left(\mathsf{neg}\left(\log \left(\frac{x}{y}\right)\right)\right)}\right) \]
      11. *-lowering-*.f64N/A

        \[\leadsto \mathsf{neg}\left(\color{blue}{x \cdot \left(\mathsf{neg}\left(\log \left(\frac{x}{y}\right)\right)\right)}\right) \]
      12. neg-logN/A

        \[\leadsto \mathsf{neg}\left(x \cdot \color{blue}{\log \left(\frac{1}{\frac{x}{y}}\right)}\right) \]
      13. clear-numN/A

        \[\leadsto \mathsf{neg}\left(x \cdot \log \color{blue}{\left(\frac{y}{x}\right)}\right) \]
      14. log-lowering-log.f64N/A

        \[\leadsto \mathsf{neg}\left(x \cdot \color{blue}{\log \left(\frac{y}{x}\right)}\right) \]
      15. /-lowering-/.f6465.7

        \[\leadsto -x \cdot \log \color{blue}{\left(\frac{y}{x}\right)} \]
    7. Applied egg-rr65.7%

      \[\leadsto \color{blue}{-x \cdot \log \left(\frac{y}{x}\right)} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification68.1%

    \[\leadsto \begin{array}{l} \mathbf{if}\;z \leq -4.8 \cdot 10^{-64}:\\ \;\;\;\;0 - z\\ \mathbf{elif}\;z \leq 7.8 \cdot 10^{+89}:\\ \;\;\;\;\left(0 - x\right) \cdot \log \left(\frac{y}{x}\right)\\ \mathbf{else}:\\ \;\;\;\;0 - z\\ \end{array} \]
  5. Add Preprocessing

Alternative 9: 65.5% accurate, 0.9× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;z \leq -9.5 \cdot 10^{-64}:\\ \;\;\;\;0 - z\\ \mathbf{elif}\;z \leq 4.4 \cdot 10^{+89}:\\ \;\;\;\;x \cdot \log \left(\frac{x}{y}\right)\\ \mathbf{else}:\\ \;\;\;\;0 - z\\ \end{array} \end{array} \]
;; Herbie alternative 9: regimes split on z — -z when z dominates at either
;; extreme, otherwise the bare product x*log(x/y) with z dropped.
(FPCore (x y z)
 :precision binary64
 (if (<= z -9.5e-64)
   (- 0.0 z)
   (if (<= z 4.4e+89) (* x (log (/ x y))) (- 0.0 z))))
double code(double x, double y, double z) {
	double tmp;
	if (z <= -9.5e-64) {
		tmp = 0.0 - z;
	} else if (z <= 4.4e+89) {
		tmp = x * log((x / y));
	} else {
		tmp = 0.0 - z;
	}
	return tmp;
}
! Regime split on z for x*log(x/y) - z (Herbie alternative 9).
real(8) function code(x, y, z)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8) :: tmp
    if (z <= (-9.5d-64)) then
        ! z dominates; answer is -z.
        tmp = 0.0d0 - z
    else if (z <= 4.4d+89) then
        ! Middle regime: drop the negligible z term.
        tmp = x * log((x / y))
    else
        ! Very large z dominates again.
        tmp = 0.0d0 - z
    end if
    code = tmp
end function
/** Regime split on z for x*log(x/y) - z (Herbie alternative 9). */
public static double code(double x, double y, double z) {
	if (z <= -9.5e-64) {
		// z dominates; answer is -z.
		return 0.0 - z;
	}
	if (z <= 4.4e+89) {
		// Middle regime: drop the negligible z term.
		return x * Math.log(x / y);
	}
	// Very large z (or NaN) dominates again.
	return 0.0 - z;
}
def code(x, y, z):
	# Regime split on z for x*log(x/y) - z (Herbie alternative 9).
	if z <= -9.5e-64:
		# z dominates; answer is -z.
		return 0.0 - z
	if z <= 4.4e+89:
		# Middle regime: drop the negligible z term.
		return x * math.log(x / y)
	# Very large z (or NaN) dominates again.
	return 0.0 - z
# Regime split on z for x*log(x/y) - z (Herbie alternative 9).
function code(x, y, z)
	tmp = 0.0
	if (z <= -9.5e-64)
		# z dominates; answer is -z.
		tmp = Float64(0.0 - z);
	elseif (z <= 4.4e+89)
		# Middle regime: drop the negligible z term.
		tmp = Float64(x * log(Float64(x / y)));
	else
		# Very large z dominates again.
		tmp = Float64(0.0 - z);
	end
	return tmp
end
% Herbie alternative 9 (65.5% accurate): piecewise form of x*log(x/y) - z.
% For z in (-9.5e-64, 4.4e+89] the -z term is dropped; outside that regime
% the x*log(x/y) term is dropped and -z (written 0.0 - z) is returned.
function tmp_2 = code(x, y, z)
	tmp = 0.0;
	if (z <= -9.5e-64)
		tmp = 0.0 - z;
	elseif (z <= 4.4e+89)
		tmp = x * log((x / y));
	else
		tmp = 0.0 - z;
	end
	tmp_2 = tmp;
end
code[x_, y_, z_] := If[LessEqual[z, -9.5e-64], N[(0.0 - z), $MachinePrecision], If[LessEqual[z, 4.4e+89], N[(x * N[Log[N[(x / y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision], N[(0.0 - z), $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;z \leq -9.5 \cdot 10^{-64}:\\
\;\;\;\;0 - z\\

\mathbf{elif}\;z \leq 4.4 \cdot 10^{+89}:\\
\;\;\;\;x \cdot \log \left(\frac{x}{y}\right)\\

\mathbf{else}:\\
\;\;\;\;0 - z\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if z < -9.50000000000000043e-64 or 4.4e89 < z

    1. Initial program 76.1%

      \[x \cdot \log \left(\frac{x}{y}\right) - z \]
    2. Add Preprocessing
    3. Taylor expanded in x around 0

      \[\leadsto \color{blue}{-1 \cdot z} \]
    4. Step-by-step derivation
      1. mul-1-negN/A

        \[\leadsto \color{blue}{\mathsf{neg}\left(z\right)} \]
      2. neg-sub0N/A

        \[\leadsto \color{blue}{0 - z} \]
      3. --lowering--.f64 71.1

        \[\leadsto \color{blue}{0 - z} \]
    5. Simplified 71.1%

      \[\leadsto \color{blue}{0 - z} \]
    6. Step-by-step derivation
      1. sub0-negN/A

        \[\leadsto \color{blue}{\mathsf{neg}\left(z\right)} \]
      2. +-lft-identityN/A

        \[\leadsto \mathsf{neg}\left(\color{blue}{\left(0 + z\right)}\right) \]
      3. neg-lowering-neg.f64N/A

        \[\leadsto \color{blue}{\mathsf{neg}\left(\left(0 + z\right)\right)} \]
      4. +-lft-identity 71.1

        \[\leadsto -\color{blue}{z} \]
    7. Applied egg-rr 71.1%

      \[\leadsto \color{blue}{-z} \]

    if -9.50000000000000043e-64 < z < 4.4e89

    1. Initial program 81.2%

      \[x \cdot \log \left(\frac{x}{y}\right) - z \]
    2. Add Preprocessing
    3. Taylor expanded in x around inf

      \[\leadsto \color{blue}{x \cdot \left(\log \left(\frac{1}{y}\right) + -1 \cdot \log \left(\frac{1}{x}\right)\right)} \]
    4. Step-by-step derivation
      1. distribute-rgt-inN/A

        \[\leadsto \color{blue}{\log \left(\frac{1}{y}\right) \cdot x + \left(-1 \cdot \log \left(\frac{1}{x}\right)\right) \cdot x} \]
      2. mul-1-negN/A

        \[\leadsto \log \left(\frac{1}{y}\right) \cdot x + \color{blue}{\left(\mathsf{neg}\left(\log \left(\frac{1}{x}\right)\right)\right)} \cdot x \]
      3. log-recN/A

        \[\leadsto \log \left(\frac{1}{y}\right) \cdot x + \left(\mathsf{neg}\left(\color{blue}{\left(\mathsf{neg}\left(\log x\right)\right)}\right)\right) \cdot x \]
      4. remove-double-negN/A

        \[\leadsto \log \left(\frac{1}{y}\right) \cdot x + \color{blue}{\log x} \cdot x \]
      5. distribute-rgt-inN/A

        \[\leadsto \color{blue}{x \cdot \left(\log \left(\frac{1}{y}\right) + \log x\right)} \]
      6. +-commutativeN/A

        \[\leadsto x \cdot \color{blue}{\left(\log x + \log \left(\frac{1}{y}\right)\right)} \]
      7. +-rgt-identityN/A

        \[\leadsto \color{blue}{x \cdot \left(\log x + \log \left(\frac{1}{y}\right)\right) + 0} \]
      8. accelerator-lowering-fma.f64N/A

        \[\leadsto \color{blue}{\mathsf{fma}\left(x, \log x + \log \left(\frac{1}{y}\right), 0\right)} \]
      9. log-recN/A

        \[\leadsto \mathsf{fma}\left(x, \log x + \color{blue}{\left(\mathsf{neg}\left(\log y\right)\right)}, 0\right) \]
      10. unsub-negN/A

        \[\leadsto \mathsf{fma}\left(x, \color{blue}{\log x - \log y}, 0\right) \]
      11. --lowering--.f64N/A

        \[\leadsto \mathsf{fma}\left(x, \color{blue}{\log x - \log y}, 0\right) \]
      12. log-lowering-log.f64N/A

        \[\leadsto \mathsf{fma}\left(x, \color{blue}{\log x} - \log y, 0\right) \]
      13. log-lowering-log.f64 35.7

        \[\leadsto \mathsf{fma}\left(x, \log x - \color{blue}{\log y}, 0\right) \]
    5. Simplified 35.7%

      \[\leadsto \color{blue}{\mathsf{fma}\left(x, \log x - \log y, 0\right)} \]
    6. Step-by-step derivation
      1. +-rgt-identityN/A

        \[\leadsto \color{blue}{x \cdot \left(\log x - \log y\right)} \]
      2. diff-logN/A

        \[\leadsto x \cdot \color{blue}{\log \left(\frac{x}{y}\right)} \]
      3. *-commutativeN/A

        \[\leadsto \color{blue}{\log \left(\frac{x}{y}\right) \cdot x} \]
      4. *-lowering-*.f64N/A

        \[\leadsto \color{blue}{\log \left(\frac{x}{y}\right) \cdot x} \]
      5. log-lowering-log.f64N/A

        \[\leadsto \color{blue}{\log \left(\frac{x}{y}\right)} \cdot x \]
      6. /-lowering-/.f64 65.4

        \[\leadsto \log \color{blue}{\left(\frac{x}{y}\right)} \cdot x \]
    7. Applied egg-rr 65.4%

      \[\leadsto \color{blue}{\log \left(\frac{x}{y}\right) \cdot x} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification 67.9%

    \[\leadsto \begin{array}{l} \mathbf{if}\;z \leq -9.5 \cdot 10^{-64}:\\ \;\;\;\;0 - z\\ \mathbf{elif}\;z \leq 4.4 \cdot 10^{+89}:\\ \;\;\;\;x \cdot \log \left(\frac{x}{y}\right)\\ \mathbf{else}:\\ \;\;\;\;0 - z\\ \end{array} \]
  5. Add Preprocessing

Alternative 10: 49.3% accurate, 30.0× speedup?

\[\begin{array}{l} \\ 0 - z \end{array} \]
(FPCore (x y z) :precision binary64 (- 0.0 z))
/* Herbie alternative 10 (49.3% accurate, ~30x speedup): Taylor expansion of
 * x*log(x/y) - z around x = 0 keeps only the -z term; x and y are unused.
 * Written 0.0 - z (not -z) to preserve the sign of zero the original
 * subtraction produces. */
double code(double x, double y, double z) {
	double result = 0.0 - z;
	return result;
}
! Herbie alternative 10 (49.3% accurate, ~30x speedup): Taylor expansion of
! x*log(x/y) - z around x = 0 keeps only the -z term; x and y are ignored.
real(8) function code(x, y, z)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    code = 0.0d0 - z
end function
// Herbie alternative 10 (49.3% accurate, ~30x speedup): Taylor expansion of
// x*log(x/y) - z around x = 0 keeps only the -z term; x and y are ignored.
public static double code(double x, double y, double z) {
	return 0.0 - z;
}
def code(x, y, z):
	"""Herbie alternative 10: only the -z term survives; x and y are ignored.

	Written 0.0 - z (not -z) to preserve the sign of zero produced by the
	original subtraction.
	"""
	result = 0.0 - z
	return result
# Herbie alternative 10 (49.3% accurate, ~30x speedup): Taylor expansion of
# x*log(x/y) - z around x = 0 keeps only the -z term; x and y are ignored.
function code(x, y, z)
	return Float64(0.0 - z)
end
% Herbie alternative 10 (49.3% accurate, ~30x speedup): Taylor expansion of
% x*log(x/y) - z around x = 0 keeps only the -z term; x and y are ignored.
function tmp = code(x, y, z)
	tmp = 0.0 - z;
end
code[x_, y_, z_] := N[(0.0 - z), $MachinePrecision]
\begin{array}{l}

\\
0 - z
\end{array}
Derivation
  1. Initial program 79.0%

    \[x \cdot \log \left(\frac{x}{y}\right) - z \]
  2. Add Preprocessing
  3. Taylor expanded in x around 0

    \[\leadsto \color{blue}{-1 \cdot z} \]
  4. Step-by-step derivation
    1. mul-1-negN/A

      \[\leadsto \color{blue}{\mathsf{neg}\left(z\right)} \]
    2. neg-sub0N/A

      \[\leadsto \color{blue}{0 - z} \]
    3. --lowering--.f64 44.2

      \[\leadsto \color{blue}{0 - z} \]
  5. Simplified 44.2%

    \[\leadsto \color{blue}{0 - z} \]
  6. Step-by-step derivation
    1. sub0-negN/A

      \[\leadsto \color{blue}{\mathsf{neg}\left(z\right)} \]
    2. +-lft-identityN/A

      \[\leadsto \mathsf{neg}\left(\color{blue}{\left(0 + z\right)}\right) \]
    3. neg-lowering-neg.f64N/A

      \[\leadsto \color{blue}{\mathsf{neg}\left(\left(0 + z\right)\right)} \]
    4. +-lft-identity 44.2

      \[\leadsto -\color{blue}{z} \]
  7. Applied egg-rr 44.2%

    \[\leadsto \color{blue}{-z} \]
  8. Final simplification 44.2%

    \[\leadsto 0 - z \]
  9. Add Preprocessing

Alternative 11: 2.2% accurate, 120.0× speedup?

\[\begin{array}{l} \\ z \end{array} \]
(FPCore (x y z) :precision binary64 z)
/* Herbie alternative 11 (2.2% accurate, ~120x speedup): returns z itself;
 * x and y are ignored. NOTE(review): the derivation's final egg rewrite
 * dropped the negation of the -z Taylor limit, hence the very low accuracy;
 * this is only the maximal-speed extreme of the tradeoff curve. */
double code(double x, double y, double z) {
	return z;
}
! Herbie alternative 11 (2.2% accurate, ~120x speedup): returns z itself;
! x and y are ignored. Only useful as the maximal-speed tradeoff point.
real(8) function code(x, y, z)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    code = z
end function
// Herbie alternative 11 (2.2% accurate, ~120x speedup): returns z itself;
// x and y are ignored. Only useful as the maximal-speed tradeoff point.
public static double code(double x, double y, double z) {
	return z;
}
def code(x, y, z):
	"""Herbie alternative 11 (2.2% accurate): returns z; x and y are ignored."""
	return z
# Herbie alternative 11 (2.2% accurate, ~120x speedup): returns z itself;
# x and y are ignored. Only useful as the maximal-speed tradeoff point.
function code(x, y, z)
	return z
end
% Herbie alternative 11 (2.2% accurate, ~120x speedup): returns z itself;
% x and y are ignored. Only useful as the maximal-speed tradeoff point.
function tmp = code(x, y, z)
	tmp = z;
end
code[x_, y_, z_] := z
\begin{array}{l}

\\
z
\end{array}
Derivation
  1. Initial program 79.0%

    \[x \cdot \log \left(\frac{x}{y}\right) - z \]
  2. Add Preprocessing
  3. Taylor expanded in x around 0

    \[\leadsto \color{blue}{-1 \cdot z} \]
  4. Step-by-step derivation
    1. mul-1-negN/A

      \[\leadsto \color{blue}{\mathsf{neg}\left(z\right)} \]
    2. neg-sub0N/A

      \[\leadsto \color{blue}{0 - z} \]
    3. --lowering--.f64 44.2

      \[\leadsto \color{blue}{0 - z} \]
  5. Simplified 44.2%

    \[\leadsto \color{blue}{0 - z} \]
  6. Step-by-step derivation
    1. flip3--N/A

      \[\leadsto \color{blue}{\frac{{0}^{3} - {z}^{3}}{0 \cdot 0 + \left(z \cdot z + 0 \cdot z\right)}} \]
    2. metadata-evalN/A

      \[\leadsto \frac{\color{blue}{0} - {z}^{3}}{0 \cdot 0 + \left(z \cdot z + 0 \cdot z\right)} \]
    3. sub0-negN/A

      \[\leadsto \frac{\color{blue}{\mathsf{neg}\left({z}^{3}\right)}}{0 \cdot 0 + \left(z \cdot z + 0 \cdot z\right)} \]
    4. cube-negN/A

      \[\leadsto \frac{\color{blue}{{\left(\mathsf{neg}\left(z\right)\right)}^{3}}}{0 \cdot 0 + \left(z \cdot z + 0 \cdot z\right)} \]
    5. sub0-negN/A

      \[\leadsto \frac{{\color{blue}{\left(0 - z\right)}}^{3}}{0 \cdot 0 + \left(z \cdot z + 0 \cdot z\right)} \]
    6. sqr-powN/A

      \[\leadsto \frac{\color{blue}{{\left(0 - z\right)}^{\left(\frac{3}{2}\right)} \cdot {\left(0 - z\right)}^{\left(\frac{3}{2}\right)}}}{0 \cdot 0 + \left(z \cdot z + 0 \cdot z\right)} \]
    7. unpow-prod-downN/A

      \[\leadsto \frac{\color{blue}{{\left(\left(0 - z\right) \cdot \left(0 - z\right)\right)}^{\left(\frac{3}{2}\right)}}}{0 \cdot 0 + \left(z \cdot z + 0 \cdot z\right)} \]
    8. sub0-negN/A

      \[\leadsto \frac{{\left(\color{blue}{\left(\mathsf{neg}\left(z\right)\right)} \cdot \left(0 - z\right)\right)}^{\left(\frac{3}{2}\right)}}{0 \cdot 0 + \left(z \cdot z + 0 \cdot z\right)} \]
    9. sub0-negN/A

      \[\leadsto \frac{{\left(\left(\mathsf{neg}\left(z\right)\right) \cdot \color{blue}{\left(\mathsf{neg}\left(z\right)\right)}\right)}^{\left(\frac{3}{2}\right)}}{0 \cdot 0 + \left(z \cdot z + 0 \cdot z\right)} \]
    10. sqr-negN/A

      \[\leadsto \frac{{\color{blue}{\left(z \cdot z\right)}}^{\left(\frac{3}{2}\right)}}{0 \cdot 0 + \left(z \cdot z + 0 \cdot z\right)} \]
    11. pow2N/A

      \[\leadsto \frac{{\color{blue}{\left({z}^{2}\right)}}^{\left(\frac{3}{2}\right)}}{0 \cdot 0 + \left(z \cdot z + 0 \cdot z\right)} \]
    12. +-lft-identityN/A

      \[\leadsto \frac{{\left({\color{blue}{\left(0 + z\right)}}^{2}\right)}^{\left(\frac{3}{2}\right)}}{0 \cdot 0 + \left(z \cdot z + 0 \cdot z\right)} \]
    13. pow-powN/A

      \[\leadsto \frac{\color{blue}{{\left(0 + z\right)}^{\left(2 \cdot \frac{3}{2}\right)}}}{0 \cdot 0 + \left(z \cdot z + 0 \cdot z\right)} \]
    14. pow-sqrN/A

      \[\leadsto \frac{\color{blue}{{\left(0 + z\right)}^{\left(\frac{3}{2}\right)} \cdot {\left(0 + z\right)}^{\left(\frac{3}{2}\right)}}}{0 \cdot 0 + \left(z \cdot z + 0 \cdot z\right)} \]
    15. sqr-powN/A

      \[\leadsto \frac{\color{blue}{{\left(0 + z\right)}^{3}}}{0 \cdot 0 + \left(z \cdot z + 0 \cdot z\right)} \]
    16. metadata-evalN/A

      \[\leadsto \frac{{\left(0 + z\right)}^{3}}{\color{blue}{0} + \left(z \cdot z + 0 \cdot z\right)} \]
    17. +-lft-identityN/A

      \[\leadsto \frac{{\left(0 + z\right)}^{3}}{\color{blue}{z \cdot z + 0 \cdot z}} \]
    18. +-lft-identityN/A

      \[\leadsto \frac{{\left(0 + z\right)}^{3}}{z \cdot z + 0 \cdot \color{blue}{\left(0 + z\right)}} \]
    19. mul0-lftN/A

      \[\leadsto \frac{{\left(0 + z\right)}^{3}}{z \cdot z + \color{blue}{0}} \]
    20. +-rgt-identityN/A

      \[\leadsto \frac{{\left(0 + z\right)}^{3}}{\color{blue}{z \cdot z}} \]
    21. pow2N/A

      \[\leadsto \frac{{\left(0 + z\right)}^{3}}{\color{blue}{{z}^{2}}} \]
    22. +-lft-identityN/A

      \[\leadsto \frac{{\left(0 + z\right)}^{3}}{{\color{blue}{\left(0 + z\right)}}^{2}} \]
    23. pow-divN/A

      \[\leadsto \color{blue}{{\left(0 + z\right)}^{\left(3 - 2\right)}} \]
    24. metadata-evalN/A

      \[\leadsto {\left(0 + z\right)}^{\color{blue}{1}} \]
  7. Applied egg-rr 2.6%

    \[\leadsto \color{blue}{z} \]
  8. Add Preprocessing

Developer Target 1: 88.4% accurate, 0.6× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;y < 7.595077799083773 \cdot 10^{-308}:\\ \;\;\;\;x \cdot \log \left(\frac{x}{y}\right) - z\\ \mathbf{else}:\\ \;\;\;\;x \cdot \left(\log x - \log y\right) - z\\ \end{array} \end{array} \]
; Developer target (88.4% accurate): split log(x/y) into log(x) - log(y)
; except when y is below ~7.6e-308, where the quotient form is kept.
(FPCore (x y z)
 :precision binary64
 (if (< y 7.595077799083773e-308)
   (- (* x (log (/ x y))) z)
   (- (* x (- (log x) (log y))) z)))
double code(double x, double y, double z) {
	double tmp;
	if (y < 7.595077799083773e-308) {
		tmp = (x * log((x / y))) - z;
	} else {
		tmp = (x * (log(x) - log(y))) - z;
	}
	return tmp;
}
! Developer target (88.4% accurate) from math-functions-0.1.5.2: splits
! log(x/y) into log(x) - log(y) except when y is below ~7.6e-308, where the
! quotient form is kept -- presumably to handle very small y; confirm
! against the library source.
real(8) function code(x, y, z)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8) :: tmp
    if (y < 7.595077799083773d-308) then
        tmp = (x * log((x / y))) - z
    else
        tmp = (x * (log(x) - log(y))) - z
    end if
    code = tmp
end function
// Developer target (88.4% accurate) from math-functions-0.1.5.2: splits
// log(x/y) into log(x) - log(y) except when y is below ~7.6e-308, where the
// quotient form is kept -- presumably to handle very small y; confirm
// against the library source.
public static double code(double x, double y, double z) {
	double tmp;
	if (y < 7.595077799083773e-308) {
		tmp = (x * Math.log((x / y))) - z;
	} else {
		tmp = (x * (Math.log(x) - Math.log(y))) - z;
	}
	return tmp;
}
def code(x, y, z):
	"""Developer-target form of x*log(x/y) - z from math-functions-0.1.5.2.

	For y below ~7.6e-308 the quotient form log(x/y) is kept; otherwise
	the split log(x) - log(y) is used. Branch condition is identical to
	the original, so NaN y takes the split form as before.
	"""
	if y < 7.595077799083773e-308:
		return x * math.log(x / y) - z
	return x * (math.log(x) - math.log(y)) - z
# Developer target (88.4% accurate) from math-functions-0.1.5.2: splits
# log(x/y) into log(x) - log(y) except when y is below ~7.6e-308, where the
# quotient form is kept -- presumably to handle very small y; confirm
# against the library source.
function code(x, y, z)
	tmp = 0.0
	if (y < 7.595077799083773e-308)
		tmp = Float64(Float64(x * log(Float64(x / y))) - z);
	else
		tmp = Float64(Float64(x * Float64(log(x) - log(y))) - z);
	end
	return tmp
end
% Developer target (88.4% accurate) from math-functions-0.1.5.2: splits
% log(x/y) into log(x) - log(y) except when y is below ~7.6e-308, where the
% quotient form is kept -- presumably to handle very small y; confirm
% against the library source.
function tmp_2 = code(x, y, z)
	tmp = 0.0;
	if (y < 7.595077799083773e-308)
		tmp = (x * log((x / y))) - z;
	else
		tmp = (x * (log(x) - log(y))) - z;
	end
	tmp_2 = tmp;
end
code[x_, y_, z_] := If[Less[y, 7.595077799083773e-308], N[(N[(x * N[Log[N[(x / y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] - z), $MachinePrecision], N[(N[(x * N[(N[Log[x], $MachinePrecision] - N[Log[y], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - z), $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;y < 7.595077799083773 \cdot 10^{-308}:\\
\;\;\;\;x \cdot \log \left(\frac{x}{y}\right) - z\\

\mathbf{else}:\\
\;\;\;\;x \cdot \left(\log x - \log y\right) - z\\


\end{array}
\end{array}

Reproduce

?
herbie shell --seed 2024196 
(FPCore (x y z)
  :name "Numeric.SpecFunctions.Extra:bd0 from math-functions-0.1.5.2"
  :precision binary64

  :alt
  (! :herbie-platform default (if (< y 7595077799083773/100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000) (- (* x (log (/ x y))) z) (- (* x (- (log x) (log y))) z)))

  (- (* x (log (/ x y))) z))