Numeric.SpecFunctions:$slogFactorial from math-functions-0.1.5.2, B

Percentage Accurate: 94.1% → 95.8%
Time: 21.2s
Alternatives: 16
Speedup: 0.6×

Specification

?
\[\begin{array}{l} \\ \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \end{array} \]
; FPCore specification: correction series for logFactorial (Stirling-style form)
; from Numeric.SpecFunctions, math-functions-0.1.5.2. binary64 throughout.
; NOTE(review): operation order is intentional; do not reassociate.
(FPCore (x y z)
 :precision binary64
 (+
  (+ (- (* (- x 0.5) (log x)) x) 0.91893853320467)
  (/
   (+
    (* (- (* (+ y 0.0007936500793651) z) 0.0027777777777778) z)
    0.083333333333333)
   x)))
// Herbie-generated C translation of the specification (binary64).
// NOTE(review): the exact association of FP operations determines rounding; do not refactor.
double code(double x, double y, double z) {
	return ((((x - 0.5) * log(x)) - x) + 0.91893853320467) + ((((((y + 0.0007936500793651) * z) - 0.0027777777777778) * z) + 0.083333333333333) / x);
}
! Herbie-generated Fortran (real(8)) translation of the specification.
! NOTE(review): FP operation order is intentional; do not reassociate.
real(8) function code(x, y, z)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    code = ((((x - 0.5d0) * log(x)) - x) + 0.91893853320467d0) + ((((((y + 0.0007936500793651d0) * z) - 0.0027777777777778d0) * z) + 0.083333333333333d0) / x)
end function
// Herbie-generated Java translation of the specification (double precision).
// NOTE(review): FP operation order is intentional; do not reassociate.
public static double code(double x, double y, double z) {
	return ((((x - 0.5) * Math.log(x)) - x) + 0.91893853320467) + ((((((y + 0.0007936500793651) * z) - 0.0027777777777778) * z) + 0.083333333333333) / x);
}
# Herbie-generated Python translation of the specification (requires `import math`).
# NOTE(review): FP operation order is intentional; do not reassociate.
def code(x, y, z):
	return ((((x - 0.5) * math.log(x)) - x) + 0.91893853320467) + ((((((y + 0.0007936500793651) * z) - 0.0027777777777778) * z) + 0.083333333333333) / x)
# Herbie-generated Julia translation; explicit Float64() wraps pin each
# intermediate to binary64 rounding. Do not remove or reassociate.
function code(x, y, z)
	return Float64(Float64(Float64(Float64(Float64(x - 0.5) * log(x)) - x) + 0.91893853320467) + Float64(Float64(Float64(Float64(Float64(Float64(y + 0.0007936500793651) * z) - 0.0027777777777778) * z) + 0.083333333333333) / x))
end
% Herbie-generated MATLAB/Octave translation of the specification.
% NOTE(review): FP operation order is intentional; do not reassociate.
function tmp = code(x, y, z)
	tmp = ((((x - 0.5) * log(x)) - x) + 0.91893853320467) + ((((((y + 0.0007936500793651) * z) - 0.0027777777777778) * z) + 0.083333333333333) / x);
end
(* Herbie-generated Wolfram translation; N[..., $MachinePrecision] forces machine rounding at every step to mirror binary64. *)
code[x_, y_, z_] := N[(N[(N[(N[(N[(x - 0.5), $MachinePrecision] * N[Log[x], $MachinePrecision]), $MachinePrecision] - x), $MachinePrecision] + 0.91893853320467), $MachinePrecision] + N[(N[(N[(N[(N[(N[(y + 0.0007936500793651), $MachinePrecision] * z), $MachinePrecision] - 0.0027777777777778), $MachinePrecision] * z), $MachinePrecision] + 0.083333333333333), $MachinePrecision] / x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x}
\end{array}

Sampling outcomes in binary64 precision:

Local Percentage Accuracy vs ?

The average percentage accuracy by input value. Horizontal axis shows value of an input variable; the variable is chosen in the title. Vertical axis is accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion. These can be toggled with buttons below the plot. The line is an average while dots represent individual samples.

Accuracy vs Speed?

Herbie found 16 alternatives:

AlternativeAccuracySpeedup
The accuracy (vertical axis) and speed (horizontal axis) of each alternatives. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative.The line shows the best available speed-accuracy tradeoffs.

Initial Program: 94.1% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \end{array} \]
; FPCore for the initial (unmodified) program, repeated here for the
; "Initial Program" report section. binary64; operation order intentional.
(FPCore (x y z)
 :precision binary64
 (+
  (+ (- (* (- x 0.5) (log x)) x) 0.91893853320467)
  (/
   (+
    (* (- (* (+ y 0.0007936500793651) z) 0.0027777777777778) z)
    0.083333333333333)
   x)))
// Initial program, C translation (binary64).
// NOTE(review): FP operation order is intentional; do not reassociate.
double code(double x, double y, double z) {
	return ((((x - 0.5) * log(x)) - x) + 0.91893853320467) + ((((((y + 0.0007936500793651) * z) - 0.0027777777777778) * z) + 0.083333333333333) / x);
}
! Initial program, Fortran (real(8)) translation.
! NOTE(review): FP operation order is intentional; do not reassociate.
real(8) function code(x, y, z)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    code = ((((x - 0.5d0) * log(x)) - x) + 0.91893853320467d0) + ((((((y + 0.0007936500793651d0) * z) - 0.0027777777777778d0) * z) + 0.083333333333333d0) / x)
end function
// Initial program, Java translation (double precision).
// NOTE(review): FP operation order is intentional; do not reassociate.
public static double code(double x, double y, double z) {
	return ((((x - 0.5) * Math.log(x)) - x) + 0.91893853320467) + ((((((y + 0.0007936500793651) * z) - 0.0027777777777778) * z) + 0.083333333333333) / x);
}
# Initial program, Python translation (requires `import math`).
# NOTE(review): FP operation order is intentional; do not reassociate.
def code(x, y, z):
	return ((((x - 0.5) * math.log(x)) - x) + 0.91893853320467) + ((((((y + 0.0007936500793651) * z) - 0.0027777777777778) * z) + 0.083333333333333) / x)
# Initial program, Julia translation; Float64() wraps pin each intermediate rounding.
function code(x, y, z)
	return Float64(Float64(Float64(Float64(Float64(x - 0.5) * log(x)) - x) + 0.91893853320467) + Float64(Float64(Float64(Float64(Float64(Float64(y + 0.0007936500793651) * z) - 0.0027777777777778) * z) + 0.083333333333333) / x))
end
% Initial program, MATLAB/Octave translation.
% NOTE(review): FP operation order is intentional; do not reassociate.
function tmp = code(x, y, z)
	tmp = ((((x - 0.5) * log(x)) - x) + 0.91893853320467) + ((((((y + 0.0007936500793651) * z) - 0.0027777777777778) * z) + 0.083333333333333) / x);
end
(* Initial program, Wolfram translation; N[..., $MachinePrecision] mirrors binary64 rounding at each step. *)
code[x_, y_, z_] := N[(N[(N[(N[(N[(x - 0.5), $MachinePrecision] * N[Log[x], $MachinePrecision]), $MachinePrecision] - x), $MachinePrecision] + 0.91893853320467), $MachinePrecision] + N[(N[(N[(N[(N[(N[(y + 0.0007936500793651), $MachinePrecision] * z), $MachinePrecision] - 0.0027777777777778), $MachinePrecision] * z), $MachinePrecision] + 0.083333333333333), $MachinePrecision] / x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x}
\end{array}

Alternative 1: 95.8% accurate, 0.3× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_0 := \left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\\ \mathbf{if}\;x \leq 10^{-28}:\\ \;\;\;\;t_0 + {\left(\frac{x}{\mathsf{fma}\left(z, \mathsf{fma}\left(y + 0.0007936500793651, z, -0.0027777777777778\right), 0.083333333333333\right)}\right)}^{-1}\\ \mathbf{else}:\\ \;\;\;\;t_0 + \left(-0.0027777777777778 \cdot \frac{z}{x} + \left(0.083333333333333 \cdot \frac{1}{x} + \left(y + 0.0007936500793651\right) \cdot \frac{{z}^{2}}{x}\right)\right)\\ \end{array} \end{array} \]
; Alternative 1: regime split at x <= 1e-28.
; Small-x branch: fma-based Horner evaluation of the series, then inverted via pow -1.
; Large-x branch: Taylor expansion in z (around inf), each term divided by x separately.
(FPCore (x y z)
 :precision binary64
 (let* ((t_0 (+ (- (* (- x 0.5) (log x)) x) 0.91893853320467)))
   (if (<= x 1e-28)
     (+
      t_0
      (pow
       (/
        x
        (fma
         z
         (fma (+ y 0.0007936500793651) z -0.0027777777777778)
         0.083333333333333))
       -1.0))
     (+
      t_0
      (+
       (* -0.0027777777777778 (/ z x))
       (+
        (* 0.083333333333333 (/ 1.0 x))
        (* (+ y 0.0007936500793651) (/ (pow z 2.0) x))))))))
// Alternative 1 (C): regime split at x <= 1e-28.
// t_0 is the shared Stirling-style prefix; the small-x branch uses nested fma,
// the large-x branch the Taylor expansion in z. Do not reassociate FP operations.
double code(double x, double y, double z) {
	double t_0 = (((x - 0.5) * log(x)) - x) + 0.91893853320467;
	double tmp;
	if (x <= 1e-28) {
		tmp = t_0 + pow((x / fma(z, fma((y + 0.0007936500793651), z, -0.0027777777777778), 0.083333333333333)), -1.0);
	} else {
		tmp = t_0 + ((-0.0027777777777778 * (z / x)) + ((0.083333333333333 * (1.0 / x)) + ((y + 0.0007936500793651) * (pow(z, 2.0) / x))));
	}
	return tmp;
}
# Alternative 1 (Julia): regime split at x <= 1e-28; Float64() wraps pin rounding.
# Small-x branch uses nested fma; large-x branch is the Taylor expansion in z.
function code(x, y, z)
	t_0 = Float64(Float64(Float64(Float64(x - 0.5) * log(x)) - x) + 0.91893853320467)
	tmp = 0.0
	if (x <= 1e-28)
		tmp = Float64(t_0 + (Float64(x / fma(z, fma(Float64(y + 0.0007936500793651), z, -0.0027777777777778), 0.083333333333333)) ^ -1.0));
	else
		tmp = Float64(t_0 + Float64(Float64(-0.0027777777777778 * Float64(z / x)) + Float64(Float64(0.083333333333333 * Float64(1.0 / x)) + Float64(Float64(y + 0.0007936500793651) * Float64((z ^ 2.0) / x)))));
	end
	return tmp
end
(* Alternative 1 (Wolfram): regime split at x <= 1e-28; fma is expanded to multiply-add here. N[..., $MachinePrecision] mirrors binary64 rounding. *)
code[x_, y_, z_] := Block[{t$95$0 = N[(N[(N[(N[(x - 0.5), $MachinePrecision] * N[Log[x], $MachinePrecision]), $MachinePrecision] - x), $MachinePrecision] + 0.91893853320467), $MachinePrecision]}, If[LessEqual[x, 1e-28], N[(t$95$0 + N[Power[N[(x / N[(z * N[(N[(y + 0.0007936500793651), $MachinePrecision] * z + -0.0027777777777778), $MachinePrecision] + 0.083333333333333), $MachinePrecision]), $MachinePrecision], -1.0], $MachinePrecision]), $MachinePrecision], N[(t$95$0 + N[(N[(-0.0027777777777778 * N[(z / x), $MachinePrecision]), $MachinePrecision] + N[(N[(0.083333333333333 * N[(1.0 / x), $MachinePrecision]), $MachinePrecision] + N[(N[(y + 0.0007936500793651), $MachinePrecision] * N[(N[Power[z, 2.0], $MachinePrecision] / x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
t_0 := \left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\\
\mathbf{if}\;x \leq 10^{-28}:\\
\;\;\;\;t_0 + {\left(\frac{x}{\mathsf{fma}\left(z, \mathsf{fma}\left(y + 0.0007936500793651, z, -0.0027777777777778\right), 0.083333333333333\right)}\right)}^{-1}\\

\mathbf{else}:\\
\;\;\;\;t_0 + \left(-0.0027777777777778 \cdot \frac{z}{x} + \left(0.083333333333333 \cdot \frac{1}{x} + \left(y + 0.0007936500793651\right) \cdot \frac{{z}^{2}}{x}\right)\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if x < 9.99999999999999971e-29

    1. Initial program 99.7%

      \[\left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
    2. Add Preprocessing
    3. Step-by-step derivation
      1. clear-num99.7%

        \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \color{blue}{\frac{1}{\frac{x}{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}}} \]
      2. inv-pow99.7%

        \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \color{blue}{{\left(\frac{x}{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}\right)}^{-1}} \]
      3. *-commutative99.7%

        \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + {\left(\frac{x}{\color{blue}{z \cdot \left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right)} + 0.083333333333333}\right)}^{-1} \]
      4. fma-udef99.7%

        \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + {\left(\frac{x}{\color{blue}{\mathsf{fma}\left(z, \left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778, 0.083333333333333\right)}}\right)}^{-1} \]
      5. fma-neg99.7%

        \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + {\left(\frac{x}{\mathsf{fma}\left(z, \color{blue}{\mathsf{fma}\left(y + 0.0007936500793651, z, -0.0027777777777778\right)}, 0.083333333333333\right)}\right)}^{-1} \]
      6. metadata-eval99.7%

        \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + {\left(\frac{x}{\mathsf{fma}\left(z, \mathsf{fma}\left(y + 0.0007936500793651, z, \color{blue}{-0.0027777777777778}\right), 0.083333333333333\right)}\right)}^{-1} \]
    4. Applied egg-rr99.7%

      \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \color{blue}{{\left(\frac{x}{\mathsf{fma}\left(z, \mathsf{fma}\left(y + 0.0007936500793651, z, -0.0027777777777778\right), 0.083333333333333\right)}\right)}^{-1}} \]

    if 9.99999999999999971e-29 < x

    1. Initial program 84.5%

      \[\left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
    2. Add Preprocessing
    3. Taylor expanded in z around inf 84.5%

      \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \color{blue}{\left(-0.0027777777777778 \cdot \frac{z}{x} + \left(0.083333333333333 \cdot \frac{1}{x} + \frac{{z}^{2} \cdot \left(0.0007936500793651 + y\right)}{x}\right)\right)} \]
    4. Step-by-step derivation
      1. *-commutative84.5%

        \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(-0.0027777777777778 \cdot \frac{z}{x} + \left(0.083333333333333 \cdot \frac{1}{x} + \frac{\color{blue}{\left(0.0007936500793651 + y\right) \cdot {z}^{2}}}{x}\right)\right) \]
      2. *-un-lft-identity84.5%

        \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(-0.0027777777777778 \cdot \frac{z}{x} + \left(0.083333333333333 \cdot \frac{1}{x} + \frac{\left(0.0007936500793651 + y\right) \cdot {z}^{2}}{\color{blue}{1 \cdot x}}\right)\right) \]
      3. times-frac95.5%

        \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(-0.0027777777777778 \cdot \frac{z}{x} + \left(0.083333333333333 \cdot \frac{1}{x} + \color{blue}{\frac{0.0007936500793651 + y}{1} \cdot \frac{{z}^{2}}{x}}\right)\right) \]
      4. +-commutative95.5%

        \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(-0.0027777777777778 \cdot \frac{z}{x} + \left(0.083333333333333 \cdot \frac{1}{x} + \frac{\color{blue}{y + 0.0007936500793651}}{1} \cdot \frac{{z}^{2}}{x}\right)\right) \]
    5. Applied egg-rr95.5%

      \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(-0.0027777777777778 \cdot \frac{z}{x} + \left(0.083333333333333 \cdot \frac{1}{x} + \color{blue}{\frac{y + 0.0007936500793651}{1} \cdot \frac{{z}^{2}}{x}}\right)\right) \]
  3. Recombined 2 regimes into one program.
  4. Final simplification97.5%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq 10^{-28}:\\ \;\;\;\;\left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + {\left(\frac{x}{\mathsf{fma}\left(z, \mathsf{fma}\left(y + 0.0007936500793651, z, -0.0027777777777778\right), 0.083333333333333\right)}\right)}^{-1}\\ \mathbf{else}:\\ \;\;\;\;\left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(-0.0027777777777778 \cdot \frac{z}{x} + \left(0.083333333333333 \cdot \frac{1}{x} + \left(y + 0.0007936500793651\right) \cdot \frac{{z}^{2}}{x}\right)\right)\\ \end{array} \]
  5. Add Preprocessing

Alternative 2: 95.9% accurate, 0.3× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq 3 \cdot 10^{+27}:\\ \;\;\;\;\mathsf{fma}\left(x + -0.5, \log x, -x\right) + \left(0.91893853320467 + \frac{\mathsf{fma}\left(\mathsf{fma}\left(y + 0.0007936500793651, z, -0.0027777777777778\right), z, 0.083333333333333\right)}{x}\right)\\ \mathbf{else}:\\ \;\;\;\;\left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(-0.0027777777777778 \cdot \frac{z}{x} + \left(0.083333333333333 \cdot \frac{1}{x} + \left(y + 0.0007936500793651\right) \cdot \frac{{z}^{2}}{x}\right)\right)\\ \end{array} \end{array} \]
; Alternative 2: regime split at x <= 3e+27.
; Small/mid-x branch: fma forms of both the log term and the series numerator.
; Large-x branch: Taylor expansion in z (around inf).
(FPCore (x y z)
 :precision binary64
 (if (<= x 3e+27)
   (+
    (fma (+ x -0.5) (log x) (- x))
    (+
     0.91893853320467
     (/
      (fma
       (fma (+ y 0.0007936500793651) z -0.0027777777777778)
       z
       0.083333333333333)
      x)))
   (+
    (+ (- (* (- x 0.5) (log x)) x) 0.91893853320467)
    (+
     (* -0.0027777777777778 (/ z x))
     (+
      (* 0.083333333333333 (/ 1.0 x))
      (* (+ y 0.0007936500793651) (/ (pow z 2.0) x)))))))
// Alternative 2 (C): regime split at x <= 3e+27.
// First branch uses fma for both the log term and the series; second branch
// is the Taylor expansion in z. Do not reassociate FP operations.
double code(double x, double y, double z) {
	double tmp;
	if (x <= 3e+27) {
		tmp = fma((x + -0.5), log(x), -x) + (0.91893853320467 + (fma(fma((y + 0.0007936500793651), z, -0.0027777777777778), z, 0.083333333333333) / x));
	} else {
		tmp = ((((x - 0.5) * log(x)) - x) + 0.91893853320467) + ((-0.0027777777777778 * (z / x)) + ((0.083333333333333 * (1.0 / x)) + ((y + 0.0007936500793651) * (pow(z, 2.0) / x))));
	}
	return tmp;
}
# Alternative 2 (Julia): regime split at x <= 3e+27; Float64() wraps pin rounding.
function code(x, y, z)
	tmp = 0.0
	if (x <= 3e+27)
		tmp = Float64(fma(Float64(x + -0.5), log(x), Float64(-x)) + Float64(0.91893853320467 + Float64(fma(fma(Float64(y + 0.0007936500793651), z, -0.0027777777777778), z, 0.083333333333333) / x)));
	else
		tmp = Float64(Float64(Float64(Float64(Float64(x - 0.5) * log(x)) - x) + 0.91893853320467) + Float64(Float64(-0.0027777777777778 * Float64(z / x)) + Float64(Float64(0.083333333333333 * Float64(1.0 / x)) + Float64(Float64(y + 0.0007936500793651) * Float64((z ^ 2.0) / x)))));
	end
	return tmp
end
(* Alternative 2 (Wolfram): regime split at x <= 3e+27; fma is expanded to multiply-add. N[..., $MachinePrecision] mirrors binary64 rounding. *)
code[x_, y_, z_] := If[LessEqual[x, 3e+27], N[(N[(N[(x + -0.5), $MachinePrecision] * N[Log[x], $MachinePrecision] + (-x)), $MachinePrecision] + N[(0.91893853320467 + N[(N[(N[(N[(y + 0.0007936500793651), $MachinePrecision] * z + -0.0027777777777778), $MachinePrecision] * z + 0.083333333333333), $MachinePrecision] / x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[(N[(N[(x - 0.5), $MachinePrecision] * N[Log[x], $MachinePrecision]), $MachinePrecision] - x), $MachinePrecision] + 0.91893853320467), $MachinePrecision] + N[(N[(-0.0027777777777778 * N[(z / x), $MachinePrecision]), $MachinePrecision] + N[(N[(0.083333333333333 * N[(1.0 / x), $MachinePrecision]), $MachinePrecision] + N[(N[(y + 0.0007936500793651), $MachinePrecision] * N[(N[Power[z, 2.0], $MachinePrecision] / x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x \leq 3 \cdot 10^{+27}:\\
\;\;\;\;\mathsf{fma}\left(x + -0.5, \log x, -x\right) + \left(0.91893853320467 + \frac{\mathsf{fma}\left(\mathsf{fma}\left(y + 0.0007936500793651, z, -0.0027777777777778\right), z, 0.083333333333333\right)}{x}\right)\\

\mathbf{else}:\\
\;\;\;\;\left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(-0.0027777777777778 \cdot \frac{z}{x} + \left(0.083333333333333 \cdot \frac{1}{x} + \left(y + 0.0007936500793651\right) \cdot \frac{{z}^{2}}{x}\right)\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if x < 2.99999999999999976e27

    1. Initial program 99.6%

      \[\left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
    2. Step-by-step derivation
      1. associate-+l+99.6%

        \[\leadsto \color{blue}{\left(\left(x - 0.5\right) \cdot \log x - x\right) + \left(0.91893853320467 + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x}\right)} \]
      2. fma-neg99.6%

        \[\leadsto \color{blue}{\mathsf{fma}\left(x - 0.5, \log x, -x\right)} + \left(0.91893853320467 + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x}\right) \]
      3. sub-neg99.6%

        \[\leadsto \mathsf{fma}\left(\color{blue}{x + \left(-0.5\right)}, \log x, -x\right) + \left(0.91893853320467 + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x}\right) \]
      4. metadata-eval99.6%

        \[\leadsto \mathsf{fma}\left(x + \color{blue}{-0.5}, \log x, -x\right) + \left(0.91893853320467 + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x}\right) \]
      5. fma-def99.6%

        \[\leadsto \mathsf{fma}\left(x + -0.5, \log x, -x\right) + \left(0.91893853320467 + \frac{\color{blue}{\mathsf{fma}\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778, z, 0.083333333333333\right)}}{x}\right) \]
      6. fma-neg99.6%

        \[\leadsto \mathsf{fma}\left(x + -0.5, \log x, -x\right) + \left(0.91893853320467 + \frac{\mathsf{fma}\left(\color{blue}{\mathsf{fma}\left(y + 0.0007936500793651, z, -0.0027777777777778\right)}, z, 0.083333333333333\right)}{x}\right) \]
      7. metadata-eval99.6%

        \[\leadsto \mathsf{fma}\left(x + -0.5, \log x, -x\right) + \left(0.91893853320467 + \frac{\mathsf{fma}\left(\mathsf{fma}\left(y + 0.0007936500793651, z, \color{blue}{-0.0027777777777778}\right), z, 0.083333333333333\right)}{x}\right) \]
    3. Simplified99.6%

      \[\leadsto \color{blue}{\mathsf{fma}\left(x + -0.5, \log x, -x\right) + \left(0.91893853320467 + \frac{\mathsf{fma}\left(\mathsf{fma}\left(y + 0.0007936500793651, z, -0.0027777777777778\right), z, 0.083333333333333\right)}{x}\right)} \]
    4. Add Preprocessing

    if 2.99999999999999976e27 < x

    1. Initial program 81.5%

      \[\left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
    2. Add Preprocessing
    3. Taylor expanded in z around inf 81.5%

      \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \color{blue}{\left(-0.0027777777777778 \cdot \frac{z}{x} + \left(0.083333333333333 \cdot \frac{1}{x} + \frac{{z}^{2} \cdot \left(0.0007936500793651 + y\right)}{x}\right)\right)} \]
    4. Step-by-step derivation
      1. *-commutative81.5%

        \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(-0.0027777777777778 \cdot \frac{z}{x} + \left(0.083333333333333 \cdot \frac{1}{x} + \frac{\color{blue}{\left(0.0007936500793651 + y\right) \cdot {z}^{2}}}{x}\right)\right) \]
      2. *-un-lft-identity81.5%

        \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(-0.0027777777777778 \cdot \frac{z}{x} + \left(0.083333333333333 \cdot \frac{1}{x} + \frac{\left(0.0007936500793651 + y\right) \cdot {z}^{2}}{\color{blue}{1 \cdot x}}\right)\right) \]
      3. times-frac94.7%

        \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(-0.0027777777777778 \cdot \frac{z}{x} + \left(0.083333333333333 \cdot \frac{1}{x} + \color{blue}{\frac{0.0007936500793651 + y}{1} \cdot \frac{{z}^{2}}{x}}\right)\right) \]
      4. +-commutative94.7%

        \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(-0.0027777777777778 \cdot \frac{z}{x} + \left(0.083333333333333 \cdot \frac{1}{x} + \frac{\color{blue}{y + 0.0007936500793651}}{1} \cdot \frac{{z}^{2}}{x}\right)\right) \]
    5. Applied egg-rr94.7%

      \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(-0.0027777777777778 \cdot \frac{z}{x} + \left(0.083333333333333 \cdot \frac{1}{x} + \color{blue}{\frac{y + 0.0007936500793651}{1} \cdot \frac{{z}^{2}}{x}}\right)\right) \]
  3. Recombined 2 regimes into one program.
  4. Final simplification97.4%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq 3 \cdot 10^{+27}:\\ \;\;\;\;\mathsf{fma}\left(x + -0.5, \log x, -x\right) + \left(0.91893853320467 + \frac{\mathsf{fma}\left(\mathsf{fma}\left(y + 0.0007936500793651, z, -0.0027777777777778\right), z, 0.083333333333333\right)}{x}\right)\\ \mathbf{else}:\\ \;\;\;\;\left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(-0.0027777777777778 \cdot \frac{z}{x} + \left(0.083333333333333 \cdot \frac{1}{x} + \left(y + 0.0007936500793651\right) \cdot \frac{{z}^{2}}{x}\right)\right)\\ \end{array} \]
  5. Add Preprocessing

Alternative 3: 95.8% accurate, 0.5× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq 2.9 \cdot 10^{-9}:\\ \;\;\;\;\left(0.91893853320467 + \left(x + \log x \cdot \left(x + -0.5\right)\right)\right) + \frac{0.083333333333333 + z \cdot \left(z \cdot \left(y + 0.0007936500793651\right) - 0.0027777777777778\right)}{x}\\ \mathbf{else}:\\ \;\;\;\;\left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(-0.0027777777777778 \cdot \frac{z}{x} + \left(0.083333333333333 \cdot \frac{1}{x} + \left(y + 0.0007936500793651\right) \cdot \frac{{z}^{2}}{x}\right)\right)\\ \end{array} \end{array} \]
; Alternative 3: regime split at x <= 2.9e-9.
; Small-x branch: reassociated form with log term regrouped (no fma).
; Large-x branch: Taylor expansion in z (around inf).
(FPCore (x y z)
 :precision binary64
 (if (<= x 2.9e-9)
   (+
    (+ 0.91893853320467 (+ x (* (log x) (+ x -0.5))))
    (/
     (+
      0.083333333333333
      (* z (- (* z (+ y 0.0007936500793651)) 0.0027777777777778)))
     x))
   (+
    (+ (- (* (- x 0.5) (log x)) x) 0.91893853320467)
    (+
     (* -0.0027777777777778 (/ z x))
     (+
      (* 0.083333333333333 (/ 1.0 x))
      (* (+ y 0.0007936500793651) (/ (pow z 2.0) x)))))))
// Alternative 3 (C): regime split at x <= 2.9e-9.
// NOTE(review): the small-x branch adds +x where the spec subtracts x; this is
// the derivation's rewrite (via sqrt identities), not a typo. Do not reassociate.
double code(double x, double y, double z) {
	double tmp;
	if (x <= 2.9e-9) {
		tmp = (0.91893853320467 + (x + (log(x) * (x + -0.5)))) + ((0.083333333333333 + (z * ((z * (y + 0.0007936500793651)) - 0.0027777777777778))) / x);
	} else {
		tmp = ((((x - 0.5) * log(x)) - x) + 0.91893853320467) + ((-0.0027777777777778 * (z / x)) + ((0.083333333333333 * (1.0 / x)) + ((y + 0.0007936500793651) * (pow(z, 2.0) / x))));
	}
	return tmp;
}
! Alternative 3 (Fortran): regime split at x <= 2.9d-9.
! NOTE(review): FP operation order is intentional; do not reassociate.
real(8) function code(x, y, z)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8) :: tmp
    if (x <= 2.9d-9) then
        tmp = (0.91893853320467d0 + (x + (log(x) * (x + (-0.5d0))))) + ((0.083333333333333d0 + (z * ((z * (y + 0.0007936500793651d0)) - 0.0027777777777778d0))) / x)
    else
        tmp = ((((x - 0.5d0) * log(x)) - x) + 0.91893853320467d0) + (((-0.0027777777777778d0) * (z / x)) + ((0.083333333333333d0 * (1.0d0 / x)) + ((y + 0.0007936500793651d0) * ((z ** 2.0d0) / x))))
    end if
    code = tmp
end function
// Alternative 3 (Java): regime split at x <= 2.9e-9.
// NOTE(review): FP operation order is intentional; do not reassociate.
public static double code(double x, double y, double z) {
	double tmp;
	if (x <= 2.9e-9) {
		tmp = (0.91893853320467 + (x + (Math.log(x) * (x + -0.5)))) + ((0.083333333333333 + (z * ((z * (y + 0.0007936500793651)) - 0.0027777777777778))) / x);
	} else {
		tmp = ((((x - 0.5) * Math.log(x)) - x) + 0.91893853320467) + ((-0.0027777777777778 * (z / x)) + ((0.083333333333333 * (1.0 / x)) + ((y + 0.0007936500793651) * (Math.pow(z, 2.0) / x))));
	}
	return tmp;
}
# Alternative 3 (Python): regime split at x <= 2.9e-9 (requires `import math`).
# NOTE(review): FP operation order is intentional; do not reassociate.
def code(x, y, z):
	tmp = 0
	if x <= 2.9e-9:
		tmp = (0.91893853320467 + (x + (math.log(x) * (x + -0.5)))) + ((0.083333333333333 + (z * ((z * (y + 0.0007936500793651)) - 0.0027777777777778))) / x)
	else:
		tmp = ((((x - 0.5) * math.log(x)) - x) + 0.91893853320467) + ((-0.0027777777777778 * (z / x)) + ((0.083333333333333 * (1.0 / x)) + ((y + 0.0007936500793651) * (math.pow(z, 2.0) / x))))
	return tmp
# Alternative 3 (Julia): regime split at x <= 2.9e-9; Float64() wraps pin rounding.
function code(x, y, z)
	tmp = 0.0
	if (x <= 2.9e-9)
		tmp = Float64(Float64(0.91893853320467 + Float64(x + Float64(log(x) * Float64(x + -0.5)))) + Float64(Float64(0.083333333333333 + Float64(z * Float64(Float64(z * Float64(y + 0.0007936500793651)) - 0.0027777777777778))) / x));
	else
		tmp = Float64(Float64(Float64(Float64(Float64(x - 0.5) * log(x)) - x) + 0.91893853320467) + Float64(Float64(-0.0027777777777778 * Float64(z / x)) + Float64(Float64(0.083333333333333 * Float64(1.0 / x)) + Float64(Float64(y + 0.0007936500793651) * Float64((z ^ 2.0) / x)))));
	end
	return tmp
end
% Alternative 3 (MATLAB/Octave): regime split at x <= 2.9e-9.
% NOTE(review): FP operation order is intentional; do not reassociate.
function tmp_2 = code(x, y, z)
	tmp = 0.0;
	if (x <= 2.9e-9)
		tmp = (0.91893853320467 + (x + (log(x) * (x + -0.5)))) + ((0.083333333333333 + (z * ((z * (y + 0.0007936500793651)) - 0.0027777777777778))) / x);
	else
		tmp = ((((x - 0.5) * log(x)) - x) + 0.91893853320467) + ((-0.0027777777777778 * (z / x)) + ((0.083333333333333 * (1.0 / x)) + ((y + 0.0007936500793651) * ((z ^ 2.0) / x))));
	end
	tmp_2 = tmp;
end
(* Alternative 3 (Wolfram): regime split at x <= 2.9e-9; N[..., $MachinePrecision] mirrors binary64 rounding at each step. *)
code[x_, y_, z_] := If[LessEqual[x, 2.9e-9], N[(N[(0.91893853320467 + N[(x + N[(N[Log[x], $MachinePrecision] * N[(x + -0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(N[(0.083333333333333 + N[(z * N[(N[(z * N[(y + 0.0007936500793651), $MachinePrecision]), $MachinePrecision] - 0.0027777777777778), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / x), $MachinePrecision]), $MachinePrecision], N[(N[(N[(N[(N[(x - 0.5), $MachinePrecision] * N[Log[x], $MachinePrecision]), $MachinePrecision] - x), $MachinePrecision] + 0.91893853320467), $MachinePrecision] + N[(N[(-0.0027777777777778 * N[(z / x), $MachinePrecision]), $MachinePrecision] + N[(N[(0.083333333333333 * N[(1.0 / x), $MachinePrecision]), $MachinePrecision] + N[(N[(y + 0.0007936500793651), $MachinePrecision] * N[(N[Power[z, 2.0], $MachinePrecision] / x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x \leq 2.9 \cdot 10^{-9}:\\
\;\;\;\;\left(0.91893853320467 + \left(x + \log x \cdot \left(x + -0.5\right)\right)\right) + \frac{0.083333333333333 + z \cdot \left(z \cdot \left(y + 0.0007936500793651\right) - 0.0027777777777778\right)}{x}\\

\mathbf{else}:\\
\;\;\;\;\left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(-0.0027777777777778 \cdot \frac{z}{x} + \left(0.083333333333333 \cdot \frac{1}{x} + \left(y + 0.0007936500793651\right) \cdot \frac{{z}^{2}}{x}\right)\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if x < 2.89999999999999991e-9

    1. Initial program 99.7%

      \[\left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
    2. Add Preprocessing
    3. Step-by-step derivation
      1. sub-neg99.7%

        \[\leadsto \left(\color{blue}{\left(\left(x - 0.5\right) \cdot \log x + \left(-x\right)\right)} + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
      2. add-sqr-sqrt0.0%

        \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x + \color{blue}{\sqrt{-x} \cdot \sqrt{-x}}\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
      3. sqrt-unprod99.7%

        \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x + \color{blue}{\sqrt{\left(-x\right) \cdot \left(-x\right)}}\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
      4. sqr-neg99.7%

        \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x + \sqrt{\color{blue}{x \cdot x}}\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
      5. sqrt-unprod99.7%

        \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x + \color{blue}{\sqrt{x} \cdot \sqrt{x}}\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
      6. add-sqr-sqrt99.7%

        \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x + \color{blue}{x}\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
      7. sub-neg99.7%

        \[\leadsto \left(\left(\color{blue}{\left(x + \left(-0.5\right)\right)} \cdot \log x + x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
      8. metadata-eval99.7%

        \[\leadsto \left(\left(\left(x + \color{blue}{-0.5}\right) \cdot \log x + x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
      9. *-commutative99.7%

        \[\leadsto \left(\left(\color{blue}{\log x \cdot \left(x + -0.5\right)} + x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
    4. Applied egg-rr99.7%

      \[\leadsto \left(\color{blue}{\left(\log x \cdot \left(x + -0.5\right) + x\right)} + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]

    if 2.89999999999999991e-9 < x

    1. Initial program 83.7%

      \[\left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
    2. Add Preprocessing
    3. Taylor expanded in z around inf 83.7%

      \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \color{blue}{\left(-0.0027777777777778 \cdot \frac{z}{x} + \left(0.083333333333333 \cdot \frac{1}{x} + \frac{{z}^{2} \cdot \left(0.0007936500793651 + y\right)}{x}\right)\right)} \]
    4. Step-by-step derivation
      1. *-commutative83.7%

        \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(-0.0027777777777778 \cdot \frac{z}{x} + \left(0.083333333333333 \cdot \frac{1}{x} + \frac{\color{blue}{\left(0.0007936500793651 + y\right) \cdot {z}^{2}}}{x}\right)\right) \]
      2. *-un-lft-identity83.7%

        \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(-0.0027777777777778 \cdot \frac{z}{x} + \left(0.083333333333333 \cdot \frac{1}{x} + \frac{\left(0.0007936500793651 + y\right) \cdot {z}^{2}}{\color{blue}{1 \cdot x}}\right)\right) \]
      3. times-frac95.3%

        \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(-0.0027777777777778 \cdot \frac{z}{x} + \left(0.083333333333333 \cdot \frac{1}{x} + \color{blue}{\frac{0.0007936500793651 + y}{1} \cdot \frac{{z}^{2}}{x}}\right)\right) \]
      4. +-commutative95.3%

        \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(-0.0027777777777778 \cdot \frac{z}{x} + \left(0.083333333333333 \cdot \frac{1}{x} + \frac{\color{blue}{y + 0.0007936500793651}}{1} \cdot \frac{{z}^{2}}{x}\right)\right) \]
    5. Applied egg-rr95.3%

      \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(-0.0027777777777778 \cdot \frac{z}{x} + \left(0.083333333333333 \cdot \frac{1}{x} + \color{blue}{\frac{y + 0.0007936500793651}{1} \cdot \frac{{z}^{2}}{x}}\right)\right) \]
  3. Recombined 2 regimes into one program.
  4. Final simplification97.4%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq 2.9 \cdot 10^{-9}:\\ \;\;\;\;\left(0.91893853320467 + \left(x + \log x \cdot \left(x + -0.5\right)\right)\right) + \frac{0.083333333333333 + z \cdot \left(z \cdot \left(y + 0.0007936500793651\right) - 0.0027777777777778\right)}{x}\\ \mathbf{else}:\\ \;\;\;\;\left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(-0.0027777777777778 \cdot \frac{z}{x} + \left(0.083333333333333 \cdot \frac{1}{x} + \left(y + 0.0007936500793651\right) \cdot \frac{{z}^{2}}{x}\right)\right)\\ \end{array} \]
  5. Add Preprocessing

Alternative 4: 95.9% accurate, 0.6× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq 5200000:\\ \;\;\;\;\frac{0.083333333333333 + z \cdot \left(z \cdot \left(y + 0.0007936500793651\right) - 0.0027777777777778\right)}{x} + \left(\log x \cdot \left(x + -0.5\right) - \left(x + -0.91893853320467\right)\right)\\ \mathbf{else}:\\ \;\;\;\;\left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \frac{y + 0.0007936500793651}{\frac{x}{{z}^{2}}}\\ \end{array} \end{array} \]
; Alternative 4: regime split at x <= 5200000.
; Small/mid-x branch: series term first, then regrouped log term.
; Large-x branch: series collapsed to its leading z^2 term only.
(FPCore (x y z)
 :precision binary64
 (if (<= x 5200000.0)
   (+
    (/
     (+
      0.083333333333333
      (* z (- (* z (+ y 0.0007936500793651)) 0.0027777777777778)))
     x)
    (- (* (log x) (+ x -0.5)) (+ x -0.91893853320467)))
   (+
    (+ (- (* (- x 0.5) (log x)) x) 0.91893853320467)
    (/ (+ y 0.0007936500793651) (/ x (pow z 2.0))))))
/* Herbie alternative 4 (95.9% accurate) for the logFactorial correction.
 * Regime split at x = 5.2e6: below, a reassociated series/x plus the
 * Stirling-like term; above, the z-series is Taylor-reduced to
 * (y + eps) / (x / z^2) per the derivation in this report. */
double code(double x, double y, double z) {
	double tmp;
	if (x <= 5200000.0) {
		tmp = ((0.083333333333333 + (z * ((z * (y + 0.0007936500793651)) - 0.0027777777777778))) / x) + ((log(x) * (x + -0.5)) - (x + -0.91893853320467));
	} else {
		tmp = ((((x - 0.5) * log(x)) - x) + 0.91893853320467) + ((y + 0.0007936500793651) / (x / pow(z, 2.0)));
	}
	return tmp;
}
! Herbie alternative 4 (95.9% accurate) for the logFactorial correction.
! Regime split at x = 5.2e6; see the derivation section of this report.
real(8) function code(x, y, z)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8) :: tmp
    if (x <= 5200000.0d0) then
        ! Reassociated form: series correction / x plus Stirling-like term.
        tmp = ((0.083333333333333d0 + (z * ((z * (y + 0.0007936500793651d0)) - 0.0027777777777778d0))) / x) + ((log(x) * (x + (-0.5d0))) - (x + (-0.91893853320467d0)))
    else
        ! Large x: z-series Taylor-reduced to (y + eps) / (x / z**2).
        tmp = ((((x - 0.5d0) * log(x)) - x) + 0.91893853320467d0) + ((y + 0.0007936500793651d0) / (x / (z ** 2.0d0)))
    end if
    code = tmp
end function
// Herbie alternative 4 (95.9% accurate) for the logFactorial correction.
// Regime split at x = 5.2e6: below, a reassociated series/x plus the
// Stirling-like term; above, the z-series is Taylor-reduced.
public static double code(double x, double y, double z) {
	double tmp;
	if (x <= 5200000.0) {
		tmp = ((0.083333333333333 + (z * ((z * (y + 0.0007936500793651)) - 0.0027777777777778))) / x) + ((Math.log(x) * (x + -0.5)) - (x + -0.91893853320467));
	} else {
		tmp = ((((x - 0.5) * Math.log(x)) - x) + 0.91893853320467) + ((y + 0.0007936500793651) / (x / Math.pow(z, 2.0)));
	}
	return tmp;
}
def code(x, y, z):
	"""Herbie alternative 4 (95.9% accurate) for the logFactorial correction.

	Two accuracy regimes split at x = 5.2e6: below, a reassociated
	series/x plus the Stirling-like term; above, the z-series is
	Taylor-reduced to (y + eps) / (x / z**2).  Floating-point operation
	order is identical to the generated original in both branches.
	"""
	if x <= 5200000.0:
		series = (0.083333333333333 + z * (z * (y + 0.0007936500793651) - 0.0027777777777778)) / x
		return series + (math.log(x) * (x + -0.5) - (x + -0.91893853320467))
	base = ((x - 0.5) * math.log(x) - x) + 0.91893853320467
	return base + (y + 0.0007936500793651) / (x / math.pow(z, 2.0))
# Herbie alternative 4 (95.9% accurate) for the logFactorial correction.
# Regime split at x = 5.2e6; above it the z-series is Taylor-reduced.
function code(x, y, z)
	tmp = 0.0
	if (x <= 5200000.0)
		tmp = Float64(Float64(Float64(0.083333333333333 + Float64(z * Float64(Float64(z * Float64(y + 0.0007936500793651)) - 0.0027777777777778))) / x) + Float64(Float64(log(x) * Float64(x + -0.5)) - Float64(x + -0.91893853320467)));
	else
		tmp = Float64(Float64(Float64(Float64(Float64(x - 0.5) * log(x)) - x) + 0.91893853320467) + Float64(Float64(y + 0.0007936500793651) / Float64(x / (z ^ 2.0))));
	end
	return tmp
end
% Herbie alternative 4 (95.9% accurate) for the logFactorial correction.
% Regime split at x = 5.2e6; above it the z-series is Taylor-reduced.
function tmp_2 = code(x, y, z)
	tmp = 0.0;
	if (x <= 5200000.0)
		tmp = ((0.083333333333333 + (z * ((z * (y + 0.0007936500793651)) - 0.0027777777777778))) / x) + ((log(x) * (x + -0.5)) - (x + -0.91893853320467));
	else
		tmp = ((((x - 0.5) * log(x)) - x) + 0.91893853320467) + ((y + 0.0007936500793651) / (x / (z ^ 2.0)));
	end
	tmp_2 = tmp;
end
(* Herbie alternative 4 (95.9% accurate): regime split at x = 5.2e6; all intermediate results rounded to $MachinePrecision. *)
code[x_, y_, z_] := If[LessEqual[x, 5200000.0], N[(N[(N[(0.083333333333333 + N[(z * N[(N[(z * N[(y + 0.0007936500793651), $MachinePrecision]), $MachinePrecision] - 0.0027777777777778), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / x), $MachinePrecision] + N[(N[(N[Log[x], $MachinePrecision] * N[(x + -0.5), $MachinePrecision]), $MachinePrecision] - N[(x + -0.91893853320467), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[(N[(N[(x - 0.5), $MachinePrecision] * N[Log[x], $MachinePrecision]), $MachinePrecision] - x), $MachinePrecision] + 0.91893853320467), $MachinePrecision] + N[(N[(y + 0.0007936500793651), $MachinePrecision] / N[(x / N[Power[z, 2.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x \leq 5200000:\\
\;\;\;\;\frac{0.083333333333333 + z \cdot \left(z \cdot \left(y + 0.0007936500793651\right) - 0.0027777777777778\right)}{x} + \left(\log x \cdot \left(x + -0.5\right) - \left(x + -0.91893853320467\right)\right)\\

\mathbf{else}:\\
\;\;\;\;\left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \frac{y + 0.0007936500793651}{\frac{x}{{z}^{2}}}\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if x < 5.2e6

    1. Initial program 99.7%

      \[\left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
    2. Add Preprocessing
    3. Step-by-step derivation
      1. associate-+l-99.7%

        \[\leadsto \color{blue}{\left(\left(x - 0.5\right) \cdot \log x - \left(x - 0.91893853320467\right)\right)} + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
      2. sub-neg99.7%

        \[\leadsto \left(\color{blue}{\left(x + \left(-0.5\right)\right)} \cdot \log x - \left(x - 0.91893853320467\right)\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
      3. metadata-eval99.7%

        \[\leadsto \left(\left(x + \color{blue}{-0.5}\right) \cdot \log x - \left(x - 0.91893853320467\right)\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
      4. *-commutative99.7%

        \[\leadsto \left(\color{blue}{\log x \cdot \left(x + -0.5\right)} - \left(x - 0.91893853320467\right)\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
      5. sub-neg99.7%

        \[\leadsto \left(\log x \cdot \left(x + -0.5\right) - \color{blue}{\left(x + \left(-0.91893853320467\right)\right)}\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
      6. metadata-eval99.7%

        \[\leadsto \left(\log x \cdot \left(x + -0.5\right) - \left(x + \color{blue}{-0.91893853320467}\right)\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
    4. Applied egg-rr99.7%

      \[\leadsto \color{blue}{\left(\log x \cdot \left(x + -0.5\right) - \left(x + -0.91893853320467\right)\right)} + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]

    if 5.2e6 < x

    1. Initial program 82.7%

      \[\left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
    2. Add Preprocessing
    3. Taylor expanded in z around inf 82.7%

      \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \color{blue}{\frac{{z}^{2} \cdot \left(0.0007936500793651 + y\right)}{x}} \]
    4. Step-by-step derivation
      1. +-commutative82.7%

        \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \frac{{z}^{2} \cdot \color{blue}{\left(y + 0.0007936500793651\right)}}{x} \]
      2. *-commutative82.7%

        \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \frac{\color{blue}{\left(y + 0.0007936500793651\right) \cdot {z}^{2}}}{x} \]
      3. associate-/l*95.0%

        \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \color{blue}{\frac{y + 0.0007936500793651}{\frac{x}{{z}^{2}}}} \]
      4. +-commutative95.0%

        \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \frac{\color{blue}{0.0007936500793651 + y}}{\frac{x}{{z}^{2}}} \]
    5. Simplified95.0%

      \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \color{blue}{\frac{0.0007936500793651 + y}{\frac{x}{{z}^{2}}}} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification (97.4% accuracy)

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq 5200000:\\ \;\;\;\;\frac{0.083333333333333 + z \cdot \left(z \cdot \left(y + 0.0007936500793651\right) - 0.0027777777777778\right)}{x} + \left(\log x \cdot \left(x + -0.5\right) - \left(x + -0.91893853320467\right)\right)\\ \mathbf{else}:\\ \;\;\;\;\left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \frac{y + 0.0007936500793651}{\frac{x}{{z}^{2}}}\\ \end{array} \]
  5. Add Preprocessing

Alternative 5: 93.4% accurate, 0.6× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_0 := \left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\\ \mathbf{if}\;x \leq 1.15 \cdot 10^{+60}:\\ \;\;\;\;t_0 + \frac{0.083333333333333 + z \cdot \left(z \cdot \left(y + 0.0007936500793651\right) - 0.0027777777777778\right)}{x}\\ \mathbf{else}:\\ \;\;\;\;t_0 + {z}^{2} \cdot \frac{y}{x}\\ \end{array} \end{array} \]
;; Herbie alternative 5 (93.4% accurate): shared base t_0, regime split
;; at x = 1.15e60; the huge-x branch keeps only z^2 * (y / x) (series
;; Taylor-expanded in y around inf per the derivation).
(FPCore (x y z)
 :precision binary64
 (let* ((t_0 (+ (- (* (- x 0.5) (log x)) x) 0.91893853320467)))
   (if (<= x 1.15e+60)
     (+
      t_0
      (/
       (+
        0.083333333333333
        (* z (- (* z (+ y 0.0007936500793651)) 0.0027777777777778)))
       x))
     (+ t_0 (* (pow z 2.0) (/ y x))))))
/* Herbie alternative 5 (93.4% accurate): shared Stirling-like base t_0,
 * regime split at x = 1.15e60.  The huge-x branch keeps only
 * z^2 * (y / x) (series Taylor-expanded in y around inf). */
double code(double x, double y, double z) {
	double t_0 = (((x - 0.5) * log(x)) - x) + 0.91893853320467;
	double tmp;
	if (x <= 1.15e+60) {
		tmp = t_0 + ((0.083333333333333 + (z * ((z * (y + 0.0007936500793651)) - 0.0027777777777778))) / x);
	} else {
		tmp = t_0 + (pow(z, 2.0) * (y / x));
	}
	return tmp;
}
! Herbie alternative 5 (93.4% accurate): shared Stirling-like base t_0,
! regime split at x = 1.15e60 (huge-x branch keeps only z**2 * (y/x)).
real(8) function code(x, y, z)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8) :: t_0
    real(8) :: tmp
    t_0 = (((x - 0.5d0) * log(x)) - x) + 0.91893853320467d0
    if (x <= 1.15d+60) then
        tmp = t_0 + ((0.083333333333333d0 + (z * ((z * (y + 0.0007936500793651d0)) - 0.0027777777777778d0))) / x)
    else
        tmp = t_0 + ((z ** 2.0d0) * (y / x))
    end if
    code = tmp
end function
// Herbie alternative 5 (93.4% accurate): shared Stirling-like base t_0,
// regime split at x = 1.15e60 (huge-x branch keeps only z^2 * (y/x)).
public static double code(double x, double y, double z) {
	double t_0 = (((x - 0.5) * Math.log(x)) - x) + 0.91893853320467;
	double tmp;
	if (x <= 1.15e+60) {
		tmp = t_0 + ((0.083333333333333 + (z * ((z * (y + 0.0007936500793651)) - 0.0027777777777778))) / x);
	} else {
		tmp = t_0 + (Math.pow(z, 2.0) * (y / x));
	}
	return tmp;
}
def code(x, y, z):
	"""Herbie alternative 5 (93.4% accurate) for the logFactorial correction.

	A Stirling-like base term is shared by both regimes; at x > 1.15e60
	the series correction collapses to z**2 * (y / x).  Floating-point
	operation order matches the generated original exactly.
	"""
	base = (((x - 0.5) * math.log(x)) - x) + 0.91893853320467
	if x <= 1.15e+60:
		inner = z * ((z * (y + 0.0007936500793651)) - 0.0027777777777778)
		return base + (0.083333333333333 + inner) / x
	return base + math.pow(z, 2.0) * (y / x)
# Herbie alternative 5 (93.4% accurate): shared Stirling-like base t_0,
# regime split at x = 1.15e60 (huge-x branch keeps only z^2 * (y/x)).
function code(x, y, z)
	t_0 = Float64(Float64(Float64(Float64(x - 0.5) * log(x)) - x) + 0.91893853320467)
	tmp = 0.0
	if (x <= 1.15e+60)
		tmp = Float64(t_0 + Float64(Float64(0.083333333333333 + Float64(z * Float64(Float64(z * Float64(y + 0.0007936500793651)) - 0.0027777777777778))) / x));
	else
		tmp = Float64(t_0 + Float64((z ^ 2.0) * Float64(y / x)));
	end
	return tmp
end
% Herbie alternative 5 (93.4% accurate): shared Stirling-like base t_0,
% regime split at x = 1.15e60 (huge-x branch keeps only z^2 * (y/x)).
function tmp_2 = code(x, y, z)
	t_0 = (((x - 0.5) * log(x)) - x) + 0.91893853320467;
	tmp = 0.0;
	if (x <= 1.15e+60)
		tmp = t_0 + ((0.083333333333333 + (z * ((z * (y + 0.0007936500793651)) - 0.0027777777777778))) / x);
	else
		tmp = t_0 + ((z ^ 2.0) * (y / x));
	end
	tmp_2 = tmp;
end
(* Herbie alternative 5 (93.4% accurate): shared base t_0, regime split at x = 1.15e60; all intermediates rounded to $MachinePrecision. *)
code[x_, y_, z_] := Block[{t$95$0 = N[(N[(N[(N[(x - 0.5), $MachinePrecision] * N[Log[x], $MachinePrecision]), $MachinePrecision] - x), $MachinePrecision] + 0.91893853320467), $MachinePrecision]}, If[LessEqual[x, 1.15e+60], N[(t$95$0 + N[(N[(0.083333333333333 + N[(z * N[(N[(z * N[(y + 0.0007936500793651), $MachinePrecision]), $MachinePrecision] - 0.0027777777777778), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / x), $MachinePrecision]), $MachinePrecision], N[(t$95$0 + N[(N[Power[z, 2.0], $MachinePrecision] * N[(y / x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
t_0 := \left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\\
\mathbf{if}\;x \leq 1.15 \cdot 10^{+60}:\\
\;\;\;\;t_0 + \frac{0.083333333333333 + z \cdot \left(z \cdot \left(y + 0.0007936500793651\right) - 0.0027777777777778\right)}{x}\\

\mathbf{else}:\\
\;\;\;\;t_0 + {z}^{2} \cdot \frac{y}{x}\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if x < 1.15000000000000008e60

    1. Initial program 99.6%

      \[\left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
    2. Add Preprocessing

    if 1.15000000000000008e60 < x

    1. Initial program 78.6%

      \[\left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
    2. Add Preprocessing
    3. Taylor expanded in y around inf 75.5%

      \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \color{blue}{\frac{y \cdot {z}^{2}}{x}} \]
    4. Step-by-step derivation
      1. *-commutative75.5%

        \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \frac{\color{blue}{{z}^{2} \cdot y}}{x} \]
      2. associate-*r/90.6%

        \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \color{blue}{{z}^{2} \cdot \frac{y}{x}} \]
    5. Simplified90.6%

      \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \color{blue}{{z}^{2} \cdot \frac{y}{x}} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification (96.2% accuracy)

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq 1.15 \cdot 10^{+60}:\\ \;\;\;\;\left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \frac{0.083333333333333 + z \cdot \left(z \cdot \left(y + 0.0007936500793651\right) - 0.0027777777777778\right)}{x}\\ \mathbf{else}:\\ \;\;\;\;\left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + {z}^{2} \cdot \frac{y}{x}\\ \end{array} \]
  5. Add Preprocessing

Alternative 6: 93.9% accurate, 0.6× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_0 := \left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\\ \mathbf{if}\;x \leq 1.5 \cdot 10^{+60}:\\ \;\;\;\;t_0 + \frac{0.083333333333333 + z \cdot \left(z \cdot \left(y + 0.0007936500793651\right) - 0.0027777777777778\right)}{x}\\ \mathbf{else}:\\ \;\;\;\;t_0 + \frac{y}{\frac{x}{{z}^{2}}}\\ \end{array} \end{array} \]
;; Herbie alternative 6 (93.9% accurate): shared base t_0, regime split
;; at x = 1.5e60; the huge-x branch uses y / (x / z^2) instead of the
;; full series (Taylor expansion in y around inf per the derivation).
(FPCore (x y z)
 :precision binary64
 (let* ((t_0 (+ (- (* (- x 0.5) (log x)) x) 0.91893853320467)))
   (if (<= x 1.5e+60)
     (+
      t_0
      (/
       (+
        0.083333333333333
        (* z (- (* z (+ y 0.0007936500793651)) 0.0027777777777778)))
       x))
     (+ t_0 (/ y (/ x (pow z 2.0)))))))
/* Herbie alternative 6 (93.9% accurate): shared Stirling-like base t_0,
 * regime split at x = 1.5e60.  The huge-x branch uses y / (x / z^2)
 * (series Taylor-expanded in y around inf). */
double code(double x, double y, double z) {
	double t_0 = (((x - 0.5) * log(x)) - x) + 0.91893853320467;
	double tmp;
	if (x <= 1.5e+60) {
		tmp = t_0 + ((0.083333333333333 + (z * ((z * (y + 0.0007936500793651)) - 0.0027777777777778))) / x);
	} else {
		tmp = t_0 + (y / (x / pow(z, 2.0)));
	}
	return tmp;
}
! Herbie alternative 6 (93.9% accurate): shared Stirling-like base t_0,
! regime split at x = 1.5e60 (huge-x branch uses y / (x / z**2)).
real(8) function code(x, y, z)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8) :: t_0
    real(8) :: tmp
    t_0 = (((x - 0.5d0) * log(x)) - x) + 0.91893853320467d0
    if (x <= 1.5d+60) then
        tmp = t_0 + ((0.083333333333333d0 + (z * ((z * (y + 0.0007936500793651d0)) - 0.0027777777777778d0))) / x)
    else
        tmp = t_0 + (y / (x / (z ** 2.0d0)))
    end if
    code = tmp
end function
// Herbie alternative 6 (93.9% accurate): shared Stirling-like base t_0,
// regime split at x = 1.5e60 (huge-x branch uses y / (x / z^2)).
public static double code(double x, double y, double z) {
	double t_0 = (((x - 0.5) * Math.log(x)) - x) + 0.91893853320467;
	double tmp;
	if (x <= 1.5e+60) {
		tmp = t_0 + ((0.083333333333333 + (z * ((z * (y + 0.0007936500793651)) - 0.0027777777777778))) / x);
	} else {
		tmp = t_0 + (y / (x / Math.pow(z, 2.0)));
	}
	return tmp;
}
def code(x, y, z):
	"""Herbie alternative 6 (93.9% accurate) for the logFactorial correction.

	A Stirling-like base term is shared by both regimes; at x > 1.5e60
	the series correction collapses to y / (x / z**2).  Floating-point
	operation order matches the generated original exactly.
	"""
	base = (((x - 0.5) * math.log(x)) - x) + 0.91893853320467
	if x <= 1.5e+60:
		poly = z * ((z * (y + 0.0007936500793651)) - 0.0027777777777778)
		return base + (0.083333333333333 + poly) / x
	return base + y / (x / math.pow(z, 2.0))
# Herbie alternative 6 (93.9% accurate): shared Stirling-like base t_0,
# regime split at x = 1.5e60 (huge-x branch uses y / (x / z^2)).
function code(x, y, z)
	t_0 = Float64(Float64(Float64(Float64(x - 0.5) * log(x)) - x) + 0.91893853320467)
	tmp = 0.0
	if (x <= 1.5e+60)
		tmp = Float64(t_0 + Float64(Float64(0.083333333333333 + Float64(z * Float64(Float64(z * Float64(y + 0.0007936500793651)) - 0.0027777777777778))) / x));
	else
		tmp = Float64(t_0 + Float64(y / Float64(x / (z ^ 2.0))));
	end
	return tmp
end
% Herbie alternative 6 (93.9% accurate): shared Stirling-like base t_0,
% regime split at x = 1.5e60 (huge-x branch uses y / (x / z^2)).
function tmp_2 = code(x, y, z)
	t_0 = (((x - 0.5) * log(x)) - x) + 0.91893853320467;
	tmp = 0.0;
	if (x <= 1.5e+60)
		tmp = t_0 + ((0.083333333333333 + (z * ((z * (y + 0.0007936500793651)) - 0.0027777777777778))) / x);
	else
		tmp = t_0 + (y / (x / (z ^ 2.0)));
	end
	tmp_2 = tmp;
end
(* Herbie alternative 6 (93.9% accurate): shared base t_0, regime split at x = 1.5e60; all intermediates rounded to $MachinePrecision. *)
code[x_, y_, z_] := Block[{t$95$0 = N[(N[(N[(N[(x - 0.5), $MachinePrecision] * N[Log[x], $MachinePrecision]), $MachinePrecision] - x), $MachinePrecision] + 0.91893853320467), $MachinePrecision]}, If[LessEqual[x, 1.5e+60], N[(t$95$0 + N[(N[(0.083333333333333 + N[(z * N[(N[(z * N[(y + 0.0007936500793651), $MachinePrecision]), $MachinePrecision] - 0.0027777777777778), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / x), $MachinePrecision]), $MachinePrecision], N[(t$95$0 + N[(y / N[(x / N[Power[z, 2.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
t_0 := \left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\\
\mathbf{if}\;x \leq 1.5 \cdot 10^{+60}:\\
\;\;\;\;t_0 + \frac{0.083333333333333 + z \cdot \left(z \cdot \left(y + 0.0007936500793651\right) - 0.0027777777777778\right)}{x}\\

\mathbf{else}:\\
\;\;\;\;t_0 + \frac{y}{\frac{x}{{z}^{2}}}\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if x < 1.4999999999999999e60

    1. Initial program 99.6%

      \[\left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
    2. Add Preprocessing

    if 1.4999999999999999e60 < x

    1. Initial program 78.6%

      \[\left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
    2. Add Preprocessing
    3. Taylor expanded in y around inf 75.5%

      \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \color{blue}{\frac{y \cdot {z}^{2}}{x}} \]
    4. Step-by-step derivation
      1. associate-/l*90.7%

        \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \color{blue}{\frac{y}{\frac{x}{{z}^{2}}}} \]
    5. Simplified90.7%

      \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \color{blue}{\frac{y}{\frac{x}{{z}^{2}}}} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification (96.2% accuracy)

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq 1.5 \cdot 10^{+60}:\\ \;\;\;\;\left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \frac{0.083333333333333 + z \cdot \left(z \cdot \left(y + 0.0007936500793651\right) - 0.0027777777777778\right)}{x}\\ \mathbf{else}:\\ \;\;\;\;\left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \frac{y}{\frac{x}{{z}^{2}}}\\ \end{array} \]
  5. Add Preprocessing

Alternative 7: 91.9% accurate, 0.9× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_0 := x \cdot \left(\log x + -1\right)\\ t_1 := \frac{0.083333333333333 + z \cdot \left(z \cdot \left(y + 0.0007936500793651\right) - 0.0027777777777778\right)}{x} + t_0\\ \mathbf{if}\;x \leq 6.5 \cdot 10^{-13}:\\ \;\;\;\;t_1\\ \mathbf{elif}\;x \leq 310000:\\ \;\;\;\;\left(\log x \cdot \left(x + -0.5\right) - \left(x + -0.91893853320467\right)\right) + \frac{0.083333333333333}{x}\\ \mathbf{elif}\;x \leq 2.1 \cdot 10^{+168}:\\ \;\;\;\;t_1\\ \mathbf{else}:\\ \;\;\;\;t_0\\ \end{array} \end{array} \]
;; Herbie alternative 7 (91.9% accurate): three regimes.  Tiny x
;; (<= 6.5e-13) and the mid-large band (3.1e5, 2.1e168] use the full
;; series plus x*(log x - 1); the mid band drops the z-dependent terms;
;; above 2.1e168 only x*(log x - 1) remains (see derivation).
(FPCore (x y z)
 :precision binary64
 (let* ((t_0 (* x (+ (log x) -1.0)))
        (t_1
         (+
          (/
           (+
            0.083333333333333
            (* z (- (* z (+ y 0.0007936500793651)) 0.0027777777777778)))
           x)
          t_0)))
   (if (<= x 6.5e-13)
     t_1
     (if (<= x 310000.0)
       (+
        (- (* (log x) (+ x -0.5)) (+ x -0.91893853320467))
        (/ 0.083333333333333 x))
       (if (<= x 2.1e+168) t_1 t_0)))))
/* Herbie alternative 7 (91.9% accurate): three regimes.  Tiny x
 * (<= 6.5e-13) and the band (3.1e5, 2.1e168] use the full series plus
 * x*(log x - 1); the mid band keeps only 0.0833/x of the correction;
 * above 2.1e168 the correction is dropped entirely. */
double code(double x, double y, double z) {
	double t_0 = x * (log(x) + -1.0);
	double t_1 = ((0.083333333333333 + (z * ((z * (y + 0.0007936500793651)) - 0.0027777777777778))) / x) + t_0;
	double tmp;
	if (x <= 6.5e-13) {
		tmp = t_1;
	} else if (x <= 310000.0) {
		tmp = ((log(x) * (x + -0.5)) - (x + -0.91893853320467)) + (0.083333333333333 / x);
	} else if (x <= 2.1e+168) {
		tmp = t_1;
	} else {
		tmp = t_0;
	}
	return tmp;
}
! Herbie alternative 7 (91.9% accurate): three regimes (thresholds
! 6.5e-13, 3.1e5, 2.1e168); see the derivation section of this report.
real(8) function code(x, y, z)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8) :: t_0
    real(8) :: t_1
    real(8) :: tmp
    t_0 = x * (log(x) + (-1.0d0))
    t_1 = ((0.083333333333333d0 + (z * ((z * (y + 0.0007936500793651d0)) - 0.0027777777777778d0))) / x) + t_0
    if (x <= 6.5d-13) then
        tmp = t_1
    else if (x <= 310000.0d0) then
        ! Mid band: only the 0.0833/x part of the correction survives.
        tmp = ((log(x) * (x + (-0.5d0))) - (x + (-0.91893853320467d0))) + (0.083333333333333d0 / x)
    else if (x <= 2.1d+168) then
        tmp = t_1
    else
        ! Huge x: the correction term is dropped entirely.
        tmp = t_0
    end if
    code = tmp
end function
// Herbie alternative 7 (91.9% accurate): three regimes (thresholds
// 6.5e-13, 3.1e5, 2.1e168).  Mid band keeps only 0.0833/x of the
// correction; above 2.1e168 only x*(log x - 1) remains.
public static double code(double x, double y, double z) {
	double t_0 = x * (Math.log(x) + -1.0);
	double t_1 = ((0.083333333333333 + (z * ((z * (y + 0.0007936500793651)) - 0.0027777777777778))) / x) + t_0;
	double tmp;
	if (x <= 6.5e-13) {
		tmp = t_1;
	} else if (x <= 310000.0) {
		tmp = ((Math.log(x) * (x + -0.5)) - (x + -0.91893853320467)) + (0.083333333333333 / x);
	} else if (x <= 2.1e+168) {
		tmp = t_1;
	} else {
		tmp = t_0;
	}
	return tmp;
}
def code(x, y, z):
	"""Herbie alternative 7 (91.9% accurate) for the logFactorial correction.

	Three regimes: tiny x (<= 6.5e-13) and the band (3.1e5, 2.1e168]
	use the full series plus x*(log x - 1); the mid band keeps only
	0.0833/x of the correction; above 2.1e168 the correction is dropped.
	Floating-point operation order matches the generated original.
	"""
	stirling = x * (math.log(x) + -1.0)
	series = (0.083333333333333 + (z * ((z * (y + 0.0007936500793651)) - 0.0027777777777778))) / x
	full = series + stirling
	if 6.5e-13 < x <= 310000.0:
		return (math.log(x) * (x + -0.5)) - (x + -0.91893853320467) + 0.083333333333333 / x
	if x <= 2.1e+168:
		return full
	return stirling
# Herbie alternative 7 (91.9% accurate): three regimes (thresholds
# 6.5e-13, 3.1e5, 2.1e168); huge x keeps only x*(log x - 1).
function code(x, y, z)
	t_0 = Float64(x * Float64(log(x) + -1.0))
	t_1 = Float64(Float64(Float64(0.083333333333333 + Float64(z * Float64(Float64(z * Float64(y + 0.0007936500793651)) - 0.0027777777777778))) / x) + t_0)
	tmp = 0.0
	if (x <= 6.5e-13)
		tmp = t_1;
	elseif (x <= 310000.0)
		tmp = Float64(Float64(Float64(log(x) * Float64(x + -0.5)) - Float64(x + -0.91893853320467)) + Float64(0.083333333333333 / x));
	elseif (x <= 2.1e+168)
		tmp = t_1;
	else
		tmp = t_0;
	end
	return tmp
end
% Herbie alternative 7 (91.9% accurate): three regimes (thresholds
% 6.5e-13, 3.1e5, 2.1e168); huge x keeps only x*(log x - 1).
function tmp_2 = code(x, y, z)
	t_0 = x * (log(x) + -1.0);
	t_1 = ((0.083333333333333 + (z * ((z * (y + 0.0007936500793651)) - 0.0027777777777778))) / x) + t_0;
	tmp = 0.0;
	if (x <= 6.5e-13)
		tmp = t_1;
	elseif (x <= 310000.0)
		tmp = ((log(x) * (x + -0.5)) - (x + -0.91893853320467)) + (0.083333333333333 / x);
	elseif (x <= 2.1e+168)
		tmp = t_1;
	else
		tmp = t_0;
	end
	tmp_2 = tmp;
end
(* Herbie alternative 7 (91.9% accurate): three regimes (thresholds 6.5e-13, 3.1e5, 2.1e168); all intermediates rounded to $MachinePrecision. *)
code[x_, y_, z_] := Block[{t$95$0 = N[(x * N[(N[Log[x], $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(N[(N[(0.083333333333333 + N[(z * N[(N[(z * N[(y + 0.0007936500793651), $MachinePrecision]), $MachinePrecision] - 0.0027777777777778), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / x), $MachinePrecision] + t$95$0), $MachinePrecision]}, If[LessEqual[x, 6.5e-13], t$95$1, If[LessEqual[x, 310000.0], N[(N[(N[(N[Log[x], $MachinePrecision] * N[(x + -0.5), $MachinePrecision]), $MachinePrecision] - N[(x + -0.91893853320467), $MachinePrecision]), $MachinePrecision] + N[(0.083333333333333 / x), $MachinePrecision]), $MachinePrecision], If[LessEqual[x, 2.1e+168], t$95$1, t$95$0]]]]]
\begin{array}{l}

\\
\begin{array}{l}
t_0 := x \cdot \left(\log x + -1\right)\\
t_1 := \frac{0.083333333333333 + z \cdot \left(z \cdot \left(y + 0.0007936500793651\right) - 0.0027777777777778\right)}{x} + t_0\\
\mathbf{if}\;x \leq 6.5 \cdot 10^{-13}:\\
\;\;\;\;t_1\\

\mathbf{elif}\;x \leq 310000:\\
\;\;\;\;\left(\log x \cdot \left(x + -0.5\right) - \left(x + -0.91893853320467\right)\right) + \frac{0.083333333333333}{x}\\

\mathbf{elif}\;x \leq 2.1 \cdot 10^{+168}:\\
\;\;\;\;t_1\\

\mathbf{else}:\\
\;\;\;\;t_0\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if x < 6.49999999999999957e-13 or 3.1e5 < x < 2.10000000000000003e168

    1. Initial program 97.7%

      \[\left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
    2. Add Preprocessing
    3. Taylor expanded in x around inf 97.6%

      \[\leadsto \color{blue}{x \cdot \left(-1 \cdot \log \left(\frac{1}{x}\right) - 1\right)} + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
    4. Step-by-step derivation
      1. sub-neg97.6%

        \[\leadsto x \cdot \color{blue}{\left(-1 \cdot \log \left(\frac{1}{x}\right) + \left(-1\right)\right)} + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
      2. mul-1-neg97.6%

        \[\leadsto x \cdot \left(\color{blue}{\left(-\log \left(\frac{1}{x}\right)\right)} + \left(-1\right)\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
      3. log-rec97.6%

        \[\leadsto x \cdot \left(\left(-\color{blue}{\left(-\log x\right)}\right) + \left(-1\right)\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
      4. remove-double-neg97.6%

        \[\leadsto x \cdot \left(\color{blue}{\log x} + \left(-1\right)\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
      5. metadata-eval97.6%

        \[\leadsto x \cdot \left(\log x + \color{blue}{-1}\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
    5. Simplified97.6%

      \[\leadsto \color{blue}{x \cdot \left(\log x + -1\right)} + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]

    if 6.49999999999999957e-13 < x < 3.1e5

    1. Initial program 99.6%

      \[\left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
    2. Add Preprocessing
    3. Taylor expanded in z around 0 97.7%

      \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \color{blue}{\frac{0.083333333333333}{x}} \]
    4. Step-by-step derivation
      1. associate-+l-99.7%

        \[\leadsto \color{blue}{\left(\left(x - 0.5\right) \cdot \log x - \left(x - 0.91893853320467\right)\right)} + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
      2. sub-neg99.7%

        \[\leadsto \left(\color{blue}{\left(x + \left(-0.5\right)\right)} \cdot \log x - \left(x - 0.91893853320467\right)\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
      3. metadata-eval99.7%

        \[\leadsto \left(\left(x + \color{blue}{-0.5}\right) \cdot \log x - \left(x - 0.91893853320467\right)\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
      4. *-commutative99.7%

        \[\leadsto \left(\color{blue}{\log x \cdot \left(x + -0.5\right)} - \left(x - 0.91893853320467\right)\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
      5. sub-neg99.7%

        \[\leadsto \left(\log x \cdot \left(x + -0.5\right) - \color{blue}{\left(x + \left(-0.91893853320467\right)\right)}\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
      6. metadata-eval99.7%

        \[\leadsto \left(\log x \cdot \left(x + -0.5\right) - \left(x + \color{blue}{-0.91893853320467}\right)\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
    5. Applied egg-rr97.8%

      \[\leadsto \color{blue}{\left(\log x \cdot \left(x + -0.5\right) - \left(x + -0.91893853320467\right)\right)} + \frac{0.083333333333333}{x} \]

    if 2.10000000000000003e168 < x

    1. Initial program 71.2%

      \[\left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
    2. Step-by-step derivation
      1. associate-+l+71.2%

        \[\leadsto \color{blue}{\left(\left(x - 0.5\right) \cdot \log x - x\right) + \left(0.91893853320467 + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x}\right)} \]
      2. fma-neg71.4%

        \[\leadsto \color{blue}{\mathsf{fma}\left(x - 0.5, \log x, -x\right)} + \left(0.91893853320467 + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x}\right) \]
      3. sub-neg71.4%

        \[\leadsto \mathsf{fma}\left(\color{blue}{x + \left(-0.5\right)}, \log x, -x\right) + \left(0.91893853320467 + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x}\right) \]
      4. metadata-eval71.4%

        \[\leadsto \mathsf{fma}\left(x + \color{blue}{-0.5}, \log x, -x\right) + \left(0.91893853320467 + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x}\right) \]
      5. fma-def71.4%

        \[\leadsto \mathsf{fma}\left(x + -0.5, \log x, -x\right) + \left(0.91893853320467 + \frac{\color{blue}{\mathsf{fma}\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778, z, 0.083333333333333\right)}}{x}\right) \]
      6. fma-neg71.4%

        \[\leadsto \mathsf{fma}\left(x + -0.5, \log x, -x\right) + \left(0.91893853320467 + \frac{\mathsf{fma}\left(\color{blue}{\mathsf{fma}\left(y + 0.0007936500793651, z, -0.0027777777777778\right)}, z, 0.083333333333333\right)}{x}\right) \]
      7. metadata-eval71.4%

        \[\leadsto \mathsf{fma}\left(x + -0.5, \log x, -x\right) + \left(0.91893853320467 + \frac{\mathsf{fma}\left(\mathsf{fma}\left(y + 0.0007936500793651, z, \color{blue}{-0.0027777777777778}\right), z, 0.083333333333333\right)}{x}\right) \]
    3. Simplified71.4%

      \[\leadsto \color{blue}{\mathsf{fma}\left(x + -0.5, \log x, -x\right) + \left(0.91893853320467 + \frac{\mathsf{fma}\left(\mathsf{fma}\left(y + 0.0007936500793651, z, -0.0027777777777778\right), z, 0.083333333333333\right)}{x}\right)} \]
    4. Add Preprocessing
    5. Taylor expanded in z around 0 85.1%

      \[\leadsto \mathsf{fma}\left(x + -0.5, \log x, -x\right) + \left(0.91893853320467 + \frac{\color{blue}{0.083333333333333}}{x}\right) \]
    6. Taylor expanded in x around inf 85.1%

      \[\leadsto \color{blue}{x \cdot \left(-1 \cdot \log \left(\frac{1}{x}\right) - 1\right)} \]
    7. Step-by-step derivation
      1. sub-neg85.1%

        \[\leadsto x \cdot \color{blue}{\left(-1 \cdot \log \left(\frac{1}{x}\right) + \left(-1\right)\right)} \]
      2. mul-1-neg85.1%

        \[\leadsto x \cdot \left(\color{blue}{\left(-\log \left(\frac{1}{x}\right)\right)} + \left(-1\right)\right) \]
      3. log-rec85.1%

        \[\leadsto x \cdot \left(\left(-\color{blue}{\left(-\log x\right)}\right) + \left(-1\right)\right) \]
      4. remove-double-neg85.1%

        \[\leadsto x \cdot \left(\color{blue}{\log x} + \left(-1\right)\right) \]
      5. metadata-eval85.1%

        \[\leadsto x \cdot \left(\log x + \color{blue}{-1}\right) \]
    8. Simplified85.1%

      \[\leadsto \color{blue}{x \cdot \left(\log x + -1\right)} \]
  3. Recombined 3 regimes into one program.
  4. Final simplification (94.6% accuracy)

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq 6.5 \cdot 10^{-13}:\\ \;\;\;\;\frac{0.083333333333333 + z \cdot \left(z \cdot \left(y + 0.0007936500793651\right) - 0.0027777777777778\right)}{x} + x \cdot \left(\log x + -1\right)\\ \mathbf{elif}\;x \leq 310000:\\ \;\;\;\;\left(\log x \cdot \left(x + -0.5\right) - \left(x + -0.91893853320467\right)\right) + \frac{0.083333333333333}{x}\\ \mathbf{elif}\;x \leq 2.1 \cdot 10^{+168}:\\ \;\;\;\;\frac{0.083333333333333 + z \cdot \left(z \cdot \left(y + 0.0007936500793651\right) - 0.0027777777777778\right)}{x} + x \cdot \left(\log x + -1\right)\\ \mathbf{else}:\\ \;\;\;\;x \cdot \left(\log x + -1\right)\\ \end{array} \]
  5. Add Preprocessing

Alternative 8: 92.5% accurate, 0.9× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_0 := x \cdot \left(\log x + -1\right)\\ t_1 := \log x \cdot \left(x + -0.5\right)\\ t_2 := \frac{0.083333333333333 + z \cdot \left(z \cdot \left(y + 0.0007936500793651\right) - 0.0027777777777778\right)}{x}\\ \mathbf{if}\;x \leq 2.8 \cdot 10^{-7}:\\ \;\;\;\;\left(0.91893853320467 + \left(x + t_1\right)\right) + t_2\\ \mathbf{elif}\;x \leq 270000:\\ \;\;\;\;\left(t_1 - \left(x + -0.91893853320467\right)\right) + \frac{0.083333333333333}{x}\\ \mathbf{elif}\;x \leq 1.2 \cdot 10^{+170}:\\ \;\;\;\;t_2 + t_0\\ \mathbf{else}:\\ \;\;\;\;t_0\\ \end{array} \end{array} \]
;; Herbie alternative 8 (92.5% accurate): four regimes (thresholds
;; 2.8e-7, 2.7e5, 1.2e170) built from three shared subexpressions:
;; t_0 = x*(log x - 1), t_1 = log(x)*(x - 0.5), t_2 = series / x.
(FPCore (x y z)
 :precision binary64
 (let* ((t_0 (* x (+ (log x) -1.0)))
        (t_1 (* (log x) (+ x -0.5)))
        (t_2
         (/
          (+
           0.083333333333333
           (* z (- (* z (+ y 0.0007936500793651)) 0.0027777777777778)))
          x)))
   (if (<= x 2.8e-7)
     (+ (+ 0.91893853320467 (+ x t_1)) t_2)
     (if (<= x 270000.0)
       (+ (- t_1 (+ x -0.91893853320467)) (/ 0.083333333333333 x))
       (if (<= x 1.2e+170) (+ t_2 t_0) t_0)))))
/*
 * Herbie alternative 8 (92.5% accurate) of the logFactorial correction
 * expression from Numeric.SpecFunctions (math-functions-0.1.5.2).
 * The x domain is split into four regimes; the floating-point evaluation
 * order below is deliberate and must not be reassociated.
 */
double code(double x, double y, double z) {
	double t_0 = x * (log(x) + -1.0); /* asymptotic term x*(log x - 1) */
	double t_1 = log(x) * (x + -0.5); /* leading term log x * (x - 1/2) */
	double t_2 = (0.083333333333333 + (z * ((z * (y + 0.0007936500793651)) - 0.0027777777777778))) / x; /* series correction / x */
	double tmp;
	if (x <= 2.8e-7) {
		tmp = (0.91893853320467 + (x + t_1)) + t_2;
	} else if (x <= 270000.0) {
		/* mid regime: z-series replaced by its constant term (Taylor around z = 0) */
		tmp = (t_1 - (x + -0.91893853320467)) + (0.083333333333333 / x);
	} else if (x <= 1.2e+170) {
		tmp = t_2 + t_0;
	} else {
		/* very large x: only the dominant x*(log x - 1) term is kept */
		tmp = t_0;
	}
	return tmp;
}
! Herbie alternative 8 (92.5% accurate) of the logFactorial correction
! expression. Four x-regimes; FP evaluation order is deliberate.
real(8) function code(x, y, z)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8) :: t_0
    real(8) :: t_1
    real(8) :: t_2
    real(8) :: tmp
    ! t_0: asymptotic x*(log x - 1); t_1: log x * (x - 1/2); t_2: series / x
    t_0 = x * (log(x) + (-1.0d0))
    t_1 = log(x) * (x + (-0.5d0))
    t_2 = (0.083333333333333d0 + (z * ((z * (y + 0.0007936500793651d0)) - 0.0027777777777778d0))) / x
    if (x <= 2.8d-7) then
        tmp = (0.91893853320467d0 + (x + t_1)) + t_2
    else if (x <= 270000.0d0) then
        ! mid regime: z-series replaced by its constant term (Taylor around z = 0)
        tmp = (t_1 - (x + (-0.91893853320467d0))) + (0.083333333333333d0 / x)
    else if (x <= 1.2d+170) then
        tmp = t_2 + t_0
    else
        ! very large x: only the dominant x*(log x - 1) term is kept
        tmp = t_0
    end if
    code = tmp
end function
/**
 * Herbie alternative 8 (92.5% accurate) of the logFactorial correction
 * expression. The x domain is split into four regimes; the floating-point
 * evaluation order is deliberate and must not be reassociated.
 */
public static double code(double x, double y, double z) {
	double t_0 = x * (Math.log(x) + -1.0); // asymptotic term x*(log x - 1)
	double t_1 = Math.log(x) * (x + -0.5); // leading term log x * (x - 1/2)
	double t_2 = (0.083333333333333 + (z * ((z * (y + 0.0007936500793651)) - 0.0027777777777778))) / x; // series correction / x
	double tmp;
	if (x <= 2.8e-7) {
		tmp = (0.91893853320467 + (x + t_1)) + t_2;
	} else if (x <= 270000.0) {
		// mid regime: z-series replaced by its constant term (Taylor around z = 0)
		tmp = (t_1 - (x + -0.91893853320467)) + (0.083333333333333 / x);
	} else if (x <= 1.2e+170) {
		tmp = t_2 + t_0;
	} else {
		// very large x: only the dominant x*(log x - 1) term is kept
		tmp = t_0;
	}
	return tmp;
}
def code(x, y, z):
	"""Herbie alternative 8 (92.5% accurate) of the logFactorial correction
	expression: four x-regimes. The floating-point operations below are the
	same, in the same order, as the generated form, so results are bit-identical.
	"""
	log_x = math.log(x)
	asymptotic = x * (log_x + -1.0)   # x*(log x - 1), dominant for large x
	leading = log_x * (x + -0.5)      # log x * (x - 1/2)
	series = (0.083333333333333 + (z * ((z * (y + 0.0007936500793651)) - 0.0027777777777778))) / x
	if x <= 2.8e-7:
		return (0.91893853320467 + (x + leading)) + series
	if x <= 270000.0:
		# mid regime: z-series replaced by its constant term (Taylor around z = 0)
		return (leading - (x + -0.91893853320467)) + (0.083333333333333 / x)
	if x <= 1.2e+170:
		return series + asymptotic
	return asymptotic
# Herbie alternative 8 (92.5% accurate) of the logFactorial correction
# expression. Four x-regimes; explicit Float64 wrappers pin each rounding step.
function code(x, y, z)
	t_0 = Float64(x * Float64(log(x) + -1.0)) # asymptotic term x*(log x - 1)
	t_1 = Float64(log(x) * Float64(x + -0.5)) # leading term log x * (x - 1/2)
	t_2 = Float64(Float64(0.083333333333333 + Float64(z * Float64(Float64(z * Float64(y + 0.0007936500793651)) - 0.0027777777777778))) / x) # series correction / x
	tmp = 0.0
	if (x <= 2.8e-7)
		tmp = Float64(Float64(0.91893853320467 + Float64(x + t_1)) + t_2);
	elseif (x <= 270000.0)
		# mid regime: z-series replaced by its constant term (Taylor around z = 0)
		tmp = Float64(Float64(t_1 - Float64(x + -0.91893853320467)) + Float64(0.083333333333333 / x));
	elseif (x <= 1.2e+170)
		tmp = Float64(t_2 + t_0);
	else
		tmp = t_0;
	end
	return tmp
end
% Herbie alternative 8 (92.5% accurate) of the logFactorial correction
% expression. Four x-regimes; FP evaluation order is deliberate.
function tmp_2 = code(x, y, z)
	t_0 = x * (log(x) + -1.0); % asymptotic term x*(log x - 1)
	t_1 = log(x) * (x + -0.5); % leading term log x * (x - 1/2)
	t_2 = (0.083333333333333 + (z * ((z * (y + 0.0007936500793651)) - 0.0027777777777778))) / x; % series correction / x
	tmp = 0.0;
	if (x <= 2.8e-7)
		tmp = (0.91893853320467 + (x + t_1)) + t_2;
	elseif (x <= 270000.0)
		% mid regime: z-series replaced by its constant term (Taylor around z = 0)
		tmp = (t_1 - (x + -0.91893853320467)) + (0.083333333333333 / x);
	elseif (x <= 1.2e+170)
		tmp = t_2 + t_0;
	else
		tmp = t_0;
	end
	tmp_2 = tmp;
end
(* Herbie alternative 8 (92.5% accurate): four x-regimes sharing three Block-bound subterms; every operation is rounded at machine precision. *)
code[x_, y_, z_] := Block[{t$95$0 = N[(x * N[(N[Log[x], $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(N[Log[x], $MachinePrecision] * N[(x + -0.5), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$2 = N[(N[(0.083333333333333 + N[(z * N[(N[(z * N[(y + 0.0007936500793651), $MachinePrecision]), $MachinePrecision] - 0.0027777777777778), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / x), $MachinePrecision]}, If[LessEqual[x, 2.8e-7], N[(N[(0.91893853320467 + N[(x + t$95$1), $MachinePrecision]), $MachinePrecision] + t$95$2), $MachinePrecision], If[LessEqual[x, 270000.0], N[(N[(t$95$1 - N[(x + -0.91893853320467), $MachinePrecision]), $MachinePrecision] + N[(0.083333333333333 / x), $MachinePrecision]), $MachinePrecision], If[LessEqual[x, 1.2e+170], N[(t$95$2 + t$95$0), $MachinePrecision], t$95$0]]]]]]
\begin{array}{l}

\\
\begin{array}{l}
t_0 := x \cdot \left(\log x + -1\right)\\
t_1 := \log x \cdot \left(x + -0.5\right)\\
t_2 := \frac{0.083333333333333 + z \cdot \left(z \cdot \left(y + 0.0007936500793651\right) - 0.0027777777777778\right)}{x}\\
\mathbf{if}\;x \leq 2.8 \cdot 10^{-7}:\\
\;\;\;\;\left(0.91893853320467 + \left(x + t_1\right)\right) + t_2\\

\mathbf{elif}\;x \leq 270000:\\
\;\;\;\;\left(t_1 - \left(x + -0.91893853320467\right)\right) + \frac{0.083333333333333}{x}\\

\mathbf{elif}\;x \leq 1.2 \cdot 10^{+170}:\\
\;\;\;\;t_2 + t_0\\

\mathbf{else}:\\
\;\;\;\;t_0\\


\end{array}
\end{array}
Derivation
  1. Split input into 4 regimes
  2. if x < 2.80000000000000019e-7

    1. Initial program 99.7%

      \[\left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
    2. Add Preprocessing
    3. Step-by-step derivation
      1. sub-neg99.7%

        \[\leadsto \left(\color{blue}{\left(\left(x - 0.5\right) \cdot \log x + \left(-x\right)\right)} + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
      2. add-sqr-sqrt0.0%

        \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x + \color{blue}{\sqrt{-x} \cdot \sqrt{-x}}\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
      3. sqrt-unprod99.7%

        \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x + \color{blue}{\sqrt{\left(-x\right) \cdot \left(-x\right)}}\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
      4. sqr-neg99.7%

        \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x + \sqrt{\color{blue}{x \cdot x}}\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
      5. sqrt-unprod99.7%

        \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x + \color{blue}{\sqrt{x} \cdot \sqrt{x}}\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
      6. add-sqr-sqrt99.7%

        \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x + \color{blue}{x}\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
      7. sub-neg99.7%

        \[\leadsto \left(\left(\color{blue}{\left(x + \left(-0.5\right)\right)} \cdot \log x + x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
      8. metadata-eval99.7%

        \[\leadsto \left(\left(\left(x + \color{blue}{-0.5}\right) \cdot \log x + x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
      9. *-commutative99.7%

        \[\leadsto \left(\left(\color{blue}{\log x \cdot \left(x + -0.5\right)} + x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
    4. Applied egg-rr99.7%

      \[\leadsto \left(\color{blue}{\left(\log x \cdot \left(x + -0.5\right) + x\right)} + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]

    if 2.80000000000000019e-7 < x < 2.7e5

    1. Initial program 99.7%

      \[\left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
    2. Add Preprocessing
    3. Taylor expanded in z around 0 97.3%

      \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \color{blue}{\frac{0.083333333333333}{x}} \]
    4. Step-by-step derivation
      1. associate-+l-99.8%

        \[\leadsto \color{blue}{\left(\left(x - 0.5\right) \cdot \log x - \left(x - 0.91893853320467\right)\right)} + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
      2. sub-neg99.8%

        \[\leadsto \left(\color{blue}{\left(x + \left(-0.5\right)\right)} \cdot \log x - \left(x - 0.91893853320467\right)\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
      3. metadata-eval99.8%

        \[\leadsto \left(\left(x + \color{blue}{-0.5}\right) \cdot \log x - \left(x - 0.91893853320467\right)\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
      4. *-commutative99.8%

        \[\leadsto \left(\color{blue}{\log x \cdot \left(x + -0.5\right)} - \left(x - 0.91893853320467\right)\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
      5. sub-neg99.8%

        \[\leadsto \left(\log x \cdot \left(x + -0.5\right) - \color{blue}{\left(x + \left(-0.91893853320467\right)\right)}\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
      6. metadata-eval99.8%

        \[\leadsto \left(\log x \cdot \left(x + -0.5\right) - \left(x + \color{blue}{-0.91893853320467}\right)\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
    5. Applied egg-rr97.4%

      \[\leadsto \color{blue}{\left(\log x \cdot \left(x + -0.5\right) - \left(x + -0.91893853320467\right)\right)} + \frac{0.083333333333333}{x} \]

    if 2.7e5 < x < 1.2e170

    1. Initial program 93.8%

      \[\left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
    2. Add Preprocessing
    3. Taylor expanded in x around inf 93.4%

      \[\leadsto \color{blue}{x \cdot \left(-1 \cdot \log \left(\frac{1}{x}\right) - 1\right)} + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
    4. Step-by-step derivation
      1. sub-neg93.4%

        \[\leadsto x \cdot \color{blue}{\left(-1 \cdot \log \left(\frac{1}{x}\right) + \left(-1\right)\right)} + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
      2. mul-1-neg93.4%

        \[\leadsto x \cdot \left(\color{blue}{\left(-\log \left(\frac{1}{x}\right)\right)} + \left(-1\right)\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
      3. log-rec93.4%

        \[\leadsto x \cdot \left(\left(-\color{blue}{\left(-\log x\right)}\right) + \left(-1\right)\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
      4. remove-double-neg93.4%

        \[\leadsto x \cdot \left(\color{blue}{\log x} + \left(-1\right)\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
      5. metadata-eval93.4%

        \[\leadsto x \cdot \left(\log x + \color{blue}{-1}\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
    5. Simplified93.4%

      \[\leadsto \color{blue}{x \cdot \left(\log x + -1\right)} + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]

    if 1.2e170 < x

    1. Initial program 71.2%

      \[\left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
    2. Step-by-step derivation
      1. associate-+l+71.2%

        \[\leadsto \color{blue}{\left(\left(x - 0.5\right) \cdot \log x - x\right) + \left(0.91893853320467 + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x}\right)} \]
      2. fma-neg71.4%

        \[\leadsto \color{blue}{\mathsf{fma}\left(x - 0.5, \log x, -x\right)} + \left(0.91893853320467 + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x}\right) \]
      3. sub-neg71.4%

        \[\leadsto \mathsf{fma}\left(\color{blue}{x + \left(-0.5\right)}, \log x, -x\right) + \left(0.91893853320467 + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x}\right) \]
      4. metadata-eval71.4%

        \[\leadsto \mathsf{fma}\left(x + \color{blue}{-0.5}, \log x, -x\right) + \left(0.91893853320467 + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x}\right) \]
      5. fma-def71.4%

        \[\leadsto \mathsf{fma}\left(x + -0.5, \log x, -x\right) + \left(0.91893853320467 + \frac{\color{blue}{\mathsf{fma}\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778, z, 0.083333333333333\right)}}{x}\right) \]
      6. fma-neg71.4%

        \[\leadsto \mathsf{fma}\left(x + -0.5, \log x, -x\right) + \left(0.91893853320467 + \frac{\mathsf{fma}\left(\color{blue}{\mathsf{fma}\left(y + 0.0007936500793651, z, -0.0027777777777778\right)}, z, 0.083333333333333\right)}{x}\right) \]
      7. metadata-eval71.4%

        \[\leadsto \mathsf{fma}\left(x + -0.5, \log x, -x\right) + \left(0.91893853320467 + \frac{\mathsf{fma}\left(\mathsf{fma}\left(y + 0.0007936500793651, z, \color{blue}{-0.0027777777777778}\right), z, 0.083333333333333\right)}{x}\right) \]
    3. Simplified71.4%

      \[\leadsto \color{blue}{\mathsf{fma}\left(x + -0.5, \log x, -x\right) + \left(0.91893853320467 + \frac{\mathsf{fma}\left(\mathsf{fma}\left(y + 0.0007936500793651, z, -0.0027777777777778\right), z, 0.083333333333333\right)}{x}\right)} \]
    4. Add Preprocessing
    5. Taylor expanded in z around 0 85.1%

      \[\leadsto \mathsf{fma}\left(x + -0.5, \log x, -x\right) + \left(0.91893853320467 + \frac{\color{blue}{0.083333333333333}}{x}\right) \]
    6. Taylor expanded in x around inf 85.1%

      \[\leadsto \color{blue}{x \cdot \left(-1 \cdot \log \left(\frac{1}{x}\right) - 1\right)} \]
    7. Step-by-step derivation
      1. sub-neg85.1%

        \[\leadsto x \cdot \color{blue}{\left(-1 \cdot \log \left(\frac{1}{x}\right) + \left(-1\right)\right)} \]
      2. mul-1-neg85.1%

        \[\leadsto x \cdot \left(\color{blue}{\left(-\log \left(\frac{1}{x}\right)\right)} + \left(-1\right)\right) \]
      3. log-rec85.1%

        \[\leadsto x \cdot \left(\left(-\color{blue}{\left(-\log x\right)}\right) + \left(-1\right)\right) \]
      4. remove-double-neg85.1%

        \[\leadsto x \cdot \left(\color{blue}{\log x} + \left(-1\right)\right) \]
      5. metadata-eval85.1%

        \[\leadsto x \cdot \left(\log x + \color{blue}{-1}\right) \]
    8. Simplified85.1%

      \[\leadsto \color{blue}{x \cdot \left(\log x + -1\right)} \]
  3. Recombined 4 regimes into one program.
  4. Final simplification (94.7% accurate)

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq 2.8 \cdot 10^{-7}:\\ \;\;\;\;\left(0.91893853320467 + \left(x + \log x \cdot \left(x + -0.5\right)\right)\right) + \frac{0.083333333333333 + z \cdot \left(z \cdot \left(y + 0.0007936500793651\right) - 0.0027777777777778\right)}{x}\\ \mathbf{elif}\;x \leq 270000:\\ \;\;\;\;\left(\log x \cdot \left(x + -0.5\right) - \left(x + -0.91893853320467\right)\right) + \frac{0.083333333333333}{x}\\ \mathbf{elif}\;x \leq 1.2 \cdot 10^{+170}:\\ \;\;\;\;\frac{0.083333333333333 + z \cdot \left(z \cdot \left(y + 0.0007936500793651\right) - 0.0027777777777778\right)}{x} + x \cdot \left(\log x + -1\right)\\ \mathbf{else}:\\ \;\;\;\;x \cdot \left(\log x + -1\right)\\ \end{array} \]
  5. Add Preprocessing

Alternative 9: 93.7% accurate, 1.0× speedup

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq 3.3 \cdot 10^{+168}:\\ \;\;\;\;\left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \frac{0.083333333333333 + z \cdot \left(z \cdot \left(y + 0.0007936500793651\right) - 0.0027777777777778\right)}{x}\\ \mathbf{else}:\\ \;\;\;\;x \cdot \left(\log x + -1\right)\\ \end{array} \end{array} \]
;; Herbie alternative 9 (93.7% accurate): original expression for
;; x <= 3.3e168, asymptotic x*(log x - 1) above that threshold.
(FPCore (x y z)
 :precision binary64
 (if (<= x 3.3e+168)
   (+
    (+ (- (* (- x 0.5) (log x)) x) 0.91893853320467)
    (/
     (+
      0.083333333333333
      (* z (- (* z (+ y 0.0007936500793651)) 0.0027777777777778)))
     x))
   (* x (+ (log x) -1.0))))
/*
 * Herbie alternative 9 (93.7% accurate): full expression for x <= 3.3e168,
 * asymptotic x*(log x - 1) above. FP evaluation order is deliberate.
 */
double code(double x, double y, double z) {
	double tmp;
	if (x <= 3.3e+168) {
		tmp = ((((x - 0.5) * log(x)) - x) + 0.91893853320467) + ((0.083333333333333 + (z * ((z * (y + 0.0007936500793651)) - 0.0027777777777778))) / x);
	} else {
		/* very large x: only the dominant x*(log x - 1) term is kept */
		tmp = x * (log(x) + -1.0);
	}
	return tmp;
}
! Herbie alternative 9 (93.7% accurate): full expression for x <= 3.3e168,
! asymptotic x*(log x - 1) above. FP evaluation order is deliberate.
real(8) function code(x, y, z)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8) :: tmp
    if (x <= 3.3d+168) then
        tmp = ((((x - 0.5d0) * log(x)) - x) + 0.91893853320467d0) + ((0.083333333333333d0 + (z * ((z * (y + 0.0007936500793651d0)) - 0.0027777777777778d0))) / x)
    else
        ! very large x: only the dominant x*(log x - 1) term is kept
        tmp = x * (log(x) + (-1.0d0))
    end if
    code = tmp
end function
/**
 * Herbie alternative 9 (93.7% accurate): full expression for x <= 3.3e168,
 * asymptotic x*(log x - 1) above. FP evaluation order is deliberate.
 */
public static double code(double x, double y, double z) {
	double tmp;
	if (x <= 3.3e+168) {
		tmp = ((((x - 0.5) * Math.log(x)) - x) + 0.91893853320467) + ((0.083333333333333 + (z * ((z * (y + 0.0007936500793651)) - 0.0027777777777778))) / x);
	} else {
		// very large x: only the dominant x*(log x - 1) term is kept
		tmp = x * (Math.log(x) + -1.0);
	}
	return tmp;
}
def code(x, y, z):
	"""Herbie alternative 9 (93.7% accurate): full expression for
	x <= 3.3e168, asymptotic x*(log x - 1) above. Same floating-point
	operations in the same order as the generated form (bit-identical).
	"""
	if x <= 3.3e+168:
		main_term = (((x - 0.5) * math.log(x)) - x) + 0.91893853320467
		correction = (0.083333333333333 + (z * ((z * (y + 0.0007936500793651)) - 0.0027777777777778))) / x
		return main_term + correction
	return x * (math.log(x) + -1.0)
# Herbie alternative 9 (93.7% accurate): full expression for x <= 3.3e168,
# asymptotic x*(log x - 1) above. Float64 wrappers pin each rounding step.
function code(x, y, z)
	tmp = 0.0
	if (x <= 3.3e+168)
		tmp = Float64(Float64(Float64(Float64(Float64(x - 0.5) * log(x)) - x) + 0.91893853320467) + Float64(Float64(0.083333333333333 + Float64(z * Float64(Float64(z * Float64(y + 0.0007936500793651)) - 0.0027777777777778))) / x));
	else
		# very large x: only the dominant x*(log x - 1) term is kept
		tmp = Float64(x * Float64(log(x) + -1.0));
	end
	return tmp
end
% Herbie alternative 9 (93.7% accurate): full expression for x <= 3.3e168,
% asymptotic x*(log x - 1) above. FP evaluation order is deliberate.
function tmp_2 = code(x, y, z)
	tmp = 0.0;
	if (x <= 3.3e+168)
		tmp = ((((x - 0.5) * log(x)) - x) + 0.91893853320467) + ((0.083333333333333 + (z * ((z * (y + 0.0007936500793651)) - 0.0027777777777778))) / x);
	else
		% very large x: only the dominant x*(log x - 1) term is kept
		tmp = x * (log(x) + -1.0);
	end
	tmp_2 = tmp;
end
(* Herbie alternative 9 (93.7% accurate): full expression for x <= 3.3e168, asymptotic x*(Log[x] - 1) above; every operation rounded at machine precision. *)
code[x_, y_, z_] := If[LessEqual[x, 3.3e+168], N[(N[(N[(N[(N[(x - 0.5), $MachinePrecision] * N[Log[x], $MachinePrecision]), $MachinePrecision] - x), $MachinePrecision] + 0.91893853320467), $MachinePrecision] + N[(N[(0.083333333333333 + N[(z * N[(N[(z * N[(y + 0.0007936500793651), $MachinePrecision]), $MachinePrecision] - 0.0027777777777778), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / x), $MachinePrecision]), $MachinePrecision], N[(x * N[(N[Log[x], $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x \leq 3.3 \cdot 10^{+168}:\\
\;\;\;\;\left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \frac{0.083333333333333 + z \cdot \left(z \cdot \left(y + 0.0007936500793651\right) - 0.0027777777777778\right)}{x}\\

\mathbf{else}:\\
\;\;\;\;x \cdot \left(\log x + -1\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if x < 3.2999999999999999e168

    1. Initial program 97.8%

      \[\left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
    2. Add Preprocessing

    if 3.2999999999999999e168 < x

    1. Initial program 71.2%

      \[\left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
    2. Step-by-step derivation
      1. associate-+l+71.2%

        \[\leadsto \color{blue}{\left(\left(x - 0.5\right) \cdot \log x - x\right) + \left(0.91893853320467 + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x}\right)} \]
      2. fma-neg71.4%

        \[\leadsto \color{blue}{\mathsf{fma}\left(x - 0.5, \log x, -x\right)} + \left(0.91893853320467 + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x}\right) \]
      3. sub-neg71.4%

        \[\leadsto \mathsf{fma}\left(\color{blue}{x + \left(-0.5\right)}, \log x, -x\right) + \left(0.91893853320467 + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x}\right) \]
      4. metadata-eval71.4%

        \[\leadsto \mathsf{fma}\left(x + \color{blue}{-0.5}, \log x, -x\right) + \left(0.91893853320467 + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x}\right) \]
      5. fma-def71.4%

        \[\leadsto \mathsf{fma}\left(x + -0.5, \log x, -x\right) + \left(0.91893853320467 + \frac{\color{blue}{\mathsf{fma}\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778, z, 0.083333333333333\right)}}{x}\right) \]
      6. fma-neg71.4%

        \[\leadsto \mathsf{fma}\left(x + -0.5, \log x, -x\right) + \left(0.91893853320467 + \frac{\mathsf{fma}\left(\color{blue}{\mathsf{fma}\left(y + 0.0007936500793651, z, -0.0027777777777778\right)}, z, 0.083333333333333\right)}{x}\right) \]
      7. metadata-eval71.4%

        \[\leadsto \mathsf{fma}\left(x + -0.5, \log x, -x\right) + \left(0.91893853320467 + \frac{\mathsf{fma}\left(\mathsf{fma}\left(y + 0.0007936500793651, z, \color{blue}{-0.0027777777777778}\right), z, 0.083333333333333\right)}{x}\right) \]
    3. Simplified71.4%

      \[\leadsto \color{blue}{\mathsf{fma}\left(x + -0.5, \log x, -x\right) + \left(0.91893853320467 + \frac{\mathsf{fma}\left(\mathsf{fma}\left(y + 0.0007936500793651, z, -0.0027777777777778\right), z, 0.083333333333333\right)}{x}\right)} \]
    4. Add Preprocessing
    5. Taylor expanded in z around 0 85.1%

      \[\leadsto \mathsf{fma}\left(x + -0.5, \log x, -x\right) + \left(0.91893853320467 + \frac{\color{blue}{0.083333333333333}}{x}\right) \]
    6. Taylor expanded in x around inf 85.1%

      \[\leadsto \color{blue}{x \cdot \left(-1 \cdot \log \left(\frac{1}{x}\right) - 1\right)} \]
    7. Step-by-step derivation
      1. sub-neg85.1%

        \[\leadsto x \cdot \color{blue}{\left(-1 \cdot \log \left(\frac{1}{x}\right) + \left(-1\right)\right)} \]
      2. mul-1-neg85.1%

        \[\leadsto x \cdot \left(\color{blue}{\left(-\log \left(\frac{1}{x}\right)\right)} + \left(-1\right)\right) \]
      3. log-rec85.1%

        \[\leadsto x \cdot \left(\left(-\color{blue}{\left(-\log x\right)}\right) + \left(-1\right)\right) \]
      4. remove-double-neg85.1%

        \[\leadsto x \cdot \left(\color{blue}{\log x} + \left(-1\right)\right) \]
      5. metadata-eval85.1%

        \[\leadsto x \cdot \left(\log x + \color{blue}{-1}\right) \]
    8. Simplified85.1%

      \[\leadsto \color{blue}{x \cdot \left(\log x + -1\right)} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification (94.8% accurate)

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq 3.3 \cdot 10^{+168}:\\ \;\;\;\;\left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \frac{0.083333333333333 + z \cdot \left(z \cdot \left(y + 0.0007936500793651\right) - 0.0027777777777778\right)}{x}\\ \mathbf{else}:\\ \;\;\;\;x \cdot \left(\log x + -1\right)\\ \end{array} \]
  5. Add Preprocessing

Alternative 10: 62.7% accurate, 1.1× speedup

\[\begin{array}{l} \\ \left(\log x \cdot \left(x + -0.5\right) - \left(x + -0.91893853320467\right)\right) + \frac{0.083333333333333 + z \cdot -0.0027777777777778}{x} \end{array} \]
;; Herbie alternative 10 (62.7% accurate): z-series truncated to its
;; linear term (Taylor around z = 0); y no longer affects the result.
(FPCore (x y z)
 :precision binary64
 (+
  (- (* (log x) (+ x -0.5)) (+ x -0.91893853320467))
  (/ (+ 0.083333333333333 (* z -0.0027777777777778)) x)))
/* Herbie alternative 10 (62.7% accurate): z-series truncated to its linear
 * term (Taylor around z = 0); y is accepted but unused. */
double code(double x, double y, double z) {
	return ((log(x) * (x + -0.5)) - (x + -0.91893853320467)) + ((0.083333333333333 + (z * -0.0027777777777778)) / x);
}
! Herbie alternative 10 (62.7% accurate): z-series truncated to its linear
! term (Taylor around z = 0); y is accepted but unused.
real(8) function code(x, y, z)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    code = ((log(x) * (x + (-0.5d0))) - (x + (-0.91893853320467d0))) + ((0.083333333333333d0 + (z * (-0.0027777777777778d0))) / x)
end function
/** Herbie alternative 10 (62.7% accurate): z-series truncated to its linear
 *  term (Taylor around z = 0); y is accepted but unused. */
public static double code(double x, double y, double z) {
	return ((Math.log(x) * (x + -0.5)) - (x + -0.91893853320467)) + ((0.083333333333333 + (z * -0.0027777777777778)) / x);
}
def code(x, y, z):
	"""Herbie alternative 10 (62.7% accurate): z-series truncated to its
	linear term (Taylor around z = 0); y is accepted but unused. Same
	floating-point operations in the same order as the generated form.
	"""
	main_term = (math.log(x) * (x + -0.5)) - (x + -0.91893853320467)
	tail = (0.083333333333333 + (z * -0.0027777777777778)) / x
	return main_term + tail
# Herbie alternative 10 (62.7% accurate): z-series truncated to its linear
# term (Taylor around z = 0); y is accepted but unused.
function code(x, y, z)
	return Float64(Float64(Float64(log(x) * Float64(x + -0.5)) - Float64(x + -0.91893853320467)) + Float64(Float64(0.083333333333333 + Float64(z * -0.0027777777777778)) / x))
end
% Herbie alternative 10 (62.7% accurate): z-series truncated to its linear
% term (Taylor around z = 0); y is accepted but unused.
function tmp = code(x, y, z)
	tmp = ((log(x) * (x + -0.5)) - (x + -0.91893853320467)) + ((0.083333333333333 + (z * -0.0027777777777778)) / x);
end
(* Herbie alternative 10 (62.7% accurate): z-series truncated to its linear term; y is accepted but unused. *)
code[x_, y_, z_] := N[(N[(N[(N[Log[x], $MachinePrecision] * N[(x + -0.5), $MachinePrecision]), $MachinePrecision] - N[(x + -0.91893853320467), $MachinePrecision]), $MachinePrecision] + N[(N[(0.083333333333333 + N[(z * -0.0027777777777778), $MachinePrecision]), $MachinePrecision] / x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\left(\log x \cdot \left(x + -0.5\right) - \left(x + -0.91893853320467\right)\right) + \frac{0.083333333333333 + z \cdot -0.0027777777777778}{x}
\end{array}
Derivation
  1. Initial program 91.6%

    \[\left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
  2. Add Preprocessing
  3. Step-by-step derivation
    1. associate-+l-91.6%

      \[\leadsto \color{blue}{\left(\left(x - 0.5\right) \cdot \log x - \left(x - 0.91893853320467\right)\right)} + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
    2. sub-neg91.6%

      \[\leadsto \left(\color{blue}{\left(x + \left(-0.5\right)\right)} \cdot \log x - \left(x - 0.91893853320467\right)\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
    3. metadata-eval91.6%

      \[\leadsto \left(\left(x + \color{blue}{-0.5}\right) \cdot \log x - \left(x - 0.91893853320467\right)\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
    4. *-commutative91.6%

      \[\leadsto \left(\color{blue}{\log x \cdot \left(x + -0.5\right)} - \left(x - 0.91893853320467\right)\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
    5. sub-neg91.6%

      \[\leadsto \left(\log x \cdot \left(x + -0.5\right) - \color{blue}{\left(x + \left(-0.91893853320467\right)\right)}\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
    6. metadata-eval91.6%

      \[\leadsto \left(\log x \cdot \left(x + -0.5\right) - \left(x + \color{blue}{-0.91893853320467}\right)\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
  4. Applied egg-rr91.6%

    \[\leadsto \color{blue}{\left(\log x \cdot \left(x + -0.5\right) - \left(x + -0.91893853320467\right)\right)} + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
  5. Taylor expanded in z around 0 66.9%

    \[\leadsto \left(\log x \cdot \left(x + -0.5\right) - \left(x + -0.91893853320467\right)\right) + \frac{\color{blue}{-0.0027777777777778 \cdot z} + 0.083333333333333}{x} \]
  6. Step-by-step derivation
    1. *-commutative66.9%

      \[\leadsto \left(\log x \cdot \left(x + -0.5\right) - \left(x + -0.91893853320467\right)\right) + \frac{\color{blue}{z \cdot -0.0027777777777778} + 0.083333333333333}{x} \]
  7. Simplified (66.9% accurate)

    \[\leadsto \left(\log x \cdot \left(x + -0.5\right) - \left(x + -0.91893853320467\right)\right) + \frac{\color{blue}{z \cdot -0.0027777777777778} + 0.083333333333333}{x} \]
  8. Final simplification (66.9% accurate)

    \[\leadsto \left(\log x \cdot \left(x + -0.5\right) - \left(x + -0.91893853320467\right)\right) + \frac{0.083333333333333 + z \cdot -0.0027777777777778}{x} \]
  9. Add Preprocessing

Alternative 11: 56.3% accurate, 1.1× speedup

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq 1.05:\\ \;\;\;\;0.91893853320467 + \left(0.083333333333333 \cdot \frac{1}{x} + \log x \cdot -0.5\right)\\ \mathbf{else}:\\ \;\;\;\;x \cdot \left(\log x + -1\right)\\ \end{array} \end{array} \]
;; Herbie alternative 11 (56.3% accurate, faster): two coarse x-regimes;
;; y and z do not affect the result.
(FPCore (x y z)
 :precision binary64
 (if (<= x 1.05)
   (+ 0.91893853320467 (+ (* 0.083333333333333 (/ 1.0 x)) (* (log x) -0.5)))
   (* x (+ (log x) -1.0))))
/* Herbie alternative 11 (56.3% accurate, faster): two coarse x-regimes;
 * y and z are accepted but unused. */
double code(double x, double y, double z) {
	double tmp;
	if (x <= 1.05) {
		tmp = 0.91893853320467 + ((0.083333333333333 * (1.0 / x)) + (log(x) * -0.5));
	} else {
		tmp = x * (log(x) + -1.0);
	}
	return tmp;
}
! Herbie alternative 11 (56.3% accurate, faster): two coarse x-regimes;
! y and z are accepted but unused.
real(8) function code(x, y, z)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8) :: tmp
    if (x <= 1.05d0) then
        tmp = 0.91893853320467d0 + ((0.083333333333333d0 * (1.0d0 / x)) + (log(x) * (-0.5d0)))
    else
        tmp = x * (log(x) + (-1.0d0))
    end if
    code = tmp
end function
/** Herbie alternative 11 (56.3% accurate, faster): two coarse x-regimes;
 *  y and z are accepted but unused. */
public static double code(double x, double y, double z) {
	double tmp;
	if (x <= 1.05) {
		tmp = 0.91893853320467 + ((0.083333333333333 * (1.0 / x)) + (Math.log(x) * -0.5));
	} else {
		tmp = x * (Math.log(x) + -1.0);
	}
	return tmp;
}
def code(x, y, z):
	"""Herbie alternative 11 (56.3% accurate, faster): two coarse x-regimes;
	y and z are accepted but unused. Same floating-point operations in the
	same order as the generated form (bit-identical results).
	"""
	if x <= 1.05:
		recip_term = 0.083333333333333 * (1.0 / x)
		return 0.91893853320467 + (recip_term + (math.log(x) * -0.5))
	return x * (math.log(x) + -1.0)
# Herbie alternative 11 (56.3% accurate, faster): two coarse x-regimes;
# y and z are accepted but unused.
function code(x, y, z)
	tmp = 0.0
	if (x <= 1.05)
		tmp = Float64(0.91893853320467 + Float64(Float64(0.083333333333333 * Float64(1.0 / x)) + Float64(log(x) * -0.5)));
	else
		tmp = Float64(x * Float64(log(x) + -1.0));
	end
	return tmp
end
function tmp_2 = code(x, y, z)
	tmp = 0.0;
	if (x <= 1.05)
		tmp = 0.91893853320467 + ((0.083333333333333 * (1.0 / x)) + (log(x) * -0.5));
	else
		tmp = x * (log(x) + -1.0);
	end
	tmp_2 = tmp;
end
code[x_, y_, z_] := If[LessEqual[x, 1.05], N[(0.91893853320467 + N[(N[(0.083333333333333 * N[(1.0 / x), $MachinePrecision]), $MachinePrecision] + N[(N[Log[x], $MachinePrecision] * -0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(x * N[(N[Log[x], $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x \leq 1.05:\\
\;\;\;\;0.91893853320467 + \left(0.083333333333333 \cdot \frac{1}{x} + \log x \cdot -0.5\right)\\

\mathbf{else}:\\
\;\;\;\;x \cdot \left(\log x + -1\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if x < 1.05000000000000004

    1. Initial program 99.7%

      \[\left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
    2. Step-by-step derivation
      1. associate-+l+ 99.6%

        \[\leadsto \color{blue}{\left(\left(x - 0.5\right) \cdot \log x - x\right) + \left(0.91893853320467 + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x}\right)} \]
      2. fma-neg 99.7%

        \[\leadsto \color{blue}{\mathsf{fma}\left(x - 0.5, \log x, -x\right)} + \left(0.91893853320467 + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x}\right) \]
      3. sub-neg 99.7%

        \[\leadsto \mathsf{fma}\left(\color{blue}{x + \left(-0.5\right)}, \log x, -x\right) + \left(0.91893853320467 + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x}\right) \]
      4. metadata-eval 99.7%

        \[\leadsto \mathsf{fma}\left(x + \color{blue}{-0.5}, \log x, -x\right) + \left(0.91893853320467 + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x}\right) \]
      5. fma-def 99.7%

        \[\leadsto \mathsf{fma}\left(x + -0.5, \log x, -x\right) + \left(0.91893853320467 + \frac{\color{blue}{\mathsf{fma}\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778, z, 0.083333333333333\right)}}{x}\right) \]
      6. fma-neg 99.7%

        \[\leadsto \mathsf{fma}\left(x + -0.5, \log x, -x\right) + \left(0.91893853320467 + \frac{\mathsf{fma}\left(\color{blue}{\mathsf{fma}\left(y + 0.0007936500793651, z, -0.0027777777777778\right)}, z, 0.083333333333333\right)}{x}\right) \]
      7. metadata-eval 99.7%

        \[\leadsto \mathsf{fma}\left(x + -0.5, \log x, -x\right) + \left(0.91893853320467 + \frac{\mathsf{fma}\left(\mathsf{fma}\left(y + 0.0007936500793651, z, \color{blue}{-0.0027777777777778}\right), z, 0.083333333333333\right)}{x}\right) \]
    3. Simplified 99.7%

      \[\leadsto \color{blue}{\mathsf{fma}\left(x + -0.5, \log x, -x\right) + \left(0.91893853320467 + \frac{\mathsf{fma}\left(\mathsf{fma}\left(y + 0.0007936500793651, z, -0.0027777777777778\right), z, 0.083333333333333\right)}{x}\right)} \]
    4. Add Preprocessing
    5. Taylor expanded in z around 0 50.9%

      \[\leadsto \mathsf{fma}\left(x + -0.5, \log x, -x\right) + \left(0.91893853320467 + \frac{\color{blue}{0.083333333333333}}{x}\right) \]
    6. Taylor expanded in x around 0 49.3%

      \[\leadsto \color{blue}{0.91893853320467 + \left(-0.5 \cdot \log x + 0.083333333333333 \cdot \frac{1}{x}\right)} \]

    if 1.05000000000000004 < x

    1. Initial program 83.1%

      \[\left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
    2. Step-by-step derivation
      1. associate-+l+ 83.1%

        \[\leadsto \color{blue}{\left(\left(x - 0.5\right) \cdot \log x - x\right) + \left(0.91893853320467 + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x}\right)} \]
      2. fma-neg 83.2%

        \[\leadsto \color{blue}{\mathsf{fma}\left(x - 0.5, \log x, -x\right)} + \left(0.91893853320467 + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x}\right) \]
      3. sub-neg 83.2%

        \[\leadsto \mathsf{fma}\left(\color{blue}{x + \left(-0.5\right)}, \log x, -x\right) + \left(0.91893853320467 + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x}\right) \]
      4. metadata-eval 83.2%

        \[\leadsto \mathsf{fma}\left(x + \color{blue}{-0.5}, \log x, -x\right) + \left(0.91893853320467 + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x}\right) \]
      5. fma-def 83.2%

        \[\leadsto \mathsf{fma}\left(x + -0.5, \log x, -x\right) + \left(0.91893853320467 + \frac{\color{blue}{\mathsf{fma}\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778, z, 0.083333333333333\right)}}{x}\right) \]
      6. fma-neg 83.2%

        \[\leadsto \mathsf{fma}\left(x + -0.5, \log x, -x\right) + \left(0.91893853320467 + \frac{\mathsf{fma}\left(\color{blue}{\mathsf{fma}\left(y + 0.0007936500793651, z, -0.0027777777777778\right)}, z, 0.083333333333333\right)}{x}\right) \]
      7. metadata-eval 83.2%

        \[\leadsto \mathsf{fma}\left(x + -0.5, \log x, -x\right) + \left(0.91893853320467 + \frac{\mathsf{fma}\left(\mathsf{fma}\left(y + 0.0007936500793651, z, \color{blue}{-0.0027777777777778}\right), z, 0.083333333333333\right)}{x}\right) \]
    3. Simplified 83.2%

      \[\leadsto \color{blue}{\mathsf{fma}\left(x + -0.5, \log x, -x\right) + \left(0.91893853320467 + \frac{\mathsf{fma}\left(\mathsf{fma}\left(y + 0.0007936500793651, z, -0.0027777777777778\right), z, 0.083333333333333\right)}{x}\right)} \]
    4. Add Preprocessing
    5. Taylor expanded in z around 0 70.2%

      \[\leadsto \mathsf{fma}\left(x + -0.5, \log x, -x\right) + \left(0.91893853320467 + \frac{\color{blue}{0.083333333333333}}{x}\right) \]
    6. Taylor expanded in x around inf 68.5%

      \[\leadsto \color{blue}{x \cdot \left(-1 \cdot \log \left(\frac{1}{x}\right) - 1\right)} \]
    7. Step-by-step derivation
      1. sub-neg 68.5%

        \[\leadsto x \cdot \color{blue}{\left(-1 \cdot \log \left(\frac{1}{x}\right) + \left(-1\right)\right)} \]
      2. mul-1-neg 68.5%

        \[\leadsto x \cdot \left(\color{blue}{\left(-\log \left(\frac{1}{x}\right)\right)} + \left(-1\right)\right) \]
      3. log-rec 68.5%

        \[\leadsto x \cdot \left(\left(-\color{blue}{\left(-\log x\right)}\right) + \left(-1\right)\right) \]
      4. remove-double-neg 68.5%

        \[\leadsto x \cdot \left(\color{blue}{\log x} + \left(-1\right)\right) \]
      5. metadata-eval 68.5%

        \[\leadsto x \cdot \left(\log x + \color{blue}{-1}\right) \]
    8. Simplified 68.5%

      \[\leadsto \color{blue}{x \cdot \left(\log x + -1\right)} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification 58.7%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq 1.05:\\ \;\;\;\;0.91893853320467 + \left(0.083333333333333 \cdot \frac{1}{x} + \log x \cdot -0.5\right)\\ \mathbf{else}:\\ \;\;\;\;x \cdot \left(\log x + -1\right)\\ \end{array} \]
  5. Add Preprocessing

Alternative 12: 56.8% accurate, 1.1× speedup?

\[\begin{array}{l} \\ \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \frac{1}{x \cdot 12.000000000000048} \end{array} \]
(FPCore (x y z)
 :precision binary64
 (+
  (+ (- (* (- x 0.5) (log x)) x) 0.91893853320467)
  (/ 1.0 (* x 12.000000000000048))))
double code(double x, double y, double z) {
	return ((((x - 0.5) * log(x)) - x) + 0.91893853320467) + (1.0 / (x * 12.000000000000048));
}
real(8) function code(x, y, z)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    code = ((((x - 0.5d0) * log(x)) - x) + 0.91893853320467d0) + (1.0d0 / (x * 12.000000000000048d0))
end function
public static double code(double x, double y, double z) {
	return ((((x - 0.5) * Math.log(x)) - x) + 0.91893853320467) + (1.0 / (x * 12.000000000000048));
}
def code(x, y, z):
	return ((((x - 0.5) * math.log(x)) - x) + 0.91893853320467) + (1.0 / (x * 12.000000000000048))
function code(x, y, z)
	return Float64(Float64(Float64(Float64(Float64(x - 0.5) * log(x)) - x) + 0.91893853320467) + Float64(1.0 / Float64(x * 12.000000000000048)))
end
function tmp = code(x, y, z)
	tmp = ((((x - 0.5) * log(x)) - x) + 0.91893853320467) + (1.0 / (x * 12.000000000000048));
end
code[x_, y_, z_] := N[(N[(N[(N[(N[(x - 0.5), $MachinePrecision] * N[Log[x], $MachinePrecision]), $MachinePrecision] - x), $MachinePrecision] + 0.91893853320467), $MachinePrecision] + N[(1.0 / N[(x * 12.000000000000048), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \frac{1}{x \cdot 12.000000000000048}
\end{array}
Derivation
  1. Initial program 91.6%

    \[\left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
  2. Add Preprocessing
  3. Taylor expanded in z around 0 60.3%

    \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \color{blue}{\frac{0.083333333333333}{x}} \]
  4. Step-by-step derivation
    1. clear-num 60.3%

      \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \color{blue}{\frac{1}{\frac{x}{0.083333333333333}}} \]
    2. inv-pow 60.3%

      \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \color{blue}{{\left(\frac{x}{0.083333333333333}\right)}^{-1}} \]
    3. div-inv 60.3%

      \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + {\color{blue}{\left(x \cdot \frac{1}{0.083333333333333}\right)}}^{-1} \]
    4. metadata-eval 60.3%

      \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + {\left(x \cdot \color{blue}{12.000000000000048}\right)}^{-1} \]
  5. Applied egg-rr 60.3%

    \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \color{blue}{{\left(x \cdot 12.000000000000048\right)}^{-1}} \]
  6. Step-by-step derivation
    1. unpow-1 60.3%

      \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \color{blue}{\frac{1}{x \cdot 12.000000000000048}} \]
  7. Simplified 60.3%

    \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \color{blue}{\frac{1}{x \cdot 12.000000000000048}} \]
  8. Final simplification 60.3%

    \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \frac{1}{x \cdot 12.000000000000048} \]
  9. Add Preprocessing

Alternative 13: 56.3% accurate, 1.1× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq 1.05:\\ \;\;\;\;\frac{0.083333333333333}{x} + \left(0.91893853320467 + \log x \cdot -0.5\right)\\ \mathbf{else}:\\ \;\;\;\;x \cdot \left(\log x + -1\right)\\ \end{array} \end{array} \]
(FPCore (x y z)
 :precision binary64
 (if (<= x 1.05)
   (+ (/ 0.083333333333333 x) (+ 0.91893853320467 (* (log x) -0.5)))
   (* x (+ (log x) -1.0))))
double code(double x, double y, double z) {
	double tmp;
	if (x <= 1.05) {
		tmp = (0.083333333333333 / x) + (0.91893853320467 + (log(x) * -0.5));
	} else {
		tmp = x * (log(x) + -1.0);
	}
	return tmp;
}
real(8) function code(x, y, z)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8) :: tmp
    if (x <= 1.05d0) then
        tmp = (0.083333333333333d0 / x) + (0.91893853320467d0 + (log(x) * (-0.5d0)))
    else
        tmp = x * (log(x) + (-1.0d0))
    end if
    code = tmp
end function
public static double code(double x, double y, double z) {
	double tmp;
	if (x <= 1.05) {
		tmp = (0.083333333333333 / x) + (0.91893853320467 + (Math.log(x) * -0.5));
	} else {
		tmp = x * (Math.log(x) + -1.0);
	}
	return tmp;
}
def code(x, y, z):
	tmp = 0
	if x <= 1.05:
		tmp = (0.083333333333333 / x) + (0.91893853320467 + (math.log(x) * -0.5))
	else:
		tmp = x * (math.log(x) + -1.0)
	return tmp
function code(x, y, z)
	tmp = 0.0
	if (x <= 1.05)
		tmp = Float64(Float64(0.083333333333333 / x) + Float64(0.91893853320467 + Float64(log(x) * -0.5)));
	else
		tmp = Float64(x * Float64(log(x) + -1.0));
	end
	return tmp
end
function tmp_2 = code(x, y, z)
	tmp = 0.0;
	if (x <= 1.05)
		tmp = (0.083333333333333 / x) + (0.91893853320467 + (log(x) * -0.5));
	else
		tmp = x * (log(x) + -1.0);
	end
	tmp_2 = tmp;
end
code[x_, y_, z_] := If[LessEqual[x, 1.05], N[(N[(0.083333333333333 / x), $MachinePrecision] + N[(0.91893853320467 + N[(N[Log[x], $MachinePrecision] * -0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(x * N[(N[Log[x], $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x \leq 1.05:\\
\;\;\;\;\frac{0.083333333333333}{x} + \left(0.91893853320467 + \log x \cdot -0.5\right)\\

\mathbf{else}:\\
\;\;\;\;x \cdot \left(\log x + -1\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if x < 1.05000000000000004

    1. Initial program 99.7%

      \[\left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
    2. Add Preprocessing
    3. Taylor expanded in z around 0 50.9%

      \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \color{blue}{\frac{0.083333333333333}{x}} \]
    4. Taylor expanded in x around 0 49.3%

      \[\leadsto \color{blue}{\left(0.91893853320467 + -0.5 \cdot \log x\right)} + \frac{0.083333333333333}{x} \]
    5. Step-by-step derivation
      1. +-commutative 49.3%

        \[\leadsto \color{blue}{\left(-0.5 \cdot \log x + 0.91893853320467\right)} + \frac{0.083333333333333}{x} \]
    6. Simplified 49.3%

      \[\leadsto \color{blue}{\left(-0.5 \cdot \log x + 0.91893853320467\right)} + \frac{0.083333333333333}{x} \]

    if 1.05000000000000004 < x

    1. Initial program 83.1%

      \[\left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
    2. Step-by-step derivation
      1. associate-+l+ 83.1%

        \[\leadsto \color{blue}{\left(\left(x - 0.5\right) \cdot \log x - x\right) + \left(0.91893853320467 + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x}\right)} \]
      2. fma-neg 83.2%

        \[\leadsto \color{blue}{\mathsf{fma}\left(x - 0.5, \log x, -x\right)} + \left(0.91893853320467 + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x}\right) \]
      3. sub-neg 83.2%

        \[\leadsto \mathsf{fma}\left(\color{blue}{x + \left(-0.5\right)}, \log x, -x\right) + \left(0.91893853320467 + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x}\right) \]
      4. metadata-eval 83.2%

        \[\leadsto \mathsf{fma}\left(x + \color{blue}{-0.5}, \log x, -x\right) + \left(0.91893853320467 + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x}\right) \]
      5. fma-def 83.2%

        \[\leadsto \mathsf{fma}\left(x + -0.5, \log x, -x\right) + \left(0.91893853320467 + \frac{\color{blue}{\mathsf{fma}\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778, z, 0.083333333333333\right)}}{x}\right) \]
      6. fma-neg 83.2%

        \[\leadsto \mathsf{fma}\left(x + -0.5, \log x, -x\right) + \left(0.91893853320467 + \frac{\mathsf{fma}\left(\color{blue}{\mathsf{fma}\left(y + 0.0007936500793651, z, -0.0027777777777778\right)}, z, 0.083333333333333\right)}{x}\right) \]
      7. metadata-eval 83.2%

        \[\leadsto \mathsf{fma}\left(x + -0.5, \log x, -x\right) + \left(0.91893853320467 + \frac{\mathsf{fma}\left(\mathsf{fma}\left(y + 0.0007936500793651, z, \color{blue}{-0.0027777777777778}\right), z, 0.083333333333333\right)}{x}\right) \]
    3. Simplified 83.2%

      \[\leadsto \color{blue}{\mathsf{fma}\left(x + -0.5, \log x, -x\right) + \left(0.91893853320467 + \frac{\mathsf{fma}\left(\mathsf{fma}\left(y + 0.0007936500793651, z, -0.0027777777777778\right), z, 0.083333333333333\right)}{x}\right)} \]
    4. Add Preprocessing
    5. Taylor expanded in z around 0 70.2%

      \[\leadsto \mathsf{fma}\left(x + -0.5, \log x, -x\right) + \left(0.91893853320467 + \frac{\color{blue}{0.083333333333333}}{x}\right) \]
    6. Taylor expanded in x around inf 68.5%

      \[\leadsto \color{blue}{x \cdot \left(-1 \cdot \log \left(\frac{1}{x}\right) - 1\right)} \]
    7. Step-by-step derivation
      1. sub-neg 68.5%

        \[\leadsto x \cdot \color{blue}{\left(-1 \cdot \log \left(\frac{1}{x}\right) + \left(-1\right)\right)} \]
      2. mul-1-neg 68.5%

        \[\leadsto x \cdot \left(\color{blue}{\left(-\log \left(\frac{1}{x}\right)\right)} + \left(-1\right)\right) \]
      3. log-rec 68.5%

        \[\leadsto x \cdot \left(\left(-\color{blue}{\left(-\log x\right)}\right) + \left(-1\right)\right) \]
      4. remove-double-neg 68.5%

        \[\leadsto x \cdot \left(\color{blue}{\log x} + \left(-1\right)\right) \]
      5. metadata-eval 68.5%

        \[\leadsto x \cdot \left(\log x + \color{blue}{-1}\right) \]
    8. Simplified 68.5%

      \[\leadsto \color{blue}{x \cdot \left(\log x + -1\right)} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification 58.7%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq 1.05:\\ \;\;\;\;\frac{0.083333333333333}{x} + \left(0.91893853320467 + \log x \cdot -0.5\right)\\ \mathbf{else}:\\ \;\;\;\;x \cdot \left(\log x + -1\right)\\ \end{array} \]
  5. Add Preprocessing

Alternative 14: 56.8% accurate, 1.1× speedup?

\[\begin{array}{l} \\ \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \frac{0.083333333333333}{x} \end{array} \]
(FPCore (x y z)
 :precision binary64
 (+ (+ (- (* (- x 0.5) (log x)) x) 0.91893853320467) (/ 0.083333333333333 x)))
double code(double x, double y, double z) {
	return ((((x - 0.5) * log(x)) - x) + 0.91893853320467) + (0.083333333333333 / x);
}
real(8) function code(x, y, z)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    code = ((((x - 0.5d0) * log(x)) - x) + 0.91893853320467d0) + (0.083333333333333d0 / x)
end function
public static double code(double x, double y, double z) {
	return ((((x - 0.5) * Math.log(x)) - x) + 0.91893853320467) + (0.083333333333333 / x);
}
def code(x, y, z):
	return ((((x - 0.5) * math.log(x)) - x) + 0.91893853320467) + (0.083333333333333 / x)
function code(x, y, z)
	return Float64(Float64(Float64(Float64(Float64(x - 0.5) * log(x)) - x) + 0.91893853320467) + Float64(0.083333333333333 / x))
end
function tmp = code(x, y, z)
	tmp = ((((x - 0.5) * log(x)) - x) + 0.91893853320467) + (0.083333333333333 / x);
end
code[x_, y_, z_] := N[(N[(N[(N[(N[(x - 0.5), $MachinePrecision] * N[Log[x], $MachinePrecision]), $MachinePrecision] - x), $MachinePrecision] + 0.91893853320467), $MachinePrecision] + N[(0.083333333333333 / x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \frac{0.083333333333333}{x}
\end{array}
Derivation
  1. Initial program 91.6%

    \[\left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
  2. Add Preprocessing
  3. Taylor expanded in z around 0 60.3%

    \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \color{blue}{\frac{0.083333333333333}{x}} \]
  4. Final simplification 60.3%

    \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \frac{0.083333333333333}{x} \]
  5. Add Preprocessing

Alternative 15: 56.0% accurate, 1.1× speedup?

\[\begin{array}{l} \\ \frac{0.083333333333333}{x} + \left(0.91893853320467 + \left(x \cdot \log x - x\right)\right) \end{array} \]
(FPCore (x y z)
 :precision binary64
 (+ (/ 0.083333333333333 x) (+ 0.91893853320467 (- (* x (log x)) x))))
double code(double x, double y, double z) {
	return (0.083333333333333 / x) + (0.91893853320467 + ((x * log(x)) - x));
}
real(8) function code(x, y, z)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    code = (0.083333333333333d0 / x) + (0.91893853320467d0 + ((x * log(x)) - x))
end function
public static double code(double x, double y, double z) {
	return (0.083333333333333 / x) + (0.91893853320467 + ((x * Math.log(x)) - x));
}
def code(x, y, z):
	return (0.083333333333333 / x) + (0.91893853320467 + ((x * math.log(x)) - x))
function code(x, y, z)
	return Float64(Float64(0.083333333333333 / x) + Float64(0.91893853320467 + Float64(Float64(x * log(x)) - x)))
end
function tmp = code(x, y, z)
	tmp = (0.083333333333333 / x) + (0.91893853320467 + ((x * log(x)) - x));
end
code[x_, y_, z_] := N[(N[(0.083333333333333 / x), $MachinePrecision] + N[(0.91893853320467 + N[(N[(x * N[Log[x], $MachinePrecision]), $MachinePrecision] - x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\frac{0.083333333333333}{x} + \left(0.91893853320467 + \left(x \cdot \log x - x\right)\right)
\end{array}
Derivation
  1. Initial program 91.6%

    \[\left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
  2. Add Preprocessing
  3. Taylor expanded in z around 0 60.3%

    \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \color{blue}{\frac{0.083333333333333}{x}} \]
  4. Taylor expanded in x around inf 57.9%

    \[\leadsto \left(\left(\color{blue}{-1 \cdot \left(x \cdot \log \left(\frac{1}{x}\right)\right)} - x\right) + 0.91893853320467\right) + \frac{0.083333333333333}{x} \]
  5. Step-by-step derivation
    1. mul-1-neg 57.9%

      \[\leadsto \left(\left(\color{blue}{\left(-x \cdot \log \left(\frac{1}{x}\right)\right)} - x\right) + 0.91893853320467\right) + \frac{0.083333333333333}{x} \]
    2. distribute-rgt-neg-in 57.9%

      \[\leadsto \left(\left(\color{blue}{x \cdot \left(-\log \left(\frac{1}{x}\right)\right)} - x\right) + 0.91893853320467\right) + \frac{0.083333333333333}{x} \]
    3. log-rec 57.9%

      \[\leadsto \left(\left(x \cdot \left(-\color{blue}{\left(-\log x\right)}\right) - x\right) + 0.91893853320467\right) + \frac{0.083333333333333}{x} \]
    4. remove-double-neg 57.9%

      \[\leadsto \left(\left(x \cdot \color{blue}{\log x} - x\right) + 0.91893853320467\right) + \frac{0.083333333333333}{x} \]
  6. Simplified 57.9%

    \[\leadsto \left(\left(\color{blue}{x \cdot \log x} - x\right) + 0.91893853320467\right) + \frac{0.083333333333333}{x} \]
  7. Final simplification 57.9%

    \[\leadsto \frac{0.083333333333333}{x} + \left(0.91893853320467 + \left(x \cdot \log x - x\right)\right) \]
  8. Add Preprocessing

Alternative 16: 35.4% accurate, 1.2× speedup?

\[\begin{array}{l} \\ x \cdot \left(\log x + -1\right) \end{array} \]
(FPCore (x y z) :precision binary64 (* x (+ (log x) -1.0)))
double code(double x, double y, double z) {
	return x * (log(x) + -1.0);
}
real(8) function code(x, y, z)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    code = x * (log(x) + (-1.0d0))
end function
public static double code(double x, double y, double z) {
	return x * (Math.log(x) + -1.0);
}
def code(x, y, z):
	return x * (math.log(x) + -1.0)
function code(x, y, z)
	return Float64(x * Float64(log(x) + -1.0))
end
function tmp = code(x, y, z)
	tmp = x * (log(x) + -1.0);
end
code[x_, y_, z_] := N[(x * N[(N[Log[x], $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
x \cdot \left(\log x + -1\right)
\end{array}
Derivation
  1. Initial program 91.6%

    \[\left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
  2. Step-by-step derivation
    1. associate-+l+ 91.6%

      \[\leadsto \color{blue}{\left(\left(x - 0.5\right) \cdot \log x - x\right) + \left(0.91893853320467 + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x}\right)} \]
    2. fma-neg 91.6%

      \[\leadsto \color{blue}{\mathsf{fma}\left(x - 0.5, \log x, -x\right)} + \left(0.91893853320467 + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x}\right) \]
    3. sub-neg 91.6%

      \[\leadsto \mathsf{fma}\left(\color{blue}{x + \left(-0.5\right)}, \log x, -x\right) + \left(0.91893853320467 + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x}\right) \]
    4. metadata-eval 91.6%

      \[\leadsto \mathsf{fma}\left(x + \color{blue}{-0.5}, \log x, -x\right) + \left(0.91893853320467 + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x}\right) \]
    5. fma-def 91.6%

      \[\leadsto \mathsf{fma}\left(x + -0.5, \log x, -x\right) + \left(0.91893853320467 + \frac{\color{blue}{\mathsf{fma}\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778, z, 0.083333333333333\right)}}{x}\right) \]
    6. fma-neg 91.6%

      \[\leadsto \mathsf{fma}\left(x + -0.5, \log x, -x\right) + \left(0.91893853320467 + \frac{\mathsf{fma}\left(\color{blue}{\mathsf{fma}\left(y + 0.0007936500793651, z, -0.0027777777777778\right)}, z, 0.083333333333333\right)}{x}\right) \]
    7. metadata-eval 91.6%

      \[\leadsto \mathsf{fma}\left(x + -0.5, \log x, -x\right) + \left(0.91893853320467 + \frac{\mathsf{fma}\left(\mathsf{fma}\left(y + 0.0007936500793651, z, \color{blue}{-0.0027777777777778}\right), z, 0.083333333333333\right)}{x}\right) \]
  3. Simplified 91.6%

    \[\leadsto \color{blue}{\mathsf{fma}\left(x + -0.5, \log x, -x\right) + \left(0.91893853320467 + \frac{\mathsf{fma}\left(\mathsf{fma}\left(y + 0.0007936500793651, z, -0.0027777777777778\right), z, 0.083333333333333\right)}{x}\right)} \]
  4. Add Preprocessing
  5. Taylor expanded in z around 0 60.3%

    \[\leadsto \mathsf{fma}\left(x + -0.5, \log x, -x\right) + \left(0.91893853320467 + \frac{\color{blue}{0.083333333333333}}{x}\right) \]
  6. Taylor expanded in x around inf 34.2%

    \[\leadsto \color{blue}{x \cdot \left(-1 \cdot \log \left(\frac{1}{x}\right) - 1\right)} \]
  7. Step-by-step derivation
    1. sub-neg 34.2%

      \[\leadsto x \cdot \color{blue}{\left(-1 \cdot \log \left(\frac{1}{x}\right) + \left(-1\right)\right)} \]
    2. mul-1-neg 34.2%

      \[\leadsto x \cdot \left(\color{blue}{\left(-\log \left(\frac{1}{x}\right)\right)} + \left(-1\right)\right) \]
    3. log-rec 34.2%

      \[\leadsto x \cdot \left(\left(-\color{blue}{\left(-\log x\right)}\right) + \left(-1\right)\right) \]
    4. remove-double-neg 34.2%

      \[\leadsto x \cdot \left(\color{blue}{\log x} + \left(-1\right)\right) \]
    5. metadata-eval 34.2%

      \[\leadsto x \cdot \left(\log x + \color{blue}{-1}\right) \]
  8. Simplified 34.2%

    \[\leadsto \color{blue}{x \cdot \left(\log x + -1\right)} \]
  9. Final simplification 34.2%

    \[\leadsto x \cdot \left(\log x + -1\right) \]
  10. Add Preprocessing

Developer target: 98.7% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \left(\left(\left(x - 0.5\right) \cdot \log x + \left(0.91893853320467 - x\right)\right) + \frac{0.083333333333333}{x}\right) + \frac{z}{x} \cdot \left(z \cdot \left(y + 0.0007936500793651\right) - 0.0027777777777778\right) \end{array} \]
(FPCore (x y z)
 :precision binary64
 (+
  (+ (+ (* (- x 0.5) (log x)) (- 0.91893853320467 x)) (/ 0.083333333333333 x))
  (* (/ z x) (- (* z (+ y 0.0007936500793651)) 0.0027777777777778))))
double code(double x, double y, double z) {
	return ((((x - 0.5) * log(x)) + (0.91893853320467 - x)) + (0.083333333333333 / x)) + ((z / x) * ((z * (y + 0.0007936500793651)) - 0.0027777777777778));
}
real(8) function code(x, y, z)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    code = ((((x - 0.5d0) * log(x)) + (0.91893853320467d0 - x)) + (0.083333333333333d0 / x)) + ((z / x) * ((z * (y + 0.0007936500793651d0)) - 0.0027777777777778d0))
end function
public static double code(double x, double y, double z) {
	return ((((x - 0.5) * Math.log(x)) + (0.91893853320467 - x)) + (0.083333333333333 / x)) + ((z / x) * ((z * (y + 0.0007936500793651)) - 0.0027777777777778));
}
def code(x, y, z):
	return ((((x - 0.5) * math.log(x)) + (0.91893853320467 - x)) + (0.083333333333333 / x)) + ((z / x) * ((z * (y + 0.0007936500793651)) - 0.0027777777777778))
function code(x, y, z)
	return Float64(Float64(Float64(Float64(Float64(x - 0.5) * log(x)) + Float64(0.91893853320467 - x)) + Float64(0.083333333333333 / x)) + Float64(Float64(z / x) * Float64(Float64(z * Float64(y + 0.0007936500793651)) - 0.0027777777777778)))
end
function tmp = code(x, y, z)
	tmp = ((((x - 0.5) * log(x)) + (0.91893853320467 - x)) + (0.083333333333333 / x)) + ((z / x) * ((z * (y + 0.0007936500793651)) - 0.0027777777777778));
end
code[x_, y_, z_] := N[(N[(N[(N[(N[(x - 0.5), $MachinePrecision] * N[Log[x], $MachinePrecision]), $MachinePrecision] + N[(0.91893853320467 - x), $MachinePrecision]), $MachinePrecision] + N[(0.083333333333333 / x), $MachinePrecision]), $MachinePrecision] + N[(N[(z / x), $MachinePrecision] * N[(N[(z * N[(y + 0.0007936500793651), $MachinePrecision]), $MachinePrecision] - 0.0027777777777778), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\left(\left(\left(x - 0.5\right) \cdot \log x + \left(0.91893853320467 - x\right)\right) + \frac{0.083333333333333}{x}\right) + \frac{z}{x} \cdot \left(z \cdot \left(y + 0.0007936500793651\right) - 0.0027777777777778\right)
\end{array}

Reproduce

?
herbie shell --seed 2024020 
(FPCore (x y z)
  :name "Numeric.SpecFunctions:$slogFactorial from math-functions-0.1.5.2, B"
  :precision binary64

  :herbie-target
  (+ (+ (+ (* (- x 0.5) (log x)) (- 0.91893853320467 x)) (/ 0.083333333333333 x)) (* (/ z x) (- (* z (+ y 0.0007936500793651)) 0.0027777777777778)))

  (+ (+ (- (* (- x 0.5) (log x)) x) 0.91893853320467) (/ (+ (* (- (* (+ y 0.0007936500793651) z) 0.0027777777777778) z) 0.083333333333333) x)))