Numeric.SpecFunctions:$slogFactorial from math-functions-0.1.5.2, B

Percentage Accurate: 93.6% → 98.4%
Time: 14.8s
Alternatives: 9
Speedup: 1.0×

Specification

?
\[\begin{array}{l} \\ \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \end{array} \]
(FPCore (x y z)
 :precision binary64
 (+
  (+ (- (* (- x 0.5) (log x)) x) 0.91893853320467)
  (/
   (+
    (* (- (* (+ y 0.0007936500793651) z) 0.0027777777777778) z)
    0.083333333333333)
   x)))
/* Herbie-generated initial program (binary64) for logFactorial.
 * Leading term: (x - 0.5)*log(x) - x + 0.91893853320467 (~= log(2*pi)/2),
 * plus a Horner-form correction polynomial in z (Stirling-series
 * coefficients ~1/12, ~1/360, ~1/1260) divided by x. */
double code(double x, double y, double z) {
	return ((((x - 0.5) * log(x)) - x) + 0.91893853320467) + ((((((y + 0.0007936500793651) * z) - 0.0027777777777778) * z) + 0.083333333333333) / x);
}
! Herbie-generated initial program (binary64) for logFactorial.
! Leading term (x - 0.5)*log(x) - x + ln(2*pi)/2, plus a z-polynomial
! correction (Stirling-series coefficients) divided by x.
real(8) function code(x, y, z)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    code = ((((x - 0.5d0) * log(x)) - x) + 0.91893853320467d0) + ((((((y + 0.0007936500793651d0) * z) - 0.0027777777777778d0) * z) + 0.083333333333333d0) / x)
end function
/**
 * Herbie-generated initial program (binary64) for logFactorial:
 * Stirling-style leading term (x - 0.5)*ln(x) - x + ln(2*pi)/2
 * plus a z-polynomial correction divided by x.
 */
public static double code(double x, double y, double z) {
	return ((((x - 0.5) * Math.log(x)) - x) + 0.91893853320467) + ((((((y + 0.0007936500793651) * z) - 0.0027777777777778) * z) + 0.083333333333333) / x);
}
def code(x, y, z):
	return ((((x - 0.5) * math.log(x)) - x) + 0.91893853320467) + ((((((y + 0.0007936500793651) * z) - 0.0027777777777778) * z) + 0.083333333333333) / x)
# Herbie-generated initial program for logFactorial; the explicit
# Float64(...) wrappers force binary64 rounding at every intermediate step.
function code(x, y, z)
	return Float64(Float64(Float64(Float64(Float64(x - 0.5) * log(x)) - x) + 0.91893853320467) + Float64(Float64(Float64(Float64(Float64(Float64(y + 0.0007936500793651) * z) - 0.0027777777777778) * z) + 0.083333333333333) / x))
end
% Herbie-generated initial program (binary64) for logFactorial:
% Stirling-style leading term plus a z-polynomial correction over x.
function tmp = code(x, y, z)
	tmp = ((((x - 0.5) * log(x)) - x) + 0.91893853320467) + ((((((y + 0.0007936500793651) * z) - 0.0027777777777778) * z) + 0.083333333333333) / x);
end
(* Herbie-generated initial program for logFactorial; each N[..., $MachinePrecision] pins one intermediate to machine precision, mirroring binary64 rounding. *)
code[x_, y_, z_] := N[(N[(N[(N[(N[(x - 0.5), $MachinePrecision] * N[Log[x], $MachinePrecision]), $MachinePrecision] - x), $MachinePrecision] + 0.91893853320467), $MachinePrecision] + N[(N[(N[(N[(N[(N[(y + 0.0007936500793651), $MachinePrecision] * z), $MachinePrecision] - 0.0027777777777778), $MachinePrecision] * z), $MachinePrecision] + 0.083333333333333), $MachinePrecision] / x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x}
\end{array}

Sampling outcomes in binary64 precision:

Local Percentage Accuracy vs ?

The average percentage accuracy by input value. Horizontal axis shows the value of an input variable; the variable is chosen in the title. Vertical axis is accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion. These can be toggled with buttons below the plot. The line is an average while dots represent individual samples.

Accuracy vs Speed?

Herbie found 9 alternatives:

AlternativeAccuracySpeedup
The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed-accuracy tradeoffs.

Initial Program: 93.6% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \end{array} \]
(FPCore (x y z)
 :precision binary64
 (+
  (+ (- (* (- x 0.5) (log x)) x) 0.91893853320467)
  (/
   (+
    (* (- (* (+ y 0.0007936500793651) z) 0.0027777777777778) z)
    0.083333333333333)
   x)))
/* Herbie initial program (binary64) for logFactorial, repeated under the
 * "Initial Program" section: Stirling-style leading term plus a z-polynomial
 * correction divided by x. */
double code(double x, double y, double z) {
	return ((((x - 0.5) * log(x)) - x) + 0.91893853320467) + ((((((y + 0.0007936500793651) * z) - 0.0027777777777778) * z) + 0.083333333333333) / x);
}
! Herbie initial program (binary64) for logFactorial ("Initial Program"
! section): Stirling-style leading term plus z-polynomial correction over x.
real(8) function code(x, y, z)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    code = ((((x - 0.5d0) * log(x)) - x) + 0.91893853320467d0) + ((((((y + 0.0007936500793651d0) * z) - 0.0027777777777778d0) * z) + 0.083333333333333d0) / x)
end function
/**
 * Herbie initial program (binary64) for logFactorial ("Initial Program"
 * section): Stirling-style leading term plus z-polynomial correction over x.
 */
public static double code(double x, double y, double z) {
	return ((((x - 0.5) * Math.log(x)) - x) + 0.91893853320467) + ((((((y + 0.0007936500793651) * z) - 0.0027777777777778) * z) + 0.083333333333333) / x);
}
def code(x, y, z):
	return ((((x - 0.5) * math.log(x)) - x) + 0.91893853320467) + ((((((y + 0.0007936500793651) * z) - 0.0027777777777778) * z) + 0.083333333333333) / x)
# Herbie initial program for logFactorial ("Initial Program" section);
# Float64(...) wrappers force binary64 rounding at each intermediate.
function code(x, y, z)
	return Float64(Float64(Float64(Float64(Float64(x - 0.5) * log(x)) - x) + 0.91893853320467) + Float64(Float64(Float64(Float64(Float64(Float64(y + 0.0007936500793651) * z) - 0.0027777777777778) * z) + 0.083333333333333) / x))
end
% Herbie initial program (binary64) for logFactorial ("Initial Program"
% section): Stirling-style leading term plus z-polynomial correction over x.
function tmp = code(x, y, z)
	tmp = ((((x - 0.5) * log(x)) - x) + 0.91893853320467) + ((((((y + 0.0007936500793651) * z) - 0.0027777777777778) * z) + 0.083333333333333) / x);
end
(* Herbie initial program for logFactorial ("Initial Program" section); N[..., $MachinePrecision] pins each intermediate to machine precision. *)
code[x_, y_, z_] := N[(N[(N[(N[(N[(x - 0.5), $MachinePrecision] * N[Log[x], $MachinePrecision]), $MachinePrecision] - x), $MachinePrecision] + 0.91893853320467), $MachinePrecision] + N[(N[(N[(N[(N[(N[(y + 0.0007936500793651), $MachinePrecision] * z), $MachinePrecision] - 0.0027777777777778), $MachinePrecision] * z), $MachinePrecision] + 0.083333333333333), $MachinePrecision] / x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x}
\end{array}

Alternative 1: 98.4% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(z \cdot \left(\frac{z}{x} \cdot \left(0.0007936500793651 + y\right)\right) + \frac{1}{x \cdot 12.000000000000048}\right) \end{array} \]
(FPCore (x y z)
 :precision binary64
 (+
  (+ (- (* (- x 0.5) (log x)) x) 0.91893853320467)
  (+
   (* z (* (/ z x) (+ 0.0007936500793651 y)))
   (/ 1.0 (* x 12.000000000000048)))))
/* Herbie alternative 1 (binary64): same Stirling-style leading term, with
 * the correction refactored as z*(z/x*(c + y)) + 1/(12*x); per the report's
 * derivation, 12.000000000000048 comes from 1/0.083333333333333. */
double code(double x, double y, double z) {
	return ((((x - 0.5) * log(x)) - x) + 0.91893853320467) + ((z * ((z / x) * (0.0007936500793651 + y))) + (1.0 / (x * 12.000000000000048)));
}
! Herbie alternative 1 (binary64): Stirling-style leading term with the
! correction refactored as z*(z/x*(c + y)) + 1/(12*x);
! 12.000000000000048 = 1/0.083333333333333 per the derivation.
real(8) function code(x, y, z)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    code = ((((x - 0.5d0) * log(x)) - x) + 0.91893853320467d0) + ((z * ((z / x) * (0.0007936500793651d0 + y))) + (1.0d0 / (x * 12.000000000000048d0)))
end function
/**
 * Herbie alternative 1 (binary64): Stirling-style leading term with the
 * correction refactored as z*(z/x*(c + y)) + 1/(12*x);
 * 12.000000000000048 = 1/0.083333333333333 per the derivation.
 */
public static double code(double x, double y, double z) {
	return ((((x - 0.5) * Math.log(x)) - x) + 0.91893853320467) + ((z * ((z / x) * (0.0007936500793651 + y))) + (1.0 / (x * 12.000000000000048)));
}
def code(x, y, z):
	return ((((x - 0.5) * math.log(x)) - x) + 0.91893853320467) + ((z * ((z / x) * (0.0007936500793651 + y))) + (1.0 / (x * 12.000000000000048)))
# Herbie alternative 1: correction refactored as z*(z/x*(c + y)) + 1/(12*x);
# Float64(...) wrappers force binary64 rounding at each intermediate.
function code(x, y, z)
	return Float64(Float64(Float64(Float64(Float64(x - 0.5) * log(x)) - x) + 0.91893853320467) + Float64(Float64(z * Float64(Float64(z / x) * Float64(0.0007936500793651 + y))) + Float64(1.0 / Float64(x * 12.000000000000048))))
end
% Herbie alternative 1 (binary64): Stirling-style leading term with the
% correction refactored as z*(z/x*(c + y)) + 1/(12*x).
function tmp = code(x, y, z)
	tmp = ((((x - 0.5) * log(x)) - x) + 0.91893853320467) + ((z * ((z / x) * (0.0007936500793651 + y))) + (1.0 / (x * 12.000000000000048)));
end
(* Herbie alternative 1: correction refactored as z*(z/x*(c + y)) + 1/(12 x); N[..., $MachinePrecision] pins each intermediate to machine precision. *)
code[x_, y_, z_] := N[(N[(N[(N[(N[(x - 0.5), $MachinePrecision] * N[Log[x], $MachinePrecision]), $MachinePrecision] - x), $MachinePrecision] + 0.91893853320467), $MachinePrecision] + N[(N[(z * N[(N[(z / x), $MachinePrecision] * N[(0.0007936500793651 + y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(1.0 / N[(x * 12.000000000000048), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(z \cdot \left(\frac{z}{x} \cdot \left(0.0007936500793651 + y\right)\right) + \frac{1}{x \cdot 12.000000000000048}\right)
\end{array}
Derivation
  1. Initial program 94.9%

    \[\left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
  2. Add Preprocessing
  3. Taylor expanded in z around 0 95.5%

    \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \color{blue}{\left(z \cdot \left(z \cdot \left(0.0007936500793651 \cdot \frac{1}{x} + \frac{y}{x}\right) - 0.0027777777777778 \cdot \frac{1}{x}\right) + 0.083333333333333 \cdot \frac{1}{x}\right)} \]
  4. Taylor expanded in z around inf 92.1%

    \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(\color{blue}{{z}^{2} \cdot \left(0.0007936500793651 \cdot \frac{1}{x} + \frac{y}{x}\right)} + 0.083333333333333 \cdot \frac{1}{x}\right) \]
  5. Step-by-step derivation
    1. unpow292.1%

      \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(\color{blue}{\left(z \cdot z\right)} \cdot \left(0.0007936500793651 \cdot \frac{1}{x} + \frac{y}{x}\right) + 0.083333333333333 \cdot \frac{1}{x}\right) \]
    2. associate-*l*95.1%

      \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(\color{blue}{z \cdot \left(z \cdot \left(0.0007936500793651 \cdot \frac{1}{x} + \frac{y}{x}\right)\right)} + 0.083333333333333 \cdot \frac{1}{x}\right) \]
    3. associate-*r/95.1%

      \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(z \cdot \left(z \cdot \left(\color{blue}{\frac{0.0007936500793651 \cdot 1}{x}} + \frac{y}{x}\right)\right) + 0.083333333333333 \cdot \frac{1}{x}\right) \]
    4. metadata-eval95.1%

      \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(z \cdot \left(z \cdot \left(\frac{\color{blue}{0.0007936500793651}}{x} + \frac{y}{x}\right)\right) + 0.083333333333333 \cdot \frac{1}{x}\right) \]
    5. distribute-rgt-out91.6%

      \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(z \cdot \color{blue}{\left(\frac{0.0007936500793651}{x} \cdot z + \frac{y}{x} \cdot z\right)} + 0.083333333333333 \cdot \frac{1}{x}\right) \]
    6. associate-*l/91.6%

      \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(z \cdot \left(\color{blue}{\frac{0.0007936500793651 \cdot z}{x}} + \frac{y}{x} \cdot z\right) + 0.083333333333333 \cdot \frac{1}{x}\right) \]
    7. associate-*r/91.6%

      \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(z \cdot \left(\color{blue}{0.0007936500793651 \cdot \frac{z}{x}} + \frac{y}{x} \cdot z\right) + 0.083333333333333 \cdot \frac{1}{x}\right) \]
    8. associate-*l/95.0%

      \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(z \cdot \left(0.0007936500793651 \cdot \frac{z}{x} + \color{blue}{\frac{y \cdot z}{x}}\right) + 0.083333333333333 \cdot \frac{1}{x}\right) \]
    9. associate-/l*91.1%

      \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(z \cdot \left(0.0007936500793651 \cdot \frac{z}{x} + \color{blue}{y \cdot \frac{z}{x}}\right) + 0.083333333333333 \cdot \frac{1}{x}\right) \]
    10. distribute-rgt-out98.9%

      \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(z \cdot \color{blue}{\left(\frac{z}{x} \cdot \left(0.0007936500793651 + y\right)\right)} + 0.083333333333333 \cdot \frac{1}{x}\right) \]
  6. Simplified98.9%

    \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(\color{blue}{z \cdot \left(\frac{z}{x} \cdot \left(0.0007936500793651 + y\right)\right)} + 0.083333333333333 \cdot \frac{1}{x}\right) \]
  7. Step-by-step derivation
    1. div-inv98.9%

      \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(z \cdot \left(\frac{z}{x} \cdot \left(0.0007936500793651 + y\right)\right) + \color{blue}{\frac{0.083333333333333}{x}}\right) \]
    2. clear-num98.9%

      \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(z \cdot \left(\frac{z}{x} \cdot \left(0.0007936500793651 + y\right)\right) + \color{blue}{\frac{1}{\frac{x}{0.083333333333333}}}\right) \]
    3. div-inv98.9%

      \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(z \cdot \left(\frac{z}{x} \cdot \left(0.0007936500793651 + y\right)\right) + \frac{1}{\color{blue}{x \cdot \frac{1}{0.083333333333333}}}\right) \]
    4. metadata-eval98.9%

      \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(z \cdot \left(\frac{z}{x} \cdot \left(0.0007936500793651 + y\right)\right) + \frac{1}{x \cdot \color{blue}{12.000000000000048}}\right) \]
  8. Applied egg-rr98.9%

    \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(z \cdot \left(\frac{z}{x} \cdot \left(0.0007936500793651 + y\right)\right) + \color{blue}{\frac{1}{x \cdot 12.000000000000048}}\right) \]
  9. Add Preprocessing

Alternative 2: 73.1% accurate, 0.9× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_0 := 0.91893853320467 + \log x \cdot -0.5\\ t_1 := t\_0 + \frac{0.083333333333333 + z \cdot \left(z \cdot y - 0.0027777777777778\right)}{x}\\ \mathbf{if}\;x \leq 1.75 \cdot 10^{-275}:\\ \;\;\;\;t\_1\\ \mathbf{elif}\;x \leq 2.2 \cdot 10^{-139}:\\ \;\;\;\;t\_0 + \frac{0.083333333333333 + z \cdot \left(z \cdot 0.0007936500793651 - 0.0027777777777778\right)}{x}\\ \mathbf{elif}\;x \leq 1.45 \cdot 10^{+59}:\\ \;\;\;\;t\_1\\ \mathbf{else}:\\ \;\;\;\;x \cdot \left(\log x + -1\right)\\ \end{array} \end{array} \]
(FPCore (x y z)
 :precision binary64
 (let* ((t_0 (+ 0.91893853320467 (* (log x) -0.5)))
        (t_1
         (+
          t_0
          (/ (+ 0.083333333333333 (* z (- (* z y) 0.0027777777777778))) x))))
   (if (<= x 1.75e-275)
     t_1
     (if (<= x 2.2e-139)
       (+
        t_0
        (/
         (+
          0.083333333333333
          (* z (- (* z 0.0007936500793651) 0.0027777777777778)))
         x))
       (if (<= x 1.45e+59) t_1 (* x (+ (log x) -1.0)))))))
/* Herbie alternative 2 (binary64): input split into regimes on x.
 * t_0 is the small-x Taylor form 0.91893853320467 - 0.5*log(x); t_1 adds
 * the z-correction over x.  A y-free variant (y Taylor-expanded around 0)
 * covers 1.75e-275 < x <= 2.2e-139, and the large-x limit x*(log(x) - 1)
 * covers x > 1.45e+59 (NaN x also lands in that final branch). */
double code(double x, double y, double z) {
	double t_0 = 0.91893853320467 + (log(x) * -0.5);
	double t_1 = t_0 + ((0.083333333333333 + (z * ((z * y) - 0.0027777777777778))) / x);
	double tmp;
	if (x <= 1.75e-275) {
		tmp = t_1;
	} else if (x <= 2.2e-139) {
		tmp = t_0 + ((0.083333333333333 + (z * ((z * 0.0007936500793651) - 0.0027777777777778))) / x);
	} else if (x <= 1.45e+59) {
		tmp = t_1;
	} else {
		tmp = x * (log(x) + -1.0);
	}
	return tmp;
}
! Herbie alternative 2 (binary64): regime split on x.  t_0 is the small-x
! Taylor form 0.91893853320467 - 0.5*log(x); t_1 adds the z-correction
! over x.  A y-free variant covers 1.75d-275 < x <= 2.2d-139, and the
! large-x limit x*(log(x) - 1) covers x > 1.45d+59.
real(8) function code(x, y, z)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8) :: t_0
    real(8) :: t_1
    real(8) :: tmp
    t_0 = 0.91893853320467d0 + (log(x) * (-0.5d0))
    t_1 = t_0 + ((0.083333333333333d0 + (z * ((z * y) - 0.0027777777777778d0))) / x)
    if (x <= 1.75d-275) then
        tmp = t_1
    else if (x <= 2.2d-139) then
        tmp = t_0 + ((0.083333333333333d0 + (z * ((z * 0.0007936500793651d0) - 0.0027777777777778d0))) / x)
    else if (x <= 1.45d+59) then
        tmp = t_1
    else
        tmp = x * (log(x) + (-1.0d0))
    end if
    code = tmp
end function
/**
 * Herbie alternative 2 (binary64): regime split on x.  t_0 is the small-x
 * Taylor form 0.91893853320467 - 0.5*log(x); t_1 adds the z-correction
 * over x.  A y-free variant covers 1.75e-275 < x <= 2.2e-139, and the
 * large-x limit x*(log(x) - 1) covers x > 1.45e+59 (NaN x also reaches
 * the final else branch).
 */
public static double code(double x, double y, double z) {
	double t_0 = 0.91893853320467 + (Math.log(x) * -0.5);
	double t_1 = t_0 + ((0.083333333333333 + (z * ((z * y) - 0.0027777777777778))) / x);
	double tmp;
	if (x <= 1.75e-275) {
		tmp = t_1;
	} else if (x <= 2.2e-139) {
		tmp = t_0 + ((0.083333333333333 + (z * ((z * 0.0007936500793651) - 0.0027777777777778))) / x);
	} else if (x <= 1.45e+59) {
		tmp = t_1;
	} else {
		tmp = x * (Math.log(x) + -1.0);
	}
	return tmp;
}
def code(x, y, z):
	t_0 = 0.91893853320467 + (math.log(x) * -0.5)
	t_1 = t_0 + ((0.083333333333333 + (z * ((z * y) - 0.0027777777777778))) / x)
	tmp = 0
	if x <= 1.75e-275:
		tmp = t_1
	elif x <= 2.2e-139:
		tmp = t_0 + ((0.083333333333333 + (z * ((z * 0.0007936500793651) - 0.0027777777777778))) / x)
	elif x <= 1.45e+59:
		tmp = t_1
	else:
		tmp = x * (math.log(x) + -1.0)
	return tmp
# Herbie alternative 2: regime split on x (t_1 with the y term for tiny and
# mid ranges, a y-free variant for 1.75e-275 < x <= 2.2e-139, and the
# large-x limit x*(log(x) - 1) otherwise).  Float64(...) wrappers force
# binary64 rounding at each intermediate.
function code(x, y, z)
	t_0 = Float64(0.91893853320467 + Float64(log(x) * -0.5))
	t_1 = Float64(t_0 + Float64(Float64(0.083333333333333 + Float64(z * Float64(Float64(z * y) - 0.0027777777777778))) / x))
	tmp = 0.0
	if (x <= 1.75e-275)
		tmp = t_1;
	elseif (x <= 2.2e-139)
		tmp = Float64(t_0 + Float64(Float64(0.083333333333333 + Float64(z * Float64(Float64(z * 0.0007936500793651) - 0.0027777777777778))) / x));
	elseif (x <= 1.45e+59)
		tmp = t_1;
	else
		tmp = Float64(x * Float64(log(x) + -1.0));
	end
	return tmp
end
% Herbie alternative 2 (binary64): regime split on x.  t_0 is the small-x
% Taylor form; t_1 adds the z-correction over x.  A y-free variant covers
% 1.75e-275 < x <= 2.2e-139, and x*(log(x) - 1) covers x > 1.45e+59.
function tmp_2 = code(x, y, z)
	t_0 = 0.91893853320467 + (log(x) * -0.5);
	t_1 = t_0 + ((0.083333333333333 + (z * ((z * y) - 0.0027777777777778))) / x);
	tmp = 0.0;
	if (x <= 1.75e-275)
		tmp = t_1;
	elseif (x <= 2.2e-139)
		tmp = t_0 + ((0.083333333333333 + (z * ((z * 0.0007936500793651) - 0.0027777777777778))) / x);
	elseif (x <= 1.45e+59)
		tmp = t_1;
	else
		tmp = x * (log(x) + -1.0);
	end
	tmp_2 = tmp;
end
(* Herbie alternative 2: regime split on x — t$95$1 with the y term for tiny/mid ranges, a y-free variant for the middle band, and x*(Log[x] - 1) for huge x; N[..., $MachinePrecision] pins each intermediate. *)
code[x_, y_, z_] := Block[{t$95$0 = N[(0.91893853320467 + N[(N[Log[x], $MachinePrecision] * -0.5), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(t$95$0 + N[(N[(0.083333333333333 + N[(z * N[(N[(z * y), $MachinePrecision] - 0.0027777777777778), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / x), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[x, 1.75e-275], t$95$1, If[LessEqual[x, 2.2e-139], N[(t$95$0 + N[(N[(0.083333333333333 + N[(z * N[(N[(z * 0.0007936500793651), $MachinePrecision] - 0.0027777777777778), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / x), $MachinePrecision]), $MachinePrecision], If[LessEqual[x, 1.45e+59], t$95$1, N[(x * N[(N[Log[x], $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision]]]]]]
\begin{array}{l}

\\
\begin{array}{l}
t_0 := 0.91893853320467 + \log x \cdot -0.5\\
t_1 := t\_0 + \frac{0.083333333333333 + z \cdot \left(z \cdot y - 0.0027777777777778\right)}{x}\\
\mathbf{if}\;x \leq 1.75 \cdot 10^{-275}:\\
\;\;\;\;t\_1\\

\mathbf{elif}\;x \leq 2.2 \cdot 10^{-139}:\\
\;\;\;\;t\_0 + \frac{0.083333333333333 + z \cdot \left(z \cdot 0.0007936500793651 - 0.0027777777777778\right)}{x}\\

\mathbf{elif}\;x \leq 1.45 \cdot 10^{+59}:\\
\;\;\;\;t\_1\\

\mathbf{else}:\\
\;\;\;\;x \cdot \left(\log x + -1\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if x < 1.74999999999999984e-275 or 2.2000000000000001e-139 < x < 1.44999999999999995e59

    1. Initial program 98.8%

      \[\left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
    2. Add Preprocessing
    3. Step-by-step derivation
      1. add-cbrt-cube98.8%

        \[\leadsto \left(\left(\color{blue}{\sqrt[3]{\left(\left(\left(x - 0.5\right) \cdot \log x\right) \cdot \left(\left(x - 0.5\right) \cdot \log x\right)\right) \cdot \left(\left(x - 0.5\right) \cdot \log x\right)}} - x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
      2. pow398.8%

        \[\leadsto \left(\left(\sqrt[3]{\color{blue}{{\left(\left(x - 0.5\right) \cdot \log x\right)}^{3}}} - x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
      3. sub-neg98.8%

        \[\leadsto \left(\left(\sqrt[3]{{\left(\color{blue}{\left(x + \left(-0.5\right)\right)} \cdot \log x\right)}^{3}} - x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
      4. metadata-eval98.8%

        \[\leadsto \left(\left(\sqrt[3]{{\left(\left(x + \color{blue}{-0.5}\right) \cdot \log x\right)}^{3}} - x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
    4. Applied egg-rr98.8%

      \[\leadsto \left(\left(\color{blue}{\sqrt[3]{{\left(\left(x + -0.5\right) \cdot \log x\right)}^{3}}} - x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
    5. Taylor expanded in x around 0 93.4%

      \[\leadsto \color{blue}{\left(0.91893853320467 + -0.5 \cdot \log x\right)} + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
    6. Taylor expanded in y around inf 78.5%

      \[\leadsto \left(0.91893853320467 + -0.5 \cdot \log x\right) + \frac{\left(\color{blue}{y \cdot z} - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
    7. Step-by-step derivation
      1. *-commutative78.5%

        \[\leadsto \left(0.91893853320467 + -0.5 \cdot \log x\right) + \frac{\left(\color{blue}{z \cdot y} - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
    8. Simplified78.5%

      \[\leadsto \left(0.91893853320467 + -0.5 \cdot \log x\right) + \frac{\left(\color{blue}{z \cdot y} - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]

    if 1.74999999999999984e-275 < x < 2.2000000000000001e-139

    1. Initial program 99.7%

      \[\left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
    2. Add Preprocessing
    3. Step-by-step derivation
      1. add-cbrt-cube99.7%

        \[\leadsto \left(\left(\color{blue}{\sqrt[3]{\left(\left(\left(x - 0.5\right) \cdot \log x\right) \cdot \left(\left(x - 0.5\right) \cdot \log x\right)\right) \cdot \left(\left(x - 0.5\right) \cdot \log x\right)}} - x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
      2. pow399.7%

        \[\leadsto \left(\left(\sqrt[3]{\color{blue}{{\left(\left(x - 0.5\right) \cdot \log x\right)}^{3}}} - x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
      3. sub-neg99.7%

        \[\leadsto \left(\left(\sqrt[3]{{\left(\color{blue}{\left(x + \left(-0.5\right)\right)} \cdot \log x\right)}^{3}} - x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
      4. metadata-eval99.7%

        \[\leadsto \left(\left(\sqrt[3]{{\left(\left(x + \color{blue}{-0.5}\right) \cdot \log x\right)}^{3}} - x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
    4. Applied egg-rr99.7%

      \[\leadsto \left(\left(\color{blue}{\sqrt[3]{{\left(\left(x + -0.5\right) \cdot \log x\right)}^{3}}} - x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
    5. Taylor expanded in x around 0 99.7%

      \[\leadsto \color{blue}{\left(0.91893853320467 + -0.5 \cdot \log x\right)} + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
    6. Taylor expanded in y around 0 96.0%

      \[\leadsto \left(0.91893853320467 + -0.5 \cdot \log x\right) + \frac{\left(\color{blue}{0.0007936500793651 \cdot z} - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
    7. Step-by-step derivation
      1. *-commutative96.0%

        \[\leadsto \left(0.91893853320467 + -0.5 \cdot \log x\right) + \frac{\left(\color{blue}{z \cdot 0.0007936500793651} - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
    8. Simplified96.0%

      \[\leadsto \left(0.91893853320467 + -0.5 \cdot \log x\right) + \frac{\left(\color{blue}{z \cdot 0.0007936500793651} - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]

    if 1.44999999999999995e59 < x

    1. Initial program 89.8%

      \[\left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
    2. Step-by-step derivation
      1. sub-neg89.8%

        \[\leadsto \left(\color{blue}{\left(\left(x - 0.5\right) \cdot \log x + \left(-x\right)\right)} + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
      2. associate-+l+89.8%

        \[\leadsto \color{blue}{\left(\left(x - 0.5\right) \cdot \log x + \left(\left(-x\right) + 0.91893853320467\right)\right)} + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
      3. fma-define89.9%

        \[\leadsto \color{blue}{\mathsf{fma}\left(x - 0.5, \log x, \left(-x\right) + 0.91893853320467\right)} + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
      4. sub-neg89.9%

        \[\leadsto \mathsf{fma}\left(\color{blue}{x + \left(-0.5\right)}, \log x, \left(-x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
      5. metadata-eval89.9%

        \[\leadsto \mathsf{fma}\left(x + \color{blue}{-0.5}, \log x, \left(-x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
      6. +-commutative89.9%

        \[\leadsto \mathsf{fma}\left(x + -0.5, \log x, \color{blue}{0.91893853320467 + \left(-x\right)}\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
      7. unsub-neg89.9%

        \[\leadsto \mathsf{fma}\left(x + -0.5, \log x, \color{blue}{0.91893853320467 - x}\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
      8. *-commutative89.9%

        \[\leadsto \mathsf{fma}\left(x + -0.5, \log x, 0.91893853320467 - x\right) + \frac{\color{blue}{z \cdot \left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right)} + 0.083333333333333}{x} \]
      9. fma-define89.9%

        \[\leadsto \mathsf{fma}\left(x + -0.5, \log x, 0.91893853320467 - x\right) + \frac{\color{blue}{\mathsf{fma}\left(z, \left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778, 0.083333333333333\right)}}{x} \]
      10. fma-neg89.9%

        \[\leadsto \mathsf{fma}\left(x + -0.5, \log x, 0.91893853320467 - x\right) + \frac{\mathsf{fma}\left(z, \color{blue}{\mathsf{fma}\left(y + 0.0007936500793651, z, -0.0027777777777778\right)}, 0.083333333333333\right)}{x} \]
      11. metadata-eval89.9%

        \[\leadsto \mathsf{fma}\left(x + -0.5, \log x, 0.91893853320467 - x\right) + \frac{\mathsf{fma}\left(z, \mathsf{fma}\left(y + 0.0007936500793651, z, \color{blue}{-0.0027777777777778}\right), 0.083333333333333\right)}{x} \]
    3. Simplified89.9%

      \[\leadsto \color{blue}{\mathsf{fma}\left(x + -0.5, \log x, 0.91893853320467 - x\right) + \frac{\mathsf{fma}\left(z, \mathsf{fma}\left(y + 0.0007936500793651, z, -0.0027777777777778\right), 0.083333333333333\right)}{x}} \]
    4. Add Preprocessing
    5. Taylor expanded in z around 0 81.0%

      \[\leadsto \mathsf{fma}\left(x + -0.5, \log x, 0.91893853320467 - x\right) + \frac{\color{blue}{0.083333333333333}}{x} \]
    6. Taylor expanded in x around inf 81.0%

      \[\leadsto \color{blue}{x \cdot \left(-1 \cdot \log \left(\frac{1}{x}\right) - 1\right)} \]
    7. Step-by-step derivation
      1. sub-neg81.0%

        \[\leadsto x \cdot \color{blue}{\left(-1 \cdot \log \left(\frac{1}{x}\right) + \left(-1\right)\right)} \]
      2. mul-1-neg81.0%

        \[\leadsto x \cdot \left(\color{blue}{\left(-\log \left(\frac{1}{x}\right)\right)} + \left(-1\right)\right) \]
      3. log-rec81.0%

        \[\leadsto x \cdot \left(\left(-\color{blue}{\left(-\log x\right)}\right) + \left(-1\right)\right) \]
      4. remove-double-neg81.0%

        \[\leadsto x \cdot \left(\color{blue}{\log x} + \left(-1\right)\right) \]
      5. metadata-eval81.0%

        \[\leadsto x \cdot \left(\log x + \color{blue}{-1}\right) \]
    8. Simplified81.0%

      \[\leadsto \color{blue}{x \cdot \left(\log x + -1\right)} \]
  3. Recombined 3 regimes into one program.
  4. Final simplification83.1%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq 1.75 \cdot 10^{-275}:\\ \;\;\;\;\left(0.91893853320467 + \log x \cdot -0.5\right) + \frac{0.083333333333333 + z \cdot \left(z \cdot y - 0.0027777777777778\right)}{x}\\ \mathbf{elif}\;x \leq 2.2 \cdot 10^{-139}:\\ \;\;\;\;\left(0.91893853320467 + \log x \cdot -0.5\right) + \frac{0.083333333333333 + z \cdot \left(z \cdot 0.0007936500793651 - 0.0027777777777778\right)}{x}\\ \mathbf{elif}\;x \leq 1.45 \cdot 10^{+59}:\\ \;\;\;\;\left(0.91893853320467 + \log x \cdot -0.5\right) + \frac{0.083333333333333 + z \cdot \left(z \cdot y - 0.0027777777777778\right)}{x}\\ \mathbf{else}:\\ \;\;\;\;x \cdot \left(\log x + -1\right)\\ \end{array} \]
  5. Add Preprocessing

Alternative 3: 93.5% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_0 := x \cdot \left(\log x + -1\right)\\ \mathbf{if}\;x \leq 4.5 \cdot 10^{+188}:\\ \;\;\;\;t\_0 + \frac{0.083333333333333 + z \cdot \left(z \cdot \left(0.0007936500793651 + y\right) - 0.0027777777777778\right)}{x}\\ \mathbf{else}:\\ \;\;\;\;t\_0\\ \end{array} \end{array} \]
(FPCore (x y z)
 :precision binary64
 (let* ((t_0 (* x (+ (log x) -1.0))))
   (if (<= x 4.5e+188)
     (+
      t_0
      (/
       (+
        0.083333333333333
        (* z (- (* z (+ 0.0007936500793651 y)) 0.0027777777777778)))
       x))
     t_0)))
/* Herbie alternative 3 (binary64): large-x Stirling limit
 * t_0 = x*(log(x) - 1), plus the z-correction over x only when
 * x <= 4.5e+188; per the derivation, a Taylor expansion around
 * x -> inf drops the correction for larger x (NaN x also takes
 * the correction-free branch). */
double code(double x, double y, double z) {
	double t_0 = x * (log(x) + -1.0);
	double tmp;
	if (x <= 4.5e+188) {
		tmp = t_0 + ((0.083333333333333 + (z * ((z * (0.0007936500793651 + y)) - 0.0027777777777778))) / x);
	} else {
		tmp = t_0;
	}
	return tmp;
}
! Herbie alternative 3 (binary64): large-x Stirling limit
! t_0 = x*(log(x) - 1), plus the z-correction over x only when
! x <= 4.5d+188; the correction is dropped for larger x.
real(8) function code(x, y, z)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8) :: t_0
    real(8) :: tmp
    t_0 = x * (log(x) + (-1.0d0))
    if (x <= 4.5d+188) then
        tmp = t_0 + ((0.083333333333333d0 + (z * ((z * (0.0007936500793651d0 + y)) - 0.0027777777777778d0))) / x)
    else
        tmp = t_0
    end if
    code = tmp
end function
/**
 * Herbie alternative 3 (binary64): large-x Stirling limit
 * t_0 = x*(log(x) - 1), plus the z-correction over x only when
 * x <= 4.5e+188; the correction is dropped for larger (or NaN) x.
 */
public static double code(double x, double y, double z) {
	double t_0 = x * (Math.log(x) + -1.0);
	double tmp;
	if (x <= 4.5e+188) {
		tmp = t_0 + ((0.083333333333333 + (z * ((z * (0.0007936500793651 + y)) - 0.0027777777777778))) / x);
	} else {
		tmp = t_0;
	}
	return tmp;
}
def code(x, y, z):
	t_0 = x * (math.log(x) + -1.0)
	tmp = 0
	if x <= 4.5e+188:
		tmp = t_0 + ((0.083333333333333 + (z * ((z * (0.0007936500793651 + y)) - 0.0027777777777778))) / x)
	else:
		tmp = t_0
	return tmp
function code(x, y, z)
	# Herbie alternative 3: leading term x*(log(x) - 1); the 1/x series
	# correction is added only for x <= 4.5e188 (see derivation above).
	t_0 = Float64(x * Float64(log(x) + -1.0))
	tmp = 0.0
	if (x <= 4.5e+188)
		tmp = Float64(t_0 + Float64(Float64(0.083333333333333 + Float64(z * Float64(Float64(z * Float64(0.0007936500793651 + y)) - 0.0027777777777778))) / x));
	else
		tmp = t_0;
	end
	return tmp
end
function tmp_2 = code(x, y, z)
	% Herbie alternative 3: leading term x*(log(x) - 1); the 1/x series
	% correction is added only for x <= 4.5e188 (see derivation above).
	t_0 = x * (log(x) + -1.0);
	tmp = 0.0;
	if (x <= 4.5e+188)
		tmp = t_0 + ((0.083333333333333 + (z * ((z * (0.0007936500793651 + y)) - 0.0027777777777778))) / x);
	else
		tmp = t_0;
	end
	tmp_2 = tmp;
end
(* Herbie alternative 3: leading term x*(log(x) - 1); the 1/x series correction is added only for x <= 4.5*10^188 (see derivation above). *)
code[x_, y_, z_] := Block[{t$95$0 = N[(x * N[(N[Log[x], $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[x, 4.5e+188], N[(t$95$0 + N[(N[(0.083333333333333 + N[(z * N[(N[(z * N[(0.0007936500793651 + y), $MachinePrecision]), $MachinePrecision] - 0.0027777777777778), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / x), $MachinePrecision]), $MachinePrecision], t$95$0]]
\begin{array}{l}

\\
\begin{array}{l}
t_0 := x \cdot \left(\log x + -1\right)\\
\mathbf{if}\;x \leq 4.5 \cdot 10^{+188}:\\
\;\;\;\;t\_0 + \frac{0.083333333333333 + z \cdot \left(z \cdot \left(0.0007936500793651 + y\right) - 0.0027777777777778\right)}{x}\\

\mathbf{else}:\\
\;\;\;\;t\_0\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if x < 4.5000000000000001e188

    1. Initial program 98.8%

      \[\left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
    2. Add Preprocessing
    3. Taylor expanded in x around inf 98.7%

      \[\leadsto \color{blue}{x \cdot \left(-1 \cdot \log \left(\frac{1}{x}\right) - 1\right)} + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
    4. Step-by-step derivation
      1. sub-neg98.7%

        \[\leadsto x \cdot \color{blue}{\left(-1 \cdot \log \left(\frac{1}{x}\right) + \left(-1\right)\right)} + \left(z \cdot \left(\frac{z}{x} \cdot \left(0.0007936500793651 + y\right)\right) + \frac{1}{x \cdot 12.000000000000048}\right) \]
      2. mul-1-neg98.7%

        \[\leadsto x \cdot \left(\color{blue}{\left(-\log \left(\frac{1}{x}\right)\right)} + \left(-1\right)\right) + \left(z \cdot \left(\frac{z}{x} \cdot \left(0.0007936500793651 + y\right)\right) + \frac{1}{x \cdot 12.000000000000048}\right) \]
      3. log-rec98.7%

        \[\leadsto x \cdot \left(\left(-\color{blue}{\left(-\log x\right)}\right) + \left(-1\right)\right) + \left(z \cdot \left(\frac{z}{x} \cdot \left(0.0007936500793651 + y\right)\right) + \frac{1}{x \cdot 12.000000000000048}\right) \]
      4. remove-double-neg98.7%

        \[\leadsto x \cdot \left(\color{blue}{\log x} + \left(-1\right)\right) + \left(z \cdot \left(\frac{z}{x} \cdot \left(0.0007936500793651 + y\right)\right) + \frac{1}{x \cdot 12.000000000000048}\right) \]
      5. metadata-eval98.7%

        \[\leadsto x \cdot \left(\log x + \color{blue}{-1}\right) + \left(z \cdot \left(\frac{z}{x} \cdot \left(0.0007936500793651 + y\right)\right) + \frac{1}{x \cdot 12.000000000000048}\right) \]
      6. +-commutative98.7%

        \[\leadsto x \cdot \color{blue}{\left(-1 + \log x\right)} + \left(z \cdot \left(\frac{z}{x} \cdot \left(0.0007936500793651 + y\right)\right) + \frac{1}{x \cdot 12.000000000000048}\right) \]
    5. Simplified 98.7%

      \[\leadsto \color{blue}{x \cdot \left(-1 + \log x\right)} + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]

    if 4.5000000000000001e188 < x

    1. Initial program 80.3%

      \[\left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
    2. Step-by-step derivation
      1. sub-neg80.3%

        \[\leadsto \left(\color{blue}{\left(\left(x - 0.5\right) \cdot \log x + \left(-x\right)\right)} + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
      2. associate-+l+80.3%

        \[\leadsto \color{blue}{\left(\left(x - 0.5\right) \cdot \log x + \left(\left(-x\right) + 0.91893853320467\right)\right)} + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
      3. fma-define80.4%

        \[\leadsto \color{blue}{\mathsf{fma}\left(x - 0.5, \log x, \left(-x\right) + 0.91893853320467\right)} + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
      4. sub-neg80.4%

        \[\leadsto \mathsf{fma}\left(\color{blue}{x + \left(-0.5\right)}, \log x, \left(-x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
      5. metadata-eval80.4%

        \[\leadsto \mathsf{fma}\left(x + \color{blue}{-0.5}, \log x, \left(-x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
      6. +-commutative80.4%

        \[\leadsto \mathsf{fma}\left(x + -0.5, \log x, \color{blue}{0.91893853320467 + \left(-x\right)}\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
      7. unsub-neg80.4%

        \[\leadsto \mathsf{fma}\left(x + -0.5, \log x, \color{blue}{0.91893853320467 - x}\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
      8. *-commutative80.4%

        \[\leadsto \mathsf{fma}\left(x + -0.5, \log x, 0.91893853320467 - x\right) + \frac{\color{blue}{z \cdot \left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right)} + 0.083333333333333}{x} \]
      9. fma-define80.4%

        \[\leadsto \mathsf{fma}\left(x + -0.5, \log x, 0.91893853320467 - x\right) + \frac{\color{blue}{\mathsf{fma}\left(z, \left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778, 0.083333333333333\right)}}{x} \]
      10. fma-neg80.4%

        \[\leadsto \mathsf{fma}\left(x + -0.5, \log x, 0.91893853320467 - x\right) + \frac{\mathsf{fma}\left(z, \color{blue}{\mathsf{fma}\left(y + 0.0007936500793651, z, -0.0027777777777778\right)}, 0.083333333333333\right)}{x} \]
      11. metadata-eval80.4%

        \[\leadsto \mathsf{fma}\left(x + -0.5, \log x, 0.91893853320467 - x\right) + \frac{\mathsf{fma}\left(z, \mathsf{fma}\left(y + 0.0007936500793651, z, \color{blue}{-0.0027777777777778}\right), 0.083333333333333\right)}{x} \]
    3. Simplified 80.4%

      \[\leadsto \color{blue}{\mathsf{fma}\left(x + -0.5, \log x, 0.91893853320467 - x\right) + \frac{\mathsf{fma}\left(z, \mathsf{fma}\left(y + 0.0007936500793651, z, -0.0027777777777778\right), 0.083333333333333\right)}{x}} \]
    4. Add Preprocessing
    5. Taylor expanded in z around 0 90.3%

      \[\leadsto \mathsf{fma}\left(x + -0.5, \log x, 0.91893853320467 - x\right) + \frac{\color{blue}{0.083333333333333}}{x} \]
    6. Taylor expanded in x around inf 90.3%

      \[\leadsto \color{blue}{x \cdot \left(-1 \cdot \log \left(\frac{1}{x}\right) - 1\right)} \]
    7. Step-by-step derivation
      1. sub-neg90.3%

        \[\leadsto x \cdot \color{blue}{\left(-1 \cdot \log \left(\frac{1}{x}\right) + \left(-1\right)\right)} \]
      2. mul-1-neg90.3%

        \[\leadsto x \cdot \left(\color{blue}{\left(-\log \left(\frac{1}{x}\right)\right)} + \left(-1\right)\right) \]
      3. log-rec90.3%

        \[\leadsto x \cdot \left(\left(-\color{blue}{\left(-\log x\right)}\right) + \left(-1\right)\right) \]
      4. remove-double-neg90.3%

        \[\leadsto x \cdot \left(\color{blue}{\log x} + \left(-1\right)\right) \]
      5. metadata-eval90.3%

        \[\leadsto x \cdot \left(\log x + \color{blue}{-1}\right) \]
    8. Simplified 90.3%

      \[\leadsto \color{blue}{x \cdot \left(\log x + -1\right)} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification 97.0%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq 4.5 \cdot 10^{+188}:\\ \;\;\;\;x \cdot \left(\log x + -1\right) + \frac{0.083333333333333 + z \cdot \left(z \cdot \left(0.0007936500793651 + y\right) - 0.0027777777777778\right)}{x}\\ \mathbf{else}:\\ \;\;\;\;x \cdot \left(\log x + -1\right)\\ \end{array} \]
  5. Add Preprocessing

Alternative 4: 71.5% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq 2.9 \cdot 10^{+60}:\\ \;\;\;\;\left(0.91893853320467 + \log x \cdot -0.5\right) + \frac{0.083333333333333 + z \cdot \left(z \cdot 0.0007936500793651 - 0.0027777777777778\right)}{x}\\ \mathbf{else}:\\ \;\;\;\;x \cdot \left(\log x + -1\right)\\ \end{array} \end{array} \]
;; Herbie alternative 4 (71.5% accurate): y has been Taylor-expanded away
;; around y = 0, so y is accepted but unused; accuracy suffers when |y| is
;; not small (see the derivation below).
(FPCore (x y z)
 :precision binary64
 (if (<= x 2.9e+60)
   (+
    (+ 0.91893853320467 (* (log x) -0.5))
    (/
     (+
      0.083333333333333
      (* z (- (* z 0.0007936500793651) 0.0027777777777778)))
     x))
   (* x (+ (log x) -1.0))))
double code(double x, double y, double z) {
	double tmp;
	if (x <= 2.9e+60) {
		tmp = (0.91893853320467 + (log(x) * -0.5)) + ((0.083333333333333 + (z * ((z * 0.0007936500793651) - 0.0027777777777778))) / x);
	} else {
		tmp = x * (log(x) + -1.0);
	}
	return tmp;
}
real(8) function code(x, y, z)
    ! Herbie alternative 4: y is unused (Taylor-expanded away around y = 0),
    ! which is why this variant is only 71.5% accurate overall.
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8) :: tmp
    if (x <= 2.9d+60) then
        tmp = (0.91893853320467d0 + (log(x) * (-0.5d0))) + ((0.083333333333333d0 + (z * ((z * 0.0007936500793651d0) - 0.0027777777777778d0))) / x)
    else
        tmp = x * (log(x) + (-1.0d0))
    end if
    code = tmp
end function
/**
 * Herbie alternative 4: low-accuracy variant; y was Taylor-expanded away
 * around y = 0 and is intentionally unused. Operation order matches the
 * generated FPCore exactly (FP association matters).
 */
public static double code(double x, double y, double z) {
	if (x > 2.9e+60) {
		// Large-x regime: leading term of the expansion at x -> inf.
		return x * (Math.log(x) + -1.0);
	}
	double series = 0.083333333333333 + (z * ((z * 0.0007936500793651) - 0.0027777777777778));
	return (0.91893853320467 + (Math.log(x) * -0.5)) + series / x;
}
def code(x, y, z):
	"""Herbie alternative 4: low-accuracy variant; y was Taylor-expanded
	away around y = 0 and is intentionally unused.

	Operation order matches the generated FPCore exactly; FP association
	matters, so do not re-associate these expressions."""
	if x > 2.9e+60:
		# Large-x regime: leading term of the expansion at x -> inf.
		return x * (math.log(x) + -1.0)
	series = 0.083333333333333 + (z * ((z * 0.0007936500793651) - 0.0027777777777778))
	return (0.91893853320467 + (math.log(x) * -0.5)) + series / x
function code(x, y, z)
	# Herbie alternative 4: y is unused here (Taylor-expanded away around
	# y = 0), which is why this variant is only 71.5% accurate overall.
	tmp = 0.0
	if (x <= 2.9e+60)
		tmp = Float64(Float64(0.91893853320467 + Float64(log(x) * -0.5)) + Float64(Float64(0.083333333333333 + Float64(z * Float64(Float64(z * 0.0007936500793651) - 0.0027777777777778))) / x));
	else
		tmp = Float64(x * Float64(log(x) + -1.0));
	end
	return tmp
end
function tmp_2 = code(x, y, z)
	% Herbie alternative 4: y is unused (Taylor-expanded away around y = 0).
	tmp = 0.0;
	if (x <= 2.9e+60)
		tmp = (0.91893853320467 + (log(x) * -0.5)) + ((0.083333333333333 + (z * ((z * 0.0007936500793651) - 0.0027777777777778))) / x);
	else
		tmp = x * (log(x) + -1.0);
	end
	tmp_2 = tmp;
end
(* Herbie alternative 4: y is unused (Taylor-expanded away around y = 0); only 71.5% accurate overall. *)
code[x_, y_, z_] := If[LessEqual[x, 2.9e+60], N[(N[(0.91893853320467 + N[(N[Log[x], $MachinePrecision] * -0.5), $MachinePrecision]), $MachinePrecision] + N[(N[(0.083333333333333 + N[(z * N[(N[(z * 0.0007936500793651), $MachinePrecision] - 0.0027777777777778), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / x), $MachinePrecision]), $MachinePrecision], N[(x * N[(N[Log[x], $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x \leq 2.9 \cdot 10^{+60}:\\
\;\;\;\;\left(0.91893853320467 + \log x \cdot -0.5\right) + \frac{0.083333333333333 + z \cdot \left(z \cdot 0.0007936500793651 - 0.0027777777777778\right)}{x}\\

\mathbf{else}:\\
\;\;\;\;x \cdot \left(\log x + -1\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if x < 2.9e60

    1. Initial program 99.1%

      \[\left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
    2. Add Preprocessing
    3. Step-by-step derivation
      1. add-cbrt-cube99.1%

        \[\leadsto \left(\left(\color{blue}{\sqrt[3]{\left(\left(\left(x - 0.5\right) \cdot \log x\right) \cdot \left(\left(x - 0.5\right) \cdot \log x\right)\right) \cdot \left(\left(x - 0.5\right) \cdot \log x\right)}} - x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
      2. pow399.1%

        \[\leadsto \left(\left(\sqrt[3]{\color{blue}{{\left(\left(x - 0.5\right) \cdot \log x\right)}^{3}}} - x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
      3. sub-neg99.1%

        \[\leadsto \left(\left(\sqrt[3]{{\left(\color{blue}{\left(x + \left(-0.5\right)\right)} \cdot \log x\right)}^{3}} - x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
      4. metadata-eval99.1%

        \[\leadsto \left(\left(\sqrt[3]{{\left(\left(x + \color{blue}{-0.5}\right) \cdot \log x\right)}^{3}} - x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
    4. Applied egg-rr99.1%

      \[\leadsto \left(\left(\color{blue}{\sqrt[3]{{\left(\left(x + -0.5\right) \cdot \log x\right)}^{3}}} - x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
    5. Taylor expanded in x around 0 95.7%

      \[\leadsto \color{blue}{\left(0.91893853320467 + -0.5 \cdot \log x\right)} + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
    6. Taylor expanded in y around 0 73.5%

      \[\leadsto \left(0.91893853320467 + -0.5 \cdot \log x\right) + \frac{\left(\color{blue}{0.0007936500793651 \cdot z} - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
    7. Step-by-step derivation
      1. *-commutative73.5%

        \[\leadsto \left(0.91893853320467 + -0.5 \cdot \log x\right) + \frac{\left(\color{blue}{z \cdot 0.0007936500793651} - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
    8. Simplified 73.5%

      \[\leadsto \left(0.91893853320467 + -0.5 \cdot \log x\right) + \frac{\left(\color{blue}{z \cdot 0.0007936500793651} - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]

    if 2.9e60 < x

    1. Initial program 89.7%

      \[\left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
    2. Step-by-step derivation
      1. sub-neg89.7%

        \[\leadsto \left(\color{blue}{\left(\left(x - 0.5\right) \cdot \log x + \left(-x\right)\right)} + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
      2. associate-+l+89.7%

        \[\leadsto \color{blue}{\left(\left(x - 0.5\right) \cdot \log x + \left(\left(-x\right) + 0.91893853320467\right)\right)} + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
      3. fma-define89.8%

        \[\leadsto \color{blue}{\mathsf{fma}\left(x - 0.5, \log x, \left(-x\right) + 0.91893853320467\right)} + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
      4. sub-neg89.8%

        \[\leadsto \mathsf{fma}\left(\color{blue}{x + \left(-0.5\right)}, \log x, \left(-x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
      5. metadata-eval89.8%

        \[\leadsto \mathsf{fma}\left(x + \color{blue}{-0.5}, \log x, \left(-x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
      6. +-commutative89.8%

        \[\leadsto \mathsf{fma}\left(x + -0.5, \log x, \color{blue}{0.91893853320467 + \left(-x\right)}\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
      7. unsub-neg89.8%

        \[\leadsto \mathsf{fma}\left(x + -0.5, \log x, \color{blue}{0.91893853320467 - x}\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
      8. *-commutative89.8%

        \[\leadsto \mathsf{fma}\left(x + -0.5, \log x, 0.91893853320467 - x\right) + \frac{\color{blue}{z \cdot \left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right)} + 0.083333333333333}{x} \]
      9. fma-define89.8%

        \[\leadsto \mathsf{fma}\left(x + -0.5, \log x, 0.91893853320467 - x\right) + \frac{\color{blue}{\mathsf{fma}\left(z, \left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778, 0.083333333333333\right)}}{x} \]
      10. fma-neg89.8%

        \[\leadsto \mathsf{fma}\left(x + -0.5, \log x, 0.91893853320467 - x\right) + \frac{\mathsf{fma}\left(z, \color{blue}{\mathsf{fma}\left(y + 0.0007936500793651, z, -0.0027777777777778\right)}, 0.083333333333333\right)}{x} \]
      11. metadata-eval89.8%

        \[\leadsto \mathsf{fma}\left(x + -0.5, \log x, 0.91893853320467 - x\right) + \frac{\mathsf{fma}\left(z, \mathsf{fma}\left(y + 0.0007936500793651, z, \color{blue}{-0.0027777777777778}\right), 0.083333333333333\right)}{x} \]
    3. Simplified 89.8%

      \[\leadsto \color{blue}{\mathsf{fma}\left(x + -0.5, \log x, 0.91893853320467 - x\right) + \frac{\mathsf{fma}\left(z, \mathsf{fma}\left(y + 0.0007936500793651, z, -0.0027777777777778\right), 0.083333333333333\right)}{x}} \]
    4. Add Preprocessing
    5. Taylor expanded in z around 0 81.6%

      \[\leadsto \mathsf{fma}\left(x + -0.5, \log x, 0.91893853320467 - x\right) + \frac{\color{blue}{0.083333333333333}}{x} \]
    6. Taylor expanded in x around inf 81.6%

      \[\leadsto \color{blue}{x \cdot \left(-1 \cdot \log \left(\frac{1}{x}\right) - 1\right)} \]
    7. Step-by-step derivation
      1. sub-neg81.6%

        \[\leadsto x \cdot \color{blue}{\left(-1 \cdot \log \left(\frac{1}{x}\right) + \left(-1\right)\right)} \]
      2. mul-1-neg81.6%

        \[\leadsto x \cdot \left(\color{blue}{\left(-\log \left(\frac{1}{x}\right)\right)} + \left(-1\right)\right) \]
      3. log-rec81.6%

        \[\leadsto x \cdot \left(\left(-\color{blue}{\left(-\log x\right)}\right) + \left(-1\right)\right) \]
      4. remove-double-neg81.6%

        \[\leadsto x \cdot \left(\color{blue}{\log x} + \left(-1\right)\right) \]
      5. metadata-eval81.6%

        \[\leadsto x \cdot \left(\log x + \color{blue}{-1}\right) \]
    8. Simplified 81.6%

      \[\leadsto \color{blue}{x \cdot \left(\log x + -1\right)} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification 77.1%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq 2.9 \cdot 10^{+60}:\\ \;\;\;\;\left(0.91893853320467 + \log x \cdot -0.5\right) + \frac{0.083333333333333 + z \cdot \left(z \cdot 0.0007936500793651 - 0.0027777777777778\right)}{x}\\ \mathbf{else}:\\ \;\;\;\;x \cdot \left(\log x + -1\right)\\ \end{array} \]
  5. Add Preprocessing

Alternative 5: 97.3% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \left(z \cdot \left(\frac{z}{x} \cdot \left(0.0007936500793651 + y\right)\right) + \frac{1}{x \cdot 12.000000000000048}\right) + x \cdot \left(\log x + -1\right) \end{array} \]
;; Herbie alternative 5 (97.3% accurate): branch-free form. The coefficient
;; 0.083333333333333/x is rewritten as 1/(x * 12.000000000000048) — the
;; constant is the reciprocal of the original coefficient (see derivation).
(FPCore (x y z)
 :precision binary64
 (+
  (+
   (* z (* (/ z x) (+ 0.0007936500793651 y)))
   (/ 1.0 (* x 12.000000000000048)))
  (* x (+ (log x) -1.0))))
double code(double x, double y, double z) {
	return ((z * ((z / x) * (0.0007936500793651 + y))) + (1.0 / (x * 12.000000000000048))) + (x * (log(x) + -1.0));
}
real(8) function code(x, y, z)
    ! Herbie alternative 5: branch-free; 1/(x*12.000000000000048d0) encodes
    ! the 0.0833.../x coefficient as a reciprocal (see derivation above).
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    code = ((z * ((z / x) * (0.0007936500793651d0 + y))) + (1.0d0 / (x * 12.000000000000048d0))) + (x * (log(x) + (-1.0d0)))
end function
/**
 * Herbie alternative 5: branch-free; 1/(x*12.000000000000048) encodes the
 * 0.0833.../x coefficient as a reciprocal. Operation order matches the
 * generated FPCore exactly (FP association matters).
 */
public static double code(double x, double y, double z) {
	double quad = z * ((z / x) * (0.0007936500793651 + y));
	double recip = 1.0 / (x * 12.000000000000048);
	return (quad + recip) + (x * (Math.log(x) + -1.0));
}
def code(x, y, z):
	"""Herbie alternative 5: branch-free form; 1/(x*12.000000000000048)
	encodes the 0.0833.../x coefficient as a reciprocal.

	Operation order matches the generated FPCore exactly; FP association
	matters, so do not re-associate these expressions."""
	quad = z * ((z / x) * (0.0007936500793651 + y))
	recip = 1.0 / (x * 12.000000000000048)
	return (quad + recip) + (x * (math.log(x) + -1.0))
function code(x, y, z)
	# Herbie alternative 5: branch-free; 1/(x*12.000000000000048) encodes
	# the 0.0833.../x coefficient as a reciprocal (see derivation above).
	return Float64(Float64(Float64(z * Float64(Float64(z / x) * Float64(0.0007936500793651 + y))) + Float64(1.0 / Float64(x * 12.000000000000048))) + Float64(x * Float64(log(x) + -1.0)))
end
function tmp = code(x, y, z)
	% Herbie alternative 5: branch-free; 1/(x*12.000000000000048) encodes
	% the 0.0833.../x coefficient as a reciprocal (see derivation above).
	tmp = ((z * ((z / x) * (0.0007936500793651 + y))) + (1.0 / (x * 12.000000000000048))) + (x * (log(x) + -1.0));
end
(* Herbie alternative 5: branch-free; 1/(x*12.000000000000048) encodes the 0.0833.../x coefficient as a reciprocal. *)
code[x_, y_, z_] := N[(N[(N[(z * N[(N[(z / x), $MachinePrecision] * N[(0.0007936500793651 + y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(1.0 / N[(x * 12.000000000000048), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(x * N[(N[Log[x], $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\left(z \cdot \left(\frac{z}{x} \cdot \left(0.0007936500793651 + y\right)\right) + \frac{1}{x \cdot 12.000000000000048}\right) + x \cdot \left(\log x + -1\right)
\end{array}
Derivation
  1. Initial program 94.9%

    \[\left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
  2. Add Preprocessing
  3. Taylor expanded in z around 0 95.5%

    \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \color{blue}{\left(z \cdot \left(z \cdot \left(0.0007936500793651 \cdot \frac{1}{x} + \frac{y}{x}\right) - 0.0027777777777778 \cdot \frac{1}{x}\right) + 0.083333333333333 \cdot \frac{1}{x}\right)} \]
  4. Taylor expanded in z around inf 92.1%

    \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(\color{blue}{{z}^{2} \cdot \left(0.0007936500793651 \cdot \frac{1}{x} + \frac{y}{x}\right)} + 0.083333333333333 \cdot \frac{1}{x}\right) \]
  5. Step-by-step derivation
    1. unpow292.1%

      \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(\color{blue}{\left(z \cdot z\right)} \cdot \left(0.0007936500793651 \cdot \frac{1}{x} + \frac{y}{x}\right) + 0.083333333333333 \cdot \frac{1}{x}\right) \]
    2. associate-*l*95.1%

      \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(\color{blue}{z \cdot \left(z \cdot \left(0.0007936500793651 \cdot \frac{1}{x} + \frac{y}{x}\right)\right)} + 0.083333333333333 \cdot \frac{1}{x}\right) \]
    3. associate-*r/95.1%

      \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(z \cdot \left(z \cdot \left(\color{blue}{\frac{0.0007936500793651 \cdot 1}{x}} + \frac{y}{x}\right)\right) + 0.083333333333333 \cdot \frac{1}{x}\right) \]
    4. metadata-eval95.1%

      \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(z \cdot \left(z \cdot \left(\frac{\color{blue}{0.0007936500793651}}{x} + \frac{y}{x}\right)\right) + 0.083333333333333 \cdot \frac{1}{x}\right) \]
    5. distribute-rgt-out91.6%

      \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(z \cdot \color{blue}{\left(\frac{0.0007936500793651}{x} \cdot z + \frac{y}{x} \cdot z\right)} + 0.083333333333333 \cdot \frac{1}{x}\right) \]
    6. associate-*l/91.6%

      \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(z \cdot \left(\color{blue}{\frac{0.0007936500793651 \cdot z}{x}} + \frac{y}{x} \cdot z\right) + 0.083333333333333 \cdot \frac{1}{x}\right) \]
    7. associate-*r/91.6%

      \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(z \cdot \left(\color{blue}{0.0007936500793651 \cdot \frac{z}{x}} + \frac{y}{x} \cdot z\right) + 0.083333333333333 \cdot \frac{1}{x}\right) \]
    8. associate-*l/95.0%

      \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(z \cdot \left(0.0007936500793651 \cdot \frac{z}{x} + \color{blue}{\frac{y \cdot z}{x}}\right) + 0.083333333333333 \cdot \frac{1}{x}\right) \]
    9. associate-/l*91.1%

      \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(z \cdot \left(0.0007936500793651 \cdot \frac{z}{x} + \color{blue}{y \cdot \frac{z}{x}}\right) + 0.083333333333333 \cdot \frac{1}{x}\right) \]
    10. distribute-rgt-out98.9%

      \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(z \cdot \color{blue}{\left(\frac{z}{x} \cdot \left(0.0007936500793651 + y\right)\right)} + 0.083333333333333 \cdot \frac{1}{x}\right) \]
  6. Simplified 98.9%

    \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(\color{blue}{z \cdot \left(\frac{z}{x} \cdot \left(0.0007936500793651 + y\right)\right)} + 0.083333333333333 \cdot \frac{1}{x}\right) \]
  7. Step-by-step derivation
    1. div-inv98.9%

      \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(z \cdot \left(\frac{z}{x} \cdot \left(0.0007936500793651 + y\right)\right) + \color{blue}{\frac{0.083333333333333}{x}}\right) \]
    2. clear-num98.9%

      \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(z \cdot \left(\frac{z}{x} \cdot \left(0.0007936500793651 + y\right)\right) + \color{blue}{\frac{1}{\frac{x}{0.083333333333333}}}\right) \]
    3. div-inv98.9%

      \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(z \cdot \left(\frac{z}{x} \cdot \left(0.0007936500793651 + y\right)\right) + \frac{1}{\color{blue}{x \cdot \frac{1}{0.083333333333333}}}\right) \]
    4. metadata-eval98.9%

      \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(z \cdot \left(\frac{z}{x} \cdot \left(0.0007936500793651 + y\right)\right) + \frac{1}{x \cdot \color{blue}{12.000000000000048}}\right) \]
  8. Applied egg-rr98.9%

    \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(z \cdot \left(\frac{z}{x} \cdot \left(0.0007936500793651 + y\right)\right) + \color{blue}{\frac{1}{x \cdot 12.000000000000048}}\right) \]
  9. Taylor expanded in x around inf 98.9%

    \[\leadsto \color{blue}{x \cdot \left(-1 \cdot \log \left(\frac{1}{x}\right) - 1\right)} + \left(z \cdot \left(\frac{z}{x} \cdot \left(0.0007936500793651 + y\right)\right) + \frac{1}{x \cdot 12.000000000000048}\right) \]
  10. Step-by-step derivation
    1. sub-neg98.9%

      \[\leadsto x \cdot \color{blue}{\left(-1 \cdot \log \left(\frac{1}{x}\right) + \left(-1\right)\right)} + \left(z \cdot \left(\frac{z}{x} \cdot \left(0.0007936500793651 + y\right)\right) + \frac{1}{x \cdot 12.000000000000048}\right) \]
    2. mul-1-neg98.9%

      \[\leadsto x \cdot \left(\color{blue}{\left(-\log \left(\frac{1}{x}\right)\right)} + \left(-1\right)\right) + \left(z \cdot \left(\frac{z}{x} \cdot \left(0.0007936500793651 + y\right)\right) + \frac{1}{x \cdot 12.000000000000048}\right) \]
    3. log-rec98.9%

      \[\leadsto x \cdot \left(\left(-\color{blue}{\left(-\log x\right)}\right) + \left(-1\right)\right) + \left(z \cdot \left(\frac{z}{x} \cdot \left(0.0007936500793651 + y\right)\right) + \frac{1}{x \cdot 12.000000000000048}\right) \]
    4. remove-double-neg98.9%

      \[\leadsto x \cdot \left(\color{blue}{\log x} + \left(-1\right)\right) + \left(z \cdot \left(\frac{z}{x} \cdot \left(0.0007936500793651 + y\right)\right) + \frac{1}{x \cdot 12.000000000000048}\right) \]
    5. metadata-eval98.9%

      \[\leadsto x \cdot \left(\log x + \color{blue}{-1}\right) + \left(z \cdot \left(\frac{z}{x} \cdot \left(0.0007936500793651 + y\right)\right) + \frac{1}{x \cdot 12.000000000000048}\right) \]
    6. +-commutative98.9%

      \[\leadsto x \cdot \color{blue}{\left(-1 + \log x\right)} + \left(z \cdot \left(\frac{z}{x} \cdot \left(0.0007936500793651 + y\right)\right) + \frac{1}{x \cdot 12.000000000000048}\right) \]
  11. Simplified 98.9%

    \[\leadsto \color{blue}{x \cdot \left(-1 + \log x\right)} + \left(z \cdot \left(\frac{z}{x} \cdot \left(0.0007936500793651 + y\right)\right) + \frac{1}{x \cdot 12.000000000000048}\right) \]
  12. Final simplification 98.9%

    \[\leadsto \left(z \cdot \left(\frac{z}{x} \cdot \left(0.0007936500793651 + y\right)\right) + \frac{1}{x \cdot 12.000000000000048}\right) + x \cdot \left(\log x + -1\right) \]
  13. Add Preprocessing

Alternative 6: 97.3% accurate, 1.0× speedup?

\[\begin{array}{l} \\ x \cdot \left(\log x + -1\right) + \left(z \cdot \left(\frac{z}{x} \cdot \left(0.0007936500793651 + y\right)\right) + 0.083333333333333 \cdot \frac{1}{x}\right) \end{array} \]
;; Herbie alternative 6 (97.3% accurate): same terms as alternative 5 with
;; the leading x*(log x - 1) term added first and the 0.083333333333333/x
;; coefficient kept explicit rather than folded into a reciprocal.
(FPCore (x y z)
 :precision binary64
 (+
  (* x (+ (log x) -1.0))
  (+
   (* z (* (/ z x) (+ 0.0007936500793651 y)))
   (* 0.083333333333333 (/ 1.0 x)))))
double code(double x, double y, double z) {
	return (x * (log(x) + -1.0)) + ((z * ((z / x) * (0.0007936500793651 + y))) + (0.083333333333333 * (1.0 / x)));
}
real(8) function code(x, y, z)
    ! Herbie alternative 6: leading x*(log(x) - 1) term plus the z- and
    ! 1/x-corrections in a different association than alternative 5.
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    code = (x * (log(x) + (-1.0d0))) + ((z * ((z / x) * (0.0007936500793651d0 + y))) + (0.083333333333333d0 * (1.0d0 / x)))
end function
/**
 * Herbie alternative 6: same terms as alternative 5, with the leading
 * x*(log(x) - 1) term added first and 0.0833.../x kept explicit.
 * Operation order matches the generated FPCore exactly.
 */
public static double code(double x, double y, double z) {
	double lead = x * (Math.log(x) + -1.0);
	double tail = (z * ((z / x) * (0.0007936500793651 + y))) + (0.083333333333333 * (1.0 / x));
	return lead + tail;
}
def code(x, y, z):
	"""Herbie alternative 6: same terms as alternative 5, with the leading
	x*(log(x) - 1) term added first and 0.0833.../x kept explicit.

	Operation order matches the generated FPCore exactly; FP association
	matters, so do not re-associate these expressions."""
	lead = x * (math.log(x) + -1.0)
	tail = (z * ((z / x) * (0.0007936500793651 + y))) + (0.083333333333333 * (1.0 / x))
	return lead + tail
function code(x, y, z)
	# Herbie alternative 6: same terms as alternative 5 with the leading
	# x*(log(x) - 1) term first and the 0.0833.../x coefficient explicit.
	return Float64(Float64(x * Float64(log(x) + -1.0)) + Float64(Float64(z * Float64(Float64(z / x) * Float64(0.0007936500793651 + y))) + Float64(0.083333333333333 * Float64(1.0 / x))))
end
% Herbie alternative 6: x*(log(x) - 1) + z*((z/x)*(0.0007936500793651 + y)) + 0.083333333333333*(1/x).
function tmp = code(x, y, z)
	tmp = (x * (log(x) + -1.0)) + ((z * ((z / x) * (0.0007936500793651 + y))) + (0.083333333333333 * (1.0 / x)));
end
code[x_, y_, z_] := N[(N[(x * N[(N[Log[x], $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision] + N[(N[(z * N[(N[(z / x), $MachinePrecision] * N[(0.0007936500793651 + y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(0.083333333333333 * N[(1.0 / x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
x \cdot \left(\log x + -1\right) + \left(z \cdot \left(\frac{z}{x} \cdot \left(0.0007936500793651 + y\right)\right) + 0.083333333333333 \cdot \frac{1}{x}\right)
\end{array}
Derivation
  1. Initial program 94.9%

    \[\left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
  2. Add Preprocessing
  3. Taylor expanded in z around 0 95.5%

    \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \color{blue}{\left(z \cdot \left(z \cdot \left(0.0007936500793651 \cdot \frac{1}{x} + \frac{y}{x}\right) - 0.0027777777777778 \cdot \frac{1}{x}\right) + 0.083333333333333 \cdot \frac{1}{x}\right)} \]
  4. Taylor expanded in z around inf 92.1%

    \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(\color{blue}{{z}^{2} \cdot \left(0.0007936500793651 \cdot \frac{1}{x} + \frac{y}{x}\right)} + 0.083333333333333 \cdot \frac{1}{x}\right) \]
  5. Step-by-step derivation
    1. unpow292.1%

      \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(\color{blue}{\left(z \cdot z\right)} \cdot \left(0.0007936500793651 \cdot \frac{1}{x} + \frac{y}{x}\right) + 0.083333333333333 \cdot \frac{1}{x}\right) \]
    2. associate-*l*95.1%

      \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(\color{blue}{z \cdot \left(z \cdot \left(0.0007936500793651 \cdot \frac{1}{x} + \frac{y}{x}\right)\right)} + 0.083333333333333 \cdot \frac{1}{x}\right) \]
    3. associate-*r/95.1%

      \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(z \cdot \left(z \cdot \left(\color{blue}{\frac{0.0007936500793651 \cdot 1}{x}} + \frac{y}{x}\right)\right) + 0.083333333333333 \cdot \frac{1}{x}\right) \]
    4. metadata-eval95.1%

      \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(z \cdot \left(z \cdot \left(\frac{\color{blue}{0.0007936500793651}}{x} + \frac{y}{x}\right)\right) + 0.083333333333333 \cdot \frac{1}{x}\right) \]
    5. distribute-rgt-out91.6%

      \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(z \cdot \color{blue}{\left(\frac{0.0007936500793651}{x} \cdot z + \frac{y}{x} \cdot z\right)} + 0.083333333333333 \cdot \frac{1}{x}\right) \]
    6. associate-*l/91.6%

      \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(z \cdot \left(\color{blue}{\frac{0.0007936500793651 \cdot z}{x}} + \frac{y}{x} \cdot z\right) + 0.083333333333333 \cdot \frac{1}{x}\right) \]
    7. associate-*r/91.6%

      \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(z \cdot \left(\color{blue}{0.0007936500793651 \cdot \frac{z}{x}} + \frac{y}{x} \cdot z\right) + 0.083333333333333 \cdot \frac{1}{x}\right) \]
    8. associate-*l/95.0%

      \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(z \cdot \left(0.0007936500793651 \cdot \frac{z}{x} + \color{blue}{\frac{y \cdot z}{x}}\right) + 0.083333333333333 \cdot \frac{1}{x}\right) \]
    9. associate-/l*91.1%

      \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(z \cdot \left(0.0007936500793651 \cdot \frac{z}{x} + \color{blue}{y \cdot \frac{z}{x}}\right) + 0.083333333333333 \cdot \frac{1}{x}\right) \]
    10. distribute-rgt-out98.9%

      \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(z \cdot \color{blue}{\left(\frac{z}{x} \cdot \left(0.0007936500793651 + y\right)\right)} + 0.083333333333333 \cdot \frac{1}{x}\right) \]
  6. Simplified98.9%

    \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(\color{blue}{z \cdot \left(\frac{z}{x} \cdot \left(0.0007936500793651 + y\right)\right)} + 0.083333333333333 \cdot \frac{1}{x}\right) \]
  7. Taylor expanded in x around inf 98.9%

    \[\leadsto \color{blue}{x \cdot \left(-1 \cdot \log \left(\frac{1}{x}\right) - 1\right)} + \left(z \cdot \left(\frac{z}{x} \cdot \left(0.0007936500793651 + y\right)\right) + 0.083333333333333 \cdot \frac{1}{x}\right) \]
  8. Step-by-step derivation
    1. sub-neg98.9%

      \[\leadsto x \cdot \color{blue}{\left(-1 \cdot \log \left(\frac{1}{x}\right) + \left(-1\right)\right)} + \left(z \cdot \left(\frac{z}{x} \cdot \left(0.0007936500793651 + y\right)\right) + \frac{1}{x \cdot 12.000000000000048}\right) \]
    2. mul-1-neg98.9%

      \[\leadsto x \cdot \left(\color{blue}{\left(-\log \left(\frac{1}{x}\right)\right)} + \left(-1\right)\right) + \left(z \cdot \left(\frac{z}{x} \cdot \left(0.0007936500793651 + y\right)\right) + \frac{1}{x \cdot 12.000000000000048}\right) \]
    3. log-rec98.9%

      \[\leadsto x \cdot \left(\left(-\color{blue}{\left(-\log x\right)}\right) + \left(-1\right)\right) + \left(z \cdot \left(\frac{z}{x} \cdot \left(0.0007936500793651 + y\right)\right) + \frac{1}{x \cdot 12.000000000000048}\right) \]
    4. remove-double-neg98.9%

      \[\leadsto x \cdot \left(\color{blue}{\log x} + \left(-1\right)\right) + \left(z \cdot \left(\frac{z}{x} \cdot \left(0.0007936500793651 + y\right)\right) + \frac{1}{x \cdot 12.000000000000048}\right) \]
    5. metadata-eval98.9%

      \[\leadsto x \cdot \left(\log x + \color{blue}{-1}\right) + \left(z \cdot \left(\frac{z}{x} \cdot \left(0.0007936500793651 + y\right)\right) + \frac{1}{x \cdot 12.000000000000048}\right) \]
    6. +-commutative98.9%

      \[\leadsto x \cdot \color{blue}{\left(-1 + \log x\right)} + \left(z \cdot \left(\frac{z}{x} \cdot \left(0.0007936500793651 + y\right)\right) + \frac{1}{x \cdot 12.000000000000048}\right) \]
  9. Simplified98.9%

    \[\leadsto \color{blue}{x \cdot \left(-1 + \log x\right)} + \left(z \cdot \left(\frac{z}{x} \cdot \left(0.0007936500793651 + y\right)\right) + 0.083333333333333 \cdot \frac{1}{x}\right) \]
  10. Final simplification98.9%

    \[\leadsto x \cdot \left(\log x + -1\right) + \left(z \cdot \left(\frac{z}{x} \cdot \left(0.0007936500793651 + y\right)\right) + 0.083333333333333 \cdot \frac{1}{x}\right) \]
  11. Add Preprocessing

Alternative 7: 62.7% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq 1360000000:\\ \;\;\;\;\left(0.91893853320467 + \log x \cdot -0.5\right) + \frac{0.083333333333333 + z \cdot -0.0027777777777778}{x}\\ \mathbf{else}:\\ \;\;\;\;x \cdot \left(\log x + -1\right)\\ \end{array} \end{array} \]
;; Herbie alternative 7 (62.7% accurate), split at x = 1.36e9:
;; x <= 1.36e9: (0.91893853320467 - 0.5*log(x)) + (0.083333333333333 - 0.0027777777777778*z)/x
;; otherwise:   x*(log(x) - 1).  The argument y is unused.
(FPCore (x y z)
 :precision binary64
 (if (<= x 1360000000.0)
   (+
    (+ 0.91893853320467 (* (log x) -0.5))
    (/ (+ 0.083333333333333 (* z -0.0027777777777778)) x))
   (* x (+ (log x) -1.0))))
/* Herbie alternative 7, two regimes split at x = 1.36e9:
   small x: (0.91893853320467 - 0.5*log(x)) + (0.083333333333333 - 0.0027777777777778*z)/x;
   large x: x*(log(x) - 1).  The parameter y is unused. */
double code(double x, double y, double z) {
	double tmp;
	if (x <= 1360000000.0) {
		tmp = (0.91893853320467 + (log(x) * -0.5)) + ((0.083333333333333 + (z * -0.0027777777777778)) / x);
	} else {
		tmp = x * (log(x) + -1.0);
	}
	return tmp;
}
! Herbie alternative 7, two regimes split at x = 1.36e9:
! small x: (0.91893853320467 - 0.5*log(x)) + (0.083333333333333 - 0.0027777777777778*z)/x;
! large x: x*(log(x) - 1).  The argument y is unused.
real(8) function code(x, y, z)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8) :: tmp
    if (x <= 1360000000.0d0) then
        tmp = (0.91893853320467d0 + (log(x) * (-0.5d0))) + ((0.083333333333333d0 + (z * (-0.0027777777777778d0))) / x)
    else
        tmp = x * (log(x) + (-1.0d0))
    end if
    code = tmp
end function
/** Herbie alternative 7, two regimes split at x = 1.36e9:
    small x: (0.91893853320467 - 0.5*log(x)) + (0.083333333333333 - 0.0027777777777778*z)/x;
    large x: x*(log(x) - 1).  The parameter y is unused. */
public static double code(double x, double y, double z) {
	double tmp;
	if (x <= 1360000000.0) {
		tmp = (0.91893853320467 + (Math.log(x) * -0.5)) + ((0.083333333333333 + (z * -0.0027777777777778)) / x);
	} else {
		tmp = x * (Math.log(x) + -1.0);
	}
	return tmp;
}
def code(x, y, z):
	# Herbie alternative 7, split at x = 1.36e9: small-x form vs. x*(log(x) - 1); y is unused.
	tmp = 0
	if x <= 1360000000.0:
		tmp = (0.91893853320467 + (math.log(x) * -0.5)) + ((0.083333333333333 + (z * -0.0027777777777778)) / x)
	else:
		tmp = x * (math.log(x) + -1.0)
	return tmp
# Herbie alternative 7, split at x = 1.36e9: small-x form vs. x*(log(x) - 1); y is unused.
# Every intermediate is explicitly rounded to Float64.
function code(x, y, z)
	tmp = 0.0
	if (x <= 1360000000.0)
		tmp = Float64(Float64(0.91893853320467 + Float64(log(x) * -0.5)) + Float64(Float64(0.083333333333333 + Float64(z * -0.0027777777777778)) / x));
	else
		tmp = Float64(x * Float64(log(x) + -1.0));
	end
	return tmp
end
% Herbie alternative 7, split at x = 1.36e9: small-x form vs. x*(log(x) - 1); y is unused.
function tmp_2 = code(x, y, z)
	tmp = 0.0;
	if (x <= 1360000000.0)
		tmp = (0.91893853320467 + (log(x) * -0.5)) + ((0.083333333333333 + (z * -0.0027777777777778)) / x);
	else
		tmp = x * (log(x) + -1.0);
	end
	tmp_2 = tmp;
end
code[x_, y_, z_] := If[LessEqual[x, 1360000000.0], N[(N[(0.91893853320467 + N[(N[Log[x], $MachinePrecision] * -0.5), $MachinePrecision]), $MachinePrecision] + N[(N[(0.083333333333333 + N[(z * -0.0027777777777778), $MachinePrecision]), $MachinePrecision] / x), $MachinePrecision]), $MachinePrecision], N[(x * N[(N[Log[x], $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x \leq 1360000000:\\
\;\;\;\;\left(0.91893853320467 + \log x \cdot -0.5\right) + \frac{0.083333333333333 + z \cdot -0.0027777777777778}{x}\\

\mathbf{else}:\\
\;\;\;\;x \cdot \left(\log x + -1\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if x < 1.36e9

    1. Initial program 99.8%

      \[\left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
    2. Add Preprocessing
    3. Step-by-step derivation
      1. add-cbrt-cube99.8%

        \[\leadsto \left(\left(\color{blue}{\sqrt[3]{\left(\left(\left(x - 0.5\right) \cdot \log x\right) \cdot \left(\left(x - 0.5\right) \cdot \log x\right)\right) \cdot \left(\left(x - 0.5\right) \cdot \log x\right)}} - x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
      2. pow399.8%

        \[\leadsto \left(\left(\sqrt[3]{\color{blue}{{\left(\left(x - 0.5\right) \cdot \log x\right)}^{3}}} - x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
      3. sub-neg99.8%

        \[\leadsto \left(\left(\sqrt[3]{{\left(\color{blue}{\left(x + \left(-0.5\right)\right)} \cdot \log x\right)}^{3}} - x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
      4. metadata-eval99.8%

        \[\leadsto \left(\left(\sqrt[3]{{\left(\left(x + \color{blue}{-0.5}\right) \cdot \log x\right)}^{3}} - x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
    4. Applied egg-rr99.8%

      \[\leadsto \left(\left(\color{blue}{\sqrt[3]{{\left(\left(x + -0.5\right) \cdot \log x\right)}^{3}}} - x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
    5. Taylor expanded in x around 0 99.8%

      \[\leadsto \color{blue}{\left(0.91893853320467 + -0.5 \cdot \log x\right)} + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
    6. Taylor expanded in z around 0 49.8%

      \[\leadsto \left(0.91893853320467 + -0.5 \cdot \log x\right) + \frac{\color{blue}{-0.0027777777777778 \cdot z} + 0.083333333333333}{x} \]
    7. Step-by-step derivation
      1. *-commutative49.8%

        \[\leadsto \left(0.91893853320467 + -0.5 \cdot \log x\right) + \frac{\color{blue}{z \cdot -0.0027777777777778} + 0.083333333333333}{x} \]
    8. Simplified49.8%

      \[\leadsto \left(0.91893853320467 + -0.5 \cdot \log x\right) + \frac{\color{blue}{z \cdot -0.0027777777777778} + 0.083333333333333}{x} \]

    if 1.36e9 < x

    1. Initial program 90.4%

      \[\left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
    2. Step-by-step derivation
      1. sub-neg90.4%

        \[\leadsto \left(\color{blue}{\left(\left(x - 0.5\right) \cdot \log x + \left(-x\right)\right)} + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
      2. associate-+l+90.4%

        \[\leadsto \color{blue}{\left(\left(x - 0.5\right) \cdot \log x + \left(\left(-x\right) + 0.91893853320467\right)\right)} + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
      3. fma-define90.6%

        \[\leadsto \color{blue}{\mathsf{fma}\left(x - 0.5, \log x, \left(-x\right) + 0.91893853320467\right)} + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
      4. sub-neg90.6%

        \[\leadsto \mathsf{fma}\left(\color{blue}{x + \left(-0.5\right)}, \log x, \left(-x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
      5. metadata-eval90.6%

        \[\leadsto \mathsf{fma}\left(x + \color{blue}{-0.5}, \log x, \left(-x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
      6. +-commutative90.6%

        \[\leadsto \mathsf{fma}\left(x + -0.5, \log x, \color{blue}{0.91893853320467 + \left(-x\right)}\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
      7. unsub-neg90.6%

        \[\leadsto \mathsf{fma}\left(x + -0.5, \log x, \color{blue}{0.91893853320467 - x}\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
      8. *-commutative90.6%

        \[\leadsto \mathsf{fma}\left(x + -0.5, \log x, 0.91893853320467 - x\right) + \frac{\color{blue}{z \cdot \left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right)} + 0.083333333333333}{x} \]
      9. fma-define90.6%

        \[\leadsto \mathsf{fma}\left(x + -0.5, \log x, 0.91893853320467 - x\right) + \frac{\color{blue}{\mathsf{fma}\left(z, \left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778, 0.083333333333333\right)}}{x} \]
      10. fma-neg90.6%

        \[\leadsto \mathsf{fma}\left(x + -0.5, \log x, 0.91893853320467 - x\right) + \frac{\mathsf{fma}\left(z, \color{blue}{\mathsf{fma}\left(y + 0.0007936500793651, z, -0.0027777777777778\right)}, 0.083333333333333\right)}{x} \]
      11. metadata-eval90.6%

        \[\leadsto \mathsf{fma}\left(x + -0.5, \log x, 0.91893853320467 - x\right) + \frac{\mathsf{fma}\left(z, \mathsf{fma}\left(y + 0.0007936500793651, z, \color{blue}{-0.0027777777777778}\right), 0.083333333333333\right)}{x} \]
    3. Simplified90.6%

      \[\leadsto \color{blue}{\mathsf{fma}\left(x + -0.5, \log x, 0.91893853320467 - x\right) + \frac{\mathsf{fma}\left(z, \mathsf{fma}\left(y + 0.0007936500793651, z, -0.0027777777777778\right), 0.083333333333333\right)}{x}} \]
    4. Add Preprocessing
    5. Taylor expanded in z around 0 74.0%

      \[\leadsto \mathsf{fma}\left(x + -0.5, \log x, 0.91893853320467 - x\right) + \frac{\color{blue}{0.083333333333333}}{x} \]
    6. Taylor expanded in x around inf 74.0%

      \[\leadsto \color{blue}{x \cdot \left(-1 \cdot \log \left(\frac{1}{x}\right) - 1\right)} \]
    7. Step-by-step derivation
      1. sub-neg74.0%

        \[\leadsto x \cdot \color{blue}{\left(-1 \cdot \log \left(\frac{1}{x}\right) + \left(-1\right)\right)} \]
      2. mul-1-neg74.0%

        \[\leadsto x \cdot \left(\color{blue}{\left(-\log \left(\frac{1}{x}\right)\right)} + \left(-1\right)\right) \]
      3. log-rec74.0%

        \[\leadsto x \cdot \left(\left(-\color{blue}{\left(-\log x\right)}\right) + \left(-1\right)\right) \]
      4. remove-double-neg74.0%

        \[\leadsto x \cdot \left(\color{blue}{\log x} + \left(-1\right)\right) \]
      5. metadata-eval74.0%

        \[\leadsto x \cdot \left(\log x + \color{blue}{-1}\right) \]
    8. Simplified74.0%

      \[\leadsto \color{blue}{x \cdot \left(\log x + -1\right)} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification62.3%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq 1360000000:\\ \;\;\;\;\left(0.91893853320467 + \log x \cdot -0.5\right) + \frac{0.083333333333333 + z \cdot -0.0027777777777778}{x}\\ \mathbf{else}:\\ \;\;\;\;x \cdot \left(\log x + -1\right)\\ \end{array} \]
  5. Add Preprocessing

Alternative 8: 57.2% accurate, 1.1× speedup?

\[\begin{array}{l} \\ x \cdot \left(\log x + -1\right) + \frac{0.083333333333333}{x} \end{array} \]
;; Herbie alternative 8 (57.2% accurate): x*(log(x) - 1) + 0.083333333333333/x.
;; The arguments y and z are unused (dropped by Taylor expansion in z around 0).
(FPCore (x y z)
 :precision binary64
 (+ (* x (+ (log x) -1.0)) (/ 0.083333333333333 x)))
/* Herbie alternative 8: x*(log(x) - 1) + 0.083333333333333/x.
   The parameters y and z are unused. */
double code(double x, double y, double z) {
	return (x * (log(x) + -1.0)) + (0.083333333333333 / x);
}
! Herbie alternative 8: x*(log(x) - 1) + 0.083333333333333/x.
! The arguments y and z are unused.
real(8) function code(x, y, z)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    code = (x * (log(x) + (-1.0d0))) + (0.083333333333333d0 / x)
end function
/** Herbie alternative 8: x*(log(x) - 1) + 0.083333333333333/x.
    The parameters y and z are unused. */
public static double code(double x, double y, double z) {
	return (x * (Math.log(x) + -1.0)) + (0.083333333333333 / x);
}
def code(x, y, z):
	# Herbie alternative 8: x*(log(x) - 1) + 0.083333333333333/x; y and z are unused.
	return (x * (math.log(x) + -1.0)) + (0.083333333333333 / x)
# Herbie alternative 8: x*(log(x) - 1) + 0.083333333333333/x; y and z are unused.
function code(x, y, z)
	return Float64(Float64(x * Float64(log(x) + -1.0)) + Float64(0.083333333333333 / x))
end
% Herbie alternative 8: x*(log(x) - 1) + 0.083333333333333/x; y and z are unused.
function tmp = code(x, y, z)
	tmp = (x * (log(x) + -1.0)) + (0.083333333333333 / x);
end
code[x_, y_, z_] := N[(N[(x * N[(N[Log[x], $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision] + N[(0.083333333333333 / x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
x \cdot \left(\log x + -1\right) + \frac{0.083333333333333}{x}
\end{array}
Derivation
  1. Initial program 94.9%

    \[\left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
  2. Add Preprocessing
  3. Taylor expanded in z around 0 56.7%

    \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \color{blue}{\frac{0.083333333333333}{x}} \]
  4. Taylor expanded in x around inf 56.7%

    \[\leadsto \color{blue}{x \cdot \left(-1 \cdot \log \left(\frac{1}{x}\right) - 1\right)} + \frac{0.083333333333333}{x} \]
  5. Step-by-step derivation
    1. sub-neg56.7%

      \[\leadsto x \cdot \color{blue}{\left(-1 \cdot \log \left(\frac{1}{x}\right) + \left(-1\right)\right)} + \frac{0.083333333333333}{x} \]
    2. mul-1-neg56.7%

      \[\leadsto x \cdot \left(\color{blue}{\left(-\log \left(\frac{1}{x}\right)\right)} + \left(-1\right)\right) + \frac{0.083333333333333}{x} \]
    3. log-rec56.7%

      \[\leadsto x \cdot \left(\left(-\color{blue}{\left(-\log x\right)}\right) + \left(-1\right)\right) + \frac{0.083333333333333}{x} \]
    4. remove-double-neg56.7%

      \[\leadsto x \cdot \left(\color{blue}{\log x} + \left(-1\right)\right) + \frac{0.083333333333333}{x} \]
    5. metadata-eval56.7%

      \[\leadsto x \cdot \left(\log x + \color{blue}{-1}\right) + \frac{0.083333333333333}{x} \]
    6. +-commutative56.7%

      \[\leadsto x \cdot \color{blue}{\left(-1 + \log x\right)} + \frac{0.083333333333333}{x} \]
  6. Simplified56.7%

    \[\leadsto \color{blue}{x \cdot \left(-1 + \log x\right)} + \frac{0.083333333333333}{x} \]
  7. Final simplification56.7%

    \[\leadsto x \cdot \left(\log x + -1\right) + \frac{0.083333333333333}{x} \]
  8. Add Preprocessing

Alternative 9: 35.5% accurate, 1.2× speedup?

\[\begin{array}{l} \\ x \cdot \left(\log x + -1\right) \end{array} \]
(FPCore (x y z) :precision binary64 (* x (+ (log x) -1.0)))
/* Herbie alternative 9: x*(log(x) - 1).  The parameters y and z are unused. */
double code(double x, double y, double z) {
	return x * (log(x) + -1.0);
}
! Herbie alternative 9: x*(log(x) - 1).  The arguments y and z are unused.
real(8) function code(x, y, z)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    code = x * (log(x) + (-1.0d0))
end function
/** Herbie alternative 9: x*(log(x) - 1).  The parameters y and z are unused. */
public static double code(double x, double y, double z) {
	return x * (Math.log(x) + -1.0);
}
def code(x, y, z):
	# Herbie alternative 9: x*(log(x) - 1); y and z are unused.
	return x * (math.log(x) + -1.0)
# Herbie alternative 9: x*(log(x) - 1); y and z are unused.
function code(x, y, z)
	return Float64(x * Float64(log(x) + -1.0))
end
% Herbie alternative 9: x*(log(x) - 1); y and z are unused.
function tmp = code(x, y, z)
	tmp = x * (log(x) + -1.0);
end
code[x_, y_, z_] := N[(x * N[(N[Log[x], $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
x \cdot \left(\log x + -1\right)
\end{array}
Derivation
  1. Initial program 94.9%

    \[\left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
  2. Step-by-step derivation
    1. sub-neg94.9%

      \[\leadsto \left(\color{blue}{\left(\left(x - 0.5\right) \cdot \log x + \left(-x\right)\right)} + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
    2. associate-+l+94.9%

      \[\leadsto \color{blue}{\left(\left(x - 0.5\right) \cdot \log x + \left(\left(-x\right) + 0.91893853320467\right)\right)} + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
    3. fma-define95.0%

      \[\leadsto \color{blue}{\mathsf{fma}\left(x - 0.5, \log x, \left(-x\right) + 0.91893853320467\right)} + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
    4. sub-neg95.0%

      \[\leadsto \mathsf{fma}\left(\color{blue}{x + \left(-0.5\right)}, \log x, \left(-x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
    5. metadata-eval95.0%

      \[\leadsto \mathsf{fma}\left(x + \color{blue}{-0.5}, \log x, \left(-x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
    6. +-commutative95.0%

      \[\leadsto \mathsf{fma}\left(x + -0.5, \log x, \color{blue}{0.91893853320467 + \left(-x\right)}\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
    7. unsub-neg95.0%

      \[\leadsto \mathsf{fma}\left(x + -0.5, \log x, \color{blue}{0.91893853320467 - x}\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
    8. *-commutative95.0%

      \[\leadsto \mathsf{fma}\left(x + -0.5, \log x, 0.91893853320467 - x\right) + \frac{\color{blue}{z \cdot \left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right)} + 0.083333333333333}{x} \]
    9. fma-define95.0%

      \[\leadsto \mathsf{fma}\left(x + -0.5, \log x, 0.91893853320467 - x\right) + \frac{\color{blue}{\mathsf{fma}\left(z, \left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778, 0.083333333333333\right)}}{x} \]
    10. fma-neg95.0%

      \[\leadsto \mathsf{fma}\left(x + -0.5, \log x, 0.91893853320467 - x\right) + \frac{\mathsf{fma}\left(z, \color{blue}{\mathsf{fma}\left(y + 0.0007936500793651, z, -0.0027777777777778\right)}, 0.083333333333333\right)}{x} \]
    11. metadata-eval95.0%

      \[\leadsto \mathsf{fma}\left(x + -0.5, \log x, 0.91893853320467 - x\right) + \frac{\mathsf{fma}\left(z, \mathsf{fma}\left(y + 0.0007936500793651, z, \color{blue}{-0.0027777777777778}\right), 0.083333333333333\right)}{x} \]
  3. Simplified95.0%

    \[\leadsto \color{blue}{\mathsf{fma}\left(x + -0.5, \log x, 0.91893853320467 - x\right) + \frac{\mathsf{fma}\left(z, \mathsf{fma}\left(y + 0.0007936500793651, z, -0.0027777777777778\right), 0.083333333333333\right)}{x}} \]
  4. Add Preprocessing
  5. Taylor expanded in z around 0 56.8%

    \[\leadsto \mathsf{fma}\left(x + -0.5, \log x, 0.91893853320467 - x\right) + \frac{\color{blue}{0.083333333333333}}{x} \]
  6. Taylor expanded in x around inf 38.9%

    \[\leadsto \color{blue}{x \cdot \left(-1 \cdot \log \left(\frac{1}{x}\right) - 1\right)} \]
  7. Step-by-step derivation
    1. sub-neg38.9%

      \[\leadsto x \cdot \color{blue}{\left(-1 \cdot \log \left(\frac{1}{x}\right) + \left(-1\right)\right)} \]
    2. mul-1-neg38.9%

      \[\leadsto x \cdot \left(\color{blue}{\left(-\log \left(\frac{1}{x}\right)\right)} + \left(-1\right)\right) \]
    3. log-rec38.9%

      \[\leadsto x \cdot \left(\left(-\color{blue}{\left(-\log x\right)}\right) + \left(-1\right)\right) \]
    4. remove-double-neg38.9%

      \[\leadsto x \cdot \left(\color{blue}{\log x} + \left(-1\right)\right) \]
    5. metadata-eval38.9%

      \[\leadsto x \cdot \left(\log x + \color{blue}{-1}\right) \]
  8. Simplified38.9%

    \[\leadsto \color{blue}{x \cdot \left(\log x + -1\right)} \]
  9. Add Preprocessing

Developer target: 98.6% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \left(\left(\left(x - 0.5\right) \cdot \log x + \left(0.91893853320467 - x\right)\right) + \frac{0.083333333333333}{x}\right) + \frac{z}{x} \cdot \left(z \cdot \left(y + 0.0007936500793651\right) - 0.0027777777777778\right) \end{array} \]
;; Developer target (98.6% accurate): ((x - 0.5)*log(x) + (0.91893853320467 - x)
;; + 0.083333333333333/x) + (z/x)*(z*(y + 0.0007936500793651) - 0.0027777777777778).
(FPCore (x y z)
 :precision binary64
 (+
  (+ (+ (* (- x 0.5) (log x)) (- 0.91893853320467 x)) (/ 0.083333333333333 x))
  (* (/ z x) (- (* z (+ y 0.0007936500793651)) 0.0027777777777778))))
/* Developer target: ((x - 0.5)*log(x) + (0.91893853320467 - x) + 0.083333333333333/x)
   + (z/x)*(z*(y + 0.0007936500793651) - 0.0027777777777778), in binary64. */
double code(double x, double y, double z) {
	return ((((x - 0.5) * log(x)) + (0.91893853320467 - x)) + (0.083333333333333 / x)) + ((z / x) * ((z * (y + 0.0007936500793651)) - 0.0027777777777778));
}
! Developer target: ((x - 0.5)*log(x) + (0.91893853320467 - x) + 0.083333333333333/x)
! + (z/x)*(z*(y + 0.0007936500793651) - 0.0027777777777778), in double precision.
real(8) function code(x, y, z)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    code = ((((x - 0.5d0) * log(x)) + (0.91893853320467d0 - x)) + (0.083333333333333d0 / x)) + ((z / x) * ((z * (y + 0.0007936500793651d0)) - 0.0027777777777778d0))
end function
/** Developer target: ((x - 0.5)*log(x) + (0.91893853320467 - x) + 0.083333333333333/x)
    + (z/x)*(z*(y + 0.0007936500793651) - 0.0027777777777778). */
public static double code(double x, double y, double z) {
	return ((((x - 0.5) * Math.log(x)) + (0.91893853320467 - x)) + (0.083333333333333 / x)) + ((z / x) * ((z * (y + 0.0007936500793651)) - 0.0027777777777778));
}
def code(x, y, z):
	# Developer target: ((x - 0.5)*log(x) + (0.91893853320467 - x) + 0.083333333333333/x) + (z/x)*(z*(y + 0.0007936500793651) - 0.0027777777777778).
	return ((((x - 0.5) * math.log(x)) + (0.91893853320467 - x)) + (0.083333333333333 / x)) + ((z / x) * ((z * (y + 0.0007936500793651)) - 0.0027777777777778))
# Developer target: ((x - 0.5)*log(x) + (0.91893853320467 - x) + 0.083333333333333/x)
# + (z/x)*(z*(y + 0.0007936500793651) - 0.0027777777777778), rounded to Float64 at every step.
function code(x, y, z)
	return Float64(Float64(Float64(Float64(Float64(x - 0.5) * log(x)) + Float64(0.91893853320467 - x)) + Float64(0.083333333333333 / x)) + Float64(Float64(z / x) * Float64(Float64(z * Float64(y + 0.0007936500793651)) - 0.0027777777777778)))
end
% Developer target: ((x - 0.5)*log(x) + (0.91893853320467 - x) + 0.083333333333333/x) + (z/x)*(z*(y + 0.0007936500793651) - 0.0027777777777778).
function tmp = code(x, y, z)
	tmp = ((((x - 0.5) * log(x)) + (0.91893853320467 - x)) + (0.083333333333333 / x)) + ((z / x) * ((z * (y + 0.0007936500793651)) - 0.0027777777777778));
end
code[x_, y_, z_] := N[(N[(N[(N[(N[(x - 0.5), $MachinePrecision] * N[Log[x], $MachinePrecision]), $MachinePrecision] + N[(0.91893853320467 - x), $MachinePrecision]), $MachinePrecision] + N[(0.083333333333333 / x), $MachinePrecision]), $MachinePrecision] + N[(N[(z / x), $MachinePrecision] * N[(N[(z * N[(y + 0.0007936500793651), $MachinePrecision]), $MachinePrecision] - 0.0027777777777778), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\left(\left(\left(x - 0.5\right) \cdot \log x + \left(0.91893853320467 - x\right)\right) + \frac{0.083333333333333}{x}\right) + \frac{z}{x} \cdot \left(z \cdot \left(y + 0.0007936500793651\right) - 0.0027777777777778\right)
\end{array}

Reproduce

?
herbie shell --seed 2024086 
;; Input fed to `herbie shell` to reproduce this report: the original expression,
;; with the developer-supplied rewrite attached via :alt.
(FPCore (x y z)
  :name "Numeric.SpecFunctions:$slogFactorial from math-functions-0.1.5.2, B"
  :precision binary64

  :alt
  (+ (+ (+ (* (- x 0.5) (log x)) (- 0.91893853320467 x)) (/ 0.083333333333333 x)) (* (/ z x) (- (* z (+ y 0.0007936500793651)) 0.0027777777777778)))

  (+ (+ (- (* (- x 0.5) (log x)) x) 0.91893853320467) (/ (+ (* (- (* (+ y 0.0007936500793651) z) 0.0027777777777778) z) 0.083333333333333) x)))