Numeric.SpecFunctions: logFactorial from math-functions-0.1.5.2, B

Percentage Accurate: 93.7% → 97.9%
Time: 13.4s
Alternatives: 7
Speedup: 1.0×

Specification

?
\[\begin{array}{l} \\ \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \end{array} \]
;; Herbie specification: binary64 log-factorial approximation.
;; Leading Stirling terms (x - 0.5)*log(x) - x + 0.91893853320467 (~= log(2*pi)/2)
;; plus a 1/x correction polynomial in z (coefficients ~= 1/1260, 1/360, 1/12).
(FPCore (x y z)
 :precision binary64
 (+
  (+ (- (* (- x 0.5) (log x)) x) 0.91893853320467)
  (/
   (+
    (* (- (* (+ y 0.0007936500793651) z) 0.0027777777777778) z)
    0.083333333333333)
   x)))
/* Herbie initial program: leading Stirling terms (x - 0.5)*log(x) - x + 0.91893853320467
   (~= log(2*pi)/2) plus a 1/x correction polynomial in z (coefficients ~= 1/1260, 1/360, 1/12).
   Operation order matches the FPCore spec exactly; do not re-associate. */
double code(double x, double y, double z) {
	return ((((x - 0.5) * log(x)) - x) + 0.91893853320467) + ((((((y + 0.0007936500793651) * z) - 0.0027777777777778) * z) + 0.083333333333333) / x);
}
! Herbie initial program: Stirling leading terms plus a 1/x correction polynomial in z.
! Operation order matches the FPCore spec exactly; do not re-associate.
real(8) function code(x, y, z)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    code = ((((x - 0.5d0) * log(x)) - x) + 0.91893853320467d0) + ((((((y + 0.0007936500793651d0) * z) - 0.0027777777777778d0) * z) + 0.083333333333333d0) / x)
end function
// Herbie initial program: Stirling leading terms plus a 1/x correction polynomial in z
// (coefficients ~= 1/1260, 1/360, 1/12); 0.91893853320467 ~= log(2*pi)/2.
public static double code(double x, double y, double z) {
	return ((((x - 0.5) * Math.log(x)) - x) + 0.91893853320467) + ((((((y + 0.0007936500793651) * z) - 0.0027777777777778) * z) + 0.083333333333333) / x);
}
def code(x, y, z):
	"""Herbie initial program: Stirling-style log-factorial approximation.

	Leading terms (x - 0.5)*log(x) - x + 0.91893853320467 (~= log(2*pi)/2),
	plus a correction polynomial in z divided by x. Evaluation order matches
	the generated FPCore exactly, so rounding behavior is unchanged.
	"""
	leading = (((x - 0.5) * math.log(x)) - x) + 0.91893853320467
	correction = ((((y + 0.0007936500793651) * z) - 0.0027777777777778) * z) + 0.083333333333333
	return leading + correction / x
# Herbie initial program: Stirling leading terms plus a 1/x correction polynomial in z.
# Explicit Float64() at every step mirrors the binary64 FPCore rounding semantics.
function code(x, y, z)
	return Float64(Float64(Float64(Float64(Float64(x - 0.5) * log(x)) - x) + 0.91893853320467) + Float64(Float64(Float64(Float64(Float64(Float64(y + 0.0007936500793651) * z) - 0.0027777777777778) * z) + 0.083333333333333) / x))
end
% Herbie initial program: Stirling leading terms plus a 1/x correction polynomial in z.
function tmp = code(x, y, z)
	tmp = ((((x - 0.5) * log(x)) - x) + 0.91893853320467) + ((((((y + 0.0007936500793651) * z) - 0.0027777777777778) * z) + 0.083333333333333) / x);
end
(* Herbie initial program: Stirling leading terms plus a 1/x correction series; N[..., $MachinePrecision] forces machine-precision rounding at each step. *)
code[x_, y_, z_] := N[(N[(N[(N[(N[(x - 0.5), $MachinePrecision] * N[Log[x], $MachinePrecision]), $MachinePrecision] - x), $MachinePrecision] + 0.91893853320467), $MachinePrecision] + N[(N[(N[(N[(N[(N[(y + 0.0007936500793651), $MachinePrecision] * z), $MachinePrecision] - 0.0027777777777778), $MachinePrecision] * z), $MachinePrecision] + 0.083333333333333), $MachinePrecision] / x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x}
\end{array}

Sampling outcomes in binary64 precision:

Local Percentage Accuracy vs ?

The average percentage accuracy by input value. Horizontal axis shows value of an input variable; the variable is chosen in the title. Vertical axis is accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion. These can be toggled with buttons below the plot. The line is an average while dots represent individual samples.

Accuracy vs Speed?

Herbie found 7 alternatives:

AlternativeAccuracySpeedup
The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed-accuracy tradeoffs.

Initial Program: 93.7% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \end{array} \]
;; Herbie initial program: binary64 log-factorial approximation.
;; Leading Stirling terms plus a 1/x correction polynomial in z.
(FPCore (x y z)
 :precision binary64
 (+
  (+ (- (* (- x 0.5) (log x)) x) 0.91893853320467)
  (/
   (+
    (* (- (* (+ y 0.0007936500793651) z) 0.0027777777777778) z)
    0.083333333333333)
   x)))
/* Herbie initial program: leading Stirling terms (x - 0.5)*log(x) - x + 0.91893853320467
   (~= log(2*pi)/2) plus a 1/x correction polynomial in z (coefficients ~= 1/1260, 1/360, 1/12).
   Operation order matches the FPCore spec exactly; do not re-associate. */
double code(double x, double y, double z) {
	return ((((x - 0.5) * log(x)) - x) + 0.91893853320467) + ((((((y + 0.0007936500793651) * z) - 0.0027777777777778) * z) + 0.083333333333333) / x);
}
! Herbie initial program: Stirling leading terms plus a 1/x correction polynomial in z.
! Operation order matches the FPCore spec exactly; do not re-associate.
real(8) function code(x, y, z)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    code = ((((x - 0.5d0) * log(x)) - x) + 0.91893853320467d0) + ((((((y + 0.0007936500793651d0) * z) - 0.0027777777777778d0) * z) + 0.083333333333333d0) / x)
end function
// Herbie initial program: Stirling leading terms plus a 1/x correction polynomial in z
// (coefficients ~= 1/1260, 1/360, 1/12); 0.91893853320467 ~= log(2*pi)/2.
public static double code(double x, double y, double z) {
	return ((((x - 0.5) * Math.log(x)) - x) + 0.91893853320467) + ((((((y + 0.0007936500793651) * z) - 0.0027777777777778) * z) + 0.083333333333333) / x);
}
def code(x, y, z):
	"""Herbie initial program: Stirling-style log-factorial approximation.

	Leading terms (x - 0.5)*log(x) - x + 0.91893853320467 (~= log(2*pi)/2),
	plus a correction polynomial in z divided by x. Evaluation order matches
	the generated FPCore exactly, so rounding behavior is unchanged.
	"""
	leading = (((x - 0.5) * math.log(x)) - x) + 0.91893853320467
	correction = ((((y + 0.0007936500793651) * z) - 0.0027777777777778) * z) + 0.083333333333333
	return leading + correction / x
# Herbie initial program: Stirling leading terms plus a 1/x correction polynomial in z.
# Explicit Float64() at every step mirrors the binary64 FPCore rounding semantics.
function code(x, y, z)
	return Float64(Float64(Float64(Float64(Float64(x - 0.5) * log(x)) - x) + 0.91893853320467) + Float64(Float64(Float64(Float64(Float64(Float64(y + 0.0007936500793651) * z) - 0.0027777777777778) * z) + 0.083333333333333) / x))
end
% Herbie initial program: Stirling leading terms plus a 1/x correction polynomial in z.
function tmp = code(x, y, z)
	tmp = ((((x - 0.5) * log(x)) - x) + 0.91893853320467) + ((((((y + 0.0007936500793651) * z) - 0.0027777777777778) * z) + 0.083333333333333) / x);
end
(* Herbie initial program: Stirling leading terms plus a 1/x correction series; N[..., $MachinePrecision] forces machine-precision rounding at each step. *)
code[x_, y_, z_] := N[(N[(N[(N[(N[(x - 0.5), $MachinePrecision] * N[Log[x], $MachinePrecision]), $MachinePrecision] - x), $MachinePrecision] + 0.91893853320467), $MachinePrecision] + N[(N[(N[(N[(N[(N[(y + 0.0007936500793651), $MachinePrecision] * z), $MachinePrecision] - 0.0027777777777778), $MachinePrecision] * z), $MachinePrecision] + 0.083333333333333), $MachinePrecision] / x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x}
\end{array}

Alternative 1: 97.9% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(z \cdot \left(\frac{z}{x} \cdot \left(0.0007936500793651 + y\right)\right) + 0.083333333333333 \cdot \frac{1}{x}\right) \end{array} \]
;; Herbie alternative 1 (97.9% per report): correction refactored as
;; z*(z/x*(1/1260 + y)) + (1/12)*(1/x); the 1/360 term is dropped.
(FPCore (x y z)
 :precision binary64
 (+
  (+ (- (* (- x 0.5) (log x)) x) 0.91893853320467)
  (+
   (* z (* (/ z x) (+ 0.0007936500793651 y)))
   (* 0.083333333333333 (/ 1.0 x)))))
/* Herbie alternative 1 (97.9% per report): correction refactored as
   z*(z/x*(1/1260 + y)) + (1/12)*(1/x); operation order is significant. */
double code(double x, double y, double z) {
	return ((((x - 0.5) * log(x)) - x) + 0.91893853320467) + ((z * ((z / x) * (0.0007936500793651 + y))) + (0.083333333333333 * (1.0 / x)));
}
! Herbie alternative 1 (97.9% per report): correction refactored as
! z*(z/x*(1/1260 + y)) + (1/12)*(1/x); operation order is significant.
real(8) function code(x, y, z)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    code = ((((x - 0.5d0) * log(x)) - x) + 0.91893853320467d0) + ((z * ((z / x) * (0.0007936500793651d0 + y))) + (0.083333333333333d0 * (1.0d0 / x)))
end function
// Herbie alternative 1 (97.9% per report): correction refactored as
// z*(z/x*(1/1260 + y)) + (1/12)*(1/x); operation order is significant.
public static double code(double x, double y, double z) {
	return ((((x - 0.5) * Math.log(x)) - x) + 0.91893853320467) + ((z * ((z / x) * (0.0007936500793651 + y))) + (0.083333333333333 * (1.0 / x)));
}
def code(x, y, z):
	"""Herbie alternative 1: correction refactored as z*(z/x*(c + y)) + (1/12)*(1/x).

	Same Stirling leading terms as the initial program; evaluation order is
	preserved exactly so rounding behavior is unchanged.
	"""
	leading = (((x - 0.5) * math.log(x)) - x) + 0.91893853320467
	quad = z * ((z / x) * (0.0007936500793651 + y))
	tail = 0.083333333333333 * (1.0 / x)
	return leading + (quad + tail)
# Herbie alternative 1 (97.9% per report): correction refactored as
# z*(z/x*(1/1260 + y)) + (1/12)*(1/x); Float64() mirrors binary64 rounding.
function code(x, y, z)
	return Float64(Float64(Float64(Float64(Float64(x - 0.5) * log(x)) - x) + 0.91893853320467) + Float64(Float64(z * Float64(Float64(z / x) * Float64(0.0007936500793651 + y))) + Float64(0.083333333333333 * Float64(1.0 / x))))
end
% Herbie alternative 1 (97.9% per report): correction refactored as z*(z/x*(1/1260 + y)) + (1/12)*(1/x).
function tmp = code(x, y, z)
	tmp = ((((x - 0.5) * log(x)) - x) + 0.91893853320467) + ((z * ((z / x) * (0.0007936500793651 + y))) + (0.083333333333333 * (1.0 / x)));
end
(* Herbie alternative 1 (97.9% per report): correction refactored as z*(z/x*(1/1260 + y)) + (1/12)*(1/x). *)
code[x_, y_, z_] := N[(N[(N[(N[(N[(x - 0.5), $MachinePrecision] * N[Log[x], $MachinePrecision]), $MachinePrecision] - x), $MachinePrecision] + 0.91893853320467), $MachinePrecision] + N[(N[(z * N[(N[(z / x), $MachinePrecision] * N[(0.0007936500793651 + y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(0.083333333333333 * N[(1.0 / x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(z \cdot \left(\frac{z}{x} \cdot \left(0.0007936500793651 + y\right)\right) + 0.083333333333333 \cdot \frac{1}{x}\right)
\end{array}
Derivation
  1. Initial program 91.4%

    \[\left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
  2. Add Preprocessing
  3. Taylor expanded in z around 0 95.9%

    \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \color{blue}{\left(z \cdot \left(z \cdot \left(0.0007936500793651 \cdot \frac{1}{x} + \frac{y}{x}\right) - 0.0027777777777778 \cdot \frac{1}{x}\right) + 0.083333333333333 \cdot \frac{1}{x}\right)} \]
  4. Taylor expanded in z around inf 90.3%

    \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(\color{blue}{{z}^{2} \cdot \left(0.0007936500793651 \cdot \frac{1}{x} + \frac{y}{x}\right)} + 0.083333333333333 \cdot \frac{1}{x}\right) \]
  5. Step-by-step derivation
    1. unpow290.3%

      \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(\color{blue}{\left(z \cdot z\right)} \cdot \left(0.0007936500793651 \cdot \frac{1}{x} + \frac{y}{x}\right) + 0.083333333333333 \cdot \frac{1}{x}\right) \]
    2. associate-*r/90.3%

      \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(\left(z \cdot z\right) \cdot \left(\color{blue}{\frac{0.0007936500793651 \cdot 1}{x}} + \frac{y}{x}\right) + 0.083333333333333 \cdot \frac{1}{x}\right) \]
    3. metadata-eval90.3%

      \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(\left(z \cdot z\right) \cdot \left(\frac{\color{blue}{0.0007936500793651}}{x} + \frac{y}{x}\right) + 0.083333333333333 \cdot \frac{1}{x}\right) \]
    4. associate-*l*95.8%

      \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(\color{blue}{z \cdot \left(z \cdot \left(\frac{0.0007936500793651}{x} + \frac{y}{x}\right)\right)} + 0.083333333333333 \cdot \frac{1}{x}\right) \]
    5. distribute-rgt-in90.7%

      \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(z \cdot \color{blue}{\left(\frac{0.0007936500793651}{x} \cdot z + \frac{y}{x} \cdot z\right)} + 0.083333333333333 \cdot \frac{1}{x}\right) \]
    6. associate-*l/90.7%

      \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(z \cdot \left(\color{blue}{\frac{0.0007936500793651 \cdot z}{x}} + \frac{y}{x} \cdot z\right) + 0.083333333333333 \cdot \frac{1}{x}\right) \]
    7. associate-*r/90.7%

      \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(z \cdot \left(\color{blue}{0.0007936500793651 \cdot \frac{z}{x}} + \frac{y}{x} \cdot z\right) + 0.083333333333333 \cdot \frac{1}{x}\right) \]
    8. associate-*l/91.9%

      \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(z \cdot \left(0.0007936500793651 \cdot \frac{z}{x} + \color{blue}{\frac{y \cdot z}{x}}\right) + 0.083333333333333 \cdot \frac{1}{x}\right) \]
    9. associate-/l*92.1%

      \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(z \cdot \left(0.0007936500793651 \cdot \frac{z}{x} + \color{blue}{y \cdot \frac{z}{x}}\right) + 0.083333333333333 \cdot \frac{1}{x}\right) \]
    10. distribute-rgt-out99.1%

      \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(z \cdot \color{blue}{\left(\frac{z}{x} \cdot \left(0.0007936500793651 + y\right)\right)} + 0.083333333333333 \cdot \frac{1}{x}\right) \]
  6. Simplified 99.1%

    \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(\color{blue}{z \cdot \left(\frac{z}{x} \cdot \left(0.0007936500793651 + y\right)\right)} + 0.083333333333333 \cdot \frac{1}{x}\right) \]
  7. Add Preprocessing

Alternative 2: 93.3% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_0 := x \cdot \left(\log x + -1\right)\\ \mathbf{if}\;x \leq 4.2 \cdot 10^{+191}:\\ \;\;\;\;t_0 + \frac{0.083333333333333 + z \cdot \left(z \cdot \left(0.0007936500793651 + y\right) - 0.0027777777777778\right)}{x}\\ \mathbf{else}:\\ \;\;\;\;t_0 + \frac{0.083333333333333}{x}\\ \end{array} \end{array} \]
;; Herbie alternative 2 (93.3% per report): regime split at x <= 4.2e191;
;; for huge x the z-correction terms are dropped (Taylor expansion in z around 0).
(FPCore (x y z)
 :precision binary64
 (let* ((t_0 (* x (+ (log x) -1.0))))
   (if (<= x 4.2e+191)
     (+
      t_0
      (/
       (+
        0.083333333333333
        (* z (- (* z (+ 0.0007936500793651 y)) 0.0027777777777778)))
       x))
     (+ t_0 (/ 0.083333333333333 x)))))
/* Herbie alternative 2 (93.3% per report): leading term rewritten as x*(log(x) - 1);
   regime split at x <= 4.2e191, above which the z-correction terms are dropped. */
double code(double x, double y, double z) {
	double t_0 = x * (log(x) + -1.0);
	double tmp;
	if (x <= 4.2e+191) {
		tmp = t_0 + ((0.083333333333333 + (z * ((z * (0.0007936500793651 + y)) - 0.0027777777777778))) / x);
	} else {
		tmp = t_0 + (0.083333333333333 / x);
	}
	return tmp;
}
! Herbie alternative 2 (93.3% per report): leading term rewritten as x*(log(x) - 1);
! regime split at x <= 4.2d191, above which the z-correction terms are dropped.
real(8) function code(x, y, z)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8) :: t_0
    real(8) :: tmp
    t_0 = x * (log(x) + (-1.0d0))
    if (x <= 4.2d+191) then
        tmp = t_0 + ((0.083333333333333d0 + (z * ((z * (0.0007936500793651d0 + y)) - 0.0027777777777778d0))) / x)
    else
        tmp = t_0 + (0.083333333333333d0 / x)
    end if
    code = tmp
end function
// Herbie alternative 2 (93.3% per report): leading term rewritten as x*(log(x) - 1);
// regime split at x <= 4.2e191, above which the z-correction terms are dropped.
public static double code(double x, double y, double z) {
	double t_0 = x * (Math.log(x) + -1.0);
	double tmp;
	if (x <= 4.2e+191) {
		tmp = t_0 + ((0.083333333333333 + (z * ((z * (0.0007936500793651 + y)) - 0.0027777777777778))) / x);
	} else {
		tmp = t_0 + (0.083333333333333 / x);
	}
	return tmp;
}
def code(x, y, z):
	"""Herbie alternative 2: regime split at x <= 4.2e191.

	Leading term is x*(log(x) - 1); above the threshold the z-correction
	terms are dropped. Operation order is preserved exactly.
	"""
	t_0 = x * (math.log(x) + -1.0)
	if x <= 4.2e+191:
		series = z * ((z * (0.0007936500793651 + y)) - 0.0027777777777778)
		return t_0 + ((0.083333333333333 + series) / x)
	return t_0 + (0.083333333333333 / x)
# Herbie alternative 2 (93.3% per report): leading term rewritten as x*(log(x) - 1);
# regime split at x <= 4.2e191, above which the z-correction terms are dropped.
function code(x, y, z)
	t_0 = Float64(x * Float64(log(x) + -1.0))
	tmp = 0.0
	if (x <= 4.2e+191)
		tmp = Float64(t_0 + Float64(Float64(0.083333333333333 + Float64(z * Float64(Float64(z * Float64(0.0007936500793651 + y)) - 0.0027777777777778))) / x));
	else
		tmp = Float64(t_0 + Float64(0.083333333333333 / x));
	end
	return tmp
end
% Herbie alternative 2 (93.3% per report): leading term rewritten as x*(log(x) - 1);
% regime split at x <= 4.2e191, above which the z-correction terms are dropped.
function tmp_2 = code(x, y, z)
	t_0 = x * (log(x) + -1.0);
	tmp = 0.0;
	if (x <= 4.2e+191)
		tmp = t_0 + ((0.083333333333333 + (z * ((z * (0.0007936500793651 + y)) - 0.0027777777777778))) / x);
	else
		tmp = t_0 + (0.083333333333333 / x);
	end
	tmp_2 = tmp;
end
(* Herbie alternative 2 (93.3% per report): leading term x*(log(x) - 1); regime split at x <= 4.2e191, above which the z-correction terms are dropped. *)
code[x_, y_, z_] := Block[{t$95$0 = N[(x * N[(N[Log[x], $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[x, 4.2e+191], N[(t$95$0 + N[(N[(0.083333333333333 + N[(z * N[(N[(z * N[(0.0007936500793651 + y), $MachinePrecision]), $MachinePrecision] - 0.0027777777777778), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / x), $MachinePrecision]), $MachinePrecision], N[(t$95$0 + N[(0.083333333333333 / x), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
t_0 := x \cdot \left(\log x + -1\right)\\
\mathbf{if}\;x \leq 4.2 \cdot 10^{+191}:\\
\;\;\;\;t_0 + \frac{0.083333333333333 + z \cdot \left(z \cdot \left(0.0007936500793651 + y\right) - 0.0027777777777778\right)}{x}\\

\mathbf{else}:\\
\;\;\;\;t_0 + \frac{0.083333333333333}{x}\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if x < 4.2000000000000001e191

    1. Initial program 96.6%

      \[\left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
    2. Add Preprocessing
    3. Taylor expanded in x around inf 95.8%

      \[\leadsto \color{blue}{x \cdot \left(-1 \cdot \log \left(\frac{1}{x}\right) - 1\right)} + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
    4. Step-by-step derivation
      1. sub-neg98.3%

        \[\leadsto x \cdot \color{blue}{\left(-1 \cdot \log \left(\frac{1}{x}\right) + \left(-1\right)\right)} + \left(z \cdot \left(\frac{z}{x} \cdot \left(0.0007936500793651 + y\right)\right) + 0.083333333333333 \cdot \frac{1}{x}\right) \]
      2. mul-1-neg98.3%

        \[\leadsto x \cdot \left(\color{blue}{\left(-\log \left(\frac{1}{x}\right)\right)} + \left(-1\right)\right) + \left(z \cdot \left(\frac{z}{x} \cdot \left(0.0007936500793651 + y\right)\right) + 0.083333333333333 \cdot \frac{1}{x}\right) \]
      3. log-rec98.3%

        \[\leadsto x \cdot \left(\left(-\color{blue}{\left(-\log x\right)}\right) + \left(-1\right)\right) + \left(z \cdot \left(\frac{z}{x} \cdot \left(0.0007936500793651 + y\right)\right) + 0.083333333333333 \cdot \frac{1}{x}\right) \]
      4. remove-double-neg98.3%

        \[\leadsto x \cdot \left(\color{blue}{\log x} + \left(-1\right)\right) + \left(z \cdot \left(\frac{z}{x} \cdot \left(0.0007936500793651 + y\right)\right) + 0.083333333333333 \cdot \frac{1}{x}\right) \]
      5. metadata-eval98.3%

        \[\leadsto x \cdot \left(\log x + \color{blue}{-1}\right) + \left(z \cdot \left(\frac{z}{x} \cdot \left(0.0007936500793651 + y\right)\right) + 0.083333333333333 \cdot \frac{1}{x}\right) \]
      6. +-commutative98.3%

        \[\leadsto x \cdot \color{blue}{\left(-1 + \log x\right)} + \left(z \cdot \left(\frac{z}{x} \cdot \left(0.0007936500793651 + y\right)\right) + 0.083333333333333 \cdot \frac{1}{x}\right) \]
    5. Simplified 95.8%

      \[\leadsto \color{blue}{x \cdot \left(-1 + \log x\right)} + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]

    if 4.2000000000000001e191 < x

    1. Initial program 68.0%

      \[\left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
    2. Add Preprocessing
    3. Taylor expanded in z around 0 86.6%

      \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \color{blue}{\frac{0.083333333333333}{x}} \]
    4. Taylor expanded in x around inf 86.6%

      \[\leadsto \color{blue}{x \cdot \left(-1 \cdot \log \left(\frac{1}{x}\right) - 1\right)} + \frac{0.083333333333333}{x} \]
    5. Step-by-step derivation
      1. sub-neg99.5%

        \[\leadsto x \cdot \color{blue}{\left(-1 \cdot \log \left(\frac{1}{x}\right) + \left(-1\right)\right)} + \left(z \cdot \left(\frac{z}{x} \cdot \left(0.0007936500793651 + y\right)\right) + 0.083333333333333 \cdot \frac{1}{x}\right) \]
      2. mul-1-neg99.5%

        \[\leadsto x \cdot \left(\color{blue}{\left(-\log \left(\frac{1}{x}\right)\right)} + \left(-1\right)\right) + \left(z \cdot \left(\frac{z}{x} \cdot \left(0.0007936500793651 + y\right)\right) + 0.083333333333333 \cdot \frac{1}{x}\right) \]
      3. log-rec99.5%

        \[\leadsto x \cdot \left(\left(-\color{blue}{\left(-\log x\right)}\right) + \left(-1\right)\right) + \left(z \cdot \left(\frac{z}{x} \cdot \left(0.0007936500793651 + y\right)\right) + 0.083333333333333 \cdot \frac{1}{x}\right) \]
      4. remove-double-neg99.5%

        \[\leadsto x \cdot \left(\color{blue}{\log x} + \left(-1\right)\right) + \left(z \cdot \left(\frac{z}{x} \cdot \left(0.0007936500793651 + y\right)\right) + 0.083333333333333 \cdot \frac{1}{x}\right) \]
      5. metadata-eval99.5%

        \[\leadsto x \cdot \left(\log x + \color{blue}{-1}\right) + \left(z \cdot \left(\frac{z}{x} \cdot \left(0.0007936500793651 + y\right)\right) + 0.083333333333333 \cdot \frac{1}{x}\right) \]
      6. +-commutative99.5%

        \[\leadsto x \cdot \color{blue}{\left(-1 + \log x\right)} + \left(z \cdot \left(\frac{z}{x} \cdot \left(0.0007936500793651 + y\right)\right) + 0.083333333333333 \cdot \frac{1}{x}\right) \]
    6. Simplified 86.6%

      \[\leadsto \color{blue}{x \cdot \left(-1 + \log x\right)} + \frac{0.083333333333333}{x} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification 94.1%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq 4.2 \cdot 10^{+191}:\\ \;\;\;\;x \cdot \left(\log x + -1\right) + \frac{0.083333333333333 + z \cdot \left(z \cdot \left(0.0007936500793651 + y\right) - 0.0027777777777778\right)}{x}\\ \mathbf{else}:\\ \;\;\;\;x \cdot \left(\log x + -1\right) + \frac{0.083333333333333}{x}\\ \end{array} \]
  5. Add Preprocessing

Alternative 3: 96.8% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \left(z \cdot \left(\frac{z}{x} \cdot \left(0.0007936500793651 + y\right)\right) + 0.083333333333333 \cdot \frac{1}{x}\right) + x \cdot \left(\log x + -1\right) \end{array} \]
;; Herbie alternative 3 (96.8% per report): correction terms first, then the
;; leading term rewritten as x*(log(x) - 1).
(FPCore (x y z)
 :precision binary64
 (+
  (+
   (* z (* (/ z x) (+ 0.0007936500793651 y)))
   (* 0.083333333333333 (/ 1.0 x)))
  (* x (+ (log x) -1.0))))
/* Herbie alternative 3 (96.8% per report): correction terms first, then the
   leading term rewritten as x*(log(x) - 1); operation order is significant. */
double code(double x, double y, double z) {
	return ((z * ((z / x) * (0.0007936500793651 + y))) + (0.083333333333333 * (1.0 / x))) + (x * (log(x) + -1.0));
}
! Herbie alternative 3 (96.8% per report): correction terms first, then the
! leading term rewritten as x*(log(x) - 1); operation order is significant.
real(8) function code(x, y, z)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    code = ((z * ((z / x) * (0.0007936500793651d0 + y))) + (0.083333333333333d0 * (1.0d0 / x))) + (x * (log(x) + (-1.0d0)))
end function
// Herbie alternative 3 (96.8% per report): correction terms first, then the
// leading term rewritten as x*(log(x) - 1); operation order is significant.
public static double code(double x, double y, double z) {
	return ((z * ((z / x) * (0.0007936500793651 + y))) + (0.083333333333333 * (1.0 / x))) + (x * (Math.log(x) + -1.0));
}
def code(x, y, z):
	"""Herbie alternative 3: correction terms first, then leading x*(log(x) - 1).

	Operation order is preserved exactly, so rounding behavior is unchanged.
	"""
	quad = z * ((z / x) * (0.0007936500793651 + y))
	tail = 0.083333333333333 * (1.0 / x)
	leading = x * (math.log(x) + -1.0)
	return (quad + tail) + leading
# Herbie alternative 3 (96.8% per report): correction terms first, then the
# leading term rewritten as x*(log(x) - 1); Float64() mirrors binary64 rounding.
function code(x, y, z)
	return Float64(Float64(Float64(z * Float64(Float64(z / x) * Float64(0.0007936500793651 + y))) + Float64(0.083333333333333 * Float64(1.0 / x))) + Float64(x * Float64(log(x) + -1.0)))
end
% Herbie alternative 3 (96.8% per report): correction terms first, then the leading term x*(log(x) - 1).
function tmp = code(x, y, z)
	tmp = ((z * ((z / x) * (0.0007936500793651 + y))) + (0.083333333333333 * (1.0 / x))) + (x * (log(x) + -1.0));
end
(* Herbie alternative 3 (96.8% per report): correction terms first, then the leading term x*(log(x) - 1). *)
code[x_, y_, z_] := N[(N[(N[(z * N[(N[(z / x), $MachinePrecision] * N[(0.0007936500793651 + y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(0.083333333333333 * N[(1.0 / x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(x * N[(N[Log[x], $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\left(z \cdot \left(\frac{z}{x} \cdot \left(0.0007936500793651 + y\right)\right) + 0.083333333333333 \cdot \frac{1}{x}\right) + x \cdot \left(\log x + -1\right)
\end{array}
Derivation
  1. Initial program 91.4%

    \[\left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
  2. Add Preprocessing
  3. Taylor expanded in z around 0 95.9%

    \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \color{blue}{\left(z \cdot \left(z \cdot \left(0.0007936500793651 \cdot \frac{1}{x} + \frac{y}{x}\right) - 0.0027777777777778 \cdot \frac{1}{x}\right) + 0.083333333333333 \cdot \frac{1}{x}\right)} \]
  4. Taylor expanded in z around inf 90.3%

    \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(\color{blue}{{z}^{2} \cdot \left(0.0007936500793651 \cdot \frac{1}{x} + \frac{y}{x}\right)} + 0.083333333333333 \cdot \frac{1}{x}\right) \]
  5. Step-by-step derivation
    1. unpow290.3%

      \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(\color{blue}{\left(z \cdot z\right)} \cdot \left(0.0007936500793651 \cdot \frac{1}{x} + \frac{y}{x}\right) + 0.083333333333333 \cdot \frac{1}{x}\right) \]
    2. associate-*r/90.3%

      \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(\left(z \cdot z\right) \cdot \left(\color{blue}{\frac{0.0007936500793651 \cdot 1}{x}} + \frac{y}{x}\right) + 0.083333333333333 \cdot \frac{1}{x}\right) \]
    3. metadata-eval90.3%

      \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(\left(z \cdot z\right) \cdot \left(\frac{\color{blue}{0.0007936500793651}}{x} + \frac{y}{x}\right) + 0.083333333333333 \cdot \frac{1}{x}\right) \]
    4. associate-*l*95.8%

      \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(\color{blue}{z \cdot \left(z \cdot \left(\frac{0.0007936500793651}{x} + \frac{y}{x}\right)\right)} + 0.083333333333333 \cdot \frac{1}{x}\right) \]
    5. distribute-rgt-in90.7%

      \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(z \cdot \color{blue}{\left(\frac{0.0007936500793651}{x} \cdot z + \frac{y}{x} \cdot z\right)} + 0.083333333333333 \cdot \frac{1}{x}\right) \]
    6. associate-*l/90.7%

      \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(z \cdot \left(\color{blue}{\frac{0.0007936500793651 \cdot z}{x}} + \frac{y}{x} \cdot z\right) + 0.083333333333333 \cdot \frac{1}{x}\right) \]
    7. associate-*r/90.7%

      \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(z \cdot \left(\color{blue}{0.0007936500793651 \cdot \frac{z}{x}} + \frac{y}{x} \cdot z\right) + 0.083333333333333 \cdot \frac{1}{x}\right) \]
    8. associate-*l/91.9%

      \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(z \cdot \left(0.0007936500793651 \cdot \frac{z}{x} + \color{blue}{\frac{y \cdot z}{x}}\right) + 0.083333333333333 \cdot \frac{1}{x}\right) \]
    9. associate-/l*92.1%

      \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(z \cdot \left(0.0007936500793651 \cdot \frac{z}{x} + \color{blue}{y \cdot \frac{z}{x}}\right) + 0.083333333333333 \cdot \frac{1}{x}\right) \]
    10. distribute-rgt-out99.1%

      \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(z \cdot \color{blue}{\left(\frac{z}{x} \cdot \left(0.0007936500793651 + y\right)\right)} + 0.083333333333333 \cdot \frac{1}{x}\right) \]
  6. Simplified 99.1%

    \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \left(\color{blue}{z \cdot \left(\frac{z}{x} \cdot \left(0.0007936500793651 + y\right)\right)} + 0.083333333333333 \cdot \frac{1}{x}\right) \]
  7. Taylor expanded in x around inf 98.5%

    \[\leadsto \color{blue}{x \cdot \left(-1 \cdot \log \left(\frac{1}{x}\right) - 1\right)} + \left(z \cdot \left(\frac{z}{x} \cdot \left(0.0007936500793651 + y\right)\right) + 0.083333333333333 \cdot \frac{1}{x}\right) \]
  8. Step-by-step derivation
    1. sub-neg98.5%

      \[\leadsto x \cdot \color{blue}{\left(-1 \cdot \log \left(\frac{1}{x}\right) + \left(-1\right)\right)} + \left(z \cdot \left(\frac{z}{x} \cdot \left(0.0007936500793651 + y\right)\right) + 0.083333333333333 \cdot \frac{1}{x}\right) \]
    2. mul-1-neg98.5%

      \[\leadsto x \cdot \left(\color{blue}{\left(-\log \left(\frac{1}{x}\right)\right)} + \left(-1\right)\right) + \left(z \cdot \left(\frac{z}{x} \cdot \left(0.0007936500793651 + y\right)\right) + 0.083333333333333 \cdot \frac{1}{x}\right) \]
    3. log-rec98.5%

      \[\leadsto x \cdot \left(\left(-\color{blue}{\left(-\log x\right)}\right) + \left(-1\right)\right) + \left(z \cdot \left(\frac{z}{x} \cdot \left(0.0007936500793651 + y\right)\right) + 0.083333333333333 \cdot \frac{1}{x}\right) \]
    4. remove-double-neg98.5%

      \[\leadsto x \cdot \left(\color{blue}{\log x} + \left(-1\right)\right) + \left(z \cdot \left(\frac{z}{x} \cdot \left(0.0007936500793651 + y\right)\right) + 0.083333333333333 \cdot \frac{1}{x}\right) \]
    5. metadata-eval98.5%

      \[\leadsto x \cdot \left(\log x + \color{blue}{-1}\right) + \left(z \cdot \left(\frac{z}{x} \cdot \left(0.0007936500793651 + y\right)\right) + 0.083333333333333 \cdot \frac{1}{x}\right) \]
    6. +-commutative98.5%

      \[\leadsto x \cdot \color{blue}{\left(-1 + \log x\right)} + \left(z \cdot \left(\frac{z}{x} \cdot \left(0.0007936500793651 + y\right)\right) + 0.083333333333333 \cdot \frac{1}{x}\right) \]
  9. Simplified 98.5%

    \[\leadsto \color{blue}{x \cdot \left(-1 + \log x\right)} + \left(z \cdot \left(\frac{z}{x} \cdot \left(0.0007936500793651 + y\right)\right) + 0.083333333333333 \cdot \frac{1}{x}\right) \]
  10. Final simplification 98.5%

    \[\leadsto \left(z \cdot \left(\frac{z}{x} \cdot \left(0.0007936500793651 + y\right)\right) + 0.083333333333333 \cdot \frac{1}{x}\right) + x \cdot \left(\log x + -1\right) \]
  11. Add Preprocessing

Alternative 4: 56.5% accurate, 1.1× speedup?

\[\begin{array}{l} \\ \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \frac{0.083333333333333}{x} \end{array} \]
;; Herbie alternative 4 (56.5% per report): z-correction terms dropped entirely
;; (Taylor in z around 0); only the 1/(12x) correction is kept.
(FPCore (x y z)
 :precision binary64
 (+ (+ (- (* (- x 0.5) (log x)) x) 0.91893853320467) (/ 0.083333333333333 x)))
/* Herbie alternative 4 (56.5% per report): z-correction terms dropped entirely;
   only the 1/(12x) correction is kept. y and z are ignored. */
double code(double x, double y, double z) {
	return ((((x - 0.5) * log(x)) - x) + 0.91893853320467) + (0.083333333333333 / x);
}
! Herbie alternative 4 (56.5% per report): z-correction terms dropped entirely;
! only the 1/(12x) correction is kept. y and z are ignored.
real(8) function code(x, y, z)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    code = ((((x - 0.5d0) * log(x)) - x) + 0.91893853320467d0) + (0.083333333333333d0 / x)
end function
// Herbie alternative 4 (56.5% per report): z-correction terms dropped entirely;
// only the 1/(12x) correction is kept. y and z are ignored.
public static double code(double x, double y, double z) {
	return ((((x - 0.5) * Math.log(x)) - x) + 0.91893853320467) + (0.083333333333333 / x);
}
def code(x, y, z):
	"""Herbie alternative 4: z-correction dropped; only 1/(12x) kept (y, z unused)."""
	leading = (((x - 0.5) * math.log(x)) - x) + 0.91893853320467
	return leading + 0.083333333333333 / x
# Herbie alternative 4 (56.5% per report): z-correction terms dropped entirely;
# only the 1/(12x) correction is kept. y and z are ignored.
function code(x, y, z)
	return Float64(Float64(Float64(Float64(Float64(x - 0.5) * log(x)) - x) + 0.91893853320467) + Float64(0.083333333333333 / x))
end
% Herbie alternative 4 (56.5% per report): z-correction dropped; only 1/(12x) kept. y and z are ignored.
function tmp = code(x, y, z)
	tmp = ((((x - 0.5) * log(x)) - x) + 0.91893853320467) + (0.083333333333333 / x);
end
(* Herbie alternative 4 (56.5% per report): z-correction dropped; only 1/(12x) kept. y and z are ignored. *)
code[x_, y_, z_] := N[(N[(N[(N[(N[(x - 0.5), $MachinePrecision] * N[Log[x], $MachinePrecision]), $MachinePrecision] - x), $MachinePrecision] + 0.91893853320467), $MachinePrecision] + N[(0.083333333333333 / x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \frac{0.083333333333333}{x}
\end{array}
Derivation
  1. Initial program 91.4%

    \[\left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
  2. Add Preprocessing
  3. Taylor expanded in z around 0 56.9%

    \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \color{blue}{\frac{0.083333333333333}{x}} \]
  4. Add Preprocessing

Alternative 5: 55.4% accurate, 1.1× speedup?

\[\begin{array}{l} \\ x \cdot \left(\log x + -1\right) + \frac{0.083333333333333}{x} \end{array} \]
;; Herbie alternative 5 (55.4% per report): fully simplified to
;; x*(log(x) - 1) + 1/(12x); y and z are ignored.
(FPCore (x y z)
 :precision binary64
 (+ (* x (+ (log x) -1.0)) (/ 0.083333333333333 x)))
/* Herbie alternative 5 (55.4% per report): fully simplified to
   x*(log(x) - 1) + 1/(12x); y and z are ignored. */
double code(double x, double y, double z) {
	return (x * (log(x) + -1.0)) + (0.083333333333333 / x);
}
! Herbie alternative 5 (55.4% per report): fully simplified to
! x*(log(x) - 1) + 1/(12x); y and z are ignored.
real(8) function code(x, y, z)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    code = (x * (log(x) + (-1.0d0))) + (0.083333333333333d0 / x)
end function
// Herbie alternative 5 (55.4% per report): fully simplified to
// x*(log(x) - 1) + 1/(12x); y and z are ignored.
public static double code(double x, double y, double z) {
	return (x * (Math.log(x) + -1.0)) + (0.083333333333333 / x);
}
def code(x, y, z):
	"""Herbie alternative 5: fully simplified to x*(log(x) - 1) + 1/(12x) (y, z unused)."""
	leading = x * (math.log(x) + -1.0)
	return leading + 0.083333333333333 / x
# Herbie alternative 5 (55.4% per report): fully simplified to
# x*(log(x) - 1) + 1/(12x); y and z are ignored.
function code(x, y, z)
	return Float64(Float64(x * Float64(log(x) + -1.0)) + Float64(0.083333333333333 / x))
end
function tmp = code(x, y, z)
	% Herbie alternative 5: x*(log(x) - 1) + 1/(12x); y and z are unused.
	series = log(x) + -1.0;
	correction = 0.083333333333333 / x;
	tmp = (x * series) + correction;
end
code[x_, y_, z_] := N[(N[(x * N[(N[Log[x], $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision] + N[(0.083333333333333 / x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
x \cdot \left(\log x + -1\right) + \frac{0.083333333333333}{x}
\end{array}
Derivation
  1. Initial program 91.4%

    \[\left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
  2. Add Preprocessing
  3. Taylor expanded in z around 0 56.9%

    \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \color{blue}{\frac{0.083333333333333}{x}} \]
  4. Taylor expanded in x around inf 56.3%

    \[\leadsto \color{blue}{x \cdot \left(-1 \cdot \log \left(\frac{1}{x}\right) - 1\right)} + \frac{0.083333333333333}{x} \]
  5. Step-by-step derivation
    1. sub-neg 98.5%

      \[\leadsto x \cdot \color{blue}{\left(-1 \cdot \log \left(\frac{1}{x}\right) + \left(-1\right)\right)} + \left(z \cdot \left(\frac{z}{x} \cdot \left(0.0007936500793651 + y\right)\right) + 0.083333333333333 \cdot \frac{1}{x}\right) \]
    2. mul-1-neg 98.5%

      \[\leadsto x \cdot \left(\color{blue}{\left(-\log \left(\frac{1}{x}\right)\right)} + \left(-1\right)\right) + \left(z \cdot \left(\frac{z}{x} \cdot \left(0.0007936500793651 + y\right)\right) + 0.083333333333333 \cdot \frac{1}{x}\right) \]
    3. log-rec 98.5%

      \[\leadsto x \cdot \left(\left(-\color{blue}{\left(-\log x\right)}\right) + \left(-1\right)\right) + \left(z \cdot \left(\frac{z}{x} \cdot \left(0.0007936500793651 + y\right)\right) + 0.083333333333333 \cdot \frac{1}{x}\right) \]
    4. remove-double-neg 98.5%

      \[\leadsto x \cdot \left(\color{blue}{\log x} + \left(-1\right)\right) + \left(z \cdot \left(\frac{z}{x} \cdot \left(0.0007936500793651 + y\right)\right) + 0.083333333333333 \cdot \frac{1}{x}\right) \]
    5. metadata-eval 98.5%

      \[\leadsto x \cdot \left(\log x + \color{blue}{-1}\right) + \left(z \cdot \left(\frac{z}{x} \cdot \left(0.0007936500793651 + y\right)\right) + 0.083333333333333 \cdot \frac{1}{x}\right) \]
    6. +-commutative 98.5%

      \[\leadsto x \cdot \color{blue}{\left(-1 + \log x\right)} + \left(z \cdot \left(\frac{z}{x} \cdot \left(0.0007936500793651 + y\right)\right) + 0.083333333333333 \cdot \frac{1}{x}\right) \]
  6. Simplified 56.3%

    \[\leadsto \color{blue}{x \cdot \left(-1 + \log x\right)} + \frac{0.083333333333333}{x} \]
  7. Final simplification 56.3%

    \[\leadsto x \cdot \left(\log x + -1\right) + \frac{0.083333333333333}{x} \]
  8. Add Preprocessing

Alternative 6: 23.4% accurate, 24.6× speedup?

\[\begin{array}{l} \\ 0.91893853320467 + \frac{0.083333333333333}{x} \end{array} \]
(FPCore (x y z)
 :precision binary64
 (+ 0.91893853320467 (/ 0.083333333333333 x)))
double code(double x, double y, double z) {
	/* Herbie alternative 6: constant plus the 1/(12x) correction; y, z unused. */
	const double correction = 0.083333333333333 / x;
	return 0.91893853320467 + correction;
}
real(8) function code(x, y, z)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8) :: correction
    ! Herbie alternative 6: constant plus the 1/(12x) correction; y, z unused.
    correction = 0.083333333333333d0 / x
    code = 0.91893853320467d0 + correction
end function
public static double code(double x, double y, double z) {
	// Herbie alternative 6: constant plus the 1/(12x) correction; y, z unused.
	final double correction = 0.083333333333333 / x;
	return 0.91893853320467 + correction;
}
def code(x, y, z):
	# Herbie alternative 6: constant plus the 1/(12x) correction; y, z unused.
	correction = 0.083333333333333 / x
	return 0.91893853320467 + correction
function code(x, y, z)
	# Herbie alternative 6: constant plus the 1/(12x) correction; y, z unused.
	correction = Float64(0.083333333333333 / x)
	return Float64(0.91893853320467 + correction)
end
function tmp = code(x, y, z)
	% Herbie alternative 6: constant plus the 1/(12x) correction; y, z unused.
	correction = 0.083333333333333 / x;
	tmp = 0.91893853320467 + correction;
end
code[x_, y_, z_] := N[(0.91893853320467 + N[(0.083333333333333 / x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
0.91893853320467 + \frac{0.083333333333333}{x}
\end{array}
Derivation
  1. Initial program 91.4%

    \[\left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
  2. Add Preprocessing
  3. Taylor expanded in z around 0 56.9%

    \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \color{blue}{\frac{0.083333333333333}{x}} \]
  4. Taylor expanded in x around inf 56.2%

    \[\leadsto \left(\left(\color{blue}{-1 \cdot \left(x \cdot \log \left(\frac{1}{x}\right)\right)} - x\right) + 0.91893853320467\right) + \frac{0.083333333333333}{x} \]
  5. Step-by-step derivation
    1. mul-1-neg 56.2%

      \[\leadsto \left(\left(\color{blue}{\left(-x \cdot \log \left(\frac{1}{x}\right)\right)} - x\right) + 0.91893853320467\right) + \frac{0.083333333333333}{x} \]
    2. distribute-rgt-neg-in 56.2%

      \[\leadsto \left(\left(\color{blue}{x \cdot \left(-\log \left(\frac{1}{x}\right)\right)} - x\right) + 0.91893853320467\right) + \frac{0.083333333333333}{x} \]
    3. log-rec 56.2%

      \[\leadsto \left(\left(x \cdot \left(-\color{blue}{\left(-\log x\right)}\right) - x\right) + 0.91893853320467\right) + \frac{0.083333333333333}{x} \]
    4. remove-double-neg 56.2%

      \[\leadsto \left(\left(x \cdot \color{blue}{\log x} - x\right) + 0.91893853320467\right) + \frac{0.083333333333333}{x} \]
  6. Simplified 56.2%

    \[\leadsto \left(\left(\color{blue}{x \cdot \log x} - x\right) + 0.91893853320467\right) + \frac{0.083333333333333}{x} \]
  7. Taylor expanded in x around 0 25.1%

    \[\leadsto \color{blue}{0.91893853320467} + \frac{0.083333333333333}{x} \]
  8. Add Preprocessing

Alternative 7: 22.8% accurate, 41.0× speedup?

\[\begin{array}{l} \\ \frac{0.083333333333333}{x} \end{array} \]
(FPCore (x y z) :precision binary64 (/ 0.083333333333333 x))
double code(double x, double y, double z) {
	/* Herbie alternative 7: only the 1/(12x) correction term; y, z unused. */
	const double coeff = 0.083333333333333;
	return coeff / x;
}
real(8) function code(x, y, z)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8) :: coeff
    ! Herbie alternative 7: only the 1/(12x) correction term; y, z unused.
    coeff = 0.083333333333333d0
    code = coeff / x
end function
public static double code(double x, double y, double z) {
	// Herbie alternative 7: only the 1/(12x) correction term; y, z unused.
	final double coeff = 0.083333333333333;
	return coeff / x;
}
def code(x, y, z):
	# Herbie alternative 7: only the 1/(12x) correction term; y, z unused.
	coeff = 0.083333333333333
	return coeff / x
function code(x, y, z)
	# Herbie alternative 7: only the 1/(12x) correction term; y, z unused.
	coeff = 0.083333333333333
	return Float64(coeff / x)
end
function tmp = code(x, y, z)
	% Herbie alternative 7: only the 1/(12x) correction term; y, z unused.
	coeff = 0.083333333333333;
	tmp = coeff / x;
end
code[x_, y_, z_] := N[(0.083333333333333 / x), $MachinePrecision]
\begin{array}{l}

\\
\frac{0.083333333333333}{x}
\end{array}
Derivation
  1. Initial program 91.4%

    \[\left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \frac{\left(\left(y + 0.0007936500793651\right) \cdot z - 0.0027777777777778\right) \cdot z + 0.083333333333333}{x} \]
  2. Add Preprocessing
  3. Taylor expanded in z around 0 56.9%

    \[\leadsto \left(\left(\left(x - 0.5\right) \cdot \log x - x\right) + 0.91893853320467\right) + \color{blue}{\frac{0.083333333333333}{x}} \]
  4. Step-by-step derivation
    1. sub-neg 56.9%

      \[\leadsto \left(\color{blue}{\left(\left(x - 0.5\right) \cdot \log x + \left(-x\right)\right)} + 0.91893853320467\right) + \frac{0.083333333333333}{x} \]
    2. associate-+l+ 56.9%

      \[\leadsto \color{blue}{\left(\left(x - 0.5\right) \cdot \log x + \left(\left(-x\right) + 0.91893853320467\right)\right)} + \frac{0.083333333333333}{x} \]
    3. +-commutative 56.9%

      \[\leadsto \left(\left(x - 0.5\right) \cdot \log x + \color{blue}{\left(0.91893853320467 + \left(-x\right)\right)}\right) + \frac{0.083333333333333}{x} \]
    4. sub-neg 56.9%

      \[\leadsto \left(\left(x - 0.5\right) \cdot \log x + \color{blue}{\left(0.91893853320467 - x\right)}\right) + \frac{0.083333333333333}{x} \]
    5. sub-neg 56.9%

      \[\leadsto \left(\color{blue}{\left(x + \left(-0.5\right)\right)} \cdot \log x + \left(0.91893853320467 - x\right)\right) + \frac{0.083333333333333}{x} \]
    6. metadata-eval 56.9%

      \[\leadsto \left(\left(x + \color{blue}{-0.5}\right) \cdot \log x + \left(0.91893853320467 - x\right)\right) + \frac{0.083333333333333}{x} \]
    7. fma-undefine 56.9%

      \[\leadsto \color{blue}{\mathsf{fma}\left(x + -0.5, \log x, 0.91893853320467 - x\right)} + \frac{0.083333333333333}{x} \]
    8. add-cube-cbrt 56.3%

      \[\leadsto \color{blue}{\left(\sqrt[3]{\mathsf{fma}\left(x + -0.5, \log x, 0.91893853320467 - x\right)} \cdot \sqrt[3]{\mathsf{fma}\left(x + -0.5, \log x, 0.91893853320467 - x\right)}\right) \cdot \sqrt[3]{\mathsf{fma}\left(x + -0.5, \log x, 0.91893853320467 - x\right)}} + \frac{0.083333333333333}{x} \]
    9. pow3 56.3%

      \[\leadsto \color{blue}{{\left(\sqrt[3]{\mathsf{fma}\left(x + -0.5, \log x, 0.91893853320467 - x\right)}\right)}^{3}} + \frac{0.083333333333333}{x} \]
  5. Applied egg-rr 56.3%

    \[\leadsto \color{blue}{{\left(\sqrt[3]{\mathsf{fma}\left(x + -0.5, \log x, 0.91893853320467 - x\right)}\right)}^{3}} + \frac{0.083333333333333}{x} \]
  6. Taylor expanded in x around 0 24.0%

    \[\leadsto \color{blue}{\left(0.91893853320467 + -0.5 \cdot \log x\right)} + \frac{0.083333333333333}{x} \]
  7. Step-by-step derivation
    1. *-commutative24.0%

      \[\leadsto \left(0.91893853320467 + \color{blue}{\log x \cdot -0.5}\right) + \frac{0.083333333333333}{x} \]
  8. Simplified 24.0%

    \[\leadsto \color{blue}{\left(0.91893853320467 + \log x \cdot -0.5\right)} + \frac{0.083333333333333}{x} \]
  9. Taylor expanded in x around 0 24.5%

    \[\leadsto \color{blue}{\frac{0.083333333333333}{x}} \]
  10. Add Preprocessing

Developer target: 98.7% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \left(\left(\left(x - 0.5\right) \cdot \log x + \left(0.91893853320467 - x\right)\right) + \frac{0.083333333333333}{x}\right) + \frac{z}{x} \cdot \left(z \cdot \left(y + 0.0007936500793651\right) - 0.0027777777777778\right) \end{array} \]
(FPCore (x y z)
 :precision binary64
 (+
  (+ (+ (* (- x 0.5) (log x)) (- 0.91893853320467 x)) (/ 0.083333333333333 x))
  (* (/ z x) (- (* z (+ y 0.0007936500793651)) 0.0027777777777778))))
double code(double x, double y, double z) {
	return ((((x - 0.5) * log(x)) + (0.91893853320467 - x)) + (0.083333333333333 / x)) + ((z / x) * ((z * (y + 0.0007936500793651)) - 0.0027777777777778));
}
real(8) function code(x, y, z)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8) :: stirling, lead, series
    ! Developer target: Stirling leading terms plus the z-dependent correction.
    stirling = ((x - 0.5d0) * log(x)) + (0.91893853320467d0 - x)
    lead = stirling + (0.083333333333333d0 / x)
    series = (z * (y + 0.0007936500793651d0)) - 0.0027777777777778d0
    code = lead + ((z / x) * series)
end function
public static double code(double x, double y, double z) {
	// Developer target: Stirling leading terms plus the z-dependent correction.
	final double stirling = ((x - 0.5) * Math.log(x)) + (0.91893853320467 - x);
	final double lead = stirling + (0.083333333333333 / x);
	final double series = (z * (y + 0.0007936500793651)) - 0.0027777777777778;
	return lead + ((z / x) * series);
}
def code(x, y, z):
	# Developer target: Stirling leading terms plus the z-dependent correction.
	stirling = ((x - 0.5) * math.log(x)) + (0.91893853320467 - x)
	lead = stirling + (0.083333333333333 / x)
	series = (z * (y + 0.0007936500793651)) - 0.0027777777777778
	return lead + ((z / x) * series)
function code(x, y, z)
	# Developer target: Stirling leading terms plus the z-dependent correction.
	stirling = Float64(Float64(Float64(x - 0.5) * log(x)) + Float64(0.91893853320467 - x))
	lead = Float64(stirling + Float64(0.083333333333333 / x))
	series = Float64(Float64(z * Float64(y + 0.0007936500793651)) - 0.0027777777777778)
	return Float64(lead + Float64(Float64(z / x) * series))
end
function tmp = code(x, y, z)
	% Developer target: Stirling leading terms plus the z-dependent correction.
	stirling = ((x - 0.5) * log(x)) + (0.91893853320467 - x);
	lead = stirling + (0.083333333333333 / x);
	series = (z * (y + 0.0007936500793651)) - 0.0027777777777778;
	tmp = lead + ((z / x) * series);
end
code[x_, y_, z_] := N[(N[(N[(N[(N[(x - 0.5), $MachinePrecision] * N[Log[x], $MachinePrecision]), $MachinePrecision] + N[(0.91893853320467 - x), $MachinePrecision]), $MachinePrecision] + N[(0.083333333333333 / x), $MachinePrecision]), $MachinePrecision] + N[(N[(z / x), $MachinePrecision] * N[(N[(z * N[(y + 0.0007936500793651), $MachinePrecision]), $MachinePrecision] - 0.0027777777777778), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\left(\left(\left(x - 0.5\right) \cdot \log x + \left(0.91893853320467 - x\right)\right) + \frac{0.083333333333333}{x}\right) + \frac{z}{x} \cdot \left(z \cdot \left(y + 0.0007936500793651\right) - 0.0027777777777778\right)
\end{array}

Reproduce

?
herbie shell --seed 2024106 
(FPCore (x y z)
  :name "Numeric.SpecFunctions:$slogFactorial from math-functions-0.1.5.2, B"
  :precision binary64

  :alt
  (+ (+ (+ (* (- x 0.5) (log x)) (- 0.91893853320467 x)) (/ 0.083333333333333 x)) (* (/ z x) (- (* z (+ y 0.0007936500793651)) 0.0027777777777778)))

  (+ (+ (- (* (- x 0.5) (log x)) x) 0.91893853320467) (/ (+ (* (- (* (+ y 0.0007936500793651) z) 0.0027777777777778) z) 0.083333333333333) x)))