Hyperbolic arcsine

Percentage Accurate: 18.8% → 99.8%
Time: 10.2s
Alternatives: 9
Speedup: 207.0×

Specification

\[\begin{array}{l} \\ \log \left(x + \sqrt{x \cdot x + 1}\right) \end{array} \]
(FPCore (x) :precision binary64 (log (+ x (sqrt (+ (* x x) 1.0)))))
double code(double x) {
	return log((x + sqrt(((x * x) + 1.0))));
}
real(8) function code(x)
    real(8), intent (in) :: x
    code = log((x + sqrt(((x * x) + 1.0d0))))
end function
public static double code(double x) {
	return Math.log((x + Math.sqrt(((x * x) + 1.0))));
}
def code(x):
	return math.log((x + math.sqrt(((x * x) + 1.0))))
function code(x)
	return log(Float64(x + sqrt(Float64(Float64(x * x) + 1.0))))
end
function tmp = code(x)
	tmp = log((x + sqrt(((x * x) + 1.0))));
end
code[x_] := N[Log[N[(x + N[Sqrt[N[(N[(x * x), $MachinePrecision] + 1.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}

\\
\log \left(x + \sqrt{x \cdot x + 1}\right)
\end{array}

Sampling outcomes in binary64 precision:

Local Percentage Accuracy vs x

The average percentage accuracy by input value. The horizontal axis shows the value of an input variable; the variable is chosen in the title. The vertical axis is accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion. These can be toggled with buttons below the plot. The line is an average, while dots represent individual samples.

Accuracy vs Speed

Herbie found 9 alternatives:

The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed-accuracy tradeoffs.

Initial Program: 18.8% accurate, 1.0× speedup

\[\begin{array}{l} \\ \log \left(x + \sqrt{x \cdot x + 1}\right) \end{array} \]
(FPCore (x) :precision binary64 (log (+ x (sqrt (+ (* x x) 1.0)))))
double code(double x) {
	return log((x + sqrt(((x * x) + 1.0))));
}
real(8) function code(x)
    real(8), intent (in) :: x
    code = log((x + sqrt(((x * x) + 1.0d0))))
end function
public static double code(double x) {
	return Math.log((x + Math.sqrt(((x * x) + 1.0))));
}
def code(x):
	return math.log((x + math.sqrt(((x * x) + 1.0))))
function code(x)
	return log(Float64(x + sqrt(Float64(Float64(x * x) + 1.0))))
end
function tmp = code(x)
	tmp = log((x + sqrt(((x * x) + 1.0))));
end
code[x_] := N[Log[N[(x + N[Sqrt[N[(N[(x * x), $MachinePrecision] + 1.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}

\\
\log \left(x + \sqrt{x \cdot x + 1}\right)
\end{array}
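
To see why the initial program scores only 18.8%, note that for large negative x the square root sqrt(x · x + 1) is almost exactly -x, so the addition inside the logarithm cancels nearly all significant digits, and the logarithm then magnifies the error. Below is a minimal sketch of the effect in Python, using math.asinh only as a reference value; the helper name naive_asinh is ours, not part of the report:

import math

def naive_asinh(x):
	# the initial program as specified: log(x + sqrt(x*x + 1))
	return math.log(x + math.sqrt((x * x) + 1.0))

# For large negative x, sqrt(x*x + 1) is nearly -x, so the addition cancels
# almost every significant digit before the logarithm is applied.
for x in (-1e3, -1e6):
	print(x, naive_asinh(x), math.asinh(x))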

Alternative 1: 99.8% accurate, 0.6× speedup

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq -0.02:\\ \;\;\;\;-\log \left(\mathsf{hypot}\left(1, x\right) - x\right)\\ \mathbf{elif}\;x \leq 1.1:\\ \;\;\;\;-0.16666666666666666 \cdot {x}^{3} + \left(0.075 \cdot {x}^{5} + \left(x + -0.044642857142857144 \cdot {x}^{7}\right)\right)\\ \mathbf{else}:\\ \;\;\;\;\log \left(x \cdot 2 + 0.5 \cdot \frac{1}{x}\right)\\ \end{array} \end{array} \]
(FPCore (x)
 :precision binary64
 (if (<= x -0.02)
   (- (log (- (hypot 1.0 x) x)))
   (if (<= x 1.1)
     (+
      (* -0.16666666666666666 (pow x 3.0))
      (+ (* 0.075 (pow x 5.0)) (+ x (* -0.044642857142857144 (pow x 7.0)))))
     (log (+ (* x 2.0) (* 0.5 (/ 1.0 x)))))))
double code(double x) {
	double tmp;
	if (x <= -0.02) {
		tmp = -log((hypot(1.0, x) - x));
	} else if (x <= 1.1) {
		tmp = (-0.16666666666666666 * pow(x, 3.0)) + ((0.075 * pow(x, 5.0)) + (x + (-0.044642857142857144 * pow(x, 7.0))));
	} else {
		tmp = log(((x * 2.0) + (0.5 * (1.0 / x))));
	}
	return tmp;
}
public static double code(double x) {
	double tmp;
	if (x <= -0.02) {
		tmp = -Math.log((Math.hypot(1.0, x) - x));
	} else if (x <= 1.1) {
		tmp = (-0.16666666666666666 * Math.pow(x, 3.0)) + ((0.075 * Math.pow(x, 5.0)) + (x + (-0.044642857142857144 * Math.pow(x, 7.0))));
	} else {
		tmp = Math.log(((x * 2.0) + (0.5 * (1.0 / x))));
	}
	return tmp;
}
def code(x):
	tmp = 0
	if x <= -0.02:
		tmp = -math.log((math.hypot(1.0, x) - x))
	elif x <= 1.1:
		tmp = (-0.16666666666666666 * math.pow(x, 3.0)) + ((0.075 * math.pow(x, 5.0)) + (x + (-0.044642857142857144 * math.pow(x, 7.0))))
	else:
		tmp = math.log(((x * 2.0) + (0.5 * (1.0 / x))))
	return tmp
function code(x)
	tmp = 0.0
	if (x <= -0.02)
		tmp = Float64(-log(Float64(hypot(1.0, x) - x)));
	elseif (x <= 1.1)
		tmp = Float64(Float64(-0.16666666666666666 * (x ^ 3.0)) + Float64(Float64(0.075 * (x ^ 5.0)) + Float64(x + Float64(-0.044642857142857144 * (x ^ 7.0)))));
	else
		tmp = log(Float64(Float64(x * 2.0) + Float64(0.5 * Float64(1.0 / x))));
	end
	return tmp
end
function tmp_2 = code(x)
	tmp = 0.0;
	if (x <= -0.02)
		tmp = -log((hypot(1.0, x) - x));
	elseif (x <= 1.1)
		tmp = (-0.16666666666666666 * (x ^ 3.0)) + ((0.075 * (x ^ 5.0)) + (x + (-0.044642857142857144 * (x ^ 7.0))));
	else
		tmp = log(((x * 2.0) + (0.5 * (1.0 / x))));
	end
	tmp_2 = tmp;
end
code[x_] := If[LessEqual[x, -0.02], (-N[Log[N[(N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision] - x), $MachinePrecision]], $MachinePrecision]), If[LessEqual[x, 1.1], N[(N[(-0.16666666666666666 * N[Power[x, 3.0], $MachinePrecision]), $MachinePrecision] + N[(N[(0.075 * N[Power[x, 5.0], $MachinePrecision]), $MachinePrecision] + N[(x + N[(-0.044642857142857144 * N[Power[x, 7.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[Log[N[(N[(x * 2.0), $MachinePrecision] + N[(0.5 * N[(1.0 / x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x \leq -0.02:\\
\;\;\;\;-\log \left(\mathsf{hypot}\left(1, x\right) - x\right)\\

\mathbf{elif}\;x \leq 1.1:\\
\;\;\;\;-0.16666666666666666 \cdot {x}^{3} + \left(0.075 \cdot {x}^{5} + \left(x + -0.044642857142857144 \cdot {x}^{7}\right)\right)\\

\mathbf{else}:\\
\;\;\;\;\log \left(x \cdot 2 + 0.5 \cdot \frac{1}{x}\right)\\


\end{array}
\end{array}
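
For reference, the polynomial branch above is the degree-7 Maclaurin polynomial of the hyperbolic arcsine; its decimal constants are the exact series coefficients:

\[\log \left(x + \sqrt{x \cdot x + 1}\right) = x - \frac{1}{6} {x}^{3} + \frac{3}{40} {x}^{5} - \frac{5}{112} {x}^{7} + O\left({x}^{9}\right), \qquad \frac{1}{6} \approx 0.16666666666666666, \; \frac{3}{40} = 0.075, \; \frac{5}{112} \approx 0.044642857142857144 \]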
Derivation
  1. Split input into 3 regimes
  2. if x < -0.0200000000000000004

    1. Initial program 4.8%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Step-by-step derivation
      1. sqr-neg4.8%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{\left(-x\right) \cdot \left(-x\right)} + 1}\right) \]
      2. +-commutative4.8%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{1 + \left(-x\right) \cdot \left(-x\right)}}\right) \]
      3. sqr-neg4.8%

        \[\leadsto \log \left(x + \sqrt{1 + \color{blue}{x \cdot x}}\right) \]
      4. hypot-1-def5.8%

        \[\leadsto \log \left(x + \color{blue}{\mathsf{hypot}\left(1, x\right)}\right) \]
    3. Simplified5.8%

      \[\leadsto \color{blue}{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)} \]
    4. Step-by-step derivation
      1. flip-+4.5%

        \[\leadsto \log \color{blue}{\left(\frac{x \cdot x - \mathsf{hypot}\left(1, x\right) \cdot \mathsf{hypot}\left(1, x\right)}{x - \mathsf{hypot}\left(1, x\right)}\right)} \]
      2. div-sub4.5%

        \[\leadsto \log \color{blue}{\left(\frac{x \cdot x}{x - \mathsf{hypot}\left(1, x\right)} - \frac{\mathsf{hypot}\left(1, x\right) \cdot \mathsf{hypot}\left(1, x\right)}{x - \mathsf{hypot}\left(1, x\right)}\right)} \]
      3. hypot-udef4.5%

        \[\leadsto \log \left(\frac{x \cdot x}{x - \mathsf{hypot}\left(1, x\right)} - \frac{\color{blue}{\sqrt{1 \cdot 1 + x \cdot x}} \cdot \mathsf{hypot}\left(1, x\right)}{x - \mathsf{hypot}\left(1, x\right)}\right) \]
      4. hypot-udef4.5%

        \[\leadsto \log \left(\frac{x \cdot x}{x - \mathsf{hypot}\left(1, x\right)} - \frac{\sqrt{1 \cdot 1 + x \cdot x} \cdot \color{blue}{\sqrt{1 \cdot 1 + x \cdot x}}}{x - \mathsf{hypot}\left(1, x\right)}\right) \]
      5. add-sqr-sqrt4.5%

        \[\leadsto \log \left(\frac{x \cdot x}{x - \mathsf{hypot}\left(1, x\right)} - \frac{\color{blue}{1 \cdot 1 + x \cdot x}}{x - \mathsf{hypot}\left(1, x\right)}\right) \]
      6. metadata-eval4.5%

        \[\leadsto \log \left(\frac{x \cdot x}{x - \mathsf{hypot}\left(1, x\right)} - \frac{\color{blue}{1} + x \cdot x}{x - \mathsf{hypot}\left(1, x\right)}\right) \]
    5. Applied egg-rr4.5%

      \[\leadsto \log \color{blue}{\left(\frac{x \cdot x}{x - \mathsf{hypot}\left(1, x\right)} - \frac{1 + x \cdot x}{x - \mathsf{hypot}\left(1, x\right)}\right)} \]
    6. Step-by-step derivation
      1. div-sub4.8%

        \[\leadsto \log \color{blue}{\left(\frac{x \cdot x - \left(1 + x \cdot x\right)}{x - \mathsf{hypot}\left(1, x\right)}\right)} \]
      2. +-commutative4.8%

        \[\leadsto \log \left(\frac{x \cdot x - \color{blue}{\left(x \cdot x + 1\right)}}{x - \mathsf{hypot}\left(1, x\right)}\right) \]
      3. associate--r+59.1%

        \[\leadsto \log \left(\frac{\color{blue}{\left(x \cdot x - x \cdot x\right) - 1}}{x - \mathsf{hypot}\left(1, x\right)}\right) \]
      4. +-inverses98.5%

        \[\leadsto \log \left(\frac{\color{blue}{0} - 1}{x - \mathsf{hypot}\left(1, x\right)}\right) \]
      5. metadata-eval98.5%

        \[\leadsto \log \left(\frac{\color{blue}{-1}}{x - \mathsf{hypot}\left(1, x\right)}\right) \]
    7. Simplified98.5%

      \[\leadsto \log \color{blue}{\left(\frac{-1}{x - \mathsf{hypot}\left(1, x\right)}\right)} \]
    8. Step-by-step derivation
      1. *-un-lft-identity98.5%

        \[\leadsto \log \color{blue}{\left(1 \cdot \frac{-1}{x - \mathsf{hypot}\left(1, x\right)}\right)} \]
      2. log-prod98.5%

        \[\leadsto \color{blue}{\log 1 + \log \left(\frac{-1}{x - \mathsf{hypot}\left(1, x\right)}\right)} \]
      3. metadata-eval98.5%

        \[\leadsto \color{blue}{0} + \log \left(\frac{-1}{x - \mathsf{hypot}\left(1, x\right)}\right) \]
    9. Applied egg-rr98.5%

      \[\leadsto \color{blue}{0 + \log \left(\frac{-1}{x - \mathsf{hypot}\left(1, x\right)}\right)} \]
    10. Step-by-step derivation
      1. +-lft-identity98.5%

        \[\leadsto \color{blue}{\log \left(\frac{-1}{x - \mathsf{hypot}\left(1, x\right)}\right)} \]
      2. metadata-eval98.5%

        \[\leadsto \log \left(\frac{\color{blue}{\frac{1}{-1}}}{x - \mathsf{hypot}\left(1, x\right)}\right) \]
      3. associate-/r*98.5%

        \[\leadsto \log \color{blue}{\left(\frac{1}{-1 \cdot \left(x - \mathsf{hypot}\left(1, x\right)\right)}\right)} \]
      4. *-commutative98.5%

        \[\leadsto \log \left(\frac{1}{\color{blue}{\left(x - \mathsf{hypot}\left(1, x\right)\right) \cdot -1}}\right) \]
      5. log-rec98.5%

        \[\leadsto \color{blue}{-\log \left(\left(x - \mathsf{hypot}\left(1, x\right)\right) \cdot -1\right)} \]
      6. *-commutative98.5%

        \[\leadsto -\log \color{blue}{\left(-1 \cdot \left(x - \mathsf{hypot}\left(1, x\right)\right)\right)} \]
      7. neg-mul-198.5%

        \[\leadsto -\log \color{blue}{\left(-\left(x - \mathsf{hypot}\left(1, x\right)\right)\right)} \]
      8. neg-sub098.5%

        \[\leadsto -\log \color{blue}{\left(0 - \left(x - \mathsf{hypot}\left(1, x\right)\right)\right)} \]
      9. sub-neg98.5%

        \[\leadsto -\log \left(0 - \color{blue}{\left(x + \left(-\mathsf{hypot}\left(1, x\right)\right)\right)}\right) \]
      10. +-commutative98.5%

        \[\leadsto -\log \left(0 - \color{blue}{\left(\left(-\mathsf{hypot}\left(1, x\right)\right) + x\right)}\right) \]
      11. associate--r+98.5%

        \[\leadsto -\log \color{blue}{\left(\left(0 - \left(-\mathsf{hypot}\left(1, x\right)\right)\right) - x\right)} \]
      12. neg-sub098.5%

        \[\leadsto -\log \left(\color{blue}{\left(-\left(-\mathsf{hypot}\left(1, x\right)\right)\right)} - x\right) \]
      13. remove-double-neg98.5%

        \[\leadsto -\log \left(\color{blue}{\mathsf{hypot}\left(1, x\right)} - x\right) \]
    11. Simplified98.5%

      \[\leadsto \color{blue}{-\log \left(\mathsf{hypot}\left(1, x\right) - x\right)} \]

    if -0.0200000000000000004 < x < 1.1000000000000001

    1. Initial program 8.0%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Step-by-step derivation
      1. sqr-neg8.0%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{\left(-x\right) \cdot \left(-x\right)} + 1}\right) \]
      2. +-commutative8.0%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{1 + \left(-x\right) \cdot \left(-x\right)}}\right) \]
      3. sqr-neg8.0%

        \[\leadsto \log \left(x + \sqrt{1 + \color{blue}{x \cdot x}}\right) \]
      4. hypot-1-def8.0%

        \[\leadsto \log \left(x + \color{blue}{\mathsf{hypot}\left(1, x\right)}\right) \]
    3. Simplified8.0%

      \[\leadsto \color{blue}{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)} \]
    4. Taylor expanded in x around 0 100.0%

      \[\leadsto \color{blue}{-0.16666666666666666 \cdot {x}^{3} + \left(0.075 \cdot {x}^{5} + \left(-0.044642857142857144 \cdot {x}^{7} + x\right)\right)} \]

    if 1.1000000000000001 < x

    1. Initial program 44.7%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Step-by-step derivation
      1. sqr-neg44.7%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{\left(-x\right) \cdot \left(-x\right)} + 1}\right) \]
      2. +-commutative44.7%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{1 + \left(-x\right) \cdot \left(-x\right)}}\right) \]
      3. sqr-neg44.7%

        \[\leadsto \log \left(x + \sqrt{1 + \color{blue}{x \cdot x}}\right) \]
      4. hypot-1-def100.0%

        \[\leadsto \log \left(x + \color{blue}{\mathsf{hypot}\left(1, x\right)}\right) \]
    3. Simplified100.0%

      \[\leadsto \color{blue}{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)} \]
    4. Taylor expanded in x around inf 100.0%

      \[\leadsto \log \color{blue}{\left(2 \cdot x + 0.5 \cdot \frac{1}{x}\right)} \]
  3. Recombined 3 regimes into one program.
  4. Final simplification99.6%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -0.02:\\ \;\;\;\;-\log \left(\mathsf{hypot}\left(1, x\right) - x\right)\\ \mathbf{elif}\;x \leq 1.1:\\ \;\;\;\;-0.16666666666666666 \cdot {x}^{3} + \left(0.075 \cdot {x}^{5} + \left(x + -0.044642857142857144 \cdot {x}^{7}\right)\right)\\ \mathbf{else}:\\ \;\;\;\;\log \left(x \cdot 2 + 0.5 \cdot \frac{1}{x}\right)\\ \end{array} \]
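
As a quick sanity check (not part of the report), the regime-split program can be evaluated next to the math library's asinh at one point per branch; this sketch transcribes the Python listing of Alternative 1, and the helper name alternative_1 is ours:

import math

def alternative_1(x):
	# transcribed from the Python listing of Alternative 1 above
	if x <= -0.02:
		return -math.log(math.hypot(1.0, x) - x)
	elif x <= 1.1:
		return (-0.16666666666666666 * math.pow(x, 3.0)) + ((0.075 * math.pow(x, 5.0)) + (x + (-0.044642857142857144 * math.pow(x, 7.0))))
	else:
		return math.log((x * 2.0) + (0.5 * (1.0 / x)))

for x in (-100.0, 0.25, 100.0):  # one sample from each regime
	print(x, alternative_1(x), math.asinh(x))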

Alternative 2: 99.9% accurate, 1.0× speedup

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq -0.007:\\ \;\;\;\;-\log \left(\mathsf{hypot}\left(1, x\right) - x\right)\\ \mathbf{elif}\;x \leq 0.0055:\\ \;\;\;\;-0.16666666666666666 \cdot {x}^{3} + \left(x + 0.075 \cdot {x}^{5}\right)\\ \mathbf{else}:\\ \;\;\;\;\log \left(x + \mathsf{hypot}\left(1, x\right)\right)\\ \end{array} \end{array} \]
(FPCore (x)
 :precision binary64
 (if (<= x -0.007)
   (- (log (- (hypot 1.0 x) x)))
   (if (<= x 0.0055)
     (+ (* -0.16666666666666666 (pow x 3.0)) (+ x (* 0.075 (pow x 5.0))))
     (log (+ x (hypot 1.0 x))))))
double code(double x) {
	double tmp;
	if (x <= -0.007) {
		tmp = -log((hypot(1.0, x) - x));
	} else if (x <= 0.0055) {
		tmp = (-0.16666666666666666 * pow(x, 3.0)) + (x + (0.075 * pow(x, 5.0)));
	} else {
		tmp = log((x + hypot(1.0, x)));
	}
	return tmp;
}
public static double code(double x) {
	double tmp;
	if (x <= -0.007) {
		tmp = -Math.log((Math.hypot(1.0, x) - x));
	} else if (x <= 0.0055) {
		tmp = (-0.16666666666666666 * Math.pow(x, 3.0)) + (x + (0.075 * Math.pow(x, 5.0)));
	} else {
		tmp = Math.log((x + Math.hypot(1.0, x)));
	}
	return tmp;
}
def code(x):
	tmp = 0
	if x <= -0.007:
		tmp = -math.log((math.hypot(1.0, x) - x))
	elif x <= 0.0055:
		tmp = (-0.16666666666666666 * math.pow(x, 3.0)) + (x + (0.075 * math.pow(x, 5.0)))
	else:
		tmp = math.log((x + math.hypot(1.0, x)))
	return tmp
function code(x)
	tmp = 0.0
	if (x <= -0.007)
		tmp = Float64(-log(Float64(hypot(1.0, x) - x)));
	elseif (x <= 0.0055)
		tmp = Float64(Float64(-0.16666666666666666 * (x ^ 3.0)) + Float64(x + Float64(0.075 * (x ^ 5.0))));
	else
		tmp = log(Float64(x + hypot(1.0, x)));
	end
	return tmp
end
function tmp_2 = code(x)
	tmp = 0.0;
	if (x <= -0.007)
		tmp = -log((hypot(1.0, x) - x));
	elseif (x <= 0.0055)
		tmp = (-0.16666666666666666 * (x ^ 3.0)) + (x + (0.075 * (x ^ 5.0)));
	else
		tmp = log((x + hypot(1.0, x)));
	end
	tmp_2 = tmp;
end
code[x_] := If[LessEqual[x, -0.007], (-N[Log[N[(N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision] - x), $MachinePrecision]], $MachinePrecision]), If[LessEqual[x, 0.0055], N[(N[(-0.16666666666666666 * N[Power[x, 3.0], $MachinePrecision]), $MachinePrecision] + N[(x + N[(0.075 * N[Power[x, 5.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[Log[N[(x + N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x \leq -0.007:\\
\;\;\;\;-\log \left(\mathsf{hypot}\left(1, x\right) - x\right)\\

\mathbf{elif}\;x \leq 0.0055:\\
\;\;\;\;-0.16666666666666666 \cdot {x}^{3} + \left(x + 0.075 \cdot {x}^{5}\right)\\

\mathbf{else}:\\
\;\;\;\;\log \left(x + \mathsf{hypot}\left(1, x\right)\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if x < -0.00700000000000000015

    1. Initial program 4.8%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Step-by-step derivation
      1. sqr-neg4.8%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{\left(-x\right) \cdot \left(-x\right)} + 1}\right) \]
      2. +-commutative4.8%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{1 + \left(-x\right) \cdot \left(-x\right)}}\right) \]
      3. sqr-neg4.8%

        \[\leadsto \log \left(x + \sqrt{1 + \color{blue}{x \cdot x}}\right) \]
      4. hypot-1-def5.8%

        \[\leadsto \log \left(x + \color{blue}{\mathsf{hypot}\left(1, x\right)}\right) \]
    3. Simplified5.8%

      \[\leadsto \color{blue}{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)} \]
    4. Step-by-step derivation
      1. flip-+4.5%

        \[\leadsto \log \color{blue}{\left(\frac{x \cdot x - \mathsf{hypot}\left(1, x\right) \cdot \mathsf{hypot}\left(1, x\right)}{x - \mathsf{hypot}\left(1, x\right)}\right)} \]
      2. div-sub4.5%

        \[\leadsto \log \color{blue}{\left(\frac{x \cdot x}{x - \mathsf{hypot}\left(1, x\right)} - \frac{\mathsf{hypot}\left(1, x\right) \cdot \mathsf{hypot}\left(1, x\right)}{x - \mathsf{hypot}\left(1, x\right)}\right)} \]
      3. hypot-udef4.5%

        \[\leadsto \log \left(\frac{x \cdot x}{x - \mathsf{hypot}\left(1, x\right)} - \frac{\color{blue}{\sqrt{1 \cdot 1 + x \cdot x}} \cdot \mathsf{hypot}\left(1, x\right)}{x - \mathsf{hypot}\left(1, x\right)}\right) \]
      4. hypot-udef4.5%

        \[\leadsto \log \left(\frac{x \cdot x}{x - \mathsf{hypot}\left(1, x\right)} - \frac{\sqrt{1 \cdot 1 + x \cdot x} \cdot \color{blue}{\sqrt{1 \cdot 1 + x \cdot x}}}{x - \mathsf{hypot}\left(1, x\right)}\right) \]
      5. add-sqr-sqrt4.5%

        \[\leadsto \log \left(\frac{x \cdot x}{x - \mathsf{hypot}\left(1, x\right)} - \frac{\color{blue}{1 \cdot 1 + x \cdot x}}{x - \mathsf{hypot}\left(1, x\right)}\right) \]
      6. metadata-eval4.5%

        \[\leadsto \log \left(\frac{x \cdot x}{x - \mathsf{hypot}\left(1, x\right)} - \frac{\color{blue}{1} + x \cdot x}{x - \mathsf{hypot}\left(1, x\right)}\right) \]
    5. Applied egg-rr4.5%

      \[\leadsto \log \color{blue}{\left(\frac{x \cdot x}{x - \mathsf{hypot}\left(1, x\right)} - \frac{1 + x \cdot x}{x - \mathsf{hypot}\left(1, x\right)}\right)} \]
    6. Step-by-step derivation
      1. div-sub4.8%

        \[\leadsto \log \color{blue}{\left(\frac{x \cdot x - \left(1 + x \cdot x\right)}{x - \mathsf{hypot}\left(1, x\right)}\right)} \]
      2. +-commutative4.8%

        \[\leadsto \log \left(\frac{x \cdot x - \color{blue}{\left(x \cdot x + 1\right)}}{x - \mathsf{hypot}\left(1, x\right)}\right) \]
      3. associate--r+59.1%

        \[\leadsto \log \left(\frac{\color{blue}{\left(x \cdot x - x \cdot x\right) - 1}}{x - \mathsf{hypot}\left(1, x\right)}\right) \]
      4. +-inverses98.5%

        \[\leadsto \log \left(\frac{\color{blue}{0} - 1}{x - \mathsf{hypot}\left(1, x\right)}\right) \]
      5. metadata-eval98.5%

        \[\leadsto \log \left(\frac{\color{blue}{-1}}{x - \mathsf{hypot}\left(1, x\right)}\right) \]
    7. Simplified98.5%

      \[\leadsto \log \color{blue}{\left(\frac{-1}{x - \mathsf{hypot}\left(1, x\right)}\right)} \]
    8. Step-by-step derivation
      1. *-un-lft-identity98.5%

        \[\leadsto \log \color{blue}{\left(1 \cdot \frac{-1}{x - \mathsf{hypot}\left(1, x\right)}\right)} \]
      2. log-prod98.5%

        \[\leadsto \color{blue}{\log 1 + \log \left(\frac{-1}{x - \mathsf{hypot}\left(1, x\right)}\right)} \]
      3. metadata-eval98.5%

        \[\leadsto \color{blue}{0} + \log \left(\frac{-1}{x - \mathsf{hypot}\left(1, x\right)}\right) \]
    9. Applied egg-rr98.5%

      \[\leadsto \color{blue}{0 + \log \left(\frac{-1}{x - \mathsf{hypot}\left(1, x\right)}\right)} \]
    10. Step-by-step derivation
      1. +-lft-identity98.5%

        \[\leadsto \color{blue}{\log \left(\frac{-1}{x - \mathsf{hypot}\left(1, x\right)}\right)} \]
      2. metadata-eval98.5%

        \[\leadsto \log \left(\frac{\color{blue}{\frac{1}{-1}}}{x - \mathsf{hypot}\left(1, x\right)}\right) \]
      3. associate-/r*98.5%

        \[\leadsto \log \color{blue}{\left(\frac{1}{-1 \cdot \left(x - \mathsf{hypot}\left(1, x\right)\right)}\right)} \]
      4. *-commutative98.5%

        \[\leadsto \log \left(\frac{1}{\color{blue}{\left(x - \mathsf{hypot}\left(1, x\right)\right) \cdot -1}}\right) \]
      5. log-rec98.5%

        \[\leadsto \color{blue}{-\log \left(\left(x - \mathsf{hypot}\left(1, x\right)\right) \cdot -1\right)} \]
      6. *-commutative98.5%

        \[\leadsto -\log \color{blue}{\left(-1 \cdot \left(x - \mathsf{hypot}\left(1, x\right)\right)\right)} \]
      7. neg-mul-198.5%

        \[\leadsto -\log \color{blue}{\left(-\left(x - \mathsf{hypot}\left(1, x\right)\right)\right)} \]
      8. neg-sub098.5%

        \[\leadsto -\log \color{blue}{\left(0 - \left(x - \mathsf{hypot}\left(1, x\right)\right)\right)} \]
      9. sub-neg98.5%

        \[\leadsto -\log \left(0 - \color{blue}{\left(x + \left(-\mathsf{hypot}\left(1, x\right)\right)\right)}\right) \]
      10. +-commutative98.5%

        \[\leadsto -\log \left(0 - \color{blue}{\left(\left(-\mathsf{hypot}\left(1, x\right)\right) + x\right)}\right) \]
      11. associate--r+98.5%

        \[\leadsto -\log \color{blue}{\left(\left(0 - \left(-\mathsf{hypot}\left(1, x\right)\right)\right) - x\right)} \]
      12. neg-sub098.5%

        \[\leadsto -\log \left(\color{blue}{\left(-\left(-\mathsf{hypot}\left(1, x\right)\right)\right)} - x\right) \]
      13. remove-double-neg98.5%

        \[\leadsto -\log \left(\color{blue}{\mathsf{hypot}\left(1, x\right)} - x\right) \]
    11. Simplified98.5%

      \[\leadsto \color{blue}{-\log \left(\mathsf{hypot}\left(1, x\right) - x\right)} \]

    if -0.00700000000000000015 < x < 0.0054999999999999997

    1. Initial program 7.3%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Step-by-step derivation
      1. sqr-neg7.3%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{\left(-x\right) \cdot \left(-x\right)} + 1}\right) \]
      2. +-commutative7.3%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{1 + \left(-x\right) \cdot \left(-x\right)}}\right) \]
      3. sqr-neg7.3%

        \[\leadsto \log \left(x + \sqrt{1 + \color{blue}{x \cdot x}}\right) \]
      4. hypot-1-def7.3%

        \[\leadsto \log \left(x + \color{blue}{\mathsf{hypot}\left(1, x\right)}\right) \]
    3. Simplified7.3%

      \[\leadsto \color{blue}{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)} \]
    4. Taylor expanded in x around 0 100.0%

      \[\leadsto \color{blue}{-0.16666666666666666 \cdot {x}^{3} + \left(0.075 \cdot {x}^{5} + x\right)} \]

    if 0.0054999999999999997 < x

    1. Initial program 45.4%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Step-by-step derivation
      1. sqr-neg45.4%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{\left(-x\right) \cdot \left(-x\right)} + 1}\right) \]
      2. +-commutative45.4%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{1 + \left(-x\right) \cdot \left(-x\right)}}\right) \]
      3. sqr-neg45.4%

        \[\leadsto \log \left(x + \sqrt{1 + \color{blue}{x \cdot x}}\right) \]
      4. hypot-1-def99.9%

        \[\leadsto \log \left(x + \color{blue}{\mathsf{hypot}\left(1, x\right)}\right) \]
    3. Simplified99.9%

      \[\leadsto \color{blue}{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)} \]
  3. Recombined 3 regimes into one program.
  4. Final simplification99.6%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -0.007:\\ \;\;\;\;-\log \left(\mathsf{hypot}\left(1, x\right) - x\right)\\ \mathbf{elif}\;x \leq 0.0055:\\ \;\;\;\;-0.16666666666666666 \cdot {x}^{3} + \left(x + 0.075 \cdot {x}^{5}\right)\\ \mathbf{else}:\\ \;\;\;\;\log \left(x + \mathsf{hypot}\left(1, x\right)\right)\\ \end{array} \]

Alternative 3: 99.6% accurate, 1.0× speedup

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq -1.25:\\ \;\;\;\;\log \left(\frac{-0.5}{x}\right)\\ \mathbf{elif}\;x \leq 0.00086:\\ \;\;\;\;x + -0.16666666666666666 \cdot {x}^{3}\\ \mathbf{else}:\\ \;\;\;\;\log \left(x + \mathsf{hypot}\left(1, x\right)\right)\\ \end{array} \end{array} \]
(FPCore (x)
 :precision binary64
 (if (<= x -1.25)
   (log (/ -0.5 x))
   (if (<= x 0.00086)
     (+ x (* -0.16666666666666666 (pow x 3.0)))
     (log (+ x (hypot 1.0 x))))))
double code(double x) {
	double tmp;
	if (x <= -1.25) {
		tmp = log((-0.5 / x));
	} else if (x <= 0.00086) {
		tmp = x + (-0.16666666666666666 * pow(x, 3.0));
	} else {
		tmp = log((x + hypot(1.0, x)));
	}
	return tmp;
}
public static double code(double x) {
	double tmp;
	if (x <= -1.25) {
		tmp = Math.log((-0.5 / x));
	} else if (x <= 0.00086) {
		tmp = x + (-0.16666666666666666 * Math.pow(x, 3.0));
	} else {
		tmp = Math.log((x + Math.hypot(1.0, x)));
	}
	return tmp;
}
def code(x):
	tmp = 0
	if x <= -1.25:
		tmp = math.log((-0.5 / x))
	elif x <= 0.00086:
		tmp = x + (-0.16666666666666666 * math.pow(x, 3.0))
	else:
		tmp = math.log((x + math.hypot(1.0, x)))
	return tmp
function code(x)
	tmp = 0.0
	if (x <= -1.25)
		tmp = log(Float64(-0.5 / x));
	elseif (x <= 0.00086)
		tmp = Float64(x + Float64(-0.16666666666666666 * (x ^ 3.0)));
	else
		tmp = log(Float64(x + hypot(1.0, x)));
	end
	return tmp
end
function tmp_2 = code(x)
	tmp = 0.0;
	if (x <= -1.25)
		tmp = log((-0.5 / x));
	elseif (x <= 0.00086)
		tmp = x + (-0.16666666666666666 * (x ^ 3.0));
	else
		tmp = log((x + hypot(1.0, x)));
	end
	tmp_2 = tmp;
end
code[x_] := If[LessEqual[x, -1.25], N[Log[N[(-0.5 / x), $MachinePrecision]], $MachinePrecision], If[LessEqual[x, 0.00086], N[(x + N[(-0.16666666666666666 * N[Power[x, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[Log[N[(x + N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x \leq -1.25:\\
\;\;\;\;\log \left(\frac{-0.5}{x}\right)\\

\mathbf{elif}\;x \leq 0.00086:\\
\;\;\;\;x + -0.16666666666666666 \cdot {x}^{3}\\

\mathbf{else}:\\
\;\;\;\;\log \left(x + \mathsf{hypot}\left(1, x\right)\right)\\


\end{array}
\end{array}
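
For context, the -0.5/x branch here and the 2·x + 0.5·(1/x) branch of Alternative 1 are the two asymptotic expansions of the logarithm's argument, obtained from sqrt(x · x + 1) = |x| + 1/(2·|x|) + O(1/|x|³) for large |x|:

\[x + \sqrt{x \cdot x + 1} \approx x \cdot 2 + 0.5 \cdot \frac{1}{x} \quad \left(x \to +\infty\right), \qquad x + \sqrt{x \cdot x + 1} \approx \frac{-0.5}{x} \quad \left(x \to -\infty\right) \]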
Derivation
  1. Split input into 3 regimes
  2. if x < -1.25

    1. Initial program 4.8%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Step-by-step derivation
      1. sqr-neg4.8%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{\left(-x\right) \cdot \left(-x\right)} + 1}\right) \]
      2. +-commutative4.8%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{1 + \left(-x\right) \cdot \left(-x\right)}}\right) \]
      3. sqr-neg4.8%

        \[\leadsto \log \left(x + \sqrt{1 + \color{blue}{x \cdot x}}\right) \]
      4. hypot-1-def5.8%

        \[\leadsto \log \left(x + \color{blue}{\mathsf{hypot}\left(1, x\right)}\right) \]
    3. Simplified5.8%

      \[\leadsto \color{blue}{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)} \]
    4. Taylor expanded in x around -inf 98.1%

      \[\leadsto \log \color{blue}{\left(\frac{-0.5}{x}\right)} \]

    if -1.25 < x < 8.59999999999999979e-4

    1. Initial program 6.6%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Step-by-step derivation
      1. sqr-neg6.6%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{\left(-x\right) \cdot \left(-x\right)} + 1}\right) \]
      2. +-commutative6.6%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{1 + \left(-x\right) \cdot \left(-x\right)}}\right) \]
      3. sqr-neg6.6%

        \[\leadsto \log \left(x + \sqrt{1 + \color{blue}{x \cdot x}}\right) \]
      4. hypot-1-def6.6%

        \[\leadsto \log \left(x + \color{blue}{\mathsf{hypot}\left(1, x\right)}\right) \]
    3. Simplified6.6%

      \[\leadsto \color{blue}{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)} \]
    4. Taylor expanded in x around 0 100.0%

      \[\leadsto \color{blue}{-0.16666666666666666 \cdot {x}^{3} + x} \]

    if 8.59999999999999979e-4 < x

    1. Initial program 46.1%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Step-by-step derivation
      1. sqr-neg46.1%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{\left(-x\right) \cdot \left(-x\right)} + 1}\right) \]
      2. +-commutative46.1%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{1 + \left(-x\right) \cdot \left(-x\right)}}\right) \]
      3. sqr-neg46.1%

        \[\leadsto \log \left(x + \sqrt{1 + \color{blue}{x \cdot x}}\right) \]
      4. hypot-1-def99.7%

        \[\leadsto \log \left(x + \color{blue}{\mathsf{hypot}\left(1, x\right)}\right) \]
    3. Simplified99.7%

      \[\leadsto \color{blue}{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)} \]
  3. Recombined 3 regimes into one program.
  4. Final simplification99.4%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -1.25:\\ \;\;\;\;\log \left(\frac{-0.5}{x}\right)\\ \mathbf{elif}\;x \leq 0.00086:\\ \;\;\;\;x + -0.16666666666666666 \cdot {x}^{3}\\ \mathbf{else}:\\ \;\;\;\;\log \left(x + \mathsf{hypot}\left(1, x\right)\right)\\ \end{array} \]

Alternative 4: 99.9% accurate, 1.0× speedup

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq -0.00082:\\ \;\;\;\;-\log \left(\mathsf{hypot}\left(1, x\right) - x\right)\\ \mathbf{elif}\;x \leq 0.00086:\\ \;\;\;\;x + -0.16666666666666666 \cdot {x}^{3}\\ \mathbf{else}:\\ \;\;\;\;\log \left(x + \mathsf{hypot}\left(1, x\right)\right)\\ \end{array} \end{array} \]
(FPCore (x)
 :precision binary64
 (if (<= x -0.00082)
   (- (log (- (hypot 1.0 x) x)))
   (if (<= x 0.00086)
     (+ x (* -0.16666666666666666 (pow x 3.0)))
     (log (+ x (hypot 1.0 x))))))
double code(double x) {
	double tmp;
	if (x <= -0.00082) {
		tmp = -log((hypot(1.0, x) - x));
	} else if (x <= 0.00086) {
		tmp = x + (-0.16666666666666666 * pow(x, 3.0));
	} else {
		tmp = log((x + hypot(1.0, x)));
	}
	return tmp;
}
public static double code(double x) {
	double tmp;
	if (x <= -0.00082) {
		tmp = -Math.log((Math.hypot(1.0, x) - x));
	} else if (x <= 0.00086) {
		tmp = x + (-0.16666666666666666 * Math.pow(x, 3.0));
	} else {
		tmp = Math.log((x + Math.hypot(1.0, x)));
	}
	return tmp;
}
def code(x):
	tmp = 0
	if x <= -0.00082:
		tmp = -math.log((math.hypot(1.0, x) - x))
	elif x <= 0.00086:
		tmp = x + (-0.16666666666666666 * math.pow(x, 3.0))
	else:
		tmp = math.log((x + math.hypot(1.0, x)))
	return tmp
function code(x)
	tmp = 0.0
	if (x <= -0.00082)
		tmp = Float64(-log(Float64(hypot(1.0, x) - x)));
	elseif (x <= 0.00086)
		tmp = Float64(x + Float64(-0.16666666666666666 * (x ^ 3.0)));
	else
		tmp = log(Float64(x + hypot(1.0, x)));
	end
	return tmp
end
function tmp_2 = code(x)
	tmp = 0.0;
	if (x <= -0.00082)
		tmp = -log((hypot(1.0, x) - x));
	elseif (x <= 0.00086)
		tmp = x + (-0.16666666666666666 * (x ^ 3.0));
	else
		tmp = log((x + hypot(1.0, x)));
	end
	tmp_2 = tmp;
end
code[x_] := If[LessEqual[x, -0.00082], (-N[Log[N[(N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision] - x), $MachinePrecision]], $MachinePrecision]), If[LessEqual[x, 0.00086], N[(x + N[(-0.16666666666666666 * N[Power[x, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[Log[N[(x + N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x \leq -0.00082:\\
\;\;\;\;-\log \left(\mathsf{hypot}\left(1, x\right) - x\right)\\

\mathbf{elif}\;x \leq 0.00086:\\
\;\;\;\;x + -0.16666666666666666 \cdot {x}^{3}\\

\mathbf{else}:\\
\;\;\;\;\log \left(x + \mathsf{hypot}\left(1, x\right)\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if x < -8.1999999999999998e-4

    1. Initial program 4.8%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Step-by-step derivation
      1. sqr-neg4.8%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{\left(-x\right) \cdot \left(-x\right)} + 1}\right) \]
      2. +-commutative4.8%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{1 + \left(-x\right) \cdot \left(-x\right)}}\right) \]
      3. sqr-neg4.8%

        \[\leadsto \log \left(x + \sqrt{1 + \color{blue}{x \cdot x}}\right) \]
      4. hypot-1-def5.8%

        \[\leadsto \log \left(x + \color{blue}{\mathsf{hypot}\left(1, x\right)}\right) \]
    3. Simplified5.8%

      \[\leadsto \color{blue}{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)} \]
    4. Step-by-step derivation
      1. flip-+4.5%

        \[\leadsto \log \color{blue}{\left(\frac{x \cdot x - \mathsf{hypot}\left(1, x\right) \cdot \mathsf{hypot}\left(1, x\right)}{x - \mathsf{hypot}\left(1, x\right)}\right)} \]
      2. div-sub4.5%

        \[\leadsto \log \color{blue}{\left(\frac{x \cdot x}{x - \mathsf{hypot}\left(1, x\right)} - \frac{\mathsf{hypot}\left(1, x\right) \cdot \mathsf{hypot}\left(1, x\right)}{x - \mathsf{hypot}\left(1, x\right)}\right)} \]
      3. hypot-udef4.5%

        \[\leadsto \log \left(\frac{x \cdot x}{x - \mathsf{hypot}\left(1, x\right)} - \frac{\color{blue}{\sqrt{1 \cdot 1 + x \cdot x}} \cdot \mathsf{hypot}\left(1, x\right)}{x - \mathsf{hypot}\left(1, x\right)}\right) \]
      4. hypot-udef4.5%

        \[\leadsto \log \left(\frac{x \cdot x}{x - \mathsf{hypot}\left(1, x\right)} - \frac{\sqrt{1 \cdot 1 + x \cdot x} \cdot \color{blue}{\sqrt{1 \cdot 1 + x \cdot x}}}{x - \mathsf{hypot}\left(1, x\right)}\right) \]
      5. add-sqr-sqrt4.5%

        \[\leadsto \log \left(\frac{x \cdot x}{x - \mathsf{hypot}\left(1, x\right)} - \frac{\color{blue}{1 \cdot 1 + x \cdot x}}{x - \mathsf{hypot}\left(1, x\right)}\right) \]
      6. metadata-eval4.5%

        \[\leadsto \log \left(\frac{x \cdot x}{x - \mathsf{hypot}\left(1, x\right)} - \frac{\color{blue}{1} + x \cdot x}{x - \mathsf{hypot}\left(1, x\right)}\right) \]
    5. Applied egg-rr4.5%

      \[\leadsto \log \color{blue}{\left(\frac{x \cdot x}{x - \mathsf{hypot}\left(1, x\right)} - \frac{1 + x \cdot x}{x - \mathsf{hypot}\left(1, x\right)}\right)} \]
    6. Step-by-step derivation
      1. div-sub4.8%

        \[\leadsto \log \color{blue}{\left(\frac{x \cdot x - \left(1 + x \cdot x\right)}{x - \mathsf{hypot}\left(1, x\right)}\right)} \]
      2. +-commutative4.8%

        \[\leadsto \log \left(\frac{x \cdot x - \color{blue}{\left(x \cdot x + 1\right)}}{x - \mathsf{hypot}\left(1, x\right)}\right) \]
      3. associate--r+59.1%

        \[\leadsto \log \left(\frac{\color{blue}{\left(x \cdot x - x \cdot x\right) - 1}}{x - \mathsf{hypot}\left(1, x\right)}\right) \]
      4. +-inverses98.5%

        \[\leadsto \log \left(\frac{\color{blue}{0} - 1}{x - \mathsf{hypot}\left(1, x\right)}\right) \]
      5. metadata-eval98.5%

        \[\leadsto \log \left(\frac{\color{blue}{-1}}{x - \mathsf{hypot}\left(1, x\right)}\right) \]
    7. Simplified98.5%

      \[\leadsto \log \color{blue}{\left(\frac{-1}{x - \mathsf{hypot}\left(1, x\right)}\right)} \]
    8. Step-by-step derivation
      1. *-un-lft-identity98.5%

        \[\leadsto \log \color{blue}{\left(1 \cdot \frac{-1}{x - \mathsf{hypot}\left(1, x\right)}\right)} \]
      2. log-prod98.5%

        \[\leadsto \color{blue}{\log 1 + \log \left(\frac{-1}{x - \mathsf{hypot}\left(1, x\right)}\right)} \]
      3. metadata-eval98.5%

        \[\leadsto \color{blue}{0} + \log \left(\frac{-1}{x - \mathsf{hypot}\left(1, x\right)}\right) \]
    9. Applied egg-rr98.5%

      \[\leadsto \color{blue}{0 + \log \left(\frac{-1}{x - \mathsf{hypot}\left(1, x\right)}\right)} \]
    10. Step-by-step derivation
      1. +-lft-identity98.5%

        \[\leadsto \color{blue}{\log \left(\frac{-1}{x - \mathsf{hypot}\left(1, x\right)}\right)} \]
      2. metadata-eval98.5%

        \[\leadsto \log \left(\frac{\color{blue}{\frac{1}{-1}}}{x - \mathsf{hypot}\left(1, x\right)}\right) \]
      3. associate-/r*98.5%

        \[\leadsto \log \color{blue}{\left(\frac{1}{-1 \cdot \left(x - \mathsf{hypot}\left(1, x\right)\right)}\right)} \]
      4. *-commutative98.5%

        \[\leadsto \log \left(\frac{1}{\color{blue}{\left(x - \mathsf{hypot}\left(1, x\right)\right) \cdot -1}}\right) \]
      5. log-rec98.5%

        \[\leadsto \color{blue}{-\log \left(\left(x - \mathsf{hypot}\left(1, x\right)\right) \cdot -1\right)} \]
      6. *-commutative98.5%

        \[\leadsto -\log \color{blue}{\left(-1 \cdot \left(x - \mathsf{hypot}\left(1, x\right)\right)\right)} \]
      7. neg-mul-198.5%

        \[\leadsto -\log \color{blue}{\left(-\left(x - \mathsf{hypot}\left(1, x\right)\right)\right)} \]
      8. neg-sub098.5%

        \[\leadsto -\log \color{blue}{\left(0 - \left(x - \mathsf{hypot}\left(1, x\right)\right)\right)} \]
      9. sub-neg98.5%

        \[\leadsto -\log \left(0 - \color{blue}{\left(x + \left(-\mathsf{hypot}\left(1, x\right)\right)\right)}\right) \]
      10. +-commutative98.5%

        \[\leadsto -\log \left(0 - \color{blue}{\left(\left(-\mathsf{hypot}\left(1, x\right)\right) + x\right)}\right) \]
      11. associate--r+98.5%

        \[\leadsto -\log \color{blue}{\left(\left(0 - \left(-\mathsf{hypot}\left(1, x\right)\right)\right) - x\right)} \]
      12. neg-sub098.5%

        \[\leadsto -\log \left(\color{blue}{\left(-\left(-\mathsf{hypot}\left(1, x\right)\right)\right)} - x\right) \]
      13. remove-double-neg98.5%

        \[\leadsto -\log \left(\color{blue}{\mathsf{hypot}\left(1, x\right)} - x\right) \]
    11. Simplified98.5%

      \[\leadsto \color{blue}{-\log \left(\mathsf{hypot}\left(1, x\right) - x\right)} \]

    if -8.1999999999999998e-4 < x < 8.59999999999999979e-4

    1. Initial program 6.6%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Step-by-step derivation
      1. sqr-neg6.6%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{\left(-x\right) \cdot \left(-x\right)} + 1}\right) \]
      2. +-commutative6.6%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{1 + \left(-x\right) \cdot \left(-x\right)}}\right) \]
      3. sqr-neg6.6%

        \[\leadsto \log \left(x + \sqrt{1 + \color{blue}{x \cdot x}}\right) \]
      4. hypot-1-def6.6%

        \[\leadsto \log \left(x + \color{blue}{\mathsf{hypot}\left(1, x\right)}\right) \]
    3. Simplified6.6%

      \[\leadsto \color{blue}{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)} \]
    4. Taylor expanded in x around 0 100.0%

      \[\leadsto \color{blue}{-0.16666666666666666 \cdot {x}^{3} + x} \]

    if 8.59999999999999979e-4 < x

    1. Initial program 46.1%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Step-by-step derivation
      1. sqr-neg46.1%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{\left(-x\right) \cdot \left(-x\right)} + 1}\right) \]
      2. +-commutative46.1%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{1 + \left(-x\right) \cdot \left(-x\right)}}\right) \]
      3. sqr-neg46.1%

        \[\leadsto \log \left(x + \sqrt{1 + \color{blue}{x \cdot x}}\right) \]
      4. hypot-1-def99.7%

        \[\leadsto \log \left(x + \color{blue}{\mathsf{hypot}\left(1, x\right)}\right) \]
    3. Simplified99.7%

      \[\leadsto \color{blue}{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)} \]
  3. Recombined 3 regimes into one program.
  4. Final simplification99.5%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -0.00082:\\ \;\;\;\;-\log \left(\mathsf{hypot}\left(1, x\right) - x\right)\\ \mathbf{elif}\;x \leq 0.00086:\\ \;\;\;\;x + -0.16666666666666666 \cdot {x}^{3}\\ \mathbf{else}:\\ \;\;\;\;\log \left(x + \mathsf{hypot}\left(1, x\right)\right)\\ \end{array} \]

Alternative 5: 99.3% accurate, 1.8× speedup

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq -1.25:\\ \;\;\;\;\log \left(\frac{-0.5}{x}\right)\\ \mathbf{elif}\;x \leq 0.95:\\ \;\;\;\;x + -0.16666666666666666 \cdot {x}^{3}\\ \mathbf{else}:\\ \;\;\;\;\log \left(x \cdot 2 + 0.5 \cdot \frac{1}{x}\right)\\ \end{array} \end{array} \]
(FPCore (x)
 :precision binary64
 (if (<= x -1.25)
   (log (/ -0.5 x))
   (if (<= x 0.95)
     (+ x (* -0.16666666666666666 (pow x 3.0)))
     (log (+ (* x 2.0) (* 0.5 (/ 1.0 x)))))))
double code(double x) {
	double tmp;
	if (x <= -1.25) {
		tmp = log((-0.5 / x));
	} else if (x <= 0.95) {
		tmp = x + (-0.16666666666666666 * pow(x, 3.0));
	} else {
		tmp = log(((x * 2.0) + (0.5 * (1.0 / x))));
	}
	return tmp;
}
real(8) function code(x)
    real(8), intent (in) :: x
    real(8) :: tmp
    if (x <= (-1.25d0)) then
        tmp = log(((-0.5d0) / x))
    else if (x <= 0.95d0) then
        tmp = x + ((-0.16666666666666666d0) * (x ** 3.0d0))
    else
        tmp = log(((x * 2.0d0) + (0.5d0 * (1.0d0 / x))))
    end if
    code = tmp
end function
public static double code(double x) {
	double tmp;
	if (x <= -1.25) {
		tmp = Math.log((-0.5 / x));
	} else if (x <= 0.95) {
		tmp = x + (-0.16666666666666666 * Math.pow(x, 3.0));
	} else {
		tmp = Math.log(((x * 2.0) + (0.5 * (1.0 / x))));
	}
	return tmp;
}
def code(x):
	tmp = 0
	if x <= -1.25:
		tmp = math.log((-0.5 / x))
	elif x <= 0.95:
		tmp = x + (-0.16666666666666666 * math.pow(x, 3.0))
	else:
		tmp = math.log(((x * 2.0) + (0.5 * (1.0 / x))))
	return tmp
function code(x)
	tmp = 0.0
	if (x <= -1.25)
		tmp = log(Float64(-0.5 / x));
	elseif (x <= 0.95)
		tmp = Float64(x + Float64(-0.16666666666666666 * (x ^ 3.0)));
	else
		tmp = log(Float64(Float64(x * 2.0) + Float64(0.5 * Float64(1.0 / x))));
	end
	return tmp
end
function tmp_2 = code(x)
	tmp = 0.0;
	if (x <= -1.25)
		tmp = log((-0.5 / x));
	elseif (x <= 0.95)
		tmp = x + (-0.16666666666666666 * (x ^ 3.0));
	else
		tmp = log(((x * 2.0) + (0.5 * (1.0 / x))));
	end
	tmp_2 = tmp;
end
code[x_] := If[LessEqual[x, -1.25], N[Log[N[(-0.5 / x), $MachinePrecision]], $MachinePrecision], If[LessEqual[x, 0.95], N[(x + N[(-0.16666666666666666 * N[Power[x, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[Log[N[(N[(x * 2.0), $MachinePrecision] + N[(0.5 * N[(1.0 / x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x \leq -1.25:\\
\;\;\;\;\log \left(\frac{-0.5}{x}\right)\\

\mathbf{elif}\;x \leq 0.95:\\
\;\;\;\;x + -0.16666666666666666 \cdot {x}^{3}\\

\mathbf{else}:\\
\;\;\;\;\log \left(x \cdot 2 + 0.5 \cdot \frac{1}{x}\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if x < -1.25

    1. Initial program 4.8%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Step-by-step derivation
      1. sqr-neg4.8%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{\left(-x\right) \cdot \left(-x\right)} + 1}\right) \]
      2. +-commutative4.8%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{1 + \left(-x\right) \cdot \left(-x\right)}}\right) \]
      3. sqr-neg4.8%

        \[\leadsto \log \left(x + \sqrt{1 + \color{blue}{x \cdot x}}\right) \]
      4. hypot-1-def5.8%

        \[\leadsto \log \left(x + \color{blue}{\mathsf{hypot}\left(1, x\right)}\right) \]
    3. Simplified5.8%

      \[\leadsto \color{blue}{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)} \]
    4. Taylor expanded in x around -inf 98.1%

      \[\leadsto \log \color{blue}{\left(\frac{-0.5}{x}\right)} \]

    if -1.25 < x < 0.94999999999999996

    1. Initial program 8.0%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Step-by-step derivation
      1. sqr-neg8.0%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{\left(-x\right) \cdot \left(-x\right)} + 1}\right) \]
      2. +-commutative8.0%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{1 + \left(-x\right) \cdot \left(-x\right)}}\right) \]
      3. sqr-neg8.0%

        \[\leadsto \log \left(x + \sqrt{1 + \color{blue}{x \cdot x}}\right) \]
      4. hypot-1-def8.0%

        \[\leadsto \log \left(x + \color{blue}{\mathsf{hypot}\left(1, x\right)}\right) \]
    3. Simplified8.0%

      \[\leadsto \color{blue}{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)} \]
    4. Taylor expanded in x around 0 99.5%

      \[\leadsto \color{blue}{-0.16666666666666666 \cdot {x}^{3} + x} \]

    if 0.94999999999999996 < x

    1. Initial program 44.7%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Step-by-step derivation
      1. sqr-neg44.7%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{\left(-x\right) \cdot \left(-x\right)} + 1}\right) \]
      2. +-commutative44.7%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{1 + \left(-x\right) \cdot \left(-x\right)}}\right) \]
      3. sqr-neg44.7%

        \[\leadsto \log \left(x + \sqrt{1 + \color{blue}{x \cdot x}}\right) \]
      4. hypot-1-def100.0%

        \[\leadsto \log \left(x + \color{blue}{\mathsf{hypot}\left(1, x\right)}\right) \]
    3. Simplified100.0%

      \[\leadsto \color{blue}{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)} \]
    4. Taylor expanded in x around inf 100.0%

      \[\leadsto \log \color{blue}{\left(2 \cdot x + 0.5 \cdot \frac{1}{x}\right)} \]
  3. Recombined 3 regimes into one program.
  4. Final simplification99.3%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -1.25:\\ \;\;\;\;\log \left(\frac{-0.5}{x}\right)\\ \mathbf{elif}\;x \leq 0.95:\\ \;\;\;\;x + -0.16666666666666666 \cdot {x}^{3}\\ \mathbf{else}:\\ \;\;\;\;\log \left(x \cdot 2 + 0.5 \cdot \frac{1}{x}\right)\\ \end{array} \]

Alternative 6: 99.2% accurate, 1.9× speedup

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq -1.25:\\ \;\;\;\;\log \left(\frac{-0.5}{x}\right)\\ \mathbf{elif}\;x \leq 1.25:\\ \;\;\;\;x + -0.16666666666666666 \cdot {x}^{3}\\ \mathbf{else}:\\ \;\;\;\;\log \left(x + x\right)\\ \end{array} \end{array} \]
(FPCore (x)
 :precision binary64
 (if (<= x -1.25)
   (log (/ -0.5 x))
   (if (<= x 1.25) (+ x (* -0.16666666666666666 (pow x 3.0))) (log (+ x x)))))
double code(double x) {
	double tmp;
	if (x <= -1.25) {
		tmp = log((-0.5 / x));
	} else if (x <= 1.25) {
		tmp = x + (-0.16666666666666666 * pow(x, 3.0));
	} else {
		tmp = log((x + x));
	}
	return tmp;
}
real(8) function code(x)
    real(8), intent (in) :: x
    real(8) :: tmp
    if (x <= (-1.25d0)) then
        tmp = log(((-0.5d0) / x))
    else if (x <= 1.25d0) then
        tmp = x + ((-0.16666666666666666d0) * (x ** 3.0d0))
    else
        tmp = log((x + x))
    end if
    code = tmp
end function
public static double code(double x) {
	double tmp;
	if (x <= -1.25) {
		tmp = Math.log((-0.5 / x));
	} else if (x <= 1.25) {
		tmp = x + (-0.16666666666666666 * Math.pow(x, 3.0));
	} else {
		tmp = Math.log((x + x));
	}
	return tmp;
}
def code(x):
	tmp = 0
	if x <= -1.25:
		tmp = math.log((-0.5 / x))
	elif x <= 1.25:
		tmp = x + (-0.16666666666666666 * math.pow(x, 3.0))
	else:
		tmp = math.log((x + x))
	return tmp
function code(x)
	tmp = 0.0
	if (x <= -1.25)
		tmp = log(Float64(-0.5 / x));
	elseif (x <= 1.25)
		tmp = Float64(x + Float64(-0.16666666666666666 * (x ^ 3.0)));
	else
		tmp = log(Float64(x + x));
	end
	return tmp
end
function tmp_2 = code(x)
	tmp = 0.0;
	if (x <= -1.25)
		tmp = log((-0.5 / x));
	elseif (x <= 1.25)
		tmp = x + (-0.16666666666666666 * (x ^ 3.0));
	else
		tmp = log((x + x));
	end
	tmp_2 = tmp;
end
code[x_] := If[LessEqual[x, -1.25], N[Log[N[(-0.5 / x), $MachinePrecision]], $MachinePrecision], If[LessEqual[x, 1.25], N[(x + N[(-0.16666666666666666 * N[Power[x, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[Log[N[(x + x), $MachinePrecision]], $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x \leq -1.25:\\
\;\;\;\;\log \left(\frac{-0.5}{x}\right)\\

\mathbf{elif}\;x \leq 1.25:\\
\;\;\;\;x + -0.16666666666666666 \cdot {x}^{3}\\

\mathbf{else}:\\
\;\;\;\;\log \left(x + x\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if x < -1.25

    1. Initial program 4.8%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Step-by-step derivation
      1. sqr-neg4.8%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{\left(-x\right) \cdot \left(-x\right)} + 1}\right) \]
      2. +-commutative4.8%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{1 + \left(-x\right) \cdot \left(-x\right)}}\right) \]
      3. sqr-neg4.8%

        \[\leadsto \log \left(x + \sqrt{1 + \color{blue}{x \cdot x}}\right) \]
      4. hypot-1-def5.8%

        \[\leadsto \log \left(x + \color{blue}{\mathsf{hypot}\left(1, x\right)}\right) \]
    3. Simplified5.8%

      \[\leadsto \color{blue}{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)} \]
    4. Taylor expanded in x around -inf 98.1%

      \[\leadsto \log \color{blue}{\left(\frac{-0.5}{x}\right)} \]

    if -1.25 < x < 1.25

    1. Initial program 8.0%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Step-by-step derivation
      1. sqr-neg8.0%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{\left(-x\right) \cdot \left(-x\right)} + 1}\right) \]
      2. +-commutative8.0%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{1 + \left(-x\right) \cdot \left(-x\right)}}\right) \]
      3. sqr-neg8.0%

        \[\leadsto \log \left(x + \sqrt{1 + \color{blue}{x \cdot x}}\right) \]
      4. hypot-1-def8.0%

        \[\leadsto \log \left(x + \color{blue}{\mathsf{hypot}\left(1, x\right)}\right) \]
    3. Simplified8.0%

      \[\leadsto \color{blue}{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)} \]
    4. Taylor expanded in x around 0 99.5%

      \[\leadsto \color{blue}{-0.16666666666666666 \cdot {x}^{3} + x} \]

    if 1.25 < x

    1. Initial program 44.7%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Step-by-step derivation
      1. sqr-neg44.7%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{\left(-x\right) \cdot \left(-x\right)} + 1}\right) \]
      2. +-commutative44.7%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{1 + \left(-x\right) \cdot \left(-x\right)}}\right) \]
      3. sqr-neg44.7%

        \[\leadsto \log \left(x + \sqrt{1 + \color{blue}{x \cdot x}}\right) \]
      4. hypot-1-def100.0%

        \[\leadsto \log \left(x + \color{blue}{\mathsf{hypot}\left(1, x\right)}\right) \]
    3. Simplified100.0%

      \[\leadsto \color{blue}{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)} \]
    4. Taylor expanded in x around inf 99.9%

      \[\leadsto \log \color{blue}{\left(2 \cdot x\right)} \]
    5. Step-by-step derivation
      1. count-299.9%

        \[\leadsto \log \color{blue}{\left(x + x\right)} \]
    6. Simplified99.9%

      \[\leadsto \log \color{blue}{\left(x + x\right)} \]
  3. Recombined 3 regimes into one program.
  4. Final simplification99.3%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -1.25:\\ \;\;\;\;\log \left(\frac{-0.5}{x}\right)\\ \mathbf{elif}\;x \leq 1.25:\\ \;\;\;\;x + -0.16666666666666666 \cdot {x}^{3}\\ \mathbf{else}:\\ \;\;\;\;\log \left(x + x\right)\\ \end{array} \]

Alternative 7: 99.0% accurate, 1.9× speedup

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq -1.25:\\ \;\;\;\;\log \left(\frac{-0.5}{x}\right)\\ \mathbf{elif}\;x \leq 1.25:\\ \;\;\;\;x\\ \mathbf{else}:\\ \;\;\;\;\log \left(x + x\right)\\ \end{array} \end{array} \]
(FPCore (x)
 :precision binary64
 (if (<= x -1.25) (log (/ -0.5 x)) (if (<= x 1.25) x (log (+ x x)))))
double code(double x) {
	double tmp;
	if (x <= -1.25) {
		tmp = log((-0.5 / x));
	} else if (x <= 1.25) {
		tmp = x;
	} else {
		tmp = log((x + x));
	}
	return tmp;
}
real(8) function code(x)
    real(8), intent (in) :: x
    real(8) :: tmp
    if (x <= (-1.25d0)) then
        tmp = log(((-0.5d0) / x))
    else if (x <= 1.25d0) then
        tmp = x
    else
        tmp = log((x + x))
    end if
    code = tmp
end function
public static double code(double x) {
	double tmp;
	if (x <= -1.25) {
		tmp = Math.log((-0.5 / x));
	} else if (x <= 1.25) {
		tmp = x;
	} else {
		tmp = Math.log((x + x));
	}
	return tmp;
}
def code(x):
	tmp = 0
	if x <= -1.25:
		tmp = math.log((-0.5 / x))
	elif x <= 1.25:
		tmp = x
	else:
		tmp = math.log((x + x))
	return tmp
function code(x)
	tmp = 0.0
	if (x <= -1.25)
		tmp = log(Float64(-0.5 / x));
	elseif (x <= 1.25)
		tmp = x;
	else
		tmp = log(Float64(x + x));
	end
	return tmp
end
function tmp_2 = code(x)
	tmp = 0.0;
	if (x <= -1.25)
		tmp = log((-0.5 / x));
	elseif (x <= 1.25)
		tmp = x;
	else
		tmp = log((x + x));
	end
	tmp_2 = tmp;
end
code[x_] := If[LessEqual[x, -1.25], N[Log[N[(-0.5 / x), $MachinePrecision]], $MachinePrecision], If[LessEqual[x, 1.25], x, N[Log[N[(x + x), $MachinePrecision]], $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x \leq -1.25:\\
\;\;\;\;\log \left(\frac{-0.5}{x}\right)\\

\mathbf{elif}\;x \leq 1.25:\\
\;\;\;\;x\\

\mathbf{else}:\\
\;\;\;\;\log \left(x + x\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if x < -1.25

    1. Initial program 4.8%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Step-by-step derivation
      1. sqr-neg4.8%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{\left(-x\right) \cdot \left(-x\right)} + 1}\right) \]
      2. +-commutative4.8%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{1 + \left(-x\right) \cdot \left(-x\right)}}\right) \]
      3. sqr-neg4.8%

        \[\leadsto \log \left(x + \sqrt{1 + \color{blue}{x \cdot x}}\right) \]
      4. hypot-1-def5.8%

        \[\leadsto \log \left(x + \color{blue}{\mathsf{hypot}\left(1, x\right)}\right) \]
    3. Simplified5.8%

      \[\leadsto \color{blue}{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)} \]
    4. Taylor expanded in x around -inf 98.1%

      \[\leadsto \log \color{blue}{\left(\frac{-0.5}{x}\right)} \]

    if -1.25 < x < 1.25

    1. Initial program 8.0%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Step-by-step derivation
      1. sqr-neg8.0%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{\left(-x\right) \cdot \left(-x\right)} + 1}\right) \]
      2. +-commutative8.0%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{1 + \left(-x\right) \cdot \left(-x\right)}}\right) \]
      3. sqr-neg8.0%

        \[\leadsto \log \left(x + \sqrt{1 + \color{blue}{x \cdot x}}\right) \]
      4. hypot-1-def8.0%

        \[\leadsto \log \left(x + \color{blue}{\mathsf{hypot}\left(1, x\right)}\right) \]
    3. Simplified8.0%

      \[\leadsto \color{blue}{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)} \]
    4. Taylor expanded in x around 0 99.1%

      \[\leadsto \color{blue}{x} \]

    if 1.25 < x

    1. Initial program 44.7%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Step-by-step derivation
      1. sqr-neg44.7%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{\left(-x\right) \cdot \left(-x\right)} + 1}\right) \]
      2. +-commutative44.7%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{1 + \left(-x\right) \cdot \left(-x\right)}}\right) \]
      3. sqr-neg44.7%

        \[\leadsto \log \left(x + \sqrt{1 + \color{blue}{x \cdot x}}\right) \]
      4. hypot-1-def100.0%

        \[\leadsto \log \left(x + \color{blue}{\mathsf{hypot}\left(1, x\right)}\right) \]
    3. Simplified100.0%

      \[\leadsto \color{blue}{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)} \]
    4. Taylor expanded in x around inf 99.9%

      \[\leadsto \log \color{blue}{\left(2 \cdot x\right)} \]
    5. Step-by-step derivation
      1. count-299.9%

        \[\leadsto \log \color{blue}{\left(x + x\right)} \]
    6. Simplified99.9%

      \[\leadsto \log \color{blue}{\left(x + x\right)} \]
  3. Recombined 3 regimes into one program.
  4. Final simplification99.0%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -1.25:\\ \;\;\;\;\log \left(\frac{-0.5}{x}\right)\\ \mathbf{elif}\;x \leq 1.25:\\ \;\;\;\;x\\ \mathbf{else}:\\ \;\;\;\;\log \left(x + x\right)\\ \end{array} \]

Alternative 8: 76.1% accurate, 2.0× speedup

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq 1.25:\\ \;\;\;\;x\\ \mathbf{else}:\\ \;\;\;\;\log \left(x + x\right)\\ \end{array} \end{array} \]
(FPCore (x) :precision binary64 (if (<= x 1.25) x (log (+ x x))))
double code(double x) {
	double tmp;
	if (x <= 1.25) {
		tmp = x;
	} else {
		tmp = log((x + x));
	}
	return tmp;
}
real(8) function code(x)
    real(8), intent (in) :: x
    real(8) :: tmp
    if (x <= 1.25d0) then
        tmp = x
    else
        tmp = log((x + x))
    end if
    code = tmp
end function
public static double code(double x) {
	double tmp;
	if (x <= 1.25) {
		tmp = x;
	} else {
		tmp = Math.log((x + x));
	}
	return tmp;
}
def code(x):
	tmp = 0
	if x <= 1.25:
		tmp = x
	else:
		tmp = math.log((x + x))
	return tmp
function code(x)
	tmp = 0.0
	if (x <= 1.25)
		tmp = x;
	else
		tmp = log(Float64(x + x));
	end
	return tmp
end
function tmp_2 = code(x)
	tmp = 0.0;
	if (x <= 1.25)
		tmp = x;
	else
		tmp = log((x + x));
	end
	tmp_2 = tmp;
end
code[x_] := If[LessEqual[x, 1.25], x, N[Log[N[(x + x), $MachinePrecision]], $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x \leq 1.25:\\
\;\;\;\;x\\

\mathbf{else}:\\
\;\;\;\;\log \left(x + x\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if x < 1.25

    1. Initial program 6.9%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Step-by-step derivation
      1. sqr-neg6.9%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{\left(-x\right) \cdot \left(-x\right)} + 1}\right) \]
      2. +-commutative6.9%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{1 + \left(-x\right) \cdot \left(-x\right)}}\right) \]
      3. sqr-neg6.9%

        \[\leadsto \log \left(x + \sqrt{1 + \color{blue}{x \cdot x}}\right) \]
      4. hypot-1-def7.2%

        \[\leadsto \log \left(x + \color{blue}{\mathsf{hypot}\left(1, x\right)}\right) \]
    3. Simplified7.2%

      \[\leadsto \color{blue}{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)} \]
    4. Taylor expanded in x around 0 67.2%

      \[\leadsto \color{blue}{x} \]

    if 1.25 < x

    1. Initial program 44.7%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Step-by-step derivation
      1. sqr-neg44.7%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{\left(-x\right) \cdot \left(-x\right)} + 1}\right) \]
      2. +-commutative44.7%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{1 + \left(-x\right) \cdot \left(-x\right)}}\right) \]
      3. sqr-neg44.7%

        \[\leadsto \log \left(x + \sqrt{1 + \color{blue}{x \cdot x}}\right) \]
      4. hypot-1-def100.0%

        \[\leadsto \log \left(x + \color{blue}{\mathsf{hypot}\left(1, x\right)}\right) \]
    3. Simplified100.0%

      \[\leadsto \color{blue}{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)} \]
    4. Taylor expanded in x around inf 99.9%

      \[\leadsto \log \color{blue}{\left(2 \cdot x\right)} \]
    5. Step-by-step derivation
      1. count-299.9%

        \[\leadsto \log \color{blue}{\left(x + x\right)} \]
    6. Simplified99.9%

      \[\leadsto \log \color{blue}{\left(x + x\right)} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification75.3%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq 1.25:\\ \;\;\;\;x\\ \mathbf{else}:\\ \;\;\;\;\log \left(x + x\right)\\ \end{array} \]

Alternative 9: 52.3% accurate, 207.0× speedup

\[\begin{array}{l} \\ x \end{array} \]
(FPCore (x) :precision binary64 x)
double code(double x) {
	return x;
}
real(8) function code(x)
    real(8), intent (in) :: x
    code = x
end function
public static double code(double x) {
	return x;
}
def code(x):
	return x
function code(x)
	return x
end
function tmp = code(x)
	tmp = x;
end
code[x_] := x
\begin{array}{l}

\\
x
\end{array}
Derivation
  1. Initial program 16.2%

    \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
  2. Step-by-step derivation
    1. sqr-neg16.2%

      \[\leadsto \log \left(x + \sqrt{\color{blue}{\left(-x\right) \cdot \left(-x\right)} + 1}\right) \]
    2. +-commutative16.2%

      \[\leadsto \log \left(x + \sqrt{\color{blue}{1 + \left(-x\right) \cdot \left(-x\right)}}\right) \]
    3. sqr-neg16.2%

      \[\leadsto \log \left(x + \sqrt{1 + \color{blue}{x \cdot x}}\right) \]
    4. hypot-1-def30.1%

      \[\leadsto \log \left(x + \color{blue}{\mathsf{hypot}\left(1, x\right)}\right) \]
  3. Simplified30.1%

    \[\leadsto \color{blue}{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)} \]
  4. Taylor expanded in x around 0 52.0%

    \[\leadsto \color{blue}{x} \]
  5. Final simplification52.0%

    \[\leadsto x \]

Developer target: 30.2% accurate, 1.0× speedup

\[\begin{array}{l} \\ \begin{array}{l} t_0 := \sqrt{x \cdot x + 1}\\ \mathbf{if}\;x < 0:\\ \;\;\;\;\log \left(\frac{-1}{x - t_0}\right)\\ \mathbf{else}:\\ \;\;\;\;\log \left(x + t_0\right)\\ \end{array} \end{array} \]
(FPCore (x)
 :precision binary64
 (let* ((t_0 (sqrt (+ (* x x) 1.0))))
   (if (< x 0.0) (log (/ -1.0 (- x t_0))) (log (+ x t_0)))))
double code(double x) {
	double t_0 = sqrt(((x * x) + 1.0));
	double tmp;
	if (x < 0.0) {
		tmp = log((-1.0 / (x - t_0)));
	} else {
		tmp = log((x + t_0));
	}
	return tmp;
}
real(8) function code(x)
    real(8), intent (in) :: x
    real(8) :: t_0
    real(8) :: tmp
    t_0 = sqrt(((x * x) + 1.0d0))
    if (x < 0.0d0) then
        tmp = log(((-1.0d0) / (x - t_0)))
    else
        tmp = log((x + t_0))
    end if
    code = tmp
end function
public static double code(double x) {
	double t_0 = Math.sqrt(((x * x) + 1.0));
	double tmp;
	if (x < 0.0) {
		tmp = Math.log((-1.0 / (x - t_0)));
	} else {
		tmp = Math.log((x + t_0));
	}
	return tmp;
}
def code(x):
	t_0 = math.sqrt(((x * x) + 1.0))
	tmp = 0
	if x < 0.0:
		tmp = math.log((-1.0 / (x - t_0)))
	else:
		tmp = math.log((x + t_0))
	return tmp
function code(x)
	t_0 = sqrt(Float64(Float64(x * x) + 1.0))
	tmp = 0.0
	if (x < 0.0)
		tmp = log(Float64(-1.0 / Float64(x - t_0)));
	else
		tmp = log(Float64(x + t_0));
	end
	return tmp
end
function tmp_2 = code(x)
	t_0 = sqrt(((x * x) + 1.0));
	tmp = 0.0;
	if (x < 0.0)
		tmp = log((-1.0 / (x - t_0)));
	else
		tmp = log((x + t_0));
	end
	tmp_2 = tmp;
end
code[x_] := Block[{t$95$0 = N[Sqrt[N[(N[(x * x), $MachinePrecision] + 1.0), $MachinePrecision]], $MachinePrecision]}, If[Less[x, 0.0], N[Log[N[(-1.0 / N[(x - t$95$0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision], N[Log[N[(x + t$95$0), $MachinePrecision]], $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
t_0 := \sqrt{x \cdot x + 1}\\
\mathbf{if}\;x < 0:\\
\;\;\;\;\log \left(\frac{-1}{x - t_0}\right)\\

\mathbf{else}:\\
\;\;\;\;\log \left(x + t_0\right)\\


\end{array}
\end{array}
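
The developer target's negative-x branch comes from multiplying the argument of the logarithm by its conjugate, which trades the cancelling addition for a benign subtraction (for x < 0, the denominator x - t_0 subtracts a positive number from a negative one, so no digits cancel):

\[x + \sqrt{x \cdot x + 1} = \frac{\left(x + \sqrt{x \cdot x + 1}\right) \cdot \left(\sqrt{x \cdot x + 1} - x\right)}{\sqrt{x \cdot x + 1} - x} = \frac{1}{\sqrt{x \cdot x + 1} - x} = \frac{-1}{x - \sqrt{x \cdot x + 1}} \]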

Reproduce

herbie shell --seed 2023275 
(FPCore (x)
  :name "Hyperbolic arcsine"
  :precision binary64

  :herbie-target
  (if (< x 0.0) (log (/ -1.0 (- x (sqrt (+ (* x x) 1.0))))) (log (+ x (sqrt (+ (* x x) 1.0)))))

  (log (+ x (sqrt (+ (* x x) 1.0)))))
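
Assuming a local Herbie installation, running the command above starts an interactive shell; pasting the FPCore into it reruns the improvement, and the fixed seed makes the input sampling, and therefore the reported accuracies, reproducible.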