Hyperbolic arcsine

Percentage Accurate: 17.6% → 99.8%
Time: 8.6s
Alternatives: 12
Speedup: 207.0×

Specification

?
\[\begin{array}{l} \\ \log \left(x + \sqrt{x \cdot x + 1}\right) \end{array} \]
(FPCore (x) :precision binary64 (log (+ x (sqrt (+ (* x x) 1.0)))))
/* Hyperbolic arcsine: asinh(x) = log(x + sqrt(x*x + 1)).
   Naive form; per this report it is only 17.6% accurate overall (large
   cancellation for x << 0, where x + sqrt(x*x+1) -> 0). */
double code(double x) {
	return log((x + sqrt(((x * x) + 1.0))));
}
! Hyperbolic arcsine: asinh(x) = log(x + sqrt(x*x + 1)) in double precision.
! Naive form; inaccurate for large-magnitude negative x (cancellation).
real(8) function code(x)
    real(8), intent (in) :: x
    code = log((x + sqrt(((x * x) + 1.0d0))))
end function
// Hyperbolic arcsine: asinh(x) = log(x + sqrt(x*x + 1)).
// Naive form; inaccurate for large-magnitude negative x (cancellation).
public static double code(double x) {
	return Math.log((x + Math.sqrt(((x * x) + 1.0))));
}
def code(x):
	"""Hyperbolic arcsine via the identity asinh(x) = log(x + sqrt(x^2 + 1)).

	Naive formulation; loses accuracy for large-magnitude negative x.
	"""
	radicand = (x * x) + 1.0
	return math.log(x + math.sqrt(radicand))
# Hyperbolic arcsine: asinh(x) = log(x + sqrt(x*x + 1)).
# Float64() wrappers pin each intermediate to binary64 rounding.
function code(x)
	return log(Float64(x + sqrt(Float64(Float64(x * x) + 1.0))))
end
% Hyperbolic arcsine: asinh(x) = log(x + sqrt(x*x + 1)).
% Naive form; inaccurate for large-magnitude negative x (cancellation).
function tmp = code(x)
	tmp = log((x + sqrt(((x * x) + 1.0))));
end
(* Hyperbolic arcsine: asinh(x) = log(x + sqrt(x^2 + 1)); each step rounded to $MachinePrecision. *)
code[x_] := N[Log[N[(x + N[Sqrt[N[(N[(x * x), $MachinePrecision] + 1.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}

\\
\log \left(x + \sqrt{x \cdot x + 1}\right)
\end{array}

Sampling outcomes in binary64 precision:

Local Percentage Accuracy vs ?

The average percentage accuracy by input value. The horizontal axis shows the value of an input variable; the variable is chosen in the title. The vertical axis is accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion. These can be toggled with buttons below the plot. The line is an average, while the dots represent individual samples.

Accuracy vs Speed?

Herbie found 12 alternatives:

AlternativeAccuracySpeedup
The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed–accuracy tradeoffs.

Initial Program: 17.6% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \log \left(x + \sqrt{x \cdot x + 1}\right) \end{array} \]
(FPCore (x) :precision binary64 (log (+ x (sqrt (+ (* x x) 1.0)))))
/* Initial program: naive asinh(x) = log(x + sqrt(x*x + 1)); 17.6% accurate. */
double code(double x) {
	return log((x + sqrt(((x * x) + 1.0))));
}
! Initial program: naive asinh(x) = log(x + sqrt(x*x + 1)) in double precision.
real(8) function code(x)
    real(8), intent (in) :: x
    code = log((x + sqrt(((x * x) + 1.0d0))))
end function
// Initial program: naive asinh(x) = log(x + sqrt(x*x + 1)).
public static double code(double x) {
	return Math.log((x + Math.sqrt(((x * x) + 1.0))));
}
def code(x):
	"""Initial program: naive asinh(x) = log(x + sqrt(x^2 + 1))."""
	inner = (x * x) + 1.0
	return math.log(x + math.sqrt(inner))
# Initial program: naive asinh(x) = log(x + sqrt(x*x + 1)); Float64() pins binary64 rounding.
function code(x)
	return log(Float64(x + sqrt(Float64(Float64(x * x) + 1.0))))
end
% Initial program: naive asinh(x) = log(x + sqrt(x*x + 1)).
function tmp = code(x)
	tmp = log((x + sqrt(((x * x) + 1.0))));
end
(* Initial program: naive asinh(x) = log(x + sqrt(x^2 + 1)), rounded stepwise to $MachinePrecision. *)
code[x_] := N[Log[N[(x + N[Sqrt[N[(N[(x * x), $MachinePrecision] + 1.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}

\\
\log \left(x + \sqrt{x \cdot x + 1}\right)
\end{array}

Alternative 1: 99.8% accurate, 0.3× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_0 := \log \left(\sqrt{x + \mathsf{hypot}\left(1, x\right)}\right)\\ \mathbf{if}\;x \leq -1.15:\\ \;\;\;\;\log \left(\frac{-0.5}{x}\right) + \frac{-0.25}{x \cdot x}\\ \mathbf{elif}\;x \leq 0.0255:\\ \;\;\;\;\mathsf{fma}\left(0.075, {x}^{5}, -0.044642857142857144 \cdot {x}^{7}\right) + \left(x + -0.16666666666666666 \cdot {x}^{3}\right)\\ \mathbf{else}:\\ \;\;\;\;t_0 + t_0\\ \end{array} \end{array} \]
(FPCore (x)
 :precision binary64
 (let* ((t_0 (log (sqrt (+ x (hypot 1.0 x))))))
   (if (<= x -1.15)
     (+ (log (/ -0.5 x)) (/ -0.25 (* x x)))
     (if (<= x 0.0255)
       (+
        (fma 0.075 (pow x 5.0) (* -0.044642857142857144 (pow x 7.0)))
        (+ x (* -0.16666666666666666 (pow x 3.0))))
       (+ t_0 t_0)))))
/* Alternative 1: asinh(x) split into three regimes (99.8% accurate).
     x <= -1.15  : asymptotic series log(-0.5/x) - 0.25/x^2 (avoids cancellation)
     x <= 0.0255 : odd Taylor polynomial x - x^3/6 + 0.075*x^5 - 0.04464...*x^7
     otherwise   : 2*log(sqrt(x + hypot(1, x)))  (hypot stabilizes sqrt(1 + x^2)) */
double code(double x) {
	/* t_0 is computed unconditionally, even on branches that ignore it. */
	double t_0 = log(sqrt((x + hypot(1.0, x))));
	double tmp;
	if (x <= -1.15) {
		tmp = log((-0.5 / x)) + (-0.25 / (x * x));
	} else if (x <= 0.0255) {
		tmp = fma(0.075, pow(x, 5.0), (-0.044642857142857144 * pow(x, 7.0))) + (x + (-0.16666666666666666 * pow(x, 3.0)));
	} else {
		tmp = t_0 + t_0;
	}
	return tmp;
}
# Alternative 1: asinh(x) split into three regimes (99.8% accurate).
#   x <= -1.15  : asymptotic series log(-0.5/x) - 0.25/x^2
#   x <= 0.0255 : odd Taylor polynomial of asinh around 0
#   otherwise   : 2*log(sqrt(x + hypot(1, x)))
function code(x)
	# t_0 is computed unconditionally, even on branches that ignore it.
	t_0 = log(sqrt(Float64(x + hypot(1.0, x))))
	tmp = 0.0
	if (x <= -1.15)
		tmp = Float64(log(Float64(-0.5 / x)) + Float64(-0.25 / Float64(x * x)));
	elseif (x <= 0.0255)
		tmp = Float64(fma(0.075, (x ^ 5.0), Float64(-0.044642857142857144 * (x ^ 7.0))) + Float64(x + Float64(-0.16666666666666666 * (x ^ 3.0))));
	else
		tmp = Float64(t_0 + t_0);
	end
	return tmp
end
(* Alternative 1: regime-split asinh(x); see the C listing above for the three branches. *)
code[x_] := Block[{t$95$0 = N[Log[N[Sqrt[N[(x + N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]], $MachinePrecision]}, If[LessEqual[x, -1.15], N[(N[Log[N[(-0.5 / x), $MachinePrecision]], $MachinePrecision] + N[(-0.25 / N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], If[LessEqual[x, 0.0255], N[(N[(0.075 * N[Power[x, 5.0], $MachinePrecision] + N[(-0.044642857142857144 * N[Power[x, 7.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(x + N[(-0.16666666666666666 * N[Power[x, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(t$95$0 + t$95$0), $MachinePrecision]]]]
\begin{array}{l}

\\
\begin{array}{l}
t_0 := \log \left(\sqrt{x + \mathsf{hypot}\left(1, x\right)}\right)\\
\mathbf{if}\;x \leq -1.15:\\
\;\;\;\;\log \left(\frac{-0.5}{x}\right) + \frac{-0.25}{x \cdot x}\\

\mathbf{elif}\;x \leq 0.0255:\\
\;\;\;\;\mathsf{fma}\left(0.075, {x}^{5}, -0.044642857142857144 \cdot {x}^{7}\right) + \left(x + -0.16666666666666666 \cdot {x}^{3}\right)\\

\mathbf{else}:\\
\;\;\;\;t_0 + t_0\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if x < -1.1499999999999999

    1. Initial program 2.5%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Step-by-step derivation
      1. +-commutative2.5%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{1 + x \cdot x}}\right) \]
      2. hypot-1-def3.7%

        \[\leadsto \log \left(x + \color{blue}{\mathsf{hypot}\left(1, x\right)}\right) \]
    3. Simplified3.7%

      \[\leadsto \color{blue}{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)} \]
    4. Step-by-step derivation
      1. flip-+3.1%

        \[\leadsto \log \color{blue}{\left(\frac{x \cdot x - \mathsf{hypot}\left(1, x\right) \cdot \mathsf{hypot}\left(1, x\right)}{x - \mathsf{hypot}\left(1, x\right)}\right)} \]
      2. div-sub2.2%

        \[\leadsto \log \color{blue}{\left(\frac{x \cdot x}{x - \mathsf{hypot}\left(1, x\right)} - \frac{\mathsf{hypot}\left(1, x\right) \cdot \mathsf{hypot}\left(1, x\right)}{x - \mathsf{hypot}\left(1, x\right)}\right)} \]
      3. hypot-udef2.2%

        \[\leadsto \log \left(\frac{x \cdot x}{x - \mathsf{hypot}\left(1, x\right)} - \frac{\color{blue}{\sqrt{1 \cdot 1 + x \cdot x}} \cdot \mathsf{hypot}\left(1, x\right)}{x - \mathsf{hypot}\left(1, x\right)}\right) \]
      4. hypot-udef2.2%

        \[\leadsto \log \left(\frac{x \cdot x}{x - \mathsf{hypot}\left(1, x\right)} - \frac{\sqrt{1 \cdot 1 + x \cdot x} \cdot \color{blue}{\sqrt{1 \cdot 1 + x \cdot x}}}{x - \mathsf{hypot}\left(1, x\right)}\right) \]
      5. add-sqr-sqrt2.2%

        \[\leadsto \log \left(\frac{x \cdot x}{x - \mathsf{hypot}\left(1, x\right)} - \frac{\color{blue}{1 \cdot 1 + x \cdot x}}{x - \mathsf{hypot}\left(1, x\right)}\right) \]
      6. metadata-eval2.2%

        \[\leadsto \log \left(\frac{x \cdot x}{x - \mathsf{hypot}\left(1, x\right)} - \frac{\color{blue}{1} + x \cdot x}{x - \mathsf{hypot}\left(1, x\right)}\right) \]
    5. Applied egg-rr2.2%

      \[\leadsto \log \color{blue}{\left(\frac{x \cdot x}{x - \mathsf{hypot}\left(1, x\right)} - \frac{1 + x \cdot x}{x - \mathsf{hypot}\left(1, x\right)}\right)} \]
    6. Step-by-step derivation
      1. div-sub3.1%

        \[\leadsto \log \color{blue}{\left(\frac{x \cdot x - \left(1 + x \cdot x\right)}{x - \mathsf{hypot}\left(1, x\right)}\right)} \]
      2. +-commutative3.1%

        \[\leadsto \log \left(\frac{x \cdot x - \color{blue}{\left(x \cdot x + 1\right)}}{x - \mathsf{hypot}\left(1, x\right)}\right) \]
      3. associate--r+52.2%

        \[\leadsto \log \left(\frac{\color{blue}{\left(x \cdot x - x \cdot x\right) - 1}}{x - \mathsf{hypot}\left(1, x\right)}\right) \]
      4. +-inverses100.0%

        \[\leadsto \log \left(\frac{\color{blue}{0} - 1}{x - \mathsf{hypot}\left(1, x\right)}\right) \]
      5. metadata-eval100.0%

        \[\leadsto \log \left(\frac{\color{blue}{-1}}{x - \mathsf{hypot}\left(1, x\right)}\right) \]
    7. Simplified100.0%

      \[\leadsto \log \color{blue}{\left(\frac{-1}{x - \mathsf{hypot}\left(1, x\right)}\right)} \]
    8. Taylor expanded in x around -inf 99.7%

      \[\leadsto \color{blue}{\left(\log \left(\frac{-1}{x}\right) + \log 0.5\right) - 0.25 \cdot \frac{1}{{x}^{2}}} \]
    9. Step-by-step derivation
      1. sub-neg99.7%

        \[\leadsto \color{blue}{\left(\log \left(\frac{-1}{x}\right) + \log 0.5\right) + \left(-0.25 \cdot \frac{1}{{x}^{2}}\right)} \]
      2. +-commutative99.7%

        \[\leadsto \color{blue}{\left(\log 0.5 + \log \left(\frac{-1}{x}\right)\right)} + \left(-0.25 \cdot \frac{1}{{x}^{2}}\right) \]
      3. metadata-eval99.7%

        \[\leadsto \left(\log 0.5 + \log \left(\frac{\color{blue}{-1}}{x}\right)\right) + \left(-0.25 \cdot \frac{1}{{x}^{2}}\right) \]
      4. distribute-neg-frac99.7%

        \[\leadsto \left(\log 0.5 + \log \color{blue}{\left(-\frac{1}{x}\right)}\right) + \left(-0.25 \cdot \frac{1}{{x}^{2}}\right) \]
      5. log-prod100.0%

        \[\leadsto \color{blue}{\log \left(0.5 \cdot \left(-\frac{1}{x}\right)\right)} + \left(-0.25 \cdot \frac{1}{{x}^{2}}\right) \]
      6. distribute-rgt-neg-in100.0%

        \[\leadsto \log \color{blue}{\left(-0.5 \cdot \frac{1}{x}\right)} + \left(-0.25 \cdot \frac{1}{{x}^{2}}\right) \]
      7. associate-*r/100.0%

        \[\leadsto \log \left(-\color{blue}{\frac{0.5 \cdot 1}{x}}\right) + \left(-0.25 \cdot \frac{1}{{x}^{2}}\right) \]
      8. metadata-eval100.0%

        \[\leadsto \log \left(-\frac{\color{blue}{0.5}}{x}\right) + \left(-0.25 \cdot \frac{1}{{x}^{2}}\right) \]
      9. distribute-neg-frac100.0%

        \[\leadsto \log \color{blue}{\left(\frac{-0.5}{x}\right)} + \left(-0.25 \cdot \frac{1}{{x}^{2}}\right) \]
      10. metadata-eval100.0%

        \[\leadsto \log \left(\frac{\color{blue}{-0.5}}{x}\right) + \left(-0.25 \cdot \frac{1}{{x}^{2}}\right) \]
      11. associate-*r/100.0%

        \[\leadsto \log \left(\frac{-0.5}{x}\right) + \left(-\color{blue}{\frac{0.25 \cdot 1}{{x}^{2}}}\right) \]
      12. metadata-eval100.0%

        \[\leadsto \log \left(\frac{-0.5}{x}\right) + \left(-\frac{\color{blue}{0.25}}{{x}^{2}}\right) \]
      13. distribute-neg-frac100.0%

        \[\leadsto \log \left(\frac{-0.5}{x}\right) + \color{blue}{\frac{-0.25}{{x}^{2}}} \]
      14. metadata-eval100.0%

        \[\leadsto \log \left(\frac{-0.5}{x}\right) + \frac{\color{blue}{-0.25}}{{x}^{2}} \]
      15. unpow2100.0%

        \[\leadsto \log \left(\frac{-0.5}{x}\right) + \frac{-0.25}{\color{blue}{x \cdot x}} \]
    10. Simplified100.0%

      \[\leadsto \color{blue}{\log \left(\frac{-0.5}{x}\right) + \frac{-0.25}{x \cdot x}} \]

    if -1.1499999999999999 < x < 0.0254999999999999984

    1. Initial program 7.8%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Step-by-step derivation
      1. +-commutative7.8%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{1 + x \cdot x}}\right) \]
      2. hypot-1-def7.8%

        \[\leadsto \log \left(x + \color{blue}{\mathsf{hypot}\left(1, x\right)}\right) \]
    3. Simplified7.8%

      \[\leadsto \color{blue}{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)} \]
    4. Step-by-step derivation
      1. add-cube-cbrt7.8%

        \[\leadsto \color{blue}{\left(\sqrt[3]{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)} \cdot \sqrt[3]{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)}\right) \cdot \sqrt[3]{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)}} \]
      2. pow37.8%

        \[\leadsto \color{blue}{{\left(\sqrt[3]{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)}\right)}^{3}} \]
    5. Applied egg-rr7.8%

      \[\leadsto \color{blue}{{\left(\sqrt[3]{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)}\right)}^{3}} \]
    6. Taylor expanded in x around 0 97.9%

      \[\leadsto {\left(\sqrt[3]{\color{blue}{-0.16666666666666666 \cdot {x}^{3} + \left(0.075 \cdot {x}^{5} + \left(-0.044642857142857144 \cdot {x}^{7} + x\right)\right)}}\right)}^{3} \]
    7. Step-by-step derivation
      1. rem-cube-cbrt100.0%

        \[\leadsto \color{blue}{-0.16666666666666666 \cdot {x}^{3} + \left(0.075 \cdot {x}^{5} + \left(-0.044642857142857144 \cdot {x}^{7} + x\right)\right)} \]
      2. +-commutative100.0%

        \[\leadsto \color{blue}{\left(0.075 \cdot {x}^{5} + \left(-0.044642857142857144 \cdot {x}^{7} + x\right)\right) + -0.16666666666666666 \cdot {x}^{3}} \]
      3. associate-+r+100.0%

        \[\leadsto \color{blue}{\left(\left(0.075 \cdot {x}^{5} + -0.044642857142857144 \cdot {x}^{7}\right) + x\right)} + -0.16666666666666666 \cdot {x}^{3} \]
      4. associate-+l+100.0%

        \[\leadsto \color{blue}{\left(0.075 \cdot {x}^{5} + -0.044642857142857144 \cdot {x}^{7}\right) + \left(x + -0.16666666666666666 \cdot {x}^{3}\right)} \]
      5. fma-def100.0%

        \[\leadsto \color{blue}{\mathsf{fma}\left(0.075, {x}^{5}, -0.044642857142857144 \cdot {x}^{7}\right)} + \left(x + -0.16666666666666666 \cdot {x}^{3}\right) \]
    8. Applied egg-rr100.0%

      \[\leadsto \color{blue}{\mathsf{fma}\left(0.075, {x}^{5}, -0.044642857142857144 \cdot {x}^{7}\right) + \left(x + -0.16666666666666666 \cdot {x}^{3}\right)} \]

    if 0.0254999999999999984 < x

    1. Initial program 48.6%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Step-by-step derivation
      1. +-commutative48.6%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{1 + x \cdot x}}\right) \]
      2. hypot-1-def100.0%

        \[\leadsto \log \left(x + \color{blue}{\mathsf{hypot}\left(1, x\right)}\right) \]
    3. Simplified100.0%

      \[\leadsto \color{blue}{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)} \]
    4. Step-by-step derivation
      1. add-sqr-sqrt99.9%

        \[\leadsto \log \color{blue}{\left(\sqrt{x + \mathsf{hypot}\left(1, x\right)} \cdot \sqrt{x + \mathsf{hypot}\left(1, x\right)}\right)} \]
      2. log-prod100.0%

        \[\leadsto \color{blue}{\log \left(\sqrt{x + \mathsf{hypot}\left(1, x\right)}\right) + \log \left(\sqrt{x + \mathsf{hypot}\left(1, x\right)}\right)} \]
    5. Applied egg-rr100.0%

      \[\leadsto \color{blue}{\log \left(\sqrt{x + \mathsf{hypot}\left(1, x\right)}\right) + \log \left(\sqrt{x + \mathsf{hypot}\left(1, x\right)}\right)} \]
  3. Recombined 3 regimes into one program.
  4. Final simplification100.0%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -1.15:\\ \;\;\;\;\log \left(\frac{-0.5}{x}\right) + \frac{-0.25}{x \cdot x}\\ \mathbf{elif}\;x \leq 0.0255:\\ \;\;\;\;\mathsf{fma}\left(0.075, {x}^{5}, -0.044642857142857144 \cdot {x}^{7}\right) + \left(x + -0.16666666666666666 \cdot {x}^{3}\right)\\ \mathbf{else}:\\ \;\;\;\;\log \left(\sqrt{x + \mathsf{hypot}\left(1, x\right)}\right) + \log \left(\sqrt{x + \mathsf{hypot}\left(1, x\right)}\right)\\ \end{array} \]

Alternative 2: 99.8% accurate, 0.5× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq -1.15:\\ \;\;\;\;\log \left(\frac{-0.5}{x}\right) + \frac{-0.25}{x \cdot x}\\ \mathbf{elif}\;x \leq 0.0255:\\ \;\;\;\;\mathsf{fma}\left(0.075, {x}^{5}, -0.044642857142857144 \cdot {x}^{7}\right) + \left(x + -0.16666666666666666 \cdot {x}^{3}\right)\\ \mathbf{else}:\\ \;\;\;\;\log \left(x + \mathsf{hypot}\left(1, x\right)\right)\\ \end{array} \end{array} \]
(FPCore (x)
 :precision binary64
 (if (<= x -1.15)
   (+ (log (/ -0.5 x)) (/ -0.25 (* x x)))
   (if (<= x 0.0255)
     (+
      (fma 0.075 (pow x 5.0) (* -0.044642857142857144 (pow x 7.0)))
      (+ x (* -0.16666666666666666 (pow x 3.0))))
     (log (+ x (hypot 1.0 x))))))
/* Alternative 2: asinh(x) split into three regimes (99.8% accurate).
     x <= -1.15  : asymptotic series log(-0.5/x) - 0.25/x^2
     x <= 0.0255 : odd Taylor polynomial of asinh around 0
     otherwise   : log(x + hypot(1, x))  (hypot stabilizes sqrt(1 + x^2)) */
double code(double x) {
	double tmp;
	if (x <= -1.15) {
		tmp = log((-0.5 / x)) + (-0.25 / (x * x));
	} else if (x <= 0.0255) {
		tmp = fma(0.075, pow(x, 5.0), (-0.044642857142857144 * pow(x, 7.0))) + (x + (-0.16666666666666666 * pow(x, 3.0)));
	} else {
		tmp = log((x + hypot(1.0, x)));
	}
	return tmp;
}
# Alternative 2: asinh(x) split into three regimes (99.8% accurate).
#   x <= -1.15  : asymptotic series log(-0.5/x) - 0.25/x^2
#   x <= 0.0255 : odd Taylor polynomial of asinh around 0
#   otherwise   : log(x + hypot(1, x))
function code(x)
	tmp = 0.0
	if (x <= -1.15)
		tmp = Float64(log(Float64(-0.5 / x)) + Float64(-0.25 / Float64(x * x)));
	elseif (x <= 0.0255)
		tmp = Float64(fma(0.075, (x ^ 5.0), Float64(-0.044642857142857144 * (x ^ 7.0))) + Float64(x + Float64(-0.16666666666666666 * (x ^ 3.0))));
	else
		tmp = log(Float64(x + hypot(1.0, x)));
	end
	return tmp
end
(* Alternative 2: regime-split asinh(x); see the C listing above for the three branches. *)
code[x_] := If[LessEqual[x, -1.15], N[(N[Log[N[(-0.5 / x), $MachinePrecision]], $MachinePrecision] + N[(-0.25 / N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], If[LessEqual[x, 0.0255], N[(N[(0.075 * N[Power[x, 5.0], $MachinePrecision] + N[(-0.044642857142857144 * N[Power[x, 7.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(x + N[(-0.16666666666666666 * N[Power[x, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[Log[N[(x + N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x \leq -1.15:\\
\;\;\;\;\log \left(\frac{-0.5}{x}\right) + \frac{-0.25}{x \cdot x}\\

\mathbf{elif}\;x \leq 0.0255:\\
\;\;\;\;\mathsf{fma}\left(0.075, {x}^{5}, -0.044642857142857144 \cdot {x}^{7}\right) + \left(x + -0.16666666666666666 \cdot {x}^{3}\right)\\

\mathbf{else}:\\
\;\;\;\;\log \left(x + \mathsf{hypot}\left(1, x\right)\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if x < -1.1499999999999999

    1. Initial program 2.5%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Step-by-step derivation
      1. +-commutative2.5%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{1 + x \cdot x}}\right) \]
      2. hypot-1-def3.7%

        \[\leadsto \log \left(x + \color{blue}{\mathsf{hypot}\left(1, x\right)}\right) \]
    3. Simplified3.7%

      \[\leadsto \color{blue}{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)} \]
    4. Step-by-step derivation
      1. flip-+3.1%

        \[\leadsto \log \color{blue}{\left(\frac{x \cdot x - \mathsf{hypot}\left(1, x\right) \cdot \mathsf{hypot}\left(1, x\right)}{x - \mathsf{hypot}\left(1, x\right)}\right)} \]
      2. div-sub2.2%

        \[\leadsto \log \color{blue}{\left(\frac{x \cdot x}{x - \mathsf{hypot}\left(1, x\right)} - \frac{\mathsf{hypot}\left(1, x\right) \cdot \mathsf{hypot}\left(1, x\right)}{x - \mathsf{hypot}\left(1, x\right)}\right)} \]
      3. hypot-udef2.2%

        \[\leadsto \log \left(\frac{x \cdot x}{x - \mathsf{hypot}\left(1, x\right)} - \frac{\color{blue}{\sqrt{1 \cdot 1 + x \cdot x}} \cdot \mathsf{hypot}\left(1, x\right)}{x - \mathsf{hypot}\left(1, x\right)}\right) \]
      4. hypot-udef2.2%

        \[\leadsto \log \left(\frac{x \cdot x}{x - \mathsf{hypot}\left(1, x\right)} - \frac{\sqrt{1 \cdot 1 + x \cdot x} \cdot \color{blue}{\sqrt{1 \cdot 1 + x \cdot x}}}{x - \mathsf{hypot}\left(1, x\right)}\right) \]
      5. add-sqr-sqrt2.2%

        \[\leadsto \log \left(\frac{x \cdot x}{x - \mathsf{hypot}\left(1, x\right)} - \frac{\color{blue}{1 \cdot 1 + x \cdot x}}{x - \mathsf{hypot}\left(1, x\right)}\right) \]
      6. metadata-eval2.2%

        \[\leadsto \log \left(\frac{x \cdot x}{x - \mathsf{hypot}\left(1, x\right)} - \frac{\color{blue}{1} + x \cdot x}{x - \mathsf{hypot}\left(1, x\right)}\right) \]
    5. Applied egg-rr2.2%

      \[\leadsto \log \color{blue}{\left(\frac{x \cdot x}{x - \mathsf{hypot}\left(1, x\right)} - \frac{1 + x \cdot x}{x - \mathsf{hypot}\left(1, x\right)}\right)} \]
    6. Step-by-step derivation
      1. div-sub3.1%

        \[\leadsto \log \color{blue}{\left(\frac{x \cdot x - \left(1 + x \cdot x\right)}{x - \mathsf{hypot}\left(1, x\right)}\right)} \]
      2. +-commutative3.1%

        \[\leadsto \log \left(\frac{x \cdot x - \color{blue}{\left(x \cdot x + 1\right)}}{x - \mathsf{hypot}\left(1, x\right)}\right) \]
      3. associate--r+52.2%

        \[\leadsto \log \left(\frac{\color{blue}{\left(x \cdot x - x \cdot x\right) - 1}}{x - \mathsf{hypot}\left(1, x\right)}\right) \]
      4. +-inverses100.0%

        \[\leadsto \log \left(\frac{\color{blue}{0} - 1}{x - \mathsf{hypot}\left(1, x\right)}\right) \]
      5. metadata-eval100.0%

        \[\leadsto \log \left(\frac{\color{blue}{-1}}{x - \mathsf{hypot}\left(1, x\right)}\right) \]
    7. Simplified100.0%

      \[\leadsto \log \color{blue}{\left(\frac{-1}{x - \mathsf{hypot}\left(1, x\right)}\right)} \]
    8. Taylor expanded in x around -inf 99.7%

      \[\leadsto \color{blue}{\left(\log \left(\frac{-1}{x}\right) + \log 0.5\right) - 0.25 \cdot \frac{1}{{x}^{2}}} \]
    9. Step-by-step derivation
      1. sub-neg99.7%

        \[\leadsto \color{blue}{\left(\log \left(\frac{-1}{x}\right) + \log 0.5\right) + \left(-0.25 \cdot \frac{1}{{x}^{2}}\right)} \]
      2. +-commutative99.7%

        \[\leadsto \color{blue}{\left(\log 0.5 + \log \left(\frac{-1}{x}\right)\right)} + \left(-0.25 \cdot \frac{1}{{x}^{2}}\right) \]
      3. metadata-eval99.7%

        \[\leadsto \left(\log 0.5 + \log \left(\frac{\color{blue}{-1}}{x}\right)\right) + \left(-0.25 \cdot \frac{1}{{x}^{2}}\right) \]
      4. distribute-neg-frac99.7%

        \[\leadsto \left(\log 0.5 + \log \color{blue}{\left(-\frac{1}{x}\right)}\right) + \left(-0.25 \cdot \frac{1}{{x}^{2}}\right) \]
      5. log-prod100.0%

        \[\leadsto \color{blue}{\log \left(0.5 \cdot \left(-\frac{1}{x}\right)\right)} + \left(-0.25 \cdot \frac{1}{{x}^{2}}\right) \]
      6. distribute-rgt-neg-in100.0%

        \[\leadsto \log \color{blue}{\left(-0.5 \cdot \frac{1}{x}\right)} + \left(-0.25 \cdot \frac{1}{{x}^{2}}\right) \]
      7. associate-*r/100.0%

        \[\leadsto \log \left(-\color{blue}{\frac{0.5 \cdot 1}{x}}\right) + \left(-0.25 \cdot \frac{1}{{x}^{2}}\right) \]
      8. metadata-eval100.0%

        \[\leadsto \log \left(-\frac{\color{blue}{0.5}}{x}\right) + \left(-0.25 \cdot \frac{1}{{x}^{2}}\right) \]
      9. distribute-neg-frac100.0%

        \[\leadsto \log \color{blue}{\left(\frac{-0.5}{x}\right)} + \left(-0.25 \cdot \frac{1}{{x}^{2}}\right) \]
      10. metadata-eval100.0%

        \[\leadsto \log \left(\frac{\color{blue}{-0.5}}{x}\right) + \left(-0.25 \cdot \frac{1}{{x}^{2}}\right) \]
      11. associate-*r/100.0%

        \[\leadsto \log \left(\frac{-0.5}{x}\right) + \left(-\color{blue}{\frac{0.25 \cdot 1}{{x}^{2}}}\right) \]
      12. metadata-eval100.0%

        \[\leadsto \log \left(\frac{-0.5}{x}\right) + \left(-\frac{\color{blue}{0.25}}{{x}^{2}}\right) \]
      13. distribute-neg-frac100.0%

        \[\leadsto \log \left(\frac{-0.5}{x}\right) + \color{blue}{\frac{-0.25}{{x}^{2}}} \]
      14. metadata-eval100.0%

        \[\leadsto \log \left(\frac{-0.5}{x}\right) + \frac{\color{blue}{-0.25}}{{x}^{2}} \]
      15. unpow2100.0%

        \[\leadsto \log \left(\frac{-0.5}{x}\right) + \frac{-0.25}{\color{blue}{x \cdot x}} \]
    10. Simplified100.0%

      \[\leadsto \color{blue}{\log \left(\frac{-0.5}{x}\right) + \frac{-0.25}{x \cdot x}} \]

    if -1.1499999999999999 < x < 0.0254999999999999984

    1. Initial program 7.8%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Step-by-step derivation
      1. +-commutative7.8%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{1 + x \cdot x}}\right) \]
      2. hypot-1-def7.8%

        \[\leadsto \log \left(x + \color{blue}{\mathsf{hypot}\left(1, x\right)}\right) \]
    3. Simplified7.8%

      \[\leadsto \color{blue}{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)} \]
    4. Step-by-step derivation
      1. add-cube-cbrt7.8%

        \[\leadsto \color{blue}{\left(\sqrt[3]{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)} \cdot \sqrt[3]{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)}\right) \cdot \sqrt[3]{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)}} \]
      2. pow37.8%

        \[\leadsto \color{blue}{{\left(\sqrt[3]{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)}\right)}^{3}} \]
    5. Applied egg-rr7.8%

      \[\leadsto \color{blue}{{\left(\sqrt[3]{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)}\right)}^{3}} \]
    6. Taylor expanded in x around 0 97.9%

      \[\leadsto {\left(\sqrt[3]{\color{blue}{-0.16666666666666666 \cdot {x}^{3} + \left(0.075 \cdot {x}^{5} + \left(-0.044642857142857144 \cdot {x}^{7} + x\right)\right)}}\right)}^{3} \]
    7. Step-by-step derivation
      1. rem-cube-cbrt100.0%

        \[\leadsto \color{blue}{-0.16666666666666666 \cdot {x}^{3} + \left(0.075 \cdot {x}^{5} + \left(-0.044642857142857144 \cdot {x}^{7} + x\right)\right)} \]
      2. +-commutative100.0%

        \[\leadsto \color{blue}{\left(0.075 \cdot {x}^{5} + \left(-0.044642857142857144 \cdot {x}^{7} + x\right)\right) + -0.16666666666666666 \cdot {x}^{3}} \]
      3. associate-+r+100.0%

        \[\leadsto \color{blue}{\left(\left(0.075 \cdot {x}^{5} + -0.044642857142857144 \cdot {x}^{7}\right) + x\right)} + -0.16666666666666666 \cdot {x}^{3} \]
      4. associate-+l+100.0%

        \[\leadsto \color{blue}{\left(0.075 \cdot {x}^{5} + -0.044642857142857144 \cdot {x}^{7}\right) + \left(x + -0.16666666666666666 \cdot {x}^{3}\right)} \]
      5. fma-def100.0%

        \[\leadsto \color{blue}{\mathsf{fma}\left(0.075, {x}^{5}, -0.044642857142857144 \cdot {x}^{7}\right)} + \left(x + -0.16666666666666666 \cdot {x}^{3}\right) \]
    8. Applied egg-rr100.0%

      \[\leadsto \color{blue}{\mathsf{fma}\left(0.075, {x}^{5}, -0.044642857142857144 \cdot {x}^{7}\right) + \left(x + -0.16666666666666666 \cdot {x}^{3}\right)} \]

    if 0.0254999999999999984 < x

    1. Initial program 48.6%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Step-by-step derivation
      1. +-commutative48.6%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{1 + x \cdot x}}\right) \]
      2. hypot-1-def100.0%

        \[\leadsto \log \left(x + \color{blue}{\mathsf{hypot}\left(1, x\right)}\right) \]
    3. Simplified100.0%

      \[\leadsto \color{blue}{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)} \]
  3. Recombined 3 regimes into one program.
  4. Final simplification100.0%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -1.15:\\ \;\;\;\;\log \left(\frac{-0.5}{x}\right) + \frac{-0.25}{x \cdot x}\\ \mathbf{elif}\;x \leq 0.0255:\\ \;\;\;\;\mathsf{fma}\left(0.075, {x}^{5}, -0.044642857142857144 \cdot {x}^{7}\right) + \left(x + -0.16666666666666666 \cdot {x}^{3}\right)\\ \mathbf{else}:\\ \;\;\;\;\log \left(x + \mathsf{hypot}\left(1, x\right)\right)\\ \end{array} \]

Alternative 3: 99.8% accurate, 0.6× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq -1.15:\\ \;\;\;\;\log \left(\frac{-0.5}{x}\right) + \frac{-0.25}{x \cdot x}\\ \mathbf{elif}\;x \leq 0.0255:\\ \;\;\;\;-0.16666666666666666 \cdot {x}^{3} + \left(0.075 \cdot {x}^{5} + \left(x + -0.044642857142857144 \cdot {x}^{7}\right)\right)\\ \mathbf{else}:\\ \;\;\;\;\log \left(x + \mathsf{hypot}\left(1, x\right)\right)\\ \end{array} \end{array} \]
(FPCore (x)
 :precision binary64
 (if (<= x -1.15)
   (+ (log (/ -0.5 x)) (/ -0.25 (* x x)))
   (if (<= x 0.0255)
     (+
      (* -0.16666666666666666 (pow x 3.0))
      (+ (* 0.075 (pow x 5.0)) (+ x (* -0.044642857142857144 (pow x 7.0)))))
     (log (+ x (hypot 1.0 x))))))
/* Alternative 3: asinh(x) split into three regimes (99.8% accurate).
     x <= -1.15  : asymptotic series log(-0.5/x) - 0.25/x^2
     x <= 0.0255 : odd Taylor polynomial (no fma, unlike Alternatives 1-2)
     otherwise   : log(x + hypot(1, x)) */
double code(double x) {
	double tmp;
	if (x <= -1.15) {
		tmp = log((-0.5 / x)) + (-0.25 / (x * x));
	} else if (x <= 0.0255) {
		tmp = (-0.16666666666666666 * pow(x, 3.0)) + ((0.075 * pow(x, 5.0)) + (x + (-0.044642857142857144 * pow(x, 7.0))));
	} else {
		tmp = log((x + hypot(1.0, x)));
	}
	return tmp;
}
// Alternative 3: asinh(x) split into three regimes (99.8% accurate).
//   x <= -1.15  : asymptotic series log(-0.5/x) - 0.25/x^2
//   x <= 0.0255 : odd Taylor polynomial of asinh around 0
//   otherwise   : log(x + hypot(1, x))
public static double code(double x) {
	double tmp;
	if (x <= -1.15) {
		tmp = Math.log((-0.5 / x)) + (-0.25 / (x * x));
	} else if (x <= 0.0255) {
		tmp = (-0.16666666666666666 * Math.pow(x, 3.0)) + ((0.075 * Math.pow(x, 5.0)) + (x + (-0.044642857142857144 * Math.pow(x, 7.0))));
	} else {
		tmp = Math.log((x + Math.hypot(1.0, x)));
	}
	return tmp;
}
def code(x):
	"""Regime-split approximation of asinh(x) = log(x + sqrt(x^2 + 1)).

	Three regimes: an asymptotic expansion for large negative x, an odd
	Taylor polynomial near zero, and a hypot-stabilized log elsewhere.
	"""
	if x <= -1.15:
		# Asymptotic series avoids the cancellation in x + sqrt(x^2 + 1).
		result = math.log(-0.5 / x) + -0.25 / (x * x)
	elif x <= 0.0255:
		# Taylor polynomial: x - x^3/6 + 0.075*x^5 - 0.04464...*x^7.
		cubic = math.pow(x, 3.0)
		quintic = math.pow(x, 5.0)
		septic = math.pow(x, 7.0)
		result = (-0.16666666666666666 * cubic) + ((0.075 * quintic) + (x + (-0.044642857142857144 * septic)))
	else:
		# hypot(1, x) computes sqrt(1 + x^2) without overflow.
		result = math.log(x + math.hypot(1.0, x))
	return result
# Alternative 3: asinh(x) split into three regimes (99.8% accurate).
#   x <= -1.15  : asymptotic series log(-0.5/x) - 0.25/x^2
#   x <= 0.0255 : odd Taylor polynomial of asinh around 0
#   otherwise   : log(x + hypot(1, x))
function code(x)
	tmp = 0.0
	if (x <= -1.15)
		tmp = Float64(log(Float64(-0.5 / x)) + Float64(-0.25 / Float64(x * x)));
	elseif (x <= 0.0255)
		tmp = Float64(Float64(-0.16666666666666666 * (x ^ 3.0)) + Float64(Float64(0.075 * (x ^ 5.0)) + Float64(x + Float64(-0.044642857142857144 * (x ^ 7.0)))));
	else
		tmp = log(Float64(x + hypot(1.0, x)));
	end
	return tmp
end
% Hyperbolic arcsine asinh(x) in three regimes (see branch comments).
function tmp_2 = code(x)
	tmp = 0.0;
	if (x <= -1.15)
		% asymptotic form for large negative x: log(-0.5/x) - 0.25/x^2
		tmp = log((-0.5 / x)) + (-0.25 / (x * x));
	elseif (x <= 0.0255)
		% odd Taylor polynomial about 0: x - x^3/6 + 3x^5/40 - 5x^7/112
		tmp = (-0.16666666666666666 * (x ^ 3.0)) + ((0.075 * (x ^ 5.0)) + (x + (-0.044642857142857144 * (x ^ 7.0))));
	else
		% hypot(1, x) evaluates sqrt(1 + x^2)
		tmp = log((x + hypot(1.0, x)));
	end
	tmp_2 = tmp;
end
(* Hyperbolic arcsine asinh(x): asymptotic log form for x <= -1.15, odd Taylor polynomial (to x^7) for x <= 0.0255, log(x + Sqrt[1 + x^2]) otherwise. *)
code[x_] := If[LessEqual[x, -1.15], N[(N[Log[N[(-0.5 / x), $MachinePrecision]], $MachinePrecision] + N[(-0.25 / N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], If[LessEqual[x, 0.0255], N[(N[(-0.16666666666666666 * N[Power[x, 3.0], $MachinePrecision]), $MachinePrecision] + N[(N[(0.075 * N[Power[x, 5.0], $MachinePrecision]), $MachinePrecision] + N[(x + N[(-0.044642857142857144 * N[Power[x, 7.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[Log[N[(x + N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x \leq -1.15:\\
\;\;\;\;\log \left(\frac{-0.5}{x}\right) + \frac{-0.25}{x \cdot x}\\

\mathbf{elif}\;x \leq 0.0255:\\
\;\;\;\;-0.16666666666666666 \cdot {x}^{3} + \left(0.075 \cdot {x}^{5} + \left(x + -0.044642857142857144 \cdot {x}^{7}\right)\right)\\

\mathbf{else}:\\
\;\;\;\;\log \left(x + \mathsf{hypot}\left(1, x\right)\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if x < -1.1499999999999999

    1. Initial program 2.5%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Step-by-step derivation
      1. +-commutative (2.5% accuracy)

        \[\leadsto \log \left(x + \sqrt{\color{blue}{1 + x \cdot x}}\right) \]
      2. hypot-1-def (3.7% accuracy)

        \[\leadsto \log \left(x + \color{blue}{\mathsf{hypot}\left(1, x\right)}\right) \]
    3. Simplified3.7%

      \[\leadsto \color{blue}{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)} \]
    4. Step-by-step derivation
      1. flip-+3.1%

        \[\leadsto \log \color{blue}{\left(\frac{x \cdot x - \mathsf{hypot}\left(1, x\right) \cdot \mathsf{hypot}\left(1, x\right)}{x - \mathsf{hypot}\left(1, x\right)}\right)} \]
      2. div-sub2.2%

        \[\leadsto \log \color{blue}{\left(\frac{x \cdot x}{x - \mathsf{hypot}\left(1, x\right)} - \frac{\mathsf{hypot}\left(1, x\right) \cdot \mathsf{hypot}\left(1, x\right)}{x - \mathsf{hypot}\left(1, x\right)}\right)} \]
      3. hypot-udef2.2%

        \[\leadsto \log \left(\frac{x \cdot x}{x - \mathsf{hypot}\left(1, x\right)} - \frac{\color{blue}{\sqrt{1 \cdot 1 + x \cdot x}} \cdot \mathsf{hypot}\left(1, x\right)}{x - \mathsf{hypot}\left(1, x\right)}\right) \]
      4. hypot-udef2.2%

        \[\leadsto \log \left(\frac{x \cdot x}{x - \mathsf{hypot}\left(1, x\right)} - \frac{\sqrt{1 \cdot 1 + x \cdot x} \cdot \color{blue}{\sqrt{1 \cdot 1 + x \cdot x}}}{x - \mathsf{hypot}\left(1, x\right)}\right) \]
      5. add-sqr-sqrt2.2%

        \[\leadsto \log \left(\frac{x \cdot x}{x - \mathsf{hypot}\left(1, x\right)} - \frac{\color{blue}{1 \cdot 1 + x \cdot x}}{x - \mathsf{hypot}\left(1, x\right)}\right) \]
      6. metadata-eval2.2%

        \[\leadsto \log \left(\frac{x \cdot x}{x - \mathsf{hypot}\left(1, x\right)} - \frac{\color{blue}{1} + x \cdot x}{x - \mathsf{hypot}\left(1, x\right)}\right) \]
    5. Applied egg-rr2.2%

      \[\leadsto \log \color{blue}{\left(\frac{x \cdot x}{x - \mathsf{hypot}\left(1, x\right)} - \frac{1 + x \cdot x}{x - \mathsf{hypot}\left(1, x\right)}\right)} \]
    6. Step-by-step derivation
      1. div-sub3.1%

        \[\leadsto \log \color{blue}{\left(\frac{x \cdot x - \left(1 + x \cdot x\right)}{x - \mathsf{hypot}\left(1, x\right)}\right)} \]
      2. +-commutative3.1%

        \[\leadsto \log \left(\frac{x \cdot x - \color{blue}{\left(x \cdot x + 1\right)}}{x - \mathsf{hypot}\left(1, x\right)}\right) \]
      3. associate--r+ (52.2% accuracy)

        \[\leadsto \log \left(\frac{\color{blue}{\left(x \cdot x - x \cdot x\right) - 1}}{x - \mathsf{hypot}\left(1, x\right)}\right) \]
      4. +-inverses100.0%

        \[\leadsto \log \left(\frac{\color{blue}{0} - 1}{x - \mathsf{hypot}\left(1, x\right)}\right) \]
      5. metadata-eval100.0%

        \[\leadsto \log \left(\frac{\color{blue}{-1}}{x - \mathsf{hypot}\left(1, x\right)}\right) \]
    7. Simplified100.0%

      \[\leadsto \log \color{blue}{\left(\frac{-1}{x - \mathsf{hypot}\left(1, x\right)}\right)} \]
    8. Taylor expanded in x around -inf 99.7%

      \[\leadsto \color{blue}{\left(\log \left(\frac{-1}{x}\right) + \log 0.5\right) - 0.25 \cdot \frac{1}{{x}^{2}}} \]
    9. Step-by-step derivation
      1. sub-neg99.7%

        \[\leadsto \color{blue}{\left(\log \left(\frac{-1}{x}\right) + \log 0.5\right) + \left(-0.25 \cdot \frac{1}{{x}^{2}}\right)} \]
      2. +-commutative99.7%

        \[\leadsto \color{blue}{\left(\log 0.5 + \log \left(\frac{-1}{x}\right)\right)} + \left(-0.25 \cdot \frac{1}{{x}^{2}}\right) \]
      3. metadata-eval99.7%

        \[\leadsto \left(\log 0.5 + \log \left(\frac{\color{blue}{-1}}{x}\right)\right) + \left(-0.25 \cdot \frac{1}{{x}^{2}}\right) \]
      4. distribute-neg-frac99.7%

        \[\leadsto \left(\log 0.5 + \log \color{blue}{\left(-\frac{1}{x}\right)}\right) + \left(-0.25 \cdot \frac{1}{{x}^{2}}\right) \]
      5. log-prod100.0%

        \[\leadsto \color{blue}{\log \left(0.5 \cdot \left(-\frac{1}{x}\right)\right)} + \left(-0.25 \cdot \frac{1}{{x}^{2}}\right) \]
      6. distribute-rgt-neg-in100.0%

        \[\leadsto \log \color{blue}{\left(-0.5 \cdot \frac{1}{x}\right)} + \left(-0.25 \cdot \frac{1}{{x}^{2}}\right) \]
      7. associate-*r/100.0%

        \[\leadsto \log \left(-\color{blue}{\frac{0.5 \cdot 1}{x}}\right) + \left(-0.25 \cdot \frac{1}{{x}^{2}}\right) \]
      8. metadata-eval100.0%

        \[\leadsto \log \left(-\frac{\color{blue}{0.5}}{x}\right) + \left(-0.25 \cdot \frac{1}{{x}^{2}}\right) \]
      9. distribute-neg-frac100.0%

        \[\leadsto \log \color{blue}{\left(\frac{-0.5}{x}\right)} + \left(-0.25 \cdot \frac{1}{{x}^{2}}\right) \]
      10. metadata-eval100.0%

        \[\leadsto \log \left(\frac{\color{blue}{-0.5}}{x}\right) + \left(-0.25 \cdot \frac{1}{{x}^{2}}\right) \]
      11. associate-*r/100.0%

        \[\leadsto \log \left(\frac{-0.5}{x}\right) + \left(-\color{blue}{\frac{0.25 \cdot 1}{{x}^{2}}}\right) \]
      12. metadata-eval100.0%

        \[\leadsto \log \left(\frac{-0.5}{x}\right) + \left(-\frac{\color{blue}{0.25}}{{x}^{2}}\right) \]
      13. distribute-neg-frac100.0%

        \[\leadsto \log \left(\frac{-0.5}{x}\right) + \color{blue}{\frac{-0.25}{{x}^{2}}} \]
      14. metadata-eval100.0%

        \[\leadsto \log \left(\frac{-0.5}{x}\right) + \frac{\color{blue}{-0.25}}{{x}^{2}} \]
      15. unpow2100.0%

        \[\leadsto \log \left(\frac{-0.5}{x}\right) + \frac{-0.25}{\color{blue}{x \cdot x}} \]
    10. Simplified100.0%

      \[\leadsto \color{blue}{\log \left(\frac{-0.5}{x}\right) + \frac{-0.25}{x \cdot x}} \]

    if -1.1499999999999999 < x < 0.0254999999999999984

    1. Initial program 7.8%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Step-by-step derivation
      1. +-commutative7.8%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{1 + x \cdot x}}\right) \]
      2. hypot-1-def7.8%

        \[\leadsto \log \left(x + \color{blue}{\mathsf{hypot}\left(1, x\right)}\right) \]
    3. Simplified7.8%

      \[\leadsto \color{blue}{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)} \]
    4. Taylor expanded in x around 0 100.0%

      \[\leadsto \color{blue}{-0.16666666666666666 \cdot {x}^{3} + \left(0.075 \cdot {x}^{5} + \left(-0.044642857142857144 \cdot {x}^{7} + x\right)\right)} \]

    if 0.0254999999999999984 < x

    1. Initial program 48.6%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Step-by-step derivation
      1. +-commutative48.6%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{1 + x \cdot x}}\right) \]
      2. hypot-1-def100.0%

        \[\leadsto \log \left(x + \color{blue}{\mathsf{hypot}\left(1, x\right)}\right) \]
    3. Simplified100.0%

      \[\leadsto \color{blue}{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)} \]
  3. Recombined 3 regimes into one program.
  4. Final simplification100.0%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -1.15:\\ \;\;\;\;\log \left(\frac{-0.5}{x}\right) + \frac{-0.25}{x \cdot x}\\ \mathbf{elif}\;x \leq 0.0255:\\ \;\;\;\;-0.16666666666666666 \cdot {x}^{3} + \left(0.075 \cdot {x}^{5} + \left(x + -0.044642857142857144 \cdot {x}^{7}\right)\right)\\ \mathbf{else}:\\ \;\;\;\;\log \left(x + \mathsf{hypot}\left(1, x\right)\right)\\ \end{array} \]

Alternative 4: 99.7% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq -1:\\ \;\;\;\;\log \left(\frac{-0.5}{x}\right) + \frac{-0.25}{x \cdot x}\\ \mathbf{elif}\;x \leq 0.0009:\\ \;\;\;\;x + -0.16666666666666666 \cdot {x}^{3}\\ \mathbf{else}:\\ \;\;\;\;\log \left(x + \mathsf{hypot}\left(1, x\right)\right)\\ \end{array} \end{array} \]
; asinh(x) in three regimes: asymptotic log form (x <= -1), cubic Taylor
; polynomial x - x^3/6 (x <= 0.0009), log(x + hypot(1, x)) elsewhere.
(FPCore (x)
 :precision binary64
 (if (<= x -1.0)
   (+ (log (/ -0.5 x)) (/ -0.25 (* x x)))
   (if (<= x 0.0009)
     (+ x (* -0.16666666666666666 (pow x 3.0)))
     (log (+ x (hypot 1.0 x))))))
double code(double x) {
	double tmp;
	if (x <= -1.0) {
		tmp = log((-0.5 / x)) + (-0.25 / (x * x));
	} else if (x <= 0.0009) {
		tmp = x + (-0.16666666666666666 * pow(x, 3.0));
	} else {
		tmp = log((x + hypot(1.0, x)));
	}
	return tmp;
}
/**
 * Hyperbolic arcsine asinh(x) in three regimes:
 * x <= -1: asymptotic form log(-0.5/x) - 0.25/x^2;
 * -1 < x <= 0.0009: cubic Taylor polynomial x - x^3/6;
 * otherwise: log(x + hypot(1, x)).
 */
public static double code(double x) {
	double tmp;
	if (x <= -1.0) {
		// asymptotic expansion for large negative x
		tmp = Math.log((-0.5 / x)) + (-0.25 / (x * x));
	} else if (x <= 0.0009) {
		// Taylor series about 0: x - x^3/6
		tmp = x + (-0.16666666666666666 * Math.pow(x, 3.0));
	} else {
		// Math.hypot(1, x) evaluates sqrt(1 + x*x) without overflow
		tmp = Math.log((x + Math.hypot(1.0, x)));
	}
	return tmp;
}
def code(x):
	"""Hyperbolic arcsine asinh(x) in three accuracy regimes.

	Uses an asymptotic logarithmic form for x <= -1, the cubic Taylor
	polynomial x - x**3/6 for x <= 0.0009, and log(x + hypot(1, x))
	for all remaining inputs.
	"""
	if x <= -1.0:
		# asymptotic expansion about -inf: log(-0.5/x) - 0.25/x**2
		return math.log((-0.5 / x)) + (-0.25 / (x * x))
	if x <= 0.0009:
		# Taylor series about 0: x - x**3/6
		return x + (-0.16666666666666666 * math.pow(x, 3.0))
	# math.hypot(1, x) evaluates sqrt(1 + x*x) without overflow
	return math.log((x + math.hypot(1.0, x)))
# Hyperbolic arcsine asinh(x) in three regimes:
#   x <= -1          : asymptotic form log(-0.5/x) - 0.25/x^2
#   -1 < x <= 0.0009 : cubic Taylor polynomial x - x^3/6
#   otherwise        : log(x + hypot(1, x))
function code(x)
	tmp = 0.0
	if (x <= -1.0)
		tmp = Float64(log(Float64(-0.5 / x)) + Float64(-0.25 / Float64(x * x)));
	elseif (x <= 0.0009)
		tmp = Float64(x + Float64(-0.16666666666666666 * (x ^ 3.0)));
	else
		tmp = log(Float64(x + hypot(1.0, x)));
	end
	return tmp
end
% Hyperbolic arcsine asinh(x) in three regimes (see branch comments).
function tmp_2 = code(x)
	tmp = 0.0;
	if (x <= -1.0)
		% asymptotic form for large negative x: log(-0.5/x) - 0.25/x^2
		tmp = log((-0.5 / x)) + (-0.25 / (x * x));
	elseif (x <= 0.0009)
		% cubic Taylor polynomial about 0: x - x^3/6
		tmp = x + (-0.16666666666666666 * (x ^ 3.0));
	else
		% hypot(1, x) evaluates sqrt(1 + x^2)
		tmp = log((x + hypot(1.0, x)));
	end
	tmp_2 = tmp;
end
(* Hyperbolic arcsine asinh(x): asymptotic log form for x <= -1, cubic Taylor polynomial x - x^3/6 for x <= 0.0009, log(x + Sqrt[1 + x^2]) otherwise. *)
code[x_] := If[LessEqual[x, -1.0], N[(N[Log[N[(-0.5 / x), $MachinePrecision]], $MachinePrecision] + N[(-0.25 / N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], If[LessEqual[x, 0.0009], N[(x + N[(-0.16666666666666666 * N[Power[x, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[Log[N[(x + N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x \leq -1:\\
\;\;\;\;\log \left(\frac{-0.5}{x}\right) + \frac{-0.25}{x \cdot x}\\

\mathbf{elif}\;x \leq 0.0009:\\
\;\;\;\;x + -0.16666666666666666 \cdot {x}^{3}\\

\mathbf{else}:\\
\;\;\;\;\log \left(x + \mathsf{hypot}\left(1, x\right)\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if x < -1

    1. Initial program 2.5%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Step-by-step derivation
      1. +-commutative2.5%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{1 + x \cdot x}}\right) \]
      2. hypot-1-def3.7%

        \[\leadsto \log \left(x + \color{blue}{\mathsf{hypot}\left(1, x\right)}\right) \]
    3. Simplified3.7%

      \[\leadsto \color{blue}{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)} \]
    4. Step-by-step derivation
      1. flip-+3.1%

        \[\leadsto \log \color{blue}{\left(\frac{x \cdot x - \mathsf{hypot}\left(1, x\right) \cdot \mathsf{hypot}\left(1, x\right)}{x - \mathsf{hypot}\left(1, x\right)}\right)} \]
      2. div-sub2.2%

        \[\leadsto \log \color{blue}{\left(\frac{x \cdot x}{x - \mathsf{hypot}\left(1, x\right)} - \frac{\mathsf{hypot}\left(1, x\right) \cdot \mathsf{hypot}\left(1, x\right)}{x - \mathsf{hypot}\left(1, x\right)}\right)} \]
      3. hypot-udef2.2%

        \[\leadsto \log \left(\frac{x \cdot x}{x - \mathsf{hypot}\left(1, x\right)} - \frac{\color{blue}{\sqrt{1 \cdot 1 + x \cdot x}} \cdot \mathsf{hypot}\left(1, x\right)}{x - \mathsf{hypot}\left(1, x\right)}\right) \]
      4. hypot-udef2.2%

        \[\leadsto \log \left(\frac{x \cdot x}{x - \mathsf{hypot}\left(1, x\right)} - \frac{\sqrt{1 \cdot 1 + x \cdot x} \cdot \color{blue}{\sqrt{1 \cdot 1 + x \cdot x}}}{x - \mathsf{hypot}\left(1, x\right)}\right) \]
      5. add-sqr-sqrt2.2%

        \[\leadsto \log \left(\frac{x \cdot x}{x - \mathsf{hypot}\left(1, x\right)} - \frac{\color{blue}{1 \cdot 1 + x \cdot x}}{x - \mathsf{hypot}\left(1, x\right)}\right) \]
      6. metadata-eval2.2%

        \[\leadsto \log \left(\frac{x \cdot x}{x - \mathsf{hypot}\left(1, x\right)} - \frac{\color{blue}{1} + x \cdot x}{x - \mathsf{hypot}\left(1, x\right)}\right) \]
    5. Applied egg-rr2.2%

      \[\leadsto \log \color{blue}{\left(\frac{x \cdot x}{x - \mathsf{hypot}\left(1, x\right)} - \frac{1 + x \cdot x}{x - \mathsf{hypot}\left(1, x\right)}\right)} \]
    6. Step-by-step derivation
      1. div-sub3.1%

        \[\leadsto \log \color{blue}{\left(\frac{x \cdot x - \left(1 + x \cdot x\right)}{x - \mathsf{hypot}\left(1, x\right)}\right)} \]
      2. +-commutative3.1%

        \[\leadsto \log \left(\frac{x \cdot x - \color{blue}{\left(x \cdot x + 1\right)}}{x - \mathsf{hypot}\left(1, x\right)}\right) \]
      3. associate--r+52.2%

        \[\leadsto \log \left(\frac{\color{blue}{\left(x \cdot x - x \cdot x\right) - 1}}{x - \mathsf{hypot}\left(1, x\right)}\right) \]
      4. +-inverses100.0%

        \[\leadsto \log \left(\frac{\color{blue}{0} - 1}{x - \mathsf{hypot}\left(1, x\right)}\right) \]
      5. metadata-eval100.0%

        \[\leadsto \log \left(\frac{\color{blue}{-1}}{x - \mathsf{hypot}\left(1, x\right)}\right) \]
    7. Simplified100.0%

      \[\leadsto \log \color{blue}{\left(\frac{-1}{x - \mathsf{hypot}\left(1, x\right)}\right)} \]
    8. Taylor expanded in x around -inf 99.7%

      \[\leadsto \color{blue}{\left(\log \left(\frac{-1}{x}\right) + \log 0.5\right) - 0.25 \cdot \frac{1}{{x}^{2}}} \]
    9. Step-by-step derivation
      1. sub-neg99.7%

        \[\leadsto \color{blue}{\left(\log \left(\frac{-1}{x}\right) + \log 0.5\right) + \left(-0.25 \cdot \frac{1}{{x}^{2}}\right)} \]
      2. +-commutative99.7%

        \[\leadsto \color{blue}{\left(\log 0.5 + \log \left(\frac{-1}{x}\right)\right)} + \left(-0.25 \cdot \frac{1}{{x}^{2}}\right) \]
      3. metadata-eval99.7%

        \[\leadsto \left(\log 0.5 + \log \left(\frac{\color{blue}{-1}}{x}\right)\right) + \left(-0.25 \cdot \frac{1}{{x}^{2}}\right) \]
      4. distribute-neg-frac99.7%

        \[\leadsto \left(\log 0.5 + \log \color{blue}{\left(-\frac{1}{x}\right)}\right) + \left(-0.25 \cdot \frac{1}{{x}^{2}}\right) \]
      5. log-prod100.0%

        \[\leadsto \color{blue}{\log \left(0.5 \cdot \left(-\frac{1}{x}\right)\right)} + \left(-0.25 \cdot \frac{1}{{x}^{2}}\right) \]
      6. distribute-rgt-neg-in100.0%

        \[\leadsto \log \color{blue}{\left(-0.5 \cdot \frac{1}{x}\right)} + \left(-0.25 \cdot \frac{1}{{x}^{2}}\right) \]
      7. associate-*r/100.0%

        \[\leadsto \log \left(-\color{blue}{\frac{0.5 \cdot 1}{x}}\right) + \left(-0.25 \cdot \frac{1}{{x}^{2}}\right) \]
      8. metadata-eval100.0%

        \[\leadsto \log \left(-\frac{\color{blue}{0.5}}{x}\right) + \left(-0.25 \cdot \frac{1}{{x}^{2}}\right) \]
      9. distribute-neg-frac100.0%

        \[\leadsto \log \color{blue}{\left(\frac{-0.5}{x}\right)} + \left(-0.25 \cdot \frac{1}{{x}^{2}}\right) \]
      10. metadata-eval100.0%

        \[\leadsto \log \left(\frac{\color{blue}{-0.5}}{x}\right) + \left(-0.25 \cdot \frac{1}{{x}^{2}}\right) \]
      11. associate-*r/100.0%

        \[\leadsto \log \left(\frac{-0.5}{x}\right) + \left(-\color{blue}{\frac{0.25 \cdot 1}{{x}^{2}}}\right) \]
      12. metadata-eval100.0%

        \[\leadsto \log \left(\frac{-0.5}{x}\right) + \left(-\frac{\color{blue}{0.25}}{{x}^{2}}\right) \]
      13. distribute-neg-frac100.0%

        \[\leadsto \log \left(\frac{-0.5}{x}\right) + \color{blue}{\frac{-0.25}{{x}^{2}}} \]
      14. metadata-eval100.0%

        \[\leadsto \log \left(\frac{-0.5}{x}\right) + \frac{\color{blue}{-0.25}}{{x}^{2}} \]
      15. unpow2100.0%

        \[\leadsto \log \left(\frac{-0.5}{x}\right) + \frac{-0.25}{\color{blue}{x \cdot x}} \]
    10. Simplified100.0%

      \[\leadsto \color{blue}{\log \left(\frac{-0.5}{x}\right) + \frac{-0.25}{x \cdot x}} \]

    if -1 < x < 8.9999999999999998e-4

    1. Initial program 7.8%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Step-by-step derivation
      1. +-commutative7.8%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{1 + x \cdot x}}\right) \]
      2. hypot-1-def7.8%

        \[\leadsto \log \left(x + \color{blue}{\mathsf{hypot}\left(1, x\right)}\right) \]
    3. Simplified7.8%

      \[\leadsto \color{blue}{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)} \]
    4. Taylor expanded in x around 0 99.7%

      \[\leadsto \color{blue}{-0.16666666666666666 \cdot {x}^{3} + x} \]

    if 8.9999999999999998e-4 < x

    1. Initial program 48.6%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Step-by-step derivation
      1. +-commutative48.6%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{1 + x \cdot x}}\right) \]
      2. hypot-1-def100.0%

        \[\leadsto \log \left(x + \color{blue}{\mathsf{hypot}\left(1, x\right)}\right) \]
    3. Simplified100.0%

      \[\leadsto \color{blue}{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)} \]
  3. Recombined 3 regimes into one program.
  4. Final simplification99.8%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -1:\\ \;\;\;\;\log \left(\frac{-0.5}{x}\right) + \frac{-0.25}{x \cdot x}\\ \mathbf{elif}\;x \leq 0.0009:\\ \;\;\;\;x + -0.16666666666666666 \cdot {x}^{3}\\ \mathbf{else}:\\ \;\;\;\;\log \left(x + \mathsf{hypot}\left(1, x\right)\right)\\ \end{array} \]

Alternative 5: 99.8% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq -6 \cdot 10^{-6}:\\ \;\;\;\;\log \left(\frac{-1}{x - \mathsf{hypot}\left(1, x\right)}\right)\\ \mathbf{elif}\;x \leq 5.5 \cdot 10^{-6}:\\ \;\;\;\;x\\ \mathbf{else}:\\ \;\;\;\;\log \left(x + \mathsf{hypot}\left(1, x\right)\right)\\ \end{array} \end{array} \]
; asinh(x): conjugate form log(-1/(x - hypot(1, x))) for x <= -6e-6,
; first-order Taylor term x for tiny |x|, log(x + hypot(1, x)) elsewhere.
(FPCore (x)
 :precision binary64
 (if (<= x -6e-6)
   (log (/ -1.0 (- x (hypot 1.0 x))))
   (if (<= x 5.5e-6) x (log (+ x (hypot 1.0 x))))))
double code(double x) {
	double tmp;
	if (x <= -6e-6) {
		tmp = log((-1.0 / (x - hypot(1.0, x))));
	} else if (x <= 5.5e-6) {
		tmp = x;
	} else {
		tmp = log((x + hypot(1.0, x)));
	}
	return tmp;
}
/**
 * Hyperbolic arcsine asinh(x) in three regimes:
 * x <= -6e-6: conjugate form log(-1/(x - hypot(1, x)));
 * -6e-6 < x <= 5.5e-6: first-order Taylor term x;
 * otherwise: log(x + hypot(1, x)).
 */
public static double code(double x) {
	double tmp;
	if (x <= -6e-6) {
		// conjugate form avoids cancellation in x + hypot(1, x) for negative x
		tmp = Math.log((-1.0 / (x - Math.hypot(1.0, x))));
	} else if (x <= 5.5e-6) {
		// Taylor series about 0, first order
		tmp = x;
	} else {
		tmp = Math.log((x + Math.hypot(1.0, x)));
	}
	return tmp;
}
def code(x):
	"""Hyperbolic arcsine asinh(x) in three accuracy regimes.

	For x <= -6e-6 the conjugate form log(-1/(x - hypot(1, x))) avoids
	cancellation in x + hypot(1, x); for tiny |x| the first-order Taylor
	term x suffices; the rest uses log(x + hypot(1, x)).
	"""
	if x <= -6e-6:
		# conjugate form: -1/(x - hypot(1, x)) == x + hypot(1, x)
		return math.log((-1.0 / (x - math.hypot(1.0, x))))
	if x <= 5.5e-6:
		# Taylor series about 0, first order
		return x
	return math.log((x + math.hypot(1.0, x)))
# Hyperbolic arcsine asinh(x) in three regimes:
#   x <= -6e-6           : conjugate form log(-1/(x - hypot(1, x)))
#   -6e-6 < x <= 5.5e-6  : first-order Taylor term x
#   otherwise            : log(x + hypot(1, x))
function code(x)
	tmp = 0.0
	if (x <= -6e-6)
		tmp = log(Float64(-1.0 / Float64(x - hypot(1.0, x))));
	elseif (x <= 5.5e-6)
		tmp = x;
	else
		tmp = log(Float64(x + hypot(1.0, x)));
	end
	return tmp
end
% Hyperbolic arcsine asinh(x) in three regimes (see branch comments).
function tmp_2 = code(x)
	tmp = 0.0;
	if (x <= -6e-6)
		% conjugate form avoids cancellation in x + hypot(1, x) for negative x
		tmp = log((-1.0 / (x - hypot(1.0, x))));
	elseif (x <= 5.5e-6)
		% first-order Taylor term about 0
		tmp = x;
	else
		tmp = log((x + hypot(1.0, x)));
	end
	tmp_2 = tmp;
end
(* Hyperbolic arcsine asinh(x): conjugate form log(-1/(x - Sqrt[1 + x^2])) for x <= -6e-6, first-order Taylor term x for tiny |x|, log(x + Sqrt[1 + x^2]) otherwise. *)
code[x_] := If[LessEqual[x, -6e-6], N[Log[N[(-1.0 / N[(x - N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision], If[LessEqual[x, 5.5e-6], x, N[Log[N[(x + N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x \leq -6 \cdot 10^{-6}:\\
\;\;\;\;\log \left(\frac{-1}{x - \mathsf{hypot}\left(1, x\right)}\right)\\

\mathbf{elif}\;x \leq 5.5 \cdot 10^{-6}:\\
\;\;\;\;x\\

\mathbf{else}:\\
\;\;\;\;\log \left(x + \mathsf{hypot}\left(1, x\right)\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if x < -6.0000000000000002e-6

    1. Initial program 3.8%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Step-by-step derivation
      1. +-commutative3.8%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{1 + x \cdot x}}\right) \]
      2. hypot-1-def5.0%

        \[\leadsto \log \left(x + \color{blue}{\mathsf{hypot}\left(1, x\right)}\right) \]
    3. Simplified5.0%

      \[\leadsto \color{blue}{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)} \]
    4. Step-by-step derivation
      1. flip-+4.4%

        \[\leadsto \log \color{blue}{\left(\frac{x \cdot x - \mathsf{hypot}\left(1, x\right) \cdot \mathsf{hypot}\left(1, x\right)}{x - \mathsf{hypot}\left(1, x\right)}\right)} \]
      2. div-sub3.5%

        \[\leadsto \log \color{blue}{\left(\frac{x \cdot x}{x - \mathsf{hypot}\left(1, x\right)} - \frac{\mathsf{hypot}\left(1, x\right) \cdot \mathsf{hypot}\left(1, x\right)}{x - \mathsf{hypot}\left(1, x\right)}\right)} \]
      3. hypot-udef3.5%

        \[\leadsto \log \left(\frac{x \cdot x}{x - \mathsf{hypot}\left(1, x\right)} - \frac{\color{blue}{\sqrt{1 \cdot 1 + x \cdot x}} \cdot \mathsf{hypot}\left(1, x\right)}{x - \mathsf{hypot}\left(1, x\right)}\right) \]
      4. hypot-udef3.5%

        \[\leadsto \log \left(\frac{x \cdot x}{x - \mathsf{hypot}\left(1, x\right)} - \frac{\sqrt{1 \cdot 1 + x \cdot x} \cdot \color{blue}{\sqrt{1 \cdot 1 + x \cdot x}}}{x - \mathsf{hypot}\left(1, x\right)}\right) \]
      5. add-sqr-sqrt3.5%

        \[\leadsto \log \left(\frac{x \cdot x}{x - \mathsf{hypot}\left(1, x\right)} - \frac{\color{blue}{1 \cdot 1 + x \cdot x}}{x - \mathsf{hypot}\left(1, x\right)}\right) \]
      6. metadata-eval3.5%

        \[\leadsto \log \left(\frac{x \cdot x}{x - \mathsf{hypot}\left(1, x\right)} - \frac{\color{blue}{1} + x \cdot x}{x - \mathsf{hypot}\left(1, x\right)}\right) \]
    5. Applied egg-rr3.5%

      \[\leadsto \log \color{blue}{\left(\frac{x \cdot x}{x - \mathsf{hypot}\left(1, x\right)} - \frac{1 + x \cdot x}{x - \mathsf{hypot}\left(1, x\right)}\right)} \]
    6. Step-by-step derivation
      1. div-sub4.4%

        \[\leadsto \log \color{blue}{\left(\frac{x \cdot x - \left(1 + x \cdot x\right)}{x - \mathsf{hypot}\left(1, x\right)}\right)} \]
      2. +-commutative4.4%

        \[\leadsto \log \left(\frac{x \cdot x - \color{blue}{\left(x \cdot x + 1\right)}}{x - \mathsf{hypot}\left(1, x\right)}\right) \]
      3. associate--r+52.8%

        \[\leadsto \log \left(\frac{\color{blue}{\left(x \cdot x - x \cdot x\right) - 1}}{x - \mathsf{hypot}\left(1, x\right)}\right) \]
      4. +-inverses99.9%

        \[\leadsto \log \left(\frac{\color{blue}{0} - 1}{x - \mathsf{hypot}\left(1, x\right)}\right) \]
      5. metadata-eval99.9%

        \[\leadsto \log \left(\frac{\color{blue}{-1}}{x - \mathsf{hypot}\left(1, x\right)}\right) \]
    7. Simplified99.9%

      \[\leadsto \log \color{blue}{\left(\frac{-1}{x - \mathsf{hypot}\left(1, x\right)}\right)} \]

    if -6.0000000000000002e-6 < x < 5.4999999999999999e-6

    1. Initial program 7.1%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Step-by-step derivation
      1. +-commutative7.1%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{1 + x \cdot x}}\right) \]
      2. hypot-1-def7.1%

        \[\leadsto \log \left(x + \color{blue}{\mathsf{hypot}\left(1, x\right)}\right) \]
    3. Simplified7.1%

      \[\leadsto \color{blue}{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)} \]
    4. Taylor expanded in x around 0 100.0%

      \[\leadsto \color{blue}{x} \]

    if 5.4999999999999999e-6 < x

    1. Initial program 48.6%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Step-by-step derivation
      1. +-commutative48.6%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{1 + x \cdot x}}\right) \]
      2. hypot-1-def100.0%

        \[\leadsto \log \left(x + \color{blue}{\mathsf{hypot}\left(1, x\right)}\right) \]
    3. Simplified100.0%

      \[\leadsto \color{blue}{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)} \]
  3. Recombined 3 regimes into one program.
  4. Final simplification100.0%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -6 \cdot 10^{-6}:\\ \;\;\;\;\log \left(\frac{-1}{x - \mathsf{hypot}\left(1, x\right)}\right)\\ \mathbf{elif}\;x \leq 5.5 \cdot 10^{-6}:\\ \;\;\;\;x\\ \mathbf{else}:\\ \;\;\;\;\log \left(x + \mathsf{hypot}\left(1, x\right)\right)\\ \end{array} \]

Alternative 6: 99.8% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq -4.8 \cdot 10^{-6}:\\ \;\;\;\;-\log \left(\mathsf{hypot}\left(1, x\right) - x\right)\\ \mathbf{elif}\;x \leq 5.5 \cdot 10^{-6}:\\ \;\;\;\;x\\ \mathbf{else}:\\ \;\;\;\;\log \left(x + \mathsf{hypot}\left(1, x\right)\right)\\ \end{array} \end{array} \]
; asinh(x): negated-log conjugate form -log(hypot(1, x) - x) for
; x <= -4.8e-6, first-order Taylor term x for tiny |x|,
; log(x + hypot(1, x)) elsewhere.
(FPCore (x)
 :precision binary64
 (if (<= x -4.8e-6)
    (- (log (- (hypot 1.0 x) x)))
    (if (<= x 5.5e-6) x (log (+ x (hypot 1.0 x))))))
double code(double x) {
	double tmp;
	if (x <= -4.8e-6) {
		tmp = -log((hypot(1.0, x) - x));
	} else if (x <= 5.5e-6) {
		tmp = x;
	} else {
		tmp = log((x + hypot(1.0, x)));
	}
	return tmp;
}
/**
 * Hyperbolic arcsine asinh(x) in three regimes:
 * x <= -4.8e-6: negated-log conjugate form -log(hypot(1, x) - x);
 * -4.8e-6 < x <= 5.5e-6: first-order Taylor term x;
 * otherwise: log(x + hypot(1, x)).
 */
public static double code(double x) {
	double tmp;
	if (x <= -4.8e-6) {
		// conjugate form avoids cancellation in x + hypot(1, x) for negative x
		tmp = -Math.log((Math.hypot(1.0, x) - x));
	} else if (x <= 5.5e-6) {
		// Taylor series about 0, first order
		tmp = x;
	} else {
		tmp = Math.log((x + Math.hypot(1.0, x)));
	}
	return tmp;
}
def code(x):
	"""Hyperbolic arcsine asinh(x) in three accuracy regimes.

	For x <= -4.8e-6 the negated-log conjugate form -log(hypot(1, x) - x)
	avoids cancellation in x + hypot(1, x); for tiny |x| the first-order
	Taylor term x suffices; the rest uses log(x + hypot(1, x)).
	"""
	if x <= -4.8e-6:
		# asinh(x) = -asinh(-x) = -log(hypot(1, x) - x)
		return -math.log((math.hypot(1.0, x) - x))
	if x <= 5.5e-6:
		# Taylor series about 0, first order
		return x
	return math.log((x + math.hypot(1.0, x)))
# Hyperbolic arcsine asinh(x) in three regimes:
#   x <= -4.8e-6          : negated-log conjugate form -log(hypot(1, x) - x)
#   -4.8e-6 < x <= 5.5e-6 : first-order Taylor term x
#   otherwise             : log(x + hypot(1, x))
function code(x)
	tmp = 0.0
	if (x <= -4.8e-6)
		tmp = Float64(-log(Float64(hypot(1.0, x) - x)));
	elseif (x <= 5.5e-6)
		tmp = x;
	else
		tmp = log(Float64(x + hypot(1.0, x)));
	end
	return tmp
end
% Hyperbolic arcsine asinh(x) in three regimes (see branch comments).
function tmp_2 = code(x)
	tmp = 0.0;
	if (x <= -4.8e-6)
		% negated-log conjugate form: asinh(x) = -log(hypot(1, x) - x)
		tmp = -log((hypot(1.0, x) - x));
	elseif (x <= 5.5e-6)
		% first-order Taylor term about 0
		tmp = x;
	else
		tmp = log((x + hypot(1.0, x)));
	end
	tmp_2 = tmp;
end
(* Hyperbolic arcsine asinh(x): negated-log conjugate form -Log[Sqrt[1 + x^2] - x] for x <= -4.8e-6, first-order Taylor term x for tiny |x|, Log[x + Sqrt[1 + x^2]] otherwise. *)
code[x_] := If[LessEqual[x, -4.8e-6], (-N[Log[N[(N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision] - x), $MachinePrecision]], $MachinePrecision]), If[LessEqual[x, 5.5e-6], x, N[Log[N[(x + N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x \leq -4.8 \cdot 10^{-6}:\\
\;\;\;\;-\log \left(\mathsf{hypot}\left(1, x\right) - x\right)\\

\mathbf{elif}\;x \leq 5.5 \cdot 10^{-6}:\\
\;\;\;\;x\\

\mathbf{else}:\\
\;\;\;\;\log \left(x + \mathsf{hypot}\left(1, x\right)\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if x < -4.7999999999999998e-6

    1. Initial program 3.8%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Step-by-step derivation
      1. +-commutative3.8%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{1 + x \cdot x}}\right) \]
      2. hypot-1-def5.0%

        \[\leadsto \log \left(x + \color{blue}{\mathsf{hypot}\left(1, x\right)}\right) \]
    3. Simplified5.0%

      \[\leadsto \color{blue}{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)} \]
    4. Step-by-step derivation
      1. flip-+4.4%

        \[\leadsto \log \color{blue}{\left(\frac{x \cdot x - \mathsf{hypot}\left(1, x\right) \cdot \mathsf{hypot}\left(1, x\right)}{x - \mathsf{hypot}\left(1, x\right)}\right)} \]
      2. div-sub3.5%

        \[\leadsto \log \color{blue}{\left(\frac{x \cdot x}{x - \mathsf{hypot}\left(1, x\right)} - \frac{\mathsf{hypot}\left(1, x\right) \cdot \mathsf{hypot}\left(1, x\right)}{x - \mathsf{hypot}\left(1, x\right)}\right)} \]
      3. hypot-udef3.5%

        \[\leadsto \log \left(\frac{x \cdot x}{x - \mathsf{hypot}\left(1, x\right)} - \frac{\color{blue}{\sqrt{1 \cdot 1 + x \cdot x}} \cdot \mathsf{hypot}\left(1, x\right)}{x - \mathsf{hypot}\left(1, x\right)}\right) \]
      4. hypot-udef3.5%

        \[\leadsto \log \left(\frac{x \cdot x}{x - \mathsf{hypot}\left(1, x\right)} - \frac{\sqrt{1 \cdot 1 + x \cdot x} \cdot \color{blue}{\sqrt{1 \cdot 1 + x \cdot x}}}{x - \mathsf{hypot}\left(1, x\right)}\right) \]
      5. add-sqr-sqrt3.5%

        \[\leadsto \log \left(\frac{x \cdot x}{x - \mathsf{hypot}\left(1, x\right)} - \frac{\color{blue}{1 \cdot 1 + x \cdot x}}{x - \mathsf{hypot}\left(1, x\right)}\right) \]
      6. metadata-eval3.5%

        \[\leadsto \log \left(\frac{x \cdot x}{x - \mathsf{hypot}\left(1, x\right)} - \frac{\color{blue}{1} + x \cdot x}{x - \mathsf{hypot}\left(1, x\right)}\right) \]
    5. Applied egg-rr3.5%

      \[\leadsto \log \color{blue}{\left(\frac{x \cdot x}{x - \mathsf{hypot}\left(1, x\right)} - \frac{1 + x \cdot x}{x - \mathsf{hypot}\left(1, x\right)}\right)} \]
    6. Step-by-step derivation
      1. div-sub4.4%

        \[\leadsto \log \color{blue}{\left(\frac{x \cdot x - \left(1 + x \cdot x\right)}{x - \mathsf{hypot}\left(1, x\right)}\right)} \]
      2. +-commutative4.4%

        \[\leadsto \log \left(\frac{x \cdot x - \color{blue}{\left(x \cdot x + 1\right)}}{x - \mathsf{hypot}\left(1, x\right)}\right) \]
      3. associate--r+52.8%

        \[\leadsto \log \left(\frac{\color{blue}{\left(x \cdot x - x \cdot x\right) - 1}}{x - \mathsf{hypot}\left(1, x\right)}\right) \]
      4. +-inverses99.9%

        \[\leadsto \log \left(\frac{\color{blue}{0} - 1}{x - \mathsf{hypot}\left(1, x\right)}\right) \]
      5. metadata-eval99.9%

        \[\leadsto \log \left(\frac{\color{blue}{-1}}{x - \mathsf{hypot}\left(1, x\right)}\right) \]
    7. Simplified99.9%

      \[\leadsto \log \color{blue}{\left(\frac{-1}{x - \mathsf{hypot}\left(1, x\right)}\right)} \]
    8. Step-by-step derivation
      1. frac-2neg99.9%

        \[\leadsto \log \color{blue}{\left(\frac{--1}{-\left(x - \mathsf{hypot}\left(1, x\right)\right)}\right)} \]
      2. metadata-eval99.9%

        \[\leadsto \log \left(\frac{\color{blue}{1}}{-\left(x - \mathsf{hypot}\left(1, x\right)\right)}\right) \]
      3. log-rec99.9%

        \[\leadsto \color{blue}{-\log \left(-\left(x - \mathsf{hypot}\left(1, x\right)\right)\right)} \]
    9. Applied egg-rr99.9%

      \[\leadsto \color{blue}{-\log \left(-\left(x - \mathsf{hypot}\left(1, x\right)\right)\right)} \]

    if -4.7999999999999998e-6 < x < 5.4999999999999999e-6

    1. Initial program 7.1%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Step-by-step derivation
      1. +-commutative7.1%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{1 + x \cdot x}}\right) \]
      2. hypot-1-def7.1%

        \[\leadsto \log \left(x + \color{blue}{\mathsf{hypot}\left(1, x\right)}\right) \]
    3. Simplified7.1%

      \[\leadsto \color{blue}{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)} \]
    4. Taylor expanded in x around 0 100.0%

      \[\leadsto \color{blue}{x} \]

    if 5.4999999999999999e-6 < x

    1. Initial program 48.6%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Step-by-step derivation
      1. +-commutative48.6%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{1 + x \cdot x}}\right) \]
      2. hypot-1-def100.0%

        \[\leadsto \log \left(x + \color{blue}{\mathsf{hypot}\left(1, x\right)}\right) \]
    3. Simplified100.0%

      \[\leadsto \color{blue}{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)} \]
  3. Recombined 3 regimes into one program.
  4. Final simplification100.0%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -4.8 \cdot 10^{-6}:\\ \;\;\;\;-\log \left(\mathsf{hypot}\left(1, x\right) - x\right)\\ \mathbf{elif}\;x \leq 5.5 \cdot 10^{-6}:\\ \;\;\;\;x\\ \mathbf{else}:\\ \;\;\;\;\log \left(x + \mathsf{hypot}\left(1, x\right)\right)\\ \end{array} \]

Alternative 7: 99.4% accurate, 1.8× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq -1.25:\\ \;\;\;\;\log \left(\frac{-0.5}{x}\right)\\ \mathbf{elif}\;x \leq 0.95:\\ \;\;\;\;x + -0.16666666666666666 \cdot {x}^{3}\\ \mathbf{else}:\\ \;\;\;\;\log \left(x \cdot 2 + 0.5 \cdot \frac{1}{x}\right)\\ \end{array} \end{array} \]
; asinh(x), fast variant: log(-0.5/x) for x <= -1.25, cubic Taylor
; polynomial x - x^3/6 for x <= 0.95, and log(2x + 1/(2x)) elsewhere
; (sqrt(x^2 + 1) ~ x + 1/(2x) for large x).
(FPCore (x)
 :precision binary64
 (if (<= x -1.25)
   (log (/ -0.5 x))
   (if (<= x 0.95)
     (+ x (* -0.16666666666666666 (pow x 3.0)))
     (log (+ (* x 2.0) (* 0.5 (/ 1.0 x)))))))
double code(double x) {
	double tmp;
	if (x <= -1.25) {
		tmp = log((-0.5 / x));
	} else if (x <= 0.95) {
		tmp = x + (-0.16666666666666666 * pow(x, 3.0));
	} else {
		tmp = log(((x * 2.0) + (0.5 * (1.0 / x))));
	}
	return tmp;
}
real(8) function code(x)
    real(8), intent (in) :: x
    real(8) :: tmp
    if (x <= (-1.25d0)) then
        tmp = log(((-0.5d0) / x))
    else if (x <= 0.95d0) then
        tmp = x + ((-0.16666666666666666d0) * (x ** 3.0d0))
    else
        tmp = log(((x * 2.0d0) + (0.5d0 * (1.0d0 / x))))
    end if
    code = tmp
end function
// Piecewise approximation of asinh(x) = log(x + sqrt(x*x + 1)) (Alternative 7).
public static double code(double x) {
	double tmp;
	if (x <= -1.25) {
		// Large negative x: asymptotic form log(-1/(2x)).
		tmp = Math.log((-0.5 / x));
	} else if (x <= 0.95) {
		// Near zero: cubic Taylor polynomial x - x^3/6.
		tmp = x + (-0.16666666666666666 * Math.pow(x, 3.0));
	} else {
		// Large positive x: asymptotic form log(2x + 1/(2x)).
		tmp = Math.log(((x * 2.0) + (0.5 * (1.0 / x))));
	}
	return tmp;
}
def code(x):
	# Piecewise approximation of asinh(x) = log(x + sqrt(x*x + 1)) (Alternative 7).
	if x <= -1.25:
		# Large negative x: asymptotic form log(-1/(2x)).
		return math.log((-0.5 / x))
	if x <= 0.95:
		# Near zero: cubic Taylor polynomial x - x**3/6.
		return x + (-0.16666666666666666 * math.pow(x, 3.0))
	# Large positive x: asymptotic form log(2x + 1/(2x)).
	return math.log(((x * 2.0) + (0.5 * (1.0 / x))))
function code(x)
	tmp = 0.0
	if (x <= -1.25)
		tmp = log(Float64(-0.5 / x));
	elseif (x <= 0.95)
		tmp = Float64(x + Float64(-0.16666666666666666 * (x ^ 3.0)));
	else
		tmp = log(Float64(Float64(x * 2.0) + Float64(0.5 * Float64(1.0 / x))));
	end
	return tmp
end
function tmp_2 = code(x)
	tmp = 0.0;
	if (x <= -1.25)
		tmp = log((-0.5 / x));
	elseif (x <= 0.95)
		tmp = x + (-0.16666666666666666 * (x ^ 3.0));
	else
		tmp = log(((x * 2.0) + (0.5 * (1.0 / x))));
	end
	tmp_2 = tmp;
end
code[x_] := If[LessEqual[x, -1.25], N[Log[N[(-0.5 / x), $MachinePrecision]], $MachinePrecision], If[LessEqual[x, 0.95], N[(x + N[(-0.16666666666666666 * N[Power[x, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[Log[N[(N[(x * 2.0), $MachinePrecision] + N[(0.5 * N[(1.0 / x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x \leq -1.25:\\
\;\;\;\;\log \left(\frac{-0.5}{x}\right)\\

\mathbf{elif}\;x \leq 0.95:\\
\;\;\;\;x + -0.16666666666666666 \cdot {x}^{3}\\

\mathbf{else}:\\
\;\;\;\;\log \left(x \cdot 2 + 0.5 \cdot \frac{1}{x}\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if x < -1.25

    1. Initial program 2.5%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Step-by-step derivation
      1. +-commutative2.5%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{1 + x \cdot x}}\right) \]
      2. hypot-1-def3.7%

        \[\leadsto \log \left(x + \color{blue}{\mathsf{hypot}\left(1, x\right)}\right) \]
    3. Simplified3.7%

      \[\leadsto \color{blue}{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)} \]
    4. Taylor expanded in x around -inf 99.9%

      \[\leadsto \log \color{blue}{\left(\frac{-0.5}{x}\right)} \]

    if -1.25 < x < 0.94999999999999996

    1. Initial program 8.5%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Step-by-step derivation
      1. +-commutative8.5%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{1 + x \cdot x}}\right) \]
      2. hypot-1-def8.5%

        \[\leadsto \log \left(x + \color{blue}{\mathsf{hypot}\left(1, x\right)}\right) \]
    3. Simplified8.5%

      \[\leadsto \color{blue}{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)} \]
    4. Taylor expanded in x around 0 99.2%

      \[\leadsto \color{blue}{-0.16666666666666666 \cdot {x}^{3} + x} \]

    if 0.94999999999999996 < x

    1. Initial program 47.8%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Step-by-step derivation
      1. +-commutative47.8%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{1 + x \cdot x}}\right) \]
      2. hypot-1-def100.0%

        \[\leadsto \log \left(x + \color{blue}{\mathsf{hypot}\left(1, x\right)}\right) \]
    3. Simplified100.0%

      \[\leadsto \color{blue}{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)} \]
    4. Taylor expanded in x around inf 100.0%

      \[\leadsto \log \color{blue}{\left(2 \cdot x + 0.5 \cdot \frac{1}{x}\right)} \]
  3. Recombined 3 regimes into one program.
  4. Final simplification — 99.6%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -1.25:\\ \;\;\;\;\log \left(\frac{-0.5}{x}\right)\\ \mathbf{elif}\;x \leq 0.95:\\ \;\;\;\;x + -0.16666666666666666 \cdot {x}^{3}\\ \mathbf{else}:\\ \;\;\;\;\log \left(x \cdot 2 + 0.5 \cdot \frac{1}{x}\right)\\ \end{array} \]

Alternative 8: 99.6% accurate, 1.8× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq -1:\\ \;\;\;\;\log \left(\frac{-0.5}{x}\right) + \frac{-0.25}{x \cdot x}\\ \mathbf{elif}\;x \leq 0.95:\\ \;\;\;\;x + -0.16666666666666666 \cdot {x}^{3}\\ \mathbf{else}:\\ \;\;\;\;\log \left(x \cdot 2 + 0.5 \cdot \frac{1}{x}\right)\\ \end{array} \end{array} \]
(FPCore (x)
 :precision binary64
 (if (<= x -1.0)
   (+ (log (/ -0.5 x)) (/ -0.25 (* x x)))
   (if (<= x 0.95)
     (+ x (* -0.16666666666666666 (pow x 3.0)))
     (log (+ (* x 2.0) (* 0.5 (/ 1.0 x)))))))
double code(double x) {
	double tmp;
	if (x <= -1.0) {
		tmp = log((-0.5 / x)) + (-0.25 / (x * x));
	} else if (x <= 0.95) {
		tmp = x + (-0.16666666666666666 * pow(x, 3.0));
	} else {
		tmp = log(((x * 2.0) + (0.5 * (1.0 / x))));
	}
	return tmp;
}
real(8) function code(x)
    real(8), intent (in) :: x
    real(8) :: tmp
    if (x <= (-1.0d0)) then
        tmp = log(((-0.5d0) / x)) + ((-0.25d0) / (x * x))
    else if (x <= 0.95d0) then
        tmp = x + ((-0.16666666666666666d0) * (x ** 3.0d0))
    else
        tmp = log(((x * 2.0d0) + (0.5d0 * (1.0d0 / x))))
    end if
    code = tmp
end function
// Piecewise approximation of asinh(x) = log(x + sqrt(x*x + 1)) (Alternative 8).
public static double code(double x) {
	double tmp;
	if (x <= -1.0) {
		// Large negative x: log(-1/(2x)) plus a -1/(4x^2) correction term.
		tmp = Math.log((-0.5 / x)) + (-0.25 / (x * x));
	} else if (x <= 0.95) {
		// Near zero: cubic Taylor polynomial x - x^3/6.
		tmp = x + (-0.16666666666666666 * Math.pow(x, 3.0));
	} else {
		// Large positive x: asymptotic form log(2x + 1/(2x)).
		tmp = Math.log(((x * 2.0) + (0.5 * (1.0 / x))));
	}
	return tmp;
}
def code(x):
	# Piecewise approximation of asinh(x) = log(x + sqrt(x*x + 1)) (Alternative 8).
	if x <= -1.0:
		# Large negative x: log(-1/(2x)) plus a -1/(4x^2) correction term.
		return math.log((-0.5 / x)) + (-0.25 / (x * x))
	if x <= 0.95:
		# Near zero: cubic Taylor polynomial x - x**3/6.
		return x + (-0.16666666666666666 * math.pow(x, 3.0))
	# Large positive x: asymptotic form log(2x + 1/(2x)).
	return math.log(((x * 2.0) + (0.5 * (1.0 / x))))
function code(x)
	tmp = 0.0
	if (x <= -1.0)
		tmp = Float64(log(Float64(-0.5 / x)) + Float64(-0.25 / Float64(x * x)));
	elseif (x <= 0.95)
		tmp = Float64(x + Float64(-0.16666666666666666 * (x ^ 3.0)));
	else
		tmp = log(Float64(Float64(x * 2.0) + Float64(0.5 * Float64(1.0 / x))));
	end
	return tmp
end
function tmp_2 = code(x)
	tmp = 0.0;
	if (x <= -1.0)
		tmp = log((-0.5 / x)) + (-0.25 / (x * x));
	elseif (x <= 0.95)
		tmp = x + (-0.16666666666666666 * (x ^ 3.0));
	else
		tmp = log(((x * 2.0) + (0.5 * (1.0 / x))));
	end
	tmp_2 = tmp;
end
code[x_] := If[LessEqual[x, -1.0], N[(N[Log[N[(-0.5 / x), $MachinePrecision]], $MachinePrecision] + N[(-0.25 / N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], If[LessEqual[x, 0.95], N[(x + N[(-0.16666666666666666 * N[Power[x, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[Log[N[(N[(x * 2.0), $MachinePrecision] + N[(0.5 * N[(1.0 / x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x \leq -1:\\
\;\;\;\;\log \left(\frac{-0.5}{x}\right) + \frac{-0.25}{x \cdot x}\\

\mathbf{elif}\;x \leq 0.95:\\
\;\;\;\;x + -0.16666666666666666 \cdot {x}^{3}\\

\mathbf{else}:\\
\;\;\;\;\log \left(x \cdot 2 + 0.5 \cdot \frac{1}{x}\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if x < -1

    1. Initial program 2.5%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Step-by-step derivation
      1. +-commutative2.5%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{1 + x \cdot x}}\right) \]
      2. hypot-1-def3.7%

        \[\leadsto \log \left(x + \color{blue}{\mathsf{hypot}\left(1, x\right)}\right) \]
    3. Simplified3.7%

      \[\leadsto \color{blue}{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)} \]
    4. Step-by-step derivation
      1. flip-+3.1%

        \[\leadsto \log \color{blue}{\left(\frac{x \cdot x - \mathsf{hypot}\left(1, x\right) \cdot \mathsf{hypot}\left(1, x\right)}{x - \mathsf{hypot}\left(1, x\right)}\right)} \]
      2. div-sub2.2%

        \[\leadsto \log \color{blue}{\left(\frac{x \cdot x}{x - \mathsf{hypot}\left(1, x\right)} - \frac{\mathsf{hypot}\left(1, x\right) \cdot \mathsf{hypot}\left(1, x\right)}{x - \mathsf{hypot}\left(1, x\right)}\right)} \]
      3. hypot-udef2.2%

        \[\leadsto \log \left(\frac{x \cdot x}{x - \mathsf{hypot}\left(1, x\right)} - \frac{\color{blue}{\sqrt{1 \cdot 1 + x \cdot x}} \cdot \mathsf{hypot}\left(1, x\right)}{x - \mathsf{hypot}\left(1, x\right)}\right) \]
      4. hypot-udef2.2%

        \[\leadsto \log \left(\frac{x \cdot x}{x - \mathsf{hypot}\left(1, x\right)} - \frac{\sqrt{1 \cdot 1 + x \cdot x} \cdot \color{blue}{\sqrt{1 \cdot 1 + x \cdot x}}}{x - \mathsf{hypot}\left(1, x\right)}\right) \]
      5. add-sqr-sqrt2.2%

        \[\leadsto \log \left(\frac{x \cdot x}{x - \mathsf{hypot}\left(1, x\right)} - \frac{\color{blue}{1 \cdot 1 + x \cdot x}}{x - \mathsf{hypot}\left(1, x\right)}\right) \]
      6. metadata-eval2.2%

        \[\leadsto \log \left(\frac{x \cdot x}{x - \mathsf{hypot}\left(1, x\right)} - \frac{\color{blue}{1} + x \cdot x}{x - \mathsf{hypot}\left(1, x\right)}\right) \]
    5. Applied egg-rr2.2%

      \[\leadsto \log \color{blue}{\left(\frac{x \cdot x}{x - \mathsf{hypot}\left(1, x\right)} - \frac{1 + x \cdot x}{x - \mathsf{hypot}\left(1, x\right)}\right)} \]
    6. Step-by-step derivation
      1. div-sub3.1%

        \[\leadsto \log \color{blue}{\left(\frac{x \cdot x - \left(1 + x \cdot x\right)}{x - \mathsf{hypot}\left(1, x\right)}\right)} \]
      2. +-commutative3.1%

        \[\leadsto \log \left(\frac{x \cdot x - \color{blue}{\left(x \cdot x + 1\right)}}{x - \mathsf{hypot}\left(1, x\right)}\right) \]
      3. associate--r+52.2%

        \[\leadsto \log \left(\frac{\color{blue}{\left(x \cdot x - x \cdot x\right) - 1}}{x - \mathsf{hypot}\left(1, x\right)}\right) \]
      4. +-inverses100.0%

        \[\leadsto \log \left(\frac{\color{blue}{0} - 1}{x - \mathsf{hypot}\left(1, x\right)}\right) \]
      5. metadata-eval100.0%

        \[\leadsto \log \left(\frac{\color{blue}{-1}}{x - \mathsf{hypot}\left(1, x\right)}\right) \]
    7. Simplified100.0%

      \[\leadsto \log \color{blue}{\left(\frac{-1}{x - \mathsf{hypot}\left(1, x\right)}\right)} \]
    8. Taylor expanded in x around -inf 99.7%

      \[\leadsto \color{blue}{\left(\log \left(\frac{-1}{x}\right) + \log 0.5\right) - 0.25 \cdot \frac{1}{{x}^{2}}} \]
    9. Step-by-step derivation
      1. sub-neg99.7%

        \[\leadsto \color{blue}{\left(\log \left(\frac{-1}{x}\right) + \log 0.5\right) + \left(-0.25 \cdot \frac{1}{{x}^{2}}\right)} \]
      2. +-commutative99.7%

        \[\leadsto \color{blue}{\left(\log 0.5 + \log \left(\frac{-1}{x}\right)\right)} + \left(-0.25 \cdot \frac{1}{{x}^{2}}\right) \]
      3. metadata-eval99.7%

        \[\leadsto \left(\log 0.5 + \log \left(\frac{\color{blue}{-1}}{x}\right)\right) + \left(-0.25 \cdot \frac{1}{{x}^{2}}\right) \]
      4. distribute-neg-frac99.7%

        \[\leadsto \left(\log 0.5 + \log \color{blue}{\left(-\frac{1}{x}\right)}\right) + \left(-0.25 \cdot \frac{1}{{x}^{2}}\right) \]
      5. log-prod100.0%

        \[\leadsto \color{blue}{\log \left(0.5 \cdot \left(-\frac{1}{x}\right)\right)} + \left(-0.25 \cdot \frac{1}{{x}^{2}}\right) \]
      6. distribute-rgt-neg-in100.0%

        \[\leadsto \log \color{blue}{\left(-0.5 \cdot \frac{1}{x}\right)} + \left(-0.25 \cdot \frac{1}{{x}^{2}}\right) \]
      7. associate-*r/100.0%

        \[\leadsto \log \left(-\color{blue}{\frac{0.5 \cdot 1}{x}}\right) + \left(-0.25 \cdot \frac{1}{{x}^{2}}\right) \]
      8. metadata-eval100.0%

        \[\leadsto \log \left(-\frac{\color{blue}{0.5}}{x}\right) + \left(-0.25 \cdot \frac{1}{{x}^{2}}\right) \]
      9. distribute-neg-frac100.0%

        \[\leadsto \log \color{blue}{\left(\frac{-0.5}{x}\right)} + \left(-0.25 \cdot \frac{1}{{x}^{2}}\right) \]
      10. metadata-eval100.0%

        \[\leadsto \log \left(\frac{\color{blue}{-0.5}}{x}\right) + \left(-0.25 \cdot \frac{1}{{x}^{2}}\right) \]
      11. associate-*r/100.0%

        \[\leadsto \log \left(\frac{-0.5}{x}\right) + \left(-\color{blue}{\frac{0.25 \cdot 1}{{x}^{2}}}\right) \]
      12. metadata-eval100.0%

        \[\leadsto \log \left(\frac{-0.5}{x}\right) + \left(-\frac{\color{blue}{0.25}}{{x}^{2}}\right) \]
      13. distribute-neg-frac100.0%

        \[\leadsto \log \left(\frac{-0.5}{x}\right) + \color{blue}{\frac{-0.25}{{x}^{2}}} \]
      14. metadata-eval100.0%

        \[\leadsto \log \left(\frac{-0.5}{x}\right) + \frac{\color{blue}{-0.25}}{{x}^{2}} \]
      15. unpow2100.0%

        \[\leadsto \log \left(\frac{-0.5}{x}\right) + \frac{-0.25}{\color{blue}{x \cdot x}} \]
    10. Simplified100.0%

      \[\leadsto \color{blue}{\log \left(\frac{-0.5}{x}\right) + \frac{-0.25}{x \cdot x}} \]

    if -1 < x < 0.94999999999999996

    1. Initial program 8.5%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Step-by-step derivation
      1. +-commutative8.5%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{1 + x \cdot x}}\right) \]
      2. hypot-1-def8.5%

        \[\leadsto \log \left(x + \color{blue}{\mathsf{hypot}\left(1, x\right)}\right) \]
    3. Simplified8.5%

      \[\leadsto \color{blue}{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)} \]
    4. Taylor expanded in x around 0 99.2%

      \[\leadsto \color{blue}{-0.16666666666666666 \cdot {x}^{3} + x} \]

    if 0.94999999999999996 < x

    1. Initial program 47.8%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Step-by-step derivation
      1. +-commutative47.8%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{1 + x \cdot x}}\right) \]
      2. hypot-1-def100.0%

        \[\leadsto \log \left(x + \color{blue}{\mathsf{hypot}\left(1, x\right)}\right) \]
    3. Simplified100.0%

      \[\leadsto \color{blue}{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)} \]
    4. Taylor expanded in x around inf 100.0%

      \[\leadsto \log \color{blue}{\left(2 \cdot x + 0.5 \cdot \frac{1}{x}\right)} \]
  3. Recombined 3 regimes into one program.
  4. Final simplification — 99.6%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -1:\\ \;\;\;\;\log \left(\frac{-0.5}{x}\right) + \frac{-0.25}{x \cdot x}\\ \mathbf{elif}\;x \leq 0.95:\\ \;\;\;\;x + -0.16666666666666666 \cdot {x}^{3}\\ \mathbf{else}:\\ \;\;\;\;\log \left(x \cdot 2 + 0.5 \cdot \frac{1}{x}\right)\\ \end{array} \]

Alternative 9: 99.3% accurate, 1.9× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq -1.25:\\ \;\;\;\;\log \left(\frac{-0.5}{x}\right)\\ \mathbf{elif}\;x \leq 1.26:\\ \;\;\;\;x + -0.16666666666666666 \cdot {x}^{3}\\ \mathbf{else}:\\ \;\;\;\;\log \left(x + x\right)\\ \end{array} \end{array} \]
(FPCore (x)
 :precision binary64
 (if (<= x -1.25)
   (log (/ -0.5 x))
   (if (<= x 1.26) (+ x (* -0.16666666666666666 (pow x 3.0))) (log (+ x x)))))
double code(double x) {
	double tmp;
	if (x <= -1.25) {
		tmp = log((-0.5 / x));
	} else if (x <= 1.26) {
		tmp = x + (-0.16666666666666666 * pow(x, 3.0));
	} else {
		tmp = log((x + x));
	}
	return tmp;
}
real(8) function code(x)
    real(8), intent (in) :: x
    real(8) :: tmp
    if (x <= (-1.25d0)) then
        tmp = log(((-0.5d0) / x))
    else if (x <= 1.26d0) then
        tmp = x + ((-0.16666666666666666d0) * (x ** 3.0d0))
    else
        tmp = log((x + x))
    end if
    code = tmp
end function
// Piecewise approximation of asinh(x) = log(x + sqrt(x*x + 1)) (Alternative 9).
public static double code(double x) {
	double tmp;
	if (x <= -1.25) {
		// Large negative x: asymptotic form log(-1/(2x)).
		tmp = Math.log((-0.5 / x));
	} else if (x <= 1.26) {
		// Near zero: cubic Taylor polynomial x - x^3/6.
		tmp = x + (-0.16666666666666666 * Math.pow(x, 3.0));
	} else {
		// Large positive x: log(2x), written as log(x + x).
		tmp = Math.log((x + x));
	}
	return tmp;
}
def code(x):
	# Piecewise approximation of asinh(x) = log(x + sqrt(x*x + 1)) (Alternative 9).
	if x <= -1.25:
		# Large negative x: asymptotic form log(-1/(2x)).
		return math.log((-0.5 / x))
	if x <= 1.26:
		# Near zero: cubic Taylor polynomial x - x**3/6.
		return x + (-0.16666666666666666 * math.pow(x, 3.0))
	# Large positive x: log(2x), written as log(x + x).
	return math.log((x + x))
function code(x)
	tmp = 0.0
	if (x <= -1.25)
		tmp = log(Float64(-0.5 / x));
	elseif (x <= 1.26)
		tmp = Float64(x + Float64(-0.16666666666666666 * (x ^ 3.0)));
	else
		tmp = log(Float64(x + x));
	end
	return tmp
end
function tmp_2 = code(x)
	tmp = 0.0;
	if (x <= -1.25)
		tmp = log((-0.5 / x));
	elseif (x <= 1.26)
		tmp = x + (-0.16666666666666666 * (x ^ 3.0));
	else
		tmp = log((x + x));
	end
	tmp_2 = tmp;
end
code[x_] := If[LessEqual[x, -1.25], N[Log[N[(-0.5 / x), $MachinePrecision]], $MachinePrecision], If[LessEqual[x, 1.26], N[(x + N[(-0.16666666666666666 * N[Power[x, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[Log[N[(x + x), $MachinePrecision]], $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x \leq -1.25:\\
\;\;\;\;\log \left(\frac{-0.5}{x}\right)\\

\mathbf{elif}\;x \leq 1.26:\\
\;\;\;\;x + -0.16666666666666666 \cdot {x}^{3}\\

\mathbf{else}:\\
\;\;\;\;\log \left(x + x\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if x < -1.25

    1. Initial program 2.5%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Step-by-step derivation
      1. +-commutative2.5%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{1 + x \cdot x}}\right) \]
      2. hypot-1-def3.7%

        \[\leadsto \log \left(x + \color{blue}{\mathsf{hypot}\left(1, x\right)}\right) \]
    3. Simplified3.7%

      \[\leadsto \color{blue}{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)} \]
    4. Taylor expanded in x around -inf 99.9%

      \[\leadsto \log \color{blue}{\left(\frac{-0.5}{x}\right)} \]

    if -1.25 < x < 1.26000000000000001

    1. Initial program 8.5%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Step-by-step derivation
      1. +-commutative8.5%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{1 + x \cdot x}}\right) \]
      2. hypot-1-def8.5%

        \[\leadsto \log \left(x + \color{blue}{\mathsf{hypot}\left(1, x\right)}\right) \]
    3. Simplified8.5%

      \[\leadsto \color{blue}{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)} \]
    4. Taylor expanded in x around 0 99.2%

      \[\leadsto \color{blue}{-0.16666666666666666 \cdot {x}^{3} + x} \]

    if 1.26000000000000001 < x

    1. Initial program 47.8%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Step-by-step derivation
      1. +-commutative47.8%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{1 + x \cdot x}}\right) \]
      2. hypot-1-def100.0%

        \[\leadsto \log \left(x + \color{blue}{\mathsf{hypot}\left(1, x\right)}\right) \]
    3. Simplified100.0%

      \[\leadsto \color{blue}{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)} \]
    4. Taylor expanded in x around inf 99.6%

      \[\leadsto \log \color{blue}{\left(2 \cdot x\right)} \]
    5. Step-by-step derivation
      1. count-299.6%

        \[\leadsto \log \color{blue}{\left(x + x\right)} \]
    6. Simplified99.6%

      \[\leadsto \log \color{blue}{\left(x + x\right)} \]
  3. Recombined 3 regimes into one program.
  4. Final simplification — 99.5%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -1.25:\\ \;\;\;\;\log \left(\frac{-0.5}{x}\right)\\ \mathbf{elif}\;x \leq 1.26:\\ \;\;\;\;x + -0.16666666666666666 \cdot {x}^{3}\\ \mathbf{else}:\\ \;\;\;\;\log \left(x + x\right)\\ \end{array} \]

Alternative 10: 99.1% accurate, 1.9× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq -1.25:\\ \;\;\;\;\log \left(\frac{-0.5}{x}\right)\\ \mathbf{elif}\;x \leq 1.26:\\ \;\;\;\;x\\ \mathbf{else}:\\ \;\;\;\;\log \left(x + x\right)\\ \end{array} \end{array} \]
(FPCore (x)
 :precision binary64
 (if (<= x -1.25) (log (/ -0.5 x)) (if (<= x 1.26) x (log (+ x x)))))
double code(double x) {
	double tmp;
	if (x <= -1.25) {
		tmp = log((-0.5 / x));
	} else if (x <= 1.26) {
		tmp = x;
	} else {
		tmp = log((x + x));
	}
	return tmp;
}
real(8) function code(x)
    real(8), intent (in) :: x
    real(8) :: tmp
    if (x <= (-1.25d0)) then
        tmp = log(((-0.5d0) / x))
    else if (x <= 1.26d0) then
        tmp = x
    else
        tmp = log((x + x))
    end if
    code = tmp
end function
// Piecewise approximation of asinh(x) = log(x + sqrt(x*x + 1)) (Alternative 10).
public static double code(double x) {
	double tmp;
	if (x <= -1.25) {
		// Large negative x: asymptotic form log(-1/(2x)).
		tmp = Math.log((-0.5 / x));
	} else if (x <= 1.26) {
		// Near zero: first-order Taylor term, asinh(x) ~= x.
		tmp = x;
	} else {
		// Large positive x: log(2x), written as log(x + x).
		tmp = Math.log((x + x));
	}
	return tmp;
}
def code(x):
	# Piecewise approximation of asinh(x) = log(x + sqrt(x*x + 1)) (Alternative 10).
	if x <= -1.25:
		# Large negative x: asymptotic form log(-1/(2x)).
		return math.log((-0.5 / x))
	if x <= 1.26:
		# Near zero: first-order Taylor term, asinh(x) ~= x.
		return x
	# Large positive x: log(2x), written as log(x + x).
	return math.log((x + x))
function code(x)
	tmp = 0.0
	if (x <= -1.25)
		tmp = log(Float64(-0.5 / x));
	elseif (x <= 1.26)
		tmp = x;
	else
		tmp = log(Float64(x + x));
	end
	return tmp
end
function tmp_2 = code(x)
	tmp = 0.0;
	if (x <= -1.25)
		tmp = log((-0.5 / x));
	elseif (x <= 1.26)
		tmp = x;
	else
		tmp = log((x + x));
	end
	tmp_2 = tmp;
end
code[x_] := If[LessEqual[x, -1.25], N[Log[N[(-0.5 / x), $MachinePrecision]], $MachinePrecision], If[LessEqual[x, 1.26], x, N[Log[N[(x + x), $MachinePrecision]], $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x \leq -1.25:\\
\;\;\;\;\log \left(\frac{-0.5}{x}\right)\\

\mathbf{elif}\;x \leq 1.26:\\
\;\;\;\;x\\

\mathbf{else}:\\
\;\;\;\;\log \left(x + x\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if x < -1.25

    1. Initial program 2.5%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Step-by-step derivation
      1. +-commutative2.5%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{1 + x \cdot x}}\right) \]
      2. hypot-1-def3.7%

        \[\leadsto \log \left(x + \color{blue}{\mathsf{hypot}\left(1, x\right)}\right) \]
    3. Simplified3.7%

      \[\leadsto \color{blue}{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)} \]
    4. Taylor expanded in x around -inf 99.9%

      \[\leadsto \log \color{blue}{\left(\frac{-0.5}{x}\right)} \]

    if -1.25 < x < 1.26000000000000001

    1. Initial program 8.5%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Step-by-step derivation
      1. +-commutative8.5%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{1 + x \cdot x}}\right) \]
      2. hypot-1-def8.5%

        \[\leadsto \log \left(x + \color{blue}{\mathsf{hypot}\left(1, x\right)}\right) \]
    3. Simplified8.5%

      \[\leadsto \color{blue}{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)} \]
    4. Taylor expanded in x around 0 99.0%

      \[\leadsto \color{blue}{x} \]

    if 1.26000000000000001 < x

    1. Initial program 47.8%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Step-by-step derivation
      1. +-commutative47.8%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{1 + x \cdot x}}\right) \]
      2. hypot-1-def100.0%

        \[\leadsto \log \left(x + \color{blue}{\mathsf{hypot}\left(1, x\right)}\right) \]
    3. Simplified100.0%

      \[\leadsto \color{blue}{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)} \]
    4. Taylor expanded in x around inf 99.6%

      \[\leadsto \log \color{blue}{\left(2 \cdot x\right)} \]
    5. Step-by-step derivation
      1. count-299.6%

        \[\leadsto \log \color{blue}{\left(x + x\right)} \]
    6. Simplified99.6%

      \[\leadsto \log \color{blue}{\left(x + x\right)} \]
  3. Recombined 3 regimes into one program.
  4. Final simplification — 99.4%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -1.25:\\ \;\;\;\;\log \left(\frac{-0.5}{x}\right)\\ \mathbf{elif}\;x \leq 1.26:\\ \;\;\;\;x\\ \mathbf{else}:\\ \;\;\;\;\log \left(x + x\right)\\ \end{array} \]

Alternative 11: 75.0% accurate, 2.0× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq 1.26:\\ \;\;\;\;x\\ \mathbf{else}:\\ \;\;\;\;\log \left(x + x\right)\\ \end{array} \end{array} \]
(FPCore (x) :precision binary64 (if (<= x 1.26) x (log (+ x x))))
double code(double x) {
	double tmp;
	if (x <= 1.26) {
		tmp = x;
	} else {
		tmp = log((x + x));
	}
	return tmp;
}
real(8) function code(x)
    real(8), intent (in) :: x
    real(8) :: tmp
    if (x <= 1.26d0) then
        tmp = x
    else
        tmp = log((x + x))
    end if
    code = tmp
end function
// Two-regime approximation of asinh(x) (Alternative 11): identity near 0,
// log(2x) for large positive x.
public static double code(double x) {
	double tmp;
	if (x <= 1.26) {
		// First-order Taylor term, asinh(x) ~= x.
		tmp = x;
	} else {
		// log(2x), written as log(x + x).
		tmp = Math.log((x + x));
	}
	return tmp;
}
def code(x):
	# Two-regime approximation of asinh(x) (Alternative 11): identity near 0,
	# log(2x) for large positive x.
	if x <= 1.26:
		# First-order Taylor term, asinh(x) ~= x.
		return x
	# log(2x), written as log(x + x).
	return math.log((x + x))
function code(x)
	tmp = 0.0
	if (x <= 1.26)
		tmp = x;
	else
		tmp = log(Float64(x + x));
	end
	return tmp
end
function tmp_2 = code(x)
	tmp = 0.0;
	if (x <= 1.26)
		tmp = x;
	else
		tmp = log((x + x));
	end
	tmp_2 = tmp;
end
code[x_] := If[LessEqual[x, 1.26], x, N[Log[N[(x + x), $MachinePrecision]], $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x \leq 1.26:\\
\;\;\;\;x\\

\mathbf{else}:\\
\;\;\;\;\log \left(x + x\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if x < 1.26000000000000001

    1. Initial program 6.4%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Step-by-step derivation
      1. +-commutative6.4%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{1 + x \cdot x}}\right) \]
      2. hypot-1-def6.8%

        \[\leadsto \log \left(x + \color{blue}{\mathsf{hypot}\left(1, x\right)}\right) \]
    3. Simplified6.8%

      \[\leadsto \color{blue}{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)} \]
    4. Taylor expanded in x around 0 66.1%

      \[\leadsto \color{blue}{x} \]

    if 1.26000000000000001 < x

    1. Initial program 47.8%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Step-by-step derivation
      1. +-commutative47.8%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{1 + x \cdot x}}\right) \]
      2. hypot-1-def100.0%

        \[\leadsto \log \left(x + \color{blue}{\mathsf{hypot}\left(1, x\right)}\right) \]
    3. Simplified100.0%

      \[\leadsto \color{blue}{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)} \]
    4. Taylor expanded in x around inf 99.6%

      \[\leadsto \log \color{blue}{\left(2 \cdot x\right)} \]
    5. Step-by-step derivation
      1. count-299.6%

        \[\leadsto \log \color{blue}{\left(x + x\right)} \]
    6. Simplified99.6%

      \[\leadsto \log \color{blue}{\left(x + x\right)} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification — 74.6%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq 1.26:\\ \;\;\;\;x\\ \mathbf{else}:\\ \;\;\;\;\log \left(x + x\right)\\ \end{array} \]

Alternative 12: 52.3% accurate, 207.0× speedup?

\[\begin{array}{l} \\ x \end{array} \]
(FPCore (x) :precision binary64 x)
/* First-order Taylor expansion of asinh(x) around 0: asinh(x) ~= x
 * (Alternative 12 — fastest, lowest accuracy). */
double code(double x) {
	return x;
}
real(8) function code(x)
    real(8), intent (in) :: x
    code = x
end function
// First-order Taylor expansion of asinh(x) around 0: asinh(x) ~= x
// (Alternative 12 — fastest, lowest accuracy).
public static double code(double x) {
	return x;
}
def code(x):
	# First-order Taylor expansion of asinh(x) around 0: asinh(x) ~= x
	# (Alternative 12 — fastest, lowest accuracy).
	return x
function code(x)
	return x
end
function tmp = code(x)
	tmp = x;
end
code[x_] := x
\begin{array}{l}

\\
x
\end{array}
Derivation
  1. Initial program 16.9%

    \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
  2. Step-by-step derivation
    1. +-commutative16.9%

      \[\leadsto \log \left(x + \sqrt{\color{blue}{1 + x \cdot x}}\right) \]
    2. hypot-1-def30.5%

      \[\leadsto \log \left(x + \color{blue}{\mathsf{hypot}\left(1, x\right)}\right) \]
  3. Simplified30.5%

    \[\leadsto \color{blue}{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)} \]
  4. Taylor expanded in x around 0 50.7%

    \[\leadsto \color{blue}{x} \]
  5. Final simplification — 50.7%

    \[\leadsto x \]

Developer target: 30.0% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_0 := \sqrt{x \cdot x + 1}\\ \mathbf{if}\;x < 0:\\ \;\;\;\;\log \left(\frac{-1}{x - t_0}\right)\\ \mathbf{else}:\\ \;\;\;\;\log \left(x + t_0\right)\\ \end{array} \end{array} \]
(FPCore (x)
 :precision binary64
 (let* ((t_0 (sqrt (+ (* x x) 1.0))))
   (if (< x 0.0) (log (/ -1.0 (- x t_0))) (log (+ x t_0)))))
double code(double x) {
	double t_0 = sqrt(((x * x) + 1.0));
	double tmp;
	if (x < 0.0) {
		tmp = log((-1.0 / (x - t_0)));
	} else {
		tmp = log((x + t_0));
	}
	return tmp;
}
real(8) function code(x)
    real(8), intent (in) :: x
    real(8) :: t_0
    real(8) :: tmp
    t_0 = sqrt(((x * x) + 1.0d0))
    if (x < 0.0d0) then
        tmp = log(((-1.0d0) / (x - t_0)))
    else
        tmp = log((x + t_0))
    end if
    code = tmp
end function
// Developer target: asinh(x) = log(x + sqrt(x*x + 1)); for x < 0 the
// conjugate form -1/(x - sqrt(x*x + 1)) avoids cancellation between x
// and the square root.
public static double code(double x) {
	double t_0 = Math.sqrt(((x * x) + 1.0));
	double tmp;
	if (x < 0.0) {
		tmp = Math.log((-1.0 / (x - t_0)));
	} else {
		tmp = Math.log((x + t_0));
	}
	return tmp;
}
def code(x):
	# Developer target: asinh(x) = log(x + sqrt(x*x + 1)); for x < 0 the
	# conjugate form -1/(x - sqrt(x*x + 1)) avoids cancellation between x
	# and the square root.
	t_0 = math.sqrt(((x * x) + 1.0))
	if x < 0.0:
		return math.log((-1.0 / (x - t_0)))
	return math.log((x + t_0))
function code(x)
	t_0 = sqrt(Float64(Float64(x * x) + 1.0))
	tmp = 0.0
	if (x < 0.0)
		tmp = log(Float64(-1.0 / Float64(x - t_0)));
	else
		tmp = log(Float64(x + t_0));
	end
	return tmp
end
function tmp_2 = code(x)
	t_0 = sqrt(((x * x) + 1.0));
	tmp = 0.0;
	if (x < 0.0)
		tmp = log((-1.0 / (x - t_0)));
	else
		tmp = log((x + t_0));
	end
	tmp_2 = tmp;
end
code[x_] := Block[{t$95$0 = N[Sqrt[N[(N[(x * x), $MachinePrecision] + 1.0), $MachinePrecision]], $MachinePrecision]}, If[Less[x, 0.0], N[Log[N[(-1.0 / N[(x - t$95$0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision], N[Log[N[(x + t$95$0), $MachinePrecision]], $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
t_0 := \sqrt{x \cdot x + 1}\\
\mathbf{if}\;x < 0:\\
\;\;\;\;\log \left(\frac{-1}{x - t_0}\right)\\

\mathbf{else}:\\
\;\;\;\;\log \left(x + t_0\right)\\


\end{array}
\end{array}

Reproduce

?
herbie shell --seed 2023174 
(FPCore (x)
  :name "Hyperbolic arcsine"
  :precision binary64

  :herbie-target
  (if (< x 0.0) (log (/ -1.0 (- x (sqrt (+ (* x x) 1.0))))) (log (+ x (sqrt (+ (* x x) 1.0)))))

  (log (+ x (sqrt (+ (* x x) 1.0)))))