Hyperbolic arcsine

Percentage Accurate: 17.9% → 99.9%
Time: 11.7s
Alternatives: 17
Speedup: 18.8×

Specification

?
\[\begin{array}{l} \\ \log \left(x + \sqrt{x \cdot x + 1}\right) \end{array} \]
(FPCore (x) :precision binary64 (log (+ x (sqrt (+ (* x x) 1.0)))))
double code(double x) {
	return log((x + sqrt(((x * x) + 1.0))));
}
real(8) function code(x)
    ! asinh(x) via the identity log(x + sqrt(x*x + 1)).
    ! NOTE(review): x + sqrt(x*x + 1) cancels catastrophically for
    ! large negative x; see the report's alternatives for accurate forms.
    real(8), intent (in) :: x
    code = log((x + sqrt(((x * x) + 1.0d0))))
end function
/** asinh(x) via log(x + sqrt(x*x + 1)); inaccurate for large negative x. */
public static double code(double x) {
	double root = Math.sqrt(x * x + 1.0);
	return Math.log(x + root);
}
def code(x):
	"""asinh(x) via log(x + sqrt(x*x + 1)); inaccurate for large negative x."""
	root = math.sqrt(x * x + 1.0)
	return math.log(x + root)
function code(x)
	# asinh(x) via log(x + sqrt(x*x + 1)); cancels badly for large negative x.
	root = sqrt(Float64(Float64(x * x) + 1.0))
	return log(Float64(x + root))
end
function tmp = code(x)
	% asinh(x) via log(x + sqrt(x*x + 1)).
	% NOTE(review): inaccurate for large negative x (catastrophic cancellation).
	tmp = log((x + sqrt(((x * x) + 1.0))));
end
(* asinh(x) via log(x + sqrt(x*x + 1)), each step rounded to $MachinePrecision;
   inaccurate for large negative x. *)
code[x_] := N[Log[N[(x + N[Sqrt[N[(N[(x * x), $MachinePrecision] + 1.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}

\\
\log \left(x + \sqrt{x \cdot x + 1}\right)
\end{array}

Sampling outcomes in binary64 precision:

Local Percentage Accuracy vs ?

The average percentage accuracy by input value. The horizontal axis shows the value of an input variable; the variable is chosen in the title. The vertical axis is accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion. These can be toggled with the buttons below the plot. The line is an average, while the dots represent individual samples.

Accuracy vs Speed?

Herbie found 17 alternatives:

AlternativeAccuracySpeedup
The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed–accuracy tradeoffs.

Initial Program: 17.9% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \log \left(x + \sqrt{x \cdot x + 1}\right) \end{array} \]
(FPCore (x) :precision binary64 (log (+ x (sqrt (+ (* x x) 1.0)))))
double code(double x) {
	return log((x + sqrt(((x * x) + 1.0))));
}
real(8) function code(x)
    ! asinh(x) via the identity log(x + sqrt(x*x + 1)).
    ! NOTE(review): x + sqrt(x*x + 1) cancels catastrophically for
    ! large negative x; see the report's alternatives for accurate forms.
    real(8), intent (in) :: x
    code = log((x + sqrt(((x * x) + 1.0d0))))
end function
/** asinh(x) via log(x + sqrt(x*x + 1)); inaccurate for large negative x. */
public static double code(double x) {
	double root = Math.sqrt(x * x + 1.0);
	return Math.log(x + root);
}
def code(x):
	"""asinh(x) via log(x + sqrt(x*x + 1)); inaccurate for large negative x."""
	root = math.sqrt(x * x + 1.0)
	return math.log(x + root)
function code(x)
	# asinh(x) via log(x + sqrt(x*x + 1)); cancels badly for large negative x.
	root = sqrt(Float64(Float64(x * x) + 1.0))
	return log(Float64(x + root))
end
function tmp = code(x)
	% asinh(x) via log(x + sqrt(x*x + 1)).
	% NOTE(review): inaccurate for large negative x (catastrophic cancellation).
	tmp = log((x + sqrt(((x * x) + 1.0))));
end
(* asinh(x) via log(x + sqrt(x*x + 1)), each step rounded to $MachinePrecision;
   inaccurate for large negative x. *)
code[x_] := N[Log[N[(x + N[Sqrt[N[(N[(x * x), $MachinePrecision] + 1.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}

\\
\log \left(x + \sqrt{x \cdot x + 1}\right)
\end{array}

Alternative 1: 99.9% accurate, 0.6× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq -0.05:\\ \;\;\;\;-\mathsf{log1p}\left(\mathsf{hypot}\left(1, x\right) + \left(-1 - x\right)\right)\\ \mathbf{elif}\;x \leq 0.022:\\ \;\;\;\;x \cdot \left(1 + {x}^{2} \cdot \left({x}^{2} \cdot \left(0.075 + {x}^{2} \cdot -0.044642857142857144\right) - 0.16666666666666666\right)\right)\\ \mathbf{else}:\\ \;\;\;\;\mathsf{log1p}\left(\mathsf{hypot}\left(1, x\right) + x \cdot \left(1 - \frac{1}{x}\right)\right)\\ \end{array} \end{array} \]
(FPCore (x)
 :precision binary64
 (if (<= x -0.05)
   (- (log1p (+ (hypot 1.0 x) (- -1.0 x))))
   (if (<= x 0.022)
     (*
      x
      (+
       1.0
       (*
        (pow x 2.0)
        (-
         (* (pow x 2.0) (+ 0.075 (* (pow x 2.0) -0.044642857142857144)))
         0.16666666666666666))))
     (log1p (+ (hypot 1.0 x) (* x (- 1.0 (/ 1.0 x))))))))
double code(double x) {
	double tmp;
	if (x <= -0.05) {
		tmp = -log1p((hypot(1.0, x) + (-1.0 - x)));
	} else if (x <= 0.022) {
		tmp = x * (1.0 + (pow(x, 2.0) * ((pow(x, 2.0) * (0.075 + (pow(x, 2.0) * -0.044642857142857144))) - 0.16666666666666666)));
	} else {
		tmp = log1p((hypot(1.0, x) + (x * (1.0 - (1.0 / x)))));
	}
	return tmp;
}
/**
 * Accurate asinh(x) over three regimes (Herbie alternative 1):
 * reflected log1p/hypot form for x <= -0.05 (asinh is odd),
 * a Taylor polynomial near zero, and the log1p/hypot form otherwise.
 */
public static double code(double x) {
	if (x <= -0.05) {
		return -Math.log1p(Math.hypot(1.0, x) + (-1.0 - x));
	}
	if (x <= 0.022) {
		double x2 = Math.pow(x, 2.0);
		return x * (1.0 + x2 * (x2 * (0.075 + x2 * -0.044642857142857144) - 0.16666666666666666));
	}
	return Math.log1p(Math.hypot(1.0, x) + x * (1.0 - 1.0 / x));
}
def code(x):
	"""Accurate asinh(x) over three regimes (Herbie alternative 1).

	Uses a reflected log1p/hypot form for x <= -0.05 (asinh is odd),
	an odd Taylor polynomial near zero, and the log1p/hypot form otherwise.
	"""
	if x <= -0.05:
		return -math.log1p(math.hypot(1.0, x) + (-1.0 - x))
	if x <= 0.022:
		x2 = math.pow(x, 2.0)
		return x * (1.0 + x2 * (x2 * (0.075 + x2 * -0.044642857142857144) - 0.16666666666666666))
	return math.log1p(math.hypot(1.0, x) + x * (1.0 - 1.0 / x))
function code(x)
	# Accurate asinh(x) over three regimes (Herbie alternative 1):
	# reflected log1p/hypot for x <= -0.05, Taylor polynomial near 0,
	# log1p/hypot form otherwise.
	if x <= -0.05
		return Float64(-log1p(Float64(hypot(1.0, x) + Float64(-1.0 - x))))
	elseif x <= 0.022
		x2 = x ^ 2.0
		return Float64(x * Float64(1.0 + Float64(x2 * Float64(Float64(x2 * Float64(0.075 + Float64(x2 * -0.044642857142857144))) - 0.16666666666666666))))
	else
		return log1p(Float64(hypot(1.0, x) + Float64(x * Float64(1.0 - Float64(1.0 / x)))))
	end
end
(* Accurate asinh(x), Herbie alternative 1: reflected log1p/hypot form for
   x <= -0.05, Taylor polynomial for -0.05 < x <= 0.022, log1p/hypot otherwise. *)
code[x_] := If[LessEqual[x, -0.05], (-N[Log[1 + N[(N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision] + N[(-1.0 - x), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), If[LessEqual[x, 0.022], N[(x * N[(1.0 + N[(N[Power[x, 2.0], $MachinePrecision] * N[(N[(N[Power[x, 2.0], $MachinePrecision] * N[(0.075 + N[(N[Power[x, 2.0], $MachinePrecision] * -0.044642857142857144), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - 0.16666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[Log[1 + N[(N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision] + N[(x * N[(1.0 - N[(1.0 / x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x \leq -0.05:\\
\;\;\;\;-\mathsf{log1p}\left(\mathsf{hypot}\left(1, x\right) + \left(-1 - x\right)\right)\\

\mathbf{elif}\;x \leq 0.022:\\
\;\;\;\;x \cdot \left(1 + {x}^{2} \cdot \left({x}^{2} \cdot \left(0.075 + {x}^{2} \cdot -0.044642857142857144\right) - 0.16666666666666666\right)\right)\\

\mathbf{else}:\\
\;\;\;\;\mathsf{log1p}\left(\mathsf{hypot}\left(1, x\right) + x \cdot \left(1 - \frac{1}{x}\right)\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if x < -0.050000000000000003

    1. Initial program 5.1%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Add Preprocessing
    3. Step-by-step derivation
      1. flip-+4.8%

        \[\leadsto \log \color{blue}{\left(\frac{x \cdot x - \sqrt{x \cdot x + 1} \cdot \sqrt{x \cdot x + 1}}{x - \sqrt{x \cdot x + 1}}\right)} \]
      2. frac-2neg4.8%

        \[\leadsto \log \color{blue}{\left(\frac{-\left(x \cdot x - \sqrt{x \cdot x + 1} \cdot \sqrt{x \cdot x + 1}\right)}{-\left(x - \sqrt{x \cdot x + 1}\right)}\right)} \]
      3. log-div4.8%

        \[\leadsto \color{blue}{\log \left(-\left(x \cdot x - \sqrt{x \cdot x + 1} \cdot \sqrt{x \cdot x + 1}\right)\right) - \log \left(-\left(x - \sqrt{x \cdot x + 1}\right)\right)} \]
      4. add-sqr-sqrt4.8%

        \[\leadsto \log \left(-\left(x \cdot x - \color{blue}{\left(x \cdot x + 1\right)}\right)\right) - \log \left(-\left(x - \sqrt{x \cdot x + 1}\right)\right) \]
      5. pow24.8%

        \[\leadsto \log \left(-\left(\color{blue}{{x}^{2}} - \left(x \cdot x + 1\right)\right)\right) - \log \left(-\left(x - \sqrt{x \cdot x + 1}\right)\right) \]
      6. fma-define4.8%

        \[\leadsto \log \left(-\left({x}^{2} - \color{blue}{\mathsf{fma}\left(x, x, 1\right)}\right)\right) - \log \left(-\left(x - \sqrt{x \cdot x + 1}\right)\right) \]
      7. +-commutative4.8%

        \[\leadsto \log \left(-\left({x}^{2} - \mathsf{fma}\left(x, x, 1\right)\right)\right) - \log \left(-\left(x - \sqrt{\color{blue}{1 + x \cdot x}}\right)\right) \]
      8. hypot-1-def4.8%

        \[\leadsto \log \left(-\left({x}^{2} - \mathsf{fma}\left(x, x, 1\right)\right)\right) - \log \left(-\left(x - \color{blue}{\mathsf{hypot}\left(1, x\right)}\right)\right) \]
    4. Applied egg-rr4.8%

      \[\leadsto \color{blue}{\log \left(-\left({x}^{2} - \mathsf{fma}\left(x, x, 1\right)\right)\right) - \log \left(-\left(x - \mathsf{hypot}\left(1, x\right)\right)\right)} \]
    5. Step-by-step derivation
      1. fma-undefine4.8%

        \[\leadsto \log \left(-\left({x}^{2} - \color{blue}{\left(x \cdot x + 1\right)}\right)\right) - \log \left(-\left(x - \mathsf{hypot}\left(1, x\right)\right)\right) \]
      2. unpow24.8%

        \[\leadsto \log \left(-\left({x}^{2} - \left(\color{blue}{{x}^{2}} + 1\right)\right)\right) - \log \left(-\left(x - \mathsf{hypot}\left(1, x\right)\right)\right) \]
      3. associate--r+49.0%

        \[\leadsto \log \left(-\color{blue}{\left(\left({x}^{2} - {x}^{2}\right) - 1\right)}\right) - \log \left(-\left(x - \mathsf{hypot}\left(1, x\right)\right)\right) \]
      4. +-inverses99.9%

        \[\leadsto \log \left(-\left(\color{blue}{0} - 1\right)\right) - \log \left(-\left(x - \mathsf{hypot}\left(1, x\right)\right)\right) \]
      5. metadata-eval99.9%

        \[\leadsto \log \left(-\color{blue}{-1}\right) - \log \left(-\left(x - \mathsf{hypot}\left(1, x\right)\right)\right) \]
      6. metadata-eval99.9%

        \[\leadsto \log \color{blue}{1} - \log \left(-\left(x - \mathsf{hypot}\left(1, x\right)\right)\right) \]
      7. metadata-eval99.9%

        \[\leadsto \color{blue}{0} - \log \left(-\left(x - \mathsf{hypot}\left(1, x\right)\right)\right) \]
      8. neg-sub099.9%

        \[\leadsto 0 - \log \color{blue}{\left(0 - \left(x - \mathsf{hypot}\left(1, x\right)\right)\right)} \]
      9. associate--r-99.9%

        \[\leadsto 0 - \log \color{blue}{\left(\left(0 - x\right) + \mathsf{hypot}\left(1, x\right)\right)} \]
      10. neg-sub099.9%

        \[\leadsto 0 - \log \left(\color{blue}{\left(-x\right)} + \mathsf{hypot}\left(1, x\right)\right) \]
      11. +-commutative99.9%

        \[\leadsto 0 - \log \color{blue}{\left(\mathsf{hypot}\left(1, x\right) + \left(-x\right)\right)} \]
      12. sub-neg99.9%

        \[\leadsto 0 - \log \color{blue}{\left(\mathsf{hypot}\left(1, x\right) - x\right)} \]
      13. neg-sub099.9%

        \[\leadsto \color{blue}{-\log \left(\mathsf{hypot}\left(1, x\right) - x\right)} \]
    6. Simplified99.9%

      \[\leadsto \color{blue}{-\log \left(\mathsf{hypot}\left(1, x\right) - x\right)} \]
    7. Step-by-step derivation
      1. add-sqr-sqrt99.1%

        \[\leadsto -\color{blue}{\sqrt{\log \left(\mathsf{hypot}\left(1, x\right) - x\right)} \cdot \sqrt{\log \left(\mathsf{hypot}\left(1, x\right) - x\right)}} \]
      2. sqrt-unprod99.9%

        \[\leadsto -\color{blue}{\sqrt{\log \left(\mathsf{hypot}\left(1, x\right) - x\right) \cdot \log \left(\mathsf{hypot}\left(1, x\right) - x\right)}} \]
      3. sqr-neg99.9%

        \[\leadsto -\sqrt{\color{blue}{\left(-\log \left(\mathsf{hypot}\left(1, x\right) - x\right)\right) \cdot \left(-\log \left(\mathsf{hypot}\left(1, x\right) - x\right)\right)}} \]
      4. sqrt-unprod0.0%

        \[\leadsto -\color{blue}{\sqrt{-\log \left(\mathsf{hypot}\left(1, x\right) - x\right)} \cdot \sqrt{-\log \left(\mathsf{hypot}\left(1, x\right) - x\right)}} \]
      5. add-sqr-sqrt1.5%

        \[\leadsto -\color{blue}{\left(-\log \left(\mathsf{hypot}\left(1, x\right) - x\right)\right)} \]
      6. log1p-expm1-u0.7%

        \[\leadsto -\color{blue}{\mathsf{log1p}\left(\mathsf{expm1}\left(-\log \left(\mathsf{hypot}\left(1, x\right) - x\right)\right)\right)} \]
      7. add-sqr-sqrt0.0%

        \[\leadsto -\mathsf{log1p}\left(\mathsf{expm1}\left(\color{blue}{\sqrt{-\log \left(\mathsf{hypot}\left(1, x\right) - x\right)} \cdot \sqrt{-\log \left(\mathsf{hypot}\left(1, x\right) - x\right)}}\right)\right) \]
      8. sqrt-unprod99.9%

        \[\leadsto -\mathsf{log1p}\left(\mathsf{expm1}\left(\color{blue}{\sqrt{\left(-\log \left(\mathsf{hypot}\left(1, x\right) - x\right)\right) \cdot \left(-\log \left(\mathsf{hypot}\left(1, x\right) - x\right)\right)}}\right)\right) \]
      9. sqr-neg99.9%

        \[\leadsto -\mathsf{log1p}\left(\mathsf{expm1}\left(\sqrt{\color{blue}{\log \left(\mathsf{hypot}\left(1, x\right) - x\right) \cdot \log \left(\mathsf{hypot}\left(1, x\right) - x\right)}}\right)\right) \]
      10. sqrt-unprod99.1%

        \[\leadsto -\mathsf{log1p}\left(\mathsf{expm1}\left(\color{blue}{\sqrt{\log \left(\mathsf{hypot}\left(1, x\right) - x\right)} \cdot \sqrt{\log \left(\mathsf{hypot}\left(1, x\right) - x\right)}}\right)\right) \]
      11. add-sqr-sqrt99.9%

        \[\leadsto -\mathsf{log1p}\left(\mathsf{expm1}\left(\color{blue}{\log \left(\mathsf{hypot}\left(1, x\right) - x\right)}\right)\right) \]
      12. expm1-undefine99.9%

        \[\leadsto -\mathsf{log1p}\left(\color{blue}{e^{\log \left(\mathsf{hypot}\left(1, x\right) - x\right)} - 1}\right) \]
      13. add-exp-log99.9%

        \[\leadsto -\mathsf{log1p}\left(\color{blue}{\left(\mathsf{hypot}\left(1, x\right) - x\right)} - 1\right) \]
    8. Applied egg-rr99.9%

      \[\leadsto -\color{blue}{\mathsf{log1p}\left(\left(\mathsf{hypot}\left(1, x\right) - x\right) - 1\right)} \]
    9. Step-by-step derivation
      1. sub-neg99.9%

        \[\leadsto -\mathsf{log1p}\left(\color{blue}{\left(\mathsf{hypot}\left(1, x\right) - x\right) + \left(-1\right)}\right) \]
      2. metadata-eval99.9%

        \[\leadsto -\mathsf{log1p}\left(\left(\mathsf{hypot}\left(1, x\right) - x\right) + \color{blue}{-1}\right) \]
      3. associate-+l-100.0%

        \[\leadsto -\mathsf{log1p}\left(\color{blue}{\mathsf{hypot}\left(1, x\right) - \left(x - -1\right)}\right) \]
    10. Simplified100.0%

      \[\leadsto -\color{blue}{\mathsf{log1p}\left(\mathsf{hypot}\left(1, x\right) - \left(x - -1\right)\right)} \]

    if -0.050000000000000003 < x < 0.021999999999999999

    1. Initial program 9.9%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Add Preprocessing
    3. Taylor expanded in x around 0 100.0%

      \[\leadsto \color{blue}{x \cdot \left(1 + {x}^{2} \cdot \left({x}^{2} \cdot \left(0.075 + -0.044642857142857144 \cdot {x}^{2}\right) - 0.16666666666666666\right)\right)} \]

    if 0.021999999999999999 < x

    1. Initial program 57.7%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Add Preprocessing
    3. Step-by-step derivation
      1. add-sqr-sqrt57.7%

        \[\leadsto \log \color{blue}{\left(\sqrt{x + \sqrt{x \cdot x + 1}} \cdot \sqrt{x + \sqrt{x \cdot x + 1}}\right)} \]
      2. pow257.7%

        \[\leadsto \log \color{blue}{\left({\left(\sqrt{x + \sqrt{x \cdot x + 1}}\right)}^{2}\right)} \]
      3. log-pow57.8%

        \[\leadsto \color{blue}{2 \cdot \log \left(\sqrt{x + \sqrt{x \cdot x + 1}}\right)} \]
      4. +-commutative57.8%

        \[\leadsto 2 \cdot \log \left(\sqrt{x + \sqrt{\color{blue}{1 + x \cdot x}}}\right) \]
      5. hypot-1-def100.0%

        \[\leadsto 2 \cdot \log \left(\sqrt{x + \color{blue}{\mathsf{hypot}\left(1, x\right)}}\right) \]
    4. Applied egg-rr100.0%

      \[\leadsto \color{blue}{2 \cdot \log \left(\sqrt{x + \mathsf{hypot}\left(1, x\right)}\right)} \]
    5. Step-by-step derivation
      1. log1p-expm1-u100.0%

        \[\leadsto \color{blue}{\mathsf{log1p}\left(\mathsf{expm1}\left(2 \cdot \log \left(\sqrt{x + \mathsf{hypot}\left(1, x\right)}\right)\right)\right)} \]
      2. log1p-undefine99.9%

        \[\leadsto \color{blue}{\log \left(1 + \mathsf{expm1}\left(2 \cdot \log \left(\sqrt{x + \mathsf{hypot}\left(1, x\right)}\right)\right)\right)} \]
      3. expm1-undefine99.9%

        \[\leadsto \log \left(1 + \color{blue}{\left(e^{2 \cdot \log \left(\sqrt{x + \mathsf{hypot}\left(1, x\right)}\right)} - 1\right)}\right) \]
      4. *-commutative99.9%

        \[\leadsto \log \left(1 + \left(e^{\color{blue}{\log \left(\sqrt{x + \mathsf{hypot}\left(1, x\right)}\right) \cdot 2}} - 1\right)\right) \]
      5. exp-to-pow99.9%

        \[\leadsto \log \left(1 + \left(\color{blue}{{\left(\sqrt{x + \mathsf{hypot}\left(1, x\right)}\right)}^{2}} - 1\right)\right) \]
      6. pow299.9%

        \[\leadsto \log \left(1 + \left(\color{blue}{\sqrt{x + \mathsf{hypot}\left(1, x\right)} \cdot \sqrt{x + \mathsf{hypot}\left(1, x\right)}} - 1\right)\right) \]
      7. add-sqr-sqrt99.9%

        \[\leadsto \log \left(1 + \left(\color{blue}{\left(x + \mathsf{hypot}\left(1, x\right)\right)} - 1\right)\right) \]
    6. Applied egg-rr99.9%

      \[\leadsto \color{blue}{\log \left(1 + \left(\left(x + \mathsf{hypot}\left(1, x\right)\right) - 1\right)\right)} \]
    7. Step-by-step derivation
      1. log1p-define99.9%

        \[\leadsto \color{blue}{\mathsf{log1p}\left(\left(x + \mathsf{hypot}\left(1, x\right)\right) - 1\right)} \]
      2. sub-neg99.9%

        \[\leadsto \mathsf{log1p}\left(\color{blue}{\left(x + \mathsf{hypot}\left(1, x\right)\right) + \left(-1\right)}\right) \]
      3. +-commutative99.9%

        \[\leadsto \mathsf{log1p}\left(\color{blue}{\left(\mathsf{hypot}\left(1, x\right) + x\right)} + \left(-1\right)\right) \]
      4. metadata-eval99.9%

        \[\leadsto \mathsf{log1p}\left(\left(\mathsf{hypot}\left(1, x\right) + x\right) + \color{blue}{-1}\right) \]
      5. associate-+l+99.9%

        \[\leadsto \mathsf{log1p}\left(\color{blue}{\mathsf{hypot}\left(1, x\right) + \left(x + -1\right)}\right) \]
    8. Simplified99.9%

      \[\leadsto \color{blue}{\mathsf{log1p}\left(\mathsf{hypot}\left(1, x\right) + \left(x + -1\right)\right)} \]
    9. Taylor expanded in x around inf 100.0%

      \[\leadsto \mathsf{log1p}\left(\mathsf{hypot}\left(1, x\right) + \color{blue}{x \cdot \left(1 - \frac{1}{x}\right)}\right) \]
  3. Recombined 3 regimes into one program.
  4. Final simplification100.0%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -0.05:\\ \;\;\;\;-\mathsf{log1p}\left(\mathsf{hypot}\left(1, x\right) + \left(-1 - x\right)\right)\\ \mathbf{elif}\;x \leq 0.022:\\ \;\;\;\;x \cdot \left(1 + {x}^{2} \cdot \left({x}^{2} \cdot \left(0.075 + {x}^{2} \cdot -0.044642857142857144\right) - 0.16666666666666666\right)\right)\\ \mathbf{else}:\\ \;\;\;\;\mathsf{log1p}\left(\mathsf{hypot}\left(1, x\right) + x \cdot \left(1 - \frac{1}{x}\right)\right)\\ \end{array} \]
  5. Add Preprocessing

Alternative 2: 99.9% accurate, 0.6× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq -0.014:\\ \;\;\;\;-\mathsf{log1p}\left(\mathsf{hypot}\left(1, x\right) + \left(-1 - x\right)\right)\\ \mathbf{elif}\;x \leq 0.0138:\\ \;\;\;\;\mathsf{log1p}\left(x \cdot \left(1 + x \cdot \left(0.5 + {x}^{2} \cdot \left({x}^{2} \cdot 0.0625 - 0.125\right)\right)\right)\right)\\ \mathbf{else}:\\ \;\;\;\;\mathsf{log1p}\left(\mathsf{hypot}\left(1, x\right) + x \cdot \left(1 - \frac{1}{x}\right)\right)\\ \end{array} \end{array} \]
(FPCore (x)
 :precision binary64
 (if (<= x -0.014)
   (- (log1p (+ (hypot 1.0 x) (- -1.0 x))))
   (if (<= x 0.0138)
     (log1p
      (*
       x
       (+ 1.0 (* x (+ 0.5 (* (pow x 2.0) (- (* (pow x 2.0) 0.0625) 0.125)))))))
     (log1p (+ (hypot 1.0 x) (* x (- 1.0 (/ 1.0 x))))))))
double code(double x) {
	double tmp;
	if (x <= -0.014) {
		tmp = -log1p((hypot(1.0, x) + (-1.0 - x)));
	} else if (x <= 0.0138) {
		tmp = log1p((x * (1.0 + (x * (0.5 + (pow(x, 2.0) * ((pow(x, 2.0) * 0.0625) - 0.125)))))));
	} else {
		tmp = log1p((hypot(1.0, x) + (x * (1.0 - (1.0 / x)))));
	}
	return tmp;
}
/**
 * Accurate asinh(x) over three regimes (Herbie alternative 2):
 * reflected log1p/hypot form for x <= -0.014 (asinh is odd),
 * log1p of a polynomial near zero, and the log1p/hypot form otherwise.
 */
public static double code(double x) {
	if (x <= -0.014) {
		return -Math.log1p(Math.hypot(1.0, x) + (-1.0 - x));
	}
	if (x <= 0.0138) {
		double x2 = Math.pow(x, 2.0);
		return Math.log1p(x * (1.0 + x * (0.5 + x2 * (x2 * 0.0625 - 0.125))));
	}
	return Math.log1p(Math.hypot(1.0, x) + x * (1.0 - 1.0 / x));
}
def code(x):
	"""Accurate asinh(x) over three regimes (Herbie alternative 2).

	Uses a reflected log1p/hypot form for x <= -0.014 (asinh is odd),
	log1p of a polynomial near zero, and the log1p/hypot form otherwise.
	"""
	if x <= -0.014:
		return -math.log1p(math.hypot(1.0, x) + (-1.0 - x))
	if x <= 0.0138:
		x2 = math.pow(x, 2.0)
		return math.log1p(x * (1.0 + x * (0.5 + x2 * (x2 * 0.0625 - 0.125))))
	return math.log1p(math.hypot(1.0, x) + x * (1.0 - 1.0 / x))
function code(x)
	# Accurate asinh(x) over three regimes (Herbie alternative 2):
	# reflected log1p/hypot for x <= -0.014, log1p of a polynomial near 0,
	# log1p/hypot form otherwise.
	if x <= -0.014
		return Float64(-log1p(Float64(hypot(1.0, x) + Float64(-1.0 - x))))
	elseif x <= 0.0138
		x2 = x ^ 2.0
		return log1p(Float64(x * Float64(1.0 + Float64(x * Float64(0.5 + Float64(x2 * Float64(Float64(x2 * 0.0625) - 0.125)))))))
	else
		return log1p(Float64(hypot(1.0, x) + Float64(x * Float64(1.0 - Float64(1.0 / x)))))
	end
end
(* Accurate asinh(x), Herbie alternative 2: reflected log1p/hypot form for
   x <= -0.014, log1p of a polynomial for -0.014 < x <= 0.0138, log1p/hypot otherwise. *)
code[x_] := If[LessEqual[x, -0.014], (-N[Log[1 + N[(N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision] + N[(-1.0 - x), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), If[LessEqual[x, 0.0138], N[Log[1 + N[(x * N[(1.0 + N[(x * N[(0.5 + N[(N[Power[x, 2.0], $MachinePrecision] * N[(N[(N[Power[x, 2.0], $MachinePrecision] * 0.0625), $MachinePrecision] - 0.125), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision], N[Log[1 + N[(N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision] + N[(x * N[(1.0 - N[(1.0 / x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x \leq -0.014:\\
\;\;\;\;-\mathsf{log1p}\left(\mathsf{hypot}\left(1, x\right) + \left(-1 - x\right)\right)\\

\mathbf{elif}\;x \leq 0.0138:\\
\;\;\;\;\mathsf{log1p}\left(x \cdot \left(1 + x \cdot \left(0.5 + {x}^{2} \cdot \left({x}^{2} \cdot 0.0625 - 0.125\right)\right)\right)\right)\\

\mathbf{else}:\\
\;\;\;\;\mathsf{log1p}\left(\mathsf{hypot}\left(1, x\right) + x \cdot \left(1 - \frac{1}{x}\right)\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if x < -0.0140000000000000003

    1. Initial program 5.1%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Add Preprocessing
    3. Step-by-step derivation
      1. flip-+4.8%

        \[\leadsto \log \color{blue}{\left(\frac{x \cdot x - \sqrt{x \cdot x + 1} \cdot \sqrt{x \cdot x + 1}}{x - \sqrt{x \cdot x + 1}}\right)} \]
      2. frac-2neg4.8%

        \[\leadsto \log \color{blue}{\left(\frac{-\left(x \cdot x - \sqrt{x \cdot x + 1} \cdot \sqrt{x \cdot x + 1}\right)}{-\left(x - \sqrt{x \cdot x + 1}\right)}\right)} \]
      3. log-div4.8%

        \[\leadsto \color{blue}{\log \left(-\left(x \cdot x - \sqrt{x \cdot x + 1} \cdot \sqrt{x \cdot x + 1}\right)\right) - \log \left(-\left(x - \sqrt{x \cdot x + 1}\right)\right)} \]
      4. add-sqr-sqrt4.8%

        \[\leadsto \log \left(-\left(x \cdot x - \color{blue}{\left(x \cdot x + 1\right)}\right)\right) - \log \left(-\left(x - \sqrt{x \cdot x + 1}\right)\right) \]
      5. pow24.8%

        \[\leadsto \log \left(-\left(\color{blue}{{x}^{2}} - \left(x \cdot x + 1\right)\right)\right) - \log \left(-\left(x - \sqrt{x \cdot x + 1}\right)\right) \]
      6. fma-define4.8%

        \[\leadsto \log \left(-\left({x}^{2} - \color{blue}{\mathsf{fma}\left(x, x, 1\right)}\right)\right) - \log \left(-\left(x - \sqrt{x \cdot x + 1}\right)\right) \]
      7. +-commutative4.8%

        \[\leadsto \log \left(-\left({x}^{2} - \mathsf{fma}\left(x, x, 1\right)\right)\right) - \log \left(-\left(x - \sqrt{\color{blue}{1 + x \cdot x}}\right)\right) \]
      8. hypot-1-def4.8%

        \[\leadsto \log \left(-\left({x}^{2} - \mathsf{fma}\left(x, x, 1\right)\right)\right) - \log \left(-\left(x - \color{blue}{\mathsf{hypot}\left(1, x\right)}\right)\right) \]
    4. Applied egg-rr4.8%

      \[\leadsto \color{blue}{\log \left(-\left({x}^{2} - \mathsf{fma}\left(x, x, 1\right)\right)\right) - \log \left(-\left(x - \mathsf{hypot}\left(1, x\right)\right)\right)} \]
    5. Step-by-step derivation
      1. fma-undefine4.8%

        \[\leadsto \log \left(-\left({x}^{2} - \color{blue}{\left(x \cdot x + 1\right)}\right)\right) - \log \left(-\left(x - \mathsf{hypot}\left(1, x\right)\right)\right) \]
      2. unpow24.8%

        \[\leadsto \log \left(-\left({x}^{2} - \left(\color{blue}{{x}^{2}} + 1\right)\right)\right) - \log \left(-\left(x - \mathsf{hypot}\left(1, x\right)\right)\right) \]
      3. associate--r+49.0%

        \[\leadsto \log \left(-\color{blue}{\left(\left({x}^{2} - {x}^{2}\right) - 1\right)}\right) - \log \left(-\left(x - \mathsf{hypot}\left(1, x\right)\right)\right) \]
      4. +-inverses99.9%

        \[\leadsto \log \left(-\left(\color{blue}{0} - 1\right)\right) - \log \left(-\left(x - \mathsf{hypot}\left(1, x\right)\right)\right) \]
      5. metadata-eval99.9%

        \[\leadsto \log \left(-\color{blue}{-1}\right) - \log \left(-\left(x - \mathsf{hypot}\left(1, x\right)\right)\right) \]
      6. metadata-eval99.9%

        \[\leadsto \log \color{blue}{1} - \log \left(-\left(x - \mathsf{hypot}\left(1, x\right)\right)\right) \]
      7. metadata-eval99.9%

        \[\leadsto \color{blue}{0} - \log \left(-\left(x - \mathsf{hypot}\left(1, x\right)\right)\right) \]
      8. neg-sub099.9%

        \[\leadsto 0 - \log \color{blue}{\left(0 - \left(x - \mathsf{hypot}\left(1, x\right)\right)\right)} \]
      9. associate--r-99.9%

        \[\leadsto 0 - \log \color{blue}{\left(\left(0 - x\right) + \mathsf{hypot}\left(1, x\right)\right)} \]
      10. neg-sub099.9%

        \[\leadsto 0 - \log \left(\color{blue}{\left(-x\right)} + \mathsf{hypot}\left(1, x\right)\right) \]
      11. +-commutative99.9%

        \[\leadsto 0 - \log \color{blue}{\left(\mathsf{hypot}\left(1, x\right) + \left(-x\right)\right)} \]
      12. sub-neg99.9%

        \[\leadsto 0 - \log \color{blue}{\left(\mathsf{hypot}\left(1, x\right) - x\right)} \]
      13. neg-sub099.9%

        \[\leadsto \color{blue}{-\log \left(\mathsf{hypot}\left(1, x\right) - x\right)} \]
    6. Simplified99.9%

      \[\leadsto \color{blue}{-\log \left(\mathsf{hypot}\left(1, x\right) - x\right)} \]
    7. Step-by-step derivation
      1. add-sqr-sqrt99.1%

        \[\leadsto -\color{blue}{\sqrt{\log \left(\mathsf{hypot}\left(1, x\right) - x\right)} \cdot \sqrt{\log \left(\mathsf{hypot}\left(1, x\right) - x\right)}} \]
      2. sqrt-unprod99.9%

        \[\leadsto -\color{blue}{\sqrt{\log \left(\mathsf{hypot}\left(1, x\right) - x\right) \cdot \log \left(\mathsf{hypot}\left(1, x\right) - x\right)}} \]
      3. sqr-neg99.9%

        \[\leadsto -\sqrt{\color{blue}{\left(-\log \left(\mathsf{hypot}\left(1, x\right) - x\right)\right) \cdot \left(-\log \left(\mathsf{hypot}\left(1, x\right) - x\right)\right)}} \]
      4. sqrt-unprod0.0%

        \[\leadsto -\color{blue}{\sqrt{-\log \left(\mathsf{hypot}\left(1, x\right) - x\right)} \cdot \sqrt{-\log \left(\mathsf{hypot}\left(1, x\right) - x\right)}} \]
      5. add-sqr-sqrt1.5%

        \[\leadsto -\color{blue}{\left(-\log \left(\mathsf{hypot}\left(1, x\right) - x\right)\right)} \]
      6. log1p-expm1-u0.7%

        \[\leadsto -\color{blue}{\mathsf{log1p}\left(\mathsf{expm1}\left(-\log \left(\mathsf{hypot}\left(1, x\right) - x\right)\right)\right)} \]
      7. add-sqr-sqrt0.0%

        \[\leadsto -\mathsf{log1p}\left(\mathsf{expm1}\left(\color{blue}{\sqrt{-\log \left(\mathsf{hypot}\left(1, x\right) - x\right)} \cdot \sqrt{-\log \left(\mathsf{hypot}\left(1, x\right) - x\right)}}\right)\right) \]
      8. sqrt-unprod99.9%

        \[\leadsto -\mathsf{log1p}\left(\mathsf{expm1}\left(\color{blue}{\sqrt{\left(-\log \left(\mathsf{hypot}\left(1, x\right) - x\right)\right) \cdot \left(-\log \left(\mathsf{hypot}\left(1, x\right) - x\right)\right)}}\right)\right) \]
      9. sqr-neg99.9%

        \[\leadsto -\mathsf{log1p}\left(\mathsf{expm1}\left(\sqrt{\color{blue}{\log \left(\mathsf{hypot}\left(1, x\right) - x\right) \cdot \log \left(\mathsf{hypot}\left(1, x\right) - x\right)}}\right)\right) \]
      10. sqrt-unprod99.1%

        \[\leadsto -\mathsf{log1p}\left(\mathsf{expm1}\left(\color{blue}{\sqrt{\log \left(\mathsf{hypot}\left(1, x\right) - x\right)} \cdot \sqrt{\log \left(\mathsf{hypot}\left(1, x\right) - x\right)}}\right)\right) \]
      11. add-sqr-sqrt99.9%

        \[\leadsto -\mathsf{log1p}\left(\mathsf{expm1}\left(\color{blue}{\log \left(\mathsf{hypot}\left(1, x\right) - x\right)}\right)\right) \]
      12. expm1-undefine99.9%

        \[\leadsto -\mathsf{log1p}\left(\color{blue}{e^{\log \left(\mathsf{hypot}\left(1, x\right) - x\right)} - 1}\right) \]
      13. add-exp-log99.9%

        \[\leadsto -\mathsf{log1p}\left(\color{blue}{\left(\mathsf{hypot}\left(1, x\right) - x\right)} - 1\right) \]
    8. Applied egg-rr99.9%

      \[\leadsto -\color{blue}{\mathsf{log1p}\left(\left(\mathsf{hypot}\left(1, x\right) - x\right) - 1\right)} \]
    9. Step-by-step derivation
      1. sub-neg99.9%

        \[\leadsto -\mathsf{log1p}\left(\color{blue}{\left(\mathsf{hypot}\left(1, x\right) - x\right) + \left(-1\right)}\right) \]
      2. metadata-eval99.9%

        \[\leadsto -\mathsf{log1p}\left(\left(\mathsf{hypot}\left(1, x\right) - x\right) + \color{blue}{-1}\right) \]
      3. associate-+l-100.0%

        \[\leadsto -\mathsf{log1p}\left(\color{blue}{\mathsf{hypot}\left(1, x\right) - \left(x - -1\right)}\right) \]
    10. Simplified100.0%

      \[\leadsto -\color{blue}{\mathsf{log1p}\left(\mathsf{hypot}\left(1, x\right) - \left(x - -1\right)\right)} \]

    if -0.0140000000000000003 < x < 0.0138

    1. Initial program 9.9%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Add Preprocessing
    3. Step-by-step derivation
      1. add-sqr-sqrt9.9%

        \[\leadsto \log \color{blue}{\left(\sqrt{x + \sqrt{x \cdot x + 1}} \cdot \sqrt{x + \sqrt{x \cdot x + 1}}\right)} \]
      2. pow29.9%

        \[\leadsto \log \color{blue}{\left({\left(\sqrt{x + \sqrt{x \cdot x + 1}}\right)}^{2}\right)} \]
      3. log-pow9.9%

        \[\leadsto \color{blue}{2 \cdot \log \left(\sqrt{x + \sqrt{x \cdot x + 1}}\right)} \]
      4. +-commutative9.9%

        \[\leadsto 2 \cdot \log \left(\sqrt{x + \sqrt{\color{blue}{1 + x \cdot x}}}\right) \]
      5. hypot-1-def9.8%

        \[\leadsto 2 \cdot \log \left(\sqrt{x + \color{blue}{\mathsf{hypot}\left(1, x\right)}}\right) \]
    4. Applied egg-rr9.8%

      \[\leadsto \color{blue}{2 \cdot \log \left(\sqrt{x + \mathsf{hypot}\left(1, x\right)}\right)} \]
    5. Step-by-step derivation
      1. log1p-expm1-u9.8%

        \[\leadsto \color{blue}{\mathsf{log1p}\left(\mathsf{expm1}\left(2 \cdot \log \left(\sqrt{x + \mathsf{hypot}\left(1, x\right)}\right)\right)\right)} \]
      2. log1p-undefine9.8%

        \[\leadsto \color{blue}{\log \left(1 + \mathsf{expm1}\left(2 \cdot \log \left(\sqrt{x + \mathsf{hypot}\left(1, x\right)}\right)\right)\right)} \]
      3. expm1-undefine9.8%

        \[\leadsto \log \left(1 + \color{blue}{\left(e^{2 \cdot \log \left(\sqrt{x + \mathsf{hypot}\left(1, x\right)}\right)} - 1\right)}\right) \]
      4. *-commutative9.8%

        \[\leadsto \log \left(1 + \left(e^{\color{blue}{\log \left(\sqrt{x + \mathsf{hypot}\left(1, x\right)}\right) \cdot 2}} - 1\right)\right) \]
      5. exp-to-pow9.8%

        \[\leadsto \log \left(1 + \left(\color{blue}{{\left(\sqrt{x + \mathsf{hypot}\left(1, x\right)}\right)}^{2}} - 1\right)\right) \]
      6. pow29.8%

        \[\leadsto \log \left(1 + \left(\color{blue}{\sqrt{x + \mathsf{hypot}\left(1, x\right)} \cdot \sqrt{x + \mathsf{hypot}\left(1, x\right)}} - 1\right)\right) \]
      7. add-sqr-sqrt9.9%

        \[\leadsto \log \left(1 + \left(\color{blue}{\left(x + \mathsf{hypot}\left(1, x\right)\right)} - 1\right)\right) \]
    6. Applied egg-rr9.9%

      \[\leadsto \color{blue}{\log \left(1 + \left(\left(x + \mathsf{hypot}\left(1, x\right)\right) - 1\right)\right)} \]
    7. Step-by-step derivation
      1. log1p-define9.9%

        \[\leadsto \color{blue}{\mathsf{log1p}\left(\left(x + \mathsf{hypot}\left(1, x\right)\right) - 1\right)} \]
      2. sub-neg9.9%

        \[\leadsto \mathsf{log1p}\left(\color{blue}{\left(x + \mathsf{hypot}\left(1, x\right)\right) + \left(-1\right)}\right) \]
      3. +-commutative9.9%

        \[\leadsto \mathsf{log1p}\left(\color{blue}{\left(\mathsf{hypot}\left(1, x\right) + x\right)} + \left(-1\right)\right) \]
      4. metadata-eval9.9%

        \[\leadsto \mathsf{log1p}\left(\left(\mathsf{hypot}\left(1, x\right) + x\right) + \color{blue}{-1}\right) \]
      5. associate-+l+9.8%

        \[\leadsto \mathsf{log1p}\left(\color{blue}{\mathsf{hypot}\left(1, x\right) + \left(x + -1\right)}\right) \]
    8. Simplified9.8%

      \[\leadsto \color{blue}{\mathsf{log1p}\left(\mathsf{hypot}\left(1, x\right) + \left(x + -1\right)\right)} \]
    9. Taylor expanded in x around 0 99.9%

      \[\leadsto \mathsf{log1p}\left(\color{blue}{x \cdot \left(1 + x \cdot \left(0.5 + {x}^{2} \cdot \left(0.0625 \cdot {x}^{2} - 0.125\right)\right)\right)}\right) \]

    if 0.0138 < x

    1. Initial program 57.7%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Add Preprocessing
    3. Step-by-step derivation
      1. add-sqr-sqrt57.7%

        \[\leadsto \log \color{blue}{\left(\sqrt{x + \sqrt{x \cdot x + 1}} \cdot \sqrt{x + \sqrt{x \cdot x + 1}}\right)} \]
      2. pow257.7%

        \[\leadsto \log \color{blue}{\left({\left(\sqrt{x + \sqrt{x \cdot x + 1}}\right)}^{2}\right)} \]
      3. log-pow57.8%

        \[\leadsto \color{blue}{2 \cdot \log \left(\sqrt{x + \sqrt{x \cdot x + 1}}\right)} \]
      4. +-commutative57.8%

        \[\leadsto 2 \cdot \log \left(\sqrt{x + \sqrt{\color{blue}{1 + x \cdot x}}}\right) \]
      5. hypot-1-def100.0%

        \[\leadsto 2 \cdot \log \left(\sqrt{x + \color{blue}{\mathsf{hypot}\left(1, x\right)}}\right) \]
    4. Applied egg-rr100.0%

      \[\leadsto \color{blue}{2 \cdot \log \left(\sqrt{x + \mathsf{hypot}\left(1, x\right)}\right)} \]
    5. Step-by-step derivation
      1. log1p-expm1-u100.0%

        \[\leadsto \color{blue}{\mathsf{log1p}\left(\mathsf{expm1}\left(2 \cdot \log \left(\sqrt{x + \mathsf{hypot}\left(1, x\right)}\right)\right)\right)} \]
      2. log1p-undefine99.9%

        \[\leadsto \color{blue}{\log \left(1 + \mathsf{expm1}\left(2 \cdot \log \left(\sqrt{x + \mathsf{hypot}\left(1, x\right)}\right)\right)\right)} \]
      3. expm1-undefine99.9%

        \[\leadsto \log \left(1 + \color{blue}{\left(e^{2 \cdot \log \left(\sqrt{x + \mathsf{hypot}\left(1, x\right)}\right)} - 1\right)}\right) \]
      4. *-commutative99.9%

        \[\leadsto \log \left(1 + \left(e^{\color{blue}{\log \left(\sqrt{x + \mathsf{hypot}\left(1, x\right)}\right) \cdot 2}} - 1\right)\right) \]
      5. exp-to-pow99.9%

        \[\leadsto \log \left(1 + \left(\color{blue}{{\left(\sqrt{x + \mathsf{hypot}\left(1, x\right)}\right)}^{2}} - 1\right)\right) \]
      6. pow299.9%

        \[\leadsto \log \left(1 + \left(\color{blue}{\sqrt{x + \mathsf{hypot}\left(1, x\right)} \cdot \sqrt{x + \mathsf{hypot}\left(1, x\right)}} - 1\right)\right) \]
      7. add-sqr-sqrt99.9%

        \[\leadsto \log \left(1 + \left(\color{blue}{\left(x + \mathsf{hypot}\left(1, x\right)\right)} - 1\right)\right) \]
    6. Applied egg-rr99.9%

      \[\leadsto \color{blue}{\log \left(1 + \left(\left(x + \mathsf{hypot}\left(1, x\right)\right) - 1\right)\right)} \]
    7. Step-by-step derivation
      1. log1p-define99.9%

        \[\leadsto \color{blue}{\mathsf{log1p}\left(\left(x + \mathsf{hypot}\left(1, x\right)\right) - 1\right)} \]
      2. sub-neg99.9%

        \[\leadsto \mathsf{log1p}\left(\color{blue}{\left(x + \mathsf{hypot}\left(1, x\right)\right) + \left(-1\right)}\right) \]
      3. +-commutative99.9%

        \[\leadsto \mathsf{log1p}\left(\color{blue}{\left(\mathsf{hypot}\left(1, x\right) + x\right)} + \left(-1\right)\right) \]
      4. metadata-eval99.9%

        \[\leadsto \mathsf{log1p}\left(\left(\mathsf{hypot}\left(1, x\right) + x\right) + \color{blue}{-1}\right) \]
      5. associate-+l+99.9%

        \[\leadsto \mathsf{log1p}\left(\color{blue}{\mathsf{hypot}\left(1, x\right) + \left(x + -1\right)}\right) \]
    8. Simplified99.9%

      \[\leadsto \color{blue}{\mathsf{log1p}\left(\mathsf{hypot}\left(1, x\right) + \left(x + -1\right)\right)} \]
    9. Taylor expanded in x around inf 100.0%

      \[\leadsto \mathsf{log1p}\left(\mathsf{hypot}\left(1, x\right) + \color{blue}{x \cdot \left(1 - \frac{1}{x}\right)}\right) \]
  3. Recombined 3 regimes into one program.
  4. Final simplification 100.0%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -0.014:\\ \;\;\;\;-\mathsf{log1p}\left(\mathsf{hypot}\left(1, x\right) + \left(-1 - x\right)\right)\\ \mathbf{elif}\;x \leq 0.0138:\\ \;\;\;\;\mathsf{log1p}\left(x \cdot \left(1 + x \cdot \left(0.5 + {x}^{2} \cdot \left({x}^{2} \cdot 0.0625 - 0.125\right)\right)\right)\right)\\ \mathbf{else}:\\ \;\;\;\;\mathsf{log1p}\left(\mathsf{hypot}\left(1, x\right) + x \cdot \left(1 - \frac{1}{x}\right)\right)\\ \end{array} \]
  5. Add Preprocessing

Alternative 3: 99.9% accurate, 0.9× speedup

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq -0.0072:\\ \;\;\;\;-\mathsf{log1p}\left(\mathsf{hypot}\left(1, x\right) + \left(-1 - x\right)\right)\\ \mathbf{elif}\;x \leq 0.007:\\ \;\;\;\;x \cdot \left(1 + \left(x \cdot x\right) \cdot \left(0.075 \cdot \left(x \cdot x\right) - 0.16666666666666666\right)\right)\\ \mathbf{else}:\\ \;\;\;\;\mathsf{log1p}\left(\mathsf{hypot}\left(1, x\right) + x \cdot \left(1 - \frac{1}{x}\right)\right)\\ \end{array} \end{array} \]
;; Accurate asinh(x): three-regime rewrite of (log (+ x (sqrt (+ (* x x) 1.0)))).
;; x <= -0.0072 : reflected log1p/hypot form, avoids cancellation for negative x
;; x <= 0.007   : odd Taylor polynomial of asinh about 0 (x - x^3/6 + 3x^5/40)
;; otherwise    : log1p/hypot form (note x*(1 - 1/x) = x - 1 mathematically)
(FPCore (x)
 :precision binary64
 (if (<= x -0.0072)
   (- (log1p (+ (hypot 1.0 x) (- -1.0 x))))
   (if (<= x 0.007)
     (* x (+ 1.0 (* (* x x) (- (* 0.075 (* x x)) 0.16666666666666666))))
     (log1p (+ (hypot 1.0 x) (* x (- 1.0 (/ 1.0 x))))))))
double code(double x) {
	double tmp;
	if (x <= -0.0072) {
		tmp = -log1p((hypot(1.0, x) + (-1.0 - x)));
	} else if (x <= 0.007) {
		tmp = x * (1.0 + ((x * x) * ((0.075 * (x * x)) - 0.16666666666666666)));
	} else {
		tmp = log1p((hypot(1.0, x) + (x * (1.0 - (1.0 / x)))));
	}
	return tmp;
}
/** Accurate asinh(x): three-regime rewrite of log(x + sqrt(x*x + 1)). */
public static double code(double x) {
	// Negative regime: reflected log1p/hypot form avoids cancellation.
	if (x <= -0.0072) {
		return -Math.log1p((Math.hypot(1.0, x) + (-1.0 - x)));
	}
	// Near zero: odd Taylor polynomial x - x^3/6 + (3/40)x^5 in Horner form.
	if (x <= 0.007) {
		return x * (1.0 + ((x * x) * ((0.075 * (x * x)) - 0.16666666666666666)));
	}
	// Positive regime: log1p/hypot form (x*(1 - 1/x) = x - 1 mathematically).
	return Math.log1p((Math.hypot(1.0, x) + (x * (1.0 - (1.0 / x)))));
}
def code(x):
	"""Accurate asinh(x): three-regime rewrite of log(x + sqrt(x*x + 1))."""
	# Negative regime: reflected log1p/hypot form avoids cancellation.
	if x <= -0.0072:
		return -math.log1p((math.hypot(1.0, x) + (-1.0 - x)))
	# Near zero: odd Taylor polynomial x - x^3/6 + (3/40)x^5 in Horner form.
	if x <= 0.007:
		return x * (1.0 + ((x * x) * ((0.075 * (x * x)) - 0.16666666666666666)))
	# Positive regime: log1p/hypot form (x*(1 - 1/x) = x - 1 mathematically).
	return math.log1p((math.hypot(1.0, x) + (x * (1.0 - (1.0 / x)))))
# Accurate asinh(x): three-regime rewrite of log(x + sqrt(x*x + 1)).
function code(x)
	# Negative regime: reflected log1p/hypot form avoids cancellation.
	if (x <= -0.0072)
		return Float64(-log1p(Float64(hypot(1.0, x) + Float64(-1.0 - x))))
	end
	# Near zero: odd Taylor polynomial x - x^3/6 + (3/40)x^5 in Horner form.
	if (x <= 0.007)
		return Float64(x * Float64(1.0 + Float64(Float64(x * x) * Float64(Float64(0.075 * Float64(x * x)) - 0.16666666666666666))))
	end
	# Positive regime: log1p/hypot form (x*(1 - 1/x) = x - 1 mathematically).
	return log1p(Float64(hypot(1.0, x) + Float64(x * Float64(1.0 - Float64(1.0 / x)))))
end
(* Accurate asinh(x): three-regime rewrite of Log[x + Sqrt[x^2 + 1]]; hypot(1, x) appears expanded as Sqrt[1.0^2 + x^2]. *)
code[x_] := If[LessEqual[x, -0.0072], (-N[Log[1 + N[(N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision] + N[(-1.0 - x), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), If[LessEqual[x, 0.007], N[(x * N[(1.0 + N[(N[(x * x), $MachinePrecision] * N[(N[(0.075 * N[(x * x), $MachinePrecision]), $MachinePrecision] - 0.16666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[Log[1 + N[(N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision] + N[(x * N[(1.0 - N[(1.0 / x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x \leq -0.0072:\\
\;\;\;\;-\mathsf{log1p}\left(\mathsf{hypot}\left(1, x\right) + \left(-1 - x\right)\right)\\

\mathbf{elif}\;x \leq 0.007:\\
\;\;\;\;x \cdot \left(1 + \left(x \cdot x\right) \cdot \left(0.075 \cdot \left(x \cdot x\right) - 0.16666666666666666\right)\right)\\

\mathbf{else}:\\
\;\;\;\;\mathsf{log1p}\left(\mathsf{hypot}\left(1, x\right) + x \cdot \left(1 - \frac{1}{x}\right)\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if x < -0.0071999999999999998

    1. Initial program 5.1%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Add Preprocessing
    3. Step-by-step derivation
      1. flip-+4.8%

        \[\leadsto \log \color{blue}{\left(\frac{x \cdot x - \sqrt{x \cdot x + 1} \cdot \sqrt{x \cdot x + 1}}{x - \sqrt{x \cdot x + 1}}\right)} \]
      2. frac-2neg4.8%

        \[\leadsto \log \color{blue}{\left(\frac{-\left(x \cdot x - \sqrt{x \cdot x + 1} \cdot \sqrt{x \cdot x + 1}\right)}{-\left(x - \sqrt{x \cdot x + 1}\right)}\right)} \]
      3. log-div4.8%

        \[\leadsto \color{blue}{\log \left(-\left(x \cdot x - \sqrt{x \cdot x + 1} \cdot \sqrt{x \cdot x + 1}\right)\right) - \log \left(-\left(x - \sqrt{x \cdot x + 1}\right)\right)} \]
      4. add-sqr-sqrt4.8%

        \[\leadsto \log \left(-\left(x \cdot x - \color{blue}{\left(x \cdot x + 1\right)}\right)\right) - \log \left(-\left(x - \sqrt{x \cdot x + 1}\right)\right) \]
      5. pow24.8%

        \[\leadsto \log \left(-\left(\color{blue}{{x}^{2}} - \left(x \cdot x + 1\right)\right)\right) - \log \left(-\left(x - \sqrt{x \cdot x + 1}\right)\right) \]
      6. fma-define4.8%

        \[\leadsto \log \left(-\left({x}^{2} - \color{blue}{\mathsf{fma}\left(x, x, 1\right)}\right)\right) - \log \left(-\left(x - \sqrt{x \cdot x + 1}\right)\right) \]
      7. +-commutative4.8%

        \[\leadsto \log \left(-\left({x}^{2} - \mathsf{fma}\left(x, x, 1\right)\right)\right) - \log \left(-\left(x - \sqrt{\color{blue}{1 + x \cdot x}}\right)\right) \]
      8. hypot-1-def4.8%

        \[\leadsto \log \left(-\left({x}^{2} - \mathsf{fma}\left(x, x, 1\right)\right)\right) - \log \left(-\left(x - \color{blue}{\mathsf{hypot}\left(1, x\right)}\right)\right) \]
    4. Applied egg-rr4.8%

      \[\leadsto \color{blue}{\log \left(-\left({x}^{2} - \mathsf{fma}\left(x, x, 1\right)\right)\right) - \log \left(-\left(x - \mathsf{hypot}\left(1, x\right)\right)\right)} \]
    5. Step-by-step derivation
      1. fma-undefine4.8%

        \[\leadsto \log \left(-\left({x}^{2} - \color{blue}{\left(x \cdot x + 1\right)}\right)\right) - \log \left(-\left(x - \mathsf{hypot}\left(1, x\right)\right)\right) \]
      2. unpow24.8%

        \[\leadsto \log \left(-\left({x}^{2} - \left(\color{blue}{{x}^{2}} + 1\right)\right)\right) - \log \left(-\left(x - \mathsf{hypot}\left(1, x\right)\right)\right) \]
      3. associate--r+49.0%

        \[\leadsto \log \left(-\color{blue}{\left(\left({x}^{2} - {x}^{2}\right) - 1\right)}\right) - \log \left(-\left(x - \mathsf{hypot}\left(1, x\right)\right)\right) \]
      4. +-inverses99.9%

        \[\leadsto \log \left(-\left(\color{blue}{0} - 1\right)\right) - \log \left(-\left(x - \mathsf{hypot}\left(1, x\right)\right)\right) \]
      5. metadata-eval99.9%

        \[\leadsto \log \left(-\color{blue}{-1}\right) - \log \left(-\left(x - \mathsf{hypot}\left(1, x\right)\right)\right) \]
      6. metadata-eval99.9%

        \[\leadsto \log \color{blue}{1} - \log \left(-\left(x - \mathsf{hypot}\left(1, x\right)\right)\right) \]
      7. metadata-eval99.9%

        \[\leadsto \color{blue}{0} - \log \left(-\left(x - \mathsf{hypot}\left(1, x\right)\right)\right) \]
      8. neg-sub099.9%

        \[\leadsto 0 - \log \color{blue}{\left(0 - \left(x - \mathsf{hypot}\left(1, x\right)\right)\right)} \]
      9. associate--r-99.9%

        \[\leadsto 0 - \log \color{blue}{\left(\left(0 - x\right) + \mathsf{hypot}\left(1, x\right)\right)} \]
      10. neg-sub099.9%

        \[\leadsto 0 - \log \left(\color{blue}{\left(-x\right)} + \mathsf{hypot}\left(1, x\right)\right) \]
      11. +-commutative99.9%

        \[\leadsto 0 - \log \color{blue}{\left(\mathsf{hypot}\left(1, x\right) + \left(-x\right)\right)} \]
      12. sub-neg99.9%

        \[\leadsto 0 - \log \color{blue}{\left(\mathsf{hypot}\left(1, x\right) - x\right)} \]
      13. neg-sub099.9%

        \[\leadsto \color{blue}{-\log \left(\mathsf{hypot}\left(1, x\right) - x\right)} \]
    6. Simplified99.9%

      \[\leadsto \color{blue}{-\log \left(\mathsf{hypot}\left(1, x\right) - x\right)} \]
    7. Step-by-step derivation
      1. add-sqr-sqrt99.1%

        \[\leadsto -\color{blue}{\sqrt{\log \left(\mathsf{hypot}\left(1, x\right) - x\right)} \cdot \sqrt{\log \left(\mathsf{hypot}\left(1, x\right) - x\right)}} \]
      2. sqrt-unprod99.9%

        \[\leadsto -\color{blue}{\sqrt{\log \left(\mathsf{hypot}\left(1, x\right) - x\right) \cdot \log \left(\mathsf{hypot}\left(1, x\right) - x\right)}} \]
      3. sqr-neg99.9%

        \[\leadsto -\sqrt{\color{blue}{\left(-\log \left(\mathsf{hypot}\left(1, x\right) - x\right)\right) \cdot \left(-\log \left(\mathsf{hypot}\left(1, x\right) - x\right)\right)}} \]
      4. sqrt-unprod0.0%

        \[\leadsto -\color{blue}{\sqrt{-\log \left(\mathsf{hypot}\left(1, x\right) - x\right)} \cdot \sqrt{-\log \left(\mathsf{hypot}\left(1, x\right) - x\right)}} \]
      5. add-sqr-sqrt1.5%

        \[\leadsto -\color{blue}{\left(-\log \left(\mathsf{hypot}\left(1, x\right) - x\right)\right)} \]
      6. log1p-expm1-u0.7%

        \[\leadsto -\color{blue}{\mathsf{log1p}\left(\mathsf{expm1}\left(-\log \left(\mathsf{hypot}\left(1, x\right) - x\right)\right)\right)} \]
      7. add-sqr-sqrt0.0%

        \[\leadsto -\mathsf{log1p}\left(\mathsf{expm1}\left(\color{blue}{\sqrt{-\log \left(\mathsf{hypot}\left(1, x\right) - x\right)} \cdot \sqrt{-\log \left(\mathsf{hypot}\left(1, x\right) - x\right)}}\right)\right) \]
      8. sqrt-unprod99.9%

        \[\leadsto -\mathsf{log1p}\left(\mathsf{expm1}\left(\color{blue}{\sqrt{\left(-\log \left(\mathsf{hypot}\left(1, x\right) - x\right)\right) \cdot \left(-\log \left(\mathsf{hypot}\left(1, x\right) - x\right)\right)}}\right)\right) \]
      9. sqr-neg99.9%

        \[\leadsto -\mathsf{log1p}\left(\mathsf{expm1}\left(\sqrt{\color{blue}{\log \left(\mathsf{hypot}\left(1, x\right) - x\right) \cdot \log \left(\mathsf{hypot}\left(1, x\right) - x\right)}}\right)\right) \]
      10. sqrt-unprod99.1%

        \[\leadsto -\mathsf{log1p}\left(\mathsf{expm1}\left(\color{blue}{\sqrt{\log \left(\mathsf{hypot}\left(1, x\right) - x\right)} \cdot \sqrt{\log \left(\mathsf{hypot}\left(1, x\right) - x\right)}}\right)\right) \]
      11. add-sqr-sqrt99.9%

        \[\leadsto -\mathsf{log1p}\left(\mathsf{expm1}\left(\color{blue}{\log \left(\mathsf{hypot}\left(1, x\right) - x\right)}\right)\right) \]
      12. expm1-undefine99.9%

        \[\leadsto -\mathsf{log1p}\left(\color{blue}{e^{\log \left(\mathsf{hypot}\left(1, x\right) - x\right)} - 1}\right) \]
      13. add-exp-log99.9%

        \[\leadsto -\mathsf{log1p}\left(\color{blue}{\left(\mathsf{hypot}\left(1, x\right) - x\right)} - 1\right) \]
    8. Applied egg-rr99.9%

      \[\leadsto -\color{blue}{\mathsf{log1p}\left(\left(\mathsf{hypot}\left(1, x\right) - x\right) - 1\right)} \]
    9. Step-by-step derivation
      1. sub-neg99.9%

        \[\leadsto -\mathsf{log1p}\left(\color{blue}{\left(\mathsf{hypot}\left(1, x\right) - x\right) + \left(-1\right)}\right) \]
      2. metadata-eval99.9%

        \[\leadsto -\mathsf{log1p}\left(\left(\mathsf{hypot}\left(1, x\right) - x\right) + \color{blue}{-1}\right) \]
      3. associate-+l-100.0%

        \[\leadsto -\mathsf{log1p}\left(\color{blue}{\mathsf{hypot}\left(1, x\right) - \left(x - -1\right)}\right) \]
    10. Simplified100.0%

      \[\leadsto -\color{blue}{\mathsf{log1p}\left(\mathsf{hypot}\left(1, x\right) - \left(x - -1\right)\right)} \]

    if -0.0071999999999999998 < x < 0.00700000000000000015

    1. Initial program 9.9%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Add Preprocessing
    3. Taylor expanded in x around 0 99.9%

      \[\leadsto \color{blue}{x \cdot \left(1 + {x}^{2} \cdot \left(0.075 \cdot {x}^{2} - 0.16666666666666666\right)\right)} \]
    4. Step-by-step derivation
      1. unpow299.9%

        \[\leadsto x \cdot \left(1 + {x}^{2} \cdot \left(0.075 \cdot \color{blue}{\left(x \cdot x\right)} - 0.16666666666666666\right)\right) \]
    5. Applied egg-rr99.9%

      \[\leadsto x \cdot \left(1 + {x}^{2} \cdot \left(0.075 \cdot \color{blue}{\left(x \cdot x\right)} - 0.16666666666666666\right)\right) \]
    6. Step-by-step derivation
      1. unpow299.9%

        \[\leadsto x \cdot \left(1 + {x}^{2} \cdot \left(0.075 \cdot \color{blue}{\left(x \cdot x\right)} - 0.16666666666666666\right)\right) \]
    7. Applied egg-rr99.9%

      \[\leadsto x \cdot \left(1 + \color{blue}{\left(x \cdot x\right)} \cdot \left(0.075 \cdot \left(x \cdot x\right) - 0.16666666666666666\right)\right) \]

    if 0.00700000000000000015 < x

    1. Initial program 57.7%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Add Preprocessing
    3. Step-by-step derivation
      1. add-sqr-sqrt57.7%

        \[\leadsto \log \color{blue}{\left(\sqrt{x + \sqrt{x \cdot x + 1}} \cdot \sqrt{x + \sqrt{x \cdot x + 1}}\right)} \]
      2. pow257.7%

        \[\leadsto \log \color{blue}{\left({\left(\sqrt{x + \sqrt{x \cdot x + 1}}\right)}^{2}\right)} \]
      3. log-pow57.8%

        \[\leadsto \color{blue}{2 \cdot \log \left(\sqrt{x + \sqrt{x \cdot x + 1}}\right)} \]
      4. +-commutative57.8%

        \[\leadsto 2 \cdot \log \left(\sqrt{x + \sqrt{\color{blue}{1 + x \cdot x}}}\right) \]
      5. hypot-1-def100.0%

        \[\leadsto 2 \cdot \log \left(\sqrt{x + \color{blue}{\mathsf{hypot}\left(1, x\right)}}\right) \]
    4. Applied egg-rr100.0%

      \[\leadsto \color{blue}{2 \cdot \log \left(\sqrt{x + \mathsf{hypot}\left(1, x\right)}\right)} \]
    5. Step-by-step derivation
      1. log1p-expm1-u100.0%

        \[\leadsto \color{blue}{\mathsf{log1p}\left(\mathsf{expm1}\left(2 \cdot \log \left(\sqrt{x + \mathsf{hypot}\left(1, x\right)}\right)\right)\right)} \]
      2. log1p-undefine99.9%

        \[\leadsto \color{blue}{\log \left(1 + \mathsf{expm1}\left(2 \cdot \log \left(\sqrt{x + \mathsf{hypot}\left(1, x\right)}\right)\right)\right)} \]
      3. expm1-undefine99.9%

        \[\leadsto \log \left(1 + \color{blue}{\left(e^{2 \cdot \log \left(\sqrt{x + \mathsf{hypot}\left(1, x\right)}\right)} - 1\right)}\right) \]
      4. *-commutative99.9%

        \[\leadsto \log \left(1 + \left(e^{\color{blue}{\log \left(\sqrt{x + \mathsf{hypot}\left(1, x\right)}\right) \cdot 2}} - 1\right)\right) \]
      5. exp-to-pow99.9%

        \[\leadsto \log \left(1 + \left(\color{blue}{{\left(\sqrt{x + \mathsf{hypot}\left(1, x\right)}\right)}^{2}} - 1\right)\right) \]
      6. pow299.9%

        \[\leadsto \log \left(1 + \left(\color{blue}{\sqrt{x + \mathsf{hypot}\left(1, x\right)} \cdot \sqrt{x + \mathsf{hypot}\left(1, x\right)}} - 1\right)\right) \]
      7. add-sqr-sqrt99.9%

        \[\leadsto \log \left(1 + \left(\color{blue}{\left(x + \mathsf{hypot}\left(1, x\right)\right)} - 1\right)\right) \]
    6. Applied egg-rr99.9%

      \[\leadsto \color{blue}{\log \left(1 + \left(\left(x + \mathsf{hypot}\left(1, x\right)\right) - 1\right)\right)} \]
    7. Step-by-step derivation
      1. log1p-define99.9%

        \[\leadsto \color{blue}{\mathsf{log1p}\left(\left(x + \mathsf{hypot}\left(1, x\right)\right) - 1\right)} \]
      2. sub-neg99.9%

        \[\leadsto \mathsf{log1p}\left(\color{blue}{\left(x + \mathsf{hypot}\left(1, x\right)\right) + \left(-1\right)}\right) \]
      3. +-commutative99.9%

        \[\leadsto \mathsf{log1p}\left(\color{blue}{\left(\mathsf{hypot}\left(1, x\right) + x\right)} + \left(-1\right)\right) \]
      4. metadata-eval99.9%

        \[\leadsto \mathsf{log1p}\left(\left(\mathsf{hypot}\left(1, x\right) + x\right) + \color{blue}{-1}\right) \]
      5. associate-+l+99.9%

        \[\leadsto \mathsf{log1p}\left(\color{blue}{\mathsf{hypot}\left(1, x\right) + \left(x + -1\right)}\right) \]
    8. Simplified99.9%

      \[\leadsto \color{blue}{\mathsf{log1p}\left(\mathsf{hypot}\left(1, x\right) + \left(x + -1\right)\right)} \]
    9. Taylor expanded in x around inf 100.0%

      \[\leadsto \mathsf{log1p}\left(\mathsf{hypot}\left(1, x\right) + \color{blue}{x \cdot \left(1 - \frac{1}{x}\right)}\right) \]
  3. Recombined 3 regimes into one program.
  4. Final simplification 99.9%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -0.0072:\\ \;\;\;\;-\mathsf{log1p}\left(\mathsf{hypot}\left(1, x\right) + \left(-1 - x\right)\right)\\ \mathbf{elif}\;x \leq 0.007:\\ \;\;\;\;x \cdot \left(1 + \left(x \cdot x\right) \cdot \left(0.075 \cdot \left(x \cdot x\right) - 0.16666666666666666\right)\right)\\ \mathbf{else}:\\ \;\;\;\;\mathsf{log1p}\left(\mathsf{hypot}\left(1, x\right) + x \cdot \left(1 - \frac{1}{x}\right)\right)\\ \end{array} \]
  5. Add Preprocessing

Alternative 4: 99.9% accurate, 1.0× speedup

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq -0.0072:\\ \;\;\;\;-\mathsf{log1p}\left(\mathsf{hypot}\left(1, x\right) + \left(-1 - x\right)\right)\\ \mathbf{elif}\;x \leq 0.0075:\\ \;\;\;\;x \cdot \left(1 + \left(x \cdot x\right) \cdot \left(0.075 \cdot \left(x \cdot x\right) - 0.16666666666666666\right)\right)\\ \mathbf{else}:\\ \;\;\;\;\log \left(x + \mathsf{hypot}\left(1, x\right)\right)\\ \end{array} \end{array} \]
;; Accurate asinh(x): three-regime rewrite of (log (+ x (sqrt (+ (* x x) 1.0)))).
;; x <= -0.0072 : reflected log1p/hypot form, avoids cancellation for negative x
;; x <= 0.0075  : odd Taylor polynomial of asinh about 0 (x - x^3/6 + 3x^5/40)
;; otherwise    : direct log of x + hypot(1, x)
(FPCore (x)
 :precision binary64
 (if (<= x -0.0072)
   (- (log1p (+ (hypot 1.0 x) (- -1.0 x))))
   (if (<= x 0.0075)
     (* x (+ 1.0 (* (* x x) (- (* 0.075 (* x x)) 0.16666666666666666))))
     (log (+ x (hypot 1.0 x))))))
double code(double x) {
	double tmp;
	if (x <= -0.0072) {
		tmp = -log1p((hypot(1.0, x) + (-1.0 - x)));
	} else if (x <= 0.0075) {
		tmp = x * (1.0 + ((x * x) * ((0.075 * (x * x)) - 0.16666666666666666)));
	} else {
		tmp = log((x + hypot(1.0, x)));
	}
	return tmp;
}
/** Accurate asinh(x): three-regime rewrite of log(x + sqrt(x*x + 1)). */
public static double code(double x) {
	// Negative regime: reflected log1p/hypot form avoids cancellation.
	if (x <= -0.0072) {
		return -Math.log1p((Math.hypot(1.0, x) + (-1.0 - x)));
	}
	// Near zero: odd Taylor polynomial x - x^3/6 + (3/40)x^5 in Horner form.
	if (x <= 0.0075) {
		return x * (1.0 + ((x * x) * ((0.075 * (x * x)) - 0.16666666666666666)));
	}
	// Positive regime: hypot makes sqrt(x*x + 1) accurate; log is safe here.
	return Math.log((x + Math.hypot(1.0, x)));
}
def code(x):
	"""Accurate asinh(x): three-regime rewrite of log(x + sqrt(x*x + 1))."""
	# Negative regime: reflected log1p/hypot form avoids cancellation.
	if x <= -0.0072:
		return -math.log1p((math.hypot(1.0, x) + (-1.0 - x)))
	# Near zero: odd Taylor polynomial x - x^3/6 + (3/40)x^5 in Horner form.
	if x <= 0.0075:
		return x * (1.0 + ((x * x) * ((0.075 * (x * x)) - 0.16666666666666666)))
	# Positive regime: hypot makes sqrt(x*x + 1) accurate; log is safe here.
	return math.log((x + math.hypot(1.0, x)))
# Accurate asinh(x): three-regime rewrite of log(x + sqrt(x*x + 1)).
function code(x)
	# Negative regime: reflected log1p/hypot form avoids cancellation.
	if (x <= -0.0072)
		return Float64(-log1p(Float64(hypot(1.0, x) + Float64(-1.0 - x))))
	end
	# Near zero: odd Taylor polynomial x - x^3/6 + (3/40)x^5 in Horner form.
	if (x <= 0.0075)
		return Float64(x * Float64(1.0 + Float64(Float64(x * x) * Float64(Float64(0.075 * Float64(x * x)) - 0.16666666666666666))))
	end
	# Positive regime: hypot makes sqrt(x*x + 1) accurate; log is safe here.
	return log(Float64(x + hypot(1.0, x)))
end
(* Accurate asinh(x): three-regime rewrite of Log[x + Sqrt[x^2 + 1]]; hypot(1, x) appears expanded as Sqrt[1.0^2 + x^2]. *)
code[x_] := If[LessEqual[x, -0.0072], (-N[Log[1 + N[(N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision] + N[(-1.0 - x), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), If[LessEqual[x, 0.0075], N[(x * N[(1.0 + N[(N[(x * x), $MachinePrecision] * N[(N[(0.075 * N[(x * x), $MachinePrecision]), $MachinePrecision] - 0.16666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[Log[N[(x + N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x \leq -0.0072:\\
\;\;\;\;-\mathsf{log1p}\left(\mathsf{hypot}\left(1, x\right) + \left(-1 - x\right)\right)\\

\mathbf{elif}\;x \leq 0.0075:\\
\;\;\;\;x \cdot \left(1 + \left(x \cdot x\right) \cdot \left(0.075 \cdot \left(x \cdot x\right) - 0.16666666666666666\right)\right)\\

\mathbf{else}:\\
\;\;\;\;\log \left(x + \mathsf{hypot}\left(1, x\right)\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if x < -0.0071999999999999998

    1. Initial program 5.1%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Add Preprocessing
    3. Step-by-step derivation
      1. flip-+4.8%

        \[\leadsto \log \color{blue}{\left(\frac{x \cdot x - \sqrt{x \cdot x + 1} \cdot \sqrt{x \cdot x + 1}}{x - \sqrt{x \cdot x + 1}}\right)} \]
      2. frac-2neg4.8%

        \[\leadsto \log \color{blue}{\left(\frac{-\left(x \cdot x - \sqrt{x \cdot x + 1} \cdot \sqrt{x \cdot x + 1}\right)}{-\left(x - \sqrt{x \cdot x + 1}\right)}\right)} \]
      3. log-div4.8%

        \[\leadsto \color{blue}{\log \left(-\left(x \cdot x - \sqrt{x \cdot x + 1} \cdot \sqrt{x \cdot x + 1}\right)\right) - \log \left(-\left(x - \sqrt{x \cdot x + 1}\right)\right)} \]
      4. add-sqr-sqrt4.8%

        \[\leadsto \log \left(-\left(x \cdot x - \color{blue}{\left(x \cdot x + 1\right)}\right)\right) - \log \left(-\left(x - \sqrt{x \cdot x + 1}\right)\right) \]
      5. pow24.8%

        \[\leadsto \log \left(-\left(\color{blue}{{x}^{2}} - \left(x \cdot x + 1\right)\right)\right) - \log \left(-\left(x - \sqrt{x \cdot x + 1}\right)\right) \]
      6. fma-define4.8%

        \[\leadsto \log \left(-\left({x}^{2} - \color{blue}{\mathsf{fma}\left(x, x, 1\right)}\right)\right) - \log \left(-\left(x - \sqrt{x \cdot x + 1}\right)\right) \]
      7. +-commutative4.8%

        \[\leadsto \log \left(-\left({x}^{2} - \mathsf{fma}\left(x, x, 1\right)\right)\right) - \log \left(-\left(x - \sqrt{\color{blue}{1 + x \cdot x}}\right)\right) \]
      8. hypot-1-def4.8%

        \[\leadsto \log \left(-\left({x}^{2} - \mathsf{fma}\left(x, x, 1\right)\right)\right) - \log \left(-\left(x - \color{blue}{\mathsf{hypot}\left(1, x\right)}\right)\right) \]
    4. Applied egg-rr4.8%

      \[\leadsto \color{blue}{\log \left(-\left({x}^{2} - \mathsf{fma}\left(x, x, 1\right)\right)\right) - \log \left(-\left(x - \mathsf{hypot}\left(1, x\right)\right)\right)} \]
    5. Step-by-step derivation
      1. fma-undefine4.8%

        \[\leadsto \log \left(-\left({x}^{2} - \color{blue}{\left(x \cdot x + 1\right)}\right)\right) - \log \left(-\left(x - \mathsf{hypot}\left(1, x\right)\right)\right) \]
      2. unpow24.8%

        \[\leadsto \log \left(-\left({x}^{2} - \left(\color{blue}{{x}^{2}} + 1\right)\right)\right) - \log \left(-\left(x - \mathsf{hypot}\left(1, x\right)\right)\right) \]
      3. associate--r+49.0%

        \[\leadsto \log \left(-\color{blue}{\left(\left({x}^{2} - {x}^{2}\right) - 1\right)}\right) - \log \left(-\left(x - \mathsf{hypot}\left(1, x\right)\right)\right) \]
      4. +-inverses99.9%

        \[\leadsto \log \left(-\left(\color{blue}{0} - 1\right)\right) - \log \left(-\left(x - \mathsf{hypot}\left(1, x\right)\right)\right) \]
      5. metadata-eval99.9%

        \[\leadsto \log \left(-\color{blue}{-1}\right) - \log \left(-\left(x - \mathsf{hypot}\left(1, x\right)\right)\right) \]
      6. metadata-eval99.9%

        \[\leadsto \log \color{blue}{1} - \log \left(-\left(x - \mathsf{hypot}\left(1, x\right)\right)\right) \]
      7. metadata-eval99.9%

        \[\leadsto \color{blue}{0} - \log \left(-\left(x - \mathsf{hypot}\left(1, x\right)\right)\right) \]
      8. neg-sub099.9%

        \[\leadsto 0 - \log \color{blue}{\left(0 - \left(x - \mathsf{hypot}\left(1, x\right)\right)\right)} \]
      9. associate--r-99.9%

        \[\leadsto 0 - \log \color{blue}{\left(\left(0 - x\right) + \mathsf{hypot}\left(1, x\right)\right)} \]
      10. neg-sub099.9%

        \[\leadsto 0 - \log \left(\color{blue}{\left(-x\right)} + \mathsf{hypot}\left(1, x\right)\right) \]
      11. +-commutative99.9%

        \[\leadsto 0 - \log \color{blue}{\left(\mathsf{hypot}\left(1, x\right) + \left(-x\right)\right)} \]
      12. sub-neg99.9%

        \[\leadsto 0 - \log \color{blue}{\left(\mathsf{hypot}\left(1, x\right) - x\right)} \]
      13. neg-sub099.9%

        \[\leadsto \color{blue}{-\log \left(\mathsf{hypot}\left(1, x\right) - x\right)} \]
    6. Simplified99.9%

      \[\leadsto \color{blue}{-\log \left(\mathsf{hypot}\left(1, x\right) - x\right)} \]
    7. Step-by-step derivation
      1. add-sqr-sqrt99.1%

        \[\leadsto -\color{blue}{\sqrt{\log \left(\mathsf{hypot}\left(1, x\right) - x\right)} \cdot \sqrt{\log \left(\mathsf{hypot}\left(1, x\right) - x\right)}} \]
      2. sqrt-unprod99.9%

        \[\leadsto -\color{blue}{\sqrt{\log \left(\mathsf{hypot}\left(1, x\right) - x\right) \cdot \log \left(\mathsf{hypot}\left(1, x\right) - x\right)}} \]
      3. sqr-neg99.9%

        \[\leadsto -\sqrt{\color{blue}{\left(-\log \left(\mathsf{hypot}\left(1, x\right) - x\right)\right) \cdot \left(-\log \left(\mathsf{hypot}\left(1, x\right) - x\right)\right)}} \]
      4. sqrt-unprod0.0%

        \[\leadsto -\color{blue}{\sqrt{-\log \left(\mathsf{hypot}\left(1, x\right) - x\right)} \cdot \sqrt{-\log \left(\mathsf{hypot}\left(1, x\right) - x\right)}} \]
      5. add-sqr-sqrt1.5%

        \[\leadsto -\color{blue}{\left(-\log \left(\mathsf{hypot}\left(1, x\right) - x\right)\right)} \]
      6. log1p-expm1-u0.7%

        \[\leadsto -\color{blue}{\mathsf{log1p}\left(\mathsf{expm1}\left(-\log \left(\mathsf{hypot}\left(1, x\right) - x\right)\right)\right)} \]
      7. add-sqr-sqrt0.0%

        \[\leadsto -\mathsf{log1p}\left(\mathsf{expm1}\left(\color{blue}{\sqrt{-\log \left(\mathsf{hypot}\left(1, x\right) - x\right)} \cdot \sqrt{-\log \left(\mathsf{hypot}\left(1, x\right) - x\right)}}\right)\right) \]
      8. sqrt-unprod99.9%

        \[\leadsto -\mathsf{log1p}\left(\mathsf{expm1}\left(\color{blue}{\sqrt{\left(-\log \left(\mathsf{hypot}\left(1, x\right) - x\right)\right) \cdot \left(-\log \left(\mathsf{hypot}\left(1, x\right) - x\right)\right)}}\right)\right) \]
      9. sqr-neg99.9%

        \[\leadsto -\mathsf{log1p}\left(\mathsf{expm1}\left(\sqrt{\color{blue}{\log \left(\mathsf{hypot}\left(1, x\right) - x\right) \cdot \log \left(\mathsf{hypot}\left(1, x\right) - x\right)}}\right)\right) \]
      10. sqrt-unprod99.1%

        \[\leadsto -\mathsf{log1p}\left(\mathsf{expm1}\left(\color{blue}{\sqrt{\log \left(\mathsf{hypot}\left(1, x\right) - x\right)} \cdot \sqrt{\log \left(\mathsf{hypot}\left(1, x\right) - x\right)}}\right)\right) \]
      11. add-sqr-sqrt99.9%

        \[\leadsto -\mathsf{log1p}\left(\mathsf{expm1}\left(\color{blue}{\log \left(\mathsf{hypot}\left(1, x\right) - x\right)}\right)\right) \]
      12. expm1-undefine99.9%

        \[\leadsto -\mathsf{log1p}\left(\color{blue}{e^{\log \left(\mathsf{hypot}\left(1, x\right) - x\right)} - 1}\right) \]
      13. add-exp-log99.9%

        \[\leadsto -\mathsf{log1p}\left(\color{blue}{\left(\mathsf{hypot}\left(1, x\right) - x\right)} - 1\right) \]
    8. Applied egg-rr99.9%

      \[\leadsto -\color{blue}{\mathsf{log1p}\left(\left(\mathsf{hypot}\left(1, x\right) - x\right) - 1\right)} \]
    9. Step-by-step derivation
      1. sub-neg99.9%

        \[\leadsto -\mathsf{log1p}\left(\color{blue}{\left(\mathsf{hypot}\left(1, x\right) - x\right) + \left(-1\right)}\right) \]
      2. metadata-eval99.9%

        \[\leadsto -\mathsf{log1p}\left(\left(\mathsf{hypot}\left(1, x\right) - x\right) + \color{blue}{-1}\right) \]
      3. associate-+l-100.0%

        \[\leadsto -\mathsf{log1p}\left(\color{blue}{\mathsf{hypot}\left(1, x\right) - \left(x - -1\right)}\right) \]
    10. Simplified100.0%

      \[\leadsto -\color{blue}{\mathsf{log1p}\left(\mathsf{hypot}\left(1, x\right) - \left(x - -1\right)\right)} \]

    if -0.0071999999999999998 < x < 0.0074999999999999997

    1. Initial program 9.9%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Add Preprocessing
    3. Taylor expanded in x around 0 99.9%

      \[\leadsto \color{blue}{x \cdot \left(1 + {x}^{2} \cdot \left(0.075 \cdot {x}^{2} - 0.16666666666666666\right)\right)} \]
    4. Step-by-step derivation
      1. unpow299.9%

        \[\leadsto x \cdot \left(1 + {x}^{2} \cdot \left(0.075 \cdot \color{blue}{\left(x \cdot x\right)} - 0.16666666666666666\right)\right) \]
    5. Applied egg-rr99.9%

      \[\leadsto x \cdot \left(1 + {x}^{2} \cdot \left(0.075 \cdot \color{blue}{\left(x \cdot x\right)} - 0.16666666666666666\right)\right) \]
    6. Step-by-step derivation
      1. unpow299.9%

        \[\leadsto x \cdot \left(1 + {x}^{2} \cdot \left(0.075 \cdot \color{blue}{\left(x \cdot x\right)} - 0.16666666666666666\right)\right) \]
    7. Applied egg-rr99.9%

      \[\leadsto x \cdot \left(1 + \color{blue}{\left(x \cdot x\right)} \cdot \left(0.075 \cdot \left(x \cdot x\right) - 0.16666666666666666\right)\right) \]

    if 0.0074999999999999997 < x

    1. Initial program 57.7%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Step-by-step derivation
      1. sqr-neg57.7%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{\left(-x\right) \cdot \left(-x\right)} + 1}\right) \]
      2. +-commutative57.7%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{1 + \left(-x\right) \cdot \left(-x\right)}}\right) \]
      3. sqr-neg57.7%

        \[\leadsto \log \left(x + \sqrt{1 + \color{blue}{x \cdot x}}\right) \]
      4. hypot-1-def99.9%

        \[\leadsto \log \left(x + \color{blue}{\mathsf{hypot}\left(1, x\right)}\right) \]
    3. Simplified99.9%

      \[\leadsto \color{blue}{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)} \]
    4. Add Preprocessing
  3. Recombined 3 regimes into one program.
  4. Final simplification 99.9%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -0.0072:\\ \;\;\;\;-\mathsf{log1p}\left(\mathsf{hypot}\left(1, x\right) + \left(-1 - x\right)\right)\\ \mathbf{elif}\;x \leq 0.0075:\\ \;\;\;\;x \cdot \left(1 + \left(x \cdot x\right) \cdot \left(0.075 \cdot \left(x \cdot x\right) - 0.16666666666666666\right)\right)\\ \mathbf{else}:\\ \;\;\;\;\log \left(x + \mathsf{hypot}\left(1, x\right)\right)\\ \end{array} \]
  5. Add Preprocessing

Alternative 5: 99.9% accurate, 1.0× speedup

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq -0.0068:\\ \;\;\;\;-\log \left(\mathsf{hypot}\left(1, x\right) - x\right)\\ \mathbf{elif}\;x \leq 0.0075:\\ \;\;\;\;x \cdot \left(1 + \left(x \cdot x\right) \cdot \left(0.075 \cdot \left(x \cdot x\right) - 0.16666666666666666\right)\right)\\ \mathbf{else}:\\ \;\;\;\;\log \left(x + \mathsf{hypot}\left(1, x\right)\right)\\ \end{array} \end{array} \]
;; Accurate asinh(x): three-regime rewrite of (log (+ x (sqrt (+ (* x x) 1.0)))).
;; x <= -0.0068 : -log(hypot(1, x) - x), reflected form for negative x
;; x <= 0.0075  : odd Taylor polynomial of asinh about 0 (x - x^3/6 + 3x^5/40)
;; otherwise    : direct log of x + hypot(1, x)
(FPCore (x)
 :precision binary64
 (if (<= x -0.0068)
   (- (log (- (hypot 1.0 x) x)))
   (if (<= x 0.0075)
     (* x (+ 1.0 (* (* x x) (- (* 0.075 (* x x)) 0.16666666666666666))))
     (log (+ x (hypot 1.0 x))))))
/* asinh(x): 3-regime rewrite of log(x + sqrt(x*x + 1)) (Herbie alternative 5). */
double code(double x) {
	double tmp;
	if (x <= -0.0068) {
		/* Reflected form: hypot(1,x) - x avoids the cancellation in x + sqrt(x*x+1) for x < 0. */
		tmp = -log((hypot(1.0, x) - x));
	} else if (x <= 0.0075) {
		/* Taylor polynomial of asinh about 0: x - x^3/6 + 3*x^5/40. */
		tmp = x * (1.0 + ((x * x) * ((0.075 * (x * x)) - 0.16666666666666666)));
	} else {
		/* hypot(1,x) is a robust sqrt(1 + x*x). */
		tmp = log((x + hypot(1.0, x)));
	}
	return tmp;
}
// asinh(x): 3-regime rewrite of log(x + sqrt(x*x + 1)) (Herbie alternative 5).
public static double code(double x) {
	double tmp;
	if (x <= -0.0068) {
		// Reflected form: hypot(1,x) - x avoids cancellation for negative x.
		tmp = -Math.log((Math.hypot(1.0, x) - x));
	} else if (x <= 0.0075) {
		// Taylor polynomial of asinh about 0: x - x^3/6 + 3*x^5/40.
		tmp = x * (1.0 + ((x * x) * ((0.075 * (x * x)) - 0.16666666666666666)));
	} else {
		// hypot(1,x) is a robust sqrt(1 + x*x).
		tmp = Math.log((x + Math.hypot(1.0, x)));
	}
	return tmp;
}
def code(x):
	"""asinh(x): three-regime rewrite of log(x + sqrt(x*x + 1)) (Herbie alt. 5)."""
	if x <= -0.0068:
		# Reflected form: hypot(1, x) - x avoids cancellation for negative x.
		return -math.log(math.hypot(1.0, x) - x)
	if x <= 0.0075:
		# Taylor polynomial of asinh about 0: x - x^3/6 + 3*x^5/40.
		xx = x * x
		return x * (1.0 + xx * (0.075 * xx - 0.16666666666666666))
	# hypot(1, x) is a robust sqrt(1 + x*x).
	return math.log(x + math.hypot(1.0, x))
# asinh(x): 3-regime rewrite of log(x + sqrt(x*x + 1)) (Herbie alternative 5).
function code(x)
	tmp = 0.0
	if (x <= -0.0068)
		# Reflected form: hypot(1,x) - x avoids cancellation for negative x.
		tmp = Float64(-log(Float64(hypot(1.0, x) - x)));
	elseif (x <= 0.0075)
		# Taylor polynomial of asinh about 0: x - x^3/6 + 3*x^5/40.
		tmp = Float64(x * Float64(1.0 + Float64(Float64(x * x) * Float64(Float64(0.075 * Float64(x * x)) - 0.16666666666666666))));
	else
		# hypot(1,x) is a robust sqrt(1 + x*x).
		tmp = log(Float64(x + hypot(1.0, x)));
	end
	return tmp
end
% asinh(x): 3-regime rewrite of log(x + sqrt(x*x + 1)) (Herbie alternative 5).
function tmp_2 = code(x)
	tmp = 0.0;
	if (x <= -0.0068)
		% Reflected form: hypot(1,x) - x avoids cancellation for negative x.
		tmp = -log((hypot(1.0, x) - x));
	elseif (x <= 0.0075)
		% Taylor polynomial of asinh about 0: x - x^3/6 + 3*x^5/40.
		tmp = x * (1.0 + ((x * x) * ((0.075 * (x * x)) - 0.16666666666666666)));
	else
		% hypot(1,x) is a robust sqrt(1 + x*x).
		tmp = log((x + hypot(1.0, x)));
	end
	tmp_2 = tmp;
end
(* asinh(x): 3-regime rewrite of log(x + sqrt(x^2 + 1)); Sqrt[1^2 + x^2] plays the role of hypot(1, x). *)
code[x_] := If[LessEqual[x, -0.0068], (-N[Log[N[(N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision] - x), $MachinePrecision]], $MachinePrecision]), If[LessEqual[x, 0.0075], N[(x * N[(1.0 + N[(N[(x * x), $MachinePrecision] * N[(N[(0.075 * N[(x * x), $MachinePrecision]), $MachinePrecision] - 0.16666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[Log[N[(x + N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x \leq -0.0068:\\
\;\;\;\;-\log \left(\mathsf{hypot}\left(1, x\right) - x\right)\\

\mathbf{elif}\;x \leq 0.0075:\\
\;\;\;\;x \cdot \left(1 + \left(x \cdot x\right) \cdot \left(0.075 \cdot \left(x \cdot x\right) - 0.16666666666666666\right)\right)\\

\mathbf{else}:\\
\;\;\;\;\log \left(x + \mathsf{hypot}\left(1, x\right)\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if x < -0.00679999999999999962

    1. Initial program 5.1%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Add Preprocessing
    3. Step-by-step derivation
      1. flip-+4.8%

        \[\leadsto \log \color{blue}{\left(\frac{x \cdot x - \sqrt{x \cdot x + 1} \cdot \sqrt{x \cdot x + 1}}{x - \sqrt{x \cdot x + 1}}\right)} \]
      2. frac-2neg4.8%

        \[\leadsto \log \color{blue}{\left(\frac{-\left(x \cdot x - \sqrt{x \cdot x + 1} \cdot \sqrt{x \cdot x + 1}\right)}{-\left(x - \sqrt{x \cdot x + 1}\right)}\right)} \]
      3. log-div4.8%

        \[\leadsto \color{blue}{\log \left(-\left(x \cdot x - \sqrt{x \cdot x + 1} \cdot \sqrt{x \cdot x + 1}\right)\right) - \log \left(-\left(x - \sqrt{x \cdot x + 1}\right)\right)} \]
      4. add-sqr-sqrt4.8%

        \[\leadsto \log \left(-\left(x \cdot x - \color{blue}{\left(x \cdot x + 1\right)}\right)\right) - \log \left(-\left(x - \sqrt{x \cdot x + 1}\right)\right) \]
      5. pow24.8%

        \[\leadsto \log \left(-\left(\color{blue}{{x}^{2}} - \left(x \cdot x + 1\right)\right)\right) - \log \left(-\left(x - \sqrt{x \cdot x + 1}\right)\right) \]
      6. fma-define4.8%

        \[\leadsto \log \left(-\left({x}^{2} - \color{blue}{\mathsf{fma}\left(x, x, 1\right)}\right)\right) - \log \left(-\left(x - \sqrt{x \cdot x + 1}\right)\right) \]
      7. +-commutative4.8%

        \[\leadsto \log \left(-\left({x}^{2} - \mathsf{fma}\left(x, x, 1\right)\right)\right) - \log \left(-\left(x - \sqrt{\color{blue}{1 + x \cdot x}}\right)\right) \]
      8. hypot-1-def4.8%

        \[\leadsto \log \left(-\left({x}^{2} - \mathsf{fma}\left(x, x, 1\right)\right)\right) - \log \left(-\left(x - \color{blue}{\mathsf{hypot}\left(1, x\right)}\right)\right) \]
    4. Applied egg-rr4.8%

      \[\leadsto \color{blue}{\log \left(-\left({x}^{2} - \mathsf{fma}\left(x, x, 1\right)\right)\right) - \log \left(-\left(x - \mathsf{hypot}\left(1, x\right)\right)\right)} \]
    5. Step-by-step derivation
      1. fma-undefine4.8%

        \[\leadsto \log \left(-\left({x}^{2} - \color{blue}{\left(x \cdot x + 1\right)}\right)\right) - \log \left(-\left(x - \mathsf{hypot}\left(1, x\right)\right)\right) \]
      2. unpow24.8%

        \[\leadsto \log \left(-\left({x}^{2} - \left(\color{blue}{{x}^{2}} + 1\right)\right)\right) - \log \left(-\left(x - \mathsf{hypot}\left(1, x\right)\right)\right) \]
      3. associate--r+49.0%

        \[\leadsto \log \left(-\color{blue}{\left(\left({x}^{2} - {x}^{2}\right) - 1\right)}\right) - \log \left(-\left(x - \mathsf{hypot}\left(1, x\right)\right)\right) \]
      4. +-inverses99.9%

        \[\leadsto \log \left(-\left(\color{blue}{0} - 1\right)\right) - \log \left(-\left(x - \mathsf{hypot}\left(1, x\right)\right)\right) \]
      5. metadata-eval99.9%

        \[\leadsto \log \left(-\color{blue}{-1}\right) - \log \left(-\left(x - \mathsf{hypot}\left(1, x\right)\right)\right) \]
      6. metadata-eval99.9%

        \[\leadsto \log \color{blue}{1} - \log \left(-\left(x - \mathsf{hypot}\left(1, x\right)\right)\right) \]
      7. metadata-eval99.9%

        \[\leadsto \color{blue}{0} - \log \left(-\left(x - \mathsf{hypot}\left(1, x\right)\right)\right) \]
      8. neg-sub099.9%

        \[\leadsto 0 - \log \color{blue}{\left(0 - \left(x - \mathsf{hypot}\left(1, x\right)\right)\right)} \]
      9. associate--r-99.9%

        \[\leadsto 0 - \log \color{blue}{\left(\left(0 - x\right) + \mathsf{hypot}\left(1, x\right)\right)} \]
      10. neg-sub099.9%

        \[\leadsto 0 - \log \left(\color{blue}{\left(-x\right)} + \mathsf{hypot}\left(1, x\right)\right) \]
      11. +-commutative99.9%

        \[\leadsto 0 - \log \color{blue}{\left(\mathsf{hypot}\left(1, x\right) + \left(-x\right)\right)} \]
      12. sub-neg99.9%

        \[\leadsto 0 - \log \color{blue}{\left(\mathsf{hypot}\left(1, x\right) - x\right)} \]
      13. neg-sub099.9%

        \[\leadsto \color{blue}{-\log \left(\mathsf{hypot}\left(1, x\right) - x\right)} \]
    6. Simplified99.9%

      \[\leadsto \color{blue}{-\log \left(\mathsf{hypot}\left(1, x\right) - x\right)} \]

    if -0.00679999999999999962 < x < 0.0074999999999999997

    1. Initial program 9.9%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Add Preprocessing
    3. Taylor expanded in x around 0 99.9%

      \[\leadsto \color{blue}{x \cdot \left(1 + {x}^{2} \cdot \left(0.075 \cdot {x}^{2} - 0.16666666666666666\right)\right)} \]
    4. Step-by-step derivation
      1. unpow299.9%

        \[\leadsto x \cdot \left(1 + {x}^{2} \cdot \left(0.075 \cdot \color{blue}{\left(x \cdot x\right)} - 0.16666666666666666\right)\right) \]
    5. Applied egg-rr99.9%

      \[\leadsto x \cdot \left(1 + {x}^{2} \cdot \left(0.075 \cdot \color{blue}{\left(x \cdot x\right)} - 0.16666666666666666\right)\right) \]
    6. Step-by-step derivation
      1. unpow299.9%

        \[\leadsto x \cdot \left(1 + {x}^{2} \cdot \left(0.075 \cdot \color{blue}{\left(x \cdot x\right)} - 0.16666666666666666\right)\right) \]
    7. Applied egg-rr99.9%

      \[\leadsto x \cdot \left(1 + \color{blue}{\left(x \cdot x\right)} \cdot \left(0.075 \cdot \left(x \cdot x\right) - 0.16666666666666666\right)\right) \]

    if 0.0074999999999999997 < x

    1. Initial program 57.7%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Step-by-step derivation
      1. sqr-neg57.7%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{\left(-x\right) \cdot \left(-x\right)} + 1}\right) \]
      2. +-commutative57.7%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{1 + \left(-x\right) \cdot \left(-x\right)}}\right) \]
      3. sqr-neg57.7%

        \[\leadsto \log \left(x + \sqrt{1 + \color{blue}{x \cdot x}}\right) \]
      4. hypot-1-def99.9%

        \[\leadsto \log \left(x + \color{blue}{\mathsf{hypot}\left(1, x\right)}\right) \]
    3. Simplified99.9%

      \[\leadsto \color{blue}{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)} \]
    4. Add Preprocessing
  3. Recombined 3 regimes into one program.
  4. Add Preprocessing

Alternative 6: 99.7% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq -1.3:\\ \;\;\;\;\log \left(\frac{-0.5}{x}\right)\\ \mathbf{elif}\;x \leq 0.0075:\\ \;\;\;\;x \cdot \left(1 + \left(x \cdot x\right) \cdot \left(0.075 \cdot \left(x \cdot x\right) - 0.16666666666666666\right)\right)\\ \mathbf{else}:\\ \;\;\;\;\log \left(x + \mathsf{hypot}\left(1, x\right)\right)\\ \end{array} \end{array} \]
(FPCore (x)
 :precision binary64
 (if (<= x -1.3)
   (log (/ -0.5 x))
   (if (<= x 0.0075)
     (* x (+ 1.0 (* (* x x) (- (* 0.075 (* x x)) 0.16666666666666666))))
     (log (+ x (hypot 1.0 x))))))
double code(double x) {
	double tmp;
	if (x <= -1.3) {
		tmp = log((-0.5 / x));
	} else if (x <= 0.0075) {
		tmp = x * (1.0 + ((x * x) * ((0.075 * (x * x)) - 0.16666666666666666)));
	} else {
		tmp = log((x + hypot(1.0, x)));
	}
	return tmp;
}
// asinh(x): 3-regime rewrite of log(x + sqrt(x*x + 1)) (Herbie alternative 6).
public static double code(double x) {
	double tmp;
	if (x <= -1.3) {
		// Asymptotic form for x -> -inf (Taylor expansion around -inf): log(-0.5/x).
		tmp = Math.log((-0.5 / x));
	} else if (x <= 0.0075) {
		// Taylor polynomial of asinh about 0: x - x^3/6 + 3*x^5/40.
		tmp = x * (1.0 + ((x * x) * ((0.075 * (x * x)) - 0.16666666666666666)));
	} else {
		// hypot(1,x) is a robust sqrt(1 + x*x).
		tmp = Math.log((x + Math.hypot(1.0, x)));
	}
	return tmp;
}
def code(x):
	"""asinh(x): three-regime rewrite of log(x + sqrt(x*x + 1)) (Herbie alt. 6)."""
	if x <= -1.3:
		# Asymptotic form for x -> -inf (Taylor expansion around -inf).
		return math.log(-0.5 / x)
	if x <= 0.0075:
		# Taylor polynomial of asinh about 0: x - x^3/6 + 3*x^5/40.
		xx = x * x
		return x * (1.0 + xx * (0.075 * xx - 0.16666666666666666))
	# hypot(1, x) is a robust sqrt(1 + x*x).
	return math.log(x + math.hypot(1.0, x))
# asinh(x): 3-regime rewrite of log(x + sqrt(x*x + 1)) (Herbie alternative 6).
function code(x)
	tmp = 0.0
	if (x <= -1.3)
		# Asymptotic form for x -> -inf (Taylor expansion around -inf).
		tmp = log(Float64(-0.5 / x));
	elseif (x <= 0.0075)
		# Taylor polynomial of asinh about 0: x - x^3/6 + 3*x^5/40.
		tmp = Float64(x * Float64(1.0 + Float64(Float64(x * x) * Float64(Float64(0.075 * Float64(x * x)) - 0.16666666666666666))));
	else
		# hypot(1,x) is a robust sqrt(1 + x*x).
		tmp = log(Float64(x + hypot(1.0, x)));
	end
	return tmp
end
% asinh(x): 3-regime rewrite of log(x + sqrt(x*x + 1)) (Herbie alternative 6).
function tmp_2 = code(x)
	tmp = 0.0;
	if (x <= -1.3)
		% Asymptotic form for x -> -inf (Taylor expansion around -inf).
		tmp = log((-0.5 / x));
	elseif (x <= 0.0075)
		% Taylor polynomial of asinh about 0: x - x^3/6 + 3*x^5/40.
		tmp = x * (1.0 + ((x * x) * ((0.075 * (x * x)) - 0.16666666666666666)));
	else
		% hypot(1,x) is a robust sqrt(1 + x*x).
		tmp = log((x + hypot(1.0, x)));
	end
	tmp_2 = tmp;
end
(* asinh(x): 3-regime rewrite of log(x + sqrt(x^2 + 1)); Sqrt[1^2 + x^2] plays the role of hypot(1, x). *)
code[x_] := If[LessEqual[x, -1.3], N[Log[N[(-0.5 / x), $MachinePrecision]], $MachinePrecision], If[LessEqual[x, 0.0075], N[(x * N[(1.0 + N[(N[(x * x), $MachinePrecision] * N[(N[(0.075 * N[(x * x), $MachinePrecision]), $MachinePrecision] - 0.16666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[Log[N[(x + N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x \leq -1.3:\\
\;\;\;\;\log \left(\frac{-0.5}{x}\right)\\

\mathbf{elif}\;x \leq 0.0075:\\
\;\;\;\;x \cdot \left(1 + \left(x \cdot x\right) \cdot \left(0.075 \cdot \left(x \cdot x\right) - 0.16666666666666666\right)\right)\\

\mathbf{else}:\\
\;\;\;\;\log \left(x + \mathsf{hypot}\left(1, x\right)\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if x < -1.30000000000000004

    1. Initial program 3.5%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Add Preprocessing
    3. Taylor expanded in x around -inf 98.7%

      \[\leadsto \log \color{blue}{\left(\frac{-0.5}{x}\right)} \]

    if -1.30000000000000004 < x < 0.0074999999999999997

    1. Initial program 10.6%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Add Preprocessing
    3. Taylor expanded in x around 0 99.5%

      \[\leadsto \color{blue}{x \cdot \left(1 + {x}^{2} \cdot \left(0.075 \cdot {x}^{2} - 0.16666666666666666\right)\right)} \]
    4. Step-by-step derivation
      1. unpow299.5%

        \[\leadsto x \cdot \left(1 + {x}^{2} \cdot \left(0.075 \cdot \color{blue}{\left(x \cdot x\right)} - 0.16666666666666666\right)\right) \]
    5. Applied egg-rr99.5%

      \[\leadsto x \cdot \left(1 + {x}^{2} \cdot \left(0.075 \cdot \color{blue}{\left(x \cdot x\right)} - 0.16666666666666666\right)\right) \]
    6. Step-by-step derivation
      1. unpow299.5%

        \[\leadsto x \cdot \left(1 + {x}^{2} \cdot \left(0.075 \cdot \color{blue}{\left(x \cdot x\right)} - 0.16666666666666666\right)\right) \]
    7. Applied egg-rr99.5%

      \[\leadsto x \cdot \left(1 + \color{blue}{\left(x \cdot x\right)} \cdot \left(0.075 \cdot \left(x \cdot x\right) - 0.16666666666666666\right)\right) \]

    if 0.0074999999999999997 < x

    1. Initial program 57.7%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Step-by-step derivation
      1. sqr-neg57.7%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{\left(-x\right) \cdot \left(-x\right)} + 1}\right) \]
      2. +-commutative57.7%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{1 + \left(-x\right) \cdot \left(-x\right)}}\right) \]
      3. sqr-neg57.7%

        \[\leadsto \log \left(x + \sqrt{1 + \color{blue}{x \cdot x}}\right) \]
      4. hypot-1-def99.9%

        \[\leadsto \log \left(x + \color{blue}{\mathsf{hypot}\left(1, x\right)}\right) \]
    3. Simplified99.9%

      \[\leadsto \color{blue}{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)} \]
    4. Add Preprocessing
  3. Recombined 3 regimes into one program.
  4. Add Preprocessing

Alternative 7: 99.5% accurate, 1.8× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq -1.3:\\ \;\;\;\;\log \left(\frac{-0.5}{x}\right)\\ \mathbf{elif}\;x \leq 1.3:\\ \;\;\;\;x \cdot \left(1 + \left(x \cdot x\right) \cdot \left(0.075 \cdot \left(x \cdot x\right) - 0.16666666666666666\right)\right)\\ \mathbf{else}:\\ \;\;\;\;\log \left(x + x\right)\\ \end{array} \end{array} \]
(FPCore (x)
 :precision binary64
 (if (<= x -1.3)
   (log (/ -0.5 x))
   (if (<= x 1.3)
     (* x (+ 1.0 (* (* x x) (- (* 0.075 (* x x)) 0.16666666666666666))))
     (log (+ x x)))))
/* asinh(x): 3-regime rewrite of log(x + sqrt(x*x + 1)) (Herbie alternative 7). */
double code(double x) {
	double tmp;
	if (x <= -1.3) {
		/* Asymptotic form for x -> -inf (Taylor expansion around -inf). */
		tmp = log((-0.5 / x));
	} else if (x <= 1.3) {
		/* Taylor polynomial of asinh about 0: x - x^3/6 + 3*x^5/40. */
		tmp = x * (1.0 + ((x * x) * ((0.075 * (x * x)) - 0.16666666666666666)));
	} else {
		/* Asymptotic form for x -> +inf: log(2x). */
		tmp = log((x + x));
	}
	return tmp;
}
! asinh(x): 3-regime rewrite of log(x + sqrt(x*x + 1)) (Herbie alternative 7).
real(8) function code(x)
    real(8), intent (in) :: x
    real(8) :: tmp
    if (x <= (-1.3d0)) then
        ! Asymptotic form for x -> -inf (Taylor expansion around -inf).
        tmp = log(((-0.5d0) / x))
    else if (x <= 1.3d0) then
        ! Taylor polynomial of asinh about 0: x - x**3/6 + 3*x**5/40.
        tmp = x * (1.0d0 + ((x * x) * ((0.075d0 * (x * x)) - 0.16666666666666666d0)))
    else
        ! Asymptotic form for x -> +inf: log(2x).
        tmp = log((x + x))
    end if
    code = tmp
end function
// asinh(x): 3-regime rewrite of log(x + sqrt(x*x + 1)) (Herbie alternative 7).
public static double code(double x) {
	double tmp;
	if (x <= -1.3) {
		// Asymptotic form for x -> -inf (Taylor expansion around -inf).
		tmp = Math.log((-0.5 / x));
	} else if (x <= 1.3) {
		// Taylor polynomial of asinh about 0: x - x^3/6 + 3*x^5/40.
		tmp = x * (1.0 + ((x * x) * ((0.075 * (x * x)) - 0.16666666666666666)));
	} else {
		// Asymptotic form for x -> +inf: log(2x).
		tmp = Math.log((x + x));
	}
	return tmp;
}
def code(x):
	"""asinh(x): three-regime rewrite of log(x + sqrt(x*x + 1)) (Herbie alt. 7)."""
	if x <= -1.3:
		# Asymptotic form for x -> -inf (Taylor expansion around -inf).
		return math.log(-0.5 / x)
	if x <= 1.3:
		# Taylor polynomial of asinh about 0: x - x^3/6 + 3*x^5/40.
		xx = x * x
		return x * (1.0 + xx * (0.075 * xx - 0.16666666666666666))
	# Asymptotic form for x -> +inf: log(2x).
	return math.log(x + x)
# asinh(x): 3-regime rewrite of log(x + sqrt(x*x + 1)) (Herbie alternative 7).
function code(x)
	tmp = 0.0
	if (x <= -1.3)
		# Asymptotic form for x -> -inf (Taylor expansion around -inf).
		tmp = log(Float64(-0.5 / x));
	elseif (x <= 1.3)
		# Taylor polynomial of asinh about 0: x - x^3/6 + 3*x^5/40.
		tmp = Float64(x * Float64(1.0 + Float64(Float64(x * x) * Float64(Float64(0.075 * Float64(x * x)) - 0.16666666666666666))));
	else
		# Asymptotic form for x -> +inf: log(2x).
		tmp = log(Float64(x + x));
	end
	return tmp
end
% asinh(x): 3-regime rewrite of log(x + sqrt(x*x + 1)) (Herbie alternative 7).
function tmp_2 = code(x)
	tmp = 0.0;
	if (x <= -1.3)
		% Asymptotic form for x -> -inf (Taylor expansion around -inf).
		tmp = log((-0.5 / x));
	elseif (x <= 1.3)
		% Taylor polynomial of asinh about 0: x - x^3/6 + 3*x^5/40.
		tmp = x * (1.0 + ((x * x) * ((0.075 * (x * x)) - 0.16666666666666666)));
	else
		% Asymptotic form for x -> +inf: log(2x).
		tmp = log((x + x));
	end
	tmp_2 = tmp;
end
(* asinh(x): 3-regime rewrite of log(x + sqrt(x^2 + 1)); central branch is the Taylor polynomial about 0. *)
code[x_] := If[LessEqual[x, -1.3], N[Log[N[(-0.5 / x), $MachinePrecision]], $MachinePrecision], If[LessEqual[x, 1.3], N[(x * N[(1.0 + N[(N[(x * x), $MachinePrecision] * N[(N[(0.075 * N[(x * x), $MachinePrecision]), $MachinePrecision] - 0.16666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[Log[N[(x + x), $MachinePrecision]], $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x \leq -1.3:\\
\;\;\;\;\log \left(\frac{-0.5}{x}\right)\\

\mathbf{elif}\;x \leq 1.3:\\
\;\;\;\;x \cdot \left(1 + \left(x \cdot x\right) \cdot \left(0.075 \cdot \left(x \cdot x\right) - 0.16666666666666666\right)\right)\\

\mathbf{else}:\\
\;\;\;\;\log \left(x + x\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if x < -1.30000000000000004

    1. Initial program 3.5%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Add Preprocessing
    3. Taylor expanded in x around -inf 98.7%

      \[\leadsto \log \color{blue}{\left(\frac{-0.5}{x}\right)} \]

    if -1.30000000000000004 < x < 1.30000000000000004

    1. Initial program 11.3%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Add Preprocessing
    3. Taylor expanded in x around 0 99.2%

      \[\leadsto \color{blue}{x \cdot \left(1 + {x}^{2} \cdot \left(0.075 \cdot {x}^{2} - 0.16666666666666666\right)\right)} \]
    4. Step-by-step derivation
      1. unpow299.2%

        \[\leadsto x \cdot \left(1 + {x}^{2} \cdot \left(0.075 \cdot \color{blue}{\left(x \cdot x\right)} - 0.16666666666666666\right)\right) \]
    5. Applied egg-rr99.2%

      \[\leadsto x \cdot \left(1 + {x}^{2} \cdot \left(0.075 \cdot \color{blue}{\left(x \cdot x\right)} - 0.16666666666666666\right)\right) \]
    6. Step-by-step derivation
      1. unpow299.2%

        \[\leadsto x \cdot \left(1 + {x}^{2} \cdot \left(0.075 \cdot \color{blue}{\left(x \cdot x\right)} - 0.16666666666666666\right)\right) \]
    7. Applied egg-rr99.2%

      \[\leadsto x \cdot \left(1 + \color{blue}{\left(x \cdot x\right)} \cdot \left(0.075 \cdot \left(x \cdot x\right) - 0.16666666666666666\right)\right) \]

    if 1.30000000000000004 < x

    1. Initial program 57.2%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Add Preprocessing
    3. Taylor expanded in x around inf 98.7%

      \[\leadsto \log \left(x + \color{blue}{x}\right) \]
  3. Recombined 3 regimes into one program.
  4. Add Preprocessing

Alternative 8: 78.4% accurate, 1.8× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq -3.6:\\ \;\;\;\;-42.666666666666664\\ \mathbf{elif}\;x \leq 1.3:\\ \;\;\;\;x \cdot \left(1 + \left(x \cdot x\right) \cdot \left(0.075 \cdot \left(x \cdot x\right) - 0.16666666666666666\right)\right)\\ \mathbf{else}:\\ \;\;\;\;\log \left(x + x\right)\\ \end{array} \end{array} \]
(FPCore (x)
 :precision binary64
 (if (<= x -3.6)
   -42.666666666666664
   (if (<= x 1.3)
     (* x (+ 1.0 (* (* x x) (- (* 0.075 (* x x)) 0.16666666666666666))))
     (log (+ x x)))))
/* asinh(x): 3-regime rewrite of log(x + sqrt(x*x + 1)) (Herbie alternative 8, 78.4% accurate). */
double code(double x) {
	double tmp;
	if (x <= -3.6) {
		/* Constant asymptote from Taylor expansion around -inf; accuracy is poor on this regime. */
		tmp = -42.666666666666664;
	} else if (x <= 1.3) {
		/* Taylor polynomial of asinh about 0: x - x^3/6 + 3*x^5/40. */
		tmp = x * (1.0 + ((x * x) * ((0.075 * (x * x)) - 0.16666666666666666)));
	} else {
		/* Asymptotic form for x -> +inf: log(2x). */
		tmp = log((x + x));
	}
	return tmp;
}
! asinh(x): 3-regime rewrite of log(x + sqrt(x*x + 1)) (Herbie alternative 8, 78.4% accurate).
real(8) function code(x)
    real(8), intent (in) :: x
    real(8) :: tmp
    if (x <= (-3.6d0)) then
        ! Constant asymptote from Taylor expansion around -inf; accuracy is poor here.
        tmp = -42.666666666666664d0
    else if (x <= 1.3d0) then
        ! Taylor polynomial of asinh about 0: x - x**3/6 + 3*x**5/40.
        tmp = x * (1.0d0 + ((x * x) * ((0.075d0 * (x * x)) - 0.16666666666666666d0)))
    else
        ! Asymptotic form for x -> +inf: log(2x).
        tmp = log((x + x))
    end if
    code = tmp
end function
// asinh(x): 3-regime rewrite of log(x + sqrt(x*x + 1)) (Herbie alternative 8, 78.4% accurate).
public static double code(double x) {
	double tmp;
	if (x <= -3.6) {
		// Constant asymptote from Taylor expansion around -inf; accuracy is poor here.
		tmp = -42.666666666666664;
	} else if (x <= 1.3) {
		// Taylor polynomial of asinh about 0: x - x^3/6 + 3*x^5/40.
		tmp = x * (1.0 + ((x * x) * ((0.075 * (x * x)) - 0.16666666666666666)));
	} else {
		// Asymptotic form for x -> +inf: log(2x).
		tmp = Math.log((x + x));
	}
	return tmp;
}
def code(x):
	"""asinh(x): three-regime rewrite of log(x + sqrt(x*x + 1)) (Herbie alt. 8)."""
	if x <= -3.6:
		# Constant asymptote from Taylor expansion around -inf; accuracy is poor here.
		return -42.666666666666664
	if x <= 1.3:
		# Taylor polynomial of asinh about 0: x - x^3/6 + 3*x^5/40.
		xx = x * x
		return x * (1.0 + xx * (0.075 * xx - 0.16666666666666666))
	# Asymptotic form for x -> +inf: log(2x).
	return math.log(x + x)
# asinh(x): 3-regime rewrite of log(x + sqrt(x*x + 1)) (Herbie alternative 8, 78.4% accurate).
function code(x)
	tmp = 0.0
	if (x <= -3.6)
		# Constant asymptote from Taylor expansion around -inf; accuracy is poor here.
		tmp = -42.666666666666664;
	elseif (x <= 1.3)
		# Taylor polynomial of asinh about 0: x - x^3/6 + 3*x^5/40.
		tmp = Float64(x * Float64(1.0 + Float64(Float64(x * x) * Float64(Float64(0.075 * Float64(x * x)) - 0.16666666666666666))));
	else
		# Asymptotic form for x -> +inf: log(2x).
		tmp = log(Float64(x + x));
	end
	return tmp
end
% asinh(x): 3-regime rewrite of log(x + sqrt(x*x + 1)) (Herbie alternative 8, 78.4% accurate).
function tmp_2 = code(x)
	tmp = 0.0;
	if (x <= -3.6)
		% Constant asymptote from Taylor expansion around -inf; accuracy is poor here.
		tmp = -42.666666666666664;
	elseif (x <= 1.3)
		% Taylor polynomial of asinh about 0: x - x^3/6 + 3*x^5/40.
		tmp = x * (1.0 + ((x * x) * ((0.075 * (x * x)) - 0.16666666666666666)));
	else
		% Asymptotic form for x -> +inf: log(2x).
		tmp = log((x + x));
	end
	tmp_2 = tmp;
end
(* asinh(x): 3-regime rewrite of log(x + sqrt(x^2 + 1)); constant branch is a Taylor asymptote around -inf. *)
code[x_] := If[LessEqual[x, -3.6], -42.666666666666664, If[LessEqual[x, 1.3], N[(x * N[(1.0 + N[(N[(x * x), $MachinePrecision] * N[(N[(0.075 * N[(x * x), $MachinePrecision]), $MachinePrecision] - 0.16666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[Log[N[(x + x), $MachinePrecision]], $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x \leq -3.6:\\
\;\;\;\;-42.666666666666664\\

\mathbf{elif}\;x \leq 1.3:\\
\;\;\;\;x \cdot \left(1 + \left(x \cdot x\right) \cdot \left(0.075 \cdot \left(x \cdot x\right) - 0.16666666666666666\right)\right)\\

\mathbf{else}:\\
\;\;\;\;\log \left(x + x\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if x < -3.60000000000000009

    1. Initial program 1.8%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Add Preprocessing
    3. Step-by-step derivation
      1. add-sqr-sqrt1.8%

        \[\leadsto \log \color{blue}{\left(\sqrt{x + \sqrt{x \cdot x + 1}} \cdot \sqrt{x + \sqrt{x \cdot x + 1}}\right)} \]
      2. pow21.8%

        \[\leadsto \log \color{blue}{\left({\left(\sqrt{x + \sqrt{x \cdot x + 1}}\right)}^{2}\right)} \]
      3. log-pow1.8%

        \[\leadsto \color{blue}{2 \cdot \log \left(\sqrt{x + \sqrt{x \cdot x + 1}}\right)} \]
      4. +-commutative1.8%

        \[\leadsto 2 \cdot \log \left(\sqrt{x + \sqrt{\color{blue}{1 + x \cdot x}}}\right) \]
      5. hypot-1-def3.1%

        \[\leadsto 2 \cdot \log \left(\sqrt{x + \color{blue}{\mathsf{hypot}\left(1, x\right)}}\right) \]
    4. Applied egg-rr3.1%

      \[\leadsto \color{blue}{2 \cdot \log \left(\sqrt{x + \mathsf{hypot}\left(1, x\right)}\right)} \]
    5. Taylor expanded in x around 0 1.1%

      \[\leadsto 2 \cdot \log \color{blue}{\left(1 + x \cdot \left(0.5 + 0.125 \cdot x\right)\right)} \]
    6. Step-by-step derivation
      1. *-commutative1.1%

        \[\leadsto 2 \cdot \log \left(1 + x \cdot \left(0.5 + \color{blue}{x \cdot 0.125}\right)\right) \]
    7. Simplified1.1%

      \[\leadsto 2 \cdot \log \color{blue}{\left(1 + x \cdot \left(0.5 + x \cdot 0.125\right)\right)} \]
    8. Taylor expanded in x around -inf 1.5%

      \[\leadsto \color{blue}{-1 \cdot \frac{21.333333333333332 \cdot \frac{1}{{x}^{2}} - \left(8 + \frac{64}{{x}^{3}}\right)}{x} + 2 \cdot \left(\log 0.125 + -2 \cdot \log \left(\frac{-1}{x}\right)\right)} \]
    9. Simplified17.2%

      \[\leadsto \color{blue}{-42.666666666666664} \]

    if -3.60000000000000009 < x < 1.30000000000000004

    1. Initial program 12.0%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Add Preprocessing
    3. Taylor expanded in x around 0 98.5%

      \[\leadsto \color{blue}{x \cdot \left(1 + {x}^{2} \cdot \left(0.075 \cdot {x}^{2} - 0.16666666666666666\right)\right)} \]
    4. Step-by-step derivation
      1. unpow298.5%

        \[\leadsto x \cdot \left(1 + {x}^{2} \cdot \left(0.075 \cdot \color{blue}{\left(x \cdot x\right)} - 0.16666666666666666\right)\right) \]
    5. Applied egg-rr98.5%

      \[\leadsto x \cdot \left(1 + {x}^{2} \cdot \left(0.075 \cdot \color{blue}{\left(x \cdot x\right)} - 0.16666666666666666\right)\right) \]
    6. Step-by-step derivation
      1. unpow298.5%

        \[\leadsto x \cdot \left(1 + {x}^{2} \cdot \left(0.075 \cdot \color{blue}{\left(x \cdot x\right)} - 0.16666666666666666\right)\right) \]
    7. Applied egg-rr98.5%

      \[\leadsto x \cdot \left(1 + \color{blue}{\left(x \cdot x\right)} \cdot \left(0.075 \cdot \left(x \cdot x\right) - 0.16666666666666666\right)\right) \]

    if 1.30000000000000004 < x

    1. Initial program 57.2%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Add Preprocessing
    3. Taylor expanded in x around inf 98.7%

      \[\leadsto \log \left(x + \color{blue}{x}\right) \]
  3. Recombined 3 regimes into one program.
  4. Add Preprocessing

Alternative 9: 61.7% accurate, 1.8× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq -3.6:\\ \;\;\;\;-42.666666666666664\\ \mathbf{elif}\;x \leq 1.55:\\ \;\;\;\;x \cdot \left(1 + \left(x \cdot x\right) \cdot \left(0.075 \cdot \left(x \cdot x\right) - 0.16666666666666666\right)\right)\\ \mathbf{else}:\\ \;\;\;\;\log \left(x + 1\right)\\ \end{array} \end{array} \]
(FPCore (x)
 :precision binary64
 (if (<= x -3.6)
   -42.666666666666664
   (if (<= x 1.55)
     (* x (+ 1.0 (* (* x x) (- (* 0.075 (* x x)) 0.16666666666666666))))
     (log (+ x 1.0)))))
/* asinh(x): 3-regime rewrite of log(x + sqrt(x*x + 1)) (Herbie alternative 9, 61.7% accurate). */
double code(double x) {
	double tmp;
	if (x <= -3.6) {
		/* Constant asymptote from Taylor expansion around -inf; accuracy is poor here. */
		tmp = -42.666666666666664;
	} else if (x <= 1.55) {
		/* Taylor polynomial of asinh about 0: x - x^3/6 + 3*x^5/40. */
		tmp = x * (1.0 + ((x * x) * ((0.075 * (x * x)) - 0.16666666666666666)));
	} else {
		/* Crude Taylor truncation about 0 for the tail; accuracy is low on this regime. */
		tmp = log((x + 1.0));
	}
	return tmp;
}
! asinh(x): 3-regime rewrite of log(x + sqrt(x*x + 1)) (Herbie alternative 9, 61.7% accurate).
real(8) function code(x)
    real(8), intent (in) :: x
    real(8) :: tmp
    if (x <= (-3.6d0)) then
        ! Constant asymptote from Taylor expansion around -inf; accuracy is poor here.
        tmp = -42.666666666666664d0
    else if (x <= 1.55d0) then
        ! Taylor polynomial of asinh about 0: x - x**3/6 + 3*x**5/40.
        tmp = x * (1.0d0 + ((x * x) * ((0.075d0 * (x * x)) - 0.16666666666666666d0)))
    else
        ! Crude Taylor truncation about 0 for the tail; accuracy is low on this regime.
        tmp = log((x + 1.0d0))
    end if
    code = tmp
end function
// asinh(x): 3-regime rewrite of log(x + sqrt(x*x + 1)) (Herbie alternative 9, 61.7% accurate).
public static double code(double x) {
	double tmp;
	if (x <= -3.6) {
		// Constant asymptote from Taylor expansion around -inf; accuracy is poor here.
		tmp = -42.666666666666664;
	} else if (x <= 1.55) {
		// Taylor polynomial of asinh about 0: x - x^3/6 + 3*x^5/40.
		tmp = x * (1.0 + ((x * x) * ((0.075 * (x * x)) - 0.16666666666666666)));
	} else {
		// Crude Taylor truncation about 0 for the tail; accuracy is low on this regime.
		tmp = Math.log((x + 1.0));
	}
	return tmp;
}
def code(x):
	"""asinh(x): three-regime rewrite of log(x + sqrt(x*x + 1)) (Herbie alt. 9)."""
	if x <= -3.6:
		# Constant asymptote from Taylor expansion around -inf; accuracy is poor here.
		return -42.666666666666664
	if x <= 1.55:
		# Taylor polynomial of asinh about 0: x - x^3/6 + 3*x^5/40.
		xx = x * x
		return x * (1.0 + xx * (0.075 * xx - 0.16666666666666666))
	# Crude Taylor truncation about 0 for the tail; accuracy is low on this regime.
	return math.log(x + 1.0)
# asinh(x): 3-regime rewrite of log(x + sqrt(x*x + 1)) (Herbie alternative 9, 61.7% accurate).
function code(x)
	tmp = 0.0
	if (x <= -3.6)
		# Constant asymptote from Taylor expansion around -inf; accuracy is poor here.
		tmp = -42.666666666666664;
	elseif (x <= 1.55)
		# Taylor polynomial of asinh about 0: x - x^3/6 + 3*x^5/40.
		tmp = Float64(x * Float64(1.0 + Float64(Float64(x * x) * Float64(Float64(0.075 * Float64(x * x)) - 0.16666666666666666))));
	else
		# Crude Taylor truncation about 0 for the tail; accuracy is low on this regime.
		tmp = log(Float64(x + 1.0));
	end
	return tmp
end
% asinh(x): 3-regime rewrite of log(x + sqrt(x*x + 1)) (Herbie alternative 9, 61.7% accurate).
function tmp_2 = code(x)
	tmp = 0.0;
	if (x <= -3.6)
		% Constant asymptote from Taylor expansion around -inf; accuracy is poor here.
		tmp = -42.666666666666664;
	elseif (x <= 1.55)
		% Taylor polynomial of asinh about 0: x - x^3/6 + 3*x^5/40.
		tmp = x * (1.0 + ((x * x) * ((0.075 * (x * x)) - 0.16666666666666666)));
	else
		% Crude Taylor truncation about 0 for the tail; accuracy is low on this regime.
		tmp = log((x + 1.0));
	end
	tmp_2 = tmp;
end
(* asinh(x): 3-regime rewrite of log(x + sqrt(x^2 + 1)); tail branch Log[x + 1] is a crude truncation. *)
code[x_] := If[LessEqual[x, -3.6], -42.666666666666664, If[LessEqual[x, 1.55], N[(x * N[(1.0 + N[(N[(x * x), $MachinePrecision] * N[(N[(0.075 * N[(x * x), $MachinePrecision]), $MachinePrecision] - 0.16666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[Log[N[(x + 1.0), $MachinePrecision]], $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x \leq -3.6:\\
\;\;\;\;-42.666666666666664\\

\mathbf{elif}\;x \leq 1.55:\\
\;\;\;\;x \cdot \left(1 + \left(x \cdot x\right) \cdot \left(0.075 \cdot \left(x \cdot x\right) - 0.16666666666666666\right)\right)\\

\mathbf{else}:\\
\;\;\;\;\log \left(x + 1\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if x < -3.60000000000000009

    1. Initial program 1.8%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Add Preprocessing
    3. Step-by-step derivation
      1. add-sqr-sqrt1.8%

        \[\leadsto \log \color{blue}{\left(\sqrt{x + \sqrt{x \cdot x + 1}} \cdot \sqrt{x + \sqrt{x \cdot x + 1}}\right)} \]
      2. pow21.8%

        \[\leadsto \log \color{blue}{\left({\left(\sqrt{x + \sqrt{x \cdot x + 1}}\right)}^{2}\right)} \]
      3. log-pow1.8%

        \[\leadsto \color{blue}{2 \cdot \log \left(\sqrt{x + \sqrt{x \cdot x + 1}}\right)} \]
      4. +-commutative1.8%

        \[\leadsto 2 \cdot \log \left(\sqrt{x + \sqrt{\color{blue}{1 + x \cdot x}}}\right) \]
      5. hypot-1-def3.1%

        \[\leadsto 2 \cdot \log \left(\sqrt{x + \color{blue}{\mathsf{hypot}\left(1, x\right)}}\right) \]
    4. Applied egg-rr3.1%

      \[\leadsto \color{blue}{2 \cdot \log \left(\sqrt{x + \mathsf{hypot}\left(1, x\right)}\right)} \]
    5. Taylor expanded in x around 0 1.1%

      \[\leadsto 2 \cdot \log \color{blue}{\left(1 + x \cdot \left(0.5 + 0.125 \cdot x\right)\right)} \]
    6. Step-by-step derivation
      1. *-commutative1.1%

        \[\leadsto 2 \cdot \log \left(1 + x \cdot \left(0.5 + \color{blue}{x \cdot 0.125}\right)\right) \]
    7. Simplified1.1%

      \[\leadsto 2 \cdot \log \color{blue}{\left(1 + x \cdot \left(0.5 + x \cdot 0.125\right)\right)} \]
    8. Taylor expanded in x around -inf 1.5%

      \[\leadsto \color{blue}{-1 \cdot \frac{21.333333333333332 \cdot \frac{1}{{x}^{2}} - \left(8 + \frac{64}{{x}^{3}}\right)}{x} + 2 \cdot \left(\log 0.125 + -2 \cdot \log \left(\frac{-1}{x}\right)\right)} \]
    9. Simplified17.2%

      \[\leadsto \color{blue}{-42.666666666666664} \]

    if -3.60000000000000009 < x < 1.55000000000000004

    1. Initial program 12.0%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Add Preprocessing
    3. Taylor expanded in x around 0 98.5%

      \[\leadsto \color{blue}{x \cdot \left(1 + {x}^{2} \cdot \left(0.075 \cdot {x}^{2} - 0.16666666666666666\right)\right)} \]
    4. Step-by-step derivation
      1. unpow298.5%

        \[\leadsto x \cdot \left(1 + {x}^{2} \cdot \left(0.075 \cdot \color{blue}{\left(x \cdot x\right)} - 0.16666666666666666\right)\right) \]
    5. Applied egg-rr98.5%

      \[\leadsto x \cdot \left(1 + {x}^{2} \cdot \left(0.075 \cdot \color{blue}{\left(x \cdot x\right)} - 0.16666666666666666\right)\right) \]
    6. Step-by-step derivation
      1. unpow298.5%

        \[\leadsto x \cdot \left(1 + {x}^{2} \cdot \left(0.075 \cdot \color{blue}{\left(x \cdot x\right)} - 0.16666666666666666\right)\right) \]
    7. Applied egg-rr98.5%

      \[\leadsto x \cdot \left(1 + \color{blue}{\left(x \cdot x\right)} \cdot \left(0.075 \cdot \left(x \cdot x\right) - 0.16666666666666666\right)\right) \]

    if 1.55000000000000004 < x

    1. Initial program 57.2%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Add Preprocessing
    3. Taylor expanded in x around 0 31.2%

      \[\leadsto \log \left(x + \color{blue}{1}\right) \]
  3. Recombined 3 regimes into one program.
  4. Add Preprocessing

Alternative 10: 61.7% accurate, 1.9× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq -3.6:\\ \;\;\;\;-42.666666666666664\\ \mathbf{elif}\;x \leq 1.55:\\ \;\;\;\;x \cdot \left(1 + \left(x \cdot x\right) \cdot \left(0.075 \cdot \left(x \cdot x\right) - 0.16666666666666666\right)\right)\\ \mathbf{else}:\\ \;\;\;\;\mathsf{log1p}\left(x\right)\\ \end{array} \end{array} \]
(FPCore (x)
 :precision binary64
 (if (<= x -3.6)
   -42.666666666666664
   (if (<= x 1.55)
     (* x (+ 1.0 (* (* x x) (- (* 0.075 (* x x)) 0.16666666666666666))))
     (log1p x))))
/* asinh(x): 3-regime rewrite of log(x + sqrt(x*x + 1)) (Herbie alternative 10, 61.7% accurate). */
double code(double x) {
	double tmp;
	if (x <= -3.6) {
		/* Constant asymptote from Taylor expansion around -inf; accuracy is poor here. */
		tmp = -42.666666666666664;
	} else if (x <= 1.55) {
		/* Taylor polynomial of asinh about 0: x - x^3/6 + 3*x^5/40. */
		tmp = x * (1.0 + ((x * x) * ((0.075 * (x * x)) - 0.16666666666666666)));
	} else {
		/* log1p(x) = log(1 + x): crude tail approximation, but cheap. */
		tmp = log1p(x);
	}
	return tmp;
}
// asinh(x): 3-regime rewrite of log(x + sqrt(x*x + 1)) (Herbie alternative 10, 61.7% accurate).
public static double code(double x) {
	double tmp;
	if (x <= -3.6) {
		// Constant asymptote from Taylor expansion around -inf; accuracy is poor here.
		tmp = -42.666666666666664;
	} else if (x <= 1.55) {
		// Taylor polynomial of asinh about 0: x - x^3/6 + 3*x^5/40.
		tmp = x * (1.0 + ((x * x) * ((0.075 * (x * x)) - 0.16666666666666666)));
	} else {
		// log1p(x) = log(1 + x): crude tail approximation, but cheap.
		tmp = Math.log1p(x);
	}
	return tmp;
}
def code(x):
	"""asinh(x): three-regime rewrite of log(x + sqrt(x*x + 1)) (Herbie alt. 10)."""
	if x <= -3.6:
		# Constant asymptote from Taylor expansion around -inf; accuracy is poor here.
		return -42.666666666666664
	if x <= 1.55:
		# Taylor polynomial of asinh about 0: x - x^3/6 + 3*x^5/40.
		xx = x * x
		return x * (1.0 + xx * (0.075 * xx - 0.16666666666666666))
	# log1p(x) = log(1 + x): crude tail approximation, but cheap.
	return math.log1p(x)
# Herbie alternative 10 for asinh: piecewise stand-in for
# log(x + sqrt(x*x + 1)); reported 61.7% accurate, 1.9x speedup.
function code(x)
	tmp = 0.0
	if (x <= -3.6)
		# constant tail (report's Taylor expansion around -inf)
		tmp = -42.666666666666664;
	elseif (x <= 1.55)
		# odd degree-5 Taylor polynomial of asinh around x = 0
		tmp = Float64(x * Float64(1.0 + Float64(Float64(x * x) * Float64(Float64(0.075 * Float64(x * x)) - 0.16666666666666666))));
	else
		tmp = log1p(x);
	end
	return tmp
end
code[x_] := If[LessEqual[x, -3.6], -42.666666666666664, If[LessEqual[x, 1.55], N[(x * N[(1.0 + N[(N[(x * x), $MachinePrecision] * N[(N[(0.075 * N[(x * x), $MachinePrecision]), $MachinePrecision] - 0.16666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[Log[1 + x], $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x \leq -3.6:\\
\;\;\;\;-42.666666666666664\\

\mathbf{elif}\;x \leq 1.55:\\
\;\;\;\;x \cdot \left(1 + \left(x \cdot x\right) \cdot \left(0.075 \cdot \left(x \cdot x\right) - 0.16666666666666666\right)\right)\\

\mathbf{else}:\\
\;\;\;\;\mathsf{log1p}\left(x\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if x < -3.60000000000000009

    1. Initial program 1.8%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Add Preprocessing
    3. Step-by-step derivation
      1. add-sqr-sqrt1.8%

        \[\leadsto \log \color{blue}{\left(\sqrt{x + \sqrt{x \cdot x + 1}} \cdot \sqrt{x + \sqrt{x \cdot x + 1}}\right)} \]
      2. pow21.8%

        \[\leadsto \log \color{blue}{\left({\left(\sqrt{x + \sqrt{x \cdot x + 1}}\right)}^{2}\right)} \]
      3. log-pow1.8%

        \[\leadsto \color{blue}{2 \cdot \log \left(\sqrt{x + \sqrt{x \cdot x + 1}}\right)} \]
      4. +-commutative1.8%

        \[\leadsto 2 \cdot \log \left(\sqrt{x + \sqrt{\color{blue}{1 + x \cdot x}}}\right) \]
      5. hypot-1-def3.1%

        \[\leadsto 2 \cdot \log \left(\sqrt{x + \color{blue}{\mathsf{hypot}\left(1, x\right)}}\right) \]
    4. Applied egg-rr3.1%

      \[\leadsto \color{blue}{2 \cdot \log \left(\sqrt{x + \mathsf{hypot}\left(1, x\right)}\right)} \]
    5. Taylor expanded in x around 0 1.1%

      \[\leadsto 2 \cdot \log \color{blue}{\left(1 + x \cdot \left(0.5 + 0.125 \cdot x\right)\right)} \]
    6. Step-by-step derivation
      1. *-commutative1.1%

        \[\leadsto 2 \cdot \log \left(1 + x \cdot \left(0.5 + \color{blue}{x \cdot 0.125}\right)\right) \]
    7. Simplified1.1%

      \[\leadsto 2 \cdot \log \color{blue}{\left(1 + x \cdot \left(0.5 + x \cdot 0.125\right)\right)} \]
    8. Taylor expanded in x around -inf 1.5%

      \[\leadsto \color{blue}{-1 \cdot \frac{21.333333333333332 \cdot \frac{1}{{x}^{2}} - \left(8 + \frac{64}{{x}^{3}}\right)}{x} + 2 \cdot \left(\log 0.125 + -2 \cdot \log \left(\frac{-1}{x}\right)\right)} \]
    9. Simplified17.2%

      \[\leadsto \color{blue}{-42.666666666666664} \]

    if -3.60000000000000009 < x < 1.55000000000000004

    1. Initial program 12.0%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Add Preprocessing
    3. Taylor expanded in x around 0 98.5%

      \[\leadsto \color{blue}{x \cdot \left(1 + {x}^{2} \cdot \left(0.075 \cdot {x}^{2} - 0.16666666666666666\right)\right)} \]
    4. Step-by-step derivation
      1. unpow298.5%

        \[\leadsto x \cdot \left(1 + {x}^{2} \cdot \left(0.075 \cdot \color{blue}{\left(x \cdot x\right)} - 0.16666666666666666\right)\right) \]
    5. Applied egg-rr98.5%

      \[\leadsto x \cdot \left(1 + {x}^{2} \cdot \left(0.075 \cdot \color{blue}{\left(x \cdot x\right)} - 0.16666666666666666\right)\right) \]
    6. Step-by-step derivation
      1. unpow298.5%

        \[\leadsto x \cdot \left(1 + {x}^{2} \cdot \left(0.075 \cdot \color{blue}{\left(x \cdot x\right)} - 0.16666666666666666\right)\right) \]
    7. Applied egg-rr98.5%

      \[\leadsto x \cdot \left(1 + \color{blue}{\left(x \cdot x\right)} \cdot \left(0.075 \cdot \left(x \cdot x\right) - 0.16666666666666666\right)\right) \]

    if 1.55000000000000004 < x

    1. Initial program 57.2%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Add Preprocessing
    3. Taylor expanded in x around 0 31.2%

      \[\leadsto \log \left(x + \color{blue}{1}\right) \]
    4. Step-by-step derivation
      1. *-un-lft-identity31.2%

        \[\leadsto \log \color{blue}{\left(1 \cdot \left(x + 1\right)\right)} \]
      2. log-prod31.2%

        \[\leadsto \color{blue}{\log 1 + \log \left(x + 1\right)} \]
      3. metadata-eval31.2%

        \[\leadsto \color{blue}{0} + \log \left(x + 1\right) \]
      4. +-commutative31.2%

        \[\leadsto 0 + \log \color{blue}{\left(1 + x\right)} \]
      5. log1p-define31.2%

        \[\leadsto 0 + \color{blue}{\mathsf{log1p}\left(x\right)} \]
    5. Applied egg-rr31.2%

      \[\leadsto \color{blue}{0 + \mathsf{log1p}\left(x\right)} \]
    6. Step-by-step derivation
      1. +-lft-identity31.2%

        \[\leadsto \color{blue}{\mathsf{log1p}\left(x\right)} \]
    7. Simplified31.2%

      \[\leadsto \color{blue}{\mathsf{log1p}\left(x\right)} \]
  3. Recombined 3 regimes into one program.
  4. Add Preprocessing

Alternative 11: 58.1% accurate, 8.3× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq -3.6:\\ \;\;\;\;-42.666666666666664\\ \mathbf{elif}\;x \leq 3.6:\\ \;\;\;\;x \cdot \left(1 + \left(x \cdot x\right) \cdot \left(0.075 \cdot \left(x \cdot x\right) - 0.16666666666666666\right)\right)\\ \mathbf{else}:\\ \;\;\;\;43.666666666666664\\ \end{array} \end{array} \]
(FPCore (x)
 :precision binary64
 (if (<= x -3.6)
   -42.666666666666664
   (if (<= x 3.6)
     (* x (+ 1.0 (* (* x x) (- (* 0.075 (* x x)) 0.16666666666666666))))
     43.666666666666664)))
/* Herbie alternative 11 for asinh: piecewise stand-in for
 * log(x + sqrt(x*x + 1)); reported 58.1% accurate, 8.3x speedup. */
double code(double x) {
	double tmp;
	if (x <= -3.6) {
		/* constant tail (Taylor expansion around -inf) */
		tmp = -42.666666666666664;
	} else if (x <= 3.6) {
		/* odd degree-5 Taylor polynomial of asinh around x = 0 */
		tmp = x * (1.0 + ((x * x) * ((0.075 * (x * x)) - 0.16666666666666666)));
	} else {
		/* constant tail (Taylor expansion around +inf) */
		tmp = 43.666666666666664;
	}
	return tmp;
}
! Herbie alternative 11 for asinh: piecewise stand-in for
! log(x + sqrt(x*x + 1)); reported 58.1% accurate, 8.3x speedup.
real(8) function code(x)
    real(8), intent (in) :: x
    real(8) :: tmp
    if (x <= (-3.6d0)) then
        ! constant tail (Taylor expansion around -inf)
        tmp = -42.666666666666664d0
    else if (x <= 3.6d0) then
        ! odd degree-5 Taylor polynomial of asinh around x = 0
        tmp = x * (1.0d0 + ((x * x) * ((0.075d0 * (x * x)) - 0.16666666666666666d0)))
    else
        ! constant tail (Taylor expansion around +inf)
        tmp = 43.666666666666664d0
    end if
    code = tmp
end function
// Herbie alternative 11 for asinh: piecewise stand-in for
// log(x + sqrt(x*x + 1)); reported 58.1% accurate, 8.3x speedup.
public static double code(double x) {
	double tmp;
	if (x <= -3.6) {
		// constant tail (Taylor expansion around -inf)
		tmp = -42.666666666666664;
	} else if (x <= 3.6) {
		// odd degree-5 Taylor polynomial of asinh around x = 0
		tmp = x * (1.0 + ((x * x) * ((0.075 * (x * x)) - 0.16666666666666666)));
	} else {
		// constant tail (Taylor expansion around +inf)
		tmp = 43.666666666666664;
	}
	return tmp;
}
def code(x):
	"""Herbie alternative 11 for asinh: piecewise stand-in for
	log(x + sqrt(x*x + 1)); reported 58.1% accurate, 8.3x speedup."""
	if x <= -3.6:
		# constant tail (Taylor expansion around -inf)
		return -42.666666666666664
	if x <= 3.6:
		# odd degree-5 Taylor polynomial of asinh around x = 0
		sq = x * x
		return x * (1.0 + sq * (0.075 * sq - 0.16666666666666666))
	# constant tail (Taylor expansion around +inf)
	return 43.666666666666664
# Herbie alternative 11 for asinh: piecewise stand-in for
# log(x + sqrt(x*x + 1)); reported 58.1% accurate, 8.3x speedup.
function code(x)
	tmp = 0.0
	if (x <= -3.6)
		tmp = -42.666666666666664;
	elseif (x <= 3.6)
		# odd degree-5 Taylor polynomial of asinh around x = 0
		tmp = Float64(x * Float64(1.0 + Float64(Float64(x * x) * Float64(Float64(0.075 * Float64(x * x)) - 0.16666666666666666))));
	else
		tmp = 43.666666666666664;
	end
	return tmp
end
% Herbie alternative 11 for asinh: piecewise stand-in for
% log(x + sqrt(x*x + 1)); reported 58.1% accurate, 8.3x speedup.
function tmp_2 = code(x)
	tmp = 0.0;
	if (x <= -3.6)
		tmp = -42.666666666666664;
	elseif (x <= 3.6)
		% odd degree-5 Taylor polynomial of asinh around x = 0
		tmp = x * (1.0 + ((x * x) * ((0.075 * (x * x)) - 0.16666666666666666)));
	else
		tmp = 43.666666666666664;
	end
	tmp_2 = tmp;
end
code[x_] := If[LessEqual[x, -3.6], -42.666666666666664, If[LessEqual[x, 3.6], N[(x * N[(1.0 + N[(N[(x * x), $MachinePrecision] * N[(N[(0.075 * N[(x * x), $MachinePrecision]), $MachinePrecision] - 0.16666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], 43.666666666666664]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x \leq -3.6:\\
\;\;\;\;-42.666666666666664\\

\mathbf{elif}\;x \leq 3.6:\\
\;\;\;\;x \cdot \left(1 + \left(x \cdot x\right) \cdot \left(0.075 \cdot \left(x \cdot x\right) - 0.16666666666666666\right)\right)\\

\mathbf{else}:\\
\;\;\;\;43.666666666666664\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if x < -3.60000000000000009

    1. Initial program 1.8%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Add Preprocessing
    3. Step-by-step derivation
      1. add-sqr-sqrt1.8%

        \[\leadsto \log \color{blue}{\left(\sqrt{x + \sqrt{x \cdot x + 1}} \cdot \sqrt{x + \sqrt{x \cdot x + 1}}\right)} \]
      2. pow21.8%

        \[\leadsto \log \color{blue}{\left({\left(\sqrt{x + \sqrt{x \cdot x + 1}}\right)}^{2}\right)} \]
      3. log-pow1.8%

        \[\leadsto \color{blue}{2 \cdot \log \left(\sqrt{x + \sqrt{x \cdot x + 1}}\right)} \]
      4. +-commutative1.8%

        \[\leadsto 2 \cdot \log \left(\sqrt{x + \sqrt{\color{blue}{1 + x \cdot x}}}\right) \]
      5. hypot-1-def3.1%

        \[\leadsto 2 \cdot \log \left(\sqrt{x + \color{blue}{\mathsf{hypot}\left(1, x\right)}}\right) \]
    4. Applied egg-rr3.1%

      \[\leadsto \color{blue}{2 \cdot \log \left(\sqrt{x + \mathsf{hypot}\left(1, x\right)}\right)} \]
    5. Taylor expanded in x around 0 1.1%

      \[\leadsto 2 \cdot \log \color{blue}{\left(1 + x \cdot \left(0.5 + 0.125 \cdot x\right)\right)} \]
    6. Step-by-step derivation
      1. *-commutative1.1%

        \[\leadsto 2 \cdot \log \left(1 + x \cdot \left(0.5 + \color{blue}{x \cdot 0.125}\right)\right) \]
    7. Simplified1.1%

      \[\leadsto 2 \cdot \log \color{blue}{\left(1 + x \cdot \left(0.5 + x \cdot 0.125\right)\right)} \]
    8. Taylor expanded in x around -inf 1.5%

      \[\leadsto \color{blue}{-1 \cdot \frac{21.333333333333332 \cdot \frac{1}{{x}^{2}} - \left(8 + \frac{64}{{x}^{3}}\right)}{x} + 2 \cdot \left(\log 0.125 + -2 \cdot \log \left(\frac{-1}{x}\right)\right)} \]
    9. Simplified17.2%

      \[\leadsto \color{blue}{-42.666666666666664} \]

    if -3.60000000000000009 < x < 3.60000000000000009

    1. Initial program 12.0%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Add Preprocessing
    3. Taylor expanded in x around 0 98.5%

      \[\leadsto \color{blue}{x \cdot \left(1 + {x}^{2} \cdot \left(0.075 \cdot {x}^{2} - 0.16666666666666666\right)\right)} \]
    4. Step-by-step derivation
      1. unpow298.5%

        \[\leadsto x \cdot \left(1 + {x}^{2} \cdot \left(0.075 \cdot \color{blue}{\left(x \cdot x\right)} - 0.16666666666666666\right)\right) \]
    5. Applied egg-rr98.5%

      \[\leadsto x \cdot \left(1 + {x}^{2} \cdot \left(0.075 \cdot \color{blue}{\left(x \cdot x\right)} - 0.16666666666666666\right)\right) \]
    6. Step-by-step derivation
      1. unpow298.5%

        \[\leadsto x \cdot \left(1 + {x}^{2} \cdot \left(0.075 \cdot \color{blue}{\left(x \cdot x\right)} - 0.16666666666666666\right)\right) \]
    7. Applied egg-rr98.5%

      \[\leadsto x \cdot \left(1 + \color{blue}{\left(x \cdot x\right)} \cdot \left(0.075 \cdot \left(x \cdot x\right) - 0.16666666666666666\right)\right) \]

    if 3.60000000000000009 < x

    1. Initial program 57.2%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Add Preprocessing
    3. Step-by-step derivation
      1. add-sqr-sqrt57.2%

        \[\leadsto \log \color{blue}{\left(\sqrt{x + \sqrt{x \cdot x + 1}} \cdot \sqrt{x + \sqrt{x \cdot x + 1}}\right)} \]
      2. pow257.2%

        \[\leadsto \log \color{blue}{\left({\left(\sqrt{x + \sqrt{x \cdot x + 1}}\right)}^{2}\right)} \]
      3. log-pow57.2%

        \[\leadsto \color{blue}{2 \cdot \log \left(\sqrt{x + \sqrt{x \cdot x + 1}}\right)} \]
      4. +-commutative57.2%

        \[\leadsto 2 \cdot \log \left(\sqrt{x + \sqrt{\color{blue}{1 + x \cdot x}}}\right) \]
      5. hypot-1-def100.0%

        \[\leadsto 2 \cdot \log \left(\sqrt{x + \color{blue}{\mathsf{hypot}\left(1, x\right)}}\right) \]
    4. Applied egg-rr100.0%

      \[\leadsto \color{blue}{2 \cdot \log \left(\sqrt{x + \mathsf{hypot}\left(1, x\right)}\right)} \]
    5. Taylor expanded in x around 0 11.2%

      \[\leadsto 2 \cdot \log \color{blue}{\left(1 + x \cdot \left(0.5 + 0.125 \cdot x\right)\right)} \]
    6. Step-by-step derivation
      1. *-commutative11.2%

        \[\leadsto 2 \cdot \log \left(1 + x \cdot \left(0.5 + \color{blue}{x \cdot 0.125}\right)\right) \]
    7. Simplified11.2%

      \[\leadsto 2 \cdot \log \color{blue}{\left(1 + x \cdot \left(0.5 + x \cdot 0.125\right)\right)} \]
    8. Taylor expanded in x around inf 17.2%

      \[\leadsto \color{blue}{\left(2 \cdot \left(\log 0.125 + -2 \cdot \log \left(\frac{1}{x}\right)\right) + \left(8 \cdot \frac{1}{x} + 64 \cdot \frac{1}{{x}^{4}}\right)\right) - \frac{21.333333333333332}{{x}^{3}}} \]
    9. Simplified16.8%

      \[\leadsto \color{blue}{43.666666666666664} \]
  3. Recombined 3 regimes into one program.
  4. Add Preprocessing

Alternative 12: 58.0% accurate, 10.9× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq -2.4:\\ \;\;\;\;-42.666666666666664\\ \mathbf{elif}\;x \leq 2.4:\\ \;\;\;\;x \cdot \left(1 + \left(x \cdot x\right) \cdot -0.16666666666666666\right)\\ \mathbf{else}:\\ \;\;\;\;43.666666666666664\\ \end{array} \end{array} \]
(FPCore (x)
 :precision binary64
 (if (<= x -2.4)
   -42.666666666666664
   (if (<= x 2.4)
     (* x (+ 1.0 (* (* x x) -0.16666666666666666)))
     43.666666666666664)))
/* Herbie alternative 12 for asinh: piecewise stand-in for
 * log(x + sqrt(x*x + 1)); reported 58.0% accurate, 10.9x speedup. */
double code(double x) {
	double tmp;
	if (x <= -2.4) {
		/* constant tail (Taylor expansion around -inf) */
		tmp = -42.666666666666664;
	} else if (x <= 2.4) {
		/* odd cubic Taylor polynomial of asinh around x = 0 */
		tmp = x * (1.0 + ((x * x) * -0.16666666666666666));
	} else {
		/* constant tail (Taylor expansion around +inf) */
		tmp = 43.666666666666664;
	}
	return tmp;
}
! Herbie alternative 12 for asinh: piecewise stand-in for
! log(x + sqrt(x*x + 1)); reported 58.0% accurate, 10.9x speedup.
real(8) function code(x)
    real(8), intent (in) :: x
    real(8) :: tmp
    if (x <= (-2.4d0)) then
        ! constant tail (Taylor expansion around -inf)
        tmp = -42.666666666666664d0
    else if (x <= 2.4d0) then
        ! odd cubic Taylor polynomial of asinh around x = 0
        tmp = x * (1.0d0 + ((x * x) * (-0.16666666666666666d0)))
    else
        ! constant tail (Taylor expansion around +inf)
        tmp = 43.666666666666664d0
    end if
    code = tmp
end function
// Herbie alternative 12 for asinh: piecewise stand-in for
// log(x + sqrt(x*x + 1)); reported 58.0% accurate, 10.9x speedup.
public static double code(double x) {
	double tmp;
	if (x <= -2.4) {
		// constant tail (Taylor expansion around -inf)
		tmp = -42.666666666666664;
	} else if (x <= 2.4) {
		// odd cubic Taylor polynomial of asinh around x = 0
		tmp = x * (1.0 + ((x * x) * -0.16666666666666666));
	} else {
		// constant tail (Taylor expansion around +inf)
		tmp = 43.666666666666664;
	}
	return tmp;
}
def code(x):
	"""Herbie alternative 12 for asinh: piecewise stand-in for
	log(x + sqrt(x*x + 1)); reported 58.0% accurate, 10.9x speedup."""
	if x <= -2.4:
		# constant tail (Taylor expansion around -inf)
		return -42.666666666666664
	if x <= 2.4:
		# odd cubic Taylor polynomial of asinh around x = 0
		return x * (1.0 + (x * x) * -0.16666666666666666)
	# constant tail (Taylor expansion around +inf)
	return 43.666666666666664
# Herbie alternative 12 for asinh: piecewise stand-in for
# log(x + sqrt(x*x + 1)); reported 58.0% accurate, 10.9x speedup.
function code(x)
	tmp = 0.0
	if (x <= -2.4)
		tmp = -42.666666666666664;
	elseif (x <= 2.4)
		# odd cubic Taylor polynomial of asinh around x = 0
		tmp = Float64(x * Float64(1.0 + Float64(Float64(x * x) * -0.16666666666666666)));
	else
		tmp = 43.666666666666664;
	end
	return tmp
end
% Herbie alternative 12 for asinh: piecewise stand-in for
% log(x + sqrt(x*x + 1)); reported 58.0% accurate, 10.9x speedup.
function tmp_2 = code(x)
	tmp = 0.0;
	if (x <= -2.4)
		tmp = -42.666666666666664;
	elseif (x <= 2.4)
		% odd cubic Taylor polynomial of asinh around x = 0
		tmp = x * (1.0 + ((x * x) * -0.16666666666666666));
	else
		tmp = 43.666666666666664;
	end
	tmp_2 = tmp;
end
code[x_] := If[LessEqual[x, -2.4], -42.666666666666664, If[LessEqual[x, 2.4], N[(x * N[(1.0 + N[(N[(x * x), $MachinePrecision] * -0.16666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], 43.666666666666664]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x \leq -2.4:\\
\;\;\;\;-42.666666666666664\\

\mathbf{elif}\;x \leq 2.4:\\
\;\;\;\;x \cdot \left(1 + \left(x \cdot x\right) \cdot -0.16666666666666666\right)\\

\mathbf{else}:\\
\;\;\;\;43.666666666666664\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if x < -2.39999999999999991

    1. Initial program 1.8%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Add Preprocessing
    3. Step-by-step derivation
      1. add-sqr-sqrt1.8%

        \[\leadsto \log \color{blue}{\left(\sqrt{x + \sqrt{x \cdot x + 1}} \cdot \sqrt{x + \sqrt{x \cdot x + 1}}\right)} \]
      2. pow21.8%

        \[\leadsto \log \color{blue}{\left({\left(\sqrt{x + \sqrt{x \cdot x + 1}}\right)}^{2}\right)} \]
      3. log-pow1.8%

        \[\leadsto \color{blue}{2 \cdot \log \left(\sqrt{x + \sqrt{x \cdot x + 1}}\right)} \]
      4. +-commutative1.8%

        \[\leadsto 2 \cdot \log \left(\sqrt{x + \sqrt{\color{blue}{1 + x \cdot x}}}\right) \]
      5. hypot-1-def3.1%

        \[\leadsto 2 \cdot \log \left(\sqrt{x + \color{blue}{\mathsf{hypot}\left(1, x\right)}}\right) \]
    4. Applied egg-rr3.1%

      \[\leadsto \color{blue}{2 \cdot \log \left(\sqrt{x + \mathsf{hypot}\left(1, x\right)}\right)} \]
    5. Taylor expanded in x around 0 1.1%

      \[\leadsto 2 \cdot \log \color{blue}{\left(1 + x \cdot \left(0.5 + 0.125 \cdot x\right)\right)} \]
    6. Step-by-step derivation
      1. *-commutative1.1%

        \[\leadsto 2 \cdot \log \left(1 + x \cdot \left(0.5 + \color{blue}{x \cdot 0.125}\right)\right) \]
    7. Simplified1.1%

      \[\leadsto 2 \cdot \log \color{blue}{\left(1 + x \cdot \left(0.5 + x \cdot 0.125\right)\right)} \]
    8. Taylor expanded in x around -inf 1.5%

      \[\leadsto \color{blue}{-1 \cdot \frac{21.333333333333332 \cdot \frac{1}{{x}^{2}} - \left(8 + \frac{64}{{x}^{3}}\right)}{x} + 2 \cdot \left(\log 0.125 + -2 \cdot \log \left(\frac{-1}{x}\right)\right)} \]
    9. Simplified17.2%

      \[\leadsto \color{blue}{-42.666666666666664} \]

    if -2.39999999999999991 < x < 2.39999999999999991

    1. Initial program 12.0%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Add Preprocessing
    3. Taylor expanded in x around 0 98.5%

      \[\leadsto \color{blue}{x \cdot \left(1 + {x}^{2} \cdot \left(0.075 \cdot {x}^{2} - 0.16666666666666666\right)\right)} \]
    4. Step-by-step derivation
      1. unpow298.5%

        \[\leadsto x \cdot \left(1 + {x}^{2} \cdot \left(0.075 \cdot \color{blue}{\left(x \cdot x\right)} - 0.16666666666666666\right)\right) \]
    5. Applied egg-rr98.5%

      \[\leadsto x \cdot \left(1 + {x}^{2} \cdot \left(0.075 \cdot \color{blue}{\left(x \cdot x\right)} - 0.16666666666666666\right)\right) \]
    6. Step-by-step derivation
      1. unpow298.5%

        \[\leadsto x \cdot \left(1 + {x}^{2} \cdot \left(0.075 \cdot \color{blue}{\left(x \cdot x\right)} - 0.16666666666666666\right)\right) \]
    7. Applied egg-rr98.5%

      \[\leadsto x \cdot \left(1 + \color{blue}{\left(x \cdot x\right)} \cdot \left(0.075 \cdot \left(x \cdot x\right) - 0.16666666666666666\right)\right) \]
    8. Taylor expanded in x around 0 98.0%

      \[\leadsto x \cdot \left(1 + \left(x \cdot x\right) \cdot \color{blue}{-0.16666666666666666}\right) \]

    if 2.39999999999999991 < x

    1. Initial program 57.2%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Add Preprocessing
    3. Step-by-step derivation
      1. add-sqr-sqrt57.2%

        \[\leadsto \log \color{blue}{\left(\sqrt{x + \sqrt{x \cdot x + 1}} \cdot \sqrt{x + \sqrt{x \cdot x + 1}}\right)} \]
      2. pow257.2%

        \[\leadsto \log \color{blue}{\left({\left(\sqrt{x + \sqrt{x \cdot x + 1}}\right)}^{2}\right)} \]
      3. log-pow57.2%

        \[\leadsto \color{blue}{2 \cdot \log \left(\sqrt{x + \sqrt{x \cdot x + 1}}\right)} \]
      4. +-commutative57.2%

        \[\leadsto 2 \cdot \log \left(\sqrt{x + \sqrt{\color{blue}{1 + x \cdot x}}}\right) \]
      5. hypot-1-def100.0%

        \[\leadsto 2 \cdot \log \left(\sqrt{x + \color{blue}{\mathsf{hypot}\left(1, x\right)}}\right) \]
    4. Applied egg-rr100.0%

      \[\leadsto \color{blue}{2 \cdot \log \left(\sqrt{x + \mathsf{hypot}\left(1, x\right)}\right)} \]
    5. Taylor expanded in x around 0 11.2%

      \[\leadsto 2 \cdot \log \color{blue}{\left(1 + x \cdot \left(0.5 + 0.125 \cdot x\right)\right)} \]
    6. Step-by-step derivation
      1. *-commutative11.2%

        \[\leadsto 2 \cdot \log \left(1 + x \cdot \left(0.5 + \color{blue}{x \cdot 0.125}\right)\right) \]
    7. Simplified11.2%

      \[\leadsto 2 \cdot \log \color{blue}{\left(1 + x \cdot \left(0.5 + x \cdot 0.125\right)\right)} \]
    8. Taylor expanded in x around inf 17.2%

      \[\leadsto \color{blue}{\left(2 \cdot \left(\log 0.125 + -2 \cdot \log \left(\frac{1}{x}\right)\right) + \left(8 \cdot \frac{1}{x} + 64 \cdot \frac{1}{{x}^{4}}\right)\right) - \frac{21.333333333333332}{{x}^{3}}} \]
    9. Simplified16.8%

      \[\leadsto \color{blue}{43.666666666666664} \]
  3. Recombined 3 regimes into one program.
  4. Add Preprocessing

Alternative 13: 57.7% accurate, 18.8× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq -44:\\ \;\;\;\;-42.666666666666664\\ \mathbf{elif}\;x \leq 43:\\ \;\;\;\;x\\ \mathbf{else}:\\ \;\;\;\;43.666666666666664\\ \end{array} \end{array} \]
(FPCore (x)
 :precision binary64
 (if (<= x -44.0) -42.666666666666664 (if (<= x 43.0) x 43.666666666666664)))
/* Herbie alternative 13 for asinh: piecewise stand-in for
 * log(x + sqrt(x*x + 1)); reported 57.7% accurate, 18.8x speedup.
 * Middle regime uses the identity (first Taylor term around 0). */
double code(double x) {
	double tmp;
	if (x <= -44.0) {
		tmp = -42.666666666666664;
	} else if (x <= 43.0) {
		tmp = x;
	} else {
		tmp = 43.666666666666664;
	}
	return tmp;
}
! Herbie alternative 13 for asinh: piecewise stand-in for
! log(x + sqrt(x*x + 1)); reported 57.7% accurate, 18.8x speedup.
! Middle regime uses the identity (first Taylor term around 0).
real(8) function code(x)
    real(8), intent (in) :: x
    real(8) :: tmp
    if (x <= (-44.0d0)) then
        tmp = -42.666666666666664d0
    else if (x <= 43.0d0) then
        tmp = x
    else
        tmp = 43.666666666666664d0
    end if
    code = tmp
end function
// Herbie alternative 13 for asinh: piecewise stand-in for
// log(x + sqrt(x*x + 1)); reported 57.7% accurate, 18.8x speedup.
// Middle regime uses the identity (first Taylor term around 0).
public static double code(double x) {
	double tmp;
	if (x <= -44.0) {
		tmp = -42.666666666666664;
	} else if (x <= 43.0) {
		tmp = x;
	} else {
		tmp = 43.666666666666664;
	}
	return tmp;
}
def code(x):
	"""Herbie alternative 13 for asinh: piecewise stand-in for
	log(x + sqrt(x*x + 1)); reported 57.7% accurate, 18.8x speedup.
	Middle regime uses the identity (first Taylor term around 0)."""
	if x <= -44.0:
		return -42.666666666666664
	if x <= 43.0:
		return x
	return 43.666666666666664
# Herbie alternative 13 for asinh: piecewise stand-in for
# log(x + sqrt(x*x + 1)); reported 57.7% accurate, 18.8x speedup.
# Middle regime uses the identity (first Taylor term around 0).
function code(x)
	tmp = 0.0
	if (x <= -44.0)
		tmp = -42.666666666666664;
	elseif (x <= 43.0)
		tmp = x;
	else
		tmp = 43.666666666666664;
	end
	return tmp
end
% Herbie alternative 13 for asinh: piecewise stand-in for
% log(x + sqrt(x*x + 1)); reported 57.7% accurate, 18.8x speedup.
% Middle regime uses the identity (first Taylor term around 0).
function tmp_2 = code(x)
	tmp = 0.0;
	if (x <= -44.0)
		tmp = -42.666666666666664;
	elseif (x <= 43.0)
		tmp = x;
	else
		tmp = 43.666666666666664;
	end
	tmp_2 = tmp;
end
code[x_] := If[LessEqual[x, -44.0], -42.666666666666664, If[LessEqual[x, 43.0], x, 43.666666666666664]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x \leq -44:\\
\;\;\;\;-42.666666666666664\\

\mathbf{elif}\;x \leq 43:\\
\;\;\;\;x\\

\mathbf{else}:\\
\;\;\;\;43.666666666666664\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if x < -44

    1. Initial program 1.8%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Add Preprocessing
    3. Step-by-step derivation
      1. add-sqr-sqrt1.8%

        \[\leadsto \log \color{blue}{\left(\sqrt{x + \sqrt{x \cdot x + 1}} \cdot \sqrt{x + \sqrt{x \cdot x + 1}}\right)} \]
      2. pow21.8%

        \[\leadsto \log \color{blue}{\left({\left(\sqrt{x + \sqrt{x \cdot x + 1}}\right)}^{2}\right)} \]
      3. log-pow1.8%

        \[\leadsto \color{blue}{2 \cdot \log \left(\sqrt{x + \sqrt{x \cdot x + 1}}\right)} \]
      4. +-commutative1.8%

        \[\leadsto 2 \cdot \log \left(\sqrt{x + \sqrt{\color{blue}{1 + x \cdot x}}}\right) \]
      5. hypot-1-def3.1%

        \[\leadsto 2 \cdot \log \left(\sqrt{x + \color{blue}{\mathsf{hypot}\left(1, x\right)}}\right) \]
    4. Applied egg-rr3.1%

      \[\leadsto \color{blue}{2 \cdot \log \left(\sqrt{x + \mathsf{hypot}\left(1, x\right)}\right)} \]
    5. Taylor expanded in x around 0 1.1%

      \[\leadsto 2 \cdot \log \color{blue}{\left(1 + x \cdot \left(0.5 + 0.125 \cdot x\right)\right)} \]
    6. Step-by-step derivation
      1. *-commutative1.1%

        \[\leadsto 2 \cdot \log \left(1 + x \cdot \left(0.5 + \color{blue}{x \cdot 0.125}\right)\right) \]
    7. Simplified1.1%

      \[\leadsto 2 \cdot \log \color{blue}{\left(1 + x \cdot \left(0.5 + x \cdot 0.125\right)\right)} \]
    8. Taylor expanded in x around -inf 1.5%

      \[\leadsto \color{blue}{-1 \cdot \frac{21.333333333333332 \cdot \frac{1}{{x}^{2}} - \left(8 + \frac{64}{{x}^{3}}\right)}{x} + 2 \cdot \left(\log 0.125 + -2 \cdot \log \left(\frac{-1}{x}\right)\right)} \]
    9. Simplified17.2%

      \[\leadsto \color{blue}{-42.666666666666664} \]

    if -44 < x < 43

    1. Initial program 12.7%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Add Preprocessing
    3. Taylor expanded in x around 0 97.9%

      \[\leadsto \color{blue}{x \cdot \left(1 + {x}^{2} \cdot \left(0.075 \cdot {x}^{2} - 0.16666666666666666\right)\right)} \]
    4. Taylor expanded in x around 0 96.1%

      \[\leadsto \color{blue}{x} \]

    if 43 < x

    1. Initial program 56.7%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Add Preprocessing
    3. Step-by-step derivation
      1. add-sqr-sqrt56.7%

        \[\leadsto \log \color{blue}{\left(\sqrt{x + \sqrt{x \cdot x + 1}} \cdot \sqrt{x + \sqrt{x \cdot x + 1}}\right)} \]
      2. pow256.7%

        \[\leadsto \log \color{blue}{\left({\left(\sqrt{x + \sqrt{x \cdot x + 1}}\right)}^{2}\right)} \]
      3. log-pow56.7%

        \[\leadsto \color{blue}{2 \cdot \log \left(\sqrt{x + \sqrt{x \cdot x + 1}}\right)} \]
      4. +-commutative56.7%

        \[\leadsto 2 \cdot \log \left(\sqrt{x + \sqrt{\color{blue}{1 + x \cdot x}}}\right) \]
      5. hypot-1-def100.0%

        \[\leadsto 2 \cdot \log \left(\sqrt{x + \color{blue}{\mathsf{hypot}\left(1, x\right)}}\right) \]
    4. Applied egg-rr100.0%

      \[\leadsto \color{blue}{2 \cdot \log \left(\sqrt{x + \mathsf{hypot}\left(1, x\right)}\right)} \]
    5. Taylor expanded in x around 0 11.1%

      \[\leadsto 2 \cdot \log \color{blue}{\left(1 + x \cdot \left(0.5 + 0.125 \cdot x\right)\right)} \]
    6. Step-by-step derivation
      1. *-commutative11.1%

        \[\leadsto 2 \cdot \log \left(1 + x \cdot \left(0.5 + \color{blue}{x \cdot 0.125}\right)\right) \]
    7. Simplified11.1%

      \[\leadsto 2 \cdot \log \color{blue}{\left(1 + x \cdot \left(0.5 + x \cdot 0.125\right)\right)} \]
    8. Taylor expanded in x around inf 17.2%

      \[\leadsto \color{blue}{\left(2 \cdot \left(\log 0.125 + -2 \cdot \log \left(\frac{1}{x}\right)\right) + \left(8 \cdot \frac{1}{x} + 64 \cdot \frac{1}{{x}^{4}}\right)\right) - \frac{21.333333333333332}{{x}^{3}}} \]
    9. Simplified16.9%

      \[\leadsto \color{blue}{43.666666666666664} \]
  3. Recombined 3 regimes into one program.
  4. Add Preprocessing

Alternative 14: 11.1% accurate, 34.4× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq 5 \cdot 10^{-311}:\\ \;\;\;\;-42.666666666666664\\ \mathbf{else}:\\ \;\;\;\;43.666666666666664\\ \end{array} \end{array} \]
(FPCore (x)
 :precision binary64
 (if (<= x 5e-311) -42.666666666666664 43.666666666666664))
/* Herbie alternative 14 for asinh: two-constant stand-in for
 * log(x + sqrt(x*x + 1)); reported only 11.1% accurate, 34.4x speedup. */
double code(double x) {
	double tmp;
	if (x <= 5e-311) {
		tmp = -42.666666666666664;
	} else {
		tmp = 43.666666666666664;
	}
	return tmp;
}
! Herbie alternative 14 for asinh: two-constant stand-in for
! log(x + sqrt(x*x + 1)); reported only 11.1% accurate, 34.4x speedup.
real(8) function code(x)
    real(8), intent (in) :: x
    real(8) :: tmp
    if (x <= 5d-311) then
        tmp = -42.666666666666664d0
    else
        tmp = 43.666666666666664d0
    end if
    code = tmp
end function
// Herbie alternative 14 for asinh: two-constant stand-in for
// log(x + sqrt(x*x + 1)); reported only 11.1% accurate, 34.4x speedup.
public static double code(double x) {
	double tmp;
	if (x <= 5e-311) {
		tmp = -42.666666666666664;
	} else {
		tmp = 43.666666666666664;
	}
	return tmp;
}
def code(x):
	"""Herbie alternative 14 for asinh: two-constant stand-in for
	log(x + sqrt(x*x + 1)); reported only 11.1% accurate, 34.4x speedup."""
	return -42.666666666666664 if x <= 5e-311 else 43.666666666666664
# Herbie alternative 14 for asinh: two-constant stand-in for
# log(x + sqrt(x*x + 1)); reported only 11.1% accurate, 34.4x speedup.
function code(x)
	tmp = 0.0
	if (x <= 5e-311)
		tmp = -42.666666666666664;
	else
		tmp = 43.666666666666664;
	end
	return tmp
end
% Herbie alternative 14 for asinh: two-constant stand-in for
% log(x + sqrt(x*x + 1)); reported only 11.1% accurate, 34.4x speedup.
function tmp_2 = code(x)
	tmp = 0.0;
	if (x <= 5e-311)
		tmp = -42.666666666666664;
	else
		tmp = 43.666666666666664;
	end
	tmp_2 = tmp;
end
code[x_] := If[LessEqual[x, 5e-311], -42.666666666666664, 43.666666666666664]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x \leq 5 \cdot 10^{-311}:\\
\;\;\;\;-42.666666666666664\\

\mathbf{else}:\\
\;\;\;\;43.666666666666664\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if x < 5.00000000000023e-311

    1. Initial program 7.3%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Add Preprocessing
    3. Step-by-step derivation
      1. add-sqr-sqrt7.3%

        \[\leadsto \log \color{blue}{\left(\sqrt{x + \sqrt{x \cdot x + 1}} \cdot \sqrt{x + \sqrt{x \cdot x + 1}}\right)} \]
      2. pow27.3%

        \[\leadsto \log \color{blue}{\left({\left(\sqrt{x + \sqrt{x \cdot x + 1}}\right)}^{2}\right)} \]
      3. log-pow7.3%

        \[\leadsto \color{blue}{2 \cdot \log \left(\sqrt{x + \sqrt{x \cdot x + 1}}\right)} \]
      4. +-commutative7.3%

        \[\leadsto 2 \cdot \log \left(\sqrt{x + \sqrt{\color{blue}{1 + x \cdot x}}}\right) \]
      5. hypot-1-def7.8%

        \[\leadsto 2 \cdot \log \left(\sqrt{x + \color{blue}{\mathsf{hypot}\left(1, x\right)}}\right) \]
    4. Applied egg-rr7.8%

      \[\leadsto \color{blue}{2 \cdot \log \left(\sqrt{x + \mathsf{hypot}\left(1, x\right)}\right)} \]
    5. Taylor expanded in x around 0 5.5%

      \[\leadsto 2 \cdot \log \color{blue}{\left(1 + x \cdot \left(0.5 + 0.125 \cdot x\right)\right)} \]
    6. Step-by-step derivation
      1. *-commutative5.5%

        \[\leadsto 2 \cdot \log \left(1 + x \cdot \left(0.5 + \color{blue}{x \cdot 0.125}\right)\right) \]
    7. Simplified5.5%

      \[\leadsto 2 \cdot \log \color{blue}{\left(1 + x \cdot \left(0.5 + x \cdot 0.125\right)\right)} \]
    8. Taylor expanded in x around -inf 1.4%

      \[\leadsto \color{blue}{-1 \cdot \frac{21.333333333333332 \cdot \frac{1}{{x}^{2}} - \left(8 + \frac{64}{{x}^{3}}\right)}{x} + 2 \cdot \left(\log 0.125 + -2 \cdot \log \left(\frac{-1}{x}\right)\right)} \]
    9. Simplified10.7%

      \[\leadsto \color{blue}{-42.666666666666664} \]

    if 5.00000000000023e-311 < x

    1. Initial program 38.6%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Add Preprocessing
    3. Step-by-step derivation
      1. add-sqr-sqrt38.5%

        \[\leadsto \log \color{blue}{\left(\sqrt{x + \sqrt{x \cdot x + 1}} \cdot \sqrt{x + \sqrt{x \cdot x + 1}}\right)} \]
      2. pow238.5%

        \[\leadsto \log \color{blue}{\left({\left(\sqrt{x + \sqrt{x \cdot x + 1}}\right)}^{2}\right)} \]
      3. log-pow38.6%

        \[\leadsto \color{blue}{2 \cdot \log \left(\sqrt{x + \sqrt{x \cdot x + 1}}\right)} \]
      4. +-commutative38.6%

        \[\leadsto 2 \cdot \log \left(\sqrt{x + \sqrt{\color{blue}{1 + x \cdot x}}}\right) \]
      5. hypot-1-def63.5%

        \[\leadsto 2 \cdot \log \left(\sqrt{x + \color{blue}{\mathsf{hypot}\left(1, x\right)}}\right) \]
    4. Applied egg-rr63.5%

      \[\leadsto \color{blue}{2 \cdot \log \left(\sqrt{x + \mathsf{hypot}\left(1, x\right)}\right)} \]
    5. Taylor expanded in x around 0 10.9%

      \[\leadsto 2 \cdot \log \color{blue}{\left(1 + x \cdot \left(0.5 + 0.125 \cdot x\right)\right)} \]
    6. Step-by-step derivation
      1. *-commutative10.9%

        \[\leadsto 2 \cdot \log \left(1 + x \cdot \left(0.5 + \color{blue}{x \cdot 0.125}\right)\right) \]
    7. Simplified10.9%

      \[\leadsto 2 \cdot \log \color{blue}{\left(1 + x \cdot \left(0.5 + x \cdot 0.125\right)\right)} \]
    8. Taylor expanded in x around inf 10.8%

      \[\leadsto \color{blue}{\left(2 \cdot \left(\log 0.125 + -2 \cdot \log \left(\frac{1}{x}\right)\right) + \left(8 \cdot \frac{1}{x} + 64 \cdot \frac{1}{{x}^{4}}\right)\right) - \frac{21.333333333333332}{{x}^{3}}} \]
    9. Simplified12.2%

      \[\leadsto \color{blue}{43.666666666666664} \]
  3. Recombined 2 regimes into one program.
  4. Add Preprocessing

Alternative 15: 10.9% accurate, 34.4× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq -1.1 \cdot 10^{-308}:\\ \;\;\;\;-42.666666666666664\\ \mathbf{else}:\\ \;\;\;\;21.333333333333332\\ \end{array} \end{array} \]
(FPCore (x)
 :precision binary64
 (if (<= x -1.1e-308) -42.666666666666664 21.333333333333332))
/* Herbie alternative 15 for asinh: two-constant stand-in for
 * log(x + sqrt(x*x + 1)); reported only 10.9% accurate, 34.4x speedup. */
double code(double x) {
	double tmp;
	if (x <= -1.1e-308) {
		tmp = -42.666666666666664;
	} else {
		tmp = 21.333333333333332;
	}
	return tmp;
}
! Herbie alternative 15 for asinh: two-constant stand-in for
! log(x + sqrt(x*x + 1)); reported only 10.9% accurate, 34.4x speedup.
real(8) function code(x)
    real(8), intent (in) :: x
    real(8) :: tmp
    if (x <= (-1.1d-308)) then
        tmp = -42.666666666666664d0
    else
        tmp = 21.333333333333332d0
    end if
    code = tmp
end function
// Herbie alternative 15 for asinh: two-constant stand-in for
// log(x + sqrt(x*x + 1)); reported only 10.9% accurate, 34.4x speedup.
public static double code(double x) {
	double tmp;
	if (x <= -1.1e-308) {
		tmp = -42.666666666666664;
	} else {
		tmp = 21.333333333333332;
	}
	return tmp;
}
def code(x):
	"""Herbie alternative 15 for asinh: two-constant stand-in for
	log(x + sqrt(x*x + 1)); reported only 10.9% accurate, 34.4x speedup."""
	return -42.666666666666664 if x <= -1.1e-308 else 21.333333333333332
# Herbie alternative 15 for asinh: two-constant stand-in for
# log(x + sqrt(x*x + 1)); reported only 10.9% accurate, 34.4x speedup.
function code(x)
	tmp = 0.0
	if (x <= -1.1e-308)
		tmp = -42.666666666666664;
	else
		tmp = 21.333333333333332;
	end
	return tmp
end
% Herbie regime split: piecewise-constant approximation of asinh(x).
function tmp_2 = code(x)
	tmp = 0.0;
	% Threshold chosen by Herbie's regime inference.
	if (x <= -1.1e-308)
		tmp = -42.666666666666664;
	else
		tmp = 21.333333333333332;
	end
	tmp_2 = tmp;
end
(* Herbie regime split: piecewise-constant approximation of asinh(x). *)
code[x_] := If[LessEqual[x, -1.1e-308], -42.666666666666664, 21.333333333333332]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x \leq -1.1 \cdot 10^{-308}:\\
\;\;\;\;-42.666666666666664\\

\mathbf{else}:\\
\;\;\;\;21.333333333333332\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if x < -1.1000000000000001e-308

    1. Initial program 7.3%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Add Preprocessing
    3. Step-by-step derivation
      1. add-sqr-sqrt7.3%

        \[\leadsto \log \color{blue}{\left(\sqrt{x + \sqrt{x \cdot x + 1}} \cdot \sqrt{x + \sqrt{x \cdot x + 1}}\right)} \]
      2. pow27.3%

        \[\leadsto \log \color{blue}{\left({\left(\sqrt{x + \sqrt{x \cdot x + 1}}\right)}^{2}\right)} \]
      3. log-pow7.3%

        \[\leadsto \color{blue}{2 \cdot \log \left(\sqrt{x + \sqrt{x \cdot x + 1}}\right)} \]
      4. +-commutative7.3%

        \[\leadsto 2 \cdot \log \left(\sqrt{x + \sqrt{\color{blue}{1 + x \cdot x}}}\right) \]
      5. hypot-1-def7.8%

        \[\leadsto 2 \cdot \log \left(\sqrt{x + \color{blue}{\mathsf{hypot}\left(1, x\right)}}\right) \]
    4. Applied egg-rr7.8%

      \[\leadsto \color{blue}{2 \cdot \log \left(\sqrt{x + \mathsf{hypot}\left(1, x\right)}\right)} \]
    5. Taylor expanded in x around 0 5.5%

      \[\leadsto 2 \cdot \log \color{blue}{\left(1 + x \cdot \left(0.5 + 0.125 \cdot x\right)\right)} \]
    6. Step-by-step derivation
      1. *-commutative5.5%

        \[\leadsto 2 \cdot \log \left(1 + x \cdot \left(0.5 + \color{blue}{x \cdot 0.125}\right)\right) \]
    7. Simplified5.5%

      \[\leadsto 2 \cdot \log \color{blue}{\left(1 + x \cdot \left(0.5 + x \cdot 0.125\right)\right)} \]
    8. Taylor expanded in x around -inf 1.4%

      \[\leadsto \color{blue}{-1 \cdot \frac{21.333333333333332 \cdot \frac{1}{{x}^{2}} - \left(8 + \frac{64}{{x}^{3}}\right)}{x} + 2 \cdot \left(\log 0.125 + -2 \cdot \log \left(\frac{-1}{x}\right)\right)} \]
    9. Simplified10.7%

      \[\leadsto \color{blue}{-42.666666666666664} \]

    if -1.1000000000000001e-308 < x

    1. Initial program 38.6%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Add Preprocessing
    3. Step-by-step derivation
      1. add-sqr-sqrt38.5%

        \[\leadsto \log \color{blue}{\left(\sqrt{x + \sqrt{x \cdot x + 1}} \cdot \sqrt{x + \sqrt{x \cdot x + 1}}\right)} \]
      2. pow238.5%

        \[\leadsto \log \color{blue}{\left({\left(\sqrt{x + \sqrt{x \cdot x + 1}}\right)}^{2}\right)} \]
      3. log-pow38.6%

        \[\leadsto \color{blue}{2 \cdot \log \left(\sqrt{x + \sqrt{x \cdot x + 1}}\right)} \]
      4. +-commutative38.6%

        \[\leadsto 2 \cdot \log \left(\sqrt{x + \sqrt{\color{blue}{1 + x \cdot x}}}\right) \]
      5. hypot-1-def63.5%

        \[\leadsto 2 \cdot \log \left(\sqrt{x + \color{blue}{\mathsf{hypot}\left(1, x\right)}}\right) \]
    4. Applied egg-rr63.5%

      \[\leadsto \color{blue}{2 \cdot \log \left(\sqrt{x + \mathsf{hypot}\left(1, x\right)}\right)} \]
    5. Taylor expanded in x around 0 10.9%

      \[\leadsto 2 \cdot \log \color{blue}{\left(1 + x \cdot \left(0.5 + 0.125 \cdot x\right)\right)} \]
    6. Step-by-step derivation
      1. *-commutative10.9%

        \[\leadsto 2 \cdot \log \left(1 + x \cdot \left(0.5 + \color{blue}{x \cdot 0.125}\right)\right) \]
    7. Simplified10.9%

      \[\leadsto 2 \cdot \log \color{blue}{\left(1 + x \cdot \left(0.5 + x \cdot 0.125\right)\right)} \]
    8. Taylor expanded in x around -inf 0.0%

      \[\leadsto \color{blue}{-1 \cdot \frac{21.333333333333332 \cdot \frac{1}{{x}^{2}} - 8}{x} + 2 \cdot \left(\log 0.125 + -2 \cdot \log \left(\frac{-1}{x}\right)\right)} \]
    9. Simplified11.7%

      \[\leadsto \color{blue}{21.333333333333332} \]
  3. Recombined 2 regimes into one program.
  4. Add Preprocessing

Alternative 16: 10.4% accurate, 34.4× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq -7 \cdot 10^{-308}:\\ \;\;\;\;-42.666666666666664\\ \mathbf{else}:\\ \;\;\;\;1.2083333333333333\\ \end{array} \end{array} \]
(FPCore (x)
 :precision binary64
 (if (<= x -7e-308) -42.666666666666664 1.2083333333333333))
/* Herbie regime split: a piecewise-constant approximation of asinh(x),
 * one constant per input regime; threshold from regime inference. */
double code(double x) {
	return (x <= -7e-308) ? -42.666666666666664 : 1.2083333333333333;
}
! Herbie regime split: piecewise-constant approximation of asinh(x).
! NOTE(review): overall accuracy is only ~10.4% per the report -- this
! alternative trades correctness for speed; confirm that is acceptable.
real(8) function code(x)
    real(8), intent (in) :: x
    real(8) :: tmp
    ! Threshold chosen by Herbie's regime inference, not a physical bound.
    if (x <= (-7d-308)) then
        tmp = -42.666666666666664d0
    else
        tmp = 1.2083333333333333d0
    end if
    code = tmp
end function
/** Herbie regime split: piecewise-constant approximation of asinh(x). */
public static double code(double x) {
	return (x <= -7e-308) ? -42.666666666666664 : 1.2083333333333333;
}
def code(x):
	# Herbie regime split: piecewise-constant approximation of asinh(x).
	return -42.666666666666664 if x <= -7e-308 else 1.2083333333333333
function code(x)
	# Herbie regime split: piecewise-constant approximation of asinh(x).
	return x <= -7e-308 ? -42.666666666666664 : 1.2083333333333333
end
% Herbie regime split: piecewise-constant approximation of asinh(x).
function tmp_2 = code(x)
	tmp = 0.0;
	% Threshold chosen by Herbie's regime inference.
	if (x <= -7e-308)
		tmp = -42.666666666666664;
	else
		tmp = 1.2083333333333333;
	end
	tmp_2 = tmp;
end
(* Herbie regime split: piecewise-constant approximation of asinh(x). *)
code[x_] := If[LessEqual[x, -7e-308], -42.666666666666664, 1.2083333333333333]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x \leq -7 \cdot 10^{-308}:\\
\;\;\;\;-42.666666666666664\\

\mathbf{else}:\\
\;\;\;\;1.2083333333333333\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if x < -7e-308

    1. Initial program 7.3%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Add Preprocessing
    3. Step-by-step derivation
      1. add-sqr-sqrt7.3%

        \[\leadsto \log \color{blue}{\left(\sqrt{x + \sqrt{x \cdot x + 1}} \cdot \sqrt{x + \sqrt{x \cdot x + 1}}\right)} \]
      2. pow27.3%

        \[\leadsto \log \color{blue}{\left({\left(\sqrt{x + \sqrt{x \cdot x + 1}}\right)}^{2}\right)} \]
      3. log-pow7.3%

        \[\leadsto \color{blue}{2 \cdot \log \left(\sqrt{x + \sqrt{x \cdot x + 1}}\right)} \]
      4. +-commutative7.3%

        \[\leadsto 2 \cdot \log \left(\sqrt{x + \sqrt{\color{blue}{1 + x \cdot x}}}\right) \]
      5. hypot-1-def7.8%

        \[\leadsto 2 \cdot \log \left(\sqrt{x + \color{blue}{\mathsf{hypot}\left(1, x\right)}}\right) \]
    4. Applied egg-rr7.8%

      \[\leadsto \color{blue}{2 \cdot \log \left(\sqrt{x + \mathsf{hypot}\left(1, x\right)}\right)} \]
    5. Taylor expanded in x around 0 5.5%

      \[\leadsto 2 \cdot \log \color{blue}{\left(1 + x \cdot \left(0.5 + 0.125 \cdot x\right)\right)} \]
    6. Step-by-step derivation
      1. *-commutative5.5%

        \[\leadsto 2 \cdot \log \left(1 + x \cdot \left(0.5 + \color{blue}{x \cdot 0.125}\right)\right) \]
    7. Simplified5.5%

      \[\leadsto 2 \cdot \log \color{blue}{\left(1 + x \cdot \left(0.5 + x \cdot 0.125\right)\right)} \]
    8. Taylor expanded in x around -inf 1.4%

      \[\leadsto \color{blue}{-1 \cdot \frac{21.333333333333332 \cdot \frac{1}{{x}^{2}} - \left(8 + \frac{64}{{x}^{3}}\right)}{x} + 2 \cdot \left(\log 0.125 + -2 \cdot \log \left(\frac{-1}{x}\right)\right)} \]
    9. Simplified10.7%

      \[\leadsto \color{blue}{-42.666666666666664} \]

    if -7e-308 < x

    1. Initial program 38.6%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Add Preprocessing
    3. Step-by-step derivation
      1. add-sqr-sqrt38.5%

        \[\leadsto \log \color{blue}{\left(\sqrt{x + \sqrt{x \cdot x + 1}} \cdot \sqrt{x + \sqrt{x \cdot x + 1}}\right)} \]
      2. pow238.5%

        \[\leadsto \log \color{blue}{\left({\left(\sqrt{x + \sqrt{x \cdot x + 1}}\right)}^{2}\right)} \]
      3. log-pow38.6%

        \[\leadsto \color{blue}{2 \cdot \log \left(\sqrt{x + \sqrt{x \cdot x + 1}}\right)} \]
      4. +-commutative38.6%

        \[\leadsto 2 \cdot \log \left(\sqrt{x + \sqrt{\color{blue}{1 + x \cdot x}}}\right) \]
      5. hypot-1-def63.5%

        \[\leadsto 2 \cdot \log \left(\sqrt{x + \color{blue}{\mathsf{hypot}\left(1, x\right)}}\right) \]
    4. Applied egg-rr63.5%

      \[\leadsto \color{blue}{2 \cdot \log \left(\sqrt{x + \mathsf{hypot}\left(1, x\right)}\right)} \]
    5. Step-by-step derivation
      1. log1p-expm1-u63.5%

        \[\leadsto \color{blue}{\mathsf{log1p}\left(\mathsf{expm1}\left(2 \cdot \log \left(\sqrt{x + \mathsf{hypot}\left(1, x\right)}\right)\right)\right)} \]
      2. log1p-undefine63.5%

        \[\leadsto \color{blue}{\log \left(1 + \mathsf{expm1}\left(2 \cdot \log \left(\sqrt{x + \mathsf{hypot}\left(1, x\right)}\right)\right)\right)} \]
      3. expm1-undefine63.5%

        \[\leadsto \log \left(1 + \color{blue}{\left(e^{2 \cdot \log \left(\sqrt{x + \mathsf{hypot}\left(1, x\right)}\right)} - 1\right)}\right) \]
      4. *-commutative63.5%

        \[\leadsto \log \left(1 + \left(e^{\color{blue}{\log \left(\sqrt{x + \mathsf{hypot}\left(1, x\right)}\right) \cdot 2}} - 1\right)\right) \]
      5. exp-to-pow63.5%

        \[\leadsto \log \left(1 + \left(\color{blue}{{\left(\sqrt{x + \mathsf{hypot}\left(1, x\right)}\right)}^{2}} - 1\right)\right) \]
      6. pow263.5%

        \[\leadsto \log \left(1 + \left(\color{blue}{\sqrt{x + \mathsf{hypot}\left(1, x\right)} \cdot \sqrt{x + \mathsf{hypot}\left(1, x\right)}} - 1\right)\right) \]
      7. add-sqr-sqrt63.5%

        \[\leadsto \log \left(1 + \left(\color{blue}{\left(x + \mathsf{hypot}\left(1, x\right)\right)} - 1\right)\right) \]
    6. Applied egg-rr63.5%

      \[\leadsto \color{blue}{\log \left(1 + \left(\left(x + \mathsf{hypot}\left(1, x\right)\right) - 1\right)\right)} \]
    7. Step-by-step derivation
      1. log1p-define63.5%

        \[\leadsto \color{blue}{\mathsf{log1p}\left(\left(x + \mathsf{hypot}\left(1, x\right)\right) - 1\right)} \]
      2. sub-neg63.5%

        \[\leadsto \mathsf{log1p}\left(\color{blue}{\left(x + \mathsf{hypot}\left(1, x\right)\right) + \left(-1\right)}\right) \]
      3. +-commutative63.5%

        \[\leadsto \mathsf{log1p}\left(\color{blue}{\left(\mathsf{hypot}\left(1, x\right) + x\right)} + \left(-1\right)\right) \]
      4. metadata-eval63.5%

        \[\leadsto \mathsf{log1p}\left(\left(\mathsf{hypot}\left(1, x\right) + x\right) + \color{blue}{-1}\right) \]
      5. associate-+l+63.4%

        \[\leadsto \mathsf{log1p}\left(\color{blue}{\mathsf{hypot}\left(1, x\right) + \left(x + -1\right)}\right) \]
    8. Simplified63.4%

      \[\leadsto \color{blue}{\mathsf{log1p}\left(\mathsf{hypot}\left(1, x\right) + \left(x + -1\right)\right)} \]
    9. Taylor expanded in x around inf 58.5%

      \[\leadsto \color{blue}{\left(\log 2 + \left(-1 \cdot \log \left(\frac{1}{x}\right) + \left(0.25 \cdot \frac{1}{{x}^{2}} + 0.052083333333333336 \cdot \frac{1}{{x}^{6}}\right)\right)\right) - \frac{0.09375}{{x}^{4}}} \]
    10. Simplified10.7%

      \[\leadsto \color{blue}{1.2083333333333333} \]
  3. Recombined 2 regimes into one program.
  4. Add Preprocessing

Alternative 17: 6.5% accurate, 207.0× speedup?

\[\begin{array}{l} \\ -42.666666666666664 \end{array} \]
(FPCore (x) :precision binary64 -42.666666666666664)
/* Constant fallback (~ -128/3) from Herbie's Taylor expansion; x is ignored. */
double code(double x) {
	return -42.666666666666664;
}
! Constant fallback (~ -128/3) from Herbie's Taylor expansion; x is ignored.
real(8) function code(x)
    real(8), intent (in) :: x
    code = -42.666666666666664d0
end function
/** Constant fallback (~ -128/3) from Herbie's Taylor expansion; x is ignored. */
public static double code(double x) {
	return -42.666666666666664;
}
def code(x):
	# Constant fallback (~ -128/3) from Herbie's Taylor expansion; x is ignored.
	return -42.666666666666664
# Constant fallback (~ -128/3) from Herbie's Taylor expansion; x is ignored.
function code(x)
	return -42.666666666666664
end
% Constant fallback (~ -128/3) from Herbie's Taylor expansion; x is ignored.
function tmp = code(x)
	tmp = -42.666666666666664;
end
(* Constant fallback (~ -128/3) from Herbie's Taylor expansion; x is ignored. *)
code[x_] := -42.666666666666664
\begin{array}{l}

\\
-42.666666666666664
\end{array}
Derivation
  1. Initial program 23.4%

    \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
  2. Add Preprocessing
  3. Step-by-step derivation
    1. add-sqr-sqrt23.4%

      \[\leadsto \log \color{blue}{\left(\sqrt{x + \sqrt{x \cdot x + 1}} \cdot \sqrt{x + \sqrt{x \cdot x + 1}}\right)} \]
    2. pow223.4%

      \[\leadsto \log \color{blue}{\left({\left(\sqrt{x + \sqrt{x \cdot x + 1}}\right)}^{2}\right)} \]
    3. log-pow23.4%

      \[\leadsto \color{blue}{2 \cdot \log \left(\sqrt{x + \sqrt{x \cdot x + 1}}\right)} \]
    4. +-commutative23.4%

      \[\leadsto 2 \cdot \log \left(\sqrt{x + \sqrt{\color{blue}{1 + x \cdot x}}}\right) \]
    5. hypot-1-def36.5%

      \[\leadsto 2 \cdot \log \left(\sqrt{x + \color{blue}{\mathsf{hypot}\left(1, x\right)}}\right) \]
  4. Applied egg-rr36.5%

    \[\leadsto \color{blue}{2 \cdot \log \left(\sqrt{x + \mathsf{hypot}\left(1, x\right)}\right)} \]
  5. Taylor expanded in x around 0 8.3%

    \[\leadsto 2 \cdot \log \color{blue}{\left(1 + x \cdot \left(0.5 + 0.125 \cdot x\right)\right)} \]
  6. Step-by-step derivation
    1. *-commutative8.3%

      \[\leadsto 2 \cdot \log \left(1 + x \cdot \left(0.5 + \color{blue}{x \cdot 0.125}\right)\right) \]
  7. Simplified8.3%

    \[\leadsto 2 \cdot \log \color{blue}{\left(1 + x \cdot \left(0.5 + x \cdot 0.125\right)\right)} \]
  8. Taylor expanded in x around -inf 0.7%

    \[\leadsto \color{blue}{-1 \cdot \frac{21.333333333333332 \cdot \frac{1}{{x}^{2}} - \left(8 + \frac{64}{{x}^{3}}\right)}{x} + 2 \cdot \left(\log 0.125 + -2 \cdot \log \left(\frac{-1}{x}\right)\right)} \]
  9. Simplified6.1%

    \[\leadsto \color{blue}{-42.666666666666664} \]
  10. Add Preprocessing

Developer Target 1: 30.3% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_0 := \sqrt{x \cdot x + 1}\\ \mathbf{if}\;x < 0:\\ \;\;\;\;\log \left(\frac{-1}{x - t\_0}\right)\\ \mathbf{else}:\\ \;\;\;\;\log \left(x + t\_0\right)\\ \end{array} \end{array} \]
(FPCore (x)
 :precision binary64
 (let* ((t_0 (sqrt (+ (* x x) 1.0))))
   (if (< x 0.0) (log (/ -1.0 (- x t_0))) (log (+ x t_0)))))
double code(double x) {
	double t_0 = sqrt(((x * x) + 1.0));
	double tmp;
	if (x < 0.0) {
		tmp = log((-1.0 / (x - t_0)));
	} else {
		tmp = log((x + t_0));
	}
	return tmp;
}
! asinh(x): for x < 0 the sum x + sqrt(x*x + 1) suffers cancellation, so
! that branch uses the equivalent form log(-1 / (x - sqrt(x*x + 1))).
real(8) function code(x)
    real(8), intent (in) :: x
    real(8) :: t_0
    real(8) :: tmp
    t_0 = sqrt(((x * x) + 1.0d0))
    if (x < 0.0d0) then
        ! rewritten negative branch avoids catastrophic cancellation
        tmp = log(((-1.0d0) / (x - t_0)))
    else
        tmp = log((x + t_0))
    end if
    code = tmp
end function
/**
 * asinh(x): the naive log(x + sqrt(x*x + 1)) cancels for x < 0, so that
 * branch uses the algebraically equivalent log(-1 / (x - sqrt(x*x + 1))).
 */
public static double code(double x) {
	final double root = Math.sqrt((x * x) + 1.0);
	return (x < 0.0) ? Math.log((-1.0 / (x - root))) : Math.log((x + root));
}
def code(x):
	# asinh(x): the naive log(x + sqrt(x*x + 1)) cancels for x < 0, so that
	# branch uses the equivalent form log(-1 / (x - sqrt(x*x + 1))).
	root = math.sqrt((x * x) + 1.0)
	if x < 0.0:
		return math.log((-1.0 / (x - root)))
	return math.log((x + root))
function code(x)
	# asinh(x): the naive log(x + sqrt(x*x + 1)) cancels for x < 0, so that
	# branch uses the equivalent form log(-1 / (x - sqrt(x*x + 1))).
	root = sqrt(Float64(Float64(x * x) + 1.0))
	return x < 0.0 ? log(Float64(-1.0 / Float64(x - root))) : log(Float64(x + root))
end
% asinh(x): for x < 0 the sum x + sqrt(x*x + 1) suffers cancellation, so
% that branch uses the equivalent form log(-1 / (x - t_0)).
function tmp_2 = code(x)
	t_0 = sqrt(((x * x) + 1.0));
	tmp = 0.0;
	if (x < 0.0)
		tmp = log((-1.0 / (x - t_0)));
	else
		tmp = log((x + t_0));
	end
	tmp_2 = tmp;
end
(* asinh(x): negative branch rewritten as Log[-1/(x - sqrt)] to avoid cancellation. *)
code[x_] := Block[{t$95$0 = N[Sqrt[N[(N[(x * x), $MachinePrecision] + 1.0), $MachinePrecision]], $MachinePrecision]}, If[Less[x, 0.0], N[Log[N[(-1.0 / N[(x - t$95$0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision], N[Log[N[(x + t$95$0), $MachinePrecision]], $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
t_0 := \sqrt{x \cdot x + 1}\\
\mathbf{if}\;x < 0:\\
\;\;\;\;\log \left(\frac{-1}{x - t\_0}\right)\\

\mathbf{else}:\\
\;\;\;\;\log \left(x + t\_0\right)\\


\end{array}
\end{array}

Reproduce

?
herbie shell --seed 2024131 
(FPCore (x)
  :name "Hyperbolic arcsine"
  :precision binary64

  :alt
  (! :herbie-platform default (if (< x 0) (log (/ -1 (- x (sqrt (+ (* x x) 1))))) (log (+ x (sqrt (+ (* x x) 1))))))

  (log (+ x (sqrt (+ (* x x) 1.0)))))