Hyperbolic arcsine

Percentage Accurate: 18.4% → 99.7%
Time: 12.0s
Alternatives: 8
Speedup: 207.0×

Specification

\[\begin{array}{l} \\ \log \left(x + \sqrt{x \cdot x + 1}\right) \end{array} \]
(FPCore (x) :precision binary64 (log (+ x (sqrt (+ (* x x) 1.0)))))
double code(double x) {
	return log((x + sqrt(((x * x) + 1.0))));
}
real(8) function code(x)
    real(8), intent (in) :: x
    code = log((x + sqrt(((x * x) + 1.0d0))))
end function
public static double code(double x) {
	return Math.log((x + Math.sqrt(((x * x) + 1.0))));
}
def code(x):
	return math.log((x + math.sqrt(((x * x) + 1.0))))
function code(x)
	return log(Float64(x + sqrt(Float64(Float64(x * x) + 1.0))))
end
function tmp = code(x)
	tmp = log((x + sqrt(((x * x) + 1.0))));
end
code[x_] := N[Log[N[(x + N[Sqrt[N[(N[(x * x), $MachinePrecision] + 1.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}

\\
\log \left(x + \sqrt{x \cdot x + 1}\right)
\end{array}
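
Much of the original expression's inaccuracy comes from negative inputs, where sqrt(x*x + 1) nearly cancels against x, and from very large inputs, where x*x overflows. The short sketch below is not part of the report; it compares the naive formula against Python's math.asinh, used here as a high-accuracy reference.

import math

def naive_asinh(x):
	# the original program: log(x + sqrt(x*x + 1))
	return math.log(x + math.sqrt(x * x + 1.0))

for x in [-1e7, -1.0, -1e-8, 1e-8, 1.0, 1e200]:
	approx = naive_asinh(x)   # for x = 1e200, x*x overflows to infinity
	exact = math.asinh(x)
	rel = abs(approx - exact) / abs(exact)
	print(f"x = {x:10.1e}   naive = {approx:.17g}   reference = {exact:.17g}   rel. err = {rel:.1e}")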

Sampling outcomes in binary64 precision:

Local Percentage Accuracy vs x

The average percentage accuracy by input value. The horizontal axis shows the value of an input variable; the variable is named in the title. The vertical axis is accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion. The line is an average, while dots represent individual samples.

Accuracy vs Speed

Herbie found 8 alternatives:

The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed-accuracy tradeoffs.

Initial Program: 18.4% accurate, 1.0× speedup

\[\begin{array}{l} \\ \log \left(x + \sqrt{x \cdot x + 1}\right) \end{array} \]
(FPCore (x) :precision binary64 (log (+ x (sqrt (+ (* x x) 1.0)))))
double code(double x) {
	return log((x + sqrt(((x * x) + 1.0))));
}
real(8) function code(x)
    real(8), intent (in) :: x
    code = log((x + sqrt(((x * x) + 1.0d0))))
end function
public static double code(double x) {
	return Math.log((x + Math.sqrt(((x * x) + 1.0))));
}
def code(x):
	return math.log((x + math.sqrt(((x * x) + 1.0))))
function code(x)
	return log(Float64(x + sqrt(Float64(Float64(x * x) + 1.0))))
end
function tmp = code(x)
	tmp = log((x + sqrt(((x * x) + 1.0))));
end
code[x_] := N[Log[N[(x + N[Sqrt[N[(N[(x * x), $MachinePrecision] + 1.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}

\\
\log \left(x + \sqrt{x \cdot x + 1}\right)
\end{array}

Alternative 1: 99.7% accurate, 0.3× speedup

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq -0.0285:\\ \;\;\;\;-\log \left(\mathsf{hypot}\left(1, x\right) - x\right)\\ \mathbf{elif}\;x \leq 1.65:\\ \;\;\;\;\left(1 - {x}^{4} \cdot \left(0.027777777777777776 + {x}^{2} \cdot -0.025\right)\right) \cdot \frac{x}{1 - {x}^{2} \cdot \mathsf{fma}\left({x}^{2}, \mathsf{fma}\left({x}^{2}, -0.044642857142857144, 0.075\right), -0.16666666666666666\right)}\\ \mathbf{else}:\\ \;\;\;\;\log \left(x \cdot 2\right)\\ \end{array} \end{array} \]
(FPCore (x)
 :precision binary64
 (if (<= x -0.0285)
   (- (log (- (hypot 1.0 x) x)))
   (if (<= x 1.65)
     (*
      (- 1.0 (* (pow x 4.0) (+ 0.027777777777777776 (* (pow x 2.0) -0.025))))
      (/
       x
       (-
        1.0
        (*
         (pow x 2.0)
         (fma
          (pow x 2.0)
          (fma (pow x 2.0) -0.044642857142857144 0.075)
          -0.16666666666666666)))))
     (log (* x 2.0)))))
double code(double x) {
	double tmp;
	if (x <= -0.0285) {
		tmp = -log((hypot(1.0, x) - x));
	} else if (x <= 1.65) {
		tmp = (1.0 - (pow(x, 4.0) * (0.027777777777777776 + (pow(x, 2.0) * -0.025)))) * (x / (1.0 - (pow(x, 2.0) * fma(pow(x, 2.0), fma(pow(x, 2.0), -0.044642857142857144, 0.075), -0.16666666666666666))));
	} else {
		tmp = log((x * 2.0));
	}
	return tmp;
}
function code(x)
	tmp = 0.0
	if (x <= -0.0285)
		tmp = Float64(-log(Float64(hypot(1.0, x) - x)));
	elseif (x <= 1.65)
		tmp = Float64(Float64(1.0 - Float64((x ^ 4.0) * Float64(0.027777777777777776 + Float64((x ^ 2.0) * -0.025)))) * Float64(x / Float64(1.0 - Float64((x ^ 2.0) * fma((x ^ 2.0), fma((x ^ 2.0), -0.044642857142857144, 0.075), -0.16666666666666666)))));
	else
		tmp = log(Float64(x * 2.0));
	end
	return tmp
end
code[x_] := If[LessEqual[x, -0.0285], (-N[Log[N[(N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision] - x), $MachinePrecision]], $MachinePrecision]), If[LessEqual[x, 1.65], N[(N[(1.0 - N[(N[Power[x, 4.0], $MachinePrecision] * N[(0.027777777777777776 + N[(N[Power[x, 2.0], $MachinePrecision] * -0.025), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(x / N[(1.0 - N[(N[Power[x, 2.0], $MachinePrecision] * N[(N[Power[x, 2.0], $MachinePrecision] * N[(N[Power[x, 2.0], $MachinePrecision] * -0.044642857142857144 + 0.075), $MachinePrecision] + -0.16666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[Log[N[(x * 2.0), $MachinePrecision]], $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x \leq -0.0285:\\
\;\;\;\;-\log \left(\mathsf{hypot}\left(1, x\right) - x\right)\\

\mathbf{elif}\;x \leq 1.65:\\
\;\;\;\;\left(1 - {x}^{4} \cdot \left(0.027777777777777776 + {x}^{2} \cdot -0.025\right)\right) \cdot \frac{x}{1 - {x}^{2} \cdot \mathsf{fma}\left({x}^{2}, \mathsf{fma}\left({x}^{2}, -0.044642857142857144, 0.075\right), -0.16666666666666666\right)}\\

\mathbf{else}:\\
\;\;\;\;\log \left(x \cdot 2\right)\\


\end{array}
\end{array}
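
The report gives C, Julia, and Wolfram translations of this alternative. For completeness, a Python sketch of the same branch structure is shown below; note that math.fma only exists in Python 3.13+, so the fused multiply-adds from the FPCore are written out as ordinary multiply-adds here, which may give up a little of the measured accuracy.

import math

def code(x):
	if x <= -0.0285:
		# hypot avoids overflow in x*x, and hypot(1, x) - x adds two positive
		# quantities when x is negative, so there is no cancellation
		return -math.log(math.hypot(1.0, x) - x)
	elif x <= 1.65:
		x2 = x * x
		# fma(a, b, c) from the FPCore is expanded to a*b + c below
		num = 1.0 - (x2 * x2) * (0.027777777777777776 + x2 * -0.025)
		den = 1.0 - x2 * (x2 * (x2 * -0.044642857142857144 + 0.075) - 0.16666666666666666)
		return num * (x / den)
	else:
		return math.log(x * 2.0)
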
Derivation
  1. Split input into 3 regimes
  2. if x < -0.028500000000000001

    1. Initial program 2.4%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Step-by-step derivation
      1. sqr-neg2.4%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{\left(-x\right) \cdot \left(-x\right)} + 1}\right) \]
      2. +-commutative2.4%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{1 + \left(-x\right) \cdot \left(-x\right)}}\right) \]
      3. sqr-neg2.4%

        \[\leadsto \log \left(x + \sqrt{1 + \color{blue}{x \cdot x}}\right) \]
      4. hypot-1-def3.9%

        \[\leadsto \log \left(x + \color{blue}{\mathsf{hypot}\left(1, x\right)}\right) \]
    3. Simplified3.9%

      \[\leadsto \color{blue}{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)} \]
    4. Add Preprocessing
    5. Step-by-step derivation
      1. flip-+2.7%

        \[\leadsto \log \color{blue}{\left(\frac{x \cdot x - \mathsf{hypot}\left(1, x\right) \cdot \mathsf{hypot}\left(1, x\right)}{x - \mathsf{hypot}\left(1, x\right)}\right)} \]
      2. clear-num2.7%

        \[\leadsto \log \color{blue}{\left(\frac{1}{\frac{x - \mathsf{hypot}\left(1, x\right)}{x \cdot x - \mathsf{hypot}\left(1, x\right) \cdot \mathsf{hypot}\left(1, x\right)}}\right)} \]
      3. log-div1.4%

        \[\leadsto \color{blue}{\log 1 - \log \left(\frac{x - \mathsf{hypot}\left(1, x\right)}{x \cdot x - \mathsf{hypot}\left(1, x\right) \cdot \mathsf{hypot}\left(1, x\right)}\right)} \]
      4. metadata-eval1.4%

        \[\leadsto \color{blue}{0} - \log \left(\frac{x - \mathsf{hypot}\left(1, x\right)}{x \cdot x - \mathsf{hypot}\left(1, x\right) \cdot \mathsf{hypot}\left(1, x\right)}\right) \]
      5. pow21.4%

        \[\leadsto 0 - \log \left(\frac{x - \mathsf{hypot}\left(1, x\right)}{\color{blue}{{x}^{2}} - \mathsf{hypot}\left(1, x\right) \cdot \mathsf{hypot}\left(1, x\right)}\right) \]
      6. hypot-1-def1.4%

        \[\leadsto 0 - \log \left(\frac{x - \mathsf{hypot}\left(1, x\right)}{{x}^{2} - \color{blue}{\sqrt{1 + x \cdot x}} \cdot \mathsf{hypot}\left(1, x\right)}\right) \]
      7. hypot-1-def1.4%

        \[\leadsto 0 - \log \left(\frac{x - \mathsf{hypot}\left(1, x\right)}{{x}^{2} - \sqrt{1 + x \cdot x} \cdot \color{blue}{\sqrt{1 + x \cdot x}}}\right) \]
      8. add-sqr-sqrt1.4%

        \[\leadsto 0 - \log \left(\frac{x - \mathsf{hypot}\left(1, x\right)}{{x}^{2} - \color{blue}{\left(1 + x \cdot x\right)}}\right) \]
      9. +-commutative1.4%

        \[\leadsto 0 - \log \left(\frac{x - \mathsf{hypot}\left(1, x\right)}{{x}^{2} - \color{blue}{\left(x \cdot x + 1\right)}}\right) \]
      10. fma-define1.4%

        \[\leadsto 0 - \log \left(\frac{x - \mathsf{hypot}\left(1, x\right)}{{x}^{2} - \color{blue}{\mathsf{fma}\left(x, x, 1\right)}}\right) \]
    6. Applied egg-rr1.4%

      \[\leadsto \color{blue}{0 - \log \left(\frac{x - \mathsf{hypot}\left(1, x\right)}{{x}^{2} - \mathsf{fma}\left(x, x, 1\right)}\right)} \]
    7. Step-by-step derivation
      1. neg-sub01.4%

        \[\leadsto \color{blue}{-\log \left(\frac{x - \mathsf{hypot}\left(1, x\right)}{{x}^{2} - \mathsf{fma}\left(x, x, 1\right)}\right)} \]
      2. div-sub1.4%

        \[\leadsto -\log \color{blue}{\left(\frac{x}{{x}^{2} - \mathsf{fma}\left(x, x, 1\right)} - \frac{\mathsf{hypot}\left(1, x\right)}{{x}^{2} - \mathsf{fma}\left(x, x, 1\right)}\right)} \]
      3. fma-undefine1.4%

        \[\leadsto -\log \left(\frac{x}{{x}^{2} - \color{blue}{\left(x \cdot x + 1\right)}} - \frac{\mathsf{hypot}\left(1, x\right)}{{x}^{2} - \mathsf{fma}\left(x, x, 1\right)}\right) \]
      4. unpow21.4%

        \[\leadsto -\log \left(\frac{x}{{x}^{2} - \left(\color{blue}{{x}^{2}} + 1\right)} - \frac{\mathsf{hypot}\left(1, x\right)}{{x}^{2} - \mathsf{fma}\left(x, x, 1\right)}\right) \]
      5. associate--r+1.4%

        \[\leadsto -\log \left(\frac{x}{\color{blue}{\left({x}^{2} - {x}^{2}\right) - 1}} - \frac{\mathsf{hypot}\left(1, x\right)}{{x}^{2} - \mathsf{fma}\left(x, x, 1\right)}\right) \]
      6. +-inverses1.4%

        \[\leadsto -\log \left(\frac{x}{\color{blue}{0} - 1} - \frac{\mathsf{hypot}\left(1, x\right)}{{x}^{2} - \mathsf{fma}\left(x, x, 1\right)}\right) \]
      7. metadata-eval1.4%

        \[\leadsto -\log \left(\frac{x}{\color{blue}{-1}} - \frac{\mathsf{hypot}\left(1, x\right)}{{x}^{2} - \mathsf{fma}\left(x, x, 1\right)}\right) \]
      8. *-rgt-identity1.4%

        \[\leadsto -\log \left(\frac{\color{blue}{x \cdot 1}}{-1} - \frac{\mathsf{hypot}\left(1, x\right)}{{x}^{2} - \mathsf{fma}\left(x, x, 1\right)}\right) \]
      9. associate-/l*1.4%

        \[\leadsto -\log \left(\color{blue}{x \cdot \frac{1}{-1}} - \frac{\mathsf{hypot}\left(1, x\right)}{{x}^{2} - \mathsf{fma}\left(x, x, 1\right)}\right) \]
      10. metadata-eval1.4%

        \[\leadsto -\log \left(x \cdot \color{blue}{-1} - \frac{\mathsf{hypot}\left(1, x\right)}{{x}^{2} - \mathsf{fma}\left(x, x, 1\right)}\right) \]
      11. fma-undefine1.4%

        \[\leadsto -\log \left(x \cdot -1 - \frac{\mathsf{hypot}\left(1, x\right)}{{x}^{2} - \color{blue}{\left(x \cdot x + 1\right)}}\right) \]
      12. unpow21.4%

        \[\leadsto -\log \left(x \cdot -1 - \frac{\mathsf{hypot}\left(1, x\right)}{{x}^{2} - \left(\color{blue}{{x}^{2}} + 1\right)}\right) \]
      13. associate--r+41.4%

        \[\leadsto -\log \left(x \cdot -1 - \frac{\mathsf{hypot}\left(1, x\right)}{\color{blue}{\left({x}^{2} - {x}^{2}\right) - 1}}\right) \]
      14. +-inverses100.0%

        \[\leadsto -\log \left(x \cdot -1 - \frac{\mathsf{hypot}\left(1, x\right)}{\color{blue}{0} - 1}\right) \]
      15. metadata-eval100.0%

        \[\leadsto -\log \left(x \cdot -1 - \frac{\mathsf{hypot}\left(1, x\right)}{\color{blue}{-1}}\right) \]
      16. *-rgt-identity100.0%

        \[\leadsto -\log \left(x \cdot -1 - \frac{\color{blue}{\mathsf{hypot}\left(1, x\right) \cdot 1}}{-1}\right) \]
      17. associate-/l*100.0%

        \[\leadsto -\log \left(x \cdot -1 - \color{blue}{\mathsf{hypot}\left(1, x\right) \cdot \frac{1}{-1}}\right) \]
      18. metadata-eval100.0%

        \[\leadsto -\log \left(x \cdot -1 - \mathsf{hypot}\left(1, x\right) \cdot \color{blue}{-1}\right) \]
      19. *-commutative100.0%

        \[\leadsto -\log \left(x \cdot -1 - \color{blue}{-1 \cdot \mathsf{hypot}\left(1, x\right)}\right) \]
      20. neg-mul-1100.0%

        \[\leadsto -\log \left(x \cdot -1 - \color{blue}{\left(-\mathsf{hypot}\left(1, x\right)\right)}\right) \]
    8. Simplified100.0%

      \[\leadsto \color{blue}{-\log \left(\mathsf{hypot}\left(1, x\right) - x\right)} \]

    if -0.028500000000000001 < x < 1.6499999999999999

    1. Initial program 9.2%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Step-by-step derivation
      1. sqr-neg9.2%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{\left(-x\right) \cdot \left(-x\right)} + 1}\right) \]
      2. +-commutative9.2%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{1 + \left(-x\right) \cdot \left(-x\right)}}\right) \]
      3. sqr-neg9.2%

        \[\leadsto \log \left(x + \sqrt{1 + \color{blue}{x \cdot x}}\right) \]
      4. hypot-1-def9.2%

        \[\leadsto \log \left(x + \color{blue}{\mathsf{hypot}\left(1, x\right)}\right) \]
    3. Simplified9.2%

      \[\leadsto \color{blue}{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)} \]
    4. Add Preprocessing
    5. Taylor expanded in x around 0 100.0%

      \[\leadsto \color{blue}{x \cdot \left(1 + {x}^{2} \cdot \left({x}^{2} \cdot \left(0.075 + -0.044642857142857144 \cdot {x}^{2}\right) - 0.16666666666666666\right)\right)} \]
    6. Step-by-step derivation
      1. flip-+100.0%

        \[\leadsto x \cdot \color{blue}{\frac{1 \cdot 1 - \left({x}^{2} \cdot \left({x}^{2} \cdot \left(0.075 + -0.044642857142857144 \cdot {x}^{2}\right) - 0.16666666666666666\right)\right) \cdot \left({x}^{2} \cdot \left({x}^{2} \cdot \left(0.075 + -0.044642857142857144 \cdot {x}^{2}\right) - 0.16666666666666666\right)\right)}{1 - {x}^{2} \cdot \left({x}^{2} \cdot \left(0.075 + -0.044642857142857144 \cdot {x}^{2}\right) - 0.16666666666666666\right)}} \]
      2. associate-*r/100.0%

        \[\leadsto \color{blue}{\frac{x \cdot \left(1 \cdot 1 - \left({x}^{2} \cdot \left({x}^{2} \cdot \left(0.075 + -0.044642857142857144 \cdot {x}^{2}\right) - 0.16666666666666666\right)\right) \cdot \left({x}^{2} \cdot \left({x}^{2} \cdot \left(0.075 + -0.044642857142857144 \cdot {x}^{2}\right) - 0.16666666666666666\right)\right)\right)}{1 - {x}^{2} \cdot \left({x}^{2} \cdot \left(0.075 + -0.044642857142857144 \cdot {x}^{2}\right) - 0.16666666666666666\right)}} \]
    7. Applied egg-rr100.0%

      \[\leadsto \color{blue}{\frac{x \cdot \left(1 - {x}^{4} \cdot {\left(\mathsf{fma}\left({x}^{2}, \mathsf{fma}\left({x}^{2}, -0.044642857142857144, 0.075\right), -0.16666666666666666\right)\right)}^{2}\right)}{1 - {x}^{2} \cdot \mathsf{fma}\left({x}^{2}, \mathsf{fma}\left({x}^{2}, -0.044642857142857144, 0.075\right), -0.16666666666666666\right)}} \]
    8. Step-by-step derivation
      1. *-commutative100.0%

        \[\leadsto \frac{\color{blue}{\left(1 - {x}^{4} \cdot {\left(\mathsf{fma}\left({x}^{2}, \mathsf{fma}\left({x}^{2}, -0.044642857142857144, 0.075\right), -0.16666666666666666\right)\right)}^{2}\right) \cdot x}}{1 - {x}^{2} \cdot \mathsf{fma}\left({x}^{2}, \mathsf{fma}\left({x}^{2}, -0.044642857142857144, 0.075\right), -0.16666666666666666\right)} \]
      2. associate-/l*100.0%

        \[\leadsto \color{blue}{\left(1 - {x}^{4} \cdot {\left(\mathsf{fma}\left({x}^{2}, \mathsf{fma}\left({x}^{2}, -0.044642857142857144, 0.075\right), -0.16666666666666666\right)\right)}^{2}\right) \cdot \frac{x}{1 - {x}^{2} \cdot \mathsf{fma}\left({x}^{2}, \mathsf{fma}\left({x}^{2}, -0.044642857142857144, 0.075\right), -0.16666666666666666\right)}} \]
    9. Simplified100.0%

      \[\leadsto \color{blue}{\left(1 - {x}^{4} \cdot {\left(\mathsf{fma}\left({x}^{2}, \mathsf{fma}\left({x}^{2}, -0.044642857142857144, 0.075\right), -0.16666666666666666\right)\right)}^{2}\right) \cdot \frac{x}{1 - {x}^{2} \cdot \mathsf{fma}\left({x}^{2}, \mathsf{fma}\left({x}^{2}, -0.044642857142857144, 0.075\right), -0.16666666666666666\right)}} \]
    10. Taylor expanded in x around 0 100.0%

      \[\leadsto \left(1 - \color{blue}{{x}^{4} \cdot \left(0.027777777777777776 + -0.025 \cdot {x}^{2}\right)}\right) \cdot \frac{x}{1 - {x}^{2} \cdot \mathsf{fma}\left({x}^{2}, \mathsf{fma}\left({x}^{2}, -0.044642857142857144, 0.075\right), -0.16666666666666666\right)} \]
    11. Step-by-step derivation
      1. *-commutative100.0%

        \[\leadsto \left(1 - {x}^{4} \cdot \left(0.027777777777777776 + \color{blue}{{x}^{2} \cdot -0.025}\right)\right) \cdot \frac{x}{1 - {x}^{2} \cdot \mathsf{fma}\left({x}^{2}, \mathsf{fma}\left({x}^{2}, -0.044642857142857144, 0.075\right), -0.16666666666666666\right)} \]
    12. Simplified100.0%

      \[\leadsto \left(1 - \color{blue}{{x}^{4} \cdot \left(0.027777777777777776 + {x}^{2} \cdot -0.025\right)}\right) \cdot \frac{x}{1 - {x}^{2} \cdot \mathsf{fma}\left({x}^{2}, \mathsf{fma}\left({x}^{2}, -0.044642857142857144, 0.075\right), -0.16666666666666666\right)} \]

    if 1.6499999999999999 < x

    1. Initial program 54.1%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Step-by-step derivation
      1. sqr-neg54.1%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{\left(-x\right) \cdot \left(-x\right)} + 1}\right) \]
      2. +-commutative54.1%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{1 + \left(-x\right) \cdot \left(-x\right)}}\right) \]
      3. sqr-neg54.1%

        \[\leadsto \log \left(x + \sqrt{1 + \color{blue}{x \cdot x}}\right) \]
      4. hypot-1-def100.0%

        \[\leadsto \log \left(x + \color{blue}{\mathsf{hypot}\left(1, x\right)}\right) \]
    3. Simplified100.0%

      \[\leadsto \color{blue}{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)} \]
    4. Add Preprocessing
    5. Taylor expanded in x around inf 100.0%

      \[\leadsto \log \color{blue}{\left(2 \cdot x\right)} \]
    6. Step-by-step derivation
      1. *-commutative100.0%

        \[\leadsto \log \color{blue}{\left(x \cdot 2\right)} \]
    7. Simplified100.0%

      \[\leadsto \log \color{blue}{\left(x \cdot 2\right)} \]
  3. Recombined 3 regimes into one program.
  4. Final simplification100.0%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -0.0285:\\ \;\;\;\;-\log \left(\mathsf{hypot}\left(1, x\right) - x\right)\\ \mathbf{elif}\;x \leq 1.65:\\ \;\;\;\;\left(1 - {x}^{4} \cdot \left(0.027777777777777776 + {x}^{2} \cdot -0.025\right)\right) \cdot \frac{x}{1 - {x}^{2} \cdot \mathsf{fma}\left({x}^{2}, \mathsf{fma}\left({x}^{2}, -0.044642857142857144, 0.075\right), -0.16666666666666666\right)}\\ \mathbf{else}:\\ \;\;\;\;\log \left(x \cdot 2\right)\\ \end{array} \]
  5. Add Preprocessing
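
The rewrite chain for the first regime boils down to one algebraic identity: since the product of (sqrt(1 + x²) + x) and (sqrt(1 + x²) − x) is exactly 1,

\[\log \left(x + \sqrt{1 + x^{2}}\right) = -\log \left(\sqrt{1 + x^{2}} - x\right) = -\log \left(\mathsf{hypot}\left(1, x\right) - x\right),\]

and for x ≤ −0.0285 the right-hand side adds the two positive quantities hypot(1, x) and −x instead of subtracting nearly equal ones.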

Alternative 2: 99.7% accurate, 0.6× speedup

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq -0.022:\\ \;\;\;\;-\log \left(\mathsf{hypot}\left(1, x\right) - x\right)\\ \mathbf{elif}\;x \leq 1.25:\\ \;\;\;\;x \cdot \left(1 + {x}^{2} \cdot \left({x}^{2} \cdot \left(0.075 + {x}^{2} \cdot -0.044642857142857144\right) - 0.16666666666666666\right)\right)\\ \mathbf{else}:\\ \;\;\;\;\log \left(x \cdot 2\right)\\ \end{array} \end{array} \]
(FPCore (x)
 :precision binary64
 (if (<= x -0.022)
   (- (log (- (hypot 1.0 x) x)))
   (if (<= x 1.25)
     (*
      x
      (+
       1.0
       (*
        (pow x 2.0)
        (-
         (* (pow x 2.0) (+ 0.075 (* (pow x 2.0) -0.044642857142857144)))
         0.16666666666666666))))
     (log (* x 2.0)))))
double code(double x) {
	double tmp;
	if (x <= -0.022) {
		tmp = -log((hypot(1.0, x) - x));
	} else if (x <= 1.25) {
		tmp = x * (1.0 + (pow(x, 2.0) * ((pow(x, 2.0) * (0.075 + (pow(x, 2.0) * -0.044642857142857144))) - 0.16666666666666666)));
	} else {
		tmp = log((x * 2.0));
	}
	return tmp;
}
public static double code(double x) {
	double tmp;
	if (x <= -0.022) {
		tmp = -Math.log((Math.hypot(1.0, x) - x));
	} else if (x <= 1.25) {
		tmp = x * (1.0 + (Math.pow(x, 2.0) * ((Math.pow(x, 2.0) * (0.075 + (Math.pow(x, 2.0) * -0.044642857142857144))) - 0.16666666666666666)));
	} else {
		tmp = Math.log((x * 2.0));
	}
	return tmp;
}
def code(x):
	tmp = 0
	if x <= -0.022:
		tmp = -math.log((math.hypot(1.0, x) - x))
	elif x <= 1.25:
		tmp = x * (1.0 + (math.pow(x, 2.0) * ((math.pow(x, 2.0) * (0.075 + (math.pow(x, 2.0) * -0.044642857142857144))) - 0.16666666666666666)))
	else:
		tmp = math.log((x * 2.0))
	return tmp
function code(x)
	tmp = 0.0
	if (x <= -0.022)
		tmp = Float64(-log(Float64(hypot(1.0, x) - x)));
	elseif (x <= 1.25)
		tmp = Float64(x * Float64(1.0 + Float64((x ^ 2.0) * Float64(Float64((x ^ 2.0) * Float64(0.075 + Float64((x ^ 2.0) * -0.044642857142857144))) - 0.16666666666666666))));
	else
		tmp = log(Float64(x * 2.0));
	end
	return tmp
end
function tmp_2 = code(x)
	tmp = 0.0;
	if (x <= -0.022)
		tmp = -log((hypot(1.0, x) - x));
	elseif (x <= 1.25)
		tmp = x * (1.0 + ((x ^ 2.0) * (((x ^ 2.0) * (0.075 + ((x ^ 2.0) * -0.044642857142857144))) - 0.16666666666666666)));
	else
		tmp = log((x * 2.0));
	end
	tmp_2 = tmp;
end
code[x_] := If[LessEqual[x, -0.022], (-N[Log[N[(N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision] - x), $MachinePrecision]], $MachinePrecision]), If[LessEqual[x, 1.25], N[(x * N[(1.0 + N[(N[Power[x, 2.0], $MachinePrecision] * N[(N[(N[Power[x, 2.0], $MachinePrecision] * N[(0.075 + N[(N[Power[x, 2.0], $MachinePrecision] * -0.044642857142857144), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - 0.16666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[Log[N[(x * 2.0), $MachinePrecision]], $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x \leq -0.022:\\
\;\;\;\;-\log \left(\mathsf{hypot}\left(1, x\right) - x\right)\\

\mathbf{elif}\;x \leq 1.25:\\
\;\;\;\;x \cdot \left(1 + {x}^{2} \cdot \left({x}^{2} \cdot \left(0.075 + {x}^{2} \cdot -0.044642857142857144\right) - 0.16666666666666666\right)\right)\\

\mathbf{else}:\\
\;\;\;\;\log \left(x \cdot 2\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if x < -0.021999999999999999

    1. Initial program 2.4%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Step-by-step derivation
      1. sqr-neg2.4%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{\left(-x\right) \cdot \left(-x\right)} + 1}\right) \]
      2. +-commutative2.4%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{1 + \left(-x\right) \cdot \left(-x\right)}}\right) \]
      3. sqr-neg2.4%

        \[\leadsto \log \left(x + \sqrt{1 + \color{blue}{x \cdot x}}\right) \]
      4. hypot-1-def3.9%

        \[\leadsto \log \left(x + \color{blue}{\mathsf{hypot}\left(1, x\right)}\right) \]
    3. Simplified3.9%

      \[\leadsto \color{blue}{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)} \]
    4. Add Preprocessing
    5. Step-by-step derivation
      1. flip-+2.7%

        \[\leadsto \log \color{blue}{\left(\frac{x \cdot x - \mathsf{hypot}\left(1, x\right) \cdot \mathsf{hypot}\left(1, x\right)}{x - \mathsf{hypot}\left(1, x\right)}\right)} \]
      2. clear-num2.7%

        \[\leadsto \log \color{blue}{\left(\frac{1}{\frac{x - \mathsf{hypot}\left(1, x\right)}{x \cdot x - \mathsf{hypot}\left(1, x\right) \cdot \mathsf{hypot}\left(1, x\right)}}\right)} \]
      3. log-div1.4%

        \[\leadsto \color{blue}{\log 1 - \log \left(\frac{x - \mathsf{hypot}\left(1, x\right)}{x \cdot x - \mathsf{hypot}\left(1, x\right) \cdot \mathsf{hypot}\left(1, x\right)}\right)} \]
      4. metadata-eval1.4%

        \[\leadsto \color{blue}{0} - \log \left(\frac{x - \mathsf{hypot}\left(1, x\right)}{x \cdot x - \mathsf{hypot}\left(1, x\right) \cdot \mathsf{hypot}\left(1, x\right)}\right) \]
      5. pow21.4%

        \[\leadsto 0 - \log \left(\frac{x - \mathsf{hypot}\left(1, x\right)}{\color{blue}{{x}^{2}} - \mathsf{hypot}\left(1, x\right) \cdot \mathsf{hypot}\left(1, x\right)}\right) \]
      6. hypot-1-def1.4%

        \[\leadsto 0 - \log \left(\frac{x - \mathsf{hypot}\left(1, x\right)}{{x}^{2} - \color{blue}{\sqrt{1 + x \cdot x}} \cdot \mathsf{hypot}\left(1, x\right)}\right) \]
      7. hypot-1-def1.4%

        \[\leadsto 0 - \log \left(\frac{x - \mathsf{hypot}\left(1, x\right)}{{x}^{2} - \sqrt{1 + x \cdot x} \cdot \color{blue}{\sqrt{1 + x \cdot x}}}\right) \]
      8. add-sqr-sqrt1.4%

        \[\leadsto 0 - \log \left(\frac{x - \mathsf{hypot}\left(1, x\right)}{{x}^{2} - \color{blue}{\left(1 + x \cdot x\right)}}\right) \]
      9. +-commutative1.4%

        \[\leadsto 0 - \log \left(\frac{x - \mathsf{hypot}\left(1, x\right)}{{x}^{2} - \color{blue}{\left(x \cdot x + 1\right)}}\right) \]
      10. fma-define1.4%

        \[\leadsto 0 - \log \left(\frac{x - \mathsf{hypot}\left(1, x\right)}{{x}^{2} - \color{blue}{\mathsf{fma}\left(x, x, 1\right)}}\right) \]
    6. Applied egg-rr1.4%

      \[\leadsto \color{blue}{0 - \log \left(\frac{x - \mathsf{hypot}\left(1, x\right)}{{x}^{2} - \mathsf{fma}\left(x, x, 1\right)}\right)} \]
    7. Step-by-step derivation
      1. neg-sub01.4%

        \[\leadsto \color{blue}{-\log \left(\frac{x - \mathsf{hypot}\left(1, x\right)}{{x}^{2} - \mathsf{fma}\left(x, x, 1\right)}\right)} \]
      2. div-sub1.4%

        \[\leadsto -\log \color{blue}{\left(\frac{x}{{x}^{2} - \mathsf{fma}\left(x, x, 1\right)} - \frac{\mathsf{hypot}\left(1, x\right)}{{x}^{2} - \mathsf{fma}\left(x, x, 1\right)}\right)} \]
      3. fma-undefine1.4%

        \[\leadsto -\log \left(\frac{x}{{x}^{2} - \color{blue}{\left(x \cdot x + 1\right)}} - \frac{\mathsf{hypot}\left(1, x\right)}{{x}^{2} - \mathsf{fma}\left(x, x, 1\right)}\right) \]
      4. unpow21.4%

        \[\leadsto -\log \left(\frac{x}{{x}^{2} - \left(\color{blue}{{x}^{2}} + 1\right)} - \frac{\mathsf{hypot}\left(1, x\right)}{{x}^{2} - \mathsf{fma}\left(x, x, 1\right)}\right) \]
      5. associate--r+1.4%

        \[\leadsto -\log \left(\frac{x}{\color{blue}{\left({x}^{2} - {x}^{2}\right) - 1}} - \frac{\mathsf{hypot}\left(1, x\right)}{{x}^{2} - \mathsf{fma}\left(x, x, 1\right)}\right) \]
      6. +-inverses1.4%

        \[\leadsto -\log \left(\frac{x}{\color{blue}{0} - 1} - \frac{\mathsf{hypot}\left(1, x\right)}{{x}^{2} - \mathsf{fma}\left(x, x, 1\right)}\right) \]
      7. metadata-eval1.4%

        \[\leadsto -\log \left(\frac{x}{\color{blue}{-1}} - \frac{\mathsf{hypot}\left(1, x\right)}{{x}^{2} - \mathsf{fma}\left(x, x, 1\right)}\right) \]
      8. *-rgt-identity1.4%

        \[\leadsto -\log \left(\frac{\color{blue}{x \cdot 1}}{-1} - \frac{\mathsf{hypot}\left(1, x\right)}{{x}^{2} - \mathsf{fma}\left(x, x, 1\right)}\right) \]
      9. associate-/l*1.4%

        \[\leadsto -\log \left(\color{blue}{x \cdot \frac{1}{-1}} - \frac{\mathsf{hypot}\left(1, x\right)}{{x}^{2} - \mathsf{fma}\left(x, x, 1\right)}\right) \]
      10. metadata-eval1.4%

        \[\leadsto -\log \left(x \cdot \color{blue}{-1} - \frac{\mathsf{hypot}\left(1, x\right)}{{x}^{2} - \mathsf{fma}\left(x, x, 1\right)}\right) \]
      11. fma-undefine1.4%

        \[\leadsto -\log \left(x \cdot -1 - \frac{\mathsf{hypot}\left(1, x\right)}{{x}^{2} - \color{blue}{\left(x \cdot x + 1\right)}}\right) \]
      12. unpow21.4%

        \[\leadsto -\log \left(x \cdot -1 - \frac{\mathsf{hypot}\left(1, x\right)}{{x}^{2} - \left(\color{blue}{{x}^{2}} + 1\right)}\right) \]
      13. associate--r+41.4%

        \[\leadsto -\log \left(x \cdot -1 - \frac{\mathsf{hypot}\left(1, x\right)}{\color{blue}{\left({x}^{2} - {x}^{2}\right) - 1}}\right) \]
      14. +-inverses100.0%

        \[\leadsto -\log \left(x \cdot -1 - \frac{\mathsf{hypot}\left(1, x\right)}{\color{blue}{0} - 1}\right) \]
      15. metadata-eval100.0%

        \[\leadsto -\log \left(x \cdot -1 - \frac{\mathsf{hypot}\left(1, x\right)}{\color{blue}{-1}}\right) \]
      16. *-rgt-identity100.0%

        \[\leadsto -\log \left(x \cdot -1 - \frac{\color{blue}{\mathsf{hypot}\left(1, x\right) \cdot 1}}{-1}\right) \]
      17. associate-/l*100.0%

        \[\leadsto -\log \left(x \cdot -1 - \color{blue}{\mathsf{hypot}\left(1, x\right) \cdot \frac{1}{-1}}\right) \]
      18. metadata-eval100.0%

        \[\leadsto -\log \left(x \cdot -1 - \mathsf{hypot}\left(1, x\right) \cdot \color{blue}{-1}\right) \]
      19. *-commutative100.0%

        \[\leadsto -\log \left(x \cdot -1 - \color{blue}{-1 \cdot \mathsf{hypot}\left(1, x\right)}\right) \]
      20. neg-mul-1100.0%

        \[\leadsto -\log \left(x \cdot -1 - \color{blue}{\left(-\mathsf{hypot}\left(1, x\right)\right)}\right) \]
    8. Simplified100.0%

      \[\leadsto \color{blue}{-\log \left(\mathsf{hypot}\left(1, x\right) - x\right)} \]

    if -0.021999999999999999 < x < 1.25

    1. Initial program 9.2%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Step-by-step derivation
      1. sqr-neg9.2%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{\left(-x\right) \cdot \left(-x\right)} + 1}\right) \]
      2. +-commutative9.2%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{1 + \left(-x\right) \cdot \left(-x\right)}}\right) \]
      3. sqr-neg9.2%

        \[\leadsto \log \left(x + \sqrt{1 + \color{blue}{x \cdot x}}\right) \]
      4. hypot-1-def9.2%

        \[\leadsto \log \left(x + \color{blue}{\mathsf{hypot}\left(1, x\right)}\right) \]
    3. Simplified9.2%

      \[\leadsto \color{blue}{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)} \]
    4. Add Preprocessing
    5. Taylor expanded in x around 0 100.0%

      \[\leadsto \color{blue}{x \cdot \left(1 + {x}^{2} \cdot \left({x}^{2} \cdot \left(0.075 + -0.044642857142857144 \cdot {x}^{2}\right) - 0.16666666666666666\right)\right)} \]

    if 1.25 < x

    1. Initial program 54.1%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Step-by-step derivation
      1. sqr-neg54.1%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{\left(-x\right) \cdot \left(-x\right)} + 1}\right) \]
      2. +-commutative54.1%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{1 + \left(-x\right) \cdot \left(-x\right)}}\right) \]
      3. sqr-neg54.1%

        \[\leadsto \log \left(x + \sqrt{1 + \color{blue}{x \cdot x}}\right) \]
      4. hypot-1-def100.0%

        \[\leadsto \log \left(x + \color{blue}{\mathsf{hypot}\left(1, x\right)}\right) \]
    3. Simplified100.0%

      \[\leadsto \color{blue}{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)} \]
    4. Add Preprocessing
    5. Taylor expanded in x around inf 100.0%

      \[\leadsto \log \color{blue}{\left(2 \cdot x\right)} \]
    6. Step-by-step derivation
      1. *-commutative100.0%

        \[\leadsto \log \color{blue}{\left(x \cdot 2\right)} \]
    7. Simplified100.0%

      \[\leadsto \log \color{blue}{\left(x \cdot 2\right)} \]
  3. Recombined 3 regimes into one program.
  4. Final simplification100.0%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -0.022:\\ \;\;\;\;-\log \left(\mathsf{hypot}\left(1, x\right) - x\right)\\ \mathbf{elif}\;x \leq 1.25:\\ \;\;\;\;x \cdot \left(1 + {x}^{2} \cdot \left({x}^{2} \cdot \left(0.075 + {x}^{2} \cdot -0.044642857142857144\right) - 0.16666666666666666\right)\right)\\ \mathbf{else}:\\ \;\;\;\;\log \left(x \cdot 2\right)\\ \end{array} \]
  5. Add Preprocessing
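
A quick sanity check on the constants in the x ≤ 1.25 branch: they are the leading Maclaurin coefficients of the hyperbolic arcsine,

\[\operatorname{arcsinh}(x) = x - \frac{1}{6} x^{3} + \frac{3}{40} x^{5} - \frac{5}{112} x^{7} + O\left(x^{9}\right),\]

where 1/6 ≈ 0.16666666666666666, 3/40 = 0.075, and 5/112 ≈ 0.044642857142857144, matching the Taylor expansion used in step 5 of the middle regime.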

Alternative 3: 99.6% accurate, 1.0× speedup

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq -0.00105:\\ \;\;\;\;-\log \left(\mathsf{hypot}\left(1, x\right) - x\right)\\ \mathbf{elif}\;x \leq 1.25:\\ \;\;\;\;x + -0.16666666666666666 \cdot {x}^{3}\\ \mathbf{else}:\\ \;\;\;\;\log \left(x \cdot 2\right)\\ \end{array} \end{array} \]
(FPCore (x)
 :precision binary64
 (if (<= x -0.00105)
   (- (log (- (hypot 1.0 x) x)))
   (if (<= x 1.25)
     (+ x (* -0.16666666666666666 (pow x 3.0)))
     (log (* x 2.0)))))
double code(double x) {
	double tmp;
	if (x <= -0.00105) {
		tmp = -log((hypot(1.0, x) - x));
	} else if (x <= 1.25) {
		tmp = x + (-0.16666666666666666 * pow(x, 3.0));
	} else {
		tmp = log((x * 2.0));
	}
	return tmp;
}
public static double code(double x) {
	double tmp;
	if (x <= -0.00105) {
		tmp = -Math.log((Math.hypot(1.0, x) - x));
	} else if (x <= 1.25) {
		tmp = x + (-0.16666666666666666 * Math.pow(x, 3.0));
	} else {
		tmp = Math.log((x * 2.0));
	}
	return tmp;
}
def code(x):
	tmp = 0
	if x <= -0.00105:
		tmp = -math.log((math.hypot(1.0, x) - x))
	elif x <= 1.25:
		tmp = x + (-0.16666666666666666 * math.pow(x, 3.0))
	else:
		tmp = math.log((x * 2.0))
	return tmp
function code(x)
	tmp = 0.0
	if (x <= -0.00105)
		tmp = Float64(-log(Float64(hypot(1.0, x) - x)));
	elseif (x <= 1.25)
		tmp = Float64(x + Float64(-0.16666666666666666 * (x ^ 3.0)));
	else
		tmp = log(Float64(x * 2.0));
	end
	return tmp
end
function tmp_2 = code(x)
	tmp = 0.0;
	if (x <= -0.00105)
		tmp = -log((hypot(1.0, x) - x));
	elseif (x <= 1.25)
		tmp = x + (-0.16666666666666666 * (x ^ 3.0));
	else
		tmp = log((x * 2.0));
	end
	tmp_2 = tmp;
end
code[x_] := If[LessEqual[x, -0.00105], (-N[Log[N[(N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision] - x), $MachinePrecision]], $MachinePrecision]), If[LessEqual[x, 1.25], N[(x + N[(-0.16666666666666666 * N[Power[x, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[Log[N[(x * 2.0), $MachinePrecision]], $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x \leq -0.00105:\\
\;\;\;\;-\log \left(\mathsf{hypot}\left(1, x\right) - x\right)\\

\mathbf{elif}\;x \leq 1.25:\\
\;\;\;\;x + -0.16666666666666666 \cdot {x}^{3}\\

\mathbf{else}:\\
\;\;\;\;\log \left(x \cdot 2\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if x < -0.00104999999999999994

    1. Initial program 5.0%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Step-by-step derivation
      1. sqr-neg5.0%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{\left(-x\right) \cdot \left(-x\right)} + 1}\right) \]
      2. +-commutative5.0%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{1 + \left(-x\right) \cdot \left(-x\right)}}\right) \]
      3. sqr-neg5.0%

        \[\leadsto \log \left(x + \sqrt{1 + \color{blue}{x \cdot x}}\right) \]
      4. hypot-1-def6.4%

        \[\leadsto \log \left(x + \color{blue}{\mathsf{hypot}\left(1, x\right)}\right) \]
    3. Simplified6.4%

      \[\leadsto \color{blue}{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)} \]
    4. Add Preprocessing
    5. Step-by-step derivation
      1. flip-+5.1%

        \[\leadsto \log \color{blue}{\left(\frac{x \cdot x - \mathsf{hypot}\left(1, x\right) \cdot \mathsf{hypot}\left(1, x\right)}{x - \mathsf{hypot}\left(1, x\right)}\right)} \]
      2. clear-num5.1%

        \[\leadsto \log \color{blue}{\left(\frac{1}{\frac{x - \mathsf{hypot}\left(1, x\right)}{x \cdot x - \mathsf{hypot}\left(1, x\right) \cdot \mathsf{hypot}\left(1, x\right)}}\right)} \]
      3. log-div3.9%

        \[\leadsto \color{blue}{\log 1 - \log \left(\frac{x - \mathsf{hypot}\left(1, x\right)}{x \cdot x - \mathsf{hypot}\left(1, x\right) \cdot \mathsf{hypot}\left(1, x\right)}\right)} \]
      4. metadata-eval3.9%

        \[\leadsto \color{blue}{0} - \log \left(\frac{x - \mathsf{hypot}\left(1, x\right)}{x \cdot x - \mathsf{hypot}\left(1, x\right) \cdot \mathsf{hypot}\left(1, x\right)}\right) \]
      5. pow23.9%

        \[\leadsto 0 - \log \left(\frac{x - \mathsf{hypot}\left(1, x\right)}{\color{blue}{{x}^{2}} - \mathsf{hypot}\left(1, x\right) \cdot \mathsf{hypot}\left(1, x\right)}\right) \]
      6. hypot-1-def3.9%

        \[\leadsto 0 - \log \left(\frac{x - \mathsf{hypot}\left(1, x\right)}{{x}^{2} - \color{blue}{\sqrt{1 + x \cdot x}} \cdot \mathsf{hypot}\left(1, x\right)}\right) \]
      7. hypot-1-def3.9%

        \[\leadsto 0 - \log \left(\frac{x - \mathsf{hypot}\left(1, x\right)}{{x}^{2} - \sqrt{1 + x \cdot x} \cdot \color{blue}{\sqrt{1 + x \cdot x}}}\right) \]
      8. add-sqr-sqrt3.9%

        \[\leadsto 0 - \log \left(\frac{x - \mathsf{hypot}\left(1, x\right)}{{x}^{2} - \color{blue}{\left(1 + x \cdot x\right)}}\right) \]
      9. +-commutative3.9%

        \[\leadsto 0 - \log \left(\frac{x - \mathsf{hypot}\left(1, x\right)}{{x}^{2} - \color{blue}{\left(x \cdot x + 1\right)}}\right) \]
      10. fma-define3.9%

        \[\leadsto 0 - \log \left(\frac{x - \mathsf{hypot}\left(1, x\right)}{{x}^{2} - \color{blue}{\mathsf{fma}\left(x, x, 1\right)}}\right) \]
    6. Applied egg-rr3.9%

      \[\leadsto \color{blue}{0 - \log \left(\frac{x - \mathsf{hypot}\left(1, x\right)}{{x}^{2} - \mathsf{fma}\left(x, x, 1\right)}\right)} \]
    7. Step-by-step derivation
      1. neg-sub03.9%

        \[\leadsto \color{blue}{-\log \left(\frac{x - \mathsf{hypot}\left(1, x\right)}{{x}^{2} - \mathsf{fma}\left(x, x, 1\right)}\right)} \]
      2. div-sub3.9%

        \[\leadsto -\log \color{blue}{\left(\frac{x}{{x}^{2} - \mathsf{fma}\left(x, x, 1\right)} - \frac{\mathsf{hypot}\left(1, x\right)}{{x}^{2} - \mathsf{fma}\left(x, x, 1\right)}\right)} \]
      3. fma-undefine3.9%

        \[\leadsto -\log \left(\frac{x}{{x}^{2} - \color{blue}{\left(x \cdot x + 1\right)}} - \frac{\mathsf{hypot}\left(1, x\right)}{{x}^{2} - \mathsf{fma}\left(x, x, 1\right)}\right) \]
      4. unpow23.9%

        \[\leadsto -\log \left(\frac{x}{{x}^{2} - \left(\color{blue}{{x}^{2}} + 1\right)} - \frac{\mathsf{hypot}\left(1, x\right)}{{x}^{2} - \mathsf{fma}\left(x, x, 1\right)}\right) \]
      5. associate--r+3.9%

        \[\leadsto -\log \left(\frac{x}{\color{blue}{\left({x}^{2} - {x}^{2}\right) - 1}} - \frac{\mathsf{hypot}\left(1, x\right)}{{x}^{2} - \mathsf{fma}\left(x, x, 1\right)}\right) \]
      6. +-inverses3.9%

        \[\leadsto -\log \left(\frac{x}{\color{blue}{0} - 1} - \frac{\mathsf{hypot}\left(1, x\right)}{{x}^{2} - \mathsf{fma}\left(x, x, 1\right)}\right) \]
      7. metadata-eval3.9%

        \[\leadsto -\log \left(\frac{x}{\color{blue}{-1}} - \frac{\mathsf{hypot}\left(1, x\right)}{{x}^{2} - \mathsf{fma}\left(x, x, 1\right)}\right) \]
      8. *-rgt-identity3.9%

        \[\leadsto -\log \left(\frac{\color{blue}{x \cdot 1}}{-1} - \frac{\mathsf{hypot}\left(1, x\right)}{{x}^{2} - \mathsf{fma}\left(x, x, 1\right)}\right) \]
      9. associate-/l*3.9%

        \[\leadsto -\log \left(\color{blue}{x \cdot \frac{1}{-1}} - \frac{\mathsf{hypot}\left(1, x\right)}{{x}^{2} - \mathsf{fma}\left(x, x, 1\right)}\right) \]
      10. metadata-eval3.9%

        \[\leadsto -\log \left(x \cdot \color{blue}{-1} - \frac{\mathsf{hypot}\left(1, x\right)}{{x}^{2} - \mathsf{fma}\left(x, x, 1\right)}\right) \]
      11. fma-undefine3.9%

        \[\leadsto -\log \left(x \cdot -1 - \frac{\mathsf{hypot}\left(1, x\right)}{{x}^{2} - \color{blue}{\left(x \cdot x + 1\right)}}\right) \]
      12. unpow23.9%

        \[\leadsto -\log \left(x \cdot -1 - \frac{\mathsf{hypot}\left(1, x\right)}{{x}^{2} - \left(\color{blue}{{x}^{2}} + 1\right)}\right) \]
      13. associate--r+42.9%

        \[\leadsto -\log \left(x \cdot -1 - \frac{\mathsf{hypot}\left(1, x\right)}{\color{blue}{\left({x}^{2} - {x}^{2}\right) - 1}}\right) \]
      14. +-inverses99.9%

        \[\leadsto -\log \left(x \cdot -1 - \frac{\mathsf{hypot}\left(1, x\right)}{\color{blue}{0} - 1}\right) \]
      15. metadata-eval99.9%

        \[\leadsto -\log \left(x \cdot -1 - \frac{\mathsf{hypot}\left(1, x\right)}{\color{blue}{-1}}\right) \]
      16. *-rgt-identity99.9%

        \[\leadsto -\log \left(x \cdot -1 - \frac{\color{blue}{\mathsf{hypot}\left(1, x\right) \cdot 1}}{-1}\right) \]
      17. associate-/l*99.9%

        \[\leadsto -\log \left(x \cdot -1 - \color{blue}{\mathsf{hypot}\left(1, x\right) \cdot \frac{1}{-1}}\right) \]
      18. metadata-eval99.9%

        \[\leadsto -\log \left(x \cdot -1 - \mathsf{hypot}\left(1, x\right) \cdot \color{blue}{-1}\right) \]
      19. *-commutative99.9%

        \[\leadsto -\log \left(x \cdot -1 - \color{blue}{-1 \cdot \mathsf{hypot}\left(1, x\right)}\right) \]
      20. neg-mul-199.9%

        \[\leadsto -\log \left(x \cdot -1 - \color{blue}{\left(-\mathsf{hypot}\left(1, x\right)\right)}\right) \]
    8. Simplified99.9%

      \[\leadsto \color{blue}{-\log \left(\mathsf{hypot}\left(1, x\right) - x\right)} \]

    if -0.00104999999999999994 < x < 1.25

    1. Initial program 7.9%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Step-by-step derivation
      1. sqr-neg7.9%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{\left(-x\right) \cdot \left(-x\right)} + 1}\right) \]
      2. +-commutative7.9%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{1 + \left(-x\right) \cdot \left(-x\right)}}\right) \]
      3. sqr-neg7.9%

        \[\leadsto \log \left(x + \sqrt{1 + \color{blue}{x \cdot x}}\right) \]
      4. hypot-1-def7.9%

        \[\leadsto \log \left(x + \color{blue}{\mathsf{hypot}\left(1, x\right)}\right) \]
    3. Simplified7.9%

      \[\leadsto \color{blue}{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)} \]
    4. Add Preprocessing
    5. Taylor expanded in x around 0 100.0%

      \[\leadsto \color{blue}{x \cdot \left(1 + -0.16666666666666666 \cdot {x}^{2}\right)} \]
    6. Step-by-step derivation
      1. distribute-rgt-in100.0%

        \[\leadsto \color{blue}{1 \cdot x + \left(-0.16666666666666666 \cdot {x}^{2}\right) \cdot x} \]
      2. *-lft-identity100.0%

        \[\leadsto \color{blue}{x} + \left(-0.16666666666666666 \cdot {x}^{2}\right) \cdot x \]
      3. associate-*l*100.0%

        \[\leadsto x + \color{blue}{-0.16666666666666666 \cdot \left({x}^{2} \cdot x\right)} \]
      4. unpow2100.0%

        \[\leadsto x + -0.16666666666666666 \cdot \left(\color{blue}{\left(x \cdot x\right)} \cdot x\right) \]
      5. unpow3100.0%

        \[\leadsto x + -0.16666666666666666 \cdot \color{blue}{{x}^{3}} \]
    7. Simplified100.0%

      \[\leadsto \color{blue}{x + -0.16666666666666666 \cdot {x}^{3}} \]

    if 1.25 < x

    1. Initial program 54.1%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Step-by-step derivation
      1. sqr-neg54.1%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{\left(-x\right) \cdot \left(-x\right)} + 1}\right) \]
      2. +-commutative54.1%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{1 + \left(-x\right) \cdot \left(-x\right)}}\right) \]
      3. sqr-neg54.1%

        \[\leadsto \log \left(x + \sqrt{1 + \color{blue}{x \cdot x}}\right) \]
      4. hypot-1-def100.0%

        \[\leadsto \log \left(x + \color{blue}{\mathsf{hypot}\left(1, x\right)}\right) \]
    3. Simplified100.0%

      \[\leadsto \color{blue}{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)} \]
    4. Add Preprocessing
    5. Taylor expanded in x around inf 100.0%

      \[\leadsto \log \color{blue}{\left(2 \cdot x\right)} \]
    6. Step-by-step derivation
      1. *-commutative100.0%

        \[\leadsto \log \color{blue}{\left(x \cdot 2\right)} \]
    7. Simplified100.0%

      \[\leadsto \log \color{blue}{\left(x \cdot 2\right)} \]
  3. Recombined 3 regimes into one program.
  4. Final simplification100.0%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -0.00105:\\ \;\;\;\;-\log \left(\mathsf{hypot}\left(1, x\right) - x\right)\\ \mathbf{elif}\;x \leq 1.25:\\ \;\;\;\;x + -0.16666666666666666 \cdot {x}^{3}\\ \mathbf{else}:\\ \;\;\;\;\log \left(x \cdot 2\right)\\ \end{array} \]
  5. Add Preprocessing
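
The middle branch here keeps only the cubic term of the series, so its truncation error grows as x approaches the 1.25 cutoff, which is presumably why this alternative scores slightly below Alternatives 1 and 2. A small check (not from the report) against math.asinh:

import math

def alt3_middle(x):
	# middle branch of Alternative 3: cubic truncation of the arcsinh series
	return x + -0.16666666666666666 * x ** 3.0

for x in [0.01, 0.25, 0.75, 1.25]:
	print(f"x = {x}   cubic = {alt3_middle(x):.12g}   asinh = {math.asinh(x):.12g}")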

Alternative 4: 99.4% accurate, 1.8× speedup

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq -1.26:\\ \;\;\;\;\log \left(\frac{-0.5}{x}\right)\\ \mathbf{elif}\;x \leq 1.25:\\ \;\;\;\;x \cdot \left(1 + {x}^{2} \cdot -0.16666666666666666\right)\\ \mathbf{else}:\\ \;\;\;\;\log \left(x \cdot 2\right)\\ \end{array} \end{array} \]
(FPCore (x)
 :precision binary64
 (if (<= x -1.26)
   (log (/ -0.5 x))
   (if (<= x 1.25)
     (* x (+ 1.0 (* (pow x 2.0) -0.16666666666666666)))
     (log (* x 2.0)))))
double code(double x) {
	double tmp;
	if (x <= -1.26) {
		tmp = log((-0.5 / x));
	} else if (x <= 1.25) {
		tmp = x * (1.0 + (pow(x, 2.0) * -0.16666666666666666));
	} else {
		tmp = log((x * 2.0));
	}
	return tmp;
}
real(8) function code(x)
    real(8), intent (in) :: x
    real(8) :: tmp
    if (x <= (-1.26d0)) then
        tmp = log(((-0.5d0) / x))
    else if (x <= 1.25d0) then
        tmp = x * (1.0d0 + ((x ** 2.0d0) * (-0.16666666666666666d0)))
    else
        tmp = log((x * 2.0d0))
    end if
    code = tmp
end function
public static double code(double x) {
	double tmp;
	if (x <= -1.26) {
		tmp = Math.log((-0.5 / x));
	} else if (x <= 1.25) {
		tmp = x * (1.0 + (Math.pow(x, 2.0) * -0.16666666666666666));
	} else {
		tmp = Math.log((x * 2.0));
	}
	return tmp;
}
def code(x):
	tmp = 0
	if x <= -1.26:
		tmp = math.log((-0.5 / x))
	elif x <= 1.25:
		tmp = x * (1.0 + (math.pow(x, 2.0) * -0.16666666666666666))
	else:
		tmp = math.log((x * 2.0))
	return tmp
function code(x)
	tmp = 0.0
	if (x <= -1.26)
		tmp = log(Float64(-0.5 / x));
	elseif (x <= 1.25)
		tmp = Float64(x * Float64(1.0 + Float64((x ^ 2.0) * -0.16666666666666666)));
	else
		tmp = log(Float64(x * 2.0));
	end
	return tmp
end
function tmp_2 = code(x)
	tmp = 0.0;
	if (x <= -1.26)
		tmp = log((-0.5 / x));
	elseif (x <= 1.25)
		tmp = x * (1.0 + ((x ^ 2.0) * -0.16666666666666666));
	else
		tmp = log((x * 2.0));
	end
	tmp_2 = tmp;
end
code[x_] := If[LessEqual[x, -1.26], N[Log[N[(-0.5 / x), $MachinePrecision]], $MachinePrecision], If[LessEqual[x, 1.25], N[(x * N[(1.0 + N[(N[Power[x, 2.0], $MachinePrecision] * -0.16666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[Log[N[(x * 2.0), $MachinePrecision]], $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x \leq -1.26:\\
\;\;\;\;\log \left(\frac{-0.5}{x}\right)\\

\mathbf{elif}\;x \leq 1.25:\\
\;\;\;\;x \cdot \left(1 + {x}^{2} \cdot -0.16666666666666666\right)\\

\mathbf{else}:\\
\;\;\;\;\log \left(x \cdot 2\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if x < -1.26000000000000001

    1. Initial program 2.4%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Step-by-step derivation
      1. sqr-neg2.4%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{\left(-x\right) \cdot \left(-x\right)} + 1}\right) \]
      2. +-commutative2.4%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{1 + \left(-x\right) \cdot \left(-x\right)}}\right) \]
      3. sqr-neg2.4%

        \[\leadsto \log \left(x + \sqrt{1 + \color{blue}{x \cdot x}}\right) \]
      4. hypot-1-def3.9%

        \[\leadsto \log \left(x + \color{blue}{\mathsf{hypot}\left(1, x\right)}\right) \]
    3. Simplified3.9%

      \[\leadsto \color{blue}{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)} \]
    4. Add Preprocessing
    5. Taylor expanded in x around -inf 99.7%

      \[\leadsto \log \color{blue}{\left(\frac{-0.5}{x}\right)} \]

    if -1.26000000000000001 < x < 1.25

    1. Initial program 9.2%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Step-by-step derivation
      1. sqr-neg9.2%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{\left(-x\right) \cdot \left(-x\right)} + 1}\right) \]
      2. +-commutative9.2%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{1 + \left(-x\right) \cdot \left(-x\right)}}\right) \]
      3. sqr-neg9.2%

        \[\leadsto \log \left(x + \sqrt{1 + \color{blue}{x \cdot x}}\right) \]
      4. hypot-1-def9.2%

        \[\leadsto \log \left(x + \color{blue}{\mathsf{hypot}\left(1, x\right)}\right) \]
    3. Simplified9.2%

      \[\leadsto \color{blue}{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)} \]
    4. Add Preprocessing
    5. Taylor expanded in x around 0 99.4%

      \[\leadsto \color{blue}{x \cdot \left(1 + -0.16666666666666666 \cdot {x}^{2}\right)} \]

    if 1.25 < x

    1. Initial program 54.1%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Step-by-step derivation
      1. sqr-neg54.1%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{\left(-x\right) \cdot \left(-x\right)} + 1}\right) \]
      2. +-commutative54.1%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{1 + \left(-x\right) \cdot \left(-x\right)}}\right) \]
      3. sqr-neg54.1%

        \[\leadsto \log \left(x + \sqrt{1 + \color{blue}{x \cdot x}}\right) \]
      4. hypot-1-def100.0%

        \[\leadsto \log \left(x + \color{blue}{\mathsf{hypot}\left(1, x\right)}\right) \]
    3. Simplified100.0%

      \[\leadsto \color{blue}{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)} \]
    4. Add Preprocessing
    5. Taylor expanded in x around inf 100.0%

      \[\leadsto \log \color{blue}{\left(2 \cdot x\right)} \]
    6. Step-by-step derivation
      1. *-commutative100.0%

        \[\leadsto \log \color{blue}{\left(x \cdot 2\right)} \]
    7. Simplified100.0%

      \[\leadsto \log \color{blue}{\left(x \cdot 2\right)} \]
  3. Recombined 3 regimes into one program.
  4. Final simplification99.6%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -1.26:\\ \;\;\;\;\log \left(\frac{-0.5}{x}\right)\\ \mathbf{elif}\;x \leq 1.25:\\ \;\;\;\;x \cdot \left(1 + {x}^{2} \cdot -0.16666666666666666\right)\\ \mathbf{else}:\\ \;\;\;\;\log \left(x \cdot 2\right)\\ \end{array} \]
  5. Add Preprocessing
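
The two logarithmic branches follow from the asymptotic behaviour used in step 5 of the outer regimes. Rationalizing gives x + sqrt(x² + 1) = 1 / (sqrt(x² + 1) − x); for large negative x the denominator is approximately −2x, and for large positive x the original sum is approximately 2x, so

\[\log \left(x + \sqrt{x^{2} + 1}\right) \approx \log \left(\frac{-0.5}{x}\right) \quad (x \to -\infty), \qquad \log \left(x + \sqrt{x^{2} + 1}\right) \approx \log \left(x \cdot 2\right) \quad (x \to +\infty).\]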

Alternative 5: 99.4% accurate, 1.8× speedup

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq -1.26:\\ \;\;\;\;\log \left(\frac{-0.5}{x}\right)\\ \mathbf{elif}\;x \leq 1.25:\\ \;\;\;\;x + -0.16666666666666666 \cdot {x}^{3}\\ \mathbf{else}:\\ \;\;\;\;\log \left(x \cdot 2\right)\\ \end{array} \end{array} \]
(FPCore (x)
 :precision binary64
 (if (<= x -1.26)
   (log (/ -0.5 x))
   (if (<= x 1.25)
     (+ x (* -0.16666666666666666 (pow x 3.0)))
     (log (* x 2.0)))))
double code(double x) {
	double tmp;
	if (x <= -1.26) {
		tmp = log((-0.5 / x));
	} else if (x <= 1.25) {
		tmp = x + (-0.16666666666666666 * pow(x, 3.0));
	} else {
		tmp = log((x * 2.0));
	}
	return tmp;
}
real(8) function code(x)
    real(8), intent (in) :: x
    real(8) :: tmp
    if (x <= (-1.26d0)) then
        tmp = log(((-0.5d0) / x))
    else if (x <= 1.25d0) then
        tmp = x + ((-0.16666666666666666d0) * (x ** 3.0d0))
    else
        tmp = log((x * 2.0d0))
    end if
    code = tmp
end function
public static double code(double x) {
	double tmp;
	if (x <= -1.26) {
		tmp = Math.log((-0.5 / x));
	} else if (x <= 1.25) {
		tmp = x + (-0.16666666666666666 * Math.pow(x, 3.0));
	} else {
		tmp = Math.log((x * 2.0));
	}
	return tmp;
}
def code(x):
	tmp = 0
	if x <= -1.26:
		tmp = math.log((-0.5 / x))
	elif x <= 1.25:
		tmp = x + (-0.16666666666666666 * math.pow(x, 3.0))
	else:
		tmp = math.log((x * 2.0))
	return tmp
function code(x)
	tmp = 0.0
	if (x <= -1.26)
		tmp = log(Float64(-0.5 / x));
	elseif (x <= 1.25)
		tmp = Float64(x + Float64(-0.16666666666666666 * (x ^ 3.0)));
	else
		tmp = log(Float64(x * 2.0));
	end
	return tmp
end
function tmp_2 = code(x)
	tmp = 0.0;
	if (x <= -1.26)
		tmp = log((-0.5 / x));
	elseif (x <= 1.25)
		tmp = x + (-0.16666666666666666 * (x ^ 3.0));
	else
		tmp = log((x * 2.0));
	end
	tmp_2 = tmp;
end
code[x_] := If[LessEqual[x, -1.26], N[Log[N[(-0.5 / x), $MachinePrecision]], $MachinePrecision], If[LessEqual[x, 1.25], N[(x + N[(-0.16666666666666666 * N[Power[x, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[Log[N[(x * 2.0), $MachinePrecision]], $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x \leq -1.26:\\
\;\;\;\;\log \left(\frac{-0.5}{x}\right)\\

\mathbf{elif}\;x \leq 1.25:\\
\;\;\;\;x + -0.16666666666666666 \cdot {x}^{3}\\

\mathbf{else}:\\
\;\;\;\;\log \left(x \cdot 2\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if x < -1.26000000000000001

    1. Initial program 2.4%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Step-by-step derivation
      1. sqr-neg2.4%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{\left(-x\right) \cdot \left(-x\right)} + 1}\right) \]
      2. +-commutative2.4%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{1 + \left(-x\right) \cdot \left(-x\right)}}\right) \]
      3. sqr-neg2.4%

        \[\leadsto \log \left(x + \sqrt{1 + \color{blue}{x \cdot x}}\right) \]
      4. hypot-1-def3.9%

        \[\leadsto \log \left(x + \color{blue}{\mathsf{hypot}\left(1, x\right)}\right) \]
    3. Simplified3.9%

      \[\leadsto \color{blue}{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)} \]
    4. Add Preprocessing
    5. Taylor expanded in x around -inf 99.7%

      \[\leadsto \log \color{blue}{\left(\frac{-0.5}{x}\right)} \]

    if -1.26000000000000001 < x < 1.25

    1. Initial program 9.2%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Step-by-step derivation
      1. sqr-neg9.2%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{\left(-x\right) \cdot \left(-x\right)} + 1}\right) \]
      2. +-commutative9.2%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{1 + \left(-x\right) \cdot \left(-x\right)}}\right) \]
      3. sqr-neg9.2%

        \[\leadsto \log \left(x + \sqrt{1 + \color{blue}{x \cdot x}}\right) \]
      4. hypot-1-def9.2%

        \[\leadsto \log \left(x + \color{blue}{\mathsf{hypot}\left(1, x\right)}\right) \]
    3. Simplified9.2%

      \[\leadsto \color{blue}{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)} \]
    4. Add Preprocessing
    5. Taylor expanded in x around 0 99.4%

      \[\leadsto \color{blue}{x \cdot \left(1 + -0.16666666666666666 \cdot {x}^{2}\right)} \]
    6. Step-by-step derivation
      1. distribute-rgt-in99.4%

        \[\leadsto \color{blue}{1 \cdot x + \left(-0.16666666666666666 \cdot {x}^{2}\right) \cdot x} \]
      2. *-lft-identity99.4%

        \[\leadsto \color{blue}{x} + \left(-0.16666666666666666 \cdot {x}^{2}\right) \cdot x \]
      3. associate-*l*99.4%

        \[\leadsto x + \color{blue}{-0.16666666666666666 \cdot \left({x}^{2} \cdot x\right)} \]
      4. unpow299.4%

        \[\leadsto x + -0.16666666666666666 \cdot \left(\color{blue}{\left(x \cdot x\right)} \cdot x\right) \]
      5. unpow399.4%

        \[\leadsto x + -0.16666666666666666 \cdot \color{blue}{{x}^{3}} \]
    7. Simplified99.4%

      \[\leadsto \color{blue}{x + -0.16666666666666666 \cdot {x}^{3}} \]

    if 1.25 < x

    1. Initial program 54.1%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Step-by-step derivation
      1. sqr-neg54.1%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{\left(-x\right) \cdot \left(-x\right)} + 1}\right) \]
      2. +-commutative54.1%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{1 + \left(-x\right) \cdot \left(-x\right)}}\right) \]
      3. sqr-neg54.1%

        \[\leadsto \log \left(x + \sqrt{1 + \color{blue}{x \cdot x}}\right) \]
      4. hypot-1-def100.0%

        \[\leadsto \log \left(x + \color{blue}{\mathsf{hypot}\left(1, x\right)}\right) \]
    3. Simplified100.0%

      \[\leadsto \color{blue}{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)} \]
    4. Add Preprocessing
    5. Taylor expanded in x around inf 100.0%

      \[\leadsto \log \color{blue}{\left(2 \cdot x\right)} \]
    6. Step-by-step derivation
      1. *-commutative100.0%

        \[\leadsto \log \color{blue}{\left(x \cdot 2\right)} \]
    7. Simplified100.0%

      \[\leadsto \log \color{blue}{\left(x \cdot 2\right)} \]
  3. Recombined 3 regimes into one program.
  4. Final simplification99.6%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -1.26:\\ \;\;\;\;\log \left(\frac{-0.5}{x}\right)\\ \mathbf{elif}\;x \leq 1.25:\\ \;\;\;\;x + -0.16666666666666666 \cdot {x}^{3}\\ \mathbf{else}:\\ \;\;\;\;\log \left(x \cdot 2\right)\\ \end{array} \]
  5. Add Preprocessing

Alternative 6: 99.1% accurate, 1.8× speedup

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq -1.26:\\ \;\;\;\;\log \left(\frac{-0.5}{x}\right)\\ \mathbf{elif}\;x \leq 1.25:\\ \;\;\;\;x\\ \mathbf{else}:\\ \;\;\;\;\log \left(x \cdot 2\right)\\ \end{array} \end{array} \]
(FPCore (x)
 :precision binary64
 (if (<= x -1.26) (log (/ -0.5 x)) (if (<= x 1.25) x (log (* x 2.0)))))
double code(double x) {
	double tmp;
	if (x <= -1.26) {
		tmp = log((-0.5 / x));
	} else if (x <= 1.25) {
		tmp = x;
	} else {
		tmp = log((x * 2.0));
	}
	return tmp;
}
real(8) function code(x)
    real(8), intent (in) :: x
    real(8) :: tmp
    if (x <= (-1.26d0)) then
        tmp = log(((-0.5d0) / x))
    else if (x <= 1.25d0) then
        tmp = x
    else
        tmp = log((x * 2.0d0))
    end if
    code = tmp
end function
public static double code(double x) {
	double tmp;
	if (x <= -1.26) {
		tmp = Math.log((-0.5 / x));
	} else if (x <= 1.25) {
		tmp = x;
	} else {
		tmp = Math.log((x * 2.0));
	}
	return tmp;
}
def code(x):
	tmp = 0
	if x <= -1.26:
		tmp = math.log((-0.5 / x))
	elif x <= 1.25:
		tmp = x
	else:
		tmp = math.log((x * 2.0))
	return tmp
function code(x)
	tmp = 0.0
	if (x <= -1.26)
		tmp = log(Float64(-0.5 / x));
	elseif (x <= 1.25)
		tmp = x;
	else
		tmp = log(Float64(x * 2.0));
	end
	return tmp
end
function tmp_2 = code(x)
	tmp = 0.0;
	if (x <= -1.26)
		tmp = log((-0.5 / x));
	elseif (x <= 1.25)
		tmp = x;
	else
		tmp = log((x * 2.0));
	end
	tmp_2 = tmp;
end
code[x_] := If[LessEqual[x, -1.26], N[Log[N[(-0.5 / x), $MachinePrecision]], $MachinePrecision], If[LessEqual[x, 1.25], x, N[Log[N[(x * 2.0), $MachinePrecision]], $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x \leq -1.26:\\
\;\;\;\;\log \left(\frac{-0.5}{x}\right)\\

\mathbf{elif}\;x \leq 1.25:\\
\;\;\;\;x\\

\mathbf{else}:\\
\;\;\;\;\log \left(x \cdot 2\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if x < -1.26000000000000001

    1. Initial program 2.4%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Step-by-step derivation
      1. sqr-neg 2.4%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{\left(-x\right) \cdot \left(-x\right)} + 1}\right) \]
      2. +-commutative 2.4%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{1 + \left(-x\right) \cdot \left(-x\right)}}\right) \]
      3. sqr-neg 2.4%

        \[\leadsto \log \left(x + \sqrt{1 + \color{blue}{x \cdot x}}\right) \]
      4. hypot-1-def 3.9%

        \[\leadsto \log \left(x + \color{blue}{\mathsf{hypot}\left(1, x\right)}\right) \]
    3. Simplified 3.9%

      \[\leadsto \color{blue}{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)} \]
    4. Add Preprocessing
    5. Taylor expanded in x around -inf 99.7%

      \[\leadsto \log \color{blue}{\left(\frac{-0.5}{x}\right)} \]

    if -1.26000000000000001 < x < 1.25

    1. Initial program 9.2%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Step-by-step derivation
      1. sqr-neg 9.2%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{\left(-x\right) \cdot \left(-x\right)} + 1}\right) \]
      2. +-commutative 9.2%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{1 + \left(-x\right) \cdot \left(-x\right)}}\right) \]
      3. sqr-neg 9.2%

        \[\leadsto \log \left(x + \sqrt{1 + \color{blue}{x \cdot x}}\right) \]
      4. hypot-1-def 9.2%

        \[\leadsto \log \left(x + \color{blue}{\mathsf{hypot}\left(1, x\right)}\right) \]
    3. Simplified 9.2%

      \[\leadsto \color{blue}{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)} \]
    4. Add Preprocessing
    5. Taylor expanded in x around 0 98.9%

      \[\leadsto \color{blue}{x} \]

    if 1.25 < x

    1. Initial program 54.1%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Step-by-step derivation
      1. sqr-neg 54.1%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{\left(-x\right) \cdot \left(-x\right)} + 1}\right) \]
      2. +-commutative 54.1%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{1 + \left(-x\right) \cdot \left(-x\right)}}\right) \]
      3. sqr-neg 54.1%

        \[\leadsto \log \left(x + \sqrt{1 + \color{blue}{x \cdot x}}\right) \]
      4. hypot-1-def 100.0%

        \[\leadsto \log \left(x + \color{blue}{\mathsf{hypot}\left(1, x\right)}\right) \]
    3. Simplified 100.0%

      \[\leadsto \color{blue}{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)} \]
    4. Add Preprocessing
    5. Taylor expanded in x around inf 100.0%

      \[\leadsto \log \color{blue}{\left(2 \cdot x\right)} \]
    6. Step-by-step derivation
      1. *-commutative 100.0%

        \[\leadsto \log \color{blue}{\left(x \cdot 2\right)} \]
    7. Simplified 100.0%

      \[\leadsto \log \color{blue}{\left(x \cdot 2\right)} \]
  3. Recombined 3 regimes into one program.
  4. Final simplification 99.4%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -1.26:\\ \;\;\;\;\log \left(\frac{-0.5}{x}\right)\\ \mathbf{elif}\;x \leq 1.25:\\ \;\;\;\;x\\ \mathbf{else}:\\ \;\;\;\;\log \left(x \cdot 2\right)\\ \end{array} \]
  5. Add Preprocessing
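
Why the expansion around -inf lands on log(-0.5/x): rationalizing the argument of the logarithm removes the cancellation between x and the square root, and the denominator simplifies in the limit (a sketch, not part of Herbie's output):

\[x + \sqrt{x^2 + 1} = \frac{1}{\sqrt{x^2 + 1} - x} \approx \frac{1}{-2x} = \frac{-0.5}{x} \quad \left(x \to -\infty\right), \qquad \text{so} \quad \log \left(x + \sqrt{x^2 + 1}\right) \approx \log \left(\frac{-0.5}{x}\right) \]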

Alternative 7: 76.3% accurate, 1.9× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq 1.25:\\ \;\;\;\;x\\ \mathbf{else}:\\ \;\;\;\;\log \left(x \cdot 2\right)\\ \end{array} \end{array} \]
(FPCore (x) :precision binary64 (if (<= x 1.25) x (log (* x 2.0))))
double code(double x) {
	double tmp;
	if (x <= 1.25) {
		tmp = x;
	} else {
		tmp = log((x * 2.0));
	}
	return tmp;
}
real(8) function code(x)
    real(8), intent (in) :: x
    real(8) :: tmp
    if (x <= 1.25d0) then
        tmp = x
    else
        tmp = log((x * 2.0d0))
    end if
    code = tmp
end function
public static double code(double x) {
	double tmp;
	if (x <= 1.25) {
		tmp = x;
	} else {
		tmp = Math.log((x * 2.0));
	}
	return tmp;
}
def code(x):
	tmp = 0
	if x <= 1.25:
		tmp = x
	else:
		tmp = math.log((x * 2.0))
	return tmp
function code(x)
	tmp = 0.0
	if (x <= 1.25)
		tmp = x;
	else
		tmp = log(Float64(x * 2.0));
	end
	return tmp
end
function tmp_2 = code(x)
	tmp = 0.0;
	if (x <= 1.25)
		tmp = x;
	else
		tmp = log((x * 2.0));
	end
	tmp_2 = tmp;
end
code[x_] := If[LessEqual[x, 1.25], x, N[Log[N[(x * 2.0), $MachinePrecision]], $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x \leq 1.25:\\
\;\;\;\;x\\

\mathbf{else}:\\
\;\;\;\;\log \left(x \cdot 2\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if x < 1.25

    1. Initial program 6.8%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Step-by-step derivation
      1. sqr-neg 6.8%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{\left(-x\right) \cdot \left(-x\right)} + 1}\right) \]
      2. +-commutative 6.8%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{1 + \left(-x\right) \cdot \left(-x\right)}}\right) \]
      3. sqr-neg 6.8%

        \[\leadsto \log \left(x + \sqrt{1 + \color{blue}{x \cdot x}}\right) \]
      4. hypot-1-def 7.4%

        \[\leadsto \log \left(x + \color{blue}{\mathsf{hypot}\left(1, x\right)}\right) \]
    3. Simplified 7.4%

      \[\leadsto \color{blue}{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)} \]
    4. Add Preprocessing
    5. Taylor expanded in x around 0 65.9%

      \[\leadsto \color{blue}{x} \]

    if 1.25 < x

    1. Initial program 54.1%

      \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
    2. Step-by-step derivation
      1. sqr-neg 54.1%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{\left(-x\right) \cdot \left(-x\right)} + 1}\right) \]
      2. +-commutative 54.1%

        \[\leadsto \log \left(x + \sqrt{\color{blue}{1 + \left(-x\right) \cdot \left(-x\right)}}\right) \]
      3. sqr-neg 54.1%

        \[\leadsto \log \left(x + \sqrt{1 + \color{blue}{x \cdot x}}\right) \]
      4. hypot-1-def 100.0%

        \[\leadsto \log \left(x + \color{blue}{\mathsf{hypot}\left(1, x\right)}\right) \]
    3. Simplified 100.0%

      \[\leadsto \color{blue}{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)} \]
    4. Add Preprocessing
    5. Taylor expanded in x around inf 100.0%

      \[\leadsto \log \color{blue}{\left(2 \cdot x\right)} \]
    6. Step-by-step derivation
      1. *-commutative 100.0%

        \[\leadsto \log \color{blue}{\left(x \cdot 2\right)} \]
    7. Simplified 100.0%

      \[\leadsto \log \color{blue}{\left(x \cdot 2\right)} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification 73.5%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq 1.25:\\ \;\;\;\;x\\ \mathbf{else}:\\ \;\;\;\;\log \left(x \cdot 2\right)\\ \end{array} \]
  5. Add Preprocessing
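
The log(x · 2) branch reflects the matching expansion around +inf: for large positive x the square root is essentially x, so the argument of the logarithm doubles (again a sketch, not taken from the report):

\[\sqrt{x^2 + 1} = x \sqrt{1 + x^{-2}} \approx x + \frac{1}{2x} \quad \left(x \to +\infty\right), \qquad \text{so} \quad x + \sqrt{x^2 + 1} \approx 2x \]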

Alternative 8: 52.6% accurate, 207.0× speedup?

\[\begin{array}{l} \\ x \end{array} \]
(FPCore (x) :precision binary64 x)
double code(double x) {
	return x;
}
real(8) function code(x)
    real(8), intent (in) :: x
    code = x
end function
public static double code(double x) {
	return x;
}
def code(x):
	return x
function code(x)
	return x
end
function tmp = code(x)
	tmp = x;
end
code[x_] := x
\begin{array}{l}

\\
x
\end{array}
Derivation
  1. Initial program 17.4%

    \[\log \left(x + \sqrt{x \cdot x + 1}\right) \]
  2. Step-by-step derivation
    1. sqr-neg 17.4%

      \[\leadsto \log \left(x + \sqrt{\color{blue}{\left(-x\right) \cdot \left(-x\right)} + 1}\right) \]
    2. +-commutative 17.4%

      \[\leadsto \log \left(x + \sqrt{\color{blue}{1 + \left(-x\right) \cdot \left(-x\right)}}\right) \]
    3. sqr-neg 17.4%

      \[\leadsto \log \left(x + \sqrt{1 + \color{blue}{x \cdot x}}\right) \]
    4. hypot-1-def 28.0%

      \[\leadsto \log \left(x + \color{blue}{\mathsf{hypot}\left(1, x\right)}\right) \]
  3. Simplified 28.0%

    \[\leadsto \color{blue}{\log \left(x + \mathsf{hypot}\left(1, x\right)\right)} \]
  4. Add Preprocessing
  5. Taylor expanded in x around 0 52.4%

    \[\leadsto \color{blue}{x} \]
  6. Final simplification 52.4%

    \[\leadsto x \]
  7. Add Preprocessing
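
The 52.6% figure reflects how quickly the bare identity y = x drifts from the true function once |x| is no longer small. A minimal check against Python's math.asinh (an illustration, not taken from the report) makes the trend visible:

import math

# Relative error of the approximation y = x versus math.asinh.
# Near zero the approximation is essentially exact; by |x| ~ 0.5 the
# relative error is already a few percent, and it keeps growing with |x|.
for x in [1e-8, 1e-2, 0.5, 1.0, 10.0]:
    exact = math.asinh(x)
    rel_err = abs(x - exact) / abs(exact)
    print(f"x = {x:8g}   asinh(x) = {exact:.6g}   rel. error of y = x: {rel_err:.2e}")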

Developer target: 30.0% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_0 := \sqrt{x \cdot x + 1}\\ \mathbf{if}\;x < 0:\\ \;\;\;\;\log \left(\frac{-1}{x - t_0}\right)\\ \mathbf{else}:\\ \;\;\;\;\log \left(x + t_0\right)\\ \end{array} \end{array} \]
(FPCore (x)
 :precision binary64
 (let* ((t_0 (sqrt (+ (* x x) 1.0))))
   (if (< x 0.0) (log (/ -1.0 (- x t_0))) (log (+ x t_0)))))
double code(double x) {
	double t_0 = sqrt(((x * x) + 1.0));
	double tmp;
	if (x < 0.0) {
		tmp = log((-1.0 / (x - t_0)));
	} else {
		tmp = log((x + t_0));
	}
	return tmp;
}
real(8) function code(x)
    real(8), intent (in) :: x
    real(8) :: t_0
    real(8) :: tmp
    t_0 = sqrt(((x * x) + 1.0d0))
    if (x < 0.0d0) then
        tmp = log(((-1.0d0) / (x - t_0)))
    else
        tmp = log((x + t_0))
    end if
    code = tmp
end function
public static double code(double x) {
	double t_0 = Math.sqrt(((x * x) + 1.0));
	double tmp;
	if (x < 0.0) {
		tmp = Math.log((-1.0 / (x - t_0)));
	} else {
		tmp = Math.log((x + t_0));
	}
	return tmp;
}
def code(x):
	t_0 = math.sqrt(((x * x) + 1.0))
	tmp = 0
	if x < 0.0:
		tmp = math.log((-1.0 / (x - t_0)))
	else:
		tmp = math.log((x + t_0))
	return tmp
function code(x)
	t_0 = sqrt(Float64(Float64(x * x) + 1.0))
	tmp = 0.0
	if (x < 0.0)
		tmp = log(Float64(-1.0 / Float64(x - t_0)));
	else
		tmp = log(Float64(x + t_0));
	end
	return tmp
end
function tmp_2 = code(x)
	t_0 = sqrt(((x * x) + 1.0));
	tmp = 0.0;
	if (x < 0.0)
		tmp = log((-1.0 / (x - t_0)));
	else
		tmp = log((x + t_0));
	end
	tmp_2 = tmp;
end
code[x_] := Block[{t$95$0 = N[Sqrt[N[(N[(x * x), $MachinePrecision] + 1.0), $MachinePrecision]], $MachinePrecision]}, If[Less[x, 0.0], N[Log[N[(-1.0 / N[(x - t$95$0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision], N[Log[N[(x + t$95$0), $MachinePrecision]], $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
t_0 := \sqrt{x \cdot x + 1}\\
\mathbf{if}\;x < 0:\\
\;\;\;\;\log \left(\frac{-1}{x - t_0}\right)\\

\mathbf{else}:\\
\;\;\;\;\log \left(x + t_0\right)\\


\end{array}
\end{array}
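
The developer target's case split rests on an exact identity: the conjugate product equals -1, so for negative x the expression can be rewritten as a quotient whose denominator adds two terms of the same sign, avoiding the cancellation in x + sqrt(x*x + 1). (An aside, not part of the report: the target still computes x * x directly, so it can overflow for very large |x|, which the hypot-based alternatives avoid.)

\[\left(x + \sqrt{x^2 + 1}\right) \cdot \left(x - \sqrt{x^2 + 1}\right) = x^2 - \left(x^2 + 1\right) = -1, \qquad \text{so} \quad x + \sqrt{x^2 + 1} = \frac{-1}{x - \sqrt{x^2 + 1}} \]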

Reproduce

herbie shell --seed 2024085 
(FPCore (x)
  :name "Hyperbolic arcsine"
  :precision binary64

  :alt
  (if (< x 0.0) (log (/ -1.0 (- x (sqrt (+ (* x x) 1.0))))) (log (+ x (sqrt (+ (* x x) 1.0)))))

  (log (+ x (sqrt (+ (* x x) 1.0)))))