exp2 (problem 3.3.7)

Percentage Accurate: 77.1% → 100.0%
Time: 13.3s
Alternatives: 10
Speedup: 2.0×

Specification

?
\[\begin{array}{l} \\ \left(e^{x} - 2\right) + e^{-x} \end{array} \]
(FPCore (x) :precision binary64 (+ (- (exp x) 2.0) (exp (- x))))
// Original program: computes (e^x - 2) + e^(-x) in double precision.
double code(double x) {
	return (exp(x) - 2.0) + exp(-x);
}
! Original program: computes (e^x - 2) + e^(-x) in double precision.
real(8) function code(x)
    real(8), intent (in) :: x
    code = (exp(x) - 2.0d0) + exp(-x)
end function
// Original program: computes (e^x - 2) + e^(-x) in double precision.
public static double code(double x) {
	return (Math.exp(x) - 2.0) + Math.exp(-x);
}
def code(x):
	"""Original program: return (e**x - 2) + e**(-x); needs module-level `import math`."""
	return (math.exp(x) - 2.0) + math.exp(-x)
# Original program: (e^x - 2) + e^(-x); Float64() forces double-precision rounding per step.
function code(x)
	return Float64(Float64(exp(x) - 2.0) + exp(Float64(-x)))
end
% Original program: computes (e^x - 2) + e^(-x).
function tmp = code(x)
	tmp = (exp(x) - 2.0) + exp(-x);
end
(* Original program: (e^x - 2) + e^(-x), rounding each intermediate to machine precision. *)
code[x_] := N[(N[(N[Exp[x], $MachinePrecision] - 2.0), $MachinePrecision] + N[Exp[(-x)], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\left(e^{x} - 2\right) + e^{-x}
\end{array}

Sampling outcomes in binary64 precision:

Local Percentage Accuracy vs ?

The average percentage accuracy by input value. The horizontal axis shows the value of an input variable; the variable is chosen in the title. The vertical axis is accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion. These can be toggled with buttons below the plot. The line is an average, while dots represent individual samples.

Accuracy vs Speed?

Herbie found 10 alternatives:

Alternative | Accuracy | Speedup
The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed-accuracy tradeoffs.

Initial Program: 77.1% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \left(e^{x} - 2\right) + e^{-x} \end{array} \]
(FPCore (x) :precision binary64 (+ (- (exp x) 2.0) (exp (- x))))
// Initial program (repeated rendering): (e^x - 2) + e^(-x) in double precision.
double code(double x) {
	return (exp(x) - 2.0) + exp(-x);
}
! Initial program (repeated rendering): (e^x - 2) + e^(-x) in double precision.
real(8) function code(x)
    real(8), intent (in) :: x
    code = (exp(x) - 2.0d0) + exp(-x)
end function
// Initial program (repeated rendering): (e^x - 2) + e^(-x) in double precision.
public static double code(double x) {
	return (Math.exp(x) - 2.0) + Math.exp(-x);
}
def code(x):
	"""Initial program (repeated rendering): (e**x - 2) + e**(-x); needs `import math`."""
	return (math.exp(x) - 2.0) + math.exp(-x)
# Initial program (repeated rendering): (e^x - 2) + e^(-x) with explicit Float64 rounding.
function code(x)
	return Float64(Float64(exp(x) - 2.0) + exp(Float64(-x)))
end
% Initial program (repeated rendering): (e^x - 2) + e^(-x).
function tmp = code(x)
	tmp = (exp(x) - 2.0) + exp(-x);
end
(* Initial program (repeated rendering): (e^x - 2) + e^(-x) at machine precision. *)
code[x_] := N[(N[(N[Exp[x], $MachinePrecision] - 2.0), $MachinePrecision] + N[Exp[(-x)], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\left(e^{x} - 2\right) + e^{-x}
\end{array}

Alternative 1: 100.0% accurate, 0.3× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;\left(e^{x} - 2\right) + e^{-x} \leq 0.001:\\ \;\;\;\;\mathsf{fma}\left(0.002777777777777778, {x}^{6}, \mathsf{fma}\left(x, x, \mathsf{fma}\left(0.08333333333333333, {x}^{4}, 4.96031746031746 \cdot 10^{-5} \cdot {x}^{8}\right)\right)\right)\\ \mathbf{else}:\\ \;\;\;\;\mathsf{expm1}\left(\mathsf{log1p}\left(\mathsf{fma}\left(2, \cosh x, -2\right)\right)\right)\\ \end{array} \end{array} \]
(FPCore (x)
 :precision binary64
 (if (<= (+ (- (exp x) 2.0) (exp (- x))) 0.001)
   (fma
    0.002777777777777778
    (pow x 6.0)
    (fma
     x
     x
     (fma
      0.08333333333333333
      (pow x 4.0)
      (* 4.96031746031746e-5 (pow x 8.0)))))
   (expm1 (log1p (fma 2.0 (cosh x) -2.0)))))
// Herbie alternative 1: when the original expression is <= 0.001, evaluate the
// even Taylor polynomial x^2 + x^4/12 + x^6/360 + x^8/20160 assembled with fma;
// otherwise rewrite e^x + e^(-x) - 2 as expm1(log1p(fma(2, cosh(x), -2))).
double code(double x) {
	double tmp;
	if (((exp(x) - 2.0) + exp(-x)) <= 0.001) {
		tmp = fma(0.002777777777777778, pow(x, 6.0), fma(x, x, fma(0.08333333333333333, pow(x, 4.0), (4.96031746031746e-5 * pow(x, 8.0)))));
	} else {
		tmp = expm1(log1p(fma(2.0, cosh(x), -2.0)));
	}
	return tmp;
}
# Herbie alternative 1: fma-based Taylor polynomial for small values of the
# original expression; otherwise expm1(log1p(fma(2, cosh(x), -2))).
function code(x)
	tmp = 0.0
	if (Float64(Float64(exp(x) - 2.0) + exp(Float64(-x))) <= 0.001)
		tmp = fma(0.002777777777777778, (x ^ 6.0), fma(x, x, fma(0.08333333333333333, (x ^ 4.0), Float64(4.96031746031746e-5 * (x ^ 8.0)))));
	else
		tmp = expm1(log1p(fma(2.0, cosh(x), -2.0)));
	end
	return tmp
end
(* Herbie alternative 1: branch between a Taylor polynomial (small values) and an expm1/log1p/cosh rewrite. *)
code[x_] := If[LessEqual[N[(N[(N[Exp[x], $MachinePrecision] - 2.0), $MachinePrecision] + N[Exp[(-x)], $MachinePrecision]), $MachinePrecision], 0.001], N[(0.002777777777777778 * N[Power[x, 6.0], $MachinePrecision] + N[(x * x + N[(0.08333333333333333 * N[Power[x, 4.0], $MachinePrecision] + N[(4.96031746031746e-5 * N[Power[x, 8.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(Exp[N[Log[1 + N[(2.0 * N[Cosh[x], $MachinePrecision] + -2.0), $MachinePrecision]], $MachinePrecision]] - 1), $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;\left(e^{x} - 2\right) + e^{-x} \leq 0.001:\\
\;\;\;\;\mathsf{fma}\left(0.002777777777777778, {x}^{6}, \mathsf{fma}\left(x, x, \mathsf{fma}\left(0.08333333333333333, {x}^{4}, 4.96031746031746 \cdot 10^{-5} \cdot {x}^{8}\right)\right)\right)\\

\mathbf{else}:\\
\;\;\;\;\mathsf{expm1}\left(\mathsf{log1p}\left(\mathsf{fma}\left(2, \cosh x, -2\right)\right)\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if (+.f64 (-.f64 (exp.f64 x) 2) (exp.f64 (neg.f64 x))) < 1e-3

    1. Initial program 54.1%

      \[\left(e^{x} - 2\right) + e^{-x} \]
    2. Step-by-step derivation
      1. associate-+l-54.1%

        \[\leadsto \color{blue}{e^{x} - \left(2 - e^{-x}\right)} \]
      2. sub-neg54.1%

        \[\leadsto \color{blue}{e^{x} + \left(-\left(2 - e^{-x}\right)\right)} \]
      3. sub-neg54.1%

        \[\leadsto e^{x} + \left(-\color{blue}{\left(2 + \left(-e^{-x}\right)\right)}\right) \]
      4. +-commutative54.1%

        \[\leadsto e^{x} + \left(-\color{blue}{\left(\left(-e^{-x}\right) + 2\right)}\right) \]
      5. distribute-neg-in54.1%

        \[\leadsto e^{x} + \color{blue}{\left(\left(-\left(-e^{-x}\right)\right) + \left(-2\right)\right)} \]
      6. remove-double-neg54.1%

        \[\leadsto e^{x} + \left(\color{blue}{e^{-x}} + \left(-2\right)\right) \]
      7. metadata-eval54.1%

        \[\leadsto e^{x} + \left(e^{-x} + \color{blue}{-2}\right) \]
    3. Simplified54.1%

      \[\leadsto \color{blue}{e^{x} + \left(e^{-x} + -2\right)} \]
    4. Taylor expanded in x around 0 100.0%

      \[\leadsto \color{blue}{0.002777777777777778 \cdot {x}^{6} + \left({x}^{2} + \left(0.08333333333333333 \cdot {x}^{4} + 4.96031746031746 \cdot 10^{-5} \cdot {x}^{8}\right)\right)} \]
    5. Step-by-step derivation
      1. fma-def100.0%

        \[\leadsto \color{blue}{\mathsf{fma}\left(0.002777777777777778, {x}^{6}, {x}^{2} + \left(0.08333333333333333 \cdot {x}^{4} + 4.96031746031746 \cdot 10^{-5} \cdot {x}^{8}\right)\right)} \]
      2. unpow2100.0%

        \[\leadsto \mathsf{fma}\left(0.002777777777777778, {x}^{6}, \color{blue}{x \cdot x} + \left(0.08333333333333333 \cdot {x}^{4} + 4.96031746031746 \cdot 10^{-5} \cdot {x}^{8}\right)\right) \]
      3. fma-def100.0%

        \[\leadsto \mathsf{fma}\left(0.002777777777777778, {x}^{6}, \color{blue}{\mathsf{fma}\left(x, x, 0.08333333333333333 \cdot {x}^{4} + 4.96031746031746 \cdot 10^{-5} \cdot {x}^{8}\right)}\right) \]
      4. fma-def100.0%

        \[\leadsto \mathsf{fma}\left(0.002777777777777778, {x}^{6}, \mathsf{fma}\left(x, x, \color{blue}{\mathsf{fma}\left(0.08333333333333333, {x}^{4}, 4.96031746031746 \cdot 10^{-5} \cdot {x}^{8}\right)}\right)\right) \]
    6. Simplified100.0%

      \[\leadsto \color{blue}{\mathsf{fma}\left(0.002777777777777778, {x}^{6}, \mathsf{fma}\left(x, x, \mathsf{fma}\left(0.08333333333333333, {x}^{4}, 4.96031746031746 \cdot 10^{-5} \cdot {x}^{8}\right)\right)\right)} \]

    if 1e-3 < (+.f64 (-.f64 (exp.f64 x) 2) (exp.f64 (neg.f64 x)))

    1. Initial program 99.9%

      \[\left(e^{x} - 2\right) + e^{-x} \]
    2. Step-by-step derivation
      1. associate-+l-100.0%

        \[\leadsto \color{blue}{e^{x} - \left(2 - e^{-x}\right)} \]
      2. sub-neg100.0%

        \[\leadsto \color{blue}{e^{x} + \left(-\left(2 - e^{-x}\right)\right)} \]
      3. sub-neg100.0%

        \[\leadsto e^{x} + \left(-\color{blue}{\left(2 + \left(-e^{-x}\right)\right)}\right) \]
      4. +-commutative100.0%

        \[\leadsto e^{x} + \left(-\color{blue}{\left(\left(-e^{-x}\right) + 2\right)}\right) \]
      5. distribute-neg-in100.0%

        \[\leadsto e^{x} + \color{blue}{\left(\left(-\left(-e^{-x}\right)\right) + \left(-2\right)\right)} \]
      6. remove-double-neg100.0%

        \[\leadsto e^{x} + \left(\color{blue}{e^{-x}} + \left(-2\right)\right) \]
      7. metadata-eval100.0%

        \[\leadsto e^{x} + \left(e^{-x} + \color{blue}{-2}\right) \]
    3. Simplified100.0%

      \[\leadsto \color{blue}{e^{x} + \left(e^{-x} + -2\right)} \]
    4. Step-by-step derivation
      1. +-commutative100.0%

        \[\leadsto e^{x} + \color{blue}{\left(-2 + e^{-x}\right)} \]
      2. associate-+r+99.9%

        \[\leadsto \color{blue}{\left(e^{x} + -2\right) + e^{-x}} \]
      3. metadata-eval99.9%

        \[\leadsto \left(e^{x} + \color{blue}{\left(-2\right)}\right) + e^{-x} \]
      4. sub-neg99.9%

        \[\leadsto \color{blue}{\left(e^{x} - 2\right)} + e^{-x} \]
      5. expm1-log1p-u99.9%

        \[\leadsto \color{blue}{\mathsf{expm1}\left(\mathsf{log1p}\left(\left(e^{x} - 2\right) + e^{-x}\right)\right)} \]
      6. +-commutative99.9%

        \[\leadsto \mathsf{expm1}\left(\mathsf{log1p}\left(\color{blue}{e^{-x} + \left(e^{x} - 2\right)}\right)\right) \]
      7. associate-+r-100.0%

        \[\leadsto \mathsf{expm1}\left(\mathsf{log1p}\left(\color{blue}{\left(e^{-x} + e^{x}\right) - 2}\right)\right) \]
      8. +-commutative100.0%

        \[\leadsto \mathsf{expm1}\left(\mathsf{log1p}\left(\color{blue}{\left(e^{x} + e^{-x}\right)} - 2\right)\right) \]
      9. cosh-undef100.0%

        \[\leadsto \mathsf{expm1}\left(\mathsf{log1p}\left(\color{blue}{2 \cdot \cosh x} - 2\right)\right) \]
      10. fma-neg100.0%

        \[\leadsto \mathsf{expm1}\left(\mathsf{log1p}\left(\color{blue}{\mathsf{fma}\left(2, \cosh x, -2\right)}\right)\right) \]
      11. metadata-eval100.0%

        \[\leadsto \mathsf{expm1}\left(\mathsf{log1p}\left(\mathsf{fma}\left(2, \cosh x, \color{blue}{-2}\right)\right)\right) \]
    5. Applied egg-rr100.0%

      \[\leadsto \color{blue}{\mathsf{expm1}\left(\mathsf{log1p}\left(\mathsf{fma}\left(2, \cosh x, -2\right)\right)\right)} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification100.0%

    \[\leadsto \begin{array}{l} \mathbf{if}\;\left(e^{x} - 2\right) + e^{-x} \leq 0.001:\\ \;\;\;\;\mathsf{fma}\left(0.002777777777777778, {x}^{6}, \mathsf{fma}\left(x, x, \mathsf{fma}\left(0.08333333333333333, {x}^{4}, 4.96031746031746 \cdot 10^{-5} \cdot {x}^{8}\right)\right)\right)\\ \mathbf{else}:\\ \;\;\;\;\mathsf{expm1}\left(\mathsf{log1p}\left(\mathsf{fma}\left(2, \cosh x, -2\right)\right)\right)\\ \end{array} \]

Alternative 2: 100.0% accurate, 0.3× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;\left(e^{x} - 2\right) + e^{-x} \leq 0.001:\\ \;\;\;\;\left(0.002777777777777778 \cdot {x}^{6} + x \cdot x\right) + \left(4.96031746031746 \cdot 10^{-5} \cdot {x}^{8} + 0.08333333333333333 \cdot {x}^{4}\right)\\ \mathbf{else}:\\ \;\;\;\;\mathsf{expm1}\left(\mathsf{log1p}\left(\mathsf{fma}\left(2, \cosh x, -2\right)\right)\right)\\ \end{array} \end{array} \]
(FPCore (x)
 :precision binary64
 (if (<= (+ (- (exp x) 2.0) (exp (- x))) 0.001)
   (+
    (+ (* 0.002777777777777778 (pow x 6.0)) (* x x))
    (+
     (* 4.96031746031746e-5 (pow x 8.0))
     (* 0.08333333333333333 (pow x 4.0))))
   (expm1 (log1p (fma 2.0 (cosh x) -2.0)))))
// Herbie alternative 2: same regimes as alternative 1, but the small-value
// Taylor polynomial uses plain multiply/add instead of fma.
double code(double x) {
	double tmp;
	if (((exp(x) - 2.0) + exp(-x)) <= 0.001) {
		tmp = ((0.002777777777777778 * pow(x, 6.0)) + (x * x)) + ((4.96031746031746e-5 * pow(x, 8.0)) + (0.08333333333333333 * pow(x, 4.0)));
	} else {
		tmp = expm1(log1p(fma(2.0, cosh(x), -2.0)));
	}
	return tmp;
}
# Herbie alternative 2: Taylor polynomial via plain +/* for small values;
# otherwise expm1(log1p(fma(2, cosh(x), -2))).
function code(x)
	tmp = 0.0
	if (Float64(Float64(exp(x) - 2.0) + exp(Float64(-x))) <= 0.001)
		tmp = Float64(Float64(Float64(0.002777777777777778 * (x ^ 6.0)) + Float64(x * x)) + Float64(Float64(4.96031746031746e-5 * (x ^ 8.0)) + Float64(0.08333333333333333 * (x ^ 4.0))));
	else
		tmp = expm1(log1p(fma(2.0, cosh(x), -2.0)));
	end
	return tmp
end
(* Herbie alternative 2: Taylor polynomial (plain arithmetic) for small values, else expm1/log1p/cosh rewrite. *)
code[x_] := If[LessEqual[N[(N[(N[Exp[x], $MachinePrecision] - 2.0), $MachinePrecision] + N[Exp[(-x)], $MachinePrecision]), $MachinePrecision], 0.001], N[(N[(N[(0.002777777777777778 * N[Power[x, 6.0], $MachinePrecision]), $MachinePrecision] + N[(x * x), $MachinePrecision]), $MachinePrecision] + N[(N[(4.96031746031746e-5 * N[Power[x, 8.0], $MachinePrecision]), $MachinePrecision] + N[(0.08333333333333333 * N[Power[x, 4.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(Exp[N[Log[1 + N[(2.0 * N[Cosh[x], $MachinePrecision] + -2.0), $MachinePrecision]], $MachinePrecision]] - 1), $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;\left(e^{x} - 2\right) + e^{-x} \leq 0.001:\\
\;\;\;\;\left(0.002777777777777778 \cdot {x}^{6} + x \cdot x\right) + \left(4.96031746031746 \cdot 10^{-5} \cdot {x}^{8} + 0.08333333333333333 \cdot {x}^{4}\right)\\

\mathbf{else}:\\
\;\;\;\;\mathsf{expm1}\left(\mathsf{log1p}\left(\mathsf{fma}\left(2, \cosh x, -2\right)\right)\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if (+.f64 (-.f64 (exp.f64 x) 2) (exp.f64 (neg.f64 x))) < 1e-3

    1. Initial program 54.1%

      \[\left(e^{x} - 2\right) + e^{-x} \]
    2. Step-by-step derivation
      1. associate-+l-54.1%

        \[\leadsto \color{blue}{e^{x} - \left(2 - e^{-x}\right)} \]
      2. sub-neg54.1%

        \[\leadsto \color{blue}{e^{x} + \left(-\left(2 - e^{-x}\right)\right)} \]
      3. sub-neg54.1%

        \[\leadsto e^{x} + \left(-\color{blue}{\left(2 + \left(-e^{-x}\right)\right)}\right) \]
      4. +-commutative54.1%

        \[\leadsto e^{x} + \left(-\color{blue}{\left(\left(-e^{-x}\right) + 2\right)}\right) \]
      5. distribute-neg-in54.1%

        \[\leadsto e^{x} + \color{blue}{\left(\left(-\left(-e^{-x}\right)\right) + \left(-2\right)\right)} \]
      6. remove-double-neg54.1%

        \[\leadsto e^{x} + \left(\color{blue}{e^{-x}} + \left(-2\right)\right) \]
      7. metadata-eval54.1%

        \[\leadsto e^{x} + \left(e^{-x} + \color{blue}{-2}\right) \]
    3. Simplified54.1%

      \[\leadsto \color{blue}{e^{x} + \left(e^{-x} + -2\right)} \]
    4. Taylor expanded in x around 0 100.0%

      \[\leadsto \color{blue}{0.002777777777777778 \cdot {x}^{6} + \left({x}^{2} + \left(0.08333333333333333 \cdot {x}^{4} + 4.96031746031746 \cdot 10^{-5} \cdot {x}^{8}\right)\right)} \]
    5. Step-by-step derivation
      1. fma-def100.0%

        \[\leadsto \color{blue}{\mathsf{fma}\left(0.002777777777777778, {x}^{6}, {x}^{2} + \left(0.08333333333333333 \cdot {x}^{4} + 4.96031746031746 \cdot 10^{-5} \cdot {x}^{8}\right)\right)} \]
      2. unpow2100.0%

        \[\leadsto \mathsf{fma}\left(0.002777777777777778, {x}^{6}, \color{blue}{x \cdot x} + \left(0.08333333333333333 \cdot {x}^{4} + 4.96031746031746 \cdot 10^{-5} \cdot {x}^{8}\right)\right) \]
      3. fma-def100.0%

        \[\leadsto \mathsf{fma}\left(0.002777777777777778, {x}^{6}, \color{blue}{\mathsf{fma}\left(x, x, 0.08333333333333333 \cdot {x}^{4} + 4.96031746031746 \cdot 10^{-5} \cdot {x}^{8}\right)}\right) \]
      4. fma-def100.0%

        \[\leadsto \mathsf{fma}\left(0.002777777777777778, {x}^{6}, \mathsf{fma}\left(x, x, \color{blue}{\mathsf{fma}\left(0.08333333333333333, {x}^{4}, 4.96031746031746 \cdot 10^{-5} \cdot {x}^{8}\right)}\right)\right) \]
    6. Simplified100.0%

      \[\leadsto \color{blue}{\mathsf{fma}\left(0.002777777777777778, {x}^{6}, \mathsf{fma}\left(x, x, \mathsf{fma}\left(0.08333333333333333, {x}^{4}, 4.96031746031746 \cdot 10^{-5} \cdot {x}^{8}\right)\right)\right)} \]
    7. Step-by-step derivation
      1. fma-udef100.0%

        \[\leadsto \color{blue}{0.002777777777777778 \cdot {x}^{6} + \mathsf{fma}\left(x, x, \mathsf{fma}\left(0.08333333333333333, {x}^{4}, 4.96031746031746 \cdot 10^{-5} \cdot {x}^{8}\right)\right)} \]
      2. fma-udef100.0%

        \[\leadsto 0.002777777777777778 \cdot {x}^{6} + \color{blue}{\left(x \cdot x + \mathsf{fma}\left(0.08333333333333333, {x}^{4}, 4.96031746031746 \cdot 10^{-5} \cdot {x}^{8}\right)\right)} \]
      3. associate-+r+100.0%

        \[\leadsto \color{blue}{\left(0.002777777777777778 \cdot {x}^{6} + x \cdot x\right) + \mathsf{fma}\left(0.08333333333333333, {x}^{4}, 4.96031746031746 \cdot 10^{-5} \cdot {x}^{8}\right)} \]
    8. Applied egg-rr100.0%

      \[\leadsto \color{blue}{\left(0.002777777777777778 \cdot {x}^{6} + x \cdot x\right) + \mathsf{fma}\left(0.08333333333333333, {x}^{4}, 4.96031746031746 \cdot 10^{-5} \cdot {x}^{8}\right)} \]
    9. Taylor expanded in x around 0 100.0%

      \[\leadsto \left(0.002777777777777778 \cdot {x}^{6} + x \cdot x\right) + \color{blue}{\left(0.08333333333333333 \cdot {x}^{4} + 4.96031746031746 \cdot 10^{-5} \cdot {x}^{8}\right)} \]

    if 1e-3 < (+.f64 (-.f64 (exp.f64 x) 2) (exp.f64 (neg.f64 x)))

    1. Initial program 99.9%

      \[\left(e^{x} - 2\right) + e^{-x} \]
    2. Step-by-step derivation
      1. associate-+l-100.0%

        \[\leadsto \color{blue}{e^{x} - \left(2 - e^{-x}\right)} \]
      2. sub-neg100.0%

        \[\leadsto \color{blue}{e^{x} + \left(-\left(2 - e^{-x}\right)\right)} \]
      3. sub-neg100.0%

        \[\leadsto e^{x} + \left(-\color{blue}{\left(2 + \left(-e^{-x}\right)\right)}\right) \]
      4. +-commutative100.0%

        \[\leadsto e^{x} + \left(-\color{blue}{\left(\left(-e^{-x}\right) + 2\right)}\right) \]
      5. distribute-neg-in100.0%

        \[\leadsto e^{x} + \color{blue}{\left(\left(-\left(-e^{-x}\right)\right) + \left(-2\right)\right)} \]
      6. remove-double-neg100.0%

        \[\leadsto e^{x} + \left(\color{blue}{e^{-x}} + \left(-2\right)\right) \]
      7. metadata-eval100.0%

        \[\leadsto e^{x} + \left(e^{-x} + \color{blue}{-2}\right) \]
    3. Simplified100.0%

      \[\leadsto \color{blue}{e^{x} + \left(e^{-x} + -2\right)} \]
    4. Step-by-step derivation
      1. +-commutative100.0%

        \[\leadsto e^{x} + \color{blue}{\left(-2 + e^{-x}\right)} \]
      2. associate-+r+99.9%

        \[\leadsto \color{blue}{\left(e^{x} + -2\right) + e^{-x}} \]
      3. metadata-eval99.9%

        \[\leadsto \left(e^{x} + \color{blue}{\left(-2\right)}\right) + e^{-x} \]
      4. sub-neg99.9%

        \[\leadsto \color{blue}{\left(e^{x} - 2\right)} + e^{-x} \]
      5. expm1-log1p-u99.9%

        \[\leadsto \color{blue}{\mathsf{expm1}\left(\mathsf{log1p}\left(\left(e^{x} - 2\right) + e^{-x}\right)\right)} \]
      6. +-commutative99.9%

        \[\leadsto \mathsf{expm1}\left(\mathsf{log1p}\left(\color{blue}{e^{-x} + \left(e^{x} - 2\right)}\right)\right) \]
      7. associate-+r-100.0%

        \[\leadsto \mathsf{expm1}\left(\mathsf{log1p}\left(\color{blue}{\left(e^{-x} + e^{x}\right) - 2}\right)\right) \]
      8. +-commutative100.0%

        \[\leadsto \mathsf{expm1}\left(\mathsf{log1p}\left(\color{blue}{\left(e^{x} + e^{-x}\right)} - 2\right)\right) \]
      9. cosh-undef100.0%

        \[\leadsto \mathsf{expm1}\left(\mathsf{log1p}\left(\color{blue}{2 \cdot \cosh x} - 2\right)\right) \]
      10. fma-neg100.0%

        \[\leadsto \mathsf{expm1}\left(\mathsf{log1p}\left(\color{blue}{\mathsf{fma}\left(2, \cosh x, -2\right)}\right)\right) \]
      11. metadata-eval100.0%

        \[\leadsto \mathsf{expm1}\left(\mathsf{log1p}\left(\mathsf{fma}\left(2, \cosh x, \color{blue}{-2}\right)\right)\right) \]
    5. Applied egg-rr100.0%

      \[\leadsto \color{blue}{\mathsf{expm1}\left(\mathsf{log1p}\left(\mathsf{fma}\left(2, \cosh x, -2\right)\right)\right)} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification100.0%

    \[\leadsto \begin{array}{l} \mathbf{if}\;\left(e^{x} - 2\right) + e^{-x} \leq 0.001:\\ \;\;\;\;\left(0.002777777777777778 \cdot {x}^{6} + x \cdot x\right) + \left(4.96031746031746 \cdot 10^{-5} \cdot {x}^{8} + 0.08333333333333333 \cdot {x}^{4}\right)\\ \mathbf{else}:\\ \;\;\;\;\mathsf{expm1}\left(\mathsf{log1p}\left(\mathsf{fma}\left(2, \cosh x, -2\right)\right)\right)\\ \end{array} \]

Alternative 3: 100.0% accurate, 0.4× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;\left(e^{x} - 2\right) + e^{-x} \leq 0.001:\\ \;\;\;\;\left(0.002777777777777778 \cdot {x}^{6} + x \cdot x\right) + \left(4.96031746031746 \cdot 10^{-5} \cdot {x}^{8} + 0.08333333333333333 \cdot {x}^{4}\right)\\ \mathbf{else}:\\ \;\;\;\;2 \cdot \cosh x - 2\\ \end{array} \end{array} \]
(FPCore (x)
 :precision binary64
 (if (<= (+ (- (exp x) 2.0) (exp (- x))) 0.001)
   (+
    (+ (* 0.002777777777777778 (pow x 6.0)) (* x x))
    (+
     (* 4.96031746031746e-5 (pow x 8.0))
     (* 0.08333333333333333 (pow x 4.0))))
   (- (* 2.0 (cosh x)) 2.0)))
// Herbie alternative 3: Taylor polynomial (through x^8) for small values of the
// original expression; otherwise the simpler identity 2*cosh(x) - 2.
double code(double x) {
	double tmp;
	if (((exp(x) - 2.0) + exp(-x)) <= 0.001) {
		tmp = ((0.002777777777777778 * pow(x, 6.0)) + (x * x)) + ((4.96031746031746e-5 * pow(x, 8.0)) + (0.08333333333333333 * pow(x, 4.0)));
	} else {
		tmp = (2.0 * cosh(x)) - 2.0;
	}
	return tmp;
}
! Herbie alternative 3: Taylor polynomial (through x**8) for small values;
! otherwise 2*cosh(x) - 2.
real(8) function code(x)
    real(8), intent (in) :: x
    real(8) :: tmp
    if (((exp(x) - 2.0d0) + exp(-x)) <= 0.001d0) then
        tmp = ((0.002777777777777778d0 * (x ** 6.0d0)) + (x * x)) + ((4.96031746031746d-5 * (x ** 8.0d0)) + (0.08333333333333333d0 * (x ** 4.0d0)))
    else
        tmp = (2.0d0 * cosh(x)) - 2.0d0
    end if
    code = tmp
end function
// Herbie alternative 3: Taylor polynomial (through x^8) for small values;
// otherwise 2*Math.cosh(x) - 2.
public static double code(double x) {
	double tmp;
	if (((Math.exp(x) - 2.0) + Math.exp(-x)) <= 0.001) {
		tmp = ((0.002777777777777778 * Math.pow(x, 6.0)) + (x * x)) + ((4.96031746031746e-5 * Math.pow(x, 8.0)) + (0.08333333333333333 * Math.pow(x, 4.0)));
	} else {
		tmp = (2.0 * Math.cosh(x)) - 2.0;
	}
	return tmp;
}
def code(x):
	"""Herbie alternative 3: Taylor polynomial (through x**8) for small values; else 2*cosh(x) - 2. Needs `import math`."""
	tmp = 0
	if ((math.exp(x) - 2.0) + math.exp(-x)) <= 0.001:
		tmp = ((0.002777777777777778 * math.pow(x, 6.0)) + (x * x)) + ((4.96031746031746e-5 * math.pow(x, 8.0)) + (0.08333333333333333 * math.pow(x, 4.0)))
	else:
		tmp = (2.0 * math.cosh(x)) - 2.0
	return tmp
# Herbie alternative 3: Taylor polynomial (through x^8) for small values;
# otherwise 2*cosh(x) - 2.
function code(x)
	tmp = 0.0
	if (Float64(Float64(exp(x) - 2.0) + exp(Float64(-x))) <= 0.001)
		tmp = Float64(Float64(Float64(0.002777777777777778 * (x ^ 6.0)) + Float64(x * x)) + Float64(Float64(4.96031746031746e-5 * (x ^ 8.0)) + Float64(0.08333333333333333 * (x ^ 4.0))));
	else
		tmp = Float64(Float64(2.0 * cosh(x)) - 2.0);
	end
	return tmp
end
% Herbie alternative 3: Taylor polynomial (through x^8) for small values;
% otherwise 2*cosh(x) - 2.
function tmp_2 = code(x)
	tmp = 0.0;
	if (((exp(x) - 2.0) + exp(-x)) <= 0.001)
		tmp = ((0.002777777777777778 * (x ^ 6.0)) + (x * x)) + ((4.96031746031746e-5 * (x ^ 8.0)) + (0.08333333333333333 * (x ^ 4.0)));
	else
		tmp = (2.0 * cosh(x)) - 2.0;
	end
	tmp_2 = tmp;
end
(* Herbie alternative 3: Taylor polynomial (through x^8) for small values, else 2*Cosh[x] - 2. *)
code[x_] := If[LessEqual[N[(N[(N[Exp[x], $MachinePrecision] - 2.0), $MachinePrecision] + N[Exp[(-x)], $MachinePrecision]), $MachinePrecision], 0.001], N[(N[(N[(0.002777777777777778 * N[Power[x, 6.0], $MachinePrecision]), $MachinePrecision] + N[(x * x), $MachinePrecision]), $MachinePrecision] + N[(N[(4.96031746031746e-5 * N[Power[x, 8.0], $MachinePrecision]), $MachinePrecision] + N[(0.08333333333333333 * N[Power[x, 4.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(2.0 * N[Cosh[x], $MachinePrecision]), $MachinePrecision] - 2.0), $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;\left(e^{x} - 2\right) + e^{-x} \leq 0.001:\\
\;\;\;\;\left(0.002777777777777778 \cdot {x}^{6} + x \cdot x\right) + \left(4.96031746031746 \cdot 10^{-5} \cdot {x}^{8} + 0.08333333333333333 \cdot {x}^{4}\right)\\

\mathbf{else}:\\
\;\;\;\;2 \cdot \cosh x - 2\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if (+.f64 (-.f64 (exp.f64 x) 2) (exp.f64 (neg.f64 x))) < 1e-3

    1. Initial program 54.1%

      \[\left(e^{x} - 2\right) + e^{-x} \]
    2. Step-by-step derivation
      1. associate-+l-54.1%

        \[\leadsto \color{blue}{e^{x} - \left(2 - e^{-x}\right)} \]
      2. sub-neg54.1%

        \[\leadsto \color{blue}{e^{x} + \left(-\left(2 - e^{-x}\right)\right)} \]
      3. sub-neg54.1%

        \[\leadsto e^{x} + \left(-\color{blue}{\left(2 + \left(-e^{-x}\right)\right)}\right) \]
      4. +-commutative54.1%

        \[\leadsto e^{x} + \left(-\color{blue}{\left(\left(-e^{-x}\right) + 2\right)}\right) \]
      5. distribute-neg-in54.1%

        \[\leadsto e^{x} + \color{blue}{\left(\left(-\left(-e^{-x}\right)\right) + \left(-2\right)\right)} \]
      6. remove-double-neg54.1%

        \[\leadsto e^{x} + \left(\color{blue}{e^{-x}} + \left(-2\right)\right) \]
      7. metadata-eval54.1%

        \[\leadsto e^{x} + \left(e^{-x} + \color{blue}{-2}\right) \]
    3. Simplified54.1%

      \[\leadsto \color{blue}{e^{x} + \left(e^{-x} + -2\right)} \]
    4. Taylor expanded in x around 0 100.0%

      \[\leadsto \color{blue}{0.002777777777777778 \cdot {x}^{6} + \left({x}^{2} + \left(0.08333333333333333 \cdot {x}^{4} + 4.96031746031746 \cdot 10^{-5} \cdot {x}^{8}\right)\right)} \]
    5. Step-by-step derivation
      1. fma-def100.0%

        \[\leadsto \color{blue}{\mathsf{fma}\left(0.002777777777777778, {x}^{6}, {x}^{2} + \left(0.08333333333333333 \cdot {x}^{4} + 4.96031746031746 \cdot 10^{-5} \cdot {x}^{8}\right)\right)} \]
      2. unpow2100.0%

        \[\leadsto \mathsf{fma}\left(0.002777777777777778, {x}^{6}, \color{blue}{x \cdot x} + \left(0.08333333333333333 \cdot {x}^{4} + 4.96031746031746 \cdot 10^{-5} \cdot {x}^{8}\right)\right) \]
      3. fma-def100.0%

        \[\leadsto \mathsf{fma}\left(0.002777777777777778, {x}^{6}, \color{blue}{\mathsf{fma}\left(x, x, 0.08333333333333333 \cdot {x}^{4} + 4.96031746031746 \cdot 10^{-5} \cdot {x}^{8}\right)}\right) \]
      4. fma-def100.0%

        \[\leadsto \mathsf{fma}\left(0.002777777777777778, {x}^{6}, \mathsf{fma}\left(x, x, \color{blue}{\mathsf{fma}\left(0.08333333333333333, {x}^{4}, 4.96031746031746 \cdot 10^{-5} \cdot {x}^{8}\right)}\right)\right) \]
    6. Simplified100.0%

      \[\leadsto \color{blue}{\mathsf{fma}\left(0.002777777777777778, {x}^{6}, \mathsf{fma}\left(x, x, \mathsf{fma}\left(0.08333333333333333, {x}^{4}, 4.96031746031746 \cdot 10^{-5} \cdot {x}^{8}\right)\right)\right)} \]
    7. Step-by-step derivation
      1. fma-udef100.0%

        \[\leadsto \color{blue}{0.002777777777777778 \cdot {x}^{6} + \mathsf{fma}\left(x, x, \mathsf{fma}\left(0.08333333333333333, {x}^{4}, 4.96031746031746 \cdot 10^{-5} \cdot {x}^{8}\right)\right)} \]
      2. fma-udef100.0%

        \[\leadsto 0.002777777777777778 \cdot {x}^{6} + \color{blue}{\left(x \cdot x + \mathsf{fma}\left(0.08333333333333333, {x}^{4}, 4.96031746031746 \cdot 10^{-5} \cdot {x}^{8}\right)\right)} \]
      3. associate-+r+100.0%

        \[\leadsto \color{blue}{\left(0.002777777777777778 \cdot {x}^{6} + x \cdot x\right) + \mathsf{fma}\left(0.08333333333333333, {x}^{4}, 4.96031746031746 \cdot 10^{-5} \cdot {x}^{8}\right)} \]
    8. Applied egg-rr100.0%

      \[\leadsto \color{blue}{\left(0.002777777777777778 \cdot {x}^{6} + x \cdot x\right) + \mathsf{fma}\left(0.08333333333333333, {x}^{4}, 4.96031746031746 \cdot 10^{-5} \cdot {x}^{8}\right)} \]
    9. Taylor expanded in x around 0 100.0%

      \[\leadsto \left(0.002777777777777778 \cdot {x}^{6} + x \cdot x\right) + \color{blue}{\left(0.08333333333333333 \cdot {x}^{4} + 4.96031746031746 \cdot 10^{-5} \cdot {x}^{8}\right)} \]

    if 1e-3 < (+.f64 (-.f64 (exp.f64 x) 2) (exp.f64 (neg.f64 x)))

    1. Initial program 99.9%

      \[\left(e^{x} - 2\right) + e^{-x} \]
    2. Step-by-step derivation
      1. associate-+l-100.0%

        \[\leadsto \color{blue}{e^{x} - \left(2 - e^{-x}\right)} \]
      2. sub-neg100.0%

        \[\leadsto \color{blue}{e^{x} + \left(-\left(2 - e^{-x}\right)\right)} \]
      3. sub-neg100.0%

        \[\leadsto e^{x} + \left(-\color{blue}{\left(2 + \left(-e^{-x}\right)\right)}\right) \]
      4. +-commutative100.0%

        \[\leadsto e^{x} + \left(-\color{blue}{\left(\left(-e^{-x}\right) + 2\right)}\right) \]
      5. distribute-neg-in100.0%

        \[\leadsto e^{x} + \color{blue}{\left(\left(-\left(-e^{-x}\right)\right) + \left(-2\right)\right)} \]
      6. remove-double-neg100.0%

        \[\leadsto e^{x} + \left(\color{blue}{e^{-x}} + \left(-2\right)\right) \]
      7. metadata-eval100.0%

        \[\leadsto e^{x} + \left(e^{-x} + \color{blue}{-2}\right) \]
    3. Simplified100.0%

      \[\leadsto \color{blue}{e^{x} + \left(e^{-x} + -2\right)} \]
    4. Step-by-step derivation
      1. associate-+r+100.0%

        \[\leadsto \color{blue}{\left(e^{x} + e^{-x}\right) + -2} \]
      2. cosh-undef100.0%

        \[\leadsto \color{blue}{2 \cdot \cosh x} + -2 \]
      3. fma-def100.0%

        \[\leadsto \color{blue}{\mathsf{fma}\left(2, \cosh x, -2\right)} \]
      4. metadata-eval100.0%

        \[\leadsto \mathsf{fma}\left(2, \cosh x, \color{blue}{-2}\right) \]
      5. fma-neg100.0%

        \[\leadsto \color{blue}{2 \cdot \cosh x - 2} \]
    5. Applied egg-rr100.0%

      \[\leadsto \color{blue}{2 \cdot \cosh x - 2} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification100.0%

    \[\leadsto \begin{array}{l} \mathbf{if}\;\left(e^{x} - 2\right) + e^{-x} \leq 0.001:\\ \;\;\;\;\left(0.002777777777777778 \cdot {x}^{6} + x \cdot x\right) + \left(4.96031746031746 \cdot 10^{-5} \cdot {x}^{8} + 0.08333333333333333 \cdot {x}^{4}\right)\\ \mathbf{else}:\\ \;\;\;\;2 \cdot \cosh x - 2\\ \end{array} \]

Alternative 4: 100.0% accurate, 0.5× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;\left(e^{x} - 2\right) + e^{-x} \leq 0.001:\\ \;\;\;\;\left(0.002777777777777778 \cdot {x}^{6} + x \cdot x\right) + 0.08333333333333333 \cdot {x}^{4}\\ \mathbf{else}:\\ \;\;\;\;2 \cdot \cosh x - 2\\ \end{array} \end{array} \]
(FPCore (x)
 :precision binary64
 (if (<= (+ (- (exp x) 2.0) (exp (- x))) 0.001)
   (+
    (+ (* 0.002777777777777778 (pow x 6.0)) (* x x))
    (* 0.08333333333333333 (pow x 4.0)))
   (- (* 2.0 (cosh x)) 2.0)))
// Herbie alternative 4: like alternative 3 but the small-value Taylor
// polynomial drops the x^8 term (stops at x^6).
double code(double x) {
	double tmp;
	if (((exp(x) - 2.0) + exp(-x)) <= 0.001) {
		tmp = ((0.002777777777777778 * pow(x, 6.0)) + (x * x)) + (0.08333333333333333 * pow(x, 4.0));
	} else {
		tmp = (2.0 * cosh(x)) - 2.0;
	}
	return tmp;
}
! Herbie alternative 4: Taylor polynomial truncated at x**6 for small values;
! otherwise 2*cosh(x) - 2.
real(8) function code(x)
    real(8), intent (in) :: x
    real(8) :: tmp
    if (((exp(x) - 2.0d0) + exp(-x)) <= 0.001d0) then
        tmp = ((0.002777777777777778d0 * (x ** 6.0d0)) + (x * x)) + (0.08333333333333333d0 * (x ** 4.0d0))
    else
        tmp = (2.0d0 * cosh(x)) - 2.0d0
    end if
    code = tmp
end function
// Herbie alternative 4: Taylor polynomial truncated at x^6 for small values;
// otherwise 2*Math.cosh(x) - 2.
public static double code(double x) {
	double tmp;
	if (((Math.exp(x) - 2.0) + Math.exp(-x)) <= 0.001) {
		tmp = ((0.002777777777777778 * Math.pow(x, 6.0)) + (x * x)) + (0.08333333333333333 * Math.pow(x, 4.0));
	} else {
		tmp = (2.0 * Math.cosh(x)) - 2.0;
	}
	return tmp;
}
def code(x):
	"""Herbie alternative 4: Taylor polynomial truncated at x**6 for small values; else 2*cosh(x) - 2. Needs `import math`."""
	tmp = 0
	if ((math.exp(x) - 2.0) + math.exp(-x)) <= 0.001:
		tmp = ((0.002777777777777778 * math.pow(x, 6.0)) + (x * x)) + (0.08333333333333333 * math.pow(x, 4.0))
	else:
		tmp = (2.0 * math.cosh(x)) - 2.0
	return tmp
# Herbie alternative 4: Taylor polynomial truncated at x^6 for small values;
# otherwise 2*cosh(x) - 2.
function code(x)
	tmp = 0.0
	if (Float64(Float64(exp(x) - 2.0) + exp(Float64(-x))) <= 0.001)
		tmp = Float64(Float64(Float64(0.002777777777777778 * (x ^ 6.0)) + Float64(x * x)) + Float64(0.08333333333333333 * (x ^ 4.0)));
	else
		tmp = Float64(Float64(2.0 * cosh(x)) - 2.0);
	end
	return tmp
end
function tmp_2 = code(x)
	% Herbie alternative 4: Taylor polynomial x^2 + x^4/12 + x^6/360 in the
	% cancellation region (direct form <= 1e-3), else 2*cosh(x) - 2.
	tmp = 0.0;
	if (((exp(x) - 2.0) + exp(-x)) <= 0.001)
		tmp = ((0.002777777777777778 * (x ^ 6.0)) + (x * x)) + (0.08333333333333333 * (x ^ 4.0));
	else
		tmp = (2.0 * cosh(x)) - 2.0;
	end
	tmp_2 = tmp;
end
code[x_] := If[LessEqual[N[(N[(N[Exp[x], $MachinePrecision] - 2.0), $MachinePrecision] + N[Exp[(-x)], $MachinePrecision]), $MachinePrecision], 0.001], N[(N[(N[(0.002777777777777778 * N[Power[x, 6.0], $MachinePrecision]), $MachinePrecision] + N[(x * x), $MachinePrecision]), $MachinePrecision] + N[(0.08333333333333333 * N[Power[x, 4.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(2.0 * N[Cosh[x], $MachinePrecision]), $MachinePrecision] - 2.0), $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;\left(e^{x} - 2\right) + e^{-x} \leq 0.001:\\
\;\;\;\;\left(0.002777777777777778 \cdot {x}^{6} + x \cdot x\right) + 0.08333333333333333 \cdot {x}^{4}\\

\mathbf{else}:\\
\;\;\;\;2 \cdot \cosh x - 2\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if (+.f64 (-.f64 (exp.f64 x) 2) (exp.f64 (neg.f64 x))) < 1e-3

    1. Initial program 54.1%

      \[\left(e^{x} - 2\right) + e^{-x} \]
    2. Step-by-step derivation
      1. associate-+l-54.1%

        \[\leadsto \color{blue}{e^{x} - \left(2 - e^{-x}\right)} \]
      2. sub-neg54.1%

        \[\leadsto \color{blue}{e^{x} + \left(-\left(2 - e^{-x}\right)\right)} \]
      3. sub-neg54.1%

        \[\leadsto e^{x} + \left(-\color{blue}{\left(2 + \left(-e^{-x}\right)\right)}\right) \]
      4. +-commutative54.1%

        \[\leadsto e^{x} + \left(-\color{blue}{\left(\left(-e^{-x}\right) + 2\right)}\right) \]
      5. distribute-neg-in54.1%

        \[\leadsto e^{x} + \color{blue}{\left(\left(-\left(-e^{-x}\right)\right) + \left(-2\right)\right)} \]
      6. remove-double-neg54.1%

        \[\leadsto e^{x} + \left(\color{blue}{e^{-x}} + \left(-2\right)\right) \]
      7. metadata-eval54.1%

        \[\leadsto e^{x} + \left(e^{-x} + \color{blue}{-2}\right) \]
    3. Simplified54.1%

      \[\leadsto \color{blue}{e^{x} + \left(e^{-x} + -2\right)} \]
    4. Taylor expanded in x around 0 100.0%

      \[\leadsto \color{blue}{0.002777777777777778 \cdot {x}^{6} + \left({x}^{2} + \left(0.08333333333333333 \cdot {x}^{4} + 4.96031746031746 \cdot 10^{-5} \cdot {x}^{8}\right)\right)} \]
    5. Step-by-step derivation
      1. fma-def100.0%

        \[\leadsto \color{blue}{\mathsf{fma}\left(0.002777777777777778, {x}^{6}, {x}^{2} + \left(0.08333333333333333 \cdot {x}^{4} + 4.96031746031746 \cdot 10^{-5} \cdot {x}^{8}\right)\right)} \]
      2. unpow2100.0%

        \[\leadsto \mathsf{fma}\left(0.002777777777777778, {x}^{6}, \color{blue}{x \cdot x} + \left(0.08333333333333333 \cdot {x}^{4} + 4.96031746031746 \cdot 10^{-5} \cdot {x}^{8}\right)\right) \]
      3. fma-def100.0%

        \[\leadsto \mathsf{fma}\left(0.002777777777777778, {x}^{6}, \color{blue}{\mathsf{fma}\left(x, x, 0.08333333333333333 \cdot {x}^{4} + 4.96031746031746 \cdot 10^{-5} \cdot {x}^{8}\right)}\right) \]
      4. fma-def100.0%

        \[\leadsto \mathsf{fma}\left(0.002777777777777778, {x}^{6}, \mathsf{fma}\left(x, x, \color{blue}{\mathsf{fma}\left(0.08333333333333333, {x}^{4}, 4.96031746031746 \cdot 10^{-5} \cdot {x}^{8}\right)}\right)\right) \]
    6. Simplified100.0%

      \[\leadsto \color{blue}{\mathsf{fma}\left(0.002777777777777778, {x}^{6}, \mathsf{fma}\left(x, x, \mathsf{fma}\left(0.08333333333333333, {x}^{4}, 4.96031746031746 \cdot 10^{-5} \cdot {x}^{8}\right)\right)\right)} \]
    7. Step-by-step derivation
      1. fma-udef100.0%

        \[\leadsto \color{blue}{0.002777777777777778 \cdot {x}^{6} + \mathsf{fma}\left(x, x, \mathsf{fma}\left(0.08333333333333333, {x}^{4}, 4.96031746031746 \cdot 10^{-5} \cdot {x}^{8}\right)\right)} \]
      2. fma-udef100.0%

        \[\leadsto 0.002777777777777778 \cdot {x}^{6} + \color{blue}{\left(x \cdot x + \mathsf{fma}\left(0.08333333333333333, {x}^{4}, 4.96031746031746 \cdot 10^{-5} \cdot {x}^{8}\right)\right)} \]
      3. associate-+r+100.0%

        \[\leadsto \color{blue}{\left(0.002777777777777778 \cdot {x}^{6} + x \cdot x\right) + \mathsf{fma}\left(0.08333333333333333, {x}^{4}, 4.96031746031746 \cdot 10^{-5} \cdot {x}^{8}\right)} \]
    8. Applied egg-rr100.0%

      \[\leadsto \color{blue}{\left(0.002777777777777778 \cdot {x}^{6} + x \cdot x\right) + \mathsf{fma}\left(0.08333333333333333, {x}^{4}, 4.96031746031746 \cdot 10^{-5} \cdot {x}^{8}\right)} \]
    9. Taylor expanded in x around 0 99.9%

      \[\leadsto \left(0.002777777777777778 \cdot {x}^{6} + x \cdot x\right) + \color{blue}{0.08333333333333333 \cdot {x}^{4}} \]

    if 1e-3 < (+.f64 (-.f64 (exp.f64 x) 2) (exp.f64 (neg.f64 x)))

    1. Initial program 99.9%

      \[\left(e^{x} - 2\right) + e^{-x} \]
    2. Step-by-step derivation
      1. associate-+l-100.0%

        \[\leadsto \color{blue}{e^{x} - \left(2 - e^{-x}\right)} \]
      2. sub-neg100.0%

        \[\leadsto \color{blue}{e^{x} + \left(-\left(2 - e^{-x}\right)\right)} \]
      3. sub-neg100.0%

        \[\leadsto e^{x} + \left(-\color{blue}{\left(2 + \left(-e^{-x}\right)\right)}\right) \]
      4. +-commutative100.0%

        \[\leadsto e^{x} + \left(-\color{blue}{\left(\left(-e^{-x}\right) + 2\right)}\right) \]
      5. distribute-neg-in100.0%

        \[\leadsto e^{x} + \color{blue}{\left(\left(-\left(-e^{-x}\right)\right) + \left(-2\right)\right)} \]
      6. remove-double-neg100.0%

        \[\leadsto e^{x} + \left(\color{blue}{e^{-x}} + \left(-2\right)\right) \]
      7. metadata-eval100.0%

        \[\leadsto e^{x} + \left(e^{-x} + \color{blue}{-2}\right) \]
    3. Simplified100.0%

      \[\leadsto \color{blue}{e^{x} + \left(e^{-x} + -2\right)} \]
    4. Step-by-step derivation
      1. associate-+r+100.0%

        \[\leadsto \color{blue}{\left(e^{x} + e^{-x}\right) + -2} \]
      2. cosh-undef100.0%

        \[\leadsto \color{blue}{2 \cdot \cosh x} + -2 \]
      3. fma-def100.0%

        \[\leadsto \color{blue}{\mathsf{fma}\left(2, \cosh x, -2\right)} \]
      4. metadata-eval100.0%

        \[\leadsto \mathsf{fma}\left(2, \cosh x, \color{blue}{-2}\right) \]
      5. fma-neg100.0%

        \[\leadsto \color{blue}{2 \cdot \cosh x - 2} \]
    5. Applied egg-rr100.0%

      \[\leadsto \color{blue}{2 \cdot \cosh x - 2} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification99.9%

    \[\leadsto \begin{array}{l} \mathbf{if}\;\left(e^{x} - 2\right) + e^{-x} \leq 0.001:\\ \;\;\;\;\left(0.002777777777777778 \cdot {x}^{6} + x \cdot x\right) + 0.08333333333333333 \cdot {x}^{4}\\ \mathbf{else}:\\ \;\;\;\;2 \cdot \cosh x - 2\\ \end{array} \]

Alternative 5: 99.9% accurate, 0.5× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;\left(e^{x} - 2\right) + e^{-x} \leq 10^{-6}:\\ \;\;\;\;\mathsf{fma}\left(x, x, 0.08333333333333333 \cdot {x}^{4}\right)\\ \mathbf{else}:\\ \;\;\;\;2 \cdot \cosh x - 2\\ \end{array} \end{array} \]
(FPCore (x)
 :precision binary64
 (if (<= (+ (- (exp x) 2.0) (exp (- x))) 1e-6)
   (fma x x (* 0.08333333333333333 (pow x 4.0)))
   (- (* 2.0 (cosh x)) 2.0)))
double code(double x) {
	double tmp;
	if (((exp(x) - 2.0) + exp(-x)) <= 1e-6) {
		tmp = fma(x, x, (0.08333333333333333 * pow(x, 4.0)));
	} else {
		tmp = (2.0 * cosh(x)) - 2.0;
	}
	return tmp;
}
# Herbie alternative 5: fma-based Taylor polynomial x^2 + x^4/12 in the
# cancellation region (direct form <= 1e-6), else 2*cosh(x) - 2.
# Float64() wrappers pin intermediates to binary64 rounding.
function code(x)
	tmp = 0.0
	if (Float64(Float64(exp(x) - 2.0) + exp(Float64(-x))) <= 1e-6)
		tmp = fma(x, x, Float64(0.08333333333333333 * (x ^ 4.0)));
	else
		tmp = Float64(Float64(2.0 * cosh(x)) - 2.0);
	end
	return tmp
end
code[x_] := If[LessEqual[N[(N[(N[Exp[x], $MachinePrecision] - 2.0), $MachinePrecision] + N[Exp[(-x)], $MachinePrecision]), $MachinePrecision], 1e-6], N[(x * x + N[(0.08333333333333333 * N[Power[x, 4.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(2.0 * N[Cosh[x], $MachinePrecision]), $MachinePrecision] - 2.0), $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;\left(e^{x} - 2\right) + e^{-x} \leq 10^{-6}:\\
\;\;\;\;\mathsf{fma}\left(x, x, 0.08333333333333333 \cdot {x}^{4}\right)\\

\mathbf{else}:\\
\;\;\;\;2 \cdot \cosh x - 2\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if (+.f64 (-.f64 (exp.f64 x) 2) (exp.f64 (neg.f64 x))) < 9.99999999999999955e-7

    1. Initial program 53.9%

      \[\left(e^{x} - 2\right) + e^{-x} \]
    2. Step-by-step derivation
      1. associate-+l-53.8%

        \[\leadsto \color{blue}{e^{x} - \left(2 - e^{-x}\right)} \]
      2. sub-neg53.8%

        \[\leadsto \color{blue}{e^{x} + \left(-\left(2 - e^{-x}\right)\right)} \]
      3. sub-neg53.8%

        \[\leadsto e^{x} + \left(-\color{blue}{\left(2 + \left(-e^{-x}\right)\right)}\right) \]
      4. +-commutative53.8%

        \[\leadsto e^{x} + \left(-\color{blue}{\left(\left(-e^{-x}\right) + 2\right)}\right) \]
      5. distribute-neg-in53.8%

        \[\leadsto e^{x} + \color{blue}{\left(\left(-\left(-e^{-x}\right)\right) + \left(-2\right)\right)} \]
      6. remove-double-neg53.8%

        \[\leadsto e^{x} + \left(\color{blue}{e^{-x}} + \left(-2\right)\right) \]
      7. metadata-eval53.8%

        \[\leadsto e^{x} + \left(e^{-x} + \color{blue}{-2}\right) \]
    3. Simplified53.8%

      \[\leadsto \color{blue}{e^{x} + \left(e^{-x} + -2\right)} \]
    4. Taylor expanded in x around 0 99.9%

      \[\leadsto \color{blue}{{x}^{2} + 0.08333333333333333 \cdot {x}^{4}} \]
    5. Step-by-step derivation
      1. unpow299.9%

        \[\leadsto \color{blue}{x \cdot x} + 0.08333333333333333 \cdot {x}^{4} \]
    6. Simplified99.9%

      \[\leadsto \color{blue}{x \cdot x + 0.08333333333333333 \cdot {x}^{4}} \]
    7. Step-by-step derivation
      1. fma-def99.9%

        \[\leadsto \color{blue}{\mathsf{fma}\left(x, x, 0.08333333333333333 \cdot {x}^{4}\right)} \]
    8. Applied egg-rr99.9%

      \[\leadsto \color{blue}{\mathsf{fma}\left(x, x, 0.08333333333333333 \cdot {x}^{4}\right)} \]

    if 9.99999999999999955e-7 < (+.f64 (-.f64 (exp.f64 x) 2) (exp.f64 (neg.f64 x)))

    1. Initial program 99.8%

      \[\left(e^{x} - 2\right) + e^{-x} \]
    2. Step-by-step derivation
      1. associate-+l-99.8%

        \[\leadsto \color{blue}{e^{x} - \left(2 - e^{-x}\right)} \]
      2. sub-neg99.8%

        \[\leadsto \color{blue}{e^{x} + \left(-\left(2 - e^{-x}\right)\right)} \]
      3. sub-neg99.8%

        \[\leadsto e^{x} + \left(-\color{blue}{\left(2 + \left(-e^{-x}\right)\right)}\right) \]
      4. +-commutative99.8%

        \[\leadsto e^{x} + \left(-\color{blue}{\left(\left(-e^{-x}\right) + 2\right)}\right) \]
      5. distribute-neg-in99.8%

        \[\leadsto e^{x} + \color{blue}{\left(\left(-\left(-e^{-x}\right)\right) + \left(-2\right)\right)} \]
      6. remove-double-neg99.8%

        \[\leadsto e^{x} + \left(\color{blue}{e^{-x}} + \left(-2\right)\right) \]
      7. metadata-eval99.8%

        \[\leadsto e^{x} + \left(e^{-x} + \color{blue}{-2}\right) \]
    3. Simplified99.8%

      \[\leadsto \color{blue}{e^{x} + \left(e^{-x} + -2\right)} \]
    4. Step-by-step derivation
      1. associate-+r+99.8%

        \[\leadsto \color{blue}{\left(e^{x} + e^{-x}\right) + -2} \]
      2. cosh-undef99.8%

        \[\leadsto \color{blue}{2 \cdot \cosh x} + -2 \]
      3. fma-def99.8%

        \[\leadsto \color{blue}{\mathsf{fma}\left(2, \cosh x, -2\right)} \]
      4. metadata-eval99.8%

        \[\leadsto \mathsf{fma}\left(2, \cosh x, \color{blue}{-2}\right) \]
      5. fma-neg99.8%

        \[\leadsto \color{blue}{2 \cdot \cosh x - 2} \]
    5. Applied egg-rr99.8%

      \[\leadsto \color{blue}{2 \cdot \cosh x - 2} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification99.9%

    \[\leadsto \begin{array}{l} \mathbf{if}\;\left(e^{x} - 2\right) + e^{-x} \leq 10^{-6}:\\ \;\;\;\;\mathsf{fma}\left(x, x, 0.08333333333333333 \cdot {x}^{4}\right)\\ \mathbf{else}:\\ \;\;\;\;2 \cdot \cosh x - 2\\ \end{array} \]

Alternative 6: 99.9% accurate, 0.7× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;\left(e^{x} - 2\right) + e^{-x} \leq 10^{-6}:\\ \;\;\;\;x \cdot x + 0.08333333333333333 \cdot {x}^{4}\\ \mathbf{else}:\\ \;\;\;\;2 \cdot \cosh x - 2\\ \end{array} \end{array} \]
(FPCore (x)
 :precision binary64
 (if (<= (+ (- (exp x) 2.0) (exp (- x))) 1e-6)
   (+ (* x x) (* 0.08333333333333333 (pow x 4.0)))
   (- (* 2.0 (cosh x)) 2.0)))
double code(double x) {
	double tmp;
	if (((exp(x) - 2.0) + exp(-x)) <= 1e-6) {
		tmp = (x * x) + (0.08333333333333333 * pow(x, 4.0));
	} else {
		tmp = (2.0 * cosh(x)) - 2.0;
	}
	return tmp;
}
! Herbie alternative 6: Taylor polynomial x^2 + x^4/12 in the cancellation
! region (direct form <= 1e-6), else the cancellation-free 2*cosh(x) - 2.
real(8) function code(x)
    real(8), intent (in) :: x
    real(8) :: tmp
    if (((exp(x) - 2.0d0) + exp(-x)) <= 1d-6) then
        tmp = (x * x) + (0.08333333333333333d0 * (x ** 4.0d0))
    else
        tmp = (2.0d0 * cosh(x)) - 2.0d0
    end if
    code = tmp
end function
/**
 * Herbie alternative 6 for (e^x - 2) + e^-x: Taylor polynomial x^2 + x^4/12
 * in the cancellation region (direct form &lt;= 1e-6), otherwise the
 * cancellation-free identity 2*cosh(x) - 2.
 */
public static double code(double x) {
	double direct = (Math.exp(x) - 2.0) + Math.exp(-x);
	if (direct <= 1e-6) {
		return (x * x) + (0.08333333333333333 * Math.pow(x, 4.0));
	}
	return (2.0 * Math.cosh(x)) - 2.0;
}
def code(x):
	"""Herbie alternative 6 for (e^x - 2) + e^-x.

	Uses the Taylor polynomial x^2 + x^4/12 in the cancellation region
	(direct evaluation <= 1e-6); otherwise the cancellation-free identity
	2*cosh(x) - 2.
	"""
	direct = (math.exp(x) - 2.0) + math.exp(-x)
	if direct <= 1e-6:
		return (x * x) + (0.08333333333333333 * math.pow(x, 4.0))
	return (2.0 * math.cosh(x)) - 2.0
# Herbie alternative 6: Taylor polynomial x^2 + x^4/12 in the cancellation
# region (direct form <= 1e-6), else 2*cosh(x) - 2.
# Float64() wrappers pin intermediates to binary64 rounding.
function code(x)
	tmp = 0.0
	if (Float64(Float64(exp(x) - 2.0) + exp(Float64(-x))) <= 1e-6)
		tmp = Float64(Float64(x * x) + Float64(0.08333333333333333 * (x ^ 4.0)));
	else
		tmp = Float64(Float64(2.0 * cosh(x)) - 2.0);
	end
	return tmp
end
function tmp_2 = code(x)
	% Herbie alternative 6: Taylor polynomial x^2 + x^4/12 in the cancellation
	% region (direct form <= 1e-6), else 2*cosh(x) - 2.
	tmp = 0.0;
	if (((exp(x) - 2.0) + exp(-x)) <= 1e-6)
		tmp = (x * x) + (0.08333333333333333 * (x ^ 4.0));
	else
		tmp = (2.0 * cosh(x)) - 2.0;
	end
	tmp_2 = tmp;
end
code[x_] := If[LessEqual[N[(N[(N[Exp[x], $MachinePrecision] - 2.0), $MachinePrecision] + N[Exp[(-x)], $MachinePrecision]), $MachinePrecision], 1e-6], N[(N[(x * x), $MachinePrecision] + N[(0.08333333333333333 * N[Power[x, 4.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(2.0 * N[Cosh[x], $MachinePrecision]), $MachinePrecision] - 2.0), $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;\left(e^{x} - 2\right) + e^{-x} \leq 10^{-6}:\\
\;\;\;\;x \cdot x + 0.08333333333333333 \cdot {x}^{4}\\

\mathbf{else}:\\
\;\;\;\;2 \cdot \cosh x - 2\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if (+.f64 (-.f64 (exp.f64 x) 2) (exp.f64 (neg.f64 x))) < 9.99999999999999955e-7

    1. Initial program 53.9%

      \[\left(e^{x} - 2\right) + e^{-x} \]
    2. Step-by-step derivation
      1. associate-+l-53.8%

        \[\leadsto \color{blue}{e^{x} - \left(2 - e^{-x}\right)} \]
      2. sub-neg53.8%

        \[\leadsto \color{blue}{e^{x} + \left(-\left(2 - e^{-x}\right)\right)} \]
      3. sub-neg53.8%

        \[\leadsto e^{x} + \left(-\color{blue}{\left(2 + \left(-e^{-x}\right)\right)}\right) \]
      4. +-commutative53.8%

        \[\leadsto e^{x} + \left(-\color{blue}{\left(\left(-e^{-x}\right) + 2\right)}\right) \]
      5. distribute-neg-in53.8%

        \[\leadsto e^{x} + \color{blue}{\left(\left(-\left(-e^{-x}\right)\right) + \left(-2\right)\right)} \]
      6. remove-double-neg53.8%

        \[\leadsto e^{x} + \left(\color{blue}{e^{-x}} + \left(-2\right)\right) \]
      7. metadata-eval53.8%

        \[\leadsto e^{x} + \left(e^{-x} + \color{blue}{-2}\right) \]
    3. Simplified53.8%

      \[\leadsto \color{blue}{e^{x} + \left(e^{-x} + -2\right)} \]
    4. Taylor expanded in x around 0 99.9%

      \[\leadsto \color{blue}{{x}^{2} + 0.08333333333333333 \cdot {x}^{4}} \]
    5. Step-by-step derivation
      1. unpow299.9%

        \[\leadsto \color{blue}{x \cdot x} + 0.08333333333333333 \cdot {x}^{4} \]
    6. Simplified99.9%

      \[\leadsto \color{blue}{x \cdot x + 0.08333333333333333 \cdot {x}^{4}} \]

    if 9.99999999999999955e-7 < (+.f64 (-.f64 (exp.f64 x) 2) (exp.f64 (neg.f64 x)))

    1. Initial program 99.8%

      \[\left(e^{x} - 2\right) + e^{-x} \]
    2. Step-by-step derivation
      1. associate-+l-99.8%

        \[\leadsto \color{blue}{e^{x} - \left(2 - e^{-x}\right)} \]
      2. sub-neg99.8%

        \[\leadsto \color{blue}{e^{x} + \left(-\left(2 - e^{-x}\right)\right)} \]
      3. sub-neg99.8%

        \[\leadsto e^{x} + \left(-\color{blue}{\left(2 + \left(-e^{-x}\right)\right)}\right) \]
      4. +-commutative99.8%

        \[\leadsto e^{x} + \left(-\color{blue}{\left(\left(-e^{-x}\right) + 2\right)}\right) \]
      5. distribute-neg-in99.8%

        \[\leadsto e^{x} + \color{blue}{\left(\left(-\left(-e^{-x}\right)\right) + \left(-2\right)\right)} \]
      6. remove-double-neg99.8%

        \[\leadsto e^{x} + \left(\color{blue}{e^{-x}} + \left(-2\right)\right) \]
      7. metadata-eval99.8%

        \[\leadsto e^{x} + \left(e^{-x} + \color{blue}{-2}\right) \]
    3. Simplified99.8%

      \[\leadsto \color{blue}{e^{x} + \left(e^{-x} + -2\right)} \]
    4. Step-by-step derivation
      1. associate-+r+99.8%

        \[\leadsto \color{blue}{\left(e^{x} + e^{-x}\right) + -2} \]
      2. cosh-undef99.8%

        \[\leadsto \color{blue}{2 \cdot \cosh x} + -2 \]
      3. fma-def99.8%

        \[\leadsto \color{blue}{\mathsf{fma}\left(2, \cosh x, -2\right)} \]
      4. metadata-eval99.8%

        \[\leadsto \mathsf{fma}\left(2, \cosh x, \color{blue}{-2}\right) \]
      5. fma-neg99.8%

        \[\leadsto \color{blue}{2 \cdot \cosh x - 2} \]
    5. Applied egg-rr99.8%

      \[\leadsto \color{blue}{2 \cdot \cosh x - 2} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification99.9%

    \[\leadsto \begin{array}{l} \mathbf{if}\;\left(e^{x} - 2\right) + e^{-x} \leq 10^{-6}:\\ \;\;\;\;x \cdot x + 0.08333333333333333 \cdot {x}^{4}\\ \mathbf{else}:\\ \;\;\;\;2 \cdot \cosh x - 2\\ \end{array} \]

Alternative 7: 88.0% accurate, 1.9× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq 0.000195:\\ \;\;\;\;x \cdot x\\ \mathbf{else}:\\ \;\;\;\;2 \cdot \cosh x - 2\\ \end{array} \end{array} \]
(FPCore (x)
 :precision binary64
 (if (<= x 0.000195) (* x x) (- (* 2.0 (cosh x)) 2.0)))
double code(double x) {
	double tmp;
	if (x <= 0.000195) {
		tmp = x * x;
	} else {
		tmp = (2.0 * cosh(x)) - 2.0;
	}
	return tmp;
}
! Herbie alternative 7: branches on x itself (cheaper than evaluating the
! expression): x*x for x <= 1.95e-4, else 2*cosh(x) - 2.
real(8) function code(x)
    real(8), intent (in) :: x
    real(8) :: tmp
    if (x <= 0.000195d0) then
        tmp = x * x
    else
        tmp = (2.0d0 * cosh(x)) - 2.0d0
    end if
    code = tmp
end function
/**
 * Herbie alternative 7 for (e^x - 2) + e^-x: branches on x itself,
 * returning x*x for x &lt;= 1.95e-4 and 2*cosh(x) - 2 otherwise.
 */
public static double code(double x) {
	if (x <= 0.000195) {
		return x * x;
	}
	return (2.0 * Math.cosh(x)) - 2.0;
}
def code(x):
	"""Herbie alternative 7 for (e^x - 2) + e^-x.

	Branches on x itself: returns x*x for x <= 1.95e-4 and the
	cancellation-free 2*cosh(x) - 2 otherwise.
	"""
	if x <= 0.000195:
		return x * x
	return (2.0 * math.cosh(x)) - 2.0
# Herbie alternative 7: branches on x itself: x*x for x <= 1.95e-4,
# else 2*cosh(x) - 2. Float64() wrappers pin binary64 rounding.
function code(x)
	tmp = 0.0
	if (x <= 0.000195)
		tmp = Float64(x * x);
	else
		tmp = Float64(Float64(2.0 * cosh(x)) - 2.0);
	end
	return tmp
end
function tmp_2 = code(x)
	% Herbie alternative 7: branches on x itself: x*x for x <= 1.95e-4,
	% else 2*cosh(x) - 2.
	tmp = 0.0;
	if (x <= 0.000195)
		tmp = x * x;
	else
		tmp = (2.0 * cosh(x)) - 2.0;
	end
	tmp_2 = tmp;
end
code[x_] := If[LessEqual[x, 0.000195], N[(x * x), $MachinePrecision], N[(N[(2.0 * N[Cosh[x], $MachinePrecision]), $MachinePrecision] - 2.0), $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x \leq 0.000195:\\
\;\;\;\;x \cdot x\\

\mathbf{else}:\\
\;\;\;\;2 \cdot \cosh x - 2\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if x < 1.94999999999999996e-4

    1. Initial program 71.1%

      \[\left(e^{x} - 2\right) + e^{-x} \]
    2. Step-by-step derivation
      1. associate-+l-71.1%

        \[\leadsto \color{blue}{e^{x} - \left(2 - e^{-x}\right)} \]
      2. sub-neg71.1%

        \[\leadsto \color{blue}{e^{x} + \left(-\left(2 - e^{-x}\right)\right)} \]
      3. sub-neg71.1%

        \[\leadsto e^{x} + \left(-\color{blue}{\left(2 + \left(-e^{-x}\right)\right)}\right) \]
      4. +-commutative71.1%

        \[\leadsto e^{x} + \left(-\color{blue}{\left(\left(-e^{-x}\right) + 2\right)}\right) \]
      5. distribute-neg-in71.1%

        \[\leadsto e^{x} + \color{blue}{\left(\left(-\left(-e^{-x}\right)\right) + \left(-2\right)\right)} \]
      6. remove-double-neg71.1%

        \[\leadsto e^{x} + \left(\color{blue}{e^{-x}} + \left(-2\right)\right) \]
      7. metadata-eval71.1%

        \[\leadsto e^{x} + \left(e^{-x} + \color{blue}{-2}\right) \]
    3. Simplified71.1%

      \[\leadsto \color{blue}{e^{x} + \left(e^{-x} + -2\right)} \]
    4. Taylor expanded in x around 0 81.0%

      \[\leadsto \color{blue}{{x}^{2}} \]
    5. Step-by-step derivation
      1. unpow281.0%

        \[\leadsto \color{blue}{x \cdot x} \]
    6. Simplified81.0%

      \[\leadsto \color{blue}{x \cdot x} \]

    if 1.94999999999999996e-4 < x

    1. Initial program 98.9%

      \[\left(e^{x} - 2\right) + e^{-x} \]
    2. Step-by-step derivation
      1. associate-+l-98.9%

        \[\leadsto \color{blue}{e^{x} - \left(2 - e^{-x}\right)} \]
      2. sub-neg98.9%

        \[\leadsto \color{blue}{e^{x} + \left(-\left(2 - e^{-x}\right)\right)} \]
      3. sub-neg98.9%

        \[\leadsto e^{x} + \left(-\color{blue}{\left(2 + \left(-e^{-x}\right)\right)}\right) \]
      4. +-commutative98.9%

        \[\leadsto e^{x} + \left(-\color{blue}{\left(\left(-e^{-x}\right) + 2\right)}\right) \]
      5. distribute-neg-in98.9%

        \[\leadsto e^{x} + \color{blue}{\left(\left(-\left(-e^{-x}\right)\right) + \left(-2\right)\right)} \]
      6. remove-double-neg98.9%

        \[\leadsto e^{x} + \left(\color{blue}{e^{-x}} + \left(-2\right)\right) \]
      7. metadata-eval98.9%

        \[\leadsto e^{x} + \left(e^{-x} + \color{blue}{-2}\right) \]
    3. Simplified98.9%

      \[\leadsto \color{blue}{e^{x} + \left(e^{-x} + -2\right)} \]
    4. Step-by-step derivation
      1. associate-+r+98.8%

        \[\leadsto \color{blue}{\left(e^{x} + e^{-x}\right) + -2} \]
      2. cosh-undef98.9%

        \[\leadsto \color{blue}{2 \cdot \cosh x} + -2 \]
      3. fma-def98.9%

        \[\leadsto \color{blue}{\mathsf{fma}\left(2, \cosh x, -2\right)} \]
      4. metadata-eval98.9%

        \[\leadsto \mathsf{fma}\left(2, \cosh x, \color{blue}{-2}\right) \]
      5. fma-neg98.9%

        \[\leadsto \color{blue}{2 \cdot \cosh x - 2} \]
    5. Applied egg-rr98.9%

      \[\leadsto \color{blue}{2 \cdot \cosh x - 2} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification85.4%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq 0.000195:\\ \;\;\;\;x \cdot x\\ \mathbf{else}:\\ \;\;\;\;2 \cdot \cosh x - 2\\ \end{array} \]

Alternative 8: 87.8% accurate, 2.0× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq 1.65:\\ \;\;\;\;x \cdot x\\ \mathbf{else}:\\ \;\;\;\;\mathsf{expm1}\left(x\right)\\ \end{array} \end{array} \]
(FPCore (x) :precision binary64 (if (<= x 1.65) (* x x) (expm1 x)))
double code(double x) {
	double tmp;
	if (x <= 1.65) {
		tmp = x * x;
	} else {
		tmp = expm1(x);
	}
	return tmp;
}
/**
 * Herbie alternative 8 for (e^x - 2) + e^-x: x*x for x &lt;= 1.65,
 * Math.expm1(x) = e^x - 1 for larger x.
 */
public static double code(double x) {
	if (x <= 1.65) {
		return x * x;
	}
	return Math.expm1(x);
}
def code(x):
	"""Herbie alternative 8 for (e^x - 2) + e^-x.

	Returns x*x for x <= 1.65 and expm1(x) = e^x - 1 for larger x.
	"""
	if x <= 1.65:
		return x * x
	return math.expm1(x)
# Herbie alternative 8: x*x for x <= 1.65, expm1(x) = e^x - 1 for larger x.
function code(x)
	tmp = 0.0
	if (x <= 1.65)
		tmp = Float64(x * x);
	else
		tmp = expm1(x);
	end
	return tmp
end
code[x_] := If[LessEqual[x, 1.65], N[(x * x), $MachinePrecision], N[(Exp[x] - 1), $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x \leq 1.65:\\
\;\;\;\;x \cdot x\\

\mathbf{else}:\\
\;\;\;\;\mathsf{expm1}\left(x\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if x < 1.6499999999999999

    1. Initial program 71.0%

      \[\left(e^{x} - 2\right) + e^{-x} \]
    2. Step-by-step derivation
      1. associate-+l-71.0%

        \[\leadsto \color{blue}{e^{x} - \left(2 - e^{-x}\right)} \]
      2. sub-neg71.0%

        \[\leadsto \color{blue}{e^{x} + \left(-\left(2 - e^{-x}\right)\right)} \]
      3. sub-neg71.0%

        \[\leadsto e^{x} + \left(-\color{blue}{\left(2 + \left(-e^{-x}\right)\right)}\right) \]
      4. +-commutative71.0%

        \[\leadsto e^{x} + \left(-\color{blue}{\left(\left(-e^{-x}\right) + 2\right)}\right) \]
      5. distribute-neg-in71.0%

        \[\leadsto e^{x} + \color{blue}{\left(\left(-\left(-e^{-x}\right)\right) + \left(-2\right)\right)} \]
      6. remove-double-neg71.0%

        \[\leadsto e^{x} + \left(\color{blue}{e^{-x}} + \left(-2\right)\right) \]
      7. metadata-eval71.0%

        \[\leadsto e^{x} + \left(e^{-x} + \color{blue}{-2}\right) \]
    3. Simplified71.0%

      \[\leadsto \color{blue}{e^{x} + \left(e^{-x} + -2\right)} \]
    4. Taylor expanded in x around 0 80.8%

      \[\leadsto \color{blue}{{x}^{2}} \]
    5. Step-by-step derivation
      1. unpow280.8%

        \[\leadsto \color{blue}{x \cdot x} \]
    6. Simplified80.8%

      \[\leadsto \color{blue}{x \cdot x} \]

    if 1.6499999999999999 < x

    1. Initial program 100.0%

      \[\left(e^{x} - 2\right) + e^{-x} \]
    2. Step-by-step derivation
      1. associate-+l-100.0%

        \[\leadsto \color{blue}{e^{x} - \left(2 - e^{-x}\right)} \]
      2. sub-neg100.0%

        \[\leadsto \color{blue}{e^{x} + \left(-\left(2 - e^{-x}\right)\right)} \]
      3. sub-neg100.0%

        \[\leadsto e^{x} + \left(-\color{blue}{\left(2 + \left(-e^{-x}\right)\right)}\right) \]
      4. +-commutative100.0%

        \[\leadsto e^{x} + \left(-\color{blue}{\left(\left(-e^{-x}\right) + 2\right)}\right) \]
      5. distribute-neg-in100.0%

        \[\leadsto e^{x} + \color{blue}{\left(\left(-\left(-e^{-x}\right)\right) + \left(-2\right)\right)} \]
      6. remove-double-neg100.0%

        \[\leadsto e^{x} + \left(\color{blue}{e^{-x}} + \left(-2\right)\right) \]
      7. metadata-eval100.0%

        \[\leadsto e^{x} + \left(e^{-x} + \color{blue}{-2}\right) \]
    3. Simplified100.0%

      \[\leadsto \color{blue}{e^{x} + \left(e^{-x} + -2\right)} \]
    4. Taylor expanded in x around 0 98.8%

      \[\leadsto e^{x} + \color{blue}{-1} \]
    5. Taylor expanded in x around inf 98.8%

      \[\leadsto \color{blue}{e^{x} - 1} \]
    6. Step-by-step derivation
      1. expm1-def98.8%

        \[\leadsto \color{blue}{\mathsf{expm1}\left(x\right)} \]
    7. Simplified98.8%

      \[\leadsto \color{blue}{\mathsf{expm1}\left(x\right)} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification85.1%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq 1.65:\\ \;\;\;\;x \cdot x\\ \mathbf{else}:\\ \;\;\;\;\mathsf{expm1}\left(x\right)\\ \end{array} \]

Alternative 9: 75.9% accurate, 68.7× speedup?

\[\begin{array}{l} \\ x \cdot x \end{array} \]
(FPCore (x) :precision binary64 (* x x))
/* Herbie alternative 9: leading Taylor term x^2 of (e^x - 2) + e^-x.
 * 75.9% accurate, 68.7x speedup per the report. */
double code(double x) {
	return x * x;
}
! Herbie alternative 9: leading Taylor term x^2 of (e^x - 2) + e^-x.
real(8) function code(x)
    real(8), intent (in) :: x
    code = x * x
end function
/** Herbie alternative 9: leading Taylor term x^2 of (e^x - 2) + e^-x. */
public static double code(double x) {
	return x * x;
}
def code(x):
	"""Herbie alternative 9: leading Taylor term x^2 of (e^x - 2) + e^-x."""
	square = x * x
	return square
# Herbie alternative 9: leading Taylor term x^2 of (e^x - 2) + e^-x.
function code(x)
	return Float64(x * x)
end
function tmp = code(x)
	% Herbie alternative 9: leading Taylor term x^2 of (e^x - 2) + e^-x.
	tmp = x * x;
end
code[x_] := N[(x * x), $MachinePrecision]
\begin{array}{l}

\\
x \cdot x
\end{array}
Derivation
  1. Initial program 77.9%

    \[\left(e^{x} - 2\right) + e^{-x} \]
  2. Step-by-step derivation
    1. associate-+l-77.9%

      \[\leadsto \color{blue}{e^{x} - \left(2 - e^{-x}\right)} \]
    2. sub-neg77.9%

      \[\leadsto \color{blue}{e^{x} + \left(-\left(2 - e^{-x}\right)\right)} \]
    3. sub-neg77.9%

      \[\leadsto e^{x} + \left(-\color{blue}{\left(2 + \left(-e^{-x}\right)\right)}\right) \]
    4. +-commutative77.9%

      \[\leadsto e^{x} + \left(-\color{blue}{\left(\left(-e^{-x}\right) + 2\right)}\right) \]
    5. distribute-neg-in77.9%

      \[\leadsto e^{x} + \color{blue}{\left(\left(-\left(-e^{-x}\right)\right) + \left(-2\right)\right)} \]
    6. remove-double-neg77.9%

      \[\leadsto e^{x} + \left(\color{blue}{e^{-x}} + \left(-2\right)\right) \]
    7. metadata-eval77.9%

      \[\leadsto e^{x} + \left(e^{-x} + \color{blue}{-2}\right) \]
  3. Simplified77.9%

    \[\leadsto \color{blue}{e^{x} + \left(e^{-x} + -2\right)} \]
  4. Taylor expanded in x around 0 72.1%

    \[\leadsto \color{blue}{{x}^{2}} \]
  5. Step-by-step derivation
    1. unpow272.1%

      \[\leadsto \color{blue}{x \cdot x} \]
  6. Simplified72.1%

    \[\leadsto \color{blue}{x \cdot x} \]
  7. Final simplification72.1%

    \[\leadsto x \cdot x \]

Alternative 10: 4.5% accurate, 206.0× speedup?

\[\begin{array}{l} \\ x \end{array} \]
(FPCore (x) :precision binary64 x)
/* Herbie alternative 10: first-order Taylor reduction to x itself.
 * Only 4.5% accurate (206x speedup) — kept for the speed/accuracy frontier. */
double code(double x) {
	return x;
}
! Herbie alternative 10: first-order Taylor reduction to x (4.5% accurate).
real(8) function code(x)
    real(8), intent (in) :: x
    code = x
end function
/** Herbie alternative 10: first-order Taylor reduction to x (4.5% accurate). */
public static double code(double x) {
	return x;
}
def code(x):
	"""Herbie alternative 10: first-order Taylor reduction to x (4.5% accurate)."""
	return x
# Herbie alternative 10: first-order Taylor reduction to x (4.5% accurate).
function code(x)
	return x
end
function tmp = code(x)
	% Herbie alternative 10: first-order Taylor reduction to x (4.5% accurate).
	tmp = x;
end
code[x_] := x
\begin{array}{l}

\\
x
\end{array}
Derivation
  1. Initial program 77.9%

    \[\left(e^{x} - 2\right) + e^{-x} \]
  2. Step-by-step derivation
    1. associate-+l-77.9%

      \[\leadsto \color{blue}{e^{x} - \left(2 - e^{-x}\right)} \]
    2. sub-neg77.9%

      \[\leadsto \color{blue}{e^{x} + \left(-\left(2 - e^{-x}\right)\right)} \]
    3. sub-neg77.9%

      \[\leadsto e^{x} + \left(-\color{blue}{\left(2 + \left(-e^{-x}\right)\right)}\right) \]
    4. +-commutative77.9%

      \[\leadsto e^{x} + \left(-\color{blue}{\left(\left(-e^{-x}\right) + 2\right)}\right) \]
    5. distribute-neg-in77.9%

      \[\leadsto e^{x} + \color{blue}{\left(\left(-\left(-e^{-x}\right)\right) + \left(-2\right)\right)} \]
    6. remove-double-neg77.9%

      \[\leadsto e^{x} + \left(\color{blue}{e^{-x}} + \left(-2\right)\right) \]
    7. metadata-eval77.9%

      \[\leadsto e^{x} + \left(e^{-x} + \color{blue}{-2}\right) \]
  3. Simplified77.9%

    \[\leadsto \color{blue}{e^{x} + \left(e^{-x} + -2\right)} \]
  4. Taylor expanded in x around 0 48.2%

    \[\leadsto e^{x} + \color{blue}{-1} \]
  5. Taylor expanded in x around 0 4.3%

    \[\leadsto \color{blue}{x} \]
  6. Final simplification4.3%

    \[\leadsto x \]

Developer target: 100.0% accurate, 1.0× speedup?

\[\begin{array}{l} \\ 4 \cdot {\sinh \left(\frac{x}{2}\right)}^{2} \end{array} \]
(FPCore (x) :precision binary64 (* 4.0 (pow (sinh (/ x 2.0)) 2.0)))
double code(double x) {
	return 4.0 * pow(sinh((x / 2.0)), 2.0);
}
! Developer target: 4*sinh(x/2)^2, mathematically equal to (e^x - 2) + e^-x
! and free of cancellation near x = 0.
real(8) function code(x)
    real(8), intent (in) :: x
    code = 4.0d0 * (sinh((x / 2.0d0)) ** 2.0d0)
end function
/**
 * Developer target: 4*sinh(x/2)^2, mathematically equal to (e^x - 2) + e^-x
 * and free of cancellation near x = 0.
 */
public static double code(double x) {
	double halfSinh = Math.sinh((x / 2.0));
	return 4.0 * Math.pow(halfSinh, 2.0);
}
def code(x):
	"""Developer target: 4*sinh(x/2)^2, mathematically equal to
	(e^x - 2) + e^-x and free of cancellation near x = 0."""
	half_sinh = math.sinh((x / 2.0))
	return 4.0 * math.pow(half_sinh, 2.0)
# Developer target: 4*sinh(x/2)^2, mathematically equal to (e^x - 2) + e^-x
# and free of cancellation near x = 0.
function code(x)
	return Float64(4.0 * (sinh(Float64(x / 2.0)) ^ 2.0))
end
function tmp = code(x)
	% Developer target: 4*sinh(x/2)^2, equal to (e^x - 2) + e^-x without
	% cancellation near x = 0.
	tmp = 4.0 * (sinh((x / 2.0)) ^ 2.0);
end
code[x_] := N[(4.0 * N[Power[N[Sinh[N[(x / 2.0), $MachinePrecision]], $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
4 \cdot {\sinh \left(\frac{x}{2}\right)}^{2}
\end{array}

Reproduce

?
herbie shell --seed 2023278 
(FPCore (x)
  :name "exp2 (problem 3.3.7)"
  :precision binary64

  :herbie-target
  (* 4.0 (pow (sinh (/ x 2.0)) 2.0))

  (+ (- (exp x) 2.0) (exp (- x))))