exp2 (problem 3.3.7)

Percentage Accurate: 53.2% → 99.2%
Time: 11.1s
Alternatives: 6
Speedup: 34.8×

Specification

?
\[\left|x\right| \leq 710\]
\[\begin{array}{l} \\ \left(e^{x} - 2\right) + e^{-x} \end{array} \]
(FPCore (x) :precision binary64 (+ (- (exp x) 2.0) (exp (- x))))
double code(double x) {
	return (exp(x) - 2.0) + exp(-x);
}
! (e^x - 2) + e^(-x), evaluated directly in double precision.
! NOTE(review): per the report this loses accuracy near x = 0.
real(8) function code(x)
    real(8), intent (in) :: x
    code = (exp(x) - 2.0d0) + exp(-x)
end function
// (e^x - 2) + e^(-x), evaluated directly; inaccurate near x = 0 per the report.
public static double code(double x) {
	double grow = Math.exp(x);
	double decay = Math.exp(-x);
	return (grow - 2.0) + decay;
}
def code(x):
	# (e^x - 2) + e^(-x), evaluated directly; inaccurate near x = 0 per the report.
	grow = math.exp(x)
	decay = math.exp(-x)
	return (grow - 2.0) + decay
# (e^x - 2) + e^(-x), rounded to Float64 after each operation.
function code(x)
	return Float64(Float64(exp(x) - 2.0) + exp(Float64(-x)))
end
% (e^x - 2) + e^(-x), evaluated directly; inaccurate near x = 0 per the report.
function tmp = code(x)
	tmp = (exp(x) - 2.0) + exp(-x);
end
code[x_] := N[(N[(N[Exp[x], $MachinePrecision] - 2.0), $MachinePrecision] + N[Exp[(-x)], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\left(e^{x} - 2\right) + e^{-x}
\end{array}

Sampling outcomes in binary64 precision:

Local Percentage Accuracy vs ?

The average percentage accuracy by input value. The horizontal axis shows the value of an input variable; the variable is chosen in the title. The vertical axis is accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion. These can be toggled with buttons below the plot. The line is an average, while dots represent individual samples.

Accuracy vs Speed?

Herbie found 6 alternatives:

Alternative — Accuracy — Speedup
The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed–accuracy tradeoffs.

Initial Program: 53.2% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \left(e^{x} - 2\right) + e^{-x} \end{array} \]
(FPCore (x) :precision binary64 (+ (- (exp x) 2.0) (exp (- x))))
double code(double x) {
	return (exp(x) - 2.0) + exp(-x);
}
! (e^x - 2) + e^(-x), evaluated directly in double precision.
! NOTE(review): per the report this loses accuracy near x = 0.
real(8) function code(x)
    real(8), intent (in) :: x
    code = (exp(x) - 2.0d0) + exp(-x)
end function
// (e^x - 2) + e^(-x), evaluated directly; inaccurate near x = 0 per the report.
public static double code(double x) {
	double grow = Math.exp(x);
	double decay = Math.exp(-x);
	return (grow - 2.0) + decay;
}
def code(x):
	# (e^x - 2) + e^(-x), evaluated directly; inaccurate near x = 0 per the report.
	grow = math.exp(x)
	decay = math.exp(-x)
	return (grow - 2.0) + decay
# (e^x - 2) + e^(-x), rounded to Float64 after each operation.
function code(x)
	return Float64(Float64(exp(x) - 2.0) + exp(Float64(-x)))
end
% (e^x - 2) + e^(-x), evaluated directly; inaccurate near x = 0 per the report.
function tmp = code(x)
	tmp = (exp(x) - 2.0) + exp(-x);
end
code[x_] := N[(N[(N[Exp[x], $MachinePrecision] - 2.0), $MachinePrecision] + N[Exp[(-x)], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\left(e^{x} - 2\right) + e^{-x}
\end{array}

Alternative 1: 99.2% accurate, 4.3× speedup?

\[\begin{array}{l} \\ \mathsf{fma}\left(\mathsf{fma}\left(x, x \cdot \mathsf{fma}\left(x, x \cdot 4.96031746031746 \cdot 10^{-5}, 0.002777777777777778\right), 0.08333333333333333\right), x \cdot \left(x \cdot \left(x \cdot x\right)\right), x \cdot x\right) \end{array} \]
(FPCore (x)
 :precision binary64
 (fma
  (fma
   x
   (* x (fma x (* x 4.96031746031746e-5) 0.002777777777777778))
   0.08333333333333333)
  (* x (* x (* x x)))
  (* x x)))
double code(double x) {
	return fma(fma(x, (x * fma(x, (x * 4.96031746031746e-5), 0.002777777777777778)), 0.08333333333333333), (x * (x * (x * x))), (x * x));
}
# Degree-6 Taylor polynomial of (e^x - 2) + e^(-x) about x = 0, using fma.
function code(x)
	return fma(fma(x, Float64(x * fma(x, Float64(x * 4.96031746031746e-5), 0.002777777777777778)), 0.08333333333333333), Float64(x * Float64(x * Float64(x * x))), Float64(x * x))
end
code[x_] := N[(N[(x * N[(x * N[(x * N[(x * 4.96031746031746e-5), $MachinePrecision] + 0.002777777777777778), $MachinePrecision]), $MachinePrecision] + 0.08333333333333333), $MachinePrecision] * N[(x * N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(x * x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\mathsf{fma}\left(\mathsf{fma}\left(x, x \cdot \mathsf{fma}\left(x, x \cdot 4.96031746031746 \cdot 10^{-5}, 0.002777777777777778\right), 0.08333333333333333\right), x \cdot \left(x \cdot \left(x \cdot x\right)\right), x \cdot x\right)
\end{array}
Derivation
  1. Initial program 53.4%

    \[\left(e^{x} - 2\right) + e^{-x} \]
  2. Add Preprocessing
  3. Taylor expanded in x around 0

    \[\leadsto \color{blue}{{x}^{2} \cdot \left(1 + {x}^{2} \cdot \left(\frac{1}{12} + {x}^{2} \cdot \left(\frac{1}{360} + \frac{1}{20160} \cdot {x}^{2}\right)\right)\right)} \]
  4. Step-by-step derivation
    1. unpow2N/A

      \[\leadsto \color{blue}{\left(x \cdot x\right)} \cdot \left(1 + {x}^{2} \cdot \left(\frac{1}{12} + {x}^{2} \cdot \left(\frac{1}{360} + \frac{1}{20160} \cdot {x}^{2}\right)\right)\right) \]
    2. associate-*l*N/A

      \[\leadsto \color{blue}{x \cdot \left(x \cdot \left(1 + {x}^{2} \cdot \left(\frac{1}{12} + {x}^{2} \cdot \left(\frac{1}{360} + \frac{1}{20160} \cdot {x}^{2}\right)\right)\right)\right)} \]
    3. lower-*.f64N/A

      \[\leadsto \color{blue}{x \cdot \left(x \cdot \left(1 + {x}^{2} \cdot \left(\frac{1}{12} + {x}^{2} \cdot \left(\frac{1}{360} + \frac{1}{20160} \cdot {x}^{2}\right)\right)\right)\right)} \]
    4. +-commutativeN/A

      \[\leadsto x \cdot \left(x \cdot \color{blue}{\left({x}^{2} \cdot \left(\frac{1}{12} + {x}^{2} \cdot \left(\frac{1}{360} + \frac{1}{20160} \cdot {x}^{2}\right)\right) + 1\right)}\right) \]
    5. distribute-lft-inN/A

      \[\leadsto x \cdot \color{blue}{\left(x \cdot \left({x}^{2} \cdot \left(\frac{1}{12} + {x}^{2} \cdot \left(\frac{1}{360} + \frac{1}{20160} \cdot {x}^{2}\right)\right)\right) + x \cdot 1\right)} \]
    6. associate-*r*N/A

      \[\leadsto x \cdot \left(\color{blue}{\left(x \cdot {x}^{2}\right) \cdot \left(\frac{1}{12} + {x}^{2} \cdot \left(\frac{1}{360} + \frac{1}{20160} \cdot {x}^{2}\right)\right)} + x \cdot 1\right) \]
    7. *-commutativeN/A

      \[\leadsto x \cdot \left(\color{blue}{\left(\frac{1}{12} + {x}^{2} \cdot \left(\frac{1}{360} + \frac{1}{20160} \cdot {x}^{2}\right)\right) \cdot \left(x \cdot {x}^{2}\right)} + x \cdot 1\right) \]
    8. *-rgt-identityN/A

      \[\leadsto x \cdot \left(\left(\frac{1}{12} + {x}^{2} \cdot \left(\frac{1}{360} + \frac{1}{20160} \cdot {x}^{2}\right)\right) \cdot \left(x \cdot {x}^{2}\right) + \color{blue}{x}\right) \]
    9. lower-fma.f64N/A

      \[\leadsto x \cdot \color{blue}{\mathsf{fma}\left(\frac{1}{12} + {x}^{2} \cdot \left(\frac{1}{360} + \frac{1}{20160} \cdot {x}^{2}\right), x \cdot {x}^{2}, x\right)} \]
  5. Simplified98.9%

    \[\leadsto \color{blue}{x \cdot \mathsf{fma}\left(\mathsf{fma}\left(x, x \cdot \mathsf{fma}\left(x \cdot x, 4.96031746031746 \cdot 10^{-5}, 0.002777777777777778\right), 0.08333333333333333\right), x \cdot \left(x \cdot x\right), x\right)} \]
  6. Step-by-step derivation
    1. lift-*.f64N/A

      \[\leadsto x \cdot \left(\left(x \cdot \left(x \cdot \left(\color{blue}{\left(x \cdot x\right)} \cdot \frac{1}{20160} + \frac{1}{360}\right)\right) + \frac{1}{12}\right) \cdot \left(x \cdot \left(x \cdot x\right)\right) + x\right) \]
    2. lift-fma.f64N/A

      \[\leadsto x \cdot \left(\left(x \cdot \left(x \cdot \color{blue}{\mathsf{fma}\left(x \cdot x, \frac{1}{20160}, \frac{1}{360}\right)}\right) + \frac{1}{12}\right) \cdot \left(x \cdot \left(x \cdot x\right)\right) + x\right) \]
    3. lift-*.f64N/A

      \[\leadsto x \cdot \left(\left(x \cdot \color{blue}{\left(x \cdot \mathsf{fma}\left(x \cdot x, \frac{1}{20160}, \frac{1}{360}\right)\right)} + \frac{1}{12}\right) \cdot \left(x \cdot \left(x \cdot x\right)\right) + x\right) \]
    4. lift-fma.f64N/A

      \[\leadsto x \cdot \left(\color{blue}{\mathsf{fma}\left(x, x \cdot \mathsf{fma}\left(x \cdot x, \frac{1}{20160}, \frac{1}{360}\right), \frac{1}{12}\right)} \cdot \left(x \cdot \left(x \cdot x\right)\right) + x\right) \]
    5. lift-*.f64N/A

      \[\leadsto x \cdot \left(\mathsf{fma}\left(x, x \cdot \mathsf{fma}\left(x \cdot x, \frac{1}{20160}, \frac{1}{360}\right), \frac{1}{12}\right) \cdot \left(x \cdot \color{blue}{\left(x \cdot x\right)}\right) + x\right) \]
    6. lift-*.f64N/A

      \[\leadsto x \cdot \left(\mathsf{fma}\left(x, x \cdot \mathsf{fma}\left(x \cdot x, \frac{1}{20160}, \frac{1}{360}\right), \frac{1}{12}\right) \cdot \color{blue}{\left(x \cdot \left(x \cdot x\right)\right)} + x\right) \]
    7. distribute-rgt-inN/A

      \[\leadsto \color{blue}{\left(\mathsf{fma}\left(x, x \cdot \mathsf{fma}\left(x \cdot x, \frac{1}{20160}, \frac{1}{360}\right), \frac{1}{12}\right) \cdot \left(x \cdot \left(x \cdot x\right)\right)\right) \cdot x + x \cdot x} \]
    8. lift-*.f64N/A

      \[\leadsto \left(\mathsf{fma}\left(x, x \cdot \mathsf{fma}\left(x \cdot x, \frac{1}{20160}, \frac{1}{360}\right), \frac{1}{12}\right) \cdot \left(x \cdot \left(x \cdot x\right)\right)\right) \cdot x + \color{blue}{x \cdot x} \]
  7. Applied egg-rr98.9%

    \[\leadsto \color{blue}{\mathsf{fma}\left(\mathsf{fma}\left(x, x \cdot \mathsf{fma}\left(x, x \cdot 4.96031746031746 \cdot 10^{-5}, 0.002777777777777778\right), 0.08333333333333333\right), \left(x \cdot \left(x \cdot x\right)\right) \cdot x, x \cdot x\right)} \]
  8. Final simplification98.9%

    \[\leadsto \mathsf{fma}\left(\mathsf{fma}\left(x, x \cdot \mathsf{fma}\left(x, x \cdot 4.96031746031746 \cdot 10^{-5}, 0.002777777777777778\right), 0.08333333333333333\right), x \cdot \left(x \cdot \left(x \cdot x\right)\right), x \cdot x\right) \]
  9. Add Preprocessing

Alternative 2: 99.2% accurate, 4.8× speedup?

\[\begin{array}{l} \\ x \cdot \mathsf{fma}\left(\mathsf{fma}\left(x, x \cdot \mathsf{fma}\left(x \cdot x, 4.96031746031746 \cdot 10^{-5}, 0.002777777777777778\right), 0.08333333333333333\right), x \cdot \left(x \cdot x\right), x\right) \end{array} \]
(FPCore (x)
 :precision binary64
 (*
  x
  (fma
   (fma
    x
    (* x (fma (* x x) 4.96031746031746e-5 0.002777777777777778))
    0.08333333333333333)
   (* x (* x x))
   x)))
double code(double x) {
	return x * fma(fma(x, (x * fma((x * x), 4.96031746031746e-5, 0.002777777777777778)), 0.08333333333333333), (x * (x * x)), x);
}
# Degree-6 Taylor polynomial of (e^x - 2) + e^(-x) about x = 0, factored as x * P(x).
function code(x)
	return Float64(x * fma(fma(x, Float64(x * fma(Float64(x * x), 4.96031746031746e-5, 0.002777777777777778)), 0.08333333333333333), Float64(x * Float64(x * x)), x))
end
code[x_] := N[(x * N[(N[(x * N[(x * N[(N[(x * x), $MachinePrecision] * 4.96031746031746e-5 + 0.002777777777777778), $MachinePrecision]), $MachinePrecision] + 0.08333333333333333), $MachinePrecision] * N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision] + x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
x \cdot \mathsf{fma}\left(\mathsf{fma}\left(x, x \cdot \mathsf{fma}\left(x \cdot x, 4.96031746031746 \cdot 10^{-5}, 0.002777777777777778\right), 0.08333333333333333\right), x \cdot \left(x \cdot x\right), x\right)
\end{array}
Derivation
  1. Initial program 53.4%

    \[\left(e^{x} - 2\right) + e^{-x} \]
  2. Add Preprocessing
  3. Taylor expanded in x around 0

    \[\leadsto \color{blue}{{x}^{2} \cdot \left(1 + {x}^{2} \cdot \left(\frac{1}{12} + {x}^{2} \cdot \left(\frac{1}{360} + \frac{1}{20160} \cdot {x}^{2}\right)\right)\right)} \]
  4. Step-by-step derivation
    1. unpow2N/A

      \[\leadsto \color{blue}{\left(x \cdot x\right)} \cdot \left(1 + {x}^{2} \cdot \left(\frac{1}{12} + {x}^{2} \cdot \left(\frac{1}{360} + \frac{1}{20160} \cdot {x}^{2}\right)\right)\right) \]
    2. associate-*l*N/A

      \[\leadsto \color{blue}{x \cdot \left(x \cdot \left(1 + {x}^{2} \cdot \left(\frac{1}{12} + {x}^{2} \cdot \left(\frac{1}{360} + \frac{1}{20160} \cdot {x}^{2}\right)\right)\right)\right)} \]
    3. lower-*.f64N/A

      \[\leadsto \color{blue}{x \cdot \left(x \cdot \left(1 + {x}^{2} \cdot \left(\frac{1}{12} + {x}^{2} \cdot \left(\frac{1}{360} + \frac{1}{20160} \cdot {x}^{2}\right)\right)\right)\right)} \]
    4. +-commutativeN/A

      \[\leadsto x \cdot \left(x \cdot \color{blue}{\left({x}^{2} \cdot \left(\frac{1}{12} + {x}^{2} \cdot \left(\frac{1}{360} + \frac{1}{20160} \cdot {x}^{2}\right)\right) + 1\right)}\right) \]
    5. distribute-lft-inN/A

      \[\leadsto x \cdot \color{blue}{\left(x \cdot \left({x}^{2} \cdot \left(\frac{1}{12} + {x}^{2} \cdot \left(\frac{1}{360} + \frac{1}{20160} \cdot {x}^{2}\right)\right)\right) + x \cdot 1\right)} \]
    6. associate-*r*N/A

      \[\leadsto x \cdot \left(\color{blue}{\left(x \cdot {x}^{2}\right) \cdot \left(\frac{1}{12} + {x}^{2} \cdot \left(\frac{1}{360} + \frac{1}{20160} \cdot {x}^{2}\right)\right)} + x \cdot 1\right) \]
    7. *-commutativeN/A

      \[\leadsto x \cdot \left(\color{blue}{\left(\frac{1}{12} + {x}^{2} \cdot \left(\frac{1}{360} + \frac{1}{20160} \cdot {x}^{2}\right)\right) \cdot \left(x \cdot {x}^{2}\right)} + x \cdot 1\right) \]
    8. *-rgt-identityN/A

      \[\leadsto x \cdot \left(\left(\frac{1}{12} + {x}^{2} \cdot \left(\frac{1}{360} + \frac{1}{20160} \cdot {x}^{2}\right)\right) \cdot \left(x \cdot {x}^{2}\right) + \color{blue}{x}\right) \]
    9. lower-fma.f64N/A

      \[\leadsto x \cdot \color{blue}{\mathsf{fma}\left(\frac{1}{12} + {x}^{2} \cdot \left(\frac{1}{360} + \frac{1}{20160} \cdot {x}^{2}\right), x \cdot {x}^{2}, x\right)} \]
  5. Simplified98.9%

    \[\leadsto \color{blue}{x \cdot \mathsf{fma}\left(\mathsf{fma}\left(x, x \cdot \mathsf{fma}\left(x \cdot x, 4.96031746031746 \cdot 10^{-5}, 0.002777777777777778\right), 0.08333333333333333\right), x \cdot \left(x \cdot x\right), x\right)} \]
  6. Add Preprocessing

Alternative 3: 98.8% accurate, 4.9× speedup?

\[\begin{array}{l} \\ x \cdot \mathsf{fma}\left(\mathsf{fma}\left(x, x \cdot \left(4.96031746031746 \cdot 10^{-5} \cdot \left(x \cdot x\right)\right), 0.08333333333333333\right), x \cdot \left(x \cdot x\right), x\right) \end{array} \]
(FPCore (x)
 :precision binary64
 (*
  x
  (fma
   (fma x (* x (* 4.96031746031746e-5 (* x x))) 0.08333333333333333)
   (* x (* x x))
   x)))
double code(double x) {
	return x * fma(fma(x, (x * (4.96031746031746e-5 * (x * x))), 0.08333333333333333), (x * (x * x)), x);
}
# Taylor-polynomial approximation of (e^x - 2) + e^(-x); drops the lowest-order inner term's fma.
function code(x)
	return Float64(x * fma(fma(x, Float64(x * Float64(4.96031746031746e-5 * Float64(x * x))), 0.08333333333333333), Float64(x * Float64(x * x)), x))
end
code[x_] := N[(x * N[(N[(x * N[(x * N[(4.96031746031746e-5 * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + 0.08333333333333333), $MachinePrecision] * N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision] + x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
x \cdot \mathsf{fma}\left(\mathsf{fma}\left(x, x \cdot \left(4.96031746031746 \cdot 10^{-5} \cdot \left(x \cdot x\right)\right), 0.08333333333333333\right), x \cdot \left(x \cdot x\right), x\right)
\end{array}
Derivation
  1. Initial program 53.4%

    \[\left(e^{x} - 2\right) + e^{-x} \]
  2. Add Preprocessing
  3. Taylor expanded in x around 0

    \[\leadsto \color{blue}{{x}^{2} \cdot \left(1 + {x}^{2} \cdot \left(\frac{1}{12} + {x}^{2} \cdot \left(\frac{1}{360} + \frac{1}{20160} \cdot {x}^{2}\right)\right)\right)} \]
  4. Step-by-step derivation
    1. unpow2N/A

      \[\leadsto \color{blue}{\left(x \cdot x\right)} \cdot \left(1 + {x}^{2} \cdot \left(\frac{1}{12} + {x}^{2} \cdot \left(\frac{1}{360} + \frac{1}{20160} \cdot {x}^{2}\right)\right)\right) \]
    2. associate-*l*N/A

      \[\leadsto \color{blue}{x \cdot \left(x \cdot \left(1 + {x}^{2} \cdot \left(\frac{1}{12} + {x}^{2} \cdot \left(\frac{1}{360} + \frac{1}{20160} \cdot {x}^{2}\right)\right)\right)\right)} \]
    3. lower-*.f64N/A

      \[\leadsto \color{blue}{x \cdot \left(x \cdot \left(1 + {x}^{2} \cdot \left(\frac{1}{12} + {x}^{2} \cdot \left(\frac{1}{360} + \frac{1}{20160} \cdot {x}^{2}\right)\right)\right)\right)} \]
    4. +-commutativeN/A

      \[\leadsto x \cdot \left(x \cdot \color{blue}{\left({x}^{2} \cdot \left(\frac{1}{12} + {x}^{2} \cdot \left(\frac{1}{360} + \frac{1}{20160} \cdot {x}^{2}\right)\right) + 1\right)}\right) \]
    5. distribute-lft-inN/A

      \[\leadsto x \cdot \color{blue}{\left(x \cdot \left({x}^{2} \cdot \left(\frac{1}{12} + {x}^{2} \cdot \left(\frac{1}{360} + \frac{1}{20160} \cdot {x}^{2}\right)\right)\right) + x \cdot 1\right)} \]
    6. associate-*r*N/A

      \[\leadsto x \cdot \left(\color{blue}{\left(x \cdot {x}^{2}\right) \cdot \left(\frac{1}{12} + {x}^{2} \cdot \left(\frac{1}{360} + \frac{1}{20160} \cdot {x}^{2}\right)\right)} + x \cdot 1\right) \]
    7. *-commutativeN/A

      \[\leadsto x \cdot \left(\color{blue}{\left(\frac{1}{12} + {x}^{2} \cdot \left(\frac{1}{360} + \frac{1}{20160} \cdot {x}^{2}\right)\right) \cdot \left(x \cdot {x}^{2}\right)} + x \cdot 1\right) \]
    8. *-rgt-identityN/A

      \[\leadsto x \cdot \left(\left(\frac{1}{12} + {x}^{2} \cdot \left(\frac{1}{360} + \frac{1}{20160} \cdot {x}^{2}\right)\right) \cdot \left(x \cdot {x}^{2}\right) + \color{blue}{x}\right) \]
    9. lower-fma.f64N/A

      \[\leadsto x \cdot \color{blue}{\mathsf{fma}\left(\frac{1}{12} + {x}^{2} \cdot \left(\frac{1}{360} + \frac{1}{20160} \cdot {x}^{2}\right), x \cdot {x}^{2}, x\right)} \]
  5. Simplified98.9%

    \[\leadsto \color{blue}{x \cdot \mathsf{fma}\left(\mathsf{fma}\left(x, x \cdot \mathsf{fma}\left(x \cdot x, 4.96031746031746 \cdot 10^{-5}, 0.002777777777777778\right), 0.08333333333333333\right), x \cdot \left(x \cdot x\right), x\right)} \]
  6. Taylor expanded in x around inf

    \[\leadsto x \cdot \mathsf{fma}\left(\mathsf{fma}\left(x, \color{blue}{\frac{1}{20160} \cdot {x}^{3}}, \frac{1}{12}\right), x \cdot \left(x \cdot x\right), x\right) \]
  7. Step-by-step derivation
    1. unpow3N/A

      \[\leadsto x \cdot \mathsf{fma}\left(\mathsf{fma}\left(x, \frac{1}{20160} \cdot \color{blue}{\left(\left(x \cdot x\right) \cdot x\right)}, \frac{1}{12}\right), x \cdot \left(x \cdot x\right), x\right) \]
    2. unpow2N/A

      \[\leadsto x \cdot \mathsf{fma}\left(\mathsf{fma}\left(x, \frac{1}{20160} \cdot \left(\color{blue}{{x}^{2}} \cdot x\right), \frac{1}{12}\right), x \cdot \left(x \cdot x\right), x\right) \]
    3. associate-*r*N/A

      \[\leadsto x \cdot \mathsf{fma}\left(\mathsf{fma}\left(x, \color{blue}{\left(\frac{1}{20160} \cdot {x}^{2}\right) \cdot x}, \frac{1}{12}\right), x \cdot \left(x \cdot x\right), x\right) \]
    4. *-commutativeN/A

      \[\leadsto x \cdot \mathsf{fma}\left(\mathsf{fma}\left(x, \color{blue}{x \cdot \left(\frac{1}{20160} \cdot {x}^{2}\right)}, \frac{1}{12}\right), x \cdot \left(x \cdot x\right), x\right) \]
    5. lower-*.f64N/A

      \[\leadsto x \cdot \mathsf{fma}\left(\mathsf{fma}\left(x, \color{blue}{x \cdot \left(\frac{1}{20160} \cdot {x}^{2}\right)}, \frac{1}{12}\right), x \cdot \left(x \cdot x\right), x\right) \]
    6. lower-*.f64N/A

      \[\leadsto x \cdot \mathsf{fma}\left(\mathsf{fma}\left(x, x \cdot \color{blue}{\left(\frac{1}{20160} \cdot {x}^{2}\right)}, \frac{1}{12}\right), x \cdot \left(x \cdot x\right), x\right) \]
    7. unpow2N/A

      \[\leadsto x \cdot \mathsf{fma}\left(\mathsf{fma}\left(x, x \cdot \left(\frac{1}{20160} \cdot \color{blue}{\left(x \cdot x\right)}\right), \frac{1}{12}\right), x \cdot \left(x \cdot x\right), x\right) \]
    8. lower-*.f6498.9

      \[\leadsto x \cdot \mathsf{fma}\left(\mathsf{fma}\left(x, x \cdot \left(4.96031746031746 \cdot 10^{-5} \cdot \color{blue}{\left(x \cdot x\right)}\right), 0.08333333333333333\right), x \cdot \left(x \cdot x\right), x\right) \]
  8. Simplified98.9%

    \[\leadsto x \cdot \mathsf{fma}\left(\mathsf{fma}\left(x, \color{blue}{x \cdot \left(4.96031746031746 \cdot 10^{-5} \cdot \left(x \cdot x\right)\right)}, 0.08333333333333333\right), x \cdot \left(x \cdot x\right), x\right) \]
  9. Add Preprocessing

Alternative 4: 98.8% accurate, 7.7× speedup?

\[\begin{array}{l} \\ \mathsf{fma}\left(0.08333333333333333 \cdot \left(x \cdot \left(x \cdot x\right)\right), x, x \cdot x\right) \end{array} \]
(FPCore (x)
 :precision binary64
 (fma (* 0.08333333333333333 (* x (* x x))) x (* x x)))
double code(double x) {
	return fma((0.08333333333333333 * (x * (x * x))), x, (x * x));
}
# Degree-4 Taylor approximation x^2 + x^4/12 of (e^x - 2) + e^(-x).
function code(x)
	return fma(Float64(0.08333333333333333 * Float64(x * Float64(x * x))), x, Float64(x * x))
end
code[x_] := N[(N[(0.08333333333333333 * N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * x + N[(x * x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\mathsf{fma}\left(0.08333333333333333 \cdot \left(x \cdot \left(x \cdot x\right)\right), x, x \cdot x\right)
\end{array}
Derivation
  1. Initial program 53.4%

    \[\left(e^{x} - 2\right) + e^{-x} \]
  2. Add Preprocessing
  3. Taylor expanded in x around 0

    \[\leadsto \color{blue}{{x}^{2} \cdot \left(1 + \frac{1}{12} \cdot {x}^{2}\right)} \]
  4. Step-by-step derivation
    1. unpow2N/A

      \[\leadsto \color{blue}{\left(x \cdot x\right)} \cdot \left(1 + \frac{1}{12} \cdot {x}^{2}\right) \]
    2. associate-*l*N/A

      \[\leadsto \color{blue}{x \cdot \left(x \cdot \left(1 + \frac{1}{12} \cdot {x}^{2}\right)\right)} \]
    3. lower-*.f64N/A

      \[\leadsto \color{blue}{x \cdot \left(x \cdot \left(1 + \frac{1}{12} \cdot {x}^{2}\right)\right)} \]
    4. +-commutativeN/A

      \[\leadsto x \cdot \left(x \cdot \color{blue}{\left(\frac{1}{12} \cdot {x}^{2} + 1\right)}\right) \]
    5. distribute-lft-inN/A

      \[\leadsto x \cdot \color{blue}{\left(x \cdot \left(\frac{1}{12} \cdot {x}^{2}\right) + x \cdot 1\right)} \]
    6. *-rgt-identityN/A

      \[\leadsto x \cdot \left(x \cdot \left(\frac{1}{12} \cdot {x}^{2}\right) + \color{blue}{x}\right) \]
    7. lower-fma.f64N/A

      \[\leadsto x \cdot \color{blue}{\mathsf{fma}\left(x, \frac{1}{12} \cdot {x}^{2}, x\right)} \]
    8. *-commutativeN/A

      \[\leadsto x \cdot \mathsf{fma}\left(x, \color{blue}{{x}^{2} \cdot \frac{1}{12}}, x\right) \]
    9. lower-*.f64N/A

      \[\leadsto x \cdot \mathsf{fma}\left(x, \color{blue}{{x}^{2} \cdot \frac{1}{12}}, x\right) \]
    10. unpow2N/A

      \[\leadsto x \cdot \mathsf{fma}\left(x, \color{blue}{\left(x \cdot x\right)} \cdot \frac{1}{12}, x\right) \]
    11. lower-*.f6498.9

      \[\leadsto x \cdot \mathsf{fma}\left(x, \color{blue}{\left(x \cdot x\right)} \cdot 0.08333333333333333, x\right) \]
  5. Simplified98.9%

    \[\leadsto \color{blue}{x \cdot \mathsf{fma}\left(x, \left(x \cdot x\right) \cdot 0.08333333333333333, x\right)} \]
  6. Step-by-step derivation
    1. lift-*.f64N/A

      \[\leadsto x \cdot \left(x \cdot \left(\color{blue}{\left(x \cdot x\right)} \cdot \frac{1}{12}\right) + x\right) \]
    2. lift-*.f64N/A

      \[\leadsto x \cdot \left(x \cdot \color{blue}{\left(\left(x \cdot x\right) \cdot \frac{1}{12}\right)} + x\right) \]
    3. distribute-rgt-inN/A

      \[\leadsto \color{blue}{\left(x \cdot \left(\left(x \cdot x\right) \cdot \frac{1}{12}\right)\right) \cdot x + x \cdot x} \]
    4. lift-*.f64N/A

      \[\leadsto \left(x \cdot \left(\left(x \cdot x\right) \cdot \frac{1}{12}\right)\right) \cdot x + \color{blue}{x \cdot x} \]
    5. lower-fma.f64N/A

      \[\leadsto \color{blue}{\mathsf{fma}\left(x \cdot \left(\left(x \cdot x\right) \cdot \frac{1}{12}\right), x, x \cdot x\right)} \]
    6. lift-*.f64N/A

      \[\leadsto \mathsf{fma}\left(x \cdot \color{blue}{\left(\left(x \cdot x\right) \cdot \frac{1}{12}\right)}, x, x \cdot x\right) \]
    7. associate-*r*N/A

      \[\leadsto \mathsf{fma}\left(\color{blue}{\left(x \cdot \left(x \cdot x\right)\right) \cdot \frac{1}{12}}, x, x \cdot x\right) \]
    8. lift-*.f64N/A

      \[\leadsto \mathsf{fma}\left(\color{blue}{\left(x \cdot \left(x \cdot x\right)\right)} \cdot \frac{1}{12}, x, x \cdot x\right) \]
    9. lower-*.f6498.9

      \[\leadsto \mathsf{fma}\left(\color{blue}{\left(x \cdot \left(x \cdot x\right)\right) \cdot 0.08333333333333333}, x, x \cdot x\right) \]
  7. Applied egg-rr98.9%

    \[\leadsto \color{blue}{\mathsf{fma}\left(\left(x \cdot \left(x \cdot x\right)\right) \cdot 0.08333333333333333, x, x \cdot x\right)} \]
  8. Final simplification98.9%

    \[\leadsto \mathsf{fma}\left(0.08333333333333333 \cdot \left(x \cdot \left(x \cdot x\right)\right), x, x \cdot x\right) \]
  9. Add Preprocessing

Alternative 5: 98.8% accurate, 9.5× speedup?

\[\begin{array}{l} \\ x \cdot \mathsf{fma}\left(x, x \cdot \left(x \cdot 0.08333333333333333\right), x\right) \end{array} \]
(FPCore (x)
 :precision binary64
 (* x (fma x (* x (* x 0.08333333333333333)) x)))
double code(double x) {
	return x * fma(x, (x * (x * 0.08333333333333333)), x);
}
# Degree-4 Taylor approximation of (e^x - 2) + e^(-x), factored as x * (x + x^3/12).
function code(x)
	return Float64(x * fma(x, Float64(x * Float64(x * 0.08333333333333333)), x))
end
code[x_] := N[(x * N[(x * N[(x * N[(x * 0.08333333333333333), $MachinePrecision]), $MachinePrecision] + x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
x \cdot \mathsf{fma}\left(x, x \cdot \left(x \cdot 0.08333333333333333\right), x\right)
\end{array}
Derivation
  1. Initial program 53.4%

    \[\left(e^{x} - 2\right) + e^{-x} \]
  2. Add Preprocessing
  3. Taylor expanded in x around 0

    \[\leadsto \color{blue}{{x}^{2} \cdot \left(1 + \frac{1}{12} \cdot {x}^{2}\right)} \]
  4. Step-by-step derivation
    1. unpow2N/A

      \[\leadsto \color{blue}{\left(x \cdot x\right)} \cdot \left(1 + \frac{1}{12} \cdot {x}^{2}\right) \]
    2. associate-*l*N/A

      \[\leadsto \color{blue}{x \cdot \left(x \cdot \left(1 + \frac{1}{12} \cdot {x}^{2}\right)\right)} \]
    3. lower-*.f64N/A

      \[\leadsto \color{blue}{x \cdot \left(x \cdot \left(1 + \frac{1}{12} \cdot {x}^{2}\right)\right)} \]
    4. +-commutativeN/A

      \[\leadsto x \cdot \left(x \cdot \color{blue}{\left(\frac{1}{12} \cdot {x}^{2} + 1\right)}\right) \]
    5. distribute-lft-inN/A

      \[\leadsto x \cdot \color{blue}{\left(x \cdot \left(\frac{1}{12} \cdot {x}^{2}\right) + x \cdot 1\right)} \]
    6. *-rgt-identityN/A

      \[\leadsto x \cdot \left(x \cdot \left(\frac{1}{12} \cdot {x}^{2}\right) + \color{blue}{x}\right) \]
    7. lower-fma.f64N/A

      \[\leadsto x \cdot \color{blue}{\mathsf{fma}\left(x, \frac{1}{12} \cdot {x}^{2}, x\right)} \]
    8. *-commutativeN/A

      \[\leadsto x \cdot \mathsf{fma}\left(x, \color{blue}{{x}^{2} \cdot \frac{1}{12}}, x\right) \]
    9. lower-*.f64N/A

      \[\leadsto x \cdot \mathsf{fma}\left(x, \color{blue}{{x}^{2} \cdot \frac{1}{12}}, x\right) \]
    10. unpow2N/A

      \[\leadsto x \cdot \mathsf{fma}\left(x, \color{blue}{\left(x \cdot x\right)} \cdot \frac{1}{12}, x\right) \]
    11. lower-*.f6498.9

      \[\leadsto x \cdot \mathsf{fma}\left(x, \color{blue}{\left(x \cdot x\right)} \cdot 0.08333333333333333, x\right) \]
  5. Simplified98.9%

    \[\leadsto \color{blue}{x \cdot \mathsf{fma}\left(x, \left(x \cdot x\right) \cdot 0.08333333333333333, x\right)} \]
  6. Step-by-step derivation
    1. associate-*l*N/A

      \[\leadsto x \cdot \mathsf{fma}\left(x, \color{blue}{x \cdot \left(x \cdot \frac{1}{12}\right)}, x\right) \]
    2. *-commutativeN/A

      \[\leadsto x \cdot \mathsf{fma}\left(x, x \cdot \color{blue}{\left(\frac{1}{12} \cdot x\right)}, x\right) \]
    3. *-commutativeN/A

      \[\leadsto x \cdot \mathsf{fma}\left(x, \color{blue}{\left(\frac{1}{12} \cdot x\right) \cdot x}, x\right) \]
    4. lower-*.f64N/A

      \[\leadsto x \cdot \mathsf{fma}\left(x, \color{blue}{\left(\frac{1}{12} \cdot x\right) \cdot x}, x\right) \]
    5. *-commutativeN/A

      \[\leadsto x \cdot \mathsf{fma}\left(x, \color{blue}{\left(x \cdot \frac{1}{12}\right)} \cdot x, x\right) \]
    6. lower-*.f6498.9

      \[\leadsto x \cdot \mathsf{fma}\left(x, \color{blue}{\left(x \cdot 0.08333333333333333\right)} \cdot x, x\right) \]
  7. Applied egg-rr98.9%

    \[\leadsto x \cdot \mathsf{fma}\left(x, \color{blue}{\left(x \cdot 0.08333333333333333\right) \cdot x}, x\right) \]
  8. Final simplification98.9%

    \[\leadsto x \cdot \mathsf{fma}\left(x, x \cdot \left(x \cdot 0.08333333333333333\right), x\right) \]
  9. Add Preprocessing

Alternative 6: 98.3% accurate, 34.8× speedup?

\[\begin{array}{l} \\ x \cdot x \end{array} \]
(FPCore (x) :precision binary64 (* x x))
/* x^2: lowest-order Taylor approximation of (e^x - 2) + e^(-x) about 0
 * (Herbie Alternative 6, the fastest variant in the report). */
double code(double x) {
	const double square = x * x;
	return square;
}
! x**2: lowest-order Taylor approximation of (e^x - 2) + e^(-x) about 0.
real(8) function code(x)
    real(8), intent (in) :: x
    code = x * x
end function
// x^2: lowest-order Taylor approximation of (e^x - 2) + e^(-x) about 0.
public static double code(double x) {
	final double square = x * x;
	return square;
}
def code(x):
	# x^2: lowest-order Taylor approximation of (e^x - 2) + e^(-x) about 0.
	square = x * x
	return square
# x^2: lowest-order Taylor approximation of (e^x - 2) + e^(-x) about 0.
function code(x)
	return Float64(x * x)
end
% x^2: lowest-order Taylor approximation of (e^x - 2) + e^(-x) about 0.
function tmp = code(x)
	tmp = x * x;
end
code[x_] := N[(x * x), $MachinePrecision]
\begin{array}{l}

\\
x \cdot x
\end{array}
Derivation
  1. Initial program 53.4%

    \[\left(e^{x} - 2\right) + e^{-x} \]
  2. Add Preprocessing
  3. Taylor expanded in x around 0

    \[\leadsto \color{blue}{{x}^{2}} \]
  4. Step-by-step derivation
    1. unpow2N/A

      \[\leadsto \color{blue}{x \cdot x} \]
    2. lower-*.f6498.7

      \[\leadsto \color{blue}{x \cdot x} \]
  5. Simplified98.7%

    \[\leadsto \color{blue}{x \cdot x} \]
  6. Add Preprocessing

Developer Target 1: 99.9% accurate, 0.9× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_0 := \sinh \left(\frac{x}{2}\right)\\ 4 \cdot \left(t_0 \cdot t_0\right) \end{array} \end{array} \]
(FPCore (x)
 :precision binary64
 (let* ((t_0 (sinh (/ x 2.0)))) (* 4.0 (* t_0 t_0))))
double code(double x) {
	double t_0 = sinh((x / 2.0));
	return 4.0 * (t_0 * t_0);
}
! Mathematically exact rewrite: (e^x - 2) + e^(-x) = 4*sinh(x/2)**2
! (developer target; 99.9% accurate per the report).
real(8) function code(x)
    real(8), intent (in) :: x
    real(8) :: t_0
    t_0 = sinh((x / 2.0d0))
    code = 4.0d0 * (t_0 * t_0)
end function
// Mathematically exact rewrite: (e^x - 2) + e^(-x) = 4*sinh(x/2)^2
// (developer target; 99.9% accurate per the report).
public static double code(double x) {
	double halfSinh = Math.sinh((x / 2.0));
	return 4.0 * (halfSinh * halfSinh);
}
def code(x):
	# Mathematically exact rewrite: (e^x - 2) + e^(-x) = 4*sinh(x/2)^2
	# (developer target; 99.9% accurate per the report).
	half_sinh = math.sinh((x / 2.0))
	return 4.0 * (half_sinh * half_sinh)
# Mathematically exact rewrite: (e^x - 2) + e^(-x) = 4*sinh(x/2)^2
# (developer target; 99.9% accurate per the report).
function code(x)
	t_0 = sinh(Float64(x / 2.0))
	return Float64(4.0 * Float64(t_0 * t_0))
end
% Mathematically exact rewrite: (e^x - 2) + e^(-x) = 4*sinh(x/2)^2
% (developer target; 99.9% accurate per the report).
function tmp = code(x)
	t_0 = sinh((x / 2.0));
	tmp = 4.0 * (t_0 * t_0);
end
code[x_] := Block[{t$95$0 = N[Sinh[N[(x / 2.0), $MachinePrecision]], $MachinePrecision]}, N[(4.0 * N[(t$95$0 * t$95$0), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
t_0 := \sinh \left(\frac{x}{2}\right)\\
4 \cdot \left(t_0 \cdot t_0\right)
\end{array}
\end{array}

Reproduce

?
herbie shell --seed 2024215 
(FPCore (x)
  :name "exp2 (problem 3.3.7)"
  :precision binary64
  :pre (<= (fabs x) 710.0)

  :alt
  (! :herbie-platform default (* 4 (* (sinh (/ x 2)) (sinh (/ x 2)))))

  (+ (- (exp x) 2.0) (exp (- x))))