exp2 (problem 3.3.7)

Percentage Accurate: 53.0% → 99.2%
Time: 13.2s
Alternatives: 8
Speedup: 34.8×

Specification

\[\left|x\right| \leq 710\]
\[\left(e^{x} - 2\right) + e^{-x}\]
(FPCore (x) :precision binary64 (+ (- (exp x) 2.0) (exp (- x))))
double code(double x) {
	return (exp(x) - 2.0) + exp(-x);
}
real(8) function code(x)
    real(8), intent (in) :: x
    code = (exp(x) - 2.0d0) + exp(-x)
end function
public static double code(double x) {
	return (Math.exp(x) - 2.0) + Math.exp(-x);
}
def code(x):
	return (math.exp(x) - 2.0) + math.exp(-x)
function code(x)
	return Float64(Float64(exp(x) - 2.0) + exp(Float64(-x)))
end
function tmp = code(x)
	tmp = (exp(x) - 2.0) + exp(-x);
end
code[x_] := N[(N[(N[Exp[x], $MachinePrecision] - 2.0), $MachinePrecision] + N[Exp[(-x)], $MachinePrecision]), $MachinePrecision]

Sampling outcomes in binary64 precision:

Local Percentage Accuracy vs x

The average percentage accuracy by input value. The horizontal axis shows the value of an input variable; the variable is chosen in the title. The vertical axis is accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion; these can be toggled with buttons below the plot. The line shows the average, and the dots represent individual samples.

Accuracy vs Speed

Herbie found 8 alternatives:

Alternative  Accuracy  Speedup
1            99.2%      4.8×
2            99.2%      4.8×
3            99.1%      5.5×
4            99.1%      5.5×
5            99.1%      6.3×
6            98.9%      9.5×
7            98.9%      9.5×
8            98.4%     34.8×

The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed-accuracy tradeoffs.

Initial Program: 53.0% accurate, 1.0× speedup

\[\left(e^{x} - 2\right) + e^{-x}\]
(FPCore (x) :precision binary64 (+ (- (exp x) 2.0) (exp (- x))))
double code(double x) {
	return (exp(x) - 2.0) + exp(-x);
}
real(8) function code(x)
    real(8), intent (in) :: x
    code = (exp(x) - 2.0d0) + exp(-x)
end function
public static double code(double x) {
	return (Math.exp(x) - 2.0) + Math.exp(-x);
}
def code(x):
	return (math.exp(x) - 2.0) + math.exp(-x)
function code(x)
	return Float64(Float64(exp(x) - 2.0) + exp(Float64(-x)))
end
function tmp = code(x)
	tmp = (exp(x) - 2.0) + exp(-x);
end
code[x_] := N[(N[(N[Exp[x], $MachinePrecision] - 2.0), $MachinePrecision] + N[Exp[(-x)], $MachinePrecision]), $MachinePrecision]
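
The low initial accuracy has a simple cause: near x = 0, e^x − 2 ≈ −1 + x and e^{−x} ≈ 1 − x, so the final addition cancels almost every significant bit. A minimal sketch of the effect in Julia (helper names are ours; BigFloat serves as the high-precision reference):

# Naive binary64 evaluation vs. a BigFloat reference of the same expression.
naive(x) = (exp(x) - 2.0) + exp(-x)
reference(x) = Float64((exp(big(x)) - 2) + exp(-big(x)))

for x in (1e-3, 1e-6, 1e-9)
    # The true value is about x^2, so the naive result loses roughly
    # two significant digits for every decade x shrinks.
    println("x = $x: naive = $(naive(x))  reference = $(reference(x))")
end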

Alternative 1: 99.2% accurate, 4.8× speedup

\[x \cdot \mathsf{fma}\left(x \cdot x, x \cdot \mathsf{fma}\left(x, x \cdot \mathsf{fma}\left(x, x \cdot 4.96031746031746 \cdot 10^{-5}, 0.002777777777777778\right), 0.08333333333333333\right), x\right)\]
(FPCore (x)
 :precision binary64
 (*
  x
  (fma
   (* x x)
   (*
    x
    (fma
     x
     (* x (fma x (* x 4.96031746031746e-5) 0.002777777777777778))
     0.08333333333333333))
   x)))
double code(double x) {
	return x * fma((x * x), (x * fma(x, (x * fma(x, (x * 4.96031746031746e-5), 0.002777777777777778)), 0.08333333333333333)), x);
}
function code(x)
	return Float64(x * fma(Float64(x * x), Float64(x * fma(x, Float64(x * fma(x, Float64(x * 4.96031746031746e-5), 0.002777777777777778)), 0.08333333333333333)), x))
end
code[x_] := N[(x * N[(N[(x * x), $MachinePrecision] * N[(x * N[(x * N[(x * N[(x * N[(x * 4.96031746031746e-5), $MachinePrecision] + 0.002777777777777778), $MachinePrecision]), $MachinePrecision] + 0.08333333333333333), $MachinePrecision]), $MachinePrecision] + x), $MachinePrecision]), $MachinePrecision]
Derivation
  1. Initial program [53.8%]

    \[\left(e^{x} - 2\right) + e^{-x} \]
  2. Add Preprocessing
  3. Taylor expanded in x around 0

    \[\leadsto \color{blue}{{x}^{2} \cdot \left(1 + {x}^{2} \cdot \left(\frac{1}{12} + {x}^{2} \cdot \left(\frac{1}{360} + \frac{1}{20160} \cdot {x}^{2}\right)\right)\right)} \]
  4. Step-by-step derivation
    1. unpow2 [N/A]

      \[\leadsto \color{blue}{\left(x \cdot x\right)} \cdot \left(1 + {x}^{2} \cdot \left(\frac{1}{12} + {x}^{2} \cdot \left(\frac{1}{360} + \frac{1}{20160} \cdot {x}^{2}\right)\right)\right) \]
    2. associate-*l* [N/A]

      \[\leadsto \color{blue}{x \cdot \left(x \cdot \left(1 + {x}^{2} \cdot \left(\frac{1}{12} + {x}^{2} \cdot \left(\frac{1}{360} + \frac{1}{20160} \cdot {x}^{2}\right)\right)\right)\right)} \]
    3. lower-*.f64 [N/A]

      \[\leadsto \color{blue}{x \cdot \left(x \cdot \left(1 + {x}^{2} \cdot \left(\frac{1}{12} + {x}^{2} \cdot \left(\frac{1}{360} + \frac{1}{20160} \cdot {x}^{2}\right)\right)\right)\right)} \]
    4. +-commutative [N/A]

      \[\leadsto x \cdot \left(x \cdot \color{blue}{\left({x}^{2} \cdot \left(\frac{1}{12} + {x}^{2} \cdot \left(\frac{1}{360} + \frac{1}{20160} \cdot {x}^{2}\right)\right) + 1\right)}\right) \]
    5. distribute-lft-in [N/A]

      \[\leadsto x \cdot \color{blue}{\left(x \cdot \left({x}^{2} \cdot \left(\frac{1}{12} + {x}^{2} \cdot \left(\frac{1}{360} + \frac{1}{20160} \cdot {x}^{2}\right)\right)\right) + x \cdot 1\right)} \]
    6. associate-*r* [N/A]

      \[\leadsto x \cdot \left(\color{blue}{\left(x \cdot {x}^{2}\right) \cdot \left(\frac{1}{12} + {x}^{2} \cdot \left(\frac{1}{360} + \frac{1}{20160} \cdot {x}^{2}\right)\right)} + x \cdot 1\right) \]
    7. *-commutative [N/A]

      \[\leadsto x \cdot \left(\color{blue}{\left(\frac{1}{12} + {x}^{2} \cdot \left(\frac{1}{360} + \frac{1}{20160} \cdot {x}^{2}\right)\right) \cdot \left(x \cdot {x}^{2}\right)} + x \cdot 1\right) \]
    8. *-rgt-identity [N/A]

      \[\leadsto x \cdot \left(\left(\frac{1}{12} + {x}^{2} \cdot \left(\frac{1}{360} + \frac{1}{20160} \cdot {x}^{2}\right)\right) \cdot \left(x \cdot {x}^{2}\right) + \color{blue}{x}\right) \]
    9. lower-fma.f64 [N/A]

      \[\leadsto x \cdot \color{blue}{\mathsf{fma}\left(\frac{1}{12} + {x}^{2} \cdot \left(\frac{1}{360} + \frac{1}{20160} \cdot {x}^{2}\right), x \cdot {x}^{2}, x\right)} \]
  5. Applied rewrites [98.7%]

    \[\leadsto \color{blue}{x \cdot \mathsf{fma}\left(\mathsf{fma}\left(x, x \cdot \mathsf{fma}\left(x \cdot x, 4.96031746031746 \cdot 10^{-5}, 0.002777777777777778\right), 0.08333333333333333\right), x \cdot \left(x \cdot x\right), x\right)} \]
  6. Step-by-step derivation
    1. Applied rewrites [98.7%]

      \[\leadsto \mathsf{fma}\left(x \cdot x, x \cdot \mathsf{fma}\left(x, x \cdot \mathsf{fma}\left(x, x \cdot 4.96031746031746 \cdot 10^{-5}, 0.002777777777777778\right), 0.08333333333333333\right), x\right) \cdot \color{blue}{x} \]
    2. Final simplification [98.7%]

      \[\leadsto x \cdot \mathsf{fma}\left(x \cdot x, x \cdot \mathsf{fma}\left(x, x \cdot \mathsf{fma}\left(x, x \cdot 4.96031746031746 \cdot 10^{-5}, 0.002777777777777778\right), 0.08333333333333333\right), x\right) \]
    3. Add Preprocessing
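
The Taylor step in this derivation is easy to check numerically: the degree-8 polynomial x²(1 + x²(1/12 + x²(1/360 + x²/20160))) agrees with the exact expression to near machine precision for small |x| and drifts as |x| grows, which is why the alternative tops out near 99.2% rather than 100%. A quick Julia sketch, with our own helper names:

# Truncated Taylor series from step 3 vs. a BigFloat reference.
poly(x) = x^2 * (1 + x^2 * (1/12 + x^2 * (1/360 + x^2 / 20160)))
reference(x) = Float64((exp(big(x)) - 2) + exp(-big(x)))

for x in (0.1, 0.5, 1.0, 4.0)
    # Agreement is excellent at 0.1 and visibly degrades by x = 4.
    println("x = $x: poly = $(poly(x))  reference = $(reference(x))")
end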

Alternative 2: 99.2% accurate, 4.8× speedup

\[x \cdot \mathsf{fma}\left(\mathsf{fma}\left(x, x \cdot \mathsf{fma}\left(x \cdot x, 4.96031746031746 \cdot 10^{-5}, 0.002777777777777778\right), 0.08333333333333333\right), x \cdot \left(x \cdot x\right), x\right)\]
(FPCore (x)
 :precision binary64
 (*
  x
  (fma
   (fma
    x
    (* x (fma (* x x) 4.96031746031746e-5 0.002777777777777778))
    0.08333333333333333)
   (* x (* x x))
   x)))
double code(double x) {
	return x * fma(fma(x, (x * fma((x * x), 4.96031746031746e-5, 0.002777777777777778)), 0.08333333333333333), (x * (x * x)), x);
}

function code(x)
	return Float64(x * fma(fma(x, Float64(x * fma(Float64(x * x), 4.96031746031746e-5, 0.002777777777777778)), 0.08333333333333333), Float64(x * Float64(x * x)), x))
end

code[x_] := N[(x * N[(N[(x * N[(x * N[(N[(x * x), $MachinePrecision] * 4.96031746031746e-5 + 0.002777777777777778), $MachinePrecision]), $MachinePrecision] + 0.08333333333333333), $MachinePrecision] * N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision] + x), $MachinePrecision]), $MachinePrecision]

Derivation
  1. Initial program [53.8%]

    \[\left(e^{x} - 2\right) + e^{-x} \]
  2. Add Preprocessing
  3. Taylor expanded in x around 0

    \[\leadsto \color{blue}{{x}^{2} \cdot \left(1 + {x}^{2} \cdot \left(\frac{1}{12} + {x}^{2} \cdot \left(\frac{1}{360} + \frac{1}{20160} \cdot {x}^{2}\right)\right)\right)} \]
  4. Step-by-step derivation
    1. unpow2 [N/A]

      \[\leadsto \color{blue}{\left(x \cdot x\right)} \cdot \left(1 + {x}^{2} \cdot \left(\frac{1}{12} + {x}^{2} \cdot \left(\frac{1}{360} + \frac{1}{20160} \cdot {x}^{2}\right)\right)\right) \]
    2. associate-*l* [N/A]

      \[\leadsto \color{blue}{x \cdot \left(x \cdot \left(1 + {x}^{2} \cdot \left(\frac{1}{12} + {x}^{2} \cdot \left(\frac{1}{360} + \frac{1}{20160} \cdot {x}^{2}\right)\right)\right)\right)} \]
    3. lower-*.f64 [N/A]

      \[\leadsto \color{blue}{x \cdot \left(x \cdot \left(1 + {x}^{2} \cdot \left(\frac{1}{12} + {x}^{2} \cdot \left(\frac{1}{360} + \frac{1}{20160} \cdot {x}^{2}\right)\right)\right)\right)} \]
    4. +-commutative [N/A]

      \[\leadsto x \cdot \left(x \cdot \color{blue}{\left({x}^{2} \cdot \left(\frac{1}{12} + {x}^{2} \cdot \left(\frac{1}{360} + \frac{1}{20160} \cdot {x}^{2}\right)\right) + 1\right)}\right) \]
    5. distribute-lft-in [N/A]

      \[\leadsto x \cdot \color{blue}{\left(x \cdot \left({x}^{2} \cdot \left(\frac{1}{12} + {x}^{2} \cdot \left(\frac{1}{360} + \frac{1}{20160} \cdot {x}^{2}\right)\right)\right) + x \cdot 1\right)} \]
    6. associate-*r* [N/A]

      \[\leadsto x \cdot \left(\color{blue}{\left(x \cdot {x}^{2}\right) \cdot \left(\frac{1}{12} + {x}^{2} \cdot \left(\frac{1}{360} + \frac{1}{20160} \cdot {x}^{2}\right)\right)} + x \cdot 1\right) \]
    7. *-commutative [N/A]

      \[\leadsto x \cdot \left(\color{blue}{\left(\frac{1}{12} + {x}^{2} \cdot \left(\frac{1}{360} + \frac{1}{20160} \cdot {x}^{2}\right)\right) \cdot \left(x \cdot {x}^{2}\right)} + x \cdot 1\right) \]
    8. *-rgt-identity [N/A]

      \[\leadsto x \cdot \left(\left(\frac{1}{12} + {x}^{2} \cdot \left(\frac{1}{360} + \frac{1}{20160} \cdot {x}^{2}\right)\right) \cdot \left(x \cdot {x}^{2}\right) + \color{blue}{x}\right) \]
    9. lower-fma.f64 [N/A]

      \[\leadsto x \cdot \color{blue}{\mathsf{fma}\left(\frac{1}{12} + {x}^{2} \cdot \left(\frac{1}{360} + \frac{1}{20160} \cdot {x}^{2}\right), x \cdot {x}^{2}, x\right)} \]
  5. Applied rewrites [98.7%]

    \[\leadsto \color{blue}{x \cdot \mathsf{fma}\left(\mathsf{fma}\left(x, x \cdot \mathsf{fma}\left(x \cdot x, 4.96031746031746 \cdot 10^{-5}, 0.002777777777777778\right), 0.08333333333333333\right), x \cdot \left(x \cdot x\right), x\right)} \]
  6. Add Preprocessing

Alternative 3: 99.1% accurate, 5.5× speedup

\[\mathsf{fma}\left(x, x, \left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \mathsf{fma}\left(x, x \cdot 0.002777777777777778, 0.08333333333333333\right)\right)\]
(FPCore (x)
 :precision binary64
 (fma
  x
  x
  (*
   (* (* x x) (* x x))
   (fma x (* x 0.002777777777777778) 0.08333333333333333))))
double code(double x) {
	return fma(x, x, (((x * x) * (x * x)) * fma(x, (x * 0.002777777777777778), 0.08333333333333333)));
}

function code(x)
	return fma(x, x, Float64(Float64(Float64(x * x) * Float64(x * x)) * fma(x, Float64(x * 0.002777777777777778), 0.08333333333333333)))
end

code[x_] := N[(x * x + N[(N[(N[(x * x), $MachinePrecision] * N[(x * x), $MachinePrecision]), $MachinePrecision] * N[(x * N[(x * 0.002777777777777778), $MachinePrecision] + 0.08333333333333333), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]

Derivation
  1. Initial program [53.8%]

    \[\left(e^{x} - 2\right) + e^{-x} \]
  2. Add Preprocessing
  3. Taylor expanded in x around 0

    \[\leadsto \color{blue}{{x}^{2} \cdot \left(1 + {x}^{2} \cdot \left(\frac{1}{12} + \frac{1}{360} \cdot {x}^{2}\right)\right)} \]
  4. Step-by-step derivation
    1. unpow2 [N/A]

      \[\leadsto \color{blue}{\left(x \cdot x\right)} \cdot \left(1 + {x}^{2} \cdot \left(\frac{1}{12} + \frac{1}{360} \cdot {x}^{2}\right)\right) \]
    2. associate-*l* [N/A]

      \[\leadsto \color{blue}{x \cdot \left(x \cdot \left(1 + {x}^{2} \cdot \left(\frac{1}{12} + \frac{1}{360} \cdot {x}^{2}\right)\right)\right)} \]
    3. *-commutative [N/A]

      \[\leadsto x \cdot \color{blue}{\left(\left(1 + {x}^{2} \cdot \left(\frac{1}{12} + \frac{1}{360} \cdot {x}^{2}\right)\right) \cdot x\right)} \]
    4. lower-*.f64 [N/A]

      \[\leadsto \color{blue}{x \cdot \left(\left(1 + {x}^{2} \cdot \left(\frac{1}{12} + \frac{1}{360} \cdot {x}^{2}\right)\right) \cdot x\right)} \]
    5. +-commutative [N/A]

      \[\leadsto x \cdot \left(\color{blue}{\left({x}^{2} \cdot \left(\frac{1}{12} + \frac{1}{360} \cdot {x}^{2}\right) + 1\right)} \cdot x\right) \]
    6. distribute-lft1-in [N/A]

      \[\leadsto x \cdot \color{blue}{\left(\left({x}^{2} \cdot \left(\frac{1}{12} + \frac{1}{360} \cdot {x}^{2}\right)\right) \cdot x + x\right)} \]
    7. associate-*l* [N/A]

      \[\leadsto x \cdot \left(\color{blue}{{x}^{2} \cdot \left(\left(\frac{1}{12} + \frac{1}{360} \cdot {x}^{2}\right) \cdot x\right)} + x\right) \]
    8. lower-fma.f64 [N/A]

      \[\leadsto x \cdot \color{blue}{\mathsf{fma}\left({x}^{2}, \left(\frac{1}{12} + \frac{1}{360} \cdot {x}^{2}\right) \cdot x, x\right)} \]
    9. unpow2 [N/A]

      \[\leadsto x \cdot \mathsf{fma}\left(\color{blue}{x \cdot x}, \left(\frac{1}{12} + \frac{1}{360} \cdot {x}^{2}\right) \cdot x, x\right) \]
    10. lower-*.f64 [N/A]

      \[\leadsto x \cdot \mathsf{fma}\left(\color{blue}{x \cdot x}, \left(\frac{1}{12} + \frac{1}{360} \cdot {x}^{2}\right) \cdot x, x\right) \]
    11. *-commutative [N/A]

      \[\leadsto x \cdot \mathsf{fma}\left(x \cdot x, \color{blue}{x \cdot \left(\frac{1}{12} + \frac{1}{360} \cdot {x}^{2}\right)}, x\right) \]
    12. lower-*.f64 [N/A]

      \[\leadsto x \cdot \mathsf{fma}\left(x \cdot x, \color{blue}{x \cdot \left(\frac{1}{12} + \frac{1}{360} \cdot {x}^{2}\right)}, x\right) \]
    13. +-commutative [N/A]

      \[\leadsto x \cdot \mathsf{fma}\left(x \cdot x, x \cdot \color{blue}{\left(\frac{1}{360} \cdot {x}^{2} + \frac{1}{12}\right)}, x\right) \]
    14. *-commutative [N/A]

      \[\leadsto x \cdot \mathsf{fma}\left(x \cdot x, x \cdot \left(\color{blue}{{x}^{2} \cdot \frac{1}{360}} + \frac{1}{12}\right), x\right) \]
    15. lower-fma.f64 [N/A]

      \[\leadsto x \cdot \mathsf{fma}\left(x \cdot x, x \cdot \color{blue}{\mathsf{fma}\left({x}^{2}, \frac{1}{360}, \frac{1}{12}\right)}, x\right) \]
    16. unpow2 [N/A]

      \[\leadsto x \cdot \mathsf{fma}\left(x \cdot x, x \cdot \mathsf{fma}\left(\color{blue}{x \cdot x}, \frac{1}{360}, \frac{1}{12}\right), x\right) \]
    17. lower-*.f64 [98.6%]

      \[\leadsto x \cdot \mathsf{fma}\left(x \cdot x, x \cdot \mathsf{fma}\left(\color{blue}{x \cdot x}, 0.002777777777777778, 0.08333333333333333\right), x\right) \]
  5. Applied rewrites [98.6%]

    \[\leadsto \color{blue}{x \cdot \mathsf{fma}\left(x \cdot x, x \cdot \mathsf{fma}\left(x \cdot x, 0.002777777777777778, 0.08333333333333333\right), x\right)} \]
  6. Step-by-step derivation
    1. Applied rewrites [98.6%]

      \[\leadsto x \cdot \left(\left(x \cdot x\right) \cdot \left(x \cdot \mathsf{fma}\left(x, x \cdot 0.002777777777777778, 0.08333333333333333\right)\right) + \color{blue}{x}\right) \]
    2. Step-by-step derivation
      1. Applied rewrites [98.6%]

        \[\leadsto \mathsf{fma}\left(x, \color{blue}{x}, \left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \mathsf{fma}\left(x, x \cdot 0.002777777777777778, 0.08333333333333333\right)\right) \]
      2. Add Preprocessing

Alternative 4: 99.1% accurate, 5.5× speedup

\[\mathsf{fma}\left(x, x, \left(x \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot \mathsf{fma}\left(x, x \cdot 0.002777777777777778, 0.08333333333333333\right)\right)\right)\]
(FPCore (x)
 :precision binary64
 (fma
  x
  x
  (*
   (* x (* x x))
   (* x (fma x (* x 0.002777777777777778) 0.08333333333333333)))))
double code(double x) {
	return fma(x, x, ((x * (x * x)) * (x * fma(x, (x * 0.002777777777777778), 0.08333333333333333))));
}

function code(x)
	return fma(x, x, Float64(Float64(x * Float64(x * x)) * Float64(x * fma(x, Float64(x * 0.002777777777777778), 0.08333333333333333))))
end

code[x_] := N[(x * x + N[(N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision] * N[(x * N[(x * N[(x * 0.002777777777777778), $MachinePrecision] + 0.08333333333333333), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]

Derivation
  1. Initial program [53.8%]

    \[\left(e^{x} - 2\right) + e^{-x} \]
  2. Add Preprocessing
  3. Taylor expanded in x around 0

    \[\leadsto \color{blue}{{x}^{2} \cdot \left(1 + {x}^{2} \cdot \left(\frac{1}{12} + \frac{1}{360} \cdot {x}^{2}\right)\right)} \]
  4. Step-by-step derivation
    1. unpow2 [N/A]

      \[\leadsto \color{blue}{\left(x \cdot x\right)} \cdot \left(1 + {x}^{2} \cdot \left(\frac{1}{12} + \frac{1}{360} \cdot {x}^{2}\right)\right) \]
    2. associate-*l* [N/A]

      \[\leadsto \color{blue}{x \cdot \left(x \cdot \left(1 + {x}^{2} \cdot \left(\frac{1}{12} + \frac{1}{360} \cdot {x}^{2}\right)\right)\right)} \]
    3. *-commutative [N/A]

      \[\leadsto x \cdot \color{blue}{\left(\left(1 + {x}^{2} \cdot \left(\frac{1}{12} + \frac{1}{360} \cdot {x}^{2}\right)\right) \cdot x\right)} \]
    4. lower-*.f64 [N/A]

      \[\leadsto \color{blue}{x \cdot \left(\left(1 + {x}^{2} \cdot \left(\frac{1}{12} + \frac{1}{360} \cdot {x}^{2}\right)\right) \cdot x\right)} \]
    5. +-commutative [N/A]

      \[\leadsto x \cdot \left(\color{blue}{\left({x}^{2} \cdot \left(\frac{1}{12} + \frac{1}{360} \cdot {x}^{2}\right) + 1\right)} \cdot x\right) \]
    6. distribute-lft1-in [N/A]

      \[\leadsto x \cdot \color{blue}{\left(\left({x}^{2} \cdot \left(\frac{1}{12} + \frac{1}{360} \cdot {x}^{2}\right)\right) \cdot x + x\right)} \]
    7. associate-*l* [N/A]

      \[\leadsto x \cdot \left(\color{blue}{{x}^{2} \cdot \left(\left(\frac{1}{12} + \frac{1}{360} \cdot {x}^{2}\right) \cdot x\right)} + x\right) \]
    8. lower-fma.f64 [N/A]

      \[\leadsto x \cdot \color{blue}{\mathsf{fma}\left({x}^{2}, \left(\frac{1}{12} + \frac{1}{360} \cdot {x}^{2}\right) \cdot x, x\right)} \]
    9. unpow2 [N/A]

      \[\leadsto x \cdot \mathsf{fma}\left(\color{blue}{x \cdot x}, \left(\frac{1}{12} + \frac{1}{360} \cdot {x}^{2}\right) \cdot x, x\right) \]
    10. lower-*.f64 [N/A]

      \[\leadsto x \cdot \mathsf{fma}\left(\color{blue}{x \cdot x}, \left(\frac{1}{12} + \frac{1}{360} \cdot {x}^{2}\right) \cdot x, x\right) \]
    11. *-commutative [N/A]

      \[\leadsto x \cdot \mathsf{fma}\left(x \cdot x, \color{blue}{x \cdot \left(\frac{1}{12} + \frac{1}{360} \cdot {x}^{2}\right)}, x\right) \]
    12. lower-*.f64 [N/A]

      \[\leadsto x \cdot \mathsf{fma}\left(x \cdot x, \color{blue}{x \cdot \left(\frac{1}{12} + \frac{1}{360} \cdot {x}^{2}\right)}, x\right) \]
    13. +-commutative [N/A]

      \[\leadsto x \cdot \mathsf{fma}\left(x \cdot x, x \cdot \color{blue}{\left(\frac{1}{360} \cdot {x}^{2} + \frac{1}{12}\right)}, x\right) \]
    14. *-commutative [N/A]

      \[\leadsto x \cdot \mathsf{fma}\left(x \cdot x, x \cdot \left(\color{blue}{{x}^{2} \cdot \frac{1}{360}} + \frac{1}{12}\right), x\right) \]
    15. lower-fma.f64 [N/A]

      \[\leadsto x \cdot \mathsf{fma}\left(x \cdot x, x \cdot \color{blue}{\mathsf{fma}\left({x}^{2}, \frac{1}{360}, \frac{1}{12}\right)}, x\right) \]
    16. unpow2 [N/A]

      \[\leadsto x \cdot \mathsf{fma}\left(x \cdot x, x \cdot \mathsf{fma}\left(\color{blue}{x \cdot x}, \frac{1}{360}, \frac{1}{12}\right), x\right) \]
    17. lower-*.f64 [98.6%]

      \[\leadsto x \cdot \mathsf{fma}\left(x \cdot x, x \cdot \mathsf{fma}\left(\color{blue}{x \cdot x}, 0.002777777777777778, 0.08333333333333333\right), x\right) \]
  5. Applied rewrites [98.6%]

    \[\leadsto \color{blue}{x \cdot \mathsf{fma}\left(x \cdot x, x \cdot \mathsf{fma}\left(x \cdot x, 0.002777777777777778, 0.08333333333333333\right), x\right)} \]
  6. Step-by-step derivation
    1. Applied rewrites [98.6%]

      \[\leadsto \mathsf{fma}\left(x, \color{blue}{x}, \left(x \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot \mathsf{fma}\left(x, x \cdot 0.002777777777777778, 0.08333333333333333\right)\right)\right) \]
    2. Add Preprocessing

Alternative 5: 99.1% accurate, 6.3× speedup

\[x \cdot \mathsf{fma}\left(x \cdot x, x \cdot \mathsf{fma}\left(x \cdot x, 0.002777777777777778, 0.08333333333333333\right), x\right)\]
(FPCore (x)
 :precision binary64
 (*
  x
  (fma
   (* x x)
   (* x (fma (* x x) 0.002777777777777778 0.08333333333333333))
   x)))
double code(double x) {
	return x * fma((x * x), (x * fma((x * x), 0.002777777777777778, 0.08333333333333333)), x);
}

function code(x)
	return Float64(x * fma(Float64(x * x), Float64(x * fma(Float64(x * x), 0.002777777777777778, 0.08333333333333333)), x))
end

code[x_] := N[(x * N[(N[(x * x), $MachinePrecision] * N[(x * N[(N[(x * x), $MachinePrecision] * 0.002777777777777778 + 0.08333333333333333), $MachinePrecision]), $MachinePrecision] + x), $MachinePrecision]), $MachinePrecision]

Derivation
  1. Initial program [53.8%]

    \[\left(e^{x} - 2\right) + e^{-x} \]
  2. Add Preprocessing
  3. Taylor expanded in x around 0

    \[\leadsto \color{blue}{{x}^{2} \cdot \left(1 + {x}^{2} \cdot \left(\frac{1}{12} + \frac{1}{360} \cdot {x}^{2}\right)\right)} \]
  4. Step-by-step derivation
    1. unpow2 [N/A]

      \[\leadsto \color{blue}{\left(x \cdot x\right)} \cdot \left(1 + {x}^{2} \cdot \left(\frac{1}{12} + \frac{1}{360} \cdot {x}^{2}\right)\right) \]
    2. associate-*l* [N/A]

      \[\leadsto \color{blue}{x \cdot \left(x \cdot \left(1 + {x}^{2} \cdot \left(\frac{1}{12} + \frac{1}{360} \cdot {x}^{2}\right)\right)\right)} \]
    3. *-commutative [N/A]

      \[\leadsto x \cdot \color{blue}{\left(\left(1 + {x}^{2} \cdot \left(\frac{1}{12} + \frac{1}{360} \cdot {x}^{2}\right)\right) \cdot x\right)} \]
    4. lower-*.f64 [N/A]

      \[\leadsto \color{blue}{x \cdot \left(\left(1 + {x}^{2} \cdot \left(\frac{1}{12} + \frac{1}{360} \cdot {x}^{2}\right)\right) \cdot x\right)} \]
    5. +-commutative [N/A]

      \[\leadsto x \cdot \left(\color{blue}{\left({x}^{2} \cdot \left(\frac{1}{12} + \frac{1}{360} \cdot {x}^{2}\right) + 1\right)} \cdot x\right) \]
    6. distribute-lft1-in [N/A]

      \[\leadsto x \cdot \color{blue}{\left(\left({x}^{2} \cdot \left(\frac{1}{12} + \frac{1}{360} \cdot {x}^{2}\right)\right) \cdot x + x\right)} \]
    7. associate-*l* [N/A]

      \[\leadsto x \cdot \left(\color{blue}{{x}^{2} \cdot \left(\left(\frac{1}{12} + \frac{1}{360} \cdot {x}^{2}\right) \cdot x\right)} + x\right) \]
    8. lower-fma.f64 [N/A]

      \[\leadsto x \cdot \color{blue}{\mathsf{fma}\left({x}^{2}, \left(\frac{1}{12} + \frac{1}{360} \cdot {x}^{2}\right) \cdot x, x\right)} \]
    9. unpow2 [N/A]

      \[\leadsto x \cdot \mathsf{fma}\left(\color{blue}{x \cdot x}, \left(\frac{1}{12} + \frac{1}{360} \cdot {x}^{2}\right) \cdot x, x\right) \]
    10. lower-*.f64 [N/A]

      \[\leadsto x \cdot \mathsf{fma}\left(\color{blue}{x \cdot x}, \left(\frac{1}{12} + \frac{1}{360} \cdot {x}^{2}\right) \cdot x, x\right) \]
    11. *-commutative [N/A]

      \[\leadsto x \cdot \mathsf{fma}\left(x \cdot x, \color{blue}{x \cdot \left(\frac{1}{12} + \frac{1}{360} \cdot {x}^{2}\right)}, x\right) \]
    12. lower-*.f64 [N/A]

      \[\leadsto x \cdot \mathsf{fma}\left(x \cdot x, \color{blue}{x \cdot \left(\frac{1}{12} + \frac{1}{360} \cdot {x}^{2}\right)}, x\right) \]
    13. +-commutative [N/A]

      \[\leadsto x \cdot \mathsf{fma}\left(x \cdot x, x \cdot \color{blue}{\left(\frac{1}{360} \cdot {x}^{2} + \frac{1}{12}\right)}, x\right) \]
    14. *-commutative [N/A]

      \[\leadsto x \cdot \mathsf{fma}\left(x \cdot x, x \cdot \left(\color{blue}{{x}^{2} \cdot \frac{1}{360}} + \frac{1}{12}\right), x\right) \]
    15. lower-fma.f64 [N/A]

      \[\leadsto x \cdot \mathsf{fma}\left(x \cdot x, x \cdot \color{blue}{\mathsf{fma}\left({x}^{2}, \frac{1}{360}, \frac{1}{12}\right)}, x\right) \]
    16. unpow2 [N/A]

      \[\leadsto x \cdot \mathsf{fma}\left(x \cdot x, x \cdot \mathsf{fma}\left(\color{blue}{x \cdot x}, \frac{1}{360}, \frac{1}{12}\right), x\right) \]
    17. lower-*.f64 [98.6%]

      \[\leadsto x \cdot \mathsf{fma}\left(x \cdot x, x \cdot \mathsf{fma}\left(\color{blue}{x \cdot x}, 0.002777777777777778, 0.08333333333333333\right), x\right) \]
  5. Applied rewrites [98.6%]

    \[\leadsto \color{blue}{x \cdot \mathsf{fma}\left(x \cdot x, x \cdot \mathsf{fma}\left(x \cdot x, 0.002777777777777778, 0.08333333333333333\right), x\right)} \]
  6. Add Preprocessing
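
Alternative 5 is Alternative 1's polynomial with the x⁸/20160 term dropped, which removes one fma at the cost of a little accuracy at larger |x|. The trade is visible in a direct comparison, sketched here in Julia (fma is the built-in fused multiply-add; the helper names are ours):

# Degree-8 polynomial (Alternative 1) vs. degree-6 (Alternative 5).
alt1(x) = x * fma(x * x, x * fma(x, x * fma(x, x * 4.96031746031746e-5, 0.002777777777777778), 0.08333333333333333), x)
alt5(x) = x * fma(x * x, x * fma(x * x, 0.002777777777777778, 0.08333333333333333), x)
reference(x) = Float64((exp(big(x)) - 2) + exp(-big(x)))

for x in (0.5, 1.0, 2.0)
    # alt5's error grows faster with |x| because it truncates earlier.
    println("x = $x: alt1 error = $(abs(alt1(x) - reference(x)))  alt5 error = $(abs(alt5(x) - reference(x)))")
end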

Alternative 6: 98.9% accurate, 9.5× speedup

\[x \cdot \left(x \cdot \mathsf{fma}\left(x, x \cdot 0.08333333333333333, 1\right)\right)\]
(FPCore (x)
 :precision binary64
 (* x (* x (fma x (* x 0.08333333333333333) 1.0))))
double code(double x) {
	return x * (x * fma(x, (x * 0.08333333333333333), 1.0));
}

function code(x)
	return Float64(x * Float64(x * fma(x, Float64(x * 0.08333333333333333), 1.0)))
end

code[x_] := N[(x * N[(x * N[(x * N[(x * 0.08333333333333333), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]

Derivation
  1. Initial program [53.8%]

    \[\left(e^{x} - 2\right) + e^{-x} \]
  2. Add Preprocessing
  3. Step-by-step derivation
    1. lift-exp.f64 [N/A]

      \[\leadsto \left(e^{x} - 2\right) + \color{blue}{e^{\mathsf{neg}\left(x\right)}} \]
    2. lift-neg.f64 [N/A]

      \[\leadsto \left(e^{x} - 2\right) + e^{\color{blue}{\mathsf{neg}\left(x\right)}} \]
    3. exp-neg [N/A]

      \[\leadsto \left(e^{x} - 2\right) + \color{blue}{\frac{1}{e^{x}}} \]
    4. lift-exp.f64 [N/A]

      \[\leadsto \left(e^{x} - 2\right) + \frac{1}{\color{blue}{e^{x}}} \]
    5. lower-/.f64 [53.9%]

      \[\leadsto \left(e^{x} - 2\right) + \color{blue}{\frac{1}{e^{x}}} \]
  4. Applied rewrites [53.9%]

    \[\leadsto \left(e^{x} - 2\right) + \color{blue}{\frac{1}{e^{x}}} \]
  5. Taylor expanded in x around 0

    \[\leadsto \color{blue}{{x}^{2} \cdot \left(1 + \frac{1}{12} \cdot {x}^{2}\right)} \]
  6. Step-by-step derivation
    1. unpow2 [N/A]

      \[\leadsto \color{blue}{\left(x \cdot x\right)} \cdot \left(1 + \frac{1}{12} \cdot {x}^{2}\right) \]
    2. associate-*l* [N/A]

      \[\leadsto \color{blue}{x \cdot \left(x \cdot \left(1 + \frac{1}{12} \cdot {x}^{2}\right)\right)} \]
    3. lower-*.f64 [N/A]

      \[\leadsto \color{blue}{x \cdot \left(x \cdot \left(1 + \frac{1}{12} \cdot {x}^{2}\right)\right)} \]
    4. +-commutative [N/A]

      \[\leadsto x \cdot \left(x \cdot \color{blue}{\left(\frac{1}{12} \cdot {x}^{2} + 1\right)}\right) \]
    5. distribute-lft-in [N/A]

      \[\leadsto x \cdot \color{blue}{\left(x \cdot \left(\frac{1}{12} \cdot {x}^{2}\right) + x \cdot 1\right)} \]
    6. *-rgt-identity [N/A]

      \[\leadsto x \cdot \left(x \cdot \left(\frac{1}{12} \cdot {x}^{2}\right) + \color{blue}{x}\right) \]
    7. lower-fma.f64 [N/A]

      \[\leadsto x \cdot \color{blue}{\mathsf{fma}\left(x, \frac{1}{12} \cdot {x}^{2}, x\right)} \]
    8. *-commutative [N/A]

      \[\leadsto x \cdot \mathsf{fma}\left(x, \color{blue}{{x}^{2} \cdot \frac{1}{12}}, x\right) \]
    9. lower-*.f64 [N/A]

      \[\leadsto x \cdot \mathsf{fma}\left(x, \color{blue}{{x}^{2} \cdot \frac{1}{12}}, x\right) \]
    10. unpow2 [N/A]

      \[\leadsto x \cdot \mathsf{fma}\left(x, \color{blue}{\left(x \cdot x\right)} \cdot \frac{1}{12}, x\right) \]
    11. lower-*.f64 [98.4%]

      \[\leadsto x \cdot \mathsf{fma}\left(x, \color{blue}{\left(x \cdot x\right)} \cdot 0.08333333333333333, x\right) \]
  7. Applied rewrites [98.4%]

    \[\leadsto \color{blue}{x \cdot \mathsf{fma}\left(x, \left(x \cdot x\right) \cdot 0.08333333333333333, x\right)} \]
  8. Step-by-step derivation
    1. Applied rewrites [98.4%]

      \[\leadsto x \cdot \left(\mathsf{fma}\left(x, x \cdot 0.08333333333333333, 1\right) \cdot \color{blue}{x}\right) \]
    2. Final simplification [98.4%]

      \[\leadsto x \cdot \left(x \cdot \mathsf{fma}\left(x, x \cdot 0.08333333333333333, 1\right)\right) \]
    3. Add Preprocessing

Alternative 7: 98.9% accurate, 9.5× speedup

\[x \cdot \mathsf{fma}\left(x, \left(x \cdot x\right) \cdot 0.08333333333333333, x\right)\]
(FPCore (x)
 :precision binary64
 (* x (fma x (* (* x x) 0.08333333333333333) x)))
double code(double x) {
	return x * fma(x, ((x * x) * 0.08333333333333333), x);
}

function code(x)
	return Float64(x * fma(x, Float64(Float64(x * x) * 0.08333333333333333), x))
end

code[x_] := N[(x * N[(x * N[(N[(x * x), $MachinePrecision] * 0.08333333333333333), $MachinePrecision] + x), $MachinePrecision]), $MachinePrecision]

Derivation
  1. Initial program [53.8%]

    \[\left(e^{x} - 2\right) + e^{-x} \]
  2. Add Preprocessing
  3. Taylor expanded in x around 0

    \[\leadsto \color{blue}{{x}^{2} \cdot \left(1 + \frac{1}{12} \cdot {x}^{2}\right)} \]
  4. Step-by-step derivation
    1. unpow2 [N/A]

      \[\leadsto \color{blue}{\left(x \cdot x\right)} \cdot \left(1 + \frac{1}{12} \cdot {x}^{2}\right) \]
    2. associate-*l* [N/A]

      \[\leadsto \color{blue}{x \cdot \left(x \cdot \left(1 + \frac{1}{12} \cdot {x}^{2}\right)\right)} \]
    3. lower-*.f64 [N/A]

      \[\leadsto \color{blue}{x \cdot \left(x \cdot \left(1 + \frac{1}{12} \cdot {x}^{2}\right)\right)} \]
    4. +-commutative [N/A]

      \[\leadsto x \cdot \left(x \cdot \color{blue}{\left(\frac{1}{12} \cdot {x}^{2} + 1\right)}\right) \]
    5. distribute-lft-in [N/A]

      \[\leadsto x \cdot \color{blue}{\left(x \cdot \left(\frac{1}{12} \cdot {x}^{2}\right) + x \cdot 1\right)} \]
    6. *-rgt-identity [N/A]

      \[\leadsto x \cdot \left(x \cdot \left(\frac{1}{12} \cdot {x}^{2}\right) + \color{blue}{x}\right) \]
    7. lower-fma.f64 [N/A]

      \[\leadsto x \cdot \color{blue}{\mathsf{fma}\left(x, \frac{1}{12} \cdot {x}^{2}, x\right)} \]
    8. *-commutative [N/A]

      \[\leadsto x \cdot \mathsf{fma}\left(x, \color{blue}{{x}^{2} \cdot \frac{1}{12}}, x\right) \]
    9. lower-*.f64 [N/A]

      \[\leadsto x \cdot \mathsf{fma}\left(x, \color{blue}{{x}^{2} \cdot \frac{1}{12}}, x\right) \]
    10. unpow2 [N/A]

      \[\leadsto x \cdot \mathsf{fma}\left(x, \color{blue}{\left(x \cdot x\right)} \cdot \frac{1}{12}, x\right) \]
    11. lower-*.f64 [98.4%]

      \[\leadsto x \cdot \mathsf{fma}\left(x, \color{blue}{\left(x \cdot x\right)} \cdot 0.08333333333333333, x\right) \]
  5. Applied rewrites [98.4%]

    \[\leadsto \color{blue}{x \cdot \mathsf{fma}\left(x, \left(x \cdot x\right) \cdot 0.08333333333333333, x\right)} \]
  6. Add Preprocessing

Alternative 8: 98.4% accurate, 34.8× speedup

\[x \cdot x\]
(FPCore (x) :precision binary64 (* x x))
double code(double x) {
	return x * x;
}

real(8) function code(x)
    real(8), intent (in) :: x
    code = x * x
end function

public static double code(double x) {
	return x * x;
}

def code(x):
	return x * x

function code(x)
	return Float64(x * x)
end

function tmp = code(x)
	tmp = x * x;
end

code[x_] := N[(x * x), $MachinePrecision]

Derivation
  1. Initial program [53.8%]

    \[\left(e^{x} - 2\right) + e^{-x} \]
  2. Add Preprocessing
  3. Taylor expanded in x around 0

    \[\leadsto \color{blue}{{x}^{2}} \]
  4. Step-by-step derivation
    1. unpow2 [N/A]

      \[\leadsto \color{blue}{x \cdot x} \]
    2. lower-*.f64 [97.7%]

      \[\leadsto \color{blue}{x \cdot x} \]
  5. Applied rewrites [97.7%]

    \[\leadsto \color{blue}{x \cdot x} \]
  6. Add Preprocessing
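
Alternative 8 keeps only the leading x² term of the series, so its relative error grows like x²/12: it is by far the cheapest option (no exp calls, no fma), but it only pays off while |x| stays small. A sketch of how the error scales, again in Julia with our own helper names:

# Leading Taylor term vs. a BigFloat reference; relative error ≈ x^2 / 12.
lead(x) = x * x
reference(x) = Float64((exp(big(x)) - 2) + exp(-big(x)))

for x in (0.01, 0.1, 0.5)
    # Expect roughly 8.3e-6, 8.3e-4, and 2e-2 respectively.
    println("x = $x: relative error = $(abs(lead(x) - reference(x)) / reference(x))")
end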

Developer Target 1: 99.9% accurate, 0.9× speedup

\[\begin{array}{l} t_0 := \sinh \left(\frac{x}{2}\right)\\ 4 \cdot \left(t_0 \cdot t_0\right) \end{array}\]
(FPCore (x)
 :precision binary64
 (let* ((t_0 (sinh (/ x 2.0)))) (* 4.0 (* t_0 t_0))))
double code(double x) {
	double t_0 = sinh((x / 2.0));
	return 4.0 * (t_0 * t_0);
}

real(8) function code(x)
    real(8), intent (in) :: x
    real(8) :: t_0
    t_0 = sinh((x / 2.0d0))
    code = 4.0d0 * (t_0 * t_0)
end function

public static double code(double x) {
	double t_0 = Math.sinh((x / 2.0));
	return 4.0 * (t_0 * t_0);
}

def code(x):
	t_0 = math.sinh((x / 2.0))
	return 4.0 * (t_0 * t_0)

function code(x)
	t_0 = sinh(Float64(x / 2.0))
	return Float64(4.0 * Float64(t_0 * t_0))
end

function tmp = code(x)
	t_0 = sinh((x / 2.0));
	tmp = 4.0 * (t_0 * t_0);
end

code[x_] := Block[{t$95$0 = N[Sinh[N[(x / 2.0), $MachinePrecision]], $MachinePrecision]}, N[(4.0 * N[(t$95$0 * t$95$0), $MachinePrecision]), $MachinePrecision]]
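
The target rests on the exact identity e^x − 2 + e^{−x} = (e^{x/2} − e^{−x/2})² = 4·sinh²(x/2): squaring a sinh avoids the cancellation at every scale, not just near 0, which is why it reaches 99.9% accuracy over the whole domain while paying for the sinh call (0.9× speedup). A quick numerical check in Julia (a sketch; the helper names are ours):

# Developer target vs. a BigFloat reference across scales.
target(x) = (t = sinh(x / 2); 4 * (t * t))
reference(x) = Float64((exp(big(x)) - 2) + exp(-big(x)))

for x in (1e-8, 1.0, 100.0)
    println("x = $x: target = $(target(x))  reference = $(reference(x))")
end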
            

Reproduce

herbie shell --seed 2024235
(FPCore (x)
  :name "exp2 (problem 3.3.7)"
  :precision binary64
  :pre (<= (fabs x) 710.0)

  :alt
  (! :herbie-platform default (* 4 (* (sinh (/ x 2)) (sinh (/ x 2)))))

  (+ (- (exp x) 2.0) (exp (- x))))
              (+ (- (exp x) 2.0) (exp (- x))))