exp2 (problem 3.3.7)

Percentage Accurate: 53.8% → 99.1%
Time: 6.4s
Alternatives: 8
Speedup: 34.8×

Specification

\[\left|x\right| \leq 710\]
\[\left(e^{x} - 2\right) + e^{-x}\]
(FPCore (x) :precision binary64 (+ (- (exp x) 2.0) (exp (- x))))
double code(double x) {
	return (exp(x) - 2.0) + exp(-x);
}
module fmin_fmax_functions
    implicit none
    private
    public fmax
    public fmin

    interface fmax
        module procedure fmax88
        module procedure fmax44
        module procedure fmax84
        module procedure fmax48
    end interface
    interface fmin
        module procedure fmin88
        module procedure fmin44
        module procedure fmin84
        module procedure fmin48
    end interface
contains
    ! The merge chains below implement IEEE-style fmax/fmin semantics:
    ! a NaN argument (detected via x /= x, true only for NaN) is ignored
    ! in favor of the other operand.
    real(8) function fmax88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    real(4) function fmax44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    real(8) function fmax84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
    end function
    real(8) function fmax48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
    end function
    real(8) function fmin88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    real(4) function fmin44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    real(8) function fmin84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
    end function
    real(8) function fmin48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
    end function
end module

real(8) function code(x)
use fmin_fmax_functions
    real(8), intent (in) :: x
    code = (exp(x) - 2.0d0) + exp(-x)
end function
public static double code(double x) {
	return (Math.exp(x) - 2.0) + Math.exp(-x);
}
def code(x):
	return (math.exp(x) - 2.0) + math.exp(-x)
function code(x)
	return Float64(Float64(exp(x) - 2.0) + exp(Float64(-x)))
end
function tmp = code(x)
	tmp = (exp(x) - 2.0) + exp(-x);
end
code[x_] := N[(N[(N[Exp[x], $MachinePrecision] - 2.0), $MachinePrecision] + N[Exp[(-x)], $MachinePrecision]), $MachinePrecision]
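
Why this specification is hard to evaluate: for small |x|, both exponentials are close to 1, so exp(x) - 2.0 + exp(-x) subtracts nearly equal quantities and rounding error swamps the true result (approximately x²). A minimal C sketch of the effect, using 4·sinh²(x/2) as an algebraically equal, well-conditioned reference; this harness is illustrative, not part of the Herbie report:

#include <math.h>
#include <stdio.h>

int main(void) {
	/* The naive form loses more significant digits as x approaches 0. */
	for (double x = 1e-1; x >= 1e-8; x /= 10.0) {
		double naive = (exp(x) - 2.0) + exp(-x);
		double s = sinh(x / 2.0);   /* 4*sinh(x/2)^2 equals the spec exactly in real arithmetic */
		double reference = 4.0 * (s * s);
		printf("x = %.0e   naive = %.17g   reference = %.17g\n", x, naive, reference);
	}
	return 0;
}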

Sampling outcomes in binary64 precision.

Local Percentage Accuracy vs x

The average percentage accuracy by input value. The horizontal axis shows the value of an input variable; the variable is chosen in the title. The vertical axis is accuracy; higher is better. Red represents the original program and blue represents Herbie's suggestion; these can be toggled with the buttons below the plot. The line shows the average, while dots represent individual samples.

Accuracy vs Speed

Herbie found 8 alternatives:

Alternative   Accuracy   Speedup
1             99.1%       4.8×
2             99.1%       4.8×
3             99.0%       6.3×
4             99.0%       6.3×
5             98.8%       7.7×
6             98.8%       9.5×
7             98.2%      34.8×
8             51.4%      52.3×

The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed-accuracy tradeoffs.

Initial Program: 53.8% accurate, 1.0× speedup

\[\left(e^{x} - 2\right) + e^{-x}\]
(FPCore (x) :precision binary64 (+ (- (exp x) 2.0) (exp (- x))))
double code(double x) {
	return (exp(x) - 2.0) + exp(-x);
}
module fmin_fmax_functions
    implicit none
    private
    public fmax
    public fmin

    interface fmax
        module procedure fmax88
        module procedure fmax44
        module procedure fmax84
        module procedure fmax48
    end interface
    interface fmin
        module procedure fmin88
        module procedure fmin44
        module procedure fmin84
        module procedure fmin48
    end interface
contains
    real(8) function fmax88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    real(4) function fmax44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    real(8) function fmax84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
    end function
    real(8) function fmax48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
    end function
    real(8) function fmin88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    real(4) function fmin44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    real(8) function fmin84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
    end function
    real(8) function fmin48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
    end function
end module

real(8) function code(x)
use fmin_fmax_functions
    real(8), intent (in) :: x
    code = (exp(x) - 2.0d0) + exp(-x)
end function
public static double code(double x) {
	return (Math.exp(x) - 2.0) + Math.exp(-x);
}
def code(x):
	return (math.exp(x) - 2.0) + math.exp(-x)
function code(x)
	return Float64(Float64(exp(x) - 2.0) + exp(Float64(-x)))
end
function tmp = code(x)
	tmp = (exp(x) - 2.0) + exp(-x);
end
code[x_] := N[(N[(N[Exp[x], $MachinePrecision] - 2.0), $MachinePrecision] + N[Exp[(-x)], $MachinePrecision]), $MachinePrecision]

Alternative 1: 99.1% accurate, 4.8× speedup

\[\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(4.96031746031746 \cdot 10^{-5}, x \cdot x, 0.002777777777777778\right), x \cdot x, 0.08333333333333333\right), x \cdot x, 1\right) \cdot \left(x \cdot x\right)\]
(FPCore (x)
 :precision binary64
 (*
  (fma
   (fma
    (fma 4.96031746031746e-5 (* x x) 0.002777777777777778)
    (* x x)
    0.08333333333333333)
   (* x x)
   1.0)
  (* x x)))
double code(double x) {
	return fma(fma(fma(4.96031746031746e-5, (x * x), 0.002777777777777778), (x * x), 0.08333333333333333), (x * x), 1.0) * (x * x);
}
function code(x)
	return Float64(fma(fma(fma(4.96031746031746e-5, Float64(x * x), 0.002777777777777778), Float64(x * x), 0.08333333333333333), Float64(x * x), 1.0) * Float64(x * x))
end
code[x_] := N[(N[(N[(N[(4.96031746031746e-5 * N[(x * x), $MachinePrecision] + 0.002777777777777778), $MachinePrecision] * N[(x * x), $MachinePrecision] + 0.08333333333333333), $MachinePrecision] * N[(x * x), $MachinePrecision] + 1.0), $MachinePrecision] * N[(x * x), $MachinePrecision]), $MachinePrecision]
Derivation
  1. Initial program (52.6%)

    \[\left(e^{x} - 2\right) + e^{-x} \]
  2. Add Preprocessing
  3. Taylor expanded in x around 0 (the series is written out after this derivation)

    \[\leadsto \color{blue}{{x}^{2} \cdot \left(1 + {x}^{2} \cdot \left(\frac{1}{12} + {x}^{2} \cdot \left(\frac{1}{360} + \frac{1}{20160} \cdot {x}^{2}\right)\right)\right)} \]
  4. Step-by-step derivation
    1. *-commutative (N/A)

      \[\leadsto \color{blue}{\left(1 + {x}^{2} \cdot \left(\frac{1}{12} + {x}^{2} \cdot \left(\frac{1}{360} + \frac{1}{20160} \cdot {x}^{2}\right)\right)\right) \cdot {x}^{2}} \]
    2. unpow2 (N/A)

      \[\leadsto \left(1 + {x}^{2} \cdot \left(\frac{1}{12} + {x}^{2} \cdot \left(\frac{1}{360} + \frac{1}{20160} \cdot {x}^{2}\right)\right)\right) \cdot \color{blue}{\left(x \cdot x\right)} \]
    3. associate-*r* (N/A)

      \[\leadsto \color{blue}{\left(\left(1 + {x}^{2} \cdot \left(\frac{1}{12} + {x}^{2} \cdot \left(\frac{1}{360} + \frac{1}{20160} \cdot {x}^{2}\right)\right)\right) \cdot x\right) \cdot x} \]
    4. lower-*.f64 (N/A)

      \[\leadsto \color{blue}{\left(\left(1 + {x}^{2} \cdot \left(\frac{1}{12} + {x}^{2} \cdot \left(\frac{1}{360} + \frac{1}{20160} \cdot {x}^{2}\right)\right)\right) \cdot x\right) \cdot x} \]
  5. Applied rewrites (99.3%)

    \[\leadsto \color{blue}{\mathsf{fma}\left({x}^{3}, \mathsf{fma}\left(\mathsf{fma}\left(4.96031746031746 \cdot 10^{-5}, x \cdot x, 0.002777777777777778\right), x \cdot x, 0.08333333333333333\right), x\right) \cdot x} \]
  6. Step-by-step derivation
    1. Applied rewrites (99.3%)

      \[\leadsto \mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(x \cdot x, 4.96031746031746 \cdot 10^{-5}, 0.002777777777777778\right), x \cdot x, 0.08333333333333333\right) \cdot \left(x \cdot x\right), x, x\right) \cdot x \]
    2. Step-by-step derivation
      1. Applied rewrites (99.3%)

        \[\leadsto \mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(4.96031746031746 \cdot 10^{-5}, x \cdot x, 0.002777777777777778\right), x \cdot x, 0.08333333333333333\right), x \cdot x, 1\right) \cdot \color{blue}{\left(x \cdot x\right)} \]
      2. Add Preprocessing
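
For reference, the coefficients in Alternative 1 come from the even part of the exponential; the expansion below is standard series algebra, not part of the generated report:

\[
e^{x} - 2 + e^{-x} \;=\; 2\left(\cosh x - 1\right) \;=\; \sum_{k \ge 1} \frac{2\,x^{2k}}{(2k)!}
\;=\; x^{2} + \frac{x^{4}}{12} + \frac{x^{6}}{360} + \frac{x^{8}}{20160} + \cdots
\]

Hence the constants in the fma chain: 0.08333333333333333 = 1/12, 0.002777777777777778 = 1/360, and 4.96031746031746e-5 = 1/20160.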

Alternative 2: 99.1% accurate, 4.8× speedup

\[\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(x \cdot x, 4.96031746031746 \cdot 10^{-5}, 0.002777777777777778\right), x \cdot x, 0.08333333333333333\right) \cdot \left(x \cdot x\right), x, x\right) \cdot x\]
(FPCore (x)
 :precision binary64
 (*
  (fma
   (*
    (fma
     (fma (* x x) 4.96031746031746e-5 0.002777777777777778)
     (* x x)
     0.08333333333333333)
    (* x x))
   x
   x)
  x))
double code(double x) {
	return fma((fma(fma((x * x), 4.96031746031746e-5, 0.002777777777777778), (x * x), 0.08333333333333333) * (x * x)), x, x) * x;
}

function code(x)
	return Float64(fma(Float64(fma(fma(Float64(x * x), 4.96031746031746e-5, 0.002777777777777778), Float64(x * x), 0.08333333333333333) * Float64(x * x)), x, x) * x)
end

code[x_] := N[(N[(N[(N[(N[(N[(x * x), $MachinePrecision] * 4.96031746031746e-5 + 0.002777777777777778), $MachinePrecision] * N[(x * x), $MachinePrecision] + 0.08333333333333333), $MachinePrecision] * N[(x * x), $MachinePrecision]), $MachinePrecision] * x + x), $MachinePrecision] * x), $MachinePrecision]

Derivation
  1. Initial program (52.6%)

    \[\left(e^{x} - 2\right) + e^{-x} \]
  2. Add Preprocessing
  3. Taylor expanded in x around 0

    \[\leadsto \color{blue}{{x}^{2} \cdot \left(1 + {x}^{2} \cdot \left(\frac{1}{12} + {x}^{2} \cdot \left(\frac{1}{360} + \frac{1}{20160} \cdot {x}^{2}\right)\right)\right)} \]
  4. Step-by-step derivation
    1. *-commutative (N/A)

      \[\leadsto \color{blue}{\left(1 + {x}^{2} \cdot \left(\frac{1}{12} + {x}^{2} \cdot \left(\frac{1}{360} + \frac{1}{20160} \cdot {x}^{2}\right)\right)\right) \cdot {x}^{2}} \]
    2. unpow2 (N/A)

      \[\leadsto \left(1 + {x}^{2} \cdot \left(\frac{1}{12} + {x}^{2} \cdot \left(\frac{1}{360} + \frac{1}{20160} \cdot {x}^{2}\right)\right)\right) \cdot \color{blue}{\left(x \cdot x\right)} \]
    3. associate-*r* (N/A)

      \[\leadsto \color{blue}{\left(\left(1 + {x}^{2} \cdot \left(\frac{1}{12} + {x}^{2} \cdot \left(\frac{1}{360} + \frac{1}{20160} \cdot {x}^{2}\right)\right)\right) \cdot x\right) \cdot x} \]
    4. lower-*.f64 (N/A)

      \[\leadsto \color{blue}{\left(\left(1 + {x}^{2} \cdot \left(\frac{1}{12} + {x}^{2} \cdot \left(\frac{1}{360} + \frac{1}{20160} \cdot {x}^{2}\right)\right)\right) \cdot x\right) \cdot x} \]
  5. Applied rewrites (99.3%)

    \[\leadsto \color{blue}{\mathsf{fma}\left({x}^{3}, \mathsf{fma}\left(\mathsf{fma}\left(4.96031746031746 \cdot 10^{-5}, x \cdot x, 0.002777777777777778\right), x \cdot x, 0.08333333333333333\right), x\right) \cdot x} \]
  6. Step-by-step derivation
    1. Applied rewrites (99.3%)

      \[\leadsto \mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(x \cdot x, 4.96031746031746 \cdot 10^{-5}, 0.002777777777777778\right), x \cdot x, 0.08333333333333333\right) \cdot \left(x \cdot x\right), x, x\right) \cdot x \]
    2. Add Preprocessing

Alternative 3: 99.0% accurate, 6.3× speedup

\[\mathsf{fma}\left(\mathsf{fma}\left(0.002777777777777778, x \cdot x, 0.08333333333333333\right), x \cdot x, 1\right) \cdot \left(x \cdot x\right)\]
(FPCore (x)
 :precision binary64
 (*
  (fma (fma 0.002777777777777778 (* x x) 0.08333333333333333) (* x x) 1.0)
  (* x x)))
double code(double x) {
	return fma(fma(0.002777777777777778, (x * x), 0.08333333333333333), (x * x), 1.0) * (x * x);
}

function code(x)
	return Float64(fma(fma(0.002777777777777778, Float64(x * x), 0.08333333333333333), Float64(x * x), 1.0) * Float64(x * x))
end

code[x_] := N[(N[(N[(0.002777777777777778 * N[(x * x), $MachinePrecision] + 0.08333333333333333), $MachinePrecision] * N[(x * x), $MachinePrecision] + 1.0), $MachinePrecision] * N[(x * x), $MachinePrecision]), $MachinePrecision]

Derivation
  1. Initial program (52.6%)

    \[\left(e^{x} - 2\right) + e^{-x} \]
  2. Add Preprocessing
  3. Taylor expanded in x around 0

    \[\leadsto \color{blue}{{x}^{2} \cdot \left(1 + {x}^{2} \cdot \left(\frac{1}{12} + {x}^{2} \cdot \left(\frac{1}{360} + \frac{1}{20160} \cdot {x}^{2}\right)\right)\right)} \]
  4. Step-by-step derivation
    1. *-commutative (N/A)

      \[\leadsto \color{blue}{\left(1 + {x}^{2} \cdot \left(\frac{1}{12} + {x}^{2} \cdot \left(\frac{1}{360} + \frac{1}{20160} \cdot {x}^{2}\right)\right)\right) \cdot {x}^{2}} \]
    2. unpow2 (N/A)

      \[\leadsto \left(1 + {x}^{2} \cdot \left(\frac{1}{12} + {x}^{2} \cdot \left(\frac{1}{360} + \frac{1}{20160} \cdot {x}^{2}\right)\right)\right) \cdot \color{blue}{\left(x \cdot x\right)} \]
    3. associate-*r* (N/A)

      \[\leadsto \color{blue}{\left(\left(1 + {x}^{2} \cdot \left(\frac{1}{12} + {x}^{2} \cdot \left(\frac{1}{360} + \frac{1}{20160} \cdot {x}^{2}\right)\right)\right) \cdot x\right) \cdot x} \]
    4. lower-*.f64 (N/A)

      \[\leadsto \color{blue}{\left(\left(1 + {x}^{2} \cdot \left(\frac{1}{12} + {x}^{2} \cdot \left(\frac{1}{360} + \frac{1}{20160} \cdot {x}^{2}\right)\right)\right) \cdot x\right) \cdot x} \]
  5. Applied rewrites (99.3%)

    \[\leadsto \color{blue}{\mathsf{fma}\left({x}^{3}, \mathsf{fma}\left(\mathsf{fma}\left(4.96031746031746 \cdot 10^{-5}, x \cdot x, 0.002777777777777778\right), x \cdot x, 0.08333333333333333\right), x\right) \cdot x} \]
  6. Step-by-step derivation
    1. Applied rewrites (99.3%)

      \[\leadsto \mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(x \cdot x, 4.96031746031746 \cdot 10^{-5}, 0.002777777777777778\right), x \cdot x, 0.08333333333333333\right) \cdot \left(x \cdot x\right), x, x\right) \cdot x \]
    2. Taylor expanded in x around 0

      \[\leadsto \mathsf{fma}\left(\mathsf{fma}\left(\frac{1}{360}, x \cdot x, \frac{1}{12}\right) \cdot \left(x \cdot x\right), x, x\right) \cdot x \]
    3. Step-by-step derivation
      1. Applied rewrites (99.2%)

        \[\leadsto \mathsf{fma}\left(\mathsf{fma}\left(0.002777777777777778, x \cdot x, 0.08333333333333333\right) \cdot \left(x \cdot x\right), x, x\right) \cdot x \]
      2. Step-by-step derivation
        1. Applied rewrites (99.2%)

          \[\leadsto \mathsf{fma}\left(\mathsf{fma}\left(0.002777777777777778, x \cdot x, 0.08333333333333333\right), x \cdot x, 1\right) \cdot \color{blue}{\left(x \cdot x\right)} \]
        2. Add Preprocessing
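
Alternative 3 simply drops the x⁸ term from Alternative 1's polynomial. The algebra below (plain series arithmetic, not part of the report) shows what is computed and the leading term of the truncation error:

\[
\mathsf{fma}\left(\mathsf{fma}\left(\tfrac{1}{360}, x^{2}, \tfrac{1}{12}\right), x^{2}, 1\right) \cdot x^{2}
\;=\; x^{2} + \frac{x^{4}}{12} + \frac{x^{6}}{360},
\qquad \text{error} \approx \frac{x^{8}}{20160},
\]

consistent with the small accuracy drop (99.1% to 99.0%) in exchange for a larger speedup (4.8× to 6.3×).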

Alternative 4: 99.0% accurate, 6.3× speedup

\[\mathsf{fma}\left(\mathsf{fma}\left(0.002777777777777778, x \cdot x, 0.08333333333333333\right) \cdot \left(x \cdot x\right), x, x\right) \cdot x\]
(FPCore (x)
 :precision binary64
 (*
  (fma (* (fma 0.002777777777777778 (* x x) 0.08333333333333333) (* x x)) x x)
  x))
double code(double x) {
	return fma((fma(0.002777777777777778, (x * x), 0.08333333333333333) * (x * x)), x, x) * x;
}

function code(x)
	return Float64(fma(Float64(fma(0.002777777777777778, Float64(x * x), 0.08333333333333333) * Float64(x * x)), x, x) * x)
end

code[x_] := N[(N[(N[(N[(0.002777777777777778 * N[(x * x), $MachinePrecision] + 0.08333333333333333), $MachinePrecision] * N[(x * x), $MachinePrecision]), $MachinePrecision] * x + x), $MachinePrecision] * x), $MachinePrecision]

Derivation
  1. Initial program (52.6%)

    \[\left(e^{x} - 2\right) + e^{-x} \]
  2. Add Preprocessing
  3. Taylor expanded in x around 0

    \[\leadsto \color{blue}{{x}^{2} \cdot \left(1 + {x}^{2} \cdot \left(\frac{1}{12} + {x}^{2} \cdot \left(\frac{1}{360} + \frac{1}{20160} \cdot {x}^{2}\right)\right)\right)} \]
  4. Step-by-step derivation
    1. *-commutative (N/A)

      \[\leadsto \color{blue}{\left(1 + {x}^{2} \cdot \left(\frac{1}{12} + {x}^{2} \cdot \left(\frac{1}{360} + \frac{1}{20160} \cdot {x}^{2}\right)\right)\right) \cdot {x}^{2}} \]
    2. unpow2 (N/A)

      \[\leadsto \left(1 + {x}^{2} \cdot \left(\frac{1}{12} + {x}^{2} \cdot \left(\frac{1}{360} + \frac{1}{20160} \cdot {x}^{2}\right)\right)\right) \cdot \color{blue}{\left(x \cdot x\right)} \]
    3. associate-*r* (N/A)

      \[\leadsto \color{blue}{\left(\left(1 + {x}^{2} \cdot \left(\frac{1}{12} + {x}^{2} \cdot \left(\frac{1}{360} + \frac{1}{20160} \cdot {x}^{2}\right)\right)\right) \cdot x\right) \cdot x} \]
    4. lower-*.f64 (N/A)

      \[\leadsto \color{blue}{\left(\left(1 + {x}^{2} \cdot \left(\frac{1}{12} + {x}^{2} \cdot \left(\frac{1}{360} + \frac{1}{20160} \cdot {x}^{2}\right)\right)\right) \cdot x\right) \cdot x} \]
  5. Applied rewrites (99.3%)

    \[\leadsto \color{blue}{\mathsf{fma}\left({x}^{3}, \mathsf{fma}\left(\mathsf{fma}\left(4.96031746031746 \cdot 10^{-5}, x \cdot x, 0.002777777777777778\right), x \cdot x, 0.08333333333333333\right), x\right) \cdot x} \]
  6. Step-by-step derivation
    1. Applied rewrites (99.3%)

      \[\leadsto \mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(x \cdot x, 4.96031746031746 \cdot 10^{-5}, 0.002777777777777778\right), x \cdot x, 0.08333333333333333\right) \cdot \left(x \cdot x\right), x, x\right) \cdot x \]
    2. Taylor expanded in x around 0

      \[\leadsto \mathsf{fma}\left(\mathsf{fma}\left(\frac{1}{360}, x \cdot x, \frac{1}{12}\right) \cdot \left(x \cdot x\right), x, x\right) \cdot x \]
    3. Step-by-step derivation
      1. Applied rewrites (99.2%)

        \[\leadsto \mathsf{fma}\left(\mathsf{fma}\left(0.002777777777777778, x \cdot x, 0.08333333333333333\right) \cdot \left(x \cdot x\right), x, x\right) \cdot x \]
      2. Add Preprocessing

Alternative 5: 98.8% accurate, 7.7× speedup

\[\mathsf{fma}\left(x, x, x \cdot \left(x \cdot \left(0.08333333333333333 \cdot \left(x \cdot x\right)\right)\right)\right)\]
(FPCore (x)
 :precision binary64
 (fma x x (* x (* x (* 0.08333333333333333 (* x x))))))
double code(double x) {
	return fma(x, x, (x * (x * (0.08333333333333333 * (x * x)))));
}

function code(x)
	return fma(x, x, Float64(x * Float64(x * Float64(0.08333333333333333 * Float64(x * x)))))
end

code[x_] := N[(x * x + N[(x * N[(x * N[(0.08333333333333333 * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]

Derivation
  1. Initial program (52.6%)

    \[\left(e^{x} - 2\right) + e^{-x} \]
  2. Add Preprocessing
  3. Taylor expanded in x around 0

    \[\leadsto \color{blue}{{x}^{2} \cdot \left(1 + \frac{1}{12} \cdot {x}^{2}\right)} \]
  4. Step-by-step derivation
    1. +-commutative (N/A)

      \[\leadsto {x}^{2} \cdot \color{blue}{\left(\frac{1}{12} \cdot {x}^{2} + 1\right)} \]
    2. distribute-lft-in (N/A)

      \[\leadsto \color{blue}{{x}^{2} \cdot \left(\frac{1}{12} \cdot {x}^{2}\right) + {x}^{2} \cdot 1} \]
    3. metadata-eval (N/A)

      \[\leadsto {x}^{2} \cdot \left(\frac{1}{12} \cdot {x}^{2}\right) + {x}^{2} \cdot \color{blue}{\left(1 + 0\right)} \]
    4. distribute-lft-in (N/A)

      \[\leadsto {x}^{2} \cdot \left(\frac{1}{12} \cdot {x}^{2}\right) + \color{blue}{\left({x}^{2} \cdot 1 + {x}^{2} \cdot 0\right)} \]
    5. *-rgt-identity (N/A)

      \[\leadsto {x}^{2} \cdot \left(\frac{1}{12} \cdot {x}^{2}\right) + \left(\color{blue}{{x}^{2}} + {x}^{2} \cdot 0\right) \]
    6. mul0-rgt (N/A)

      \[\leadsto {x}^{2} \cdot \left(\frac{1}{12} \cdot {x}^{2}\right) + \left({x}^{2} + \color{blue}{0}\right) \]
    7. *-commutative (N/A)

      \[\leadsto {x}^{2} \cdot \color{blue}{\left({x}^{2} \cdot \frac{1}{12}\right)} + \left({x}^{2} + 0\right) \]
    8. associate-*r* (N/A)

      \[\leadsto \color{blue}{\left({x}^{2} \cdot {x}^{2}\right) \cdot \frac{1}{12}} + \left({x}^{2} + 0\right) \]
    9. mul0-lft (N/A)

      \[\leadsto \left({x}^{2} \cdot {x}^{2}\right) \cdot \frac{1}{12} + \left({x}^{2} + \color{blue}{0 \cdot {x}^{2}}\right) \]
    10. distribute-rgt1-in (N/A)

      \[\leadsto \left({x}^{2} \cdot {x}^{2}\right) \cdot \frac{1}{12} + \color{blue}{\left(0 + 1\right) \cdot {x}^{2}} \]
    11. metadata-eval (N/A)

      \[\leadsto \left({x}^{2} \cdot {x}^{2}\right) \cdot \frac{1}{12} + \color{blue}{1} \cdot {x}^{2} \]
    12. *-lft-identity (N/A)

      \[\leadsto \left({x}^{2} \cdot {x}^{2}\right) \cdot \frac{1}{12} + \color{blue}{{x}^{2}} \]
    13. lower-fma.f64 (N/A)

      \[\leadsto \color{blue}{\mathsf{fma}\left({x}^{2} \cdot {x}^{2}, \frac{1}{12}, {x}^{2}\right)} \]
    14. pow-sqr (N/A)

      \[\leadsto \mathsf{fma}\left(\color{blue}{{x}^{\left(2 \cdot 2\right)}}, \frac{1}{12}, {x}^{2}\right) \]
    15. lower-pow.f64 (N/A)

      \[\leadsto \mathsf{fma}\left(\color{blue}{{x}^{\left(2 \cdot 2\right)}}, \frac{1}{12}, {x}^{2}\right) \]
    16. metadata-eval (N/A)

      \[\leadsto \mathsf{fma}\left({x}^{\color{blue}{4}}, \frac{1}{12}, {x}^{2}\right) \]
    17. unpow2 (N/A)

      \[\leadsto \mathsf{fma}\left({x}^{4}, \frac{1}{12}, \color{blue}{x \cdot x}\right) \]
    18. lower-*.f64 (99.0%)

      \[\leadsto \mathsf{fma}\left({x}^{4}, 0.08333333333333333, \color{blue}{x \cdot x}\right) \]
  5. Applied rewrites (99.0%)

    \[\leadsto \color{blue}{\mathsf{fma}\left({x}^{4}, 0.08333333333333333, x \cdot x\right)} \]
  6. Step-by-step derivation
    1. Applied rewrites (99.0%)

      \[\leadsto \mathsf{fma}\left(x, \color{blue}{x}, {x}^{4} \cdot 0.08333333333333333\right) \]
    2. Step-by-step derivation
      1. Applied rewrites (99.0%)

        \[\leadsto \mathsf{fma}\left(x, x, x \cdot \left(x \cdot \left(0.08333333333333333 \cdot \left(x \cdot x\right)\right)\right)\right) \]
      2. Add Preprocessing

Alternative 6: 98.8% accurate, 9.5× speedup

\[\mathsf{fma}\left(0.08333333333333333, x \cdot x, 1\right) \cdot \left(x \cdot x\right)\]
(FPCore (x)
 :precision binary64
 (* (fma 0.08333333333333333 (* x x) 1.0) (* x x)))
double code(double x) {
	return fma(0.08333333333333333, (x * x), 1.0) * (x * x);
}

function code(x)
	return Float64(fma(0.08333333333333333, Float64(x * x), 1.0) * Float64(x * x))
end

code[x_] := N[(N[(0.08333333333333333 * N[(x * x), $MachinePrecision] + 1.0), $MachinePrecision] * N[(x * x), $MachinePrecision]), $MachinePrecision]

Derivation
  1. Initial program (52.6%)

    \[\left(e^{x} - 2\right) + e^{-x} \]
  2. Add Preprocessing
  3. Taylor expanded in x around 0

    \[\leadsto \color{blue}{{x}^{2} \cdot \left(1 + {x}^{2} \cdot \left(\frac{1}{12} + {x}^{2} \cdot \left(\frac{1}{360} + \frac{1}{20160} \cdot {x}^{2}\right)\right)\right)} \]
  4. Step-by-step derivation
    1. *-commutative (N/A)

      \[\leadsto \color{blue}{\left(1 + {x}^{2} \cdot \left(\frac{1}{12} + {x}^{2} \cdot \left(\frac{1}{360} + \frac{1}{20160} \cdot {x}^{2}\right)\right)\right) \cdot {x}^{2}} \]
    2. unpow2 (N/A)

      \[\leadsto \left(1 + {x}^{2} \cdot \left(\frac{1}{12} + {x}^{2} \cdot \left(\frac{1}{360} + \frac{1}{20160} \cdot {x}^{2}\right)\right)\right) \cdot \color{blue}{\left(x \cdot x\right)} \]
    3. associate-*r* (N/A)

      \[\leadsto \color{blue}{\left(\left(1 + {x}^{2} \cdot \left(\frac{1}{12} + {x}^{2} \cdot \left(\frac{1}{360} + \frac{1}{20160} \cdot {x}^{2}\right)\right)\right) \cdot x\right) \cdot x} \]
    4. lower-*.f64 (N/A)

      \[\leadsto \color{blue}{\left(\left(1 + {x}^{2} \cdot \left(\frac{1}{12} + {x}^{2} \cdot \left(\frac{1}{360} + \frac{1}{20160} \cdot {x}^{2}\right)\right)\right) \cdot x\right) \cdot x} \]
  5. Applied rewrites (99.3%)

    \[\leadsto \color{blue}{\mathsf{fma}\left({x}^{3}, \mathsf{fma}\left(\mathsf{fma}\left(4.96031746031746 \cdot 10^{-5}, x \cdot x, 0.002777777777777778\right), x \cdot x, 0.08333333333333333\right), x\right) \cdot x} \]
  6. Step-by-step derivation
    1. Applied rewrites (99.3%)

      \[\leadsto \mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(x \cdot x, 4.96031746031746 \cdot 10^{-5}, 0.002777777777777778\right), x \cdot x, 0.08333333333333333\right) \cdot \left(x \cdot x\right), x, x\right) \cdot x \]
    2. Step-by-step derivation
      1. Applied rewrites (99.3%)

        \[\leadsto \mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(4.96031746031746 \cdot 10^{-5}, x \cdot x, 0.002777777777777778\right), x \cdot x, 0.08333333333333333\right), x \cdot x, 1\right) \cdot \color{blue}{\left(x \cdot x\right)} \]
      2. Taylor expanded in x around 0

        \[\leadsto \mathsf{fma}\left(\frac{1}{12}, x \cdot x, 1\right) \cdot \left(x \cdot x\right) \]
      3. Step-by-step derivation
        1. Applied rewrites (99.0%)

          \[\leadsto \mathsf{fma}\left(0.08333333333333333, x \cdot x, 1\right) \cdot \left(x \cdot x\right) \]
        2. Add Preprocessing
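
Alternatives 5 and 6 evaluate the same truncated series in exact arithmetic; only the parenthesization differs. The identity below is plain algebra, not part of the report:

\[
\mathsf{fma}\left(x, x, x \cdot \left(x \cdot \left(\tfrac{1}{12} \cdot \left(x \cdot x\right)\right)\right)\right)
\;=\; \mathsf{fma}\left(\tfrac{1}{12}, x \cdot x, 1\right) \cdot \left(x \cdot x\right)
\;=\; x^{2} + \frac{x^{4}}{12}.
\]

Alternative 6's form needs fewer multiplications, which accounts for its higher speedup (9.5× vs 7.7×) at the same 98.8% accuracy.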

Alternative 7: 98.2% accurate, 34.8× speedup

\[x \cdot x\]
(FPCore (x) :precision binary64 (* x x))
double code(double x) {
	return x * x;
}

module fmin_fmax_functions
    implicit none
    private
    public fmax
    public fmin

    interface fmax
        module procedure fmax88
        module procedure fmax44
        module procedure fmax84
        module procedure fmax48
    end interface
    interface fmin
        module procedure fmin88
        module procedure fmin44
        module procedure fmin84
        module procedure fmin48
    end interface
contains
    real(8) function fmax88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    real(4) function fmax44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    real(8) function fmax84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
    end function
    real(8) function fmax48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
    end function
    real(8) function fmin88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    real(4) function fmin44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    real(8) function fmin84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
    end function
    real(8) function fmin48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
    end function
end module

real(8) function code(x)
use fmin_fmax_functions
    real(8), intent (in) :: x
    code = x * x
end function

public static double code(double x) {
	return x * x;
}

def code(x):
	return x * x

function code(x)
	return Float64(x * x)
end

function tmp = code(x)
	tmp = x * x;
end

code[x_] := N[(x * x), $MachinePrecision]

Derivation
  1. Initial program (52.6%)

    \[\left(e^{x} - 2\right) + e^{-x} \]
  2. Add Preprocessing
  3. Taylor expanded in x around 0

    \[\leadsto \color{blue}{{x}^{2}} \]
  4. Step-by-step derivation
    1. unpow2 (N/A)

      \[\leadsto \color{blue}{x \cdot x} \]
    2. lower-*.f64 (98.8%)

      \[\leadsto \color{blue}{x \cdot x} \]
  5. Applied rewrites (98.8%)

    \[\leadsto \color{blue}{x \cdot x} \]
  6. Add Preprocessing
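
Alternative 7 keeps only the leading term of the series. Standard error analysis (not from the report) gives its relative error:

\[
\frac{\left(e^{x} - 2 + e^{-x}\right) - x^{2}}{e^{x} - 2 + e^{-x}} \;=\; \frac{x^{2}}{12} + O\!\left(x^{4}\right),
\]

so x · x is a faithful approximation only for small |x|; the large 34.8× speedup comes at the cost of accuracy away from zero.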

Alternative 8: 51.4% accurate, 52.3× speedup

\[-1 + 1\]
(FPCore (x) :precision binary64 (+ -1.0 1.0))
double code(double x) {
	return -1.0 + 1.0;
}

module fmin_fmax_functions
    implicit none
    private
    public fmax
    public fmin

    interface fmax
        module procedure fmax88
        module procedure fmax44
        module procedure fmax84
        module procedure fmax48
    end interface
    interface fmin
        module procedure fmin88
        module procedure fmin44
        module procedure fmin84
        module procedure fmin48
    end interface
contains
    real(8) function fmax88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    real(4) function fmax44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    real(8) function fmax84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
    end function
    real(8) function fmax48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
    end function
    real(8) function fmin88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    real(4) function fmin44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    real(8) function fmin84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
    end function
    real(8) function fmin48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
    end function
end module

real(8) function code(x)
use fmin_fmax_functions
    real(8), intent (in) :: x
    code = (-1.0d0) + 1.0d0
end function

public static double code(double x) {
	return -1.0 + 1.0;
}

def code(x):
	return -1.0 + 1.0

function code(x)
	return Float64(-1.0 + 1.0)
end

function tmp = code(x)
	tmp = -1.0 + 1.0;
end

code[x_] := N[(-1.0 + 1.0), $MachinePrecision]

Derivation
  1. Initial program (52.6%)

    \[\left(e^{x} - 2\right) + e^{-x} \]
  2. Add Preprocessing
  3. Taylor expanded in x around 0

    \[\leadsto \color{blue}{-1} + e^{-x} \]
  4. Step-by-step derivation
    1. Applied rewrites (51.4%)

      \[\leadsto \color{blue}{-1} + e^{-x} \]
    2. Taylor expanded in x around 0

      \[\leadsto -1 + \color{blue}{1} \]
    3. Step-by-step derivation
      1. Applied rewrites (50.9%)

        \[\leadsto -1 + \color{blue}{1} \]
      2. Add Preprocessing

Developer Target 1: 99.9% accurate, 0.9× speedup

\[\begin{array}{l} t_0 := \sinh \left(\frac{x}{2}\right)\\ 4 \cdot \left(t_0 \cdot t_0\right) \end{array}\]
(FPCore (x)
 :precision binary64
 (let* ((t_0 (sinh (/ x 2.0)))) (* 4.0 (* t_0 t_0))))
double code(double x) {
	double t_0 = sinh((x / 2.0));
	return 4.0 * (t_0 * t_0);
}

module fmin_fmax_functions
    implicit none
    private
    public fmax
    public fmin

    interface fmax
        module procedure fmax88
        module procedure fmax44
        module procedure fmax84
        module procedure fmax48
    end interface
    interface fmin
        module procedure fmin88
        module procedure fmin44
        module procedure fmin84
        module procedure fmin48
    end interface
contains
    real(8) function fmax88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    real(4) function fmax44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    real(8) function fmax84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
    end function
    real(8) function fmax48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
    end function
    real(8) function fmin88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    real(4) function fmin44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    real(8) function fmin84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
    end function
    real(8) function fmin48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
    end function
end module

real(8) function code(x)
use fmin_fmax_functions
    real(8), intent (in) :: x
    real(8) :: t_0
    t_0 = sinh((x / 2.0d0))
    code = 4.0d0 * (t_0 * t_0)
end function

public static double code(double x) {
	double t_0 = Math.sinh((x / 2.0));
	return 4.0 * (t_0 * t_0);
}

def code(x):
	t_0 = math.sinh((x / 2.0))
	return 4.0 * (t_0 * t_0)

function code(x)
	t_0 = sinh(Float64(x / 2.0))
	return Float64(4.0 * Float64(t_0 * t_0))
end

function tmp = code(x)
	t_0 = sinh((x / 2.0));
	tmp = 4.0 * (t_0 * t_0);
end

code[x_] := Block[{t$95$0 = N[Sinh[N[(x / 2.0), $MachinePrecision]], $MachinePrecision]}, N[(4.0 * N[(t$95$0 * t$95$0), $MachinePrecision]), $MachinePrecision]]
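
The developer's rewrite is exact in real arithmetic; the hyperbolic identity below (standard algebra, not part of the generated report) shows why it sidesteps the cancellation:

\[
e^{x} - 2 + e^{-x} \;=\; 2\left(\cosh x - 1\right) \;=\; 4\,\sinh^{2}\!\left(\frac{x}{2}\right),
\]

since \(\cosh x = 1 + 2\sinh^{2}(x/2)\). sinh is computed accurately near zero and squaring cannot cancel, which is consistent with this target's 99.9% accuracy over the whole input range at the cost of being slightly slower than the original (0.9× speedup).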
                                

Reproduce

herbie shell --seed 2025017
(FPCore (x)
  :name "exp2 (problem 3.3.7)"
  :precision binary64
  :pre (<= (fabs x) 710.0)

  :alt
  (! :herbie-platform default (* 4 (* (sinh (/ x 2)) (sinh (/ x 2)))))

  (+ (- (exp x) 2.0) (exp (- x))))
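
To rerun this result, start the shell with the seed shown above and paste the FPCore at the prompt. Herbie's one-shot improve mode should also work; the file name exp2.fpcore below is illustrative, not from the report:

herbie shell --seed 2025017
[paste the FPCore above at the prompt]

herbie improve --seed 2025017 exp2.fpcore improved.fpcore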