NMSE Section 6.1 mentioned, A

Percentage Accurate: 73.1% → 99.9%
Time: 14.6s
Alternatives: 15
Speedup: 9.7×

Specification

?
\[\begin{array}{l} \\ \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \end{array} \]
;; Specification, evaluated directly in binary64:
;;   ((1 + 1/eps) * e^(-(1-eps)*x) - (1/eps - 1) * e^(-(1+eps)*x)) / 2
;; Note: 1/eps appears twice and there is no guard for eps = 0.
(FPCore (x eps)
 :precision binary64
 (/
  (-
   (* (+ 1.0 (/ 1.0 eps)) (exp (- (* (- 1.0 eps) x))))
   (* (- (/ 1.0 eps) 1.0) (exp (- (* (+ 1.0 eps) x)))))
  2.0))
double code(double x, double eps) {
	return (((1.0 + (1.0 / eps)) * exp(-((1.0 - eps) * x))) - (((1.0 / eps) - 1.0) * exp(-((1.0 + eps) * x)))) / 2.0;
}
! Direct binary64 evaluation of the specification
!   ((1 + 1/eps) * exp(-(1-eps)*x) - (1/eps - 1) * exp(-(1+eps)*x)) / 2.
! 1/eps is evaluated twice; there is no guard for eps = 0.
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    code = (((1.0d0 + (1.0d0 / eps)) * exp(-((1.0d0 - eps) * x))) - (((1.0d0 / eps) - 1.0d0) * exp(-((1.0d0 + eps) * x)))) / 2.0d0
end function
/**
 * Direct binary64 evaluation of
 * ((1 + 1/eps) * e^(-(1-eps)*x) - (1/eps - 1) * e^(-(1+eps)*x)) / 2.
 * There is no guard for eps == 0 (division by eps).
 */
public static double code(double x, double eps) {
	final double invEps = 1.0 / eps;
	final double eSlow = Math.exp(-((1.0 - eps) * x)); // e^{-(1-eps)x}
	final double eFast = Math.exp(-((1.0 + eps) * x)); // e^{-(1+eps)x}
	return ((1.0 + invEps) * eSlow - (invEps - 1.0) * eFast) / 2.0;
}
def code(x, eps):
	"""Direct evaluation of ((1 + 1/eps)*exp(-(1-eps)*x) - (1/eps - 1)*exp(-(1+eps)*x)) / 2.

	No guard for eps == 0 (divides by eps).
	"""
	inv_eps = 1.0 / eps
	e_slow = math.exp(-((1.0 - eps) * x))  # e^{-(1-eps)x}
	e_fast = math.exp(-((1.0 + eps) * x))  # e^{-(1+eps)x}
	return ((1.0 + inv_eps) * e_slow - (inv_eps - 1.0) * e_fast) / 2.0
# Direct evaluation of ((1 + 1/eps)*e^(-(1-eps)x) - (1/eps - 1)*e^(-(1+eps)x)) / 2.
# Every intermediate is explicitly wrapped in Float64(...) to pin binary64 rounding.
function code(x, eps)
	return Float64(Float64(Float64(Float64(1.0 + Float64(1.0 / eps)) * exp(Float64(-Float64(Float64(1.0 - eps) * x)))) - Float64(Float64(Float64(1.0 / eps) - 1.0) * exp(Float64(-Float64(Float64(1.0 + eps) * x))))) / 2.0)
end
% Direct double-precision evaluation of
%   ((1 + 1/eps) * exp(-(1-eps)*x) - (1/eps - 1) * exp(-(1+eps)*x)) / 2.
function tmp = code(x, eps)
	tmp = (((1.0 + (1.0 / eps)) * exp(-((1.0 - eps) * x))) - (((1.0 / eps) - 1.0) * exp(-((1.0 + eps) * x)))) / 2.0;
end
(* Direct evaluation of ((1 + 1/eps) e^(-(1-eps)x) - (1/eps - 1) e^(-(1+eps)x)) / 2;
   each N[..., $MachinePrecision] rounds that intermediate to machine precision. *)
code[x_, eps_] := N[(N[(N[(N[(1.0 + N[(1.0 / eps), $MachinePrecision]), $MachinePrecision] * N[Exp[(-N[(N[(1.0 - eps), $MachinePrecision] * x), $MachinePrecision])], $MachinePrecision]), $MachinePrecision] - N[(N[(N[(1.0 / eps), $MachinePrecision] - 1.0), $MachinePrecision] * N[Exp[(-N[(N[(1.0 + eps), $MachinePrecision] * x), $MachinePrecision])], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]
\begin{array}{l}

\\
\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}
\end{array}

Sampling outcomes in binary64 precision:

Local Percentage Accuracy vs Input Value

The average percentage accuracy by input value. The horizontal axis shows the value of an input variable; the variable is chosen in the title. The vertical axis is accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion. These can be toggled with buttons below the plot. The line is an average, while the dots represent individual samples.

Accuracy vs Speed?

Herbie found 15 alternatives:

AlternativeAccuracySpeedup
The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed-accuracy tradeoffs.

Initial Program: 73.1% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \end{array} \]
;; Initial program, evaluated directly in binary64:
;;   ((1 + 1/eps) * e^(-(1-eps)*x) - (1/eps - 1) * e^(-(1+eps)*x)) / 2
(FPCore (x eps)
 :precision binary64
 (/
  (-
   (* (+ 1.0 (/ 1.0 eps)) (exp (- (* (- 1.0 eps) x))))
   (* (- (/ 1.0 eps) 1.0) (exp (- (* (+ 1.0 eps) x)))))
  2.0))
/* Initial program: direct binary64 evaluation of
 *   ((1 + 1/eps) * e^(-(1-eps)*x) - (1/eps - 1) * e^(-(1+eps)*x)) / 2.
 * 1/eps is computed twice; no guard for eps == 0. */
double code(double x, double eps) {
	return (((1.0 + (1.0 / eps)) * exp(-((1.0 - eps) * x))) - (((1.0 / eps) - 1.0) * exp(-((1.0 + eps) * x)))) / 2.0;
}
! Initial program: direct binary64 evaluation of
!   ((1 + 1/eps) * exp(-(1-eps)*x) - (1/eps - 1) * exp(-(1+eps)*x)) / 2.
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    code = (((1.0d0 + (1.0d0 / eps)) * exp(-((1.0d0 - eps) * x))) - (((1.0d0 / eps) - 1.0d0) * exp(-((1.0d0 + eps) * x)))) / 2.0d0
end function
/**
 * Initial program: direct binary64 evaluation of
 * ((1 + 1/eps) * e^(-(1-eps)*x) - (1/eps - 1) * e^(-(1+eps)*x)) / 2.
 */
public static double code(double x, double eps) {
	return (((1.0 + (1.0 / eps)) * Math.exp(-((1.0 - eps) * x))) - (((1.0 / eps) - 1.0) * Math.exp(-((1.0 + eps) * x)))) / 2.0;
}
def code(x, eps):
	"""Direct evaluation of ((1 + 1/eps)*exp(-(1-eps)*x) - (1/eps - 1)*exp(-(1+eps)*x)) / 2."""
	return (((1.0 + (1.0 / eps)) * math.exp(-((1.0 - eps) * x))) - (((1.0 / eps) - 1.0) * math.exp(-((1.0 + eps) * x)))) / 2.0
# Initial program: ((1 + 1/eps)*e^(-(1-eps)x) - (1/eps - 1)*e^(-(1+eps)x)) / 2,
# with every intermediate pinned to binary64 via Float64(...).
function code(x, eps)
	return Float64(Float64(Float64(Float64(1.0 + Float64(1.0 / eps)) * exp(Float64(-Float64(Float64(1.0 - eps) * x)))) - Float64(Float64(Float64(1.0 / eps) - 1.0) * exp(Float64(-Float64(Float64(1.0 + eps) * x))))) / 2.0)
end
% Initial program: direct double-precision evaluation of
%   ((1 + 1/eps) * exp(-(1-eps)*x) - (1/eps - 1) * exp(-(1+eps)*x)) / 2.
function tmp = code(x, eps)
	tmp = (((1.0 + (1.0 / eps)) * exp(-((1.0 - eps) * x))) - (((1.0 / eps) - 1.0) * exp(-((1.0 + eps) * x)))) / 2.0;
end
(* Initial program: ((1 + 1/eps) e^(-(1-eps)x) - (1/eps - 1) e^(-(1+eps)x)) / 2,
   rounding each intermediate with N[..., $MachinePrecision]. *)
code[x_, eps_] := N[(N[(N[(N[(1.0 + N[(1.0 / eps), $MachinePrecision]), $MachinePrecision] * N[Exp[(-N[(N[(1.0 - eps), $MachinePrecision] * x), $MachinePrecision])], $MachinePrecision]), $MachinePrecision] - N[(N[(N[(1.0 / eps), $MachinePrecision] - 1.0), $MachinePrecision] * N[Exp[(-N[(N[(1.0 + eps), $MachinePrecision] * x), $MachinePrecision])], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]
\begin{array}{l}

\\
\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}
\end{array}

Alternative 1: 99.9% accurate, 0.6× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_0 := e^{x \cdot \left(-1 - \varepsilon\right)}\\ \mathbf{if}\;\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{x \cdot \left(\varepsilon + -1\right)} + t\_0 \cdot \left(1 + \frac{-1}{\varepsilon}\right) \leq 0:\\ \;\;\;\;0.5 \cdot \left(e^{-x} \cdot \left(x + \left(x + 2\right)\right)\right)\\ \mathbf{else}:\\ \;\;\;\;0.5 \cdot \left(t\_0 + e^{x \cdot \varepsilon}\right)\\ \end{array} \end{array} \]
;; Alternative 1: two regimes chosen by the sign of the rewritten numerator.
;; t_0 = e^(x*(-1-eps)) is shared by the test and the else branch.
;; Test <= 0  ->  eps->0 Taylor form  0.5 * e^(-x) * (x + (x + 2))
;; otherwise  ->  0.5 * (t_0 + e^(x*eps))
(FPCore (x eps)
 :precision binary64
 (let* ((t_0 (exp (* x (- -1.0 eps)))))
   (if (<=
        (+
         (* (+ 1.0 (/ 1.0 eps)) (exp (* x (+ eps -1.0))))
         (* t_0 (+ 1.0 (/ -1.0 eps))))
        0.0)
     (* 0.5 (* (exp (- x)) (+ x (+ x 2.0))))
     (* 0.5 (+ t_0 (exp (* x eps)))))))
/* Alternative 1: regime split on the sign of the rewritten numerator.
   t_0 = e^{x*(-1-eps)} is shared by the test and the else branch. */
double code(double x, double eps) {
	double t_0 = exp((x * (-1.0 - eps)));
	double tmp;
	if ((((1.0 + (1.0 / eps)) * exp((x * (eps + -1.0)))) + (t_0 * (1.0 + (-1.0 / eps)))) <= 0.0) {
		/* eps -> 0 Taylor form: 0.5 * e^{-x} * (x + (x + 2)) */
		tmp = 0.5 * (exp(-x) * (x + (x + 2.0)));
	} else {
		/* sum-of-exponentials form: 0.5 * (e^{x(-1-eps)} + e^{x*eps}) */
		tmp = 0.5 * (t_0 + exp((x * eps)));
	}
	return tmp;
}
! Alternative 1: regime split on the sign of the rewritten numerator.
! t_0 = exp(x*(-1-eps)) is shared by the test and the else branch.
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    real(8) :: t_0
    real(8) :: tmp
    t_0 = exp((x * ((-1.0d0) - eps)))
    if ((((1.0d0 + (1.0d0 / eps)) * exp((x * (eps + (-1.0d0))))) + (t_0 * (1.0d0 + ((-1.0d0) / eps)))) <= 0.0d0) then
        ! eps -> 0 Taylor form: 0.5 * exp(-x) * (x + (x + 2))
        tmp = 0.5d0 * (exp(-x) * (x + (x + 2.0d0)))
    else
        ! sum-of-exponentials form: 0.5 * (t_0 + exp(x*eps))
        tmp = 0.5d0 * (t_0 + exp((x * eps)))
    end if
    code = tmp
end function
/**
 * Alternative 1: regime split on the sign of the rewritten numerator.
 * t_0 = e^{x*(-1-eps)} is shared by the test and the else branch.
 */
public static double code(double x, double eps) {
	double t_0 = Math.exp((x * (-1.0 - eps)));
	double tmp;
	if ((((1.0 + (1.0 / eps)) * Math.exp((x * (eps + -1.0)))) + (t_0 * (1.0 + (-1.0 / eps)))) <= 0.0) {
		// eps -> 0 Taylor form: 0.5 * e^{-x} * (x + (x + 2))
		tmp = 0.5 * (Math.exp(-x) * (x + (x + 2.0)));
	} else {
		// sum-of-exponentials form: 0.5 * (t_0 + e^{x*eps})
		tmp = 0.5 * (t_0 + Math.exp((x * eps)));
	}
	return tmp;
}
def code(x, eps):
	"""Regime-split evaluation of Herbie's Alternative 1.

	When the rewritten numerator is <= 0, use the eps->0 Taylor form
	0.5 * exp(-x) * (x + (x + 2)); otherwise use the sum of exponentials
	0.5 * (exp(x*(-1-eps)) + exp(x*eps)).
	"""
	# Shared subterm e^{x * (-1 - eps)}, used by both the test and the else branch.
	decay = math.exp((x * (-1.0 - eps)))
	regime = ((1.0 + (1.0 / eps)) * math.exp((x * (eps + -1.0)))) + (decay * (1.0 + (-1.0 / eps)))
	if regime <= 0.0:
		return 0.5 * (math.exp(-x) * (x + (x + 2.0)))
	return 0.5 * (decay + math.exp((x * eps)))
# Alternative 1: regime split on the sign of the rewritten numerator.
# t_0 = e^(x*(-1-eps)) is shared by the test and the else branch.
function code(x, eps)
	t_0 = exp(Float64(x * Float64(-1.0 - eps)))
	tmp = 0.0
	if (Float64(Float64(Float64(1.0 + Float64(1.0 / eps)) * exp(Float64(x * Float64(eps + -1.0)))) + Float64(t_0 * Float64(1.0 + Float64(-1.0 / eps)))) <= 0.0)
		# eps -> 0 Taylor form: 0.5 * e^(-x) * (x + (x + 2))
		tmp = Float64(0.5 * Float64(exp(Float64(-x)) * Float64(x + Float64(x + 2.0))));
	else
		# sum-of-exponentials form: 0.5 * (t_0 + e^(x*eps))
		tmp = Float64(0.5 * Float64(t_0 + exp(Float64(x * eps))));
	end
	return tmp
end
% Alternative 1: regime split on the sign of the rewritten numerator.
% t_0 = exp(x*(-1-eps)) is shared by the test and the else branch.
function tmp_2 = code(x, eps)
	t_0 = exp((x * (-1.0 - eps)));
	tmp = 0.0;
	if ((((1.0 + (1.0 / eps)) * exp((x * (eps + -1.0)))) + (t_0 * (1.0 + (-1.0 / eps)))) <= 0.0)
		% eps -> 0 Taylor form: 0.5 * exp(-x) * (x + (x + 2))
		tmp = 0.5 * (exp(-x) * (x + (x + 2.0)));
	else
		% sum-of-exponentials form: 0.5 * (t_0 + exp(x*eps))
		tmp = 0.5 * (t_0 + exp((x * eps)));
	end
	tmp_2 = tmp;
end
(* Alternative 1: regime split on the sign of the rewritten numerator.
   t$95$0 = e^(x*(-1-eps)); test <= 0 picks the eps->0 Taylor form,
   otherwise the sum-of-exponentials form. *)
code[x_, eps_] := Block[{t$95$0 = N[Exp[N[(x * N[(-1.0 - eps), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]}, If[LessEqual[N[(N[(N[(1.0 + N[(1.0 / eps), $MachinePrecision]), $MachinePrecision] * N[Exp[N[(x * N[(eps + -1.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] + N[(t$95$0 * N[(1.0 + N[(-1.0 / eps), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], 0.0], N[(0.5 * N[(N[Exp[(-x)], $MachinePrecision] * N[(x + N[(x + 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(0.5 * N[(t$95$0 + N[Exp[N[(x * eps), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
t_0 := e^{x \cdot \left(-1 - \varepsilon\right)}\\
\mathbf{if}\;\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{x \cdot \left(\varepsilon + -1\right)} + t\_0 \cdot \left(1 + \frac{-1}{\varepsilon}\right) \leq 0:\\
\;\;\;\;0.5 \cdot \left(e^{-x} \cdot \left(x + \left(x + 2\right)\right)\right)\\

\mathbf{else}:\\
\;\;\;\;0.5 \cdot \left(t\_0 + e^{x \cdot \varepsilon}\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if (-.f64 (*.f64 (+.f64 #s(literal 1 binary64) (/.f64 #s(literal 1 binary64) eps)) (exp.f64 (neg.f64 (*.f64 (-.f64 #s(literal 1 binary64) eps) x)))) (*.f64 (-.f64 (/.f64 #s(literal 1 binary64) eps) #s(literal 1 binary64)) (exp.f64 (neg.f64 (*.f64 (+.f64 #s(literal 1 binary64) eps) x))))) < 0.0

    1. Initial program 31.6%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Add Preprocessing
    3. Taylor expanded in eps around 0

      \[\leadsto \color{blue}{\frac{1}{2} \cdot \left(\left(e^{\mathsf{neg}\left(x\right)} + x \cdot e^{\mathsf{neg}\left(x\right)}\right) - \left(-1 \cdot e^{\mathsf{neg}\left(x\right)} + -1 \cdot \left(x \cdot e^{\mathsf{neg}\left(x\right)}\right)\right)\right)} \]
    4. Step-by-step derivation
      1. lower-*.f64N/A

        \[\leadsto \color{blue}{\frac{1}{2} \cdot \left(\left(e^{\mathsf{neg}\left(x\right)} + x \cdot e^{\mathsf{neg}\left(x\right)}\right) - \left(-1 \cdot e^{\mathsf{neg}\left(x\right)} + -1 \cdot \left(x \cdot e^{\mathsf{neg}\left(x\right)}\right)\right)\right)} \]
      2. mul-1-negN/A

        \[\leadsto \frac{1}{2} \cdot \left(\left(e^{\mathsf{neg}\left(x\right)} + x \cdot e^{\mathsf{neg}\left(x\right)}\right) - \left(-1 \cdot e^{\mathsf{neg}\left(x\right)} + \color{blue}{\left(\mathsf{neg}\left(x \cdot e^{\mathsf{neg}\left(x\right)}\right)\right)}\right)\right) \]
      3. unsub-negN/A

        \[\leadsto \frac{1}{2} \cdot \left(\left(e^{\mathsf{neg}\left(x\right)} + x \cdot e^{\mathsf{neg}\left(x\right)}\right) - \color{blue}{\left(-1 \cdot e^{\mathsf{neg}\left(x\right)} - x \cdot e^{\mathsf{neg}\left(x\right)}\right)}\right) \]
      4. associate-+l-N/A

        \[\leadsto \frac{1}{2} \cdot \color{blue}{\left(\left(\left(e^{\mathsf{neg}\left(x\right)} + x \cdot e^{\mathsf{neg}\left(x\right)}\right) - -1 \cdot e^{\mathsf{neg}\left(x\right)}\right) + x \cdot e^{\mathsf{neg}\left(x\right)}\right)} \]
      5. *-commutativeN/A

        \[\leadsto \frac{1}{2} \cdot \left(\left(\left(e^{\mathsf{neg}\left(x\right)} + x \cdot e^{\mathsf{neg}\left(x\right)}\right) - -1 \cdot e^{\mathsf{neg}\left(x\right)}\right) + \color{blue}{e^{\mathsf{neg}\left(x\right)} \cdot x}\right) \]
      6. distribute-rgt1-inN/A

        \[\leadsto \frac{1}{2} \cdot \left(\left(\color{blue}{\left(x + 1\right) \cdot e^{\mathsf{neg}\left(x\right)}} - -1 \cdot e^{\mathsf{neg}\left(x\right)}\right) + e^{\mathsf{neg}\left(x\right)} \cdot x\right) \]
      7. distribute-rgt-out--N/A

        \[\leadsto \frac{1}{2} \cdot \left(\color{blue}{e^{\mathsf{neg}\left(x\right)} \cdot \left(\left(x + 1\right) - -1\right)} + e^{\mathsf{neg}\left(x\right)} \cdot x\right) \]
      8. distribute-lft-outN/A

        \[\leadsto \frac{1}{2} \cdot \color{blue}{\left(e^{\mathsf{neg}\left(x\right)} \cdot \left(\left(\left(x + 1\right) - -1\right) + x\right)\right)} \]
      9. lower-*.f64N/A

        \[\leadsto \frac{1}{2} \cdot \color{blue}{\left(e^{\mathsf{neg}\left(x\right)} \cdot \left(\left(\left(x + 1\right) - -1\right) + x\right)\right)} \]
    5. Applied rewrites99.9%

      \[\leadsto \color{blue}{0.5 \cdot \left(e^{-x} \cdot \left(\left(x + 2\right) + x\right)\right)} \]

    if 0.0 < (-.f64 (*.f64 (+.f64 #s(literal 1 binary64) (/.f64 #s(literal 1 binary64) eps)) (exp.f64 (neg.f64 (*.f64 (-.f64 #s(literal 1 binary64) eps) x)))) (*.f64 (-.f64 (/.f64 #s(literal 1 binary64) eps) #s(literal 1 binary64)) (exp.f64 (neg.f64 (*.f64 (+.f64 #s(literal 1 binary64) eps) x)))))

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Add Preprocessing
    3. Taylor expanded in eps around inf

      \[\leadsto \color{blue}{\frac{1}{2} \cdot \left(e^{\mathsf{neg}\left(x \cdot \left(1 - \varepsilon\right)\right)} - -1 \cdot e^{\mathsf{neg}\left(x \cdot \left(1 + \varepsilon\right)\right)}\right)} \]
    4. Step-by-step derivation
      1. lower-*.f64N/A

        \[\leadsto \color{blue}{\frac{1}{2} \cdot \left(e^{\mathsf{neg}\left(x \cdot \left(1 - \varepsilon\right)\right)} - -1 \cdot e^{\mathsf{neg}\left(x \cdot \left(1 + \varepsilon\right)\right)}\right)} \]
      2. cancel-sign-sub-invN/A

        \[\leadsto \frac{1}{2} \cdot \color{blue}{\left(e^{\mathsf{neg}\left(x \cdot \left(1 - \varepsilon\right)\right)} + \left(\mathsf{neg}\left(-1\right)\right) \cdot e^{\mathsf{neg}\left(x \cdot \left(1 + \varepsilon\right)\right)}\right)} \]
      3. metadata-evalN/A

        \[\leadsto \frac{1}{2} \cdot \left(e^{\mathsf{neg}\left(x \cdot \left(1 - \varepsilon\right)\right)} + \color{blue}{1} \cdot e^{\mathsf{neg}\left(x \cdot \left(1 + \varepsilon\right)\right)}\right) \]
      4. *-lft-identityN/A

        \[\leadsto \frac{1}{2} \cdot \left(e^{\mathsf{neg}\left(x \cdot \left(1 - \varepsilon\right)\right)} + \color{blue}{e^{\mathsf{neg}\left(x \cdot \left(1 + \varepsilon\right)\right)}}\right) \]
      5. lower-+.f64N/A

        \[\leadsto \frac{1}{2} \cdot \color{blue}{\left(e^{\mathsf{neg}\left(x \cdot \left(1 - \varepsilon\right)\right)} + e^{\mathsf{neg}\left(x \cdot \left(1 + \varepsilon\right)\right)}\right)} \]
    5. Applied rewrites100.0%

      \[\leadsto \color{blue}{0.5 \cdot \left(e^{\mathsf{fma}\left(x, \varepsilon, -x\right)} + e^{x \cdot \left(-1 - \varepsilon\right)}\right)} \]
    6. Taylor expanded in eps around inf

      \[\leadsto \frac{1}{2} \cdot \left(e^{\color{blue}{\varepsilon \cdot x}} + e^{x \cdot \left(-1 - \varepsilon\right)}\right) \]
    7. Step-by-step derivation
      1. *-commutativeN/A

        \[\leadsto \frac{1}{2} \cdot \left(e^{\color{blue}{x \cdot \varepsilon}} + e^{x \cdot \left(-1 - \varepsilon\right)}\right) \]
      2. lower-*.f64100.0

        \[\leadsto 0.5 \cdot \left(e^{\color{blue}{x \cdot \varepsilon}} + e^{x \cdot \left(-1 - \varepsilon\right)}\right) \]
    8. Applied rewrites100.0%

      \[\leadsto 0.5 \cdot \left(e^{\color{blue}{x \cdot \varepsilon}} + e^{x \cdot \left(-1 - \varepsilon\right)}\right) \]
  3. Recombined 2 regimes into one program.
  4. Final simplification100.0%

    \[\leadsto \begin{array}{l} \mathbf{if}\;\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{x \cdot \left(\varepsilon + -1\right)} + e^{x \cdot \left(-1 - \varepsilon\right)} \cdot \left(1 + \frac{-1}{\varepsilon}\right) \leq 0:\\ \;\;\;\;0.5 \cdot \left(e^{-x} \cdot \left(x + \left(x + 2\right)\right)\right)\\ \mathbf{else}:\\ \;\;\;\;0.5 \cdot \left(e^{x \cdot \left(-1 - \varepsilon\right)} + e^{x \cdot \varepsilon}\right)\\ \end{array} \]
  5. Add Preprocessing

Alternative 2: 94.6% accurate, 0.7× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{x \cdot \left(\varepsilon + -1\right)} + e^{x \cdot \left(-1 - \varepsilon\right)} \cdot \left(1 + \frac{-1}{\varepsilon}\right) \leq 2:\\ \;\;\;\;0.5 \cdot \left(e^{-x} \cdot \left(x + \left(x + 2\right)\right)\right)\\ \mathbf{else}:\\ \;\;\;\;\mathsf{fma}\left(\left(\varepsilon \cdot \left(\varepsilon \cdot \left(x \cdot \varepsilon\right) - x\right)\right) \cdot \left(0.5 \cdot x\right), \frac{1}{\varepsilon}, 1\right)\\ \end{array} \end{array} \]
;; Alternative 2: regime test against 2 instead of 0.
;; Test <= 2  ->  eps->0 Taylor form  0.5 * e^(-x) * (x + (x + 2))
;; otherwise  ->  fma-based polynomial in eps:
;;   fma((eps*(eps*(x*eps) - x)) * (0.5*x), 1/eps, 1)
(FPCore (x eps)
 :precision binary64
 (if (<=
      (+
       (* (+ 1.0 (/ 1.0 eps)) (exp (* x (+ eps -1.0))))
       (* (exp (* x (- -1.0 eps))) (+ 1.0 (/ -1.0 eps))))
      2.0)
   (* 0.5 (* (exp (- x)) (+ x (+ x 2.0))))
   (fma (* (* eps (- (* eps (* x eps)) x)) (* 0.5 x)) (/ 1.0 eps) 1.0)))
/* Alternative 2: regime test against 2.  If the rewritten numerator is
   <= 2, use the eps->0 Taylor form; otherwise an fma-based polynomial
   in eps: fma((eps*(eps*(x*eps) - x)) * (0.5*x), 1/eps, 1). */
double code(double x, double eps) {
	double tmp;
	if ((((1.0 + (1.0 / eps)) * exp((x * (eps + -1.0)))) + (exp((x * (-1.0 - eps))) * (1.0 + (-1.0 / eps)))) <= 2.0) {
		/* eps -> 0 Taylor form: 0.5 * e^{-x} * (x + (x + 2)) */
		tmp = 0.5 * (exp(-x) * (x + (x + 2.0)));
	} else {
		tmp = fma(((eps * ((eps * (x * eps)) - x)) * (0.5 * x)), (1.0 / eps), 1.0);
	}
	return tmp;
}
# Alternative 2: regime test against 2.  Test <= 2 picks the eps->0 Taylor
# form; otherwise an fma-based polynomial in eps.
function code(x, eps)
	tmp = 0.0
	if (Float64(Float64(Float64(1.0 + Float64(1.0 / eps)) * exp(Float64(x * Float64(eps + -1.0)))) + Float64(exp(Float64(x * Float64(-1.0 - eps))) * Float64(1.0 + Float64(-1.0 / eps)))) <= 2.0)
		# eps -> 0 Taylor form: 0.5 * e^(-x) * (x + (x + 2))
		tmp = Float64(0.5 * Float64(exp(Float64(-x)) * Float64(x + Float64(x + 2.0))));
	else
		tmp = fma(Float64(Float64(eps * Float64(Float64(eps * Float64(x * eps)) - x)) * Float64(0.5 * x)), Float64(1.0 / eps), 1.0);
	end
	return tmp
end
(* Alternative 2: regime test against 2; test <= 2 picks the eps->0 Taylor
   form, otherwise a polynomial in eps (written as multiply-add). *)
code[x_, eps_] := If[LessEqual[N[(N[(N[(1.0 + N[(1.0 / eps), $MachinePrecision]), $MachinePrecision] * N[Exp[N[(x * N[(eps + -1.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] + N[(N[Exp[N[(x * N[(-1.0 - eps), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] * N[(1.0 + N[(-1.0 / eps), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], 2.0], N[(0.5 * N[(N[Exp[(-x)], $MachinePrecision] * N[(x + N[(x + 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[(eps * N[(N[(eps * N[(x * eps), $MachinePrecision]), $MachinePrecision] - x), $MachinePrecision]), $MachinePrecision] * N[(0.5 * x), $MachinePrecision]), $MachinePrecision] * N[(1.0 / eps), $MachinePrecision] + 1.0), $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{x \cdot \left(\varepsilon + -1\right)} + e^{x \cdot \left(-1 - \varepsilon\right)} \cdot \left(1 + \frac{-1}{\varepsilon}\right) \leq 2:\\
\;\;\;\;0.5 \cdot \left(e^{-x} \cdot \left(x + \left(x + 2\right)\right)\right)\\

\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(\left(\varepsilon \cdot \left(\varepsilon \cdot \left(x \cdot \varepsilon\right) - x\right)\right) \cdot \left(0.5 \cdot x\right), \frac{1}{\varepsilon}, 1\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if (-.f64 (*.f64 (+.f64 #s(literal 1 binary64) (/.f64 #s(literal 1 binary64) eps)) (exp.f64 (neg.f64 (*.f64 (-.f64 #s(literal 1 binary64) eps) x)))) (*.f64 (-.f64 (/.f64 #s(literal 1 binary64) eps) #s(literal 1 binary64)) (exp.f64 (neg.f64 (*.f64 (+.f64 #s(literal 1 binary64) eps) x))))) < 2

    1. Initial program 50.5%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Add Preprocessing
    3. Taylor expanded in eps around 0

      \[\leadsto \color{blue}{\frac{1}{2} \cdot \left(\left(e^{\mathsf{neg}\left(x\right)} + x \cdot e^{\mathsf{neg}\left(x\right)}\right) - \left(-1 \cdot e^{\mathsf{neg}\left(x\right)} + -1 \cdot \left(x \cdot e^{\mathsf{neg}\left(x\right)}\right)\right)\right)} \]
    4. Step-by-step derivation
      1. lower-*.f64N/A

        \[\leadsto \color{blue}{\frac{1}{2} \cdot \left(\left(e^{\mathsf{neg}\left(x\right)} + x \cdot e^{\mathsf{neg}\left(x\right)}\right) - \left(-1 \cdot e^{\mathsf{neg}\left(x\right)} + -1 \cdot \left(x \cdot e^{\mathsf{neg}\left(x\right)}\right)\right)\right)} \]
      2. mul-1-negN/A

        \[\leadsto \frac{1}{2} \cdot \left(\left(e^{\mathsf{neg}\left(x\right)} + x \cdot e^{\mathsf{neg}\left(x\right)}\right) - \left(-1 \cdot e^{\mathsf{neg}\left(x\right)} + \color{blue}{\left(\mathsf{neg}\left(x \cdot e^{\mathsf{neg}\left(x\right)}\right)\right)}\right)\right) \]
      3. unsub-negN/A

        \[\leadsto \frac{1}{2} \cdot \left(\left(e^{\mathsf{neg}\left(x\right)} + x \cdot e^{\mathsf{neg}\left(x\right)}\right) - \color{blue}{\left(-1 \cdot e^{\mathsf{neg}\left(x\right)} - x \cdot e^{\mathsf{neg}\left(x\right)}\right)}\right) \]
      4. associate-+l-N/A

        \[\leadsto \frac{1}{2} \cdot \color{blue}{\left(\left(\left(e^{\mathsf{neg}\left(x\right)} + x \cdot e^{\mathsf{neg}\left(x\right)}\right) - -1 \cdot e^{\mathsf{neg}\left(x\right)}\right) + x \cdot e^{\mathsf{neg}\left(x\right)}\right)} \]
      5. *-commutativeN/A

        \[\leadsto \frac{1}{2} \cdot \left(\left(\left(e^{\mathsf{neg}\left(x\right)} + x \cdot e^{\mathsf{neg}\left(x\right)}\right) - -1 \cdot e^{\mathsf{neg}\left(x\right)}\right) + \color{blue}{e^{\mathsf{neg}\left(x\right)} \cdot x}\right) \]
      6. distribute-rgt1-inN/A

        \[\leadsto \frac{1}{2} \cdot \left(\left(\color{blue}{\left(x + 1\right) \cdot e^{\mathsf{neg}\left(x\right)}} - -1 \cdot e^{\mathsf{neg}\left(x\right)}\right) + e^{\mathsf{neg}\left(x\right)} \cdot x\right) \]
      7. distribute-rgt-out--N/A

        \[\leadsto \frac{1}{2} \cdot \left(\color{blue}{e^{\mathsf{neg}\left(x\right)} \cdot \left(\left(x + 1\right) - -1\right)} + e^{\mathsf{neg}\left(x\right)} \cdot x\right) \]
      8. distribute-lft-outN/A

        \[\leadsto \frac{1}{2} \cdot \color{blue}{\left(e^{\mathsf{neg}\left(x\right)} \cdot \left(\left(\left(x + 1\right) - -1\right) + x\right)\right)} \]
      9. lower-*.f64N/A

        \[\leadsto \frac{1}{2} \cdot \color{blue}{\left(e^{\mathsf{neg}\left(x\right)} \cdot \left(\left(\left(x + 1\right) - -1\right) + x\right)\right)} \]
    5. Applied rewrites100.0%

      \[\leadsto \color{blue}{0.5 \cdot \left(e^{-x} \cdot \left(\left(x + 2\right) + x\right)\right)} \]

    if 2 < (-.f64 (*.f64 (+.f64 #s(literal 1 binary64) (/.f64 #s(literal 1 binary64) eps)) (exp.f64 (neg.f64 (*.f64 (-.f64 #s(literal 1 binary64) eps) x)))) (*.f64 (-.f64 (/.f64 #s(literal 1 binary64) eps) #s(literal 1 binary64)) (exp.f64 (neg.f64 (*.f64 (+.f64 #s(literal 1 binary64) eps) x)))))

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Add Preprocessing
    3. Taylor expanded in x around 0

      \[\leadsto \color{blue}{1 + x \cdot \left(\frac{1}{2} \cdot \left(x \cdot \left(\frac{1}{2} \cdot \left(\left(1 + \frac{1}{\varepsilon}\right) \cdot {\left(\varepsilon - 1\right)}^{2}\right) - \frac{1}{2} \cdot \left({\left(1 + \varepsilon\right)}^{2} \cdot \left(\frac{1}{\varepsilon} - 1\right)\right)\right)\right) + \frac{1}{2} \cdot \left(\left(1 + \frac{1}{\varepsilon}\right) \cdot \left(\varepsilon - 1\right) - -1 \cdot \left(\left(1 + \varepsilon\right) \cdot \left(\frac{1}{\varepsilon} - 1\right)\right)\right)\right)} \]
    4. Applied rewrites86.9%

      \[\leadsto \color{blue}{\mathsf{fma}\left(0.5 \cdot x, \left(\left(-1 - \varepsilon\right) + \frac{\varepsilon + 1}{\varepsilon}\right) + \mathsf{fma}\left(1 + \frac{1}{\varepsilon}, \mathsf{fma}\left(0.5 \cdot x, \left(1 - \varepsilon\right) \cdot \left(1 - \varepsilon\right), -1 + \varepsilon\right), x \cdot \left(0.5 \cdot \left(\left(\left(-1 - \varepsilon\right) + \frac{\varepsilon + 1}{\varepsilon}\right) \cdot \left(-1 - \varepsilon\right)\right)\right)\right), 1\right)} \]
    5. Taylor expanded in eps around 0

      \[\leadsto \mathsf{fma}\left(\frac{1}{2} \cdot x, \color{blue}{\frac{\frac{-1}{2} \cdot x + \left(\frac{1}{2} \cdot x + \varepsilon \cdot \left(-1 \cdot x + \left(\frac{-1}{2} \cdot x + \left(\frac{1}{2} \cdot x + \varepsilon \cdot \left(x + \left(-1 \cdot x + \varepsilon \cdot x\right)\right)\right)\right)\right)\right)}{\varepsilon}}, 1\right) \]
    6. Step-by-step derivation
      1. lower-/.f64N/A

        \[\leadsto \mathsf{fma}\left(\frac{1}{2} \cdot x, \color{blue}{\frac{\frac{-1}{2} \cdot x + \left(\frac{1}{2} \cdot x + \varepsilon \cdot \left(-1 \cdot x + \left(\frac{-1}{2} \cdot x + \left(\frac{1}{2} \cdot x + \varepsilon \cdot \left(x + \left(-1 \cdot x + \varepsilon \cdot x\right)\right)\right)\right)\right)\right)}{\varepsilon}}, 1\right) \]
    7. Applied rewrites91.0%

      \[\leadsto \mathsf{fma}\left(0.5 \cdot x, \color{blue}{\frac{0 + \varepsilon \cdot \left(\left(-x\right) + \varepsilon \cdot \left(0 + x \cdot \varepsilon\right)\right)}{\varepsilon}}, 1\right) \]
    8. Step-by-step derivation
      1. lift-*.f64N/A

        \[\leadsto \color{blue}{\left(\frac{1}{2} \cdot x\right)} \cdot \frac{0 + \varepsilon \cdot \left(\left(\mathsf{neg}\left(x\right)\right) + \varepsilon \cdot \left(0 + x \cdot \varepsilon\right)\right)}{\varepsilon} + 1 \]
      2. lift-neg.f64N/A

        \[\leadsto \left(\frac{1}{2} \cdot x\right) \cdot \frac{0 + \varepsilon \cdot \left(\color{blue}{\left(\mathsf{neg}\left(x\right)\right)} + \varepsilon \cdot \left(0 + x \cdot \varepsilon\right)\right)}{\varepsilon} + 1 \]
      3. lift-*.f64N/A

        \[\leadsto \left(\frac{1}{2} \cdot x\right) \cdot \frac{0 + \varepsilon \cdot \left(\left(\mathsf{neg}\left(x\right)\right) + \varepsilon \cdot \left(0 + \color{blue}{x \cdot \varepsilon}\right)\right)}{\varepsilon} + 1 \]
      4. lift-+.f64N/A

        \[\leadsto \left(\frac{1}{2} \cdot x\right) \cdot \frac{0 + \varepsilon \cdot \left(\left(\mathsf{neg}\left(x\right)\right) + \varepsilon \cdot \color{blue}{\left(0 + x \cdot \varepsilon\right)}\right)}{\varepsilon} + 1 \]
      5. lift-*.f64N/A

        \[\leadsto \left(\frac{1}{2} \cdot x\right) \cdot \frac{0 + \varepsilon \cdot \left(\left(\mathsf{neg}\left(x\right)\right) + \color{blue}{\varepsilon \cdot \left(0 + x \cdot \varepsilon\right)}\right)}{\varepsilon} + 1 \]
      6. lift-+.f64N/A

        \[\leadsto \left(\frac{1}{2} \cdot x\right) \cdot \frac{0 + \varepsilon \cdot \color{blue}{\left(\left(\mathsf{neg}\left(x\right)\right) + \varepsilon \cdot \left(0 + x \cdot \varepsilon\right)\right)}}{\varepsilon} + 1 \]
      7. lift-*.f64N/A

        \[\leadsto \left(\frac{1}{2} \cdot x\right) \cdot \frac{0 + \color{blue}{\varepsilon \cdot \left(\left(\mathsf{neg}\left(x\right)\right) + \varepsilon \cdot \left(0 + x \cdot \varepsilon\right)\right)}}{\varepsilon} + 1 \]
      8. +-lft-identityN/A

        \[\leadsto \left(\frac{1}{2} \cdot x\right) \cdot \frac{\color{blue}{\varepsilon \cdot \left(\left(\mathsf{neg}\left(x\right)\right) + \varepsilon \cdot \left(0 + x \cdot \varepsilon\right)\right)}}{\varepsilon} + 1 \]
      9. div-invN/A

        \[\leadsto \left(\frac{1}{2} \cdot x\right) \cdot \color{blue}{\left(\left(\varepsilon \cdot \left(\left(\mathsf{neg}\left(x\right)\right) + \varepsilon \cdot \left(0 + x \cdot \varepsilon\right)\right)\right) \cdot \frac{1}{\varepsilon}\right)} + 1 \]
      10. associate-*r*N/A

        \[\leadsto \color{blue}{\left(\left(\frac{1}{2} \cdot x\right) \cdot \left(\varepsilon \cdot \left(\left(\mathsf{neg}\left(x\right)\right) + \varepsilon \cdot \left(0 + x \cdot \varepsilon\right)\right)\right)\right) \cdot \frac{1}{\varepsilon}} + 1 \]
      11. lower-fma.f64N/A

        \[\leadsto \color{blue}{\mathsf{fma}\left(\left(\frac{1}{2} \cdot x\right) \cdot \left(\varepsilon \cdot \left(\left(\mathsf{neg}\left(x\right)\right) + \varepsilon \cdot \left(0 + x \cdot \varepsilon\right)\right)\right), \frac{1}{\varepsilon}, 1\right)} \]
    9. Applied rewrites92.6%

      \[\leadsto \color{blue}{\mathsf{fma}\left(\left(\varepsilon \cdot \left(\varepsilon \cdot \left(x \cdot \varepsilon\right) - x\right)\right) \cdot \left(x \cdot 0.5\right), \frac{1}{\varepsilon}, 1\right)} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification96.7%

    \[\leadsto \begin{array}{l} \mathbf{if}\;\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{x \cdot \left(\varepsilon + -1\right)} + e^{x \cdot \left(-1 - \varepsilon\right)} \cdot \left(1 + \frac{-1}{\varepsilon}\right) \leq 2:\\ \;\;\;\;0.5 \cdot \left(e^{-x} \cdot \left(x + \left(x + 2\right)\right)\right)\\ \mathbf{else}:\\ \;\;\;\;\mathsf{fma}\left(\left(\varepsilon \cdot \left(\varepsilon \cdot \left(x \cdot \varepsilon\right) - x\right)\right) \cdot \left(0.5 \cdot x\right), \frac{1}{\varepsilon}, 1\right)\\ \end{array} \]
  5. Add Preprocessing

Alternative 3: 93.8% accurate, 0.7× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{x \cdot \left(\varepsilon + -1\right)} + e^{x \cdot \left(-1 - \varepsilon\right)} \cdot \left(1 + \frac{-1}{\varepsilon}\right) \leq 2:\\ \;\;\;\;e^{-x}\\ \mathbf{else}:\\ \;\;\;\;\mathsf{fma}\left(\left(\varepsilon \cdot \left(\varepsilon \cdot \left(x \cdot \varepsilon\right) - x\right)\right) \cdot \left(0.5 \cdot x\right), \frac{1}{\varepsilon}, 1\right)\\ \end{array} \end{array} \]
;; Alternative 3: same regime test against 2 as Alternative 2, but the
;; first branch collapses to plain e^(-x); the else branch keeps the
;; fma-based polynomial in eps.
(FPCore (x eps)
 :precision binary64
 (if (<=
      (+
       (* (+ 1.0 (/ 1.0 eps)) (exp (* x (+ eps -1.0))))
       (* (exp (* x (- -1.0 eps))) (+ 1.0 (/ -1.0 eps))))
      2.0)
   (exp (- x))
   (fma (* (* eps (- (* eps (* x eps)) x)) (* 0.5 x)) (/ 1.0 eps) 1.0)))
/* Alternative 3: same regime test against 2 as Alternative 2, but the
   first branch is simply e^{-x}; the else branch keeps the fma-based
   polynomial in eps. */
double code(double x, double eps) {
	double tmp;
	if ((((1.0 + (1.0 / eps)) * exp((x * (eps + -1.0)))) + (exp((x * (-1.0 - eps))) * (1.0 + (-1.0 / eps)))) <= 2.0) {
		tmp = exp(-x);
	} else {
		tmp = fma(((eps * ((eps * (x * eps)) - x)) * (0.5 * x)), (1.0 / eps), 1.0);
	}
	return tmp;
}
# Alternative 3 (Float64): same regime split as the C version above.
# The branch test re-evaluates twice the original expression; <= 2.0
# selects the exp(-x) approximation, otherwise the fma Taylor form.
# Generated code — the explicit Float64(...) wrappers pin each
# intermediate rounding; do not simplify them away.
function code(x, eps)
	tmp = 0.0
	if (Float64(Float64(Float64(1.0 + Float64(1.0 / eps)) * exp(Float64(x * Float64(eps + -1.0)))) + Float64(exp(Float64(x * Float64(-1.0 - eps))) * Float64(1.0 + Float64(-1.0 / eps)))) <= 2.0)
		# small-value regime
		tmp = exp(Float64(-x));
	else
		# series regime: fma keeps the leading 1.0 exact
		tmp = fma(Float64(Float64(eps * Float64(Float64(eps * Float64(x * eps)) - x)) * Float64(0.5 * x)), Float64(1.0 / eps), 1.0);
	end
	return tmp
end
(* Alternative 3: machine-precision If[...] form of the regime-split program; each N[..., $MachinePrecision] mirrors one binary64 rounding step. *)
code[x_, eps_] := If[LessEqual[N[(N[(N[(1.0 + N[(1.0 / eps), $MachinePrecision]), $MachinePrecision] * N[Exp[N[(x * N[(eps + -1.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] + N[(N[Exp[N[(x * N[(-1.0 - eps), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] * N[(1.0 + N[(-1.0 / eps), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], 2.0], N[Exp[(-x)], $MachinePrecision], N[(N[(N[(eps * N[(N[(eps * N[(x * eps), $MachinePrecision]), $MachinePrecision] - x), $MachinePrecision]), $MachinePrecision] * N[(0.5 * x), $MachinePrecision]), $MachinePrecision] * N[(1.0 / eps), $MachinePrecision] + 1.0), $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{x \cdot \left(\varepsilon + -1\right)} + e^{x \cdot \left(-1 - \varepsilon\right)} \cdot \left(1 + \frac{-1}{\varepsilon}\right) \leq 2:\\
\;\;\;\;e^{-x}\\

\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(\left(\varepsilon \cdot \left(\varepsilon \cdot \left(x \cdot \varepsilon\right) - x\right)\right) \cdot \left(0.5 \cdot x\right), \frac{1}{\varepsilon}, 1\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if (-.f64 (*.f64 (+.f64 #s(literal 1 binary64) (/.f64 #s(literal 1 binary64) eps)) (exp.f64 (neg.f64 (*.f64 (-.f64 #s(literal 1 binary64) eps) x)))) (*.f64 (-.f64 (/.f64 #s(literal 1 binary64) eps) #s(literal 1 binary64)) (exp.f64 (neg.f64 (*.f64 (+.f64 #s(literal 1 binary64) eps) x))))) < 2

    1. Initial program 50.5%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Add Preprocessing
    3. Taylor expanded in eps around inf

      \[\leadsto \color{blue}{\frac{1}{2} \cdot \left(e^{\mathsf{neg}\left(x \cdot \left(1 - \varepsilon\right)\right)} - -1 \cdot e^{\mathsf{neg}\left(x \cdot \left(1 + \varepsilon\right)\right)}\right)} \]
    4. Step-by-step derivation
      1. lower-*.f64N/A

        \[\leadsto \color{blue}{\frac{1}{2} \cdot \left(e^{\mathsf{neg}\left(x \cdot \left(1 - \varepsilon\right)\right)} - -1 \cdot e^{\mathsf{neg}\left(x \cdot \left(1 + \varepsilon\right)\right)}\right)} \]
      2. cancel-sign-sub-invN/A

        \[\leadsto \frac{1}{2} \cdot \color{blue}{\left(e^{\mathsf{neg}\left(x \cdot \left(1 - \varepsilon\right)\right)} + \left(\mathsf{neg}\left(-1\right)\right) \cdot e^{\mathsf{neg}\left(x \cdot \left(1 + \varepsilon\right)\right)}\right)} \]
      3. metadata-evalN/A

        \[\leadsto \frac{1}{2} \cdot \left(e^{\mathsf{neg}\left(x \cdot \left(1 - \varepsilon\right)\right)} + \color{blue}{1} \cdot e^{\mathsf{neg}\left(x \cdot \left(1 + \varepsilon\right)\right)}\right) \]
      4. *-lft-identityN/A

        \[\leadsto \frac{1}{2} \cdot \left(e^{\mathsf{neg}\left(x \cdot \left(1 - \varepsilon\right)\right)} + \color{blue}{e^{\mathsf{neg}\left(x \cdot \left(1 + \varepsilon\right)\right)}}\right) \]
      5. lower-+.f64N/A

        \[\leadsto \frac{1}{2} \cdot \color{blue}{\left(e^{\mathsf{neg}\left(x \cdot \left(1 - \varepsilon\right)\right)} + e^{\mathsf{neg}\left(x \cdot \left(1 + \varepsilon\right)\right)}\right)} \]
    5. Applied rewrites 97.9%

      \[\leadsto \color{blue}{0.5 \cdot \left(e^{\mathsf{fma}\left(x, \varepsilon, -x\right)} + e^{x \cdot \left(-1 - \varepsilon\right)}\right)} \]
    6. Taylor expanded in eps around 0

      \[\leadsto \color{blue}{\frac{1}{2} \cdot \left(e^{\mathsf{neg}\left(x\right)} + e^{-1 \cdot x}\right)} \]
    7. Step-by-step derivation
      1. distribute-rgt-inN/A

        \[\leadsto \color{blue}{e^{\mathsf{neg}\left(x\right)} \cdot \frac{1}{2} + e^{-1 \cdot x} \cdot \frac{1}{2}} \]
      2. neg-mul-1N/A

        \[\leadsto e^{\mathsf{neg}\left(x\right)} \cdot \frac{1}{2} + e^{\color{blue}{\mathsf{neg}\left(x\right)}} \cdot \frac{1}{2} \]
      3. distribute-lft-outN/A

        \[\leadsto \color{blue}{e^{\mathsf{neg}\left(x\right)} \cdot \left(\frac{1}{2} + \frac{1}{2}\right)} \]
      4. metadata-evalN/A

        \[\leadsto e^{\mathsf{neg}\left(x\right)} \cdot \color{blue}{1} \]
      5. lower-*.f64N/A

        \[\leadsto \color{blue}{e^{\mathsf{neg}\left(x\right)} \cdot 1} \]
      6. neg-mul-1N/A

        \[\leadsto e^{\color{blue}{-1 \cdot x}} \cdot 1 \]
      7. lower-exp.f64N/A

        \[\leadsto \color{blue}{e^{-1 \cdot x}} \cdot 1 \]
      8. neg-mul-1N/A

        \[\leadsto e^{\color{blue}{\mathsf{neg}\left(x\right)}} \cdot 1 \]
      9. lower-neg.f64 97.9

        \[\leadsto e^{\color{blue}{-x}} \cdot 1 \]
    8. Applied rewrites 97.9%

      \[\leadsto \color{blue}{e^{-x} \cdot 1} \]

    if 2 < (-.f64 (*.f64 (+.f64 #s(literal 1 binary64) (/.f64 #s(literal 1 binary64) eps)) (exp.f64 (neg.f64 (*.f64 (-.f64 #s(literal 1 binary64) eps) x)))) (*.f64 (-.f64 (/.f64 #s(literal 1 binary64) eps) #s(literal 1 binary64)) (exp.f64 (neg.f64 (*.f64 (+.f64 #s(literal 1 binary64) eps) x)))))

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Add Preprocessing
    3. Taylor expanded in x around 0

      \[\leadsto \color{blue}{1 + x \cdot \left(\frac{1}{2} \cdot \left(x \cdot \left(\frac{1}{2} \cdot \left(\left(1 + \frac{1}{\varepsilon}\right) \cdot {\left(\varepsilon - 1\right)}^{2}\right) - \frac{1}{2} \cdot \left({\left(1 + \varepsilon\right)}^{2} \cdot \left(\frac{1}{\varepsilon} - 1\right)\right)\right)\right) + \frac{1}{2} \cdot \left(\left(1 + \frac{1}{\varepsilon}\right) \cdot \left(\varepsilon - 1\right) - -1 \cdot \left(\left(1 + \varepsilon\right) \cdot \left(\frac{1}{\varepsilon} - 1\right)\right)\right)\right)} \]
    4. Applied rewrites 86.9%

      \[\leadsto \color{blue}{\mathsf{fma}\left(0.5 \cdot x, \left(\left(-1 - \varepsilon\right) + \frac{\varepsilon + 1}{\varepsilon}\right) + \mathsf{fma}\left(1 + \frac{1}{\varepsilon}, \mathsf{fma}\left(0.5 \cdot x, \left(1 - \varepsilon\right) \cdot \left(1 - \varepsilon\right), -1 + \varepsilon\right), x \cdot \left(0.5 \cdot \left(\left(\left(-1 - \varepsilon\right) + \frac{\varepsilon + 1}{\varepsilon}\right) \cdot \left(-1 - \varepsilon\right)\right)\right)\right), 1\right)} \]
    5. Taylor expanded in eps around 0

      \[\leadsto \mathsf{fma}\left(\frac{1}{2} \cdot x, \color{blue}{\frac{\frac{-1}{2} \cdot x + \left(\frac{1}{2} \cdot x + \varepsilon \cdot \left(-1 \cdot x + \left(\frac{-1}{2} \cdot x + \left(\frac{1}{2} \cdot x + \varepsilon \cdot \left(x + \left(-1 \cdot x + \varepsilon \cdot x\right)\right)\right)\right)\right)\right)}{\varepsilon}}, 1\right) \]
    6. Step-by-step derivation
      1. lower-/.f64N/A

        \[\leadsto \mathsf{fma}\left(\frac{1}{2} \cdot x, \color{blue}{\frac{\frac{-1}{2} \cdot x + \left(\frac{1}{2} \cdot x + \varepsilon \cdot \left(-1 \cdot x + \left(\frac{-1}{2} \cdot x + \left(\frac{1}{2} \cdot x + \varepsilon \cdot \left(x + \left(-1 \cdot x + \varepsilon \cdot x\right)\right)\right)\right)\right)\right)}{\varepsilon}}, 1\right) \]
    7. Applied rewrites 91.0%

      \[\leadsto \mathsf{fma}\left(0.5 \cdot x, \color{blue}{\frac{0 + \varepsilon \cdot \left(\left(-x\right) + \varepsilon \cdot \left(0 + x \cdot \varepsilon\right)\right)}{\varepsilon}}, 1\right) \]
    8. Step-by-step derivation
      1. lift-*.f64N/A

        \[\leadsto \color{blue}{\left(\frac{1}{2} \cdot x\right)} \cdot \frac{0 + \varepsilon \cdot \left(\left(\mathsf{neg}\left(x\right)\right) + \varepsilon \cdot \left(0 + x \cdot \varepsilon\right)\right)}{\varepsilon} + 1 \]
      2. lift-neg.f64N/A

        \[\leadsto \left(\frac{1}{2} \cdot x\right) \cdot \frac{0 + \varepsilon \cdot \left(\color{blue}{\left(\mathsf{neg}\left(x\right)\right)} + \varepsilon \cdot \left(0 + x \cdot \varepsilon\right)\right)}{\varepsilon} + 1 \]
      3. lift-*.f64N/A

        \[\leadsto \left(\frac{1}{2} \cdot x\right) \cdot \frac{0 + \varepsilon \cdot \left(\left(\mathsf{neg}\left(x\right)\right) + \varepsilon \cdot \left(0 + \color{blue}{x \cdot \varepsilon}\right)\right)}{\varepsilon} + 1 \]
      4. lift-+.f64N/A

        \[\leadsto \left(\frac{1}{2} \cdot x\right) \cdot \frac{0 + \varepsilon \cdot \left(\left(\mathsf{neg}\left(x\right)\right) + \varepsilon \cdot \color{blue}{\left(0 + x \cdot \varepsilon\right)}\right)}{\varepsilon} + 1 \]
      5. lift-*.f64N/A

        \[\leadsto \left(\frac{1}{2} \cdot x\right) \cdot \frac{0 + \varepsilon \cdot \left(\left(\mathsf{neg}\left(x\right)\right) + \color{blue}{\varepsilon \cdot \left(0 + x \cdot \varepsilon\right)}\right)}{\varepsilon} + 1 \]
      6. lift-+.f64N/A

        \[\leadsto \left(\frac{1}{2} \cdot x\right) \cdot \frac{0 + \varepsilon \cdot \color{blue}{\left(\left(\mathsf{neg}\left(x\right)\right) + \varepsilon \cdot \left(0 + x \cdot \varepsilon\right)\right)}}{\varepsilon} + 1 \]
      7. lift-*.f64N/A

        \[\leadsto \left(\frac{1}{2} \cdot x\right) \cdot \frac{0 + \color{blue}{\varepsilon \cdot \left(\left(\mathsf{neg}\left(x\right)\right) + \varepsilon \cdot \left(0 + x \cdot \varepsilon\right)\right)}}{\varepsilon} + 1 \]
      8. +-lft-identityN/A

        \[\leadsto \left(\frac{1}{2} \cdot x\right) \cdot \frac{\color{blue}{\varepsilon \cdot \left(\left(\mathsf{neg}\left(x\right)\right) + \varepsilon \cdot \left(0 + x \cdot \varepsilon\right)\right)}}{\varepsilon} + 1 \]
      9. div-invN/A

        \[\leadsto \left(\frac{1}{2} \cdot x\right) \cdot \color{blue}{\left(\left(\varepsilon \cdot \left(\left(\mathsf{neg}\left(x\right)\right) + \varepsilon \cdot \left(0 + x \cdot \varepsilon\right)\right)\right) \cdot \frac{1}{\varepsilon}\right)} + 1 \]
      10. associate-*r*N/A

        \[\leadsto \color{blue}{\left(\left(\frac{1}{2} \cdot x\right) \cdot \left(\varepsilon \cdot \left(\left(\mathsf{neg}\left(x\right)\right) + \varepsilon \cdot \left(0 + x \cdot \varepsilon\right)\right)\right)\right) \cdot \frac{1}{\varepsilon}} + 1 \]
      11. lower-fma.f64N/A

        \[\leadsto \color{blue}{\mathsf{fma}\left(\left(\frac{1}{2} \cdot x\right) \cdot \left(\varepsilon \cdot \left(\left(\mathsf{neg}\left(x\right)\right) + \varepsilon \cdot \left(0 + x \cdot \varepsilon\right)\right)\right), \frac{1}{\varepsilon}, 1\right)} \]
    9. Applied rewrites 92.6%

      \[\leadsto \color{blue}{\mathsf{fma}\left(\left(\varepsilon \cdot \left(\varepsilon \cdot \left(x \cdot \varepsilon\right) - x\right)\right) \cdot \left(x \cdot 0.5\right), \frac{1}{\varepsilon}, 1\right)} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification 95.5%

    \[\leadsto \begin{array}{l} \mathbf{if}\;\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{x \cdot \left(\varepsilon + -1\right)} + e^{x \cdot \left(-1 - \varepsilon\right)} \cdot \left(1 + \frac{-1}{\varepsilon}\right) \leq 2:\\ \;\;\;\;e^{-x}\\ \mathbf{else}:\\ \;\;\;\;\mathsf{fma}\left(\left(\varepsilon \cdot \left(\varepsilon \cdot \left(x \cdot \varepsilon\right) - x\right)\right) \cdot \left(0.5 \cdot x\right), \frac{1}{\varepsilon}, 1\right)\\ \end{array} \]
  5. Add Preprocessing

Alternative 4: 78.6% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{x \cdot \left(\varepsilon + -1\right)} + e^{x \cdot \left(-1 - \varepsilon\right)} \cdot \left(1 + \frac{-1}{\varepsilon}\right) \leq 4:\\ \;\;\;\;\mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x, 0.3333333333333333, -0.5\right), 1\right)\\ \mathbf{else}:\\ \;\;\;\;0.5 \cdot \left(x \cdot \left(x \cdot \left(\varepsilon \cdot \varepsilon\right)\right)\right)\\ \end{array} \end{array} \]
; Alternative 4 (FPCore, binary64): regime split at threshold 4.
; Below the threshold: cubic Taylor polynomial in x (eps-independent);
; above: leading term 0.5*x^2*eps^2 of the eps-expansion.
(FPCore (x eps)
 :precision binary64
 (if (<=
      (+
       (* (+ 1.0 (/ 1.0 eps)) (exp (* x (+ eps -1.0))))
       (* (exp (* x (- -1.0 eps))) (+ 1.0 (/ -1.0 eps))))
      4.0)
   (fma (* x x) (fma x 0.3333333333333333 -0.5) 1.0)
   (* 0.5 (* x (* x (* eps eps))))))
/* Alternative 4 (binary64).  Regime split at threshold 4.0: the
 * condition re-evaluates twice the original expression.  Below the
 * threshold a cubic Taylor polynomial in x (1 - x^2/2 + x^3/3, nested
 * fma) is used; above it the leading eps-expansion term
 * 0.5*x^2*eps^2.  Generated code — keep the evaluation order. */
double code(double x, double eps) {
	double tmp;
	if ((((1.0 + (1.0 / eps)) * exp((x * (eps + -1.0)))) + (exp((x * (-1.0 - eps))) * (1.0 + (-1.0 / eps)))) <= 4.0) {
		/* 0.3333333333333333 is the double nearest 1/3 */
		tmp = fma((x * x), fma(x, 0.3333333333333333, -0.5), 1.0);
	} else {
		tmp = 0.5 * (x * (x * (eps * eps)));
	}
	return tmp;
}
# Alternative 4 (Float64): regime split at threshold 4.0 — same logic
# as the C version above.  Below the threshold: nested-fma cubic Taylor
# polynomial in x; above: 0.5*x^2*eps^2.  The Float64(...) wrappers pin
# each intermediate rounding; do not simplify them away.
function code(x, eps)
	tmp = 0.0
	if (Float64(Float64(Float64(1.0 + Float64(1.0 / eps)) * exp(Float64(x * Float64(eps + -1.0)))) + Float64(exp(Float64(x * Float64(-1.0 - eps))) * Float64(1.0 + Float64(-1.0 / eps)))) <= 4.0)
		# 0.3333333333333333 is the Float64 nearest 1/3
		tmp = fma(Float64(x * x), fma(x, 0.3333333333333333, -0.5), 1.0);
	else
		tmp = Float64(0.5 * Float64(x * Float64(x * Float64(eps * eps))));
	end
	return tmp
end
(* Alternative 4: machine-precision form — threshold 4.0 selects the cubic Taylor polynomial in x, else 0.5*x^2*eps^2. *)
code[x_, eps_] := If[LessEqual[N[(N[(N[(1.0 + N[(1.0 / eps), $MachinePrecision]), $MachinePrecision] * N[Exp[N[(x * N[(eps + -1.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] + N[(N[Exp[N[(x * N[(-1.0 - eps), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] * N[(1.0 + N[(-1.0 / eps), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], 4.0], N[(N[(x * x), $MachinePrecision] * N[(x * 0.3333333333333333 + -0.5), $MachinePrecision] + 1.0), $MachinePrecision], N[(0.5 * N[(x * N[(x * N[(eps * eps), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{x \cdot \left(\varepsilon + -1\right)} + e^{x \cdot \left(-1 - \varepsilon\right)} \cdot \left(1 + \frac{-1}{\varepsilon}\right) \leq 4:\\
\;\;\;\;\mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x, 0.3333333333333333, -0.5\right), 1\right)\\

\mathbf{else}:\\
\;\;\;\;0.5 \cdot \left(x \cdot \left(x \cdot \left(\varepsilon \cdot \varepsilon\right)\right)\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if (-.f64 (*.f64 (+.f64 #s(literal 1 binary64) (/.f64 #s(literal 1 binary64) eps)) (exp.f64 (neg.f64 (*.f64 (-.f64 #s(literal 1 binary64) eps) x)))) (*.f64 (-.f64 (/.f64 #s(literal 1 binary64) eps) #s(literal 1 binary64)) (exp.f64 (neg.f64 (*.f64 (+.f64 #s(literal 1 binary64) eps) x))))) < 4

    1. Initial program 51.5%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Add Preprocessing
    3. Taylor expanded in eps around 0

      \[\leadsto \color{blue}{\frac{1}{2} \cdot \left(\left(e^{\mathsf{neg}\left(x\right)} + x \cdot e^{\mathsf{neg}\left(x\right)}\right) - \left(-1 \cdot e^{\mathsf{neg}\left(x\right)} + -1 \cdot \left(x \cdot e^{\mathsf{neg}\left(x\right)}\right)\right)\right)} \]
    4. Step-by-step derivation
      1. lower-*.f64N/A

        \[\leadsto \color{blue}{\frac{1}{2} \cdot \left(\left(e^{\mathsf{neg}\left(x\right)} + x \cdot e^{\mathsf{neg}\left(x\right)}\right) - \left(-1 \cdot e^{\mathsf{neg}\left(x\right)} + -1 \cdot \left(x \cdot e^{\mathsf{neg}\left(x\right)}\right)\right)\right)} \]
      2. mul-1-negN/A

        \[\leadsto \frac{1}{2} \cdot \left(\left(e^{\mathsf{neg}\left(x\right)} + x \cdot e^{\mathsf{neg}\left(x\right)}\right) - \left(-1 \cdot e^{\mathsf{neg}\left(x\right)} + \color{blue}{\left(\mathsf{neg}\left(x \cdot e^{\mathsf{neg}\left(x\right)}\right)\right)}\right)\right) \]
      3. unsub-negN/A

        \[\leadsto \frac{1}{2} \cdot \left(\left(e^{\mathsf{neg}\left(x\right)} + x \cdot e^{\mathsf{neg}\left(x\right)}\right) - \color{blue}{\left(-1 \cdot e^{\mathsf{neg}\left(x\right)} - x \cdot e^{\mathsf{neg}\left(x\right)}\right)}\right) \]
      4. associate-+l-N/A

        \[\leadsto \frac{1}{2} \cdot \color{blue}{\left(\left(\left(e^{\mathsf{neg}\left(x\right)} + x \cdot e^{\mathsf{neg}\left(x\right)}\right) - -1 \cdot e^{\mathsf{neg}\left(x\right)}\right) + x \cdot e^{\mathsf{neg}\left(x\right)}\right)} \]
      5. *-commutativeN/A

        \[\leadsto \frac{1}{2} \cdot \left(\left(\left(e^{\mathsf{neg}\left(x\right)} + x \cdot e^{\mathsf{neg}\left(x\right)}\right) - -1 \cdot e^{\mathsf{neg}\left(x\right)}\right) + \color{blue}{e^{\mathsf{neg}\left(x\right)} \cdot x}\right) \]
      6. distribute-rgt1-inN/A

        \[\leadsto \frac{1}{2} \cdot \left(\left(\color{blue}{\left(x + 1\right) \cdot e^{\mathsf{neg}\left(x\right)}} - -1 \cdot e^{\mathsf{neg}\left(x\right)}\right) + e^{\mathsf{neg}\left(x\right)} \cdot x\right) \]
      7. distribute-rgt-out--N/A

        \[\leadsto \frac{1}{2} \cdot \left(\color{blue}{e^{\mathsf{neg}\left(x\right)} \cdot \left(\left(x + 1\right) - -1\right)} + e^{\mathsf{neg}\left(x\right)} \cdot x\right) \]
      8. distribute-lft-outN/A

        \[\leadsto \frac{1}{2} \cdot \color{blue}{\left(e^{\mathsf{neg}\left(x\right)} \cdot \left(\left(\left(x + 1\right) - -1\right) + x\right)\right)} \]
      9. lower-*.f64N/A

        \[\leadsto \frac{1}{2} \cdot \color{blue}{\left(e^{\mathsf{neg}\left(x\right)} \cdot \left(\left(\left(x + 1\right) - -1\right) + x\right)\right)} \]
    5. Applied rewrites 98.9%

      \[\leadsto \color{blue}{0.5 \cdot \left(e^{-x} \cdot \left(\left(x + 2\right) + x\right)\right)} \]
    6. Taylor expanded in x around 0

      \[\leadsto \color{blue}{1 + {x}^{2} \cdot \left(\frac{1}{3} \cdot x - \frac{1}{2}\right)} \]
    7. Step-by-step derivation
      1. +-commutativeN/A

        \[\leadsto \color{blue}{{x}^{2} \cdot \left(\frac{1}{3} \cdot x - \frac{1}{2}\right) + 1} \]
      2. lower-fma.f64N/A

        \[\leadsto \color{blue}{\mathsf{fma}\left({x}^{2}, \frac{1}{3} \cdot x - \frac{1}{2}, 1\right)} \]
      3. unpow2N/A

        \[\leadsto \mathsf{fma}\left(\color{blue}{x \cdot x}, \frac{1}{3} \cdot x - \frac{1}{2}, 1\right) \]
      4. lower-*.f64N/A

        \[\leadsto \mathsf{fma}\left(\color{blue}{x \cdot x}, \frac{1}{3} \cdot x - \frac{1}{2}, 1\right) \]
      5. sub-negN/A

        \[\leadsto \mathsf{fma}\left(x \cdot x, \color{blue}{\frac{1}{3} \cdot x + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)}, 1\right) \]
      6. *-commutativeN/A

        \[\leadsto \mathsf{fma}\left(x \cdot x, \color{blue}{x \cdot \frac{1}{3}} + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right), 1\right) \]
      7. metadata-evalN/A

        \[\leadsto \mathsf{fma}\left(x \cdot x, x \cdot \frac{1}{3} + \color{blue}{\frac{-1}{2}}, 1\right) \]
      8. lower-fma.f64 77.6

        \[\leadsto \mathsf{fma}\left(x \cdot x, \color{blue}{\mathsf{fma}\left(x, 0.3333333333333333, -0.5\right)}, 1\right) \]
    8. Applied rewrites 77.6%

      \[\leadsto \color{blue}{\mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x, 0.3333333333333333, -0.5\right), 1\right)} \]

    if 4 < (-.f64 (*.f64 (+.f64 #s(literal 1 binary64) (/.f64 #s(literal 1 binary64) eps)) (exp.f64 (neg.f64 (*.f64 (-.f64 #s(literal 1 binary64) eps) x)))) (*.f64 (-.f64 (/.f64 #s(literal 1 binary64) eps) #s(literal 1 binary64)) (exp.f64 (neg.f64 (*.f64 (+.f64 #s(literal 1 binary64) eps) x)))))

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Add Preprocessing
    3. Taylor expanded in x around 0

      \[\leadsto \color{blue}{1 + x \cdot \left(\frac{1}{2} \cdot \left(x \cdot \left(\frac{1}{2} \cdot \left(\left(1 + \frac{1}{\varepsilon}\right) \cdot {\left(\varepsilon - 1\right)}^{2}\right) - \frac{1}{2} \cdot \left({\left(1 + \varepsilon\right)}^{2} \cdot \left(\frac{1}{\varepsilon} - 1\right)\right)\right)\right) + \frac{1}{2} \cdot \left(\left(1 + \frac{1}{\varepsilon}\right) \cdot \left(\varepsilon - 1\right) - -1 \cdot \left(\left(1 + \varepsilon\right) \cdot \left(\frac{1}{\varepsilon} - 1\right)\right)\right)\right)} \]
    4. Applied rewrites 87.4%

      \[\leadsto \color{blue}{\mathsf{fma}\left(0.5 \cdot x, \left(\left(-1 - \varepsilon\right) + \frac{\varepsilon + 1}{\varepsilon}\right) + \mathsf{fma}\left(1 + \frac{1}{\varepsilon}, \mathsf{fma}\left(0.5 \cdot x, \left(1 - \varepsilon\right) \cdot \left(1 - \varepsilon\right), -1 + \varepsilon\right), x \cdot \left(0.5 \cdot \left(\left(\left(-1 - \varepsilon\right) + \frac{\varepsilon + 1}{\varepsilon}\right) \cdot \left(-1 - \varepsilon\right)\right)\right)\right), 1\right)} \]
    5. Taylor expanded in eps around 0

      \[\leadsto \mathsf{fma}\left(\frac{1}{2} \cdot x, \color{blue}{\frac{\frac{-1}{2} \cdot x + \left(\frac{1}{2} \cdot x + \varepsilon \cdot \left(-1 \cdot x + \left(\frac{-1}{2} \cdot x + \left(\frac{1}{2} \cdot x + \varepsilon \cdot \left(x + \left(-1 \cdot x + \varepsilon \cdot x\right)\right)\right)\right)\right)\right)}{\varepsilon}}, 1\right) \]
    6. Step-by-step derivation
      1. lower-/.f64N/A

        \[\leadsto \mathsf{fma}\left(\frac{1}{2} \cdot x, \color{blue}{\frac{\frac{-1}{2} \cdot x + \left(\frac{1}{2} \cdot x + \varepsilon \cdot \left(-1 \cdot x + \left(\frac{-1}{2} \cdot x + \left(\frac{1}{2} \cdot x + \varepsilon \cdot \left(x + \left(-1 \cdot x + \varepsilon \cdot x\right)\right)\right)\right)\right)\right)}{\varepsilon}}, 1\right) \]
    7. Applied rewrites 91.6%

      \[\leadsto \mathsf{fma}\left(0.5 \cdot x, \color{blue}{\frac{0 + \varepsilon \cdot \left(\left(-x\right) + \varepsilon \cdot \left(0 + x \cdot \varepsilon\right)\right)}{\varepsilon}}, 1\right) \]
    8. Taylor expanded in eps around inf

      \[\leadsto \color{blue}{\frac{1}{2} \cdot \left({\varepsilon}^{2} \cdot {x}^{2}\right)} \]
    9. Step-by-step derivation
      1. lower-*.f64N/A

        \[\leadsto \color{blue}{\frac{1}{2} \cdot \left({\varepsilon}^{2} \cdot {x}^{2}\right)} \]
      2. *-commutativeN/A

        \[\leadsto \frac{1}{2} \cdot \color{blue}{\left({x}^{2} \cdot {\varepsilon}^{2}\right)} \]
      3. unpow2N/A

        \[\leadsto \frac{1}{2} \cdot \left(\color{blue}{\left(x \cdot x\right)} \cdot {\varepsilon}^{2}\right) \]
      4. associate-*l*N/A

        \[\leadsto \frac{1}{2} \cdot \color{blue}{\left(x \cdot \left(x \cdot {\varepsilon}^{2}\right)\right)} \]
      5. *-commutativeN/A

        \[\leadsto \frac{1}{2} \cdot \left(x \cdot \color{blue}{\left({\varepsilon}^{2} \cdot x\right)}\right) \]
      6. lower-*.f64N/A

        \[\leadsto \frac{1}{2} \cdot \color{blue}{\left(x \cdot \left({\varepsilon}^{2} \cdot x\right)\right)} \]
      7. *-commutativeN/A

        \[\leadsto \frac{1}{2} \cdot \left(x \cdot \color{blue}{\left(x \cdot {\varepsilon}^{2}\right)}\right) \]
      8. lower-*.f64N/A

        \[\leadsto \frac{1}{2} \cdot \left(x \cdot \color{blue}{\left(x \cdot {\varepsilon}^{2}\right)}\right) \]
      9. unpow2N/A

        \[\leadsto \frac{1}{2} \cdot \left(x \cdot \left(x \cdot \color{blue}{\left(\varepsilon \cdot \varepsilon\right)}\right)\right) \]
      10. lower-*.f64 87.4

        \[\leadsto 0.5 \cdot \left(x \cdot \left(x \cdot \color{blue}{\left(\varepsilon \cdot \varepsilon\right)}\right)\right) \]
    10. Applied rewrites 87.4%

      \[\leadsto \color{blue}{0.5 \cdot \left(x \cdot \left(x \cdot \left(\varepsilon \cdot \varepsilon\right)\right)\right)} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification 81.9%

    \[\leadsto \begin{array}{l} \mathbf{if}\;\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{x \cdot \left(\varepsilon + -1\right)} + e^{x \cdot \left(-1 - \varepsilon\right)} \cdot \left(1 + \frac{-1}{\varepsilon}\right) \leq 4:\\ \;\;\;\;\mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x, 0.3333333333333333, -0.5\right), 1\right)\\ \mathbf{else}:\\ \;\;\;\;0.5 \cdot \left(x \cdot \left(x \cdot \left(\varepsilon \cdot \varepsilon\right)\right)\right)\\ \end{array} \]
  5. Add Preprocessing

Alternative 5: 99.1% accurate, 1.2× speedup?

\[\begin{array}{l} \\ 0.5 \cdot \left(\frac{1}{e^{\mathsf{fma}\left(x, -\varepsilon, x\right)}} + e^{x \cdot \left(-1 - \varepsilon\right)}\right) \end{array} \]
; Alternative 5 (FPCore, binary64): single-expression form
; 0.5 * (1/exp(fma(x, -eps, x)) + exp(x*(-1 - eps))).
; fma(x, -eps, x) computes x*(1 - eps) with a single rounding.
(FPCore (x eps)
 :precision binary64
 (* 0.5 (+ (/ 1.0 (exp (fma x (- eps) x))) (exp (* x (- -1.0 eps))))))
/* Alternative 5 (binary64): 0.5 * (exp(-(1-eps)*x) + exp(-(1+eps)*x)),
 * with the first exponential written as 1/exp(fma(x, -eps, x));
 * fma computes x*(1-eps) with a single rounding.  Generated code —
 * keep the evaluation order. */
double code(double x, double eps) {
	return 0.5 * ((1.0 / exp(fma(x, -eps, x))) + exp((x * (-1.0 - eps))));
}
# Alternative 5 (Float64): 0.5 * (1/exp(fma(x, -eps, x)) + exp(x*(-1-eps))).
# fma computes x*(1-eps) with a single rounding; the Float64(...) wrappers
# pin each intermediate rounding.
function code(x, eps)
	return Float64(0.5 * Float64(Float64(1.0 / exp(fma(x, Float64(-eps), x))) + exp(Float64(x * Float64(-1.0 - eps)))))
end
(* Alternative 5: machine-precision form of 0.5*(1/exp(x*(1-eps)) + exp(x*(-1-eps))). *)
code[x_, eps_] := N[(0.5 * N[(N[(1.0 / N[Exp[N[(x * (-eps) + x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] + N[Exp[N[(x * N[(-1.0 - eps), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
0.5 \cdot \left(\frac{1}{e^{\mathsf{fma}\left(x, -\varepsilon, x\right)}} + e^{x \cdot \left(-1 - \varepsilon\right)}\right)
\end{array}
Derivation
  1. Initial program 72.7%

    \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
  2. Add Preprocessing
  3. Taylor expanded in eps around inf

    \[\leadsto \color{blue}{\frac{1}{2} \cdot \left(e^{\mathsf{neg}\left(x \cdot \left(1 - \varepsilon\right)\right)} - -1 \cdot e^{\mathsf{neg}\left(x \cdot \left(1 + \varepsilon\right)\right)}\right)} \]
  4. Step-by-step derivation
    1. lower-*.f64N/A

      \[\leadsto \color{blue}{\frac{1}{2} \cdot \left(e^{\mathsf{neg}\left(x \cdot \left(1 - \varepsilon\right)\right)} - -1 \cdot e^{\mathsf{neg}\left(x \cdot \left(1 + \varepsilon\right)\right)}\right)} \]
    2. cancel-sign-sub-invN/A

      \[\leadsto \frac{1}{2} \cdot \color{blue}{\left(e^{\mathsf{neg}\left(x \cdot \left(1 - \varepsilon\right)\right)} + \left(\mathsf{neg}\left(-1\right)\right) \cdot e^{\mathsf{neg}\left(x \cdot \left(1 + \varepsilon\right)\right)}\right)} \]
    3. metadata-evalN/A

      \[\leadsto \frac{1}{2} \cdot \left(e^{\mathsf{neg}\left(x \cdot \left(1 - \varepsilon\right)\right)} + \color{blue}{1} \cdot e^{\mathsf{neg}\left(x \cdot \left(1 + \varepsilon\right)\right)}\right) \]
    4. *-lft-identityN/A

      \[\leadsto \frac{1}{2} \cdot \left(e^{\mathsf{neg}\left(x \cdot \left(1 - \varepsilon\right)\right)} + \color{blue}{e^{\mathsf{neg}\left(x \cdot \left(1 + \varepsilon\right)\right)}}\right) \]
    5. lower-+.f64N/A

      \[\leadsto \frac{1}{2} \cdot \color{blue}{\left(e^{\mathsf{neg}\left(x \cdot \left(1 - \varepsilon\right)\right)} + e^{\mathsf{neg}\left(x \cdot \left(1 + \varepsilon\right)\right)}\right)} \]
  5. Applied rewrites 98.8%

    \[\leadsto \color{blue}{0.5 \cdot \left(e^{\mathsf{fma}\left(x, \varepsilon, -x\right)} + e^{x \cdot \left(-1 - \varepsilon\right)}\right)} \]
  6. Applied rewrites 98.8%

    \[\leadsto 0.5 \cdot \left(\color{blue}{\frac{1}{e^{\mathsf{fma}\left(x, -\varepsilon, x\right)}}} + e^{x \cdot \left(-1 - \varepsilon\right)}\right) \]
  7. Add Preprocessing

Alternative 6: 99.1% accurate, 1.2× speedup?

\[\begin{array}{l} \\ 0.5 \cdot \left(e^{\mathsf{fma}\left(x, \varepsilon, -x\right)} + e^{x \cdot \left(-1 - \varepsilon\right)}\right) \end{array} \]
; Alternative 6 (FPCore, binary64): 0.5 * (exp(fma(x, eps, -x)) +
; exp(x*(-1 - eps))).  fma(x, eps, -x) computes x*eps - x = -(1-eps)*x
; with a single rounding.
(FPCore (x eps)
 :precision binary64
 (* 0.5 (+ (exp (fma x eps (- x))) (exp (* x (- -1.0 eps))))))
/* Alternative 6 (binary64): 0.5 * (exp(fma(x, eps, -x)) + exp(x*(-1-eps))).
 * fma(x, eps, -x) computes x*eps - x = -(1-eps)*x with a single
 * rounding.  Same value as Alternative 5 up to the exp/reciprocal
 * rewrite.  Generated code — keep the evaluation order. */
double code(double x, double eps) {
	return 0.5 * (exp(fma(x, eps, -x)) + exp((x * (-1.0 - eps))));
}
# Alternative 6 (Float64): 0.5 * (exp(fma(x, eps, -x)) + exp(x*(-1-eps))).
# fma computes x*eps - x with a single rounding; the Float64(...) wrappers
# pin each intermediate rounding.
function code(x, eps)
	return Float64(0.5 * Float64(exp(fma(x, eps, Float64(-x))) + exp(Float64(x * Float64(-1.0 - eps)))))
end
(* Alternative 6: machine-precision form of 0.5*(exp(x*eps - x) + exp(x*(-1-eps))). *)
code[x_, eps_] := N[(0.5 * N[(N[Exp[N[(x * eps + (-x)), $MachinePrecision]], $MachinePrecision] + N[Exp[N[(x * N[(-1.0 - eps), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
0.5 \cdot \left(e^{\mathsf{fma}\left(x, \varepsilon, -x\right)} + e^{x \cdot \left(-1 - \varepsilon\right)}\right)
\end{array}
Derivation
  1. Initial program 72.7%

    \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
  2. Add Preprocessing
  3. Taylor expanded in eps around inf

    \[\leadsto \color{blue}{\frac{1}{2} \cdot \left(e^{\mathsf{neg}\left(x \cdot \left(1 - \varepsilon\right)\right)} - -1 \cdot e^{\mathsf{neg}\left(x \cdot \left(1 + \varepsilon\right)\right)}\right)} \]
  4. Step-by-step derivation
    1. lower-*.f64N/A

      \[\leadsto \color{blue}{\frac{1}{2} \cdot \left(e^{\mathsf{neg}\left(x \cdot \left(1 - \varepsilon\right)\right)} - -1 \cdot e^{\mathsf{neg}\left(x \cdot \left(1 + \varepsilon\right)\right)}\right)} \]
    2. cancel-sign-sub-invN/A

      \[\leadsto \frac{1}{2} \cdot \color{blue}{\left(e^{\mathsf{neg}\left(x \cdot \left(1 - \varepsilon\right)\right)} + \left(\mathsf{neg}\left(-1\right)\right) \cdot e^{\mathsf{neg}\left(x \cdot \left(1 + \varepsilon\right)\right)}\right)} \]
    3. metadata-evalN/A

      \[\leadsto \frac{1}{2} \cdot \left(e^{\mathsf{neg}\left(x \cdot \left(1 - \varepsilon\right)\right)} + \color{blue}{1} \cdot e^{\mathsf{neg}\left(x \cdot \left(1 + \varepsilon\right)\right)}\right) \]
    4. *-lft-identityN/A

      \[\leadsto \frac{1}{2} \cdot \left(e^{\mathsf{neg}\left(x \cdot \left(1 - \varepsilon\right)\right)} + \color{blue}{e^{\mathsf{neg}\left(x \cdot \left(1 + \varepsilon\right)\right)}}\right) \]
    5. lower-+.f64N/A

      \[\leadsto \frac{1}{2} \cdot \color{blue}{\left(e^{\mathsf{neg}\left(x \cdot \left(1 - \varepsilon\right)\right)} + e^{\mathsf{neg}\left(x \cdot \left(1 + \varepsilon\right)\right)}\right)} \]
  5. Applied rewrites 98.8%

    \[\leadsto \color{blue}{0.5 \cdot \left(e^{\mathsf{fma}\left(x, \varepsilon, -x\right)} + e^{x \cdot \left(-1 - \varepsilon\right)}\right)} \]
  6. Add Preprocessing

Alternative 7: 87.5% accurate, 5.2× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq 9.5 \cdot 10^{-5}:\\ \;\;\;\;\mathsf{fma}\left(\left(\varepsilon \cdot \left(\varepsilon \cdot \left(x \cdot \varepsilon\right) - x\right)\right) \cdot \left(0.5 \cdot x\right), \frac{1}{\varepsilon}, 1\right)\\ \mathbf{else}:\\ \;\;\;\;\frac{\left(0.5 \cdot x\right) \cdot \left(x \cdot \left(\varepsilon \cdot \left(\varepsilon \cdot \varepsilon\right)\right)\right)}{\varepsilon}\\ \end{array} \end{array} \]
; Alternative 7 (FPCore, binary64): regime split directly on x.
; x <= 9.5e-5: fma-based Taylor form in x; otherwise the eps-series
; leading term, written as (0.5*x * x*eps^3) / eps as generated
; (algebraically 0.5*x^2*eps^2).
(FPCore (x eps)
 :precision binary64
 (if (<= x 9.5e-5)
   (fma (* (* eps (- (* eps (* x eps)) x)) (* 0.5 x)) (/ 1.0 eps) 1.0)
   (/ (* (* 0.5 x) (* x (* eps (* eps eps)))) eps)))
/* Alternative 7 (binary64): regime split directly on x (threshold
 * 9.5e-5), avoiding the expensive exp-based branch test of
 * Alternatives 3/4.  Small x: fma-based Taylor form in x.  Otherwise:
 * (0.5*x * x*eps^3) / eps, which is algebraically 0.5*x^2*eps^2 —
 * NOTE(review): the eps^3/eps form is kept exactly as generated. */
double code(double x, double eps) {
	double tmp;
	if (x <= 9.5e-5) {
		/* series regime: fma keeps the leading 1.0 exact */
		tmp = fma(((eps * ((eps * (x * eps)) - x)) * (0.5 * x)), (1.0 / eps), 1.0);
	} else {
		tmp = ((0.5 * x) * (x * (eps * (eps * eps)))) / eps;
	}
	return tmp;
}
function code(x, eps)
	tmp = 0.0
	if (x <= 9.5e-5)
		tmp = fma(Float64(Float64(eps * Float64(Float64(eps * Float64(x * eps)) - x)) * Float64(0.5 * x)), Float64(1.0 / eps), 1.0);
	else
		tmp = Float64(Float64(Float64(0.5 * x) * Float64(x * Float64(eps * Float64(eps * eps)))) / eps);
	end
	return tmp
end
code[x_, eps_] := If[LessEqual[x, 9.5e-5], N[(N[(N[(eps * N[(N[(eps * N[(x * eps), $MachinePrecision]), $MachinePrecision] - x), $MachinePrecision]), $MachinePrecision] * N[(0.5 * x), $MachinePrecision]), $MachinePrecision] * N[(1.0 / eps), $MachinePrecision] + 1.0), $MachinePrecision], N[(N[(N[(0.5 * x), $MachinePrecision] * N[(x * N[(eps * N[(eps * eps), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / eps), $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x \leq 9.5 \cdot 10^{-5}:\\
\;\;\;\;\mathsf{fma}\left(\left(\varepsilon \cdot \left(\varepsilon \cdot \left(x \cdot \varepsilon\right) - x\right)\right) \cdot \left(0.5 \cdot x\right), \frac{1}{\varepsilon}, 1\right)\\

\mathbf{else}:\\
\;\;\;\;\frac{\left(0.5 \cdot x\right) \cdot \left(x \cdot \left(\varepsilon \cdot \left(\varepsilon \cdot \varepsilon\right)\right)\right)}{\varepsilon}\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if x < 9.5000000000000005e-5

    1. Initial program 62.7%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Add Preprocessing
    3. Taylor expanded in x around 0

      \[\leadsto \color{blue}{1 + x \cdot \left(\frac{1}{2} \cdot \left(x \cdot \left(\frac{1}{2} \cdot \left(\left(1 + \frac{1}{\varepsilon}\right) \cdot {\left(\varepsilon - 1\right)}^{2}\right) - \frac{1}{2} \cdot \left({\left(1 + \varepsilon\right)}^{2} \cdot \left(\frac{1}{\varepsilon} - 1\right)\right)\right)\right) + \frac{1}{2} \cdot \left(\left(1 + \frac{1}{\varepsilon}\right) \cdot \left(\varepsilon - 1\right) - -1 \cdot \left(\left(1 + \varepsilon\right) \cdot \left(\frac{1}{\varepsilon} - 1\right)\right)\right)\right)} \]
    4. Applied rewrites92.0%

      \[\leadsto \color{blue}{\mathsf{fma}\left(0.5 \cdot x, \left(\left(-1 - \varepsilon\right) + \frac{\varepsilon + 1}{\varepsilon}\right) + \mathsf{fma}\left(1 + \frac{1}{\varepsilon}, \mathsf{fma}\left(0.5 \cdot x, \left(1 - \varepsilon\right) \cdot \left(1 - \varepsilon\right), -1 + \varepsilon\right), x \cdot \left(0.5 \cdot \left(\left(\left(-1 - \varepsilon\right) + \frac{\varepsilon + 1}{\varepsilon}\right) \cdot \left(-1 - \varepsilon\right)\right)\right)\right), 1\right)} \]
    5. Taylor expanded in eps around 0

      \[\leadsto \mathsf{fma}\left(\frac{1}{2} \cdot x, \color{blue}{\frac{\frac{-1}{2} \cdot x + \left(\frac{1}{2} \cdot x + \varepsilon \cdot \left(-1 \cdot x + \left(\frac{-1}{2} \cdot x + \left(\frac{1}{2} \cdot x + \varepsilon \cdot \left(x + \left(-1 \cdot x + \varepsilon \cdot x\right)\right)\right)\right)\right)\right)}{\varepsilon}}, 1\right) \]
    6. Step-by-step derivation
      1. lower-/.f64N/A

        \[\leadsto \mathsf{fma}\left(\frac{1}{2} \cdot x, \color{blue}{\frac{\frac{-1}{2} \cdot x + \left(\frac{1}{2} \cdot x + \varepsilon \cdot \left(-1 \cdot x + \left(\frac{-1}{2} \cdot x + \left(\frac{1}{2} \cdot x + \varepsilon \cdot \left(x + \left(-1 \cdot x + \varepsilon \cdot x\right)\right)\right)\right)\right)\right)}{\varepsilon}}, 1\right) \]
    7. Applied rewrites96.6%

      \[\leadsto \mathsf{fma}\left(0.5 \cdot x, \color{blue}{\frac{0 + \varepsilon \cdot \left(\left(-x\right) + \varepsilon \cdot \left(0 + x \cdot \varepsilon\right)\right)}{\varepsilon}}, 1\right) \]
    8. Step-by-step derivation
      1. lift-*.f64N/A

        \[\leadsto \color{blue}{\left(\frac{1}{2} \cdot x\right)} \cdot \frac{0 + \varepsilon \cdot \left(\left(\mathsf{neg}\left(x\right)\right) + \varepsilon \cdot \left(0 + x \cdot \varepsilon\right)\right)}{\varepsilon} + 1 \]
      2. lift-neg.f64N/A

        \[\leadsto \left(\frac{1}{2} \cdot x\right) \cdot \frac{0 + \varepsilon \cdot \left(\color{blue}{\left(\mathsf{neg}\left(x\right)\right)} + \varepsilon \cdot \left(0 + x \cdot \varepsilon\right)\right)}{\varepsilon} + 1 \]
      3. lift-*.f64N/A

        \[\leadsto \left(\frac{1}{2} \cdot x\right) \cdot \frac{0 + \varepsilon \cdot \left(\left(\mathsf{neg}\left(x\right)\right) + \varepsilon \cdot \left(0 + \color{blue}{x \cdot \varepsilon}\right)\right)}{\varepsilon} + 1 \]
      4. lift-+.f64N/A

        \[\leadsto \left(\frac{1}{2} \cdot x\right) \cdot \frac{0 + \varepsilon \cdot \left(\left(\mathsf{neg}\left(x\right)\right) + \varepsilon \cdot \color{blue}{\left(0 + x \cdot \varepsilon\right)}\right)}{\varepsilon} + 1 \]
      5. lift-*.f64N/A

        \[\leadsto \left(\frac{1}{2} \cdot x\right) \cdot \frac{0 + \varepsilon \cdot \left(\left(\mathsf{neg}\left(x\right)\right) + \color{blue}{\varepsilon \cdot \left(0 + x \cdot \varepsilon\right)}\right)}{\varepsilon} + 1 \]
      6. lift-+.f64N/A

        \[\leadsto \left(\frac{1}{2} \cdot x\right) \cdot \frac{0 + \varepsilon \cdot \color{blue}{\left(\left(\mathsf{neg}\left(x\right)\right) + \varepsilon \cdot \left(0 + x \cdot \varepsilon\right)\right)}}{\varepsilon} + 1 \]
      7. lift-*.f64N/A

        \[\leadsto \left(\frac{1}{2} \cdot x\right) \cdot \frac{0 + \color{blue}{\varepsilon \cdot \left(\left(\mathsf{neg}\left(x\right)\right) + \varepsilon \cdot \left(0 + x \cdot \varepsilon\right)\right)}}{\varepsilon} + 1 \]
      8. +-lft-identityN/A

        \[\leadsto \left(\frac{1}{2} \cdot x\right) \cdot \frac{\color{blue}{\varepsilon \cdot \left(\left(\mathsf{neg}\left(x\right)\right) + \varepsilon \cdot \left(0 + x \cdot \varepsilon\right)\right)}}{\varepsilon} + 1 \]
      9. div-invN/A

        \[\leadsto \left(\frac{1}{2} \cdot x\right) \cdot \color{blue}{\left(\left(\varepsilon \cdot \left(\left(\mathsf{neg}\left(x\right)\right) + \varepsilon \cdot \left(0 + x \cdot \varepsilon\right)\right)\right) \cdot \frac{1}{\varepsilon}\right)} + 1 \]
      10. associate-*r*N/A

        \[\leadsto \color{blue}{\left(\left(\frac{1}{2} \cdot x\right) \cdot \left(\varepsilon \cdot \left(\left(\mathsf{neg}\left(x\right)\right) + \varepsilon \cdot \left(0 + x \cdot \varepsilon\right)\right)\right)\right) \cdot \frac{1}{\varepsilon}} + 1 \]
      11. lower-fma.f64N/A

        \[\leadsto \color{blue}{\mathsf{fma}\left(\left(\frac{1}{2} \cdot x\right) \cdot \left(\varepsilon \cdot \left(\left(\mathsf{neg}\left(x\right)\right) + \varepsilon \cdot \left(0 + x \cdot \varepsilon\right)\right)\right), \frac{1}{\varepsilon}, 1\right)} \]
    9. Applied rewrites97.1%

      \[\leadsto \color{blue}{\mathsf{fma}\left(\left(\varepsilon \cdot \left(\varepsilon \cdot \left(x \cdot \varepsilon\right) - x\right)\right) \cdot \left(x \cdot 0.5\right), \frac{1}{\varepsilon}, 1\right)} \]

    if 9.5000000000000005e-5 < x

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Add Preprocessing
    3. Taylor expanded in x around 0

      \[\leadsto \color{blue}{1 + x \cdot \left(\frac{1}{2} \cdot \left(x \cdot \left(\frac{1}{2} \cdot \left(\left(1 + \frac{1}{\varepsilon}\right) \cdot {\left(\varepsilon - 1\right)}^{2}\right) - \frac{1}{2} \cdot \left({\left(1 + \varepsilon\right)}^{2} \cdot \left(\frac{1}{\varepsilon} - 1\right)\right)\right)\right) + \frac{1}{2} \cdot \left(\left(1 + \frac{1}{\varepsilon}\right) \cdot \left(\varepsilon - 1\right) - -1 \cdot \left(\left(1 + \varepsilon\right) \cdot \left(\frac{1}{\varepsilon} - 1\right)\right)\right)\right)} \]
    4. Applied rewrites46.4%

      \[\leadsto \color{blue}{\mathsf{fma}\left(0.5 \cdot x, \left(\left(-1 - \varepsilon\right) + \frac{\varepsilon + 1}{\varepsilon}\right) + \mathsf{fma}\left(1 + \frac{1}{\varepsilon}, \mathsf{fma}\left(0.5 \cdot x, \left(1 - \varepsilon\right) \cdot \left(1 - \varepsilon\right), -1 + \varepsilon\right), x \cdot \left(0.5 \cdot \left(\left(\left(-1 - \varepsilon\right) + \frac{\varepsilon + 1}{\varepsilon}\right) \cdot \left(-1 - \varepsilon\right)\right)\right)\right), 1\right)} \]
    5. Taylor expanded in eps around 0

      \[\leadsto \mathsf{fma}\left(\frac{1}{2} \cdot x, \color{blue}{\frac{\frac{-1}{2} \cdot x + \left(\frac{1}{2} \cdot x + \varepsilon \cdot \left(-1 \cdot x + \left(\frac{-1}{2} \cdot x + \left(\frac{1}{2} \cdot x + \varepsilon \cdot \left(x + \left(-1 \cdot x + \varepsilon \cdot x\right)\right)\right)\right)\right)\right)}{\varepsilon}}, 1\right) \]
    6. Step-by-step derivation
      1. lower-/.f64N/A

        \[\leadsto \mathsf{fma}\left(\frac{1}{2} \cdot x, \color{blue}{\frac{\frac{-1}{2} \cdot x + \left(\frac{1}{2} \cdot x + \varepsilon \cdot \left(-1 \cdot x + \left(\frac{-1}{2} \cdot x + \left(\frac{1}{2} \cdot x + \varepsilon \cdot \left(x + \left(-1 \cdot x + \varepsilon \cdot x\right)\right)\right)\right)\right)\right)}{\varepsilon}}, 1\right) \]
    7. Applied rewrites47.9%

      \[\leadsto \mathsf{fma}\left(0.5 \cdot x, \color{blue}{\frac{0 + \varepsilon \cdot \left(\left(-x\right) + \varepsilon \cdot \left(0 + x \cdot \varepsilon\right)\right)}{\varepsilon}}, 1\right) \]
    8. Taylor expanded in eps around inf

      \[\leadsto \color{blue}{\frac{1}{2} \cdot \left({\varepsilon}^{2} \cdot {x}^{2}\right)} \]
    9. Step-by-step derivation
      1. lower-*.f64N/A

        \[\leadsto \color{blue}{\frac{1}{2} \cdot \left({\varepsilon}^{2} \cdot {x}^{2}\right)} \]
      2. *-commutativeN/A

        \[\leadsto \frac{1}{2} \cdot \color{blue}{\left({x}^{2} \cdot {\varepsilon}^{2}\right)} \]
      3. unpow2N/A

        \[\leadsto \frac{1}{2} \cdot \left(\color{blue}{\left(x \cdot x\right)} \cdot {\varepsilon}^{2}\right) \]
      4. associate-*l*N/A

        \[\leadsto \frac{1}{2} \cdot \color{blue}{\left(x \cdot \left(x \cdot {\varepsilon}^{2}\right)\right)} \]
      5. *-commutativeN/A

        \[\leadsto \frac{1}{2} \cdot \left(x \cdot \color{blue}{\left({\varepsilon}^{2} \cdot x\right)}\right) \]
      6. lower-*.f64N/A

        \[\leadsto \frac{1}{2} \cdot \color{blue}{\left(x \cdot \left({\varepsilon}^{2} \cdot x\right)\right)} \]
      7. *-commutativeN/A

        \[\leadsto \frac{1}{2} \cdot \left(x \cdot \color{blue}{\left(x \cdot {\varepsilon}^{2}\right)}\right) \]
      8. lower-*.f64N/A

        \[\leadsto \frac{1}{2} \cdot \left(x \cdot \color{blue}{\left(x \cdot {\varepsilon}^{2}\right)}\right) \]
      9. unpow2N/A

        \[\leadsto \frac{1}{2} \cdot \left(x \cdot \left(x \cdot \color{blue}{\left(\varepsilon \cdot \varepsilon\right)}\right)\right) \]
      10. lower-*.f64 — 72.2

        \[\leadsto 0.5 \cdot \left(x \cdot \left(x \cdot \color{blue}{\left(\varepsilon \cdot \varepsilon\right)}\right)\right) \]
    10. Applied rewrites72.2%

      \[\leadsto \color{blue}{0.5 \cdot \left(x \cdot \left(x \cdot \left(\varepsilon \cdot \varepsilon\right)\right)\right)} \]
    11. Step-by-step derivation
      1. lift-*.f64N/A

        \[\leadsto \frac{1}{2} \cdot \left(x \cdot \left(x \cdot \color{blue}{\left(\varepsilon \cdot \varepsilon\right)}\right)\right) \]
      2. associate-*r*N/A

        \[\leadsto \frac{1}{2} \cdot \color{blue}{\left(\left(x \cdot x\right) \cdot \left(\varepsilon \cdot \varepsilon\right)\right)} \]
      3. lift-*.f64N/A

        \[\leadsto \frac{1}{2} \cdot \left(\left(x \cdot x\right) \cdot \color{blue}{\left(\varepsilon \cdot \varepsilon\right)}\right) \]
      4. pow2N/A

        \[\leadsto \frac{1}{2} \cdot \left(\left(x \cdot x\right) \cdot \color{blue}{{\varepsilon}^{2}}\right) \]
      5. metadata-evalN/A

        \[\leadsto \frac{1}{2} \cdot \left(\left(x \cdot x\right) \cdot {\varepsilon}^{\color{blue}{\left(3 + -1\right)}}\right) \]
      6. pow-prod-upN/A

        \[\leadsto \frac{1}{2} \cdot \left(\left(x \cdot x\right) \cdot \color{blue}{\left({\varepsilon}^{3} \cdot {\varepsilon}^{-1}\right)}\right) \]
      7. cube-unmultN/A

        \[\leadsto \frac{1}{2} \cdot \left(\left(x \cdot x\right) \cdot \left(\color{blue}{\left(\varepsilon \cdot \left(\varepsilon \cdot \varepsilon\right)\right)} \cdot {\varepsilon}^{-1}\right)\right) \]
      8. lift-*.f64N/A

        \[\leadsto \frac{1}{2} \cdot \left(\left(x \cdot x\right) \cdot \left(\left(\varepsilon \cdot \color{blue}{\left(\varepsilon \cdot \varepsilon\right)}\right) \cdot {\varepsilon}^{-1}\right)\right) \]
      9. lift-*.f64N/A

        \[\leadsto \frac{1}{2} \cdot \left(\left(x \cdot x\right) \cdot \left(\color{blue}{\left(\varepsilon \cdot \left(\varepsilon \cdot \varepsilon\right)\right)} \cdot {\varepsilon}^{-1}\right)\right) \]
      10. inv-powN/A

        \[\leadsto \frac{1}{2} \cdot \left(\left(x \cdot x\right) \cdot \left(\left(\varepsilon \cdot \left(\varepsilon \cdot \varepsilon\right)\right) \cdot \color{blue}{\frac{1}{\varepsilon}}\right)\right) \]
      11. div-invN/A

        \[\leadsto \frac{1}{2} \cdot \left(\left(x \cdot x\right) \cdot \color{blue}{\frac{\varepsilon \cdot \left(\varepsilon \cdot \varepsilon\right)}{\varepsilon}}\right) \]
      12. associate-*r*N/A

        \[\leadsto \frac{1}{2} \cdot \color{blue}{\left(x \cdot \left(x \cdot \frac{\varepsilon \cdot \left(\varepsilon \cdot \varepsilon\right)}{\varepsilon}\right)\right)} \]
      13. associate-/l*N/A

        \[\leadsto \frac{1}{2} \cdot \left(x \cdot \color{blue}{\frac{x \cdot \left(\varepsilon \cdot \left(\varepsilon \cdot \varepsilon\right)\right)}{\varepsilon}}\right) \]
      14. lift-*.f64N/A

        \[\leadsto \frac{1}{2} \cdot \left(x \cdot \frac{\color{blue}{x \cdot \left(\varepsilon \cdot \left(\varepsilon \cdot \varepsilon\right)\right)}}{\varepsilon}\right) \]
      15. lift-/.f64N/A

        \[\leadsto \frac{1}{2} \cdot \left(x \cdot \color{blue}{\frac{x \cdot \left(\varepsilon \cdot \left(\varepsilon \cdot \varepsilon\right)\right)}{\varepsilon}}\right) \]
      16. associate-*l*N/A

        \[\leadsto \color{blue}{\left(\frac{1}{2} \cdot x\right) \cdot \frac{x \cdot \left(\varepsilon \cdot \left(\varepsilon \cdot \varepsilon\right)\right)}{\varepsilon}} \]
      17. lift-*.f64N/A

        \[\leadsto \color{blue}{\left(\frac{1}{2} \cdot x\right)} \cdot \frac{x \cdot \left(\varepsilon \cdot \left(\varepsilon \cdot \varepsilon\right)\right)}{\varepsilon} \]
      18. lift-/.f64N/A

        \[\leadsto \left(\frac{1}{2} \cdot x\right) \cdot \color{blue}{\frac{x \cdot \left(\varepsilon \cdot \left(\varepsilon \cdot \varepsilon\right)\right)}{\varepsilon}} \]
      19. associate-*r/N/A

        \[\leadsto \color{blue}{\frac{\left(\frac{1}{2} \cdot x\right) \cdot \left(x \cdot \left(\varepsilon \cdot \left(\varepsilon \cdot \varepsilon\right)\right)\right)}{\varepsilon}} \]
      20. lower-/.f64N/A

        \[\leadsto \color{blue}{\frac{\left(\frac{1}{2} \cdot x\right) \cdot \left(x \cdot \left(\varepsilon \cdot \left(\varepsilon \cdot \varepsilon\right)\right)\right)}{\varepsilon}} \]
    12. Applied rewrites81.9%

      \[\leadsto \color{blue}{\frac{\left(x \cdot \left(\varepsilon \cdot \left(\varepsilon \cdot \varepsilon\right)\right)\right) \cdot \left(x \cdot 0.5\right)}{\varepsilon}} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification93.0%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq 9.5 \cdot 10^{-5}:\\ \;\;\;\;\mathsf{fma}\left(\left(\varepsilon \cdot \left(\varepsilon \cdot \left(x \cdot \varepsilon\right) - x\right)\right) \cdot \left(0.5 \cdot x\right), \frac{1}{\varepsilon}, 1\right)\\ \mathbf{else}:\\ \;\;\;\;\frac{\left(0.5 \cdot x\right) \cdot \left(x \cdot \left(\varepsilon \cdot \left(\varepsilon \cdot \varepsilon\right)\right)\right)}{\varepsilon}\\ \end{array} \]
  5. Add Preprocessing

Alternative 8: 84.7% accurate, 6.3× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq 9.5 \cdot 10^{-5}:\\ \;\;\;\;\mathsf{fma}\left(0.5 \cdot x, x \cdot \left(\varepsilon \cdot \varepsilon\right), 1\right)\\ \mathbf{else}:\\ \;\;\;\;\frac{\left(0.5 \cdot x\right) \cdot \left(x \cdot \left(\varepsilon \cdot \left(\varepsilon \cdot \varepsilon\right)\right)\right)}{\varepsilon}\\ \end{array} \end{array} \]
(FPCore (x eps)
 :precision binary64
 (if (<= x 9.5e-5)
   (fma (* 0.5 x) (* x (* eps eps)) 1.0)
   (/ (* (* 0.5 x) (* x (* eps (* eps eps)))) eps)))
double code(double x, double eps) {
	double tmp;
	if (x <= 9.5e-5) {
		tmp = fma((0.5 * x), (x * (eps * eps)), 1.0);
	} else {
		tmp = ((0.5 * x) * (x * (eps * (eps * eps)))) / eps;
	}
	return tmp;
}
function code(x, eps)
	tmp = 0.0
	if (x <= 9.5e-5)
		tmp = fma(Float64(0.5 * x), Float64(x * Float64(eps * eps)), 1.0);
	else
		tmp = Float64(Float64(Float64(0.5 * x) * Float64(x * Float64(eps * Float64(eps * eps)))) / eps);
	end
	return tmp
end
code[x_, eps_] := If[LessEqual[x, 9.5e-5], N[(N[(0.5 * x), $MachinePrecision] * N[(x * N[(eps * eps), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision], N[(N[(N[(0.5 * x), $MachinePrecision] * N[(x * N[(eps * N[(eps * eps), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / eps), $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x \leq 9.5 \cdot 10^{-5}:\\
\;\;\;\;\mathsf{fma}\left(0.5 \cdot x, x \cdot \left(\varepsilon \cdot \varepsilon\right), 1\right)\\

\mathbf{else}:\\
\;\;\;\;\frac{\left(0.5 \cdot x\right) \cdot \left(x \cdot \left(\varepsilon \cdot \left(\varepsilon \cdot \varepsilon\right)\right)\right)}{\varepsilon}\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if x < 9.5000000000000005e-5

    1. Initial program 62.7%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Add Preprocessing
    3. Taylor expanded in x around 0

      \[\leadsto \color{blue}{1 + x \cdot \left(\frac{1}{2} \cdot \left(x \cdot \left(\frac{1}{2} \cdot \left(\left(1 + \frac{1}{\varepsilon}\right) \cdot {\left(\varepsilon - 1\right)}^{2}\right) - \frac{1}{2} \cdot \left({\left(1 + \varepsilon\right)}^{2} \cdot \left(\frac{1}{\varepsilon} - 1\right)\right)\right)\right) + \frac{1}{2} \cdot \left(\left(1 + \frac{1}{\varepsilon}\right) \cdot \left(\varepsilon - 1\right) - -1 \cdot \left(\left(1 + \varepsilon\right) \cdot \left(\frac{1}{\varepsilon} - 1\right)\right)\right)\right)} \]
    4. Applied rewrites92.0%

      \[\leadsto \color{blue}{\mathsf{fma}\left(0.5 \cdot x, \left(\left(-1 - \varepsilon\right) + \frac{\varepsilon + 1}{\varepsilon}\right) + \mathsf{fma}\left(1 + \frac{1}{\varepsilon}, \mathsf{fma}\left(0.5 \cdot x, \left(1 - \varepsilon\right) \cdot \left(1 - \varepsilon\right), -1 + \varepsilon\right), x \cdot \left(0.5 \cdot \left(\left(\left(-1 - \varepsilon\right) + \frac{\varepsilon + 1}{\varepsilon}\right) \cdot \left(-1 - \varepsilon\right)\right)\right)\right), 1\right)} \]
    5. Taylor expanded in eps around inf

      \[\leadsto \mathsf{fma}\left(\frac{1}{2} \cdot x, \color{blue}{{\varepsilon}^{2} \cdot x}, 1\right) \]
    6. Step-by-step derivation
      1. *-commutativeN/A

        \[\leadsto \mathsf{fma}\left(\frac{1}{2} \cdot x, \color{blue}{x \cdot {\varepsilon}^{2}}, 1\right) \]
      2. lower-*.f64N/A

        \[\leadsto \mathsf{fma}\left(\frac{1}{2} \cdot x, \color{blue}{x \cdot {\varepsilon}^{2}}, 1\right) \]
      3. unpow2N/A

        \[\leadsto \mathsf{fma}\left(\frac{1}{2} \cdot x, x \cdot \color{blue}{\left(\varepsilon \cdot \varepsilon\right)}, 1\right) \]
      4. lower-*.f6492.2

        \[\leadsto \mathsf{fma}\left(0.5 \cdot x, x \cdot \color{blue}{\left(\varepsilon \cdot \varepsilon\right)}, 1\right) \]
    7. Applied rewrites92.2%

      \[\leadsto \mathsf{fma}\left(0.5 \cdot x, \color{blue}{x \cdot \left(\varepsilon \cdot \varepsilon\right)}, 1\right) \]

    if 9.5000000000000005e-5 < x

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Add Preprocessing
    3. Taylor expanded in x around 0

      \[\leadsto \color{blue}{1 + x \cdot \left(\frac{1}{2} \cdot \left(x \cdot \left(\frac{1}{2} \cdot \left(\left(1 + \frac{1}{\varepsilon}\right) \cdot {\left(\varepsilon - 1\right)}^{2}\right) - \frac{1}{2} \cdot \left({\left(1 + \varepsilon\right)}^{2} \cdot \left(\frac{1}{\varepsilon} - 1\right)\right)\right)\right) + \frac{1}{2} \cdot \left(\left(1 + \frac{1}{\varepsilon}\right) \cdot \left(\varepsilon - 1\right) - -1 \cdot \left(\left(1 + \varepsilon\right) \cdot \left(\frac{1}{\varepsilon} - 1\right)\right)\right)\right)} \]
    4. Applied rewrites46.4%

      \[\leadsto \color{blue}{\mathsf{fma}\left(0.5 \cdot x, \left(\left(-1 - \varepsilon\right) + \frac{\varepsilon + 1}{\varepsilon}\right) + \mathsf{fma}\left(1 + \frac{1}{\varepsilon}, \mathsf{fma}\left(0.5 \cdot x, \left(1 - \varepsilon\right) \cdot \left(1 - \varepsilon\right), -1 + \varepsilon\right), x \cdot \left(0.5 \cdot \left(\left(\left(-1 - \varepsilon\right) + \frac{\varepsilon + 1}{\varepsilon}\right) \cdot \left(-1 - \varepsilon\right)\right)\right)\right), 1\right)} \]
    5. Taylor expanded in eps around 0

      \[\leadsto \mathsf{fma}\left(\frac{1}{2} \cdot x, \color{blue}{\frac{\frac{-1}{2} \cdot x + \left(\frac{1}{2} \cdot x + \varepsilon \cdot \left(-1 \cdot x + \left(\frac{-1}{2} \cdot x + \left(\frac{1}{2} \cdot x + \varepsilon \cdot \left(x + \left(-1 \cdot x + \varepsilon \cdot x\right)\right)\right)\right)\right)\right)}{\varepsilon}}, 1\right) \]
    6. Step-by-step derivation
      1. lower-/.f64N/A

        \[\leadsto \mathsf{fma}\left(\frac{1}{2} \cdot x, \color{blue}{\frac{\frac{-1}{2} \cdot x + \left(\frac{1}{2} \cdot x + \varepsilon \cdot \left(-1 \cdot x + \left(\frac{-1}{2} \cdot x + \left(\frac{1}{2} \cdot x + \varepsilon \cdot \left(x + \left(-1 \cdot x + \varepsilon \cdot x\right)\right)\right)\right)\right)\right)}{\varepsilon}}, 1\right) \]
    7. Applied rewrites47.9%

      \[\leadsto \mathsf{fma}\left(0.5 \cdot x, \color{blue}{\frac{0 + \varepsilon \cdot \left(\left(-x\right) + \varepsilon \cdot \left(0 + x \cdot \varepsilon\right)\right)}{\varepsilon}}, 1\right) \]
    8. Taylor expanded in eps around inf

      \[\leadsto \color{blue}{\frac{1}{2} \cdot \left({\varepsilon}^{2} \cdot {x}^{2}\right)} \]
    9. Step-by-step derivation
      1. lower-*.f64N/A

        \[\leadsto \color{blue}{\frac{1}{2} \cdot \left({\varepsilon}^{2} \cdot {x}^{2}\right)} \]
      2. *-commutativeN/A

        \[\leadsto \frac{1}{2} \cdot \color{blue}{\left({x}^{2} \cdot {\varepsilon}^{2}\right)} \]
      3. unpow2N/A

        \[\leadsto \frac{1}{2} \cdot \left(\color{blue}{\left(x \cdot x\right)} \cdot {\varepsilon}^{2}\right) \]
      4. associate-*l*N/A

        \[\leadsto \frac{1}{2} \cdot \color{blue}{\left(x \cdot \left(x \cdot {\varepsilon}^{2}\right)\right)} \]
      5. *-commutativeN/A

        \[\leadsto \frac{1}{2} \cdot \left(x \cdot \color{blue}{\left({\varepsilon}^{2} \cdot x\right)}\right) \]
      6. lower-*.f64N/A

        \[\leadsto \frac{1}{2} \cdot \color{blue}{\left(x \cdot \left({\varepsilon}^{2} \cdot x\right)\right)} \]
      7. *-commutativeN/A

        \[\leadsto \frac{1}{2} \cdot \left(x \cdot \color{blue}{\left(x \cdot {\varepsilon}^{2}\right)}\right) \]
      8. lower-*.f64N/A

        \[\leadsto \frac{1}{2} \cdot \left(x \cdot \color{blue}{\left(x \cdot {\varepsilon}^{2}\right)}\right) \]
      9. unpow2N/A

        \[\leadsto \frac{1}{2} \cdot \left(x \cdot \left(x \cdot \color{blue}{\left(\varepsilon \cdot \varepsilon\right)}\right)\right) \]
      10. lower-*.f6472.2

        \[\leadsto 0.5 \cdot \left(x \cdot \left(x \cdot \color{blue}{\left(\varepsilon \cdot \varepsilon\right)}\right)\right) \]
    10. Applied rewrites72.2%

      \[\leadsto \color{blue}{0.5 \cdot \left(x \cdot \left(x \cdot \left(\varepsilon \cdot \varepsilon\right)\right)\right)} \]
    11. Step-by-step derivation
      1. lift-*.f64N/A

        \[\leadsto \frac{1}{2} \cdot \left(x \cdot \left(x \cdot \color{blue}{\left(\varepsilon \cdot \varepsilon\right)}\right)\right) \]
      2. associate-*r*N/A

        \[\leadsto \frac{1}{2} \cdot \color{blue}{\left(\left(x \cdot x\right) \cdot \left(\varepsilon \cdot \varepsilon\right)\right)} \]
      3. lift-*.f64N/A

        \[\leadsto \frac{1}{2} \cdot \left(\left(x \cdot x\right) \cdot \color{blue}{\left(\varepsilon \cdot \varepsilon\right)}\right) \]
      4. pow2N/A

        \[\leadsto \frac{1}{2} \cdot \left(\left(x \cdot x\right) \cdot \color{blue}{{\varepsilon}^{2}}\right) \]
      5. metadata-evalN/A

        \[\leadsto \frac{1}{2} \cdot \left(\left(x \cdot x\right) \cdot {\varepsilon}^{\color{blue}{\left(3 + -1\right)}}\right) \]
      6. pow-prod-upN/A

        \[\leadsto \frac{1}{2} \cdot \left(\left(x \cdot x\right) \cdot \color{blue}{\left({\varepsilon}^{3} \cdot {\varepsilon}^{-1}\right)}\right) \]
      7. cube-unmultN/A

        \[\leadsto \frac{1}{2} \cdot \left(\left(x \cdot x\right) \cdot \left(\color{blue}{\left(\varepsilon \cdot \left(\varepsilon \cdot \varepsilon\right)\right)} \cdot {\varepsilon}^{-1}\right)\right) \]
      8. lift-*.f64N/A

        \[\leadsto \frac{1}{2} \cdot \left(\left(x \cdot x\right) \cdot \left(\left(\varepsilon \cdot \color{blue}{\left(\varepsilon \cdot \varepsilon\right)}\right) \cdot {\varepsilon}^{-1}\right)\right) \]
      9. lift-*.f64N/A

        \[\leadsto \frac{1}{2} \cdot \left(\left(x \cdot x\right) \cdot \left(\color{blue}{\left(\varepsilon \cdot \left(\varepsilon \cdot \varepsilon\right)\right)} \cdot {\varepsilon}^{-1}\right)\right) \]
      10. inv-powN/A

        \[\leadsto \frac{1}{2} \cdot \left(\left(x \cdot x\right) \cdot \left(\left(\varepsilon \cdot \left(\varepsilon \cdot \varepsilon\right)\right) \cdot \color{blue}{\frac{1}{\varepsilon}}\right)\right) \]
      11. div-invN/A

        \[\leadsto \frac{1}{2} \cdot \left(\left(x \cdot x\right) \cdot \color{blue}{\frac{\varepsilon \cdot \left(\varepsilon \cdot \varepsilon\right)}{\varepsilon}}\right) \]
      12. associate-*r*N/A

        \[\leadsto \frac{1}{2} \cdot \color{blue}{\left(x \cdot \left(x \cdot \frac{\varepsilon \cdot \left(\varepsilon \cdot \varepsilon\right)}{\varepsilon}\right)\right)} \]
      13. associate-/l*N/A

        \[\leadsto \frac{1}{2} \cdot \left(x \cdot \color{blue}{\frac{x \cdot \left(\varepsilon \cdot \left(\varepsilon \cdot \varepsilon\right)\right)}{\varepsilon}}\right) \]
      14. lift-*.f64N/A

        \[\leadsto \frac{1}{2} \cdot \left(x \cdot \frac{\color{blue}{x \cdot \left(\varepsilon \cdot \left(\varepsilon \cdot \varepsilon\right)\right)}}{\varepsilon}\right) \]
      15. lift-/.f64N/A

        \[\leadsto \frac{1}{2} \cdot \left(x \cdot \color{blue}{\frac{x \cdot \left(\varepsilon \cdot \left(\varepsilon \cdot \varepsilon\right)\right)}{\varepsilon}}\right) \]
      16. associate-*l*N/A

        \[\leadsto \color{blue}{\left(\frac{1}{2} \cdot x\right) \cdot \frac{x \cdot \left(\varepsilon \cdot \left(\varepsilon \cdot \varepsilon\right)\right)}{\varepsilon}} \]
      17. lift-*.f64N/A

        \[\leadsto \color{blue}{\left(\frac{1}{2} \cdot x\right)} \cdot \frac{x \cdot \left(\varepsilon \cdot \left(\varepsilon \cdot \varepsilon\right)\right)}{\varepsilon} \]
      18. lift-/.f64N/A

        \[\leadsto \left(\frac{1}{2} \cdot x\right) \cdot \color{blue}{\frac{x \cdot \left(\varepsilon \cdot \left(\varepsilon \cdot \varepsilon\right)\right)}{\varepsilon}} \]
      19. associate-*r/N/A

        \[\leadsto \color{blue}{\frac{\left(\frac{1}{2} \cdot x\right) \cdot \left(x \cdot \left(\varepsilon \cdot \left(\varepsilon \cdot \varepsilon\right)\right)\right)}{\varepsilon}} \]
      20. lower-/.f64N/A

        \[\leadsto \color{blue}{\frac{\left(\frac{1}{2} \cdot x\right) \cdot \left(x \cdot \left(\varepsilon \cdot \left(\varepsilon \cdot \varepsilon\right)\right)\right)}{\varepsilon}} \]
    12. Applied rewrites81.9%

      \[\leadsto \color{blue}{\frac{\left(x \cdot \left(\varepsilon \cdot \left(\varepsilon \cdot \varepsilon\right)\right)\right) \cdot \left(x \cdot 0.5\right)}{\varepsilon}} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification89.4%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq 9.5 \cdot 10^{-5}:\\ \;\;\;\;\mathsf{fma}\left(0.5 \cdot x, x \cdot \left(\varepsilon \cdot \varepsilon\right), 1\right)\\ \mathbf{else}:\\ \;\;\;\;\frac{\left(0.5 \cdot x\right) \cdot \left(x \cdot \left(\varepsilon \cdot \left(\varepsilon \cdot \varepsilon\right)\right)\right)}{\varepsilon}\\ \end{array} \]
  5. Add Preprocessing

Alternative 9: 66.6% accurate, 9.1× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq 1.6:\\ \;\;\;\;\mathsf{fma}\left(x, \mathsf{fma}\left(x, \mathsf{fma}\left(x, -0.16666666666666666, 0.5\right), -1\right), 1\right)\\ \mathbf{elif}\;x \leq 1.05 \cdot 10^{+96}:\\ \;\;\;\;0\\ \mathbf{else}:\\ \;\;\;\;\mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x, 0.3333333333333333, -0.5\right), 1\right)\\ \end{array} \end{array} \]
(FPCore (x eps)
 :precision binary64
 (if (<= x 1.6)
   (fma x (fma x (fma x -0.16666666666666666 0.5) -1.0) 1.0)
   (if (<= x 1.05e+96) 0.0 (fma (* x x) (fma x 0.3333333333333333 -0.5) 1.0))))
double code(double x, double eps) {
	double tmp;
	if (x <= 1.6) {
		tmp = fma(x, fma(x, fma(x, -0.16666666666666666, 0.5), -1.0), 1.0);
	} else if (x <= 1.05e+96) {
		tmp = 0.0;
	} else {
		tmp = fma((x * x), fma(x, 0.3333333333333333, -0.5), 1.0);
	}
	return tmp;
}
# Alternative 9 (Herbie-generated): piecewise polynomial split on x; eps unused.
# Early returns replace the tmp accumulator; the explicit Float64 coercion on
# x * x is kept from the generated form.
function code(x, eps)
	if x <= 1.6
		# cubic regime in Horner form via fused multiply-adds
		return fma(x, fma(x, fma(x, -0.16666666666666666, 0.5), -1.0), 1.0)
	end
	if x <= 1.05e+96
		return 0.0
	end
	return fma(Float64(x * x), fma(x, 0.3333333333333333, -0.5), 1.0)
end
code[x_, eps_] := If[LessEqual[x, 1.6], N[(x * N[(x * N[(x * -0.16666666666666666 + 0.5), $MachinePrecision] + -1.0), $MachinePrecision] + 1.0), $MachinePrecision], If[LessEqual[x, 1.05e+96], 0.0, N[(N[(x * x), $MachinePrecision] * N[(x * 0.3333333333333333 + -0.5), $MachinePrecision] + 1.0), $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x \leq 1.6:\\
\;\;\;\;\mathsf{fma}\left(x, \mathsf{fma}\left(x, \mathsf{fma}\left(x, -0.16666666666666666, 0.5\right), -1\right), 1\right)\\

\mathbf{elif}\;x \leq 1.05 \cdot 10^{+96}:\\
\;\;\;\;0\\

\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x, 0.3333333333333333, -0.5\right), 1\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if x < 1.6000000000000001

    1. Initial program 62.9%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Add Preprocessing
    3. Taylor expanded in eps around inf

      \[\leadsto \color{blue}{\frac{1}{2} \cdot \left(e^{\mathsf{neg}\left(x \cdot \left(1 - \varepsilon\right)\right)} - -1 \cdot e^{\mathsf{neg}\left(x \cdot \left(1 + \varepsilon\right)\right)}\right)} \]
    4. Step-by-step derivation
      1. lower-*.f64N/A

        \[\leadsto \color{blue}{\frac{1}{2} \cdot \left(e^{\mathsf{neg}\left(x \cdot \left(1 - \varepsilon\right)\right)} - -1 \cdot e^{\mathsf{neg}\left(x \cdot \left(1 + \varepsilon\right)\right)}\right)} \]
      2. cancel-sign-sub-invN/A

        \[\leadsto \frac{1}{2} \cdot \color{blue}{\left(e^{\mathsf{neg}\left(x \cdot \left(1 - \varepsilon\right)\right)} + \left(\mathsf{neg}\left(-1\right)\right) \cdot e^{\mathsf{neg}\left(x \cdot \left(1 + \varepsilon\right)\right)}\right)} \]
      3. metadata-evalN/A

        \[\leadsto \frac{1}{2} \cdot \left(e^{\mathsf{neg}\left(x \cdot \left(1 - \varepsilon\right)\right)} + \color{blue}{1} \cdot e^{\mathsf{neg}\left(x \cdot \left(1 + \varepsilon\right)\right)}\right) \]
      4. *-lft-identityN/A

        \[\leadsto \frac{1}{2} \cdot \left(e^{\mathsf{neg}\left(x \cdot \left(1 - \varepsilon\right)\right)} + \color{blue}{e^{\mathsf{neg}\left(x \cdot \left(1 + \varepsilon\right)\right)}}\right) \]
      5. lower-+.f64N/A

        \[\leadsto \frac{1}{2} \cdot \color{blue}{\left(e^{\mathsf{neg}\left(x \cdot \left(1 - \varepsilon\right)\right)} + e^{\mathsf{neg}\left(x \cdot \left(1 + \varepsilon\right)\right)}\right)} \]
    5. Applied rewrites98.4%

      \[\leadsto \color{blue}{0.5 \cdot \left(e^{\mathsf{fma}\left(x, \varepsilon, -x\right)} + e^{x \cdot \left(-1 - \varepsilon\right)}\right)} \]
    6. Taylor expanded in eps around 0

      \[\leadsto \color{blue}{\frac{1}{2} \cdot \left(e^{\mathsf{neg}\left(x\right)} + e^{-1 \cdot x}\right)} \]
    7. Step-by-step derivation
      1. distribute-rgt-inN/A

        \[\leadsto \color{blue}{e^{\mathsf{neg}\left(x\right)} \cdot \frac{1}{2} + e^{-1 \cdot x} \cdot \frac{1}{2}} \]
      2. neg-mul-1N/A

        \[\leadsto e^{\mathsf{neg}\left(x\right)} \cdot \frac{1}{2} + e^{\color{blue}{\mathsf{neg}\left(x\right)}} \cdot \frac{1}{2} \]
      3. distribute-lft-outN/A

        \[\leadsto \color{blue}{e^{\mathsf{neg}\left(x\right)} \cdot \left(\frac{1}{2} + \frac{1}{2}\right)} \]
      4. metadata-evalN/A

        \[\leadsto e^{\mathsf{neg}\left(x\right)} \cdot \color{blue}{1} \]
      5. lower-*.f64N/A

        \[\leadsto \color{blue}{e^{\mathsf{neg}\left(x\right)} \cdot 1} \]
      6. neg-mul-1N/A

        \[\leadsto e^{\color{blue}{-1 \cdot x}} \cdot 1 \]
      7. lower-exp.f64N/A

        \[\leadsto \color{blue}{e^{-1 \cdot x}} \cdot 1 \]
      8. neg-mul-1N/A

        \[\leadsto e^{\color{blue}{\mathsf{neg}\left(x\right)}} \cdot 1 \]
      9. lower-neg.f6477.5

        \[\leadsto e^{\color{blue}{-x}} \cdot 1 \]
    8. Applied rewrites77.5%

      \[\leadsto \color{blue}{e^{-x} \cdot 1} \]
    9. Taylor expanded in x around 0

      \[\leadsto \color{blue}{1 + x \cdot \left(x \cdot \left(\frac{1}{2} + \frac{-1}{6} \cdot x\right) - 1\right)} \]
    10. Step-by-step derivation
      1. +-commutativeN/A

        \[\leadsto \color{blue}{x \cdot \left(x \cdot \left(\frac{1}{2} + \frac{-1}{6} \cdot x\right) - 1\right) + 1} \]
      2. lower-fma.f64N/A

        \[\leadsto \color{blue}{\mathsf{fma}\left(x, x \cdot \left(\frac{1}{2} + \frac{-1}{6} \cdot x\right) - 1, 1\right)} \]
      3. sub-negN/A

        \[\leadsto \mathsf{fma}\left(x, \color{blue}{x \cdot \left(\frac{1}{2} + \frac{-1}{6} \cdot x\right) + \left(\mathsf{neg}\left(1\right)\right)}, 1\right) \]
      4. metadata-evalN/A

        \[\leadsto \mathsf{fma}\left(x, x \cdot \left(\frac{1}{2} + \frac{-1}{6} \cdot x\right) + \color{blue}{-1}, 1\right) \]
      5. lower-fma.f64N/A

        \[\leadsto \mathsf{fma}\left(x, \color{blue}{\mathsf{fma}\left(x, \frac{1}{2} + \frac{-1}{6} \cdot x, -1\right)}, 1\right) \]
      6. +-commutativeN/A

        \[\leadsto \mathsf{fma}\left(x, \mathsf{fma}\left(x, \color{blue}{\frac{-1}{6} \cdot x + \frac{1}{2}}, -1\right), 1\right) \]
      7. *-commutativeN/A

        \[\leadsto \mathsf{fma}\left(x, \mathsf{fma}\left(x, \color{blue}{x \cdot \frac{-1}{6}} + \frac{1}{2}, -1\right), 1\right) \]
      8. lower-fma.f6473.5

        \[\leadsto \mathsf{fma}\left(x, \mathsf{fma}\left(x, \color{blue}{\mathsf{fma}\left(x, -0.16666666666666666, 0.5\right)}, -1\right), 1\right) \]
    11. Applied rewrites73.5%

      \[\leadsto \color{blue}{\mathsf{fma}\left(x, \mathsf{fma}\left(x, \mathsf{fma}\left(x, -0.16666666666666666, 0.5\right), -1\right), 1\right)} \]

    if 1.6000000000000001 < x < 1.0500000000000001e96

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Add Preprocessing
    3. Taylor expanded in x around 0

      \[\leadsto \color{blue}{1 + x \cdot \left(\frac{1}{2} \cdot \left(x \cdot \left(\frac{1}{2} \cdot \left(\left(1 + \frac{1}{\varepsilon}\right) \cdot {\left(\varepsilon - 1\right)}^{2}\right) - \frac{1}{2} \cdot \left({\left(1 + \varepsilon\right)}^{2} \cdot \left(\frac{1}{\varepsilon} - 1\right)\right)\right)\right) + \frac{1}{2} \cdot \left(\left(1 + \frac{1}{\varepsilon}\right) \cdot \left(\varepsilon - 1\right) - -1 \cdot \left(\left(1 + \varepsilon\right) \cdot \left(\frac{1}{\varepsilon} - 1\right)\right)\right)\right)} \]
    4. Applied rewrites28.3%

      \[\leadsto \color{blue}{\mathsf{fma}\left(0.5 \cdot x, \left(\left(-1 - \varepsilon\right) + \frac{\varepsilon + 1}{\varepsilon}\right) + \mathsf{fma}\left(1 + \frac{1}{\varepsilon}, \mathsf{fma}\left(0.5 \cdot x, \left(1 - \varepsilon\right) \cdot \left(1 - \varepsilon\right), -1 + \varepsilon\right), x \cdot \left(0.5 \cdot \left(\left(\left(-1 - \varepsilon\right) + \frac{\varepsilon + 1}{\varepsilon}\right) \cdot \left(-1 - \varepsilon\right)\right)\right)\right), 1\right)} \]
    5. Taylor expanded in eps around 0

      \[\leadsto \color{blue}{\frac{1}{2} \cdot \frac{x \cdot \left(\frac{-1}{2} \cdot x + \frac{1}{2} \cdot x\right)}{\varepsilon}} \]
    6. Step-by-step derivation
      1. associate-*r/N/A

        \[\leadsto \color{blue}{\frac{\frac{1}{2} \cdot \left(x \cdot \left(\frac{-1}{2} \cdot x + \frac{1}{2} \cdot x\right)\right)}{\varepsilon}} \]
      2. distribute-rgt-outN/A

        \[\leadsto \frac{\frac{1}{2} \cdot \left(x \cdot \color{blue}{\left(x \cdot \left(\frac{-1}{2} + \frac{1}{2}\right)\right)}\right)}{\varepsilon} \]
      3. metadata-evalN/A

        \[\leadsto \frac{\frac{1}{2} \cdot \left(x \cdot \left(x \cdot \color{blue}{0}\right)\right)}{\varepsilon} \]
      4. mul0-rgtN/A

        \[\leadsto \frac{\frac{1}{2} \cdot \left(x \cdot \color{blue}{0}\right)}{\varepsilon} \]
      5. mul0-rgtN/A

        \[\leadsto \frac{\frac{1}{2} \cdot \color{blue}{0}}{\varepsilon} \]
      6. metadata-evalN/A

        \[\leadsto \frac{\color{blue}{0}}{\varepsilon} \]
      7. +-inversesN/A

        \[\leadsto \frac{\color{blue}{e^{\mathsf{neg}\left(x\right)} - e^{\mathsf{neg}\left(x\right)}}}{\varepsilon} \]
      8. div-subN/A

        \[\leadsto \color{blue}{\frac{e^{\mathsf{neg}\left(x\right)}}{\varepsilon} - \frac{e^{\mathsf{neg}\left(x\right)}}{\varepsilon}} \]
      9. +-inverses52.6

        \[\leadsto \color{blue}{0} \]
    7. Applied rewrites52.6%

      \[\leadsto \color{blue}{0} \]

    if 1.0500000000000001e96 < x

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Add Preprocessing
    3. Taylor expanded in eps around 0

      \[\leadsto \color{blue}{\frac{1}{2} \cdot \left(\left(e^{\mathsf{neg}\left(x\right)} + x \cdot e^{\mathsf{neg}\left(x\right)}\right) - \left(-1 \cdot e^{\mathsf{neg}\left(x\right)} + -1 \cdot \left(x \cdot e^{\mathsf{neg}\left(x\right)}\right)\right)\right)} \]
    4. Step-by-step derivation
      1. lower-*.f64N/A

        \[\leadsto \color{blue}{\frac{1}{2} \cdot \left(\left(e^{\mathsf{neg}\left(x\right)} + x \cdot e^{\mathsf{neg}\left(x\right)}\right) - \left(-1 \cdot e^{\mathsf{neg}\left(x\right)} + -1 \cdot \left(x \cdot e^{\mathsf{neg}\left(x\right)}\right)\right)\right)} \]
      2. mul-1-negN/A

        \[\leadsto \frac{1}{2} \cdot \left(\left(e^{\mathsf{neg}\left(x\right)} + x \cdot e^{\mathsf{neg}\left(x\right)}\right) - \left(-1 \cdot e^{\mathsf{neg}\left(x\right)} + \color{blue}{\left(\mathsf{neg}\left(x \cdot e^{\mathsf{neg}\left(x\right)}\right)\right)}\right)\right) \]
      3. unsub-negN/A

        \[\leadsto \frac{1}{2} \cdot \left(\left(e^{\mathsf{neg}\left(x\right)} + x \cdot e^{\mathsf{neg}\left(x\right)}\right) - \color{blue}{\left(-1 \cdot e^{\mathsf{neg}\left(x\right)} - x \cdot e^{\mathsf{neg}\left(x\right)}\right)}\right) \]
      4. associate-+l-N/A

        \[\leadsto \frac{1}{2} \cdot \color{blue}{\left(\left(\left(e^{\mathsf{neg}\left(x\right)} + x \cdot e^{\mathsf{neg}\left(x\right)}\right) - -1 \cdot e^{\mathsf{neg}\left(x\right)}\right) + x \cdot e^{\mathsf{neg}\left(x\right)}\right)} \]
      5. *-commutativeN/A

        \[\leadsto \frac{1}{2} \cdot \left(\left(\left(e^{\mathsf{neg}\left(x\right)} + x \cdot e^{\mathsf{neg}\left(x\right)}\right) - -1 \cdot e^{\mathsf{neg}\left(x\right)}\right) + \color{blue}{e^{\mathsf{neg}\left(x\right)} \cdot x}\right) \]
      6. distribute-rgt1-inN/A

        \[\leadsto \frac{1}{2} \cdot \left(\left(\color{blue}{\left(x + 1\right) \cdot e^{\mathsf{neg}\left(x\right)}} - -1 \cdot e^{\mathsf{neg}\left(x\right)}\right) + e^{\mathsf{neg}\left(x\right)} \cdot x\right) \]
      7. distribute-rgt-out--N/A

        \[\leadsto \frac{1}{2} \cdot \left(\color{blue}{e^{\mathsf{neg}\left(x\right)} \cdot \left(\left(x + 1\right) - -1\right)} + e^{\mathsf{neg}\left(x\right)} \cdot x\right) \]
      8. distribute-lft-outN/A

        \[\leadsto \frac{1}{2} \cdot \color{blue}{\left(e^{\mathsf{neg}\left(x\right)} \cdot \left(\left(\left(x + 1\right) - -1\right) + x\right)\right)} \]
      9. lower-*.f64N/A

        \[\leadsto \frac{1}{2} \cdot \color{blue}{\left(e^{\mathsf{neg}\left(x\right)} \cdot \left(\left(\left(x + 1\right) - -1\right) + x\right)\right)} \]
    5. Applied rewrites40.0%

      \[\leadsto \color{blue}{0.5 \cdot \left(e^{-x} \cdot \left(\left(x + 2\right) + x\right)\right)} \]
    6. Taylor expanded in x around 0

      \[\leadsto \color{blue}{1 + {x}^{2} \cdot \left(\frac{1}{3} \cdot x - \frac{1}{2}\right)} \]
    7. Step-by-step derivation
      1. +-commutativeN/A

        \[\leadsto \color{blue}{{x}^{2} \cdot \left(\frac{1}{3} \cdot x - \frac{1}{2}\right) + 1} \]
      2. lower-fma.f64N/A

        \[\leadsto \color{blue}{\mathsf{fma}\left({x}^{2}, \frac{1}{3} \cdot x - \frac{1}{2}, 1\right)} \]
      3. unpow2N/A

        \[\leadsto \mathsf{fma}\left(\color{blue}{x \cdot x}, \frac{1}{3} \cdot x - \frac{1}{2}, 1\right) \]
      4. lower-*.f64N/A

        \[\leadsto \mathsf{fma}\left(\color{blue}{x \cdot x}, \frac{1}{3} \cdot x - \frac{1}{2}, 1\right) \]
      5. sub-negN/A

        \[\leadsto \mathsf{fma}\left(x \cdot x, \color{blue}{\frac{1}{3} \cdot x + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)}, 1\right) \]
      6. *-commutativeN/A

        \[\leadsto \mathsf{fma}\left(x \cdot x, \color{blue}{x \cdot \frac{1}{3}} + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right), 1\right) \]
      7. metadata-evalN/A

        \[\leadsto \mathsf{fma}\left(x \cdot x, x \cdot \frac{1}{3} + \color{blue}{\frac{-1}{2}}, 1\right) \]
      8. lower-fma.f6457.2

        \[\leadsto \mathsf{fma}\left(x \cdot x, \color{blue}{\mathsf{fma}\left(x, 0.3333333333333333, -0.5\right)}, 1\right) \]
    8. Applied rewrites57.2%

      \[\leadsto \color{blue}{\mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x, 0.3333333333333333, -0.5\right), 1\right)} \]
  3. Recombined 3 regimes into one program.
  4. Add Preprocessing

Alternative 10: 64.5% accurate, 9.1× speedup

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq 520:\\ \;\;\;\;\mathsf{fma}\left(x, \mathsf{fma}\left(x, 0.5, -1\right), 1\right)\\ \mathbf{elif}\;x \leq 1.05 \cdot 10^{+96}:\\ \;\;\;\;0\\ \mathbf{else}:\\ \;\;\;\;\mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x, 0.3333333333333333, -0.5\right), 1\right)\\ \end{array} \end{array} \]
; Alternative 10: same regime structure as Alternative 9 but with a quadratic
; (rather than cubic) polynomial below the first split point; eps is unused.
(FPCore (x eps)
 :precision binary64
 (if (<= x 520.0)
   (fma x (fma x 0.5 -1.0) 1.0)
   (if (<= x 1.05e+96) 0.0 (fma (* x x) (fma x 0.3333333333333333 -0.5) 1.0))))
double code(double x, double eps) {
	double tmp;
	if (x <= 520.0) {
		tmp = fma(x, fma(x, 0.5, -1.0), 1.0);
	} else if (x <= 1.05e+96) {
		tmp = 0.0;
	} else {
		tmp = fma((x * x), fma(x, 0.3333333333333333, -0.5), 1.0);
	}
	return tmp;
}
# Alternative 10 (Herbie-generated): quadratic low regime, zero mid regime,
# cubic high regime; eps unused. Early returns replace the tmp accumulator.
function code(x, eps)
	if x <= 520.0
		return fma(x, fma(x, 0.5, -1.0), 1.0)
	end
	if x <= 1.05e+96
		return 0.0
	end
	return fma(Float64(x * x), fma(x, 0.3333333333333333, -0.5), 1.0)
end
code[x_, eps_] := If[LessEqual[x, 520.0], N[(x * N[(x * 0.5 + -1.0), $MachinePrecision] + 1.0), $MachinePrecision], If[LessEqual[x, 1.05e+96], 0.0, N[(N[(x * x), $MachinePrecision] * N[(x * 0.3333333333333333 + -0.5), $MachinePrecision] + 1.0), $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x \leq 520:\\
\;\;\;\;\mathsf{fma}\left(x, \mathsf{fma}\left(x, 0.5, -1\right), 1\right)\\

\mathbf{elif}\;x \leq 1.05 \cdot 10^{+96}:\\
\;\;\;\;0\\

\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x, 0.3333333333333333, -0.5\right), 1\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if x < 520

    1. Initial program 62.9%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Add Preprocessing
    3. Taylor expanded in eps around inf

      \[\leadsto \color{blue}{\frac{1}{2} \cdot \left(e^{\mathsf{neg}\left(x \cdot \left(1 - \varepsilon\right)\right)} - -1 \cdot e^{\mathsf{neg}\left(x \cdot \left(1 + \varepsilon\right)\right)}\right)} \]
    4. Step-by-step derivation
      1. lower-*.f64N/A

        \[\leadsto \color{blue}{\frac{1}{2} \cdot \left(e^{\mathsf{neg}\left(x \cdot \left(1 - \varepsilon\right)\right)} - -1 \cdot e^{\mathsf{neg}\left(x \cdot \left(1 + \varepsilon\right)\right)}\right)} \]
      2. cancel-sign-sub-invN/A

        \[\leadsto \frac{1}{2} \cdot \color{blue}{\left(e^{\mathsf{neg}\left(x \cdot \left(1 - \varepsilon\right)\right)} + \left(\mathsf{neg}\left(-1\right)\right) \cdot e^{\mathsf{neg}\left(x \cdot \left(1 + \varepsilon\right)\right)}\right)} \]
      3. metadata-evalN/A

        \[\leadsto \frac{1}{2} \cdot \left(e^{\mathsf{neg}\left(x \cdot \left(1 - \varepsilon\right)\right)} + \color{blue}{1} \cdot e^{\mathsf{neg}\left(x \cdot \left(1 + \varepsilon\right)\right)}\right) \]
      4. *-lft-identityN/A

        \[\leadsto \frac{1}{2} \cdot \left(e^{\mathsf{neg}\left(x \cdot \left(1 - \varepsilon\right)\right)} + \color{blue}{e^{\mathsf{neg}\left(x \cdot \left(1 + \varepsilon\right)\right)}}\right) \]
      5. lower-+.f64N/A

        \[\leadsto \frac{1}{2} \cdot \color{blue}{\left(e^{\mathsf{neg}\left(x \cdot \left(1 - \varepsilon\right)\right)} + e^{\mathsf{neg}\left(x \cdot \left(1 + \varepsilon\right)\right)}\right)} \]
    5. Applied rewrites98.4%

      \[\leadsto \color{blue}{0.5 \cdot \left(e^{\mathsf{fma}\left(x, \varepsilon, -x\right)} + e^{x \cdot \left(-1 - \varepsilon\right)}\right)} \]
    6. Taylor expanded in eps around 0

      \[\leadsto \color{blue}{\frac{1}{2} \cdot \left(e^{\mathsf{neg}\left(x\right)} + e^{-1 \cdot x}\right)} \]
    7. Step-by-step derivation
      1. distribute-rgt-inN/A

        \[\leadsto \color{blue}{e^{\mathsf{neg}\left(x\right)} \cdot \frac{1}{2} + e^{-1 \cdot x} \cdot \frac{1}{2}} \]
      2. neg-mul-1N/A

        \[\leadsto e^{\mathsf{neg}\left(x\right)} \cdot \frac{1}{2} + e^{\color{blue}{\mathsf{neg}\left(x\right)}} \cdot \frac{1}{2} \]
      3. distribute-lft-outN/A

        \[\leadsto \color{blue}{e^{\mathsf{neg}\left(x\right)} \cdot \left(\frac{1}{2} + \frac{1}{2}\right)} \]
      4. metadata-evalN/A

        \[\leadsto e^{\mathsf{neg}\left(x\right)} \cdot \color{blue}{1} \]
      5. lower-*.f64N/A

        \[\leadsto \color{blue}{e^{\mathsf{neg}\left(x\right)} \cdot 1} \]
      6. neg-mul-1N/A

        \[\leadsto e^{\color{blue}{-1 \cdot x}} \cdot 1 \]
      7. lower-exp.f64N/A

        \[\leadsto \color{blue}{e^{-1 \cdot x}} \cdot 1 \]
      8. neg-mul-1N/A

        \[\leadsto e^{\color{blue}{\mathsf{neg}\left(x\right)}} \cdot 1 \]
      9. lower-neg.f6477.5

        \[\leadsto e^{\color{blue}{-x}} \cdot 1 \]
    8. Applied rewrites77.5%

      \[\leadsto \color{blue}{e^{-x} \cdot 1} \]
    9. Taylor expanded in x around 0

      \[\leadsto \color{blue}{1 + x \cdot \left(\frac{1}{2} \cdot x - 1\right)} \]
    10. Step-by-step derivation
      1. +-commutativeN/A

        \[\leadsto \color{blue}{x \cdot \left(\frac{1}{2} \cdot x - 1\right) + 1} \]
      2. lower-fma.f64N/A

        \[\leadsto \color{blue}{\mathsf{fma}\left(x, \frac{1}{2} \cdot x - 1, 1\right)} \]
      3. sub-negN/A

        \[\leadsto \mathsf{fma}\left(x, \color{blue}{\frac{1}{2} \cdot x + \left(\mathsf{neg}\left(1\right)\right)}, 1\right) \]
      4. *-commutativeN/A

        \[\leadsto \mathsf{fma}\left(x, \color{blue}{x \cdot \frac{1}{2}} + \left(\mathsf{neg}\left(1\right)\right), 1\right) \]
      5. metadata-evalN/A

        \[\leadsto \mathsf{fma}\left(x, x \cdot \frac{1}{2} + \color{blue}{-1}, 1\right) \]
      6. lower-fma.f6471.5

        \[\leadsto \mathsf{fma}\left(x, \color{blue}{\mathsf{fma}\left(x, 0.5, -1\right)}, 1\right) \]
    11. Applied rewrites71.5%

      \[\leadsto \color{blue}{\mathsf{fma}\left(x, \mathsf{fma}\left(x, 0.5, -1\right), 1\right)} \]

    if 520 < x < 1.0500000000000001e96

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Add Preprocessing
    3. Taylor expanded in x around 0

      \[\leadsto \color{blue}{1 + x \cdot \left(\frac{1}{2} \cdot \left(x \cdot \left(\frac{1}{2} \cdot \left(\left(1 + \frac{1}{\varepsilon}\right) \cdot {\left(\varepsilon - 1\right)}^{2}\right) - \frac{1}{2} \cdot \left({\left(1 + \varepsilon\right)}^{2} \cdot \left(\frac{1}{\varepsilon} - 1\right)\right)\right)\right) + \frac{1}{2} \cdot \left(\left(1 + \frac{1}{\varepsilon}\right) \cdot \left(\varepsilon - 1\right) - -1 \cdot \left(\left(1 + \varepsilon\right) \cdot \left(\frac{1}{\varepsilon} - 1\right)\right)\right)\right)} \]
    4. Applied rewrites28.3%

      \[\leadsto \color{blue}{\mathsf{fma}\left(0.5 \cdot x, \left(\left(-1 - \varepsilon\right) + \frac{\varepsilon + 1}{\varepsilon}\right) + \mathsf{fma}\left(1 + \frac{1}{\varepsilon}, \mathsf{fma}\left(0.5 \cdot x, \left(1 - \varepsilon\right) \cdot \left(1 - \varepsilon\right), -1 + \varepsilon\right), x \cdot \left(0.5 \cdot \left(\left(\left(-1 - \varepsilon\right) + \frac{\varepsilon + 1}{\varepsilon}\right) \cdot \left(-1 - \varepsilon\right)\right)\right)\right), 1\right)} \]
    5. Taylor expanded in eps around 0

      \[\leadsto \color{blue}{\frac{1}{2} \cdot \frac{x \cdot \left(\frac{-1}{2} \cdot x + \frac{1}{2} \cdot x\right)}{\varepsilon}} \]
    6. Step-by-step derivation
      1. associate-*r/N/A

        \[\leadsto \color{blue}{\frac{\frac{1}{2} \cdot \left(x \cdot \left(\frac{-1}{2} \cdot x + \frac{1}{2} \cdot x\right)\right)}{\varepsilon}} \]
      2. distribute-rgt-outN/A

        \[\leadsto \frac{\frac{1}{2} \cdot \left(x \cdot \color{blue}{\left(x \cdot \left(\frac{-1}{2} + \frac{1}{2}\right)\right)}\right)}{\varepsilon} \]
      3. metadata-evalN/A

        \[\leadsto \frac{\frac{1}{2} \cdot \left(x \cdot \left(x \cdot \color{blue}{0}\right)\right)}{\varepsilon} \]
      4. mul0-rgtN/A

        \[\leadsto \frac{\frac{1}{2} \cdot \left(x \cdot \color{blue}{0}\right)}{\varepsilon} \]
      5. mul0-rgtN/A

        \[\leadsto \frac{\frac{1}{2} \cdot \color{blue}{0}}{\varepsilon} \]
      6. metadata-evalN/A

        \[\leadsto \frac{\color{blue}{0}}{\varepsilon} \]
      7. +-inversesN/A

        \[\leadsto \frac{\color{blue}{e^{\mathsf{neg}\left(x\right)} - e^{\mathsf{neg}\left(x\right)}}}{\varepsilon} \]
      8. div-subN/A

        \[\leadsto \color{blue}{\frac{e^{\mathsf{neg}\left(x\right)}}{\varepsilon} - \frac{e^{\mathsf{neg}\left(x\right)}}{\varepsilon}} \]
      9. +-inverses52.6

        \[\leadsto \color{blue}{0} \]
    7. Applied rewrites52.6%

      \[\leadsto \color{blue}{0} \]

    if 1.0500000000000001e96 < x

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Add Preprocessing
    3. Taylor expanded in eps around 0

      \[\leadsto \color{blue}{\frac{1}{2} \cdot \left(\left(e^{\mathsf{neg}\left(x\right)} + x \cdot e^{\mathsf{neg}\left(x\right)}\right) - \left(-1 \cdot e^{\mathsf{neg}\left(x\right)} + -1 \cdot \left(x \cdot e^{\mathsf{neg}\left(x\right)}\right)\right)\right)} \]
    4. Step-by-step derivation
      1. lower-*.f64N/A

        \[\leadsto \color{blue}{\frac{1}{2} \cdot \left(\left(e^{\mathsf{neg}\left(x\right)} + x \cdot e^{\mathsf{neg}\left(x\right)}\right) - \left(-1 \cdot e^{\mathsf{neg}\left(x\right)} + -1 \cdot \left(x \cdot e^{\mathsf{neg}\left(x\right)}\right)\right)\right)} \]
      2. mul-1-negN/A

        \[\leadsto \frac{1}{2} \cdot \left(\left(e^{\mathsf{neg}\left(x\right)} + x \cdot e^{\mathsf{neg}\left(x\right)}\right) - \left(-1 \cdot e^{\mathsf{neg}\left(x\right)} + \color{blue}{\left(\mathsf{neg}\left(x \cdot e^{\mathsf{neg}\left(x\right)}\right)\right)}\right)\right) \]
      3. unsub-negN/A

        \[\leadsto \frac{1}{2} \cdot \left(\left(e^{\mathsf{neg}\left(x\right)} + x \cdot e^{\mathsf{neg}\left(x\right)}\right) - \color{blue}{\left(-1 \cdot e^{\mathsf{neg}\left(x\right)} - x \cdot e^{\mathsf{neg}\left(x\right)}\right)}\right) \]
      4. associate-+l-N/A

        \[\leadsto \frac{1}{2} \cdot \color{blue}{\left(\left(\left(e^{\mathsf{neg}\left(x\right)} + x \cdot e^{\mathsf{neg}\left(x\right)}\right) - -1 \cdot e^{\mathsf{neg}\left(x\right)}\right) + x \cdot e^{\mathsf{neg}\left(x\right)}\right)} \]
      5. *-commutativeN/A

        \[\leadsto \frac{1}{2} \cdot \left(\left(\left(e^{\mathsf{neg}\left(x\right)} + x \cdot e^{\mathsf{neg}\left(x\right)}\right) - -1 \cdot e^{\mathsf{neg}\left(x\right)}\right) + \color{blue}{e^{\mathsf{neg}\left(x\right)} \cdot x}\right) \]
      6. distribute-rgt1-inN/A

        \[\leadsto \frac{1}{2} \cdot \left(\left(\color{blue}{\left(x + 1\right) \cdot e^{\mathsf{neg}\left(x\right)}} - -1 \cdot e^{\mathsf{neg}\left(x\right)}\right) + e^{\mathsf{neg}\left(x\right)} \cdot x\right) \]
      7. distribute-rgt-out--N/A

        \[\leadsto \frac{1}{2} \cdot \left(\color{blue}{e^{\mathsf{neg}\left(x\right)} \cdot \left(\left(x + 1\right) - -1\right)} + e^{\mathsf{neg}\left(x\right)} \cdot x\right) \]
      8. distribute-lft-outN/A

        \[\leadsto \frac{1}{2} \cdot \color{blue}{\left(e^{\mathsf{neg}\left(x\right)} \cdot \left(\left(\left(x + 1\right) - -1\right) + x\right)\right)} \]
      9. lower-*.f64N/A

        \[\leadsto \frac{1}{2} \cdot \color{blue}{\left(e^{\mathsf{neg}\left(x\right)} \cdot \left(\left(\left(x + 1\right) - -1\right) + x\right)\right)} \]
    5. Applied rewrites40.0%

      \[\leadsto \color{blue}{0.5 \cdot \left(e^{-x} \cdot \left(\left(x + 2\right) + x\right)\right)} \]
    6. Taylor expanded in x around 0

      \[\leadsto \color{blue}{1 + {x}^{2} \cdot \left(\frac{1}{3} \cdot x - \frac{1}{2}\right)} \]
    7. Step-by-step derivation
      1. +-commutativeN/A

        \[\leadsto \color{blue}{{x}^{2} \cdot \left(\frac{1}{3} \cdot x - \frac{1}{2}\right) + 1} \]
      2. lower-fma.f64N/A

        \[\leadsto \color{blue}{\mathsf{fma}\left({x}^{2}, \frac{1}{3} \cdot x - \frac{1}{2}, 1\right)} \]
      3. unpow2N/A

        \[\leadsto \mathsf{fma}\left(\color{blue}{x \cdot x}, \frac{1}{3} \cdot x - \frac{1}{2}, 1\right) \]
      4. lower-*.f64N/A

        \[\leadsto \mathsf{fma}\left(\color{blue}{x \cdot x}, \frac{1}{3} \cdot x - \frac{1}{2}, 1\right) \]
      5. sub-negN/A

        \[\leadsto \mathsf{fma}\left(x \cdot x, \color{blue}{\frac{1}{3} \cdot x + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)}, 1\right) \]
      6. *-commutativeN/A

        \[\leadsto \mathsf{fma}\left(x \cdot x, \color{blue}{x \cdot \frac{1}{3}} + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right), 1\right) \]
      7. metadata-evalN/A

        \[\leadsto \mathsf{fma}\left(x \cdot x, x \cdot \frac{1}{3} + \color{blue}{\frac{-1}{2}}, 1\right) \]
      8. lower-fma.f6457.2

        \[\leadsto \mathsf{fma}\left(x \cdot x, \color{blue}{\mathsf{fma}\left(x, 0.3333333333333333, -0.5\right)}, 1\right) \]
    8. Applied rewrites57.2%

      \[\leadsto \color{blue}{\mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x, 0.3333333333333333, -0.5\right), 1\right)} \]
  3. Recombined 3 regimes into one program.
  4. Add Preprocessing

Alternative 11: 81.9% accurate, 9.7× speedup

\[\begin{array}{l} \\ \begin{array}{l} t_0 := x \cdot \left(\varepsilon \cdot \varepsilon\right)\\ \mathbf{if}\;x \leq 9.5 \cdot 10^{-5}:\\ \;\;\;\;\mathsf{fma}\left(0.5 \cdot x, t\_0, 1\right)\\ \mathbf{else}:\\ \;\;\;\;0.5 \cdot \left(x \cdot t\_0\right)\\ \end{array} \end{array} \]
; Alternative 11: both regimes share the subterm t_0 = x*eps^2;
; near zero the result is 1 + (x^2*eps^2)/2, otherwise (x^2*eps^2)/2.
(FPCore (x eps)
 :precision binary64
 (let* ((t_0 (* x (* eps eps))))
   (if (<= x 9.5e-5) (fma (* 0.5 x) t_0 1.0) (* 0.5 (* x t_0)))))
double code(double x, double eps) {
	double t_0 = x * (eps * eps);
	double tmp;
	if (x <= 9.5e-5) {
		tmp = fma((0.5 * x), t_0, 1.0);
	} else {
		tmp = 0.5 * (x * t_0);
	}
	return tmp;
}
# Alternative 11 (Herbie-generated): the two regimes share the subterm
# x * eps^2. The explicit Float64 coercions from the generated form are kept.
function code(x, eps)
	shared = Float64(x * Float64(eps * eps))  # t_0 in the generated code
	if x <= 9.5e-5
		return fma(Float64(0.5 * x), shared, 1.0)
	end
	return Float64(0.5 * Float64(x * shared))
end
code[x_, eps_] := Block[{t$95$0 = N[(x * N[(eps * eps), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[x, 9.5e-5], N[(N[(0.5 * x), $MachinePrecision] * t$95$0 + 1.0), $MachinePrecision], N[(0.5 * N[(x * t$95$0), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
t_0 := x \cdot \left(\varepsilon \cdot \varepsilon\right)\\
\mathbf{if}\;x \leq 9.5 \cdot 10^{-5}:\\
\;\;\;\;\mathsf{fma}\left(0.5 \cdot x, t\_0, 1\right)\\

\mathbf{else}:\\
\;\;\;\;0.5 \cdot \left(x \cdot t\_0\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if x < 9.5000000000000005e-5

    1. Initial program 62.7%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Add Preprocessing
    3. Taylor expanded in x around 0

      \[\leadsto \color{blue}{1 + x \cdot \left(\frac{1}{2} \cdot \left(x \cdot \left(\frac{1}{2} \cdot \left(\left(1 + \frac{1}{\varepsilon}\right) \cdot {\left(\varepsilon - 1\right)}^{2}\right) - \frac{1}{2} \cdot \left({\left(1 + \varepsilon\right)}^{2} \cdot \left(\frac{1}{\varepsilon} - 1\right)\right)\right)\right) + \frac{1}{2} \cdot \left(\left(1 + \frac{1}{\varepsilon}\right) \cdot \left(\varepsilon - 1\right) - -1 \cdot \left(\left(1 + \varepsilon\right) \cdot \left(\frac{1}{\varepsilon} - 1\right)\right)\right)\right)} \]
    4. Applied rewrites92.0%

      \[\leadsto \color{blue}{\mathsf{fma}\left(0.5 \cdot x, \left(\left(-1 - \varepsilon\right) + \frac{\varepsilon + 1}{\varepsilon}\right) + \mathsf{fma}\left(1 + \frac{1}{\varepsilon}, \mathsf{fma}\left(0.5 \cdot x, \left(1 - \varepsilon\right) \cdot \left(1 - \varepsilon\right), -1 + \varepsilon\right), x \cdot \left(0.5 \cdot \left(\left(\left(-1 - \varepsilon\right) + \frac{\varepsilon + 1}{\varepsilon}\right) \cdot \left(-1 - \varepsilon\right)\right)\right)\right), 1\right)} \]
    5. Taylor expanded in eps around inf

      \[\leadsto \mathsf{fma}\left(\frac{1}{2} \cdot x, \color{blue}{{\varepsilon}^{2} \cdot x}, 1\right) \]
    6. Step-by-step derivation
      1. *-commutativeN/A

        \[\leadsto \mathsf{fma}\left(\frac{1}{2} \cdot x, \color{blue}{x \cdot {\varepsilon}^{2}}, 1\right) \]
      2. lower-*.f64N/A

        \[\leadsto \mathsf{fma}\left(\frac{1}{2} \cdot x, \color{blue}{x \cdot {\varepsilon}^{2}}, 1\right) \]
      3. unpow2N/A

        \[\leadsto \mathsf{fma}\left(\frac{1}{2} \cdot x, x \cdot \color{blue}{\left(\varepsilon \cdot \varepsilon\right)}, 1\right) \]
      4. lower-*.f6492.2

        \[\leadsto \mathsf{fma}\left(0.5 \cdot x, x \cdot \color{blue}{\left(\varepsilon \cdot \varepsilon\right)}, 1\right) \]
    7. Applied rewrites92.2%

      \[\leadsto \mathsf{fma}\left(0.5 \cdot x, \color{blue}{x \cdot \left(\varepsilon \cdot \varepsilon\right)}, 1\right) \]

    if 9.5000000000000005e-5 < x

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Add Preprocessing
    3. Taylor expanded in x around 0

      \[\leadsto \color{blue}{1 + x \cdot \left(\frac{1}{2} \cdot \left(x \cdot \left(\frac{1}{2} \cdot \left(\left(1 + \frac{1}{\varepsilon}\right) \cdot {\left(\varepsilon - 1\right)}^{2}\right) - \frac{1}{2} \cdot \left({\left(1 + \varepsilon\right)}^{2} \cdot \left(\frac{1}{\varepsilon} - 1\right)\right)\right)\right) + \frac{1}{2} \cdot \left(\left(1 + \frac{1}{\varepsilon}\right) \cdot \left(\varepsilon - 1\right) - -1 \cdot \left(\left(1 + \varepsilon\right) \cdot \left(\frac{1}{\varepsilon} - 1\right)\right)\right)\right)} \]
    4. Applied rewrites46.4%

      \[\leadsto \color{blue}{\mathsf{fma}\left(0.5 \cdot x, \left(\left(-1 - \varepsilon\right) + \frac{\varepsilon + 1}{\varepsilon}\right) + \mathsf{fma}\left(1 + \frac{1}{\varepsilon}, \mathsf{fma}\left(0.5 \cdot x, \left(1 - \varepsilon\right) \cdot \left(1 - \varepsilon\right), -1 + \varepsilon\right), x \cdot \left(0.5 \cdot \left(\left(\left(-1 - \varepsilon\right) + \frac{\varepsilon + 1}{\varepsilon}\right) \cdot \left(-1 - \varepsilon\right)\right)\right)\right), 1\right)} \]
    5. Taylor expanded in eps around 0

      \[\leadsto \mathsf{fma}\left(\frac{1}{2} \cdot x, \color{blue}{\frac{\frac{-1}{2} \cdot x + \left(\frac{1}{2} \cdot x + \varepsilon \cdot \left(-1 \cdot x + \left(\frac{-1}{2} \cdot x + \left(\frac{1}{2} \cdot x + \varepsilon \cdot \left(x + \left(-1 \cdot x + \varepsilon \cdot x\right)\right)\right)\right)\right)\right)}{\varepsilon}}, 1\right) \]
    6. Step-by-step derivation
      1. lower-/.f64N/A

        \[\leadsto \mathsf{fma}\left(\frac{1}{2} \cdot x, \color{blue}{\frac{\frac{-1}{2} \cdot x + \left(\frac{1}{2} \cdot x + \varepsilon \cdot \left(-1 \cdot x + \left(\frac{-1}{2} \cdot x + \left(\frac{1}{2} \cdot x + \varepsilon \cdot \left(x + \left(-1 \cdot x + \varepsilon \cdot x\right)\right)\right)\right)\right)\right)}{\varepsilon}}, 1\right) \]
    7. Applied rewrites47.9%

      \[\leadsto \mathsf{fma}\left(0.5 \cdot x, \color{blue}{\frac{0 + \varepsilon \cdot \left(\left(-x\right) + \varepsilon \cdot \left(0 + x \cdot \varepsilon\right)\right)}{\varepsilon}}, 1\right) \]
    8. Taylor expanded in eps around inf

      \[\leadsto \color{blue}{\frac{1}{2} \cdot \left({\varepsilon}^{2} \cdot {x}^{2}\right)} \]
    9. Step-by-step derivation
      1. lower-*.f64N/A

        \[\leadsto \color{blue}{\frac{1}{2} \cdot \left({\varepsilon}^{2} \cdot {x}^{2}\right)} \]
      2. *-commutativeN/A

        \[\leadsto \frac{1}{2} \cdot \color{blue}{\left({x}^{2} \cdot {\varepsilon}^{2}\right)} \]
      3. unpow2N/A

        \[\leadsto \frac{1}{2} \cdot \left(\color{blue}{\left(x \cdot x\right)} \cdot {\varepsilon}^{2}\right) \]
      4. associate-*l*N/A

        \[\leadsto \frac{1}{2} \cdot \color{blue}{\left(x \cdot \left(x \cdot {\varepsilon}^{2}\right)\right)} \]
      5. *-commutativeN/A

        \[\leadsto \frac{1}{2} \cdot \left(x \cdot \color{blue}{\left({\varepsilon}^{2} \cdot x\right)}\right) \]
      6. lower-*.f64N/A

        \[\leadsto \frac{1}{2} \cdot \color{blue}{\left(x \cdot \left({\varepsilon}^{2} \cdot x\right)\right)} \]
      7. *-commutativeN/A

        \[\leadsto \frac{1}{2} \cdot \left(x \cdot \color{blue}{\left(x \cdot {\varepsilon}^{2}\right)}\right) \]
      8. lower-*.f64N/A

        \[\leadsto \frac{1}{2} \cdot \left(x \cdot \color{blue}{\left(x \cdot {\varepsilon}^{2}\right)}\right) \]
      9. unpow2N/A

        \[\leadsto \frac{1}{2} \cdot \left(x \cdot \left(x \cdot \color{blue}{\left(\varepsilon \cdot \varepsilon\right)}\right)\right) \]
      10. lower-*.f6472.2

        \[\leadsto 0.5 \cdot \left(x \cdot \left(x \cdot \color{blue}{\left(\varepsilon \cdot \varepsilon\right)}\right)\right) \]
    10. Applied rewrites72.2%

      \[\leadsto \color{blue}{0.5 \cdot \left(x \cdot \left(x \cdot \left(\varepsilon \cdot \varepsilon\right)\right)\right)} \]
  3. Recombined 2 regimes into one program.
  4. Add Preprocessing

Alternative 12: 64.8% accurate, 10.9× speedup

\[\begin{array}{l} \\ \begin{array}{l} t_0 := \mathsf{fma}\left(x, \mathsf{fma}\left(x, 0.5, -1\right), 1\right)\\ \mathbf{if}\;x \leq 520:\\ \;\;\;\;t\_0\\ \mathbf{elif}\;x \leq 5 \cdot 10^{+154}:\\ \;\;\;\;0\\ \mathbf{else}:\\ \;\;\;\;t\_0\\ \end{array} \end{array} \]
; Alternative 12: the same quadratic t_0 = 1 - x + x^2/2 serves both the
; small-x and huge-x regimes, with 0 in between; eps is unused.
(FPCore (x eps)
 :precision binary64
 (let* ((t_0 (fma x (fma x 0.5 -1.0) 1.0)))
   (if (<= x 520.0) t_0 (if (<= x 5e+154) 0.0 t_0))))
double code(double x, double eps) {
	double t_0 = fma(x, fma(x, 0.5, -1.0), 1.0);
	double tmp;
	if (x <= 520.0) {
		tmp = t_0;
	} else if (x <= 5e+154) {
		tmp = 0.0;
	} else {
		tmp = t_0;
	}
	return tmp;
}
function code(x, eps)
	# Quadratic Taylor form 1 + x*(x/2 - 1) via fused multiply-adds.
	# The regime 520 < x <= 5e154 is the constant 0 (see derivation below).
	quad = fma(x, fma(x, 0.5, -1.0), 1.0)
	if x <= 520.0
		return quad
	elseif x <= 5e+154
		return 0.0
	else
		return quad
	end
end
(* Alternative 12: t0 = x*(x*0.5 - 1) + 1 (quadratic Taylor form);
   the regime 520 < x <= 5e154 returns 0 per the derivation below. *)
code[x_, eps_] := Block[{t$95$0 = N[(x * N[(x * 0.5 + -1.0), $MachinePrecision] + 1.0), $MachinePrecision]}, If[LessEqual[x, 520.0], t$95$0, If[LessEqual[x, 5e+154], 0.0, t$95$0]]]
\begin{array}{l}

\\
\begin{array}{l}
t_0 := \mathsf{fma}\left(x, \mathsf{fma}\left(x, 0.5, -1\right), 1\right)\\
\mathbf{if}\;x \leq 520:\\
\;\;\;\;t\_0\\

\mathbf{elif}\;x \leq 5 \cdot 10^{+154}:\\
\;\;\;\;0\\

\mathbf{else}:\\
\;\;\;\;t\_0\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if x < 520 or 5.00000000000000004e154 < x

    1. Initial program 67.8%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Add Preprocessing
    3. Taylor expanded in eps around inf

      \[\leadsto \color{blue}{\frac{1}{2} \cdot \left(e^{\mathsf{neg}\left(x \cdot \left(1 - \varepsilon\right)\right)} - -1 \cdot e^{\mathsf{neg}\left(x \cdot \left(1 + \varepsilon\right)\right)}\right)} \]
    4. Step-by-step derivation
      1. lower-*.f64N/A

        \[\leadsto \color{blue}{\frac{1}{2} \cdot \left(e^{\mathsf{neg}\left(x \cdot \left(1 - \varepsilon\right)\right)} - -1 \cdot e^{\mathsf{neg}\left(x \cdot \left(1 + \varepsilon\right)\right)}\right)} \]
      2. cancel-sign-sub-invN/A

        \[\leadsto \frac{1}{2} \cdot \color{blue}{\left(e^{\mathsf{neg}\left(x \cdot \left(1 - \varepsilon\right)\right)} + \left(\mathsf{neg}\left(-1\right)\right) \cdot e^{\mathsf{neg}\left(x \cdot \left(1 + \varepsilon\right)\right)}\right)} \]
      3. metadata-evalN/A

        \[\leadsto \frac{1}{2} \cdot \left(e^{\mathsf{neg}\left(x \cdot \left(1 - \varepsilon\right)\right)} + \color{blue}{1} \cdot e^{\mathsf{neg}\left(x \cdot \left(1 + \varepsilon\right)\right)}\right) \]
      4. *-lft-identityN/A

        \[\leadsto \frac{1}{2} \cdot \left(e^{\mathsf{neg}\left(x \cdot \left(1 - \varepsilon\right)\right)} + \color{blue}{e^{\mathsf{neg}\left(x \cdot \left(1 + \varepsilon\right)\right)}}\right) \]
      5. lower-+.f64N/A

        \[\leadsto \frac{1}{2} \cdot \color{blue}{\left(e^{\mathsf{neg}\left(x \cdot \left(1 - \varepsilon\right)\right)} + e^{\mathsf{neg}\left(x \cdot \left(1 + \varepsilon\right)\right)}\right)} \]
    5. Applied rewrites — 98.6%

      \[\leadsto \color{blue}{0.5 \cdot \left(e^{\mathsf{fma}\left(x, \varepsilon, -x\right)} + e^{x \cdot \left(-1 - \varepsilon\right)}\right)} \]
    6. Taylor expanded in eps around 0

      \[\leadsto \color{blue}{\frac{1}{2} \cdot \left(e^{\mathsf{neg}\left(x\right)} + e^{-1 \cdot x}\right)} \]
    7. Step-by-step derivation
      1. distribute-rgt-inN/A

        \[\leadsto \color{blue}{e^{\mathsf{neg}\left(x\right)} \cdot \frac{1}{2} + e^{-1 \cdot x} \cdot \frac{1}{2}} \]
      2. neg-mul-1N/A

        \[\leadsto e^{\mathsf{neg}\left(x\right)} \cdot \frac{1}{2} + e^{\color{blue}{\mathsf{neg}\left(x\right)}} \cdot \frac{1}{2} \]
      3. distribute-lft-outN/A

        \[\leadsto \color{blue}{e^{\mathsf{neg}\left(x\right)} \cdot \left(\frac{1}{2} + \frac{1}{2}\right)} \]
      4. metadata-evalN/A

        \[\leadsto e^{\mathsf{neg}\left(x\right)} \cdot \color{blue}{1} \]
      5. lower-*.f64N/A

        \[\leadsto \color{blue}{e^{\mathsf{neg}\left(x\right)} \cdot 1} \]
      6. neg-mul-1N/A

        \[\leadsto e^{\color{blue}{-1 \cdot x}} \cdot 1 \]
      7. lower-exp.f64N/A

        \[\leadsto \color{blue}{e^{-1 \cdot x}} \cdot 1 \]
      8. neg-mul-1N/A

        \[\leadsto e^{\color{blue}{\mathsf{neg}\left(x\right)}} \cdot 1 \]
      9. lower-neg.f6472.8

        \[\leadsto e^{\color{blue}{-x}} \cdot 1 \]
    8. Applied rewrites72.8%

      \[\leadsto \color{blue}{e^{-x} \cdot 1} \]
    9. Taylor expanded in x around 0

      \[\leadsto \color{blue}{1 + x \cdot \left(\frac{1}{2} \cdot x - 1\right)} \]
    10. Step-by-step derivation
      1. +-commutativeN/A

        \[\leadsto \color{blue}{x \cdot \left(\frac{1}{2} \cdot x - 1\right) + 1} \]
      2. lower-fma.f64N/A

        \[\leadsto \color{blue}{\mathsf{fma}\left(x, \frac{1}{2} \cdot x - 1, 1\right)} \]
      3. sub-negN/A

        \[\leadsto \mathsf{fma}\left(x, \color{blue}{\frac{1}{2} \cdot x + \left(\mathsf{neg}\left(1\right)\right)}, 1\right) \]
      4. *-commutativeN/A

        \[\leadsto \mathsf{fma}\left(x, \color{blue}{x \cdot \frac{1}{2}} + \left(\mathsf{neg}\left(1\right)\right), 1\right) \]
      5. metadata-evalN/A

        \[\leadsto \mathsf{fma}\left(x, x \cdot \frac{1}{2} + \color{blue}{-1}, 1\right) \]
      6. lower-fma.f6469.8

        \[\leadsto \mathsf{fma}\left(x, \color{blue}{\mathsf{fma}\left(x, 0.5, -1\right)}, 1\right) \]
    11. Applied rewrites69.8%

      \[\leadsto \color{blue}{\mathsf{fma}\left(x, \mathsf{fma}\left(x, 0.5, -1\right), 1\right)} \]

    if 520 < x < 5.00000000000000004e154

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Add Preprocessing
    3. Taylor expanded in x around 0

      \[\leadsto \color{blue}{1 + x \cdot \left(\frac{1}{2} \cdot \left(x \cdot \left(\frac{1}{2} \cdot \left(\left(1 + \frac{1}{\varepsilon}\right) \cdot {\left(\varepsilon - 1\right)}^{2}\right) - \frac{1}{2} \cdot \left({\left(1 + \varepsilon\right)}^{2} \cdot \left(\frac{1}{\varepsilon} - 1\right)\right)\right)\right) + \frac{1}{2} \cdot \left(\left(1 + \frac{1}{\varepsilon}\right) \cdot \left(\varepsilon - 1\right) - -1 \cdot \left(\left(1 + \varepsilon\right) \cdot \left(\frac{1}{\varepsilon} - 1\right)\right)\right)\right)} \]
    4. Applied rewrites37.9%

      \[\leadsto \color{blue}{\mathsf{fma}\left(0.5 \cdot x, \left(\left(-1 - \varepsilon\right) + \frac{\varepsilon + 1}{\varepsilon}\right) + \mathsf{fma}\left(1 + \frac{1}{\varepsilon}, \mathsf{fma}\left(0.5 \cdot x, \left(1 - \varepsilon\right) \cdot \left(1 - \varepsilon\right), -1 + \varepsilon\right), x \cdot \left(0.5 \cdot \left(\left(\left(-1 - \varepsilon\right) + \frac{\varepsilon + 1}{\varepsilon}\right) \cdot \left(-1 - \varepsilon\right)\right)\right)\right), 1\right)} \]
    5. Taylor expanded in eps around 0

      \[\leadsto \color{blue}{\frac{1}{2} \cdot \frac{x \cdot \left(\frac{-1}{2} \cdot x + \frac{1}{2} \cdot x\right)}{\varepsilon}} \]
    6. Step-by-step derivation
      1. associate-*r/N/A

        \[\leadsto \color{blue}{\frac{\frac{1}{2} \cdot \left(x \cdot \left(\frac{-1}{2} \cdot x + \frac{1}{2} \cdot x\right)\right)}{\varepsilon}} \]
      2. distribute-rgt-outN/A

        \[\leadsto \frac{\frac{1}{2} \cdot \left(x \cdot \color{blue}{\left(x \cdot \left(\frac{-1}{2} + \frac{1}{2}\right)\right)}\right)}{\varepsilon} \]
      3. metadata-evalN/A

        \[\leadsto \frac{\frac{1}{2} \cdot \left(x \cdot \left(x \cdot \color{blue}{0}\right)\right)}{\varepsilon} \]
      4. mul0-rgtN/A

        \[\leadsto \frac{\frac{1}{2} \cdot \left(x \cdot \color{blue}{0}\right)}{\varepsilon} \]
      5. mul0-rgtN/A

        \[\leadsto \frac{\frac{1}{2} \cdot \color{blue}{0}}{\varepsilon} \]
      6. metadata-evalN/A

        \[\leadsto \frac{\color{blue}{0}}{\varepsilon} \]
      7. +-inversesN/A

        \[\leadsto \frac{\color{blue}{e^{\mathsf{neg}\left(x\right)} - e^{\mathsf{neg}\left(x\right)}}}{\varepsilon} \]
      8. div-subN/A

        \[\leadsto \color{blue}{\frac{e^{\mathsf{neg}\left(x\right)}}{\varepsilon} - \frac{e^{\mathsf{neg}\left(x\right)}}{\varepsilon}} \]
      9. +-inverses47.0

        \[\leadsto \color{blue}{0} \]
    7. Applied rewrites47.0%

      \[\leadsto \color{blue}{0} \]
  3. Recombined 2 regimes into one program.
  4. Add Preprocessing

Alternative 13: 57.7% accurate, 27.3× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq 1:\\ \;\;\;\;1 - x\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \end{array} \]
;; Alternative 13 (57.7% accurate, 27.3x speedup): first-order Taylor
;; form 1 - x for x <= 1, else the constant 0.
(FPCore (x eps) :precision binary64 (if (<= x 1.0) (- 1.0 x) 0.0))
double code(double x, double eps) {
	/* First-order Taylor form: 1 - x on x <= 1, and 0 elsewhere.
	 * eps is unused in this alternative. */
	return (x <= 1.0) ? (1.0 - x) : 0.0;
}
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    ! First-order Taylor form: 1 - x on x <= 1, and 0 elsewhere.
    ! eps is unused in this alternative.
    code = merge(1.0d0 - x, 0.0d0, x <= 1.0d0)
end function
public static double code(double x, double eps) {
	// First-order Taylor form: 1 - x on x <= 1, and 0 elsewhere.
	// eps is unused in this alternative.
	return (x <= 1.0) ? (1.0 - x) : 0.0;
}
def code(x, eps):
	# First-order Taylor form: 1 - x on x <= 1, and 0 elsewhere.
	# eps is unused in this alternative.
	return 1.0 - x if x <= 1.0 else 0.0
function code(x, eps)
	# First-order Taylor form: 1 - x on x <= 1, and 0 elsewhere.
	# eps is unused in this alternative.
	return x <= 1.0 ? Float64(1.0 - x) : 0.0
end
function tmp_2 = code(x, eps)
	% First-order Taylor form: 1 - x on x <= 1, and 0 elsewhere.
	% eps is unused in this alternative.
	if (x <= 1.0)
		tmp_2 = 1.0 - x;
	else
		tmp_2 = 0.0;
	end
end
(* Alternative 13: first-order Taylor form 1 - x on x <= 1, else 0. *)
code[x_, eps_] := If[LessEqual[x, 1.0], N[(1.0 - x), $MachinePrecision], 0.0]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x \leq 1:\\
\;\;\;\;1 - x\\

\mathbf{else}:\\
\;\;\;\;0\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if x < 1

    1. Initial program 62.9%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Add Preprocessing
    3. Taylor expanded in eps around inf

      \[\leadsto \color{blue}{\frac{1}{2} \cdot \left(e^{\mathsf{neg}\left(x \cdot \left(1 - \varepsilon\right)\right)} - -1 \cdot e^{\mathsf{neg}\left(x \cdot \left(1 + \varepsilon\right)\right)}\right)} \]
    4. Step-by-step derivation
      1. lower-*.f64N/A

        \[\leadsto \color{blue}{\frac{1}{2} \cdot \left(e^{\mathsf{neg}\left(x \cdot \left(1 - \varepsilon\right)\right)} - -1 \cdot e^{\mathsf{neg}\left(x \cdot \left(1 + \varepsilon\right)\right)}\right)} \]
      2. cancel-sign-sub-invN/A

        \[\leadsto \frac{1}{2} \cdot \color{blue}{\left(e^{\mathsf{neg}\left(x \cdot \left(1 - \varepsilon\right)\right)} + \left(\mathsf{neg}\left(-1\right)\right) \cdot e^{\mathsf{neg}\left(x \cdot \left(1 + \varepsilon\right)\right)}\right)} \]
      3. metadata-evalN/A

        \[\leadsto \frac{1}{2} \cdot \left(e^{\mathsf{neg}\left(x \cdot \left(1 - \varepsilon\right)\right)} + \color{blue}{1} \cdot e^{\mathsf{neg}\left(x \cdot \left(1 + \varepsilon\right)\right)}\right) \]
      4. *-lft-identityN/A

        \[\leadsto \frac{1}{2} \cdot \left(e^{\mathsf{neg}\left(x \cdot \left(1 - \varepsilon\right)\right)} + \color{blue}{e^{\mathsf{neg}\left(x \cdot \left(1 + \varepsilon\right)\right)}}\right) \]
      5. lower-+.f64N/A

        \[\leadsto \frac{1}{2} \cdot \color{blue}{\left(e^{\mathsf{neg}\left(x \cdot \left(1 - \varepsilon\right)\right)} + e^{\mathsf{neg}\left(x \cdot \left(1 + \varepsilon\right)\right)}\right)} \]
    5. Applied rewrites98.4%

      \[\leadsto \color{blue}{0.5 \cdot \left(e^{\mathsf{fma}\left(x, \varepsilon, -x\right)} + e^{x \cdot \left(-1 - \varepsilon\right)}\right)} \]
    6. Taylor expanded in eps around 0

      \[\leadsto \color{blue}{\frac{1}{2} \cdot \left(e^{\mathsf{neg}\left(x\right)} + e^{-1 \cdot x}\right)} \]
    7. Step-by-step derivation
      1. distribute-rgt-inN/A

        \[\leadsto \color{blue}{e^{\mathsf{neg}\left(x\right)} \cdot \frac{1}{2} + e^{-1 \cdot x} \cdot \frac{1}{2}} \]
      2. neg-mul-1N/A

        \[\leadsto e^{\mathsf{neg}\left(x\right)} \cdot \frac{1}{2} + e^{\color{blue}{\mathsf{neg}\left(x\right)}} \cdot \frac{1}{2} \]
      3. distribute-lft-outN/A

        \[\leadsto \color{blue}{e^{\mathsf{neg}\left(x\right)} \cdot \left(\frac{1}{2} + \frac{1}{2}\right)} \]
      4. metadata-evalN/A

        \[\leadsto e^{\mathsf{neg}\left(x\right)} \cdot \color{blue}{1} \]
      5. lower-*.f64N/A

        \[\leadsto \color{blue}{e^{\mathsf{neg}\left(x\right)} \cdot 1} \]
      6. neg-mul-1N/A

        \[\leadsto e^{\color{blue}{-1 \cdot x}} \cdot 1 \]
      7. lower-exp.f64N/A

        \[\leadsto \color{blue}{e^{-1 \cdot x}} \cdot 1 \]
      8. neg-mul-1N/A

        \[\leadsto e^{\color{blue}{\mathsf{neg}\left(x\right)}} \cdot 1 \]
      9. lower-neg.f6477.5

        \[\leadsto e^{\color{blue}{-x}} \cdot 1 \]
    8. Applied rewrites77.5%

      \[\leadsto \color{blue}{e^{-x} \cdot 1} \]
    9. Taylor expanded in x around 0

      \[\leadsto \color{blue}{1 + -1 \cdot x} \]
    10. Step-by-step derivation
      1. neg-mul-1N/A

        \[\leadsto 1 + \color{blue}{\left(\mathsf{neg}\left(x\right)\right)} \]
      2. unsub-negN/A

        \[\leadsto \color{blue}{1 - x} \]
      3. lower--.f6460.0

        \[\leadsto \color{blue}{1 - x} \]
    11. Applied rewrites60.0%

      \[\leadsto \color{blue}{1 - x} \]

    if 1 < x

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Add Preprocessing
    3. Taylor expanded in x around 0

      \[\leadsto \color{blue}{1 + x \cdot \left(\frac{1}{2} \cdot \left(x \cdot \left(\frac{1}{2} \cdot \left(\left(1 + \frac{1}{\varepsilon}\right) \cdot {\left(\varepsilon - 1\right)}^{2}\right) - \frac{1}{2} \cdot \left({\left(1 + \varepsilon\right)}^{2} \cdot \left(\frac{1}{\varepsilon} - 1\right)\right)\right)\right) + \frac{1}{2} \cdot \left(\left(1 + \frac{1}{\varepsilon}\right) \cdot \left(\varepsilon - 1\right) - -1 \cdot \left(\left(1 + \varepsilon\right) \cdot \left(\frac{1}{\varepsilon} - 1\right)\right)\right)\right)} \]
    4. Applied rewrites47.0%

      \[\leadsto \color{blue}{\mathsf{fma}\left(0.5 \cdot x, \left(\left(-1 - \varepsilon\right) + \frac{\varepsilon + 1}{\varepsilon}\right) + \mathsf{fma}\left(1 + \frac{1}{\varepsilon}, \mathsf{fma}\left(0.5 \cdot x, \left(1 - \varepsilon\right) \cdot \left(1 - \varepsilon\right), -1 + \varepsilon\right), x \cdot \left(0.5 \cdot \left(\left(\left(-1 - \varepsilon\right) + \frac{\varepsilon + 1}{\varepsilon}\right) \cdot \left(-1 - \varepsilon\right)\right)\right)\right), 1\right)} \]
    5. Taylor expanded in eps around 0

      \[\leadsto \color{blue}{\frac{1}{2} \cdot \frac{x \cdot \left(\frac{-1}{2} \cdot x + \frac{1}{2} \cdot x\right)}{\varepsilon}} \]
    6. Step-by-step derivation
      1. associate-*r/N/A

        \[\leadsto \color{blue}{\frac{\frac{1}{2} \cdot \left(x \cdot \left(\frac{-1}{2} \cdot x + \frac{1}{2} \cdot x\right)\right)}{\varepsilon}} \]
      2. distribute-rgt-outN/A

        \[\leadsto \frac{\frac{1}{2} \cdot \left(x \cdot \color{blue}{\left(x \cdot \left(\frac{-1}{2} + \frac{1}{2}\right)\right)}\right)}{\varepsilon} \]
      3. metadata-evalN/A

        \[\leadsto \frac{\frac{1}{2} \cdot \left(x \cdot \left(x \cdot \color{blue}{0}\right)\right)}{\varepsilon} \]
      4. mul0-rgtN/A

        \[\leadsto \frac{\frac{1}{2} \cdot \left(x \cdot \color{blue}{0}\right)}{\varepsilon} \]
      5. mul0-rgtN/A

        \[\leadsto \frac{\frac{1}{2} \cdot \color{blue}{0}}{\varepsilon} \]
      6. metadata-evalN/A

        \[\leadsto \frac{\color{blue}{0}}{\varepsilon} \]
      7. +-inversesN/A

        \[\leadsto \frac{\color{blue}{e^{\mathsf{neg}\left(x\right)} - e^{\mathsf{neg}\left(x\right)}}}{\varepsilon} \]
      8. div-subN/A

        \[\leadsto \color{blue}{\frac{e^{\mathsf{neg}\left(x\right)}}{\varepsilon} - \frac{e^{\mathsf{neg}\left(x\right)}}{\varepsilon}} \]
      9. +-inverses45.0

        \[\leadsto \color{blue}{0} \]
    7. Applied rewrites45.0%

      \[\leadsto \color{blue}{0} \]
  3. Recombined 2 regimes into one program.
  4. Add Preprocessing

Alternative 14: 57.7% accurate, 38.9× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq 520:\\ \;\;\;\;1\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \end{array} \]
;; Alternative 14 (57.7% accurate, 38.9x speedup): constant regimes —
;; 1 for x <= 520, 0 beyond.
(FPCore (x eps) :precision binary64 (if (<= x 520.0) 1.0 0.0))
double code(double x, double eps) {
	/* Constant regimes: 1 for x <= 520, 0 beyond.
	 * eps is unused in this alternative. */
	return (x <= 520.0) ? 1.0 : 0.0;
}
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    ! Constant regimes: 1 for x <= 520, 0 beyond.
    ! eps is unused in this alternative.
    code = merge(1.0d0, 0.0d0, x <= 520.0d0)
end function
public static double code(double x, double eps) {
	// Constant regimes: 1 for x <= 520, 0 beyond.
	// eps is unused in this alternative.
	return (x <= 520.0) ? 1.0 : 0.0;
}
def code(x, eps):
	# Constant regimes: 1 for x <= 520, 0 beyond.
	# eps is unused in this alternative.
	return 1.0 if x <= 520.0 else 0.0
function code(x, eps)
	# Constant regimes: 1 for x <= 520, 0 beyond.
	# eps is unused in this alternative.
	return x <= 520.0 ? 1.0 : 0.0
end
function tmp_2 = code(x, eps)
	% Constant regimes: 1 for x <= 520, 0 beyond.
	% eps is unused in this alternative.
	if (x <= 520.0)
		tmp_2 = 1.0;
	else
		tmp_2 = 0.0;
	end
end
(* Alternative 14: constant regimes — 1 for x <= 520, 0 beyond. *)
code[x_, eps_] := If[LessEqual[x, 520.0], 1.0, 0.0]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x \leq 520:\\
\;\;\;\;1\\

\mathbf{else}:\\
\;\;\;\;0\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if x < 520

    1. Initial program 62.9%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Add Preprocessing
    3. Taylor expanded in x around 0

      \[\leadsto \color{blue}{1} \]
    4. Step-by-step derivation
      1. Applied rewrites — 59.9%

        \[\leadsto \color{blue}{1} \]

      if 520 < x

      1. Initial program 100.0%

        \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      2. Add Preprocessing
      3. Taylor expanded in x around 0

        \[\leadsto \color{blue}{1 + x \cdot \left(\frac{1}{2} \cdot \left(x \cdot \left(\frac{1}{2} \cdot \left(\left(1 + \frac{1}{\varepsilon}\right) \cdot {\left(\varepsilon - 1\right)}^{2}\right) - \frac{1}{2} \cdot \left({\left(1 + \varepsilon\right)}^{2} \cdot \left(\frac{1}{\varepsilon} - 1\right)\right)\right)\right) + \frac{1}{2} \cdot \left(\left(1 + \frac{1}{\varepsilon}\right) \cdot \left(\varepsilon - 1\right) - -1 \cdot \left(\left(1 + \varepsilon\right) \cdot \left(\frac{1}{\varepsilon} - 1\right)\right)\right)\right)} \]
      4. Applied rewrites47.0%

        \[\leadsto \color{blue}{\mathsf{fma}\left(0.5 \cdot x, \left(\left(-1 - \varepsilon\right) + \frac{\varepsilon + 1}{\varepsilon}\right) + \mathsf{fma}\left(1 + \frac{1}{\varepsilon}, \mathsf{fma}\left(0.5 \cdot x, \left(1 - \varepsilon\right) \cdot \left(1 - \varepsilon\right), -1 + \varepsilon\right), x \cdot \left(0.5 \cdot \left(\left(\left(-1 - \varepsilon\right) + \frac{\varepsilon + 1}{\varepsilon}\right) \cdot \left(-1 - \varepsilon\right)\right)\right)\right), 1\right)} \]
      5. Taylor expanded in eps around 0

        \[\leadsto \color{blue}{\frac{1}{2} \cdot \frac{x \cdot \left(\frac{-1}{2} \cdot x + \frac{1}{2} \cdot x\right)}{\varepsilon}} \]
      6. Step-by-step derivation
        1. associate-*r/N/A

          \[\leadsto \color{blue}{\frac{\frac{1}{2} \cdot \left(x \cdot \left(\frac{-1}{2} \cdot x + \frac{1}{2} \cdot x\right)\right)}{\varepsilon}} \]
        2. distribute-rgt-outN/A

          \[\leadsto \frac{\frac{1}{2} \cdot \left(x \cdot \color{blue}{\left(x \cdot \left(\frac{-1}{2} + \frac{1}{2}\right)\right)}\right)}{\varepsilon} \]
        3. metadata-evalN/A

          \[\leadsto \frac{\frac{1}{2} \cdot \left(x \cdot \left(x \cdot \color{blue}{0}\right)\right)}{\varepsilon} \]
        4. mul0-rgtN/A

          \[\leadsto \frac{\frac{1}{2} \cdot \left(x \cdot \color{blue}{0}\right)}{\varepsilon} \]
        5. mul0-rgtN/A

          \[\leadsto \frac{\frac{1}{2} \cdot \color{blue}{0}}{\varepsilon} \]
        6. metadata-evalN/A

          \[\leadsto \frac{\color{blue}{0}}{\varepsilon} \]
        7. +-inversesN/A

          \[\leadsto \frac{\color{blue}{e^{\mathsf{neg}\left(x\right)} - e^{\mathsf{neg}\left(x\right)}}}{\varepsilon} \]
        8. div-subN/A

          \[\leadsto \color{blue}{\frac{e^{\mathsf{neg}\left(x\right)}}{\varepsilon} - \frac{e^{\mathsf{neg}\left(x\right)}}{\varepsilon}} \]
        9. +-inverses45.0

          \[\leadsto \color{blue}{0} \]
      7. Applied rewrites45.0%

        \[\leadsto \color{blue}{0} \]
    5. Recombined 2 regimes into one program.
    6. Add Preprocessing

    Alternative 15: 15.9% accurate, 273.0× speedup?

    \[\begin{array}{l} \\ 0 \end{array} \]
    ;; Alternative 15 (15.9% accurate, 273x speedup): constant-0 approximation.
    (FPCore (x eps) :precision binary64 0.0)
    double code(double x, double eps) {
    	/* Constant-0 approximation (Alternative 15); both arguments unused. */
    	return 0.0;
    }
    
    real(8) function code(x, eps)
        real(8), intent (in) :: x
        real(8), intent (in) :: eps
        ! Constant-0 approximation (Alternative 15); both arguments unused.
        code = 0.0d0
    end function
    
    public static double code(double x, double eps) {
    	// Constant-0 approximation (Alternative 15); both arguments unused.
    	return 0.0;
    }
    
    def code(x, eps):
    	# Constant-0 approximation (Alternative 15); both arguments unused.
    	return 0.0
    
    function code(x, eps)
    	# Constant-0 approximation (Alternative 15); both arguments unused.
    	return 0.0
    end
    
    function tmp = code(x, eps)
    	% Constant-0 approximation (Alternative 15); both arguments unused.
    	tmp = 0.0;
    end
    
    (* Constant-0 approximation (Alternative 15); both arguments unused. *)
    code[x_, eps_] := 0.0
    
    \begin{array}{l}
    
    \\
    0
    \end{array}
    
    Derivation
    1. Initial program 72.7%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Add Preprocessing
    3. Taylor expanded in x around 0

      \[\leadsto \color{blue}{1 + x \cdot \left(\frac{1}{2} \cdot \left(x \cdot \left(\frac{1}{2} \cdot \left(\left(1 + \frac{1}{\varepsilon}\right) \cdot {\left(\varepsilon - 1\right)}^{2}\right) - \frac{1}{2} \cdot \left({\left(1 + \varepsilon\right)}^{2} \cdot \left(\frac{1}{\varepsilon} - 1\right)\right)\right)\right) + \frac{1}{2} \cdot \left(\left(1 + \frac{1}{\varepsilon}\right) \cdot \left(\varepsilon - 1\right) - -1 \cdot \left(\left(1 + \varepsilon\right) \cdot \left(\frac{1}{\varepsilon} - 1\right)\right)\right)\right)} \]
    4. Applied rewrites79.7%

      \[\leadsto \color{blue}{\mathsf{fma}\left(0.5 \cdot x, \left(\left(-1 - \varepsilon\right) + \frac{\varepsilon + 1}{\varepsilon}\right) + \mathsf{fma}\left(1 + \frac{1}{\varepsilon}, \mathsf{fma}\left(0.5 \cdot x, \left(1 - \varepsilon\right) \cdot \left(1 - \varepsilon\right), -1 + \varepsilon\right), x \cdot \left(0.5 \cdot \left(\left(\left(-1 - \varepsilon\right) + \frac{\varepsilon + 1}{\varepsilon}\right) \cdot \left(-1 - \varepsilon\right)\right)\right)\right), 1\right)} \]
    5. Taylor expanded in eps around 0

      \[\leadsto \color{blue}{\frac{1}{2} \cdot \frac{x \cdot \left(\frac{-1}{2} \cdot x + \frac{1}{2} \cdot x\right)}{\varepsilon}} \]
    6. Step-by-step derivation
      1. associate-*r/N/A

        \[\leadsto \color{blue}{\frac{\frac{1}{2} \cdot \left(x \cdot \left(\frac{-1}{2} \cdot x + \frac{1}{2} \cdot x\right)\right)}{\varepsilon}} \]
      2. distribute-rgt-outN/A

        \[\leadsto \frac{\frac{1}{2} \cdot \left(x \cdot \color{blue}{\left(x \cdot \left(\frac{-1}{2} + \frac{1}{2}\right)\right)}\right)}{\varepsilon} \]
      3. metadata-evalN/A

        \[\leadsto \frac{\frac{1}{2} \cdot \left(x \cdot \left(x \cdot \color{blue}{0}\right)\right)}{\varepsilon} \]
      4. mul0-rgtN/A

        \[\leadsto \frac{\frac{1}{2} \cdot \left(x \cdot \color{blue}{0}\right)}{\varepsilon} \]
      5. mul0-rgtN/A

        \[\leadsto \frac{\frac{1}{2} \cdot \color{blue}{0}}{\varepsilon} \]
      6. metadata-evalN/A

        \[\leadsto \frac{\color{blue}{0}}{\varepsilon} \]
      7. +-inversesN/A

        \[\leadsto \frac{\color{blue}{e^{\mathsf{neg}\left(x\right)} - e^{\mathsf{neg}\left(x\right)}}}{\varepsilon} \]
      8. div-subN/A

        \[\leadsto \color{blue}{\frac{e^{\mathsf{neg}\left(x\right)}}{\varepsilon} - \frac{e^{\mathsf{neg}\left(x\right)}}{\varepsilon}} \]
      9. +-inverses13.8

        \[\leadsto \color{blue}{0} \]
    7. Applied rewrites13.8%

      \[\leadsto \color{blue}{0} \]
    8. Add Preprocessing

    Reproduce

    ?
    herbie shell --seed 2024216 
;; Original input program, for reproducing this report with the
;; `herbie shell` command shown above.
(FPCore (x eps)
  :name "NMSE Section 6.1 mentioned, A"
  :precision binary64
  (/ (- (* (+ 1.0 (/ 1.0 eps)) (exp (- (* (- 1.0 eps) x)))) (* (- (/ 1.0 eps) 1.0) (exp (- (* (+ 1.0 eps) x))))) 2.0))