math.exp on complex, imaginary part

Percentage Accurate: 100.0% → 100.0%
Time: 1.9s
Alternatives: 10
Speedup: 1.0×

Specification

?
\[e^{re} \cdot \sin im \]
; Specification: imaginary part of complex exp — e^re * sin(im), in binary64.
(FPCore (re im)
  :precision binary64
  (* (exp re) (sin im)))
double code(double re, double im) {
	return exp(re) * sin(im);
}
! Imaginary part of exp(re + i*im): e^re * sin(im), in double precision.
real(8) function code(re, im)
use fmin_fmax_functions
    real(8), intent (in) :: re
    real(8), intent (in) :: im
    code = exp(re) * sin(im)
end function
/** Imaginary part of exp(re + i*im): e^re * sin(im). */
public static double code(double re, double im) {
	final double growth = Math.exp(re);
	final double oscillation = Math.sin(im);
	return growth * oscillation;
}
def code(re, im):
	"""Imaginary part of exp(re + 1j*im), computed as e**re * sin(im)."""
	growth = math.exp(re)
	oscillation = math.sin(im)
	return growth * oscillation
# Imaginary part of exp(re + i*im): e^re * sin(im), rounded to Float64.
function code(re, im)
	growth = exp(re)
	oscillation = sin(im)
	return Float64(growth * oscillation)
end
% Imaginary part of exp(re + i*im): e^re * sin(im).
function tmp = code(re, im)
	tmp = exp(re) * sin(im);
end
code[re_, im_] := N[(N[Exp[re], $MachinePrecision] * N[Sin[im], $MachinePrecision]), $MachinePrecision]
e^{re} \cdot \sin im

Local Percentage Accuracy vs ?

The average percentage accuracy by input value. The horizontal axis shows the value of an input variable; the variable is chosen in the title. The vertical axis is accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion. These can be toggled with buttons below the plot. The line is an average, while dots represent individual samples.

Accuracy vs Speed?

Herbie found 10 alternatives:

Alternative | Accuracy | Speedup
The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed-accuracy tradeoffs.

Initial Program: 100.0% accurate, 1.0× speedup?

\[e^{re} \cdot \sin im \]
; Initial program, repeated in the report body: e^re * sin(im) in binary64.
(FPCore (re im)
  :precision binary64
  (* (exp re) (sin im)))
/* Initial program (repeated listing): imaginary part of exp(re + i*im) = e^re * sin(im). */
double code(double re, double im) {
	return exp(re) * sin(im);
}
! Initial program (repeated listing): e^re * sin(im) in double precision.
real(8) function code(re, im)
use fmin_fmax_functions
    real(8), intent (in) :: re
    real(8), intent (in) :: im
    code = exp(re) * sin(im)
end function
/** Initial program (repeated listing): e^re * sin(im). */
public static double code(double re, double im) {
	return Math.exp(re) * Math.sin(im);
}
# Initial program (repeated listing): e**re * sin(im).
def code(re, im):
	return math.exp(re) * math.sin(im)
# Initial program (repeated listing): e^re * sin(im), rounded to Float64.
function code(re, im)
	return Float64(exp(re) * sin(im))
end
% Initial program (repeated listing): e^re * sin(im).
function tmp = code(re, im)
	tmp = exp(re) * sin(im);
end
code[re_, im_] := N[(N[Exp[re], $MachinePrecision] * N[Sin[im], $MachinePrecision]), $MachinePrecision]
e^{re} \cdot \sin im

Alternative 1: 99.0% accurate, 0.2× speedup?

\[\begin{array}{l} t_0 := \sin \left(\left|im\right|\right)\\ t_1 := \left(1 + re\right) \cdot t\_0\\ t_2 := e^{re} \cdot t\_0\\ t_3 := \left|im\right| \cdot e^{re}\\ \mathsf{copysign}\left(1, im\right) \cdot \begin{array}{l} \mathbf{if}\;t\_2 \leq -\infty:\\ \;\;\;\;e^{re} \cdot \mathsf{fma}\left(\left(\left|im\right| \cdot -0.16666666666666666\right) \cdot \left|im\right|, \left|im\right|, \left|im\right|\right)\\ \mathbf{elif}\;t\_2 \leq -0.05:\\ \;\;\;\;t\_1\\ \mathbf{elif}\;t\_2 \leq 2 \cdot 10^{-165}:\\ \;\;\;\;t\_3\\ \mathbf{elif}\;t\_2 \leq 1:\\ \;\;\;\;t\_1\\ \mathbf{else}:\\ \;\;\;\;t\_3\\ \end{array} \end{array} \]
; Alternative 1: regime split on t_2 = e^re * sin(|im|); the sign is
; restored at the end via copysign (valid since e^re*sin(im) is odd in im).
(FPCore (re im)
  :precision binary64
  (let* ((t_0 (sin (fabs im)))
       (t_1 (* (+ 1.0 re) t_0))
       (t_2 (* (exp re) t_0))
       (t_3 (* (fabs im) (exp re))))
  (*
   (copysign 1.0 im)
   (if (<= t_2 (- INFINITY))
     (*
      (exp re)
      (fma
       (* (* (fabs im) -0.16666666666666666) (fabs im))
       (fabs im)
       (fabs im)))
     (if (<= t_2 -0.05)
       t_1
       (if (<= t_2 2e-165) t_3 (if (<= t_2 1.0) t_1 t_3)))))))
/*
 * Herbie alternative 1 (99.0% accurate, 0.2x speedup): splits the input
 * into regimes keyed on t_2 = e^re * sin(|im|).  Works on |im| and
 * restores the sign at the end via copysign (e^re * sin(im) is odd in im).
 */
double code(double re, double im) {
	double t_0 = sin(fabs(im));
	/* t_1: (1 + re) is the first-order Taylor expansion of e^re (see derivation). */
	double t_1 = (1.0 + re) * t_0;
	double t_2 = exp(re) * t_0;
	/* t_3: small-|im| Taylor form im * e^re. */
	double t_3 = fabs(im) * exp(re);
	double tmp;
	/* t_2 <= -inf only when the product overflows to -infinity;
	 * that regime uses the cubic Taylor polynomial of sin (coefficient -1/6). */
	if (t_2 <= -((double) INFINITY)) {
		tmp = exp(re) * fma(((fabs(im) * -0.16666666666666666) * fabs(im)), fabs(im), fabs(im));
	} else if (t_2 <= -0.05) {
		tmp = t_1;
	} else if (t_2 <= 2e-165) {
		tmp = t_3;
	} else if (t_2 <= 1.0) {
		tmp = t_1;
	} else {
		tmp = t_3;
	}
	return copysign(1.0, im) * tmp;
}
# Herbie alternative 1 (99.0% accurate): regime split on t_2 = e^re * sin(|im|);
# sign restored at the end via copysign (e^re * sin(im) is odd in im).
function code(re, im)
	t_0 = sin(abs(im))
	# (1 + re) is the first-order Taylor expansion of e^re (see derivation).
	t_1 = Float64(Float64(1.0 + re) * t_0)
	t_2 = Float64(exp(re) * t_0)
	# Small-|im| Taylor form: |im| * e^re.
	t_3 = Float64(abs(im) * exp(re))
	tmp = 0.0
	# Overflow-to--Inf regime: cubic Taylor polynomial of sin (coefficient -1/6).
	if (t_2 <= Float64(-Inf))
		tmp = Float64(exp(re) * fma(Float64(Float64(abs(im) * -0.16666666666666666) * abs(im)), abs(im), abs(im)));
	elseif (t_2 <= -0.05)
		tmp = t_1;
	elseif (t_2 <= 2e-165)
		tmp = t_3;
	elseif (t_2 <= 1.0)
		tmp = t_1;
	else
		tmp = t_3;
	end
	return Float64(copysign(1.0, im) * tmp)
end
code[re_, im_] := Block[{t$95$0 = N[Sin[N[Abs[im], $MachinePrecision]], $MachinePrecision]}, Block[{t$95$1 = N[(N[(1.0 + re), $MachinePrecision] * t$95$0), $MachinePrecision]}, Block[{t$95$2 = N[(N[Exp[re], $MachinePrecision] * t$95$0), $MachinePrecision]}, Block[{t$95$3 = N[(N[Abs[im], $MachinePrecision] * N[Exp[re], $MachinePrecision]), $MachinePrecision]}, N[(N[With[{TMP1 = Abs[1.0], TMP2 = Sign[im]}, TMP1 * If[TMP2 == 0, 1, TMP2]], $MachinePrecision] * If[LessEqual[t$95$2, (-Infinity)], N[(N[Exp[re], $MachinePrecision] * N[(N[(N[(N[Abs[im], $MachinePrecision] * -0.16666666666666666), $MachinePrecision] * N[Abs[im], $MachinePrecision]), $MachinePrecision] * N[Abs[im], $MachinePrecision] + N[Abs[im], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], If[LessEqual[t$95$2, -0.05], t$95$1, If[LessEqual[t$95$2, 2e-165], t$95$3, If[LessEqual[t$95$2, 1.0], t$95$1, t$95$3]]]]), $MachinePrecision]]]]]
\begin{array}{l}
t_0 := \sin \left(\left|im\right|\right)\\
t_1 := \left(1 + re\right) \cdot t\_0\\
t_2 := e^{re} \cdot t\_0\\
t_3 := \left|im\right| \cdot e^{re}\\
\mathsf{copysign}\left(1, im\right) \cdot \begin{array}{l}
\mathbf{if}\;t\_2 \leq -\infty:\\
\;\;\;\;e^{re} \cdot \mathsf{fma}\left(\left(\left|im\right| \cdot -0.16666666666666666\right) \cdot \left|im\right|, \left|im\right|, \left|im\right|\right)\\

\mathbf{elif}\;t\_2 \leq -0.05:\\
\;\;\;\;t\_1\\

\mathbf{elif}\;t\_2 \leq 2 \cdot 10^{-165}:\\
\;\;\;\;t\_3\\

\mathbf{elif}\;t\_2 \leq 1:\\
\;\;\;\;t\_1\\

\mathbf{else}:\\
\;\;\;\;t\_3\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if (*.f64 (exp.f64 re) (sin.f64 im)) < -inf.0

    1. Initial program 100.0%

      \[e^{re} \cdot \sin im \]
    2. Taylor expanded in im around 0

      \[\leadsto e^{re} \cdot \color{blue}{\left(im \cdot \left(1 + \frac{-1}{6} \cdot {im}^{2}\right)\right)} \]
    3. Step-by-step derivation
      1. lower-*.f64N/A

        \[\leadsto e^{re} \cdot \left(im \cdot \color{blue}{\left(1 + \frac{-1}{6} \cdot {im}^{2}\right)}\right) \]
      2. lower-+.f64N/A

        \[\leadsto e^{re} \cdot \left(im \cdot \left(1 + \color{blue}{\frac{-1}{6} \cdot {im}^{2}}\right)\right) \]
      3. lower-*.f64N/A

        \[\leadsto e^{re} \cdot \left(im \cdot \left(1 + \frac{-1}{6} \cdot \color{blue}{{im}^{2}}\right)\right) \]
      4. lower-pow.f6460.1%

        \[\leadsto e^{re} \cdot \left(im \cdot \left(1 + -0.16666666666666666 \cdot {im}^{\color{blue}{2}}\right)\right) \]
    4. Applied rewrites60.1%

      \[\leadsto e^{re} \cdot \color{blue}{\left(im \cdot \left(1 + -0.16666666666666666 \cdot {im}^{2}\right)\right)} \]
    5. Step-by-step derivation
      1. lift-*.f64N/A

        \[\leadsto e^{re} \cdot \left(im \cdot \color{blue}{\left(1 + \frac{-1}{6} \cdot {im}^{2}\right)}\right) \]
      2. lift-+.f64N/A

        \[\leadsto e^{re} \cdot \left(im \cdot \left(1 + \color{blue}{\frac{-1}{6} \cdot {im}^{2}}\right)\right) \]
      3. distribute-lft-inN/A

        \[\leadsto e^{re} \cdot \left(im \cdot 1 + \color{blue}{im \cdot \left(\frac{-1}{6} \cdot {im}^{2}\right)}\right) \]
      4. *-rgt-identityN/A

        \[\leadsto e^{re} \cdot \left(im + \color{blue}{im} \cdot \left(\frac{-1}{6} \cdot {im}^{2}\right)\right) \]
      5. +-commutativeN/A

        \[\leadsto e^{re} \cdot \left(im \cdot \left(\frac{-1}{6} \cdot {im}^{2}\right) + \color{blue}{im}\right) \]
      6. lift-*.f64N/A

        \[\leadsto e^{re} \cdot \left(im \cdot \left(\frac{-1}{6} \cdot {im}^{2}\right) + im\right) \]
      7. associate-*r*N/A

        \[\leadsto e^{re} \cdot \left(\left(im \cdot \frac{-1}{6}\right) \cdot {im}^{2} + im\right) \]
      8. lift-pow.f64N/A

        \[\leadsto e^{re} \cdot \left(\left(im \cdot \frac{-1}{6}\right) \cdot {im}^{2} + im\right) \]
      9. unpow2N/A

        \[\leadsto e^{re} \cdot \left(\left(im \cdot \frac{-1}{6}\right) \cdot \left(im \cdot im\right) + im\right) \]
      10. associate-*r*N/A

        \[\leadsto e^{re} \cdot \left(\left(\left(im \cdot \frac{-1}{6}\right) \cdot im\right) \cdot im + im\right) \]
      11. lower-fma.f64N/A

        \[\leadsto e^{re} \cdot \mathsf{fma}\left(\left(im \cdot \frac{-1}{6}\right) \cdot im, \color{blue}{im}, im\right) \]
      12. lower-*.f64N/A

        \[\leadsto e^{re} \cdot \mathsf{fma}\left(\left(im \cdot \frac{-1}{6}\right) \cdot im, im, im\right) \]
      13. lower-*.f6460.1%

        \[\leadsto e^{re} \cdot \mathsf{fma}\left(\left(im \cdot -0.16666666666666666\right) \cdot im, im, im\right) \]
    6. Applied rewrites60.1%

      \[\leadsto e^{re} \cdot \mathsf{fma}\left(\left(im \cdot -0.16666666666666666\right) \cdot im, \color{blue}{im}, im\right) \]

    if -inf.0 < (*.f64 (exp.f64 re) (sin.f64 im)) < -0.050000000000000003 or 2e-165 < (*.f64 (exp.f64 re) (sin.f64 im)) < 1

    1. Initial program 100.0%

      \[e^{re} \cdot \sin im \]
    2. Taylor expanded in re around 0

      \[\leadsto \color{blue}{\left(1 + re\right)} \cdot \sin im \]
    3. Step-by-step derivation
      1. lower-+.f6451.9%

        \[\leadsto \left(1 + \color{blue}{re}\right) \cdot \sin im \]
    4. Applied rewrites51.9%

      \[\leadsto \color{blue}{\left(1 + re\right)} \cdot \sin im \]

    if -0.050000000000000003 < (*.f64 (exp.f64 re) (sin.f64 im)) < 2e-165 or 1 < (*.f64 (exp.f64 re) (sin.f64 im))

    1. Initial program 100.0%

      \[e^{re} \cdot \sin im \]
    2. Taylor expanded in im around 0

      \[\leadsto \color{blue}{im \cdot e^{re}} \]
    3. Step-by-step derivation
      1. lower-*.f64N/A

        \[\leadsto im \cdot \color{blue}{e^{re}} \]
      2. lower-exp.f6469.2%

        \[\leadsto im \cdot e^{re} \]
    4. Applied rewrites69.2%

      \[\leadsto \color{blue}{im \cdot e^{re}} \]
  3. Recombined 3 regimes into one program.
  4. Add Preprocessing

Alternative 2: 98.8% accurate, 0.2× speedup?

\[\begin{array}{l} t_0 := \left|im\right| \cdot e^{re}\\ t_1 := \sin \left(\left|im\right|\right)\\ t_2 := e^{re} \cdot t\_1\\ \mathsf{copysign}\left(1, im\right) \cdot \begin{array}{l} \mathbf{if}\;t\_2 \leq -\infty:\\ \;\;\;\;e^{re} \cdot \mathsf{fma}\left(\left(\left|im\right| \cdot -0.16666666666666666\right) \cdot \left|im\right|, \left|im\right|, \left|im\right|\right)\\ \mathbf{elif}\;t\_2 \leq -0.05:\\ \;\;\;\;t\_1\\ \mathbf{elif}\;t\_2 \leq 2 \cdot 10^{-73}:\\ \;\;\;\;t\_0\\ \mathbf{elif}\;t\_2 \leq 1:\\ \;\;\;\;t\_1\\ \mathbf{else}:\\ \;\;\;\;t\_0\\ \end{array} \end{array} \]
; Alternative 2: like alternative 1, but the mid regimes use sin(|im|)
; itself (t_1) and the small-value threshold is 2e-73.
(FPCore (re im)
  :precision binary64
  (let* ((t_0 (* (fabs im) (exp re)))
       (t_1 (sin (fabs im)))
       (t_2 (* (exp re) t_1)))
  (*
   (copysign 1.0 im)
   (if (<= t_2 (- INFINITY))
     (*
      (exp re)
      (fma
       (* (* (fabs im) -0.16666666666666666) (fabs im))
       (fabs im)
       (fabs im)))
     (if (<= t_2 -0.05)
       t_1
       (if (<= t_2 2e-73) t_0 (if (<= t_2 1.0) t_1 t_0)))))))
/*
 * Herbie alternative 2 (98.8% accurate, 0.2x speedup): same regime scheme
 * as alternative 1, but the mid regimes use sin(|im|) alone (Taylor in re
 * around 0 drops the e^re factor) and the small-value cutoff is 2e-73.
 * Sign is restored at the end via copysign.
 */
double code(double re, double im) {
	/* Small-|im| Taylor form: |im| * e^re. */
	double t_0 = fabs(im) * exp(re);
	double t_1 = sin(fabs(im));
	double t_2 = exp(re) * t_1;
	double tmp;
	/* Overflow-to--inf regime: cubic Taylor polynomial of sin (coefficient -1/6). */
	if (t_2 <= -((double) INFINITY)) {
		tmp = exp(re) * fma(((fabs(im) * -0.16666666666666666) * fabs(im)), fabs(im), fabs(im));
	} else if (t_2 <= -0.05) {
		tmp = t_1;
	} else if (t_2 <= 2e-73) {
		tmp = t_0;
	} else if (t_2 <= 1.0) {
		tmp = t_1;
	} else {
		tmp = t_0;
	}
	return copysign(1.0, im) * tmp;
}
# Herbie alternative 2 (98.8% accurate): same regime scheme as alternative 1,
# but mid regimes use sin(|im|) alone and the small-value cutoff is 2e-73.
function code(re, im)
	# Small-|im| Taylor form: |im| * e^re.
	t_0 = Float64(abs(im) * exp(re))
	t_1 = sin(abs(im))
	t_2 = Float64(exp(re) * t_1)
	tmp = 0.0
	# Overflow-to--Inf regime: cubic Taylor polynomial of sin (coefficient -1/6).
	if (t_2 <= Float64(-Inf))
		tmp = Float64(exp(re) * fma(Float64(Float64(abs(im) * -0.16666666666666666) * abs(im)), abs(im), abs(im)));
	elseif (t_2 <= -0.05)
		tmp = t_1;
	elseif (t_2 <= 2e-73)
		tmp = t_0;
	elseif (t_2 <= 1.0)
		tmp = t_1;
	else
		tmp = t_0;
	end
	return Float64(copysign(1.0, im) * tmp)
end
code[re_, im_] := Block[{t$95$0 = N[(N[Abs[im], $MachinePrecision] * N[Exp[re], $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[Sin[N[Abs[im], $MachinePrecision]], $MachinePrecision]}, Block[{t$95$2 = N[(N[Exp[re], $MachinePrecision] * t$95$1), $MachinePrecision]}, N[(N[With[{TMP1 = Abs[1.0], TMP2 = Sign[im]}, TMP1 * If[TMP2 == 0, 1, TMP2]], $MachinePrecision] * If[LessEqual[t$95$2, (-Infinity)], N[(N[Exp[re], $MachinePrecision] * N[(N[(N[(N[Abs[im], $MachinePrecision] * -0.16666666666666666), $MachinePrecision] * N[Abs[im], $MachinePrecision]), $MachinePrecision] * N[Abs[im], $MachinePrecision] + N[Abs[im], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], If[LessEqual[t$95$2, -0.05], t$95$1, If[LessEqual[t$95$2, 2e-73], t$95$0, If[LessEqual[t$95$2, 1.0], t$95$1, t$95$0]]]]), $MachinePrecision]]]]
\begin{array}{l}
t_0 := \left|im\right| \cdot e^{re}\\
t_1 := \sin \left(\left|im\right|\right)\\
t_2 := e^{re} \cdot t\_1\\
\mathsf{copysign}\left(1, im\right) \cdot \begin{array}{l}
\mathbf{if}\;t\_2 \leq -\infty:\\
\;\;\;\;e^{re} \cdot \mathsf{fma}\left(\left(\left|im\right| \cdot -0.16666666666666666\right) \cdot \left|im\right|, \left|im\right|, \left|im\right|\right)\\

\mathbf{elif}\;t\_2 \leq -0.05:\\
\;\;\;\;t\_1\\

\mathbf{elif}\;t\_2 \leq 2 \cdot 10^{-73}:\\
\;\;\;\;t\_0\\

\mathbf{elif}\;t\_2 \leq 1:\\
\;\;\;\;t\_1\\

\mathbf{else}:\\
\;\;\;\;t\_0\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if (*.f64 (exp.f64 re) (sin.f64 im)) < -inf.0

    1. Initial program 100.0%

      \[e^{re} \cdot \sin im \]
    2. Taylor expanded in im around 0

      \[\leadsto e^{re} \cdot \color{blue}{\left(im \cdot \left(1 + \frac{-1}{6} \cdot {im}^{2}\right)\right)} \]
    3. Step-by-step derivation
      1. lower-*.f64N/A

        \[\leadsto e^{re} \cdot \left(im \cdot \color{blue}{\left(1 + \frac{-1}{6} \cdot {im}^{2}\right)}\right) \]
      2. lower-+.f64N/A

        \[\leadsto e^{re} \cdot \left(im \cdot \left(1 + \color{blue}{\frac{-1}{6} \cdot {im}^{2}}\right)\right) \]
      3. lower-*.f64N/A

        \[\leadsto e^{re} \cdot \left(im \cdot \left(1 + \frac{-1}{6} \cdot \color{blue}{{im}^{2}}\right)\right) \]
      4. lower-pow.f6460.1%

        \[\leadsto e^{re} \cdot \left(im \cdot \left(1 + -0.16666666666666666 \cdot {im}^{\color{blue}{2}}\right)\right) \]
    4. Applied rewrites60.1%

      \[\leadsto e^{re} \cdot \color{blue}{\left(im \cdot \left(1 + -0.16666666666666666 \cdot {im}^{2}\right)\right)} \]
    5. Step-by-step derivation
      1. lift-*.f64N/A

        \[\leadsto e^{re} \cdot \left(im \cdot \color{blue}{\left(1 + \frac{-1}{6} \cdot {im}^{2}\right)}\right) \]
      2. lift-+.f64N/A

        \[\leadsto e^{re} \cdot \left(im \cdot \left(1 + \color{blue}{\frac{-1}{6} \cdot {im}^{2}}\right)\right) \]
      3. distribute-lft-inN/A

        \[\leadsto e^{re} \cdot \left(im \cdot 1 + \color{blue}{im \cdot \left(\frac{-1}{6} \cdot {im}^{2}\right)}\right) \]
      4. *-rgt-identityN/A

        \[\leadsto e^{re} \cdot \left(im + \color{blue}{im} \cdot \left(\frac{-1}{6} \cdot {im}^{2}\right)\right) \]
      5. +-commutativeN/A

        \[\leadsto e^{re} \cdot \left(im \cdot \left(\frac{-1}{6} \cdot {im}^{2}\right) + \color{blue}{im}\right) \]
      6. lift-*.f64N/A

        \[\leadsto e^{re} \cdot \left(im \cdot \left(\frac{-1}{6} \cdot {im}^{2}\right) + im\right) \]
      7. associate-*r*N/A

        \[\leadsto e^{re} \cdot \left(\left(im \cdot \frac{-1}{6}\right) \cdot {im}^{2} + im\right) \]
      8. lift-pow.f64N/A

        \[\leadsto e^{re} \cdot \left(\left(im \cdot \frac{-1}{6}\right) \cdot {im}^{2} + im\right) \]
      9. unpow2N/A

        \[\leadsto e^{re} \cdot \left(\left(im \cdot \frac{-1}{6}\right) \cdot \left(im \cdot im\right) + im\right) \]
      10. associate-*r*N/A

        \[\leadsto e^{re} \cdot \left(\left(\left(im \cdot \frac{-1}{6}\right) \cdot im\right) \cdot im + im\right) \]
      11. lower-fma.f64N/A

        \[\leadsto e^{re} \cdot \mathsf{fma}\left(\left(im \cdot \frac{-1}{6}\right) \cdot im, \color{blue}{im}, im\right) \]
      12. lower-*.f64N/A

        \[\leadsto e^{re} \cdot \mathsf{fma}\left(\left(im \cdot \frac{-1}{6}\right) \cdot im, im, im\right) \]
      13. lower-*.f6460.1%

        \[\leadsto e^{re} \cdot \mathsf{fma}\left(\left(im \cdot -0.16666666666666666\right) \cdot im, im, im\right) \]
    6. Applied rewrites60.1%

      \[\leadsto e^{re} \cdot \mathsf{fma}\left(\left(im \cdot -0.16666666666666666\right) \cdot im, \color{blue}{im}, im\right) \]

    if -inf.0 < (*.f64 (exp.f64 re) (sin.f64 im)) < -0.050000000000000003 or 2e-73 < (*.f64 (exp.f64 re) (sin.f64 im)) < 1

    1. Initial program 100.0%

      \[e^{re} \cdot \sin im \]
    2. Taylor expanded in re around 0

      \[\leadsto \color{blue}{\sin im} \]
    3. Step-by-step derivation
      1. lower-sin.f6451.4%

        \[\leadsto \sin im \]
    4. Applied rewrites51.4%

      \[\leadsto \color{blue}{\sin im} \]

    if -0.050000000000000003 < (*.f64 (exp.f64 re) (sin.f64 im)) < 2e-73 or 1 < (*.f64 (exp.f64 re) (sin.f64 im))

    1. Initial program 100.0%

      \[e^{re} \cdot \sin im \]
    2. Taylor expanded in im around 0

      \[\leadsto \color{blue}{im \cdot e^{re}} \]
    3. Step-by-step derivation
      1. lower-*.f64N/A

        \[\leadsto im \cdot \color{blue}{e^{re}} \]
      2. lower-exp.f6469.2%

        \[\leadsto im \cdot e^{re} \]
    4. Applied rewrites69.2%

      \[\leadsto \color{blue}{im \cdot e^{re}} \]
  3. Recombined 3 regimes into one program.
  4. Add Preprocessing

Alternative 3: 75.4% accurate, 0.6× speedup?

\[\mathsf{copysign}\left(1, im\right) \cdot \begin{array}{l} \mathbf{if}\;e^{re} \cdot \sin \left(\left|im\right|\right) \leq -0.05:\\ \;\;\;\;e^{re} \cdot \mathsf{fma}\left(\left(\left|im\right| \cdot -0.16666666666666666\right) \cdot \left|im\right|, \left|im\right|, \left|im\right|\right)\\ \mathbf{else}:\\ \;\;\;\;\left|im\right| \cdot e^{re}\\ \end{array} \]
; Alternative 3: two regimes split at e^re * sin(|im|) <= -0.05 —
; cubic sin polynomial below, |im| * e^re above; sign restored via copysign.
(FPCore (re im)
  :precision binary64
  (*
 (copysign 1.0 im)
 (if (<= (* (exp re) (sin (fabs im))) -0.05)
   (*
    (exp re)
    (fma
     (* (* (fabs im) -0.16666666666666666) (fabs im))
     (fabs im)
     (fabs im)))
   (* (fabs im) (exp re)))))
double code(double re, double im) {
	double tmp;
	if ((exp(re) * sin(fabs(im))) <= -0.05) {
		tmp = exp(re) * fma(((fabs(im) * -0.16666666666666666) * fabs(im)), fabs(im), fabs(im));
	} else {
		tmp = fabs(im) * exp(re);
	}
	return copysign(1.0, im) * tmp;
}
# Herbie alternative 3 (75.4% accurate): two regimes split at
# exp(re)*sin(|im|) <= -0.05 — cubic sin polynomial below, |im|*e^re above.
function code(re, im)
	tmp = 0.0
	if (Float64(exp(re) * sin(abs(im))) <= -0.05)
		tmp = Float64(exp(re) * fma(Float64(Float64(abs(im) * -0.16666666666666666) * abs(im)), abs(im), abs(im)));
	else
		tmp = Float64(abs(im) * exp(re));
	end
	return Float64(copysign(1.0, im) * tmp)
end
code[re_, im_] := N[(N[With[{TMP1 = Abs[1.0], TMP2 = Sign[im]}, TMP1 * If[TMP2 == 0, 1, TMP2]], $MachinePrecision] * If[LessEqual[N[(N[Exp[re], $MachinePrecision] * N[Sin[N[Abs[im], $MachinePrecision]], $MachinePrecision]), $MachinePrecision], -0.05], N[(N[Exp[re], $MachinePrecision] * N[(N[(N[(N[Abs[im], $MachinePrecision] * -0.16666666666666666), $MachinePrecision] * N[Abs[im], $MachinePrecision]), $MachinePrecision] * N[Abs[im], $MachinePrecision] + N[Abs[im], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[Abs[im], $MachinePrecision] * N[Exp[re], $MachinePrecision]), $MachinePrecision]]), $MachinePrecision]
\mathsf{copysign}\left(1, im\right) \cdot \begin{array}{l}
\mathbf{if}\;e^{re} \cdot \sin \left(\left|im\right|\right) \leq -0.05:\\
\;\;\;\;e^{re} \cdot \mathsf{fma}\left(\left(\left|im\right| \cdot -0.16666666666666666\right) \cdot \left|im\right|, \left|im\right|, \left|im\right|\right)\\

\mathbf{else}:\\
\;\;\;\;\left|im\right| \cdot e^{re}\\


\end{array}
Derivation
  1. Split input into 2 regimes
  2. if (*.f64 (exp.f64 re) (sin.f64 im)) < -0.050000000000000003

    1. Initial program 100.0%

      \[e^{re} \cdot \sin im \]
    2. Taylor expanded in im around 0

      \[\leadsto e^{re} \cdot \color{blue}{\left(im \cdot \left(1 + \frac{-1}{6} \cdot {im}^{2}\right)\right)} \]
    3. Step-by-step derivation
      1. lower-*.f64N/A

        \[\leadsto e^{re} \cdot \left(im \cdot \color{blue}{\left(1 + \frac{-1}{6} \cdot {im}^{2}\right)}\right) \]
      2. lower-+.f64N/A

        \[\leadsto e^{re} \cdot \left(im \cdot \left(1 + \color{blue}{\frac{-1}{6} \cdot {im}^{2}}\right)\right) \]
      3. lower-*.f64N/A

        \[\leadsto e^{re} \cdot \left(im \cdot \left(1 + \frac{-1}{6} \cdot \color{blue}{{im}^{2}}\right)\right) \]
      4. lower-pow.f6460.1%

        \[\leadsto e^{re} \cdot \left(im \cdot \left(1 + -0.16666666666666666 \cdot {im}^{\color{blue}{2}}\right)\right) \]
    4. Applied rewrites60.1%

      \[\leadsto e^{re} \cdot \color{blue}{\left(im \cdot \left(1 + -0.16666666666666666 \cdot {im}^{2}\right)\right)} \]
    5. Step-by-step derivation
      1. lift-*.f64N/A

        \[\leadsto e^{re} \cdot \left(im \cdot \color{blue}{\left(1 + \frac{-1}{6} \cdot {im}^{2}\right)}\right) \]
      2. lift-+.f64N/A

        \[\leadsto e^{re} \cdot \left(im \cdot \left(1 + \color{blue}{\frac{-1}{6} \cdot {im}^{2}}\right)\right) \]
      3. distribute-lft-inN/A

        \[\leadsto e^{re} \cdot \left(im \cdot 1 + \color{blue}{im \cdot \left(\frac{-1}{6} \cdot {im}^{2}\right)}\right) \]
      4. *-rgt-identityN/A

        \[\leadsto e^{re} \cdot \left(im + \color{blue}{im} \cdot \left(\frac{-1}{6} \cdot {im}^{2}\right)\right) \]
      5. +-commutativeN/A

        \[\leadsto e^{re} \cdot \left(im \cdot \left(\frac{-1}{6} \cdot {im}^{2}\right) + \color{blue}{im}\right) \]
      6. lift-*.f64N/A

        \[\leadsto e^{re} \cdot \left(im \cdot \left(\frac{-1}{6} \cdot {im}^{2}\right) + im\right) \]
      7. associate-*r*N/A

        \[\leadsto e^{re} \cdot \left(\left(im \cdot \frac{-1}{6}\right) \cdot {im}^{2} + im\right) \]
      8. lift-pow.f64N/A

        \[\leadsto e^{re} \cdot \left(\left(im \cdot \frac{-1}{6}\right) \cdot {im}^{2} + im\right) \]
      9. unpow2N/A

        \[\leadsto e^{re} \cdot \left(\left(im \cdot \frac{-1}{6}\right) \cdot \left(im \cdot im\right) + im\right) \]
      10. associate-*r*N/A

        \[\leadsto e^{re} \cdot \left(\left(\left(im \cdot \frac{-1}{6}\right) \cdot im\right) \cdot im + im\right) \]
      11. lower-fma.f64N/A

        \[\leadsto e^{re} \cdot \mathsf{fma}\left(\left(im \cdot \frac{-1}{6}\right) \cdot im, \color{blue}{im}, im\right) \]
      12. lower-*.f64N/A

        \[\leadsto e^{re} \cdot \mathsf{fma}\left(\left(im \cdot \frac{-1}{6}\right) \cdot im, im, im\right) \]
      13. lower-*.f6460.1%

        \[\leadsto e^{re} \cdot \mathsf{fma}\left(\left(im \cdot -0.16666666666666666\right) \cdot im, im, im\right) \]
    6. Applied rewrites60.1%

      \[\leadsto e^{re} \cdot \mathsf{fma}\left(\left(im \cdot -0.16666666666666666\right) \cdot im, \color{blue}{im}, im\right) \]

    if -0.050000000000000003 < (*.f64 (exp.f64 re) (sin.f64 im))

    1. Initial program 100.0%

      \[e^{re} \cdot \sin im \]
    2. Taylor expanded in im around 0

      \[\leadsto \color{blue}{im \cdot e^{re}} \]
    3. Step-by-step derivation
      1. lower-*.f64N/A

        \[\leadsto im \cdot \color{blue}{e^{re}} \]
      2. lower-exp.f6469.2%

        \[\leadsto im \cdot e^{re} \]
    4. Applied rewrites69.2%

      \[\leadsto \color{blue}{im \cdot e^{re}} \]
  3. Recombined 2 regimes into one program.
  4. Add Preprocessing

Alternative 4: 74.5% accurate, 0.6× speedup?

\[\mathsf{copysign}\left(1, im\right) \cdot \begin{array}{l} \mathbf{if}\;e^{re} \cdot \sin \left(\left|im\right|\right) \leq -0.05:\\ \;\;\;\;\left(1 + re\right) \cdot \mathsf{fma}\left(\left|im\right|, \left|im\right| \cdot \left(-0.16666666666666666 \cdot \left|im\right|\right), \left|im\right|\right)\\ \mathbf{else}:\\ \;\;\;\;\left|im\right| \cdot e^{re}\\ \end{array} \]
; Alternative 4: like alternative 3, but the negative regime also
; linearizes e^re as (1 + re); sign restored via copysign.
(FPCore (re im)
  :precision binary64
  (*
 (copysign 1.0 im)
 (if (<= (* (exp re) (sin (fabs im))) -0.05)
   (*
    (+ 1.0 re)
    (fma
     (fabs im)
     (* (fabs im) (* -0.16666666666666666 (fabs im)))
     (fabs im)))
   (* (fabs im) (exp re)))))
/*
 * Herbie alternative 4 (74.5% accurate, 0.6x speedup): like alternative 3,
 * but the negative regime additionally linearizes e^re as (1 + re)
 * (first-order Taylor in re — see derivation).  Sign restored via copysign.
 */
double code(double re, double im) {
	double tmp;
	if ((exp(re) * sin(fabs(im))) <= -0.05) {
		tmp = (1.0 + re) * fma(fabs(im), (fabs(im) * (-0.16666666666666666 * fabs(im))), fabs(im));
	} else {
		tmp = fabs(im) * exp(re);
	}
	return copysign(1.0, im) * tmp;
}
# Herbie alternative 4 (74.5% accurate): like alternative 3, but the negative
# regime linearizes e^re as (1 + re).  Sign restored via copysign.
function code(re, im)
	tmp = 0.0
	if (Float64(exp(re) * sin(abs(im))) <= -0.05)
		tmp = Float64(Float64(1.0 + re) * fma(abs(im), Float64(abs(im) * Float64(-0.16666666666666666 * abs(im))), abs(im)));
	else
		tmp = Float64(abs(im) * exp(re));
	end
	return Float64(copysign(1.0, im) * tmp)
end
code[re_, im_] := N[(N[With[{TMP1 = Abs[1.0], TMP2 = Sign[im]}, TMP1 * If[TMP2 == 0, 1, TMP2]], $MachinePrecision] * If[LessEqual[N[(N[Exp[re], $MachinePrecision] * N[Sin[N[Abs[im], $MachinePrecision]], $MachinePrecision]), $MachinePrecision], -0.05], N[(N[(1.0 + re), $MachinePrecision] * N[(N[Abs[im], $MachinePrecision] * N[(N[Abs[im], $MachinePrecision] * N[(-0.16666666666666666 * N[Abs[im], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[Abs[im], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[Abs[im], $MachinePrecision] * N[Exp[re], $MachinePrecision]), $MachinePrecision]]), $MachinePrecision]
\mathsf{copysign}\left(1, im\right) \cdot \begin{array}{l}
\mathbf{if}\;e^{re} \cdot \sin \left(\left|im\right|\right) \leq -0.05:\\
\;\;\;\;\left(1 + re\right) \cdot \mathsf{fma}\left(\left|im\right|, \left|im\right| \cdot \left(-0.16666666666666666 \cdot \left|im\right|\right), \left|im\right|\right)\\

\mathbf{else}:\\
\;\;\;\;\left|im\right| \cdot e^{re}\\


\end{array}
Derivation
  1. Split input into 2 regimes
  2. if (*.f64 (exp.f64 re) (sin.f64 im)) < -0.050000000000000003

    1. Initial program 100.0%

      \[e^{re} \cdot \sin im \]
    2. Taylor expanded in im around 0

      \[\leadsto e^{re} \cdot \color{blue}{\left(im \cdot \left(1 + \frac{-1}{6} \cdot {im}^{2}\right)\right)} \]
    3. Step-by-step derivation
      1. lower-*.f64N/A

        \[\leadsto e^{re} \cdot \left(im \cdot \color{blue}{\left(1 + \frac{-1}{6} \cdot {im}^{2}\right)}\right) \]
      2. lower-+.f64N/A

        \[\leadsto e^{re} \cdot \left(im \cdot \left(1 + \color{blue}{\frac{-1}{6} \cdot {im}^{2}}\right)\right) \]
      3. lower-*.f64N/A

        \[\leadsto e^{re} \cdot \left(im \cdot \left(1 + \frac{-1}{6} \cdot \color{blue}{{im}^{2}}\right)\right) \]
      4. lower-pow.f6460.1%

        \[\leadsto e^{re} \cdot \left(im \cdot \left(1 + -0.16666666666666666 \cdot {im}^{\color{blue}{2}}\right)\right) \]
    4. Applied rewrites60.1%

      \[\leadsto e^{re} \cdot \color{blue}{\left(im \cdot \left(1 + -0.16666666666666666 \cdot {im}^{2}\right)\right)} \]
    5. Step-by-step derivation
      1. lift-*.f64N/A

        \[\leadsto e^{re} \cdot \left(im \cdot \color{blue}{\left(1 + \frac{-1}{6} \cdot {im}^{2}\right)}\right) \]
      2. lift-+.f64N/A

        \[\leadsto e^{re} \cdot \left(im \cdot \left(1 + \color{blue}{\frac{-1}{6} \cdot {im}^{2}}\right)\right) \]
      3. +-commutativeN/A

        \[\leadsto e^{re} \cdot \left(im \cdot \left(\frac{-1}{6} \cdot {im}^{2} + \color{blue}{1}\right)\right) \]
      4. distribute-lft-inN/A

        \[\leadsto e^{re} \cdot \left(im \cdot \left(\frac{-1}{6} \cdot {im}^{2}\right) + \color{blue}{im \cdot 1}\right) \]
      5. lift-*.f64N/A

        \[\leadsto e^{re} \cdot \left(im \cdot \left(\frac{-1}{6} \cdot {im}^{2}\right) + im \cdot 1\right) \]
      6. *-commutativeN/A

        \[\leadsto e^{re} \cdot \left(im \cdot \left({im}^{2} \cdot \frac{-1}{6}\right) + im \cdot 1\right) \]
      7. associate-*r*N/A

        \[\leadsto e^{re} \cdot \left(\left(im \cdot {im}^{2}\right) \cdot \frac{-1}{6} + \color{blue}{im} \cdot 1\right) \]
      8. lift-pow.f64N/A

        \[\leadsto e^{re} \cdot \left(\left(im \cdot {im}^{2}\right) \cdot \frac{-1}{6} + im \cdot 1\right) \]
      9. unpow2N/A

        \[\leadsto e^{re} \cdot \left(\left(im \cdot \left(im \cdot im\right)\right) \cdot \frac{-1}{6} + im \cdot 1\right) \]
      10. cube-unmultN/A

        \[\leadsto e^{re} \cdot \left({im}^{3} \cdot \frac{-1}{6} + im \cdot 1\right) \]
      11. *-lft-identityN/A

        \[\leadsto e^{re} \cdot \left({\left(1 \cdot im\right)}^{3} \cdot \frac{-1}{6} + im \cdot 1\right) \]
      12. *-rgt-identityN/A

        \[\leadsto e^{re} \cdot \left({\left(1 \cdot im\right)}^{3} \cdot \frac{-1}{6} + im\right) \]
      13. lower-fma.f64N/A

        \[\leadsto e^{re} \cdot \mathsf{fma}\left({\left(1 \cdot im\right)}^{3}, \color{blue}{\frac{-1}{6}}, im\right) \]
      14. *-lft-identityN/A

        \[\leadsto e^{re} \cdot \mathsf{fma}\left({im}^{3}, \frac{-1}{6}, im\right) \]
      15. metadata-evalN/A

        \[\leadsto e^{re} \cdot \mathsf{fma}\left({im}^{\left(2 + 1\right)}, \frac{-1}{6}, im\right) \]
      16. pow-plusN/A

        \[\leadsto e^{re} \cdot \mathsf{fma}\left({im}^{2} \cdot im, \frac{-1}{6}, im\right) \]
      17. lift-pow.f64N/A

        \[\leadsto e^{re} \cdot \mathsf{fma}\left({im}^{2} \cdot im, \frac{-1}{6}, im\right) \]
      18. lower-*.f6460.1%

        \[\leadsto e^{re} \cdot \mathsf{fma}\left({im}^{2} \cdot im, -0.16666666666666666, im\right) \]
      19. lift-pow.f64N/A

        \[\leadsto e^{re} \cdot \mathsf{fma}\left({im}^{2} \cdot im, \frac{-1}{6}, im\right) \]
      20. unpow2N/A

        \[\leadsto e^{re} \cdot \mathsf{fma}\left(\left(im \cdot im\right) \cdot im, \frac{-1}{6}, im\right) \]
      21. lower-*.f6460.1%

        \[\leadsto e^{re} \cdot \mathsf{fma}\left(\left(im \cdot im\right) \cdot im, -0.16666666666666666, im\right) \]
    6. Applied rewrites60.1%

      \[\leadsto e^{re} \cdot \mathsf{fma}\left(\left(im \cdot im\right) \cdot im, \color{blue}{-0.16666666666666666}, im\right) \]
    7. Taylor expanded in re around 0

      \[\leadsto \color{blue}{\left(1 + re\right)} \cdot \mathsf{fma}\left(\left(im \cdot im\right) \cdot im, -0.16666666666666666, im\right) \]
    8. Step-by-step derivation
      1. lower-+.f6431.6%

        \[\leadsto \left(1 + \color{blue}{re}\right) \cdot \mathsf{fma}\left(\left(im \cdot im\right) \cdot im, -0.16666666666666666, im\right) \]
    9. Applied rewrites31.6%

      \[\leadsto \color{blue}{\left(1 + re\right)} \cdot \mathsf{fma}\left(\left(im \cdot im\right) \cdot im, -0.16666666666666666, im\right) \]
    10. Step-by-step derivation
      1. lift-fma.f64N/A

        \[\leadsto \left(1 + re\right) \cdot \left(\left(\left(im \cdot im\right) \cdot im\right) \cdot \frac{-1}{6} + \color{blue}{im}\right) \]
      2. lift-*.f64N/A

        \[\leadsto \left(1 + re\right) \cdot \left(\left(\left(im \cdot im\right) \cdot im\right) \cdot \frac{-1}{6} + im\right) \]
      3. associate-*l*N/A

        \[\leadsto \left(1 + re\right) \cdot \left(\left(im \cdot im\right) \cdot \left(im \cdot \frac{-1}{6}\right) + im\right) \]
      4. lift-*.f64N/A

        \[\leadsto \left(1 + re\right) \cdot \left(\left(im \cdot im\right) \cdot \left(im \cdot \frac{-1}{6}\right) + im\right) \]
      5. associate-*l*N/A

        \[\leadsto \left(1 + re\right) \cdot \left(im \cdot \left(im \cdot \left(im \cdot \frac{-1}{6}\right)\right) + im\right) \]
      6. lower-fma.f64N/A

        \[\leadsto \left(1 + re\right) \cdot \mathsf{fma}\left(im, \color{blue}{im \cdot \left(im \cdot \frac{-1}{6}\right)}, im\right) \]
      7. lower-*.f64N/A

        \[\leadsto \left(1 + re\right) \cdot \mathsf{fma}\left(im, im \cdot \color{blue}{\left(im \cdot \frac{-1}{6}\right)}, im\right) \]
      8. *-commutativeN/A

        \[\leadsto \left(1 + re\right) \cdot \mathsf{fma}\left(im, im \cdot \left(\frac{-1}{6} \cdot \color{blue}{im}\right), im\right) \]
      9. lower-*.f6431.6%

        \[\leadsto \left(1 + re\right) \cdot \mathsf{fma}\left(im, im \cdot \left(-0.16666666666666666 \cdot \color{blue}{im}\right), im\right) \]
    11. Applied rewrites31.6%

      \[\leadsto \left(1 + re\right) \cdot \mathsf{fma}\left(im, \color{blue}{im \cdot \left(-0.16666666666666666 \cdot im\right)}, im\right) \]

    if -0.050000000000000003 < (*.f64 (exp.f64 re) (sin.f64 im))

    1. Initial program 100.0%

      \[e^{re} \cdot \sin im \]
    2. Taylor expanded in im around 0

      \[\leadsto \color{blue}{im \cdot e^{re}} \]
    3. Step-by-step derivation
      1. lower-*.f64N/A

        \[\leadsto im \cdot \color{blue}{e^{re}} \]
      2. lower-exp.f6469.2%

        \[\leadsto im \cdot e^{re} \]
    4. Applied rewrites69.2%

      \[\leadsto \color{blue}{im \cdot e^{re}} \]
  3. Recombined 2 regimes into one program.
  4. Add Preprocessing

Alternative 5: 73.6% accurate, 0.6× speedup?

\[\mathsf{copysign}\left(1, im\right) \cdot \begin{array}{l} \mathbf{if}\;e^{re} \cdot \sin \left(\left|im\right|\right) \leq -0.05:\\ \;\;\;\;1 \cdot \mathsf{fma}\left(\left(\left|im\right| \cdot \left|im\right|\right) \cdot \left|im\right|, -0.16666666666666666, \left|im\right|\right)\\ \mathbf{else}:\\ \;\;\;\;\left|im\right| \cdot e^{re}\\ \end{array} \]
; Alternative 5: negative regime keeps only the cubic sin polynomial
; (the 1.0 factor is what remains of the e^re approximation).
(FPCore (re im)
  :precision binary64
  (*
 (copysign 1.0 im)
 (if (<= (* (exp re) (sin (fabs im))) -0.05)
   (*
    1.0
    (fma
     (* (* (fabs im) (fabs im)) (fabs im))
     -0.16666666666666666
     (fabs im)))
   (* (fabs im) (exp re)))))
double code(double re, double im) {
	double tmp;
	if ((exp(re) * sin(fabs(im))) <= -0.05) {
		tmp = 1.0 * fma(((fabs(im) * fabs(im)) * fabs(im)), -0.16666666666666666, fabs(im));
	} else {
		tmp = fabs(im) * exp(re);
	}
	return copysign(1.0, im) * tmp;
}
# e^re * sin(im), evaluated on abs(im) with the sign of im restored via copysign.
function code(re, im)
	tmp = 0.0
	# Regime test on the original product, computed with |im|.
	if (Float64(exp(re) * sin(abs(im))) <= -0.05)
		# Cubic Taylor of sin around 0: |im| - |im|^3/6, fused with fma.
		tmp = Float64(1.0 * fma(Float64(Float64(abs(im) * abs(im)) * abs(im)), -0.16666666666666666, abs(im)));
	else
		# Small-angle regime: sin(|im|) ≈ |im|, scaled by e^re.
		tmp = Float64(abs(im) * exp(re));
	end
	return Float64(copysign(1.0, im) * tmp)
end
code[re_, im_] := N[(N[With[{TMP1 = Abs[1.0], TMP2 = Sign[im]}, TMP1 * If[TMP2 == 0, 1, TMP2]], $MachinePrecision] * If[LessEqual[N[(N[Exp[re], $MachinePrecision] * N[Sin[N[Abs[im], $MachinePrecision]], $MachinePrecision]), $MachinePrecision], -0.05], N[(1.0 * N[(N[(N[(N[Abs[im], $MachinePrecision] * N[Abs[im], $MachinePrecision]), $MachinePrecision] * N[Abs[im], $MachinePrecision]), $MachinePrecision] * -0.16666666666666666 + N[Abs[im], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[Abs[im], $MachinePrecision] * N[Exp[re], $MachinePrecision]), $MachinePrecision]]), $MachinePrecision]
\mathsf{copysign}\left(1, im\right) \cdot \begin{array}{l}
\mathbf{if}\;e^{re} \cdot \sin \left(\left|im\right|\right) \leq -0.05:\\
\;\;\;\;1 \cdot \mathsf{fma}\left(\left(\left|im\right| \cdot \left|im\right|\right) \cdot \left|im\right|, -0.16666666666666666, \left|im\right|\right)\\

\mathbf{else}:\\
\;\;\;\;\left|im\right| \cdot e^{re}\\


\end{array}
Derivation
  1. Split input into 2 regimes
  2. if (*.f64 (exp.f64 re) (sin.f64 im)) < -0.050000000000000003

    1. Initial program 100.0%

      \[e^{re} \cdot \sin im \]
    2. Taylor expanded in im around 0

      \[\leadsto e^{re} \cdot \color{blue}{\left(im \cdot \left(1 + \frac{-1}{6} \cdot {im}^{2}\right)\right)} \]
    3. Step-by-step derivation
      1. lower-*.f64N/A

        \[\leadsto e^{re} \cdot \left(im \cdot \color{blue}{\left(1 + \frac{-1}{6} \cdot {im}^{2}\right)}\right) \]
      2. lower-+.f64N/A

        \[\leadsto e^{re} \cdot \left(im \cdot \left(1 + \color{blue}{\frac{-1}{6} \cdot {im}^{2}}\right)\right) \]
      3. lower-*.f64N/A

        \[\leadsto e^{re} \cdot \left(im \cdot \left(1 + \frac{-1}{6} \cdot \color{blue}{{im}^{2}}\right)\right) \]
      4. lower-pow.f64 — 60.1%

        \[\leadsto e^{re} \cdot \left(im \cdot \left(1 + -0.16666666666666666 \cdot {im}^{\color{blue}{2}}\right)\right) \]
    4. Applied rewrites — 60.1%

      \[\leadsto e^{re} \cdot \color{blue}{\left(im \cdot \left(1 + -0.16666666666666666 \cdot {im}^{2}\right)\right)} \]
    5. Step-by-step derivation
      1. lift-*.f64N/A

        \[\leadsto e^{re} \cdot \left(im \cdot \color{blue}{\left(1 + \frac{-1}{6} \cdot {im}^{2}\right)}\right) \]
      2. lift-+.f64N/A

        \[\leadsto e^{re} \cdot \left(im \cdot \left(1 + \color{blue}{\frac{-1}{6} \cdot {im}^{2}}\right)\right) \]
      3. +-commutativeN/A

        \[\leadsto e^{re} \cdot \left(im \cdot \left(\frac{-1}{6} \cdot {im}^{2} + \color{blue}{1}\right)\right) \]
      4. distribute-lft-inN/A

        \[\leadsto e^{re} \cdot \left(im \cdot \left(\frac{-1}{6} \cdot {im}^{2}\right) + \color{blue}{im \cdot 1}\right) \]
      5. lift-*.f64N/A

        \[\leadsto e^{re} \cdot \left(im \cdot \left(\frac{-1}{6} \cdot {im}^{2}\right) + im \cdot 1\right) \]
      6. *-commutativeN/A

        \[\leadsto e^{re} \cdot \left(im \cdot \left({im}^{2} \cdot \frac{-1}{6}\right) + im \cdot 1\right) \]
      7. associate-*r*N/A

        \[\leadsto e^{re} \cdot \left(\left(im \cdot {im}^{2}\right) \cdot \frac{-1}{6} + \color{blue}{im} \cdot 1\right) \]
      8. lift-pow.f64N/A

        \[\leadsto e^{re} \cdot \left(\left(im \cdot {im}^{2}\right) \cdot \frac{-1}{6} + im \cdot 1\right) \]
      9. unpow2N/A

        \[\leadsto e^{re} \cdot \left(\left(im \cdot \left(im \cdot im\right)\right) \cdot \frac{-1}{6} + im \cdot 1\right) \]
      10. cube-unmultN/A

        \[\leadsto e^{re} \cdot \left({im}^{3} \cdot \frac{-1}{6} + im \cdot 1\right) \]
      11. *-lft-identityN/A

        \[\leadsto e^{re} \cdot \left({\left(1 \cdot im\right)}^{3} \cdot \frac{-1}{6} + im \cdot 1\right) \]
      12. *-rgt-identityN/A

        \[\leadsto e^{re} \cdot \left({\left(1 \cdot im\right)}^{3} \cdot \frac{-1}{6} + im\right) \]
      13. lower-fma.f64N/A

        \[\leadsto e^{re} \cdot \mathsf{fma}\left({\left(1 \cdot im\right)}^{3}, \color{blue}{\frac{-1}{6}}, im\right) \]
      14. *-lft-identityN/A

        \[\leadsto e^{re} \cdot \mathsf{fma}\left({im}^{3}, \frac{-1}{6}, im\right) \]
      15. metadata-evalN/A

        \[\leadsto e^{re} \cdot \mathsf{fma}\left({im}^{\left(2 + 1\right)}, \frac{-1}{6}, im\right) \]
      16. pow-plusN/A

        \[\leadsto e^{re} \cdot \mathsf{fma}\left({im}^{2} \cdot im, \frac{-1}{6}, im\right) \]
      17. lift-pow.f64N/A

        \[\leadsto e^{re} \cdot \mathsf{fma}\left({im}^{2} \cdot im, \frac{-1}{6}, im\right) \]
      18. lower-*.f6460.1%

        \[\leadsto e^{re} \cdot \mathsf{fma}\left({im}^{2} \cdot im, -0.16666666666666666, im\right) \]
      19. lift-pow.f64N/A

        \[\leadsto e^{re} \cdot \mathsf{fma}\left({im}^{2} \cdot im, \frac{-1}{6}, im\right) \]
      20. unpow2N/A

        \[\leadsto e^{re} \cdot \mathsf{fma}\left(\left(im \cdot im\right) \cdot im, \frac{-1}{6}, im\right) \]
      21. lower-*.f6460.1%

        \[\leadsto e^{re} \cdot \mathsf{fma}\left(\left(im \cdot im\right) \cdot im, -0.16666666666666666, im\right) \]
    6. Applied rewrites60.1%

      \[\leadsto e^{re} \cdot \mathsf{fma}\left(\left(im \cdot im\right) \cdot im, \color{blue}{-0.16666666666666666}, im\right) \]
    7. Taylor expanded in re around 0

      \[\leadsto \color{blue}{1} \cdot \mathsf{fma}\left(\left(im \cdot im\right) \cdot im, -0.16666666666666666, im\right) \]
    8. Step-by-step derivation
      1. Applied rewrites30.5%

        \[\leadsto \color{blue}{1} \cdot \mathsf{fma}\left(\left(im \cdot im\right) \cdot im, -0.16666666666666666, im\right) \]

      if -0.050000000000000003 < (*.f64 (exp.f64 re) (sin.f64 im))

      1. Initial program 100.0%

        \[e^{re} \cdot \sin im \]
      2. Taylor expanded in im around 0

        \[\leadsto \color{blue}{im \cdot e^{re}} \]
      3. Step-by-step derivation
        1. lower-*.f64N/A

          \[\leadsto im \cdot \color{blue}{e^{re}} \]
        2. lower-exp.f6469.2%

          \[\leadsto im \cdot e^{re} \]
      4. Applied rewrites69.2%

        \[\leadsto \color{blue}{im \cdot e^{re}} \]
    9. Recombined 2 regimes into one program.
    10. Add Preprocessing

    Alternative 6: 69.2% accurate, 3.2× speedup?

    \[im \cdot e^{re} \]
    ; Alternative 6: first-order Taylor in im — sin(im) ≈ im.
    (FPCore (re im)
      :precision binary64
      (* im (exp re)))
    double code(double re, double im) {
    	return im * exp(re);
    }
    
    ! First-order approximation of exp(re)*sin(im): sin(im) ~ im near 0.
    real(8) function code(re, im)
    use fmin_fmax_functions
        real(8), intent (in) :: re
        real(8), intent (in) :: im
        code = im * exp(re)
    end function
    
    /** First-order approximation: sin(im) ≈ im, giving im * e^re. */
    public static double code(double re, double im) {
    	final double growth = Math.exp(re);
    	return im * growth;
    }
    
    def code(re, im):
    	"""First-order approximation of exp(re)*sin(im): sin(im) ~ im near 0."""
    	growth = math.exp(re)
    	return im * growth
    
    # First-order approximation: sin(im) ≈ im, so the result is im * e^re.
    function code(re, im)
    	return Float64(im * exp(re))
    end
    
    % First-order approximation: sin(im) ~ im, so the result is im * exp(re).
    function tmp = code(re, im)
    	tmp = im * exp(re);
    end
    
    code[re_, im_] := N[(im * N[Exp[re], $MachinePrecision]), $MachinePrecision]
    
    im \cdot e^{re}
    
    Derivation
    1. Initial program 100.0%

      \[e^{re} \cdot \sin im \]
    2. Taylor expanded in im around 0

      \[\leadsto \color{blue}{im \cdot e^{re}} \]
    3. Step-by-step derivation
      1. lower-*.f64N/A

        \[\leadsto im \cdot \color{blue}{e^{re}} \]
      2. lower-exp.f6469.2%

        \[\leadsto im \cdot e^{re} \]
    4. Applied rewrites69.2%

      \[\leadsto \color{blue}{im \cdot e^{re}} \]
    5. Add Preprocessing

    Alternative 7: 37.2% accurate, 3.4× speedup?

    \[im \cdot \sqrt{\left(re - -1\right) \cdot \left(re - -1\right)} \]
    ; Alternative 7: exp(re) further linearized to |1 + re| via sqrt(x*x).
    (FPCore (re im)
      :precision binary64
      (* im (sqrt (* (- re -1.0) (- re -1.0)))))
    double code(double re, double im) {
    	return im * sqrt(((re - -1.0) * (re - -1.0)));
    }
    
    ! im * |1 + re|: exp(re) linearized to (1 + re); sqrt(x*x) yields |x|.
    real(8) function code(re, im)
    use fmin_fmax_functions
        real(8), intent (in) :: re
        real(8), intent (in) :: im
        code = im * sqrt(((re - (-1.0d0)) * (re - (-1.0d0))))
    end function
    
    /** im * |1 + re|: exp(re) linearized to 1 + re; sqrt(x*x) yields |x|. */
    public static double code(double re, double im) {
    	return im * Math.sqrt(((re - -1.0) * (re - -1.0)));
    }
    
    def code(re, im):
    	"""im * |1 + re|: exp(re) linearized to 1 + re; sqrt(x*x) gives |x|."""
    	shifted = re - -1.0
    	return im * math.sqrt(shifted * shifted)
    
    # im * |1 + re|: exp(re) linearized to 1 + re; sqrt(x*x) yields |x|.
    function code(re, im)
    	return Float64(im * sqrt(Float64(Float64(re - -1.0) * Float64(re - -1.0))))
    end
    
    % im * |1 + re|: exp(re) linearized to 1 + re; sqrt(x*x) yields |x|.
    function tmp = code(re, im)
    	tmp = im * sqrt(((re - -1.0) * (re - -1.0)));
    end
    
    code[re_, im_] := N[(im * N[Sqrt[N[(N[(re - -1.0), $MachinePrecision] * N[(re - -1.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
    
    im \cdot \sqrt{\left(re - -1\right) \cdot \left(re - -1\right)}
    
    Derivation
    1. Initial program 100.0%

      \[e^{re} \cdot \sin im \]
    2. Taylor expanded in im around 0

      \[\leadsto \color{blue}{im \cdot e^{re}} \]
    3. Step-by-step derivation
      1. lower-*.f64N/A

        \[\leadsto im \cdot \color{blue}{e^{re}} \]
      2. lower-exp.f6469.2%

        \[\leadsto im \cdot e^{re} \]
    4. Applied rewrites69.2%

      \[\leadsto \color{blue}{im \cdot e^{re}} \]
    5. Taylor expanded in re around 0

      \[\leadsto im \cdot \left(1 + \color{blue}{re}\right) \]
    6. Step-by-step derivation
      1. lower-+.f6430.3%

        \[\leadsto im \cdot \left(1 + re\right) \]
    7. Applied rewrites30.3%

      \[\leadsto im \cdot \left(1 + \color{blue}{re}\right) \]
    8. Step-by-step derivation
      1. rem-square-sqrtN/A

        \[\leadsto im \cdot \left(\sqrt{1 + re} \cdot \color{blue}{\sqrt{1 + re}}\right) \]
      2. sqrt-unprodN/A

        \[\leadsto im \cdot \sqrt{\left(1 + re\right) \cdot \left(1 + re\right)} \]
      3. lower-sqrt.f64N/A

        \[\leadsto im \cdot \sqrt{\left(1 + re\right) \cdot \left(1 + re\right)} \]
      4. lower-*.f6437.2%

        \[\leadsto im \cdot \sqrt{\left(1 + re\right) \cdot \left(1 + re\right)} \]
      5. lift-+.f64N/A

        \[\leadsto im \cdot \sqrt{\left(1 + re\right) \cdot \left(1 + re\right)} \]
      6. +-commutativeN/A

        \[\leadsto im \cdot \sqrt{\left(re + 1\right) \cdot \left(1 + re\right)} \]
      7. add-flipN/A

        \[\leadsto im \cdot \sqrt{\left(re - \left(\mathsf{neg}\left(1\right)\right)\right) \cdot \left(1 + re\right)} \]
      8. lower--.f64N/A

        \[\leadsto im \cdot \sqrt{\left(re - \left(\mathsf{neg}\left(1\right)\right)\right) \cdot \left(1 + re\right)} \]
      9. metadata-eval37.2%

        \[\leadsto im \cdot \sqrt{\left(re - -1\right) \cdot \left(1 + re\right)} \]
      10. metadata-evalN/A

        \[\leadsto im \cdot \sqrt{\left(re - -1\right) \cdot \mathsf{Rewrite=>}\left(lift-+.f64, \left(1 + re\right)\right)} \]
      11. metadata-evalN/A

        \[\leadsto im \cdot \sqrt{\left(re - -1\right) \cdot \mathsf{Rewrite=>}\left(+-commutative, \left(re + 1\right)\right)} \]
      12. metadata-evalN/A

        \[\leadsto im \cdot \sqrt{\left(re - -1\right) \cdot \mathsf{Rewrite=>}\left(add-flip, \left(re - \left(\mathsf{neg}\left(1\right)\right)\right)\right)} \]
      13. metadata-evalN/A

        \[\leadsto im \cdot \sqrt{\left(re - -1\right) \cdot \mathsf{Rewrite=>}\left(lower--.f64, \left(re - \left(\mathsf{neg}\left(1\right)\right)\right)\right)} \]
      14. metadata-evalN/A

        \[\leadsto im \cdot \sqrt{\left(re - -1\right) \cdot \left(re - \mathsf{Rewrite=>}\left(metadata-eval, -1\right)\right)} \]
    9. Applied rewrites37.2%

      \[\leadsto im \cdot \sqrt{\left(re - -1\right) \cdot \left(re - -1\right)} \]
    10. Add Preprocessing

    Alternative 8: 30.3% accurate, 8.1× speedup?

    \[\mathsf{fma}\left(re, im, im\right) \]
    ; Alternative 8: im * (1 + re) computed in one rounding as fma(re, im, im).
    (FPCore (re im)
      :precision binary64
      (fma re im im))
    double code(double re, double im) {
    	return fma(re, im, im);
    }
    
    # im * (1 + re) in a single rounding: fused re*im + im.
    function code(re, im)
    	return fma(re, im, im)
    end
    
    code[re_, im_] := N[(re * im + im), $MachinePrecision]
    
    \mathsf{fma}\left(re, im, im\right)
    
    Derivation
    1. Initial program 100.0%

      \[e^{re} \cdot \sin im \]
    2. Taylor expanded in im around 0

      \[\leadsto \color{blue}{im \cdot e^{re}} \]
    3. Step-by-step derivation
      1. lower-*.f64N/A

        \[\leadsto im \cdot \color{blue}{e^{re}} \]
      2. lower-exp.f6469.2%

        \[\leadsto im \cdot e^{re} \]
    4. Applied rewrites69.2%

      \[\leadsto \color{blue}{im \cdot e^{re}} \]
    5. Taylor expanded in re around 0

      \[\leadsto im + \color{blue}{im \cdot re} \]
    6. Step-by-step derivation
      1. lower-+.f64N/A

        \[\leadsto im + im \cdot \color{blue}{re} \]
      2. lower-*.f6430.3%

        \[\leadsto im + im \cdot re \]
    7. Applied rewrites30.3%

      \[\leadsto im + \color{blue}{im \cdot re} \]
    8. Step-by-step derivation
      1. lift-+.f64N/A

        \[\leadsto im + im \cdot \color{blue}{re} \]
      2. +-commutativeN/A

        \[\leadsto im \cdot re + im \]
      3. lift-*.f64N/A

        \[\leadsto im \cdot re + im \]
      4. *-commutativeN/A

        \[\leadsto re \cdot im + im \]
      5. lower-fma.f6430.3%

        \[\leadsto \mathsf{fma}\left(re, im, im\right) \]
    9. Applied rewrites30.3%

      \[\leadsto \mathsf{fma}\left(re, im, im\right) \]
    10. Add Preprocessing

    Alternative 9: 27.0% accurate, 48.6× speedup?

    \[im \]
    ; Alternative 9: degenerate Taylor limit — exp(re)*sin(im) ≈ im; re unused.
    (FPCore (re im)
      :precision binary64
      im)
    /* Degenerate Taylor limit: exp(re)*sin(im) ~ im near (0, 0); re is unused. */
    double code(double re, double im) {
    	return im;
    }
    
    ! Degenerate Taylor limit: exp(re)*sin(im) ~ im near (0, 0); re is unused.
    real(8) function code(re, im)
    use fmin_fmax_functions
        real(8), intent (in) :: re
        real(8), intent (in) :: im
        code = im
    end function
    
    /** Degenerate Taylor limit: exp(re)*sin(im) ~ im near (0, 0); re is unused. */
    public static double code(double re, double im) {
    	return im;
    }
    
    def code(re, im):
    	# Degenerate Taylor limit: exp(re)*sin(im) ~ im near (0, 0); re is unused.
    	return im
    
    # Degenerate Taylor limit: exp(re)*sin(im) ~ im near (0, 0); re is unused.
    function code(re, im)
    	return im
    end
    
    % Degenerate Taylor limit: exp(re)*sin(im) ~ im near (0, 0); re is unused.
    function tmp = code(re, im)
    	tmp = im;
    end
    
    code[re_, im_] := im
    
    im
    
    Derivation
    1. Initial program 100.0%

      \[e^{re} \cdot \sin im \]
    2. Taylor expanded in im around 0

      \[\leadsto \color{blue}{im \cdot e^{re}} \]
    3. Step-by-step derivation
      1. lower-*.f64N/A

        \[\leadsto im \cdot \color{blue}{e^{re}} \]
      2. lower-exp.f6469.2%

        \[\leadsto im \cdot e^{re} \]
    4. Applied rewrites69.2%

      \[\leadsto \color{blue}{im \cdot e^{re}} \]
    5. Taylor expanded in re around 0

      \[\leadsto im \]
    6. Step-by-step derivation
      1. Applied rewrites27.0%

        \[\leadsto im \]
      2. Add Preprocessing

      Reproduce

      ?
      herbie shell --seed 2025313 -o setup:search
      ; Original specification, for reproducing this report with `herbie shell`.
      (FPCore (re im)
        :name "math.exp on complex, imaginary part"
        :precision binary64
        (* (exp re) (sin im)))