math.sin on complex, real part

Percentage Accurate: 100.0% → 100.0%
Time: 4.9s
Alternatives: 9
Speedup: 1.5×

Specification

\[\left(0.5 \cdot \sin re\right) \cdot \left(e^{0 - im} + e^{im}\right)\]
(FPCore (re im)
 :precision binary64
 (* (* 0.5 (sin re)) (+ (exp (- 0.0 im)) (exp im))))
double code(double re, double im) {
	return (0.5 * sin(re)) * (exp((0.0 - im)) + exp(im));
}
real(8) function code(re, im)
    real(8), intent (in) :: re
    real(8), intent (in) :: im
    code = (0.5d0 * sin(re)) * (exp((0.0d0 - im)) + exp(im))
end function
public static double code(double re, double im) {
	return (0.5 * Math.sin(re)) * (Math.exp((0.0 - im)) + Math.exp(im));
}
def code(re, im):
	return (0.5 * math.sin(re)) * (math.exp((0.0 - im)) + math.exp(im))
function code(re, im)
	return Float64(Float64(0.5 * sin(re)) * Float64(exp(Float64(0.0 - im)) + exp(im)))
end
function tmp = code(re, im)
	tmp = (0.5 * sin(re)) * (exp((0.0 - im)) + exp(im));
end
code[re_, im_] := N[(N[(0.5 * N[Sin[re], $MachinePrecision]), $MachinePrecision] * N[(N[Exp[N[(0.0 - im), $MachinePrecision]], $MachinePrecision] + N[Exp[im], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
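
As a sanity check on the specification: the expression equals the real part of sin(re + i·im), since sin(x + iy) = sin(x)·cosh(y) + i·cos(x)·sinh(y) and cosh(y) = (e^{-y} + e^{y})/2. A minimal Python sketch (not part of the Herbie report) comparing the translation above against the standard library's complex sine:

import cmath
import math

def code(re, im):
	return (0.5 * math.sin(re)) * (math.exp((0.0 - im)) + math.exp(im))

# The spec should match the real part of complex sin for moderate inputs.
for re, im in [(0.3, 0.7), (1.2, -0.4), (-2.5, 1.1)]:
	assert math.isclose(code(re, im), cmath.sin(complex(re, im)).real, rel_tol=1e-12)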

Sampling outcomes in binary64 precision:

Local Percentage Accuracy

The average percentage accuracy by input value. The horizontal axis shows the value of an input variable (chosen in the plot title); the vertical axis shows accuracy, where higher is better. Red represents the original program and blue represents Herbie's suggestion; these can be toggled with the buttons below the plot. The line is an average, while the dots represent individual samples.

Accuracy vs Speed

Herbie found 9 alternatives:

The accuracy (vertical axis) and speed (horizontal axis) of each alternative; up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed-accuracy tradeoffs.

Initial Program: 100.0% accurate, 1.0× speedup

\[\left(0.5 \cdot \sin re\right) \cdot \left(e^{0 - im} + e^{im}\right)\]
(FPCore (re im)
 :precision binary64
 (* (* 0.5 (sin re)) (+ (exp (- 0.0 im)) (exp im))))
double code(double re, double im) {
	return (0.5 * sin(re)) * (exp((0.0 - im)) + exp(im));
}
real(8) function code(re, im)
    real(8), intent (in) :: re
    real(8), intent (in) :: im
    code = (0.5d0 * sin(re)) * (exp((0.0d0 - im)) + exp(im))
end function
public static double code(double re, double im) {
	return (0.5 * Math.sin(re)) * (Math.exp((0.0 - im)) + Math.exp(im));
}
def code(re, im):
	return (0.5 * math.sin(re)) * (math.exp((0.0 - im)) + math.exp(im))
function code(re, im)
	return Float64(Float64(0.5 * sin(re)) * Float64(exp(Float64(0.0 - im)) + exp(im)))
end
function tmp = code(re, im)
	tmp = (0.5 * sin(re)) * (exp((0.0 - im)) + exp(im));
end
code[re_, im_] := N[(N[(0.5 * N[Sin[re], $MachinePrecision]), $MachinePrecision] * N[(N[Exp[N[(0.0 - im), $MachinePrecision]], $MachinePrecision] + N[Exp[im], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]

Alternative 1: 100.0% accurate, 1.5× speedup

\[\left(0.5 \cdot \sin re\right) \cdot \left(2 \cdot \cosh im\right)\]
(FPCore (re im) :precision binary64 (* (* 0.5 (sin re)) (* 2.0 (cosh im))))
double code(double re, double im) {
	return (0.5 * sin(re)) * (2.0 * cosh(im));
}
real(8) function code(re, im)
    real(8), intent (in) :: re
    real(8), intent (in) :: im
    code = (0.5d0 * sin(re)) * (2.0d0 * cosh(im))
end function
public static double code(double re, double im) {
	return (0.5 * Math.sin(re)) * (2.0 * Math.cosh(im));
}
def code(re, im):
	return (0.5 * math.sin(re)) * (2.0 * math.cosh(im))
function code(re, im)
	return Float64(Float64(0.5 * sin(re)) * Float64(2.0 * cosh(im)))
end
function tmp = code(re, im)
	tmp = (0.5 * sin(re)) * (2.0 * cosh(im));
end
code[re_, im_] := N[(N[(0.5 * N[Sin[re], $MachinePrecision]), $MachinePrecision] * N[(2.0 * N[Cosh[im], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
Derivation
  1. Initial program 100.0%

    \[\left(0.5 \cdot \sin re\right) \cdot \left(e^{0 - im} + e^{im}\right) \]
  2. Step-by-step derivation
    1. add-log-exp 76.8%

      \[\leadsto \color{blue}{\log \left(e^{\left(0.5 \cdot \sin re\right) \cdot \left(e^{0 - im} + e^{im}\right)}\right)} \]
    2. *-un-lft-identity 76.8%

      \[\leadsto \log \color{blue}{\left(1 \cdot e^{\left(0.5 \cdot \sin re\right) \cdot \left(e^{0 - im} + e^{im}\right)}\right)} \]
    3. log-prod 76.8%

      \[\leadsto \color{blue}{\log 1 + \log \left(e^{\left(0.5 \cdot \sin re\right) \cdot \left(e^{0 - im} + e^{im}\right)}\right)} \]
    4. metadata-eval 76.8%

      \[\leadsto \color{blue}{0} + \log \left(e^{\left(0.5 \cdot \sin re\right) \cdot \left(e^{0 - im} + e^{im}\right)}\right) \]
    5. add-log-exp 100.0%

      \[\leadsto 0 + \color{blue}{\left(0.5 \cdot \sin re\right) \cdot \left(e^{0 - im} + e^{im}\right)} \]
    6. +-commutative 100.0%

      \[\leadsto 0 + \left(0.5 \cdot \sin re\right) \cdot \color{blue}{\left(e^{im} + e^{0 - im}\right)} \]
    7. sub0-neg 100.0%

      \[\leadsto 0 + \left(0.5 \cdot \sin re\right) \cdot \left(e^{im} + e^{\color{blue}{-im}}\right) \]
    8. cosh-undef 100.0%

      \[\leadsto 0 + \left(0.5 \cdot \sin re\right) \cdot \color{blue}{\left(2 \cdot \cosh im\right)} \]
  3. Applied egg-rr 100.0%

    \[\leadsto \color{blue}{0 + \left(0.5 \cdot \sin re\right) \cdot \left(2 \cdot \cosh im\right)} \]
  4. Final simplification 100.0%

    \[\leadsto \left(0.5 \cdot \sin re\right) \cdot \left(2 \cdot \cosh im\right) \]
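
The key rewrite is the identity e^{im} + e^{-im} = 2·cosh(im), which replaces two exp calls with one cosh call. A small Python check (illustrative, not generated by Herbie):

import math

for im in [-3.0, -0.1, 0.0, 0.5, 4.0]:
	assert math.isclose(math.exp(im) + math.exp(-im), 2.0 * math.cosh(im), rel_tol=1e-12)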

Alternative 2: 87.5% accurate, 1.5× speedup

\[\begin{array}{l} \mathbf{if}\;im \leq -9 \cdot 10^{-5} \lor \neg \left(im \leq 6.2 \cdot 10^{-6}\right):\\ \;\;\;\;0.5 \cdot \left(re \cdot \left(e^{im} + e^{-im}\right)\right)\\ \mathbf{else}:\\ \;\;\;\;\sin re\\ \end{array} \]
(FPCore (re im)
 :precision binary64
 (if (or (<= im -9e-5) (not (<= im 6.2e-6)))
   (* 0.5 (* re (+ (exp im) (exp (- im)))))
   (sin re)))
double code(double re, double im) {
	double tmp;
	if ((im <= -9e-5) || !(im <= 6.2e-6)) {
		tmp = 0.5 * (re * (exp(im) + exp(-im)));
	} else {
		tmp = sin(re);
	}
	return tmp;
}
real(8) function code(re, im)
    real(8), intent (in) :: re
    real(8), intent (in) :: im
    real(8) :: tmp
    if ((im <= (-9d-5)) .or. (.not. (im <= 6.2d-6))) then
        tmp = 0.5d0 * (re * (exp(im) + exp(-im)))
    else
        tmp = sin(re)
    end if
    code = tmp
end function
public static double code(double re, double im) {
	double tmp;
	if ((im <= -9e-5) || !(im <= 6.2e-6)) {
		tmp = 0.5 * (re * (Math.exp(im) + Math.exp(-im)));
	} else {
		tmp = Math.sin(re);
	}
	return tmp;
}
def code(re, im):
	tmp = 0
	if (im <= -9e-5) or not (im <= 6.2e-6):
		tmp = 0.5 * (re * (math.exp(im) + math.exp(-im)))
	else:
		tmp = math.sin(re)
	return tmp
function code(re, im)
	tmp = 0.0
	if ((im <= -9e-5) || !(im <= 6.2e-6))
		tmp = Float64(0.5 * Float64(re * Float64(exp(im) + exp(Float64(-im)))));
	else
		tmp = sin(re);
	end
	return tmp
end
function tmp_2 = code(re, im)
	tmp = 0.0;
	if ((im <= -9e-5) || ~((im <= 6.2e-6)))
		tmp = 0.5 * (re * (exp(im) + exp(-im)));
	else
		tmp = sin(re);
	end
	tmp_2 = tmp;
end
code[re_, im_] := If[Or[LessEqual[im, -9e-5], N[Not[LessEqual[im, 6.2e-6]], $MachinePrecision]], N[(0.5 * N[(re * N[(N[Exp[im], $MachinePrecision] + N[Exp[(-im)], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[Sin[re], $MachinePrecision]]
Derivation
  1. Split input into 2 regimes
  2. if im < -9.00000000000000057e-5 or 6.1999999999999999e-6 < im

    1. Initial program 100.0%

      \[\left(0.5 \cdot \sin re\right) \cdot \left(e^{0 - im} + e^{im}\right) \]
    2. Taylor expanded in re around 0 68.9%

      \[\leadsto \color{blue}{0.5 \cdot \left(re \cdot \left(e^{im} + e^{-im}\right)\right)} \]

    if -9.00000000000000057e-5 < im < 6.1999999999999999e-6

    1. Initial program 100.0%

      \[\left(0.5 \cdot \sin re\right) \cdot \left(e^{0 - im} + e^{im}\right) \]
    2. Taylor expanded in im around 0 99.8%

      \[\leadsto \color{blue}{\sin re} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification 83.9%

    \[\leadsto \begin{array}{l} \mathbf{if}\;im \leq -9 \cdot 10^{-5} \lor \neg \left(im \leq 6.2 \cdot 10^{-6}\right):\\ \;\;\;\;0.5 \cdot \left(re \cdot \left(e^{im} + e^{-im}\right)\right)\\ \mathbf{else}:\\ \;\;\;\;\sin re\\ \end{array} \]
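
The split works because e^{0 - im} + e^{im} = 2·cosh(im) = 2 + im² + O(im⁴): for tiny im the product collapses to sin(re), while outside that band Herbie instead Taylor-expands sin(re) to re. A rough illustration in Python (hypothetical inputs, not from the report):

import math

re, im = 0.8, 1e-6  # im lies in the middle regime
full = (0.5 * math.sin(re)) * (math.exp(-im) + math.exp(im))
print(full - math.sin(re))  # about sin(re) * im**2 / 2, i.e. roughly 3.6e-13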

Alternative 3: 73.5% accurate, 1.5× speedup

\[\begin{array}{l} t_0 := 0.5 \cdot \left(re \cdot \left(im \cdot im\right) + re \cdot 2\right)\\ \mathbf{if}\;im \leq -5.8 \cdot 10^{+125}:\\ \;\;\;\;t_0\\ \mathbf{elif}\;im \leq -1900000000000:\\ \;\;\;\;0.5 \cdot \mathsf{log1p}\left(\mathsf{expm1}\left(re\right)\right)\\ \mathbf{elif}\;im \leq 2 \cdot 10^{+20}:\\ \;\;\;\;\sin re\\ \mathbf{elif}\;im \leq 4.5 \cdot 10^{+94}:\\ \;\;\;\;0.5 \cdot {re}^{-2}\\ \mathbf{else}:\\ \;\;\;\;t_0\\ \end{array} \]
(FPCore (re im)
 :precision binary64
 (let* ((t_0 (* 0.5 (+ (* re (* im im)) (* re 2.0)))))
   (if (<= im -5.8e+125)
     t_0
     (if (<= im -1900000000000.0)
       (* 0.5 (log1p (expm1 re)))
       (if (<= im 2e+20)
         (sin re)
         (if (<= im 4.5e+94) (* 0.5 (pow re -2.0)) t_0))))))
double code(double re, double im) {
	double t_0 = 0.5 * ((re * (im * im)) + (re * 2.0));
	double tmp;
	if (im <= -5.8e+125) {
		tmp = t_0;
	} else if (im <= -1900000000000.0) {
		tmp = 0.5 * log1p(expm1(re));
	} else if (im <= 2e+20) {
		tmp = sin(re);
	} else if (im <= 4.5e+94) {
		tmp = 0.5 * pow(re, -2.0);
	} else {
		tmp = t_0;
	}
	return tmp;
}
public static double code(double re, double im) {
	double t_0 = 0.5 * ((re * (im * im)) + (re * 2.0));
	double tmp;
	if (im <= -5.8e+125) {
		tmp = t_0;
	} else if (im <= -1900000000000.0) {
		tmp = 0.5 * Math.log1p(Math.expm1(re));
	} else if (im <= 2e+20) {
		tmp = Math.sin(re);
	} else if (im <= 4.5e+94) {
		tmp = 0.5 * Math.pow(re, -2.0);
	} else {
		tmp = t_0;
	}
	return tmp;
}
def code(re, im):
	t_0 = 0.5 * ((re * (im * im)) + (re * 2.0))
	tmp = 0
	if im <= -5.8e+125:
		tmp = t_0
	elif im <= -1900000000000.0:
		tmp = 0.5 * math.log1p(math.expm1(re))
	elif im <= 2e+20:
		tmp = math.sin(re)
	elif im <= 4.5e+94:
		tmp = 0.5 * math.pow(re, -2.0)
	else:
		tmp = t_0
	return tmp
function code(re, im)
	t_0 = Float64(0.5 * Float64(Float64(re * Float64(im * im)) + Float64(re * 2.0)))
	tmp = 0.0
	if (im <= -5.8e+125)
		tmp = t_0;
	elseif (im <= -1900000000000.0)
		tmp = Float64(0.5 * log1p(expm1(re)));
	elseif (im <= 2e+20)
		tmp = sin(re);
	elseif (im <= 4.5e+94)
		tmp = Float64(0.5 * (re ^ -2.0));
	else
		tmp = t_0;
	end
	return tmp
end
code[re_, im_] := Block[{t$95$0 = N[(0.5 * N[(N[(re * N[(im * im), $MachinePrecision]), $MachinePrecision] + N[(re * 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[im, -5.8e+125], t$95$0, If[LessEqual[im, -1900000000000.0], N[(0.5 * N[Log[1 + N[(Exp[re] - 1), $MachinePrecision]], $MachinePrecision]), $MachinePrecision], If[LessEqual[im, 2e+20], N[Sin[re], $MachinePrecision], If[LessEqual[im, 4.5e+94], N[(0.5 * N[Power[re, -2.0], $MachinePrecision]), $MachinePrecision], t$95$0]]]]]
Derivation
  1. Split input into 4 regimes
  2. if im < -5.79999999999999986e125 or 4.49999999999999972e94 < im

    1. Initial program 100.0%

      \[\left(0.5 \cdot \sin re\right) \cdot \left(e^{0 - im} + e^{im}\right) \]
    2. Taylor expanded in re around 0 67.5%

      \[\leadsto \color{blue}{0.5 \cdot \left(re \cdot \left(e^{im} + e^{-im}\right)\right)} \]
    3. Taylor expanded in im around 0 52.1%

      \[\leadsto 0.5 \cdot \color{blue}{\left(re \cdot {im}^{2} + 2 \cdot re\right)} \]
    4. Step-by-step derivation
      1. expm1-log1p-u 23.0%

        \[\leadsto 0.5 \cdot \left(\color{blue}{\mathsf{expm1}\left(\mathsf{log1p}\left(re \cdot {im}^{2}\right)\right)} + 2 \cdot re\right) \]
      2. expm1-udef 23.0%

        \[\leadsto 0.5 \cdot \left(\color{blue}{\left(e^{\mathsf{log1p}\left(re \cdot {im}^{2}\right)} - 1\right)} + 2 \cdot re\right) \]
      3. log1p-udef 23.0%

        \[\leadsto 0.5 \cdot \left(\left(e^{\color{blue}{\log \left(1 + re \cdot {im}^{2}\right)}} - 1\right) + 2 \cdot re\right) \]
      4. add-exp-log 52.1%

        \[\leadsto 0.5 \cdot \left(\left(\color{blue}{\left(1 + re \cdot {im}^{2}\right)} - 1\right) + 2 \cdot re\right) \]
      5. unpow2 52.1%

        \[\leadsto 0.5 \cdot \left(\left(\left(1 + re \cdot \color{blue}{\left(im \cdot im\right)}\right) - 1\right) + 2 \cdot re\right) \]
    5. Applied egg-rr 52.1%

      \[\leadsto 0.5 \cdot \left(\color{blue}{\left(\left(1 + re \cdot \left(im \cdot im\right)\right) - 1\right)} + 2 \cdot re\right) \]
    6. Step-by-step derivation
      1. +-commutative 52.1%

        \[\leadsto 0.5 \cdot \left(\left(\color{blue}{\left(re \cdot \left(im \cdot im\right) + 1\right)} - 1\right) + 2 \cdot re\right) \]
      2. associate--l+ 52.1%

        \[\leadsto 0.5 \cdot \left(\color{blue}{\left(re \cdot \left(im \cdot im\right) + \left(1 - 1\right)\right)} + 2 \cdot re\right) \]
      3. metadata-eval 52.1%

        \[\leadsto 0.5 \cdot \left(\left(re \cdot \left(im \cdot im\right) + \color{blue}{0}\right) + 2 \cdot re\right) \]
      4. +-rgt-identity 52.1%

        \[\leadsto 0.5 \cdot \left(\color{blue}{re \cdot \left(im \cdot im\right)} + 2 \cdot re\right) \]
    7. Simplified 52.1%

      \[\leadsto 0.5 \cdot \left(\color{blue}{re \cdot \left(im \cdot im\right)} + 2 \cdot re\right) \]

    if -5.79999999999999986e125 < im < -1.9e12

    1. Initial program 100.0%

      \[\left(0.5 \cdot \sin re\right) \cdot \left(e^{0 - im} + e^{im}\right) \]
    2. Taylor expanded in re around 0 70.4%

      \[\leadsto \color{blue}{0.5 \cdot \left(re \cdot \left(e^{im} + e^{-im}\right)\right)} \]
    3. Applied egg-rr 41.4%

      \[\leadsto 0.5 \cdot \color{blue}{\mathsf{log1p}\left(\mathsf{expm1}\left(re\right)\right)} \]

    if -1.9e12 < im < 2e20

    1. Initial program 100.0%

      \[\left(0.5 \cdot \sin re\right) \cdot \left(e^{0 - im} + e^{im}\right) \]
    2. Taylor expanded in im around 0 94.3%

      \[\leadsto \color{blue}{\sin re} \]

    if 2e20 < im < 4.49999999999999972e94

    1. Initial program 100.0%

      \[\left(0.5 \cdot \sin re\right) \cdot \left(e^{0 - im} + e^{im}\right) \]
    2. Taylor expanded in re around 0 68.8%

      \[\leadsto \color{blue}{0.5 \cdot \left(re \cdot \left(e^{im} + e^{-im}\right)\right)} \]
    3. Applied egg-rr 39.0%

      \[\leadsto 0.5 \cdot \color{blue}{{re}^{-2}} \]
  3. Recombined 4 regimes into one program.
  4. Final simplification 72.1%

    \[\leadsto \begin{array}{l} \mathbf{if}\;im \leq -5.8 \cdot 10^{+125}:\\ \;\;\;\;0.5 \cdot \left(re \cdot \left(im \cdot im\right) + re \cdot 2\right)\\ \mathbf{elif}\;im \leq -1900000000000:\\ \;\;\;\;0.5 \cdot \mathsf{log1p}\left(\mathsf{expm1}\left(re\right)\right)\\ \mathbf{elif}\;im \leq 2 \cdot 10^{+20}:\\ \;\;\;\;\sin re\\ \mathbf{elif}\;im \leq 4.5 \cdot 10^{+94}:\\ \;\;\;\;0.5 \cdot {re}^{-2}\\ \mathbf{else}:\\ \;\;\;\;0.5 \cdot \left(re \cdot \left(im \cdot im\right) + re \cdot 2\right)\\ \end{array} \]
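
Note that log1p(expm1(re)) is mathematically just re, so the second branch is effectively 0.5·re; the composed form appears to be an artifact of the rewrite rather than a numerical necessity. A one-line hedged check:

import math

for re in [-2.0, -1e-8, 1e-8, 3.0]:
	assert math.isclose(math.log1p(math.expm1(re)), re, rel_tol=1e-12)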

Alternative 4: 72.2% accurate, 2.8× speedup

\[\begin{array}{l} t_0 := 0.5 \cdot \left(re \cdot \left(im \cdot im\right) + re \cdot 2\right)\\ \mathbf{if}\;im \leq -0.000195:\\ \;\;\;\;t_0\\ \mathbf{elif}\;im \leq 3.9 \cdot 10^{+18}:\\ \;\;\;\;\sin re\\ \mathbf{elif}\;im \leq 2.2 \cdot 10^{+95}:\\ \;\;\;\;0.5 \cdot {re}^{-2}\\ \mathbf{else}:\\ \;\;\;\;t_0\\ \end{array} \]
(FPCore (re im)
 :precision binary64
 (let* ((t_0 (* 0.5 (+ (* re (* im im)) (* re 2.0)))))
   (if (<= im -0.000195)
     t_0
     (if (<= im 3.9e+18)
       (sin re)
       (if (<= im 2.2e+95) (* 0.5 (pow re -2.0)) t_0)))))
double code(double re, double im) {
	double t_0 = 0.5 * ((re * (im * im)) + (re * 2.0));
	double tmp;
	if (im <= -0.000195) {
		tmp = t_0;
	} else if (im <= 3.9e+18) {
		tmp = sin(re);
	} else if (im <= 2.2e+95) {
		tmp = 0.5 * pow(re, -2.0);
	} else {
		tmp = t_0;
	}
	return tmp;
}
real(8) function code(re, im)
    real(8), intent (in) :: re
    real(8), intent (in) :: im
    real(8) :: t_0
    real(8) :: tmp
    t_0 = 0.5d0 * ((re * (im * im)) + (re * 2.0d0))
    if (im <= (-0.000195d0)) then
        tmp = t_0
    else if (im <= 3.9d+18) then
        tmp = sin(re)
    else if (im <= 2.2d+95) then
        tmp = 0.5d0 * (re ** (-2.0d0))
    else
        tmp = t_0
    end if
    code = tmp
end function
public static double code(double re, double im) {
	double t_0 = 0.5 * ((re * (im * im)) + (re * 2.0));
	double tmp;
	if (im <= -0.000195) {
		tmp = t_0;
	} else if (im <= 3.9e+18) {
		tmp = Math.sin(re);
	} else if (im <= 2.2e+95) {
		tmp = 0.5 * Math.pow(re, -2.0);
	} else {
		tmp = t_0;
	}
	return tmp;
}
def code(re, im):
	t_0 = 0.5 * ((re * (im * im)) + (re * 2.0))
	tmp = 0
	if im <= -0.000195:
		tmp = t_0
	elif im <= 3.9e+18:
		tmp = math.sin(re)
	elif im <= 2.2e+95:
		tmp = 0.5 * math.pow(re, -2.0)
	else:
		tmp = t_0
	return tmp
function code(re, im)
	t_0 = Float64(0.5 * Float64(Float64(re * Float64(im * im)) + Float64(re * 2.0)))
	tmp = 0.0
	if (im <= -0.000195)
		tmp = t_0;
	elseif (im <= 3.9e+18)
		tmp = sin(re);
	elseif (im <= 2.2e+95)
		tmp = Float64(0.5 * (re ^ -2.0));
	else
		tmp = t_0;
	end
	return tmp
end
function tmp_2 = code(re, im)
	t_0 = 0.5 * ((re * (im * im)) + (re * 2.0));
	tmp = 0.0;
	if (im <= -0.000195)
		tmp = t_0;
	elseif (im <= 3.9e+18)
		tmp = sin(re);
	elseif (im <= 2.2e+95)
		tmp = 0.5 * (re ^ -2.0);
	else
		tmp = t_0;
	end
	tmp_2 = tmp;
end
code[re_, im_] := Block[{t$95$0 = N[(0.5 * N[(N[(re * N[(im * im), $MachinePrecision]), $MachinePrecision] + N[(re * 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[im, -0.000195], t$95$0, If[LessEqual[im, 3.9e+18], N[Sin[re], $MachinePrecision], If[LessEqual[im, 2.2e+95], N[(0.5 * N[Power[re, -2.0], $MachinePrecision]), $MachinePrecision], t$95$0]]]]
Derivation
  1. Split input into 3 regimes
  2. if im < -1.94999999999999996e-4 or 2.1999999999999999e95 < im

    1. Initial program 100.0%

      \[\left(0.5 \cdot \sin re\right) \cdot \left(e^{0 - im} + e^{im}\right) \]
    2. Taylor expanded in re around 0 68.2%

      \[\leadsto \color{blue}{0.5 \cdot \left(re \cdot \left(e^{im} + e^{-im}\right)\right)} \]
    3. Taylor expanded in im around 0 43.0%

      \[\leadsto 0.5 \cdot \color{blue}{\left(re \cdot {im}^{2} + 2 \cdot re\right)} \]
    4. Step-by-step derivation
      1. expm1-log1p-u 19.0%

        \[\leadsto 0.5 \cdot \left(\color{blue}{\mathsf{expm1}\left(\mathsf{log1p}\left(re \cdot {im}^{2}\right)\right)} + 2 \cdot re\right) \]
      2. expm1-udef 18.6%

        \[\leadsto 0.5 \cdot \left(\color{blue}{\left(e^{\mathsf{log1p}\left(re \cdot {im}^{2}\right)} - 1\right)} + 2 \cdot re\right) \]
      3. log1p-udef 18.6%

        \[\leadsto 0.5 \cdot \left(\left(e^{\color{blue}{\log \left(1 + re \cdot {im}^{2}\right)}} - 1\right) + 2 \cdot re\right) \]
      4. add-exp-log 42.6%

        \[\leadsto 0.5 \cdot \left(\left(\color{blue}{\left(1 + re \cdot {im}^{2}\right)} - 1\right) + 2 \cdot re\right) \]
      5. unpow2 42.6%

        \[\leadsto 0.5 \cdot \left(\left(\left(1 + re \cdot \color{blue}{\left(im \cdot im\right)}\right) - 1\right) + 2 \cdot re\right) \]
    5. Applied egg-rr 42.6%

      \[\leadsto 0.5 \cdot \left(\color{blue}{\left(\left(1 + re \cdot \left(im \cdot im\right)\right) - 1\right)} + 2 \cdot re\right) \]
    6. Step-by-step derivation
      1. +-commutative 42.6%

        \[\leadsto 0.5 \cdot \left(\left(\color{blue}{\left(re \cdot \left(im \cdot im\right) + 1\right)} - 1\right) + 2 \cdot re\right) \]
      2. associate--l+ 43.0%

        \[\leadsto 0.5 \cdot \left(\color{blue}{\left(re \cdot \left(im \cdot im\right) + \left(1 - 1\right)\right)} + 2 \cdot re\right) \]
      3. metadata-eval 43.0%

        \[\leadsto 0.5 \cdot \left(\left(re \cdot \left(im \cdot im\right) + \color{blue}{0}\right) + 2 \cdot re\right) \]
      4. +-rgt-identity 43.0%

        \[\leadsto 0.5 \cdot \left(\color{blue}{re \cdot \left(im \cdot im\right)} + 2 \cdot re\right) \]
    7. Simplified 43.0%

      \[\leadsto 0.5 \cdot \left(\color{blue}{re \cdot \left(im \cdot im\right)} + 2 \cdot re\right) \]

    if -1.94999999999999996e-4 < im < 3.9e18

    1. Initial program 100.0%

      \[\left(0.5 \cdot \sin re\right) \cdot \left(e^{0 - im} + e^{im}\right) \]
    2. Taylor expanded in im around 0 95.9%

      \[\leadsto \color{blue}{\sin re} \]

    if 3.9e18 < im < 2.1999999999999999e95

    1. Initial program 100.0%

      \[\left(0.5 \cdot \sin re\right) \cdot \left(e^{0 - im} + e^{im}\right) \]
    2. Taylor expanded in re around 0 68.8%

      \[\leadsto \color{blue}{0.5 \cdot \left(re \cdot \left(e^{im} + e^{-im}\right)\right)} \]
    3. Applied egg-rr 39.0%

      \[\leadsto 0.5 \cdot \color{blue}{{re}^{-2}} \]
  3. Recombined 3 regimes into one program.
  4. Final simplification 69.6%

    \[\leadsto \begin{array}{l} \mathbf{if}\;im \leq -0.000195:\\ \;\;\;\;0.5 \cdot \left(re \cdot \left(im \cdot im\right) + re \cdot 2\right)\\ \mathbf{elif}\;im \leq 3.9 \cdot 10^{+18}:\\ \;\;\;\;\sin re\\ \mathbf{elif}\;im \leq 2.2 \cdot 10^{+95}:\\ \;\;\;\;0.5 \cdot {re}^{-2}\\ \mathbf{else}:\\ \;\;\;\;0.5 \cdot \left(re \cdot \left(im \cdot im\right) + re \cdot 2\right)\\ \end{array} \]

Alternative 5: 71.8% accurate, 2.9× speedup

\[\begin{array}{l} \mathbf{if}\;im \leq -0.00014 \lor \neg \left(im \leq 8.2 \cdot 10^{+79}\right):\\ \;\;\;\;0.5 \cdot \left(re \cdot \left(im \cdot im\right) + re \cdot 2\right)\\ \mathbf{else}:\\ \;\;\;\;\sin re\\ \end{array} \]
(FPCore (re im)
 :precision binary64
 (if (or (<= im -0.00014) (not (<= im 8.2e+79)))
   (* 0.5 (+ (* re (* im im)) (* re 2.0)))
   (sin re)))
double code(double re, double im) {
	double tmp;
	if ((im <= -0.00014) || !(im <= 8.2e+79)) {
		tmp = 0.5 * ((re * (im * im)) + (re * 2.0));
	} else {
		tmp = sin(re);
	}
	return tmp;
}
real(8) function code(re, im)
    real(8), intent (in) :: re
    real(8), intent (in) :: im
    real(8) :: tmp
    if ((im <= (-0.00014d0)) .or. (.not. (im <= 8.2d+79))) then
        tmp = 0.5d0 * ((re * (im * im)) + (re * 2.0d0))
    else
        tmp = sin(re)
    end if
    code = tmp
end function
public static double code(double re, double im) {
	double tmp;
	if ((im <= -0.00014) || !(im <= 8.2e+79)) {
		tmp = 0.5 * ((re * (im * im)) + (re * 2.0));
	} else {
		tmp = Math.sin(re);
	}
	return tmp;
}
def code(re, im):
	tmp = 0
	if (im <= -0.00014) or not (im <= 8.2e+79):
		tmp = 0.5 * ((re * (im * im)) + (re * 2.0))
	else:
		tmp = math.sin(re)
	return tmp
function code(re, im)
	tmp = 0.0
	if ((im <= -0.00014) || !(im <= 8.2e+79))
		tmp = Float64(0.5 * Float64(Float64(re * Float64(im * im)) + Float64(re * 2.0)));
	else
		tmp = sin(re);
	end
	return tmp
end
function tmp_2 = code(re, im)
	tmp = 0.0;
	if ((im <= -0.00014) || ~((im <= 8.2e+79)))
		tmp = 0.5 * ((re * (im * im)) + (re * 2.0));
	else
		tmp = sin(re);
	end
	tmp_2 = tmp;
end
code[re_, im_] := If[Or[LessEqual[im, -0.00014], N[Not[LessEqual[im, 8.2e+79]], $MachinePrecision]], N[(0.5 * N[(N[(re * N[(im * im), $MachinePrecision]), $MachinePrecision] + N[(re * 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[Sin[re], $MachinePrecision]]
Derivation
  1. Split input into 2 regimes
  2. if im < -1.3999999999999999e-4 or 8.2e79 < im

    1. Initial program 100.0%

      \[\left(0.5 \cdot \sin re\right) \cdot \left(e^{0 - im} + e^{im}\right) \]
    2. Taylor expanded in re around 0 69.0%

      \[\leadsto \color{blue}{0.5 \cdot \left(re \cdot \left(e^{im} + e^{-im}\right)\right)} \]
    3. Taylor expanded in im around 0 41.9%

      \[\leadsto 0.5 \cdot \color{blue}{\left(re \cdot {im}^{2} + 2 \cdot re\right)} \]
    4. Step-by-step derivation
      1. expm1-log1p-u 18.6%

        \[\leadsto 0.5 \cdot \left(\color{blue}{\mathsf{expm1}\left(\mathsf{log1p}\left(re \cdot {im}^{2}\right)\right)} + 2 \cdot re\right) \]
      2. expm1-udef 18.2%

        \[\leadsto 0.5 \cdot \left(\color{blue}{\left(e^{\mathsf{log1p}\left(re \cdot {im}^{2}\right)} - 1\right)} + 2 \cdot re\right) \]
      3. log1p-udef 18.2%

        \[\leadsto 0.5 \cdot \left(\left(e^{\color{blue}{\log \left(1 + re \cdot {im}^{2}\right)}} - 1\right) + 2 \cdot re\right) \]
      4. add-exp-log 41.5%

        \[\leadsto 0.5 \cdot \left(\left(\color{blue}{\left(1 + re \cdot {im}^{2}\right)} - 1\right) + 2 \cdot re\right) \]
      5. unpow2 41.5%

        \[\leadsto 0.5 \cdot \left(\left(\left(1 + re \cdot \color{blue}{\left(im \cdot im\right)}\right) - 1\right) + 2 \cdot re\right) \]
    5. Applied egg-rr 41.5%

      \[\leadsto 0.5 \cdot \left(\color{blue}{\left(\left(1 + re \cdot \left(im \cdot im\right)\right) - 1\right)} + 2 \cdot re\right) \]
    6. Step-by-step derivation
      1. +-commutative 41.5%

        \[\leadsto 0.5 \cdot \left(\left(\color{blue}{\left(re \cdot \left(im \cdot im\right) + 1\right)} - 1\right) + 2 \cdot re\right) \]
      2. associate--l+ 41.9%

        \[\leadsto 0.5 \cdot \left(\color{blue}{\left(re \cdot \left(im \cdot im\right) + \left(1 - 1\right)\right)} + 2 \cdot re\right) \]
      3. metadata-eval 41.9%

        \[\leadsto 0.5 \cdot \left(\left(re \cdot \left(im \cdot im\right) + \color{blue}{0}\right) + 2 \cdot re\right) \]
      4. +-rgt-identity 41.9%

        \[\leadsto 0.5 \cdot \left(\color{blue}{re \cdot \left(im \cdot im\right)} + 2 \cdot re\right) \]
    7. Simplified 41.9%

      \[\leadsto 0.5 \cdot \left(\color{blue}{re \cdot \left(im \cdot im\right)} + 2 \cdot re\right) \]

    if -1.3999999999999999e-4 < im < 8.2e79

    1. Initial program 100.0%

      \[\left(0.5 \cdot \sin re\right) \cdot \left(e^{0 - im} + e^{im}\right) \]
    2. Taylor expanded in im around 0 87.4%

      \[\leadsto \color{blue}{\sin re} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification 67.3%

    \[\leadsto \begin{array}{l} \mathbf{if}\;im \leq -0.00014 \lor \neg \left(im \leq 8.2 \cdot 10^{+79}\right):\\ \;\;\;\;0.5 \cdot \left(re \cdot \left(im \cdot im\right) + re \cdot 2\right)\\ \mathbf{else}:\\ \;\;\;\;\sin re\\ \end{array} \]

Alternative 6: 48.0% accurate, 28.1× speedup

\[0.5 \cdot \left(re \cdot \left(im \cdot im\right) + re \cdot 2\right)\]
(FPCore (re im) :precision binary64 (* 0.5 (+ (* re (* im im)) (* re 2.0))))
double code(double re, double im) {
	return 0.5 * ((re * (im * im)) + (re * 2.0));
}
real(8) function code(re, im)
    real(8), intent (in) :: re
    real(8), intent (in) :: im
    code = 0.5d0 * ((re * (im * im)) + (re * 2.0d0))
end function
public static double code(double re, double im) {
	return 0.5 * ((re * (im * im)) + (re * 2.0));
}
def code(re, im):
	return 0.5 * ((re * (im * im)) + (re * 2.0))
function code(re, im)
	return Float64(0.5 * Float64(Float64(re * Float64(im * im)) + Float64(re * 2.0)))
end
function tmp = code(re, im)
	tmp = 0.5 * ((re * (im * im)) + (re * 2.0));
end
code[re_, im_] := N[(0.5 * N[(N[(re * N[(im * im), $MachinePrecision]), $MachinePrecision] + N[(re * 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
Derivation
  1. Initial program 100.0%

    \[\left(0.5 \cdot \sin re\right) \cdot \left(e^{0 - im} + e^{im}\right) \]
  2. Taylor expanded in re around 0 61.3%

    \[\leadsto \color{blue}{0.5 \cdot \left(re \cdot \left(e^{im} + e^{-im}\right)\right)} \]
  3. Taylor expanded in im around 0 44.8%

    \[\leadsto 0.5 \cdot \color{blue}{\left(re \cdot {im}^{2} + 2 \cdot re\right)} \]
  4. Step-by-step derivation
    1. expm1-log1p-u 34.4%

      \[\leadsto 0.5 \cdot \left(\color{blue}{\mathsf{expm1}\left(\mathsf{log1p}\left(re \cdot {im}^{2}\right)\right)} + 2 \cdot re\right) \]
    2. expm1-udef 34.1%

      \[\leadsto 0.5 \cdot \left(\color{blue}{\left(e^{\mathsf{log1p}\left(re \cdot {im}^{2}\right)} - 1\right)} + 2 \cdot re\right) \]
    3. log1p-udef 34.1%

      \[\leadsto 0.5 \cdot \left(\left(e^{\color{blue}{\log \left(1 + re \cdot {im}^{2}\right)}} - 1\right) + 2 \cdot re\right) \]
    4. add-exp-log 44.5%

      \[\leadsto 0.5 \cdot \left(\left(\color{blue}{\left(1 + re \cdot {im}^{2}\right)} - 1\right) + 2 \cdot re\right) \]
    5. unpow2 44.5%

      \[\leadsto 0.5 \cdot \left(\left(\left(1 + re \cdot \color{blue}{\left(im \cdot im\right)}\right) - 1\right) + 2 \cdot re\right) \]
  5. Applied egg-rr 44.5%

    \[\leadsto 0.5 \cdot \left(\color{blue}{\left(\left(1 + re \cdot \left(im \cdot im\right)\right) - 1\right)} + 2 \cdot re\right) \]
  6. Step-by-step derivation
    1. +-commutative 44.5%

      \[\leadsto 0.5 \cdot \left(\left(\color{blue}{\left(re \cdot \left(im \cdot im\right) + 1\right)} - 1\right) + 2 \cdot re\right) \]
    2. associate--l+ 44.8%

      \[\leadsto 0.5 \cdot \left(\color{blue}{\left(re \cdot \left(im \cdot im\right) + \left(1 - 1\right)\right)} + 2 \cdot re\right) \]
    3. metadata-eval 44.8%

      \[\leadsto 0.5 \cdot \left(\left(re \cdot \left(im \cdot im\right) + \color{blue}{0}\right) + 2 \cdot re\right) \]
    4. +-rgt-identity 44.8%

      \[\leadsto 0.5 \cdot \left(\color{blue}{re \cdot \left(im \cdot im\right)} + 2 \cdot re\right) \]
  7. Simplified 44.8%

    \[\leadsto 0.5 \cdot \left(\color{blue}{re \cdot \left(im \cdot im\right)} + 2 \cdot re\right) \]
  8. Final simplification 44.8%

    \[\leadsto 0.5 \cdot \left(re \cdot \left(im \cdot im\right) + re \cdot 2\right) \]
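
This alternative keeps only the leading Taylor terms sin(re) ≈ re and cosh(im) ≈ 1 + im²/2, so re·cosh(im) ≈ 0.5·(re·im² + 2·re). A quick comparison in Python (illustrative values only; the approximation holds only when both inputs are small):

import math

re, im = 0.05, 0.1
exact = (0.5 * math.sin(re)) * (math.exp(-im) + math.exp(im))
poly = 0.5 * ((re * (im * im)) + (re * 2.0))
print(exact, poly)  # agree to a few significant digits here, diverge for larger inputs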

Alternative 7: 26.5% accurate, 61.8× speedup

\[0.5 \cdot \left(re + re\right)\]
(FPCore (re im) :precision binary64 (* 0.5 (+ re re)))
double code(double re, double im) {
	return 0.5 * (re + re);
}
real(8) function code(re, im)
    real(8), intent (in) :: re
    real(8), intent (in) :: im
    code = 0.5d0 * (re + re)
end function
public static double code(double re, double im) {
	return 0.5 * (re + re);
}
def code(re, im):
	return 0.5 * (re + re)
function code(re, im)
	return Float64(0.5 * Float64(re + re))
end
function tmp = code(re, im)
	tmp = 0.5 * (re + re);
end
code[re_, im_] := N[(0.5 * N[(re + re), $MachinePrecision]), $MachinePrecision]
Derivation
  1. Initial program 100.0%

    \[\left(0.5 \cdot \sin re\right) \cdot \left(e^{0 - im} + e^{im}\right) \]
  2. Taylor expanded in re around 0 61.3%

    \[\leadsto \color{blue}{0.5 \cdot \left(re \cdot \left(e^{im} + e^{-im}\right)\right)} \]
  3. Applied egg-rr 27.5%

    \[\leadsto 0.5 \cdot \color{blue}{\left(re + re\right)} \]
  4. Final simplification 27.5%

    \[\leadsto 0.5 \cdot \left(re + re\right) \]
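
Since 0.5·(re + re) is exactly re in binary64 (barring overflow), this alternative amounts to the double small-argument approximation sin(re) ≈ re and cosh(im) ≈ 1. A tiny illustration (hypothetical inputs):

import math

for re, im in [(1e-4, 1e-4), (1.0, 1.0)]:
	exact = (0.5 * math.sin(re)) * (math.exp(-im) + math.exp(im))
	print(exact, 0.5 * (re + re))  # close for the first pair, far off for the second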

Alternative 8: 4.3% accurate, 309.0× speedup

\[-2\]
(FPCore (re im) :precision binary64 -2.0)
double code(double re, double im) {
	return -2.0;
}
real(8) function code(re, im)
    real(8), intent (in) :: re
    real(8), intent (in) :: im
    code = -2.0d0
end function
public static double code(double re, double im) {
	return -2.0;
}
def code(re, im):
	return -2.0
function code(re, im)
	return -2.0
end
function tmp = code(re, im)
	tmp = -2.0;
end
code[re_, im_] := -2.0
Derivation
  1. Initial program 100.0%

    \[\left(0.5 \cdot \sin re\right) \cdot \left(e^{0 - im} + e^{im}\right) \]
  2. Step-by-step derivation
    1. add-log-exp 76.8%

      \[\leadsto \color{blue}{\log \left(e^{\left(0.5 \cdot \sin re\right) \cdot \left(e^{0 - im} + e^{im}\right)}\right)} \]
    2. *-un-lft-identity 76.8%

      \[\leadsto \log \color{blue}{\left(1 \cdot e^{\left(0.5 \cdot \sin re\right) \cdot \left(e^{0 - im} + e^{im}\right)}\right)} \]
    3. log-prod 76.8%

      \[\leadsto \color{blue}{\log 1 + \log \left(e^{\left(0.5 \cdot \sin re\right) \cdot \left(e^{0 - im} + e^{im}\right)}\right)} \]
    4. metadata-eval 76.8%

      \[\leadsto \color{blue}{0} + \log \left(e^{\left(0.5 \cdot \sin re\right) \cdot \left(e^{0 - im} + e^{im}\right)}\right) \]
    5. add-log-exp 100.0%

      \[\leadsto 0 + \color{blue}{\left(0.5 \cdot \sin re\right) \cdot \left(e^{0 - im} + e^{im}\right)} \]
    6. +-commutative 100.0%

      \[\leadsto 0 + \left(0.5 \cdot \sin re\right) \cdot \color{blue}{\left(e^{im} + e^{0 - im}\right)} \]
    7. sub0-neg 100.0%

      \[\leadsto 0 + \left(0.5 \cdot \sin re\right) \cdot \left(e^{im} + e^{\color{blue}{-im}}\right) \]
    8. cosh-undef 100.0%

      \[\leadsto 0 + \left(0.5 \cdot \sin re\right) \cdot \color{blue}{\left(2 \cdot \cosh im\right)} \]
  3. Applied egg-rr 100.0%

    \[\leadsto \color{blue}{0 + \left(0.5 \cdot \sin re\right) \cdot \left(2 \cdot \cosh im\right)} \]
  4. Applied egg-rr 4.8%

    \[\leadsto 0 + \color{blue}{-2} \]
  5. Final simplification 4.8%

    \[\leadsto -2 \]

Alternative 9: 4.7% accurate, 309.0× speedup

\[-1\]
(FPCore (re im) :precision binary64 -1.0)
double code(double re, double im) {
	return -1.0;
}
real(8) function code(re, im)
    real(8), intent (in) :: re
    real(8), intent (in) :: im
    code = -1.0d0
end function
public static double code(double re, double im) {
	return -1.0;
}
def code(re, im):
	return -1.0
function code(re, im)
	return -1.0
end
function tmp = code(re, im)
	tmp = -1.0;
end
code[re_, im_] := -1.0
Derivation
  1. Initial program 100.0%

    \[\left(0.5 \cdot \sin re\right) \cdot \left(e^{0 - im} + e^{im}\right) \]
  2. Step-by-step derivation
    1. add-log-exp 76.8%

      \[\leadsto \color{blue}{\log \left(e^{\left(0.5 \cdot \sin re\right) \cdot \left(e^{0 - im} + e^{im}\right)}\right)} \]
    2. *-un-lft-identity 76.8%

      \[\leadsto \log \color{blue}{\left(1 \cdot e^{\left(0.5 \cdot \sin re\right) \cdot \left(e^{0 - im} + e^{im}\right)}\right)} \]
    3. log-prod 76.8%

      \[\leadsto \color{blue}{\log 1 + \log \left(e^{\left(0.5 \cdot \sin re\right) \cdot \left(e^{0 - im} + e^{im}\right)}\right)} \]
    4. metadata-eval 76.8%

      \[\leadsto \color{blue}{0} + \log \left(e^{\left(0.5 \cdot \sin re\right) \cdot \left(e^{0 - im} + e^{im}\right)}\right) \]
    5. add-log-exp 100.0%

      \[\leadsto 0 + \color{blue}{\left(0.5 \cdot \sin re\right) \cdot \left(e^{0 - im} + e^{im}\right)} \]
    6. +-commutative 100.0%

      \[\leadsto 0 + \left(0.5 \cdot \sin re\right) \cdot \color{blue}{\left(e^{im} + e^{0 - im}\right)} \]
    7. sub0-neg 100.0%

      \[\leadsto 0 + \left(0.5 \cdot \sin re\right) \cdot \left(e^{im} + e^{\color{blue}{-im}}\right) \]
    8. cosh-undef 100.0%

      \[\leadsto 0 + \left(0.5 \cdot \sin re\right) \cdot \color{blue}{\left(2 \cdot \cosh im\right)} \]
  3. Applied egg-rr 100.0%

    \[\leadsto \color{blue}{0 + \left(0.5 \cdot \sin re\right) \cdot \left(2 \cdot \cosh im\right)} \]
  4. Applied egg-rr 5.3%

    \[\leadsto 0 + \color{blue}{-1} \]
  5. Final simplification 5.3%

    \[\leadsto -1 \]

Reproduce

herbie shell --seed 2023174 
(FPCore (re im)
  :name "math.sin on complex, real part"
  :precision binary64
  (* (* 0.5 (sin re)) (+ (exp (- 0.0 im)) (exp im))))
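
Assuming a working Herbie installation, pasting the FPCore expression above into the shell started by the command should reproduce this analysis; the shell reads FPCore expressions from standard input.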