math.sin on complex, real part

Percentage Accurate: 100.0% → 100.0%
Time: 5.8s
Alternatives: 8
Speedup: 1.4×

Specification

?
\[\begin{array}{l} \\ \left(0.5 \cdot \sin re\right) \cdot \left(e^{0 - im} + e^{im}\right) \end{array} \]
(FPCore (re im)
 :precision binary64
 (* (* 0.5 (sin re)) (+ (exp (- 0.0 im)) (exp im))))
double code(double re, double im) {
	return (0.5 * sin(re)) * (exp((0.0 - im)) + exp(im));
}
! NaN-aware fmax/fmin wrappers.  Fortran's intrinsic max/min leave NaN
! handling unspecified, so each variant uses merge() with the x /= x test
! (true only when x is NaN) to prefer the non-NaN operand, matching the
! C99 fmax/fmin convention.
module fmin_fmax_functions
    implicit none
    private
    public fmax
    public fmin

    ! Generic names dispatch on argument kinds; suffix digits give the
    ! kinds of (x, y): 8 = real(8), 4 = real(4).
    interface fmax
        module procedure fmax88
        module procedure fmax44
        module procedure fmax84
        module procedure fmax48
    end interface
    interface fmin
        module procedure fmin88
        module procedure fmin44
        module procedure fmin84
        module procedure fmin48
    end interface
contains
    ! max(x, y): returns y when x is NaN, x when y is NaN, else max(x, y)
    real(8) function fmax88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    real(4) function fmax44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    ! mixed-kind variants promote the real(4) operand with dble()
    real(8) function fmax84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
    end function
    real(8) function fmax48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
    end function
    ! min(x, y): same NaN policy as the fmax variants above
    real(8) function fmin88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    real(4) function fmin44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    real(8) function fmin84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
    end function
    real(8) function fmin48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
    end function
end module

! Re(sin(re + i*im)) evaluated as 0.5*sin(re)*(exp(-im) + exp(im)).
real(8) function code(re, im)
use fmin_fmax_functions
    real(8), intent (in) :: re
    real(8), intent (in) :: im
    real(8) :: half_sin, exp_sum
    half_sin = 0.5d0 * sin(re)
    exp_sum = exp((0.0d0 - im)) + exp(im)
    code = half_sin * exp_sum
end function
// Re(sin(re + i*im)) = 0.5*sin(re)*(e^(-im) + e^(im)).
public static double code(double re, double im) {
	double halfSin = 0.5 * Math.sin(re);
	double expSum = Math.exp((0.0 - im)) + Math.exp(im);
	return halfSin * expSum;
}
def code(re, im):
	# Re(sin(re + i*im)) = 0.5*sin(re)*(e^(-im) + e^(im))
	half_sin = 0.5 * math.sin(re)
	exp_sum = math.exp(0.0 - im) + math.exp(im)
	return half_sin * exp_sum
function code(re, im)
	# Re(sin(re + i*im)) = 0.5*sin(re)*(e^(-im) + e^(im))
	half_sin = Float64(0.5 * sin(re))
	exp_sum = Float64(exp(Float64(0.0 - im)) + exp(im))
	return Float64(half_sin * exp_sum)
end
function tmp = code(re, im)
	% Re(sin(re + i*im)) = 0.5*sin(re)*(exp(-im) + exp(im))
	half_sin = 0.5 * sin(re);
	exp_sum = exp((0.0 - im)) + exp(im);
	tmp = half_sin * exp_sum;
end
(* Re(sin(re + i*im)) = 0.5*Sin[re]*(E^(-im) + E^im), with each operation rounded at $MachinePrecision. *)
code[re_, im_] := N[(N[(0.5 * N[Sin[re], $MachinePrecision]), $MachinePrecision] * N[(N[Exp[N[(0.0 - im), $MachinePrecision]], $MachinePrecision] + N[Exp[im], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\left(0.5 \cdot \sin re\right) \cdot \left(e^{0 - im} + e^{im}\right)
\end{array}

Local Percentage Accuracy vs ?

The average percentage accuracy by input value. The horizontal axis shows the value of an input variable; the variable is chosen in the title. The vertical axis is accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion. These can be toggled with buttons below the plot. The line is an average, while dots represent individual samples.

Accuracy vs Speed?

Herbie found 8 alternatives:

AlternativeAccuracySpeedup
The accuracy (vertical axis) and speed (horizontal axis) of each alternatives. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative.The line shows the best available speed-accuracy tradeoffs.

Initial Program: 100.0% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \left(0.5 \cdot \sin re\right) \cdot \left(e^{0 - im} + e^{im}\right) \end{array} \]
(FPCore (re im)
 :precision binary64
 (* (* 0.5 (sin re)) (+ (exp (- 0.0 im)) (exp im))))
/* Initial program: Re(sin(re + i*im)) = 0.5*sin(re)*(e^(-im) + e^(im)). */
double code(double re, double im) {
	return (0.5 * sin(re)) * (exp((0.0 - im)) + exp(im));
}
! NaN-aware fmax/fmin wrappers.  Fortran's intrinsic max/min leave NaN
! handling unspecified, so each variant uses merge() with the x /= x test
! (true only when x is NaN) to prefer the non-NaN operand, matching the
! C99 fmax/fmin convention.
module fmin_fmax_functions
    implicit none
    private
    public fmax
    public fmin

    ! Generic names dispatch on argument kinds; suffix digits give the
    ! kinds of (x, y): 8 = real(8), 4 = real(4).
    interface fmax
        module procedure fmax88
        module procedure fmax44
        module procedure fmax84
        module procedure fmax48
    end interface
    interface fmin
        module procedure fmin88
        module procedure fmin44
        module procedure fmin84
        module procedure fmin48
    end interface
contains
    ! max(x, y): returns y when x is NaN, x when y is NaN, else max(x, y)
    real(8) function fmax88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    real(4) function fmax44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    ! mixed-kind variants promote the real(4) operand with dble()
    real(8) function fmax84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
    end function
    real(8) function fmax48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
    end function
    ! min(x, y): same NaN policy as the fmax variants above
    real(8) function fmin88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    real(4) function fmin44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    real(8) function fmin84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
    end function
    real(8) function fmin48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
    end function
end module

! Re(sin(re + i*im)) = 0.5*sin(re)*(exp(-im) + exp(im))
real(8) function code(re, im)
use fmin_fmax_functions
    real(8), intent (in) :: re
    real(8), intent (in) :: im
    code = (0.5d0 * sin(re)) * (exp((0.0d0 - im)) + exp(im))
end function
// Initial program: Re(sin(re + i*im)) = 0.5*sin(re)*(e^(-im) + e^(im)).
public static double code(double re, double im) {
	return (0.5 * Math.sin(re)) * (Math.exp((0.0 - im)) + Math.exp(im));
}
def code(re, im):
	# Re(sin(re + i*im)) = 0.5*sin(re)*(e^(-im) + e^(im))
	return (0.5 * math.sin(re)) * (math.exp((0.0 - im)) + math.exp(im))
function code(re, im)
	# Re(sin(re + i*im)) = 0.5*sin(re)*(e^(-im) + e^(im))
	return Float64(Float64(0.5 * sin(re)) * Float64(exp(Float64(0.0 - im)) + exp(im)))
end
function tmp = code(re, im)
	% Re(sin(re + i*im)) = 0.5*sin(re)*(exp(-im) + exp(im))
	tmp = (0.5 * sin(re)) * (exp((0.0 - im)) + exp(im));
end
(* Initial program: 0.5*Sin[re]*(E^(-im) + E^im), rounded at $MachinePrecision after each operation. *)
code[re_, im_] := N[(N[(0.5 * N[Sin[re], $MachinePrecision]), $MachinePrecision] * N[(N[Exp[N[(0.0 - im), $MachinePrecision]], $MachinePrecision] + N[Exp[im], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\left(0.5 \cdot \sin re\right) \cdot \left(e^{0 - im} + e^{im}\right)
\end{array}

Alternative 1: 100.0% accurate, 0.6× speedup?

\[\begin{array}{l} im_m = \left|im\right| \\ \mathsf{fma}\left(\frac{\sin re}{e^{im\_m}}, 0.5, e^{im\_m} \cdot \left(\sin re \cdot 0.5\right)\right) \end{array} \]
im_m = (fabs.f64 im)
(FPCore (re im_m)
 :precision binary64
 (fma (/ (sin re) (exp im_m)) 0.5 (* (exp im_m) (* (sin re) 0.5))))
im_m = fabs(im);
double code(double re, double im_m) {
	return fma((sin(re) / exp(im_m)), 0.5, (exp(im_m) * (sin(re) * 0.5)));
}
im_m = abs(im)
function code(re, im_m)
	# fma(sin(re)/e^im_m, 0.5, e^im_m * (sin(re)*0.5)); im_m is |im|
	s = sin(re)
	g = exp(im_m)
	return fma(Float64(s / g), 0.5, Float64(g * Float64(s * 0.5)))
end
im_m = N[Abs[im], $MachinePrecision]
(* Alternative 1: (Sin[re]/E^im_m)*0.5 + E^im_m*(Sin[re]*0.5), rounded at $MachinePrecision. *)
code[re_, im$95$m_] := N[(N[(N[Sin[re], $MachinePrecision] / N[Exp[im$95$m], $MachinePrecision]), $MachinePrecision] * 0.5 + N[(N[Exp[im$95$m], $MachinePrecision] * N[(N[Sin[re], $MachinePrecision] * 0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
im_m = \left|im\right|

\\
\mathsf{fma}\left(\frac{\sin re}{e^{im\_m}}, 0.5, e^{im\_m} \cdot \left(\sin re \cdot 0.5\right)\right)
\end{array}
Derivation
  1. Initial program 100.0%

    \[\left(0.5 \cdot \sin re\right) \cdot \left(e^{0 - im} + e^{im}\right) \]
  2. Step-by-step derivation
    1. lift-*.f64N/A

      \[\leadsto \color{blue}{\left(\frac{1}{2} \cdot \sin re\right) \cdot \left(e^{0 - im} + e^{im}\right)} \]
    2. lift-+.f64N/A

      \[\leadsto \left(\frac{1}{2} \cdot \sin re\right) \cdot \color{blue}{\left(e^{0 - im} + e^{im}\right)} \]
    3. distribute-lft-inN/A

      \[\leadsto \color{blue}{\left(\frac{1}{2} \cdot \sin re\right) \cdot e^{0 - im} + \left(\frac{1}{2} \cdot \sin re\right) \cdot e^{im}} \]
    4. lift-*.f64N/A

      \[\leadsto \color{blue}{\left(\frac{1}{2} \cdot \sin re\right)} \cdot e^{0 - im} + \left(\frac{1}{2} \cdot \sin re\right) \cdot e^{im} \]
    5. associate-*l*N/A

      \[\leadsto \color{blue}{\frac{1}{2} \cdot \left(\sin re \cdot e^{0 - im}\right)} + \left(\frac{1}{2} \cdot \sin re\right) \cdot e^{im} \]
    6. *-commutativeN/A

      \[\leadsto \color{blue}{\left(\sin re \cdot e^{0 - im}\right) \cdot \frac{1}{2}} + \left(\frac{1}{2} \cdot \sin re\right) \cdot e^{im} \]
    7. lower-fma.f64N/A

      \[\leadsto \color{blue}{\mathsf{fma}\left(\sin re \cdot e^{0 - im}, \frac{1}{2}, \left(\frac{1}{2} \cdot \sin re\right) \cdot e^{im}\right)} \]
    8. lift-exp.f64N/A

      \[\leadsto \mathsf{fma}\left(\sin re \cdot \color{blue}{e^{0 - im}}, \frac{1}{2}, \left(\frac{1}{2} \cdot \sin re\right) \cdot e^{im}\right) \]
    9. lift--.f64N/A

      \[\leadsto \mathsf{fma}\left(\sin re \cdot e^{\color{blue}{0 - im}}, \frac{1}{2}, \left(\frac{1}{2} \cdot \sin re\right) \cdot e^{im}\right) \]
    10. sub0-negN/A

      \[\leadsto \mathsf{fma}\left(\sin re \cdot e^{\color{blue}{\mathsf{neg}\left(im\right)}}, \frac{1}{2}, \left(\frac{1}{2} \cdot \sin re\right) \cdot e^{im}\right) \]
    11. exp-negN/A

      \[\leadsto \mathsf{fma}\left(\sin re \cdot \color{blue}{\frac{1}{e^{im}}}, \frac{1}{2}, \left(\frac{1}{2} \cdot \sin re\right) \cdot e^{im}\right) \]
    12. lift-exp.f64N/A

      \[\leadsto \mathsf{fma}\left(\sin re \cdot \frac{1}{\color{blue}{e^{im}}}, \frac{1}{2}, \left(\frac{1}{2} \cdot \sin re\right) \cdot e^{im}\right) \]
    13. mult-flip-revN/A

      \[\leadsto \mathsf{fma}\left(\color{blue}{\frac{\sin re}{e^{im}}}, \frac{1}{2}, \left(\frac{1}{2} \cdot \sin re\right) \cdot e^{im}\right) \]
    14. lower-/.f64N/A

      \[\leadsto \mathsf{fma}\left(\color{blue}{\frac{\sin re}{e^{im}}}, \frac{1}{2}, \left(\frac{1}{2} \cdot \sin re\right) \cdot e^{im}\right) \]
    15. *-commutativeN/A

      \[\leadsto \mathsf{fma}\left(\frac{\sin re}{e^{im}}, \frac{1}{2}, \color{blue}{e^{im} \cdot \left(\frac{1}{2} \cdot \sin re\right)}\right) \]
    16. lower-*.f64100.0

      \[\leadsto \mathsf{fma}\left(\frac{\sin re}{e^{im}}, 0.5, \color{blue}{e^{im} \cdot \left(0.5 \cdot \sin re\right)}\right) \]
    17. lift-*.f64N/A

      \[\leadsto \mathsf{fma}\left(\frac{\sin re}{e^{im}}, \frac{1}{2}, e^{im} \cdot \color{blue}{\left(\frac{1}{2} \cdot \sin re\right)}\right) \]
    18. *-commutativeN/A

      \[\leadsto \mathsf{fma}\left(\frac{\sin re}{e^{im}}, \frac{1}{2}, e^{im} \cdot \color{blue}{\left(\sin re \cdot \frac{1}{2}\right)}\right) \]
    19. lower-*.f64100.0

      \[\leadsto \mathsf{fma}\left(\frac{\sin re}{e^{im}}, 0.5, e^{im} \cdot \color{blue}{\left(\sin re \cdot 0.5\right)}\right) \]
  3. Applied rewrites100.0%

    \[\leadsto \color{blue}{\mathsf{fma}\left(\frac{\sin re}{e^{im}}, 0.5, e^{im} \cdot \left(\sin re \cdot 0.5\right)\right)} \]
  4. Add Preprocessing

Alternative 2: 100.0% accurate, 1.4× speedup?

\[\begin{array}{l} im_m = \left|im\right| \\ \sin re \cdot \cosh im\_m \end{array} \]
im_m = (fabs.f64 im)
(FPCore (re im_m) :precision binary64 (* (sin re) (cosh im_m)))
im_m = fabs(im);
double code(double re, double im_m) {
	return sin(re) * cosh(im_m);
}
im_m = abs(im)
! NaN-aware fmax/fmin wrappers.  Fortran's intrinsic max/min leave NaN
! handling unspecified, so each variant uses merge() with the x /= x test
! (true only when x is NaN) to prefer the non-NaN operand, matching the
! C99 fmax/fmin convention.
module fmin_fmax_functions
    implicit none
    private
    public fmax
    public fmin

    ! Generic names dispatch on argument kinds; suffix digits give the
    ! kinds of (x, y): 8 = real(8), 4 = real(4).
    interface fmax
        module procedure fmax88
        module procedure fmax44
        module procedure fmax84
        module procedure fmax48
    end interface
    interface fmin
        module procedure fmin88
        module procedure fmin44
        module procedure fmin84
        module procedure fmin48
    end interface
contains
    ! max(x, y): returns y when x is NaN, x when y is NaN, else max(x, y)
    real(8) function fmax88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    real(4) function fmax44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    ! mixed-kind variants promote the real(4) operand with dble()
    real(8) function fmax84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
    end function
    real(8) function fmax48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
    end function
    ! min(x, y): same NaN policy as the fmax variants above
    real(8) function fmin88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    real(4) function fmin44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    real(8) function fmin84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
    end function
    real(8) function fmin48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
    end function
end module

! Alternative 2: sin(re) * cosh(|im|), closed form of 0.5*sin(re)*(exp(-im)+exp(im)).
real(8) function code(re, im_m)
use fmin_fmax_functions
    real(8), intent (in) :: re
    real(8), intent (in) :: im_m
    real(8) :: attenuation
    attenuation = cosh(im_m)
    code = sin(re) * attenuation
end function
im_m = Math.abs(im);
// Alternative 2: sin(re) * cosh(im_m), closed form of 0.5*sin(re)*(e^(-im) + e^(im)); im_m = |im|.
public static double code(double re, double im_m) {
	double attenuation = Math.cosh(im_m);
	return Math.sin(re) * attenuation;
}
im_m = math.fabs(im)
def code(re, im_m):
	# sin(re) * cosh(|im|): closed form of 0.5*sin(re)*(e^(-im) + e^(im))
	attenuation = math.cosh(im_m)
	return math.sin(re) * attenuation
im_m = abs(im)
function code(re, im_m)
	# sin(re) * cosh(|im|): closed form of 0.5*sin(re)*(e^(-im) + e^(im))
	attenuation = cosh(im_m)
	return Float64(sin(re) * attenuation)
end
im_m = abs(im);
function tmp = code(re, im_m)
	% sin(re) * cosh(|im|): closed form of 0.5*sin(re)*(exp(-im) + exp(im))
	attenuation = cosh(im_m);
	tmp = sin(re) * attenuation;
end
im_m = N[Abs[im], $MachinePrecision]
(* Alternative 2: Sin[re]*Cosh[im_m], rounded at $MachinePrecision. *)
code[re_, im$95$m_] := N[(N[Sin[re], $MachinePrecision] * N[Cosh[im$95$m], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
im_m = \left|im\right|

\\
\sin re \cdot \cosh im\_m
\end{array}
Derivation
  1. Initial program 100.0%

    \[\left(0.5 \cdot \sin re\right) \cdot \left(e^{0 - im} + e^{im}\right) \]
  2. Step-by-step derivation
    1. lift-*.f64N/A

      \[\leadsto \color{blue}{\left(\frac{1}{2} \cdot \sin re\right) \cdot \left(e^{0 - im} + e^{im}\right)} \]
    2. lift-*.f64N/A

      \[\leadsto \color{blue}{\left(\frac{1}{2} \cdot \sin re\right)} \cdot \left(e^{0 - im} + e^{im}\right) \]
    3. *-commutativeN/A

      \[\leadsto \color{blue}{\left(\sin re \cdot \frac{1}{2}\right)} \cdot \left(e^{0 - im} + e^{im}\right) \]
    4. associate-*l*N/A

      \[\leadsto \color{blue}{\sin re \cdot \left(\frac{1}{2} \cdot \left(e^{0 - im} + e^{im}\right)\right)} \]
    5. lift-+.f64N/A

      \[\leadsto \sin re \cdot \left(\frac{1}{2} \cdot \color{blue}{\left(e^{0 - im} + e^{im}\right)}\right) \]
    6. +-commutativeN/A

      \[\leadsto \sin re \cdot \left(\frac{1}{2} \cdot \color{blue}{\left(e^{im} + e^{0 - im}\right)}\right) \]
    7. lift-exp.f64N/A

      \[\leadsto \sin re \cdot \left(\frac{1}{2} \cdot \left(\color{blue}{e^{im}} + e^{0 - im}\right)\right) \]
    8. lift-exp.f64N/A

      \[\leadsto \sin re \cdot \left(\frac{1}{2} \cdot \left(e^{im} + \color{blue}{e^{0 - im}}\right)\right) \]
    9. lift--.f64N/A

      \[\leadsto \sin re \cdot \left(\frac{1}{2} \cdot \left(e^{im} + e^{\color{blue}{0 - im}}\right)\right) \]
    10. sub0-negN/A

      \[\leadsto \sin re \cdot \left(\frac{1}{2} \cdot \left(e^{im} + e^{\color{blue}{\mathsf{neg}\left(im\right)}}\right)\right) \]
    11. cosh-undefN/A

      \[\leadsto \sin re \cdot \left(\frac{1}{2} \cdot \color{blue}{\left(2 \cdot \cosh im\right)}\right) \]
    12. associate-*r*N/A

      \[\leadsto \sin re \cdot \color{blue}{\left(\left(\frac{1}{2} \cdot 2\right) \cdot \cosh im\right)} \]
    13. metadata-evalN/A

      \[\leadsto \sin re \cdot \left(\color{blue}{1} \cdot \cosh im\right) \]
    14. *-lft-identityN/A

      \[\leadsto \sin re \cdot \color{blue}{\cosh im} \]
    15. lower-*.f64N/A

      \[\leadsto \color{blue}{\sin re \cdot \cosh im} \]
    16. lower-cosh.f64100.0

      \[\leadsto \sin re \cdot \color{blue}{\cosh im} \]
  3. Applied rewrites100.0%

    \[\leadsto \color{blue}{\sin re \cdot \cosh im} \]
  4. Add Preprocessing

Alternative 3: 86.8% accurate, 0.4× speedup?

\[\begin{array}{l} im_m = \left|im\right| \\ \begin{array}{l} t_0 := \left(0.5 \cdot \sin re\right) \cdot \left(e^{0 - im\_m} + e^{im\_m}\right)\\ \mathbf{if}\;t\_0 \leq -\infty:\\ \;\;\;\;\mathsf{fma}\left(\left(re \cdot re\right) \cdot re, -0.16666666666666666, re\right) \cdot \cosh im\_m\\ \mathbf{elif}\;t\_0 \leq 1:\\ \;\;\;\;\left(\sin re \cdot 2\right) \cdot 0.5\\ \mathbf{else}:\\ \;\;\;\;re \cdot \cosh im\_m\\ \end{array} \end{array} \]
im_m = (fabs.f64 im)
(FPCore (re im_m)
 :precision binary64
 (let* ((t_0 (* (* 0.5 (sin re)) (+ (exp (- 0.0 im_m)) (exp im_m)))))
   (if (<= t_0 (- INFINITY))
     (* (fma (* (* re re) re) -0.16666666666666666 re) (cosh im_m))
     (if (<= t_0 1.0) (* (* (sin re) 2.0) 0.5) (* re (cosh im_m))))))
im_m = fabs(im);
/* Alternative 3: three regimes selected by the value of the original
 * expression t_0; im_m = |im| (preprocessing done by the caller). */
double code(double re, double im_m) {
	/* Evaluate the original expression once, only to pick a regime. */
	double t_0 = (0.5 * sin(re)) * (exp((0.0 - im_m)) + exp(im_m));
	double tmp;
	if (t_0 <= -((double) INFINITY)) {
		/* t_0 == -inf: cubic Taylor polynomial of sin(re) (-1/6 = -0.1666...) times cosh(im_m). */
		tmp = fma(((re * re) * re), -0.16666666666666666, re) * cosh(im_m);
	} else if (t_0 <= 1.0) {
		/* Moderate regime: (sin(re)*2)*0.5, per Herbie's Taylor expansion in im around 0. */
		tmp = (sin(re) * 2.0) * 0.5;
	} else {
		/* Large regime: sin(re) replaced by its leading Taylor term re. */
		tmp = re * cosh(im_m);
	}
	return tmp;
}
im_m = abs(im)
function code(re, im_m)
	# Regime selector: the original expression evaluated once; im_m = |im|
	t_0 = Float64(Float64(0.5 * sin(re)) * Float64(exp(Float64(0.0 - im_m)) + exp(im_m)))
	tmp = 0.0
	if (t_0 <= Float64(-Inf))
		# t_0 == -Inf: cubic Taylor polynomial of sin(re) (-1/6 coefficient) times cosh(im_m)
		tmp = Float64(fma(Float64(Float64(re * re) * re), -0.16666666666666666, re) * cosh(im_m));
	elseif (t_0 <= 1.0)
		# moderate regime: (sin(re)*2)*0.5, from the Taylor expansion in im around 0
		tmp = Float64(Float64(sin(re) * 2.0) * 0.5);
	else
		# large regime: sin(re) replaced by its leading Taylor term re
		tmp = Float64(re * cosh(im_m));
	end
	return tmp
end
im_m = N[Abs[im], $MachinePrecision]
(* Alternative 3: three regimes chosen by t_0, the machine-precision value of the original expression. *)
code[re_, im$95$m_] := Block[{t$95$0 = N[(N[(0.5 * N[Sin[re], $MachinePrecision]), $MachinePrecision] * N[(N[Exp[N[(0.0 - im$95$m), $MachinePrecision]], $MachinePrecision] + N[Exp[im$95$m], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[t$95$0, (-Infinity)], N[(N[(N[(N[(re * re), $MachinePrecision] * re), $MachinePrecision] * -0.16666666666666666 + re), $MachinePrecision] * N[Cosh[im$95$m], $MachinePrecision]), $MachinePrecision], If[LessEqual[t$95$0, 1.0], N[(N[(N[Sin[re], $MachinePrecision] * 2.0), $MachinePrecision] * 0.5), $MachinePrecision], N[(re * N[Cosh[im$95$m], $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}
im_m = \left|im\right|

\\
\begin{array}{l}
t_0 := \left(0.5 \cdot \sin re\right) \cdot \left(e^{0 - im\_m} + e^{im\_m}\right)\\
\mathbf{if}\;t\_0 \leq -\infty:\\
\;\;\;\;\mathsf{fma}\left(\left(re \cdot re\right) \cdot re, -0.16666666666666666, re\right) \cdot \cosh im\_m\\

\mathbf{elif}\;t\_0 \leq 1:\\
\;\;\;\;\left(\sin re \cdot 2\right) \cdot 0.5\\

\mathbf{else}:\\
\;\;\;\;re \cdot \cosh im\_m\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if (*.f64 (*.f64 #s(literal 1/2 binary64) (sin.f64 re)) (+.f64 (exp.f64 (-.f64 #s(literal 0 binary64) im)) (exp.f64 im))) < -inf.0

    1. Initial program 100.0%

      \[\left(0.5 \cdot \sin re\right) \cdot \left(e^{0 - im} + e^{im}\right) \]
    2. Step-by-step derivation
      1. lift-*.f64N/A

        \[\leadsto \color{blue}{\left(\frac{1}{2} \cdot \sin re\right) \cdot \left(e^{0 - im} + e^{im}\right)} \]
      2. lift-*.f64N/A

        \[\leadsto \color{blue}{\left(\frac{1}{2} \cdot \sin re\right)} \cdot \left(e^{0 - im} + e^{im}\right) \]
      3. *-commutativeN/A

        \[\leadsto \color{blue}{\left(\sin re \cdot \frac{1}{2}\right)} \cdot \left(e^{0 - im} + e^{im}\right) \]
      4. associate-*l*N/A

        \[\leadsto \color{blue}{\sin re \cdot \left(\frac{1}{2} \cdot \left(e^{0 - im} + e^{im}\right)\right)} \]
      5. lift-+.f64N/A

        \[\leadsto \sin re \cdot \left(\frac{1}{2} \cdot \color{blue}{\left(e^{0 - im} + e^{im}\right)}\right) \]
      6. +-commutativeN/A

        \[\leadsto \sin re \cdot \left(\frac{1}{2} \cdot \color{blue}{\left(e^{im} + e^{0 - im}\right)}\right) \]
      7. lift-exp.f64N/A

        \[\leadsto \sin re \cdot \left(\frac{1}{2} \cdot \left(\color{blue}{e^{im}} + e^{0 - im}\right)\right) \]
      8. lift-exp.f64N/A

        \[\leadsto \sin re \cdot \left(\frac{1}{2} \cdot \left(e^{im} + \color{blue}{e^{0 - im}}\right)\right) \]
      9. lift--.f64N/A

        \[\leadsto \sin re \cdot \left(\frac{1}{2} \cdot \left(e^{im} + e^{\color{blue}{0 - im}}\right)\right) \]
      10. sub0-negN/A

        \[\leadsto \sin re \cdot \left(\frac{1}{2} \cdot \left(e^{im} + e^{\color{blue}{\mathsf{neg}\left(im\right)}}\right)\right) \]
      11. cosh-undefN/A

        \[\leadsto \sin re \cdot \left(\frac{1}{2} \cdot \color{blue}{\left(2 \cdot \cosh im\right)}\right) \]
      12. associate-*r*N/A

        \[\leadsto \sin re \cdot \color{blue}{\left(\left(\frac{1}{2} \cdot 2\right) \cdot \cosh im\right)} \]
      13. metadata-evalN/A

        \[\leadsto \sin re \cdot \left(\color{blue}{1} \cdot \cosh im\right) \]
      14. *-lft-identityN/A

        \[\leadsto \sin re \cdot \color{blue}{\cosh im} \]
      15. lower-*.f64N/A

        \[\leadsto \color{blue}{\sin re \cdot \cosh im} \]
      16. lower-cosh.f64100.0

        \[\leadsto \sin re \cdot \color{blue}{\cosh im} \]
    3. Applied rewrites100.0%

      \[\leadsto \color{blue}{\sin re \cdot \cosh im} \]
    4. Taylor expanded in re around 0

      \[\leadsto \color{blue}{\left(re \cdot \left(1 + \frac{-1}{6} \cdot {re}^{2}\right)\right)} \cdot \cosh im \]
    5. Step-by-step derivation
      1. lower-*.f64N/A

        \[\leadsto \left(re \cdot \color{blue}{\left(1 + \frac{-1}{6} \cdot {re}^{2}\right)}\right) \cdot \cosh im \]
      2. lower-+.f64N/A

        \[\leadsto \left(re \cdot \left(1 + \color{blue}{\frac{-1}{6} \cdot {re}^{2}}\right)\right) \cdot \cosh im \]
      3. lower-*.f64N/A

        \[\leadsto \left(re \cdot \left(1 + \frac{-1}{6} \cdot \color{blue}{{re}^{2}}\right)\right) \cdot \cosh im \]
      4. lower-pow.f6462.9

        \[\leadsto \left(re \cdot \left(1 + -0.16666666666666666 \cdot {re}^{\color{blue}{2}}\right)\right) \cdot \cosh im \]
    6. Applied rewrites62.9%

      \[\leadsto \color{blue}{\left(re \cdot \left(1 + -0.16666666666666666 \cdot {re}^{2}\right)\right)} \cdot \cosh im \]
    7. Step-by-step derivation
      1. lift-*.f64N/A

        \[\leadsto \left(re \cdot \color{blue}{\left(1 + \frac{-1}{6} \cdot {re}^{2}\right)}\right) \cdot \cosh im \]
      2. lift-+.f64N/A

        \[\leadsto \left(re \cdot \left(1 + \color{blue}{\frac{-1}{6} \cdot {re}^{2}}\right)\right) \cdot \cosh im \]
      3. +-commutativeN/A

        \[\leadsto \left(re \cdot \left(\frac{-1}{6} \cdot {re}^{2} + \color{blue}{1}\right)\right) \cdot \cosh im \]
      4. distribute-rgt-inN/A

        \[\leadsto \left(\left(\frac{-1}{6} \cdot {re}^{2}\right) \cdot re + \color{blue}{1 \cdot re}\right) \cdot \cosh im \]
      5. *-commutativeN/A

        \[\leadsto \left(re \cdot \left(\frac{-1}{6} \cdot {re}^{2}\right) + \color{blue}{1} \cdot re\right) \cdot \cosh im \]
      6. lift-*.f64N/A

        \[\leadsto \left(re \cdot \left(\frac{-1}{6} \cdot {re}^{2}\right) + 1 \cdot re\right) \cdot \cosh im \]
      7. *-commutativeN/A

        \[\leadsto \left(re \cdot \left({re}^{2} \cdot \frac{-1}{6}\right) + 1 \cdot re\right) \cdot \cosh im \]
      8. associate-*r*N/A

        \[\leadsto \left(\left(re \cdot {re}^{2}\right) \cdot \frac{-1}{6} + \color{blue}{1} \cdot re\right) \cdot \cosh im \]
      9. *-lft-identityN/A

        \[\leadsto \left(\left(re \cdot {re}^{2}\right) \cdot \frac{-1}{6} + re\right) \cdot \cosh im \]
      10. lower-fma.f64N/A

        \[\leadsto \mathsf{fma}\left(re \cdot {re}^{2}, \color{blue}{\frac{-1}{6}}, re\right) \cdot \cosh im \]
      11. *-commutativeN/A

        \[\leadsto \mathsf{fma}\left({re}^{2} \cdot re, \frac{-1}{6}, re\right) \cdot \cosh im \]
      12. lower-*.f6462.9

        \[\leadsto \mathsf{fma}\left({re}^{2} \cdot re, -0.16666666666666666, re\right) \cdot \cosh im \]
      13. lift-pow.f64N/A

        \[\leadsto \mathsf{fma}\left({re}^{2} \cdot re, \frac{-1}{6}, re\right) \cdot \cosh im \]
      14. unpow2N/A

        \[\leadsto \mathsf{fma}\left(\left(re \cdot re\right) \cdot re, \frac{-1}{6}, re\right) \cdot \cosh im \]
      15. lower-*.f6462.9

        \[\leadsto \mathsf{fma}\left(\left(re \cdot re\right) \cdot re, -0.16666666666666666, re\right) \cdot \cosh im \]
    8. Applied rewrites62.9%

      \[\leadsto \mathsf{fma}\left(\left(re \cdot re\right) \cdot re, \color{blue}{-0.16666666666666666}, re\right) \cdot \cosh im \]

    if -inf.0 < (*.f64 (*.f64 #s(literal 1/2 binary64) (sin.f64 re)) (+.f64 (exp.f64 (-.f64 #s(literal 0 binary64) im)) (exp.f64 im))) < 1

    1. Initial program 100.0%

      \[\left(0.5 \cdot \sin re\right) \cdot \left(e^{0 - im} + e^{im}\right) \]
    2. Taylor expanded in im around 0

      \[\leadsto \left(\frac{1}{2} \cdot \sin re\right) \cdot \color{blue}{2} \]
    3. Step-by-step derivation
      1. Applied rewrites50.9%

        \[\leadsto \left(0.5 \cdot \sin re\right) \cdot \color{blue}{2} \]
      2. Step-by-step derivation
        1. lift-*.f64N/A

          \[\leadsto \color{blue}{\left(\frac{1}{2} \cdot \sin re\right) \cdot 2} \]
        2. lift-*.f64N/A

          \[\leadsto \color{blue}{\left(\frac{1}{2} \cdot \sin re\right)} \cdot 2 \]
        3. associate-*l*N/A

          \[\leadsto \color{blue}{\frac{1}{2} \cdot \left(\sin re \cdot 2\right)} \]
        4. *-commutativeN/A

          \[\leadsto \color{blue}{\left(\sin re \cdot 2\right) \cdot \frac{1}{2}} \]
        5. lower-*.f64N/A

          \[\leadsto \color{blue}{\left(\sin re \cdot 2\right) \cdot \frac{1}{2}} \]
        6. lower-*.f6450.9

          \[\leadsto \color{blue}{\left(\sin re \cdot 2\right)} \cdot 0.5 \]
      3. Applied rewrites50.9%

        \[\leadsto \color{blue}{\left(\sin re \cdot 2\right) \cdot 0.5} \]

      if 1 < (*.f64 (*.f64 #s(literal 1/2 binary64) (sin.f64 re)) (+.f64 (exp.f64 (-.f64 #s(literal 0 binary64) im)) (exp.f64 im)))

      1. Initial program 100.0%

        \[\left(0.5 \cdot \sin re\right) \cdot \left(e^{0 - im} + e^{im}\right) \]
      2. Step-by-step derivation
        1. lift-*.f64N/A

          \[\leadsto \color{blue}{\left(\frac{1}{2} \cdot \sin re\right) \cdot \left(e^{0 - im} + e^{im}\right)} \]
        2. lift-*.f64N/A

          \[\leadsto \color{blue}{\left(\frac{1}{2} \cdot \sin re\right)} \cdot \left(e^{0 - im} + e^{im}\right) \]
        3. *-commutativeN/A

          \[\leadsto \color{blue}{\left(\sin re \cdot \frac{1}{2}\right)} \cdot \left(e^{0 - im} + e^{im}\right) \]
        4. associate-*l*N/A

          \[\leadsto \color{blue}{\sin re \cdot \left(\frac{1}{2} \cdot \left(e^{0 - im} + e^{im}\right)\right)} \]
        5. lift-+.f64N/A

          \[\leadsto \sin re \cdot \left(\frac{1}{2} \cdot \color{blue}{\left(e^{0 - im} + e^{im}\right)}\right) \]
        6. +-commutativeN/A

          \[\leadsto \sin re \cdot \left(\frac{1}{2} \cdot \color{blue}{\left(e^{im} + e^{0 - im}\right)}\right) \]
        7. lift-exp.f64N/A

          \[\leadsto \sin re \cdot \left(\frac{1}{2} \cdot \left(\color{blue}{e^{im}} + e^{0 - im}\right)\right) \]
        8. lift-exp.f64N/A

          \[\leadsto \sin re \cdot \left(\frac{1}{2} \cdot \left(e^{im} + \color{blue}{e^{0 - im}}\right)\right) \]
        9. lift--.f64N/A

          \[\leadsto \sin re \cdot \left(\frac{1}{2} \cdot \left(e^{im} + e^{\color{blue}{0 - im}}\right)\right) \]
        10. sub0-negN/A

          \[\leadsto \sin re \cdot \left(\frac{1}{2} \cdot \left(e^{im} + e^{\color{blue}{\mathsf{neg}\left(im\right)}}\right)\right) \]
        11. cosh-undefN/A

          \[\leadsto \sin re \cdot \left(\frac{1}{2} \cdot \color{blue}{\left(2 \cdot \cosh im\right)}\right) \]
        12. associate-*r*N/A

          \[\leadsto \sin re \cdot \color{blue}{\left(\left(\frac{1}{2} \cdot 2\right) \cdot \cosh im\right)} \]
        13. metadata-evalN/A

          \[\leadsto \sin re \cdot \left(\color{blue}{1} \cdot \cosh im\right) \]
        14. *-lft-identityN/A

          \[\leadsto \sin re \cdot \color{blue}{\cosh im} \]
        15. lower-*.f64N/A

          \[\leadsto \color{blue}{\sin re \cdot \cosh im} \]
        16. lower-cosh.f64100.0

          \[\leadsto \sin re \cdot \color{blue}{\cosh im} \]
      3. Applied rewrites100.0%

        \[\leadsto \color{blue}{\sin re \cdot \cosh im} \]
      4. Taylor expanded in re around 0

        \[\leadsto \color{blue}{re} \cdot \cosh im \]
      5. Step-by-step derivation
        1. Applied rewrites61.9%

          \[\leadsto \color{blue}{re} \cdot \cosh im \]
      6. Recombined 3 regimes into one program.
      7. Add Preprocessing

      Alternative 4: 62.5% accurate, 0.7× speedup?

      \[\begin{array}{l} im_m = \left|im\right| \\ \begin{array}{l} \mathbf{if}\;\left(0.5 \cdot \sin re\right) \cdot \left(e^{0 - im\_m} + e^{im\_m}\right) \leq 5 \cdot 10^{-7}:\\ \;\;\;\;\mathsf{fma}\left(\left(re \cdot re\right) \cdot re, -0.16666666666666666, re\right) \cdot \cosh im\_m\\ \mathbf{else}:\\ \;\;\;\;re \cdot \cosh im\_m\\ \end{array} \end{array} \]
      im_m = (fabs.f64 im)
      (FPCore (re im_m)
       :precision binary64
       (if (<= (* (* 0.5 (sin re)) (+ (exp (- 0.0 im_m)) (exp im_m))) 5e-7)
         (* (fma (* (* re re) re) -0.16666666666666666 re) (cosh im_m))
         (* re (cosh im_m))))
      im_m = fabs(im);
      /* Alternative 4: two regimes split on the original expression; im_m = |im|. */
      double code(double re, double im_m) {
      	double tmp;
      	/* Tiny results (<= 5e-7): cubic Taylor polynomial of sin(re) (-1/6 coefficient) times cosh(im_m). */
      	if (((0.5 * sin(re)) * (exp((0.0 - im_m)) + exp(im_m))) <= 5e-7) {
      		tmp = fma(((re * re) * re), -0.16666666666666666, re) * cosh(im_m);
      	} else {
      		/* Otherwise: sin(re) replaced by its leading Taylor term re. */
      		tmp = re * cosh(im_m);
      	}
      	return tmp;
      }
      
      im_m = abs(im)
      function code(re, im_m)
      	tmp = 0.0
      	# tiny results (<= 5e-7): cubic Taylor polynomial of sin(re) times cosh(im_m)
      	if (Float64(Float64(0.5 * sin(re)) * Float64(exp(Float64(0.0 - im_m)) + exp(im_m))) <= 5e-7)
      		tmp = Float64(fma(Float64(Float64(re * re) * re), -0.16666666666666666, re) * cosh(im_m));
      	else
      		# otherwise: sin(re) replaced by its leading Taylor term re
      		tmp = Float64(re * cosh(im_m));
      	end
      	return tmp
      end
      
      im_m = N[Abs[im], $MachinePrecision]
      (* Alternative 4: two regimes split on the machine-precision value of the original expression. *)
      code[re_, im$95$m_] := If[LessEqual[N[(N[(0.5 * N[Sin[re], $MachinePrecision]), $MachinePrecision] * N[(N[Exp[N[(0.0 - im$95$m), $MachinePrecision]], $MachinePrecision] + N[Exp[im$95$m], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], 5e-7], N[(N[(N[(N[(re * re), $MachinePrecision] * re), $MachinePrecision] * -0.16666666666666666 + re), $MachinePrecision] * N[Cosh[im$95$m], $MachinePrecision]), $MachinePrecision], N[(re * N[Cosh[im$95$m], $MachinePrecision]), $MachinePrecision]]
      
      \begin{array}{l}
      im_m = \left|im\right|
      
      \\
      \begin{array}{l}
      \mathbf{if}\;\left(0.5 \cdot \sin re\right) \cdot \left(e^{0 - im\_m} + e^{im\_m}\right) \leq 5 \cdot 10^{-7}:\\
      \;\;\;\;\mathsf{fma}\left(\left(re \cdot re\right) \cdot re, -0.16666666666666666, re\right) \cdot \cosh im\_m\\
      
      \mathbf{else}:\\
      \;\;\;\;re \cdot \cosh im\_m\\
      
      
      \end{array}
      \end{array}
      
      Derivation
      1. Split input into 2 regimes
      2. if (*.f64 (*.f64 #s(literal 1/2 binary64) (sin.f64 re)) (+.f64 (exp.f64 (-.f64 #s(literal 0 binary64) im)) (exp.f64 im))) < 4.99999999999999977e-7

        1. Initial program 100.0%

          \[\left(0.5 \cdot \sin re\right) \cdot \left(e^{0 - im} + e^{im}\right) \]
        2. Step-by-step derivation
          1. lift-*.f64N/A

            \[\leadsto \color{blue}{\left(\frac{1}{2} \cdot \sin re\right) \cdot \left(e^{0 - im} + e^{im}\right)} \]
          2. lift-*.f64N/A

            \[\leadsto \color{blue}{\left(\frac{1}{2} \cdot \sin re\right)} \cdot \left(e^{0 - im} + e^{im}\right) \]
          3. *-commutativeN/A

            \[\leadsto \color{blue}{\left(\sin re \cdot \frac{1}{2}\right)} \cdot \left(e^{0 - im} + e^{im}\right) \]
          4. associate-*l*N/A

            \[\leadsto \color{blue}{\sin re \cdot \left(\frac{1}{2} \cdot \left(e^{0 - im} + e^{im}\right)\right)} \]
          5. lift-+.f64N/A

            \[\leadsto \sin re \cdot \left(\frac{1}{2} \cdot \color{blue}{\left(e^{0 - im} + e^{im}\right)}\right) \]
          6. +-commutativeN/A

            \[\leadsto \sin re \cdot \left(\frac{1}{2} \cdot \color{blue}{\left(e^{im} + e^{0 - im}\right)}\right) \]
          7. lift-exp.f64N/A

            \[\leadsto \sin re \cdot \left(\frac{1}{2} \cdot \left(\color{blue}{e^{im}} + e^{0 - im}\right)\right) \]
          8. lift-exp.f64N/A

            \[\leadsto \sin re \cdot \left(\frac{1}{2} \cdot \left(e^{im} + \color{blue}{e^{0 - im}}\right)\right) \]
          9. lift--.f64N/A

            \[\leadsto \sin re \cdot \left(\frac{1}{2} \cdot \left(e^{im} + e^{\color{blue}{0 - im}}\right)\right) \]
          10. sub0-negN/A

            \[\leadsto \sin re \cdot \left(\frac{1}{2} \cdot \left(e^{im} + e^{\color{blue}{\mathsf{neg}\left(im\right)}}\right)\right) \]
          11. cosh-undefN/A

            \[\leadsto \sin re \cdot \left(\frac{1}{2} \cdot \color{blue}{\left(2 \cdot \cosh im\right)}\right) \]
          12. associate-*r*N/A

            \[\leadsto \sin re \cdot \color{blue}{\left(\left(\frac{1}{2} \cdot 2\right) \cdot \cosh im\right)} \]
          13. metadata-evalN/A

            \[\leadsto \sin re \cdot \left(\color{blue}{1} \cdot \cosh im\right) \]
          14. *-lft-identityN/A

            \[\leadsto \sin re \cdot \color{blue}{\cosh im} \]
          15. lower-*.f64N/A

            \[\leadsto \color{blue}{\sin re \cdot \cosh im} \]
          16. lower-cosh.f64100.0

            \[\leadsto \sin re \cdot \color{blue}{\cosh im} \]
        3. Applied rewrites100.0%

          \[\leadsto \color{blue}{\sin re \cdot \cosh im} \]
        4. Taylor expanded in re around 0

          \[\leadsto \color{blue}{\left(re \cdot \left(1 + \frac{-1}{6} \cdot {re}^{2}\right)\right)} \cdot \cosh im \]
        5. Step-by-step derivation
          1. lower-*.f64N/A

            \[\leadsto \left(re \cdot \color{blue}{\left(1 + \frac{-1}{6} \cdot {re}^{2}\right)}\right) \cdot \cosh im \]
          2. lower-+.f64N/A

            \[\leadsto \left(re \cdot \left(1 + \color{blue}{\frac{-1}{6} \cdot {re}^{2}}\right)\right) \cdot \cosh im \]
          3. lower-*.f64N/A

            \[\leadsto \left(re \cdot \left(1 + \frac{-1}{6} \cdot \color{blue}{{re}^{2}}\right)\right) \cdot \cosh im \]
          4. lower-pow.f6462.9

            \[\leadsto \left(re \cdot \left(1 + -0.16666666666666666 \cdot {re}^{\color{blue}{2}}\right)\right) \cdot \cosh im \]
        6. Applied rewrites62.9%

          \[\leadsto \color{blue}{\left(re \cdot \left(1 + -0.16666666666666666 \cdot {re}^{2}\right)\right)} \cdot \cosh im \]
        7. Step-by-step derivation
          1. lift-*.f64N/A

            \[\leadsto \left(re \cdot \color{blue}{\left(1 + \frac{-1}{6} \cdot {re}^{2}\right)}\right) \cdot \cosh im \]
          2. lift-+.f64N/A

            \[\leadsto \left(re \cdot \left(1 + \color{blue}{\frac{-1}{6} \cdot {re}^{2}}\right)\right) \cdot \cosh im \]
          3. +-commutativeN/A

            \[\leadsto \left(re \cdot \left(\frac{-1}{6} \cdot {re}^{2} + \color{blue}{1}\right)\right) \cdot \cosh im \]
          4. distribute-rgt-inN/A

            \[\leadsto \left(\left(\frac{-1}{6} \cdot {re}^{2}\right) \cdot re + \color{blue}{1 \cdot re}\right) \cdot \cosh im \]
          5. *-commutativeN/A

            \[\leadsto \left(re \cdot \left(\frac{-1}{6} \cdot {re}^{2}\right) + \color{blue}{1} \cdot re\right) \cdot \cosh im \]
          6. lift-*.f64N/A

            \[\leadsto \left(re \cdot \left(\frac{-1}{6} \cdot {re}^{2}\right) + 1 \cdot re\right) \cdot \cosh im \]
          7. *-commutativeN/A

            \[\leadsto \left(re \cdot \left({re}^{2} \cdot \frac{-1}{6}\right) + 1 \cdot re\right) \cdot \cosh im \]
          8. associate-*r*N/A

            \[\leadsto \left(\left(re \cdot {re}^{2}\right) \cdot \frac{-1}{6} + \color{blue}{1} \cdot re\right) \cdot \cosh im \]
          9. *-lft-identityN/A

            \[\leadsto \left(\left(re \cdot {re}^{2}\right) \cdot \frac{-1}{6} + re\right) \cdot \cosh im \]
          10. lower-fma.f64N/A

            \[\leadsto \mathsf{fma}\left(re \cdot {re}^{2}, \color{blue}{\frac{-1}{6}}, re\right) \cdot \cosh im \]
          11. *-commutativeN/A

            \[\leadsto \mathsf{fma}\left({re}^{2} \cdot re, \frac{-1}{6}, re\right) \cdot \cosh im \]
          12. lower-*.f6462.9

            \[\leadsto \mathsf{fma}\left({re}^{2} \cdot re, -0.16666666666666666, re\right) \cdot \cosh im \]
          13. lift-pow.f64N/A

            \[\leadsto \mathsf{fma}\left({re}^{2} \cdot re, \frac{-1}{6}, re\right) \cdot \cosh im \]
          14. unpow2N/A

            \[\leadsto \mathsf{fma}\left(\left(re \cdot re\right) \cdot re, \frac{-1}{6}, re\right) \cdot \cosh im \]
          15. lower-*.f6462.9

            \[\leadsto \mathsf{fma}\left(\left(re \cdot re\right) \cdot re, -0.16666666666666666, re\right) \cdot \cosh im \]
        8. Applied rewrites62.9%

          \[\leadsto \mathsf{fma}\left(\left(re \cdot re\right) \cdot re, \color{blue}{-0.16666666666666666}, re\right) \cdot \cosh im \]

        if 4.99999999999999977e-7 < (*.f64 (*.f64 #s(literal 1/2 binary64) (sin.f64 re)) (+.f64 (exp.f64 (-.f64 #s(literal 0 binary64) im)) (exp.f64 im)))

        1. Initial program 100.0%

          \[\left(0.5 \cdot \sin re\right) \cdot \left(e^{0 - im} + e^{im}\right) \]
        2. Step-by-step derivation
          1. lift-*.f64N/A

            \[\leadsto \color{blue}{\left(\frac{1}{2} \cdot \sin re\right) \cdot \left(e^{0 - im} + e^{im}\right)} \]
          2. lift-*.f64N/A

            \[\leadsto \color{blue}{\left(\frac{1}{2} \cdot \sin re\right)} \cdot \left(e^{0 - im} + e^{im}\right) \]
          3. *-commutativeN/A

            \[\leadsto \color{blue}{\left(\sin re \cdot \frac{1}{2}\right)} \cdot \left(e^{0 - im} + e^{im}\right) \]
          4. associate-*l*N/A

            \[\leadsto \color{blue}{\sin re \cdot \left(\frac{1}{2} \cdot \left(e^{0 - im} + e^{im}\right)\right)} \]
          5. lift-+.f64N/A

            \[\leadsto \sin re \cdot \left(\frac{1}{2} \cdot \color{blue}{\left(e^{0 - im} + e^{im}\right)}\right) \]
          6. +-commutativeN/A

            \[\leadsto \sin re \cdot \left(\frac{1}{2} \cdot \color{blue}{\left(e^{im} + e^{0 - im}\right)}\right) \]
          7. lift-exp.f64N/A

            \[\leadsto \sin re \cdot \left(\frac{1}{2} \cdot \left(\color{blue}{e^{im}} + e^{0 - im}\right)\right) \]
          8. lift-exp.f64N/A

            \[\leadsto \sin re \cdot \left(\frac{1}{2} \cdot \left(e^{im} + \color{blue}{e^{0 - im}}\right)\right) \]
          9. lift--.f64N/A

            \[\leadsto \sin re \cdot \left(\frac{1}{2} \cdot \left(e^{im} + e^{\color{blue}{0 - im}}\right)\right) \]
          10. sub0-negN/A

            \[\leadsto \sin re \cdot \left(\frac{1}{2} \cdot \left(e^{im} + e^{\color{blue}{\mathsf{neg}\left(im\right)}}\right)\right) \]
          11. cosh-undefN/A

            \[\leadsto \sin re \cdot \left(\frac{1}{2} \cdot \color{blue}{\left(2 \cdot \cosh im\right)}\right) \]
          12. associate-*r*N/A

            \[\leadsto \sin re \cdot \color{blue}{\left(\left(\frac{1}{2} \cdot 2\right) \cdot \cosh im\right)} \]
          13. metadata-evalN/A

            \[\leadsto \sin re \cdot \left(\color{blue}{1} \cdot \cosh im\right) \]
          14. *-lft-identityN/A

            \[\leadsto \sin re \cdot \color{blue}{\cosh im} \]
          15. lower-*.f64N/A

            \[\leadsto \color{blue}{\sin re \cdot \cosh im} \]
          16. lower-cosh.f64100.0

            \[\leadsto \sin re \cdot \color{blue}{\cosh im} \]
        3. Applied rewrites100.0%

          \[\leadsto \color{blue}{\sin re \cdot \cosh im} \]
        4. Taylor expanded in re around 0

          \[\leadsto \color{blue}{re} \cdot \cosh im \]
        5. Step-by-step derivation
          1. Applied rewrites61.9%

            \[\leadsto \color{blue}{re} \cdot \cosh im \]
        6. Recombined 2 regimes into one program.
        7. Add Preprocessing

        Alternative 5: 47.8% accurate, 0.7× speedup?

        \[\begin{array}{l} im_m = \left|im\right| \\ \begin{array}{l} \mathbf{if}\;\left(0.5 \cdot \sin re\right) \cdot \left(e^{0 - im\_m} + e^{im\_m}\right) \leq -0.96:\\ \;\;\;\;0.5 \cdot \left(re \cdot \left(1 + \frac{1 \cdot 1 - im\_m \cdot im\_m}{1 + im\_m}\right)\right)\\ \mathbf{else}:\\ \;\;\;\;re \cdot \cosh im\_m\\ \end{array} \end{array} \]
        im_m = (fabs.f64 im)
        ;; Alternative 5: strongly-negative regime (<= -0.96) replaces cosh(im_m)
        ;; by the rational linearization (1 + (1 - im_m^2)/(1 + im_m)) / 2.
        (FPCore (re im_m)
         :precision binary64
         (if (<= (* (* 0.5 (sin re)) (+ (exp (- 0.0 im_m)) (exp im_m))) -0.96)
           (* 0.5 (* re (+ 1.0 (/ (- (* 1.0 1.0) (* im_m im_m)) (+ 1.0 im_m)))))
           (* re (cosh im_m))))
        im_m = fabs(im);
        /* Alternative 5 (47.8% accurate): im_m must be fabs(im).
         * When the original expression is <= -0.96, cosh(im_m) is replaced by the
         * rational linearization (1 + (1 - im_m^2)/(1 + im_m))/2 derived below;
         * otherwise sin(re) is approximated by re and cosh is kept exact. */
        double code(double re, double im_m) {
        	double tmp;
        	if (((0.5 * sin(re)) * (exp((0.0 - im_m)) + exp(im_m))) <= -0.96) {
        		tmp = 0.5 * (re * (1.0 + (((1.0 * 1.0) - (im_m * im_m)) / (1.0 + im_m))));
        	} else {
        		tmp = re * cosh(im_m);
        	}
        	return tmp;
        }
        
        im_m = abs(im)
        ! Portable stand-ins for C's fmax/fmin: Fortran's intrinsic max/min have
        ! no specified NaN behaviour, so each wrapper returns the other operand
        ! whenever one argument is NaN (detected via x /= x).
        ! Suffixes encode operand kinds: 88 = both real(8), 44 = both real(4),
        ! 84/48 = mixed; mixed variants promote the real(4) operand with dble().
        module fmin_fmax_functions
            implicit none
            private
            public fmax
            public fmin

            interface fmax
                module procedure fmax88
                module procedure fmax44
                module procedure fmax84
                module procedure fmax48
            end interface
            interface fmin
                module procedure fmin88
                module procedure fmin44
                module procedure fmin84
                module procedure fmin48
            end interface
        contains
            real(8) function fmax88(x, y) result (res)
                real(8), intent (in) :: x
                real(8), intent (in) :: y
                ! x NaN -> y; else y NaN -> x; else max(x, y)
                res = merge(y, merge(x, max(x, y), y /= y), x /= x)
            end function
            real(4) function fmax44(x, y) result (res)
                real(4), intent (in) :: x
                real(4), intent (in) :: y
                res = merge(y, merge(x, max(x, y), y /= y), x /= x)
            end function
            real(8) function fmax84(x, y) result(res)
                real(8), intent (in) :: x
                real(4), intent (in) :: y
                res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
            end function
            real(8) function fmax48(x, y) result(res)
                real(4), intent (in) :: x
                real(8), intent (in) :: y
                res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
            end function
            real(8) function fmin88(x, y) result (res)
                real(8), intent (in) :: x
                real(8), intent (in) :: y
                ! x NaN -> y; else y NaN -> x; else min(x, y)
                res = merge(y, merge(x, min(x, y), y /= y), x /= x)
            end function
            real(4) function fmin44(x, y) result (res)
                real(4), intent (in) :: x
                real(4), intent (in) :: y
                res = merge(y, merge(x, min(x, y), y /= y), x /= x)
            end function
            real(8) function fmin84(x, y) result(res)
                real(8), intent (in) :: x
                real(4), intent (in) :: y
                res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
            end function
            real(8) function fmin48(x, y) result(res)
                real(4), intent (in) :: x
                real(8), intent (in) :: y
                res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
            end function
        end module
        
        ! Alternative 5: sin(re)*cosh(im_m) with a rational linearization of
        ! cosh in the strongly-negative regime; im_m must be abs(im).
        real(8) function code(re, im_m)
        use fmin_fmax_functions
            real(8), intent (in) :: re
            real(8), intent (in) :: im_m
            real(8) :: tmp
            if (((0.5d0 * sin(re)) * (exp((0.0d0 - im_m)) + exp(im_m))) <= (-0.96d0)) then
                ! 0.5*re*(1 + (1 - im_m**2)/(1 + im_m))
                tmp = 0.5d0 * (re * (1.0d0 + (((1.0d0 * 1.0d0) - (im_m * im_m)) / (1.0d0 + im_m))))
            else
                tmp = re * cosh(im_m)
            end if
            code = tmp
        end function
        
        im_m = Math.abs(im);
        /**
         * Alternative 5 of the Herbie rewrite of (0.5*sin(re))*(exp(-im_m)+exp(im_m)).
         * im_m must be Math.abs(im) (see preprocessing line above).
         * Regime <= -0.96 uses the rational linearization of cosh; otherwise
         * sin(re) is approximated by re with cosh kept exact.
         */
        public static double code(double re, double im_m) {
        	double tmp;
        	if (((0.5 * Math.sin(re)) * (Math.exp((0.0 - im_m)) + Math.exp(im_m))) <= -0.96) {
        		tmp = 0.5 * (re * (1.0 + (((1.0 * 1.0) - (im_m * im_m)) / (1.0 + im_m))));
        	} else {
        		tmp = re * Math.cosh(im_m);
        	}
        	return tmp;
        }
        
        im_m = math.fabs(im)
        def code(re, im_m):
        	"""Alternative 5 of the Herbie rewrite of 0.5*sin(re)*(e**-im_m + e**im_m).

        	im_m must be math.fabs(im) (computed by the caller, see line above).
        	When the original expression is <= -0.96, cosh(im_m) is replaced by the
        	rational linearization (1 + (1 - im_m**2)/(1 + im_m))/2; otherwise
        	sin(re) is approximated by re (Taylor around 0) with cosh kept exact.
        	"""
        	tmp = 0
        	if ((0.5 * math.sin(re)) * (math.exp((0.0 - im_m)) + math.exp(im_m))) <= -0.96:
        		tmp = 0.5 * (re * (1.0 + (((1.0 * 1.0) - (im_m * im_m)) / (1.0 + im_m))))
        	else:
        		tmp = re * math.cosh(im_m)
        	return tmp
        
        im_m = abs(im)
        # Alternative 5: rational-linearization branch for values <= -0.96,
        # re*cosh(im_m) otherwise; im_m is assumed to be abs(im).
        # Float64() wrappers pin every intermediate to binary64 rounding.
        function code(re, im_m)
        	tmp = 0.0
        	if (Float64(Float64(0.5 * sin(re)) * Float64(exp(Float64(0.0 - im_m)) + exp(im_m))) <= -0.96)
        		tmp = Float64(0.5 * Float64(re * Float64(1.0 + Float64(Float64(Float64(1.0 * 1.0) - Float64(im_m * im_m)) / Float64(1.0 + im_m)))));
        	else
        		tmp = Float64(re * cosh(im_m));
        	end
        	return tmp
        end
        
        im_m = abs(im);
        % Alternative 5: rational-linearization branch for values <= -0.96,
        % re*cosh(im_m) otherwise; im_m must be abs(im) (see line above).
        function tmp_2 = code(re, im_m)
        	tmp = 0.0;
        	if (((0.5 * sin(re)) * (exp((0.0 - im_m)) + exp(im_m))) <= -0.96)
        		tmp = 0.5 * (re * (1.0 + (((1.0 * 1.0) - (im_m * im_m)) / (1.0 + im_m))));
        	else
        		tmp = re * cosh(im_m);
        	end
        	tmp_2 = tmp;
        end
        
        im_m = N[Abs[im], $MachinePrecision]
        (* Alternative 5: rational linearization of cosh for values <= -0.96, re*Cosh[im_m] otherwise. im$95$m is the escaped spelling of im_m. *)
        code[re_, im$95$m_] := If[LessEqual[N[(N[(0.5 * N[Sin[re], $MachinePrecision]), $MachinePrecision] * N[(N[Exp[N[(0.0 - im$95$m), $MachinePrecision]], $MachinePrecision] + N[Exp[im$95$m], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], -0.96], N[(0.5 * N[(re * N[(1.0 + N[(N[(N[(1.0 * 1.0), $MachinePrecision] - N[(im$95$m * im$95$m), $MachinePrecision]), $MachinePrecision] / N[(1.0 + im$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(re * N[Cosh[im$95$m], $MachinePrecision]), $MachinePrecision]]
        
        \begin{array}{l}
        im_m = \left|im\right|
        
        \\
        \begin{array}{l}
        \mathbf{if}\;\left(0.5 \cdot \sin re\right) \cdot \left(e^{0 - im\_m} + e^{im\_m}\right) \leq -0.96:\\
        \;\;\;\;0.5 \cdot \left(re \cdot \left(1 + \frac{1 \cdot 1 - im\_m \cdot im\_m}{1 + im\_m}\right)\right)\\
        
        \mathbf{else}:\\
        \;\;\;\;re \cdot \cosh im\_m\\
        
        
        \end{array}
        \end{array}
        
        Derivation
        1. Split input into 2 regimes
        2. if (*.f64 (*.f64 #s(literal 1/2 binary64) (sin.f64 re)) (+.f64 (exp.f64 (-.f64 #s(literal 0 binary64) im)) (exp.f64 im))) < -0.95999999999999996

          1. Initial program 100.0%

            \[\left(0.5 \cdot \sin re\right) \cdot \left(e^{0 - im} + e^{im}\right) \]
          2. Taylor expanded in re around 0

            \[\leadsto \color{blue}{\frac{1}{2} \cdot \left(re \cdot \left(e^{im} + e^{\mathsf{neg}\left(im\right)}\right)\right)} \]
          3. Step-by-step derivation
            1. lower-*.f64N/A

              \[\leadsto \frac{1}{2} \cdot \color{blue}{\left(re \cdot \left(e^{im} + e^{\mathsf{neg}\left(im\right)}\right)\right)} \]
            2. lower-*.f64N/A

              \[\leadsto \frac{1}{2} \cdot \left(re \cdot \color{blue}{\left(e^{im} + e^{\mathsf{neg}\left(im\right)}\right)}\right) \]
            3. lower-+.f64N/A

              \[\leadsto \frac{1}{2} \cdot \left(re \cdot \left(e^{im} + \color{blue}{e^{\mathsf{neg}\left(im\right)}}\right)\right) \]
            4. lower-exp.f64N/A

              \[\leadsto \frac{1}{2} \cdot \left(re \cdot \left(e^{im} + e^{\color{blue}{\mathsf{neg}\left(im\right)}}\right)\right) \]
            5. lower-exp.f64N/A

              \[\leadsto \frac{1}{2} \cdot \left(re \cdot \left(e^{im} + e^{\mathsf{neg}\left(im\right)}\right)\right) \]
            6. lower-neg.f6461.9

              \[\leadsto 0.5 \cdot \left(re \cdot \left(e^{im} + e^{-im}\right)\right) \]
          4. Applied rewrites61.9%

            \[\leadsto \color{blue}{0.5 \cdot \left(re \cdot \left(e^{im} + e^{-im}\right)\right)} \]
          5. Taylor expanded in im around 0

            \[\leadsto \frac{1}{2} \cdot \left(re \cdot \left(1 + e^{\color{blue}{-im}}\right)\right) \]
          6. Step-by-step derivation
            1. Applied rewrites25.8%

              \[\leadsto 0.5 \cdot \left(re \cdot \left(1 + e^{\color{blue}{-im}}\right)\right) \]
            2. Taylor expanded in im around 0

              \[\leadsto \frac{1}{2} \cdot \left(re \cdot \left(1 + \left(1 + \color{blue}{-1 \cdot im}\right)\right)\right) \]
            3. Step-by-step derivation
              1. lower-+.f64N/A

                \[\leadsto \frac{1}{2} \cdot \left(re \cdot \left(1 + \left(1 + -1 \cdot \color{blue}{im}\right)\right)\right) \]
              2. lower-*.f6431.5

                \[\leadsto 0.5 \cdot \left(re \cdot \left(1 + \left(1 + -1 \cdot im\right)\right)\right) \]
            4. Applied rewrites31.5%

              \[\leadsto 0.5 \cdot \left(re \cdot \left(1 + \left(1 + \color{blue}{-1 \cdot im}\right)\right)\right) \]
            5. Step-by-step derivation
              1. lift-+.f64N/A

                \[\leadsto \frac{1}{2} \cdot \left(re \cdot \left(1 + \left(1 + -1 \cdot \color{blue}{im}\right)\right)\right) \]
              2. lift-*.f64N/A

                \[\leadsto \frac{1}{2} \cdot \left(re \cdot \left(1 + \left(1 + -1 \cdot im\right)\right)\right) \]
              3. fp-cancel-sign-sub-invN/A

                \[\leadsto \frac{1}{2} \cdot \left(re \cdot \left(1 + \left(1 - \left(\mathsf{neg}\left(-1\right)\right) \cdot \color{blue}{im}\right)\right)\right) \]
              4. flip--N/A

                \[\leadsto \frac{1}{2} \cdot \left(re \cdot \left(1 + \frac{1 \cdot 1 - \left(\left(\mathsf{neg}\left(-1\right)\right) \cdot im\right) \cdot \left(\left(\mathsf{neg}\left(-1\right)\right) \cdot im\right)}{1 + \color{blue}{\left(\mathsf{neg}\left(-1\right)\right) \cdot im}}\right)\right) \]
              5. distribute-lft-neg-outN/A

                \[\leadsto \frac{1}{2} \cdot \left(re \cdot \left(1 + \frac{1 \cdot 1 - \left(\mathsf{neg}\left(-1 \cdot im\right)\right) \cdot \left(\left(\mathsf{neg}\left(-1\right)\right) \cdot im\right)}{1 + \left(\mathsf{neg}\left(-1\right)\right) \cdot im}\right)\right) \]
              6. lift-*.f64N/A

                \[\leadsto \frac{1}{2} \cdot \left(re \cdot \left(1 + \frac{1 \cdot 1 - \left(\mathsf{neg}\left(-1 \cdot im\right)\right) \cdot \left(\left(\mathsf{neg}\left(-1\right)\right) \cdot im\right)}{1 + \left(\mathsf{neg}\left(-1\right)\right) \cdot im}\right)\right) \]
              7. distribute-lft-neg-outN/A

                \[\leadsto \frac{1}{2} \cdot \left(re \cdot \left(1 + \frac{1 \cdot 1 - \left(\mathsf{neg}\left(-1 \cdot im\right)\right) \cdot \left(\mathsf{neg}\left(-1 \cdot im\right)\right)}{1 + \left(\mathsf{neg}\left(-1\right)\right) \cdot im}\right)\right) \]
              8. lift-*.f64N/A

                \[\leadsto \frac{1}{2} \cdot \left(re \cdot \left(1 + \frac{1 \cdot 1 - \left(\mathsf{neg}\left(-1 \cdot im\right)\right) \cdot \left(\mathsf{neg}\left(-1 \cdot im\right)\right)}{1 + \left(\mathsf{neg}\left(-1\right)\right) \cdot im}\right)\right) \]
              9. distribute-lft-neg-outN/A

                \[\leadsto \frac{1}{2} \cdot \left(re \cdot \left(1 + \frac{1 \cdot 1 - \left(\mathsf{neg}\left(-1 \cdot im\right)\right) \cdot \left(\mathsf{neg}\left(-1 \cdot im\right)\right)}{1 + \left(\mathsf{neg}\left(-1 \cdot im\right)\right)}\right)\right) \]
              10. lift-*.f64N/A

                \[\leadsto \frac{1}{2} \cdot \left(re \cdot \left(1 + \frac{1 \cdot 1 - \left(\mathsf{neg}\left(-1 \cdot im\right)\right) \cdot \left(\mathsf{neg}\left(-1 \cdot im\right)\right)}{1 + \left(\mathsf{neg}\left(-1 \cdot im\right)\right)}\right)\right) \]
              11. lower-special-+.f32N/A

                \[\leadsto \frac{1}{2} \cdot \left(re \cdot \left(1 + \frac{1 \cdot 1 - \left(\mathsf{neg}\left(-1 \cdot im\right)\right) \cdot \left(\mathsf{neg}\left(-1 \cdot im\right)\right)}{1 + \left(\mathsf{neg}\left(-1 \cdot im\right)\right)}\right)\right) \]
              12. lower-+.f32N/A

                \[\leadsto \frac{1}{2} \cdot \left(re \cdot \left(1 + \frac{1 \cdot 1 - \left(\mathsf{neg}\left(-1 \cdot im\right)\right) \cdot \left(\mathsf{neg}\left(-1 \cdot im\right)\right)}{1 + \left(\mathsf{neg}\left(-1 \cdot im\right)\right)}\right)\right) \]
              13. lower-special-/.f64N/A

                \[\leadsto \frac{1}{2} \cdot \left(re \cdot \left(1 + \frac{1 \cdot 1 - \left(\mathsf{neg}\left(-1 \cdot im\right)\right) \cdot \left(\mathsf{neg}\left(-1 \cdot im\right)\right)}{1 + \color{blue}{\left(\mathsf{neg}\left(-1 \cdot im\right)\right)}}\right)\right) \]
            6. Applied rewrites32.9%

              \[\leadsto 0.5 \cdot \left(re \cdot \left(1 + \frac{1 \cdot 1 - im \cdot im}{1 + \color{blue}{im}}\right)\right) \]

            if -0.95999999999999996 < (*.f64 (*.f64 #s(literal 1/2 binary64) (sin.f64 re)) (+.f64 (exp.f64 (-.f64 #s(literal 0 binary64) im)) (exp.f64 im)))

            1. Initial program 100.0%

              \[\left(0.5 \cdot \sin re\right) \cdot \left(e^{0 - im} + e^{im}\right) \]
            2. Step-by-step derivation
              1. lift-*.f64N/A

                \[\leadsto \color{blue}{\left(\frac{1}{2} \cdot \sin re\right) \cdot \left(e^{0 - im} + e^{im}\right)} \]
              2. lift-*.f64N/A

                \[\leadsto \color{blue}{\left(\frac{1}{2} \cdot \sin re\right)} \cdot \left(e^{0 - im} + e^{im}\right) \]
              3. *-commutativeN/A

                \[\leadsto \color{blue}{\left(\sin re \cdot \frac{1}{2}\right)} \cdot \left(e^{0 - im} + e^{im}\right) \]
              4. associate-*l*N/A

                \[\leadsto \color{blue}{\sin re \cdot \left(\frac{1}{2} \cdot \left(e^{0 - im} + e^{im}\right)\right)} \]
              5. lift-+.f64N/A

                \[\leadsto \sin re \cdot \left(\frac{1}{2} \cdot \color{blue}{\left(e^{0 - im} + e^{im}\right)}\right) \]
              6. +-commutativeN/A

                \[\leadsto \sin re \cdot \left(\frac{1}{2} \cdot \color{blue}{\left(e^{im} + e^{0 - im}\right)}\right) \]
              7. lift-exp.f64N/A

                \[\leadsto \sin re \cdot \left(\frac{1}{2} \cdot \left(\color{blue}{e^{im}} + e^{0 - im}\right)\right) \]
              8. lift-exp.f64N/A

                \[\leadsto \sin re \cdot \left(\frac{1}{2} \cdot \left(e^{im} + \color{blue}{e^{0 - im}}\right)\right) \]
              9. lift--.f64N/A

                \[\leadsto \sin re \cdot \left(\frac{1}{2} \cdot \left(e^{im} + e^{\color{blue}{0 - im}}\right)\right) \]
              10. sub0-negN/A

                \[\leadsto \sin re \cdot \left(\frac{1}{2} \cdot \left(e^{im} + e^{\color{blue}{\mathsf{neg}\left(im\right)}}\right)\right) \]
              11. cosh-undefN/A

                \[\leadsto \sin re \cdot \left(\frac{1}{2} \cdot \color{blue}{\left(2 \cdot \cosh im\right)}\right) \]
              12. associate-*r*N/A

                \[\leadsto \sin re \cdot \color{blue}{\left(\left(\frac{1}{2} \cdot 2\right) \cdot \cosh im\right)} \]
              13. metadata-evalN/A

                \[\leadsto \sin re \cdot \left(\color{blue}{1} \cdot \cosh im\right) \]
              14. *-lft-identityN/A

                \[\leadsto \sin re \cdot \color{blue}{\cosh im} \]
              15. lower-*.f64N/A

                \[\leadsto \color{blue}{\sin re \cdot \cosh im} \]
              16. lower-cosh.f64100.0

                \[\leadsto \sin re \cdot \color{blue}{\cosh im} \]
            3. Applied rewrites100.0%

              \[\leadsto \color{blue}{\sin re \cdot \cosh im} \]
            4. Taylor expanded in re around 0

              \[\leadsto \color{blue}{re} \cdot \cosh im \]
            5. Step-by-step derivation
              1. Applied rewrites61.9%

                \[\leadsto \color{blue}{re} \cdot \cosh im \]
            6. Recombined 2 regimes into one program.
            7. Add Preprocessing

            Alternative 6: 47.2% accurate, 0.8× speedup?

            \[\begin{array}{l} im_m = \left|im\right| \\ \begin{array}{l} \mathbf{if}\;\left(0.5 \cdot \sin re\right) \cdot \left(e^{0 - im\_m} + e^{im\_m}\right) \leq -0.8:\\ \;\;\;\;\left(\left(1 - im\_m\right) + 1\right) \cdot \left(0.5 \cdot re\right)\\ \mathbf{else}:\\ \;\;\;\;re \cdot \cosh im\_m\\ \end{array} \end{array} \]
            im_m = (fabs.f64 im)
            ;; Alternative 6: regime <= -0.8 drops cosh entirely in favour of the
            ;; first-order form (2 - im_m)/2; otherwise sin(re) ~ re with exact cosh.
            (FPCore (re im_m)
             :precision binary64
             (if (<= (* (* 0.5 (sin re)) (+ (exp (- 0.0 im_m)) (exp im_m))) -0.8)
               (* (+ (- 1.0 im_m) 1.0) (* 0.5 re))
               (* re (cosh im_m))))
            im_m = fabs(im);
            /* Alternative 6 (47.2% accurate): im_m must be fabs(im).
             * Regime <= -0.8 replaces cosh(im_m) by the first-order form
             * ((1 - im_m) + 1)/2; otherwise sin(re) is approximated by re. */
            double code(double re, double im_m) {
            	double tmp;
            	if (((0.5 * sin(re)) * (exp((0.0 - im_m)) + exp(im_m))) <= -0.8) {
            		tmp = ((1.0 - im_m) + 1.0) * (0.5 * re);
            	} else {
            		tmp = re * cosh(im_m);
            	}
            	return tmp;
            }
            
            im_m = abs(im)
            ! Portable stand-ins for C's fmax/fmin: Fortran's intrinsic max/min have
            ! no specified NaN behaviour, so each wrapper returns the other operand
            ! whenever one argument is NaN (detected via x /= x).
            ! Suffixes encode operand kinds: 88 = both real(8), 44 = both real(4),
            ! 84/48 = mixed; mixed variants promote the real(4) operand with dble().
            module fmin_fmax_functions
                implicit none
                private
                public fmax
                public fmin

                interface fmax
                    module procedure fmax88
                    module procedure fmax44
                    module procedure fmax84
                    module procedure fmax48
                end interface
                interface fmin
                    module procedure fmin88
                    module procedure fmin44
                    module procedure fmin84
                    module procedure fmin48
                end interface
            contains
                real(8) function fmax88(x, y) result (res)
                    real(8), intent (in) :: x
                    real(8), intent (in) :: y
                    ! x NaN -> y; else y NaN -> x; else max(x, y)
                    res = merge(y, merge(x, max(x, y), y /= y), x /= x)
                end function
                real(4) function fmax44(x, y) result (res)
                    real(4), intent (in) :: x
                    real(4), intent (in) :: y
                    res = merge(y, merge(x, max(x, y), y /= y), x /= x)
                end function
                real(8) function fmax84(x, y) result(res)
                    real(8), intent (in) :: x
                    real(4), intent (in) :: y
                    res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
                end function
                real(8) function fmax48(x, y) result(res)
                    real(4), intent (in) :: x
                    real(8), intent (in) :: y
                    res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
                end function
                real(8) function fmin88(x, y) result (res)
                    real(8), intent (in) :: x
                    real(8), intent (in) :: y
                    ! x NaN -> y; else y NaN -> x; else min(x, y)
                    res = merge(y, merge(x, min(x, y), y /= y), x /= x)
                end function
                real(4) function fmin44(x, y) result (res)
                    real(4), intent (in) :: x
                    real(4), intent (in) :: y
                    res = merge(y, merge(x, min(x, y), y /= y), x /= x)
                end function
                real(8) function fmin84(x, y) result(res)
                    real(8), intent (in) :: x
                    real(4), intent (in) :: y
                    res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
                end function
                real(8) function fmin48(x, y) result(res)
                    real(4), intent (in) :: x
                    real(8), intent (in) :: y
                    res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
                end function
            end module
            
            ! Alternative 6: first-order form ((1 - im_m) + 1)/2 replaces cosh in
            ! the strongly-negative regime; im_m must be abs(im).
            real(8) function code(re, im_m)
            use fmin_fmax_functions
                real(8), intent (in) :: re
                real(8), intent (in) :: im_m
                real(8) :: tmp
                if (((0.5d0 * sin(re)) * (exp((0.0d0 - im_m)) + exp(im_m))) <= (-0.8d0)) then
                    tmp = ((1.0d0 - im_m) + 1.0d0) * (0.5d0 * re)
                else
                    tmp = re * cosh(im_m)
                end if
                code = tmp
            end function
            
            im_m = Math.abs(im);
            /**
             * Alternative 6 of the Herbie rewrite of (0.5*sin(re))*(exp(-im_m)+exp(im_m)).
             * im_m must be Math.abs(im) (see preprocessing line above).
             * Regime <= -0.8 replaces cosh(im_m) by the first-order form
             * ((1 - im_m) + 1)/2; otherwise sin(re) is approximated by re.
             */
            public static double code(double re, double im_m) {
            	double tmp;
            	if (((0.5 * Math.sin(re)) * (Math.exp((0.0 - im_m)) + Math.exp(im_m))) <= -0.8) {
            		tmp = ((1.0 - im_m) + 1.0) * (0.5 * re);
            	} else {
            		tmp = re * Math.cosh(im_m);
            	}
            	return tmp;
            }
            
            im_m = math.fabs(im)
            def code(re, im_m):
            	"""Alternative 6 of the Herbie rewrite of 0.5*sin(re)*(e**-im_m + e**im_m).

            	im_m must be math.fabs(im) (computed by the caller, see line above).
            	When the original expression is <= -0.8, cosh(im_m) is replaced by
            	the first-order form ((1 - im_m) + 1)/2; otherwise sin(re) is
            	approximated by re (Taylor around 0) with cosh kept exact.
            	"""
            	tmp = 0
            	if ((0.5 * math.sin(re)) * (math.exp((0.0 - im_m)) + math.exp(im_m))) <= -0.8:
            		tmp = ((1.0 - im_m) + 1.0) * (0.5 * re)
            	else:
            		tmp = re * math.cosh(im_m)
            	return tmp
            
            im_m = abs(im)
            # Alternative 6: first-order branch for values <= -0.8, re*cosh(im_m)
            # otherwise; im_m is assumed to be abs(im).
            # Float64() wrappers pin every intermediate to binary64 rounding.
            function code(re, im_m)
            	tmp = 0.0
            	if (Float64(Float64(0.5 * sin(re)) * Float64(exp(Float64(0.0 - im_m)) + exp(im_m))) <= -0.8)
            		tmp = Float64(Float64(Float64(1.0 - im_m) + 1.0) * Float64(0.5 * re));
            	else
            		tmp = Float64(re * cosh(im_m));
            	end
            	return tmp
            end
            
            im_m = abs(im);
            % Alternative 6: first-order branch for values <= -0.8, re*cosh(im_m)
            % otherwise; im_m must be abs(im) (see line above).
            function tmp_2 = code(re, im_m)
            	tmp = 0.0;
            	if (((0.5 * sin(re)) * (exp((0.0 - im_m)) + exp(im_m))) <= -0.8)
            		tmp = ((1.0 - im_m) + 1.0) * (0.5 * re);
            	else
            		tmp = re * cosh(im_m);
            	end
            	tmp_2 = tmp;
            end
            
            im_m = N[Abs[im], $MachinePrecision]
            (* Alternative 6: first-order form ((1 - im_m) + 1)/2 for values <= -0.8, re*Cosh[im_m] otherwise. im$95$m is the escaped spelling of im_m. *)
            code[re_, im$95$m_] := If[LessEqual[N[(N[(0.5 * N[Sin[re], $MachinePrecision]), $MachinePrecision] * N[(N[Exp[N[(0.0 - im$95$m), $MachinePrecision]], $MachinePrecision] + N[Exp[im$95$m], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], -0.8], N[(N[(N[(1.0 - im$95$m), $MachinePrecision] + 1.0), $MachinePrecision] * N[(0.5 * re), $MachinePrecision]), $MachinePrecision], N[(re * N[Cosh[im$95$m], $MachinePrecision]), $MachinePrecision]]
            
            \begin{array}{l}
            im_m = \left|im\right|
            
            \\
            \begin{array}{l}
            \mathbf{if}\;\left(0.5 \cdot \sin re\right) \cdot \left(e^{0 - im\_m} + e^{im\_m}\right) \leq -0.8:\\
            \;\;\;\;\left(\left(1 - im\_m\right) + 1\right) \cdot \left(0.5 \cdot re\right)\\
            
            \mathbf{else}:\\
            \;\;\;\;re \cdot \cosh im\_m\\
            
            
            \end{array}
            \end{array}
            
            Derivation
            1. Split input into 2 regimes
            2. if (*.f64 (*.f64 #s(literal 1/2 binary64) (sin.f64 re)) (+.f64 (exp.f64 (-.f64 #s(literal 0 binary64) im)) (exp.f64 im))) < -0.80000000000000004

              1. Initial program 100.0%

                \[\left(0.5 \cdot \sin re\right) \cdot \left(e^{0 - im} + e^{im}\right) \]
              2. Taylor expanded in re around 0

                \[\leadsto \color{blue}{\frac{1}{2} \cdot \left(re \cdot \left(e^{im} + e^{\mathsf{neg}\left(im\right)}\right)\right)} \]
              3. Step-by-step derivation
                1. lower-*.f64N/A

                  \[\leadsto \frac{1}{2} \cdot \color{blue}{\left(re \cdot \left(e^{im} + e^{\mathsf{neg}\left(im\right)}\right)\right)} \]
                2. lower-*.f64N/A

                  \[\leadsto \frac{1}{2} \cdot \left(re \cdot \color{blue}{\left(e^{im} + e^{\mathsf{neg}\left(im\right)}\right)}\right) \]
                3. lower-+.f64N/A

                  \[\leadsto \frac{1}{2} \cdot \left(re \cdot \left(e^{im} + \color{blue}{e^{\mathsf{neg}\left(im\right)}}\right)\right) \]
                4. lower-exp.f64N/A

                  \[\leadsto \frac{1}{2} \cdot \left(re \cdot \left(e^{im} + e^{\color{blue}{\mathsf{neg}\left(im\right)}}\right)\right) \]
                5. lower-exp.f64N/A

                  \[\leadsto \frac{1}{2} \cdot \left(re \cdot \left(e^{im} + e^{\mathsf{neg}\left(im\right)}\right)\right) \]
                6. lower-neg.f6461.9

                  \[\leadsto 0.5 \cdot \left(re \cdot \left(e^{im} + e^{-im}\right)\right) \]
              4. Applied rewrites61.9%

                \[\leadsto \color{blue}{0.5 \cdot \left(re \cdot \left(e^{im} + e^{-im}\right)\right)} \]
              5. Taylor expanded in im around 0

                \[\leadsto \frac{1}{2} \cdot \left(re \cdot \left(1 + e^{\color{blue}{-im}}\right)\right) \]
              6. Step-by-step derivation
                1. Applied rewrites25.8%

                  \[\leadsto 0.5 \cdot \left(re \cdot \left(1 + e^{\color{blue}{-im}}\right)\right) \]
                2. Taylor expanded in im around 0

                  \[\leadsto \frac{1}{2} \cdot \left(re \cdot \left(1 + \left(1 + \color{blue}{-1 \cdot im}\right)\right)\right) \]
                3. Step-by-step derivation
                  1. lower-+.f64N/A

                    \[\leadsto \frac{1}{2} \cdot \left(re \cdot \left(1 + \left(1 + -1 \cdot \color{blue}{im}\right)\right)\right) \]
                  2. lower-*.f6431.5

                    \[\leadsto 0.5 \cdot \left(re \cdot \left(1 + \left(1 + -1 \cdot im\right)\right)\right) \]
                4. Applied rewrites31.5%

                  \[\leadsto 0.5 \cdot \left(re \cdot \left(1 + \left(1 + \color{blue}{-1 \cdot im}\right)\right)\right) \]
                5. Step-by-step derivation
                  1. lift-*.f64N/A

                    \[\leadsto \frac{1}{2} \cdot \color{blue}{\left(re \cdot \left(1 + \left(1 + -1 \cdot im\right)\right)\right)} \]
                  2. lift-*.f64N/A

                    \[\leadsto \frac{1}{2} \cdot \left(re \cdot \color{blue}{\left(1 + \left(1 + -1 \cdot im\right)\right)}\right) \]
                  3. associate-*r*N/A

                    \[\leadsto \left(\frac{1}{2} \cdot re\right) \cdot \color{blue}{\left(1 + \left(1 + -1 \cdot im\right)\right)} \]
                  4. *-commutativeN/A

                    \[\leadsto \left(1 + \left(1 + -1 \cdot im\right)\right) \cdot \color{blue}{\left(\frac{1}{2} \cdot re\right)} \]
                  5. lower-*.f64N/A

                    \[\leadsto \left(1 + \left(1 + -1 \cdot im\right)\right) \cdot \color{blue}{\left(\frac{1}{2} \cdot re\right)} \]
                  6. lift-+.f64N/A

                    \[\leadsto \left(1 + \left(1 + -1 \cdot im\right)\right) \cdot \left(\color{blue}{\frac{1}{2}} \cdot re\right) \]
                  7. +-commutativeN/A

                    \[\leadsto \left(\left(1 + -1 \cdot im\right) + 1\right) \cdot \left(\color{blue}{\frac{1}{2}} \cdot re\right) \]
                  8. lower-+.f64N/A

                    \[\leadsto \left(\left(1 + -1 \cdot im\right) + 1\right) \cdot \left(\color{blue}{\frac{1}{2}} \cdot re\right) \]
                  9. lift-+.f64N/A

                    \[\leadsto \left(\left(1 + -1 \cdot im\right) + 1\right) \cdot \left(\frac{1}{2} \cdot re\right) \]
                  10. add-flipN/A

                    \[\leadsto \left(\left(1 - \left(\mathsf{neg}\left(-1 \cdot im\right)\right)\right) + 1\right) \cdot \left(\frac{1}{2} \cdot re\right) \]
                  11. lower--.f64N/A

                    \[\leadsto \left(\left(1 - \left(\mathsf{neg}\left(-1 \cdot im\right)\right)\right) + 1\right) \cdot \left(\frac{1}{2} \cdot re\right) \]
                  12. lift-*.f64N/A

                    \[\leadsto \left(\left(1 - \left(\mathsf{neg}\left(-1 \cdot im\right)\right)\right) + 1\right) \cdot \left(\frac{1}{2} \cdot re\right) \]
                  13. distribute-lft-neg-outN/A

                    \[\leadsto \left(\left(1 - \left(\mathsf{neg}\left(-1\right)\right) \cdot im\right) + 1\right) \cdot \left(\frac{1}{2} \cdot re\right) \]
                  14. metadata-evalN/A

                    \[\leadsto \left(\left(1 - 1 \cdot im\right) + 1\right) \cdot \left(\frac{1}{2} \cdot re\right) \]
                  15. *-lft-identityN/A

                    \[\leadsto \left(\left(1 - im\right) + 1\right) \cdot \left(\frac{1}{2} \cdot re\right) \]
                  16. *-lft-identityN/A

                     \[\leadsto \left(\left(1 - im\right) + 1\right) \cdot \color{blue}{\left(\frac{1}{2} \cdot re\right)} \]
                6. Applied rewrites31.5%

                  \[\leadsto \left(\left(1 - im\right) + 1\right) \cdot \color{blue}{\left(0.5 \cdot re\right)} \]

                if -0.80000000000000004 < (*.f64 (*.f64 #s(literal 1/2 binary64) (sin.f64 re)) (+.f64 (exp.f64 (-.f64 #s(literal 0 binary64) im)) (exp.f64 im)))

                1. Initial program 100.0%

                  \[\left(0.5 \cdot \sin re\right) \cdot \left(e^{0 - im} + e^{im}\right) \]
                2. Step-by-step derivation
                  1. lift-*.f64N/A

                    \[\leadsto \color{blue}{\left(\frac{1}{2} \cdot \sin re\right) \cdot \left(e^{0 - im} + e^{im}\right)} \]
                  2. lift-*.f64N/A

                    \[\leadsto \color{blue}{\left(\frac{1}{2} \cdot \sin re\right)} \cdot \left(e^{0 - im} + e^{im}\right) \]
                  3. *-commutativeN/A

                    \[\leadsto \color{blue}{\left(\sin re \cdot \frac{1}{2}\right)} \cdot \left(e^{0 - im} + e^{im}\right) \]
                  4. associate-*l*N/A

                    \[\leadsto \color{blue}{\sin re \cdot \left(\frac{1}{2} \cdot \left(e^{0 - im} + e^{im}\right)\right)} \]
                  5. lift-+.f64N/A

                    \[\leadsto \sin re \cdot \left(\frac{1}{2} \cdot \color{blue}{\left(e^{0 - im} + e^{im}\right)}\right) \]
                  6. +-commutativeN/A

                    \[\leadsto \sin re \cdot \left(\frac{1}{2} \cdot \color{blue}{\left(e^{im} + e^{0 - im}\right)}\right) \]
                  7. lift-exp.f64N/A

                    \[\leadsto \sin re \cdot \left(\frac{1}{2} \cdot \left(\color{blue}{e^{im}} + e^{0 - im}\right)\right) \]
                  8. lift-exp.f64N/A

                    \[\leadsto \sin re \cdot \left(\frac{1}{2} \cdot \left(e^{im} + \color{blue}{e^{0 - im}}\right)\right) \]
                  9. lift--.f64N/A

                    \[\leadsto \sin re \cdot \left(\frac{1}{2} \cdot \left(e^{im} + e^{\color{blue}{0 - im}}\right)\right) \]
                  10. sub0-negN/A

                    \[\leadsto \sin re \cdot \left(\frac{1}{2} \cdot \left(e^{im} + e^{\color{blue}{\mathsf{neg}\left(im\right)}}\right)\right) \]
                  11. cosh-undefN/A

                    \[\leadsto \sin re \cdot \left(\frac{1}{2} \cdot \color{blue}{\left(2 \cdot \cosh im\right)}\right) \]
                  12. associate-*r*N/A

                    \[\leadsto \sin re \cdot \color{blue}{\left(\left(\frac{1}{2} \cdot 2\right) \cdot \cosh im\right)} \]
                  13. metadata-evalN/A

                    \[\leadsto \sin re \cdot \left(\color{blue}{1} \cdot \cosh im\right) \]
                  14. *-lft-identityN/A

                    \[\leadsto \sin re \cdot \color{blue}{\cosh im} \]
                  15. lower-*.f64N/A

                    \[\leadsto \color{blue}{\sin re \cdot \cosh im} \]
                  16. lower-cosh.f64100.0

                    \[\leadsto \sin re \cdot \color{blue}{\cosh im} \]
                3. Applied rewrites100.0%

                  \[\leadsto \color{blue}{\sin re \cdot \cosh im} \]
                4. Taylor expanded in re around 0

                  \[\leadsto \color{blue}{re} \cdot \cosh im \]
                5. Step-by-step derivation
                  1. Applied rewrites61.9%

                    \[\leadsto \color{blue}{re} \cdot \cosh im \]
                6. Recombined 2 regimes into one program.
                7. Add Preprocessing

                Alternative 7: 31.5% accurate, 5.2× speedup?

                \[\begin{array}{l} im_m = \left|im\right| \\ \left(\left(1 - im\_m\right) + 1\right) \cdot \left(0.5 \cdot re\right) \end{array} \]
                im_m = (fabs.f64 im)
                (FPCore (re im_m) :precision binary64 (* (+ (- 1.0 im_m) 1.0) (* 0.5 re)))
                im_m = fabs(im);
                double code(double re, double im_m) {
                	return ((1.0 - im_m) + 1.0) * (0.5 * re);
                }
                
                 im_m = abs(im)
                 ! NaN-aware fmin/fmax helpers emitted with every Herbie Fortran
                 ! snippet. The intrinsic min/max have no guaranteed NaN behavior,
                 ! so each wrapper uses merge() with the x /= x NaN test to return
                 ! the other operand when one argument is NaN (IEEE fmin/fmax style).
                 module fmin_fmax_functions
                     implicit none
                     private
                     public fmax
                     public fmin

                     ! Generic interfaces dispatch over real(8)/real(4) argument mixes.
                     interface fmax
                         module procedure fmax88
                         module procedure fmax44
                         module procedure fmax84
                         module procedure fmax48
                     end interface
                     interface fmin
                         module procedure fmin88
                         module procedure fmin44
                         module procedure fmin84
                         module procedure fmin48
                     end interface
                 contains
                     ! fmax variants: suffix digits give the kinds of x and y (8/4).
                     real(8) function fmax88(x, y) result (res)
                         real(8), intent (in) :: x
                         real(8), intent (in) :: y
                         res = merge(y, merge(x, max(x, y), y /= y), x /= x)
                     end function
                     real(4) function fmax44(x, y) result (res)
                         real(4), intent (in) :: x
                         real(4), intent (in) :: y
                         res = merge(y, merge(x, max(x, y), y /= y), x /= x)
                     end function
                     real(8) function fmax84(x, y) result(res)
                         real(8), intent (in) :: x
                         real(4), intent (in) :: y
                         res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
                     end function
                     real(8) function fmax48(x, y) result(res)
                         real(4), intent (in) :: x
                         real(8), intent (in) :: y
                         res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
                     end function
                     ! fmin variants mirror the fmax ones with min().
                     real(8) function fmin88(x, y) result (res)
                         real(8), intent (in) :: x
                         real(8), intent (in) :: y
                         res = merge(y, merge(x, min(x, y), y /= y), x /= x)
                     end function
                     real(4) function fmin44(x, y) result (res)
                         real(4), intent (in) :: x
                         real(4), intent (in) :: y
                         res = merge(y, merge(x, min(x, y), y /= y), x /= x)
                     end function
                     real(8) function fmin84(x, y) result(res)
                         real(8), intent (in) :: x
                         real(4), intent (in) :: y
                         res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
                     end function
                     real(8) function fmin48(x, y) result(res)
                         real(4), intent (in) :: x
                         real(8), intent (in) :: y
                         res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
                     end function
                 end module
                
                 ! Herbie Alternative 7: ((1 - im_m) + 1) * (0.5 * re), the linearized
                 ! form of the original expression; im_m is expected to be |im|.
                 real(8) function code(re, im_m)
                 use fmin_fmax_functions
                     real(8), intent (in) :: re
                     real(8), intent (in) :: im_m
                     code = ((1.0d0 - im_m) + 1.0d0) * (0.5d0 * re)
                 end function
                
                im_m = Math.abs(im);
                public static double code(double re, double im_m) {
                	return ((1.0 - im_m) + 1.0) * (0.5 * re);
                }
                
                im_m = math.fabs(im)
                def code(re, im_m):
                	return ((1.0 - im_m) + 1.0) * (0.5 * re)
                
                im_m = abs(im)
                 # Herbie Alternative 7: ((1 - im_m) + 1) * (0.5 * re); im_m = |im|.
                 # Float64() wrappers force binary64 rounding at each step.
                 function code(re, im_m)
                 	return Float64(Float64(Float64(1.0 - im_m) + 1.0) * Float64(0.5 * re))
                 end
                
                im_m = abs(im);
                 % Herbie Alternative 7: ((1 - im_m) + 1) * (0.5 * re); im_m = |im|.
                 function tmp = code(re, im_m)
                 	tmp = ((1.0 - im_m) + 1.0) * (0.5 * re);
                 end
                
                im_m = N[Abs[im], $MachinePrecision]
                code[re_, im$95$m_] := N[(N[(N[(1.0 - im$95$m), $MachinePrecision] + 1.0), $MachinePrecision] * N[(0.5 * re), $MachinePrecision]), $MachinePrecision]
                
                \begin{array}{l}
                im_m = \left|im\right|
                
                \\
                \left(\left(1 - im\_m\right) + 1\right) \cdot \left(0.5 \cdot re\right)
                \end{array}
                
                Derivation
                1. Initial program 100.0%

                  \[\left(0.5 \cdot \sin re\right) \cdot \left(e^{0 - im} + e^{im}\right) \]
                2. Taylor expanded in re around 0

                  \[\leadsto \color{blue}{\frac{1}{2} \cdot \left(re \cdot \left(e^{im} + e^{\mathsf{neg}\left(im\right)}\right)\right)} \]
                3. Step-by-step derivation
                  1. lower-*.f64N/A

                    \[\leadsto \frac{1}{2} \cdot \color{blue}{\left(re \cdot \left(e^{im} + e^{\mathsf{neg}\left(im\right)}\right)\right)} \]
                  2. lower-*.f64N/A

                    \[\leadsto \frac{1}{2} \cdot \left(re \cdot \color{blue}{\left(e^{im} + e^{\mathsf{neg}\left(im\right)}\right)}\right) \]
                  3. lower-+.f64N/A

                    \[\leadsto \frac{1}{2} \cdot \left(re \cdot \left(e^{im} + \color{blue}{e^{\mathsf{neg}\left(im\right)}}\right)\right) \]
                  4. lower-exp.f64N/A

                    \[\leadsto \frac{1}{2} \cdot \left(re \cdot \left(e^{im} + e^{\color{blue}{\mathsf{neg}\left(im\right)}}\right)\right) \]
                  5. lower-exp.f64N/A

                    \[\leadsto \frac{1}{2} \cdot \left(re \cdot \left(e^{im} + e^{\mathsf{neg}\left(im\right)}\right)\right) \]
                  6. lower-neg.f6461.9

                    \[\leadsto 0.5 \cdot \left(re \cdot \left(e^{im} + e^{-im}\right)\right) \]
                4. Applied rewrites61.9%

                  \[\leadsto \color{blue}{0.5 \cdot \left(re \cdot \left(e^{im} + e^{-im}\right)\right)} \]
                5. Taylor expanded in im around 0

                  \[\leadsto \frac{1}{2} \cdot \left(re \cdot \left(1 + e^{\color{blue}{-im}}\right)\right) \]
                6. Step-by-step derivation
                  1. Applied rewrites25.8%

                    \[\leadsto 0.5 \cdot \left(re \cdot \left(1 + e^{\color{blue}{-im}}\right)\right) \]
                  2. Taylor expanded in im around 0

                    \[\leadsto \frac{1}{2} \cdot \left(re \cdot \left(1 + \left(1 + \color{blue}{-1 \cdot im}\right)\right)\right) \]
                  3. Step-by-step derivation
                    1. lower-+.f64N/A

                      \[\leadsto \frac{1}{2} \cdot \left(re \cdot \left(1 + \left(1 + -1 \cdot \color{blue}{im}\right)\right)\right) \]
                    2. lower-*.f6431.5

                      \[\leadsto 0.5 \cdot \left(re \cdot \left(1 + \left(1 + -1 \cdot im\right)\right)\right) \]
                  4. Applied rewrites31.5%

                    \[\leadsto 0.5 \cdot \left(re \cdot \left(1 + \left(1 + \color{blue}{-1 \cdot im}\right)\right)\right) \]
                  5. Step-by-step derivation
                    1. lift-*.f64N/A

                      \[\leadsto \frac{1}{2} \cdot \color{blue}{\left(re \cdot \left(1 + \left(1 + -1 \cdot im\right)\right)\right)} \]
                    2. lift-*.f64N/A

                      \[\leadsto \frac{1}{2} \cdot \left(re \cdot \color{blue}{\left(1 + \left(1 + -1 \cdot im\right)\right)}\right) \]
                    3. associate-*r*N/A

                      \[\leadsto \left(\frac{1}{2} \cdot re\right) \cdot \color{blue}{\left(1 + \left(1 + -1 \cdot im\right)\right)} \]
                    4. *-commutativeN/A

                      \[\leadsto \left(1 + \left(1 + -1 \cdot im\right)\right) \cdot \color{blue}{\left(\frac{1}{2} \cdot re\right)} \]
                    5. lower-*.f64N/A

                      \[\leadsto \left(1 + \left(1 + -1 \cdot im\right)\right) \cdot \color{blue}{\left(\frac{1}{2} \cdot re\right)} \]
                    6. lift-+.f64N/A

                      \[\leadsto \left(1 + \left(1 + -1 \cdot im\right)\right) \cdot \left(\color{blue}{\frac{1}{2}} \cdot re\right) \]
                    7. +-commutativeN/A

                      \[\leadsto \left(\left(1 + -1 \cdot im\right) + 1\right) \cdot \left(\color{blue}{\frac{1}{2}} \cdot re\right) \]
                    8. lower-+.f64N/A

                      \[\leadsto \left(\left(1 + -1 \cdot im\right) + 1\right) \cdot \left(\color{blue}{\frac{1}{2}} \cdot re\right) \]
                    9. lift-+.f64N/A

                      \[\leadsto \left(\left(1 + -1 \cdot im\right) + 1\right) \cdot \left(\frac{1}{2} \cdot re\right) \]
                    10. add-flipN/A

                      \[\leadsto \left(\left(1 - \left(\mathsf{neg}\left(-1 \cdot im\right)\right)\right) + 1\right) \cdot \left(\frac{1}{2} \cdot re\right) \]
                    11. lower--.f64N/A

                      \[\leadsto \left(\left(1 - \left(\mathsf{neg}\left(-1 \cdot im\right)\right)\right) + 1\right) \cdot \left(\frac{1}{2} \cdot re\right) \]
                    12. lift-*.f64N/A

                      \[\leadsto \left(\left(1 - \left(\mathsf{neg}\left(-1 \cdot im\right)\right)\right) + 1\right) \cdot \left(\frac{1}{2} \cdot re\right) \]
                    13. distribute-lft-neg-outN/A

                      \[\leadsto \left(\left(1 - \left(\mathsf{neg}\left(-1\right)\right) \cdot im\right) + 1\right) \cdot \left(\frac{1}{2} \cdot re\right) \]
                    14. metadata-evalN/A

                      \[\leadsto \left(\left(1 - 1 \cdot im\right) + 1\right) \cdot \left(\frac{1}{2} \cdot re\right) \]
                    15. *-lft-identityN/A

                      \[\leadsto \left(\left(1 - im\right) + 1\right) \cdot \left(\frac{1}{2} \cdot re\right) \]
                    16. *-lft-identityN/A

                       \[\leadsto \left(\left(1 - im\right) + 1\right) \cdot \color{blue}{\left(\frac{1}{2} \cdot re\right)} \]
                  6. Applied rewrites31.5%

                    \[\leadsto \left(\left(1 - im\right) + 1\right) \cdot \color{blue}{\left(0.5 \cdot re\right)} \]
                  7. Add Preprocessing

                  Alternative 8: 26.1% accurate, 9.3× speedup?

                  \[\begin{array}{l} im_m = \left|im\right| \\ \left(0.5 \cdot re\right) \cdot 2 \end{array} \]
                  im_m = (fabs.f64 im)
                  (FPCore (re im_m) :precision binary64 (* (* 0.5 re) 2.0))
                  im_m = fabs(im);
                  double code(double re, double im_m) {
                  	return (0.5 * re) * 2.0;
                  }
                  
                   im_m = abs(im)
                   ! NaN-aware fmin/fmax helpers emitted with every Herbie Fortran
                   ! snippet. The intrinsic min/max have no guaranteed NaN behavior,
                   ! so each wrapper uses merge() with the x /= x NaN test to return
                   ! the other operand when one argument is NaN (IEEE fmin/fmax style).
                   module fmin_fmax_functions
                       implicit none
                       private
                       public fmax
                       public fmin

                       ! Generic interfaces dispatch over real(8)/real(4) argument mixes.
                       interface fmax
                           module procedure fmax88
                           module procedure fmax44
                           module procedure fmax84
                           module procedure fmax48
                       end interface
                       interface fmin
                           module procedure fmin88
                           module procedure fmin44
                           module procedure fmin84
                           module procedure fmin48
                       end interface
                   contains
                       ! fmax variants: suffix digits give the kinds of x and y (8/4).
                       real(8) function fmax88(x, y) result (res)
                           real(8), intent (in) :: x
                           real(8), intent (in) :: y
                           res = merge(y, merge(x, max(x, y), y /= y), x /= x)
                       end function
                       real(4) function fmax44(x, y) result (res)
                           real(4), intent (in) :: x
                           real(4), intent (in) :: y
                           res = merge(y, merge(x, max(x, y), y /= y), x /= x)
                       end function
                       real(8) function fmax84(x, y) result(res)
                           real(8), intent (in) :: x
                           real(4), intent (in) :: y
                           res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
                       end function
                       real(8) function fmax48(x, y) result(res)
                           real(4), intent (in) :: x
                           real(8), intent (in) :: y
                           res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
                       end function
                       ! fmin variants mirror the fmax ones with min().
                       real(8) function fmin88(x, y) result (res)
                           real(8), intent (in) :: x
                           real(8), intent (in) :: y
                           res = merge(y, merge(x, min(x, y), y /= y), x /= x)
                       end function
                       real(4) function fmin44(x, y) result (res)
                           real(4), intent (in) :: x
                           real(4), intent (in) :: y
                           res = merge(y, merge(x, min(x, y), y /= y), x /= x)
                       end function
                       real(8) function fmin84(x, y) result(res)
                           real(8), intent (in) :: x
                           real(4), intent (in) :: y
                           res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
                       end function
                       real(8) function fmin48(x, y) result(res)
                           real(4), intent (in) :: x
                           real(8), intent (in) :: y
                           res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
                       end function
                   end module
                  
                   ! Herbie Alternative 8: (0.5 * re) * 2, the fully collapsed Taylor
                   ! form; im_m is unused in this regime.
                   real(8) function code(re, im_m)
                   use fmin_fmax_functions
                       real(8), intent (in) :: re
                       real(8), intent (in) :: im_m
                       code = (0.5d0 * re) * 2.0d0
                   end function
                  
                  im_m = Math.abs(im);
                  public static double code(double re, double im_m) {
                  	return (0.5 * re) * 2.0;
                  }
                  
                  im_m = math.fabs(im)
                  def code(re, im_m):
                  	return (0.5 * re) * 2.0
                  
                  im_m = abs(im)
                   # Herbie Alternative 8: (0.5 * re) * 2; im_m is unused in this
                   # fully collapsed Taylor form.
                   function code(re, im_m)
                   	return Float64(Float64(0.5 * re) * 2.0)
                   end
                  
                  im_m = abs(im);
                   % Herbie Alternative 8: (0.5 * re) * 2; im_m is unused in this
                   % fully collapsed Taylor form.
                   function tmp = code(re, im_m)
                   	tmp = (0.5 * re) * 2.0;
                   end
                  
                  im_m = N[Abs[im], $MachinePrecision]
                  code[re_, im$95$m_] := N[(N[(0.5 * re), $MachinePrecision] * 2.0), $MachinePrecision]
                  
                  \begin{array}{l}
                  im_m = \left|im\right|
                  
                  \\
                  \left(0.5 \cdot re\right) \cdot 2
                  \end{array}
                  
                  Derivation
                  1. Initial program 100.0%

                    \[\left(0.5 \cdot \sin re\right) \cdot \left(e^{0 - im} + e^{im}\right) \]
                  2. Taylor expanded in im around 0

                    \[\leadsto \left(\frac{1}{2} \cdot \sin re\right) \cdot \color{blue}{2} \]
                  3. Step-by-step derivation
                    1. Applied rewrites50.9%

                      \[\leadsto \left(0.5 \cdot \sin re\right) \cdot \color{blue}{2} \]
                    2. Taylor expanded in re around 0

                      \[\leadsto \left(\frac{1}{2} \cdot \color{blue}{re}\right) \cdot 2 \]
                    3. Step-by-step derivation
                      1. Applied rewrites26.1%

                        \[\leadsto \left(0.5 \cdot \color{blue}{re}\right) \cdot 2 \]
                      2. Add Preprocessing

                      Reproduce

                      ?
                      herbie shell --seed 2025151 
                      (FPCore (re im)
                        :name "math.sin on complex, real part"
                        :precision binary64
                        (* (* 0.5 (sin re)) (+ (exp (- 0.0 im)) (exp im))))