Maksimov and Kolovsky, Equation (4)

Percentage Accurate: 86.0% → 99.9%
Time: 5.6s
Alternatives: 12
Speedup: 1.2×

Specification

?
\[\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U \]
(FPCore (J l K U)
 :precision binary64
 (+ (* (* J (- (exp l) (exp (- l)))) (cos (/ K 2.0))) U))
double code(double J, double l, double K, double U) {
	return ((J * (exp(l) - exp(-l))) * cos((K / 2.0))) + U;
}
! Portable fmax/fmin with C99 semantics: when exactly one argument is a
! NaN, the other (non-NaN) argument is returned, whereas the plain
! max/min intrinsics may propagate the NaN.  The test `x /= x` is true
! only for a NaN (IEEE property).  Mixed-kind real(4)/real(8)
! combinations promote the result to real(8).
module fmin_fmax_functions
    implicit none
    private
    public fmax
    public fmin

    ! Generic names dispatch on the kind pattern of the two arguments:
    ! 88 = both real(8), 44 = both real(4), 84/48 = mixed kinds.
    interface fmax
        module procedure fmax88
        module procedure fmax44
        module procedure fmax84
        module procedure fmax48
    end interface
    interface fmin
        module procedure fmin88
        module procedure fmin44
        module procedure fmin84
        module procedure fmin48
    end interface
contains
    ! fmax of two real(8) values; a NaN argument is ignored.
    real(8) function fmax88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        ! if x is NaN -> y; else if y is NaN -> x; else ordinary max
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    ! fmax of two real(4) values; a NaN argument is ignored.
    real(4) function fmax44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    ! fmax of real(8) x and real(4) y; result promoted to real(8).
    real(8) function fmax84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
    end function
    ! fmax of real(4) x and real(8) y; result promoted to real(8).
    real(8) function fmax48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
    end function
    ! fmin of two real(8) values; a NaN argument is ignored.
    real(8) function fmin88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    ! fmin of two real(4) values; a NaN argument is ignored.
    real(4) function fmin44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    ! fmin of real(8) x and real(4) y; result promoted to real(8).
    real(8) function fmin84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
    end function
    ! fmin of real(4) x and real(8) y; result promoted to real(8).
    real(8) function fmin48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
    end function
end module

!> Maksimov & Kolovsky Eq. (4): j*(exp(l) - exp(-l))*cos(k/2) + u,
!> evaluated entirely in real(8) (binary64).
real(8) function code(j, l, k, u)
use fmin_fmax_functions
    implicit none
    real(8), intent (in) :: j
    real(8), intent (in) :: l
    real(8), intent (in) :: k
    real(8), intent (in) :: u
    ! exp(l) - exp(-l) == 2*sinh(l); the subtraction cancels for small
    ! |l|, which is why Herbie reports only ~86% accuracy for this form.
    code = ((j * (exp(l) - exp(-l))) * cos((k / 2.0d0))) + u
end function
/** Maksimov &amp; Kolovsky Eq. (4): J * (e^l - e^-l) * cos(K/2) + U. */
public static double code(double J, double l, double K, double U) {
	double twoSinhL = Math.exp(l) - Math.exp(-l); // == 2*sinh(l)
	double envelope = Math.cos(K / 2.0);
	return J * twoSinhL * envelope + U;
}
def code(J, l, K, U):
    """Maksimov & Kolovsky Eq. (4): J * (e**l - e**-l) * cos(K/2) + U."""
    two_sinh_l = math.exp(l) - math.exp(-l)  # == 2*sinh(l)
    return J * two_sinh_l * math.cos(K / 2.0) + U
# Maksimov & Kolovsky Eq. (4): J*(e^l - e^-l)*cos(K/2) + U.
# Explicit Float64() casts pin every intermediate to binary64.
function code(J, l, K, U)
	return Float64(Float64(Float64(J * Float64(exp(l) - exp(Float64(-l)))) * cos(Float64(K / 2.0))) + U)
end
% Maksimov & Kolovsky Eq. (4): J*(exp(l) - exp(-l))*cos(K/2) + U.
function tmp = code(J, l, K, U)
	tmp = ((J * (exp(l) - exp(-l))) * cos((K / 2.0))) + U;
end
(* Maksimov & Kolovsky Eq. (4); each N[..., $MachinePrecision] models one binary64 rounding. *)
code[J_, l_, K_, U_] := N[(N[(N[(J * N[(N[Exp[l], $MachinePrecision] - N[Exp[(-l)], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[Cos[N[(K / 2.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] + U), $MachinePrecision]
\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U

Local Percentage Accuracy vs ?

The average percentage accuracy by input value. The horizontal axis shows the value of an input variable; the variable is chosen in the title. The vertical axis is accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion. These can be toggled with buttons below the plot. The line is an average while dots represent individual samples.

Accuracy vs Speed?

Herbie found 12 alternatives:

Alternative | Accuracy | Speedup
The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed-accuracy tradeoffs.

Initial Program: 86.0% accurate, 1.0× speedup?

\[\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U \]
(FPCore (J l K U)
 :precision binary64
 (+ (* (* J (- (exp l) (exp (- l)))) (cos (/ K 2.0))) U))
// Maksimov & Kolovsky Eq. (4): J*(e^l - e^-l)*cos(K/2) + U.
// exp(l) - exp(-l) cancels for small |l|, limiting accuracy to ~86%.
double code(double J, double l, double K, double U) {
	return ((J * (exp(l) - exp(-l))) * cos((K / 2.0))) + U;
}
! Portable fmax/fmin with C99 semantics: when exactly one argument is a
! NaN, the other (non-NaN) argument is returned, whereas the plain
! max/min intrinsics may propagate the NaN.  The test `x /= x` is true
! only for a NaN (IEEE property).  Mixed-kind real(4)/real(8)
! combinations promote the result to real(8).
module fmin_fmax_functions
    implicit none
    private
    public fmax
    public fmin

    ! Generic names dispatch on the kind pattern of the two arguments:
    ! 88 = both real(8), 44 = both real(4), 84/48 = mixed kinds.
    interface fmax
        module procedure fmax88
        module procedure fmax44
        module procedure fmax84
        module procedure fmax48
    end interface
    interface fmin
        module procedure fmin88
        module procedure fmin44
        module procedure fmin84
        module procedure fmin48
    end interface
contains
    ! fmax of two real(8) values; a NaN argument is ignored.
    real(8) function fmax88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        ! if x is NaN -> y; else if y is NaN -> x; else ordinary max
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    ! fmax of two real(4) values; a NaN argument is ignored.
    real(4) function fmax44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    ! fmax of real(8) x and real(4) y; result promoted to real(8).
    real(8) function fmax84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
    end function
    ! fmax of real(4) x and real(8) y; result promoted to real(8).
    real(8) function fmax48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
    end function
    ! fmin of two real(8) values; a NaN argument is ignored.
    real(8) function fmin88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    ! fmin of two real(4) values; a NaN argument is ignored.
    real(4) function fmin44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    ! fmin of real(8) x and real(4) y; result promoted to real(8).
    real(8) function fmin84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
    end function
    ! fmin of real(4) x and real(8) y; result promoted to real(8).
    real(8) function fmin48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
    end function
end module

!> Maksimov & Kolovsky Eq. (4): j*(exp(l) - exp(-l))*cos(k/2) + u.
!> NOTE(review): generated code omits `implicit none`; the declared
!> dummies cover all names used, but compile-time checking is weakened.
real(8) function code(j, l, k, u)
use fmin_fmax_functions
    real(8), intent (in) :: j
    real(8), intent (in) :: l
    real(8), intent (in) :: k
    real(8), intent (in) :: u
    ! exp(l) - exp(-l) == 2*sinh(l); cancellation for small |l| is the
    ! main accuracy limiter of this form.
    code = ((j * (exp(l) - exp(-l))) * cos((k / 2.0d0))) + u
end function
/** Maksimov &amp; Kolovsky Eq. (4): J * (e^l - e^-l) * cos(K/2) + U. */
public static double code(double J, double l, double K, double U) {
	return ((J * (Math.exp(l) - Math.exp(-l))) * Math.cos((K / 2.0))) + U;
}
def code(J, l, K, U):
	"""Maksimov & Kolovsky Eq. (4): J * (exp(l) - exp(-l)) * cos(K/2) + U."""
	return ((J * (math.exp(l) - math.exp(-l))) * math.cos((K / 2.0))) + U
# Maksimov & Kolovsky Eq. (4); Float64() casts model binary64 rounding.
function code(J, l, K, U)
	return Float64(Float64(Float64(J * Float64(exp(l) - exp(Float64(-l)))) * cos(Float64(K / 2.0))) + U)
end
% Maksimov & Kolovsky Eq. (4): J*(exp(l) - exp(-l))*cos(K/2) + U.
function tmp = code(J, l, K, U)
	tmp = ((J * (exp(l) - exp(-l))) * cos((K / 2.0))) + U;
end
(* Maksimov & Kolovsky Eq. (4); each N[..., $MachinePrecision] models one binary64 rounding. *)
code[J_, l_, K_, U_] := N[(N[(N[(J * N[(N[Exp[l], $MachinePrecision] - N[Exp[(-l)], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[Cos[N[(K / 2.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] + U), $MachinePrecision]
\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U

Alternative 1: 99.9% accurate, 1.2× speedup?

\[\mathsf{fma}\left(\left(J + J\right) \cdot \sinh \ell, \cos \left(0.5 \cdot K\right), U\right) \]
(FPCore (J l K U)
 :precision binary64
 (fma (* (+ J J) (sinh l)) (cos (* 0.5 K)) U))
double code(double J, double l, double K, double U) {
	return fma(((J + J) * sinh(l)), cos((0.5 * K)), U);
}
# Alternative 1: fma((J+J)*sinh(l), cos(0.5*K), U).  sinh avoids the
# exp(l) - exp(-l) cancellation of the original program.
function code(J, l, K, U)
	return fma(Float64(Float64(J + J) * sinh(l)), cos(Float64(0.5 * K)), U)
end
(* Alternative 1; the fma is rendered as a plain multiply-add since Mathematica has no fused operation here. *)
code[J_, l_, K_, U_] := N[(N[(N[(J + J), $MachinePrecision] * N[Sinh[l], $MachinePrecision]), $MachinePrecision] * N[Cos[N[(0.5 * K), $MachinePrecision]], $MachinePrecision] + U), $MachinePrecision]
\mathsf{fma}\left(\left(J + J\right) \cdot \sinh \ell, \cos \left(0.5 \cdot K\right), U\right)
Derivation
  1. Initial program 86.0%

    \[\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U \]
  2. Step-by-step derivation
    1. lift-+.f64N/A

      \[\leadsto \color{blue}{\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U} \]
    2. lift-*.f64N/A

      \[\leadsto \color{blue}{\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \cos \left(\frac{K}{2}\right)} + U \]
    3. *-commutativeN/A

      \[\leadsto \color{blue}{\cos \left(\frac{K}{2}\right) \cdot \left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right)} + U \]
    4. lift-*.f64N/A

      \[\leadsto \cos \left(\frac{K}{2}\right) \cdot \color{blue}{\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right)} + U \]
    5. *-commutativeN/A

      \[\leadsto \cos \left(\frac{K}{2}\right) \cdot \color{blue}{\left(\left(e^{\ell} - e^{-\ell}\right) \cdot J\right)} + U \]
    6. lift--.f64N/A

      \[\leadsto \cos \left(\frac{K}{2}\right) \cdot \left(\color{blue}{\left(e^{\ell} - e^{-\ell}\right)} \cdot J\right) + U \]
    7. lift-exp.f64N/A

      \[\leadsto \cos \left(\frac{K}{2}\right) \cdot \left(\left(\color{blue}{e^{\ell}} - e^{-\ell}\right) \cdot J\right) + U \]
    8. lift-exp.f64N/A

      \[\leadsto \cos \left(\frac{K}{2}\right) \cdot \left(\left(e^{\ell} - \color{blue}{e^{-\ell}}\right) \cdot J\right) + U \]
    9. lift-neg.f64N/A

      \[\leadsto \cos \left(\frac{K}{2}\right) \cdot \left(\left(e^{\ell} - e^{\color{blue}{\mathsf{neg}\left(\ell\right)}}\right) \cdot J\right) + U \]
    10. sinh-undefN/A

      \[\leadsto \cos \left(\frac{K}{2}\right) \cdot \left(\color{blue}{\left(2 \cdot \sinh \ell\right)} \cdot J\right) + U \]
    11. associate-*l*N/A

      \[\leadsto \cos \left(\frac{K}{2}\right) \cdot \color{blue}{\left(2 \cdot \left(\sinh \ell \cdot J\right)\right)} + U \]
    12. associate-*r*N/A

      \[\leadsto \color{blue}{\left(\cos \left(\frac{K}{2}\right) \cdot 2\right) \cdot \left(\sinh \ell \cdot J\right)} + U \]
    13. lower-fma.f64N/A

      \[\leadsto \color{blue}{\mathsf{fma}\left(\cos \left(\frac{K}{2}\right) \cdot 2, \sinh \ell \cdot J, U\right)} \]
  3. Applied rewrites100.0%

    \[\leadsto \color{blue}{\mathsf{fma}\left(\cos \left(-0.5 \cdot K\right) \cdot 2, \sinh \ell \cdot J, U\right)} \]
  4. Step-by-step derivation
    1. lift-fma.f64N/A

      \[\leadsto \color{blue}{\left(\cos \left(\frac{-1}{2} \cdot K\right) \cdot 2\right) \cdot \left(\sinh \ell \cdot J\right) + U} \]
  5. Applied rewrites99.9%

    \[\leadsto \color{blue}{\mathsf{fma}\left(\left(J + J\right) \cdot \sinh \ell, \cos \left(0.5 \cdot K\right), U\right)} \]
  6. Add Preprocessing

Alternative 2: 88.4% accurate, 0.6× speedup?

\[\begin{array}{l} t_0 := \cos \left(\frac{K}{2}\right)\\ \mathbf{if}\;t\_0 \leq -0.45:\\ \;\;\;\;\mathsf{fma}\left(\left(J + J\right) \cdot \ell, \cos \left(0.5 \cdot K\right), U\right)\\ \mathbf{elif}\;t\_0 \leq -0.055:\\ \;\;\;\;\mathsf{fma}\left(\left(J + J\right) \cdot \sinh \ell, \mathsf{fma}\left(K \cdot K, -0.125, 1\right), U\right)\\ \mathbf{else}:\\ \;\;\;\;\mathsf{fma}\left(J + J, \sinh \ell, U\right)\\ \end{array} \]
(FPCore (J l K U)
 :precision binary64
 (let* ((t_0 (cos (/ K 2.0))))
   (if (<= t_0 -0.45)
     (fma (* (+ J J) l) (cos (* 0.5 K)) U)
     (if (<= t_0 -0.055)
       (fma (* (+ J J) (sinh l)) (fma (* K K) -0.125 1.0) U)
       (fma (+ J J) (sinh l) U)))))
// Alternative 2 (88.4% accurate): three regimes selected by cos(K/2).
double code(double J, double l, double K, double U) {
	double t_0 = cos((K / 2.0));
	double tmp;
	if (t_0 <= -0.45) {
		// sinh(l) replaced by its leading Taylor term l (expansion around l = 0)
		tmp = fma(((J + J) * l), cos((0.5 * K)), U);
	} else if (t_0 <= -0.055) {
		// cos(K/2) replaced by its Taylor polynomial 1 - K^2/8 (expansion around K = 0)
		tmp = fma(((J + J) * sinh(l)), fma((K * K), -0.125, 1.0), U);
	} else {
		// cos(K/2) dropped entirely (treated as 1)
		tmp = fma((J + J), sinh(l), U);
	}
	return tmp;
}
# Alternative 2: three regimes selected by t_0 = cos(K/2).
function code(J, l, K, U)
	t_0 = cos(Float64(K / 2.0))
	tmp = 0.0
	if (t_0 <= -0.45)
		# sinh(l) replaced by its leading Taylor term l
		tmp = fma(Float64(Float64(J + J) * l), cos(Float64(0.5 * K)), U);
	elseif (t_0 <= -0.055)
		# cos(K/2) replaced by the Taylor polynomial 1 - K^2/8
		tmp = fma(Float64(Float64(J + J) * sinh(l)), fma(Float64(K * K), -0.125, 1.0), U);
	else
		# cos(K/2) dropped entirely (treated as 1)
		tmp = fma(Float64(J + J), sinh(l), U);
	end
	return tmp
end
(* Alternative 2: three regimes on t$95$0 = cos(K/2); fma rendered as plain multiply-add. *)
code[J_, l_, K_, U_] := Block[{t$95$0 = N[Cos[N[(K / 2.0), $MachinePrecision]], $MachinePrecision]}, If[LessEqual[t$95$0, -0.45], N[(N[(N[(J + J), $MachinePrecision] * l), $MachinePrecision] * N[Cos[N[(0.5 * K), $MachinePrecision]], $MachinePrecision] + U), $MachinePrecision], If[LessEqual[t$95$0, -0.055], N[(N[(N[(J + J), $MachinePrecision] * N[Sinh[l], $MachinePrecision]), $MachinePrecision] * N[(N[(K * K), $MachinePrecision] * -0.125 + 1.0), $MachinePrecision] + U), $MachinePrecision], N[(N[(J + J), $MachinePrecision] * N[Sinh[l], $MachinePrecision] + U), $MachinePrecision]]]]
\begin{array}{l}
t_0 := \cos \left(\frac{K}{2}\right)\\
\mathbf{if}\;t\_0 \leq -0.45:\\
\;\;\;\;\mathsf{fma}\left(\left(J + J\right) \cdot \ell, \cos \left(0.5 \cdot K\right), U\right)\\

\mathbf{elif}\;t\_0 \leq -0.055:\\
\;\;\;\;\mathsf{fma}\left(\left(J + J\right) \cdot \sinh \ell, \mathsf{fma}\left(K \cdot K, -0.125, 1\right), U\right)\\

\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(J + J, \sinh \ell, U\right)\\


\end{array}
Derivation
  1. Split input into 3 regimes
  2. if (cos.f64 (/.f64 K #s(literal 2 binary64))) < -0.450000000000000011

    1. Initial program 86.0%

      \[\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U \]
    2. Step-by-step derivation
      1. lift-+.f64N/A

        \[\leadsto \color{blue}{\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U} \]
      2. lift-*.f64N/A

        \[\leadsto \color{blue}{\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \cos \left(\frac{K}{2}\right)} + U \]
      3. *-commutativeN/A

        \[\leadsto \color{blue}{\cos \left(\frac{K}{2}\right) \cdot \left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right)} + U \]
      4. lift-*.f64N/A

        \[\leadsto \cos \left(\frac{K}{2}\right) \cdot \color{blue}{\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right)} + U \]
      5. *-commutativeN/A

        \[\leadsto \cos \left(\frac{K}{2}\right) \cdot \color{blue}{\left(\left(e^{\ell} - e^{-\ell}\right) \cdot J\right)} + U \]
      6. lift--.f64N/A

        \[\leadsto \cos \left(\frac{K}{2}\right) \cdot \left(\color{blue}{\left(e^{\ell} - e^{-\ell}\right)} \cdot J\right) + U \]
      7. lift-exp.f64N/A

        \[\leadsto \cos \left(\frac{K}{2}\right) \cdot \left(\left(\color{blue}{e^{\ell}} - e^{-\ell}\right) \cdot J\right) + U \]
      8. lift-exp.f64N/A

        \[\leadsto \cos \left(\frac{K}{2}\right) \cdot \left(\left(e^{\ell} - \color{blue}{e^{-\ell}}\right) \cdot J\right) + U \]
      9. lift-neg.f64N/A

        \[\leadsto \cos \left(\frac{K}{2}\right) \cdot \left(\left(e^{\ell} - e^{\color{blue}{\mathsf{neg}\left(\ell\right)}}\right) \cdot J\right) + U \]
      10. sinh-undefN/A

        \[\leadsto \cos \left(\frac{K}{2}\right) \cdot \left(\color{blue}{\left(2 \cdot \sinh \ell\right)} \cdot J\right) + U \]
      11. associate-*l*N/A

        \[\leadsto \cos \left(\frac{K}{2}\right) \cdot \color{blue}{\left(2 \cdot \left(\sinh \ell \cdot J\right)\right)} + U \]
      12. associate-*r*N/A

        \[\leadsto \color{blue}{\left(\cos \left(\frac{K}{2}\right) \cdot 2\right) \cdot \left(\sinh \ell \cdot J\right)} + U \]
      13. lower-fma.f64N/A

        \[\leadsto \color{blue}{\mathsf{fma}\left(\cos \left(\frac{K}{2}\right) \cdot 2, \sinh \ell \cdot J, U\right)} \]
    3. Applied rewrites100.0%

      \[\leadsto \color{blue}{\mathsf{fma}\left(\cos \left(-0.5 \cdot K\right) \cdot 2, \sinh \ell \cdot J, U\right)} \]
    4. Step-by-step derivation
      1. lift-fma.f64N/A

        \[\leadsto \color{blue}{\left(\cos \left(\frac{-1}{2} \cdot K\right) \cdot 2\right) \cdot \left(\sinh \ell \cdot J\right) + U} \]
    5. Applied rewrites99.9%

      \[\leadsto \color{blue}{\mathsf{fma}\left(\left(J + J\right) \cdot \sinh \ell, \cos \left(0.5 \cdot K\right), U\right)} \]
    6. Taylor expanded in l around 0

      \[\leadsto \mathsf{fma}\left(\left(J + J\right) \cdot \color{blue}{\ell}, \cos \left(\frac{1}{2} \cdot K\right), U\right) \]
    7. Step-by-step derivation
      1. Applied rewrites65.1%

        \[\leadsto \mathsf{fma}\left(\left(J + J\right) \cdot \color{blue}{\ell}, \cos \left(0.5 \cdot K\right), U\right) \]

      if -0.450000000000000011 < (cos.f64 (/.f64 K #s(literal 2 binary64))) < -0.0550000000000000003

      1. Initial program 86.0%

        \[\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U \]
      2. Taylor expanded in K around 0

        \[\leadsto \left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \color{blue}{\left(1 + \frac{-1}{8} \cdot {K}^{2}\right)} + U \]
      3. Step-by-step derivation
        1. lower-+.f64N/A

          \[\leadsto \left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \left(1 + \color{blue}{\frac{-1}{8} \cdot {K}^{2}}\right) + U \]
        2. lower-*.f64N/A

          \[\leadsto \left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \left(1 + \frac{-1}{8} \cdot \color{blue}{{K}^{2}}\right) + U \]
        3. lower-pow.f6464.6

          \[\leadsto \left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \left(1 + -0.125 \cdot {K}^{\color{blue}{2}}\right) + U \]
      4. Applied rewrites64.6%

        \[\leadsto \left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \color{blue}{\left(1 + -0.125 \cdot {K}^{2}\right)} + U \]
      5. Step-by-step derivation
        1. lift-+.f64N/A

          \[\leadsto \color{blue}{\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \left(1 + \frac{-1}{8} \cdot {K}^{2}\right) + U} \]
        2. lift-*.f64N/A

          \[\leadsto \color{blue}{\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \left(1 + \frac{-1}{8} \cdot {K}^{2}\right)} + U \]
        3. lower-fma.f6464.6

          \[\leadsto \color{blue}{\mathsf{fma}\left(J \cdot \left(e^{\ell} - e^{-\ell}\right), 1 + -0.125 \cdot {K}^{2}, U\right)} \]
      6. Applied rewrites69.6%

        \[\leadsto \color{blue}{\mathsf{fma}\left(\left(J + J\right) \cdot \sinh \ell, \mathsf{fma}\left(K \cdot K, -0.125, 1\right), U\right)} \]

      if -0.0550000000000000003 < (cos.f64 (/.f64 K #s(literal 2 binary64)))

      1. Initial program 86.0%

        \[\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U \]
      2. Taylor expanded in K around 0

        \[\leadsto \color{blue}{U + J \cdot \left(e^{\ell} - e^{\mathsf{neg}\left(\ell\right)}\right)} \]
      3. Step-by-step derivation
        1. lower-+.f64N/A

          \[\leadsto U + \color{blue}{J \cdot \left(e^{\ell} - e^{\mathsf{neg}\left(\ell\right)}\right)} \]
        2. lower-*.f64N/A

          \[\leadsto U + J \cdot \color{blue}{\left(e^{\ell} - e^{\mathsf{neg}\left(\ell\right)}\right)} \]
        3. lower--.f64N/A

          \[\leadsto U + J \cdot \left(e^{\ell} - \color{blue}{e^{\mathsf{neg}\left(\ell\right)}}\right) \]
        4. lower-exp.f64N/A

          \[\leadsto U + J \cdot \left(e^{\ell} - e^{\color{blue}{\mathsf{neg}\left(\ell\right)}}\right) \]
        5. lower-exp.f64N/A

          \[\leadsto U + J \cdot \left(e^{\ell} - e^{\mathsf{neg}\left(\ell\right)}\right) \]
        6. lower-neg.f6473.3

          \[\leadsto U + J \cdot \left(e^{\ell} - e^{-\ell}\right) \]
      4. Applied rewrites73.3%

        \[\leadsto \color{blue}{U + J \cdot \left(e^{\ell} - e^{-\ell}\right)} \]
      5. Step-by-step derivation
        1. lift-+.f64N/A

          \[\leadsto U + \color{blue}{J \cdot \left(e^{\ell} - e^{-\ell}\right)} \]
        2. +-commutativeN/A

          \[\leadsto J \cdot \left(e^{\ell} - e^{-\ell}\right) + \color{blue}{U} \]
        3. lift-*.f64N/A

          \[\leadsto J \cdot \left(e^{\ell} - e^{-\ell}\right) + U \]
        4. lift--.f64N/A

          \[\leadsto J \cdot \left(e^{\ell} - e^{-\ell}\right) + U \]
        5. lift-exp.f64N/A

          \[\leadsto J \cdot \left(e^{\ell} - e^{-\ell}\right) + U \]
        6. lift-exp.f64N/A

          \[\leadsto J \cdot \left(e^{\ell} - e^{-\ell}\right) + U \]
        7. lift-neg.f64N/A

          \[\leadsto J \cdot \left(e^{\ell} - e^{\mathsf{neg}\left(\ell\right)}\right) + U \]
        8. sinh-undefN/A

          \[\leadsto J \cdot \left(2 \cdot \sinh \ell\right) + U \]
        9. lift-sinh.f64N/A

          \[\leadsto J \cdot \left(2 \cdot \sinh \ell\right) + U \]
        10. associate-*r*N/A

          \[\leadsto \left(J \cdot 2\right) \cdot \sinh \ell + U \]
        11. *-commutativeN/A

          \[\leadsto \left(2 \cdot J\right) \cdot \sinh \ell + U \]
        12. lower-fma.f64N/A

          \[\leadsto \mathsf{fma}\left(2 \cdot J, \color{blue}{\sinh \ell}, U\right) \]
        13. count-2-revN/A

          \[\leadsto \mathsf{fma}\left(J + J, \sinh \color{blue}{\ell}, U\right) \]
        14. lower-+.f6480.8

          \[\leadsto \mathsf{fma}\left(J + J, \sinh \color{blue}{\ell}, U\right) \]
      6. Applied rewrites80.8%

        \[\leadsto \mathsf{fma}\left(J + J, \color{blue}{\sinh \ell}, U\right) \]
    8. Recombined 3 regimes into one program.
    9. Add Preprocessing

    Alternative 3: 87.5% accurate, 0.7× speedup?

    \[\begin{array}{l} \mathbf{if}\;\cos \left(\frac{K}{2}\right) \leq -0.055:\\ \;\;\;\;\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \left(1 + -0.125 \cdot {K}^{2}\right) + U\\ \mathbf{else}:\\ \;\;\;\;\mathsf{fma}\left(J + J, \sinh \ell, U\right)\\ \end{array} \]
    (FPCore (J l K U)
     :precision binary64
     (if (<= (cos (/ K 2.0)) -0.055)
       (+ (* (* J (- (exp l) (exp (- l)))) (+ 1.0 (* -0.125 (pow K 2.0)))) U)
       (fma (+ J J) (sinh l) U)))
    // Alternative 3 (87.5% accurate): two regimes selected by cos(K/2).
    double code(double J, double l, double K, double U) {
    	double tmp;
    	if (cos((K / 2.0)) <= -0.055) {
    		// cos(K/2) replaced by its Taylor polynomial 1 - K^2/8
    		tmp = ((J * (exp(l) - exp(-l))) * (1.0 + (-0.125 * pow(K, 2.0)))) + U;
    	} else {
    		// cos(K/2) dropped entirely; sinh form avoids exp cancellation
    		tmp = fma((J + J), sinh(l), U);
    	}
    	return tmp;
    }
    
    # Alternative 3: two regimes selected by cos(K/2).
    function code(J, l, K, U)
    	tmp = 0.0
    	if (cos(Float64(K / 2.0)) <= -0.055)
    		# cos(K/2) replaced by the Taylor polynomial 1 - K^2/8
    		tmp = Float64(Float64(Float64(J * Float64(exp(l) - exp(Float64(-l)))) * Float64(1.0 + Float64(-0.125 * (K ^ 2.0)))) + U);
    	else
    		# cos(K/2) dropped entirely; sinh form avoids exp cancellation
    		tmp = fma(Float64(J + J), sinh(l), U);
    	end
    	return tmp
    end
    
    (* Alternative 3: two regimes on cos(K/2); fma rendered as plain multiply-add. *)
    code[J_, l_, K_, U_] := If[LessEqual[N[Cos[N[(K / 2.0), $MachinePrecision]], $MachinePrecision], -0.055], N[(N[(N[(J * N[(N[Exp[l], $MachinePrecision] - N[Exp[(-l)], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(1.0 + N[(-0.125 * N[Power[K, 2.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + U), $MachinePrecision], N[(N[(J + J), $MachinePrecision] * N[Sinh[l], $MachinePrecision] + U), $MachinePrecision]]
    
    \begin{array}{l}
    \mathbf{if}\;\cos \left(\frac{K}{2}\right) \leq -0.055:\\
    \;\;\;\;\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \left(1 + -0.125 \cdot {K}^{2}\right) + U\\
    
    \mathbf{else}:\\
    \;\;\;\;\mathsf{fma}\left(J + J, \sinh \ell, U\right)\\
    
    
    \end{array}
    
    Derivation
    1. Split input into 2 regimes
    2. if (cos.f64 (/.f64 K #s(literal 2 binary64))) < -0.0550000000000000003

      1. Initial program 86.0%

        \[\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U \]
      2. Taylor expanded in K around 0

        \[\leadsto \left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \color{blue}{\left(1 + \frac{-1}{8} \cdot {K}^{2}\right)} + U \]
      3. Step-by-step derivation
        1. lower-+.f64N/A

          \[\leadsto \left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \left(1 + \color{blue}{\frac{-1}{8} \cdot {K}^{2}}\right) + U \]
        2. lower-*.f64N/A

          \[\leadsto \left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \left(1 + \frac{-1}{8} \cdot \color{blue}{{K}^{2}}\right) + U \]
        3. lower-pow.f6464.6

          \[\leadsto \left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \left(1 + -0.125 \cdot {K}^{\color{blue}{2}}\right) + U \]
      4. Applied rewrites64.6%

        \[\leadsto \left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \color{blue}{\left(1 + -0.125 \cdot {K}^{2}\right)} + U \]

      if -0.0550000000000000003 < (cos.f64 (/.f64 K #s(literal 2 binary64)))

      1. Initial program 86.0%

        \[\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U \]
      2. Taylor expanded in K around 0

        \[\leadsto \color{blue}{U + J \cdot \left(e^{\ell} - e^{\mathsf{neg}\left(\ell\right)}\right)} \]
      3. Step-by-step derivation
        1. lower-+.f64N/A

          \[\leadsto U + \color{blue}{J \cdot \left(e^{\ell} - e^{\mathsf{neg}\left(\ell\right)}\right)} \]
        2. lower-*.f64N/A

          \[\leadsto U + J \cdot \color{blue}{\left(e^{\ell} - e^{\mathsf{neg}\left(\ell\right)}\right)} \]
        3. lower--.f64N/A

          \[\leadsto U + J \cdot \left(e^{\ell} - \color{blue}{e^{\mathsf{neg}\left(\ell\right)}}\right) \]
        4. lower-exp.f64N/A

          \[\leadsto U + J \cdot \left(e^{\ell} - e^{\color{blue}{\mathsf{neg}\left(\ell\right)}}\right) \]
        5. lower-exp.f64N/A

          \[\leadsto U + J \cdot \left(e^{\ell} - e^{\mathsf{neg}\left(\ell\right)}\right) \]
        6. lower-neg.f6473.3

          \[\leadsto U + J \cdot \left(e^{\ell} - e^{-\ell}\right) \]
      4. Applied rewrites73.3%

        \[\leadsto \color{blue}{U + J \cdot \left(e^{\ell} - e^{-\ell}\right)} \]
      5. Step-by-step derivation
        1. lift-+.f64N/A

          \[\leadsto U + \color{blue}{J \cdot \left(e^{\ell} - e^{-\ell}\right)} \]
        2. +-commutativeN/A

          \[\leadsto J \cdot \left(e^{\ell} - e^{-\ell}\right) + \color{blue}{U} \]
        3. lift-*.f64N/A

          \[\leadsto J \cdot \left(e^{\ell} - e^{-\ell}\right) + U \]
        4. lift--.f64N/A

          \[\leadsto J \cdot \left(e^{\ell} - e^{-\ell}\right) + U \]
        5. lift-exp.f64N/A

          \[\leadsto J \cdot \left(e^{\ell} - e^{-\ell}\right) + U \]
        6. lift-exp.f64N/A

          \[\leadsto J \cdot \left(e^{\ell} - e^{-\ell}\right) + U \]
        7. lift-neg.f64N/A

          \[\leadsto J \cdot \left(e^{\ell} - e^{\mathsf{neg}\left(\ell\right)}\right) + U \]
        8. sinh-undefN/A

          \[\leadsto J \cdot \left(2 \cdot \sinh \ell\right) + U \]
        9. lift-sinh.f64N/A

          \[\leadsto J \cdot \left(2 \cdot \sinh \ell\right) + U \]
        10. associate-*r*N/A

          \[\leadsto \left(J \cdot 2\right) \cdot \sinh \ell + U \]
        11. *-commutativeN/A

          \[\leadsto \left(2 \cdot J\right) \cdot \sinh \ell + U \]
        12. lower-fma.f64N/A

          \[\leadsto \mathsf{fma}\left(2 \cdot J, \color{blue}{\sinh \ell}, U\right) \]
        13. count-2-revN/A

          \[\leadsto \mathsf{fma}\left(J + J, \sinh \color{blue}{\ell}, U\right) \]
        14. lower-+.f6480.8

          \[\leadsto \mathsf{fma}\left(J + J, \sinh \color{blue}{\ell}, U\right) \]
      6. Applied rewrites80.8%

        \[\leadsto \mathsf{fma}\left(J + J, \color{blue}{\sinh \ell}, U\right) \]
    3. Recombined 2 regimes into one program.
    4. Add Preprocessing

    Alternative 4: 87.4% accurate, 1.0× speedup?

    \[\begin{array}{l} \mathbf{if}\;\cos \left(\frac{K}{2}\right) \leq -0.055:\\ \;\;\;\;\mathsf{fma}\left(\left(J + J\right) \cdot \sinh \ell, \mathsf{fma}\left(K \cdot K, -0.125, 1\right), U\right)\\ \mathbf{else}:\\ \;\;\;\;\mathsf{fma}\left(J + J, \sinh \ell, U\right)\\ \end{array} \]
    (FPCore (J l K U)
     :precision binary64
     (if (<= (cos (/ K 2.0)) -0.055)
       (fma (* (+ J J) (sinh l)) (fma (* K K) -0.125 1.0) U)
       (fma (+ J J) (sinh l) U)))
    // Alternative 4 (87.4% accurate): two regimes selected by cos(K/2).
    double code(double J, double l, double K, double U) {
    	double tmp;
    	if (cos((K / 2.0)) <= -0.055) {
    		// cos(K/2) replaced by its Taylor polynomial 1 - K^2/8, via fma
    		tmp = fma(((J + J) * sinh(l)), fma((K * K), -0.125, 1.0), U);
    	} else {
    		// cos(K/2) dropped entirely (treated as 1)
    		tmp = fma((J + J), sinh(l), U);
    	}
    	return tmp;
    }
    
    # Alternative 4: two regimes selected by cos(K/2).
    function code(J, l, K, U)
    	tmp = 0.0
    	if (cos(Float64(K / 2.0)) <= -0.055)
    		# cos(K/2) replaced by the Taylor polynomial 1 - K^2/8, via fma
    		tmp = fma(Float64(Float64(J + J) * sinh(l)), fma(Float64(K * K), -0.125, 1.0), U);
    	else
    		# cos(K/2) dropped entirely (treated as 1)
    		tmp = fma(Float64(J + J), sinh(l), U);
    	end
    	return tmp
    end
    
    (* Alternative 4: two regimes on cos(K/2); fma rendered as plain multiply-add. *)
    code[J_, l_, K_, U_] := If[LessEqual[N[Cos[N[(K / 2.0), $MachinePrecision]], $MachinePrecision], -0.055], N[(N[(N[(J + J), $MachinePrecision] * N[Sinh[l], $MachinePrecision]), $MachinePrecision] * N[(N[(K * K), $MachinePrecision] * -0.125 + 1.0), $MachinePrecision] + U), $MachinePrecision], N[(N[(J + J), $MachinePrecision] * N[Sinh[l], $MachinePrecision] + U), $MachinePrecision]]
    
    \begin{array}{l}
    \mathbf{if}\;\cos \left(\frac{K}{2}\right) \leq -0.055:\\
    \;\;\;\;\mathsf{fma}\left(\left(J + J\right) \cdot \sinh \ell, \mathsf{fma}\left(K \cdot K, -0.125, 1\right), U\right)\\
    
    \mathbf{else}:\\
    \;\;\;\;\mathsf{fma}\left(J + J, \sinh \ell, U\right)\\
    
    
    \end{array}
    
    Derivation
    1. Split input into 2 regimes
    2. if (cos.f64 (/.f64 K #s(literal 2 binary64))) < -0.0550000000000000003

      1. Initial program 86.0%

        \[\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U \]
      2. Taylor expanded in K around 0

        \[\leadsto \left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \color{blue}{\left(1 + \frac{-1}{8} \cdot {K}^{2}\right)} + U \]
      3. Step-by-step derivation
        1. lower-+.f64N/A

          \[\leadsto \left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \left(1 + \color{blue}{\frac{-1}{8} \cdot {K}^{2}}\right) + U \]
        2. lower-*.f64N/A

          \[\leadsto \left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \left(1 + \frac{-1}{8} \cdot \color{blue}{{K}^{2}}\right) + U \]
        3. lower-pow.f6464.6

          \[\leadsto \left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \left(1 + -0.125 \cdot {K}^{\color{blue}{2}}\right) + U \]
      4. Applied rewrites64.6%

        \[\leadsto \left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \color{blue}{\left(1 + -0.125 \cdot {K}^{2}\right)} + U \]
      5. Step-by-step derivation
        1. lift-+.f64N/A

          \[\leadsto \color{blue}{\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \left(1 + \frac{-1}{8} \cdot {K}^{2}\right) + U} \]
        2. lift-*.f64N/A

          \[\leadsto \color{blue}{\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \left(1 + \frac{-1}{8} \cdot {K}^{2}\right)} + U \]
        3. lower-fma.f6464.6

          \[\leadsto \color{blue}{\mathsf{fma}\left(J \cdot \left(e^{\ell} - e^{-\ell}\right), 1 + -0.125 \cdot {K}^{2}, U\right)} \]
      6. Applied rewrites69.6%

        \[\leadsto \color{blue}{\mathsf{fma}\left(\left(J + J\right) \cdot \sinh \ell, \mathsf{fma}\left(K \cdot K, -0.125, 1\right), U\right)} \]

      if -0.0550000000000000003 < (cos.f64 (/.f64 K #s(literal 2 binary64)))

      1. Initial program 86.0%

        \[\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U \]
      2. Taylor expanded in K around 0

        \[\leadsto \color{blue}{U + J \cdot \left(e^{\ell} - e^{\mathsf{neg}\left(\ell\right)}\right)} \]
      3. Step-by-step derivation
        1. lower-+.f64N/A

          \[\leadsto U + \color{blue}{J \cdot \left(e^{\ell} - e^{\mathsf{neg}\left(\ell\right)}\right)} \]
        2. lower-*.f64N/A

          \[\leadsto U + J \cdot \color{blue}{\left(e^{\ell} - e^{\mathsf{neg}\left(\ell\right)}\right)} \]
        3. lower--.f64N/A

          \[\leadsto U + J \cdot \left(e^{\ell} - \color{blue}{e^{\mathsf{neg}\left(\ell\right)}}\right) \]
        4. lower-exp.f64N/A

          \[\leadsto U + J \cdot \left(e^{\ell} - e^{\color{blue}{\mathsf{neg}\left(\ell\right)}}\right) \]
        5. lower-exp.f64N/A

          \[\leadsto U + J \cdot \left(e^{\ell} - e^{\mathsf{neg}\left(\ell\right)}\right) \]
        6. lower-neg.f6473.3

          \[\leadsto U + J \cdot \left(e^{\ell} - e^{-\ell}\right) \]
      4. Applied rewrites73.3%

        \[\leadsto \color{blue}{U + J \cdot \left(e^{\ell} - e^{-\ell}\right)} \]
      5. Step-by-step derivation
        1. lift-+.f64N/A

          \[\leadsto U + \color{blue}{J \cdot \left(e^{\ell} - e^{-\ell}\right)} \]
        2. +-commutativeN/A

          \[\leadsto J \cdot \left(e^{\ell} - e^{-\ell}\right) + \color{blue}{U} \]
        3. lift-*.f64N/A

          \[\leadsto J \cdot \left(e^{\ell} - e^{-\ell}\right) + U \]
        4. lift--.f64N/A

          \[\leadsto J \cdot \left(e^{\ell} - e^{-\ell}\right) + U \]
        5. lift-exp.f64N/A

          \[\leadsto J \cdot \left(e^{\ell} - e^{-\ell}\right) + U \]
        6. lift-exp.f64N/A

          \[\leadsto J \cdot \left(e^{\ell} - e^{-\ell}\right) + U \]
        7. lift-neg.f64N/A

          \[\leadsto J \cdot \left(e^{\ell} - e^{\mathsf{neg}\left(\ell\right)}\right) + U \]
        8. sinh-undefN/A

          \[\leadsto J \cdot \left(2 \cdot \sinh \ell\right) + U \]
        9. lift-sinh.f64N/A

          \[\leadsto J \cdot \left(2 \cdot \sinh \ell\right) + U \]
        10. associate-*r*N/A

          \[\leadsto \left(J \cdot 2\right) \cdot \sinh \ell + U \]
        11. *-commutativeN/A

          \[\leadsto \left(2 \cdot J\right) \cdot \sinh \ell + U \]
        12. lower-fma.f64N/A

          \[\leadsto \mathsf{fma}\left(2 \cdot J, \color{blue}{\sinh \ell}, U\right) \]
        13. count-2-revN/A

          \[\leadsto \mathsf{fma}\left(J + J, \sinh \color{blue}{\ell}, U\right) \]
        14. lower-+.f6480.8

          \[\leadsto \mathsf{fma}\left(J + J, \sinh \color{blue}{\ell}, U\right) \]
      6. Applied rewrites80.8%

        \[\leadsto \mathsf{fma}\left(J + J, \color{blue}{\sinh \ell}, U\right) \]
    3. Recombined 2 regimes into one program.
    4. Add Preprocessing

    Alternative 5: 87.2% accurate, 1.0× speedup?

    \[\begin{array}{l} \mathbf{if}\;\cos \left(\frac{K}{2}\right) \leq -0.055:\\ \;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(-0.125, K \cdot K, 1\right) \cdot \left(J + J\right), \sinh \ell, U\right)\\ \mathbf{else}:\\ \;\;\;\;\mathsf{fma}\left(J + J, \sinh \ell, U\right)\\ \end{array} \]
    (FPCore (J l K U)
     :precision binary64
     (if (<= (cos (/ K 2.0)) -0.055)
       (fma (* (fma -0.125 (* K K) 1.0) (+ J J)) (sinh l) U)
       (fma (+ J J) (sinh l) U)))
    double code(double J, double l, double K, double U) {
    	double tmp;
    	if (cos((K / 2.0)) <= -0.055) {
    		tmp = fma((fma(-0.125, (K * K), 1.0) * (J + J)), sinh(l), U);
    	} else {
    		tmp = fma((J + J), sinh(l), U);
    	}
    	return tmp;
    }
    
    # Alternative 5 (Julia): regime-split evaluation of
    # J*(e^l - e^-l)*cos(K/2) + U.  sinh(l) stands in for (e^l - e^-l)/2;
    # when cos(K/2) <= -0.055 the cosine factor is replaced by its Taylor
    # polynomial fma(-0.125, K*K, 1) = 1 - K^2/8 (see the derivation above).
    function code(J, l, K, U)
    	tmp = 0.0
    	if (cos(Float64(K / 2.0)) <= -0.055)
    		# (1 - K^2/8) folded into 2J, then one fused multiply-add with sinh(l)
    		tmp = fma(Float64(fma(-0.125, Float64(K * K), 1.0) * Float64(J + J)), sinh(l), U);
    	else
    		# default regime: fma(2J, sinh(l), U)
    		tmp = fma(Float64(J + J), sinh(l), U);
    	end
    	return tmp
    end
    
    code[J_, l_, K_, U_] := If[LessEqual[N[Cos[N[(K / 2.0), $MachinePrecision]], $MachinePrecision], -0.055], N[(N[(N[(-0.125 * N[(K * K), $MachinePrecision] + 1.0), $MachinePrecision] * N[(J + J), $MachinePrecision]), $MachinePrecision] * N[Sinh[l], $MachinePrecision] + U), $MachinePrecision], N[(N[(J + J), $MachinePrecision] * N[Sinh[l], $MachinePrecision] + U), $MachinePrecision]]
    
    \begin{array}{l}
    \mathbf{if}\;\cos \left(\frac{K}{2}\right) \leq -0.055:\\
    \;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(-0.125, K \cdot K, 1\right) \cdot \left(J + J\right), \sinh \ell, U\right)\\
    
    \mathbf{else}:\\
    \;\;\;\;\mathsf{fma}\left(J + J, \sinh \ell, U\right)\\
    
    
    \end{array}
    
    Derivation
    1. Split input into 2 regimes
    2. if (cos.f64 (/.f64 K #s(literal 2 binary64))) < -0.0550000000000000003

      1. Initial program 86.0%

        \[\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U \]
      2. Taylor expanded in K around 0

        \[\leadsto \left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \color{blue}{\left(1 + \frac{-1}{8} \cdot {K}^{2}\right)} + U \]
      3. Step-by-step derivation
        1. lower-+.f64N/A

          \[\leadsto \left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \left(1 + \color{blue}{\frac{-1}{8} \cdot {K}^{2}}\right) + U \]
        2. lower-*.f64N/A

          \[\leadsto \left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \left(1 + \frac{-1}{8} \cdot \color{blue}{{K}^{2}}\right) + U \]
        3. lower-pow.f6464.6

          \[\leadsto \left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \left(1 + -0.125 \cdot {K}^{\color{blue}{2}}\right) + U \]
      4. Applied rewrites64.6%

        \[\leadsto \left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \color{blue}{\left(1 + -0.125 \cdot {K}^{2}\right)} + U \]
      5. Step-by-step derivation
        1. lift-+.f64N/A

          \[\leadsto \color{blue}{\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \left(1 + \frac{-1}{8} \cdot {K}^{2}\right) + U} \]
        2. lift-*.f64N/A

          \[\leadsto \color{blue}{\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \left(1 + \frac{-1}{8} \cdot {K}^{2}\right)} + U \]
        3. lower-fma.f6464.6

          \[\leadsto \color{blue}{\mathsf{fma}\left(J \cdot \left(e^{\ell} - e^{-\ell}\right), 1 + -0.125 \cdot {K}^{2}, U\right)} \]
      6. Applied rewrites69.6%

        \[\leadsto \color{blue}{\mathsf{fma}\left(\left(J + J\right) \cdot \sinh \ell, \mathsf{fma}\left(K \cdot K, -0.125, 1\right), U\right)} \]
      7. Step-by-step derivation
        1. lift-fma.f64N/A

          \[\leadsto \color{blue}{\left(\left(J + J\right) \cdot \sinh \ell\right) \cdot \mathsf{fma}\left(K \cdot K, \frac{-1}{8}, 1\right) + U} \]
        2. *-commutativeN/A

          \[\leadsto \color{blue}{\mathsf{fma}\left(K \cdot K, \frac{-1}{8}, 1\right) \cdot \left(\left(J + J\right) \cdot \sinh \ell\right)} + U \]
        3. lift-*.f64N/A

          \[\leadsto \mathsf{fma}\left(K \cdot K, \frac{-1}{8}, 1\right) \cdot \color{blue}{\left(\left(J + J\right) \cdot \sinh \ell\right)} + U \]
        4. associate-*r*N/A

          \[\leadsto \color{blue}{\left(\mathsf{fma}\left(K \cdot K, \frac{-1}{8}, 1\right) \cdot \left(J + J\right)\right) \cdot \sinh \ell} + U \]
        5. lower-fma.f64N/A

          \[\leadsto \color{blue}{\mathsf{fma}\left(\mathsf{fma}\left(K \cdot K, \frac{-1}{8}, 1\right) \cdot \left(J + J\right), \sinh \ell, U\right)} \]
        6. lower-*.f6468.9

          \[\leadsto \mathsf{fma}\left(\color{blue}{\mathsf{fma}\left(K \cdot K, -0.125, 1\right) \cdot \left(J + J\right)}, \sinh \ell, U\right) \]
        7. lift-fma.f64N/A

          \[\leadsto \mathsf{fma}\left(\left(\left(K \cdot K\right) \cdot \frac{-1}{8} + \color{blue}{1}\right) \cdot \left(J + J\right), \sinh \ell, U\right) \]
        8. *-commutativeN/A

          \[\leadsto \mathsf{fma}\left(\left(\frac{-1}{8} \cdot \left(K \cdot K\right) + 1\right) \cdot \left(J + J\right), \sinh \ell, U\right) \]
        9. lower-fma.f6468.9

          \[\leadsto \mathsf{fma}\left(\mathsf{fma}\left(-0.125, \color{blue}{K \cdot K}, 1\right) \cdot \left(J + J\right), \sinh \ell, U\right) \]
      8. Applied rewrites68.9%

        \[\leadsto \color{blue}{\mathsf{fma}\left(\mathsf{fma}\left(-0.125, K \cdot K, 1\right) \cdot \left(J + J\right), \sinh \ell, U\right)} \]

      if -0.0550000000000000003 < (cos.f64 (/.f64 K #s(literal 2 binary64)))

      1. Initial program 86.0%

        \[\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U \]
      2. Taylor expanded in K around 0

        \[\leadsto \color{blue}{U + J \cdot \left(e^{\ell} - e^{\mathsf{neg}\left(\ell\right)}\right)} \]
      3. Step-by-step derivation
        1. lower-+.f64N/A

          \[\leadsto U + \color{blue}{J \cdot \left(e^{\ell} - e^{\mathsf{neg}\left(\ell\right)}\right)} \]
        2. lower-*.f64N/A

          \[\leadsto U + J \cdot \color{blue}{\left(e^{\ell} - e^{\mathsf{neg}\left(\ell\right)}\right)} \]
        3. lower--.f64N/A

          \[\leadsto U + J \cdot \left(e^{\ell} - \color{blue}{e^{\mathsf{neg}\left(\ell\right)}}\right) \]
        4. lower-exp.f64N/A

          \[\leadsto U + J \cdot \left(e^{\ell} - e^{\color{blue}{\mathsf{neg}\left(\ell\right)}}\right) \]
        5. lower-exp.f64N/A

          \[\leadsto U + J \cdot \left(e^{\ell} - e^{\mathsf{neg}\left(\ell\right)}\right) \]
        6. lower-neg.f6473.3

          \[\leadsto U + J \cdot \left(e^{\ell} - e^{-\ell}\right) \]
      4. Applied rewrites73.3%

        \[\leadsto \color{blue}{U + J \cdot \left(e^{\ell} - e^{-\ell}\right)} \]
      5. Step-by-step derivation
        1. lift-+.f64N/A

          \[\leadsto U + \color{blue}{J \cdot \left(e^{\ell} - e^{-\ell}\right)} \]
        2. +-commutativeN/A

          \[\leadsto J \cdot \left(e^{\ell} - e^{-\ell}\right) + \color{blue}{U} \]
        3. lift-*.f64N/A

          \[\leadsto J \cdot \left(e^{\ell} - e^{-\ell}\right) + U \]
        4. lift--.f64N/A

          \[\leadsto J \cdot \left(e^{\ell} - e^{-\ell}\right) + U \]
        5. lift-exp.f64N/A

          \[\leadsto J \cdot \left(e^{\ell} - e^{-\ell}\right) + U \]
        6. lift-exp.f64N/A

          \[\leadsto J \cdot \left(e^{\ell} - e^{-\ell}\right) + U \]
        7. lift-neg.f64N/A

          \[\leadsto J \cdot \left(e^{\ell} - e^{\mathsf{neg}\left(\ell\right)}\right) + U \]
        8. sinh-undefN/A

          \[\leadsto J \cdot \left(2 \cdot \sinh \ell\right) + U \]
        9. lift-sinh.f64N/A

          \[\leadsto J \cdot \left(2 \cdot \sinh \ell\right) + U \]
        10. associate-*r*N/A

          \[\leadsto \left(J \cdot 2\right) \cdot \sinh \ell + U \]
        11. *-commutativeN/A

          \[\leadsto \left(2 \cdot J\right) \cdot \sinh \ell + U \]
        12. lower-fma.f64N/A

          \[\leadsto \mathsf{fma}\left(2 \cdot J, \color{blue}{\sinh \ell}, U\right) \]
        13. count-2-revN/A

          \[\leadsto \mathsf{fma}\left(J + J, \sinh \color{blue}{\ell}, U\right) \]
        14. lower-+.f6480.8

          \[\leadsto \mathsf{fma}\left(J + J, \sinh \color{blue}{\ell}, U\right) \]
      6. Applied rewrites80.8%

        \[\leadsto \mathsf{fma}\left(J + J, \color{blue}{\sinh \ell}, U\right) \]
    3. Recombined 2 regimes into one program.
    4. Add Preprocessing

    Alternative 6: 85.5% accurate, 1.1× speedup?

    \[\begin{array}{l} \mathbf{if}\;\cos \left(\frac{K}{2}\right) \leq -0.055:\\ \;\;\;\;U - \left(\left(\ell + \ell\right) \cdot \mathsf{fma}\left(-0.125, K \cdot K, 1\right)\right) \cdot \left(-J\right)\\ \mathbf{else}:\\ \;\;\;\;\mathsf{fma}\left(J + J, \sinh \ell, U\right)\\ \end{array} \]
    (FPCore (J l K U)
     :precision binary64
     (if (<= (cos (/ K 2.0)) -0.055)
       (- U (* (* (+ l l) (fma -0.125 (* K K) 1.0)) (- J)))
       (fma (+ J J) (sinh l) U)))
    double code(double J, double l, double K, double U) {
    	double tmp;
    	if (cos((K / 2.0)) <= -0.055) {
    		tmp = U - (((l + l) * fma(-0.125, (K * K), 1.0)) * -J);
    	} else {
    		tmp = fma((J + J), sinh(l), U);
    	}
    	return tmp;
    }
    
    # Alternative 6 (Julia): regime split on cos(K/2).  The damped branch
    # linearizes sinh(l) to l (Taylor expansion in l around 0) and evaluates
    # U - ((2l)*(1 - K^2/8)) * (-J); the other branch keeps fma(2J, sinh(l), U).
    function code(J, l, K, U)
    	tmp = 0.0
    	if (cos(Float64(K / 2.0)) <= -0.055)
    		# U minus ((l + l) * (1 - K^2/8)) * (-J), per the sub-to-mult derivation
    		tmp = Float64(U - Float64(Float64(Float64(l + l) * fma(-0.125, Float64(K * K), 1.0)) * Float64(-J)));
    	else
    		# default regime: fma(2J, sinh(l), U)
    		tmp = fma(Float64(J + J), sinh(l), U);
    	end
    	return tmp
    end
    
    code[J_, l_, K_, U_] := If[LessEqual[N[Cos[N[(K / 2.0), $MachinePrecision]], $MachinePrecision], -0.055], N[(U - N[(N[(N[(l + l), $MachinePrecision] * N[(-0.125 * N[(K * K), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision] * (-J)), $MachinePrecision]), $MachinePrecision], N[(N[(J + J), $MachinePrecision] * N[Sinh[l], $MachinePrecision] + U), $MachinePrecision]]
    
    \begin{array}{l}
    \mathbf{if}\;\cos \left(\frac{K}{2}\right) \leq -0.055:\\
    \;\;\;\;U - \left(\left(\ell + \ell\right) \cdot \mathsf{fma}\left(-0.125, K \cdot K, 1\right)\right) \cdot \left(-J\right)\\
    
    \mathbf{else}:\\
    \;\;\;\;\mathsf{fma}\left(J + J, \sinh \ell, U\right)\\
    
    
    \end{array}
    
    Derivation
    1. Split input into 2 regimes
    2. if (cos.f64 (/.f64 K #s(literal 2 binary64))) < -0.0550000000000000003

      1. Initial program 86.0%

        \[\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U \]
      2. Taylor expanded in K around 0

        \[\leadsto \left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \color{blue}{\left(1 + \frac{-1}{8} \cdot {K}^{2}\right)} + U \]
      3. Step-by-step derivation
        1. lower-+.f64N/A

          \[\leadsto \left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \left(1 + \color{blue}{\frac{-1}{8} \cdot {K}^{2}}\right) + U \]
        2. lower-*.f64N/A

          \[\leadsto \left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \left(1 + \frac{-1}{8} \cdot \color{blue}{{K}^{2}}\right) + U \]
        3. lower-pow.f6464.6

          \[\leadsto \left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \left(1 + -0.125 \cdot {K}^{\color{blue}{2}}\right) + U \]
      4. Applied rewrites64.6%

        \[\leadsto \left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \color{blue}{\left(1 + -0.125 \cdot {K}^{2}\right)} + U \]
      5. Step-by-step derivation
        1. lift-+.f64N/A

          \[\leadsto \color{blue}{\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \left(1 + \frac{-1}{8} \cdot {K}^{2}\right) + U} \]
        2. +-commutativeN/A

          \[\leadsto \color{blue}{U + \left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \left(1 + \frac{-1}{8} \cdot {K}^{2}\right)} \]
        3. lift-*.f64N/A

          \[\leadsto U + \color{blue}{\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \left(1 + \frac{-1}{8} \cdot {K}^{2}\right)} \]
        4. lift-*.f64N/A

          \[\leadsto U + \color{blue}{\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right)} \cdot \left(1 + \frac{-1}{8} \cdot {K}^{2}\right) \]
        5. associate-*l*N/A

          \[\leadsto U + \color{blue}{J \cdot \left(\left(e^{\ell} - e^{-\ell}\right) \cdot \left(1 + \frac{-1}{8} \cdot {K}^{2}\right)\right)} \]
        6. fp-cancel-sign-sub-invN/A

          \[\leadsto \color{blue}{U - \left(\mathsf{neg}\left(J\right)\right) \cdot \left(\left(e^{\ell} - e^{-\ell}\right) \cdot \left(1 + \frac{-1}{8} \cdot {K}^{2}\right)\right)} \]
        7. sub-to-multN/A

          \[\leadsto \color{blue}{\left(1 - \frac{\left(\mathsf{neg}\left(J\right)\right) \cdot \left(\left(e^{\ell} - e^{-\ell}\right) \cdot \left(1 + \frac{-1}{8} \cdot {K}^{2}\right)\right)}{U}\right) \cdot U} \]
        8. lower-unsound-*.f64N/A

          \[\leadsto \color{blue}{\left(1 - \frac{\left(\mathsf{neg}\left(J\right)\right) \cdot \left(\left(e^{\ell} - e^{-\ell}\right) \cdot \left(1 + \frac{-1}{8} \cdot {K}^{2}\right)\right)}{U}\right) \cdot U} \]
      6. Applied rewrites68.3%

        \[\leadsto \color{blue}{\left(1 - \frac{\left(-J\right) \cdot \left(\mathsf{fma}\left(K \cdot K, -0.125, 1\right) \cdot \left(\sinh \ell \cdot 2\right)\right)}{U}\right) \cdot U} \]
      7. Taylor expanded in l around 0

        \[\leadsto \left(1 - \frac{\left(-J\right) \cdot \left(\mathsf{fma}\left(K \cdot K, \frac{-1}{8}, 1\right) \cdot \left(\color{blue}{\ell} \cdot 2\right)\right)}{U}\right) \cdot U \]
      8. Step-by-step derivation
        1. Applied rewrites52.3%

          \[\leadsto \left(1 - \frac{\left(-J\right) \cdot \left(\mathsf{fma}\left(K \cdot K, -0.125, 1\right) \cdot \left(\color{blue}{\ell} \cdot 2\right)\right)}{U}\right) \cdot U \]
        2. Step-by-step derivation
          1. lift-*.f64N/A

            \[\leadsto \color{blue}{\left(1 - \frac{\left(-J\right) \cdot \left(\mathsf{fma}\left(K \cdot K, \frac{-1}{8}, 1\right) \cdot \left(\ell \cdot 2\right)\right)}{U}\right) \cdot U} \]
          2. lift--.f64N/A

            \[\leadsto \color{blue}{\left(1 - \frac{\left(-J\right) \cdot \left(\mathsf{fma}\left(K \cdot K, \frac{-1}{8}, 1\right) \cdot \left(\ell \cdot 2\right)\right)}{U}\right)} \cdot U \]
          3. lift-/.f64N/A

            \[\leadsto \left(1 - \color{blue}{\frac{\left(-J\right) \cdot \left(\mathsf{fma}\left(K \cdot K, \frac{-1}{8}, 1\right) \cdot \left(\ell \cdot 2\right)\right)}{U}}\right) \cdot U \]
          4. sub-to-mult-revN/A

            \[\leadsto \color{blue}{U - \left(-J\right) \cdot \left(\mathsf{fma}\left(K \cdot K, \frac{-1}{8}, 1\right) \cdot \left(\ell \cdot 2\right)\right)} \]
          5. lower--.f6450.4

            \[\leadsto \color{blue}{U - \left(-J\right) \cdot \left(\mathsf{fma}\left(K \cdot K, -0.125, 1\right) \cdot \left(\ell \cdot 2\right)\right)} \]
          6. lift-*.f64N/A

            \[\leadsto U - \color{blue}{\left(-J\right) \cdot \left(\mathsf{fma}\left(K \cdot K, \frac{-1}{8}, 1\right) \cdot \left(\ell \cdot 2\right)\right)} \]
          7. *-commutativeN/A

            \[\leadsto U - \color{blue}{\left(\mathsf{fma}\left(K \cdot K, \frac{-1}{8}, 1\right) \cdot \left(\ell \cdot 2\right)\right) \cdot \left(-J\right)} \]
          8. lower-*.f6450.4

            \[\leadsto U - \color{blue}{\left(\mathsf{fma}\left(K \cdot K, -0.125, 1\right) \cdot \left(\ell \cdot 2\right)\right) \cdot \left(-J\right)} \]
        3. Applied rewrites50.4%

          \[\leadsto \color{blue}{U - \left(\left(\ell + \ell\right) \cdot \mathsf{fma}\left(-0.125, K \cdot K, 1\right)\right) \cdot \left(-J\right)} \]

        if -0.0550000000000000003 < (cos.f64 (/.f64 K #s(literal 2 binary64)))

        1. Initial program 86.0%

          \[\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U \]
        2. Taylor expanded in K around 0

          \[\leadsto \color{blue}{U + J \cdot \left(e^{\ell} - e^{\mathsf{neg}\left(\ell\right)}\right)} \]
        3. Step-by-step derivation
          1. lower-+.f64N/A

            \[\leadsto U + \color{blue}{J \cdot \left(e^{\ell} - e^{\mathsf{neg}\left(\ell\right)}\right)} \]
          2. lower-*.f64N/A

            \[\leadsto U + J \cdot \color{blue}{\left(e^{\ell} - e^{\mathsf{neg}\left(\ell\right)}\right)} \]
          3. lower--.f64N/A

            \[\leadsto U + J \cdot \left(e^{\ell} - \color{blue}{e^{\mathsf{neg}\left(\ell\right)}}\right) \]
          4. lower-exp.f64N/A

            \[\leadsto U + J \cdot \left(e^{\ell} - e^{\color{blue}{\mathsf{neg}\left(\ell\right)}}\right) \]
          5. lower-exp.f64N/A

            \[\leadsto U + J \cdot \left(e^{\ell} - e^{\mathsf{neg}\left(\ell\right)}\right) \]
          6. lower-neg.f6473.3

            \[\leadsto U + J \cdot \left(e^{\ell} - e^{-\ell}\right) \]
        4. Applied rewrites73.3%

          \[\leadsto \color{blue}{U + J \cdot \left(e^{\ell} - e^{-\ell}\right)} \]
        5. Step-by-step derivation
          1. lift-+.f64N/A

            \[\leadsto U + \color{blue}{J \cdot \left(e^{\ell} - e^{-\ell}\right)} \]
          2. +-commutativeN/A

            \[\leadsto J \cdot \left(e^{\ell} - e^{-\ell}\right) + \color{blue}{U} \]
          3. lift-*.f64N/A

            \[\leadsto J \cdot \left(e^{\ell} - e^{-\ell}\right) + U \]
          4. lift--.f64N/A

            \[\leadsto J \cdot \left(e^{\ell} - e^{-\ell}\right) + U \]
          5. lift-exp.f64N/A

            \[\leadsto J \cdot \left(e^{\ell} - e^{-\ell}\right) + U \]
          6. lift-exp.f64N/A

            \[\leadsto J \cdot \left(e^{\ell} - e^{-\ell}\right) + U \]
          7. lift-neg.f64N/A

            \[\leadsto J \cdot \left(e^{\ell} - e^{\mathsf{neg}\left(\ell\right)}\right) + U \]
          8. sinh-undefN/A

            \[\leadsto J \cdot \left(2 \cdot \sinh \ell\right) + U \]
          9. lift-sinh.f64N/A

            \[\leadsto J \cdot \left(2 \cdot \sinh \ell\right) + U \]
          10. associate-*r*N/A

            \[\leadsto \left(J \cdot 2\right) \cdot \sinh \ell + U \]
          11. *-commutativeN/A

            \[\leadsto \left(2 \cdot J\right) \cdot \sinh \ell + U \]
          12. lower-fma.f64N/A

            \[\leadsto \mathsf{fma}\left(2 \cdot J, \color{blue}{\sinh \ell}, U\right) \]
          13. count-2-revN/A

            \[\leadsto \mathsf{fma}\left(J + J, \sinh \color{blue}{\ell}, U\right) \]
          14. lower-+.f6480.8

            \[\leadsto \mathsf{fma}\left(J + J, \sinh \color{blue}{\ell}, U\right) \]
        6. Applied rewrites80.8%

          \[\leadsto \mathsf{fma}\left(J + J, \color{blue}{\sinh \ell}, U\right) \]
      9. Recombined 2 regimes into one program.
      10. Add Preprocessing

      Alternative 7: 84.6% accurate, 1.1× speedup?

      \[\begin{array}{l} \mathbf{if}\;\cos \left(\frac{K}{2}\right) \leq -0.055:\\ \;\;\;\;\mathsf{fma}\left(\left(J + J\right) \cdot \ell, \mathsf{fma}\left(K \cdot K, -0.125, 1\right), U\right)\\ \mathbf{else}:\\ \;\;\;\;\mathsf{fma}\left(J + J, \sinh \ell, U\right)\\ \end{array} \]
      (FPCore (J l K U)
       :precision binary64
       (if (<= (cos (/ K 2.0)) -0.055)
         (fma (* (+ J J) l) (fma (* K K) -0.125 1.0) U)
         (fma (+ J J) (sinh l) U)))
      double code(double J, double l, double K, double U) {
      	double tmp;
      	if (cos((K / 2.0)) <= -0.055) {
      		tmp = fma(((J + J) * l), fma((K * K), -0.125, 1.0), U);
      	} else {
      		tmp = fma((J + J), sinh(l), U);
      	}
      	return tmp;
      }
      
      # Alternative 7 (Julia): in the cos(K/2) <= -0.055 regime sinh(l) is
      # linearized to l, giving fma((J + J) * l, 1 - K^2/8, U); the other
      # regime keeps the exact fma(2J, sinh(l), U) form.
      function code(J, l, K, U)
      	tmp = 0.0
      	if (cos(Float64(K / 2.0)) <= -0.055)
      		# nested fma: inner computes 1 - K^2/8, outer adds U
      		tmp = fma(Float64(Float64(J + J) * l), fma(Float64(K * K), -0.125, 1.0), U);
      	else
      		# default regime: fma(2J, sinh(l), U)
      		tmp = fma(Float64(J + J), sinh(l), U);
      	end
      	return tmp
      end
      
      code[J_, l_, K_, U_] := If[LessEqual[N[Cos[N[(K / 2.0), $MachinePrecision]], $MachinePrecision], -0.055], N[(N[(N[(J + J), $MachinePrecision] * l), $MachinePrecision] * N[(N[(K * K), $MachinePrecision] * -0.125 + 1.0), $MachinePrecision] + U), $MachinePrecision], N[(N[(J + J), $MachinePrecision] * N[Sinh[l], $MachinePrecision] + U), $MachinePrecision]]
      
      \begin{array}{l}
      \mathbf{if}\;\cos \left(\frac{K}{2}\right) \leq -0.055:\\
      \;\;\;\;\mathsf{fma}\left(\left(J + J\right) \cdot \ell, \mathsf{fma}\left(K \cdot K, -0.125, 1\right), U\right)\\
      
      \mathbf{else}:\\
      \;\;\;\;\mathsf{fma}\left(J + J, \sinh \ell, U\right)\\
      
      
      \end{array}
      
      Derivation
      1. Split input into 2 regimes
      2. if (cos.f64 (/.f64 K #s(literal 2 binary64))) < -0.0550000000000000003

        1. Initial program 86.0%

          \[\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U \]
        2. Taylor expanded in K around 0

          \[\leadsto \left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \color{blue}{\left(1 + \frac{-1}{8} \cdot {K}^{2}\right)} + U \]
        3. Step-by-step derivation
          1. lower-+.f64N/A

            \[\leadsto \left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \left(1 + \color{blue}{\frac{-1}{8} \cdot {K}^{2}}\right) + U \]
          2. lower-*.f64N/A

            \[\leadsto \left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \left(1 + \frac{-1}{8} \cdot \color{blue}{{K}^{2}}\right) + U \]
          3. lower-pow.f6464.6

            \[\leadsto \left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \left(1 + -0.125 \cdot {K}^{\color{blue}{2}}\right) + U \]
        4. Applied rewrites64.6%

          \[\leadsto \left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \color{blue}{\left(1 + -0.125 \cdot {K}^{2}\right)} + U \]
        5. Step-by-step derivation
          1. lift-+.f64N/A

            \[\leadsto \color{blue}{\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \left(1 + \frac{-1}{8} \cdot {K}^{2}\right) + U} \]
          2. lift-*.f64N/A

            \[\leadsto \color{blue}{\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \left(1 + \frac{-1}{8} \cdot {K}^{2}\right)} + U \]
          3. lower-fma.f6464.6

            \[\leadsto \color{blue}{\mathsf{fma}\left(J \cdot \left(e^{\ell} - e^{-\ell}\right), 1 + -0.125 \cdot {K}^{2}, U\right)} \]
        6. Applied rewrites69.6%

          \[\leadsto \color{blue}{\mathsf{fma}\left(\left(J + J\right) \cdot \sinh \ell, \mathsf{fma}\left(K \cdot K, -0.125, 1\right), U\right)} \]
        7. Taylor expanded in l around 0

          \[\leadsto \mathsf{fma}\left(\left(J + J\right) \cdot \color{blue}{\ell}, \mathsf{fma}\left(K \cdot K, \frac{-1}{8}, 1\right), U\right) \]
        8. Step-by-step derivation
          1. Applied rewrites49.6%

            \[\leadsto \mathsf{fma}\left(\left(J + J\right) \cdot \color{blue}{\ell}, \mathsf{fma}\left(K \cdot K, -0.125, 1\right), U\right) \]

          if -0.0550000000000000003 < (cos.f64 (/.f64 K #s(literal 2 binary64)))

          1. Initial program 86.0%

            \[\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U \]
          2. Taylor expanded in K around 0

            \[\leadsto \color{blue}{U + J \cdot \left(e^{\ell} - e^{\mathsf{neg}\left(\ell\right)}\right)} \]
          3. Step-by-step derivation
            1. lower-+.f64N/A

              \[\leadsto U + \color{blue}{J \cdot \left(e^{\ell} - e^{\mathsf{neg}\left(\ell\right)}\right)} \]
            2. lower-*.f64N/A

              \[\leadsto U + J \cdot \color{blue}{\left(e^{\ell} - e^{\mathsf{neg}\left(\ell\right)}\right)} \]
            3. lower--.f64N/A

              \[\leadsto U + J \cdot \left(e^{\ell} - \color{blue}{e^{\mathsf{neg}\left(\ell\right)}}\right) \]
            4. lower-exp.f64N/A

              \[\leadsto U + J \cdot \left(e^{\ell} - e^{\color{blue}{\mathsf{neg}\left(\ell\right)}}\right) \]
            5. lower-exp.f64N/A

              \[\leadsto U + J \cdot \left(e^{\ell} - e^{\mathsf{neg}\left(\ell\right)}\right) \]
            6. lower-neg.f6473.3

              \[\leadsto U + J \cdot \left(e^{\ell} - e^{-\ell}\right) \]
          4. Applied rewrites73.3%

            \[\leadsto \color{blue}{U + J \cdot \left(e^{\ell} - e^{-\ell}\right)} \]
          5. Step-by-step derivation
            1. lift-+.f64N/A

              \[\leadsto U + \color{blue}{J \cdot \left(e^{\ell} - e^{-\ell}\right)} \]
            2. +-commutativeN/A

              \[\leadsto J \cdot \left(e^{\ell} - e^{-\ell}\right) + \color{blue}{U} \]
            3. lift-*.f64N/A

              \[\leadsto J \cdot \left(e^{\ell} - e^{-\ell}\right) + U \]
            4. lift--.f64N/A

              \[\leadsto J \cdot \left(e^{\ell} - e^{-\ell}\right) + U \]
            5. lift-exp.f64N/A

              \[\leadsto J \cdot \left(e^{\ell} - e^{-\ell}\right) + U \]
            6. lift-exp.f64N/A

              \[\leadsto J \cdot \left(e^{\ell} - e^{-\ell}\right) + U \]
            7. lift-neg.f64N/A

              \[\leadsto J \cdot \left(e^{\ell} - e^{\mathsf{neg}\left(\ell\right)}\right) + U \]
            8. sinh-undefN/A

              \[\leadsto J \cdot \left(2 \cdot \sinh \ell\right) + U \]
            9. lift-sinh.f64N/A

              \[\leadsto J \cdot \left(2 \cdot \sinh \ell\right) + U \]
            10. associate-*r*N/A

              \[\leadsto \left(J \cdot 2\right) \cdot \sinh \ell + U \]
            11. *-commutativeN/A

              \[\leadsto \left(2 \cdot J\right) \cdot \sinh \ell + U \]
            12. lower-fma.f64N/A

              \[\leadsto \mathsf{fma}\left(2 \cdot J, \color{blue}{\sinh \ell}, U\right) \]
            13. count-2-revN/A

              \[\leadsto \mathsf{fma}\left(J + J, \sinh \color{blue}{\ell}, U\right) \]
            14. lower-+.f6480.8

              \[\leadsto \mathsf{fma}\left(J + J, \sinh \color{blue}{\ell}, U\right) \]
          6. Applied rewrites80.8%

            \[\leadsto \mathsf{fma}\left(J + J, \color{blue}{\sinh \ell}, U\right) \]
        9. Recombined 2 regimes into one program.
        10. Add Preprocessing

        Alternative 8: 70.0% accurate, 2.8× speedup?

        \[\begin{array}{l} \mathbf{if}\;\ell \leq -2.8 \cdot 10^{-13}:\\ \;\;\;\;U + J \cdot \left(1 - e^{-\ell}\right)\\ \mathbf{else}:\\ \;\;\;\;\mathsf{fma}\left(J, \frac{\ell + \ell}{U}, 1\right) \cdot U\\ \end{array} \]
        (FPCore (J l K U)
         :precision binary64
         (if (<= l -2.8e-13)
           (+ U (* J (- 1.0 (exp (- l)))))
           (* (fma J (/ (+ l l) U) 1.0) U)))
        double code(double J, double l, double K, double U) {
        	double tmp;
        	if (l <= -2.8e-13) {
        		tmp = U + (J * (1.0 - exp(-l)));
        	} else {
        		tmp = fma(J, ((l + l) / U), 1.0) * U;
        	}
        	return tmp;
        }
        
        # Alternative 8 (Julia): splits on l instead of cos(K/2).  K is unused
        # here — it is eliminated by the Taylor expansion in K around 0.
        # l <= -2.8e-13 keeps U + J*(1 - e^{-l}); otherwise the sum is factored
        # through U as fma(J, 2l/U, 1) * U (the sum-to-mult rewrite).
        function code(J, l, K, U)
        	tmp = 0.0
        	if (l <= -2.8e-13)
        		# exponential branch: U + J*(1 - exp(-l))
        		tmp = Float64(U + Float64(J * Float64(1.0 - exp(Float64(-l)))));
        	else
        		# factored branch; note the division by U assumes U != 0
        		tmp = Float64(fma(J, Float64(Float64(l + l) / U), 1.0) * U);
        	end
        	return tmp
        end
        
        code[J_, l_, K_, U_] := If[LessEqual[l, -2.8e-13], N[(U + N[(J * N[(1.0 - N[Exp[(-l)], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(J * N[(N[(l + l), $MachinePrecision] / U), $MachinePrecision] + 1.0), $MachinePrecision] * U), $MachinePrecision]]
        
        \begin{array}{l}
        \mathbf{if}\;\ell \leq -2.8 \cdot 10^{-13}:\\
        \;\;\;\;U + J \cdot \left(1 - e^{-\ell}\right)\\
        
        \mathbf{else}:\\
        \;\;\;\;\mathsf{fma}\left(J, \frac{\ell + \ell}{U}, 1\right) \cdot U\\
        
        
        \end{array}
        
        Derivation
        1. Split input into 2 regimes
        2. if l < -2.8000000000000002e-13

          1. Initial program 86.0%

            \[\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U \]
          2. Taylor expanded in K around 0

            \[\leadsto \color{blue}{U + J \cdot \left(e^{\ell} - e^{\mathsf{neg}\left(\ell\right)}\right)} \]
          3. Step-by-step derivation
            1. lower-+.f64N/A

              \[\leadsto U + \color{blue}{J \cdot \left(e^{\ell} - e^{\mathsf{neg}\left(\ell\right)}\right)} \]
            2. lower-*.f64N/A

              \[\leadsto U + J \cdot \color{blue}{\left(e^{\ell} - e^{\mathsf{neg}\left(\ell\right)}\right)} \]
            3. lower--.f64N/A

              \[\leadsto U + J \cdot \left(e^{\ell} - \color{blue}{e^{\mathsf{neg}\left(\ell\right)}}\right) \]
            4. lower-exp.f64N/A

              \[\leadsto U + J \cdot \left(e^{\ell} - e^{\color{blue}{\mathsf{neg}\left(\ell\right)}}\right) \]
            5. lower-exp.f64N/A

              \[\leadsto U + J \cdot \left(e^{\ell} - e^{\mathsf{neg}\left(\ell\right)}\right) \]
            6. lower-neg.f6473.3

              \[\leadsto U + J \cdot \left(e^{\ell} - e^{-\ell}\right) \]
          4. Applied rewrites73.3%

            \[\leadsto \color{blue}{U + J \cdot \left(e^{\ell} - e^{-\ell}\right)} \]
          5. Taylor expanded in l around 0

            \[\leadsto U + J \cdot \left(1 - e^{\color{blue}{-\ell}}\right) \]
          6. Step-by-step derivation
            1. Applied rewrites55.0%

              \[\leadsto U + J \cdot \left(1 - e^{\color{blue}{-\ell}}\right) \]

            if -2.8000000000000002e-13 < l

            1. Initial program 86.0%

              \[\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U \]
            2. Taylor expanded in K around 0

              \[\leadsto \color{blue}{U + J \cdot \left(e^{\ell} - e^{\mathsf{neg}\left(\ell\right)}\right)} \]
            3. Step-by-step derivation
              1. lower-+.f64N/A

                \[\leadsto U + \color{blue}{J \cdot \left(e^{\ell} - e^{\mathsf{neg}\left(\ell\right)}\right)} \]
              2. lower-*.f64N/A

                \[\leadsto U + J \cdot \color{blue}{\left(e^{\ell} - e^{\mathsf{neg}\left(\ell\right)}\right)} \]
              3. lower--.f64N/A

                \[\leadsto U + J \cdot \left(e^{\ell} - \color{blue}{e^{\mathsf{neg}\left(\ell\right)}}\right) \]
              4. lower-exp.f64N/A

                \[\leadsto U + J \cdot \left(e^{\ell} - e^{\color{blue}{\mathsf{neg}\left(\ell\right)}}\right) \]
              5. lower-exp.f64N/A

                \[\leadsto U + J \cdot \left(e^{\ell} - e^{\mathsf{neg}\left(\ell\right)}\right) \]
              6. lower-neg.f6473.3

                \[\leadsto U + J \cdot \left(e^{\ell} - e^{-\ell}\right) \]
            4. Applied rewrites73.3%

              \[\leadsto \color{blue}{U + J \cdot \left(e^{\ell} - e^{-\ell}\right)} \]
            5. Taylor expanded in l around 0

              \[\leadsto U + J \cdot \left(2 \cdot \color{blue}{\ell}\right) \]
            6. Step-by-step derivation
              1. lower-*.f6455.3

                \[\leadsto U + J \cdot \left(2 \cdot \ell\right) \]
            7. Applied rewrites55.3%

              \[\leadsto U + J \cdot \left(2 \cdot \color{blue}{\ell}\right) \]
            8. Step-by-step derivation
              1. lift-+.f64N/A

                \[\leadsto U + \color{blue}{J \cdot \left(2 \cdot \ell\right)} \]
              2. sum-to-multN/A

                \[\leadsto \left(1 + \frac{J \cdot \left(2 \cdot \ell\right)}{U}\right) \cdot \color{blue}{U} \]
              3. lower-unsound-*.f64N/A

                \[\leadsto \left(1 + \frac{J \cdot \left(2 \cdot \ell\right)}{U}\right) \cdot \color{blue}{U} \]
              4. lower-unsound-+.f64N/A

                \[\leadsto \left(1 + \frac{J \cdot \left(2 \cdot \ell\right)}{U}\right) \cdot U \]
              5. lower-unsound-/.f6458.4

                \[\leadsto \left(1 + \frac{J \cdot \left(2 \cdot \ell\right)}{U}\right) \cdot U \]
              6. lift-*.f64N/A

                \[\leadsto \left(1 + \frac{J \cdot \left(2 \cdot \ell\right)}{U}\right) \cdot U \]
              7. *-commutativeN/A

                \[\leadsto \left(1 + \frac{\left(2 \cdot \ell\right) \cdot J}{U}\right) \cdot U \]
              8. lower-*.f6458.4

                \[\leadsto \left(1 + \frac{\left(2 \cdot \ell\right) \cdot J}{U}\right) \cdot U \]
              9. lift-*.f64N/A

                \[\leadsto \left(1 + \frac{\left(2 \cdot \ell\right) \cdot J}{U}\right) \cdot U \]
              10. count-2-revN/A

                \[\leadsto \left(1 + \frac{\left(\ell + \ell\right) \cdot J}{U}\right) \cdot U \]
              11. lower-+.f6458.4

                \[\leadsto \left(1 + \frac{\left(\ell + \ell\right) \cdot J}{U}\right) \cdot U \]
            9. Applied rewrites58.4%

              \[\leadsto \left(1 + \frac{\left(\ell + \ell\right) \cdot J}{U}\right) \cdot \color{blue}{U} \]
            10. Step-by-step derivation
              1. lift-+.f64N/A

                \[\leadsto \left(1 + \frac{\left(\ell + \ell\right) \cdot J}{U}\right) \cdot U \]
              2. +-commutativeN/A

                \[\leadsto \left(\frac{\left(\ell + \ell\right) \cdot J}{U} + 1\right) \cdot U \]
              3. lift-/.f64N/A

                \[\leadsto \left(\frac{\left(\ell + \ell\right) \cdot J}{U} + 1\right) \cdot U \]
              4. lift-*.f64N/A

                \[\leadsto \left(\frac{\left(\ell + \ell\right) \cdot J}{U} + 1\right) \cdot U \]
              5. *-commutativeN/A

                \[\leadsto \left(\frac{J \cdot \left(\ell + \ell\right)}{U} + 1\right) \cdot U \]
              6. associate-/l*N/A

                \[\leadsto \left(J \cdot \frac{\ell + \ell}{U} + 1\right) \cdot U \]
              7. lower-fma.f64N/A

                \[\leadsto \mathsf{fma}\left(J, \frac{\ell + \ell}{U}, 1\right) \cdot U \]
              8. lower-/.f6461.4

                \[\leadsto \mathsf{fma}\left(J, \frac{\ell + \ell}{U}, 1\right) \cdot U \]
            11. Applied rewrites61.4%

              \[\leadsto \mathsf{fma}\left(J, \frac{\ell + \ell}{U}, 1\right) \cdot U \]
          7. Recombined 2 regimes into one program.
          8. Add Preprocessing

          Alternative 9: 65.1% accurate, 1.2× speedup?

          \[\begin{array}{l} \mathbf{if}\;\cos \left(\frac{K}{2}\right) \leq -0.055:\\ \;\;\;\;\mathsf{fma}\left(\left(J + J\right) \cdot \ell, \mathsf{fma}\left(K \cdot K, -0.125, 1\right), U\right)\\ \mathbf{else}:\\ \;\;\;\;\mathsf{fma}\left(J, \frac{\ell + \ell}{U}, 1\right) \cdot U\\ \end{array} \]
          (FPCore (J l K U)
           :precision binary64
           (if (<= (cos (/ K 2.0)) -0.055)
             (fma (* (+ J J) l) (fma (* K K) -0.125 1.0) U)
             (* (fma J (/ (+ l l) U) 1.0) U)))
          /* Two-regime approximation of J*(exp(l)-exp(-l))*cos(K/2) + U
             (Herbie-generated "Alternative 9"; see the derivation below). */
          double code(double J, double l, double K, double U) {
          	double tmp;
          	/* Regime split on the value of cos(K/2). */
          	if (cos((K / 2.0)) <= -0.055) {
          		/* 2*J*l * (1 - K*K/8) + U, with both products fused via fma. */
          		tmp = fma(((J + J) * l), fma((K * K), -0.125, 1.0), U);
          	} else {
          		/* U * (1 + 2*J*l/U): sum-to-mult rewrite of U + 2*J*l. */
          		tmp = fma(J, ((l + l) / U), 1.0) * U;
          	}
          	return tmp;
          }
          
          # Julia port of the two-regime approximation above; explicit
          # Float64() calls pin every intermediate to binary64.
          function code(J, l, K, U)
          	tmp = 0.0
          	if (cos(Float64(K / 2.0)) <= -0.055)
          		tmp = fma(Float64(Float64(J + J) * l), fma(Float64(K * K), -0.125, 1.0), U);
          	else
          		tmp = Float64(fma(J, Float64(Float64(l + l) / U), 1.0) * U);
          	end
          	return tmp
          end
          
          code[J_, l_, K_, U_] := If[LessEqual[N[Cos[N[(K / 2.0), $MachinePrecision]], $MachinePrecision], -0.055], N[(N[(N[(J + J), $MachinePrecision] * l), $MachinePrecision] * N[(N[(K * K), $MachinePrecision] * -0.125 + 1.0), $MachinePrecision] + U), $MachinePrecision], N[(N[(J * N[(N[(l + l), $MachinePrecision] / U), $MachinePrecision] + 1.0), $MachinePrecision] * U), $MachinePrecision]]
          
          \begin{array}{l}
          \mathbf{if}\;\cos \left(\frac{K}{2}\right) \leq -0.055:\\
          \;\;\;\;\mathsf{fma}\left(\left(J + J\right) \cdot \ell, \mathsf{fma}\left(K \cdot K, -0.125, 1\right), U\right)\\
          
          \mathbf{else}:\\
          \;\;\;\;\mathsf{fma}\left(J, \frac{\ell + \ell}{U}, 1\right) \cdot U\\
          
          
          \end{array}
          
          Derivation
          1. Split input into 2 regimes
          2. if (cos.f64 (/.f64 K #s(literal 2 binary64))) < -0.0550000000000000003

            1. Initial program 86.0%

              \[\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U \]
            2. Taylor expanded in K around 0

              \[\leadsto \left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \color{blue}{\left(1 + \frac{-1}{8} \cdot {K}^{2}\right)} + U \]
            3. Step-by-step derivation
              1. lower-+.f64N/A

                \[\leadsto \left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \left(1 + \color{blue}{\frac{-1}{8} \cdot {K}^{2}}\right) + U \]
              2. lower-*.f64N/A

                \[\leadsto \left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \left(1 + \frac{-1}{8} \cdot \color{blue}{{K}^{2}}\right) + U \]
              3. lower-pow.f64 (64.6%)

                \[\leadsto \left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \left(1 + -0.125 \cdot {K}^{\color{blue}{2}}\right) + U \]
            4. Applied rewrites64.6%

              \[\leadsto \left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \color{blue}{\left(1 + -0.125 \cdot {K}^{2}\right)} + U \]
            5. Step-by-step derivation
              1. lift-+.f64N/A

                \[\leadsto \color{blue}{\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \left(1 + \frac{-1}{8} \cdot {K}^{2}\right) + U} \]
              2. lift-*.f64N/A

                \[\leadsto \color{blue}{\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \left(1 + \frac{-1}{8} \cdot {K}^{2}\right)} + U \]
              3. lower-fma.f64 (64.6%)

                \[\leadsto \color{blue}{\mathsf{fma}\left(J \cdot \left(e^{\ell} - e^{-\ell}\right), 1 + -0.125 \cdot {K}^{2}, U\right)} \]
            6. Applied rewrites69.6%

              \[\leadsto \color{blue}{\mathsf{fma}\left(\left(J + J\right) \cdot \sinh \ell, \mathsf{fma}\left(K \cdot K, -0.125, 1\right), U\right)} \]
            7. Taylor expanded in l around 0

              \[\leadsto \mathsf{fma}\left(\left(J + J\right) \cdot \color{blue}{\ell}, \mathsf{fma}\left(K \cdot K, \frac{-1}{8}, 1\right), U\right) \]
            8. Step-by-step derivation
              1. Applied rewrites49.6%

                \[\leadsto \mathsf{fma}\left(\left(J + J\right) \cdot \color{blue}{\ell}, \mathsf{fma}\left(K \cdot K, -0.125, 1\right), U\right) \]

              if -0.0550000000000000003 < (cos.f64 (/.f64 K #s(literal 2 binary64)))

              1. Initial program 86.0%

                \[\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U \]
              2. Taylor expanded in K around 0

                \[\leadsto \color{blue}{U + J \cdot \left(e^{\ell} - e^{\mathsf{neg}\left(\ell\right)}\right)} \]
              3. Step-by-step derivation
                1. lower-+.f64N/A

                  \[\leadsto U + \color{blue}{J \cdot \left(e^{\ell} - e^{\mathsf{neg}\left(\ell\right)}\right)} \]
                2. lower-*.f64N/A

                  \[\leadsto U + J \cdot \color{blue}{\left(e^{\ell} - e^{\mathsf{neg}\left(\ell\right)}\right)} \]
                3. lower--.f64N/A

                  \[\leadsto U + J \cdot \left(e^{\ell} - \color{blue}{e^{\mathsf{neg}\left(\ell\right)}}\right) \]
                4. lower-exp.f64N/A

                  \[\leadsto U + J \cdot \left(e^{\ell} - e^{\color{blue}{\mathsf{neg}\left(\ell\right)}}\right) \]
                5. lower-exp.f64N/A

                  \[\leadsto U + J \cdot \left(e^{\ell} - e^{\mathsf{neg}\left(\ell\right)}\right) \]
                6. lower-neg.f6473.3

                  \[\leadsto U + J \cdot \left(e^{\ell} - e^{-\ell}\right) \]
              4. Applied rewrites73.3%

                \[\leadsto \color{blue}{U + J \cdot \left(e^{\ell} - e^{-\ell}\right)} \]
              5. Taylor expanded in l around 0

                \[\leadsto U + J \cdot \left(2 \cdot \color{blue}{\ell}\right) \]
              6. Step-by-step derivation
                1. lower-*.f6455.3

                  \[\leadsto U + J \cdot \left(2 \cdot \ell\right) \]
              7. Applied rewrites55.3%

                \[\leadsto U + J \cdot \left(2 \cdot \color{blue}{\ell}\right) \]
              8. Step-by-step derivation
                1. lift-+.f64N/A

                  \[\leadsto U + \color{blue}{J \cdot \left(2 \cdot \ell\right)} \]
                2. sum-to-multN/A

                  \[\leadsto \left(1 + \frac{J \cdot \left(2 \cdot \ell\right)}{U}\right) \cdot \color{blue}{U} \]
                3. lower-unsound-*.f64N/A

                  \[\leadsto \left(1 + \frac{J \cdot \left(2 \cdot \ell\right)}{U}\right) \cdot \color{blue}{U} \]
                4. lower-unsound-+.f64N/A

                  \[\leadsto \left(1 + \frac{J \cdot \left(2 \cdot \ell\right)}{U}\right) \cdot U \]
                5. lower-unsound-/.f6458.4

                  \[\leadsto \left(1 + \frac{J \cdot \left(2 \cdot \ell\right)}{U}\right) \cdot U \]
                6. lift-*.f64N/A

                  \[\leadsto \left(1 + \frac{J \cdot \left(2 \cdot \ell\right)}{U}\right) \cdot U \]
                7. *-commutativeN/A

                  \[\leadsto \left(1 + \frac{\left(2 \cdot \ell\right) \cdot J}{U}\right) \cdot U \]
                8. lower-*.f6458.4

                  \[\leadsto \left(1 + \frac{\left(2 \cdot \ell\right) \cdot J}{U}\right) \cdot U \]
                9. lift-*.f64N/A

                  \[\leadsto \left(1 + \frac{\left(2 \cdot \ell\right) \cdot J}{U}\right) \cdot U \]
                10. count-2-revN/A

                  \[\leadsto \left(1 + \frac{\left(\ell + \ell\right) \cdot J}{U}\right) \cdot U \]
                11. lower-+.f6458.4

                  \[\leadsto \left(1 + \frac{\left(\ell + \ell\right) \cdot J}{U}\right) \cdot U \]
              9. Applied rewrites58.4%

                \[\leadsto \left(1 + \frac{\left(\ell + \ell\right) \cdot J}{U}\right) \cdot \color{blue}{U} \]
              10. Step-by-step derivation
                1. lift-+.f64N/A

                  \[\leadsto \left(1 + \frac{\left(\ell + \ell\right) \cdot J}{U}\right) \cdot U \]
                2. +-commutativeN/A

                  \[\leadsto \left(\frac{\left(\ell + \ell\right) \cdot J}{U} + 1\right) \cdot U \]
                3. lift-/.f64N/A

                  \[\leadsto \left(\frac{\left(\ell + \ell\right) \cdot J}{U} + 1\right) \cdot U \]
                4. lift-*.f64N/A

                  \[\leadsto \left(\frac{\left(\ell + \ell\right) \cdot J}{U} + 1\right) \cdot U \]
                5. *-commutativeN/A

                  \[\leadsto \left(\frac{J \cdot \left(\ell + \ell\right)}{U} + 1\right) \cdot U \]
                6. associate-/l*N/A

                  \[\leadsto \left(J \cdot \frac{\ell + \ell}{U} + 1\right) \cdot U \]
                7. lower-fma.f64N/A

                  \[\leadsto \mathsf{fma}\left(J, \frac{\ell + \ell}{U}, 1\right) \cdot U \]
                8. lower-/.f6461.4

                  \[\leadsto \mathsf{fma}\left(J, \frac{\ell + \ell}{U}, 1\right) \cdot U \]
              11. Applied rewrites61.4%

                \[\leadsto \mathsf{fma}\left(J, \frac{\ell + \ell}{U}, 1\right) \cdot U \]
            9. Recombined 2 regimes into one program.
            10. Add Preprocessing

            Alternative 10: 61.4% accurate, 4.6× speedup?

            \[\mathsf{fma}\left(J, \frac{\ell + \ell}{U}, 1\right) \cdot U \]
            (FPCore (J l K U) :precision binary64 (* (fma J (/ (+ l l) U) 1.0) U))
            /* Single-regime rewrite of U + 2*J*l as U * (1 + 2*J*l/U),
               with the inner term fused via fma (Herbie "Alternative 10"). */
            double code(double J, double l, double K, double U) {
            	return fma(J, ((l + l) / U), 1.0) * U;
            }
            
            # Julia port of the fma-based rewrite of U + 2*J*l above.
            function code(J, l, K, U)
            	return Float64(fma(J, Float64(Float64(l + l) / U), 1.0) * U)
            end
            
            code[J_, l_, K_, U_] := N[(N[(J * N[(N[(l + l), $MachinePrecision] / U), $MachinePrecision] + 1.0), $MachinePrecision] * U), $MachinePrecision]
            
            \mathsf{fma}\left(J, \frac{\ell + \ell}{U}, 1\right) \cdot U
            
            Derivation
            1. Initial program 86.0%

              \[\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U \]
            2. Taylor expanded in K around 0

              \[\leadsto \color{blue}{U + J \cdot \left(e^{\ell} - e^{\mathsf{neg}\left(\ell\right)}\right)} \]
            3. Step-by-step derivation
              1. lower-+.f64N/A

                \[\leadsto U + \color{blue}{J \cdot \left(e^{\ell} - e^{\mathsf{neg}\left(\ell\right)}\right)} \]
              2. lower-*.f64N/A

                \[\leadsto U + J \cdot \color{blue}{\left(e^{\ell} - e^{\mathsf{neg}\left(\ell\right)}\right)} \]
              3. lower--.f64N/A

                \[\leadsto U + J \cdot \left(e^{\ell} - \color{blue}{e^{\mathsf{neg}\left(\ell\right)}}\right) \]
              4. lower-exp.f64N/A

                \[\leadsto U + J \cdot \left(e^{\ell} - e^{\color{blue}{\mathsf{neg}\left(\ell\right)}}\right) \]
              5. lower-exp.f64N/A

                \[\leadsto U + J \cdot \left(e^{\ell} - e^{\mathsf{neg}\left(\ell\right)}\right) \]
              6. lower-neg.f6473.3

                \[\leadsto U + J \cdot \left(e^{\ell} - e^{-\ell}\right) \]
            4. Applied rewrites73.3%

              \[\leadsto \color{blue}{U + J \cdot \left(e^{\ell} - e^{-\ell}\right)} \]
            5. Taylor expanded in l around 0

              \[\leadsto U + J \cdot \left(2 \cdot \color{blue}{\ell}\right) \]
            6. Step-by-step derivation
              1. lower-*.f6455.3

                \[\leadsto U + J \cdot \left(2 \cdot \ell\right) \]
            7. Applied rewrites55.3%

              \[\leadsto U + J \cdot \left(2 \cdot \color{blue}{\ell}\right) \]
            8. Step-by-step derivation
              1. lift-+.f64N/A

                \[\leadsto U + \color{blue}{J \cdot \left(2 \cdot \ell\right)} \]
              2. sum-to-multN/A

                \[\leadsto \left(1 + \frac{J \cdot \left(2 \cdot \ell\right)}{U}\right) \cdot \color{blue}{U} \]
              3. lower-unsound-*.f64N/A

                \[\leadsto \left(1 + \frac{J \cdot \left(2 \cdot \ell\right)}{U}\right) \cdot \color{blue}{U} \]
              4. lower-unsound-+.f64N/A

                \[\leadsto \left(1 + \frac{J \cdot \left(2 \cdot \ell\right)}{U}\right) \cdot U \]
              5. lower-unsound-/.f6458.4

                \[\leadsto \left(1 + \frac{J \cdot \left(2 \cdot \ell\right)}{U}\right) \cdot U \]
              6. lift-*.f64N/A

                \[\leadsto \left(1 + \frac{J \cdot \left(2 \cdot \ell\right)}{U}\right) \cdot U \]
              7. *-commutativeN/A

                \[\leadsto \left(1 + \frac{\left(2 \cdot \ell\right) \cdot J}{U}\right) \cdot U \]
              8. lower-*.f6458.4

                \[\leadsto \left(1 + \frac{\left(2 \cdot \ell\right) \cdot J}{U}\right) \cdot U \]
              9. lift-*.f64N/A

                \[\leadsto \left(1 + \frac{\left(2 \cdot \ell\right) \cdot J}{U}\right) \cdot U \]
              10. count-2-revN/A

                \[\leadsto \left(1 + \frac{\left(\ell + \ell\right) \cdot J}{U}\right) \cdot U \]
              11. lower-+.f6458.4

                \[\leadsto \left(1 + \frac{\left(\ell + \ell\right) \cdot J}{U}\right) \cdot U \]
            9. Applied rewrites58.4%

              \[\leadsto \left(1 + \frac{\left(\ell + \ell\right) \cdot J}{U}\right) \cdot \color{blue}{U} \]
            10. Step-by-step derivation
              1. lift-+.f64N/A

                \[\leadsto \left(1 + \frac{\left(\ell + \ell\right) \cdot J}{U}\right) \cdot U \]
              2. +-commutativeN/A

                \[\leadsto \left(\frac{\left(\ell + \ell\right) \cdot J}{U} + 1\right) \cdot U \]
              3. lift-/.f64N/A

                \[\leadsto \left(\frac{\left(\ell + \ell\right) \cdot J}{U} + 1\right) \cdot U \]
              4. lift-*.f64N/A

                \[\leadsto \left(\frac{\left(\ell + \ell\right) \cdot J}{U} + 1\right) \cdot U \]
              5. *-commutativeN/A

                \[\leadsto \left(\frac{J \cdot \left(\ell + \ell\right)}{U} + 1\right) \cdot U \]
              6. associate-/l*N/A

                \[\leadsto \left(J \cdot \frac{\ell + \ell}{U} + 1\right) \cdot U \]
              7. lower-fma.f64N/A

                \[\leadsto \mathsf{fma}\left(J, \frac{\ell + \ell}{U}, 1\right) \cdot U \]
              8. lower-/.f6461.4

                \[\leadsto \mathsf{fma}\left(J, \frac{\ell + \ell}{U}, 1\right) \cdot U \]
            11. Applied rewrites61.4%

              \[\leadsto \mathsf{fma}\left(J, \frac{\ell + \ell}{U}, 1\right) \cdot U \]
            12. Add Preprocessing

            Alternative 11: 55.3% accurate, 7.9× speedup?

            \[\mathsf{fma}\left(\ell + \ell, J, U\right) \]
            (FPCore (J l K U) :precision binary64 (fma (+ l l) J U))
            /* Linearized form: U + 2*J*l computed as one fused fma((l+l), J, U)
               (Herbie "Alternative 11"). */
            double code(double J, double l, double K, double U) {
            	return fma((l + l), J, U);
            }
            
            # Julia port: U + 2*J*l as a single fused multiply-add.
            function code(J, l, K, U)
            	return fma(Float64(l + l), J, U)
            end
            
            code[J_, l_, K_, U_] := N[(N[(l + l), $MachinePrecision] * J + U), $MachinePrecision]
            
            \mathsf{fma}\left(\ell + \ell, J, U\right)
            
            Derivation
            1. Initial program 86.0%

              \[\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U \]
            2. Taylor expanded in K around 0

              \[\leadsto \color{blue}{U + J \cdot \left(e^{\ell} - e^{\mathsf{neg}\left(\ell\right)}\right)} \]
            3. Step-by-step derivation
              1. lower-+.f64N/A

                \[\leadsto U + \color{blue}{J \cdot \left(e^{\ell} - e^{\mathsf{neg}\left(\ell\right)}\right)} \]
              2. lower-*.f64N/A

                \[\leadsto U + J \cdot \color{blue}{\left(e^{\ell} - e^{\mathsf{neg}\left(\ell\right)}\right)} \]
              3. lower--.f64N/A

                \[\leadsto U + J \cdot \left(e^{\ell} - \color{blue}{e^{\mathsf{neg}\left(\ell\right)}}\right) \]
              4. lower-exp.f64N/A

                \[\leadsto U + J \cdot \left(e^{\ell} - e^{\color{blue}{\mathsf{neg}\left(\ell\right)}}\right) \]
              5. lower-exp.f64N/A

                \[\leadsto U + J \cdot \left(e^{\ell} - e^{\mathsf{neg}\left(\ell\right)}\right) \]
              6. lower-neg.f6473.3

                \[\leadsto U + J \cdot \left(e^{\ell} - e^{-\ell}\right) \]
            4. Applied rewrites73.3%

              \[\leadsto \color{blue}{U + J \cdot \left(e^{\ell} - e^{-\ell}\right)} \]
            5. Taylor expanded in l around 0

              \[\leadsto U + J \cdot \left(2 \cdot \color{blue}{\ell}\right) \]
            6. Step-by-step derivation
              1. lower-*.f6455.3

                \[\leadsto U + J \cdot \left(2 \cdot \ell\right) \]
            7. Applied rewrites55.3%

              \[\leadsto U + J \cdot \left(2 \cdot \color{blue}{\ell}\right) \]
            8. Step-by-step derivation
              1. lift-+.f64N/A

                \[\leadsto U + \color{blue}{J \cdot \left(2 \cdot \ell\right)} \]
              2. +-commutativeN/A

                \[\leadsto J \cdot \left(2 \cdot \ell\right) + \color{blue}{U} \]
              3. lift-*.f64N/A

                \[\leadsto J \cdot \left(2 \cdot \ell\right) + U \]
              4. *-commutativeN/A

                \[\leadsto \left(2 \cdot \ell\right) \cdot J + U \]
              5. lower-fma.f6455.3

                \[\leadsto \mathsf{fma}\left(2 \cdot \ell, \color{blue}{J}, U\right) \]
              6. lift-*.f64N/A

                \[\leadsto \mathsf{fma}\left(2 \cdot \ell, J, U\right) \]
              7. count-2-revN/A

                \[\leadsto \mathsf{fma}\left(\ell + \ell, J, U\right) \]
              8. lower-+.f6455.3

                \[\leadsto \mathsf{fma}\left(\ell + \ell, J, U\right) \]
            9. Applied rewrites55.3%

              \[\leadsto \mathsf{fma}\left(\ell + \ell, \color{blue}{J}, U\right) \]
            10. Add Preprocessing

            Alternative 12: 37.2% accurate, 68.7× speedup?

            \[U \]
            (FPCore (J l K U) :precision binary64 U)
            /* Constant-term approximation: the whole expression collapses to U
               (Taylor expansion of the spec around J = 0; "Alternative 12"). */
            double code(double J, double l, double K, double U) {
            	return U;
            }
            
            ! NaN-aware fmax/fmin helpers emulating the C library semantics:
            ! if exactly one argument is NaN the other is returned, unlike the
            ! intrinsic max/min whose NaN behavior is unspecified.  The test
            ! (x /= x) is true only when x is NaN.  Mixed-kind variants
            ! promote the real(4) argument to real(8) via dble().
            module fmin_fmax_functions
                implicit none
                private
                public fmax
                public fmin

                ! Generic names dispatching on the kinds of the two arguments.
                interface fmax
                    module procedure fmax88
                    module procedure fmax44
                    module procedure fmax84
                    module procedure fmax48
                end interface
                interface fmin
                    module procedure fmin88
                    module procedure fmin44
                    module procedure fmin84
                    module procedure fmin48
                end interface
            contains
                ! real(8)/real(8) maximum; returns the non-NaN operand if any.
                real(8) function fmax88(x, y) result (res)
                    real(8), intent (in) :: x
                    real(8), intent (in) :: y
                    ! If x is NaN -> y; else if y is NaN -> x; else max(x, y).
                    res = merge(y, merge(x, max(x, y), y /= y), x /= x)
                end function
                ! real(4)/real(4) maximum; same NaN handling as fmax88.
                real(4) function fmax44(x, y) result (res)
                    real(4), intent (in) :: x
                    real(4), intent (in) :: y
                    res = merge(y, merge(x, max(x, y), y /= y), x /= x)
                end function
                ! real(8)/real(4) maximum; y is promoted to real(8).
                real(8) function fmax84(x, y) result(res)
                    real(8), intent (in) :: x
                    real(4), intent (in) :: y
                    res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
                end function
                ! real(4)/real(8) maximum; x is promoted to real(8).
                real(8) function fmax48(x, y) result(res)
                    real(4), intent (in) :: x
                    real(8), intent (in) :: y
                    res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
                end function
                ! real(8)/real(8) minimum; returns the non-NaN operand if any.
                real(8) function fmin88(x, y) result (res)
                    real(8), intent (in) :: x
                    real(8), intent (in) :: y
                    res = merge(y, merge(x, min(x, y), y /= y), x /= x)
                end function
                ! real(4)/real(4) minimum; same NaN handling as fmin88.
                real(4) function fmin44(x, y) result (res)
                    real(4), intent (in) :: x
                    real(4), intent (in) :: y
                    res = merge(y, merge(x, min(x, y), y /= y), x /= x)
                end function
                ! real(8)/real(4) minimum; y is promoted to real(8).
                real(8) function fmin84(x, y) result(res)
                    real(8), intent (in) :: x
                    real(4), intent (in) :: y
                    res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
                end function
                ! real(4)/real(8) minimum; x is promoted to real(8).
                real(8) function fmin48(x, y) result(res)
                    real(4), intent (in) :: x
                    real(8), intent (in) :: y
                    res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
                end function
            end module
            
            ! Herbie "Alternative 12": the full expression is approximated by
            ! its constant term, so the result is simply u.
            real(8) function code(j, l, k, u)
            use fmin_fmax_functions
                real(8), intent (in) :: j
                real(8), intent (in) :: l
                real(8), intent (in) :: k
                real(8), intent (in) :: u
                ! Taylor expansion around j = 0 leaves only the additive term u.
                code = u
            end function
            
            /** Constant-term approximation ("Alternative 12"): returns U unchanged. */
            public static double code(double J, double l, double K, double U) {
            	return U;
            }
            
            def code(J, l, K, U):
                """Herbie 'Alternative 12': approximate the whole expression by
                its constant term, so the result is just U (J, l, K are unused)."""
                return U
            
            # Constant-term approximation ("Alternative 12"): the result is just U.
            function code(J, l, K, U)
            	return U
            end
            
            % Constant-term approximation ("Alternative 12"): the result is just U.
            function tmp = code(J, l, K, U)
            	tmp = U;
            end
            
            code[J_, l_, K_, U_] := U
            
            U
            
            Derivation
            1. Initial program 86.0%

              \[\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U \]
            2. Taylor expanded in J around 0

              \[\leadsto \color{blue}{U} \]
            3. Step-by-step derivation
              1. Applied rewrites (37.2%)

                \[\leadsto \color{blue}{U} \]
              2. Add Preprocessing

              Reproduce

              ?
              herbie shell --seed 2025174 
              (FPCore (J l K U)
                :name "Maksimov and Kolovsky, Equation (4)"
                :precision binary64
                (+ (* (* J (- (exp l) (exp (- l)))) (cos (/ K 2.0))) U))