Maksimov and Kolovsky, Equation (4)

Percentage Accurate: 86.6% → 99.8%
Time: 9.3s
Alternatives: 14
Speedup: 1.5×

Specification

?
\[\begin{array}{l} \\ \left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U \end{array} \]
;; Input specification in FPCore: J*(e^l - e^-l)*cos(K/2) + U, evaluated
;; in binary64 (Maksimov & Kolovsky, Equation 4).
(FPCore (J l K U)
 :precision binary64
 (+ (* (* J (- (exp l) (exp (- l)))) (cos (/ K 2.0))) U))
// Computes J*(exp(l) - exp(-l))*cos(K/2) + U in binary64.
double code(double J, double l, double K, double U) {
	return ((J * (exp(l) - exp(-l))) * cos((K / 2.0))) + U;
}
! Computes j*(exp(l) - exp(-l))*cos(k/2) + u in double precision.
real(8) function code(j, l, k, u)
    real(8), intent (in) :: j
    real(8), intent (in) :: l
    real(8), intent (in) :: k
    real(8), intent (in) :: u
    code = ((j * (exp(l) - exp(-l))) * cos((k / 2.0d0))) + u
end function
// Computes J*(exp(l) - exp(-l))*cos(K/2) + U in binary64.
public static double code(double J, double l, double K, double U) {
	return ((J * (Math.exp(l) - Math.exp(-l))) * Math.cos((K / 2.0))) + U;
}
def code(J, l, K, U):
	"""Return J*(exp(l) - exp(-l))*cos(K/2) + U (binary64 reference form)."""
	return ((J * (math.exp(l) - math.exp(-l))) * math.cos((K / 2.0))) + U
# Computes J*(exp(l) - exp(-l))*cos(K/2) + U; each intermediate is converted
# to Float64 to force binary64 rounding at every step.
function code(J, l, K, U)
	return Float64(Float64(Float64(J * Float64(exp(l) - exp(Float64(-l)))) * cos(Float64(K / 2.0))) + U)
end
% Computes J*(exp(l) - exp(-l))*cos(K/2) + U.
function tmp = code(J, l, K, U)
	tmp = ((J * (exp(l) - exp(-l))) * cos((K / 2.0))) + U;
end
code[J_, l_, K_, U_] := N[(N[(N[(J * N[(N[Exp[l], $MachinePrecision] - N[Exp[(-l)], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[Cos[N[(K / 2.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] + U), $MachinePrecision]
\begin{array}{l}

\\
\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U
\end{array}

Sampling outcomes in binary64 precision:

Local Percentage Accuracy vs ?

The average percentage accuracy by input value. The horizontal axis shows the value of an input variable; the variable is chosen in the title. The vertical axis is accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion. These can be toggled with the buttons below the plot. The line is an average while dots represent individual samples.

Accuracy vs Speed?

Herbie found 14 alternatives:

Alternative | Accuracy | Speedup
The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed-accuracy tradeoffs.

Initial Program: 86.6% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U \end{array} \]
;; Initial program (86.6% accurate): J*(e^l - e^-l)*cos(K/2) + U in binary64.
(FPCore (J l K U)
 :precision binary64
 (+ (* (* J (- (exp l) (exp (- l)))) (cos (/ K 2.0))) U))
// Initial program: J*(exp(l) - exp(-l))*cos(K/2) + U in binary64.
double code(double J, double l, double K, double U) {
	return ((J * (exp(l) - exp(-l))) * cos((K / 2.0))) + U;
}
! Initial program: j*(exp(l) - exp(-l))*cos(k/2) + u in double precision.
real(8) function code(j, l, k, u)
    real(8), intent (in) :: j
    real(8), intent (in) :: l
    real(8), intent (in) :: k
    real(8), intent (in) :: u
    code = ((j * (exp(l) - exp(-l))) * cos((k / 2.0d0))) + u
end function
// Initial program: J*(exp(l) - exp(-l))*cos(K/2) + U in binary64.
public static double code(double J, double l, double K, double U) {
	return ((J * (Math.exp(l) - Math.exp(-l))) * Math.cos((K / 2.0))) + U;
}
def code(J, l, K, U):
	"""Initial program: J*(exp(l) - exp(-l))*cos(K/2) + U."""
	return ((J * (math.exp(l) - math.exp(-l))) * math.cos((K / 2.0))) + U
# Initial program: J*(exp(l) - exp(-l))*cos(K/2) + U, with Float64 conversions
# forcing binary64 rounding at every step.
function code(J, l, K, U)
	return Float64(Float64(Float64(J * Float64(exp(l) - exp(Float64(-l)))) * cos(Float64(K / 2.0))) + U)
end
% Initial program: J*(exp(l) - exp(-l))*cos(K/2) + U.
function tmp = code(J, l, K, U)
	tmp = ((J * (exp(l) - exp(-l))) * cos((K / 2.0))) + U;
end
code[J_, l_, K_, U_] := N[(N[(N[(J * N[(N[Exp[l], $MachinePrecision] - N[Exp[(-l)], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[Cos[N[(K / 2.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] + U), $MachinePrecision]
\begin{array}{l}

\\
\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U
\end{array}

Alternative 1: 99.8% accurate, 0.4× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_0 := e^{\ell} - e^{-\ell}\\ \mathbf{if}\;t_0 \leq -\infty \lor \neg \left(t_0 \leq 0.05\right):\\ \;\;\;\;\left(J \cdot t_0\right) \cdot \cos \left(\frac{K}{2}\right) + U\\ \mathbf{else}:\\ \;\;\;\;U + \left(J \cdot \cos \left(K \cdot 0.5\right)\right) \cdot \left(0.3333333333333333 \cdot {\ell}^{3} + \left(\ell \cdot 2 + 0.016666666666666666 \cdot {\ell}^{5}\right)\right)\\ \end{array} \end{array} \]
;; Alternative 1: when t_0 = e^l - e^-l is -inf, NaN, or > 0.05, keep the
;; original formula; otherwise replace e^l - e^-l with its Taylor polynomial
;; 2l + l^3/3 + l^5/60 (coefficients 2, 0.3333..., 0.01666...) to avoid
;; cancellation near l = 0.
(FPCore (J l K U)
 :precision binary64
 (let* ((t_0 (- (exp l) (exp (- l)))))
   (if (or (<= t_0 (- INFINITY)) (not (<= t_0 0.05)))
     (+ (* (* J t_0) (cos (/ K 2.0))) U)
     (+
      U
      (*
       (* J (cos (* K 0.5)))
       (+
        (* 0.3333333333333333 (pow l 3.0))
        (+ (* l 2.0) (* 0.016666666666666666 (pow l 5.0)))))))))
// Alternative 1: branch on t_0 = exp(l) - exp(-l).
// Note: !(t_0 <= 0.05) is also true when t_0 is NaN.
double code(double J, double l, double K, double U) {
	double t_0 = exp(l) - exp(-l);
	double tmp;
	if ((t_0 <= -((double) INFINITY)) || !(t_0 <= 0.05)) {
		tmp = ((J * t_0) * cos((K / 2.0))) + U;
	} else {
		// Small t_0: Taylor polynomial 2l + l^3/3 + l^5/60 replaces exp(l) - exp(-l).
		tmp = U + ((J * cos((K * 0.5))) * ((0.3333333333333333 * pow(l, 3.0)) + ((l * 2.0) + (0.016666666666666666 * pow(l, 5.0)))));
	}
	return tmp;
}
// Alternative 1: branch on t_0 = exp(l) - exp(-l); for small t_0 use the
// Taylor polynomial 2l + l^3/3 + l^5/60 in place of the exp difference.
public static double code(double J, double l, double K, double U) {
	double t_0 = Math.exp(l) - Math.exp(-l);
	double tmp;
	if ((t_0 <= -Double.POSITIVE_INFINITY) || !(t_0 <= 0.05)) {
		tmp = ((J * t_0) * Math.cos((K / 2.0))) + U;
	} else {
		tmp = U + ((J * Math.cos((K * 0.5))) * ((0.3333333333333333 * Math.pow(l, 3.0)) + ((l * 2.0) + (0.016666666666666666 * Math.pow(l, 5.0)))));
	}
	return tmp;
}
def code(J, l, K, U):
	"""Alternative 1: Taylor-polynomial branch for small exp(l) - exp(-l)."""
	t_0 = math.exp(l) - math.exp(-l)
	tmp = 0
	# not (t_0 <= 0.05) also catches NaN.
	if (t_0 <= -math.inf) or not (t_0 <= 0.05):
		tmp = ((J * t_0) * math.cos((K / 2.0))) + U
	else:
		# Small t_0: 2l + l^3/3 + l^5/60 replaces exp(l) - exp(-l).
		tmp = U + ((J * math.cos((K * 0.5))) * ((0.3333333333333333 * math.pow(l, 3.0)) + ((l * 2.0) + (0.016666666666666666 * math.pow(l, 5.0)))))
	return tmp
# Alternative 1: branch on t_0 = exp(l) - exp(-l); for small t_0 use the
# Taylor polynomial 2l + l^3/3 + l^5/60 in place of the exp difference.
function code(J, l, K, U)
	t_0 = Float64(exp(l) - exp(Float64(-l)))
	tmp = 0.0
	if ((t_0 <= Float64(-Inf)) || !(t_0 <= 0.05))
		tmp = Float64(Float64(Float64(J * t_0) * cos(Float64(K / 2.0))) + U);
	else
		tmp = Float64(U + Float64(Float64(J * cos(Float64(K * 0.5))) * Float64(Float64(0.3333333333333333 * (l ^ 3.0)) + Float64(Float64(l * 2.0) + Float64(0.016666666666666666 * (l ^ 5.0))))));
	end
	return tmp
end
% Alternative 1: branch on t_0 = exp(l) - exp(-l); for small t_0 use the
% Taylor polynomial 2l + l^3/3 + l^5/60 in place of the exp difference.
function tmp_2 = code(J, l, K, U)
	t_0 = exp(l) - exp(-l);
	tmp = 0.0;
	if ((t_0 <= -Inf) || ~((t_0 <= 0.05)))
		tmp = ((J * t_0) * cos((K / 2.0))) + U;
	else
		tmp = U + ((J * cos((K * 0.5))) * ((0.3333333333333333 * (l ^ 3.0)) + ((l * 2.0) + (0.016666666666666666 * (l ^ 5.0)))));
	end
	tmp_2 = tmp;
end
code[J_, l_, K_, U_] := Block[{t$95$0 = N[(N[Exp[l], $MachinePrecision] - N[Exp[(-l)], $MachinePrecision]), $MachinePrecision]}, If[Or[LessEqual[t$95$0, (-Infinity)], N[Not[LessEqual[t$95$0, 0.05]], $MachinePrecision]], N[(N[(N[(J * t$95$0), $MachinePrecision] * N[Cos[N[(K / 2.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] + U), $MachinePrecision], N[(U + N[(N[(J * N[Cos[N[(K * 0.5), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] * N[(N[(0.3333333333333333 * N[Power[l, 3.0], $MachinePrecision]), $MachinePrecision] + N[(N[(l * 2.0), $MachinePrecision] + N[(0.016666666666666666 * N[Power[l, 5.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
t_0 := e^{\ell} - e^{-\ell}\\
\mathbf{if}\;t_0 \leq -\infty \lor \neg \left(t_0 \leq 0.05\right):\\
\;\;\;\;\left(J \cdot t_0\right) \cdot \cos \left(\frac{K}{2}\right) + U\\

\mathbf{else}:\\
\;\;\;\;U + \left(J \cdot \cos \left(K \cdot 0.5\right)\right) \cdot \left(0.3333333333333333 \cdot {\ell}^{3} + \left(\ell \cdot 2 + 0.016666666666666666 \cdot {\ell}^{5}\right)\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if (-.f64 (exp.f64 l) (exp.f64 (neg.f64 l))) < -inf.0 or 0.050000000000000003 < (-.f64 (exp.f64 l) (exp.f64 (neg.f64 l)))

    1. Initial program 100.0%

      \[\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U \]

    if -inf.0 < (-.f64 (exp.f64 l) (exp.f64 (neg.f64 l))) < 0.050000000000000003

    1. Initial program 74.1%

      \[\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U \]
    2. Taylor expanded in l around 0 99.8%

      \[\leadsto \left(J \cdot \color{blue}{\left(0.3333333333333333 \cdot {\ell}^{3} + \left(0.016666666666666666 \cdot {\ell}^{5} + 2 \cdot \ell\right)\right)}\right) \cdot \cos \left(\frac{K}{2}\right) + U \]
    3. Taylor expanded in J around 0 99.9%

      \[\leadsto \color{blue}{\left(0.3333333333333333 \cdot {\ell}^{3} + \left(0.016666666666666666 \cdot {\ell}^{5} + 2 \cdot \ell\right)\right) \cdot \left(\cos \left(0.5 \cdot K\right) \cdot J\right)} + U \]
  3. Recombined 2 regimes into one program.
  4. Final simplification 99.9%

    \[\leadsto \begin{array}{l} \mathbf{if}\;e^{\ell} - e^{-\ell} \leq -\infty \lor \neg \left(e^{\ell} - e^{-\ell} \leq 0.05\right):\\ \;\;\;\;\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U\\ \mathbf{else}:\\ \;\;\;\;U + \left(J \cdot \cos \left(K \cdot 0.5\right)\right) \cdot \left(0.3333333333333333 \cdot {\ell}^{3} + \left(\ell \cdot 2 + 0.016666666666666666 \cdot {\ell}^{5}\right)\right)\\ \end{array} \]

Alternative 2: 97.5% accurate, 0.4× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_0 := \left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \cos \left(\frac{K}{2}\right)\\ \mathbf{if}\;t_0 \leq -\infty:\\ \;\;\;\;t_0 + U\\ \mathbf{else}:\\ \;\;\;\;U + \left(J \cdot \cos \left(K \cdot 0.5\right)\right) \cdot \left(0.3333333333333333 \cdot {\ell}^{3} + \left(0.0003968253968253968 \cdot {\ell}^{7} + \left(\ell \cdot 2 + 0.016666666666666666 \cdot {\ell}^{5}\right)\right)\right)\\ \end{array} \end{array} \]
;; Alternative 2: branch on the whole product t_0; unless t_0 is -inf, use
;; the Taylor polynomial 2l + l^3/3 + l^5/60 + l^7/2520 (the extra
;; 0.0003968... = 1/2520 term) for e^l - e^-l.
(FPCore (J l K U)
 :precision binary64
 (let* ((t_0 (* (* J (- (exp l) (exp (- l)))) (cos (/ K 2.0)))))
   (if (<= t_0 (- INFINITY))
     (+ t_0 U)
     (+
      U
      (*
       (* J (cos (* K 0.5)))
       (+
        (* 0.3333333333333333 (pow l 3.0))
        (+
         (* 0.0003968253968253968 (pow l 7.0))
         (+ (* l 2.0) (* 0.016666666666666666 (pow l 5.0))))))))))
// Alternative 2: fall back to the original formula only when the full
// product t_0 is -infinity; otherwise use the degree-7 Taylor polynomial
// 2l + l^3/3 + l^5/60 + l^7/2520 for exp(l) - exp(-l).
double code(double J, double l, double K, double U) {
	double t_0 = (J * (exp(l) - exp(-l))) * cos((K / 2.0));
	double tmp;
	if (t_0 <= -((double) INFINITY)) {
		tmp = t_0 + U;
	} else {
		tmp = U + ((J * cos((K * 0.5))) * ((0.3333333333333333 * pow(l, 3.0)) + ((0.0003968253968253968 * pow(l, 7.0)) + ((l * 2.0) + (0.016666666666666666 * pow(l, 5.0))))));
	}
	return tmp;
}
// Alternative 2: original formula only when the full product t_0 is -infinity;
// otherwise degree-7 Taylor polynomial 2l + l^3/3 + l^5/60 + l^7/2520.
public static double code(double J, double l, double K, double U) {
	double t_0 = (J * (Math.exp(l) - Math.exp(-l))) * Math.cos((K / 2.0));
	double tmp;
	if (t_0 <= -Double.POSITIVE_INFINITY) {
		tmp = t_0 + U;
	} else {
		tmp = U + ((J * Math.cos((K * 0.5))) * ((0.3333333333333333 * Math.pow(l, 3.0)) + ((0.0003968253968253968 * Math.pow(l, 7.0)) + ((l * 2.0) + (0.016666666666666666 * Math.pow(l, 5.0))))));
	}
	return tmp;
}
def code(J, l, K, U):
	"""Alternative 2: degree-7 Taylor branch unless the full product is -inf."""
	t_0 = (J * (math.exp(l) - math.exp(-l))) * math.cos((K / 2.0))
	tmp = 0
	if t_0 <= -math.inf:
		tmp = t_0 + U
	else:
		# 2l + l^3/3 + l^5/60 + l^7/2520 replaces exp(l) - exp(-l).
		tmp = U + ((J * math.cos((K * 0.5))) * ((0.3333333333333333 * math.pow(l, 3.0)) + ((0.0003968253968253968 * math.pow(l, 7.0)) + ((l * 2.0) + (0.016666666666666666 * math.pow(l, 5.0))))))
	return tmp
# Alternative 2: original formula only when the full product t_0 is -Inf;
# otherwise degree-7 Taylor polynomial 2l + l^3/3 + l^5/60 + l^7/2520.
function code(J, l, K, U)
	t_0 = Float64(Float64(J * Float64(exp(l) - exp(Float64(-l)))) * cos(Float64(K / 2.0)))
	tmp = 0.0
	if (t_0 <= Float64(-Inf))
		tmp = Float64(t_0 + U);
	else
		tmp = Float64(U + Float64(Float64(J * cos(Float64(K * 0.5))) * Float64(Float64(0.3333333333333333 * (l ^ 3.0)) + Float64(Float64(0.0003968253968253968 * (l ^ 7.0)) + Float64(Float64(l * 2.0) + Float64(0.016666666666666666 * (l ^ 5.0)))))));
	end
	return tmp
end
% Alternative 2: original formula only when the full product t_0 is -Inf;
% otherwise degree-7 Taylor polynomial 2l + l^3/3 + l^5/60 + l^7/2520.
function tmp_2 = code(J, l, K, U)
	t_0 = (J * (exp(l) - exp(-l))) * cos((K / 2.0));
	tmp = 0.0;
	if (t_0 <= -Inf)
		tmp = t_0 + U;
	else
		tmp = U + ((J * cos((K * 0.5))) * ((0.3333333333333333 * (l ^ 3.0)) + ((0.0003968253968253968 * (l ^ 7.0)) + ((l * 2.0) + (0.016666666666666666 * (l ^ 5.0))))));
	end
	tmp_2 = tmp;
end
code[J_, l_, K_, U_] := Block[{t$95$0 = N[(N[(J * N[(N[Exp[l], $MachinePrecision] - N[Exp[(-l)], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[Cos[N[(K / 2.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]}, If[LessEqual[t$95$0, (-Infinity)], N[(t$95$0 + U), $MachinePrecision], N[(U + N[(N[(J * N[Cos[N[(K * 0.5), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] * N[(N[(0.3333333333333333 * N[Power[l, 3.0], $MachinePrecision]), $MachinePrecision] + N[(N[(0.0003968253968253968 * N[Power[l, 7.0], $MachinePrecision]), $MachinePrecision] + N[(N[(l * 2.0), $MachinePrecision] + N[(0.016666666666666666 * N[Power[l, 5.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
t_0 := \left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \cos \left(\frac{K}{2}\right)\\
\mathbf{if}\;t_0 \leq -\infty:\\
\;\;\;\;t_0 + U\\

\mathbf{else}:\\
\;\;\;\;U + \left(J \cdot \cos \left(K \cdot 0.5\right)\right) \cdot \left(0.3333333333333333 \cdot {\ell}^{3} + \left(0.0003968253968253968 \cdot {\ell}^{7} + \left(\ell \cdot 2 + 0.016666666666666666 \cdot {\ell}^{5}\right)\right)\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if (*.f64 (*.f64 J (-.f64 (exp.f64 l) (exp.f64 (neg.f64 l)))) (cos.f64 (/.f64 K 2))) < -inf.0

    1. Initial program 100.0%

      \[\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U \]

    if -inf.0 < (*.f64 (*.f64 J (-.f64 (exp.f64 l) (exp.f64 (neg.f64 l)))) (cos.f64 (/.f64 K 2)))

    1. Initial program 81.4%

      \[\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U \]
    2. Taylor expanded in l around 0 98.2%

      \[\leadsto \left(J \cdot \color{blue}{\left(0.3333333333333333 \cdot {\ell}^{3} + \left(0.0003968253968253968 \cdot {\ell}^{7} + \left(0.016666666666666666 \cdot {\ell}^{5} + 2 \cdot \ell\right)\right)\right)}\right) \cdot \cos \left(\frac{K}{2}\right) + U \]
    3. Taylor expanded in J around 0 98.2%

      \[\leadsto \color{blue}{\left(0.3333333333333333 \cdot {\ell}^{3} + \left(0.0003968253968253968 \cdot {\ell}^{7} + \left(2 \cdot \ell + 0.016666666666666666 \cdot {\ell}^{5}\right)\right)\right) \cdot \left(\cos \left(0.5 \cdot K\right) \cdot J\right)} + U \]
  3. Recombined 2 regimes into one program.
  4. Final simplification 98.7%

    \[\leadsto \begin{array}{l} \mathbf{if}\;\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \cos \left(\frac{K}{2}\right) \leq -\infty:\\ \;\;\;\;\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U\\ \mathbf{else}:\\ \;\;\;\;U + \left(J \cdot \cos \left(K \cdot 0.5\right)\right) \cdot \left(0.3333333333333333 \cdot {\ell}^{3} + \left(0.0003968253968253968 \cdot {\ell}^{7} + \left(\ell \cdot 2 + 0.016666666666666666 \cdot {\ell}^{5}\right)\right)\right)\\ \end{array} \]

Alternative 3: 97.5% accurate, 0.4× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_0 := \cos \left(\frac{K}{2}\right)\\ t_1 := \left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot t_0\\ \mathbf{if}\;t_1 \leq -\infty:\\ \;\;\;\;t_1 + U\\ \mathbf{else}:\\ \;\;\;\;U + t_0 \cdot \left(J \cdot \left(0.3333333333333333 \cdot {\ell}^{3} + \left(0.0003968253968253968 \cdot {\ell}^{7} + \left(\ell \cdot 2 + 0.016666666666666666 \cdot {\ell}^{5}\right)\right)\right)\right)\\ \end{array} \end{array} \]
;; Alternative 3: like Alternative 2, but hoists t_0 = cos(K/2) so it is
;; computed once and reused in both branches.
(FPCore (J l K U)
 :precision binary64
 (let* ((t_0 (cos (/ K 2.0))) (t_1 (* (* J (- (exp l) (exp (- l)))) t_0)))
   (if (<= t_1 (- INFINITY))
     (+ t_1 U)
     (+
      U
      (*
       t_0
       (*
        J
        (+
         (* 0.3333333333333333 (pow l 3.0))
         (+
          (* 0.0003968253968253968 (pow l 7.0))
          (+ (* l 2.0) (* 0.016666666666666666 (pow l 5.0)))))))))))
// Alternative 3: like Alternative 2, but cos(K/2) is hoisted into t_0 and
// reused in both branches.
double code(double J, double l, double K, double U) {
	double t_0 = cos((K / 2.0));
	double t_1 = (J * (exp(l) - exp(-l))) * t_0;
	double tmp;
	if (t_1 <= -((double) INFINITY)) {
		tmp = t_1 + U;
	} else {
		tmp = U + (t_0 * (J * ((0.3333333333333333 * pow(l, 3.0)) + ((0.0003968253968253968 * pow(l, 7.0)) + ((l * 2.0) + (0.016666666666666666 * pow(l, 5.0)))))));
	}
	return tmp;
}
// Alternative 3: like Alternative 2, but cos(K/2) is hoisted into t_0 and
// reused in both branches.
public static double code(double J, double l, double K, double U) {
	double t_0 = Math.cos((K / 2.0));
	double t_1 = (J * (Math.exp(l) - Math.exp(-l))) * t_0;
	double tmp;
	if (t_1 <= -Double.POSITIVE_INFINITY) {
		tmp = t_1 + U;
	} else {
		tmp = U + (t_0 * (J * ((0.3333333333333333 * Math.pow(l, 3.0)) + ((0.0003968253968253968 * Math.pow(l, 7.0)) + ((l * 2.0) + (0.016666666666666666 * Math.pow(l, 5.0)))))));
	}
	return tmp;
}
def code(J, l, K, U):
	"""Alternative 3: like Alternative 2, with cos(K/2) hoisted and reused."""
	t_0 = math.cos((K / 2.0))
	t_1 = (J * (math.exp(l) - math.exp(-l))) * t_0
	tmp = 0
	if t_1 <= -math.inf:
		tmp = t_1 + U
	else:
		tmp = U + (t_0 * (J * ((0.3333333333333333 * math.pow(l, 3.0)) + ((0.0003968253968253968 * math.pow(l, 7.0)) + ((l * 2.0) + (0.016666666666666666 * math.pow(l, 5.0)))))))
	return tmp
# Alternative 3: like Alternative 2, but cos(K/2) is hoisted into t_0 and
# reused in both branches.
function code(J, l, K, U)
	t_0 = cos(Float64(K / 2.0))
	t_1 = Float64(Float64(J * Float64(exp(l) - exp(Float64(-l)))) * t_0)
	tmp = 0.0
	if (t_1 <= Float64(-Inf))
		tmp = Float64(t_1 + U);
	else
		tmp = Float64(U + Float64(t_0 * Float64(J * Float64(Float64(0.3333333333333333 * (l ^ 3.0)) + Float64(Float64(0.0003968253968253968 * (l ^ 7.0)) + Float64(Float64(l * 2.0) + Float64(0.016666666666666666 * (l ^ 5.0))))))));
	end
	return tmp
end
% Alternative 3: like Alternative 2, but cos(K/2) is hoisted into t_0 and
% reused in both branches.
function tmp_2 = code(J, l, K, U)
	t_0 = cos((K / 2.0));
	t_1 = (J * (exp(l) - exp(-l))) * t_0;
	tmp = 0.0;
	if (t_1 <= -Inf)
		tmp = t_1 + U;
	else
		tmp = U + (t_0 * (J * ((0.3333333333333333 * (l ^ 3.0)) + ((0.0003968253968253968 * (l ^ 7.0)) + ((l * 2.0) + (0.016666666666666666 * (l ^ 5.0)))))));
	end
	tmp_2 = tmp;
end
code[J_, l_, K_, U_] := Block[{t$95$0 = N[Cos[N[(K / 2.0), $MachinePrecision]], $MachinePrecision]}, Block[{t$95$1 = N[(N[(J * N[(N[Exp[l], $MachinePrecision] - N[Exp[(-l)], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * t$95$0), $MachinePrecision]}, If[LessEqual[t$95$1, (-Infinity)], N[(t$95$1 + U), $MachinePrecision], N[(U + N[(t$95$0 * N[(J * N[(N[(0.3333333333333333 * N[Power[l, 3.0], $MachinePrecision]), $MachinePrecision] + N[(N[(0.0003968253968253968 * N[Power[l, 7.0], $MachinePrecision]), $MachinePrecision] + N[(N[(l * 2.0), $MachinePrecision] + N[(0.016666666666666666 * N[Power[l, 5.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}

\\
\begin{array}{l}
t_0 := \cos \left(\frac{K}{2}\right)\\
t_1 := \left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot t_0\\
\mathbf{if}\;t_1 \leq -\infty:\\
\;\;\;\;t_1 + U\\

\mathbf{else}:\\
\;\;\;\;U + t_0 \cdot \left(J \cdot \left(0.3333333333333333 \cdot {\ell}^{3} + \left(0.0003968253968253968 \cdot {\ell}^{7} + \left(\ell \cdot 2 + 0.016666666666666666 \cdot {\ell}^{5}\right)\right)\right)\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if (*.f64 (*.f64 J (-.f64 (exp.f64 l) (exp.f64 (neg.f64 l)))) (cos.f64 (/.f64 K 2))) < -inf.0

    1. Initial program 100.0%

      \[\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U \]

    if -inf.0 < (*.f64 (*.f64 J (-.f64 (exp.f64 l) (exp.f64 (neg.f64 l)))) (cos.f64 (/.f64 K 2)))

    1. Initial program 81.4%

      \[\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U \]
    2. Taylor expanded in l around 0 98.2%

      \[\leadsto \left(J \cdot \color{blue}{\left(0.3333333333333333 \cdot {\ell}^{3} + \left(0.0003968253968253968 \cdot {\ell}^{7} + \left(0.016666666666666666 \cdot {\ell}^{5} + 2 \cdot \ell\right)\right)\right)}\right) \cdot \cos \left(\frac{K}{2}\right) + U \]
  3. Recombined 2 regimes into one program.
  4. Final simplification 98.7%

    \[\leadsto \begin{array}{l} \mathbf{if}\;\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \cos \left(\frac{K}{2}\right) \leq -\infty:\\ \;\;\;\;\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U\\ \mathbf{else}:\\ \;\;\;\;U + \cos \left(\frac{K}{2}\right) \cdot \left(J \cdot \left(0.3333333333333333 \cdot {\ell}^{3} + \left(0.0003968253968253968 \cdot {\ell}^{7} + \left(\ell \cdot 2 + 0.016666666666666666 \cdot {\ell}^{5}\right)\right)\right)\right)\\ \end{array} \]

Alternative 4: 99.8% accurate, 0.4× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_0 := \cos \left(\frac{K}{2}\right)\\ t_1 := e^{\ell} - e^{-\ell}\\ \mathbf{if}\;t_1 \leq -\infty \lor \neg \left(t_1 \leq 0.05\right):\\ \;\;\;\;\left(J \cdot t_1\right) \cdot t_0 + U\\ \mathbf{else}:\\ \;\;\;\;U + t_0 \cdot \left(J \cdot \left(0.3333333333333333 \cdot {\ell}^{3} + \left(\ell \cdot 2 + 0.016666666666666666 \cdot {\ell}^{5}\right)\right)\right)\\ \end{array} \end{array} \]
;; Alternative 4: like Alternative 1 (threshold 0.05, degree-5 Taylor
;; polynomial 2l + l^3/3 + l^5/60), but cos(K/2) is hoisted into t_0 and
;; reused in both branches.
(FPCore (J l K U)
 :precision binary64
 (let* ((t_0 (cos (/ K 2.0))) (t_1 (- (exp l) (exp (- l)))))
   (if (or (<= t_1 (- INFINITY)) (not (<= t_1 0.05)))
     (+ (* (* J t_1) t_0) U)
     (+
      U
      (*
       t_0
       (*
        J
        (+
         (* 0.3333333333333333 (pow l 3.0))
         (+ (* l 2.0) (* 0.016666666666666666 (pow l 5.0))))))))))
// Alternative 4: like Alternative 1 (threshold 0.05, Taylor polynomial
// 2l + l^3/3 + l^5/60), but cos(K/2) is hoisted into t_0.
double code(double J, double l, double K, double U) {
	double t_0 = cos((K / 2.0));
	double t_1 = exp(l) - exp(-l);
	double tmp;
	if ((t_1 <= -((double) INFINITY)) || !(t_1 <= 0.05)) {
		tmp = ((J * t_1) * t_0) + U;
	} else {
		tmp = U + (t_0 * (J * ((0.3333333333333333 * pow(l, 3.0)) + ((l * 2.0) + (0.016666666666666666 * pow(l, 5.0))))));
	}
	return tmp;
}
// Alternative 4: like Alternative 1 (threshold 0.05, Taylor polynomial
// 2l + l^3/3 + l^5/60), but cos(K/2) is hoisted into t_0.
public static double code(double J, double l, double K, double U) {
	double t_0 = Math.cos((K / 2.0));
	double t_1 = Math.exp(l) - Math.exp(-l);
	double tmp;
	if ((t_1 <= -Double.POSITIVE_INFINITY) || !(t_1 <= 0.05)) {
		tmp = ((J * t_1) * t_0) + U;
	} else {
		tmp = U + (t_0 * (J * ((0.3333333333333333 * Math.pow(l, 3.0)) + ((l * 2.0) + (0.016666666666666666 * Math.pow(l, 5.0))))));
	}
	return tmp;
}
def code(J, l, K, U):
	"""Alternative 4: like Alternative 1, with cos(K/2) hoisted and reused."""
	t_0 = math.cos((K / 2.0))
	t_1 = math.exp(l) - math.exp(-l)
	tmp = 0
	if (t_1 <= -math.inf) or not (t_1 <= 0.05):
		tmp = ((J * t_1) * t_0) + U
	else:
		# Small t_1: 2l + l^3/3 + l^5/60 replaces exp(l) - exp(-l).
		tmp = U + (t_0 * (J * ((0.3333333333333333 * math.pow(l, 3.0)) + ((l * 2.0) + (0.016666666666666666 * math.pow(l, 5.0))))))
	return tmp
# Alternative 4: like Alternative 1 (threshold 0.05, Taylor polynomial
# 2l + l^3/3 + l^5/60), but cos(K/2) is hoisted into t_0.
function code(J, l, K, U)
	t_0 = cos(Float64(K / 2.0))
	t_1 = Float64(exp(l) - exp(Float64(-l)))
	tmp = 0.0
	if ((t_1 <= Float64(-Inf)) || !(t_1 <= 0.05))
		tmp = Float64(Float64(Float64(J * t_1) * t_0) + U);
	else
		tmp = Float64(U + Float64(t_0 * Float64(J * Float64(Float64(0.3333333333333333 * (l ^ 3.0)) + Float64(Float64(l * 2.0) + Float64(0.016666666666666666 * (l ^ 5.0)))))));
	end
	return tmp
end
% Alternative 4: like Alternative 1 (threshold 0.05, Taylor polynomial
% 2l + l^3/3 + l^5/60), but cos(K/2) is hoisted into t_0.
function tmp_2 = code(J, l, K, U)
	t_0 = cos((K / 2.0));
	t_1 = exp(l) - exp(-l);
	tmp = 0.0;
	if ((t_1 <= -Inf) || ~((t_1 <= 0.05)))
		tmp = ((J * t_1) * t_0) + U;
	else
		tmp = U + (t_0 * (J * ((0.3333333333333333 * (l ^ 3.0)) + ((l * 2.0) + (0.016666666666666666 * (l ^ 5.0))))));
	end
	tmp_2 = tmp;
end
code[J_, l_, K_, U_] := Block[{t$95$0 = N[Cos[N[(K / 2.0), $MachinePrecision]], $MachinePrecision]}, Block[{t$95$1 = N[(N[Exp[l], $MachinePrecision] - N[Exp[(-l)], $MachinePrecision]), $MachinePrecision]}, If[Or[LessEqual[t$95$1, (-Infinity)], N[Not[LessEqual[t$95$1, 0.05]], $MachinePrecision]], N[(N[(N[(J * t$95$1), $MachinePrecision] * t$95$0), $MachinePrecision] + U), $MachinePrecision], N[(U + N[(t$95$0 * N[(J * N[(N[(0.3333333333333333 * N[Power[l, 3.0], $MachinePrecision]), $MachinePrecision] + N[(N[(l * 2.0), $MachinePrecision] + N[(0.016666666666666666 * N[Power[l, 5.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}

\\
\begin{array}{l}
t_0 := \cos \left(\frac{K}{2}\right)\\
t_1 := e^{\ell} - e^{-\ell}\\
\mathbf{if}\;t_1 \leq -\infty \lor \neg \left(t_1 \leq 0.05\right):\\
\;\;\;\;\left(J \cdot t_1\right) \cdot t_0 + U\\

\mathbf{else}:\\
\;\;\;\;U + t_0 \cdot \left(J \cdot \left(0.3333333333333333 \cdot {\ell}^{3} + \left(\ell \cdot 2 + 0.016666666666666666 \cdot {\ell}^{5}\right)\right)\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if (-.f64 (exp.f64 l) (exp.f64 (neg.f64 l))) < -inf.0 or 0.050000000000000003 < (-.f64 (exp.f64 l) (exp.f64 (neg.f64 l)))

    1. Initial program 100.0%

      \[\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U \]

    if -inf.0 < (-.f64 (exp.f64 l) (exp.f64 (neg.f64 l))) < 0.050000000000000003

    1. Initial program 74.1%

      \[\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U \]
    2. Taylor expanded in l around 0 99.8%

      \[\leadsto \left(J \cdot \color{blue}{\left(0.3333333333333333 \cdot {\ell}^{3} + \left(0.016666666666666666 \cdot {\ell}^{5} + 2 \cdot \ell\right)\right)}\right) \cdot \cos \left(\frac{K}{2}\right) + U \]
  3. Recombined 2 regimes into one program.
  4. Final simplification 99.9%

    \[\leadsto \begin{array}{l} \mathbf{if}\;e^{\ell} - e^{-\ell} \leq -\infty \lor \neg \left(e^{\ell} - e^{-\ell} \leq 0.05\right):\\ \;\;\;\;\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U\\ \mathbf{else}:\\ \;\;\;\;U + \cos \left(\frac{K}{2}\right) \cdot \left(J \cdot \left(0.3333333333333333 \cdot {\ell}^{3} + \left(\ell \cdot 2 + 0.016666666666666666 \cdot {\ell}^{5}\right)\right)\right)\\ \end{array} \]

Alternative 5: 99.8% accurate, 0.4× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_0 := \cos \left(\frac{K}{2}\right)\\ t_1 := e^{\ell} - e^{-\ell}\\ \mathbf{if}\;t_1 \leq -\infty \lor \neg \left(t_1 \leq 0.002\right):\\ \;\;\;\;\left(J \cdot t_1\right) \cdot t_0 + U\\ \mathbf{else}:\\ \;\;\;\;U + t_0 \cdot \left(\ell \cdot \left(J \cdot \mathsf{fma}\left(0.3333333333333333, \ell \cdot \ell, 2\right)\right)\right)\\ \end{array} \end{array} \]
;; Alternative 5: tighter threshold (0.002) and a cubic-only polynomial,
;; l*(2 + l^2/3), evaluated with fma for the small-t_1 branch.
(FPCore (J l K U)
 :precision binary64
 (let* ((t_0 (cos (/ K 2.0))) (t_1 (- (exp l) (exp (- l)))))
   (if (or (<= t_1 (- INFINITY)) (not (<= t_1 0.002)))
     (+ (* (* J t_1) t_0) U)
     (+ U (* t_0 (* l (* J (fma 0.3333333333333333 (* l l) 2.0))))))))
// Alternative 5: tighter threshold (0.002) and a cubic-only polynomial
// l*(2 + l^2/3), evaluated with fma, for the small-t_1 branch.
double code(double J, double l, double K, double U) {
	double t_0 = cos((K / 2.0));
	double t_1 = exp(l) - exp(-l);
	double tmp;
	if ((t_1 <= -((double) INFINITY)) || !(t_1 <= 0.002)) {
		tmp = ((J * t_1) * t_0) + U;
	} else {
		tmp = U + (t_0 * (l * (J * fma(0.3333333333333333, (l * l), 2.0))));
	}
	return tmp;
}
# Alternative 5: tighter threshold (0.002) and a cubic-only polynomial
# l*(2 + l^2/3), evaluated with fma, for the small-t_1 branch.
function code(J, l, K, U)
	t_0 = cos(Float64(K / 2.0))
	t_1 = Float64(exp(l) - exp(Float64(-l)))
	tmp = 0.0
	if ((t_1 <= Float64(-Inf)) || !(t_1 <= 0.002))
		tmp = Float64(Float64(Float64(J * t_1) * t_0) + U);
	else
		tmp = Float64(U + Float64(t_0 * Float64(l * Float64(J * fma(0.3333333333333333, Float64(l * l), 2.0)))));
	end
	return tmp
end
code[J_, l_, K_, U_] := Block[{t$95$0 = N[Cos[N[(K / 2.0), $MachinePrecision]], $MachinePrecision]}, Block[{t$95$1 = N[(N[Exp[l], $MachinePrecision] - N[Exp[(-l)], $MachinePrecision]), $MachinePrecision]}, If[Or[LessEqual[t$95$1, (-Infinity)], N[Not[LessEqual[t$95$1, 0.002]], $MachinePrecision]], N[(N[(N[(J * t$95$1), $MachinePrecision] * t$95$0), $MachinePrecision] + U), $MachinePrecision], N[(U + N[(t$95$0 * N[(l * N[(J * N[(0.3333333333333333 * N[(l * l), $MachinePrecision] + 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}

\\
\begin{array}{l}
t_0 := \cos \left(\frac{K}{2}\right)\\
t_1 := e^{\ell} - e^{-\ell}\\
\mathbf{if}\;t_1 \leq -\infty \lor \neg \left(t_1 \leq 0.002\right):\\
\;\;\;\;\left(J \cdot t_1\right) \cdot t_0 + U\\

\mathbf{else}:\\
\;\;\;\;U + t_0 \cdot \left(\ell \cdot \left(J \cdot \mathsf{fma}\left(0.3333333333333333, \ell \cdot \ell, 2\right)\right)\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if (-.f64 (exp.f64 l) (exp.f64 (neg.f64 l))) < -inf.0 or 2e-3 < (-.f64 (exp.f64 l) (exp.f64 (neg.f64 l)))

    1. Initial program 99.9%

      \[\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U \]

    if -inf.0 < (-.f64 (exp.f64 l) (exp.f64 (neg.f64 l))) < 2e-3

    1. Initial program 74.0%

      \[\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U \]
    2. Taylor expanded in l around 0 99.9%

      \[\leadsto \left(J \cdot \color{blue}{\left(0.3333333333333333 \cdot {\ell}^{3} + \left(0.0003968253968253968 \cdot {\ell}^{7} + \left(0.016666666666666666 \cdot {\ell}^{5} + 2 \cdot \ell\right)\right)\right)}\right) \cdot \cos \left(\frac{K}{2}\right) + U \]
    3. Taylor expanded in l around 0 99.8%

      \[\leadsto \color{blue}{\left(2 \cdot \left(\ell \cdot J\right) + 0.3333333333333333 \cdot \left({\ell}^{3} \cdot J\right)\right)} \cdot \cos \left(\frac{K}{2}\right) + U \]
    4. Step-by-step derivation
      1. associate-*r* 99.8%

        \[\leadsto \left(\color{blue}{\left(2 \cdot \ell\right) \cdot J} + 0.3333333333333333 \cdot \left({\ell}^{3} \cdot J\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U \]
      2. associate-*r* 99.8%

        \[\leadsto \left(\left(2 \cdot \ell\right) \cdot J + \color{blue}{\left(0.3333333333333333 \cdot {\ell}^{3}\right) \cdot J}\right) \cdot \cos \left(\frac{K}{2}\right) + U \]
      3. distribute-rgt-out 99.8%

        \[\leadsto \color{blue}{\left(J \cdot \left(2 \cdot \ell + 0.3333333333333333 \cdot {\ell}^{3}\right)\right)} \cdot \cos \left(\frac{K}{2}\right) + U \]
      4. *-commutative 99.8%

        \[\leadsto \left(J \cdot \left(\color{blue}{\ell \cdot 2} + 0.3333333333333333 \cdot {\ell}^{3}\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U \]
      5. *-commutative 99.8%

        \[\leadsto \left(J \cdot \left(\ell \cdot 2 + \color{blue}{{\ell}^{3} \cdot 0.3333333333333333}\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U \]
      6. cube-mult 99.8%

        \[\leadsto \left(J \cdot \left(\ell \cdot 2 + \color{blue}{\left(\ell \cdot \left(\ell \cdot \ell\right)\right)} \cdot 0.3333333333333333\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U \]
      7. associate-*l* 99.8%

        \[\leadsto \left(J \cdot \left(\ell \cdot 2 + \color{blue}{\ell \cdot \left(\left(\ell \cdot \ell\right) \cdot 0.3333333333333333\right)}\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U \]
      8. distribute-lft-out 99.8%

        \[\leadsto \left(J \cdot \color{blue}{\left(\ell \cdot \left(2 + \left(\ell \cdot \ell\right) \cdot 0.3333333333333333\right)\right)}\right) \cdot \cos \left(\frac{K}{2}\right) + U \]
    5. Simplified 99.8%

      \[\leadsto \color{blue}{\left(J \cdot \left(\ell \cdot \left(2 + \left(\ell \cdot \ell\right) \cdot 0.3333333333333333\right)\right)\right)} \cdot \cos \left(\frac{K}{2}\right) + U \]
    6. Taylor expanded in J around 0 99.8%

      \[\leadsto \color{blue}{\left(\ell \cdot \left(\left(2 + 0.3333333333333333 \cdot {\ell}^{2}\right) \cdot J\right)\right)} \cdot \cos \left(\frac{K}{2}\right) + U \]
    7. Step-by-step derivation
      1. *-commutative 99.8%

        \[\leadsto \left(\ell \cdot \color{blue}{\left(J \cdot \left(2 + 0.3333333333333333 \cdot {\ell}^{2}\right)\right)}\right) \cdot \cos \left(\frac{K}{2}\right) + U \]
      2. +-commutative 99.8%

        \[\leadsto \left(\ell \cdot \left(J \cdot \color{blue}{\left(0.3333333333333333 \cdot {\ell}^{2} + 2\right)}\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U \]
      3. unpow2 99.8%

        \[\leadsto \left(\ell \cdot \left(J \cdot \left(0.3333333333333333 \cdot \color{blue}{\left(\ell \cdot \ell\right)} + 2\right)\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U \]
      4. fma-udef 99.8%

        \[\leadsto \left(\ell \cdot \left(J \cdot \color{blue}{\mathsf{fma}\left(0.3333333333333333, \ell \cdot \ell, 2\right)}\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U \]
    8. Simplified 99.8%

      \[\leadsto \color{blue}{\left(\ell \cdot \left(J \cdot \mathsf{fma}\left(0.3333333333333333, \ell \cdot \ell, 2\right)\right)\right)} \cdot \cos \left(\frac{K}{2}\right) + U \]
  3. Recombined 2 regimes into one program.
  4. Final simplification 99.9%

    \[\leadsto \begin{array}{l} \mathbf{if}\;e^{\ell} - e^{-\ell} \leq -\infty \lor \neg \left(e^{\ell} - e^{-\ell} \leq 0.002\right):\\ \;\;\;\;\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U\\ \mathbf{else}:\\ \;\;\;\;U + \cos \left(\frac{K}{2}\right) \cdot \left(\ell \cdot \left(J \cdot \mathsf{fma}\left(0.3333333333333333, \ell \cdot \ell, 2\right)\right)\right)\\ \end{array} \]

Alternative 6: 93.9% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_0 := \cos \left(\frac{K}{2}\right)\\ \mathbf{if}\;t_0 \leq 0.988:\\ \;\;\;\;U + t_0 \cdot \left(2 \cdot \left(J \cdot \ell\right) + 0.3333333333333333 \cdot \left(J \cdot {\ell}^{3}\right)\right)\\ \mathbf{else}:\\ \;\;\;\;U + J \cdot \left(2 \cdot \sinh \ell\right)\\ \end{array} \end{array} \]
;; Alternative 6: branch on t_0 = cos(K/2). When t_0 <= 0.988, use the cubic
;; polynomial (2Jl + J l^3/3)*t_0; otherwise use U + J*2*sinh(l), i.e. the
;; exp difference written as 2 sinh(l) with cos(K/2) effectively treated as 1.
(FPCore (J l K U)
 :precision binary64
 (let* ((t_0 (cos (/ K 2.0))))
   (if (<= t_0 0.988)
     (+ U (* t_0 (+ (* 2.0 (* J l)) (* 0.3333333333333333 (* J (pow l 3.0))))))
     (+ U (* J (* 2.0 (sinh l)))))))
/* Herbie alternative 6 (93.9% accurate): piecewise rewrite of
 * J*(exp(l) - exp(-l))*cos(K/2) + U in binary64. */
double code(double J, double l, double K, double U) {
	double t_0 = cos((K / 2.0));
	double tmp;
	if (t_0 <= 0.988) {
		/* cubic Taylor series of exp(l) - exp(-l) around l = 0: 2*l + l^3/3 */
		tmp = U + (t_0 * ((2.0 * (J * l)) + (0.3333333333333333 * (J * pow(l, 3.0)))));
	} else {
		/* cos(K/2) near 1 (K near 0): exact identity exp(l) - exp(-l) == 2*sinh(l) */
		tmp = U + (J * (2.0 * sinh(l)));
	}
	return tmp;
}
! Herbie alternative 6: piecewise rewrite of j*(exp(l)-exp(-l))*cos(k/2) + u.
! Cubic Taylor series in l when cos(k/2) <= 0.988; exact 2*sinh(l) form otherwise.
real(8) function code(j, l, k, u)
    real(8), intent (in) :: j
    real(8), intent (in) :: l
    real(8), intent (in) :: k
    real(8), intent (in) :: u
    real(8) :: t_0
    real(8) :: tmp
    t_0 = cos((k / 2.0d0))
    if (t_0 <= 0.988d0) then
        tmp = u + (t_0 * ((2.0d0 * (j * l)) + (0.3333333333333333d0 * (j * (l ** 3.0d0)))))
    else
        tmp = u + (j * (2.0d0 * sinh(l)))
    end if
    code = tmp
end function
// Herbie alternative 6 (93.9% accurate): piecewise rewrite of
// J*(exp(l) - exp(-l))*cos(K/2) + U; series branch when cos(K/2) <= 0.988.
public static double code(double J, double l, double K, double U) {
	double t_0 = Math.cos((K / 2.0));
	double tmp;
	if (t_0 <= 0.988) {
		// cubic Taylor series of exp(l) - exp(-l): 2*l + l^3/3
		tmp = U + (t_0 * ((2.0 * (J * l)) + (0.3333333333333333 * (J * Math.pow(l, 3.0)))));
	} else {
		// K near 0: exact identity exp(l) - exp(-l) == 2*sinh(l)
		tmp = U + (J * (2.0 * Math.sinh(l)));
	}
	return tmp;
}
def code(J, l, K, U):
	# Herbie alternative 6 for J*(e^l - e^-l)*cos(K/2) + U.
	cos_half = math.cos(K / 2.0)
	if cos_half <= 0.988:
		# Cubic Taylor series of e^l - e^-l around l = 0: 2*l + l^3/3.
		series = 2.0 * (J * l) + 0.3333333333333333 * (J * math.pow(l, 3.0))
		return U + cos_half * series
	# cos(K/2) near 1 (K near 0): use the exact 2*sinh(l) form.
	return U + J * (2.0 * math.sinh(l))
# Herbie alternative 6: piecewise rewrite of J*(exp(l)-exp(-l))*cos(K/2) + U.
# Cubic Taylor series in l when cos(K/2) <= 0.988; exact 2*sinh(l) form otherwise.
function code(J, l, K, U)
	t_0 = cos(Float64(K / 2.0))
	tmp = 0.0
	if (t_0 <= 0.988)
		tmp = Float64(U + Float64(t_0 * Float64(Float64(2.0 * Float64(J * l)) + Float64(0.3333333333333333 * Float64(J * (l ^ 3.0))))));
	else
		tmp = Float64(U + Float64(J * Float64(2.0 * sinh(l))));
	end
	return tmp
end
% Herbie alternative 6: piecewise rewrite of J*(exp(l)-exp(-l))*cos(K/2) + U.
% Cubic Taylor series in l when cos(K/2) <= 0.988; exact 2*sinh(l) form otherwise.
function tmp_2 = code(J, l, K, U)
	t_0 = cos((K / 2.0));
	tmp = 0.0;
	if (t_0 <= 0.988)
		tmp = U + (t_0 * ((2.0 * (J * l)) + (0.3333333333333333 * (J * (l ^ 3.0)))));
	else
		tmp = U + (J * (2.0 * sinh(l)));
	end
	tmp_2 = tmp;
end
code[J_, l_, K_, U_] := Block[{t$95$0 = N[Cos[N[(K / 2.0), $MachinePrecision]], $MachinePrecision]}, If[LessEqual[t$95$0, 0.988], N[(U + N[(t$95$0 * N[(N[(2.0 * N[(J * l), $MachinePrecision]), $MachinePrecision] + N[(0.3333333333333333 * N[(J * N[Power[l, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(U + N[(J * N[(2.0 * N[Sinh[l], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
t_0 := \cos \left(\frac{K}{2}\right)\\
\mathbf{if}\;t_0 \leq 0.988:\\
\;\;\;\;U + t_0 \cdot \left(2 \cdot \left(J \cdot \ell\right) + 0.3333333333333333 \cdot \left(J \cdot {\ell}^{3}\right)\right)\\

\mathbf{else}:\\
\;\;\;\;U + J \cdot \left(2 \cdot \sinh \ell\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if (cos.f64 (/.f64 K 2)) < 0.98799999999999999

    1. Initial program 83.4%

      \[\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U \]
    2. Taylor expanded in l around 0 92.0%

      \[\leadsto \color{blue}{\left(2 \cdot \left(\ell \cdot J\right) + 0.3333333333333333 \cdot \left({\ell}^{3} \cdot J\right)\right)} \cdot \cos \left(\frac{K}{2}\right) + U \]

    if 0.98799999999999999 < (cos.f64 (/.f64 K 2))

    1. Initial program 90.1%

      \[\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U \]
    2. Taylor expanded in K around 0 90.1%

      \[\leadsto \color{blue}{\left(e^{\ell} - e^{-\ell}\right) \cdot J} + U \]
    3. Step-by-step derivation
      1. expm1-log1p-u63.3%

        \[\leadsto \color{blue}{\mathsf{expm1}\left(\mathsf{log1p}\left(\left(e^{\ell} - e^{-\ell}\right) \cdot J\right)\right)} + U \]
      2. expm1-udef63.3%

        \[\leadsto \color{blue}{\left(e^{\mathsf{log1p}\left(\left(e^{\ell} - e^{-\ell}\right) \cdot J\right)} - 1\right)} + U \]
      3. *-commutative63.3%

        \[\leadsto \left(e^{\mathsf{log1p}\left(\color{blue}{J \cdot \left(e^{\ell} - e^{-\ell}\right)}\right)} - 1\right) + U \]
      4. sinh-undef63.7%

        \[\leadsto \left(e^{\mathsf{log1p}\left(J \cdot \color{blue}{\left(2 \cdot \sinh \ell\right)}\right)} - 1\right) + U \]
    4. Applied egg-rr63.7%

      \[\leadsto \color{blue}{\left(e^{\mathsf{log1p}\left(J \cdot \left(2 \cdot \sinh \ell\right)\right)} - 1\right)} + U \]
    5. Step-by-step derivation
      1. expm1-def68.2%

        \[\leadsto \color{blue}{\mathsf{expm1}\left(\mathsf{log1p}\left(J \cdot \left(2 \cdot \sinh \ell\right)\right)\right)} + U \]
      2. expm1-log1p100.0%

        \[\leadsto \color{blue}{J \cdot \left(2 \cdot \sinh \ell\right)} + U \]
    6. Simplified100.0%

      \[\leadsto \color{blue}{J \cdot \left(2 \cdot \sinh \ell\right)} + U \]
  3. Recombined 2 regimes into one program.
  4. Final simplification96.0%

    \[\leadsto \begin{array}{l} \mathbf{if}\;\cos \left(\frac{K}{2}\right) \leq 0.988:\\ \;\;\;\;U + \cos \left(\frac{K}{2}\right) \cdot \left(2 \cdot \left(J \cdot \ell\right) + 0.3333333333333333 \cdot \left(J \cdot {\ell}^{3}\right)\right)\\ \mathbf{else}:\\ \;\;\;\;U + J \cdot \left(2 \cdot \sinh \ell\right)\\ \end{array} \]

Alternative 7: 93.9% accurate, 1.4× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_0 := \cos \left(\frac{K}{2}\right)\\ \mathbf{if}\;t_0 \leq 0.988:\\ \;\;\;\;U + t_0 \cdot \left(J \cdot \left(\ell \cdot 2 + \ell \cdot \left(0.3333333333333333 \cdot \left(\ell \cdot \ell\right)\right)\right)\right)\\ \mathbf{else}:\\ \;\;\;\;U + J \cdot \left(2 \cdot \sinh \ell\right)\\ \end{array} \end{array} \]
(FPCore (J l K U)
 :precision binary64
 (let* ((t_0 (cos (/ K 2.0))))
   (if (<= t_0 0.988)
     (+ U (* t_0 (* J (+ (* l 2.0) (* l (* 0.3333333333333333 (* l l)))))))
     (+ U (* J (* 2.0 (sinh l)))))))
/* Herbie alternative 7 (93.9% accurate): same regime split as alternative 6,
 * but the cubic term is factored as l * (l*l/3) so no pow() call is needed. */
double code(double J, double l, double K, double U) {
	double t_0 = cos((K / 2.0));
	double tmp;
	if (t_0 <= 0.988) {
		/* series: J * (l*2 + l*(l*l/3)) */
		tmp = U + (t_0 * (J * ((l * 2.0) + (l * (0.3333333333333333 * (l * l))))));
	} else {
		/* K near 0: exact identity exp(l) - exp(-l) == 2*sinh(l) */
		tmp = U + (J * (2.0 * sinh(l)));
	}
	return tmp;
}
! Herbie alternative 7: like alternative 6 but the cubic term is factored
! as l*(l*l/3), avoiding the exponentiation.
real(8) function code(j, l, k, u)
    real(8), intent (in) :: j
    real(8), intent (in) :: l
    real(8), intent (in) :: k
    real(8), intent (in) :: u
    real(8) :: t_0
    real(8) :: tmp
    t_0 = cos((k / 2.0d0))
    if (t_0 <= 0.988d0) then
        tmp = u + (t_0 * (j * ((l * 2.0d0) + (l * (0.3333333333333333d0 * (l * l))))))
    else
        tmp = u + (j * (2.0d0 * sinh(l)))
    end if
    code = tmp
end function
// Herbie alternative 7 (93.9% accurate): like alternative 6 but the cubic
// term is factored as l*(l*l/3), avoiding Math.pow.
public static double code(double J, double l, double K, double U) {
	double t_0 = Math.cos((K / 2.0));
	double tmp;
	if (t_0 <= 0.988) {
		// series: J * (l*2 + l*(l*l/3))
		tmp = U + (t_0 * (J * ((l * 2.0) + (l * (0.3333333333333333 * (l * l))))));
	} else {
		// K near 0: exact identity exp(l) - exp(-l) == 2*sinh(l)
		tmp = U + (J * (2.0 * Math.sinh(l)));
	}
	return tmp;
}
def code(J, l, K, U):
	# Herbie alternative 7 for J*(e^l - e^-l)*cos(K/2) + U.
	cos_half = math.cos(K / 2.0)
	if cos_half <= 0.988:
		# Series with the cube factored as l * (l*l/3); scaled by J afterwards.
		poly = l * 2.0 + l * (0.3333333333333333 * (l * l))
		return U + cos_half * (J * poly)
	# cos(K/2) near 1 (K near 0): use the exact 2*sinh(l) form.
	return U + J * (2.0 * math.sinh(l))
# Herbie alternative 7: like alternative 6 but the cubic term is factored
# as l*(l*l/3), avoiding the exponentiation.
function code(J, l, K, U)
	t_0 = cos(Float64(K / 2.0))
	tmp = 0.0
	if (t_0 <= 0.988)
		tmp = Float64(U + Float64(t_0 * Float64(J * Float64(Float64(l * 2.0) + Float64(l * Float64(0.3333333333333333 * Float64(l * l)))))));
	else
		tmp = Float64(U + Float64(J * Float64(2.0 * sinh(l))));
	end
	return tmp
end
% Herbie alternative 7: like alternative 6 but the cubic term is factored
% as l*(l*l/3), avoiding the exponentiation.
function tmp_2 = code(J, l, K, U)
	t_0 = cos((K / 2.0));
	tmp = 0.0;
	if (t_0 <= 0.988)
		tmp = U + (t_0 * (J * ((l * 2.0) + (l * (0.3333333333333333 * (l * l))))));
	else
		tmp = U + (J * (2.0 * sinh(l)));
	end
	tmp_2 = tmp;
end
code[J_, l_, K_, U_] := Block[{t$95$0 = N[Cos[N[(K / 2.0), $MachinePrecision]], $MachinePrecision]}, If[LessEqual[t$95$0, 0.988], N[(U + N[(t$95$0 * N[(J * N[(N[(l * 2.0), $MachinePrecision] + N[(l * N[(0.3333333333333333 * N[(l * l), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(U + N[(J * N[(2.0 * N[Sinh[l], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
t_0 := \cos \left(\frac{K}{2}\right)\\
\mathbf{if}\;t_0 \leq 0.988:\\
\;\;\;\;U + t_0 \cdot \left(J \cdot \left(\ell \cdot 2 + \ell \cdot \left(0.3333333333333333 \cdot \left(\ell \cdot \ell\right)\right)\right)\right)\\

\mathbf{else}:\\
\;\;\;\;U + J \cdot \left(2 \cdot \sinh \ell\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if (cos.f64 (/.f64 K 2)) < 0.98799999999999999

    1. Initial program 83.4%

      \[\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U \]
    2. Taylor expanded in l around 0 96.8%

      \[\leadsto \left(J \cdot \color{blue}{\left(0.3333333333333333 \cdot {\ell}^{3} + \left(0.0003968253968253968 \cdot {\ell}^{7} + \left(0.016666666666666666 \cdot {\ell}^{5} + 2 \cdot \ell\right)\right)\right)}\right) \cdot \cos \left(\frac{K}{2}\right) + U \]
    3. Taylor expanded in l around 0 92.0%

      \[\leadsto \color{blue}{\left(2 \cdot \left(\ell \cdot J\right) + 0.3333333333333333 \cdot \left({\ell}^{3} \cdot J\right)\right)} \cdot \cos \left(\frac{K}{2}\right) + U \]
    4. Step-by-step derivation
      1. associate-*r*92.0%

        \[\leadsto \left(\color{blue}{\left(2 \cdot \ell\right) \cdot J} + 0.3333333333333333 \cdot \left({\ell}^{3} \cdot J\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U \]
      2. associate-*r*92.0%

        \[\leadsto \left(\left(2 \cdot \ell\right) \cdot J + \color{blue}{\left(0.3333333333333333 \cdot {\ell}^{3}\right) \cdot J}\right) \cdot \cos \left(\frac{K}{2}\right) + U \]
      3. distribute-rgt-out92.0%

        \[\leadsto \color{blue}{\left(J \cdot \left(2 \cdot \ell + 0.3333333333333333 \cdot {\ell}^{3}\right)\right)} \cdot \cos \left(\frac{K}{2}\right) + U \]
      4. *-commutative92.0%

        \[\leadsto \left(J \cdot \left(\color{blue}{\ell \cdot 2} + 0.3333333333333333 \cdot {\ell}^{3}\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U \]
      5. *-commutative92.0%

        \[\leadsto \left(J \cdot \left(\ell \cdot 2 + \color{blue}{{\ell}^{3} \cdot 0.3333333333333333}\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U \]
      6. cube-mult92.0%

        \[\leadsto \left(J \cdot \left(\ell \cdot 2 + \color{blue}{\left(\ell \cdot \left(\ell \cdot \ell\right)\right)} \cdot 0.3333333333333333\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U \]
      7. associate-*l*92.0%

        \[\leadsto \left(J \cdot \left(\ell \cdot 2 + \color{blue}{\ell \cdot \left(\left(\ell \cdot \ell\right) \cdot 0.3333333333333333\right)}\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U \]
      8. distribute-lft-out92.0%

        \[\leadsto \left(J \cdot \color{blue}{\left(\ell \cdot \left(2 + \left(\ell \cdot \ell\right) \cdot 0.3333333333333333\right)\right)}\right) \cdot \cos \left(\frac{K}{2}\right) + U \]
    5. Simplified92.0%

      \[\leadsto \color{blue}{\left(J \cdot \left(\ell \cdot \left(2 + \left(\ell \cdot \ell\right) \cdot 0.3333333333333333\right)\right)\right)} \cdot \cos \left(\frac{K}{2}\right) + U \]
    6. Step-by-step derivation
      1. distribute-rgt-in92.0%

        \[\leadsto \left(J \cdot \color{blue}{\left(2 \cdot \ell + \left(\left(\ell \cdot \ell\right) \cdot 0.3333333333333333\right) \cdot \ell\right)}\right) \cdot \cos \left(\frac{K}{2}\right) + U \]
      2. *-commutative92.0%

        \[\leadsto \left(J \cdot \left(\color{blue}{\ell \cdot 2} + \left(\left(\ell \cdot \ell\right) \cdot 0.3333333333333333\right) \cdot \ell\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U \]
      3. *-commutative92.0%

        \[\leadsto \left(J \cdot \left(\ell \cdot 2 + \color{blue}{\left(0.3333333333333333 \cdot \left(\ell \cdot \ell\right)\right)} \cdot \ell\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U \]
    7. Applied egg-rr92.0%

      \[\leadsto \left(J \cdot \color{blue}{\left(\ell \cdot 2 + \left(0.3333333333333333 \cdot \left(\ell \cdot \ell\right)\right) \cdot \ell\right)}\right) \cdot \cos \left(\frac{K}{2}\right) + U \]

    if 0.98799999999999999 < (cos.f64 (/.f64 K 2))

    1. Initial program 90.1%

      \[\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U \]
    2. Taylor expanded in K around 0 90.1%

      \[\leadsto \color{blue}{\left(e^{\ell} - e^{-\ell}\right) \cdot J} + U \]
    3. Step-by-step derivation
      1. expm1-log1p-u63.3%

        \[\leadsto \color{blue}{\mathsf{expm1}\left(\mathsf{log1p}\left(\left(e^{\ell} - e^{-\ell}\right) \cdot J\right)\right)} + U \]
      2. expm1-udef63.3%

        \[\leadsto \color{blue}{\left(e^{\mathsf{log1p}\left(\left(e^{\ell} - e^{-\ell}\right) \cdot J\right)} - 1\right)} + U \]
      3. *-commutative63.3%

        \[\leadsto \left(e^{\mathsf{log1p}\left(\color{blue}{J \cdot \left(e^{\ell} - e^{-\ell}\right)}\right)} - 1\right) + U \]
      4. sinh-undef63.7%

        \[\leadsto \left(e^{\mathsf{log1p}\left(J \cdot \color{blue}{\left(2 \cdot \sinh \ell\right)}\right)} - 1\right) + U \]
    4. Applied egg-rr63.7%

      \[\leadsto \color{blue}{\left(e^{\mathsf{log1p}\left(J \cdot \left(2 \cdot \sinh \ell\right)\right)} - 1\right)} + U \]
    5. Step-by-step derivation
      1. expm1-def68.2%

        \[\leadsto \color{blue}{\mathsf{expm1}\left(\mathsf{log1p}\left(J \cdot \left(2 \cdot \sinh \ell\right)\right)\right)} + U \]
      2. expm1-log1p100.0%

        \[\leadsto \color{blue}{J \cdot \left(2 \cdot \sinh \ell\right)} + U \]
    6. Simplified100.0%

      \[\leadsto \color{blue}{J \cdot \left(2 \cdot \sinh \ell\right)} + U \]
  3. Recombined 2 regimes into one program.
  4. Final simplification96.0%

    \[\leadsto \begin{array}{l} \mathbf{if}\;\cos \left(\frac{K}{2}\right) \leq 0.988:\\ \;\;\;\;U + \cos \left(\frac{K}{2}\right) \cdot \left(J \cdot \left(\ell \cdot 2 + \ell \cdot \left(0.3333333333333333 \cdot \left(\ell \cdot \ell\right)\right)\right)\right)\\ \mathbf{else}:\\ \;\;\;\;U + J \cdot \left(2 \cdot \sinh \ell\right)\\ \end{array} \]

Alternative 8: 93.9% accurate, 1.4× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_0 := \cos \left(\frac{K}{2}\right)\\ \mathbf{if}\;t_0 \leq 0.988:\\ \;\;\;\;U + t_0 \cdot \left(J \cdot \left(\ell \cdot \left(2 + 0.3333333333333333 \cdot \left(\ell \cdot \ell\right)\right)\right)\right)\\ \mathbf{else}:\\ \;\;\;\;U + J \cdot \left(2 \cdot \sinh \ell\right)\\ \end{array} \end{array} \]
(FPCore (J l K U)
 :precision binary64
 (let* ((t_0 (cos (/ K 2.0))))
   (if (<= t_0 0.988)
     (+ U (* t_0 (* J (* l (+ 2.0 (* 0.3333333333333333 (* l l)))))))
     (+ U (* J (* 2.0 (sinh l)))))))
/* Herbie alternative 8 (93.9% accurate): same regime split as alternatives
 * 6/7, with the series fully factored Horner-style as l * (2 + l*l/3). */
double code(double J, double l, double K, double U) {
	double t_0 = cos((K / 2.0));
	double tmp;
	if (t_0 <= 0.988) {
		/* series: J * l * (2 + l*l/3) */
		tmp = U + (t_0 * (J * (l * (2.0 + (0.3333333333333333 * (l * l))))));
	} else {
		/* K near 0: exact identity exp(l) - exp(-l) == 2*sinh(l) */
		tmp = U + (J * (2.0 * sinh(l)));
	}
	return tmp;
}
! Herbie alternative 8: series fully factored Horner-style as l*(2 + l*l/3)
! when cos(k/2) <= 0.988; exact 2*sinh(l) form otherwise.
real(8) function code(j, l, k, u)
    real(8), intent (in) :: j
    real(8), intent (in) :: l
    real(8), intent (in) :: k
    real(8), intent (in) :: u
    real(8) :: t_0
    real(8) :: tmp
    t_0 = cos((k / 2.0d0))
    if (t_0 <= 0.988d0) then
        tmp = u + (t_0 * (j * (l * (2.0d0 + (0.3333333333333333d0 * (l * l))))))
    else
        tmp = u + (j * (2.0d0 * sinh(l)))
    end if
    code = tmp
end function
// Herbie alternative 8 (93.9% accurate): series fully factored Horner-style
// as l*(2 + l*l/3) when cos(K/2) <= 0.988; exact 2*sinh(l) otherwise.
public static double code(double J, double l, double K, double U) {
	double t_0 = Math.cos((K / 2.0));
	double tmp;
	if (t_0 <= 0.988) {
		// series: J * l * (2 + l*l/3)
		tmp = U + (t_0 * (J * (l * (2.0 + (0.3333333333333333 * (l * l))))));
	} else {
		// K near 0: exact identity exp(l) - exp(-l) == 2*sinh(l)
		tmp = U + (J * (2.0 * Math.sinh(l)));
	}
	return tmp;
}
def code(J, l, K, U):
	# Herbie alternative 8 for J*(e^l - e^-l)*cos(K/2) + U.
	cos_half = math.cos(K / 2.0)
	if cos_half <= 0.988:
		# Fully factored cubic series: l * (2 + l*l/3), scaled by J afterwards.
		poly = l * (2.0 + 0.3333333333333333 * (l * l))
		return U + cos_half * (J * poly)
	# cos(K/2) near 1 (K near 0): use the exact 2*sinh(l) form.
	return U + J * (2.0 * math.sinh(l))
# Herbie alternative 8: series fully factored Horner-style as l*(2 + l*l/3)
# when cos(K/2) <= 0.988; exact 2*sinh(l) form otherwise.
function code(J, l, K, U)
	t_0 = cos(Float64(K / 2.0))
	tmp = 0.0
	if (t_0 <= 0.988)
		tmp = Float64(U + Float64(t_0 * Float64(J * Float64(l * Float64(2.0 + Float64(0.3333333333333333 * Float64(l * l)))))));
	else
		tmp = Float64(U + Float64(J * Float64(2.0 * sinh(l))));
	end
	return tmp
end
% Herbie alternative 8: series fully factored Horner-style as l*(2 + l*l/3)
% when cos(K/2) <= 0.988; exact 2*sinh(l) form otherwise.
function tmp_2 = code(J, l, K, U)
	t_0 = cos((K / 2.0));
	tmp = 0.0;
	if (t_0 <= 0.988)
		tmp = U + (t_0 * (J * (l * (2.0 + (0.3333333333333333 * (l * l))))));
	else
		tmp = U + (J * (2.0 * sinh(l)));
	end
	tmp_2 = tmp;
end
code[J_, l_, K_, U_] := Block[{t$95$0 = N[Cos[N[(K / 2.0), $MachinePrecision]], $MachinePrecision]}, If[LessEqual[t$95$0, 0.988], N[(U + N[(t$95$0 * N[(J * N[(l * N[(2.0 + N[(0.3333333333333333 * N[(l * l), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(U + N[(J * N[(2.0 * N[Sinh[l], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
t_0 := \cos \left(\frac{K}{2}\right)\\
\mathbf{if}\;t_0 \leq 0.988:\\
\;\;\;\;U + t_0 \cdot \left(J \cdot \left(\ell \cdot \left(2 + 0.3333333333333333 \cdot \left(\ell \cdot \ell\right)\right)\right)\right)\\

\mathbf{else}:\\
\;\;\;\;U + J \cdot \left(2 \cdot \sinh \ell\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if (cos.f64 (/.f64 K 2)) < 0.98799999999999999

    1. Initial program 83.4%

      \[\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U \]
    2. Taylor expanded in l around 0 96.8%

      \[\leadsto \left(J \cdot \color{blue}{\left(0.3333333333333333 \cdot {\ell}^{3} + \left(0.0003968253968253968 \cdot {\ell}^{7} + \left(0.016666666666666666 \cdot {\ell}^{5} + 2 \cdot \ell\right)\right)\right)}\right) \cdot \cos \left(\frac{K}{2}\right) + U \]
    3. Taylor expanded in l around 0 92.0%

      \[\leadsto \color{blue}{\left(2 \cdot \left(\ell \cdot J\right) + 0.3333333333333333 \cdot \left({\ell}^{3} \cdot J\right)\right)} \cdot \cos \left(\frac{K}{2}\right) + U \]
    4. Step-by-step derivation
      1. associate-*r*92.0%

        \[\leadsto \left(\color{blue}{\left(2 \cdot \ell\right) \cdot J} + 0.3333333333333333 \cdot \left({\ell}^{3} \cdot J\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U \]
      2. associate-*r*92.0%

        \[\leadsto \left(\left(2 \cdot \ell\right) \cdot J + \color{blue}{\left(0.3333333333333333 \cdot {\ell}^{3}\right) \cdot J}\right) \cdot \cos \left(\frac{K}{2}\right) + U \]
      3. distribute-rgt-out92.0%

        \[\leadsto \color{blue}{\left(J \cdot \left(2 \cdot \ell + 0.3333333333333333 \cdot {\ell}^{3}\right)\right)} \cdot \cos \left(\frac{K}{2}\right) + U \]
      4. *-commutative92.0%

        \[\leadsto \left(J \cdot \left(\color{blue}{\ell \cdot 2} + 0.3333333333333333 \cdot {\ell}^{3}\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U \]
      5. *-commutative92.0%

        \[\leadsto \left(J \cdot \left(\ell \cdot 2 + \color{blue}{{\ell}^{3} \cdot 0.3333333333333333}\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U \]
      6. cube-mult92.0%

        \[\leadsto \left(J \cdot \left(\ell \cdot 2 + \color{blue}{\left(\ell \cdot \left(\ell \cdot \ell\right)\right)} \cdot 0.3333333333333333\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U \]
      7. associate-*l*92.0%

        \[\leadsto \left(J \cdot \left(\ell \cdot 2 + \color{blue}{\ell \cdot \left(\left(\ell \cdot \ell\right) \cdot 0.3333333333333333\right)}\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U \]
      8. distribute-lft-out92.0%

        \[\leadsto \left(J \cdot \color{blue}{\left(\ell \cdot \left(2 + \left(\ell \cdot \ell\right) \cdot 0.3333333333333333\right)\right)}\right) \cdot \cos \left(\frac{K}{2}\right) + U \]
    5. Simplified92.0%

      \[\leadsto \color{blue}{\left(J \cdot \left(\ell \cdot \left(2 + \left(\ell \cdot \ell\right) \cdot 0.3333333333333333\right)\right)\right)} \cdot \cos \left(\frac{K}{2}\right) + U \]

    if 0.98799999999999999 < (cos.f64 (/.f64 K 2))

    1. Initial program 90.1%

      \[\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U \]
    2. Taylor expanded in K around 0 90.1%

      \[\leadsto \color{blue}{\left(e^{\ell} - e^{-\ell}\right) \cdot J} + U \]
    3. Step-by-step derivation
      1. expm1-log1p-u63.3%

        \[\leadsto \color{blue}{\mathsf{expm1}\left(\mathsf{log1p}\left(\left(e^{\ell} - e^{-\ell}\right) \cdot J\right)\right)} + U \]
      2. expm1-udef63.3%

        \[\leadsto \color{blue}{\left(e^{\mathsf{log1p}\left(\left(e^{\ell} - e^{-\ell}\right) \cdot J\right)} - 1\right)} + U \]
      3. *-commutative63.3%

        \[\leadsto \left(e^{\mathsf{log1p}\left(\color{blue}{J \cdot \left(e^{\ell} - e^{-\ell}\right)}\right)} - 1\right) + U \]
      4. sinh-undef63.7%

        \[\leadsto \left(e^{\mathsf{log1p}\left(J \cdot \color{blue}{\left(2 \cdot \sinh \ell\right)}\right)} - 1\right) + U \]
    4. Applied egg-rr63.7%

      \[\leadsto \color{blue}{\left(e^{\mathsf{log1p}\left(J \cdot \left(2 \cdot \sinh \ell\right)\right)} - 1\right)} + U \]
    5. Step-by-step derivation
      1. expm1-def68.2%

        \[\leadsto \color{blue}{\mathsf{expm1}\left(\mathsf{log1p}\left(J \cdot \left(2 \cdot \sinh \ell\right)\right)\right)} + U \]
      2. expm1-log1p100.0%

        \[\leadsto \color{blue}{J \cdot \left(2 \cdot \sinh \ell\right)} + U \]
    6. Simplified100.0%

      \[\leadsto \color{blue}{J \cdot \left(2 \cdot \sinh \ell\right)} + U \]
  3. Recombined 2 regimes into one program.
  4. Final simplification96.0%

    \[\leadsto \begin{array}{l} \mathbf{if}\;\cos \left(\frac{K}{2}\right) \leq 0.988:\\ \;\;\;\;U + \cos \left(\frac{K}{2}\right) \cdot \left(J \cdot \left(\ell \cdot \left(2 + 0.3333333333333333 \cdot \left(\ell \cdot \ell\right)\right)\right)\right)\\ \mathbf{else}:\\ \;\;\;\;U + J \cdot \left(2 \cdot \sinh \ell\right)\\ \end{array} \]

Alternative 9: 88.1% accurate, 1.5× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_0 := \cos \left(\frac{K}{2}\right)\\ \mathbf{if}\;t_0 \leq -0.02:\\ \;\;\;\;U + t_0 \cdot \left(\ell \cdot \left(J \cdot 2\right)\right)\\ \mathbf{else}:\\ \;\;\;\;U + J \cdot \left(2 \cdot \sinh \ell\right)\\ \end{array} \end{array} \]
(FPCore (J l K U)
 :precision binary64
 (let* ((t_0 (cos (/ K 2.0))))
   (if (<= t_0 -0.02)
     (+ U (* t_0 (* l (* J 2.0))))
     (+ U (* J (* 2.0 (sinh l)))))))
/* Herbie alternative 9 (88.1% accurate): linear model 2*J*l when
 * cos(K/2) <= -0.02, exact 2*sinh(l) form otherwise. */
double code(double J, double l, double K, double U) {
	double t_0 = cos((K / 2.0));
	double tmp;
	if (t_0 <= -0.02) {
		/* first-order Taylor series of exp(l) - exp(-l): 2*l */
		tmp = U + (t_0 * (l * (J * 2.0)));
	} else {
		/* exact identity exp(l) - exp(-l) == 2*sinh(l) */
		tmp = U + (J * (2.0 * sinh(l)));
	}
	return tmp;
}
! Herbie alternative 9: linear model 2*j*l when cos(k/2) <= -0.02,
! exact 2*sinh(l) form otherwise.
real(8) function code(j, l, k, u)
    real(8), intent (in) :: j
    real(8), intent (in) :: l
    real(8), intent (in) :: k
    real(8), intent (in) :: u
    real(8) :: t_0
    real(8) :: tmp
    t_0 = cos((k / 2.0d0))
    if (t_0 <= (-0.02d0)) then
        tmp = u + (t_0 * (l * (j * 2.0d0)))
    else
        tmp = u + (j * (2.0d0 * sinh(l)))
    end if
    code = tmp
end function
// Herbie alternative 9 (88.1% accurate): linear model 2*J*l when
// cos(K/2) <= -0.02, exact 2*sinh(l) form otherwise.
public static double code(double J, double l, double K, double U) {
	double t_0 = Math.cos((K / 2.0));
	double tmp;
	if (t_0 <= -0.02) {
		// first-order Taylor series of exp(l) - exp(-l): 2*l
		tmp = U + (t_0 * (l * (J * 2.0)));
	} else {
		// exact identity exp(l) - exp(-l) == 2*sinh(l)
		tmp = U + (J * (2.0 * Math.sinh(l)));
	}
	return tmp;
}
def code(J, l, K, U):
	# Herbie alternative 9 for J*(e^l - e^-l)*cos(K/2) + U.
	cos_half = math.cos(K / 2.0)
	if cos_half <= -0.02:
		# First-order Taylor series of e^l - e^-l: just 2*l.
		return U + cos_half * (l * (J * 2.0))
	# Otherwise use the exact identity e^l - e^-l == 2*sinh(l).
	return U + J * (2.0 * math.sinh(l))
# Herbie alternative 9: linear model 2*J*l when cos(K/2) <= -0.02,
# exact 2*sinh(l) form otherwise.
function code(J, l, K, U)
	t_0 = cos(Float64(K / 2.0))
	tmp = 0.0
	if (t_0 <= -0.02)
		tmp = Float64(U + Float64(t_0 * Float64(l * Float64(J * 2.0))));
	else
		tmp = Float64(U + Float64(J * Float64(2.0 * sinh(l))));
	end
	return tmp
end
% Herbie alternative 9: linear model 2*J*l when cos(K/2) <= -0.02,
% exact 2*sinh(l) form otherwise.
function tmp_2 = code(J, l, K, U)
	t_0 = cos((K / 2.0));
	tmp = 0.0;
	if (t_0 <= -0.02)
		tmp = U + (t_0 * (l * (J * 2.0)));
	else
		tmp = U + (J * (2.0 * sinh(l)));
	end
	tmp_2 = tmp;
end
code[J_, l_, K_, U_] := Block[{t$95$0 = N[Cos[N[(K / 2.0), $MachinePrecision]], $MachinePrecision]}, If[LessEqual[t$95$0, -0.02], N[(U + N[(t$95$0 * N[(l * N[(J * 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(U + N[(J * N[(2.0 * N[Sinh[l], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
t_0 := \cos \left(\frac{K}{2}\right)\\
\mathbf{if}\;t_0 \leq -0.02:\\
\;\;\;\;U + t_0 \cdot \left(\ell \cdot \left(J \cdot 2\right)\right)\\

\mathbf{else}:\\
\;\;\;\;U + J \cdot \left(2 \cdot \sinh \ell\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if (cos.f64 (/.f64 K 2)) < -0.0200000000000000004

    1. Initial program 80.1%

      \[\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U \]
    2. Taylor expanded in l around 0 67.4%

      \[\leadsto \color{blue}{\left(2 \cdot \left(\ell \cdot J\right)\right)} \cdot \cos \left(\frac{K}{2}\right) + U \]
    3. Step-by-step derivation
      1. associate-*r*67.4%

        \[\leadsto \color{blue}{\left(\left(2 \cdot \ell\right) \cdot J\right)} \cdot \cos \left(\frac{K}{2}\right) + U \]
      2. *-commutative67.4%

        \[\leadsto \left(\color{blue}{\left(\ell \cdot 2\right)} \cdot J\right) \cdot \cos \left(\frac{K}{2}\right) + U \]
      3. associate-*l*67.4%

        \[\leadsto \color{blue}{\left(\ell \cdot \left(2 \cdot J\right)\right)} \cdot \cos \left(\frac{K}{2}\right) + U \]
    4. Simplified67.4%

      \[\leadsto \color{blue}{\left(\ell \cdot \left(2 \cdot J\right)\right)} \cdot \cos \left(\frac{K}{2}\right) + U \]

    if -0.0200000000000000004 < (cos.f64 (/.f64 K 2))

    1. Initial program 89.1%

      \[\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U \]
    2. Taylor expanded in K around 0 88.1%

      \[\leadsto \color{blue}{\left(e^{\ell} - e^{-\ell}\right) \cdot J} + U \]
    3. Step-by-step derivation
      1. expm1-log1p-u59.5%

        \[\leadsto \color{blue}{\mathsf{expm1}\left(\mathsf{log1p}\left(\left(e^{\ell} - e^{-\ell}\right) \cdot J\right)\right)} + U \]
      2. expm1-udef59.3%

        \[\leadsto \color{blue}{\left(e^{\mathsf{log1p}\left(\left(e^{\ell} - e^{-\ell}\right) \cdot J\right)} - 1\right)} + U \]
      3. *-commutative59.3%

        \[\leadsto \left(e^{\mathsf{log1p}\left(\color{blue}{J \cdot \left(e^{\ell} - e^{-\ell}\right)}\right)} - 1\right) + U \]
      4. sinh-undef59.1%

        \[\leadsto \left(e^{\mathsf{log1p}\left(J \cdot \color{blue}{\left(2 \cdot \sinh \ell\right)}\right)} - 1\right) + U \]
    4. Applied egg-rr59.1%

      \[\leadsto \color{blue}{\left(e^{\mathsf{log1p}\left(J \cdot \left(2 \cdot \sinh \ell\right)\right)} - 1\right)} + U \]
    5. Step-by-step derivation
      1. expm1-def62.6%

        \[\leadsto \color{blue}{\mathsf{expm1}\left(\mathsf{log1p}\left(J \cdot \left(2 \cdot \sinh \ell\right)\right)\right)} + U \]
      2. expm1-log1p95.4%

        \[\leadsto \color{blue}{J \cdot \left(2 \cdot \sinh \ell\right)} + U \]
    6. Simplified95.4%

      \[\leadsto \color{blue}{J \cdot \left(2 \cdot \sinh \ell\right)} + U \]
  3. Recombined 2 regimes into one program.
  4. Final simplification88.1%

    \[\leadsto \begin{array}{l} \mathbf{if}\;\cos \left(\frac{K}{2}\right) \leq -0.02:\\ \;\;\;\;U + \cos \left(\frac{K}{2}\right) \cdot \left(\ell \cdot \left(J \cdot 2\right)\right)\\ \mathbf{else}:\\ \;\;\;\;U + J \cdot \left(2 \cdot \sinh \ell\right)\\ \end{array} \]

Alternative 10: 80.6% accurate, 2.9× speedup?

\[\begin{array}{l} \\ U + J \cdot \left(2 \cdot \sinh \ell\right) \end{array} \]
(FPCore (J l K U) :precision binary64 (+ U (* J (* 2.0 (sinh l)))))
/* Herbie alternative 10 (80.6% accurate): drops the cos(K/2) factor entirely
 * (Taylor expansion in K around 0) and uses exp(l) - exp(-l) == 2*sinh(l).
 * K is unused. */
double code(double J, double l, double K, double U) {
	return U + (J * (2.0 * sinh(l)));
}
! Herbie alternative 10: drops the cos(k/2) factor (Taylor expansion in k
! around 0) and uses exp(l) - exp(-l) == 2*sinh(l). k is unused.
real(8) function code(j, l, k, u)
    real(8), intent (in) :: j
    real(8), intent (in) :: l
    real(8), intent (in) :: k
    real(8), intent (in) :: u
    code = u + (j * (2.0d0 * sinh(l)))
end function
// Herbie alternative 10 (80.6% accurate): drops the cos(K/2) factor (Taylor
// expansion in K around 0) and uses exp(l) - exp(-l) == 2*sinh(l). K is unused.
public static double code(double J, double l, double K, double U) {
	return U + (J * (2.0 * Math.sinh(l)));
}
def code(J, l, K, U):
	# Herbie alternative 10: cos(K/2) is dropped entirely (K unused) and
	# e^l - e^-l is replaced by the exact identity 2*sinh(l).
	doubled_sinh = 2.0 * math.sinh(l)
	return U + J * doubled_sinh
# Herbie alternative 10: drops the cos(K/2) factor (Taylor expansion in K
# around 0) and uses exp(l) - exp(-l) == 2*sinh(l). K is unused.
function code(J, l, K, U)
	return Float64(U + Float64(J * Float64(2.0 * sinh(l))))
end
% Herbie alternative 10: drops the cos(K/2) factor (Taylor expansion in K
% around 0) and uses exp(l) - exp(-l) == 2*sinh(l). K is unused.
function tmp = code(J, l, K, U)
	tmp = U + (J * (2.0 * sinh(l)));
end
code[J_, l_, K_, U_] := N[(U + N[(J * N[(2.0 * N[Sinh[l], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
U + J \cdot \left(2 \cdot \sinh \ell\right)
\end{array}
Derivation
  1. Initial program 86.7%

    \[\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U \]
  2. Taylor expanded in K around 0 72.7%

    \[\leadsto \color{blue}{\left(e^{\ell} - e^{-\ell}\right) \cdot J} + U \]
  3. Step-by-step derivation
    1. expm1-log1p-u51.5%

      \[\leadsto \color{blue}{\mathsf{expm1}\left(\mathsf{log1p}\left(\left(e^{\ell} - e^{-\ell}\right) \cdot J\right)\right)} + U \]
    2. expm1-udef51.4%

      \[\leadsto \color{blue}{\left(e^{\mathsf{log1p}\left(\left(e^{\ell} - e^{-\ell}\right) \cdot J\right)} - 1\right)} + U \]
    3. *-commutative51.4%

      \[\leadsto \left(e^{\mathsf{log1p}\left(\color{blue}{J \cdot \left(e^{\ell} - e^{-\ell}\right)}\right)} - 1\right) + U \]
    4. sinh-undef50.4%

      \[\leadsto \left(e^{\mathsf{log1p}\left(J \cdot \color{blue}{\left(2 \cdot \sinh \ell\right)}\right)} - 1\right) + U \]
  4. Applied egg-rr50.4%

    \[\leadsto \color{blue}{\left(e^{\mathsf{log1p}\left(J \cdot \left(2 \cdot \sinh \ell\right)\right)} - 1\right)} + U \]
  5. Step-by-step derivation
    1. expm1-def53.0%

      \[\leadsto \color{blue}{\mathsf{expm1}\left(\mathsf{log1p}\left(J \cdot \left(2 \cdot \sinh \ell\right)\right)\right)} + U \]
    2. expm1-log1p78.0%

      \[\leadsto \color{blue}{J \cdot \left(2 \cdot \sinh \ell\right)} + U \]
  6. Simplified78.0%

    \[\leadsto \color{blue}{J \cdot \left(2 \cdot \sinh \ell\right)} + U \]
  7. Final simplification78.0%

    \[\leadsto U + J \cdot \left(2 \cdot \sinh \ell\right) \]

Alternative 11: 53.4% accurate, 44.6× speedup?

\[\begin{array}{l} \\ U + J \cdot \left(\ell \cdot 2\right) \end{array} \]
(FPCore (J l K U) :precision binary64 (+ U (* J (* l 2.0))))
/* Herbie alternative 11 (53.4% accurate): fully linearized model U + 2*J*l
 * (Taylor expansion in both K and l around 0). K is unused. */
double code(double J, double l, double K, double U) {
	return U + (J * (l * 2.0));
}
! Herbie alternative 11: fully linearized model u + 2*j*l (Taylor expansion
! in both k and l around 0). k is unused.
real(8) function code(j, l, k, u)
    real(8), intent (in) :: j
    real(8), intent (in) :: l
    real(8), intent (in) :: k
    real(8), intent (in) :: u
    code = u + (j * (l * 2.0d0))
end function
// Herbie alternative 11 (53.4% accurate): fully linearized model U + 2*J*l
// (Taylor expansion in both K and l around 0). K is unused.
public static double code(double J, double l, double K, double U) {
	return U + (J * (l * 2.0));
}
def code(J, l, K, U):
	# Herbie alternative 11: fully linearized model U + 2*J*l (K is unused).
	linear_term = l * 2.0
	return U + J * linear_term
# Herbie alternative 11: fully linearized model U + 2*J*l (Taylor expansion
# in both K and l around 0). K is unused.
function code(J, l, K, U)
	return Float64(U + Float64(J * Float64(l * 2.0)))
end
% Herbie alternative 11: fully linearized model U + 2*J*l (Taylor expansion
% in both K and l around 0). K is unused.
function tmp = code(J, l, K, U)
	tmp = U + (J * (l * 2.0));
end
code[J_, l_, K_, U_] := N[(U + N[(J * N[(l * 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
U + J \cdot \left(\ell \cdot 2\right)
\end{array}
Derivation
  1. Initial program 86.7%

    \[\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U \]
  2. Taylor expanded in K around 0 72.7%

    \[\leadsto \color{blue}{\left(e^{\ell} - e^{-\ell}\right) \cdot J} + U \]
  3. Taylor expanded in l around 0 56.1%

    \[\leadsto \color{blue}{\left(2 \cdot \ell\right)} \cdot J + U \]
  4. Final simplification56.1%

    \[\leadsto U + J \cdot \left(\ell \cdot 2\right) \]

Alternative 12: 39.7% accurate, 61.6× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;\ell \leq -2.85 \cdot 10^{+51}:\\ \;\;\;\;U \cdot U\\ \mathbf{else}:\\ \;\;\;\;U\\ \end{array} \end{array} \]
(FPCore (J l K U) :precision binary64 (if (<= l -2.85e+51) (* U U) U))
/* Herbie alternative 12 (39.7% accurate): degenerate fallback that keeps
 * only U (or U*U for huge negative l). J and K are unused. */
double code(double J, double l, double K, double U) {
	double tmp;
	if (l <= -2.85e+51) {
		tmp = U * U;
	} else {
		tmp = U;
	}
	return tmp;
}
! Herbie alternative 12: degenerate fallback that keeps only u (or u*u for
! huge negative l). j and k are unused.
real(8) function code(j, l, k, u)
    real(8), intent (in) :: j
    real(8), intent (in) :: l
    real(8), intent (in) :: k
    real(8), intent (in) :: u
    real(8) :: tmp
    if (l <= (-2.85d+51)) then
        tmp = u * u
    else
        tmp = u
    end if
    code = tmp
end function
// Herbie alternative 12 (39.7% accurate): degenerate fallback that keeps
// only U (or U*U for huge negative l). J and K are unused.
public static double code(double J, double l, double K, double U) {
	double tmp;
	if (l <= -2.85e+51) {
		tmp = U * U;
	} else {
		tmp = U;
	}
	return tmp;
}
def code(J, l, K, U):
	# Herbie alternative 12: degenerate fallback keeping only U
	# (or U*U for huge negative l). J and K are unused.
	if l <= -2.85e+51:
		return U * U
	return U
function code(J, l, K, U)
	# Regime split at l = -2.85e51; same `<=` test as the reference version.
	if l <= -2.85e+51
		return Float64(U * U)
	end
	return U
end
function tmp_2 = code(J, l, K, U)
	% Regime split at l = -2.85e51: squared constant below, constant above.
	if (l <= -2.85e+51)
		tmp_2 = U * U;
	else
		tmp_2 = U;
	end
end
code[J_, l_, K_, U_] := If[LessEqual[l, -2.85e+51], N[(U * U), $MachinePrecision], U]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;\ell \leq -2.85 \cdot 10^{+51}:\\
\;\;\;\;U \cdot U\\

\mathbf{else}:\\
\;\;\;\;U\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if l < -2.8500000000000001e51

    1. Initial program 100.0%

      \[\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U \]
    2. Step-by-step derivation
      1. associate-*l* 100.0%

        \[\leadsto \color{blue}{J \cdot \left(\left(e^{\ell} - e^{-\ell}\right) \cdot \cos \left(\frac{K}{2}\right)\right)} + U \]
      2. fma-def 100.0%

        \[\leadsto \color{blue}{\mathsf{fma}\left(J, \left(e^{\ell} - e^{-\ell}\right) \cdot \cos \left(\frac{K}{2}\right), U\right)} \]
    3. Simplified 100.0%

      \[\leadsto \color{blue}{\mathsf{fma}\left(J, \left(e^{\ell} - e^{-\ell}\right) \cdot \cos \left(\frac{K}{2}\right), U\right)} \]
    4. Applied egg-rr 27.8%

      \[\leadsto \color{blue}{U \cdot U} \]

    if -2.8500000000000001e51 < l

    1. Initial program 84.2%

      \[\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U \]
    2. Step-by-step derivation
      1. associate-*l* 84.2%

        \[\leadsto \color{blue}{J \cdot \left(\left(e^{\ell} - e^{-\ell}\right) \cdot \cos \left(\frac{K}{2}\right)\right)} + U \]
      2. fma-def 84.2%

        \[\leadsto \color{blue}{\mathsf{fma}\left(J, \left(e^{\ell} - e^{-\ell}\right) \cdot \cos \left(\frac{K}{2}\right), U\right)} \]
    3. Simplified 84.2%

      \[\leadsto \color{blue}{\mathsf{fma}\left(J, \left(e^{\ell} - e^{-\ell}\right) \cdot \cos \left(\frac{K}{2}\right), U\right)} \]
    4. Taylor expanded in J around 0 43.9%

      \[\leadsto \color{blue}{U} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification 41.3%

    \[\leadsto \begin{array}{l} \mathbf{if}\;\ell \leq -2.85 \cdot 10^{+51}:\\ \;\;\;\;U \cdot U\\ \mathbf{else}:\\ \;\;\;\;U\\ \end{array} \]

Alternative 13: 2.8% accurate, 312.0× speedup?

\[\begin{array}{l} \\ 1 \end{array} \]
(FPCore (J l K U) :precision binary64 1.0)
double code(double J, double l, double K, double U) {
	/* Constant approximation: the whole expression collapses to 1. */
	const double one = 1.0;
	return one;
}
real(8) function code(j, l, k, u)
    ! Constant approximation: the whole expression collapses to 1.
    real(8), intent (in) :: j
    real(8), intent (in) :: l
    real(8), intent (in) :: k
    real(8), intent (in) :: u
    real(8), parameter :: one = 1.0d0
    code = one
end function
public static double code(double J, double l, double K, double U) {
	// Constant approximation: the whole expression collapses to 1.
	final double one = 1.0;
	return one;
}
def code(J, l, K, U):
	"""Constant approximation: always return 1.0."""
	one = 1.0
	return one
function code(J, l, K, U)
	# Constant approximation: always return 1.0.
	one_val = 1.0
	return one_val
end
function tmp = code(J, l, K, U)
	% Constant approximation: always return 1.0.
	one_val = 1.0;
	tmp = one_val;
end
code[J_, l_, K_, U_] := 1.0
\begin{array}{l}

\\
1
\end{array}
Derivation
  1. Initial program 86.7%

    \[\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U \]
  2. Step-by-step derivation
    1. associate-*l* 86.7%

      \[\leadsto \color{blue}{J \cdot \left(\left(e^{\ell} - e^{-\ell}\right) \cdot \cos \left(\frac{K}{2}\right)\right)} + U \]
    2. fma-def 86.7%

      \[\leadsto \color{blue}{\mathsf{fma}\left(J, \left(e^{\ell} - e^{-\ell}\right) \cdot \cos \left(\frac{K}{2}\right), U\right)} \]
  3. Simplified 86.7%

    \[\leadsto \color{blue}{\mathsf{fma}\left(J, \left(e^{\ell} - e^{-\ell}\right) \cdot \cos \left(\frac{K}{2}\right), U\right)} \]
  4. Applied egg-rr 2.6%

    \[\leadsto \color{blue}{\frac{-8 - U}{-8 - U}} \]
  5. Step-by-step derivation
    1. *-inverses 2.6%

      \[\leadsto \color{blue}{1} \]
  6. Simplified 2.6%

    \[\leadsto \color{blue}{1} \]
  7. Final simplification 2.6%

    \[\leadsto 1 \]

Alternative 14: 37.3% accurate, 312.0× speedup?

\[\begin{array}{l} \\ U \end{array} \]
(FPCore (J l K U) :precision binary64 U)
double code(double J, double l, double K, double U) {
	/* Zeroth-order approximation: only the additive term U survives. */
	double result = U;
	return result;
}
real(8) function code(j, l, k, u)
    ! Zeroth-order approximation: only the additive term u survives.
    real(8), intent (in) :: j
    real(8), intent (in) :: l
    real(8), intent (in) :: k
    real(8), intent (in) :: u
    real(8) :: result
    result = u
    code = result
end function
public static double code(double J, double l, double K, double U) {
	// Zeroth-order approximation: only the additive term U survives.
	double result = U;
	return result;
}
def code(J, l, K, U):
	"""Zeroth-order approximation: return the additive constant U."""
	result = U
	return result
function code(J, l, K, U)
	# Zeroth-order approximation: only the additive term U survives.
	result = U
	return result
end
function tmp = code(J, l, K, U)
	% Zeroth-order approximation: only the additive term U survives.
	tmp = U;
end
code[J_, l_, K_, U_] := U
\begin{array}{l}

\\
U
\end{array}
Derivation
  1. Initial program 86.7%

    \[\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U \]
  2. Step-by-step derivation
    1. associate-*l* 86.7%

      \[\leadsto \color{blue}{J \cdot \left(\left(e^{\ell} - e^{-\ell}\right) \cdot \cos \left(\frac{K}{2}\right)\right)} + U \]
    2. fma-def 86.7%

      \[\leadsto \color{blue}{\mathsf{fma}\left(J, \left(e^{\ell} - e^{-\ell}\right) \cdot \cos \left(\frac{K}{2}\right), U\right)} \]
  3. Simplified 86.7%

    \[\leadsto \color{blue}{\mathsf{fma}\left(J, \left(e^{\ell} - e^{-\ell}\right) \cdot \cos \left(\frac{K}{2}\right), U\right)} \]
  4. Taylor expanded in J around 0 37.2%

      \[\leadsto \color{blue}{U} \]
  5. Final simplification 37.2%

    \[\leadsto U \]

Reproduce

?
herbie shell --seed 2023182 
(FPCore (J l K U)
  :name "Maksimov and Kolovsky, Equation (4)"
  :precision binary64
  (+ (* (* J (- (exp l) (exp (- l)))) (cos (/ K 2.0))) U))