
(FPCore (J l K U) :precision binary64 (+ (* (* J (- (exp l) (exp (- l)))) (cos (/ K 2.0))) U))
/* Initial program: J * (exp(l) - exp(-l)) * cos(K/2) + U in IEEE binary64.
 * Note: exp(l) - exp(-l) equals 2*sinh(l); evaluating it as an explicit
 * difference loses accuracy to cancellation when |l| is small, which is
 * why the report's alternatives rewrite this term. */
double code(double J, double l, double K, double U) {
return ((J * (exp(l) - exp(-l))) * cos((K / 2.0))) + U;
}
!> Initial program: j * (exp(l) - exp(-l)) * cos(k/2) + u in double precision.
!> exp(l) - exp(-l) equals 2*sinh(l); computing it as an explicit difference
!> cancels catastrophically for small |l| (see the rewritten alternatives).
real(8) function code(j, l, k, u)
real(8), intent (in) :: j
real(8), intent (in) :: l
real(8), intent (in) :: k
real(8), intent (in) :: u
code = ((j * (exp(l) - exp(-l))) * cos((k / 2.0d0))) + u
end function
public static double code(double J, double l, double K, double U) {
return ((J * (Math.exp(l) - Math.exp(-l))) * Math.cos((K / 2.0))) + U;
}
def code(J, l, K, U): return ((J * (math.exp(l) - math.exp(-l))) * math.cos((K / 2.0))) + U
function code(J, l, K, U) return Float64(Float64(Float64(J * Float64(exp(l) - exp(Float64(-l)))) * cos(Float64(K / 2.0))) + U) end
function tmp = code(J, l, K, U) tmp = ((J * (exp(l) - exp(-l))) * cos((K / 2.0))) + U; end
code[J_, l_, K_, U_] := N[(N[(N[(J * N[(N[Exp[l], $MachinePrecision] - N[Exp[(-l)], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[Cos[N[(K / 2.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] + U), $MachinePrecision]
\begin{array}{l}
\\
\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 11 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (J l K U) :precision binary64 (+ (* (* J (- (exp l) (exp (- l)))) (cos (/ K 2.0))) U))
/* Alternative 1 (identical to the initial program): evaluates
 * J * (exp(l) - exp(-l)) * cos(K/2) + U directly in binary64.
 * The exp(l) - exp(-l) difference is 2*sinh(l) and is cancellation-prone
 * near l = 0. */
double code(double J, double l, double K, double U) {
return ((J * (exp(l) - exp(-l))) * cos((K / 2.0))) + U;
}
!> Alternative 1 (same expression as the initial program):
!> j * (exp(l) - exp(-l)) * cos(k/2) + u, double precision throughout.
!> The subtraction of the two exponentials is cancellation-prone for small |l|.
real(8) function code(j, l, k, u)
real(8), intent (in) :: j
real(8), intent (in) :: l
real(8), intent (in) :: k
real(8), intent (in) :: u
code = ((j * (exp(l) - exp(-l))) * cos((k / 2.0d0))) + u
end function
public static double code(double J, double l, double K, double U) {
return ((J * (Math.exp(l) - Math.exp(-l))) * Math.cos((K / 2.0))) + U;
}
def code(J, l, K, U): return ((J * (math.exp(l) - math.exp(-l))) * math.cos((K / 2.0))) + U
function code(J, l, K, U) return Float64(Float64(Float64(J * Float64(exp(l) - exp(Float64(-l)))) * cos(Float64(K / 2.0))) + U) end
function tmp = code(J, l, K, U) tmp = ((J * (exp(l) - exp(-l))) * cos((K / 2.0))) + U; end
code[J_, l_, K_, U_] := N[(N[(N[(J * N[(N[Exp[l], $MachinePrecision] - N[Exp[(-l)], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[Cos[N[(K / 2.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] + U), $MachinePrecision]
\begin{array}{l}
\\
\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U
\end{array}
(FPCore (J l K U) :precision binary64 (+ (* (* J (log1p (expm1 (* l 2.0)))) (cos (/ K 2.0))) U))
/* Herbie rewrite of the sinh difference via log1p-expm1-u (see the
 * derivation log): in exact arithmetic log1p(expm1(x)) == x, so this
 * computes J * (2*l) * cos(K/2) + U while the paired library calls keep
 * good relative accuracy; the report scores it 99.3%. */
double code(double J, double l, double K, double U) {
return ((J * log1p(expm1((l * 2.0)))) * cos((K / 2.0))) + U;
}
public static double code(double J, double l, double K, double U) {
return ((J * Math.log1p(Math.expm1((l * 2.0)))) * Math.cos((K / 2.0))) + U;
}
def code(J, l, K, U): return ((J * math.log1p(math.expm1((l * 2.0)))) * math.cos((K / 2.0))) + U
function code(J, l, K, U) return Float64(Float64(Float64(J * log1p(expm1(Float64(l * 2.0)))) * cos(Float64(K / 2.0))) + U) end
code[J_, l_, K_, U_] := N[(N[(N[(J * N[Log[1 + N[(Exp[N[(l * 2.0), $MachinePrecision]] - 1), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] * N[Cos[N[(K / 2.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] + U), $MachinePrecision]
\begin{array}{l}
\\
\left(J \cdot \mathsf{log1p}\left(\mathsf{expm1}\left(\ell \cdot 2\right)\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U
\end{array}
Initial program 81.6%
Taylor expanded in l around 0 68.3%
*-commutative 68.3%
associate-*l* 68.3%
Simplified 68.3%
log1p-expm1-u 99.3%
Applied egg-rr 99.3%
(FPCore (J l K U)
:precision binary64
(+
U
(*
(cos (/ K 2.0))
(*
J
(*
l
(+
2.0
(* (* l l) (+ 0.3333333333333333 (* (* l l) 0.016666666666666666)))))))))
/* Taylor form of 2*sinh(l) about l = 0, evaluated Horner-style:
 * l * (2 + l^2*(1/3 + l^2*(1/60))); the literals 0.333... and 0.01666...
 * are 1/3 and 1/60 (the 2*l^3/3! and 2*l^5/5! coefficients).
 * Avoids the exp(l) - exp(-l) cancellation but truncates at degree 5. */
double code(double J, double l, double K, double U) {
return U + (cos((K / 2.0)) * (J * (l * (2.0 + ((l * l) * (0.3333333333333333 + ((l * l) * 0.016666666666666666)))))));
}
!> Degree-5 Taylor expansion of 2*sinh(l) about 0 in Horner form:
!> l * (2 + l**2*(1/3 + l**2/60)); the constants 0.3333...d0 and
!> 0.01666...d0 are 1/3 and 1/60.  Cancellation-free for small |l|,
!> truncated for large |l|.
real(8) function code(j, l, k, u)
real(8), intent (in) :: j
real(8), intent (in) :: l
real(8), intent (in) :: k
real(8), intent (in) :: u
code = u + (cos((k / 2.0d0)) * (j * (l * (2.0d0 + ((l * l) * (0.3333333333333333d0 + ((l * l) * 0.016666666666666666d0)))))))
end function
public static double code(double J, double l, double K, double U) {
return U + (Math.cos((K / 2.0)) * (J * (l * (2.0 + ((l * l) * (0.3333333333333333 + ((l * l) * 0.016666666666666666)))))));
}
def code(J, l, K, U): return U + (math.cos((K / 2.0)) * (J * (l * (2.0 + ((l * l) * (0.3333333333333333 + ((l * l) * 0.016666666666666666)))))))
function code(J, l, K, U) return Float64(U + Float64(cos(Float64(K / 2.0)) * Float64(J * Float64(l * Float64(2.0 + Float64(Float64(l * l) * Float64(0.3333333333333333 + Float64(Float64(l * l) * 0.016666666666666666)))))))) end
function tmp = code(J, l, K, U) tmp = U + (cos((K / 2.0)) * (J * (l * (2.0 + ((l * l) * (0.3333333333333333 + ((l * l) * 0.016666666666666666))))))); end
code[J_, l_, K_, U_] := N[(U + N[(N[Cos[N[(K / 2.0), $MachinePrecision]], $MachinePrecision] * N[(J * N[(l * N[(2.0 + N[(N[(l * l), $MachinePrecision] * N[(0.3333333333333333 + N[(N[(l * l), $MachinePrecision] * 0.016666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
U + \cos \left(\frac{K}{2}\right) \cdot \left(J \cdot \left(\ell \cdot \left(2 + \left(\ell \cdot \ell\right) \cdot \left(0.3333333333333333 + \left(\ell \cdot \ell\right) \cdot 0.016666666666666666\right)\right)\right)\right)
\end{array}
Initial program 81.6%
Taylor expanded in l around 0 93.5%
*-commutative93.5%
Simplified93.5%
unpow293.5%
Applied egg-rr93.5%
unpow293.5%
Applied egg-rr93.5%
Final simplification93.5%
(FPCore (J l K U) :precision binary64 (if (<= l 5.8e+53) (+ U (* J (* 2.0 (* l (cos (* K 0.5)))))) (* U (+ 1.0 (* 10.0 (* J (/ l U)))))))
/* Regime-split approximation generated by Herbie.  For l <= 5.8e53 it
 * uses the first-order form U + 2*J*l*cos(K/2); beyond that threshold it
 * switches to U * (1 + 10*J*l/U), a series-in-U form whose constant 10
 * comes from Herbie's search (see the derivation log), not from the
 * original formula.  The 5.8e+53 split point is a sampled regime
 * boundary, not an analytically meaningful constant. */
double code(double J, double l, double K, double U) {
double tmp;
if (l <= 5.8e+53) {
tmp = U + (J * (2.0 * (l * cos((K * 0.5)))));
} else {
tmp = U * (1.0 + (10.0 * (J * (l / U))));
}
return tmp;
}
!> Regime-split approximation: for l <= 5.8e53 the linearized form
!> u + 2*j*l*cos(k/2); otherwise u * (1 + 10*j*l/u).  Both the threshold
!> and the factor 10 are artifacts of Herbie's sampled search (see the
!> derivation log), not analytic constants.
real(8) function code(j, l, k, u)
real(8), intent (in) :: j
real(8), intent (in) :: l
real(8), intent (in) :: k
real(8), intent (in) :: u
real(8) :: tmp
if (l <= 5.8d+53) then
tmp = u + (j * (2.0d0 * (l * cos((k * 0.5d0)))))
else
tmp = u * (1.0d0 + (10.0d0 * (j * (l / u))))
end if
code = tmp
end function
public static double code(double J, double l, double K, double U) {
double tmp;
if (l <= 5.8e+53) {
tmp = U + (J * (2.0 * (l * Math.cos((K * 0.5)))));
} else {
tmp = U * (1.0 + (10.0 * (J * (l / U))));
}
return tmp;
}
def code(J, l, K, U): tmp = 0 if l <= 5.8e+53: tmp = U + (J * (2.0 * (l * math.cos((K * 0.5))))) else: tmp = U * (1.0 + (10.0 * (J * (l / U)))) return tmp
function code(J, l, K, U) tmp = 0.0 if (l <= 5.8e+53) tmp = Float64(U + Float64(J * Float64(2.0 * Float64(l * cos(Float64(K * 0.5)))))); else tmp = Float64(U * Float64(1.0 + Float64(10.0 * Float64(J * Float64(l / U))))); end return tmp end
function tmp_2 = code(J, l, K, U) tmp = 0.0; if (l <= 5.8e+53) tmp = U + (J * (2.0 * (l * cos((K * 0.5))))); else tmp = U * (1.0 + (10.0 * (J * (l / U)))); end tmp_2 = tmp; end
code[J_, l_, K_, U_] := If[LessEqual[l, 5.8e+53], N[(U + N[(J * N[(2.0 * N[(l * N[Cos[N[(K * 0.5), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(U * N[(1.0 + N[(10.0 * N[(J * N[(l / U), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\ell \leq 5.8 \cdot 10^{+53}:\\
\;\;\;\;U + J \cdot \left(2 \cdot \left(\ell \cdot \cos \left(K \cdot 0.5\right)\right)\right)\\
\mathbf{else}:\\
\;\;\;\;U \cdot \left(1 + 10 \cdot \left(J \cdot \frac{\ell}{U}\right)\right)\\
\end{array}
\end{array}
if l < 5.8000000000000004e53: Initial program 77.3%
Taylor expanded in l around 0 80.2%
*-commutative80.2%
associate-*l*80.2%
*-commutative80.2%
*-commutative80.2%
Simplified80.2%
if 5.8000000000000004e53 < l Initial program 100.0%
Taylor expanded in l around 0 17.1%
*-commutative17.1%
associate-*l*17.1%
Simplified17.1%
Applied egg-rr16.4%
log1p-undefine16.4%
rem-exp-log16.4%
+-commutative16.4%
associate--l+16.4%
*-commutative16.4%
metadata-eval16.4%
Simplified16.4%
Taylor expanded in K around 0 16.4%
*-commutative16.4%
Simplified16.4%
Taylor expanded in U around inf 28.4%
associate-/l*34.4%
Simplified34.4%
Final simplification71.6%
(FPCore (J l K U) :precision binary64 (* U (+ 1.0 (* 2.0 (* J (/ (* l (cos (* K 0.5))) U))))))
/* Factored-through-U form: U * (1 + 2*J*l*cos(K/2)/U).  Algebraically
 * equal to U + 2*J*l*cos(K/2) (the linearized sinh), but the division
 * by U makes it undefined at U == 0 — the trade-off Herbie accepted for
 * this alternative (73.5% in the log). */
double code(double J, double l, double K, double U) {
return U * (1.0 + (2.0 * (J * ((l * cos((K * 0.5))) / U))));
}
!> Factored form u * (1 + 2*j*l*cos(k/2)/u): algebraically the linearized
!> expression u + 2*j*l*cos(k/2), but written through a division by u
!> (so it is undefined for u == 0).
real(8) function code(j, l, k, u)
real(8), intent (in) :: j
real(8), intent (in) :: l
real(8), intent (in) :: k
real(8), intent (in) :: u
code = u * (1.0d0 + (2.0d0 * (j * ((l * cos((k * 0.5d0))) / u))))
end function
public static double code(double J, double l, double K, double U) {
return U * (1.0 + (2.0 * (J * ((l * Math.cos((K * 0.5))) / U))));
}
def code(J, l, K, U): return U * (1.0 + (2.0 * (J * ((l * math.cos((K * 0.5))) / U))))
function code(J, l, K, U) return Float64(U * Float64(1.0 + Float64(2.0 * Float64(J * Float64(Float64(l * cos(Float64(K * 0.5))) / U))))) end
function tmp = code(J, l, K, U) tmp = U * (1.0 + (2.0 * (J * ((l * cos((K * 0.5))) / U)))); end
code[J_, l_, K_, U_] := N[(U * N[(1.0 + N[(2.0 * N[(J * N[(N[(l * N[Cos[N[(K * 0.5), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / U), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
U \cdot \left(1 + 2 \cdot \left(J \cdot \frac{\ell \cdot \cos \left(K \cdot 0.5\right)}{U}\right)\right)
\end{array}
Initial program 81.6%
Taylor expanded in l around 0 68.3%
*-commutative68.3%
associate-*l*68.3%
Simplified68.3%
log1p-expm1-u99.3%
Applied egg-rr99.3%
Taylor expanded in U around inf 71.2%
associate-/l*73.5%
Simplified73.5%
Final simplification73.5%
(FPCore (J l K U) :precision binary64 (if (<= l 1.4e+17) (+ U (* l (* J 2.0))) (* U (+ 1.0 (* 10.0 (* J (/ l U)))))))
/* Regime split at l = 1.4e17.  Below: U + 2*J*l — note the cos(K/2)
 * factor has been dropped entirely (Taylor in K about 0, per the log),
 * so this branch is only accurate for small K.  Above: the same
 * U * (1 + 10*J*l/U) search artifact as the 5.8e53 variant. */
double code(double J, double l, double K, double U) {
double tmp;
if (l <= 1.4e+17) {
tmp = U + (l * (J * 2.0));
} else {
tmp = U * (1.0 + (10.0 * (J * (l / U))));
}
return tmp;
}
!> Regime split at l = 1.4e17.  Lower branch u + 2*j*l drops the
!> cos(k/2) factor (Taylor in k about 0 per the derivation log), so it
!> assumes small |k|.  Upper branch is the u * (1 + 10*j*l/u) search
!> artifact shared with the 5.8e53 variant.
real(8) function code(j, l, k, u)
real(8), intent (in) :: j
real(8), intent (in) :: l
real(8), intent (in) :: k
real(8), intent (in) :: u
real(8) :: tmp
if (l <= 1.4d+17) then
tmp = u + (l * (j * 2.0d0))
else
tmp = u * (1.0d0 + (10.0d0 * (j * (l / u))))
end if
code = tmp
end function
public static double code(double J, double l, double K, double U) {
double tmp;
if (l <= 1.4e+17) {
tmp = U + (l * (J * 2.0));
} else {
tmp = U * (1.0 + (10.0 * (J * (l / U))));
}
return tmp;
}
def code(J, l, K, U): tmp = 0 if l <= 1.4e+17: tmp = U + (l * (J * 2.0)) else: tmp = U * (1.0 + (10.0 * (J * (l / U)))) return tmp
function code(J, l, K, U) tmp = 0.0 if (l <= 1.4e+17) tmp = Float64(U + Float64(l * Float64(J * 2.0))); else tmp = Float64(U * Float64(1.0 + Float64(10.0 * Float64(J * Float64(l / U))))); end return tmp end
function tmp_2 = code(J, l, K, U) tmp = 0.0; if (l <= 1.4e+17) tmp = U + (l * (J * 2.0)); else tmp = U * (1.0 + (10.0 * (J * (l / U)))); end tmp_2 = tmp; end
code[J_, l_, K_, U_] := If[LessEqual[l, 1.4e+17], N[(U + N[(l * N[(J * 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(U * N[(1.0 + N[(10.0 * N[(J * N[(l / U), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\ell \leq 1.4 \cdot 10^{+17}:\\
\;\;\;\;U + \ell \cdot \left(J \cdot 2\right)\\
\mathbf{else}:\\
\;\;\;\;U \cdot \left(1 + 10 \cdot \left(J \cdot \frac{\ell}{U}\right)\right)\\
\end{array}
\end{array}
if l < 1.4e17: Initial program 76.4%
Taylor expanded in l around 0 82.8%
*-commutative82.8%
associate-*l*82.8%
Simplified82.8%
Taylor expanded in K around 0 67.3%
associate-*r*67.3%
*-commutative67.3%
Simplified67.3%
if 1.4e17 < l Initial program 100.0%
Taylor expanded in l around 0 16.8%
*-commutative16.8%
associate-*l*16.8%
Simplified16.8%
Applied egg-rr14.4%
log1p-undefine14.4%
rem-exp-log14.4%
+-commutative14.4%
associate--l+14.4%
*-commutative14.4%
metadata-eval14.4%
Simplified14.4%
Taylor expanded in K around 0 14.4%
*-commutative14.4%
Simplified14.4%
Taylor expanded in U around inf 26.3%
associate-/l*31.4%
Simplified31.4%
Final simplification59.5%
(FPCore (J l K U) :precision binary64 (if (or (<= l -1.2e+14) (not (<= l 5.2e-12))) (/ (* U U) U) U))
/* Degenerate alternative: outside the interval (-1.2e14, 5.2e-12] the
 * whole J/l/K contribution is discarded and (U*U)/U is returned; inside
 * it, plain U.  (U*U)/U is kept verbatim from the generator — it rounds
 * like U but the intermediate U*U can overflow to infinity for very
 * large |U|, so it is NOT interchangeable with U. */
double code(double J, double l, double K, double U) {
double tmp;
if ((l <= -1.2e+14) || !(l <= 5.2e-12)) {
tmp = (U * U) / U;
} else {
tmp = U;
}
return tmp;
}
!> Degenerate alternative: the j/l/k contribution is discarded entirely.
!> Outside (-1.2e14, 5.2e-12] returns (u*u)/u — generator output kept
!> verbatim; the intermediate u*u can overflow for huge |u| — otherwise
!> returns u directly.
real(8) function code(j, l, k, u)
real(8), intent (in) :: j
real(8), intent (in) :: l
real(8), intent (in) :: k
real(8), intent (in) :: u
real(8) :: tmp
if ((l <= (-1.2d+14)) .or. (.not. (l <= 5.2d-12))) then
tmp = (u * u) / u
else
tmp = u
end if
code = tmp
end function
public static double code(double J, double l, double K, double U) {
double tmp;
if ((l <= -1.2e+14) || !(l <= 5.2e-12)) {
tmp = (U * U) / U;
} else {
tmp = U;
}
return tmp;
}
def code(J, l, K, U): tmp = 0 if (l <= -1.2e+14) or not (l <= 5.2e-12): tmp = (U * U) / U else: tmp = U return tmp
function code(J, l, K, U) tmp = 0.0 if ((l <= -1.2e+14) || !(l <= 5.2e-12)) tmp = Float64(Float64(U * U) / U); else tmp = U; end return tmp end
function tmp_2 = code(J, l, K, U) tmp = 0.0; if ((l <= -1.2e+14) || ~((l <= 5.2e-12))) tmp = (U * U) / U; else tmp = U; end tmp_2 = tmp; end
code[J_, l_, K_, U_] := If[Or[LessEqual[l, -1.2e+14], N[Not[LessEqual[l, 5.2e-12]], $MachinePrecision]], N[(N[(U * U), $MachinePrecision] / U), $MachinePrecision], U]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\ell \leq -1.2 \cdot 10^{+14} \lor \neg \left(\ell \leq 5.2 \cdot 10^{-12}\right):\\
\;\;\;\;\frac{U \cdot U}{U}\\
\mathbf{else}:\\
\;\;\;\;U\\
\end{array}
\end{array}
if l < -1.2e14 or 5.19999999999999965e-12 < l Initial program 99.2%
Applied egg-rr3.6%
flip-+16.9%
metadata-eval16.9%
div-sub16.9%
sub0-neg16.9%
pow216.9%
sub0-neg16.9%
Applied egg-rr16.9%
div016.9%
neg-sub016.9%
distribute-frac-neg216.9%
remove-double-neg16.9%
Simplified16.9%
unpow216.9%
Applied egg-rr16.9%
if -1.2e14 < l < 5.19999999999999965e-12: Initial program 69.3%
Applied egg-rr42.8%
Taylor expanded in U around inf 67.5%
Final simplification46.7%
(FPCore (J l K U) :precision binary64 (if (<= l 1.9e+16) (+ U (* -6.0 (* J l))) (/ (* U U) U)))
/* Regime split at l = 1.9e16.  Lower branch U - 6*J*l: the coefficient
 * -6 is a product of Herbie's egg rewriting plus Taylor-in-K steps (see
 * the log), not a coefficient of the original formula.  Upper branch is
 * the (U*U)/U form (rounds like U; U*U may overflow for huge |U|). */
double code(double J, double l, double K, double U) {
double tmp;
if (l <= 1.9e+16) {
tmp = U + (-6.0 * (J * l));
} else {
tmp = (U * U) / U;
}
return tmp;
}
!> Regime split at l = 1.9e16: u - 6*j*l below (the -6 comes from
!> Herbie's rewrite/Taylor search, per the derivation log), (u*u)/u
!> above (rounds like u, but u*u may overflow for very large |u|).
real(8) function code(j, l, k, u)
real(8), intent (in) :: j
real(8), intent (in) :: l
real(8), intent (in) :: k
real(8), intent (in) :: u
real(8) :: tmp
if (l <= 1.9d+16) then
tmp = u + ((-6.0d0) * (j * l))
else
tmp = (u * u) / u
end if
code = tmp
end function
public static double code(double J, double l, double K, double U) {
double tmp;
if (l <= 1.9e+16) {
tmp = U + (-6.0 * (J * l));
} else {
tmp = (U * U) / U;
}
return tmp;
}
def code(J, l, K, U): tmp = 0 if l <= 1.9e+16: tmp = U + (-6.0 * (J * l)) else: tmp = (U * U) / U return tmp
function code(J, l, K, U) tmp = 0.0 if (l <= 1.9e+16) tmp = Float64(U + Float64(-6.0 * Float64(J * l))); else tmp = Float64(Float64(U * U) / U); end return tmp end
function tmp_2 = code(J, l, K, U) tmp = 0.0; if (l <= 1.9e+16) tmp = U + (-6.0 * (J * l)); else tmp = (U * U) / U; end tmp_2 = tmp; end
code[J_, l_, K_, U_] := If[LessEqual[l, 1.9e+16], N[(U + N[(-6.0 * N[(J * l), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(U * U), $MachinePrecision] / U), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\ell \leq 1.9 \cdot 10^{+16}:\\
\;\;\;\;U + -6 \cdot \left(J \cdot \ell\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{U \cdot U}{U}\\
\end{array}
\end{array}
if l < 1.9e16: Initial program 76.3%
Taylor expanded in l around 0 83.2%
*-commutative83.2%
associate-*l*83.2%
Simplified83.2%
Applied egg-rr55.1%
Taylor expanded in K around 0 55.6%
*-commutative55.6%
Simplified55.6%
if 1.9e16 < l Initial program 100.0%
Applied egg-rr2.8%
flip-+19.0%
metadata-eval19.0%
div-sub19.0%
sub0-neg19.0%
pow219.0%
sub0-neg19.0%
Applied egg-rr19.0%
div019.0%
neg-sub019.0%
distribute-frac-neg219.0%
remove-double-neg19.0%
Simplified19.0%
unpow219.0%
Applied egg-rr19.0%
Final simplification47.4%
(FPCore (J l K U) :precision binary64 (+ U (* l (* J 2.0))))
/* Fully linearized form U + 2*J*l: first-order Taylor in l with the
 * cos(K/2) factor dropped (Taylor in K about 0, per the log).  Cheap,
 * but valid only for small |l| and small |K| (55.7% overall). */
double code(double J, double l, double K, double U) {
return U + (l * (J * 2.0));
}
!> Fully linearized form u + 2*j*l: first-order in l with the cos(k/2)
!> factor dropped, so it assumes both |l| and |k| are small.
real(8) function code(j, l, k, u)
real(8), intent (in) :: j
real(8), intent (in) :: l
real(8), intent (in) :: k
real(8), intent (in) :: u
code = u + (l * (j * 2.0d0))
end function
public static double code(double J, double l, double K, double U) {
return U + (l * (J * 2.0));
}
def code(J, l, K, U): return U + (l * (J * 2.0))
function code(J, l, K, U) return Float64(U + Float64(l * Float64(J * 2.0))) end
function tmp = code(J, l, K, U) tmp = U + (l * (J * 2.0)); end
code[J_, l_, K_, U_] := N[(U + N[(l * N[(J * 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
U + \ell \cdot \left(J \cdot 2\right)
\end{array}
Initial program 81.6%
Taylor expanded in l around 0 68.3%
*-commutative68.3%
associate-*l*68.3%
Simplified68.3%
Taylor expanded in K around 0 55.7%
associate-*r*55.7%
*-commutative55.7%
Simplified55.7%
Final simplification55.7%
(FPCore (J l K U) :precision binary64 U)
/* Zeroth-order alternative: returns U unchanged, discarding the entire
 * J*2*sinh(l)*cos(K/2) term — accurate only where that term is
 * negligible relative to U (41.3% in the log). */
double code(double J, double l, double K, double U) {
return U;
}
!> Zeroth-order alternative: returns u, discarding the whole
!> j*2*sinh(l)*cos(k/2) term; valid only where that term is negligible.
real(8) function code(j, l, k, u)
real(8), intent (in) :: j
real(8), intent (in) :: l
real(8), intent (in) :: k
real(8), intent (in) :: u
code = u
end function
public static double code(double J, double l, double K, double U) {
return U;
}
def code(J, l, K, U): return U
function code(J, l, K, U) return U end
function tmp = code(J, l, K, U) tmp = U; end
code[J_, l_, K_, U_] := U
\begin{array}{l}
\\
U
\end{array}
Initial program 81.6%
Applied egg-rr26.3%
Taylor expanded in U around inf 41.3%
(FPCore (J l K U) :precision binary64 8.0)
/* Constant alternative from the search: always returns 8.0 regardless
 * of the inputs.  Scored only 3.0% in the report — kept as a record of
 * Herbie's output, not as a usable approximation. */
double code(double J, double l, double K, double U) {
return 8.0;
}
!> Constant alternative: always 8.0, ignoring all inputs.  A 3.0%-accuracy
!> artifact of the search, recorded for completeness only.
real(8) function code(j, l, k, u)
real(8), intent (in) :: j
real(8), intent (in) :: l
real(8), intent (in) :: k
real(8), intent (in) :: u
code = 8.0d0
end function
public static double code(double J, double l, double K, double U) {
return 8.0;
}
def code(J, l, K, U): return 8.0
function code(J, l, K, U) return 8.0 end
function tmp = code(J, l, K, U) tmp = 8.0; end
code[J_, l_, K_, U_] := 8.0
\begin{array}{l}
\\
8
\end{array}
Initial program 81.6%
Applied egg-rr26.4%
Taylor expanded in U around 0 3.0%
(FPCore (J l K U) :precision binary64 -4.0)
/* Constant alternative from the search: always returns -4.0 regardless
 * of the inputs (3.0% accuracy per the report) — recorded output, not a
 * usable approximation. */
double code(double J, double l, double K, double U) {
return -4.0;
}
!> Constant alternative: always -4.0, ignoring all inputs (3.0% accuracy
!> per the report); recorded for completeness only.
real(8) function code(j, l, k, u)
real(8), intent (in) :: j
real(8), intent (in) :: l
real(8), intent (in) :: k
real(8), intent (in) :: u
code = -4.0d0
end function
public static double code(double J, double l, double K, double U) {
return -4.0;
}
def code(J, l, K, U): return -4.0
function code(J, l, K, U) return -4.0 end
function tmp = code(J, l, K, U) tmp = -4.0; end
code[J_, l_, K_, U_] := -4.0
\begin{array}{l}
\\
-4
\end{array}
Initial program 81.6%
Applied egg-rr26.3%
Taylor expanded in U around 0 3.0%
herbie shell --seed 2024157
(FPCore (J l K U)
:name "Maksimov and Kolovsky, Equation (4)"
:precision binary64
(+ (* (* J (- (exp l) (exp (- l)))) (cos (/ K 2.0))) U))