
(FPCore (t l k) :precision binary64 (/ 2.0 (* (* (* (/ (pow t 3.0) (* l l)) (sin k)) (tan k)) (- (+ 1.0 (pow (/ k t) 2.0)) 1.0))))
double code(double t, double l, double k) {
return 2.0 / ((((pow(t, 3.0) / (l * l)) * sin(k)) * tan(k)) * ((1.0 + pow((k / t), 2.0)) - 1.0));
}
! Evaluates 2 / ((t**3 / l**2) * sin(k) * tan(k) * ((1 + (k/t)**2) - 1)).
! NOTE(review): (1.0d0 + (k/t)**2) - 1.0d0 suffers catastrophic
! cancellation when |k/t| is small; the alternatives below avoid it.
real(8) function code(t, l, k)
real(8), intent (in) :: t
real(8), intent (in) :: l
real(8), intent (in) :: k
code = 2.0d0 / (((((t ** 3.0d0) / (l * l)) * sin(k)) * tan(k)) * ((1.0d0 + ((k / t) ** 2.0d0)) - 1.0d0))
end function
/**
 * Evaluates 2 / ((t^3 / l^2) * sin(k) * tan(k) * ((1 + (k/t)^2) - 1)).
 * NOTE(review): (1 + x^2) - 1 cancels for small k/t; naive reference form.
 */
public static double code(double t, double l, double k) {
    double ratio = Math.pow(t, 3.0) / (l * l);
    double denom = ((ratio * Math.sin(k)) * Math.tan(k)) * ((1.0 + Math.pow(k / t, 2.0)) - 1.0);
    return 2.0 / denom;
}
def code(t, l, k):
    """Evaluate 2 / ((t**3 / l**2) * sin(k) * tan(k) * ((1 + (k/t)**2) - 1)).

    NOTE(review): (1 + x**2) - 1 cancels for small k/t; naive reference form.
    Grouping matches the FPCore expression exactly.
    """
    ratio = math.pow(t, 3.0) / (l * l)
    denom = ((ratio * math.sin(k)) * math.tan(k)) * ((1.0 + math.pow(k / t, 2.0)) - 1.0)
    return 2.0 / denom
# Naive evaluation of 2 / ((t^3/l^2) * sin(k) * tan(k) * ((1 + (k/t)^2) - 1)).
# Float64 coercions and grouping preserved from the generated one-liner.
function code(t, l, k)
    num = Float64(Float64((t ^ 3.0) / Float64(l * l)) * sin(k))
    denom = Float64(Float64(num * tan(k)) * Float64(Float64(1.0 + (Float64(k / t) ^ 2.0)) - 1.0))
    return Float64(2.0 / denom)
end
% Naive evaluation of 2 / ((t^3/l^2) * sin(k) * tan(k) * ((1 + (k/t)^2) - 1)).
function tmp = code(t, l, k)
  num = (t ^ 3.0) / (l * l);
  tmp = 2.0 / (((num * sin(k)) * tan(k)) * ((1.0 + ((k / t) ^ 2.0)) - 1.0));
end
code[t_, l_, k_] := N[(2.0 / N[(N[(N[(N[(N[Power[t, 3.0], $MachinePrecision] / N[(l * l), $MachinePrecision]), $MachinePrecision] * N[Sin[k], $MachinePrecision]), $MachinePrecision] * N[Tan[k], $MachinePrecision]), $MachinePrecision] * N[(N[(1.0 + N[Power[N[(k / t), $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision] - 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{2}{\left(\left(\frac{{t}^{3}}{\ell \cdot \ell} \cdot \sin k\right) \cdot \tan k\right) \cdot \left(\left(1 + {\left(\frac{k}{t}\right)}^{2}\right) - 1\right)}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 9 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (t l k) :precision binary64 (/ 2.0 (* (* (* (/ (pow t 3.0) (* l l)) (sin k)) (tan k)) (- (+ 1.0 (pow (/ k t) 2.0)) 1.0))))
double code(double t, double l, double k) {
return 2.0 / ((((pow(t, 3.0) / (l * l)) * sin(k)) * tan(k)) * ((1.0 + pow((k / t), 2.0)) - 1.0));
}
! Evaluates 2 / ((t**3 / l**2) * sin(k) * tan(k) * ((1 + (k/t)**2) - 1)).
! NOTE(review): (1.0d0 + (k/t)**2) - 1.0d0 suffers catastrophic
! cancellation when |k/t| is small.
real(8) function code(t, l, k)
real(8), intent (in) :: t
real(8), intent (in) :: l
real(8), intent (in) :: k
code = 2.0d0 / (((((t ** 3.0d0) / (l * l)) * sin(k)) * tan(k)) * ((1.0d0 + ((k / t) ** 2.0d0)) - 1.0d0))
end function
/**
 * Evaluates 2 / ((t^3 / l^2) * sin(k) * tan(k) * ((1 + (k/t)^2) - 1)).
 * NOTE(review): (1 + x^2) - 1 cancels for small k/t; naive reference form.
 */
public static double code(double t, double l, double k) {
    double ratio = Math.pow(t, 3.0) / (l * l);
    double denom = ((ratio * Math.sin(k)) * Math.tan(k)) * ((1.0 + Math.pow(k / t, 2.0)) - 1.0);
    return 2.0 / denom;
}
def code(t, l, k):
    """Evaluate 2 / ((t**3 / l**2) * sin(k) * tan(k) * ((1 + (k/t)**2) - 1)).

    NOTE(review): (1 + x**2) - 1 cancels for small k/t; naive reference form.
    """
    ratio = math.pow(t, 3.0) / (l * l)
    denom = ((ratio * math.sin(k)) * math.tan(k)) * ((1.0 + math.pow(k / t, 2.0)) - 1.0)
    return 2.0 / denom
function code(t, l, k) return Float64(2.0 / Float64(Float64(Float64(Float64((t ^ 3.0) / Float64(l * l)) * sin(k)) * tan(k)) * Float64(Float64(1.0 + (Float64(k / t) ^ 2.0)) - 1.0))) end
function tmp = code(t, l, k) tmp = 2.0 / (((((t ^ 3.0) / (l * l)) * sin(k)) * tan(k)) * ((1.0 + ((k / t) ^ 2.0)) - 1.0)); end
code[t_, l_, k_] := N[(2.0 / N[(N[(N[(N[(N[Power[t, 3.0], $MachinePrecision] / N[(l * l), $MachinePrecision]), $MachinePrecision] * N[Sin[k], $MachinePrecision]), $MachinePrecision] * N[Tan[k], $MachinePrecision]), $MachinePrecision] * N[(N[(1.0 + N[Power[N[(k / t), $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision] - 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{2}{\left(\left(\frac{{t}^{3}}{\ell \cdot \ell} \cdot \sin k\right) \cdot \tan k\right) \cdot \left(\left(1 + {\left(\frac{k}{t}\right)}^{2}\right) - 1\right)}
\end{array}
k_m = (fabs.f64 k) (FPCore (t l k_m) :precision binary64 (if (<= k_m 1.15e-20) (* (/ (/ 2.0 t) (/ k_m (/ l k_m))) (/ (/ l k_m) k_m)) (/ 2.0 (* (/ k_m l) (* (/ k_m l) (* (sin k_m) (* t (tan k_m))))))))
k_m = fabs(k);
double code(double t, double l, double k_m) {
double tmp;
if (k_m <= 1.15e-20) {
tmp = ((2.0 / t) / (k_m / (l / k_m))) * ((l / k_m) / k_m);
} else {
tmp = 2.0 / ((k_m / l) * ((k_m / l) * (sin(k_m) * (t * tan(k_m)))));
}
return tmp;
}
k_m = abs(k)
! Piecewise form: series-derived rewrite for tiny k_m, regrouped
! quotient otherwise. k_m is assumed nonnegative (caller applies abs).
real(8) function code(t, l, k_m)
real(8), intent (in) :: t
real(8), intent (in) :: l
real(8), intent (in) :: k_m
real(8) :: tmp
if (k_m <= 1.15d-20) then
tmp = ((2.0d0 / t) / (k_m / (l / k_m))) * ((l / k_m) / k_m)
else
tmp = 2.0d0 / ((k_m / l) * ((k_m / l) * (sin(k_m) * (t * tan(k_m)))))
end if
code = tmp
end function
k_m = Math.abs(k);
/**
 * Piecewise form: series-derived rewrite below the |k| cutoff, regrouped
 * quotient otherwise. k_m is assumed nonnegative (caller applies abs).
 */
public static double code(double t, double l, double k_m) {
    if (k_m <= 1.15e-20) {
        return ((2.0 / t) / (k_m / (l / k_m))) * ((l / k_m) / k_m);
    }
    return 2.0 / ((k_m / l) * ((k_m / l) * (Math.sin(k_m) * (t * Math.tan(k_m)))));
}
k_m = math.fabs(k)

def code(t, l, k_m):
    """Piecewise form: series-derived rewrite for tiny k_m, else regrouped quotient.

    k_m is assumed nonnegative (caller applies fabs).
    """
    if k_m <= 1.15e-20:
        return ((2.0 / t) / (k_m / (l / k_m))) * ((l / k_m) / k_m)
    return 2.0 / ((k_m / l) * ((k_m / l) * (math.sin(k_m) * (t * math.tan(k_m)))))
k_m = abs(k) function code(t, l, k_m) tmp = 0.0 if (k_m <= 1.15e-20) tmp = Float64(Float64(Float64(2.0 / t) / Float64(k_m / Float64(l / k_m))) * Float64(Float64(l / k_m) / k_m)); else tmp = Float64(2.0 / Float64(Float64(k_m / l) * Float64(Float64(k_m / l) * Float64(sin(k_m) * Float64(t * tan(k_m)))))); end return tmp end
k_m = abs(k); function tmp_2 = code(t, l, k_m) tmp = 0.0; if (k_m <= 1.15e-20) tmp = ((2.0 / t) / (k_m / (l / k_m))) * ((l / k_m) / k_m); else tmp = 2.0 / ((k_m / l) * ((k_m / l) * (sin(k_m) * (t * tan(k_m))))); end tmp_2 = tmp; end
k_m = N[Abs[k], $MachinePrecision] code[t_, l_, k$95$m_] := If[LessEqual[k$95$m, 1.15e-20], N[(N[(N[(2.0 / t), $MachinePrecision] / N[(k$95$m / N[(l / k$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(N[(l / k$95$m), $MachinePrecision] / k$95$m), $MachinePrecision]), $MachinePrecision], N[(2.0 / N[(N[(k$95$m / l), $MachinePrecision] * N[(N[(k$95$m / l), $MachinePrecision] * N[(N[Sin[k$95$m], $MachinePrecision] * N[(t * N[Tan[k$95$m], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
k_m = \left|k\right|
\\
\begin{array}{l}
\mathbf{if}\;k\_m \leq 1.15 \cdot 10^{-20}:\\
\;\;\;\;\frac{\frac{2}{t}}{\frac{k\_m}{\frac{\ell}{k\_m}}} \cdot \frac{\frac{\ell}{k\_m}}{k\_m}\\
\mathbf{else}:\\
\;\;\;\;\frac{2}{\frac{k\_m}{\ell} \cdot \left(\frac{k\_m}{\ell} \cdot \left(\sin k\_m \cdot \left(t \cdot \tan k\_m\right)\right)\right)}\\
\end{array}
\end{array}
if k < 1.15e-20Initial program 36.1%
Taylor expanded in k around 0
/-lowering-/.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
metadata-evalN/A
pow-sqrN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6460.0%
Simplified60.0%
remove-double-negN/A
times-fracN/A
distribute-rgt-neg-inN/A
distribute-lft-neg-inN/A
*-lowering-*.f64N/A
neg-sub0N/A
--lowering--.f64N/A
/-lowering-/.f64N/A
neg-sub0N/A
--lowering--.f64N/A
associate-*l*N/A
associate-/l*N/A
*-lowering-*.f64N/A
/-lowering-/.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f6450.8%
Applied egg-rr50.8%
associate-/r*N/A
sub0-negN/A
distribute-frac-neg2N/A
distribute-frac-negN/A
distribute-frac-neg2N/A
sub0-negN/A
remove-double-negN/A
un-div-invN/A
clear-numN/A
/-lowering-/.f64N/A
clear-numN/A
un-div-invN/A
/-lowering-/.f64N/A
/-lowering-/.f64N/A
associate-/l*N/A
associate-*r*N/A
clear-numN/A
un-div-invN/A
/-lowering-/.f64N/A
*-lowering-*.f64N/A
/-lowering-/.f64N/A
Applied egg-rr74.2%
associate-/r/N/A
associate-/r/N/A
times-fracN/A
*-lowering-*.f64N/A
/-lowering-/.f64N/A
/-lowering-/.f64N/A
associate-*l/N/A
associate-/r/N/A
/-lowering-/.f64N/A
/-lowering-/.f64N/A
associate-/r*N/A
/-lowering-/.f64N/A
/-lowering-/.f6482.6%
Applied egg-rr82.6%
if 1.15e-20 < k Initial program 37.8%
Taylor expanded in t around 0
associate-/l*N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
*-commutativeN/A
associate-/r*N/A
/-lowering-/.f64N/A
/-lowering-/.f64N/A
*-lowering-*.f64N/A
pow-lowering-pow.f64N/A
sin-lowering-sin.f64N/A
cos-lowering-cos.f64N/A
unpow2N/A
*-lowering-*.f6467.6%
Simplified67.6%
associate-*r/N/A
times-fracN/A
clear-numN/A
un-div-invN/A
/-lowering-/.f64N/A
/-lowering-/.f64N/A
*-lowering-*.f64N/A
/-lowering-/.f64N/A
associate-/l*N/A
*-lowering-*.f64N/A
unpow2N/A
associate-/l*N/A
tan-quotN/A
*-lowering-*.f64N/A
sin-lowering-sin.f64N/A
tan-lowering-tan.f6475.4%
Applied egg-rr75.4%
associate-/l*N/A
div-invN/A
times-fracN/A
*-lowering-*.f64N/A
/-lowering-/.f64N/A
/-lowering-/.f64N/A
/-lowering-/.f64N/A
associate-/r*N/A
/-lowering-/.f64N/A
/-lowering-/.f64N/A
*-lowering-*.f64N/A
sin-lowering-sin.f64N/A
tan-lowering-tan.f6499.2%
Applied egg-rr99.2%
div-invN/A
clear-numN/A
*-lowering-*.f64N/A
/-lowering-/.f64N/A
clear-numN/A
associate-/l/N/A
remove-double-divN/A
associate-*l*N/A
*-lowering-*.f64N/A
sin-lowering-sin.f64N/A
*-lowering-*.f64N/A
tan-lowering-tan.f6499.3%
Applied egg-rr99.3%
Final simplification87.3%
k_m = (fabs.f64 k) (FPCore (t l k_m) :precision binary64 (if (<= k_m 1.15e-20) (* (/ (/ 2.0 t) (/ k_m (/ l k_m))) (/ (/ l k_m) k_m)) (/ 2.0 (* k_m (/ (/ k_m l) (/ (/ l t) (* (sin k_m) (tan k_m))))))))
k_m = fabs(k);
double code(double t, double l, double k_m) {
double tmp;
if (k_m <= 1.15e-20) {
tmp = ((2.0 / t) / (k_m / (l / k_m))) * ((l / k_m) / k_m);
} else {
tmp = 2.0 / (k_m * ((k_m / l) / ((l / t) / (sin(k_m) * tan(k_m)))));
}
return tmp;
}
k_m = abs(k)
! Piecewise form: series-derived rewrite for tiny k_m; nested-quotient
! regrouping otherwise. k_m is assumed nonnegative (caller applies abs).
real(8) function code(t, l, k_m)
real(8), intent (in) :: t
real(8), intent (in) :: l
real(8), intent (in) :: k_m
real(8) :: tmp
if (k_m <= 1.15d-20) then
tmp = ((2.0d0 / t) / (k_m / (l / k_m))) * ((l / k_m) / k_m)
else
tmp = 2.0d0 / (k_m * ((k_m / l) / ((l / t) / (sin(k_m) * tan(k_m)))))
end if
code = tmp
end function
k_m = Math.abs(k);
/**
 * Piecewise form: series-derived rewrite below the |k| cutoff; otherwise
 * a nested-quotient regrouping. k_m assumed nonnegative (caller applies abs).
 */
public static double code(double t, double l, double k_m) {
    if (k_m <= 1.15e-20) {
        return ((2.0 / t) / (k_m / (l / k_m))) * ((l / k_m) / k_m);
    }
    return 2.0 / (k_m * ((k_m / l) / ((l / t) / (Math.sin(k_m) * Math.tan(k_m)))));
}
k_m = math.fabs(k)

def code(t, l, k_m):
    """Piecewise: series-derived rewrite for tiny k_m, else nested-quotient regrouping.

    k_m is assumed nonnegative (caller applies fabs).
    """
    if k_m <= 1.15e-20:
        return ((2.0 / t) / (k_m / (l / k_m))) * ((l / k_m) / k_m)
    return 2.0 / (k_m * ((k_m / l) / ((l / t) / (math.sin(k_m) * math.tan(k_m)))))
k_m = abs(k) function code(t, l, k_m) tmp = 0.0 if (k_m <= 1.15e-20) tmp = Float64(Float64(Float64(2.0 / t) / Float64(k_m / Float64(l / k_m))) * Float64(Float64(l / k_m) / k_m)); else tmp = Float64(2.0 / Float64(k_m * Float64(Float64(k_m / l) / Float64(Float64(l / t) / Float64(sin(k_m) * tan(k_m)))))); end return tmp end
k_m = abs(k); function tmp_2 = code(t, l, k_m) tmp = 0.0; if (k_m <= 1.15e-20) tmp = ((2.0 / t) / (k_m / (l / k_m))) * ((l / k_m) / k_m); else tmp = 2.0 / (k_m * ((k_m / l) / ((l / t) / (sin(k_m) * tan(k_m))))); end tmp_2 = tmp; end
k_m = N[Abs[k], $MachinePrecision] code[t_, l_, k$95$m_] := If[LessEqual[k$95$m, 1.15e-20], N[(N[(N[(2.0 / t), $MachinePrecision] / N[(k$95$m / N[(l / k$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(N[(l / k$95$m), $MachinePrecision] / k$95$m), $MachinePrecision]), $MachinePrecision], N[(2.0 / N[(k$95$m * N[(N[(k$95$m / l), $MachinePrecision] / N[(N[(l / t), $MachinePrecision] / N[(N[Sin[k$95$m], $MachinePrecision] * N[Tan[k$95$m], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
k_m = \left|k\right|
\\
\begin{array}{l}
\mathbf{if}\;k\_m \leq 1.15 \cdot 10^{-20}:\\
\;\;\;\;\frac{\frac{2}{t}}{\frac{k\_m}{\frac{\ell}{k\_m}}} \cdot \frac{\frac{\ell}{k\_m}}{k\_m}\\
\mathbf{else}:\\
\;\;\;\;\frac{2}{k\_m \cdot \frac{\frac{k\_m}{\ell}}{\frac{\frac{\ell}{t}}{\sin k\_m \cdot \tan k\_m}}}\\
\end{array}
\end{array}
if k < 1.15e-20Initial program 36.1%
Taylor expanded in k around 0
/-lowering-/.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
metadata-evalN/A
pow-sqrN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6460.0%
Simplified60.0%
remove-double-negN/A
times-fracN/A
distribute-rgt-neg-inN/A
distribute-lft-neg-inN/A
*-lowering-*.f64N/A
neg-sub0N/A
--lowering--.f64N/A
/-lowering-/.f64N/A
neg-sub0N/A
--lowering--.f64N/A
associate-*l*N/A
associate-/l*N/A
*-lowering-*.f64N/A
/-lowering-/.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f6450.8%
Applied egg-rr50.8%
associate-/r*N/A
sub0-negN/A
distribute-frac-neg2N/A
distribute-frac-negN/A
distribute-frac-neg2N/A
sub0-negN/A
remove-double-negN/A
un-div-invN/A
clear-numN/A
/-lowering-/.f64N/A
clear-numN/A
un-div-invN/A
/-lowering-/.f64N/A
/-lowering-/.f64N/A
associate-/l*N/A
associate-*r*N/A
clear-numN/A
un-div-invN/A
/-lowering-/.f64N/A
*-lowering-*.f64N/A
/-lowering-/.f64N/A
Applied egg-rr74.2%
associate-/r/N/A
associate-/r/N/A
times-fracN/A
*-lowering-*.f64N/A
/-lowering-/.f64N/A
/-lowering-/.f64N/A
associate-*l/N/A
associate-/r/N/A
/-lowering-/.f64N/A
/-lowering-/.f64N/A
associate-/r*N/A
/-lowering-/.f64N/A
/-lowering-/.f6482.6%
Applied egg-rr82.6%
if 1.15e-20 < k Initial program 37.8%
Taylor expanded in t around 0
associate-/l*N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
*-commutativeN/A
associate-/r*N/A
/-lowering-/.f64N/A
/-lowering-/.f64N/A
*-lowering-*.f64N/A
pow-lowering-pow.f64N/A
sin-lowering-sin.f64N/A
cos-lowering-cos.f64N/A
unpow2N/A
*-lowering-*.f6467.6%
Simplified67.6%
associate-*r/N/A
times-fracN/A
clear-numN/A
un-div-invN/A
/-lowering-/.f64N/A
/-lowering-/.f64N/A
*-lowering-*.f64N/A
/-lowering-/.f64N/A
associate-/l*N/A
*-lowering-*.f64N/A
unpow2N/A
associate-/l*N/A
tan-quotN/A
*-lowering-*.f64N/A
sin-lowering-sin.f64N/A
tan-lowering-tan.f6475.4%
Applied egg-rr75.4%
div-invN/A
clear-numN/A
associate-/l*N/A
associate-*l*N/A
*-lowering-*.f64N/A
clear-numN/A
un-div-invN/A
/-lowering-/.f64N/A
/-lowering-/.f64N/A
associate-/r*N/A
/-lowering-/.f64N/A
/-lowering-/.f64N/A
*-lowering-*.f64N/A
sin-lowering-sin.f64N/A
tan-lowering-tan.f6492.7%
Applied egg-rr92.7%
k_m = (fabs.f64 k)
(FPCore (t l k_m)
:precision binary64
(let* ((t_1 (/ (/ l k_m) k_m)))
(if (<= k_m 5e-100)
(* (/ (/ 2.0 t) (/ k_m (/ l k_m))) t_1)
      (* 2.0 (* t_1 (/ (/ l t) (* (sin k_m) (tan k_m))))))))
k_m = fabs(k);
double code(double t, double l, double k_m) {
double t_1 = (l / k_m) / k_m;
double tmp;
if (k_m <= 5e-100) {
tmp = ((2.0 / t) / (k_m / (l / k_m))) * t_1;
} else {
tmp = 2.0 * (t_1 * ((l / t) / (sin(k_m) * tan(k_m))));
}
return tmp;
}
k_m = abs(k)
! Piecewise form sharing t_1 = (l/k_m)/k_m between both branches.
! k_m is assumed nonnegative (caller applies abs).
real(8) function code(t, l, k_m)
real(8), intent (in) :: t
real(8), intent (in) :: l
real(8), intent (in) :: k_m
real(8) :: t_1
real(8) :: tmp
t_1 = (l / k_m) / k_m
if (k_m <= 5d-100) then
tmp = ((2.0d0 / t) / (k_m / (l / k_m))) * t_1
else
tmp = 2.0d0 * (t_1 * ((l / t) / (sin(k_m) * tan(k_m))))
end if
code = tmp
end function
k_m = Math.abs(k);
/**
 * Piecewise form sharing the factor (l/k_m)/k_m between both branches.
 * k_m assumed nonnegative (caller applies abs); grouping preserved.
 */
public static double code(double t, double l, double k_m) {
    final double lOverKmSq = (l / k_m) / k_m;  // t_1 in the FPCore
    if (k_m <= 5e-100) {
        return ((2.0 / t) / (k_m / (l / k_m))) * lOverKmSq;
    }
    return 2.0 * (lOverKmSq * ((l / t) / (Math.sin(k_m) * Math.tan(k_m))));
}
k_m = math.fabs(k)

def code(t, l, k_m):
    """Piecewise form sharing t_1 = (l/k_m)/k_m between both branches.

    k_m is assumed nonnegative (caller applies fabs).
    """
    t_1 = (l / k_m) / k_m
    if k_m <= 5e-100:
        return ((2.0 / t) / (k_m / (l / k_m))) * t_1
    return 2.0 * (t_1 * ((l / t) / (math.sin(k_m) * math.tan(k_m))))
k_m = abs(k) function code(t, l, k_m) t_1 = Float64(Float64(l / k_m) / k_m) tmp = 0.0 if (k_m <= 5e-100) tmp = Float64(Float64(Float64(2.0 / t) / Float64(k_m / Float64(l / k_m))) * t_1); else tmp = Float64(2.0 * Float64(t_1 * Float64(Float64(l / t) / Float64(sin(k_m) * tan(k_m))))); end return tmp end
k_m = abs(k); function tmp_2 = code(t, l, k_m) t_1 = (l / k_m) / k_m; tmp = 0.0; if (k_m <= 5e-100) tmp = ((2.0 / t) / (k_m / (l / k_m))) * t_1; else tmp = 2.0 * (t_1 * ((l / t) / (sin(k_m) * tan(k_m)))); end tmp_2 = tmp; end
k_m = N[Abs[k], $MachinePrecision]
code[t_, l_, k$95$m_] := Block[{t$95$1 = N[(N[(l / k$95$m), $MachinePrecision] / k$95$m), $MachinePrecision]}, If[LessEqual[k$95$m, 5e-100], N[(N[(N[(2.0 / t), $MachinePrecision] / N[(k$95$m / N[(l / k$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * t$95$1), $MachinePrecision], N[(2.0 * N[(t$95$1 * N[(N[(l / t), $MachinePrecision] / N[(N[Sin[k$95$m], $MachinePrecision] * N[Tan[k$95$m], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
k_m = \left|k\right|
\\
\begin{array}{l}
t_1 := \frac{\frac{\ell}{k\_m}}{k\_m}\\
\mathbf{if}\;k\_m \leq 5 \cdot 10^{-100}:\\
\;\;\;\;\frac{\frac{2}{t}}{\frac{k\_m}{\frac{\ell}{k\_m}}} \cdot t\_1\\
\mathbf{else}:\\
\;\;\;\;2 \cdot \left(t\_1 \cdot \frac{\frac{\ell}{t}}{\sin k\_m \cdot \tan k\_m}\right)\\
\end{array}
\end{array}
if k < 5.0000000000000001e-100Initial program 35.7%
Taylor expanded in k around 0
/-lowering-/.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
metadata-evalN/A
pow-sqrN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6460.0%
Simplified60.0%
remove-double-negN/A
times-fracN/A
distribute-rgt-neg-inN/A
distribute-lft-neg-inN/A
*-lowering-*.f64N/A
neg-sub0N/A
--lowering--.f64N/A
/-lowering-/.f64N/A
neg-sub0N/A
--lowering--.f64N/A
associate-*l*N/A
associate-/l*N/A
*-lowering-*.f64N/A
/-lowering-/.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f6448.2%
Applied egg-rr48.2%
associate-/r*N/A
sub0-negN/A
distribute-frac-neg2N/A
distribute-frac-negN/A
distribute-frac-neg2N/A
sub0-negN/A
remove-double-negN/A
un-div-invN/A
clear-numN/A
/-lowering-/.f64N/A
clear-numN/A
un-div-invN/A
/-lowering-/.f64N/A
/-lowering-/.f64N/A
associate-/l*N/A
associate-*r*N/A
clear-numN/A
un-div-invN/A
/-lowering-/.f64N/A
*-lowering-*.f64N/A
/-lowering-/.f64N/A
Applied egg-rr71.9%
associate-/r/N/A
associate-/r/N/A
times-fracN/A
*-lowering-*.f64N/A
/-lowering-/.f64N/A
/-lowering-/.f64N/A
associate-*l/N/A
associate-/r/N/A
/-lowering-/.f64N/A
/-lowering-/.f64N/A
associate-/r*N/A
/-lowering-/.f64N/A
/-lowering-/.f6481.1%
Applied egg-rr81.1%
if 5.0000000000000001e-100 < k Initial program 38.3%
Taylor expanded in t around 0
associate-/l*N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
*-commutativeN/A
associate-/r*N/A
/-lowering-/.f64N/A
/-lowering-/.f64N/A
*-lowering-*.f64N/A
pow-lowering-pow.f64N/A
sin-lowering-sin.f64N/A
cos-lowering-cos.f64N/A
unpow2N/A
*-lowering-*.f6468.6%
Simplified68.6%
associate-*r/N/A
times-fracN/A
clear-numN/A
un-div-invN/A
/-lowering-/.f64N/A
/-lowering-/.f64N/A
*-lowering-*.f64N/A
/-lowering-/.f64N/A
associate-/l*N/A
*-lowering-*.f64N/A
unpow2N/A
associate-/l*N/A
tan-quotN/A
*-lowering-*.f64N/A
sin-lowering-sin.f64N/A
tan-lowering-tan.f6476.4%
Applied egg-rr76.4%
associate-/l*N/A
div-invN/A
times-fracN/A
*-lowering-*.f64N/A
/-lowering-/.f64N/A
/-lowering-/.f64N/A
/-lowering-/.f64N/A
associate-/r*N/A
/-lowering-/.f64N/A
/-lowering-/.f64N/A
*-lowering-*.f64N/A
sin-lowering-sin.f64N/A
tan-lowering-tan.f6496.0%
Applied egg-rr96.0%
clear-numN/A
associate-/r/N/A
*-lowering-*.f64N/A
Applied egg-rr87.7%
Final simplification83.4%
k_m = (fabs.f64 k) (FPCore (t l k_m) :precision binary64 (if (<= t 4e+14) (* (/ 2.0 k_m) (/ (/ l t) (/ k_m (/ (/ l k_m) k_m)))) (* l (/ (* l (/ 2.0 k_m)) (* k_m (* k_m (* k_m t)))))))
k_m = fabs(k);
/* Branches on t (not k_m): quotient tower for moderate t, expanded
   product form for huge t. k_m assumed nonnegative (caller applies
   fabs); grouping preserved from the FPCore above. */
double code(double t, double l, double k_m) {
    if (t <= 4e+14) {
        return (2.0 / k_m) * ((l / t) / (k_m / ((l / k_m) / k_m)));
    }
    return l * ((l * (2.0 / k_m)) / (k_m * (k_m * (k_m * t))));
}
k_m = abs(k)
! Branches on t (not k_m): quotient tower for moderate t, expanded
! product form for huge t. k_m assumed nonnegative (caller applies abs).
real(8) function code(t, l, k_m)
real(8), intent (in) :: t
real(8), intent (in) :: l
real(8), intent (in) :: k_m
real(8) :: tmp
if (t <= 4d+14) then
tmp = (2.0d0 / k_m) * ((l / t) / (k_m / ((l / k_m) / k_m)))
else
tmp = l * ((l * (2.0d0 / k_m)) / (k_m * (k_m * (k_m * t))))
end if
code = tmp
end function
k_m = Math.abs(k);
/**
 * Branches on t (not k_m): quotient tower for moderate t, expanded
 * product form for huge t. k_m assumed nonnegative (caller applies abs).
 */
public static double code(double t, double l, double k_m) {
    if (t <= 4e+14) {
        return (2.0 / k_m) * ((l / t) / (k_m / ((l / k_m) / k_m)));
    }
    return l * ((l * (2.0 / k_m)) / (k_m * (k_m * (k_m * t))));
}
k_m = math.fabs(k)

def code(t, l, k_m):
    """Branches on t (not k_m): quotient tower for moderate t, product form for huge t.

    k_m is assumed nonnegative (caller applies fabs).
    """
    if t <= 4e+14:
        return (2.0 / k_m) * ((l / t) / (k_m / ((l / k_m) / k_m)))
    return l * ((l * (2.0 / k_m)) / (k_m * (k_m * (k_m * t))))
k_m = abs(k) function code(t, l, k_m) tmp = 0.0 if (t <= 4e+14) tmp = Float64(Float64(2.0 / k_m) * Float64(Float64(l / t) / Float64(k_m / Float64(Float64(l / k_m) / k_m)))); else tmp = Float64(l * Float64(Float64(l * Float64(2.0 / k_m)) / Float64(k_m * Float64(k_m * Float64(k_m * t))))); end return tmp end
k_m = abs(k); function tmp_2 = code(t, l, k_m) tmp = 0.0; if (t <= 4e+14) tmp = (2.0 / k_m) * ((l / t) / (k_m / ((l / k_m) / k_m))); else tmp = l * ((l * (2.0 / k_m)) / (k_m * (k_m * (k_m * t)))); end tmp_2 = tmp; end
k_m = N[Abs[k], $MachinePrecision] code[t_, l_, k$95$m_] := If[LessEqual[t, 4e+14], N[(N[(2.0 / k$95$m), $MachinePrecision] * N[(N[(l / t), $MachinePrecision] / N[(k$95$m / N[(N[(l / k$95$m), $MachinePrecision] / k$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(l * N[(N[(l * N[(2.0 / k$95$m), $MachinePrecision]), $MachinePrecision] / N[(k$95$m * N[(k$95$m * N[(k$95$m * t), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
k_m = \left|k\right|
\\
\begin{array}{l}
\mathbf{if}\;t \leq 4 \cdot 10^{+14}:\\
\;\;\;\;\frac{2}{k\_m} \cdot \frac{\frac{\ell}{t}}{\frac{k\_m}{\frac{\frac{\ell}{k\_m}}{k\_m}}}\\
\mathbf{else}:\\
\;\;\;\;\ell \cdot \frac{\ell \cdot \frac{2}{k\_m}}{k\_m \cdot \left(k\_m \cdot \left(k\_m \cdot t\right)\right)}\\
\end{array}
\end{array}
if t < 4e14Initial program 41.5%
Taylor expanded in k around 0
/-lowering-/.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
metadata-evalN/A
pow-sqrN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6458.2%
Simplified58.2%
remove-double-negN/A
times-fracN/A
distribute-rgt-neg-inN/A
distribute-lft-neg-inN/A
*-lowering-*.f64N/A
neg-sub0N/A
--lowering--.f64N/A
/-lowering-/.f64N/A
neg-sub0N/A
--lowering--.f64N/A
associate-*l*N/A
associate-/l*N/A
*-lowering-*.f64N/A
/-lowering-/.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f6453.2%
Applied egg-rr53.2%
associate-/r*N/A
sub0-negN/A
distribute-frac-neg2N/A
distribute-frac-negN/A
distribute-frac-neg2N/A
sub0-negN/A
remove-double-negN/A
un-div-invN/A
clear-numN/A
/-lowering-/.f64N/A
clear-numN/A
un-div-invN/A
/-lowering-/.f64N/A
/-lowering-/.f64N/A
associate-/l*N/A
associate-*r*N/A
clear-numN/A
un-div-invN/A
/-lowering-/.f64N/A
*-lowering-*.f64N/A
/-lowering-/.f64N/A
Applied egg-rr72.4%
div-invN/A
associate-/l*N/A
times-fracN/A
*-lowering-*.f64N/A
/-lowering-/.f64N/A
associate-/r*N/A
associate-/r/N/A
*-commutativeN/A
/-lowering-/.f64N/A
clear-numN/A
/-lowering-/.f64N/A
clear-numN/A
associate-/r*N/A
div-invN/A
/-lowering-/.f64N/A
associate-/r*N/A
/-lowering-/.f64N/A
/-lowering-/.f6475.0%
Applied egg-rr75.0%
if 4e14 < t Initial program 23.2%
Taylor expanded in k around 0
/-lowering-/.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
metadata-evalN/A
pow-sqrN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6460.6%
Simplified60.6%
associate-/r/N/A
associate-*r*N/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
/-lowering-/.f64N/A
associate-*l*N/A
*-commutativeN/A
associate-*r*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f6462.8%
Applied egg-rr62.8%
*-commutativeN/A
associate-/r*N/A
associate-*l/N/A
/-lowering-/.f64N/A
*-lowering-*.f64N/A
/-lowering-/.f64N/A
associate-*r*N/A
associate-*r*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f6467.0%
Applied egg-rr67.0%
Final simplification72.8%
k_m = (fabs.f64 k) (FPCore (t l k_m) :precision binary64 (if (<= t 1.2e+17) (* (/ 2.0 k_m) (/ (/ l t) (/ k_m (/ (/ l k_m) k_m)))) (* l (* (/ l k_m) (/ 2.0 (* k_m (* k_m (* k_m t))))))))
k_m = fabs(k);
/* Branches on t with cutoff 1.2e17: quotient tower for moderate t,
   factored product form for huge t. k_m assumed nonnegative (caller
   applies fabs); grouping preserved from the FPCore above. */
double code(double t, double l, double k_m) {
    if (t <= 1.2e+17) {
        return (2.0 / k_m) * ((l / t) / (k_m / ((l / k_m) / k_m)));
    }
    return l * ((l / k_m) * (2.0 / (k_m * (k_m * (k_m * t)))));
}
k_m = abs(k)
! Branches on t with cutoff 1.2d17: quotient tower for moderate t,
! factored product form for huge t. k_m assumed nonnegative.
real(8) function code(t, l, k_m)
real(8), intent (in) :: t
real(8), intent (in) :: l
real(8), intent (in) :: k_m
real(8) :: tmp
if (t <= 1.2d+17) then
tmp = (2.0d0 / k_m) * ((l / t) / (k_m / ((l / k_m) / k_m)))
else
tmp = l * ((l / k_m) * (2.0d0 / (k_m * (k_m * (k_m * t)))))
end if
code = tmp
end function
k_m = Math.abs(k);
/**
 * Branches on t with cutoff 1.2e17: quotient tower for moderate t,
 * factored product form for huge t. k_m assumed nonnegative.
 */
public static double code(double t, double l, double k_m) {
    if (t <= 1.2e+17) {
        return (2.0 / k_m) * ((l / t) / (k_m / ((l / k_m) / k_m)));
    }
    return l * ((l / k_m) * (2.0 / (k_m * (k_m * (k_m * t)))));
}
k_m = math.fabs(k)

def code(t, l, k_m):
    """Branches on t (cutoff 1.2e17): quotient tower, else factored product form.

    k_m is assumed nonnegative (caller applies fabs).
    """
    if t <= 1.2e+17:
        return (2.0 / k_m) * ((l / t) / (k_m / ((l / k_m) / k_m)))
    return l * ((l / k_m) * (2.0 / (k_m * (k_m * (k_m * t)))))
k_m = abs(k) function code(t, l, k_m) tmp = 0.0 if (t <= 1.2e+17) tmp = Float64(Float64(2.0 / k_m) * Float64(Float64(l / t) / Float64(k_m / Float64(Float64(l / k_m) / k_m)))); else tmp = Float64(l * Float64(Float64(l / k_m) * Float64(2.0 / Float64(k_m * Float64(k_m * Float64(k_m * t)))))); end return tmp end
k_m = abs(k); function tmp_2 = code(t, l, k_m) tmp = 0.0; if (t <= 1.2e+17) tmp = (2.0 / k_m) * ((l / t) / (k_m / ((l / k_m) / k_m))); else tmp = l * ((l / k_m) * (2.0 / (k_m * (k_m * (k_m * t))))); end tmp_2 = tmp; end
k_m = N[Abs[k], $MachinePrecision] code[t_, l_, k$95$m_] := If[LessEqual[t, 1.2e+17], N[(N[(2.0 / k$95$m), $MachinePrecision] * N[(N[(l / t), $MachinePrecision] / N[(k$95$m / N[(N[(l / k$95$m), $MachinePrecision] / k$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(l * N[(N[(l / k$95$m), $MachinePrecision] * N[(2.0 / N[(k$95$m * N[(k$95$m * N[(k$95$m * t), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
k_m = \left|k\right|
\\
\begin{array}{l}
\mathbf{if}\;t \leq 1.2 \cdot 10^{+17}:\\
\;\;\;\;\frac{2}{k\_m} \cdot \frac{\frac{\ell}{t}}{\frac{k\_m}{\frac{\frac{\ell}{k\_m}}{k\_m}}}\\
\mathbf{else}:\\
\;\;\;\;\ell \cdot \left(\frac{\ell}{k\_m} \cdot \frac{2}{k\_m \cdot \left(k\_m \cdot \left(k\_m \cdot t\right)\right)}\right)\\
\end{array}
\end{array}
if t < 1.2e17Initial program 41.5%
Taylor expanded in k around 0
/-lowering-/.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
metadata-evalN/A
pow-sqrN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6458.2%
Simplified58.2%
remove-double-negN/A
times-fracN/A
distribute-rgt-neg-inN/A
distribute-lft-neg-inN/A
*-lowering-*.f64N/A
neg-sub0N/A
--lowering--.f64N/A
/-lowering-/.f64N/A
neg-sub0N/A
--lowering--.f64N/A
associate-*l*N/A
associate-/l*N/A
*-lowering-*.f64N/A
/-lowering-/.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f6453.2%
Applied egg-rr53.2%
associate-/r*N/A
sub0-negN/A
distribute-frac-neg2N/A
distribute-frac-negN/A
distribute-frac-neg2N/A
sub0-negN/A
remove-double-negN/A
un-div-invN/A
clear-numN/A
/-lowering-/.f64N/A
clear-numN/A
un-div-invN/A
/-lowering-/.f64N/A
/-lowering-/.f64N/A
associate-/l*N/A
associate-*r*N/A
clear-numN/A
un-div-invN/A
/-lowering-/.f64N/A
*-lowering-*.f64N/A
/-lowering-/.f64N/A
Applied egg-rr72.4%
div-invN/A
associate-/l*N/A
times-fracN/A
*-lowering-*.f64N/A
/-lowering-/.f64N/A
associate-/r*N/A
associate-/r/N/A
*-commutativeN/A
/-lowering-/.f64N/A
clear-numN/A
/-lowering-/.f64N/A
clear-numN/A
associate-/r*N/A
div-invN/A
/-lowering-/.f64N/A
associate-/r*N/A
/-lowering-/.f64N/A
/-lowering-/.f6475.0%
Applied egg-rr75.0%
if 1.2e17 < t Initial program 23.2%
Taylor expanded in k around 0
/-lowering-/.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
metadata-evalN/A
pow-sqrN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6460.6%
Simplified60.6%
associate-/r/N/A
associate-*r*N/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
/-lowering-/.f64N/A
associate-*l*N/A
*-commutativeN/A
associate-*r*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f6462.8%
Applied egg-rr62.8%
associate-*r/N/A
times-fracN/A
*-lowering-*.f64N/A
/-lowering-/.f64N/A
/-lowering-/.f64N/A
associate-*r*N/A
associate-*r*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f6467.0%
Applied egg-rr67.0%
Final simplification72.8%
k_m = (fabs.f64 k) (FPCore (t l k_m) :precision binary64 (* (/ (/ 2.0 t) (/ k_m (/ l k_m))) (/ (/ l k_m) k_m)))
k_m = fabs(k);
/* Unconditional series-derived form; (l / k_m) appears twice in the
   original and is hoisted here (bitwise-identical result). */
double code(double t, double l, double k_m) {
    double l_over_km = l / k_m;
    double lead = (2.0 / t) / (k_m / l_over_km);
    return lead * (l_over_km / k_m);
}
k_m = abs(k)
! Unconditional series-derived form (small-|k| Taylor rewrite).
! k_m is assumed nonnegative (caller applies abs).
real(8) function code(t, l, k_m)
real(8), intent (in) :: t
real(8), intent (in) :: l
real(8), intent (in) :: k_m
code = ((2.0d0 / t) / (k_m / (l / k_m))) * ((l / k_m) / k_m)
end function
k_m = Math.abs(k);
/**
 * Unconditional series-derived form; (l / k_m) appears twice in the
 * original and is hoisted here (bitwise-identical result).
 */
public static double code(double t, double l, double k_m) {
    double lOverKm = l / k_m;
    double lead = (2.0 / t) / (k_m / lOverKm);
    return lead * (lOverKm / k_m);
}
k_m = math.fabs(k)

def code(t, l, k_m):
    """Unconditional series-derived form; hoists the repeated l / k_m quotient."""
    l_over_km = l / k_m
    return ((2.0 / t) / (k_m / l_over_km)) * (l_over_km / k_m)
k_m = abs(k) function code(t, l, k_m) return Float64(Float64(Float64(2.0 / t) / Float64(k_m / Float64(l / k_m))) * Float64(Float64(l / k_m) / k_m)) end
k_m = abs(k); function tmp = code(t, l, k_m) tmp = ((2.0 / t) / (k_m / (l / k_m))) * ((l / k_m) / k_m); end
k_m = N[Abs[k], $MachinePrecision] code[t_, l_, k$95$m_] := N[(N[(N[(2.0 / t), $MachinePrecision] / N[(k$95$m / N[(l / k$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(N[(l / k$95$m), $MachinePrecision] / k$95$m), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
k_m = \left|k\right|
\\
\frac{\frac{2}{t}}{\frac{k\_m}{\frac{\ell}{k\_m}}} \cdot \frac{\frac{\ell}{k\_m}}{k\_m}
\end{array}
Initial program 36.6%
Taylor expanded in k around 0
/-lowering-/.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
metadata-evalN/A
pow-sqrN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6458.8%
Simplified58.8%
remove-double-negN/A
times-fracN/A
distribute-rgt-neg-inN/A
distribute-lft-neg-inN/A
*-lowering-*.f64N/A
neg-sub0N/A
--lowering--.f64N/A
/-lowering-/.f64N/A
neg-sub0N/A
--lowering--.f64N/A
associate-*l*N/A
associate-/l*N/A
*-lowering-*.f64N/A
/-lowering-/.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f6451.8%
Applied egg-rr51.8%
associate-/r*N/A
sub0-negN/A
distribute-frac-neg2N/A
distribute-frac-negN/A
distribute-frac-neg2N/A
sub0-negN/A
remove-double-negN/A
un-div-invN/A
clear-numN/A
/-lowering-/.f64N/A
clear-numN/A
un-div-invN/A
/-lowering-/.f64N/A
/-lowering-/.f64N/A
associate-/l*N/A
associate-*r*N/A
clear-numN/A
un-div-invN/A
/-lowering-/.f64N/A
*-lowering-*.f64N/A
/-lowering-/.f64N/A
Applied egg-rr69.4%
associate-/r/N/A
associate-/r/N/A
times-fracN/A
*-lowering-*.f64N/A
/-lowering-/.f64N/A
/-lowering-/.f64N/A
associate-*l/N/A
associate-/r/N/A
/-lowering-/.f64N/A
/-lowering-/.f64N/A
associate-/r*N/A
/-lowering-/.f64N/A
/-lowering-/.f6475.5%
Applied egg-rr75.5%
k_m = (fabs.f64 k) (FPCore (t l k_m) :precision binary64 (* l (* (/ l k_m) (/ 2.0 (* k_m (* k_m (* k_m t)))))))
k_m = fabs(k);
/* Unconditional factored product form; grouping preserved. */
double code(double t, double l, double k_m) {
    double tail = 2.0 / (k_m * (k_m * (k_m * t)));
    return l * ((l / k_m) * tail);
}
k_m = abs(k)
! Unconditional factored product form (large-t rewrite).
! k_m is assumed nonnegative (caller applies abs).
real(8) function code(t, l, k_m)
real(8), intent (in) :: t
real(8), intent (in) :: l
real(8), intent (in) :: k_m
code = l * ((l / k_m) * (2.0d0 / (k_m * (k_m * (k_m * t)))))
end function
k_m = Math.abs(k);
/**
 * Herbie-generated alternative: computes 2*l*l / (t * k_m^4), where the
 * caller supplies k_m = Math.abs(k).  The floating-point evaluation order
 * of the generated expression l*((l/k_m)*(2/(k_m*(k_m*(k_m*t))))) is
 * preserved exactly.
 */
public static double code(double t, double l, double k_m) {
    final double tail = 2.0 / (k_m * (k_m * (k_m * t)));
    final double ratio = l / k_m;
    return l * (ratio * tail);
}
# NOTE(review): report extraction fused two statements onto one line; as written
# this is not valid Python.  The intent is `k_m = math.fabs(k)` followed by the
# function definition, which evaluates 2*l**2 / (t*k_m**4).
k_m = math.fabs(k) def code(t, l, k_m): return l * ((l / k_m) * (2.0 / (k_m * (k_m * (k_m * t)))))
# NOTE(review): report extraction fused the `k_m = abs(k)` assignment and the
# function definition onto one line.  The function evaluates 2*l^2 / (t*k_m^4)
# with explicit Float64 rounding at each step.
k_m = abs(k) function code(t, l, k_m) return Float64(l * Float64(Float64(l / k_m) * Float64(2.0 / Float64(k_m * Float64(k_m * Float64(k_m * t)))))) end
% NOTE(review): report extraction fused the `k_m = abs(k);` assignment and the
% function definition onto one line.  The function evaluates 2*l^2 / (t*k_m^4).
k_m = abs(k); function tmp = code(t, l, k_m) tmp = l * ((l / k_m) * (2.0 / (k_m * (k_m * (k_m * t))))); end
(* Herbie alternative (Mathematica): k_m = |k|, then
   l * ((l/k_m) * (2 / (k_m*(k_m*(k_m*t))))) == 2*l^2 / (t*k_m^4).
   NOTE(review): extraction fused assignment and definition onto one line. *)
k_m = N[Abs[k], $MachinePrecision] code[t_, l_, k$95$m_] := N[(l * N[(N[(l / k$95$m), $MachinePrecision] * N[(2.0 / N[(k$95$m * N[(k$95$m * N[(k$95$m * t), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
k_m = \left|k\right|
\\
\ell \cdot \left(\frac{\ell}{k\_m} \cdot \frac{2}{k\_m \cdot \left(k\_m \cdot \left(k\_m \cdot t\right)\right)}\right)
\end{array}
Initial program 36.6%
Taylor expanded in k around 0
/-lowering-/.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
metadata-evalN/A
pow-sqrN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6458.8%
Simplified58.8%
associate-/r/N/A
associate-*r*N/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
/-lowering-/.f64N/A
associate-*l*N/A
*-commutativeN/A
associate-*r*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f6467.5%
Applied egg-rr67.5%
associate-*r/N/A
times-fracN/A
*-lowering-*.f64N/A
/-lowering-/.f64N/A
/-lowering-/.f64N/A
associate-*r*N/A
associate-*r*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f6469.6%
Applied egg-rr69.6%
Final simplification69.6%
k_m = (fabs.f64 k) (FPCore (t l k_m) :precision binary64 (* l (* (/ 2.0 (* k_m t)) (/ l (* k_m (* k_m k_m))))))
k_m = fabs(k);
/* Herbie-generated alternative: l * ((2/(k_m*t)) * (l/(k_m^3))), algebraically
 * 2*l*l / (t * k_m^4).  The caller supplies k_m = fabs(k).  Evaluation order
 * of the generated expression is preserved exactly. */
double code(double t, double l, double k_m) {
    double left = 2.0 / (k_m * t);
    double right = l / (k_m * (k_m * k_m));
    return l * (left * right);
}
k_m = abs(k)
! Herbie-generated alternative: evaluates
!   l * ((2/(k_m*t)) * (l / (k_m*(k_m*k_m)))),
! algebraically 2*l**2 / (t*k_m**4).  The caller supplies k_m = abs(k).
real(8) function code(t, l, k_m)
real(8), intent (in) :: t
real(8), intent (in) :: l
real(8), intent (in) :: k_m
! Grouping matches the generated expression; do not re-associate.
code = l * ((2.0d0 / (k_m * t)) * (l / (k_m * (k_m * k_m))))
end function
k_m = Math.abs(k);
/**
 * Herbie-generated alternative: computes l * ((2/(k_m*t)) * (l/(k_m^3))),
 * algebraically 2*l*l / (t * k_m^4), where the caller supplies
 * k_m = Math.abs(k).  Evaluation order is preserved exactly.
 */
public static double code(double t, double l, double k_m) {
    final double left = 2.0 / (k_m * t);
    final double right = l / (k_m * (k_m * k_m));
    return l * (left * right);
}
# NOTE(review): report extraction fused two statements onto one line; as written
# this is not valid Python.  The intent is `k_m = math.fabs(k)` followed by the
# function definition, which evaluates 2*l**2 / (t*k_m**4).
k_m = math.fabs(k) def code(t, l, k_m): return l * ((2.0 / (k_m * t)) * (l / (k_m * (k_m * k_m))))
# NOTE(review): report extraction fused the `k_m = abs(k)` assignment and the
# function definition onto one line.  The function evaluates 2*l^2 / (t*k_m^4)
# with explicit Float64 rounding at each step.
k_m = abs(k) function code(t, l, k_m) return Float64(l * Float64(Float64(2.0 / Float64(k_m * t)) * Float64(l / Float64(k_m * Float64(k_m * k_m))))) end
% NOTE(review): report extraction fused the `k_m = abs(k);` assignment and the
% function definition onto one line.  The function evaluates 2*l^2 / (t*k_m^4).
k_m = abs(k); function tmp = code(t, l, k_m) tmp = l * ((2.0 / (k_m * t)) * (l / (k_m * (k_m * k_m)))); end
(* Herbie alternative (Mathematica): k_m = |k|, then
   l * ((2/(k_m*t)) * (l / (k_m*(k_m*k_m)))) == 2*l^2 / (t*k_m^4).
   NOTE(review): extraction fused assignment and definition onto one line. *)
k_m = N[Abs[k], $MachinePrecision] code[t_, l_, k$95$m_] := N[(l * N[(N[(2.0 / N[(k$95$m * t), $MachinePrecision]), $MachinePrecision] * N[(l / N[(k$95$m * N[(k$95$m * k$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
k_m = \left|k\right|
\\
\ell \cdot \left(\frac{2}{k\_m \cdot t} \cdot \frac{\ell}{k\_m \cdot \left(k\_m \cdot k\_m\right)}\right)
\end{array}
Initial program 36.6%
Taylor expanded in k around 0
/-lowering-/.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
metadata-evalN/A
pow-sqrN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6458.8%
Simplified58.8%
associate-/r/N/A
associate-*r*N/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
/-lowering-/.f64N/A
associate-*l*N/A
*-commutativeN/A
associate-*r*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f6467.5%
Applied egg-rr67.5%
associate-*r/N/A
*-commutativeN/A
associate-*r*N/A
times-fracN/A
*-lowering-*.f64N/A
/-lowering-/.f64N/A
*-lowering-*.f64N/A
/-lowering-/.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f6469.3%
Applied egg-rr69.3%
Final simplification69.3%
k_m = (fabs.f64 k) (FPCore (t l k_m) :precision binary64 (* l (* l (/ 2.0 (* k_m (* t (* k_m (* k_m k_m))))))))
k_m = fabs(k);
/* Herbie-generated alternative: l * (l * (2 / (k_m * (t * k_m^3)))),
 * algebraically 2*l*l / (t * k_m^4).  The caller supplies k_m = fabs(k).
 * Evaluation order of the generated expression is preserved exactly. */
double code(double t, double l, double k_m) {
    double cube = k_m * (k_m * k_m);
    double recip = 2.0 / (k_m * (t * cube));
    return l * (l * recip);
}
k_m = abs(k)
! Herbie-generated alternative: evaluates
!   l * (l * (2 / (k_m * (t * (k_m*(k_m*k_m)))))),
! algebraically 2*l**2 / (t*k_m**4).  The caller supplies k_m = abs(k).
real(8) function code(t, l, k_m)
real(8), intent (in) :: t
real(8), intent (in) :: l
real(8), intent (in) :: k_m
! Grouping matches the generated expression; do not re-associate.
code = l * (l * (2.0d0 / (k_m * (t * (k_m * (k_m * k_m))))))
end function
k_m = Math.abs(k);
/**
 * Herbie-generated alternative: computes l * (l * (2 / (k_m * (t * k_m^3)))),
 * algebraically 2*l*l / (t * k_m^4), where the caller supplies
 * k_m = Math.abs(k).  Evaluation order is preserved exactly.
 */
public static double code(double t, double l, double k_m) {
    final double cube = k_m * (k_m * k_m);
    final double recip = 2.0 / (k_m * (t * cube));
    return l * (l * recip);
}
# NOTE(review): report extraction fused two statements onto one line; as written
# this is not valid Python.  The intent is `k_m = math.fabs(k)` followed by the
# function definition, which evaluates 2*l**2 / (t*k_m**4).
k_m = math.fabs(k) def code(t, l, k_m): return l * (l * (2.0 / (k_m * (t * (k_m * (k_m * k_m))))))
# NOTE(review): report extraction fused the `k_m = abs(k)` assignment and the
# function definition onto one line.  The function evaluates 2*l^2 / (t*k_m^4)
# with explicit Float64 rounding at each step.
k_m = abs(k) function code(t, l, k_m) return Float64(l * Float64(l * Float64(2.0 / Float64(k_m * Float64(t * Float64(k_m * Float64(k_m * k_m))))))) end
% NOTE(review): report extraction fused the `k_m = abs(k);` assignment and the
% function definition onto one line.  The function evaluates 2*l^2 / (t*k_m^4).
k_m = abs(k); function tmp = code(t, l, k_m) tmp = l * (l * (2.0 / (k_m * (t * (k_m * (k_m * k_m)))))); end
(* Herbie alternative (Mathematica): k_m = |k|, then
   l * (l * (2 / (k_m * (t * (k_m*(k_m*k_m)))))) == 2*l^2 / (t*k_m^4).
   NOTE(review): extraction fused assignment and definition onto one line. *)
k_m = N[Abs[k], $MachinePrecision] code[t_, l_, k$95$m_] := N[(l * N[(l * N[(2.0 / N[(k$95$m * N[(t * N[(k$95$m * N[(k$95$m * k$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
k_m = \left|k\right|
\\
\ell \cdot \left(\ell \cdot \frac{2}{k\_m \cdot \left(t \cdot \left(k\_m \cdot \left(k\_m \cdot k\_m\right)\right)\right)}\right)
\end{array}
Initial program 36.6%
Taylor expanded in k around 0
/-lowering-/.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
metadata-evalN/A
pow-sqrN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6458.8%
Simplified58.8%
associate-/r/N/A
associate-*r*N/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
/-lowering-/.f64N/A
associate-*l*N/A
*-commutativeN/A
associate-*r*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f6467.5%
Applied egg-rr67.5%
Final simplification67.5%
herbie shell --seed 2024138
(FPCore (t l k)
:name "Toniolo and Linder, Equation (10-)"
:precision binary64
(/ 2.0 (* (* (* (/ (pow t 3.0) (* l l)) (sin k)) (tan k)) (- (+ 1.0 (pow (/ k t) 2.0)) 1.0))))