
; Seed expression: 2 / (t^3/l^2 * sin(k) * tan(k) * ((1 + (k/t)^2) - 1)).
; The trailing (1 + x) - 1 term cancels catastrophically when (k/t)^2 is tiny.
(FPCore (t l k) :precision binary64 (/ 2.0 (* (* (* (/ (pow t 3.0) (* l l)) (sin k)) (tan k)) (- (+ 1.0 (pow (/ k t) 2.0)) 1.0))))
double code(double t, double l, double k) {
return 2.0 / ((((pow(t, 3.0) / (l * l)) * sin(k)) * tan(k)) * ((1.0 + pow((k / t), 2.0)) - 1.0));
}
! Reference (unimproved) implementation of
!   2 / (t**3/l**2 * sin(k) * tan(k) * ((1 + (k/t)**2) - 1)).
! NOTE(review): the (1.0d0 + x) - 1.0d0 term cancels to 0 when
! (k/t)**2 is below machine epsilon, destroying all accuracy.
real(8) function code(t, l, k)
real(8), intent (in) :: t
real(8), intent (in) :: l
real(8), intent (in) :: k
code = 2.0d0 / (((((t ** 3.0d0) / (l * l)) * sin(k)) * tan(k)) * ((1.0d0 + ((k / t) ** 2.0d0)) - 1.0d0))
end function
/**
 * Reference (unimproved) implementation of
 * 2 / (t^3/l^2 * sin(k) * tan(k) * ((1 + (k/t)^2) - 1)).
 * Grouping matches the FPCore seed; the final (1.0 + x) - 1.0
 * subtraction is cancellation-prone for small k/t.
 */
public static double code(double t, double l, double k) {
    final double scale = Math.pow(t, 3.0) / (l * l);            // t^3 / l^2
    final double trig = scale * Math.sin(k) * Math.tan(k);      // ((scale*sin)*tan)
    final double cancel = (1.0 + Math.pow(k / t, 2.0)) - 1.0;   // precision loss here
    return 2.0 / (trig * cancel);
}
def code(t, l, k): return 2.0 / ((((math.pow(t, 3.0) / (l * l)) * math.sin(k)) * math.tan(k)) * ((1.0 + math.pow((k / t), 2.0)) - 1.0))
# Reference (unimproved) implementation; explicit Float64() wrappers pin every
# intermediate to binary64. The (1.0 + x) - 1.0 term is cancellation-prone.
function code(t, l, k) return Float64(2.0 / Float64(Float64(Float64(Float64((t ^ 3.0) / Float64(l * l)) * sin(k)) * tan(k)) * Float64(Float64(1.0 + (Float64(k / t) ^ 2.0)) - 1.0))) end
% Reference (unimproved) implementation of 2/(t^3/l^2*sin(k)*tan(k)*((1+(k/t)^2)-1)).
% The (1.0 + x) - 1.0 term cancels catastrophically for small k/t.
function tmp = code(t, l, k) tmp = 2.0 / (((((t ^ 3.0) / (l * l)) * sin(k)) * tan(k)) * ((1.0 + ((k / t) ^ 2.0)) - 1.0)); end
(* Reference (unimproved) implementation; every intermediate is rounded to
   $MachinePrecision to mimic binary64. The (1 + x) - 1 term cancels badly. *)
code[t_, l_, k_] := N[(2.0 / N[(N[(N[(N[(N[Power[t, 3.0], $MachinePrecision] / N[(l * l), $MachinePrecision]), $MachinePrecision] * N[Sin[k], $MachinePrecision]), $MachinePrecision] * N[Tan[k], $MachinePrecision]), $MachinePrecision] * N[(N[(1.0 + N[Power[N[(k / t), $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision] - 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{2}{\left(\left(\frac{{t}^{3}}{\ell \cdot \ell} \cdot \sin k\right) \cdot \tan k\right) \cdot \left(\left(1 + {\left(\frac{k}{t}\right)}^{2}\right) - 1\right)}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 10 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (t l k) :precision binary64 (/ 2.0 (* (* (* (/ (pow t 3.0) (* l l)) (sin k)) (tan k)) (- (+ 1.0 (pow (/ k t) 2.0)) 1.0))))
double code(double t, double l, double k) {
return 2.0 / ((((pow(t, 3.0) / (l * l)) * sin(k)) * tan(k)) * ((1.0 + pow((k / t), 2.0)) - 1.0));
}
real(8) function code(t, l, k)
real(8), intent (in) :: t
real(8), intent (in) :: l
real(8), intent (in) :: k
code = 2.0d0 / (((((t ** 3.0d0) / (l * l)) * sin(k)) * tan(k)) * ((1.0d0 + ((k / t) ** 2.0d0)) - 1.0d0))
end function
public static double code(double t, double l, double k) {
return 2.0 / ((((Math.pow(t, 3.0) / (l * l)) * Math.sin(k)) * Math.tan(k)) * ((1.0 + Math.pow((k / t), 2.0)) - 1.0));
}
def code(t, l, k): return 2.0 / ((((math.pow(t, 3.0) / (l * l)) * math.sin(k)) * math.tan(k)) * ((1.0 + math.pow((k / t), 2.0)) - 1.0))
function code(t, l, k) return Float64(2.0 / Float64(Float64(Float64(Float64((t ^ 3.0) / Float64(l * l)) * sin(k)) * tan(k)) * Float64(Float64(1.0 + (Float64(k / t) ^ 2.0)) - 1.0))) end
function tmp = code(t, l, k) tmp = 2.0 / (((((t ^ 3.0) / (l * l)) * sin(k)) * tan(k)) * ((1.0 + ((k / t) ^ 2.0)) - 1.0)); end
code[t_, l_, k_] := N[(2.0 / N[(N[(N[(N[(N[Power[t, 3.0], $MachinePrecision] / N[(l * l), $MachinePrecision]), $MachinePrecision] * N[Sin[k], $MachinePrecision]), $MachinePrecision] * N[Tan[k], $MachinePrecision]), $MachinePrecision] * N[(N[(1.0 + N[Power[N[(k / t), $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision] - 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{2}{\left(\left(\frac{{t}^{3}}{\ell \cdot \ell} \cdot \sin k\right) \cdot \tan k\right) \cdot \left(\left(1 + {\left(\frac{k}{t}\right)}^{2}\right) - 1\right)}
\end{array}
k_m = (fabs.f64 k) (FPCore (t l k_m) :precision binary64 (if (<= k_m 1.5e-51) (/ 2.0 (/ (* (* (/ k_m l) k_m) t) (/ (/ l k_m) k_m))) (/ 2.0 (* (/ (* t (/ k_m l)) (/ l k_m)) (* (tan k_m) (sin k_m))))))
k_m = fabs(k);
double code(double t, double l, double k_m) {
double tmp;
if (k_m <= 1.5e-51) {
tmp = 2.0 / ((((k_m / l) * k_m) * t) / ((l / k_m) / k_m));
} else {
tmp = 2.0 / (((t * (k_m / l)) / (l / k_m)) * (tan(k_m) * sin(k_m)));
}
return tmp;
}
k_m = abs(k)
! Herbie alternative 1. Caller pre-computes k_m = abs(k).
! Tiny-|k| branch uses a series form that avoids sin/tan and the
! cancelling (1 + x) - 1 term of the seed expression.
real(8) function code(t, l, k_m)
real(8), intent (in) :: t
real(8), intent (in) :: l
real(8), intent (in) :: k_m
real(8) :: tmp
if (k_m <= 1.5d-51) then
! series form, equivalent to 2 l^2 / (k_m^4 t) via staged divisions
tmp = 2.0d0 / ((((k_m / l) * k_m) * t) / ((l / k_m) / k_m))
else
! general case, regrouped product
tmp = 2.0d0 / (((t * (k_m / l)) / (l / k_m)) * (tan(k_m) * sin(k_m)))
end if
code = tmp
end function
k_m = Math.abs(k);
/**
 * Herbie alternative 1. Caller pre-computes k_m = Math.abs(k).
 * The tiny-|k| branch uses a series form that skips sin/tan and the
 * cancelling (1 + x) - 1 term; otherwise a regrouped general form is used.
 */
public static double code(double t, double l, double k_m) {
double tmp;
if (k_m <= 1.5e-51) {
// series form, equivalent to 2 l^2 / (k_m^4 t) via staged divisions
tmp = 2.0 / ((((k_m / l) * k_m) * t) / ((l / k_m) / k_m));
} else {
// general case, regrouped product
tmp = 2.0 / (((t * (k_m / l)) / (l / k_m)) * (Math.tan(k_m) * Math.sin(k_m)));
}
return tmp;
}
k_m = math.fabs(k) def code(t, l, k_m): tmp = 0 if k_m <= 1.5e-51: tmp = 2.0 / ((((k_m / l) * k_m) * t) / ((l / k_m) / k_m)) else: tmp = 2.0 / (((t * (k_m / l)) / (l / k_m)) * (math.tan(k_m) * math.sin(k_m))) return tmp
k_m = abs(k) function code(t, l, k_m) tmp = 0.0 if (k_m <= 1.5e-51) tmp = Float64(2.0 / Float64(Float64(Float64(Float64(k_m / l) * k_m) * t) / Float64(Float64(l / k_m) / k_m))); else tmp = Float64(2.0 / Float64(Float64(Float64(t * Float64(k_m / l)) / Float64(l / k_m)) * Float64(tan(k_m) * sin(k_m)))); end return tmp end
k_m = abs(k); function tmp_2 = code(t, l, k_m) tmp = 0.0; if (k_m <= 1.5e-51) tmp = 2.0 / ((((k_m / l) * k_m) * t) / ((l / k_m) / k_m)); else tmp = 2.0 / (((t * (k_m / l)) / (l / k_m)) * (tan(k_m) * sin(k_m))); end tmp_2 = tmp; end
k_m = N[Abs[k], $MachinePrecision] code[t_, l_, k$95$m_] := If[LessEqual[k$95$m, 1.5e-51], N[(2.0 / N[(N[(N[(N[(k$95$m / l), $MachinePrecision] * k$95$m), $MachinePrecision] * t), $MachinePrecision] / N[(N[(l / k$95$m), $MachinePrecision] / k$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(2.0 / N[(N[(N[(t * N[(k$95$m / l), $MachinePrecision]), $MachinePrecision] / N[(l / k$95$m), $MachinePrecision]), $MachinePrecision] * N[(N[Tan[k$95$m], $MachinePrecision] * N[Sin[k$95$m], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
k_m = \left|k\right|
\\
\begin{array}{l}
\mathbf{if}\;k\_m \leq 1.5 \cdot 10^{-51}:\\
\;\;\;\;\frac{2}{\frac{\left(\frac{k\_m}{\ell} \cdot k\_m\right) \cdot t}{\frac{\frac{\ell}{k\_m}}{k\_m}}}\\
\mathbf{else}:\\
\;\;\;\;\frac{2}{\frac{t \cdot \frac{k\_m}{\ell}}{\frac{\ell}{k\_m}} \cdot \left(\tan k\_m \cdot \sin k\_m\right)}\\
\end{array}
\end{array}
if k < 1.50000000000000001e-51: Initial program 35.9%
Taylor expanded in k around 0
*-commutativeN/A
associate-/l*N/A
*-commutativeN/A
lower-*.f64N/A
unpow2N/A
associate-/r*N/A
lower-/.f64N/A
lower-/.f64N/A
lower-pow.f6472.4
Applied rewrites72.4%
Applied rewrites79.0%
Applied rewrites83.6%
if 1.50000000000000001e-51 < k Initial program 33.0%
Taylor expanded in t around 0
unpow2N/A
associate-*l*N/A
*-commutativeN/A
unpow2N/A
associate-*r*N/A
times-fracN/A
*-commutativeN/A
lower-*.f64N/A
*-commutativeN/A
associate-/r*N/A
lower-/.f64N/A
lower-/.f64N/A
lower-cos.f64N/A
*-commutativeN/A
lower-/.f64N/A
Applied rewrites90.0%
Applied rewrites98.6%
Applied rewrites98.8%
Final simplification88.2%
k_m = (fabs.f64 k) (FPCore (t l k_m) :precision binary64 (if (<= k_m 5.3e-39) (/ 2.0 (/ (* (* (/ k_m l) k_m) t) (/ (/ l k_m) k_m))) (/ 2.0 (* (* (/ k_m l) t) (* (/ k_m l) (* (tan k_m) (sin k_m)))))))
k_m = fabs(k);
double code(double t, double l, double k_m) {
double tmp;
if (k_m <= 5.3e-39) {
tmp = 2.0 / ((((k_m / l) * k_m) * t) / ((l / k_m) / k_m));
} else {
tmp = 2.0 / (((k_m / l) * t) * ((k_m / l) * (tan(k_m) * sin(k_m))));
}
return tmp;
}
k_m = abs(k)
real(8) function code(t, l, k_m)
real(8), intent (in) :: t
real(8), intent (in) :: l
real(8), intent (in) :: k_m
real(8) :: tmp
if (k_m <= 5.3d-39) then
tmp = 2.0d0 / ((((k_m / l) * k_m) * t) / ((l / k_m) / k_m))
else
tmp = 2.0d0 / (((k_m / l) * t) * ((k_m / l) * (tan(k_m) * sin(k_m))))
end if
code = tmp
end function
k_m = Math.abs(k);
public static double code(double t, double l, double k_m) {
double tmp;
if (k_m <= 5.3e-39) {
tmp = 2.0 / ((((k_m / l) * k_m) * t) / ((l / k_m) / k_m));
} else {
tmp = 2.0 / (((k_m / l) * t) * ((k_m / l) * (Math.tan(k_m) * Math.sin(k_m))));
}
return tmp;
}
k_m = math.fabs(k) def code(t, l, k_m): tmp = 0 if k_m <= 5.3e-39: tmp = 2.0 / ((((k_m / l) * k_m) * t) / ((l / k_m) / k_m)) else: tmp = 2.0 / (((k_m / l) * t) * ((k_m / l) * (math.tan(k_m) * math.sin(k_m)))) return tmp
k_m = abs(k) function code(t, l, k_m) tmp = 0.0 if (k_m <= 5.3e-39) tmp = Float64(2.0 / Float64(Float64(Float64(Float64(k_m / l) * k_m) * t) / Float64(Float64(l / k_m) / k_m))); else tmp = Float64(2.0 / Float64(Float64(Float64(k_m / l) * t) * Float64(Float64(k_m / l) * Float64(tan(k_m) * sin(k_m))))); end return tmp end
k_m = abs(k); function tmp_2 = code(t, l, k_m) tmp = 0.0; if (k_m <= 5.3e-39) tmp = 2.0 / ((((k_m / l) * k_m) * t) / ((l / k_m) / k_m)); else tmp = 2.0 / (((k_m / l) * t) * ((k_m / l) * (tan(k_m) * sin(k_m)))); end tmp_2 = tmp; end
k_m = N[Abs[k], $MachinePrecision] code[t_, l_, k$95$m_] := If[LessEqual[k$95$m, 5.3e-39], N[(2.0 / N[(N[(N[(N[(k$95$m / l), $MachinePrecision] * k$95$m), $MachinePrecision] * t), $MachinePrecision] / N[(N[(l / k$95$m), $MachinePrecision] / k$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(2.0 / N[(N[(N[(k$95$m / l), $MachinePrecision] * t), $MachinePrecision] * N[(N[(k$95$m / l), $MachinePrecision] * N[(N[Tan[k$95$m], $MachinePrecision] * N[Sin[k$95$m], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
k_m = \left|k\right|
\\
\begin{array}{l}
\mathbf{if}\;k\_m \leq 5.3 \cdot 10^{-39}:\\
\;\;\;\;\frac{2}{\frac{\left(\frac{k\_m}{\ell} \cdot k\_m\right) \cdot t}{\frac{\frac{\ell}{k\_m}}{k\_m}}}\\
\mathbf{else}:\\
\;\;\;\;\frac{2}{\left(\frac{k\_m}{\ell} \cdot t\right) \cdot \left(\frac{k\_m}{\ell} \cdot \left(\tan k\_m \cdot \sin k\_m\right)\right)}\\
\end{array}
\end{array}
if k < 5.30000000000000003e-39Initial program 36.0%
Taylor expanded in k around 0
*-commutativeN/A
associate-/l*N/A
*-commutativeN/A
lower-*.f64N/A
unpow2N/A
associate-/r*N/A
lower-/.f64N/A
lower-/.f64N/A
lower-pow.f6472.7
Applied rewrites72.7%
Applied rewrites79.2%
Applied rewrites83.8%
if 5.30000000000000003e-39 < k Initial program 32.5%
Taylor expanded in t around 0
unpow2N/A
associate-*l*N/A
*-commutativeN/A
unpow2N/A
associate-*r*N/A
times-fracN/A
*-commutativeN/A
lower-*.f64N/A
*-commutativeN/A
associate-/r*N/A
lower-/.f64N/A
lower-/.f64N/A
lower-cos.f64N/A
*-commutativeN/A
lower-/.f64N/A
Applied rewrites89.7%
Applied rewrites98.6%
Applied rewrites98.8%
Applied rewrites98.7%
Final simplification88.2%
k_m = (fabs.f64 k) (FPCore (t l k_m) :precision binary64 (if (<= k_m 1.05e-43) (/ 2.0 (/ (* (* (/ k_m l) k_m) t) (/ (/ l k_m) k_m))) (/ 2.0 (* (* (/ k_m l) (* (/ t l) k_m)) (* (tan k_m) (sin k_m))))))
k_m = fabs(k);
double code(double t, double l, double k_m) {
double tmp;
if (k_m <= 1.05e-43) {
tmp = 2.0 / ((((k_m / l) * k_m) * t) / ((l / k_m) / k_m));
} else {
tmp = 2.0 / (((k_m / l) * ((t / l) * k_m)) * (tan(k_m) * sin(k_m)));
}
return tmp;
}
k_m = abs(k)
real(8) function code(t, l, k_m)
real(8), intent (in) :: t
real(8), intent (in) :: l
real(8), intent (in) :: k_m
real(8) :: tmp
if (k_m <= 1.05d-43) then
tmp = 2.0d0 / ((((k_m / l) * k_m) * t) / ((l / k_m) / k_m))
else
tmp = 2.0d0 / (((k_m / l) * ((t / l) * k_m)) * (tan(k_m) * sin(k_m)))
end if
code = tmp
end function
k_m = Math.abs(k);
public static double code(double t, double l, double k_m) {
double tmp;
if (k_m <= 1.05e-43) {
tmp = 2.0 / ((((k_m / l) * k_m) * t) / ((l / k_m) / k_m));
} else {
tmp = 2.0 / (((k_m / l) * ((t / l) * k_m)) * (Math.tan(k_m) * Math.sin(k_m)));
}
return tmp;
}
k_m = math.fabs(k) def code(t, l, k_m): tmp = 0 if k_m <= 1.05e-43: tmp = 2.0 / ((((k_m / l) * k_m) * t) / ((l / k_m) / k_m)) else: tmp = 2.0 / (((k_m / l) * ((t / l) * k_m)) * (math.tan(k_m) * math.sin(k_m))) return tmp
k_m = abs(k) function code(t, l, k_m) tmp = 0.0 if (k_m <= 1.05e-43) tmp = Float64(2.0 / Float64(Float64(Float64(Float64(k_m / l) * k_m) * t) / Float64(Float64(l / k_m) / k_m))); else tmp = Float64(2.0 / Float64(Float64(Float64(k_m / l) * Float64(Float64(t / l) * k_m)) * Float64(tan(k_m) * sin(k_m)))); end return tmp end
k_m = abs(k); function tmp_2 = code(t, l, k_m) tmp = 0.0; if (k_m <= 1.05e-43) tmp = 2.0 / ((((k_m / l) * k_m) * t) / ((l / k_m) / k_m)); else tmp = 2.0 / (((k_m / l) * ((t / l) * k_m)) * (tan(k_m) * sin(k_m))); end tmp_2 = tmp; end
k_m = N[Abs[k], $MachinePrecision] code[t_, l_, k$95$m_] := If[LessEqual[k$95$m, 1.05e-43], N[(2.0 / N[(N[(N[(N[(k$95$m / l), $MachinePrecision] * k$95$m), $MachinePrecision] * t), $MachinePrecision] / N[(N[(l / k$95$m), $MachinePrecision] / k$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(2.0 / N[(N[(N[(k$95$m / l), $MachinePrecision] * N[(N[(t / l), $MachinePrecision] * k$95$m), $MachinePrecision]), $MachinePrecision] * N[(N[Tan[k$95$m], $MachinePrecision] * N[Sin[k$95$m], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
k_m = \left|k\right|
\\
\begin{array}{l}
\mathbf{if}\;k\_m \leq 1.05 \cdot 10^{-43}:\\
\;\;\;\;\frac{2}{\frac{\left(\frac{k\_m}{\ell} \cdot k\_m\right) \cdot t}{\frac{\frac{\ell}{k\_m}}{k\_m}}}\\
\mathbf{else}:\\
\;\;\;\;\frac{2}{\left(\frac{k\_m}{\ell} \cdot \left(\frac{t}{\ell} \cdot k\_m\right)\right) \cdot \left(\tan k\_m \cdot \sin k\_m\right)}\\
\end{array}
\end{array}
if k < 1.05e-43Initial program 36.2%
Taylor expanded in k around 0
*-commutativeN/A
associate-/l*N/A
*-commutativeN/A
lower-*.f64N/A
unpow2N/A
associate-/r*N/A
lower-/.f64N/A
lower-/.f64N/A
lower-pow.f6472.5
Applied rewrites72.5%
Applied rewrites79.1%
Applied rewrites83.7%
if 1.05e-43 < k Initial program 32.1%
Taylor expanded in t around 0
unpow2N/A
associate-*l*N/A
*-commutativeN/A
unpow2N/A
associate-*r*N/A
times-fracN/A
*-commutativeN/A
lower-*.f64N/A
*-commutativeN/A
associate-/r*N/A
lower-/.f64N/A
lower-/.f64N/A
lower-cos.f64N/A
*-commutativeN/A
lower-/.f64N/A
Applied rewrites89.8%
Applied rewrites98.6%
Applied rewrites98.8%
Taylor expanded in t around 0
Applied rewrites95.2%
Final simplification87.1%
k_m = (fabs.f64 k) (FPCore (t l k_m) :precision binary64 (if (<= k_m 5e-23) (/ 2.0 (/ (* (* (/ k_m l) k_m) t) (/ (/ l k_m) k_m))) (/ 2.0 (* k_m (* (/ (* t (/ k_m l)) l) (* (tan k_m) (sin k_m)))))))
k_m = fabs(k);
double code(double t, double l, double k_m) {
double tmp;
if (k_m <= 5e-23) {
tmp = 2.0 / ((((k_m / l) * k_m) * t) / ((l / k_m) / k_m));
} else {
tmp = 2.0 / (k_m * (((t * (k_m / l)) / l) * (tan(k_m) * sin(k_m))));
}
return tmp;
}
k_m = abs(k)
real(8) function code(t, l, k_m)
real(8), intent (in) :: t
real(8), intent (in) :: l
real(8), intent (in) :: k_m
real(8) :: tmp
if (k_m <= 5d-23) then
tmp = 2.0d0 / ((((k_m / l) * k_m) * t) / ((l / k_m) / k_m))
else
tmp = 2.0d0 / (k_m * (((t * (k_m / l)) / l) * (tan(k_m) * sin(k_m))))
end if
code = tmp
end function
k_m = Math.abs(k);
public static double code(double t, double l, double k_m) {
double tmp;
if (k_m <= 5e-23) {
tmp = 2.0 / ((((k_m / l) * k_m) * t) / ((l / k_m) / k_m));
} else {
tmp = 2.0 / (k_m * (((t * (k_m / l)) / l) * (Math.tan(k_m) * Math.sin(k_m))));
}
return tmp;
}
k_m = math.fabs(k) def code(t, l, k_m): tmp = 0 if k_m <= 5e-23: tmp = 2.0 / ((((k_m / l) * k_m) * t) / ((l / k_m) / k_m)) else: tmp = 2.0 / (k_m * (((t * (k_m / l)) / l) * (math.tan(k_m) * math.sin(k_m)))) return tmp
k_m = abs(k) function code(t, l, k_m) tmp = 0.0 if (k_m <= 5e-23) tmp = Float64(2.0 / Float64(Float64(Float64(Float64(k_m / l) * k_m) * t) / Float64(Float64(l / k_m) / k_m))); else tmp = Float64(2.0 / Float64(k_m * Float64(Float64(Float64(t * Float64(k_m / l)) / l) * Float64(tan(k_m) * sin(k_m))))); end return tmp end
k_m = abs(k); function tmp_2 = code(t, l, k_m) tmp = 0.0; if (k_m <= 5e-23) tmp = 2.0 / ((((k_m / l) * k_m) * t) / ((l / k_m) / k_m)); else tmp = 2.0 / (k_m * (((t * (k_m / l)) / l) * (tan(k_m) * sin(k_m)))); end tmp_2 = tmp; end
k_m = N[Abs[k], $MachinePrecision] code[t_, l_, k$95$m_] := If[LessEqual[k$95$m, 5e-23], N[(2.0 / N[(N[(N[(N[(k$95$m / l), $MachinePrecision] * k$95$m), $MachinePrecision] * t), $MachinePrecision] / N[(N[(l / k$95$m), $MachinePrecision] / k$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(2.0 / N[(k$95$m * N[(N[(N[(t * N[(k$95$m / l), $MachinePrecision]), $MachinePrecision] / l), $MachinePrecision] * N[(N[Tan[k$95$m], $MachinePrecision] * N[Sin[k$95$m], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
k_m = \left|k\right|
\\
\begin{array}{l}
\mathbf{if}\;k\_m \leq 5 \cdot 10^{-23}:\\
\;\;\;\;\frac{2}{\frac{\left(\frac{k\_m}{\ell} \cdot k\_m\right) \cdot t}{\frac{\frac{\ell}{k\_m}}{k\_m}}}\\
\mathbf{else}:\\
\;\;\;\;\frac{2}{k\_m \cdot \left(\frac{t \cdot \frac{k\_m}{\ell}}{\ell} \cdot \left(\tan k\_m \cdot \sin k\_m\right)\right)}\\
\end{array}
\end{array}
if k < 5.0000000000000002e-23Initial program 35.8%
Taylor expanded in k around 0
*-commutativeN/A
associate-/l*N/A
*-commutativeN/A
lower-*.f64N/A
unpow2N/A
associate-/r*N/A
lower-/.f64N/A
lower-/.f64N/A
lower-pow.f6473.2
Applied rewrites73.2%
Applied rewrites79.5%
Applied rewrites84.4%
if 5.0000000000000002e-23 < k Initial program 32.8%
Taylor expanded in t around 0
unpow2N/A
associate-*l*N/A
*-commutativeN/A
unpow2N/A
associate-*r*N/A
times-fracN/A
*-commutativeN/A
lower-*.f64N/A
*-commutativeN/A
associate-/r*N/A
lower-/.f64N/A
lower-/.f64N/A
lower-cos.f64N/A
*-commutativeN/A
lower-/.f64N/A
Applied rewrites88.7%
Applied rewrites98.5%
Applied rewrites94.6%
Final simplification87.1%
k_m = (fabs.f64 k) (FPCore (t l k_m) :precision binary64 (if (<= k_m 2.9e-6) (/ 2.0 (/ (* (* (/ k_m l) k_m) t) (/ (/ l k_m) k_m))) (/ 2.0 (* (/ (* (* k_m t) k_m) (* l l)) (* (tan k_m) (sin k_m))))))
k_m = fabs(k);
double code(double t, double l, double k_m) {
double tmp;
if (k_m <= 2.9e-6) {
tmp = 2.0 / ((((k_m / l) * k_m) * t) / ((l / k_m) / k_m));
} else {
tmp = 2.0 / ((((k_m * t) * k_m) / (l * l)) * (tan(k_m) * sin(k_m)));
}
return tmp;
}
k_m = abs(k)
real(8) function code(t, l, k_m)
real(8), intent (in) :: t
real(8), intent (in) :: l
real(8), intent (in) :: k_m
real(8) :: tmp
if (k_m <= 2.9d-6) then
tmp = 2.0d0 / ((((k_m / l) * k_m) * t) / ((l / k_m) / k_m))
else
tmp = 2.0d0 / ((((k_m * t) * k_m) / (l * l)) * (tan(k_m) * sin(k_m)))
end if
code = tmp
end function
k_m = Math.abs(k);
public static double code(double t, double l, double k_m) {
double tmp;
if (k_m <= 2.9e-6) {
tmp = 2.0 / ((((k_m / l) * k_m) * t) / ((l / k_m) / k_m));
} else {
tmp = 2.0 / ((((k_m * t) * k_m) / (l * l)) * (Math.tan(k_m) * Math.sin(k_m)));
}
return tmp;
}
k_m = math.fabs(k) def code(t, l, k_m): tmp = 0 if k_m <= 2.9e-6: tmp = 2.0 / ((((k_m / l) * k_m) * t) / ((l / k_m) / k_m)) else: tmp = 2.0 / ((((k_m * t) * k_m) / (l * l)) * (math.tan(k_m) * math.sin(k_m))) return tmp
k_m = abs(k) function code(t, l, k_m) tmp = 0.0 if (k_m <= 2.9e-6) tmp = Float64(2.0 / Float64(Float64(Float64(Float64(k_m / l) * k_m) * t) / Float64(Float64(l / k_m) / k_m))); else tmp = Float64(2.0 / Float64(Float64(Float64(Float64(k_m * t) * k_m) / Float64(l * l)) * Float64(tan(k_m) * sin(k_m)))); end return tmp end
k_m = abs(k); function tmp_2 = code(t, l, k_m) tmp = 0.0; if (k_m <= 2.9e-6) tmp = 2.0 / ((((k_m / l) * k_m) * t) / ((l / k_m) / k_m)); else tmp = 2.0 / ((((k_m * t) * k_m) / (l * l)) * (tan(k_m) * sin(k_m))); end tmp_2 = tmp; end
k_m = N[Abs[k], $MachinePrecision] code[t_, l_, k$95$m_] := If[LessEqual[k$95$m, 2.9e-6], N[(2.0 / N[(N[(N[(N[(k$95$m / l), $MachinePrecision] * k$95$m), $MachinePrecision] * t), $MachinePrecision] / N[(N[(l / k$95$m), $MachinePrecision] / k$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(2.0 / N[(N[(N[(N[(k$95$m * t), $MachinePrecision] * k$95$m), $MachinePrecision] / N[(l * l), $MachinePrecision]), $MachinePrecision] * N[(N[Tan[k$95$m], $MachinePrecision] * N[Sin[k$95$m], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
k_m = \left|k\right|
\\
\begin{array}{l}
\mathbf{if}\;k\_m \leq 2.9 \cdot 10^{-6}:\\
\;\;\;\;\frac{2}{\frac{\left(\frac{k\_m}{\ell} \cdot k\_m\right) \cdot t}{\frac{\frac{\ell}{k\_m}}{k\_m}}}\\
\mathbf{else}:\\
\;\;\;\;\frac{2}{\frac{\left(k\_m \cdot t\right) \cdot k\_m}{\ell \cdot \ell} \cdot \left(\tan k\_m \cdot \sin k\_m\right)}\\
\end{array}
\end{array}
if k < 2.9000000000000002e-6Initial program 35.6%
Taylor expanded in k around 0
*-commutativeN/A
associate-/l*N/A
*-commutativeN/A
lower-*.f64N/A
unpow2N/A
associate-/r*N/A
lower-/.f64N/A
lower-/.f64N/A
lower-pow.f6473.2
Applied rewrites73.2%
Applied rewrites79.4%
Applied rewrites84.7%
if 2.9000000000000002e-6 < k Initial program 33.3%
Taylor expanded in t around 0
unpow2N/A
associate-*l*N/A
*-commutativeN/A
unpow2N/A
associate-*r*N/A
times-fracN/A
*-commutativeN/A
lower-*.f64N/A
*-commutativeN/A
associate-/r*N/A
lower-/.f64N/A
lower-/.f64N/A
lower-cos.f64N/A
*-commutativeN/A
lower-/.f64N/A
Applied rewrites88.0%
Applied rewrites98.4%
Applied rewrites98.6%
Applied rewrites71.4%
Final simplification81.4%
k_m = (fabs.f64 k) (FPCore (t l k_m) :precision binary64 (if (<= k_m 2.9e-6) (/ 2.0 (/ (* (* (/ k_m l) k_m) t) (/ (/ l k_m) k_m))) (/ 2.0 (* (/ (* (* k_m k_m) t) (* l l)) (* (tan k_m) (sin k_m))))))
k_m = fabs(k);
double code(double t, double l, double k_m) {
double tmp;
if (k_m <= 2.9e-6) {
tmp = 2.0 / ((((k_m / l) * k_m) * t) / ((l / k_m) / k_m));
} else {
tmp = 2.0 / ((((k_m * k_m) * t) / (l * l)) * (tan(k_m) * sin(k_m)));
}
return tmp;
}
k_m = abs(k)
real(8) function code(t, l, k_m)
real(8), intent (in) :: t
real(8), intent (in) :: l
real(8), intent (in) :: k_m
real(8) :: tmp
if (k_m <= 2.9d-6) then
tmp = 2.0d0 / ((((k_m / l) * k_m) * t) / ((l / k_m) / k_m))
else
tmp = 2.0d0 / ((((k_m * k_m) * t) / (l * l)) * (tan(k_m) * sin(k_m)))
end if
code = tmp
end function
k_m = Math.abs(k);
public static double code(double t, double l, double k_m) {
double tmp;
if (k_m <= 2.9e-6) {
tmp = 2.0 / ((((k_m / l) * k_m) * t) / ((l / k_m) / k_m));
} else {
tmp = 2.0 / ((((k_m * k_m) * t) / (l * l)) * (Math.tan(k_m) * Math.sin(k_m)));
}
return tmp;
}
k_m = math.fabs(k) def code(t, l, k_m): tmp = 0 if k_m <= 2.9e-6: tmp = 2.0 / ((((k_m / l) * k_m) * t) / ((l / k_m) / k_m)) else: tmp = 2.0 / ((((k_m * k_m) * t) / (l * l)) * (math.tan(k_m) * math.sin(k_m))) return tmp
k_m = abs(k) function code(t, l, k_m) tmp = 0.0 if (k_m <= 2.9e-6) tmp = Float64(2.0 / Float64(Float64(Float64(Float64(k_m / l) * k_m) * t) / Float64(Float64(l / k_m) / k_m))); else tmp = Float64(2.0 / Float64(Float64(Float64(Float64(k_m * k_m) * t) / Float64(l * l)) * Float64(tan(k_m) * sin(k_m)))); end return tmp end
k_m = abs(k); function tmp_2 = code(t, l, k_m) tmp = 0.0; if (k_m <= 2.9e-6) tmp = 2.0 / ((((k_m / l) * k_m) * t) / ((l / k_m) / k_m)); else tmp = 2.0 / ((((k_m * k_m) * t) / (l * l)) * (tan(k_m) * sin(k_m))); end tmp_2 = tmp; end
k_m = N[Abs[k], $MachinePrecision] code[t_, l_, k$95$m_] := If[LessEqual[k$95$m, 2.9e-6], N[(2.0 / N[(N[(N[(N[(k$95$m / l), $MachinePrecision] * k$95$m), $MachinePrecision] * t), $MachinePrecision] / N[(N[(l / k$95$m), $MachinePrecision] / k$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(2.0 / N[(N[(N[(N[(k$95$m * k$95$m), $MachinePrecision] * t), $MachinePrecision] / N[(l * l), $MachinePrecision]), $MachinePrecision] * N[(N[Tan[k$95$m], $MachinePrecision] * N[Sin[k$95$m], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
k_m = \left|k\right|
\\
\begin{array}{l}
\mathbf{if}\;k\_m \leq 2.9 \cdot 10^{-6}:\\
\;\;\;\;\frac{2}{\frac{\left(\frac{k\_m}{\ell} \cdot k\_m\right) \cdot t}{\frac{\frac{\ell}{k\_m}}{k\_m}}}\\
\mathbf{else}:\\
\;\;\;\;\frac{2}{\frac{\left(k\_m \cdot k\_m\right) \cdot t}{\ell \cdot \ell} \cdot \left(\tan k\_m \cdot \sin k\_m\right)}\\
\end{array}
\end{array}
if k < 2.9000000000000002e-6Initial program 35.6%
Taylor expanded in k around 0
*-commutativeN/A
associate-/l*N/A
*-commutativeN/A
lower-*.f64N/A
unpow2N/A
associate-/r*N/A
lower-/.f64N/A
lower-/.f64N/A
lower-pow.f6473.2
Applied rewrites73.2%
Applied rewrites79.4%
Applied rewrites84.7%
if 2.9000000000000002e-6 < k Initial program 33.3%
Taylor expanded in t around 0
unpow2N/A
associate-*l*N/A
*-commutativeN/A
unpow2N/A
associate-*r*N/A
times-fracN/A
*-commutativeN/A
lower-*.f64N/A
*-commutativeN/A
associate-/r*N/A
lower-/.f64N/A
lower-/.f64N/A
lower-cos.f64N/A
*-commutativeN/A
lower-/.f64N/A
Applied rewrites88.0%
Applied rewrites98.4%
Applied rewrites98.6%
Applied rewrites62.1%
Final simplification79.0%
k_m = (fabs.f64 k) (FPCore (t l k_m) :precision binary64 (/ 2.0 (/ (* (* (/ k_m l) k_m) t) (/ (/ l k_m) k_m))))
k_m = fabs(k);
/*
 * Herbie alternative 8 (branchless series form). Caller pre-computes
 * k_m = fabs(k). Equivalent to 2 l^2 / (k_m^4 t), expressed as staged
 * divisions so intermediates stay in range.
 */
double code(double t, double l, double k_m) {
    double num = ((k_m / l) * k_m) * t;   /* k_m^2 t / l */
    double den = (l / k_m) / k_m;         /* l / k_m^2 */
    return 2.0 / (num / den);
}
k_m = abs(k)
real(8) function code(t, l, k_m)
real(8), intent (in) :: t
real(8), intent (in) :: l
real(8), intent (in) :: k_m
code = 2.0d0 / ((((k_m / l) * k_m) * t) / ((l / k_m) / k_m))
end function
k_m = Math.abs(k);
public static double code(double t, double l, double k_m) {
return 2.0 / ((((k_m / l) * k_m) * t) / ((l / k_m) / k_m));
}
k_m = math.fabs(k) def code(t, l, k_m): return 2.0 / ((((k_m / l) * k_m) * t) / ((l / k_m) / k_m))
k_m = abs(k) function code(t, l, k_m) return Float64(2.0 / Float64(Float64(Float64(Float64(k_m / l) * k_m) * t) / Float64(Float64(l / k_m) / k_m))) end
k_m = abs(k); function tmp = code(t, l, k_m) tmp = 2.0 / ((((k_m / l) * k_m) * t) / ((l / k_m) / k_m)); end
k_m = N[Abs[k], $MachinePrecision] code[t_, l_, k$95$m_] := N[(2.0 / N[(N[(N[(N[(k$95$m / l), $MachinePrecision] * k$95$m), $MachinePrecision] * t), $MachinePrecision] / N[(N[(l / k$95$m), $MachinePrecision] / k$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
k_m = \left|k\right|
\\
\frac{2}{\frac{\left(\frac{k\_m}{\ell} \cdot k\_m\right) \cdot t}{\frac{\frac{\ell}{k\_m}}{k\_m}}}
\end{array}
Initial program 35.0%
Taylor expanded in k around 0
*-commutativeN/A
associate-/l*N/A
*-commutativeN/A
lower-*.f64N/A
unpow2N/A
associate-/r*N/A
lower-/.f64N/A
lower-/.f64N/A
lower-pow.f6466.6
Applied rewrites66.6%
Applied rewrites71.2%
Applied rewrites75.2%
Final simplification75.2%
k_m = (fabs.f64 k) (FPCore (t l k_m) :precision binary64 (let* ((t_1 (* (/ k_m l) k_m))) (/ 2.0 (* t_1 (* t_1 t)))))
k_m = fabs(k);
/* Herbie alternative 9: common-subexpression form of 2 l^2 / (k_m^4 t).
   Caller pre-computes k_m = fabs(k). */
double code(double t, double l, double k_m) {
double t_1 = (k_m / l) * k_m;   /* k_m^2 / l, reused twice */
return 2.0 / (t_1 * (t_1 * t));
}
k_m = abs(k)
real(8) function code(t, l, k_m)
real(8), intent (in) :: t
real(8), intent (in) :: l
real(8), intent (in) :: k_m
real(8) :: t_1
t_1 = (k_m / l) * k_m
code = 2.0d0 / (t_1 * (t_1 * t))
end function
k_m = Math.abs(k);
public static double code(double t, double l, double k_m) {
double t_1 = (k_m / l) * k_m;
return 2.0 / (t_1 * (t_1 * t));
}
k_m = math.fabs(k) def code(t, l, k_m): t_1 = (k_m / l) * k_m return 2.0 / (t_1 * (t_1 * t))
k_m = abs(k) function code(t, l, k_m) t_1 = Float64(Float64(k_m / l) * k_m) return Float64(2.0 / Float64(t_1 * Float64(t_1 * t))) end
k_m = abs(k); function tmp = code(t, l, k_m) t_1 = (k_m / l) * k_m; tmp = 2.0 / (t_1 * (t_1 * t)); end
k_m = N[Abs[k], $MachinePrecision]
code[t_, l_, k$95$m_] := Block[{t$95$1 = N[(N[(k$95$m / l), $MachinePrecision] * k$95$m), $MachinePrecision]}, N[(2.0 / N[(t$95$1 * N[(t$95$1 * t), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
k_m = \left|k\right|
\\
\begin{array}{l}
t_1 := \frac{k\_m}{\ell} \cdot k\_m\\
\frac{2}{t\_1 \cdot \left(t\_1 \cdot t\right)}
\end{array}
\end{array}
Initial program 35.0%
Taylor expanded in k around 0
*-commutativeN/A
associate-/l*N/A
*-commutativeN/A
lower-*.f64N/A
unpow2N/A
associate-/r*N/A
lower-/.f64N/A
lower-/.f64N/A
lower-pow.f6466.6
Applied rewrites66.6%
Applied rewrites71.2%
Applied rewrites75.1%
k_m = (fabs.f64 k) (FPCore (t l k_m) :precision binary64 (/ 2.0 (* k_m (* (/ k_m l) (* (* (/ k_m l) k_m) t)))))
k_m = fabs(k);
double code(double t, double l, double k_m) {
return 2.0 / (k_m * ((k_m / l) * (((k_m / l) * k_m) * t)));
}
k_m = abs(k)
real(8) function code(t, l, k_m)
real(8), intent (in) :: t
real(8), intent (in) :: l
real(8), intent (in) :: k_m
code = 2.0d0 / (k_m * ((k_m / l) * (((k_m / l) * k_m) * t)))
end function
k_m = Math.abs(k);
public static double code(double t, double l, double k_m) {
return 2.0 / (k_m * ((k_m / l) * (((k_m / l) * k_m) * t)));
}
k_m = math.fabs(k) def code(t, l, k_m): return 2.0 / (k_m * ((k_m / l) * (((k_m / l) * k_m) * t)))
k_m = abs(k) function code(t, l, k_m) return Float64(2.0 / Float64(k_m * Float64(Float64(k_m / l) * Float64(Float64(Float64(k_m / l) * k_m) * t)))) end
k_m = abs(k); function tmp = code(t, l, k_m) tmp = 2.0 / (k_m * ((k_m / l) * (((k_m / l) * k_m) * t))); end
k_m = N[Abs[k], $MachinePrecision] code[t_, l_, k$95$m_] := N[(2.0 / N[(k$95$m * N[(N[(k$95$m / l), $MachinePrecision] * N[(N[(N[(k$95$m / l), $MachinePrecision] * k$95$m), $MachinePrecision] * t), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
k_m = \left|k\right|
\\
\frac{2}{k\_m \cdot \left(\frac{k\_m}{\ell} \cdot \left(\left(\frac{k\_m}{\ell} \cdot k\_m\right) \cdot t\right)\right)}
\end{array}
Initial program 35.0%
Taylor expanded in k around 0
*-commutativeN/A
associate-/l*N/A
*-commutativeN/A
lower-*.f64N/A
unpow2N/A
associate-/r*N/A
lower-/.f64N/A
lower-/.f64N/A
lower-pow.f6466.6
Applied rewrites66.6%
Applied rewrites71.2%
Applied rewrites74.4%
k_m = (fabs.f64 k) (FPCore (t l k_m) :precision binary64 (/ 2.0 (* k_m (* (/ k_m l) (* (* (/ t l) k_m) k_m)))))
k_m = fabs(k);
double code(double t, double l, double k_m) {
return 2.0 / (k_m * ((k_m / l) * (((t / l) * k_m) * k_m)));
}
k_m = abs(k)
real(8) function code(t, l, k_m)
real(8), intent (in) :: t
real(8), intent (in) :: l
real(8), intent (in) :: k_m
code = 2.0d0 / (k_m * ((k_m / l) * (((t / l) * k_m) * k_m)))
end function
k_m = Math.abs(k);
public static double code(double t, double l, double k_m) {
return 2.0 / (k_m * ((k_m / l) * (((t / l) * k_m) * k_m)));
}
k_m = math.fabs(k) def code(t, l, k_m): return 2.0 / (k_m * ((k_m / l) * (((t / l) * k_m) * k_m)))
k_m = abs(k) function code(t, l, k_m) return Float64(2.0 / Float64(k_m * Float64(Float64(k_m / l) * Float64(Float64(Float64(t / l) * k_m) * k_m)))) end
k_m = abs(k); function tmp = code(t, l, k_m) tmp = 2.0 / (k_m * ((k_m / l) * (((t / l) * k_m) * k_m))); end
k_m = N[Abs[k], $MachinePrecision] code[t_, l_, k$95$m_] := N[(2.0 / N[(k$95$m * N[(N[(k$95$m / l), $MachinePrecision] * N[(N[(N[(t / l), $MachinePrecision] * k$95$m), $MachinePrecision] * k$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
k_m = \left|k\right|
\\
\frac{2}{k\_m \cdot \left(\frac{k\_m}{\ell} \cdot \left(\left(\frac{t}{\ell} \cdot k\_m\right) \cdot k\_m\right)\right)}
\end{array}
Initial program 35.0%
Taylor expanded in k around 0
*-commutativeN/A
associate-/l*N/A
*-commutativeN/A
lower-*.f64N/A
unpow2N/A
associate-/r*N/A
lower-/.f64N/A
lower-/.f64N/A
lower-pow.f6466.6
Applied rewrites66.6%
Applied rewrites71.2%
Applied rewrites74.4%
Taylor expanded in t around 0
Applied rewrites69.5%
herbie shell --seed 2024304
; Seed FPCore program as passed to `herbie shell --seed 2024304` (see above);
; identical to the expression analyzed throughout this report.
(FPCore (t l k)
:name "Toniolo and Linder, Equation (10-)"
:precision binary64
(/ 2.0 (* (* (* (/ (pow t 3.0) (* l l)) (sin k)) (tan k)) (- (+ 1.0 (pow (/ k t) 2.0)) 1.0))))