
(FPCore (t l k) :precision binary64 (/ 2.0 (* (* (* (/ (pow t 3.0) (* l l)) (sin k)) (tan k)) (- (+ 1.0 (pow (/ k t) 2.0)) 1.0))))
// Initial program: 2 / ((t^3 / l^2) * sin(k) * tan(k) * ((1 + (k/t)^2) - 1)).
// NOTE(review): the trailing (1 + x) - 1 cancels catastrophically when
// x = (k/t)^2 is tiny relative to 1; the Herbie alternatives below rewrite it.
double code(double t, double l, double k) {
return 2.0 / ((((pow(t, 3.0) / (l * l)) * sin(k)) * tan(k)) * ((1.0 + pow((k / t), 2.0)) - 1.0));
}
! Initial program: 2 / ((t**3 / l**2) * sin(k) * tan(k) * ((1 + (k/t)**2) - 1)).
! NOTE(review): (1 + x) - 1 cancels when (k/t)**2 is tiny; see alternatives below.
real(8) function code(t, l, k)
real(8), intent (in) :: t
real(8), intent (in) :: l
real(8), intent (in) :: k
code = 2.0d0 / (((((t ** 3.0d0) / (l * l)) * sin(k)) * tan(k)) * ((1.0d0 + ((k / t) ** 2.0d0)) - 1.0d0))
end function
// Initial program: 2 / ((t^3 / l^2) * sin(k) * tan(k) * ((1 + (k/t)^2) - 1)).
// NOTE(review): (1 + x) - 1 cancels when (k/t)^2 is tiny; see alternatives below.
public static double code(double t, double l, double k) {
return 2.0 / ((((Math.pow(t, 3.0) / (l * l)) * Math.sin(k)) * Math.tan(k)) * ((1.0 + Math.pow((k / t), 2.0)) - 1.0));
}
# Initial program: 2 / ((t**3 / l**2) * sin(k) * tan(k) * ((1 + (k/t)**2) - 1)).
def code(t, l, k): return 2.0 / ((((math.pow(t, 3.0) / (l * l)) * math.sin(k)) * math.tan(k)) * ((1.0 + math.pow((k / t), 2.0)) - 1.0))
# Initial program (explicit Float64 rounding at every step, mirroring binary64 FPCore).
function code(t, l, k) return Float64(2.0 / Float64(Float64(Float64(Float64((t ^ 3.0) / Float64(l * l)) * sin(k)) * tan(k)) * Float64(Float64(1.0 + (Float64(k / t) ^ 2.0)) - 1.0))) end
% Initial program: 2 / ((t^3 / l^2) * sin(k) * tan(k) * ((1 + (k/t)^2) - 1)).
function tmp = code(t, l, k) tmp = 2.0 / (((((t ^ 3.0) / (l * l)) * sin(k)) * tan(k)) * ((1.0 + ((k / t) ^ 2.0)) - 1.0)); end
(* Initial program; N[..., $MachinePrecision] wraps each step to mimic binary64 rounding. *)
code[t_, l_, k_] := N[(2.0 / N[(N[(N[(N[(N[Power[t, 3.0], $MachinePrecision] / N[(l * l), $MachinePrecision]), $MachinePrecision] * N[Sin[k], $MachinePrecision]), $MachinePrecision] * N[Tan[k], $MachinePrecision]), $MachinePrecision] * N[(N[(1.0 + N[Power[N[(k / t), $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision] - 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{2}{\left(\left(\frac{{t}^{3}}{\ell \cdot \ell} \cdot \sin k\right) \cdot \tan k\right) \cdot \left(\left(1 + {\left(\frac{k}{t}\right)}^{2}\right) - 1\right)}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 10 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (t l k) :precision binary64 (/ 2.0 (* (* (* (/ (pow t 3.0) (* l l)) (sin k)) (tan k)) (- (+ 1.0 (pow (/ k t) 2.0)) 1.0))))
// Duplicate listing of the initial program (repeated at the top of the
// report's alternatives section); identical to the definition above.
double code(double t, double l, double k) {
return 2.0 / ((((pow(t, 3.0) / (l * l)) * sin(k)) * tan(k)) * ((1.0 + pow((k / t), 2.0)) - 1.0));
}
! Duplicate listing of the initial program (repeated in the alternatives section).
real(8) function code(t, l, k)
real(8), intent (in) :: t
real(8), intent (in) :: l
real(8), intent (in) :: k
code = 2.0d0 / (((((t ** 3.0d0) / (l * l)) * sin(k)) * tan(k)) * ((1.0d0 + ((k / t) ** 2.0d0)) - 1.0d0))
end function
// Duplicate listing of the initial program (repeated in the alternatives section).
public static double code(double t, double l, double k) {
return 2.0 / ((((Math.pow(t, 3.0) / (l * l)) * Math.sin(k)) * Math.tan(k)) * ((1.0 + Math.pow((k / t), 2.0)) - 1.0));
}
# Duplicate listing of the initial program (repeated in the alternatives section).
def code(t, l, k): return 2.0 / ((((math.pow(t, 3.0) / (l * l)) * math.sin(k)) * math.tan(k)) * ((1.0 + math.pow((k / t), 2.0)) - 1.0))
# Duplicate listing of the initial program (repeated in the alternatives section).
function code(t, l, k) return Float64(2.0 / Float64(Float64(Float64(Float64((t ^ 3.0) / Float64(l * l)) * sin(k)) * tan(k)) * Float64(Float64(1.0 + (Float64(k / t) ^ 2.0)) - 1.0))) end
% Duplicate listing of the initial program (repeated in the alternatives section).
function tmp = code(t, l, k) tmp = 2.0 / (((((t ^ 3.0) / (l * l)) * sin(k)) * tan(k)) * ((1.0 + ((k / t) ^ 2.0)) - 1.0)); end
(* Duplicate listing of the initial program (repeated in the alternatives section). *)
code[t_, l_, k_] := N[(2.0 / N[(N[(N[(N[(N[Power[t, 3.0], $MachinePrecision] / N[(l * l), $MachinePrecision]), $MachinePrecision] * N[Sin[k], $MachinePrecision]), $MachinePrecision] * N[Tan[k], $MachinePrecision]), $MachinePrecision] * N[(N[(1.0 + N[Power[N[(k / t), $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision] - 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{2}{\left(\left(\frac{{t}^{3}}{\ell \cdot \ell} \cdot \sin k\right) \cdot \tan k\right) \cdot \left(\left(1 + {\left(\frac{k}{t}\right)}^{2}\right) - 1\right)}
\end{array}
k_m = (fabs.f64 k)
(FPCore (t l k_m)
:precision binary64
(if (<= k_m 0.0001)
(* (/ (* 2.0 l) (* t (* k_m k_m))) (/ l (* k_m k_m)))
(*
(/ (/ (* 2.0 l) (* t (fma (cos (+ k_m k_m)) -0.5 0.5))) k_m)
(/ (* l (cos k_m)) k_m))))k_m = fabs(k);
double code(double t, double l, double k_m) {
double tmp;
if (k_m <= 0.0001) {
tmp = ((2.0 * l) / (t * (k_m * k_m))) * (l / (k_m * k_m));
} else {
tmp = (((2.0 * l) / (t * fma(cos((k_m + k_m)), -0.5, 0.5))) / k_m) * ((l * cos(k_m)) / k_m);
}
return tmp;
}
# Herbie alternative: branch on k_m = |k|; fma term evaluates 0.5 - 0.5*cos(2*k_m) = sin(k_m)^2 stably.
# NOTE(review): the preamble `k_m = abs(k)` is fused onto the same line as the
# definition — an artifact of extracting the Herbie HTML report.
k_m = abs(k) function code(t, l, k_m) tmp = 0.0 if (k_m <= 0.0001) tmp = Float64(Float64(Float64(2.0 * l) / Float64(t * Float64(k_m * k_m))) * Float64(l / Float64(k_m * k_m))); else tmp = Float64(Float64(Float64(Float64(2.0 * l) / Float64(t * fma(cos(Float64(k_m + k_m)), -0.5, 0.5))) / k_m) * Float64(Float64(l * cos(k_m)) / k_m)); end return tmp end
(* Herbie alternative: branch on k_m = |k|.  NOTE(review): the preamble `k_m = N[Abs[k], ...]` is fused onto the same line — HTML-report extraction artifact. *)
k_m = N[Abs[k], $MachinePrecision] code[t_, l_, k$95$m_] := If[LessEqual[k$95$m, 0.0001], N[(N[(N[(2.0 * l), $MachinePrecision] / N[(t * N[(k$95$m * k$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(l / N[(k$95$m * k$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[(N[(2.0 * l), $MachinePrecision] / N[(t * N[(N[Cos[N[(k$95$m + k$95$m), $MachinePrecision]], $MachinePrecision] * -0.5 + 0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / k$95$m), $MachinePrecision] * N[(N[(l * N[Cos[k$95$m], $MachinePrecision]), $MachinePrecision] / k$95$m), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
k_m = \left|k\right|
\\
\begin{array}{l}
\mathbf{if}\;k\_m \leq 0.0001:\\
\;\;\;\;\frac{2 \cdot \ell}{t \cdot \left(k\_m \cdot k\_m\right)} \cdot \frac{\ell}{k\_m \cdot k\_m}\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{2 \cdot \ell}{t \cdot \mathsf{fma}\left(\cos \left(k\_m + k\_m\right), -0.5, 0.5\right)}}{k\_m} \cdot \frac{\ell \cdot \cos k\_m}{k\_m}\\
\end{array}
\end{array}
if k < 1.00000000000000005e-4: Initial program 42.4%
Taylor expanded in k around 0
associate-*r/N/A
lower-/.f64N/A
lower-*.f64N/A
unpow2N/A
lower-*.f64N/A
*-commutativeN/A
lower-*.f64N/A
metadata-evalN/A
pow-sqrN/A
lower-*.f64N/A
unpow2N/A
lower-*.f64N/A
unpow2N/A
lower-*.f6468.0
Applied rewrites68.0%
Applied rewrites82.7%
if 1.00000000000000005e-4 < k Initial program 29.4%
Taylor expanded in t around 0
associate-*r/N/A
lower-/.f64N/A
lower-*.f64N/A
unpow2N/A
associate-*l*N/A
*-commutativeN/A
lower-*.f64N/A
*-commutativeN/A
lower-*.f64N/A
lower-cos.f64N/A
unpow2N/A
associate-*l*N/A
*-commutativeN/A
lower-*.f64N/A
*-commutativeN/A
lower-*.f64N/A
lower-*.f64N/A
lower-pow.f64N/A
lower-sin.f6475.7
Applied rewrites75.7%
Applied rewrites96.0%
Applied rewrites96.0%
Final simplification86.3%
k_m = (fabs.f64 k)
(FPCore (t l k_m)
:precision binary64
(if (<= k_m 0.0001)
(* (/ (* 2.0 l) (* t (* k_m k_m))) (/ l (* k_m k_m)))
(*
(/ (* l (cos k_m)) k_m)
(/ (* 2.0 l) (* k_m (* t (fma (cos (+ k_m k_m)) -0.5 0.5)))))))k_m = fabs(k);
// Herbie alternative: same small-k_m limit as above; the general branch
// reorders the product as (l*cos(k_m)/k_m) * (2l / (k_m*(t*sin^2-term))).
// fma(cos(2*k_m), -0.5, 0.5) evaluates 0.5 - 0.5*cos(2*k_m) = sin(k_m)^2.
double code(double t, double l, double k_m) {
double tmp;
if (k_m <= 0.0001) {
tmp = ((2.0 * l) / (t * (k_m * k_m))) * (l / (k_m * k_m));
} else {
tmp = ((l * cos(k_m)) / k_m) * ((2.0 * l) / (k_m * (t * fma(cos((k_m + k_m)), -0.5, 0.5))));
}
return tmp;
}
# Herbie alternative (reordered product).  NOTE(review): preamble `k_m = abs(k)`
# fused onto the definition line — HTML-report extraction artifact.
k_m = abs(k) function code(t, l, k_m) tmp = 0.0 if (k_m <= 0.0001) tmp = Float64(Float64(Float64(2.0 * l) / Float64(t * Float64(k_m * k_m))) * Float64(l / Float64(k_m * k_m))); else tmp = Float64(Float64(Float64(l * cos(k_m)) / k_m) * Float64(Float64(2.0 * l) / Float64(k_m * Float64(t * fma(cos(Float64(k_m + k_m)), -0.5, 0.5))))); end return tmp end
(* Herbie alternative (reordered product).  NOTE(review): preamble fused onto the definition line — extraction artifact. *)
k_m = N[Abs[k], $MachinePrecision] code[t_, l_, k$95$m_] := If[LessEqual[k$95$m, 0.0001], N[(N[(N[(2.0 * l), $MachinePrecision] / N[(t * N[(k$95$m * k$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(l / N[(k$95$m * k$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[(l * N[Cos[k$95$m], $MachinePrecision]), $MachinePrecision] / k$95$m), $MachinePrecision] * N[(N[(2.0 * l), $MachinePrecision] / N[(k$95$m * N[(t * N[(N[Cos[N[(k$95$m + k$95$m), $MachinePrecision]], $MachinePrecision] * -0.5 + 0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
k_m = \left|k\right|
\\
\begin{array}{l}
\mathbf{if}\;k\_m \leq 0.0001:\\
\;\;\;\;\frac{2 \cdot \ell}{t \cdot \left(k\_m \cdot k\_m\right)} \cdot \frac{\ell}{k\_m \cdot k\_m}\\
\mathbf{else}:\\
\;\;\;\;\frac{\ell \cdot \cos k\_m}{k\_m} \cdot \frac{2 \cdot \ell}{k\_m \cdot \left(t \cdot \mathsf{fma}\left(\cos \left(k\_m + k\_m\right), -0.5, 0.5\right)\right)}\\
\end{array}
\end{array}
if k < 1.00000000000000005e-4Initial program 42.4%
Taylor expanded in k around 0
associate-*r/N/A
lower-/.f64N/A
lower-*.f64N/A
unpow2N/A
lower-*.f64N/A
*-commutativeN/A
lower-*.f64N/A
metadata-evalN/A
pow-sqrN/A
lower-*.f64N/A
unpow2N/A
lower-*.f64N/A
unpow2N/A
lower-*.f6468.0
Applied rewrites68.0%
Applied rewrites82.7%
if 1.00000000000000005e-4 < k Initial program 29.4%
Taylor expanded in t around 0
associate-*r/N/A
lower-/.f64N/A
lower-*.f64N/A
unpow2N/A
associate-*l*N/A
*-commutativeN/A
lower-*.f64N/A
*-commutativeN/A
lower-*.f64N/A
lower-cos.f64N/A
unpow2N/A
associate-*l*N/A
*-commutativeN/A
lower-*.f64N/A
*-commutativeN/A
lower-*.f64N/A
lower-*.f64N/A
lower-pow.f64N/A
lower-sin.f6475.7
Applied rewrites75.7%
Applied rewrites96.0%
Applied rewrites96.0%
Final simplification86.3%
k_m = (fabs.f64 k)
(FPCore (t l k_m)
:precision binary64
(if (<= k_m 0.0001)
(* (/ (* 2.0 l) (* t (* k_m k_m))) (/ l (* k_m k_m)))
(*
(* 2.0 l)
(/
(* l (/ (cos k_m) k_m))
(* (fma (cos (+ k_m k_m)) -0.5 0.5) (* k_m t))))))k_m = fabs(k);
// Herbie alternative: factors (2*l) out front; denominator groups the
// sin(k_m)^2 term (via fma(cos(2*k_m), -0.5, 0.5)) with (k_m * t).
double code(double t, double l, double k_m) {
double tmp;
if (k_m <= 0.0001) {
tmp = ((2.0 * l) / (t * (k_m * k_m))) * (l / (k_m * k_m));
} else {
tmp = (2.0 * l) * ((l * (cos(k_m) / k_m)) / (fma(cos((k_m + k_m)), -0.5, 0.5) * (k_m * t)));
}
return tmp;
}
# Herbie alternative (2*l factored out front).  NOTE(review): preamble fused onto
# the definition line — HTML-report extraction artifact.
k_m = abs(k) function code(t, l, k_m) tmp = 0.0 if (k_m <= 0.0001) tmp = Float64(Float64(Float64(2.0 * l) / Float64(t * Float64(k_m * k_m))) * Float64(l / Float64(k_m * k_m))); else tmp = Float64(Float64(2.0 * l) * Float64(Float64(l * Float64(cos(k_m) / k_m)) / Float64(fma(cos(Float64(k_m + k_m)), -0.5, 0.5) * Float64(k_m * t)))); end return tmp end
(* Herbie alternative (2*l factored out front).  NOTE(review): preamble fused onto the definition line — extraction artifact. *)
k_m = N[Abs[k], $MachinePrecision] code[t_, l_, k$95$m_] := If[LessEqual[k$95$m, 0.0001], N[(N[(N[(2.0 * l), $MachinePrecision] / N[(t * N[(k$95$m * k$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(l / N[(k$95$m * k$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(2.0 * l), $MachinePrecision] * N[(N[(l * N[(N[Cos[k$95$m], $MachinePrecision] / k$95$m), $MachinePrecision]), $MachinePrecision] / N[(N[(N[Cos[N[(k$95$m + k$95$m), $MachinePrecision]], $MachinePrecision] * -0.5 + 0.5), $MachinePrecision] * N[(k$95$m * t), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
k_m = \left|k\right|
\\
\begin{array}{l}
\mathbf{if}\;k\_m \leq 0.0001:\\
\;\;\;\;\frac{2 \cdot \ell}{t \cdot \left(k\_m \cdot k\_m\right)} \cdot \frac{\ell}{k\_m \cdot k\_m}\\
\mathbf{else}:\\
\;\;\;\;\left(2 \cdot \ell\right) \cdot \frac{\ell \cdot \frac{\cos k\_m}{k\_m}}{\mathsf{fma}\left(\cos \left(k\_m + k\_m\right), -0.5, 0.5\right) \cdot \left(k\_m \cdot t\right)}\\
\end{array}
\end{array}
if k < 1.00000000000000005e-4Initial program 42.4%
Taylor expanded in k around 0
associate-*r/N/A
lower-/.f64N/A
lower-*.f64N/A
unpow2N/A
lower-*.f64N/A
*-commutativeN/A
lower-*.f64N/A
metadata-evalN/A
pow-sqrN/A
lower-*.f64N/A
unpow2N/A
lower-*.f64N/A
unpow2N/A
lower-*.f6468.0
Applied rewrites68.0%
Applied rewrites82.7%
if 1.00000000000000005e-4 < k Initial program 29.4%
Taylor expanded in t around 0
associate-*r/N/A
lower-/.f64N/A
lower-*.f64N/A
unpow2N/A
associate-*l*N/A
*-commutativeN/A
lower-*.f64N/A
*-commutativeN/A
lower-*.f64N/A
lower-cos.f64N/A
unpow2N/A
associate-*l*N/A
*-commutativeN/A
lower-*.f64N/A
*-commutativeN/A
lower-*.f64N/A
lower-*.f64N/A
lower-pow.f64N/A
lower-sin.f6475.7
Applied rewrites75.7%
Applied rewrites96.0%
Applied rewrites94.7%
k_m = (fabs.f64 k)
(FPCore (t l k_m)
:precision binary64
(if (<= k_m 0.0001)
(* (/ (* 2.0 l) (* t (* k_m k_m))) (/ l (* k_m k_m)))
(*
(* 2.0 l)
(/
(* l (cos k_m))
(* (fma (cos (+ k_m k_m)) -0.5 0.5) (* k_m (* k_m t)))))))k_m = fabs(k);
// Herbie alternative: like the previous one but with cos(k_m)/k_m merged into
// a single fraction; denominator carries k_m * (k_m * t).
double code(double t, double l, double k_m) {
double tmp;
if (k_m <= 0.0001) {
tmp = ((2.0 * l) / (t * (k_m * k_m))) * (l / (k_m * k_m));
} else {
tmp = (2.0 * l) * ((l * cos(k_m)) / (fma(cos((k_m + k_m)), -0.5, 0.5) * (k_m * (k_m * t))));
}
return tmp;
}
# Herbie alternative (merged fraction).  NOTE(review): preamble fused onto the
# definition line — HTML-report extraction artifact.
k_m = abs(k) function code(t, l, k_m) tmp = 0.0 if (k_m <= 0.0001) tmp = Float64(Float64(Float64(2.0 * l) / Float64(t * Float64(k_m * k_m))) * Float64(l / Float64(k_m * k_m))); else tmp = Float64(Float64(2.0 * l) * Float64(Float64(l * cos(k_m)) / Float64(fma(cos(Float64(k_m + k_m)), -0.5, 0.5) * Float64(k_m * Float64(k_m * t))))); end return tmp end
(* Herbie alternative (merged fraction).  NOTE(review): preamble fused onto the definition line — extraction artifact. *)
k_m = N[Abs[k], $MachinePrecision] code[t_, l_, k$95$m_] := If[LessEqual[k$95$m, 0.0001], N[(N[(N[(2.0 * l), $MachinePrecision] / N[(t * N[(k$95$m * k$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(l / N[(k$95$m * k$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(2.0 * l), $MachinePrecision] * N[(N[(l * N[Cos[k$95$m], $MachinePrecision]), $MachinePrecision] / N[(N[(N[Cos[N[(k$95$m + k$95$m), $MachinePrecision]], $MachinePrecision] * -0.5 + 0.5), $MachinePrecision] * N[(k$95$m * N[(k$95$m * t), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
k_m = \left|k\right|
\\
\begin{array}{l}
\mathbf{if}\;k\_m \leq 0.0001:\\
\;\;\;\;\frac{2 \cdot \ell}{t \cdot \left(k\_m \cdot k\_m\right)} \cdot \frac{\ell}{k\_m \cdot k\_m}\\
\mathbf{else}:\\
\;\;\;\;\left(2 \cdot \ell\right) \cdot \frac{\ell \cdot \cos k\_m}{\mathsf{fma}\left(\cos \left(k\_m + k\_m\right), -0.5, 0.5\right) \cdot \left(k\_m \cdot \left(k\_m \cdot t\right)\right)}\\
\end{array}
\end{array}
if k < 1.00000000000000005e-4Initial program 42.4%
Taylor expanded in k around 0
associate-*r/N/A
lower-/.f64N/A
lower-*.f64N/A
unpow2N/A
lower-*.f64N/A
*-commutativeN/A
lower-*.f64N/A
metadata-evalN/A
pow-sqrN/A
lower-*.f64N/A
unpow2N/A
lower-*.f64N/A
unpow2N/A
lower-*.f6468.0
Applied rewrites68.0%
Applied rewrites82.7%
if 1.00000000000000005e-4 < k Initial program 29.4%
Taylor expanded in t around 0
associate-*r/N/A
lower-/.f64N/A
lower-*.f64N/A
unpow2N/A
associate-*l*N/A
*-commutativeN/A
lower-*.f64N/A
*-commutativeN/A
lower-*.f64N/A
lower-cos.f64N/A
unpow2N/A
associate-*l*N/A
*-commutativeN/A
lower-*.f64N/A
*-commutativeN/A
lower-*.f64N/A
lower-*.f64N/A
lower-pow.f64N/A
lower-sin.f6475.7
Applied rewrites75.7%
Applied rewrites96.0%
Applied rewrites85.2%
Final simplification83.4%
k_m = (fabs.f64 k)
(FPCore (t l k_m)
:precision binary64
(if (<= k_m 3.2e-87)
(* (/ (* 2.0 l) (* t (* k_m k_m))) (/ l (* k_m k_m)))
(if (<= k_m 5.6e+102)
(*
(/ (* l (cos k_m)) k_m)
(/
(* (/ l t) (fma 0.6666666666666666 (* k_m k_m) 2.0))
(* k_m (* k_m k_m))))
(* (/ l t) (/ (/ (* 2.0 l) (* k_m k_m)) (* k_m k_m))))))k_m = fabs(k);
double code(double t, double l, double k_m) {
double tmp;
if (k_m <= 3.2e-87) {
tmp = ((2.0 * l) / (t * (k_m * k_m))) * (l / (k_m * k_m));
} else if (k_m <= 5.6e+102) {
tmp = ((l * cos(k_m)) / k_m) * (((l / t) * fma(0.6666666666666666, (k_m * k_m), 2.0)) / (k_m * (k_m * k_m)));
} else {
tmp = (l / t) * (((2.0 * l) / (k_m * k_m)) / (k_m * k_m));
}
return tmp;
}
# Herbie alternative with three k_m regimes.  NOTE(review): preamble fused onto
# the definition line — HTML-report extraction artifact.
k_m = abs(k) function code(t, l, k_m) tmp = 0.0 if (k_m <= 3.2e-87) tmp = Float64(Float64(Float64(2.0 * l) / Float64(t * Float64(k_m * k_m))) * Float64(l / Float64(k_m * k_m))); elseif (k_m <= 5.6e+102) tmp = Float64(Float64(Float64(l * cos(k_m)) / k_m) * Float64(Float64(Float64(l / t) * fma(0.6666666666666666, Float64(k_m * k_m), 2.0)) / Float64(k_m * Float64(k_m * k_m)))); else tmp = Float64(Float64(l / t) * Float64(Float64(Float64(2.0 * l) / Float64(k_m * k_m)) / Float64(k_m * k_m))); end return tmp end
(* Herbie alternative with three k_m regimes.  NOTE(review): preamble fused onto the definition line — extraction artifact. *)
k_m = N[Abs[k], $MachinePrecision] code[t_, l_, k$95$m_] := If[LessEqual[k$95$m, 3.2e-87], N[(N[(N[(2.0 * l), $MachinePrecision] / N[(t * N[(k$95$m * k$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(l / N[(k$95$m * k$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], If[LessEqual[k$95$m, 5.6e+102], N[(N[(N[(l * N[Cos[k$95$m], $MachinePrecision]), $MachinePrecision] / k$95$m), $MachinePrecision] * N[(N[(N[(l / t), $MachinePrecision] * N[(0.6666666666666666 * N[(k$95$m * k$95$m), $MachinePrecision] + 2.0), $MachinePrecision]), $MachinePrecision] / N[(k$95$m * N[(k$95$m * k$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(l / t), $MachinePrecision] * N[(N[(N[(2.0 * l), $MachinePrecision] / N[(k$95$m * k$95$m), $MachinePrecision]), $MachinePrecision] / N[(k$95$m * k$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
k_m = \left|k\right|
\\
\begin{array}{l}
\mathbf{if}\;k\_m \leq 3.2 \cdot 10^{-87}:\\
\;\;\;\;\frac{2 \cdot \ell}{t \cdot \left(k\_m \cdot k\_m\right)} \cdot \frac{\ell}{k\_m \cdot k\_m}\\
\mathbf{elif}\;k\_m \leq 5.6 \cdot 10^{+102}:\\
\;\;\;\;\frac{\ell \cdot \cos k\_m}{k\_m} \cdot \frac{\frac{\ell}{t} \cdot \mathsf{fma}\left(0.6666666666666666, k\_m \cdot k\_m, 2\right)}{k\_m \cdot \left(k\_m \cdot k\_m\right)}\\
\mathbf{else}:\\
\;\;\;\;\frac{\ell}{t} \cdot \frac{\frac{2 \cdot \ell}{k\_m \cdot k\_m}}{k\_m \cdot k\_m}\\
\end{array}
\end{array}
if k < 3.19999999999999979e-87Initial program 42.3%
Taylor expanded in k around 0
associate-*r/N/A
lower-/.f64N/A
lower-*.f64N/A
unpow2N/A
lower-*.f64N/A
*-commutativeN/A
lower-*.f64N/A
metadata-evalN/A
pow-sqrN/A
lower-*.f64N/A
unpow2N/A
lower-*.f64N/A
unpow2N/A
lower-*.f6465.2
Applied rewrites65.2%
Applied rewrites80.2%
if 3.19999999999999979e-87 < k < 5.60000000000000037e102Initial program 36.0%
Taylor expanded in t around 0
associate-*r/N/A
lower-/.f64N/A
lower-*.f64N/A
unpow2N/A
associate-*l*N/A
*-commutativeN/A
lower-*.f64N/A
*-commutativeN/A
lower-*.f64N/A
lower-cos.f64N/A
unpow2N/A
associate-*l*N/A
*-commutativeN/A
lower-*.f64N/A
*-commutativeN/A
lower-*.f64N/A
lower-*.f64N/A
lower-pow.f64N/A
lower-sin.f6481.4
Applied rewrites81.4%
Applied rewrites79.0%
Taylor expanded in k around 0
Applied rewrites81.4%
if 5.60000000000000037e102 < k Initial program 30.6%
Taylor expanded in k around 0
associate-*r/N/A
lower-/.f64N/A
lower-*.f64N/A
unpow2N/A
lower-*.f64N/A
*-commutativeN/A
lower-*.f64N/A
metadata-evalN/A
pow-sqrN/A
lower-*.f64N/A
unpow2N/A
lower-*.f64N/A
unpow2N/A
lower-*.f6450.1
Applied rewrites50.1%
Applied rewrites54.4%
Applied rewrites58.1%
Final simplification75.8%
k_m = (fabs.f64 k) (FPCore (t l k_m) :precision binary64 (* (/ (* 2.0 l) (* t (* k_m k_m))) (/ l (* k_m k_m))))
k_m = fabs(k);
// Herbie alternative: unconditional small-k_m Taylor limit
// (2*l / (t*k_m^2)) * (l / k_m^2); caller passes k_m = fabs(k).
double code(double t, double l, double k_m) {
return ((2.0 * l) / (t * (k_m * k_m))) * (l / (k_m * k_m));
}
k_m = abs(k)
! Herbie alternative: unconditional small-k_m Taylor limit
! (2*l / (t*k_m**2)) * (l / k_m**2); caller passes k_m = abs(k).
real(8) function code(t, l, k_m)
real(8), intent (in) :: t
real(8), intent (in) :: l
real(8), intent (in) :: k_m
code = ((2.0d0 * l) / (t * (k_m * k_m))) * (l / (k_m * k_m))
end function
k_m = Math.abs(k);
// Herbie alternative: unconditional small-k_m Taylor limit
// (2*l / (t*k_m^2)) * (l / k_m^2); caller passes k_m = Math.abs(k).
public static double code(double t, double l, double k_m) {
return ((2.0 * l) / (t * (k_m * k_m))) * (l / (k_m * k_m));
}
# The Herbie-report preamble and the definition were fused onto one line in the
# extracted source (invalid Python); split apart here.  The preamble computes
# k_m from the caller's k before invoking code:
#   k_m = math.fabs(k)
def code(t, l, k_m):
    """Herbie alternative: small-k_m Taylor limit (2*l / (t*k_m**2)) * (l / k_m**2)."""
    return ((2.0 * l) / (t * (k_m * k_m))) * (l / (k_m * k_m))
# Herbie alternative: small-k_m Taylor limit.  NOTE(review): preamble fused onto
# the definition line — HTML-report extraction artifact.
k_m = abs(k) function code(t, l, k_m) return Float64(Float64(Float64(2.0 * l) / Float64(t * Float64(k_m * k_m))) * Float64(l / Float64(k_m * k_m))) end
% Herbie alternative: small-k_m Taylor limit.  NOTE(review): preamble fused onto
% the definition line — HTML-report extraction artifact.
k_m = abs(k); function tmp = code(t, l, k_m) tmp = ((2.0 * l) / (t * (k_m * k_m))) * (l / (k_m * k_m)); end
(* Herbie alternative: small-k_m Taylor limit.  NOTE(review): preamble fused onto the definition line — extraction artifact. *)
k_m = N[Abs[k], $MachinePrecision] code[t_, l_, k$95$m_] := N[(N[(N[(2.0 * l), $MachinePrecision] / N[(t * N[(k$95$m * k$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(l / N[(k$95$m * k$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
k_m = \left|k\right|
\\
\frac{2 \cdot \ell}{t \cdot \left(k\_m \cdot k\_m\right)} \cdot \frac{\ell}{k\_m \cdot k\_m}
\end{array}
Initial program 38.9%
Taylor expanded in k around 0
associate-*r/N/A
lower-/.f64N/A
lower-*.f64N/A
unpow2N/A
lower-*.f64N/A
*-commutativeN/A
lower-*.f64N/A
metadata-evalN/A
pow-sqrN/A
lower-*.f64N/A
unpow2N/A
lower-*.f64N/A
unpow2N/A
lower-*.f6462.6
Applied rewrites62.6%
Applied rewrites74.2%
k_m = (fabs.f64 k) (FPCore (t l k_m) :precision binary64 (* 2.0 (* l (/ l (* k_m (* k_m (* t (* k_m k_m))))))))
k_m = fabs(k);
// Herbie alternative: same Taylor limit as above with all factors pushed into
// one nested product: 2 * (l * (l / (k_m * (k_m * (t * k_m^2))))).
double code(double t, double l, double k_m) {
return 2.0 * (l * (l / (k_m * (k_m * (t * (k_m * k_m))))));
}
k_m = abs(k)
! Herbie alternative: Taylor limit as one nested product
! 2 * (l * (l / (k_m * (k_m * (t * k_m**2))))).
real(8) function code(t, l, k_m)
real(8), intent (in) :: t
real(8), intent (in) :: l
real(8), intent (in) :: k_m
code = 2.0d0 * (l * (l / (k_m * (k_m * (t * (k_m * k_m))))))
end function
k_m = Math.abs(k);
// Herbie alternative: Taylor limit as one nested product
// 2 * (l * (l / (k_m * (k_m * (t * k_m^2))))).
public static double code(double t, double l, double k_m) {
return 2.0 * (l * (l / (k_m * (k_m * (t * (k_m * k_m))))));
}
# Preamble and definition were fused onto one line in the extracted source
# (invalid Python); split apart here.  Preamble before calling code:
#   k_m = math.fabs(k)
def code(t, l, k_m):
    """Herbie alternative: Taylor limit as one nested product 2*(l*(l/(k_m*(k_m*(t*k_m**2)))))."""
    return 2.0 * (l * (l / (k_m * (k_m * (t * (k_m * k_m))))))
# Herbie alternative: Taylor limit as one nested product.  NOTE(review): preamble
# fused onto the definition line — HTML-report extraction artifact.
k_m = abs(k) function code(t, l, k_m) return Float64(2.0 * Float64(l * Float64(l / Float64(k_m * Float64(k_m * Float64(t * Float64(k_m * k_m))))))) end
% Herbie alternative: Taylor limit as one nested product.  NOTE(review): preamble
% fused onto the definition line — HTML-report extraction artifact.
k_m = abs(k); function tmp = code(t, l, k_m) tmp = 2.0 * (l * (l / (k_m * (k_m * (t * (k_m * k_m)))))); end
(* Herbie alternative: Taylor limit as one nested product.  NOTE(review): preamble fused onto the definition line — extraction artifact. *)
k_m = N[Abs[k], $MachinePrecision] code[t_, l_, k$95$m_] := N[(2.0 * N[(l * N[(l / N[(k$95$m * N[(k$95$m * N[(t * N[(k$95$m * k$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
k_m = \left|k\right|
\\
2 \cdot \left(\ell \cdot \frac{\ell}{k\_m \cdot \left(k\_m \cdot \left(t \cdot \left(k\_m \cdot k\_m\right)\right)\right)}\right)
\end{array}
Initial program 38.9%
Taylor expanded in k around 0
associate-*r/N/A
lower-/.f64N/A
lower-*.f64N/A
unpow2N/A
lower-*.f64N/A
*-commutativeN/A
lower-*.f64N/A
metadata-evalN/A
pow-sqrN/A
lower-*.f64N/A
unpow2N/A
lower-*.f64N/A
unpow2N/A
lower-*.f6462.6
Applied rewrites62.6%
Applied rewrites71.1%
Final simplification71.1%
k_m = (fabs.f64 k) (FPCore (t l k_m) :precision binary64 (/ 1.0 (/ t (* -0.11666666666666667 (* l l)))))
k_m = fabs(k);
// Herbie alternative (low-accuracy per the log: 16.5%): constant-coefficient
// form 1 / (t / (-0.11666666666666667 * l^2)); k_m is unused.
double code(double t, double l, double k_m) {
return 1.0 / (t / (-0.11666666666666667 * (l * l)));
}
k_m = abs(k)
! Herbie alternative (low-accuracy per the log: 16.5%): constant-coefficient
! form 1 / (t / (-0.11666666666666667 * l**2)); k_m is unused.
real(8) function code(t, l, k_m)
real(8), intent (in) :: t
real(8), intent (in) :: l
real(8), intent (in) :: k_m
code = 1.0d0 / (t / ((-0.11666666666666667d0) * (l * l)))
end function
k_m = Math.abs(k);
// Herbie alternative (low-accuracy per the log: 16.5%): constant-coefficient
// form 1 / (t / (-0.11666666666666667 * l^2)); k_m is unused.
public static double code(double t, double l, double k_m) {
return 1.0 / (t / (-0.11666666666666667 * (l * l)));
}
# Preamble and definition were fused onto one line in the extracted source
# (invalid Python); split apart here.  Preamble before calling code:
#   k_m = math.fabs(k)
def code(t, l, k_m):
    """Herbie alternative (low-accuracy, 16.5% per the log): 1 / (t / (-0.11666666666666667 * l**2)); k_m unused."""
    return 1.0 / (t / (-0.11666666666666667 * (l * l)))
# Herbie alternative (low-accuracy constant form; k_m unused).  NOTE(review):
# preamble fused onto the definition line — HTML-report extraction artifact.
k_m = abs(k) function code(t, l, k_m) return Float64(1.0 / Float64(t / Float64(-0.11666666666666667 * Float64(l * l)))) end
% Herbie alternative (low-accuracy constant form; k_m unused).  NOTE(review):
% preamble fused onto the definition line — HTML-report extraction artifact.
k_m = abs(k); function tmp = code(t, l, k_m) tmp = 1.0 / (t / (-0.11666666666666667 * (l * l))); end
(* Herbie alternative (low-accuracy constant form; k_m unused).  NOTE(review): preamble fused onto the definition line — extraction artifact. *)
k_m = N[Abs[k], $MachinePrecision] code[t_, l_, k$95$m_] := N[(1.0 / N[(t / N[(-0.11666666666666667 * N[(l * l), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
k_m = \left|k\right|
\\
\frac{1}{\frac{t}{-0.11666666666666667 \cdot \left(\ell \cdot \ell\right)}}
\end{array}
Initial program 38.9%
Taylor expanded in k around 0
lower-/.f64N/A
Applied rewrites24.7%
Taylor expanded in k around inf
Applied rewrites16.5%
Applied rewrites16.5%
Final simplification16.5%
k_m = (fabs.f64 k) (FPCore (t l k_m) :precision binary64 (* (* l l) (/ -0.11666666666666667 t)))
k_m = fabs(k);
// Herbie alternative (low-accuracy per the log: 16.5%): l^2 * (-0.11666666666666667 / t);
// algebraically the same constant form as above with the division rearranged.
double code(double t, double l, double k_m) {
return (l * l) * (-0.11666666666666667 / t);
}
k_m = abs(k)
! Herbie alternative (low-accuracy per the log: 16.5%): l**2 * (-0.11666666666666667 / t).
real(8) function code(t, l, k_m)
real(8), intent (in) :: t
real(8), intent (in) :: l
real(8), intent (in) :: k_m
code = (l * l) * ((-0.11666666666666667d0) / t)
end function
k_m = Math.abs(k);
// Herbie alternative (low-accuracy per the log: 16.5%): l^2 * (-0.11666666666666667 / t).
public static double code(double t, double l, double k_m) {
return (l * l) * (-0.11666666666666667 / t);
}
# Preamble and definition were fused onto one line in the extracted source
# (invalid Python); split apart here.  Preamble before calling code:
#   k_m = math.fabs(k)
def code(t, l, k_m):
    """Herbie alternative (low-accuracy, 16.5% per the log): (l*l) * (-0.11666666666666667 / t); k_m unused."""
    return (l * l) * (-0.11666666666666667 / t)
# Herbie alternative (low-accuracy constant form; k_m unused).  NOTE(review):
# preamble fused onto the definition line — HTML-report extraction artifact.
k_m = abs(k) function code(t, l, k_m) return Float64(Float64(l * l) * Float64(-0.11666666666666667 / t)) end
% Herbie alternative (low-accuracy constant form; k_m unused).  NOTE(review):
% preamble fused onto the definition line — HTML-report extraction artifact.
k_m = abs(k); function tmp = code(t, l, k_m) tmp = (l * l) * (-0.11666666666666667 / t); end
(* Herbie alternative (low-accuracy constant form; k_m unused).  NOTE(review): preamble fused onto the definition line — extraction artifact. *)
k_m = N[Abs[k], $MachinePrecision] code[t_, l_, k$95$m_] := N[(N[(l * l), $MachinePrecision] * N[(-0.11666666666666667 / t), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
k_m = \left|k\right|
\\
\left(\ell \cdot \ell\right) \cdot \frac{-0.11666666666666667}{t}
\end{array}
Initial program 38.9%
Taylor expanded in k around 0
lower-/.f64N/A
Applied rewrites24.7%
Taylor expanded in k around inf
Applied rewrites16.5%
Applied rewrites16.5%
Final simplification16.5%
k_m = (fabs.f64 k) (FPCore (t l k_m) :precision binary64 (* l (* l (/ -0.11666666666666667 t))))
k_m = fabs(k);
// Herbie alternative (last listed, 13.3% per the log): fully right-nested
// product l * (l * (-0.11666666666666667 / t)); k_m is unused.
double code(double t, double l, double k_m) {
return l * (l * (-0.11666666666666667 / t));
}
k_m = abs(k)
! Herbie alternative (last listed): l * (l * (-0.11666666666666667 / t)); k_m unused.
real(8) function code(t, l, k_m)
real(8), intent (in) :: t
real(8), intent (in) :: l
real(8), intent (in) :: k_m
code = l * (l * ((-0.11666666666666667d0) / t))
end function
k_m = Math.abs(k);
// Herbie alternative (last listed): l * (l * (-0.11666666666666667 / t)); k_m unused.
public static double code(double t, double l, double k_m) {
return l * (l * (-0.11666666666666667 / t));
}
# Preamble and definition were fused onto one line in the extracted source
# (invalid Python); split apart here.  Preamble before calling code:
#   k_m = math.fabs(k)
def code(t, l, k_m):
    """Herbie alternative (last listed): l * (l * (-0.11666666666666667 / t)); k_m unused."""
    return l * (l * (-0.11666666666666667 / t))
# Herbie alternative (last listed; k_m unused).  NOTE(review): preamble fused
# onto the definition line — HTML-report extraction artifact.
k_m = abs(k) function code(t, l, k_m) return Float64(l * Float64(l * Float64(-0.11666666666666667 / t))) end
% Herbie alternative (last listed; k_m unused).  NOTE(review): preamble fused
% onto the definition line — HTML-report extraction artifact.
k_m = abs(k); function tmp = code(t, l, k_m) tmp = l * (l * (-0.11666666666666667 / t)); end
(* Herbie alternative (last listed; k_m unused).  NOTE(review): preamble fused onto the definition line — extraction artifact. *)
k_m = N[Abs[k], $MachinePrecision] code[t_, l_, k$95$m_] := N[(l * N[(l * N[(-0.11666666666666667 / t), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
k_m = \left|k\right|
\\
\ell \cdot \left(\ell \cdot \frac{-0.11666666666666667}{t}\right)
\end{array}
Initial program 38.9%
Taylor expanded in k around 0
lower-/.f64N/A
Applied rewrites24.7%
Taylor expanded in k around inf
Applied rewrites16.5%
Applied rewrites13.3%
herbie shell --seed 2024221
(FPCore (t l k)
:name "Toniolo and Linder, Equation (10-)"
:precision binary64
(/ 2.0 (* (* (* (/ (pow t 3.0) (* l l)) (sin k)) (tan k)) (- (+ 1.0 (pow (/ k t) 2.0)) 1.0))))