
(FPCore (t l k) :precision binary64 (/ 2.0 (* (* (* (/ (pow t 3.0) (* l l)) (sin k)) (tan k)) (- (+ 1.0 (pow (/ k t) 2.0)) 1.0))))
// C translation of the FPCore above:
//   2 / ( t^3/l^2 * sin(k) * tan(k) * ((1 + (k/t)^2) - 1) ).
// NOTE(review): the trailing factor (1.0 + pow(k/t, 2.0)) - 1.0 is mathematically
// (k/t)^2 but cancels catastrophically in binary64 when (k/t)^2 << 1 — this is
// the inaccuracy the alternatives below rewrite away.
double code(double t, double l, double k) {
return 2.0 / ((((pow(t, 3.0) / (l * l)) * sin(k)) * tan(k)) * ((1.0 + pow((k / t), 2.0)) - 1.0));
}
! Fortran translation of the same expression:
!   2 / ( t**3/l**2 * sin(k) * tan(k) * ((1 + (k/t)**2) - 1) ).
! The final factor cancels badly in double precision when (k/t)**2 is
! small relative to 1.
real(8) function code(t, l, k)
real(8), intent (in) :: t
real(8), intent (in) :: l
real(8), intent (in) :: k
code = 2.0d0 / (((((t ** 3.0d0) / (l * l)) * sin(k)) * tan(k)) * ((1.0d0 + ((k / t) ** 2.0d0)) - 1.0d0))
end function
// Java translation of the same expression:
//   2 / ( t^3/l^2 * sin(k) * tan(k) * ((1 + (k/t)^2) - 1) ).
// NOTE(review): the last factor cancels catastrophically when (k/t)^2 << 1.
public static double code(double t, double l, double k) {
return 2.0 / ((((Math.pow(t, 3.0) / (l * l)) * Math.sin(k)) * Math.tan(k)) * ((1.0 + Math.pow((k / t), 2.0)) - 1.0));
}
def code(t, l, k):
    """Evaluate 2 / (t^3/l^2 * sin(k) * tan(k) * ((1 + (k/t)^2) - 1)) in binary64.

    Direct translation of the FPCore above; the operation order of the
    original one-liner is preserved exactly.  Note that the last factor,
    (1 + (k/t)^2) - 1, is mathematically (k/t)^2 but cancels
    catastrophically when (k/t)^2 is small relative to 1.
    """
    scale = math.pow(t, 3.0) / (l * l)
    trig = (scale * math.sin(k)) * math.tan(k)
    cancel = (1.0 + math.pow(k / t, 2.0)) - 1.0
    return 2.0 / (trig * cancel)
# Julia translation of the same expression; the explicit Float64() wrappers pin
# every intermediate to binary64.  The (1 + (k/t)^2) - 1 factor cancels when
# (k/t)^2 << 1.
function code(t, l, k) return Float64(2.0 / Float64(Float64(Float64(Float64((t ^ 3.0) / Float64(l * l)) * sin(k)) * tan(k)) * Float64(Float64(1.0 + (Float64(k / t) ^ 2.0)) - 1.0))) end
% MATLAB translation of the same expression; the final ((1 + (k/t)^2) - 1)
% factor is mathematically (k/t)^2 but cancels in double precision.
function tmp = code(t, l, k) tmp = 2.0 / (((((t ^ 3.0) / (l * l)) * sin(k)) * tan(k)) * ((1.0 + ((k / t) ^ 2.0)) - 1.0)); end
(* Mathematica translation: each intermediate is rounded to $MachinePrecision
   to mimic binary64 evaluation of the same expression. *)
code[t_, l_, k_] := N[(2.0 / N[(N[(N[(N[(N[Power[t, 3.0], $MachinePrecision] / N[(l * l), $MachinePrecision]), $MachinePrecision] * N[Sin[k], $MachinePrecision]), $MachinePrecision] * N[Tan[k], $MachinePrecision]), $MachinePrecision] * N[(N[(1.0 + N[Power[N[(k / t), $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision] - 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{2}{\left(\left(\frac{{t}^{3}}{\ell \cdot \ell} \cdot \sin k\right) \cdot \tan k\right) \cdot \left(\left(1 + {\left(\frac{k}{t}\right)}^{2}\right) - 1\right)}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 12 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (t l k) :precision binary64 (/ 2.0 (* (* (* (/ (pow t 3.0) (* l l)) (sin k)) (tan k)) (- (+ 1.0 (pow (/ k t) 2.0)) 1.0))))
double code(double t, double l, double k) {
return 2.0 / ((((pow(t, 3.0) / (l * l)) * sin(k)) * tan(k)) * ((1.0 + pow((k / t), 2.0)) - 1.0));
}
real(8) function code(t, l, k)
real(8), intent (in) :: t
real(8), intent (in) :: l
real(8), intent (in) :: k
code = 2.0d0 / (((((t ** 3.0d0) / (l * l)) * sin(k)) * tan(k)) * ((1.0d0 + ((k / t) ** 2.0d0)) - 1.0d0))
end function
public static double code(double t, double l, double k) {
return 2.0 / ((((Math.pow(t, 3.0) / (l * l)) * Math.sin(k)) * Math.tan(k)) * ((1.0 + Math.pow((k / t), 2.0)) - 1.0));
}
def code(t, l, k): return 2.0 / ((((math.pow(t, 3.0) / (l * l)) * math.sin(k)) * math.tan(k)) * ((1.0 + math.pow((k / t), 2.0)) - 1.0))
function code(t, l, k) return Float64(2.0 / Float64(Float64(Float64(Float64((t ^ 3.0) / Float64(l * l)) * sin(k)) * tan(k)) * Float64(Float64(1.0 + (Float64(k / t) ^ 2.0)) - 1.0))) end
function tmp = code(t, l, k) tmp = 2.0 / (((((t ^ 3.0) / (l * l)) * sin(k)) * tan(k)) * ((1.0 + ((k / t) ^ 2.0)) - 1.0)); end
code[t_, l_, k_] := N[(2.0 / N[(N[(N[(N[(N[Power[t, 3.0], $MachinePrecision] / N[(l * l), $MachinePrecision]), $MachinePrecision] * N[Sin[k], $MachinePrecision]), $MachinePrecision] * N[Tan[k], $MachinePrecision]), $MachinePrecision] * N[(N[(1.0 + N[Power[N[(k / t), $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision] - 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{2}{\left(\left(\frac{{t}^{3}}{\ell \cdot \ell} \cdot \sin k\right) \cdot \tan k\right) \cdot \left(\left(1 + {\left(\frac{k}{t}\right)}^{2}\right) - 1\right)}
\end{array}
k_m = (fabs.f64 k)
(FPCore (t l k_m)
:precision binary64
(let* ((t_1 (* (/ k_m l) k_m)))
(if (<= k_m 0.0025)
(/ 2.0 (* (* (* t_1 t) t_1) (fma 0.16666666666666666 (* k_m k_m) 1.0)))
(/
2.0
(*
(* (* (- 0.5 (* (cos (+ k_m k_m)) 0.5)) (/ k_m l)) t)
(/ (/ k_m (cos k_m)) l))))))k_m = fabs(k);
// Herbie alternative: the caller supplies k_m = |k| (see the line above), and
// the code branches on the magnitude of k_m to avoid the cancelling
// (1 + (k/t)^2) - 1 factor of the original program.
double code(double t, double l, double k_m) {
// Shared subterm: t_1 = k_m^2 / l.
double t_1 = (k_m / l) * k_m;
double tmp;
if (k_m <= 0.0025) {
// Small |k|: series form (Taylor expansion in k around 0, per the log below);
// fma(1/6, k^2, 1) computes 1 + k^2/6 with a single rounding.
tmp = 2.0 / (((t_1 * t) * t_1) * fma(0.16666666666666666, (k_m * k_m), 1.0));
} else {
// Elsewhere: uses the identity sin^2(k) = 0.5 - 0.5*cos(2k), together with
// k/cos(k), in place of the original sin(k)*tan(k)*((k/t)^2) combination.
tmp = 2.0 / ((((0.5 - (cos((k_m + k_m)) * 0.5)) * (k_m / l)) * t) * ((k_m / cos(k_m)) / l));
}
return tmp;
}
k_m = abs(k) function code(t, l, k_m) t_1 = Float64(Float64(k_m / l) * k_m) tmp = 0.0 if (k_m <= 0.0025) tmp = Float64(2.0 / Float64(Float64(Float64(t_1 * t) * t_1) * fma(0.16666666666666666, Float64(k_m * k_m), 1.0))); else tmp = Float64(2.0 / Float64(Float64(Float64(Float64(0.5 - Float64(cos(Float64(k_m + k_m)) * 0.5)) * Float64(k_m / l)) * t) * Float64(Float64(k_m / cos(k_m)) / l))); end return tmp end
k_m = N[Abs[k], $MachinePrecision]
code[t_, l_, k$95$m_] := Block[{t$95$1 = N[(N[(k$95$m / l), $MachinePrecision] * k$95$m), $MachinePrecision]}, If[LessEqual[k$95$m, 0.0025], N[(2.0 / N[(N[(N[(t$95$1 * t), $MachinePrecision] * t$95$1), $MachinePrecision] * N[(0.16666666666666666 * N[(k$95$m * k$95$m), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(2.0 / N[(N[(N[(N[(0.5 - N[(N[Cos[N[(k$95$m + k$95$m), $MachinePrecision]], $MachinePrecision] * 0.5), $MachinePrecision]), $MachinePrecision] * N[(k$95$m / l), $MachinePrecision]), $MachinePrecision] * t), $MachinePrecision] * N[(N[(k$95$m / N[Cos[k$95$m], $MachinePrecision]), $MachinePrecision] / l), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
k_m = \left|k\right|
\\
\begin{array}{l}
t_1 := \frac{k\_m}{\ell} \cdot k\_m\\
\mathbf{if}\;k\_m \leq 0.0025:\\
\;\;\;\;\frac{2}{\left(\left(t\_1 \cdot t\right) \cdot t\_1\right) \cdot \mathsf{fma}\left(0.16666666666666666, k\_m \cdot k\_m, 1\right)}\\
\mathbf{else}:\\
\;\;\;\;\frac{2}{\left(\left(\left(0.5 - \cos \left(k\_m + k\_m\right) \cdot 0.5\right) \cdot \frac{k\_m}{\ell}\right) \cdot t\right) \cdot \frac{\frac{k\_m}{\cos k\_m}}{\ell}}\\
\end{array}
\end{array}
if k < 0.00250000000000000005Initial program 41.9%
Taylor expanded in k around 0
distribute-rgt-inN/A
associate-/l*N/A
associate-*r*N/A
associate-*l*N/A
distribute-lft1-inN/A
*-commutativeN/A
lower-*.f64N/A
*-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
associate-*l/N/A
associate-/l*N/A
*-commutativeN/A
lower-*.f64N/A
Applied rewrites75.5%
Applied rewrites82.4%
if 0.00250000000000000005 < k Initial program 24.0%
Taylor expanded in t around 0
unpow2N/A
associate-*l*N/A
*-commutativeN/A
unpow2N/A
associate-*r*N/A
times-fracN/A
*-commutativeN/A
lower-*.f64N/A
*-commutativeN/A
associate-/r*N/A
lower-/.f64N/A
lower-/.f64N/A
lower-cos.f64N/A
*-commutativeN/A
lower-/.f64N/A
Applied rewrites94.8%
Applied rewrites99.5%
Applied rewrites99.5%
Final simplification86.3%
k_m = (fabs.f64 k) (FPCore (t l k_m) :precision binary64 (/ 2.0 (* (* (* (/ k_m l) (pow (sin k_m) 2.0)) t) (/ (/ k_m (cos k_m)) l))))
k_m = fabs(k);
// Herbie alternative (Taylor expansion in t around 0, per the log below):
// denominator rewritten as (k/l) * sin^2(k) * t * (k/cos(k)) / l, which
// removes the cancelling (1 + (k/t)^2) - 1 factor of the original.
double code(double t, double l, double k_m) {
return 2.0 / ((((k_m / l) * pow(sin(k_m), 2.0)) * t) * ((k_m / cos(k_m)) / l));
}
k_m = abs(k)
real(8) function code(t, l, k_m)
real(8), intent (in) :: t
real(8), intent (in) :: l
real(8), intent (in) :: k_m
code = 2.0d0 / ((((k_m / l) * (sin(k_m) ** 2.0d0)) * t) * ((k_m / cos(k_m)) / l))
end function
k_m = Math.abs(k);
public static double code(double t, double l, double k_m) {
return 2.0 / ((((k_m / l) * Math.pow(Math.sin(k_m), 2.0)) * t) * ((k_m / Math.cos(k_m)) / l));
}
k_m = math.fabs(k) def code(t, l, k_m): return 2.0 / ((((k_m / l) * math.pow(math.sin(k_m), 2.0)) * t) * ((k_m / math.cos(k_m)) / l))
k_m = abs(k) function code(t, l, k_m) return Float64(2.0 / Float64(Float64(Float64(Float64(k_m / l) * (sin(k_m) ^ 2.0)) * t) * Float64(Float64(k_m / cos(k_m)) / l))) end
k_m = abs(k); function tmp = code(t, l, k_m) tmp = 2.0 / ((((k_m / l) * (sin(k_m) ^ 2.0)) * t) * ((k_m / cos(k_m)) / l)); end
k_m = N[Abs[k], $MachinePrecision] code[t_, l_, k$95$m_] := N[(2.0 / N[(N[(N[(N[(k$95$m / l), $MachinePrecision] * N[Power[N[Sin[k$95$m], $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision] * t), $MachinePrecision] * N[(N[(k$95$m / N[Cos[k$95$m], $MachinePrecision]), $MachinePrecision] / l), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
k_m = \left|k\right|
\\
\frac{2}{\left(\left(\frac{k\_m}{\ell} \cdot {\sin k\_m}^{2}\right) \cdot t\right) \cdot \frac{\frac{k\_m}{\cos k\_m}}{\ell}}
\end{array}
Initial program 37.8%
Taylor expanded in t around 0
unpow2N/A
associate-*l*N/A
*-commutativeN/A
unpow2N/A
associate-*r*N/A
times-fracN/A
*-commutativeN/A
lower-*.f64N/A
*-commutativeN/A
associate-/r*N/A
lower-/.f64N/A
lower-/.f64N/A
lower-cos.f64N/A
*-commutativeN/A
lower-/.f64N/A
Applied rewrites94.5%
Applied rewrites98.2%
Final simplification98.2%
k_m = (fabs.f64 k) (FPCore (t l k_m) :precision binary64 (/ 2.0 (* (/ k_m (* (cos k_m) l)) (* (* (/ k_m l) (pow (sin k_m) 2.0)) t))))
k_m = fabs(k);
// Herbie alternative: same rewritten denominator as the previous variant
// (k^2 * sin^2(k) * t / (l^2 * cos(k))) but with a different association —
// the k/(cos(k)*l) factor is applied first, changing only rounding order.
double code(double t, double l, double k_m) {
return 2.0 / ((k_m / (cos(k_m) * l)) * (((k_m / l) * pow(sin(k_m), 2.0)) * t));
}
k_m = abs(k)
real(8) function code(t, l, k_m)
real(8), intent (in) :: t
real(8), intent (in) :: l
real(8), intent (in) :: k_m
code = 2.0d0 / ((k_m / (cos(k_m) * l)) * (((k_m / l) * (sin(k_m) ** 2.0d0)) * t))
end function
k_m = Math.abs(k);
public static double code(double t, double l, double k_m) {
return 2.0 / ((k_m / (Math.cos(k_m) * l)) * (((k_m / l) * Math.pow(Math.sin(k_m), 2.0)) * t));
}
k_m = math.fabs(k) def code(t, l, k_m): return 2.0 / ((k_m / (math.cos(k_m) * l)) * (((k_m / l) * math.pow(math.sin(k_m), 2.0)) * t))
k_m = abs(k) function code(t, l, k_m) return Float64(2.0 / Float64(Float64(k_m / Float64(cos(k_m) * l)) * Float64(Float64(Float64(k_m / l) * (sin(k_m) ^ 2.0)) * t))) end
k_m = abs(k); function tmp = code(t, l, k_m) tmp = 2.0 / ((k_m / (cos(k_m) * l)) * (((k_m / l) * (sin(k_m) ^ 2.0)) * t)); end
k_m = N[Abs[k], $MachinePrecision] code[t_, l_, k$95$m_] := N[(2.0 / N[(N[(k$95$m / N[(N[Cos[k$95$m], $MachinePrecision] * l), $MachinePrecision]), $MachinePrecision] * N[(N[(N[(k$95$m / l), $MachinePrecision] * N[Power[N[Sin[k$95$m], $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision] * t), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
k_m = \left|k\right|
\\
\frac{2}{\frac{k\_m}{\cos k\_m \cdot \ell} \cdot \left(\left(\frac{k\_m}{\ell} \cdot {\sin k\_m}^{2}\right) \cdot t\right)}
\end{array}
Initial program 37.8%
Taylor expanded in t around 0
unpow2N/A
associate-*l*N/A
*-commutativeN/A
unpow2N/A
associate-*r*N/A
times-fracN/A
*-commutativeN/A
lower-*.f64N/A
*-commutativeN/A
associate-/r*N/A
lower-/.f64N/A
lower-/.f64N/A
lower-cos.f64N/A
*-commutativeN/A
lower-/.f64N/A
Applied rewrites94.5%
Applied rewrites98.2%
Applied rewrites98.2%
Final simplification98.2%
k_m = (fabs.f64 k)
(FPCore (t l k_m)
:precision binary64
(let* ((t_1 (* (/ k_m l) k_m)))
(if (<= k_m 0.0044)
(/ 2.0 (* (* (* t_1 t) t_1) (fma 0.16666666666666666 (* k_m k_m) 1.0)))
(if (<= k_m 1.25e+150)
(/
2.0
(*
(/ (fma (cos (* k_m 2.0)) -0.5 0.5) (* (* (cos k_m) l) l))
(* (* k_m k_m) t)))
(/ 2.0 (* (/ k_m l) (* (* (/ k_m l) (pow (sin k_m) 2.0)) t)))))))k_m = fabs(k);
// Herbie alternative with three regimes on k_m = |k| (regime thresholds taken
// from the derivation log below).
double code(double t, double l, double k_m) {
// Shared subterm: t_1 = k_m^2 / l.
double t_1 = (k_m / l) * k_m;
double tmp;
if (k_m <= 0.0044) {
// Small |k|: series form; fma(1/6, k^2, 1) computes 1 + k^2/6 in one rounding.
tmp = 2.0 / (((t_1 * t) * t_1) * fma(0.16666666666666666, (k_m * k_m), 1.0));
} else if (k_m <= 1.25e+150) {
// Mid range: fma(cos(2k), -0.5, 0.5) evaluates 0.5 - 0.5*cos(2k), i.e.
// sin^2(k) via the double-angle identity, with a single rounding.
tmp = 2.0 / ((fma(cos((k_m * 2.0)), -0.5, 0.5) / ((cos(k_m) * l) * l)) * ((k_m * k_m) * t));
} else {
// Huge |k|: keep the explicit sin^2 form; (k/l) is computed twice so the
// k^2 factor cannot overflow before the division by l.
tmp = 2.0 / ((k_m / l) * (((k_m / l) * pow(sin(k_m), 2.0)) * t));
}
return tmp;
}
k_m = abs(k) function code(t, l, k_m) t_1 = Float64(Float64(k_m / l) * k_m) tmp = 0.0 if (k_m <= 0.0044) tmp = Float64(2.0 / Float64(Float64(Float64(t_1 * t) * t_1) * fma(0.16666666666666666, Float64(k_m * k_m), 1.0))); elseif (k_m <= 1.25e+150) tmp = Float64(2.0 / Float64(Float64(fma(cos(Float64(k_m * 2.0)), -0.5, 0.5) / Float64(Float64(cos(k_m) * l) * l)) * Float64(Float64(k_m * k_m) * t))); else tmp = Float64(2.0 / Float64(Float64(k_m / l) * Float64(Float64(Float64(k_m / l) * (sin(k_m) ^ 2.0)) * t))); end return tmp end
k_m = N[Abs[k], $MachinePrecision]
code[t_, l_, k$95$m_] := Block[{t$95$1 = N[(N[(k$95$m / l), $MachinePrecision] * k$95$m), $MachinePrecision]}, If[LessEqual[k$95$m, 0.0044], N[(2.0 / N[(N[(N[(t$95$1 * t), $MachinePrecision] * t$95$1), $MachinePrecision] * N[(0.16666666666666666 * N[(k$95$m * k$95$m), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], If[LessEqual[k$95$m, 1.25e+150], N[(2.0 / N[(N[(N[(N[Cos[N[(k$95$m * 2.0), $MachinePrecision]], $MachinePrecision] * -0.5 + 0.5), $MachinePrecision] / N[(N[(N[Cos[k$95$m], $MachinePrecision] * l), $MachinePrecision] * l), $MachinePrecision]), $MachinePrecision] * N[(N[(k$95$m * k$95$m), $MachinePrecision] * t), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(2.0 / N[(N[(k$95$m / l), $MachinePrecision] * N[(N[(N[(k$95$m / l), $MachinePrecision] * N[Power[N[Sin[k$95$m], $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision] * t), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}
k_m = \left|k\right|
\\
\begin{array}{l}
t_1 := \frac{k\_m}{\ell} \cdot k\_m\\
\mathbf{if}\;k\_m \leq 0.0044:\\
\;\;\;\;\frac{2}{\left(\left(t\_1 \cdot t\right) \cdot t\_1\right) \cdot \mathsf{fma}\left(0.16666666666666666, k\_m \cdot k\_m, 1\right)}\\
\mathbf{elif}\;k\_m \leq 1.25 \cdot 10^{+150}:\\
\;\;\;\;\frac{2}{\frac{\mathsf{fma}\left(\cos \left(k\_m \cdot 2\right), -0.5, 0.5\right)}{\left(\cos k\_m \cdot \ell\right) \cdot \ell} \cdot \left(\left(k\_m \cdot k\_m\right) \cdot t\right)}\\
\mathbf{else}:\\
\;\;\;\;\frac{2}{\frac{k\_m}{\ell} \cdot \left(\left(\frac{k\_m}{\ell} \cdot {\sin k\_m}^{2}\right) \cdot t\right)}\\
\end{array}
\end{array}
if k < 0.00440000000000000027Initial program 41.9%
Taylor expanded in k around 0
distribute-rgt-inN/A
associate-/l*N/A
associate-*r*N/A
associate-*l*N/A
distribute-lft1-inN/A
*-commutativeN/A
lower-*.f64N/A
*-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
associate-*l/N/A
associate-/l*N/A
*-commutativeN/A
lower-*.f64N/A
Applied rewrites75.5%
Applied rewrites82.4%
if 0.00440000000000000027 < k < 1.25000000000000002e150Initial program 12.8%
Taylor expanded in t around 0
unpow2N/A
associate-*l*N/A
*-commutativeN/A
unpow2N/A
associate-*r*N/A
times-fracN/A
*-commutativeN/A
lower-*.f64N/A
*-commutativeN/A
associate-/r*N/A
lower-/.f64N/A
lower-/.f64N/A
lower-cos.f64N/A
*-commutativeN/A
lower-/.f64N/A
Applied rewrites92.1%
Applied rewrites81.7%
Applied rewrites81.6%
Taylor expanded in t around 0
Applied rewrites81.6%
if 1.25000000000000002e150 < k Initial program 45.3%
Taylor expanded in t around 0
unpow2N/A
associate-*l*N/A
*-commutativeN/A
unpow2N/A
associate-*r*N/A
times-fracN/A
*-commutativeN/A
lower-*.f64N/A
*-commutativeN/A
associate-/r*N/A
lower-/.f64N/A
lower-/.f64N/A
lower-cos.f64N/A
*-commutativeN/A
lower-/.f64N/A
Applied rewrites99.8%
Applied rewrites99.7%
Taylor expanded in k around 0
Applied rewrites77.6%
Final simplification81.9%
k_m = (fabs.f64 k)
(FPCore (t l k_m)
:precision binary64
(let* ((t_1 (* (/ k_m l) k_m)))
(if (<= k_m 0.0044)
(/ 2.0 (* (* (* t_1 t) t_1) (fma 0.16666666666666666 (* k_m k_m) 1.0)))
(/
2.0
(/
(* (* (fma t 0.5 (* (* (cos (* k_m 2.0)) -0.5) t)) k_m) k_m)
(* (* (cos k_m) l) l))))))k_m = fabs(k);
// Herbie alternative, two regimes on k_m = |k| (threshold from the log below).
double code(double t, double l, double k_m) {
// Shared subterm: t_1 = k_m^2 / l.
double t_1 = (k_m / l) * k_m;
double tmp;
if (k_m <= 0.0044) {
// Small |k|: series form; fma(1/6, k^2, 1) computes 1 + k^2/6 in one rounding.
tmp = 2.0 / (((t_1 * t) * t_1) * fma(0.16666666666666666, (k_m * k_m), 1.0));
} else {
// Elsewhere: fma(t, 0.5, cos(2k)*-0.5*t) evaluates t*(0.5 - 0.5*cos(2k)),
// i.e. t*sin^2(k) via the double-angle identity.
tmp = 2.0 / (((fma(t, 0.5, ((cos((k_m * 2.0)) * -0.5) * t)) * k_m) * k_m) / ((cos(k_m) * l) * l));
}
return tmp;
}
k_m = abs(k) function code(t, l, k_m) t_1 = Float64(Float64(k_m / l) * k_m) tmp = 0.0 if (k_m <= 0.0044) tmp = Float64(2.0 / Float64(Float64(Float64(t_1 * t) * t_1) * fma(0.16666666666666666, Float64(k_m * k_m), 1.0))); else tmp = Float64(2.0 / Float64(Float64(Float64(fma(t, 0.5, Float64(Float64(cos(Float64(k_m * 2.0)) * -0.5) * t)) * k_m) * k_m) / Float64(Float64(cos(k_m) * l) * l))); end return tmp end
k_m = N[Abs[k], $MachinePrecision]
code[t_, l_, k$95$m_] := Block[{t$95$1 = N[(N[(k$95$m / l), $MachinePrecision] * k$95$m), $MachinePrecision]}, If[LessEqual[k$95$m, 0.0044], N[(2.0 / N[(N[(N[(t$95$1 * t), $MachinePrecision] * t$95$1), $MachinePrecision] * N[(0.16666666666666666 * N[(k$95$m * k$95$m), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(2.0 / N[(N[(N[(N[(t * 0.5 + N[(N[(N[Cos[N[(k$95$m * 2.0), $MachinePrecision]], $MachinePrecision] * -0.5), $MachinePrecision] * t), $MachinePrecision]), $MachinePrecision] * k$95$m), $MachinePrecision] * k$95$m), $MachinePrecision] / N[(N[(N[Cos[k$95$m], $MachinePrecision] * l), $MachinePrecision] * l), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
k_m = \left|k\right|
\\
\begin{array}{l}
t_1 := \frac{k\_m}{\ell} \cdot k\_m\\
\mathbf{if}\;k\_m \leq 0.0044:\\
\;\;\;\;\frac{2}{\left(\left(t\_1 \cdot t\right) \cdot t\_1\right) \cdot \mathsf{fma}\left(0.16666666666666666, k\_m \cdot k\_m, 1\right)}\\
\mathbf{else}:\\
\;\;\;\;\frac{2}{\frac{\left(\mathsf{fma}\left(t, 0.5, \left(\cos \left(k\_m \cdot 2\right) \cdot -0.5\right) \cdot t\right) \cdot k\_m\right) \cdot k\_m}{\left(\cos k\_m \cdot \ell\right) \cdot \ell}}\\
\end{array}
\end{array}
if k < 0.00440000000000000027Initial program 41.9%
Taylor expanded in k around 0
distribute-rgt-inN/A
associate-/l*N/A
associate-*r*N/A
associate-*l*N/A
distribute-lft1-inN/A
*-commutativeN/A
lower-*.f64N/A
*-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
associate-*l/N/A
associate-/l*N/A
*-commutativeN/A
lower-*.f64N/A
Applied rewrites75.5%
Applied rewrites82.4%
if 0.00440000000000000027 < k Initial program 24.0%
Taylor expanded in t around 0
unpow2N/A
associate-*l*N/A
*-commutativeN/A
unpow2N/A
associate-*r*N/A
times-fracN/A
*-commutativeN/A
lower-*.f64N/A
*-commutativeN/A
associate-/r*N/A
lower-/.f64N/A
lower-/.f64N/A
lower-cos.f64N/A
*-commutativeN/A
lower-/.f64N/A
Applied rewrites94.8%
Applied rewrites82.9%
Applied rewrites82.9%
Applied rewrites82.8%
Final simplification82.5%
k_m = (fabs.f64 k)
(FPCore (t l k_m)
:precision binary64
(let* ((t_1 (* (/ k_m l) k_m)))
(if (<= k_m 0.0044)
(/ 2.0 (* (* (* t_1 t) t_1) (fma 0.16666666666666666 (* k_m k_m) 1.0)))
(/
2.0
(/
(* (* (* (- 0.5 (* (cos (+ k_m k_m)) 0.5)) t) k_m) k_m)
(* (* (cos k_m) l) l))))))k_m = fabs(k);
double code(double t, double l, double k_m) {
double t_1 = (k_m / l) * k_m;
double tmp;
if (k_m <= 0.0044) {
tmp = 2.0 / (((t_1 * t) * t_1) * fma(0.16666666666666666, (k_m * k_m), 1.0));
} else {
tmp = 2.0 / (((((0.5 - (cos((k_m + k_m)) * 0.5)) * t) * k_m) * k_m) / ((cos(k_m) * l) * l));
}
return tmp;
}
k_m = abs(k) function code(t, l, k_m) t_1 = Float64(Float64(k_m / l) * k_m) tmp = 0.0 if (k_m <= 0.0044) tmp = Float64(2.0 / Float64(Float64(Float64(t_1 * t) * t_1) * fma(0.16666666666666666, Float64(k_m * k_m), 1.0))); else tmp = Float64(2.0 / Float64(Float64(Float64(Float64(Float64(0.5 - Float64(cos(Float64(k_m + k_m)) * 0.5)) * t) * k_m) * k_m) / Float64(Float64(cos(k_m) * l) * l))); end return tmp end
k_m = N[Abs[k], $MachinePrecision]
code[t_, l_, k$95$m_] := Block[{t$95$1 = N[(N[(k$95$m / l), $MachinePrecision] * k$95$m), $MachinePrecision]}, If[LessEqual[k$95$m, 0.0044], N[(2.0 / N[(N[(N[(t$95$1 * t), $MachinePrecision] * t$95$1), $MachinePrecision] * N[(0.16666666666666666 * N[(k$95$m * k$95$m), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(2.0 / N[(N[(N[(N[(N[(0.5 - N[(N[Cos[N[(k$95$m + k$95$m), $MachinePrecision]], $MachinePrecision] * 0.5), $MachinePrecision]), $MachinePrecision] * t), $MachinePrecision] * k$95$m), $MachinePrecision] * k$95$m), $MachinePrecision] / N[(N[(N[Cos[k$95$m], $MachinePrecision] * l), $MachinePrecision] * l), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
k_m = \left|k\right|
\\
\begin{array}{l}
t_1 := \frac{k\_m}{\ell} \cdot k\_m\\
\mathbf{if}\;k\_m \leq 0.0044:\\
\;\;\;\;\frac{2}{\left(\left(t\_1 \cdot t\right) \cdot t\_1\right) \cdot \mathsf{fma}\left(0.16666666666666666, k\_m \cdot k\_m, 1\right)}\\
\mathbf{else}:\\
\;\;\;\;\frac{2}{\frac{\left(\left(\left(0.5 - \cos \left(k\_m + k\_m\right) \cdot 0.5\right) \cdot t\right) \cdot k\_m\right) \cdot k\_m}{\left(\cos k\_m \cdot \ell\right) \cdot \ell}}\\
\end{array}
\end{array}
if k < 0.00440000000000000027Initial program 41.9%
Taylor expanded in k around 0
distribute-rgt-inN/A
associate-/l*N/A
associate-*r*N/A
associate-*l*N/A
distribute-lft1-inN/A
*-commutativeN/A
lower-*.f64N/A
*-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
associate-*l/N/A
associate-/l*N/A
*-commutativeN/A
lower-*.f64N/A
Applied rewrites75.5%
Applied rewrites82.4%
if 0.00440000000000000027 < k Initial program 24.0%
Taylor expanded in t around 0
unpow2N/A
associate-*l*N/A
*-commutativeN/A
unpow2N/A
associate-*r*N/A
times-fracN/A
*-commutativeN/A
lower-*.f64N/A
*-commutativeN/A
associate-/r*N/A
lower-/.f64N/A
lower-/.f64N/A
lower-cos.f64N/A
*-commutativeN/A
lower-/.f64N/A
Applied rewrites94.8%
Applied rewrites82.9%
Applied rewrites82.9%
Final simplification82.5%
k_m = (fabs.f64 k) (FPCore (t l k_m) :precision binary64 (/ 2.0 (* (/ k_m l) (* (* (/ k_m l) (pow (sin k_m) 2.0)) t))))
k_m = fabs(k);
// Herbie alternative: single-regime form of the rewritten denominator,
// k^2 * sin^2(k) * t / l^2, with (k/l) factored twice to limit overflow.
double code(double t, double l, double k_m) {
return 2.0 / ((k_m / l) * (((k_m / l) * pow(sin(k_m), 2.0)) * t));
}
k_m = abs(k)
real(8) function code(t, l, k_m)
real(8), intent (in) :: t
real(8), intent (in) :: l
real(8), intent (in) :: k_m
code = 2.0d0 / ((k_m / l) * (((k_m / l) * (sin(k_m) ** 2.0d0)) * t))
end function
k_m = Math.abs(k);
public static double code(double t, double l, double k_m) {
return 2.0 / ((k_m / l) * (((k_m / l) * Math.pow(Math.sin(k_m), 2.0)) * t));
}
k_m = math.fabs(k) def code(t, l, k_m): return 2.0 / ((k_m / l) * (((k_m / l) * math.pow(math.sin(k_m), 2.0)) * t))
k_m = abs(k) function code(t, l, k_m) return Float64(2.0 / Float64(Float64(k_m / l) * Float64(Float64(Float64(k_m / l) * (sin(k_m) ^ 2.0)) * t))) end
k_m = abs(k); function tmp = code(t, l, k_m) tmp = 2.0 / ((k_m / l) * (((k_m / l) * (sin(k_m) ^ 2.0)) * t)); end
k_m = N[Abs[k], $MachinePrecision] code[t_, l_, k$95$m_] := N[(2.0 / N[(N[(k$95$m / l), $MachinePrecision] * N[(N[(N[(k$95$m / l), $MachinePrecision] * N[Power[N[Sin[k$95$m], $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision] * t), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
k_m = \left|k\right|
\\
\frac{2}{\frac{k\_m}{\ell} \cdot \left(\left(\frac{k\_m}{\ell} \cdot {\sin k\_m}^{2}\right) \cdot t\right)}
\end{array}
Initial program 37.8%
Taylor expanded in t around 0
unpow2N/A
associate-*l*N/A
*-commutativeN/A
unpow2N/A
associate-*r*N/A
times-fracN/A
*-commutativeN/A
lower-*.f64N/A
*-commutativeN/A
associate-/r*N/A
lower-/.f64N/A
lower-/.f64N/A
lower-cos.f64N/A
*-commutativeN/A
lower-/.f64N/A
Applied rewrites94.5%
Applied rewrites98.2%
Taylor expanded in k around 0
Applied rewrites78.3%
Final simplification78.3%
k_m = (fabs.f64 k) (FPCore (t l k_m) :precision binary64 (let* ((t_1 (* (/ k_m l) k_m))) (/ 2.0 (* (* t_1 t) t_1))))
k_m = fabs(k);
// Herbie alternative (Taylor expansion in k around 0, per the log below):
// with t_1 = k^2/l the denominator is k^4 * t / l^2 — the small-|k| limit
// where sin(k) ~ tan(k) ~ k.
double code(double t, double l, double k_m) {
double t_1 = (k_m / l) * k_m;
return 2.0 / ((t_1 * t) * t_1);
}
k_m = abs(k)
real(8) function code(t, l, k_m)
real(8), intent (in) :: t
real(8), intent (in) :: l
real(8), intent (in) :: k_m
real(8) :: t_1
t_1 = (k_m / l) * k_m
code = 2.0d0 / ((t_1 * t) * t_1)
end function
k_m = Math.abs(k);
public static double code(double t, double l, double k_m) {
double t_1 = (k_m / l) * k_m;
return 2.0 / ((t_1 * t) * t_1);
}
k_m = math.fabs(k) def code(t, l, k_m): t_1 = (k_m / l) * k_m return 2.0 / ((t_1 * t) * t_1)
k_m = abs(k) function code(t, l, k_m) t_1 = Float64(Float64(k_m / l) * k_m) return Float64(2.0 / Float64(Float64(t_1 * t) * t_1)) end
k_m = abs(k); function tmp = code(t, l, k_m) t_1 = (k_m / l) * k_m; tmp = 2.0 / ((t_1 * t) * t_1); end
k_m = N[Abs[k], $MachinePrecision]
code[t_, l_, k$95$m_] := Block[{t$95$1 = N[(N[(k$95$m / l), $MachinePrecision] * k$95$m), $MachinePrecision]}, N[(2.0 / N[(N[(t$95$1 * t), $MachinePrecision] * t$95$1), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
k_m = \left|k\right|
\\
\begin{array}{l}
t_1 := \frac{k\_m}{\ell} \cdot k\_m\\
\frac{2}{\left(t\_1 \cdot t\right) \cdot t\_1}
\end{array}
\end{array}
Initial program 37.8%
Taylor expanded in k around 0
*-commutativeN/A
associate-/l*N/A
*-commutativeN/A
lower-*.f64N/A
unpow2N/A
associate-/r*N/A
lower-/.f64N/A
lower-/.f64N/A
lower-pow.f6471.2
Applied rewrites71.2%
Applied rewrites75.5%
Applied rewrites76.6%
Final simplification76.6%
k_m = (fabs.f64 k) (FPCore (t l k_m) :precision binary64 (/ 2.0 (* (* (* (* (/ k_m l) k_m) k_m) (/ k_m l)) t)))
k_m = fabs(k);
// Herbie alternative: same small-|k| limit, denominator k^4 * t / l^2,
// written as a left-to-right product with (k/l) appearing twice.
double code(double t, double l, double k_m) {
return 2.0 / (((((k_m / l) * k_m) * k_m) * (k_m / l)) * t);
}
k_m = abs(k)
real(8) function code(t, l, k_m)
real(8), intent (in) :: t
real(8), intent (in) :: l
real(8), intent (in) :: k_m
code = 2.0d0 / (((((k_m / l) * k_m) * k_m) * (k_m / l)) * t)
end function
k_m = Math.abs(k);
public static double code(double t, double l, double k_m) {
return 2.0 / (((((k_m / l) * k_m) * k_m) * (k_m / l)) * t);
}
k_m = math.fabs(k) def code(t, l, k_m): return 2.0 / (((((k_m / l) * k_m) * k_m) * (k_m / l)) * t)
k_m = abs(k) function code(t, l, k_m) return Float64(2.0 / Float64(Float64(Float64(Float64(Float64(k_m / l) * k_m) * k_m) * Float64(k_m / l)) * t)) end
k_m = abs(k); function tmp = code(t, l, k_m) tmp = 2.0 / (((((k_m / l) * k_m) * k_m) * (k_m / l)) * t); end
k_m = N[Abs[k], $MachinePrecision] code[t_, l_, k$95$m_] := N[(2.0 / N[(N[(N[(N[(N[(k$95$m / l), $MachinePrecision] * k$95$m), $MachinePrecision] * k$95$m), $MachinePrecision] * N[(k$95$m / l), $MachinePrecision]), $MachinePrecision] * t), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
k_m = \left|k\right|
\\
\frac{2}{\left(\left(\left(\frac{k\_m}{\ell} \cdot k\_m\right) \cdot k\_m\right) \cdot \frac{k\_m}{\ell}\right) \cdot t}
\end{array}
Initial program 37.8%
Taylor expanded in k around 0
*-commutativeN/A
associate-/l*N/A
*-commutativeN/A
lower-*.f64N/A
unpow2N/A
associate-/r*N/A
lower-/.f64N/A
lower-/.f64N/A
lower-pow.f6471.2
Applied rewrites71.2%
Applied rewrites75.5%
Applied rewrites76.3%
k_m = (fabs.f64 k) (FPCore (t l k_m) :precision binary64 (/ 2.0 (* (* (* (/ k_m l) (/ k_m l)) (* k_m k_m)) t)))
k_m = fabs(k);
// Herbie alternative: same small-|k| limit, denominator k^4 * t / l^2,
// grouped as (k/l)^2 * k^2 * t.
double code(double t, double l, double k_m) {
return 2.0 / ((((k_m / l) * (k_m / l)) * (k_m * k_m)) * t);
}
k_m = abs(k)
real(8) function code(t, l, k_m)
real(8), intent (in) :: t
real(8), intent (in) :: l
real(8), intent (in) :: k_m
code = 2.0d0 / ((((k_m / l) * (k_m / l)) * (k_m * k_m)) * t)
end function
k_m = Math.abs(k);
public static double code(double t, double l, double k_m) {
return 2.0 / ((((k_m / l) * (k_m / l)) * (k_m * k_m)) * t);
}
k_m = math.fabs(k) def code(t, l, k_m): return 2.0 / ((((k_m / l) * (k_m / l)) * (k_m * k_m)) * t)
k_m = abs(k) function code(t, l, k_m) return Float64(2.0 / Float64(Float64(Float64(Float64(k_m / l) * Float64(k_m / l)) * Float64(k_m * k_m)) * t)) end
k_m = abs(k); function tmp = code(t, l, k_m) tmp = 2.0 / ((((k_m / l) * (k_m / l)) * (k_m * k_m)) * t); end
k_m = N[Abs[k], $MachinePrecision] code[t_, l_, k$95$m_] := N[(2.0 / N[(N[(N[(N[(k$95$m / l), $MachinePrecision] * N[(k$95$m / l), $MachinePrecision]), $MachinePrecision] * N[(k$95$m * k$95$m), $MachinePrecision]), $MachinePrecision] * t), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
k_m = \left|k\right|
\\
\frac{2}{\left(\left(\frac{k\_m}{\ell} \cdot \frac{k\_m}{\ell}\right) \cdot \left(k\_m \cdot k\_m\right)\right) \cdot t}
\end{array}
Initial program 37.8%
Taylor expanded in k around 0
*-commutativeN/A
associate-/l*N/A
*-commutativeN/A
lower-*.f64N/A
unpow2N/A
associate-/r*N/A
lower-/.f64N/A
lower-/.f64N/A
lower-pow.f6471.2
Applied rewrites71.2%
Applied rewrites75.5%
Final simplification75.5%
k_m = (fabs.f64 k) (FPCore (t l k_m) :precision binary64 (/ 2.0 (* (* (* (/ k_m l) t) (* (/ k_m l) k_m)) k_m)))
k_m = fabs(k);
// Herbie alternative: same small-|k| limit, denominator k^4 * t / l^2,
// with t multiplied in early — only the rounding order differs.
double code(double t, double l, double k_m) {
return 2.0 / ((((k_m / l) * t) * ((k_m / l) * k_m)) * k_m);
}
k_m = abs(k)
real(8) function code(t, l, k_m)
real(8), intent (in) :: t
real(8), intent (in) :: l
real(8), intent (in) :: k_m
code = 2.0d0 / ((((k_m / l) * t) * ((k_m / l) * k_m)) * k_m)
end function
k_m = Math.abs(k);
public static double code(double t, double l, double k_m) {
return 2.0 / ((((k_m / l) * t) * ((k_m / l) * k_m)) * k_m);
}
k_m = math.fabs(k) def code(t, l, k_m): return 2.0 / ((((k_m / l) * t) * ((k_m / l) * k_m)) * k_m)
k_m = abs(k) function code(t, l, k_m) return Float64(2.0 / Float64(Float64(Float64(Float64(k_m / l) * t) * Float64(Float64(k_m / l) * k_m)) * k_m)) end
k_m = abs(k); function tmp = code(t, l, k_m) tmp = 2.0 / ((((k_m / l) * t) * ((k_m / l) * k_m)) * k_m); end
k_m = N[Abs[k], $MachinePrecision] code[t_, l_, k$95$m_] := N[(2.0 / N[(N[(N[(N[(k$95$m / l), $MachinePrecision] * t), $MachinePrecision] * N[(N[(k$95$m / l), $MachinePrecision] * k$95$m), $MachinePrecision]), $MachinePrecision] * k$95$m), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
k_m = \left|k\right|
\\
\frac{2}{\left(\left(\frac{k\_m}{\ell} \cdot t\right) \cdot \left(\frac{k\_m}{\ell} \cdot k\_m\right)\right) \cdot k\_m}
\end{array}
Initial program 37.8%
Taylor expanded in k around 0
*-commutativeN/A
associate-/l*N/A
*-commutativeN/A
lower-*.f64N/A
unpow2N/A
associate-/r*N/A
lower-/.f64N/A
lower-/.f64N/A
lower-pow.f6471.2
Applied rewrites71.2%
Applied rewrites75.5%
Applied rewrites76.3%
Applied rewrites75.2%
Final simplification75.2%
k_m = (fabs.f64 k) (FPCore (t l k_m) :precision binary64 (/ 2.0 (* (* (/ (* k_m k_m) (* l l)) (* k_m k_m)) t)))
k_m = fabs(k);
// Herbie alternative: same small-|k| limit, denominator k^4 * t / l^2,
// grouped as (k^2/l^2) * k^2 * t; k_m*k_m may overflow sooner here than in
// the (k/l)-factored variants above.
double code(double t, double l, double k_m) {
return 2.0 / ((((k_m * k_m) / (l * l)) * (k_m * k_m)) * t);
}
k_m = abs(k)
real(8) function code(t, l, k_m)
real(8), intent (in) :: t
real(8), intent (in) :: l
real(8), intent (in) :: k_m
code = 2.0d0 / ((((k_m * k_m) / (l * l)) * (k_m * k_m)) * t)
end function
k_m = Math.abs(k);
public static double code(double t, double l, double k_m) {
return 2.0 / ((((k_m * k_m) / (l * l)) * (k_m * k_m)) * t);
}
k_m = math.fabs(k) def code(t, l, k_m): return 2.0 / ((((k_m * k_m) / (l * l)) * (k_m * k_m)) * t)
k_m = abs(k) function code(t, l, k_m) return Float64(2.0 / Float64(Float64(Float64(Float64(k_m * k_m) / Float64(l * l)) * Float64(k_m * k_m)) * t)) end
k_m = abs(k); function tmp = code(t, l, k_m) tmp = 2.0 / ((((k_m * k_m) / (l * l)) * (k_m * k_m)) * t); end
k_m = N[Abs[k], $MachinePrecision] code[t_, l_, k$95$m_] := N[(2.0 / N[(N[(N[(N[(k$95$m * k$95$m), $MachinePrecision] / N[(l * l), $MachinePrecision]), $MachinePrecision] * N[(k$95$m * k$95$m), $MachinePrecision]), $MachinePrecision] * t), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
k_m = \left|k\right|
\\
\frac{2}{\left(\frac{k\_m \cdot k\_m}{\ell \cdot \ell} \cdot \left(k\_m \cdot k\_m\right)\right) \cdot t}
\end{array}
Initial program 37.8%
Taylor expanded in k around 0
*-commutativeN/A
associate-/l*N/A
*-commutativeN/A
lower-*.f64N/A
unpow2N/A
associate-/r*N/A
lower-/.f64N/A
lower-/.f64N/A
lower-pow.f6471.2
Applied rewrites71.2%
Applied rewrites75.5%
Applied rewrites76.3%
Applied rewrites68.0%
herbie shell --seed 2024250
(FPCore (t l k)
:name "Toniolo and Linder, Equation (10-)"
:precision binary64
(/ 2.0 (* (* (* (/ (pow t 3.0) (* l l)) (sin k)) (tan k)) (- (+ 1.0 (pow (/ k t) 2.0)) 1.0))))