
(FPCore (t l k) :precision binary64 (/ 2.0 (* (* (* (/ (pow t 3.0) (* l l)) (sin k)) (tan k)) (- (+ 1.0 (pow (/ k t) 2.0)) 1.0))))
double code(double t, double l, double k) {
return 2.0 / ((((pow(t, 3.0) / (l * l)) * sin(k)) * tan(k)) * ((1.0 + pow((k / t), 2.0)) - 1.0));
}
! Reference form: 2 / ((t**3/l**2) * sin(k) * tan(k) * ((1 + (k/t)**2) - 1)).
! NOTE(review): the trailing factor ((1 + (k/t)**2) - 1) suffers catastrophic
! cancellation when k/t is tiny; this is the unimproved starting expression.
real(8) function code(t, l, k)
real(8), intent (in) :: t
real(8), intent (in) :: l
real(8), intent (in) :: k
code = 2.0d0 / (((((t ** 3.0d0) / (l * l)) * sin(k)) * tan(k)) * ((1.0d0 + ((k / t) ** 2.0d0)) - 1.0d0))
end function
/**
 * Evaluates 2 / ((t^3 / l^2) * sin(k) * tan(k) * ((1 + (k/t)^2) - 1))
 * in binary64. The factors are named locally but multiplied in the same
 * association order as the reference expression, so results are bit-identical.
 */
public static double code(double t, double l, double k) {
    double scaled = Math.pow(t, 3.0) / (l * l);
    double trigProduct = (scaled * Math.sin(k)) * Math.tan(k);
    // This subtraction cancels catastrophically when k/t is tiny.
    double residual = (1.0 + Math.pow((k / t), 2.0)) - 1.0;
    return 2.0 / (trigProduct * residual);
}
def code(t, l, k): return 2.0 / ((((math.pow(t, 3.0) / (l * l)) * math.sin(k)) * math.tan(k)) * ((1.0 + math.pow((k / t), 2.0)) - 1.0))
function code(t, l, k) return Float64(2.0 / Float64(Float64(Float64(Float64((t ^ 3.0) / Float64(l * l)) * sin(k)) * tan(k)) * Float64(Float64(1.0 + (Float64(k / t) ^ 2.0)) - 1.0))) end
function tmp = code(t, l, k) tmp = 2.0 / (((((t ^ 3.0) / (l * l)) * sin(k)) * tan(k)) * ((1.0 + ((k / t) ^ 2.0)) - 1.0)); end
code[t_, l_, k_] := N[(2.0 / N[(N[(N[(N[(N[Power[t, 3.0], $MachinePrecision] / N[(l * l), $MachinePrecision]), $MachinePrecision] * N[Sin[k], $MachinePrecision]), $MachinePrecision] * N[Tan[k], $MachinePrecision]), $MachinePrecision] * N[(N[(1.0 + N[Power[N[(k / t), $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision] - 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{2}{\left(\left(\frac{{t}^{3}}{\ell \cdot \ell} \cdot \sin k\right) \cdot \tan k\right) \cdot \left(\left(1 + {\left(\frac{k}{t}\right)}^{2}\right) - 1\right)}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 10 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (t l k) :precision binary64 (/ 2.0 (* (* (* (/ (pow t 3.0) (* l l)) (sin k)) (tan k)) (- (+ 1.0 (pow (/ k t) 2.0)) 1.0))))
double code(double t, double l, double k) {
return 2.0 / ((((pow(t, 3.0) / (l * l)) * sin(k)) * tan(k)) * ((1.0 + pow((k / t), 2.0)) - 1.0));
}
real(8) function code(t, l, k)
real(8), intent (in) :: t
real(8), intent (in) :: l
real(8), intent (in) :: k
code = 2.0d0 / (((((t ** 3.0d0) / (l * l)) * sin(k)) * tan(k)) * ((1.0d0 + ((k / t) ** 2.0d0)) - 1.0d0))
end function
public static double code(double t, double l, double k) {
return 2.0 / ((((Math.pow(t, 3.0) / (l * l)) * Math.sin(k)) * Math.tan(k)) * ((1.0 + Math.pow((k / t), 2.0)) - 1.0));
}
def code(t, l, k): return 2.0 / ((((math.pow(t, 3.0) / (l * l)) * math.sin(k)) * math.tan(k)) * ((1.0 + math.pow((k / t), 2.0)) - 1.0))
function code(t, l, k) return Float64(2.0 / Float64(Float64(Float64(Float64((t ^ 3.0) / Float64(l * l)) * sin(k)) * tan(k)) * Float64(Float64(1.0 + (Float64(k / t) ^ 2.0)) - 1.0))) end
function tmp = code(t, l, k) tmp = 2.0 / (((((t ^ 3.0) / (l * l)) * sin(k)) * tan(k)) * ((1.0 + ((k / t) ^ 2.0)) - 1.0)); end
code[t_, l_, k_] := N[(2.0 / N[(N[(N[(N[(N[Power[t, 3.0], $MachinePrecision] / N[(l * l), $MachinePrecision]), $MachinePrecision] * N[Sin[k], $MachinePrecision]), $MachinePrecision] * N[Tan[k], $MachinePrecision]), $MachinePrecision] * N[(N[(1.0 + N[Power[N[(k / t), $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision] - 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{2}{\left(\left(\frac{{t}^{3}}{\ell \cdot \ell} \cdot \sin k\right) \cdot \tan k\right) \cdot \left(\left(1 + {\left(\frac{k}{t}\right)}^{2}\right) - 1\right)}
\end{array}
(FPCore (t l k) :precision binary64 (* (/ l (sin k)) (/ (/ l k) (/ (tan k) (/ (/ 2.0 k) t)))))
// Alternative form: (l/sin k) * ((l/k) / (tan k / ((2/k)/t))).
// Algebraically 2 l^2 / (k^2 t sin k tan k); the nested quotients keep
// each intermediate factor well-scaled instead of forming t^3/l^2 up front.
double code(double t, double l, double k) {
return (l / sin(k)) * ((l / k) / (tan(k) / ((2.0 / k) / t)));
}
real(8) function code(t, l, k)
real(8), intent (in) :: t
real(8), intent (in) :: l
real(8), intent (in) :: k
code = (l / sin(k)) * ((l / k) / (tan(k) / ((2.0d0 / k) / t)))
end function
public static double code(double t, double l, double k) {
return (l / Math.sin(k)) * ((l / k) / (Math.tan(k) / ((2.0 / k) / t)));
}
def code(t, l, k): return (l / math.sin(k)) * ((l / k) / (math.tan(k) / ((2.0 / k) / t)))
function code(t, l, k) return Float64(Float64(l / sin(k)) * Float64(Float64(l / k) / Float64(tan(k) / Float64(Float64(2.0 / k) / t)))) end
function tmp = code(t, l, k) tmp = (l / sin(k)) * ((l / k) / (tan(k) / ((2.0 / k) / t))); end
code[t_, l_, k_] := N[(N[(l / N[Sin[k], $MachinePrecision]), $MachinePrecision] * N[(N[(l / k), $MachinePrecision] / N[(N[Tan[k], $MachinePrecision] / N[(N[(2.0 / k), $MachinePrecision] / t), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\ell}{\sin k} \cdot \frac{\frac{\ell}{k}}{\frac{\tan k}{\frac{\frac{2}{k}}{t}}}
\end{array}
Initial program 36.1%
associate-*l*36.1%
associate-*l*36.1%
associate-/r*36.1%
associate-/r/35.7%
*-commutative35.7%
times-frac36.5%
+-commutative36.5%
associate--l+42.9%
metadata-eval42.9%
+-rgt-identity42.9%
times-frac46.6%
Simplified46.6%
Taylor expanded in t around 0 84.6%
unpow284.6%
Simplified84.6%
associate-*l/84.7%
associate-*l*88.1%
Applied egg-rr88.1%
*-commutative88.1%
associate-*r/88.1%
associate-*r*84.6%
unpow284.6%
associate-/r*84.7%
unpow284.7%
associate-*l*88.0%
unpow288.0%
associate-/r*87.8%
unpow287.8%
associate-*r*92.7%
Simplified92.7%
associate-*l/92.7%
Applied egg-rr92.7%
frac-times90.4%
Applied egg-rr90.4%
times-frac92.7%
associate-*r*87.8%
unpow287.8%
associate-*r/87.8%
unpow287.8%
associate-*r*92.7%
times-frac96.8%
associate-/l*96.4%
associate-/r*96.4%
Simplified96.4%
Final simplification96.4%
(FPCore (t l k) :precision binary64 (if (<= k 8.2e-25) (* (/ l (sin k)) (* (/ l k) (/ 2.0 (* k (* k t))))) (* (/ 2.0 k) (/ (/ (* l (/ l (tan k))) (sin k)) (* k t)))))
double code(double t, double l, double k) {
double tmp;
if (k <= 8.2e-25) {
tmp = (l / sin(k)) * ((l / k) * (2.0 / (k * (k * t))));
} else {
tmp = (2.0 / k) * (((l * (l / tan(k))) / sin(k)) / (k * t));
}
return tmp;
}
real(8) function code(t, l, k)
real(8), intent (in) :: t
real(8), intent (in) :: l
real(8), intent (in) :: k
real(8) :: tmp
if (k <= 8.2d-25) then
tmp = (l / sin(k)) * ((l / k) * (2.0d0 / (k * (k * t))))
else
tmp = (2.0d0 / k) * (((l * (l / tan(k))) / sin(k)) / (k * t))
end if
code = tmp
end function
// Piecewise form: below the crossover use the sin-only small-k product,
// otherwise the tan-based rearrangement of the same quantity.
// NOTE(review): the crossover 8.2e-25 comes from the generator's sampling,
// not from an analytic bound.
public static double code(double t, double l, double k) {
double tmp;
if (k <= 8.2e-25) {
// small-k regime
tmp = (l / Math.sin(k)) * ((l / k) * (2.0 / (k * (k * t))));
} else {
// general regime
tmp = (2.0 / k) * (((l * (l / Math.tan(k))) / Math.sin(k)) / (k * t));
}
return tmp;
}
def code(t, l, k): tmp = 0 if k <= 8.2e-25: tmp = (l / math.sin(k)) * ((l / k) * (2.0 / (k * (k * t)))) else: tmp = (2.0 / k) * (((l * (l / math.tan(k))) / math.sin(k)) / (k * t)) return tmp
function code(t, l, k) tmp = 0.0 if (k <= 8.2e-25) tmp = Float64(Float64(l / sin(k)) * Float64(Float64(l / k) * Float64(2.0 / Float64(k * Float64(k * t))))); else tmp = Float64(Float64(2.0 / k) * Float64(Float64(Float64(l * Float64(l / tan(k))) / sin(k)) / Float64(k * t))); end return tmp end
function tmp_2 = code(t, l, k) tmp = 0.0; if (k <= 8.2e-25) tmp = (l / sin(k)) * ((l / k) * (2.0 / (k * (k * t)))); else tmp = (2.0 / k) * (((l * (l / tan(k))) / sin(k)) / (k * t)); end tmp_2 = tmp; end
code[t_, l_, k_] := If[LessEqual[k, 8.2e-25], N[(N[(l / N[Sin[k], $MachinePrecision]), $MachinePrecision] * N[(N[(l / k), $MachinePrecision] * N[(2.0 / N[(k * N[(k * t), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(2.0 / k), $MachinePrecision] * N[(N[(N[(l * N[(l / N[Tan[k], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[Sin[k], $MachinePrecision]), $MachinePrecision] / N[(k * t), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;k \leq 8.2 \cdot 10^{-25}:\\
\;\;\;\;\frac{\ell}{\sin k} \cdot \left(\frac{\ell}{k} \cdot \frac{2}{k \cdot \left(k \cdot t\right)}\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{2}{k} \cdot \frac{\frac{\ell \cdot \frac{\ell}{\tan k}}{\sin k}}{k \cdot t}\\
\end{array}
\end{array}
if k < 8.19999999999999974e-25
Initial program 37.6%
associate-*l*37.6%
associate-*l*37.6%
associate-/r*37.6%
associate-/r/37.6%
*-commutative37.6%
times-frac38.2%
+-commutative38.2%
associate--l+43.5%
metadata-eval43.5%
+-rgt-identity43.5%
times-frac48.8%
Simplified48.8%
Taylor expanded in t around 0 88.0%
unpow288.0%
Simplified88.0%
associate-*l/88.0%
associate-*l*91.4%
Applied egg-rr91.4%
*-commutative91.4%
associate-*r/91.3%
associate-*r*88.0%
unpow288.0%
associate-/r*88.0%
unpow288.0%
associate-*l*91.6%
unpow291.6%
associate-/r*91.6%
unpow291.6%
associate-*r*96.5%
Simplified96.5%
Taylor expanded in k around 0 85.1%
if 8.19999999999999974e-25 < k Initial program 32.7%
associate-*l*32.7%
associate-*l*32.7%
associate-/r*32.7%
associate-/r/31.4%
*-commutative31.4%
times-frac32.7%
+-commutative32.7%
associate--l+41.5%
metadata-eval41.5%
+-rgt-identity41.5%
times-frac41.5%
Simplified41.5%
Taylor expanded in t around 0 77.3%
unpow277.3%
Simplified77.3%
associate-*l/77.3%
associate-*l*81.0%
Applied egg-rr81.0%
times-frac85.8%
associate-*l/85.8%
Simplified85.8%
Final simplification85.3%
(FPCore (t l k) :precision binary64 (* (/ l (sin k)) (* (/ l (tan k)) (/ 2.0 (* k (* k t))))))
// Small-k alternative: 2 l^2 / (k^2 t sin k tan k), evaluated as
// (l/sin k) * (l/tan k) * 2/(k*(k*t)) so each factor stays well-scaled.
double code(double t, double l, double k) {
return (l / sin(k)) * ((l / tan(k)) * (2.0 / (k * (k * t))));
}
real(8) function code(t, l, k)
real(8), intent (in) :: t
real(8), intent (in) :: l
real(8), intent (in) :: k
code = (l / sin(k)) * ((l / tan(k)) * (2.0d0 / (k * (k * t))))
end function
public static double code(double t, double l, double k) {
return (l / Math.sin(k)) * ((l / Math.tan(k)) * (2.0 / (k * (k * t))));
}
def code(t, l, k): return (l / math.sin(k)) * ((l / math.tan(k)) * (2.0 / (k * (k * t))))
function code(t, l, k) return Float64(Float64(l / sin(k)) * Float64(Float64(l / tan(k)) * Float64(2.0 / Float64(k * Float64(k * t))))) end
function tmp = code(t, l, k) tmp = (l / sin(k)) * ((l / tan(k)) * (2.0 / (k * (k * t)))); end
code[t_, l_, k_] := N[(N[(l / N[Sin[k], $MachinePrecision]), $MachinePrecision] * N[(N[(l / N[Tan[k], $MachinePrecision]), $MachinePrecision] * N[(2.0 / N[(k * N[(k * t), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\ell}{\sin k} \cdot \left(\frac{\ell}{\tan k} \cdot \frac{2}{k \cdot \left(k \cdot t\right)}\right)
\end{array}
Initial program 36.1%
associate-*l*36.1%
associate-*l*36.1%
associate-/r*36.1%
associate-/r/35.7%
*-commutative35.7%
times-frac36.5%
+-commutative36.5%
associate--l+42.9%
metadata-eval42.9%
+-rgt-identity42.9%
times-frac46.6%
Simplified46.6%
Taylor expanded in t around 0 84.6%
unpow284.6%
Simplified84.6%
associate-*l/84.7%
associate-*l*88.1%
Applied egg-rr88.1%
*-commutative88.1%
associate-*r/88.1%
associate-*r*84.6%
unpow284.6%
associate-/r*84.7%
unpow284.7%
associate-*l*88.0%
unpow288.0%
associate-/r*87.8%
unpow287.8%
associate-*r*92.7%
Simplified92.7%
Final simplification92.7%
(FPCore (t l k) :precision binary64 (if (<= k 1.35e+68) (* (/ l (sin k)) (* (/ l k) (/ 2.0 (* k (* k t))))) (- (* 2.0 (/ l (/ t (/ l (pow k 4.0))))) (* (/ l k) (/ l (* k t))))))
// Piecewise alternative: the sin-based small-k product below the crossover,
// otherwise a series-derived difference 2 l^2/(k^4 t) - l^2/(k^2 t) written
// with nested quotients to control scaling.
// NOTE(review): crossover 1.35e+68 comes from the generator's sampling,
// not from an analytic bound.
double code(double t, double l, double k) {
double tmp;
if (k <= 1.35e+68) {
tmp = (l / sin(k)) * ((l / k) * (2.0 / (k * (k * t))));
} else {
tmp = (2.0 * (l / (t / (l / pow(k, 4.0))))) - ((l / k) * (l / (k * t)));
}
return tmp;
}
real(8) function code(t, l, k)
real(8), intent (in) :: t
real(8), intent (in) :: l
real(8), intent (in) :: k
real(8) :: tmp
if (k <= 1.35d+68) then
tmp = (l / sin(k)) * ((l / k) * (2.0d0 / (k * (k * t))))
else
tmp = (2.0d0 * (l / (t / (l / (k ** 4.0d0))))) - ((l / k) * (l / (k * t)))
end if
code = tmp
end function
public static double code(double t, double l, double k) {
double tmp;
if (k <= 1.35e+68) {
tmp = (l / Math.sin(k)) * ((l / k) * (2.0 / (k * (k * t))));
} else {
tmp = (2.0 * (l / (t / (l / Math.pow(k, 4.0))))) - ((l / k) * (l / (k * t)));
}
return tmp;
}
def code(t, l, k): tmp = 0 if k <= 1.35e+68: tmp = (l / math.sin(k)) * ((l / k) * (2.0 / (k * (k * t)))) else: tmp = (2.0 * (l / (t / (l / math.pow(k, 4.0))))) - ((l / k) * (l / (k * t))) return tmp
function code(t, l, k) tmp = 0.0 if (k <= 1.35e+68) tmp = Float64(Float64(l / sin(k)) * Float64(Float64(l / k) * Float64(2.0 / Float64(k * Float64(k * t))))); else tmp = Float64(Float64(2.0 * Float64(l / Float64(t / Float64(l / (k ^ 4.0))))) - Float64(Float64(l / k) * Float64(l / Float64(k * t)))); end return tmp end
function tmp_2 = code(t, l, k) tmp = 0.0; if (k <= 1.35e+68) tmp = (l / sin(k)) * ((l / k) * (2.0 / (k * (k * t)))); else tmp = (2.0 * (l / (t / (l / (k ^ 4.0))))) - ((l / k) * (l / (k * t))); end tmp_2 = tmp; end
code[t_, l_, k_] := If[LessEqual[k, 1.35e+68], N[(N[(l / N[Sin[k], $MachinePrecision]), $MachinePrecision] * N[(N[(l / k), $MachinePrecision] * N[(2.0 / N[(k * N[(k * t), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(2.0 * N[(l / N[(t / N[(l / N[Power[k, 4.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(N[(l / k), $MachinePrecision] * N[(l / N[(k * t), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;k \leq 1.35 \cdot 10^{+68}:\\
\;\;\;\;\frac{\ell}{\sin k} \cdot \left(\frac{\ell}{k} \cdot \frac{2}{k \cdot \left(k \cdot t\right)}\right)\\
\mathbf{else}:\\
\;\;\;\;2 \cdot \frac{\ell}{\frac{t}{\frac{\ell}{{k}^{4}}}} - \frac{\ell}{k} \cdot \frac{\ell}{k \cdot t}\\
\end{array}
\end{array}
if k < 1.34999999999999995e68Initial program 36.2%
associate-*l*36.2%
associate-*l*36.2%
associate-/r*36.2%
associate-/r/36.2%
*-commutative36.2%
times-frac36.7%
+-commutative36.7%
associate--l+42.6%
metadata-eval42.6%
+-rgt-identity42.6%
times-frac47.4%
Simplified47.4%
Taylor expanded in t around 0 88.6%
unpow288.6%
Simplified88.6%
associate-*l/88.6%
associate-*l*91.6%
Applied egg-rr91.6%
*-commutative91.6%
associate-*r/91.6%
associate-*r*88.6%
unpow288.6%
associate-/r*88.6%
unpow288.6%
associate-*l*92.1%
unpow292.1%
associate-/r*91.9%
unpow291.9%
associate-*r*96.3%
Simplified96.3%
Taylor expanded in k around 0 84.0%
if 1.34999999999999995e68 < k Initial program 35.7%
+-rgt-identity35.6%
associate-*l*35.6%
mul0-rgt9.9%
distribute-lft-in35.7%
+-rgt-identity35.7%
sub-neg35.7%
+-commutative35.7%
associate-+l+43.9%
metadata-eval43.9%
metadata-eval43.9%
+-rgt-identity43.9%
Simplified43.9%
Taylor expanded in t around 0 72.3%
unpow272.3%
times-frac71.0%
unpow271.0%
*-commutative71.0%
Simplified71.0%
Taylor expanded in k around 0 58.6%
*-commutative58.6%
unpow258.6%
times-frac59.1%
unpow259.1%
Simplified59.1%
Taylor expanded in k around 0 55.3%
+-commutative55.3%
mul-1-neg55.3%
unsub-neg55.3%
unpow255.3%
associate-/l*58.6%
*-commutative58.6%
associate-/l*58.6%
unpow258.6%
unpow258.6%
associate-*r*59.1%
*-commutative59.1%
times-frac60.6%
Simplified60.6%
Final simplification78.3%
(FPCore (t l k) :precision binary64 (if (<= (* l l) 0.0) (* (/ l (sin k)) (* (/ l k) (/ 2.0 (* k (* k t))))) (/ 2.0 (* (/ (* k k) (cos k)) (/ (* t (/ (* k k) l)) l)))))
double code(double t, double l, double k) {
double tmp;
if ((l * l) <= 0.0) {
tmp = (l / sin(k)) * ((l / k) * (2.0 / (k * (k * t))));
} else {
tmp = 2.0 / (((k * k) / cos(k)) * ((t * ((k * k) / l)) / l));
}
return tmp;
}
real(8) function code(t, l, k)
real(8), intent (in) :: t
real(8), intent (in) :: l
real(8), intent (in) :: k
real(8) :: tmp
if ((l * l) <= 0.0d0) then
tmp = (l / sin(k)) * ((l / k) * (2.0d0 / (k * (k * t))))
else
tmp = 2.0d0 / (((k * k) / cos(k)) * ((t * ((k * k) / l)) / l))
end if
code = tmp
end function
public static double code(double t, double l, double k) {
double tmp;
if ((l * l) <= 0.0) {
tmp = (l / Math.sin(k)) * ((l / k) * (2.0 / (k * (k * t))));
} else {
tmp = 2.0 / (((k * k) / Math.cos(k)) * ((t * ((k * k) / l)) / l));
}
return tmp;
}
def code(t, l, k): tmp = 0 if (l * l) <= 0.0: tmp = (l / math.sin(k)) * ((l / k) * (2.0 / (k * (k * t)))) else: tmp = 2.0 / (((k * k) / math.cos(k)) * ((t * ((k * k) / l)) / l)) return tmp
function code(t, l, k) tmp = 0.0 if (Float64(l * l) <= 0.0) tmp = Float64(Float64(l / sin(k)) * Float64(Float64(l / k) * Float64(2.0 / Float64(k * Float64(k * t))))); else tmp = Float64(2.0 / Float64(Float64(Float64(k * k) / cos(k)) * Float64(Float64(t * Float64(Float64(k * k) / l)) / l))); end return tmp end
function tmp_2 = code(t, l, k) tmp = 0.0; if ((l * l) <= 0.0) tmp = (l / sin(k)) * ((l / k) * (2.0 / (k * (k * t)))); else tmp = 2.0 / (((k * k) / cos(k)) * ((t * ((k * k) / l)) / l)); end tmp_2 = tmp; end
code[t_, l_, k_] := If[LessEqual[N[(l * l), $MachinePrecision], 0.0], N[(N[(l / N[Sin[k], $MachinePrecision]), $MachinePrecision] * N[(N[(l / k), $MachinePrecision] * N[(2.0 / N[(k * N[(k * t), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(2.0 / N[(N[(N[(k * k), $MachinePrecision] / N[Cos[k], $MachinePrecision]), $MachinePrecision] * N[(N[(t * N[(N[(k * k), $MachinePrecision] / l), $MachinePrecision]), $MachinePrecision] / l), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\ell \cdot \ell \leq 0:\\
\;\;\;\;\frac{\ell}{\sin k} \cdot \left(\frac{\ell}{k} \cdot \frac{2}{k \cdot \left(k \cdot t\right)}\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{2}{\frac{k \cdot k}{\cos k} \cdot \frac{t \cdot \frac{k \cdot k}{\ell}}{\ell}}\\
\end{array}
\end{array}
if (*.f64 l l) < 0.0Initial program 22.5%
associate-*l*22.5%
associate-*l*22.5%
associate-/r*22.5%
associate-/r/22.5%
*-commutative22.5%
times-frac22.5%
+-commutative22.5%
associate--l+34.9%
metadata-eval34.9%
+-rgt-identity34.9%
times-frac49.8%
Simplified49.8%
Taylor expanded in t around 0 93.2%
unpow293.2%
Simplified93.2%
associate-*l/93.2%
associate-*l*94.9%
Applied egg-rr94.9%
*-commutative94.9%
associate-*r/94.8%
associate-*r*93.2%
unpow293.2%
associate-/r*93.2%
unpow293.2%
associate-*l*96.5%
unpow296.5%
associate-/r*96.5%
unpow296.5%
associate-*r*98.1%
Simplified98.1%
Taylor expanded in k around 0 97.4%
if 0.0 < (*.f64 l l) Initial program 40.0%
+-rgt-identity20.8%
associate-*l*20.8%
mul0-rgt23.3%
distribute-lft-in34.0%
+-rgt-identity40.0%
sub-neg40.0%
+-commutative40.0%
associate-+l+44.7%
metadata-eval44.7%
metadata-eval44.7%
+-rgt-identity44.7%
Simplified44.7%
Taylor expanded in t around 0 82.0%
unpow282.0%
times-frac82.7%
unpow282.7%
*-commutative82.7%
Simplified82.7%
Taylor expanded in k around 0 71.7%
*-commutative71.7%
unpow271.7%
times-frac71.3%
unpow271.3%
Simplified71.3%
associate-*l/73.4%
Applied egg-rr73.4%
Final simplification78.8%
(FPCore (t l k) :precision binary64 (if (<= k 2.7e-37) (* (/ l (sin k)) (* (/ l k) (/ 2.0 (* k (* k t))))) (/ 2.0 (* (/ (* k k) (cos k)) (* (/ (* k k) l) (/ t l))))))
double code(double t, double l, double k) {
double tmp;
if (k <= 2.7e-37) {
tmp = (l / sin(k)) * ((l / k) * (2.0 / (k * (k * t))));
} else {
tmp = 2.0 / (((k * k) / cos(k)) * (((k * k) / l) * (t / l)));
}
return tmp;
}
real(8) function code(t, l, k)
real(8), intent (in) :: t
real(8), intent (in) :: l
real(8), intent (in) :: k
real(8) :: tmp
if (k <= 2.7d-37) then
tmp = (l / sin(k)) * ((l / k) * (2.0d0 / (k * (k * t))))
else
tmp = 2.0d0 / (((k * k) / cos(k)) * (((k * k) / l) * (t / l)))
end if
code = tmp
end function
public static double code(double t, double l, double k) {
double tmp;
if (k <= 2.7e-37) {
tmp = (l / Math.sin(k)) * ((l / k) * (2.0 / (k * (k * t))));
} else {
tmp = 2.0 / (((k * k) / Math.cos(k)) * (((k * k) / l) * (t / l)));
}
return tmp;
}
def code(t, l, k): tmp = 0 if k <= 2.7e-37: tmp = (l / math.sin(k)) * ((l / k) * (2.0 / (k * (k * t)))) else: tmp = 2.0 / (((k * k) / math.cos(k)) * (((k * k) / l) * (t / l))) return tmp
function code(t, l, k) tmp = 0.0 if (k <= 2.7e-37) tmp = Float64(Float64(l / sin(k)) * Float64(Float64(l / k) * Float64(2.0 / Float64(k * Float64(k * t))))); else tmp = Float64(2.0 / Float64(Float64(Float64(k * k) / cos(k)) * Float64(Float64(Float64(k * k) / l) * Float64(t / l)))); end return tmp end
function tmp_2 = code(t, l, k) tmp = 0.0; if (k <= 2.7e-37) tmp = (l / sin(k)) * ((l / k) * (2.0 / (k * (k * t)))); else tmp = 2.0 / (((k * k) / cos(k)) * (((k * k) / l) * (t / l))); end tmp_2 = tmp; end
code[t_, l_, k_] := If[LessEqual[k, 2.7e-37], N[(N[(l / N[Sin[k], $MachinePrecision]), $MachinePrecision] * N[(N[(l / k), $MachinePrecision] * N[(2.0 / N[(k * N[(k * t), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(2.0 / N[(N[(N[(k * k), $MachinePrecision] / N[Cos[k], $MachinePrecision]), $MachinePrecision] * N[(N[(N[(k * k), $MachinePrecision] / l), $MachinePrecision] * N[(t / l), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;k \leq 2.7 \cdot 10^{-37}:\\
\;\;\;\;\frac{\ell}{\sin k} \cdot \left(\frac{\ell}{k} \cdot \frac{2}{k \cdot \left(k \cdot t\right)}\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{2}{\frac{k \cdot k}{\cos k} \cdot \left(\frac{k \cdot k}{\ell} \cdot \frac{t}{\ell}\right)}\\
\end{array}
\end{array}
if k < 2.70000000000000016e-37Initial program 37.6%
associate-*l*37.6%
associate-*l*37.6%
associate-/r*37.6%
associate-/r/37.6%
*-commutative37.6%
times-frac38.2%
+-commutative38.2%
associate--l+43.5%
metadata-eval43.5%
+-rgt-identity43.5%
times-frac48.8%
Simplified48.8%
Taylor expanded in t around 0 88.0%
unpow288.0%
Simplified88.0%
associate-*l/88.0%
associate-*l*91.4%
Applied egg-rr91.4%
*-commutative91.4%
associate-*r/91.3%
associate-*r*88.0%
unpow288.0%
associate-/r*88.0%
unpow288.0%
associate-*l*91.6%
unpow291.6%
associate-/r*91.6%
unpow291.6%
associate-*r*96.5%
Simplified96.5%
Taylor expanded in k around 0 85.1%
if 2.70000000000000016e-37 < k Initial program 32.7%
+-rgt-identity30.1%
associate-*l*30.1%
mul0-rgt10.2%
distribute-lft-in32.7%
+-rgt-identity32.7%
sub-neg32.7%
+-commutative32.7%
associate-+l+41.5%
metadata-eval41.5%
metadata-eval41.5%
+-rgt-identity41.5%
Simplified41.5%
Taylor expanded in t around 0 77.2%
unpow277.2%
times-frac76.3%
unpow276.3%
*-commutative76.3%
Simplified76.3%
Taylor expanded in k around 0 62.0%
*-commutative62.0%
unpow262.0%
times-frac62.4%
unpow262.4%
Simplified62.4%
Final simplification78.0%
(FPCore (t l k) :precision binary64 (* (/ 2.0 (* t (* k k))) (* (/ l (sin k)) (/ l k))))
// Simplified alternative: 2/(t k^2) * (l/sin k) * (l/k),
// i.e. 2 l^2 / (t k^3 sin k).
double code(double t, double l, double k) {
return (2.0 / (t * (k * k))) * ((l / sin(k)) * (l / k));
}
real(8) function code(t, l, k)
real(8), intent (in) :: t
real(8), intent (in) :: l
real(8), intent (in) :: k
code = (2.0d0 / (t * (k * k))) * ((l / sin(k)) * (l / k))
end function
public static double code(double t, double l, double k) {
return (2.0 / (t * (k * k))) * ((l / Math.sin(k)) * (l / k));
}
def code(t, l, k): return (2.0 / (t * (k * k))) * ((l / math.sin(k)) * (l / k))
function code(t, l, k) return Float64(Float64(2.0 / Float64(t * Float64(k * k))) * Float64(Float64(l / sin(k)) * Float64(l / k))) end
function tmp = code(t, l, k) tmp = (2.0 / (t * (k * k))) * ((l / sin(k)) * (l / k)); end
code[t_, l_, k_] := N[(N[(2.0 / N[(t * N[(k * k), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(N[(l / N[Sin[k], $MachinePrecision]), $MachinePrecision] * N[(l / k), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{2}{t \cdot \left(k \cdot k\right)} \cdot \left(\frac{\ell}{\sin k} \cdot \frac{\ell}{k}\right)
\end{array}
Initial program 36.1%
associate-*l*36.1%
associate-*l*36.1%
associate-/r*36.1%
associate-/r/35.7%
*-commutative35.7%
times-frac36.5%
+-commutative36.5%
associate--l+42.9%
metadata-eval42.9%
+-rgt-identity42.9%
times-frac46.6%
Simplified46.6%
Taylor expanded in t around 0 84.6%
unpow284.6%
Simplified84.6%
Taylor expanded in k around 0 75.5%
Final simplification75.5%
(FPCore (t l k) :precision binary64 (* (/ l (sin k)) (* (/ l k) (/ 2.0 (* k (* k t))))))
double code(double t, double l, double k) {
return (l / sin(k)) * ((l / k) * (2.0 / (k * (k * t))));
}
real(8) function code(t, l, k)
real(8), intent (in) :: t
real(8), intent (in) :: l
real(8), intent (in) :: k
code = (l / sin(k)) * ((l / k) * (2.0d0 / (k * (k * t))))
end function
public static double code(double t, double l, double k) {
return (l / Math.sin(k)) * ((l / k) * (2.0 / (k * (k * t))));
}
def code(t, l, k): return (l / math.sin(k)) * ((l / k) * (2.0 / (k * (k * t))))
function code(t, l, k) return Float64(Float64(l / sin(k)) * Float64(Float64(l / k) * Float64(2.0 / Float64(k * Float64(k * t))))) end
function tmp = code(t, l, k) tmp = (l / sin(k)) * ((l / k) * (2.0 / (k * (k * t)))); end
code[t_, l_, k_] := N[(N[(l / N[Sin[k], $MachinePrecision]), $MachinePrecision] * N[(N[(l / k), $MachinePrecision] * N[(2.0 / N[(k * N[(k * t), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\ell}{\sin k} \cdot \left(\frac{\ell}{k} \cdot \frac{2}{k \cdot \left(k \cdot t\right)}\right)
\end{array}
Initial program 36.1%
associate-*l*36.1%
associate-*l*36.1%
associate-/r*36.1%
associate-/r/35.7%
*-commutative35.7%
times-frac36.5%
+-commutative36.5%
associate--l+42.9%
metadata-eval42.9%
+-rgt-identity42.9%
times-frac46.6%
Simplified46.6%
Taylor expanded in t around 0 84.6%
unpow284.6%
Simplified84.6%
associate-*l/84.7%
associate-*l*88.1%
Applied egg-rr88.1%
*-commutative88.1%
associate-*r/88.1%
associate-*r*84.6%
unpow284.6%
associate-/r*84.7%
unpow284.7%
associate-*l*88.0%
unpow288.0%
associate-/r*87.8%
unpow287.8%
associate-*r*92.7%
Simplified92.7%
Taylor expanded in k around 0 77.2%
Final simplification77.2%
(FPCore (t l k) :precision binary64 (* 2.0 (* (/ l (pow k 4.0)) (/ l t))))
double code(double t, double l, double k) {
return 2.0 * ((l / pow(k, 4.0)) * (l / t));
}
real(8) function code(t, l, k)
real(8), intent (in) :: t
real(8), intent (in) :: l
real(8), intent (in) :: k
code = 2.0d0 * ((l / (k ** 4.0d0)) * (l / t))
end function
public static double code(double t, double l, double k) {
return 2.0 * ((l / Math.pow(k, 4.0)) * (l / t));
}
def code(t, l, k): return 2.0 * ((l / math.pow(k, 4.0)) * (l / t))
function code(t, l, k) return Float64(2.0 * Float64(Float64(l / (k ^ 4.0)) * Float64(l / t))) end
function tmp = code(t, l, k) tmp = 2.0 * ((l / (k ^ 4.0)) * (l / t)); end
code[t_, l_, k_] := N[(2.0 * N[(N[(l / N[Power[k, 4.0], $MachinePrecision]), $MachinePrecision] * N[(l / t), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
2 \cdot \left(\frac{\ell}{{k}^{4}} \cdot \frac{\ell}{t}\right)
\end{array}
Initial program 36.1%
associate-*l*36.1%
associate-*l*36.1%
associate-/r*36.1%
associate-/r/35.7%
*-commutative35.7%
times-frac36.5%
+-commutative36.5%
associate--l+42.9%
metadata-eval42.9%
+-rgt-identity42.9%
times-frac46.6%
Simplified46.6%
Taylor expanded in t around 0 84.6%
unpow284.6%
Simplified84.6%
Taylor expanded in k around 0 67.5%
*-commutative67.5%
associate-/r*66.7%
unpow266.7%
Simplified66.7%
Taylor expanded in l around 0 67.5%
unpow267.5%
times-frac70.1%
Simplified70.1%
Final simplification70.1%
(FPCore (t l k) :precision binary64 (* 2.0 (/ l (/ t (/ l (pow k 4.0))))))
double code(double t, double l, double k) {
return 2.0 * (l / (t / (l / pow(k, 4.0))));
}
real(8) function code(t, l, k)
real(8), intent (in) :: t
real(8), intent (in) :: l
real(8), intent (in) :: k
code = 2.0d0 * (l / (t / (l / (k ** 4.0d0))))
end function
public static double code(double t, double l, double k) {
return 2.0 * (l / (t / (l / Math.pow(k, 4.0))));
}
def code(t, l, k): return 2.0 * (l / (t / (l / math.pow(k, 4.0))))
function code(t, l, k) return Float64(2.0 * Float64(l / Float64(t / Float64(l / (k ^ 4.0))))) end
function tmp = code(t, l, k) tmp = 2.0 * (l / (t / (l / (k ^ 4.0)))); end
code[t_, l_, k_] := N[(2.0 * N[(l / N[(t / N[(l / N[Power[k, 4.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
2 \cdot \frac{\ell}{\frac{t}{\frac{\ell}{{k}^{4}}}}
\end{array}
Initial program 36.1%
associate-*l*36.1%
associate-*l*36.1%
associate-/r*36.1%
associate-/r/35.7%
*-commutative35.7%
times-frac36.5%
+-commutative36.5%
associate--l+42.9%
metadata-eval42.9%
+-rgt-identity42.9%
times-frac46.6%
Simplified46.6%
Taylor expanded in t around 0 84.6%
unpow284.6%
Simplified84.6%
associate-*l/84.7%
associate-*l*88.1%
Applied egg-rr88.1%
*-commutative88.1%
associate-*r/88.1%
associate-*r*84.6%
unpow284.6%
associate-/r*84.7%
unpow284.7%
associate-*l*88.0%
unpow288.0%
associate-/r*87.8%
unpow287.8%
associate-*r*92.7%
Simplified92.7%
associate-*l/92.7%
Applied egg-rr92.7%
Taylor expanded in k around 0 67.5%
unpow267.5%
associate-/l*72.1%
*-commutative72.1%
associate-/l*72.2%
Simplified72.2%
Final simplification72.2%
herbie shell --seed 2023213
(FPCore (t l k)
:name "Toniolo and Linder, Equation (10-)"
:precision binary64
(/ 2.0 (* (* (* (/ (pow t 3.0) (* l l)) (sin k)) (tan k)) (- (+ 1.0 (pow (/ k t) 2.0)) 1.0))))