
(FPCore (t l k) :precision binary64 (/ 2.0 (* (* (* (/ (pow t 3.0) (* l l)) (sin k)) (tan k)) (- (+ 1.0 (pow (/ k t) 2.0)) 1.0))))
/* Original expression (Toniolo & Linder, Eq. (10-)), binary64:
 * 2 / ((t^3/l^2) * sin(k) * tan(k) * ((1 + (k/t)^2) - 1)).
 * NOTE(review): the trailing (1 + (k/t)^2) - 1 factor equals (k/t)^2 in
 * exact arithmetic but cancels catastrophically in floating point when
 * (k/t)^2 << 1 — this is the inaccuracy the alternatives below address. */
double code(double t, double l, double k) {
return 2.0 / ((((pow(t, 3.0) / (l * l)) * sin(k)) * tan(k)) * ((1.0 + pow((k / t), 2.0)) - 1.0));
}
! Original expression (Toniolo & Linder, Eq. (10-)), binary64:
!   2 / ((t**3/l**2) * sin(k) * tan(k) * ((1 + (k/t)**2) - 1))
! NOTE(review): (1 + (k/t)**2) - 1 equals (k/t)**2 exactly but cancels
! badly in floating point when (k/t)**2 << 1.
real(8) function code(t, l, k)
    implicit none
    real(8), intent (in) :: t   ! see report header; units unknown
    real(8), intent (in) :: l
    real(8), intent (in) :: k
    code = 2.0d0 / (((((t ** 3.0d0) / (l * l)) * sin(k)) * tan(k)) * ((1.0d0 + ((k / t) ** 2.0d0)) - 1.0d0))
end function code
// Original expression (Toniolo & Linder, Eq. (10-)) in binary64.
// NOTE(review): (1 + (k/t)^2) - 1 cancels catastrophically for small (k/t)^2.
public static double code(double t, double l, double k) {
return 2.0 / ((((Math.pow(t, 3.0) / (l * l)) * Math.sin(k)) * Math.tan(k)) * ((1.0 + Math.pow((k / t), 2.0)) - 1.0));
}
# Original expression (Toniolo & Linder, Eq. (10-)) in binary64.
# NOTE(review): (1 + (k/t)**2) - 1 cancels catastrophically for small (k/t)**2.
def code(t, l, k): return 2.0 / ((((math.pow(t, 3.0) / (l * l)) * math.sin(k)) * math.tan(k)) * ((1.0 + math.pow((k / t), 2.0)) - 1.0))
# Original expression (Toniolo & Linder, Eq. (10-)); explicit Float64 wrapping
# pins every intermediate to binary64. The (1 + (k/t)^2) - 1 factor cancels
# badly for small (k/t)^2.
function code(t, l, k) return Float64(2.0 / Float64(Float64(Float64(Float64((t ^ 3.0) / Float64(l * l)) * sin(k)) * tan(k)) * Float64(Float64(1.0 + (Float64(k / t) ^ 2.0)) - 1.0))) end
% Original expression (Toniolo & Linder, Eq. (10-)) in double precision.
% The (1 + (k/t)^2) - 1 factor cancels badly for small (k/t)^2.
function tmp = code(t, l, k) tmp = 2.0 / (((((t ^ 3.0) / (l * l)) * sin(k)) * tan(k)) * ((1.0 + ((k / t) ^ 2.0)) - 1.0)); end
(* Original expression (Toniolo & Linder, Eq. (10-)); each subexpression is
   rounded to $MachinePrecision to emulate binary64 evaluation. *)
code[t_, l_, k_] := N[(2.0 / N[(N[(N[(N[(N[Power[t, 3.0], $MachinePrecision] / N[(l * l), $MachinePrecision]), $MachinePrecision] * N[Sin[k], $MachinePrecision]), $MachinePrecision] * N[Tan[k], $MachinePrecision]), $MachinePrecision] * N[(N[(1.0 + N[Power[N[(k / t), $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision] - 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{2}{\left(\left(\frac{{t}^{3}}{\ell \cdot \ell} \cdot \sin k\right) \cdot \tan k\right) \cdot \left(\left(1 + {\left(\frac{k}{t}\right)}^{2}\right) - 1\right)}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 12 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (t l k) :precision binary64 (/ 2.0 (* (* (* (/ (pow t 3.0) (* l l)) (sin k)) (tan k)) (- (+ 1.0 (pow (/ k t) 2.0)) 1.0))))
double code(double t, double l, double k) {
return 2.0 / ((((pow(t, 3.0) / (l * l)) * sin(k)) * tan(k)) * ((1.0 + pow((k / t), 2.0)) - 1.0));
}
! Original expression (duplicate listing from the report), binary64:
!   2 / ((t**3/l**2) * sin(k) * tan(k) * ((1 + (k/t)**2) - 1))
! NOTE(review): (1 + (k/t)**2) - 1 cancels badly when (k/t)**2 << 1.
real(8) function code(t, l, k)
    implicit none
    real(8), intent (in) :: t
    real(8), intent (in) :: l
    real(8), intent (in) :: k
    code = 2.0d0 / (((((t ** 3.0d0) / (l * l)) * sin(k)) * tan(k)) * ((1.0d0 + ((k / t) ** 2.0d0)) - 1.0d0))
end function code
public static double code(double t, double l, double k) {
return 2.0 / ((((Math.pow(t, 3.0) / (l * l)) * Math.sin(k)) * Math.tan(k)) * ((1.0 + Math.pow((k / t), 2.0)) - 1.0));
}
def code(t, l, k): return 2.0 / ((((math.pow(t, 3.0) / (l * l)) * math.sin(k)) * math.tan(k)) * ((1.0 + math.pow((k / t), 2.0)) - 1.0))
function code(t, l, k) return Float64(2.0 / Float64(Float64(Float64(Float64((t ^ 3.0) / Float64(l * l)) * sin(k)) * tan(k)) * Float64(Float64(1.0 + (Float64(k / t) ^ 2.0)) - 1.0))) end
function tmp = code(t, l, k) tmp = 2.0 / (((((t ^ 3.0) / (l * l)) * sin(k)) * tan(k)) * ((1.0 + ((k / t) ^ 2.0)) - 1.0)); end
code[t_, l_, k_] := N[(2.0 / N[(N[(N[(N[(N[Power[t, 3.0], $MachinePrecision] / N[(l * l), $MachinePrecision]), $MachinePrecision] * N[Sin[k], $MachinePrecision]), $MachinePrecision] * N[Tan[k], $MachinePrecision]), $MachinePrecision] * N[(N[(1.0 + N[Power[N[(k / t), $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision] - 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{2}{\left(\left(\frac{{t}^{3}}{\ell \cdot \ell} \cdot \sin k\right) \cdot \tan k\right) \cdot \left(\left(1 + {\left(\frac{k}{t}\right)}^{2}\right) - 1\right)}
\end{array}
(FPCore (t l k) :precision binary64 (* (/ (/ (* 2.0 l) (sin k)) k) (* (/ l (tan k)) (/ 1.0 (* k t)))))
double code(double t, double l, double k) {
return (((2.0 * l) / sin(k)) / k) * ((l / tan(k)) * (1.0 / (k * t)));
}
! Herbie alternative: rearranged product, mathematically equal to
!   2*l**2 / (k**2 * t * sin(k) * tan(k)),
! which avoids the cancelling (1 + (k/t)**2) - 1 factor of the original.
real(8) function code(t, l, k)
    implicit none
    real(8), intent (in) :: t
    real(8), intent (in) :: l
    real(8), intent (in) :: k
    code = (((2.0d0 * l) / sin(k)) / k) * ((l / tan(k)) * (1.0d0 / (k * t)))
end function code
public static double code(double t, double l, double k) {
return (((2.0 * l) / Math.sin(k)) / k) * ((l / Math.tan(k)) * (1.0 / (k * t)));
}
def code(t, l, k): return (((2.0 * l) / math.sin(k)) / k) * ((l / math.tan(k)) * (1.0 / (k * t)))
function code(t, l, k) return Float64(Float64(Float64(Float64(2.0 * l) / sin(k)) / k) * Float64(Float64(l / tan(k)) * Float64(1.0 / Float64(k * t)))) end
function tmp = code(t, l, k) tmp = (((2.0 * l) / sin(k)) / k) * ((l / tan(k)) * (1.0 / (k * t))); end
code[t_, l_, k_] := N[(N[(N[(N[(2.0 * l), $MachinePrecision] / N[Sin[k], $MachinePrecision]), $MachinePrecision] / k), $MachinePrecision] * N[(N[(l / N[Tan[k], $MachinePrecision]), $MachinePrecision] * N[(1.0 / N[(k * t), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\frac{2 \cdot \ell}{\sin k}}{k} \cdot \left(\frac{\ell}{\tan k} \cdot \frac{1}{k \cdot t}\right)
\end{array}
Initial program 35.1%
associate-*l*35.1%
associate-*l*35.5%
associate-/r*35.5%
associate-/r/35.5%
*-commutative35.5%
times-frac35.2%
+-commutative35.2%
associate--l+46.1%
metadata-eval46.1%
+-rgt-identity46.1%
times-frac48.4%
Simplified48.4%
Taylor expanded in t around 0 81.0%
unpow281.0%
associate-*l*84.2%
Simplified84.2%
associate-*l/84.3%
Applied egg-rr84.3%
associate-*r*84.3%
times-frac98.3%
associate-*r/98.3%
Simplified98.3%
div-inv98.3%
Applied egg-rr98.3%
Final simplification98.3%
(FPCore (t l k) :precision binary64 (* (/ (/ l (tan k)) (* k t)) (* 2.0 (/ l (* k (sin k))))))
double code(double t, double l, double k) {
return ((l / tan(k)) / (k * t)) * (2.0 * (l / (k * sin(k))));
}
! Herbie alternative: another factoring of 2*l**2/(k**2*t*sin(k)*tan(k));
! different association order trades intermediate over/underflow ranges.
real(8) function code(t, l, k)
    implicit none
    real(8), intent (in) :: t
    real(8), intent (in) :: l
    real(8), intent (in) :: k
    code = ((l / tan(k)) / (k * t)) * (2.0d0 * (l / (k * sin(k))))
end function code
public static double code(double t, double l, double k) {
return ((l / Math.tan(k)) / (k * t)) * (2.0 * (l / (k * Math.sin(k))));
}
def code(t, l, k): return ((l / math.tan(k)) / (k * t)) * (2.0 * (l / (k * math.sin(k))))
function code(t, l, k) return Float64(Float64(Float64(l / tan(k)) / Float64(k * t)) * Float64(2.0 * Float64(l / Float64(k * sin(k))))) end
function tmp = code(t, l, k) tmp = ((l / tan(k)) / (k * t)) * (2.0 * (l / (k * sin(k)))); end
code[t_, l_, k_] := N[(N[(N[(l / N[Tan[k], $MachinePrecision]), $MachinePrecision] / N[(k * t), $MachinePrecision]), $MachinePrecision] * N[(2.0 * N[(l / N[(k * N[Sin[k], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\frac{\ell}{\tan k}}{k \cdot t} \cdot \left(2 \cdot \frac{\ell}{k \cdot \sin k}\right)
\end{array}
Initial program 35.1%
associate-*l*35.1%
associate-*l*35.5%
associate-/r*35.5%
associate-/r/35.5%
*-commutative35.5%
times-frac35.2%
+-commutative35.2%
associate--l+46.1%
metadata-eval46.1%
+-rgt-identity46.1%
times-frac48.4%
Simplified48.4%
Taylor expanded in t around 0 81.0%
unpow281.0%
associate-*l*84.2%
Simplified84.2%
associate-*l/84.3%
Applied egg-rr84.3%
associate-*r*84.3%
times-frac98.3%
associate-*r/98.3%
Simplified98.3%
Taylor expanded in l around 0 97.8%
Final simplification97.8%
(FPCore (t l k) :precision binary64 (* (/ (/ l (tan k)) (* k t)) (* 2.0 (/ (/ l k) (sin k)))))
double code(double t, double l, double k) {
return ((l / tan(k)) / (k * t)) * (2.0 * ((l / k) / sin(k)));
}
! Herbie alternative: as the previous factoring but with l/k divided
! before the division by sin(k), reshaping the overflow behaviour.
real(8) function code(t, l, k)
    implicit none
    real(8), intent (in) :: t
    real(8), intent (in) :: l
    real(8), intent (in) :: k
    code = ((l / tan(k)) / (k * t)) * (2.0d0 * ((l / k) / sin(k)))
end function code
public static double code(double t, double l, double k) {
return ((l / Math.tan(k)) / (k * t)) * (2.0 * ((l / k) / Math.sin(k)));
}
def code(t, l, k): return ((l / math.tan(k)) / (k * t)) * (2.0 * ((l / k) / math.sin(k)))
function code(t, l, k) return Float64(Float64(Float64(l / tan(k)) / Float64(k * t)) * Float64(2.0 * Float64(Float64(l / k) / sin(k)))) end
function tmp = code(t, l, k) tmp = ((l / tan(k)) / (k * t)) * (2.0 * ((l / k) / sin(k))); end
code[t_, l_, k_] := N[(N[(N[(l / N[Tan[k], $MachinePrecision]), $MachinePrecision] / N[(k * t), $MachinePrecision]), $MachinePrecision] * N[(2.0 * N[(N[(l / k), $MachinePrecision] / N[Sin[k], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\frac{\ell}{\tan k}}{k \cdot t} \cdot \left(2 \cdot \frac{\frac{\ell}{k}}{\sin k}\right)
\end{array}
Initial program 35.1%
associate-*l*35.1%
associate-*l*35.5%
associate-/r*35.5%
associate-/r/35.5%
*-commutative35.5%
times-frac35.2%
+-commutative35.2%
associate--l+46.1%
metadata-eval46.1%
+-rgt-identity46.1%
times-frac48.4%
Simplified48.4%
Taylor expanded in t around 0 81.0%
unpow281.0%
associate-*l*84.2%
Simplified84.2%
associate-*l/84.3%
Applied egg-rr84.3%
associate-*r*84.3%
times-frac98.3%
associate-*r/98.3%
Simplified98.3%
Taylor expanded in l around 0 97.8%
associate-/r*98.3%
Simplified98.3%
Final simplification98.3%
(FPCore (t l k) :precision binary64 (* (/ (/ (* 2.0 l) (sin k)) k) (/ (/ l (tan k)) (* k t))))
double code(double t, double l, double k) {
return (((2.0 * l) / sin(k)) / k) * ((l / tan(k)) / (k * t));
}
! Herbie alternative: product of two balanced quotients, mathematically
! equal to 2*l**2 / (k**2 * t * sin(k) * tan(k)).
real(8) function code(t, l, k)
    implicit none
    real(8), intent (in) :: t
    real(8), intent (in) :: l
    real(8), intent (in) :: k
    code = (((2.0d0 * l) / sin(k)) / k) * ((l / tan(k)) / (k * t))
end function code
public static double code(double t, double l, double k) {
return (((2.0 * l) / Math.sin(k)) / k) * ((l / Math.tan(k)) / (k * t));
}
def code(t, l, k): return (((2.0 * l) / math.sin(k)) / k) * ((l / math.tan(k)) / (k * t))
function code(t, l, k) return Float64(Float64(Float64(Float64(2.0 * l) / sin(k)) / k) * Float64(Float64(l / tan(k)) / Float64(k * t))) end
function tmp = code(t, l, k) tmp = (((2.0 * l) / sin(k)) / k) * ((l / tan(k)) / (k * t)); end
code[t_, l_, k_] := N[(N[(N[(N[(2.0 * l), $MachinePrecision] / N[Sin[k], $MachinePrecision]), $MachinePrecision] / k), $MachinePrecision] * N[(N[(l / N[Tan[k], $MachinePrecision]), $MachinePrecision] / N[(k * t), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\frac{2 \cdot \ell}{\sin k}}{k} \cdot \frac{\frac{\ell}{\tan k}}{k \cdot t}
\end{array}
Initial program 35.1%
associate-*l*35.1%
associate-*l*35.5%
associate-/r*35.5%
associate-/r/35.5%
*-commutative35.5%
times-frac35.2%
+-commutative35.2%
associate--l+46.1%
metadata-eval46.1%
+-rgt-identity46.1%
times-frac48.4%
Simplified48.4%
Taylor expanded in t around 0 81.0%
unpow281.0%
associate-*l*84.2%
Simplified84.2%
associate-*l/84.3%
Applied egg-rr84.3%
associate-*r*84.3%
times-frac98.3%
associate-*r/98.3%
Simplified98.3%
Final simplification98.3%
(FPCore (t l k) :precision binary64 (if (<= k 1.5e-75) (* (/ 2.0 (* k (* k t))) (* (/ l k) (/ l (sin k)))) (* 2.0 (* (/ l t) (/ l (pow k 4.0))))))
/* Herbie alternative: piecewise form selected by regime inference.
 * Below the tiny-k threshold, keep the exact rearranged product; above it,
 * use the Taylor-expanded form 2*l^2 / (t*k^4) (per the derivation log).
 * NOTE(review): threshold 1.5e-75 is Herbie's sampled split point — confirm
 * against the target input domain before relying on it. */
double code(double t, double l, double k) {
double tmp;
if (k <= 1.5e-75) {
tmp = (2.0 / (k * (k * t))) * ((l / k) * (l / sin(k)));
} else {
tmp = 2.0 * ((l / t) * (l / pow(k, 4.0)));
}
return tmp;
}
! Herbie alternative: piecewise form selected by regime inference.
! k <= 1.5e-75 : exact rearranged product 2*l**2/(k**3*t*sin(k)/... form)
! otherwise    : Taylor-expanded form 2*l**2/(t*k**4) (per derivation log).
! NOTE(review): threshold is Herbie's sampled split point — confirm for the
! intended input domain.
real(8) function code(t, l, k)
    implicit none
    real(8), intent (in) :: t
    real(8), intent (in) :: l
    real(8), intent (in) :: k
    real(8) :: tmp
    if (k <= 1.5d-75) then
        tmp = (2.0d0 / (k * (k * t))) * ((l / k) * (l / sin(k)))
    else
        tmp = 2.0d0 * ((l / t) * (l / (k ** 4.0d0)))
    end if
    code = tmp
end function code
public static double code(double t, double l, double k) {
double tmp;
if (k <= 1.5e-75) {
tmp = (2.0 / (k * (k * t))) * ((l / k) * (l / Math.sin(k)));
} else {
tmp = 2.0 * ((l / t) * (l / Math.pow(k, 4.0)));
}
return tmp;
}
def code(t, l, k): tmp = 0 if k <= 1.5e-75: tmp = (2.0 / (k * (k * t))) * ((l / k) * (l / math.sin(k))) else: tmp = 2.0 * ((l / t) * (l / math.pow(k, 4.0))) return tmp
function code(t, l, k) tmp = 0.0 if (k <= 1.5e-75) tmp = Float64(Float64(2.0 / Float64(k * Float64(k * t))) * Float64(Float64(l / k) * Float64(l / sin(k)))); else tmp = Float64(2.0 * Float64(Float64(l / t) * Float64(l / (k ^ 4.0)))); end return tmp end
function tmp_2 = code(t, l, k) tmp = 0.0; if (k <= 1.5e-75) tmp = (2.0 / (k * (k * t))) * ((l / k) * (l / sin(k))); else tmp = 2.0 * ((l / t) * (l / (k ^ 4.0))); end tmp_2 = tmp; end
code[t_, l_, k_] := If[LessEqual[k, 1.5e-75], N[(N[(2.0 / N[(k * N[(k * t), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(N[(l / k), $MachinePrecision] * N[(l / N[Sin[k], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(2.0 * N[(N[(l / t), $MachinePrecision] * N[(l / N[Power[k, 4.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;k \leq 1.5 \cdot 10^{-75}:\\
\;\;\;\;\frac{2}{k \cdot \left(k \cdot t\right)} \cdot \left(\frac{\ell}{k} \cdot \frac{\ell}{\sin k}\right)\\
\mathbf{else}:\\
\;\;\;\;2 \cdot \left(\frac{\ell}{t} \cdot \frac{\ell}{{k}^{4}}\right)\\
\end{array}
\end{array}
if k < 1.4999999999999999e-75: Initial program 40.6%
associate-*l*40.7%
associate-*l*40.7%
associate-/r*40.7%
associate-/r/40.6%
*-commutative40.6%
times-frac40.1%
+-commutative40.1%
associate--l+48.7%
metadata-eval48.7%
+-rgt-identity48.7%
times-frac52.5%
Simplified52.5%
Taylor expanded in t around 0 82.0%
unpow282.0%
associate-*l*85.4%
Simplified85.4%
Taylor expanded in k around 0 72.9%
if 1.4999999999999999e-75 < k Initial program 26.1%
associate-*l*26.1%
associate-*l*27.0%
associate-/r*27.0%
associate-/r/27.0%
*-commutative27.0%
times-frac27.2%
+-commutative27.2%
associate--l+41.7%
metadata-eval41.7%
+-rgt-identity41.7%
times-frac41.7%
Simplified41.7%
Taylor expanded in k around 0 64.1%
unpow264.1%
*-commutative64.1%
Simplified64.1%
Taylor expanded in l around 0 64.1%
unpow264.1%
times-frac68.8%
*-commutative68.8%
Simplified68.8%
Final simplification71.3%
(FPCore (t l k)
:precision binary64
(let* ((t_1 (* k (* k t))))
(if (<= l -1.25e+203)
(* (/ 2.0 t_1) (/ (* l (/ l k)) k))
(* (/ (/ (* 2.0 l) (sin k)) k) (/ l t_1)))))
/* Herbie alternative: common subexpression t_1 = k^2*t hoisted; the branch
 * on very large negative l swaps sin(k) handling for a pure quotient form.
 * NOTE(review): split point -1.25e203 comes from Herbie's regime inference —
 * confirm against the target input domain. */
double code(double t, double l, double k) {
double t_1 = k * (k * t);
double tmp;
if (l <= -1.25e+203) {
tmp = (2.0 / t_1) * ((l * (l / k)) / k);
} else {
tmp = (((2.0 * l) / sin(k)) / k) * (l / t_1);
}
return tmp;
}
! Herbie alternative with hoisted subexpression t_1 = k**2 * t.
! For very large negative l the sin(k)-free quotient form is used;
! otherwise the rearranged exact product.
! NOTE(review): split point -1.25e203 is Herbie's regime-inference choice.
real(8) function code(t, l, k)
    implicit none
    real(8), intent (in) :: t
    real(8), intent (in) :: l
    real(8), intent (in) :: k
    real(8) :: t_1
    real(8) :: tmp
    t_1 = k * (k * t)
    if (l <= (-1.25d+203)) then
        tmp = (2.0d0 / t_1) * ((l * (l / k)) / k)
    else
        tmp = (((2.0d0 * l) / sin(k)) / k) * (l / t_1)
    end if
    code = tmp
end function code
public static double code(double t, double l, double k) {
double t_1 = k * (k * t);
double tmp;
if (l <= -1.25e+203) {
tmp = (2.0 / t_1) * ((l * (l / k)) / k);
} else {
tmp = (((2.0 * l) / Math.sin(k)) / k) * (l / t_1);
}
return tmp;
}
def code(t, l, k): t_1 = k * (k * t) tmp = 0 if l <= -1.25e+203: tmp = (2.0 / t_1) * ((l * (l / k)) / k) else: tmp = (((2.0 * l) / math.sin(k)) / k) * (l / t_1) return tmp
function code(t, l, k) t_1 = Float64(k * Float64(k * t)) tmp = 0.0 if (l <= -1.25e+203) tmp = Float64(Float64(2.0 / t_1) * Float64(Float64(l * Float64(l / k)) / k)); else tmp = Float64(Float64(Float64(Float64(2.0 * l) / sin(k)) / k) * Float64(l / t_1)); end return tmp end
function tmp_2 = code(t, l, k) t_1 = k * (k * t); tmp = 0.0; if (l <= -1.25e+203) tmp = (2.0 / t_1) * ((l * (l / k)) / k); else tmp = (((2.0 * l) / sin(k)) / k) * (l / t_1); end tmp_2 = tmp; end
code[t_, l_, k_] := Block[{t$95$1 = N[(k * N[(k * t), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[l, -1.25e+203], N[(N[(2.0 / t$95$1), $MachinePrecision] * N[(N[(l * N[(l / k), $MachinePrecision]), $MachinePrecision] / k), $MachinePrecision]), $MachinePrecision], N[(N[(N[(N[(2.0 * l), $MachinePrecision] / N[Sin[k], $MachinePrecision]), $MachinePrecision] / k), $MachinePrecision] * N[(l / t$95$1), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_1 := k \cdot \left(k \cdot t\right)\\
\mathbf{if}\;\ell \leq -1.25 \cdot 10^{+203}:\\
\;\;\;\;\frac{2}{t_1} \cdot \frac{\ell \cdot \frac{\ell}{k}}{k}\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{2 \cdot \ell}{\sin k}}{k} \cdot \frac{\ell}{t_1}\\
\end{array}
\end{array}
if l < -1.24999999999999999e203: Initial program 25.0%
associate-*l*25.0%
associate-*l*25.0%
associate-/r*25.0%
associate-/r/25.0%
*-commutative25.0%
times-frac25.0%
+-commutative25.0%
associate--l+25.0%
metadata-eval25.0%
+-rgt-identity25.0%
times-frac25.0%
Simplified25.0%
Taylor expanded in t around 0 65.8%
unpow265.8%
associate-*l*76.5%
Simplified76.5%
Taylor expanded in k around 0 60.8%
unpow260.8%
unpow260.8%
times-frac48.4%
Simplified48.4%
associate-*r/67.4%
Applied egg-rr67.4%
if -1.24999999999999999e203 < l Initial program 36.0%
associate-*l*36.0%
associate-*l*36.4%
associate-/r*36.4%
associate-/r/36.4%
*-commutative36.4%
times-frac36.1%
+-commutative36.1%
associate--l+47.9%
metadata-eval47.9%
+-rgt-identity47.9%
times-frac50.4%
Simplified50.4%
Taylor expanded in t around 0 82.3%
unpow282.3%
associate-*l*84.9%
Simplified84.9%
associate-*l/84.9%
Applied egg-rr84.9%
associate-*r*84.9%
times-frac98.5%
associate-*r/98.5%
Simplified98.5%
Taylor expanded in k around 0 72.1%
unpow272.1%
associate-*r*72.6%
Simplified72.6%
Final simplification72.2%
(FPCore (t l k) :precision binary64 (if (<= l -6.5e+204) (* (/ 2.0 (* k (* k t))) (/ (* l (/ l k)) k)) (* (/ (/ (* 2.0 l) (sin k)) k) (/ (/ l k) (* k t)))))
double code(double t, double l, double k) {
double tmp;
if (l <= -6.5e+204) {
tmp = (2.0 / (k * (k * t))) * ((l * (l / k)) / k);
} else {
tmp = (((2.0 * l) / sin(k)) / k) * ((l / k) / (k * t));
}
return tmp;
}
! Herbie alternative: piecewise on very large negative l.
! Large |l| branch avoids sin(k); main branch divides l by k early to
! keep intermediates in range.
! NOTE(review): split point -6.5e204 is Herbie's regime-inference choice.
real(8) function code(t, l, k)
    implicit none
    real(8), intent (in) :: t
    real(8), intent (in) :: l
    real(8), intent (in) :: k
    real(8) :: tmp
    if (l <= (-6.5d+204)) then
        tmp = (2.0d0 / (k * (k * t))) * ((l * (l / k)) / k)
    else
        tmp = (((2.0d0 * l) / sin(k)) / k) * ((l / k) / (k * t))
    end if
    code = tmp
end function code
public static double code(double t, double l, double k) {
double tmp;
if (l <= -6.5e+204) {
tmp = (2.0 / (k * (k * t))) * ((l * (l / k)) / k);
} else {
tmp = (((2.0 * l) / Math.sin(k)) / k) * ((l / k) / (k * t));
}
return tmp;
}
def code(t, l, k): tmp = 0 if l <= -6.5e+204: tmp = (2.0 / (k * (k * t))) * ((l * (l / k)) / k) else: tmp = (((2.0 * l) / math.sin(k)) / k) * ((l / k) / (k * t)) return tmp
function code(t, l, k) tmp = 0.0 if (l <= -6.5e+204) tmp = Float64(Float64(2.0 / Float64(k * Float64(k * t))) * Float64(Float64(l * Float64(l / k)) / k)); else tmp = Float64(Float64(Float64(Float64(2.0 * l) / sin(k)) / k) * Float64(Float64(l / k) / Float64(k * t))); end return tmp end
function tmp_2 = code(t, l, k) tmp = 0.0; if (l <= -6.5e+204) tmp = (2.0 / (k * (k * t))) * ((l * (l / k)) / k); else tmp = (((2.0 * l) / sin(k)) / k) * ((l / k) / (k * t)); end tmp_2 = tmp; end
code[t_, l_, k_] := If[LessEqual[l, -6.5e+204], N[(N[(2.0 / N[(k * N[(k * t), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(N[(l * N[(l / k), $MachinePrecision]), $MachinePrecision] / k), $MachinePrecision]), $MachinePrecision], N[(N[(N[(N[(2.0 * l), $MachinePrecision] / N[Sin[k], $MachinePrecision]), $MachinePrecision] / k), $MachinePrecision] * N[(N[(l / k), $MachinePrecision] / N[(k * t), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\ell \leq -6.5 \cdot 10^{+204}:\\
\;\;\;\;\frac{2}{k \cdot \left(k \cdot t\right)} \cdot \frac{\ell \cdot \frac{\ell}{k}}{k}\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{2 \cdot \ell}{\sin k}}{k} \cdot \frac{\frac{\ell}{k}}{k \cdot t}\\
\end{array}
\end{array}
if l < -6.4999999999999997e204: Initial program 25.0%
associate-*l*25.0%
associate-*l*25.0%
associate-/r*25.0%
associate-/r/25.0%
*-commutative25.0%
times-frac25.0%
+-commutative25.0%
associate--l+25.0%
metadata-eval25.0%
+-rgt-identity25.0%
times-frac25.0%
Simplified25.0%
Taylor expanded in t around 0 65.8%
unpow265.8%
associate-*l*76.5%
Simplified76.5%
Taylor expanded in k around 0 60.8%
unpow260.8%
unpow260.8%
times-frac48.4%
Simplified48.4%
associate-*r/67.4%
Applied egg-rr67.4%
if -6.4999999999999997e204 < l Initial program 36.0%
associate-*l*36.0%
associate-*l*36.4%
associate-/r*36.4%
associate-/r/36.4%
*-commutative36.4%
times-frac36.1%
+-commutative36.1%
associate--l+47.9%
metadata-eval47.9%
+-rgt-identity47.9%
times-frac50.4%
Simplified50.4%
Taylor expanded in t around 0 82.3%
unpow282.3%
associate-*l*84.9%
Simplified84.9%
associate-*l/84.9%
Applied egg-rr84.9%
associate-*r*84.9%
times-frac98.5%
associate-*r/98.5%
Simplified98.5%
Taylor expanded in k around 0 73.4%
Final simplification73.0%
(FPCore (t l k) :precision binary64 (if (<= l -1.05e+192) (* (/ 2.0 (* k (* k t))) (/ (* l (/ l k)) k)) (* (/ (/ l (tan k)) (* k t)) (/ (/ (* 2.0 l) k) k))))
double code(double t, double l, double k) {
double tmp;
if (l <= -1.05e+192) {
tmp = (2.0 / (k * (k * t))) * ((l * (l / k)) / k);
} else {
tmp = ((l / tan(k)) / (k * t)) * (((2.0 * l) / k) / k);
}
return tmp;
}
! Herbie alternative: piecewise on very large negative l; main branch
! factors through tan(k) first and divides 2*l by k twice.
! NOTE(review): split point -1.05e192 is Herbie's regime-inference choice.
real(8) function code(t, l, k)
    implicit none
    real(8), intent (in) :: t
    real(8), intent (in) :: l
    real(8), intent (in) :: k
    real(8) :: tmp
    if (l <= (-1.05d+192)) then
        tmp = (2.0d0 / (k * (k * t))) * ((l * (l / k)) / k)
    else
        tmp = ((l / tan(k)) / (k * t)) * (((2.0d0 * l) / k) / k)
    end if
    code = tmp
end function code
public static double code(double t, double l, double k) {
double tmp;
if (l <= -1.05e+192) {
tmp = (2.0 / (k * (k * t))) * ((l * (l / k)) / k);
} else {
tmp = ((l / Math.tan(k)) / (k * t)) * (((2.0 * l) / k) / k);
}
return tmp;
}
def code(t, l, k): tmp = 0 if l <= -1.05e+192: tmp = (2.0 / (k * (k * t))) * ((l * (l / k)) / k) else: tmp = ((l / math.tan(k)) / (k * t)) * (((2.0 * l) / k) / k) return tmp
function code(t, l, k) tmp = 0.0 if (l <= -1.05e+192) tmp = Float64(Float64(2.0 / Float64(k * Float64(k * t))) * Float64(Float64(l * Float64(l / k)) / k)); else tmp = Float64(Float64(Float64(l / tan(k)) / Float64(k * t)) * Float64(Float64(Float64(2.0 * l) / k) / k)); end return tmp end
function tmp_2 = code(t, l, k) tmp = 0.0; if (l <= -1.05e+192) tmp = (2.0 / (k * (k * t))) * ((l * (l / k)) / k); else tmp = ((l / tan(k)) / (k * t)) * (((2.0 * l) / k) / k); end tmp_2 = tmp; end
code[t_, l_, k_] := If[LessEqual[l, -1.05e+192], N[(N[(2.0 / N[(k * N[(k * t), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(N[(l * N[(l / k), $MachinePrecision]), $MachinePrecision] / k), $MachinePrecision]), $MachinePrecision], N[(N[(N[(l / N[Tan[k], $MachinePrecision]), $MachinePrecision] / N[(k * t), $MachinePrecision]), $MachinePrecision] * N[(N[(N[(2.0 * l), $MachinePrecision] / k), $MachinePrecision] / k), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\ell \leq -1.05 \cdot 10^{+192}:\\
\;\;\;\;\frac{2}{k \cdot \left(k \cdot t\right)} \cdot \frac{\ell \cdot \frac{\ell}{k}}{k}\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{\ell}{\tan k}}{k \cdot t} \cdot \frac{\frac{2 \cdot \ell}{k}}{k}\\
\end{array}
\end{array}
if l < -1.04999999999999997e192: Initial program 26.3%
associate-*l*26.3%
associate-*l*26.3%
associate-/r*26.3%
associate-/r/26.3%
*-commutative26.3%
times-frac26.3%
+-commutative26.3%
associate--l+26.3%
metadata-eval26.3%
+-rgt-identity26.3%
times-frac26.3%
Simplified26.3%
Taylor expanded in t around 0 61.8%
unpow261.8%
associate-*l*71.0%
Simplified71.0%
Taylor expanded in k around 0 57.3%
unpow257.3%
unpow257.3%
times-frac50.8%
Simplified50.8%
associate-*r/67.4%
Applied egg-rr67.4%
if -1.04999999999999997e192 < l Initial program 36.0%
associate-*l*36.0%
associate-*l*36.4%
associate-/r*36.4%
associate-/r/36.4%
*-commutative36.4%
times-frac36.1%
+-commutative36.1%
associate--l+48.0%
metadata-eval48.0%
+-rgt-identity48.0%
times-frac50.6%
Simplified50.6%
Taylor expanded in t around 0 82.9%
unpow282.9%
associate-*l*85.5%
Simplified85.5%
associate-*l/85.6%
Applied egg-rr85.6%
associate-*r*85.6%
times-frac98.4%
associate-*r/98.4%
Simplified98.4%
Taylor expanded in k around 0 73.5%
associate-*r/73.5%
*-commutative73.5%
Simplified73.5%
Final simplification73.0%
(FPCore (t l k) :precision binary64 (/ (* 2.0 (pow (/ l k) 2.0)) (* k (* k t))))
double code(double t, double l, double k) {
return (2.0 * pow((l / k), 2.0)) / (k * (k * t));
}
! Herbie alternative: Taylor-expanded approximation (per derivation log),
!   2*(l/k)**2 / (k**2 * t)  ==  2*l**2 / (t*k**4),
! i.e. sin(k)*tan(k) replaced by its small-k limit k**2.
real(8) function code(t, l, k)
    implicit none
    real(8), intent (in) :: t
    real(8), intent (in) :: l
    real(8), intent (in) :: k
    code = (2.0d0 * ((l / k) ** 2.0d0)) / (k * (k * t))
end function code
public static double code(double t, double l, double k) {
return (2.0 * Math.pow((l / k), 2.0)) / (k * (k * t));
}
def code(t, l, k): return (2.0 * math.pow((l / k), 2.0)) / (k * (k * t))
function code(t, l, k) return Float64(Float64(2.0 * (Float64(l / k) ^ 2.0)) / Float64(k * Float64(k * t))) end
function tmp = code(t, l, k) tmp = (2.0 * ((l / k) ^ 2.0)) / (k * (k * t)); end
code[t_, l_, k_] := N[(N[(2.0 * N[Power[N[(l / k), $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision] / N[(k * N[(k * t), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{2 \cdot {\left(\frac{\ell}{k}\right)}^{2}}{k \cdot \left(k \cdot t\right)}
\end{array}
Initial program 35.1%
associate-*l*35.1%
associate-*l*35.5%
associate-/r*35.5%
associate-/r/35.5%
*-commutative35.5%
times-frac35.2%
+-commutative35.2%
associate--l+46.1%
metadata-eval46.1%
+-rgt-identity46.1%
times-frac48.4%
Simplified48.4%
Taylor expanded in t around 0 81.0%
unpow281.0%
associate-*l*84.2%
Simplified84.2%
Taylor expanded in k around 0 66.0%
unpow266.0%
unpow266.0%
times-frac69.5%
Simplified69.5%
associate-*l/69.5%
pow269.5%
Applied egg-rr69.5%
Final simplification69.5%
(FPCore (t l k) :precision binary64 (* (/ 2.0 (* k (* k t))) (* (/ l k) (/ l k))))
double code(double t, double l, double k) {
return (2.0 / (k * (k * t))) * ((l / k) * (l / k));
}
! Herbie alternative: same approximation as 2*l**2/(t*k**4), written as an
! explicit product (l/k)*(l/k) instead of a pow call.
real(8) function code(t, l, k)
    implicit none
    real(8), intent (in) :: t
    real(8), intent (in) :: l
    real(8), intent (in) :: k
    code = (2.0d0 / (k * (k * t))) * ((l / k) * (l / k))
end function code
public static double code(double t, double l, double k) {
return (2.0 / (k * (k * t))) * ((l / k) * (l / k));
}
def code(t, l, k): return (2.0 / (k * (k * t))) * ((l / k) * (l / k))
function code(t, l, k) return Float64(Float64(2.0 / Float64(k * Float64(k * t))) * Float64(Float64(l / k) * Float64(l / k))) end
function tmp = code(t, l, k) tmp = (2.0 / (k * (k * t))) * ((l / k) * (l / k)); end
code[t_, l_, k_] := N[(N[(2.0 / N[(k * N[(k * t), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(N[(l / k), $MachinePrecision] * N[(l / k), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{2}{k \cdot \left(k \cdot t\right)} \cdot \left(\frac{\ell}{k} \cdot \frac{\ell}{k}\right)
\end{array}
Initial program 35.1%
associate-*l*35.1%
associate-*l*35.5%
associate-/r*35.5%
associate-/r/35.5%
*-commutative35.5%
times-frac35.2%
+-commutative35.2%
associate--l+46.1%
metadata-eval46.1%
+-rgt-identity46.1%
times-frac48.4%
Simplified48.4%
Taylor expanded in t around 0 81.0%
unpow281.0%
associate-*l*84.2%
Simplified84.2%
Taylor expanded in k around 0 66.0%
unpow266.0%
unpow266.0%
times-frac69.5%
Simplified69.5%
Final simplification69.5%
(FPCore (t l k) :precision binary64 (* -0.3333333333333333 (/ (* l l) (* k (* k t)))))
double code(double t, double l, double k) {
return -0.3333333333333333 * ((l * l) / (k * (k * t)));
}
! Herbie alternative from a further Taylor expansion (per derivation log):
!   -(1/3) * l**2 / (k**2 * t).
! Reported accuracy is low (~36%); listed for completeness.
real(8) function code(t, l, k)
    implicit none
    real(8), intent (in) :: t
    real(8), intent (in) :: l
    real(8), intent (in) :: k
    code = (-0.3333333333333333d0) * ((l * l) / (k * (k * t)))
end function code
public static double code(double t, double l, double k) {
return -0.3333333333333333 * ((l * l) / (k * (k * t)));
}
def code(t, l, k): return -0.3333333333333333 * ((l * l) / (k * (k * t)))
function code(t, l, k) return Float64(-0.3333333333333333 * Float64(Float64(l * l) / Float64(k * Float64(k * t)))) end
function tmp = code(t, l, k) tmp = -0.3333333333333333 * ((l * l) / (k * (k * t))); end
code[t_, l_, k_] := N[(-0.3333333333333333 * N[(N[(l * l), $MachinePrecision] / N[(k * N[(k * t), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
-0.3333333333333333 \cdot \frac{\ell \cdot \ell}{k \cdot \left(k \cdot t\right)}
\end{array}
Initial program 35.1%
associate-*l*35.1%
associate-*l*35.5%
associate-/r*35.5%
associate-/r/35.5%
*-commutative35.5%
times-frac35.2%
+-commutative35.2%
associate--l+46.1%
metadata-eval46.1%
+-rgt-identity46.1%
times-frac48.4%
Simplified48.4%
Taylor expanded in t around 0 81.0%
unpow281.0%
associate-*l*84.2%
Simplified84.2%
Taylor expanded in k around 0 56.8%
*-commutative56.8%
fma-def56.8%
unpow256.8%
unpow256.8%
unpow256.8%
times-frac61.8%
Simplified61.8%
Taylor expanded in k around inf 36.4%
*-commutative36.4%
unpow236.4%
associate-*l*36.4%
Simplified36.4%
Taylor expanded in k around 0 35.8%
unpow235.8%
unpow235.8%
associate-*r*36.4%
Simplified36.4%
Final simplification36.4%
(FPCore (t l k) :precision binary64 (* (/ -0.3333333333333333 k) (/ (/ l (/ t l)) k)))
double code(double t, double l, double k) {
return (-0.3333333333333333 / k) * ((l / (t / l)) / k);
}
! Herbie alternative: -(1/3) * l**2 / (k**2 * t) with l**2/t written as
! l / (t/l). Reported accuracy is low (~37%); listed for completeness.
! NOTE(review): t/l can overflow/underflow where l*l would not — the two
! -(1/3) forms are not bitwise interchangeable.
real(8) function code(t, l, k)
    implicit none
    real(8), intent (in) :: t
    real(8), intent (in) :: l
    real(8), intent (in) :: k
    code = ((-0.3333333333333333d0) / k) * ((l / (t / l)) / k)
end function code
public static double code(double t, double l, double k) {
return (-0.3333333333333333 / k) * ((l / (t / l)) / k);
}
def code(t, l, k): return (-0.3333333333333333 / k) * ((l / (t / l)) / k)
function code(t, l, k) return Float64(Float64(-0.3333333333333333 / k) * Float64(Float64(l / Float64(t / l)) / k)) end
function tmp = code(t, l, k) tmp = (-0.3333333333333333 / k) * ((l / (t / l)) / k); end
code[t_, l_, k_] := N[(N[(-0.3333333333333333 / k), $MachinePrecision] * N[(N[(l / N[(t / l), $MachinePrecision]), $MachinePrecision] / k), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{-0.3333333333333333}{k} \cdot \frac{\frac{\ell}{\frac{t}{\ell}}}{k}
\end{array}
Initial program 35.1%
associate-*l*35.1%
associate-*l*35.5%
associate-/r*35.5%
associate-/r/35.5%
*-commutative35.5%
times-frac35.2%
+-commutative35.2%
associate--l+46.1%
metadata-eval46.1%
+-rgt-identity46.1%
times-frac48.4%
Simplified48.4%
Taylor expanded in t around 0 81.0%
unpow281.0%
associate-*l*84.2%
Simplified84.2%
Taylor expanded in k around 0 56.8%
*-commutative56.8%
fma-def56.8%
unpow256.8%
unpow256.8%
unpow256.8%
times-frac61.8%
Simplified61.8%
Taylor expanded in k around inf 36.4%
*-commutative36.4%
unpow236.4%
associate-*l*36.4%
Simplified36.4%
Taylor expanded in k around 0 35.8%
unpow235.8%
associate-*r*36.4%
associate-*r/36.4%
associate-*r*35.8%
unpow235.8%
associate-/l/35.8%
associate-*r/35.8%
unpow235.8%
associate-*l/36.3%
unpow236.3%
times-frac37.0%
associate-/r/37.0%
Simplified37.0%
Final simplification37.0%
herbie shell --seed 2023207
(FPCore (t l k)
:name "Toniolo and Linder, Equation (10-)"
:precision binary64
(/ 2.0 (* (* (* (/ (pow t 3.0) (* l l)) (sin k)) (tan k)) (- (+ 1.0 (pow (/ k t) 2.0)) 1.0))))