
(FPCore (t l k) :precision binary64 (/ 2.0 (* (* (* (/ (pow t 3.0) (* l l)) (sin k)) (tan k)) (- (+ 1.0 (pow (/ k t) 2.0)) 1.0))))
double code(double t, double l, double k) {
return 2.0 / ((((pow(t, 3.0) / (l * l)) * sin(k)) * tan(k)) * ((1.0 + pow((k / t), 2.0)) - 1.0));
}
! Reference (binary64) implementation of the FPCore program above:
!   code = 2 / ((t**3 / l**2) * sin(k) * tan(k) * ((1 + (k/t)**2) - 1))
! NOTE(review): the (1 + x) - 1 factor cancels badly when (k/t)**2 << 1;
! the rewritten alternatives later in this report avoid that cancellation.
real(8) function code(t, l, k)
real(8), intent (in) :: t
real(8), intent (in) :: l
real(8), intent (in) :: k
code = 2.0d0 / (((((t ** 3.0d0) / (l * l)) * sin(k)) * tan(k)) * ((1.0d0 + ((k / t) ** 2.0d0)) - 1.0d0))
end function
/**
 * Evaluates 2 / ((t^3 / l^2) * sin(k) * tan(k) * ((1 + (k/t)^2) - 1)),
 * translated directly from the FPCore spec.  The (1 + x) - 1 factor loses
 * precision when (k/t)^2 is small compared to 1.
 */
public static double code(double t, double l, double k) {
    final double scale = Math.pow(t, 3.0) / (l * l);
    // Same association as the spec: ((scale * sin) * tan).
    final double trig = scale * Math.sin(k) * Math.tan(k);
    final double cancel = (1.0 + Math.pow((k / t), 2.0)) - 1.0;
    return 2.0 / (trig * cancel);
}
def code(t, l, k): return 2.0 / ((((math.pow(t, 3.0) / (l * l)) * math.sin(k)) * math.tan(k)) * ((1.0 + math.pow((k / t), 2.0)) - 1.0))
function code(t, l, k) return Float64(2.0 / Float64(Float64(Float64(Float64((t ^ 3.0) / Float64(l * l)) * sin(k)) * tan(k)) * Float64(Float64(1.0 + (Float64(k / t) ^ 2.0)) - 1.0))) end
function tmp = code(t, l, k) tmp = 2.0 / (((((t ^ 3.0) / (l * l)) * sin(k)) * tan(k)) * ((1.0 + ((k / t) ^ 2.0)) - 1.0)); end
code[t_, l_, k_] := N[(2.0 / N[(N[(N[(N[(N[Power[t, 3.0], $MachinePrecision] / N[(l * l), $MachinePrecision]), $MachinePrecision] * N[Sin[k], $MachinePrecision]), $MachinePrecision] * N[Tan[k], $MachinePrecision]), $MachinePrecision] * N[(N[(1.0 + N[Power[N[(k / t), $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision] - 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{2}{\left(\left(\frac{{t}^{3}}{\ell \cdot \ell} \cdot \sin k\right) \cdot \tan k\right) \cdot \left(\left(1 + {\left(\frac{k}{t}\right)}^{2}\right) - 1\right)}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 8 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (t l k) :precision binary64 (/ 2.0 (* (* (* (/ (pow t 3.0) (* l l)) (sin k)) (tan k)) (- (+ 1.0 (pow (/ k t) 2.0)) 1.0))))
/* Alternative 1: byte-for-byte repeat of the initial program (same FPCore
 * expression as above); kept as the baseline in the alternatives table. */
double code(double t, double l, double k) {
return 2.0 / ((((pow(t, 3.0) / (l * l)) * sin(k)) * tan(k)) * ((1.0 + pow((k / t), 2.0)) - 1.0));
}
! Alternative 1: identical to the initial Fortran program (baseline entry
! in the alternatives table).
real(8) function code(t, l, k)
real(8), intent (in) :: t
real(8), intent (in) :: l
real(8), intent (in) :: k
code = 2.0d0 / (((((t ** 3.0d0) / (l * l)) * sin(k)) * tan(k)) * ((1.0d0 + ((k / t) ** 2.0d0)) - 1.0d0))
end function
public static double code(double t, double l, double k) {
return 2.0 / ((((Math.pow(t, 3.0) / (l * l)) * Math.sin(k)) * Math.tan(k)) * ((1.0 + Math.pow((k / t), 2.0)) - 1.0));
}
def code(t, l, k): return 2.0 / ((((math.pow(t, 3.0) / (l * l)) * math.sin(k)) * math.tan(k)) * ((1.0 + math.pow((k / t), 2.0)) - 1.0))
function code(t, l, k) return Float64(2.0 / Float64(Float64(Float64(Float64((t ^ 3.0) / Float64(l * l)) * sin(k)) * tan(k)) * Float64(Float64(1.0 + (Float64(k / t) ^ 2.0)) - 1.0))) end
function tmp = code(t, l, k) tmp = 2.0 / (((((t ^ 3.0) / (l * l)) * sin(k)) * tan(k)) * ((1.0 + ((k / t) ^ 2.0)) - 1.0)); end
code[t_, l_, k_] := N[(2.0 / N[(N[(N[(N[(N[Power[t, 3.0], $MachinePrecision] / N[(l * l), $MachinePrecision]), $MachinePrecision] * N[Sin[k], $MachinePrecision]), $MachinePrecision] * N[Tan[k], $MachinePrecision]), $MachinePrecision] * N[(N[(1.0 + N[Power[N[(k / t), $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision] - 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{2}{\left(\left(\frac{{t}^{3}}{\ell \cdot \ell} \cdot \sin k\right) \cdot \tan k\right) \cdot \left(\left(1 + {\left(\frac{k}{t}\right)}^{2}\right) - 1\right)}
\end{array}
(FPCore (t l k) :precision binary64 (/ (/ (* (/ 2.0 k) (/ (/ l k) t)) (/ (sin k) l)) (tan k)))
/* Alternative 2: algebraically this equals 2*l^2 / (k^2 * t * sin(k) * tan(k)),
 * which is the exact real value of the initial program (the (1+x)-1 factor
 * simplifies to (k/t)^2).  The reassociation removes the catastrophic
 * cancellation; the report logs 96.6% accuracy for this form. */
double code(double t, double l, double k) {
return (((2.0 / k) * ((l / k) / t)) / (sin(k) / l)) / tan(k);
}
! Alternative 2: exact algebraic rearrangement of the initial program,
! equal to 2*l**2 / (k**2 * t * sin(k) * tan(k)); avoids the (1+x)-1
! cancellation.  Reported accuracy: 96.6%.
real(8) function code(t, l, k)
real(8), intent (in) :: t
real(8), intent (in) :: l
real(8), intent (in) :: k
code = (((2.0d0 / k) * ((l / k) / t)) / (sin(k) / l)) / tan(k)
end function
public static double code(double t, double l, double k) {
return (((2.0 / k) * ((l / k) / t)) / (Math.sin(k) / l)) / Math.tan(k);
}
def code(t, l, k): return (((2.0 / k) * ((l / k) / t)) / (math.sin(k) / l)) / math.tan(k)
function code(t, l, k) return Float64(Float64(Float64(Float64(2.0 / k) * Float64(Float64(l / k) / t)) / Float64(sin(k) / l)) / tan(k)) end
function tmp = code(t, l, k) tmp = (((2.0 / k) * ((l / k) / t)) / (sin(k) / l)) / tan(k); end
code[t_, l_, k_] := N[(N[(N[(N[(2.0 / k), $MachinePrecision] * N[(N[(l / k), $MachinePrecision] / t), $MachinePrecision]), $MachinePrecision] / N[(N[Sin[k], $MachinePrecision] / l), $MachinePrecision]), $MachinePrecision] / N[Tan[k], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\frac{\frac{2}{k} \cdot \frac{\frac{\ell}{k}}{t}}{\frac{\sin k}{\ell}}}{\tan k}
\end{array}
Initial program 38.5%
associate-*l*38.5%
associate-*l*38.5%
associate-/r*38.1%
associate-/r/37.3%
*-commutative37.3%
times-frac38.5%
+-commutative38.5%
associate--l+46.3%
metadata-eval46.3%
+-rgt-identity46.3%
times-frac50.6%
Simplified50.6%
Taylor expanded in t around 0 78.3%
unpow278.3%
Simplified78.3%
associate-*l/78.3%
associate-*l*83.2%
Applied egg-rr83.2%
associate-*l/83.2%
associate-*r*90.4%
associate-*r/90.4%
Simplified90.4%
expm1-log1p-u61.1%
expm1-udef47.8%
associate-/l*47.8%
Applied egg-rr47.8%
expm1-def61.1%
expm1-log1p90.4%
associate-*r/89.8%
associate-*l/89.7%
associate-*l/89.7%
times-frac92.1%
associate-/r*96.6%
Simplified96.6%
Final simplification96.6%
(FPCore (t l k) :precision binary64 (* (/ l (tan k)) (* 2.0 (/ (/ l (* k k)) (* t (sin k))))))
/* Alternative 3: another exact rearrangement of 2*l^2 / (k^2 * t * sin(k) * tan(k)).
 * Reported accuracy: 87.2%. */
double code(double t, double l, double k) {
return (l / tan(k)) * (2.0 * ((l / (k * k)) / (t * sin(k))));
}
! Alternative 3: exact rearrangement of 2*l**2 / (k**2 * t * sin(k) * tan(k)).
! Reported accuracy: 87.2%.
real(8) function code(t, l, k)
real(8), intent (in) :: t
real(8), intent (in) :: l
real(8), intent (in) :: k
code = (l / tan(k)) * (2.0d0 * ((l / (k * k)) / (t * sin(k))))
end function
public static double code(double t, double l, double k) {
return (l / Math.tan(k)) * (2.0 * ((l / (k * k)) / (t * Math.sin(k))));
}
def code(t, l, k): return (l / math.tan(k)) * (2.0 * ((l / (k * k)) / (t * math.sin(k))))
function code(t, l, k) return Float64(Float64(l / tan(k)) * Float64(2.0 * Float64(Float64(l / Float64(k * k)) / Float64(t * sin(k))))) end
function tmp = code(t, l, k) tmp = (l / tan(k)) * (2.0 * ((l / (k * k)) / (t * sin(k)))); end
code[t_, l_, k_] := N[(N[(l / N[Tan[k], $MachinePrecision]), $MachinePrecision] * N[(2.0 * N[(N[(l / N[(k * k), $MachinePrecision]), $MachinePrecision] / N[(t * N[Sin[k], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\ell}{\tan k} \cdot \left(2 \cdot \frac{\frac{\ell}{k \cdot k}}{t \cdot \sin k}\right)
\end{array}
Initial program 38.5%
associate-*l*38.5%
associate-*l*38.5%
associate-/r*38.1%
associate-/r/37.3%
*-commutative37.3%
times-frac38.5%
+-commutative38.5%
associate--l+46.3%
metadata-eval46.3%
+-rgt-identity46.3%
times-frac50.6%
Simplified50.6%
Taylor expanded in t around 0 78.3%
unpow278.3%
Simplified78.3%
associate-*l/78.3%
associate-*l*83.2%
Applied egg-rr83.2%
associate-*l/83.2%
associate-*r*90.4%
associate-*r/90.4%
Simplified90.4%
Taylor expanded in k around inf 82.9%
associate-/r*87.2%
unpow287.2%
*-commutative87.2%
Simplified87.2%
Final simplification87.2%
(FPCore (t l k) :precision binary64 (* (* (/ l (tan k)) (/ (/ 2.0 k) (* k t))) (/ l (sin k))))
/* Alternative 4: exact rearrangement of 2*l^2 / (k^2 * t * sin(k) * tan(k));
 * differs from alternatives 2/3 only in association order.
 * Reported accuracy: 90.8%. */
double code(double t, double l, double k) {
return ((l / tan(k)) * ((2.0 / k) / (k * t))) * (l / sin(k));
}
! Alternative 4: exact rearrangement of 2*l**2 / (k**2 * t * sin(k) * tan(k)).
! Reported accuracy: 90.8%.
real(8) function code(t, l, k)
real(8), intent (in) :: t
real(8), intent (in) :: l
real(8), intent (in) :: k
code = ((l / tan(k)) * ((2.0d0 / k) / (k * t))) * (l / sin(k))
end function
public static double code(double t, double l, double k) {
return ((l / Math.tan(k)) * ((2.0 / k) / (k * t))) * (l / Math.sin(k));
}
def code(t, l, k): return ((l / math.tan(k)) * ((2.0 / k) / (k * t))) * (l / math.sin(k))
function code(t, l, k) return Float64(Float64(Float64(l / tan(k)) * Float64(Float64(2.0 / k) / Float64(k * t))) * Float64(l / sin(k))) end
function tmp = code(t, l, k) tmp = ((l / tan(k)) * ((2.0 / k) / (k * t))) * (l / sin(k)); end
code[t_, l_, k_] := N[(N[(N[(l / N[Tan[k], $MachinePrecision]), $MachinePrecision] * N[(N[(2.0 / k), $MachinePrecision] / N[(k * t), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(l / N[Sin[k], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(\frac{\ell}{\tan k} \cdot \frac{\frac{2}{k}}{k \cdot t}\right) \cdot \frac{\ell}{\sin k}
\end{array}
Initial program 38.5%
associate-*l*38.5%
associate-*l*38.5%
associate-/r*38.1%
associate-/r/37.3%
*-commutative37.3%
times-frac38.5%
+-commutative38.5%
associate--l+46.3%
metadata-eval46.3%
+-rgt-identity46.3%
times-frac50.6%
Simplified50.6%
Taylor expanded in t around 0 78.3%
unpow278.3%
Simplified78.3%
associate-*l/78.3%
associate-*l*83.2%
Applied egg-rr83.2%
associate-*l/83.2%
associate-*r*90.4%
associate-*r/90.4%
Simplified90.4%
expm1-log1p-u61.1%
expm1-udef47.8%
associate-/l*47.8%
Applied egg-rr47.8%
expm1-def61.1%
expm1-log1p90.4%
associate-*r/89.8%
associate-*l/89.7%
associate-/l*86.7%
associate-/l/85.9%
times-frac90.4%
associate-*r/90.4%
associate-/r*90.8%
Simplified90.8%
Final simplification90.8%
(FPCore (t l k) :precision binary64 (* (/ (* (/ 2.0 k) (/ (/ l k) t)) (sin k)) (/ l (tan k))))
/* Alternative 5: exact rearrangement of 2*l^2 / (k^2 * t * sin(k) * tan(k)).
 * Reported accuracy: 95.9%. */
double code(double t, double l, double k) {
return (((2.0 / k) * ((l / k) / t)) / sin(k)) * (l / tan(k));
}
! Alternative 5: exact rearrangement of 2*l**2 / (k**2 * t * sin(k) * tan(k)).
! Reported accuracy: 95.9%.
real(8) function code(t, l, k)
real(8), intent (in) :: t
real(8), intent (in) :: l
real(8), intent (in) :: k
code = (((2.0d0 / k) * ((l / k) / t)) / sin(k)) * (l / tan(k))
end function
public static double code(double t, double l, double k) {
return (((2.0 / k) * ((l / k) / t)) / Math.sin(k)) * (l / Math.tan(k));
}
def code(t, l, k): return (((2.0 / k) * ((l / k) / t)) / math.sin(k)) * (l / math.tan(k))
function code(t, l, k) return Float64(Float64(Float64(Float64(2.0 / k) * Float64(Float64(l / k) / t)) / sin(k)) * Float64(l / tan(k))) end
function tmp = code(t, l, k) tmp = (((2.0 / k) * ((l / k) / t)) / sin(k)) * (l / tan(k)); end
code[t_, l_, k_] := N[(N[(N[(N[(2.0 / k), $MachinePrecision] * N[(N[(l / k), $MachinePrecision] / t), $MachinePrecision]), $MachinePrecision] / N[Sin[k], $MachinePrecision]), $MachinePrecision] * N[(l / N[Tan[k], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\frac{2}{k} \cdot \frac{\frac{\ell}{k}}{t}}{\sin k} \cdot \frac{\ell}{\tan k}
\end{array}
Initial program 38.5%
associate-*l*38.5%
associate-*l*38.5%
associate-/r*38.1%
associate-/r/37.3%
*-commutative37.3%
times-frac38.5%
+-commutative38.5%
associate--l+46.3%
metadata-eval46.3%
+-rgt-identity46.3%
times-frac50.6%
Simplified50.6%
Taylor expanded in t around 0 78.3%
unpow278.3%
Simplified78.3%
associate-*l/78.3%
associate-*l*83.2%
Applied egg-rr83.2%
associate-*l/83.2%
associate-*r*90.4%
associate-*r/90.4%
Simplified90.4%
Taylor expanded in k around 0 83.8%
unpow283.8%
associate-*r*90.4%
associate-*r/90.4%
times-frac92.2%
associate-/r*95.9%
Simplified95.9%
Final simplification95.9%
(FPCore (t l k)
:precision binary64
(let* ((t_1 (pow (/ l k) 2.0)))
(if (<= (* l l) 1e+303)
(/ (* 2.0 t_1) (* k (* k t)))
(/ (* 2.0 (+ -0.16666666666666666 (/ 1.0 (* k k)))) (/ t t_1)))))
/* Alternative 6: regime-split approximation (see the two per-branch derivation
 * logs).  Both branches assume small k:
 *   l*l <= 1e303:  2*(l/k)^2 / (k^2*t)            [sin(k)*tan(k) ~ k^2]
 *   otherwise:     2*(1/k^2 - 1/6) * (l/k)^2 / t  [appears to use
 *                                                  1/(sin(k)*tan(k)) ~ 1/k^2 - 1/6]
 * -0.16666666666666666 is -1/6.  The l*l <= 1e303 guard selects the
 * large-|l| branch near the overflow range of l*l.
 * NOTE(review): this is an approximation, not an exact rewrite — confirm its
 * validity regime against the report's sampled accuracies before reuse. */
double code(double t, double l, double k) {
double t_1 = pow((l / k), 2.0);
double tmp;
if ((l * l) <= 1e+303) {
tmp = (2.0 * t_1) / (k * (k * t));
} else {
tmp = (2.0 * (-0.16666666666666666 + (1.0 / (k * k)))) / (t / t_1);
}
return tmp;
}
! Alternative 6: regime-split small-k approximation.
!   l*l <= 1d303:  2*(l/k)**2 / (k**2*t)             (sin(k)*tan(k) ~ k**2)
!   otherwise:     2*(1/k**2 - 1/6) * (l/k)**2 / t
! -0.16666666666666666d0 is -1/6.  The guard switches forms near the
! overflow range of l*l.
! NOTE(review): approximation, not an exact rewrite — valid only for small k.
real(8) function code(t, l, k)
real(8), intent (in) :: t
real(8), intent (in) :: l
real(8), intent (in) :: k
real(8) :: t_1
real(8) :: tmp
t_1 = (l / k) ** 2.0d0
if ((l * l) <= 1d+303) then
tmp = (2.0d0 * t_1) / (k * (k * t))
else
tmp = (2.0d0 * ((-0.16666666666666666d0) + (1.0d0 / (k * k)))) / (t / t_1)
end if
code = tmp
end function
public static double code(double t, double l, double k) {
double t_1 = Math.pow((l / k), 2.0);
double tmp;
if ((l * l) <= 1e+303) {
tmp = (2.0 * t_1) / (k * (k * t));
} else {
tmp = (2.0 * (-0.16666666666666666 + (1.0 / (k * k)))) / (t / t_1);
}
return tmp;
}
def code(t, l, k): t_1 = math.pow((l / k), 2.0) tmp = 0 if (l * l) <= 1e+303: tmp = (2.0 * t_1) / (k * (k * t)) else: tmp = (2.0 * (-0.16666666666666666 + (1.0 / (k * k)))) / (t / t_1) return tmp
function code(t, l, k) t_1 = Float64(l / k) ^ 2.0 tmp = 0.0 if (Float64(l * l) <= 1e+303) tmp = Float64(Float64(2.0 * t_1) / Float64(k * Float64(k * t))); else tmp = Float64(Float64(2.0 * Float64(-0.16666666666666666 + Float64(1.0 / Float64(k * k)))) / Float64(t / t_1)); end return tmp end
function tmp_2 = code(t, l, k) t_1 = (l / k) ^ 2.0; tmp = 0.0; if ((l * l) <= 1e+303) tmp = (2.0 * t_1) / (k * (k * t)); else tmp = (2.0 * (-0.16666666666666666 + (1.0 / (k * k)))) / (t / t_1); end tmp_2 = tmp; end
code[t_, l_, k_] := Block[{t$95$1 = N[Power[N[(l / k), $MachinePrecision], 2.0], $MachinePrecision]}, If[LessEqual[N[(l * l), $MachinePrecision], 1e+303], N[(N[(2.0 * t$95$1), $MachinePrecision] / N[(k * N[(k * t), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(2.0 * N[(-0.16666666666666666 + N[(1.0 / N[(k * k), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(t / t$95$1), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_1 := {\left(\frac{\ell}{k}\right)}^{2}\\
\mathbf{if}\;\ell \cdot \ell \leq 10^{+303}:\\
\;\;\;\;\frac{2 \cdot t_1}{k \cdot \left(k \cdot t\right)}\\
\mathbf{else}:\\
\;\;\;\;\frac{2 \cdot \left(-0.16666666666666666 + \frac{1}{k \cdot k}\right)}{\frac{t}{t_1}}\\
\end{array}
\end{array}
if (*.f64 l l) < 1e303 — Initial program 44.1%
associate-*l*44.1%
associate-*l*44.1%
associate-/r*43.6%
associate-/r/42.5%
*-commutative42.5%
times-frac44.2%
+-commutative44.2%
associate--l+54.6%
metadata-eval54.6%
+-rgt-identity54.6%
times-frac60.7%
Simplified60.7%
Taylor expanded in t around 0 90.7%
unpow290.7%
Simplified90.7%
Taylor expanded in k around 0 70.8%
unpow270.8%
unpow270.8%
times-frac79.4%
Simplified79.4%
associate-*r*82.2%
associate-*l/82.3%
pow282.3%
Applied egg-rr82.3%
if 1e303 < (*.f64 l l) Initial program 24.6%
associate-*l*24.6%
associate-*l*24.6%
associate-/r*24.6%
associate-/r/24.6%
*-commutative24.6%
times-frac24.6%
+-commutative24.6%
associate--l+25.8%
metadata-eval25.8%
+-rgt-identity25.8%
times-frac25.8%
Simplified25.8%
Taylor expanded in t around 0 47.9%
unpow247.9%
Simplified47.9%
Taylor expanded in k around 0 0.2%
*-commutative0.2%
fma-def0.2%
unpow20.2%
unpow20.2%
unpow20.2%
times-frac4.4%
Simplified4.4%
Taylor expanded in l around 0 42.4%
unpow242.4%
associate-*r*45.2%
associate-/l*45.2%
associate-*r/45.2%
sub-neg45.2%
metadata-eval45.2%
+-commutative45.2%
unpow245.2%
associate-*r*42.4%
unpow242.4%
*-commutative42.4%
associate-/l*42.7%
unpow242.7%
unpow242.7%
times-frac51.2%
unpow251.2%
Simplified51.2%
Final simplification73.3%
(FPCore (t l k) :precision binary64 (/ (* 2.0 (pow (/ l k) 2.0)) (* k (* k t))))
/* Alternative 7: small-k approximation 2*(l/k)^2 / (k^2*t) = 2*l^2 / (k^4*t)
 * (derivation log: Taylor expanded in k around 0, replacing sin(k)*tan(k)
 * by k^2).  Reported accuracy: 71.0%. */
double code(double t, double l, double k) {
return (2.0 * pow((l / k), 2.0)) / (k * (k * t));
}
! Alternative 7: small-k approximation 2*l**2 / (k**4*t) (sin(k)*tan(k)
! replaced by k**2 via Taylor expansion).  Reported accuracy: 71.0%.
real(8) function code(t, l, k)
real(8), intent (in) :: t
real(8), intent (in) :: l
real(8), intent (in) :: k
code = (2.0d0 * ((l / k) ** 2.0d0)) / (k * (k * t))
end function
public static double code(double t, double l, double k) {
return (2.0 * Math.pow((l / k), 2.0)) / (k * (k * t));
}
def code(t, l, k): return (2.0 * math.pow((l / k), 2.0)) / (k * (k * t))
function code(t, l, k) return Float64(Float64(2.0 * (Float64(l / k) ^ 2.0)) / Float64(k * Float64(k * t))) end
function tmp = code(t, l, k) tmp = (2.0 * ((l / k) ^ 2.0)) / (k * (k * t)); end
code[t_, l_, k_] := N[(N[(2.0 * N[Power[N[(l / k), $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision] / N[(k * N[(k * t), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{2 \cdot {\left(\frac{\ell}{k}\right)}^{2}}{k \cdot \left(k \cdot t\right)}
\end{array}
Initial program 38.5%
associate-*l*38.5%
associate-*l*38.5%
associate-/r*38.1%
associate-/r/37.3%
*-commutative37.3%
times-frac38.5%
+-commutative38.5%
associate--l+46.3%
metadata-eval46.3%
+-rgt-identity46.3%
times-frac50.6%
Simplified50.6%
Taylor expanded in t around 0 78.3%
unpow278.3%
Simplified78.3%
Taylor expanded in k around 0 61.8%
unpow261.8%
unpow261.8%
times-frac69.0%
Simplified69.0%
associate-*r*71.0%
associate-*l/71.0%
pow271.0%
Applied egg-rr71.0%
Final simplification71.0%
(FPCore (t l k) :precision binary64 (* (/ 2.0 (* k (* k t))) (* (/ l k) (/ l k))))
/* Alternative 8: same value as Alternative 7 (2*l^2 / (k^4*t)) but written
 * with explicit products instead of pow.  Reported accuracy: 71.0%. */
double code(double t, double l, double k) {
return (2.0 / (k * (k * t))) * ((l / k) * (l / k));
}
! Alternative 8: same value as Alternative 7 (2*l**2 / (k**4*t)) using
! explicit products instead of **.  Reported accuracy: 71.0%.
real(8) function code(t, l, k)
real(8), intent (in) :: t
real(8), intent (in) :: l
real(8), intent (in) :: k
code = (2.0d0 / (k * (k * t))) * ((l / k) * (l / k))
end function
public static double code(double t, double l, double k) {
return (2.0 / (k * (k * t))) * ((l / k) * (l / k));
}
def code(t, l, k): return (2.0 / (k * (k * t))) * ((l / k) * (l / k))
function code(t, l, k) return Float64(Float64(2.0 / Float64(k * Float64(k * t))) * Float64(Float64(l / k) * Float64(l / k))) end
function tmp = code(t, l, k) tmp = (2.0 / (k * (k * t))) * ((l / k) * (l / k)); end
code[t_, l_, k_] := N[(N[(2.0 / N[(k * N[(k * t), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(N[(l / k), $MachinePrecision] * N[(l / k), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{2}{k \cdot \left(k \cdot t\right)} \cdot \left(\frac{\ell}{k} \cdot \frac{\ell}{k}\right)
\end{array}
Initial program 38.5%
associate-*l*38.5%
associate-*l*38.5%
associate-/r*38.1%
associate-/r/37.3%
*-commutative37.3%
times-frac38.5%
+-commutative38.5%
associate--l+46.3%
metadata-eval46.3%
+-rgt-identity46.3%
times-frac50.6%
Simplified50.6%
Taylor expanded in t around 0 78.3%
unpow278.3%
Simplified78.3%
Taylor expanded in k around 0 61.8%
unpow261.8%
unpow261.8%
times-frac69.0%
Simplified69.0%
Taylor expanded in k around 0 69.0%
unpow269.0%
associate-*r*71.0%
Simplified71.0%
Final simplification71.0%
(FPCore (t l k) :precision binary64 (* -0.3333333333333333 (* (/ l (* k k)) (/ l t))))
/* Alternative 9: -l^2 / (3*k^2*t), from successive Taylor expansions in k
 * (around 0 and inf) per the derivation log.  Lowest-accuracy alternative
 * in the report (33.4%); note the sign differs from the other forms, so
 * this is valid only in its expansion regime. */
double code(double t, double l, double k) {
return -0.3333333333333333 * ((l / (k * k)) * (l / t));
}
! Alternative 9: -l**2 / (3*k**2*t), from Taylor expansions in k per the
! derivation log.  Lowest reported accuracy (33.4%); sign differs from the
! exact forms, so this is valid only in its expansion regime.
real(8) function code(t, l, k)
real(8), intent (in) :: t
real(8), intent (in) :: l
real(8), intent (in) :: k
code = (-0.3333333333333333d0) * ((l / (k * k)) * (l / t))
end function
public static double code(double t, double l, double k) {
return -0.3333333333333333 * ((l / (k * k)) * (l / t));
}
def code(t, l, k): return -0.3333333333333333 * ((l / (k * k)) * (l / t))
function code(t, l, k) return Float64(-0.3333333333333333 * Float64(Float64(l / Float64(k * k)) * Float64(l / t))) end
function tmp = code(t, l, k) tmp = -0.3333333333333333 * ((l / (k * k)) * (l / t)); end
code[t_, l_, k_] := N[(-0.3333333333333333 * N[(N[(l / N[(k * k), $MachinePrecision]), $MachinePrecision] * N[(l / t), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
-0.3333333333333333 \cdot \left(\frac{\ell}{k \cdot k} \cdot \frac{\ell}{t}\right)
\end{array}
Initial program 38.5%
associate-*l*38.5%
associate-*l*38.5%
associate-/r*38.1%
associate-/r/37.3%
*-commutative37.3%
times-frac38.5%
+-commutative38.5%
associate--l+46.3%
metadata-eval46.3%
+-rgt-identity46.3%
times-frac50.6%
Simplified50.6%
Taylor expanded in t around 0 78.3%
unpow278.3%
Simplified78.3%
Taylor expanded in k around 0 49.9%
*-commutative49.9%
fma-def49.9%
unpow249.9%
unpow249.9%
unpow249.9%
times-frac57.1%
Simplified57.1%
Taylor expanded in k around inf 32.1%
*-commutative32.1%
unpow232.1%
associate-*l*32.1%
Simplified32.1%
Taylor expanded in k around 0 32.1%
unpow232.1%
*-commutative32.1%
times-frac33.4%
unpow233.4%
Simplified33.4%
Final simplification33.4%
herbie shell --seed 2023182
(FPCore (t l k)
:name "Toniolo and Linder, Equation (10-)"
:precision binary64
(/ 2.0 (* (* (* (/ (pow t 3.0) (* l l)) (sin k)) (tan k)) (- (+ 1.0 (pow (/ k t) 2.0)) 1.0))))