
(FPCore (f) :precision binary64 (let* ((t_0 (* (/ PI 4.0) f)) (t_1 (exp t_0)) (t_2 (exp (- t_0)))) (- (* (/ 1.0 (/ PI 4.0)) (log (/ (+ t_1 t_2) (- t_1 t_2)))))))
/* VandenBroeck & Keller, Eq. (20): -(4/pi) * log(coth(pi*f/4)).
 * Operation order matches the FPCore reference exactly, so results
 * are bit-identical to the original transcription. */
double code(double f) {
    double quarter_pi = ((double) M_PI) / 4.0;        /* pi/4 */
    double scaled = quarter_pi * f;                   /* t_0 = (pi/4)*f */
    double e_pos = exp(scaled);                       /* e^{t_0} */
    double e_neg = exp(-scaled);                      /* e^{-t_0} */
    double ratio = (e_pos + e_neg) / (e_pos - e_neg); /* coth(t_0) */
    return -((1.0 / quarter_pi) * log(ratio));
}
/** VandenBroeck & Keller, Eq. (20): -(4/pi) * log(coth(pi*f/4)).
 *  Operation order matches the FPCore reference, so the result is
 *  bit-identical to the original transcription. */
public static double code(double f) {
    final double quarterPi = Math.PI / 4.0;            // pi/4
    final double scaled = quarterPi * f;               // t_0
    final double ePos = Math.exp(scaled);              // e^{t_0}
    final double eNeg = Math.exp(-scaled);             // e^{-t_0}
    final double ratio = (ePos + eNeg) / (ePos - eNeg); // coth(t_0)
    return -((1.0 / quarterPi) * Math.log(ratio));
}
def code(f):
    """VandenBroeck & Keller, Eq. (20): -(4/pi) * log(coth(pi*f/4)).

    Direct transcription of the FPCore reference; statement order is
    preserved so rounding behaviour is unchanged.  (The original line
    had all statements collapsed onto one line, which is not valid
    Python; this is the same code reformatted.)
    """
    t_0 = (math.pi / 4.0) * f   # (pi/4) * f
    t_1 = math.exp(t_0)         # e^{t_0}
    t_2 = math.exp(-t_0)        # e^{-t_0}
    # (t_1 + t_2) / (t_1 - t_2) == coth(t_0)
    return -((1.0 / (math.pi / 4.0)) * math.log(((t_1 + t_2) / (t_1 - t_2))))
# VandenBroeck & Keller, Eq. (20): -(4/pi) * log(coth(pi*f/4)).
# Same statements as the generated one-liner (which lacked statement
# separators and was not valid Julia), reformatted onto separate lines.
function code(f)
	t_0 = Float64(Float64(pi / 4.0) * f)   # (pi/4) * f
	t_1 = exp(t_0)                         # e^{t_0}
	t_2 = exp(Float64(-t_0))               # e^{-t_0}
	return Float64(-Float64(Float64(1.0 / Float64(pi / 4.0)) * log(Float64(Float64(t_1 + t_2) / Float64(t_1 - t_2)))))
end
% VandenBroeck & Keller, Eq. (20): -(4/pi) * log(coth(pi*f/4)).
% Same statements as the generated one-liner, reformatted for readability.
function tmp = code(f)
	t_0 = (pi / 4.0) * f;   % (pi/4) * f
	t_1 = exp(t_0);         % e^{t_0}
	t_2 = exp(-t_0);        % e^{-t_0}
	% (t_1 + t_2) / (t_1 - t_2) == coth(t_0)
	tmp = -((1.0 / (pi / 4.0)) * log(((t_1 + t_2) / (t_1 - t_2))));
end
(* VandenBroeck & Keller, Eq. (20): -(4/Pi) Log[Coth[Pi f/4]], each step rounded via N[..., $MachinePrecision]; t$95$0 = (Pi/4) f, t$95$1 = E^t$95$0, t$95$2 = E^-t$95$0. *)
code[f_] := Block[{t$95$0 = N[(N[(Pi / 4.0), $MachinePrecision] * f), $MachinePrecision]}, Block[{t$95$1 = N[Exp[t$95$0], $MachinePrecision]}, Block[{t$95$2 = N[Exp[(-t$95$0)], $MachinePrecision]}, (-N[(N[(1.0 / N[(Pi / 4.0), $MachinePrecision]), $MachinePrecision] * N[Log[N[(N[(t$95$1 + t$95$2), $MachinePrecision] / N[(t$95$1 - t$95$2), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision])]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{\pi}{4} \cdot f\\
t_1 := e^{t_0}\\
t_2 := e^{-t_0}\\
-\frac{1}{\frac{\pi}{4}} \cdot \log \left(\frac{t_1 + t_2}{t_1 - t_2}\right)
\end{array}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 11 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (f) :precision binary64 (let* ((t_0 (* (/ PI 4.0) f)) (t_1 (exp t_0)) (t_2 (exp (- t_0)))) (- (* (/ 1.0 (/ PI 4.0)) (log (/ (+ t_1 t_2) (- t_1 t_2)))))))
/* VandenBroeck & Keller, Eq. (20): -(4/pi) * log(coth(pi*f/4)).
 * Operation order matches the FPCore reference exactly, so results
 * are bit-identical to the original transcription. */
double code(double f) {
    double quarter_pi = ((double) M_PI) / 4.0;        /* pi/4 */
    double scaled = quarter_pi * f;                   /* t_0 = (pi/4)*f */
    double e_pos = exp(scaled);                       /* e^{t_0} */
    double e_neg = exp(-scaled);                      /* e^{-t_0} */
    double ratio = (e_pos + e_neg) / (e_pos - e_neg); /* coth(t_0) */
    return -((1.0 / quarter_pi) * log(ratio));
}
/** VandenBroeck & Keller, Eq. (20): -(4/pi) * log(coth(pi*f/4)).
 *  Operation order matches the FPCore reference, so the result is
 *  bit-identical to the original transcription. */
public static double code(double f) {
    final double quarterPi = Math.PI / 4.0;            // pi/4
    final double scaled = quarterPi * f;               // t_0
    final double ePos = Math.exp(scaled);              // e^{t_0}
    final double eNeg = Math.exp(-scaled);             // e^{-t_0}
    final double ratio = (ePos + eNeg) / (ePos - eNeg); // coth(t_0)
    return -((1.0 / quarterPi) * Math.log(ratio));
}
def code(f):
    """VandenBroeck & Keller, Eq. (20): -(4/pi) * log(coth(pi*f/4)).

    Direct transcription of the FPCore reference; statement order is
    preserved so rounding behaviour is unchanged.  (The original line
    had all statements collapsed onto one line, which is not valid
    Python; this is the same code reformatted.)
    """
    t_0 = (math.pi / 4.0) * f   # (pi/4) * f
    t_1 = math.exp(t_0)         # e^{t_0}
    t_2 = math.exp(-t_0)        # e^{-t_0}
    # (t_1 + t_2) / (t_1 - t_2) == coth(t_0)
    return -((1.0 / (math.pi / 4.0)) * math.log(((t_1 + t_2) / (t_1 - t_2))))
function code(f) t_0 = Float64(Float64(pi / 4.0) * f) t_1 = exp(t_0) t_2 = exp(Float64(-t_0)) return Float64(-Float64(Float64(1.0 / Float64(pi / 4.0)) * log(Float64(Float64(t_1 + t_2) / Float64(t_1 - t_2))))) end
function tmp = code(f) t_0 = (pi / 4.0) * f; t_1 = exp(t_0); t_2 = exp(-t_0); tmp = -((1.0 / (pi / 4.0)) * log(((t_1 + t_2) / (t_1 - t_2)))); end
code[f_] := Block[{t$95$0 = N[(N[(Pi / 4.0), $MachinePrecision] * f), $MachinePrecision]}, Block[{t$95$1 = N[Exp[t$95$0], $MachinePrecision]}, Block[{t$95$2 = N[Exp[(-t$95$0)], $MachinePrecision]}, (-N[(N[(1.0 / N[(Pi / 4.0), $MachinePrecision]), $MachinePrecision] * N[Log[N[(N[(t$95$1 + t$95$2), $MachinePrecision] / N[(t$95$1 - t$95$2), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision])]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{\pi}{4} \cdot f\\
t_1 := e^{t_0}\\
t_2 := e^{-t_0}\\
-\frac{1}{\frac{\pi}{4}} \cdot \log \left(\frac{t_1 + t_2}{t_1 - t_2}\right)
\end{array}
\end{array}
(FPCore (f)
:precision binary64
(-
(/
(log
(/
(* 2.0 (cosh (* PI (* 0.25 f))))
(fma
f
(* PI 0.5)
(fma
(pow f 5.0)
(* (pow PI 5.0) 1.6276041666666666e-5)
(fma
(pow f 7.0)
(* (pow PI 7.0) 2.422030009920635e-8)
(* (pow (* PI f) 3.0) 0.005208333333333333))))))
(* PI 0.25))))
/* Herbie alternative: numerator 2*cosh(pi*f/4); denominator is a nested-fma
 * odd polynomial in f (degrees 1,3,5,7).  Operation order is kept identical
 * to the generated one-liner, so the result does not change. */
double code(double f) {
    double pi = (double) M_PI;
    double num = 2.0 * cosh(pi * (0.25 * f));
    /* innermost fma: f^7 term, seeded with the f^3 term */
    double inner = fma(pow(f, 7.0), pow(pi, 7.0) * 2.422030009920635e-8,
                       pow(pi * f, 3.0) * 0.005208333333333333);
    double mid = fma(pow(f, 5.0), pow(pi, 5.0) * 1.6276041666666666e-5, inner);
    double den = fma(f, pi * 0.5, mid);
    return -(log(num / den) / (pi * 0.25));
}
function code(f) return Float64(-Float64(log(Float64(Float64(2.0 * cosh(Float64(pi * Float64(0.25 * f)))) / fma(f, Float64(pi * 0.5), fma((f ^ 5.0), Float64((pi ^ 5.0) * 1.6276041666666666e-5), fma((f ^ 7.0), Float64((pi ^ 7.0) * 2.422030009920635e-8), Float64((Float64(pi * f) ^ 3.0) * 0.005208333333333333)))))) / Float64(pi * 0.25))) end
code[f_] := (-N[(N[Log[N[(N[(2.0 * N[Cosh[N[(Pi * N[(0.25 * f), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(f * N[(Pi * 0.5), $MachinePrecision] + N[(N[Power[f, 5.0], $MachinePrecision] * N[(N[Power[Pi, 5.0], $MachinePrecision] * 1.6276041666666666e-5), $MachinePrecision] + N[(N[Power[f, 7.0], $MachinePrecision] * N[(N[Power[Pi, 7.0], $MachinePrecision] * 2.422030009920635e-8), $MachinePrecision] + N[(N[Power[N[(Pi * f), $MachinePrecision], 3.0], $MachinePrecision] * 0.005208333333333333), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] / N[(Pi * 0.25), $MachinePrecision]), $MachinePrecision])
\begin{array}{l}
\\
-\frac{\log \left(\frac{2 \cdot \cosh \left(\pi \cdot \left(0.25 \cdot f\right)\right)}{\mathsf{fma}\left(f, \pi \cdot 0.5, \mathsf{fma}\left({f}^{5}, {\pi}^{5} \cdot 1.6276041666666666 \cdot 10^{-5}, \mathsf{fma}\left({f}^{7}, {\pi}^{7} \cdot 2.422030009920635 \cdot 10^{-8}, {\left(\pi \cdot f\right)}^{3} \cdot 0.005208333333333333\right)\right)\right)}\right)}{\pi \cdot 0.25}
\end{array}
Initial program 7.1%
Taylor expanded in f around 0 94.8%
fma-def 94.8%
distribute-rgt-out-- 94.8%
metadata-eval 94.8%
+-commutative 94.8%
associate-+l+ 94.8%
Simplified 94.8%
div-inv 94.8%
log-prod 94.8%
Applied egg-rr 94.8%
log-rec 94.8%
sub-neg 94.8%
log-div 94.8%
associate-*l* 94.8%
Simplified 94.8%
associate-*l/ 94.9%
*-un-lft-identity 94.9%
div-inv 94.9%
metadata-eval 94.9%
Applied egg-rr 94.9%
Final simplification 94.9%
(FPCore (f)
:precision binary64
(*
(log
(/
(+ (exp (* f (/ PI 4.0))) (exp (* f (/ (- PI) 4.0))))
(fma
f
(* PI 0.5)
(fma
(pow f 3.0)
(* 0.005208333333333333 (pow PI 3.0))
(* (pow f 5.0) (* (pow PI 5.0) 1.6276041666666666e-5))))))
(/ -1.0 (/ PI 4.0))))
/* Herbie alternative: numerator e^{pi f/4} + e^{-pi f/4}; denominator is a
 * nested-fma odd polynomial in f (degrees 1,3,5).  Operation order is kept
 * identical to the generated one-liner. */
double code(double f) {
    double pi = (double) M_PI;
    double num = exp(f * (pi / 4.0)) + exp(f * (-pi / 4.0));
    double inner = fma(pow(f, 3.0), 0.005208333333333333 * pow(pi, 3.0),
                       pow(f, 5.0) * (pow(pi, 5.0) * 1.6276041666666666e-5));
    double den = fma(f, pi * 0.5, inner);
    return log(num / den) * (-1.0 / (pi / 4.0));
}
function code(f) return Float64(log(Float64(Float64(exp(Float64(f * Float64(pi / 4.0))) + exp(Float64(f * Float64(Float64(-pi) / 4.0)))) / fma(f, Float64(pi * 0.5), fma((f ^ 3.0), Float64(0.005208333333333333 * (pi ^ 3.0)), Float64((f ^ 5.0) * Float64((pi ^ 5.0) * 1.6276041666666666e-5)))))) * Float64(-1.0 / Float64(pi / 4.0))) end
code[f_] := N[(N[Log[N[(N[(N[Exp[N[(f * N[(Pi / 4.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] + N[Exp[N[(f * N[((-Pi) / 4.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(f * N[(Pi * 0.5), $MachinePrecision] + N[(N[Power[f, 3.0], $MachinePrecision] * N[(0.005208333333333333 * N[Power[Pi, 3.0], $MachinePrecision]), $MachinePrecision] + N[(N[Power[f, 5.0], $MachinePrecision] * N[(N[Power[Pi, 5.0], $MachinePrecision] * 1.6276041666666666e-5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] * N[(-1.0 / N[(Pi / 4.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\log \left(\frac{e^{f \cdot \frac{\pi}{4}} + e^{f \cdot \frac{-\pi}{4}}}{\mathsf{fma}\left(f, \pi \cdot 0.5, \mathsf{fma}\left({f}^{3}, 0.005208333333333333 \cdot {\pi}^{3}, {f}^{5} \cdot \left({\pi}^{5} \cdot 1.6276041666666666 \cdot 10^{-5}\right)\right)\right)}\right) \cdot \frac{-1}{\frac{\pi}{4}}
\end{array}
Initial program 7.1%
Taylor expanded in f around 0 94.7%
fma-def94.7%
distribute-rgt-out--94.7%
metadata-eval94.7%
fma-def94.7%
distribute-rgt-out--94.7%
metadata-eval94.7%
distribute-rgt-out--94.7%
metadata-eval94.7%
Simplified94.7%
Final simplification94.7%
(FPCore (f) :precision binary64 (- (fma 2.0 (* (pow f 2.0) (* PI 0.041666666666666664)) (/ (* 4.0 (- (log (/ 4.0 PI)) (log f))) PI))))
/* Herbie alternative: -fma(2, f^2 * pi/24, (4/pi) * (log(4/pi) - log f)).
 * Operation order is kept identical to the generated one-liner. */
double code(double f) {
    double pi = (double) M_PI;
    double quad = pow(f, 2.0) * (pi * 0.041666666666666664); /* pi*f^2/24 */
    double logs = (4.0 * (log(4.0 / pi) - log(f))) / pi;
    return -fma(2.0, quad, logs);
}
function code(f) return Float64(-fma(2.0, Float64((f ^ 2.0) * Float64(pi * 0.041666666666666664)), Float64(Float64(4.0 * Float64(log(Float64(4.0 / pi)) - log(f))) / pi))) end
code[f_] := (-N[(2.0 * N[(N[Power[f, 2.0], $MachinePrecision] * N[(Pi * 0.041666666666666664), $MachinePrecision]), $MachinePrecision] + N[(N[(4.0 * N[(N[Log[N[(4.0 / Pi), $MachinePrecision]], $MachinePrecision] - N[Log[f], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / Pi), $MachinePrecision]), $MachinePrecision])
\begin{array}{l}
\\
-\mathsf{fma}\left(2, {f}^{2} \cdot \left(\pi \cdot 0.041666666666666664\right), \frac{4 \cdot \left(\log \left(\frac{4}{\pi}\right) - \log f\right)}{\pi}\right)
\end{array}
Initial program 7.1%
Taylor expanded in f around 0 94.8%
fma-def94.8%
distribute-rgt-out--94.8%
metadata-eval94.8%
+-commutative94.8%
associate-+l+94.8%
Simplified94.8%
div-inv94.8%
log-prod94.8%
Applied egg-rr94.8%
log-rec94.8%
sub-neg94.8%
log-div94.8%
associate-*l*94.8%
Simplified94.8%
associate-*l/94.9%
*-un-lft-identity94.9%
div-inv94.9%
metadata-eval94.9%
Applied egg-rr94.9%
Taylor expanded in f around 0 94.6%
fma-def94.6%
distribute-rgt-out--94.6%
metadata-eval94.6%
associate-*r/94.6%
mul-1-neg94.6%
Simplified94.6%
Final simplification94.6%
(FPCore (f) :precision binary64 (- (fma 2.0 (* PI (* (pow f 2.0) 0.041666666666666664)) (/ (log (/ 4.0 (* PI f))) (* PI 0.25)))))
/* Herbie alternative: -fma(2, pi * f^2/24, log(4/(pi f)) / (pi/4)).
 * Operation order is kept identical to the generated one-liner. */
double code(double f) {
    double pi = (double) M_PI;
    double quad = pi * (pow(f, 2.0) * 0.041666666666666664);
    double logs = log(4.0 / (pi * f)) / (pi * 0.25);
    return -fma(2.0, quad, logs);
}
function code(f) return Float64(-fma(2.0, Float64(pi * Float64((f ^ 2.0) * 0.041666666666666664)), Float64(log(Float64(4.0 / Float64(pi * f))) / Float64(pi * 0.25)))) end
code[f_] := (-N[(2.0 * N[(Pi * N[(N[Power[f, 2.0], $MachinePrecision] * 0.041666666666666664), $MachinePrecision]), $MachinePrecision] + N[(N[Log[N[(4.0 / N[(Pi * f), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] / N[(Pi * 0.25), $MachinePrecision]), $MachinePrecision]), $MachinePrecision])
\begin{array}{l}
\\
-\mathsf{fma}\left(2, \pi \cdot \left({f}^{2} \cdot 0.041666666666666664\right), \frac{\log \left(\frac{4}{\pi \cdot f}\right)}{\pi \cdot 0.25}\right)
\end{array}
Initial program 7.1%
Taylor expanded in f around 0 94.8%
fma-def94.8%
distribute-rgt-out--94.8%
metadata-eval94.8%
+-commutative94.8%
associate-+l+94.8%
Simplified94.8%
div-inv94.8%
log-prod94.8%
Applied egg-rr94.8%
log-rec94.8%
sub-neg94.8%
log-div94.8%
associate-*l*94.8%
Simplified94.8%
Taylor expanded in f around 0 94.6%
fma-def94.6%
*-commutative94.6%
distribute-rgt-out--94.6%
associate-*l*94.6%
metadata-eval94.6%
neg-mul-194.6%
sub-neg94.6%
metadata-eval94.6%
log-div94.5%
times-frac94.5%
Simplified94.5%
Final simplification94.5%
(FPCore (f) :precision binary64 (* (/ (+ (log (/ 4.0 PI)) (log (/ 1.0 f))) PI) (- 4.0)))
/* Herbie alternative: ((log(4/pi) + log(1/f)) / pi) * -4.
 * Operation order is kept identical to the generated one-liner. */
double code(double f) {
    double pi = (double) M_PI;
    double total = log(4.0 / pi) + log(1.0 / f);
    return (total / pi) * -4.0;
}
/** Herbie alternative: ((log(4/pi) + log(1/f)) / pi) * -4.
 *  Operation order matches the generated one-liner exactly. */
public static double code(double f) {
    final double total = Math.log(4.0 / Math.PI) + Math.log(1.0 / f);
    return (total / Math.PI) * -4.0;
}
def code(f):
    """Herbie alternative: ((log(4/pi) + log(1/f)) / pi) * -4.

    Operation order matches the generated one-liner exactly.
    """
    total = math.log(4.0 / math.pi) + math.log(1.0 / f)
    return (total / math.pi) * -4.0
function code(f) return Float64(Float64(Float64(log(Float64(4.0 / pi)) + log(Float64(1.0 / f))) / pi) * Float64(-4.0)) end
function tmp = code(f) tmp = ((log((4.0 / pi)) + log((1.0 / f))) / pi) * -4.0; end
code[f_] := N[(N[(N[(N[Log[N[(4.0 / Pi), $MachinePrecision]], $MachinePrecision] + N[Log[N[(1.0 / f), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / Pi), $MachinePrecision] * (-4.0)), $MachinePrecision]
\begin{array}{l}
\\
\frac{\log \left(\frac{4}{\pi}\right) + \log \left(\frac{1}{f}\right)}{\pi} \cdot \left(-4\right)
\end{array}
Initial program 7.1%
Taylor expanded in f around 0 94.0%
associate-/r*94.0%
distribute-rgt-out--94.0%
metadata-eval94.0%
Simplified94.0%
Taylor expanded in f around inf 94.1%
Final simplification94.1%
(FPCore (f) :precision binary64 (* 4.0 (/ (- (log f) (log (/ 4.0 PI))) PI)))
/* Herbie alternative: 4 * (log f - log(4/pi)) / pi.
 * Operation order is kept identical to the generated one-liner. */
double code(double f) {
    double pi = (double) M_PI;
    double diff = log(f) - log(4.0 / pi);
    return 4.0 * (diff / pi);
}
/** Herbie alternative: 4 * (log f - log(4/pi)) / pi.
 *  Operation order matches the generated one-liner exactly. */
public static double code(double f) {
    final double diff = Math.log(f) - Math.log(4.0 / Math.PI);
    return 4.0 * (diff / Math.PI);
}
def code(f):
    """Herbie alternative: 4 * (log(f) - log(4/pi)) / pi.

    Operation order matches the generated one-liner exactly.
    """
    diff = math.log(f) - math.log(4.0 / math.pi)
    return 4.0 * (diff / math.pi)
function code(f) return Float64(4.0 * Float64(Float64(log(f) - log(Float64(4.0 / pi))) / pi)) end
function tmp = code(f) tmp = 4.0 * ((log(f) - log((4.0 / pi))) / pi); end
code[f_] := N[(4.0 * N[(N[(N[Log[f], $MachinePrecision] - N[Log[N[(4.0 / Pi), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / Pi), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
4 \cdot \frac{\log f - \log \left(\frac{4}{\pi}\right)}{\pi}
\end{array}
Initial program 7.1%
Taylor expanded in f around 0 94.0%
associate-/r*94.0%
distribute-rgt-out--94.0%
metadata-eval94.0%
Simplified94.0%
Taylor expanded in f around 0 94.1%
neg-mul-194.1%
sub-neg94.1%
Simplified94.1%
Final simplification94.1%
(FPCore (f) :precision binary64 (/ (- (log (/ (/ 2.0 f) (* PI 0.5)))) (* PI 0.25)))
/* Herbie alternative: -log((2/f) / (pi/2)) / (pi/4).
 * Operation order is kept identical to the generated one-liner. */
double code(double f) {
    double pi = (double) M_PI;
    double arg = (2.0 / f) / (pi * 0.5);
    return -log(arg) / (pi * 0.25);
}
/** Herbie alternative: -log((2/f) / (pi/2)) / (pi/4).
 *  Operation order matches the generated one-liner exactly. */
public static double code(double f) {
    final double arg = (2.0 / f) / (Math.PI * 0.5);
    return -Math.log(arg) / (Math.PI * 0.25);
}
def code(f):
    """Herbie alternative: -log((2/f) / (pi/2)) / (pi/4).

    Operation order matches the generated one-liner exactly.
    """
    arg = (2.0 / f) / (math.pi * 0.5)
    return -math.log(arg) / (math.pi * 0.25)
function code(f) return Float64(Float64(-log(Float64(Float64(2.0 / f) / Float64(pi * 0.5)))) / Float64(pi * 0.25)) end
function tmp = code(f) tmp = -log(((2.0 / f) / (pi * 0.5))) / (pi * 0.25); end
code[f_] := N[((-N[Log[N[(N[(2.0 / f), $MachinePrecision] / N[(Pi * 0.5), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]) / N[(Pi * 0.25), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{-\log \left(\frac{\frac{2}{f}}{\pi \cdot 0.5}\right)}{\pi \cdot 0.25}
\end{array}
Initial program 7.1%
Taylor expanded in f around 0 94.0%
associate-/r*94.0%
distribute-rgt-out--94.0%
metadata-eval94.0%
Simplified94.0%
associate-*l/94.1%
*-un-lft-identity94.1%
div-inv94.1%
metadata-eval94.1%
Applied egg-rr94.1%
Final simplification94.1%
(FPCore (f) :precision binary64 (* (/ 4.0 PI) (- (log (/ 4.0 (* PI f))))))
/* Herbie alternative: (4/pi) * -log(4 / (pi*f)).
 * Operation order is kept identical to the generated one-liner. */
double code(double f) {
    double pi = (double) M_PI;
    double arg = 4.0 / (pi * f);
    return (4.0 / pi) * -log(arg);
}
/** Herbie alternative: (4/pi) * -log(4 / (pi*f)).
 *  Operation order matches the generated one-liner exactly. */
public static double code(double f) {
    final double arg = 4.0 / (Math.PI * f);
    return (4.0 / Math.PI) * -Math.log(arg);
}
def code(f):
    """Herbie alternative: (4/pi) * -log(4 / (pi*f)).

    Operation order matches the generated one-liner exactly.
    """
    arg = 4.0 / (math.pi * f)
    return (4.0 / math.pi) * -math.log(arg)
function code(f) return Float64(Float64(4.0 / pi) * Float64(-log(Float64(4.0 / Float64(pi * f))))) end
function tmp = code(f) tmp = (4.0 / pi) * -log((4.0 / (pi * f))); end
code[f_] := N[(N[(4.0 / Pi), $MachinePrecision] * (-N[Log[N[(4.0 / N[(Pi * f), $MachinePrecision]), $MachinePrecision]], $MachinePrecision])), $MachinePrecision]
\begin{array}{l}
\\
\frac{4}{\pi} \cdot \left(-\log \left(\frac{4}{\pi \cdot f}\right)\right)
\end{array}
Initial program 7.1%
Taylor expanded in f around 0 94.8%
fma-def94.8%
distribute-rgt-out--94.8%
metadata-eval94.8%
+-commutative94.8%
associate-+l+94.8%
Simplified94.8%
Taylor expanded in f around 0 94.1%
neg-mul-194.1%
sub-neg94.1%
Simplified94.1%
expm1-log1p-u92.6%
expm1-udef92.6%
add-log-exp75.2%
*-commutative75.2%
diff-log75.2%
exp-to-pow75.2%
clear-num75.2%
Applied egg-rr75.2%
expm1-def75.2%
expm1-log1p76.3%
log-pow94.0%
associate-/l/94.0%
*-commutative94.0%
Simplified94.0%
Final simplification94.0%
(FPCore (f) :precision binary64 (/ (- 4.0) (/ PI (log (/ (/ 4.0 PI) f)))))
/* Herbie alternative: -4 / (pi / log((4/pi) / f)).
 * Operation order is kept identical to the generated one-liner. */
double code(double f) {
    double pi = (double) M_PI;
    double arg = (4.0 / pi) / f;
    return -4.0 / (pi / log(arg));
}
/** Herbie alternative: -4 / (pi / log((4/pi) / f)).
 *  Operation order matches the generated one-liner exactly. */
public static double code(double f) {
    final double arg = (4.0 / Math.PI) / f;
    return -4.0 / (Math.PI / Math.log(arg));
}
def code(f):
    """Herbie alternative: -4 / (pi / log((4/pi) / f)).

    Operation order matches the generated one-liner exactly.
    """
    arg = (4.0 / math.pi) / f
    return -4.0 / (math.pi / math.log(arg))
function code(f) return Float64(Float64(-4.0) / Float64(pi / log(Float64(Float64(4.0 / pi) / f)))) end
function tmp = code(f) tmp = -4.0 / (pi / log(((4.0 / pi) / f))); end
code[f_] := N[((-4.0) / N[(Pi / N[Log[N[(N[(4.0 / Pi), $MachinePrecision] / f), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{-4}{\frac{\pi}{\log \left(\frac{\frac{4}{\pi}}{f}\right)}}
\end{array}
Initial program 7.1%
Taylor expanded in f around 0 94.1%
associate-*r/94.1%
associate-/l*94.0%
mul-1-neg94.0%
unsub-neg94.0%
distribute-rgt-out--94.0%
metadata-eval94.0%
Simplified94.0%
add-exp-log92.7%
diff-log92.7%
Applied egg-rr92.7%
Taylor expanded in f around 0 94.0%
neg-mul-194.0%
sub-neg94.0%
log-div94.0%
Simplified94.0%
Final simplification94.0%
(FPCore (f) :precision binary64 (/ (- (log (/ (/ 4.0 PI) f))) (* PI 0.25)))
/* Herbie alternative: -log((4/pi) / f) / (pi/4).
 * Operation order is kept identical to the generated one-liner. */
double code(double f) {
    double pi = (double) M_PI;
    double arg = (4.0 / pi) / f;
    return -log(arg) / (pi * 0.25);
}
/** Herbie alternative: -log((4/pi) / f) / (pi/4).
 *  Operation order matches the generated one-liner exactly. */
public static double code(double f) {
    final double arg = (4.0 / Math.PI) / f;
    return -Math.log(arg) / (Math.PI * 0.25);
}
def code(f):
    """Herbie alternative: -log((4/pi) / f) / (pi/4).

    Operation order matches the generated one-liner exactly.
    """
    arg = (4.0 / math.pi) / f
    return -math.log(arg) / (math.pi * 0.25)
function code(f) return Float64(Float64(-log(Float64(Float64(4.0 / pi) / f))) / Float64(pi * 0.25)) end
function tmp = code(f) tmp = -log(((4.0 / pi) / f)) / (pi * 0.25); end
code[f_] := N[((-N[Log[N[(N[(4.0 / Pi), $MachinePrecision] / f), $MachinePrecision]], $MachinePrecision]) / N[(Pi * 0.25), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{-\log \left(\frac{\frac{4}{\pi}}{f}\right)}{\pi \cdot 0.25}
\end{array}
Initial program 7.1%
Taylor expanded in f around 0 94.8%
fma-def94.8%
distribute-rgt-out--94.8%
metadata-eval94.8%
+-commutative94.8%
associate-+l+94.8%
Simplified94.8%
Taylor expanded in f around 0 94.1%
neg-mul-194.1%
sub-neg94.1%
Simplified94.1%
associate-*l/94.1%
*-un-lft-identity94.1%
diff-log94.1%
div-inv94.1%
metadata-eval94.1%
Applied egg-rr94.1%
Final simplification94.1%
(FPCore (f) :precision binary64 (* 4.0 (/ (- (log 0.5)) PI)))
/* Constant Herbie alternative: ignores f entirely; -log(0.5) == log 2,
 * so this always returns 4*ln(2)/pi.  Operation order unchanged. */
double code(double f) {
    (void) f;  /* unused: this alternative is a constant */
    double ln2 = -log(0.5);
    return 4.0 * (ln2 / ((double) M_PI));
}
/** Constant Herbie alternative: ignores f; -log(0.5) == log 2, so this
 *  always returns 4*ln(2)/pi.  Operation order unchanged. */
public static double code(double f) {
    final double ln2 = -Math.log(0.5);
    return 4.0 * (ln2 / Math.PI);
}
def code(f):
    """Constant Herbie alternative: ignores f; -log(0.5) == log 2, so this
    always returns 4*ln(2)/pi.  Operation order unchanged."""
    ln2 = -math.log(0.5)
    return 4.0 * (ln2 / math.pi)
function code(f) return Float64(4.0 * Float64(Float64(-log(0.5)) / pi)) end
function tmp = code(f) tmp = 4.0 * (-log(0.5) / pi); end
code[f_] := N[(4.0 * N[((-N[Log[0.5], $MachinePrecision]) / Pi), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
4 \cdot \frac{-\log 0.5}{\pi}
\end{array}
Initial program 7.1%
Applied egg-rr1.7%
Taylor expanded in f around 0 1.6%
+-commutative1.6%
*-commutative1.6%
Simplified1.6%
Taylor expanded in f around 0 1.6%
Final simplification1.6%
herbie shell --seed 2023306
(FPCore (f)
:name "VandenBroeck and Keller, Equation (20)"
:precision binary64
(- (* (/ 1.0 (/ PI 4.0)) (log (/ (+ (exp (* (/ PI 4.0) f)) (exp (- (* (/ PI 4.0) f)))) (- (exp (* (/ PI 4.0) f)) (exp (- (* (/ PI 4.0) f)))))))))