
; Initial program: -(4/pi) * log((e^t + e^-t) / (e^t - e^-t)) with t = (pi/4)*f,
; i.e. -(4/pi) * log(coth(pi*f/4)); the subtraction e^t - e^-t cancels near f = 0.
(FPCore (f) :precision binary64 (let* ((t_0 (* (/ PI 4.0) f)) (t_1 (exp t_0)) (t_2 (exp (- t_0)))) (- (* (/ 1.0 (/ PI 4.0)) (log (/ (+ t_1 t_2) (- t_1 t_2)))))))
/* Initial program: -(4/pi) * log(coth(pi*f/4)), evaluated via the two
 * exponentials exp(pi*f/4) and exp(-pi*f/4). */
double code(double f) {
    const double quarter_pi = ((double) M_PI) / 4.0;
    double x = quarter_pi * f;
    double grow = exp(x);
    double decay = exp(-x);
    double ratio = (grow + decay) / (grow - decay);
    return -((1.0 / quarter_pi) * log(ratio));
}
/** Initial program: -(4/pi) * log(coth(pi*f/4)) via exp(+-pi*f/4). */
public static double code(double f) {
    final double quarterPi = Math.PI / 4.0;
    double x = quarterPi * f;
    double grow = Math.exp(x);
    double decay = Math.exp(-x);
    return -((1.0 / quarterPi) * Math.log((grow + decay) / (grow - decay)));
}
def code(f):
    """Initial program: -(4/pi) * log(coth(pi*f/4)) via exp(+-pi*f/4).

    Reformatted: the generated listing had all statements fused onto one
    line, which is a Python syntax error.
    """
    t_0 = (math.pi / 4.0) * f
    t_1 = math.exp(t_0)
    t_2 = math.exp(-t_0)
    return -((1.0 / (math.pi / 4.0)) * math.log((t_1 + t_2) / (t_1 - t_2)))
# Initial program: -(4/pi) * log(coth(pi*f/4)) via exp(+-pi*f/4).
# Reformatted: the generated listing had statements juxtaposed on one line,
# which does not parse in Julia.
function code(f)
    t_0 = Float64(Float64(pi / 4.0) * f)
    t_1 = exp(t_0)
    t_2 = exp(Float64(-t_0))
    return Float64(-Float64(Float64(1.0 / Float64(pi / 4.0)) * log(Float64(Float64(t_1 + t_2) / Float64(t_1 - t_2)))))
end
% Initial program: -(4/pi) * log(coth(pi*f/4)) via exp(+-pi*f/4).
% Reformatted: the declaration and body were fused onto one line, which is
% invalid MATLAB.
function tmp = code(f)
    t_0 = (pi / 4.0) * f;
    t_1 = exp(t_0);
    t_2 = exp(-t_0);
    tmp = -((1.0 / (pi / 4.0)) * log(((t_1 + t_2) / (t_1 - t_2))));
end
code[f_] := Block[{t$95$0 = N[(N[(Pi / 4.0), $MachinePrecision] * f), $MachinePrecision]}, Block[{t$95$1 = N[Exp[t$95$0], $MachinePrecision]}, Block[{t$95$2 = N[Exp[(-t$95$0)], $MachinePrecision]}, (-N[(N[(1.0 / N[(Pi / 4.0), $MachinePrecision]), $MachinePrecision] * N[Log[N[(N[(t$95$1 + t$95$2), $MachinePrecision] / N[(t$95$1 - t$95$2), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision])]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{\pi}{4} \cdot f\\
t_1 := e^{t_0}\\
t_2 := e^{-t_0}\\
-\frac{1}{\frac{\pi}{4}} \cdot \log \left(\frac{t_1 + t_2}{t_1 - t_2}\right)
\end{array}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 7 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (f) :precision binary64 (let* ((t_0 (* (/ PI 4.0) f)) (t_1 (exp t_0)) (t_2 (exp (- t_0)))) (- (* (/ 1.0 (/ PI 4.0)) (log (/ (+ t_1 t_2) (- t_1 t_2)))))))
/* Repeated listing of the initial program: -(4/pi) * log(coth(pi*f/4)),
 * computed from the two exponentials; the report rates it 8.0% accurate. */
double code(double f) {
double t_0 = (((double) M_PI) / 4.0) * f;
double t_1 = exp(t_0);
double t_2 = exp(-t_0);
return -((1.0 / (((double) M_PI) / 4.0)) * log(((t_1 + t_2) / (t_1 - t_2))));
}
/** Repeated listing of the initial program: -(4/pi) * log(coth(pi*f/4)). */
public static double code(double f) {
double t_0 = (Math.PI / 4.0) * f;
double t_1 = Math.exp(t_0);
double t_2 = Math.exp(-t_0);
return -((1.0 / (Math.PI / 4.0)) * Math.log(((t_1 + t_2) / (t_1 - t_2))));
}
def code(f):
    """Initial program (repeated listing): -(4/pi) * log(coth(pi*f/4)).

    Reformatted: statements were fused onto one line, a Python syntax error.
    """
    t_0 = (math.pi / 4.0) * f
    t_1 = math.exp(t_0)
    t_2 = math.exp(-t_0)
    return -((1.0 / (math.pi / 4.0)) * math.log((t_1 + t_2) / (t_1 - t_2)))
# Initial program (repeated listing): -(4/pi) * log(coth(pi*f/4)).
# Reformatted: juxtaposed statements on one line do not parse in Julia.
function code(f)
    t_0 = Float64(Float64(pi / 4.0) * f)
    t_1 = exp(t_0)
    t_2 = exp(Float64(-t_0))
    return Float64(-Float64(Float64(1.0 / Float64(pi / 4.0)) * log(Float64(Float64(t_1 + t_2) / Float64(t_1 - t_2)))))
end
% Initial program (repeated listing): -(4/pi) * log(coth(pi*f/4)).
% Reformatted: the one-line declaration-plus-body form is invalid MATLAB.
function tmp = code(f)
    t_0 = (pi / 4.0) * f;
    t_1 = exp(t_0);
    t_2 = exp(-t_0);
    tmp = -((1.0 / (pi / 4.0)) * log(((t_1 + t_2) / (t_1 - t_2))));
end
code[f_] := Block[{t$95$0 = N[(N[(Pi / 4.0), $MachinePrecision] * f), $MachinePrecision]}, Block[{t$95$1 = N[Exp[t$95$0], $MachinePrecision]}, Block[{t$95$2 = N[Exp[(-t$95$0)], $MachinePrecision]}, (-N[(N[(1.0 / N[(Pi / 4.0), $MachinePrecision]), $MachinePrecision] * N[Log[N[(N[(t$95$1 + t$95$2), $MachinePrecision] / N[(t$95$1 - t$95$2), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision])]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{\pi}{4} \cdot f\\
t_1 := e^{t_0}\\
t_2 := e^{-t_0}\\
-\frac{1}{\frac{\pi}{4}} \cdot \log \left(\frac{t_1 + t_2}{t_1 - t_2}\right)
\end{array}
\end{array}
(FPCore (f)
:precision binary64
(*
(/ 1.0 (/ PI 4.0))
(-
(log
(fma
f
(* PI 0.5)
(fma
(pow f 5.0)
(* (pow PI 5.0) 1.6276041666666666e-5)
(fma
(pow f 3.0)
(* (pow PI 3.0) 0.005208333333333333)
(* (pow (* PI f) 7.0) 2.422030009920635e-8)))))
(log (* 2.0 (cosh (* (* PI 0.25) f)))))))
/* Herbie alternative (Taylor expanded in f around 0): (4/pi) *
 * (log(series) - log(2*cosh(pi*f/4))).  The fma chain evaluates the odd
 * Taylor terms of 2*sinh(pi*f/4) through order f^7: the coefficients
 * 1/192 = 0.00520833..., 1/61440 = 1.6276e-5 and 2.422e-8 match
 * 2*(x + x^3/6 + x^5/120 + x^7/5040) at x = pi*f/4.  The report rates
 * this form 95.6% accurate. */
double code(double f) {
return (1.0 / (((double) M_PI) / 4.0)) * (log(fma(f, (((double) M_PI) * 0.5), fma(pow(f, 5.0), (pow(((double) M_PI), 5.0) * 1.6276041666666666e-5), fma(pow(f, 3.0), (pow(((double) M_PI), 3.0) * 0.005208333333333333), (pow((((double) M_PI) * f), 7.0) * 2.422030009920635e-8))))) - log((2.0 * cosh(((((double) M_PI) * 0.25) * f)))));
}
function code(f) return Float64(Float64(1.0 / Float64(pi / 4.0)) * Float64(log(fma(f, Float64(pi * 0.5), fma((f ^ 5.0), Float64((pi ^ 5.0) * 1.6276041666666666e-5), fma((f ^ 3.0), Float64((pi ^ 3.0) * 0.005208333333333333), Float64((Float64(pi * f) ^ 7.0) * 2.422030009920635e-8))))) - log(Float64(2.0 * cosh(Float64(Float64(pi * 0.25) * f)))))) end
code[f_] := N[(N[(1.0 / N[(Pi / 4.0), $MachinePrecision]), $MachinePrecision] * N[(N[Log[N[(f * N[(Pi * 0.5), $MachinePrecision] + N[(N[Power[f, 5.0], $MachinePrecision] * N[(N[Power[Pi, 5.0], $MachinePrecision] * 1.6276041666666666e-5), $MachinePrecision] + N[(N[Power[f, 3.0], $MachinePrecision] * N[(N[Power[Pi, 3.0], $MachinePrecision] * 0.005208333333333333), $MachinePrecision] + N[(N[Power[N[(Pi * f), $MachinePrecision], 7.0], $MachinePrecision] * 2.422030009920635e-8), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] - N[Log[N[(2.0 * N[Cosh[N[(N[(Pi * 0.25), $MachinePrecision] * f), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{\frac{\pi}{4}} \cdot \left(\log \left(\mathsf{fma}\left(f, \pi \cdot 0.5, \mathsf{fma}\left({f}^{5}, {\pi}^{5} \cdot 1.6276041666666666 \cdot 10^{-5}, \mathsf{fma}\left({f}^{3}, {\pi}^{3} \cdot 0.005208333333333333, {\left(\pi \cdot f\right)}^{7} \cdot 2.422030009920635 \cdot 10^{-8}\right)\right)\right)\right) - \log \left(2 \cdot \cosh \left(\left(\pi \cdot 0.25\right) \cdot f\right)\right)\right)
\end{array}
Initial program 8.0%
Taylor expanded in f around 0 95.6%
fma-define 95.6%
distribute-rgt-out-- 95.6%
metadata-eval 95.6%
associate-+r+ 95.6%
+-commutative 95.6%
Simplified 95.6%
log-div 95.6%
cosh-undef 95.6%
div-inv 95.6%
metadata-eval 95.6%
Applied egg-rr 95.6%
Final simplification 95.6%
(FPCore (f)
:precision binary64
(*
(log
(/
(* 2.0 (cosh (* (* PI 0.25) f)))
(fma
f
(* PI 0.5)
(+
(* (pow (* PI f) 7.0) 2.422030009920635e-8)
(fma
(pow f 5.0)
(* (pow PI 5.0) 1.6276041666666666e-5)
(* 0.005208333333333333 (pow (* PI f) 3.0)))))))
(/ -1.0 (/ PI 4.0))))
double code(double f) {
return log(((2.0 * cosh(((((double) M_PI) * 0.25) * f))) / fma(f, (((double) M_PI) * 0.5), ((pow((((double) M_PI) * f), 7.0) * 2.422030009920635e-8) + fma(pow(f, 5.0), (pow(((double) M_PI), 5.0) * 1.6276041666666666e-5), (0.005208333333333333 * pow((((double) M_PI) * f), 3.0))))))) * (-1.0 / (((double) M_PI) / 4.0));
}
function code(f) return Float64(log(Float64(Float64(2.0 * cosh(Float64(Float64(pi * 0.25) * f))) / fma(f, Float64(pi * 0.5), Float64(Float64((Float64(pi * f) ^ 7.0) * 2.422030009920635e-8) + fma((f ^ 5.0), Float64((pi ^ 5.0) * 1.6276041666666666e-5), Float64(0.005208333333333333 * (Float64(pi * f) ^ 3.0))))))) * Float64(-1.0 / Float64(pi / 4.0))) end
code[f_] := N[(N[Log[N[(N[(2.0 * N[Cosh[N[(N[(Pi * 0.25), $MachinePrecision] * f), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(f * N[(Pi * 0.5), $MachinePrecision] + N[(N[(N[Power[N[(Pi * f), $MachinePrecision], 7.0], $MachinePrecision] * 2.422030009920635e-8), $MachinePrecision] + N[(N[Power[f, 5.0], $MachinePrecision] * N[(N[Power[Pi, 5.0], $MachinePrecision] * 1.6276041666666666e-5), $MachinePrecision] + N[(0.005208333333333333 * N[Power[N[(Pi * f), $MachinePrecision], 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] * N[(-1.0 / N[(Pi / 4.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\log \left(\frac{2 \cdot \cosh \left(\left(\pi \cdot 0.25\right) \cdot f\right)}{\mathsf{fma}\left(f, \pi \cdot 0.5, {\left(\pi \cdot f\right)}^{7} \cdot 2.422030009920635 \cdot 10^{-8} + \mathsf{fma}\left({f}^{5}, {\pi}^{5} \cdot 1.6276041666666666 \cdot 10^{-5}, 0.005208333333333333 \cdot {\left(\pi \cdot f\right)}^{3}\right)\right)}\right) \cdot \frac{-1}{\frac{\pi}{4}}
\end{array}
Initial program 8.0%
Taylor expanded in f around 0 95.6%
fma-define 95.6%
distribute-rgt-out-- 95.6%
metadata-eval 95.6%
associate-+r+ 95.6%
+-commutative 95.6%
Simplified 95.6%
div-inv 95.6%
log-prod 95.6%
Applied egg-rr 95.6%
Simplified 95.6%
Final simplification 95.6%
(FPCore (f)
:precision binary64
(*
(log
(/
(+ (exp (* (* PI f) -0.25)) (exp (* 0.25 (* PI f))))
(fma
f
(* PI 0.5)
(fma
(pow f 5.0)
(* (pow PI 5.0) 1.6276041666666666e-5)
(* 0.005208333333333333 (pow (* PI f) 3.0))))))
(/ -4.0 PI)))
double code(double f) {
return log(((exp(((((double) M_PI) * f) * -0.25)) + exp((0.25 * (((double) M_PI) * f)))) / fma(f, (((double) M_PI) * 0.5), fma(pow(f, 5.0), (pow(((double) M_PI), 5.0) * 1.6276041666666666e-5), (0.005208333333333333 * pow((((double) M_PI) * f), 3.0)))))) * (-4.0 / ((double) M_PI));
}
function code(f) return Float64(log(Float64(Float64(exp(Float64(Float64(pi * f) * -0.25)) + exp(Float64(0.25 * Float64(pi * f)))) / fma(f, Float64(pi * 0.5), fma((f ^ 5.0), Float64((pi ^ 5.0) * 1.6276041666666666e-5), Float64(0.005208333333333333 * (Float64(pi * f) ^ 3.0)))))) * Float64(-4.0 / pi)) end
code[f_] := N[(N[Log[N[(N[(N[Exp[N[(N[(Pi * f), $MachinePrecision] * -0.25), $MachinePrecision]], $MachinePrecision] + N[Exp[N[(0.25 * N[(Pi * f), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(f * N[(Pi * 0.5), $MachinePrecision] + N[(N[Power[f, 5.0], $MachinePrecision] * N[(N[Power[Pi, 5.0], $MachinePrecision] * 1.6276041666666666e-5), $MachinePrecision] + N[(0.005208333333333333 * N[Power[N[(Pi * f), $MachinePrecision], 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] * N[(-4.0 / Pi), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\log \left(\frac{e^{\left(\pi \cdot f\right) \cdot -0.25} + e^{0.25 \cdot \left(\pi \cdot f\right)}}{\mathsf{fma}\left(f, \pi \cdot 0.5, \mathsf{fma}\left({f}^{5}, {\pi}^{5} \cdot 1.6276041666666666 \cdot 10^{-5}, 0.005208333333333333 \cdot {\left(\pi \cdot f\right)}^{3}\right)\right)}\right) \cdot \frac{-4}{\pi}
\end{array}
Initial program 8.0%
distribute-lft-neg-in 8.0%
*-commutative 8.0%
Simplified 7.6%
Taylor expanded in f around inf 8.0%
Taylor expanded in f around 0 95.4%
fma-define 95.4%
distribute-rgt-out-- 95.4%
metadata-eval 95.4%
+-commutative 95.4%
fma-define 95.4%
distribute-rgt-out-- 95.4%
metadata-eval 95.4%
distribute-rgt-out-- 95.4%
metadata-eval 95.4%
*-commutative 95.4%
Simplified 95.4%
Final simplification 95.4%
; Herbie alternative: log(2*f*(pi/16 - pi/48) + 4/(pi*f)) scaled by -4/pi.
(FPCore (f)
:precision binary64
(*
(log
(+
(* 2.0 (* f (- (* PI 0.0625) (* PI 0.020833333333333332))))
(* 4.0 (/ 1.0 (* PI f)))))
(/ -1.0 (/ PI 4.0))))
/* Herbie alternative: log(2*f*pi*(1/16 - 1/48) + 4/(pi*f)) scaled by -(4/pi). */
double code(double f) {
    double pi = (double) M_PI;
    double slope = (pi * 0.0625) - (pi * 0.020833333333333332);
    double linear = 2.0 * (f * slope);
    double inverse = 4.0 * (1.0 / (pi * f));
    double scale = -1.0 / (pi / 4.0);
    return log(linear + inverse) * scale;
}
/** Herbie alternative: log(2*f*pi*(1/16 - 1/48) + 4/(pi*f)) scaled by -(4/pi). */
public static double code(double f) {
    double slope = (Math.PI * 0.0625) - (Math.PI * 0.020833333333333332);
    double linear = 2.0 * (f * slope);
    double inverse = 4.0 * (1.0 / (Math.PI * f));
    double scale = -1.0 / (Math.PI / 4.0);
    return Math.log(linear + inverse) * scale;
}
def code(f):
    """Herbie alternative: log(2*f*pi*(1/16 - 1/48) + 4/(pi*f)) scaled by -(4/pi)."""
    slope = (math.pi * 0.0625) - (math.pi * 0.020833333333333332)
    linear = 2.0 * (f * slope)
    inverse = 4.0 * (1.0 / (math.pi * f))
    scale = -1.0 / (math.pi / 4.0)
    return math.log(linear + inverse) * scale
function code(f) return Float64(log(Float64(Float64(2.0 * Float64(f * Float64(Float64(pi * 0.0625) - Float64(pi * 0.020833333333333332)))) + Float64(4.0 * Float64(1.0 / Float64(pi * f))))) * Float64(-1.0 / Float64(pi / 4.0))) end
% Herbie alternative: log(2*f*pi*(1/16 - 1/48) + 4/(pi*f)) scaled by -(4/pi).
% Reformatted: the one-line declaration-plus-body form is invalid MATLAB.
function tmp = code(f)
    tmp = log(((2.0 * (f * ((pi * 0.0625) - (pi * 0.020833333333333332)))) + (4.0 * (1.0 / (pi * f))))) * (-1.0 / (pi / 4.0));
end
code[f_] := N[(N[Log[N[(N[(2.0 * N[(f * N[(N[(Pi * 0.0625), $MachinePrecision] - N[(Pi * 0.020833333333333332), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(4.0 * N[(1.0 / N[(Pi * f), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] * N[(-1.0 / N[(Pi / 4.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\log \left(2 \cdot \left(f \cdot \left(\pi \cdot 0.0625 - \pi \cdot 0.020833333333333332\right)\right) + 4 \cdot \frac{1}{\pi \cdot f}\right) \cdot \frac{-1}{\frac{\pi}{4}}
\end{array}
Initial program 8.0%
Taylor expanded in f around 0 95.6%
fma-define 95.6%
distribute-rgt-out-- 95.6%
metadata-eval 95.6%
associate-+r+ 95.6%
+-commutative 95.6%
Simplified 95.6%
div-inv 95.6%
log-prod 95.6%
Applied egg-rr 95.6%
Simplified 95.6%
Taylor expanded in f around 0 95.2%
Final simplification 95.2%
; Herbie alternative: small-f asymptotic form (-4/pi) * log(4/(pi*f)).
(FPCore (f) :precision binary64 (* (/ -4.0 PI) (log (/ 4.0 (* PI f)))))
/* Herbie alternative: small-f asymptotic form (-4/pi) * log(4/(pi*f)). */
double code(double f) {
    double coeff = -4.0 / ((double) M_PI);
    double arg = 4.0 / (((double) M_PI) * f);
    return coeff * log(arg);
}
/** Herbie alternative: small-f asymptotic form (-4/pi) * log(4/(pi*f)). */
public static double code(double f) {
    double coeff = -4.0 / Math.PI;
    double arg = 4.0 / (Math.PI * f);
    return coeff * Math.log(arg);
}
def code(f):
    """Herbie alternative: small-f asymptotic form (-4/pi) * log(4/(pi*f))."""
    coeff = -4.0 / math.pi
    arg = 4.0 / (math.pi * f)
    return coeff * math.log(arg)
# Herbie alternative: small-f asymptotic form (-4/pi) * log(4/(pi*f)).
function code(f) return Float64(Float64(-4.0 / pi) * log(Float64(4.0 / Float64(pi * f)))) end
% Herbie alternative: small-f asymptotic form (-4/pi) * log(4/(pi*f)).
% Reformatted: the one-line declaration-plus-body form is invalid MATLAB.
function tmp = code(f)
    tmp = (-4.0 / pi) * log((4.0 / (pi * f)));
end
(* Herbie alternative: small-f asymptotic form (-4/Pi) * Log[4/(Pi f)]. *)
code[f_] := N[(N[(-4.0 / Pi), $MachinePrecision] * N[Log[N[(4.0 / N[(Pi * f), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{-4}{\pi} \cdot \log \left(\frac{4}{\pi \cdot f}\right)
\end{array}
Initial program 8.0%
distribute-lft-neg-in 8.0%
*-commutative 8.0%
Simplified 7.6%
Taylor expanded in f around 0 94.9%
associate-*r/ 94.9%
associate-/l* 94.8%
associate-/r/ 94.8%
mul-1-neg 94.8%
unsub-neg 94.8%
distribute-rgt-out-- 94.8%
*-commutative 94.8%
associate-/r* 94.8%
metadata-eval 94.8%
metadata-eval 94.8%
Simplified 94.8%
Taylor expanded in f around 0 94.9%
div-sub 94.8%
remove-double-neg 94.8%
mul-1-neg 94.8%
log-rec 94.8%
div-sub 94.9%
associate-*r/ 94.9%
log-rec 94.9%
mul-1-neg 94.9%
remove-double-neg 94.9%
log-div 94.8%
associate-/r* 94.8%
associate-*l/ 94.7%
Simplified 94.7%
Final simplification 94.7%
; Herbie alternative: same asymptotic form, with the division by PI applied last.
(FPCore (f) :precision binary64 (/ (* -4.0 (log (/ 4.0 (* PI f)))) PI))
/* Herbie alternative: (-4 * log(4/(pi*f))) / pi — same asymptotic form as the
 * previous listing, dividing by pi last. */
double code(double f) {
return (-4.0 * log((4.0 / (((double) M_PI) * f)))) / ((double) M_PI);
}
/** Herbie alternative: (-4 * log(4/(pi*f))) / pi, dividing by pi last. */
public static double code(double f) {
return (-4.0 * Math.log((4.0 / (Math.PI * f)))) / Math.PI;
}
def code(f):
    """Herbie alternative: (-4 * log(4/(pi*f))) / pi, dividing by pi last."""
    numer = -4.0 * math.log(4.0 / (math.pi * f))
    return numer / math.pi
# Herbie alternative: (-4 * log(4/(pi*f))) / pi, dividing by pi last.
function code(f) return Float64(Float64(-4.0 * log(Float64(4.0 / Float64(pi * f)))) / pi) end
% Herbie alternative: (-4 * log(4/(pi*f))) / pi, dividing by pi last.
% Reformatted: the one-line declaration-plus-body form is invalid MATLAB.
function tmp = code(f)
    tmp = (-4.0 * log((4.0 / (pi * f)))) / pi;
end
(* Herbie alternative: (-4 Log[4/(Pi f)]) / Pi, dividing by Pi last. *)
code[f_] := N[(N[(-4.0 * N[Log[N[(4.0 / N[(Pi * f), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / Pi), $MachinePrecision]
\begin{array}{l}
\\
\frac{-4 \cdot \log \left(\frac{4}{\pi \cdot f}\right)}{\pi}
\end{array}
Initial program 8.0%
distribute-lft-neg-in 8.0%
*-commutative 8.0%
Simplified 7.6%
Taylor expanded in f around 0 94.9%
associate-*r/ 94.9%
associate-/l* 94.8%
associate-/r/ 94.8%
mul-1-neg 94.8%
unsub-neg 94.8%
distribute-rgt-out-- 94.8%
*-commutative 94.8%
associate-/r* 94.8%
metadata-eval 94.8%
metadata-eval 94.8%
Simplified 94.8%
associate-*l/ 94.9%
diff-log 94.8%
associate-/l/ 94.8%
*-commutative 94.8%
Applied egg-rr 94.8%
Final simplification 94.8%
; Herbie alternative: constant-folded form (log(1/8)/PI) * -4; the argument f is unused.
(FPCore (f) :precision binary64 (* (/ (log 0.125) PI) (- 4.0)))
/* Herbie alternative: constant fold (log(1/8)/pi) * -4; the input f is unused. */
double code(double f) {
    (void) f;
    double base = log(0.125) / ((double) M_PI);
    return base * -4.0;
}
/** Herbie alternative: constant fold (log(1/8)/pi) * -4; the input f is unused. */
public static double code(double f) {
return (Math.log(0.125) / Math.PI) * -4.0;
}
def code(f):
    """Herbie alternative: constant fold (log(1/8)/pi) * -4; `f` is unused."""
    base = math.log(0.125) / math.pi
    return base * -4.0
# Herbie alternative: constant fold (log(1/8)/pi) * -4; the input f is unused.
function code(f) return Float64(Float64(log(0.125) / pi) * Float64(-4.0)) end
% Herbie alternative: constant fold (log(1/8)/pi) * -4; the input f is unused.
% Reformatted: the one-line declaration-plus-body form is invalid MATLAB.
function tmp = code(f)
    tmp = (log(0.125) / pi) * -4.0;
end
(* Herbie alternative: constant fold (Log[1/8]/Pi) * -4; the argument f is unused. *)
code[f_] := N[(N[(N[Log[0.125], $MachinePrecision] / Pi), $MachinePrecision] * (-4.0)), $MachinePrecision]
\begin{array}{l}
\\
\frac{\log 0.125}{\pi} \cdot \left(-4\right)
\end{array}
Initial program 8.0%
Applied egg-rr 1.6%
Taylor expanded in f around 0 1.6%
Final simplification 1.6%
herbie shell --seed 2024034
; Source problem as given to Herbie: -(4/pi) * log((e^t + e^-t)/(e^t - e^-t))
; with t = (pi/4)*f, written out without let-bindings.
(FPCore (f)
:name "VandenBroeck and Keller, Equation (20)"
:precision binary64
(- (* (/ 1.0 (/ PI 4.0)) (log (/ (+ (exp (* (/ PI 4.0) f)) (exp (- (* (/ PI 4.0) f)))) (- (exp (* (/ PI 4.0) f)) (exp (- (* (/ PI 4.0) f)))))))))