
(FPCore (f) :precision binary64 (let* ((t_0 (* (/ PI 4.0) f)) (t_1 (exp t_0)) (t_2 (exp (- t_0)))) (- (* (/ 1.0 (/ PI 4.0)) (log (/ (+ t_1 t_2) (- t_1 t_2)))))))
/* Evaluates -(4/pi) * log((e^t + e^-t) / (e^t - e^-t)) with t = (pi/4)*f,
 * i.e. -(4/pi) * log(coth(pi*f/4)), in binary64 precision. */
double code(double f) {
    /* Scale the input by pi/4. */
    double scaled = (((double) M_PI) / 4.0) * f;
    /* Growing and decaying exponentials of the scaled argument. */
    double grow = exp(scaled);
    double decay = exp(-scaled);
    /* (grow + decay) / (grow - decay) is coth(scaled). */
    double ratio = (grow + decay) / (grow - decay);
    return -((1.0 / (((double) M_PI) / 4.0)) * log(ratio));
}
/** Evaluates -(4/pi) * log((e^t + e^-t) / (e^t - e^-t)) with t = (pi/4)*f. */
public static double code(double f) {
    double scaled = (Math.PI / 4.0) * f;
    double ePos = Math.exp(scaled);
    double eNeg = Math.exp(-scaled);
    // (ePos + eNeg) / (ePos - eNeg) is coth(scaled).
    double ratio = (ePos + eNeg) / (ePos - eNeg);
    return -((1.0 / (Math.PI / 4.0)) * Math.log(ratio));
}
def code(f):
    """Evaluate -(4/pi) * log((e^t + e^-t) / (e^t - e^-t)) with t = (pi/4) * f.

    Equivalently -(4/pi) * log(coth(pi*f/4)). The original report line had all
    four statements fused onto one physical line, which is a SyntaxError in
    Python; the computation itself is unchanged.
    """
    t_0 = (math.pi / 4.0) * f
    t_1 = math.exp(t_0)
    t_2 = math.exp(-t_0)
    return -((1.0 / (math.pi / 4.0)) * math.log((t_1 + t_2) / (t_1 - t_2)))
# Evaluate -(4/pi) * log((e^t + e^-t) / (e^t - e^-t)) with t = (pi/4) * f.
# The original report line fused all statements onto one line without
# separators, which does not parse; the computation itself is unchanged.
function code(f)
    t_0 = Float64(Float64(pi / 4.0) * f)
    t_1 = exp(t_0)
    t_2 = exp(Float64(-t_0))
    return Float64(-Float64(Float64(1.0 / Float64(pi / 4.0)) * log(Float64(Float64(t_1 + t_2) / Float64(t_1 - t_2)))))
end
% Evaluates -(4/pi) * log((e^t + e^-t) / (e^t - e^-t)) with t = (pi/4)*f.
function tmp = code(f)
    scaled = (pi / 4.0) * f;
    ep = exp(scaled);
    en = exp(-scaled);
    % (ep + en) / (ep - en) is coth(scaled).
    tmp = -((1.0 / (pi / 4.0)) * log((ep + en) / (ep - en)));
end
(* Evaluates -(4/Pi) * Log[(E^t + E^-t)/(E^t - E^-t)] with t = (Pi/4) f; each intermediate is rounded via N[..., $MachinePrecision] to model binary64 arithmetic. *)
code[f_] := Block[{t$95$0 = N[(N[(Pi / 4.0), $MachinePrecision] * f), $MachinePrecision]}, Block[{t$95$1 = N[Exp[t$95$0], $MachinePrecision]}, Block[{t$95$2 = N[Exp[(-t$95$0)], $MachinePrecision]}, (-N[(N[(1.0 / N[(Pi / 4.0), $MachinePrecision]), $MachinePrecision] * N[Log[N[(N[(t$95$1 + t$95$2), $MachinePrecision] / N[(t$95$1 - t$95$2), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision])]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{\pi}{4} \cdot f\\
t_1 := e^{t_0}\\
t_2 := e^{-t_0}\\
-\frac{1}{\frac{\pi}{4}} \cdot \log \left(\frac{t_1 + t_2}{t_1 - t_2}\right)
\end{array}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 5 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (f) :precision binary64 (let* ((t_0 (* (/ PI 4.0) f)) (t_1 (exp t_0)) (t_2 (exp (- t_0)))) (- (* (/ 1.0 (/ PI 4.0)) (log (/ (+ t_1 t_2) (- t_1 t_2)))))))
/* Reference binary64 implementation of -(4/pi) * log(coth((pi/4) * f)). */
double code(double f) {
    double x = (((double) M_PI) / 4.0) * f;
    double up = exp(x);
    double down = exp(-x);
    /* (up + down) / (up - down) == coth(x). */
    return -((1.0 / (((double) M_PI) / 4.0)) * log((up + down) / (up - down)));
}
/** Reference binary64 implementation of -(4/pi) * log(coth((pi/4) * f)). */
public static double code(double f) {
    double x = (Math.PI / 4.0) * f;
    double up = Math.exp(x);
    double down = Math.exp(-x);
    double quotient = (up + down) / (up - down);
    return -((1.0 / (Math.PI / 4.0)) * Math.log(quotient));
}
def code(f):
    """Reference implementation of -(4/pi) * log(coth((pi/4) * f)).

    Reformatted: the report line fused four statements onto one physical
    line, which is a SyntaxError; the arithmetic is unchanged.
    """
    t_0 = (math.pi / 4.0) * f
    t_1 = math.exp(t_0)
    t_2 = math.exp(-t_0)
    return -((1.0 / (math.pi / 4.0)) * math.log((t_1 + t_2) / (t_1 - t_2)))
# Reference implementation of -(4/pi) * log(coth((pi/4) * f)); reformatted
# because the report fused the statements onto one unparsable line.
function code(f)
    t_0 = Float64(Float64(pi / 4.0) * f)
    t_1 = exp(t_0)
    t_2 = exp(Float64(-t_0))
    return Float64(-Float64(Float64(1.0 / Float64(pi / 4.0)) * log(Float64(Float64(t_1 + t_2) / Float64(t_1 - t_2)))))
end
% Reference implementation of -(4/pi) * log(coth((pi/4) * f)).
function tmp = code(f)
    x = (pi / 4.0) * f;
    up = exp(x);
    down = exp(-x);
    tmp = -((1.0 / (pi / 4.0)) * log((up + down) / (up - down)));
end
(* Reference: -(4/Pi) * Log[(E^t + E^-t)/(E^t - E^-t)] with t = (Pi/4) f; N[..., $MachinePrecision] wraps model binary64 rounding of each intermediate. *)
code[f_] := Block[{t$95$0 = N[(N[(Pi / 4.0), $MachinePrecision] * f), $MachinePrecision]}, Block[{t$95$1 = N[Exp[t$95$0], $MachinePrecision]}, Block[{t$95$2 = N[Exp[(-t$95$0)], $MachinePrecision]}, (-N[(N[(1.0 / N[(Pi / 4.0), $MachinePrecision]), $MachinePrecision] * N[Log[N[(N[(t$95$1 + t$95$2), $MachinePrecision] / N[(t$95$1 - t$95$2), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision])]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{\pi}{4} \cdot f\\
t_1 := e^{t_0}\\
t_2 := e^{-t_0}\\
-\frac{1}{\frac{\pi}{4}} \cdot \log \left(\frac{t_1 + t_2}{t_1 - t_2}\right)
\end{array}
\end{array}
(FPCore (f)
:precision binary64
(/
(-
(log
(*
(/
2.0
(fma
(pow f 5.0)
(* (pow PI 5.0) 1.6276041666666666e-5)
(fma
(pow PI 3.0)
(* (pow f 3.0) 0.005208333333333333)
(* PI (* f 0.5)))))
(cosh (/ f (/ 4.0 PI))))))
(/ PI 4.0)))
/* Herbie alternative: replaces e^t - e^-t by an odd polynomial (fma-fused
 * series for 2*sinh(pi*f/4)) to avoid cancellation near f = 0, then
 * multiplies by cosh to recover coth before the log. */
double code(double f) {
    double pi = (double) M_PI;
    /* Inner fma: pi^3 * f^3/192 + pi*f/2 (low-order series terms). */
    double inner = fma(pow(pi, 3.0), (pow(f, 3.0) * 0.005208333333333333), (pi * (f * 0.5)));
    /* Outer fma adds the fifth-order series term. */
    double series = fma(pow(f, 5.0), (pow(pi, 5.0) * 1.6276041666666666e-5), inner);
    double ratio = (2.0 / series) * cosh(f / (4.0 / pi));
    return -log(ratio) / (pi / 4.0);
}
# Herbie alternative: fma-fused series for 2*sinh(pi*f/4) in the denominator,
# multiplied by cosh to recover coth before taking the log.
function code(f)
    series = fma((f ^ 5.0), Float64((pi ^ 5.0) * 1.6276041666666666e-5), fma((pi ^ 3.0), Float64((f ^ 3.0) * 0.005208333333333333), Float64(pi * Float64(f * 0.5))))
    ratio = Float64(Float64(2.0 / series) * cosh(Float64(f / Float64(4.0 / pi))))
    return Float64(Float64(-log(ratio)) / Float64(pi / 4.0))
end
(* Herbie alternative: series denominator (terms in Pi f/2, Pi^3 f^3/192, Pi^5 f^5/61440) times Cosh, then -Log[...]/(Pi/4); machine-precision rounding at each step. *)
code[f_] := N[((-N[Log[N[(N[(2.0 / N[(N[Power[f, 5.0], $MachinePrecision] * N[(N[Power[Pi, 5.0], $MachinePrecision] * 1.6276041666666666e-5), $MachinePrecision] + N[(N[Power[Pi, 3.0], $MachinePrecision] * N[(N[Power[f, 3.0], $MachinePrecision] * 0.005208333333333333), $MachinePrecision] + N[(Pi * N[(f * 0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[Cosh[N[(f / N[(4.0 / Pi), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]) / N[(Pi / 4.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{-\log \left(\frac{2}{\mathsf{fma}\left({f}^{5}, {\pi}^{5} \cdot 1.6276041666666666 \cdot 10^{-5}, \mathsf{fma}\left({\pi}^{3}, {f}^{3} \cdot 0.005208333333333333, \pi \cdot \left(f \cdot 0.5\right)\right)\right)} \cdot \cosh \left(\frac{f}{\frac{4}{\pi}}\right)\right)}{\frac{\pi}{4}}
\end{array}
Initial program 6.4%
Taylor expanded in f around 0 96.5%
fma-def 96.5%
distribute-rgt-out-- 96.5%
metadata-eval 96.5%
fma-def 96.5%
distribute-rgt-out-- 96.5%
metadata-eval 96.5%
*-commutative 96.5%
distribute-rgt-out-- 96.5%
associate-*l* 96.5%
metadata-eval 96.5%
Simplified 96.5%
div-inv 96.5%
cosh-undef 96.5%
div-inv 96.5%
metadata-eval 96.5%
Applied egg-rr 96.5%
associate-*l/ 96.6%
Applied egg-rr 96.6%
Simplified 96.6%
Final simplification 96.6%
(FPCore (f) :precision binary64 (- (fma 4.0 (/ (log (/ 4.0 (* f PI))) PI) (* (* PI 0.041666666666666664) (* f (* 2.0 f))))))
/* Herbie alternative: -4/pi * log(4/(pi*f)) plus a quadratic correction
 * term, fused with a single fma. */
double code(double f) {
    double pi = (double) M_PI;
    /* Logarithmic term: log(4 / (pi*f)) / pi. */
    double logterm = log(4.0 / (f * pi)) / pi;
    /* Quadratic correction: (pi/24) * f^2, written as pi*0.0416.. * 2f^2. */
    double quad = (pi * 0.041666666666666664) * (f * (2.0 * f));
    return -fma(4.0, logterm, quad);
}
# Herbie alternative: fma of the log(4/(pi f))/pi term with a quadratic
# correction, then negated.
function code(f)
    logterm = Float64(log(Float64(4.0 / Float64(f * pi))) / pi)
    quad = Float64(Float64(pi * 0.041666666666666664) * Float64(f * Float64(2.0 * f)))
    return Float64(-fma(4.0, logterm, quad))
end
(* Herbie alternative: -(4 * Log[4/(f Pi)]/Pi + (Pi/24) * 2 f^2), with machine-precision rounding of each intermediate. *)
code[f_] := (-N[(4.0 * N[(N[Log[N[(4.0 / N[(f * Pi), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] / Pi), $MachinePrecision] + N[(N[(Pi * 0.041666666666666664), $MachinePrecision] * N[(f * N[(2.0 * f), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision])
\begin{array}{l}
\\
-\mathsf{fma}\left(4, \frac{\log \left(\frac{4}{f \cdot \pi}\right)}{\pi}, \left(\pi \cdot 0.041666666666666664\right) \cdot \left(f \cdot \left(2 \cdot f\right)\right)\right)
\end{array}
Initial program 6.4%
Taylor expanded in f around 0 96.5%
fma-def 96.5%
distribute-rgt-out-- 96.5%
metadata-eval 96.5%
fma-def 96.5%
distribute-rgt-out-- 96.5%
metadata-eval 96.5%
*-commutative 96.5%
distribute-rgt-out-- 96.5%
associate-*l* 96.5%
metadata-eval 96.5%
Simplified 96.5%
div-inv 96.5%
cosh-undef 96.5%
div-inv 96.5%
metadata-eval 96.5%
Applied egg-rr 96.5%
Taylor expanded in f around 0 96.3%
Simplified 96.3%
Final simplification 96.3%
(FPCore (f) :precision binary64 (- (/ (log (+ (/ (/ 4.0 f) PI) (* PI (* f 0.125)))) (* PI 0.25))))
/* Herbie alternative: approximates coth(pi*f/4) by 4/(pi*f) + pi*f/8
 * inside the log, avoiding exp entirely. */
double code(double f) {
    double pi = (double) M_PI;
    double arg = ((4.0 / f) / pi) + (pi * (f * 0.125));
    return -(log(arg) / (pi * 0.25));
}
/** Herbie alternative: coth(pi*f/4) approximated by 4/(pi*f) + pi*f/8. */
public static double code(double f) {
    double arg = ((4.0 / f) / Math.PI) + (Math.PI * (f * 0.125));
    return -(Math.log(arg) / (Math.PI * 0.25));
}
def code(f):
    """Herbie alternative: coth(pi*f/4) approximated by 4/(pi*f) + pi*f/8."""
    arg = ((4.0 / f) / math.pi) + (math.pi * (f * 0.125))
    return -(math.log(arg) / (math.pi * 0.25))
# Herbie alternative: coth(pi*f/4) approximated by 4/(pi*f) + pi*f/8.
function code(f)
    arg = Float64(Float64(Float64(4.0 / f) / pi) + Float64(pi * Float64(f * 0.125)))
    return Float64(-Float64(log(arg) / Float64(pi * 0.25)))
end
% Herbie alternative: coth(pi*f/4) approximated by 4/(pi*f) + pi*f/8.
function tmp = code(f)
    arg = ((4.0 / f) / pi) + (pi * (f * 0.125));
    tmp = -(log(arg) / (pi * 0.25));
end
(* Herbie alternative: -Log[4/(f Pi) + Pi f/8] / (Pi/4), with machine-precision rounding of each intermediate. *)
code[f_] := (-N[(N[Log[N[(N[(N[(4.0 / f), $MachinePrecision] / Pi), $MachinePrecision] + N[(Pi * N[(f * 0.125), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] / N[(Pi * 0.25), $MachinePrecision]), $MachinePrecision])
\begin{array}{l}
\\
-\frac{\log \left(\frac{\frac{4}{f}}{\pi} + \pi \cdot \left(f \cdot 0.125\right)\right)}{\pi \cdot 0.25}
\end{array}
Initial program 6.4%
Taylor expanded in f around 0 95.7%
distribute-rgt-out-- 95.7%
metadata-eval 95.7%
Simplified 95.7%
Taylor expanded in f around 0 95.7%
associate-*r/ 95.7%
metadata-eval 95.7%
Simplified 95.7%
associate-*l/ 95.9%
*-un-lft-identity 95.9%
associate-/r* 95.9%
associate-*r* 95.9%
div-inv 95.9%
metadata-eval 95.9%
Applied egg-rr 95.9%
Final simplification 95.9%
(FPCore (f) :precision binary64 (* (/ (log (/ 4.0 (* f PI))) PI) -4.0))
/* Herbie alternative: leading-order form -4/pi * log(4/(pi*f)). */
double code(double f) {
    double pi = (double) M_PI;
    double scaled_log = log(4.0 / (f * pi)) / pi;
    return scaled_log * -4.0;
}
/** Herbie alternative: leading-order form -4/pi * log(4/(pi*f)). */
public static double code(double f) {
    double scaledLog = Math.log(4.0 / (f * Math.PI)) / Math.PI;
    return scaledLog * -4.0;
}
def code(f):
    """Herbie alternative: leading-order form -4/pi * log(4/(pi*f))."""
    scaled_log = math.log(4.0 / (f * math.pi)) / math.pi
    return scaled_log * -4.0
# Herbie alternative: leading-order form -4/pi * log(4/(pi*f)).
function code(f)
    scaled_log = Float64(log(Float64(4.0 / Float64(f * pi))) / pi)
    return Float64(scaled_log * -4.0)
end
% Herbie alternative: leading-order form -4/pi * log(4/(pi*f)).
function tmp = code(f)
    scaled_log = log(4.0 / (f * pi)) / pi;
    tmp = scaled_log * -4.0;
end
(* Herbie alternative: Log[4/(f Pi)]/Pi * -4, with machine-precision rounding of each intermediate. *)
code[f_] := N[(N[(N[Log[N[(4.0 / N[(f * Pi), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] / Pi), $MachinePrecision] * -4.0), $MachinePrecision]
\begin{array}{l}
\\
\frac{\log \left(\frac{4}{f \cdot \pi}\right)}{\pi} \cdot -4
\end{array}
Initial program 6.4%
distribute-lft-neg-in 6.4%
*-commutative 6.4%
associate-/r/ 6.4%
associate-*l/ 6.4%
metadata-eval 6.4%
distribute-neg-frac 6.4%
Simplified 6.4%
Taylor expanded in f around -inf 6.4%
Taylor expanded in f around 0 95.7%
*-commutative 95.7%
Simplified 95.8%
Final simplification 95.8%
(FPCore (f) :precision binary64 (* (log 1.3333333333333333) (/ -1.0 (/ PI 4.0))))
/* Herbie alternative: constant-folded to log(4/3) * -4/pi; the input f is
 * unused (the original parameter is kept for interface compatibility). */
double code(double f) {
    double scale = -1.0 / (((double) M_PI) / 4.0);
    return log(1.3333333333333333) * scale;
}
/** Herbie alternative: constant-folded to log(4/3) * -4/pi; f is unused. */
public static double code(double f) {
    double scale = -1.0 / (Math.PI / 4.0);
    return Math.log(1.3333333333333333) * scale;
}
def code(f):
    """Herbie alternative: constant-folded to log(4/3) * -4/pi; f is unused."""
    scale = -1.0 / (math.pi / 4.0)
    return math.log(1.3333333333333333) * scale
# Herbie alternative: constant-folded to log(4/3) * -4/pi; f is unused.
function code(f)
    scale = Float64(-1.0 / Float64(pi / 4.0))
    return Float64(log(1.3333333333333333) * scale)
end
% Herbie alternative: constant-folded to log(4/3) * -4/pi; f is unused.
function tmp = code(f)
    scale = -1.0 / (pi / 4.0);
    tmp = log(1.3333333333333333) * scale;
end
(* Herbie alternative: constant Log[4/3] * (-1/(Pi/4)); the argument f is unused. *)
code[f_] := N[(N[Log[1.3333333333333333], $MachinePrecision] * N[(-1.0 / N[(Pi / 4.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\log 1.3333333333333333 \cdot \frac{-1}{\frac{\pi}{4}}
\end{array}
Initial program 6.4%
Applied egg-rr 13.4%
Taylor expanded in f around 0 13.4%
Final simplification 13.4%
herbie shell --seed 2023193
(FPCore (f)
:name "VandenBroeck and Keller, Equation (20)"
:precision binary64
(- (* (/ 1.0 (/ PI 4.0)) (log (/ (+ (exp (* (/ PI 4.0) f)) (exp (- (* (/ PI 4.0) f)))) (- (exp (* (/ PI 4.0) f)) (exp (- (* (/ PI 4.0) f)))))))))