
; Initial program: with t = (pi/4)*f, computes -(1/(pi/4)) * log((e^t + e^(-t)) / (e^t - e^(-t))).
(FPCore (f) :precision binary64 (let* ((t_0 (* (/ PI 4.0) f)) (t_1 (exp t_0)) (t_2 (exp (- t_0)))) (- (* (/ 1.0 (/ PI 4.0)) (log (/ (+ t_1 t_2) (- t_1 t_2)))))))
double code(double f) {
    // Initial program: with x = (pi/4)*f, evaluate
    // -(1/(pi/4)) * log((e^x + e^-x) / (e^x - e^-x)) in binary64.
    const double quarterPi = ((double) M_PI) / 4.0;
    double x = quarterPi * f;
    double grow = exp(x);
    double decay = exp(-x);
    double ratio = (grow + decay) / (grow - decay);
    return -((1.0 / quarterPi) * log(ratio));
}
public static double code(double f) {
    // Initial program: with x = (pi/4)*f, evaluate
    // -(1/(pi/4)) * log((e^x + e^-x) / (e^x - e^-x)) in double precision.
    final double quarterPi = Math.PI / 4.0;
    final double x = quarterPi * f;
    final double grow = Math.exp(x);
    final double decay = Math.exp(-x);
    final double ratio = (grow + decay) / (grow - decay);
    return -((1.0 / quarterPi) * Math.log(ratio));
}
def code(f):
    """With x = (pi/4)*f, return -(1/(pi/4)) * log((e**x + e**-x) / (e**x - e**-x))."""
    quarter_pi = math.pi / 4.0
    x = quarter_pi * f
    grow = math.exp(x)
    decay = math.exp(-x)
    ratio = (grow + decay) / (grow - decay)
    return -((1.0 / quarter_pi) * math.log(ratio))
function code(f)
    # With x = (pi/4)*f, return -(1/(pi/4)) * log((e^x + e^-x) / (e^x - e^-x));
    # Float64(...) wrappers round each step to binary64, matching the FPCore.
    k = Float64(pi / 4.0)
    x = Float64(k * f)
    grow = exp(x)
    decay = exp(Float64(-x))
    ratio = Float64(Float64(grow + decay) / Float64(grow - decay))
    return Float64(-Float64(Float64(1.0 / k) * log(ratio)))
end
function tmp = code(f)
% With x = (pi/4)*f, return -(1/(pi/4)) * log((e^x + e^-x) / (e^x - e^-x)).
  k = pi / 4.0;
  x = k * f;
  grow = exp(x);
  decay = exp(-x);
  tmp = -((1.0 / k) * log((grow + decay) / (grow - decay)));
end
(* Initial program: t0 = (Pi/4) f, t1 = Exp[t0], t2 = Exp[-t0];
   result is -(1/(Pi/4)) Log[(t1 + t2)/(t1 - t2)], with N[..., $MachinePrecision]
   rounding every intermediate step, mirroring the binary64 FPCore. *)
code[f_] := Block[{t$95$0 = N[(N[(Pi / 4.0), $MachinePrecision] * f), $MachinePrecision]}, Block[{t$95$1 = N[Exp[t$95$0], $MachinePrecision]}, Block[{t$95$2 = N[Exp[(-t$95$0)], $MachinePrecision]}, (-N[(N[(1.0 / N[(Pi / 4.0), $MachinePrecision]), $MachinePrecision] * N[Log[N[(N[(t$95$1 + t$95$2), $MachinePrecision] / N[(t$95$1 - t$95$2), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision])]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{\pi}{4} \cdot f\\
t_1 := e^{t_0}\\
t_2 := e^{-t_0}\\
-\frac{1}{\frac{\pi}{4}} \cdot \log \left(\frac{t_1 + t_2}{t_1 - t_2}\right)
\end{array}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 3 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
; Initial program, repeated verbatim from the listing above.
(FPCore (f) :precision binary64 (let* ((t_0 (* (/ PI 4.0) f)) (t_1 (exp t_0)) (t_2 (exp (- t_0)))) (- (* (/ 1.0 (/ PI 4.0)) (log (/ (+ t_1 t_2) (- t_1 t_2)))))))
// Initial program, repeated: t_0 = (pi/4)*f, then
// -(1/(pi/4)) * log((e^t_0 + e^-t_0) / (e^t_0 - e^-t_0)).
double code(double f) {
double t_0 = (((double) M_PI) / 4.0) * f;
double t_1 = exp(t_0);
double t_2 = exp(-t_0);
return -((1.0 / (((double) M_PI) / 4.0)) * log(((t_1 + t_2) / (t_1 - t_2))));
}
// Initial program, repeated: t_0 = (pi/4)*f, then
// -(1/(pi/4)) * log((e^t_0 + e^-t_0) / (e^t_0 - e^-t_0)).
public static double code(double f) {
double t_0 = (Math.PI / 4.0) * f;
double t_1 = Math.exp(t_0);
double t_2 = Math.exp(-t_0);
return -((1.0 / (Math.PI / 4.0)) * Math.log(((t_1 + t_2) / (t_1 - t_2))));
}
# Initial program, repeated (line breaks were lost in extraction; same body as the first Python listing).
def code(f): t_0 = (math.pi / 4.0) * f t_1 = math.exp(t_0) t_2 = math.exp(-t_0) return -((1.0 / (math.pi / 4.0)) * math.log(((t_1 + t_2) / (t_1 - t_2))))
# Initial program, repeated; Float64(...) wrappers round each step to binary64.
function code(f) t_0 = Float64(Float64(pi / 4.0) * f) t_1 = exp(t_0) t_2 = exp(Float64(-t_0)) return Float64(-Float64(Float64(1.0 / Float64(pi / 4.0)) * log(Float64(Float64(t_1 + t_2) / Float64(t_1 - t_2))))) end
% Initial program, repeated: -(1/(pi/4)) * log((e^t0 + e^-t0)/(e^t0 - e^-t0)) with t0 = (pi/4)*f.
function tmp = code(f) t_0 = (pi / 4.0) * f; t_1 = exp(t_0); t_2 = exp(-t_0); tmp = -((1.0 / (pi / 4.0)) * log(((t_1 + t_2) / (t_1 - t_2)))); end
(* Initial program, repeated; N[..., $MachinePrecision] rounds every intermediate step. *)
code[f_] := Block[{t$95$0 = N[(N[(Pi / 4.0), $MachinePrecision] * f), $MachinePrecision]}, Block[{t$95$1 = N[Exp[t$95$0], $MachinePrecision]}, Block[{t$95$2 = N[Exp[(-t$95$0)], $MachinePrecision]}, (-N[(N[(1.0 / N[(Pi / 4.0), $MachinePrecision]), $MachinePrecision] * N[Log[N[(N[(t$95$1 + t$95$2), $MachinePrecision] / N[(t$95$1 - t$95$2), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision])]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{\pi}{4} \cdot f\\
t_1 := e^{t_0}\\
t_2 := e^{-t_0}\\
-\frac{1}{\frac{\pi}{4}} \cdot \log \left(\frac{t_1 + t_2}{t_1 - t_2}\right)
\end{array}
\end{array}
; Alternative 1 of 3: fma-based form derived from the initial program.
(FPCore (f) :precision binary64 (- (fma 4.0 (/ (- (log (* (/ PI 4.0) f))) PI) (fma 2.0 (* (/ (pow f 2.0) PI) (/ PI (/ (/ 12.0 PI) 0.5))) 0.0))))
double code(double f) {
    // Alternative 1: -fma(4, -log((pi/4)*f)/pi, fma(2, (f^2/pi) * (pi/((12/pi)/0.5)), 0)).
    // The fma calls and constant shapes are kept exactly as generated.
    double logTerm = -log((((double) M_PI) / 4.0) * f) / ((double) M_PI);
    double quadTerm = (pow(f, 2.0) / ((double) M_PI)) * (((double) M_PI) / ((12.0 / ((double) M_PI)) / 0.5));
    return -fma(4.0, logTerm, fma(2.0, quadTerm, 0.0));
}
function code(f)
    # Alternative 1: -fma(4, -log((pi/4)*f)/pi, fma(2, (f^2/pi) * (pi/((12/pi)/0.5)), 0)),
    # with Float64(...) rounding each step to binary64 as in the FPCore.
    scaled = Float64(Float64(pi / 4.0) * f)
    linear = Float64(Float64(-log(scaled)) / pi)
    quad = Float64(Float64((f ^ 2.0) / pi) * Float64(pi / Float64(Float64(12.0 / pi) / 0.5)))
    return Float64(-fma(4.0, linear, fma(2.0, quad, 0.0)))
end
(* Alternative 1 in Wolfram form: -(4 * (-Log[(Pi/4) f])/Pi + 2 * (f^2/Pi) * (Pi/((12/Pi)/0.5)) + 0),
   with N[..., $MachinePrecision] rounding each step. *)
code[f_] := (-N[(4.0 * N[((-N[Log[N[(N[(Pi / 4.0), $MachinePrecision] * f), $MachinePrecision]], $MachinePrecision]) / Pi), $MachinePrecision] + N[(2.0 * N[(N[(N[Power[f, 2.0], $MachinePrecision] / Pi), $MachinePrecision] * N[(Pi / N[(N[(12.0 / Pi), $MachinePrecision] / 0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + 0.0), $MachinePrecision]), $MachinePrecision])
\begin{array}{l}
\\
-\mathsf{fma}\left(4, \frac{-\log \left(\frac{\pi}{4} \cdot f\right)}{\pi}, \mathsf{fma}\left(2, \frac{{f}^{2}}{\pi} \cdot \frac{\pi}{\frac{\frac{12}{\pi}}{0.5}}, 0\right)\right)
\end{array}
Initial program 6.0%
Taylor expanded in f around 0 96.5%
Simplified 96.5%
div-sub 96.4%
Applied egg-rr 96.4%
div-sub 96.5%
log-div 96.1%
Simplified 96.5%
fma-udef 96.5%
pow-div 96.5%
metadata-eval 96.5%
pow1 96.5%
div-inv 96.5%
*-commutative 96.5%
metadata-eval 96.5%
Applied egg-rr 96.5%
+-rgt-identity 96.5%
*-commutative 96.5%
*-commutative 96.5%
associate-*l* 96.5%
fma-udef 96.5%
associate-*l* 96.5%
metadata-eval 96.5%
associate-*l* 96.5%
metadata-eval 96.5%
*-commutative 96.5%
Simplified 96.5%
*-commutative 96.5%
flip-+ 96.5%
associate-*r/ 96.5%
difference-of-squares 96.5%
distribute-lft-out 96.5%
metadata-eval 96.5%
distribute-lft-out-- 96.5%
metadata-eval 96.5%
distribute-lft-out-- 96.5%
metadata-eval 96.5%
Applied egg-rr 96.5%
associate-/l* 96.5%
associate-/l* 96.5%
Simplified 96.5%
Final simplification 96.5%
; Alternative 2 of 3: log(f * pi * 0.25) * (-(-4)/pi).
(FPCore (f) :precision binary64 (* (log (* f (* PI 0.25))) (/ (- -4.0) PI)))
double code(double f) {
    // Alternative 2: log(f * pi * 0.25) scaled by -(-4)/pi.
    // The -(-4.0) is kept verbatim from the generated FPCore.
    double arg = f * (((double) M_PI) * 0.25);
    double scale = -(-4.0) / ((double) M_PI);
    return log(arg) * scale;
}
public static double code(double f) {
    // Alternative 2: log(f * pi * 0.25) scaled by -(-4)/pi.
    // The -(-4.0) is kept verbatim from the generated FPCore.
    final double arg = f * (Math.PI * 0.25);
    final double scale = -(-4.0) / Math.PI;
    return Math.log(arg) * scale;
}
def code(f):
    """Alternative 2: log(f * pi * 0.25) scaled by -(-4)/pi (kept verbatim from the FPCore)."""
    arg = f * (math.pi * 0.25)
    scale = -(-4.0) / math.pi
    return math.log(arg) * scale
function code(f)
    # Alternative 2: log(f * pi * 0.25) scaled by -(-4)/pi, rounded stepwise to Float64.
    arg = Float64(f * Float64(pi * 0.25))
    scale = Float64(Float64(-(-4.0)) / pi)
    return Float64(log(arg) * scale)
end
function tmp = code(f)
% Alternative 2: log(f * pi * 0.25) scaled by -(-4)/pi (kept verbatim from the FPCore).
  arg = f * (pi * 0.25);
  scale = -(-4.0) / pi;
  tmp = log(arg) * scale;
end
(* Alternative 2 in Wolfram form: Log[f * Pi * 0.25] * ((--4.0)/Pi); the (--4.0)
   reproduces -(-4.0) from the FPCore. N[..., $MachinePrecision] rounds each step. *)
code[f_] := N[(N[Log[N[(f * N[(Pi * 0.25), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] * N[((--4.0) / Pi), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\log \left(f \cdot \left(\pi \cdot 0.25\right)\right) \cdot \frac{-\left(-4\right)}{\pi}
\end{array}
Initial program 6.0%
Taylor expanded in f around 0 95.7%
associate-/r* 95.3%
distribute-rgt-out-- 95.3%
metadata-eval 95.3%
Simplified 95.3%
*-commutative 95.3%
add-log-exp 72.8%
exp-to-pow 72.8%
*-un-lft-identity 72.8%
*-commutative 72.8%
times-frac 72.8%
metadata-eval 72.8%
inv-pow 72.8%
div-inv 72.8%
metadata-eval 72.8%
*-commutative 72.8%
unpow-prod-down 72.8%
metadata-eval 72.8%
inv-pow 72.8%
Applied egg-rr 72.8%
log-pow 95.3%
associate-*r/ 95.3%
metadata-eval 95.3%
associate-*r/ 95.0%
associate-*r/ 95.0%
metadata-eval 95.0%
Simplified 95.0%
associate-*l/ 95.1%
associate-/l/ 95.8%
Applied egg-rr 95.8%
expm1-log1p-u 94.5%
expm1-udef 94.5%
Applied egg-rr 94.9%
expm1-def 94.9%
expm1-log1p 96.1%
Simplified 96.1%
Final simplification 96.1%
; Alternative 3 of 3: 4 * log((pi/4) * f) / pi.
(FPCore (f) :precision binary64 (* 4.0 (/ (log (* (/ PI 4.0) f)) PI)))
double code(double f) {
    // Alternative 3: 4 * log((pi/4) * f) / pi.
    double inner = (((double) M_PI) / 4.0) * f;
    return 4.0 * (log(inner) / ((double) M_PI));
}
public static double code(double f) {
    // Alternative 3: 4 * log((pi/4) * f) / pi.
    final double inner = (Math.PI / 4.0) * f;
    return 4.0 * (Math.log(inner) / Math.PI);
}
def code(f):
    """Alternative 3: return 4 * log((pi/4) * f) / pi."""
    inner = (math.pi / 4.0) * f
    return 4.0 * (math.log(inner) / math.pi)
function code(f)
    # Alternative 3: 4 * log((pi/4) * f) / pi, rounded stepwise to Float64.
    inner = Float64(Float64(pi / 4.0) * f)
    return Float64(4.0 * Float64(log(inner) / pi))
end
function tmp = code(f)
% Alternative 3: 4 * log((pi/4) * f) / pi.
  inner = (pi / 4.0) * f;
  tmp = 4.0 * (log(inner) / pi);
end
(* Alternative 3 in Wolfram form: 4 * Log[(Pi/4) f] / Pi, with N[..., $MachinePrecision]
   rounding each step. *)
code[f_] := N[(4.0 * N[(N[Log[N[(N[(Pi / 4.0), $MachinePrecision] * f), $MachinePrecision]], $MachinePrecision] / Pi), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
4 \cdot \frac{\log \left(\frac{\pi}{4} \cdot f\right)}{\pi}
\end{array}
Initial program 6.0%
Taylor expanded in f around 0 95.7%
Taylor expanded in f around 0 96.2%
Simplified 96.2%
Final simplification 96.2%
herbie shell --seed 2024040
(FPCore (f)
:name "VandenBroeck and Keller, Equation (20)"
:precision binary64
(- (* (/ 1.0 (/ PI 4.0)) (log (/ (+ (exp (* (/ PI 4.0) f)) (exp (- (* (/ PI 4.0) f)))) (- (exp (* (/ PI 4.0) f)) (exp (- (* (/ PI 4.0) f)))))))))