
;; VandenBroeck & Keller Eq. (20): -(1/(pi/4)) * log((e^t + e^-t)/(e^t - e^-t)), t = (pi/4)*f.
;; NOTE(review): (- t_1 t_2) cancels catastrophically for small f — presumably why Herbie rewrites below.
(FPCore (f) :precision binary64 (let* ((t_0 (* (/ PI 4.0) f)) (t_1 (exp t_0)) (t_2 (exp (- t_0)))) (- (* (/ 1.0 (/ PI 4.0)) (log (/ (+ t_1 t_2) (- t_1 t_2)))))))
/* VandenBroeck & Keller Eq. (20): -(1/(pi/4)) * log((e^t + e^-t)/(e^t - e^-t))
 * with t = (pi/4)*f, evaluated in binary64 with exactly the original operations. */
double code(double f) {
    const double quarter_pi = ((double) M_PI) / 4.0;
    double t = quarter_pi * f;
    double e_pos = exp(t);
    double e_neg = exp(-t);
    /* e_pos - e_neg cancels badly for small f; kept as-is to match the reference. */
    double ratio = (e_pos + e_neg) / (e_pos - e_neg);
    return -((1.0 / quarter_pi) * log(ratio));
}
/** VandenBroeck & Keller Eq. (20): -(1/(pi/4)) * log((e^t + e^-t)/(e^t - e^-t)), t = (pi/4)*f. */
public static double code(double f) {
    final double quarterPi = Math.PI / 4.0;
    double t = quarterPi * f;
    double ePos = Math.exp(t);
    double eNeg = Math.exp(-t);
    // ePos - eNeg cancels for small f; preserved to match the reference program.
    double ratio = (ePos + eNeg) / (ePos - eNeg);
    return -((1.0 / quarterPi) * Math.log(ratio));
}
def code(f):
    """VandenBroeck & Keller Eq. (20): -(1/(pi/4)) * log((e^t + e^-t)/(e^t - e^-t)), t = (pi/4)*f.

    Reformatted: the extracted one-line form fused multiple statements and was
    not valid Python. Operations are unchanged.
    """
    t_0 = (math.pi / 4.0) * f
    t_1 = math.exp(t_0)
    t_2 = math.exp(-t_0)
    # (t_1 - t_2) cancels for small f; kept to match the reference program.
    return -((1.0 / (math.pi / 4.0)) * math.log(((t_1 + t_2) / (t_1 - t_2))))
# VandenBroeck & Keller Eq. (20) in explicit binary64.
# Reformatted: the one-line form fused statements without separators (invalid Julia).
function code(f)
    t_0 = Float64(Float64(pi / 4.0) * f)
    t_1 = exp(t_0)
    t_2 = exp(Float64(-t_0))
    return Float64(-Float64(Float64(1.0 / Float64(pi / 4.0)) * log(Float64(Float64(t_1 + t_2) / Float64(t_1 - t_2)))))
end
% VandenBroeck & Keller Eq. (20): -(1/(pi/4)) * log((e^t + e^-t)/(e^t - e^-t)), t = (pi/4)*f.
% Reformatted: declaration and body were fused on one line, which MATLAB rejects.
function tmp = code(f)
    t_0 = (pi / 4.0) * f;
    t_1 = exp(t_0);
    t_2 = exp(-t_0);
    tmp = -((1.0 / (pi / 4.0)) * log(((t_1 + t_2) / (t_1 - t_2))));
end
(* VandenBroeck & Keller Eq. (20) at $MachinePrecision: -(1/(Pi/4)) Log[(E^t + E^-t)/(E^t - E^-t)], t = (Pi/4) f. *)
code[f_] := Block[{t$95$0 = N[(N[(Pi / 4.0), $MachinePrecision] * f), $MachinePrecision]}, Block[{t$95$1 = N[Exp[t$95$0], $MachinePrecision]}, Block[{t$95$2 = N[Exp[(-t$95$0)], $MachinePrecision]}, (-N[(N[(1.0 / N[(Pi / 4.0), $MachinePrecision]), $MachinePrecision] * N[Log[N[(N[(t$95$1 + t$95$2), $MachinePrecision] / N[(t$95$1 - t$95$2), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision])]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{\pi}{4} \cdot f\\
t_1 := e^{t_0}\\
t_2 := e^{-t_0}\\
-\frac{1}{\frac{\pi}{4}} \cdot \log \left(\frac{t_1 + t_2}{t_1 - t_2}\right)
\end{array}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 5 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
;; Alternative 1: identical to the initial program (listed again by the report).
(FPCore (f) :precision binary64 (let* ((t_0 (* (/ PI 4.0) f)) (t_1 (exp t_0)) (t_2 (exp (- t_0)))) (- (* (/ 1.0 (/ PI 4.0)) (log (/ (+ t_1 t_2) (- t_1 t_2)))))))
/* Alternative 1 (verbatim copy of the initial program):
 * -(1/(pi/4)) * log((e^t + e^-t)/(e^t - e^-t)) with t = (pi/4)*f. */
double code(double f) {
double t_0 = (((double) M_PI) / 4.0) * f;
double t_1 = exp(t_0);
double t_2 = exp(-t_0);
/* t_1 - t_2 cancels for small f; this listing preserves the reference exactly. */
return -((1.0 / (((double) M_PI) / 4.0)) * log(((t_1 + t_2) / (t_1 - t_2))));
}
/** Alternative 1 (verbatim copy of the initial program): -(1/(pi/4)) * log((e^t + e^-t)/(e^t - e^-t)). */
public static double code(double f) {
double t_0 = (Math.PI / 4.0) * f;
double t_1 = Math.exp(t_0);
double t_2 = Math.exp(-t_0);
// t_1 - t_2 cancels for small f; preserved to match the reference.
return -((1.0 / (Math.PI / 4.0)) * Math.log(((t_1 + t_2) / (t_1 - t_2))));
}
def code(f):
    """Alternative 1 (same as the initial program), reformatted to valid Python.

    The extracted one-line form fused multiple statements and did not parse.
    Computes -(1/(pi/4)) * log((e^t + e^-t)/(e^t - e^-t)) with t = (pi/4)*f.
    """
    t_0 = (math.pi / 4.0) * f
    t_1 = math.exp(t_0)
    t_2 = math.exp(-t_0)
    return -((1.0 / (math.pi / 4.0)) * math.log(((t_1 + t_2) / (t_1 - t_2))))
# Alternative 1 (same as the initial program), reformatted: the fused one-line
# form had no statement separators and was invalid Julia.
function code(f)
    t_0 = Float64(Float64(pi / 4.0) * f)
    t_1 = exp(t_0)
    t_2 = exp(Float64(-t_0))
    return Float64(-Float64(Float64(1.0 / Float64(pi / 4.0)) * log(Float64(Float64(t_1 + t_2) / Float64(t_1 - t_2)))))
end
% Alternative 1 (same as the initial program), reformatted: declaration and
% body were fused on one line, which MATLAB rejects.
function tmp = code(f)
    t_0 = (pi / 4.0) * f;
    t_1 = exp(t_0);
    t_2 = exp(-t_0);
    tmp = -((1.0 / (pi / 4.0)) * log(((t_1 + t_2) / (t_1 - t_2))));
end
(* Alternative 1 (verbatim copy of the initial program) at $MachinePrecision. *)
code[f_] := Block[{t$95$0 = N[(N[(Pi / 4.0), $MachinePrecision] * f), $MachinePrecision]}, Block[{t$95$1 = N[Exp[t$95$0], $MachinePrecision]}, Block[{t$95$2 = N[Exp[(-t$95$0)], $MachinePrecision]}, (-N[(N[(1.0 / N[(Pi / 4.0), $MachinePrecision]), $MachinePrecision] * N[Log[N[(N[(t$95$1 + t$95$2), $MachinePrecision] / N[(t$95$1 - t$95$2), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision])]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{\pi}{4} \cdot f\\
t_1 := e^{t_0}\\
t_2 := e^{-t_0}\\
-\frac{1}{\frac{\pi}{4}} \cdot \log \left(\frac{t_1 + t_2}{t_1 - t_2}\right)
\end{array}
\end{array}
;; Alternative 2: -log(fma(f, pi*0.0833..., 4/(f*pi))) / (pi/4); 0.0833... is (approximately) 1/12.
(FPCore (f) :precision binary64 (/ (- (log (fma f (* PI 0.08333333333333333) (/ 4.0 (* f PI))))) (* PI 0.25)))
/* Herbie alternative 2: -log(fma(f, pi*0.0833..., 4/(f*pi))) / (pi/4).
 * The fma must stay fused (single rounding) to reproduce the reference exactly. */
double code(double f) {
    double slope = ((double) M_PI) * 0.08333333333333333; /* constant is 1/12 */
    double tail = 4.0 / (f * ((double) M_PI));
    double arg = fma(f, slope, tail);
    return -log(arg) / (((double) M_PI) * 0.25);
}
# Alternative 2: -log(fma(f, pi*0.0833..., 4/(pi*f))) / (pi/4); fma keeps a single rounding.
function code(f) return Float64(Float64(-log(fma(f, Float64(pi * 0.08333333333333333), Float64(4.0 / Float64(f * pi))))) / Float64(pi * 0.25)) end
(* Alternative 2 at $MachinePrecision; note the fma is expanded to multiply-add here. *)
code[f_] := N[((-N[Log[N[(f * N[(Pi * 0.08333333333333333), $MachinePrecision] + N[(4.0 / N[(f * Pi), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]) / N[(Pi * 0.25), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{-\log \left(\mathsf{fma}\left(f, \pi \cdot 0.08333333333333333, \frac{4}{f \cdot \pi}\right)\right)}{\pi \cdot 0.25}
\end{array}
Initial program 6.8%
Taylor expanded in f around 0 96.8%
Simplified 96.8%
fma-udef 96.8%
*-un-lft-identity 96.8%
div-inv 96.8%
metadata-eval 96.8%
pow-div 96.8%
metadata-eval 96.8%
pow1 96.8%
associate-*l* 96.8%
metadata-eval 96.8%
Applied egg-rr 96.8%
associate-*l/ 97.0%
*-un-lft-identity 97.0%
Applied egg-rr 97.0%
fma-udef 97.0%
distribute-lft-out 97.0%
metadata-eval 97.0%
*-commutative 97.0%
Simplified 97.0%
Final simplification 97.0%
;; Alternative 3: 4 * ((-log(1/f) - log(4/pi)) / pi).
(FPCore (f) :precision binary64 (* 4.0 (/ (- (- (log (/ 1.0 f))) (log (/ 4.0 PI))) PI)))
/* Herbie alternative 3: 4 * ((-log(1/f) - log(4/pi)) / pi), binary64. */
double code(double f) {
    double neg_log_inv = -log(1.0 / f);
    double offset = log(4.0 / ((double) M_PI));
    return 4.0 * ((neg_log_inv - offset) / ((double) M_PI));
}
/** Herbie alternative 3: 4 * ((-log(1/f) - log(4/pi)) / pi), binary64. */
public static double code(double f) {
    double negLogInv = -Math.log(1.0 / f);
    double offset = Math.log(4.0 / Math.PI);
    return 4.0 * ((negLogInv - offset) / Math.PI);
}
def code(f):
    """Herbie alternative 3: 4 * ((-log(1/f) - log(4/pi)) / pi), binary64."""
    neg_log_inv = -math.log(1.0 / f)
    offset = math.log(4.0 / math.pi)
    return 4.0 * ((neg_log_inv - offset) / math.pi)
# Alternative 3: 4 * ((-log(1/f) - log(4/pi)) / pi), binary64 throughout.
function code(f) return Float64(4.0 * Float64(Float64(Float64(-log(Float64(1.0 / f))) - log(Float64(4.0 / pi))) / pi)) end
% Alternative 3: 4 * ((-log(1/f) - log(4/pi)) / pi).
% Reformatted: declaration and body were fused on one line, which MATLAB rejects.
function tmp = code(f)
    tmp = 4.0 * ((-log((1.0 / f)) - log((4.0 / pi))) / pi);
end
(* Alternative 3: 4 * ((-Log[1/f] - Log[4/Pi]) / Pi) at $MachinePrecision. *)
code[f_] := N[(4.0 * N[(N[((-N[Log[N[(1.0 / f), $MachinePrecision]], $MachinePrecision]) - N[Log[N[(4.0 / Pi), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / Pi), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
4 \cdot \frac{\left(-\log \left(\frac{1}{f}\right)\right) - \log \left(\frac{4}{\pi}\right)}{\pi}
\end{array}
Initial program 6.8%
Taylor expanded in f around 0 96.4%
associate-/r* 96.4%
distribute-rgt-out-- 96.4%
metadata-eval 96.4%
Simplified 96.4%
Taylor expanded in f around inf 96.5%
Final simplification 96.5%
;; Alternative 4: ((log(4/pi) - log(f)) / pi) * -4.
(FPCore (f) :precision binary64 (* (/ (- (log (/ 4.0 PI)) (log f)) PI) -4.0))
/* Herbie alternative 4: ((log(4/pi) - log(f)) / pi) * -4, binary64. */
double code(double f) {
    double diff = log(4.0 / ((double) M_PI)) - log(f);
    return (diff / ((double) M_PI)) * -4.0;
}
/** Herbie alternative 4: ((log(4/pi) - log(f)) / pi) * -4, binary64. */
public static double code(double f) {
    double diff = Math.log(4.0 / Math.PI) - Math.log(f);
    return (diff / Math.PI) * -4.0;
}
def code(f):
    """Herbie alternative 4: ((log(4/pi) - log(f)) / pi) * -4, binary64."""
    diff = math.log(4.0 / math.pi) - math.log(f)
    return (diff / math.pi) * -4.0
# Alternative 4: ((log(4/pi) - log(f)) / pi) * -4, binary64 throughout.
function code(f) return Float64(Float64(Float64(log(Float64(4.0 / pi)) - log(f)) / pi) * -4.0) end
% Alternative 4: ((log(4/pi) - log(f)) / pi) * -4.
% Reformatted: declaration and body were fused on one line, which MATLAB rejects.
function tmp = code(f)
    tmp = ((log((4.0 / pi)) - log(f)) / pi) * -4.0;
end
(* Alternative 4: ((Log[4/Pi] - Log[f]) / Pi) * -4 at $MachinePrecision. *)
code[f_] := N[(N[(N[(N[Log[N[(4.0 / Pi), $MachinePrecision]], $MachinePrecision] - N[Log[f], $MachinePrecision]), $MachinePrecision] / Pi), $MachinePrecision] * -4.0), $MachinePrecision]
\begin{array}{l}
\\
\frac{\log \left(\frac{4}{\pi}\right) - \log f}{\pi} \cdot -4
\end{array}
Initial program 6.8%
distribute-lft-neg-in 6.8%
*-commutative 6.8%
Simplified 6.8%
Taylor expanded in f around 0 96.5%
*-commutative 96.5%
mul-1-neg 96.5%
unsub-neg 96.5%
distribute-rgt-out-- 96.5%
metadata-eval 96.5%
Simplified 96.5%
Taylor expanded in f around 0 96.5%
Final simplification 96.5%
;; Alternative 5: -4 * (log(4 * (f/pi)) / pi).
(FPCore (f) :precision binary64 (* -4.0 (/ (log (* 4.0 (/ f PI))) PI)))
/* Herbie alternative 5: -4 * (log(4 * (f/pi)) / pi), binary64. */
double code(double f) {
    double scaled = 4.0 * (f / ((double) M_PI));
    return -4.0 * (log(scaled) / ((double) M_PI));
}
/** Herbie alternative 5: -4 * (log(4 * (f/pi)) / pi), binary64. */
public static double code(double f) {
    double scaled = 4.0 * (f / Math.PI);
    return -4.0 * (Math.log(scaled) / Math.PI);
}
def code(f):
    """Herbie alternative 5: -4 * (log(4 * (f/pi)) / pi), binary64."""
    scaled = 4.0 * (f / math.pi)
    return -4.0 * (math.log(scaled) / math.pi)
# Alternative 5: -4 * (log(4 * (f/pi)) / pi), binary64 throughout.
function code(f) return Float64(-4.0 * Float64(log(Float64(4.0 * Float64(f / pi))) / pi)) end
% Alternative 5: -4 * (log(4 * (f/pi)) / pi).
% Reformatted: declaration and body were fused on one line, which MATLAB rejects.
function tmp = code(f)
    tmp = -4.0 * (log((4.0 * (f / pi))) / pi);
end
(* Alternative 5: -4 * (Log[4 * (f/Pi)] / Pi) at $MachinePrecision. *)
code[f_] := N[(-4.0 * N[(N[Log[N[(4.0 * N[(f / Pi), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] / Pi), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
-4 \cdot \frac{\log \left(4 \cdot \frac{f}{\pi}\right)}{\pi}
\end{array}
Initial program 6.8%
distribute-lft-neg-in 6.8%
*-commutative 6.8%
Simplified 6.8%
Taylor expanded in f around 0 96.5%
*-commutative 96.5%
mul-1-neg 96.5%
unsub-neg 96.5%
distribute-rgt-out-- 96.5%
metadata-eval 96.5%
Simplified 96.5%
Taylor expanded in f around 0 96.5%
Simplified 96.5%
associate-/l/ 96.5%
div-inv 96.5%
metadata-eval 96.5%
frac-times 96.5%
div-inv 96.5%
associate-*r/ 95.7%
log-div 95.7%
un-div-inv 95.7%
Applied egg-rr 95.7%
expm1-log1p-u 94.6%
expm1-udef 94.6%
Applied egg-rr 0.1%
expm1-def 0.1%
expm1-log1p 1.6%
Simplified 1.6%
Final simplification 1.6%
;; Alternative 6: -4 * (log((4/pi) / f) / pi).
(FPCore (f) :precision binary64 (* -4.0 (/ (log (/ (/ 4.0 PI) f)) PI)))
/* Herbie alternative 6: -4 * (log((4/pi) / f) / pi), binary64. */
double code(double f) {
    double ratio = (4.0 / ((double) M_PI)) / f;
    return -4.0 * (log(ratio) / ((double) M_PI));
}
/** Herbie alternative 6: -4 * (log((4/pi) / f) / pi), binary64. */
public static double code(double f) {
    double ratio = (4.0 / Math.PI) / f;
    return -4.0 * (Math.log(ratio) / Math.PI);
}
def code(f):
    """Herbie alternative 6: -4 * (log((4/pi) / f) / pi), binary64."""
    ratio = (4.0 / math.pi) / f
    return -4.0 * (math.log(ratio) / math.pi)
# Alternative 6: -4 * (log((4/pi) / f) / pi), binary64 throughout.
function code(f) return Float64(-4.0 * Float64(log(Float64(Float64(4.0 / pi) / f)) / pi)) end
% Alternative 6: -4 * (log((4/pi) / f) / pi).
% Reformatted: declaration and body were fused on one line, which MATLAB rejects.
function tmp = code(f)
    tmp = -4.0 * (log(((4.0 / pi) / f)) / pi);
end
(* Alternative 6: -4 * (Log[(4/Pi) / f] / Pi) at $MachinePrecision. *)
code[f_] := N[(-4.0 * N[(N[Log[N[(N[(4.0 / Pi), $MachinePrecision] / f), $MachinePrecision]], $MachinePrecision] / Pi), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
-4 \cdot \frac{\log \left(\frac{\frac{4}{\pi}}{f}\right)}{\pi}
\end{array}
Initial program 6.8%
distribute-lft-neg-in 6.8%
*-commutative 6.8%
Simplified 6.8%
Taylor expanded in f around 0 96.5%
*-commutative 96.5%
mul-1-neg 96.5%
unsub-neg 96.5%
distribute-rgt-out-- 96.5%
metadata-eval 96.5%
Simplified 96.5%
Taylor expanded in f around 0 96.5%
Simplified 96.5%
Final simplification 96.5%
herbie shell --seed 2024019
;; Input program as given to Herbie: -(1/(pi/4)) * log((e^t + e^-t)/(e^t - e^-t)), t = (pi/4)*f.
(FPCore (f)
 :name "VandenBroeck and Keller, Equation (20)"
 :precision binary64
 (- (* (/ 1.0 (/ PI 4.0)) (log (/ (+ (exp (* (/ PI 4.0) f)) (exp (- (* (/ PI 4.0) f)))) (- (exp (* (/ PI 4.0) f)) (exp (- (* (/ PI 4.0) f)))))))))