
;; Original expression: -(1/(pi/4)) * log((e^x + e^-x) / (e^x - e^-x)) with x = (pi/4)*f,
;; i.e. -(4/pi)*log(coth(pi*f/4)); the subtraction (t_1 - t_2) cancels for f near 0.
(FPCore (f) :precision binary64 (let* ((t_0 (* (/ PI 4.0) f)) (t_1 (exp t_0)) (t_2 (exp (- t_0)))) (- (* (/ 1.0 (/ PI 4.0)) (log (/ (+ t_1 t_2) (- t_1 t_2)))))))
double code(double f) {
    /* Evaluates -(1/(pi/4)) * log((e^x + e^-x) / (e^x - e^-x)) with x = (pi/4)*f.
       Same operations as the reference expression; (ep - em) cancels for small f. */
    const double quarter_pi = ((double) M_PI) / 4.0;
    double x = quarter_pi * f;
    double ep = exp(x);
    double em = exp(-x);
    double ratio = (ep + em) / (ep - em);
    return -((1.0 / quarter_pi) * log(ratio));
}
/**
 * Evaluates -(1/(PI/4)) * log((e^x + e^-x) / (e^x - e^-x)) where x = (PI/4) * f.
 * Identical operation sequence to the reference expression; the denominator
 * e^x - e^-x cancels when f is near zero.
 */
public static double code(double f) {
    final double quarterPi = Math.PI / 4.0;
    final double x = quarterPi * f;
    final double expPos = Math.exp(x);
    final double expNeg = Math.exp(-x);
    final double ratio = (expPos + expNeg) / (expPos - expNeg);
    return -((1.0 / quarterPi) * Math.log(ratio));
}
def code(f):
    """Return -(1/(pi/4)) * log((e**x + e**-x) / (e**x - e**-x)) with x = (pi/4)*f.

    Fix: the generated one-liner jammed four statements onto the ``def`` line,
    which is invalid Python; the statements are now on separate lines.
    Note t_1 - t_2 cancels catastrophically for f near 0.
    """
    t_0 = (math.pi / 4.0) * f
    t_1 = math.exp(t_0)
    t_2 = math.exp(-t_0)
    return -((1.0 / (math.pi / 4.0)) * math.log((t_1 + t_2) / (t_1 - t_2)))
# Evaluates -(1/(pi/4)) * log((e^x + e^-x) / (e^x - e^-x)) with x = (pi/4)*f, rounded
# to Float64 at each step. Fix: the generated one-liner juxtaposed assignments with
# no separators, which is invalid Julia; statements are now on separate lines.
function code(f)
	t_0 = Float64(Float64(pi / 4.0) * f)
	t_1 = exp(t_0)
	t_2 = exp(Float64(-t_0))
	return Float64(-Float64(Float64(1.0 / Float64(pi / 4.0)) * log(Float64(Float64(t_1 + t_2) / Float64(t_1 - t_2)))))
end
% Evaluates -(1/(pi/4)) * log((e^x + e^-x) / (e^x - e^-x)) with x = (pi/4)*f.
% Fix: body statements sat on the function declaration line, which MATLAB
% rejects; reformatted onto separate lines.
function tmp = code(f)
	t_0 = (pi / 4.0) * f;
	t_1 = exp(t_0);
	t_2 = exp(-t_0);
	tmp = -((1.0 / (pi / 4.0)) * log(((t_1 + t_2) / (t_1 - t_2))));
end
(* Mathematica port of the reference expression: -(1/(Pi/4)) * Log[(E^x + E^-x)/(E^x - E^-x)]
   with x = (Pi/4) f, each intermediate rounded via N[..., $MachinePrecision]. *)
code[f_] := Block[{t$95$0 = N[(N[(Pi / 4.0), $MachinePrecision] * f), $MachinePrecision]}, Block[{t$95$1 = N[Exp[t$95$0], $MachinePrecision]}, Block[{t$95$2 = N[Exp[(-t$95$0)], $MachinePrecision]}, (-N[(N[(1.0 / N[(Pi / 4.0), $MachinePrecision]), $MachinePrecision] * N[Log[N[(N[(t$95$1 + t$95$2), $MachinePrecision] / N[(t$95$1 - t$95$2), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision])]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{\pi}{4} \cdot f\\
t_1 := e^{t_0}\\
t_2 := e^{-t_0}\\
-\frac{1}{\frac{\pi}{4}} \cdot \log \left(\frac{t_1 + t_2}{t_1 - t_2}\right)
\end{array}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 6 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
;; Reference expression (restated at the head of the alternatives section):
;; -(4/pi)*log(coth(pi*f/4)) computed via exp sums; (t_1 - t_2) cancels near f = 0.
(FPCore (f) :precision binary64 (let* ((t_0 (* (/ PI 4.0) f)) (t_1 (exp t_0)) (t_2 (exp (- t_0)))) (- (* (/ 1.0 (/ PI 4.0)) (log (/ (+ t_1 t_2) (- t_1 t_2)))))))
/* Computes -(1/(pi/4)) * log((exp(x) + exp(-x)) / (exp(x) - exp(-x))) for x = (pi/4)*f.
   The subtraction t_1 - t_2 loses precision when f is near zero. */
double code(double f) {
double t_0 = (((double) M_PI) / 4.0) * f;
double t_1 = exp(t_0);
double t_2 = exp(-t_0);
return -((1.0 / (((double) M_PI) / 4.0)) * log(((t_1 + t_2) / (t_1 - t_2))));
}
/**
 * Computes -(1/(PI/4)) * Math.log((e^x + e^-x) / (e^x - e^-x)) with x = (PI/4) * f.
 * The difference t_1 - t_2 cancels for f near zero.
 */
public static double code(double f) {
double t_0 = (Math.PI / 4.0) * f;
double t_1 = Math.exp(t_0);
double t_2 = Math.exp(-t_0);
return -((1.0 / (Math.PI / 4.0)) * Math.log(((t_1 + t_2) / (t_1 - t_2))));
}
def code(f):
    """Compute -(1/(pi/4)) * log((e**x + e**-x) / (e**x - e**-x)), x = (pi/4)*f.

    Reformatted: the generated code placed several statements on the ``def``
    line without separators, which does not parse as Python.
    """
    t_0 = (math.pi / 4.0) * f
    t_1 = math.exp(t_0)
    t_2 = math.exp(-t_0)
    return -((1.0 / (math.pi / 4.0)) * math.log((t_1 + t_2) / (t_1 - t_2)))
# Reference expression in Julia: -(1/(pi/4)) * log((e^x + e^-x)/(e^x - e^-x)), x = (pi/4)*f.
# Reformatted: juxtaposed assignments without separators do not parse in Julia.
function code(f)
	t_0 = Float64(Float64(pi / 4.0) * f)
	t_1 = exp(t_0)
	t_2 = exp(Float64(-t_0))
	return Float64(-Float64(Float64(1.0 / Float64(pi / 4.0)) * log(Float64(Float64(t_1 + t_2) / Float64(t_1 - t_2)))))
end
% Reference expression: -(1/(pi/4)) * log((e^x + e^-x)/(e^x - e^-x)), x = (pi/4)*f.
% Reformatted: statements cannot share the function declaration line in MATLAB.
function tmp = code(f)
	t_0 = (pi / 4.0) * f;
	t_1 = exp(t_0);
	t_2 = exp(-t_0);
	tmp = -((1.0 / (pi / 4.0)) * log(((t_1 + t_2) / (t_1 - t_2))));
end
(* Reference expression in Mathematica; every intermediate rounded with N[..., $MachinePrecision]. *)
code[f_] := Block[{t$95$0 = N[(N[(Pi / 4.0), $MachinePrecision] * f), $MachinePrecision]}, Block[{t$95$1 = N[Exp[t$95$0], $MachinePrecision]}, Block[{t$95$2 = N[Exp[(-t$95$0)], $MachinePrecision]}, (-N[(N[(1.0 / N[(Pi / 4.0), $MachinePrecision]), $MachinePrecision] * N[Log[N[(N[(t$95$1 + t$95$2), $MachinePrecision] / N[(t$95$1 - t$95$2), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision])]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{\pi}{4} \cdot f\\
t_1 := e^{t_0}\\
t_2 := e^{-t_0}\\
-\frac{1}{\frac{\pi}{4}} \cdot \log \left(\frac{t_1 + t_2}{t_1 - t_2}\right)
\end{array}
\end{array}
;; Alternative 1: Taylor expansion of the original around f = 0, expressed with fma;
;; avoids the exp-difference cancellation entirely.
(FPCore (f)
:precision binary64
(fma
(/ (- (log (/ 4.0 PI)) (log f)) PI)
-4.0
(fma
(*
(/ (pow f 2.0) PI)
(* (* PI 0.5) (fma PI 0.125 (* PI -0.041666666666666664))))
-2.0
0.0)))
/* Alternative 1: fma-based Taylor form around f = 0. Kept verbatim --
   the exact operation order determines the rounding behavior Herbie measured. */
double code(double f) {
return fma(((log((4.0 / ((double) M_PI))) - log(f)) / ((double) M_PI)), -4.0, fma(((pow(f, 2.0) / ((double) M_PI)) * ((((double) M_PI) * 0.5) * fma(((double) M_PI), 0.125, (((double) M_PI) * -0.041666666666666664)))), -2.0, 0.0));
}
# Alternative 1 (Julia): fma-based Taylor form around f = 0; single return, kept verbatim.
function code(f) return fma(Float64(Float64(log(Float64(4.0 / pi)) - log(f)) / pi), -4.0, fma(Float64(Float64((f ^ 2.0) / pi) * Float64(Float64(pi * 0.5) * fma(pi, 0.125, Float64(pi * -0.041666666666666664)))), -2.0, 0.0)) end
(* Alternative 1 (Mathematica): Taylor form around f = 0 with explicit machine-precision rounding. *)
code[f_] := N[(N[(N[(N[Log[N[(4.0 / Pi), $MachinePrecision]], $MachinePrecision] - N[Log[f], $MachinePrecision]), $MachinePrecision] / Pi), $MachinePrecision] * -4.0 + N[(N[(N[(N[Power[f, 2.0], $MachinePrecision] / Pi), $MachinePrecision] * N[(N[(Pi * 0.5), $MachinePrecision] * N[(Pi * 0.125 + N[(Pi * -0.041666666666666664), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * -2.0 + 0.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\frac{\log \left(\frac{4}{\pi}\right) - \log f}{\pi}, -4, \mathsf{fma}\left(\frac{{f}^{2}}{\pi} \cdot \left(\left(\pi \cdot 0.5\right) \cdot \mathsf{fma}\left(\pi, 0.125, \pi \cdot -0.041666666666666664\right)\right), -2, 0\right)\right)
\end{array}
Initial program 7.4%
distribute-lft-neg-in 7.4%
*-commutative 7.4%
Simplified 7.0%
Taylor expanded in f around 0 97.3%
Simplified 97.3%
fma-udef 97.3%
associate-/r* 97.3%
metadata-eval 97.3%
associate-*r* 97.3%
metadata-eval 97.3%
Applied egg-rr 97.3%
+-rgt-identity 97.3%
associate-*r* 97.3%
fma-def 97.3%
+-commutative 97.3%
*-commutative 97.3%
fma-def 97.3%
*-commutative 97.3%
associate-/r/ 97.3%
associate-*r* 97.3%
metadata-eval 97.3%
metadata-eval 97.3%
Simplified 97.3%
Final simplification 97.3%
;; Alternative 2: single-log form, log(fma(f, c, (4/PI)/f)) * (-1/(PI/4)).
(FPCore (f)
:precision binary64
(*
(log
(fma
f
(+ (* -2.0 (/ 0.010416666666666666 (/ 0.5 PI))) (* PI 0.125))
(/ (/ 4.0 PI) f)))
(/ -1.0 (/ PI 4.0))))
/* Alternative 2: single log of an fma-combined polynomial term plus (4/pi)/f.
   Kept verbatim; the constant grouping is part of the tuned rounding. */
double code(double f) {
return log(fma(f, ((-2.0 * (0.010416666666666666 / (0.5 / ((double) M_PI)))) + (((double) M_PI) * 0.125)), ((4.0 / ((double) M_PI)) / f))) * (-1.0 / (((double) M_PI) / 4.0));
}
# Alternative 2 (Julia): single-log fma form, kept verbatim.
function code(f) return Float64(log(fma(f, Float64(Float64(-2.0 * Float64(0.010416666666666666 / Float64(0.5 / pi))) + Float64(pi * 0.125)), Float64(Float64(4.0 / pi) / f))) * Float64(-1.0 / Float64(pi / 4.0))) end
(* Alternative 2 (Mathematica): single-log form with machine-precision rounding at each step. *)
code[f_] := N[(N[Log[N[(f * N[(N[(-2.0 * N[(0.010416666666666666 / N[(0.5 / Pi), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(Pi * 0.125), $MachinePrecision]), $MachinePrecision] + N[(N[(4.0 / Pi), $MachinePrecision] / f), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] * N[(-1.0 / N[(Pi / 4.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\log \left(\mathsf{fma}\left(f, -2 \cdot \frac{0.010416666666666666}{\frac{0.5}{\pi}} + \pi \cdot 0.125, \frac{\frac{4}{\pi}}{f}\right)\right) \cdot \frac{-1}{\frac{\pi}{4}}
\end{array}
Initial program 7.4%
Taylor expanded in f around 0 97.2%
Simplified 97.2%
fma-udef 97.2%
associate-/r* 97.2%
metadata-eval 97.2%
associate-*r* 97.2%
metadata-eval 97.2%
Applied egg-rr 97.2%
Final simplification 97.2%
;; Alternative 3: cosh form, -(4/PI) * log(4*cosh(PI*f/4) / (PI*f)).
(FPCore (f) :precision binary64 (* (/ (log (* 4.0 (/ (cosh (* PI (* f 0.25))) (* PI f)))) PI) (- 4.0)))
/* Alternative 3: -(4/pi) * log(4*cosh(pi*f/4)/(pi*f)); cosh replaces the exp pair. */
double code(double f) {
return (log((4.0 * (cosh((((double) M_PI) * (f * 0.25))) / (((double) M_PI) * f)))) / ((double) M_PI)) * -4.0;
}
/** Alternative 3: -(4/pi) * log(4*cosh(pi*f/4)/(pi*f)); cosh replaces the exp pair. */
public static double code(double f) {
return (Math.log((4.0 * (Math.cosh((Math.PI * (f * 0.25))) / (Math.PI * f)))) / Math.PI) * -4.0;
}
# Alternative 3: -(4/pi) * log(4*cosh(pi*f/4)/(pi*f)); valid single-statement def.
def code(f): return (math.log((4.0 * (math.cosh((math.pi * (f * 0.25))) / (math.pi * f)))) / math.pi) * -4.0
# Alternative 3 (Julia): cosh form, kept verbatim.
function code(f) return Float64(Float64(log(Float64(4.0 * Float64(cosh(Float64(pi * Float64(f * 0.25))) / Float64(pi * f)))) / pi) * Float64(-4.0)) end
% Alternative 3: -(4/pi) * log(4*cosh(pi*f/4)/(pi*f)).
% Reformatted: the body cannot share the function declaration line in MATLAB.
function tmp = code(f)
	tmp = (log((4.0 * (cosh((pi * (f * 0.25))) / (pi * f)))) / pi) * -4.0;
end
(* Alternative 3 (Mathematica): cosh form with machine-precision rounding. *)
code[f_] := N[(N[(N[Log[N[(4.0 * N[(N[Cosh[N[(Pi * N[(f * 0.25), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] / N[(Pi * f), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] / Pi), $MachinePrecision] * (-4.0)), $MachinePrecision]
\begin{array}{l}
\\
\frac{\log \left(4 \cdot \frac{\cosh \left(\pi \cdot \left(f \cdot 0.25\right)\right)}{\pi \cdot f}\right)}{\pi} \cdot \left(-4\right)
\end{array}
Initial program 7.4%
Taylor expanded in f around 0 96.8%
distribute-rgt-out-- 96.8%
metadata-eval 96.8%
Simplified 96.8%
associate-*l/ 96.9%
*-un-lft-identity 96.9%
cosh-undef 96.9%
*-commutative 96.9%
div-inv 96.9%
metadata-eval 96.9%
div-inv 96.9%
metadata-eval 96.9%
Applied egg-rr 96.9%
*-lft-identity 96.9%
*-commutative 96.9%
times-frac 96.9%
metadata-eval 96.9%
associate-*r* 96.9%
*-commutative 96.9%
times-frac 96.9%
metadata-eval 96.9%
associate-*r* 96.9%
*-commutative 96.9%
associate-*r* 96.9%
Simplified 96.9%
Final simplification 96.9%
;; Alternative 4: -4 * (log(4/f)/PI - log(PI)/PI), the two-log split of alternative 5.
(FPCore (f) :precision binary64 (* -4.0 (- (/ (log (/ 4.0 f)) PI) (/ (log PI) PI))))
/* Alternative 4: -4 * (log(4/f)/pi - log(pi)/pi); leading Taylor term split into two logs. */
double code(double f) {
return -4.0 * ((log((4.0 / f)) / ((double) M_PI)) - (log(((double) M_PI)) / ((double) M_PI)));
}
/** Alternative 4: -4 * (log(4/f)/pi - log(pi)/pi); leading Taylor term split into two logs. */
public static double code(double f) {
return -4.0 * ((Math.log((4.0 / f)) / Math.PI) - (Math.log(Math.PI) / Math.PI));
}
# Alternative 4: -4 * (log(4/f)/pi - log(pi)/pi); valid single-statement def.
def code(f): return -4.0 * ((math.log((4.0 / f)) / math.pi) - (math.log(math.pi) / math.pi))
# Alternative 4 (Julia): two-log split, kept verbatim.
function code(f) return Float64(-4.0 * Float64(Float64(log(Float64(4.0 / f)) / pi) - Float64(log(pi) / pi))) end
% Alternative 4: -4 * (log(4/f)/pi - log(pi)/pi).
% Reformatted: the body cannot share the function declaration line in MATLAB.
function tmp = code(f)
	tmp = -4.0 * ((log((4.0 / f)) / pi) - (log(pi) / pi));
end
(* Alternative 4 (Mathematica): two-log split with machine-precision rounding. *)
code[f_] := N[(-4.0 * N[(N[(N[Log[N[(4.0 / f), $MachinePrecision]], $MachinePrecision] / Pi), $MachinePrecision] - N[(N[Log[Pi], $MachinePrecision] / Pi), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
-4 \cdot \left(\frac{\log \left(\frac{4}{f}\right)}{\pi} - \frac{\log \pi}{\pi}\right)
\end{array}
Initial program 7.4%
distribute-lft-neg-in 7.4%
*-commutative 7.4%
Simplified 7.0%
Taylor expanded in f around 0 96.9%
*-commutative 96.9%
associate-*l/ 96.9%
mul-1-neg 96.9%
unsub-neg 96.9%
distribute-rgt-out-- 96.9%
metadata-eval 96.9%
Simplified 96.9%
Taylor expanded in f around 0 96.9%
div-sub 96.7%
remove-double-neg 96.7%
mul-1-neg 96.7%
log-rec 96.7%
div-sub 96.9%
div-sub 96.7%
log-rec 96.7%
mul-1-neg 96.7%
remove-double-neg 96.7%
div-sub 96.9%
Simplified 96.9%
log-div 96.8%
div-sub 96.9%
Applied egg-rr 96.9%
Final simplification 96.9%
;; Alternative 5: -4 * log(4/(PI*f)) / PI, the single-log leading Taylor term.
(FPCore (f) :precision binary64 (* -4.0 (/ (log (/ 4.0 (* PI f))) PI)))
/* Alternative 5: -4 * log(4/(pi*f)) / pi; single-log leading Taylor term. */
double code(double f) {
return -4.0 * (log((4.0 / (((double) M_PI) * f))) / ((double) M_PI));
}
/** Alternative 5: -4 * log(4/(pi*f)) / pi; single-log leading Taylor term. */
public static double code(double f) {
return -4.0 * (Math.log((4.0 / (Math.PI * f))) / Math.PI);
}
# Alternative 5: -4 * log(4/(pi*f)) / pi; valid single-statement def.
def code(f): return -4.0 * (math.log((4.0 / (math.pi * f))) / math.pi)
# Alternative 5 (Julia): single-log leading term, kept verbatim.
function code(f) return Float64(-4.0 * Float64(log(Float64(4.0 / Float64(pi * f))) / pi)) end
% Alternative 5: -4 * log(4/(pi*f)) / pi.
% Reformatted: the body cannot share the function declaration line in MATLAB.
function tmp = code(f)
	tmp = -4.0 * (log((4.0 / (pi * f))) / pi);
end
(* Alternative 5 (Mathematica): single-log leading term with machine-precision rounding. *)
code[f_] := N[(-4.0 * N[(N[Log[N[(4.0 / N[(Pi * f), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] / Pi), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
-4 \cdot \frac{\log \left(\frac{4}{\pi \cdot f}\right)}{\pi}
\end{array}
Initial program 7.4%
distribute-lft-neg-in 7.4%
*-commutative 7.4%
Simplified 7.0%
Taylor expanded in f around 0 96.9%
*-commutative 96.9%
associate-*l/ 96.9%
mul-1-neg 96.9%
unsub-neg 96.9%
distribute-rgt-out-- 96.9%
metadata-eval 96.9%
Simplified 96.9%
Taylor expanded in f around 0 96.9%
div-sub 96.7%
remove-double-neg 96.7%
mul-1-neg 96.7%
log-rec 96.7%
div-sub 96.9%
div-sub 96.7%
log-rec 96.7%
mul-1-neg 96.7%
remove-double-neg 96.7%
div-sub 96.9%
Simplified 96.9%
Taylor expanded in f around 0 96.9%
Final simplification 96.9%
;; Alternative 6 (degenerate): (log 0.0) is -inf, so the value is constant in f;
;; recorded by Herbie with very low accuracy (0.7%).
(FPCore (f) :precision binary64 (* (/ (log 0.0) PI) (- 4.0)))
/* Alternative 6 (degenerate): log(0.0) evaluates to -infinity, so this returns
   +infinity regardless of f; kept only because the report lists it. */
double code(double f) {
return (log(0.0) / ((double) M_PI)) * -4.0;
}
/** Alternative 6 (degenerate): Math.log(0.0) is NEGATIVE_INFINITY, so the result is constant in f. */
public static double code(double f) {
return (Math.log(0.0) / Math.PI) * -4.0;
}
# Alternative 6 (degenerate): math.log(0.0) raises ValueError in Python (unlike C's -inf),
# so this function always raises; kept verbatim as generated.
def code(f): return (math.log(0.0) / math.pi) * -4.0
# Alternative 6 (Julia, degenerate): log(0.0) is -Inf, so the result is constant in f.
function code(f) return Float64(Float64(log(0.0) / pi) * Float64(-4.0)) end
% Alternative 6 (degenerate): log(0) is -Inf in MATLAB, so tmp is Inf for every f.
% Reformatted: the body cannot share the function declaration line in MATLAB.
function tmp = code(f)
	tmp = (log(0.0) / pi) * -4.0;
end
(* Alternative 6 (Mathematica, degenerate): Log[0.0] is unbounded, so the value does not depend on f. *)
code[f_] := N[(N[(N[Log[0.0], $MachinePrecision] / Pi), $MachinePrecision] * (-4.0)), $MachinePrecision]
\begin{array}{l}
\\
\frac{\log 0}{\pi} \cdot \left(-4\right)
\end{array}
Initial program 7.4%
Taylor expanded in f around 0 96.7%
Taylor expanded in f around inf 0.7%
distribute-rgt-out 0.7%
distribute-rgt-out-- 0.7%
metadata-eval 0.7%
metadata-eval 0.7%
mul0-rgt 0.7%
Simplified 0.7%
Final simplification 0.7%
herbie shell --seed 2024026
;; Problem statement as given to Herbie (seed on the preceding shell line):
;; -(1/(PI/4)) * log((e^x + e^-x)/(e^x - e^-x)) with x = (PI/4)*f.
(FPCore (f)
:name "VandenBroeck and Keller, Equation (20)"
:precision binary64
(- (* (/ 1.0 (/ PI 4.0)) (log (/ (+ (exp (* (/ PI 4.0) f)) (exp (- (* (/ PI 4.0) f)))) (- (exp (* (/ PI 4.0) f)) (exp (- (* (/ PI 4.0) f)))))))))