
(FPCore (f) :precision binary64 (let* ((t_0 (* (/ PI 4.0) f)) (t_1 (exp t_0)) (t_2 (exp (- t_0)))) (- (* (/ 1.0 (/ PI 4.0)) (log (/ (+ t_1 t_2) (- t_1 t_2)))))))
/* VandenBroeck & Keller Eq. (20), original form:
 * -(4/pi) * log((e^t + e^-t) / (e^t - e^-t)) with t = (pi/4)*f.
 * NOTE(review): the report rates this only ~6.9% accurate — the
 * denominator cancels for small f and exp() overflows for large f. */
double code(double f) {
    const double quarter_pi = ((double) M_PI) / 4.0;
    double t_0 = quarter_pi * f;
    double exp_pos = exp(t_0);
    double exp_neg = exp(-t_0);
    double ratio = (exp_pos + exp_neg) / (exp_pos - exp_neg);
    return -((1.0 / quarter_pi) * log(ratio));
}
/**
 * VandenBroeck &amp; Keller Eq. (20), original form:
 * -(4/pi) * log((e^t + e^-t) / (e^t - e^-t)) with t = (pi/4)*f.
 * NOTE(review): report rates this ~6.9% accurate (cancellation / overflow).
 */
public static double code(double f) {
    final double quarterPi = Math.PI / 4.0;
    double t = quarterPi * f;
    double expPos = Math.exp(t);
    double expNeg = Math.exp(-t);
    double ratio = (expPos + expNeg) / (expPos - expNeg);
    return -((1.0 / quarterPi) * Math.log(ratio));
}
def code(f): t_0 = (math.pi / 4.0) * f t_1 = math.exp(t_0) t_2 = math.exp(-t_0) return -((1.0 / (math.pi / 4.0)) * math.log(((t_1 + t_2) / (t_1 - t_2))))
function code(f) t_0 = Float64(Float64(pi / 4.0) * f) t_1 = exp(t_0) t_2 = exp(Float64(-t_0)) return Float64(-Float64(Float64(1.0 / Float64(pi / 4.0)) * log(Float64(Float64(t_1 + t_2) / Float64(t_1 - t_2))))) end
function tmp = code(f) t_0 = (pi / 4.0) * f; t_1 = exp(t_0); t_2 = exp(-t_0); tmp = -((1.0 / (pi / 4.0)) * log(((t_1 + t_2) / (t_1 - t_2)))); end
code[f_] := Block[{t$95$0 = N[(N[(Pi / 4.0), $MachinePrecision] * f), $MachinePrecision]}, Block[{t$95$1 = N[Exp[t$95$0], $MachinePrecision]}, Block[{t$95$2 = N[Exp[(-t$95$0)], $MachinePrecision]}, (-N[(N[(1.0 / N[(Pi / 4.0), $MachinePrecision]), $MachinePrecision] * N[Log[N[(N[(t$95$1 + t$95$2), $MachinePrecision] / N[(t$95$1 - t$95$2), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision])]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{\pi}{4} \cdot f\\
t_1 := e^{t_0}\\
t_2 := e^{-t_0}\\
-\frac{1}{\frac{\pi}{4}} \cdot \log \left(\frac{t_1 + t_2}{t_1 - t_2}\right)
\end{array}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 6 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (f) :precision binary64 (let* ((t_0 (* (/ PI 4.0) f)) (t_1 (exp t_0)) (t_2 (exp (- t_0)))) (- (* (/ 1.0 (/ PI 4.0)) (log (/ (+ t_1 t_2) (- t_1 t_2)))))))
/* Initial program (repeated listing): -(4/pi)*log(coth(pi*f/4)). */
double code(double f) {
    const double qpi = ((double) M_PI) / 4.0;  /* pi/4 */
    double t_0 = qpi * f;
    double up = exp(t_0);
    double down = exp(-t_0);
    return -((1.0 / qpi) * log((up + down) / (up - down)));
}
/** Initial program (repeated listing): -(4/pi)*log(coth(pi*f/4)). */
public static double code(double f) {
    final double qpi = Math.PI / 4.0;
    double t0 = qpi * f;
    double up = Math.exp(t0);
    double down = Math.exp(-t0);
    return -((1.0 / qpi) * Math.log((up + down) / (up - down)));
}
def code(f): t_0 = (math.pi / 4.0) * f t_1 = math.exp(t_0) t_2 = math.exp(-t_0) return -((1.0 / (math.pi / 4.0)) * math.log(((t_1 + t_2) / (t_1 - t_2))))
function code(f) t_0 = Float64(Float64(pi / 4.0) * f) t_1 = exp(t_0) t_2 = exp(Float64(-t_0)) return Float64(-Float64(Float64(1.0 / Float64(pi / 4.0)) * log(Float64(Float64(t_1 + t_2) / Float64(t_1 - t_2))))) end
function tmp = code(f) t_0 = (pi / 4.0) * f; t_1 = exp(t_0); t_2 = exp(-t_0); tmp = -((1.0 / (pi / 4.0)) * log(((t_1 + t_2) / (t_1 - t_2)))); end
code[f_] := Block[{t$95$0 = N[(N[(Pi / 4.0), $MachinePrecision] * f), $MachinePrecision]}, Block[{t$95$1 = N[Exp[t$95$0], $MachinePrecision]}, Block[{t$95$2 = N[Exp[(-t$95$0)], $MachinePrecision]}, (-N[(N[(1.0 / N[(Pi / 4.0), $MachinePrecision]), $MachinePrecision] * N[Log[N[(N[(t$95$1 + t$95$2), $MachinePrecision] / N[(t$95$1 - t$95$2), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision])]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{\pi}{4} \cdot f\\
t_1 := e^{t_0}\\
t_2 := e^{-t_0}\\
-\frac{1}{\frac{\pi}{4}} \cdot \log \left(\frac{t_1 + t_2}{t_1 - t_2}\right)
\end{array}
\end{array}
(FPCore (f) :precision binary64 (let* ((t_0 (/ 2.0 (sqrt PI)))) (* t_0 (* (log (tanh (* 0.25 (* f PI)))) t_0))))
/* Alternative 1: (2/sqrt(pi))^2 * log(tanh(pi*f/4)). */
double code(double f) {
    double two_over_sqrt_pi = 2.0 / sqrt((double) M_PI);
    double log_tanh = log(tanh(0.25 * (f * ((double) M_PI))));
    return two_over_sqrt_pi * (log_tanh * two_over_sqrt_pi);
}
/** Alternative 1: (2/sqrt(pi))^2 * log(tanh(pi*f/4)). */
public static double code(double f) {
    double twoOverSqrtPi = 2.0 / Math.sqrt(Math.PI);
    double logTanh = Math.log(Math.tanh(0.25 * (f * Math.PI)));
    return twoOverSqrtPi * (logTanh * twoOverSqrtPi);
}
def code(f): t_0 = 2.0 / math.sqrt(math.pi) return t_0 * (math.log(math.tanh((0.25 * (f * math.pi)))) * t_0)
function code(f) t_0 = Float64(2.0 / sqrt(pi)) return Float64(t_0 * Float64(log(tanh(Float64(0.25 * Float64(f * pi)))) * t_0)) end
function tmp = code(f) t_0 = 2.0 / sqrt(pi); tmp = t_0 * (log(tanh((0.25 * (f * pi)))) * t_0); end
code[f_] := Block[{t$95$0 = N[(2.0 / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision]}, N[(t$95$0 * N[(N[Log[N[Tanh[N[(0.25 * N[(f * Pi), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]], $MachinePrecision] * t$95$0), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{2}{\sqrt{\pi}}\\
t_0 \cdot \left(\log \tanh \left(0.25 \cdot \left(f \cdot \pi\right)\right) \cdot t_0\right)
\end{array}
\end{array}
Initial program 6.9%
distribute-lft-neg-in 6.9%
distribute-neg-frac2 6.9%
associate-*l/ 6.9%
*-lft-identity 6.9%
Simplified 6.9%
clear-num 6.9%
log-div 6.9%
metadata-eval 6.9%
Applied egg-rr 6.9%
neg-sub0 6.9%
exp-prod 6.9%
*-commutative 6.9%
exp-prod 6.9%
exp-prod 6.9%
metadata-eval 6.9%
distribute-rgt-neg-in 6.9%
distribute-lft-neg-out 6.9%
+-commutative 6.9%
Simplified 99.4%
add-sqr-sqrt 99.4%
log-prod 99.4%
Applied egg-rr 99.4%
count-2 99.4%
associate-*r* 99.4%
*-commutative 99.4%
Simplified 99.4%
Applied egg-rr 99.4%
Final simplification 99.4%
(FPCore (f) :precision binary64 (/ (* 8.0 (log (sqrt (tanh (* 0.25 (* f PI)))))) PI))
/* Alternative 2: 8 * log(sqrt(tanh(pi*f/4))) / pi. */
double code(double f) {
    double inner = tanh(0.25 * (f * ((double) M_PI)));
    return (8.0 * log(sqrt(inner))) / ((double) M_PI);
}
/** Alternative 2: 8 * log(sqrt(tanh(pi*f/4))) / pi. */
public static double code(double f) {
    double inner = Math.tanh(0.25 * (f * Math.PI));
    return (8.0 * Math.log(Math.sqrt(inner))) / Math.PI;
}
def code(f): return (8.0 * math.log(math.sqrt(math.tanh((0.25 * (f * math.pi)))))) / math.pi
function code(f) return Float64(Float64(8.0 * log(sqrt(tanh(Float64(0.25 * Float64(f * pi)))))) / pi) end
function tmp = code(f) tmp = (8.0 * log(sqrt(tanh((0.25 * (f * pi)))))) / pi; end
code[f_] := N[(N[(8.0 * N[Log[N[Sqrt[N[Tanh[N[(0.25 * N[(f * Pi), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]], $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / Pi), $MachinePrecision]
\begin{array}{l}
\\
\frac{8 \cdot \log \left(\sqrt{\tanh \left(0.25 \cdot \left(f \cdot \pi\right)\right)}\right)}{\pi}
\end{array}
Initial program 6.9%
distribute-lft-neg-in 6.9%
distribute-neg-frac2 6.9%
associate-*l/ 6.9%
*-lft-identity 6.9%
Simplified 6.9%
clear-num 6.9%
log-div 6.9%
metadata-eval 6.9%
Applied egg-rr 6.9%
neg-sub0 6.9%
exp-prod 6.9%
*-commutative 6.9%
exp-prod 6.9%
exp-prod 6.9%
metadata-eval 6.9%
distribute-rgt-neg-in 6.9%
distribute-lft-neg-out 6.9%
+-commutative 6.9%
Simplified 99.4%
add-sqr-sqrt 99.4%
log-prod 99.4%
Applied egg-rr 99.4%
count-2 99.4%
associate-*r* 99.4%
*-commutative 99.4%
Simplified 99.4%
Taylor expanded in f around inf 6.9%
associate-*r/ 6.9%
Simplified 99.4%
Final simplification 99.4%
(FPCore (f) :precision binary64 (* 4.0 (/ (log (tanh (* 0.25 (* f PI)))) PI)))
/* Alternative 3: 4 * log(tanh(pi*f/4)) / pi. */
double code(double f) {
    double log_tanh = log(tanh(0.25 * (f * ((double) M_PI))));
    return 4.0 * (log_tanh / ((double) M_PI));
}
/** Alternative 3: 4 * log(tanh(pi*f/4)) / pi. */
public static double code(double f) {
    double logTanh = Math.log(Math.tanh(0.25 * (f * Math.PI)));
    return 4.0 * (logTanh / Math.PI);
}
def code(f): return 4.0 * (math.log(math.tanh((0.25 * (f * math.pi)))) / math.pi)
function code(f) return Float64(4.0 * Float64(log(tanh(Float64(0.25 * Float64(f * pi)))) / pi)) end
function tmp = code(f) tmp = 4.0 * (log(tanh((0.25 * (f * pi)))) / pi); end
code[f_] := N[(4.0 * N[(N[Log[N[Tanh[N[(0.25 * N[(f * Pi), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]], $MachinePrecision] / Pi), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
4 \cdot \frac{\log \tanh \left(0.25 \cdot \left(f \cdot \pi\right)\right)}{\pi}
\end{array}
Initial program 6.9%
distribute-lft-neg-in 6.9%
distribute-neg-frac2 6.9%
associate-*l/ 6.9%
*-lft-identity 6.9%
Simplified 6.9%
clear-num 6.9%
log-div 6.9%
metadata-eval 6.9%
Applied egg-rr 6.9%
neg-sub0 6.9%
exp-prod 6.9%
*-commutative 6.9%
exp-prod 6.9%
exp-prod 6.9%
metadata-eval 6.9%
distribute-rgt-neg-in 6.9%
distribute-lft-neg-out 6.9%
+-commutative 6.9%
Simplified 99.4%
Taylor expanded in f around inf 6.8%
Simplified 99.4%
(FPCore (f) :precision binary64 (/ (* (log (/ 4.0 (* f PI))) -4.0) PI))
/* Alternative 4 (Taylor form around f = 0): log(4/(f*pi)) * -4 / pi. */
double code(double f) {
    double arg = 4.0 / (f * ((double) M_PI));
    return (log(arg) * -4.0) / ((double) M_PI);
}
/** Alternative 4 (Taylor form around f = 0): log(4/(f*pi)) * -4 / pi. */
public static double code(double f) {
    double arg = 4.0 / (f * Math.PI);
    return (Math.log(arg) * -4.0) / Math.PI;
}
def code(f): return (math.log((4.0 / (f * math.pi))) * -4.0) / math.pi
function code(f) return Float64(Float64(log(Float64(4.0 / Float64(f * pi))) * -4.0) / pi) end
function tmp = code(f) tmp = (log((4.0 / (f * pi))) * -4.0) / pi; end
code[f_] := N[(N[(N[Log[N[(4.0 / N[(f * Pi), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] * -4.0), $MachinePrecision] / Pi), $MachinePrecision]
\begin{array}{l}
\\
\frac{\log \left(\frac{4}{f \cdot \pi}\right) \cdot -4}{\pi}
\end{array}
Initial program 6.9%
Simplified 99.3%
Taylor expanded in f around 0 94.8%
associate-*r/ 94.9%
Applied egg-rr 94.9%
(FPCore (f) :precision binary64 (* (log (/ 4.0 (* f PI))) (/ -4.0 PI)))
/* Alternative 5 (Taylor form around f = 0): log(4/(f*pi)) * (-4/pi). */
double code(double f) {
    double arg = 4.0 / (f * ((double) M_PI));
    return log(arg) * (-4.0 / ((double) M_PI));
}
/** Alternative 5 (Taylor form around f = 0): log(4/(f*pi)) * (-4/pi). */
public static double code(double f) {
    double scale = -4.0 / Math.PI;
    return Math.log(4.0 / (f * Math.PI)) * scale;
}
def code(f): return math.log((4.0 / (f * math.pi))) * (-4.0 / math.pi)
function code(f) return Float64(log(Float64(4.0 / Float64(f * pi))) * Float64(-4.0 / pi)) end
function tmp = code(f) tmp = log((4.0 / (f * pi))) * (-4.0 / pi); end
code[f_] := N[(N[Log[N[(4.0 / N[(f * Pi), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] * N[(-4.0 / Pi), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\log \left(\frac{4}{f \cdot \pi}\right) \cdot \frac{-4}{\pi}
\end{array}
Initial program 6.9%
Simplified 99.3%
Taylor expanded in f around 0 94.8%
(FPCore (f) :precision binary64 (* -4.0 (/ (log (/ (* f PI) 4.0)) PI)))
/* Alternative 6 (Taylor form around f = 0): -4 * log(f*pi/4) / pi. */
double code(double f) {
    double arg = (f * ((double) M_PI)) / 4.0;
    return -4.0 * (log(arg) / ((double) M_PI));
}
/** Alternative 6 (Taylor form around f = 0): -4 * log(f*pi/4) / pi. */
public static double code(double f) {
    double arg = (f * Math.PI) / 4.0;
    return -4.0 * (Math.log(arg) / Math.PI);
}
def code(f): return -4.0 * (math.log(((f * math.pi) / 4.0)) / math.pi)
function code(f) return Float64(-4.0 * Float64(log(Float64(Float64(f * pi) / 4.0)) / pi)) end
function tmp = code(f) tmp = -4.0 * (log(((f * pi) / 4.0)) / pi); end
code[f_] := N[(-4.0 * N[(N[Log[N[(N[(f * Pi), $MachinePrecision] / 4.0), $MachinePrecision]], $MachinePrecision] / Pi), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
-4 \cdot \frac{\log \left(\frac{f \cdot \pi}{4}\right)}{\pi}
\end{array}
Initial program 6.9%
Simplified 99.3%
Taylor expanded in f around 0 94.8%
associate-/r* 94.8%
Simplified 94.8%
associate-/r* 94.8%
clear-num 94.8%
un-div-inv 94.9%
add-sqr-sqrt 0.0%
sqrt-unprod 1.6%
div-inv 1.6%
div-inv 1.6%
swap-sqr 1.6%
metadata-eval 1.6%
metadata-eval 1.6%
metadata-eval 1.6%
metadata-eval 1.6%
swap-sqr 1.6%
sqrt-unprod 1.6%
add-sqr-sqrt 1.6%
*-commutative 1.6%
Applied egg-rr 1.6%
*-commutative 1.6%
Simplified 1.6%
frac-2neg 1.6%
distribute-frac-neg2 1.6%
neg-log 1.6%
clear-num 1.6%
associate-/l* 1.6%
*-commutative 1.6%
Applied egg-rr 1.6%
distribute-neg-frac 1.6%
neg-mul-1 1.6%
times-frac 1.6%
metadata-eval 1.6%
associate-*r/ 1.6%
Simplified 1.6%
herbie shell --seed 2024145
(FPCore (f)
:name "VandenBroeck and Keller, Equation (20)"
:precision binary64
(- (* (/ 1.0 (/ PI 4.0)) (log (/ (+ (exp (* (/ PI 4.0) f)) (exp (- (* (/ PI 4.0) f)))) (- (exp (* (/ PI 4.0) f)) (exp (- (* (/ PI 4.0) f)))))))))