
(FPCore (f) :precision binary64 (let* ((t_0 (* (/ PI 4.0) f)) (t_1 (exp t_0)) (t_2 (exp (- t_0)))) (- (* (/ 1.0 (/ PI 4.0)) (log (/ (+ t_1 t_2) (- t_1 t_2)))))))
/* VandenBroeck and Keller, Eq. (20): -(4/pi) * log((e^t + e^-t) / (e^t - e^-t))
   with t = (pi/4)*f, i.e. -(4/pi) * log(coth((pi/4)*f)).
   NOTE(review): t_1 - t_2 cancels for small |f|; the report's alternatives
   below rewrite the expression to avoid that loss (initial accuracy ~6.4%). */
double code(double f) {
double t_0 = (((double) M_PI) / 4.0) * f;
double t_1 = exp(t_0);
double t_2 = exp(-t_0);
return -((1.0 / (((double) M_PI) / 4.0)) * log(((t_1 + t_2) / (t_1 - t_2))));
}
// VandenBroeck and Keller, Eq. (20): -(4/pi) * log(coth((pi/4)*f)),
// computed from exp(t) and exp(-t); t_1 - t_2 cancels for small |f|.
public static double code(double f) {
double t_0 = (Math.PI / 4.0) * f;
double t_1 = Math.exp(t_0);
double t_2 = Math.exp(-t_0);
return -((1.0 / (Math.PI / 4.0)) * Math.log(((t_1 + t_2) / (t_1 - t_2))));
}
def code(f): t_0 = (math.pi / 4.0) * f t_1 = math.exp(t_0) t_2 = math.exp(-t_0) return -((1.0 / (math.pi / 4.0)) * math.log(((t_1 + t_2) / (t_1 - t_2))))
function code(f) t_0 = Float64(Float64(pi / 4.0) * f) t_1 = exp(t_0) t_2 = exp(Float64(-t_0)) return Float64(-Float64(Float64(1.0 / Float64(pi / 4.0)) * log(Float64(Float64(t_1 + t_2) / Float64(t_1 - t_2))))) end
function tmp = code(f) t_0 = (pi / 4.0) * f; t_1 = exp(t_0); t_2 = exp(-t_0); tmp = -((1.0 / (pi / 4.0)) * log(((t_1 + t_2) / (t_1 - t_2)))); end
code[f_] := Block[{t$95$0 = N[(N[(Pi / 4.0), $MachinePrecision] * f), $MachinePrecision]}, Block[{t$95$1 = N[Exp[t$95$0], $MachinePrecision]}, Block[{t$95$2 = N[Exp[(-t$95$0)], $MachinePrecision]}, (-N[(N[(1.0 / N[(Pi / 4.0), $MachinePrecision]), $MachinePrecision] * N[Log[N[(N[(t$95$1 + t$95$2), $MachinePrecision] / N[(t$95$1 - t$95$2), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision])]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{\pi}{4} \cdot f\\
t_1 := e^{t_0}\\
t_2 := e^{-t_0}\\
-\frac{1}{\frac{\pi}{4}} \cdot \log \left(\frac{t_1 + t_2}{t_1 - t_2}\right)
\end{array}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 6 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (f) :precision binary64 (let* ((t_0 (* (/ PI 4.0) f)) (t_1 (exp t_0)) (t_2 (exp (- t_0)))) (- (* (/ 1.0 (/ PI 4.0)) (log (/ (+ t_1 t_2) (- t_1 t_2)))))))
/* VandenBroeck and Keller, Eq. (20): -(4/pi) * log((e^t + e^-t) / (e^t - e^-t))
   with t = (pi/4)*f, i.e. -(4/pi) * log(coth((pi/4)*f)).
   NOTE(review): t_1 - t_2 cancels for small |f|. */
double code(double f) {
double t_0 = (((double) M_PI) / 4.0) * f;
double t_1 = exp(t_0);
double t_2 = exp(-t_0);
return -((1.0 / (((double) M_PI) / 4.0)) * log(((t_1 + t_2) / (t_1 - t_2))));
}
// VandenBroeck and Keller, Eq. (20): -(4/pi) * log(coth((pi/4)*f)),
// computed from exp(t) and exp(-t); t_1 - t_2 cancels for small |f|.
public static double code(double f) {
double t_0 = (Math.PI / 4.0) * f;
double t_1 = Math.exp(t_0);
double t_2 = Math.exp(-t_0);
return -((1.0 / (Math.PI / 4.0)) * Math.log(((t_1 + t_2) / (t_1 - t_2))));
}
def code(f): t_0 = (math.pi / 4.0) * f t_1 = math.exp(t_0) t_2 = math.exp(-t_0) return -((1.0 / (math.pi / 4.0)) * math.log(((t_1 + t_2) / (t_1 - t_2))))
function code(f) t_0 = Float64(Float64(pi / 4.0) * f) t_1 = exp(t_0) t_2 = exp(Float64(-t_0)) return Float64(-Float64(Float64(1.0 / Float64(pi / 4.0)) * log(Float64(Float64(t_1 + t_2) / Float64(t_1 - t_2))))) end
function tmp = code(f) t_0 = (pi / 4.0) * f; t_1 = exp(t_0); t_2 = exp(-t_0); tmp = -((1.0 / (pi / 4.0)) * log(((t_1 + t_2) / (t_1 - t_2)))); end
code[f_] := Block[{t$95$0 = N[(N[(Pi / 4.0), $MachinePrecision] * f), $MachinePrecision]}, Block[{t$95$1 = N[Exp[t$95$0], $MachinePrecision]}, Block[{t$95$2 = N[Exp[(-t$95$0)], $MachinePrecision]}, (-N[(N[(1.0 / N[(Pi / 4.0), $MachinePrecision]), $MachinePrecision] * N[Log[N[(N[(t$95$1 + t$95$2), $MachinePrecision] / N[(t$95$1 - t$95$2), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision])]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{\pi}{4} \cdot f\\
t_1 := e^{t_0}\\
t_2 := e^{-t_0}\\
-\frac{1}{\frac{\pi}{4}} \cdot \log \left(\frac{t_1 + t_2}{t_1 - t_2}\right)
\end{array}
\end{array}
(FPCore (f) :precision binary64 (let* ((t_0 (/ PI (/ 4.0 f)))) (/ (- (log (/ (cosh t_0) (sinh t_0)))) (* PI 0.25))))
/* Equivalent hyperbolic form of Eq. (20): -log(cosh(t)/sinh(t)) / (pi/4)
   with t = pi/(4/f); avoids the cancellation-prone exp difference. */
double code(double f) {
double t_0 = ((double) M_PI) / (4.0 / f);
return -log((cosh(t_0) / sinh(t_0))) / (((double) M_PI) * 0.25);
}
// Equivalent hyperbolic form of Eq. (20): -log(cosh(t)/sinh(t)) / (pi/4)
// with t = pi/(4/f); avoids the cancellation-prone exp difference.
public static double code(double f) {
double t_0 = Math.PI / (4.0 / f);
return -Math.log((Math.cosh(t_0) / Math.sinh(t_0))) / (Math.PI * 0.25);
}
def code(f): t_0 = math.pi / (4.0 / f) return -math.log((math.cosh(t_0) / math.sinh(t_0))) / (math.pi * 0.25)
function code(f) t_0 = Float64(pi / Float64(4.0 / f)) return Float64(Float64(-log(Float64(cosh(t_0) / sinh(t_0)))) / Float64(pi * 0.25)) end
function tmp = code(f) t_0 = pi / (4.0 / f); tmp = -log((cosh(t_0) / sinh(t_0))) / (pi * 0.25); end
code[f_] := Block[{t$95$0 = N[(Pi / N[(4.0 / f), $MachinePrecision]), $MachinePrecision]}, N[((-N[Log[N[(N[Cosh[t$95$0], $MachinePrecision] / N[Sinh[t$95$0], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]) / N[(Pi * 0.25), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{\pi}{\frac{4}{f}}\\
\frac{-\log \left(\frac{\cosh t_0}{\sinh t_0}\right)}{\pi \cdot 0.25}
\end{array}
\end{array}
Initial program 6.4%
expm1-log1p-u 6.4%
expm1-udef 6.4%
Applied egg-rr 95.4%
expm1-def 95.4%
expm1-log1p 96.7%
*-commutative 96.7%
times-frac 96.7%
metadata-eval 96.7%
*-lft-identity 96.7%
associate-/l* 96.7%
associate-/l* 96.7%
Simplified 96.7%
Final simplification 96.7%
(FPCore (f) :precision binary64 (- (/ (log (/ (cosh (/ PI (/ 4.0 f))) (* PI (* f 0.25)))) (* PI 0.25))))
/* Herbie alternative: -log(cosh(pi*f/4) / (pi*f/4)) / (pi/4).
   sinh(t) is replaced by its argument t = pi*f/4 (Taylor expansion in f
   around 0 per the derivation log). */
double code(double f) {
return -(log((cosh((((double) M_PI) / (4.0 / f))) / (((double) M_PI) * (f * 0.25)))) / (((double) M_PI) * 0.25));
}
// Herbie alternative: -log(cosh(pi*f/4) / (pi*f/4)) / (pi/4); sinh(t) is
// replaced by t (Taylor expansion in f around 0 per the derivation log).
public static double code(double f) {
return -(Math.log((Math.cosh((Math.PI / (4.0 / f))) / (Math.PI * (f * 0.25)))) / (Math.PI * 0.25));
}
def code(f): return -(math.log((math.cosh((math.pi / (4.0 / f))) / (math.pi * (f * 0.25)))) / (math.pi * 0.25))
function code(f) return Float64(-Float64(log(Float64(cosh(Float64(pi / Float64(4.0 / f))) / Float64(pi * Float64(f * 0.25)))) / Float64(pi * 0.25))) end
function tmp = code(f) tmp = -(log((cosh((pi / (4.0 / f))) / (pi * (f * 0.25)))) / (pi * 0.25)); end
code[f_] := (-N[(N[Log[N[(N[Cosh[N[(Pi / N[(4.0 / f), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] / N[(Pi * N[(f * 0.25), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] / N[(Pi * 0.25), $MachinePrecision]), $MachinePrecision])
\begin{array}{l}
\\
-\frac{\log \left(\frac{\cosh \left(\frac{\pi}{\frac{4}{f}}\right)}{\pi \cdot \left(f \cdot 0.25\right)}\right)}{\pi \cdot 0.25}
\end{array}
Initial program 6.4%
expm1-log1p-u 6.4%
expm1-udef 6.4%
Applied egg-rr 95.4%
expm1-def 95.4%
expm1-log1p 96.7%
*-commutative 96.7%
times-frac 96.7%
metadata-eval 96.7%
*-lft-identity 96.7%
associate-/l* 96.7%
associate-/l* 96.7%
Simplified 96.7%
add-cbrt-cube 32.6%
pow3 32.6%
associate-/l* 32.6%
associate-*l/ 32.6%
*-commutative 32.6%
div-inv 32.6%
metadata-eval 32.6%
Applied egg-rr 32.6%
Taylor expanded in f around 0 95.5%
*-commutative 95.5%
*-commutative 95.5%
associate-*l* 95.5%
Simplified 95.5%
Final simplification 95.5%
(FPCore (f) :precision binary64 (- (fabs (/ (log (* PI (* f 0.25))) (* PI -0.25)))))
/* Herbie alternative: -|log(pi*f/4) / (-pi/4)| (small-f approximation;
   derived by Taylor expansion in f around 0 per the derivation log). */
double code(double f) {
return -fabs((log((((double) M_PI) * (f * 0.25))) / (((double) M_PI) * -0.25)));
}
// Herbie alternative: -|log(pi*f/4) / (-pi/4)| (small-f approximation).
public static double code(double f) {
return -Math.abs((Math.log((Math.PI * (f * 0.25))) / (Math.PI * -0.25)));
}
def code(f): return -math.fabs((math.log((math.pi * (f * 0.25))) / (math.pi * -0.25)))
function code(f) return Float64(-abs(Float64(log(Float64(pi * Float64(f * 0.25))) / Float64(pi * -0.25)))) end
function tmp = code(f) tmp = -abs((log((pi * (f * 0.25))) / (pi * -0.25))); end
code[f_] := (-N[Abs[N[(N[Log[N[(Pi * N[(f * 0.25), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] / N[(Pi * -0.25), $MachinePrecision]), $MachinePrecision]], $MachinePrecision])
\begin{array}{l}
\\
-\left|\frac{\log \left(\pi \cdot \left(f \cdot 0.25\right)\right)}{\pi \cdot -0.25}\right|
\end{array}
Initial program 6.4%
expm1-log1p-u 6.4%
expm1-udef 6.4%
Applied egg-rr 95.4%
expm1-def 95.4%
expm1-log1p 96.7%
*-commutative 96.7%
times-frac 96.7%
metadata-eval 96.7%
*-lft-identity 96.7%
associate-/l* 96.7%
associate-/l* 96.7%
Simplified 96.7%
Taylor expanded in f around 0 95.4%
add-sqr-sqrt 95.0%
sqrt-unprod 95.5%
pow2 95.5%
frac-2neg 95.5%
neg-log 95.5%
clear-num 95.5%
div-inv 95.5%
metadata-eval 95.5%
*-commutative 95.5%
associate-*r* 95.5%
*-commutative 95.5%
distribute-rgt-neg-in 95.5%
metadata-eval 95.5%
Applied egg-rr 95.5%
unpow2 95.5%
rem-sqrt-square 95.5%
Simplified 95.5%
Final simplification 95.5%
(FPCore (f) :precision binary64 (* (log (/ 4.0 (* PI f))) (/ (- 4.0) PI)))
/* Herbie alternative: log(4/(pi*f)) * (-4/pi) (small-f approximation;
   Taylor expansion in f around 0 per the derivation log). */
double code(double f) {
return log((4.0 / (((double) M_PI) * f))) * (-4.0 / ((double) M_PI));
}
// Herbie alternative: log(4/(pi*f)) * (-4/pi) (small-f approximation).
public static double code(double f) {
return Math.log((4.0 / (Math.PI * f))) * (-4.0 / Math.PI);
}
def code(f): return math.log((4.0 / (math.pi * f))) * (-4.0 / math.pi)
function code(f) return Float64(log(Float64(4.0 / Float64(pi * f))) * Float64(Float64(-4.0) / pi)) end
function tmp = code(f) tmp = log((4.0 / (pi * f))) * (-4.0 / pi); end
code[f_] := N[(N[Log[N[(4.0 / N[(Pi * f), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] * N[((-4.0) / Pi), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\log \left(\frac{4}{\pi \cdot f}\right) \cdot \frac{-4}{\pi}
\end{array}
Initial program 6.4%
expm1-log1p-u 6.4%
expm1-udef 6.4%
Applied egg-rr 95.4%
expm1-def 95.4%
expm1-log1p 96.7%
*-commutative 96.7%
times-frac 96.7%
metadata-eval 96.7%
*-lft-identity 96.7%
associate-/l* 96.7%
associate-/l* 96.7%
Simplified 96.7%
Taylor expanded in f around 0 95.4%
Taylor expanded in f around 0 95.4%
*-commutative 95.4%
mul-1-neg 95.4%
log-rec 95.4%
+-commutative 95.4%
log-rec 95.4%
unsub-neg 95.4%
log-div 95.4%
associate-/r* 95.4%
metadata-eval 95.4%
times-frac 95.4%
*-commutative 95.4%
*-lft-identity 95.4%
associate-*r/ 95.3%
*-commutative 95.3%
*-commutative 95.3%
Simplified 95.3%
Final simplification 95.3%
(FPCore (f) :precision binary64 (/ (- (log (/ 4.0 (* PI f)))) (* PI 0.25)))
/* Herbie alternative: -log(4/(pi*f)) / (pi/4) (small-f approximation;
   Taylor expansion in f around 0 per the derivation log). */
double code(double f) {
return -log((4.0 / (((double) M_PI) * f))) / (((double) M_PI) * 0.25);
}
// Herbie alternative: -log(4/(pi*f)) / (pi/4) (small-f approximation).
public static double code(double f) {
return -Math.log((4.0 / (Math.PI * f))) / (Math.PI * 0.25);
}
def code(f): return -math.log((4.0 / (math.pi * f))) / (math.pi * 0.25)
function code(f) return Float64(Float64(-log(Float64(4.0 / Float64(pi * f)))) / Float64(pi * 0.25)) end
function tmp = code(f) tmp = -log((4.0 / (pi * f))) / (pi * 0.25); end
code[f_] := N[((-N[Log[N[(4.0 / N[(Pi * f), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]) / N[(Pi * 0.25), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{-\log \left(\frac{4}{\pi \cdot f}\right)}{\pi \cdot 0.25}
\end{array}
Initial program 6.4%
expm1-log1p-u 6.4%
expm1-udef 6.4%
Applied egg-rr 95.4%
expm1-def 95.4%
expm1-log1p 96.7%
*-commutative 96.7%
times-frac 96.7%
metadata-eval 96.7%
*-lft-identity 96.7%
associate-/l* 96.7%
associate-/l* 96.7%
Simplified 96.7%
Taylor expanded in f around 0 95.4%
Final simplification 95.4%
(FPCore (f) :precision binary64 (* (log 0.0) (/ -1.0 (/ PI 4.0))))
/* Degenerate alternative: log(0) * (-1/(pi/4)) = +infinity for every f
   (f is unused; 0.7% accuracy per the derivation log). */
double code(double f) {
return log(0.0) * (-1.0 / (((double) M_PI) / 4.0));
}
// Degenerate alternative: Math.log(0.0) is -Infinity, so this returns
// +Infinity for every f (f is unused; 0.7% accuracy per the log).
public static double code(double f) {
return Math.log(0.0) * (-1.0 / (Math.PI / 4.0));
}
def code(f): return math.log(0.0) * (-1.0 / (math.pi / 4.0))
function code(f) return Float64(log(0.0) * Float64(-1.0 / Float64(pi / 4.0))) end
function tmp = code(f) tmp = log(0.0) * (-1.0 / (pi / 4.0)); end
code[f_] := N[(N[Log[0.0], $MachinePrecision] * N[(-1.0 / N[(Pi / 4.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\log 0 \cdot \frac{-1}{\frac{\pi}{4}}
\end{array}
Initial program 6.4%
Taylor expanded in f around 0 95.3%
distribute-rgt-out-- 95.3%
metadata-eval 95.3%
Simplified 95.3%
add-log-exp 3.4%
associate-*l* 3.4%
Applied egg-rr 3.4%
Taylor expanded in f around inf 0.7%
Final simplification 0.7%
herbie shell --seed 2023252
(FPCore (f)
:name "VandenBroeck and Keller, Equation (20)"
:precision binary64
(- (* (/ 1.0 (/ PI 4.0)) (log (/ (+ (exp (* (/ PI 4.0) f)) (exp (- (* (/ PI 4.0) f)))) (- (exp (* (/ PI 4.0) f)) (exp (- (* (/ PI 4.0) f)))))))))