
; Input program: -(1/(pi/4)) * log((e^t + e^-t) / (e^t - e^-t)) with t = (pi/4)*f,
; i.e. (4/pi) * log(tanh((pi/4)*f)), evaluated in binary64.
(FPCore (f) :precision binary64 (let* ((t_0 (* (/ PI 4.0) f)) (t_1 (exp t_0)) (t_2 (exp (- t_0)))) (- (* (/ 1.0 (/ PI 4.0)) (log (/ (+ t_1 t_2) (- t_1 t_2)))))))
/* Evaluates -(1/(pi/4)) * log((e^t + e^-t) / (e^t - e^-t)) with t = (pi/4)*f,
 * mathematically (4/pi) * log(tanh((pi/4)*f)), entirely in binary64.
 * Note: the denominator exp(t) - exp(-t) is exactly 0 at f == 0. */
double code(double f) {
    const double quarter_pi = ((double) M_PI) / 4.0;
    double scaled = quarter_pi * f;
    double grow = exp(scaled);
    double decay = exp(-scaled);
    double coth_val = (grow + decay) / (grow - decay);
    return -((1.0 / quarter_pi) * log(coth_val));
}
/**
 * Evaluates -(1/(pi/4)) * log((e^t + e^-t) / (e^t - e^-t)) with t = (pi/4)*f,
 * mathematically (4/pi) * log(tanh((pi/4)*f)), in double precision.
 *
 * @param f input value
 * @return the binary64 result (negative for f &gt; 0)
 */
public static double code(double f) {
    final double quarterPi = Math.PI / 4.0;
    final double scaled = quarterPi * f;
    final double grow = Math.exp(scaled);
    final double decay = Math.exp(-scaled);
    final double cothVal = (grow + decay) / (grow - decay);
    return -((1.0 / quarterPi) * Math.log(cothVal));
}
def code(f):
    """Evaluate -(1/(pi/4)) * log((e^t + e^-t) / (e^t - e^-t)) with t = (pi/4)*f.

    Mathematically equal to (4/pi) * log(tanh((pi/4)*f)); binary64 throughout.
    Note: t_1 - t_2 is exactly 0 at f == 0 (ZeroDivisionError).
    Original had all statements collapsed onto one line (invalid syntax).
    """
    t_0 = (math.pi / 4.0) * f
    t_1 = math.exp(t_0)
    t_2 = math.exp(-t_0)
    return -((1.0 / (math.pi / 4.0)) * math.log((t_1 + t_2) / (t_1 - t_2)))
# -(1/(pi/4)) * log((e^t + e^-t)/(e^t - e^-t)) with t = (pi/4)*f,
# i.e. (4/pi) * log(tanh((pi/4)*f)), evaluated in Float64.
# Original had all statements collapsed onto one line without separators (invalid).
function code(f)
    t_0 = Float64(Float64(pi / 4.0) * f)
    t_1 = exp(t_0)
    t_2 = exp(Float64(-t_0))
    return Float64(-Float64(Float64(1.0 / Float64(pi / 4.0)) * log(Float64(Float64(t_1 + t_2) / Float64(t_1 - t_2)))))
end
function tmp = code(f)
    % -(1/(pi/4)) * log((e^t + e^-t)/(e^t - e^-t)) with t = (pi/4)*f,
    % i.e. (4/pi) * log(tanh((pi/4)*f)), in double precision.
    % Original had statements on the function-declaration line (invalid MATLAB).
    t_0 = (pi / 4.0) * f;
    t_1 = exp(t_0);
    t_2 = exp(-t_0);
    tmp = -((1.0 / (pi / 4.0)) * log(((t_1 + t_2) / (t_1 - t_2))));
end
(* -(1/(Pi/4)) * Log[(E^t + E^-t)/(E^t - E^-t)] with t = (Pi/4) f, each step rounded via N[..., $MachinePrecision]. *)
code[f_] := Block[{t$95$0 = N[(N[(Pi / 4.0), $MachinePrecision] * f), $MachinePrecision]}, Block[{t$95$1 = N[Exp[t$95$0], $MachinePrecision]}, Block[{t$95$2 = N[Exp[(-t$95$0)], $MachinePrecision]}, (-N[(N[(1.0 / N[(Pi / 4.0), $MachinePrecision]), $MachinePrecision] * N[Log[N[(N[(t$95$1 + t$95$2), $MachinePrecision] / N[(t$95$1 - t$95$2), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision])]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{\pi}{4} \cdot f\\
t_1 := e^{t_0}\\
t_2 := e^{-t_0}\\
-\frac{1}{\frac{\pi}{4}} \cdot \log \left(\frac{t_1 + t_2}{t_1 - t_2}\right)
\end{array}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 6 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
; Input program (repeated): -(1/(pi/4)) * log((e^t + e^-t) / (e^t - e^-t)) with t = (pi/4)*f,
; i.e. (4/pi) * log(tanh((pi/4)*f)), evaluated in binary64.
(FPCore (f) :precision binary64 (let* ((t_0 (* (/ PI 4.0) f)) (t_1 (exp t_0)) (t_2 (exp (- t_0)))) (- (* (/ 1.0 (/ PI 4.0)) (log (/ (+ t_1 t_2) (- t_1 t_2)))))))
/* Evaluates -(1/(pi/4)) * log((e^t + e^-t) / (e^t - e^-t)) with t = (pi/4)*f
 * in binary64; mathematically (4/pi) * log(tanh((pi/4)*f)).
 * Note: t_1 - t_2 is exactly 0 at f == 0, so the quotient is infinite there. */
double code(double f) {
double t_0 = (((double) M_PI) / 4.0) * f;
double t_1 = exp(t_0);
double t_2 = exp(-t_0);
return -((1.0 / (((double) M_PI) / 4.0)) * log(((t_1 + t_2) / (t_1 - t_2))));
}
/**
 * Evaluates -(1/(pi/4)) * log((e^t + e^-t) / (e^t - e^-t)) with t = (pi/4)*f
 * in double precision; mathematically (4/pi) * log(tanh((pi/4)*f)).
 *
 * @param f input value
 * @return the binary64 result; t_1 - t_2 is 0 at f == 0, giving an infinite quotient
 */
public static double code(double f) {
double t_0 = (Math.PI / 4.0) * f;
double t_1 = Math.exp(t_0);
double t_2 = Math.exp(-t_0);
return -((1.0 / (Math.PI / 4.0)) * Math.log(((t_1 + t_2) / (t_1 - t_2))));
}
def code(f):
    """Evaluate -(1/(pi/4)) * log((e^t + e^-t) / (e^t - e^-t)) with t = (pi/4)*f.

    Mathematically equal to (4/pi) * log(tanh((pi/4)*f)); binary64 throughout.
    Note: t_1 - t_2 is exactly 0 at f == 0 (ZeroDivisionError).
    Original had all statements collapsed onto one line (invalid syntax).
    """
    t_0 = (math.pi / 4.0) * f
    t_1 = math.exp(t_0)
    t_2 = math.exp(-t_0)
    return -((1.0 / (math.pi / 4.0)) * math.log((t_1 + t_2) / (t_1 - t_2)))
# -(1/(pi/4)) * log((e^t + e^-t)/(e^t - e^-t)) with t = (pi/4)*f,
# i.e. (4/pi) * log(tanh((pi/4)*f)), evaluated in Float64.
# Original had all statements collapsed onto one line without separators (invalid).
function code(f)
    t_0 = Float64(Float64(pi / 4.0) * f)
    t_1 = exp(t_0)
    t_2 = exp(Float64(-t_0))
    return Float64(-Float64(Float64(1.0 / Float64(pi / 4.0)) * log(Float64(Float64(t_1 + t_2) / Float64(t_1 - t_2)))))
end
function tmp = code(f)
    % -(1/(pi/4)) * log((e^t + e^-t)/(e^t - e^-t)) with t = (pi/4)*f,
    % i.e. (4/pi) * log(tanh((pi/4)*f)), in double precision.
    % Original had statements on the function-declaration line (invalid MATLAB).
    t_0 = (pi / 4.0) * f;
    t_1 = exp(t_0);
    t_2 = exp(-t_0);
    tmp = -((1.0 / (pi / 4.0)) * log(((t_1 + t_2) / (t_1 - t_2))));
end
(* -(1/(Pi/4)) * Log[(E^t + E^-t)/(E^t - E^-t)] with t = (Pi/4) f, each step rounded via N[..., $MachinePrecision]. *)
code[f_] := Block[{t$95$0 = N[(N[(Pi / 4.0), $MachinePrecision] * f), $MachinePrecision]}, Block[{t$95$1 = N[Exp[t$95$0], $MachinePrecision]}, Block[{t$95$2 = N[Exp[(-t$95$0)], $MachinePrecision]}, (-N[(N[(1.0 / N[(Pi / 4.0), $MachinePrecision]), $MachinePrecision] * N[Log[N[(N[(t$95$1 + t$95$2), $MachinePrecision] / N[(t$95$1 - t$95$2), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision])]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{\pi}{4} \cdot f\\
t_1 := e^{t_0}\\
t_2 := e^{-t_0}\\
-\frac{1}{\frac{\pi}{4}} \cdot \log \left(\frac{t_1 + t_2}{t_1 - t_2}\right)
\end{array}
\end{array}
; Herbie alternative: log(tanh(pi / (4/f))) / (pi/4); pi/(4/f) equals (pi/4)*f,
; and tanh replaces the explicit exp-based quotient of the original.
(FPCore (f) :precision binary64 (/ (log (tanh (/ PI (/ 4.0 f)))) (/ PI 4.0)))
/* Herbie rewrite: log(tanh(pi / (4/f))) / (pi/4); pi/(4/f) equals (pi/4)*f,
 * with tanh replacing the exp-based quotient of the original program. */
double code(double f) {
return log(tanh((((double) M_PI) / (4.0 / f)))) / (((double) M_PI) / 4.0);
}
/**
 * Herbie rewrite: log(tanh(pi / (4/f))) / (pi/4); pi/(4/f) equals (pi/4)*f.
 *
 * @param f input value
 * @return the binary64 result
 */
public static double code(double f) {
return Math.log(Math.tanh((Math.PI / (4.0 / f)))) / (Math.PI / 4.0);
}
def code(f):
    """Herbie rewrite: log(tanh(pi / (4/f))) / (pi/4); pi/(4/f) equals (pi/4)*f."""
    quarter_pi = math.pi / 4.0
    inner = math.tanh(math.pi / (4.0 / f))
    return math.log(inner) / quarter_pi
# Herbie rewrite: log(tanh(pi / (4/f))) / (pi/4); pi/(4/f) equals (pi/4)*f.
function code(f) return Float64(log(tanh(Float64(pi / Float64(4.0 / f)))) / Float64(pi / 4.0)) end
function tmp = code(f)
    % Herbie rewrite: log(tanh(pi/(4/f))) / (pi/4); pi/(4/f) equals (pi/4)*f.
    % Original had the statement on the function-declaration line (invalid MATLAB).
    tmp = log(tanh((pi / (4.0 / f)))) / (pi / 4.0);
end
(* Herbie rewrite: Log[Tanh[Pi / (4/f)]] / (Pi/4), rounded stepwise at $MachinePrecision. *)
code[f_] := N[(N[Log[N[Tanh[N[(Pi / N[(4.0 / f), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]], $MachinePrecision] / N[(Pi / 4.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\log \tanh \left(\frac{\pi}{\frac{4}{f}}\right)}{\frac{\pi}{4}}
\end{array}
Initial program 8.0%
*-commutative N/A
un-div-inv N/A
distribute-neg-frac N/A
/-lowering-/.f64 N/A
Applied egg-rr 99.2%
; Herbie alternative: log(tanh(pi / (4/f))) * (4/pi) -- the division by pi/4
; of the previous alternative replaced by multiplication with 4/pi.
(FPCore (f) :precision binary64 (* (log (tanh (/ PI (/ 4.0 f)))) (/ 4.0 PI)))
/* Herbie rewrite: log(tanh(pi / (4/f))) * (4/pi); pi/(4/f) equals (pi/4)*f. */
double code(double f) {
    const double arg = ((double) M_PI) / (4.0 / f);
    return log(tanh(arg)) * (4.0 / ((double) M_PI));
}
/**
 * Herbie rewrite: log(tanh(pi / (4/f))) * (4/pi); pi/(4/f) equals (pi/4)*f.
 *
 * @param f input value
 * @return the binary64 result
 */
public static double code(double f) {
return Math.log(Math.tanh((Math.PI / (4.0 / f)))) * (4.0 / Math.PI);
}
def code(f):
    """Herbie rewrite: log(tanh(pi / (4/f))) * (4/pi); pi/(4/f) equals (pi/4)*f."""
    inner = math.tanh(math.pi / (4.0 / f))
    return math.log(inner) * (4.0 / math.pi)
# Herbie rewrite: log(tanh(pi / (4/f))) * (4/pi); pi/(4/f) equals (pi/4)*f.
function code(f) return Float64(log(tanh(Float64(pi / Float64(4.0 / f)))) * Float64(4.0 / pi)) end
function tmp = code(f)
    % Herbie rewrite: log(tanh(pi/(4/f))) * (4/pi); pi/(4/f) equals (pi/4)*f.
    % Original had the statement on the function-declaration line (invalid MATLAB).
    tmp = log(tanh((pi / (4.0 / f)))) * (4.0 / pi);
end
(* Herbie rewrite: Log[Tanh[Pi / (4/f)]] * (4/Pi), rounded stepwise at $MachinePrecision. *)
code[f_] := N[(N[Log[N[Tanh[N[(Pi / N[(4.0 / f), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]], $MachinePrecision] * N[(4.0 / Pi), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\log \tanh \left(\frac{\pi}{\frac{4}{f}}\right) \cdot \frac{4}{\pi}
\end{array}
Initial program 8.0%
distribute-rgt-neg-in N/A
*-lowering-*.f64 N/A
clear-num N/A
/-lowering-/.f64 N/A
PI-lowering-PI.f64 N/A
neg-log N/A
clear-num N/A
Applied egg-rr 99.2%
Final simplification 99.2%
; Herbie alternative from a Taylor expansion around f = 0:
; log((4/pi + f^2 * pi * 0.0833...) / f) / (pi / -4), where 0.08333... ~ 1/12.
(FPCore (f) :precision binary64 (/ (log (/ (+ (/ 4.0 PI) (* f (* f (* PI 0.08333333333333333)))) f)) (/ PI -4.0)))
/* Herbie alternative from a Taylor expansion around f = 0:
 * log((4/pi + f^2 * pi * 0.0833...) / f) / (pi / -4); 0.08333... ~ 1/12. */
double code(double f) {
return log((((4.0 / ((double) M_PI)) + (f * (f * (((double) M_PI) * 0.08333333333333333)))) / f)) / (((double) M_PI) / -4.0);
}
/**
 * Herbie alternative from a Taylor expansion around f = 0:
 * log((4/pi + f^2 * pi * 0.0833...) / f) / (pi / -4); 0.08333... ~ 1/12.
 *
 * @param f input value
 * @return the binary64 result
 */
public static double code(double f) {
return Math.log((((4.0 / Math.PI) + (f * (f * (Math.PI * 0.08333333333333333)))) / f)) / (Math.PI / -4.0);
}
# Herbie alternative from a Taylor expansion around f = 0:
# log((4/pi + f^2 * pi * 0.0833...) / f) / (pi / -4); 0.08333... ~ 1/12.
def code(f): return math.log((((4.0 / math.pi) + (f * (f * (math.pi * 0.08333333333333333)))) / f)) / (math.pi / -4.0)
# Herbie alternative from a Taylor expansion around f = 0:
# log((4/pi + f^2 * pi * 0.0833...) / f) / (pi / -4); 0.08333... ~ 1/12.
function code(f) return Float64(log(Float64(Float64(Float64(4.0 / pi) + Float64(f * Float64(f * Float64(pi * 0.08333333333333333)))) / f)) / Float64(pi / -4.0)) end
function tmp = code(f)
    % Herbie alternative from a Taylor expansion around f = 0:
    % log((4/pi + f^2 * pi * 0.0833...) / f) / (pi / -4); 0.08333... ~ 1/12.
    % Original had the statement on the function-declaration line (invalid MATLAB).
    tmp = log((((4.0 / pi) + (f * (f * (pi * 0.08333333333333333)))) / f)) / (pi / -4.0);
end
(* Herbie alternative from a Taylor expansion around f = 0: Log[(4/Pi + f^2 Pi/12)/f] / (Pi/-4), rounded stepwise. *)
code[f_] := N[(N[Log[N[(N[(N[(4.0 / Pi), $MachinePrecision] + N[(f * N[(f * N[(Pi * 0.08333333333333333), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / f), $MachinePrecision]], $MachinePrecision] / N[(Pi / -4.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\log \left(\frac{\frac{4}{\pi} + f \cdot \left(f \cdot \left(\pi \cdot 0.08333333333333333\right)\right)}{f}\right)}{\frac{\pi}{-4}}
\end{array}
Initial program 8.0%
distribute-lft-neg-in N/A
distribute-neg-frac2 N/A
associate-*l/ N/A
*-lft-identity N/A
/-lowering-/.f64 N/A
Simplified 8.0%
Taylor expanded in f around 0
Simplified 95.3%
Taylor expanded in f around 0
/-lowering-/.f64 N/A
+-lowering-+.f64 N/A
unpow2 N/A
associate-*l* N/A
*-lowering-*.f64 N/A
*-lowering-*.f64 N/A
distribute-rgt-out N/A
*-lowering-*.f64 N/A
PI-lowering-PI.f64 N/A
metadata-eval N/A
associate-*r/ N/A
metadata-eval N/A
/-lowering-/.f64 N/A
PI-lowering-PI.f64 95.3%
Simplified 95.3%
Final simplification 95.3%
; Herbie alternative (leading Taylor term around f = 0): log((4/pi) / f) / (pi / -4).
(FPCore (f) :precision binary64 (/ (log (/ (/ 4.0 PI) f)) (/ PI -4.0)))
/* Herbie alternative (leading Taylor term around f = 0):
 * log((4/pi) / f) / (pi / -4). */
double code(double f) {
return log(((4.0 / ((double) M_PI)) / f)) / (((double) M_PI) / -4.0);
}
/**
 * Herbie alternative (leading Taylor term around f = 0):
 * log((4/pi) / f) / (pi / -4).
 *
 * @param f input value
 * @return the binary64 result
 */
public static double code(double f) {
return Math.log(((4.0 / Math.PI) / f)) / (Math.PI / -4.0);
}
# Herbie alternative (leading Taylor term around f = 0): log((4/pi) / f) / (pi / -4).
def code(f): return math.log(((4.0 / math.pi) / f)) / (math.pi / -4.0)
# Herbie alternative (leading Taylor term around f = 0): log((4/pi) / f) / (pi / -4).
function code(f) return Float64(log(Float64(Float64(4.0 / pi) / f)) / Float64(pi / -4.0)) end
function tmp = code(f)
    % Herbie alternative (leading Taylor term around f = 0): log((4/pi)/f) / (pi/-4).
    % Original had the statement on the function-declaration line (invalid MATLAB).
    tmp = log(((4.0 / pi) / f)) / (pi / -4.0);
end
(* Herbie alternative (leading Taylor term around f = 0): Log[(4/Pi)/f] / (Pi/-4), rounded stepwise. *)
code[f_] := N[(N[Log[N[(N[(4.0 / Pi), $MachinePrecision] / f), $MachinePrecision]], $MachinePrecision] / N[(Pi / -4.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\log \left(\frac{\frac{4}{\pi}}{f}\right)}{\frac{\pi}{-4}}
\end{array}
Initial program 8.0%
distribute-lft-neg-in N/A
distribute-neg-frac2 N/A
associate-*l/ N/A
*-lft-identity N/A
/-lowering-/.f64 N/A
Simplified 8.0%
Taylor expanded in f around 0
/-lowering-/.f64 N/A
*-commutative N/A
distribute-rgt-out-- N/A
metadata-eval N/A
associate-*l* N/A
metadata-eval N/A
*-inverses N/A
metadata-eval N/A
times-frac N/A
*-commutative N/A
metadata-eval N/A
distribute-rgt-out-- N/A
associate-*r/ N/A
*-lowering-*.f64 N/A
PI-lowering-PI.f64 N/A
Simplified 94.7%
associate-/r* N/A
associate-/l/ N/A
associate-/l/ N/A
associate-/r* N/A
metadata-eval N/A
/-lowering-/.f64 N/A
/-lowering-/.f64 N/A
PI-lowering-PI.f64 94.7%
Applied egg-rr 94.7%
; Herbie alternative: (4/pi) * log(f * (pi * 0.25)) -- Taylor-derived form.
(FPCore (f) :precision binary64 (* (/ 4.0 PI) (log (* f (* PI 0.25)))))
/* Herbie alternative (Taylor-derived): (4/pi) * log(f * (pi * 0.25)). */
double code(double f) {
return (4.0 / ((double) M_PI)) * log((f * (((double) M_PI) * 0.25)));
}
/**
 * Herbie alternative (Taylor-derived): (4/pi) * log(f * (pi * 0.25)).
 *
 * @param f input value
 * @return the binary64 result
 */
public static double code(double f) {
return (4.0 / Math.PI) * Math.log((f * (Math.PI * 0.25)));
}
# Herbie alternative (Taylor-derived): (4/pi) * log(f * (pi * 0.25)).
def code(f): return (4.0 / math.pi) * math.log((f * (math.pi * 0.25)))
# Herbie alternative (Taylor-derived): (4/pi) * log(f * (pi * 0.25)).
function code(f) return Float64(Float64(4.0 / pi) * log(Float64(f * Float64(pi * 0.25)))) end
function tmp = code(f)
    % Herbie alternative (Taylor-derived): (4/pi) * log(f * (pi * 0.25)).
    % Original had the statement on the function-declaration line (invalid MATLAB).
    tmp = (4.0 / pi) * log((f * (pi * 0.25)));
end
(* Herbie alternative (Taylor-derived): (4/Pi) * Log[f * (Pi * 0.25)], rounded stepwise. *)
code[f_] := N[(N[(4.0 / Pi), $MachinePrecision] * N[Log[N[(f * N[(Pi * 0.25), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{4}{\pi} \cdot \log \left(f \cdot \left(\pi \cdot 0.25\right)\right)
\end{array}
Initial program 8.0%
distribute-rgt-neg-in N/A
*-lowering-*.f64 N/A
clear-num N/A
/-lowering-/.f64 N/A
PI-lowering-PI.f64 N/A
neg-log N/A
clear-num N/A
Applied egg-rr 99.2%
Taylor expanded in f around 0
*-commutative N/A
associate-*r* N/A
*-commutative N/A
*-lowering-*.f64 N/A
*-lowering-*.f64 N/A
PI-lowering-PI.f64 94.7%
Simplified 94.7%
Final simplification 94.7%
; Herbie alternative (Taylor term only): f * (-0.0833... * (pi * f)), i.e. -pi*f^2/12.
; The surrounding log reports only 4.2% accuracy for this candidate.
(FPCore (f) :precision binary64 (* f (* -0.08333333333333333 (* PI f))))
/* Herbie alternative (Taylor term only): f * (-0.0833... * (pi * f)), i.e. -pi*f^2/12.
 * The surrounding log reports only 4.2% accuracy for this candidate. */
double code(double f) {
return f * (-0.08333333333333333 * (((double) M_PI) * f));
}
/**
 * Herbie alternative (Taylor term only): f * (-0.0833... * (pi * f)), i.e. -pi*f^2/12.
 * The surrounding log reports only 4.2% accuracy for this candidate.
 *
 * @param f input value
 * @return the binary64 result
 */
public static double code(double f) {
return f * (-0.08333333333333333 * (Math.PI * f));
}
# Herbie alternative (Taylor term only): f * (-0.0833... * (pi * f)), i.e. -pi*f^2/12.
# The surrounding log reports only 4.2% accuracy for this candidate.
def code(f): return f * (-0.08333333333333333 * (math.pi * f))
# Herbie alternative (Taylor term only): f * (-0.0833... * (pi * f)), i.e. -pi*f^2/12.
function code(f) return Float64(f * Float64(-0.08333333333333333 * Float64(pi * f))) end
function tmp = code(f)
    % Herbie alternative (Taylor term only): f * (-0.0833... * (pi * f)), i.e. -pi*f^2/12.
    % Original had the statement on the function-declaration line (invalid MATLAB).
    tmp = f * (-0.08333333333333333 * (pi * f));
end
(* Herbie alternative (Taylor term only): f * (-0.0833... * (Pi * f)), i.e. -Pi f^2/12, rounded stepwise. *)
code[f_] := N[(f * N[(-0.08333333333333333 * N[(Pi * f), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
f \cdot \left(-0.08333333333333333 \cdot \left(\pi \cdot f\right)\right)
\end{array}
Initial program 8.0%
Taylor expanded in f around 0
Simplified 95.2%
Taylor expanded in f around inf
mul-1-neg N/A
distribute-rgt-out N/A
associate-*r* N/A
distribute-rgt-neg-in N/A
*-lowering-*.f64 N/A
*-commutative N/A
*-lowering-*.f64 N/A
PI-lowering-PI.f64 N/A
unpow2 N/A
*-lowering-*.f64 N/A
metadata-eval N/A
metadata-eval 4.2%
Simplified 4.2%
*-commutative N/A
associate-*r* N/A
associate-*r* N/A
*-lowering-*.f64 N/A
*-lowering-*.f64 N/A
*-lowering-*.f64 N/A
PI-lowering-PI.f64 4.2%
Applied egg-rr 4.2%
Final simplification 4.2%
herbie shell --seed 2024150
; Original input as submitted to the Herbie shell, named after the source paper.
; Equivalent closed form: (4/pi) * log(tanh((pi/4) * f)).
(FPCore (f)
:name "VandenBroeck and Keller, Equation (20)"
:precision binary64
(- (* (/ 1.0 (/ PI 4.0)) (log (/ (+ (exp (* (/ PI 4.0) f)) (exp (- (* (/ PI 4.0) f)))) (- (exp (* (/ PI 4.0) f)) (exp (- (* (/ PI 4.0) f)))))))))