
(FPCore (f) :precision binary64 (let* ((t_0 (* (/ PI 4.0) f)) (t_1 (exp t_0)) (t_2 (exp (- t_0)))) (- (* (/ 1.0 (/ PI 4.0)) (log (/ (+ t_1 t_2) (- t_1 t_2)))))))
/* Initial program (C target): -(1/(pi/4)) * log((e^u + e^-u) / (e^u - e^-u))
   with u = (pi/4)*f; in exact arithmetic this is (4/pi)*log(tanh(pi*f/4))
   for f > 0. Floating-point operations and their order match the original. */
double code(double f) {
    double quarter_pi = ((double) M_PI) / 4.0;         /* pi/4 */
    double u = quarter_pi * f;                         /* scaled argument */
    double e_pos = exp(u);
    double e_neg = exp(-u);
    double ratio = (e_pos + e_neg) / (e_pos - e_neg);  /* coth(u) */
    return -((1.0 / quarter_pi) * log(ratio));
}
/**
 * Initial program (Java target): computes -(1/(pi/4)) * log((e^u + e^-u) / (e^u - e^-u))
 * with u = (pi/4)*f; in exact arithmetic this equals (4/pi)*log(tanh(pi*f/4)) for f > 0.
 * Floating-point operations and their order match the original listing.
 */
public static double code(double f) {
    final double quarterPi = Math.PI / 4.0;
    final double u = quarterPi * f;
    final double ePos = Math.exp(u);
    final double eNeg = Math.exp(-u);
    final double coth = (ePos + eNeg) / (ePos - eNeg);
    return -((1.0 / quarterPi) * Math.log(coth));
}
def code(f):
    """Return -(1/(pi/4)) * log((e**u + e**-u) / (e**u - e**-u)) with u = (pi/4)*f.

    In exact arithmetic this equals (4/pi)*log(tanh(pi*f/4)) for f > 0.
    The original listing had all statements collapsed onto one line, which
    is a SyntaxError; reformatted here with the floating-point operations
    and their order unchanged.
    """
    t_0 = (math.pi / 4.0) * f
    t_1 = math.exp(t_0)
    t_2 = math.exp(-t_0)
    return -((1.0 / (math.pi / 4.0)) * math.log((t_1 + t_2) / (t_1 - t_2)))
# Initial program (Julia target): -(1/(pi/4)) * log((e^u + e^-u)/(e^u - e^-u)),
# u = (pi/4)*f. The original listing fused all statements onto one line with
# no separators (invalid Julia); reformatted with operations unchanged.
function code(f)
    t_0 = Float64(Float64(pi / 4.0) * f)
    t_1 = exp(t_0)
    t_2 = exp(Float64(-t_0))
    return Float64(-Float64(Float64(1.0 / Float64(pi / 4.0)) * log(Float64(Float64(t_1 + t_2) / Float64(t_1 - t_2)))))
end
% Initial program (MATLAB target): -(1/(pi/4)) * log((e^u + e^-u)/(e^u - e^-u)),
% u = (pi/4)*f. The original listing fused the declaration and body onto one
% line (invalid); reformatted with operations unchanged.
function tmp = code(f)
    t_0 = (pi / 4.0) * f;
    t_1 = exp(t_0);
    t_2 = exp(-t_0);
    tmp = -((1.0 / (pi / 4.0)) * log(((t_1 + t_2) / (t_1 - t_2))));
end
(* Initial program (Wolfram target): -(1/(Pi/4)) * Log[(E^u + E^-u)/(E^u - E^-u)] with u = (Pi/4)*f; each step rounded via N[..., $MachinePrecision]. *)
code[f_] := Block[{t$95$0 = N[(N[(Pi / 4.0), $MachinePrecision] * f), $MachinePrecision]}, Block[{t$95$1 = N[Exp[t$95$0], $MachinePrecision]}, Block[{t$95$2 = N[Exp[(-t$95$0)], $MachinePrecision]}, (-N[(N[(1.0 / N[(Pi / 4.0), $MachinePrecision]), $MachinePrecision] * N[Log[N[(N[(t$95$1 + t$95$2), $MachinePrecision] / N[(t$95$1 - t$95$2), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision])]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{\pi}{4} \cdot f\\
t_1 := e^{t_0}\\
t_2 := e^{-t_0}\\
-\frac{1}{\frac{\pi}{4}} \cdot \log \left(\frac{t_1 + t_2}{t_1 - t_2}\right)
\end{array}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 6 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (f) :precision binary64 (let* ((t_0 (* (/ PI 4.0) f)) (t_1 (exp t_0)) (t_2 (exp (- t_0)))) (- (* (/ 1.0 (/ PI 4.0)) (log (/ (+ t_1 t_2) (- t_1 t_2)))))))
/* Duplicate listing of the initial program (C target):
   -(1/(pi/4)) * log((e^u + e^-u)/(e^u - e^-u)) with u = (pi/4)*f. */
double code(double f) {
double t_0 = (((double) M_PI) / 4.0) * f;
double t_1 = exp(t_0);
double t_2 = exp(-t_0);
return -((1.0 / (((double) M_PI) / 4.0)) * log(((t_1 + t_2) / (t_1 - t_2))));
}
// Duplicate listing of the initial program (Java target):
// -(1/(pi/4)) * log((e^u + e^-u)/(e^u - e^-u)) with u = (pi/4)*f.
public static double code(double f) {
double t_0 = (Math.PI / 4.0) * f;
double t_1 = Math.exp(t_0);
double t_2 = Math.exp(-t_0);
return -((1.0 / (Math.PI / 4.0)) * Math.log(((t_1 + t_2) / (t_1 - t_2))));
}
def code(f):
    """Return -(1/(pi/4)) * log((e**u + e**-u) / (e**u - e**-u)) with u = (pi/4)*f.

    Duplicate listing of the initial program. The original line had all
    statements collapsed together (SyntaxError); reformatted with the
    floating-point operations and their order unchanged.
    """
    t_0 = (math.pi / 4.0) * f
    t_1 = math.exp(t_0)
    t_2 = math.exp(-t_0)
    return -((1.0 / (math.pi / 4.0)) * math.log((t_1 + t_2) / (t_1 - t_2)))
# Duplicate listing of the initial program (Julia target); the original fused
# all statements onto one line with no separators (invalid Julia).
# Reformatted with operations unchanged.
function code(f)
    t_0 = Float64(Float64(pi / 4.0) * f)
    t_1 = exp(t_0)
    t_2 = exp(Float64(-t_0))
    return Float64(-Float64(Float64(1.0 / Float64(pi / 4.0)) * log(Float64(Float64(t_1 + t_2) / Float64(t_1 - t_2)))))
end
% Duplicate listing of the initial program (MATLAB target); the original fused
% the declaration and body onto one line (invalid). Reformatted, ops unchanged.
function tmp = code(f)
    t_0 = (pi / 4.0) * f;
    t_1 = exp(t_0);
    t_2 = exp(-t_0);
    tmp = -((1.0 / (pi / 4.0)) * log(((t_1 + t_2) / (t_1 - t_2))));
end
(* Duplicate listing of the initial program (Wolfram target); see the FPCore form above. *)
code[f_] := Block[{t$95$0 = N[(N[(Pi / 4.0), $MachinePrecision] * f), $MachinePrecision]}, Block[{t$95$1 = N[Exp[t$95$0], $MachinePrecision]}, Block[{t$95$2 = N[Exp[(-t$95$0)], $MachinePrecision]}, (-N[(N[(1.0 / N[(Pi / 4.0), $MachinePrecision]), $MachinePrecision] * N[Log[N[(N[(t$95$1 + t$95$2), $MachinePrecision] / N[(t$95$1 - t$95$2), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision])]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{\pi}{4} \cdot f\\
t_1 := e^{t_0}\\
t_2 := e^{-t_0}\\
-\frac{1}{\frac{\pi}{4}} \cdot \log \left(\frac{t_1 + t_2}{t_1 - t_2}\right)
\end{array}
\end{array}
(FPCore (f)
:precision binary64
(*
(log
(/
(+ (exp (* f (* 0.25 PI))) (exp (* f (* PI -0.25))))
(*
f
(fma
(* f f)
(fma
(* f f)
(* (pow PI 5.0) 1.6276041666666666e-5)
(* PI (* (* PI PI) 0.005208333333333333)))
(* PI 0.5)))))
(- (/ 4.0 PI))))
/* Herbie alternative (C target; the FPCore form is listed just above).
   Keeps the exact sum e^u + e^-u (u = pi*f/4) in the numerator but replaces
   the cancellation-prone difference e^u - e^-u in the denominator with
   f times an fma-evaluated even polynomial in f — NOTE(review): per the
   derivation log below this comes from a Taylor expansion around f = 0;
   confirm coefficients against the FPCore listing. The leading minus sign
   is folded into the final factor -(4/pi). */
double code(double f) {
return log(((exp((f * (0.25 * ((double) M_PI)))) + exp((f * (((double) M_PI) * -0.25)))) / (f * fma((f * f), fma((f * f), (pow(((double) M_PI), 5.0) * 1.6276041666666666e-5), (((double) M_PI) * ((((double) M_PI) * ((double) M_PI)) * 0.005208333333333333))), (((double) M_PI) * 0.5))))) * -(4.0 / ((double) M_PI));
}
function code(f) return Float64(log(Float64(Float64(exp(Float64(f * Float64(0.25 * pi))) + exp(Float64(f * Float64(pi * -0.25)))) / Float64(f * fma(Float64(f * f), fma(Float64(f * f), Float64((pi ^ 5.0) * 1.6276041666666666e-5), Float64(pi * Float64(Float64(pi * pi) * 0.005208333333333333))), Float64(pi * 0.5))))) * Float64(-Float64(4.0 / pi))) end
code[f_] := N[(N[Log[N[(N[(N[Exp[N[(f * N[(0.25 * Pi), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] + N[Exp[N[(f * N[(Pi * -0.25), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(f * N[(N[(f * f), $MachinePrecision] * N[(N[(f * f), $MachinePrecision] * N[(N[Power[Pi, 5.0], $MachinePrecision] * 1.6276041666666666e-5), $MachinePrecision] + N[(Pi * N[(N[(Pi * Pi), $MachinePrecision] * 0.005208333333333333), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(Pi * 0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] * (-N[(4.0 / Pi), $MachinePrecision])), $MachinePrecision]
\begin{array}{l}
\\
\log \left(\frac{e^{f \cdot \left(0.25 \cdot \pi\right)} + e^{f \cdot \left(\pi \cdot -0.25\right)}}{f \cdot \mathsf{fma}\left(f \cdot f, \mathsf{fma}\left(f \cdot f, {\pi}^{5} \cdot 1.6276041666666666 \cdot 10^{-5}, \pi \cdot \left(\left(\pi \cdot \pi\right) \cdot 0.005208333333333333\right)\right), \pi \cdot 0.5\right)}\right) \cdot \left(-\frac{4}{\pi}\right)
\end{array}
Initial program 7.5%
Taylor expanded in f around inf
*-commutativeN/A
associate-*l/N/A
associate-/l*N/A
metadata-evalN/A
metadata-evalN/A
associate-/r*N/A
*-commutativeN/A
distribute-rgt-out--N/A
metadata-evalN/A
associate-*r/N/A
Simplified 7.5%
Taylor expanded in f around 0
lower-*.f64N/A
+-commutativeN/A
associate--l+N/A
lower-fma.f64N/A
Simplified 97.2%
Final simplification 97.2%
(FPCore (f)
:precision binary64
(-
(fma
4.0
(/ (- (- (log f)) (log (* 0.25 PI))) PI)
(*
(* f f)
(*
PI
(fma
(* PI PI)
(* (* f f) -0.0008680555555555555)
0.08333333333333333))))))
/* Herbie alternative (C target; FPCore form above): Taylor expansion around
   f = 0. The log of the ratio is rewritten as (-log(f) - log(pi/4)),
   scaled by 4/pi via fma, plus an even polynomial correction in f^2 —
   NOTE(review): matches the derivation log below; confirm against the
   FPCore listing. The whole fma result is negated. */
double code(double f) {
return -fma(4.0, ((-log(f) - log((0.25 * ((double) M_PI)))) / ((double) M_PI)), ((f * f) * (((double) M_PI) * fma((((double) M_PI) * ((double) M_PI)), ((f * f) * -0.0008680555555555555), 0.08333333333333333))));
}
function code(f) return Float64(-fma(4.0, Float64(Float64(Float64(-log(f)) - log(Float64(0.25 * pi))) / pi), Float64(Float64(f * f) * Float64(pi * fma(Float64(pi * pi), Float64(Float64(f * f) * -0.0008680555555555555), 0.08333333333333333))))) end
code[f_] := (-N[(4.0 * N[(N[((-N[Log[f], $MachinePrecision]) - N[Log[N[(0.25 * Pi), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / Pi), $MachinePrecision] + N[(N[(f * f), $MachinePrecision] * N[(Pi * N[(N[(Pi * Pi), $MachinePrecision] * N[(N[(f * f), $MachinePrecision] * -0.0008680555555555555), $MachinePrecision] + 0.08333333333333333), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision])
\begin{array}{l}
\\
-\mathsf{fma}\left(4, \frac{\left(-\log f\right) - \log \left(0.25 \cdot \pi\right)}{\pi}, \left(f \cdot f\right) \cdot \left(\pi \cdot \mathsf{fma}\left(\pi \cdot \pi, \left(f \cdot f\right) \cdot -0.0008680555555555555, 0.08333333333333333\right)\right)\right)
\end{array}
Initial program 7.5%
Taylor expanded in f around 0
Simplified97.0%
Taylor expanded in f around 0
lower-fma.f64N/A
lower-/.f64N/A
mul-1-negN/A
unsub-negN/A
lower--.f64N/A
lower-log.f64N/A
lower-/.f64N/A
lower-PI.f64N/A
lower-log.f64N/A
lower-PI.f64N/A
lower-*.f64N/A
Simplified97.1%
Taylor expanded in f around 0
lower-/.f64N/A
Simplified97.1%
Taylor expanded in f around 0
associate-*r*N/A
unpow3N/A
unpow2N/A
associate-*r*N/A
distribute-rgt-outN/A
lower-*.f64N/A
lower-PI.f64N/A
*-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
lower-PI.f64N/A
lower-PI.f64N/A
*-commutativeN/A
lower-*.f64N/A
unpow2N/A
lower-*.f6497.1
Simplified97.1%
Final simplification97.1%
(FPCore (f) :precision binary64 (- (fma (+ (log (* 0.25 PI)) (log f)) (/ -4.0 PI) (* f (* f (* PI 0.08333333333333333))))))
/* Herbie alternative (C target; FPCore form above): compact Taylor form
   around f = 0 using one fma: (log(pi/4) + log(f)) * (-4/pi) plus a
   quadratic term f^2 * pi * 0.0833... (= pi/12), all negated. */
double code(double f) {
return -fma((log((0.25 * ((double) M_PI))) + log(f)), (-4.0 / ((double) M_PI)), (f * (f * (((double) M_PI) * 0.08333333333333333))));
}
function code(f) return Float64(-fma(Float64(log(Float64(0.25 * pi)) + log(f)), Float64(-4.0 / pi), Float64(f * Float64(f * Float64(pi * 0.08333333333333333))))) end
code[f_] := (-N[(N[(N[Log[N[(0.25 * Pi), $MachinePrecision]], $MachinePrecision] + N[Log[f], $MachinePrecision]), $MachinePrecision] * N[(-4.0 / Pi), $MachinePrecision] + N[(f * N[(f * N[(Pi * 0.08333333333333333), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision])
\begin{array}{l}
\\
-\mathsf{fma}\left(\log \left(0.25 \cdot \pi\right) + \log f, \frac{-4}{\pi}, f \cdot \left(f \cdot \left(\pi \cdot 0.08333333333333333\right)\right)\right)
\end{array}
Initial program 7.5%
Taylor expanded in f around 0
Simplified97.0%
Taylor expanded in f around 0
*-commutativeN/A
associate-*l/N/A
associate-/l*N/A
lower-fma.f64N/A
mul-1-negN/A
unsub-negN/A
lower--.f64N/A
lower-log.f64N/A
lower-/.f64N/A
lower-PI.f64N/A
lower-log.f64N/A
lower-/.f64N/A
lower-PI.f64N/A
*-commutativeN/A
Simplified97.0%
Taylor expanded in f around inf
Simplified47.6%
Taylor expanded in f around 0
associate-*r/N/A
*-commutativeN/A
remove-double-negN/A
mul-1-negN/A
distribute-rgt-neg-inN/A
log-recN/A
+-commutativeN/A
associate-/l*N/A
*-commutativeN/A
associate-*r*N/A
lower-fma.f64N/A
Simplified97.0%
Final simplification97.0%
(FPCore (f) :precision binary64 (* (log (/ (fma f (* PI (* f 0.08333333333333333)) (/ 4.0 PI)) f)) (/ -1.0 (/ PI 4.0))))
/* Herbie alternative (C target; FPCore form above): the ratio inside the log
   is fma(f, pi*f/12, 4/pi) / f, scaled by -1/(pi/4). Floating-point
   operations and their order match the original listing. */
double code(double f) {
    double numer = fma(f, ((double) M_PI) * (f * 0.08333333333333333), 4.0 / ((double) M_PI));
    double scale = -1.0 / (((double) M_PI) / 4.0);
    return log(numer / f) * scale;
}
function code(f) return Float64(log(Float64(fma(f, Float64(pi * Float64(f * 0.08333333333333333)), Float64(4.0 / pi)) / f)) * Float64(-1.0 / Float64(pi / 4.0))) end
code[f_] := N[(N[Log[N[(N[(f * N[(Pi * N[(f * 0.08333333333333333), $MachinePrecision]), $MachinePrecision] + N[(4.0 / Pi), $MachinePrecision]), $MachinePrecision] / f), $MachinePrecision]], $MachinePrecision] * N[(-1.0 / N[(Pi / 4.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\log \left(\frac{\mathsf{fma}\left(f, \pi \cdot \left(f \cdot 0.08333333333333333\right), \frac{4}{\pi}\right)}{f}\right) \cdot \frac{-1}{\frac{\pi}{4}}
\end{array}
Initial program 7.5%
Taylor expanded in f around 0
Simplified97.0%
Taylor expanded in f around 0
*-commutativeN/A
distribute-rgt-outN/A
associate-*l*N/A
lower-*.f64N/A
lower-PI.f64N/A
lower-*.f64N/A
metadata-eval97.0
Simplified97.0%
Final simplification97.0%
(FPCore (f) :precision binary64 (* (log (/ 4.0 (* f PI))) (- (/ 4.0 PI))))
/* Herbie alternative (C target): leading-order form log(4/(pi*f)) * -(4/pi).
   Floating-point operations and their order match the original listing. */
double code(double f) {
    double ratio = 4.0 / (f * ((double) M_PI));
    double scale = -(4.0 / ((double) M_PI));
    return log(ratio) * scale;
}
/**
 * Herbie alternative (Java target): leading-order form log(4/(pi*f)) * -(4/pi).
 * Floating-point operations and their order match the original listing.
 */
public static double code(double f) {
    final double ratio = 4.0 / (f * Math.PI);
    final double scale = -(4.0 / Math.PI);
    return Math.log(ratio) * scale;
}
def code(f):
    """Herbie alternative: leading-order form log(4 / (pi*f)) * -(4/pi).

    Floating-point operations and their order match the original listing.
    """
    ratio = 4.0 / (f * math.pi)
    scale = -(4.0 / math.pi)
    return math.log(ratio) * scale
function code(f) return Float64(log(Float64(4.0 / Float64(f * pi))) * Float64(-Float64(4.0 / pi))) end
function tmp = code(f) tmp = log((4.0 / (f * pi))) * -(4.0 / pi); end
code[f_] := N[(N[Log[N[(4.0 / N[(f * Pi), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] * (-N[(4.0 / Pi), $MachinePrecision])), $MachinePrecision]
\begin{array}{l}
\\
\log \left(\frac{4}{f \cdot \pi}\right) \cdot \left(-\frac{4}{\pi}\right)
\end{array}
Initial program 7.5%
Taylor expanded in f around inf
*-commutativeN/A
associate-*l/N/A
associate-/l*N/A
metadata-evalN/A
metadata-evalN/A
associate-/r*N/A
*-commutativeN/A
distribute-rgt-out--N/A
metadata-evalN/A
associate-*r/N/A
Simplified7.5%
Taylor expanded in f around 0
associate-/l/N/A
distribute-rgt-out--N/A
*-commutativeN/A
associate-/r*N/A
metadata-evalN/A
metadata-evalN/A
associate-/l/N/A
lower-/.f64N/A
*-commutativeN/A
lower-*.f64N/A
lower-PI.f6496.7
Simplified96.7%
Final simplification96.7%
(FPCore (f) :precision binary64 (* PI (* f (* f -0.08333333333333333))))
/* Herbie alternative (C target): quadratic-only form pi * f^2 * -0.0833...
   (coefficient = -1/12). Operations and order match the original listing. */
double code(double f) {
    double scaled = f * -0.08333333333333333;
    return ((double) M_PI) * (f * scaled);
}
/**
 * Herbie alternative (Java target): quadratic-only form pi * f^2 * -0.0833...
 * (coefficient = -1/12). Operations and order match the original listing.
 */
public static double code(double f) {
    final double scaled = f * -0.08333333333333333;
    return Math.PI * (f * scaled);
}
def code(f):
    """Herbie alternative: quadratic-only form pi * f**2 * -0.0833... (= -1/12).

    Floating-point operations and their order match the original listing.
    """
    scaled = f * -0.08333333333333333
    return math.pi * (f * scaled)
function code(f) return Float64(pi * Float64(f * Float64(f * -0.08333333333333333))) end
function tmp = code(f) tmp = pi * (f * (f * -0.08333333333333333)); end
code[f_] := N[(Pi * N[(f * N[(f * -0.08333333333333333), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\pi \cdot \left(f \cdot \left(f \cdot -0.08333333333333333\right)\right)
\end{array}
Initial program 7.5%
Taylor expanded in f around 0
Simplified97.0%
Taylor expanded in f around 0
*-commutativeN/A
associate-*l/N/A
associate-/l*N/A
lower-fma.f64N/A
mul-1-negN/A
unsub-negN/A
lower--.f64N/A
lower-log.f64N/A
lower-/.f64N/A
lower-PI.f64N/A
lower-log.f64N/A
lower-/.f64N/A
lower-PI.f64N/A
*-commutativeN/A
Simplified97.0%
Taylor expanded in f around inf
associate-*r*N/A
*-commutativeN/A
lower-*.f64N/A
lower-PI.f64N/A
lower-*.f64N/A
unpow2N/A
lower-*.f644.3
Simplified4.3%
Taylor expanded in f around 0
associate-*r*N/A
*-commutativeN/A
lower-*.f64N/A
lower-PI.f64N/A
unpow2N/A
associate-*r*N/A
*-commutativeN/A
lower-*.f64N/A
*-commutativeN/A
lower-*.f644.3
Simplified4.3%
herbie shell --seed 2024215
(FPCore (f)
:name "VandenBroeck and Keller, Equation (20)"
:precision binary64
(- (* (/ 1.0 (/ PI 4.0)) (log (/ (+ (exp (* (/ PI 4.0) f)) (exp (- (* (/ PI 4.0) f)))) (- (exp (* (/ PI 4.0) f)) (exp (- (* (/ PI 4.0) f)))))))))