
(FPCore (f) :precision binary64 (let* ((t_0 (* (/ PI 4.0) f)) (t_1 (exp t_0)) (t_2 (exp (- t_0)))) (- (* (/ 1.0 (/ PI 4.0)) (log (/ (+ t_1 t_2) (- t_1 t_2)))))))
/* VandenBroeck & Keller, Eq. (20): computes -(4/pi) * log(coth((pi/4)*f)).
 * Direct transcription of the FPCore above (1/(pi/4) == 4/pi). */
double code(double f) {
double t_0 = (((double) M_PI) / 4.0) * f;
double t_1 = exp(t_0);
double t_2 = exp(-t_0);
/* t_1 - t_2 cancels catastrophically as f -> 0 (t_1 ~ t_2 ~ 1);
 * this is the inaccuracy the report's alternatives address. */
return -((1.0 / (((double) M_PI) / 4.0)) * log(((t_1 + t_2) / (t_1 - t_2))));
}
/** VandenBroeck & Keller, Eq. (20): -(4/pi) * log(coth((pi/4)*f)). */
public static double code(double f) {
double t_0 = (Math.PI / 4.0) * f;
double t_1 = Math.exp(t_0);
double t_2 = Math.exp(-t_0);
// t_1 - t_2 cancels for small f; see the higher-accuracy alternatives below.
return -((1.0 / (Math.PI / 4.0)) * Math.log(((t_1 + t_2) / (t_1 - t_2))));
}
def code(f):
    """VandenBroeck & Keller, Eq. (20): -(4/pi) * log(coth((pi/4)*f)).

    Fixed: the generated export had all statements jammed onto one line,
    which is a Python SyntaxError.  Note t_1 - t_2 cancels for small f.
    """
    t_0 = (math.pi / 4.0) * f
    t_1 = math.exp(t_0)
    t_2 = math.exp(-t_0)
    return -((1.0 / (math.pi / 4.0)) * math.log(((t_1 + t_2) / (t_1 - t_2))))
# VandenBroeck & Keller, Eq. (20): -(4/pi) * log(coth((pi/4)*f)).
# Fixed: the generated export had the assignments on one line with no
# separators, which does not parse in Julia.
function code(f)
    t_0 = Float64(Float64(pi / 4.0) * f)
    t_1 = exp(t_0)
    t_2 = exp(Float64(-t_0))
    return Float64(-Float64(Float64(1.0 / Float64(pi / 4.0)) * log(Float64(Float64(t_1 + t_2) / Float64(t_1 - t_2)))))
end
function tmp = code(f)
% CODE  VandenBroeck & Keller, Eq. (20): -(4/pi)*log(coth((pi/4)*f)).
% Fixed: the generated export placed statements on the function
% declaration line, which is invalid MATLAB syntax.
t_0 = (pi / 4.0) * f;
t_1 = exp(t_0);
t_2 = exp(-t_0);
tmp = -((1.0 / (pi / 4.0)) * log(((t_1 + t_2) / (t_1 - t_2))));
end
code[f_] := Block[{t$95$0 = N[(N[(Pi / 4.0), $MachinePrecision] * f), $MachinePrecision]}, Block[{t$95$1 = N[Exp[t$95$0], $MachinePrecision]}, Block[{t$95$2 = N[Exp[(-t$95$0)], $MachinePrecision]}, (-N[(N[(1.0 / N[(Pi / 4.0), $MachinePrecision]), $MachinePrecision] * N[Log[N[(N[(t$95$1 + t$95$2), $MachinePrecision] / N[(t$95$1 - t$95$2), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision])]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{\pi}{4} \cdot f\\
t_1 := e^{t_0}\\
t_2 := e^{-t_0}\\
-\frac{1}{\frac{\pi}{4}} \cdot \log \left(\frac{t_1 + t_2}{t_1 - t_2}\right)
\end{array}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 11 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (f) :precision binary64 (let* ((t_0 (* (/ PI 4.0) f)) (t_1 (exp t_0)) (t_2 (exp (- t_0)))) (- (* (/ 1.0 (/ PI 4.0)) (log (/ (+ t_1 t_2) (- t_1 t_2)))))))
/* VandenBroeck & Keller, Eq. (20): computes -(4/pi) * log(coth((pi/4)*f)).
 * Direct transcription of the FPCore above (1/(pi/4) == 4/pi). */
double code(double f) {
double t_0 = (((double) M_PI) / 4.0) * f;
double t_1 = exp(t_0);
double t_2 = exp(-t_0);
/* t_1 - t_2 cancels catastrophically as f -> 0 (t_1 ~ t_2 ~ 1);
 * this is the inaccuracy the report's alternatives address. */
return -((1.0 / (((double) M_PI) / 4.0)) * log(((t_1 + t_2) / (t_1 - t_2))));
}
/** VandenBroeck & Keller, Eq. (20): -(4/pi) * log(coth((pi/4)*f)). */
public static double code(double f) {
double t_0 = (Math.PI / 4.0) * f;
double t_1 = Math.exp(t_0);
double t_2 = Math.exp(-t_0);
// t_1 - t_2 cancels for small f; see the higher-accuracy alternatives below.
return -((1.0 / (Math.PI / 4.0)) * Math.log(((t_1 + t_2) / (t_1 - t_2))));
}
def code(f):
    """VandenBroeck & Keller, Eq. (20): -(4/pi) * log(coth((pi/4)*f)).

    Fixed: the generated export had all statements jammed onto one line,
    which is a Python SyntaxError.  Note t_1 - t_2 cancels for small f.
    """
    t_0 = (math.pi / 4.0) * f
    t_1 = math.exp(t_0)
    t_2 = math.exp(-t_0)
    return -((1.0 / (math.pi / 4.0)) * math.log(((t_1 + t_2) / (t_1 - t_2))))
# VandenBroeck & Keller, Eq. (20): -(4/pi) * log(coth((pi/4)*f)).
# Fixed: the generated export had the assignments on one line with no
# separators, which does not parse in Julia.
function code(f)
    t_0 = Float64(Float64(pi / 4.0) * f)
    t_1 = exp(t_0)
    t_2 = exp(Float64(-t_0))
    return Float64(-Float64(Float64(1.0 / Float64(pi / 4.0)) * log(Float64(Float64(t_1 + t_2) / Float64(t_1 - t_2)))))
end
function tmp = code(f)
% CODE  VandenBroeck & Keller, Eq. (20): -(4/pi)*log(coth((pi/4)*f)).
% Fixed: the generated export placed statements on the function
% declaration line, which is invalid MATLAB syntax.
t_0 = (pi / 4.0) * f;
t_1 = exp(t_0);
t_2 = exp(-t_0);
tmp = -((1.0 / (pi / 4.0)) * log(((t_1 + t_2) / (t_1 - t_2))));
end
code[f_] := Block[{t$95$0 = N[(N[(Pi / 4.0), $MachinePrecision] * f), $MachinePrecision]}, Block[{t$95$1 = N[Exp[t$95$0], $MachinePrecision]}, Block[{t$95$2 = N[Exp[(-t$95$0)], $MachinePrecision]}, (-N[(N[(1.0 / N[(Pi / 4.0), $MachinePrecision]), $MachinePrecision] * N[Log[N[(N[(t$95$1 + t$95$2), $MachinePrecision] / N[(t$95$1 - t$95$2), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision])]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{\pi}{4} \cdot f\\
t_1 := e^{t_0}\\
t_2 := e^{-t_0}\\
-\frac{1}{\frac{\pi}{4}} \cdot \log \left(\frac{t_1 + t_2}{t_1 - t_2}\right)
\end{array}
\end{array}
(FPCore (f)
:precision binary64
(*
(log
(/
(+ (exp (* (/ PI 4.0) f)) (exp (* PI (* f -0.25))))
(fma
(pow f 5.0)
(* (pow PI 5.0) 1.6276041666666666e-5)
(fma
(pow f 3.0)
(* (pow PI 3.0) 0.005208333333333333)
(fma
(pow PI 7.0)
(* 2.422030009920635e-8 (pow f 7.0))
(* PI (* f 0.5)))))))
(/ -1.0 (/ PI 4.0))))
/* Herbie alternative (97.7%): the cancelling denominator e^t - e^(-t)
 * (t = pi*f/4) is replaced by its degree-7 Taylor polynomial
 * 2t + t^3/3 + t^5/60 + t^7/2520 (1/192, 1/61440, 1/41287680 appear as
 * the decimal coefficients), evaluated with fma. Avoids cancellation near f = 0. */
double code(double f) {
return log(((exp(((((double) M_PI) / 4.0) * f)) + exp((((double) M_PI) * (f * -0.25)))) / fma(pow(f, 5.0), (pow(((double) M_PI), 5.0) * 1.6276041666666666e-5), fma(pow(f, 3.0), (pow(((double) M_PI), 3.0) * 0.005208333333333333), fma(pow(((double) M_PI), 7.0), (2.422030009920635e-8 * pow(f, 7.0)), (((double) M_PI) * (f * 0.5))))))) * (-1.0 / (((double) M_PI) / 4.0));
}
function code(f) return Float64(log(Float64(Float64(exp(Float64(Float64(pi / 4.0) * f)) + exp(Float64(pi * Float64(f * -0.25)))) / fma((f ^ 5.0), Float64((pi ^ 5.0) * 1.6276041666666666e-5), fma((f ^ 3.0), Float64((pi ^ 3.0) * 0.005208333333333333), fma((pi ^ 7.0), Float64(2.422030009920635e-8 * (f ^ 7.0)), Float64(pi * Float64(f * 0.5))))))) * Float64(-1.0 / Float64(pi / 4.0))) end
code[f_] := N[(N[Log[N[(N[(N[Exp[N[(N[(Pi / 4.0), $MachinePrecision] * f), $MachinePrecision]], $MachinePrecision] + N[Exp[N[(Pi * N[(f * -0.25), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(N[Power[f, 5.0], $MachinePrecision] * N[(N[Power[Pi, 5.0], $MachinePrecision] * 1.6276041666666666e-5), $MachinePrecision] + N[(N[Power[f, 3.0], $MachinePrecision] * N[(N[Power[Pi, 3.0], $MachinePrecision] * 0.005208333333333333), $MachinePrecision] + N[(N[Power[Pi, 7.0], $MachinePrecision] * N[(2.422030009920635e-8 * N[Power[f, 7.0], $MachinePrecision]), $MachinePrecision] + N[(Pi * N[(f * 0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] * N[(-1.0 / N[(Pi / 4.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\log \left(\frac{e^{\frac{\pi}{4} \cdot f} + e^{\pi \cdot \left(f \cdot -0.25\right)}}{\mathsf{fma}\left({f}^{5}, {\pi}^{5} \cdot 1.6276041666666666 \cdot 10^{-5}, \mathsf{fma}\left({f}^{3}, {\pi}^{3} \cdot 0.005208333333333333, \mathsf{fma}\left({\pi}^{7}, 2.422030009920635 \cdot 10^{-8} \cdot {f}^{7}, \pi \cdot \left(f \cdot 0.5\right)\right)\right)\right)}\right) \cdot \frac{-1}{\frac{\pi}{4}}
\end{array}
Initial program 8.5%
Taylor expanded in f around 0 97.7%
Simplified97.7%
Taylor expanded in f around inf 97.7%
distribute-lft-neg-in97.7%
metadata-eval97.7%
associate-*r*97.7%
*-commutative97.7%
Simplified97.7%
Final simplification97.7%
(FPCore (f)
:precision binary64
(/
(*
(log
(/
(+ (pow (exp 0.25) (* PI f)) (pow (exp -0.25) (* PI f)))
(fma
(pow f 5.0)
(* (pow PI 5.0) 1.6276041666666666e-5)
(fma f (* PI 0.5) (* 0.005208333333333333 (pow (* PI f) 3.0))))))
-4.0)
PI))
/* Herbie alternative (97.5%): numerator rewritten as
 * (e^0.25)^(pi*f) + (e^-0.25)^(pi*f); denominator is the degree-5
 * Taylor polynomial of e^t - e^(-t) (t = pi*f/4), evaluated with fma. */
double code(double f) {
return (log(((pow(exp(0.25), (((double) M_PI) * f)) + pow(exp(-0.25), (((double) M_PI) * f))) / fma(pow(f, 5.0), (pow(((double) M_PI), 5.0) * 1.6276041666666666e-5), fma(f, (((double) M_PI) * 0.5), (0.005208333333333333 * pow((((double) M_PI) * f), 3.0)))))) * -4.0) / ((double) M_PI);
}
function code(f) return Float64(Float64(log(Float64(Float64((exp(0.25) ^ Float64(pi * f)) + (exp(-0.25) ^ Float64(pi * f))) / fma((f ^ 5.0), Float64((pi ^ 5.0) * 1.6276041666666666e-5), fma(f, Float64(pi * 0.5), Float64(0.005208333333333333 * (Float64(pi * f) ^ 3.0)))))) * -4.0) / pi) end
code[f_] := N[(N[(N[Log[N[(N[(N[Power[N[Exp[0.25], $MachinePrecision], N[(Pi * f), $MachinePrecision]], $MachinePrecision] + N[Power[N[Exp[-0.25], $MachinePrecision], N[(Pi * f), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(N[Power[f, 5.0], $MachinePrecision] * N[(N[Power[Pi, 5.0], $MachinePrecision] * 1.6276041666666666e-5), $MachinePrecision] + N[(f * N[(Pi * 0.5), $MachinePrecision] + N[(0.005208333333333333 * N[Power[N[(Pi * f), $MachinePrecision], 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] * -4.0), $MachinePrecision] / Pi), $MachinePrecision]
\begin{array}{l}
\\
\frac{\log \left(\frac{{\left(e^{0.25}\right)}^{\left(\pi \cdot f\right)} + {\left(e^{-0.25}\right)}^{\left(\pi \cdot f\right)}}{\mathsf{fma}\left({f}^{5}, {\pi}^{5} \cdot 1.6276041666666666 \cdot 10^{-5}, \mathsf{fma}\left(f, \pi \cdot 0.5, 0.005208333333333333 \cdot {\left(\pi \cdot f\right)}^{3}\right)\right)}\right) \cdot -4}{\pi}
\end{array}
Initial program 8.5%
distribute-lft-neg-in8.5%
*-commutative8.5%
associate-/r/8.5%
associate-*l/8.5%
metadata-eval8.5%
distribute-neg-frac8.5%
Simplified8.4%
Taylor expanded in f around -inf 8.5%
Taylor expanded in f around 0 97.5%
fma-def97.5%
distribute-rgt-out--97.5%
metadata-eval97.5%
*-commutative97.5%
fma-def97.5%
distribute-rgt-out--97.5%
metadata-eval97.5%
distribute-rgt-out--97.5%
associate-*r*97.5%
cube-prod97.5%
metadata-eval97.5%
Simplified97.5%
associate-*r/97.7%
exp-prod97.7%
exp-prod97.7%
Applied egg-rr97.7%
Final simplification97.7%
(FPCore (f)
:precision binary64
(*
(log
(/
(+ (exp (* 0.25 (* PI f))) (exp (* -0.25 (* PI f))))
(fma
(pow f 5.0)
(* (pow PI 5.0) 1.6276041666666666e-5)
(+ (* 0.005208333333333333 (pow (* PI f) 3.0)) (* f (* PI 0.5))))))
(/ -4.0 PI)))
/* Herbie alternative (97.5%): exp form of the numerator; denominator is the
 * degree-5 Taylor polynomial of e^t - e^(-t) (t = pi*f/4), partly via fma. */
double code(double f) {
return log(((exp((0.25 * (((double) M_PI) * f))) + exp((-0.25 * (((double) M_PI) * f)))) / fma(pow(f, 5.0), (pow(((double) M_PI), 5.0) * 1.6276041666666666e-5), ((0.005208333333333333 * pow((((double) M_PI) * f), 3.0)) + (f * (((double) M_PI) * 0.5)))))) * (-4.0 / ((double) M_PI));
}
function code(f) return Float64(log(Float64(Float64(exp(Float64(0.25 * Float64(pi * f))) + exp(Float64(-0.25 * Float64(pi * f)))) / fma((f ^ 5.0), Float64((pi ^ 5.0) * 1.6276041666666666e-5), Float64(Float64(0.005208333333333333 * (Float64(pi * f) ^ 3.0)) + Float64(f * Float64(pi * 0.5)))))) * Float64(-4.0 / pi)) end
code[f_] := N[(N[Log[N[(N[(N[Exp[N[(0.25 * N[(Pi * f), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] + N[Exp[N[(-0.25 * N[(Pi * f), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(N[Power[f, 5.0], $MachinePrecision] * N[(N[Power[Pi, 5.0], $MachinePrecision] * 1.6276041666666666e-5), $MachinePrecision] + N[(N[(0.005208333333333333 * N[Power[N[(Pi * f), $MachinePrecision], 3.0], $MachinePrecision]), $MachinePrecision] + N[(f * N[(Pi * 0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] * N[(-4.0 / Pi), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\log \left(\frac{e^{0.25 \cdot \left(\pi \cdot f\right)} + e^{-0.25 \cdot \left(\pi \cdot f\right)}}{\mathsf{fma}\left({f}^{5}, {\pi}^{5} \cdot 1.6276041666666666 \cdot 10^{-5}, 0.005208333333333333 \cdot {\left(\pi \cdot f\right)}^{3} + f \cdot \left(\pi \cdot 0.5\right)\right)}\right) \cdot \frac{-4}{\pi}
\end{array}
Initial program 8.5%
distribute-lft-neg-in8.5%
*-commutative8.5%
associate-/r/8.5%
associate-*l/8.5%
metadata-eval8.5%
distribute-neg-frac8.5%
Simplified8.4%
Taylor expanded in f around -inf 8.5%
Taylor expanded in f around 0 97.5%
fma-def97.5%
distribute-rgt-out--97.5%
metadata-eval97.5%
*-commutative97.5%
fma-def97.5%
distribute-rgt-out--97.5%
metadata-eval97.5%
distribute-rgt-out--97.5%
associate-*r*97.5%
cube-prod97.5%
metadata-eval97.5%
Simplified97.5%
fma-udef97.5%
Applied egg-rr97.5%
Final simplification97.5%
(FPCore (f)
:precision binary64
(*
(/ -4.0 PI)
(log
(/
(+ (exp (* 0.25 (* PI f))) (exp (* -0.25 (* PI f))))
(fma f (* PI 0.5) (* 0.005208333333333333 (pow (* PI f) 3.0)))))))
/* Herbie alternative (97.1%): like the previous ones but with only the
 * degree-3 Taylor polynomial of e^t - e^(-t) in the denominator. */
double code(double f) {
return (-4.0 / ((double) M_PI)) * log(((exp((0.25 * (((double) M_PI) * f))) + exp((-0.25 * (((double) M_PI) * f)))) / fma(f, (((double) M_PI) * 0.5), (0.005208333333333333 * pow((((double) M_PI) * f), 3.0)))));
}
function code(f) return Float64(Float64(-4.0 / pi) * log(Float64(Float64(exp(Float64(0.25 * Float64(pi * f))) + exp(Float64(-0.25 * Float64(pi * f)))) / fma(f, Float64(pi * 0.5), Float64(0.005208333333333333 * (Float64(pi * f) ^ 3.0)))))) end
code[f_] := N[(N[(-4.0 / Pi), $MachinePrecision] * N[Log[N[(N[(N[Exp[N[(0.25 * N[(Pi * f), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] + N[Exp[N[(-0.25 * N[(Pi * f), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(f * N[(Pi * 0.5), $MachinePrecision] + N[(0.005208333333333333 * N[Power[N[(Pi * f), $MachinePrecision], 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{-4}{\pi} \cdot \log \left(\frac{e^{0.25 \cdot \left(\pi \cdot f\right)} + e^{-0.25 \cdot \left(\pi \cdot f\right)}}{\mathsf{fma}\left(f, \pi \cdot 0.5, 0.005208333333333333 \cdot {\left(\pi \cdot f\right)}^{3}\right)}\right)
\end{array}
Initial program 8.5%
distribute-lft-neg-in8.5%
*-commutative8.5%
associate-/r/8.5%
associate-*l/8.5%
metadata-eval8.5%
distribute-neg-frac8.5%
Simplified8.4%
Taylor expanded in f around -inf 8.5%
Taylor expanded in f around 0 97.1%
*-commutative97.1%
fma-def97.1%
distribute-rgt-out--97.1%
metadata-eval97.1%
distribute-rgt-out--97.1%
associate-*r*97.1%
cube-prod97.1%
metadata-eval97.1%
Simplified97.1%
Final simplification97.1%
(FPCore (f) :precision binary64 (fma -4.0 (/ (log (/ (/ 4.0 f) PI)) PI) (fma -2.0 (* (/ (* f f) PI) (fma 0.5 (* (pow PI 2.0) 0.08333333333333333) 0.0)) (/ (* f 0.0) PI))))
/* Herbie alternative (97.1%): small-f expansion,
 * approximately -(4/pi)*log(4/(pi*f)) - pi*f^2/12.  The (f * 0.0) term and
 * the trailing 0.0 in the inner fma are vestigial zero terms left by the
 * generator's Taylor expansion. */
double code(double f) {
return fma(-4.0, (log(((4.0 / f) / ((double) M_PI))) / ((double) M_PI)), fma(-2.0, (((f * f) / ((double) M_PI)) * fma(0.5, (pow(((double) M_PI), 2.0) * 0.08333333333333333), 0.0)), ((f * 0.0) / ((double) M_PI))));
}
function code(f) return fma(-4.0, Float64(log(Float64(Float64(4.0 / f) / pi)) / pi), fma(-2.0, Float64(Float64(Float64(f * f) / pi) * fma(0.5, Float64((pi ^ 2.0) * 0.08333333333333333), 0.0)), Float64(Float64(f * 0.0) / pi))) end
code[f_] := N[(-4.0 * N[(N[Log[N[(N[(4.0 / f), $MachinePrecision] / Pi), $MachinePrecision]], $MachinePrecision] / Pi), $MachinePrecision] + N[(-2.0 * N[(N[(N[(f * f), $MachinePrecision] / Pi), $MachinePrecision] * N[(0.5 * N[(N[Power[Pi, 2.0], $MachinePrecision] * 0.08333333333333333), $MachinePrecision] + 0.0), $MachinePrecision]), $MachinePrecision] + N[(N[(f * 0.0), $MachinePrecision] / Pi), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(-4, \frac{\log \left(\frac{\frac{4}{f}}{\pi}\right)}{\pi}, \mathsf{fma}\left(-2, \frac{f \cdot f}{\pi} \cdot \mathsf{fma}\left(0.5, {\pi}^{2} \cdot 0.08333333333333333, 0\right), \frac{f \cdot 0}{\pi}\right)\right)
\end{array}
Initial program 8.5%
distribute-lft-neg-in8.5%
*-commutative8.5%
associate-/r/8.5%
associate-*l/8.5%
metadata-eval8.5%
distribute-neg-frac8.5%
Simplified8.4%
Taylor expanded in f around -inf 8.5%
Taylor expanded in f around 0 97.5%
fma-def97.5%
distribute-rgt-out--97.5%
metadata-eval97.5%
*-commutative97.5%
fma-def97.5%
distribute-rgt-out--97.5%
metadata-eval97.5%
distribute-rgt-out--97.5%
associate-*r*97.5%
cube-prod97.5%
metadata-eval97.5%
Simplified97.5%
Taylor expanded in f around 0 97.2%
Simplified97.1%
Final simplification97.1%
(FPCore (f) :precision binary64 (- (fma 4.0 (/ (log (/ 4.0 (* PI f))) PI) (* PI (* f (* f 0.125))))))
/* Herbie alternative (96.2%): small-f expansion,
 * -((4/pi)*log(4/(pi*f)) + pi*f^2/8), fused with fma. */
double code(double f) {
return -fma(4.0, (log((4.0 / (((double) M_PI) * f))) / ((double) M_PI)), (((double) M_PI) * (f * (f * 0.125))));
}
function code(f) return Float64(-fma(4.0, Float64(log(Float64(4.0 / Float64(pi * f))) / pi), Float64(pi * Float64(f * Float64(f * 0.125))))) end
code[f_] := (-N[(4.0 * N[(N[Log[N[(4.0 / N[(Pi * f), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] / Pi), $MachinePrecision] + N[(Pi * N[(f * N[(f * 0.125), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision])
\begin{array}{l}
\\
-\mathsf{fma}\left(4, \frac{\log \left(\frac{4}{\pi \cdot f}\right)}{\pi}, \pi \cdot \left(f \cdot \left(f \cdot 0.125\right)\right)\right)
\end{array}
Initial program 8.5%
Taylor expanded in f around 0 96.1%
distribute-rgt-out--96.1%
metadata-eval96.1%
Simplified96.1%
Taylor expanded in f around 0 96.3%
+-commutative96.3%
fma-def96.3%
+-commutative96.3%
mul-1-neg96.3%
unsub-neg96.3%
log-div96.2%
associate-/r*96.2%
*-commutative96.2%
associate-*r*96.2%
*-commutative96.2%
unpow296.2%
associate-*r*96.2%
*-commutative96.2%
Simplified96.2%
Final simplification96.2%
(FPCore (f) :precision binary64 (- (fma 4.0 (/ (log (/ (/ 4.0 f) PI)) PI) (* PI (* (* f f) 0.125)))))
/* Herbie alternative (96.2%): same value as the previous alternative,
 * -((4/pi)*log((4/f)/pi) + pi*f^2/8), with a different association. */
double code(double f) {
return -fma(4.0, (log(((4.0 / f) / ((double) M_PI))) / ((double) M_PI)), (((double) M_PI) * ((f * f) * 0.125)));
}
function code(f) return Float64(-fma(4.0, Float64(log(Float64(Float64(4.0 / f) / pi)) / pi), Float64(pi * Float64(Float64(f * f) * 0.125)))) end
code[f_] := (-N[(4.0 * N[(N[Log[N[(N[(4.0 / f), $MachinePrecision] / Pi), $MachinePrecision]], $MachinePrecision] / Pi), $MachinePrecision] + N[(Pi * N[(N[(f * f), $MachinePrecision] * 0.125), $MachinePrecision]), $MachinePrecision]), $MachinePrecision])
\begin{array}{l}
\\
-\mathsf{fma}\left(4, \frac{\log \left(\frac{\frac{4}{f}}{\pi}\right)}{\pi}, \pi \cdot \left(\left(f \cdot f\right) \cdot 0.125\right)\right)
\end{array}
Initial program 8.5%
Taylor expanded in f around 0 96.1%
distribute-rgt-out--96.1%
metadata-eval96.1%
Simplified96.1%
Taylor expanded in f around 0 96.1%
associate-*r/96.1%
metadata-eval96.1%
associate-/r*96.1%
associate-*r*96.1%
Simplified96.1%
add-exp-log95.0%
associate-*l/95.0%
*-un-lft-identity95.0%
+-commutative95.0%
*-commutative95.0%
fma-def95.0%
*-commutative95.0%
div-inv95.0%
metadata-eval95.0%
Applied egg-rr95.0%
Taylor expanded in f around 0 96.3%
Simplified96.2%
Final simplification96.2%
(FPCore (f) :precision binary64 (* (log (+ (/ (/ 4.0 f) PI) (* PI (* f 0.125)))) (/ -1.0 (/ PI 4.0))))
/* Herbie alternative (96.1%): -(4/pi) * log((4/f)/pi + pi*f/8). */
double code(double f) {
return log((((4.0 / f) / ((double) M_PI)) + (((double) M_PI) * (f * 0.125)))) * (-1.0 / (((double) M_PI) / 4.0));
}
/** Herbie alternative (96.1%): -(4/pi) * log((4/f)/pi + pi*f/8). */
public static double code(double f) {
return Math.log((((4.0 / f) / Math.PI) + (Math.PI * (f * 0.125)))) * (-1.0 / (Math.PI / 4.0));
}
def code(f):
    """Herbie alternative (96.1%): -(4/pi) * log((4/f)/pi + pi*f/8)."""
    log_arg = ((4.0 / f) / math.pi) + (math.pi * (f * 0.125))
    scale = -1.0 / (math.pi / 4.0)
    return math.log(log_arg) * scale
# Herbie alternative (96.1%): -(4/pi) * log((4/f)/pi + pi*f/8).
function code(f) return Float64(log(Float64(Float64(Float64(4.0 / f) / pi) + Float64(pi * Float64(f * 0.125)))) * Float64(-1.0 / Float64(pi / 4.0))) end
function tmp = code(f)
% CODE  Herbie alternative: -(4/pi)*log((4/f)/pi + pi*f/8).
% Fixed: statement was on the function declaration line (invalid MATLAB).
tmp = log((((4.0 / f) / pi) + (pi * (f * 0.125)))) * (-1.0 / (pi / 4.0));
end
code[f_] := N[(N[Log[N[(N[(N[(4.0 / f), $MachinePrecision] / Pi), $MachinePrecision] + N[(Pi * N[(f * 0.125), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] * N[(-1.0 / N[(Pi / 4.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\log \left(\frac{\frac{4}{f}}{\pi} + \pi \cdot \left(f \cdot 0.125\right)\right) \cdot \frac{-1}{\frac{\pi}{4}}
\end{array}
Initial program 8.5%
Taylor expanded in f around 0 96.1%
distribute-rgt-out--96.1%
metadata-eval96.1%
Simplified96.1%
Taylor expanded in f around 0 96.1%
associate-*r/96.1%
metadata-eval96.1%
associate-/r*96.1%
associate-*r*96.1%
Simplified96.1%
Final simplification96.1%
(FPCore (f) :precision binary64 (* -4.0 (/ (- (log (/ 4.0 PI)) (log f)) PI)))
/* Herbie alternative (96.1%): -4 * (log(4/pi) - log(f)) / pi.
 * Uses log(f) directly, so it requires f > 0 for a finite result. */
double code(double f) {
return -4.0 * ((log((4.0 / ((double) M_PI))) - log(f)) / ((double) M_PI));
}
/** Herbie alternative (96.1%): -4 * (log(4/pi) - log(f)) / pi; needs f > 0 for a finite log. */
public static double code(double f) {
return -4.0 * ((Math.log((4.0 / Math.PI)) - Math.log(f)) / Math.PI);
}
def code(f):
    """Herbie alternative (96.1%): -4 * (log(4/pi) - log(f)) / pi."""
    log_diff = math.log((4.0 / math.pi)) - math.log(f)
    return -4.0 * (log_diff / math.pi)
# Herbie alternative (96.1%): -4 * (log(4/pi) - log(f)) / pi.
function code(f) return Float64(-4.0 * Float64(Float64(log(Float64(4.0 / pi)) - log(f)) / pi)) end
function tmp = code(f)
% CODE  Herbie alternative: -4*(log(4/pi) - log(f))/pi.
% Fixed: statement was on the function declaration line (invalid MATLAB).
tmp = -4.0 * ((log((4.0 / pi)) - log(f)) / pi);
end
(* Herbie alternative: -4*(Log[4/Pi] - Log[f])/Pi; requires f > 0 for a real result. *)
code[f_] := N[(-4.0 * N[(N[(N[Log[N[(4.0 / Pi), $MachinePrecision]], $MachinePrecision] - N[Log[f], $MachinePrecision]), $MachinePrecision] / Pi), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
-4 \cdot \frac{\log \left(\frac{4}{\pi}\right) - \log f}{\pi}
\end{array}
Initial program 8.5%
distribute-lft-neg-in8.5%
*-commutative8.5%
associate-/r/8.5%
associate-*l/8.5%
metadata-eval8.5%
distribute-neg-frac8.5%
Simplified8.4%
Taylor expanded in f around 0 95.9%
distribute-rgt-out--95.9%
metadata-eval95.9%
Simplified95.9%
Taylor expanded in f around 0 96.1%
+-commutative96.1%
mul-1-neg96.1%
unsub-neg96.1%
Simplified96.1%
Final simplification96.1%
(FPCore (f) :precision binary64 (* -4.0 (/ (log (/ 4.0 (* PI f))) PI)))
/* Herbie alternative (96.0%): -4 * log(4/(pi*f)) / pi
 * (leading term of the small-f expansion). */
double code(double f) {
return -4.0 * (log((4.0 / (((double) M_PI) * f))) / ((double) M_PI));
}
/** Herbie alternative (96.0%): -4 * log(4/(pi*f)) / pi. */
public static double code(double f) {
return -4.0 * (Math.log((4.0 / (Math.PI * f))) / Math.PI);
}
def code(f):
    """Herbie alternative (96.0%): -4 * log(4/(pi*f)) / pi."""
    ratio = 4.0 / (math.pi * f)
    return -4.0 * (math.log(ratio) / math.pi)
# Herbie alternative (96.0%): -4 * log(4/(pi*f)) / pi.
function code(f) return Float64(-4.0 * Float64(log(Float64(4.0 / Float64(pi * f))) / pi)) end
function tmp = code(f)
% CODE  Herbie alternative: -4*log(4/(pi*f))/pi.
% Fixed: statement was on the function declaration line (invalid MATLAB).
tmp = -4.0 * (log((4.0 / (pi * f))) / pi);
end
(* Herbie alternative: -4*Log[4/(Pi f)]/Pi. *)
code[f_] := N[(-4.0 * N[(N[Log[N[(4.0 / N[(Pi * f), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] / Pi), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
-4 \cdot \frac{\log \left(\frac{4}{\pi \cdot f}\right)}{\pi}
\end{array}
Initial program 8.5%
distribute-lft-neg-in8.5%
*-commutative8.5%
associate-/r/8.5%
associate-*l/8.5%
metadata-eval8.5%
distribute-neg-frac8.5%
Simplified8.4%
Taylor expanded in f around 0 95.9%
distribute-rgt-out--95.9%
metadata-eval95.9%
Simplified95.9%
Taylor expanded in f around 0 96.1%
+-commutative96.1%
mul-1-neg96.1%
unsub-neg96.1%
log-div96.0%
associate-/r*96.0%
*-commutative96.0%
Simplified96.0%
Final simplification96.0%
(FPCore (f) :precision binary64 (/ (* f 0.0) PI))
/* Degenerate Herbie alternative (3.1% accuracy): (f * 0.0) / pi returns
 * +/-0 for every finite f (NaN for infinite or NaN input). Kept only
 * because the report lists it; do not use. */
double code(double f) {
return (f * 0.0) / ((double) M_PI);
}
/** Degenerate Herbie alternative (3.1%): returns +/-0 for every finite f. Do not use. */
public static double code(double f) {
return (f * 0.0) / Math.PI;
}
def code(f):
    """Degenerate Herbie alternative (3.1%): (f*0)/pi, i.e. +/-0 for finite f."""
    zeroed = f * 0.0
    return zeroed / math.pi
# Degenerate Herbie alternative (3.1%): (f*0)/pi, i.e. +/-0 for finite f.
function code(f) return Float64(Float64(f * 0.0) / pi) end
function tmp = code(f)
% CODE  Degenerate Herbie alternative: (f*0)/pi, i.e. 0 for all finite f.
% Fixed: statement was on the function declaration line (invalid MATLAB).
tmp = (f * 0.0) / pi;
end
(* Degenerate Herbie alternative: (f*0)/Pi, i.e. 0 for all finite f. Do not use. *)
code[f_] := N[(N[(f * 0.0), $MachinePrecision] / Pi), $MachinePrecision]
\begin{array}{l}
\\
\frac{f \cdot 0}{\pi}
\end{array}
Initial program 8.5%
Applied egg-rr3.6%
Taylor expanded in f around 0 3.1%
associate-*r/3.1%
distribute-rgt-out3.1%
metadata-eval3.1%
mul0-rgt3.1%
metadata-eval3.1%
associate-*r*3.1%
metadata-eval3.1%
metadata-eval3.1%
metadata-eval3.1%
metadata-eval3.1%
associate-*r*3.1%
*-commutative3.1%
associate-*r*3.1%
metadata-eval3.1%
mul0-rgt3.1%
metadata-eval3.1%
distribute-rgt-out3.1%
associate-*r*3.1%
Simplified3.1%
Final simplification3.1%
herbie shell --seed 2023250
(FPCore (f)
:name "VandenBroeck and Keller, Equation (20)"
:precision binary64
(- (* (/ 1.0 (/ PI 4.0)) (log (/ (+ (exp (* (/ PI 4.0) f)) (exp (- (* (/ PI 4.0) f)))) (- (exp (* (/ PI 4.0) f)) (exp (- (* (/ PI 4.0) f)))))))))