
(FPCore (f) :precision binary64 (let* ((t_0 (* (/ PI 4.0) f)) (t_1 (exp t_0)) (t_2 (exp (- t_0)))) (- (* (/ 1.0 (/ PI 4.0)) (log (/ (+ t_1 t_2) (- t_1 t_2)))))))
/* VandenBroeck & Keller, Eq. (20): -(4/pi) * log(coth(pi*f/4)),
 * evaluated via explicit exponentials exactly as in the FPCore above.
 * Requires f > 0 so the log argument is positive. */
double code(double f) {
    double scaled = (((double) M_PI) / 4.0) * f;  /* pi/4 * f */
    double expPos = exp(scaled);
    double expNeg = exp(-scaled);
    double cothArg = (expPos + expNeg) / (expPos - expNeg);
    return -((1.0 / (((double) M_PI) / 4.0)) * log(cothArg));
}
/** Eq. (20): -(4/pi) * log(coth(pi*f/4)), via explicit exponentials (f > 0). */
public static double code(double f) {
    double quarterPiF = (Math.PI / 4.0) * f;
    double expPlus = Math.exp(quarterPiF);
    double expMinus = Math.exp(-quarterPiF);
    double cothArg = (expPlus + expMinus) / (expPlus - expMinus);
    return -((1.0 / (Math.PI / 4.0)) * Math.log(cothArg));
}
def code(f):
    """Eq. (20): -(4/pi) * log(coth(pi*f/4)), via explicit exponentials.

    Fixes the original one-line rendering, which fused four statements
    with no separators and was not valid Python. Requires f > 0 so the
    log argument is positive.
    """
    t_0 = (math.pi / 4.0) * f
    t_1 = math.exp(t_0)
    t_2 = math.exp(-t_0)
    return -((1.0 / (math.pi / 4.0)) * math.log(((t_1 + t_2) / (t_1 - t_2))))
# Eq. (20): -(4/pi) * log(coth(pi*f/4)) via explicit exponentials.
# Fixes the original one-line rendering, which fused statements with no
# separators and was not valid Julia. Requires f > 0.
function code(f)
    t_0 = Float64(Float64(pi / 4.0) * f)
    t_1 = exp(t_0)
    t_2 = exp(Float64(-t_0))
    return Float64(-Float64(Float64(1.0 / Float64(pi / 4.0)) * log(Float64(Float64(t_1 + t_2) / Float64(t_1 - t_2)))))
end
% Eq. (20): -(4/pi) * log(coth(pi*f/4)) via explicit exponentials.
% Fixes the original one-line rendering, which had no separator after the
% function declaration and was not valid MATLAB. Requires f > 0.
function tmp = code(f)
  t_0 = (pi / 4.0) * f;
  t_1 = exp(t_0);
  t_2 = exp(-t_0);
  tmp = -((1.0 / (pi / 4.0)) * log(((t_1 + t_2) / (t_1 - t_2))));
end
code[f_] := Block[{t$95$0 = N[(N[(Pi / 4.0), $MachinePrecision] * f), $MachinePrecision]}, Block[{t$95$1 = N[Exp[t$95$0], $MachinePrecision]}, Block[{t$95$2 = N[Exp[(-t$95$0)], $MachinePrecision]}, (-N[(N[(1.0 / N[(Pi / 4.0), $MachinePrecision]), $MachinePrecision] * N[Log[N[(N[(t$95$1 + t$95$2), $MachinePrecision] / N[(t$95$1 - t$95$2), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision])]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{\pi}{4} \cdot f\\
t_1 := e^{t_0}\\
t_2 := e^{-t_0}\\
-\frac{1}{\frac{\pi}{4}} \cdot \log \left(\frac{t_1 + t_2}{t_1 - t_2}\right)
\end{array}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 16 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (f) :precision binary64 (let* ((t_0 (* (/ PI 4.0) f)) (t_1 (exp t_0)) (t_2 (exp (- t_0)))) (- (* (/ 1.0 (/ PI 4.0)) (log (/ (+ t_1 t_2) (- t_1 t_2)))))))
/* Initial program, Eq. (20): computes -(4/pi) * log((e^t + e^-t)/(e^t - e^-t))
 * with t = (pi/4)*f, i.e. -(4/pi)*log(coth(pi*f/4)). Requires f > 0 so the
 * log argument is positive. For small f the difference t_1 - t_2 cancels
 * catastrophically, which is why the reported accuracy is only 7.7%. */
double code(double f) {
double t_0 = (((double) M_PI) / 4.0) * f;
double t_1 = exp(t_0);
double t_2 = exp(-t_0);
return -((1.0 / (((double) M_PI) / 4.0)) * log(((t_1 + t_2) / (t_1 - t_2))));
}
/** Initial program, Eq. (20): -(4/pi) * log(coth(pi*f/4)) via explicit
 *  exponentials. Requires f > 0; the difference t_1 - t_2 cancels for
 *  small f (reported accuracy 7.7%). */
public static double code(double f) {
double t_0 = (Math.PI / 4.0) * f;
double t_1 = Math.exp(t_0);
double t_2 = Math.exp(-t_0);
return -((1.0 / (Math.PI / 4.0)) * Math.log(((t_1 + t_2) / (t_1 - t_2))));
}
def code(f):
    """Eq. (20): -(4/pi) * log(coth(pi*f/4)), via explicit exponentials.

    Fixes the original one-line rendering, which fused four statements
    with no separators and was not valid Python. Requires f > 0.
    """
    t_0 = (math.pi / 4.0) * f
    t_1 = math.exp(t_0)
    t_2 = math.exp(-t_0)
    return -((1.0 / (math.pi / 4.0)) * math.log(((t_1 + t_2) / (t_1 - t_2))))
# Eq. (20): -(4/pi) * log(coth(pi*f/4)) via explicit exponentials.
# Fixes the original invalid one-line rendering. Requires f > 0.
function code(f)
    t_0 = Float64(Float64(pi / 4.0) * f)
    t_1 = exp(t_0)
    t_2 = exp(Float64(-t_0))
    return Float64(-Float64(Float64(1.0 / Float64(pi / 4.0)) * log(Float64(Float64(t_1 + t_2) / Float64(t_1 - t_2)))))
end
% Eq. (20): -(4/pi) * log(coth(pi*f/4)) via explicit exponentials.
% Fixes the original invalid one-line rendering. Requires f > 0.
function tmp = code(f)
  t_0 = (pi / 4.0) * f;
  t_1 = exp(t_0);
  t_2 = exp(-t_0);
  tmp = -((1.0 / (pi / 4.0)) * log(((t_1 + t_2) / (t_1 - t_2))));
end
code[f_] := Block[{t$95$0 = N[(N[(Pi / 4.0), $MachinePrecision] * f), $MachinePrecision]}, Block[{t$95$1 = N[Exp[t$95$0], $MachinePrecision]}, Block[{t$95$2 = N[Exp[(-t$95$0)], $MachinePrecision]}, (-N[(N[(1.0 / N[(Pi / 4.0), $MachinePrecision]), $MachinePrecision] * N[Log[N[(N[(t$95$1 + t$95$2), $MachinePrecision] / N[(t$95$1 - t$95$2), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision])]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{\pi}{4} \cdot f\\
t_1 := e^{t_0}\\
t_2 := e^{-t_0}\\
-\frac{1}{\frac{\pi}{4}} \cdot \log \left(\frac{t_1 + t_2}{t_1 - t_2}\right)
\end{array}
\end{array}
(FPCore (f)
:precision binary64
(*
(log
(/
(+ (exp (* (/ PI 4.0) f)) (exp (* (/ PI 4.0) (- f))))
(fma
f
(* PI 0.5)
(fma
(pow f 5.0)
(* (pow PI 5.0) 1.6276041666666666e-5)
(fma
(pow f 3.0)
(* (pow PI 3.0) 0.005208333333333333)
(* (pow f 7.0) (* (pow PI 7.0) 2.422030009920635e-8)))))))
(/ -1.0 (/ PI 4.0))))
/* Herbie alternative 1: the exact denominator e^(pi f/4) - e^(-pi f/4)
 * (which cancels for small f) is replaced by its odd Taylor polynomial
 * 2*sinh(pi f/4) = (pi/2) f + (pi^3/192) f^3 + (pi^5/61440) f^5
 * + (pi^7/41287680) f^7 — the literals 0.005208..., 1.6276...e-5 and
 * 2.4220...e-8 are 1/192, 1/61440 and 1/41287680 — evaluated with nested
 * fma calls. Reported accuracy 96.9% (vs 7.7% for the initial program). */
double code(double f) {
return log(((exp(((((double) M_PI) / 4.0) * f)) + exp(((((double) M_PI) / 4.0) * -f))) / fma(f, (((double) M_PI) * 0.5), fma(pow(f, 5.0), (pow(((double) M_PI), 5.0) * 1.6276041666666666e-5), fma(pow(f, 3.0), (pow(((double) M_PI), 3.0) * 0.005208333333333333), (pow(f, 7.0) * (pow(((double) M_PI), 7.0) * 2.422030009920635e-8))))))) * (-1.0 / (((double) M_PI) / 4.0));
}
function code(f) return Float64(log(Float64(Float64(exp(Float64(Float64(pi / 4.0) * f)) + exp(Float64(Float64(pi / 4.0) * Float64(-f)))) / fma(f, Float64(pi * 0.5), fma((f ^ 5.0), Float64((pi ^ 5.0) * 1.6276041666666666e-5), fma((f ^ 3.0), Float64((pi ^ 3.0) * 0.005208333333333333), Float64((f ^ 7.0) * Float64((pi ^ 7.0) * 2.422030009920635e-8))))))) * Float64(-1.0 / Float64(pi / 4.0))) end
code[f_] := N[(N[Log[N[(N[(N[Exp[N[(N[(Pi / 4.0), $MachinePrecision] * f), $MachinePrecision]], $MachinePrecision] + N[Exp[N[(N[(Pi / 4.0), $MachinePrecision] * (-f)), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(f * N[(Pi * 0.5), $MachinePrecision] + N[(N[Power[f, 5.0], $MachinePrecision] * N[(N[Power[Pi, 5.0], $MachinePrecision] * 1.6276041666666666e-5), $MachinePrecision] + N[(N[Power[f, 3.0], $MachinePrecision] * N[(N[Power[Pi, 3.0], $MachinePrecision] * 0.005208333333333333), $MachinePrecision] + N[(N[Power[f, 7.0], $MachinePrecision] * N[(N[Power[Pi, 7.0], $MachinePrecision] * 2.422030009920635e-8), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] * N[(-1.0 / N[(Pi / 4.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\log \left(\frac{e^{\frac{\pi}{4} \cdot f} + e^{\frac{\pi}{4} \cdot \left(-f\right)}}{\mathsf{fma}\left(f, \pi \cdot 0.5, \mathsf{fma}\left({f}^{5}, {\pi}^{5} \cdot 1.6276041666666666 \cdot 10^{-5}, \mathsf{fma}\left({f}^{3}, {\pi}^{3} \cdot 0.005208333333333333, {f}^{7} \cdot \left({\pi}^{7} \cdot 2.422030009920635 \cdot 10^{-8}\right)\right)\right)\right)}\right) \cdot \frac{-1}{\frac{\pi}{4}}
\end{array}
Initial program 7.7%
Taylor expanded in f around 0 96.9%
fma-def 96.9%
distribute-rgt-out-- 96.9%
metadata-eval 96.9%
associate-+r+ 96.9%
+-commutative 96.9%
Simplified 96.9%
Final simplification 96.9%
(FPCore (f)
:precision binary64
(*
(log
(/
(* 2.0 (cosh (* f (* PI 0.25))))
(fma
f
(* PI 0.5)
(fma
(pow f 5.0)
(* (pow PI 5.0) 1.6276041666666666e-5)
(fma
(pow f 3.0)
(* (pow PI 3.0) 0.005208333333333333)
(* 2.422030009920635e-8 (pow (* PI f) 7.0)))))))
(/ -1.0 (/ PI 4.0))))
/* Herbie alternative 2: same as alternative 1 but the numerator
 * e^(pi f/4) + e^(-pi f/4) is rewritten as 2*cosh(pi f/4) (always safe —
 * no cancellation in a sum of positive terms), and the f^7 term is
 * grouped as 2.4220...e-8 * (pi*f)^7. The denominator is again the
 * fma-evaluated Taylor polynomial of 2*sinh(pi f/4). Reported 96.9%. */
double code(double f) {
return log(((2.0 * cosh((f * (((double) M_PI) * 0.25)))) / fma(f, (((double) M_PI) * 0.5), fma(pow(f, 5.0), (pow(((double) M_PI), 5.0) * 1.6276041666666666e-5), fma(pow(f, 3.0), (pow(((double) M_PI), 3.0) * 0.005208333333333333), (2.422030009920635e-8 * pow((((double) M_PI) * f), 7.0))))))) * (-1.0 / (((double) M_PI) / 4.0));
}
function code(f) return Float64(log(Float64(Float64(2.0 * cosh(Float64(f * Float64(pi * 0.25)))) / fma(f, Float64(pi * 0.5), fma((f ^ 5.0), Float64((pi ^ 5.0) * 1.6276041666666666e-5), fma((f ^ 3.0), Float64((pi ^ 3.0) * 0.005208333333333333), Float64(2.422030009920635e-8 * (Float64(pi * f) ^ 7.0))))))) * Float64(-1.0 / Float64(pi / 4.0))) end
code[f_] := N[(N[Log[N[(N[(2.0 * N[Cosh[N[(f * N[(Pi * 0.25), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(f * N[(Pi * 0.5), $MachinePrecision] + N[(N[Power[f, 5.0], $MachinePrecision] * N[(N[Power[Pi, 5.0], $MachinePrecision] * 1.6276041666666666e-5), $MachinePrecision] + N[(N[Power[f, 3.0], $MachinePrecision] * N[(N[Power[Pi, 3.0], $MachinePrecision] * 0.005208333333333333), $MachinePrecision] + N[(2.422030009920635e-8 * N[Power[N[(Pi * f), $MachinePrecision], 7.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] * N[(-1.0 / N[(Pi / 4.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\log \left(\frac{2 \cdot \cosh \left(f \cdot \left(\pi \cdot 0.25\right)\right)}{\mathsf{fma}\left(f, \pi \cdot 0.5, \mathsf{fma}\left({f}^{5}, {\pi}^{5} \cdot 1.6276041666666666 \cdot 10^{-5}, \mathsf{fma}\left({f}^{3}, {\pi}^{3} \cdot 0.005208333333333333, 2.422030009920635 \cdot 10^{-8} \cdot {\left(\pi \cdot f\right)}^{7}\right)\right)\right)}\right) \cdot \frac{-1}{\frac{\pi}{4}}
\end{array}
Initial program 7.7%
Taylor expanded in f around 0 96.9%
fma-def 96.9%
distribute-rgt-out-- 96.9%
metadata-eval 96.9%
associate-+r+ 96.9%
+-commutative 96.9%
Simplified 96.9%
div-inv 96.9%
log-prod 96.9%
Applied egg-rr 96.9%
log-rec 96.9%
sub-neg 96.9%
log-div 96.9%
*-commutative 96.9%
Simplified 96.9%
Final simplification 96.9%
(FPCore (f)
:precision binary64
(*
(log
(/
(+ (exp (* -0.25 (* PI f))) (exp (* 0.25 (* PI f))))
(fma
f
(* PI 0.5)
(fma
(pow f 3.0)
(* (pow PI 3.0) 0.005208333333333333)
(* (pow f 5.0) (* (pow PI 5.0) 1.6276041666666666e-5))))))
(/ -4.0 PI)))
/* Herbie alternative 3: numerator kept as an explicit sum of exponentials
 * e^(-pi f/4) + e^(pi f/4); denominator is the Taylor polynomial of
 * 2*sinh(pi f/4) truncated at f^5 (one term fewer than alternatives 1-2),
 * evaluated with nested fma; outer scale folded to -4/pi. Reported 96.8%. */
double code(double f) {
return log(((exp((-0.25 * (((double) M_PI) * f))) + exp((0.25 * (((double) M_PI) * f)))) / fma(f, (((double) M_PI) * 0.5), fma(pow(f, 3.0), (pow(((double) M_PI), 3.0) * 0.005208333333333333), (pow(f, 5.0) * (pow(((double) M_PI), 5.0) * 1.6276041666666666e-5)))))) * (-4.0 / ((double) M_PI));
}
function code(f) return Float64(log(Float64(Float64(exp(Float64(-0.25 * Float64(pi * f))) + exp(Float64(0.25 * Float64(pi * f)))) / fma(f, Float64(pi * 0.5), fma((f ^ 3.0), Float64((pi ^ 3.0) * 0.005208333333333333), Float64((f ^ 5.0) * Float64((pi ^ 5.0) * 1.6276041666666666e-5)))))) * Float64(-4.0 / pi)) end
code[f_] := N[(N[Log[N[(N[(N[Exp[N[(-0.25 * N[(Pi * f), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] + N[Exp[N[(0.25 * N[(Pi * f), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(f * N[(Pi * 0.5), $MachinePrecision] + N[(N[Power[f, 3.0], $MachinePrecision] * N[(N[Power[Pi, 3.0], $MachinePrecision] * 0.005208333333333333), $MachinePrecision] + N[(N[Power[f, 5.0], $MachinePrecision] * N[(N[Power[Pi, 5.0], $MachinePrecision] * 1.6276041666666666e-5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] * N[(-4.0 / Pi), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\log \left(\frac{e^{-0.25 \cdot \left(\pi \cdot f\right)} + e^{0.25 \cdot \left(\pi \cdot f\right)}}{\mathsf{fma}\left(f, \pi \cdot 0.5, \mathsf{fma}\left({f}^{3}, {\pi}^{3} \cdot 0.005208333333333333, {f}^{5} \cdot \left({\pi}^{5} \cdot 1.6276041666666666 \cdot 10^{-5}\right)\right)\right)}\right) \cdot \frac{-4}{\pi}
\end{array}
Initial program 7.7%
distribute-lft-neg-in7.7%
*-commutative7.7%
Simplified7.6%
Taylor expanded in f around inf 7.7%
Taylor expanded in f around 0 96.8%
fma-def96.8%
distribute-rgt-out--96.8%
metadata-eval96.8%
fma-def96.8%
distribute-rgt-out--96.8%
metadata-eval96.8%
distribute-rgt-out--96.8%
metadata-eval96.8%
Simplified96.8%
Final simplification96.8%
(FPCore (f) :precision binary64 (- (* 0.08333333333333333 (* (pow f 2.0) (- PI))) (* 4.0 (+ (log (pow (/ 1.0 f) (/ 1.0 PI))) (/ (log (/ 4.0 PI)) PI)))))
double code(double f) {
return (0.08333333333333333 * (pow(f, 2.0) * -((double) M_PI))) - (4.0 * (log(pow((1.0 / f), (1.0 / ((double) M_PI)))) + (log((4.0 / ((double) M_PI))) / ((double) M_PI))));
}
public static double code(double f) {
return (0.08333333333333333 * (Math.pow(f, 2.0) * -Math.PI)) - (4.0 * (Math.log(Math.pow((1.0 / f), (1.0 / Math.PI))) + (Math.log((4.0 / Math.PI)) / Math.PI)));
}
def code(f): return (0.08333333333333333 * (math.pow(f, 2.0) * -math.pi)) - (4.0 * (math.log(math.pow((1.0 / f), (1.0 / math.pi))) + (math.log((4.0 / math.pi)) / math.pi)))
function code(f) return Float64(Float64(0.08333333333333333 * Float64((f ^ 2.0) * Float64(-pi))) - Float64(4.0 * Float64(log((Float64(1.0 / f) ^ Float64(1.0 / pi))) + Float64(log(Float64(4.0 / pi)) / pi)))) end
function tmp = code(f) tmp = (0.08333333333333333 * ((f ^ 2.0) * -pi)) - (4.0 * (log(((1.0 / f) ^ (1.0 / pi))) + (log((4.0 / pi)) / pi))); end
code[f_] := N[(N[(0.08333333333333333 * N[(N[Power[f, 2.0], $MachinePrecision] * (-Pi)), $MachinePrecision]), $MachinePrecision] - N[(4.0 * N[(N[Log[N[Power[N[(1.0 / f), $MachinePrecision], N[(1.0 / Pi), $MachinePrecision]], $MachinePrecision]], $MachinePrecision] + N[(N[Log[N[(4.0 / Pi), $MachinePrecision]], $MachinePrecision] / Pi), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
0.08333333333333333 \cdot \left({f}^{2} \cdot \left(-\pi\right)\right) - 4 \cdot \left(\log \left({\left(\frac{1}{f}\right)}^{\left(\frac{1}{\pi}\right)}\right) + \frac{\log \left(\frac{4}{\pi}\right)}{\pi}\right)
\end{array}
Initial program 7.7%
Taylor expanded in f around 0 96.9%
fma-def96.9%
distribute-rgt-out--96.9%
metadata-eval96.9%
associate-+r+96.9%
+-commutative96.9%
Simplified96.9%
Taylor expanded in f around 0 96.5%
Simplified96.4%
Taylor expanded in f around inf 96.5%
add-log-exp96.5%
div-inv96.3%
exp-to-pow96.6%
Applied egg-rr96.6%
Final simplification96.6%
(FPCore (f) :precision binary64 (- (* 0.08333333333333333 (* (pow f 2.0) (- PI))) (* 4.0 (/ (log (/ 4.0 (* PI f))) PI))))
double code(double f) {
return (0.08333333333333333 * (pow(f, 2.0) * -((double) M_PI))) - (4.0 * (log((4.0 / (((double) M_PI) * f))) / ((double) M_PI)));
}
public static double code(double f) {
return (0.08333333333333333 * (Math.pow(f, 2.0) * -Math.PI)) - (4.0 * (Math.log((4.0 / (Math.PI * f))) / Math.PI));
}
# Small-f approximation: -pi*f^2/12 - (4/pi)*log(4/(pi*f)); 0.0833... = 1/12.
def code(f): return (0.08333333333333333 * (math.pow(f, 2.0) * -math.pi)) - (4.0 * (math.log((4.0 / (math.pi * f))) / math.pi))
function code(f) return Float64(Float64(0.08333333333333333 * Float64((f ^ 2.0) * Float64(-pi))) - Float64(4.0 * Float64(log(Float64(4.0 / Float64(pi * f))) / pi))) end
function tmp = code(f) tmp = (0.08333333333333333 * ((f ^ 2.0) * -pi)) - (4.0 * (log((4.0 / (pi * f))) / pi)); end
code[f_] := N[(N[(0.08333333333333333 * N[(N[Power[f, 2.0], $MachinePrecision] * (-Pi)), $MachinePrecision]), $MachinePrecision] - N[(4.0 * N[(N[Log[N[(4.0 / N[(Pi * f), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] / Pi), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
0.08333333333333333 \cdot \left({f}^{2} \cdot \left(-\pi\right)\right) - 4 \cdot \frac{\log \left(\frac{4}{\pi \cdot f}\right)}{\pi}
\end{array}
Initial program 7.7%
Taylor expanded in f around 0 96.9%
fma-def96.9%
distribute-rgt-out--96.9%
metadata-eval96.9%
associate-+r+96.9%
+-commutative96.9%
Simplified96.9%
Taylor expanded in f around 0 96.5%
Simplified96.4%
Taylor expanded in f around inf 96.5%
Taylor expanded in f around 0 96.5%
mul-1-neg96.5%
distribute-frac-neg96.5%
+-commutative96.5%
distribute-frac-neg96.5%
unsub-neg96.5%
div-sub96.5%
log-div96.6%
associate-/r*96.6%
Simplified96.6%
Final simplification96.6%
(FPCore (f) :precision binary64 (* -4.0 (/ (log (/ 4.0 (* PI f))) PI)))
double code(double f) {
return -4.0 * (log((4.0 / (((double) M_PI) * f))) / ((double) M_PI));
}
public static double code(double f) {
return -4.0 * (Math.log((4.0 / (Math.PI * f))) / Math.PI);
}
# Leading-order small-f approximation: -(4/pi) * log(4/(pi*f)); requires f > 0.
def code(f): return -4.0 * (math.log((4.0 / (math.pi * f))) / math.pi)
function code(f) return Float64(-4.0 * Float64(log(Float64(4.0 / Float64(pi * f))) / pi)) end
function tmp = code(f) tmp = -4.0 * (log((4.0 / (pi * f))) / pi); end
code[f_] := N[(-4.0 * N[(N[Log[N[(4.0 / N[(Pi * f), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] / Pi), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
-4 \cdot \frac{\log \left(\frac{4}{\pi \cdot f}\right)}{\pi}
\end{array}
Initial program 7.7%
distribute-lft-neg-in7.7%
*-commutative7.7%
Simplified7.6%
Taylor expanded in f around 0 95.9%
associate-*r/95.9%
associate-/l*95.8%
associate-/r/95.8%
mul-1-neg95.8%
unsub-neg95.8%
distribute-rgt-out--95.8%
*-commutative95.8%
associate-/r*95.8%
metadata-eval95.8%
metadata-eval95.8%
Simplified95.8%
Taylor expanded in f around 0 95.9%
log-div95.9%
associate-*r/95.9%
log-div95.9%
sub-neg95.9%
distribute-lft-in95.9%
remove-double-neg95.9%
mul-1-neg95.9%
log-rec95.9%
distribute-lft-in95.9%
sub-neg95.9%
associate-*r/95.9%
Simplified95.9%
Final simplification95.9%
(FPCore (f) :precision binary64 (/ (* 4.0 (- (log 0.125))) -16.0))
/* Degenerate Herbie alternative: a constant — the parameter f is ignored
 * entirely, so this can only match the true function near one input value
 * (reported accuracy 13.6%). Kept verbatim as generated. */
double code(double f) {
return (4.0 * -log(0.125)) / -16.0;
}
real(8) function code(f)
real(8), intent (in) :: f
code = (4.0d0 * -log(0.125d0)) / (-16.0d0)
end function
public static double code(double f) {
return (4.0 * -Math.log(0.125)) / -16.0;
}
def code(f): return (4.0 * -math.log(0.125)) / -16.0
function code(f) return Float64(Float64(4.0 * Float64(-log(0.125))) / -16.0) end
function tmp = code(f) tmp = (4.0 * -log(0.125)) / -16.0; end
code[f_] := N[(N[(4.0 * (-N[Log[0.125], $MachinePrecision])), $MachinePrecision] / -16.0), $MachinePrecision]
\begin{array}{l}
\\
\frac{4 \cdot \left(-\log 0.125\right)}{-16}
\end{array}
Initial program 7.7%
Applied egg-rr1.6%
Taylor expanded in f around 0 1.6%
associate-*r/1.6%
Simplified1.6%
Applied egg-rr13.6%
Final simplification13.6%
(FPCore (f) :precision binary64 (/ (* 4.0 (- (log 0.125))) -2.25))
double code(double f) {
return (4.0 * -log(0.125)) / -2.25;
}
real(8) function code(f)
real(8), intent (in) :: f
code = (4.0d0 * -log(0.125d0)) / (-2.25d0)
end function
public static double code(double f) {
return (4.0 * -Math.log(0.125)) / -2.25;
}
def code(f): return (4.0 * -math.log(0.125)) / -2.25
function code(f) return Float64(Float64(4.0 * Float64(-log(0.125))) / -2.25) end
function tmp = code(f) tmp = (4.0 * -log(0.125)) / -2.25; end
code[f_] := N[(N[(4.0 * (-N[Log[0.125], $MachinePrecision])), $MachinePrecision] / -2.25), $MachinePrecision]
\begin{array}{l}
\\
\frac{4 \cdot \left(-\log 0.125\right)}{-2.25}
\end{array}
Initial program 7.7%
Applied egg-rr1.6%
Taylor expanded in f around 0 1.6%
associate-*r/1.6%
Simplified1.6%
Applied egg-rr14.4%
Final simplification14.4%
(FPCore (f) :precision binary64 (/ (* 4.0 (- (log 0.125))) -1.5))
double code(double f) {
return (4.0 * -log(0.125)) / -1.5;
}
real(8) function code(f)
real(8), intent (in) :: f
code = (4.0d0 * -log(0.125d0)) / (-1.5d0)
end function
public static double code(double f) {
return (4.0 * -Math.log(0.125)) / -1.5;
}
def code(f): return (4.0 * -math.log(0.125)) / -1.5
function code(f) return Float64(Float64(4.0 * Float64(-log(0.125))) / -1.5) end
function tmp = code(f) tmp = (4.0 * -log(0.125)) / -1.5; end
code[f_] := N[(N[(4.0 * (-N[Log[0.125], $MachinePrecision])), $MachinePrecision] / -1.5), $MachinePrecision]
\begin{array}{l}
\\
\frac{4 \cdot \left(-\log 0.125\right)}{-1.5}
\end{array}
Initial program 7.7%
Applied egg-rr1.6%
Taylor expanded in f around 0 1.6%
associate-*r/1.6%
Simplified1.6%
Applied egg-rr14.7%
Final simplification14.7%
(FPCore (f) :precision binary64 (- (/ (* 4.0 (log 0.125)) -1.3333333333333333)))
double code(double f) {
return -((4.0 * log(0.125)) / -1.3333333333333333);
}
real(8) function code(f)
real(8), intent (in) :: f
code = -((4.0d0 * log(0.125d0)) / (-1.3333333333333333d0))
end function
public static double code(double f) {
return -((4.0 * Math.log(0.125)) / -1.3333333333333333);
}
def code(f): return -((4.0 * math.log(0.125)) / -1.3333333333333333)
function code(f) return Float64(-Float64(Float64(4.0 * log(0.125)) / -1.3333333333333333)) end
function tmp = code(f) tmp = -((4.0 * log(0.125)) / -1.3333333333333333); end
code[f_] := (-N[(N[(4.0 * N[Log[0.125], $MachinePrecision]), $MachinePrecision] / -1.3333333333333333), $MachinePrecision])
\begin{array}{l}
\\
-\frac{4 \cdot \log 0.125}{-1.3333333333333333}
\end{array}
Initial program 7.7%
Applied egg-rr1.6%
Taylor expanded in f around 0 1.6%
associate-*r/1.6%
Simplified1.6%
Applied egg-rr14.7%
Final simplification14.7%
(FPCore (f) :precision binary64 (/ (* 4.0 (- (log 0.125))) -0.75))
double code(double f) {
return (4.0 * -log(0.125)) / -0.75;
}
real(8) function code(f)
real(8), intent (in) :: f
code = (4.0d0 * -log(0.125d0)) / (-0.75d0)
end function
public static double code(double f) {
return (4.0 * -Math.log(0.125)) / -0.75;
}
def code(f): return (4.0 * -math.log(0.125)) / -0.75
function code(f) return Float64(Float64(4.0 * Float64(-log(0.125))) / -0.75) end
function tmp = code(f) tmp = (4.0 * -log(0.125)) / -0.75; end
code[f_] := N[(N[(4.0 * (-N[Log[0.125], $MachinePrecision])), $MachinePrecision] / -0.75), $MachinePrecision]
\begin{array}{l}
\\
\frac{4 \cdot \left(-\log 0.125\right)}{-0.75}
\end{array}
Initial program 7.7%
Applied egg-rr1.6%
Taylor expanded in f around 0 1.6%
associate-*r/1.6%
Simplified1.6%
Applied egg-rr15.1%
Final simplification15.1%
(FPCore (f) :precision binary64 (/ (* 4.0 (- (log 0.125))) -0.6666666666666666))
double code(double f) {
return (4.0 * -log(0.125)) / -0.6666666666666666;
}
real(8) function code(f)
real(8), intent (in) :: f
code = (4.0d0 * -log(0.125d0)) / (-0.6666666666666666d0)
end function
public static double code(double f) {
return (4.0 * -Math.log(0.125)) / -0.6666666666666666;
}
def code(f): return (4.0 * -math.log(0.125)) / -0.6666666666666666
function code(f) return Float64(Float64(4.0 * Float64(-log(0.125))) / -0.6666666666666666) end
function tmp = code(f) tmp = (4.0 * -log(0.125)) / -0.6666666666666666; end
code[f_] := N[(N[(4.0 * (-N[Log[0.125], $MachinePrecision])), $MachinePrecision] / -0.6666666666666666), $MachinePrecision]
\begin{array}{l}
\\
\frac{4 \cdot \left(-\log 0.125\right)}{-0.6666666666666666}
\end{array}
Initial program 7.7%
Applied egg-rr1.6%
Taylor expanded in f around 0 1.6%
associate-*r/1.6%
Simplified1.6%
Applied egg-rr15.2%
Final simplification15.2%
(FPCore (f) :precision binary64 (/ (* 4.0 (- (log 0.125))) -0.5))
double code(double f) {
return (4.0 * -log(0.125)) / -0.5;
}
real(8) function code(f)
real(8), intent (in) :: f
code = (4.0d0 * -log(0.125d0)) / (-0.5d0)
end function
public static double code(double f) {
return (4.0 * -Math.log(0.125)) / -0.5;
}
def code(f): return (4.0 * -math.log(0.125)) / -0.5
function code(f) return Float64(Float64(4.0 * Float64(-log(0.125))) / -0.5) end
function tmp = code(f) tmp = (4.0 * -log(0.125)) / -0.5; end
code[f_] := N[(N[(4.0 * (-N[Log[0.125], $MachinePrecision])), $MachinePrecision] / -0.5), $MachinePrecision]
\begin{array}{l}
\\
\frac{4 \cdot \left(-\log 0.125\right)}{-0.5}
\end{array}
Initial program 7.7%
Applied egg-rr1.6%
Taylor expanded in f around 0 1.6%
associate-*r/1.6%
Simplified1.6%
Applied egg-rr15.4%
Final simplification15.4%
(FPCore (f) :precision binary64 (- (/ (* 4.0 (log 0.125)) -0.25)))
double code(double f) {
return -((4.0 * log(0.125)) / -0.25);
}
real(8) function code(f)
real(8), intent (in) :: f
code = -((4.0d0 * log(0.125d0)) / (-0.25d0))
end function
public static double code(double f) {
return -((4.0 * Math.log(0.125)) / -0.25);
}
def code(f): return -((4.0 * math.log(0.125)) / -0.25)
function code(f) return Float64(-Float64(Float64(4.0 * log(0.125)) / -0.25)) end
function tmp = code(f) tmp = -((4.0 * log(0.125)) / -0.25); end
code[f_] := (-N[(N[(4.0 * N[Log[0.125], $MachinePrecision]), $MachinePrecision] / -0.25), $MachinePrecision])
\begin{array}{l}
\\
-\frac{4 \cdot \log 0.125}{-0.25}
\end{array}
Initial program 7.7%
Applied egg-rr1.6%
Taylor expanded in f around 0 1.6%
associate-*r/1.6%
Simplified1.6%
Applied egg-rr16.0%
Final simplification16.0%
(FPCore (f) :precision binary64 (/ (* 4.0 (- (log 0.125))) -0.08333333333333333))
double code(double f) {
return (4.0 * -log(0.125)) / -0.08333333333333333;
}
real(8) function code(f)
real(8), intent (in) :: f
code = (4.0d0 * -log(0.125d0)) / (-0.08333333333333333d0)
end function
public static double code(double f) {
return (4.0 * -Math.log(0.125)) / -0.08333333333333333;
}
def code(f): return (4.0 * -math.log(0.125)) / -0.08333333333333333
function code(f) return Float64(Float64(4.0 * Float64(-log(0.125))) / -0.08333333333333333) end
function tmp = code(f) tmp = (4.0 * -log(0.125)) / -0.08333333333333333; end
code[f_] := N[(N[(4.0 * (-N[Log[0.125], $MachinePrecision])), $MachinePrecision] / -0.08333333333333333), $MachinePrecision]
\begin{array}{l}
\\
\frac{4 \cdot \left(-\log 0.125\right)}{-0.08333333333333333}
\end{array}
Initial program 7.7%
Applied egg-rr1.6%
Taylor expanded in f around 0 1.6%
associate-*r/1.6%
Simplified1.6%
Applied egg-rr17.3%
Final simplification17.3%
(FPCore (f) :precision binary64 (/ (* 4.0 (- (log 0.125))) -0.006944444444444444))
double code(double f) {
return (4.0 * -log(0.125)) / -0.006944444444444444;
}
real(8) function code(f)
real(8), intent (in) :: f
code = (4.0d0 * -log(0.125d0)) / (-0.006944444444444444d0)
end function
public static double code(double f) {
return (4.0 * -Math.log(0.125)) / -0.006944444444444444;
}
def code(f): return (4.0 * -math.log(0.125)) / -0.006944444444444444
function code(f) return Float64(Float64(4.0 * Float64(-log(0.125))) / -0.006944444444444444) end
function tmp = code(f) tmp = (4.0 * -log(0.125)) / -0.006944444444444444; end
code[f_] := N[(N[(4.0 * (-N[Log[0.125], $MachinePrecision])), $MachinePrecision] / -0.006944444444444444), $MachinePrecision]
\begin{array}{l}
\\
\frac{4 \cdot \left(-\log 0.125\right)}{-0.006944444444444444}
\end{array}
Initial program 7.7%
Applied egg-rr1.6%
Taylor expanded in f around 0 1.6%
associate-*r/1.6%
Simplified1.6%
Applied egg-rr17.6%
Final simplification17.6%
herbie shell --seed 2024011
(FPCore (f)
:name "VandenBroeck and Keller, Equation (20)"
:precision binary64
(- (* (/ 1.0 (/ PI 4.0)) (log (/ (+ (exp (* (/ PI 4.0) f)) (exp (- (* (/ PI 4.0) f)))) (- (exp (* (/ PI 4.0) f)) (exp (- (* (/ PI 4.0) f)))))))))