
(FPCore (f) :precision binary64 (let* ((t_0 (* (/ PI 4.0) f)) (t_1 (exp t_0)) (t_2 (exp (- t_0)))) (- (* (/ 1.0 (/ PI 4.0)) (log (/ (+ t_1 t_2) (- t_1 t_2)))))))
double code(double f) {
    /* Scaled argument a = (pi/4) * f. */
    const double a = (M_PI / 4.0) * f;
    /* The two exponentials e^a and e^-a. */
    const double ep = exp(a);
    const double en = exp(-a);
    /* -(4/pi) * log((e^a + e^-a) / (e^a - e^-a)), i.e. -(4/pi) * log(coth(a)). */
    return -((1.0 / (M_PI / 4.0)) * log((ep + en) / (ep - en)));
}
public static double code(double f) {
    // Scaled argument a = (pi/4) * f.
    double a = (Math.PI / 4.0) * f;
    double expPos = Math.exp(a);
    double expNeg = Math.exp(-a);
    // -(4/pi) * log((e^a + e^-a) / (e^a - e^-a))
    double ratio = (expPos + expNeg) / (expPos - expNeg);
    return -((1.0 / (Math.PI / 4.0)) * Math.log(ratio));
}
def code(f):
    """Evaluate -(4/pi) * log((e^t0 + e^-t0) / (e^t0 - e^-t0)) with t0 = (pi/4)*f.

    The original generated line had all statements on one physical line,
    which is a SyntaxError in Python; reformatted into valid code with the
    same operations in the same order.
    """
    t_0 = (math.pi / 4.0) * f
    t_1 = math.exp(t_0)
    t_2 = math.exp(-t_0)
    return -((1.0 / (math.pi / 4.0)) * math.log(((t_1 + t_2) / (t_1 - t_2))))
# Evaluates -(4/pi) * log((e^t0 + e^-t0) / (e^t0 - e^-t0)) with t0 = (pi/4)*f.
# The original generated line ran all statements together on one line with
# no separators, which Julia cannot parse; reformatted, same operations.
function code(f)
    t_0 = Float64(Float64(pi / 4.0) * f)
    t_1 = exp(t_0)
    t_2 = exp(Float64(-t_0))
    return Float64(-Float64(Float64(1.0 / Float64(pi / 4.0)) * log(Float64(Float64(t_1 + t_2) / Float64(t_1 - t_2)))))
end
function tmp = code(f)
    % Scaled argument t_0 = (pi/4) * f.
    t_0 = (pi / 4.0) * f;
    t_1 = exp(t_0);
    t_2 = exp(-t_0);
    % -(4/pi) * log((e^t0 + e^-t0) / (e^t0 - e^-t0))
    tmp = -((1.0 / (pi / 4.0)) * log(((t_1 + t_2) / (t_1 - t_2))));
end
(* Machine-translated initial program: -(4/Pi) * Log[(E^t0 + E^-t0)/(E^t0 - E^-t0)] with t0 = (Pi/4)*f, every intermediate rounded via N[..., $MachinePrecision]. *)
code[f_] := Block[{t$95$0 = N[(N[(Pi / 4.0), $MachinePrecision] * f), $MachinePrecision]}, Block[{t$95$1 = N[Exp[t$95$0], $MachinePrecision]}, Block[{t$95$2 = N[Exp[(-t$95$0)], $MachinePrecision]}, (-N[(N[(1.0 / N[(Pi / 4.0), $MachinePrecision]), $MachinePrecision] * N[Log[N[(N[(t$95$1 + t$95$2), $MachinePrecision] / N[(t$95$1 - t$95$2), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision])]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{\pi}{4} \cdot f\\
t_1 := e^{t_0}\\
t_2 := e^{-t_0}\\
-\frac{1}{\frac{\pi}{4}} \cdot \log \left(\frac{t_1 + t_2}{t_1 - t_2}\right)
\end{array}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 9 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (f) :precision binary64 (let* ((t_0 (* (/ PI 4.0) f)) (t_1 (exp t_0)) (t_2 (exp (- t_0)))) (- (* (/ 1.0 (/ PI 4.0)) (log (/ (+ t_1 t_2) (- t_1 t_2)))))))
double code(double f) {
    /* Scaled argument a = (pi/4) * f. */
    const double a = (M_PI / 4.0) * f;
    /* The two exponentials e^a and e^-a. */
    const double ep = exp(a);
    const double en = exp(-a);
    /* -(4/pi) * log((e^a + e^-a) / (e^a - e^-a)), i.e. -(4/pi) * log(coth(a)). */
    return -((1.0 / (M_PI / 4.0)) * log((ep + en) / (ep - en)));
}
public static double code(double f) {
    // Scaled argument a = (pi/4) * f.
    double a = (Math.PI / 4.0) * f;
    double expPos = Math.exp(a);
    double expNeg = Math.exp(-a);
    // -(4/pi) * log((e^a + e^-a) / (e^a - e^-a))
    double ratio = (expPos + expNeg) / (expPos - expNeg);
    return -((1.0 / (Math.PI / 4.0)) * Math.log(ratio));
}
def code(f):
    """Evaluate -(4/pi) * log((e^t0 + e^-t0) / (e^t0 - e^-t0)) with t0 = (pi/4)*f.

    The original generated line had all statements on one physical line,
    which is a SyntaxError in Python; reformatted into valid code with the
    same operations in the same order.
    """
    t_0 = (math.pi / 4.0) * f
    t_1 = math.exp(t_0)
    t_2 = math.exp(-t_0)
    return -((1.0 / (math.pi / 4.0)) * math.log(((t_1 + t_2) / (t_1 - t_2))))
# Evaluates -(4/pi) * log((e^t0 + e^-t0) / (e^t0 - e^-t0)) with t0 = (pi/4)*f.
# The original generated line ran all statements together on one line with
# no separators, which Julia cannot parse; reformatted, same operations.
function code(f)
    t_0 = Float64(Float64(pi / 4.0) * f)
    t_1 = exp(t_0)
    t_2 = exp(Float64(-t_0))
    return Float64(-Float64(Float64(1.0 / Float64(pi / 4.0)) * log(Float64(Float64(t_1 + t_2) / Float64(t_1 - t_2)))))
end
function tmp = code(f)
    % Scaled argument t_0 = (pi/4) * f.
    t_0 = (pi / 4.0) * f;
    t_1 = exp(t_0);
    t_2 = exp(-t_0);
    % -(4/pi) * log((e^t0 + e^-t0) / (e^t0 - e^-t0))
    tmp = -((1.0 / (pi / 4.0)) * log(((t_1 + t_2) / (t_1 - t_2))));
end
(* Machine-translated initial program: -(4/Pi) * Log[(E^t0 + E^-t0)/(E^t0 - E^-t0)] with t0 = (Pi/4)*f, every intermediate rounded via N[..., $MachinePrecision]. *)
code[f_] := Block[{t$95$0 = N[(N[(Pi / 4.0), $MachinePrecision] * f), $MachinePrecision]}, Block[{t$95$1 = N[Exp[t$95$0], $MachinePrecision]}, Block[{t$95$2 = N[Exp[(-t$95$0)], $MachinePrecision]}, (-N[(N[(1.0 / N[(Pi / 4.0), $MachinePrecision]), $MachinePrecision] * N[Log[N[(N[(t$95$1 + t$95$2), $MachinePrecision] / N[(t$95$1 - t$95$2), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision])]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{\pi}{4} \cdot f\\
t_1 := e^{t_0}\\
t_2 := e^{-t_0}\\
-\frac{1}{\frac{\pi}{4}} \cdot \log \left(\frac{t_1 + t_2}{t_1 - t_2}\right)
\end{array}
\end{array}
(FPCore (f)
:precision binary64
(*
-4.0
(/
(log1p
(+
(/ 1.0 (expm1 (* PI (* 0.5 f))))
(+ -1.0 (/ -1.0 (expm1 (* PI (* f -0.5)))))))
PI)))
double code(double f) {
return -4.0 * (log1p(((1.0 / expm1((((double) M_PI) * (0.5 * f)))) + (-1.0 + (-1.0 / expm1((((double) M_PI) * (f * -0.5))))))) / ((double) M_PI));
}
public static double code(double f) {
return -4.0 * (Math.log1p(((1.0 / Math.expm1((Math.PI * (0.5 * f)))) + (-1.0 + (-1.0 / Math.expm1((Math.PI * (f * -0.5))))))) / Math.PI);
}
def code(f):
    """Herbie alternative using expm1/log1p; same value and operation order
    as the original single-expression form, split into named steps."""
    pos_arg = math.pi * (0.5 * f)
    neg_arg = math.pi * (f * -0.5)
    inner = (1.0 / math.expm1(pos_arg)) + (-1.0 + (-1.0 / math.expm1(neg_arg)))
    return -4.0 * (math.log1p(inner) / math.pi)
function code(f) return Float64(-4.0 * Float64(log1p(Float64(Float64(1.0 / expm1(Float64(pi * Float64(0.5 * f)))) + Float64(-1.0 + Float64(-1.0 / expm1(Float64(pi * Float64(f * -0.5))))))) / pi)) end
code[f_] := N[(-4.0 * N[(N[Log[1 + N[(N[(1.0 / N[(Exp[N[(Pi * N[(0.5 * f), $MachinePrecision]), $MachinePrecision]] - 1), $MachinePrecision]), $MachinePrecision] + N[(-1.0 + N[(-1.0 / N[(Exp[N[(Pi * N[(f * -0.5), $MachinePrecision]), $MachinePrecision]] - 1), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] / Pi), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
-4 \cdot \frac{\mathsf{log1p}\left(\frac{1}{\mathsf{expm1}\left(\pi \cdot \left(0.5 \cdot f\right)\right)} + \left(-1 + \frac{-1}{\mathsf{expm1}\left(\pi \cdot \left(f \cdot -0.5\right)\right)}\right)\right)}{\pi}
\end{array}
Initial program 5.1%
Simplified98.1%
Taylor expanded in f around inf 4.6%
*-commutative4.6%
*-commutative4.6%
associate-*r*4.6%
expm1-undefine7.5%
expm1-define98.2%
*-commutative98.2%
associate-*l*98.2%
Simplified98.2%
log1p-expm1-u98.2%
expm1-undefine98.2%
add-exp-log98.2%
associate-*r*98.2%
*-commutative98.2%
associate-*l*98.2%
associate-*r*98.2%
Applied egg-rr98.2%
sub-neg98.2%
sub-neg98.2%
metadata-eval98.2%
associate-+l+98.3%
*-commutative98.3%
distribute-neg-frac98.3%
metadata-eval98.3%
associate-*l*98.3%
Simplified98.3%
Final simplification98.3%
(FPCore (f)
:precision binary64
(*
-4.0
(/
(log
(+ (/ 1.0 (expm1 (* f (* PI 0.5)))) (/ -1.0 (expm1 (* PI (* f -0.5))))))
PI)))
double code(double f) {
return -4.0 * (log(((1.0 / expm1((f * (((double) M_PI) * 0.5)))) + (-1.0 / expm1((((double) M_PI) * (f * -0.5)))))) / ((double) M_PI));
}
public static double code(double f) {
return -4.0 * (Math.log(((1.0 / Math.expm1((f * (Math.PI * 0.5)))) + (-1.0 / Math.expm1((Math.PI * (f * -0.5)))))) / Math.PI);
}
def code(f): return -4.0 * (math.log(((1.0 / math.expm1((f * (math.pi * 0.5)))) + (-1.0 / math.expm1((math.pi * (f * -0.5)))))) / math.pi)
function code(f) return Float64(-4.0 * Float64(log(Float64(Float64(1.0 / expm1(Float64(f * Float64(pi * 0.5)))) + Float64(-1.0 / expm1(Float64(pi * Float64(f * -0.5)))))) / pi)) end
code[f_] := N[(-4.0 * N[(N[Log[N[(N[(1.0 / N[(Exp[N[(f * N[(Pi * 0.5), $MachinePrecision]), $MachinePrecision]] - 1), $MachinePrecision]), $MachinePrecision] + N[(-1.0 / N[(Exp[N[(Pi * N[(f * -0.5), $MachinePrecision]), $MachinePrecision]] - 1), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] / Pi), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
-4 \cdot \frac{\log \left(\frac{1}{\mathsf{expm1}\left(f \cdot \left(\pi \cdot 0.5\right)\right)} + \frac{-1}{\mathsf{expm1}\left(\pi \cdot \left(f \cdot -0.5\right)\right)}\right)}{\pi}
\end{array}
Initial program 5.1%
Simplified98.1%
Taylor expanded in f around inf 4.6%
*-commutative4.6%
*-commutative4.6%
associate-*r*4.6%
expm1-undefine7.5%
expm1-define98.2%
*-commutative98.2%
associate-*l*98.2%
Simplified98.2%
Final simplification98.2%
(FPCore (f) :precision binary64 (- (/ (* -4.0 (- (log (/ 4.0 PI)) (log f))) PI) (* (pow f 2.0) (* PI 0.08333333333333333))))
double code(double f) {
return ((-4.0 * (log((4.0 / ((double) M_PI))) - log(f))) / ((double) M_PI)) - (pow(f, 2.0) * (((double) M_PI) * 0.08333333333333333));
}
public static double code(double f) {
return ((-4.0 * (Math.log((4.0 / Math.PI)) - Math.log(f))) / Math.PI) - (Math.pow(f, 2.0) * (Math.PI * 0.08333333333333333));
}
def code(f): return ((-4.0 * (math.log((4.0 / math.pi)) - math.log(f))) / math.pi) - (math.pow(f, 2.0) * (math.pi * 0.08333333333333333))
function code(f) return Float64(Float64(Float64(-4.0 * Float64(log(Float64(4.0 / pi)) - log(f))) / pi) - Float64((f ^ 2.0) * Float64(pi * 0.08333333333333333))) end
function tmp = code(f) tmp = ((-4.0 * (log((4.0 / pi)) - log(f))) / pi) - ((f ^ 2.0) * (pi * 0.08333333333333333)); end
code[f_] := N[(N[(N[(-4.0 * N[(N[Log[N[(4.0 / Pi), $MachinePrecision]], $MachinePrecision] - N[Log[f], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / Pi), $MachinePrecision] - N[(N[Power[f, 2.0], $MachinePrecision] * N[(Pi * 0.08333333333333333), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{-4 \cdot \left(\log \left(\frac{4}{\pi}\right) - \log f\right)}{\pi} - {f}^{2} \cdot \left(\pi \cdot 0.08333333333333333\right)
\end{array}
Initial program 5.1%
Simplified98.1%
Taylor expanded in f around inf 4.6%
*-commutative4.6%
*-commutative4.6%
associate-*r*4.6%
expm1-undefine7.5%
expm1-define98.2%
*-commutative98.2%
associate-*l*98.2%
Simplified98.2%
log1p-expm1-u98.2%
expm1-undefine98.2%
add-exp-log98.2%
associate-*r*98.2%
*-commutative98.2%
associate-*l*98.2%
associate-*r*98.2%
Applied egg-rr98.2%
sub-neg98.2%
sub-neg98.2%
metadata-eval98.2%
associate-+l+98.3%
*-commutative98.3%
distribute-neg-frac98.3%
metadata-eval98.3%
associate-*l*98.3%
Simplified98.3%
Taylor expanded in f around 0 95.2%
mul-1-neg95.2%
unsub-neg95.2%
Simplified95.2%
(FPCore (f)
:precision binary64
(*
-4.0
(/
(log1p
(+
(/ 1.0 (expm1 (* PI (* 0.5 f))))
(+
-1.0
(/
(+
(* f (- 0.5 (* f (+ (* PI -0.125) (* PI 0.08333333333333333)))))
(* 2.0 (/ 1.0 PI)))
f))))
PI)))
double code(double f) {
return -4.0 * (log1p(((1.0 / expm1((((double) M_PI) * (0.5 * f)))) + (-1.0 + (((f * (0.5 - (f * ((((double) M_PI) * -0.125) + (((double) M_PI) * 0.08333333333333333))))) + (2.0 * (1.0 / ((double) M_PI)))) / f)))) / ((double) M_PI));
}
public static double code(double f) {
return -4.0 * (Math.log1p(((1.0 / Math.expm1((Math.PI * (0.5 * f)))) + (-1.0 + (((f * (0.5 - (f * ((Math.PI * -0.125) + (Math.PI * 0.08333333333333333))))) + (2.0 * (1.0 / Math.PI))) / f)))) / Math.PI);
}
def code(f): return -4.0 * (math.log1p(((1.0 / math.expm1((math.pi * (0.5 * f)))) + (-1.0 + (((f * (0.5 - (f * ((math.pi * -0.125) + (math.pi * 0.08333333333333333))))) + (2.0 * (1.0 / math.pi))) / f)))) / math.pi)
function code(f) return Float64(-4.0 * Float64(log1p(Float64(Float64(1.0 / expm1(Float64(pi * Float64(0.5 * f)))) + Float64(-1.0 + Float64(Float64(Float64(f * Float64(0.5 - Float64(f * Float64(Float64(pi * -0.125) + Float64(pi * 0.08333333333333333))))) + Float64(2.0 * Float64(1.0 / pi))) / f)))) / pi)) end
code[f_] := N[(-4.0 * N[(N[Log[1 + N[(N[(1.0 / N[(Exp[N[(Pi * N[(0.5 * f), $MachinePrecision]), $MachinePrecision]] - 1), $MachinePrecision]), $MachinePrecision] + N[(-1.0 + N[(N[(N[(f * N[(0.5 - N[(f * N[(N[(Pi * -0.125), $MachinePrecision] + N[(Pi * 0.08333333333333333), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(2.0 * N[(1.0 / Pi), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / f), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] / Pi), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
-4 \cdot \frac{\mathsf{log1p}\left(\frac{1}{\mathsf{expm1}\left(\pi \cdot \left(0.5 \cdot f\right)\right)} + \left(-1 + \frac{f \cdot \left(0.5 - f \cdot \left(\pi \cdot -0.125 + \pi \cdot 0.08333333333333333\right)\right) + 2 \cdot \frac{1}{\pi}}{f}\right)\right)}{\pi}
\end{array}
Initial program 5.1%
Simplified98.1%
Taylor expanded in f around inf 4.6%
*-commutative4.6%
*-commutative4.6%
associate-*r*4.6%
expm1-undefine7.5%
expm1-define98.2%
*-commutative98.2%
associate-*l*98.2%
Simplified98.2%
log1p-expm1-u98.2%
expm1-undefine98.2%
add-exp-log98.2%
associate-*r*98.2%
*-commutative98.2%
associate-*l*98.2%
associate-*r*98.2%
Applied egg-rr98.2%
sub-neg98.2%
sub-neg98.2%
metadata-eval98.2%
associate-+l+98.3%
*-commutative98.3%
distribute-neg-frac98.3%
metadata-eval98.3%
associate-*l*98.3%
Simplified98.3%
Taylor expanded in f around 0 95.1%
Final simplification95.1%
(FPCore (f)
:precision binary64
(*
-4.0
(/
(log1p
(/
(+
(*
f
(+
-1.0
(*
f
(-
(+ (* PI -0.08333333333333333) (* PI 0.125))
(+ (* PI -0.125) (* PI 0.08333333333333333))))))
(* 4.0 (/ 1.0 PI)))
f))
PI)))
double code(double f) {
return -4.0 * (log1p((((f * (-1.0 + (f * (((((double) M_PI) * -0.08333333333333333) + (((double) M_PI) * 0.125)) - ((((double) M_PI) * -0.125) + (((double) M_PI) * 0.08333333333333333)))))) + (4.0 * (1.0 / ((double) M_PI)))) / f)) / ((double) M_PI));
}
public static double code(double f) {
return -4.0 * (Math.log1p((((f * (-1.0 + (f * (((Math.PI * -0.08333333333333333) + (Math.PI * 0.125)) - ((Math.PI * -0.125) + (Math.PI * 0.08333333333333333)))))) + (4.0 * (1.0 / Math.PI))) / f)) / Math.PI);
}
def code(f): return -4.0 * (math.log1p((((f * (-1.0 + (f * (((math.pi * -0.08333333333333333) + (math.pi * 0.125)) - ((math.pi * -0.125) + (math.pi * 0.08333333333333333)))))) + (4.0 * (1.0 / math.pi))) / f)) / math.pi)
function code(f) return Float64(-4.0 * Float64(log1p(Float64(Float64(Float64(f * Float64(-1.0 + Float64(f * Float64(Float64(Float64(pi * -0.08333333333333333) + Float64(pi * 0.125)) - Float64(Float64(pi * -0.125) + Float64(pi * 0.08333333333333333)))))) + Float64(4.0 * Float64(1.0 / pi))) / f)) / pi)) end
code[f_] := N[(-4.0 * N[(N[Log[1 + N[(N[(N[(f * N[(-1.0 + N[(f * N[(N[(N[(Pi * -0.08333333333333333), $MachinePrecision] + N[(Pi * 0.125), $MachinePrecision]), $MachinePrecision] - N[(N[(Pi * -0.125), $MachinePrecision] + N[(Pi * 0.08333333333333333), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(4.0 * N[(1.0 / Pi), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / f), $MachinePrecision]], $MachinePrecision] / Pi), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
-4 \cdot \frac{\mathsf{log1p}\left(\frac{f \cdot \left(-1 + f \cdot \left(\left(\pi \cdot -0.08333333333333333 + \pi \cdot 0.125\right) - \left(\pi \cdot -0.125 + \pi \cdot 0.08333333333333333\right)\right)\right) + 4 \cdot \frac{1}{\pi}}{f}\right)}{\pi}
\end{array}
Initial program 5.1%
Simplified98.1%
Taylor expanded in f around inf 4.6%
*-commutative4.6%
*-commutative4.6%
associate-*r*4.6%
expm1-undefine7.5%
expm1-define98.2%
*-commutative98.2%
associate-*l*98.2%
Simplified98.2%
log1p-expm1-u98.2%
expm1-undefine98.2%
add-exp-log98.2%
associate-*r*98.2%
*-commutative98.2%
associate-*l*98.2%
associate-*r*98.2%
Applied egg-rr98.2%
sub-neg98.2%
sub-neg98.2%
metadata-eval98.2%
associate-+l+98.3%
*-commutative98.3%
distribute-neg-frac98.3%
metadata-eval98.3%
associate-*l*98.3%
Simplified98.3%
Taylor expanded in f around 0 95.1%
Final simplification95.1%
(FPCore (f) :precision binary64 (/ (* -4.0 (log (/ (/ 4.0 PI) f))) PI))
double code(double f) {
return (-4.0 * log(((4.0 / ((double) M_PI)) / f))) / ((double) M_PI);
}
public static double code(double f) {
return (-4.0 * Math.log(((4.0 / Math.PI) / f))) / Math.PI;
}
def code(f):
    """Herbie alternative: -(4/pi) * log((4/pi) / f), same value and
    operation order as the original single-expression form."""
    ratio = (4.0 / math.pi) / f
    return (-4.0 * math.log(ratio)) / math.pi
function code(f) return Float64(Float64(-4.0 * log(Float64(Float64(4.0 / pi) / f))) / pi) end
function tmp = code(f) tmp = (-4.0 * log(((4.0 / pi) / f))) / pi; end
code[f_] := N[(N[(-4.0 * N[Log[N[(N[(4.0 / Pi), $MachinePrecision] / f), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / Pi), $MachinePrecision]
\begin{array}{l}
\\
\frac{-4 \cdot \log \left(\frac{\frac{4}{\pi}}{f}\right)}{\pi}
\end{array}
Initial program 5.1%
Simplified98.1%
Taylor expanded in f around 0 94.8%
mul-1-neg94.8%
unsub-neg94.8%
Simplified94.8%
associate-*r/94.8%
diff-log94.7%
Applied egg-rr94.7%
Final simplification94.7%
(FPCore (f) :precision binary64 (* (log (/ (/ 4.0 PI) f)) (/ -4.0 PI)))
double code(double f) {
return log(((4.0 / ((double) M_PI)) / f)) * (-4.0 / ((double) M_PI));
}
public static double code(double f) {
return Math.log(((4.0 / Math.PI) / f)) * (-4.0 / Math.PI);
}
def code(f): return math.log(((4.0 / math.pi) / f)) * (-4.0 / math.pi)
function code(f) return Float64(log(Float64(Float64(4.0 / pi) / f)) * Float64(-4.0 / pi)) end
function tmp = code(f) tmp = log(((4.0 / pi) / f)) * (-4.0 / pi); end
code[f_] := N[(N[Log[N[(N[(4.0 / Pi), $MachinePrecision] / f), $MachinePrecision]], $MachinePrecision] * N[(-4.0 / Pi), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\log \left(\frac{\frac{4}{\pi}}{f}\right) \cdot \frac{-4}{\pi}
\end{array}
Initial program 5.1%
Simplified98.1%
Taylor expanded in f around 0 94.7%
*-commutative94.7%
associate-/r*94.7%
Simplified94.7%
(FPCore (f) :precision binary64 (* -4.0 (/ (log1p (/ 4.0 (* PI f))) PI)))
double code(double f) {
return -4.0 * (log1p((4.0 / (((double) M_PI) * f))) / ((double) M_PI));
}
public static double code(double f) {
return -4.0 * (Math.log1p((4.0 / (Math.PI * f))) / Math.PI);
}
def code(f): return -4.0 * (math.log1p((4.0 / (math.pi * f))) / math.pi)
function code(f) return Float64(-4.0 * Float64(log1p(Float64(4.0 / Float64(pi * f))) / pi)) end
code[f_] := N[(-4.0 * N[(N[Log[1 + N[(4.0 / N[(Pi * f), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] / Pi), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
-4 \cdot \frac{\mathsf{log1p}\left(\frac{4}{\pi \cdot f}\right)}{\pi}
\end{array}
Initial program 5.1%
Simplified98.1%
Taylor expanded in f around inf 4.6%
*-commutative4.6%
*-commutative4.6%
associate-*r*4.6%
expm1-undefine7.5%
expm1-define98.2%
*-commutative98.2%
associate-*l*98.2%
Simplified98.2%
log1p-expm1-u98.2%
expm1-undefine98.2%
add-exp-log98.2%
associate-*r*98.2%
*-commutative98.2%
associate-*l*98.2%
associate-*r*98.2%
Applied egg-rr98.2%
sub-neg98.2%
sub-neg98.2%
metadata-eval98.2%
associate-+l+98.3%
*-commutative98.3%
distribute-neg-frac98.3%
metadata-eval98.3%
associate-*l*98.3%
Simplified98.3%
inv-pow98.3%
add-sqr-sqrt98.3%
unpow-prod-down98.3%
Applied egg-rr98.3%
pow-sqr98.3%
*-commutative98.3%
metadata-eval98.3%
Simplified98.3%
Taylor expanded in f around 0 94.5%
Final simplification94.5%
(FPCore (f) :precision binary64 (* (/ -4.0 PI) (log 0.0)))
double code(double f) {
    /* Degenerate Herbie alternative: the argument f is unused.
       log(0.0) evaluates to -HUGE_VAL per C99 (raising the divide-by-zero
       FP exception), so this always returns +infinity. */
    return (-4.0 / ((double) M_PI)) * log(0.0);
}
public static double code(double f) {
    // Degenerate Herbie alternative: f is unused. Math.log(0.0) is
    // NEGATIVE_INFINITY in Java, so this always returns POSITIVE_INFINITY.
    return (-4.0 / Math.PI) * Math.log(0.0);
}
# Degenerate Herbie alternative: f is unused, and math.log(0.0) raises
# ValueError in CPython (unlike the C/Java translations, which produce an
# infinity), so calling this function always raises.
def code(f): return (-4.0 / math.pi) * math.log(0.0)
function code(f) return Float64(Float64(-4.0 / pi) * log(0.0)) end
function tmp = code(f) tmp = (-4.0 / pi) * log(0.0); end
code[f_] := N[(N[(-4.0 / Pi), $MachinePrecision] * N[Log[0.0], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{-4}{\pi} \cdot \log 0
\end{array}
Initial program 5.1%
Simplified98.1%
Applied egg-rr0.7%
+-inverses0.7%
Simplified0.7%
Final simplification0.7%
herbie shell --seed 2024118
(FPCore (f)
:name "VandenBroeck and Keller, Equation (20)"
:precision binary64
(- (* (/ 1.0 (/ PI 4.0)) (log (/ (+ (exp (* (/ PI 4.0) f)) (exp (- (* (/ PI 4.0) f)))) (- (exp (* (/ PI 4.0) f)) (exp (- (* (/ PI 4.0) f)))))))))