
(FPCore (f) :precision binary64 (let* ((t_0 (* (/ PI 4.0) f)) (t_1 (exp t_0)) (t_2 (exp (- t_0)))) (- (* (/ 1.0 (/ PI 4.0)) (log (/ (+ t_1 t_2) (- t_1 t_2)))))))
/* VandenBroeck & Keller, Eq. (20):
 * y = -(4/pi) * ln((e^t + e^-t) / (e^t - e^-t)), with t = (pi/4) * f.
 * Direct translation of the FPCore spec above; operation order preserved. */
double code(double f) {
    double quarter_pi = ((double) M_PI) / 4.0;
    double t = quarter_pi * f;
    double e_plus = exp(t);   /* e^{t}  */
    double e_minus = exp(-t); /* e^{-t} */
    double ratio = (e_plus + e_minus) / (e_plus - e_minus);
    return -((1.0 / quarter_pi) * log(ratio));
}
// VandenBroeck & Keller, Eq. (20):
// y = -(4/pi) * ln((e^t + e^-t) / (e^t - e^-t)), with t = (pi/4) * f.
// Direct translation of the FPCore spec; operation order preserved.
public static double code(double f) {
    final double quarterPi = Math.PI / 4.0;
    double t = quarterPi * f;
    double ePlus = Math.exp(t);   // e^{t}
    double eMinus = Math.exp(-t); // e^{-t}
    double ratio = (ePlus + eMinus) / (ePlus - eMinus);
    return -((1.0 / quarterPi) * Math.log(ratio));
}
def code(f):
    """VandenBroeck & Keller, Eq. (20).

    Computes -(4/pi) * log((e**t + e**-t) / (e**t - e**-t)) with
    t = (pi/4) * f, i.e. -(4/pi) * log(coth(pi*f/4)).

    Note: the generated source collapsed all statements onto one line,
    which is not valid Python; reformatted with proper newlines.
    """
    t_0 = (math.pi / 4.0) * f
    t_1 = math.exp(t_0)
    t_2 = math.exp(-t_0)
    return -((1.0 / (math.pi / 4.0)) * math.log((t_1 + t_2) / (t_1 - t_2)))
# VandenBroeck & Keller Eq. (20): -(4/pi) * log(coth(pi*f/4)) in binary64.
# The generated source had no statement separators (invalid Julia); reformatted.
function code(f)
    t_0 = Float64(Float64(pi / 4.0) * f)
    t_1 = exp(t_0)
    t_2 = exp(Float64(-t_0))
    return Float64(-Float64(Float64(1.0 / Float64(pi / 4.0)) * log(Float64(Float64(t_1 + t_2) / Float64(t_1 - t_2)))))
end
% code: VandenBroeck & Keller Eq. (20), -(4/pi)*log(coth(pi*f/4)) expanded via exp().
function tmp = code(f) t_0 = (pi / 4.0) * f; t_1 = exp(t_0); t_2 = exp(-t_0); tmp = -((1.0 / (pi / 4.0)) * log(((t_1 + t_2) / (t_1 - t_2)))); end
(* code: VandenBroeck & Keller Eq. (20), -(4/Pi) Log[Coth[Pi f/4]] expanded via Exp at machine precision. *)
code[f_] := Block[{t$95$0 = N[(N[(Pi / 4.0), $MachinePrecision] * f), $MachinePrecision]}, Block[{t$95$1 = N[Exp[t$95$0], $MachinePrecision]}, Block[{t$95$2 = N[Exp[(-t$95$0)], $MachinePrecision]}, (-N[(N[(1.0 / N[(Pi / 4.0), $MachinePrecision]), $MachinePrecision] * N[Log[N[(N[(t$95$1 + t$95$2), $MachinePrecision] / N[(t$95$1 - t$95$2), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision])]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{\pi}{4} \cdot f\\
t_1 := e^{t_0}\\
t_2 := e^{-t_0}\\
-\frac{1}{\frac{\pi}{4}} \cdot \log \left(\frac{t_1 + t_2}{t_1 - t_2}\right)
\end{array}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 6 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (f) :precision binary64 (let* ((t_0 (* (/ PI 4.0) f)) (t_1 (exp t_0)) (t_2 (exp (- t_0)))) (- (* (/ 1.0 (/ PI 4.0)) (log (/ (+ t_1 t_2) (- t_1 t_2)))))))
/* Duplicate listing of the initial program:
 * y = -(4/pi) * ln((e^t0 + e^-t0) / (e^t0 - e^-t0)), t0 = (pi/4) * f. */
double code(double f) {
double t_0 = (((double) M_PI) / 4.0) * f;
double t_1 = exp(t_0);
double t_2 = exp(-t_0);
return -((1.0 / (((double) M_PI) / 4.0)) * log(((t_1 + t_2) / (t_1 - t_2))));
}
// Duplicate listing of the initial program:
// y = -(4/pi) * ln((e^t0 + e^-t0) / (e^t0 - e^-t0)), t0 = (pi/4) * f.
public static double code(double f) {
double t_0 = (Math.PI / 4.0) * f;
double t_1 = Math.exp(t_0);
double t_2 = Math.exp(-t_0);
return -((1.0 / (Math.PI / 4.0)) * Math.log(((t_1 + t_2) / (t_1 - t_2))));
}
def code(f):
    """VandenBroeck & Keller, Eq. (20).

    Computes -(4/pi) * log((e**t + e**-t) / (e**t - e**-t)) with
    t = (pi/4) * f, i.e. -(4/pi) * log(coth(pi*f/4)).

    Note: the generated source collapsed all statements onto one line,
    which is not valid Python; reformatted with proper newlines.
    """
    t_0 = (math.pi / 4.0) * f
    t_1 = math.exp(t_0)
    t_2 = math.exp(-t_0)
    return -((1.0 / (math.pi / 4.0)) * math.log((t_1 + t_2) / (t_1 - t_2)))
# VandenBroeck & Keller Eq. (20): -(4/pi) * log(coth(pi*f/4)) in binary64.
# The generated source had no statement separators (invalid Julia); reformatted.
function code(f)
    t_0 = Float64(Float64(pi / 4.0) * f)
    t_1 = exp(t_0)
    t_2 = exp(Float64(-t_0))
    return Float64(-Float64(Float64(1.0 / Float64(pi / 4.0)) * log(Float64(Float64(t_1 + t_2) / Float64(t_1 - t_2)))))
end
% code: duplicate listing of the initial program, -(4/pi)*log(coth(pi*f/4)) via exp().
function tmp = code(f) t_0 = (pi / 4.0) * f; t_1 = exp(t_0); t_2 = exp(-t_0); tmp = -((1.0 / (pi / 4.0)) * log(((t_1 + t_2) / (t_1 - t_2)))); end
(* code: duplicate listing of the initial program, -(4/Pi) Log[Coth[Pi f/4]] via Exp. *)
code[f_] := Block[{t$95$0 = N[(N[(Pi / 4.0), $MachinePrecision] * f), $MachinePrecision]}, Block[{t$95$1 = N[Exp[t$95$0], $MachinePrecision]}, Block[{t$95$2 = N[Exp[(-t$95$0)], $MachinePrecision]}, (-N[(N[(1.0 / N[(Pi / 4.0), $MachinePrecision]), $MachinePrecision] * N[Log[N[(N[(t$95$1 + t$95$2), $MachinePrecision] / N[(t$95$1 - t$95$2), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision])]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{\pi}{4} \cdot f\\
t_1 := e^{t_0}\\
t_2 := e^{-t_0}\\
-\frac{1}{\frac{\pi}{4}} \cdot \log \left(\frac{t_1 + t_2}{t_1 - t_2}\right)
\end{array}
\end{array}
(FPCore (f) :precision binary64 (/ (- (log (/ 1.0 (tanh (* f (* PI 0.25)))))) (* PI 0.25)))
/* Herbie alternative (99.3% accuracy): the same function rewritten as
 * -(4/pi) * ln(coth(pi*f/4)) using tanh, avoiding the exp() subtraction. */
double code(double f) {
    double quarter_pi = ((double) M_PI) * 0.25;
    double coth = 1.0 / tanh(f * quarter_pi);
    return -log(coth) / quarter_pi;
}
// Herbie alternative (99.3% accuracy): -(4/pi) * ln(coth(pi*f/4)) via tanh.
public static double code(double f) {
    double quarterPi = Math.PI * 0.25;
    double coth = 1.0 / Math.tanh(f * quarterPi);
    return -Math.log(coth) / quarterPi;
}
def code(f):
    """-(4/pi) * log(coth(pi*f/4)): tanh-based Herbie rewrite (99.3% accuracy)."""
    quarter_pi = math.pi * 0.25
    coth = 1.0 / math.tanh(f * quarter_pi)
    return -math.log(coth) / quarter_pi
# -(4/pi) * log(coth(pi*f/4)): tanh-based Herbie rewrite (99.3% accuracy).
function code(f) return Float64(Float64(-log(Float64(1.0 / tanh(Float64(f * Float64(pi * 0.25)))))) / Float64(pi * 0.25)) end
% -(4/pi)*log(coth(pi*f/4)): tanh-based Herbie rewrite (99.3% accuracy).
function tmp = code(f) tmp = -log((1.0 / tanh((f * (pi * 0.25))))) / (pi * 0.25); end
(* -(4/Pi) Log[Coth[Pi f/4]]: tanh-based Herbie rewrite (99.3% accuracy). *)
code[f_] := N[((-N[Log[N[(1.0 / N[Tanh[N[(f * N[(Pi * 0.25), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]) / N[(Pi * 0.25), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{-\log \left(\frac{1}{\tanh \left(f \cdot \left(\pi \cdot 0.25\right)\right)}\right)}{\pi \cdot 0.25}
\end{array}
Initial program 9.4%
expm1-log1p-u 9.4%
expm1-udef 9.4%
Applied egg-rr 97.6%
expm1-def 97.6%
expm1-log1p 98.9%
times-frac 98.9%
metadata-eval 98.9%
*-lft-identity 98.9%
associate-/l* 98.9%
associate-/r/ 98.9%
associate-/l* 98.9%
associate-/r/ 98.9%
*-commutative 98.9%
Simplified 98.9%
cosh-def 98.9%
cosh-undef 98.9%
associate-*l/ 98.9%
associate-*l/ 98.9%
associate-/r* 98.9%
expm1-log1p-u 98.9%
expm1-udef 98.9%
Applied egg-rr 99.3%
expm1-def 99.3%
expm1-log1p 99.3%
associate-*r* 99.3%
*-commutative 99.3%
Simplified 99.3%
Final simplification 99.3%
(FPCore (f) :precision binary64 (* -4.0 (/ (- (log (tanh (* f (* PI 0.25))))) PI)))
/* Herbie alternative (99.3% accuracy): y = -4 * (-ln(tanh(pi*f/4)) / pi). */
double code(double f) {
    double th = tanh(f * (((double) M_PI) * 0.25));
    double inner = -log(th) / ((double) M_PI);
    return -4.0 * inner;
}
// Herbie alternative (99.3% accuracy): y = -4 * (-ln(tanh(pi*f/4)) / pi).
public static double code(double f) {
    double th = Math.tanh(f * (Math.PI * 0.25));
    double inner = -Math.log(th) / Math.PI;
    return -4.0 * inner;
}
def code(f):
    """-4 * (-log(tanh(pi*f/4)) / pi): Herbie rewrite (99.3% accuracy)."""
    th = math.tanh(f * (math.pi * 0.25))
    inner = -math.log(th) / math.pi
    return -4.0 * inner
# -4 * (-log(tanh(pi*f/4)) / pi): Herbie rewrite (99.3% accuracy).
function code(f) return Float64(-4.0 * Float64(Float64(-log(tanh(Float64(f * Float64(pi * 0.25))))) / pi)) end
% -4 * (-log(tanh(pi*f/4)) / pi): Herbie rewrite (99.3% accuracy).
function tmp = code(f) tmp = -4.0 * (-log(tanh((f * (pi * 0.25)))) / pi); end
(* -4 * (-Log[Tanh[Pi f/4]] / Pi): Herbie rewrite (99.3% accuracy). *)
code[f_] := N[(-4.0 * N[((-N[Log[N[Tanh[N[(f * N[(Pi * 0.25), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]], $MachinePrecision]) / Pi), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
-4 \cdot \frac{-\log \tanh \left(f \cdot \left(\pi \cdot 0.25\right)\right)}{\pi}
\end{array}
Initial program 9.4%
expm1-log1p-u 9.4%
expm1-udef 9.4%
Applied egg-rr 97.6%
expm1-def 97.6%
expm1-log1p 98.9%
times-frac 98.9%
metadata-eval 98.9%
*-lft-identity 98.9%
associate-/l* 98.9%
associate-/r/ 98.9%
associate-/l* 98.9%
associate-/r/ 98.9%
*-commutative 98.9%
Simplified 98.9%
Applied egg-rr 98.0%
expm1-def 98.0%
expm1-log1p 99.3%
neg-mul-1 99.3%
*-commutative 99.3%
times-frac 99.3%
metadata-eval 99.3%
*-commutative 99.3%
*-commutative 99.3%
associate-*l* 99.3%
Simplified 99.3%
Final simplification 99.3%
(FPCore (f) :precision binary64 (* (fabs (log (/ 4.0 (* PI f)))) (/ -4.0 PI)))
/* Taylor-based alternative (96.7%), accurate for small f:
 * y = |ln(4/(pi*f))| * (-4/pi). */
double code(double f) {
    double magnitude = fabs(log(4.0 / (((double) M_PI) * f)));
    return magnitude * (-4.0 / ((double) M_PI));
}
// Taylor-based alternative (96.7%), accurate for small f:
// y = |ln(4/(pi*f))| * (-4/pi).
public static double code(double f) {
    double magnitude = Math.abs(Math.log(4.0 / (Math.PI * f)));
    return magnitude * (-4.0 / Math.PI);
}
def code(f):
    """|log(4/(pi*f))| * (-4/pi): Taylor-based alternative, accurate for small f."""
    magnitude = math.fabs(math.log(4.0 / (math.pi * f)))
    return magnitude * (-4.0 / math.pi)
# abs(log(4/(pi*f))) * (-4/pi): Taylor-based alternative for small f.
function code(f) return Float64(abs(log(Float64(4.0 / Float64(pi * f)))) * Float64(-4.0 / pi)) end
% abs(log(4/(pi*f))) * (-4/pi): Taylor-based alternative for small f.
function tmp = code(f) tmp = abs(log((4.0 / (pi * f)))) * (-4.0 / pi); end
(* Abs[Log[4/(Pi f)]] * (-4/Pi): Taylor-based alternative for small f. *)
code[f_] := N[(N[Abs[N[Log[N[(4.0 / N[(Pi * f), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]], $MachinePrecision] * N[(-4.0 / Pi), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left|\log \left(\frac{4}{\pi \cdot f}\right)\right| \cdot \frac{-4}{\pi}
\end{array}
Initial program 9.4%
distribute-lft-neg-in 9.4%
*-commutative 9.4%
associate-/r/ 9.4%
associate-*l/ 9.4%
metadata-eval 9.4%
distribute-neg-frac 9.4%
Simplified 9.4%
Taylor expanded in f around 0 96.6%
mul-1-neg 96.6%
unsub-neg 96.6%
distribute-rgt-out-- 96.6%
metadata-eval 96.6%
Simplified 96.6%
add-exp-log 95.3%
diff-log 95.3%
Applied egg-rr 95.3%
add-exp-log 96.5%
*-commutative 96.5%
associate-/r* 96.5%
metadata-eval 96.5%
associate-/r* 96.5%
associate-/l/ 96.6%
add-sqr-sqrt 96.1%
sqrt-unprod 96.7%
pow2 96.7%
associate-/l/ 96.7%
associate-/r* 96.7%
Applied egg-rr 96.7%
unpow2 96.7%
rem-sqrt-square 96.7%
associate-/l/ 96.7%
Simplified 96.7%
Final simplification 96.7%
(FPCore (f) :precision binary64 (- (/ (log (/ (/ 4.0 f) PI)) (* PI 0.25))))
/* Taylor-based alternative (96.6%) for small f: y = -ln((4/f)/pi) / (pi/4). */
double code(double f) {
    double ratio = (4.0 / f) / ((double) M_PI);
    return -(log(ratio) / (((double) M_PI) * 0.25));
}
// Taylor-based alternative (96.6%) for small f: y = -ln((4/f)/pi) / (pi/4).
public static double code(double f) {
    double ratio = (4.0 / f) / Math.PI;
    return -(Math.log(ratio) / (Math.PI * 0.25));
}
def code(f):
    """-log((4/f)/pi) / (pi/4): Taylor-based alternative for small f."""
    ratio = (4.0 / f) / math.pi
    return -(math.log(ratio) / (math.pi * 0.25))
# -log((4/f)/pi) / (pi/4): Taylor-based alternative for small f.
function code(f) return Float64(-Float64(log(Float64(Float64(4.0 / f) / pi)) / Float64(pi * 0.25))) end
% -log((4/f)/pi) / (pi/4): Taylor-based alternative for small f.
function tmp = code(f) tmp = -(log(((4.0 / f) / pi)) / (pi * 0.25)); end
(* -Log[(4/f)/Pi] / (Pi/4): Taylor-based alternative for small f. *)
code[f_] := (-N[(N[Log[N[(N[(4.0 / f), $MachinePrecision] / Pi), $MachinePrecision]], $MachinePrecision] / N[(Pi * 0.25), $MachinePrecision]), $MachinePrecision])
\begin{array}{l}
\\
-\frac{\log \left(\frac{\frac{4}{f}}{\pi}\right)}{\pi \cdot 0.25}
\end{array}
Initial program 9.4%
expm1-log1p-u 9.4%
expm1-udef 9.4%
Applied egg-rr 97.6%
expm1-def 97.6%
expm1-log1p 98.9%
times-frac 98.9%
metadata-eval 98.9%
*-lft-identity 98.9%
associate-/l* 98.9%
associate-/r/ 98.9%
associate-/l* 98.9%
associate-/r/ 98.9%
*-commutative 98.9%
Simplified 98.9%
Taylor expanded in f around 0 96.6%
associate-/r* 96.6%
Simplified 96.6%
Final simplification 96.6%
(FPCore (f) :precision binary64 (* (/ -4.0 PI) (log (/ (/ 4.0 f) PI))))
/* Taylor-based alternative (96.6%) for small f: y = (-4/pi) * ln((4/f)/pi). */
double code(double f) {
    double scale = -4.0 / ((double) M_PI);
    return scale * log((4.0 / f) / ((double) M_PI));
}
// Taylor-based alternative (96.6%) for small f: y = (-4/pi) * ln((4/f)/pi).
public static double code(double f) {
    double scale = -4.0 / Math.PI;
    return scale * Math.log((4.0 / f) / Math.PI);
}
def code(f):
    """(-4/pi) * log((4/f)/pi): Taylor-based alternative for small f."""
    scale = -4.0 / math.pi
    return scale * math.log((4.0 / f) / math.pi)
# (-4/pi) * log((4/f)/pi): Taylor-based alternative for small f.
function code(f) return Float64(Float64(-4.0 / pi) * log(Float64(Float64(4.0 / f) / pi))) end
% (-4/pi) * log((4/f)/pi): Taylor-based alternative for small f.
function tmp = code(f) tmp = (-4.0 / pi) * log(((4.0 / f) / pi)); end
(* (-4/Pi) Log[(4/f)/Pi]: Taylor-based alternative for small f. *)
code[f_] := N[(N[(-4.0 / Pi), $MachinePrecision] * N[Log[N[(N[(4.0 / f), $MachinePrecision] / Pi), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{-4}{\pi} \cdot \log \left(\frac{\frac{4}{f}}{\pi}\right)
\end{array}
Initial program 9.4%
distribute-lft-neg-in 9.4%
*-commutative 9.4%
associate-/r/ 9.4%
associate-*l/ 9.4%
metadata-eval 9.4%
distribute-neg-frac 9.4%
Simplified 9.4%
Taylor expanded in f around 0 96.6%
mul-1-neg 96.6%
unsub-neg 96.6%
distribute-rgt-out-- 96.6%
metadata-eval 96.6%
Simplified 96.6%
Taylor expanded in f around 0 96.6%
associate-*r/ 96.6%
sub-neg 96.6%
distribute-rgt-in 96.6%
metadata-eval 96.6%
associate-/r* 96.6%
*-commutative 96.6%
distribute-rgt-in 96.6%
unsub-neg 96.6%
log-div 96.6%
associate-*l/ 96.5%
*-commutative 96.5%
associate-/r* 96.5%
metadata-eval 96.5%
Simplified 96.6%
Final simplification 96.6%
(FPCore (f) :precision binary64 (/ (* -4.0 (log (/ (/ 4.0 PI) f))) PI))
/* Taylor-based alternative (96.6%) for small f: y = (-4 * ln((4/pi)/f)) / pi. */
double code(double f) {
    double numerator = -4.0 * log((4.0 / ((double) M_PI)) / f);
    return numerator / ((double) M_PI);
}
// Taylor-based alternative (96.6%) for small f: y = (-4 * ln((4/pi)/f)) / pi.
public static double code(double f) {
    double numerator = -4.0 * Math.log((4.0 / Math.PI) / f);
    return numerator / Math.PI;
}
def code(f):
    """(-4 * log((4/pi)/f)) / pi: Taylor-based alternative for small f."""
    numerator = -4.0 * math.log((4.0 / math.pi) / f)
    return numerator / math.pi
# (-4 * log((4/pi)/f)) / pi: Taylor-based alternative for small f.
function code(f) return Float64(Float64(-4.0 * log(Float64(Float64(4.0 / pi) / f))) / pi) end
% (-4 * log((4/pi)/f)) / pi: Taylor-based alternative for small f.
function tmp = code(f) tmp = (-4.0 * log(((4.0 / pi) / f))) / pi; end
(* (-4 Log[(4/Pi)/f]) / Pi: Taylor-based alternative for small f. *)
code[f_] := N[(N[(-4.0 * N[Log[N[(N[(4.0 / Pi), $MachinePrecision] / f), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / Pi), $MachinePrecision]
\begin{array}{l}
\\
\frac{-4 \cdot \log \left(\frac{\frac{4}{\pi}}{f}\right)}{\pi}
\end{array}
Initial program 9.4%
distribute-lft-neg-in 9.4%
*-commutative 9.4%
associate-/r/ 9.4%
associate-*l/ 9.4%
metadata-eval 9.4%
distribute-neg-frac 9.4%
Simplified 9.4%
Taylor expanded in f around 0 96.6%
mul-1-neg 96.6%
unsub-neg 96.6%
distribute-rgt-out-- 96.6%
metadata-eval 96.6%
Simplified 96.6%
Taylor expanded in f around 0 96.6%
associate-*r/ 96.6%
sub-neg 96.6%
distribute-rgt-in 96.6%
metadata-eval 96.6%
associate-/r* 96.6%
*-commutative 96.6%
distribute-rgt-in 96.6%
unsub-neg 96.6%
log-div 96.6%
associate-*l/ 96.5%
*-commutative 96.5%
associate-/r* 96.5%
metadata-eval 96.5%
Simplified 96.6%
*-commutative 96.6%
associate-*r/ 96.6%
associate-/l/ 96.6%
associate-/r* 96.6%
Applied egg-rr 96.6%
Final simplification 96.6%
herbie shell --seed 2023256
(FPCore (f)
:name "VandenBroeck and Keller, Equation (20)"
:precision binary64
(- (* (/ 1.0 (/ PI 4.0)) (log (/ (+ (exp (* (/ PI 4.0) f)) (exp (- (* (/ PI 4.0) f)))) (- (exp (* (/ PI 4.0) f)) (exp (- (* (/ PI 4.0) f)))))))))