
(FPCore (f) :precision binary64 (let* ((t_0 (* (/ PI 4.0) f)) (t_1 (exp t_0)) (t_2 (exp (- t_0)))) (- (* (/ 1.0 (/ PI 4.0)) (log (/ (+ t_1 t_2) (- t_1 t_2)))))))
/* VandenBroeck & Keller, eq. (20):
 * -(1/(pi/4)) * log((e^t + e^-t) / (e^t - e^-t)) with t = (pi/4) * f.
 * NOTE(review): the log argument is <= 0 for f <= 0, so the result is
 * NaN/inf there -- presumably callers restrict f > 0; confirm. */
double code(double f) {
    const double quarter_pi = ((double) M_PI) / 4.0;
    double t = quarter_pi * f;
    double e_plus = exp(t);
    double e_minus = exp(-t);
    return -((1.0 / quarter_pi) * log((e_plus + e_minus) / (e_plus - e_minus)));
}
/**
 * VandenBroeck & Keller, eq. (20):
 * -(1/(PI/4)) * log((e^t + e^-t) / (e^t - e^-t)) with t = (PI/4) * f.
 * NOTE(review): the log argument is non-positive for f <= 0 (NaN result);
 * presumably callers restrict f > 0 -- confirm.
 */
public static double code(double f) {
    final double quarterPi = Math.PI / 4.0;
    final double t = quarterPi * f;
    final double ePlus = Math.exp(t);
    final double eMinus = Math.exp(-t);
    return -((1.0 / quarterPi) * Math.log((ePlus + eMinus) / (ePlus - eMinus)));
}
def code(f): t_0 = (math.pi / 4.0) * f t_1 = math.exp(t_0) t_2 = math.exp(-t_0) return -((1.0 / (math.pi / 4.0)) * math.log(((t_1 + t_2) / (t_1 - t_2))))
function code(f) t_0 = Float64(Float64(pi / 4.0) * f) t_1 = exp(t_0) t_2 = exp(Float64(-t_0)) return Float64(-Float64(Float64(1.0 / Float64(pi / 4.0)) * log(Float64(Float64(t_1 + t_2) / Float64(t_1 - t_2))))) end
function tmp = code(f) t_0 = (pi / 4.0) * f; t_1 = exp(t_0); t_2 = exp(-t_0); tmp = -((1.0 / (pi / 4.0)) * log(((t_1 + t_2) / (t_1 - t_2)))); end
code[f_] := Block[{t$95$0 = N[(N[(Pi / 4.0), $MachinePrecision] * f), $MachinePrecision]}, Block[{t$95$1 = N[Exp[t$95$0], $MachinePrecision]}, Block[{t$95$2 = N[Exp[(-t$95$0)], $MachinePrecision]}, (-N[(N[(1.0 / N[(Pi / 4.0), $MachinePrecision]), $MachinePrecision] * N[Log[N[(N[(t$95$1 + t$95$2), $MachinePrecision] / N[(t$95$1 - t$95$2), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision])]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{\pi}{4} \cdot f\\
t_1 := e^{t_0}\\
t_2 := e^{-t_0}\\
-\frac{1}{\frac{\pi}{4}} \cdot \log \left(\frac{t_1 + t_2}{t_1 - t_2}\right)
\end{array}
\end{array}
Herbie found 4 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (f) :precision binary64 (let* ((t_0 (* (/ PI 4.0) f)) (t_1 (exp t_0)) (t_2 (exp (- t_0)))) (- (* (/ 1.0 (/ PI 4.0)) (log (/ (+ t_1 t_2) (- t_1 t_2)))))))
/* VandenBroeck & Keller, eq. (20):
 * -(1/(pi/4)) * log((e^t + e^-t) / (e^t - e^-t)) with t = (pi/4) * f. */
double code(double f) {
    const double quarter_pi = ((double) M_PI) / 4.0;
    double t = quarter_pi * f;
    double e_plus = exp(t);
    double e_minus = exp(-t);
    return -((1.0 / quarter_pi) * log((e_plus + e_minus) / (e_plus - e_minus)));
}
/**
 * VandenBroeck & Keller, eq. (20):
 * -(1/(PI/4)) * log((e^t + e^-t) / (e^t - e^-t)) with t = (PI/4) * f.
 */
public static double code(double f) {
    final double quarterPi = Math.PI / 4.0;
    final double t = quarterPi * f;
    final double ePlus = Math.exp(t);
    final double eMinus = Math.exp(-t);
    return -((1.0 / quarterPi) * Math.log((ePlus + eMinus) / (ePlus - eMinus)));
}
def code(f): t_0 = (math.pi / 4.0) * f t_1 = math.exp(t_0) t_2 = math.exp(-t_0) return -((1.0 / (math.pi / 4.0)) * math.log(((t_1 + t_2) / (t_1 - t_2))))
function code(f) t_0 = Float64(Float64(pi / 4.0) * f) t_1 = exp(t_0) t_2 = exp(Float64(-t_0)) return Float64(-Float64(Float64(1.0 / Float64(pi / 4.0)) * log(Float64(Float64(t_1 + t_2) / Float64(t_1 - t_2))))) end
function tmp = code(f) t_0 = (pi / 4.0) * f; t_1 = exp(t_0); t_2 = exp(-t_0); tmp = -((1.0 / (pi / 4.0)) * log(((t_1 + t_2) / (t_1 - t_2)))); end
code[f_] := Block[{t$95$0 = N[(N[(Pi / 4.0), $MachinePrecision] * f), $MachinePrecision]}, Block[{t$95$1 = N[Exp[t$95$0], $MachinePrecision]}, Block[{t$95$2 = N[Exp[(-t$95$0)], $MachinePrecision]}, (-N[(N[(1.0 / N[(Pi / 4.0), $MachinePrecision]), $MachinePrecision] * N[Log[N[(N[(t$95$1 + t$95$2), $MachinePrecision] / N[(t$95$1 - t$95$2), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision])]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{\pi}{4} \cdot f\\
t_1 := e^{t_0}\\
t_2 := e^{-t_0}\\
-\frac{1}{\frac{\pi}{4}} \cdot \log \left(\frac{t_1 + t_2}{t_1 - t_2}\right)
\end{array}
\end{array}
(FPCore (f) :precision binary64 (/ (* 4.0 (log (tanh (* (* 0.25 PI) f)))) PI))
/* Herbie alternative: (4/pi) * log(tanh((pi/4) * f)).  Algebraically equal
 * to the original eq. (20) form; tanh avoids the cancellation of exp terms. */
double code(double f) {
    double t = (0.25 * ((double) M_PI)) * f;
    return (4.0 * log(tanh(t))) / ((double) M_PI);
}
/** Herbie alternative: (4/PI) * log(tanh((PI/4) * f)). */
public static double code(double f) {
    final double t = (0.25 * Math.PI) * f;
    return (4.0 * Math.log(Math.tanh(t))) / Math.PI;
}
def code(f): return (4.0 * math.log(math.tanh(((0.25 * math.pi) * f)))) / math.pi
function code(f) return Float64(Float64(4.0 * log(tanh(Float64(Float64(0.25 * pi) * f)))) / pi) end
function tmp = code(f) tmp = (4.0 * log(tanh(((0.25 * pi) * f)))) / pi; end
code[f_] := N[(N[(4.0 * N[Log[N[Tanh[N[(N[(0.25 * Pi), $MachinePrecision] * f), $MachinePrecision]], $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / Pi), $MachinePrecision]
\begin{array}{l}
\\
\frac{4 \cdot \log \tanh \left(\left(0.25 \cdot \pi\right) \cdot f\right)}{\pi}
\end{array}
Initial program 6.9%
Applied rewrites 99.1%
(FPCore (f) :precision binary64 (* (log (tanh (* (* 0.25 PI) f))) (/ 4.0 PI)))
/* Herbie alternative: log(tanh((pi/4) * f)) * (4/pi); same value as the
 * tanh form but with the 4/pi scale factored out. */
double code(double f) {
    double t = (0.25 * ((double) M_PI)) * f;
    double scale = 4.0 / ((double) M_PI);
    return log(tanh(t)) * scale;
}
/** Herbie alternative: log(tanh((PI/4) * f)) * (4/PI). */
public static double code(double f) {
    final double t = (0.25 * Math.PI) * f;
    final double scale = 4.0 / Math.PI;
    return Math.log(Math.tanh(t)) * scale;
}
def code(f): return math.log(math.tanh(((0.25 * math.pi) * f))) * (4.0 / math.pi)
function code(f) return Float64(log(tanh(Float64(Float64(0.25 * pi) * f))) * Float64(4.0 / pi)) end
function tmp = code(f) tmp = log(tanh(((0.25 * pi) * f))) * (4.0 / pi); end
code[f_] := N[(N[Log[N[Tanh[N[(N[(0.25 * Pi), $MachinePrecision] * f), $MachinePrecision]], $MachinePrecision]], $MachinePrecision] * N[(4.0 / Pi), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\log \tanh \left(\left(0.25 \cdot \pi\right) \cdot f\right) \cdot \frac{4}{\pi}
\end{array}
Initial program 6.9%
Applied rewrites 98.9%
(FPCore (f) :precision binary64 (/ (* 4.0 (+ (log f) (log (* 0.25 PI)))) PI))
/* Herbie alternative (small-f series): (4/pi) * (log(f) + log(pi/4)),
 * from log(tanh(t)) ~ log(t) for small t.  Only meaningful for f > 0. */
double code(double f) {
    double log_sum = log(f) + log(0.25 * ((double) M_PI));
    return (4.0 * log_sum) / ((double) M_PI);
}
/** Herbie alternative (small-f series): (4/PI) * (log(f) + log(PI/4)). */
public static double code(double f) {
    final double logSum = Math.log(f) + Math.log(0.25 * Math.PI);
    return (4.0 * logSum) / Math.PI;
}
def code(f): return (4.0 * (math.log(f) + math.log((0.25 * math.pi)))) / math.pi
function code(f) return Float64(Float64(4.0 * Float64(log(f) + log(Float64(0.25 * pi)))) / pi) end
function tmp = code(f) tmp = (4.0 * (log(f) + log((0.25 * pi)))) / pi; end
code[f_] := N[(N[(4.0 * N[(N[Log[f], $MachinePrecision] + N[Log[N[(0.25 * Pi), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / Pi), $MachinePrecision]
\begin{array}{l}
\\
\frac{4 \cdot \left(\log f + \log \left(0.25 \cdot \pi\right)\right)}{\pi}
\end{array}
Initial program 6.9%
Applied rewrites 99.1%
Taylor expanded in f around 0
Applied rewrites 95.9%
Taylor expanded in f around 0
Applied rewrites 95.9%
(FPCore (f) :precision binary64 (/ (* 4.0 (log (* 0.25 (* f PI)))) PI))
/* Herbie alternative (small-f series): (4/pi) * log((pi/4) * f); the two
 * logs of the previous variant fused into one.  Only meaningful for f > 0. */
double code(double f) {
    double inner = 0.25 * (f * ((double) M_PI));
    return (4.0 * log(inner)) / ((double) M_PI);
}
/** Herbie alternative (small-f series): (4/PI) * log((PI/4) * f). */
public static double code(double f) {
    final double inner = 0.25 * (f * Math.PI);
    return (4.0 * Math.log(inner)) / Math.PI;
}
def code(f): return (4.0 * math.log((0.25 * (f * math.pi)))) / math.pi
function code(f) return Float64(Float64(4.0 * log(Float64(0.25 * Float64(f * pi)))) / pi) end
function tmp = code(f) tmp = (4.0 * log((0.25 * (f * pi)))) / pi; end
code[f_] := N[(N[(4.0 * N[Log[N[(0.25 * N[(f * Pi), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / Pi), $MachinePrecision]
\begin{array}{l}
\\
\frac{4 \cdot \log \left(0.25 \cdot \left(f \cdot \pi\right)\right)}{\pi}
\end{array}
Initial program 6.9%
Applied rewrites 99.1%
Taylor expanded in f around 0
Applied rewrites 95.9%
herbie shell --seed 2025149
(FPCore (f)
:name "VandenBroeck and Keller, Equation (20)"
:precision binary64
(- (* (/ 1.0 (/ PI 4.0)) (log (/ (+ (exp (* (/ PI 4.0) f)) (exp (- (* (/ PI 4.0) f)))) (- (exp (* (/ PI 4.0) f)) (exp (- (* (/ PI 4.0) f)))))))))