
Specification:
(FPCore (x) :precision binary64 (log (/ (sinh x) x)))
double code(double x) {
return log((sinh(x) / x));
}
real(8) function code(x)
real(8), intent (in) :: x
code = log((sinh(x) / x))
end function
public static double code(double x) {
return Math.log((Math.sinh(x) / x));
}
def code(x): return math.log((math.sinh(x) / x))
function code(x) return log(Float64(sinh(x) / x)) end
function tmp = code(x) tmp = log((sinh(x) / x)); end
code[x_] := N[Log[N[(N[Sinh[x], $MachinePrecision] / x), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\log \left(\frac{\sinh x}{x}\right)
\end{array}
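The low accuracy of this program comes from small inputs: sinh(x)/x = 1 + x²/6 + O(x⁴), so the division returns a value within a few ulps of 1 and log must recover a quantity smaller than the rounding error of its argument; once sinh(x)/x rounds to exactly 1.0, the program returns 0. A minimal sketch of the effect (not part of the report; the names naive and reference are illustrative, and it assumes Python with mpmath installed for reference values):

import math
from mpmath import mp, mpf, sinh, log

mp.prec = 200  # plenty of precision for the reference

def naive(x):
    return math.log(math.sinh(x) / x)

def reference(x):
    return float(log(sinh(mpf(x)) / mpf(x)))

for x in [1e-3, 1e-6, 1e-8]:
    print(f"x={x:g}  naive={naive(x):.17g}  exact={reference(x):.17g}")
# At x = 1e-8, sinh(x)/x rounds to exactly 1.0 in binary64, so naive()
# returns 0.0 while the true value is about x*x/6, roughly 1.7e-17.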
Sampling outcomes in binary64 precision:
Herbie found 7 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
| 1 | 97.1% |  |
| 2 | 97.0% |  |
| 3 | 96.9% |  |
| 4 | 96.9% |  |
| 5 | 96.6% |  |
| 6 | 96.5% |  |
| 7 | 55.5% |  |
Initial program:
(FPCore (x) :precision binary64 (log (/ (sinh x) x)))
double code(double x) {
return log((sinh(x) / x));
}
real(8) function code(x)
real(8), intent (in) :: x
code = log((sinh(x) / x))
end function
public static double code(double x) {
return Math.log((Math.sinh(x) / x));
}
def code(x): return math.log((math.sinh(x) / x))
function code(x) return log(Float64(sinh(x) / x)) end
function tmp = code(x) tmp = log((sinh(x) / x)); end
code[x_] := N[Log[N[(N[Sinh[x], $MachinePrecision] / x), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\log \left(\frac{\sinh x}{x}\right)
\end{array}
Alternative 1:
(FPCore (x)
:precision binary64
(*
(- 0.027777777777777776 (* (pow x 4.0) 3.08641975308642e-5))
(/
(pow x 2.0)
(-
0.16666666666666666
(*
(pow x 2.0)
(fma (pow x 2.0) 0.0003527336860670194 -0.005555555555555556))))))
double code(double x) {
return (0.027777777777777776 - (pow(x, 4.0) * 3.08641975308642e-5)) * (pow(x, 2.0) / (0.16666666666666666 - (pow(x, 2.0) * fma(pow(x, 2.0), 0.0003527336860670194, -0.005555555555555556))));
}
function code(x) return Float64(Float64(0.027777777777777776 - Float64((x ^ 4.0) * 3.08641975308642e-5)) * Float64((x ^ 2.0) / Float64(0.16666666666666666 - Float64((x ^ 2.0) * fma((x ^ 2.0), 0.0003527336860670194, -0.005555555555555556))))) end
code[x_] := N[(N[(0.027777777777777776 - N[(N[Power[x, 4.0], $MachinePrecision] * 3.08641975308642e-5), $MachinePrecision]), $MachinePrecision] * N[(N[Power[x, 2.0], $MachinePrecision] / N[(0.16666666666666666 - N[(N[Power[x, 2.0], $MachinePrecision] * N[(N[Power[x, 2.0], $MachinePrecision] * 0.0003527336860670194 + -0.005555555555555556), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\left(0.027777777777777776 - {x}^{4} \cdot 3.08641975308642 \cdot 10^{-5}\right) \cdot \frac{{x}^{2}}{0.16666666666666666 - {x}^{2} \cdot \mathsf{fma}\left({x}^{2}, 0.0003527336860670194, -0.005555555555555556\right)}
\end{array}
Derivation:
- Initial program: 57.6%
- Taylor expanded in x around 0: 96.9%
- flip-+: 96.9%
- associate-*r/: 96.8%
- Applied egg-rr: 96.8%
- *-commutative: 96.8%
- associate-/l*: 96.9%
- Simplified: 96.9%
- Taylor expanded in x around 0: 97.1%
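The C and Julia listings for this alternative rely on the fma operation. A hypothetical Python port, not emitted by the tool (the name alt1 is illustrative; math.fma requires Python 3.13 or newer):

import math

def alt1(x):
    # transcription of the FPCore above, with pow(x, k) spelled out
    x2 = x * x
    a = 0.027777777777777776 - (x2 * x2) * 3.08641975308642e-5
    d = 0.16666666666666666 - x2 * math.fma(x2, 0.0003527336860670194,
                                            -0.005555555555555556)
    return a * (x2 / d)

print(alt1(0.01))  # about 1.6666611e-05, i.e. x*x/6 - x**4/180 + ...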
Alternative 2:
(FPCore (x)
:precision binary64
(*
(- 0.027777777777777776 (* (pow x 4.0) 3.08641975308642e-5))
(*
x
(/
x
(-
0.16666666666666666
(*
(pow x 2.0)
(fma (pow x 2.0) 0.0003527336860670194 -0.005555555555555556)))))))
double code(double x) {
return (0.027777777777777776 - (pow(x, 4.0) * 3.08641975308642e-5)) * (x * (x / (0.16666666666666666 - (pow(x, 2.0) * fma(pow(x, 2.0), 0.0003527336860670194, -0.005555555555555556)))));
}
function code(x) return Float64(Float64(0.027777777777777776 - Float64((x ^ 4.0) * 3.08641975308642e-5)) * Float64(x * Float64(x / Float64(0.16666666666666666 - Float64((x ^ 2.0) * fma((x ^ 2.0), 0.0003527336860670194, -0.005555555555555556)))))) end
code[x_] := N[(N[(0.027777777777777776 - N[(N[Power[x, 4.0], $MachinePrecision] * 3.08641975308642e-5), $MachinePrecision]), $MachinePrecision] * N[(x * N[(x / N[(0.16666666666666666 - N[(N[Power[x, 2.0], $MachinePrecision] * N[(N[Power[x, 2.0], $MachinePrecision] * 0.0003527336860670194 + -0.005555555555555556), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\left(0.027777777777777776 - {x}^{4} \cdot 3.08641975308642 \cdot 10^{-5}\right) \cdot \left(x \cdot \frac{x}{0.16666666666666666 - {x}^{2} \cdot \mathsf{fma}\left({x}^{2}, 0.0003527336860670194, -0.005555555555555556\right)}\right)
\end{array}
Derivation:
- Initial program: 57.6%
- Taylor expanded in x around 0: 96.9%
- flip-+: 96.9%
- associate-*r/: 96.8%
- Applied egg-rr: 96.8%
- *-commutative: 96.8%
- associate-/l*: 96.9%
- Simplified: 96.9%
- Taylor expanded in x around 0: 97.1%
- unpow2: 97.1%
- associate-/l*: 97.0%
- Applied egg-rr: 97.0%
Alternative 3:
(FPCore (x) :precision binary64 (+ (* (pow x 4.0) (fma (pow x 2.0) 0.0003527336860670194 -0.005555555555555556)) (* (pow x 2.0) 0.16666666666666666)))
double code(double x) {
return (pow(x, 4.0) * fma(pow(x, 2.0), 0.0003527336860670194, -0.005555555555555556)) + (pow(x, 2.0) * 0.16666666666666666);
}
function code(x) return Float64(Float64((x ^ 4.0) * fma((x ^ 2.0), 0.0003527336860670194, -0.005555555555555556)) + Float64((x ^ 2.0) * 0.16666666666666666)) end
code[x_] := N[(N[(N[Power[x, 4.0], $MachinePrecision] * N[(N[Power[x, 2.0], $MachinePrecision] * 0.0003527336860670194 + -0.005555555555555556), $MachinePrecision]), $MachinePrecision] + N[(N[Power[x, 2.0], $MachinePrecision] * 0.16666666666666666), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
{x}^{4} \cdot \mathsf{fma}\left({x}^{2}, 0.0003527336860670194, -0.005555555555555556\right) + {x}^{2} \cdot 0.16666666666666666
\end{array}
Derivation:
- Initial program: 57.6%
- Taylor expanded in x around 0: 96.9%
- distribute-rgt-in: 96.9%
- +-commutative: 96.9%
- *-commutative: 96.9%
- associate-*l*: 96.9%
- *-commutative: 96.9%
- fmm-def: 96.9%
- metadata-eval: 96.9%
- pow-prod-up: 96.9%
- metadata-eval: 96.9%
- *-commutative: 96.9%
- Applied egg-rr: 96.9%
- Final simplification: 96.9%
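Alternatives 3 through 7 drop log and sinh entirely and keep only a truncated Maclaurin series of log(sinh(x)/x); the constants above are the binary64 roundings of 1/6, -1/180, and 1/2835. A quick coefficient check (a sketch, assuming sympy is available):

import sympy as sp

x = sp.symbols('x')
print(sp.series(sp.log(sp.sinh(x) / x), x, 0, 8).removeO())
# x**6/2835 - x**4/180 + x**2/6
print(float(sp.Rational(1, 6)), float(sp.Rational(-1, 180)),
      float(sp.Rational(1, 2835)))
# 0.16666666666666666 -0.005555555555555556 0.0003527336860670194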
Alternative 4:
(FPCore (x)
:precision binary64
(*
(pow x 2.0)
(+
0.16666666666666666
(*
(pow x 2.0)
(- (* 0.0003527336860670194 (* x x)) 0.005555555555555556)))))
double code(double x) {
return pow(x, 2.0) * (0.16666666666666666 + (pow(x, 2.0) * ((0.0003527336860670194 * (x * x)) - 0.005555555555555556)));
}
real(8) function code(x)
real(8), intent (in) :: x
code = (x ** 2.0d0) * (0.16666666666666666d0 + ((x ** 2.0d0) * ((0.0003527336860670194d0 * (x * x)) - 0.005555555555555556d0)))
end function
public static double code(double x) {
return Math.pow(x, 2.0) * (0.16666666666666666 + (Math.pow(x, 2.0) * ((0.0003527336860670194 * (x * x)) - 0.005555555555555556)));
}
def code(x): return math.pow(x, 2.0) * (0.16666666666666666 + (math.pow(x, 2.0) * ((0.0003527336860670194 * (x * x)) - 0.005555555555555556)))
function code(x) return Float64((x ^ 2.0) * Float64(0.16666666666666666 + Float64((x ^ 2.0) * Float64(Float64(0.0003527336860670194 * Float64(x * x)) - 0.005555555555555556)))) end
function tmp = code(x) tmp = (x ^ 2.0) * (0.16666666666666666 + ((x ^ 2.0) * ((0.0003527336860670194 * (x * x)) - 0.005555555555555556))); end
code[x_] := N[(N[Power[x, 2.0], $MachinePrecision] * N[(0.16666666666666666 + N[(N[Power[x, 2.0], $MachinePrecision] * N[(N[(0.0003527336860670194 * N[(x * x), $MachinePrecision]), $MachinePrecision] - 0.005555555555555556), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
{x}^{2} \cdot \left(0.16666666666666666 + {x}^{2} \cdot \left(0.0003527336860670194 \cdot \left(x \cdot x\right) - 0.005555555555555556\right)\right)
\end{array}
Derivation:
- Initial program: 57.6%
- Taylor expanded in x around 0: 96.9%
- unpow2: 96.9%
- Applied egg-rr: 96.9%
Alternative 5:
(FPCore (x) :precision binary64 (fma 0.16666666666666666 (* x x) (* (pow x 4.0) -0.005555555555555556)))
double code(double x) {
return fma(0.16666666666666666, (x * x), (pow(x, 4.0) * -0.005555555555555556));
}
function code(x) return fma(0.16666666666666666, Float64(x * x), Float64((x ^ 4.0) * -0.005555555555555556)) end
code[x_] := N[(0.16666666666666666 * N[(x * x), $MachinePrecision] + N[(N[Power[x, 4.0], $MachinePrecision] * -0.005555555555555556), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\mathsf{fma}\left(0.16666666666666666, x \cdot x, {x}^{4} \cdot -0.005555555555555556\right)
\end{array}
Derivation:
- Initial program: 57.6%
- Taylor expanded in x around 0: 96.6%
- distribute-rgt-in: 96.6%
- fma-define: 96.6%
- associate-*l*: 96.6%
- pow-sqr: 96.6%
- metadata-eval: 96.6%
- Simplified: 96.6%
- unpow2: 96.9%
- Applied egg-rr: 96.6%
- Final simplification: 96.6%
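The fma in this variant rounds a*b+c once instead of twice, which is what the fma-define step above introduces. A small sketch of the difference (the names alt5_fma and alt5_plain are illustrative; math.fma requires Python 3.13+); the two forms can disagree in the last bit:

import math

def alt5_fma(x):
    return math.fma(0.16666666666666666, x * x,
                    (x * x) * (x * x) * -0.005555555555555556)

def alt5_plain(x):
    return (0.16666666666666666 * (x * x)
            + (x * x) * (x * x) * -0.005555555555555556)

x = 0.3
print(alt5_fma(x), alt5_plain(x), alt5_fma(x) == alt5_plain(x))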
Alternative 6:
(FPCore (x) :precision binary64 (* 0.16666666666666666 (* x x)))
double code(double x) {
return 0.16666666666666666 * (x * x);
}
real(8) function code(x)
real(8), intent (in) :: x
code = 0.16666666666666666d0 * (x * x)
end function
public static double code(double x) {
return 0.16666666666666666 * (x * x);
}
def code(x): return 0.16666666666666666 * (x * x)
function code(x) return Float64(0.16666666666666666 * Float64(x * x)) end
function tmp = code(x) tmp = 0.16666666666666666 * (x * x); end
code[x_] := N[(0.16666666666666666 * N[(x * x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
0.16666666666666666 \cdot \left(x \cdot x\right)
\end{array}
Derivation:
- Initial program: 57.6%
- Taylor expanded in x around 0: 56.5%
- +-commutative: 56.5%
- Simplified: 56.5%
- Taylor expanded in x around 0: 96.5%
- *-commutative: 96.5%
- Simplified: 96.5%
- unpow2: 96.9%
- Applied egg-rr: 96.5%
- Final simplification: 96.5%
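Keeping only the leading series term is the cheapest variant but also the narrowest: dropping the -x**4/180 term leaves a relative error of about (x**4/180)/(x**2/6) = x**2/30, so x*x/6 agrees with the true value to full binary64 precision only for very small inputs. A back-of-the-envelope bound (an illustration, not taken from the report):

import math

# largest |x| for which the truncation error x**2/30 stays below
# half an ulp (2**-53) relative to the leading term
print(math.sqrt(30 * 2.0 ** -53))  # about 5.8e-08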
Alternative 7:
(FPCore (x) :precision binary64 0.0)
double code(double x) {
return 0.0;
}
real(8) function code(x)
real(8), intent (in) :: x
code = 0.0d0
end function
public static double code(double x) {
return 0.0;
}
def code(x): return 0.0
function code(x) return 0.0 end
function tmp = code(x) tmp = 0.0; end
code[x_] := 0.0
\begin{array}{l}
0
\end{array}
Derivation:
- Initial program: 57.6%
- Taylor expanded in x around 0: 55.5%
- metadata-eval: 55.5%
- Applied egg-rr: 55.5%
Developer target (from the :alt annotation in the input):
(FPCore (x)
:precision binary64
(if (< (fabs x) 0.085)
(*
(* x x)
(fma
(fma
(fma -2.6455026455026456e-5 (* x x) 0.0003527336860670194)
(* x x)
-0.005555555555555556)
(* x x)
0.16666666666666666))
(log (/ (sinh x) x))))
double code(double x) {
    double tmp;
    if (fabs(x) < 0.085) {
        tmp = (x * x) * fma(fma(fma(-2.6455026455026456e-5, (x * x), 0.0003527336860670194), (x * x), -0.005555555555555556), (x * x), 0.16666666666666666);
    } else {
        tmp = log((sinh(x) / x));
    }
    return tmp;
}
function code(x)
	tmp = 0.0
	if (abs(x) < 0.085)
		tmp = Float64(Float64(x * x) * fma(fma(fma(-2.6455026455026456e-5, Float64(x * x), 0.0003527336860670194), Float64(x * x), -0.005555555555555556), Float64(x * x), 0.16666666666666666))
	else
		tmp = log(Float64(sinh(x) / x))
	end
	return tmp
end
code[x_] := If[Less[N[Abs[x], $MachinePrecision], 0.085], N[(N[(x * x), $MachinePrecision] * N[(N[(N[(-2.6455026455026456e-5 * N[(x * x), $MachinePrecision] + 0.0003527336860670194), $MachinePrecision] * N[(x * x), $MachinePrecision] + -0.005555555555555556), $MachinePrecision] * N[(x * x), $MachinePrecision] + 0.16666666666666666), $MachinePrecision]), $MachinePrecision], N[Log[N[(N[Sinh[x], $MachinePrecision] / x), $MachinePrecision]], $MachinePrecision]]
\begin{array}{l}
\mathbf{if}\;\left|x\right| < 0.085:\\
\;\;\;\;\left(x \cdot x\right) \cdot \mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(-2.6455026455026456 \cdot 10^{-5}, x \cdot x, 0.0003527336860670194\right), x \cdot x, -0.005555555555555556\right), x \cdot x, 0.16666666666666666\right)\\
\mathbf{else}:\\
\;\;\;\;\log \left(\frac{\sinh x}{x}\right)\\
\end{array}
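The target switches regimes at |x| = 0.085: below the cutoff it evaluates the degree-8 Maclaurin polynomial, and above it the direct formula, where sinh(x)/x is far enough from 1 for log to be well conditioned. A hypothetical Python port of the branch (the name target is illustrative; math.fma requires Python 3.13+):

import math

def target(x):
    x2 = x * x
    if abs(x) < 0.085:
        # x**2/6 - x**4/180 + x**6/2835 - x**8/37800, in Horner/fma form
        return x2 * math.fma(
            math.fma(math.fma(-2.6455026455026456e-5, x2, 0.0003527336860670194),
                     x2, -0.005555555555555556),
            x2, 0.16666666666666666)
    return math.log(math.sinh(x) / x)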
Reproduce with:
herbie shell --seed 2024191
(FPCore (x)
 :name "bug500, discussion (missed optimization)"
 :precision binary64
 :alt
 (! :herbie-platform default
    (if (< (fabs x) 17/200)
        (let ((x2 (* x x)))
          (* x2 (fma (fma (fma -1/37800 x2 1/2835) x2 -1/180) x2 1/6)))
        (log (/ (sinh x) x))))
 (log (/ (sinh x) x)))
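To reproduce, start the shell with the command above and paste in this FPCore expression: Herbie reads FPCore terms from standard input and prints an improved program for each, and the --seed argument pins the random sampling so the reported accuracies are repeatable.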