
(FPCore (x) :precision binary64 (log (/ (sinh x) x)))
double code(double x) {
return log((sinh(x) / x));
}
real(8) function code(x)
real(8), intent (in) :: x
code = log((sinh(x) / x))
end function
public static double code(double x) {
return Math.log((Math.sinh(x) / x));
}
def code(x): return math.log((math.sinh(x) / x))
function code(x) return log(Float64(sinh(x) / x)) end
function tmp = code(x) tmp = log((sinh(x) / x)); end
code[x_] := N[Log[N[(N[Sinh[x], $MachinePrecision] / x), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\log \left(\frac{\sinh x}{x}\right)
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 8 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
Initial program:
(FPCore (x) :precision binary64 (log (/ (sinh x) x)))
double code(double x) {
return log((sinh(x) / x));
}
real(8) function code(x)
real(8), intent (in) :: x
code = log((sinh(x) / x))
end function
public static double code(double x) {
return Math.log((Math.sinh(x) / x));
}
def code(x): return math.log((math.sinh(x) / x))
function code(x) return log(Float64(sinh(x) / x)) end
function tmp = code(x) tmp = log((sinh(x) / x)); end
code[x_] := N[Log[N[(N[Sinh[x], $MachinePrecision] / x), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\log \left(\frac{\sinh x}{x}\right)
\end{array}
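The low accuracy of the direct formula is easiest to see near x = 0: the true value is of order x²/6, while sinh(x)/x rounds to a result within an ulp or so of 1, and the final log amplifies that rounding error into the leading digits of the answer (this reading is mine; the report only gives the score). All of the polynomial alternatives below are truncations of the Maclaurin series of the expression (a standard expansion, reproduced here for reference); its coefficients match the decimal constants used below, and the :alt annotation at the end spells them out as the exact rationals 1/6, -1/180, 1/2835, and -1/37800:
\begin{array}{l}
\log \left(\frac{\sinh x}{x}\right) = \frac{x^2}{6} - \frac{x^4}{180} + \frac{x^6}{2835} - \frac{x^8}{37800} + O\left(x^{10}\right)
\end{array}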
Alternative 1:
(FPCore (x)
:precision binary64
(/
(* x x)
(/
1.0
(fma
(fma (* x x) 0.0003527336860670194 -0.005555555555555556)
(* x x)
0.16666666666666666))))
double code(double x) {
return (x * x) / (1.0 / fma(fma((x * x), 0.0003527336860670194, -0.005555555555555556), (x * x), 0.16666666666666666));
}
function code(x) return Float64(Float64(x * x) / Float64(1.0 / fma(fma(Float64(x * x), 0.0003527336860670194, -0.005555555555555556), Float64(x * x), 0.16666666666666666))) end
code[x_] := N[(N[(x * x), $MachinePrecision] / N[(1.0 / N[(N[(N[(x * x), $MachinePrecision] * 0.0003527336860670194 + -0.005555555555555556), $MachinePrecision] * N[(x * x), $MachinePrecision] + 0.16666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{x \cdot x}{\frac{1}{\mathsf{fma}\left(\mathsf{fma}\left(x \cdot x, 0.0003527336860670194, -0.005555555555555556\right), x \cdot x, 0.16666666666666666\right)}}
\end{array}
| Step | Accuracy |
|---|---|
| Initial program | 52.1% |
| Taylor expanded in x around 0 | |
| unpow2 | N/A |
| associate-*l* | N/A |
| *-commutative | N/A |
| lower-*.f64 | N/A |
| *-commutative | N/A |
| lower-*.f64 | N/A |
| +-commutative | N/A |
| *-commutative | N/A |
| lower-fma.f64 | N/A |
| sub-neg | N/A |
| metadata-eval | N/A |
| lower-fma.f64 | N/A |
| unpow2 | N/A |
| lower-*.f64 | N/A |
| unpow2 | N/A |
| lower-*.f64 | 98.7 |
| Applied rewrites | 98.7% |
| Applied rewrites | 98.8% |
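Read algebraically (my simplification, not something the report states), Alternative 1 is the degree-6 truncation of that series hidden behind a redundant reciprocal; Alternative 2 below evaluates the same polynomial directly in fused multiply-add form:
\begin{array}{l}
\frac{x^2}{1 / P\left(x^2\right)} = x^2 \cdot P\left(x^2\right), \qquad P(t) = \frac{1}{6} - \frac{t}{180} + \frac{t^2}{2835}
\end{array}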
Alternative 2:
(FPCore (x)
:precision binary64
(*
(*
(fma
(fma 0.0003527336860670194 (* x x) -0.005555555555555556)
(* x x)
0.16666666666666666)
x)
x))
double code(double x) {
return (fma(fma(0.0003527336860670194, (x * x), -0.005555555555555556), (x * x), 0.16666666666666666) * x) * x;
}
function code(x) return Float64(Float64(fma(fma(0.0003527336860670194, Float64(x * x), -0.005555555555555556), Float64(x * x), 0.16666666666666666) * x) * x) end
code[x_] := N[(N[(N[(N[(0.0003527336860670194 * N[(x * x), $MachinePrecision] + -0.005555555555555556), $MachinePrecision] * N[(x * x), $MachinePrecision] + 0.16666666666666666), $MachinePrecision] * x), $MachinePrecision] * x), $MachinePrecision]
\begin{array}{l}
\\
\left(\mathsf{fma}\left(\mathsf{fma}\left(0.0003527336860670194, x \cdot x, -0.005555555555555556\right), x \cdot x, 0.16666666666666666\right) \cdot x\right) \cdot x
\end{array}
| Step | Accuracy |
|---|---|
| Initial program | 52.1% |
| Taylor expanded in x around 0 | |
| unpow2 | N/A |
| associate-*l* | N/A |
| *-commutative | N/A |
| lower-*.f64 | N/A |
| *-commutative | N/A |
| lower-*.f64 | N/A |
| +-commutative | N/A |
| *-commutative | N/A |
| lower-fma.f64 | N/A |
| sub-neg | N/A |
| metadata-eval | N/A |
| lower-fma.f64 | N/A |
| unpow2 | N/A |
| lower-*.f64 | N/A |
| unpow2 | N/A |
| lower-*.f64 | 98.7 |
| Applied rewrites | 98.7% |
Alternative 3:
(FPCore (x) :precision binary64 (/ (* x x) (fma 0.2 (* x x) 6.0)))
double code(double x) {
return (x * x) / fma(0.2, (x * x), 6.0);
}
function code(x) return Float64(Float64(x * x) / fma(0.2, Float64(x * x), 6.0)) end
code[x_] := N[(N[(x * x), $MachinePrecision] / N[(0.2 * N[(x * x), $MachinePrecision] + 6.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{x \cdot x}{\mathsf{fma}\left(0.2, x \cdot x, 6\right)}
\end{array}
| Step | Accuracy |
|---|---|
| Initial program | 52.1% |
| Taylor expanded in x around 0 | |
| unpow2 | N/A |
| associate-*l* | N/A |
| *-commutative | N/A |
| lower-*.f64 | N/A |
| *-commutative | N/A |
| lower-*.f64 | N/A |
| +-commutative | N/A |
| *-commutative | N/A |
| lower-fma.f64 | N/A |
| sub-neg | N/A |
| metadata-eval | N/A |
| lower-fma.f64 | N/A |
| unpow2 | N/A |
| lower-*.f64 | N/A |
| unpow2 | N/A |
| lower-*.f64 | 98.7 |
| Applied rewrites | 98.7% |
| Applied rewrites | 98.8% |
| Taylor expanded in x around 0 | |
| Applied rewrites | 98.7% |
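A quick expansion (mine, added for reference) shows that this rational form agrees with the series through the x⁴ term and only diverges from it at x⁶:
\begin{array}{l}
\frac{x^2}{\mathsf{fma}\left(0.2, x \cdot x, 6\right)} = \frac{x^2}{6} \cdot \frac{1}{1 + x^2/30} = \frac{x^2}{6} - \frac{x^4}{180} + \frac{x^6}{5400} - \cdots
\end{array}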
Alternative 4:
(FPCore (x) :precision binary64 (* (fma x 0.16666666666666666 (* (* -0.005555555555555556 x) (* x x))) x))
double code(double x) {
return fma(x, 0.16666666666666666, ((-0.005555555555555556 * x) * (x * x))) * x;
}
function code(x) return Float64(fma(x, 0.16666666666666666, Float64(Float64(-0.005555555555555556 * x) * Float64(x * x))) * x) end
code[x_] := N[(N[(x * 0.16666666666666666 + N[(N[(-0.005555555555555556 * x), $MachinePrecision] * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * x), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(x, 0.16666666666666666, \left(-0.005555555555555556 \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot x
\end{array}
| Step | Accuracy |
|---|---|
| Initial program | 52.1% |
| Taylor expanded in x around 0 | |
| unpow2 | N/A |
| associate-*l* | N/A |
| *-commutative | N/A |
| lower-*.f64 | N/A |
| *-commutative | N/A |
| lower-*.f64 | N/A |
| +-commutative | N/A |
| lower-fma.f64 | N/A |
| unpow2 | N/A |
| lower-*.f64 | 98.4 |
| Applied rewrites | 98.4% |
| Applied rewrites | 98.4% |
| Applied rewrites | 98.4% |
| Final simplification | 98.4% |
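Alternative 4 (and Alternative 5 below, which is the same polynomial with the operations reassociated) keeps just the first two series terms; simplifying the fma by hand:
\begin{array}{l}
\mathsf{fma}\left(x, \tfrac{1}{6}, \left(-\tfrac{1}{180} \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot x = \left(\frac{x}{6} - \frac{x^3}{180}\right) \cdot x = \frac{x^2}{6} - \frac{x^4}{180}
\end{array}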
Alternative 5:
(FPCore (x) :precision binary64 (* (* (fma -0.005555555555555556 (* x x) 0.16666666666666666) x) x))
double code(double x) {
return (fma(-0.005555555555555556, (x * x), 0.16666666666666666) * x) * x;
}
function code(x) return Float64(Float64(fma(-0.005555555555555556, Float64(x * x), 0.16666666666666666) * x) * x) end
code[x_] := N[(N[(N[(-0.005555555555555556 * N[(x * x), $MachinePrecision] + 0.16666666666666666), $MachinePrecision] * x), $MachinePrecision] * x), $MachinePrecision]
\begin{array}{l}
\\
\left(\mathsf{fma}\left(-0.005555555555555556, x \cdot x, 0.16666666666666666\right) \cdot x\right) \cdot x
\end{array}
| Step | Accuracy |
|---|---|
| Initial program | 52.1% |
| Taylor expanded in x around 0 | |
| unpow2 | N/A |
| associate-*l* | N/A |
| *-commutative | N/A |
| lower-*.f64 | N/A |
| *-commutative | N/A |
| lower-*.f64 | N/A |
| +-commutative | N/A |
| lower-fma.f64 | N/A |
| unpow2 | N/A |
| lower-*.f64 | 98.4 |
| Applied rewrites | 98.4% |
Alternative 6:
(FPCore (x) :precision binary64 (* (/ x 6.0) x))
double code(double x) {
return (x / 6.0) * x;
}
real(8) function code(x)
real(8), intent (in) :: x
code = (x / 6.0d0) * x
end function
public static double code(double x) {
return (x / 6.0) * x;
}
def code(x): return (x / 6.0) * x
function code(x) return Float64(Float64(x / 6.0) * x) end
function tmp = code(x) tmp = (x / 6.0) * x; end
code[x_] := N[(N[(x / 6.0), $MachinePrecision] * x), $MachinePrecision]
\begin{array}{l}
\\
\frac{x}{6} \cdot x
\end{array}
| Step | Accuracy |
|---|---|
| Initial program | 52.1% |
| Taylor expanded in x around 0 | |
| unpow2 | N/A |
| associate-*l* | N/A |
| *-commutative | N/A |
| lower-*.f64 | N/A |
| *-commutative | N/A |
| lower-*.f64 | N/A |
| +-commutative | N/A |
| *-commutative | N/A |
| lower-fma.f64 | N/A |
| sub-neg | N/A |
| metadata-eval | N/A |
| lower-fma.f64 | N/A |
| unpow2 | N/A |
| lower-*.f64 | N/A |
| unpow2 | N/A |
| lower-*.f64 | 98.7 |
| Applied rewrites | 98.7% |
| Applied rewrites | 98.8% |
| Taylor expanded in x around 0 | |
| Applied rewrites | 98.1% |
| Applied rewrites | 98.1% |
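Alternative 6, like Alternatives 7 and 8 that follow, keeps only the leading term of the series and trades accuracy for speed; the three differ only in how the product is associated:
\begin{array}{l}
\frac{x}{6} \cdot x = \left(\frac{1}{6} \cdot x\right) \cdot x = \frac{1}{6} \cdot \left(x \cdot x\right) = \frac{x^2}{6}
\end{array}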
Alternative 7:
(FPCore (x) :precision binary64 (* (* 0.16666666666666666 x) x))
double code(double x) {
return (0.16666666666666666 * x) * x;
}
real(8) function code(x)
real(8), intent (in) :: x
code = (0.16666666666666666d0 * x) * x
end function
public static double code(double x) {
return (0.16666666666666666 * x) * x;
}
def code(x): return (0.16666666666666666 * x) * x
function code(x) return Float64(Float64(0.16666666666666666 * x) * x) end
function tmp = code(x) tmp = (0.16666666666666666 * x) * x; end
code[x_] := N[(N[(0.16666666666666666 * x), $MachinePrecision] * x), $MachinePrecision]
\begin{array}{l}
\\
\left(0.16666666666666666 \cdot x\right) \cdot x
\end{array}
| Step | Accuracy |
|---|---|
| Initial program | 52.1% |
| Taylor expanded in x around 0 | |
| unpow2 | N/A |
| associate-*l* | N/A |
| *-commutative | N/A |
| lower-*.f64 | N/A |
| *-commutative | N/A |
| lower-*.f64 | N/A |
| +-commutative | N/A |
| *-commutative | N/A |
| lower-fma.f64 | N/A |
| sub-neg | N/A |
| metadata-eval | N/A |
| lower-fma.f64 | N/A |
| unpow2 | N/A |
| lower-*.f64 | N/A |
| unpow2 | N/A |
| lower-*.f64 | 98.7 |
| Applied rewrites | 98.7% |
| Taylor expanded in x around 0 | |
| Applied rewrites | 98.0% |
Alternative 8:
(FPCore (x) :precision binary64 (* 0.16666666666666666 (* x x)))
double code(double x) {
return 0.16666666666666666 * (x * x);
}
real(8) function code(x)
real(8), intent (in) :: x
code = 0.16666666666666666d0 * (x * x)
end function
public static double code(double x) {
return 0.16666666666666666 * (x * x);
}
def code(x): return 0.16666666666666666 * (x * x)
function code(x) return Float64(0.16666666666666666 * Float64(x * x)) end
function tmp = code(x) tmp = 0.16666666666666666 * (x * x); end
code[x_] := N[(0.16666666666666666 * N[(x * x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
0.16666666666666666 \cdot \left(x \cdot x\right)
\end{array}
| Step | Accuracy |
|---|---|
| Initial program | 52.1% |
| Taylor expanded in x around 0 | |
| *-commutative | N/A |
| lower-*.f64 | N/A |
| unpow2 | N/A |
| lower-*.f64 | 98.0 |
| Applied rewrites | 98.0% |
| Final simplification | 98.0% |
Target program (from the :alt annotation in the reproduce command below):
(FPCore (x)
:precision binary64
(if (< (fabs x) 0.085)
(*
(* x x)
(fma
(fma
(fma -2.6455026455026456e-5 (* x x) 0.0003527336860670194)
(* x x)
-0.005555555555555556)
(* x x)
0.16666666666666666))
(log (/ (sinh x) x))))
double code(double x) {
    double tmp;
    if (fabs(x) < 0.085) {
        tmp = (x * x) * fma(fma(fma(-2.6455026455026456e-5, (x * x), 0.0003527336860670194), (x * x), -0.005555555555555556), (x * x), 0.16666666666666666);
    } else {
        tmp = log((sinh(x) / x));
    }
    return tmp;
}
function code(x)
    tmp = 0.0
    if (abs(x) < 0.085)
        tmp = Float64(Float64(x * x) * fma(fma(fma(-2.6455026455026456e-5, Float64(x * x), 0.0003527336860670194), Float64(x * x), -0.005555555555555556), Float64(x * x), 0.16666666666666666));
    else
        tmp = log(Float64(sinh(x) / x));
    end
    return tmp
end
code[x_] := If[Less[N[Abs[x], $MachinePrecision], 0.085], N[(N[(x * x), $MachinePrecision] * N[(N[(N[(-2.6455026455026456e-5 * N[(x * x), $MachinePrecision] + 0.0003527336860670194), $MachinePrecision] * N[(x * x), $MachinePrecision] + -0.005555555555555556), $MachinePrecision] * N[(x * x), $MachinePrecision] + 0.16666666666666666), $MachinePrecision]), $MachinePrecision], N[Log[N[(N[Sinh[x], $MachinePrecision] / x), $MachinePrecision]], $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\left|x\right| < 0.085:\\
\;\;\;\;\left(x \cdot x\right) \cdot \mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(-2.6455026455026456 \cdot 10^{-5}, x \cdot x, 0.0003527336860670194\right), x \cdot x, -0.005555555555555556\right), x \cdot x, 0.16666666666666666\right)\\
\mathbf{else}:\\
\;\;\;\;\log \left(\frac{\sinh x}{x}\right)\\
\end{array}
\end{array}
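As a sanity check on the branched program above (not part of the Herbie output), the following Python sketch compares it against a 200-bit mpmath reference; it assumes the mpmath package is installed and substitutes plain multiply-adds for fma, so the last bit or two may differ from the FPCore version.

# Sketch: compare the branched approximation of log(sinh(x)/x) with a
# high-precision reference. Assumes mpmath is available; fma is replaced by
# ordinary multiply-add, so results may differ from the report by an ulp or two.
import math
import mpmath

mpmath.mp.prec = 200  # bits of precision for the reference


def reference(x):
    """High-precision log(sinh(x)/x)."""
    xm = mpmath.mpf(x)
    return mpmath.log(mpmath.sinh(xm) / xm)


def candidate(x):
    """Branched double-precision approximation from the target above."""
    if abs(x) < 0.085:
        x2 = x * x
        # Degree-8 Maclaurin truncation: x^2/6 - x^4/180 + x^6/2835 - x^8/37800
        p = ((-2.6455026455026456e-5 * x2 + 0.0003527336860670194) * x2
             - 0.005555555555555556) * x2 + 0.16666666666666666
        return x2 * p
    return math.log(math.sinh(x) / x)


for x in [1e-8, 1e-4, 0.01, 0.08, 0.5, 3.0, 20.0]:
    ref = reference(x)
    rel = abs((mpmath.mpf(candidate(x)) - ref) / ref)
    print(f"x={x:<8g} candidate={candidate(x):.17g} rel_err={mpmath.nstr(rel, 3)}")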
Reproduce:
herbie shell --seed 2024308
(FPCore (x)
:name "bug500, discussion (missed optimization)"
:precision binary64
:alt
(! :herbie-platform default (if (< (fabs x) 17/200) (let ((x2 (* x x))) (* x2 (fma (fma (fma -1/37800 x2 1/2835) x2 -1/180) x2 1/6))) (log (/ (sinh x) x))))
(log (/ (sinh x) x)))