
(FPCore (x) :precision binary64 (- (sin x) x))
double code(double x) {
return sin(x) - x;
}
! Reference implementation: sin(x) - x in double precision.
! The naive difference loses accuracy for small |x| (cancellation).
real(8) function code(x)
real(8), intent (in) :: x
real(8) :: sine
sine = sin(x)
code = sine - x
end function
/** Reference implementation: sin(x) - x (cancellation-prone near zero). */
public static double code(double x) {
    final double sine = Math.sin(x);
    return sine - x;
}
def code(x):
    """Return sin(x) - x (naive form; loses precision for small |x|)."""
    sine_value = math.sin(x)
    return sine_value - x
# sin(x) - x in Float64 (naive difference; cancels for small |x|).
function code(x)
    s = sin(x)
    return Float64(s - x)
end
% sin(x) - x (naive difference; cancellation-prone near zero).
function tmp = code(x)
  s = sin(x);
  tmp = s - x;
end
(* sin(x) - x; each N[..., $MachinePrecision] wrapper mirrors one binary64 rounding step *)
code[x_] := N[(N[Sin[x], $MachinePrecision] - x), $MachinePrecision]
\begin{array}{l}
\\
\sin x - x
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 10 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x) :precision binary64 (- (sin x) x))
double code(double x) {
return sin(x) - x;
}
real(8) function code(x)
real(8), intent (in) :: x
code = sin(x) - x
end function
public static double code(double x) {
return Math.sin(x) - x;
}
def code(x): return math.sin(x) - x
function code(x) return Float64(sin(x) - x) end
function tmp = code(x) tmp = sin(x) - x; end
code[x_] := N[(N[Sin[x], $MachinePrecision] - x), $MachinePrecision]
\begin{array}{l}
\\
\sin x - x
\end{array}
(FPCore (x)
:precision binary64
(*
x
(fma
(* x x)
-0.16666666666666666
(*
(* x x)
(*
x
(*
x
(fma
(* x x)
(fma x (* x 2.7557319223985893e-6) -0.0001984126984126984)
0.008333333333333333)))))))
/* Degree-9 Taylor polynomial of sin(x) - x about 0, fused with fma:
     -x^3/6 + x^5/120 - x^7/5040 + x^9/362880
   Constants: -0.1666... = -1/6, 0.008333... = 1/120,
   -1.9841...e-4 = -1/5040, 2.75573...e-6 = 1/362880.
   The exact grouping is significant for the reported accuracy; do not reassociate. */
double code(double x) {
return x * fma((x * x), -0.16666666666666666, ((x * x) * (x * (x * fma((x * x), fma(x, (x * 2.7557319223985893e-6), -0.0001984126984126984), 0.008333333333333333)))));
}
function code(x) return Float64(x * fma(Float64(x * x), -0.16666666666666666, Float64(Float64(x * x) * Float64(x * Float64(x * fma(Float64(x * x), fma(x, Float64(x * 2.7557319223985893e-6), -0.0001984126984126984), 0.008333333333333333)))))) end
code[x_] := N[(x * N[(N[(x * x), $MachinePrecision] * -0.16666666666666666 + N[(N[(x * x), $MachinePrecision] * N[(x * N[(x * N[(N[(x * x), $MachinePrecision] * N[(x * N[(x * 2.7557319223985893e-6), $MachinePrecision] + -0.0001984126984126984), $MachinePrecision] + 0.008333333333333333), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x \cdot \mathsf{fma}\left(x \cdot x, -0.16666666666666666, \left(x \cdot x\right) \cdot \left(x \cdot \left(x \cdot \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x, x \cdot 2.7557319223985893 \cdot 10^{-6}, -0.0001984126984126984\right), 0.008333333333333333\right)\right)\right)\right)
\end{array}
Initial program 65.2%
Taylor expanded in x around 0
cube-mult N/A
unpow2 N/A
associate-*l* N/A
*-commutative N/A
*-lowering-*.f64 N/A
*-commutative N/A
*-lowering-*.f64 N/A
unpow2 N/A
*-lowering-*.f64 N/A
sub-neg N/A
metadata-eval N/A
accelerator-lowering-fma.f64 N/A
Simplified 99.2%
+-commutative N/A
distribute-lft-in N/A
accelerator-lowering-fma.f64 N/A
*-lowering-*.f64 N/A
*-lowering-*.f64 N/A
*-lowering-*.f64 N/A
associate-*l* N/A
*-lowering-*.f64 N/A
*-lowering-*.f64 N/A
associate-*r* N/A
accelerator-lowering-fma.f64 N/A
*-lowering-*.f64 N/A
accelerator-lowering-fma.f64 N/A
*-lowering-*.f64 99.2
Applied egg-rr 99.2%
(FPCore (x)
:precision binary64
(*
x
(*
(* x x)
(fma
x
(*
x
(fma
(* x x)
(fma x (* x 2.7557319223985893e-6) -0.0001984126984126984)
0.008333333333333333))
-0.16666666666666666))))
/* Same degree-9 Taylor polynomial of sin(x) - x as the first alternative,
   regrouped as x * (x^2 * fma(...)); coefficients are -1/6, 1/120, -1/5040, 1/362880.
   Grouping affects rounding; keep as generated. */
double code(double x) {
return x * ((x * x) * fma(x, (x * fma((x * x), fma(x, (x * 2.7557319223985893e-6), -0.0001984126984126984), 0.008333333333333333)), -0.16666666666666666));
}
function code(x) return Float64(x * Float64(Float64(x * x) * fma(x, Float64(x * fma(Float64(x * x), fma(x, Float64(x * 2.7557319223985893e-6), -0.0001984126984126984), 0.008333333333333333)), -0.16666666666666666))) end
code[x_] := N[(x * N[(N[(x * x), $MachinePrecision] * N[(x * N[(x * N[(N[(x * x), $MachinePrecision] * N[(x * N[(x * 2.7557319223985893e-6), $MachinePrecision] + -0.0001984126984126984), $MachinePrecision] + 0.008333333333333333), $MachinePrecision]), $MachinePrecision] + -0.16666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x \cdot \left(\left(x \cdot x\right) \cdot \mathsf{fma}\left(x, x \cdot \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x, x \cdot 2.7557319223985893 \cdot 10^{-6}, -0.0001984126984126984\right), 0.008333333333333333\right), -0.16666666666666666\right)\right)
\end{array}
Initial program 65.2%
Taylor expanded in x around 0
cube-multN/A
unpow2N/A
associate-*l*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
accelerator-lowering-fma.f64N/A
Simplified99.2%
*-commutativeN/A
*-lowering-*.f64N/A
associate-*l*N/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f64N/A
associate-*r*N/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f64N/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f6499.2
Applied egg-rr99.2%
Final simplification99.2%
(FPCore (x)
:precision binary64
(*
x
(*
(* x x)
(fma
(* x x)
(fma
x
(* x (fma x (* x 2.7557319223985893e-6) -0.0001984126984126984))
0.008333333333333333)
-0.16666666666666666))))
/* Degree-9 Taylor polynomial of sin(x) - x, third regrouping (outer fma keyed on x^2).
   Coefficients: -1/6, 1/120, -1/5040, 1/362880. Keep the fma nesting as generated. */
double code(double x) {
return x * ((x * x) * fma((x * x), fma(x, (x * fma(x, (x * 2.7557319223985893e-6), -0.0001984126984126984)), 0.008333333333333333), -0.16666666666666666));
}
function code(x) return Float64(x * Float64(Float64(x * x) * fma(Float64(x * x), fma(x, Float64(x * fma(x, Float64(x * 2.7557319223985893e-6), -0.0001984126984126984)), 0.008333333333333333), -0.16666666666666666))) end
code[x_] := N[(x * N[(N[(x * x), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * N[(x * N[(x * N[(x * N[(x * 2.7557319223985893e-6), $MachinePrecision] + -0.0001984126984126984), $MachinePrecision]), $MachinePrecision] + 0.008333333333333333), $MachinePrecision] + -0.16666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x \cdot \left(\left(x \cdot x\right) \cdot \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x, x \cdot \mathsf{fma}\left(x, x \cdot 2.7557319223985893 \cdot 10^{-6}, -0.0001984126984126984\right), 0.008333333333333333\right), -0.16666666666666666\right)\right)
\end{array}
Initial program 65.2%
Taylor expanded in x around 0
cube-multN/A
unpow2N/A
associate-*l*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
accelerator-lowering-fma.f64N/A
Simplified99.2%
(FPCore (x)
:precision binary64
(*
(* x x)
(*
x
(fma
(* x x)
(fma x (* x -0.0001984126984126984) 0.008333333333333333)
-0.16666666666666666))))
/* Degree-7 Taylor polynomial of sin(x) - x: -x^3/6 + x^5/120 - x^7/5040,
   with -1.9841...e-4 = -1/5040 and 0.008333... = 1/120. */
double code(double x) {
return (x * x) * (x * fma((x * x), fma(x, (x * -0.0001984126984126984), 0.008333333333333333), -0.16666666666666666));
}
function code(x) return Float64(Float64(x * x) * Float64(x * fma(Float64(x * x), fma(x, Float64(x * -0.0001984126984126984), 0.008333333333333333), -0.16666666666666666))) end
code[x_] := N[(N[(x * x), $MachinePrecision] * N[(x * N[(N[(x * x), $MachinePrecision] * N[(x * N[(x * -0.0001984126984126984), $MachinePrecision] + 0.008333333333333333), $MachinePrecision] + -0.16666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(x \cdot x\right) \cdot \left(x \cdot \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x, x \cdot -0.0001984126984126984, 0.008333333333333333\right), -0.16666666666666666\right)\right)
\end{array}
Initial program 65.2%
Taylor expanded in x around 0
cube-multN/A
unpow2N/A
associate-*r*N/A
*-lowering-*.f64N/A
unpow2N/A
associate-*l*N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
accelerator-lowering-fma.f64N/A
unpow2N/A
*-lowering-*.f64N/A
+-commutativeN/A
*-commutativeN/A
unpow2N/A
associate-*l*N/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f6499.0
Simplified99.0%
associate-*r*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f64N/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f6499.0
Applied egg-rr99.0%
Final simplification99.0%
(FPCore (x)
:precision binary64
(*
x
(*
x
(*
x
(fma
(* x x)
(fma x (* x -0.0001984126984126984) 0.008333333333333333)
-0.16666666666666666)))))
/* Degree-7 Taylor polynomial of sin(x) - x (same terms as the previous variant,
   -x^3/6 + x^5/120 - x^7/5040) with the leading x^3 factored as x*(x*(x*...)). */
double code(double x) {
return x * (x * (x * fma((x * x), fma(x, (x * -0.0001984126984126984), 0.008333333333333333), -0.16666666666666666)));
}
function code(x) return Float64(x * Float64(x * Float64(x * fma(Float64(x * x), fma(x, Float64(x * -0.0001984126984126984), 0.008333333333333333), -0.16666666666666666)))) end
code[x_] := N[(x * N[(x * N[(x * N[(N[(x * x), $MachinePrecision] * N[(x * N[(x * -0.0001984126984126984), $MachinePrecision] + 0.008333333333333333), $MachinePrecision] + -0.16666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x \cdot \left(x \cdot \left(x \cdot \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x, x \cdot -0.0001984126984126984, 0.008333333333333333\right), -0.16666666666666666\right)\right)\right)
\end{array}
Initial program 65.2%
Taylor expanded in x around 0
cube-multN/A
unpow2N/A
associate-*r*N/A
*-lowering-*.f64N/A
unpow2N/A
associate-*l*N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
accelerator-lowering-fma.f64N/A
unpow2N/A
*-lowering-*.f64N/A
+-commutativeN/A
*-commutativeN/A
unpow2N/A
associate-*l*N/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f6499.0
Simplified99.0%
(FPCore (x) :precision binary64 (* (* x x) (* x (fma x (* x 0.008333333333333333) -0.16666666666666666))))
/* Degree-5 Taylor polynomial of sin(x) - x: -x^3/6 + x^5/120,
   as (x*x) * (x * fma(x, x/120, -1/6)). */
double code(double x) {
return (x * x) * (x * fma(x, (x * 0.008333333333333333), -0.16666666666666666));
}
function code(x) return Float64(Float64(x * x) * Float64(x * fma(x, Float64(x * 0.008333333333333333), -0.16666666666666666))) end
code[x_] := N[(N[(x * x), $MachinePrecision] * N[(x * N[(x * N[(x * 0.008333333333333333), $MachinePrecision] + -0.16666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(x \cdot x\right) \cdot \left(x \cdot \mathsf{fma}\left(x, x \cdot 0.008333333333333333, -0.16666666666666666\right)\right)
\end{array}
Initial program 65.2%
Taylor expanded in x around 0
cube-multN/A
unpow2N/A
associate-*l*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
accelerator-lowering-fma.f64N/A
Simplified99.2%
+-commutativeN/A
distribute-lft-inN/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
associate-*l*N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
associate-*r*N/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f64N/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f6499.2
Applied egg-rr99.2%
Taylor expanded in x around 0
unpow3N/A
unpow2N/A
associate-*l*N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
sub-negN/A
unpow2N/A
associate-*r*N/A
*-commutativeN/A
metadata-evalN/A
accelerator-lowering-fma.f64N/A
*-commutativeN/A
*-lowering-*.f6498.5
Simplified98.5%
(FPCore (x) :precision binary64 (* x (* x (* x (fma x (* x 0.008333333333333333) -0.16666666666666666)))))
/* Degree-5 Taylor polynomial of sin(x) - x (-x^3/6 + x^5/120),
   with x^3 factored as x*(x*(x*...)). */
double code(double x) {
return x * (x * (x * fma(x, (x * 0.008333333333333333), -0.16666666666666666)));
}
function code(x) return Float64(x * Float64(x * Float64(x * fma(x, Float64(x * 0.008333333333333333), -0.16666666666666666)))) end
code[x_] := N[(x * N[(x * N[(x * N[(x * N[(x * 0.008333333333333333), $MachinePrecision] + -0.16666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x \cdot \left(x \cdot \left(x \cdot \mathsf{fma}\left(x, x \cdot 0.008333333333333333, -0.16666666666666666\right)\right)\right)
\end{array}
Initial program 65.2%
Taylor expanded in x around 0
cube-multN/A
unpow2N/A
associate-*r*N/A
*-lowering-*.f64N/A
unpow2N/A
associate-*l*N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
sub-negN/A
*-commutativeN/A
unpow2N/A
associate-*l*N/A
metadata-evalN/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f6498.4
Simplified98.4%
(FPCore (x) :precision binary64 (* (* x x) (* x -0.16666666666666666)))
/* Leading Taylor term of sin(x) - x: -x^3/6, keeping the original
   (x*x) * (x * -1/6) grouping bit-for-bit. */
double code(double x) {
    const double x_squared = x * x;
    const double scaled = x * -0.16666666666666666;
    return x_squared * scaled;
}
! Leading Taylor term of sin(x) - x: -x**3/6, grouped as (x*x) * (x * (-1/6)).
real(8) function code(x)
real(8), intent (in) :: x
code = (x * x) * (x * (-0.16666666666666666d0))
end function
public static double code(double x) {
return (x * x) * (x * -0.16666666666666666);
}
def code(x):
    """Leading Taylor term of sin(x) - x: -x**3/6, grouped as (x*x)*(x*-1/6)."""
    square = x * x
    scaled = x * -0.16666666666666666
    return square * scaled
function code(x) return Float64(Float64(x * x) * Float64(x * -0.16666666666666666)) end
function tmp = code(x) tmp = (x * x) * (x * -0.16666666666666666); end
code[x_] := N[(N[(x * x), $MachinePrecision] * N[(x * -0.16666666666666666), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(x \cdot x\right) \cdot \left(x \cdot -0.16666666666666666\right)
\end{array}
Initial program 65.2%
Taylor expanded in x around 0
cube-multN/A
unpow2N/A
associate-*l*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
accelerator-lowering-fma.f64N/A
Simplified99.2%
+-commutativeN/A
distribute-lft-inN/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
associate-*l*N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
associate-*r*N/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f64N/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f6499.2
Applied egg-rr99.2%
Taylor expanded in x around 0
unpow3N/A
unpow2N/A
associate-*l*N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
sub-negN/A
unpow2N/A
associate-*r*N/A
*-commutativeN/A
metadata-evalN/A
accelerator-lowering-fma.f64N/A
*-commutativeN/A
*-lowering-*.f6498.5
Simplified98.5%
Taylor expanded in x around 0
Simplified97.4%
(FPCore (x) :precision binary64 (* -0.16666666666666666 (* x (* x x))))
/* Leading Taylor term of sin(x) - x: -x^3/6, with the constant factored out front. */
double code(double x) {
return -0.16666666666666666 * (x * (x * x));
}
real(8) function code(x)
real(8), intent (in) :: x
code = (-0.16666666666666666d0) * (x * (x * x))
end function
public static double code(double x) {
return -0.16666666666666666 * (x * (x * x));
}
def code(x): return -0.16666666666666666 * (x * (x * x))
function code(x) return Float64(-0.16666666666666666 * Float64(x * Float64(x * x))) end
function tmp = code(x) tmp = -0.16666666666666666 * (x * (x * x)); end
code[x_] := N[(-0.16666666666666666 * N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
-0.16666666666666666 \cdot \left(x \cdot \left(x \cdot x\right)\right)
\end{array}
Initial program 65.2%
Taylor expanded in x around 0
*-lowering-*.f64N/A
cube-multN/A
unpow2N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6497.4
Simplified97.4%
(FPCore (x) :precision binary64 0.0)
/* Order-0 approximation: Herbie collapsed sin(x) - x to the constant 0. */
double code(double x) {
    (void) x;  /* input intentionally unused */
    return 0.0;
}
real(8) function code(x)
real(8), intent (in) :: x
code = 0.0d0
end function
public static double code(double x) {
return 0.0;
}
def code(x):
    """Constant-zero (order-0) approximation of sin(x) - x; ignores x."""
    return 0.0
function code(x) return 0.0 end
function tmp = code(x) tmp = 0.0; end
code[x_] := 0.0
\begin{array}{l}
\\
0
\end{array}
Initial program 65.2%
Taylor expanded in x around 0
Simplified 61.9%
+-inverses 61.9
Applied egg-rr 61.9%
(FPCore (x) :precision binary64 (if (< (fabs x) 0.07) (- (+ (- (/ (pow x 3.0) 6.0) (/ (pow x 5.0) 120.0)) (/ (pow x 7.0) 5040.0))) (- (sin x) x)))
/* sin(x) - x with a branch: below |x| < 0.07 the direct difference cancels
   catastrophically, so the negated degree-7 Taylor tail
   -(x^3/6 - x^5/120 + x^7/5040) is used instead. */
double code(double x) {
double tmp;
if (fabs(x) < 0.07) {
/* series branch for small |x| */
tmp = -(((pow(x, 3.0) / 6.0) - (pow(x, 5.0) / 120.0)) + (pow(x, 7.0) / 5040.0));
} else {
/* direct evaluation is accurate away from zero */
tmp = sin(x) - x;
}
return tmp;
}
! sin(x) - x with a branch: below |x| < 0.07 the direct difference cancels,
! so the negated degree-7 Taylor tail -(x**3/6 - x**5/120 + x**7/5040) is used.
real(8) function code(x)
real(8), intent (in) :: x
real(8) :: tmp
if (abs(x) < 0.07d0) then
! series branch for small |x|
tmp = -((((x ** 3.0d0) / 6.0d0) - ((x ** 5.0d0) / 120.0d0)) + ((x ** 7.0d0) / 5040.0d0))
else
! direct evaluation is accurate away from zero
tmp = sin(x) - x
end if
code = tmp
end function
/** sin(x) - x with a branch: below |x| < 0.07 the direct difference cancels
 *  catastrophically, so the negated degree-7 Taylor tail
 *  -(x^3/6 - x^5/120 + x^7/5040) is used instead. */
public static double code(double x) {
double tmp;
if (Math.abs(x) < 0.07) {
// series branch for small |x|
tmp = -(((Math.pow(x, 3.0) / 6.0) - (Math.pow(x, 5.0) / 120.0)) + (Math.pow(x, 7.0) / 5040.0));
} else {
// direct evaluation is accurate away from zero
tmp = Math.sin(x) - x;
}
return tmp;
}
def code(x): tmp = 0 if math.fabs(x) < 0.07: tmp = -(((math.pow(x, 3.0) / 6.0) - (math.pow(x, 5.0) / 120.0)) + (math.pow(x, 7.0) / 5040.0)) else: tmp = math.sin(x) - x return tmp
# sin(x) - x; for |x| < 0.07 uses the negated degree-7 Taylor tail
# -(x^3/6 - x^5/120 + x^7/5040) to avoid cancellation near zero.
function code(x) tmp = 0.0 if (abs(x) < 0.07) tmp = Float64(-Float64(Float64(Float64((x ^ 3.0) / 6.0) - Float64((x ^ 5.0) / 120.0)) + Float64((x ^ 7.0) / 5040.0))); else tmp = Float64(sin(x) - x); end return tmp end
% sin(x) - x; for |x| < 0.07 uses the negated degree-7 Taylor tail
% -(x^3/6 - x^5/120 + x^7/5040) to avoid cancellation near zero.
function tmp_2 = code(x) tmp = 0.0; if (abs(x) < 0.07) tmp = -((((x ^ 3.0) / 6.0) - ((x ^ 5.0) / 120.0)) + ((x ^ 7.0) / 5040.0)); else tmp = sin(x) - x; end tmp_2 = tmp; end
code[x_] := If[Less[N[Abs[x], $MachinePrecision], 0.07], (-N[(N[(N[(N[Power[x, 3.0], $MachinePrecision] / 6.0), $MachinePrecision] - N[(N[Power[x, 5.0], $MachinePrecision] / 120.0), $MachinePrecision]), $MachinePrecision] + N[(N[Power[x, 7.0], $MachinePrecision] / 5040.0), $MachinePrecision]), $MachinePrecision]), N[(N[Sin[x], $MachinePrecision] - x), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\left|x\right| < 0.07:\\
\;\;\;\;-\left(\left(\frac{{x}^{3}}{6} - \frac{{x}^{5}}{120}\right) + \frac{{x}^{7}}{5040}\right)\\
\mathbf{else}:\\
\;\;\;\;\sin x - x\\
\end{array}
\end{array}
herbie shell --seed 2024204
(FPCore (x)
:name "bug500 (missed optimization)"
:precision binary64
:pre (and (< -1000.0 x) (< x 1000.0))
:alt
(! :herbie-platform default (if (< (fabs x) 7/100) (- (+ (- (/ (pow x 3) 6) (/ (pow x 5) 120)) (/ (pow x 7) 5040))) (- (sin x) x)))
(- (sin x) x))