
(FPCore (x) :precision binary64 (- (sin x) x))
double code(double x) {
return sin(x) - x;
}
real(8) function code(x)
real(8), intent (in) :: x
code = sin(x) - x
end function
public static double code(double x) {
return Math.sin(x) - x;
}
def code(x): return math.sin(x) - x
function code(x) return Float64(sin(x) - x) end
function tmp = code(x)
    tmp = sin(x) - x;
end
code[x_] := N[(N[Sin[x], $MachinePrecision] - x), $MachinePrecision]
\begin{array}{l}
\\
\sin x - x
\end{array}
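For small x, sin x agrees with x in all leading digits, so the subtraction cancels almost every significant bit and the binary64 result is dominated by rounding error. A minimal sketch of the cancellation, using mpmath (an assumed dependency, not part of this report) as a high-precision reference:

import math
from mpmath import mp, mpf

mp.dps = 50  # 50-digit reference values

for x in [1e-1, 1e-4, 1e-8]:
    naive = math.sin(x) - x           # plain binary64 evaluation
    exact = mp.sin(mpf(x)) - mpf(x)   # high-precision reference
    rel_err = abs((mpf(naive) - exact) / exact)
    print(f"x={x:g}  naive={naive:.3e}  rel_err={float(rel_err):.2e}")

The relative error grows as x shrinks (at x = 1e-8 the subtraction returns exactly 0.0), which is why Herbie scores the initial program at only 74.8% accuracy below.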
Sampling outcomes in binary64 precision:
Herbie found 8 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
Initial program:
(FPCore (x) :precision binary64 (- (sin x) x))
double code(double x) {
return sin(x) - x;
}
real(8) function code(x)
real(8), intent (in) :: x
code = sin(x) - x
end function
public static double code(double x) {
return Math.sin(x) - x;
}
def code(x): return math.sin(x) - x
function code(x) return Float64(sin(x) - x) end
function tmp = code(x)
    tmp = sin(x) - x;
end
code[x_] := N[(N[Sin[x], $MachinePrecision] - x), $MachinePrecision]
\begin{array}{l}
\\
\sin x - x
\end{array}
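The Accuracy figures throughout this report come from Herbie's bits-of-error metric: roughly, the base-2 logarithm of how many binary64 values separate the computed result from the correctly rounded one, averaged over the sampled inputs and rescaled to a percentage of the 64 bits in the format. A rough per-point sketch of that measure (the ordinal mapping and the mpmath reference are illustrative assumptions, not Herbie's own code):

import math
import struct
from mpmath import mp, mpf

mp.dps = 50

def ordinal(f):
    # Map a double to an integer so that adjacent doubles differ by 1
    i = struct.unpack("<q", struct.pack("<d", f))[0]
    return i if i >= 0 else -(i + 2**63)

def bits_of_error(computed, exact):
    rounded = float(exact)  # correctly rounded binary64 reference
    return math.log2(abs(ordinal(computed) - ordinal(rounded)) + 1)

x = 1e-8
print(bits_of_error(math.sin(x) - x, mp.sin(mpf(x)) - mpf(x)))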
Alternative 1:
(FPCore (x)
:precision binary64
(let* ((t_0 (fma (* x x) -0.0001984126984126984 0.008333333333333333))
(t_1 (* x t_0)))
(*
x
(/
(* (* x x) (fma (* x x) (* t_1 t_1) -0.027777777777777776))
(fma (* x x) t_0 0.16666666666666666)))))
double code(double x) {
double t_0 = fma((x * x), -0.0001984126984126984, 0.008333333333333333);
double t_1 = x * t_0;
return x * (((x * x) * fma((x * x), (t_1 * t_1), -0.027777777777777776)) / fma((x * x), t_0, 0.16666666666666666));
}
function code(x)
    t_0 = fma(Float64(x * x), -0.0001984126984126984, 0.008333333333333333)
    t_1 = Float64(x * t_0)
    return Float64(x * Float64(Float64(Float64(x * x) * fma(Float64(x * x), Float64(t_1 * t_1), -0.027777777777777776)) / fma(Float64(x * x), t_0, 0.16666666666666666)))
end
code[x_] := Block[{t$95$0 = N[(N[(x * x), $MachinePrecision] * -0.0001984126984126984 + 0.008333333333333333), $MachinePrecision]}, Block[{t$95$1 = N[(x * t$95$0), $MachinePrecision]}, N[(x * N[(N[(N[(x * x), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * N[(t$95$1 * t$95$1), $MachinePrecision] + -0.027777777777777776), $MachinePrecision]), $MachinePrecision] / N[(N[(x * x), $MachinePrecision] * t$95$0 + 0.16666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \mathsf{fma}\left(x \cdot x, -0.0001984126984126984, 0.008333333333333333\right)\\
t_1 := x \cdot t\_0\\
x \cdot \frac{\left(x \cdot x\right) \cdot \mathsf{fma}\left(x \cdot x, t\_1 \cdot t\_1, -0.027777777777777776\right)}{\mathsf{fma}\left(x \cdot x, t\_0, 0.16666666666666666\right)}
\end{array}
\end{array}
Initial program: 74.8%
Taylor expanded in x around 0
cube-mult: N/A
unpow2: N/A
associate-*r*: N/A
lower-*.f64: N/A
unpow2: N/A
associate-*l*: N/A
lower-*.f64: N/A
lower-*.f64: N/A
sub-neg: N/A
metadata-eval: N/A
lower-fma.f64: N/A
unpow2: N/A
lower-*.f64: N/A
+-commutative: N/A
*-commutative: N/A
unpow2: N/A
associate-*l*: N/A
lower-fma.f64: N/A
lower-*.f64: 98.3
Applied rewrites: 98.3%
lift-*.f64: N/A
lift-*.f64: N/A
lift-fma.f64: N/A
lift-fma.f64: N/A
associate-*r*: N/A
lift-*.f64: N/A
lift-fma.f64: N/A
flip-+: N/A
associate-*r/: N/A
lower-/.f64: N/A
Applied rewrites: 98.3%
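Several steps above lower to fma.f64. A fused multiply-add computes a*b + c with a single rounding, where a separate multiply and add round twice; a small sketch of the difference (math.fma requires Python 3.13+):

import math  # math.fma is available from Python 3.13

a = b = 1.0 + 2.0**-27
c = -(1.0 + 2.0**-26)
print(a * b + c)          # 0.0: the product rounds away the 2**-54 term
print(math.fma(a, b, c))  # 5.55e-17, i.e. exactly 2**-54: one rounding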
Alternative 2:
(FPCore (x)
:precision binary64
(/
(* x (* x x))
(/
1.0
(fma
(* x x)
(fma (* x x) -0.0001984126984126984 0.008333333333333333)
-0.16666666666666666))))
double code(double x) {
return (x * (x * x)) / (1.0 / fma((x * x), fma((x * x), -0.0001984126984126984, 0.008333333333333333), -0.16666666666666666));
}
function code(x) return Float64(Float64(x * Float64(x * x)) / Float64(1.0 / fma(Float64(x * x), fma(Float64(x * x), -0.0001984126984126984, 0.008333333333333333), -0.16666666666666666))) end
code[x_] := N[(N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision] / N[(1.0 / N[(N[(x * x), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * -0.0001984126984126984 + 0.008333333333333333), $MachinePrecision] + -0.16666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{x \cdot \left(x \cdot x\right)}{\frac{1}{\mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, -0.0001984126984126984, 0.008333333333333333\right), -0.16666666666666666\right)}}
\end{array}
Initial program: 74.8%
Taylor expanded in x around 0
cube-mult: N/A
unpow2: N/A
associate-*r*: N/A
lower-*.f64: N/A
unpow2: N/A
associate-*l*: N/A
lower-*.f64: N/A
lower-*.f64: N/A
sub-neg: N/A
metadata-eval: N/A
lower-fma.f64: N/A
unpow2: N/A
lower-*.f64: N/A
+-commutative: N/A
*-commutative: N/A
unpow2: N/A
associate-*l*: N/A
lower-fma.f64: N/A
lower-*.f64: 98.3
Applied rewrites: 98.3%
lift-*.f64: N/A
lift-*.f64: N/A
lift-fma.f64: N/A
lift-fma.f64: N/A
associate-*r*: N/A
lift-*.f64: N/A
lift-fma.f64: N/A
flip-+: N/A
associate-*r/: N/A
lower-/.f64: N/A
Applied rewrites: 98.3%
Applied rewrites: 98.3%
Alternative 3:
(FPCore (x) :precision binary64 (* (* x (* x x)) (fma x (* x (fma (* x x) -0.0001984126984126984 0.008333333333333333)) -0.16666666666666666)))
double code(double x) {
return (x * (x * x)) * fma(x, (x * fma((x * x), -0.0001984126984126984, 0.008333333333333333)), -0.16666666666666666);
}
function code(x) return Float64(Float64(x * Float64(x * x)) * fma(x, Float64(x * fma(Float64(x * x), -0.0001984126984126984, 0.008333333333333333)), -0.16666666666666666)) end
code[x_] := N[(N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision] * N[(x * N[(x * N[(N[(x * x), $MachinePrecision] * -0.0001984126984126984 + 0.008333333333333333), $MachinePrecision]), $MachinePrecision] + -0.16666666666666666), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(x \cdot \left(x \cdot x\right)\right) \cdot \mathsf{fma}\left(x, x \cdot \mathsf{fma}\left(x \cdot x, -0.0001984126984126984, 0.008333333333333333\right), -0.16666666666666666\right)
\end{array}
Initial program: 74.8%
Taylor expanded in x around 0
cube-mult: N/A
unpow2: N/A
associate-*r*: N/A
lower-*.f64: N/A
unpow2: N/A
associate-*l*: N/A
lower-*.f64: N/A
lower-*.f64: N/A
sub-neg: N/A
metadata-eval: N/A
lower-fma.f64: N/A
unpow2: N/A
lower-*.f64: N/A
+-commutative: N/A
*-commutative: N/A
unpow2: N/A
associate-*l*: N/A
lower-fma.f64: N/A
lower-*.f64: 98.3
Applied rewrites: 98.3%
lift-*.f64: N/A
lift-*.f64: N/A
lift-fma.f64: N/A
lift-fma.f64: N/A
associate-*r*: N/A
lift-*.f64: N/A
associate-*l*: N/A
lift-*.f64: N/A
*-commutative: N/A
lower-*.f64: 98.3
Applied rewrites: 98.3%
Final simplification: 98.3%
Alternative 4:
(FPCore (x)
:precision binary64
(*
x
(*
x
(*
x
(fma
(* x x)
(fma x (* x -0.0001984126984126984) 0.008333333333333333)
-0.16666666666666666)))))
double code(double x) {
return x * (x * (x * fma((x * x), fma(x, (x * -0.0001984126984126984), 0.008333333333333333), -0.16666666666666666)));
}
function code(x) return Float64(x * Float64(x * Float64(x * fma(Float64(x * x), fma(x, Float64(x * -0.0001984126984126984), 0.008333333333333333), -0.16666666666666666)))) end
code[x_] := N[(x * N[(x * N[(x * N[(N[(x * x), $MachinePrecision] * N[(x * N[(x * -0.0001984126984126984), $MachinePrecision] + 0.008333333333333333), $MachinePrecision] + -0.16666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x \cdot \left(x \cdot \left(x \cdot \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x, x \cdot -0.0001984126984126984, 0.008333333333333333\right), -0.16666666666666666\right)\right)\right)
\end{array}
Initial program: 74.8%
Taylor expanded in x around 0
cube-mult: N/A
unpow2: N/A
associate-*r*: N/A
lower-*.f64: N/A
unpow2: N/A
associate-*l*: N/A
lower-*.f64: N/A
lower-*.f64: N/A
sub-neg: N/A
metadata-eval: N/A
lower-fma.f64: N/A
unpow2: N/A
lower-*.f64: N/A
+-commutative: N/A
*-commutative: N/A
unpow2: N/A
associate-*l*: N/A
lower-fma.f64: N/A
lower-*.f64: 98.3
Applied rewrites: 98.3%
Alternative 5:
(FPCore (x) :precision binary64 (* (* x (* x x)) (fma 0.008333333333333333 (* x x) -0.16666666666666666)))
double code(double x) {
return (x * (x * x)) * fma(0.008333333333333333, (x * x), -0.16666666666666666);
}
function code(x) return Float64(Float64(x * Float64(x * x)) * fma(0.008333333333333333, Float64(x * x), -0.16666666666666666)) end
code[x_] := N[(N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision] * N[(0.008333333333333333 * N[(x * x), $MachinePrecision] + -0.16666666666666666), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(x \cdot \left(x \cdot x\right)\right) \cdot \mathsf{fma}\left(0.008333333333333333, x \cdot x, -0.16666666666666666\right)
\end{array}
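The decimal constants in these alternatives are the correctly rounded binary64 values of the Taylor coefficients of sin(x) - x = -x^3/6 + x^5/120 - x^7/5040 + O(x^9). A quick check that each literal is the shortest representation of the rounded fraction (float(Fraction(...)) rounds correctly):

from fractions import Fraction

# Correctly rounded binary64 values of the series coefficients
print(float(Fraction(1, 6)))     # 0.16666666666666666
print(float(Fraction(1, 120)))   # 0.008333333333333333
print(float(Fraction(1, 5040)))  # 0.0001984126984126984
print(float(Fraction(1, 36)))    # 0.027777777777777776, the constant in Alternative 1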
Initial program: 74.8%
Taylor expanded in x around 0
cube-mult: N/A
unpow2: N/A
associate-*r*: N/A
lower-*.f64: N/A
unpow2: N/A
associate-*l*: N/A
lower-*.f64: N/A
lower-*.f64: N/A
sub-neg: N/A
metadata-eval: N/A
lower-fma.f64: N/A
unpow2: N/A
lower-*.f64: N/A
+-commutative: N/A
*-commutative: N/A
unpow2: N/A
associate-*l*: N/A
lower-fma.f64: N/A
lower-*.f64: 98.3
Applied rewrites: 98.3%
Taylor expanded in x around 0
lower-*.f64: N/A
cube-mult: N/A
unpow2: N/A
lower-*.f64: N/A
unpow2: N/A
lower-*.f64: N/A
sub-neg: N/A
metadata-eval: N/A
lower-fma.f64: N/A
unpow2: N/A
lower-*.f64: 98.1
Applied rewrites: 98.1%
Alternative 6:
(FPCore (x) :precision binary64 (* x (* x (* x (fma x (* x 0.008333333333333333) -0.16666666666666666)))))
double code(double x) {
return x * (x * (x * fma(x, (x * 0.008333333333333333), -0.16666666666666666)));
}
function code(x) return Float64(x * Float64(x * Float64(x * fma(x, Float64(x * 0.008333333333333333), -0.16666666666666666)))) end
code[x_] := N[(x * N[(x * N[(x * N[(x * N[(x * 0.008333333333333333), $MachinePrecision] + -0.16666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x \cdot \left(x \cdot \left(x \cdot \mathsf{fma}\left(x, x \cdot 0.008333333333333333, -0.16666666666666666\right)\right)\right)
\end{array}
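Alternative 6 keeps only the first two series terms and evaluates them in Horner form. A sketch of the trade-off, assuming Python 3.13+ for math.fma and mpmath for the reference: near zero it is essentially exact, but the discarded x^7/5040 term makes the relative error grow roughly like x^4/840.

import math
from mpmath import mp, mpf

mp.dps = 50

def alt6(x):
    # x*(x*(x*fma(x, x/120, -1/6))): Horner form of -x**3/6 + x**5/120
    return x * (x * (x * math.fma(x, x * 0.008333333333333333, -0.16666666666666666)))

for x in [0.01, 0.1, 1.0]:
    exact = mp.sin(mpf(x)) - mpf(x)
    err = abs((mpf(alt6(x)) - exact) / exact)
    print(f"x={x:<4g}  rel_err={float(err):.2e}")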
Initial program: 74.8%
Taylor expanded in x around 0
cube-mult: N/A
unpow2: N/A
associate-*r*: N/A
lower-*.f64: N/A
unpow2: N/A
associate-*l*: N/A
lower-*.f64: N/A
lower-*.f64: N/A
sub-neg: N/A
*-commutative: N/A
unpow2: N/A
associate-*l*: N/A
metadata-eval: N/A
lower-fma.f64: N/A
lower-*.f64: 98.0
Applied rewrites: 98.0%
Alternative 7:
(FPCore (x) :precision binary64 (* (* x (* x x)) -0.16666666666666666))
double code(double x) {
return (x * (x * x)) * -0.16666666666666666;
}
real(8) function code(x)
real(8), intent (in) :: x
code = (x * (x * x)) * (-0.16666666666666666d0)
end function
public static double code(double x) {
return (x * (x * x)) * -0.16666666666666666;
}
def code(x): return (x * (x * x)) * -0.16666666666666666
function code(x) return Float64(Float64(x * Float64(x * x)) * -0.16666666666666666) end
function tmp = code(x)
    tmp = (x * (x * x)) * -0.16666666666666666;
end
code[x_] := N[(N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision] * -0.16666666666666666), $MachinePrecision]
\begin{array}{l}
\\
\left(x \cdot \left(x \cdot x\right)\right) \cdot -0.16666666666666666
\end{array}
Initial program: 74.8%
Taylor expanded in x around 0
lower-*.f64: N/A
cube-mult: N/A
unpow2: N/A
lower-*.f64: N/A
unpow2: N/A
lower-*.f64: 97.9
Applied rewrites: 97.9%
Final simplification: 97.9%
Alternative 8:
(FPCore (x) :precision binary64 0.0)
double code(double x) {
return 0.0;
}
real(8) function code(x)
real(8), intent (in) :: x
code = 0.0d0
end function
public static double code(double x) {
return 0.0;
}
def code(x): return 0.0
function code(x) return 0.0 end
function tmp = code(x)
    tmp = 0.0;
end
code[x_] := 0.0
\begin{array}{l}
\\
0
\end{array}
Initial program: 74.8%
Taylor expanded in x around 0
+-commutative: N/A
distribute-rgt-in: N/A
*-commutative: N/A
associate-*l*: N/A
*-lft-identity: N/A
lower-fma.f64: N/A
unpow2: N/A
lower-*.f64: N/A
lower-*.f64: 73.1
Applied rewrites: 73.1%
lift-*.f64: N/A
lift-*.f64: N/A
lift-fma.f64: N/A
sub-neg: N/A
lift-fma.f64: N/A
lift-*.f64: N/A
associate-*r*: N/A
distribute-lft1-in: N/A
lift-neg.f64: N/A
lower-fma.f64: N/A
lift-*.f64: N/A
associate-*r*: N/A
*-commutative: N/A
lift-*.f64: N/A
lower-fma.f64: 73.1
lift-*.f64: N/A
*-commutative: N/A
lower-*.f64: 73.1
Applied rewrites: 73.1%
Taylor expanded in x around 0
Applied rewrites: 72.4%
unsub-neg: N/A
*-lft-identity: N/A
+-inverses: 72.4
Applied rewrites: 72.4%
Developer target (from the input's :alt annotation):
(FPCore (x) :precision binary64 (if (< (fabs x) 0.07) (- (+ (- (/ (pow x 3.0) 6.0) (/ (pow x 5.0) 120.0)) (/ (pow x 7.0) 5040.0))) (- (sin x) x)))
double code(double x) {
    double tmp;
    if (fabs(x) < 0.07) {
        tmp = -(((pow(x, 3.0) / 6.0) - (pow(x, 5.0) / 120.0)) + (pow(x, 7.0) / 5040.0));
    } else {
        tmp = sin(x) - x;
    }
    return tmp;
}
real(8) function code(x)
    real(8), intent (in) :: x
    real(8) :: tmp
    if (abs(x) < 0.07d0) then
        tmp = -((((x ** 3.0d0) / 6.0d0) - ((x ** 5.0d0) / 120.0d0)) + ((x ** 7.0d0) / 5040.0d0))
    else
        tmp = sin(x) - x
    end if
    code = tmp
end function
public static double code(double x) {
    double tmp;
    if (Math.abs(x) < 0.07) {
        tmp = -(((Math.pow(x, 3.0) / 6.0) - (Math.pow(x, 5.0) / 120.0)) + (Math.pow(x, 7.0) / 5040.0));
    } else {
        tmp = Math.sin(x) - x;
    }
    return tmp;
}
def code(x):
    tmp = 0
    if math.fabs(x) < 0.07:
        tmp = -(((math.pow(x, 3.0) / 6.0) - (math.pow(x, 5.0) / 120.0)) + (math.pow(x, 7.0) / 5040.0))
    else:
        tmp = math.sin(x) - x
    return tmp
function code(x)
    tmp = 0.0
    if (abs(x) < 0.07)
        tmp = Float64(-Float64(Float64(Float64((x ^ 3.0) / 6.0) - Float64((x ^ 5.0) / 120.0)) + Float64((x ^ 7.0) / 5040.0)))
    else
        tmp = Float64(sin(x) - x)
    end
    return tmp
end
function tmp_2 = code(x)
    tmp = 0.0;
    if (abs(x) < 0.07)
        tmp = -((((x ^ 3.0) / 6.0) - ((x ^ 5.0) / 120.0)) + ((x ^ 7.0) / 5040.0));
    else
        tmp = sin(x) - x;
    end
    tmp_2 = tmp;
end
code[x_] := If[Less[N[Abs[x], $MachinePrecision], 0.07], (-N[(N[(N[(N[Power[x, 3.0], $MachinePrecision] / 6.0), $MachinePrecision] - N[(N[Power[x, 5.0], $MachinePrecision] / 120.0), $MachinePrecision]), $MachinePrecision] + N[(N[Power[x, 7.0], $MachinePrecision] / 5040.0), $MachinePrecision]), $MachinePrecision]), N[(N[Sin[x], $MachinePrecision] - x), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\left|x\right| < 0.07:\\
\;\;\;\;-\left(\left(\frac{{x}^{3}}{6} - \frac{{x}^{5}}{120}\right) + \frac{{x}^{7}}{5040}\right)\\
\mathbf{else}:\\
\;\;\;\;\sin x - x\\
\end{array}
\end{array}
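The threshold |x| = 0.07 sits roughly where the two error sources cross: below it direct evaluation cancels badly, while the three-term series keeps the truncation error small (about 2e-12 in relative terms at the threshold, shrinking like x^6 below it). A sketch of the target's behavior against an mpmath reference (assumed dependency):

import math
from mpmath import mp, mpf

mp.dps = 50

def target(x):
    if abs(x) < 0.07:
        # three-term Taylor series, negated as in the FPCore above
        return -((x**3 / 6.0 - x**5 / 120.0) + x**7 / 5040.0)
    return math.sin(x) - x

for x in [1e-8, 1e-3, 0.05, 0.07, 0.5]:
    exact = mp.sin(mpf(x)) - mpf(x)
    naive = math.sin(x) - x
    for name, val in (("naive", naive), ("target", target(x))):
        err = abs((mpf(val) - exact) / exact)
        print(f"x={x:<6g} {name:6s} rel_err={float(err):.2e}")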
Reproduce:
herbie shell --seed 2024216
(FPCore (x)
:name "bug500 (missed optimization)"
:precision binary64
:pre (and (< -1000.0 x) (< x 1000.0))
:alt
(! :herbie-platform default (if (< (fabs x) 7/100) (- (+ (- (/ (pow x 3) 6) (/ (pow x 5) 120)) (/ (pow x 7) 5040))) (- (sin x) x)))
(- (sin x) x))