
(FPCore (x) :precision binary64 (- 1.0 (cos x)))
/* Herbie original program: 1 - cos(x) in binary64.
   Over the sampled range |x| <= 0.01 (see the FPCore :pre at the end of the
   report), cos(x) is nearly 1, so the subtraction cancels most significant
   bits; the report samples this at only 52.2% accuracy. */
double code(double x) {
return 1.0 - cos(x);
}
! Herbie original program: 1 - cos(x) in binary64.
! Over the sampled range |x| <= 0.01, cos(x) is nearly 1, so the subtraction
! cancels most significant bits (sampled accuracy 52.2% per the report).
real(8) function code(x)
real(8), intent (in) :: x
code = 1.0d0 - cos(x)
end function
public static double code(double x) {
return 1.0 - Math.cos(x);
}
# Herbie original program: 1 - cos(x); cancels badly over the sampled |x| <= 0.01.
def code(x): return 1.0 - math.cos(x)
function code(x) return Float64(1.0 - cos(x)) end
function tmp = code(x) tmp = 1.0 - cos(x); end
code[x_] := N[(1.0 - N[Cos[x], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
1 - \cos x
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 9 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x) :precision binary64 (- 1.0 (cos x)))
/* Alternative (baseline): the original expression 1 - cos(x), repeated here
   as the first entry in the alternatives table. */
double code(double x) {
return 1.0 - cos(x);
}
! Alternative (baseline): the original expression 1 - cos(x), repeated here
! as the first entry in the alternatives table.
real(8) function code(x)
real(8), intent (in) :: x
code = 1.0d0 - cos(x)
end function
public static double code(double x) {
return 1.0 - Math.cos(x);
}
# Alternative (baseline): the original 1 - cos(x), repeated in the alternatives table.
def code(x): return 1.0 - math.cos(x)
function code(x) return Float64(1.0 - cos(x)) end
function tmp = code(x) tmp = 1.0 - cos(x); end
code[x_] := N[(1.0 - N[Cos[x], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
1 - \cos x
\end{array}
(FPCore (x) :precision binary64 (* (/ (sin x) -1.0) (/ (sin x) (- -1.0 (cos x)))))
/* Alternative: (sin x / -1) * (sin x / (-1 - cos x)).
   Algebraically equal to sin(x)^2 / (1 + cos x) = 1 - cos x, but with no
   cancelling subtraction near x = 0; the derivation trace below reports
   100.0% accuracy after these rewrites. */
double code(double x) {
return (sin(x) / -1.0) * (sin(x) / (-1.0 - cos(x)));
}
! Alternative: (sin x / -1) * (sin x / (-1 - cos x)).
! Algebraically equal to sin(x)**2 / (1 + cos x) = 1 - cos x, but with no
! cancelling subtraction near x = 0 (reported accuracy 100.0%).
real(8) function code(x)
real(8), intent (in) :: x
code = (sin(x) / (-1.0d0)) * (sin(x) / ((-1.0d0) - cos(x)))
end function
public static double code(double x) {
return (Math.sin(x) / -1.0) * (Math.sin(x) / (-1.0 - Math.cos(x)));
}
# Alternative: algebraically sin(x)**2 / (1 + cos x) = 1 - cos x; avoids cancellation near 0.
def code(x): return (math.sin(x) / -1.0) * (math.sin(x) / (-1.0 - math.cos(x)))
function code(x) return Float64(Float64(sin(x) / -1.0) * Float64(sin(x) / Float64(-1.0 - cos(x)))) end
function tmp = code(x) tmp = (sin(x) / -1.0) * (sin(x) / (-1.0 - cos(x))); end
code[x_] := N[(N[(N[Sin[x], $MachinePrecision] / -1.0), $MachinePrecision] * N[(N[Sin[x], $MachinePrecision] / N[(-1.0 - N[Cos[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\sin x}{-1} \cdot \frac{\sin x}{-1 - \cos x}
\end{array}
Initial program 52.2%
lift--.f64 N/A
flip-- N/A
frac-2neg N/A
metadata-eval N/A
lift-cos.f64 N/A
lift-cos.f64 N/A
1-sub-cos N/A
distribute-rgt-neg-in N/A
neg-mul-1 N/A
times-frac N/A
lower-*.f64 N/A
lower-/.f64 N/A
lower-sin.f64 N/A
lower-/.f64 N/A
lower-neg.f64 N/A
lower-sin.f64 N/A
lower-+.f64 100.0
Applied rewrites 100.0%
Final simplification 100.0%
(FPCore (x)
:precision binary64
(*
x
(*
x
(fma
(* x x)
(fma
(* x x)
(fma x (* x -2.48015873015873e-5) 0.001388888888888889)
-0.041666666666666664)
0.5))))
/* Alternative: degree-8 Taylor polynomial of 1 - cos(x) about 0, evaluated
   with fused multiply-adds (Horner form):
       x^2/2 - x^4/24 + x^6/720 - x^8/40320
   0.5, -0.04166..., 0.001388..., -2.480...e-5 are those coefficients. */
double code(double x) {
return x * (x * fma((x * x), fma((x * x), fma(x, (x * -2.48015873015873e-5), 0.001388888888888889), -0.041666666666666664), 0.5));
}
function code(x) return Float64(x * Float64(x * fma(Float64(x * x), fma(Float64(x * x), fma(x, Float64(x * -2.48015873015873e-5), 0.001388888888888889), -0.041666666666666664), 0.5))) end
code[x_] := N[(x * N[(x * N[(N[(x * x), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * N[(x * N[(x * -2.48015873015873e-5), $MachinePrecision] + 0.001388888888888889), $MachinePrecision] + -0.041666666666666664), $MachinePrecision] + 0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x \cdot \left(x \cdot \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x, x \cdot -2.48015873015873 \cdot 10^{-5}, 0.001388888888888889\right), -0.041666666666666664\right), 0.5\right)\right)
\end{array}
Initial program 52.2%
Taylor expanded in x around 0
unpow2 N/A
associate-*l* N/A
lower-*.f64 N/A
lower-*.f64 N/A
+-commutative N/A
lower-fma.f64 N/A
Applied rewrites 100.0%
(FPCore (x) :precision binary64 (* (* x x) (fma (* x x) (fma x (* x 0.001388888888888889) -0.041666666666666664) 0.5)))
/* Alternative: degree-6 Taylor polynomial of 1 - cos(x) about 0,
   x^2/2 - x^4/24 + x^6/720, evaluated with fused multiply-adds. */
double code(double x) {
return (x * x) * fma((x * x), fma(x, (x * 0.001388888888888889), -0.041666666666666664), 0.5);
}
function code(x) return Float64(Float64(x * x) * fma(Float64(x * x), fma(x, Float64(x * 0.001388888888888889), -0.041666666666666664), 0.5)) end
code[x_] := N[(N[(x * x), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * N[(x * N[(x * 0.001388888888888889), $MachinePrecision] + -0.041666666666666664), $MachinePrecision] + 0.5), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(x \cdot x\right) \cdot \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x, x \cdot 0.001388888888888889, -0.041666666666666664\right), 0.5\right)
\end{array}
Initial program 52.2%
Taylor expanded in x around 0
lower-*.f64 N/A
unpow2 N/A
lower-*.f64 N/A
+-commutative N/A
lower-fma.f64 N/A
unpow2 N/A
lower-*.f64 N/A
sub-neg N/A
metadata-eval N/A
*-commutative N/A
unpow2 N/A
associate-*l* N/A
lower-fma.f64 N/A
lower-*.f64 100.0
Applied rewrites 100.0%
(FPCore (x) :precision binary64 (fma (* x 0.5) x (* x (* x (* x (* x -0.041666666666666664))))))
/* Alternative: degree-4 Taylor polynomial of 1 - cos(x) about 0,
   x^2/2 - x^4/24, with the leading term computed by a single fma. */
double code(double x) {
return fma((x * 0.5), x, (x * (x * (x * (x * -0.041666666666666664)))));
}
function code(x) return fma(Float64(x * 0.5), x, Float64(x * Float64(x * Float64(x * Float64(x * -0.041666666666666664))))) end
code[x_] := N[(N[(x * 0.5), $MachinePrecision] * x + N[(x * N[(x * N[(x * N[(x * -0.041666666666666664), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(x \cdot 0.5, x, x \cdot \left(x \cdot \left(x \cdot \left(x \cdot -0.041666666666666664\right)\right)\right)\right)
\end{array}
Initial program 52.2%
Taylor expanded in x around 0
lower-*.f64 N/A
unpow2 N/A
lower-*.f64 N/A
+-commutative N/A
*-commutative N/A
lower-fma.f64 N/A
unpow2 N/A
lower-*.f64 99.9
Applied rewrites 99.9%
Applied rewrites 99.9%
(FPCore (x) :precision binary64 (* x (* x (fma x (* x -0.041666666666666664) 0.5))))
/* Alternative: degree-4 Taylor polynomial of 1 - cos(x) about 0,
   x^2 * (1/2 - x^2/24), in nested-product form with one fma. */
double code(double x) {
return x * (x * fma(x, (x * -0.041666666666666664), 0.5));
}
function code(x) return Float64(x * Float64(x * fma(x, Float64(x * -0.041666666666666664), 0.5))) end
code[x_] := N[(x * N[(x * N[(x * N[(x * -0.041666666666666664), $MachinePrecision] + 0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x \cdot \left(x \cdot \mathsf{fma}\left(x, x \cdot -0.041666666666666664, 0.5\right)\right)
\end{array}
Initial program 52.2%
Taylor expanded in x around 0
lower-*.f64 N/A
unpow2 N/A
lower-*.f64 N/A
+-commutative N/A
*-commutative N/A
lower-fma.f64 N/A
unpow2 N/A
lower-*.f64 99.9
Applied rewrites 99.9%
Applied rewrites 99.9%
Final simplification 99.9%
(FPCore (x) :precision binary64 (* (* x x) (fma (* x x) -0.041666666666666664 0.5)))
/* Alternative: degree-4 Taylor polynomial of 1 - cos(x) about 0,
   (x*x) * (1/2 - x^2/24), with x^2 factored out and one fma. */
double code(double x) {
return (x * x) * fma((x * x), -0.041666666666666664, 0.5);
}
function code(x) return Float64(Float64(x * x) * fma(Float64(x * x), -0.041666666666666664, 0.5)) end
code[x_] := N[(N[(x * x), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * -0.041666666666666664 + 0.5), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(x \cdot x\right) \cdot \mathsf{fma}\left(x \cdot x, -0.041666666666666664, 0.5\right)
\end{array}
Initial program 52.2%
Taylor expanded in x around 0
lower-*.f64 N/A
unpow2 N/A
lower-*.f64 N/A
+-commutative N/A
*-commutative N/A
lower-fma.f64 N/A
unpow2 N/A
lower-*.f64 99.9
Applied rewrites 99.9%
(FPCore (x) :precision binary64 (* x (* x 0.5)))
/* Alternative: degree-2 Taylor approximation of 1 - cos(x) about 0,
   x^2 / 2; reported 99.5% accuracy on the sampled range |x| <= 0.01. */
double code(double x) {
return x * (x * 0.5);
}
! Alternative: degree-2 Taylor approximation of 1 - cos(x) about 0, x**2 / 2
! (reported 99.5% accuracy on the sampled range |x| <= 0.01).
real(8) function code(x)
real(8), intent (in) :: x
code = x * (x * 0.5d0)
end function
public static double code(double x) {
return x * (x * 0.5);
}
# Alternative: degree-2 Taylor approximation x**2 / 2 of 1 - cos(x) about 0.
def code(x): return x * (x * 0.5)
function code(x) return Float64(x * Float64(x * 0.5)) end
function tmp = code(x) tmp = x * (x * 0.5); end
code[x_] := N[(x * N[(x * 0.5), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x \cdot \left(x \cdot 0.5\right)
\end{array}
Initial program 52.2%
Taylor expanded in x around 0
lower-*.f64 N/A
unpow2 N/A
lower-*.f64 99.5
Applied rewrites 99.5%
Applied rewrites 99.5%
Final simplification 99.5%
(FPCore (x) :precision binary64 (* (* x x) 0.5))
/* Alternative: degree-2 Taylor approximation x^2 / 2 of 1 - cos(x),
   with the other association (x*x) * 0.5. */
double code(double x) {
return (x * x) * 0.5;
}
! Alternative: degree-2 Taylor approximation x**2 / 2 of 1 - cos(x),
! with the other association (x*x) * 0.5.
real(8) function code(x)
real(8), intent (in) :: x
code = (x * x) * 0.5d0
end function
public static double code(double x) {
return (x * x) * 0.5;
}
# Alternative: degree-2 Taylor approximation (x*x) * 0.5 of 1 - cos(x) about 0.
def code(x): return (x * x) * 0.5
function code(x) return Float64(Float64(x * x) * 0.5) end
function tmp = code(x) tmp = (x * x) * 0.5; end
code[x_] := N[(N[(x * x), $MachinePrecision] * 0.5), $MachinePrecision]
\begin{array}{l}
\\
\left(x \cdot x\right) \cdot 0.5
\end{array}
Initial program 52.2%
Taylor expanded in x around 0
lower-*.f64 N/A
unpow2 N/A
lower-*.f64 99.5
Applied rewrites 99.5%
Final simplification 99.5%
(FPCore (x) :precision binary64 (- 1.0 1.0))
/* Degenerate alternative: constant 0 (degree-0 Taylor expansion of
   1 - cos(x)); the argument x is unused. Reported accuracy only 51.1%. */
double code(double x) {
return 1.0 - 1.0;
}
! Degenerate alternative: constant 0 (degree-0 Taylor expansion of
! 1 - cos(x)); the argument x is unused. Reported accuracy only 51.1%.
real(8) function code(x)
real(8), intent (in) :: x
code = 1.0d0 - 1.0d0
end function
public static double code(double x) {
return 1.0 - 1.0;
}
# Degenerate alternative: constant 0 (degree-0 Taylor expansion); x is unused.
def code(x): return 1.0 - 1.0
function code(x) return Float64(1.0 - 1.0) end
function tmp = code(x) tmp = 1.0 - 1.0; end
code[x_] := N[(1.0 - 1.0), $MachinePrecision]
\begin{array}{l}
\\
1 - 1
\end{array}
Initial program 52.2%
Taylor expanded in x around 0
Applied rewrites 51.1%
(FPCore (x) :precision binary64 (/ (* (sin x) (sin x)) (+ 1.0 (cos x))))
/* Recommended rewrite (matches the :alt clause in the final FPCore below):
   sin(x)^2 / (1 + cos x) == 1 - cos x exactly, with no cancelling
   subtraction near x = 0. */
double code(double x) {
return (sin(x) * sin(x)) / (1.0 + cos(x));
}
! Recommended rewrite (matches the :alt clause in the final FPCore below):
! sin(x)**2 / (1 + cos x) == 1 - cos x exactly, with no cancelling
! subtraction near x = 0.
real(8) function code(x)
real(8), intent (in) :: x
code = (sin(x) * sin(x)) / (1.0d0 + cos(x))
end function
public static double code(double x) {
return (Math.sin(x) * Math.sin(x)) / (1.0 + Math.cos(x));
}
# Recommended rewrite: sin(x)**2 / (1 + cos x) == 1 - cos x; no cancellation near 0.
def code(x): return (math.sin(x) * math.sin(x)) / (1.0 + math.cos(x))
function code(x) return Float64(Float64(sin(x) * sin(x)) / Float64(1.0 + cos(x))) end
function tmp = code(x) tmp = (sin(x) * sin(x)) / (1.0 + cos(x)); end
code[x_] := N[(N[(N[Sin[x], $MachinePrecision] * N[Sin[x], $MachinePrecision]), $MachinePrecision] / N[(1.0 + N[Cos[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\sin x \cdot \sin x}{1 + \cos x}
\end{array}
herbie shell --seed 2024233
(FPCore (x)
:name "ENA, Section 1.4, Mentioned, A"
:precision binary64
:pre (and (<= -0.01 x) (<= x 0.01))
:alt
(! :herbie-platform default (/ (* (sin x) (sin x)) (+ 1 (cos x))))
(- 1.0 (cos x)))