
(FPCore (x) :precision binary64 (/ (- 1.0 (cos x)) (* x x)))
double code(double x) {
return (1.0 - cos(x)) / (x * x);
}
! Evaluates (1 - cos x) / (x*x) in REAL(8) — direct form of the original program.
! NOTE(review): 1.0d0 - cos(x) cancels catastrophically for |x| << 1,
! and x = 0 produces 0/0.
real(8) function code(x)
real(8), intent (in) :: x
code = (1.0d0 - cos(x)) / (x * x)
end function
// (1 - cos x) / x^2, evaluated exactly as the original formula writes it.
// Cancellation-prone near x = 0 (this is the report's baseline program).
public static double code(double x) {
    double numerator = 1.0 - Math.cos(x);
    double denominator = x * x;
    return numerator / denominator;
}
def code(x):
    """Evaluate (1 - cos x) / x**2 with the same operations as the original one-liner.

    Cancellation-prone for |x| << 1; x == 0 raises ZeroDivisionError / yields NaN
    semantics identical to the direct formula.
    """
    numerator = 1.0 - math.cos(x)
    denominator = x * x
    return numerator / denominator
# (1 - cos x) / x^2 with each intermediate explicitly rounded to Float64,
# mirroring the binary64 FPCore semantics.
function code(x)
    num = Float64(1.0 - cos(x))
    den = Float64(x * x)
    return Float64(num / den)
end
% code: (1 - cos(x)) / (x*x), direct form; loses accuracy to cancellation for |x| << 1.
function tmp = code(x) tmp = (1.0 - cos(x)) / (x * x); end
(* code[x]: machine-precision evaluation of (1 - Cos[x]) / (x*x); each
   subexpression is rounded with N[..., $MachinePrecision] to mimic binary64.
   Cancellation-prone near x = 0. *)
code[x_] := N[(N[(1.0 - N[Cos[x], $MachinePrecision]), $MachinePrecision] / N[(x * x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1 - \cos x}{x \cdot x}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 10 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x) :precision binary64 (/ (- 1.0 (cos x)) (* x x)))
double code(double x) {
return (1.0 - cos(x)) / (x * x);
}
! Baseline program: (1 - cos x) / (x*x) in REAL(8), no cancellation guard.
real(8) function code(x)
real(8), intent (in) :: x
code = (1.0d0 - cos(x)) / (x * x)
end function
// Baseline: (1 - cos x) / x^2, computed directly.
public static double code(double x) {
    final double xx = x * x;
    return (1.0 - Math.cos(x)) / xx;
}
def code(x):
    # Baseline: direct translation of (1 - cos x) / (x * x); no cancellation guard.
    return (1.0 - math.cos(x)) / (x * x)
# Baseline: (1 - cos x)/x^2 with explicit Float64 rounding at each step; cancels badly near 0.
function code(x) return Float64(Float64(1.0 - cos(x)) / Float64(x * x)) end
function tmp = code(x) tmp = (1.0 - cos(x)) / (x * x); end
code[x_] := N[(N[(1.0 - N[Cos[x], $MachinePrecision]), $MachinePrecision] / N[(x * x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1 - \cos x}{x \cdot x}
\end{array}
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (if (<= x_m 0.0005) (fma -0.041666666666666664 (* x_m x_m) 0.5) (* (pow x_m -2.0) (* (sin x_m) (tan (* 0.5 x_m))))))
x_m = fabs(x);
double code(double x_m) {
double tmp;
if (x_m <= 0.0005) {
tmp = fma(-0.041666666666666664, (x_m * x_m), 0.5);
} else {
tmp = pow(x_m, -2.0) * (sin(x_m) * tan((0.5 * x_m)));
}
return tmp;
}
x_m = abs(x) function code(x_m) tmp = 0.0 if (x_m <= 0.0005) tmp = fma(-0.041666666666666664, Float64(x_m * x_m), 0.5); else tmp = Float64((x_m ^ -2.0) * Float64(sin(x_m) * tan(Float64(0.5 * x_m)))); end return tmp end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := If[LessEqual[x$95$m, 0.0005], N[(-0.041666666666666664 * N[(x$95$m * x$95$m), $MachinePrecision] + 0.5), $MachinePrecision], N[(N[Power[x$95$m, -2.0], $MachinePrecision] * N[(N[Sin[x$95$m], $MachinePrecision] * N[Tan[N[(0.5 * x$95$m), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
\mathbf{if}\;x\_m \leq 0.0005:\\
\;\;\;\;\mathsf{fma}\left(-0.041666666666666664, x\_m \cdot x\_m, 0.5\right)\\
\mathbf{else}:\\
\;\;\;\;{x\_m}^{-2} \cdot \left(\sin x\_m \cdot \tan \left(0.5 \cdot x\_m\right)\right)\\
\end{array}
\end{array}
if x < 5.0000000000000001e-4: Initial program 32.1%
Taylor expanded in x around 0
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6469.4
Applied rewrites69.4%
if 5.0000000000000001e-4 < x Initial program 98.8%
lift-/.f64N/A
lift--.f64N/A
flip--N/A
associate-/l/N/A
metadata-evalN/A
lift-cos.f64N/A
lift-cos.f64N/A
1-sub-cosN/A
times-fracN/A
lower-*.f64N/A
lower-/.f64N/A
lower-sin.f64N/A
lift-cos.f64N/A
hang-0p-tanN/A
lower-tan.f64N/A
lower-/.f6499.6
Applied rewrites99.6%
lift-*.f64N/A
lift-/.f64N/A
associate-*l/N/A
div-invN/A
metadata-evalN/A
lift-*.f64N/A
frac-timesN/A
lift-/.f64N/A
lift-/.f64N/A
lower-*.f64N/A
*-commutativeN/A
lower-*.f64N/A
lift-/.f64N/A
clear-numN/A
associate-/r/N/A
metadata-evalN/A
lower-*.f64N/A
lift-/.f64N/A
lift-/.f64N/A
frac-timesN/A
metadata-evalN/A
pow2N/A
pow-flipN/A
lower-pow.f64N/A
metadata-eval99.7
Applied rewrites99.7%
Final simplification76.5%
x_m = (fabs.f64 x)
(FPCore (x_m)
:precision binary64
(if (<= x_m 0.002)
(fma
(fma 0.001388888888888889 (* x_m x_m) -0.041666666666666664)
(* x_m x_m)
0.5)
(/ (/ (* (sin x_m) (tan (* 0.5 x_m))) x_m) x_m)))
x_m = fabs(x);
double code(double x_m) {
double tmp;
if (x_m <= 0.002) {
tmp = fma(fma(0.001388888888888889, (x_m * x_m), -0.041666666666666664), (x_m * x_m), 0.5);
} else {
tmp = ((sin(x_m) * tan((0.5 * x_m))) / x_m) / x_m;
}
return tmp;
}
x_m = abs(x) function code(x_m) tmp = 0.0 if (x_m <= 0.002) tmp = fma(fma(0.001388888888888889, Float64(x_m * x_m), -0.041666666666666664), Float64(x_m * x_m), 0.5); else tmp = Float64(Float64(Float64(sin(x_m) * tan(Float64(0.5 * x_m))) / x_m) / x_m); end return tmp end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := If[LessEqual[x$95$m, 0.002], N[(N[(0.001388888888888889 * N[(x$95$m * x$95$m), $MachinePrecision] + -0.041666666666666664), $MachinePrecision] * N[(x$95$m * x$95$m), $MachinePrecision] + 0.5), $MachinePrecision], N[(N[(N[(N[Sin[x$95$m], $MachinePrecision] * N[Tan[N[(0.5 * x$95$m), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / x$95$m), $MachinePrecision] / x$95$m), $MachinePrecision]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
\mathbf{if}\;x\_m \leq 0.002:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(0.001388888888888889, x\_m \cdot x\_m, -0.041666666666666664\right), x\_m \cdot x\_m, 0.5\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{\sin x\_m \cdot \tan \left(0.5 \cdot x\_m\right)}{x\_m}}{x\_m}\\
\end{array}
\end{array}
if x < 2e-3Initial program 32.3%
Taylor expanded in x around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
sub-negN/A
metadata-evalN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
unpow2N/A
lower-*.f6469.9
Applied rewrites69.9%
if 2e-3 < x Initial program 99.2%
lift-/.f64N/A
lift--.f64N/A
flip--N/A
associate-/l/N/A
metadata-evalN/A
lift-cos.f64N/A
lift-cos.f64N/A
1-sub-cosN/A
times-fracN/A
lower-*.f64N/A
lower-/.f64N/A
lower-sin.f64N/A
lift-cos.f64N/A
hang-0p-tanN/A
lower-tan.f64N/A
lower-/.f6499.6
Applied rewrites99.6%
lift-*.f64N/A
lift-/.f64N/A
associate-*l/N/A
lift-*.f64N/A
associate-/r*N/A
lower-/.f64N/A
lower-/.f64N/A
*-commutativeN/A
lower-*.f6499.7
lift-/.f64N/A
clear-numN/A
associate-/r/N/A
metadata-evalN/A
lower-*.f6499.7
Applied rewrites99.7%
Final simplification76.8%
x_m = (fabs.f64 x)
(FPCore (x_m)
:precision binary64
(if (<= x_m 0.035)
(fma
(fma 0.001388888888888889 (* x_m x_m) -0.041666666666666664)
(* x_m x_m)
0.5)
(/ (- (/ 1.0 x_m) (* (/ 1.0 x_m) (cos x_m))) x_m)))
x_m = fabs(x);
double code(double x_m) {
double tmp;
if (x_m <= 0.035) {
tmp = fma(fma(0.001388888888888889, (x_m * x_m), -0.041666666666666664), (x_m * x_m), 0.5);
} else {
tmp = ((1.0 / x_m) - ((1.0 / x_m) * cos(x_m))) / x_m;
}
return tmp;
}
x_m = abs(x) function code(x_m) tmp = 0.0 if (x_m <= 0.035) tmp = fma(fma(0.001388888888888889, Float64(x_m * x_m), -0.041666666666666664), Float64(x_m * x_m), 0.5); else tmp = Float64(Float64(Float64(1.0 / x_m) - Float64(Float64(1.0 / x_m) * cos(x_m))) / x_m); end return tmp end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := If[LessEqual[x$95$m, 0.035], N[(N[(0.001388888888888889 * N[(x$95$m * x$95$m), $MachinePrecision] + -0.041666666666666664), $MachinePrecision] * N[(x$95$m * x$95$m), $MachinePrecision] + 0.5), $MachinePrecision], N[(N[(N[(1.0 / x$95$m), $MachinePrecision] - N[(N[(1.0 / x$95$m), $MachinePrecision] * N[Cos[x$95$m], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / x$95$m), $MachinePrecision]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
\mathbf{if}\;x\_m \leq 0.035:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(0.001388888888888889, x\_m \cdot x\_m, -0.041666666666666664\right), x\_m \cdot x\_m, 0.5\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{1}{x\_m} - \frac{1}{x\_m} \cdot \cos x\_m}{x\_m}\\
\end{array}
\end{array}
if x < 0.035000000000000003Initial program 32.3%
Taylor expanded in x around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
sub-negN/A
metadata-evalN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
unpow2N/A
lower-*.f6469.9
Applied rewrites69.9%
if 0.035000000000000003 < x Initial program 99.2%
Applied rewrites99.2%
lift-/.f64N/A
lift-/.f64N/A
lift--.f64N/A
div-subN/A
lift-/.f64N/A
div-subN/A
associate-/r*N/A
metadata-evalN/A
frac-timesN/A
lift-/.f64N/A
lift-/.f64N/A
lift-/.f64N/A
frac-2negN/A
metadata-evalN/A
lift-neg.f64N/A
div-invN/A
lift-/.f64N/A
frac-2negN/A
lift-neg.f64N/A
associate-/l/N/A
neg-mul-1N/A
Applied rewrites99.3%
Final simplification76.7%
x_m = (fabs.f64 x)
(FPCore (x_m)
:precision binary64
(if (<= x_m 0.026)
(fma
(fma 0.001388888888888889 (* x_m x_m) -0.041666666666666664)
(* x_m x_m)
0.5)
(/ (/ (- 1.0 (cos x_m)) x_m) x_m)))
x_m = fabs(x);
double code(double x_m) {
double tmp;
if (x_m <= 0.026) {
tmp = fma(fma(0.001388888888888889, (x_m * x_m), -0.041666666666666664), (x_m * x_m), 0.5);
} else {
tmp = ((1.0 - cos(x_m)) / x_m) / x_m;
}
return tmp;
}
x_m = abs(x) function code(x_m) tmp = 0.0 if (x_m <= 0.026) tmp = fma(fma(0.001388888888888889, Float64(x_m * x_m), -0.041666666666666664), Float64(x_m * x_m), 0.5); else tmp = Float64(Float64(Float64(1.0 - cos(x_m)) / x_m) / x_m); end return tmp end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := If[LessEqual[x$95$m, 0.026], N[(N[(0.001388888888888889 * N[(x$95$m * x$95$m), $MachinePrecision] + -0.041666666666666664), $MachinePrecision] * N[(x$95$m * x$95$m), $MachinePrecision] + 0.5), $MachinePrecision], N[(N[(N[(1.0 - N[Cos[x$95$m], $MachinePrecision]), $MachinePrecision] / x$95$m), $MachinePrecision] / x$95$m), $MachinePrecision]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
\mathbf{if}\;x\_m \leq 0.026:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(0.001388888888888889, x\_m \cdot x\_m, -0.041666666666666664\right), x\_m \cdot x\_m, 0.5\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{1 - \cos x\_m}{x\_m}}{x\_m}\\
\end{array}
\end{array}
if x < 0.0259999999999999988Initial program 32.3%
Taylor expanded in x around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
sub-negN/A
metadata-evalN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
unpow2N/A
lower-*.f6469.9
Applied rewrites69.9%
if 0.0259999999999999988 < x Initial program 99.2%
Applied rewrites99.2%
x_m = (fabs.f64 x)
(FPCore (x_m)
:precision binary64
(if (<= x_m 0.026)
(fma
(fma 0.001388888888888889 (* x_m x_m) -0.041666666666666664)
(* x_m x_m)
0.5)
(/ (- 1.0 (cos x_m)) (* x_m x_m))))
x_m = fabs(x);
double code(double x_m) {
double tmp;
if (x_m <= 0.026) {
tmp = fma(fma(0.001388888888888889, (x_m * x_m), -0.041666666666666664), (x_m * x_m), 0.5);
} else {
tmp = (1.0 - cos(x_m)) / (x_m * x_m);
}
return tmp;
}
x_m = abs(x) function code(x_m) tmp = 0.0 if (x_m <= 0.026) tmp = fma(fma(0.001388888888888889, Float64(x_m * x_m), -0.041666666666666664), Float64(x_m * x_m), 0.5); else tmp = Float64(Float64(1.0 - cos(x_m)) / Float64(x_m * x_m)); end return tmp end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := If[LessEqual[x$95$m, 0.026], N[(N[(0.001388888888888889 * N[(x$95$m * x$95$m), $MachinePrecision] + -0.041666666666666664), $MachinePrecision] * N[(x$95$m * x$95$m), $MachinePrecision] + 0.5), $MachinePrecision], N[(N[(1.0 - N[Cos[x$95$m], $MachinePrecision]), $MachinePrecision] / N[(x$95$m * x$95$m), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
\mathbf{if}\;x\_m \leq 0.026:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(0.001388888888888889, x\_m \cdot x\_m, -0.041666666666666664\right), x\_m \cdot x\_m, 0.5\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{1 - \cos x\_m}{x\_m \cdot x\_m}\\
\end{array}
\end{array}
if x < 0.0259999999999999988Initial program 32.3%
Taylor expanded in x around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
sub-negN/A
metadata-evalN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
unpow2N/A
lower-*.f6469.9
Applied rewrites69.9%
if 0.0259999999999999988 < x Initial program 99.2%
x_m = (fabs.f64 x)
(FPCore (x_m)
:precision binary64
(if (<= x_m 4.8)
(fma
(fma 0.001388888888888889 (* x_m x_m) -0.041666666666666664)
(* x_m x_m)
0.5)
(/ -1.0 (* -0.16666666666666666 (* x_m x_m)))))
x_m = fabs(x);
double code(double x_m) {
double tmp;
if (x_m <= 4.8) {
tmp = fma(fma(0.001388888888888889, (x_m * x_m), -0.041666666666666664), (x_m * x_m), 0.5);
} else {
tmp = -1.0 / (-0.16666666666666666 * (x_m * x_m));
}
return tmp;
}
x_m = abs(x) function code(x_m) tmp = 0.0 if (x_m <= 4.8) tmp = fma(fma(0.001388888888888889, Float64(x_m * x_m), -0.041666666666666664), Float64(x_m * x_m), 0.5); else tmp = Float64(-1.0 / Float64(-0.16666666666666666 * Float64(x_m * x_m))); end return tmp end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := If[LessEqual[x$95$m, 4.8], N[(N[(0.001388888888888889 * N[(x$95$m * x$95$m), $MachinePrecision] + -0.041666666666666664), $MachinePrecision] * N[(x$95$m * x$95$m), $MachinePrecision] + 0.5), $MachinePrecision], N[(-1.0 / N[(-0.16666666666666666 * N[(x$95$m * x$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
\mathbf{if}\;x\_m \leq 4.8:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(0.001388888888888889, x\_m \cdot x\_m, -0.041666666666666664\right), x\_m \cdot x\_m, 0.5\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{-1}{-0.16666666666666666 \cdot \left(x\_m \cdot x\_m\right)}\\
\end{array}
\end{array}
if x < 4.79999999999999982Initial program 32.3%
Taylor expanded in x around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
sub-negN/A
metadata-evalN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
unpow2N/A
lower-*.f6469.9
Applied rewrites69.9%
if 4.79999999999999982 < x Initial program 99.2%
Applied rewrites99.2%
Taylor expanded in x around 0
sub-negN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
metadata-eval60.2
Applied rewrites60.2%
Taylor expanded in x around inf
Applied rewrites60.2%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (if (<= x_m 3.3) (fma -0.041666666666666664 (* x_m x_m) 0.5) (/ -1.0 (* -0.16666666666666666 (* x_m x_m)))))
x_m = fabs(x);
double code(double x_m) {
double tmp;
if (x_m <= 3.3) {
tmp = fma(-0.041666666666666664, (x_m * x_m), 0.5);
} else {
tmp = -1.0 / (-0.16666666666666666 * (x_m * x_m));
}
return tmp;
}
x_m = abs(x) function code(x_m) tmp = 0.0 if (x_m <= 3.3) tmp = fma(-0.041666666666666664, Float64(x_m * x_m), 0.5); else tmp = Float64(-1.0 / Float64(-0.16666666666666666 * Float64(x_m * x_m))); end return tmp end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := If[LessEqual[x$95$m, 3.3], N[(-0.041666666666666664 * N[(x$95$m * x$95$m), $MachinePrecision] + 0.5), $MachinePrecision], N[(-1.0 / N[(-0.16666666666666666 * N[(x$95$m * x$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
\mathbf{if}\;x\_m \leq 3.3:\\
\;\;\;\;\mathsf{fma}\left(-0.041666666666666664, x\_m \cdot x\_m, 0.5\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{-1}{-0.16666666666666666 \cdot \left(x\_m \cdot x\_m\right)}\\
\end{array}
\end{array}
if x < 3.2999999999999998Initial program 32.3%
Taylor expanded in x around 0
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6469.5
Applied rewrites69.5%
if 3.2999999999999998 < x Initial program 99.2%
Applied rewrites99.2%
Taylor expanded in x around 0
sub-negN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
metadata-eval60.2
Applied rewrites60.2%
Taylor expanded in x around inf
Applied rewrites60.2%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (if (<= x_m 1.4e+77) 0.5 (/ (- 1.0 1.0) (* x_m x_m))))
x_m = fabs(x);
/* Alternative 8: constant 0.5 up to the 1.4e77 cutoff; above it the
 * numerator (1 - 1) collapses, so the quotient is 0 (NaN input propagates). */
double code(double x_m) {
    if (x_m <= 1.4e+77)
        return 0.5;
    return (1.0 - 1.0) / (x_m * x_m);
}
! Alternative 8 (Fortran): on x_m = |x|, return the constant 0.5 below the
! 1.4e77 cutoff; above it the numerator (1 - 1) is exactly zero.
x_m = abs(x)
real(8) function code(x_m)
real(8), intent (in) :: x_m
real(8) :: tmp
if (x_m <= 1.4d+77) then
tmp = 0.5d0
else
tmp = (1.0d0 - 1.0d0) / (x_m * x_m)
end if
code = tmp
end function
x_m = Math.abs(x);
// Alternative 8 (Java): 0.5 below the 1.4e77 cutoff; (1 - 1)/x^2 above it.
public static double code(double x_m) {
    if (x_m <= 1.4e+77) {
        return 0.5;
    }
    return (1.0 - 1.0) / (x_m * x_m);
}
# Report preprocessing step: x_m = math.fabs(x) is applied before calling code.
# Fix: the original report line flattened the assignment and the def onto one
# physical line, which is not valid Python; reformatted here.
def code(x_m):
    """Return 0.5 up to the 1.4e77 cutoff; above it (1-1)/x_m^2, which is 0.0
    for any finite or infinite x_m (NaN propagates)."""
    if x_m <= 1.4e+77:
        return 0.5
    return (1.0 - 1.0) / (x_m * x_m)
x_m = abs(x) function code(x_m) tmp = 0.0 if (x_m <= 1.4e+77) tmp = 0.5; else tmp = Float64(Float64(1.0 - 1.0) / Float64(x_m * x_m)); end return tmp end
x_m = abs(x); function tmp_2 = code(x_m) tmp = 0.0; if (x_m <= 1.4e+77) tmp = 0.5; else tmp = (1.0 - 1.0) / (x_m * x_m); end tmp_2 = tmp; end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := If[LessEqual[x$95$m, 1.4e+77], 0.5, N[(N[(1.0 - 1.0), $MachinePrecision] / N[(x$95$m * x$95$m), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
\mathbf{if}\;x\_m \leq 1.4 \cdot 10^{+77}:\\
\;\;\;\;0.5\\
\mathbf{else}:\\
\;\;\;\;\frac{1 - 1}{x\_m \cdot x\_m}\\
\end{array}
\end{array}
if x < 1.4e77Initial program 36.7%
Taylor expanded in x around 0
Applied rewrites65.7%
if 1.4e77 < x Initial program 99.2%
Taylor expanded in x around 0
Applied rewrites70.6%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (/ -1.0 (fma -0.16666666666666666 (* x_m x_m) -2.0)))
x_m = fabs(x);
double code(double x_m) {
return -1.0 / fma(-0.16666666666666666, (x_m * x_m), -2.0);
}
x_m = abs(x) function code(x_m) return Float64(-1.0 / fma(-0.16666666666666666, Float64(x_m * x_m), -2.0)) end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := N[(-1.0 / N[(-0.16666666666666666 * N[(x$95$m * x$95$m), $MachinePrecision] + -2.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
x_m = \left|x\right|
\\
\frac{-1}{\mathsf{fma}\left(-0.16666666666666666, x\_m \cdot x\_m, -2\right)}
\end{array}
Initial program 47.7%
Applied rewrites47.7%
Taylor expanded in x around 0
sub-negN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
metadata-eval79.6
Applied rewrites79.6%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 0.5)
x_m = fabs(x);
/* Alternative 10: the constant 0.5 everywhere (the limit of the expression
 * as x -> 0); the argument is intentionally unused. */
double code(double x_m) {
    (void) x_m;
    return 0.5;
}
x_m = abs(x)
! Alternative 10 (Fortran): constant approximation, code(x_m) == 0.5 for every input.
real(8) function code(x_m)
real(8), intent (in) :: x_m
code = 0.5d0
end function
x_m = Math.abs(x);
// Alternative 10 (Java): constant approximation; always returns 1/2.
public static double code(double x_m) {
    final double HALF = 0.5;
    return HALF;
}
# Report preprocessing step: x_m = math.fabs(x) is applied before calling code.
# Fix: the original report line flattened the assignment and the def onto one
# physical line, which is not valid Python; reformatted here.
def code(x_m):
    """Constant approximation of (1 - cos x)/x^2: always 0.5."""
    return 0.5
x_m = abs(x) function code(x_m) return 0.5 end
x_m = abs(x); function tmp = code(x_m) tmp = 0.5; end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := 0.5
\begin{array}{l}
x_m = \left|x\right|
\\
0.5
\end{array}
Initial program 47.7%
Taylor expanded in x around 0
Applied rewrites54.8%
herbie shell --seed 2024242
(FPCore (x)
:name "cos2 (problem 3.4.1)"
:precision binary64
(/ (- 1.0 (cos x)) (* x x)))