
(FPCore (x) :precision binary64 (* (cos x) (exp (* 10.0 (* x x)))))
double code(double x) {
return cos(x) * exp((10.0 * (x * x)));
}
real(8) function code(x)
real(8), intent (in) :: x
code = cos(x) * exp((10.0d0 * (x * x)))
end function
/** Evaluates cos(x) * e^(10*x^2) in double precision. */
public static double code(double x) {
double squared = x * x;
double growth = Math.exp(10.0 * squared);
return Math.cos(x) * growth;
}
def code(x):
    """Evaluate cos(x) * exp(10 * x**2) in binary64."""
    squared = x * x
    return math.cos(x) * math.exp(10.0 * squared)
function code(x) return Float64(cos(x) * exp(Float64(10.0 * Float64(x * x)))) end
function tmp = code(x) tmp = cos(x) * exp((10.0 * (x * x))); end
code[x_] := N[(N[Cos[x], $MachinePrecision] * N[Exp[N[(10.0 * N[(x * x), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\cos x \cdot e^{10 \cdot \left(x \cdot x\right)}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 17 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x) :precision binary64 (* (cos x) (exp (* 10.0 (* x x)))))
double code(double x) {
return cos(x) * exp((10.0 * (x * x)));
}
real(8) function code(x)
real(8), intent (in) :: x
code = cos(x) * exp((10.0d0 * (x * x)))
end function
public static double code(double x) {
return Math.cos(x) * Math.exp((10.0 * (x * x)));
}
def code(x): return math.cos(x) * math.exp((10.0 * (x * x)))
function code(x) return Float64(cos(x) * exp(Float64(10.0 * Float64(x * x)))) end
function tmp = code(x) tmp = cos(x) * exp((10.0 * (x * x))); end
code[x_] := N[(N[Cos[x], $MachinePrecision] * N[Exp[N[(10.0 * N[(x * x), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\cos x \cdot e^{10 \cdot \left(x \cdot x\right)}
\end{array}
(FPCore (x) :precision binary64 (* (cos x) (pow (pow (exp 20.0) (* x 0.5)) (exp (log x)))))
double code(double x) {
/* Herbie rewrite of exp(10*x^2) as ((e^20)^(x/2))^(exp(log(x))).
 * Note exp(log(x)) equals x only for x > 0; log(x) is NaN (x < 0)
 * or -inf (x == 0), so this form narrows the valid input domain. */
return cos(x) * pow(pow(exp(20.0), (x * 0.5)), exp(log(x)));
}
real(8) function code(x)
real(8), intent (in) :: x
code = cos(x) * ((exp(20.0d0) ** (x * 0.5d0)) ** exp(log(x)))
end function
public static double code(double x) {
return Math.cos(x) * Math.pow(Math.pow(Math.exp(20.0), (x * 0.5)), Math.exp(Math.log(x)));
}
def code(x): return math.cos(x) * math.pow(math.pow(math.exp(20.0), (x * 0.5)), math.exp(math.log(x)))
function code(x) return Float64(cos(x) * ((exp(20.0) ^ Float64(x * 0.5)) ^ exp(log(x)))) end
function tmp = code(x) tmp = cos(x) * ((exp(20.0) ^ (x * 0.5)) ^ exp(log(x))); end
code[x_] := N[(N[Cos[x], $MachinePrecision] * N[Power[N[Power[N[Exp[20.0], $MachinePrecision], N[(x * 0.5), $MachinePrecision]], $MachinePrecision], N[Exp[N[Log[x], $MachinePrecision]], $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\cos x \cdot {\left({\left(e^{20}\right)}^{\left(x \cdot 0.5\right)}\right)}^{\left(e^{\log x}\right)}
\end{array}
Initial program 94.5%
lift-*.f64 N/A
exp-prod N/A
lower-pow.f64 N/A
lower-exp.f64 95.2
Applied rewrites 95.2%
lift-exp.f64N/A
pow2N/A
pow-to-expN/A
exp-lft-sqrN/A
pow-unpowN/A
lower-pow.f64N/A
lower-pow.f64N/A
lower-exp.f64N/A
lower-log.f64N/A
lower-exp.f64N/A
lower-log.f6498.0
Applied rewrites98.0%
lift-exp.f64N/A
rem-exp-logN/A
sqr-powN/A
pow-prod-downN/A
lift-exp.f64N/A
lift-exp.f64N/A
prod-expN/A
metadata-evalN/A
lift-exp.f64N/A
lower-pow.f64N/A
div-invN/A
metadata-evalN/A
lower-*.f6499.3
Applied rewrites99.3%
(FPCore (x) :precision binary64 (* (cos x) (pow (pow (exp 10.0) (exp (log x))) x)))
double code(double x) {
/* Rewrites exp(10*x^2) as ((e^10)^(exp(log(x))))^x; exp(log(x)) equals x
 * only for x > 0, so this alternative is domain-restricted to positive x. */
return cos(x) * pow(pow(exp(10.0), exp(log(x))), x);
}
real(8) function code(x)
real(8), intent (in) :: x
code = cos(x) * ((exp(10.0d0) ** exp(log(x))) ** x)
end function
public static double code(double x) {
return Math.cos(x) * Math.pow(Math.pow(Math.exp(10.0), Math.exp(Math.log(x))), x);
}
def code(x): return math.cos(x) * math.pow(math.pow(math.exp(10.0), math.exp(math.log(x))), x)
function code(x) return Float64(cos(x) * ((exp(10.0) ^ exp(log(x))) ^ x)) end
function tmp = code(x) tmp = cos(x) * ((exp(10.0) ^ exp(log(x))) ^ x); end
code[x_] := N[(N[Cos[x], $MachinePrecision] * N[Power[N[Power[N[Exp[10.0], $MachinePrecision], N[Exp[N[Log[x], $MachinePrecision]], $MachinePrecision]], $MachinePrecision], x], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\cos x \cdot {\left({\left(e^{10}\right)}^{\left(e^{\log x}\right)}\right)}^{x}
\end{array}
Initial program 94.5%
lift-*.f64N/A
exp-prodN/A
lower-pow.f64N/A
lower-exp.f6495.2
Applied rewrites95.2%
lift-exp.f64N/A
pow2N/A
pow-to-expN/A
exp-lft-sqrN/A
pow-unpowN/A
lower-pow.f64N/A
lower-pow.f64N/A
lower-exp.f64N/A
lower-log.f64N/A
lower-exp.f64N/A
lower-log.f6498.0
Applied rewrites98.0%
rem-exp-log98.0
Applied rewrites98.0%
(FPCore (x) :precision binary64 (* (cos x) (pow (pow (exp (+ x x)) 5.0) x)))
double code(double x) {
/* Rewrites exp(10*x^2) as ((e^(2x))^5)^x: (e^(2x))^5 = e^(10x),
 * raised to x gives e^(10*x^2). Avoids the large constant 10*x^2
 * inside a single exp call. */
return cos(x) * pow(pow(exp((x + x)), 5.0), x);
}
real(8) function code(x)
real(8), intent (in) :: x
code = cos(x) * ((exp((x + x)) ** 5.0d0) ** x)
end function
public static double code(double x) {
return Math.cos(x) * Math.pow(Math.pow(Math.exp((x + x)), 5.0), x);
}
def code(x): return math.cos(x) * math.pow(math.pow(math.exp((x + x)), 5.0), x)
function code(x) return Float64(cos(x) * ((exp(Float64(x + x)) ^ 5.0) ^ x)) end
function tmp = code(x) tmp = cos(x) * ((exp((x + x)) ^ 5.0) ^ x); end
code[x_] := N[(N[Cos[x], $MachinePrecision] * N[Power[N[Power[N[Exp[N[(x + x), $MachinePrecision]], $MachinePrecision], 5.0], $MachinePrecision], x], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\cos x \cdot {\left({\left(e^{x + x}\right)}^{5}\right)}^{x}
\end{array}
Initial program 94.5%
lift-*.f64N/A
exp-prodN/A
lower-pow.f64N/A
lower-exp.f6495.2
Applied rewrites95.2%
lift-*.f64N/A
pow-expN/A
*-commutativeN/A
exp-prodN/A
lift-*.f64N/A
pow-expN/A
lift-exp.f64N/A
pow-unpowN/A
lift-*.f64N/A
sqr-powN/A
pow-prod-downN/A
lift-*.f64N/A
associate-/l*N/A
*-commutativeN/A
pow-unpowN/A
lower-pow.f64N/A
lower-pow.f64N/A
lift-exp.f64N/A
lift-exp.f64N/A
prod-expN/A
lower-exp.f64N/A
lower-+.f64N/A
metadata-eval97.6
Applied rewrites97.6%
(FPCore (x) :precision binary64 (* (cos x) (pow (exp (* x x)) 10.0)))
double code(double x) {
/* Rewrites exp(10*x^2) as (e^(x^2))^10: the exponent passed to exp is
 * ten times smaller, trading one pow call for reduced exp-argument growth. */
return cos(x) * pow(exp((x * x)), 10.0);
}
real(8) function code(x)
real(8), intent (in) :: x
code = cos(x) * (exp((x * x)) ** 10.0d0)
end function
public static double code(double x) {
return Math.cos(x) * Math.pow(Math.exp((x * x)), 10.0);
}
def code(x): return math.cos(x) * math.pow(math.exp((x * x)), 10.0)
function code(x) return Float64(cos(x) * (exp(Float64(x * x)) ^ 10.0)) end
function tmp = code(x) tmp = cos(x) * (exp((x * x)) ^ 10.0); end
code[x_] := N[(N[Cos[x], $MachinePrecision] * N[Power[N[Exp[N[(x * x), $MachinePrecision]], $MachinePrecision], 10.0], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\cos x \cdot {\left(e^{x \cdot x}\right)}^{10}
\end{array}
Initial program 94.5%
Applied rewrites95.2%
(FPCore (x) :precision binary64 (* (cos x) (pow (exp 10.0) (* x x))))
double code(double x) {
/* Rewrites exp(10*x^2) as (e^10)^(x^2): exp(10.0) is a constant base,
 * so only one pow depends on x. */
return cos(x) * pow(exp(10.0), (x * x));
}
real(8) function code(x)
real(8), intent (in) :: x
code = cos(x) * (exp(10.0d0) ** (x * x))
end function
public static double code(double x) {
return Math.cos(x) * Math.pow(Math.exp(10.0), (x * x));
}
def code(x): return math.cos(x) * math.pow(math.exp(10.0), (x * x))
function code(x) return Float64(cos(x) * (exp(10.0) ^ Float64(x * x))) end
function tmp = code(x) tmp = cos(x) * (exp(10.0) ^ (x * x)); end
code[x_] := N[(N[Cos[x], $MachinePrecision] * N[Power[N[Exp[10.0], $MachinePrecision], N[(x * x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\cos x \cdot {\left(e^{10}\right)}^{\left(x \cdot x\right)}
\end{array}
Initial program 94.5%
lift-*.f64N/A
exp-prodN/A
lower-pow.f64N/A
lower-exp.f6495.2
Applied rewrites95.2%
(FPCore (x) :precision binary64 (* (cos x) (sqrt (exp (* 20.0 (* x x))))))
double code(double x) {
/* Rewrites exp(10*x^2) as sqrt(exp(20*x^2)). Mathematically equivalent,
 * but the doubled exponent 20*x^2 overflows exp for smaller |x| than the
 * original form does. */
return cos(x) * sqrt(exp((20.0 * (x * x))));
}
real(8) function code(x)
real(8), intent (in) :: x
code = cos(x) * sqrt(exp((20.0d0 * (x * x))))
end function
public static double code(double x) {
return Math.cos(x) * Math.sqrt(Math.exp((20.0 * (x * x))));
}
def code(x): return math.cos(x) * math.sqrt(math.exp((20.0 * (x * x))))
function code(x) return Float64(cos(x) * sqrt(exp(Float64(20.0 * Float64(x * x))))) end
function tmp = code(x) tmp = cos(x) * sqrt(exp((20.0 * (x * x)))); end
code[x_] := N[(N[Cos[x], $MachinePrecision] * N[Sqrt[N[Exp[N[(20.0 * N[(x * x), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\cos x \cdot \sqrt{e^{20 \cdot \left(x \cdot x\right)}}
\end{array}
Initial program 94.5%
lift-*.f64N/A
exp-prodN/A
lower-pow.f64N/A
lower-exp.f6495.2
Applied rewrites95.2%
lift-exp.f64N/A
pow2N/A
pow-to-expN/A
exp-lft-sqrN/A
pow-unpowN/A
lower-pow.f64N/A
lower-pow.f64N/A
lower-exp.f64N/A
lower-log.f64N/A
lower-exp.f64N/A
lower-log.f6498.0
Applied rewrites98.0%
Applied rewrites94.6%
Final simplification94.6%
(FPCore (x) :precision binary64 (* (cos x) (exp (* 10.0 (* x x)))))
double code(double x) {
/* Identical to the initial program: cos(x) * e^(10*x^2) in binary64. */
return cos(x) * exp((10.0 * (x * x)));
}
real(8) function code(x)
real(8), intent (in) :: x
code = cos(x) * exp((10.0d0 * (x * x)))
end function
public static double code(double x) {
return Math.cos(x) * Math.exp((10.0 * (x * x)));
}
def code(x): return math.cos(x) * math.exp((10.0 * (x * x)))
function code(x) return Float64(cos(x) * exp(Float64(10.0 * Float64(x * x)))) end
function tmp = code(x) tmp = cos(x) * exp((10.0 * (x * x))); end
code[x_] := N[(N[Cos[x], $MachinePrecision] * N[Exp[N[(10.0 * N[(x * x), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\cos x \cdot e^{10 \cdot \left(x \cdot x\right)}
\end{array}
Initial program 94.5%
(FPCore (x) :precision binary64 (* (fma (* x x) (fma (* x x) (fma (* x x) -0.001388888888888889 0.041666666666666664) -0.5) 1.0) (exp (* 10.0 (* (sqrt x) (* x (sqrt x)))))))
double code(double x) {
/* cos(x) replaced by its degree-6 Taylor polynomial about 0, evaluated
 * with nested fma: 1 - x^2/2 + x^4/24 - x^6/720
 * (0.04166... = 1/24, -0.0013888... = -1/720).
 * The exponent uses sqrt(x)*(x*sqrt(x)) = x^2, which is NaN for x < 0,
 * so this form is only valid for x >= 0. */
return fma((x * x), fma((x * x), fma((x * x), -0.001388888888888889, 0.041666666666666664), -0.5), 1.0) * exp((10.0 * (sqrt(x) * (x * sqrt(x)))));
}
function code(x) return Float64(fma(Float64(x * x), fma(Float64(x * x), fma(Float64(x * x), -0.001388888888888889, 0.041666666666666664), -0.5), 1.0) * exp(Float64(10.0 * Float64(sqrt(x) * Float64(x * sqrt(x)))))) end
code[x_] := N[(N[(N[(x * x), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * -0.001388888888888889 + 0.041666666666666664), $MachinePrecision] + -0.5), $MachinePrecision] + 1.0), $MachinePrecision] * N[Exp[N[(10.0 * N[(N[Sqrt[x], $MachinePrecision] * N[(x * N[Sqrt[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, -0.001388888888888889, 0.041666666666666664\right), -0.5\right), 1\right) \cdot e^{10 \cdot \left(\sqrt{x} \cdot \left(x \cdot \sqrt{x}\right)\right)}
\end{array}
Initial program 94.5%
Taylor expanded in x around 0
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
sub-negN/A
metadata-evalN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6427.5
Applied rewrites27.5%
rem-square-sqrtN/A
lift-sqrt.f64N/A
lift-sqrt.f64N/A
associate-*r*N/A
lift-*.f64N/A
lower-*.f6427.5
Applied rewrites27.5%
Final simplification27.5%
(FPCore (x) :precision binary64 (* (fma (* x x) (fma (* x x) (fma (* x x) -0.001388888888888889 0.041666666666666664) -0.5) 1.0) (exp (* x (* x 10.0)))))
double code(double x) {
/* Degree-6 Taylor polynomial of cos(x) about 0 (coefficients -1/720,
 * 1/24, -1/2), via nested fma, times the exact exp(10*x^2) written as
 * exp(x*(x*10)). Accurate only where the cos Taylor series is (small |x|). */
return fma((x * x), fma((x * x), fma((x * x), -0.001388888888888889, 0.041666666666666664), -0.5), 1.0) * exp((x * (x * 10.0)));
}
function code(x) return Float64(fma(Float64(x * x), fma(Float64(x * x), fma(Float64(x * x), -0.001388888888888889, 0.041666666666666664), -0.5), 1.0) * exp(Float64(x * Float64(x * 10.0)))) end
code[x_] := N[(N[(N[(x * x), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * -0.001388888888888889 + 0.041666666666666664), $MachinePrecision] + -0.5), $MachinePrecision] + 1.0), $MachinePrecision] * N[Exp[N[(x * N[(x * 10.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, -0.001388888888888889, 0.041666666666666664\right), -0.5\right), 1\right) \cdot e^{x \cdot \left(x \cdot 10\right)}
\end{array}
Initial program 94.5%
Taylor expanded in x around inf
*-commutativeN/A
*-lft-identityN/A
lower-*.f64N/A
lower-exp.f64N/A
*-commutativeN/A
unpow2N/A
associate-*l*N/A
lower-*.f64N/A
lower-*.f64N/A
*-lft-identityN/A
lower-cos.f6494.4
Applied rewrites94.4%
Taylor expanded in x around 0
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
sub-negN/A
metadata-evalN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6427.5
Applied rewrites27.5%
Final simplification27.5%
(FPCore (x) :precision binary64 (* (exp (* x (* x 10.0))) (fma x (* x (fma x (* x 0.041666666666666664) -0.5)) 1.0)))
double code(double x) {
/* exp(10*x^2) kept exact; cos(x) approximated by its degree-4 Taylor
 * polynomial 1 - x^2/2 + x^4/24 (0.04166... = 1/24), via nested fma. */
return exp((x * (x * 10.0))) * fma(x, (x * fma(x, (x * 0.041666666666666664), -0.5)), 1.0);
}
function code(x) return Float64(exp(Float64(x * Float64(x * 10.0))) * fma(x, Float64(x * fma(x, Float64(x * 0.041666666666666664), -0.5)), 1.0)) end
code[x_] := N[(N[Exp[N[(x * N[(x * 10.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] * N[(x * N[(x * N[(x * N[(x * 0.041666666666666664), $MachinePrecision] + -0.5), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
e^{x \cdot \left(x \cdot 10\right)} \cdot \mathsf{fma}\left(x, x \cdot \mathsf{fma}\left(x, x \cdot 0.041666666666666664, -0.5\right), 1\right)
\end{array}
Initial program 94.5%
Taylor expanded in x around inf
*-commutativeN/A
*-lft-identityN/A
lower-*.f64N/A
lower-exp.f64N/A
*-commutativeN/A
unpow2N/A
associate-*l*N/A
lower-*.f64N/A
lower-*.f64N/A
*-lft-identityN/A
lower-cos.f6494.4
Applied rewrites94.4%
Taylor expanded in x around 0
+-commutativeN/A
unpow2N/A
associate-*l*N/A
*-commutativeN/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f64N/A
sub-negN/A
unpow2N/A
associate-*r*N/A
*-commutativeN/A
metadata-evalN/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f6421.3
Applied rewrites21.3%
(FPCore (x) :precision binary64 (* (exp (* x (* x 10.0))) (fma x (* x -0.5) 1.0)))
double code(double x) {
/* exp(10*x^2) kept exact; cos(x) approximated by its degree-2 Taylor
 * polynomial 1 - x^2/2, computed as fma(x, x*-0.5, 1). */
return exp((x * (x * 10.0))) * fma(x, (x * -0.5), 1.0);
}
function code(x) return Float64(exp(Float64(x * Float64(x * 10.0))) * fma(x, Float64(x * -0.5), 1.0)) end
code[x_] := N[(N[Exp[N[(x * N[(x * 10.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] * N[(x * N[(x * -0.5), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
e^{x \cdot \left(x \cdot 10\right)} \cdot \mathsf{fma}\left(x, x \cdot -0.5, 1\right)
\end{array}
Initial program 94.5%
Taylor expanded in x around inf
*-commutativeN/A
*-lft-identityN/A
lower-*.f64N/A
lower-exp.f64N/A
*-commutativeN/A
unpow2N/A
associate-*l*N/A
lower-*.f64N/A
lower-*.f64N/A
*-lft-identityN/A
lower-cos.f6494.4
Applied rewrites94.4%
Taylor expanded in x around 0
+-commutativeN/A
unpow2N/A
associate-*r*N/A
*-commutativeN/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f6418.2
Applied rewrites18.2%
(FPCore (x) :precision binary64 (* (fma (* x x) (fma (* x x) (fma (* x x) -0.001388888888888889 0.041666666666666664) -0.5) 1.0) (fma (* x x) (fma x (* x (fma (* x x) 166.66666666666666 50.0)) 10.0) 1.0)))
double code(double x) {
/* Both factors Taylor-expanded about 0:
 * first factor  = cos(x) to degree 6 (1 - x^2/2 + x^4/24 - x^6/720);
 * second factor = exp(10*x^2) to degree 6
 *   (1 + 10*x^2 + 50*x^4 + 166.66...*x^6, i.e. coefficients 10, 100/2, 1000/6). */
return fma((x * x), fma((x * x), fma((x * x), -0.001388888888888889, 0.041666666666666664), -0.5), 1.0) * fma((x * x), fma(x, (x * fma((x * x), 166.66666666666666, 50.0)), 10.0), 1.0);
}
function code(x) return Float64(fma(Float64(x * x), fma(Float64(x * x), fma(Float64(x * x), -0.001388888888888889, 0.041666666666666664), -0.5), 1.0) * fma(Float64(x * x), fma(x, Float64(x * fma(Float64(x * x), 166.66666666666666, 50.0)), 10.0), 1.0)) end
code[x_] := N[(N[(N[(x * x), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * -0.001388888888888889 + 0.041666666666666664), $MachinePrecision] + -0.5), $MachinePrecision] + 1.0), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * N[(x * N[(x * N[(N[(x * x), $MachinePrecision] * 166.66666666666666 + 50.0), $MachinePrecision]), $MachinePrecision] + 10.0), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, -0.001388888888888889, 0.041666666666666664\right), -0.5\right), 1\right) \cdot \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x, x \cdot \mathsf{fma}\left(x \cdot x, 166.66666666666666, 50\right), 10\right), 1\right)
\end{array}
Initial program 94.5%
Taylor expanded in x around 0
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
sub-negN/A
metadata-evalN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6427.5
Applied rewrites27.5%
Taylor expanded in x around 0
Applied rewrites10.2%
(FPCore (x) :precision binary64 (* (fma (* x x) (fma (* x x) 0.041666666666666664 -0.5) 1.0) (fma (* x x) (fma (* x x) (fma x (* x 166.66666666666666) 50.0) 10.0) 1.0)))
double code(double x) {
/* Taylor approximations about 0: cos(x) to degree 4 (1 - x^2/2 + x^4/24)
 * times exp(10*x^2) to degree 6 (1 + 10*x^2 + 50*x^4 + 166.66...*x^6). */
return fma((x * x), fma((x * x), 0.041666666666666664, -0.5), 1.0) * fma((x * x), fma((x * x), fma(x, (x * 166.66666666666666), 50.0), 10.0), 1.0);
}
function code(x) return Float64(fma(Float64(x * x), fma(Float64(x * x), 0.041666666666666664, -0.5), 1.0) * fma(Float64(x * x), fma(Float64(x * x), fma(x, Float64(x * 166.66666666666666), 50.0), 10.0), 1.0)) end
code[x_] := N[(N[(N[(x * x), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * 0.041666666666666664 + -0.5), $MachinePrecision] + 1.0), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * N[(x * N[(x * 166.66666666666666), $MachinePrecision] + 50.0), $MachinePrecision] + 10.0), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, 0.041666666666666664, -0.5\right), 1\right) \cdot \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x, x \cdot 166.66666666666666, 50\right), 10\right), 1\right)
\end{array}
Initial program 94.5%
Taylor expanded in x around 0
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
sub-negN/A
*-commutativeN/A
metadata-evalN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6421.3
Applied rewrites21.3%
Taylor expanded in x around 0
Applied rewrites10.2%
(FPCore (x) :precision binary64 (* (fma (* x x) (fma (* x x) 0.041666666666666664 -0.5) 1.0) (fma x (* x (fma x (* x 50.0) 10.0)) 1.0)))
double code(double x) {
/* Taylor approximations about 0: cos(x) to degree 4 (1 - x^2/2 + x^4/24)
 * times exp(10*x^2) to degree 4 (1 + 10*x^2 + 50*x^4). */
return fma((x * x), fma((x * x), 0.041666666666666664, -0.5), 1.0) * fma(x, (x * fma(x, (x * 50.0), 10.0)), 1.0);
}
function code(x) return Float64(fma(Float64(x * x), fma(Float64(x * x), 0.041666666666666664, -0.5), 1.0) * fma(x, Float64(x * fma(x, Float64(x * 50.0), 10.0)), 1.0)) end
code[x_] := N[(N[(N[(x * x), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * 0.041666666666666664 + -0.5), $MachinePrecision] + 1.0), $MachinePrecision] * N[(x * N[(x * N[(x * N[(x * 50.0), $MachinePrecision] + 10.0), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, 0.041666666666666664, -0.5\right), 1\right) \cdot \mathsf{fma}\left(x, x \cdot \mathsf{fma}\left(x, x \cdot 50, 10\right), 1\right)
\end{array}
Initial program 94.5%
Taylor expanded in x around 0
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
sub-negN/A
*-commutativeN/A
metadata-evalN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6421.3
Applied rewrites21.3%
Taylor expanded in x around 0
+-commutativeN/A
unpow2N/A
associate-*l*N/A
+-commutativeN/A
distribute-rgt-inN/A
lower-fma.f64N/A
+-commutativeN/A
distribute-rgt-inN/A
lower-*.f64N/A
+-commutativeN/A
*-commutativeN/A
unpow2N/A
associate-*l*N/A
lower-fma.f64N/A
lower-*.f6410.0
Applied rewrites10.0%
(FPCore (x) :precision binary64 (* (fma (* x x) (fma (* x x) 0.041666666666666664 -0.5) 1.0) (fma 10.0 (* x x) 1.0)))
double code(double x) {
/* Taylor approximations about 0: cos(x) to degree 4 (1 - x^2/2 + x^4/24)
 * times exp(10*x^2) to degree 2 (1 + 10*x^2). */
return fma((x * x), fma((x * x), 0.041666666666666664, -0.5), 1.0) * fma(10.0, (x * x), 1.0);
}
function code(x) return Float64(fma(Float64(x * x), fma(Float64(x * x), 0.041666666666666664, -0.5), 1.0) * fma(10.0, Float64(x * x), 1.0)) end
code[x_] := N[(N[(N[(x * x), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * 0.041666666666666664 + -0.5), $MachinePrecision] + 1.0), $MachinePrecision] * N[(10.0 * N[(x * x), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, 0.041666666666666664, -0.5\right), 1\right) \cdot \mathsf{fma}\left(10, x \cdot x, 1\right)
\end{array}
Initial program 94.5%
Taylor expanded in x around 0
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
sub-negN/A
*-commutativeN/A
metadata-evalN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6421.3
Applied rewrites21.3%
Taylor expanded in x around 0
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f649.8
Applied rewrites9.8%
(FPCore (x) :precision binary64 (* x (* x -0.5)))
double code(double x) {
/* Degenerate alternative: only the -x^2/2 Taylor term of cos(x) survives;
 * the exp factor has been dropped entirely (accuracy collapses to ~9.6%). */
return x * (x * -0.5);
}
real(8) function code(x)
real(8), intent (in) :: x
code = x * (x * (-0.5d0))
end function
public static double code(double x) {
return x * (x * -0.5);
}
def code(x): return x * (x * -0.5)
function code(x) return Float64(x * Float64(x * -0.5)) end
function tmp = code(x) tmp = x * (x * -0.5); end
code[x_] := N[(x * N[(x * -0.5), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x \cdot \left(x \cdot -0.5\right)
\end{array}
Initial program 94.5%
Taylor expanded in x around 0
Applied rewrites9.6%
Taylor expanded in x around 0
+-commutativeN/A
unpow2N/A
associate-*r*N/A
*-commutativeN/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f649.7
Applied rewrites9.7%
Taylor expanded in x around inf
unpow2N/A
associate-*r*N/A
*-commutativeN/A
lower-*.f64N/A
*-commutativeN/A
lower-*.f649.7
Applied rewrites9.7%
(FPCore (x) :precision binary64 1.0)
double code(double x) {
/* Degenerate alternative: the constant 1, i.e. the 0th-order Taylor
 * expansion of the whole expression about x = 0. */
return 1.0;
}
real(8) function code(x)
real(8), intent (in) :: x
code = 1.0d0
end function
public static double code(double x) {
return 1.0;
}
def code(x): return 1.0
function code(x) return 1.0 end
function tmp = code(x) tmp = 1.0; end
code[x_] := 1.0
\begin{array}{l}
\\
1
\end{array}
Initial program 94.5%
Taylor expanded in x around 0
Applied rewrites1.5%
herbie shell --seed 2024219
(FPCore (x)
:name "ENA, Section 1.4, Exercise 1"
:precision binary64
:pre (and (<= 1.99 x) (<= x 2.01))
(* (cos x) (exp (* 10.0 (* x x)))))