
(FPCore (x) :precision binary64 (* (cos x) (exp (* 10.0 (* x x)))))
double code(double x) {
return cos(x) * exp((10.0 * (x * x)));
}
real(8) function code(x)
    ! cos(x) * exp(10*x**2), evaluated in double precision;
    ! same operation order as the generated original.
    real(8), intent(in) :: x
    real(8) :: arg
    arg = 10.0d0 * (x * x)
    code = cos(x) * exp(arg)
end function code
public static double code(double x) {
    // cos(x) * exp(10 * x^2), binary64 throughout.
    final double arg = 10.0 * (x * x);
    return Math.cos(x) * Math.exp(arg);
}
def code(x):
    """cos(x) * exp(10*x^2), evaluated in binary64 (same op order)."""
    arg = 10.0 * (x * x)
    return math.cos(x) * math.exp(arg)
function code(x)
    # cos(x) * exp(10 x^2); each intermediate forced to Float64
    # exactly as in the generated original.
    sq = Float64(x * x)
    arg = Float64(10.0 * sq)
    return Float64(cos(x) * exp(arg))
end
function tmp = code(x)
    % cos(x) * exp(10*x^2), double precision, original op order.
    arg = 10.0 * (x * x);
    tmp = cos(x) * exp(arg);
end
(* cos(x) * exp(10 x^2); every intermediate rounded to $MachinePrecision
   to mirror binary64 evaluation. *)
code[x_] := N[(N[Cos[x], $MachinePrecision] * N[Exp[N[(10.0 * N[(x * x), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\cos x \cdot e^{10 \cdot \left(x \cdot x\right)}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 17 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x) :precision binary64 (* (cos x) (exp (* 10.0 (* x x)))))
double code(double x) {
return cos(x) * exp((10.0 * (x * x)));
}
real(8) function code(x)
    ! cos(x) * exp(10*x**2) in double precision (duplicate of the
    ! initial program, restated for this alternative).
    real(8), intent(in) :: x
    real(8) :: arg
    arg = 10.0d0 * (x * x)
    code = cos(x) * exp(arg)
end function code
public static double code(double x) {
return Math.cos(x) * Math.exp((10.0 * (x * x)));
}
def code(x):
    """cos(x) * exp(10*x^2) in binary64 (same op order as original)."""
    scaled = 10.0 * (x * x)
    return math.cos(x) * math.exp(scaled)
function code(x) return Float64(cos(x) * exp(Float64(10.0 * Float64(x * x)))) end
function tmp = code(x) tmp = cos(x) * exp((10.0 * (x * x))); end
code[x_] := N[(N[Cos[x], $MachinePrecision] * N[Exp[N[(10.0 * N[(x * x), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\cos x \cdot e^{10 \cdot \left(x \cdot x\right)}
\end{array}
(FPCore (x) :precision binary64 (* (cos x) (pow (pow (exp -20.0) x) (* x -0.5))))
double code(double x) {
return cos(x) * pow(pow(exp(-20.0), x), (x * -0.5));
}
real(8) function code(x)
    ! cos(x) * ((e**(-20))**x)**(-x/2), double precision.
    real(8), intent(in) :: x
    real(8) :: base
    base = exp((-20.0d0)) ** x
    code = cos(x) * (base ** (x * (-0.5d0)))
end function code
public static double code(double x) {
return Math.cos(x) * Math.pow(Math.pow(Math.exp(-20.0), x), (x * -0.5));
}
def code(x): return math.cos(x) * math.pow(math.pow(math.exp(-20.0), x), (x * -0.5))
function code(x) return Float64(cos(x) * ((exp(-20.0) ^ x) ^ Float64(x * -0.5))) end
function tmp = code(x) tmp = cos(x) * ((exp(-20.0) ^ x) ^ (x * -0.5)); end
code[x_] := N[(N[Cos[x], $MachinePrecision] * N[Power[N[Power[N[Exp[-20.0], $MachinePrecision], x], $MachinePrecision], N[(x * -0.5), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\cos x \cdot {\left({\left(e^{-20}\right)}^{x}\right)}^{\left(x \cdot -0.5\right)}
\end{array}
Initial program 94.1%
lift-*.f64 N/A
exp-prod N/A
sqr-pow N/A
pow-prod-down N/A
frac-2neg N/A
div-inv N/A
pow-unpow N/A
lower-pow.f64 N/A
Applied rewrites 95.1%
lift-exp.f64 N/A
lift-neg.f64 N/A
lift-*.f64 N/A
sqr-pow N/A
sqr-pow N/A
lift-*.f64 N/A
pow-unpow N/A
lift-neg.f64 N/A
neg-mul-1 N/A
pow-unpow N/A
pow-pow N/A
lower-pow.f64 N/A
Applied rewrites 94.7%
exp-prod N/A
lower-pow.f64 N/A
lower-exp.f64 99.2
Applied rewrites 99.2%
(FPCore (x) :precision binary64 (* (cos x) (pow (pow (exp (+ x x)) x) 5.0)))
double code(double x) {
return cos(x) * pow(pow(exp((x + x)), x), 5.0);
}
real(8) function code(x)
    ! cos(x) * (exp(2x)**x)**5, double precision.
    real(8), intent(in) :: x
    real(8) :: inner
    inner = exp((x + x)) ** x
    code = cos(x) * (inner ** 5.0d0)
end function code
public static double code(double x) {
return Math.cos(x) * Math.pow(Math.pow(Math.exp((x + x)), x), 5.0);
}
def code(x):
    """cos(x) * (exp(2x)^x)^5, same math.pow chain as the original."""
    inner = math.pow(math.exp((x + x)), x)
    return math.cos(x) * math.pow(inner, 5.0)
function code(x) return Float64(cos(x) * ((exp(Float64(x + x)) ^ x) ^ 5.0)) end
function tmp = code(x) tmp = cos(x) * ((exp((x + x)) ^ x) ^ 5.0); end
code[x_] := N[(N[Cos[x], $MachinePrecision] * N[Power[N[Power[N[Exp[N[(x + x), $MachinePrecision]], $MachinePrecision], x], $MachinePrecision], 5.0], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\cos x \cdot {\left({\left(e^{x + x}\right)}^{x}\right)}^{5}
\end{array}
Initial program 94.1%
lift-*.f64N/A
exp-prodN/A
sqr-powN/A
pow-prod-downN/A
frac-2negN/A
div-invN/A
pow-unpowN/A
lower-pow.f64N/A
Applied rewrites95.1%
Applied rewrites95.1%
Applied rewrites97.6%
(FPCore (x) :precision binary64 (* (cos x) (pow (pow (exp x) (+ x x)) 5.0)))
double code(double x) {
return cos(x) * pow(pow(exp(x), (x + x)), 5.0);
}
real(8) function code(x)
    ! cos(x) * (exp(x)**(2x))**5, double precision.
    real(8), intent(in) :: x
    real(8) :: inner
    inner = exp(x) ** (x + x)
    code = cos(x) * (inner ** 5.0d0)
end function code
public static double code(double x) {
return Math.cos(x) * Math.pow(Math.pow(Math.exp(x), (x + x)), 5.0);
}
def code(x):
    """cos(x) * (exp(x)^(2x))^5, same math.pow chain as the original."""
    inner = math.pow(math.exp(x), (x + x))
    return math.cos(x) * math.pow(inner, 5.0)
function code(x) return Float64(cos(x) * ((exp(x) ^ Float64(x + x)) ^ 5.0)) end
function tmp = code(x) tmp = cos(x) * ((exp(x) ^ (x + x)) ^ 5.0); end
code[x_] := N[(N[Cos[x], $MachinePrecision] * N[Power[N[Power[N[Exp[x], $MachinePrecision], N[(x + x), $MachinePrecision]], $MachinePrecision], 5.0], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\cos x \cdot {\left({\left(e^{x}\right)}^{\left(x + x\right)}\right)}^{5}
\end{array}
Initial program 94.1%
lift-*.f64N/A
exp-prodN/A
sqr-powN/A
pow-prod-downN/A
frac-2negN/A
div-invN/A
pow-unpowN/A
lower-pow.f64N/A
Applied rewrites95.1%
Applied rewrites95.1%
Applied rewrites96.7%
(FPCore (x) :precision binary64 (* (cos x) (pow (exp (* x (+ x x))) 5.0)))
double code(double x) {
return cos(x) * pow(exp((x * (x + x))), 5.0);
}
real(8) function code(x)
    ! cos(x) * exp(2x**2)**5, double precision.
    real(8), intent(in) :: x
    real(8) :: inner
    inner = exp((x * (x + x)))
    code = cos(x) * (inner ** 5.0d0)
end function code
public static double code(double x) {
return Math.cos(x) * Math.pow(Math.exp((x * (x + x))), 5.0);
}
def code(x):
    """cos(x) * exp(2x^2)^5, same op order as the original."""
    inner = math.exp((x * (x + x)))
    return math.cos(x) * math.pow(inner, 5.0)
function code(x) return Float64(cos(x) * (exp(Float64(x * Float64(x + x))) ^ 5.0)) end
function tmp = code(x) tmp = cos(x) * (exp((x * (x + x))) ^ 5.0); end
code[x_] := N[(N[Cos[x], $MachinePrecision] * N[Power[N[Exp[N[(x * N[(x + x), $MachinePrecision]), $MachinePrecision]], $MachinePrecision], 5.0], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\cos x \cdot {\left(e^{x \cdot \left(x + x\right)}\right)}^{5}
\end{array}
Initial program 94.1%
lift-*.f64N/A
exp-prodN/A
sqr-powN/A
pow-prod-downN/A
frac-2negN/A
div-invN/A
pow-unpowN/A
lower-pow.f64N/A
Applied rewrites95.1%
Applied rewrites95.1%
Final simplification95.1%
(FPCore (x) :precision binary64 (* (cos x) (pow (exp -10.0) (* x (- x)))))
double code(double x) {
return cos(x) * pow(exp(-10.0), (x * -x));
}
real(8) function code(x)
    ! cos(x) * (e**(-10))**(-x**2), double precision.
    ! Fix: the generated "x * -x" has two consecutive operators,
    ! which is not standard-conforming Fortran (rejected by gfortran);
    ! the negation must be parenthesized as "x * (-x)".
    real(8), intent(in) :: x
    code = cos(x) * (exp((-10.0d0)) ** (x * (-x)))
end function code
public static double code(double x) {
return Math.cos(x) * Math.pow(Math.exp(-10.0), (x * -x));
}
def code(x):
    """cos(x) * (e^-10)^(-x^2), same math.pow form as the original."""
    base = math.exp(-10.0)
    return math.cos(x) * math.pow(base, (x * -x))
function code(x) return Float64(cos(x) * (exp(-10.0) ^ Float64(x * Float64(-x)))) end
function tmp = code(x) tmp = cos(x) * (exp(-10.0) ^ (x * -x)); end
code[x_] := N[(N[Cos[x], $MachinePrecision] * N[Power[N[Exp[-10.0], $MachinePrecision], N[(x * (-x)), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\cos x \cdot {\left(e^{-10}\right)}^{\left(x \cdot \left(-x\right)\right)}
\end{array}
Initial program 94.1%
lift-*.f64N/A
exp-prodN/A
sqr-powN/A
pow-prod-downN/A
frac-2negN/A
div-invN/A
pow-unpowN/A
lower-pow.f64N/A
Applied rewrites95.1%
lift-exp.f64N/A
lift-neg.f64N/A
lift-*.f64N/A
sqr-powN/A
sqr-powN/A
pow-powN/A
*-commutativeN/A
pow-unpowN/A
lower-pow.f64N/A
pow-to-expN/A
lift-exp.f64N/A
rem-log-expN/A
metadata-evalN/A
metadata-evalN/A
lower-exp.f64N/A
metadata-eval95.1
lift-*.f64N/A
lift-neg.f64N/A
distribute-rgt-neg-outN/A
lift-*.f64N/A
lower-neg.f6495.1
Applied rewrites95.1%
Final simplification95.1%
(FPCore (x) :precision binary64 (* (cos x) (pow (exp 10.0) (* x x))))
double code(double x) {
return cos(x) * pow(exp(10.0), (x * x));
}
real(8) function code(x)
    ! cos(x) * (e**10)**(x**2), double precision.
    real(8), intent(in) :: x
    real(8) :: base
    base = exp(10.0d0)
    code = cos(x) * (base ** (x * x))
end function code
public static double code(double x) {
return Math.cos(x) * Math.pow(Math.exp(10.0), (x * x));
}
def code(x):
    """cos(x) * (e^10)^(x^2), same math.pow form as the original."""
    base = math.exp(10.0)
    return math.cos(x) * math.pow(base, (x * x))
function code(x) return Float64(cos(x) * (exp(10.0) ^ Float64(x * x))) end
function tmp = code(x) tmp = cos(x) * (exp(10.0) ^ (x * x)); end
code[x_] := N[(N[Cos[x], $MachinePrecision] * N[Power[N[Exp[10.0], $MachinePrecision], N[(x * x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\cos x \cdot {\left(e^{10}\right)}^{\left(x \cdot x\right)}
\end{array}
Initial program 94.1%
lift-*.f64N/A
exp-prodN/A
lower-pow.f64N/A
lower-exp.f6495.1
Applied rewrites95.1%
(FPCore (x) :precision binary64 (* (cos x) (/ 1.0 (exp (* -10.0 (* x x))))))
double code(double x) {
return cos(x) * (1.0 / exp((-10.0 * (x * x))));
}
real(8) function code(x)
    ! cos(x) * 1/exp(-10*x**2), double precision.
    real(8), intent(in) :: x
    real(8) :: denom
    denom = exp(((-10.0d0) * (x * x)))
    code = cos(x) * (1.0d0 / denom)
end function code
public static double code(double x) {
return Math.cos(x) * (1.0 / Math.exp((-10.0 * (x * x))));
}
def code(x):
    """cos(x) * 1/exp(-10*x^2), same op order as the original."""
    denom = math.exp((-10.0 * (x * x)))
    return math.cos(x) * (1.0 / denom)
function code(x) return Float64(cos(x) * Float64(1.0 / exp(Float64(-10.0 * Float64(x * x))))) end
function tmp = code(x) tmp = cos(x) * (1.0 / exp((-10.0 * (x * x)))); end
code[x_] := N[(N[Cos[x], $MachinePrecision] * N[(1.0 / N[Exp[N[(-10.0 * N[(x * x), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\cos x \cdot \frac{1}{e^{-10 \cdot \left(x \cdot x\right)}}
\end{array}
Initial program 94.1%
lift-*.f64N/A
exp-prodN/A
sqr-powN/A
pow-prod-downN/A
frac-2negN/A
div-invN/A
pow-unpowN/A
lower-pow.f64N/A
Applied rewrites95.1%
Applied rewrites94.1%
Final simplification94.1%
(FPCore (x) :precision binary64 (* (cos x) (exp (* (* x x) 10.0))))
double code(double x) {
return cos(x) * exp(((x * x) * 10.0));
}
real(8) function code(x)
    ! cos(x) * exp((x**2)*10), double precision.
    real(8), intent(in) :: x
    real(8) :: arg
    arg = (x * x) * 10.0d0
    code = cos(x) * exp(arg)
end function code
public static double code(double x) {
return Math.cos(x) * Math.exp(((x * x) * 10.0));
}
def code(x):
    """cos(x) * exp((x^2)*10), same op order as the original."""
    arg = (x * x) * 10.0
    return math.cos(x) * math.exp(arg)
function code(x) return Float64(cos(x) * exp(Float64(Float64(x * x) * 10.0))) end
function tmp = code(x) tmp = cos(x) * exp(((x * x) * 10.0)); end
code[x_] := N[(N[Cos[x], $MachinePrecision] * N[Exp[N[(N[(x * x), $MachinePrecision] * 10.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\cos x \cdot e^{\left(x \cdot x\right) \cdot 10}
\end{array}
Initial program 94.1%
Final simplification94.1%
(FPCore (x)
:precision binary64
(*
(exp (* x (* x 10.0)))
(fma
x
(*
x
(fma
(* x x)
(fma (* x x) -0.001388888888888889 0.041666666666666664)
-0.5))
1.0)))
double code(double x) {
return exp((x * (x * 10.0))) * fma(x, (x * fma((x * x), fma((x * x), -0.001388888888888889, 0.041666666666666664), -0.5)), 1.0);
}
function code(x) return Float64(exp(Float64(x * Float64(x * 10.0))) * fma(x, Float64(x * fma(Float64(x * x), fma(Float64(x * x), -0.001388888888888889, 0.041666666666666664), -0.5)), 1.0)) end
code[x_] := N[(N[Exp[N[(x * N[(x * 10.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] * N[(x * N[(x * N[(N[(x * x), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * -0.001388888888888889 + 0.041666666666666664), $MachinePrecision] + -0.5), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
e^{x \cdot \left(x \cdot 10\right)} \cdot \mathsf{fma}\left(x, x \cdot \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, -0.001388888888888889, 0.041666666666666664\right), -0.5\right), 1\right)
\end{array}
Initial program 94.1%
Taylor expanded in x around inf
*-commutativeN/A
*-lft-identityN/A
lower-*.f64N/A
lower-exp.f64N/A
*-commutativeN/A
unpow2N/A
associate-*l*N/A
lower-*.f64N/A
lower-*.f64N/A
*-lft-identityN/A
lower-cos.f6494.0
Applied rewrites94.0%
Taylor expanded in x around 0
+-commutativeN/A
unpow2N/A
associate-*l*N/A
lower-fma.f64N/A
lower-*.f64N/A
sub-negN/A
metadata-evalN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6427.5
Applied rewrites27.5%
(FPCore (x) :precision binary64 (* (exp (* (* x x) 10.0)) (fma (* x x) (fma (* x x) 0.041666666666666664 -0.5) 1.0)))
double code(double x) {
return exp(((x * x) * 10.0)) * fma((x * x), fma((x * x), 0.041666666666666664, -0.5), 1.0);
}
function code(x) return Float64(exp(Float64(Float64(x * x) * 10.0)) * fma(Float64(x * x), fma(Float64(x * x), 0.041666666666666664, -0.5), 1.0)) end
code[x_] := N[(N[Exp[N[(N[(x * x), $MachinePrecision] * 10.0), $MachinePrecision]], $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * 0.041666666666666664 + -0.5), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
e^{\left(x \cdot x\right) \cdot 10} \cdot \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, 0.041666666666666664, -0.5\right), 1\right)
\end{array}
Initial program 94.1%
Taylor expanded in x around 0
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
sub-negN/A
*-commutativeN/A
metadata-evalN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6421.3
Applied rewrites21.3%
Final simplification21.3%
(FPCore (x) :precision binary64 (* (exp (* x (* x 10.0))) (fma (* x x) (fma (* x x) 0.041666666666666664 -0.5) 1.0)))
double code(double x) {
return exp((x * (x * 10.0))) * fma((x * x), fma((x * x), 0.041666666666666664, -0.5), 1.0);
}
function code(x) return Float64(exp(Float64(x * Float64(x * 10.0))) * fma(Float64(x * x), fma(Float64(x * x), 0.041666666666666664, -0.5), 1.0)) end
code[x_] := N[(N[Exp[N[(x * N[(x * 10.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * 0.041666666666666664 + -0.5), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
e^{x \cdot \left(x \cdot 10\right)} \cdot \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, 0.041666666666666664, -0.5\right), 1\right)
\end{array}
Initial program 94.1%
Taylor expanded in x around inf
*-commutativeN/A
*-lft-identityN/A
lower-*.f64N/A
lower-exp.f64N/A
*-commutativeN/A
unpow2N/A
associate-*l*N/A
lower-*.f64N/A
lower-*.f64N/A
*-lft-identityN/A
lower-cos.f6494.0
Applied rewrites94.0%
Taylor expanded in x around 0
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
sub-negN/A
*-commutativeN/A
metadata-evalN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6421.3
Applied rewrites21.3%
(FPCore (x) :precision binary64 (* (exp (* x (* x 10.0))) (fma x (* x -0.5) 1.0)))
double code(double x) {
return exp((x * (x * 10.0))) * fma(x, (x * -0.5), 1.0);
}
function code(x) return Float64(exp(Float64(x * Float64(x * 10.0))) * fma(x, Float64(x * -0.5), 1.0)) end
code[x_] := N[(N[Exp[N[(x * N[(x * 10.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] * N[(x * N[(x * -0.5), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
e^{x \cdot \left(x \cdot 10\right)} \cdot \mathsf{fma}\left(x, x \cdot -0.5, 1\right)
\end{array}
Initial program 94.1%
Taylor expanded in x around inf
*-commutativeN/A
*-lft-identityN/A
lower-*.f64N/A
lower-exp.f64N/A
*-commutativeN/A
unpow2N/A
associate-*l*N/A
lower-*.f64N/A
lower-*.f64N/A
*-lft-identityN/A
lower-cos.f6494.0
Applied rewrites94.0%
Taylor expanded in x around 0
+-commutativeN/A
*-commutativeN/A
unpow2N/A
associate-*l*N/A
lower-fma.f64N/A
lower-*.f6418.2
Applied rewrites18.2%
(FPCore (x) :precision binary64 (* (fma (* x x) (fma (* x x) 0.041666666666666664 -0.5) 1.0) (fma (* x x) (fma (* x x) (fma x (* x 166.66666666666666) 50.0) 10.0) 1.0)))
double code(double x) {
return fma((x * x), fma((x * x), 0.041666666666666664, -0.5), 1.0) * fma((x * x), fma((x * x), fma(x, (x * 166.66666666666666), 50.0), 10.0), 1.0);
}
function code(x) return Float64(fma(Float64(x * x), fma(Float64(x * x), 0.041666666666666664, -0.5), 1.0) * fma(Float64(x * x), fma(Float64(x * x), fma(x, Float64(x * 166.66666666666666), 50.0), 10.0), 1.0)) end
code[x_] := N[(N[(N[(x * x), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * 0.041666666666666664 + -0.5), $MachinePrecision] + 1.0), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * N[(x * N[(x * 166.66666666666666), $MachinePrecision] + 50.0), $MachinePrecision] + 10.0), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, 0.041666666666666664, -0.5\right), 1\right) \cdot \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x, x \cdot 166.66666666666666, 50\right), 10\right), 1\right)
\end{array}
Initial program 94.1%
Taylor expanded in x around 0
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
sub-negN/A
*-commutativeN/A
metadata-evalN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6421.3
Applied rewrites21.3%
Taylor expanded in x around 0
Applied rewrites10.2%
(FPCore (x) :precision binary64 (* (fma (* x x) (fma (* x x) 0.041666666666666664 -0.5) 1.0) (fma (* x x) (fma (* x x) 50.0 10.0) 1.0)))
double code(double x) {
return fma((x * x), fma((x * x), 0.041666666666666664, -0.5), 1.0) * fma((x * x), fma((x * x), 50.0, 10.0), 1.0);
}
function code(x) return Float64(fma(Float64(x * x), fma(Float64(x * x), 0.041666666666666664, -0.5), 1.0) * fma(Float64(x * x), fma(Float64(x * x), 50.0, 10.0), 1.0)) end
code[x_] := N[(N[(N[(x * x), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * 0.041666666666666664 + -0.5), $MachinePrecision] + 1.0), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * 50.0 + 10.0), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, 0.041666666666666664, -0.5\right), 1\right) \cdot \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, 50, 10\right), 1\right)
\end{array}
Initial program 94.1%
Taylor expanded in x around 0
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
sub-negN/A
*-commutativeN/A
metadata-evalN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6421.3
Applied rewrites21.3%
Taylor expanded in x around 0
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6410.0
Applied rewrites10.0%
(FPCore (x) :precision binary64 (* (fma (* x x) (fma (* x x) 0.041666666666666664 -0.5) 1.0) (fma (* x x) 10.0 1.0)))
double code(double x) {
return fma((x * x), fma((x * x), 0.041666666666666664, -0.5), 1.0) * fma((x * x), 10.0, 1.0);
}
function code(x) return Float64(fma(Float64(x * x), fma(Float64(x * x), 0.041666666666666664, -0.5), 1.0) * fma(Float64(x * x), 10.0, 1.0)) end
code[x_] := N[(N[(N[(x * x), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * 0.041666666666666664 + -0.5), $MachinePrecision] + 1.0), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * 10.0 + 1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, 0.041666666666666664, -0.5\right), 1\right) \cdot \mathsf{fma}\left(x \cdot x, 10, 1\right)
\end{array}
Initial program 94.1%
Taylor expanded in x around 0
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
sub-negN/A
*-commutativeN/A
metadata-evalN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6421.3
Applied rewrites21.3%
Taylor expanded in x around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f649.8
Applied rewrites9.8%
(FPCore (x) :precision binary64 (* x (* x -0.5)))
double code(double x) {
return x * (x * -0.5);
}
real(8) function code(x)
    ! Taylor-series fallback: -x**2/2 (the cos-expansion term
    ! Herbie kept for this low-accuracy alternative).
    real(8), intent(in) :: x
    real(8) :: half
    half = x * (-0.5d0)
    code = x * half
end function code
public static double code(double x) {
return x * (x * -0.5);
}
def code(x):
    """Taylor-series fallback: -x^2/2 (same op order as original)."""
    half = x * -0.5
    return x * half
function code(x) return Float64(x * Float64(x * -0.5)) end
function tmp = code(x) tmp = x * (x * -0.5); end
code[x_] := N[(x * N[(x * -0.5), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x \cdot \left(x \cdot -0.5\right)
\end{array}
Initial program 94.1%
Taylor expanded in x around 0
Applied rewrites9.6%
Taylor expanded in x around 0
+-commutativeN/A
unpow2N/A
associate-*r*N/A
*-commutativeN/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f649.7
Applied rewrites9.7%
Taylor expanded in x around inf
unpow2N/A
associate-*r*N/A
*-commutativeN/A
lower-*.f64N/A
*-commutativeN/A
lower-*.f649.7
Applied rewrites9.7%
(FPCore (x) :precision binary64 1.0)
double code(double x) {
return 1.0;
}
real(8) function code(x)
    ! Degenerate constant fallback: always 1 (zeroth-order Taylor
    ! expansion around 0); x is intentionally unused.
    real(8), intent(in) :: x
    code = 1.0d0
end function code
public static double code(double x) {
return 1.0;
}
def code(x):
    """Degenerate constant fallback: always 1.0; x is unused."""
    return 1.0
function code(x) return 1.0 end
function tmp = code(x) tmp = 1.0; end
code[x_] := 1.0
\begin{array}{l}
\\
1
\end{array}
Initial program 94.1%
Taylor expanded in x around 0
Applied rewrites1.5%
herbie shell --seed 2024214
(FPCore (x)
:name "ENA, Section 1.4, Exercise 1"
:precision binary64
:pre (and (<= 1.99 x) (<= x 2.01))
(* (cos x) (exp (* 10.0 (* x x)))))