
(FPCore (x) :precision binary64 (* (cos x) (exp (* 10.0 (* x x)))))
double code(double x) {
return cos(x) * exp((10.0 * (x * x)));
}
! Evaluates cos(x) * exp(10*x**2) in double precision (binary64).
real(8) function code(x)
real(8), intent (in) :: x
code = cos(x) * exp((10.0d0 * (x * x)))
end function
/** Evaluates cos(x) * e^(10*x^2) in double (binary64) precision. */
public static double code(double x) {
    final double squared = x * x;
    final double growth = Math.exp(10.0 * squared);
    return Math.cos(x) * growth;
}
def code(x):
    """Evaluate cos(x) * e**(10*x**2) in binary64 precision."""
    squared = x * x
    return math.cos(x) * math.exp(10.0 * squared)
# Evaluates cos(x) * e^(10*x^2), rounding each intermediate to Float64.
function code(x)
    sq = Float64(x * x)
    scaled = Float64(10.0 * sq)
    return Float64(cos(x) * exp(scaled))
end
% Evaluates cos(x) * exp(10*x^2) in double precision.
function tmp = code(x) tmp = cos(x) * exp((10.0 * (x * x))); end
(* Evaluates Cos[x] * Exp[10 x^2]; every subexpression is rounded to machine precision via N[..., $MachinePrecision] to mimic binary64 arithmetic. *)
code[x_] := N[(N[Cos[x], $MachinePrecision] * N[Exp[N[(10.0 * N[(x * x), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\cos x \cdot e^{10 \cdot \left(x \cdot x\right)}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 15 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x) :precision binary64 (* (cos x) (exp (* 10.0 (* x x)))))
double code(double x) {
return cos(x) * exp((10.0 * (x * x)));
}
real(8) function code(x)
real(8), intent (in) :: x
code = cos(x) * exp((10.0d0 * (x * x)))
end function
public static double code(double x) {
return Math.cos(x) * Math.exp((10.0 * (x * x)));
}
def code(x): return math.cos(x) * math.exp((10.0 * (x * x)))
function code(x) return Float64(cos(x) * exp(Float64(10.0 * Float64(x * x)))) end
function tmp = code(x) tmp = cos(x) * exp((10.0 * (x * x))); end
code[x_] := N[(N[Cos[x], $MachinePrecision] * N[Exp[N[(10.0 * N[(x * x), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\cos x \cdot e^{10 \cdot \left(x \cdot x\right)}
\end{array}
(FPCore (x) :precision binary64 (* (cos x) (pow (pow (exp 20.0) x) (* x 0.5))))
double code(double x) {
return cos(x) * pow(pow(exp(20.0), x), (x * 0.5));
}
real(8) function code(x)
real(8), intent (in) :: x
code = cos(x) * ((exp(20.0d0) ** x) ** (x * 0.5d0))
end function
public static double code(double x) {
return Math.cos(x) * Math.pow(Math.pow(Math.exp(20.0), x), (x * 0.5));
}
def code(x):
    """cos(x) * ((e**20)**x)**(x/2) — algebraic rewrite of cos(x)*e**(10*x**2)."""
    base = math.pow(math.exp(20.0), x)
    return math.cos(x) * math.pow(base, x * 0.5)
function code(x) return Float64(cos(x) * ((exp(20.0) ^ x) ^ Float64(x * 0.5))) end
function tmp = code(x) tmp = cos(x) * ((exp(20.0) ^ x) ^ (x * 0.5)); end
code[x_] := N[(N[Cos[x], $MachinePrecision] * N[Power[N[Power[N[Exp[20.0], $MachinePrecision], x], $MachinePrecision], N[(x * 0.5), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\cos x \cdot {\left({\left(e^{20}\right)}^{x}\right)}^{\left(x \cdot 0.5\right)}
\end{array}
Initial program 94.4%
lift-exp.f64  N/A
lift-*.f64  N/A
exp-prod  N/A
sqr-pow  N/A
pow-prod-down  N/A
lift-*.f64  N/A
associate-/l*  N/A
pow-unpow  N/A
lower-pow.f64  N/A
pow-to-exp  N/A
lower-exp.f64  N/A
lower-*.f64  N/A
log-prod  N/A
rem-log-exp  N/A
rem-log-exp  N/A
metadata-eval  N/A
div-inv  N/A
lower-*.f64  N/A
metadata-eval  95.2
Applied rewrites  95.2%
lift-exp.f64  N/A
lift-*.f64  N/A
exp-prod  N/A
lower-pow.f64  N/A
lower-exp.f64  99.4
Applied rewrites  99.4%
(FPCore (x) :precision binary64 (* (cos x) (pow (pow (exp (+ x x)) x) 5.0)))
double code(double x) {
return cos(x) * pow(pow(exp((x + x)), x), 5.0);
}
real(8) function code(x)
real(8), intent (in) :: x
code = cos(x) * ((exp((x + x)) ** x) ** 5.0d0)
end function
public static double code(double x) {
return Math.cos(x) * Math.pow(Math.pow(Math.exp((x + x)), x), 5.0);
}
def code(x):
    """cos(x) * ((e**(2x))**x)**5 — algebraic rewrite of cos(x)*e**(10*x**2)."""
    powered = math.pow(math.exp(x + x), x)
    return math.cos(x) * math.pow(powered, 5.0)
function code(x) return Float64(cos(x) * ((exp(Float64(x + x)) ^ x) ^ 5.0)) end
function tmp = code(x) tmp = cos(x) * ((exp((x + x)) ^ x) ^ 5.0); end
code[x_] := N[(N[Cos[x], $MachinePrecision] * N[Power[N[Power[N[Exp[N[(x + x), $MachinePrecision]], $MachinePrecision], x], $MachinePrecision], 5.0], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\cos x \cdot {\left({\left(e^{x + x}\right)}^{x}\right)}^{5}
\end{array}
Initial program 94.4%
lift-exp.f64N/A
lift-*.f64N/A
exp-prodN/A
sqr-powN/A
pow-prod-downN/A
lift-*.f64N/A
associate-/l*N/A
pow-unpowN/A
lower-pow.f64N/A
pow-to-expN/A
lower-exp.f64N/A
lower-*.f64N/A
log-prodN/A
rem-log-expN/A
rem-log-expN/A
metadata-evalN/A
div-invN/A
lower-*.f64N/A
metadata-eval95.2
Applied rewrites95.2%
lift-pow.f64N/A
lift-*.f64N/A
*-commutativeN/A
pow-unpowN/A
lift-exp.f64N/A
lift-*.f64N/A
*-commutativeN/A
exp-prodN/A
pow-powN/A
metadata-evalN/A
pow-unpowN/A
*-commutativeN/A
pow-powN/A
exp-prodN/A
sqr-powN/A
pow-prod-downN/A
lower-pow.f64N/A
Applied rewrites95.1%
lift-exp.f64N/A
lift-*.f64N/A
*-commutativeN/A
exp-prodN/A
Applied rewrites97.6%
(FPCore (x) :precision binary64 (* (cos x) (pow (pow (exp x) (+ x x)) 5.0)))
double code(double x) {
return cos(x) * pow(pow(exp(x), (x + x)), 5.0);
}
real(8) function code(x)
real(8), intent (in) :: x
code = cos(x) * ((exp(x) ** (x + x)) ** 5.0d0)
end function
public static double code(double x) {
return Math.cos(x) * Math.pow(Math.pow(Math.exp(x), (x + x)), 5.0);
}
def code(x):
    """cos(x) * ((e**x)**(2x))**5 — algebraic rewrite of cos(x)*e**(10*x**2)."""
    powered = math.pow(math.exp(x), x + x)
    return math.cos(x) * math.pow(powered, 5.0)
function code(x) return Float64(cos(x) * ((exp(x) ^ Float64(x + x)) ^ 5.0)) end
function tmp = code(x) tmp = cos(x) * ((exp(x) ^ (x + x)) ^ 5.0); end
code[x_] := N[(N[Cos[x], $MachinePrecision] * N[Power[N[Power[N[Exp[x], $MachinePrecision], N[(x + x), $MachinePrecision]], $MachinePrecision], 5.0], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\cos x \cdot {\left({\left(e^{x}\right)}^{\left(x + x\right)}\right)}^{5}
\end{array}
Initial program 94.4%
lift-exp.f64N/A
lift-*.f64N/A
exp-prodN/A
sqr-powN/A
pow-prod-downN/A
lift-*.f64N/A
associate-/l*N/A
pow-unpowN/A
lower-pow.f64N/A
pow-to-expN/A
lower-exp.f64N/A
lower-*.f64N/A
log-prodN/A
rem-log-expN/A
rem-log-expN/A
metadata-evalN/A
div-invN/A
lower-*.f64N/A
metadata-eval95.2
Applied rewrites95.2%
lift-pow.f64N/A
lift-*.f64N/A
*-commutativeN/A
pow-unpowN/A
lift-exp.f64N/A
lift-*.f64N/A
*-commutativeN/A
exp-prodN/A
pow-powN/A
metadata-evalN/A
pow-unpowN/A
*-commutativeN/A
pow-powN/A
exp-prodN/A
sqr-powN/A
pow-prod-downN/A
lower-pow.f64N/A
Applied rewrites95.1%
lift-exp.f64N/A
lift-*.f64N/A
exp-prodN/A
lower-pow.f64N/A
lower-exp.f6496.7
Applied rewrites96.7%
(FPCore (x) :precision binary64 (* (cos x) (pow (exp (* x x)) 10.0)))
double code(double x) {
return cos(x) * pow(exp((x * x)), 10.0);
}
real(8) function code(x)
real(8), intent (in) :: x
code = cos(x) * (exp((x * x)) ** 10.0d0)
end function
public static double code(double x) {
return Math.cos(x) * Math.pow(Math.exp((x * x)), 10.0);
}
def code(x):
    """cos(x) * (e**(x*x))**10 — splits the constant 10 out of the exponent."""
    kernel = math.exp(x * x)
    return math.cos(x) * math.pow(kernel, 10.0)
function code(x) return Float64(cos(x) * (exp(Float64(x * x)) ^ 10.0)) end
function tmp = code(x) tmp = cos(x) * (exp((x * x)) ^ 10.0); end
code[x_] := N[(N[Cos[x], $MachinePrecision] * N[Power[N[Exp[N[(x * x), $MachinePrecision]], $MachinePrecision], 10.0], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\cos x \cdot {\left(e^{x \cdot x}\right)}^{10}
\end{array}
Initial program 94.4%
Applied rewrites95.2%
(FPCore (x) :precision binary64 (* (cos x) (pow (exp (* x 10.0)) x)))
double code(double x) {
return cos(x) * pow(exp((x * 10.0)), x);
}
real(8) function code(x)
real(8), intent (in) :: x
code = cos(x) * (exp((x * 10.0d0)) ** x)
end function
public static double code(double x) {
return Math.cos(x) * Math.pow(Math.exp((x * 10.0)), x);
}
def code(x):
    """cos(x) * (e**(10x))**x — moves one factor of x out of the exponent."""
    kernel = math.exp(x * 10.0)
    return math.cos(x) * math.pow(kernel, x)
function code(x) return Float64(cos(x) * (exp(Float64(x * 10.0)) ^ x)) end
function tmp = code(x) tmp = cos(x) * (exp((x * 10.0)) ^ x); end
code[x_] := N[(N[Cos[x], $MachinePrecision] * N[Power[N[Exp[N[(x * 10.0), $MachinePrecision]], $MachinePrecision], x], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\cos x \cdot {\left(e^{x \cdot 10}\right)}^{x}
\end{array}
Initial program 94.4%
lift-exp.f64N/A
lift-*.f64N/A
exp-prodN/A
lift-*.f64N/A
pow-unpowN/A
lower-pow.f64N/A
pow-expN/A
lower-exp.f64N/A
*-commutativeN/A
lower-*.f6495.1
Applied rewrites95.1%
(FPCore (x) :precision binary64 (* (cos x) (pow (exp 10.0) (* x x))))
double code(double x) {
return cos(x) * pow(exp(10.0), (x * x));
}
real(8) function code(x)
real(8), intent (in) :: x
code = cos(x) * (exp(10.0d0) ** (x * x))
end function
public static double code(double x) {
return Math.cos(x) * Math.pow(Math.exp(10.0), (x * x));
}
def code(x):
    """cos(x) * (e**10)**(x*x) — hoists the constant base e**10 out of the exponent."""
    base = math.exp(10.0)
    return math.cos(x) * math.pow(base, x * x)
function code(x) return Float64(cos(x) * (exp(10.0) ^ Float64(x * x))) end
function tmp = code(x) tmp = cos(x) * (exp(10.0) ^ (x * x)); end
code[x_] := N[(N[Cos[x], $MachinePrecision] * N[Power[N[Exp[10.0], $MachinePrecision], N[(x * x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\cos x \cdot {\left(e^{10}\right)}^{\left(x \cdot x\right)}
\end{array}
Initial program 94.4%
lift-exp.f64N/A
lift-*.f64N/A
exp-prodN/A
lower-pow.f64N/A
lower-exp.f6495.1
Applied rewrites95.1%
(FPCore (x) :precision binary64 (* (cos x) (exp (* (* x x) 10.0))))
double code(double x) {
return cos(x) * exp(((x * x) * 10.0));
}
real(8) function code(x)
real(8), intent (in) :: x
code = cos(x) * exp(((x * x) * 10.0d0))
end function
public static double code(double x) {
return Math.cos(x) * Math.exp(((x * x) * 10.0));
}
def code(x):
    """cos(x) * e**((x*x)*10) — reassociated form of cos(x)*e**(10*x**2)."""
    scaled = (x * x) * 10.0
    return math.cos(x) * math.exp(scaled)
function code(x) return Float64(cos(x) * exp(Float64(Float64(x * x) * 10.0))) end
function tmp = code(x) tmp = cos(x) * exp(((x * x) * 10.0)); end
code[x_] := N[(N[Cos[x], $MachinePrecision] * N[Exp[N[(N[(x * x), $MachinePrecision] * 10.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\cos x \cdot e^{\left(x \cdot x\right) \cdot 10}
\end{array}
Initial program 94.4%
Final simplification94.4%
(FPCore (x) :precision binary64 (* (exp (* (* x x) 10.0)) (fma (* x x) (fma x (* x (fma (* x x) -0.001388888888888889 0.041666666666666664)) -0.5) 1.0)))
double code(double x) {
return exp(((x * x) * 10.0)) * fma((x * x), fma(x, (x * fma((x * x), -0.001388888888888889, 0.041666666666666664)), -0.5), 1.0);
}
function code(x) return Float64(exp(Float64(Float64(x * x) * 10.0)) * fma(Float64(x * x), fma(x, Float64(x * fma(Float64(x * x), -0.001388888888888889, 0.041666666666666664)), -0.5), 1.0)) end
code[x_] := N[(N[Exp[N[(N[(x * x), $MachinePrecision] * 10.0), $MachinePrecision]], $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * N[(x * N[(x * N[(N[(x * x), $MachinePrecision] * -0.001388888888888889 + 0.041666666666666664), $MachinePrecision]), $MachinePrecision] + -0.5), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
e^{\left(x \cdot x\right) \cdot 10} \cdot \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x, x \cdot \mathsf{fma}\left(x \cdot x, -0.001388888888888889, 0.041666666666666664\right), -0.5\right), 1\right)
\end{array}
Initial program 94.4%
Taylor expanded in x around 0
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
sub-negN/A
*-commutativeN/A
metadata-evalN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6421.3
Applied rewrites21.3%
Taylor expanded in x around 0
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
sub-negN/A
unpow2N/A
associate-*l*N/A
metadata-evalN/A
lower-fma.f64N/A
lower-*.f64N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6427.5
Applied rewrites27.5%
Final simplification27.5%
(FPCore (x) :precision binary64 (* (fma x (* x (fma x (* x 0.041666666666666664) -0.5)) 1.0) (exp (* x (* x 10.0)))))
double code(double x) {
return fma(x, (x * fma(x, (x * 0.041666666666666664), -0.5)), 1.0) * exp((x * (x * 10.0)));
}
function code(x) return Float64(fma(x, Float64(x * fma(x, Float64(x * 0.041666666666666664), -0.5)), 1.0) * exp(Float64(x * Float64(x * 10.0)))) end
code[x_] := N[(N[(x * N[(x * N[(x * N[(x * 0.041666666666666664), $MachinePrecision] + -0.5), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision] * N[Exp[N[(x * N[(x * 10.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(x, x \cdot \mathsf{fma}\left(x, x \cdot 0.041666666666666664, -0.5\right), 1\right) \cdot e^{x \cdot \left(x \cdot 10\right)}
\end{array}
Initial program 94.4%
lift-*.f64N/A
lift-*.f64N/A
associate-*r*N/A
lower-*.f64N/A
*-commutativeN/A
lower-*.f6494.3
Applied rewrites94.3%
Taylor expanded in x around 0
+-commutativeN/A
unpow2N/A
associate-*l*N/A
*-commutativeN/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f64N/A
sub-negN/A
unpow2N/A
associate-*r*N/A
*-commutativeN/A
metadata-evalN/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f6421.3
Applied rewrites21.3%
Final simplification21.3%
(FPCore (x) :precision binary64 (* (exp (* (* x x) 10.0)) (fma x (* x -0.5) 1.0)))
double code(double x) {
return exp(((x * x) * 10.0)) * fma(x, (x * -0.5), 1.0);
}
function code(x) return Float64(exp(Float64(Float64(x * x) * 10.0)) * fma(x, Float64(x * -0.5), 1.0)) end
code[x_] := N[(N[Exp[N[(N[(x * x), $MachinePrecision] * 10.0), $MachinePrecision]], $MachinePrecision] * N[(x * N[(x * -0.5), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
e^{\left(x \cdot x\right) \cdot 10} \cdot \mathsf{fma}\left(x, x \cdot -0.5, 1\right)
\end{array}
Initial program 94.4%
Taylor expanded in x around 0
+-commutativeN/A
unpow2N/A
associate-*r*N/A
*-commutativeN/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f6418.2
Applied rewrites18.2%
Final simplification18.2%
(FPCore (x) :precision binary64 (* (fma x (* x -0.5) 1.0) (fma (* x x) (fma (* x x) (fma (* x x) 166.66666666666666 50.0) 10.0) 1.0)))
double code(double x) {
return fma(x, (x * -0.5), 1.0) * fma((x * x), fma((x * x), fma((x * x), 166.66666666666666, 50.0), 10.0), 1.0);
}
function code(x) return Float64(fma(x, Float64(x * -0.5), 1.0) * fma(Float64(x * x), fma(Float64(x * x), fma(Float64(x * x), 166.66666666666666, 50.0), 10.0), 1.0)) end
code[x_] := N[(N[(x * N[(x * -0.5), $MachinePrecision] + 1.0), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * 166.66666666666666 + 50.0), $MachinePrecision] + 10.0), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(x, x \cdot -0.5, 1\right) \cdot \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, 166.66666666666666, 50\right), 10\right), 1\right)
\end{array}
Initial program 94.4%
Applied rewrites95.2%
Taylor expanded in x around 0
Applied rewrites9.6%
Taylor expanded in x around 0
+-commutativeN/A
*-commutativeN/A
unpow2N/A
associate-*l*N/A
lower-fma.f64N/A
lower-*.f649.7
Applied rewrites9.7%
Taylor expanded in x around 0
Applied rewrites10.3%
(FPCore (x) :precision binary64 (* (fma x (* x -0.5) 1.0) (fma (* x x) (fma x (* x 50.0) 10.0) 1.0)))
double code(double x) {
return fma(x, (x * -0.5), 1.0) * fma((x * x), fma(x, (x * 50.0), 10.0), 1.0);
}
function code(x) return Float64(fma(x, Float64(x * -0.5), 1.0) * fma(Float64(x * x), fma(x, Float64(x * 50.0), 10.0), 1.0)) end
code[x_] := N[(N[(x * N[(x * -0.5), $MachinePrecision] + 1.0), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * N[(x * N[(x * 50.0), $MachinePrecision] + 10.0), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(x, x \cdot -0.5, 1\right) \cdot \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x, x \cdot 50, 10\right), 1\right)
\end{array}
Initial program 94.4%
Applied rewrites95.2%
Taylor expanded in x around 0
Applied rewrites9.6%
Taylor expanded in x around 0
+-commutativeN/A
*-commutativeN/A
unpow2N/A
associate-*l*N/A
lower-fma.f64N/A
lower-*.f649.7
Applied rewrites9.7%
Taylor expanded in x around 0
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
+-commutativeN/A
*-commutativeN/A
unpow2N/A
associate-*l*N/A
lower-fma.f64N/A
lower-*.f6410.1
Applied rewrites10.1%
(FPCore (x) :precision binary64 (* (fma x (* x -0.5) 1.0) (fma x (* x 10.0) 1.0)))
double code(double x) {
return fma(x, (x * -0.5), 1.0) * fma(x, (x * 10.0), 1.0);
}
function code(x) return Float64(fma(x, Float64(x * -0.5), 1.0) * fma(x, Float64(x * 10.0), 1.0)) end
code[x_] := N[(N[(x * N[(x * -0.5), $MachinePrecision] + 1.0), $MachinePrecision] * N[(x * N[(x * 10.0), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(x, x \cdot -0.5, 1\right) \cdot \mathsf{fma}\left(x, x \cdot 10, 1\right)
\end{array}
Initial program 94.4%
Applied rewrites95.2%
Taylor expanded in x around 0
Applied rewrites9.6%
Taylor expanded in x around 0
+-commutativeN/A
*-commutativeN/A
unpow2N/A
associate-*l*N/A
lower-fma.f64N/A
lower-*.f649.7
Applied rewrites9.7%
Taylor expanded in x around 0
+-commutativeN/A
unpow2N/A
associate-*r*N/A
*-commutativeN/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f649.9
Applied rewrites9.9%
(FPCore (x) :precision binary64 (* (* (* x x) -0.5) 1.0))
/* Single leading Taylor term -x^2/2; the trailing * 1.0 is kept from Herbie's output. */
double code(double x) {
	double halved = (x * x) * -0.5;
	return halved * 1.0;
}
real(8) function code(x)
real(8), intent (in) :: x
code = ((x * x) * (-0.5d0)) * 1.0d0
end function
public static double code(double x) {
return ((x * x) * -0.5) * 1.0;
}
def code(x):
    """Single leading Taylor term -x**2/2 (trailing * 1.0 preserved from Herbie)."""
    halved = (x * x) * -0.5
    return halved * 1.0
function code(x) return Float64(Float64(Float64(x * x) * -0.5) * 1.0) end
function tmp = code(x) tmp = ((x * x) * -0.5) * 1.0; end
code[x_] := N[(N[(N[(x * x), $MachinePrecision] * -0.5), $MachinePrecision] * 1.0), $MachinePrecision]
\begin{array}{l}
\\
\left(\left(x \cdot x\right) \cdot -0.5\right) \cdot 1
\end{array}
Initial program 94.4%
Applied rewrites95.2%
Taylor expanded in x around 0
Applied rewrites9.6%
Taylor expanded in x around 0
+-commutativeN/A
*-commutativeN/A
unpow2N/A
associate-*l*N/A
lower-fma.f64N/A
lower-*.f649.7
Applied rewrites9.7%
Taylor expanded in x around inf
Applied rewrites9.7%
(FPCore (x) :precision binary64 1.0)
/* Degenerate approximation: returns the constant 1.0 regardless of x. */
double code(double x) {
return 1.0;
}
real(8) function code(x)
real(8), intent (in) :: x
code = 1.0d0
end function
public static double code(double x) {
return 1.0;
}
def code(x):
    """Degenerate approximation: return the constant 1.0 regardless of x."""
    return 1.0
function code(x) return 1.0 end
function tmp = code(x) tmp = 1.0; end
code[x_] := 1.0
\begin{array}{l}
\\
1
\end{array}
Initial program 94.4%
Taylor expanded in x around 0
Applied rewrites1.5%
herbie shell --seed 2024233
(FPCore (x)
:name "ENA, Section 1.4, Exercise 1"
:precision binary64
:pre (and (<= 1.99 x) (<= x 2.01))
(* (cos x) (exp (* 10.0 (* x x)))))