
(FPCore (x) :precision binary64 (* (cos x) (exp (* 10.0 (* x x)))))
double code(double x) {
return cos(x) * exp((10.0 * (x * x)));
}
! Reference rendering of the initial program: cos(x) * exp(10 * x**2),
! evaluated in real(8) (binary64). Generated code — kept byte-identical.
real(8) function code(x)
real(8), intent (in) :: x
code = cos(x) * exp((10.0d0 * (x * x)))
end function
/** Computes cos(x) * e^(10 * x^2) in double precision. */
public static double code(double x) {
    final double xSquared = x * x;
    return Math.exp(10.0 * xSquared) * Math.cos(x);
}
def code(x):
    """Return cos(x) * exp(10 * x**2), evaluated in double precision."""
    squared = x * x
    return math.cos(x) * math.exp(10.0 * squared)
# Julia rendering of cos(x) * exp(10 * x^2); the explicit Float64 casts model
# per-operation binary64 rounding. Generated code — kept byte-identical.
function code(x) return Float64(cos(x) * exp(Float64(10.0 * Float64(x * x)))) end
% MATLAB rendering of cos(x) * exp(10 * x^2). Generated code — kept byte-identical.
function tmp = code(x) tmp = cos(x) * exp((10.0 * (x * x))); end
(* Wolfram rendering of cos(x) * exp(10 * x^2); each N[..., $MachinePrecision]
   wrapper models one rounded machine-precision operation. Kept byte-identical. *)
code[x_] := N[(N[Cos[x], $MachinePrecision] * N[Exp[N[(10.0 * N[(x * x), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\cos x \cdot e^{10 \cdot \left(x \cdot x\right)}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 19 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x) :precision binary64 (* (cos x) (exp (* 10.0 (* x x)))))
double code(double x) {
return cos(x) * exp((10.0 * (x * x)));
}
real(8) function code(x)
real(8), intent (in) :: x
code = cos(x) * exp((10.0d0 * (x * x)))
end function
public static double code(double x) {
return Math.cos(x) * Math.exp((10.0 * (x * x)));
}
def code(x): return math.cos(x) * math.exp((10.0 * (x * x)))
function code(x) return Float64(cos(x) * exp(Float64(10.0 * Float64(x * x)))) end
function tmp = code(x) tmp = cos(x) * exp((10.0 * (x * x))); end
code[x_] := N[(N[Cos[x], $MachinePrecision] * N[Exp[N[(10.0 * N[(x * x), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\cos x \cdot e^{10 \cdot \left(x \cdot x\right)}
\end{array}
(FPCore (x) :precision binary64 (* (cos x) (/ 1.0 (pow (pow (exp 20.0) x) (* x -0.5)))))
double code(double x) {
return cos(x) * (1.0 / pow(pow(exp(20.0), x), (x * -0.5)));
}
real(8) function code(x)
real(8), intent (in) :: x
code = cos(x) * (1.0d0 / ((exp(20.0d0) ** x) ** (x * (-0.5d0))))
end function
public static double code(double x) {
return Math.cos(x) * (1.0 / Math.pow(Math.pow(Math.exp(20.0), x), (x * -0.5)));
}
def code(x): return math.cos(x) * (1.0 / math.pow(math.pow(math.exp(20.0), x), (x * -0.5)))
function code(x) return Float64(cos(x) * Float64(1.0 / ((exp(20.0) ^ x) ^ Float64(x * -0.5)))) end
function tmp = code(x) tmp = cos(x) * (1.0 / ((exp(20.0) ^ x) ^ (x * -0.5))); end
code[x_] := N[(N[Cos[x], $MachinePrecision] * N[(1.0 / N[Power[N[Power[N[Exp[20.0], $MachinePrecision], x], $MachinePrecision], N[(x * -0.5), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\cos x \cdot \frac{1}{{\left({\left(e^{20}\right)}^{x}\right)}^{\left(x \cdot -0.5\right)}}
\end{array}
Initial program 94.5%
exp-prodN/A
sqr-powN/A
pow-prod-upN/A
flip-+N/A
+-inversesN/A
+-inversesN/A
metadata-evalN/A
associate-/l/N/A
+-inversesN/A
+-inversesN/A
flip-+N/A
count-2N/A
*-commutativeN/A
associate-*l/N/A
associate-/l*N/A
metadata-evalN/A
*-rgt-identityN/A
frac-2negN/A
distribute-frac-negN/A
pow-negN/A
Applied egg-rr 95.2%
sqr-powN/A
pow-prod-downN/A
associate-/l*N/A
pow-unpowN/A
neg-mul-1N/A
*-commutativeN/A
associate-/l*N/A
metadata-evalN/A
pow-lowering-pow.f64N/A
pow-lowering-pow.f64N/A
prod-expN/A
exp-lowering-exp.f64N/A
metadata-evalN/A
*-lowering-*.f6499.2
Applied egg-rr99.2%
(FPCore (x) :precision binary64 (* (cos x) (pow (pow (exp (+ x x)) 10.0) (* x 0.5))))
double code(double x) {
return cos(x) * pow(pow(exp((x + x)), 10.0), (x * 0.5));
}
real(8) function code(x)
real(8), intent (in) :: x
code = cos(x) * ((exp((x + x)) ** 10.0d0) ** (x * 0.5d0))
end function
public static double code(double x) {
return Math.cos(x) * Math.pow(Math.pow(Math.exp((x + x)), 10.0), (x * 0.5));
}
def code(x): return math.cos(x) * math.pow(math.pow(math.exp((x + x)), 10.0), (x * 0.5))
function code(x) return Float64(cos(x) * ((exp(Float64(x + x)) ^ 10.0) ^ Float64(x * 0.5))) end
function tmp = code(x) tmp = cos(x) * ((exp((x + x)) ^ 10.0) ^ (x * 0.5)); end
code[x_] := N[(N[Cos[x], $MachinePrecision] * N[Power[N[Power[N[Exp[N[(x + x), $MachinePrecision]], $MachinePrecision], 10.0], $MachinePrecision], N[(x * 0.5), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\cos x \cdot {\left({\left(e^{x + x}\right)}^{10}\right)}^{\left(x \cdot 0.5\right)}
\end{array}
Initial program 94.5%
exp-prodN/A
sqr-powN/A
pow-prod-upN/A
flip-+N/A
+-inversesN/A
+-inversesN/A
metadata-evalN/A
associate-/l/N/A
+-inversesN/A
+-inversesN/A
flip-+N/A
count-2N/A
*-commutativeN/A
associate-*l/N/A
associate-/l*N/A
metadata-evalN/A
*-rgt-identityN/A
frac-2negN/A
distribute-frac-negN/A
pow-negN/A
Applied egg-rr95.2%
pow-flipN/A
pow-expN/A
distribute-rgt-neg-outN/A
remove-double-negN/A
*-commutativeN/A
associate-*r*N/A
pow-expN/A
sqr-powN/A
pow-prod-downN/A
*-commutativeN/A
associate-/l*N/A
pow-unpowN/A
pow-lowering-pow.f64N/A
pow-lowering-pow.f64N/A
prod-expN/A
exp-lowering-exp.f64N/A
+-lowering-+.f64N/A
div-invN/A
metadata-evalN/A
*-lowering-*.f6497.6
Applied egg-rr97.6%
(FPCore (x) :precision binary64 (* (cos x) (pow (pow (exp (+ x x)) x) 5.0)))
double code(double x) {
return cos(x) * pow(pow(exp((x + x)), x), 5.0);
}
real(8) function code(x)
real(8), intent (in) :: x
code = cos(x) * ((exp((x + x)) ** x) ** 5.0d0)
end function
public static double code(double x) {
return Math.cos(x) * Math.pow(Math.pow(Math.exp((x + x)), x), 5.0);
}
def code(x): return math.cos(x) * math.pow(math.pow(math.exp((x + x)), x), 5.0)
function code(x) return Float64(cos(x) * ((exp(Float64(x + x)) ^ x) ^ 5.0)) end
function tmp = code(x) tmp = cos(x) * ((exp((x + x)) ^ x) ^ 5.0); end
code[x_] := N[(N[Cos[x], $MachinePrecision] * N[Power[N[Power[N[Exp[N[(x + x), $MachinePrecision]], $MachinePrecision], x], $MachinePrecision], 5.0], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\cos x \cdot {\left({\left(e^{x + x}\right)}^{x}\right)}^{5}
\end{array}
Initial program 94.5%
exp-prodN/A
sqr-powN/A
pow-prod-upN/A
flip-+N/A
+-inversesN/A
+-inversesN/A
metadata-evalN/A
associate-/l/N/A
+-inversesN/A
+-inversesN/A
flip-+N/A
count-2N/A
*-commutativeN/A
associate-*l/N/A
associate-/l*N/A
metadata-evalN/A
*-rgt-identityN/A
frac-2negN/A
distribute-frac-negN/A
pow-negN/A
Applied egg-rr95.2%
pow-flipN/A
pow-expN/A
distribute-rgt-neg-outN/A
remove-double-negN/A
*-commutativeN/A
associate-*r*N/A
pow-expN/A
sqr-powN/A
pow-prod-downN/A
associate-/l*N/A
pow-unpowN/A
pow-lowering-pow.f64N/A
pow-lowering-pow.f64N/A
prod-expN/A
exp-lowering-exp.f64N/A
+-lowering-+.f64N/A
metadata-eval97.6
Applied egg-rr97.6%
(FPCore (x) :precision binary64 (/ (* (cos x) (exp (* -0.5 (* x (* x -10.0))))) (exp (* 10.0 (* -0.5 (* x x))))))
double code(double x) {
return (cos(x) * exp((-0.5 * (x * (x * -10.0))))) / exp((10.0 * (-0.5 * (x * x))));
}
real(8) function code(x)
real(8), intent (in) :: x
code = (cos(x) * exp(((-0.5d0) * (x * (x * (-10.0d0)))))) / exp((10.0d0 * ((-0.5d0) * (x * x))))
end function
public static double code(double x) {
return (Math.cos(x) * Math.exp((-0.5 * (x * (x * -10.0))))) / Math.exp((10.0 * (-0.5 * (x * x))));
}
def code(x): return (math.cos(x) * math.exp((-0.5 * (x * (x * -10.0))))) / math.exp((10.0 * (-0.5 * (x * x))))
function code(x) return Float64(Float64(cos(x) * exp(Float64(-0.5 * Float64(x * Float64(x * -10.0))))) / exp(Float64(10.0 * Float64(-0.5 * Float64(x * x))))) end
function tmp = code(x) tmp = (cos(x) * exp((-0.5 * (x * (x * -10.0))))) / exp((10.0 * (-0.5 * (x * x)))); end
code[x_] := N[(N[(N[Cos[x], $MachinePrecision] * N[Exp[N[(-0.5 * N[(x * N[(x * -10.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[Exp[N[(10.0 * N[(-0.5 * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\cos x \cdot e^{-0.5 \cdot \left(x \cdot \left(x \cdot -10\right)\right)}}{e^{10 \cdot \left(-0.5 \cdot \left(x \cdot x\right)\right)}}
\end{array}
Initial program 94.5%
exp-prodN/A
sqr-powN/A
pow-prod-upN/A
flip-+N/A
+-inversesN/A
+-inversesN/A
metadata-evalN/A
associate-/l/N/A
+-inversesN/A
+-inversesN/A
flip-+N/A
count-2N/A
*-commutativeN/A
associate-*l/N/A
associate-/l*N/A
metadata-evalN/A
*-rgt-identityN/A
frac-2negN/A
distribute-frac-negN/A
pow-negN/A
Applied egg-rr95.2%
*-commutativeN/A
sqr-powN/A
associate-/r*N/A
associate-*l/N/A
/-lowering-/.f64N/A
Applied egg-rr95.3%
Final simplification95.3%
(FPCore (x) :precision binary64 (* (cos x) (/ (exp (* -0.5 (* x (* x -10.0)))) (exp (* (* x x) -5.0)))))
double code(double x) {
return cos(x) * (exp((-0.5 * (x * (x * -10.0)))) / exp(((x * x) * -5.0)));
}
real(8) function code(x)
real(8), intent (in) :: x
code = cos(x) * (exp(((-0.5d0) * (x * (x * (-10.0d0))))) / exp(((x * x) * (-5.0d0))))
end function
public static double code(double x) {
return Math.cos(x) * (Math.exp((-0.5 * (x * (x * -10.0)))) / Math.exp(((x * x) * -5.0)));
}
def code(x): return math.cos(x) * (math.exp((-0.5 * (x * (x * -10.0)))) / math.exp(((x * x) * -5.0)))
function code(x) return Float64(cos(x) * Float64(exp(Float64(-0.5 * Float64(x * Float64(x * -10.0)))) / exp(Float64(Float64(x * x) * -5.0)))) end
function tmp = code(x) tmp = cos(x) * (exp((-0.5 * (x * (x * -10.0)))) / exp(((x * x) * -5.0))); end
code[x_] := N[(N[Cos[x], $MachinePrecision] * N[(N[Exp[N[(-0.5 * N[(x * N[(x * -10.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] / N[Exp[N[(N[(x * x), $MachinePrecision] * -5.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\cos x \cdot \frac{e^{-0.5 \cdot \left(x \cdot \left(x \cdot -10\right)\right)}}{e^{\left(x \cdot x\right) \cdot -5}}
\end{array}
Initial program 94.5%
exp-prodN/A
sqr-powN/A
pow-prod-upN/A
flip-+N/A
+-inversesN/A
+-inversesN/A
metadata-evalN/A
associate-/l/N/A
+-inversesN/A
+-inversesN/A
flip-+N/A
count-2N/A
*-commutativeN/A
associate-*l/N/A
associate-/l*N/A
metadata-evalN/A
*-rgt-identityN/A
frac-2negN/A
distribute-frac-negN/A
pow-negN/A
Applied egg-rr95.2%
sqr-powN/A
associate-/r*N/A
/-lowering-/.f64N/A
Applied egg-rr95.2%
associate-*r*N/A
*-commutativeN/A
associate-*l*N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
metadata-eval95.2
Applied egg-rr95.2%
Final simplification95.2%
(FPCore (x) :precision binary64 (* (cos x) (/ 1.0 (pow (exp (* x (- x))) 10.0))))
double code(double x) {
return cos(x) * (1.0 / pow(exp((x * -x)), 10.0));
}
! Computes cos(x) / exp(x * (-x))**10 == cos(x) * exp(10 * x**2) in real(8).
! Fix: the generated expression "x * -x" is invalid standard Fortran — two
! operators may not be adjacent; the negated operand must be parenthesized,
! matching the style used elsewhere in this report (e.g. "x * (-0.5d0)").
real(8) function code(x)
real(8), intent (in) :: x
code = cos(x) * (1.0d0 / (exp((x * (-x))) ** 10.0d0))
end function
public static double code(double x) {
return Math.cos(x) * (1.0 / Math.pow(Math.exp((x * -x)), 10.0));
}
def code(x): return math.cos(x) * (1.0 / math.pow(math.exp((x * -x)), 10.0))
function code(x) return Float64(cos(x) * Float64(1.0 / (exp(Float64(x * Float64(-x))) ^ 10.0))) end
function tmp = code(x) tmp = cos(x) * (1.0 / (exp((x * -x)) ^ 10.0)); end
code[x_] := N[(N[Cos[x], $MachinePrecision] * N[(1.0 / N[Power[N[Exp[N[(x * (-x)), $MachinePrecision]], $MachinePrecision], 10.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\cos x \cdot \frac{1}{{\left(e^{x \cdot \left(-x\right)}\right)}^{10}}
\end{array}
Initial program 94.5%
exp-prodN/A
sqr-powN/A
pow-prod-upN/A
flip-+N/A
+-inversesN/A
+-inversesN/A
metadata-evalN/A
associate-/l/N/A
+-inversesN/A
+-inversesN/A
flip-+N/A
count-2N/A
*-commutativeN/A
associate-*l/N/A
associate-/l*N/A
metadata-evalN/A
*-rgt-identityN/A
frac-2negN/A
distribute-frac-negN/A
pow-negN/A
Applied egg-rr95.2%
pow-expN/A
*-commutativeN/A
exp-prodN/A
pow-lowering-pow.f64N/A
exp-lowering-exp.f64N/A
*-lowering-*.f64N/A
neg-lowering-neg.f6495.2
Applied egg-rr95.2%
(FPCore (x) :precision binary64 (* (cos x) (pow (exp 10.0) (* x x))))
double code(double x) {
return cos(x) * pow(exp(10.0), (x * x));
}
real(8) function code(x)
real(8), intent (in) :: x
code = cos(x) * (exp(10.0d0) ** (x * x))
end function
public static double code(double x) {
return Math.cos(x) * Math.pow(Math.exp(10.0), (x * x));
}
def code(x): return math.cos(x) * math.pow(math.exp(10.0), (x * x))
function code(x) return Float64(cos(x) * (exp(10.0) ^ Float64(x * x))) end
function tmp = code(x) tmp = cos(x) * (exp(10.0) ^ (x * x)); end
code[x_] := N[(N[Cos[x], $MachinePrecision] * N[Power[N[Exp[10.0], $MachinePrecision], N[(x * x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\cos x \cdot {\left(e^{10}\right)}^{\left(x \cdot x\right)}
\end{array}
Initial program 94.5%
exp-prodN/A
pow-lowering-pow.f64N/A
exp-lowering-exp.f64N/A
*-lowering-*.f6495.2
Applied egg-rr95.2%
(FPCore (x) :precision binary64 (* (cos x) (/ 1.0 (exp (* -10.0 (* x x))))))
double code(double x) {
return cos(x) * (1.0 / exp((-10.0 * (x * x))));
}
real(8) function code(x)
real(8), intent (in) :: x
code = cos(x) * (1.0d0 / exp(((-10.0d0) * (x * x))))
end function
public static double code(double x) {
return Math.cos(x) * (1.0 / Math.exp((-10.0 * (x * x))));
}
def code(x): return math.cos(x) * (1.0 / math.exp((-10.0 * (x * x))))
function code(x) return Float64(cos(x) * Float64(1.0 / exp(Float64(-10.0 * Float64(x * x))))) end
function tmp = code(x) tmp = cos(x) * (1.0 / exp((-10.0 * (x * x)))); end
code[x_] := N[(N[Cos[x], $MachinePrecision] * N[(1.0 / N[Exp[N[(-10.0 * N[(x * x), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\cos x \cdot \frac{1}{e^{-10 \cdot \left(x \cdot x\right)}}
\end{array}
Initial program 94.5%
exp-prodN/A
sqr-powN/A
pow-prod-upN/A
flip-+N/A
+-inversesN/A
+-inversesN/A
metadata-evalN/A
associate-/l/N/A
+-inversesN/A
+-inversesN/A
flip-+N/A
count-2N/A
*-commutativeN/A
associate-*l/N/A
associate-/l*N/A
metadata-evalN/A
*-rgt-identityN/A
frac-2negN/A
distribute-frac-negN/A
pow-negN/A
Applied egg-rr95.2%
Taylor expanded in x around inf
exp-lowering-exp.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6494.5
Simplified94.5%
Final simplification94.5%
(FPCore (x) :precision binary64 (/ (cos x) (exp (* -10.0 (* x x)))))
double code(double x) {
return cos(x) / exp((-10.0 * (x * x)));
}
real(8) function code(x)
real(8), intent (in) :: x
code = cos(x) / exp(((-10.0d0) * (x * x)))
end function
public static double code(double x) {
return Math.cos(x) / Math.exp((-10.0 * (x * x)));
}
def code(x): return math.cos(x) / math.exp((-10.0 * (x * x)))
function code(x) return Float64(cos(x) / exp(Float64(-10.0 * Float64(x * x)))) end
function tmp = code(x) tmp = cos(x) / exp((-10.0 * (x * x))); end
code[x_] := N[(N[Cos[x], $MachinePrecision] / N[Exp[N[(-10.0 * N[(x * x), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\cos x}{e^{-10 \cdot \left(x \cdot x\right)}}
\end{array}
Initial program 94.5%
exp-prodN/A
sqr-powN/A
pow-prod-upN/A
flip-+N/A
+-inversesN/A
+-inversesN/A
metadata-evalN/A
associate-/l/N/A
+-inversesN/A
+-inversesN/A
flip-+N/A
count-2N/A
*-commutativeN/A
associate-*l/N/A
associate-/l*N/A
metadata-evalN/A
*-rgt-identityN/A
frac-2negN/A
distribute-frac-negN/A
pow-negN/A
Applied egg-rr95.2%
sqr-powN/A
pow-prod-downN/A
associate-/l*N/A
pow-unpowN/A
neg-mul-1N/A
*-commutativeN/A
associate-/l*N/A
metadata-evalN/A
pow-lowering-pow.f64N/A
pow-lowering-pow.f64N/A
prod-expN/A
exp-lowering-exp.f64N/A
metadata-evalN/A
*-lowering-*.f6499.2
Applied egg-rr99.2%
Taylor expanded in x around inf
/-lowering-/.f64N/A
cos-lowering-cos.f64N/A
associate-*r*N/A
exp-prodN/A
exp-prodN/A
rem-log-expN/A
exp-prodN/A
associate-*r*N/A
associate-*r*N/A
*-commutativeN/A
associate-*r*N/A
metadata-evalN/A
associate-*r*N/A
unpow2N/A
exp-lowering-exp.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6494.5
Simplified94.5%
Final simplification94.5%
(FPCore (x) :precision binary64 (* (cos x) (exp (* 10.0 (* x x)))))
double code(double x) {
return cos(x) * exp((10.0 * (x * x)));
}
real(8) function code(x)
real(8), intent (in) :: x
code = cos(x) * exp((10.0d0 * (x * x)))
end function
public static double code(double x) {
return Math.cos(x) * Math.exp((10.0 * (x * x)));
}
def code(x): return math.cos(x) * math.exp((10.0 * (x * x)))
function code(x) return Float64(cos(x) * exp(Float64(10.0 * Float64(x * x)))) end
function tmp = code(x) tmp = cos(x) * exp((10.0 * (x * x))); end
code[x_] := N[(N[Cos[x], $MachinePrecision] * N[Exp[N[(10.0 * N[(x * x), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\cos x \cdot e^{10 \cdot \left(x \cdot x\right)}
\end{array}
Initial program 94.5%
(FPCore (x) :precision binary64 (* (exp (* 10.0 (* x x))) (fma (* x x) (fma (* x x) (fma (* x x) -0.001388888888888889 0.041666666666666664) -0.5) 1.0)))
double code(double x) {
return exp((10.0 * (x * x))) * fma((x * x), fma((x * x), fma((x * x), -0.001388888888888889, 0.041666666666666664), -0.5), 1.0);
}
function code(x) return Float64(exp(Float64(10.0 * Float64(x * x))) * fma(Float64(x * x), fma(Float64(x * x), fma(Float64(x * x), -0.001388888888888889, 0.041666666666666664), -0.5), 1.0)) end
code[x_] := N[(N[Exp[N[(10.0 * N[(x * x), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * -0.001388888888888889 + 0.041666666666666664), $MachinePrecision] + -0.5), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
e^{10 \cdot \left(x \cdot x\right)} \cdot \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, -0.001388888888888889, 0.041666666666666664\right), -0.5\right), 1\right)
\end{array}
Initial program 94.5%
Taylor expanded in x around 0
+-commutativeN/A
accelerator-lowering-fma.f64N/A
unpow2N/A
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
accelerator-lowering-fma.f64N/A
unpow2N/A
*-lowering-*.f64N/A
+-commutativeN/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
unpow2N/A
*-lowering-*.f6427.5
Simplified27.5%
Final simplification27.5%
(FPCore (x)
:precision binary64
(*
(exp (* x (* x 10.0)))
(fma
x
(*
x
(fma
(* x x)
(fma (* x x) -0.001388888888888889 0.041666666666666664)
-0.5))
1.0)))
double code(double x) {
return exp((x * (x * 10.0))) * fma(x, (x * fma((x * x), fma((x * x), -0.001388888888888889, 0.041666666666666664), -0.5)), 1.0);
}
function code(x) return Float64(exp(Float64(x * Float64(x * 10.0))) * fma(x, Float64(x * fma(Float64(x * x), fma(Float64(x * x), -0.001388888888888889, 0.041666666666666664), -0.5)), 1.0)) end
code[x_] := N[(N[Exp[N[(x * N[(x * 10.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] * N[(x * N[(x * N[(N[(x * x), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * -0.001388888888888889 + 0.041666666666666664), $MachinePrecision] + -0.5), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
e^{x \cdot \left(x \cdot 10\right)} \cdot \mathsf{fma}\left(x, x \cdot \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, -0.001388888888888889, 0.041666666666666664\right), -0.5\right), 1\right)
\end{array}
Initial program 94.5%
Taylor expanded in x around inf
*-commutativeN/A
*-lft-identityN/A
*-lowering-*.f64N/A
exp-lowering-exp.f64N/A
*-commutativeN/A
unpow2N/A
associate-*l*N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lft-identityN/A
cos-lowering-cos.f6494.3
Simplified94.3%
Taylor expanded in x around 0
+-commutativeN/A
unpow2N/A
associate-*l*N/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
accelerator-lowering-fma.f64N/A
unpow2N/A
*-lowering-*.f64N/A
+-commutativeN/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
unpow2N/A
*-lowering-*.f6427.5
Simplified27.5%
(FPCore (x) :precision binary64 (* (exp (* x (* x 10.0))) (fma (* x x) (fma (* x x) 0.041666666666666664 -0.5) 1.0)))
double code(double x) {
return exp((x * (x * 10.0))) * fma((x * x), fma((x * x), 0.041666666666666664, -0.5), 1.0);
}
function code(x) return Float64(exp(Float64(x * Float64(x * 10.0))) * fma(Float64(x * x), fma(Float64(x * x), 0.041666666666666664, -0.5), 1.0)) end
code[x_] := N[(N[Exp[N[(x * N[(x * 10.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * 0.041666666666666664 + -0.5), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
e^{x \cdot \left(x \cdot 10\right)} \cdot \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, 0.041666666666666664, -0.5\right), 1\right)
\end{array}
Initial program 94.5%
Taylor expanded in x around inf
*-commutativeN/A
*-lft-identityN/A
*-lowering-*.f64N/A
exp-lowering-exp.f64N/A
*-commutativeN/A
unpow2N/A
associate-*l*N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lft-identityN/A
cos-lowering-cos.f6494.3
Simplified94.3%
Taylor expanded in x around 0
+-commutativeN/A
accelerator-lowering-fma.f64N/A
unpow2N/A
*-lowering-*.f64N/A
sub-negN/A
*-commutativeN/A
metadata-evalN/A
accelerator-lowering-fma.f64N/A
unpow2N/A
*-lowering-*.f6421.3
Simplified21.3%
(FPCore (x) :precision binary64 (* (exp (* x (* x 10.0))) (fma x (* x -0.5) 1.0)))
double code(double x) {
return exp((x * (x * 10.0))) * fma(x, (x * -0.5), 1.0);
}
function code(x) return Float64(exp(Float64(x * Float64(x * 10.0))) * fma(x, Float64(x * -0.5), 1.0)) end
code[x_] := N[(N[Exp[N[(x * N[(x * 10.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] * N[(x * N[(x * -0.5), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
e^{x \cdot \left(x \cdot 10\right)} \cdot \mathsf{fma}\left(x, x \cdot -0.5, 1\right)
\end{array}
Initial program 94.5%
Taylor expanded in x around inf
*-commutativeN/A
*-lft-identityN/A
*-lowering-*.f64N/A
exp-lowering-exp.f64N/A
*-commutativeN/A
unpow2N/A
associate-*l*N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lft-identityN/A
cos-lowering-cos.f6494.3
Simplified94.3%
Taylor expanded in x around 0
+-commutativeN/A
*-commutativeN/A
unpow2N/A
associate-*l*N/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f6418.2
Simplified18.2%
(FPCore (x) :precision binary64 (* (fma (* x x) (fma (* x x) 0.041666666666666664 -0.5) 1.0) (fma (* x x) (fma (* x x) (fma x (* x 166.66666666666666) 50.0) 10.0) 1.0)))
double code(double x) {
return fma((x * x), fma((x * x), 0.041666666666666664, -0.5), 1.0) * fma((x * x), fma((x * x), fma(x, (x * 166.66666666666666), 50.0), 10.0), 1.0);
}
function code(x) return Float64(fma(Float64(x * x), fma(Float64(x * x), 0.041666666666666664, -0.5), 1.0) * fma(Float64(x * x), fma(Float64(x * x), fma(x, Float64(x * 166.66666666666666), 50.0), 10.0), 1.0)) end
code[x_] := N[(N[(N[(x * x), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * 0.041666666666666664 + -0.5), $MachinePrecision] + 1.0), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * N[(x * N[(x * 166.66666666666666), $MachinePrecision] + 50.0), $MachinePrecision] + 10.0), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, 0.041666666666666664, -0.5\right), 1\right) \cdot \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x, x \cdot 166.66666666666666, 50\right), 10\right), 1\right)
\end{array}
Initial program 94.5%
Taylor expanded in x around 0
+-commutativeN/A
accelerator-lowering-fma.f64N/A
unpow2N/A
*-lowering-*.f64N/A
sub-negN/A
*-commutativeN/A
metadata-evalN/A
accelerator-lowering-fma.f64N/A
unpow2N/A
*-lowering-*.f6421.3
Simplified21.3%
Taylor expanded in x around 0
Simplified10.2%
(FPCore (x) :precision binary64 (* (fma (* x x) (fma (* x x) 0.041666666666666664 -0.5) 1.0) (fma (* x x) (fma x (* x 50.0) 10.0) 1.0)))
double code(double x) {
return fma((x * x), fma((x * x), 0.041666666666666664, -0.5), 1.0) * fma((x * x), fma(x, (x * 50.0), 10.0), 1.0);
}
function code(x) return Float64(fma(Float64(x * x), fma(Float64(x * x), 0.041666666666666664, -0.5), 1.0) * fma(Float64(x * x), fma(x, Float64(x * 50.0), 10.0), 1.0)) end
code[x_] := N[(N[(N[(x * x), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * 0.041666666666666664 + -0.5), $MachinePrecision] + 1.0), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * N[(x * N[(x * 50.0), $MachinePrecision] + 10.0), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, 0.041666666666666664, -0.5\right), 1\right) \cdot \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x, x \cdot 50, 10\right), 1\right)
\end{array}
Initial program 94.5%
Taylor expanded in x around 0
+-commutativeN/A
accelerator-lowering-fma.f64N/A
unpow2N/A
*-lowering-*.f64N/A
sub-negN/A
*-commutativeN/A
metadata-evalN/A
accelerator-lowering-fma.f64N/A
unpow2N/A
*-lowering-*.f6421.3
Simplified21.3%
Taylor expanded in x around 0
+-commutativeN/A
accelerator-lowering-fma.f64N/A
unpow2N/A
*-lowering-*.f64N/A
+-commutativeN/A
*-commutativeN/A
unpow2N/A
associate-*l*N/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f6410.0
Simplified10.0%
(FPCore (x) :precision binary64 (* (fma (* x x) (fma (* x x) 0.041666666666666664 -0.5) 1.0) (fma x (* x 10.0) 1.0)))
double code(double x) {
return fma((x * x), fma((x * x), 0.041666666666666664, -0.5), 1.0) * fma(x, (x * 10.0), 1.0);
}
function code(x) return Float64(fma(Float64(x * x), fma(Float64(x * x), 0.041666666666666664, -0.5), 1.0) * fma(x, Float64(x * 10.0), 1.0)) end
code[x_] := N[(N[(N[(x * x), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * 0.041666666666666664 + -0.5), $MachinePrecision] + 1.0), $MachinePrecision] * N[(x * N[(x * 10.0), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, 0.041666666666666664, -0.5\right), 1\right) \cdot \mathsf{fma}\left(x, x \cdot 10, 1\right)
\end{array}
Initial program 94.5%
Taylor expanded in x around 0
+-commutativeN/A
accelerator-lowering-fma.f64N/A
unpow2N/A
*-lowering-*.f64N/A
sub-negN/A
*-commutativeN/A
metadata-evalN/A
accelerator-lowering-fma.f64N/A
unpow2N/A
*-lowering-*.f6421.3
Simplified21.3%
Taylor expanded in x around 0
+-commutativeN/A
*-commutativeN/A
unpow2N/A
associate-*r*N/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
*-commutativeN/A
*-lowering-*.f649.8
Simplified9.8%
(FPCore (x) :precision binary64 (* -0.5 (* x x)))
double code(double x) {
return -0.5 * (x * x);
}
real(8) function code(x)
real(8), intent (in) :: x
code = (-0.5d0) * (x * x)
end function
public static double code(double x) {
return -0.5 * (x * x);
}
def code(x): return -0.5 * (x * x)
function code(x) return Float64(-0.5 * Float64(x * x)) end
function tmp = code(x) tmp = -0.5 * (x * x); end
code[x_] := N[(-0.5 * N[(x * x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
-0.5 \cdot \left(x \cdot x\right)
\end{array}
Initial program 94.5%
Taylor expanded in x around 0
Simplified9.6%
Taylor expanded in x around 0
+-commutativeN/A
*-commutativeN/A
unpow2N/A
associate-*l*N/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f649.7
Simplified9.7%
Taylor expanded in x around inf
*-commutativeN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f649.7
Simplified9.7%
Final simplification9.7%
(FPCore (x) :precision binary64 1.0)
double code(double x) {
return 1.0;
}
real(8) function code(x)
real(8), intent (in) :: x
code = 1.0d0
end function
public static double code(double x) {
return 1.0;
}
def code(x): return 1.0
function code(x) return 1.0 end
function tmp = code(x) tmp = 1.0; end
code[x_] := 1.0
\begin{array}{l}
\\
1
\end{array}
Initial program 94.5%
Taylor expanded in x around 0
Simplified1.5%
herbie shell --seed 2024204
(FPCore (x)
:name "ENA, Section 1.4, Exercise 1"
:precision binary64
:pre (and (<= 1.99 x) (<= x 2.01))
(* (cos x) (exp (* 10.0 (* x x)))))