
(FPCore (x) :precision binary64 (* (cos x) (exp (* 10.0 (* x x)))))
double code(double x) {
return cos(x) * exp((10.0 * (x * x)));
}
! Herbie initial program: cos(x) * exp(10*x**2), evaluated directly
! in double precision. Operation order matches the FPCore source.
real(8) function code(x)
real(8), intent (in) :: x
real(8) :: scaled_sq
scaled_sq = 10.0d0 * (x * x)
code = cos(x) * exp(scaled_sq)
end function
public static double code(double x) {
    // Herbie initial program: cos(x) * e^(10*x^2), evaluated directly
    // in binary64. Operation order matches the FPCore source.
    final double scaledSq = 10.0 * (x * x);
    return Math.exp(scaledSq) * Math.cos(x);
}
def code(x):
    # Herbie initial program: cos(x) * e^(10*x^2), evaluated directly
    # in binary64. Operation order matches the FPCore source.
    scaled_sq = 10.0 * (x * x)
    return math.exp(scaled_sq) * math.cos(x)
# Herbie initial program: cos(x) * exp(10x^2), evaluated in Float64.
function code(x) return Float64(cos(x) * exp(Float64(10.0 * Float64(x * x)))) end
% Herbie initial program: cos(x) * exp(10*x^2), direct double-precision evaluation.
function tmp = code(x) tmp = cos(x) * exp((10.0 * (x * x))); end
(* Herbie initial program: cos(x) * exp(10 x^2), each operation rounded to machine precision. *)
code[x_] := N[(N[Cos[x], $MachinePrecision] * N[Exp[N[(10.0 * N[(x * x), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\cos x \cdot e^{10 \cdot \left(x \cdot x\right)}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 16 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x) :precision binary64 (* (cos x) (exp (* 10.0 (* x x)))))
double code(double x) {
return cos(x) * exp((10.0 * (x * x)));
}
! Direct binary64 evaluation of cos(x) * exp(10*x**2)
! (duplicate rendering of the initial program in this Herbie report).
real(8) function code(x)
real(8), intent (in) :: x
code = cos(x) * exp((10.0d0 * (x * x)))
end function
public static double code(double x) {
return Math.cos(x) * Math.exp((10.0 * (x * x)));
}
def code(x): return math.cos(x) * math.exp((10.0 * (x * x)))
function code(x) return Float64(cos(x) * exp(Float64(10.0 * Float64(x * x)))) end
function tmp = code(x) tmp = cos(x) * exp((10.0 * (x * x))); end
code[x_] := N[(N[Cos[x], $MachinePrecision] * N[Exp[N[(10.0 * N[(x * x), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\cos x \cdot e^{10 \cdot \left(x \cdot x\right)}
\end{array}
(FPCore (x) :precision binary64 (* (cos x) (exp (* 10.0 (* x x)))))
double code(double x) {
return cos(x) * exp((10.0 * (x * x)));
}
! Direct binary64 evaluation of cos(x) * exp(10*x**2)
! (duplicate rendering of the initial program in this Herbie report).
real(8) function code(x)
real(8), intent (in) :: x
code = cos(x) * exp((10.0d0 * (x * x)))
end function
public static double code(double x) {
return Math.cos(x) * Math.exp((10.0 * (x * x)));
}
def code(x): return math.cos(x) * math.exp((10.0 * (x * x)))
function code(x) return Float64(cos(x) * exp(Float64(10.0 * Float64(x * x)))) end
function tmp = code(x) tmp = cos(x) * exp((10.0 * (x * x))); end
code[x_] := N[(N[Cos[x], $MachinePrecision] * N[Exp[N[(10.0 * N[(x * x), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\cos x \cdot e^{10 \cdot \left(x \cdot x\right)}
\end{array}
Initial program 94.2%
(FPCore (x)
:precision binary64
(let* ((t_0 (* x (* x x)))
(t_1 (+ 0.003125 (* x (* x -0.00011574074074074075))))
(t_2 (* x t_1)))
(/
(*
(+
1.0
(*
(* x t_0)
(-
(*
(* x x)
(/
(- (* t_1 (* t_0 t_2)) 0.001736111111111111)
(- -0.041666666666666664 (* x t_2))))
0.25)))
(exp (* x (* x 10.0))))
(-
1.0
(*
x
(*
x
(+
-0.5
(*
(* x x)
(+ 0.041666666666666664 (* (* x x) -0.001388888888888889))))))))))
double code(double x) {
double t_0 = x * (x * x);
double t_1 = 0.003125 + (x * (x * -0.00011574074074074075));
double t_2 = x * t_1;
return ((1.0 + ((x * t_0) * (((x * x) * (((t_1 * (t_0 * t_2)) - 0.001736111111111111) / (-0.041666666666666664 - (x * t_2)))) - 0.25))) * exp((x * (x * 10.0)))) / (1.0 - (x * (x * (-0.5 + ((x * x) * (0.041666666666666664 + ((x * x) * -0.001388888888888889)))))));
}
! Herbie-generated alternative for cos(x) * exp(10*x**2): a rational
! form obtained by Taylor expansion about x = 0 (numerator and
! denominator are even polynomials in x).
! Constants are exact rationals: 0.003125 = 1/320,
! 0.041666666666666664 = 1/24, 0.001388888888888889 = 1/720.
! NOTE(review): the surrounding report logs ~27-29% accuracy for the
! derived alternatives - presumably useful only near x = 0; confirm
! against the sampled domain before adopting.
real(8) function code(x)
real(8), intent (in) :: x
! t_0 = x**3; t_1 and t_2 are shared polynomial subterms.
real(8) :: t_0
real(8) :: t_1
real(8) :: t_2
t_0 = x * (x * x)
t_1 = 0.003125d0 + (x * (x * (-0.00011574074074074075d0)))
t_2 = x * t_1
code = ((1.0d0 + ((x * t_0) * (((x * x) * (((t_1 * (t_0 * t_2)) - 0.001736111111111111d0) / ((-0.041666666666666664d0) - (x * t_2)))) - 0.25d0))) * exp((x * (x * 10.0d0)))) / (1.0d0 - (x * (x * ((-0.5d0) + ((x * x) * (0.041666666666666664d0 + ((x * x) * (-0.001388888888888889d0))))))))
end function
public static double code(double x) {
double t_0 = x * (x * x);
double t_1 = 0.003125 + (x * (x * -0.00011574074074074075));
double t_2 = x * t_1;
return ((1.0 + ((x * t_0) * (((x * x) * (((t_1 * (t_0 * t_2)) - 0.001736111111111111) / (-0.041666666666666664 - (x * t_2)))) - 0.25))) * Math.exp((x * (x * 10.0)))) / (1.0 - (x * (x * (-0.5 + ((x * x) * (0.041666666666666664 + ((x * x) * -0.001388888888888889)))))));
}
def code(x): t_0 = x * (x * x) t_1 = 0.003125 + (x * (x * -0.00011574074074074075)) t_2 = x * t_1 return ((1.0 + ((x * t_0) * (((x * x) * (((t_1 * (t_0 * t_2)) - 0.001736111111111111) / (-0.041666666666666664 - (x * t_2)))) - 0.25))) * math.exp((x * (x * 10.0)))) / (1.0 - (x * (x * (-0.5 + ((x * x) * (0.041666666666666664 + ((x * x) * -0.001388888888888889)))))))
function code(x) t_0 = Float64(x * Float64(x * x)) t_1 = Float64(0.003125 + Float64(x * Float64(x * -0.00011574074074074075))) t_2 = Float64(x * t_1) return Float64(Float64(Float64(1.0 + Float64(Float64(x * t_0) * Float64(Float64(Float64(x * x) * Float64(Float64(Float64(t_1 * Float64(t_0 * t_2)) - 0.001736111111111111) / Float64(-0.041666666666666664 - Float64(x * t_2)))) - 0.25))) * exp(Float64(x * Float64(x * 10.0)))) / Float64(1.0 - Float64(x * Float64(x * Float64(-0.5 + Float64(Float64(x * x) * Float64(0.041666666666666664 + Float64(Float64(x * x) * -0.001388888888888889)))))))) end
function tmp = code(x) t_0 = x * (x * x); t_1 = 0.003125 + (x * (x * -0.00011574074074074075)); t_2 = x * t_1; tmp = ((1.0 + ((x * t_0) * (((x * x) * (((t_1 * (t_0 * t_2)) - 0.001736111111111111) / (-0.041666666666666664 - (x * t_2)))) - 0.25))) * exp((x * (x * 10.0)))) / (1.0 - (x * (x * (-0.5 + ((x * x) * (0.041666666666666664 + ((x * x) * -0.001388888888888889))))))); end
code[x_] := Block[{t$95$0 = N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(0.003125 + N[(x * N[(x * -0.00011574074074074075), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$2 = N[(x * t$95$1), $MachinePrecision]}, N[(N[(N[(1.0 + N[(N[(x * t$95$0), $MachinePrecision] * N[(N[(N[(x * x), $MachinePrecision] * N[(N[(N[(t$95$1 * N[(t$95$0 * t$95$2), $MachinePrecision]), $MachinePrecision] - 0.001736111111111111), $MachinePrecision] / N[(-0.041666666666666664 - N[(x * t$95$2), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - 0.25), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[Exp[N[(x * N[(x * 10.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(1.0 - N[(x * N[(x * N[(-0.5 + N[(N[(x * x), $MachinePrecision] * N[(0.041666666666666664 + N[(N[(x * x), $MachinePrecision] * -0.001388888888888889), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := x \cdot \left(x \cdot x\right)\\
t_1 := 0.003125 + x \cdot \left(x \cdot -0.00011574074074074075\right)\\
t_2 := x \cdot t\_1\\
\frac{\left(1 + \left(x \cdot t\_0\right) \cdot \left(\left(x \cdot x\right) \cdot \frac{t\_1 \cdot \left(t\_0 \cdot t\_2\right) - 0.001736111111111111}{-0.041666666666666664 - x \cdot t\_2} - 0.25\right)\right) \cdot e^{x \cdot \left(x \cdot 10\right)}}{1 - x \cdot \left(x \cdot \left(-0.5 + \left(x \cdot x\right) \cdot \left(0.041666666666666664 + \left(x \cdot x\right) \cdot -0.001388888888888889\right)\right)\right)}
\end{array}
\end{array}
Initial program 94.2%
*-lowering-*.f64N/A
cos-lowering-cos.f64N/A
exp-lowering-exp.f64N/A
associate-*r*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f6494.2%
Simplified94.2%
Taylor expanded in x around 0
+-lowering-+.f64N/A
unpow2N/A
associate-*l*N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f64N/A
unpow2N/A
associate-*l*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
remove-double-negN/A
sub-negN/A
--lowering--.f64N/A
*-commutativeN/A
Simplified27.6%
flip-+N/A
associate-*l/N/A
/-lowering-/.f64N/A
Applied egg-rr27.6%
Taylor expanded in x around 0
*-lowering-*.f64N/A
metadata-evalN/A
pow-sqrN/A
unpow2N/A
associate-*l*N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f64N/A
unpow2N/A
associate-*l*N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
Simplified29.3%
+-commutativeN/A
flip-+N/A
/-lowering-/.f64N/A
Applied egg-rr29.3%
Final simplification29.3%
(FPCore (x)
:precision binary64
(let* ((t_0 (* x (* x (* x x)))))
(/
(*
(exp (* x (* x 10.0)))
(+
1.0
(*
t_0
(-
(*
(* x x)
(-
(*
x
(*
x
(*
(- 9.765625e-6 (* t_0 1.3395919067215363e-8))
(/ -1.0 (+ 0.003125 (* (* x x) 0.00011574074074074075))))))
-0.041666666666666664))
0.25))))
(-
1.0
(*
x
(*
x
(+
-0.5
(*
(* x x)
(+ 0.041666666666666664 (* (* x x) -0.001388888888888889))))))))))
double code(double x) {
double t_0 = x * (x * (x * x));
return (exp((x * (x * 10.0))) * (1.0 + (t_0 * (((x * x) * ((x * (x * ((9.765625e-6 - (t_0 * 1.3395919067215363e-8)) * (-1.0 / (0.003125 + ((x * x) * 0.00011574074074074075)))))) - -0.041666666666666664)) - 0.25)))) / (1.0 - (x * (x * (-0.5 + ((x * x) * (0.041666666666666664 + ((x * x) * -0.001388888888888889)))))));
}
! Herbie-generated alternative for cos(x) * exp(10*x**2): exp factor
! times a rational correction derived by Taylor expansion about x = 0.
! NOTE(review): differs from the next alternative only in how the
! inner ratio is arranged (here via multiplication by -1/denominator).
! Report logs suggest low (~29%) accuracy away from x = 0 - confirm.
real(8) function code(x)
real(8), intent (in) :: x
! t_0 = x**4, shared between two subterms.
real(8) :: t_0
t_0 = x * (x * (x * x))
code = (exp((x * (x * 10.0d0))) * (1.0d0 + (t_0 * (((x * x) * ((x * (x * ((9.765625d-6 - (t_0 * 1.3395919067215363d-8)) * ((-1.0d0) / (0.003125d0 + ((x * x) * 0.00011574074074074075d0)))))) - (-0.041666666666666664d0))) - 0.25d0)))) / (1.0d0 - (x * (x * ((-0.5d0) + ((x * x) * (0.041666666666666664d0 + ((x * x) * (-0.001388888888888889d0))))))))
end function
public static double code(double x) {
double t_0 = x * (x * (x * x));
return (Math.exp((x * (x * 10.0))) * (1.0 + (t_0 * (((x * x) * ((x * (x * ((9.765625e-6 - (t_0 * 1.3395919067215363e-8)) * (-1.0 / (0.003125 + ((x * x) * 0.00011574074074074075)))))) - -0.041666666666666664)) - 0.25)))) / (1.0 - (x * (x * (-0.5 + ((x * x) * (0.041666666666666664 + ((x * x) * -0.001388888888888889)))))));
}
def code(x): t_0 = x * (x * (x * x)) return (math.exp((x * (x * 10.0))) * (1.0 + (t_0 * (((x * x) * ((x * (x * ((9.765625e-6 - (t_0 * 1.3395919067215363e-8)) * (-1.0 / (0.003125 + ((x * x) * 0.00011574074074074075)))))) - -0.041666666666666664)) - 0.25)))) / (1.0 - (x * (x * (-0.5 + ((x * x) * (0.041666666666666664 + ((x * x) * -0.001388888888888889)))))))
function code(x) t_0 = Float64(x * Float64(x * Float64(x * x))) return Float64(Float64(exp(Float64(x * Float64(x * 10.0))) * Float64(1.0 + Float64(t_0 * Float64(Float64(Float64(x * x) * Float64(Float64(x * Float64(x * Float64(Float64(9.765625e-6 - Float64(t_0 * 1.3395919067215363e-8)) * Float64(-1.0 / Float64(0.003125 + Float64(Float64(x * x) * 0.00011574074074074075)))))) - -0.041666666666666664)) - 0.25)))) / Float64(1.0 - Float64(x * Float64(x * Float64(-0.5 + Float64(Float64(x * x) * Float64(0.041666666666666664 + Float64(Float64(x * x) * -0.001388888888888889)))))))) end
function tmp = code(x) t_0 = x * (x * (x * x)); tmp = (exp((x * (x * 10.0))) * (1.0 + (t_0 * (((x * x) * ((x * (x * ((9.765625e-6 - (t_0 * 1.3395919067215363e-8)) * (-1.0 / (0.003125 + ((x * x) * 0.00011574074074074075)))))) - -0.041666666666666664)) - 0.25)))) / (1.0 - (x * (x * (-0.5 + ((x * x) * (0.041666666666666664 + ((x * x) * -0.001388888888888889))))))); end
code[x_] := Block[{t$95$0 = N[(x * N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, N[(N[(N[Exp[N[(x * N[(x * 10.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] * N[(1.0 + N[(t$95$0 * N[(N[(N[(x * x), $MachinePrecision] * N[(N[(x * N[(x * N[(N[(9.765625e-6 - N[(t$95$0 * 1.3395919067215363e-8), $MachinePrecision]), $MachinePrecision] * N[(-1.0 / N[(0.003125 + N[(N[(x * x), $MachinePrecision] * 0.00011574074074074075), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - -0.041666666666666664), $MachinePrecision]), $MachinePrecision] - 0.25), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(1.0 - N[(x * N[(x * N[(-0.5 + N[(N[(x * x), $MachinePrecision] * N[(0.041666666666666664 + N[(N[(x * x), $MachinePrecision] * -0.001388888888888889), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := x \cdot \left(x \cdot \left(x \cdot x\right)\right)\\
\frac{e^{x \cdot \left(x \cdot 10\right)} \cdot \left(1 + t\_0 \cdot \left(\left(x \cdot x\right) \cdot \left(x \cdot \left(x \cdot \left(\left(9.765625 \cdot 10^{-6} - t\_0 \cdot 1.3395919067215363 \cdot 10^{-8}\right) \cdot \frac{-1}{0.003125 + \left(x \cdot x\right) \cdot 0.00011574074074074075}\right)\right) - -0.041666666666666664\right) - 0.25\right)\right)}{1 - x \cdot \left(x \cdot \left(-0.5 + \left(x \cdot x\right) \cdot \left(0.041666666666666664 + \left(x \cdot x\right) \cdot -0.001388888888888889\right)\right)\right)}
\end{array}
\end{array}
Initial program 94.2%
*-lowering-*.f64N/A
cos-lowering-cos.f64N/A
exp-lowering-exp.f64N/A
associate-*r*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f6494.2%
Simplified94.2%
Taylor expanded in x around 0
+-lowering-+.f64N/A
unpow2N/A
associate-*l*N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f64N/A
unpow2N/A
associate-*l*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
remove-double-negN/A
sub-negN/A
--lowering--.f64N/A
*-commutativeN/A
Simplified27.6%
flip-+N/A
associate-*l/N/A
/-lowering-/.f64N/A
Applied egg-rr27.6%
Taylor expanded in x around 0
*-lowering-*.f64N/A
metadata-evalN/A
pow-sqrN/A
unpow2N/A
associate-*l*N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f64N/A
unpow2N/A
associate-*l*N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
Simplified29.3%
flip-+N/A
div-invN/A
*-lowering-*.f64N/A
--lowering--.f64N/A
metadata-evalN/A
swap-sqrN/A
associate-*r*N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
metadata-evalN/A
/-lowering-/.f64N/A
*-commutativeN/A
cancel-sign-sub-invN/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
metadata-evalN/A
*-lowering-*.f6429.3%
Applied egg-rr29.3%
Final simplification29.3%
(FPCore (x)
:precision binary64
(let* ((t_0 (* x (* x (* x x)))))
(/
(*
(exp (* x (* x 10.0)))
(+
1.0
(*
t_0
(-
(*
(* x x)
(-
(*
x
(*
x
(/
(- (* t_0 1.3395919067215363e-8) 9.765625e-6)
(+ 0.003125 (* (* x x) 0.00011574074074074075)))))
-0.041666666666666664))
0.25))))
(-
1.0
(*
x
(*
x
(+
-0.5
(*
(* x x)
(+ 0.041666666666666664 (* (* x x) -0.001388888888888889))))))))))
double code(double x) {
double t_0 = x * (x * (x * x));
return (exp((x * (x * 10.0))) * (1.0 + (t_0 * (((x * x) * ((x * (x * (((t_0 * 1.3395919067215363e-8) - 9.765625e-6) / (0.003125 + ((x * x) * 0.00011574074074074075))))) - -0.041666666666666664)) - 0.25)))) / (1.0 - (x * (x * (-0.5 + ((x * x) * (0.041666666666666664 + ((x * x) * -0.001388888888888889)))))));
}
! Herbie-generated alternative for cos(x) * exp(10*x**2): exp factor
! times a rational correction from Taylor expansion about x = 0.
! Same value as the preceding alternative; the inner ratio is written
! as (t_0*c1 - c2)/denominator instead of (c2 - t_0*c1)*(-1/denom).
! NOTE(review): report logs ~29% accuracy - likely small-|x| only.
real(8) function code(x)
real(8), intent (in) :: x
! t_0 = x**4, shared between two subterms.
real(8) :: t_0
t_0 = x * (x * (x * x))
code = (exp((x * (x * 10.0d0))) * (1.0d0 + (t_0 * (((x * x) * ((x * (x * (((t_0 * 1.3395919067215363d-8) - 9.765625d-6) / (0.003125d0 + ((x * x) * 0.00011574074074074075d0))))) - (-0.041666666666666664d0))) - 0.25d0)))) / (1.0d0 - (x * (x * ((-0.5d0) + ((x * x) * (0.041666666666666664d0 + ((x * x) * (-0.001388888888888889d0))))))))
end function
public static double code(double x) {
double t_0 = x * (x * (x * x));
return (Math.exp((x * (x * 10.0))) * (1.0 + (t_0 * (((x * x) * ((x * (x * (((t_0 * 1.3395919067215363e-8) - 9.765625e-6) / (0.003125 + ((x * x) * 0.00011574074074074075))))) - -0.041666666666666664)) - 0.25)))) / (1.0 - (x * (x * (-0.5 + ((x * x) * (0.041666666666666664 + ((x * x) * -0.001388888888888889)))))));
}
def code(x): t_0 = x * (x * (x * x)) return (math.exp((x * (x * 10.0))) * (1.0 + (t_0 * (((x * x) * ((x * (x * (((t_0 * 1.3395919067215363e-8) - 9.765625e-6) / (0.003125 + ((x * x) * 0.00011574074074074075))))) - -0.041666666666666664)) - 0.25)))) / (1.0 - (x * (x * (-0.5 + ((x * x) * (0.041666666666666664 + ((x * x) * -0.001388888888888889)))))))
function code(x) t_0 = Float64(x * Float64(x * Float64(x * x))) return Float64(Float64(exp(Float64(x * Float64(x * 10.0))) * Float64(1.0 + Float64(t_0 * Float64(Float64(Float64(x * x) * Float64(Float64(x * Float64(x * Float64(Float64(Float64(t_0 * 1.3395919067215363e-8) - 9.765625e-6) / Float64(0.003125 + Float64(Float64(x * x) * 0.00011574074074074075))))) - -0.041666666666666664)) - 0.25)))) / Float64(1.0 - Float64(x * Float64(x * Float64(-0.5 + Float64(Float64(x * x) * Float64(0.041666666666666664 + Float64(Float64(x * x) * -0.001388888888888889)))))))) end
function tmp = code(x) t_0 = x * (x * (x * x)); tmp = (exp((x * (x * 10.0))) * (1.0 + (t_0 * (((x * x) * ((x * (x * (((t_0 * 1.3395919067215363e-8) - 9.765625e-6) / (0.003125 + ((x * x) * 0.00011574074074074075))))) - -0.041666666666666664)) - 0.25)))) / (1.0 - (x * (x * (-0.5 + ((x * x) * (0.041666666666666664 + ((x * x) * -0.001388888888888889))))))); end
code[x_] := Block[{t$95$0 = N[(x * N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, N[(N[(N[Exp[N[(x * N[(x * 10.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] * N[(1.0 + N[(t$95$0 * N[(N[(N[(x * x), $MachinePrecision] * N[(N[(x * N[(x * N[(N[(N[(t$95$0 * 1.3395919067215363e-8), $MachinePrecision] - 9.765625e-6), $MachinePrecision] / N[(0.003125 + N[(N[(x * x), $MachinePrecision] * 0.00011574074074074075), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - -0.041666666666666664), $MachinePrecision]), $MachinePrecision] - 0.25), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(1.0 - N[(x * N[(x * N[(-0.5 + N[(N[(x * x), $MachinePrecision] * N[(0.041666666666666664 + N[(N[(x * x), $MachinePrecision] * -0.001388888888888889), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := x \cdot \left(x \cdot \left(x \cdot x\right)\right)\\
\frac{e^{x \cdot \left(x \cdot 10\right)} \cdot \left(1 + t\_0 \cdot \left(\left(x \cdot x\right) \cdot \left(x \cdot \left(x \cdot \frac{t\_0 \cdot 1.3395919067215363 \cdot 10^{-8} - 9.765625 \cdot 10^{-6}}{0.003125 + \left(x \cdot x\right) \cdot 0.00011574074074074075}\right) - -0.041666666666666664\right) - 0.25\right)\right)}{1 - x \cdot \left(x \cdot \left(-0.5 + \left(x \cdot x\right) \cdot \left(0.041666666666666664 + \left(x \cdot x\right) \cdot -0.001388888888888889\right)\right)\right)}
\end{array}
\end{array}
Initial program 94.2%
*-lowering-*.f64N/A
cos-lowering-cos.f64N/A
exp-lowering-exp.f64N/A
associate-*r*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f6494.2%
Simplified94.2%
Taylor expanded in x around 0
+-lowering-+.f64N/A
unpow2N/A
associate-*l*N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f64N/A
unpow2N/A
associate-*l*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
remove-double-negN/A
sub-negN/A
--lowering--.f64N/A
*-commutativeN/A
Simplified27.6%
flip-+N/A
associate-*l/N/A
/-lowering-/.f64N/A
Applied egg-rr27.6%
Taylor expanded in x around 0
*-lowering-*.f64N/A
metadata-evalN/A
pow-sqrN/A
unpow2N/A
associate-*l*N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f64N/A
unpow2N/A
associate-*l*N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
Simplified29.3%
flip-+N/A
/-lowering-/.f64N/A
--lowering--.f64N/A
metadata-evalN/A
swap-sqrN/A
associate-*r*N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
metadata-evalN/A
*-commutativeN/A
cancel-sign-sub-invN/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
metadata-evalN/A
*-lowering-*.f6429.3%
Applied egg-rr29.3%
Final simplification29.3%
(FPCore (x)
:precision binary64
(*
(exp (* x (* x 10.0)))
(/
(-
1.0
(*
(* x (* x (* x x)))
(+
0.25
(*
(* x x)
(+
(* x (* x (+ 0.003125 (* x (* x -0.00011574074074074075)))))
-0.041666666666666664)))))
(-
1.0
(*
(* x x)
(+
-0.5
(*
x
(* x (+ 0.041666666666666664 (* (* x x) -0.001388888888888889))))))))))
double code(double x) {
return exp((x * (x * 10.0))) * ((1.0 - ((x * (x * (x * x))) * (0.25 + ((x * x) * ((x * (x * (0.003125 + (x * (x * -0.00011574074074074075))))) + -0.041666666666666664))))) / (1.0 - ((x * x) * (-0.5 + (x * (x * (0.041666666666666664 + ((x * x) * -0.001388888888888889))))))));
}
! Herbie-generated alternative for cos(x) * exp(10*x**2): exp factor
! times a ratio of two even polynomials in x (Taylor-derived).
! Constants are exact rationals: 0.25 = 1/4, 0.003125 = 1/320,
! 0.041666666666666664 = 1/24, 0.001388888888888889 = 1/720.
! NOTE(review): report logs ~29% accuracy - confirm sampled domain.
real(8) function code(x)
real(8), intent (in) :: x
code = exp((10.0d0 * (x * x))) * ((1.0d0 - ((x * (x * (x * x))) * (0.25d0 + ((x * x) * ((x * (x * (0.003125d0 + (x * (x * (-0.00011574074074074075d0)))))) + (-0.041666666666666664d0)))))) / (1.0d0 - ((x * x) * ((-0.5d0) + (x * (x * (0.041666666666666664d0 + ((x * x) * (-0.001388888888888889d0)))))))))
end function
public static double code(double x) {
return Math.exp((x * (x * 10.0))) * ((1.0 - ((x * (x * (x * x))) * (0.25 + ((x * x) * ((x * (x * (0.003125 + (x * (x * -0.00011574074074074075))))) + -0.041666666666666664))))) / (1.0 - ((x * x) * (-0.5 + (x * (x * (0.041666666666666664 + ((x * x) * -0.001388888888888889))))))));
}
def code(x): return math.exp((x * (x * 10.0))) * ((1.0 - ((x * (x * (x * x))) * (0.25 + ((x * x) * ((x * (x * (0.003125 + (x * (x * -0.00011574074074074075))))) + -0.041666666666666664))))) / (1.0 - ((x * x) * (-0.5 + (x * (x * (0.041666666666666664 + ((x * x) * -0.001388888888888889))))))))
function code(x) return Float64(exp(Float64(x * Float64(x * 10.0))) * Float64(Float64(1.0 - Float64(Float64(x * Float64(x * Float64(x * x))) * Float64(0.25 + Float64(Float64(x * x) * Float64(Float64(x * Float64(x * Float64(0.003125 + Float64(x * Float64(x * -0.00011574074074074075))))) + -0.041666666666666664))))) / Float64(1.0 - Float64(Float64(x * x) * Float64(-0.5 + Float64(x * Float64(x * Float64(0.041666666666666664 + Float64(Float64(x * x) * -0.001388888888888889))))))))) end
function tmp = code(x) tmp = exp((x * (x * 10.0))) * ((1.0 - ((x * (x * (x * x))) * (0.25 + ((x * x) * ((x * (x * (0.003125 + (x * (x * -0.00011574074074074075))))) + -0.041666666666666664))))) / (1.0 - ((x * x) * (-0.5 + (x * (x * (0.041666666666666664 + ((x * x) * -0.001388888888888889)))))))); end
code[x_] := N[(N[Exp[N[(x * N[(x * 10.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] * N[(N[(1.0 - N[(N[(x * N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(0.25 + N[(N[(x * x), $MachinePrecision] * N[(N[(x * N[(x * N[(0.003125 + N[(x * N[(x * -0.00011574074074074075), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + -0.041666666666666664), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(1.0 - N[(N[(x * x), $MachinePrecision] * N[(-0.5 + N[(x * N[(x * N[(0.041666666666666664 + N[(N[(x * x), $MachinePrecision] * -0.001388888888888889), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
e^{x \cdot \left(x \cdot 10\right)} \cdot \frac{1 - \left(x \cdot \left(x \cdot \left(x \cdot x\right)\right)\right) \cdot \left(0.25 + \left(x \cdot x\right) \cdot \left(x \cdot \left(x \cdot \left(0.003125 + x \cdot \left(x \cdot -0.00011574074074074075\right)\right)\right) + -0.041666666666666664\right)\right)}{1 - \left(x \cdot x\right) \cdot \left(-0.5 + x \cdot \left(x \cdot \left(0.041666666666666664 + \left(x \cdot x\right) \cdot -0.001388888888888889\right)\right)\right)}
\end{array}
Initial program 94.2%
*-lowering-*.f64N/A
cos-lowering-cos.f64N/A
exp-lowering-exp.f64N/A
associate-*r*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f6494.2%
Simplified94.2%
Taylor expanded in x around 0
+-lowering-+.f64N/A
unpow2N/A
associate-*l*N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f64N/A
unpow2N/A
associate-*l*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
remove-double-negN/A
sub-negN/A
--lowering--.f64N/A
*-commutativeN/A
Simplified27.6%
flip-+N/A
associate-*l/N/A
/-lowering-/.f64N/A
Applied egg-rr27.6%
Taylor expanded in x around 0
*-lowering-*.f64N/A
metadata-evalN/A
pow-sqrN/A
unpow2N/A
associate-*l*N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f64N/A
unpow2N/A
associate-*l*N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
Simplified29.3%
Applied egg-rr29.3%
Final simplification29.3%
(FPCore (x)
:precision binary64
(*
(exp (* x (* x 10.0)))
(+
1.0
(*
x
(*
x
(+
-0.5
(*
x
(*
x
(/
1.0
(/
(+ 0.041666666666666664 (* (* x x) 0.001388888888888889))
0.001736111111111111))))))))))
double code(double x) {
return exp((x * (x * 10.0))) * (1.0 + (x * (x * (-0.5 + (x * (x * (1.0 / ((0.041666666666666664 + ((x * x) * 0.001388888888888889)) / 0.001736111111111111))))))));
}
! Herbie-generated alternative for cos(x) * exp(10*x**2): exp factor
! times an even polynomial whose x**4 coefficient is expressed via a
! nested division (1 / ((1/24 + x**2/720) / (1/576))) - a generated
! form, not hand-simplified.
! NOTE(review): report logs ~28% accuracy - likely small-|x| only.
real(8) function code(x)
real(8), intent (in) :: x
code = exp((x * (x * 10.0d0))) * (1.0d0 + (x * (x * ((-0.5d0) + (x * (x * (1.0d0 / ((0.041666666666666664d0 + ((x * x) * 0.001388888888888889d0)) / 0.001736111111111111d0))))))))
end function
public static double code(double x) {
return Math.exp((x * (x * 10.0))) * (1.0 + (x * (x * (-0.5 + (x * (x * (1.0 / ((0.041666666666666664 + ((x * x) * 0.001388888888888889)) / 0.001736111111111111))))))));
}
def code(x): return math.exp((x * (x * 10.0))) * (1.0 + (x * (x * (-0.5 + (x * (x * (1.0 / ((0.041666666666666664 + ((x * x) * 0.001388888888888889)) / 0.001736111111111111))))))))
function code(x) return Float64(exp(Float64(x * Float64(x * 10.0))) * Float64(1.0 + Float64(x * Float64(x * Float64(-0.5 + Float64(x * Float64(x * Float64(1.0 / Float64(Float64(0.041666666666666664 + Float64(Float64(x * x) * 0.001388888888888889)) / 0.001736111111111111))))))))) end
function tmp = code(x) tmp = exp((x * (x * 10.0))) * (1.0 + (x * (x * (-0.5 + (x * (x * (1.0 / ((0.041666666666666664 + ((x * x) * 0.001388888888888889)) / 0.001736111111111111)))))))); end
code[x_] := N[(N[Exp[N[(x * N[(x * 10.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] * N[(1.0 + N[(x * N[(x * N[(-0.5 + N[(x * N[(x * N[(1.0 / N[(N[(0.041666666666666664 + N[(N[(x * x), $MachinePrecision] * 0.001388888888888889), $MachinePrecision]), $MachinePrecision] / 0.001736111111111111), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
e^{x \cdot \left(x \cdot 10\right)} \cdot \left(1 + x \cdot \left(x \cdot \left(-0.5 + x \cdot \left(x \cdot \frac{1}{\frac{0.041666666666666664 + \left(x \cdot x\right) \cdot 0.001388888888888889}{0.001736111111111111}}\right)\right)\right)\right)
\end{array}
Initial program 94.2%
*-lowering-*.f64N/A
cos-lowering-cos.f64N/A
exp-lowering-exp.f64N/A
associate-*r*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f6494.2%
Simplified94.2%
Taylor expanded in x around 0
+-lowering-+.f64N/A
unpow2N/A
associate-*l*N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f64N/A
unpow2N/A
associate-*l*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
remove-double-negN/A
sub-negN/A
--lowering--.f64N/A
*-commutativeN/A
Simplified27.6%
flip--N/A
clear-numN/A
/-lowering-/.f64N/A
/-lowering-/.f64N/A
+-lowering-+.f64N/A
associate-*r*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
--lowering--.f64N/A
metadata-evalN/A
associate-*r*N/A
associate-*r*N/A
swap-sqrN/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
metadata-eval27.6%
Applied egg-rr27.6%
Taylor expanded in x around 0
Simplified28.2%
Final simplification28.2%
(FPCore (x)
:precision binary64
(*
(exp (* x (* x 10.0)))
(+
1.0
(*
x
(*
x
(-
-0.5
(*
x
(* x (- (* x (* x 0.001388888888888889)) 0.041666666666666664)))))))))
double code(double x) {
return exp((x * (x * 10.0))) * (1.0 + (x * (x * (-0.5 - (x * (x * ((x * (x * 0.001388888888888889)) - 0.041666666666666664)))))));
}
! Herbie-generated alternative: exp(10*x**2) times the degree-6
! Taylor polynomial of cos(x): 1 - x**2/2 + x**4/24 - x**6/720
! (0.041666666666666664 = 1/24, 0.001388888888888889 = 1/720).
! exp argument written as x*(x*10).
real(8) function code(x)
real(8), intent (in) :: x
code = exp((x * (x * 10.0d0))) * (1.0d0 + (x * (x * ((-0.5d0) - (x * (x * ((x * (x * 0.001388888888888889d0)) - 0.041666666666666664d0)))))))
end function
public static double code(double x) {
return Math.exp((x * (x * 10.0))) * (1.0 + (x * (x * (-0.5 - (x * (x * ((x * (x * 0.001388888888888889)) - 0.041666666666666664)))))));
}
def code(x): return math.exp((x * (x * 10.0))) * (1.0 + (x * (x * (-0.5 - (x * (x * ((x * (x * 0.001388888888888889)) - 0.041666666666666664)))))))
function code(x) return Float64(exp(Float64(x * Float64(x * 10.0))) * Float64(1.0 + Float64(x * Float64(x * Float64(-0.5 - Float64(x * Float64(x * Float64(Float64(x * Float64(x * 0.001388888888888889)) - 0.041666666666666664)))))))) end
function tmp = code(x) tmp = exp((x * (x * 10.0))) * (1.0 + (x * (x * (-0.5 - (x * (x * ((x * (x * 0.001388888888888889)) - 0.041666666666666664))))))); end
code[x_] := N[(N[Exp[N[(x * N[(x * 10.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] * N[(1.0 + N[(x * N[(x * N[(-0.5 - N[(x * N[(x * N[(N[(x * N[(x * 0.001388888888888889), $MachinePrecision]), $MachinePrecision] - 0.041666666666666664), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
e^{x \cdot \left(x \cdot 10\right)} \cdot \left(1 + x \cdot \left(x \cdot \left(-0.5 - x \cdot \left(x \cdot \left(x \cdot \left(x \cdot 0.001388888888888889\right) - 0.041666666666666664\right)\right)\right)\right)\right)
\end{array}
Initial program 94.2%
*-lowering-*.f64N/A
cos-lowering-cos.f64N/A
exp-lowering-exp.f64N/A
associate-*r*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f6494.2%
Simplified94.2%
Taylor expanded in x around 0
+-lowering-+.f64N/A
unpow2N/A
associate-*l*N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f64N/A
unpow2N/A
associate-*l*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
remove-double-negN/A
sub-negN/A
--lowering--.f64N/A
*-commutativeN/A
Simplified27.6%
Final simplification27.6%
(FPCore (x)
:precision binary64
(*
(exp (* 10.0 (* x x)))
(+
1.0
(*
x
(*
x
(-
-0.5
(*
x
(* x (- (* x (* x 0.001388888888888889)) 0.041666666666666664)))))))))
double code(double x) {
return exp((10.0 * (x * x))) * (1.0 + (x * (x * (-0.5 - (x * (x * ((x * (x * 0.001388888888888889)) - 0.041666666666666664)))))));
}
! Herbie-generated alternative: exp(10*x**2) times the degree-6
! Taylor polynomial of cos(x): 1 - x**2/2 + x**4/24 - x**6/720.
! Same polynomial as the previous alternative; only the exp argument
! is written 10*(x*x), matching the initial program's form.
real(8) function code(x)
real(8), intent (in) :: x
code = exp((10.0d0 * (x * x))) * (1.0d0 + (x * (x * ((-0.5d0) - (x * (x * ((x * (x * 0.001388888888888889d0)) - 0.041666666666666664d0)))))))
end function
public static double code(double x) {
return Math.exp((10.0 * (x * x))) * (1.0 + (x * (x * (-0.5 - (x * (x * ((x * (x * 0.001388888888888889)) - 0.041666666666666664)))))));
}
def code(x): return math.exp((10.0 * (x * x))) * (1.0 + (x * (x * (-0.5 - (x * (x * ((x * (x * 0.001388888888888889)) - 0.041666666666666664)))))))
function code(x) return Float64(exp(Float64(10.0 * Float64(x * x))) * Float64(1.0 + Float64(x * Float64(x * Float64(-0.5 - Float64(x * Float64(x * Float64(Float64(x * Float64(x * 0.001388888888888889)) - 0.041666666666666664)))))))) end
function tmp = code(x) tmp = exp((10.0 * (x * x))) * (1.0 + (x * (x * (-0.5 - (x * (x * ((x * (x * 0.001388888888888889)) - 0.041666666666666664))))))); end
code[x_] := N[(N[Exp[N[(10.0 * N[(x * x), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] * N[(1.0 + N[(x * N[(x * N[(-0.5 - N[(x * N[(x * N[(N[(x * N[(x * 0.001388888888888889), $MachinePrecision]), $MachinePrecision] - 0.041666666666666664), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
e^{10 \cdot \left(x \cdot x\right)} \cdot \left(1 + x \cdot \left(x \cdot \left(-0.5 - x \cdot \left(x \cdot \left(x \cdot \left(x \cdot 0.001388888888888889\right) - 0.041666666666666664\right)\right)\right)\right)\right)
\end{array}
Initial program 94.2%
Taylor expanded in x around 0
+-lowering-+.f64N/A
unpow2N/A
associate-*l*N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f64N/A
unpow2N/A
associate-*l*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
remove-double-negN/A
sub-negN/A
--lowering--.f64N/A
*-commutativeN/A
Simplified27.6%
Final simplification27.6%
(FPCore (x) :precision binary64 (* (exp (* x (* x 10.0))) (+ 1.0 (* x (* x (+ -0.5 (* (* x x) 0.041666666666666664)))))))
double code(double x) {
return exp((x * (x * 10.0))) * (1.0 + (x * (x * (-0.5 + ((x * x) * 0.041666666666666664)))));
}
real(8) function code(x)
    implicit none
    real(8), intent(in) :: x
    real(8) :: growth, series
    ! Growth factor e^{10 x^2}, evaluated as x*(x*10) exactly as generated.
    growth = exp(x * (x * 10.0d0))
    ! Degree-4 cosine series 1 - x^2/2 + x^4/24 in nested form.
    series = 1.0d0 + (x * (x * ((-0.5d0) + ((x * x) * 0.041666666666666664d0))))
    code = growth * series
end function
public static double code(double x) {
return Math.exp((x * (x * 10.0))) * (1.0 + (x * (x * (-0.5 + ((x * x) * 0.041666666666666664)))));
}
def code(x): return math.exp((x * (x * 10.0))) * (1.0 + (x * (x * (-0.5 + ((x * x) * 0.041666666666666664)))))
function code(x) return Float64(exp(Float64(x * Float64(x * 10.0))) * Float64(1.0 + Float64(x * Float64(x * Float64(-0.5 + Float64(Float64(x * x) * 0.041666666666666664)))))) end
function tmp = code(x) tmp = exp((x * (x * 10.0))) * (1.0 + (x * (x * (-0.5 + ((x * x) * 0.041666666666666664))))); end
code[x_] := N[(N[Exp[N[(x * N[(x * 10.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] * N[(1.0 + N[(x * N[(x * N[(-0.5 + N[(N[(x * x), $MachinePrecision] * 0.041666666666666664), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
e^{x \cdot \left(x \cdot 10\right)} \cdot \left(1 + x \cdot \left(x \cdot \left(-0.5 + \left(x \cdot x\right) \cdot 0.041666666666666664\right)\right)\right)
\end{array}
Initial program 94.2%
*-lowering-*.f64N/A
cos-lowering-cos.f64N/A
exp-lowering-exp.f64N/A
associate-*r*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f6494.2%
Simplified94.2%
Taylor expanded in x around 0
+-lowering-+.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64 21.3%
Simplified21.3%
+-commutativeN/A
distribute-lft-inN/A
associate-*r*N/A
+-lowering-+.f64N/A
associate-*l*N/A
associate-*r*N/A
pow3N/A
*-lowering-*.f64N/A
cube-unmultN/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f6421.3%
Applied egg-rr21.3%
Taylor expanded in x around 0
+-lowering-+.f64N/A
unpow2N/A
associate-*l*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
+-lowering-+.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6421.3%
Simplified21.3%
Final simplification21.3%
(FPCore (x) :precision binary64 (* (exp (* 10.0 (* x x))) (+ 1.0 (* x (* x -0.5)))))
double code(double x) {
return exp((10.0 * (x * x))) * (1.0 + (x * (x * -0.5)));
}
real(8) function code(x)
    implicit none
    real(8), intent(in) :: x
    ! e^{10 x^2} times the degree-2 cosine series 1 - x^2/2,
    ! with the generated operand nesting preserved.
    code = exp(10.0d0 * (x * x)) * (1.0d0 + x * (x * (-0.5d0)))
end function
public static double code(double x) {
return Math.exp((10.0 * (x * x))) * (1.0 + (x * (x * -0.5)));
}
def code(x): return math.exp((10.0 * (x * x))) * (1.0 + (x * (x * -0.5)))
function code(x) return Float64(exp(Float64(10.0 * Float64(x * x))) * Float64(1.0 + Float64(x * Float64(x * -0.5)))) end
function tmp = code(x) tmp = exp((10.0 * (x * x))) * (1.0 + (x * (x * -0.5))); end
code[x_] := N[(N[Exp[N[(10.0 * N[(x * x), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] * N[(1.0 + N[(x * N[(x * -0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
e^{10 \cdot \left(x \cdot x\right)} \cdot \left(1 + x \cdot \left(x \cdot -0.5\right)\right)
\end{array}
Initial program 94.2%
Taylor expanded in x around 0
+-lowering-+.f64N/A
unpow2N/A
associate-*r*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f64 18.2%
Simplified18.2%
Final simplification18.2%
(FPCore (x) :precision binary64 (* (exp (* 10.0 (* x x))) (* (* x x) -0.5)))
double code(double x) {
return exp((10.0 * (x * x))) * ((x * x) * -0.5);
}
real(8) function code(x)
    implicit none
    real(8), intent(in) :: x
    real(8) :: xsq
    ! Leading Taylor term -x^2/2, scaled by e^{10 x^2}.
    ! Reusing xsq is bitwise-identical to the generated repeated x*x.
    xsq = x * x
    code = exp(10.0d0 * xsq) * (xsq * (-0.5d0))
end function
public static double code(double x) {
return Math.exp((10.0 * (x * x))) * ((x * x) * -0.5);
}
def code(x): return math.exp((10.0 * (x * x))) * ((x * x) * -0.5)
function code(x) return Float64(exp(Float64(10.0 * Float64(x * x))) * Float64(Float64(x * x) * -0.5)) end
function tmp = code(x) tmp = exp((10.0 * (x * x))) * ((x * x) * -0.5); end
code[x_] := N[(N[Exp[N[(10.0 * N[(x * x), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * -0.5), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
e^{10 \cdot \left(x \cdot x\right)} \cdot \left(\left(x \cdot x\right) \cdot -0.5\right)
\end{array}
Initial program 94.2%
*-lowering-*.f64N/A
cos-lowering-cos.f64N/A
exp-lowering-exp.f64N/A
associate-*r*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f6494.2%
Simplified94.2%
Taylor expanded in x around 0
+-lowering-+.f64N/A
unpow2N/A
associate-*r*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f6418.2%
Simplified18.2%
Taylor expanded in x around inf
associate-*r*N/A
*-commutativeN/A
*-lowering-*.f64N/A
exp-lowering-exp.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64 16.9%
Simplified16.9%
Final simplification16.9%
(FPCore (x) :precision binary64 (* (* x x) (+ -0.5 (* (* x x) (+ -5.0 (* (* x x) (+ -25.0 (* (* x x) -83.33333333333333))))))))
double code(double x) {
    // Horner evaluation in xx = x*x with all-negative coefficients.
    double xx = x * x;
    return xx * (-0.5 + xx * (-5.0 + xx * (-25.0 + xx * -83.33333333333333)));
}
real(8) function code(x)
    implicit none
    real(8), intent(in) :: x
    real(8) :: xsq
    ! Horner evaluation in xsq = x*x with all-negative coefficients;
    ! association kept exactly as generated.
    xsq = x * x
    code = xsq * ((-0.5d0) + xsq * ((-5.0d0) + xsq * ((-25.0d0) + xsq * (-83.33333333333333d0))))
end function
public static double code(double x) {
return (x * x) * (-0.5 + ((x * x) * (-5.0 + ((x * x) * (-25.0 + ((x * x) * -83.33333333333333))))));
}
def code(x): return (x * x) * (-0.5 + ((x * x) * (-5.0 + ((x * x) * (-25.0 + ((x * x) * -83.33333333333333))))))
function code(x) return Float64(Float64(x * x) * Float64(-0.5 + Float64(Float64(x * x) * Float64(-5.0 + Float64(Float64(x * x) * Float64(-25.0 + Float64(Float64(x * x) * -83.33333333333333))))))) end
function tmp = code(x) tmp = (x * x) * (-0.5 + ((x * x) * (-5.0 + ((x * x) * (-25.0 + ((x * x) * -83.33333333333333)))))); end
code[x_] := N[(N[(x * x), $MachinePrecision] * N[(-0.5 + N[(N[(x * x), $MachinePrecision] * N[(-5.0 + N[(N[(x * x), $MachinePrecision] * N[(-25.0 + N[(N[(x * x), $MachinePrecision] * -83.33333333333333), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(x \cdot x\right) \cdot \left(-0.5 + \left(x \cdot x\right) \cdot \left(-5 + \left(x \cdot x\right) \cdot \left(-25 + \left(x \cdot x\right) \cdot -83.33333333333333\right)\right)\right)
\end{array}
Initial program 94.2%
*-lowering-*.f64N/A
cos-lowering-cos.f64N/A
exp-lowering-exp.f64N/A
associate-*r*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f6494.2%
Simplified94.2%
Taylor expanded in x around 0
+-lowering-+.f64N/A
unpow2N/A
associate-*r*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f6418.2%
Simplified18.2%
Taylor expanded in x around inf
*-commutativeN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6416.9%
Simplified16.9%
Taylor expanded in x around 0
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
Simplified10.3%
(FPCore (x) :precision binary64 (* (* x x) (+ -0.5 (* x (* x (+ -5.0 (* (* x x) -25.0)))))))
double code(double x) {
    // x^2 * (-1/2 + x^2*(-5 + x^2*(-25))), mixed nesting kept as generated.
    double head = -5.0 + ((x * x) * -25.0);
    return (x * x) * (-0.5 + (x * (x * head)));
}
real(8) function code(x)
    implicit none
    real(8), intent(in) :: x
    real(8) :: head
    ! x^2 * (-1/2 + x^2*(-5 + x^2*(-25))); mixed nesting kept as generated.
    head = (-5.0d0) + (x * x) * (-25.0d0)
    code = (x * x) * ((-0.5d0) + x * (x * head))
end function
public static double code(double x) {
return (x * x) * (-0.5 + (x * (x * (-5.0 + ((x * x) * -25.0)))));
}
def code(x): return (x * x) * (-0.5 + (x * (x * (-5.0 + ((x * x) * -25.0)))))
function code(x) return Float64(Float64(x * x) * Float64(-0.5 + Float64(x * Float64(x * Float64(-5.0 + Float64(Float64(x * x) * -25.0)))))) end
function tmp = code(x) tmp = (x * x) * (-0.5 + (x * (x * (-5.0 + ((x * x) * -25.0))))); end
code[x_] := N[(N[(x * x), $MachinePrecision] * N[(-0.5 + N[(x * N[(x * N[(-5.0 + N[(N[(x * x), $MachinePrecision] * -25.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(x \cdot x\right) \cdot \left(-0.5 + x \cdot \left(x \cdot \left(-5 + \left(x \cdot x\right) \cdot -25\right)\right)\right)
\end{array}
Initial program 94.2%
*-lowering-*.f64N/A
cos-lowering-cos.f64N/A
exp-lowering-exp.f64N/A
associate-*r*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f6494.2%
Simplified94.2%
Taylor expanded in x around 0
+-lowering-+.f64N/A
unpow2N/A
associate-*r*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f6418.2%
Simplified18.2%
Taylor expanded in x around inf
*-commutativeN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6416.9%
Simplified16.9%
Taylor expanded in x around 0
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f64N/A
unpow2N/A
associate-*l*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6410.1%
Simplified10.1%
(FPCore (x) :precision binary64 (* x (* x (+ -0.5 (* x (* x -5.0))))))
double code(double x) {
    // Nested product form of -x^2/2 - 5*x^4.
    double tail = -0.5 + (x * (x * -5.0));
    return x * (x * tail);
}
real(8) function code(x)
    implicit none
    real(8), intent(in) :: x
    real(8) :: tail
    ! Nested product form of -x^2/2 - 5*x^4.
    tail = (-0.5d0) + x * (x * (-5.0d0))
    code = x * (x * tail)
end function
public static double code(double x) {
return x * (x * (-0.5 + (x * (x * -5.0))));
}
def code(x): return x * (x * (-0.5 + (x * (x * -5.0))))
function code(x) return Float64(x * Float64(x * Float64(-0.5 + Float64(x * Float64(x * -5.0))))) end
function tmp = code(x) tmp = x * (x * (-0.5 + (x * (x * -5.0)))); end
code[x_] := N[(x * N[(x * N[(-0.5 + N[(x * N[(x * -5.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x \cdot \left(x \cdot \left(-0.5 + x \cdot \left(x \cdot -5\right)\right)\right)
\end{array}
Initial program 94.2%
*-lowering-*.f64N/A
cos-lowering-cos.f64N/A
exp-lowering-exp.f64N/A
associate-*r*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f6494.2%
Simplified94.2%
Taylor expanded in x around 0
+-lowering-+.f64N/A
unpow2N/A
associate-*r*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f6418.2%
Simplified18.2%
Taylor expanded in x around inf
*-commutativeN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6416.9%
Simplified16.9%
Taylor expanded in x around 0
unpow2N/A
associate-*l*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f64N/A
*-commutativeN/A
unpow2N/A
associate-*l*N/A
*-lowering-*.f64N/A
*-lowering-*.f64 9.9%
Simplified9.9%
(FPCore (x) :precision binary64 (* (* x x) -0.5))
double code(double x) {
    // Leading Taylor term only: -x^2/2.
    double square = x * x;
    return square * -0.5;
}
real(8) function code(x)
    implicit none
    real(8), intent(in) :: x
    real(8) :: square
    ! Leading Taylor term only: -x^2/2.
    square = x * x
    code = square * (-0.5d0)
end function
public static double code(double x) {
return (x * x) * -0.5;
}
def code(x): return (x * x) * -0.5
function code(x) return Float64(Float64(x * x) * -0.5) end
function tmp = code(x) tmp = (x * x) * -0.5; end
code[x_] := N[(N[(x * x), $MachinePrecision] * -0.5), $MachinePrecision]
\begin{array}{l}
\\
\left(x \cdot x\right) \cdot -0.5
\end{array}
Initial program 94.2%
*-lowering-*.f64N/A
cos-lowering-cos.f64N/A
exp-lowering-exp.f64N/A
associate-*r*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f6494.2%
Simplified94.2%
Taylor expanded in x around 0
+-lowering-+.f64N/A
unpow2N/A
associate-*r*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f6418.2%
Simplified18.2%
Taylor expanded in x around 0
Simplified9.7%
Taylor expanded in x around inf
*-commutativeN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64 9.7%
Simplified9.7%
(FPCore (x) :precision binary64 1.0)
double code(double x) {
    // Constant (zeroth-order) approximation; the argument is ignored.
    (void)x;
    return 1.0;
}
real(8) function code(x)
    implicit none
    real(8), intent(in) :: x
    ! Constant (zeroth-order) approximation; the argument is ignored.
    code = 1.0d0
end function
public static double code(double x) {
return 1.0;
}
def code(x): return 1.0
function code(x) return 1.0 end
function tmp = code(x) tmp = 1.0; end
code[x_] := 1.0
\begin{array}{l}
\\
1
\end{array}
Initial program 94.2%
Taylor expanded in x around 0
Simplified1.5%
herbie shell --seed 2024139
(FPCore (x)
:name "ENA, Section 1.4, Exercise 1"
:precision binary64
:pre (and (<= 1.99 x) (<= x 2.01))
(* (cos x) (exp (* 10.0 (* x x)))))