
(FPCore (x) :precision binary64 (* (cos x) (exp (* 10.0 (* x x)))))
double code(double x) {
return cos(x) * exp((10.0 * (x * x)));
}
! Reference expression: cos(x) * exp(10 * x**2) in double precision (binary64).
! NOTE(review): machine-generated code — no 'implicit none' and nonstandard
! real(8) kind spelling; confirm project kind conventions before reuse.
real(8) function code(x)
real(8), intent (in) :: x
code = cos(x) * exp((10.0d0 * (x * x)))
end function
/** Evaluates cos(x) * exp(10 * x^2) in double precision (reference expression). */
public static double code(double x) {
    final double squared = x * x;
    final double scale = Math.exp(10.0 * squared);  // 10.0 * (x * x), association preserved
    return Math.cos(x) * scale;
}
def code(x):
    """Evaluate cos(x) * exp(10 * x**2) in binary64 (reference expression)."""
    growth = math.exp(10.0 * (x * x))
    return math.cos(x) * growth
# Evaluate cos(x) * exp(10 * x^2), explicitly rounding each intermediate to Float64.
function code(x) return Float64(cos(x) * exp(Float64(10.0 * Float64(x * x)))) end
% Evaluate cos(x) * exp(10 * x^2) in double precision (reference expression).
function tmp = code(x) tmp = cos(x) * exp((10.0 * (x * x))); end
(* Evaluate Cos[x] * Exp[10 x^2], rounding every intermediate to $MachinePrecision to mimic binary64. *)
code[x_] := N[(N[Cos[x], $MachinePrecision] * N[Exp[N[(10.0 * N[(x * x), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\cos x \cdot e^{10 \cdot \left(x \cdot x\right)}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 17 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x) :precision binary64 (* (cos x) (exp (* 10.0 (* x x)))))
double code(double x) {
return cos(x) * exp((10.0 * (x * x)));
}
real(8) function code(x)
real(8), intent (in) :: x
code = cos(x) * exp((10.0d0 * (x * x)))
end function
public static double code(double x) {
return Math.cos(x) * Math.exp((10.0 * (x * x)));
}
def code(x): return math.cos(x) * math.exp((10.0 * (x * x)))
function code(x) return Float64(cos(x) * exp(Float64(10.0 * Float64(x * x)))) end
function tmp = code(x) tmp = cos(x) * exp((10.0 * (x * x))); end
code[x_] := N[(N[Cos[x], $MachinePrecision] * N[Exp[N[(10.0 * N[(x * x), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\cos x \cdot e^{10 \cdot \left(x \cdot x\right)}
\end{array}
(FPCore (x) :precision binary64 (* (cos x) (exp (* 10.0 (* x x)))))
double code(double x) {
return cos(x) * exp((10.0 * (x * x)));
}
real(8) function code(x)
real(8), intent (in) :: x
code = cos(x) * exp((10.0d0 * (x * x)))
end function
public static double code(double x) {
return Math.cos(x) * Math.exp((10.0 * (x * x)));
}
def code(x): return math.cos(x) * math.exp((10.0 * (x * x)))
function code(x) return Float64(cos(x) * exp(Float64(10.0 * Float64(x * x)))) end
function tmp = code(x) tmp = cos(x) * exp((10.0 * (x * x))); end
code[x_] := N[(N[Cos[x], $MachinePrecision] * N[Exp[N[(10.0 * N[(x * x), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\cos x \cdot e^{10 \cdot \left(x \cdot x\right)}
\end{array}
Initial program 94.3%
(FPCore (x)
:precision binary64
(let* ((t_0 (* (* x x) (+ 0.003125 (* (* x x) -0.00011574074074074075)))))
(*
(/
(+
(*
(* x (* x (* x x)))
(+
0.25
(*
(* x x)
(*
(- 0.001736111111111111 (* t_0 t_0))
(/ 1.0 (- -0.041666666666666664 t_0))))))
-1.0)
(+
(*
x
(*
x
(+
-0.5
(*
x
(* x (+ 0.041666666666666664 (* (* x x) -0.001388888888888889)))))))
-1.0))
(exp (* x (* x 10.0))))))
/* Herbie alternative: a rational polynomial (derived by Taylor expansion of cos
 * around x = 0) multiplied by exp(10*x^2), approximating cos(x) * exp(10*x^2).
 * NOTE(review): machine-generated; the constants look like cos-series
 * coefficients (0.041666666666666664 ~ 1/24, -0.001388888888888889 ~ -1/720) —
 * do not hand-edit. Exact operation order is significant for rounding. */
double code(double x) {
double t_0 = (x * x) * (0.003125 + ((x * x) * -0.00011574074074074075));
return ((((x * (x * (x * x))) * (0.25 + ((x * x) * ((0.001736111111111111 - (t_0 * t_0)) * (1.0 / (-0.041666666666666664 - t_0)))))) + -1.0) / ((x * (x * (-0.5 + (x * (x * (0.041666666666666664 + ((x * x) * -0.001388888888888889))))))) + -1.0)) * exp((x * (x * 10.0)));
}
real(8) function code(x)
real(8), intent (in) :: x
real(8) :: t_0
t_0 = (x * x) * (0.003125d0 + ((x * x) * (-0.00011574074074074075d0)))
code = ((((x * (x * (x * x))) * (0.25d0 + ((x * x) * ((0.001736111111111111d0 - (t_0 * t_0)) * (1.0d0 / ((-0.041666666666666664d0) - t_0)))))) + (-1.0d0)) / ((x * (x * ((-0.5d0) + (x * (x * (0.041666666666666664d0 + ((x * x) * (-0.001388888888888889d0)))))))) + (-1.0d0))) * exp((x * (x * 10.0d0)))
end function
public static double code(double x) {
double t_0 = (x * x) * (0.003125 + ((x * x) * -0.00011574074074074075));
return ((((x * (x * (x * x))) * (0.25 + ((x * x) * ((0.001736111111111111 - (t_0 * t_0)) * (1.0 / (-0.041666666666666664 - t_0)))))) + -1.0) / ((x * (x * (-0.5 + (x * (x * (0.041666666666666664 + ((x * x) * -0.001388888888888889))))))) + -1.0)) * Math.exp((x * (x * 10.0)));
}
def code(x):
    """Herbie alternative: rational polynomial times exp(10*x^2), approximating
    cos(x) * exp(10*x^2) near x = 0.

    Fix: the generated one-liner placed an assignment and a return on the `def`
    line, which is a Python SyntaxError; restructured into a valid multi-line
    body with the exact same operations and evaluation order.
    """
    t_0 = (x * x) * (0.003125 + ((x * x) * -0.00011574074074074075))
    numerator = ((x * (x * (x * x))) * (0.25 + ((x * x) * ((0.001736111111111111 - (t_0 * t_0)) * (1.0 / (-0.041666666666666664 - t_0)))))) + -1.0
    denominator = (x * (x * (-0.5 + (x * (x * (0.041666666666666664 + ((x * x) * -0.001388888888888889))))))) + -1.0
    return (numerator / denominator) * math.exp((x * (x * 10.0)))
function code(x) t_0 = Float64(Float64(x * x) * Float64(0.003125 + Float64(Float64(x * x) * -0.00011574074074074075))) return Float64(Float64(Float64(Float64(Float64(x * Float64(x * Float64(x * x))) * Float64(0.25 + Float64(Float64(x * x) * Float64(Float64(0.001736111111111111 - Float64(t_0 * t_0)) * Float64(1.0 / Float64(-0.041666666666666664 - t_0)))))) + -1.0) / Float64(Float64(x * Float64(x * Float64(-0.5 + Float64(x * Float64(x * Float64(0.041666666666666664 + Float64(Float64(x * x) * -0.001388888888888889))))))) + -1.0)) * exp(Float64(x * Float64(x * 10.0)))) end
function tmp = code(x) t_0 = (x * x) * (0.003125 + ((x * x) * -0.00011574074074074075)); tmp = ((((x * (x * (x * x))) * (0.25 + ((x * x) * ((0.001736111111111111 - (t_0 * t_0)) * (1.0 / (-0.041666666666666664 - t_0)))))) + -1.0) / ((x * (x * (-0.5 + (x * (x * (0.041666666666666664 + ((x * x) * -0.001388888888888889))))))) + -1.0)) * exp((x * (x * 10.0))); end
code[x_] := Block[{t$95$0 = N[(N[(x * x), $MachinePrecision] * N[(0.003125 + N[(N[(x * x), $MachinePrecision] * -0.00011574074074074075), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, N[(N[(N[(N[(N[(x * N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(0.25 + N[(N[(x * x), $MachinePrecision] * N[(N[(0.001736111111111111 - N[(t$95$0 * t$95$0), $MachinePrecision]), $MachinePrecision] * N[(1.0 / N[(-0.041666666666666664 - t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + -1.0), $MachinePrecision] / N[(N[(x * N[(x * N[(-0.5 + N[(x * N[(x * N[(0.041666666666666664 + N[(N[(x * x), $MachinePrecision] * -0.001388888888888889), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision] * N[Exp[N[(x * N[(x * 10.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \left(x \cdot x\right) \cdot \left(0.003125 + \left(x \cdot x\right) \cdot -0.00011574074074074075\right)\\
\frac{\left(x \cdot \left(x \cdot \left(x \cdot x\right)\right)\right) \cdot \left(0.25 + \left(x \cdot x\right) \cdot \left(\left(0.001736111111111111 - t\_0 \cdot t\_0\right) \cdot \frac{1}{-0.041666666666666664 - t\_0}\right)\right) + -1}{x \cdot \left(x \cdot \left(-0.5 + x \cdot \left(x \cdot \left(0.041666666666666664 + \left(x \cdot x\right) \cdot -0.001388888888888889\right)\right)\right)\right) + -1} \cdot e^{x \cdot \left(x \cdot 10\right)}
\end{array}
\end{array}
Initial program 94.3%
*-lowering-*.f64N/A
cos-lowering-cos.f64N/A
exp-lowering-exp.f64N/A
associate-*r*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f6494.2%
Simplified94.2%
Taylor expanded in x around 0
+-lowering-+.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6427.5%
Simplified27.5%
+-commutativeN/A
flip-+N/A
/-lowering-/.f64N/A
Applied egg-rr27.5%
Taylor expanded in x around 0
*-lowering-*.f64N/A
metadata-evalN/A
pow-sqrN/A
unpow2N/A
associate-*l*N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f64N/A
unpow2N/A
associate-*l*N/A
*-commutativeN/A
Simplified29.3%
flip-+N/A
div-invN/A
*-lowering-*.f64N/A
Applied egg-rr29.3%
Final simplification29.3%
(FPCore (x)
:precision binary64
(let* ((t_0 (* (* x x) (+ 0.003125 (* (* x x) -0.00011574074074074075)))))
(*
(exp (* x (* x 10.0)))
(/
(+
(*
(* x (* x (* x x)))
(+
0.25
(*
(* x x)
(/
(- (* t_0 t_0) 0.001736111111111111)
(- t_0 -0.041666666666666664)))))
-1.0)
(+
(*
x
(*
x
(+
-0.5
(*
x
(* x (+ 0.041666666666666664 (* (* x x) -0.001388888888888889)))))))
-1.0)))))
/* Herbie alternative: exp(10*x^2) times a ratio of Taylor-derived polynomials
 * approximating cos(x) near x = 0; algebraically a re-association of the
 * variant above (division flipped to avoid the reciprocal).
 * NOTE(review): machine-generated; operation order is significant for rounding. */
double code(double x) {
double t_0 = (x * x) * (0.003125 + ((x * x) * -0.00011574074074074075));
return exp((x * (x * 10.0))) * ((((x * (x * (x * x))) * (0.25 + ((x * x) * (((t_0 * t_0) - 0.001736111111111111) / (t_0 - -0.041666666666666664))))) + -1.0) / ((x * (x * (-0.5 + (x * (x * (0.041666666666666664 + ((x * x) * -0.001388888888888889))))))) + -1.0));
}
real(8) function code(x)
real(8), intent (in) :: x
real(8) :: t_0
t_0 = (x * x) * (0.003125d0 + ((x * x) * (-0.00011574074074074075d0)))
code = exp((x * (x * 10.0d0))) * ((((x * (x * (x * x))) * (0.25d0 + ((x * x) * (((t_0 * t_0) - 0.001736111111111111d0) / (t_0 - (-0.041666666666666664d0)))))) + (-1.0d0)) / ((x * (x * ((-0.5d0) + (x * (x * (0.041666666666666664d0 + ((x * x) * (-0.001388888888888889d0)))))))) + (-1.0d0)))
end function
public static double code(double x) {
double t_0 = (x * x) * (0.003125 + ((x * x) * -0.00011574074074074075));
return Math.exp((x * (x * 10.0))) * ((((x * (x * (x * x))) * (0.25 + ((x * x) * (((t_0 * t_0) - 0.001736111111111111) / (t_0 - -0.041666666666666664))))) + -1.0) / ((x * (x * (-0.5 + (x * (x * (0.041666666666666664 + ((x * x) * -0.001388888888888889))))))) + -1.0));
}
def code(x):
    """Herbie alternative: exp(10*x^2) times a ratio of Taylor-derived
    polynomials approximating cos(x) near x = 0.

    Fix: the generated one-liner placed an assignment and a return on the `def`
    line, which is a Python SyntaxError; restructured into a valid multi-line
    body with the exact same operations and evaluation order.
    """
    t_0 = (x * x) * (0.003125 + ((x * x) * -0.00011574074074074075))
    numerator = ((x * (x * (x * x))) * (0.25 + ((x * x) * (((t_0 * t_0) - 0.001736111111111111) / (t_0 - -0.041666666666666664))))) + -1.0
    denominator = (x * (x * (-0.5 + (x * (x * (0.041666666666666664 + ((x * x) * -0.001388888888888889))))))) + -1.0
    return math.exp((x * (x * 10.0))) * (numerator / denominator)
function code(x) t_0 = Float64(Float64(x * x) * Float64(0.003125 + Float64(Float64(x * x) * -0.00011574074074074075))) return Float64(exp(Float64(x * Float64(x * 10.0))) * Float64(Float64(Float64(Float64(x * Float64(x * Float64(x * x))) * Float64(0.25 + Float64(Float64(x * x) * Float64(Float64(Float64(t_0 * t_0) - 0.001736111111111111) / Float64(t_0 - -0.041666666666666664))))) + -1.0) / Float64(Float64(x * Float64(x * Float64(-0.5 + Float64(x * Float64(x * Float64(0.041666666666666664 + Float64(Float64(x * x) * -0.001388888888888889))))))) + -1.0))) end
function tmp = code(x) t_0 = (x * x) * (0.003125 + ((x * x) * -0.00011574074074074075)); tmp = exp((x * (x * 10.0))) * ((((x * (x * (x * x))) * (0.25 + ((x * x) * (((t_0 * t_0) - 0.001736111111111111) / (t_0 - -0.041666666666666664))))) + -1.0) / ((x * (x * (-0.5 + (x * (x * (0.041666666666666664 + ((x * x) * -0.001388888888888889))))))) + -1.0)); end
code[x_] := Block[{t$95$0 = N[(N[(x * x), $MachinePrecision] * N[(0.003125 + N[(N[(x * x), $MachinePrecision] * -0.00011574074074074075), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, N[(N[Exp[N[(x * N[(x * 10.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] * N[(N[(N[(N[(x * N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(0.25 + N[(N[(x * x), $MachinePrecision] * N[(N[(N[(t$95$0 * t$95$0), $MachinePrecision] - 0.001736111111111111), $MachinePrecision] / N[(t$95$0 - -0.041666666666666664), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + -1.0), $MachinePrecision] / N[(N[(x * N[(x * N[(-0.5 + N[(x * N[(x * N[(0.041666666666666664 + N[(N[(x * x), $MachinePrecision] * -0.001388888888888889), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \left(x \cdot x\right) \cdot \left(0.003125 + \left(x \cdot x\right) \cdot -0.00011574074074074075\right)\\
e^{x \cdot \left(x \cdot 10\right)} \cdot \frac{\left(x \cdot \left(x \cdot \left(x \cdot x\right)\right)\right) \cdot \left(0.25 + \left(x \cdot x\right) \cdot \frac{t\_0 \cdot t\_0 - 0.001736111111111111}{t\_0 - -0.041666666666666664}\right) + -1}{x \cdot \left(x \cdot \left(-0.5 + x \cdot \left(x \cdot \left(0.041666666666666664 + \left(x \cdot x\right) \cdot -0.001388888888888889\right)\right)\right)\right) + -1}
\end{array}
\end{array}
Initial program 94.3%
*-lowering-*.f64N/A
cos-lowering-cos.f64N/A
exp-lowering-exp.f64N/A
associate-*r*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f6494.2%
Simplified94.2%
Taylor expanded in x around 0
+-lowering-+.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6427.5%
Simplified27.5%
+-commutativeN/A
flip-+N/A
/-lowering-/.f64N/A
Applied egg-rr27.5%
Taylor expanded in x around 0
*-lowering-*.f64N/A
metadata-evalN/A
pow-sqrN/A
unpow2N/A
associate-*l*N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f64N/A
unpow2N/A
associate-*l*N/A
*-commutativeN/A
Simplified29.3%
+-commutativeN/A
flip-+N/A
/-lowering-/.f64N/A
Applied egg-rr29.3%
Final simplification29.3%
(FPCore (x)
:precision binary64
(/
(exp (* 10.0 (* x x)))
(/
(+
(*
x
(*
x
(+
-0.5
(*
(* x x)
(+ 0.041666666666666664 (* (* x x) -0.001388888888888889))))))
-1.0)
(+
-1.0
(*
(* x x)
(*
(* x x)
(+
0.25
(*
(* x x)
(+
(* (* x x) (+ 0.003125 (* (* x x) -0.00011574074074074075)))
-0.041666666666666664)))))))))
/* Herbie alternative: exp(10*x^2) divided by a ratio of two Taylor-derived
 * polynomials (the cos approximation moved into the divisor's divisor).
 * NOTE(review): machine-generated; exact operation order matters for rounding. */
double code(double x) {
return exp((10.0 * (x * x))) / (((x * (x * (-0.5 + ((x * x) * (0.041666666666666664 + ((x * x) * -0.001388888888888889)))))) + -1.0) / (-1.0 + ((x * x) * ((x * x) * (0.25 + ((x * x) * (((x * x) * (0.003125 + ((x * x) * -0.00011574074074074075))) + -0.041666666666666664)))))));
}
real(8) function code(x)
real(8), intent (in) :: x
code = exp((10.0d0 * (x * x))) / (((x * (x * ((-0.5d0) + ((x * x) * (0.041666666666666664d0 + ((x * x) * (-0.001388888888888889d0))))))) + (-1.0d0)) / ((-1.0d0) + ((x * x) * ((x * x) * (0.25d0 + ((x * x) * (((x * x) * (0.003125d0 + ((x * x) * (-0.00011574074074074075d0)))) + (-0.041666666666666664d0))))))))
end function
public static double code(double x) {
return Math.exp((10.0 * (x * x))) / (((x * (x * (-0.5 + ((x * x) * (0.041666666666666664 + ((x * x) * -0.001388888888888889)))))) + -1.0) / (-1.0 + ((x * x) * ((x * x) * (0.25 + ((x * x) * (((x * x) * (0.003125 + ((x * x) * -0.00011574074074074075))) + -0.041666666666666664)))))));
}
def code(x):
    """Herbie alternative: exp(10*x^2) divided by a ratio of two Taylor-derived
    polynomials approximating cos(x) near x = 0. Operation order preserved."""
    ratio_num = (x * (x * (-0.5 + ((x * x) * (0.041666666666666664 + ((x * x) * -0.001388888888888889)))))) + -1.0
    ratio_den = -1.0 + ((x * x) * ((x * x) * (0.25 + ((x * x) * (((x * x) * (0.003125 + ((x * x) * -0.00011574074074074075))) + -0.041666666666666664)))))
    return math.exp((10.0 * (x * x))) / (ratio_num / ratio_den)
function code(x) return Float64(exp(Float64(10.0 * Float64(x * x))) / Float64(Float64(Float64(x * Float64(x * Float64(-0.5 + Float64(Float64(x * x) * Float64(0.041666666666666664 + Float64(Float64(x * x) * -0.001388888888888889)))))) + -1.0) / Float64(-1.0 + Float64(Float64(x * x) * Float64(Float64(x * x) * Float64(0.25 + Float64(Float64(x * x) * Float64(Float64(Float64(x * x) * Float64(0.003125 + Float64(Float64(x * x) * -0.00011574074074074075))) + -0.041666666666666664)))))))) end
function tmp = code(x) tmp = exp((10.0 * (x * x))) / (((x * (x * (-0.5 + ((x * x) * (0.041666666666666664 + ((x * x) * -0.001388888888888889)))))) + -1.0) / (-1.0 + ((x * x) * ((x * x) * (0.25 + ((x * x) * (((x * x) * (0.003125 + ((x * x) * -0.00011574074074074075))) + -0.041666666666666664))))))); end
code[x_] := N[(N[Exp[N[(10.0 * N[(x * x), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] / N[(N[(N[(x * N[(x * N[(-0.5 + N[(N[(x * x), $MachinePrecision] * N[(0.041666666666666664 + N[(N[(x * x), $MachinePrecision] * -0.001388888888888889), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + -1.0), $MachinePrecision] / N[(-1.0 + N[(N[(x * x), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * N[(0.25 + N[(N[(x * x), $MachinePrecision] * N[(N[(N[(x * x), $MachinePrecision] * N[(0.003125 + N[(N[(x * x), $MachinePrecision] * -0.00011574074074074075), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + -0.041666666666666664), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{e^{10 \cdot \left(x \cdot x\right)}}{\frac{x \cdot \left(x \cdot \left(-0.5 + \left(x \cdot x\right) \cdot \left(0.041666666666666664 + \left(x \cdot x\right) \cdot -0.001388888888888889\right)\right)\right) + -1}{-1 + \left(x \cdot x\right) \cdot \left(\left(x \cdot x\right) \cdot \left(0.25 + \left(x \cdot x\right) \cdot \left(\left(x \cdot x\right) \cdot \left(0.003125 + \left(x \cdot x\right) \cdot -0.00011574074074074075\right) + -0.041666666666666664\right)\right)\right)}}
\end{array}
Initial program 94.3%
*-lowering-*.f64N/A
cos-lowering-cos.f64N/A
exp-lowering-exp.f64N/A
associate-*r*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f6494.2%
Simplified94.2%
Taylor expanded in x around 0
+-lowering-+.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6427.5%
Simplified27.5%
+-commutativeN/A
flip-+N/A
/-lowering-/.f64N/A
Applied egg-rr27.5%
Taylor expanded in x around 0
*-lowering-*.f64N/A
metadata-evalN/A
pow-sqrN/A
unpow2N/A
associate-*l*N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f64N/A
unpow2N/A
associate-*l*N/A
*-commutativeN/A
Simplified29.3%
Applied egg-rr29.3%
Final simplification29.3%
(FPCore (x)
:precision binary64
(*
(+
-1.0
(*
(* x x)
(*
(* x x)
(+
0.25
(*
(* x x)
(+
(* (* x x) (+ 0.003125 (* (* x x) -0.00011574074074074075)))
-0.041666666666666664))))))
(/
(exp (* 10.0 (* x x)))
(+
(*
x
(*
x
(+
-0.5
(*
(* x x)
(+ 0.041666666666666664 (* (* x x) -0.001388888888888889))))))
-1.0))))
double code(double x) {
return (-1.0 + ((x * x) * ((x * x) * (0.25 + ((x * x) * (((x * x) * (0.003125 + ((x * x) * -0.00011574074074074075))) + -0.041666666666666664)))))) * (exp((10.0 * (x * x))) / ((x * (x * (-0.5 + ((x * x) * (0.041666666666666664 + ((x * x) * -0.001388888888888889)))))) + -1.0));
}
real(8) function code(x)
real(8), intent (in) :: x
code = ((-1.0d0) + ((x * x) * ((x * x) * (0.25d0 + ((x * x) * (((x * x) * (0.003125d0 + ((x * x) * (-0.00011574074074074075d0)))) + (-0.041666666666666664d0))))))) * (exp((10.0d0 * (x * x))) / ((x * (x * ((-0.5d0) + ((x * x) * (0.041666666666666664d0 + ((x * x) * (-0.001388888888888889d0))))))) + (-1.0d0)))
end function
public static double code(double x) {
return (-1.0 + ((x * x) * ((x * x) * (0.25 + ((x * x) * (((x * x) * (0.003125 + ((x * x) * -0.00011574074074074075))) + -0.041666666666666664)))))) * (Math.exp((10.0 * (x * x))) / ((x * (x * (-0.5 + ((x * x) * (0.041666666666666664 + ((x * x) * -0.001388888888888889)))))) + -1.0));
}
def code(x): return (-1.0 + ((x * x) * ((x * x) * (0.25 + ((x * x) * (((x * x) * (0.003125 + ((x * x) * -0.00011574074074074075))) + -0.041666666666666664)))))) * (math.exp((10.0 * (x * x))) / ((x * (x * (-0.5 + ((x * x) * (0.041666666666666664 + ((x * x) * -0.001388888888888889)))))) + -1.0))
function code(x) return Float64(Float64(-1.0 + Float64(Float64(x * x) * Float64(Float64(x * x) * Float64(0.25 + Float64(Float64(x * x) * Float64(Float64(Float64(x * x) * Float64(0.003125 + Float64(Float64(x * x) * -0.00011574074074074075))) + -0.041666666666666664)))))) * Float64(exp(Float64(10.0 * Float64(x * x))) / Float64(Float64(x * Float64(x * Float64(-0.5 + Float64(Float64(x * x) * Float64(0.041666666666666664 + Float64(Float64(x * x) * -0.001388888888888889)))))) + -1.0))) end
function tmp = code(x) tmp = (-1.0 + ((x * x) * ((x * x) * (0.25 + ((x * x) * (((x * x) * (0.003125 + ((x * x) * -0.00011574074074074075))) + -0.041666666666666664)))))) * (exp((10.0 * (x * x))) / ((x * (x * (-0.5 + ((x * x) * (0.041666666666666664 + ((x * x) * -0.001388888888888889)))))) + -1.0)); end
code[x_] := N[(N[(-1.0 + N[(N[(x * x), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * N[(0.25 + N[(N[(x * x), $MachinePrecision] * N[(N[(N[(x * x), $MachinePrecision] * N[(0.003125 + N[(N[(x * x), $MachinePrecision] * -0.00011574074074074075), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + -0.041666666666666664), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(N[Exp[N[(10.0 * N[(x * x), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] / N[(N[(x * N[(x * N[(-0.5 + N[(N[(x * x), $MachinePrecision] * N[(0.041666666666666664 + N[(N[(x * x), $MachinePrecision] * -0.001388888888888889), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(-1 + \left(x \cdot x\right) \cdot \left(\left(x \cdot x\right) \cdot \left(0.25 + \left(x \cdot x\right) \cdot \left(\left(x \cdot x\right) \cdot \left(0.003125 + \left(x \cdot x\right) \cdot -0.00011574074074074075\right) + -0.041666666666666664\right)\right)\right)\right) \cdot \frac{e^{10 \cdot \left(x \cdot x\right)}}{x \cdot \left(x \cdot \left(-0.5 + \left(x \cdot x\right) \cdot \left(0.041666666666666664 + \left(x \cdot x\right) \cdot -0.001388888888888889\right)\right)\right) + -1}
\end{array}
Initial program 94.3%
*-lowering-*.f64N/A
cos-lowering-cos.f64N/A
exp-lowering-exp.f64N/A
associate-*r*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f6494.2%
Simplified94.2%
Taylor expanded in x around 0
+-lowering-+.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6427.5%
Simplified27.5%
+-commutativeN/A
flip-+N/A
/-lowering-/.f64N/A
Applied egg-rr27.5%
Taylor expanded in x around 0
*-lowering-*.f64N/A
metadata-evalN/A
pow-sqrN/A
unpow2N/A
associate-*l*N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f64N/A
unpow2N/A
associate-*l*N/A
*-commutativeN/A
Simplified29.3%
Applied egg-rr29.3%
Final simplification29.3%
(FPCore (x)
:precision binary64
(let* ((t_0 (* x (* x x))) (t_1 (* x t_0)))
(*
(exp (* x (* x 10.0)))
(+
1.0
(*
(* (* x x) t_1)
(+
-0.001388888888888889
(/ (+ (* t_0 (/ 0.041666666666666664 x)) (* x (/ -0.5 x))) t_1)))))))
double code(double x) {
double t_0 = x * (x * x);
double t_1 = x * t_0;
return exp((x * (x * 10.0))) * (1.0 + (((x * x) * t_1) * (-0.001388888888888889 + (((t_0 * (0.041666666666666664 / x)) + (x * (-0.5 / x))) / t_1))));
}
real(8) function code(x)
real(8), intent (in) :: x
real(8) :: t_0
real(8) :: t_1
t_0 = x * (x * x)
t_1 = x * t_0
code = exp((x * (x * 10.0d0))) * (1.0d0 + (((x * x) * t_1) * ((-0.001388888888888889d0) + (((t_0 * (0.041666666666666664d0 / x)) + (x * ((-0.5d0) / x))) / t_1))))
end function
public static double code(double x) {
double t_0 = x * (x * x);
double t_1 = x * t_0;
return Math.exp((x * (x * 10.0))) * (1.0 + (((x * x) * t_1) * (-0.001388888888888889 + (((t_0 * (0.041666666666666664 / x)) + (x * (-0.5 / x))) / t_1))));
}
def code(x):
    """Herbie alternative: exp(10*x^2) times a re-associated cos polynomial.

    Fix: the generated one-liner placed two assignments and a return on the
    `def` line, which is a Python SyntaxError; restructured into a valid
    multi-line body with the exact same operations and evaluation order.

    NOTE(review): divides by x and by t_1 = x**4, so x = 0 raises
    ZeroDivisionError — behavior inherited from the generated expression.
    """
    t_0 = x * (x * x)
    t_1 = x * t_0
    return math.exp((x * (x * 10.0))) * (1.0 + (((x * x) * t_1) * (-0.001388888888888889 + (((t_0 * (0.041666666666666664 / x)) + (x * (-0.5 / x))) / t_1))))
function code(x) t_0 = Float64(x * Float64(x * x)) t_1 = Float64(x * t_0) return Float64(exp(Float64(x * Float64(x * 10.0))) * Float64(1.0 + Float64(Float64(Float64(x * x) * t_1) * Float64(-0.001388888888888889 + Float64(Float64(Float64(t_0 * Float64(0.041666666666666664 / x)) + Float64(x * Float64(-0.5 / x))) / t_1))))) end
function tmp = code(x) t_0 = x * (x * x); t_1 = x * t_0; tmp = exp((x * (x * 10.0))) * (1.0 + (((x * x) * t_1) * (-0.001388888888888889 + (((t_0 * (0.041666666666666664 / x)) + (x * (-0.5 / x))) / t_1)))); end
code[x_] := Block[{t$95$0 = N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(x * t$95$0), $MachinePrecision]}, N[(N[Exp[N[(x * N[(x * 10.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] * N[(1.0 + N[(N[(N[(x * x), $MachinePrecision] * t$95$1), $MachinePrecision] * N[(-0.001388888888888889 + N[(N[(N[(t$95$0 * N[(0.041666666666666664 / x), $MachinePrecision]), $MachinePrecision] + N[(x * N[(-0.5 / x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / t$95$1), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := x \cdot \left(x \cdot x\right)\\
t_1 := x \cdot t\_0\\
e^{x \cdot \left(x \cdot 10\right)} \cdot \left(1 + \left(\left(x \cdot x\right) \cdot t\_1\right) \cdot \left(-0.001388888888888889 + \frac{t\_0 \cdot \frac{0.041666666666666664}{x} + x \cdot \frac{-0.5}{x}}{t\_1}\right)\right)
\end{array}
\end{array}
Initial program 94.3%
*-lowering-*.f64N/A
cos-lowering-cos.f64N/A
exp-lowering-exp.f64N/A
associate-*r*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f6494.2%
Simplified94.2%
Taylor expanded in x around 0
+-lowering-+.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6427.5%
Simplified27.5%
Taylor expanded in x around inf
+-commutativeN/A
associate--l+N/A
distribute-lft-inN/A
rgt-mult-inverseN/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
Simplified27.5%
associate-/r*N/A
associate-/r*N/A
frac-addN/A
/-lowering-/.f64N/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
/-lowering-/.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
/-lowering-/.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f6427.5%
Applied egg-rr27.5%
Final simplification27.5%
(FPCore (x)
:precision binary64
(let* ((t_0 (* x (* x (* x x)))))
(*
(exp (* x (* x 10.0)))
(+
1.0
(*
(* (* x x) t_0)
(+
-0.001388888888888889
(+ (/ 0.041666666666666664 (* x x)) (/ -0.5 t_0))))))))
double code(double x) {
double t_0 = x * (x * (x * x));
return exp((x * (x * 10.0))) * (1.0 + (((x * x) * t_0) * (-0.001388888888888889 + ((0.041666666666666664 / (x * x)) + (-0.5 / t_0)))));
}
real(8) function code(x)
real(8), intent (in) :: x
real(8) :: t_0
t_0 = x * (x * (x * x))
code = exp((x * (x * 10.0d0))) * (1.0d0 + (((x * x) * t_0) * ((-0.001388888888888889d0) + ((0.041666666666666664d0 / (x * x)) + ((-0.5d0) / t_0)))))
end function
public static double code(double x) {
double t_0 = x * (x * (x * x));
return Math.exp((x * (x * 10.0))) * (1.0 + (((x * x) * t_0) * (-0.001388888888888889 + ((0.041666666666666664 / (x * x)) + (-0.5 / t_0)))));
}
def code(x):
    """Herbie alternative: exp(10*x^2) times a re-associated cos polynomial
    with the low-order terms expressed as reciprocals.

    Fix: the generated one-liner placed an assignment and a return on the
    `def` line, which is a Python SyntaxError; restructured into a valid
    multi-line body with the exact same operations and evaluation order.

    NOTE(review): divides by x*x and by t_0 = x**4, so x = 0 raises
    ZeroDivisionError — behavior inherited from the generated expression.
    """
    t_0 = x * (x * (x * x))
    return math.exp((x * (x * 10.0))) * (1.0 + (((x * x) * t_0) * (-0.001388888888888889 + ((0.041666666666666664 / (x * x)) + (-0.5 / t_0)))))
function code(x) t_0 = Float64(x * Float64(x * Float64(x * x))) return Float64(exp(Float64(x * Float64(x * 10.0))) * Float64(1.0 + Float64(Float64(Float64(x * x) * t_0) * Float64(-0.001388888888888889 + Float64(Float64(0.041666666666666664 / Float64(x * x)) + Float64(-0.5 / t_0)))))) end
function tmp = code(x) t_0 = x * (x * (x * x)); tmp = exp((x * (x * 10.0))) * (1.0 + (((x * x) * t_0) * (-0.001388888888888889 + ((0.041666666666666664 / (x * x)) + (-0.5 / t_0))))); end
code[x_] := Block[{t$95$0 = N[(x * N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, N[(N[Exp[N[(x * N[(x * 10.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] * N[(1.0 + N[(N[(N[(x * x), $MachinePrecision] * t$95$0), $MachinePrecision] * N[(-0.001388888888888889 + N[(N[(0.041666666666666664 / N[(x * x), $MachinePrecision]), $MachinePrecision] + N[(-0.5 / t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := x \cdot \left(x \cdot \left(x \cdot x\right)\right)\\
e^{x \cdot \left(x \cdot 10\right)} \cdot \left(1 + \left(\left(x \cdot x\right) \cdot t\_0\right) \cdot \left(-0.001388888888888889 + \left(\frac{0.041666666666666664}{x \cdot x} + \frac{-0.5}{t\_0}\right)\right)\right)
\end{array}
\end{array}
Initial program 94.3%
*-lowering-*.f64N/A
cos-lowering-cos.f64N/A
exp-lowering-exp.f64N/A
associate-*r*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f6494.2%
Simplified94.2%
Taylor expanded in x around 0
+-lowering-+.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6427.5%
Simplified27.5%
Taylor expanded in x around inf
+-commutativeN/A
associate--l+N/A
distribute-lft-inN/A
rgt-mult-inverseN/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
Simplified27.5%
Final simplification27.5%
(FPCore (x)
:precision binary64
(*
(exp (* 10.0 (* x x)))
(+
1.0
(*
x
(*
x
(+
-0.5
(*
x
(* x (+ 0.041666666666666664 (* (* x x) -0.001388888888888889))))))))))
/* Herbie alternative: exp(10*x^2) times a degree-6 even polynomial (truncated
 * cos Taylor series) approximating cos(x) near x = 0.
 * NOTE(review): machine-generated; operation order is significant for rounding. */
double code(double x) {
return exp((10.0 * (x * x))) * (1.0 + (x * (x * (-0.5 + (x * (x * (0.041666666666666664 + ((x * x) * -0.001388888888888889))))))));
}
real(8) function code(x)
real(8), intent (in) :: x
code = exp((10.0d0 * (x * x))) * (1.0d0 + (x * (x * ((-0.5d0) + (x * (x * (0.041666666666666664d0 + ((x * x) * (-0.001388888888888889d0)))))))))
end function
public static double code(double x) {
return Math.exp((10.0 * (x * x))) * (1.0 + (x * (x * (-0.5 + (x * (x * (0.041666666666666664 + ((x * x) * -0.001388888888888889))))))));
}
def code(x): return math.exp((10.0 * (x * x))) * (1.0 + (x * (x * (-0.5 + (x * (x * (0.041666666666666664 + ((x * x) * -0.001388888888888889))))))))
function code(x) return Float64(exp(Float64(10.0 * Float64(x * x))) * Float64(1.0 + Float64(x * Float64(x * Float64(-0.5 + Float64(x * Float64(x * Float64(0.041666666666666664 + Float64(Float64(x * x) * -0.001388888888888889))))))))) end
function tmp = code(x) tmp = exp((10.0 * (x * x))) * (1.0 + (x * (x * (-0.5 + (x * (x * (0.041666666666666664 + ((x * x) * -0.001388888888888889)))))))); end
code[x_] := N[(N[Exp[N[(10.0 * N[(x * x), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] * N[(1.0 + N[(x * N[(x * N[(-0.5 + N[(x * N[(x * N[(0.041666666666666664 + N[(N[(x * x), $MachinePrecision] * -0.001388888888888889), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
e^{10 \cdot \left(x \cdot x\right)} \cdot \left(1 + x \cdot \left(x \cdot \left(-0.5 + x \cdot \left(x \cdot \left(0.041666666666666664 + \left(x \cdot x\right) \cdot -0.001388888888888889\right)\right)\right)\right)\right)
\end{array}
Initial program 94.3%
*-lowering-*.f64N/A
cos-lowering-cos.f64N/A
exp-lowering-exp.f64N/A
associate-*r*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f6494.2%
Simplified94.2%
Taylor expanded in x around 0
+-lowering-+.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64 27.5%
Simplified27.5%
*-lowering-*.f64N/A
Applied egg-rr27.5%
Final simplification27.5%
(FPCore (x) :precision binary64 (* (exp (* x (* x 10.0))) (+ 1.0 (* x (* x (+ -0.5 (* x (* x 0.041666666666666664))))))))
double code(double x) {
return exp((x * (x * 10.0))) * (1.0 + (x * (x * (-0.5 + (x * (x * 0.041666666666666664))))));
}
! Herbie-generated alternative: cos(x) is replaced by its degree-4 Maclaurin
! polynomial 1 - x**2/2 + x**4/24; exp(10*x**2) is kept (written x*(x*10)).
real(8) function code(x)
real(8), intent (in) :: x
code = exp((x * (x * 10.0d0))) * (1.0d0 + (x * (x * ((-0.5d0) + (x * (x * 0.041666666666666664d0))))))
end function
/** Herbie alternative: cos(x) replaced by its degree-4 Maclaurin polynomial
 *  (1 - x^2/2 + x^4/24); the exponential factor is unchanged. */
public static double code(double x) {
return Math.exp((x * (x * 10.0))) * (1.0 + (x * (x * (-0.5 + (x * (x * 0.041666666666666664))))));
}
def code(x): return math.exp((x * (x * 10.0))) * (1.0 + (x * (x * (-0.5 + (x * (x * 0.041666666666666664))))))  # Herbie: cos(x) -> degree-4 Maclaurin polynomial (1 - x^2/2 + x^4/24); exp factor unchanged
function code(x) return Float64(exp(Float64(x * Float64(x * 10.0))) * Float64(1.0 + Float64(x * Float64(x * Float64(-0.5 + Float64(x * Float64(x * 0.041666666666666664))))))) end
function tmp = code(x) tmp = exp((x * (x * 10.0))) * (1.0 + (x * (x * (-0.5 + (x * (x * 0.041666666666666664)))))); end
code[x_] := N[(N[Exp[N[(x * N[(x * 10.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] * N[(1.0 + N[(x * N[(x * N[(-0.5 + N[(x * N[(x * 0.041666666666666664), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
e^{x \cdot \left(x \cdot 10\right)} \cdot \left(1 + x \cdot \left(x \cdot \left(-0.5 + x \cdot \left(x \cdot 0.041666666666666664\right)\right)\right)\right)
\end{array}
Initial program 94.3%
*-lowering-*.f64N/A
cos-lowering-cos.f64N/A
exp-lowering-exp.f64N/A
associate-*r*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f6494.2%
Simplified94.2%
Taylor expanded in x around 0
+-lowering-+.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64 27.5%
Simplified27.5%
+-commutativeN/A
flip-+N/A
/-lowering-/.f64N/A
Applied egg-rr27.5%
Taylor expanded in x around 0
*-lowering-*.f64N/A
metadata-evalN/A
pow-sqrN/A
unpow2N/A
associate-*l*N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f64N/A
unpow2N/A
associate-*l*N/A
*-commutativeN/A
Simplified29.3%
Taylor expanded in x around 0
+-lowering-+.f64N/A
unpow2N/A
associate-*l*N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f64N/A
*-commutativeN/A
unpow2N/A
associate-*l*N/A
*-lowering-*.f64N/A
*-lowering-*.f64 21.3%
Simplified21.3%
Final simplification21.3%
(FPCore (x) :precision binary64 (* (exp (* 10.0 (* x x))) (+ 1.0 (* (* x x) -0.5))))
double code(double x) {
return exp((10.0 * (x * x))) * (1.0 + ((x * x) * -0.5));
}
! Herbie-generated alternative: cos(x) is replaced by its degree-2 Maclaurin
! polynomial 1 - x**2/2; the exp(10*x**2) factor is kept.
real(8) function code(x)
real(8), intent (in) :: x
code = exp((10.0d0 * (x * x))) * (1.0d0 + ((x * x) * (-0.5d0)))
end function
/** Herbie alternative: cos(x) replaced by 1 - x^2/2 (degree-2 Maclaurin);
 *  the Math.exp(10*x*x) factor is unchanged. */
public static double code(double x) {
return Math.exp((10.0 * (x * x))) * (1.0 + ((x * x) * -0.5));
}
def code(x): return math.exp((10.0 * (x * x))) * (1.0 + ((x * x) * -0.5))  # Herbie: cos(x) -> 1 - x^2/2 (degree-2 Maclaurin); exp factor unchanged
function code(x) return Float64(exp(Float64(10.0 * Float64(x * x))) * Float64(1.0 + Float64(Float64(x * x) * -0.5))) end
function tmp = code(x) tmp = exp((10.0 * (x * x))) * (1.0 + ((x * x) * -0.5)); end
code[x_] := N[(N[Exp[N[(10.0 * N[(x * x), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] * N[(1.0 + N[(N[(x * x), $MachinePrecision] * -0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
e^{10 \cdot \left(x \cdot x\right)} \cdot \left(1 + \left(x \cdot x\right) \cdot -0.5\right)
\end{array}
Initial program 94.3%
Taylor expanded in x around 0
+-lowering-+.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64 18.2%
Simplified18.2%
Final simplification18.2%
(FPCore (x) :precision binary64 (* (exp (* 10.0 (* x x))) (* x (* x -0.5))))
double code(double x) {
return exp((10.0 * (x * x))) * (x * (x * -0.5));
}
! Herbie-generated alternative: the cosine factor is reduced to its quadratic
! term -x**2/2 (constant term dropped); exp(10*x**2) is kept.
real(8) function code(x)
real(8), intent (in) :: x
code = exp((10.0d0 * (x * x))) * (x * (x * (-0.5d0)))
end function
/** Herbie alternative: cosine factor reduced to -x^2/2 (constant term
 *  dropped); the Math.exp(10*x*x) factor is kept. */
public static double code(double x) {
return Math.exp((10.0 * (x * x))) * (x * (x * -0.5));
}
def code(x): return math.exp((10.0 * (x * x))) * (x * (x * -0.5))  # Herbie: cosine factor reduced to -x^2/2 (constant term dropped); exp factor kept
function code(x) return Float64(exp(Float64(10.0 * Float64(x * x))) * Float64(x * Float64(x * -0.5))) end
function tmp = code(x) tmp = exp((10.0 * (x * x))) * (x * (x * -0.5)); end
code[x_] := N[(N[Exp[N[(10.0 * N[(x * x), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] * N[(x * N[(x * -0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
e^{10 \cdot \left(x \cdot x\right)} \cdot \left(x \cdot \left(x \cdot -0.5\right)\right)
\end{array}
Initial program 94.3%
*-lowering-*.f64N/A
cos-lowering-cos.f64N/A
exp-lowering-exp.f64N/A
associate-*r*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f6494.2%
Simplified94.2%
Taylor expanded in x around 0
+-lowering-+.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64 18.2%
Simplified18.2%
Taylor expanded in x around inf
associate-*r*N/A
*-commutativeN/A
*-lowering-*.f64N/A
exp-lowering-exp.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
*-commutativeN/A
unpow2N/A
associate-*l*N/A
*-lowering-*.f64N/A
*-lowering-*.f64 16.9%
Simplified16.9%
Final simplification16.9%
(FPCore (x) :precision binary64 (* (+ 1.0 (* (* x x) -0.5)) (+ 1.0 (* (* x x) (+ 10.0 (* x (* x (+ 50.0 (* (* x x) 166.66666666666666)))))))))
/* Herbie alternative: both transcendental factors replaced by Maclaurin
   polynomials — cos(x) -> 1 - x^2/2 and exp(10*x^2) -> 1 + 10x^2 + 50x^4
   + 166.67x^6. Evaluation order matches the generated expression. */
double code(double x) {
    const double sq = x * x;
    double tail = 50.0 + sq * 166.66666666666666;
    double expSeries = 1.0 + sq * (10.0 + x * (x * tail));
    double cosSeries = 1.0 + sq * -0.5;
    return cosSeries * expSeries;
}
! Herbie-generated alternative: both factors are Maclaurin polynomials —
! cos(x) -> 1 - x**2/2 and exp(10*x**2) -> 1 + 10x**2 + 50x**4 + 166.67x**6.
real(8) function code(x)
real(8), intent (in) :: x
code = (1.0d0 + ((x * x) * (-0.5d0))) * (1.0d0 + ((x * x) * (10.0d0 + (x * (x * (50.0d0 + ((x * x) * 166.66666666666666d0)))))))
end function
/** Herbie alternative: cos(x) -> 1 - x^2/2 and exp(10*x^2) -> degree-6
 *  Maclaurin polynomial (1 + 10x^2 + 50x^4 + 166.67x^6); no exp/cos calls. */
public static double code(double x) {
return (1.0 + ((x * x) * -0.5)) * (1.0 + ((x * x) * (10.0 + (x * (x * (50.0 + ((x * x) * 166.66666666666666)))))));
}
def code(x): return (1.0 + ((x * x) * -0.5)) * (1.0 + ((x * x) * (10.0 + (x * (x * (50.0 + ((x * x) * 166.66666666666666)))))))  # Herbie: cos -> 1 - x^2/2, exp(10 x^2) -> degree-6 Maclaurin polynomial; no transcendental calls
function code(x) return Float64(Float64(1.0 + Float64(Float64(x * x) * -0.5)) * Float64(1.0 + Float64(Float64(x * x) * Float64(10.0 + Float64(x * Float64(x * Float64(50.0 + Float64(Float64(x * x) * 166.66666666666666)))))))) end
function tmp = code(x) tmp = (1.0 + ((x * x) * -0.5)) * (1.0 + ((x * x) * (10.0 + (x * (x * (50.0 + ((x * x) * 166.66666666666666))))))); end
code[x_] := N[(N[(1.0 + N[(N[(x * x), $MachinePrecision] * -0.5), $MachinePrecision]), $MachinePrecision] * N[(1.0 + N[(N[(x * x), $MachinePrecision] * N[(10.0 + N[(x * N[(x * N[(50.0 + N[(N[(x * x), $MachinePrecision] * 166.66666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(1 + \left(x \cdot x\right) \cdot -0.5\right) \cdot \left(1 + \left(x \cdot x\right) \cdot \left(10 + x \cdot \left(x \cdot \left(50 + \left(x \cdot x\right) \cdot 166.66666666666666\right)\right)\right)\right)
\end{array}
Initial program 94.3%
*-lowering-*.f64N/A
cos-lowering-cos.f64N/A
exp-lowering-exp.f64N/A
associate-*r*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f6494.2%
Simplified94.2%
Taylor expanded in x around 0
+-lowering-+.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6418.2%
Simplified18.2%
Taylor expanded in x around 0
+-lowering-+.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
unpow2N/A
associate-*l*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6410.3%
Simplified10.3%
(FPCore (x) :precision binary64 (* (+ 1.0 (* (* x x) -0.5)) (+ 1.0 (* (* x x) (+ 10.0 (* (* x x) 50.0))))))
/* Herbie alternative: cos(x) -> 1 - x^2/2 and exp(10*x^2) -> 1 + 10x^2
   + 50x^4 (degree-4 Maclaurin). Evaluation order matches the generated
   expression. */
double code(double x) {
    const double sq = x * x;
    double cosApprox = 1.0 + sq * -0.5;
    double expApprox = 1.0 + sq * (10.0 + sq * 50.0);
    return cosApprox * expApprox;
}
! Herbie-generated alternative: cos(x) -> 1 - x**2/2 and
! exp(10*x**2) -> 1 + 10x**2 + 50x**4 (degree-4 Maclaurin).
real(8) function code(x)
real(8), intent (in) :: x
code = (1.0d0 + ((x * x) * (-0.5d0))) * (1.0d0 + ((x * x) * (10.0d0 + ((x * x) * 50.0d0))))
end function
/** Herbie alternative: cos(x) -> 1 - x^2/2 and exp(10*x^2) -> 1 + 10x^2
 *  + 50x^4 (degree-4 Maclaurin); no transcendental calls. */
public static double code(double x) {
return (1.0 + ((x * x) * -0.5)) * (1.0 + ((x * x) * (10.0 + ((x * x) * 50.0))));
}
def code(x): return (1.0 + ((x * x) * -0.5)) * (1.0 + ((x * x) * (10.0 + ((x * x) * 50.0))))  # Herbie: cos -> 1 - x^2/2, exp(10 x^2) -> 1 + 10 x^2 + 50 x^4
function code(x) return Float64(Float64(1.0 + Float64(Float64(x * x) * -0.5)) * Float64(1.0 + Float64(Float64(x * x) * Float64(10.0 + Float64(Float64(x * x) * 50.0))))) end
function tmp = code(x) tmp = (1.0 + ((x * x) * -0.5)) * (1.0 + ((x * x) * (10.0 + ((x * x) * 50.0)))); end
code[x_] := N[(N[(1.0 + N[(N[(x * x), $MachinePrecision] * -0.5), $MachinePrecision]), $MachinePrecision] * N[(1.0 + N[(N[(x * x), $MachinePrecision] * N[(10.0 + N[(N[(x * x), $MachinePrecision] * 50.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(1 + \left(x \cdot x\right) \cdot -0.5\right) \cdot \left(1 + \left(x \cdot x\right) \cdot \left(10 + \left(x \cdot x\right) \cdot 50\right)\right)
\end{array}
Initial program 94.3%
*-lowering-*.f64N/A
cos-lowering-cos.f64N/A
exp-lowering-exp.f64N/A
associate-*r*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f6494.2%
Simplified94.2%
Taylor expanded in x around 0
+-lowering-+.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6418.2%
Simplified18.2%
Taylor expanded in x around 0
+-lowering-+.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6410.1%
Simplified10.1%
(FPCore (x) :precision binary64 (* (+ 1.0 (* x (* x -0.5))) (+ 1.0 (* x (* x 10.0)))))
/* Herbie alternative: cos(x) -> 1 - x^2/2 and exp(10*x^2) -> 1 + 10x^2
   (both degree-2 Maclaurin). Evaluation order matches the generated
   expression. */
double code(double x) {
    double cosApprox = 1.0 + x * (x * -0.5);
    double expApprox = 1.0 + x * (x * 10.0);
    return cosApprox * expApprox;
}
! Herbie-generated alternative: cos(x) -> 1 - x**2/2 and
! exp(10*x**2) -> 1 + 10x**2 (both degree-2 Maclaurin).
real(8) function code(x)
real(8), intent (in) :: x
code = (1.0d0 + (x * (x * (-0.5d0)))) * (1.0d0 + (x * (x * 10.0d0)))
end function
/** Herbie alternative: cos(x) -> 1 - x^2/2 and exp(10*x^2) -> 1 + 10x^2
 *  (both degree-2 Maclaurin); no transcendental calls. */
public static double code(double x) {
return (1.0 + (x * (x * -0.5))) * (1.0 + (x * (x * 10.0)));
}
def code(x): return (1.0 + (x * (x * -0.5))) * (1.0 + (x * (x * 10.0)))  # Herbie: cos -> 1 - x^2/2, exp(10 x^2) -> 1 + 10 x^2
function code(x) return Float64(Float64(1.0 + Float64(x * Float64(x * -0.5))) * Float64(1.0 + Float64(x * Float64(x * 10.0)))) end
function tmp = code(x) tmp = (1.0 + (x * (x * -0.5))) * (1.0 + (x * (x * 10.0))); end
code[x_] := N[(N[(1.0 + N[(x * N[(x * -0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(1.0 + N[(x * N[(x * 10.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(1 + x \cdot \left(x \cdot -0.5\right)\right) \cdot \left(1 + x \cdot \left(x \cdot 10\right)\right)
\end{array}
Initial program 94.3%
Taylor expanded in x around 0
+-lowering-+.f64N/A
unpow2N/A
associate-*r*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f649.8%
Simplified9.8%
Taylor expanded in x around 0
+-lowering-+.f64N/A
*-commutativeN/A
unpow2N/A
associate-*l*N/A
*-lowering-*.f64N/A
*-lowering-*.f649.9%
Simplified9.9%
(FPCore (x) :precision binary64 (+ 1.0 (* (* x x) (+ 9.5 (* x (* x -4.958333333333333))))))
/* Herbie alternative: a single quartic 1 + 9.5x^2 - 4.9583x^4 obtained by
   Taylor-expanding the whole product cos(x)*exp(10*x^2). Evaluation order
   matches the generated expression. */
double code(double x) {
    double inner = 9.5 + x * (x * -4.958333333333333);
    return 1.0 + (x * x) * inner;
}
! Herbie-generated alternative: single quartic 1 + 9.5x**2 - 4.9583x**4
! from Taylor expansion of the whole product cos(x)*exp(10*x**2).
real(8) function code(x)
real(8), intent (in) :: x
code = 1.0d0 + ((x * x) * (9.5d0 + (x * (x * (-4.958333333333333d0)))))
end function
/** Herbie alternative: single quartic 1 + 9.5x^2 - 4.9583x^4 from Taylor
 *  expansion of the whole product cos(x)*exp(10*x^2). */
public static double code(double x) {
return 1.0 + ((x * x) * (9.5 + (x * (x * -4.958333333333333))));
}
def code(x): return 1.0 + ((x * x) * (9.5 + (x * (x * -4.958333333333333))))  # Herbie: single quartic 1 + 9.5 x^2 - 4.9583 x^4 from Taylor expansion of the product
function code(x) return Float64(1.0 + Float64(Float64(x * x) * Float64(9.5 + Float64(x * Float64(x * -4.958333333333333))))) end
function tmp = code(x) tmp = 1.0 + ((x * x) * (9.5 + (x * (x * -4.958333333333333)))); end
code[x_] := N[(1.0 + N[(N[(x * x), $MachinePrecision] * N[(9.5 + N[(x * N[(x * -4.958333333333333), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
1 + \left(x \cdot x\right) \cdot \left(9.5 + x \cdot \left(x \cdot -4.958333333333333\right)\right)
\end{array}
Initial program 94.3%
Taylor expanded in x around 0
+-lowering-+.f64N/A
unpow2N/A
associate-*r*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f649.8%
Simplified9.8%
Taylor expanded in x around 0
+-lowering-+.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-commutativeN/A
unpow2N/A
associate-*l*N/A
*-lowering-*.f64N/A
*-lowering-*.f649.9%
Simplified9.9%
(FPCore (x) :precision binary64 (* (* x x) -0.5))
/* Herbie alternative: the whole expression collapsed to -x^2/2 (the exp
   factor and the cosine's constant term are dropped). */
double code(double x) {
    double sq = x * x;
    return sq * -0.5;
}
! Herbie-generated alternative: the whole expression collapsed to -x**2/2
! (the exp factor and the cosine's constant term are dropped).
real(8) function code(x)
real(8), intent (in) :: x
code = (x * x) * (-0.5d0)
end function
/** Herbie alternative: whole expression collapsed to -x^2/2. */
public static double code(double x) {
return (x * x) * -0.5;
}
def code(x): return (x * x) * -0.5  # Herbie: whole expression collapsed to -x^2/2
function code(x) return Float64(Float64(x * x) * -0.5) end
function tmp = code(x) tmp = (x * x) * -0.5; end
code[x_] := N[(N[(x * x), $MachinePrecision] * -0.5), $MachinePrecision]
\begin{array}{l}
\\
\left(x \cdot x\right) \cdot -0.5
\end{array}
Initial program 94.3%
*-lowering-*.f64N/A
cos-lowering-cos.f64N/A
exp-lowering-exp.f64N/A
associate-*r*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f6494.2%
Simplified94.2%
Taylor expanded in x around 0
+-lowering-+.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6418.2%
Simplified18.2%
Taylor expanded in x around 0
Simplified9.7%
Taylor expanded in x around inf
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f649.7%
Simplified9.7%
Final simplification9.7%
(FPCore (x) :precision binary64 1.0)
/* Herbie alternative: constant approximation — the value of the original
   expression at x = 0. The input is intentionally unused. */
double code(double x) {
    (void)x;  /* suppress unused-parameter diagnostics */
    return 1.0;
}
! Herbie-generated alternative: constant approximation (the expression's
! value at x = 0); the input is intentionally unused.
real(8) function code(x)
real(8), intent (in) :: x
code = 1.0d0
end function
/** Herbie alternative: constant approximation (expression's value at x = 0);
 *  the input is intentionally unused. */
public static double code(double x) {
return 1.0;
}
def code(x): return 1.0  # Herbie: constant approximation (expression's value at x = 0); input unused
function code(x) return 1.0 end
function tmp = code(x) tmp = 1.0; end
code[x_] := 1.0
\begin{array}{l}
\\
1
\end{array}
Initial program 94.3%
Taylor expanded in x around 0
Simplified1.5%
herbie shell --seed 2024161
(FPCore (x)
:name "ENA, Section 1.4, Exercise 1"
:precision binary64
:pre (and (<= 1.99 x) (<= x 2.01))
(* (cos x) (exp (* 10.0 (* x x)))))