
(FPCore (x) :precision binary64 (+ (- (exp x) 2.0) (exp (- x))))
double code(double x) {
return (exp(x) - 2.0) + exp(-x);
}
! Evaluate exp(x) - 2 + exp(-x) in double precision (binary64).
real(8) function code(x)
real(8), intent (in) :: x
code = (exp(x) - 2.0d0) + exp(-x)
end function
public static double code(double x) {
    // Direct evaluation of exp(x) - 2 + exp(-x) in binary64.
    double grow = Math.exp(x);
    double decay = Math.exp(-x);
    return (grow - 2.0) + decay;
}
def code(x):
    """Evaluate exp(x) - 2 + exp(-x) in double precision."""
    grow = math.exp(x)
    decay = math.exp(-x)
    return (grow - 2.0) + decay
# Evaluate exp(x) - 2 + exp(-x), forcing binary64 rounding at each step.
function code(x) return Float64(Float64(exp(x) - 2.0) + exp(Float64(-x))) end
% Evaluate exp(x) - 2 + exp(-x) in double precision.
function tmp = code(x) tmp = (exp(x) - 2.0) + exp(-x); end
(* Evaluate exp(x) - 2 + exp(-x), rounding every intermediate to machine precision. *)
code[x_] := N[(N[(N[Exp[x], $MachinePrecision] - 2.0), $MachinePrecision] + N[Exp[(-x)], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(e^{x} - 2\right) + e^{-x}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 9 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x) :precision binary64 (+ (- (exp x) 2.0) (exp (- x))))
double code(double x) {
/* Reference (initial) program: exp(x) - 2 + exp(-x), evaluated directly. */
return (exp(x) - 2.0) + exp(-x);
}
real(8) function code(x)
real(8), intent (in) :: x
code = (exp(x) - 2.0d0) + exp(-x)
end function
public static double code(double x) {
return (Math.exp(x) - 2.0) + Math.exp(-x);
}
def code(x):
    """Evaluate exp(x) - 2 + exp(-x) in double precision."""
    rising = math.exp(x)
    falling = math.exp(-x)
    return (rising - 2.0) + falling
function code(x) return Float64(Float64(exp(x) - 2.0) + exp(Float64(-x))) end
function tmp = code(x) tmp = (exp(x) - 2.0) + exp(-x); end
code[x_] := N[(N[(N[Exp[x], $MachinePrecision] - 2.0), $MachinePrecision] + N[Exp[(-x)], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(e^{x} - 2\right) + e^{-x}
\end{array}
(FPCore (x)
:precision binary64
(let* ((t_0
(+
0.08333333333333333
(*
(* x x)
(+ 0.002777777777777778 (* x (* x 4.96031746031746e-5))))))
(t_1 (* x (* x t_0))))
(/
(* (* x x) (+ 1.0 (* (* (* x x) (* (* x x) (* x x))) (* t_0 (* t_0 t_0)))))
(+ 1.0 (* t_1 (+ t_1 -1.0))))))
double code(double x) {
    /* Rational approximation of exp(x) - 2 + exp(-x) about x = 0.
       `x * x` is hoisted into x2; each product below performs the exact
       same floating-point operations as the generated original. */
    const double x2 = x * x;
    const double t_0 = 0.08333333333333333 + (x2 * (0.002777777777777778 + (x * (x * 4.96031746031746e-5))));
    const double t_1 = x * (x * t_0);
    const double num = x2 * (1.0 + ((x2 * (x2 * x2)) * (t_0 * (t_0 * t_0))));
    const double den = 1.0 + (t_1 * (t_1 - 1.0));
    return num / den;
}
real(8) function code(x)
real(8), intent (in) :: x
real(8) :: t_0
real(8) :: t_1
t_0 = 0.08333333333333333d0 + ((x * x) * (0.002777777777777778d0 + (x * (x * 4.96031746031746d-5))))
t_1 = x * (x * t_0)
code = ((x * x) * (1.0d0 + (((x * x) * ((x * x) * (x * x))) * (t_0 * (t_0 * t_0))))) / (1.0d0 + (t_1 * (t_1 + (-1.0d0))))
end function
public static double code(double x) {
double t_0 = 0.08333333333333333 + ((x * x) * (0.002777777777777778 + (x * (x * 4.96031746031746e-5))));
double t_1 = x * (x * t_0);
return ((x * x) * (1.0 + (((x * x) * ((x * x) * (x * x))) * (t_0 * (t_0 * t_0))))) / (1.0 + (t_1 * (t_1 + -1.0)));
}
def code(x):
    """Rational approximation of exp(x) - 2 + exp(-x) about x = 0.

    NOTE(review): the original generated line placed three statements on the
    `def` line with no separators, which is a Python SyntaxError; reformatted
    onto separate lines without changing any operation or its order.
    """
    t_0 = 0.08333333333333333 + ((x * x) * (0.002777777777777778 + (x * (x * 4.96031746031746e-5))))
    t_1 = x * (x * t_0)
    return ((x * x) * (1.0 + (((x * x) * ((x * x) * (x * x))) * (t_0 * (t_0 * t_0))))) / (1.0 + (t_1 * (t_1 + -1.0)))
function code(x) t_0 = Float64(0.08333333333333333 + Float64(Float64(x * x) * Float64(0.002777777777777778 + Float64(x * Float64(x * 4.96031746031746e-5))))) t_1 = Float64(x * Float64(x * t_0)) return Float64(Float64(Float64(x * x) * Float64(1.0 + Float64(Float64(Float64(x * x) * Float64(Float64(x * x) * Float64(x * x))) * Float64(t_0 * Float64(t_0 * t_0))))) / Float64(1.0 + Float64(t_1 * Float64(t_1 + -1.0)))) end
function tmp = code(x) t_0 = 0.08333333333333333 + ((x * x) * (0.002777777777777778 + (x * (x * 4.96031746031746e-5)))); t_1 = x * (x * t_0); tmp = ((x * x) * (1.0 + (((x * x) * ((x * x) * (x * x))) * (t_0 * (t_0 * t_0))))) / (1.0 + (t_1 * (t_1 + -1.0))); end
code[x_] := Block[{t$95$0 = N[(0.08333333333333333 + N[(N[(x * x), $MachinePrecision] * N[(0.002777777777777778 + N[(x * N[(x * 4.96031746031746e-5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(x * N[(x * t$95$0), $MachinePrecision]), $MachinePrecision]}, N[(N[(N[(x * x), $MachinePrecision] * N[(1.0 + N[(N[(N[(x * x), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(t$95$0 * N[(t$95$0 * t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(1.0 + N[(t$95$1 * N[(t$95$1 + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := 0.08333333333333333 + \left(x \cdot x\right) \cdot \left(0.002777777777777778 + x \cdot \left(x \cdot 4.96031746031746 \cdot 10^{-5}\right)\right)\\
t_1 := x \cdot \left(x \cdot t\_0\right)\\
\frac{\left(x \cdot x\right) \cdot \left(1 + \left(\left(x \cdot x\right) \cdot \left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right)\right) \cdot \left(t\_0 \cdot \left(t\_0 \cdot t\_0\right)\right)\right)}{1 + t\_1 \cdot \left(t\_1 + -1\right)}
\end{array}
\end{array}
Initial program 48.9%
associate-+l-N/A
sub-negN/A
+-lowering-+.f64N/A
exp-lowering-exp.f64N/A
sub-negN/A
+-commutativeN/A
distribute-neg-inN/A
remove-double-negN/A
+-lowering-+.f64N/A
exp-negN/A
/-lowering-/.f64N/A
exp-lowering-exp.f64N/A
metadata-eval49.0%
Simplified49.0%
Taylor expanded in x around 0
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6498.6%
Simplified98.6%
*-commutativeN/A
flip3-+N/A
associate-*l/N/A
/-lowering-/.f64N/A
Applied egg-rr98.6%
Final simplification98.6%
(FPCore (x)
:precision binary64
(let* ((t_0
(+
0.08333333333333333
(*
(* x x)
(+ 0.002777777777777778 (* x (* x 4.96031746031746e-5)))))))
(*
(* x x)
(/
(+ (* t_0 (* (* (* x x) (* x x)) t_0)) -1.0)
(+ (* x (* x t_0)) -1.0)))))
double code(double x) {
    /* Rational approximation of exp(x) - 2 + exp(-x) about x = 0.
       Identical operations to the generated form; `x * x` hoisted to x2. */
    const double x2 = x * x;
    const double t_0 = 0.08333333333333333 + (x2 * (0.002777777777777778 + (x * (x * 4.96031746031746e-5))));
    const double num = (t_0 * ((x2 * x2) * t_0)) - 1.0;
    const double den = (x * (x * t_0)) - 1.0;
    return x2 * (num / den);
}
real(8) function code(x)
real(8), intent (in) :: x
real(8) :: t_0
t_0 = 0.08333333333333333d0 + ((x * x) * (0.002777777777777778d0 + (x * (x * 4.96031746031746d-5))))
code = (x * x) * (((t_0 * (((x * x) * (x * x)) * t_0)) + (-1.0d0)) / ((x * (x * t_0)) + (-1.0d0)))
end function
public static double code(double x) {
double t_0 = 0.08333333333333333 + ((x * x) * (0.002777777777777778 + (x * (x * 4.96031746031746e-5))));
return (x * x) * (((t_0 * (((x * x) * (x * x)) * t_0)) + -1.0) / ((x * (x * t_0)) + -1.0));
}
def code(x):
    """Rational approximation of exp(x) - 2 + exp(-x) about x = 0.

    NOTE(review): the original generated line placed two statements on the
    `def` line with no separators, which is a Python SyntaxError; reformatted
    onto separate lines without changing any operation or its order.
    """
    t_0 = 0.08333333333333333 + ((x * x) * (0.002777777777777778 + (x * (x * 4.96031746031746e-5))))
    return (x * x) * (((t_0 * (((x * x) * (x * x)) * t_0)) + -1.0) / ((x * (x * t_0)) + -1.0))
function code(x) t_0 = Float64(0.08333333333333333 + Float64(Float64(x * x) * Float64(0.002777777777777778 + Float64(x * Float64(x * 4.96031746031746e-5))))) return Float64(Float64(x * x) * Float64(Float64(Float64(t_0 * Float64(Float64(Float64(x * x) * Float64(x * x)) * t_0)) + -1.0) / Float64(Float64(x * Float64(x * t_0)) + -1.0))) end
function tmp = code(x) t_0 = 0.08333333333333333 + ((x * x) * (0.002777777777777778 + (x * (x * 4.96031746031746e-5)))); tmp = (x * x) * (((t_0 * (((x * x) * (x * x)) * t_0)) + -1.0) / ((x * (x * t_0)) + -1.0)); end
code[x_] := Block[{t$95$0 = N[(0.08333333333333333 + N[(N[(x * x), $MachinePrecision] * N[(0.002777777777777778 + N[(x * N[(x * 4.96031746031746e-5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, N[(N[(x * x), $MachinePrecision] * N[(N[(N[(t$95$0 * N[(N[(N[(x * x), $MachinePrecision] * N[(x * x), $MachinePrecision]), $MachinePrecision] * t$95$0), $MachinePrecision]), $MachinePrecision] + -1.0), $MachinePrecision] / N[(N[(x * N[(x * t$95$0), $MachinePrecision]), $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := 0.08333333333333333 + \left(x \cdot x\right) \cdot \left(0.002777777777777778 + x \cdot \left(x \cdot 4.96031746031746 \cdot 10^{-5}\right)\right)\\
\left(x \cdot x\right) \cdot \frac{t\_0 \cdot \left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot t\_0\right) + -1}{x \cdot \left(x \cdot t\_0\right) + -1}
\end{array}
\end{array}
Initial program 48.9%
associate-+l-N/A
sub-negN/A
+-lowering-+.f64N/A
exp-lowering-exp.f64N/A
sub-negN/A
+-commutativeN/A
distribute-neg-inN/A
remove-double-negN/A
+-lowering-+.f64N/A
exp-negN/A
/-lowering-/.f64N/A
exp-lowering-exp.f64N/A
metadata-eval49.0%
Simplified49.0%
Taylor expanded in x around 0
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6498.6%
Simplified98.6%
+-commutativeN/A
flip-+N/A
/-lowering-/.f64N/A
Applied egg-rr98.6%
Final simplification98.6%
(FPCore (x)
:precision binary64
(+
(* x x)
(*
(* (* x x) (* x x))
(+
0.08333333333333333
(* (* x x) (+ 0.002777777777777778 (* x (* x 4.96031746031746e-5))))))))
double code(double x) {
    /* Even polynomial (Taylor) approximation of exp(x) - 2 + exp(-x). */
    const double x2 = x * x;
    const double tail = 0.002777777777777778 + (x * (x * 4.96031746031746e-5));
    return x2 + ((x2 * x2) * (0.08333333333333333 + (x2 * tail)));
}
real(8) function code(x)
real(8), intent (in) :: x
code = (x * x) + (((x * x) * (x * x)) * (0.08333333333333333d0 + ((x * x) * (0.002777777777777778d0 + (x * (x * 4.96031746031746d-5))))))
end function
public static double code(double x) {
return (x * x) + (((x * x) * (x * x)) * (0.08333333333333333 + ((x * x) * (0.002777777777777778 + (x * (x * 4.96031746031746e-5))))));
}
def code(x):
    """Even polynomial (Taylor) approximation of exp(x) - 2 + exp(-x)."""
    sq = x * x
    tail = 0.002777777777777778 + (x * (x * 4.96031746031746e-5))
    return sq + ((sq * sq) * (0.08333333333333333 + (sq * tail)))
function code(x) return Float64(Float64(x * x) + Float64(Float64(Float64(x * x) * Float64(x * x)) * Float64(0.08333333333333333 + Float64(Float64(x * x) * Float64(0.002777777777777778 + Float64(x * Float64(x * 4.96031746031746e-5))))))) end
function tmp = code(x) tmp = (x * x) + (((x * x) * (x * x)) * (0.08333333333333333 + ((x * x) * (0.002777777777777778 + (x * (x * 4.96031746031746e-5)))))); end
code[x_] := N[(N[(x * x), $MachinePrecision] + N[(N[(N[(x * x), $MachinePrecision] * N[(x * x), $MachinePrecision]), $MachinePrecision] * N[(0.08333333333333333 + N[(N[(x * x), $MachinePrecision] * N[(0.002777777777777778 + N[(x * N[(x * 4.96031746031746e-5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x \cdot x + \left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(0.08333333333333333 + \left(x \cdot x\right) \cdot \left(0.002777777777777778 + x \cdot \left(x \cdot 4.96031746031746 \cdot 10^{-5}\right)\right)\right)
\end{array}
Initial program 48.9%
associate-+l-N/A
sub-negN/A
+-lowering-+.f64N/A
exp-lowering-exp.f64N/A
sub-negN/A
+-commutativeN/A
distribute-neg-inN/A
remove-double-negN/A
+-lowering-+.f64N/A
exp-negN/A
/-lowering-/.f64N/A
exp-lowering-exp.f64N/A
metadata-eval49.0%
Simplified49.0%
Taylor expanded in x around 0
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6498.6%
Simplified98.6%
+-commutativeN/A
distribute-lft-inN/A
*-rgt-identityN/A
+-lowering-+.f64N/A
Applied egg-rr98.6%
Final simplification98.6%
(FPCore (x)
:precision binary64
(*
x
(+
x
(*
x
(*
(* x x)
(+
0.08333333333333333
(*
x
(* x (+ 0.002777777777777778 (* x (* x 4.96031746031746e-5)))))))))))
double code(double x) {
    /* Horner-like factoring of the Taylor polynomial for exp(x) - 2 + exp(-x). */
    const double inner = 0.002777777777777778 + (x * (x * 4.96031746031746e-5));
    const double poly = 0.08333333333333333 + (x * (x * inner));
    return x * (x + (x * ((x * x) * poly)));
}
real(8) function code(x)
real(8), intent (in) :: x
code = x * (x + (x * ((x * x) * (0.08333333333333333d0 + (x * (x * (0.002777777777777778d0 + (x * (x * 4.96031746031746d-5)))))))))
end function
public static double code(double x) {
return x * (x + (x * ((x * x) * (0.08333333333333333 + (x * (x * (0.002777777777777778 + (x * (x * 4.96031746031746e-5)))))))));
}
def code(x):
    """Horner-like factoring of the Taylor polynomial for exp(x) - 2 + exp(-x)."""
    inner = 0.002777777777777778 + (x * (x * 4.96031746031746e-5))
    poly = 0.08333333333333333 + (x * (x * inner))
    return x * (x + (x * ((x * x) * poly)))
function code(x) return Float64(x * Float64(x + Float64(x * Float64(Float64(x * x) * Float64(0.08333333333333333 + Float64(x * Float64(x * Float64(0.002777777777777778 + Float64(x * Float64(x * 4.96031746031746e-5)))))))))) end
function tmp = code(x) tmp = x * (x + (x * ((x * x) * (0.08333333333333333 + (x * (x * (0.002777777777777778 + (x * (x * 4.96031746031746e-5))))))))); end
code[x_] := N[(x * N[(x + N[(x * N[(N[(x * x), $MachinePrecision] * N[(0.08333333333333333 + N[(x * N[(x * N[(0.002777777777777778 + N[(x * N[(x * 4.96031746031746e-5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x \cdot \left(x + x \cdot \left(\left(x \cdot x\right) \cdot \left(0.08333333333333333 + x \cdot \left(x \cdot \left(0.002777777777777778 + x \cdot \left(x \cdot 4.96031746031746 \cdot 10^{-5}\right)\right)\right)\right)\right)\right)
\end{array}
Initial program 48.9%
associate-+l-N/A
sub-negN/A
+-lowering-+.f64N/A
exp-lowering-exp.f64N/A
sub-negN/A
+-commutativeN/A
distribute-neg-inN/A
remove-double-negN/A
+-lowering-+.f64N/A
exp-negN/A
/-lowering-/.f64N/A
exp-lowering-exp.f64N/A
metadata-eval49.0%
Simplified49.0%
Taylor expanded in x around 0
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6498.6%
Simplified98.6%
+-commutativeN/A
distribute-lft-inN/A
*-rgt-identityN/A
+-lowering-+.f64N/A
Applied egg-rr98.6%
*-commutativeN/A
associate-*r*N/A
distribute-lft-inN/A
associate-*r*N/A
distribute-lft-inN/A
associate-*l*N/A
distribute-lft-outN/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
Applied egg-rr98.6%
Final simplification98.6%
(FPCore (x) :precision binary64 (* (* x x) (+ 1.0 (* (* x x) (+ 0.08333333333333333 (* (* x x) 0.002777777777777778))))))
double code(double x) {
    /* Degree-6 even-polynomial approximation of exp(x) - 2 + exp(-x). */
    const double x2 = x * x;
    return x2 * (1.0 + (x2 * (0.08333333333333333 + (x2 * 0.002777777777777778))));
}
real(8) function code(x)
real(8), intent (in) :: x
code = (x * x) * (1.0d0 + ((x * x) * (0.08333333333333333d0 + ((x * x) * 0.002777777777777778d0))))
end function
public static double code(double x) {
return (x * x) * (1.0 + ((x * x) * (0.08333333333333333 + ((x * x) * 0.002777777777777778))));
}
def code(x):
    """Degree-6 even-polynomial approximation of exp(x) - 2 + exp(-x)."""
    sq = x * x
    return sq * (1.0 + (sq * (0.08333333333333333 + (sq * 0.002777777777777778))))
function code(x) return Float64(Float64(x * x) * Float64(1.0 + Float64(Float64(x * x) * Float64(0.08333333333333333 + Float64(Float64(x * x) * 0.002777777777777778))))) end
function tmp = code(x) tmp = (x * x) * (1.0 + ((x * x) * (0.08333333333333333 + ((x * x) * 0.002777777777777778)))); end
code[x_] := N[(N[(x * x), $MachinePrecision] * N[(1.0 + N[(N[(x * x), $MachinePrecision] * N[(0.08333333333333333 + N[(N[(x * x), $MachinePrecision] * 0.002777777777777778), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(x \cdot x\right) \cdot \left(1 + \left(x \cdot x\right) \cdot \left(0.08333333333333333 + \left(x \cdot x\right) \cdot 0.002777777777777778\right)\right)
\end{array}
Initial program 48.9%
associate-+l-N/A
sub-negN/A
+-lowering-+.f64N/A
exp-lowering-exp.f64N/A
sub-negN/A
+-commutativeN/A
distribute-neg-inN/A
remove-double-negN/A
+-lowering-+.f64N/A
exp-negN/A
/-lowering-/.f64N/A
exp-lowering-exp.f64N/A
metadata-eval49.0%
Simplified49.0%
Taylor expanded in x around 0
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6498.5%
Simplified98.5%
(FPCore (x) :precision binary64 (* x (* x (+ 1.0 (* (* x x) 0.08333333333333333)))))
double code(double x) {
    /* Degree-4 approximation of exp(x) - 2 + exp(-x), factored as x*(x*(...)). */
    const double correction = 1.0 + ((x * x) * 0.08333333333333333);
    return x * (x * correction);
}
real(8) function code(x)
real(8), intent (in) :: x
code = x * (x * (1.0d0 + ((x * x) * 0.08333333333333333d0)))
end function
public static double code(double x) {
return x * (x * (1.0 + ((x * x) * 0.08333333333333333)));
}
def code(x):
    """Degree-4 approximation of exp(x) - 2 + exp(-x), factored as x*(x*(...))."""
    correction = 1.0 + ((x * x) * 0.08333333333333333)
    return x * (x * correction)
function code(x) return Float64(x * Float64(x * Float64(1.0 + Float64(Float64(x * x) * 0.08333333333333333)))) end
function tmp = code(x) tmp = x * (x * (1.0 + ((x * x) * 0.08333333333333333))); end
code[x_] := N[(x * N[(x * N[(1.0 + N[(N[(x * x), $MachinePrecision] * 0.08333333333333333), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x \cdot \left(x \cdot \left(1 + \left(x \cdot x\right) \cdot 0.08333333333333333\right)\right)
\end{array}
Initial program 48.9%
associate-+l-N/A
sub-negN/A
+-lowering-+.f64N/A
exp-lowering-exp.f64N/A
sub-negN/A
+-commutativeN/A
distribute-neg-inN/A
remove-double-negN/A
+-lowering-+.f64N/A
exp-negN/A
/-lowering-/.f64N/A
exp-lowering-exp.f64N/A
metadata-eval49.0%
Simplified49.0%
Taylor expanded in x around 0
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6498.4%
Simplified98.4%
associate-*l*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f6498.4%
Applied egg-rr98.4%
Final simplification98.4%
(FPCore (x) :precision binary64 (* (* x x) (+ 1.0 (* (* x x) 0.08333333333333333))))
double code(double x) {
    /* Degree-4 even-polynomial approximation of exp(x) - 2 + exp(-x). */
    const double x2 = x * x;
    return x2 * (1.0 + (x2 * 0.08333333333333333));
}
real(8) function code(x)
real(8), intent (in) :: x
code = (x * x) * (1.0d0 + ((x * x) * 0.08333333333333333d0))
end function
public static double code(double x) {
return (x * x) * (1.0 + ((x * x) * 0.08333333333333333));
}
def code(x):
    """Degree-4 even-polynomial approximation of exp(x) - 2 + exp(-x)."""
    sq = x * x
    return sq * (1.0 + (sq * 0.08333333333333333))
function code(x) return Float64(Float64(x * x) * Float64(1.0 + Float64(Float64(x * x) * 0.08333333333333333))) end
function tmp = code(x) tmp = (x * x) * (1.0 + ((x * x) * 0.08333333333333333)); end
code[x_] := N[(N[(x * x), $MachinePrecision] * N[(1.0 + N[(N[(x * x), $MachinePrecision] * 0.08333333333333333), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(x \cdot x\right) \cdot \left(1 + \left(x \cdot x\right) \cdot 0.08333333333333333\right)
\end{array}
Initial program 48.9%
associate-+l-N/A
sub-negN/A
+-lowering-+.f64N/A
exp-lowering-exp.f64N/A
sub-negN/A
+-commutativeN/A
distribute-neg-inN/A
remove-double-negN/A
+-lowering-+.f64N/A
exp-negN/A
/-lowering-/.f64N/A
exp-lowering-exp.f64N/A
metadata-eval49.0%
Simplified49.0%
Taylor expanded in x around 0
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6498.4%
Simplified98.4%
(FPCore (x) :precision binary64 (* x x))
double code(double x) {
/* Leading-order (quadratic) approximation of exp(x) - 2 + exp(-x). */
return x * x;
}
real(8) function code(x)
real(8), intent (in) :: x
code = x * x
end function
public static double code(double x) {
return x * x;
}
def code(x):
    """Leading-order (quadratic) approximation of exp(x) - 2 + exp(-x)."""
    squared = x * x
    return squared
function code(x) return Float64(x * x) end
function tmp = code(x) tmp = x * x; end
code[x_] := N[(x * x), $MachinePrecision]
\begin{array}{l}
\\
x \cdot x
\end{array}
Initial program 48.9%
associate-+l-N/A
sub-negN/A
+-lowering-+.f64N/A
exp-lowering-exp.f64N/A
sub-negN/A
+-commutativeN/A
distribute-neg-inN/A
remove-double-negN/A
+-lowering-+.f64N/A
exp-negN/A
/-lowering-/.f64N/A
exp-lowering-exp.f64N/A
metadata-eval49.0%
Simplified49.0%
Taylor expanded in x around 0
unpow2N/A
*-lowering-*.f6498.3%
Simplified98.3%
(FPCore (x) :precision binary64 x)
double code(double x) {
/* Lowest-order alternative: the identity (lowest accuracy in the report). */
return x;
}
real(8) function code(x)
real(8), intent (in) :: x
code = x
end function
public static double code(double x) {
return x;
}
def code(x):
    """Lowest-order alternative: return the input unchanged."""
    return x
function code(x) return x end
function tmp = code(x) tmp = x; end
code[x_] := x
\begin{array}{l}
\\
x
\end{array}
Initial program 48.9%
associate-+l-N/A
sub-negN/A
+-lowering-+.f64N/A
exp-lowering-exp.f64N/A
sub-negN/A
+-commutativeN/A
distribute-neg-inN/A
remove-double-negN/A
+-lowering-+.f64N/A
exp-negN/A
/-lowering-/.f64N/A
exp-lowering-exp.f64N/A
metadata-eval49.0%
Simplified49.0%
Taylor expanded in x around 0
Simplified47.6%
Taylor expanded in x around 0
Simplified5.6%
(FPCore (x) :precision binary64 (let* ((t_0 (sinh (/ x 2.0)))) (* 4.0 (* t_0 t_0))))
double code(double x) {
double t_0 = sinh((x / 2.0));
return 4.0 * (t_0 * t_0);
}
real(8) function code(x)
real(8), intent (in) :: x
real(8) :: t_0
t_0 = sinh((x / 2.0d0))
code = 4.0d0 * (t_0 * t_0)
end function
public static double code(double x) {
double t_0 = Math.sinh((x / 2.0));
return 4.0 * (t_0 * t_0);
}
def code(x):
    """Evaluate 4 * sinh(x/2)**2, mathematically equal to exp(x) - 2 + exp(-x).

    NOTE(review): the original generated line placed an assignment and a
    return on the `def` line with no separator, which is a Python
    SyntaxError; reformatted onto separate lines without changing any
    operation or its order.
    """
    t_0 = math.sinh((x / 2.0))
    return 4.0 * (t_0 * t_0)
function code(x) t_0 = sinh(Float64(x / 2.0)) return Float64(4.0 * Float64(t_0 * t_0)) end
function tmp = code(x) t_0 = sinh((x / 2.0)); tmp = 4.0 * (t_0 * t_0); end
code[x_] := Block[{t$95$0 = N[Sinh[N[(x / 2.0), $MachinePrecision]], $MachinePrecision]}, N[(4.0 * N[(t$95$0 * t$95$0), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \sinh \left(\frac{x}{2}\right)\\
4 \cdot \left(t\_0 \cdot t\_0\right)
\end{array}
\end{array}
herbie shell --seed 2024139
(FPCore (x)
:name "exp2 (problem 3.3.7)"
:precision binary64
:pre (<= (fabs x) 710.0)
:alt
(! :herbie-platform default (* 4 (* (sinh (/ x 2)) (sinh (/ x 2)))))
(+ (- (exp x) 2.0) (exp (- x))))