Math FPCore C Fortran Java Python Julia MATLAB Wolfram TeX

\[\left(e^{x} - 2\right) + e^{-x}
\]
↓
\[\begin{array}{l}
t_0 := \left(e^{x} - 2\right) + e^{-x}\\
\mathbf{if}\;t_0 \leq 0:\\
\;\;\;\;x \cdot x\\
\mathbf{else}:\\
\;\;\;\;t_0\\
\end{array}
\]
(FPCore (x) :precision binary64 (+ (- (exp x) 2.0) (exp (- x)))) ↓
(FPCore (x)
:precision binary64
(let* ((t_0 (+ (- (exp x) 2.0) (exp (- x))))) (if (<= t_0 0.0) (* x x) t_0)))

double code(double x) {
return (exp(x) - 2.0) + exp(-x);
}
↓
double code(double x) {
double t_0 = (exp(x) - 2.0) + exp(-x);
double tmp;
if (t_0 <= 0.0) {
tmp = x * x;
} else {
tmp = t_0;
}
return tmp;
}
! Evaluates (exp(x) - 2) + exp(-x) directly in double precision.
real(8) function code(x)
    real(8), intent (in) :: x
    real(8) :: growth, decay
    growth = exp(x)
    decay = exp(-x)
    code = (growth - 2.0d0) + decay
end function
↓
! Evaluates (exp(x) - 2) + exp(-x); when the directly computed sum is
! non-positive, returns x*x instead.
real(8) function code(x)
    real(8), intent (in) :: x
    real(8) :: t_0
    t_0 = (exp(x) - 2.0d0) + exp(-x)
    if (t_0 <= 0.0d0) then
        code = x * x
    else
        code = t_0
    end if
end function
/** Evaluates (e^x - 2) + e^(-x) directly in double precision. */
public static double code(double x) {
    double grow = Math.exp(x);
    double decay = Math.exp(-x);
    return (grow - 2.0) + decay;
}
↓
/**
 * Evaluates (e^x - 2) + e^(-x); when the directly computed sum is
 * non-positive, returns x*x instead (the rewritten form from the report).
 */
public static double code(double x) {
    double t_0 = (Math.exp(x) - 2.0) + Math.exp(-x);
    return (t_0 <= 0.0) ? (x * x) : t_0;
}
def code(x):
    """Evaluate (e**x - 2) + e**(-x) directly in float arithmetic."""
    grow = math.exp(x)
    decay = math.exp(-x)
    return (grow - 2.0) + decay
↓
def code(x):
    """Evaluate (e**x - 2) + e**(-x); when the directly computed sum is
    non-positive, return x*x instead (the rewritten form from the report).
    """
    t_0 = (math.exp(x) - 2.0) + math.exp(-x)
    return x * x if t_0 <= 0.0 else t_0
function code(x)
    # (e^x - 2) + e^(-x), with each intermediate rounded to Float64.
    head = Float64(exp(x) - 2.0)
    tail = exp(Float64(-x))
    return Float64(head + tail)
end
↓
function code(x)
    # (e^x - 2) + e^(-x); when the directly computed sum is non-positive,
    # return x*x instead (the rewritten form from the report).
    t_0 = Float64(Float64(exp(x) - 2.0) + exp(Float64(-x)))
    return t_0 <= 0.0 ? Float64(x * x) : t_0
end
function tmp = code(x)
    % Evaluates (exp(x) - 2.0) + exp(-x) directly.
    growth = exp(x);
    decay = exp(-x);
    tmp = (growth - 2.0) + decay;
end
↓
function tmp_2 = code(x)
    % Evaluates (exp(x) - 2.0) + exp(-x); when the directly computed sum
    % is non-positive, returns x*x instead.
    t_0 = (exp(x) - 2.0) + exp(-x);
    if (t_0 <= 0.0)
        tmp_2 = x * x;
    else
        tmp_2 = t_0;
    end
end
(* Evaluates (Exp[x] - 2.0) + Exp[-x], rounding each intermediate to $MachinePrecision. *)
code[x_] := N[(N[(N[Exp[x], $MachinePrecision] - 2.0), $MachinePrecision] + N[Exp[(-x)], $MachinePrecision]), $MachinePrecision]
↓
(* Evaluates (Exp[x] - 2.0) + Exp[-x]; when the directly computed sum t$95$0 is non-positive, returns x*x instead. *)
code[x_] := Block[{t$95$0 = N[(N[(N[Exp[x], $MachinePrecision] - 2.0), $MachinePrecision] + N[Exp[(-x)], $MachinePrecision]), $MachinePrecision]}, If[LessEqual[t$95$0, 0.0], N[(x * x), $MachinePrecision], t$95$0]]
\left(e^{x} - 2\right) + e^{-x}
↓
\begin{array}{l}
t_0 := \left(e^{x} - 2\right) + e^{-x}\\
\mathbf{if}\;t_0 \leq 0:\\
\;\;\;\;x \cdot x\\
\mathbf{else}:\\
\;\;\;\;t_0\\
\end{array}
Alternatives

Alternative 1 Accuracy 99.6% Cost 13448
\[\begin{array}{l}
\mathbf{if}\;x \leq -1.65:\\
\;\;\;\;\mathsf{expm1}\left(-x\right)\\
\mathbf{elif}\;x \leq 0.00018:\\
\;\;\;\;x \cdot x\\
\mathbf{else}:\\
\;\;\;\;e^{x} + \left(e^{-x} + -2\right)\\
\end{array}
\]
Alternative 2 Accuracy 99.5% Cost 7432
\[\begin{array}{l}
\mathbf{if}\;x \leq -2.6:\\
\;\;\;\;\mathsf{expm1}\left(-x\right)\\
\mathbf{elif}\;x \leq 2.6:\\
\;\;\;\;x \cdot x + \left(\left(1 + 0.08333333333333333 \cdot {x}^{4}\right) + -1\right)\\
\mathbf{else}:\\
\;\;\;\;\mathsf{expm1}\left(x\right)\\
\end{array}
\]
Alternative 3 Accuracy 99.6% Cost 6728
\[\begin{array}{l}
\mathbf{if}\;x \leq -2.6:\\
\;\;\;\;\mathsf{expm1}\left(-x\right)\\
\mathbf{elif}\;x \leq 2.6:\\
\;\;\;\;x \cdot x + \left(x \cdot x\right) \cdot \left(\left(x \cdot x\right) \cdot 0.08333333333333333\right)\\
\mathbf{else}:\\
\;\;\;\;\mathsf{expm1}\left(x\right)\\
\end{array}
\]
Alternative 4 Accuracy 94.3% Cost 6596
\[\begin{array}{l}
\mathbf{if}\;x \leq 2.6:\\
\;\;\;\;x \cdot x + \left(x \cdot x\right) \cdot \left(\left(x \cdot x\right) \cdot 0.08333333333333333\right)\\
\mathbf{else}:\\
\;\;\;\;\mathsf{expm1}\left(x\right)\\
\end{array}
\]
Alternative 5 Accuracy 80.5% Cost 836
\[\begin{array}{l}
\mathbf{if}\;x \leq -1.5:\\
\;\;\;\;\left(x \cdot x\right) \cdot \left(0.5 + x \cdot -0.16666666666666666\right) - x\\
\mathbf{else}:\\
\;\;\;\;x \cdot x\\
\end{array}
\]
Alternative 6 Accuracy 88.8% Cost 832
\[x \cdot x + \left(x \cdot x\right) \cdot \left(\left(x \cdot x\right) \cdot 0.08333333333333333\right)
\]
Alternative 7 Accuracy 76.8% Cost 192
\[x \cdot x
\]
Alternative 8 Accuracy 27.2% Cost 64
\[0
\]