
(FPCore (x) :precision binary64 (/ (- (exp x) (exp (- x))) 2.0))
/* sinh(x) computed directly as (e^x - e^-x) / 2 (Herbie-generated rendering
 * of the FPCore above). Cancellation-prone when |x| is small: the two exps
 * are nearly equal and the subtraction loses significant digits. */
double code(double x) {
return (exp(x) - exp(-x)) / 2.0;
}
! sinh(x) as (exp(x) - exp(-x)) / 2 (Herbie-generated). Cancellation-prone
! for small |x| where the two exponentials nearly cancel.
real(8) function code(x)
real(8), intent (in) :: x
code = (exp(x) - exp(-x)) / 2.0d0
end function
// sinh(x) as (e^x - e^-x) / 2 (Herbie-generated); loses accuracy to
// cancellation near x = 0.
public static double code(double x) {
return (Math.exp(x) - Math.exp(-x)) / 2.0;
}
# sinh(x) as (e^x - e^-x) / 2 (Herbie-generated; expects `math` in scope); cancellation-prone near x = 0.
def code(x): return (math.exp(x) - math.exp(-x)) / 2.0
# sinh(x) as (e^x - e^-x) / 2 with explicit Float64 rounding at each step (Herbie-generated).
function code(x) return Float64(Float64(exp(x) - exp(Float64(-x))) / 2.0) end
% sinh(x) as (exp(x) - exp(-x)) / 2 (Herbie-generated). Newlines restored:
% a MATLAB function declaration cannot share a line with its body.
function tmp = code(x)
    tmp = (exp(x) - exp(-x)) / 2.0;
end
(* sinh(x) as (e^x - e^-x)/2, rounding to $MachinePrecision after each operation (Herbie-generated). *)
code[x_] := N[(N[(N[Exp[x], $MachinePrecision] - N[Exp[(-x)], $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]
\begin{array}{l}
\\
\frac{e^{x} - e^{-x}}{2}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 9 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x) :precision binary64 (/ (- (exp x) (exp (- x))) 2.0))
/* Initial program, repeated in the alternatives table: sinh(x) as
 * (e^x - e^-x) / 2; cancellation-prone near x = 0. */
double code(double x) {
return (exp(x) - exp(-x)) / 2.0;
}
! Initial program (repeated): sinh(x) as (exp(x) - exp(-x)) / 2.
real(8) function code(x)
real(8), intent (in) :: x
code = (exp(x) - exp(-x)) / 2.0d0
end function
// Initial program (repeated): sinh(x) as (e^x - e^-x) / 2.
public static double code(double x) {
return (Math.exp(x) - Math.exp(-x)) / 2.0;
}
# Initial program (repeated): sinh(x) as (e^x - e^-x) / 2; needs `math` in scope.
def code(x): return (math.exp(x) - math.exp(-x)) / 2.0
# Initial program (repeated): sinh(x) as (e^x - e^-x) / 2 with explicit Float64 rounding.
function code(x) return Float64(Float64(exp(x) - exp(Float64(-x))) / 2.0) end
% Initial program (repeated): sinh(x) as (exp(x) - exp(-x)) / 2.
% Newlines restored — a MATLAB function declaration must end its line.
function tmp = code(x)
    tmp = (exp(x) - exp(-x)) / 2.0;
end
(* Initial program (repeated): sinh(x) with per-operation $MachinePrecision rounding. *)
code[x_] := N[(N[(N[Exp[x], $MachinePrecision] - N[Exp[(-x)], $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]
\begin{array}{l}
\\
\frac{e^{x} - e^{-x}}{2}
\end{array}
(FPCore (x)
:precision binary64
(let* ((t_0 (- (exp x) (exp (- x)))))
(if (or (<= t_0 -5.0) (not (<= t_0 0.0005)))
(/ t_0 2.0)
(/ (+ (* (* x 0.3333333333333333) (* x x)) (* x 2.0)) 2.0))))
/* Alternative: sinh(x). Uses the direct formula when t_0 = e^x - e^-x is
 * far from zero (t_0 <= -5 or t_0 > 0.0005); otherwise a cubic Taylor
 * polynomial (x^3/3 + 2x) / 2 that avoids the catastrophic cancellation. */
double code(double x) {
double t_0 = exp(x) - exp(-x);
double tmp;
if ((t_0 <= -5.0) || !(t_0 <= 0.0005)) {
tmp = t_0 / 2.0;
} else {
tmp = (((x * 0.3333333333333333) * (x * x)) + (x * 2.0)) / 2.0;
}
return tmp;
}
! Alternative: sinh(x) — direct formula away from zero, cubic Taylor
! polynomial (x**3/3 + 2x) / 2 when exp(x) - exp(-x) lies in (-5, 0.0005].
real(8) function code(x)
real(8), intent (in) :: x
real(8) :: t_0
real(8) :: tmp
t_0 = exp(x) - exp(-x)
if ((t_0 <= (-5.0d0)) .or. (.not. (t_0 <= 0.0005d0))) then
tmp = t_0 / 2.0d0
else
tmp = (((x * 0.3333333333333333d0) * (x * x)) + (x * 2.0d0)) / 2.0d0
end if
code = tmp
end function
// Alternative: sinh(x) — direct formula away from zero, cubic Taylor
// polynomial (x^3/3 + 2x) / 2 when Math.exp(x) - Math.exp(-x) is in (-5, 0.0005].
public static double code(double x) {
double t_0 = Math.exp(x) - Math.exp(-x);
double tmp;
if ((t_0 <= -5.0) || !(t_0 <= 0.0005)) {
tmp = t_0 / 2.0;
} else {
tmp = (((x * 0.3333333333333333) * (x * x)) + (x * 2.0)) / 2.0;
}
return tmp;
}
# Alternative: sinh(x) (Herbie-generated). The extraction collapsed this onto
# one line, which is invalid Python; line structure restored, logic unchanged.
def code(x):
    # Raw numerator e^x - e^-x; cancellation-prone near x = 0.
    t_0 = math.exp(x) - math.exp(-x)
    tmp = 0
    if (t_0 <= -5.0) or not (t_0 <= 0.0005):
        # Far from zero: direct formula is accurate.
        tmp = t_0 / 2.0
    else:
        # Near zero: cubic Taylor polynomial (x^3/3 + 2x) / 2.
        tmp = (((x * 0.3333333333333333) * (x * x)) + (x * 2.0)) / 2.0
    return tmp
# Alternative: sinh(x) (Herbie-generated). Newlines restored: consecutive
# statements on one line without separators do not parse in Julia.
function code(x)
    t_0 = Float64(exp(x) - exp(Float64(-x)))
    tmp = 0.0
    if ((t_0 <= -5.0) || !(t_0 <= 0.0005))
        tmp = Float64(t_0 / 2.0)
    else
        tmp = Float64(Float64(Float64(Float64(x * 0.3333333333333333) * Float64(x * x)) + Float64(x * 2.0)) / 2.0)
    end
    return tmp
end
% Alternative: sinh(x) — direct formula away from zero, cubic Taylor near it.
% Newlines restored: a MATLAB function declaration must end its line.
function tmp_2 = code(x)
    t_0 = exp(x) - exp(-x);
    tmp = 0.0;
    if ((t_0 <= -5.0) || ~((t_0 <= 0.0005)))
        tmp = t_0 / 2.0;
    else
        tmp = (((x * 0.3333333333333333) * (x * x)) + (x * 2.0)) / 2.0;
    end
    tmp_2 = tmp;
end
(* Alternative: sinh(x) — direct formula away from zero, cubic Taylor polynomial near it; per-operation $MachinePrecision rounding (Herbie-generated). *)
code[x_] := Block[{t$95$0 = N[(N[Exp[x], $MachinePrecision] - N[Exp[(-x)], $MachinePrecision]), $MachinePrecision]}, If[Or[LessEqual[t$95$0, -5.0], N[Not[LessEqual[t$95$0, 0.0005]], $MachinePrecision]], N[(t$95$0 / 2.0), $MachinePrecision], N[(N[(N[(N[(x * 0.3333333333333333), $MachinePrecision] * N[(x * x), $MachinePrecision]), $MachinePrecision] + N[(x * 2.0), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := e^{x} - e^{-x}\\
\mathbf{if}\;t_0 \leq -5 \lor \neg \left(t_0 \leq 0.0005\right):\\
\;\;\;\;\frac{t_0}{2}\\
\mathbf{else}:\\
\;\;\;\;\frac{\left(x \cdot 0.3333333333333333\right) \cdot \left(x \cdot x\right) + x \cdot 2}{2}\\
\end{array}
\end{array}
if (-.f64 (exp.f64 x) (exp.f64 (neg.f64 x))) < -5 or 5.0000000000000001e-4 < (-.f64 (exp.f64 x) (exp.f64 (neg.f64 x))) Initial program 100.0%
if -5 < (-.f64 (exp.f64 x) (exp.f64 (neg.f64 x))) < 5.0000000000000001e-4 Initial program 7.1%
Taylor expanded in x around 0 100.0%
unpow3100.0%
associate-*r*100.0%
distribute-rgt-out100.0%
*-commutative100.0%
+-commutative100.0%
associate-*l*100.0%
fma-def100.0%
Simplified100.0%
fma-udef100.0%
*-commutative100.0%
Applied egg-rr100.0%
distribute-rgt-in100.0%
*-commutative100.0%
associate-*l*100.0%
*-commutative100.0%
Applied egg-rr100.0%
Final simplification100.0%
(FPCore (x)
:precision binary64
(let* ((t_0 (* (* x x) (/ x 6.0))) (t_1 (* (* x 0.3333333333333333) (* x x))))
(if (<= x -5e+156)
t_0
(if (<= x 2e-88)
(/
(*
x
(/
(- 4.0 (* (pow x 4.0) 0.1111111111111111))
(+ 2.0 (* (* x x) -0.3333333333333333))))
2.0)
(if (<= x 8.2e+102)
(/ (/ (- (* t_1 t_1) (* (* x 2.0) (* x 2.0))) (- t_1 (* x 2.0))) 2.0)
t_0)))))
/* Alternative: sinh(x) split by the magnitude of x.
 *   x <= -5e156 or x > 8.2e102 : cubic term x^2 * (x/6) (t_0)
 *   x <= 2e-88                 : Pade-style rational in x (4 - x^4/9)/(2 - x^2/3)
 *   otherwise                  : difference-of-squares form over t_1 = x^3/3. */
double code(double x) {
double t_0 = (x * x) * (x / 6.0);
double t_1 = (x * 0.3333333333333333) * (x * x);
double tmp;
if (x <= -5e+156) {
tmp = t_0;
} else if (x <= 2e-88) {
tmp = (x * ((4.0 - (pow(x, 4.0) * 0.1111111111111111)) / (2.0 + ((x * x) * -0.3333333333333333)))) / 2.0;
} else if (x <= 8.2e+102) {
tmp = (((t_1 * t_1) - ((x * 2.0) * (x * 2.0))) / (t_1 - (x * 2.0))) / 2.0;
} else {
tmp = t_0;
}
return tmp;
}
! Alternative: sinh(x) split by the magnitude of x — cubic term for huge |x|,
! a rational approximation for tiny x, difference-of-squares form otherwise.
real(8) function code(x)
real(8), intent (in) :: x
real(8) :: t_0
real(8) :: t_1
real(8) :: tmp
t_0 = (x * x) * (x / 6.0d0)
t_1 = (x * 0.3333333333333333d0) * (x * x)
if (x <= (-5d+156)) then
tmp = t_0
else if (x <= 2d-88) then
tmp = (x * ((4.0d0 - ((x ** 4.0d0) * 0.1111111111111111d0)) / (2.0d0 + ((x * x) * (-0.3333333333333333d0))))) / 2.0d0
else if (x <= 8.2d+102) then
tmp = (((t_1 * t_1) - ((x * 2.0d0) * (x * 2.0d0))) / (t_1 - (x * 2.0d0))) / 2.0d0
else
tmp = t_0
end if
code = tmp
end function
// Alternative: sinh(x) split by the magnitude of x — cubic term for huge |x|,
// a rational approximation for tiny x, difference-of-squares form otherwise.
public static double code(double x) {
double t_0 = (x * x) * (x / 6.0);
double t_1 = (x * 0.3333333333333333) * (x * x);
double tmp;
if (x <= -5e+156) {
tmp = t_0;
} else if (x <= 2e-88) {
tmp = (x * ((4.0 - (Math.pow(x, 4.0) * 0.1111111111111111)) / (2.0 + ((x * x) * -0.3333333333333333)))) / 2.0;
} else if (x <= 8.2e+102) {
tmp = (((t_1 * t_1) - ((x * 2.0) * (x * 2.0))) / (t_1 - (x * 2.0))) / 2.0;
} else {
tmp = t_0;
}
return tmp;
}
# Alternative: sinh(x) split by the magnitude of x (Herbie-generated). The
# extraction collapsed this onto one invalid line; structure restored.
def code(x):
    t_0 = (x * x) * (x / 6.0)
    t_1 = (x * 0.3333333333333333) * (x * x)
    tmp = 0
    if x <= -5e+156:
        tmp = t_0
    elif x <= 2e-88:
        # Rational approximation for tiny x.
        tmp = (x * ((4.0 - (math.pow(x, 4.0) * 0.1111111111111111)) / (2.0 + ((x * x) * -0.3333333333333333)))) / 2.0
    elif x <= 8.2e+102:
        # Difference-of-squares form over t_1 = x^3 / 3.
        tmp = (((t_1 * t_1) - ((x * 2.0) * (x * 2.0))) / (t_1 - (x * 2.0))) / 2.0
    else:
        tmp = t_0
    return tmp
# Alternative: sinh(x) split by the magnitude of x (Herbie-generated).
# Newlines restored; bare statement juxtaposition does not parse in Julia.
function code(x)
    t_0 = Float64(Float64(x * x) * Float64(x / 6.0))
    t_1 = Float64(Float64(x * 0.3333333333333333) * Float64(x * x))
    tmp = 0.0
    if (x <= -5e+156)
        tmp = t_0
    elseif (x <= 2e-88)
        tmp = Float64(Float64(x * Float64(Float64(4.0 - Float64((x ^ 4.0) * 0.1111111111111111)) / Float64(2.0 + Float64(Float64(x * x) * -0.3333333333333333)))) / 2.0)
    elseif (x <= 8.2e+102)
        tmp = Float64(Float64(Float64(Float64(t_1 * t_1) - Float64(Float64(x * 2.0) * Float64(x * 2.0))) / Float64(t_1 - Float64(x * 2.0))) / 2.0)
    else
        tmp = t_0
    end
    return tmp
end
% Alternative: sinh(x) split by the magnitude of x (Herbie-generated).
% Newlines restored — a MATLAB function declaration must end its line.
function tmp_2 = code(x)
    t_0 = (x * x) * (x / 6.0);
    t_1 = (x * 0.3333333333333333) * (x * x);
    tmp = 0.0;
    if (x <= -5e+156)
        tmp = t_0;
    elseif (x <= 2e-88)
        tmp = (x * ((4.0 - ((x ^ 4.0) * 0.1111111111111111)) / (2.0 + ((x * x) * -0.3333333333333333)))) / 2.0;
    elseif (x <= 8.2e+102)
        tmp = (((t_1 * t_1) - ((x * 2.0) * (x * 2.0))) / (t_1 - (x * 2.0))) / 2.0;
    else
        tmp = t_0;
    end
    tmp_2 = tmp;
end
(* Alternative: sinh(x) split by the magnitude of x — cubic term for huge |x|, rational approximation for tiny x, difference-of-squares form otherwise (Herbie-generated). *)
code[x_] := Block[{t$95$0 = N[(N[(x * x), $MachinePrecision] * N[(x / 6.0), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(N[(x * 0.3333333333333333), $MachinePrecision] * N[(x * x), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[x, -5e+156], t$95$0, If[LessEqual[x, 2e-88], N[(N[(x * N[(N[(4.0 - N[(N[Power[x, 4.0], $MachinePrecision] * 0.1111111111111111), $MachinePrecision]), $MachinePrecision] / N[(2.0 + N[(N[(x * x), $MachinePrecision] * -0.3333333333333333), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], If[LessEqual[x, 8.2e+102], N[(N[(N[(N[(t$95$1 * t$95$1), $MachinePrecision] - N[(N[(x * 2.0), $MachinePrecision] * N[(x * 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(t$95$1 - N[(x * 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], t$95$0]]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \left(x \cdot x\right) \cdot \frac{x}{6}\\
t_1 := \left(x \cdot 0.3333333333333333\right) \cdot \left(x \cdot x\right)\\
\mathbf{if}\;x \leq -5 \cdot 10^{+156}:\\
\;\;\;\;t_0\\
\mathbf{elif}\;x \leq 2 \cdot 10^{-88}:\\
\;\;\;\;\frac{x \cdot \frac{4 - {x}^{4} \cdot 0.1111111111111111}{2 + \left(x \cdot x\right) \cdot -0.3333333333333333}}{2}\\
\mathbf{elif}\;x \leq 8.2 \cdot 10^{+102}:\\
\;\;\;\;\frac{\frac{t_1 \cdot t_1 - \left(x \cdot 2\right) \cdot \left(x \cdot 2\right)}{t_1 - x \cdot 2}}{2}\\
\mathbf{else}:\\
\;\;\;\;t_0\\
\end{array}
\end{array}
if x < -4.99999999999999992e156 or 8.1999999999999999e102 < x Initial program 100.0%
Taylor expanded in x around 0 100.0%
Taylor expanded in x around inf 100.0%
Taylor expanded in x around 0 100.0%
Simplified100.0%
metadata-eval100.0%
div-inv100.0%
cube-mult100.0%
associate-/l*100.0%
Applied egg-rr100.0%
associate-/r/100.0%
Simplified100.0%
if -4.99999999999999992e156 < x < 1.99999999999999987e-88 Initial program 26.9%
Taylor expanded in x around 0 87.3%
+-commutative87.3%
unpow387.3%
associate-*r*87.3%
fma-def87.3%
add-log-exp26.6%
*-commutative26.6%
exp-lft-sqr26.6%
log-prod26.6%
add-log-exp36.5%
add-log-exp87.3%
Applied egg-rr87.3%
fma-udef87.3%
*-commutative87.3%
associate-*r*87.3%
*-commutative87.3%
associate-*l*87.3%
count-287.3%
*-commutative87.3%
distribute-lft-in87.3%
fma-udef87.3%
*-commutative87.3%
*-un-lft-identity87.3%
*-un-lft-identity87.3%
fma-udef87.3%
*-commutative87.3%
*-commutative87.3%
associate-*r*87.3%
fma-def87.3%
Applied egg-rr87.3%
fma-udef87.3%
associate-*r*87.3%
*-commutative87.3%
flip-+91.0%
frac-2neg91.0%
sub-neg91.0%
pow291.0%
*-commutative91.0%
associate-*r*91.0%
metadata-eval91.0%
metadata-eval91.0%
fma-neg91.0%
metadata-eval91.0%
Applied egg-rr91.0%
+-commutative91.0%
distribute-neg-in91.0%
metadata-eval91.0%
sub-neg91.0%
fma-udef91.0%
distribute-neg-in91.0%
*-commutative91.0%
associate-*r*91.0%
metadata-eval91.0%
+-commutative91.0%
sub-neg91.0%
Simplified91.0%
if 1.99999999999999987e-88 < x < 8.1999999999999999e102 Initial program 70.6%
Taylor expanded in x around 0 39.3%
unpow339.3%
associate-*r*39.3%
distribute-rgt-out39.2%
*-commutative39.2%
+-commutative39.2%
associate-*l*39.2%
fma-def39.2%
Simplified39.2%
fma-udef39.2%
*-commutative39.2%
Applied egg-rr39.2%
distribute-lft-in39.3%
flip-+70.7%
*-commutative70.7%
*-commutative70.7%
*-commutative70.7%
associate-*l*70.7%
*-commutative70.7%
associate-*l*70.7%
*-commutative70.7%
*-commutative70.7%
associate-*l*70.7%
Applied egg-rr70.7%
Final simplification91.1%
(FPCore (x)
:precision binary64
(let* ((t_0 (* (* x 0.3333333333333333) (* x x)))
(t_1 (* (* x x) (/ x 6.0)))
(t_2
(/
(/ (- (* t_0 t_0) (* (* x 2.0) (* x 2.0))) (- t_0 (* x 2.0)))
2.0)))
(if (<= x -8.2e+102)
t_1
(if (<= x -2.8e+32)
t_2
(if (<= x 1e+27)
(/ (+ t_0 (* x 2.0)) 2.0)
(if (<= x 8.2e+102) t_2 t_1))))))
/* Alternative: sinh(x) with five magnitude bands — cubic term t_1 for huge
 * |x|, difference-of-squares form t_2 for large |x|, and the cubic Taylor
 * polynomial (t_0 + 2x)/2 for the central band. Note t_2 is evaluated
 * unconditionally; at x = 0 it is 0/0 = NaN, but that branch is not taken
 * there (IEEE semantics, no trap). */
double code(double x) {
double t_0 = (x * 0.3333333333333333) * (x * x);
double t_1 = (x * x) * (x / 6.0);
double t_2 = (((t_0 * t_0) - ((x * 2.0) * (x * 2.0))) / (t_0 - (x * 2.0))) / 2.0;
double tmp;
if (x <= -8.2e+102) {
tmp = t_1;
} else if (x <= -2.8e+32) {
tmp = t_2;
} else if (x <= 1e+27) {
tmp = (t_0 + (x * 2.0)) / 2.0;
} else if (x <= 8.2e+102) {
tmp = t_2;
} else {
tmp = t_1;
}
return tmp;
}
! Alternative: sinh(x) with five magnitude bands — cubic term t_1 for huge
! |x|, difference-of-squares form t_2 for large |x|, cubic Taylor polynomial
! for the central band. t_2 is evaluated eagerly even when unused.
real(8) function code(x)
real(8), intent (in) :: x
real(8) :: t_0
real(8) :: t_1
real(8) :: t_2
real(8) :: tmp
t_0 = (x * 0.3333333333333333d0) * (x * x)
t_1 = (x * x) * (x / 6.0d0)
t_2 = (((t_0 * t_0) - ((x * 2.0d0) * (x * 2.0d0))) / (t_0 - (x * 2.0d0))) / 2.0d0
if (x <= (-8.2d+102)) then
tmp = t_1
else if (x <= (-2.8d+32)) then
tmp = t_2
else if (x <= 1d+27) then
tmp = (t_0 + (x * 2.0d0)) / 2.0d0
else if (x <= 8.2d+102) then
tmp = t_2
else
tmp = t_1
end if
code = tmp
end function
// Alternative: sinh(x) with five magnitude bands — cubic term t_1 for huge
// |x|, difference-of-squares form t_2 for large |x|, cubic Taylor polynomial
// for the central band. t_2 is evaluated eagerly (NaN at x = 0, unused there).
public static double code(double x) {
double t_0 = (x * 0.3333333333333333) * (x * x);
double t_1 = (x * x) * (x / 6.0);
double t_2 = (((t_0 * t_0) - ((x * 2.0) * (x * 2.0))) / (t_0 - (x * 2.0))) / 2.0;
double tmp;
if (x <= -8.2e+102) {
tmp = t_1;
} else if (x <= -2.8e+32) {
tmp = t_2;
} else if (x <= 1e+27) {
tmp = (t_0 + (x * 2.0)) / 2.0;
} else if (x <= 8.2e+102) {
tmp = t_2;
} else {
tmp = t_1;
}
return tmp;
}
# Alternative: sinh(x) with five magnitude bands (Herbie-generated); collapsed
# one-line form restored to valid Python. NOTE(review): t_2 is computed
# unconditionally, so x values making t_0 == 2x (e.g. x = 0) raise
# ZeroDivisionError in Python even when t_2 is unused — faithful to the
# generated code, where IEEE 0/0 would be NaN.
def code(x):
    t_0 = (x * 0.3333333333333333) * (x * x)
    t_1 = (x * x) * (x / 6.0)
    t_2 = (((t_0 * t_0) - ((x * 2.0) * (x * 2.0))) / (t_0 - (x * 2.0))) / 2.0
    tmp = 0
    if x <= -8.2e+102:
        tmp = t_1
    elif x <= -2.8e+32:
        tmp = t_2
    elif x <= 1e+27:
        tmp = (t_0 + (x * 2.0)) / 2.0
    elif x <= 8.2e+102:
        tmp = t_2
    else:
        tmp = t_1
    return tmp
# Alternative: sinh(x) with five magnitude bands (Herbie-generated).
# Newlines restored; bare statement juxtaposition does not parse in Julia.
function code(x)
    t_0 = Float64(Float64(x * 0.3333333333333333) * Float64(x * x))
    t_1 = Float64(Float64(x * x) * Float64(x / 6.0))
    t_2 = Float64(Float64(Float64(Float64(t_0 * t_0) - Float64(Float64(x * 2.0) * Float64(x * 2.0))) / Float64(t_0 - Float64(x * 2.0))) / 2.0)
    tmp = 0.0
    if (x <= -8.2e+102)
        tmp = t_1
    elseif (x <= -2.8e+32)
        tmp = t_2
    elseif (x <= 1e+27)
        tmp = Float64(Float64(t_0 + Float64(x * 2.0)) / 2.0)
    elseif (x <= 8.2e+102)
        tmp = t_2
    else
        tmp = t_1
    end
    return tmp
end
% Alternative: sinh(x) with five magnitude bands (Herbie-generated).
% Newlines restored — a MATLAB function declaration must end its line.
function tmp_2 = code(x)
    t_0 = (x * 0.3333333333333333) * (x * x);
    t_1 = (x * x) * (x / 6.0);
    t_2 = (((t_0 * t_0) - ((x * 2.0) * (x * 2.0))) / (t_0 - (x * 2.0))) / 2.0;
    tmp = 0.0;
    if (x <= -8.2e+102)
        tmp = t_1;
    elseif (x <= -2.8e+32)
        tmp = t_2;
    elseif (x <= 1e+27)
        tmp = (t_0 + (x * 2.0)) / 2.0;
    elseif (x <= 8.2e+102)
        tmp = t_2;
    else
        tmp = t_1;
    end
    tmp_2 = tmp;
end
(* Alternative: sinh(x) with five magnitude bands — cubic term for huge |x|, difference-of-squares form for large |x|, cubic Taylor polynomial centrally (Herbie-generated). *)
code[x_] := Block[{t$95$0 = N[(N[(x * 0.3333333333333333), $MachinePrecision] * N[(x * x), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(N[(x * x), $MachinePrecision] * N[(x / 6.0), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$2 = N[(N[(N[(N[(t$95$0 * t$95$0), $MachinePrecision] - N[(N[(x * 2.0), $MachinePrecision] * N[(x * 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(t$95$0 - N[(x * 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]}, If[LessEqual[x, -8.2e+102], t$95$1, If[LessEqual[x, -2.8e+32], t$95$2, If[LessEqual[x, 1e+27], N[(N[(t$95$0 + N[(x * 2.0), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], If[LessEqual[x, 8.2e+102], t$95$2, t$95$1]]]]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \left(x \cdot 0.3333333333333333\right) \cdot \left(x \cdot x\right)\\
t_1 := \left(x \cdot x\right) \cdot \frac{x}{6}\\
t_2 := \frac{\frac{t_0 \cdot t_0 - \left(x \cdot 2\right) \cdot \left(x \cdot 2\right)}{t_0 - x \cdot 2}}{2}\\
\mathbf{if}\;x \leq -8.2 \cdot 10^{+102}:\\
\;\;\;\;t_1\\
\mathbf{elif}\;x \leq -2.8 \cdot 10^{+32}:\\
\;\;\;\;t_2\\
\mathbf{elif}\;x \leq 10^{+27}:\\
\;\;\;\;\frac{t_0 + x \cdot 2}{2}\\
\mathbf{elif}\;x \leq 8.2 \cdot 10^{+102}:\\
\;\;\;\;t_2\\
\mathbf{else}:\\
\;\;\;\;t_1\\
\end{array}
\end{array}
if x < -8.1999999999999999e102 or 8.1999999999999999e102 < x Initial program 100.0%
Taylor expanded in x around 0 100.0%
Taylor expanded in x around inf 100.0%
Taylor expanded in x around 0 100.0%
Simplified100.0%
metadata-eval100.0%
div-inv100.0%
cube-mult100.0%
associate-/l*100.0%
Applied egg-rr100.0%
associate-/r/100.0%
Simplified100.0%
if -8.1999999999999999e102 < x < -2.8e32 or 1e27 < x < 8.1999999999999999e102 Initial program 100.0%
Taylor expanded in x around 0 6.1%
unpow36.1%
associate-*r*6.1%
distribute-rgt-out6.1%
*-commutative6.1%
+-commutative6.1%
associate-*l*6.1%
fma-def6.1%
Simplified6.1%
fma-udef6.1%
*-commutative6.1%
Applied egg-rr6.1%
distribute-lft-in6.1%
flip-+80.1%
*-commutative80.1%
*-commutative80.1%
*-commutative80.1%
associate-*l*80.1%
*-commutative80.1%
associate-*l*80.1%
*-commutative80.1%
*-commutative80.1%
associate-*l*80.1%
Applied egg-rr80.1%
if -2.8e32 < x < 1e27 Initial program 18.1%
Taylor expanded in x around 0 88.7%
unpow388.7%
associate-*r*88.7%
distribute-rgt-out88.7%
*-commutative88.7%
+-commutative88.7%
associate-*l*88.7%
fma-def88.7%
Simplified88.7%
fma-udef88.7%
*-commutative88.7%
Applied egg-rr88.7%
distribute-rgt-in88.7%
*-commutative88.7%
associate-*l*88.7%
*-commutative88.7%
Applied egg-rr88.7%
Final simplification91.8%
(FPCore (x) :precision binary64 (/ (+ (* (* x 0.3333333333333333) (* x x)) (* x 2.0)) 2.0))
/* Alternative: cubic Taylor polynomial (x^3/3 + 2x) / 2 for sinh(x),
 * applied everywhere (Herbie-generated). */
double code(double x) {
return (((x * 0.3333333333333333) * (x * x)) + (x * 2.0)) / 2.0;
}
! Alternative: cubic Taylor polynomial (x**3/3 + 2x) / 2 for sinh(x).
real(8) function code(x)
real(8), intent (in) :: x
code = (((x * 0.3333333333333333d0) * (x * x)) + (x * 2.0d0)) / 2.0d0
end function
// Alternative: cubic Taylor polynomial (x^3/3 + 2x) / 2 for sinh(x).
public static double code(double x) {
return (((x * 0.3333333333333333) * (x * x)) + (x * 2.0)) / 2.0;
}
# Alternative: cubic Taylor polynomial (x^3/3 + 2x) / 2 for sinh(x) (Herbie-generated).
def code(x): return (((x * 0.3333333333333333) * (x * x)) + (x * 2.0)) / 2.0
# Alternative: cubic Taylor polynomial (x^3/3 + 2x) / 2 for sinh(x), with explicit Float64 rounding.
function code(x) return Float64(Float64(Float64(Float64(x * 0.3333333333333333) * Float64(x * x)) + Float64(x * 2.0)) / 2.0) end
% Alternative: cubic Taylor polynomial (x^3/3 + 2x) / 2 for sinh(x).
% Newlines restored — a MATLAB function declaration must end its line.
function tmp = code(x)
    tmp = (((x * 0.3333333333333333) * (x * x)) + (x * 2.0)) / 2.0;
end
(* Alternative: cubic Taylor polynomial (x^3/3 + 2x) / 2 for sinh(x), per-operation $MachinePrecision rounding (Herbie-generated). *)
code[x_] := N[(N[(N[(N[(x * 0.3333333333333333), $MachinePrecision] * N[(x * x), $MachinePrecision]), $MachinePrecision] + N[(x * 2.0), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(x \cdot 0.3333333333333333\right) \cdot \left(x \cdot x\right) + x \cdot 2}{2}
\end{array}
Initial program 54.3%
Taylor expanded in x around 0 84.9%
unpow384.9%
associate-*r*84.9%
distribute-rgt-out84.9%
*-commutative84.9%
+-commutative84.9%
associate-*l*84.9%
fma-def84.9%
Simplified84.9%
fma-udef84.9%
*-commutative84.9%
Applied egg-rr84.9%
distribute-rgt-in84.9%
*-commutative84.9%
associate-*l*84.9%
*-commutative84.9%
Applied egg-rr84.9%
Final simplification84.9%
(FPCore (x) :precision binary64 (if (or (<= x -2.5) (not (<= x 2.45))) (* (* x x) (/ x 6.0)) (/ (* x 2.0) 2.0)))
/* Alternative: sinh(x) approximated by x^3/6 for |x| outside (-2.5, 2.45]
 * and by x (written as 2x/2) inside it (Herbie-generated). */
double code(double x) {
double tmp;
if ((x <= -2.5) || !(x <= 2.45)) {
tmp = (x * x) * (x / 6.0);
} else {
tmp = (x * 2.0) / 2.0;
}
return tmp;
}
! Alternative: sinh(x) approximated by x**3/6 for x outside (-2.5, 2.45]
! and by x (written as 2x/2) inside it (Herbie-generated).
real(8) function code(x)
real(8), intent (in) :: x
real(8) :: tmp
if ((x <= (-2.5d0)) .or. (.not. (x <= 2.45d0))) then
tmp = (x * x) * (x / 6.0d0)
else
tmp = (x * 2.0d0) / 2.0d0
end if
code = tmp
end function
// Alternative: sinh(x) approximated by x^3/6 for x outside (-2.5, 2.45]
// and by x (written as 2x/2) inside it (Herbie-generated).
public static double code(double x) {
double tmp;
if ((x <= -2.5) || !(x <= 2.45)) {
tmp = (x * x) * (x / 6.0);
} else {
tmp = (x * 2.0) / 2.0;
}
return tmp;
}
# Alternative: sinh(x) approximated by x^3/6 outside (-2.5, 2.45] and by x
# inside it (Herbie-generated); collapsed one-line form restored to valid Python.
def code(x):
    tmp = 0
    if (x <= -2.5) or not (x <= 2.45):
        tmp = (x * x) * (x / 6.0)
    else:
        tmp = (x * 2.0) / 2.0
    return tmp
# Alternative: sinh(x) approximated by x^3/6 outside (-2.5, 2.45] and by x
# inside it (Herbie-generated); newlines restored for valid Julia syntax.
function code(x)
    tmp = 0.0
    if ((x <= -2.5) || !(x <= 2.45))
        tmp = Float64(Float64(x * x) * Float64(x / 6.0))
    else
        tmp = Float64(Float64(x * 2.0) / 2.0)
    end
    return tmp
end
% Alternative: sinh(x) approximated by x^3/6 outside (-2.5, 2.45] and by x
% inside it. Newlines restored — a MATLAB function declaration must end its line.
function tmp_2 = code(x)
    tmp = 0.0;
    if ((x <= -2.5) || ~((x <= 2.45)))
        tmp = (x * x) * (x / 6.0);
    else
        tmp = (x * 2.0) / 2.0;
    end
    tmp_2 = tmp;
end
(* Alternative: sinh(x) approximated by x^3/6 outside (-2.5, 2.45] and by x inside it (Herbie-generated). *)
code[x_] := If[Or[LessEqual[x, -2.5], N[Not[LessEqual[x, 2.45]], $MachinePrecision]], N[(N[(x * x), $MachinePrecision] * N[(x / 6.0), $MachinePrecision]), $MachinePrecision], N[(N[(x * 2.0), $MachinePrecision] / 2.0), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -2.5 \lor \neg \left(x \leq 2.45\right):\\
\;\;\;\;\left(x \cdot x\right) \cdot \frac{x}{6}\\
\mathbf{else}:\\
\;\;\;\;\frac{x \cdot 2}{2}\\
\end{array}
\end{array}
if x < -2.5 or 2.4500000000000002 < x Initial program 100.0%
Taylor expanded in x around 0 70.6%
Taylor expanded in x around inf 70.6%
Taylor expanded in x around 0 70.6%
Simplified70.6%
metadata-eval70.6%
div-inv70.6%
cube-mult70.6%
associate-/l*70.6%
Applied egg-rr70.6%
associate-/r/70.6%
Simplified70.6%
if -2.5 < x < 2.4500000000000002 Initial program 7.8%
Taylor expanded in x around 0 99.0%
Final simplification84.7%
(FPCore (x) :precision binary64 (/ (* x (+ 2.0 (* x (* x 0.3333333333333333)))) 2.0))
/* Alternative: Horner-form cubic Taylor polynomial x(2 + x^2/3) / 2
 * for sinh(x) (Herbie-generated). */
double code(double x) {
return (x * (2.0 + (x * (x * 0.3333333333333333)))) / 2.0;
}
! Alternative: Horner-form cubic Taylor polynomial x(2 + x**2/3) / 2 for sinh(x).
real(8) function code(x)
real(8), intent (in) :: x
code = (x * (2.0d0 + (x * (x * 0.3333333333333333d0)))) / 2.0d0
end function
// Alternative: Horner-form cubic Taylor polynomial x(2 + x^2/3) / 2 for sinh(x).
public static double code(double x) {
return (x * (2.0 + (x * (x * 0.3333333333333333)))) / 2.0;
}
# Alternative: Horner-form cubic Taylor polynomial x(2 + x^2/3) / 2 for sinh(x) (Herbie-generated).
def code(x): return (x * (2.0 + (x * (x * 0.3333333333333333)))) / 2.0
# Alternative: Horner-form cubic Taylor polynomial x(2 + x^2/3) / 2 for sinh(x), explicit Float64 rounding.
function code(x) return Float64(Float64(x * Float64(2.0 + Float64(x * Float64(x * 0.3333333333333333)))) / 2.0) end
% Alternative: Horner-form cubic Taylor polynomial x(2 + x^2/3) / 2 for sinh(x).
% Newlines restored — a MATLAB function declaration must end its line.
function tmp = code(x)
    tmp = (x * (2.0 + (x * (x * 0.3333333333333333)))) / 2.0;
end
(* Alternative: Horner-form cubic Taylor polynomial x(2 + x^2/3) / 2 for sinh(x) (Herbie-generated). *)
code[x_] := N[(N[(x * N[(2.0 + N[(x * N[(x * 0.3333333333333333), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]
\begin{array}{l}
\\
\frac{x \cdot \left(2 + x \cdot \left(x \cdot 0.3333333333333333\right)\right)}{2}
\end{array}
Initial program 54.3%
Taylor expanded in x around 0 84.9%
unpow384.9%
associate-*r*84.9%
distribute-rgt-out84.9%
*-commutative84.9%
+-commutative84.9%
associate-*l*84.9%
fma-def84.9%
Simplified84.9%
fma-udef84.9%
*-commutative84.9%
Applied egg-rr84.9%
Final simplification84.9%
(FPCore (x) :precision binary64 (/ (* x 2.0) 2.0))
/* Alternative: linear Taylor term — sinh(x) ~ x, written as 2x/2
 * (Herbie-generated). */
double code(double x) {
return (x * 2.0) / 2.0;
}
! Alternative: linear Taylor term — sinh(x) ~ x, written as 2x/2.
real(8) function code(x)
real(8), intent (in) :: x
code = (x * 2.0d0) / 2.0d0
end function
// Alternative: linear Taylor term — sinh(x) ~ x, written as 2x/2.
public static double code(double x) {
return (x * 2.0) / 2.0;
}
# Alternative: linear Taylor term — sinh(x) ~ x, written as 2x/2 (Herbie-generated).
def code(x): return (x * 2.0) / 2.0
# Alternative: linear Taylor term — sinh(x) ~ x, written as 2x/2 (Herbie-generated).
function code(x) return Float64(Float64(x * 2.0) / 2.0) end
% Alternative: linear Taylor term — sinh(x) ~ x, written as 2x/2.
% Newlines restored — a MATLAB function declaration must end its line.
function tmp = code(x)
    tmp = (x * 2.0) / 2.0;
end
(* Alternative: linear Taylor term — sinh(x) ~ x, written as 2x/2 (Herbie-generated). *)
code[x_] := N[(N[(x * 2.0), $MachinePrecision] / 2.0), $MachinePrecision]
\begin{array}{l}
\\
\frac{x \cdot 2}{2}
\end{array}
Initial program 54.3%
Taylor expanded in x around 0 51.8%
Final simplification51.8%
(FPCore (x) :precision binary64 -1.0)
/* Alternative: degenerate constant -1.0 (2.7% accuracy per the trace below;
 * included by Herbie only as a trivial speed/accuracy tradeoff point). */
double code(double x) {
return -1.0;
}
! Alternative: degenerate constant -1.0 (trivial speed/accuracy tradeoff point).
real(8) function code(x)
real(8), intent (in) :: x
code = -1.0d0
end function
// Alternative: degenerate constant -1.0 (trivial speed/accuracy tradeoff point).
public static double code(double x) {
return -1.0;
}
# Alternative: degenerate constant -1.0 (trivial speed/accuracy tradeoff point).
def code(x): return -1.0
# Alternative: degenerate constant -1.0 (trivial speed/accuracy tradeoff point).
function code(x) return -1.0 end
% Alternative: degenerate constant -1.0 (trivial speed/accuracy tradeoff point).
% Newlines restored — a MATLAB function declaration must end its line.
function tmp = code(x)
    tmp = -1.0;
end
(* Alternative: degenerate constant -1.0 (trivial speed/accuracy tradeoff point). *)
code[x_] := -1.0
\begin{array}{l}
\\
-1
\end{array}
Initial program 54.3%
Applied egg-rr2.7%
Final simplification2.7%
(FPCore (x) :precision binary64 0.0)
/* Alternative: degenerate constant 0.0 (3.5% accuracy per the trace below;
 * trivial speed/accuracy tradeoff point). */
double code(double x) {
return 0.0;
}
! Alternative: degenerate constant 0.0 (trivial speed/accuracy tradeoff point).
real(8) function code(x)
real(8), intent (in) :: x
code = 0.0d0
end function
// Alternative: degenerate constant 0.0 (trivial speed/accuracy tradeoff point).
public static double code(double x) {
return 0.0;
}
# Alternative: degenerate constant 0.0 (trivial speed/accuracy tradeoff point).
def code(x): return 0.0
# Alternative: degenerate constant 0.0 (trivial speed/accuracy tradeoff point).
function code(x) return 0.0 end
% Alternative: degenerate constant 0.0 (trivial speed/accuracy tradeoff point).
% Newlines restored — a MATLAB function declaration must end its line.
function tmp = code(x)
    tmp = 0.0;
end
(* Alternative: degenerate constant 0.0 (trivial speed/accuracy tradeoff point). *)
code[x_] := 0.0
\begin{array}{l}
\\
0
\end{array}
Initial program 54.3%
Applied egg-rr3.5%
Final simplification3.5%
herbie shell --seed 2023207
(FPCore (x)
:name "Hyperbolic sine"
:precision binary64
(/ (- (exp x) (exp (- x))) 2.0))