Math FPCore C Fortran Java Python Julia MATLAB Wolfram TeX

\[x \cdot 0.5 + y \cdot \left(\left(1 - z\right) + \log z\right)\]
↓
\[y \cdot \left(1 + \log z\right) + \left(0.5 \cdot x + z \cdot \left(-y\right)\right)
\]
(FPCore (x y z) :precision binary64 (+ (* x 0.5) (* y (+ (- 1.0 z) (log z)))))
↓
(FPCore (x y z)
:precision binary64
(+ (* y (+ 1.0 (log z))) (+ (* 0.5 x) (* z (- y))))) double code(double x, double y, double z) {
    /* Original expression: x*0.5 + y*((1.0 - z) + log(z)) in IEEE double.
       NOTE: log(z) is only defined for z > 0. The parenthesization is
       deliberate — it determines the floating-point rounding behavior. */
    return (x * 0.5) + (y * ((1.0 - z) + log(z)));
}
↓
double code(double x, double y, double z) {
    /* Accuracy-oriented rewriting of x*0.5 + y*((1.0 - z) + log(z)):
       y*(1.0 + log(z)) + (0.5*x + z*(-y)).
       Keep the exact association/ordering — it fixes the rounding. */
    return (y * (1.0 + log(z))) + ((0.5 * x) + (z * -y));
}
! Original expression: x*0.5 + y*((1 - z) + log(z)) in double precision.
! NOTE: log(z) is only defined for z > 0; the parenthesization is
! deliberate — it determines the floating-point rounding behavior.
real(8) function code(x, y, z)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    code = (x * 0.5d0) + (y * ((1.0d0 - z) + log(z)))
end function
↓
! Accuracy-oriented rewriting of x*0.5 + y*((1 - z) + log(z)):
! y*(1 + log(z)) + (0.5*x + z*(-y)).
! Keep the exact association/ordering — it fixes the rounding.
real(8) function code(x, y, z)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    code = (y * (1.0d0 + log(z))) + ((0.5d0 * x) + (z * -y))
end function
/**
 * Original expression: x*0.5 + y*((1.0 - z) + log(z)) in IEEE double.
 * NOTE: Math.log(z) returns NaN for z &lt; 0; the parenthesization is
 * deliberate — it determines the floating-point rounding behavior.
 */
public static double code(double x, double y, double z) {
    return (x * 0.5) + (y * ((1.0 - z) + Math.log(z)));
}
↓
/**
 * Accuracy-oriented rewriting of x*0.5 + y*((1.0 - z) + log(z)):
 * y*(1.0 + log(z)) + (0.5*x + z*(-y)).
 * Keep the exact association/ordering — it fixes the rounding.
 */
public static double code(double x, double y, double z) {
    return (y * (1.0 + Math.log(z))) + ((0.5 * x) + (z * -y));
}
def code(x, y, z):
    """Return x*0.5 + y*((1.0 - z) + log(z)).

    Requires z > 0 (``math.log`` domain). The parenthesization is
    deliberate — it determines the floating-point rounding behavior.
    NOTE(review): assumes ``math`` is imported at module level — the
    import is not visible in this snippet.
    """
    return (x * 0.5) + (y * ((1.0 - z) + math.log(z)))
↓
def code(x, y, z):
    """Accuracy-oriented rewriting of x*0.5 + y*((1.0 - z) + log(z)).

    Computes y*(1.0 + log(z)) + (0.5*x + z*(-y)). Keep the exact
    association/ordering — it fixes the rounding.
    NOTE(review): assumes ``math`` is imported at module level — the
    import is not visible in this snippet.
    """
    return (y * (1.0 + math.log(z))) + ((0.5 * x) + (z * -y))
# Original expression: x*0.5 + y*((1.0 - z) + log(z)), with every
# intermediate explicitly rounded to Float64 — keep the Float64(...)
# wrappers and the parenthesization: they fix the rounding behavior.
function code(x, y, z)
    return Float64(Float64(x * 0.5) + Float64(y * Float64(Float64(1.0 - z) + log(z))))
end
↓
# Accuracy-oriented rewriting: y*(1.0 + log(z)) + (0.5*x + z*(-y)),
# with every intermediate explicitly rounded to Float64 — keep the
# Float64(...) wrappers and the association: they fix the rounding.
function code(x, y, z)
    return Float64(Float64(y * Float64(1.0 + log(z))) + Float64(Float64(0.5 * x) + Float64(z * Float64(-y))))
end
function tmp = code(x, y, z)
    % Original expression: x*0.5 + y*((1.0 - z) + log(z)).
    % NOTE: log(z) is real only for z > 0; the parenthesization is
    % deliberate — it determines the floating-point rounding behavior.
    tmp = (x * 0.5) + (y * ((1.0 - z) + log(z)));
end
↓
function tmp = code(x, y, z)
    % Accuracy-oriented rewriting of x*0.5 + y*((1.0 - z) + log(z)):
    % y*(1.0 + log(z)) + (0.5*x + z*(-y)).
    % Keep the exact association/ordering — it fixes the rounding.
    tmp = (y * (1.0 + log(z))) + ((0.5 * x) + (z * -y));
end
code[x_, y_, z_] := N[(N[(x * 0.5), $MachinePrecision] + N[(y * N[(N[(1.0 - z), $MachinePrecision] + N[Log[z], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
↓
code[x_, y_, z_] := N[(N[(y * N[(1.0 + N[Log[z], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(N[(0.5 * x), $MachinePrecision] + N[(z * (-y)), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
x \cdot 0.5 + y \cdot \left(\left(1 - z\right) + \log z\right)
↓
y \cdot \left(1 + \log z\right) + \left(0.5 \cdot x + z \cdot \left(-y\right)\right)
Alternatives

Alternative 1 Error 18.8 Cost 7248
\[\begin{array}{l}
t_0 := \left(1 + \log z\right) \cdot y\\
t_1 := x \cdot 0.5 + z \cdot \left(-y\right)\\
t_2 := x \cdot 0.5 + y \cdot \left(-z\right)\\
\mathbf{if}\;z \leq 5.4 \cdot 10^{-238}:\\
\;\;\;\;t_1 \cdot \left(t_1 \cdot \frac{2}{x}\right)\\
\mathbf{elif}\;z \leq 1.6 \cdot 10^{-176}:\\
\;\;\;\;t_0\\
\mathbf{elif}\;z \leq 2.2 \cdot 10^{-155}:\\
\;\;\;\;t_2\\
\mathbf{elif}\;z \leq 5 \cdot 10^{-113}:\\
\;\;\;\;t_0\\
\mathbf{else}:\\
\;\;\;\;t_2\\
\end{array}
\]
Alternative 2 Error 10.5 Cost 7112
\[\begin{array}{l}
t_0 := y \cdot \left(\left(\log z - z\right) + 1\right)\\
\mathbf{if}\;y \leq -21000000000000:\\
\;\;\;\;t_0\\
\mathbf{elif}\;y \leq 1.8 \cdot 10^{+55}:\\
\;\;\;\;x \cdot 0.5 + y \cdot \left(-z\right)\\
\mathbf{else}:\\
\;\;\;\;t_0\\
\end{array}
\]
Alternative 3 Error 0.9 Cost 7108
\[\begin{array}{l}
\mathbf{if}\;z \leq 0.28:\\
\;\;\;\;y \cdot \left(1 + \log z\right) + 0.5 \cdot x\\
\mathbf{else}:\\
\;\;\;\;x \cdot 0.5 + y \cdot \left(-z\right)\\
\end{array}
\]
Alternative 4 Error 0.1 Cost 7104
\[x \cdot 0.5 + y \cdot \left(\left(1 - z\right) + \log z\right)
\]
Alternative 5 Error 29.3 Cost 784
\[\begin{array}{l}
t_0 := z \cdot \left(-y\right)\\
\mathbf{if}\;x \leq -180000000000:\\
\;\;\;\;0.5 \cdot x\\
\mathbf{elif}\;x \leq 6.2 \cdot 10^{-149}:\\
\;\;\;\;t_0\\
\mathbf{elif}\;x \leq 5.5 \cdot 10^{-126}:\\
\;\;\;\;0.5 \cdot x\\
\mathbf{elif}\;x \leq 0.41:\\
\;\;\;\;t_0\\
\mathbf{else}:\\
\;\;\;\;0.5 \cdot x\\
\end{array}
\]
Alternative 6 Error 18.8 Cost 512
\[x \cdot 0.5 + y \cdot \left(-z\right)
\]
Alternative 7 Error 35.2 Cost 192
\[0.5 \cdot x
\]