Math FPCore C Fortran Java Python Julia MATLAB Wolfram TeX

\[x \cdot 0.5 + y \cdot \left(\left(1 - z\right) + \log z\right)\]
↓
\[x \cdot 0.5 + y \cdot \left(\left(1 - z\right) + \log z\right)
\]
(FPCore (x y z) :precision binary64 (+ (* x 0.5) (* y (+ (- 1.0 z) (log z))))) ↓
(FPCore (x y z) :precision binary64 (+ (* x 0.5) (* y (+ (- 1.0 z) (log z))))) double code(double x, double y, double z) {
    /* Evaluates x/2 + y*((1 - z) + ln z) in binary64; log() comes from <math.h>. */
    return (x * 0.5) + (y * ((1.0 - z) + log(z)));
}
↓
double code(double x, double y, double z) {
return (x * 0.5) + (y * ((1.0 - z) + log(z)));
}
! Evaluates x/2 + y*((1 - z) + log(z)) in real(8) precision.
! Grouping matches the original single-expression form exactly.
real(8) function code(x, y, z)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8) :: half_x, bracket
    half_x = x * 0.5d0
    bracket = (1.0d0 - z) + log(z)
    code = half_x + (y * bracket)
end function
↓
! Evaluates x/2 + y*((1 - z) + log(z)) in real(8) (double) precision.
real(8) function code(x, y, z)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    ! d0 suffixes keep the literals 0.5 and 1.0 in double precision.
    code = (x * 0.5d0) + (y * ((1.0d0 - z) + log(z)))
end function
/**
 * Evaluates x/2 + y * ((1 - z) + ln z) in double precision.
 * Named intermediates keep the original expression grouping,
 * so the floating-point result is bit-identical.
 */
public static double code(double x, double y, double z) {
    double halfX = x * 0.5;
    double bracket = (1.0 - z) + Math.log(z);
    return halfX + (y * bracket);
}
↓
/** Evaluates x * 0.5 + y * ((1.0 - z) + ln z) in double precision. */
public static double code(double x, double y, double z) {
    return (x * 0.5) + (y * ((1.0 - z) + Math.log(z)));
}
def code(x, y, z):
    """Evaluate x/2 + y*((1 - z) + ln z).

    Intermediates are named but grouped exactly as the original
    expression, so the float rounding sequence is unchanged.
    """
    half_x = x * 0.5
    bracket = (1.0 - z) + math.log(z)
    return half_x + (y * bracket)
↓
def code(x, y, z):
    """Evaluate x * 0.5 + y * ((1.0 - z) + ln z) using math.log."""
    return (x * 0.5) + (y * ((1.0 - z) + math.log(z)))
# Evaluate x/2 + y*((1 - z) + log(z)); every intermediate is forced
# through Float64(...) at the same points as the original, so the
# binary64 rounding sequence is identical.
function code(x, y, z)
    half_x = Float64(x * 0.5)
    bracket = Float64(Float64(1.0 - z) + log(z))
    return Float64(half_x + Float64(y * bracket))
end
↓
# Computes x*0.5 + y*((1 - z) + log(z)); the explicit Float64(...) wrappers
# round each intermediate to binary64, matching the FPCore :precision spec.
function code(x, y, z)
    return Float64(Float64(x * 0.5) + Float64(y * Float64(Float64(1.0 - z) + log(z))))
end
% Evaluates x/2 + y*((1 - z) + log(z)) in double precision.
% Intermediates are named but grouped exactly as the original expression.
function tmp = code(x, y, z)
    half_x = x * 0.5;
    bracket = (1.0 - z) + log(z);
    tmp = half_x + (y * bracket);
end
↓
% Computes x * 0.5 + y * ((1.0 - z) + log(z)) in double precision.
function tmp = code(x, y, z)
    tmp = (x * 0.5) + (y * ((1.0 - z) + log(z)));
end
code[x_, y_, z_] := N[(N[(x * 0.5), $MachinePrecision] + N[(y * N[(N[(1.0 - z), $MachinePrecision] + N[Log[z], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
↓
code[x_, y_, z_] := N[(N[(x * 0.5), $MachinePrecision] + N[(y * N[(N[(1.0 - z), $MachinePrecision] + N[Log[z], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
x \cdot 0.5 + y \cdot \left(\left(1 - z\right) + \log z\right)
↓
x \cdot 0.5 + y \cdot \left(\left(1 - z\right) + \log z\right)
Alternatives

Alternative 1 Accuracy 81.7% Cost 7377
\[\begin{array}{l}
t_0 := y \cdot \left(\left(1 - z\right) + \log z\right)\\
\mathbf{if}\;y \leq -7.6 \cdot 10^{-39}:\\
\;\;\;\;t_0\\
\mathbf{elif}\;y \leq 1.25 \cdot 10^{+32}:\\
\;\;\;\;x \cdot 0.5 - y \cdot z\\
\mathbf{elif}\;y \leq 1.65 \cdot 10^{+82} \lor \neg \left(y \leq 1.45 \cdot 10^{+116}\right):\\
\;\;\;\;t_0\\
\mathbf{else}:\\
\;\;\;\;x \cdot 0.5\\
\end{array}
\]
Alternative 2 Accuracy 81.7% Cost 7376
\[\begin{array}{l}
t_0 := y \cdot \left(\left(1 - z\right) + \log z\right)\\
\mathbf{if}\;y \leq -8.2 \cdot 10^{-39}:\\
\;\;\;\;t_0\\
\mathbf{elif}\;y \leq 5 \cdot 10^{+31}:\\
\;\;\;\;x \cdot 0.5 - y \cdot z\\
\mathbf{elif}\;y \leq 1.65 \cdot 10^{+82}:\\
\;\;\;\;y + y \cdot \left(\log z - z\right)\\
\mathbf{elif}\;y \leq 1.45 \cdot 10^{+116}:\\
\;\;\;\;x \cdot 0.5\\
\mathbf{else}:\\
\;\;\;\;t_0\\
\end{array}
\]
Alternative 3 Accuracy 98.5% Cost 7108
\[\begin{array}{l}
\mathbf{if}\;z \leq 0.035:\\
\;\;\;\;y \cdot \log z + \left(y + x \cdot 0.5\right)\\
\mathbf{else}:\\
\;\;\;\;x \cdot 0.5 - y \cdot z\\
\end{array}
\]
Alternative 4 Accuracy 77.9% Cost 6985
\[\begin{array}{l}
\mathbf{if}\;y \leq -6.4 \cdot 10^{+142} \lor \neg \left(y \leq 3.8 \cdot 10^{+136}\right):\\
\;\;\;\;y \cdot \left(1 + \log z\right)\\
\mathbf{else}:\\
\;\;\;\;x \cdot 0.5 - y \cdot z\\
\end{array}
\]
Alternative 5 Accuracy 55.4% Cost 520
\[\begin{array}{l}
\mathbf{if}\;x \leq -1.8 \cdot 10^{-123}:\\
\;\;\;\;x \cdot 0.5\\
\mathbf{elif}\;x \leq 90000000:\\
\;\;\;\;y \cdot \left(-z\right)\\
\mathbf{else}:\\
\;\;\;\;x \cdot 0.5\\
\end{array}
\]
Alternative 6 Accuracy 71.8% Cost 448
\[x \cdot 0.5 - y \cdot z
\]
Alternative 7 Accuracy 46.1% Cost 192
\[x \cdot 0.5
\]
Alternative 8 Accuracy 2.1% Cost 64
\[y
\]