
(FPCore (x y z) :precision binary64 (- (- (* x (log y)) z) y))
double code(double x, double y, double z) {
return ((x * log(y)) - z) - y;
}
! Evaluate (x*log(y) - z) - y in binary64, matching the FPCore spec above.
real(8) function code(x, y, z)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8) :: log_term
log_term = x * log(y)
code = (log_term - z) - y
end function
/** Evaluates (x * ln(y) - z) - y in double precision. */
public static double code(double x, double y, double z) {
    final double logTerm = x * Math.log(y);
    return (logTerm - z) - y;
}
def code(x, y, z): return ((x * math.log(y)) - z) - y
function code(x, y, z) return Float64(Float64(Float64(x * log(y)) - z) - y) end
function tmp = code(x, y, z) tmp = ((x * log(y)) - z) - y; end
code[x_, y_, z_] := N[(N[(N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision] - z), $MachinePrecision] - y), $MachinePrecision]
\begin{array}{l}
\\
\left(x \cdot \log y - z\right) - y
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 6 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x y z) :precision binary64 (- (- (* x (log y)) z) y))
/* Alternative: identical to the initial program, (x*log(y) - z) - y. */
double code(double x, double y, double z) {
return ((x * log(y)) - z) - y;
}
! Alternative: identical to the initial program, (x*log(y) - z) - y.
real(8) function code(x, y, z)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
code = ((x * log(y)) - z) - y
end function
// Alternative: identical to the initial program, (x*log(y) - z) - y.
public static double code(double x, double y, double z) {
return ((x * Math.log(y)) - z) - y;
}
def code(x, y, z): return ((x * math.log(y)) - z) - y
function code(x, y, z) return Float64(Float64(Float64(x * log(y)) - z) - y) end
function tmp = code(x, y, z) tmp = ((x * log(y)) - z) - y; end
code[x_, y_, z_] := N[(N[(N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision] - z), $MachinePrecision] - y), $MachinePrecision]
\begin{array}{l}
\\
\left(x \cdot \log y - z\right) - y
\end{array}
(FPCore (x y z) :precision binary64 (fma x (log y) (- (- z) y)))
double code(double x, double y, double z) {
return fma(x, log(y), (-z - y));
}
function code(x, y, z) return fma(x, log(y), Float64(Float64(-z) - y)) end
code[x_, y_, z_] := N[(x * N[Log[y], $MachinePrecision] + N[((-z) - y), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(x, \log y, \left(-z\right) - y\right)
\end{array}
Initial program 99.8%
associate--l- 99.8%
fma-neg 99.8%
+-commutative 99.8%
Simplified 99.8%
Final simplification 99.8%
(FPCore (x y z) :precision binary64 (if (or (<= x -1.2e+32) (not (<= x 2.75e+38))) (- (* x (log y)) y) (- (- z) y)))
double code(double x, double y, double z) {
double tmp;
if ((x <= -1.2e+32) || !(x <= 2.75e+38)) {
tmp = (x * log(y)) - y;
} else {
tmp = -z - y;
}
return tmp;
}
! Regime-split approximation selected by the magnitude of x:
! for x <= -1.2e32 or x > 2.75e38 the z term is dropped (x*log(y) - y);
! otherwise the x*log(y) term is dropped (-z - y).
real(8) function code(x, y, z)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8) :: tmp
if ((x <= (-1.2d+32)) .or. (.not. (x <= 2.75d+38))) then
! huge |x|: z is negligible
tmp = (x * log(y)) - y
else
! moderate x: x*log(y) is negligible
tmp = -z - y
end if
code = tmp
end function
/**
 * Regime-split approximation: for x <= -1.2e32 or x beyond 2.75e38 the z
 * term is dropped; otherwise the x*log(y) term is dropped.
 * {@code !(x <= 2.75e+38)} is kept (not {@code x > ...}) so NaN x still
 * takes the first branch, matching the original.
 */
public static double code(double x, double y, double z) {
    if ((x <= -1.2e+32) || !(x <= 2.75e+38)) {
        return (x * Math.log(y)) - y;  // huge |x|: z is negligible
    }
    return -z - y;  // moderate x: x*log(y) is negligible
}
def code(x, y, z): tmp = 0 if (x <= -1.2e+32) or not (x <= 2.75e+38): tmp = (x * math.log(y)) - y else: tmp = -z - y return tmp
function code(x, y, z) tmp = 0.0 if ((x <= -1.2e+32) || !(x <= 2.75e+38)) tmp = Float64(Float64(x * log(y)) - y); else tmp = Float64(Float64(-z) - y); end return tmp end
function tmp_2 = code(x, y, z) tmp = 0.0; if ((x <= -1.2e+32) || ~((x <= 2.75e+38))) tmp = (x * log(y)) - y; else tmp = -z - y; end tmp_2 = tmp; end
code[x_, y_, z_] := If[Or[LessEqual[x, -1.2e+32], N[Not[LessEqual[x, 2.75e+38]], $MachinePrecision]], N[(N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision] - y), $MachinePrecision], N[((-z) - y), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -1.2 \cdot 10^{+32} \lor \neg \left(x \leq 2.75 \cdot 10^{+38}\right):\\
\;\;\;\;x \cdot \log y - y\\
\mathbf{else}:\\
\;\;\;\;\left(-z\right) - y\\
\end{array}
\end{array}
if x < -1.19999999999999996e32 or 2.7500000000000002e38 < x:
Initial program 99.6%
Taylor expanded in z around 0 84.3%
if -1.19999999999999996e32 < x < 2.7500000000000002e38:
Initial program 100.0%
Taylor expanded in x around 0 90.6%
neg-mul-1 90.6%
Simplified 90.6%
Final simplification 87.8%
(FPCore (x y z) :precision binary64 (- (- (* x (log y)) z) y))
/* Alternative: identical to the initial program, (x*log(y) - z) - y. */
double code(double x, double y, double z) {
return ((x * log(y)) - z) - y;
}
! Alternative: identical to the initial program, (x*log(y) - z) - y.
real(8) function code(x, y, z)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
code = ((x * log(y)) - z) - y
end function
// Alternative: identical to the initial program, (x*log(y) - z) - y.
public static double code(double x, double y, double z) {
return ((x * Math.log(y)) - z) - y;
}
def code(x, y, z): return ((x * math.log(y)) - z) - y
function code(x, y, z) return Float64(Float64(Float64(x * log(y)) - z) - y) end
function tmp = code(x, y, z) tmp = ((x * log(y)) - z) - y; end
code[x_, y_, z_] := N[(N[(N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision] - z), $MachinePrecision] - y), $MachinePrecision]
\begin{array}{l}
\\
\left(x \cdot \log y - z\right) - y
\end{array}
Initial program 99.8%
Final simplification 99.8%
(FPCore (x y z) :precision binary64 (- (- z) y))
/* Taylor expansion in x around 0: the x*log(y) term is dropped, leaving
 * -z - y. Parameters x and y-unused-aside-from-subtraction mirror the
 * original interface; x is intentionally unused. */
double code(double x, double y, double z) {
    double negated = -z;
    return negated - y;
}
! Taylor expansion in x around 0: x*log(y) dropped, leaving -z - y.
! x is intentionally unused but kept for the common interface.
real(8) function code(x, y, z)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
code = -z - y
end function
/** Taylor expansion in x around 0: x*log(y) dropped; x is unused. */
public static double code(double x, double y, double z) {
    final double negated = -z;
    return negated - y;
}
def code(x, y, z): return -z - y
function code(x, y, z) return Float64(Float64(-z) - y) end
function tmp = code(x, y, z) tmp = -z - y; end
code[x_, y_, z_] := N[((-z) - y), $MachinePrecision]
\begin{array}{l}
\\
\left(-z\right) - y
\end{array}
Initial program 99.8%
Taylor expanded in x around 0 63.1%
neg-mul-1 63.1%
Simplified 63.1%
Final simplification 63.1%
(FPCore (x y z) :precision binary64 (- y))
/* Taylor expansion in y around infinity: only -y survives
 * (reported accuracy 29.6%); x and z are unused. */
double code(double x, double y, double z) {
return -y;
}
! Taylor expansion in y around infinity: only -y survives
! (reported accuracy 29.6%); x and z are unused.
real(8) function code(x, y, z)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
code = -y
end function
// Taylor expansion in y around infinity: only -y survives
// (reported accuracy 29.6%); x and z are unused.
public static double code(double x, double y, double z) {
return -y;
}
def code(x, y, z): return -y
function code(x, y, z) return Float64(-y) end
function tmp = code(x, y, z) tmp = -y; end
code[x_, y_, z_] := (-y)
\begin{array}{l}
\\
-y
\end{array}
Initial program 99.8%
Taylor expanded in y around inf 29.6%
neg-mul-1 29.6%
Simplified 29.6%
Final simplification 29.6%
(FPCore (x y z) :precision binary64 y)
/* Degenerate candidate: returns y unchanged (reported accuracy 2.1%);
 * x and z are unused. */
double code(double x, double y, double z) {
return y;
}
! Degenerate candidate: returns y unchanged (reported accuracy 2.1%);
! x and z are unused.
real(8) function code(x, y, z)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
code = y
end function
// Degenerate candidate: returns y unchanged (reported accuracy 2.1%);
// x and z are unused.
public static double code(double x, double y, double z) {
return y;
}
def code(x, y, z): return y
function code(x, y, z) return y end
function tmp = code(x, y, z) tmp = y; end
code[x_, y_, z_] := y
\begin{array}{l}
\\
y
\end{array}
Initial program 99.8%
associate--l- 99.8%
+-commutative 99.8%
sub-neg 99.8%
add-cube-cbrt 99.2%
associate-*l* 99.2%
fma-def 99.2%
pow2 99.2%
add-sqr-sqrt 18.9%
sqrt-unprod 38.2%
sqr-neg 38.2%
sqrt-unprod 30.2%
add-sqr-sqrt 36.4%
Applied egg-rr 36.4%
Taylor expanded in y around inf 2.1%
Final simplification 2.1%
herbie shell --seed 2023279
(FPCore (x y z)
:name "Statistics.Distribution.Poisson:$clogProbability from math-functions-0.1.5.2"
:precision binary64
(- (- (* x (log y)) z) y))