
(FPCore (x y z) :precision binary64 (- (+ (- x (* (+ y 0.5) (log y))) y) z))
double code(double x, double y, double z) {
return ((x - ((y + 0.5) * log(y))) + y) - z;
}
! Evaluate ((x - (y + 0.5d0)*log(y)) + y) - z in real(8).
! Generated listing: keep the parenthesised evaluation order exactly
! as written, since it determines the floating-point rounding.
real(8) function code(x, y, z)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
code = ((x - ((y + 0.5d0) * log(y))) + y) - z
end function
/** Evaluates ((x - (y + 0.5) * log(y)) + y) - z in double precision. */
public static double code(double x, double y, double z) {
    final double weightedLog = (y + 0.5) * Math.log(y);
    return ((x - weightedLog) + y) - z;
}
def code(x, y, z):
    """Evaluate ((x - (y + 0.5) * log(y)) + y) - z in double precision."""
    weighted_log = (y + 0.5) * math.log(y)
    return ((x - weighted_log) + y) - z
# Evaluate ((x - (y + 0.5) * log(y)) + y) - z.
# The Float64() wrappers pin every intermediate to binary64 exactly as
# in the original listing, so the rounding sequence is unchanged.
function code(x, y, z)
    weighted_log = Float64(Float64(y + 0.5) * log(y))
    return Float64(Float64(Float64(x - weighted_log) + y) - z)
end
% Evaluate ((x - (y + 0.5)*log(y)) + y) - z in double precision.
function tmp = code(x, y, z) tmp = ((x - ((y + 0.5) * log(y))) + y) - z; end
(* Evaluate ((x - (y + 0.5) Log[y]) + y) - z; each N[..., $MachinePrecision] rounds the enclosed intermediate to machine precision, mirroring binary64 evaluation order. *)
code[x_, y_, z_] := N[(N[(N[(x - N[(N[(y + 0.5), $MachinePrecision] * N[Log[y], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + y), $MachinePrecision] - z), $MachinePrecision]
\begin{array}{l}
\\
\left(\left(x - \left(y + 0.5\right) \cdot \log y\right) + y\right) - z
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 12 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x y z) :precision binary64 (- (+ (- x (* (+ y 0.5) (log y))) y) z))
/* Verbatim repeat of the initial-program listing:
 * evaluates ((x - (y + 0.5) * log(y)) + y) - z in binary64. */
double code(double x, double y, double z) {
return ((x - ((y + 0.5) * log(y))) + y) - z;
}
real(8) function code(x, y, z)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
code = ((x - ((y + 0.5d0) * log(y))) + y) - z
end function
public static double code(double x, double y, double z) {
return ((x - ((y + 0.5) * Math.log(y))) + y) - z;
}
# Evaluate ((x - (y + 0.5) * log(y)) + y) - z in double precision.
def code(x, y, z): return ((x - ((y + 0.5) * math.log(y))) + y) - z
function code(x, y, z) return Float64(Float64(Float64(x - Float64(Float64(y + 0.5) * log(y))) + y) - z) end
function tmp = code(x, y, z) tmp = ((x - ((y + 0.5) * log(y))) + y) - z; end
code[x_, y_, z_] := N[(N[(N[(x - N[(N[(y + 0.5), $MachinePrecision] * N[Log[y], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + y), $MachinePrecision] - z), $MachinePrecision]
\begin{array}{l}
\\
\left(\left(x - \left(y + 0.5\right) \cdot \log y\right) + y\right) - z
\end{array}
(FPCore (x y z) :precision binary64 (- (- (+ x (* y (- 1.0 (log y)))) (* (log y) 0.5)) z))
double code(double x, double y, double z) {
return ((x + (y * (1.0 - log(y)))) - (log(y) * 0.5)) - z;
}
real(8) function code(x, y, z)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
code = ((x + (y * (1.0d0 - log(y)))) - (log(y) * 0.5d0)) - z
end function
public static double code(double x, double y, double z) {
return ((x + (y * (1.0 - Math.log(y)))) - (Math.log(y) * 0.5)) - z;
}
def code(x, y, z): return ((x + (y * (1.0 - math.log(y)))) - (math.log(y) * 0.5)) - z
function code(x, y, z) return Float64(Float64(Float64(x + Float64(y * Float64(1.0 - log(y)))) - Float64(log(y) * 0.5)) - z) end
function tmp = code(x, y, z) tmp = ((x + (y * (1.0 - log(y)))) - (log(y) * 0.5)) - z; end
code[x_, y_, z_] := N[(N[(N[(x + N[(y * N[(1.0 - N[Log[y], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(N[Log[y], $MachinePrecision] * 0.5), $MachinePrecision]), $MachinePrecision] - z), $MachinePrecision]
\begin{array}{l}
\\
\left(\left(x + y \cdot \left(1 - \log y\right)\right) - \log y \cdot 0.5\right) - z
\end{array}
Initial program 99.8%
Taylor expanded in y around 0 99.9%
Final simplification 99.9%
(FPCore (x y z) :precision binary64 (+ x (- (fma (log y) (- -0.5 y) y) z)))
double code(double x, double y, double z) {
return x + (fma(log(y), (-0.5 - y), y) - z);
}
function code(x, y, z) return Float64(x + Float64(fma(log(y), Float64(-0.5 - y), y) - z)) end
code[x_, y_, z_] := N[(x + N[(N[(N[Log[y], $MachinePrecision] * N[(-0.5 - y), $MachinePrecision] + y), $MachinePrecision] - z), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + \left(\mathsf{fma}\left(\log y, -0.5 - y, y\right) - z\right)
\end{array}
Initial program 99.8%
associate--l+99.8%
sub-neg99.8%
associate-+l+99.8%
associate-+r-99.8%
*-commutative99.8%
distribute-rgt-neg-in99.8%
fma-define99.9%
+-commutative99.9%
distribute-neg-in99.9%
unsub-neg99.9%
metadata-eval99.9%
Simplified99.9%
Final simplification99.9%
(FPCore (x y z) :precision binary64 (if (<= y 2.35e+30) (- (- x (* (log y) 0.5)) z) (+ x (* y (+ 1.0 (log (/ 1.0 y)))))))
double code(double x, double y, double z) {
double tmp;
if (y <= 2.35e+30) {
tmp = (x - (log(y) * 0.5)) - z;
} else {
tmp = x + (y * (1.0 + log((1.0 / y))));
}
return tmp;
}
! Piecewise form selected on y:
!   y <= 2.35d30 : (x - 0.5d0*log(y)) - z
!   otherwise    : x + y*(1.0d0 + log(1.0d0/y))
! NOTE(review): the else branch does not subtract z -- this matches the
! FPCore above, but confirm before reuse outside this report.
real(8) function code(x, y, z)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8) :: tmp
if (y <= 2.35d+30) then
tmp = (x - (log(y) * 0.5d0)) - z
else
tmp = x + (y * (1.0d0 + log((1.0d0 / y))))
end if
code = tmp
end function
public static double code(double x, double y, double z) {
double tmp;
if (y <= 2.35e+30) {
tmp = (x - (Math.log(y) * 0.5)) - z;
} else {
tmp = x + (y * (1.0 + Math.log((1.0 / y))));
}
return tmp;
}
def code(x, y, z):
    """Piecewise stirling-error variant: for y <= 2.35e30 return
    (x - 0.5*log(y)) - z, otherwise x + y*(1 + log(1/y)).

    The published listing had all statements collapsed onto a single
    line, which is not valid Python; this restores the block structure
    without changing any expression.
    """
    tmp = 0
    if y <= 2.35e+30:
        tmp = (x - (math.log(y) * 0.5)) - z
    else:
        tmp = x + (y * (1.0 + math.log((1.0 / y))))
    return tmp
# Piecewise stirling-error variant: for y <= 2.35e30 use
# (x - 0.5*log(y)) - z, otherwise x + y*(1 + log(1/y)).
# The published listing had its statements collapsed onto one line
# (`tmp = 0.0 if (...)`), which does not parse; line breaks restored,
# expressions unchanged.
function code(x, y, z)
    tmp = 0.0
    if (y <= 2.35e+30)
        tmp = Float64(Float64(x - Float64(log(y) * 0.5)) - z)
    else
        tmp = Float64(x + Float64(y * Float64(1.0 + log(Float64(1.0 / y)))))
    end
    return tmp
end
function tmp_2 = code(x, y, z) tmp = 0.0; if (y <= 2.35e+30) tmp = (x - (log(y) * 0.5)) - z; else tmp = x + (y * (1.0 + log((1.0 / y)))); end tmp_2 = tmp; end
code[x_, y_, z_] := If[LessEqual[y, 2.35e+30], N[(N[(x - N[(N[Log[y], $MachinePrecision] * 0.5), $MachinePrecision]), $MachinePrecision] - z), $MachinePrecision], N[(x + N[(y * N[(1.0 + N[Log[N[(1.0 / y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;y \leq 2.35 \cdot 10^{+30}:\\
\;\;\;\;\left(x - \log y \cdot 0.5\right) - z\\
\mathbf{else}:\\
\;\;\;\;x + y \cdot \left(1 + \log \left(\frac{1}{y}\right)\right)\\
\end{array}
\end{array}
if y < 2.34999999999999995e30 Initial program 99.9%
Taylor expanded in y around 0 97.7%
*-commutative97.7%
Simplified97.7%
if 2.34999999999999995e30 < y Initial program 99.6%
associate--l+99.6%
sub-neg99.6%
associate-+l+99.6%
associate-+r-99.6%
*-commutative99.6%
distribute-rgt-neg-in99.6%
fma-define99.7%
+-commutative99.7%
distribute-neg-in99.7%
unsub-neg99.7%
metadata-eval99.7%
Simplified99.7%
Taylor expanded in y around inf 99.7%
Taylor expanded in y around inf 79.4%
Final simplification89.8%
(FPCore (x y z) :precision binary64 (if (<= y 0.000104) (- (- x (* (log y) 0.5)) z) (+ x (- (* y (- 1.0 (log y))) z))))
double code(double x, double y, double z) {
double tmp;
if (y <= 0.000104) {
tmp = (x - (log(y) * 0.5)) - z;
} else {
tmp = x + ((y * (1.0 - log(y))) - z);
}
return tmp;
}
real(8) function code(x, y, z)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8) :: tmp
if (y <= 0.000104d0) then
tmp = (x - (log(y) * 0.5d0)) - z
else
tmp = x + ((y * (1.0d0 - log(y))) - z)
end if
code = tmp
end function
public static double code(double x, double y, double z) {
double tmp;
if (y <= 0.000104) {
tmp = (x - (Math.log(y) * 0.5)) - z;
} else {
tmp = x + ((y * (1.0 - Math.log(y))) - z);
}
return tmp;
}
def code(x, y, z):
    """Piecewise stirling-error variant: for y <= 1.04e-4 return
    (x - 0.5*log(y)) - z, otherwise x + (y*(1 - log(y)) - z).

    The published listing had all statements collapsed onto a single
    line, which is not valid Python; this restores the block structure
    without changing any expression.
    """
    tmp = 0
    if y <= 0.000104:
        tmp = (x - (math.log(y) * 0.5)) - z
    else:
        tmp = x + ((y * (1.0 - math.log(y))) - z)
    return tmp
function code(x, y, z) tmp = 0.0 if (y <= 0.000104) tmp = Float64(Float64(x - Float64(log(y) * 0.5)) - z); else tmp = Float64(x + Float64(Float64(y * Float64(1.0 - log(y))) - z)); end return tmp end
function tmp_2 = code(x, y, z) tmp = 0.0; if (y <= 0.000104) tmp = (x - (log(y) * 0.5)) - z; else tmp = x + ((y * (1.0 - log(y))) - z); end tmp_2 = tmp; end
code[x_, y_, z_] := If[LessEqual[y, 0.000104], N[(N[(x - N[(N[Log[y], $MachinePrecision] * 0.5), $MachinePrecision]), $MachinePrecision] - z), $MachinePrecision], N[(x + N[(N[(y * N[(1.0 - N[Log[y], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - z), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;y \leq 0.000104:\\
\;\;\;\;\left(x - \log y \cdot 0.5\right) - z\\
\mathbf{else}:\\
\;\;\;\;x + \left(y \cdot \left(1 - \log y\right) - z\right)\\
\end{array}
\end{array}
if y < 1.03999999999999994e-4Initial program 100.0%
Taylor expanded in y around 0 98.8%
*-commutative98.8%
Simplified98.8%
if 1.03999999999999994e-4 < y Initial program 99.6%
associate--l+99.6%
sub-neg99.6%
associate-+l+99.6%
associate-+r-99.6%
*-commutative99.6%
distribute-rgt-neg-in99.6%
fma-define99.7%
+-commutative99.7%
distribute-neg-in99.7%
unsub-neg99.7%
metadata-eval99.7%
Simplified99.7%
Taylor expanded in y around inf 99.2%
log-rec99.2%
sub-neg99.2%
Simplified99.2%
Final simplification99.0%
(FPCore (x y z) :precision binary64 (if (<= y 5400000.0) (- (- x (* (log y) 0.5)) z) (- (- y (* (log y) (+ y 0.5))) z)))
/* Piecewise form selected on y:
 *   y <= 5.4e6 : (x - 0.5*log(y)) - z
 *   otherwise  : (y - log(y)*(y + 0.5)) - z
 * NOTE(review): the else branch ignores x entirely (a truncation made
 * by the accuracy tool) -- confirm that is acceptable before reuse. */
double code(double x, double y, double z) {
double tmp;
if (y <= 5400000.0) {
tmp = (x - (log(y) * 0.5)) - z;
} else {
tmp = (y - (log(y) * (y + 0.5))) - z;
}
return tmp;
}
! Piecewise form selected on y:
!   y <= 5.4d6 : (x - 0.5d0*log(y)) - z
!   otherwise  : (y - log(y)*(y + 0.5d0)) - z
! NOTE(review): the else branch does not use x at all -- confirm this
! truncation is acceptable before adopting this variant.
real(8) function code(x, y, z)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8) :: tmp
if (y <= 5400000.0d0) then
tmp = (x - (log(y) * 0.5d0)) - z
else
tmp = (y - (log(y) * (y + 0.5d0))) - z
end if
code = tmp
end function
public static double code(double x, double y, double z) {
double tmp;
if (y <= 5400000.0) {
tmp = (x - (Math.log(y) * 0.5)) - z;
} else {
tmp = (y - (Math.log(y) * (y + 0.5))) - z;
}
return tmp;
}
def code(x, y, z): tmp = 0 if y <= 5400000.0: tmp = (x - (math.log(y) * 0.5)) - z else: tmp = (y - (math.log(y) * (y + 0.5))) - z return tmp
function code(x, y, z) tmp = 0.0 if (y <= 5400000.0) tmp = Float64(Float64(x - Float64(log(y) * 0.5)) - z); else tmp = Float64(Float64(y - Float64(log(y) * Float64(y + 0.5))) - z); end return tmp end
function tmp_2 = code(x, y, z) tmp = 0.0; if (y <= 5400000.0) tmp = (x - (log(y) * 0.5)) - z; else tmp = (y - (log(y) * (y + 0.5))) - z; end tmp_2 = tmp; end
code[x_, y_, z_] := If[LessEqual[y, 5400000.0], N[(N[(x - N[(N[Log[y], $MachinePrecision] * 0.5), $MachinePrecision]), $MachinePrecision] - z), $MachinePrecision], N[(N[(y - N[(N[Log[y], $MachinePrecision] * N[(y + 0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - z), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;y \leq 5400000:\\
\;\;\;\;\left(x - \log y \cdot 0.5\right) - z\\
\mathbf{else}:\\
\;\;\;\;\left(y - \log y \cdot \left(y + 0.5\right)\right) - z\\
\end{array}
\end{array}
if y < 5.4e6Initial program 100.0%
Taylor expanded in y around 0 98.9%
*-commutative98.9%
Simplified98.9%
if 5.4e6 < y Initial program 99.6%
Taylor expanded in x around 0 79.8%
Final simplification90.1%
(FPCore (x y z) :precision binary64 (if (<= y 240000000.0) (- x z) (- (* y (- 1.0 (log y))) z)))
double code(double x, double y, double z) {
double tmp;
if (y <= 240000000.0) {
tmp = x - z;
} else {
tmp = (y * (1.0 - log(y))) - z;
}
return tmp;
}
real(8) function code(x, y, z)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8) :: tmp
if (y <= 240000000.0d0) then
tmp = x - z
else
tmp = (y * (1.0d0 - log(y))) - z
end if
code = tmp
end function
public static double code(double x, double y, double z) {
double tmp;
if (y <= 240000000.0) {
tmp = x - z;
} else {
tmp = (y * (1.0 - Math.log(y))) - z;
}
return tmp;
}
def code(x, y, z):
    """Piecewise stirling-error variant: for y <= 2.4e8 return x - z,
    otherwise (y*(1 - log(y))) - z.

    The published listing had all statements collapsed onto a single
    line, which is not valid Python; this restores the block structure
    without changing any expression.
    """
    tmp = 0
    if y <= 240000000.0:
        tmp = x - z
    else:
        tmp = (y * (1.0 - math.log(y))) - z
    return tmp
function code(x, y, z) tmp = 0.0 if (y <= 240000000.0) tmp = Float64(x - z); else tmp = Float64(Float64(y * Float64(1.0 - log(y))) - z); end return tmp end
function tmp_2 = code(x, y, z) tmp = 0.0; if (y <= 240000000.0) tmp = x - z; else tmp = (y * (1.0 - log(y))) - z; end tmp_2 = tmp; end
code[x_, y_, z_] := If[LessEqual[y, 240000000.0], N[(x - z), $MachinePrecision], N[(N[(y * N[(1.0 - N[Log[y], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - z), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;y \leq 240000000:\\
\;\;\;\;x - z\\
\mathbf{else}:\\
\;\;\;\;y \cdot \left(1 - \log y\right) - z\\
\end{array}
\end{array}
if y < 2.4e8Initial program 100.0%
Taylor expanded in x around inf 71.9%
if 2.4e8 < y Initial program 99.6%
Taylor expanded in x around inf 80.0%
Simplified81.4%
Taylor expanded in y around inf 61.2%
associate-*r/61.2%
div-sub61.2%
mul-1-neg61.2%
log-rec61.2%
remove-double-neg61.2%
Simplified61.2%
Taylor expanded in x around 0 79.3%
Final simplification75.3%
(FPCore (x y z) :precision binary64 (if (<= y 4.2e+82) (- (- x (* (log y) 0.5)) z) (- (- y (* y (log y))) z)))
/* Piecewise form selected on y:
 *   y <= 4.2e82 : (x - 0.5*log(y)) - z
 *   otherwise   : (y - y*log(y)) - z
 * NOTE(review): the else branch ignores x entirely -- confirm that
 * truncation is acceptable before reuse. */
double code(double x, double y, double z) {
double tmp;
if (y <= 4.2e+82) {
tmp = (x - (log(y) * 0.5)) - z;
} else {
tmp = (y - (y * log(y))) - z;
}
return tmp;
}
real(8) function code(x, y, z)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8) :: tmp
if (y <= 4.2d+82) then
tmp = (x - (log(y) * 0.5d0)) - z
else
tmp = (y - (y * log(y))) - z
end if
code = tmp
end function
/**
 * Piecewise: for y <= 4.2e82 returns (x - 0.5*log(y)) - z,
 * otherwise (y - y*log(y)) - z. Note the else branch does not use x.
 */
public static double code(double x, double y, double z) {
    if (y <= 4.2e+82) {
        return (x - (Math.log(y) * 0.5)) - z;
    }
    return (y - (y * Math.log(y))) - z;
}
def code(x, y, z): tmp = 0 if y <= 4.2e+82: tmp = (x - (math.log(y) * 0.5)) - z else: tmp = (y - (y * math.log(y))) - z return tmp
function code(x, y, z) tmp = 0.0 if (y <= 4.2e+82) tmp = Float64(Float64(x - Float64(log(y) * 0.5)) - z); else tmp = Float64(Float64(y - Float64(y * log(y))) - z); end return tmp end
function tmp_2 = code(x, y, z) tmp = 0.0; if (y <= 4.2e+82) tmp = (x - (log(y) * 0.5)) - z; else tmp = (y - (y * log(y))) - z; end tmp_2 = tmp; end
code[x_, y_, z_] := If[LessEqual[y, 4.2e+82], N[(N[(x - N[(N[Log[y], $MachinePrecision] * 0.5), $MachinePrecision]), $MachinePrecision] - z), $MachinePrecision], N[(N[(y - N[(y * N[Log[y], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - z), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;y \leq 4.2 \cdot 10^{+82}:\\
\;\;\;\;\left(x - \log y \cdot 0.5\right) - z\\
\mathbf{else}:\\
\;\;\;\;\left(y - y \cdot \log y\right) - z\\
\end{array}
\end{array}
if y < 4.2e82Initial program 99.9%
Taylor expanded in y around 0 93.0%
*-commutative93.0%
Simplified93.0%
if 4.2e82 < y Initial program 99.6%
add-cube-cbrt98.4%
pow398.4%
sub-neg98.4%
associate-+l+98.4%
*-commutative98.4%
distribute-rgt-neg-in98.4%
+-commutative98.4%
distribute-neg-in98.4%
metadata-eval98.4%
sub-neg98.4%
fma-undefine98.5%
Applied egg-rr98.5%
Taylor expanded in x around inf 76.9%
+-commutative76.9%
mul-1-neg76.9%
unsub-neg76.9%
associate-/l*76.9%
+-commutative76.9%
Simplified76.9%
Taylor expanded in y around inf 76.9%
metadata-eval76.9%
times-frac76.9%
mul-1-neg76.9%
*-commutative76.9%
distribute-lft-neg-in76.9%
log-rec76.9%
remove-double-neg76.9%
times-frac76.9%
/-rgt-identity76.9%
Simplified76.9%
Taylor expanded in x around 0 84.2%
Final simplification90.0%
(FPCore (x y z) :precision binary64 (+ x (- (* y (+ 1.0 (log (/ 1.0 y)))) z)))
double code(double x, double y, double z) {
return x + ((y * (1.0 + log((1.0 / y)))) - z);
}
! Evaluate x + (y*(1.0d0 + log(1.0d0/y)) - z) in real(8).
real(8) function code(x, y, z)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
code = x + ((y * (1.0d0 + log((1.0d0 / y)))) - z)
end function
public static double code(double x, double y, double z) {
return x + ((y * (1.0 + Math.log((1.0 / y)))) - z);
}
def code(x, y, z): return x + ((y * (1.0 + math.log((1.0 / y)))) - z)
function code(x, y, z) return Float64(x + Float64(Float64(y * Float64(1.0 + log(Float64(1.0 / y)))) - z)) end
function tmp = code(x, y, z) tmp = x + ((y * (1.0 + log((1.0 / y)))) - z); end
code[x_, y_, z_] := N[(x + N[(N[(y * N[(1.0 + N[Log[N[(1.0 / y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - z), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + \left(y \cdot \left(1 + \log \left(\frac{1}{y}\right)\right) - z\right)
\end{array}
Initial program 99.8%
associate--l+99.8%
sub-neg99.8%
associate-+l+99.8%
associate-+r-99.8%
*-commutative99.8%
distribute-rgt-neg-in99.8%
fma-define99.9%
+-commutative99.9%
distribute-neg-in99.9%
unsub-neg99.9%
metadata-eval99.9%
Simplified99.9%
Taylor expanded in y around inf 84.8%
Final simplification84.8%
(FPCore (x y z) :precision binary64 (- (+ y (- x (* (log y) (+ y 0.5)))) z))
double code(double x, double y, double z) {
return (y + (x - (log(y) * (y + 0.5)))) - z;
}
real(8) function code(x, y, z)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
code = (y + (x - (log(y) * (y + 0.5d0)))) - z
end function
public static double code(double x, double y, double z) {
return (y + (x - (Math.log(y) * (y + 0.5)))) - z;
}
def code(x, y, z): return (y + (x - (math.log(y) * (y + 0.5)))) - z
function code(x, y, z) return Float64(Float64(y + Float64(x - Float64(log(y) * Float64(y + 0.5)))) - z) end
function tmp = code(x, y, z) tmp = (y + (x - (log(y) * (y + 0.5)))) - z; end
code[x_, y_, z_] := N[(N[(y + N[(x - N[(N[Log[y], $MachinePrecision] * N[(y + 0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - z), $MachinePrecision]
\begin{array}{l}
\\
\left(y + \left(x - \log y \cdot \left(y + 0.5\right)\right)\right) - z
\end{array}
Initial program 99.8%
Final simplification99.8%
(FPCore (x y z) :precision binary64 (- (- x (* (log y) 0.5)) z))
double code(double x, double y, double z) {
return (x - (log(y) * 0.5)) - z;
}
real(8) function code(x, y, z)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
code = (x - (log(y) * 0.5d0)) - z
end function
public static double code(double x, double y, double z) {
return (x - (Math.log(y) * 0.5)) - z;
}
def code(x, y, z):
    """Evaluate (x - 0.5 * log(y)) - z in double precision."""
    half_log = math.log(y) * 0.5
    return (x - half_log) - z
function code(x, y, z) return Float64(Float64(x - Float64(log(y) * 0.5)) - z) end
function tmp = code(x, y, z) tmp = (x - (log(y) * 0.5)) - z; end
code[x_, y_, z_] := N[(N[(x - N[(N[Log[y], $MachinePrecision] * 0.5), $MachinePrecision]), $MachinePrecision] - z), $MachinePrecision]
\begin{array}{l}
\\
\left(x - \log y \cdot 0.5\right) - z
\end{array}
Initial program 99.8%
Taylor expanded in y around 0 73.2%
*-commutative73.2%
Simplified73.2%
Final simplification73.2%
(FPCore (x y z) :precision binary64 (- x z))
/* Truncated approximation: x - z. The y parameter is unused here but
 * kept so every variant shares one signature. */
double code(double x, double y, double z) {
    (void)y;
    return x - z;
}
! Truncated approximation: code = x - z. The argument y is accepted
! only for interface compatibility with the other variants; it is unused.
real(8) function code(x, y, z)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
code = x - z
end function
public static double code(double x, double y, double z) {
return x - z;
}
def code(x, y, z):
    """Return x - z; y is unused in this truncated approximation."""
    return x - z
function code(x, y, z) return Float64(x - z) end
function tmp = code(x, y, z) tmp = x - z; end
code[x_, y_, z_] := N[(x - z), $MachinePrecision]
\begin{array}{l}
\\
x - z
\end{array}
Initial program 99.8%
Taylor expanded in x around inf 58.3%
Final simplification58.3%
(FPCore (x y z) :precision binary64 (- z))
/* Truncated approximation: -z. The x and y parameters are unused here
 * but kept so every variant shares one signature. */
double code(double x, double y, double z) {
    (void)x;
    (void)y;
    return -z;
}
real(8) function code(x, y, z)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
code = -z
end function
public static double code(double x, double y, double z) {
return -z;
}
def code(x, y, z):
    """Return -z; x and y are unused in this truncated approximation."""
    return -z
function code(x, y, z) return Float64(-z) end
function tmp = code(x, y, z) tmp = -z; end
code[x_, y_, z_] := (-z)
\begin{array}{l}
\\
-z
\end{array}
Initial program 99.8%
Taylor expanded in x around 0 66.7%
Taylor expanded in z around inf 25.5%
neg-mul-125.5%
Simplified25.5%
Final simplification25.5%
(FPCore (x y z) :precision binary64 (- (- (+ y x) z) (* (+ y 0.5) (log y))))
double code(double x, double y, double z) {
return ((y + x) - z) - ((y + 0.5) * log(y));
}
real(8) function code(x, y, z)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
code = ((y + x) - z) - ((y + 0.5d0) * log(y))
end function
public static double code(double x, double y, double z) {
return ((y + x) - z) - ((y + 0.5) * Math.log(y));
}
def code(x, y, z): return ((y + x) - z) - ((y + 0.5) * math.log(y))
function code(x, y, z) return Float64(Float64(Float64(y + x) - z) - Float64(Float64(y + 0.5) * log(y))) end
function tmp = code(x, y, z) tmp = ((y + x) - z) - ((y + 0.5) * log(y)); end
code[x_, y_, z_] := N[(N[(N[(y + x), $MachinePrecision] - z), $MachinePrecision] - N[(N[(y + 0.5), $MachinePrecision] * N[Log[y], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(\left(y + x\right) - z\right) - \left(y + 0.5\right) \cdot \log y
\end{array}
herbie shell --seed 2024066
(FPCore (x y z)
:name "Numeric.SpecFunctions:stirlingError from math-functions-0.1.5.2"
:precision binary64
:alt
(- (- (+ y x) z) (* (+ y 0.5) (log y)))
(- (+ (- x (* (+ y 0.5) (log y))) y) z))