
(FPCore (x y z) :precision binary64 (- (+ (- x (* (+ y 0.5) (log y))) y) z))
double code(double x, double y, double z) {
return ((x - ((y + 0.5) * log(y))) + y) - z;
}
! Herbie initial program: code = ((x - (y + 0.5)*log(y)) + y) - z,
! evaluated in binary64 with the association order fixed by the
! generated formula (do not reassociate).
real(8) function code(x, y, z)
    implicit none
    real(8), intent(in) :: x
    real(8), intent(in) :: y
    real(8), intent(in) :: z
    real(8) :: weighted_log

    ! (y + 1/2) * ln(y), kept as one named intermediate for readability
    weighted_log = (y + 0.5d0) * log(y)
    code = ((x - weighted_log) + y) - z
end function code
/**
 * Herbie initial program: ((x - (y + 0.5) * ln(y)) + y) - z in binary64.
 * Association order matches the generated expression exactly.
 */
public static double code(double x, double y, double z) {
    final double weightedLog = (y + 0.5) * Math.log(y);
    return ((x - weightedLog) + y) - z;
}
def code(x, y, z): return ((x - ((y + 0.5) * math.log(y))) + y) - z
function code(x, y, z) return Float64(Float64(Float64(x - Float64(Float64(y + 0.5) * log(y))) + y) - z) end
function tmp = code(x, y, z) tmp = ((x - ((y + 0.5) * log(y))) + y) - z; end
code[x_, y_, z_] := N[(N[(N[(x - N[(N[(y + 0.5), $MachinePrecision] * N[Log[y], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + y), $MachinePrecision] - z), $MachinePrecision]
\begin{array}{l}
\\
\left(\left(x - \left(y + 0.5\right) \cdot \log y\right) + y\right) - z
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 8 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x y z) :precision binary64 (- (+ (- x (* (+ y 0.5) (log y))) y) z))
double code(double x, double y, double z) {
return ((x - ((y + 0.5) * log(y))) + y) - z;
}
real(8) function code(x, y, z)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
code = ((x - ((y + 0.5d0) * log(y))) + y) - z
end function
public static double code(double x, double y, double z) {
return ((x - ((y + 0.5) * Math.log(y))) + y) - z;
}
def code(x, y, z): return ((x - ((y + 0.5) * math.log(y))) + y) - z
function code(x, y, z) return Float64(Float64(Float64(x - Float64(Float64(y + 0.5) * log(y))) + y) - z) end
function tmp = code(x, y, z) tmp = ((x - ((y + 0.5) * log(y))) + y) - z; end
code[x_, y_, z_] := N[(N[(N[(x - N[(N[(y + 0.5), $MachinePrecision] * N[Log[y], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + y), $MachinePrecision] - z), $MachinePrecision]
\begin{array}{l}
\\
\left(\left(x - \left(y + 0.5\right) \cdot \log y\right) + y\right) - z
\end{array}
(FPCore (x y z) :precision binary64 (+ x (- (fma (log y) (- -0.5 y) y) z)))
double code(double x, double y, double z) {
return x + (fma(log(y), (-0.5 - y), y) - z);
}
function code(x, y, z) return Float64(x + Float64(fma(log(y), Float64(-0.5 - y), y) - z)) end
code[x_, y_, z_] := N[(x + N[(N[(N[Log[y], $MachinePrecision] * N[(-0.5 - y), $MachinePrecision] + y), $MachinePrecision] - z), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + \left(\mathsf{fma}\left(\log y, -0.5 - y, y\right) - z\right)
\end{array}
Initial program 99.8%
associate--l+ 99.8%
sub-neg 99.8%
associate-+l+ 99.8%
associate-+r- 99.8%
*-commutative 99.8%
distribute-rgt-neg-in 99.8%
fma-def 99.9%
+-commutative 99.9%
distribute-neg-in 99.9%
unsub-neg 99.9%
metadata-eval 99.9%
Simplified 99.9%
Final simplification 99.9%
(FPCore (x y z) :precision binary64 (- (+ y (- x (* (log y) (+ y 0.5)))) z))
double code(double x, double y, double z) {
return (y + (x - (log(y) * (y + 0.5)))) - z;
}
real(8) function code(x, y, z)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
code = (y + (x - (log(y) * (y + 0.5d0)))) - z
end function
public static double code(double x, double y, double z) {
return (y + (x - (Math.log(y) * (y + 0.5)))) - z;
}
def code(x, y, z): return (y + (x - (math.log(y) * (y + 0.5)))) - z
function code(x, y, z) return Float64(Float64(y + Float64(x - Float64(log(y) * Float64(y + 0.5)))) - z) end
function tmp = code(x, y, z) tmp = (y + (x - (log(y) * (y + 0.5)))) - z; end
code[x_, y_, z_] := N[(N[(y + N[(x - N[(N[Log[y], $MachinePrecision] * N[(y + 0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - z), $MachinePrecision]
\begin{array}{l}
\\
\left(y + \left(x - \log y \cdot \left(y + 0.5\right)\right)\right) - z
\end{array}
Initial program 99.8%
Final simplification 99.8%
(FPCore (x y z) :precision binary64 (- (- (+ x y) (* (log y) (+ y 0.5))) z))
double code(double x, double y, double z) {
return ((x + y) - (log(y) * (y + 0.5))) - z;
}
real(8) function code(x, y, z)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
code = ((x + y) - (log(y) * (y + 0.5d0))) - z
end function
public static double code(double x, double y, double z) {
return ((x + y) - (Math.log(y) * (y + 0.5))) - z;
}
def code(x, y, z): return ((x + y) - (math.log(y) * (y + 0.5))) - z
function code(x, y, z) return Float64(Float64(Float64(x + y) - Float64(log(y) * Float64(y + 0.5))) - z) end
function tmp = code(x, y, z) tmp = ((x + y) - (log(y) * (y + 0.5))) - z; end
code[x_, y_, z_] := N[(N[(N[(x + y), $MachinePrecision] - N[(N[Log[y], $MachinePrecision] * N[(y + 0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - z), $MachinePrecision]
\begin{array}{l}
\\
\left(\left(x + y\right) - \log y \cdot \left(y + 0.5\right)\right) - z
\end{array}
Initial program 99.8%
+-commutative 99.8%
associate-+r- 99.8%
*-commutative 99.8%
Applied egg-rr 99.8%
Final simplification 99.8%
(FPCore (x y z) :precision binary64 (+ x (- (* y (- 1.0 (log y))) z)))
double code(double x, double y, double z) {
return x + ((y * (1.0 - log(y))) - z);
}
real(8) function code(x, y, z)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
code = x + ((y * (1.0d0 - log(y))) - z)
end function
public static double code(double x, double y, double z) {
return x + ((y * (1.0 - Math.log(y))) - z);
}
def code(x, y, z): return x + ((y * (1.0 - math.log(y))) - z)
function code(x, y, z) return Float64(x + Float64(Float64(y * Float64(1.0 - log(y))) - z)) end
function tmp = code(x, y, z) tmp = x + ((y * (1.0 - log(y))) - z); end
code[x_, y_, z_] := N[(x + N[(N[(y * N[(1.0 - N[Log[y], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - z), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + \left(y \cdot \left(1 - \log y\right) - z\right)
\end{array}
Initial program 99.8%
associate--l+ 99.8%
sub-neg 99.8%
associate-+l+ 99.8%
associate-+r- 99.8%
*-commutative 99.8%
distribute-rgt-neg-in 99.8%
fma-def 99.9%
+-commutative 99.9%
distribute-neg-in 99.9%
unsub-neg 99.9%
metadata-eval 99.9%
Simplified 99.9%
Taylor expanded in y around inf 87.2%
log-rec 87.2%
sub-neg 87.2%
Simplified 87.2%
Final simplification 87.2%
(FPCore (x y z) :precision binary64 (- (* y (- 1.0 (log y))) z))
double code(double x, double y, double z) {
return (y * (1.0 - log(y))) - z;
}
real(8) function code(x, y, z)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
code = (y * (1.0d0 - log(y))) - z
end function
public static double code(double x, double y, double z) {
return (y * (1.0 - Math.log(y))) - z;
}
def code(x, y, z): return (y * (1.0 - math.log(y))) - z
function code(x, y, z) return Float64(Float64(y * Float64(1.0 - log(y))) - z) end
function tmp = code(x, y, z) tmp = (y * (1.0 - log(y))) - z; end
code[x_, y_, z_] := N[(N[(y * N[(1.0 - N[Log[y], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - z), $MachinePrecision]
\begin{array}{l}
\\
y \cdot \left(1 - \log y\right) - z
\end{array}
Initial program 99.8%
+-commutative 99.8%
associate-+r- 99.8%
*-commutative 99.8%
Applied egg-rr 99.8%
Taylor expanded in y around inf 60.5%
mul-1-neg 60.5%
log-rec 60.5%
remove-double-neg 60.5%
Simplified 60.5%
Final simplification 60.5%
(FPCore (x y z) :precision binary64 (- x z))
/* Herbie truncation: only x - z survives; per the report trace the
 * y terms were dropped by a Taylor expansion in x around infinity. */
double code(double x, double y, double z) {
    (void)y;  /* y is intentionally unused in this approximation */
    return x - z;
}
real(8) function code(x, y, z)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
code = x - z
end function
public static double code(double x, double y, double z) {
return x - z;
}
def code(x, y, z): return x - z
function code(x, y, z) return Float64(x - z) end
function tmp = code(x, y, z) tmp = x - z; end
code[x_, y_, z_] := N[(x - z), $MachinePrecision]
\begin{array}{l}
\\
x - z
\end{array}
Initial program 99.8%
+-commutative 99.8%
associate-+r- 99.8%
*-commutative 99.8%
Applied egg-rr 99.8%
Taylor expanded in x around inf 58.7%
Final simplification 58.7%
(FPCore (x y z) :precision binary64 (- z))
/* Herbie truncation: only -z survives; per the report trace the x and
 * y terms were dropped by a Taylor expansion in z around infinity. */
double code(double x, double y, double z) {
    (void)x;  /* intentionally unused in this approximation */
    (void)y;  /* intentionally unused in this approximation */
    return -z;
}
real(8) function code(x, y, z)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
code = -z
end function
public static double code(double x, double y, double z) {
return -z;
}
def code(x, y, z): return -z
function code(x, y, z) return Float64(-z) end
function tmp = code(x, y, z) tmp = -z; end
code[x_, y_, z_] := (-z)
\begin{array}{l}
\\
-z
\end{array}
Initial program 99.8%
associate--l+ 99.8%
sub-neg 99.8%
associate-+l+ 99.8%
associate-+r- 99.8%
*-commutative 99.8%
distribute-rgt-neg-in 99.8%
fma-def 99.9%
+-commutative 99.9%
distribute-neg-in 99.9%
unsub-neg 99.9%
metadata-eval 99.9%
Simplified 99.9%
Taylor expanded in z around inf 32.5%
neg-mul-1 32.5%
Simplified 32.5%
Final simplification 32.5%
(FPCore (x y z) :precision binary64 x)
/* Herbie truncation: only x survives; per the report trace the y and
 * z terms were dropped by a Taylor expansion in x around infinity. */
double code(double x, double y, double z) {
    (void)y;  /* intentionally unused in this approximation */
    (void)z;  /* intentionally unused in this approximation */
    return x;
}
real(8) function code(x, y, z)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
code = x
end function
public static double code(double x, double y, double z) {
return x;
}
def code(x, y, z): return x
function code(x, y, z) return x end
function tmp = code(x, y, z) tmp = x; end
code[x_, y_, z_] := x
\begin{array}{l}
\\
x
\end{array}
Initial program 99.8%
associate--l+ 99.8%
sub-neg 99.8%
associate-+l+ 99.8%
associate-+r- 99.8%
*-commutative 99.8%
distribute-rgt-neg-in 99.8%
fma-def 99.9%
+-commutative 99.9%
distribute-neg-in 99.9%
unsub-neg 99.9%
metadata-eval 99.9%
Simplified 99.9%
Taylor expanded in x around inf 27.8%
Final simplification 27.8%
(FPCore (x y z) :precision binary64 (- (- (+ y x) z) (* (+ y 0.5) (log y))))
double code(double x, double y, double z) {
return ((y + x) - z) - ((y + 0.5) * log(y));
}
real(8) function code(x, y, z)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
code = ((y + x) - z) - ((y + 0.5d0) * log(y))
end function
public static double code(double x, double y, double z) {
return ((y + x) - z) - ((y + 0.5) * Math.log(y));
}
def code(x, y, z): return ((y + x) - z) - ((y + 0.5) * math.log(y))
function code(x, y, z) return Float64(Float64(Float64(y + x) - z) - Float64(Float64(y + 0.5) * log(y))) end
function tmp = code(x, y, z) tmp = ((y + x) - z) - ((y + 0.5) * log(y)); end
code[x_, y_, z_] := N[(N[(N[(y + x), $MachinePrecision] - z), $MachinePrecision] - N[(N[(y + 0.5), $MachinePrecision] * N[Log[y], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(\left(y + x\right) - z\right) - \left(y + 0.5\right) \cdot \log y
\end{array}
herbie shell --seed 2024033
(FPCore (x y z)
:name "Numeric.SpecFunctions:stirlingError from math-functions-0.1.5.2"
:precision binary64
:herbie-target
(- (- (+ y x) z) (* (+ y 0.5) (log y)))
(- (+ (- x (* (+ y 0.5) (log y))) y) z))