
; Herbie input: exp((x + y*log(y)) - z) in IEEE binary64
(FPCore (x y z) :precision binary64 (exp (- (+ x (* y (log y))) z)))
/* C: computes exp((x + y*log(y)) - z) in double precision */
double code(double x, double y, double z) {
return exp(((x + (y * log(y))) - z));
}
! Fortran: computes exp((x + y*log(y)) - z) in real(8)
real(8) function code(x, y, z)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
code = exp(((x + (y * log(y))) - z))
end function
// Java: same expression via Math.exp / Math.log
public static double code(double x, double y, double z) {
return Math.exp(((x + (y * Math.log(y))) - z));
}
# Python: same expression via math.exp / math.log
def code(x, y, z): return math.exp(((x + (y * math.log(y))) - z))
# Julia: Float64(...) forces binary64 rounding of each intermediate
function code(x, y, z) return exp(Float64(Float64(x + Float64(y * log(y))) - z)) end
% MATLAB: same expression (doubles by default)
function tmp = code(x, y, z) tmp = exp(((x + (y * log(y))) - z)); end
(* Mathematica: N[..., $MachinePrecision] rounds each intermediate to machine precision *)
code[x_, y_, z_] := N[Exp[N[(N[(x + N[(y * N[Log[y], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - z), $MachinePrecision]], $MachinePrecision]
% LaTeX rendering of the same expression
\begin{array}{l}
\\
e^{\left(x + y \cdot \log y\right) - z}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 6 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
; Listing of the input program exp((x + y*log(y)) - z) — identical to the listing at the top of the report
(FPCore (x y z) :precision binary64 (exp (- (+ x (* y (log y))) z)))
/* C translation of the input program */
double code(double x, double y, double z) {
return exp(((x + (y * log(y))) - z));
}
! Fortran translation of the input program
real(8) function code(x, y, z)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
code = exp(((x + (y * log(y))) - z))
end function
// Java translation of the input program
public static double code(double x, double y, double z) {
return Math.exp(((x + (y * Math.log(y))) - z));
}
# Python translation of the input program
def code(x, y, z): return math.exp(((x + (y * math.log(y))) - z))
# Julia translation: Float64(...) forces binary64 rounding of each intermediate
function code(x, y, z) return exp(Float64(Float64(x + Float64(y * log(y))) - z)) end
% MATLAB translation of the input program
function tmp = code(x, y, z) tmp = exp(((x + (y * log(y))) - z)); end
(* Mathematica translation with machine-precision rounding at each step *)
code[x_, y_, z_] := N[Exp[N[(N[(x + N[(y * N[Log[y], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - z), $MachinePrecision]], $MachinePrecision]
% LaTeX rendering of the same expression
\begin{array}{l}
\\
e^{\left(x + y \cdot \log y\right) - z}
\end{array}
; Listing of the input program exp((x + y*log(y)) - z) — identical to the listing at the top of the report
(FPCore (x y z) :precision binary64 (exp (- (+ x (* y (log y))) z)))
/* C translation of the input program */
double code(double x, double y, double z) {
return exp(((x + (y * log(y))) - z));
}
! Fortran translation of the input program
real(8) function code(x, y, z)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
code = exp(((x + (y * log(y))) - z))
end function
// Java translation of the input program
public static double code(double x, double y, double z) {
return Math.exp(((x + (y * Math.log(y))) - z));
}
# Python translation of the input program
def code(x, y, z): return math.exp(((x + (y * math.log(y))) - z))
# Julia translation: Float64(...) forces binary64 rounding of each intermediate
function code(x, y, z) return exp(Float64(Float64(x + Float64(y * log(y))) - z)) end
% MATLAB translation of the input program
function tmp = code(x, y, z) tmp = exp(((x + (y * log(y))) - z)); end
(* Mathematica translation with machine-precision rounding at each step *)
code[x_, y_, z_] := N[Exp[N[(N[(x + N[(y * N[Log[y], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - z), $MachinePrecision]], $MachinePrecision]
% LaTeX rendering of the same expression
\begin{array}{l}
\\
e^{\left(x + y \cdot \log y\right) - z}
\end{array}
Initial program 100.0%
; Herbie alternative: ((x + y*log(y)) - z) - z — note it omits exp and subtracts z twice,
; produced by the Taylor expansion / rewrite steps listed below this block
(FPCore (x y z) :precision binary64 (- (- (+ x (* y (log y))) z) z))
/* C translation of this alternative */
double code(double x, double y, double z) {
return ((x + (y * log(y))) - z) - z;
}
! Fortran translation of this alternative
real(8) function code(x, y, z)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
code = ((x + (y * log(y))) - z) - z
end function
// Java translation of this alternative
public static double code(double x, double y, double z) {
return ((x + (y * Math.log(y))) - z) - z;
}
# Python translation of this alternative
def code(x, y, z): return ((x + (y * math.log(y))) - z) - z
# Julia translation: Float64(...) forces binary64 rounding of each intermediate
function code(x, y, z) return Float64(Float64(Float64(x + Float64(y * log(y))) - z) - z) end
% MATLAB translation of this alternative
function tmp = code(x, y, z) tmp = ((x + (y * log(y))) - z) - z; end
(* Mathematica translation with machine-precision rounding at each step *)
code[x_, y_, z_] := N[(N[(N[(x + N[(y * N[Log[y], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - z), $MachinePrecision] - z), $MachinePrecision]
% LaTeX rendering of the same expression
\begin{array}{l}
\\
\left(\left(x + y \cdot \log y\right) - z\right) - z
\end{array}
Initial program 100.0%
Taylor expanded in x around 0
Applied rewrites: 5.4%
Taylor expanded in x around 0
Applied rewrites: 5.4%
; Herbie alternative: (x + y*log(y)) - z — the input's argument to exp, with exp dropped
(FPCore (x y z) :precision binary64 (- (+ x (* y (log y))) z))
/* C translation of this alternative */
double code(double x, double y, double z) {
return (x + (y * log(y))) - z;
}
! Fortran translation of this alternative
real(8) function code(x, y, z)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
code = (x + (y * log(y))) - z
end function
// Java translation of this alternative
public static double code(double x, double y, double z) {
return (x + (y * Math.log(y))) - z;
}
# Python translation of this alternative
def code(x, y, z): return (x + (y * math.log(y))) - z
# Julia translation: Float64(...) forces binary64 rounding of each intermediate
function code(x, y, z) return Float64(Float64(x + Float64(y * log(y))) - z) end
% MATLAB translation of this alternative
function tmp = code(x, y, z) tmp = (x + (y * log(y))) - z; end
(* Mathematica translation with machine-precision rounding at each step *)
code[x_, y_, z_] := N[(N[(x + N[(y * N[Log[y], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - z), $MachinePrecision]
% LaTeX rendering of the same expression
\begin{array}{l}
\\
\left(x + y \cdot \log y\right) - z
\end{array}
Initial program 100.0%
Taylor expanded in x around 0
Applied rewrites: 5.4%
; Herbie alternative: x + y*log(y) — z is ignored (still a parameter for interface compatibility)
(FPCore (x y z) :precision binary64 (+ x (* y (log y))))
/* C translation of this alternative */
double code(double x, double y, double z) {
return x + (y * log(y));
}
! Fortran translation of this alternative (z is unused)
real(8) function code(x, y, z)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
code = x + (y * log(y))
end function
// Java translation of this alternative
public static double code(double x, double y, double z) {
return x + (y * Math.log(y));
}
# Python translation of this alternative
def code(x, y, z): return x + (y * math.log(y))
# Julia translation: Float64(...) forces binary64 rounding of each intermediate
function code(x, y, z) return Float64(x + Float64(y * log(y))) end
% MATLAB translation of this alternative
function tmp = code(x, y, z) tmp = x + (y * log(y)); end
(* Mathematica translation with machine-precision rounding at each step *)
code[x_, y_, z_] := N[(x + N[(y * N[Log[y], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
% LaTeX rendering of the same expression
\begin{array}{l}
\\
x + y \cdot \log y
\end{array}
Initial program 100.0%
Taylor expanded in x around 0
Applied rewrites: 5.1%
; Herbie alternative: y*log(y) — x and z are ignored (kept as parameters for interface compatibility)
(FPCore (x y z) :precision binary64 (* y (log y)))
/* C translation of this alternative */
double code(double x, double y, double z) {
return y * log(y);
}
! Fortran translation of this alternative (x and z are unused)
real(8) function code(x, y, z)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
code = y * log(y)
end function
// Java translation of this alternative
public static double code(double x, double y, double z) {
return y * Math.log(y);
}
# Python translation of this alternative
def code(x, y, z): return y * math.log(y)
# Julia translation: Float64(...) forces binary64 rounding of the product
function code(x, y, z) return Float64(y * log(y)) end
% MATLAB translation of this alternative
function tmp = code(x, y, z) tmp = y * log(y); end
(* Mathematica translation with machine-precision rounding at each step *)
code[x_, y_, z_] := N[(y * N[Log[y], $MachinePrecision]), $MachinePrecision]
% LaTeX rendering of the same expression
\begin{array}{l}
\\
y \cdot \log y
\end{array}
Initial program 100.0%
Taylor expanded in x around 0
Applied rewrites: 4.8%
; Herbie alternative: log(y) — x and z are ignored (kept as parameters for interface compatibility)
(FPCore (x y z) :precision binary64 (log y))
/* C translation of this alternative */
double code(double x, double y, double z) {
return log(y);
}
! Fortran translation of this alternative (x and z are unused)
real(8) function code(x, y, z)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
code = log(y)
end function
// Java translation of this alternative
public static double code(double x, double y, double z) {
return Math.log(y);
}
# Python translation of this alternative
def code(x, y, z): return math.log(y)
# Julia translation of this alternative
function code(x, y, z) return log(y) end
% MATLAB translation of this alternative
function tmp = code(x, y, z) tmp = log(y); end
(* Mathematica translation with machine-precision rounding *)
code[x_, y_, z_] := N[Log[y], $MachinePrecision]
% LaTeX rendering of the same expression
\begin{array}{l}
\\
\log y
\end{array}
Initial program 100.0%
Taylor expanded in x around 0
Applied rewrites: 4.8%
Taylor expanded in x around 0
Applied rewrites: 2.5%
; Herbie alternative: exp((x - z) + log(y)*y) — same terms as the input but grouped so
; x - z is computed first before adding the y*log(y) term
(FPCore (x y z) :precision binary64 (exp (+ (- x z) (* (log y) y))))
/* C translation of this alternative */
double code(double x, double y, double z) {
return exp(((x - z) + (log(y) * y)));
}
! Fortran translation of this alternative
real(8) function code(x, y, z)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
code = exp(((x - z) + (log(y) * y)))
end function
// Java translation of this alternative
public static double code(double x, double y, double z) {
return Math.exp(((x - z) + (Math.log(y) * y)));
}
# Python translation of this alternative
def code(x, y, z): return math.exp(((x - z) + (math.log(y) * y)))
# Julia translation: Float64(...) forces binary64 rounding of each intermediate
function code(x, y, z) return exp(Float64(Float64(x - z) + Float64(log(y) * y))) end
% MATLAB translation of this alternative
function tmp = code(x, y, z) tmp = exp(((x - z) + (log(y) * y))); end
(* Mathematica translation with machine-precision rounding at each step *)
code[x_, y_, z_] := N[Exp[N[(N[(x - z), $MachinePrecision] + N[(N[Log[y], $MachinePrecision] * y), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
% LaTeX rendering of the same expression
\begin{array}{l}
\\
e^{\left(x - z\right) + \log y \cdot y}
\end{array}
herbie shell --seed 2024321
; Full FPCore benchmark as fed to the Herbie shell, with metadata.
(FPCore (x y z)
; provenance of the benchmark expression
:name "Statistics.Distribution.Poisson.Internal:probability from math-functions-0.1.5.2"
:precision binary64
:pre (TRUE)
; suggested rewrite for the default platform: exp((x - z) + log(y)*y)
:alt
(! :herbie-platform default (exp (+ (- x z) (* (log y) y))))
; the original expression: exp((x + y*log(y)) - z)
(exp (- (+ x (* y (log y))) z)))