
(FPCore (x y z t) :precision binary64 (- (+ (* x (log y)) (* z (log (- 1.0 y)))) t))
double code(double x, double y, double z, double t) {
return ((x * log(y)) + (z * log((1.0 - y)))) - t;
}
! Direct evaluation of x*log(y) + z*log(1 - y) - t in double precision.
! NOTE(review): log(1.0d0 - y) cancels badly for tiny y; the report's
! alternatives below replace it with log1p or a Taylor series.
real(8) function code(x, y, z, t)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: t
code = ((x * log(y)) + (z * log((1.0d0 - y)))) - t
end function
/** Direct evaluation of x*log(y) + z*log(1 - y) - t in double precision. */
public static double code(double x, double y, double z, double t) {
return ((x * Math.log(y)) + (z * Math.log((1.0 - y)))) - t;
}
def code(x, y, z, t):
    """Direct evaluation of x*log(y) + z*log(1 - y) - t."""
    left = x * math.log(y)
    right = z * math.log(1.0 - y)
    return (left + right) - t
# Direct evaluation of x*log(y) + z*log(1 - y) - t with explicit Float64 rounding.
function code(x, y, z, t) return Float64(Float64(Float64(x * log(y)) + Float64(z * log(Float64(1.0 - y)))) - t) end
% Direct evaluation of x*log(y) + z*log(1 - y) - t.
function tmp = code(x, y, z, t) tmp = ((x * log(y)) + (z * log((1.0 - y)))) - t; end
code[x_, y_, z_, t_] := N[(N[(N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision] + N[(z * N[Log[N[(1.0 - y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}
\\
\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 12 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x y z t) :precision binary64 (- (+ (* x (log y)) (* z (log (- 1.0 y)))) t))
/* Duplicate listing of the input program: x*log(y) + z*log(1 - y) - t. */
double code(double x, double y, double z, double t) {
return ((x * log(y)) + (z * log((1.0 - y)))) - t;
}
! Duplicate listing of the input program: x*log(y) + z*log(1 - y) - t.
real(8) function code(x, y, z, t)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: t
code = ((x * log(y)) + (z * log((1.0d0 - y)))) - t
end function
public static double code(double x, double y, double z, double t) {
return ((x * Math.log(y)) + (z * Math.log((1.0 - y)))) - t;
}
# Duplicate listing of the input program: x*log(y) + z*log(1 - y) - t.
def code(x, y, z, t): return ((x * math.log(y)) + (z * math.log((1.0 - y)))) - t
function code(x, y, z, t) return Float64(Float64(Float64(x * log(y)) + Float64(z * log(Float64(1.0 - y)))) - t) end
function tmp = code(x, y, z, t) tmp = ((x * log(y)) + (z * log((1.0 - y)))) - t; end
code[x_, y_, z_, t_] := N[(N[(N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision] + N[(z * N[Log[N[(1.0 - y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}
\\
\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t
\end{array}
(FPCore (x y z t) :precision binary64 (fma z (log1p (- y)) (- (* x (log y)) t)))
double code(double x, double y, double z, double t) {
return fma(z, log1p(-y), ((x * log(y)) - t));
}
function code(x, y, z, t) return fma(z, log1p(Float64(-y)), Float64(Float64(x * log(y)) - t)) end
code[x_, y_, z_, t_] := N[(z * N[Log[1 + (-y)], $MachinePrecision] + N[(N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(z, \mathsf{log1p}\left(-y\right), x \cdot \log y - t\right)
\end{array}
Initial program 84.2%
+-commutative 84.2%
associate--l+ 84.2%
fma-define 84.2%
sub-neg 84.2%
log1p-define 99.8%
Simplified 99.8%
(FPCore (x y z t) :precision binary64 (- (+ (* x (log y)) (* z (* y (+ (* y (- (* y (- (* y -0.25) 0.3333333333333333)) 0.5)) -1.0)))) t))
/* x*log(y) + z*P(y) - t, where P is the Horner-form Taylor expansion of
   log(1 - y) about y = 0: -y - y^2/2 - y^3/3 - y^4/4. */
double code(double x, double y, double z, double t) {
return ((x * log(y)) + (z * (y * ((y * ((y * ((y * -0.25) - 0.3333333333333333)) - 0.5)) + -1.0)))) - t;
}
! x*log(y) + z*P(y) - t, where P is the Horner-form Taylor expansion of
! log(1 - y) about y = 0: -y - y**2/2 - y**3/3 - y**4/4.
real(8) function code(x, y, z, t)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: t
code = ((x * log(y)) + (z * (y * ((y * ((y * ((y * (-0.25d0)) - 0.3333333333333333d0)) - 0.5d0)) + (-1.0d0))))) - t
end function
public static double code(double x, double y, double z, double t) {
return ((x * Math.log(y)) + (z * (y * ((y * ((y * ((y * -0.25) - 0.3333333333333333)) - 0.5)) + -1.0)))) - t;
}
def code(x, y, z, t):
    """x*log(y) + z*P(y) - t, with P the Horner-form Taylor expansion of
    log(1 - y) about 0: -y - y**2/2 - y**3/3 - y**4/4."""
    horner = (y * -0.25) - 0.3333333333333333
    horner = (y * horner) - 0.5
    horner = (y * horner) + -1.0
    return ((x * math.log(y)) + (z * (y * horner))) - t
function code(x, y, z, t) return Float64(Float64(Float64(x * log(y)) + Float64(z * Float64(y * Float64(Float64(y * Float64(Float64(y * Float64(Float64(y * -0.25) - 0.3333333333333333)) - 0.5)) + -1.0)))) - t) end
function tmp = code(x, y, z, t) tmp = ((x * log(y)) + (z * (y * ((y * ((y * ((y * -0.25) - 0.3333333333333333)) - 0.5)) + -1.0)))) - t; end
code[x_, y_, z_, t_] := N[(N[(N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision] + N[(z * N[(y * N[(N[(y * N[(N[(y * N[(N[(y * -0.25), $MachinePrecision] - 0.3333333333333333), $MachinePrecision]), $MachinePrecision] - 0.5), $MachinePrecision]), $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}
\\
\left(x \cdot \log y + z \cdot \left(y \cdot \left(y \cdot \left(y \cdot \left(y \cdot -0.25 - 0.3333333333333333\right) - 0.5\right) + -1\right)\right)\right) - t
\end{array}
Initial program 84.2%
Taylor expanded in y around 0 99.6%
Final simplification 99.6%
(FPCore (x y z t) :precision binary64 (- (+ (* x (log y)) (* z (* y (+ (* y (- (* y -0.3333333333333333) 0.5)) -1.0)))) t))
double code(double x, double y, double z, double t) {
return ((x * log(y)) + (z * (y * ((y * ((y * -0.3333333333333333) - 0.5)) + -1.0)))) - t;
}
! x*log(y) + z*P(y) - t, where P is the Horner-form Taylor expansion of
! log(1 - y) about y = 0: -y - y**2/2 - y**3/3.
real(8) function code(x, y, z, t)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: t
code = ((x * log(y)) + (z * (y * ((y * ((y * (-0.3333333333333333d0)) - 0.5d0)) + (-1.0d0))))) - t
end function
public static double code(double x, double y, double z, double t) {
return ((x * Math.log(y)) + (z * (y * ((y * ((y * -0.3333333333333333) - 0.5)) + -1.0)))) - t;
}
# x*log(y) + z*(-y - y**2/2 - y**3/3) - t, Horner-form series for log(1 - y).
def code(x, y, z, t): return ((x * math.log(y)) + (z * (y * ((y * ((y * -0.3333333333333333) - 0.5)) + -1.0)))) - t
function code(x, y, z, t) return Float64(Float64(Float64(x * log(y)) + Float64(z * Float64(y * Float64(Float64(y * Float64(Float64(y * -0.3333333333333333) - 0.5)) + -1.0)))) - t) end
function tmp = code(x, y, z, t) tmp = ((x * log(y)) + (z * (y * ((y * ((y * -0.3333333333333333) - 0.5)) + -1.0)))) - t; end
code[x_, y_, z_, t_] := N[(N[(N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision] + N[(z * N[(y * N[(N[(y * N[(N[(y * -0.3333333333333333), $MachinePrecision] - 0.5), $MachinePrecision]), $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}
\\
\left(x \cdot \log y + z \cdot \left(y \cdot \left(y \cdot \left(y \cdot -0.3333333333333333 - 0.5\right) + -1\right)\right)\right) - t
\end{array}
Initial program 84.2%
Taylor expanded in y around 0 99.5%
Final simplification 99.5%
(FPCore (x y z t) :precision binary64 (- (+ (* x (log y)) (* z (* y (+ (* y -0.5) -1.0)))) t))
double code(double x, double y, double z, double t) {
return ((x * log(y)) + (z * (y * ((y * -0.5) + -1.0)))) - t;
}
! x*log(y) + z*(-y - y**2/2) - t: degree-2 series replaces log(1 - y).
real(8) function code(x, y, z, t)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: t
code = ((x * log(y)) + (z * (y * ((y * (-0.5d0)) + (-1.0d0))))) - t
end function
public static double code(double x, double y, double z, double t) {
return ((x * Math.log(y)) + (z * (y * ((y * -0.5) + -1.0)))) - t;
}
def code(x, y, z, t):
    """x*log(y) + z*(-y - y**2/2) - t; degree-2 series replaces log(1 - y)."""
    tail = (y * -0.5) + -1.0
    return ((x * math.log(y)) + (z * (y * tail))) - t
function code(x, y, z, t) return Float64(Float64(Float64(x * log(y)) + Float64(z * Float64(y * Float64(Float64(y * -0.5) + -1.0)))) - t) end
function tmp = code(x, y, z, t) tmp = ((x * log(y)) + (z * (y * ((y * -0.5) + -1.0)))) - t; end
code[x_, y_, z_, t_] := N[(N[(N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision] + N[(z * N[(y * N[(N[(y * -0.5), $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}
\\
\left(x \cdot \log y + z \cdot \left(y \cdot \left(y \cdot -0.5 + -1\right)\right)\right) - t
\end{array}
Initial program 84.2%
Taylor expanded in y around 0 99.4%
Final simplification 99.4%
(FPCore (x y z t) :precision binary64 (if (or (<= x -2.2e+17) (not (<= x 3.3e+114))) (* x (log y)) (- (* y (* z (+ (* y -0.5) -1.0))) t)))
/* Piecewise form branching on x: for x of very large magnitude only the
   dominating x*log(y) term is kept; otherwise the log terms are replaced
   by the series z*(-y - y^2/2) minus t. */
double code(double x, double y, double z, double t) {
double tmp;
if ((x <= -2.2e+17) || !(x <= 3.3e+114)) {
tmp = x * log(y);
} else {
tmp = (y * (z * ((y * -0.5) + -1.0))) - t;
}
return tmp;
}
! Piecewise form branching on x: for x of very large magnitude only the
! dominating x*log(y) term is kept; otherwise the log terms are replaced
! by the series z*(-y - y**2/2) minus t.
real(8) function code(x, y, z, t)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: t
real(8) :: tmp
if ((x <= (-2.2d+17)) .or. (.not. (x <= 3.3d+114))) then
tmp = x * log(y)
else
tmp = (y * (z * ((y * (-0.5d0)) + (-1.0d0)))) - t
end if
code = tmp
end function
public static double code(double x, double y, double z, double t) {
double tmp;
if ((x <= -2.2e+17) || !(x <= 3.3e+114)) {
tmp = x * Math.log(y);
} else {
tmp = (y * (z * ((y * -0.5) + -1.0))) - t;
}
return tmp;
}
def code(x, y, z, t):
    """Piecewise form branching on x: very large |x| keeps only x*log(y);
    otherwise uses the series z*(-y - y**2/2) minus t.

    NOTE(review): reflowed from a single collapsed line that was not valid
    Python syntax; the logic is unchanged.
    """
    tmp = 0
    if (x <= -2.2e+17) or not (x <= 3.3e+114):
        tmp = x * math.log(y)
    else:
        tmp = (y * (z * ((y * -0.5) + -1.0))) - t
    return tmp
# Piecewise form branching on x: very large |x| keeps only x*log(y);
# otherwise uses the series z*(-y - y^2/2) minus t.
# NOTE(review): reflowed from a collapsed one-line emission that did not
# parse as Julia; the logic is unchanged.
function code(x, y, z, t)
    tmp = 0.0
    if (x <= -2.2e+17) || !(x <= 3.3e+114)
        tmp = Float64(x * log(y))
    else
        tmp = Float64(Float64(y * Float64(z * Float64(Float64(y * -0.5) + -1.0))) - t)
    end
    return tmp
end
% Piecewise form branching on x: very large |x| keeps only x*log(y);
% otherwise uses the series z*(-y - y^2/2) minus t.
% NOTE(review): reflowed from a collapsed one-line emission; logic unchanged.
function tmp_2 = code(x, y, z, t)
	tmp = 0.0;
	if ((x <= -2.2e+17) || ~((x <= 3.3e+114)))
		tmp = x * log(y);
	else
		tmp = (y * (z * ((y * -0.5) + -1.0))) - t;
	end
	tmp_2 = tmp;
end
code[x_, y_, z_, t_] := If[Or[LessEqual[x, -2.2e+17], N[Not[LessEqual[x, 3.3e+114]], $MachinePrecision]], N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision], N[(N[(y * N[(z * N[(N[(y * -0.5), $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -2.2 \cdot 10^{+17} \lor \neg \left(x \leq 3.3 \cdot 10^{+114}\right):\\
\;\;\;\;x \cdot \log y\\
\mathbf{else}:\\
\;\;\;\;y \cdot \left(z \cdot \left(y \cdot -0.5 + -1\right)\right) - t\\
\end{array}
\end{array}
if x < -2.2e17 or 3.3000000000000001e114 < x:
Initial program 95.0%
+-commutative 95.0%
associate--l+ 95.0%
fma-define 95.0%
sub-neg 95.0%
log1p-define 99.7%
Simplified 99.7%
add-sqr-sqrt 99.7%
distribute-rgt-neg-in 99.7%
Applied egg-rr 99.7%
Taylor expanded in x around inf 78.3%
if -2.2e17 < x < 3.3000000000000001e114:
Initial program 77.1%
Taylor expanded in y around 0 99.4%
Taylor expanded in y around 0 99.2%
*-commutative 99.2%
Simplified 99.2%
Taylor expanded in x around 0 75.2%
Final simplification 76.4%
(FPCore (x y z t) :precision binary64 (if (<= z -6.6e+172) (- (* z (log1p (- y))) t) (- (* x (log y)) t)))
/* Piecewise form branching on z: for enormously negative z the
   z*log1p(-y) term dominates; otherwise only x*log(y) - t is kept. */
double code(double x, double y, double z, double t) {
double tmp;
if (z <= -6.6e+172) {
tmp = (z * log1p(-y)) - t;
} else {
tmp = (x * log(y)) - t;
}
return tmp;
}
public static double code(double x, double y, double z, double t) {
double tmp;
if (z <= -6.6e+172) {
tmp = (z * Math.log1p(-y)) - t;
} else {
tmp = (x * Math.log(y)) - t;
}
return tmp;
}
def code(x, y, z, t):
    """Piecewise form branching on z: enormously negative z keeps the
    z*log1p(-y) term; otherwise only x*log(y) - t is kept.

    NOTE(review): reflowed from a single collapsed line that was not valid
    Python syntax; the logic is unchanged.
    """
    tmp = 0
    if z <= -6.6e+172:
        tmp = (z * math.log1p(-y)) - t
    else:
        tmp = (x * math.log(y)) - t
    return tmp
# Piecewise form branching on z: enormously negative z keeps the
# z*log1p(-y) term; otherwise only x*log(y) - t is kept.
# NOTE(review): reflowed from a collapsed one-line emission; logic unchanged.
function code(x, y, z, t)
    tmp = 0.0
    if z <= -6.6e+172
        tmp = Float64(Float64(z * log1p(Float64(-y))) - t)
    else
        tmp = Float64(Float64(x * log(y)) - t)
    end
    return tmp
end
code[x_, y_, z_, t_] := If[LessEqual[z, -6.6e+172], N[(N[(z * N[Log[1 + (-y)], $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision], N[(N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;z \leq -6.6 \cdot 10^{+172}:\\
\;\;\;\;z \cdot \mathsf{log1p}\left(-y\right) - t\\
\mathbf{else}:\\
\;\;\;\;x \cdot \log y - t\\
\end{array}
\end{array}
if z < -6.59999999999999965e172:
Initial program 29.2%
Taylor expanded in x around 0 15.2%
sub-neg 15.2%
log1p-define 82.5%
Simplified 82.5%
if -6.59999999999999965e172 < z:
Initial program 91.0%
+-commutative 91.0%
associate--l+ 91.0%
fma-define 91.0%
sub-neg 91.0%
log1p-define 99.8%
Simplified 99.8%
Taylor expanded in z around 0 90.5%
(FPCore (x y z t) :precision binary64 (if (<= z -1.85e+176) (- (* y (* z (+ (* y -0.5) -1.0))) t) (- (* x (log y)) t)))
/* Piecewise form branching on z: for enormously negative z the series
   z*(-y - y^2/2) - t is used; otherwise only x*log(y) - t is kept. */
double code(double x, double y, double z, double t) {
double tmp;
if (z <= -1.85e+176) {
tmp = (y * (z * ((y * -0.5) + -1.0))) - t;
} else {
tmp = (x * log(y)) - t;
}
return tmp;
}
! Piecewise form branching on z: for enormously negative z the series
! z*(-y - y**2/2) - t is used; otherwise only x*log(y) - t is kept.
real(8) function code(x, y, z, t)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: t
real(8) :: tmp
if (z <= (-1.85d+176)) then
tmp = (y * (z * ((y * (-0.5d0)) + (-1.0d0)))) - t
else
tmp = (x * log(y)) - t
end if
code = tmp
end function
public static double code(double x, double y, double z, double t) {
double tmp;
if (z <= -1.85e+176) {
tmp = (y * (z * ((y * -0.5) + -1.0))) - t;
} else {
tmp = (x * Math.log(y)) - t;
}
return tmp;
}
def code(x, y, z, t):
    """Piecewise form branching on z: enormously negative z uses the series
    z*(-y - y**2/2) - t; otherwise only x*log(y) - t is kept.

    NOTE(review): reflowed from a single collapsed line that was not valid
    Python syntax; the logic is unchanged.
    """
    tmp = 0
    if z <= -1.85e+176:
        tmp = (y * (z * ((y * -0.5) + -1.0))) - t
    else:
        tmp = (x * math.log(y)) - t
    return tmp
# Piecewise form branching on z: enormously negative z uses the series
# z*(-y - y^2/2) - t; otherwise only x*log(y) - t is kept.
# NOTE(review): reflowed from a collapsed one-line emission; logic unchanged.
function code(x, y, z, t)
    tmp = 0.0
    if z <= -1.85e+176
        tmp = Float64(Float64(y * Float64(z * Float64(Float64(y * -0.5) + -1.0))) - t)
    else
        tmp = Float64(Float64(x * log(y)) - t)
    end
    return tmp
end
% Piecewise form branching on z: enormously negative z uses the series
% z*(-y - y^2/2) - t; otherwise only x*log(y) - t is kept.
% NOTE(review): reflowed from a collapsed one-line emission; logic unchanged.
function tmp_2 = code(x, y, z, t)
	tmp = 0.0;
	if (z <= -1.85e+176)
		tmp = (y * (z * ((y * -0.5) + -1.0))) - t;
	else
		tmp = (x * log(y)) - t;
	end
	tmp_2 = tmp;
end
code[x_, y_, z_, t_] := If[LessEqual[z, -1.85e+176], N[(N[(y * N[(z * N[(N[(y * -0.5), $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision], N[(N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;z \leq -1.85 \cdot 10^{+176}:\\
\;\;\;\;y \cdot \left(z \cdot \left(y \cdot -0.5 + -1\right)\right) - t\\
\mathbf{else}:\\
\;\;\;\;x \cdot \log y - t\\
\end{array}
\end{array}
if z < -1.8499999999999999e176:
Initial program 29.2%
Taylor expanded in y around 0 99.8%
Taylor expanded in y around 0 99.3%
*-commutative 99.3%
Simplified 99.3%
Taylor expanded in x around 0 82.0%
if -1.8499999999999999e176 < z:
Initial program 91.0%
+-commutative 91.0%
associate--l+ 91.0%
fma-define 91.0%
sub-neg 91.0%
log1p-define 99.8%
Simplified 99.8%
Taylor expanded in z around 0 90.5%
Final simplification 89.5%
(FPCore (x y z t) :precision binary64 (- (- (* x (log y)) (* z y)) t))
double code(double x, double y, double z, double t) {
return ((x * log(y)) - (z * y)) - t;
}
! x*log(y) - z*y - t: log(1 - y) approximated by its first series term -y.
real(8) function code(x, y, z, t)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: t
code = ((x * log(y)) - (z * y)) - t
end function
public static double code(double x, double y, double z, double t) {
return ((x * Math.log(y)) - (z * y)) - t;
}
def code(x, y, z, t):
    """x*log(y) - z*y - t; log(1 - y) approximated by its leading term -y."""
    lead = x * math.log(y)
    return (lead - (z * y)) - t
function code(x, y, z, t) return Float64(Float64(Float64(x * log(y)) - Float64(z * y)) - t) end
function tmp = code(x, y, z, t) tmp = ((x * log(y)) - (z * y)) - t; end
code[x_, y_, z_, t_] := N[(N[(N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision] - N[(z * y), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}
\\
\left(x \cdot \log y - z \cdot y\right) - t
\end{array}
Initial program 84.2%
Taylor expanded in y around 0 99.1%
+-commutative 99.1%
mul-1-neg 99.1%
unsub-neg 99.1%
Simplified 99.1%
Final simplification 99.1%
(FPCore (x y z t) :precision binary64 (- (* y (* z (+ (* y -0.5) -1.0))) t))
double code(double x, double y, double z, double t) {
return (y * (z * ((y * -0.5) + -1.0))) - t;
}
! z*(-y - y**2/2) - t: the x*log(y) term is dropped entirely.
real(8) function code(x, y, z, t)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: t
code = (y * (z * ((y * (-0.5d0)) + (-1.0d0)))) - t
end function
public static double code(double x, double y, double z, double t) {
return (y * (z * ((y * -0.5) + -1.0))) - t;
}
def code(x, y, z, t):
    """z*(-y - y**2/2) - t; the x*log(y) term is dropped entirely."""
    inner = (y * -0.5) + -1.0
    return (y * (z * inner)) - t
function code(x, y, z, t) return Float64(Float64(y * Float64(z * Float64(Float64(y * -0.5) + -1.0))) - t) end
function tmp = code(x, y, z, t) tmp = (y * (z * ((y * -0.5) + -1.0))) - t; end
code[x_, y_, z_, t_] := N[(N[(y * N[(z * N[(N[(y * -0.5), $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}
\\
y \cdot \left(z \cdot \left(y \cdot -0.5 + -1\right)\right) - t
\end{array}
Initial program 84.2%
Taylor expanded in y around 0 99.5%
Taylor expanded in y around 0 99.4%
*-commutative 99.4%
Simplified 99.4%
Taylor expanded in x around 0 53.8%
Final simplification 53.8%
(FPCore (x y z t) :precision binary64 (- (- t) (* z y)))
double code(double x, double y, double z, double t) {
return -t - (z * y);
}
! -t - z*y: both logarithm terms are dropped.
real(8) function code(x, y, z, t)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: t
code = -t - (z * y)
end function
public static double code(double x, double y, double z, double t) {
return -t - (z * y);
}
def code(x, y, z, t):
    """-t - z*y; both logarithm terms are dropped."""
    negated = -t
    return negated - (z * y)
function code(x, y, z, t) return Float64(Float64(-t) - Float64(z * y)) end
function tmp = code(x, y, z, t) tmp = -t - (z * y); end
code[x_, y_, z_, t_] := N[((-t) - N[(z * y), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(-t\right) - z \cdot y
\end{array}
Initial program 84.2%
+-commutative 84.2%
associate--l+ 84.2%
fma-define 84.2%
sub-neg 84.2%
log1p-define 99.8%
Simplified 99.8%
add-sqr-sqrt 99.7%
distribute-rgt-neg-in 99.7%
Applied egg-rr 99.7%
Taylor expanded in y around 0 99.1%
+-commutative 99.1%
fma-define 99.1%
mul-1-neg 99.1%
distribute-rgt-neg-in 99.1%
Simplified 99.1%
Taylor expanded in x around 0 53.5%
neg-mul-1 53.5%
distribute-rgt-neg-in 53.5%
Simplified 53.5%
Final simplification 53.5%
(FPCore (x y z t) :precision binary64 (- t))
/* Degenerate alternative: only -t survives; all other terms dropped. */
double code(double x, double y, double z, double t) {
return -t;
}
! Degenerate alternative: only -t survives; all other terms dropped.
real(8) function code(x, y, z, t)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: t
code = -t
end function
public static double code(double x, double y, double z, double t) {
return -t;
}
def code(x, y, z, t): return -t
function code(x, y, z, t) return Float64(-t) end
function tmp = code(x, y, z, t) tmp = -t; end
code[x_, y_, z_, t_] := (-t)
\begin{array}{l}
\\
-t
\end{array}
Initial program 84.2%
+-commutative 84.2%
associate--l+ 84.2%
fma-define 84.2%
sub-neg 84.2%
log1p-define 99.8%
Simplified 99.8%
add-sqr-sqrt 99.7%
distribute-rgt-neg-in 99.7%
Applied egg-rr 99.7%
Taylor expanded in t around inf 38.2%
neg-mul-1 38.2%
Simplified 38.2%
(FPCore (x y z t) :precision binary64 t)
/* Degenerate alternative: returns t unchanged (sign rewrites collapsed
   the whole expression, per the derivation log below). */
double code(double x, double y, double z, double t) {
return t;
}
! Degenerate alternative: returns t unchanged (sign rewrites collapsed
! the whole expression, per the derivation log below).
real(8) function code(x, y, z, t)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: t
code = t
end function
public static double code(double x, double y, double z, double t) {
return t;
}
def code(x, y, z, t): return t
function code(x, y, z, t) return t end
function tmp = code(x, y, z, t) tmp = t; end
code[x_, y_, z_, t_] := t
\begin{array}{l}
\\
t
\end{array}
Initial program 84.2%
+-commutative 84.2%
associate--l+ 84.2%
fma-define 84.2%
sub-neg 84.2%
log1p-define 99.8%
Simplified 99.8%
add-sqr-sqrt 99.7%
distribute-rgt-neg-in 99.7%
Applied egg-rr 99.7%
Taylor expanded in t around inf 38.2%
neg-mul-1 38.2%
Simplified 38.2%
neg-sub0 38.2%
sub-neg 38.2%
add-sqr-sqrt 17.6%
sqrt-unprod 11.2%
sqr-neg 11.2%
sqrt-unprod 1.1%
add-sqr-sqrt 2.5%
Applied egg-rr 2.5%
+-lft-identity 2.5%
Simplified 2.5%
(FPCore (x y z t)
:precision binary64
(-
(*
(- z)
(+
(+ (* 0.5 (* y y)) y)
(* (/ 0.3333333333333333 (* 1.0 (* 1.0 1.0))) (* y (* y y)))))
(- t (* x (log y)))))
/* Target alternative: -z*(y + y^2/2 + y^3/3) - (t - x*log(y)).
   The 0.333.../(1.0*(1.0*1.0)) divisor is generated verbatim from the
   FPCore :alt annotation; dividing by exactly 1.0 is a no-op. */
double code(double x, double y, double z, double t) {
return (-z * (((0.5 * (y * y)) + y) + ((0.3333333333333333 / (1.0 * (1.0 * 1.0))) * (y * (y * y))))) - (t - (x * log(y)));
}
! Target alternative: -z*(y + y**2/2 + y**3/3) - (t - x*log(y)).
! The 0.333.../(1*(1*1)) divisor is generated verbatim from the FPCore
! :alt annotation; dividing by exactly 1 is a no-op.
real(8) function code(x, y, z, t)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: t
code = (-z * (((0.5d0 * (y * y)) + y) + ((0.3333333333333333d0 / (1.0d0 * (1.0d0 * 1.0d0))) * (y * (y * y))))) - (t - (x * log(y)))
end function
public static double code(double x, double y, double z, double t) {
return (-z * (((0.5 * (y * y)) + y) + ((0.3333333333333333 / (1.0 * (1.0 * 1.0))) * (y * (y * y))))) - (t - (x * Math.log(y)));
}
def code(x, y, z, t):
    """Target alternative: -z*(y + y**2/2 + y**3/3) - (t - x*log(y))."""
    quadratic = (0.5 * (y * y)) + y
    cubic_coeff = 0.3333333333333333 / (1.0 * (1.0 * 1.0))
    series = quadratic + (cubic_coeff * (y * (y * y)))
    return (-z * series) - (t - (x * math.log(y)))
function code(x, y, z, t) return Float64(Float64(Float64(-z) * Float64(Float64(Float64(0.5 * Float64(y * y)) + y) + Float64(Float64(0.3333333333333333 / Float64(1.0 * Float64(1.0 * 1.0))) * Float64(y * Float64(y * y))))) - Float64(t - Float64(x * log(y)))) end
function tmp = code(x, y, z, t) tmp = (-z * (((0.5 * (y * y)) + y) + ((0.3333333333333333 / (1.0 * (1.0 * 1.0))) * (y * (y * y))))) - (t - (x * log(y))); end
code[x_, y_, z_, t_] := N[(N[((-z) * N[(N[(N[(0.5 * N[(y * y), $MachinePrecision]), $MachinePrecision] + y), $MachinePrecision] + N[(N[(0.3333333333333333 / N[(1.0 * N[(1.0 * 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(y * N[(y * y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(t - N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(-z\right) \cdot \left(\left(0.5 \cdot \left(y \cdot y\right) + y\right) + \frac{0.3333333333333333}{1 \cdot \left(1 \cdot 1\right)} \cdot \left(y \cdot \left(y \cdot y\right)\right)\right) - \left(t - x \cdot \log y\right)
\end{array}
herbie shell --seed 2024118
(FPCore (x y z t)
:name "Numeric.SpecFunctions:invIncompleteBetaWorker from math-functions-0.1.5.2, B"
:precision binary64
:alt
(! :herbie-platform default (- (* (- z) (+ (+ (* 1/2 (* y y)) y) (* (/ 1/3 (* 1 (* 1 1))) (* y (* y y))))) (- t (* x (log y)))))
(- (+ (* x (log y)) (* z (log (- 1.0 y)))) t))