
(FPCore (x y z t) :precision binary64 (- (+ (* x (log y)) (* z (log (- 1.0 y)))) t))
double code(double x, double y, double z, double t) {
return ((x * log(y)) + (z * log((1.0 - y)))) - t;
}
! Compute x*log(y) + z*log(1 - y) - t in double precision.
! Direct translation of the FPCore expression above; the log1p-based
! alternative later in this report is more accurate for small y, where
! 1.0d0 - y rounds away the low bits of y.
real(8) function code(x, y, z, t)
implicit none
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: t
code = ((x * log(y)) + (z * log((1.0d0 - y)))) - t
end function
public static double code(double x, double y, double z, double t) {
return ((x * Math.log(y)) + (z * Math.log((1.0 - y)))) - t;
}
def code(x, y, z, t):
    """Evaluate x*log(y) + z*log(1 - y) - t with the same operation order
    as the FPCore source expression."""
    first = x * math.log(y)
    second = z * math.log(1.0 - y)
    return (first + second) - t
function code(x, y, z, t) return Float64(Float64(Float64(x * log(y)) + Float64(z * log(Float64(1.0 - y)))) - t) end
function tmp = code(x, y, z, t) tmp = ((x * log(y)) + (z * log((1.0 - y)))) - t; end
code[x_, y_, z_, t_] := N[(N[(N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision] + N[(z * N[Log[N[(1.0 - y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}
\\
\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 13 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x y z t) :precision binary64 (- (+ (* x (log y)) (* z (log (- 1.0 y)))) t))
double code(double x, double y, double z, double t) {
return ((x * log(y)) + (z * log((1.0 - y)))) - t;
}
! Baseline translation (repeated for the alternatives table):
! x*log(y) + z*log(1 - y) - t.
! NOTE(review): no implicit none; all dummies are declared explicitly.
real(8) function code(x, y, z, t)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: t
! Direct evaluation; the log1p variant below avoids the 1.0d0 - y rounding.
code = ((x * log(y)) + (z * log((1.0d0 - y)))) - t
end function
public static double code(double x, double y, double z, double t) {
return ((x * Math.log(y)) + (z * Math.log((1.0 - y)))) - t;
}
def code(x, y, z, t): return ((x * math.log(y)) + (z * math.log((1.0 - y)))) - t
function code(x, y, z, t) return Float64(Float64(Float64(x * log(y)) + Float64(z * log(Float64(1.0 - y)))) - t) end
function tmp = code(x, y, z, t) tmp = ((x * log(y)) + (z * log((1.0 - y)))) - t; end
code[x_, y_, z_, t_] := N[(N[(N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision] + N[(z * N[Log[N[(1.0 - y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}
\\
\left(x \cdot \log y + z \cdot \log \left(1 - y\right)\right) - t
\end{array}
(FPCore (x y z t) :precision binary64 (- (+ (* x (log y)) (* z (log1p (- 0.0 y)))) t))
double code(double x, double y, double z, double t) {
return ((x * log(y)) + (z * log1p((0.0 - y)))) - t;
}
public static double code(double x, double y, double z, double t) {
return ((x * Math.log(y)) + (z * Math.log1p((0.0 - y)))) - t;
}
def code(x, y, z, t): return ((x * math.log(y)) + (z * math.log1p((0.0 - y)))) - t
function code(x, y, z, t) return Float64(Float64(Float64(x * log(y)) + Float64(z * log1p(Float64(0.0 - y)))) - t) end
code[x_, y_, z_, t_] := N[(N[(N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision] + N[(z * N[Log[1 + N[(0.0 - y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}
\\
\left(x \cdot \log y + z \cdot \mathsf{log1p}\left(0 - y\right)\right) - t
\end{array}
Initial program 83.7%
sub-negN/A
log1p-defineN/A
log1p-lowering-log1p.f64N/A
neg-sub0N/A
--lowering--.f6499.8%
Applied egg-rr99.8%
(FPCore (x y z t)
:precision binary64
(let* ((t_1 (+ -0.5 (* y (+ -0.3333333333333333 (* y -0.25))))))
(-
(+
(* x (log y))
(/
(* (+ -1.0 (* (* y (* y y)) (* t_1 (* t_1 t_1)))) (* y z))
(+ 1.0 (* (* y t_1) (- (* y (+ -0.5 (* y -0.3333333333333333))) -1.0)))))
t)))
double code(double x, double y, double z, double t) {
double t_1 = -0.5 + (y * (-0.3333333333333333 + (y * -0.25)));
return ((x * log(y)) + (((-1.0 + ((y * (y * y)) * (t_1 * (t_1 * t_1)))) * (y * z)) / (1.0 + ((y * t_1) * ((y * (-0.5 + (y * -0.3333333333333333))) - -1.0))))) - t;
}
! Herbie rewrite: z*log(1 - y) is replaced by a rational form built from the
! Taylor expansion of log(1 - y) in y around 0 (see the derivation log below).
! Statement order and exact parenthesization are significant for the reported
! 99.6% accuracy — do not re-associate.
real(8) function code(x, y, z, t)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: t
real(8) :: t_1
! t_1 = -1/2 - y/3 - y^2/4 : shared coefficient polynomial of the expansion
t_1 = (-0.5d0) + (y * ((-0.3333333333333333d0) + (y * (-0.25d0))))
! numerator (-1 + y^3 * t_1^3) * y*z over denominator 1 + y*t_1*(...), then
! add x*log(y) and subtract t as in the original expression
code = ((x * log(y)) + ((((-1.0d0) + ((y * (y * y)) * (t_1 * (t_1 * t_1)))) * (y * z)) / (1.0d0 + ((y * t_1) * ((y * ((-0.5d0) + (y * (-0.3333333333333333d0)))) - (-1.0d0)))))) - t
end function
public static double code(double x, double y, double z, double t) {
double t_1 = -0.5 + (y * (-0.3333333333333333 + (y * -0.25)));
return ((x * Math.log(y)) + (((-1.0 + ((y * (y * y)) * (t_1 * (t_1 * t_1)))) * (y * z)) / (1.0 + ((y * t_1) * ((y * (-0.5 + (y * -0.3333333333333333))) - -1.0))))) - t;
}
def code(x, y, z, t): t_1 = -0.5 + (y * (-0.3333333333333333 + (y * -0.25))) return ((x * math.log(y)) + (((-1.0 + ((y * (y * y)) * (t_1 * (t_1 * t_1)))) * (y * z)) / (1.0 + ((y * t_1) * ((y * (-0.5 + (y * -0.3333333333333333))) - -1.0))))) - t
function code(x, y, z, t) t_1 = Float64(-0.5 + Float64(y * Float64(-0.3333333333333333 + Float64(y * -0.25)))) return Float64(Float64(Float64(x * log(y)) + Float64(Float64(Float64(-1.0 + Float64(Float64(y * Float64(y * y)) * Float64(t_1 * Float64(t_1 * t_1)))) * Float64(y * z)) / Float64(1.0 + Float64(Float64(y * t_1) * Float64(Float64(y * Float64(-0.5 + Float64(y * -0.3333333333333333))) - -1.0))))) - t) end
function tmp = code(x, y, z, t) t_1 = -0.5 + (y * (-0.3333333333333333 + (y * -0.25))); tmp = ((x * log(y)) + (((-1.0 + ((y * (y * y)) * (t_1 * (t_1 * t_1)))) * (y * z)) / (1.0 + ((y * t_1) * ((y * (-0.5 + (y * -0.3333333333333333))) - -1.0))))) - t; end
code[x_, y_, z_, t_] := Block[{t$95$1 = N[(-0.5 + N[(y * N[(-0.3333333333333333 + N[(y * -0.25), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, N[(N[(N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision] + N[(N[(N[(-1.0 + N[(N[(y * N[(y * y), $MachinePrecision]), $MachinePrecision] * N[(t$95$1 * N[(t$95$1 * t$95$1), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(y * z), $MachinePrecision]), $MachinePrecision] / N[(1.0 + N[(N[(y * t$95$1), $MachinePrecision] * N[(N[(y * N[(-0.5 + N[(y * -0.3333333333333333), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_1 := -0.5 + y \cdot \left(-0.3333333333333333 + y \cdot -0.25\right)\\
\left(x \cdot \log y + \frac{\left(-1 + \left(y \cdot \left(y \cdot y\right)\right) \cdot \left(t\_1 \cdot \left(t\_1 \cdot t\_1\right)\right)\right) \cdot \left(y \cdot z\right)}{1 + \left(y \cdot t\_1\right) \cdot \left(y \cdot \left(-0.5 + y \cdot -0.3333333333333333\right) - -1\right)}\right) - t
\end{array}
\end{array}
Initial program 83.7%
Taylor expanded in y around 0
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f64N/A
*-commutativeN/A
*-lowering-*.f6499.6%
Simplified99.6%
Applied egg-rr99.6%
Taylor expanded in y around 0
*-commutativeN/A
*-lowering-*.f6499.6%
Simplified99.6%
Final simplification99.6%
(FPCore (x y z t)
:precision binary64
(-
(+
(* x (log y))
(*
y
(-
(* y (+ (* z -0.5) (* y (* z (+ -0.3333333333333333 (* y -0.25))))))
z)))
t))
double code(double x, double y, double z, double t) {
return ((x * log(y)) + (y * ((y * ((z * -0.5) + (y * (z * (-0.3333333333333333 + (y * -0.25)))))) - z))) - t;
}
! Herbie alternative: z*log(1 - y) approximated by its degree-4 Taylor
! polynomial in y around 0, Horner-evaluated with z folded into each
! coefficient: y*(y*(-z/2 + y*z*(-1/3 + y*(-1/4))) - z).
real(8) function code(x, y, z, t)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: t
code = ((x * log(y)) + (y * ((y * ((z * (-0.5d0)) + (y * (z * ((-0.3333333333333333d0) + (y * (-0.25d0))))))) - z))) - t
end function
public static double code(double x, double y, double z, double t) {
return ((x * Math.log(y)) + (y * ((y * ((z * -0.5) + (y * (z * (-0.3333333333333333 + (y * -0.25)))))) - z))) - t;
}
def code(x, y, z, t): return ((x * math.log(y)) + (y * ((y * ((z * -0.5) + (y * (z * (-0.3333333333333333 + (y * -0.25)))))) - z))) - t
function code(x, y, z, t) return Float64(Float64(Float64(x * log(y)) + Float64(y * Float64(Float64(y * Float64(Float64(z * -0.5) + Float64(y * Float64(z * Float64(-0.3333333333333333 + Float64(y * -0.25)))))) - z))) - t) end
function tmp = code(x, y, z, t) tmp = ((x * log(y)) + (y * ((y * ((z * -0.5) + (y * (z * (-0.3333333333333333 + (y * -0.25)))))) - z))) - t; end
code[x_, y_, z_, t_] := N[(N[(N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision] + N[(y * N[(N[(y * N[(N[(z * -0.5), $MachinePrecision] + N[(y * N[(z * N[(-0.3333333333333333 + N[(y * -0.25), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - z), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}
\\
\left(x \cdot \log y + y \cdot \left(y \cdot \left(z \cdot -0.5 + y \cdot \left(z \cdot \left(-0.3333333333333333 + y \cdot -0.25\right)\right)\right) - z\right)\right) - t
\end{array}
Initial program 83.7%
Taylor expanded in y around 0
*-lowering-*.f64N/A
+-commutativeN/A
mul-1-negN/A
unsub-negN/A
--lowering--.f64N/A
Simplified99.6%
(FPCore (x y z t)
:precision binary64
(-
(+
(* x (log y))
(*
z
(* y (+ -1.0 (* y (+ -0.5 (* y (+ -0.3333333333333333 (* y -0.25)))))))))
t))
double code(double x, double y, double z, double t) {
return ((x * log(y)) + (z * (y * (-1.0 + (y * (-0.5 + (y * (-0.3333333333333333 + (y * -0.25))))))))) - t;
}
! Herbie alternative: z * y * (-1 - y/2 - y^2/3 - y^3/4), the degree-4
! Taylor polynomial of log(1 - y) around y = 0, with z factored outside
! the Horner chain.
real(8) function code(x, y, z, t)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: t
code = ((x * log(y)) + (z * (y * ((-1.0d0) + (y * ((-0.5d0) + (y * ((-0.3333333333333333d0) + (y * (-0.25d0)))))))))) - t
end function
public static double code(double x, double y, double z, double t) {
return ((x * Math.log(y)) + (z * (y * (-1.0 + (y * (-0.5 + (y * (-0.3333333333333333 + (y * -0.25))))))))) - t;
}
def code(x, y, z, t): return ((x * math.log(y)) + (z * (y * (-1.0 + (y * (-0.5 + (y * (-0.3333333333333333 + (y * -0.25))))))))) - t
function code(x, y, z, t) return Float64(Float64(Float64(x * log(y)) + Float64(z * Float64(y * Float64(-1.0 + Float64(y * Float64(-0.5 + Float64(y * Float64(-0.3333333333333333 + Float64(y * -0.25))))))))) - t) end
function tmp = code(x, y, z, t) tmp = ((x * log(y)) + (z * (y * (-1.0 + (y * (-0.5 + (y * (-0.3333333333333333 + (y * -0.25))))))))) - t; end
code[x_, y_, z_, t_] := N[(N[(N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision] + N[(z * N[(y * N[(-1.0 + N[(y * N[(-0.5 + N[(y * N[(-0.3333333333333333 + N[(y * -0.25), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}
\\
\left(x \cdot \log y + z \cdot \left(y \cdot \left(-1 + y \cdot \left(-0.5 + y \cdot \left(-0.3333333333333333 + y \cdot -0.25\right)\right)\right)\right)\right) - t
\end{array}
Initial program 83.7%
Taylor expanded in y around 0
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f64N/A
*-commutativeN/A
*-lowering-*.f6499.6%
Simplified99.6%
(FPCore (x y z t) :precision binary64 (- (+ (* x (log y)) (* y (- (* (+ -0.5 (* y -0.3333333333333333)) (* y z)) z))) t))
double code(double x, double y, double z, double t) {
return ((x * log(y)) + (y * (((-0.5 + (y * -0.3333333333333333)) * (y * z)) - z))) - t;
}
! Herbie alternative: degree-3 truncation of the series for z*log(1 - y),
! arranged as y * ((-1/2 - y/3) * (y*z) - z).
real(8) function code(x, y, z, t)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: t
code = ((x * log(y)) + (y * ((((-0.5d0) + (y * (-0.3333333333333333d0))) * (y * z)) - z))) - t
end function
public static double code(double x, double y, double z, double t) {
return ((x * Math.log(y)) + (y * (((-0.5 + (y * -0.3333333333333333)) * (y * z)) - z))) - t;
}
def code(x, y, z, t): return ((x * math.log(y)) + (y * (((-0.5 + (y * -0.3333333333333333)) * (y * z)) - z))) - t
function code(x, y, z, t) return Float64(Float64(Float64(x * log(y)) + Float64(y * Float64(Float64(Float64(-0.5 + Float64(y * -0.3333333333333333)) * Float64(y * z)) - z))) - t) end
function tmp = code(x, y, z, t) tmp = ((x * log(y)) + (y * (((-0.5 + (y * -0.3333333333333333)) * (y * z)) - z))) - t; end
code[x_, y_, z_, t_] := N[(N[(N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision] + N[(y * N[(N[(N[(-0.5 + N[(y * -0.3333333333333333), $MachinePrecision]), $MachinePrecision] * N[(y * z), $MachinePrecision]), $MachinePrecision] - z), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}
\\
\left(x \cdot \log y + y \cdot \left(\left(-0.5 + y \cdot -0.3333333333333333\right) \cdot \left(y \cdot z\right) - z\right)\right) - t
\end{array}
Initial program 83.7%
Taylor expanded in y around 0
+-commutativeN/A
remove-double-negN/A
log-recN/A
distribute-rgt-neg-inN/A
mul-1-negN/A
+-lowering-+.f64N/A
Simplified99.5%
Final simplification99.5%
(FPCore (x y z t) :precision binary64 (- (+ (* x (log y)) (* z (* y (+ -1.0 (* y (+ -0.5 (* y -0.3333333333333333))))))) t))
double code(double x, double y, double z, double t) {
return ((x * log(y)) + (z * (y * (-1.0 + (y * (-0.5 + (y * -0.3333333333333333))))))) - t;
}
! Herbie alternative: z * y * (-1 - y/2 - y^2/3), the degree-3 Taylor
! polynomial of log(1 - y) around y = 0, Horner-evaluated.
real(8) function code(x, y, z, t)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: t
code = ((x * log(y)) + (z * (y * ((-1.0d0) + (y * ((-0.5d0) + (y * (-0.3333333333333333d0)))))))) - t
end function
public static double code(double x, double y, double z, double t) {
return ((x * Math.log(y)) + (z * (y * (-1.0 + (y * (-0.5 + (y * -0.3333333333333333))))))) - t;
}
def code(x, y, z, t): return ((x * math.log(y)) + (z * (y * (-1.0 + (y * (-0.5 + (y * -0.3333333333333333))))))) - t
function code(x, y, z, t) return Float64(Float64(Float64(x * log(y)) + Float64(z * Float64(y * Float64(-1.0 + Float64(y * Float64(-0.5 + Float64(y * -0.3333333333333333))))))) - t) end
function tmp = code(x, y, z, t) tmp = ((x * log(y)) + (z * (y * (-1.0 + (y * (-0.5 + (y * -0.3333333333333333))))))) - t; end
code[x_, y_, z_, t_] := N[(N[(N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision] + N[(z * N[(y * N[(-1.0 + N[(y * N[(-0.5 + N[(y * -0.3333333333333333), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}
\\
\left(x \cdot \log y + z \cdot \left(y \cdot \left(-1 + y \cdot \left(-0.5 + y \cdot -0.3333333333333333\right)\right)\right)\right) - t
\end{array}
Initial program 83.7%
Taylor expanded in y around 0
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f64N/A
*-commutativeN/A
*-lowering-*.f6499.5%
Simplified99.5%
(FPCore (x y z t) :precision binary64 (let* ((t_1 (- (* x (log y)) t))) (if (<= x -4e-118) t_1 (if (<= x 2.3e-45) (- (* y (- 0.0 z)) t) t_1))))
/* Regime-split alternative chosen by Herbie: for x outside the band
 * (-4e-118, 2.3e-45] use x*log(y) - t (z term dropped); inside the band
 * use the series form y*(0 - z) - t instead. Thresholds come from
 * Herbie's sampled regime inference, not from analysis. */
double code(double x, double y, double z, double t) {
double t_1 = (x * log(y)) - t;
double tmp;
if (x <= -4e-118) {
tmp = t_1;
} else if (x <= 2.3e-45) {
tmp = (y * (0.0 - z)) - t;
} else {
tmp = t_1;
}
return tmp;
}
! Regime-split alternative: for x outside (-4d-118, 2.3d-45] evaluate
! x*log(y) - t (the z*log(1-y) term is dropped in that regime); inside
! the band use the series form y*(0 - z) - t. Thresholds come from
! Herbie's sampled regime inference.
real(8) function code(x, y, z, t)
implicit none
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: t
real(8) :: t_1
real(8) :: tmp
t_1 = (x * log(y)) - t
if (x <= (-4d-118)) then
tmp = t_1
else if (x <= 2.3d-45) then
tmp = (y * (0.0d0 - z)) - t
else
tmp = t_1
end if
code = tmp
end function
public static double code(double x, double y, double z, double t) {
double t_1 = (x * Math.log(y)) - t;
double tmp;
if (x <= -4e-118) {
tmp = t_1;
} else if (x <= 2.3e-45) {
tmp = (y * (0.0 - z)) - t;
} else {
tmp = t_1;
}
return tmp;
}
def code(x, y, z, t): t_1 = (x * math.log(y)) - t tmp = 0 if x <= -4e-118: tmp = t_1 elif x <= 2.3e-45: tmp = (y * (0.0 - z)) - t else: tmp = t_1 return tmp
function code(x, y, z, t) t_1 = Float64(Float64(x * log(y)) - t) tmp = 0.0 if (x <= -4e-118) tmp = t_1; elseif (x <= 2.3e-45) tmp = Float64(Float64(y * Float64(0.0 - z)) - t); else tmp = t_1; end return tmp end
function tmp_2 = code(x, y, z, t) t_1 = (x * log(y)) - t; tmp = 0.0; if (x <= -4e-118) tmp = t_1; elseif (x <= 2.3e-45) tmp = (y * (0.0 - z)) - t; else tmp = t_1; end tmp_2 = tmp; end
code[x_, y_, z_, t_] := Block[{t$95$1 = N[(N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]}, If[LessEqual[x, -4e-118], t$95$1, If[LessEqual[x, 2.3e-45], N[(N[(y * N[(0.0 - z), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision], t$95$1]]]
\begin{array}{l}
\\
\begin{array}{l}
t_1 := x \cdot \log y - t\\
\mathbf{if}\;x \leq -4 \cdot 10^{-118}:\\
\;\;\;\;t\_1\\
\mathbf{elif}\;x \leq 2.3 \cdot 10^{-45}:\\
\;\;\;\;y \cdot \left(0 - z\right) - t\\
\mathbf{else}:\\
\;\;\;\;t\_1\\
\end{array}
\end{array}
if x < -3.99999999999999994e-118 or 2.29999999999999992e-45 < x
Initial program 93.3%
Taylor expanded in y around 0
remove-double-negN/A
log-recN/A
distribute-rgt-neg-inN/A
mul-1-negN/A
--lowering--.f64N/A
mul-1-negN/A
distribute-rgt-neg-inN/A
log-recN/A
remove-double-negN/A
*-lowering-*.f64N/A
log-lowering-log.f6492.3%
Simplified92.3%
if -3.99999999999999994e-118 < x < 2.29999999999999992e-45
Initial program 68.4%
Taylor expanded in y around 0
+-commutativeN/A
mul-1-negN/A
unsub-negN/A
remove-double-negN/A
log-recN/A
distribute-rgt-neg-inN/A
mul-1-negN/A
--lowering--.f64N/A
mul-1-negN/A
distribute-rgt-neg-inN/A
log-recN/A
remove-double-negN/A
*-lowering-*.f64N/A
log-lowering-log.f64N/A
*-commutativeN/A
*-lowering-*.f6497.0%
Simplified97.0%
Taylor expanded in x around 0
mul-1-negN/A
neg-sub0N/A
--lowering--.f64N/A
*-commutativeN/A
*-lowering-*.f6494.0%
Simplified94.0%
Final simplification93.0%
(FPCore (x y z t) :precision binary64 (- (+ (* x (log y)) (* y (* z (+ -1.0 (* y -0.5))))) t))
double code(double x, double y, double z, double t) {
return ((x * log(y)) + (y * (z * (-1.0 + (y * -0.5))))) - t;
}
! Herbie alternative: y * z * (-1 - y/2), the degree-2 Taylor polynomial
! of log(1 - y) around y = 0 folded into the z term.
real(8) function code(x, y, z, t)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: t
code = ((x * log(y)) + (y * (z * ((-1.0d0) + (y * (-0.5d0)))))) - t
end function
public static double code(double x, double y, double z, double t) {
return ((x * Math.log(y)) + (y * (z * (-1.0 + (y * -0.5))))) - t;
}
def code(x, y, z, t): return ((x * math.log(y)) + (y * (z * (-1.0 + (y * -0.5))))) - t
function code(x, y, z, t) return Float64(Float64(Float64(x * log(y)) + Float64(y * Float64(z * Float64(-1.0 + Float64(y * -0.5))))) - t) end
function tmp = code(x, y, z, t) tmp = ((x * log(y)) + (y * (z * (-1.0 + (y * -0.5))))) - t; end
code[x_, y_, z_, t_] := N[(N[(N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision] + N[(y * N[(z * N[(-1.0 + N[(y * -0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}
\\
\left(x \cdot \log y + y \cdot \left(z \cdot \left(-1 + y \cdot -0.5\right)\right)\right) - t
\end{array}
Initial program 83.7%
sub-negN/A
log1p-defineN/A
log1p-lowering-log1p.f64N/A
neg-sub0N/A
--lowering--.f6499.8%
Applied egg-rr99.8%
Taylor expanded in y around 0
+-lowering-+.f64N/A
*-lowering-*.f64N/A
log-lowering-log.f64N/A
*-lowering-*.f64N/A
associate-*r*N/A
distribute-rgt-outN/A
+-commutativeN/A
metadata-evalN/A
sub-negN/A
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f64N/A
*-commutativeN/A
*-lowering-*.f6499.2%
Simplified99.2%
(FPCore (x y z t) :precision binary64 (let* ((t_1 (* x (log y)))) (if (<= x -1.5e+79) t_1 (if (<= x 1e+175) (- (* y (- 0.0 z)) t) t_1))))
double code(double x, double y, double z, double t) {
double t_1 = x * log(y);
double tmp;
if (x <= -1.5e+79) {
tmp = t_1;
} else if (x <= 1e+175) {
tmp = (y * (0.0 - z)) - t;
} else {
tmp = t_1;
}
return tmp;
}
! Regime-split alternative on x with very wide thresholds: for x outside
! (-1.5d79, 1d175] return x*log(y) only (NOTE: t is NOT subtracted in
! that branch — it is dominated by x*log(y) per Herbie's expansion in x
! around inf); inside the band return y*(0 - z) - t.
real(8) function code(x, y, z, t)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: t
real(8) :: t_1
real(8) :: tmp
t_1 = x * log(y)
if (x <= (-1.5d+79)) then
tmp = t_1
else if (x <= 1d+175) then
tmp = (y * (0.0d0 - z)) - t
else
tmp = t_1
end if
code = tmp
end function
public static double code(double x, double y, double z, double t) {
double t_1 = x * Math.log(y);
double tmp;
if (x <= -1.5e+79) {
tmp = t_1;
} else if (x <= 1e+175) {
tmp = (y * (0.0 - z)) - t;
} else {
tmp = t_1;
}
return tmp;
}
def code(x, y, z, t): t_1 = x * math.log(y) tmp = 0 if x <= -1.5e+79: tmp = t_1 elif x <= 1e+175: tmp = (y * (0.0 - z)) - t else: tmp = t_1 return tmp
function code(x, y, z, t) t_1 = Float64(x * log(y)) tmp = 0.0 if (x <= -1.5e+79) tmp = t_1; elseif (x <= 1e+175) tmp = Float64(Float64(y * Float64(0.0 - z)) - t); else tmp = t_1; end return tmp end
function tmp_2 = code(x, y, z, t) t_1 = x * log(y); tmp = 0.0; if (x <= -1.5e+79) tmp = t_1; elseif (x <= 1e+175) tmp = (y * (0.0 - z)) - t; else tmp = t_1; end tmp_2 = tmp; end
code[x_, y_, z_, t_] := Block[{t$95$1 = N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision]}, If[LessEqual[x, -1.5e+79], t$95$1, If[LessEqual[x, 1e+175], N[(N[(y * N[(0.0 - z), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision], t$95$1]]]
\begin{array}{l}
\\
\begin{array}{l}
t_1 := x \cdot \log y\\
\mathbf{if}\;x \leq -1.5 \cdot 10^{+79}:\\
\;\;\;\;t\_1\\
\mathbf{elif}\;x \leq 10^{+175}:\\
\;\;\;\;y \cdot \left(0 - z\right) - t\\
\mathbf{else}:\\
\;\;\;\;t\_1\\
\end{array}
\end{array}
if x < -1.49999999999999987e79 or 9.9999999999999994e174 < x
Initial program 99.0%
Taylor expanded in x around inf
*-lowering-*.f64N/A
log-lowering-log.f6478.3%
Simplified78.3%
if -1.49999999999999987e79 < x < 9.9999999999999994e174
Initial program 78.4%
Taylor expanded in y around 0
+-commutativeN/A
mul-1-negN/A
unsub-negN/A
remove-double-negN/A
log-recN/A
distribute-rgt-neg-inN/A
mul-1-negN/A
--lowering--.f64N/A
mul-1-negN/A
distribute-rgt-neg-inN/A
log-recN/A
remove-double-negN/A
*-lowering-*.f64N/A
log-lowering-log.f64N/A
*-commutativeN/A
*-lowering-*.f6498.0%
Simplified98.0%
Taylor expanded in x around 0
mul-1-negN/A
neg-sub0N/A
--lowering--.f64N/A
*-commutativeN/A
*-lowering-*.f6480.3%
Simplified80.3%
Final simplification79.8%
(FPCore (x y z t) :precision binary64 (- (- (* x (log y)) (* y z)) t))
double code(double x, double y, double z, double t) {
return ((x * log(y)) - (y * z)) - t;
}
! Herbie alternative: first-order approximation log(1 - y) ~= -y, giving
! x*log(y) - y*z - t.
real(8) function code(x, y, z, t)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: t
code = ((x * log(y)) - (y * z)) - t
end function
public static double code(double x, double y, double z, double t) {
return ((x * Math.log(y)) - (y * z)) - t;
}
def code(x, y, z, t): return ((x * math.log(y)) - (y * z)) - t
function code(x, y, z, t) return Float64(Float64(Float64(x * log(y)) - Float64(y * z)) - t) end
function tmp = code(x, y, z, t) tmp = ((x * log(y)) - (y * z)) - t; end
code[x_, y_, z_, t_] := N[(N[(N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision] - N[(y * z), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}
\\
\left(x \cdot \log y - y \cdot z\right) - t
\end{array}
Initial program 83.7%
Taylor expanded in y around 0
+-commutativeN/A
mul-1-negN/A
unsub-negN/A
remove-double-negN/A
log-recN/A
distribute-rgt-neg-inN/A
mul-1-negN/A
--lowering--.f64N/A
mul-1-negN/A
distribute-rgt-neg-inN/A
log-recN/A
remove-double-negN/A
*-lowering-*.f64N/A
log-lowering-log.f64N/A
*-commutativeN/A
*-lowering-*.f6498.3%
Simplified98.3%
Final simplification98.3%
(FPCore (x y z t) :precision binary64 (if (<= t -2.75e-86) (- 0.0 t) (if (<= t 5.2e-93) (* y (- 0.0 z)) (- 0.0 t))))
/* Regime-split on t: inside the band (-2.75e-86, 5.2e-93] return
 * y*(0 - z); outside it (both tails, and NaN) return 0.0 - t.
 * x is unused in this alternative but kept for interface compatibility. */
double code(double x, double y, double z, double t) {
    int in_band = (t > -2.75e-86) && (t <= 5.2e-93);
    if (in_band) {
        return y * (0.0 - z);
    }
    return 0.0 - t;
}
! Regime-split on t: inside the band (-2.75d-86, 5.2d-93] return
! y*(0 - z); outside it return 0 - t. x is unused in this alternative
! but kept so the interface matches the other variants.
real(8) function code(x, y, z, t)
implicit none
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: t
real(8) :: tmp
if (t <= (-2.75d-86)) then
tmp = 0.0d0 - t
else if (t <= 5.2d-93) then
tmp = y * (0.0d0 - z)
else
tmp = 0.0d0 - t
end if
code = tmp
end function
public static double code(double x, double y, double z, double t) {
double tmp;
if (t <= -2.75e-86) {
tmp = 0.0 - t;
} else if (t <= 5.2e-93) {
tmp = y * (0.0 - z);
} else {
tmp = 0.0 - t;
}
return tmp;
}
def code(x, y, z, t): tmp = 0 if t <= -2.75e-86: tmp = 0.0 - t elif t <= 5.2e-93: tmp = y * (0.0 - z) else: tmp = 0.0 - t return tmp
function code(x, y, z, t) tmp = 0.0 if (t <= -2.75e-86) tmp = Float64(0.0 - t); elseif (t <= 5.2e-93) tmp = Float64(y * Float64(0.0 - z)); else tmp = Float64(0.0 - t); end return tmp end
function tmp_2 = code(x, y, z, t) tmp = 0.0; if (t <= -2.75e-86) tmp = 0.0 - t; elseif (t <= 5.2e-93) tmp = y * (0.0 - z); else tmp = 0.0 - t; end tmp_2 = tmp; end
code[x_, y_, z_, t_] := If[LessEqual[t, -2.75e-86], N[(0.0 - t), $MachinePrecision], If[LessEqual[t, 5.2e-93], N[(y * N[(0.0 - z), $MachinePrecision]), $MachinePrecision], N[(0.0 - t), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;t \leq -2.75 \cdot 10^{-86}:\\
\;\;\;\;0 - t\\
\mathbf{elif}\;t \leq 5.2 \cdot 10^{-93}:\\
\;\;\;\;y \cdot \left(0 - z\right)\\
\mathbf{else}:\\
\;\;\;\;0 - t\\
\end{array}
\end{array}
if t < -2.75e-86 or 5.1999999999999997e-93 < t
Initial program 91.9%
Taylor expanded in t around inf
mul-1-negN/A
neg-sub0N/A
--lowering--.f6468.9%
Simplified68.9%
sub0-negN/A
neg-lowering-neg.f6468.9%
Applied egg-rr68.9%
if -2.75e-86 < t < 5.1999999999999997e-93
Initial program 67.4%
Taylor expanded in y around 0
+-commutativeN/A
mul-1-negN/A
unsub-negN/A
remove-double-negN/A
log-recN/A
distribute-rgt-neg-inN/A
mul-1-negN/A
--lowering--.f64N/A
mul-1-negN/A
distribute-rgt-neg-inN/A
log-recN/A
remove-double-negN/A
*-lowering-*.f64N/A
log-lowering-log.f64N/A
*-commutativeN/A
*-lowering-*.f6496.2%
Simplified96.2%
Taylor expanded in y around inf
mul-1-negN/A
neg-sub0N/A
--lowering--.f64N/A
*-commutativeN/A
*-lowering-*.f6437.8%
Simplified37.8%
sub0-negN/A
*-commutativeN/A
distribute-lft-neg-inN/A
*-lowering-*.f64N/A
neg-lowering-neg.f6437.8%
Applied egg-rr37.8%
Final simplification58.4%
(FPCore (x y z t) :precision binary64 (- (* y (- 0.0 z)) t))
double code(double x, double y, double z, double t) {
return (y * (0.0 - z)) - t;
}
! Herbie alternative keeping only the leading series term: y*(0 - z) - t.
! x is unused here but retained for interface compatibility.
real(8) function code(x, y, z, t)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: t
code = (y * (0.0d0 - z)) - t
end function
public static double code(double x, double y, double z, double t) {
return (y * (0.0 - z)) - t;
}
def code(x, y, z, t): return (y * (0.0 - z)) - t
function code(x, y, z, t) return Float64(Float64(y * Float64(0.0 - z)) - t) end
function tmp = code(x, y, z, t) tmp = (y * (0.0 - z)) - t; end
code[x_, y_, z_, t_] := N[(N[(y * N[(0.0 - z), $MachinePrecision]), $MachinePrecision] - t), $MachinePrecision]
\begin{array}{l}
\\
y \cdot \left(0 - z\right) - t
\end{array}
Initial program 83.7%
Taylor expanded in y around 0
+-commutativeN/A
mul-1-negN/A
unsub-negN/A
remove-double-negN/A
log-recN/A
distribute-rgt-neg-inN/A
mul-1-negN/A
--lowering--.f64N/A
mul-1-negN/A
distribute-rgt-neg-inN/A
log-recN/A
remove-double-negN/A
*-lowering-*.f64N/A
log-lowering-log.f64N/A
*-commutativeN/A
*-lowering-*.f6498.3%
Simplified98.3%
Taylor expanded in x around 0
mul-1-negN/A
neg-sub0N/A
--lowering--.f64N/A
*-commutativeN/A
*-lowering-*.f6465.1%
Simplified65.1%
Final simplification65.1%
(FPCore (x y z t) :precision binary64 (- 0.0 t))
double code(double x, double y, double z, double t) {
return 0.0 - t;
}
! Most aggressive Herbie reduction: only 0 - t survives (expansion in t
! around inf). x, y, z are unused but kept so all alternatives share one
! interface.
real(8) function code(x, y, z, t)
implicit none
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: t
code = 0.0d0 - t
end function
public static double code(double x, double y, double z, double t) {
return 0.0 - t;
}
def code(x, y, z, t): return 0.0 - t
function code(x, y, z, t) return Float64(0.0 - t) end
function tmp = code(x, y, z, t) tmp = 0.0 - t; end
code[x_, y_, z_, t_] := N[(0.0 - t), $MachinePrecision]
\begin{array}{l}
\\
0 - t
\end{array}
Initial program 83.7%
Taylor expanded in t around inf
mul-1-negN/A
neg-sub0N/A
--lowering--.f6447.9%
Simplified47.9%
sub0-negN/A
neg-lowering-neg.f6447.9%
Applied egg-rr47.9%
Final simplification47.9%
(FPCore (x y z t)
:precision binary64
(-
(*
(- z)
(+
(+ (* 0.5 (* y y)) y)
(* (/ 0.3333333333333333 (* 1.0 (* 1.0 1.0))) (* y (* y y)))))
(- t (* x (log y)))))
double code(double x, double y, double z, double t) {
return (-z * (((0.5 * (y * y)) + y) + ((0.3333333333333333 / (1.0 * (1.0 * 1.0))) * (y * (y * y))))) - (t - (x * log(y)));
}
! User-supplied :alt form from the FPCore at the end of this report:
! -z * (y + y^2/2 + y^3/3) - (t - x*log(y)).
! NOTE(review): the factor 0.3333333333333333d0 / (1.0d0*(1.0d0*1.0d0))
! is just 1/3 — the identity denominator is a generator artifact.
real(8) function code(x, y, z, t)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: t
code = (-z * (((0.5d0 * (y * y)) + y) + ((0.3333333333333333d0 / (1.0d0 * (1.0d0 * 1.0d0))) * (y * (y * y))))) - (t - (x * log(y)))
end function
public static double code(double x, double y, double z, double t) {
return (-z * (((0.5 * (y * y)) + y) + ((0.3333333333333333 / (1.0 * (1.0 * 1.0))) * (y * (y * y))))) - (t - (x * Math.log(y)));
}
def code(x, y, z, t): return (-z * (((0.5 * (y * y)) + y) + ((0.3333333333333333 / (1.0 * (1.0 * 1.0))) * (y * (y * y))))) - (t - (x * math.log(y)))
function code(x, y, z, t) return Float64(Float64(Float64(-z) * Float64(Float64(Float64(0.5 * Float64(y * y)) + y) + Float64(Float64(0.3333333333333333 / Float64(1.0 * Float64(1.0 * 1.0))) * Float64(y * Float64(y * y))))) - Float64(t - Float64(x * log(y)))) end
function tmp = code(x, y, z, t) tmp = (-z * (((0.5 * (y * y)) + y) + ((0.3333333333333333 / (1.0 * (1.0 * 1.0))) * (y * (y * y))))) - (t - (x * log(y))); end
code[x_, y_, z_, t_] := N[(N[((-z) * N[(N[(N[(0.5 * N[(y * y), $MachinePrecision]), $MachinePrecision] + y), $MachinePrecision] + N[(N[(0.3333333333333333 / N[(1.0 * N[(1.0 * 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(y * N[(y * y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(t - N[(x * N[Log[y], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(-z\right) \cdot \left(\left(0.5 \cdot \left(y \cdot y\right) + y\right) + \frac{0.3333333333333333}{1 \cdot \left(1 \cdot 1\right)} \cdot \left(y \cdot \left(y \cdot y\right)\right)\right) - \left(t - x \cdot \log y\right)
\end{array}
herbie shell --seed 2024138
(FPCore (x y z t)
:name "Numeric.SpecFunctions:invIncompleteBetaWorker from math-functions-0.1.5.2, B"
:precision binary64
:alt
(! :herbie-platform default (- (* (- z) (+ (+ (* 1/2 (* y y)) y) (* (/ 1/3 (* 1 (* 1 1))) (* y (* y y))))) (- t (* x (log y)))))
(- (+ (* x (log y)) (* z (log (- 1.0 y)))) t))