
(FPCore (alpha beta) :precision binary64 (let* ((t_0 (+ (+ alpha beta) (* 2.0 1.0)))) (/ (/ (/ (+ (+ (+ alpha beta) (* beta alpha)) 1.0) t_0) t_0) (+ t_0 1.0))))
/* Herbie-generated initial program (binary64):
 * ((alpha + beta + beta*alpha + 1) / t^2) / (t + 1), where t = alpha + beta + 2.
 * The literal (2.0 * 1.0) is kept as Herbie emitted it; it evaluates exactly to 2.0.
 */
double code(double alpha, double beta) {
    double t_0 = (alpha + beta) + (2.0 * 1.0);
    double numer = ((alpha + beta) + (beta * alpha)) + 1.0;
    double over_t = numer / t_0;
    double over_t_sq = over_t / t_0;
    return over_t_sq / (t_0 + 1.0);
}
! Herbie-generated initial program in binary64:
! ((alpha + beta + beta*alpha + 1) / t_0**2) / (t_0 + 1), with t_0 = alpha + beta + 2.
real(8) function code(alpha, beta)
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
real(8) :: t_0
! (2.0d0 * 1.0d0) is kept as Herbie emitted it; it evaluates exactly to 2.0d0.
t_0 = (alpha + beta) + (2.0d0 * 1.0d0)
code = (((((alpha + beta) + (beta * alpha)) + 1.0d0) / t_0) / t_0) / (t_0 + 1.0d0)
end function
/**
 * Herbie-generated initial program (binary64):
 * ((alpha + beta + beta*alpha + 1) / t^2) / (t + 1), where t = alpha + beta + 2.
 */
public static double code(double alpha, double beta) {
    double t_0 = (alpha + beta) + (2.0 * 1.0);
    double numer = ((alpha + beta) + (beta * alpha)) + 1.0;
    double overT = numer / t_0;
    double overTSq = overT / t_0;
    return overTSq / (t_0 + 1.0);
}
def code(alpha, beta):
    """Herbie initial program (binary64).

    Computes ((alpha + beta + beta*alpha + 1) / t_0**2) / (t_0 + 1)
    with t_0 = alpha + beta + 2.  The original one-line form was a
    SyntaxError (assignment and return jammed onto the def line).
    """
    t_0 = (alpha + beta) + (2.0 * 1.0)
    return (((((alpha + beta) + (beta * alpha)) + 1.0) / t_0) / t_0) / (t_0 + 1.0)
# Herbie initial program (binary64); the original one-line body lacked
# statement separators between the assignment and the return.
function code(alpha, beta)
    t_0 = Float64(Float64(alpha + beta) + Float64(2.0 * 1.0))
    return Float64(Float64(Float64(Float64(Float64(Float64(alpha + beta) + Float64(beta * alpha)) + 1.0) / t_0) / t_0) / Float64(t_0 + 1.0))
end
% Herbie initial program (binary64); the declaration and body were on one
% line, which is not a valid MATLAB function file layout.
function tmp = code(alpha, beta)
    t_0 = (alpha + beta) + (2.0 * 1.0);
    tmp = (((((alpha + beta) + (beta * alpha)) + 1.0) / t_0) / t_0) / (t_0 + 1.0);
end
code[alpha_, beta_] := Block[{t$95$0 = N[(N[(alpha + beta), $MachinePrecision] + N[(2.0 * 1.0), $MachinePrecision]), $MachinePrecision]}, N[(N[(N[(N[(N[(N[(alpha + beta), $MachinePrecision] + N[(beta * alpha), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision] / t$95$0), $MachinePrecision] / t$95$0), $MachinePrecision] / N[(t$95$0 + 1.0), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \left(\alpha + \beta\right) + 2 \cdot 1\\
\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{t\_0}}{t\_0}}{t\_0 + 1}
\end{array}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 13 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (alpha beta) :precision binary64 (let* ((t_0 (+ (+ alpha beta) (* 2.0 1.0)))) (/ (/ (/ (+ (+ (+ alpha beta) (* beta alpha)) 1.0) t_0) t_0) (+ t_0 1.0))))
double code(double alpha, double beta) {
double t_0 = (alpha + beta) + (2.0 * 1.0);
return (((((alpha + beta) + (beta * alpha)) + 1.0) / t_0) / t_0) / (t_0 + 1.0);
}
real(8) function code(alpha, beta)
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
real(8) :: t_0
t_0 = (alpha + beta) + (2.0d0 * 1.0d0)
code = (((((alpha + beta) + (beta * alpha)) + 1.0d0) / t_0) / t_0) / (t_0 + 1.0d0)
end function
public static double code(double alpha, double beta) {
double t_0 = (alpha + beta) + (2.0 * 1.0);
return (((((alpha + beta) + (beta * alpha)) + 1.0) / t_0) / t_0) / (t_0 + 1.0);
}
def code(alpha, beta):
    """Herbie alternative 1 (binary64), same form as the initial program.

    Computes ((alpha + beta + beta*alpha + 1) / t_0**2) / (t_0 + 1)
    with t_0 = alpha + beta + 2.  The original one-line form was a
    SyntaxError (assignment and return jammed onto the def line).
    """
    t_0 = (alpha + beta) + (2.0 * 1.0)
    return (((((alpha + beta) + (beta * alpha)) + 1.0) / t_0) / t_0) / (t_0 + 1.0)
function code(alpha, beta) t_0 = Float64(Float64(alpha + beta) + Float64(2.0 * 1.0)) return Float64(Float64(Float64(Float64(Float64(Float64(alpha + beta) + Float64(beta * alpha)) + 1.0) / t_0) / t_0) / Float64(t_0 + 1.0)) end
function tmp = code(alpha, beta) t_0 = (alpha + beta) + (2.0 * 1.0); tmp = (((((alpha + beta) + (beta * alpha)) + 1.0) / t_0) / t_0) / (t_0 + 1.0); end
code[alpha_, beta_] := Block[{t$95$0 = N[(N[(alpha + beta), $MachinePrecision] + N[(2.0 * 1.0), $MachinePrecision]), $MachinePrecision]}, N[(N[(N[(N[(N[(N[(alpha + beta), $MachinePrecision] + N[(beta * alpha), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision] / t$95$0), $MachinePrecision] / t$95$0), $MachinePrecision] / N[(t$95$0 + 1.0), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \left(\alpha + \beta\right) + 2 \cdot 1\\
\frac{\frac{\frac{\left(\left(\alpha + \beta\right) + \beta \cdot \alpha\right) + 1}{t\_0}}{t\_0}}{t\_0 + 1}
\end{array}
\end{array}
(FPCore (alpha beta) :precision binary64 (let* ((t_0 (+ alpha (+ beta 2.0)))) (* (/ (+ 1.0 alpha) t_0) (/ (/ (+ 1.0 beta) t_0) (+ alpha (+ beta 3.0))))))
double code(double alpha, double beta) {
double t_0 = alpha + (beta + 2.0);
return ((1.0 + alpha) / t_0) * (((1.0 + beta) / t_0) / (alpha + (beta + 3.0)));
}
real(8) function code(alpha, beta)
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
real(8) :: t_0
t_0 = alpha + (beta + 2.0d0)
code = ((1.0d0 + alpha) / t_0) * (((1.0d0 + beta) / t_0) / (alpha + (beta + 3.0d0)))
end function
public static double code(double alpha, double beta) {
double t_0 = alpha + (beta + 2.0);
return ((1.0 + alpha) / t_0) * (((1.0 + beta) / t_0) / (alpha + (beta + 3.0)));
}
def code(alpha, beta):
    """Herbie alternative (binary64): factored form
    ((1+alpha)/t_0) * (((1+beta)/t_0) / (alpha+beta+3)) with t_0 = alpha+beta+2.
    The original one-line form was a SyntaxError.
    """
    t_0 = alpha + (beta + 2.0)
    return ((1.0 + alpha) / t_0) * (((1.0 + beta) / t_0) / (alpha + (beta + 3.0)))
function code(alpha, beta) t_0 = Float64(alpha + Float64(beta + 2.0)) return Float64(Float64(Float64(1.0 + alpha) / t_0) * Float64(Float64(Float64(1.0 + beta) / t_0) / Float64(alpha + Float64(beta + 3.0)))) end
function tmp = code(alpha, beta) t_0 = alpha + (beta + 2.0); tmp = ((1.0 + alpha) / t_0) * (((1.0 + beta) / t_0) / (alpha + (beta + 3.0))); end
code[alpha_, beta_] := Block[{t$95$0 = N[(alpha + N[(beta + 2.0), $MachinePrecision]), $MachinePrecision]}, N[(N[(N[(1.0 + alpha), $MachinePrecision] / t$95$0), $MachinePrecision] * N[(N[(N[(1.0 + beta), $MachinePrecision] / t$95$0), $MachinePrecision] / N[(alpha + N[(beta + 3.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \alpha + \left(\beta + 2\right)\\
\frac{1 + \alpha}{t\_0} \cdot \frac{\frac{1 + \beta}{t\_0}}{\alpha + \left(\beta + 3\right)}
\end{array}
\end{array}
Initial program 93.3%
Simplified 82.1%
times-frac 97.0%
+-commutative 97.0%
Applied egg-rr 97.0%
+-commutative 97.0%
+-commutative 97.0%
+-commutative 97.0%
+-commutative 97.0%
associate-/r* 99.7%
+-commutative 99.7%
+-commutative 99.7%
+-commutative 99.7%
+-commutative 99.7%
+-commutative 99.7%
+-commutative 99.7%
Simplified 99.7%
Final simplification 99.7%
(FPCore (alpha beta)
:precision binary64
(if (<= beta 3.2e+16)
(*
(/ 1.0 (+ beta 2.0))
(/ (/ (+ 1.0 beta) (+ beta 2.0)) (+ alpha (+ beta 3.0))))
(/ (/ (+ 1.0 alpha) beta) beta)))
double code(double alpha, double beta) {
double tmp;
if (beta <= 3.2e+16) {
tmp = (1.0 / (beta + 2.0)) * (((1.0 + beta) / (beta + 2.0)) / (alpha + (beta + 3.0)));
} else {
tmp = ((1.0 + alpha) / beta) / beta;
}
return tmp;
}
real(8) function code(alpha, beta)
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
real(8) :: tmp
if (beta <= 3.2d+16) then
tmp = (1.0d0 / (beta + 2.0d0)) * (((1.0d0 + beta) / (beta + 2.0d0)) / (alpha + (beta + 3.0d0)))
else
tmp = ((1.0d0 + alpha) / beta) / beta
end if
code = tmp
end function
public static double code(double alpha, double beta) {
double tmp;
if (beta <= 3.2e+16) {
tmp = (1.0 / (beta + 2.0)) * (((1.0 + beta) / (beta + 2.0)) / (alpha + (beta + 3.0)));
} else {
tmp = ((1.0 + alpha) / beta) / beta;
}
return tmp;
}
def code(alpha, beta):
    """Herbie regime-split alternative (binary64).

    For beta <= 3.2e16 uses the factored exact form; for larger beta uses
    the asymptotic form (1 + alpha) / beta**2.  The original one-line
    if/else form was a SyntaxError.
    """
    if beta <= 3.2e+16:
        return (1.0 / (beta + 2.0)) * (((1.0 + beta) / (beta + 2.0)) / (alpha + (beta + 3.0)))
    return ((1.0 + alpha) / beta) / beta
function code(alpha, beta) tmp = 0.0 if (beta <= 3.2e+16) tmp = Float64(Float64(1.0 / Float64(beta + 2.0)) * Float64(Float64(Float64(1.0 + beta) / Float64(beta + 2.0)) / Float64(alpha + Float64(beta + 3.0)))); else tmp = Float64(Float64(Float64(1.0 + alpha) / beta) / beta); end return tmp end
function tmp_2 = code(alpha, beta) tmp = 0.0; if (beta <= 3.2e+16) tmp = (1.0 / (beta + 2.0)) * (((1.0 + beta) / (beta + 2.0)) / (alpha + (beta + 3.0))); else tmp = ((1.0 + alpha) / beta) / beta; end tmp_2 = tmp; end
code[alpha_, beta_] := If[LessEqual[beta, 3.2e+16], N[(N[(1.0 / N[(beta + 2.0), $MachinePrecision]), $MachinePrecision] * N[(N[(N[(1.0 + beta), $MachinePrecision] / N[(beta + 2.0), $MachinePrecision]), $MachinePrecision] / N[(alpha + N[(beta + 3.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[(1.0 + alpha), $MachinePrecision] / beta), $MachinePrecision] / beta), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\beta \leq 3.2 \cdot 10^{+16}:\\
\;\;\;\;\frac{1}{\beta + 2} \cdot \frac{\frac{1 + \beta}{\beta + 2}}{\alpha + \left(\beta + 3\right)}\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{1 + \alpha}{\beta}}{\beta}\\
\end{array}
\end{array}
if beta < 3.2e16: Initial program 99.8%
Simplified 95.9%
times-frac 99.8%
+-commutative 99.8%
Applied egg-rr 99.8%
+-commutative 99.8%
+-commutative 99.8%
+-commutative 99.8%
+-commutative 99.8%
associate-/r* 99.8%
+-commutative 99.8%
+-commutative 99.8%
+-commutative 99.8%
+-commutative 99.8%
+-commutative 99.8%
+-commutative 99.8%
Simplified 99.8%
Taylor expanded in alpha around 0 84.8%
+-commutative 84.8%
Simplified 84.8%
Taylor expanded in alpha around 0 69.0%
+-commutative 69.0%
Simplified 69.0%
if 3.2e16 < beta: Initial program 81.4%
Taylor expanded in beta around inf 83.1%
Taylor expanded in beta around inf 82.9%
Final simplification 73.9%
(FPCore (alpha beta) :precision binary64 (if (<= beta 2.8e+26) (/ (/ (+ 1.0 beta) (+ beta 2.0)) (* (+ beta 2.0) (+ beta 3.0))) (/ (/ (+ 1.0 alpha) beta) beta)))
double code(double alpha, double beta) {
double tmp;
if (beta <= 2.8e+26) {
tmp = ((1.0 + beta) / (beta + 2.0)) / ((beta + 2.0) * (beta + 3.0));
} else {
tmp = ((1.0 + alpha) / beta) / beta;
}
return tmp;
}
real(8) function code(alpha, beta)
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
real(8) :: tmp
if (beta <= 2.8d+26) then
tmp = ((1.0d0 + beta) / (beta + 2.0d0)) / ((beta + 2.0d0) * (beta + 3.0d0))
else
tmp = ((1.0d0 + alpha) / beta) / beta
end if
code = tmp
end function
public static double code(double alpha, double beta) {
double tmp;
if (beta <= 2.8e+26) {
tmp = ((1.0 + beta) / (beta + 2.0)) / ((beta + 2.0) * (beta + 3.0));
} else {
tmp = ((1.0 + alpha) / beta) / beta;
}
return tmp;
}
def code(alpha, beta): tmp = 0 if beta <= 2.8e+26: tmp = ((1.0 + beta) / (beta + 2.0)) / ((beta + 2.0) * (beta + 3.0)) else: tmp = ((1.0 + alpha) / beta) / beta return tmp
function code(alpha, beta) tmp = 0.0 if (beta <= 2.8e+26) tmp = Float64(Float64(Float64(1.0 + beta) / Float64(beta + 2.0)) / Float64(Float64(beta + 2.0) * Float64(beta + 3.0))); else tmp = Float64(Float64(Float64(1.0 + alpha) / beta) / beta); end return tmp end
function tmp_2 = code(alpha, beta) tmp = 0.0; if (beta <= 2.8e+26) tmp = ((1.0 + beta) / (beta + 2.0)) / ((beta + 2.0) * (beta + 3.0)); else tmp = ((1.0 + alpha) / beta) / beta; end tmp_2 = tmp; end
code[alpha_, beta_] := If[LessEqual[beta, 2.8e+26], N[(N[(N[(1.0 + beta), $MachinePrecision] / N[(beta + 2.0), $MachinePrecision]), $MachinePrecision] / N[(N[(beta + 2.0), $MachinePrecision] * N[(beta + 3.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[(1.0 + alpha), $MachinePrecision] / beta), $MachinePrecision] / beta), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\beta \leq 2.8 \cdot 10^{+26}:\\
\;\;\;\;\frac{\frac{1 + \beta}{\beta + 2}}{\left(\beta + 2\right) \cdot \left(\beta + 3\right)}\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{1 + \alpha}{\beta}}{\beta}\\
\end{array}
\end{array}
if beta < 2.8e26Initial program 99.8%
associate-/l/99.8%
+-commutative99.8%
associate-+l+99.8%
*-commutative99.8%
metadata-eval99.8%
associate-+l+99.8%
metadata-eval99.8%
associate-+l+99.8%
metadata-eval99.8%
metadata-eval99.8%
associate-+l+99.8%
Simplified99.8%
Taylor expanded in alpha around 0 84.1%
+-commutative84.1%
Simplified84.1%
Taylor expanded in alpha around 0 67.0%
+-commutative64.4%
+-commutative64.4%
Simplified67.0%
if 2.8e26 < beta Initial program 80.6%
Taylor expanded in beta around inf 84.5%
Taylor expanded in beta around inf 84.3%
Final simplification72.9%
(FPCore (alpha beta) :precision binary64 (if (<= beta 6.2) (/ (+ 0.5 (* beta 0.25)) (* (+ beta 2.0) (+ beta 3.0))) (/ (/ (+ 1.0 alpha) beta) beta)))
double code(double alpha, double beta) {
double tmp;
if (beta <= 6.2) {
tmp = (0.5 + (beta * 0.25)) / ((beta + 2.0) * (beta + 3.0));
} else {
tmp = ((1.0 + alpha) / beta) / beta;
}
return tmp;
}
real(8) function code(alpha, beta)
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
real(8) :: tmp
if (beta <= 6.2d0) then
tmp = (0.5d0 + (beta * 0.25d0)) / ((beta + 2.0d0) * (beta + 3.0d0))
else
tmp = ((1.0d0 + alpha) / beta) / beta
end if
code = tmp
end function
public static double code(double alpha, double beta) {
double tmp;
if (beta <= 6.2) {
tmp = (0.5 + (beta * 0.25)) / ((beta + 2.0) * (beta + 3.0));
} else {
tmp = ((1.0 + alpha) / beta) / beta;
}
return tmp;
}
def code(alpha, beta): tmp = 0 if beta <= 6.2: tmp = (0.5 + (beta * 0.25)) / ((beta + 2.0) * (beta + 3.0)) else: tmp = ((1.0 + alpha) / beta) / beta return tmp
function code(alpha, beta) tmp = 0.0 if (beta <= 6.2) tmp = Float64(Float64(0.5 + Float64(beta * 0.25)) / Float64(Float64(beta + 2.0) * Float64(beta + 3.0))); else tmp = Float64(Float64(Float64(1.0 + alpha) / beta) / beta); end return tmp end
function tmp_2 = code(alpha, beta) tmp = 0.0; if (beta <= 6.2) tmp = (0.5 + (beta * 0.25)) / ((beta + 2.0) * (beta + 3.0)); else tmp = ((1.0 + alpha) / beta) / beta; end tmp_2 = tmp; end
code[alpha_, beta_] := If[LessEqual[beta, 6.2], N[(N[(0.5 + N[(beta * 0.25), $MachinePrecision]), $MachinePrecision] / N[(N[(beta + 2.0), $MachinePrecision] * N[(beta + 3.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[(1.0 + alpha), $MachinePrecision] / beta), $MachinePrecision] / beta), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\beta \leq 6.2:\\
\;\;\;\;\frac{0.5 + \beta \cdot 0.25}{\left(\beta + 2\right) \cdot \left(\beta + 3\right)}\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{1 + \alpha}{\beta}}{\beta}\\
\end{array}
\end{array}
if beta < 6.20000000000000018Initial program 99.8%
associate-/l/99.8%
+-commutative99.8%
associate-+l+99.8%
*-commutative99.8%
metadata-eval99.8%
associate-+l+99.8%
metadata-eval99.8%
associate-+l+99.8%
metadata-eval99.8%
metadata-eval99.8%
associate-+l+99.8%
Simplified99.8%
Taylor expanded in alpha around 0 84.7%
+-commutative84.7%
Simplified84.7%
Taylor expanded in beta around 0 84.3%
*-commutative84.3%
Simplified84.3%
Taylor expanded in alpha around 0 66.5%
+-commutative66.5%
+-commutative66.5%
Simplified66.5%
if 6.20000000000000018 < beta Initial program 81.8%
Taylor expanded in beta around inf 82.5%
Taylor expanded in beta around inf 82.2%
Final simplification72.2%
(FPCore (alpha beta) :precision binary64 (if (<= beta 4.0) (/ (+ 0.5 (* beta 0.25)) (* (+ beta 2.0) (+ beta 3.0))) (/ (/ (+ 1.0 alpha) beta) (+ 1.0 (+ 2.0 (+ alpha beta))))))
double code(double alpha, double beta) {
double tmp;
if (beta <= 4.0) {
tmp = (0.5 + (beta * 0.25)) / ((beta + 2.0) * (beta + 3.0));
} else {
tmp = ((1.0 + alpha) / beta) / (1.0 + (2.0 + (alpha + beta)));
}
return tmp;
}
real(8) function code(alpha, beta)
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
real(8) :: tmp
if (beta <= 4.0d0) then
tmp = (0.5d0 + (beta * 0.25d0)) / ((beta + 2.0d0) * (beta + 3.0d0))
else
tmp = ((1.0d0 + alpha) / beta) / (1.0d0 + (2.0d0 + (alpha + beta)))
end if
code = tmp
end function
public static double code(double alpha, double beta) {
double tmp;
if (beta <= 4.0) {
tmp = (0.5 + (beta * 0.25)) / ((beta + 2.0) * (beta + 3.0));
} else {
tmp = ((1.0 + alpha) / beta) / (1.0 + (2.0 + (alpha + beta)));
}
return tmp;
}
def code(alpha, beta): tmp = 0 if beta <= 4.0: tmp = (0.5 + (beta * 0.25)) / ((beta + 2.0) * (beta + 3.0)) else: tmp = ((1.0 + alpha) / beta) / (1.0 + (2.0 + (alpha + beta))) return tmp
function code(alpha, beta) tmp = 0.0 if (beta <= 4.0) tmp = Float64(Float64(0.5 + Float64(beta * 0.25)) / Float64(Float64(beta + 2.0) * Float64(beta + 3.0))); else tmp = Float64(Float64(Float64(1.0 + alpha) / beta) / Float64(1.0 + Float64(2.0 + Float64(alpha + beta)))); end return tmp end
function tmp_2 = code(alpha, beta) tmp = 0.0; if (beta <= 4.0) tmp = (0.5 + (beta * 0.25)) / ((beta + 2.0) * (beta + 3.0)); else tmp = ((1.0 + alpha) / beta) / (1.0 + (2.0 + (alpha + beta))); end tmp_2 = tmp; end
code[alpha_, beta_] := If[LessEqual[beta, 4.0], N[(N[(0.5 + N[(beta * 0.25), $MachinePrecision]), $MachinePrecision] / N[(N[(beta + 2.0), $MachinePrecision] * N[(beta + 3.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[(1.0 + alpha), $MachinePrecision] / beta), $MachinePrecision] / N[(1.0 + N[(2.0 + N[(alpha + beta), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\beta \leq 4:\\
\;\;\;\;\frac{0.5 + \beta \cdot 0.25}{\left(\beta + 2\right) \cdot \left(\beta + 3\right)}\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{1 + \alpha}{\beta}}{1 + \left(2 + \left(\alpha + \beta\right)\right)}\\
\end{array}
\end{array}
if beta < 4Initial program 99.8%
associate-/l/99.8%
+-commutative99.8%
associate-+l+99.8%
*-commutative99.8%
metadata-eval99.8%
associate-+l+99.8%
metadata-eval99.8%
associate-+l+99.8%
metadata-eval99.8%
metadata-eval99.8%
associate-+l+99.8%
Simplified99.8%
Taylor expanded in alpha around 0 84.7%
+-commutative84.7%
Simplified84.7%
Taylor expanded in beta around 0 84.3%
*-commutative84.3%
Simplified84.3%
Taylor expanded in alpha around 0 66.5%
+-commutative66.5%
+-commutative66.5%
Simplified66.5%
if 4 < beta Initial program 81.8%
Taylor expanded in beta around inf 82.5%
Final simplification72.3%
(FPCore (alpha beta) :precision binary64 (if (<= beta 7.2) (/ 0.5 (* (+ alpha 2.0) (+ alpha 3.0))) (/ (/ (+ 1.0 alpha) beta) beta)))
double code(double alpha, double beta) {
double tmp;
if (beta <= 7.2) {
tmp = 0.5 / ((alpha + 2.0) * (alpha + 3.0));
} else {
tmp = ((1.0 + alpha) / beta) / beta;
}
return tmp;
}
real(8) function code(alpha, beta)
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
real(8) :: tmp
if (beta <= 7.2d0) then
tmp = 0.5d0 / ((alpha + 2.0d0) * (alpha + 3.0d0))
else
tmp = ((1.0d0 + alpha) / beta) / beta
end if
code = tmp
end function
public static double code(double alpha, double beta) {
double tmp;
if (beta <= 7.2) {
tmp = 0.5 / ((alpha + 2.0) * (alpha + 3.0));
} else {
tmp = ((1.0 + alpha) / beta) / beta;
}
return tmp;
}
def code(alpha, beta):
    """Herbie regime-split alternative (binary64).

    For beta <= 7.2 uses 0.5 / ((alpha+2)*(alpha+3)); for larger beta uses
    the asymptotic form (1 + alpha) / beta**2.  The original one-line
    if/else form was a SyntaxError.
    """
    if beta <= 7.2:
        return 0.5 / ((alpha + 2.0) * (alpha + 3.0))
    return ((1.0 + alpha) / beta) / beta
function code(alpha, beta) tmp = 0.0 if (beta <= 7.2) tmp = Float64(0.5 / Float64(Float64(alpha + 2.0) * Float64(alpha + 3.0))); else tmp = Float64(Float64(Float64(1.0 + alpha) / beta) / beta); end return tmp end
function tmp_2 = code(alpha, beta) tmp = 0.0; if (beta <= 7.2) tmp = 0.5 / ((alpha + 2.0) * (alpha + 3.0)); else tmp = ((1.0 + alpha) / beta) / beta; end tmp_2 = tmp; end
code[alpha_, beta_] := If[LessEqual[beta, 7.2], N[(0.5 / N[(N[(alpha + 2.0), $MachinePrecision] * N[(alpha + 3.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[(1.0 + alpha), $MachinePrecision] / beta), $MachinePrecision] / beta), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\beta \leq 7.2:\\
\;\;\;\;\frac{0.5}{\left(\alpha + 2\right) \cdot \left(\alpha + 3\right)}\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{1 + \alpha}{\beta}}{\beta}\\
\end{array}
\end{array}
if beta < 7.20000000000000018Initial program 99.8%
associate-/l/99.8%
+-commutative99.8%
associate-+l+99.8%
*-commutative99.8%
metadata-eval99.8%
associate-+l+99.8%
metadata-eval99.8%
associate-+l+99.8%
metadata-eval99.8%
metadata-eval99.8%
associate-+l+99.8%
Simplified99.8%
Taylor expanded in alpha around 0 84.7%
+-commutative84.7%
Simplified84.7%
Taylor expanded in beta around 0 83.8%
if 7.20000000000000018 < beta Initial program 81.8%
Taylor expanded in beta around inf 82.5%
Taylor expanded in beta around inf 82.2%
Final simplification83.2%
(FPCore (alpha beta) :precision binary64 (if (<= alpha 1.2) (/ (/ 1.0 beta) (+ beta 3.0)) (/ (/ alpha beta) beta)))
double code(double alpha, double beta) {
double tmp;
if (alpha <= 1.2) {
tmp = (1.0 / beta) / (beta + 3.0);
} else {
tmp = (alpha / beta) / beta;
}
return tmp;
}
real(8) function code(alpha, beta)
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
real(8) :: tmp
if (alpha <= 1.2d0) then
tmp = (1.0d0 / beta) / (beta + 3.0d0)
else
tmp = (alpha / beta) / beta
end if
code = tmp
end function
public static double code(double alpha, double beta) {
double tmp;
if (alpha <= 1.2) {
tmp = (1.0 / beta) / (beta + 3.0);
} else {
tmp = (alpha / beta) / beta;
}
return tmp;
}
def code(alpha, beta):
    """Herbie regime-split alternative (binary64), split on alpha at 1.2.

    The original one-line if/else form was a SyntaxError.
    """
    if alpha <= 1.2:
        return (1.0 / beta) / (beta + 3.0)
    return (alpha / beta) / beta
function code(alpha, beta) tmp = 0.0 if (alpha <= 1.2) tmp = Float64(Float64(1.0 / beta) / Float64(beta + 3.0)); else tmp = Float64(Float64(alpha / beta) / beta); end return tmp end
function tmp_2 = code(alpha, beta) tmp = 0.0; if (alpha <= 1.2) tmp = (1.0 / beta) / (beta + 3.0); else tmp = (alpha / beta) / beta; end tmp_2 = tmp; end
code[alpha_, beta_] := If[LessEqual[alpha, 1.2], N[(N[(1.0 / beta), $MachinePrecision] / N[(beta + 3.0), $MachinePrecision]), $MachinePrecision], N[(N[(alpha / beta), $MachinePrecision] / beta), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\alpha \leq 1.2:\\
\;\;\;\;\frac{\frac{1}{\beta}}{\beta + 3}\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{\alpha}{\beta}}{\beta}\\
\end{array}
\end{array}
if alpha < 1.19999999999999996Initial program 99.8%
Taylor expanded in beta around inf 37.7%
Taylor expanded in alpha around 0 36.4%
associate-/r*36.8%
+-commutative36.8%
Simplified36.8%
if 1.19999999999999996 < alpha Initial program 80.1%
Taylor expanded in beta around inf 19.9%
clear-num19.9%
inv-pow19.9%
Applied egg-rr19.9%
unpow-119.9%
Simplified19.9%
Taylor expanded in beta around inf 19.6%
Taylor expanded in alpha around inf 19.6%
Final simplification31.1%
(FPCore (alpha beta) :precision binary64 (if (<= alpha 3.0) (/ 1.0 (* beta 3.0)) (/ 1.0 (* alpha beta))))
double code(double alpha, double beta) {
double tmp;
if (alpha <= 3.0) {
tmp = 1.0 / (beta * 3.0);
} else {
tmp = 1.0 / (alpha * beta);
}
return tmp;
}
real(8) function code(alpha, beta)
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
real(8) :: tmp
if (alpha <= 3.0d0) then
tmp = 1.0d0 / (beta * 3.0d0)
else
tmp = 1.0d0 / (alpha * beta)
end if
code = tmp
end function
public static double code(double alpha, double beta) {
double tmp;
if (alpha <= 3.0) {
tmp = 1.0 / (beta * 3.0);
} else {
tmp = 1.0 / (alpha * beta);
}
return tmp;
}
def code(alpha, beta):
    """Herbie regime-split alternative (binary64), split on alpha at 3.

    The original one-line if/else form was a SyntaxError.
    """
    if alpha <= 3.0:
        return 1.0 / (beta * 3.0)
    return 1.0 / (alpha * beta)
function code(alpha, beta) tmp = 0.0 if (alpha <= 3.0) tmp = Float64(1.0 / Float64(beta * 3.0)); else tmp = Float64(1.0 / Float64(alpha * beta)); end return tmp end
function tmp_2 = code(alpha, beta) tmp = 0.0; if (alpha <= 3.0) tmp = 1.0 / (beta * 3.0); else tmp = 1.0 / (alpha * beta); end tmp_2 = tmp; end
code[alpha_, beta_] := If[LessEqual[alpha, 3.0], N[(1.0 / N[(beta * 3.0), $MachinePrecision]), $MachinePrecision], N[(1.0 / N[(alpha * beta), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\alpha \leq 3:\\
\;\;\;\;\frac{1}{\beta \cdot 3}\\
\mathbf{else}:\\
\;\;\;\;\frac{1}{\alpha \cdot \beta}\\
\end{array}
\end{array}
if alpha < 3Initial program 99.8%
Taylor expanded in beta around inf 37.7%
Taylor expanded in alpha around 0 36.4%
+-commutative36.4%
Simplified36.4%
Taylor expanded in beta around 0 4.3%
if 3 < alpha Initial program 80.1%
Taylor expanded in beta around inf 19.9%
*-un-lft-identity19.9%
associate-/l/21.3%
metadata-eval21.3%
associate-+l+21.3%
metadata-eval21.3%
associate-+r+21.3%
Applied egg-rr21.3%
*-lft-identity21.3%
*-commutative21.3%
associate-+r+21.3%
+-commutative21.3%
associate-+r+21.3%
Simplified21.3%
Taylor expanded in alpha around inf 17.0%
Taylor expanded in alpha around 0 18.1%
Final simplification8.9%
(FPCore (alpha beta) :precision binary64 (if (<= alpha 1.0) (/ (/ 1.0 beta) beta) (/ (/ alpha beta) beta)))
double code(double alpha, double beta) {
double tmp;
if (alpha <= 1.0) {
tmp = (1.0 / beta) / beta;
} else {
tmp = (alpha / beta) / beta;
}
return tmp;
}
real(8) function code(alpha, beta)
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
real(8) :: tmp
if (alpha <= 1.0d0) then
tmp = (1.0d0 / beta) / beta
else
tmp = (alpha / beta) / beta
end if
code = tmp
end function
public static double code(double alpha, double beta) {
double tmp;
if (alpha <= 1.0) {
tmp = (1.0 / beta) / beta;
} else {
tmp = (alpha / beta) / beta;
}
return tmp;
}
def code(alpha, beta):
    """Herbie regime-split alternative (binary64), split on alpha at 1.

    The original one-line if/else form was a SyntaxError.
    """
    if alpha <= 1.0:
        return (1.0 / beta) / beta
    return (alpha / beta) / beta
function code(alpha, beta) tmp = 0.0 if (alpha <= 1.0) tmp = Float64(Float64(1.0 / beta) / beta); else tmp = Float64(Float64(alpha / beta) / beta); end return tmp end
function tmp_2 = code(alpha, beta) tmp = 0.0; if (alpha <= 1.0) tmp = (1.0 / beta) / beta; else tmp = (alpha / beta) / beta; end tmp_2 = tmp; end
code[alpha_, beta_] := If[LessEqual[alpha, 1.0], N[(N[(1.0 / beta), $MachinePrecision] / beta), $MachinePrecision], N[(N[(alpha / beta), $MachinePrecision] / beta), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\alpha \leq 1:\\
\;\;\;\;\frac{\frac{1}{\beta}}{\beta}\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{\alpha}{\beta}}{\beta}\\
\end{array}
\end{array}
if alpha < 1Initial program 99.8%
Taylor expanded in beta around inf 37.7%
clear-num37.7%
inv-pow37.7%
Applied egg-rr37.7%
unpow-137.7%
Simplified37.7%
Taylor expanded in beta around inf 38.3%
Taylor expanded in alpha around 0 37.4%
if 1 < alpha Initial program 80.1%
Taylor expanded in beta around inf 19.9%
clear-num19.9%
inv-pow19.9%
Applied egg-rr19.9%
unpow-119.9%
Simplified19.9%
Taylor expanded in beta around inf 19.6%
Taylor expanded in alpha around inf 19.6%
Final simplification31.5%
(FPCore (alpha beta) :precision binary64 (/ (/ (+ 1.0 alpha) beta) beta))
double code(double alpha, double beta) {
return ((1.0 + alpha) / beta) / beta;
}
real(8) function code(alpha, beta)
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
code = ((1.0d0 + alpha) / beta) / beta
end function
public static double code(double alpha, double beta) {
return ((1.0 + alpha) / beta) / beta;
}
def code(alpha, beta):
    """Asymptotic form (1 + alpha) / beta**2, evaluated as two divisions."""
    scaled = (1.0 + alpha) / beta
    return scaled / beta
function code(alpha, beta) return Float64(Float64(Float64(1.0 + alpha) / beta) / beta) end
function tmp = code(alpha, beta) tmp = ((1.0 + alpha) / beta) / beta; end
code[alpha_, beta_] := N[(N[(N[(1.0 + alpha), $MachinePrecision] / beta), $MachinePrecision] / beta), $MachinePrecision]
\begin{array}{l}
\\
\frac{\frac{1 + \alpha}{\beta}}{\beta}
\end{array}
Initial program 93.3%
Taylor expanded in beta around inf 31.8%
Taylor expanded in beta around inf 32.1%
Final simplification32.1%
(FPCore (alpha beta) :precision binary64 (/ 1.0 (* beta 3.0)))
double code(double alpha, double beta) {
return 1.0 / (beta * 3.0);
}
real(8) function code(alpha, beta)
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
code = 1.0d0 / (beta * 3.0d0)
end function
public static double code(double alpha, double beta) {
return 1.0 / (beta * 3.0);
}
def code(alpha, beta):
    """Constant-regime approximation 1 / (3 * beta); alpha is unused."""
    denom = beta * 3.0
    return 1.0 / denom
function code(alpha, beta) return Float64(1.0 / Float64(beta * 3.0)) end
function tmp = code(alpha, beta) tmp = 1.0 / (beta * 3.0); end
code[alpha_, beta_] := N[(1.0 / N[(beta * 3.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{\beta \cdot 3}
\end{array}
Initial program 93.3%
Taylor expanded in beta around inf 31.8%
Taylor expanded in alpha around 0 30.4%
+-commutative30.4%
Simplified30.4%
Taylor expanded in beta around 0 4.1%
Final simplification4.1%
(FPCore (alpha beta) :precision binary64 (/ (/ 1.0 beta) beta))
double code(double alpha, double beta) {
return (1.0 / beta) / beta;
}
real(8) function code(alpha, beta)
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
code = (1.0d0 / beta) / beta
end function
public static double code(double alpha, double beta) {
return (1.0 / beta) / beta;
}
def code(alpha, beta):
    """1 / beta**2 via two successive divisions; alpha is unused."""
    inv = 1.0 / beta
    return inv / beta
function code(alpha, beta) return Float64(Float64(1.0 / beta) / beta) end
function tmp = code(alpha, beta) tmp = (1.0 / beta) / beta; end
code[alpha_, beta_] := N[(N[(1.0 / beta), $MachinePrecision] / beta), $MachinePrecision]
\begin{array}{l}
\\
\frac{\frac{1}{\beta}}{\beta}
\end{array}
Initial program 93.3%
Taylor expanded in beta around inf 31.8%
clear-num31.8%
inv-pow31.8%
Applied egg-rr31.8%
unpow-131.8%
Simplified31.8%
Taylor expanded in beta around inf 32.1%
Taylor expanded in alpha around 0 31.1%
Final simplification31.1%
(FPCore (alpha beta) :precision binary64 (/ 0.25 beta))
double code(double alpha, double beta) {
return 0.25 / beta;
}
real(8) function code(alpha, beta)
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
code = 0.25d0 / beta
end function
public static double code(double alpha, double beta) {
return 0.25 / beta;
}
def code(alpha, beta):
    """Constant approximation 0.25 / beta; alpha is unused."""
    numerator = 0.25
    return numerator / beta
function code(alpha, beta) return Float64(0.25 / beta) end
function tmp = code(alpha, beta) tmp = 0.25 / beta; end
code[alpha_, beta_] := N[(0.25 / beta), $MachinePrecision]
\begin{array}{l}
\\
\frac{0.25}{\beta}
\end{array}
Initial program 93.3%
associate-/l/92.2%
+-commutative92.2%
associate-+l+92.2%
*-commutative92.2%
metadata-eval92.2%
associate-+l+92.2%
metadata-eval92.2%
associate-+l+92.2%
metadata-eval92.2%
metadata-eval92.2%
associate-+l+92.2%
Simplified92.2%
Taylor expanded in alpha around 0 84.3%
+-commutative84.3%
Simplified84.3%
Taylor expanded in beta around 0 71.0%
*-commutative71.0%
Simplified71.0%
Taylor expanded in beta around inf 4.1%
Final simplification4.1%
herbie shell --seed 2024050
(FPCore (alpha beta)
:name "Octave 3.8, jcobi/3"
:precision binary64
:pre (and (> alpha -1.0) (> beta -1.0))
(/ (/ (/ (+ (+ (+ alpha beta) (* beta alpha)) 1.0) (+ (+ alpha beta) (* 2.0 1.0))) (+ (+ alpha beta) (* 2.0 1.0))) (+ (+ (+ alpha beta) (* 2.0 1.0)) 1.0)))