
(FPCore (alpha beta) :precision binary64 (/ (+ (/ (- beta alpha) (+ (+ alpha beta) 2.0)) 1.0) 2.0))
/* Computes ((beta - alpha) / ((alpha + beta) + 2) + 1) / 2 in binary64. */
double code(double alpha, double beta) {
return (((beta - alpha) / ((alpha + beta) + 2.0)) + 1.0) / 2.0;
}
! Computes ((beta - alpha) / ((alpha + beta) + 2) + 1) / 2 in double precision.
real(8) function code(alpha, beta)
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
code = (((beta - alpha) / ((alpha + beta) + 2.0d0)) + 1.0d0) / 2.0d0
end function
/** Computes ((beta - alpha) / ((alpha + beta) + 2) + 1) / 2 in double precision. */
public static double code(double alpha, double beta) {
return (((beta - alpha) / ((alpha + beta) + 2.0)) + 1.0) / 2.0;
}
def code(alpha, beta):
    """Return ((beta - alpha) / ((alpha + beta) + 2) + 1) / 2 in binary64."""
    ratio = (beta - alpha) / ((alpha + beta) + 2.0)
    return (ratio + 1.0) / 2.0
# Computes ((beta - alpha) / ((alpha + beta) + 2) + 1) / 2 in Float64.
function code(alpha, beta) return Float64(Float64(Float64(Float64(beta - alpha) / Float64(Float64(alpha + beta) + 2.0)) + 1.0) / 2.0) end
% Computes ((beta - alpha) / ((alpha + beta) + 2) + 1) / 2.
function tmp = code(alpha, beta) tmp = (((beta - alpha) / ((alpha + beta) + 2.0)) + 1.0) / 2.0; end
(* Computes ((beta - alpha) / ((alpha + beta) + 2) + 1) / 2 at machine precision. *)
code[alpha_, beta_] := N[(N[(N[(N[(beta - alpha), $MachinePrecision] / N[(N[(alpha + beta), $MachinePrecision] + 2.0), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision] / 2.0), $MachinePrecision]
\begin{array}{l}
\\
\frac{\frac{\beta - \alpha}{\left(\alpha + \beta\right) + 2} + 1}{2}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 7 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (alpha beta) :precision binary64 (/ (+ (/ (- beta alpha) (+ (+ alpha beta) 2.0)) 1.0) 2.0))
/* Reference implementation: ((beta - alpha) / ((alpha + beta) + 2) + 1) / 2 in binary64. */
double code(double alpha, double beta) {
return (((beta - alpha) / ((alpha + beta) + 2.0)) + 1.0) / 2.0;
}
! Reference implementation: ((beta - alpha) / ((alpha + beta) + 2) + 1) / 2, double precision.
real(8) function code(alpha, beta)
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
code = (((beta - alpha) / ((alpha + beta) + 2.0d0)) + 1.0d0) / 2.0d0
end function
/** Reference implementation: ((beta - alpha) / ((alpha + beta) + 2) + 1) / 2, double precision. */
public static double code(double alpha, double beta) {
return (((beta - alpha) / ((alpha + beta) + 2.0)) + 1.0) / 2.0;
}
def code(alpha, beta):
    """Reference form of ((beta - alpha) / ((alpha + beta) + 2) + 1) / 2."""
    q = (beta - alpha) / ((alpha + beta) + 2.0)
    half_shifted = q + 1.0
    return half_shifted / 2.0
# Reference implementation: ((beta - alpha) / ((alpha + beta) + 2) + 1) / 2 in Float64.
function code(alpha, beta) return Float64(Float64(Float64(Float64(beta - alpha) / Float64(Float64(alpha + beta) + 2.0)) + 1.0) / 2.0) end
% Reference implementation: ((beta - alpha) / ((alpha + beta) + 2) + 1) / 2.
function tmp = code(alpha, beta) tmp = (((beta - alpha) / ((alpha + beta) + 2.0)) + 1.0) / 2.0; end
(* Reference implementation: ((beta - alpha) / ((alpha + beta) + 2) + 1) / 2 at machine precision. *)
code[alpha_, beta_] := N[(N[(N[(N[(beta - alpha), $MachinePrecision] / N[(N[(alpha + beta), $MachinePrecision] + 2.0), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision] / 2.0), $MachinePrecision]
\begin{array}{l}
\\
\frac{\frac{\beta - \alpha}{\left(\alpha + \beta\right) + 2} + 1}{2}
\end{array}
(FPCore (alpha beta)
:precision binary64
(let* ((t_0 (/ (- beta alpha) (+ (+ beta alpha) 2.0))))
(if (<= t_0 -0.5)
(/ (/ (+ 2.0 (* beta 2.0)) alpha) 2.0)
(/ (+ t_0 1.0) 2.0))))
/* Herbie alternative: branches on t_0 = (beta - alpha) / ((beta + alpha) + 2).
 * For t_0 <= -0.5 it uses the rearranged form (2 + 2*beta) / alpha / 2
 * (per the FPCore above, taken from a Taylor expansion in alpha around inf);
 * otherwise the direct form (t_0 + 1) / 2. */
double code(double alpha, double beta) {
double t_0 = (beta - alpha) / ((beta + alpha) + 2.0);
double tmp;
if (t_0 <= -0.5) {
tmp = ((2.0 + (beta * 2.0)) / alpha) / 2.0;
} else {
tmp = (t_0 + 1.0) / 2.0;
}
return tmp;
}
! Herbie alternative: for t_0 = (beta-alpha)/((beta+alpha)+2) <= -0.5 uses
! (2 + 2*beta) / alpha / 2, otherwise (t_0 + 1) / 2.
real(8) function code(alpha, beta)
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
real(8) :: t_0
real(8) :: tmp
t_0 = (beta - alpha) / ((beta + alpha) + 2.0d0)
if (t_0 <= (-0.5d0)) then
tmp = ((2.0d0 + (beta * 2.0d0)) / alpha) / 2.0d0
else
tmp = (t_0 + 1.0d0) / 2.0d0
end if
code = tmp
end function
/** Herbie alternative: for t_0 = (beta-alpha)/((beta+alpha)+2) &lt;= -0.5 uses
 * (2 + 2*beta) / alpha / 2, otherwise (t_0 + 1) / 2. */
public static double code(double alpha, double beta) {
double t_0 = (beta - alpha) / ((beta + alpha) + 2.0);
double tmp;
if (t_0 <= -0.5) {
tmp = ((2.0 + (beta * 2.0)) / alpha) / 2.0;
} else {
tmp = (t_0 + 1.0) / 2.0;
}
return tmp;
}
def code(alpha, beta):
    """Herbie alternative of ((beta - alpha) / ((alpha + beta) + 2) + 1) / 2.

    For t_0 <= -0.5 uses the rearranged form (2 + 2*beta) / alpha / 2
    (per the FPCore above), otherwise the direct form (t_0 + 1) / 2.
    The original one-line rendering was invalid Python; reformatted.
    """
    t_0 = (beta - alpha) / ((beta + alpha) + 2.0)
    if t_0 <= -0.5:
        tmp = ((2.0 + (beta * 2.0)) / alpha) / 2.0
    else:
        tmp = (t_0 + 1.0) / 2.0
    return tmp
# Herbie alternative: for t_0 = (beta-alpha)/((beta+alpha)+2) <= -0.5 uses
# (2 + 2*beta)/alpha/2, otherwise (t_0 + 1)/2.
# NOTE(review): this auto-generated one-line rendering may not parse as-is in Julia
# (statements lack separators) — confirm before use.
function code(alpha, beta) t_0 = Float64(Float64(beta - alpha) / Float64(Float64(beta + alpha) + 2.0)) tmp = 0.0 if (t_0 <= -0.5) tmp = Float64(Float64(Float64(2.0 + Float64(beta * 2.0)) / alpha) / 2.0); else tmp = Float64(Float64(t_0 + 1.0) / 2.0); end return tmp end
% Herbie alternative: for t_0 = (beta-alpha)/((beta+alpha)+2) <= -0.5 uses
% (2 + 2*beta)/alpha/2, otherwise (t_0 + 1)/2.
function tmp_2 = code(alpha, beta) t_0 = (beta - alpha) / ((beta + alpha) + 2.0); tmp = 0.0; if (t_0 <= -0.5) tmp = ((2.0 + (beta * 2.0)) / alpha) / 2.0; else tmp = (t_0 + 1.0) / 2.0; end tmp_2 = tmp; end
(* Herbie alternative: for t_0 = (beta-alpha)/((beta+alpha)+2) <= -0.5 uses (2 + 2*beta)/alpha/2, otherwise (t_0 + 1)/2. *)
code[alpha_, beta_] := Block[{t$95$0 = N[(N[(beta - alpha), $MachinePrecision] / N[(N[(beta + alpha), $MachinePrecision] + 2.0), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[t$95$0, -0.5], N[(N[(N[(2.0 + N[(beta * 2.0), $MachinePrecision]), $MachinePrecision] / alpha), $MachinePrecision] / 2.0), $MachinePrecision], N[(N[(t$95$0 + 1.0), $MachinePrecision] / 2.0), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{\beta - \alpha}{\left(\beta + \alpha\right) + 2}\\
\mathbf{if}\;t_0 \leq -0.5:\\
\;\;\;\;\frac{\frac{2 + \beta \cdot 2}{\alpha}}{2}\\
\mathbf{else}:\\
\;\;\;\;\frac{t_0 + 1}{2}\\
\end{array}
\end{array}
Branch: if (/.f64 (-.f64 beta alpha) (+.f64 (+.f64 alpha beta) 2)) < -0.5
  Initial program: 8.0%
  +-commutative: 8.0%
  Simplified: 8.0%
  Taylor expanded in alpha around inf: 98.5%
Branch: if -0.5 < (/.f64 (-.f64 beta alpha) (+.f64 (+.f64 alpha beta) 2))
  Initial program: 100.0%
Final simplification: 99.6%
(FPCore (alpha beta) :precision binary64 (if (<= alpha 59000000000000.0) (/ (+ 1.0 (/ beta (+ beta 2.0))) 2.0) (/ (/ 2.0 alpha) 2.0)))
/* Herbie alternative: for alpha <= 5.9e13 uses (1 + beta/(beta+2)) / 2
 * (Taylor expansion in alpha around 0, per the derivation below);
 * otherwise the asymptotic form (2 / alpha) / 2. */
double code(double alpha, double beta) {
double tmp;
if (alpha <= 59000000000000.0) {
tmp = (1.0 + (beta / (beta + 2.0))) / 2.0;
} else {
tmp = (2.0 / alpha) / 2.0;
}
return tmp;
}
! Herbie alternative: for alpha <= 5.9e13 uses (1 + beta/(beta+2))/2,
! otherwise the asymptotic form (2/alpha)/2.
real(8) function code(alpha, beta)
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
real(8) :: tmp
if (alpha <= 59000000000000.0d0) then
tmp = (1.0d0 + (beta / (beta + 2.0d0))) / 2.0d0
else
tmp = (2.0d0 / alpha) / 2.0d0
end if
code = tmp
end function
/** Herbie alternative: for alpha &lt;= 5.9e13 uses (1 + beta/(beta+2))/2,
 * otherwise the asymptotic form (2/alpha)/2. */
public static double code(double alpha, double beta) {
double tmp;
if (alpha <= 59000000000000.0) {
tmp = (1.0 + (beta / (beta + 2.0))) / 2.0;
} else {
tmp = (2.0 / alpha) / 2.0;
}
return tmp;
}
def code(alpha, beta):
    """Herbie alternative of ((beta - alpha) / ((alpha + beta) + 2) + 1) / 2.

    For alpha <= 5.9e13 uses (1 + beta/(beta+2)) / 2, otherwise the
    asymptotic form (2 / alpha) / 2 (per the FPCore above).
    The original one-line rendering was invalid Python; reformatted.
    """
    if alpha <= 59000000000000.0:
        tmp = (1.0 + (beta / (beta + 2.0))) / 2.0
    else:
        tmp = (2.0 / alpha) / 2.0
    return tmp
# Herbie alternative: for alpha <= 5.9e13 uses (1 + beta/(beta+2))/2, otherwise (2/alpha)/2.
# NOTE(review): one-line auto-generated rendering; may need statement separators to parse.
function code(alpha, beta) tmp = 0.0 if (alpha <= 59000000000000.0) tmp = Float64(Float64(1.0 + Float64(beta / Float64(beta + 2.0))) / 2.0); else tmp = Float64(Float64(2.0 / alpha) / 2.0); end return tmp end
% Herbie alternative: for alpha <= 5.9e13 uses (1 + beta/(beta+2))/2, otherwise (2/alpha)/2.
function tmp_2 = code(alpha, beta) tmp = 0.0; if (alpha <= 59000000000000.0) tmp = (1.0 + (beta / (beta + 2.0))) / 2.0; else tmp = (2.0 / alpha) / 2.0; end tmp_2 = tmp; end
(* Herbie alternative: for alpha <= 5.9e13 uses (1 + beta/(beta+2))/2, otherwise (2/alpha)/2. *)
code[alpha_, beta_] := If[LessEqual[alpha, 59000000000000.0], N[(N[(1.0 + N[(beta / N[(beta + 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], N[(N[(2.0 / alpha), $MachinePrecision] / 2.0), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\alpha \leq 59000000000000:\\
\;\;\;\;\frac{1 + \frac{\beta}{\beta + 2}}{2}\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{2}{\alpha}}{2}\\
\end{array}
\end{array}
Branch: if alpha < 5.9e13
  Initial program: 99.7%
  +-commutative: 99.7%
  Simplified: 99.7%
  Taylor expanded in alpha around 0: 97.9%
Branch: if 5.9e13 < alpha
  Initial program: 19.8%
  +-commutative: 19.8%
  Simplified: 19.8%
  Taylor expanded in beta around 0: 5.8%
  +-commutative: 5.8%
  Simplified: 5.8%
  Taylor expanded in alpha around inf: 71.1%
Final simplification: 89.2%
(FPCore (alpha beta) :precision binary64 (if (<= alpha 38000000000000.0) (/ (+ 1.0 (/ beta (+ beta 2.0))) 2.0) (/ (/ (+ 2.0 (* beta 2.0)) alpha) 2.0)))
/* Herbie alternative: for alpha <= 3.8e13 uses (1 + beta/(beta+2)) / 2;
 * otherwise (2 + 2*beta) / alpha / 2 (Taylor expansion in alpha around inf,
 * per the derivation below). */
double code(double alpha, double beta) {
double tmp;
if (alpha <= 38000000000000.0) {
tmp = (1.0 + (beta / (beta + 2.0))) / 2.0;
} else {
tmp = ((2.0 + (beta * 2.0)) / alpha) / 2.0;
}
return tmp;
}
! Herbie alternative: for alpha <= 3.8e13 uses (1 + beta/(beta+2))/2,
! otherwise (2 + 2*beta)/alpha/2.
real(8) function code(alpha, beta)
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
real(8) :: tmp
if (alpha <= 38000000000000.0d0) then
tmp = (1.0d0 + (beta / (beta + 2.0d0))) / 2.0d0
else
tmp = ((2.0d0 + (beta * 2.0d0)) / alpha) / 2.0d0
end if
code = tmp
end function
/** Herbie alternative: for alpha &lt;= 3.8e13 uses (1 + beta/(beta+2))/2,
 * otherwise (2 + 2*beta)/alpha/2. */
public static double code(double alpha, double beta) {
double tmp;
if (alpha <= 38000000000000.0) {
tmp = (1.0 + (beta / (beta + 2.0))) / 2.0;
} else {
tmp = ((2.0 + (beta * 2.0)) / alpha) / 2.0;
}
return tmp;
}
def code(alpha, beta):
    """Herbie alternative of ((beta - alpha) / ((alpha + beta) + 2) + 1) / 2.

    For alpha <= 3.8e13 uses (1 + beta/(beta+2)) / 2, otherwise
    (2 + 2*beta) / alpha / 2 (per the FPCore above).
    The original one-line rendering was invalid Python; reformatted.
    """
    if alpha <= 38000000000000.0:
        tmp = (1.0 + (beta / (beta + 2.0))) / 2.0
    else:
        tmp = ((2.0 + (beta * 2.0)) / alpha) / 2.0
    return tmp
# Herbie alternative: for alpha <= 3.8e13 uses (1 + beta/(beta+2))/2, otherwise (2 + 2*beta)/alpha/2.
# NOTE(review): one-line auto-generated rendering; may need statement separators to parse.
function code(alpha, beta) tmp = 0.0 if (alpha <= 38000000000000.0) tmp = Float64(Float64(1.0 + Float64(beta / Float64(beta + 2.0))) / 2.0); else tmp = Float64(Float64(Float64(2.0 + Float64(beta * 2.0)) / alpha) / 2.0); end return tmp end
% Herbie alternative: for alpha <= 3.8e13 uses (1 + beta/(beta+2))/2, otherwise (2 + 2*beta)/alpha/2.
function tmp_2 = code(alpha, beta) tmp = 0.0; if (alpha <= 38000000000000.0) tmp = (1.0 + (beta / (beta + 2.0))) / 2.0; else tmp = ((2.0 + (beta * 2.0)) / alpha) / 2.0; end tmp_2 = tmp; end
(* Herbie alternative: for alpha <= 3.8e13 uses (1 + beta/(beta+2))/2, otherwise (2 + 2*beta)/alpha/2. *)
code[alpha_, beta_] := If[LessEqual[alpha, 38000000000000.0], N[(N[(1.0 + N[(beta / N[(beta + 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], N[(N[(N[(2.0 + N[(beta * 2.0), $MachinePrecision]), $MachinePrecision] / alpha), $MachinePrecision] / 2.0), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\alpha \leq 38000000000000:\\
\;\;\;\;\frac{1 + \frac{\beta}{\beta + 2}}{2}\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{2 + \beta \cdot 2}{\alpha}}{2}\\
\end{array}
\end{array}
Branch: if alpha < 3.8e13
  Initial program: 99.7%
  +-commutative: 99.7%
  Simplified: 99.7%
  Taylor expanded in alpha around 0: 97.9%
Branch: if 3.8e13 < alpha
  Initial program: 19.8%
  +-commutative: 19.8%
  Simplified: 19.8%
  Taylor expanded in alpha around inf: 86.6%
Final simplification: 94.2%
(FPCore (alpha beta) :precision binary64 (if (<= beta 2.0) (/ (+ 1.0 (* beta 0.5)) 2.0) 1.0))
/* Herbie alternative (ignores alpha): for beta <= 2 uses (1 + beta/2) / 2,
 * otherwise the constant 1 (Taylor expansion in beta around inf, per the
 * derivation below). */
double code(double alpha, double beta) {
double tmp;
if (beta <= 2.0) {
tmp = (1.0 + (beta * 0.5)) / 2.0;
} else {
tmp = 1.0;
}
return tmp;
}
! Herbie alternative (ignores alpha): for beta <= 2 uses (1 + beta/2)/2,
! otherwise the constant 1.
real(8) function code(alpha, beta)
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
real(8) :: tmp
if (beta <= 2.0d0) then
tmp = (1.0d0 + (beta * 0.5d0)) / 2.0d0
else
tmp = 1.0d0
end if
code = tmp
end function
/** Herbie alternative (ignores alpha): for beta &lt;= 2 uses (1 + beta/2)/2,
 * otherwise the constant 1. */
public static double code(double alpha, double beta) {
double tmp;
if (beta <= 2.0) {
tmp = (1.0 + (beta * 0.5)) / 2.0;
} else {
tmp = 1.0;
}
return tmp;
}
def code(alpha, beta):
    """Herbie alternative of ((beta - alpha) / ((alpha + beta) + 2) + 1) / 2.

    Ignores alpha: for beta <= 2 uses (1 + beta/2) / 2, otherwise the
    constant 1 (per the FPCore above).
    The original one-line rendering was invalid Python; reformatted.
    """
    if beta <= 2.0:
        tmp = (1.0 + (beta * 0.5)) / 2.0
    else:
        tmp = 1.0
    return tmp
# Herbie alternative (ignores alpha): for beta <= 2 uses (1 + beta/2)/2, otherwise 1.
# NOTE(review): one-line auto-generated rendering; may need statement separators to parse.
function code(alpha, beta) tmp = 0.0 if (beta <= 2.0) tmp = Float64(Float64(1.0 + Float64(beta * 0.5)) / 2.0); else tmp = 1.0; end return tmp end
% Herbie alternative (ignores alpha): for beta <= 2 uses (1 + beta/2)/2, otherwise 1.
function tmp_2 = code(alpha, beta) tmp = 0.0; if (beta <= 2.0) tmp = (1.0 + (beta * 0.5)) / 2.0; else tmp = 1.0; end tmp_2 = tmp; end
(* Herbie alternative (ignores alpha): for beta <= 2 uses (1 + beta/2)/2, otherwise 1. *)
code[alpha_, beta_] := If[LessEqual[beta, 2.0], N[(N[(1.0 + N[(beta * 0.5), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], 1.0]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\beta \leq 2:\\
\;\;\;\;\frac{1 + \beta \cdot 0.5}{2}\\
\mathbf{else}:\\
\;\;\;\;1\\
\end{array}
\end{array}
Branch: if beta < 2
  Initial program: 68.7%
  +-commutative: 68.7%
  Simplified: 68.7%
  Taylor expanded in alpha around 0: 66.5%
  Taylor expanded in beta around 0: 66.5%
Branch: if 2 < beta
  Initial program: 86.0%
  +-commutative: 86.0%
  Simplified: 86.0%
  Taylor expanded in beta around inf: 82.8%
Final simplification: 71.3%
(FPCore (alpha beta) :precision binary64 (if (<= alpha 1.3) (/ (- 1.0 (* alpha 0.5)) 2.0) (/ (/ 2.0 alpha) 2.0)))
/* Herbie alternative (ignores beta): for alpha <= 1.3 uses (1 - alpha/2) / 2,
 * otherwise the asymptotic form (2 / alpha) / 2 (per the derivation below). */
double code(double alpha, double beta) {
double tmp;
if (alpha <= 1.3) {
tmp = (1.0 - (alpha * 0.5)) / 2.0;
} else {
tmp = (2.0 / alpha) / 2.0;
}
return tmp;
}
! Herbie alternative (ignores beta): for alpha <= 1.3 uses (1 - alpha/2)/2,
! otherwise the asymptotic form (2/alpha)/2.
real(8) function code(alpha, beta)
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
real(8) :: tmp
if (alpha <= 1.3d0) then
tmp = (1.0d0 - (alpha * 0.5d0)) / 2.0d0
else
tmp = (2.0d0 / alpha) / 2.0d0
end if
code = tmp
end function
/** Herbie alternative (ignores beta): for alpha &lt;= 1.3 uses (1 - alpha/2)/2,
 * otherwise the asymptotic form (2/alpha)/2. */
public static double code(double alpha, double beta) {
double tmp;
if (alpha <= 1.3) {
tmp = (1.0 - (alpha * 0.5)) / 2.0;
} else {
tmp = (2.0 / alpha) / 2.0;
}
return tmp;
}
def code(alpha, beta):
    """Herbie alternative of ((beta - alpha) / ((alpha + beta) + 2) + 1) / 2.

    Ignores beta: for alpha <= 1.3 uses (1 - alpha/2) / 2, otherwise the
    asymptotic form (2 / alpha) / 2 (per the FPCore above).
    The original one-line rendering was invalid Python; reformatted.
    """
    if alpha <= 1.3:
        tmp = (1.0 - (alpha * 0.5)) / 2.0
    else:
        tmp = (2.0 / alpha) / 2.0
    return tmp
# Herbie alternative (ignores beta): for alpha <= 1.3 uses (1 - alpha/2)/2, otherwise (2/alpha)/2.
# NOTE(review): one-line auto-generated rendering; may need statement separators to parse.
function code(alpha, beta) tmp = 0.0 if (alpha <= 1.3) tmp = Float64(Float64(1.0 - Float64(alpha * 0.5)) / 2.0); else tmp = Float64(Float64(2.0 / alpha) / 2.0); end return tmp end
% Herbie alternative (ignores beta): for alpha <= 1.3 uses (1 - alpha/2)/2, otherwise (2/alpha)/2.
function tmp_2 = code(alpha, beta) tmp = 0.0; if (alpha <= 1.3) tmp = (1.0 - (alpha * 0.5)) / 2.0; else tmp = (2.0 / alpha) / 2.0; end tmp_2 = tmp; end
(* Herbie alternative (ignores beta): for alpha <= 1.3 uses (1 - alpha/2)/2, otherwise (2/alpha)/2. *)
code[alpha_, beta_] := If[LessEqual[alpha, 1.3], N[(N[(1.0 - N[(alpha * 0.5), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], N[(N[(2.0 / alpha), $MachinePrecision] / 2.0), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\alpha \leq 1.3:\\
\;\;\;\;\frac{1 - \alpha \cdot 0.5}{2}\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{2}{\alpha}}{2}\\
\end{array}
\end{array}
Branch: if alpha < 1.30000000000000004
  Initial program: 100.0%
  +-commutative: 100.0%
  Simplified: 100.0%
  Taylor expanded in beta around 0: 75.6%
  +-commutative: 75.6%
  Simplified: 75.6%
  Taylor expanded in alpha around 0: 74.7%
Branch: if 1.30000000000000004 < alpha
  Initial program: 21.0%
  +-commutative: 21.0%
  Simplified: 21.0%
  Taylor expanded in beta around 0: 6.3%
  +-commutative: 6.3%
  Simplified: 6.3%
  Taylor expanded in alpha around inf: 70.6%
Final simplification: 73.3%
(FPCore (alpha beta) :precision binary64 (if (<= alpha 41000000000000.0) 1.0 (/ (/ 2.0 alpha) 2.0)))
/* Herbie alternative (ignores beta): constant 1 for alpha <= 4.1e13,
 * otherwise the asymptotic form (2 / alpha) / 2 (per the derivation below). */
double code(double alpha, double beta) {
double tmp;
if (alpha <= 41000000000000.0) {
tmp = 1.0;
} else {
tmp = (2.0 / alpha) / 2.0;
}
return tmp;
}
! Herbie alternative (ignores beta): constant 1 for alpha <= 4.1e13,
! otherwise the asymptotic form (2/alpha)/2.
real(8) function code(alpha, beta)
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
real(8) :: tmp
if (alpha <= 41000000000000.0d0) then
tmp = 1.0d0
else
tmp = (2.0d0 / alpha) / 2.0d0
end if
code = tmp
end function
/** Herbie alternative (ignores beta): constant 1 for alpha &lt;= 4.1e13,
 * otherwise the asymptotic form (2/alpha)/2. */
public static double code(double alpha, double beta) {
double tmp;
if (alpha <= 41000000000000.0) {
tmp = 1.0;
} else {
tmp = (2.0 / alpha) / 2.0;
}
return tmp;
}
def code(alpha, beta):
    """Herbie alternative of ((beta - alpha) / ((alpha + beta) + 2) + 1) / 2.

    Ignores beta: constant 1 for alpha <= 4.1e13, otherwise the asymptotic
    form (2 / alpha) / 2 (per the FPCore above).
    The original one-line rendering was invalid Python; reformatted.
    """
    if alpha <= 41000000000000.0:
        tmp = 1.0
    else:
        tmp = (2.0 / alpha) / 2.0
    return tmp
# Herbie alternative (ignores beta): constant 1 for alpha <= 4.1e13, otherwise (2/alpha)/2.
# NOTE(review): one-line auto-generated rendering; may need statement separators to parse.
function code(alpha, beta) tmp = 0.0 if (alpha <= 41000000000000.0) tmp = 1.0; else tmp = Float64(Float64(2.0 / alpha) / 2.0); end return tmp end
% Herbie alternative (ignores beta): constant 1 for alpha <= 4.1e13, otherwise (2/alpha)/2.
function tmp_2 = code(alpha, beta) tmp = 0.0; if (alpha <= 41000000000000.0) tmp = 1.0; else tmp = (2.0 / alpha) / 2.0; end tmp_2 = tmp; end
(* Herbie alternative (ignores beta): constant 1 for alpha <= 4.1e13, otherwise (2/alpha)/2. *)
code[alpha_, beta_] := If[LessEqual[alpha, 41000000000000.0], 1.0, N[(N[(2.0 / alpha), $MachinePrecision] / 2.0), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\alpha \leq 41000000000000:\\
\;\;\;\;1\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{2}{\alpha}}{2}\\
\end{array}
\end{array}
Branch: if alpha < 4.1e13
  Initial program: 99.7%
  +-commutative: 99.7%
  Simplified: 99.7%
  Taylor expanded in beta around inf: 42.8%
Branch: if 4.1e13 < alpha
  Initial program: 19.8%
  +-commutative: 19.8%
  Simplified: 19.8%
  Taylor expanded in beta around 0: 5.8%
  +-commutative: 5.8%
  Simplified: 5.8%
  Taylor expanded in alpha around inf: 71.1%
Final simplification: 52.0%
(FPCore (alpha beta) :precision binary64 1.0)
/* Herbie alternative: constant approximation, always returns 1 (both inputs ignored). */
double code(double alpha, double beta) {
return 1.0;
}
! Herbie alternative: constant approximation, always returns 1 (both inputs ignored).
real(8) function code(alpha, beta)
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
code = 1.0d0
end function
/** Herbie alternative: constant approximation, always returns 1 (both inputs ignored). */
public static double code(double alpha, double beta) {
return 1.0;
}
def code(alpha, beta):
    """Constant Herbie approximation: always returns 1.0 (inputs ignored)."""
    return 1.0
# Herbie alternative: constant approximation, always returns 1 (both inputs ignored).
function code(alpha, beta) return 1.0 end
% Herbie alternative: constant approximation, always returns 1 (both inputs ignored).
function tmp = code(alpha, beta) tmp = 1.0; end
(* Herbie alternative: constant approximation, always returns 1 (both inputs ignored). *)
code[alpha_, beta_] := 1.0
\begin{array}{l}
\\
1
\end{array}
Initial program: 73.8%
+-commutative: 73.8%
Simplified: 73.8%
Taylor expanded in beta around inf: 34.3%
Final simplification: 34.3%
herbie shell --seed 2023182
(FPCore (alpha beta)
:name "Octave 3.8, jcobi/1"
:precision binary64
:pre (and (> alpha -1.0) (> beta -1.0))
(/ (+ (/ (- beta alpha) (+ (+ alpha beta) 2.0)) 1.0) 2.0))