
; Herbie input (simplified form): t0*(alpha*beta + t0) / (t2*(t2 - 1)),
; with t0 = i*((alpha+beta) + i) and t2 = ((alpha+beta) + 2i)^2.
(FPCore (alpha beta i)
:precision binary64
(let* ((t_0 (* i (+ (+ alpha beta) i)))
(t_1 (+ (+ alpha beta) (* 2.0 i)))
(t_2 (* t_1 t_1)))
(/ (/ (* t_0 (+ (* beta alpha) t_0)) t_2) (- t_2 1.0))))
/* Herbie initial program: n*(alpha*beta + n) / (q*(q - 1)), where
 * n = i*((alpha + beta) + i) and q = ((alpha + beta) + 2i)^2.
 * Same floating-point operation order as the original expression. */
double code(double alpha, double beta, double i) {
    double sum = alpha + beta;
    double n = i * (sum + i);
    double edge = sum + (2.0 * i);
    double q = edge * edge;
    double numer = n * ((beta * alpha) + n);
    return (numer / q) / (q - 1.0);
}
! Herbie initial program: n*(alpha*beta + n) / (q*(q - 1)),
! where n = i*((alpha + beta) + i) and q = ((alpha + beta) + 2i)**2.
real(8) function code(alpha, beta, i)
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
real(8), intent (in) :: i
real(8) :: n, edge, q
n = i * ((alpha + beta) + i)
edge = (alpha + beta) + (2.0d0 * i)
q = edge * edge
code = ((n * ((beta * alpha) + n)) / q) / (q - 1.0d0)
end function
// Herbie initial program: n*(alpha*beta + n) / (q*(q - 1)) with
// n = i*((alpha + beta) + i) and q = ((alpha + beta) + 2i)^2.
public static double code(double alpha, double beta, double i) {
    final double sum = alpha + beta;
    final double n = i * (sum + i);
    final double edge = sum + (2.0 * i);
    final double q = edge * edge;
    return (n * ((beta * alpha) + n)) / q / (q - 1.0);
}
def code(alpha, beta, i):
    """Herbie initial program: t0*(alpha*beta + t0) / (t2*(t2 - 1)).

    t0 = i*((alpha + beta) + i); t2 = ((alpha + beta) + 2i)**2.
    (Original had all statements fused onto one line: a SyntaxError.)
    """
    t_0 = i * ((alpha + beta) + i)
    t_1 = (alpha + beta) + (2.0 * i)
    t_2 = t_1 * t_1
    return ((t_0 * ((beta * alpha) + t_0)) / t_2) / (t_2 - 1.0)
# Herbie initial program; explicit Float64 rounding after every operation.
# (Original had several statements on one line with no separators — invalid Julia.)
function code(alpha, beta, i)
    t_0 = Float64(i * Float64(Float64(alpha + beta) + i))
    t_1 = Float64(Float64(alpha + beta) + Float64(2.0 * i))
    t_2 = Float64(t_1 * t_1)
    return Float64(Float64(Float64(t_0 * Float64(Float64(beta * alpha) + t_0)) / t_2) / Float64(t_2 - 1.0))
end
% Herbie initial program: t0*(alpha*beta + t0) / (t2*(t2 - 1)).
% (Original collapsed the function onto one line — invalid MATLAB syntax.)
function tmp = code(alpha, beta, i)
    t_0 = i * ((alpha + beta) + i);
    t_1 = (alpha + beta) + (2.0 * i);
    t_2 = t_1 * t_1;
    tmp = ((t_0 * ((beta * alpha) + t_0)) / t_2) / (t_2 - 1.0);
end
(* Herbie initial program: t0*(alpha*beta + t0) / (t2*(t2 - 1)), each step rounded to $MachinePrecision. *)
code[alpha_, beta_, i_] := Block[{t$95$0 = N[(i * N[(N[(alpha + beta), $MachinePrecision] + i), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(N[(alpha + beta), $MachinePrecision] + N[(2.0 * i), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$2 = N[(t$95$1 * t$95$1), $MachinePrecision]}, N[(N[(N[(t$95$0 * N[(N[(beta * alpha), $MachinePrecision] + t$95$0), $MachinePrecision]), $MachinePrecision] / t$95$2), $MachinePrecision] / N[(t$95$2 - 1.0), $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := i \cdot \left(\left(\alpha + \beta\right) + i\right)\\
t_1 := \left(\alpha + \beta\right) + 2 \cdot i\\
t_2 := t_1 \cdot t_1\\
\frac{\frac{t_0 \cdot \left(\beta \cdot \alpha + t_0\right)}{t_2}}{t_2 - 1}
\end{array}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 5 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
; Herbie input (simplified form): t0*(alpha*beta + t0) / (t2*(t2 - 1)),
; with t0 = i*((alpha+beta) + i) and t2 = ((alpha+beta) + 2i)^2.
(FPCore (alpha beta i)
:precision binary64
(let* ((t_0 (* i (+ (+ alpha beta) i)))
(t_1 (+ (+ alpha beta) (* 2.0 i)))
(t_2 (* t_1 t_1)))
(/ (/ (* t_0 (+ (* beta alpha) t_0)) t_2) (- t_2 1.0))))
/* Herbie initial program: n*(alpha*beta + n) / (q*(q - 1)), where
 * n = i*((alpha + beta) + i) and q = ((alpha + beta) + 2i)^2.
 * Same floating-point operation order as the original expression. */
double code(double alpha, double beta, double i) {
    double sum = alpha + beta;
    double n = i * (sum + i);
    double edge = sum + (2.0 * i);
    double q = edge * edge;
    double numer = n * ((beta * alpha) + n);
    return (numer / q) / (q - 1.0);
}
! Herbie initial program: n*(alpha*beta + n) / (q*(q - 1)),
! where n = i*((alpha + beta) + i) and q = ((alpha + beta) + 2i)**2.
real(8) function code(alpha, beta, i)
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
real(8), intent (in) :: i
real(8) :: n, edge, q
n = i * ((alpha + beta) + i)
edge = (alpha + beta) + (2.0d0 * i)
q = edge * edge
code = ((n * ((beta * alpha) + n)) / q) / (q - 1.0d0)
end function
// Herbie initial program: n*(alpha*beta + n) / (q*(q - 1)) with
// n = i*((alpha + beta) + i) and q = ((alpha + beta) + 2i)^2.
public static double code(double alpha, double beta, double i) {
    final double sum = alpha + beta;
    final double n = i * (sum + i);
    final double edge = sum + (2.0 * i);
    final double q = edge * edge;
    return (n * ((beta * alpha) + n)) / q / (q - 1.0);
}
def code(alpha, beta, i):
    """Herbie initial program: t0*(alpha*beta + t0) / (t2*(t2 - 1)).

    t0 = i*((alpha + beta) + i); t2 = ((alpha + beta) + 2i)**2.
    (Original had all statements fused onto one line: a SyntaxError.)
    """
    t_0 = i * ((alpha + beta) + i)
    t_1 = (alpha + beta) + (2.0 * i)
    t_2 = t_1 * t_1
    return ((t_0 * ((beta * alpha) + t_0)) / t_2) / (t_2 - 1.0)
# Herbie initial program; explicit Float64 rounding after every operation.
# (Original had several statements on one line with no separators — invalid Julia.)
function code(alpha, beta, i)
    t_0 = Float64(i * Float64(Float64(alpha + beta) + i))
    t_1 = Float64(Float64(alpha + beta) + Float64(2.0 * i))
    t_2 = Float64(t_1 * t_1)
    return Float64(Float64(Float64(t_0 * Float64(Float64(beta * alpha) + t_0)) / t_2) / Float64(t_2 - 1.0))
end
% Herbie initial program: t0*(alpha*beta + t0) / (t2*(t2 - 1)).
% (Original collapsed the function onto one line — invalid MATLAB syntax.)
function tmp = code(alpha, beta, i)
    t_0 = i * ((alpha + beta) + i);
    t_1 = (alpha + beta) + (2.0 * i);
    t_2 = t_1 * t_1;
    tmp = ((t_0 * ((beta * alpha) + t_0)) / t_2) / (t_2 - 1.0);
end
(* Herbie initial program: t0*(alpha*beta + t0) / (t2*(t2 - 1)), each step rounded to $MachinePrecision. *)
code[alpha_, beta_, i_] := Block[{t$95$0 = N[(i * N[(N[(alpha + beta), $MachinePrecision] + i), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(N[(alpha + beta), $MachinePrecision] + N[(2.0 * i), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$2 = N[(t$95$1 * t$95$1), $MachinePrecision]}, N[(N[(N[(t$95$0 * N[(N[(beta * alpha), $MachinePrecision] + t$95$0), $MachinePrecision]), $MachinePrecision] / t$95$2), $MachinePrecision] / N[(t$95$2 - 1.0), $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := i \cdot \left(\left(\alpha + \beta\right) + i\right)\\
t_1 := \left(\alpha + \beta\right) + 2 \cdot i\\
t_2 := t_1 \cdot t_1\\
\frac{\frac{t_0 \cdot \left(\beta \cdot \alpha + t_0\right)}{t_2}}{t_2 - 1}
\end{array}
\end{array}
; Alternative 1: Taylor expansion in i (around inf, then 0); reported accuracy 78.1%.
(FPCore (alpha beta i) :precision binary64 (/ (- (+ (* 0.0625 i) (* 0.0625 (+ (* 2.0 alpha) (* 2.0 beta)))) (* 0.125 (+ alpha beta))) i))
/* Alternative 1: Taylor-expanded form, ((i/16 + (2a+2b)/16) - (a+b)/8) / i. */
double code(double alpha, double beta, double i) {
    double linear = 0.0625 * i;
    double scaled = 0.0625 * ((2.0 * alpha) + (2.0 * beta));
    double correction = 0.125 * (alpha + beta);
    return ((linear + scaled) - correction) / i;
}
! Alternative 1: Taylor-expanded form, ((i/16 + (2a+2b)/16) - (a+b)/8) / i.
real(8) function code(alpha, beta, i)
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
real(8), intent (in) :: i
code = (((0.0625d0 * i) + (0.0625d0 * ((2.0d0 * alpha) + (2.0d0 * beta)))) - (0.125d0 * (alpha + beta))) / i
end function
// Alternative 1: Taylor-expanded form, ((i/16 + (2a+2b)/16) - (a+b)/8) / i.
public static double code(double alpha, double beta, double i) {
    final double linear = 0.0625 * i;
    final double scaled = 0.0625 * ((2.0 * alpha) + (2.0 * beta));
    final double correction = 0.125 * (alpha + beta);
    return ((linear + scaled) - correction) / i;
}
def code(alpha, beta, i):
    """Alternative 1: ((i/16 + (2a + 2b)/16) - (a + b)/8) / i."""
    linear = 0.0625 * i
    scaled = 0.0625 * ((2.0 * alpha) + (2.0 * beta))
    correction = 0.125 * (alpha + beta)
    return ((linear + scaled) - correction) / i
# Alternative 1: ((i/16 + (2a+2b)/16) - (a+b)/8) / i; Float64 wrappers preserved.
function code(alpha, beta, i)
    linear = Float64(0.0625 * i)
    scaled = Float64(0.0625 * Float64(Float64(2.0 * alpha) + Float64(2.0 * beta)))
    correction = Float64(0.125 * Float64(alpha + beta))
    return Float64(Float64(Float64(linear + scaled) - correction) / i)
end
% Alternative 1: ((i/16 + (2a+2b)/16) - (a+b)/8) / i.
% (Original collapsed the function onto one line — invalid MATLAB syntax.)
function tmp = code(alpha, beta, i)
    tmp = (((0.0625 * i) + (0.0625 * ((2.0 * alpha) + (2.0 * beta)))) - (0.125 * (alpha + beta))) / i;
end
(* Alternative 1: ((i/16 + (2a+2b)/16) - (a+b)/8) / i, each step rounded to $MachinePrecision. *)
code[alpha_, beta_, i_] := N[(N[(N[(N[(0.0625 * i), $MachinePrecision] + N[(0.0625 * N[(N[(2.0 * alpha), $MachinePrecision] + N[(2.0 * beta), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(0.125 * N[(alpha + beta), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / i), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(0.0625 \cdot i + 0.0625 \cdot \left(2 \cdot \alpha + 2 \cdot \beta\right)\right) - 0.125 \cdot \left(\alpha + \beta\right)}{i}
\end{array}
Initial program 14.6%
Simplified 36.2%
Taylor expanded in i around inf 78.1%
Taylor expanded in i around 0 78.1%
Final simplification 78.1%
; Alternative 2: Taylor expansion in i around inf; reported accuracy 78.1%.
(FPCore (alpha beta i) :precision binary64 (- (+ 0.0625 (* 0.0625 (/ (+ (* 2.0 alpha) (* 2.0 beta)) i))) (* 0.125 (/ (+ alpha beta) i))))
/* Alternative 2: 1/16 + (2a+2b)/(16i) - (a+b)/(8i); divides by i before combining. */
double code(double alpha, double beta, double i) {
    double scaled = 0.0625 * (((2.0 * alpha) + (2.0 * beta)) / i);
    double correction = 0.125 * ((alpha + beta) / i);
    return (0.0625 + scaled) - correction;
}
! Alternative 2: 1/16 + (2a+2b)/(16i) - (a+b)/(8i).
real(8) function code(alpha, beta, i)
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
real(8), intent (in) :: i
code = (0.0625d0 + (0.0625d0 * (((2.0d0 * alpha) + (2.0d0 * beta)) / i))) - (0.125d0 * ((alpha + beta) / i))
end function
// Alternative 2: 1/16 + (2a+2b)/(16i) - (a+b)/(8i).
public static double code(double alpha, double beta, double i) {
    final double scaled = 0.0625 * (((2.0 * alpha) + (2.0 * beta)) / i);
    final double correction = 0.125 * ((alpha + beta) / i);
    return (0.0625 + scaled) - correction;
}
def code(alpha, beta, i):
    """Alternative 2: 1/16 + (2a + 2b)/(16i) - (a + b)/(8i)."""
    scaled = 0.0625 * (((2.0 * alpha) + (2.0 * beta)) / i)
    correction = 0.125 * ((alpha + beta) / i)
    return (0.0625 + scaled) - correction
# Alternative 2: 1/16 + (2a+2b)/(16i) - (a+b)/(8i); Float64 wrappers preserved.
function code(alpha, beta, i)
    scaled = Float64(0.0625 * Float64(Float64(Float64(2.0 * alpha) + Float64(2.0 * beta)) / i))
    correction = Float64(0.125 * Float64(Float64(alpha + beta) / i))
    return Float64(Float64(0.0625 + scaled) - correction)
end
% Alternative 2: 1/16 + (2a+2b)/(16i) - (a+b)/(8i).
% (Original collapsed the function onto one line — invalid MATLAB syntax.)
function tmp = code(alpha, beta, i)
    tmp = (0.0625 + (0.0625 * (((2.0 * alpha) + (2.0 * beta)) / i))) - (0.125 * ((alpha + beta) / i));
end
(* Alternative 2: 1/16 + (2a+2b)/(16i) - (a+b)/(8i), each step rounded to $MachinePrecision. *)
code[alpha_, beta_, i_] := N[(N[(0.0625 + N[(0.0625 * N[(N[(N[(2.0 * alpha), $MachinePrecision] + N[(2.0 * beta), $MachinePrecision]), $MachinePrecision] / i), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(0.125 * N[(N[(alpha + beta), $MachinePrecision] / i), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(0.0625 + 0.0625 \cdot \frac{2 \cdot \alpha + 2 \cdot \beta}{i}\right) - 0.125 \cdot \frac{\alpha + \beta}{i}
\end{array}
Initial program 14.6%
Simplified 36.2%
Taylor expanded in i around inf 78.1%
Final simplification 78.1%
; Alternative 3: further Taylor expansion in alpha around 0; reported accuracy 71.9%.
(FPCore (alpha beta i) :precision binary64 (/ (- (+ (* 0.0625 i) (* beta 0.125)) (* 0.125 (+ alpha beta))) i))
/* Alternative 3: ((i/16 + b/8) - (a+b)/8) / i. */
double code(double alpha, double beta, double i) {
    double linear = 0.0625 * i;
    double numer = (linear + (beta * 0.125)) - (0.125 * (alpha + beta));
    return numer / i;
}
! Alternative 3: ((i/16 + b/8) - (a+b)/8) / i.
real(8) function code(alpha, beta, i)
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
real(8), intent (in) :: i
code = (((0.0625d0 * i) + (beta * 0.125d0)) - (0.125d0 * (alpha + beta))) / i
end function
// Alternative 3: ((i/16 + b/8) - (a+b)/8) / i.
public static double code(double alpha, double beta, double i) {
    final double linear = 0.0625 * i;
    final double numer = (linear + (beta * 0.125)) - (0.125 * (alpha + beta));
    return numer / i;
}
def code(alpha, beta, i):
    """Alternative 3: ((i/16 + b/8) - (a + b)/8) / i."""
    linear = 0.0625 * i
    numer = (linear + (beta * 0.125)) - (0.125 * (alpha + beta))
    return numer / i
# Alternative 3: ((i/16 + b/8) - (a+b)/8) / i; Float64 wrappers preserved.
function code(alpha, beta, i)
    numer = Float64(Float64(Float64(0.0625 * i) + Float64(beta * 0.125)) - Float64(0.125 * Float64(alpha + beta)))
    return Float64(numer / i)
end
% Alternative 3: ((i/16 + b/8) - (a+b)/8) / i.
% (Original collapsed the function onto one line — invalid MATLAB syntax.)
function tmp = code(alpha, beta, i)
    tmp = (((0.0625 * i) + (beta * 0.125)) - (0.125 * (alpha + beta))) / i;
end
(* Alternative 3: ((i/16 + b/8) - (a+b)/8) / i, each step rounded to $MachinePrecision. *)
code[alpha_, beta_, i_] := N[(N[(N[(N[(0.0625 * i), $MachinePrecision] + N[(beta * 0.125), $MachinePrecision]), $MachinePrecision] - N[(0.125 * N[(alpha + beta), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / i), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(0.0625 \cdot i + \beta \cdot 0.125\right) - 0.125 \cdot \left(\alpha + \beta\right)}{i}
\end{array}
Initial program 14.6%
Simplified 36.2%
Taylor expanded in i around inf 78.1%
Taylor expanded in i around 0 78.1%
Taylor expanded in alpha around 0 71.9%
Final simplification 71.9%
; Alternative 4: expansion collapsed to the constant 0; reported accuracy 14.0%.
(FPCore (alpha beta i) :precision binary64 0.0)
/* Alternative 4: degenerate constant-zero approximation (reported accuracy 14.0%). */
double code(double alpha, double beta, double i) {
return 0.0;
}
! Alternative 4: degenerate constant-zero approximation (reported accuracy 14.0%).
real(8) function code(alpha, beta, i)
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
real(8), intent (in) :: i
code = 0.0d0
end function
// Alternative 4: degenerate constant-zero approximation (reported accuracy 14.0%).
public static double code(double alpha, double beta, double i) {
return 0.0;
}
def code(alpha, beta, i):
    """Alternative 4: degenerate constant-zero approximation."""
    return 0.0
# Alternative 4: degenerate constant-zero approximation.
function code(alpha, beta, i)
    return 0.0
end
% Alternative 4: degenerate constant-zero approximation.
% (Original collapsed the function onto one line — invalid MATLAB syntax.)
function tmp = code(alpha, beta, i)
    tmp = 0.0;
end
(* Alternative 4: degenerate constant-zero approximation. *)
code[alpha_, beta_, i_] := 0.0
\begin{array}{l}
\\
0
\end{array}
Initial program 14.6%
Simplified 36.2%
Taylor expanded in i around inf 78.1%
Taylor expanded in i around 0 14.0%
div-sub 14.0%
distribute-lft-in 14.0%
associate-*r* 14.0%
metadata-eval 14.0%
associate-*r/ 14.0%
associate-*r/ 14.0%
+-inverses 14.0%
Simplified 14.0%
Final simplification 14.0%
; Alternative 5: leading term of the expansion, constant 1/16; reported accuracy 67.6%.
(FPCore (alpha beta i) :precision binary64 0.0625)
/* Alternative 5: leading term of the expansion, constant 1/16 (reported accuracy 67.6%). */
double code(double alpha, double beta, double i) {
return 0.0625;
}
! Alternative 5: leading term of the expansion, constant 1/16 (reported accuracy 67.6%).
real(8) function code(alpha, beta, i)
real(8), intent (in) :: alpha
real(8), intent (in) :: beta
real(8), intent (in) :: i
code = 0.0625d0
end function
// Alternative 5: leading term of the expansion, constant 1/16 (reported accuracy 67.6%).
public static double code(double alpha, double beta, double i) {
return 0.0625;
}
def code(alpha, beta, i):
    """Alternative 5: leading term of the expansion, constant 1/16."""
    return 0.0625
# Alternative 5: leading term of the expansion, constant 1/16.
function code(alpha, beta, i)
    return 0.0625
end
% Alternative 5: leading term of the expansion, constant 1/16.
% (Original collapsed the function onto one line — invalid MATLAB syntax.)
function tmp = code(alpha, beta, i)
    tmp = 0.0625;
end
(* Alternative 5: leading term of the expansion, constant 1/16. *)
code[alpha_, beta_, i_] := 0.0625
\begin{array}{l}
\\
0.0625
\end{array}
Initial program 14.6%
Simplified 36.2%
Taylor expanded in i around inf 67.6%
Final simplification 67.6%
herbie shell --seed 2024066
; Original problem statement as given to Herbie (un-let-bound form of the
; expression above), with the sampling precondition on alpha, beta, i.
(FPCore (alpha beta i)
:name "Octave 3.8, jcobi/4"
:precision binary64
:pre (and (and (> alpha -1.0) (> beta -1.0)) (> i 1.0))
(/ (/ (* (* i (+ (+ alpha beta) i)) (+ (* beta alpha) (* i (+ (+ alpha beta) i)))) (* (+ (+ alpha beta) (* 2.0 i)) (+ (+ alpha beta) (* 2.0 i)))) (- (* (+ (+ alpha beta) (* 2.0 i)) (+ (+ alpha beta) (* 2.0 i))) 1.0)))