Octave 3.8, jcobi/4

Percentage Accurate: 15.4% → 77.5%
Time: 25.0s
Alternatives: 5
Speedup: 53.0×

Specification

\[\left(\alpha > -1 \land \beta > -1\right) \land i > 1\]
\[\begin{array}{l} t_0 := i \cdot \left(\left(\alpha + \beta\right) + i\right)\\ t_1 := \left(\alpha + \beta\right) + 2 \cdot i\\ t_2 := t_1 \cdot t_1\\ \frac{\frac{t_0 \cdot \left(\beta \cdot \alpha + t_0\right)}{t_2}}{t_2 - 1} \end{array} \]
(FPCore (alpha beta i)
 :precision binary64
 (let* ((t_0 (* i (+ (+ alpha beta) i)))
        (t_1 (+ (+ alpha beta) (* 2.0 i)))
        (t_2 (* t_1 t_1)))
   (/ (/ (* t_0 (+ (* beta alpha) t_0)) t_2) (- t_2 1.0))))
double code(double alpha, double beta, double i) {
	double t_0 = i * ((alpha + beta) + i);
	double t_1 = (alpha + beta) + (2.0 * i);
	double t_2 = t_1 * t_1;
	return ((t_0 * ((beta * alpha) + t_0)) / t_2) / (t_2 - 1.0);
}
real(8) function code(alpha, beta, i)
    real(8), intent (in) :: alpha
    real(8), intent (in) :: beta
    real(8), intent (in) :: i
    real(8) :: t_0
    real(8) :: t_1
    real(8) :: t_2
    t_0 = i * ((alpha + beta) + i)
    t_1 = (alpha + beta) + (2.0d0 * i)
    t_2 = t_1 * t_1
    code = ((t_0 * ((beta * alpha) + t_0)) / t_2) / (t_2 - 1.0d0)
end function
public static double code(double alpha, double beta, double i) {
	double t_0 = i * ((alpha + beta) + i);
	double t_1 = (alpha + beta) + (2.0 * i);
	double t_2 = t_1 * t_1;
	return ((t_0 * ((beta * alpha) + t_0)) / t_2) / (t_2 - 1.0);
}
def code(alpha, beta, i):
	t_0 = i * ((alpha + beta) + i)
	t_1 = (alpha + beta) + (2.0 * i)
	t_2 = t_1 * t_1
	return ((t_0 * ((beta * alpha) + t_0)) / t_2) / (t_2 - 1.0)
function code(alpha, beta, i)
	t_0 = Float64(i * Float64(Float64(alpha + beta) + i))
	t_1 = Float64(Float64(alpha + beta) + Float64(2.0 * i))
	t_2 = Float64(t_1 * t_1)
	return Float64(Float64(Float64(t_0 * Float64(Float64(beta * alpha) + t_0)) / t_2) / Float64(t_2 - 1.0))
end
function tmp = code(alpha, beta, i)
	t_0 = i * ((alpha + beta) + i);
	t_1 = (alpha + beta) + (2.0 * i);
	t_2 = t_1 * t_1;
	tmp = ((t_0 * ((beta * alpha) + t_0)) / t_2) / (t_2 - 1.0);
end
code[alpha_, beta_, i_] := Block[{t$95$0 = N[(i * N[(N[(alpha + beta), $MachinePrecision] + i), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(N[(alpha + beta), $MachinePrecision] + N[(2.0 * i), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$2 = N[(t$95$1 * t$95$1), $MachinePrecision]}, N[(N[(N[(t$95$0 * N[(N[(beta * alpha), $MachinePrecision] + t$95$0), $MachinePrecision]), $MachinePrecision] / t$95$2), $MachinePrecision] / N[(t$95$2 - 1.0), $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}
t_0 := i \cdot \left(\left(\alpha + \beta\right) + i\right)\\
t_1 := \left(\alpha + \beta\right) + 2 \cdot i\\
t_2 := t_1 \cdot t_1\\
\frac{\frac{t_0 \cdot \left(\beta \cdot \alpha + t_0\right)}{t_2}}{t_2 - 1}
\end{array}
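
Why is the original program only 15.4% accurate? One visible failure mode: for large i, the intermediate product t_0 * ((beta * alpha) + t_0) grows like i^4 and overflows binary64, even though the true value tends toward 0.0625 (see Alternative 5 below). A minimal sketch of checking this against a high-precision reference; the mpmath dependency and the sample points are my own choices, not part of this report:

from mpmath import mp, mpf

mp.dps = 50  # 50-digit reference arithmetic

def code(alpha, beta, i):
    # original binary64 program, copied from the specification above
    t_0 = i * ((alpha + beta) + i)
    t_1 = (alpha + beta) + (2.0 * i)
    t_2 = t_1 * t_1
    return ((t_0 * ((beta * alpha) + t_0)) / t_2) / (t_2 - 1.0)

def reference(alpha, beta, i):
    # same formula evaluated with 50 significant digits
    a, b, n = mpf(alpha), mpf(beta), mpf(i)
    t_0 = n * ((a + b) + n)
    t_2 = ((a + b) + 2 * n) ** 2
    return (t_0 * (b * a + t_0) / t_2) / (t_2 - 1)

for i in (2.0, 1e3, 1e80):
    # at i = 1e80 the i^4-sized numerator overflows to inf in binary64,
    # while the reference value is close to 0.0625
    print(i, code(0.5, 0.5, i), float(reference(0.5, 0.5, i)))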

Sampling outcomes in binary64 precision.

Local Percentage Accuracy

The average percentage accuracy by input value. The horizontal axis shows the value of one input variable (the variable is chosen in the plot title); the vertical axis shows accuracy, where higher is better. Red represents the original program and blue represents Herbie's suggestion; these can be toggled with the buttons below the plot. Lines show averages, while dots show individual samples.

Accuracy vs Speed

Herbie found 5 alternatives:

Alternative   Accuracy   Speedup
1             77.5%      2.5×
2             77.5%      2.5×
3             73.2%      3.5×
4             10.6%      53.0×
5             70.5%      53.0×

The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed-accuracy tradeoffs.

Initial Program: 15.4% accurate, 1.0× speedup

\[\begin{array}{l} t_0 := i \cdot \left(\left(\alpha + \beta\right) + i\right)\\ t_1 := \left(\alpha + \beta\right) + 2 \cdot i\\ t_2 := t_1 \cdot t_1\\ \frac{\frac{t_0 \cdot \left(\beta \cdot \alpha + t_0\right)}{t_2}}{t_2 - 1} \end{array} \]
(FPCore (alpha beta i)
 :precision binary64
 (let* ((t_0 (* i (+ (+ alpha beta) i)))
        (t_1 (+ (+ alpha beta) (* 2.0 i)))
        (t_2 (* t_1 t_1)))
   (/ (/ (* t_0 (+ (* beta alpha) t_0)) t_2) (- t_2 1.0))))
double code(double alpha, double beta, double i) {
	double t_0 = i * ((alpha + beta) + i);
	double t_1 = (alpha + beta) + (2.0 * i);
	double t_2 = t_1 * t_1;
	return ((t_0 * ((beta * alpha) + t_0)) / t_2) / (t_2 - 1.0);
}
real(8) function code(alpha, beta, i)
    real(8), intent (in) :: alpha
    real(8), intent (in) :: beta
    real(8), intent (in) :: i
    real(8) :: t_0
    real(8) :: t_1
    real(8) :: t_2
    t_0 = i * ((alpha + beta) + i)
    t_1 = (alpha + beta) + (2.0d0 * i)
    t_2 = t_1 * t_1
    code = ((t_0 * ((beta * alpha) + t_0)) / t_2) / (t_2 - 1.0d0)
end function
public static double code(double alpha, double beta, double i) {
	double t_0 = i * ((alpha + beta) + i);
	double t_1 = (alpha + beta) + (2.0 * i);
	double t_2 = t_1 * t_1;
	return ((t_0 * ((beta * alpha) + t_0)) / t_2) / (t_2 - 1.0);
}
def code(alpha, beta, i):
	t_0 = i * ((alpha + beta) + i)
	t_1 = (alpha + beta) + (2.0 * i)
	t_2 = t_1 * t_1
	return ((t_0 * ((beta * alpha) + t_0)) / t_2) / (t_2 - 1.0)
function code(alpha, beta, i)
	t_0 = Float64(i * Float64(Float64(alpha + beta) + i))
	t_1 = Float64(Float64(alpha + beta) + Float64(2.0 * i))
	t_2 = Float64(t_1 * t_1)
	return Float64(Float64(Float64(t_0 * Float64(Float64(beta * alpha) + t_0)) / t_2) / Float64(t_2 - 1.0))
end
function tmp = code(alpha, beta, i)
	t_0 = i * ((alpha + beta) + i);
	t_1 = (alpha + beta) + (2.0 * i);
	t_2 = t_1 * t_1;
	tmp = ((t_0 * ((beta * alpha) + t_0)) / t_2) / (t_2 - 1.0);
end
code[alpha_, beta_, i_] := Block[{t$95$0 = N[(i * N[(N[(alpha + beta), $MachinePrecision] + i), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(N[(alpha + beta), $MachinePrecision] + N[(2.0 * i), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$2 = N[(t$95$1 * t$95$1), $MachinePrecision]}, N[(N[(N[(t$95$0 * N[(N[(beta * alpha), $MachinePrecision] + t$95$0), $MachinePrecision]), $MachinePrecision] / t$95$2), $MachinePrecision] / N[(t$95$2 - 1.0), $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}
t_0 := i \cdot \left(\left(\alpha + \beta\right) + i\right)\\
t_1 := \left(\alpha + \beta\right) + 2 \cdot i\\
t_2 := t_1 \cdot t_1\\
\frac{\frac{t_0 \cdot \left(\beta \cdot \alpha + t_0\right)}{t_2}}{t_2 - 1}
\end{array}

Alternative 1: 77.5% accurate, 2.5× speedup

\[\frac{\left(0.0625 \cdot i + 0.0625 \cdot \left(2 \cdot \alpha + 2 \cdot \beta\right)\right) - 0.125 \cdot \left(\alpha + \beta\right)}{i} \]
(FPCore (alpha beta i)
 :precision binary64
 (/
  (-
   (+ (* 0.0625 i) (* 0.0625 (+ (* 2.0 alpha) (* 2.0 beta))))
   (* 0.125 (+ alpha beta)))
  i))
double code(double alpha, double beta, double i) {
	return (((0.0625 * i) + (0.0625 * ((2.0 * alpha) + (2.0 * beta)))) - (0.125 * (alpha + beta))) / i;
}
real(8) function code(alpha, beta, i)
    real(8), intent (in) :: alpha
    real(8), intent (in) :: beta
    real(8), intent (in) :: i
    code = (((0.0625d0 * i) + (0.0625d0 * ((2.0d0 * alpha) + (2.0d0 * beta)))) - (0.125d0 * (alpha + beta))) / i
end function
public static double code(double alpha, double beta, double i) {
	return (((0.0625 * i) + (0.0625 * ((2.0 * alpha) + (2.0 * beta)))) - (0.125 * (alpha + beta))) / i;
}
def code(alpha, beta, i):
	return (((0.0625 * i) + (0.0625 * ((2.0 * alpha) + (2.0 * beta)))) - (0.125 * (alpha + beta))) / i
function code(alpha, beta, i)
	return Float64(Float64(Float64(Float64(0.0625 * i) + Float64(0.0625 * Float64(Float64(2.0 * alpha) + Float64(2.0 * beta)))) - Float64(0.125 * Float64(alpha + beta))) / i)
end
function tmp = code(alpha, beta, i)
	tmp = (((0.0625 * i) + (0.0625 * ((2.0 * alpha) + (2.0 * beta)))) - (0.125 * (alpha + beta))) / i;
end
code[alpha_, beta_, i_] := N[(N[(N[(N[(0.0625 * i), $MachinePrecision] + N[(0.0625 * N[(N[(2.0 * alpha), $MachinePrecision] + N[(2.0 * beta), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(0.125 * N[(alpha + beta), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / i), $MachinePrecision]
\frac{\left(0.0625 \cdot i + 0.0625 \cdot \left(2 \cdot \alpha + 2 \cdot \beta\right)\right) - 0.125 \cdot \left(\alpha + \beta\right)}{i}
Derivation
  1. Initial program 14.6%

    \[\frac{\frac{\left(i \cdot \left(\left(\alpha + \beta\right) + i\right)\right) \cdot \left(\beta \cdot \alpha + i \cdot \left(\left(\alpha + \beta\right) + i\right)\right)}{\left(\left(\alpha + \beta\right) + 2 \cdot i\right) \cdot \left(\left(\alpha + \beta\right) + 2 \cdot i\right)}}{\left(\left(\alpha + \beta\right) + 2 \cdot i\right) \cdot \left(\left(\alpha + \beta\right) + 2 \cdot i\right) - 1} \]
  2. Simplified 36.2%

    \[\leadsto \color{blue}{i \cdot \left(\frac{\mathsf{fma}\left(i, i + \left(\alpha + \beta\right), \alpha \cdot \beta\right)}{\mathsf{fma}\left(\alpha + \mathsf{fma}\left(i, 2, \beta\right), \alpha + \mathsf{fma}\left(i, 2, \beta\right), -1\right)} \cdot \frac{i + \left(\alpha + \beta\right)}{\left(\alpha + \mathsf{fma}\left(i, 2, \beta\right)\right) \cdot \left(\alpha + \mathsf{fma}\left(i, 2, \beta\right)\right)}\right)} \]
  3. Add Preprocessing
  4. Taylor expanded in i around inf 78.1%

    \[\leadsto \color{blue}{\left(0.0625 + 0.0625 \cdot \frac{2 \cdot \alpha + 2 \cdot \beta}{i}\right) - 0.125 \cdot \frac{\alpha + \beta}{i}} \]
  5. Taylor expanded in i around 0 78.1%

    \[\leadsto \color{blue}{\frac{\left(0.0625 \cdot i + 0.0625 \cdot \left(2 \cdot \alpha + 2 \cdot \beta\right)\right) - 0.125 \cdot \left(\alpha + \beta\right)}{i}} \]
  6. Final simplification 78.1%

    \[\leadsto \frac{\left(0.0625 \cdot i + 0.0625 \cdot \left(2 \cdot \alpha + 2 \cdot \beta\right)\right) - 0.125 \cdot \left(\alpha + \beta\right)}{i} \]
  7. Add Preprocessing
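
A detail the derivation leaves implicit (my observation, not part of the Herbie output): the two numerator terms involving alpha and beta are exactly equal, so in real arithmetic the whole expression collapses to a constant:

\[\frac{\left(0.0625 \cdot i + 0.0625 \cdot \left(2 \cdot \alpha + 2 \cdot \beta\right)\right) - 0.125 \cdot \left(\alpha + \beta\right)}{i} = \frac{0.0625 \cdot i}{i} = 0.0625 \]

As a real-valued function this alternative is therefore the same constant as Alternative 5; its higher measured accuracy presumably comes from rounding in the intermediate additions, which perturbs the computed value away from exactly 0.0625 over the sampled inputs.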

Alternative 2: 77.5% accurate, 2.5× speedup

\[\left(0.0625 + 0.0625 \cdot \frac{2 \cdot \alpha + 2 \cdot \beta}{i}\right) - 0.125 \cdot \frac{\alpha + \beta}{i} \]
(FPCore (alpha beta i)
 :precision binary64
 (-
  (+ 0.0625 (* 0.0625 (/ (+ (* 2.0 alpha) (* 2.0 beta)) i)))
  (* 0.125 (/ (+ alpha beta) i))))
double code(double alpha, double beta, double i) {
	return (0.0625 + (0.0625 * (((2.0 * alpha) + (2.0 * beta)) / i))) - (0.125 * ((alpha + beta) / i));
}
real(8) function code(alpha, beta, i)
    real(8), intent (in) :: alpha
    real(8), intent (in) :: beta
    real(8), intent (in) :: i
    code = (0.0625d0 + (0.0625d0 * (((2.0d0 * alpha) + (2.0d0 * beta)) / i))) - (0.125d0 * ((alpha + beta) / i))
end function
public static double code(double alpha, double beta, double i) {
	return (0.0625 + (0.0625 * (((2.0 * alpha) + (2.0 * beta)) / i))) - (0.125 * ((alpha + beta) / i));
}
def code(alpha, beta, i):
	return (0.0625 + (0.0625 * (((2.0 * alpha) + (2.0 * beta)) / i))) - (0.125 * ((alpha + beta) / i))
function code(alpha, beta, i)
	return Float64(Float64(0.0625 + Float64(0.0625 * Float64(Float64(Float64(2.0 * alpha) + Float64(2.0 * beta)) / i))) - Float64(0.125 * Float64(Float64(alpha + beta) / i)))
end
function tmp = code(alpha, beta, i)
	tmp = (0.0625 + (0.0625 * (((2.0 * alpha) + (2.0 * beta)) / i))) - (0.125 * ((alpha + beta) / i));
end
code[alpha_, beta_, i_] := N[(N[(0.0625 + N[(0.0625 * N[(N[(N[(2.0 * alpha), $MachinePrecision] + N[(2.0 * beta), $MachinePrecision]), $MachinePrecision] / i), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(0.125 * N[(N[(alpha + beta), $MachinePrecision] / i), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\left(0.0625 + 0.0625 \cdot \frac{2 \cdot \alpha + 2 \cdot \beta}{i}\right) - 0.125 \cdot \frac{\alpha + \beta}{i}
Derivation
  1. Initial program 14.6%

    \[\frac{\frac{\left(i \cdot \left(\left(\alpha + \beta\right) + i\right)\right) \cdot \left(\beta \cdot \alpha + i \cdot \left(\left(\alpha + \beta\right) + i\right)\right)}{\left(\left(\alpha + \beta\right) + 2 \cdot i\right) \cdot \left(\left(\alpha + \beta\right) + 2 \cdot i\right)}}{\left(\left(\alpha + \beta\right) + 2 \cdot i\right) \cdot \left(\left(\alpha + \beta\right) + 2 \cdot i\right) - 1} \]
  2. Simplified 36.2%

    \[\leadsto \color{blue}{i \cdot \left(\frac{\mathsf{fma}\left(i, i + \left(\alpha + \beta\right), \alpha \cdot \beta\right)}{\mathsf{fma}\left(\alpha + \mathsf{fma}\left(i, 2, \beta\right), \alpha + \mathsf{fma}\left(i, 2, \beta\right), -1\right)} \cdot \frac{i + \left(\alpha + \beta\right)}{\left(\alpha + \mathsf{fma}\left(i, 2, \beta\right)\right) \cdot \left(\alpha + \mathsf{fma}\left(i, 2, \beta\right)\right)}\right)} \]
  3. Add Preprocessing
  4. Taylor expanded in i around inf 78.1%

    \[\leadsto \color{blue}{\left(0.0625 + 0.0625 \cdot \frac{2 \cdot \alpha + 2 \cdot \beta}{i}\right) - 0.125 \cdot \frac{\alpha + \beta}{i}} \]
  5. Final simplification 78.1%

    \[\leadsto \left(0.0625 + 0.0625 \cdot \frac{2 \cdot \alpha + 2 \cdot \beta}{i}\right) - 0.125 \cdot \frac{\alpha + \beta}{i} \]
  6. Add Preprocessing
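
Alternative 2 is the same expression as Alternative 1 before the terms are gathered over the common denominator; dividing Alternative 1's numerator through by i term by term gives

\[\frac{\left(0.0625 \cdot i + 0.0625 \cdot \left(2 \cdot \alpha + 2 \cdot \beta\right)\right) - 0.125 \cdot \left(\alpha + \beta\right)}{i} = \left(0.0625 + 0.0625 \cdot \frac{2 \cdot \alpha + 2 \cdot \beta}{i}\right) - 0.125 \cdot \frac{\alpha + \beta}{i} \]

which is consistent with the two alternatives reporting identical accuracy and speedup.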

Alternative 3: 73.2% accurate, 3.5× speedup

\[\frac{\left(0.0625 \cdot i + \beta \cdot 0.125\right) - 0.125 \cdot \left(\alpha + \beta\right)}{i} \]
(FPCore (alpha beta i)
 :precision binary64
 (/ (- (+ (* 0.0625 i) (* beta 0.125)) (* 0.125 (+ alpha beta))) i))
double code(double alpha, double beta, double i) {
	return (((0.0625 * i) + (beta * 0.125)) - (0.125 * (alpha + beta))) / i;
}
real(8) function code(alpha, beta, i)
    real(8), intent (in) :: alpha
    real(8), intent (in) :: beta
    real(8), intent (in) :: i
    code = (((0.0625d0 * i) + (beta * 0.125d0)) - (0.125d0 * (alpha + beta))) / i
end function
public static double code(double alpha, double beta, double i) {
	return (((0.0625 * i) + (beta * 0.125)) - (0.125 * (alpha + beta))) / i;
}
def code(alpha, beta, i):
	return (((0.0625 * i) + (beta * 0.125)) - (0.125 * (alpha + beta))) / i
function code(alpha, beta, i)
	return Float64(Float64(Float64(Float64(0.0625 * i) + Float64(beta * 0.125)) - Float64(0.125 * Float64(alpha + beta))) / i)
end
function tmp = code(alpha, beta, i)
	tmp = (((0.0625 * i) + (beta * 0.125)) - (0.125 * (alpha + beta))) / i;
end
code[alpha_, beta_, i_] := N[(N[(N[(N[(0.0625 * i), $MachinePrecision] + N[(beta * 0.125), $MachinePrecision]), $MachinePrecision] - N[(0.125 * N[(alpha + beta), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / i), $MachinePrecision]
\frac{\left(0.0625 \cdot i + \beta \cdot 0.125\right) - 0.125 \cdot \left(\alpha + \beta\right)}{i}
Derivation
  1. Initial program 14.6%

    \[\frac{\frac{\left(i \cdot \left(\left(\alpha + \beta\right) + i\right)\right) \cdot \left(\beta \cdot \alpha + i \cdot \left(\left(\alpha + \beta\right) + i\right)\right)}{\left(\left(\alpha + \beta\right) + 2 \cdot i\right) \cdot \left(\left(\alpha + \beta\right) + 2 \cdot i\right)}}{\left(\left(\alpha + \beta\right) + 2 \cdot i\right) \cdot \left(\left(\alpha + \beta\right) + 2 \cdot i\right) - 1} \]
  2. Simplified 36.2%

    \[\leadsto \color{blue}{i \cdot \left(\frac{\mathsf{fma}\left(i, i + \left(\alpha + \beta\right), \alpha \cdot \beta\right)}{\mathsf{fma}\left(\alpha + \mathsf{fma}\left(i, 2, \beta\right), \alpha + \mathsf{fma}\left(i, 2, \beta\right), -1\right)} \cdot \frac{i + \left(\alpha + \beta\right)}{\left(\alpha + \mathsf{fma}\left(i, 2, \beta\right)\right) \cdot \left(\alpha + \mathsf{fma}\left(i, 2, \beta\right)\right)}\right)} \]
  3. Add Preprocessing
  4. Taylor expanded in i around inf 78.1%

    \[\leadsto \color{blue}{\left(0.0625 + 0.0625 \cdot \frac{2 \cdot \alpha + 2 \cdot \beta}{i}\right) - 0.125 \cdot \frac{\alpha + \beta}{i}} \]
  5. Taylor expanded in i around 0 78.1%

    \[\leadsto \color{blue}{\frac{\left(0.0625 \cdot i + 0.0625 \cdot \left(2 \cdot \alpha + 2 \cdot \beta\right)\right) - 0.125 \cdot \left(\alpha + \beta\right)}{i}} \]
  6. Taylor expanded in alpha around 0 71.9%

    \[\leadsto \frac{\color{blue}{\left(0.0625 \cdot i + 0.125 \cdot \beta\right)} - 0.125 \cdot \left(\alpha + \beta\right)}{i} \]
  7. Final simplification 71.9%

    \[\leadsto \frac{\left(0.0625 \cdot i + \beta \cdot 0.125\right) - 0.125 \cdot \left(\alpha + \beta\right)}{i} \]
  8. Add Preprocessing
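
Simplifying by hand (not part of the Herbie output), the numerator reduces to 0.0625 times i minus 0.125 times alpha, so

\[\frac{\left(0.0625 \cdot i + \beta \cdot 0.125\right) - 0.125 \cdot \left(\alpha + \beta\right)}{i} = 0.0625 - 0.125 \cdot \frac{\alpha}{i} \]

The leftover term in alpha, unbalanced once the Taylor expansion in alpha discarded its matching counterpart, plausibly explains the drop to 73.2% relative to Alternatives 1 and 2.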

Alternative 4: 10.6% accurate, 53.0× speedup

\[0 \]
(FPCore (alpha beta i) :precision binary64 0.0)
double code(double alpha, double beta, double i) {
	return 0.0;
}
real(8) function code(alpha, beta, i)
    real(8), intent (in) :: alpha
    real(8), intent (in) :: beta
    real(8), intent (in) :: i
    code = 0.0d0
end function
public static double code(double alpha, double beta, double i) {
	return 0.0;
}
def code(alpha, beta, i):
	return 0.0
function code(alpha, beta, i)
	return 0.0
end
function tmp = code(alpha, beta, i)
	tmp = 0.0;
end
code[alpha_, beta_, i_] := 0.0
0
Derivation
  1. Initial program 14.6%

    \[\frac{\frac{\left(i \cdot \left(\left(\alpha + \beta\right) + i\right)\right) \cdot \left(\beta \cdot \alpha + i \cdot \left(\left(\alpha + \beta\right) + i\right)\right)}{\left(\left(\alpha + \beta\right) + 2 \cdot i\right) \cdot \left(\left(\alpha + \beta\right) + 2 \cdot i\right)}}{\left(\left(\alpha + \beta\right) + 2 \cdot i\right) \cdot \left(\left(\alpha + \beta\right) + 2 \cdot i\right) - 1} \]
  2. Simplified 36.2%

    \[\leadsto \color{blue}{i \cdot \left(\frac{\mathsf{fma}\left(i, i + \left(\alpha + \beta\right), \alpha \cdot \beta\right)}{\mathsf{fma}\left(\alpha + \mathsf{fma}\left(i, 2, \beta\right), \alpha + \mathsf{fma}\left(i, 2, \beta\right), -1\right)} \cdot \frac{i + \left(\alpha + \beta\right)}{\left(\alpha + \mathsf{fma}\left(i, 2, \beta\right)\right) \cdot \left(\alpha + \mathsf{fma}\left(i, 2, \beta\right)\right)}\right)} \]
  3. Add Preprocessing
  4. Taylor expanded in i around inf 78.1%

    \[\leadsto \color{blue}{\left(0.0625 + 0.0625 \cdot \frac{2 \cdot \alpha + 2 \cdot \beta}{i}\right) - 0.125 \cdot \frac{\alpha + \beta}{i}} \]
  5. Taylor expanded in i around 0 14.0%

    \[\leadsto \color{blue}{\frac{0.0625 \cdot \left(2 \cdot \alpha + 2 \cdot \beta\right) - 0.125 \cdot \left(\alpha + \beta\right)}{i}} \]
  6. Step-by-step derivation
    1. div-sub 14.0%

      \[\leadsto \color{blue}{\frac{0.0625 \cdot \left(2 \cdot \alpha + 2 \cdot \beta\right)}{i} - \frac{0.125 \cdot \left(\alpha + \beta\right)}{i}} \]
    2. distribute-lft-in 14.0%

      \[\leadsto \frac{0.0625 \cdot \color{blue}{\left(2 \cdot \left(\alpha + \beta\right)\right)}}{i} - \frac{0.125 \cdot \left(\alpha + \beta\right)}{i} \]
    3. associate-*r* 14.0%

      \[\leadsto \frac{\color{blue}{\left(0.0625 \cdot 2\right) \cdot \left(\alpha + \beta\right)}}{i} - \frac{0.125 \cdot \left(\alpha + \beta\right)}{i} \]
    4. metadata-eval 14.0%

      \[\leadsto \frac{\color{blue}{0.125} \cdot \left(\alpha + \beta\right)}{i} - \frac{0.125 \cdot \left(\alpha + \beta\right)}{i} \]
    5. associate-*r/ 14.0%

      \[\leadsto \color{blue}{0.125 \cdot \frac{\alpha + \beta}{i}} - \frac{0.125 \cdot \left(\alpha + \beta\right)}{i} \]
    6. associate-*r/ 14.0%

      \[\leadsto 0.125 \cdot \frac{\alpha + \beta}{i} - \color{blue}{0.125 \cdot \frac{\alpha + \beta}{i}} \]
    7. +-inverses 14.0%

      \[\leadsto \color{blue}{0} \]
  7. Simplified 14.0%

    \[\leadsto \color{blue}{0} \]
  8. Final simplification 14.0%

    \[\leadsto 0 \]
  9. Add Preprocessing
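
The collapse to 0 is exact algebra rather than an approximation: 0.0625 times (2α + 2β) equals 0.125 times (α + β), so once the expansion around i = 0 discards the constant term, nothing remains:

\[\frac{0.0625 \cdot \left(2 \cdot \alpha + 2 \cdot \beta\right) - 0.125 \cdot \left(\alpha + \beta\right)}{i} = \frac{0.125 \cdot \left(\alpha + \beta\right) - 0.125 \cdot \left(\alpha + \beta\right)}{i} = 0 \]

Losing the constant is what costs the accuracy here: 0 is a poor approximation of a function that is close to 0.0625 over much of the sampled domain, hence 10.6%.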

Alternative 5: 70.5% accurate, 53.0× speedup

\[0.0625 \]
(FPCore (alpha beta i) :precision binary64 0.0625)
double code(double alpha, double beta, double i) {
	return 0.0625;
}
real(8) function code(alpha, beta, i)
    real(8), intent (in) :: alpha
    real(8), intent (in) :: beta
    real(8), intent (in) :: i
    code = 0.0625d0
end function
public static double code(double alpha, double beta, double i) {
	return 0.0625;
}
def code(alpha, beta, i):
	return 0.0625
function code(alpha, beta, i)
	return 0.0625
end
function tmp = code(alpha, beta, i)
	tmp = 0.0625;
end
code[alpha_, beta_, i_] := 0.0625
0.0625
Derivation
  1. Initial program 14.6%

    \[\frac{\frac{\left(i \cdot \left(\left(\alpha + \beta\right) + i\right)\right) \cdot \left(\beta \cdot \alpha + i \cdot \left(\left(\alpha + \beta\right) + i\right)\right)}{\left(\left(\alpha + \beta\right) + 2 \cdot i\right) \cdot \left(\left(\alpha + \beta\right) + 2 \cdot i\right)}}{\left(\left(\alpha + \beta\right) + 2 \cdot i\right) \cdot \left(\left(\alpha + \beta\right) + 2 \cdot i\right) - 1} \]
  2. Simplified 36.2%

    \[\leadsto \color{blue}{i \cdot \left(\frac{\mathsf{fma}\left(i, i + \left(\alpha + \beta\right), \alpha \cdot \beta\right)}{\mathsf{fma}\left(\alpha + \mathsf{fma}\left(i, 2, \beta\right), \alpha + \mathsf{fma}\left(i, 2, \beta\right), -1\right)} \cdot \frac{i + \left(\alpha + \beta\right)}{\left(\alpha + \mathsf{fma}\left(i, 2, \beta\right)\right) \cdot \left(\alpha + \mathsf{fma}\left(i, 2, \beta\right)\right)}\right)} \]
  3. Add Preprocessing
  4. Taylor expanded in i around inf 67.6%

    \[\leadsto \color{blue}{0.0625} \]
  5. Final simplification 67.6%

    \[\leadsto 0.0625 \]
  6. Add Preprocessing
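
The constant is the leading-order behavior of the original expression for i much larger than |α| and |β| (a quick check, not part of the Herbie output): there t_0 ≈ i² and t_2 ≈ 4i², so

\[\frac{\frac{t_0 \cdot \left(\beta \cdot \alpha + t_0\right)}{t_2}}{t_2 - 1} \approx \frac{i^2 \cdot i^2}{\left(4 i^2\right) \cdot \left(4 i^2\right)} = \frac{1}{16} = 0.0625 \]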

Reproduce

herbie shell --seed 2024066 
(FPCore (alpha beta i)
  :name "Octave 3.8, jcobi/4"
  :precision binary64
  :pre (and (and (> alpha -1.0) (> beta -1.0)) (> i 1.0))
  (/ (/ (* (* i (+ (+ alpha beta) i)) (+ (* beta alpha) (* i (+ (+ alpha beta) i)))) (* (+ (+ alpha beta) (* 2.0 i)) (+ (+ alpha beta) (* 2.0 i)))) (- (* (+ (+ alpha beta) (* 2.0 i)) (+ (+ alpha beta) (* 2.0 i))) 1.0)))
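
herbie shell reads FPCore expressions from standard input, so pasting the program above at the prompt, with the seed fixed as shown, should reproduce the results on this page (assuming a matching Herbie version; sampled accuracies can vary across versions and seeds).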