3frac (problem 3.3.3)

Percentage Accurate: 68.8% → 99.8%
Time: 11.1s
Alternatives: 8
Speedup: 2.1×

Specification

?
\[\left|x\right| > 1\]
\[\begin{array}{l} \\ \left(\frac{1}{x + 1} - \frac{2}{x}\right) + \frac{1}{x - 1} \end{array} \]
(FPCore (x)
 :precision binary64
 (+ (- (/ 1.0 (+ x 1.0)) (/ 2.0 x)) (/ 1.0 (- x 1.0))))
/* Initial program: 1/(x+1) - 2/x + 1/(x-1), evaluated directly in binary64 (spec: |x| > 1). */
double code(double x) {
	return ((1.0 / (x + 1.0)) - (2.0 / x)) + (1.0 / (x - 1.0));
}
! Initial program: 1/(x+1) - 2/x + 1/(x-1) in double precision (spec: |x| > 1).
real(8) function code(x)
    real(8), intent (in) :: x
    code = ((1.0d0 / (x + 1.0d0)) - (2.0d0 / x)) + (1.0d0 / (x - 1.0d0))
end function
// Initial program: 1/(x+1) - 2/x + 1/(x-1) in binary64 (spec: |x| > 1).
public static double code(double x) {
	return ((1.0 / (x + 1.0)) - (2.0 / x)) + (1.0 / (x - 1.0));
}
def code(x):
	# Initial program: 1/(x+1) - 2/x + 1/(x-1) (spec: |x| > 1).
	return ((1.0 / (x + 1.0)) - (2.0 / x)) + (1.0 / (x - 1.0))
# Initial program: 1/(x+1) - 2/x + 1/(x-1), with explicit Float64 rounding after each operation.
function code(x)
	return Float64(Float64(Float64(1.0 / Float64(x + 1.0)) - Float64(2.0 / x)) + Float64(1.0 / Float64(x - 1.0)))
end
% Initial program: 1/(x+1) - 2/x + 1/(x-1) (spec: |x| > 1).
function tmp = code(x)
	tmp = ((1.0 / (x + 1.0)) - (2.0 / x)) + (1.0 / (x - 1.0));
end
code[x_] := N[(N[(N[(1.0 / N[(x + 1.0), $MachinePrecision]), $MachinePrecision] - N[(2.0 / x), $MachinePrecision]), $MachinePrecision] + N[(1.0 / N[(x - 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\left(\frac{1}{x + 1} - \frac{2}{x}\right) + \frac{1}{x - 1}
\end{array}

Sampling outcomes in binary64 precision:

Local Percentage Accuracy vs ?

The average percentage accuracy by input value. Horizontal axis shows value of an input variable; the variable is chosen in the title. Vertical axis is accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion. These can be toggled with buttons below the plot. The line is an average while dots represent individual samples.

Accuracy vs Speed?

Herbie found 8 alternatives:

AlternativeAccuracySpeedup
The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed-accuracy tradeoffs.

Initial Program: 68.8% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \left(\frac{1}{x + 1} - \frac{2}{x}\right) + \frac{1}{x - 1} \end{array} \]
(FPCore (x)
 :precision binary64
 (+ (- (/ 1.0 (+ x 1.0)) (/ 2.0 x)) (/ 1.0 (- x 1.0))))
/* Initial program (repeated listing): 1/(x+1) - 2/x + 1/(x-1) in binary64. */
double code(double x) {
	return ((1.0 / (x + 1.0)) - (2.0 / x)) + (1.0 / (x - 1.0));
}
! Initial program (repeated listing): 1/(x+1) - 2/x + 1/(x-1) in double precision.
real(8) function code(x)
    real(8), intent (in) :: x
    code = ((1.0d0 / (x + 1.0d0)) - (2.0d0 / x)) + (1.0d0 / (x - 1.0d0))
end function
// Initial program (repeated listing): 1/(x+1) - 2/x + 1/(x-1) in binary64.
public static double code(double x) {
	return ((1.0 / (x + 1.0)) - (2.0 / x)) + (1.0 / (x - 1.0));
}
def code(x):
	# Initial program (repeated listing): 1/(x+1) - 2/x + 1/(x-1).
	return ((1.0 / (x + 1.0)) - (2.0 / x)) + (1.0 / (x - 1.0))
# Initial program (repeated listing): 1/(x+1) - 2/x + 1/(x-1) with explicit Float64 rounding.
function code(x)
	return Float64(Float64(Float64(1.0 / Float64(x + 1.0)) - Float64(2.0 / x)) + Float64(1.0 / Float64(x - 1.0)))
end
% Initial program (repeated listing): 1/(x+1) - 2/x + 1/(x-1).
function tmp = code(x)
	tmp = ((1.0 / (x + 1.0)) - (2.0 / x)) + (1.0 / (x - 1.0));
end
code[x_] := N[(N[(N[(1.0 / N[(x + 1.0), $MachinePrecision]), $MachinePrecision] - N[(2.0 / x), $MachinePrecision]), $MachinePrecision] + N[(1.0 / N[(x - 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\left(\frac{1}{x + 1} - \frac{2}{x}\right) + \frac{1}{x - 1}
\end{array}

Alternative 1: 99.8% accurate, 1.4× speedup?

\[\begin{array}{l} x\_m = \left|x\right| \\ x\_s = \mathsf{copysign}\left(1, x\right) \\ x\_s \cdot \frac{\frac{2}{\mathsf{fma}\left(x\_m, x\_m, x\_m\right)}}{x\_m + -1} \end{array} \]
x\_m = (fabs.f64 x)
x\_s = (copysign.f64 #s(literal 1 binary64) x)
(FPCore (x_s x_m)
 :precision binary64
 (* x_s (/ (/ 2.0 (fma x_m x_m x_m)) (+ x_m -1.0))))
x\_m = fabs(x);
x\_s = copysign(1.0, x);
/* Alternative 1: x_s * (2 / fma(x_m, x_m, x_m)) / (x_m - 1).
   The caller precomputes x_m = fabs(x) and x_s = copysign(1.0, x). */
double code(double x_s, double x_m) {
	return x_s * ((2.0 / fma(x_m, x_m, x_m)) / (x_m + -1.0));
}
x\_m = abs(x)
x\_s = copysign(1.0, x)
# Alternative 1: x_s * (2 / fma(x_m, x_m, x_m)) / (x_m - 1); caller supplies x_m = |x|, x_s = sign.
function code(x_s, x_m)
	return Float64(x_s * Float64(Float64(2.0 / fma(x_m, x_m, x_m)) / Float64(x_m + -1.0)))
end
x\_m = N[Abs[x], $MachinePrecision]
x\_s = N[With[{TMP1 = Abs[1.0], TMP2 = Sign[x]}, TMP1 * If[TMP2 == 0, 1, TMP2]], $MachinePrecision]
code[x$95$s_, x$95$m_] := N[(x$95$s * N[(N[(2.0 / N[(x$95$m * x$95$m + x$95$m), $MachinePrecision]), $MachinePrecision] / N[(x$95$m + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
x\_m = \left|x\right|
\\
x\_s = \mathsf{copysign}\left(1, x\right)

\\
x\_s \cdot \frac{\frac{2}{\mathsf{fma}\left(x\_m, x\_m, x\_m\right)}}{x\_m + -1}
\end{array}
Derivation
  1. Initial program 67.7%

    \[\left(\frac{1}{x + 1} - \frac{2}{x}\right) + \frac{1}{x - 1} \]
  2. Add Preprocessing
  3. Step-by-step derivation
    1. lift-+.f64N/A

      \[\leadsto \left(\frac{1}{\color{blue}{x + 1}} - \frac{2}{x}\right) + \frac{1}{x - 1} \]
    2. lift-/.f64N/A

      \[\leadsto \left(\color{blue}{\frac{1}{x + 1}} - \frac{2}{x}\right) + \frac{1}{x - 1} \]
    3. lift-/.f64N/A

      \[\leadsto \left(\frac{1}{x + 1} - \color{blue}{\frac{2}{x}}\right) + \frac{1}{x - 1} \]
    4. lift--.f64N/A

      \[\leadsto \color{blue}{\left(\frac{1}{x + 1} - \frac{2}{x}\right)} + \frac{1}{x - 1} \]
    5. lift--.f64N/A

      \[\leadsto \left(\frac{1}{x + 1} - \frac{2}{x}\right) + \frac{1}{\color{blue}{x - 1}} \]
    6. lift-/.f64N/A

      \[\leadsto \left(\frac{1}{x + 1} - \frac{2}{x}\right) + \color{blue}{\frac{1}{x - 1}} \]
    7. +-commutativeN/A

      \[\leadsto \color{blue}{\frac{1}{x - 1} + \left(\frac{1}{x + 1} - \frac{2}{x}\right)} \]
    8. lift-/.f64N/A

      \[\leadsto \color{blue}{\frac{1}{x - 1}} + \left(\frac{1}{x + 1} - \frac{2}{x}\right) \]
    9. lift--.f64N/A

      \[\leadsto \frac{1}{x - 1} + \color{blue}{\left(\frac{1}{x + 1} - \frac{2}{x}\right)} \]
    10. lift-/.f64N/A

      \[\leadsto \frac{1}{x - 1} + \left(\color{blue}{\frac{1}{x + 1}} - \frac{2}{x}\right) \]
    11. lift-/.f64N/A

      \[\leadsto \frac{1}{x - 1} + \left(\frac{1}{x + 1} - \color{blue}{\frac{2}{x}}\right) \]
    12. frac-subN/A

      \[\leadsto \frac{1}{x - 1} + \color{blue}{\frac{1 \cdot x - \left(x + 1\right) \cdot 2}{\left(x + 1\right) \cdot x}} \]
    13. frac-addN/A

      \[\leadsto \color{blue}{\frac{1 \cdot \left(\left(x + 1\right) \cdot x\right) + \left(x - 1\right) \cdot \left(1 \cdot x - \left(x + 1\right) \cdot 2\right)}{\left(x - 1\right) \cdot \left(\left(x + 1\right) \cdot x\right)}} \]
    14. lower-/.f64N/A

      \[\leadsto \color{blue}{\frac{1 \cdot \left(\left(x + 1\right) \cdot x\right) + \left(x - 1\right) \cdot \left(1 \cdot x - \left(x + 1\right) \cdot 2\right)}{\left(x - 1\right) \cdot \left(\left(x + 1\right) \cdot x\right)}} \]
  4. Applied rewrites18.0%

    \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1, x \cdot \left(1 + x\right), \left(x + -1\right) \cdot \left(x - \left(1 + x\right) \cdot 2\right)\right)}{\left(x + -1\right) \cdot \left(x \cdot \left(1 + x\right)\right)}} \]
  5. Taylor expanded in x around 0

    \[\leadsto \frac{\color{blue}{2}}{\left(x + -1\right) \cdot \left(x \cdot \left(1 + x\right)\right)} \]
  6. Step-by-step derivation
    1. Applied rewrites98.8%

      \[\leadsto \frac{\color{blue}{2}}{\left(x + -1\right) \cdot \left(x \cdot \left(1 + x\right)\right)} \]
    2. Step-by-step derivation
      1. lift-approxN/A

        \[\leadsto \frac{\color{blue}{2}}{\left(x + -1\right) \cdot \left(x \cdot \left(1 + x\right)\right)} \]
      2. lift-+.f64N/A

        \[\leadsto \frac{2}{\color{blue}{\left(x + -1\right)} \cdot \left(x \cdot \left(1 + x\right)\right)} \]
      3. *-lft-identityN/A

        \[\leadsto \frac{2}{\left(x + -1\right) \cdot \left(\color{blue}{\left(1 \cdot x\right)} \cdot \left(1 + x\right)\right)} \]
      4. associate-*r*N/A

        \[\leadsto \frac{2}{\left(x + -1\right) \cdot \color{blue}{\left(1 \cdot \left(x \cdot \left(1 + x\right)\right)\right)}} \]
      5. *-commutativeN/A

        \[\leadsto \frac{2}{\color{blue}{\left(1 \cdot \left(x \cdot \left(1 + x\right)\right)\right) \cdot \left(x + -1\right)}} \]
      6. associate-/r*N/A

        \[\leadsto \color{blue}{\frac{\frac{2}{1 \cdot \left(x \cdot \left(1 + x\right)\right)}}{x + -1}} \]
      7. lower-/.f64N/A

        \[\leadsto \color{blue}{\frac{\frac{2}{1 \cdot \left(x \cdot \left(1 + x\right)\right)}}{x + -1}} \]
    3. Applied rewrites99.8%

      \[\leadsto \color{blue}{\frac{\frac{2}{\mathsf{fma}\left(x, x, x\right)}}{x + -1}} \]
    4. Add Preprocessing

    Alternative 2: 99.8% accurate, 1.4× speedup?

    \[\begin{array}{l} x\_m = \left|x\right| \\ x\_s = \mathsf{copysign}\left(1, x\right) \\ x\_s \cdot \frac{\frac{2}{x\_m + -1}}{\mathsf{fma}\left(x\_m, x\_m, x\_m\right)} \end{array} \]
    x\_m = (fabs.f64 x)
    x\_s = (copysign.f64 #s(literal 1 binary64) x)
    (FPCore (x_s x_m)
     :precision binary64
     (* x_s (/ (/ 2.0 (+ x_m -1.0)) (fma x_m x_m x_m))))
    x\_m = fabs(x);
    x\_s = copysign(1.0, x);
    /* Alternative 2: x_s * (2 / (x_m - 1)) / fma(x_m, x_m, x_m).
       The caller precomputes x_m = fabs(x) and x_s = copysign(1.0, x). */
    double code(double x_s, double x_m) {
    	return x_s * ((2.0 / (x_m + -1.0)) / fma(x_m, x_m, x_m));
    }
    
    x\_m = abs(x)
    x\_s = copysign(1.0, x)
    # Alternative 2: x_s * (2 / (x_m - 1)) / fma(x_m, x_m, x_m); caller supplies x_m = |x|, x_s = sign.
    function code(x_s, x_m)
    	return Float64(x_s * Float64(Float64(2.0 / Float64(x_m + -1.0)) / fma(x_m, x_m, x_m)))
    end
    
    x\_m = N[Abs[x], $MachinePrecision]
    x\_s = N[With[{TMP1 = Abs[1.0], TMP2 = Sign[x]}, TMP1 * If[TMP2 == 0, 1, TMP2]], $MachinePrecision]
    code[x$95$s_, x$95$m_] := N[(x$95$s * N[(N[(2.0 / N[(x$95$m + -1.0), $MachinePrecision]), $MachinePrecision] / N[(x$95$m * x$95$m + x$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
    
    \begin{array}{l}
    x\_m = \left|x\right|
    \\
    x\_s = \mathsf{copysign}\left(1, x\right)
    
    \\
    x\_s \cdot \frac{\frac{2}{x\_m + -1}}{\mathsf{fma}\left(x\_m, x\_m, x\_m\right)}
    \end{array}
    
    Derivation
    1. Initial program 67.7%

      \[\left(\frac{1}{x + 1} - \frac{2}{x}\right) + \frac{1}{x - 1} \]
    2. Add Preprocessing
    3. Step-by-step derivation
      1. lift-+.f64N/A

        \[\leadsto \left(\frac{1}{\color{blue}{x + 1}} - \frac{2}{x}\right) + \frac{1}{x - 1} \]
      2. lift-/.f64N/A

        \[\leadsto \left(\color{blue}{\frac{1}{x + 1}} - \frac{2}{x}\right) + \frac{1}{x - 1} \]
      3. lift-/.f64N/A

        \[\leadsto \left(\frac{1}{x + 1} - \color{blue}{\frac{2}{x}}\right) + \frac{1}{x - 1} \]
      4. lift--.f64N/A

        \[\leadsto \color{blue}{\left(\frac{1}{x + 1} - \frac{2}{x}\right)} + \frac{1}{x - 1} \]
      5. lift--.f64N/A

        \[\leadsto \left(\frac{1}{x + 1} - \frac{2}{x}\right) + \frac{1}{\color{blue}{x - 1}} \]
      6. lift-/.f64N/A

        \[\leadsto \left(\frac{1}{x + 1} - \frac{2}{x}\right) + \color{blue}{\frac{1}{x - 1}} \]
      7. +-commutativeN/A

        \[\leadsto \color{blue}{\frac{1}{x - 1} + \left(\frac{1}{x + 1} - \frac{2}{x}\right)} \]
      8. lift-/.f64N/A

        \[\leadsto \color{blue}{\frac{1}{x - 1}} + \left(\frac{1}{x + 1} - \frac{2}{x}\right) \]
      9. lift--.f64N/A

        \[\leadsto \frac{1}{x - 1} + \color{blue}{\left(\frac{1}{x + 1} - \frac{2}{x}\right)} \]
      10. lift-/.f64N/A

        \[\leadsto \frac{1}{x - 1} + \left(\color{blue}{\frac{1}{x + 1}} - \frac{2}{x}\right) \]
      11. lift-/.f64N/A

        \[\leadsto \frac{1}{x - 1} + \left(\frac{1}{x + 1} - \color{blue}{\frac{2}{x}}\right) \]
      12. frac-subN/A

        \[\leadsto \frac{1}{x - 1} + \color{blue}{\frac{1 \cdot x - \left(x + 1\right) \cdot 2}{\left(x + 1\right) \cdot x}} \]
      13. frac-addN/A

        \[\leadsto \color{blue}{\frac{1 \cdot \left(\left(x + 1\right) \cdot x\right) + \left(x - 1\right) \cdot \left(1 \cdot x - \left(x + 1\right) \cdot 2\right)}{\left(x - 1\right) \cdot \left(\left(x + 1\right) \cdot x\right)}} \]
      14. lower-/.f64N/A

        \[\leadsto \color{blue}{\frac{1 \cdot \left(\left(x + 1\right) \cdot x\right) + \left(x - 1\right) \cdot \left(1 \cdot x - \left(x + 1\right) \cdot 2\right)}{\left(x - 1\right) \cdot \left(\left(x + 1\right) \cdot x\right)}} \]
    4. Applied rewrites18.0%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1, x \cdot \left(1 + x\right), \left(x + -1\right) \cdot \left(x - \left(1 + x\right) \cdot 2\right)\right)}{\left(x + -1\right) \cdot \left(x \cdot \left(1 + x\right)\right)}} \]
    5. Taylor expanded in x around 0

      \[\leadsto \frac{\color{blue}{2}}{\left(x + -1\right) \cdot \left(x \cdot \left(1 + x\right)\right)} \]
    6. Step-by-step derivation
      1. Applied rewrites98.8%

        \[\leadsto \frac{\color{blue}{2}}{\left(x + -1\right) \cdot \left(x \cdot \left(1 + x\right)\right)} \]
      2. Step-by-step derivation
        1. lift-approxN/A

          \[\leadsto \frac{\color{blue}{2}}{\left(x + -1\right) \cdot \left(x \cdot \left(1 + x\right)\right)} \]
        2. lift-+.f64N/A

          \[\leadsto \frac{2}{\color{blue}{\left(x + -1\right)} \cdot \left(x \cdot \left(1 + x\right)\right)} \]
        3. *-lft-identityN/A

          \[\leadsto \frac{2}{\left(x + -1\right) \cdot \left(\color{blue}{\left(1 \cdot x\right)} \cdot \left(1 + x\right)\right)} \]
        4. associate-*r*N/A

          \[\leadsto \frac{2}{\left(x + -1\right) \cdot \color{blue}{\left(1 \cdot \left(x \cdot \left(1 + x\right)\right)\right)}} \]
        5. associate-/r*N/A

          \[\leadsto \color{blue}{\frac{\frac{2}{x + -1}}{1 \cdot \left(x \cdot \left(1 + x\right)\right)}} \]
        6. lower-/.f64N/A

          \[\leadsto \color{blue}{\frac{\frac{2}{x + -1}}{1 \cdot \left(x \cdot \left(1 + x\right)\right)}} \]
        7. lower-/.f64N/A

          \[\leadsto \frac{\color{blue}{\frac{2}{x + -1}}}{1 \cdot \left(x \cdot \left(1 + x\right)\right)} \]
      3. Applied rewrites99.8%

        \[\leadsto \color{blue}{\frac{\frac{2}{x + -1}}{\mathsf{fma}\left(x, x, x\right)}} \]
      4. Add Preprocessing

      Alternative 3: 98.7% accurate, 1.6× speedup?

      \[\begin{array}{l} x\_m = \left|x\right| \\ x\_s = \mathsf{copysign}\left(1, x\right) \\ x\_s \cdot \frac{\frac{2}{x \cdot x}}{x\_m} \end{array} \]
      x\_m = (fabs.f64 x)
      x\_s = (copysign.f64 #s(literal 1 binary64) x)
      (FPCore (x_s x_m) :precision binary64 (* x_s (/ (/ 2.0 (* x x)) x_m)))
      x\_m = fabs(x);
      x\_s = copysign(1.0, x);
      /* Alternative 3: x_s * (2 / x_m^2) / x_m, where x_m = |x|, x_s = copysign(1, x).
         Fix: the generated body referenced `x`, which is not a parameter of this
         function; since x_m = |x|, x*x == x_m*x_m, so use x_m instead. */
      double code(double x_s, double x_m) {
      	return x_s * ((2.0 / (x_m * x_m)) / x_m);
      }
      
      x\_m = abs(x)
      x\_s = copysign(1.0d0, x)
      ! Alternative 3: x_s * (2 / x_m**2) / x_m, where x_m = |x|, x_s = sign(1, x).
      ! Fix: the generated body referenced `x`, which is not a dummy argument;
      ! since x_m = |x|, x*x == x_m*x_m, so use x_m instead.
      real(8) function code(x_s, x_m)
          real(8), intent (in) :: x_s
          real(8), intent (in) :: x_m
          code = x_s * ((2.0d0 / (x_m * x_m)) / x_m)
      end function
      
      x\_m = Math.abs(x);
      x\_s = Math.copySign(1.0, x);
      // Alternative 3: x_s * (2 / x_m^2) / x_m, where x_m = |x|, x_s = copySign(1, x).
      // Fix: the generated body referenced `x`, which is not a parameter;
      // since x_m = |x|, x*x == x_m*x_m, so use x_m instead.
      public static double code(double x_s, double x_m) {
      	return x_s * ((2.0 / (x_m * x_m)) / x_m);
      }
      
      x\_m = math.fabs(x)
      x\_s = math.copysign(1.0, x)
      def code(x_s, x_m):
      	# Alternative 3: x_s * (2 / x_m**2) / x_m, where x_m = |x|, x_s = copysign(1, x).
      	# Fix: the generated body referenced `x`, which is not a parameter;
      	# since x_m = |x|, x*x == x_m*x_m, so use x_m instead.
      	return x_s * ((2.0 / (x_m * x_m)) / x_m)
      
      x\_m = abs(x)
      x\_s = copysign(1.0, x)
      # Alternative 3: x_s * (2 / x_m^2) / x_m, where x_m = |x|, x_s = copysign(1, x).
      # Fix: the generated body referenced `x`, which is not a parameter;
      # since x_m = |x|, x*x == x_m*x_m, so use x_m instead.
      function code(x_s, x_m)
      	return Float64(x_s * Float64(Float64(2.0 / Float64(x_m * x_m)) / x_m))
      end
      
      x\_m = abs(x);
      x\_s = sign(x) * abs(1.0);
      % Alternative 3: x_s * (2 / x_m^2) / x_m, where x_m = |x|, x_s = sign(x).
      % Fix: the generated body referenced `x`, which is not a parameter;
      % since x_m = |x|, x*x == x_m*x_m, so use x_m instead.
      function tmp = code(x_s, x_m)
      	tmp = x_s * ((2.0 / (x_m * x_m)) / x_m);
      end
      
      x\_m = N[Abs[x], $MachinePrecision]
      x\_s = N[With[{TMP1 = Abs[1.0], TMP2 = Sign[x]}, TMP1 * If[TMP2 == 0, 1, TMP2]], $MachinePrecision]
      code[x$95$s_, x$95$m_] := N[(x$95$s * N[(N[(2.0 / N[(x * x), $MachinePrecision]), $MachinePrecision] / x$95$m), $MachinePrecision]), $MachinePrecision]
      
      \begin{array}{l}
      x\_m = \left|x\right|
      \\
      x\_s = \mathsf{copysign}\left(1, x\right)
      
      \\
      x\_s \cdot \frac{\frac{2}{x \cdot x}}{x\_m}
      \end{array}
      
      Derivation
      1. Initial program 67.7%

        \[\left(\frac{1}{x + 1} - \frac{2}{x}\right) + \frac{1}{x - 1} \]
      2. Add Preprocessing
      3. Step-by-step derivation
        1. lift-+.f64N/A

          \[\leadsto \left(\frac{1}{\color{blue}{x + 1}} - \frac{2}{x}\right) + \frac{1}{x - 1} \]
        2. lift-/.f64N/A

          \[\leadsto \left(\color{blue}{\frac{1}{x + 1}} - \frac{2}{x}\right) + \frac{1}{x - 1} \]
        3. lift-/.f64N/A

          \[\leadsto \left(\frac{1}{x + 1} - \color{blue}{\frac{2}{x}}\right) + \frac{1}{x - 1} \]
        4. lift--.f64N/A

          \[\leadsto \color{blue}{\left(\frac{1}{x + 1} - \frac{2}{x}\right)} + \frac{1}{x - 1} \]
        5. lift--.f64N/A

          \[\leadsto \left(\frac{1}{x + 1} - \frac{2}{x}\right) + \frac{1}{\color{blue}{x - 1}} \]
        6. lift-/.f64N/A

          \[\leadsto \left(\frac{1}{x + 1} - \frac{2}{x}\right) + \color{blue}{\frac{1}{x - 1}} \]
        7. +-commutativeN/A

          \[\leadsto \color{blue}{\frac{1}{x - 1} + \left(\frac{1}{x + 1} - \frac{2}{x}\right)} \]
        8. lift-/.f64N/A

          \[\leadsto \color{blue}{\frac{1}{x - 1}} + \left(\frac{1}{x + 1} - \frac{2}{x}\right) \]
        9. lift--.f64N/A

          \[\leadsto \frac{1}{x - 1} + \color{blue}{\left(\frac{1}{x + 1} - \frac{2}{x}\right)} \]
        10. lift-/.f64N/A

          \[\leadsto \frac{1}{x - 1} + \left(\color{blue}{\frac{1}{x + 1}} - \frac{2}{x}\right) \]
        11. lift-/.f64N/A

          \[\leadsto \frac{1}{x - 1} + \left(\frac{1}{x + 1} - \color{blue}{\frac{2}{x}}\right) \]
        12. frac-subN/A

          \[\leadsto \frac{1}{x - 1} + \color{blue}{\frac{1 \cdot x - \left(x + 1\right) \cdot 2}{\left(x + 1\right) \cdot x}} \]
        13. frac-addN/A

          \[\leadsto \color{blue}{\frac{1 \cdot \left(\left(x + 1\right) \cdot x\right) + \left(x - 1\right) \cdot \left(1 \cdot x - \left(x + 1\right) \cdot 2\right)}{\left(x - 1\right) \cdot \left(\left(x + 1\right) \cdot x\right)}} \]
        14. lower-/.f64N/A

          \[\leadsto \color{blue}{\frac{1 \cdot \left(\left(x + 1\right) \cdot x\right) + \left(x - 1\right) \cdot \left(1 \cdot x - \left(x + 1\right) \cdot 2\right)}{\left(x - 1\right) \cdot \left(\left(x + 1\right) \cdot x\right)}} \]
      4. Applied rewrites18.0%

        \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1, x \cdot \left(1 + x\right), \left(x + -1\right) \cdot \left(x - \left(1 + x\right) \cdot 2\right)\right)}{\left(x + -1\right) \cdot \left(x \cdot \left(1 + x\right)\right)}} \]
      5. Taylor expanded in x around 0

        \[\leadsto \frac{\color{blue}{2}}{\left(x + -1\right) \cdot \left(x \cdot \left(1 + x\right)\right)} \]
      6. Step-by-step derivation
        1. Applied rewrites98.8%

          \[\leadsto \frac{\color{blue}{2}}{\left(x + -1\right) \cdot \left(x \cdot \left(1 + x\right)\right)} \]
        2. Step-by-step derivation
          1. lift-approxN/A

            \[\leadsto \frac{\color{blue}{2}}{\left(x + -1\right) \cdot \left(x \cdot \left(1 + x\right)\right)} \]
          2. lift-+.f64N/A

            \[\leadsto \frac{2}{\color{blue}{\left(x + -1\right)} \cdot \left(x \cdot \left(1 + x\right)\right)} \]
          3. *-lft-identityN/A

            \[\leadsto \frac{2}{\left(x + -1\right) \cdot \left(\color{blue}{\left(1 \cdot x\right)} \cdot \left(1 + x\right)\right)} \]
          4. associate-*r*N/A

            \[\leadsto \frac{2}{\left(x + -1\right) \cdot \color{blue}{\left(1 \cdot \left(x \cdot \left(1 + x\right)\right)\right)}} \]
          5. associate-/r*N/A

            \[\leadsto \color{blue}{\frac{\frac{2}{x + -1}}{1 \cdot \left(x \cdot \left(1 + x\right)\right)}} \]
          6. associate-*r*N/A

            \[\leadsto \frac{\frac{2}{x + -1}}{\color{blue}{\left(1 \cdot x\right) \cdot \left(1 + x\right)}} \]
          7. *-lft-identityN/A

            \[\leadsto \frac{\frac{2}{x + -1}}{\color{blue}{x} \cdot \left(1 + x\right)} \]
          8. *-commutativeN/A

            \[\leadsto \frac{\frac{2}{x + -1}}{\color{blue}{\left(1 + x\right) \cdot x}} \]
          9. associate-/r*N/A

            \[\leadsto \color{blue}{\frac{\frac{\frac{2}{x + -1}}{1 + x}}{x}} \]
          10. lower-/.f64N/A

            \[\leadsto \color{blue}{\frac{\frac{\frac{2}{x + -1}}{1 + x}}{x}} \]
        3. Applied rewrites99.7%

          \[\leadsto \color{blue}{\frac{\frac{\frac{2}{x + -1}}{x + 1}}{x}} \]
        4. Taylor expanded in x around inf

          \[\leadsto \frac{\color{blue}{\frac{2}{{x}^{2}}}}{x} \]
        5. Step-by-step derivation
          1. lower-/.f64N/A

            \[\leadsto \frac{\color{blue}{\frac{2}{{x}^{2}}}}{x} \]
          2. unpow2N/A

            \[\leadsto \frac{\frac{2}{\color{blue}{x \cdot x}}}{x} \]
          3. lower-*.f6499.5

            \[\leadsto \frac{\frac{2}{\color{blue}{x \cdot x}}}{x} \]
        6. Applied rewrites99.5%

          \[\leadsto \frac{\color{blue}{\frac{2}{x \cdot x}}}{x} \]
        7. Add Preprocessing

        Alternative 4: 99.2% accurate, 1.8× speedup?

        \[\begin{array}{l} x\_m = \left|x\right| \\ x\_s = \mathsf{copysign}\left(1, x\right) \\ x\_s \cdot \frac{2}{\mathsf{fma}\left(x\_m, x\_m, x\_m\right) \cdot \left(x\_m + -1\right)} \end{array} \]
        x\_m = (fabs.f64 x)
        x\_s = (copysign.f64 #s(literal 1 binary64) x)
        (FPCore (x_s x_m)
         :precision binary64
         (* x_s (/ 2.0 (* (fma x_m x_m x_m) (+ x_m -1.0)))))
        x\_m = fabs(x);
        x\_s = copysign(1.0, x);
        /* Alternative 4: x_s * 2 / (fma(x_m, x_m, x_m) * (x_m - 1)).
           The caller precomputes x_m = fabs(x) and x_s = copysign(1.0, x). */
        double code(double x_s, double x_m) {
        	return x_s * (2.0 / (fma(x_m, x_m, x_m) * (x_m + -1.0)));
        }
        
        x\_m = abs(x)
        x\_s = copysign(1.0, x)
        # Alternative 4: x_s * 2 / (fma(x_m, x_m, x_m) * (x_m - 1)); caller supplies x_m = |x|, x_s = sign.
        function code(x_s, x_m)
        	return Float64(x_s * Float64(2.0 / Float64(fma(x_m, x_m, x_m) * Float64(x_m + -1.0))))
        end
        
        x\_m = N[Abs[x], $MachinePrecision]
        x\_s = N[With[{TMP1 = Abs[1.0], TMP2 = Sign[x]}, TMP1 * If[TMP2 == 0, 1, TMP2]], $MachinePrecision]
        code[x$95$s_, x$95$m_] := N[(x$95$s * N[(2.0 / N[(N[(x$95$m * x$95$m + x$95$m), $MachinePrecision] * N[(x$95$m + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
        
        \begin{array}{l}
        x\_m = \left|x\right|
        \\
        x\_s = \mathsf{copysign}\left(1, x\right)
        
        \\
        x\_s \cdot \frac{2}{\mathsf{fma}\left(x\_m, x\_m, x\_m\right) \cdot \left(x\_m + -1\right)}
        \end{array}
        
        Derivation
        1. Initial program 67.7%

          \[\left(\frac{1}{x + 1} - \frac{2}{x}\right) + \frac{1}{x - 1} \]
        2. Add Preprocessing
        3. Step-by-step derivation
          1. lift-+.f64N/A

            \[\leadsto \left(\frac{1}{\color{blue}{x + 1}} - \frac{2}{x}\right) + \frac{1}{x - 1} \]
          2. lift-/.f64N/A

            \[\leadsto \left(\color{blue}{\frac{1}{x + 1}} - \frac{2}{x}\right) + \frac{1}{x - 1} \]
          3. lift-/.f64N/A

            \[\leadsto \left(\frac{1}{x + 1} - \color{blue}{\frac{2}{x}}\right) + \frac{1}{x - 1} \]
          4. lift--.f64N/A

            \[\leadsto \color{blue}{\left(\frac{1}{x + 1} - \frac{2}{x}\right)} + \frac{1}{x - 1} \]
          5. lift--.f64N/A

            \[\leadsto \left(\frac{1}{x + 1} - \frac{2}{x}\right) + \frac{1}{\color{blue}{x - 1}} \]
          6. lift-/.f64N/A

            \[\leadsto \left(\frac{1}{x + 1} - \frac{2}{x}\right) + \color{blue}{\frac{1}{x - 1}} \]
          7. +-commutativeN/A

            \[\leadsto \color{blue}{\frac{1}{x - 1} + \left(\frac{1}{x + 1} - \frac{2}{x}\right)} \]
          8. lift-/.f64N/A

            \[\leadsto \color{blue}{\frac{1}{x - 1}} + \left(\frac{1}{x + 1} - \frac{2}{x}\right) \]
          9. lift--.f64N/A

            \[\leadsto \frac{1}{x - 1} + \color{blue}{\left(\frac{1}{x + 1} - \frac{2}{x}\right)} \]
          10. lift-/.f64N/A

            \[\leadsto \frac{1}{x - 1} + \left(\color{blue}{\frac{1}{x + 1}} - \frac{2}{x}\right) \]
          11. lift-/.f64N/A

            \[\leadsto \frac{1}{x - 1} + \left(\frac{1}{x + 1} - \color{blue}{\frac{2}{x}}\right) \]
          12. frac-subN/A

            \[\leadsto \frac{1}{x - 1} + \color{blue}{\frac{1 \cdot x - \left(x + 1\right) \cdot 2}{\left(x + 1\right) \cdot x}} \]
          13. frac-addN/A

            \[\leadsto \color{blue}{\frac{1 \cdot \left(\left(x + 1\right) \cdot x\right) + \left(x - 1\right) \cdot \left(1 \cdot x - \left(x + 1\right) \cdot 2\right)}{\left(x - 1\right) \cdot \left(\left(x + 1\right) \cdot x\right)}} \]
          14. lower-/.f64N/A

            \[\leadsto \color{blue}{\frac{1 \cdot \left(\left(x + 1\right) \cdot x\right) + \left(x - 1\right) \cdot \left(1 \cdot x - \left(x + 1\right) \cdot 2\right)}{\left(x - 1\right) \cdot \left(\left(x + 1\right) \cdot x\right)}} \]
        4. Applied rewrites18.0%

          \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1, x \cdot \left(1 + x\right), \left(x + -1\right) \cdot \left(x - \left(1 + x\right) \cdot 2\right)\right)}{\left(x + -1\right) \cdot \left(x \cdot \left(1 + x\right)\right)}} \]
        5. Taylor expanded in x around 0

          \[\leadsto \frac{\color{blue}{2}}{\left(x + -1\right) \cdot \left(x \cdot \left(1 + x\right)\right)} \]
        6. Step-by-step derivation
          1. Applied rewrites98.8%

            \[\leadsto \frac{\color{blue}{2}}{\left(x + -1\right) \cdot \left(x \cdot \left(1 + x\right)\right)} \]
          2. Step-by-step derivation
            1. lift-+.f64N/A

              \[\leadsto \frac{2}{\color{blue}{\left(x + -1\right)} \cdot \left(x \cdot \left(1 + x\right)\right)} \]
            2. *-lft-identityN/A

              \[\leadsto \frac{2}{\left(x + -1\right) \cdot \left(\color{blue}{\left(1 \cdot x\right)} \cdot \left(1 + x\right)\right)} \]
            3. associate-*r*N/A

              \[\leadsto \frac{2}{\left(x + -1\right) \cdot \color{blue}{\left(1 \cdot \left(x \cdot \left(1 + x\right)\right)\right)}} \]
            4. *-commutativeN/A

              \[\leadsto \frac{2}{\color{blue}{\left(1 \cdot \left(x \cdot \left(1 + x\right)\right)\right) \cdot \left(x + -1\right)}} \]
            5. lower-*.f64N/A

              \[\leadsto \frac{2}{\color{blue}{\left(1 \cdot \left(x \cdot \left(1 + x\right)\right)\right) \cdot \left(x + -1\right)}} \]
            6. associate-*r*N/A

              \[\leadsto \frac{2}{\color{blue}{\left(\left(1 \cdot x\right) \cdot \left(1 + x\right)\right)} \cdot \left(x + -1\right)} \]
            7. *-lft-identityN/A

              \[\leadsto \frac{2}{\left(\color{blue}{x} \cdot \left(1 + x\right)\right) \cdot \left(x + -1\right)} \]
            8. +-commutativeN/A

              \[\leadsto \frac{2}{\left(x \cdot \color{blue}{\left(x + 1\right)}\right) \cdot \left(x + -1\right)} \]
            9. distribute-lft-inN/A

              \[\leadsto \frac{2}{\color{blue}{\left(x \cdot x + x \cdot 1\right)} \cdot \left(x + -1\right)} \]
            10. *-rgt-identityN/A

              \[\leadsto \frac{2}{\left(x \cdot x + \color{blue}{x}\right) \cdot \left(x + -1\right)} \]
            11. lower-fma.f6498.8

              \[\leadsto \frac{2}{\color{blue}{\mathsf{fma}\left(x, x, x\right)} \cdot \left(x + -1\right)} \]
          3. Applied rewrites98.8%

            \[\leadsto \frac{2}{\color{blue}{\mathsf{fma}\left(x, x, x\right) \cdot \left(x + -1\right)}} \]
          4. Add Preprocessing

          Alternative 5: 99.2% accurate, 2.0× speedup?

          \[\begin{array}{l} x\_m = \left|x\right| \\ x\_s = \mathsf{copysign}\left(1, x\right) \\ x\_s \cdot \frac{2}{x \cdot \mathsf{fma}\left(x, x, -1\right)} \end{array} \]
          x\_m = (fabs.f64 x)
          x\_s = (copysign.f64 #s(literal 1 binary64) x)
          (FPCore (x_s x_m) :precision binary64 (* x_s (/ 2.0 (* x (fma x x -1.0)))))
          x\_m = fabs(x);
          x\_s = copysign(1.0, x);
          double code(double x_s, double x_m) {
          	return x_s * (2.0 / (x * fma(x, x, -1.0)));
          }
          
          x\_m = abs(x)
          x\_s = copysign(1.0, x)
          # Alternative 5: the target f(x) = 2/(x*(x^2-1)) is odd, so it equals
          # x_s * 2/(x_m * fma(x_m, x_m, -1)) with x_m = |x|, x_s = copysign(1, x).
          # Fix: the generated body referenced `x`, which is not a parameter;
          # substitute x_m (value-equivalent by oddness).
          function code(x_s, x_m)
          	return Float64(x_s * Float64(2.0 / Float64(x_m * fma(x_m, x_m, -1.0))))
          end
          
          x\_m = N[Abs[x], $MachinePrecision]
          x\_s = N[With[{TMP1 = Abs[1.0], TMP2 = Sign[x]}, TMP1 * If[TMP2 == 0, 1, TMP2]], $MachinePrecision]
          code[x$95$s_, x$95$m_] := N[(x$95$s * N[(2.0 / N[(x * N[(x * x + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
          
          \begin{array}{l}
          x\_m = \left|x\right|
          \\
          x\_s = \mathsf{copysign}\left(1, x\right)
          
          \\
          x\_s \cdot \frac{2}{x \cdot \mathsf{fma}\left(x, x, -1\right)}
          \end{array}
          
          Derivation
          1. Initial program 67.7%

            \[\left(\frac{1}{x + 1} - \frac{2}{x}\right) + \frac{1}{x - 1} \]
          2. Add Preprocessing
          3. Step-by-step derivation
            1. lift-+.f64N/A

              \[\leadsto \left(\frac{1}{\color{blue}{x + 1}} - \frac{2}{x}\right) + \frac{1}{x - 1} \]
            2. lift-/.f64N/A

              \[\leadsto \left(\color{blue}{\frac{1}{x + 1}} - \frac{2}{x}\right) + \frac{1}{x - 1} \]
            3. lift-/.f64N/A

              \[\leadsto \left(\frac{1}{x + 1} - \color{blue}{\frac{2}{x}}\right) + \frac{1}{x - 1} \]
            4. lift--.f64N/A

              \[\leadsto \color{blue}{\left(\frac{1}{x + 1} - \frac{2}{x}\right)} + \frac{1}{x - 1} \]
            5. lift--.f64N/A

              \[\leadsto \left(\frac{1}{x + 1} - \frac{2}{x}\right) + \frac{1}{\color{blue}{x - 1}} \]
            6. lift-/.f64N/A

              \[\leadsto \left(\frac{1}{x + 1} - \frac{2}{x}\right) + \color{blue}{\frac{1}{x - 1}} \]
            7. +-commutativeN/A

              \[\leadsto \color{blue}{\frac{1}{x - 1} + \left(\frac{1}{x + 1} - \frac{2}{x}\right)} \]
            8. lift-/.f64N/A

              \[\leadsto \color{blue}{\frac{1}{x - 1}} + \left(\frac{1}{x + 1} - \frac{2}{x}\right) \]
            9. lift--.f64N/A

              \[\leadsto \frac{1}{x - 1} + \color{blue}{\left(\frac{1}{x + 1} - \frac{2}{x}\right)} \]
            10. lift-/.f64N/A

              \[\leadsto \frac{1}{x - 1} + \left(\color{blue}{\frac{1}{x + 1}} - \frac{2}{x}\right) \]
            11. lift-/.f64N/A

              \[\leadsto \frac{1}{x - 1} + \left(\frac{1}{x + 1} - \color{blue}{\frac{2}{x}}\right) \]
            12. frac-subN/A

              \[\leadsto \frac{1}{x - 1} + \color{blue}{\frac{1 \cdot x - \left(x + 1\right) \cdot 2}{\left(x + 1\right) \cdot x}} \]
            13. frac-addN/A

              \[\leadsto \color{blue}{\frac{1 \cdot \left(\left(x + 1\right) \cdot x\right) + \left(x - 1\right) \cdot \left(1 \cdot x - \left(x + 1\right) \cdot 2\right)}{\left(x - 1\right) \cdot \left(\left(x + 1\right) \cdot x\right)}} \]
            14. lower-/.f64N/A

              \[\leadsto \color{blue}{\frac{1 \cdot \left(\left(x + 1\right) \cdot x\right) + \left(x - 1\right) \cdot \left(1 \cdot x - \left(x + 1\right) \cdot 2\right)}{\left(x - 1\right) \cdot \left(\left(x + 1\right) \cdot x\right)}} \]
          4. Applied rewrites18.0%

            \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1, x \cdot \left(1 + x\right), \left(x + -1\right) \cdot \left(x - \left(1 + x\right) \cdot 2\right)\right)}{\left(x + -1\right) \cdot \left(x \cdot \left(1 + x\right)\right)}} \]
          5. Taylor expanded in x around 0

            \[\leadsto \frac{\color{blue}{2}}{\left(x + -1\right) \cdot \left(x \cdot \left(1 + x\right)\right)} \]
          6. Step-by-step derivation
            1. Applied rewrites98.8%

              \[\leadsto \frac{\color{blue}{2}}{\left(x + -1\right) \cdot \left(x \cdot \left(1 + x\right)\right)} \]
            2. Taylor expanded in x around 0

              \[\leadsto \frac{2}{\color{blue}{x \cdot \left({x}^{2} - 1\right)}} \]
            3. Step-by-step derivation
              1. sub-negN/A

                \[\leadsto \frac{2}{x \cdot \color{blue}{\left({x}^{2} + \left(\mathsf{neg}\left(1\right)\right)\right)}} \]
              2. *-lft-identityN/A

                \[\leadsto \frac{2}{x \cdot \left(\color{blue}{1 \cdot {x}^{2}} + \left(\mathsf{neg}\left(1\right)\right)\right)} \]
              3. lft-mult-inverseN/A

                \[\leadsto \frac{2}{x \cdot \left(1 \cdot {x}^{2} + \left(\mathsf{neg}\left(\color{blue}{\frac{1}{{x}^{2}} \cdot {x}^{2}}\right)\right)\right)} \]
              4. distribute-lft-neg-outN/A

                \[\leadsto \frac{2}{x \cdot \left(1 \cdot {x}^{2} + \color{blue}{\left(\mathsf{neg}\left(\frac{1}{{x}^{2}}\right)\right) \cdot {x}^{2}}\right)} \]
              5. distribute-neg-fracN/A

                \[\leadsto \frac{2}{x \cdot \left(1 \cdot {x}^{2} + \color{blue}{\frac{\mathsf{neg}\left(1\right)}{{x}^{2}}} \cdot {x}^{2}\right)} \]
              6. metadata-evalN/A

                \[\leadsto \frac{2}{x \cdot \left(1 \cdot {x}^{2} + \frac{\color{blue}{-1}}{{x}^{2}} \cdot {x}^{2}\right)} \]
              7. distribute-rgt-inN/A

                \[\leadsto \frac{2}{x \cdot \color{blue}{\left({x}^{2} \cdot \left(1 + \frac{-1}{{x}^{2}}\right)\right)}} \]
              8. metadata-evalN/A

                \[\leadsto \frac{2}{x \cdot \left({x}^{2} \cdot \left(1 + \frac{\color{blue}{\mathsf{neg}\left(1\right)}}{{x}^{2}}\right)\right)} \]
              9. distribute-neg-fracN/A

                \[\leadsto \frac{2}{x \cdot \left({x}^{2} \cdot \left(1 + \color{blue}{\left(\mathsf{neg}\left(\frac{1}{{x}^{2}}\right)\right)}\right)\right)} \]
              10. sub-negN/A

                \[\leadsto \frac{2}{x \cdot \left({x}^{2} \cdot \color{blue}{\left(1 - \frac{1}{{x}^{2}}\right)}\right)} \]
              11. unpow2N/A

                \[\leadsto \frac{2}{x \cdot \left(\color{blue}{\left(x \cdot x\right)} \cdot \left(1 - \frac{1}{{x}^{2}}\right)\right)} \]
              12. associate-*l*N/A

                \[\leadsto \frac{2}{x \cdot \color{blue}{\left(x \cdot \left(x \cdot \left(1 - \frac{1}{{x}^{2}}\right)\right)\right)}} \]
              13. associate-*l*N/A

                \[\leadsto \frac{2}{x \cdot \color{blue}{\left(\left(x \cdot x\right) \cdot \left(1 - \frac{1}{{x}^{2}}\right)\right)}} \]
              14. unpow2N/A

                \[\leadsto \frac{2}{x \cdot \left(\color{blue}{{x}^{2}} \cdot \left(1 - \frac{1}{{x}^{2}}\right)\right)} \]
              15. *-commutativeN/A

                \[\leadsto \frac{2}{x \cdot \color{blue}{\left(\left(1 - \frac{1}{{x}^{2}}\right) \cdot {x}^{2}\right)}} \]
              16. lower-*.f64N/A

                \[\leadsto \frac{2}{\color{blue}{x \cdot \left(\left(1 - \frac{1}{{x}^{2}}\right) \cdot {x}^{2}\right)}} \]
            4. Applied rewrites98.8%

              \[\leadsto \frac{2}{\color{blue}{x \cdot \mathsf{fma}\left(x, x, -1\right)}} \]
            5. Add Preprocessing

            Alternative 6: 98.1% accurate, 2.1× speedup?

            \[\begin{array}{l} x\_m = \left|x\right| \\ x\_s = \mathsf{copysign}\left(1, x\right) \\ x\_s \cdot \frac{2}{x\_m \cdot \left(x\_m \cdot x\_m\right)} \end{array} \]
            x\_m = (fabs.f64 x)
            x\_s = (copysign.f64 #s(literal 1 binary64) x)
            (FPCore (x_s x_m) :precision binary64 (* x_s (/ 2.0 (* x (* x x)))))
            x\_m = fabs(x);
            x\_s = copysign(1.0, x);
            /* Alternative 6: approximates the original 1/(x+1) - 2/x + 1/(x-1)
             * by 2/x^3 for |x| > 1.  The caller precomputes x_s = copysign(1, x)
             * and x_m = fabs(x); using the magnitude x_m for the cube and
             * restoring the sign with x_s gives x_s * 2/x_m^3 == 2/x^3.
             * (The generated code referenced the undeclared name x here.) */
            double code(double x_s, double x_m) {
            	return x_s * (2.0 / (x_m * (x_m * x_m)));
            }
            
            x\_m = abs(x)
            x\_s = copysign(1.0d0, x)
            ! Alternative 6: evaluates x_s * 2/x_m**3 == 2/x**3 for |x| > 1,
            ! where x_s = sign(1, x) and x_m = abs(x) are precomputed by the
            ! caller.  The generated code referenced the undeclared name x
            ! instead of the magnitude x_m.
            real(8) function code(x_s, x_m)
                real(8), intent (in) :: x_s
                real(8), intent (in) :: x_m
                code = x_s * (2.0d0 / (x_m * (x_m * x_m)))
            end function
            
            x\_m = Math.abs(x);
            x\_s = Math.copySign(1.0, x);
            /**
             * Alternative 6: approximates 1/(x+1) - 2/x + 1/(x-1) by 2/x^3.
             * The caller precomputes x_s = copySign(1, x) and x_m = abs(x);
             * x_s * 2/x_m^3 == 2/x^3.  The generated code referenced the
             * undeclared name x instead of x_m.
             */
            public static double code(double x_s, double x_m) {
            	return x_s * (2.0 / (x_m * (x_m * x_m)));
            }
            
            x\_m = math.fabs(x)
            x\_s = math.copysign(1.0, x)
            def code(x_s, x_m):
            	"""Alternative 6: approximate 1/(x+1) - 2/x + 1/(x-1) by 2/x^3.

            	The caller precomputes x_s = copysign(1, x) and x_m = fabs(x);
            	x_s * 2/x_m**3 equals 2/x**3.  The generated code referenced
            	the undefined name x instead of the magnitude x_m.
            	"""
            	return x_s * (2.0 / (x_m * (x_m * x_m)))
            
            x\_m = abs(x)
            x\_s = copysign(1.0, x)
            # Alternative 6: x_s * 2/x_m^3 == 2/x^3 for x_s = copysign(1, x),
            # x_m = abs(x).  The generated code referenced the undefined name x
            # instead of the magnitude x_m.
            function code(x_s, x_m)
            	return Float64(x_s * Float64(2.0 / Float64(x_m * Float64(x_m * x_m))))
            end
            
            x\_m = abs(x);
            x\_s = sign(x) * abs(1.0);
            % Alternative 6: x_s * 2/x_m^3 == 2/x^3 for x_s = sign-of-x and
            % x_m = abs(x).  The generated code referenced the undefined name
            % x instead of the magnitude x_m.
            function tmp = code(x_s, x_m)
            	tmp = x_s * (2.0 / (x_m * (x_m * x_m)));
            end
            
            x\_m = N[Abs[x], $MachinePrecision]
            x\_s = N[With[{TMP1 = Abs[1.0], TMP2 = Sign[x]}, TMP1 * If[TMP2 == 0, 1, TMP2]], $MachinePrecision]
            code[x$95$s_, x$95$m_] := N[(x$95$s * N[(2.0 / N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
            
            \begin{array}{l}
            x\_m = \left|x\right|
            \\
            x\_s = \mathsf{copysign}\left(1, x\right)
            
            \\
            x\_s \cdot \frac{2}{x\_m \cdot \left(x\_m \cdot x\_m\right)}
            \end{array}
            
            Derivation
            1. Initial program 67.7%

              \[\left(\frac{1}{x + 1} - \frac{2}{x}\right) + \frac{1}{x - 1} \]
            2. Add Preprocessing
            3. Taylor expanded in x around inf

              \[\leadsto \color{blue}{\frac{2}{{x}^{3}}} \]
            4. Step-by-step derivation
              1. lower-/.f64N/A

                \[\leadsto \color{blue}{\frac{2}{{x}^{3}}} \]
              2. cube-multN/A

                \[\leadsto \frac{2}{\color{blue}{x \cdot \left(x \cdot x\right)}} \]
              3. unpow2N/A

                \[\leadsto \frac{2}{x \cdot \color{blue}{{x}^{2}}} \]
              4. lower-*.f64N/A

                \[\leadsto \frac{2}{\color{blue}{x \cdot {x}^{2}}} \]
              5. unpow2N/A

                \[\leadsto \frac{2}{x \cdot \color{blue}{\left(x \cdot x\right)}} \]
              6. lower-*.f64 98.5%

                \[\leadsto \frac{2}{x \cdot \color{blue}{\left(x \cdot x\right)}} \]
            5. Applied rewrites98.5%

              \[\leadsto \color{blue}{\frac{2}{x \cdot \left(x \cdot x\right)}} \]
            6. Add Preprocessing

            Alternative 7: 51.9% accurate, 2.6× speedup?

            \[\begin{array}{l} x\_m = \left|x\right| \\ x\_s = \mathsf{copysign}\left(1, x\right) \\ x\_s \cdot \frac{-2}{\mathsf{fma}\left(x\_m, x\_m, x\_m\right)} \end{array} \]
            x\_m = (fabs.f64 x)
            x\_s = (copysign.f64 #s(literal 1 binary64) x)
            (FPCore (x_s x_m) :precision binary64 (* x_s (/ -2.0 (fma x_m x_m x_m))))
            x\_m = fabs(x);
            x\_s = copysign(1.0, x);
            double code(double x_s, double x_m) {
            	return x_s * (-2.0 / fma(x_m, x_m, x_m));
            }
            
            x\_m = abs(x)
            x\_s = copysign(1.0, x)
            # Alternative 7: x_s * (-2 / (x_m^2 + x_m)); the denominator is
            # computed in one rounding with fma.
            function code(x_s, x_m)
            	denom = fma(x_m, x_m, x_m)
            	magnitude = Float64(-2.0 / denom)
            	return Float64(x_s * magnitude)
            end
            
            x\_m = N[Abs[x], $MachinePrecision]
            x\_s = N[With[{TMP1 = Abs[1.0], TMP2 = Sign[x]}, TMP1 * If[TMP2 == 0, 1, TMP2]], $MachinePrecision]
            code[x$95$s_, x$95$m_] := N[(x$95$s * N[(-2.0 / N[(x$95$m * x$95$m + x$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
            
            \begin{array}{l}
            x\_m = \left|x\right|
            \\
            x\_s = \mathsf{copysign}\left(1, x\right)
            
            \\
            x\_s \cdot \frac{-2}{\mathsf{fma}\left(x\_m, x\_m, x\_m\right)}
            \end{array}
            
            Derivation
            1. Initial program 67.7%

              \[\left(\frac{1}{x + 1} - \frac{2}{x}\right) + \frac{1}{x - 1} \]
            2. Add Preprocessing
            3. Step-by-step derivation
              1. lift-+.f64N/A

                \[\leadsto \left(\frac{1}{\color{blue}{x + 1}} - \frac{2}{x}\right) + \frac{1}{x - 1} \]
              2. lift-/.f64N/A

                \[\leadsto \left(\color{blue}{\frac{1}{x + 1}} - \frac{2}{x}\right) + \frac{1}{x - 1} \]
              3. lift-/.f64N/A

                \[\leadsto \left(\frac{1}{x + 1} - \color{blue}{\frac{2}{x}}\right) + \frac{1}{x - 1} \]
              4. lift--.f64N/A

                \[\leadsto \color{blue}{\left(\frac{1}{x + 1} - \frac{2}{x}\right)} + \frac{1}{x - 1} \]
              5. lift--.f64N/A

                \[\leadsto \left(\frac{1}{x + 1} - \frac{2}{x}\right) + \frac{1}{\color{blue}{x - 1}} \]
              6. lift-/.f64N/A

                \[\leadsto \left(\frac{1}{x + 1} - \frac{2}{x}\right) + \color{blue}{\frac{1}{x - 1}} \]
              7. +-commutativeN/A

                \[\leadsto \color{blue}{\frac{1}{x - 1} + \left(\frac{1}{x + 1} - \frac{2}{x}\right)} \]
              8. lift-/.f64N/A

                \[\leadsto \color{blue}{\frac{1}{x - 1}} + \left(\frac{1}{x + 1} - \frac{2}{x}\right) \]
              9. lift--.f64N/A

                \[\leadsto \frac{1}{x - 1} + \color{blue}{\left(\frac{1}{x + 1} - \frac{2}{x}\right)} \]
              10. lift-/.f64N/A

                \[\leadsto \frac{1}{x - 1} + \left(\color{blue}{\frac{1}{x + 1}} - \frac{2}{x}\right) \]
              11. lift-/.f64N/A

                \[\leadsto \frac{1}{x - 1} + \left(\frac{1}{x + 1} - \color{blue}{\frac{2}{x}}\right) \]
              12. frac-subN/A

                \[\leadsto \frac{1}{x - 1} + \color{blue}{\frac{1 \cdot x - \left(x + 1\right) \cdot 2}{\left(x + 1\right) \cdot x}} \]
              13. frac-addN/A

                \[\leadsto \color{blue}{\frac{1 \cdot \left(\left(x + 1\right) \cdot x\right) + \left(x - 1\right) \cdot \left(1 \cdot x - \left(x + 1\right) \cdot 2\right)}{\left(x - 1\right) \cdot \left(\left(x + 1\right) \cdot x\right)}} \]
              14. lower-/.f64N/A

                \[\leadsto \color{blue}{\frac{1 \cdot \left(\left(x + 1\right) \cdot x\right) + \left(x - 1\right) \cdot \left(1 \cdot x - \left(x + 1\right) \cdot 2\right)}{\left(x - 1\right) \cdot \left(\left(x + 1\right) \cdot x\right)}} \]
            4. Applied rewrites18.0%

              \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1, x \cdot \left(1 + x\right), \left(x + -1\right) \cdot \left(x - \left(1 + x\right) \cdot 2\right)\right)}{\left(x + -1\right) \cdot \left(x \cdot \left(1 + x\right)\right)}} \]
            5. Taylor expanded in x around 0

              \[\leadsto \frac{\color{blue}{2}}{\left(x + -1\right) \cdot \left(x \cdot \left(1 + x\right)\right)} \]
            6. Step-by-step derivation
              1. Applied rewrites98.8%

                \[\leadsto \frac{\color{blue}{2}}{\left(x + -1\right) \cdot \left(x \cdot \left(1 + x\right)\right)} \]
              2. Step-by-step derivation
                1. lift-approxN/A

                  \[\leadsto \frac{\color{blue}{2}}{\left(x + -1\right) \cdot \left(x \cdot \left(1 + x\right)\right)} \]
                2. lift-+.f64N/A

                  \[\leadsto \frac{2}{\color{blue}{\left(x + -1\right)} \cdot \left(x \cdot \left(1 + x\right)\right)} \]
                3. *-lft-identityN/A

                  \[\leadsto \frac{2}{\left(x + -1\right) \cdot \left(\color{blue}{\left(1 \cdot x\right)} \cdot \left(1 + x\right)\right)} \]
                4. associate-*r*N/A

                  \[\leadsto \frac{2}{\left(x + -1\right) \cdot \color{blue}{\left(1 \cdot \left(x \cdot \left(1 + x\right)\right)\right)}} \]
                5. associate-/r*N/A

                  \[\leadsto \color{blue}{\frac{\frac{2}{x + -1}}{1 \cdot \left(x \cdot \left(1 + x\right)\right)}} \]
                6. lower-/.f64N/A

                  \[\leadsto \color{blue}{\frac{\frac{2}{x + -1}}{1 \cdot \left(x \cdot \left(1 + x\right)\right)}} \]
                7. lower-/.f64N/A

                  \[\leadsto \frac{\color{blue}{\frac{2}{x + -1}}}{1 \cdot \left(x \cdot \left(1 + x\right)\right)} \]
              3. Applied rewrites99.8%

                \[\leadsto \color{blue}{\frac{\frac{2}{x + -1}}{\mathsf{fma}\left(x, x, x\right)}} \]
              4. Taylor expanded in x around 0

                \[\leadsto \frac{\color{blue}{-2}}{\mathsf{fma}\left(x, x, x\right)} \]
              5. Step-by-step derivation
                1. Applied rewrites53.5%

                  \[\leadsto \frac{\color{blue}{-2}}{\mathsf{fma}\left(x, x, x\right)} \]
                2. Add Preprocessing

                Alternative 8: 5.0% accurate, 3.8× speedup?

                \[\begin{array}{l} x\_m = \left|x\right| \\ x\_s = \mathsf{copysign}\left(1, x\right) \\ x\_s \cdot \frac{-2}{x\_m} \end{array} \]
                x\_m = (fabs.f64 x)
                x\_s = (copysign.f64 #s(literal 1 binary64) x)
                (FPCore (x_s x_m) :precision binary64 (* x_s (/ -2.0 x)))
                x\_m = fabs(x);
                x\_s = copysign(1.0, x);
                /* Alternative 8: crude approximation -2/x of the original
                 * expression (Taylor expansion around 0; low accuracy for the
                 * sampled |x| > 1 domain).  With x_s = copysign(1, x) and
                 * x_m = fabs(x), x_s * (-2/x_m) == -2/x.  The generated code
                 * referenced the undeclared name x instead of x_m. */
                double code(double x_s, double x_m) {
                	return x_s * (-2.0 / x_m);
                }
                
                x\_m = abs(x)
                x\_s = copysign(1.0d0, x)
                ! Alternative 8: x_s * (-2/x_m) == -2/x for x_s = sign(1, x),
                ! x_m = abs(x).  The generated code referenced the undeclared
                ! name x instead of the magnitude x_m.
                real(8) function code(x_s, x_m)
                    real(8), intent (in) :: x_s
                    real(8), intent (in) :: x_m
                    code = x_s * ((-2.0d0) / x_m)
                end function
                
                x\_m = Math.abs(x);
                x\_s = Math.copySign(1.0, x);
                /**
                 * Alternative 8: x_s * (-2/x_m) == -2/x for x_s = copySign(1, x),
                 * x_m = abs(x).  The generated code referenced the undeclared
                 * name x instead of x_m.
                 */
                public static double code(double x_s, double x_m) {
                	return x_s * (-2.0 / x_m);
                }
                
                x\_m = math.fabs(x)
                x\_s = math.copysign(1.0, x)
                def code(x_s, x_m):
                	"""Alternative 8: x_s * (-2/x_m) == -2/x.

                	The caller precomputes x_s = copysign(1, x) and x_m = fabs(x);
                	the generated code referenced the undefined name x instead of
                	the magnitude x_m.
                	"""
                	return x_s * (-2.0 / x_m)
                
                x\_m = abs(x)
                x\_s = copysign(1.0, x)
                # Alternative 8: x_s * (-2/x_m) == -2/x for x_s = copysign(1, x),
                # x_m = abs(x).  The generated code referenced the undefined
                # name x instead of the magnitude x_m.
                function code(x_s, x_m)
                	return Float64(x_s * Float64(-2.0 / x_m))
                end
                
                x\_m = abs(x);
                x\_s = sign(x) * abs(1.0);
                % Alternative 8: x_s * (-2/x_m) == -2/x for x_s = sign-of-x,
                % x_m = abs(x).  The generated code referenced the undefined
                % name x instead of the magnitude x_m.
                function tmp = code(x_s, x_m)
                	tmp = x_s * (-2.0 / x_m);
                end
                
                x\_m = N[Abs[x], $MachinePrecision]
                x\_s = N[With[{TMP1 = Abs[1.0], TMP2 = Sign[x]}, TMP1 * If[TMP2 == 0, 1, TMP2]], $MachinePrecision]
                code[x$95$s_, x$95$m_] := N[(x$95$s * N[(-2.0 / x), $MachinePrecision]), $MachinePrecision]
                
                \begin{array}{l}
                x\_m = \left|x\right|
                \\
                x\_s = \mathsf{copysign}\left(1, x\right)
                
                \\
                x\_s \cdot \frac{-2}{x\_m}
                \end{array}
                
                Derivation
                1. Initial program 67.7%

                  \[\left(\frac{1}{x + 1} - \frac{2}{x}\right) + \frac{1}{x - 1} \]
                2. Add Preprocessing
                3. Taylor expanded in x around 0

                  \[\leadsto \color{blue}{\frac{-2}{x}} \]
                4. Step-by-step derivation
                  1. lower-/.f64 5.1%

                    \[\leadsto \color{blue}{\frac{-2}{x}} \]
                5. Applied rewrites5.1%

                  \[\leadsto \color{blue}{\frac{-2}{x}} \]
                6. Add Preprocessing

                Developer Target 1: 99.2% accurate, 1.8× speedup?

                \[\begin{array}{l} \\ \frac{2}{x \cdot \left(x \cdot x - 1\right)} \end{array} \]
                (FPCore (x) :precision binary64 (/ 2.0 (* x (- (* x x) 1.0))))
                /* Developer target: 2 / (x^3 - x), grouped as x * (x*x - 1)
                 * to avoid the catastrophic cancellation of the original
                 * three-fraction form. */
                double code(double x) {
                	double poly = (x * x) - 1.0;
                	double denominator = x * poly;
                	return 2.0 / denominator;
                }
                
                ! Developer target: 2 / (x**3 - x), grouped as x * (x*x - 1).
                real(8) function code(x)
                    real(8), intent (in) :: x
                    real(8) :: denominator
                    denominator = x * ((x * x) - 1.0d0)
                    code = 2.0d0 / denominator
                end function
                
                /** Developer target: 2 / (x^3 - x), grouped as x * (x*x - 1). */
                public static double code(double x) {
                	final double denominator = x * ((x * x) - 1.0);
                	return 2.0 / denominator;
                }
                
                def code(x):
                	"""Developer target: 2 / (x**3 - x), grouped as x * (x*x - 1)."""
                	denominator = x * ((x * x) - 1.0)
                	return 2.0 / denominator
                
                # Developer target: 2 / (x^3 - x), grouped as x * (x*x - 1).
                function code(x)
                	denominator = Float64(x * Float64(Float64(x * x) - 1.0))
                	return Float64(2.0 / denominator)
                end
                
                % Developer target: 2 / (x^3 - x), grouped as x * (x*x - 1).
                function tmp = code(x)
                	denominator = x * ((x * x) - 1.0);
                	tmp = 2.0 / denominator;
                end
                
                code[x_] := N[(2.0 / N[(x * N[(N[(x * x), $MachinePrecision] - 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
                
                \begin{array}{l}
                
                \\
                \frac{2}{x \cdot \left(x \cdot x - 1\right)}
                \end{array}
                

                Reproduce

                ?
                herbie shell --seed 2024212 
                (FPCore (x)
                  :name "3frac (problem 3.3.3)"
                  :precision binary64
                  :pre (> (fabs x) 1.0)
                
                  :alt
                  (! :herbie-platform default (/ 2 (* x (- (* x x) 1))))
                
                  (+ (- (/ 1.0 (+ x 1.0)) (/ 2.0 x)) (/ 1.0 (- x 1.0))))