3frac (problem 3.3.3)

Percentage Accurate: 68.8% → 99.8%
Time: 11.1s
Alternatives: 6
Speedup: 2.1×

Specification

?
\[\left|x\right| > 1\]
\[\begin{array}{l} \\ \left(\frac{1}{x + 1} - \frac{2}{x}\right) + \frac{1}{x - 1} \end{array} \]
(FPCore (x)
 :precision binary64
 (+ (- (/ 1.0 (+ x 1.0)) (/ 2.0 x)) (/ 1.0 (- x 1.0))))
// Initial program: direct evaluation of 1/(x+1) - 2/x + 1/(x-1) in binary64.
// Per this report, only ~68.8% accurate (the three terms nearly cancel for |x| > 1).
double code(double x) {
	return ((1.0 / (x + 1.0)) - (2.0 / x)) + (1.0 / (x - 1.0));
}
! Initial program: direct evaluation of 1/(x+1) - 2/x + 1/(x-1) in double precision.
! Per this report, only ~68.8% accurate (terms nearly cancel for |x| > 1).
real(8) function code(x)
    real(8), intent (in) :: x
    code = ((1.0d0 / (x + 1.0d0)) - (2.0d0 / x)) + (1.0d0 / (x - 1.0d0))
end function
// Initial program: direct evaluation of 1/(x+1) - 2/x + 1/(x-1) in binary64.
// Per this report, only ~68.8% accurate (terms nearly cancel for |x| > 1).
public static double code(double x) {
	return ((1.0 / (x + 1.0)) - (2.0 / x)) + (1.0 / (x - 1.0));
}
# Initial program: direct evaluation of 1/(x+1) - 2/x + 1/(x-1) in binary64.
# Per this report, only ~68.8% accurate (terms nearly cancel for |x| > 1).
def code(x):
	return ((1.0 / (x + 1.0)) - (2.0 / x)) + (1.0 / (x - 1.0))
# Initial program: direct evaluation of 1/(x+1) - 2/x + 1/(x-1), with every
# intermediate rounded to Float64. Per this report, only ~68.8% accurate.
function code(x)
	return Float64(Float64(Float64(1.0 / Float64(x + 1.0)) - Float64(2.0 / x)) + Float64(1.0 / Float64(x - 1.0)))
end
% Initial program: direct evaluation of 1/(x+1) - 2/x + 1/(x-1) in double precision.
% Per this report, only ~68.8% accurate (terms nearly cancel for |x| > 1).
function tmp = code(x)
	tmp = ((1.0 / (x + 1.0)) - (2.0 / x)) + (1.0 / (x - 1.0));
end
(* Initial program: 1/(x+1) - 2/x + 1/(x-1), each step rounded to $MachinePrecision. Per this report, only ~68.8% accurate. *)
code[x_] := N[(N[(N[(1.0 / N[(x + 1.0), $MachinePrecision]), $MachinePrecision] - N[(2.0 / x), $MachinePrecision]), $MachinePrecision] + N[(1.0 / N[(x - 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\left(\frac{1}{x + 1} - \frac{2}{x}\right) + \frac{1}{x - 1}
\end{array}

Sampling outcomes in binary64 precision:

Local Percentage Accuracy vs ?

The average percentage accuracy by input value. The horizontal axis shows the value of an input variable; the variable is chosen in the title. The vertical axis is accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion. These can be toggled with buttons below the plot. The line is an average, while the dots represent individual samples.

Accuracy vs Speed?

Herbie found 6 alternatives:

AlternativeAccuracySpeedup
The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed-accuracy tradeoffs.

Initial Program: 68.8% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \left(\frac{1}{x + 1} - \frac{2}{x}\right) + \frac{1}{x - 1} \end{array} \]
(FPCore (x)
 :precision binary64
 (+ (- (/ 1.0 (+ x 1.0)) (/ 2.0 x)) (/ 1.0 (- x 1.0))))
// Initial program (repeated listing): direct evaluation of 1/(x+1) - 2/x + 1/(x-1).
// Per this report, only ~68.8% accurate (terms nearly cancel for |x| > 1).
double code(double x) {
	return ((1.0 / (x + 1.0)) - (2.0 / x)) + (1.0 / (x - 1.0));
}
! Initial program (repeated listing): direct evaluation of 1/(x+1) - 2/x + 1/(x-1).
! Per this report, only ~68.8% accurate (terms nearly cancel for |x| > 1).
real(8) function code(x)
    real(8), intent (in) :: x
    code = ((1.0d0 / (x + 1.0d0)) - (2.0d0 / x)) + (1.0d0 / (x - 1.0d0))
end function
// Initial program (repeated listing): direct evaluation of 1/(x+1) - 2/x + 1/(x-1).
// Per this report, only ~68.8% accurate (terms nearly cancel for |x| > 1).
public static double code(double x) {
	return ((1.0 / (x + 1.0)) - (2.0 / x)) + (1.0 / (x - 1.0));
}
# Initial program (repeated listing): direct evaluation of 1/(x+1) - 2/x + 1/(x-1).
# Per this report, only ~68.8% accurate (terms nearly cancel for |x| > 1).
def code(x):
	return ((1.0 / (x + 1.0)) - (2.0 / x)) + (1.0 / (x - 1.0))
# Initial program (repeated listing): 1/(x+1) - 2/x + 1/(x-1), every intermediate
# rounded to Float64. Per this report, only ~68.8% accurate.
function code(x)
	return Float64(Float64(Float64(1.0 / Float64(x + 1.0)) - Float64(2.0 / x)) + Float64(1.0 / Float64(x - 1.0)))
end
% Initial program (repeated listing): direct evaluation of 1/(x+1) - 2/x + 1/(x-1).
% Per this report, only ~68.8% accurate (terms nearly cancel for |x| > 1).
function tmp = code(x)
	tmp = ((1.0 / (x + 1.0)) - (2.0 / x)) + (1.0 / (x - 1.0));
end
(* Initial program (repeated listing): 1/(x+1) - 2/x + 1/(x-1), each step rounded to $MachinePrecision. *)
code[x_] := N[(N[(N[(1.0 / N[(x + 1.0), $MachinePrecision]), $MachinePrecision] - N[(2.0 / x), $MachinePrecision]), $MachinePrecision] + N[(1.0 / N[(x - 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\left(\frac{1}{x + 1} - \frac{2}{x}\right) + \frac{1}{x - 1}
\end{array}

Alternative 1: 99.8% accurate, 1.4× speedup?

\[\begin{array}{l} x\_m = \left|x\right| \\ x\_s = \mathsf{copysign}\left(1, x\right) \\ x\_s \cdot \frac{\frac{\frac{2}{x\_m}}{x\_m + 1}}{x\_m + -1} \end{array} \]
x\_m = (fabs.f64 x)
x\_s = (copysign.f64 #s(literal 1 binary64) x)
(FPCore (x_s x_m)
 :precision binary64
 (* x_s (/ (/ (/ 2.0 x_m) (+ x_m 1.0)) (+ x_m -1.0))))
x\_m = fabs(x);
x\_s = copysign(1.0, x);
// Alternative 1 (99.8% accurate per report): evaluate ((2/x_m)/(x_m+1))/(x_m-1)
// on the magnitude x_m = fabs(x), then restore the sign via x_s = copysign(1, x)
// (both computed in the preprocessing step above).
double code(double x_s, double x_m) {
	return x_s * (((2.0 / x_m) / (x_m + 1.0)) / (x_m + -1.0));
}
x\_m = abs(x)
x\_s = copysign(1.0d0, x)
! Alternative 1 (99.8% accurate per report): ((2/x_m)/(x_m+1))/(x_m-1) on the
! magnitude x_m = abs(x); the sign x_s = copysign(1, x) is restored at the end.
real(8) function code(x_s, x_m)
    real(8), intent (in) :: x_s
    real(8), intent (in) :: x_m
    code = x_s * (((2.0d0 / x_m) / (x_m + 1.0d0)) / (x_m + (-1.0d0)))
end function
x\_m = Math.abs(x);
x\_s = Math.copySign(1.0, x);
// Alternative 1 (99.8% accurate per report): ((2/x_m)/(x_m+1))/(x_m-1) on the
// magnitude x_m = Math.abs(x); sign restored via x_s = Math.copySign(1, x).
public static double code(double x_s, double x_m) {
	return x_s * (((2.0 / x_m) / (x_m + 1.0)) / (x_m + -1.0));
}
x\_m = math.fabs(x)
x\_s = math.copysign(1.0, x)
# Alternative 1 (99.8% accurate per report): ((2/x_m)/(x_m+1))/(x_m-1) on the
# magnitude x_m = math.fabs(x); sign restored via x_s = math.copysign(1, x).
def code(x_s, x_m):
	return x_s * (((2.0 / x_m) / (x_m + 1.0)) / (x_m + -1.0))
x\_m = abs(x)
x\_s = copysign(1.0, x)
# Alternative 1 (99.8% accurate per report): ((2/x_m)/(x_m+1))/(x_m-1) on the
# magnitude x_m = abs(x); sign restored via x_s = copysign(1, x).
function code(x_s, x_m)
	return Float64(x_s * Float64(Float64(Float64(2.0 / x_m) / Float64(x_m + 1.0)) / Float64(x_m + -1.0)))
end
x\_m = abs(x);
x\_s = sign(x) * abs(1.0);
% Alternative 1 (99.8% accurate per report): ((2/x_m)/(x_m+1))/(x_m-1) on the
% magnitude x_m = abs(x); sign restored via x_s = sign(x)*abs(1).
function tmp = code(x_s, x_m)
	tmp = x_s * (((2.0 / x_m) / (x_m + 1.0)) / (x_m + -1.0));
end
x\_m = N[Abs[x], $MachinePrecision]
x\_s = N[With[{TMP1 = Abs[1.0], TMP2 = Sign[x]}, TMP1 * If[TMP2 == 0, 1, TMP2]], $MachinePrecision]
(* Alternative 1 (99.8% accurate per report): x_s * ((2/x_m)/(x_m+1))/(x_m-1); x$95$s/x$95$m are the escaped names for x_s/x_m. *)
code[x$95$s_, x$95$m_] := N[(x$95$s * N[(N[(N[(2.0 / x$95$m), $MachinePrecision] / N[(x$95$m + 1.0), $MachinePrecision]), $MachinePrecision] / N[(x$95$m + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
x\_m = \left|x\right|
\\
x\_s = \mathsf{copysign}\left(1, x\right)

\\
x\_s \cdot \frac{\frac{\frac{2}{x\_m}}{x\_m + 1}}{x\_m + -1}
\end{array}
Derivation
  1. Initial program 68.2%

    \[\left(\frac{1}{x + 1} - \frac{2}{x}\right) + \frac{1}{x - 1} \]
  2. Add Preprocessing
  3. Step-by-step derivation
    1. frac-subN/A

      \[\leadsto \frac{1 \cdot x - \left(x + 1\right) \cdot 2}{\left(x + 1\right) \cdot x} + \frac{\color{blue}{1}}{x - 1} \]
    2. frac-addN/A

      \[\leadsto \frac{\left(1 \cdot x - \left(x + 1\right) \cdot 2\right) \cdot \left(x - 1\right) + \left(\left(x + 1\right) \cdot x\right) \cdot 1}{\color{blue}{\left(\left(x + 1\right) \cdot x\right) \cdot \left(x - 1\right)}} \]
    3. /-lowering-/.f64N/A

      \[\leadsto \mathsf{/.f64}\left(\left(\left(1 \cdot x - \left(x + 1\right) \cdot 2\right) \cdot \left(x - 1\right) + \left(\left(x + 1\right) \cdot x\right) \cdot 1\right), \color{blue}{\left(\left(\left(x + 1\right) \cdot x\right) \cdot \left(x - 1\right)\right)}\right) \]
  4. Applied egg-rr22.1%

    \[\leadsto \color{blue}{\frac{\left(x - \left(1 + x\right) \cdot 2\right) \cdot \left(x + -1\right) + \left(x \cdot \left(1 + x\right)\right) \cdot 1}{\left(x \cdot \left(1 + x\right)\right) \cdot \left(x + -1\right)}} \]
  5. Taylor expanded in x around 0

    \[\leadsto \mathsf{/.f64}\left(\color{blue}{2}, \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, \mathsf{+.f64}\left(1, x\right)\right), \mathsf{+.f64}\left(x, -1\right)\right)\right) \]
  6. Step-by-step derivation
    1. Simplified99.6%

      \[\leadsto \frac{\color{blue}{2}}{\left(x \cdot \left(1 + x\right)\right) \cdot \left(x + -1\right)} \]
    2. Taylor expanded in x around 0

      \[\leadsto \mathsf{/.f64}\left(2, \color{blue}{\left(x \cdot \left({x}^{2} - 1\right)\right)}\right) \]
    3. Step-by-step derivation
      1. distribute-lft-out--N/A

        \[\leadsto \mathsf{/.f64}\left(2, \left(x \cdot {x}^{2} - \color{blue}{x \cdot 1}\right)\right) \]
      2. unpow2N/A

        \[\leadsto \mathsf{/.f64}\left(2, \left(x \cdot \left(x \cdot x\right) - x \cdot 1\right)\right) \]
      3. cube-multN/A

        \[\leadsto \mathsf{/.f64}\left(2, \left({x}^{3} - \color{blue}{x} \cdot 1\right)\right) \]
      4. *-rgt-identityN/A

        \[\leadsto \mathsf{/.f64}\left(2, \left({x}^{3} \cdot 1 - \color{blue}{x} \cdot 1\right)\right) \]
      5. rgt-mult-inverseN/A

        \[\leadsto \mathsf{/.f64}\left(2, \left({x}^{3} \cdot 1 - x \cdot \left({x}^{2} \cdot \color{blue}{\frac{1}{{x}^{2}}}\right)\right)\right) \]
      6. associate-*l*N/A

        \[\leadsto \mathsf{/.f64}\left(2, \left({x}^{3} \cdot 1 - \left(x \cdot {x}^{2}\right) \cdot \color{blue}{\frac{1}{{x}^{2}}}\right)\right) \]
      7. unpow2N/A

        \[\leadsto \mathsf{/.f64}\left(2, \left({x}^{3} \cdot 1 - \left(x \cdot \left(x \cdot x\right)\right) \cdot \frac{1}{{x}^{2}}\right)\right) \]
      8. cube-multN/A

        \[\leadsto \mathsf{/.f64}\left(2, \left({x}^{3} \cdot 1 - {x}^{3} \cdot \frac{\color{blue}{1}}{{x}^{2}}\right)\right) \]
      9. distribute-lft-out--N/A

        \[\leadsto \mathsf{/.f64}\left(2, \left({x}^{3} \cdot \color{blue}{\left(1 - \frac{1}{{x}^{2}}\right)}\right)\right) \]
      10. cube-multN/A

        \[\leadsto \mathsf{/.f64}\left(2, \left(\left(x \cdot \left(x \cdot x\right)\right) \cdot \left(\color{blue}{1} - \frac{1}{{x}^{2}}\right)\right)\right) \]
      11. unpow2N/A

        \[\leadsto \mathsf{/.f64}\left(2, \left(\left(x \cdot {x}^{2}\right) \cdot \left(1 - \frac{1}{{x}^{2}}\right)\right)\right) \]
      12. associate-*l*N/A

        \[\leadsto \mathsf{/.f64}\left(2, \left(x \cdot \color{blue}{\left({x}^{2} \cdot \left(1 - \frac{1}{{x}^{2}}\right)\right)}\right)\right) \]
      13. *-lowering-*.f64N/A

        \[\leadsto \mathsf{/.f64}\left(2, \mathsf{*.f64}\left(x, \color{blue}{\left({x}^{2} \cdot \left(1 - \frac{1}{{x}^{2}}\right)\right)}\right)\right) \]
      14. sub-negN/A

        \[\leadsto \mathsf{/.f64}\left(2, \mathsf{*.f64}\left(x, \left({x}^{2} \cdot \left(1 + \color{blue}{\left(\mathsf{neg}\left(\frac{1}{{x}^{2}}\right)\right)}\right)\right)\right)\right) \]
      15. distribute-neg-fracN/A

        \[\leadsto \mathsf{/.f64}\left(2, \mathsf{*.f64}\left(x, \left({x}^{2} \cdot \left(1 + \frac{\mathsf{neg}\left(1\right)}{\color{blue}{{x}^{2}}}\right)\right)\right)\right) \]
      16. metadata-evalN/A

        \[\leadsto \mathsf{/.f64}\left(2, \mathsf{*.f64}\left(x, \left({x}^{2} \cdot \left(1 + \frac{-1}{{\color{blue}{x}}^{2}}\right)\right)\right)\right) \]
      17. distribute-lft-inN/A

        \[\leadsto \mathsf{/.f64}\left(2, \mathsf{*.f64}\left(x, \left({x}^{2} \cdot 1 + \color{blue}{{x}^{2} \cdot \frac{-1}{{x}^{2}}}\right)\right)\right) \]
      18. *-rgt-identityN/A

        \[\leadsto \mathsf{/.f64}\left(2, \mathsf{*.f64}\left(x, \left({x}^{2} + \color{blue}{{x}^{2}} \cdot \frac{-1}{{x}^{2}}\right)\right)\right) \]
      19. metadata-evalN/A

        \[\leadsto \mathsf{/.f64}\left(2, \mathsf{*.f64}\left(x, \left({x}^{2} + {x}^{2} \cdot \frac{\mathsf{neg}\left(1\right)}{{\color{blue}{x}}^{2}}\right)\right)\right) \]
      20. distribute-neg-fracN/A

        \[\leadsto \mathsf{/.f64}\left(2, \mathsf{*.f64}\left(x, \left({x}^{2} + {x}^{2} \cdot \left(\mathsf{neg}\left(\frac{1}{{x}^{2}}\right)\right)\right)\right)\right) \]
      21. distribute-rgt-neg-outN/A

        \[\leadsto \mathsf{/.f64}\left(2, \mathsf{*.f64}\left(x, \left({x}^{2} + \left(\mathsf{neg}\left({x}^{2} \cdot \frac{1}{{x}^{2}}\right)\right)\right)\right)\right) \]
    4. Simplified99.5%

      \[\leadsto \frac{2}{\color{blue}{x \cdot \left(-1 + x \cdot x\right)}} \]
    5. Step-by-step derivation
      1. associate-/r*N/A

        \[\leadsto \frac{\frac{2}{x}}{\color{blue}{-1 + x \cdot x}} \]
      2. +-commutativeN/A

        \[\leadsto \frac{\frac{2}{x}}{x \cdot x + \color{blue}{-1}} \]
      3. difference-of-sqr--1N/A

        \[\leadsto \frac{\frac{2}{x}}{\left(x + 1\right) \cdot \color{blue}{\left(x - 1\right)}} \]
      4. associate-/r*N/A

        \[\leadsto \frac{\frac{\frac{2}{x}}{x + 1}}{\color{blue}{x - 1}} \]
      5. associate-/r*N/A

        \[\leadsto \frac{\frac{2}{x \cdot \left(x + 1\right)}}{\color{blue}{x} - 1} \]
      6. /-lowering-/.f64N/A

        \[\leadsto \mathsf{/.f64}\left(\left(\frac{2}{x \cdot \left(x + 1\right)}\right), \color{blue}{\left(x - 1\right)}\right) \]
      7. associate-/r*N/A

        \[\leadsto \mathsf{/.f64}\left(\left(\frac{\frac{2}{x}}{x + 1}\right), \left(\color{blue}{x} - 1\right)\right) \]
      8. /-lowering-/.f64N/A

        \[\leadsto \mathsf{/.f64}\left(\mathsf{/.f64}\left(\left(\frac{2}{x}\right), \left(x + 1\right)\right), \left(\color{blue}{x} - 1\right)\right) \]
      9. /-lowering-/.f64N/A

        \[\leadsto \mathsf{/.f64}\left(\mathsf{/.f64}\left(\mathsf{/.f64}\left(2, x\right), \left(x + 1\right)\right), \left(x - 1\right)\right) \]
      10. +-lowering-+.f64N/A

        \[\leadsto \mathsf{/.f64}\left(\mathsf{/.f64}\left(\mathsf{/.f64}\left(2, x\right), \mathsf{+.f64}\left(x, 1\right)\right), \left(x - 1\right)\right) \]
      11. sub-negN/A

        \[\leadsto \mathsf{/.f64}\left(\mathsf{/.f64}\left(\mathsf{/.f64}\left(2, x\right), \mathsf{+.f64}\left(x, 1\right)\right), \left(x + \color{blue}{\left(\mathsf{neg}\left(1\right)\right)}\right)\right) \]
      12. metadata-evalN/A

        \[\leadsto \mathsf{/.f64}\left(\mathsf{/.f64}\left(\mathsf{/.f64}\left(2, x\right), \mathsf{+.f64}\left(x, 1\right)\right), \left(x + -1\right)\right) \]
      13. +-lowering-+.f6499.8%

        \[\leadsto \mathsf{/.f64}\left(\mathsf{/.f64}\left(\mathsf{/.f64}\left(2, x\right), \mathsf{+.f64}\left(x, 1\right)\right), \mathsf{+.f64}\left(x, \color{blue}{-1}\right)\right) \]
    6. Applied egg-rr99.8%

      \[\leadsto \color{blue}{\frac{\frac{\frac{2}{x}}{x + 1}}{x + -1}} \]
    7. Add Preprocessing

    Alternative 2: 99.8% accurate, 1.7× speedup?

    \[\begin{array}{l} x\_m = \left|x\right| \\ x\_s = \mathsf{copysign}\left(1, x\right) \\ x\_s \cdot \frac{\frac{2}{-1 + x\_m \cdot x\_m}}{x\_m} \end{array} \]
    x\_m = (fabs.f64 x)
    x\_s = (copysign.f64 #s(literal 1 binary64) x)
    (FPCore (x_s x_m)
     :precision binary64
     (* x_s (/ (/ 2.0 (+ -1.0 (* x_m x_m))) x_m)))
    x\_m = fabs(x);
    x\_s = copysign(1.0, x);
    // Alternative 2 (99.8% accurate per report): (2/(x_m*x_m - 1))/x_m on the
    // magnitude x_m = fabs(x); sign restored via x_s = copysign(1, x).
    double code(double x_s, double x_m) {
    	return x_s * ((2.0 / (-1.0 + (x_m * x_m))) / x_m);
    }
    
    x\_m = abs(x)
    x\_s = copysign(1.0d0, x)
    ! Alternative 2 (99.8% accurate per report): (2/(x_m*x_m - 1))/x_m on the
    ! magnitude x_m = abs(x); sign x_s = copysign(1, x) restored at the end.
    real(8) function code(x_s, x_m)
        real(8), intent (in) :: x_s
        real(8), intent (in) :: x_m
        code = x_s * ((2.0d0 / ((-1.0d0) + (x_m * x_m))) / x_m)
    end function
    
    x\_m = Math.abs(x);
    x\_s = Math.copySign(1.0, x);
    // Alternative 2 (99.8% accurate per report): (2/(x_m*x_m - 1))/x_m on the
    // magnitude x_m = Math.abs(x); sign restored via x_s = Math.copySign(1, x).
    public static double code(double x_s, double x_m) {
    	return x_s * ((2.0 / (-1.0 + (x_m * x_m))) / x_m);
    }
    
    x\_m = math.fabs(x)
    x\_s = math.copysign(1.0, x)
    # Alternative 2 (99.8% accurate per report): (2/(x_m*x_m - 1))/x_m on the
    # magnitude x_m = math.fabs(x); sign restored via x_s = math.copysign(1, x).
    def code(x_s, x_m):
    	return x_s * ((2.0 / (-1.0 + (x_m * x_m))) / x_m)
    
    x\_m = abs(x)
    x\_s = copysign(1.0, x)
    # Alternative 2 (99.8% accurate per report): (2/(x_m*x_m - 1))/x_m on the
    # magnitude x_m = abs(x); sign restored via x_s = copysign(1, x).
    function code(x_s, x_m)
    	return Float64(x_s * Float64(Float64(2.0 / Float64(-1.0 + Float64(x_m * x_m))) / x_m))
    end
    
    x\_m = abs(x);
    x\_s = sign(x) * abs(1.0);
    % Alternative 2 (99.8% accurate per report): (2/(x_m*x_m - 1))/x_m on the
    % magnitude x_m = abs(x); sign restored via x_s = sign(x)*abs(1).
    function tmp = code(x_s, x_m)
    	tmp = x_s * ((2.0 / (-1.0 + (x_m * x_m))) / x_m);
    end
    
    x\_m = N[Abs[x], $MachinePrecision]
    x\_s = N[With[{TMP1 = Abs[1.0], TMP2 = Sign[x]}, TMP1 * If[TMP2 == 0, 1, TMP2]], $MachinePrecision]
    (* Alternative 2 (99.8% accurate per report): x_s * (2/(x_m*x_m - 1))/x_m; x$95$s/x$95$m are the escaped names for x_s/x_m. *)
    code[x$95$s_, x$95$m_] := N[(x$95$s * N[(N[(2.0 / N[(-1.0 + N[(x$95$m * x$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / x$95$m), $MachinePrecision]), $MachinePrecision]
    
    \begin{array}{l}
    x\_m = \left|x\right|
    \\
    x\_s = \mathsf{copysign}\left(1, x\right)
    
    \\
    x\_s \cdot \frac{\frac{2}{-1 + x\_m \cdot x\_m}}{x\_m}
    \end{array}
    
    Derivation
    1. Initial program 68.2%

      \[\left(\frac{1}{x + 1} - \frac{2}{x}\right) + \frac{1}{x - 1} \]
    2. Add Preprocessing
    3. Step-by-step derivation
      1. frac-subN/A

        \[\leadsto \frac{1 \cdot x - \left(x + 1\right) \cdot 2}{\left(x + 1\right) \cdot x} + \frac{\color{blue}{1}}{x - 1} \]
      2. frac-addN/A

        \[\leadsto \frac{\left(1 \cdot x - \left(x + 1\right) \cdot 2\right) \cdot \left(x - 1\right) + \left(\left(x + 1\right) \cdot x\right) \cdot 1}{\color{blue}{\left(\left(x + 1\right) \cdot x\right) \cdot \left(x - 1\right)}} \]
      3. /-lowering-/.f64N/A

        \[\leadsto \mathsf{/.f64}\left(\left(\left(1 \cdot x - \left(x + 1\right) \cdot 2\right) \cdot \left(x - 1\right) + \left(\left(x + 1\right) \cdot x\right) \cdot 1\right), \color{blue}{\left(\left(\left(x + 1\right) \cdot x\right) \cdot \left(x - 1\right)\right)}\right) \]
    4. Applied egg-rr22.1%

      \[\leadsto \color{blue}{\frac{\left(x - \left(1 + x\right) \cdot 2\right) \cdot \left(x + -1\right) + \left(x \cdot \left(1 + x\right)\right) \cdot 1}{\left(x \cdot \left(1 + x\right)\right) \cdot \left(x + -1\right)}} \]
    5. Taylor expanded in x around 0

      \[\leadsto \mathsf{/.f64}\left(\color{blue}{2}, \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, \mathsf{+.f64}\left(1, x\right)\right), \mathsf{+.f64}\left(x, -1\right)\right)\right) \]
    6. Step-by-step derivation
      1. Simplified99.6%

        \[\leadsto \frac{\color{blue}{2}}{\left(x \cdot \left(1 + x\right)\right) \cdot \left(x + -1\right)} \]
      2. Taylor expanded in x around 0

        \[\leadsto \mathsf{/.f64}\left(2, \color{blue}{\left(x \cdot \left({x}^{2} - 1\right)\right)}\right) \]
      3. Step-by-step derivation
        1. distribute-lft-out--N/A

          \[\leadsto \mathsf{/.f64}\left(2, \left(x \cdot {x}^{2} - \color{blue}{x \cdot 1}\right)\right) \]
        2. unpow2N/A

          \[\leadsto \mathsf{/.f64}\left(2, \left(x \cdot \left(x \cdot x\right) - x \cdot 1\right)\right) \]
        3. cube-multN/A

          \[\leadsto \mathsf{/.f64}\left(2, \left({x}^{3} - \color{blue}{x} \cdot 1\right)\right) \]
        4. *-rgt-identityN/A

          \[\leadsto \mathsf{/.f64}\left(2, \left({x}^{3} \cdot 1 - \color{blue}{x} \cdot 1\right)\right) \]
        5. rgt-mult-inverseN/A

          \[\leadsto \mathsf{/.f64}\left(2, \left({x}^{3} \cdot 1 - x \cdot \left({x}^{2} \cdot \color{blue}{\frac{1}{{x}^{2}}}\right)\right)\right) \]
        6. associate-*l*N/A

          \[\leadsto \mathsf{/.f64}\left(2, \left({x}^{3} \cdot 1 - \left(x \cdot {x}^{2}\right) \cdot \color{blue}{\frac{1}{{x}^{2}}}\right)\right) \]
        7. unpow2N/A

          \[\leadsto \mathsf{/.f64}\left(2, \left({x}^{3} \cdot 1 - \left(x \cdot \left(x \cdot x\right)\right) \cdot \frac{1}{{x}^{2}}\right)\right) \]
        8. cube-multN/A

          \[\leadsto \mathsf{/.f64}\left(2, \left({x}^{3} \cdot 1 - {x}^{3} \cdot \frac{\color{blue}{1}}{{x}^{2}}\right)\right) \]
        9. distribute-lft-out--N/A

          \[\leadsto \mathsf{/.f64}\left(2, \left({x}^{3} \cdot \color{blue}{\left(1 - \frac{1}{{x}^{2}}\right)}\right)\right) \]
        10. cube-multN/A

          \[\leadsto \mathsf{/.f64}\left(2, \left(\left(x \cdot \left(x \cdot x\right)\right) \cdot \left(\color{blue}{1} - \frac{1}{{x}^{2}}\right)\right)\right) \]
        11. unpow2N/A

          \[\leadsto \mathsf{/.f64}\left(2, \left(\left(x \cdot {x}^{2}\right) \cdot \left(1 - \frac{1}{{x}^{2}}\right)\right)\right) \]
        12. associate-*l*N/A

          \[\leadsto \mathsf{/.f64}\left(2, \left(x \cdot \color{blue}{\left({x}^{2} \cdot \left(1 - \frac{1}{{x}^{2}}\right)\right)}\right)\right) \]
        13. *-lowering-*.f64N/A

          \[\leadsto \mathsf{/.f64}\left(2, \mathsf{*.f64}\left(x, \color{blue}{\left({x}^{2} \cdot \left(1 - \frac{1}{{x}^{2}}\right)\right)}\right)\right) \]
        14. sub-negN/A

          \[\leadsto \mathsf{/.f64}\left(2, \mathsf{*.f64}\left(x, \left({x}^{2} \cdot \left(1 + \color{blue}{\left(\mathsf{neg}\left(\frac{1}{{x}^{2}}\right)\right)}\right)\right)\right)\right) \]
        15. distribute-neg-fracN/A

          \[\leadsto \mathsf{/.f64}\left(2, \mathsf{*.f64}\left(x, \left({x}^{2} \cdot \left(1 + \frac{\mathsf{neg}\left(1\right)}{\color{blue}{{x}^{2}}}\right)\right)\right)\right) \]
        16. metadata-evalN/A

          \[\leadsto \mathsf{/.f64}\left(2, \mathsf{*.f64}\left(x, \left({x}^{2} \cdot \left(1 + \frac{-1}{{\color{blue}{x}}^{2}}\right)\right)\right)\right) \]
        17. distribute-lft-inN/A

          \[\leadsto \mathsf{/.f64}\left(2, \mathsf{*.f64}\left(x, \left({x}^{2} \cdot 1 + \color{blue}{{x}^{2} \cdot \frac{-1}{{x}^{2}}}\right)\right)\right) \]
        18. *-rgt-identityN/A

          \[\leadsto \mathsf{/.f64}\left(2, \mathsf{*.f64}\left(x, \left({x}^{2} + \color{blue}{{x}^{2}} \cdot \frac{-1}{{x}^{2}}\right)\right)\right) \]
        19. metadata-evalN/A

          \[\leadsto \mathsf{/.f64}\left(2, \mathsf{*.f64}\left(x, \left({x}^{2} + {x}^{2} \cdot \frac{\mathsf{neg}\left(1\right)}{{\color{blue}{x}}^{2}}\right)\right)\right) \]
        20. distribute-neg-fracN/A

          \[\leadsto \mathsf{/.f64}\left(2, \mathsf{*.f64}\left(x, \left({x}^{2} + {x}^{2} \cdot \left(\mathsf{neg}\left(\frac{1}{{x}^{2}}\right)\right)\right)\right)\right) \]
        21. distribute-rgt-neg-outN/A

          \[\leadsto \mathsf{/.f64}\left(2, \mathsf{*.f64}\left(x, \left({x}^{2} + \left(\mathsf{neg}\left({x}^{2} \cdot \frac{1}{{x}^{2}}\right)\right)\right)\right)\right) \]
      4. Simplified99.5%

        \[\leadsto \frac{2}{\color{blue}{x \cdot \left(-1 + x \cdot x\right)}} \]
      5. Step-by-step derivation
        1. +-commutativeN/A

          \[\leadsto \frac{2}{x \cdot \left(x \cdot x + \color{blue}{-1}\right)} \]
        2. *-commutativeN/A

          \[\leadsto \frac{2}{\left(x \cdot x + -1\right) \cdot \color{blue}{x}} \]
        3. associate-/r*N/A

          \[\leadsto \frac{\frac{2}{x \cdot x + -1}}{\color{blue}{x}} \]
        4. /-lowering-/.f64N/A

          \[\leadsto \mathsf{/.f64}\left(\left(\frac{2}{x \cdot x + -1}\right), \color{blue}{x}\right) \]
        5. /-lowering-/.f64N/A

          \[\leadsto \mathsf{/.f64}\left(\mathsf{/.f64}\left(2, \left(x \cdot x + -1\right)\right), x\right) \]
        6. +-commutativeN/A

          \[\leadsto \mathsf{/.f64}\left(\mathsf{/.f64}\left(2, \left(-1 + x \cdot x\right)\right), x\right) \]
        7. +-lowering-+.f64N/A

          \[\leadsto \mathsf{/.f64}\left(\mathsf{/.f64}\left(2, \mathsf{+.f64}\left(-1, \left(x \cdot x\right)\right)\right), x\right) \]
        8. *-lowering-*.f6499.8%

          \[\leadsto \mathsf{/.f64}\left(\mathsf{/.f64}\left(2, \mathsf{+.f64}\left(-1, \mathsf{*.f64}\left(x, x\right)\right)\right), x\right) \]
      6. Applied egg-rr99.8%

        \[\leadsto \color{blue}{\frac{\frac{2}{-1 + x \cdot x}}{x}} \]
      7. Add Preprocessing

      Alternative 3: 99.1% accurate, 1.7× speedup?

      \[\begin{array}{l} x\_m = \left|x\right| \\ x\_s = \mathsf{copysign}\left(1, x\right) \\ x\_s \cdot \frac{2}{x\_m \cdot \left(-1 + x\_m \cdot x\_m\right)} \end{array} \]
      x\_m = (fabs.f64 x)
      x\_s = (copysign.f64 #s(literal 1 binary64) x)
      (FPCore (x_s x_m)
       :precision binary64
       (* x_s (/ 2.0 (* x_m (+ -1.0 (* x_m x_m))))))
      x\_m = fabs(x);
      x\_s = copysign(1.0, x);
      // Alternative 3 (99.1% accurate per report): 2/(x_m*(x_m*x_m - 1)) on the
      // magnitude x_m = fabs(x); sign restored via x_s = copysign(1, x).
      double code(double x_s, double x_m) {
      	return x_s * (2.0 / (x_m * (-1.0 + (x_m * x_m))));
      }
      
      x\_m = abs(x)
      x\_s = copysign(1.0d0, x)
      ! Alternative 3 (99.1% accurate per report): 2/(x_m*(x_m*x_m - 1)) on the
      ! magnitude x_m = abs(x); sign x_s = copysign(1, x) restored at the end.
      real(8) function code(x_s, x_m)
          real(8), intent (in) :: x_s
          real(8), intent (in) :: x_m
          code = x_s * (2.0d0 / (x_m * ((-1.0d0) + (x_m * x_m))))
      end function
      
      x\_m = Math.abs(x);
      x\_s = Math.copySign(1.0, x);
      // Alternative 3 (99.1% accurate per report): 2/(x_m*(x_m*x_m - 1)) on the
      // magnitude x_m = Math.abs(x); sign restored via x_s = Math.copySign(1, x).
      public static double code(double x_s, double x_m) {
      	return x_s * (2.0 / (x_m * (-1.0 + (x_m * x_m))));
      }
      
      x\_m = math.fabs(x)
      x\_s = math.copysign(1.0, x)
      # Alternative 3 (99.1% accurate per report): 2/(x_m*(x_m*x_m - 1)) on the
      # magnitude x_m = math.fabs(x); sign restored via x_s = math.copysign(1, x).
      def code(x_s, x_m):
      	return x_s * (2.0 / (x_m * (-1.0 + (x_m * x_m))))
      
      x\_m = abs(x)
      x\_s = copysign(1.0, x)
      # Alternative 3 (99.1% accurate per report): 2/(x_m*(x_m*x_m - 1)) on the
      # magnitude x_m = abs(x); sign restored via x_s = copysign(1, x).
      function code(x_s, x_m)
      	return Float64(x_s * Float64(2.0 / Float64(x_m * Float64(-1.0 + Float64(x_m * x_m)))))
      end
      
      x\_m = abs(x);
      x\_s = sign(x) * abs(1.0);
      % Alternative 3 (99.1% accurate per report): 2/(x_m*(x_m*x_m - 1)) on the
      % magnitude x_m = abs(x); sign restored via x_s = sign(x)*abs(1).
      function tmp = code(x_s, x_m)
      	tmp = x_s * (2.0 / (x_m * (-1.0 + (x_m * x_m))));
      end
      
      x\_m = N[Abs[x], $MachinePrecision]
      x\_s = N[With[{TMP1 = Abs[1.0], TMP2 = Sign[x]}, TMP1 * If[TMP2 == 0, 1, TMP2]], $MachinePrecision]
      (* Alternative 3 (99.1% accurate per report): x_s * 2/(x_m*(x_m*x_m - 1)); x$95$s/x$95$m are the escaped names for x_s/x_m. *)
      code[x$95$s_, x$95$m_] := N[(x$95$s * N[(2.0 / N[(x$95$m * N[(-1.0 + N[(x$95$m * x$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
      
      \begin{array}{l}
      x\_m = \left|x\right|
      \\
      x\_s = \mathsf{copysign}\left(1, x\right)
      
      \\
      x\_s \cdot \frac{2}{x\_m \cdot \left(-1 + x\_m \cdot x\_m\right)}
      \end{array}
      
      Derivation
      1. Initial program 68.2%

        \[\left(\frac{1}{x + 1} - \frac{2}{x}\right) + \frac{1}{x - 1} \]
      2. Add Preprocessing
      3. Step-by-step derivation
        1. frac-subN/A

          \[\leadsto \frac{1 \cdot x - \left(x + 1\right) \cdot 2}{\left(x + 1\right) \cdot x} + \frac{\color{blue}{1}}{x - 1} \]
        2. frac-addN/A

          \[\leadsto \frac{\left(1 \cdot x - \left(x + 1\right) \cdot 2\right) \cdot \left(x - 1\right) + \left(\left(x + 1\right) \cdot x\right) \cdot 1}{\color{blue}{\left(\left(x + 1\right) \cdot x\right) \cdot \left(x - 1\right)}} \]
        3. /-lowering-/.f64N/A

          \[\leadsto \mathsf{/.f64}\left(\left(\left(1 \cdot x - \left(x + 1\right) \cdot 2\right) \cdot \left(x - 1\right) + \left(\left(x + 1\right) \cdot x\right) \cdot 1\right), \color{blue}{\left(\left(\left(x + 1\right) \cdot x\right) \cdot \left(x - 1\right)\right)}\right) \]
      4. Applied egg-rr22.1%

        \[\leadsto \color{blue}{\frac{\left(x - \left(1 + x\right) \cdot 2\right) \cdot \left(x + -1\right) + \left(x \cdot \left(1 + x\right)\right) \cdot 1}{\left(x \cdot \left(1 + x\right)\right) \cdot \left(x + -1\right)}} \]
      5. Taylor expanded in x around 0

        \[\leadsto \mathsf{/.f64}\left(\color{blue}{2}, \mathsf{*.f64}\left(\mathsf{*.f64}\left(x, \mathsf{+.f64}\left(1, x\right)\right), \mathsf{+.f64}\left(x, -1\right)\right)\right) \]
      6. Step-by-step derivation
        1. Simplified99.6%

          \[\leadsto \frac{\color{blue}{2}}{\left(x \cdot \left(1 + x\right)\right) \cdot \left(x + -1\right)} \]
        2. Taylor expanded in x around 0

          \[\leadsto \mathsf{/.f64}\left(2, \color{blue}{\left(x \cdot \left({x}^{2} - 1\right)\right)}\right) \]
        3. Step-by-step derivation
          1. distribute-lft-out--N/A

            \[\leadsto \mathsf{/.f64}\left(2, \left(x \cdot {x}^{2} - \color{blue}{x \cdot 1}\right)\right) \]
          2. unpow2N/A

            \[\leadsto \mathsf{/.f64}\left(2, \left(x \cdot \left(x \cdot x\right) - x \cdot 1\right)\right) \]
          3. cube-multN/A

            \[\leadsto \mathsf{/.f64}\left(2, \left({x}^{3} - \color{blue}{x} \cdot 1\right)\right) \]
          4. *-rgt-identityN/A

            \[\leadsto \mathsf{/.f64}\left(2, \left({x}^{3} \cdot 1 - \color{blue}{x} \cdot 1\right)\right) \]
          5. rgt-mult-inverseN/A

            \[\leadsto \mathsf{/.f64}\left(2, \left({x}^{3} \cdot 1 - x \cdot \left({x}^{2} \cdot \color{blue}{\frac{1}{{x}^{2}}}\right)\right)\right) \]
          6. associate-*l*N/A

            \[\leadsto \mathsf{/.f64}\left(2, \left({x}^{3} \cdot 1 - \left(x \cdot {x}^{2}\right) \cdot \color{blue}{\frac{1}{{x}^{2}}}\right)\right) \]
          7. unpow2N/A

            \[\leadsto \mathsf{/.f64}\left(2, \left({x}^{3} \cdot 1 - \left(x \cdot \left(x \cdot x\right)\right) \cdot \frac{1}{{x}^{2}}\right)\right) \]
          8. cube-multN/A

            \[\leadsto \mathsf{/.f64}\left(2, \left({x}^{3} \cdot 1 - {x}^{3} \cdot \frac{\color{blue}{1}}{{x}^{2}}\right)\right) \]
          9. distribute-lft-out--N/A

            \[\leadsto \mathsf{/.f64}\left(2, \left({x}^{3} \cdot \color{blue}{\left(1 - \frac{1}{{x}^{2}}\right)}\right)\right) \]
          10. cube-multN/A

            \[\leadsto \mathsf{/.f64}\left(2, \left(\left(x \cdot \left(x \cdot x\right)\right) \cdot \left(\color{blue}{1} - \frac{1}{{x}^{2}}\right)\right)\right) \]
          11. unpow2N/A

            \[\leadsto \mathsf{/.f64}\left(2, \left(\left(x \cdot {x}^{2}\right) \cdot \left(1 - \frac{1}{{x}^{2}}\right)\right)\right) \]
          12. associate-*l*N/A

            \[\leadsto \mathsf{/.f64}\left(2, \left(x \cdot \color{blue}{\left({x}^{2} \cdot \left(1 - \frac{1}{{x}^{2}}\right)\right)}\right)\right) \]
          13. *-lowering-*.f64N/A

            \[\leadsto \mathsf{/.f64}\left(2, \mathsf{*.f64}\left(x, \color{blue}{\left({x}^{2} \cdot \left(1 - \frac{1}{{x}^{2}}\right)\right)}\right)\right) \]
          14. sub-negN/A

            \[\leadsto \mathsf{/.f64}\left(2, \mathsf{*.f64}\left(x, \left({x}^{2} \cdot \left(1 + \color{blue}{\left(\mathsf{neg}\left(\frac{1}{{x}^{2}}\right)\right)}\right)\right)\right)\right) \]
          15. distribute-neg-fracN/A

            \[\leadsto \mathsf{/.f64}\left(2, \mathsf{*.f64}\left(x, \left({x}^{2} \cdot \left(1 + \frac{\mathsf{neg}\left(1\right)}{\color{blue}{{x}^{2}}}\right)\right)\right)\right) \]
          16. metadata-evalN/A

            \[\leadsto \mathsf{/.f64}\left(2, \mathsf{*.f64}\left(x, \left({x}^{2} \cdot \left(1 + \frac{-1}{{\color{blue}{x}}^{2}}\right)\right)\right)\right) \]
          17. distribute-lft-inN/A

            \[\leadsto \mathsf{/.f64}\left(2, \mathsf{*.f64}\left(x, \left({x}^{2} \cdot 1 + \color{blue}{{x}^{2} \cdot \frac{-1}{{x}^{2}}}\right)\right)\right) \]
          18. *-rgt-identityN/A

            \[\leadsto \mathsf{/.f64}\left(2, \mathsf{*.f64}\left(x, \left({x}^{2} + \color{blue}{{x}^{2}} \cdot \frac{-1}{{x}^{2}}\right)\right)\right) \]
          19. metadata-evalN/A

            \[\leadsto \mathsf{/.f64}\left(2, \mathsf{*.f64}\left(x, \left({x}^{2} + {x}^{2} \cdot \frac{\mathsf{neg}\left(1\right)}{{\color{blue}{x}}^{2}}\right)\right)\right) \]
          20. distribute-neg-fracN/A

            \[\leadsto \mathsf{/.f64}\left(2, \mathsf{*.f64}\left(x, \left({x}^{2} + {x}^{2} \cdot \left(\mathsf{neg}\left(\frac{1}{{x}^{2}}\right)\right)\right)\right)\right) \]
          21. distribute-rgt-neg-outN/A

            \[\leadsto \mathsf{/.f64}\left(2, \mathsf{*.f64}\left(x, \left({x}^{2} + \left(\mathsf{neg}\left({x}^{2} \cdot \frac{1}{{x}^{2}}\right)\right)\right)\right)\right) \]
        4. Simplified99.5%

          \[\leadsto \frac{2}{\color{blue}{x \cdot \left(-1 + x \cdot x\right)}} \]
        5. Add Preprocessing

        Alternative 4: 98.9% accurate, 2.1× speedup?

        \[\begin{array}{l} x\_m = \left|x\right| \\ x\_s = \mathsf{copysign}\left(1, x\right) \\ x\_s \cdot \frac{\frac{2}{x\_m}}{x\_m \cdot x\_m} \end{array} \]
        x\_m = (fabs.f64 x)
        x\_s = (copysign.f64 #s(literal 1 binary64) x)
        (FPCore (x_s x_m) :precision binary64 (* x_s (/ (/ 2.0 x_m) (* x_m x_m))))
        x\_m = fabs(x);
        x\_s = copysign(1.0, x);
        // Alternative 4 (98.9% accurate per report): (2/x_m)/(x_m*x_m), i.e. the
        // 2/x^3 series approximation (Taylor expansion around infinity in the
        // derivation below); x_m = fabs(x), sign restored via x_s = copysign(1, x).
        double code(double x_s, double x_m) {
        	return x_s * ((2.0 / x_m) / (x_m * x_m));
        }
        
        x\_m = abs(x)
        x\_s = copysign(1.0d0, x)
        ! Alternative 4 (98.9% accurate per report): (2/x_m)/(x_m*x_m), i.e. the
        ! 2/x^3 series approximation; x_m = abs(x), sign x_s restored at the end.
        real(8) function code(x_s, x_m)
            real(8), intent (in) :: x_s
            real(8), intent (in) :: x_m
            code = x_s * ((2.0d0 / x_m) / (x_m * x_m))
        end function
        
        x\_m = Math.abs(x);
        x\_s = Math.copySign(1.0, x);
        // Alternative 4 (98.9% accurate per report): (2/x_m)/(x_m*x_m), i.e. the
        // 2/x^3 series approximation; x_m = Math.abs(x), x_s = Math.copySign(1, x).
        public static double code(double x_s, double x_m) {
        	return x_s * ((2.0 / x_m) / (x_m * x_m));
        }
        
        x\_m = math.fabs(x)
        x\_s = math.copysign(1.0, x)
        # Alternative 4 (98.9% accurate per report): (2/x_m)/(x_m*x_m), i.e. the
        # 2/x^3 series approximation; x_m = math.fabs(x), x_s = math.copysign(1, x).
        def code(x_s, x_m):
        	return x_s * ((2.0 / x_m) / (x_m * x_m))
        
        x\_m = abs(x)
        x\_s = copysign(1.0, x)
        # Alternative 4 (98.9% accurate per report): (2/x_m)/(x_m*x_m), i.e. the
        # 2/x^3 series approximation; x_m = abs(x), x_s = copysign(1, x).
        function code(x_s, x_m)
        	return Float64(x_s * Float64(Float64(2.0 / x_m) / Float64(x_m * x_m)))
        end
        
        x\_m = abs(x);
        x\_s = sign(x) * abs(1.0);
        % Alternative 4 (98.9% accurate per report): (2/x_m)/(x_m*x_m), i.e. the
        % 2/x^3 series approximation; x_m = abs(x), x_s = sign(x)*abs(1).
        function tmp = code(x_s, x_m)
        	tmp = x_s * ((2.0 / x_m) / (x_m * x_m));
        end
        
        x\_m = N[Abs[x], $MachinePrecision]
        x\_s = N[With[{TMP1 = Abs[1.0], TMP2 = Sign[x]}, TMP1 * If[TMP2 == 0, 1, TMP2]], $MachinePrecision]
        (* Alternative 4 (98.9% accurate per report): x_s * (2/x_m)/(x_m*x_m); x$95$s/x$95$m are the escaped names for x_s/x_m. *)
        code[x$95$s_, x$95$m_] := N[(x$95$s * N[(N[(2.0 / x$95$m), $MachinePrecision] / N[(x$95$m * N[(x$95$m * x$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
        
        \begin{array}{l}
        x\_m = \left|x\right|
        \\
        x\_s = \mathsf{copysign}\left(1, x\right)
        
        \\
        x\_s \cdot \frac{\frac{2}{x\_m}}{x\_m \cdot x\_m}
        \end{array}
        
        Derivation
        1. Initial program 68.2%

          \[\left(\frac{1}{x + 1} - \frac{2}{x}\right) + \frac{1}{x - 1} \]
        2. Add Preprocessing
        3. Taylor expanded in x around -inf

          \[\leadsto \color{blue}{-1 \cdot \frac{-1 \cdot \frac{2 + 2 \cdot \frac{1}{{x}^{2}}}{{x}^{2}} - 2}{{x}^{3}}} \]
        4. Step-by-step derivation
          1. associate-*r/N/A

            \[\leadsto \frac{-1 \cdot \left(-1 \cdot \frac{2 + 2 \cdot \frac{1}{{x}^{2}}}{{x}^{2}} - 2\right)}{\color{blue}{{x}^{3}}} \]
          2. cube-multN/A

            \[\leadsto \frac{-1 \cdot \left(-1 \cdot \frac{2 + 2 \cdot \frac{1}{{x}^{2}}}{{x}^{2}} - 2\right)}{x \cdot \color{blue}{\left(x \cdot x\right)}} \]
          3. unpow2N/A

            \[\leadsto \frac{-1 \cdot \left(-1 \cdot \frac{2 + 2 \cdot \frac{1}{{x}^{2}}}{{x}^{2}} - 2\right)}{x \cdot {x}^{\color{blue}{2}}} \]
          4. associate-/r*N/A

            \[\leadsto \frac{\frac{-1 \cdot \left(-1 \cdot \frac{2 + 2 \cdot \frac{1}{{x}^{2}}}{{x}^{2}} - 2\right)}{x}}{\color{blue}{{x}^{2}}} \]
          5. /-lowering-/.f64N/A

            \[\leadsto \mathsf{/.f64}\left(\left(\frac{-1 \cdot \left(-1 \cdot \frac{2 + 2 \cdot \frac{1}{{x}^{2}}}{{x}^{2}} - 2\right)}{x}\right), \color{blue}{\left({x}^{2}\right)}\right) \]
        5. Simplified99.6%

          \[\leadsto \color{blue}{\frac{\frac{2 - \frac{-2 + \frac{-2}{x \cdot x}}{x \cdot x}}{x}}{x \cdot x}} \]
        6. Taylor expanded in x around inf

          \[\leadsto \mathsf{/.f64}\left(\color{blue}{\left(\frac{2}{x}\right)}, \mathsf{*.f64}\left(x, x\right)\right) \]
        7. Step-by-step derivation
          1. /-lowering-/.f6499.0%

            \[\leadsto \mathsf{/.f64}\left(\mathsf{/.f64}\left(2, x\right), \mathsf{*.f64}\left(\color{blue}{x}, x\right)\right) \]
        8. Simplified99.0%

          \[\leadsto \frac{\color{blue}{\frac{2}{x}}}{x \cdot x} \]
        9. Add Preprocessing

        Alternative 5: 98.1% accurate, 2.1× speedup?

        \[\begin{array}{l} x\_m = \left|x\right| \\ x\_s = \mathsf{copysign}\left(1, x\right) \\ x\_s \cdot \frac{2}{x\_m \cdot \left(x\_m \cdot x\_m\right)} \end{array} \]
        x\_m = (fabs.f64 x)
        x\_s = (copysign.f64 #s(literal 1 binary64) x)
        (FPCore (x_s x_m) :precision binary64 (* x_s (/ 2.0 (* x_m (* x_m x_m)))))
        x\_m = fabs(x);
        x\_s = copysign(1.0, x);
        double code(double x_s, double x_m) {
        	/* 2/x^3 via a single division by x_m*(x_m*x_m); x_s restores the sign. */
        	const double cube = x_m * (x_m * x_m);
        	return x_s * (2.0 / cube);
        }
        
        x\_m = abs(x)
        x\_s = copysign(1.0d0, x)
        real(8) function code(x_s, x_m)
            ! 2/x**3 via one division by x_m*(x_m*x_m); x_s restores the sign of x.
            real(8), intent (in) :: x_s
            real(8), intent (in) :: x_m
            real(8) :: cube
            cube = x_m * (x_m * x_m)
            code = x_s * (2.0d0 / cube)
        end function
        
        x\_m = Math.abs(x);
        x\_s = Math.copySign(1.0, x);
        public static double code(double x_s, double x_m) {
        	// 2/x^3 via a single division by x_m*(x_m*x_m); x_s restores the sign.
        	double cube = x_m * (x_m * x_m);
        	return x_s * (2.0 / cube);
        }
        
        x\_m = math.fabs(x)
        x\_s = math.copysign(1.0, x)
        def code(x_s, x_m):
        	"""Return x_s * 2/(x_m*(x_m*x_m)): 2/x**3 with sign and magnitude split."""
        	cube = x_m * (x_m * x_m)
        	return x_s * (2.0 / cube)
        
        x\_m = abs(x)
        x\_s = copysign(1.0, x)
        function code(x_s, x_m)
        	# 2/x^3 with one division, each step rounded to Float64; x_s restores sign.
        	cube = Float64(x_m * Float64(x_m * x_m))
        	return Float64(x_s * Float64(2.0 / cube))
        end
        
        x\_m = abs(x);
        x\_s = sign(x) * abs(1.0);
        function tmp = code(x_s, x_m)
        	% 2/x^3 via one division by x_m*(x_m*x_m); x_s restores the sign of x.
        	cube = x_m * (x_m * x_m);
        	tmp = x_s * (2.0 / cube);
        end
        
        x\_m = N[Abs[x], $MachinePrecision]
        x\_s = N[With[{TMP1 = Abs[1.0], TMP2 = Sign[x]}, TMP1 * If[TMP2 == 0, 1, TMP2]], $MachinePrecision]
        code[x$95$s_, x$95$m_] := N[(x$95$s * N[(2.0 / N[(x$95$m * N[(x$95$m * x$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
        
        \begin{array}{l}
        x\_m = \left|x\right|
        \\
        x\_s = \mathsf{copysign}\left(1, x\right)
        
        \\
        x\_s \cdot \frac{2}{x\_m \cdot \left(x\_m \cdot x\_m\right)}
        \end{array}
        
        Derivation
        1. Initial program 68.2%

          \[\left(\frac{1}{x + 1} - \frac{2}{x}\right) + \frac{1}{x - 1} \]
        2. Add Preprocessing
        3. Taylor expanded in x around inf

          \[\leadsto \color{blue}{\frac{2}{{x}^{3}}} \]
        4. Step-by-step derivation
          1. /-lowering-/.f64N/A

            \[\leadsto \mathsf{/.f64}\left(2, \color{blue}{\left({x}^{3}\right)}\right) \]
          2. cube-multN/A

            \[\leadsto \mathsf{/.f64}\left(2, \left(x \cdot \color{blue}{\left(x \cdot x\right)}\right)\right) \]
          3. unpow2N/A

            \[\leadsto \mathsf{/.f64}\left(2, \left(x \cdot {x}^{\color{blue}{2}}\right)\right) \]
          4. *-lowering-*.f64N/A

            \[\leadsto \mathsf{/.f64}\left(2, \mathsf{*.f64}\left(x, \color{blue}{\left({x}^{2}\right)}\right)\right) \]
          5. unpow2N/A

            \[\leadsto \mathsf{/.f64}\left(2, \mathsf{*.f64}\left(x, \left(x \cdot \color{blue}{x}\right)\right)\right) \]
          6. *-lowering-*.f6498.7%

            \[\leadsto \mathsf{/.f64}\left(2, \mathsf{*.f64}\left(x, \mathsf{*.f64}\left(x, \color{blue}{x}\right)\right)\right) \]
        5. Simplified98.7%

          \[\leadsto \color{blue}{\frac{2}{x \cdot \left(x \cdot x\right)}} \]
        6. Add Preprocessing

        Alternative 6: 5.0% accurate, 5.0× speedup?

        \[\begin{array}{l} x\_m = \left|x\right| \\ x\_s = \mathsf{copysign}\left(1, x\right) \\ x\_s \cdot \frac{-2}{x\_m} \end{array} \]
        x\_m = (fabs.f64 x)
        x\_s = (copysign.f64 #s(literal 1 binary64) x)
        (FPCore (x_s x_m) :precision binary64 (* x_s (/ -2.0 x_m)))
        x\_m = fabs(x);
        x\_s = copysign(1.0, x);
        double code(double x_s, double x_m) {
        	/* Leading-order term -2/x of the Taylor expansion around 0; x_s restores the sign. */
        	const double lead = -2.0 / x_m;
        	return x_s * lead;
        }
        
        x\_m = abs(x)
        x\_s = copysign(1.0d0, x)
        real(8) function code(x_s, x_m)
            ! Leading-order term -2/x (Taylor expansion around 0); x_s restores the sign.
            real(8), intent (in) :: x_s
            real(8), intent (in) :: x_m
            real(8) :: lead
            lead = (-2.0d0) / x_m
            code = x_s * lead
        end function
        
        x\_m = Math.abs(x);
        x\_s = Math.copySign(1.0, x);
        public static double code(double x_s, double x_m) {
        	// Leading-order term -2/x of the Taylor expansion around 0; x_s restores the sign.
        	double lead = -2.0 / x_m;
        	return x_s * lead;
        }
        
        x\_m = math.fabs(x)
        x\_s = math.copysign(1.0, x)
        def code(x_s, x_m):
        	"""Return x_s * (-2/x_m): leading-order Taylor term around 0, sign split out."""
        	lead = -2.0 / x_m
        	return x_s * lead
        
        x\_m = abs(x)
        x\_s = copysign(1.0, x)
        function code(x_s, x_m)
        	# Leading-order term -2/x (Taylor around 0); x_s restores the sign of x.
        	lead = Float64(-2.0 / x_m)
        	return Float64(x_s * lead)
        end
        
        x\_m = abs(x);
        x\_s = sign(x) * abs(1.0);
        function tmp = code(x_s, x_m)
        	% Leading-order term -2/x of the Taylor expansion around 0; x_s restores the sign.
        	lead = -2.0 / x_m;
        	tmp = x_s * lead;
        end
        
        x\_m = N[Abs[x], $MachinePrecision]
        x\_s = N[With[{TMP1 = Abs[1.0], TMP2 = Sign[x]}, TMP1 * If[TMP2 == 0, 1, TMP2]], $MachinePrecision]
        code[x$95$s_, x$95$m_] := N[(x$95$s * N[(-2.0 / x$95$m), $MachinePrecision]), $MachinePrecision]
        
        \begin{array}{l}
        x\_m = \left|x\right|
        \\
        x\_s = \mathsf{copysign}\left(1, x\right)
        
        \\
        x\_s \cdot \frac{-2}{x\_m}
        \end{array}
        
        Derivation
        1. Initial program 68.2%

          \[\left(\frac{1}{x + 1} - \frac{2}{x}\right) + \frac{1}{x - 1} \]
        2. Add Preprocessing
        3. Taylor expanded in x around 0

          \[\leadsto \color{blue}{\frac{-2}{x}} \]
        4. Step-by-step derivation
          1. /-lowering-/.f644.9%

            \[\leadsto \mathsf{/.f64}\left(-2, \color{blue}{x}\right) \]
        5. Simplified4.9%

          \[\leadsto \color{blue}{\frac{-2}{x}} \]
        6. Add Preprocessing

        Developer Target 1: 99.1% accurate, 1.7× speedup?

        \[\begin{array}{l} \\ \frac{2}{x \cdot \left(x \cdot x - 1\right)} \end{array} \]
        (FPCore (x) :precision binary64 (/ 2.0 (* x (- (* x x) 1.0))))
        double code(double x) {
        	/* 2/(x*(x^2-1)): single-division form of 1/(x+1) - 2/x + 1/(x-1). */
        	const double denom = x * ((x * x) - 1.0);
        	return 2.0 / denom;
        }
        
        real(8) function code(x)
            ! 2/(x*(x**2-1)): single-division form of 1/(x+1) - 2/x + 1/(x-1).
            real(8), intent (in) :: x
            real(8) :: denom
            denom = x * ((x * x) - 1.0d0)
            code = 2.0d0 / denom
        end function
        
        public static double code(double x) {
        	// 2/(x*(x^2-1)): single-division form of 1/(x+1) - 2/x + 1/(x-1).
        	double denom = x * ((x * x) - 1.0);
        	return 2.0 / denom;
        }
        
        def code(x):
        	"""Return 2/(x*(x*x - 1)): combined form of 1/(x+1) - 2/x + 1/(x-1)."""
        	denom = x * ((x * x) - 1.0)
        	return 2.0 / denom
        
        function code(x)
        	# 2/(x*(x^2-1)) with every intermediate rounded to Float64 as in the source.
        	denom = Float64(x * Float64(Float64(x * x) - 1.0))
        	return Float64(2.0 / denom)
        end
        
        function tmp = code(x)
        	% 2/(x*(x^2-1)): single-division form of 1/(x+1) - 2/x + 1/(x-1).
        	denom = x * ((x * x) - 1.0);
        	tmp = 2.0 / denom;
        end
        
        code[x_] := N[(2.0 / N[(x * N[(N[(x * x), $MachinePrecision] - 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
        
        \begin{array}{l}
        
        \\
        \frac{2}{x \cdot \left(x \cdot x - 1\right)}
        \end{array}
        

        Reproduce

        ?
        herbie shell --seed 2024191 
        (FPCore (x)
          :name "3frac (problem 3.3.3)"
          :precision binary64
          :pre (> (fabs x) 1.0)
        
          :alt
          (! :herbie-platform default (/ 2 (* x (- (* x x) 1))))
        
          (+ (- (/ 1.0 (+ x 1.0)) (/ 2.0 x)) (/ 1.0 (- x 1.0))))