Kahan p13 Example 2

Percentage Accurate: 100.0% → 100.0%
Time: 9.9s
Alternatives: 8
Speedup: 1.5×

Specification

?
\[\begin{array}{l} \\ \begin{array}{l} t_1 := 2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\\ t_2 := t\_1 \cdot t\_1\\ \frac{1 + t\_2}{2 + t\_2} \end{array} \end{array} \]
;; Specification (Kahan p13 Example 2): with u = 2 - (2/t)/(1 + 1/t),
;; compute (1 + u*u) / (2 + u*u) in binary64.
(FPCore (t)
 :precision binary64
 (let* ((t_1 (- 2.0 (/ (/ 2.0 t) (+ 1.0 (/ 1.0 t))))) (t_2 (* t_1 t_1)))
   (/ (+ 1.0 t_2) (+ 2.0 t_2))))
/* Computes (1 + u*u) / (2 + u*u) where u = 2 - (2/t) / (1 + 1/t).
 * Direct binary64 translation of the FPCore specification. */
double code(double t) {
	double u = 2.0 - ((2.0 / t) / (1.0 + (1.0 / t)));
	double u_sq = u * u;
	double result = (1.0 + u_sq) / (2.0 + u_sq);
	return result;
}
! Computes (1 + u*u)/(2 + u*u) with u = 2 - (2/t)/(1 + 1/t), in double precision.
real(8) function code(t)
    real(8), intent (in) :: t
    real(8) :: t_1
    real(8) :: t_2
    ! u = 2 - (2/t)/(1 + 1/t)
    t_1 = 2.0d0 - ((2.0d0 / t) / (1.0d0 + (1.0d0 / t)))
    t_2 = t_1 * t_1
    code = (1.0d0 + t_2) / (2.0d0 + t_2)
end function
/** Computes (1 + u*u) / (2 + u*u) where u = 2 - (2/t) / (1 + 1/t), in binary64. */
public static double code(double t) {
	final double u = 2.0 - ((2.0 / t) / (1.0 + (1.0 / t)));
	final double uSquared = u * u;
	return (1.0 + uSquared) / (2.0 + uSquared);
}
def code(t):
    """Evaluate (1 + u*u) / (2 + u*u) where u = 2 - (2/t) / (1 + 1/t)."""
    u = 2.0 - (2.0 / t) / (1.0 + 1.0 / t)
    u_sq = u * u
    return (1.0 + u_sq) / (2.0 + u_sq)
# Computes (1 + u*u)/(2 + u*u) with u = 2 - (2/t)/(1 + 1/t); Float64 at every step.
function code(t)
	t_1 = Float64(2.0 - Float64(Float64(2.0 / t) / Float64(1.0 + Float64(1.0 / t))))
	t_2 = Float64(t_1 * t_1)
	return Float64(Float64(1.0 + t_2) / Float64(2.0 + t_2))
end
% Computes (1 + u*u)/(2 + u*u) with u = 2 - (2/t)/(1 + 1/t), in double precision.
function tmp = code(t)
	t_1 = 2.0 - ((2.0 / t) / (1.0 + (1.0 / t)));
	t_2 = t_1 * t_1;
	tmp = (1.0 + t_2) / (2.0 + t_2);
end
(* Computes (1 + u*u)/(2 + u*u) with u = 2 - (2/t)/(1 + 1/t), each step rounded to $MachinePrecision. *)
code[t_] := Block[{t$95$1 = N[(2.0 - N[(N[(2.0 / t), $MachinePrecision] / N[(1.0 + N[(1.0 / t), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$2 = N[(t$95$1 * t$95$1), $MachinePrecision]}, N[(N[(1.0 + t$95$2), $MachinePrecision] / N[(2.0 + t$95$2), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
t_1 := 2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\\
t_2 := t_1 \cdot t_1\\
\frac{1 + t_2}{2 + t_2}
\end{array}
\end{array}

Sampling outcomes in binary64 precision:

Local Percentage Accuracy vs ?

The average percentage accuracy by input value. Horizontal axis shows the value of an input variable; the variable is chosen in the title. Vertical axis is accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion. These can be toggled with buttons below the plot. The line is an average while dots represent individual samples.

Accuracy vs Speed?

Herbie found 8 alternatives:

Alternative · Accuracy · Speedup
The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed-accuracy tradeoffs.

Initial Program: 100.0% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_1 := 2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\\ t_2 := t\_1 \cdot t\_1\\ \frac{1 + t\_2}{2 + t\_2} \end{array} \end{array} \]
;; Initial program (100.0% accurate, 1.0x speedup): same as the specification.
(FPCore (t)
 :precision binary64
 (let* ((t_1 (- 2.0 (/ (/ 2.0 t) (+ 1.0 (/ 1.0 t))))) (t_2 (* t_1 t_1)))
   (/ (+ 1.0 t_2) (+ 2.0 t_2))))
/* Initial program: (1 + u*u)/(2 + u*u) with u = 2 - (2/t)/(1 + 1/t), binary64. */
double code(double t) {
	double t_1 = 2.0 - ((2.0 / t) / (1.0 + (1.0 / t)));
	double t_2 = t_1 * t_1;
	return (1.0 + t_2) / (2.0 + t_2);
}
! Initial program: (1 + u*u)/(2 + u*u) with u = 2 - (2/t)/(1 + 1/t), double precision.
real(8) function code(t)
    real(8), intent (in) :: t
    real(8) :: t_1
    real(8) :: t_2
    t_1 = 2.0d0 - ((2.0d0 / t) / (1.0d0 + (1.0d0 / t)))
    t_2 = t_1 * t_1
    code = (1.0d0 + t_2) / (2.0d0 + t_2)
end function
/** Initial program: (1 + u*u)/(2 + u*u) with u = 2 - (2/t)/(1 + 1/t), binary64. */
public static double code(double t) {
	double t_1 = 2.0 - ((2.0 / t) / (1.0 + (1.0 / t)));
	double t_2 = t_1 * t_1;
	return (1.0 + t_2) / (2.0 + t_2);
}
def code(t):
	"""Initial program: (1 + u*u)/(2 + u*u) with u = 2 - (2/t)/(1 + 1/t)."""
	t_1 = 2.0 - ((2.0 / t) / (1.0 + (1.0 / t)))
	t_2 = t_1 * t_1
	return (1.0 + t_2) / (2.0 + t_2)
# Initial program: (1 + u*u)/(2 + u*u) with u = 2 - (2/t)/(1 + 1/t); Float64 at every step.
function code(t)
	t_1 = Float64(2.0 - Float64(Float64(2.0 / t) / Float64(1.0 + Float64(1.0 / t))))
	t_2 = Float64(t_1 * t_1)
	return Float64(Float64(1.0 + t_2) / Float64(2.0 + t_2))
end
% Initial program: (1 + u*u)/(2 + u*u) with u = 2 - (2/t)/(1 + 1/t), double precision.
function tmp = code(t)
	t_1 = 2.0 - ((2.0 / t) / (1.0 + (1.0 / t)));
	t_2 = t_1 * t_1;
	tmp = (1.0 + t_2) / (2.0 + t_2);
end
(* Initial program: (1 + u*u)/(2 + u*u) with u = 2 - (2/t)/(1 + 1/t), each step rounded to $MachinePrecision. *)
code[t_] := Block[{t$95$1 = N[(2.0 - N[(N[(2.0 / t), $MachinePrecision] / N[(1.0 + N[(1.0 / t), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$2 = N[(t$95$1 * t$95$1), $MachinePrecision]}, N[(N[(1.0 + t$95$2), $MachinePrecision] / N[(2.0 + t$95$2), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
t_1 := 2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\\
t_2 := t_1 \cdot t_1\\
\frac{1 + t_2}{2 + t_2}
\end{array}
\end{array}

Alternative 1: 100.0% accurate, 1.5× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_1 := 2 + \frac{2}{-1 - t}\\ t_2 := t\_1 \cdot t\_1\\ \frac{1 + t\_2}{2 + t\_2} \end{array} \end{array} \]
;; Alternative 1 (100.0% accurate, 1.5x speedup): the inner expression is
;; algebraically simplified to u = 2 + 2/(-1 - t).
(FPCore (t)
 :precision binary64
 (let* ((t_1 (+ 2.0 (/ 2.0 (- -1.0 t)))) (t_2 (* t_1 t_1)))
   (/ (+ 1.0 t_2) (+ 2.0 t_2))))
/* Alternative 1: same quantity via the simplified form u = 2 + 2/(-1 - t),
 * which is algebraically equal to 2 - (2/t)/(1 + 1/t). */
double code(double t) {
	double u = 2.0 + (2.0 / (-1.0 - t));
	double u_sq = u * u;
	return (1.0 + u_sq) / (2.0 + u_sq);
}
! Alternative 1: (1 + u*u)/(2 + u*u) via the simplified form u = 2 + 2/(-1 - t).
real(8) function code(t)
    real(8), intent (in) :: t
    real(8) :: t_1
    real(8) :: t_2
    t_1 = 2.0d0 + (2.0d0 / ((-1.0d0) - t))
    t_2 = t_1 * t_1
    code = (1.0d0 + t_2) / (2.0d0 + t_2)
end function
/** Alternative 1: (1 + u*u)/(2 + u*u) via the simplified form u = 2 + 2/(-1 - t). */
public static double code(double t) {
	double t_1 = 2.0 + (2.0 / (-1.0 - t));
	double t_2 = t_1 * t_1;
	return (1.0 + t_2) / (2.0 + t_2);
}
def code(t):
    """Alternative 1: (1 + u*u)/(2 + u*u) via the simplified u = 2 + 2/(-1 - t)."""
    u = 2.0 + (2.0 / (-1.0 - t))
    u_sq = u * u
    return (1.0 + u_sq) / (2.0 + u_sq)
# Alternative 1: (1 + u*u)/(2 + u*u) via the simplified form u = 2 + 2/(-1 - t); Float64 at every step.
function code(t)
	t_1 = Float64(2.0 + Float64(2.0 / Float64(-1.0 - t)))
	t_2 = Float64(t_1 * t_1)
	return Float64(Float64(1.0 + t_2) / Float64(2.0 + t_2))
end
% Alternative 1: (1 + u*u)/(2 + u*u) via the simplified form u = 2 + 2/(-1 - t).
function tmp = code(t)
	t_1 = 2.0 + (2.0 / (-1.0 - t));
	t_2 = t_1 * t_1;
	tmp = (1.0 + t_2) / (2.0 + t_2);
end
(* Alternative 1: (1 + u*u)/(2 + u*u) via the simplified form u = 2 + 2/(-1 - t), rounded to $MachinePrecision. *)
code[t_] := Block[{t$95$1 = N[(2.0 + N[(2.0 / N[(-1.0 - t), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$2 = N[(t$95$1 * t$95$1), $MachinePrecision]}, N[(N[(1.0 + t$95$2), $MachinePrecision] / N[(2.0 + t$95$2), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
t_1 := 2 + \frac{2}{-1 - t}\\
t_2 := t_1 \cdot t_1\\
\frac{1 + t_2}{2 + t_2}
\end{array}
\end{array}
Derivation
  1. Initial program 100.0%

    \[\frac{1 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)} \]
  2. Add Preprocessing
  3. Step-by-step derivation
    1. *-un-lft-identity100.0%

      \[\leadsto \frac{1 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \color{blue}{1 \cdot \frac{\frac{2}{t}}{1 + \frac{1}{t}}}\right)} \]
  4. Applied egg-rr100.0%

    \[\leadsto \frac{1 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \color{blue}{1 \cdot \frac{\frac{2}{t}}{1 + \frac{1}{t}}}\right)} \]
  5. Step-by-step derivation
    1. *-lft-identity100.0%

      \[\leadsto \frac{1 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \color{blue}{\frac{\frac{2}{t}}{1 + \frac{1}{t}}}\right)} \]
    2. associate-/r*100.0%

      \[\leadsto \frac{1 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \color{blue}{\frac{2}{t \cdot \left(1 + \frac{1}{t}\right)}}\right)} \]
    3. distribute-lft-in100.0%

      \[\leadsto \frac{1 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{2}{\color{blue}{t \cdot 1 + t \cdot \frac{1}{t}}}\right)} \]
    4. *-rgt-identity100.0%

      \[\leadsto \frac{1 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{2}{\color{blue}{t} + t \cdot \frac{1}{t}}\right)} \]
    5. rgt-mult-inverse100.0%

      \[\leadsto \frac{1 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{2}{t + \color{blue}{1}}\right)} \]
  6. Simplified100.0%

    \[\leadsto \frac{1 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \color{blue}{\frac{2}{t + 1}}\right)} \]
  7. Step-by-step derivation
    1. *-un-lft-identity100.0%

      \[\leadsto \frac{1 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \color{blue}{1 \cdot \frac{\frac{2}{t}}{1 + \frac{1}{t}}}\right)} \]
  8. Applied egg-rr100.0%

    \[\leadsto \frac{1 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)}{2 + \left(2 - \color{blue}{1 \cdot \frac{\frac{2}{t}}{1 + \frac{1}{t}}}\right) \cdot \left(2 - \frac{2}{t + 1}\right)} \]
  9. Step-by-step derivation
    1. *-lft-identity100.0%

      \[\leadsto \frac{1 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \color{blue}{\frac{\frac{2}{t}}{1 + \frac{1}{t}}}\right)} \]
    2. associate-/r*100.0%

      \[\leadsto \frac{1 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \color{blue}{\frac{2}{t \cdot \left(1 + \frac{1}{t}\right)}}\right)} \]
    3. distribute-lft-in100.0%

      \[\leadsto \frac{1 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{2}{\color{blue}{t \cdot 1 + t \cdot \frac{1}{t}}}\right)} \]
    4. *-rgt-identity100.0%

      \[\leadsto \frac{1 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{2}{\color{blue}{t} + t \cdot \frac{1}{t}}\right)} \]
    5. rgt-mult-inverse100.0%

      \[\leadsto \frac{1 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{2}{t + \color{blue}{1}}\right)} \]
  10. Simplified100.0%

    \[\leadsto \frac{1 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)}{2 + \left(2 - \color{blue}{\frac{2}{t + 1}}\right) \cdot \left(2 - \frac{2}{t + 1}\right)} \]
  11. Step-by-step derivation
    1. *-un-lft-identity100.0%

      \[\leadsto \frac{1 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \color{blue}{1 \cdot \frac{\frac{2}{t}}{1 + \frac{1}{t}}}\right)} \]
  12. Applied egg-rr100.0%

    \[\leadsto \frac{1 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \color{blue}{1 \cdot \frac{\frac{2}{t}}{1 + \frac{1}{t}}}\right)}{2 + \left(2 - \frac{2}{t + 1}\right) \cdot \left(2 - \frac{2}{t + 1}\right)} \]
  13. Step-by-step derivation
    1. *-lft-identity100.0%

      \[\leadsto \frac{1 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \color{blue}{\frac{\frac{2}{t}}{1 + \frac{1}{t}}}\right)} \]
    2. associate-/r*100.0%

      \[\leadsto \frac{1 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \color{blue}{\frac{2}{t \cdot \left(1 + \frac{1}{t}\right)}}\right)} \]
    3. distribute-lft-in100.0%

      \[\leadsto \frac{1 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{2}{\color{blue}{t \cdot 1 + t \cdot \frac{1}{t}}}\right)} \]
    4. *-rgt-identity100.0%

      \[\leadsto \frac{1 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{2}{\color{blue}{t} + t \cdot \frac{1}{t}}\right)} \]
    5. rgt-mult-inverse100.0%

      \[\leadsto \frac{1 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{2}{t + \color{blue}{1}}\right)} \]
  14. Simplified100.0%

    \[\leadsto \frac{1 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \color{blue}{\frac{2}{t + 1}}\right)}{2 + \left(2 - \frac{2}{t + 1}\right) \cdot \left(2 - \frac{2}{t + 1}\right)} \]
  15. Step-by-step derivation
    1. *-un-lft-identity100.0%

      \[\leadsto \frac{1 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \color{blue}{1 \cdot \frac{\frac{2}{t}}{1 + \frac{1}{t}}}\right)} \]
  16. Applied egg-rr100.0%

    \[\leadsto \frac{1 + \left(2 - \color{blue}{1 \cdot \frac{\frac{2}{t}}{1 + \frac{1}{t}}}\right) \cdot \left(2 - \frac{2}{t + 1}\right)}{2 + \left(2 - \frac{2}{t + 1}\right) \cdot \left(2 - \frac{2}{t + 1}\right)} \]
  17. Step-by-step derivation
    1. *-lft-identity100.0%

      \[\leadsto \frac{1 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \color{blue}{\frac{\frac{2}{t}}{1 + \frac{1}{t}}}\right)} \]
    2. associate-/r*100.0%

      \[\leadsto \frac{1 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \color{blue}{\frac{2}{t \cdot \left(1 + \frac{1}{t}\right)}}\right)} \]
    3. distribute-lft-in100.0%

      \[\leadsto \frac{1 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{2}{\color{blue}{t \cdot 1 + t \cdot \frac{1}{t}}}\right)} \]
    4. *-rgt-identity100.0%

      \[\leadsto \frac{1 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{2}{\color{blue}{t} + t \cdot \frac{1}{t}}\right)} \]
    5. rgt-mult-inverse100.0%

      \[\leadsto \frac{1 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{2}{t + \color{blue}{1}}\right)} \]
  18. Simplified100.0%

    \[\leadsto \frac{1 + \left(2 - \color{blue}{\frac{2}{t + 1}}\right) \cdot \left(2 - \frac{2}{t + 1}\right)}{2 + \left(2 - \frac{2}{t + 1}\right) \cdot \left(2 - \frac{2}{t + 1}\right)} \]
  19. Final simplification100.0%

    \[\leadsto \frac{1 + \left(2 + \frac{2}{-1 - t}\right) \cdot \left(2 + \frac{2}{-1 - t}\right)}{2 + \left(2 + \frac{2}{-1 - t}\right) \cdot \left(2 + \frac{2}{-1 - t}\right)} \]
  20. Add Preprocessing

Alternative 2: 99.4% accurate, 1.5× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_1 := \frac{8 + \frac{-12}{t}}{t}\\ \mathbf{if}\;t \leq -0.6:\\ \;\;\;\;\frac{5 - t\_1}{2 + \left(4 - t\_1\right)}\\ \mathbf{elif}\;t \leq 0.6:\\ \;\;\;\;\frac{1 + \left(2 \cdot t\right) \cdot \left(t \cdot \left(2 + t \cdot -2\right)\right)}{2 + \left(2 \cdot t\right) \cdot \left(2 \cdot t\right)}\\ \mathbf{else}:\\ \;\;\;\;0.8333333333333334 + \frac{\frac{0.037037037037037035 + \frac{0.04938271604938271}{t}}{t} - 0.2222222222222222}{t}\\ \end{array} \end{array} \]
;; Alternative 2 (99.4% accurate, 1.5x speedup): piecewise form with regimes
;; split at t = -0.6 and t = 0.6 (rational form / Taylor series around 0 /
;; Taylor series around -inf).
(FPCore (t)
 :precision binary64
 (let* ((t_1 (/ (+ 8.0 (/ -12.0 t)) t)))
   (if (<= t -0.6)
     (/ (- 5.0 t_1) (+ 2.0 (- 4.0 t_1)))
     (if (<= t 0.6)
       (/
        (+ 1.0 (* (* 2.0 t) (* t (+ 2.0 (* t -2.0)))))
        (+ 2.0 (* (* 2.0 t) (* 2.0 t))))
       (+
        0.8333333333333334
        (/
         (-
          (/ (+ 0.037037037037037035 (/ 0.04938271604938271 t)) t)
          0.2222222222222222)
         t))))))
/* Alternative 2: piecewise approximation with regimes split at t = -0.6 and
 * t = 0.6. Note t_1 is computed eagerly even on branches that do not use it. */
double code(double t) {
	double t_1 = (8.0 + (-12.0 / t)) / t;
	double tmp;
	if (t <= -0.6) {
		/* large negative t: rational form in t_1 */
		tmp = (5.0 - t_1) / (2.0 + (4.0 - t_1));
	} else if (t <= 0.6) {
		/* small |t|: series expansion around 0 */
		tmp = (1.0 + ((2.0 * t) * (t * (2.0 + (t * -2.0))))) / (2.0 + ((2.0 * t) * (2.0 * t)));
	} else {
		/* large positive t: series expansion around -inf */
		tmp = 0.8333333333333334 + ((((0.037037037037037035 + (0.04938271604938271 / t)) / t) - 0.2222222222222222) / t);
	}
	return tmp;
}
! Alternative 2: piecewise approximation with regimes split at t = -0.6 and t = 0.6.
! Note t_1 is computed eagerly even on branches that do not use it.
real(8) function code(t)
    real(8), intent (in) :: t
    real(8) :: t_1
    real(8) :: tmp
    t_1 = (8.0d0 + ((-12.0d0) / t)) / t
    if (t <= (-0.6d0)) then
        tmp = (5.0d0 - t_1) / (2.0d0 + (4.0d0 - t_1))
    else if (t <= 0.6d0) then
        tmp = (1.0d0 + ((2.0d0 * t) * (t * (2.0d0 + (t * (-2.0d0)))))) / (2.0d0 + ((2.0d0 * t) * (2.0d0 * t)))
    else
        tmp = 0.8333333333333334d0 + ((((0.037037037037037035d0 + (0.04938271604938271d0 / t)) / t) - 0.2222222222222222d0) / t)
    end if
    code = tmp
end function
/** Alternative 2: piecewise approximation with regimes split at t = -0.6 and
 *  t = 0.6. Note t_1 is computed eagerly even on branches that do not use it. */
public static double code(double t) {
	double t_1 = (8.0 + (-12.0 / t)) / t;
	double tmp;
	if (t <= -0.6) {
		tmp = (5.0 - t_1) / (2.0 + (4.0 - t_1));
	} else if (t <= 0.6) {
		tmp = (1.0 + ((2.0 * t) * (t * (2.0 + (t * -2.0))))) / (2.0 + ((2.0 * t) * (2.0 * t)));
	} else {
		tmp = 0.8333333333333334 + ((((0.037037037037037035 + (0.04938271604938271 / t)) / t) - 0.2222222222222222) / t);
	}
	return tmp;
}
def code(t):
    """Alternative 2: piecewise approximation, regimes split at t = -0.6 and 0.6.

    t_1 is evaluated eagerly (as in the generated original), so t == 0 raises
    ZeroDivisionError on every branch, matching the original behavior.
    """
    t_1 = (8.0 + (-12.0 / t)) / t
    if t <= -0.6:
        # large negative t: rational form in t_1
        return (5.0 - t_1) / (2.0 + (4.0 - t_1))
    if t <= 0.6:
        # small |t|: series expansion around 0
        return (1.0 + (2.0 * t) * (t * (2.0 + t * -2.0))) / (2.0 + (2.0 * t) * (2.0 * t))
    # large positive t: series expansion around -inf
    return 0.8333333333333334 + (((0.037037037037037035 + 0.04938271604938271 / t) / t - 0.2222222222222222) / t)
# Alternative 2: piecewise approximation with regimes split at t = -0.6 and t = 0.6.
# Note t_1 is computed eagerly even on branches that do not use it.
function code(t)
	t_1 = Float64(Float64(8.0 + Float64(-12.0 / t)) / t)
	tmp = 0.0
	if (t <= -0.6)
		tmp = Float64(Float64(5.0 - t_1) / Float64(2.0 + Float64(4.0 - t_1)));
	elseif (t <= 0.6)
		tmp = Float64(Float64(1.0 + Float64(Float64(2.0 * t) * Float64(t * Float64(2.0 + Float64(t * -2.0))))) / Float64(2.0 + Float64(Float64(2.0 * t) * Float64(2.0 * t))));
	else
		tmp = Float64(0.8333333333333334 + Float64(Float64(Float64(Float64(0.037037037037037035 + Float64(0.04938271604938271 / t)) / t) - 0.2222222222222222) / t));
	end
	return tmp
end
% Alternative 2: piecewise approximation with regimes split at t = -0.6 and t = 0.6.
% Note t_1 is computed eagerly even on branches that do not use it.
function tmp_2 = code(t)
	t_1 = (8.0 + (-12.0 / t)) / t;
	tmp = 0.0;
	if (t <= -0.6)
		tmp = (5.0 - t_1) / (2.0 + (4.0 - t_1));
	elseif (t <= 0.6)
		tmp = (1.0 + ((2.0 * t) * (t * (2.0 + (t * -2.0))))) / (2.0 + ((2.0 * t) * (2.0 * t)));
	else
		tmp = 0.8333333333333334 + ((((0.037037037037037035 + (0.04938271604938271 / t)) / t) - 0.2222222222222222) / t);
	end
	tmp_2 = tmp;
end
(* Alternative 2: piecewise approximation with regimes split at t = -0.6 and t = 0.6, rounded to $MachinePrecision. *)
code[t_] := Block[{t$95$1 = N[(N[(8.0 + N[(-12.0 / t), $MachinePrecision]), $MachinePrecision] / t), $MachinePrecision]}, If[LessEqual[t, -0.6], N[(N[(5.0 - t$95$1), $MachinePrecision] / N[(2.0 + N[(4.0 - t$95$1), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], If[LessEqual[t, 0.6], N[(N[(1.0 + N[(N[(2.0 * t), $MachinePrecision] * N[(t * N[(2.0 + N[(t * -2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(2.0 + N[(N[(2.0 * t), $MachinePrecision] * N[(2.0 * t), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(0.8333333333333334 + N[(N[(N[(N[(0.037037037037037035 + N[(0.04938271604938271 / t), $MachinePrecision]), $MachinePrecision] / t), $MachinePrecision] - 0.2222222222222222), $MachinePrecision] / t), $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}

\\
\begin{array}{l}
t_1 := \frac{8 + \frac{-12}{t}}{t}\\
\mathbf{if}\;t \leq -0.6:\\
\;\;\;\;\frac{5 - t\_1}{2 + \left(4 - t\_1\right)}\\

\mathbf{elif}\;t \leq 0.6:\\
\;\;\;\;\frac{1 + \left(2 \cdot t\right) \cdot \left(t \cdot \left(2 + t \cdot -2\right)\right)}{2 + \left(2 \cdot t\right) \cdot \left(2 \cdot t\right)}\\

\mathbf{else}:\\
\;\;\;\;0.8333333333333334 + \frac{\frac{0.037037037037037035 + \frac{0.04938271604938271}{t}}{t} - 0.2222222222222222}{t}\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if t < -0.599999999999999978

    1. Initial program 100.0%

      \[\frac{1 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)} \]
    2. Add Preprocessing
    3. Step-by-step derivation
      1. *-un-lft-identity100.0%

        \[\leadsto \frac{1 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \color{blue}{1 \cdot \frac{\frac{2}{t}}{1 + \frac{1}{t}}}\right)} \]
    4. Applied egg-rr100.0%

      \[\leadsto \frac{1 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \color{blue}{1 \cdot \frac{\frac{2}{t}}{1 + \frac{1}{t}}}\right)} \]
    5. Step-by-step derivation
      1. *-lft-identity100.0%

        \[\leadsto \frac{1 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \color{blue}{\frac{\frac{2}{t}}{1 + \frac{1}{t}}}\right)} \]
      2. associate-/r*100.0%

        \[\leadsto \frac{1 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \color{blue}{\frac{2}{t \cdot \left(1 + \frac{1}{t}\right)}}\right)} \]
      3. distribute-lft-in100.0%

        \[\leadsto \frac{1 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{2}{\color{blue}{t \cdot 1 + t \cdot \frac{1}{t}}}\right)} \]
      4. *-rgt-identity100.0%

        \[\leadsto \frac{1 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{2}{\color{blue}{t} + t \cdot \frac{1}{t}}\right)} \]
      5. rgt-mult-inverse100.0%

        \[\leadsto \frac{1 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{2}{t + \color{blue}{1}}\right)} \]
    6. Simplified100.0%

      \[\leadsto \frac{1 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \color{blue}{\frac{2}{t + 1}}\right)} \]
    7. Taylor expanded in t around -inf 100.0%

      \[\leadsto \frac{\color{blue}{5 + -1 \cdot \frac{8 - 12 \cdot \frac{1}{t}}{t}}}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{2}{t + 1}\right)} \]
    8. Step-by-step derivation
      1. mul-1-neg100.0%

        \[\leadsto \frac{5 + \color{blue}{\left(-\frac{8 - 12 \cdot \frac{1}{t}}{t}\right)}}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{2}{t + 1}\right)} \]
      2. unsub-neg100.0%

        \[\leadsto \frac{\color{blue}{5 - \frac{8 - 12 \cdot \frac{1}{t}}{t}}}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{2}{t + 1}\right)} \]
      3. sub-neg100.0%

        \[\leadsto \frac{5 - \frac{\color{blue}{8 + \left(-12 \cdot \frac{1}{t}\right)}}{t}}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{2}{t + 1}\right)} \]
      4. associate-*r/100.0%

        \[\leadsto \frac{5 - \frac{8 + \left(-\color{blue}{\frac{12 \cdot 1}{t}}\right)}{t}}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{2}{t + 1}\right)} \]
      5. metadata-eval100.0%

        \[\leadsto \frac{5 - \frac{8 + \left(-\frac{\color{blue}{12}}{t}\right)}{t}}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{2}{t + 1}\right)} \]
      6. distribute-neg-frac100.0%

        \[\leadsto \frac{5 - \frac{8 + \color{blue}{\frac{-12}{t}}}{t}}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{2}{t + 1}\right)} \]
      7. metadata-eval100.0%

        \[\leadsto \frac{5 - \frac{8 + \frac{\color{blue}{-12}}{t}}{t}}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{2}{t + 1}\right)} \]
    9. Simplified100.0%

      \[\leadsto \frac{\color{blue}{5 - \frac{8 + \frac{-12}{t}}{t}}}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{2}{t + 1}\right)} \]
    10. Taylor expanded in t around -inf 100.0%

      \[\leadsto \frac{5 - \frac{8 + \frac{-12}{t}}{t}}{2 + \color{blue}{\left(4 + -1 \cdot \frac{8 - 12 \cdot \frac{1}{t}}{t}\right)}} \]
    11. Step-by-step derivation
      1. mul-1-neg100.0%

        \[\leadsto \frac{5 - \frac{8 + \frac{-12}{t}}{t}}{2 + \left(4 + \color{blue}{\left(-\frac{8 - 12 \cdot \frac{1}{t}}{t}\right)}\right)} \]
      2. unsub-neg100.0%

        \[\leadsto \frac{5 - \frac{8 + \frac{-12}{t}}{t}}{2 + \color{blue}{\left(4 - \frac{8 - 12 \cdot \frac{1}{t}}{t}\right)}} \]
      3. sub-neg100.0%

        \[\leadsto \frac{5 - \frac{8 + \frac{-12}{t}}{t}}{2 + \left(4 - \frac{\color{blue}{8 + \left(-12 \cdot \frac{1}{t}\right)}}{t}\right)} \]
      4. associate-*r/100.0%

        \[\leadsto \frac{5 - \frac{8 + \frac{-12}{t}}{t}}{2 + \left(4 - \frac{8 + \left(-\color{blue}{\frac{12 \cdot 1}{t}}\right)}{t}\right)} \]
      5. metadata-eval100.0%

        \[\leadsto \frac{5 - \frac{8 + \frac{-12}{t}}{t}}{2 + \left(4 - \frac{8 + \left(-\frac{\color{blue}{12}}{t}\right)}{t}\right)} \]
      6. distribute-neg-frac100.0%

        \[\leadsto \frac{5 - \frac{8 + \frac{-12}{t}}{t}}{2 + \left(4 - \frac{8 + \color{blue}{\frac{-12}{t}}}{t}\right)} \]
      7. metadata-eval100.0%

        \[\leadsto \frac{5 - \frac{8 + \frac{-12}{t}}{t}}{2 + \left(4 - \frac{8 + \frac{\color{blue}{-12}}{t}}{t}\right)} \]
    12. Simplified100.0%

      \[\leadsto \frac{5 - \frac{8 + \frac{-12}{t}}{t}}{2 + \color{blue}{\left(4 - \frac{8 + \frac{-12}{t}}{t}\right)}} \]

    if -0.599999999999999978 < t < 0.599999999999999978

    1. Initial program 100.0%

      \[\frac{1 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)} \]
    2. Add Preprocessing
    3. Taylor expanded in t around 0 99.6%

      \[\leadsto \frac{1 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \color{blue}{\left(2 \cdot t\right)}} \]
    4. Step-by-step derivation
      1. *-commutative99.6%

        \[\leadsto \frac{1 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \color{blue}{\left(t \cdot 2\right)}} \]
    5. Simplified99.6%

      \[\leadsto \frac{1 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \color{blue}{\left(t \cdot 2\right)}} \]
    6. Taylor expanded in t around 0 99.6%

      \[\leadsto \frac{1 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \color{blue}{\left(t \cdot \left(2 + -2 \cdot t\right)\right)}}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(t \cdot 2\right)} \]
    7. Step-by-step derivation
      1. *-commutative99.6%

        \[\leadsto \frac{1 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(t \cdot \left(2 + \color{blue}{t \cdot -2}\right)\right)}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(t \cdot 2\right)} \]
    8. Simplified99.6%

      \[\leadsto \frac{1 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \color{blue}{\left(t \cdot \left(2 + t \cdot -2\right)\right)}}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(t \cdot 2\right)} \]
    9. Taylor expanded in t around 0 99.6%

      \[\leadsto \frac{1 + \color{blue}{\left(2 \cdot t\right)} \cdot \left(t \cdot \left(2 + t \cdot -2\right)\right)}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(t \cdot 2\right)} \]
    10. Step-by-step derivation
      1. *-commutative99.6%

        \[\leadsto \frac{1 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \color{blue}{\left(t \cdot 2\right)}} \]
    11. Simplified99.6%

      \[\leadsto \frac{1 + \color{blue}{\left(t \cdot 2\right)} \cdot \left(t \cdot \left(2 + t \cdot -2\right)\right)}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(t \cdot 2\right)} \]
    12. Taylor expanded in t around 0 99.7%

      \[\leadsto \frac{1 + \left(t \cdot 2\right) \cdot \left(t \cdot \left(2 + t \cdot -2\right)\right)}{2 + \color{blue}{\left(2 \cdot t\right)} \cdot \left(t \cdot 2\right)} \]
    13. Step-by-step derivation
      1. *-commutative99.6%

        \[\leadsto \frac{1 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \color{blue}{\left(t \cdot 2\right)}} \]
    14. Simplified99.7%

      \[\leadsto \frac{1 + \left(t \cdot 2\right) \cdot \left(t \cdot \left(2 + t \cdot -2\right)\right)}{2 + \color{blue}{\left(t \cdot 2\right)} \cdot \left(t \cdot 2\right)} \]

    if 0.599999999999999978 < t

    1. Initial program 100.0%

      \[\frac{1 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)} \]
    2. Add Preprocessing
    3. Taylor expanded in t around -inf 99.5%

      \[\leadsto \color{blue}{0.8333333333333334 + -1 \cdot \frac{0.2222222222222222 + -1 \cdot \frac{0.037037037037037035 + 0.04938271604938271 \cdot \frac{1}{t}}{t}}{t}} \]
    4. Step-by-step derivation
      1. mul-1-neg99.5%

        \[\leadsto 0.8333333333333334 + \color{blue}{\left(-\frac{0.2222222222222222 + -1 \cdot \frac{0.037037037037037035 + 0.04938271604938271 \cdot \frac{1}{t}}{t}}{t}\right)} \]
      2. unsub-neg99.5%

        \[\leadsto \color{blue}{0.8333333333333334 - \frac{0.2222222222222222 + -1 \cdot \frac{0.037037037037037035 + 0.04938271604938271 \cdot \frac{1}{t}}{t}}{t}} \]
      3. mul-1-neg99.5%

        \[\leadsto 0.8333333333333334 - \frac{0.2222222222222222 + \color{blue}{\left(-\frac{0.037037037037037035 + 0.04938271604938271 \cdot \frac{1}{t}}{t}\right)}}{t} \]
      4. unsub-neg99.5%

        \[\leadsto 0.8333333333333334 - \frac{\color{blue}{0.2222222222222222 - \frac{0.037037037037037035 + 0.04938271604938271 \cdot \frac{1}{t}}{t}}}{t} \]
      5. associate-*r/99.5%

        \[\leadsto 0.8333333333333334 - \frac{0.2222222222222222 - \frac{0.037037037037037035 + \color{blue}{\frac{0.04938271604938271 \cdot 1}{t}}}{t}}{t} \]
      6. metadata-eval99.5%

        \[\leadsto 0.8333333333333334 - \frac{0.2222222222222222 - \frac{0.037037037037037035 + \frac{\color{blue}{0.04938271604938271}}{t}}{t}}{t} \]
    5. Simplified99.5%

      \[\leadsto \color{blue}{0.8333333333333334 - \frac{0.2222222222222222 - \frac{0.037037037037037035 + \frac{0.04938271604938271}{t}}{t}}{t}} \]
  3. Recombined 3 regimes into one program.
  4. Final simplification99.7%

    \[\leadsto \begin{array}{l} \mathbf{if}\;t \leq -0.6:\\ \;\;\;\;\frac{5 - \frac{8 + \frac{-12}{t}}{t}}{2 + \left(4 - \frac{8 + \frac{-12}{t}}{t}\right)}\\ \mathbf{elif}\;t \leq 0.6:\\ \;\;\;\;\frac{1 + \left(2 \cdot t\right) \cdot \left(t \cdot \left(2 + t \cdot -2\right)\right)}{2 + \left(2 \cdot t\right) \cdot \left(2 \cdot t\right)}\\ \mathbf{else}:\\ \;\;\;\;0.8333333333333334 + \frac{\frac{0.037037037037037035 + \frac{0.04938271604938271}{t}}{t} - 0.2222222222222222}{t}\\ \end{array} \]
  5. Add Preprocessing

Alternative 3: 99.1% accurate, 2.0× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_1 := \frac{8 + \frac{-12}{t}}{t}\\ \mathbf{if}\;t \leq -0.41:\\ \;\;\;\;\frac{5 - t\_1}{2 + \left(4 - t\_1\right)}\\ \mathbf{elif}\;t \leq 0.66:\\ \;\;\;\;0.5\\ \mathbf{else}:\\ \;\;\;\;0.8333333333333334 + \frac{\frac{0.037037037037037035 + \frac{0.04938271604938271}{t}}{t} - 0.2222222222222222}{t}\\ \end{array} \end{array} \]
;; Alternative 3 (99.1% accurate, 2.0x speedup): like Alternative 2 but the
;; middle regime (-0.41 < t <= 0.66) is replaced by the constant 0.5.
(FPCore (t)
 :precision binary64
 (let* ((t_1 (/ (+ 8.0 (/ -12.0 t)) t)))
   (if (<= t -0.41)
     (/ (- 5.0 t_1) (+ 2.0 (- 4.0 t_1)))
     (if (<= t 0.66)
       0.5
       (+
        0.8333333333333334
        (/
         (-
          (/ (+ 0.037037037037037035 (/ 0.04938271604938271 t)) t)
          0.2222222222222222)
         t))))))
/* Herbie alternative 3 (99.1% accurate, 2.0x speedup): three-regime
 * approximation of (1 + u*u) / (2 + u*u) with u = 2 - (2/t)/(1 + 1/t). */
double code(double t) {
	/* Shared subterm for the t <= -0.41 regime (computed unconditionally). */
	double t_1 = (8.0 + (-12.0 / t)) / t;
	double tmp;
	if (t <= -0.41) {
		/* Large-|t| negative regime: series expansion about -inf. */
		tmp = (5.0 - t_1) / (2.0 + (4.0 - t_1));
	} else if (t <= 0.66) {
		/* Near zero the Taylor expansion reduces to the constant 0.5. */
		tmp = 0.5;
	} else {
		/* Large positive t: series in powers of 1/t; 0.8333... = 5/6, 0.2222... = 2/9. */
		tmp = 0.8333333333333334 + ((((0.037037037037037035 + (0.04938271604938271 / t)) / t) - 0.2222222222222222) / t);
	}
	return tmp;
}
! Herbie alternative 3 (99.1% accurate): three-regime approximation of
! (1 + u*u) / (2 + u*u) with u = 2 - (2/t)/(1 + 1/t), split at t = -0.41 and t = 0.66.
real(8) function code(t)
    real(8), intent (in) :: t
    real(8) :: t_1
    real(8) :: tmp
    ! Shared subterm for the t <= -0.41 regime (computed unconditionally).
    t_1 = (8.0d0 + ((-12.0d0) / t)) / t
    if (t <= (-0.41d0)) then
        ! Series expansion about -infinity.
        tmp = (5.0d0 - t_1) / (2.0d0 + (4.0d0 - t_1))
    else if (t <= 0.66d0) then
        ! Near zero the Taylor expansion reduces to the constant 0.5.
        tmp = 0.5d0
    else
        ! Large positive t: series in powers of 1/t (5/6, 2/9, 1/27, 4/81).
        tmp = 0.8333333333333334d0 + ((((0.037037037037037035d0 + (0.04938271604938271d0 / t)) / t) - 0.2222222222222222d0) / t)
    end if
    code = tmp
end function
// Herbie alternative 3 (99.1% accurate, 2.0x speedup): three-regime
// approximation of (1 + u*u) / (2 + u*u) with u = 2 - (2/t)/(1 + 1/t).
public static double code(double t) {
	// Shared subterm for the t <= -0.41 regime (computed unconditionally).
	double t_1 = (8.0 + (-12.0 / t)) / t;
	double tmp;
	if (t <= -0.41) {
		// Large-|t| negative regime: series expansion about -inf.
		tmp = (5.0 - t_1) / (2.0 + (4.0 - t_1));
	} else if (t <= 0.66) {
		// Near zero the Taylor expansion reduces to the constant 0.5.
		tmp = 0.5;
	} else {
		// Large positive t: series in powers of 1/t; 0.8333... = 5/6, 0.2222... = 2/9.
		tmp = 0.8333333333333334 + ((((0.037037037037037035 + (0.04938271604938271 / t)) / t) - 0.2222222222222222) / t);
	}
	return tmp;
}
def code(t):
	"""Herbie alternative 3 (99.1% accurate): three-regime approximation of
	(1 + u*u) / (2 + u*u) where u = 2 - (2/t)/(1 + 1/t).

	Regimes (from the report's derivation): a series about -inf for
	t <= -0.41, the constant 0.5 near zero, and a 1/t series for t > 0.66.
	NaN inputs fall through to the last branch, matching the original
	if/elif/else. Raises ZeroDivisionError at t == 0.0 because t_1 is
	always computed.
	"""
	# Shared subterm used by the t <= -0.41 regime (computed unconditionally,
	# as in the original).
	t_1 = (8.0 + (-12.0 / t)) / t
	if t <= -0.41:
		return (5.0 - t_1) / (2.0 + (4.0 - t_1))
	if t <= 0.66:
		return 0.5
	# Large positive t: series in powers of 1/t; 0.8333... = 5/6, 0.2222... = 2/9.
	return 0.8333333333333334 + ((((0.037037037037037035 + (0.04938271604938271 / t)) / t) - 0.2222222222222222) / t)
# Herbie alternative 3 (99.1% accurate): three-regime approximation of
# (1 + u*u) / (2 + u*u) with u = 2 - (2/t)/(1 + 1/t), split at t = -0.41 and t = 0.66.
function code(t)
	# Shared subterm for the t <= -0.41 regime (computed unconditionally).
	t_1 = Float64(Float64(8.0 + Float64(-12.0 / t)) / t)
	tmp = 0.0
	if (t <= -0.41)
		# Series expansion about -infinity.
		tmp = Float64(Float64(5.0 - t_1) / Float64(2.0 + Float64(4.0 - t_1)));
	elseif (t <= 0.66)
		# Near zero the Taylor expansion reduces to the constant 0.5.
		tmp = 0.5;
	else
		# Large positive t: series in powers of 1/t (5/6, 2/9, 1/27, 4/81).
		tmp = Float64(0.8333333333333334 + Float64(Float64(Float64(Float64(0.037037037037037035 + Float64(0.04938271604938271 / t)) / t) - 0.2222222222222222) / t));
	end
	return tmp
end
% Herbie alternative 3 (99.1% accurate): three-regime approximation of
% (1 + u*u) / (2 + u*u) with u = 2 - (2/t)/(1 + 1/t), split at t = -0.41 and t = 0.66.
function tmp_2 = code(t)
	% Shared subterm for the t <= -0.41 regime (computed unconditionally).
	t_1 = (8.0 + (-12.0 / t)) / t;
	tmp = 0.0;
	if (t <= -0.41)
		% Series expansion about -infinity.
		tmp = (5.0 - t_1) / (2.0 + (4.0 - t_1));
	elseif (t <= 0.66)
		% Near zero the Taylor expansion reduces to the constant 0.5.
		tmp = 0.5;
	else
		% Large positive t: series in powers of 1/t (5/6, 2/9, 1/27, 4/81).
		tmp = 0.8333333333333334 + ((((0.037037037037037035 + (0.04938271604938271 / t)) / t) - 0.2222222222222222) / t);
	end
	tmp_2 = tmp;
end
(* Herbie alternative 3 (99.1% accurate): three-regime approximation split at t = -0.41 and t = 0.66; t$95$1 is the shared subterm (8 - 12/t)/t. *)
code[t_] := Block[{t$95$1 = N[(N[(8.0 + N[(-12.0 / t), $MachinePrecision]), $MachinePrecision] / t), $MachinePrecision]}, If[LessEqual[t, -0.41], N[(N[(5.0 - t$95$1), $MachinePrecision] / N[(2.0 + N[(4.0 - t$95$1), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], If[LessEqual[t, 0.66], 0.5, N[(0.8333333333333334 + N[(N[(N[(N[(0.037037037037037035 + N[(0.04938271604938271 / t), $MachinePrecision]), $MachinePrecision] / t), $MachinePrecision] - 0.2222222222222222), $MachinePrecision] / t), $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}

\\
\begin{array}{l}
t_1 := \frac{8 + \frac{-12}{t}}{t}\\
\mathbf{if}\;t \leq -0.41:\\
\;\;\;\;\frac{5 - t\_1}{2 + \left(4 - t\_1\right)}\\

\mathbf{elif}\;t \leq 0.66:\\
\;\;\;\;0.5\\

\mathbf{else}:\\
\;\;\;\;0.8333333333333334 + \frac{\frac{0.037037037037037035 + \frac{0.04938271604938271}{t}}{t} - 0.2222222222222222}{t}\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if t < -0.409999999999999976

    1. Initial program 100.0%

      \[\frac{1 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)} \]
    2. Add Preprocessing
    3. Step-by-step derivation
      1. *-un-lft-identity100.0%

        \[\leadsto \frac{1 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \color{blue}{1 \cdot \frac{\frac{2}{t}}{1 + \frac{1}{t}}}\right)} \]
    4. Applied egg-rr100.0%

      \[\leadsto \frac{1 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \color{blue}{1 \cdot \frac{\frac{2}{t}}{1 + \frac{1}{t}}}\right)} \]
    5. Step-by-step derivation
      1. *-lft-identity100.0%

        \[\leadsto \frac{1 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \color{blue}{\frac{\frac{2}{t}}{1 + \frac{1}{t}}}\right)} \]
      2. associate-/r*100.0%

        \[\leadsto \frac{1 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \color{blue}{\frac{2}{t \cdot \left(1 + \frac{1}{t}\right)}}\right)} \]
      3. distribute-lft-in100.0%

        \[\leadsto \frac{1 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{2}{\color{blue}{t \cdot 1 + t \cdot \frac{1}{t}}}\right)} \]
      4. *-rgt-identity100.0%

        \[\leadsto \frac{1 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{2}{\color{blue}{t} + t \cdot \frac{1}{t}}\right)} \]
      5. rgt-mult-inverse100.0%

        \[\leadsto \frac{1 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{2}{t + \color{blue}{1}}\right)} \]
    6. Simplified100.0%

      \[\leadsto \frac{1 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \color{blue}{\frac{2}{t + 1}}\right)} \]
    7. Taylor expanded in t around -inf 100.0%

      \[\leadsto \frac{\color{blue}{5 + -1 \cdot \frac{8 - 12 \cdot \frac{1}{t}}{t}}}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{2}{t + 1}\right)} \]
    8. Step-by-step derivation
      1. mul-1-neg100.0%

        \[\leadsto \frac{5 + \color{blue}{\left(-\frac{8 - 12 \cdot \frac{1}{t}}{t}\right)}}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{2}{t + 1}\right)} \]
      2. unsub-neg100.0%

        \[\leadsto \frac{\color{blue}{5 - \frac{8 - 12 \cdot \frac{1}{t}}{t}}}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{2}{t + 1}\right)} \]
      3. sub-neg100.0%

        \[\leadsto \frac{5 - \frac{\color{blue}{8 + \left(-12 \cdot \frac{1}{t}\right)}}{t}}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{2}{t + 1}\right)} \]
      4. associate-*r/100.0%

        \[\leadsto \frac{5 - \frac{8 + \left(-\color{blue}{\frac{12 \cdot 1}{t}}\right)}{t}}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{2}{t + 1}\right)} \]
      5. metadata-eval100.0%

        \[\leadsto \frac{5 - \frac{8 + \left(-\frac{\color{blue}{12}}{t}\right)}{t}}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{2}{t + 1}\right)} \]
      6. distribute-neg-frac100.0%

        \[\leadsto \frac{5 - \frac{8 + \color{blue}{\frac{-12}{t}}}{t}}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{2}{t + 1}\right)} \]
      7. metadata-eval100.0%

        \[\leadsto \frac{5 - \frac{8 + \frac{\color{blue}{-12}}{t}}{t}}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{2}{t + 1}\right)} \]
    9. Simplified100.0%

      \[\leadsto \frac{\color{blue}{5 - \frac{8 + \frac{-12}{t}}{t}}}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{2}{t + 1}\right)} \]
    10. Taylor expanded in t around -inf 100.0%

      \[\leadsto \frac{5 - \frac{8 + \frac{-12}{t}}{t}}{2 + \color{blue}{\left(4 + -1 \cdot \frac{8 - 12 \cdot \frac{1}{t}}{t}\right)}} \]
    11. Step-by-step derivation
      1. mul-1-neg100.0%

        \[\leadsto \frac{5 - \frac{8 + \frac{-12}{t}}{t}}{2 + \left(4 + \color{blue}{\left(-\frac{8 - 12 \cdot \frac{1}{t}}{t}\right)}\right)} \]
      2. unsub-neg100.0%

        \[\leadsto \frac{5 - \frac{8 + \frac{-12}{t}}{t}}{2 + \color{blue}{\left(4 - \frac{8 - 12 \cdot \frac{1}{t}}{t}\right)}} \]
      3. sub-neg100.0%

        \[\leadsto \frac{5 - \frac{8 + \frac{-12}{t}}{t}}{2 + \left(4 - \frac{\color{blue}{8 + \left(-12 \cdot \frac{1}{t}\right)}}{t}\right)} \]
      4. associate-*r/100.0%

        \[\leadsto \frac{5 - \frac{8 + \frac{-12}{t}}{t}}{2 + \left(4 - \frac{8 + \left(-\color{blue}{\frac{12 \cdot 1}{t}}\right)}{t}\right)} \]
      5. metadata-eval100.0%

        \[\leadsto \frac{5 - \frac{8 + \frac{-12}{t}}{t}}{2 + \left(4 - \frac{8 + \left(-\frac{\color{blue}{12}}{t}\right)}{t}\right)} \]
      6. distribute-neg-frac100.0%

        \[\leadsto \frac{5 - \frac{8 + \frac{-12}{t}}{t}}{2 + \left(4 - \frac{8 + \color{blue}{\frac{-12}{t}}}{t}\right)} \]
      7. metadata-eval100.0%

        \[\leadsto \frac{5 - \frac{8 + \frac{-12}{t}}{t}}{2 + \left(4 - \frac{8 + \frac{\color{blue}{-12}}{t}}{t}\right)} \]
    12. Simplified100.0%

      \[\leadsto \frac{5 - \frac{8 + \frac{-12}{t}}{t}}{2 + \color{blue}{\left(4 - \frac{8 + \frac{-12}{t}}{t}\right)}} \]

    if -0.409999999999999976 < t < 0.660000000000000031

    1. Initial program 100.0%

      \[\frac{1 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)} \]
    2. Add Preprocessing
    3. Taylor expanded in t around 0 99.4%

      \[\leadsto \color{blue}{0.5} \]

    if 0.660000000000000031 < t

    1. Initial program 100.0%

      \[\frac{1 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)} \]
    2. Add Preprocessing
    3. Taylor expanded in t around -inf 99.5%

      \[\leadsto \color{blue}{0.8333333333333334 + -1 \cdot \frac{0.2222222222222222 + -1 \cdot \frac{0.037037037037037035 + 0.04938271604938271 \cdot \frac{1}{t}}{t}}{t}} \]
    4. Step-by-step derivation
      1. mul-1-neg99.5%

        \[\leadsto 0.8333333333333334 + \color{blue}{\left(-\frac{0.2222222222222222 + -1 \cdot \frac{0.037037037037037035 + 0.04938271604938271 \cdot \frac{1}{t}}{t}}{t}\right)} \]
      2. unsub-neg99.5%

        \[\leadsto \color{blue}{0.8333333333333334 - \frac{0.2222222222222222 + -1 \cdot \frac{0.037037037037037035 + 0.04938271604938271 \cdot \frac{1}{t}}{t}}{t}} \]
      3. mul-1-neg99.5%

        \[\leadsto 0.8333333333333334 - \frac{0.2222222222222222 + \color{blue}{\left(-\frac{0.037037037037037035 + 0.04938271604938271 \cdot \frac{1}{t}}{t}\right)}}{t} \]
      4. unsub-neg99.5%

        \[\leadsto 0.8333333333333334 - \frac{\color{blue}{0.2222222222222222 - \frac{0.037037037037037035 + 0.04938271604938271 \cdot \frac{1}{t}}{t}}}{t} \]
      5. associate-*r/99.5%

        \[\leadsto 0.8333333333333334 - \frac{0.2222222222222222 - \frac{0.037037037037037035 + \color{blue}{\frac{0.04938271604938271 \cdot 1}{t}}}{t}}{t} \]
      6. metadata-eval99.5%

        \[\leadsto 0.8333333333333334 - \frac{0.2222222222222222 - \frac{0.037037037037037035 + \frac{\color{blue}{0.04938271604938271}}{t}}{t}}{t} \]
    5. Simplified99.5%

      \[\leadsto \color{blue}{0.8333333333333334 - \frac{0.2222222222222222 - \frac{0.037037037037037035 + \frac{0.04938271604938271}{t}}{t}}{t}} \]
  3. Recombined 3 regimes into one program.
  4. Final simplification99.6%

    \[\leadsto \begin{array}{l} \mathbf{if}\;t \leq -0.41:\\ \;\;\;\;\frac{5 - \frac{8 + \frac{-12}{t}}{t}}{2 + \left(4 - \frac{8 + \frac{-12}{t}}{t}\right)}\\ \mathbf{elif}\;t \leq 0.66:\\ \;\;\;\;0.5\\ \mathbf{else}:\\ \;\;\;\;0.8333333333333334 + \frac{\frac{0.037037037037037035 + \frac{0.04938271604938271}{t}}{t} - 0.2222222222222222}{t}\\ \end{array} \]
  5. Add Preprocessing

Alternative 4: 99.1% accurate, 2.2× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;t \leq -0.5:\\ \;\;\;\;0.8333333333333334 - \frac{0.2222222222222222 + \frac{-0.037037037037037035}{t}}{t}\\ \mathbf{elif}\;t \leq 0.66:\\ \;\;\;\;0.5\\ \mathbf{else}:\\ \;\;\;\;0.8333333333333334 + \frac{\frac{0.037037037037037035 + \frac{0.04938271604938271}{t}}{t} - 0.2222222222222222}{t}\\ \end{array} \end{array} \]
(FPCore (t)
 :precision binary64
 (if (<= t -0.5)
   (-
    0.8333333333333334
    (/ (+ 0.2222222222222222 (/ -0.037037037037037035 t)) t))
   (if (<= t 0.66)
     0.5
     (+
      0.8333333333333334
      (/
       (-
        (/ (+ 0.037037037037037035 (/ 0.04938271604938271 t)) t)
        0.2222222222222222)
       t)))))
/* Herbie alternative 4 (99.1% accurate, 2.2x speedup): three-regime
 * approximation of (1 + u*u) / (2 + u*u) with u = 2 - (2/t)/(1 + 1/t),
 * split at t = -0.5 and t = 0.66. */
double code(double t) {
	double tmp;
	if (t <= -0.5) {
		/* Series about -inf: 5/6 - (2/9 - (1/27)/t)/t. */
		tmp = 0.8333333333333334 - ((0.2222222222222222 + (-0.037037037037037035 / t)) / t);
	} else if (t <= 0.66) {
		/* Near zero the Taylor expansion reduces to the constant 0.5. */
		tmp = 0.5;
	} else {
		/* Large positive t: higher-order series in powers of 1/t. */
		tmp = 0.8333333333333334 + ((((0.037037037037037035 + (0.04938271604938271 / t)) / t) - 0.2222222222222222) / t);
	}
	return tmp;
}
! Herbie alternative 4 (99.1% accurate): three-regime approximation of
! (1 + u*u) / (2 + u*u), split at t = -0.5 and t = 0.66.
real(8) function code(t)
    real(8), intent (in) :: t
    real(8) :: tmp
    if (t <= (-0.5d0)) then
        ! Series about -infinity: 5/6 - (2/9 - (1/27)/t)/t.
        tmp = 0.8333333333333334d0 - ((0.2222222222222222d0 + ((-0.037037037037037035d0) / t)) / t)
    else if (t <= 0.66d0) then
        ! Near zero the Taylor expansion reduces to the constant 0.5.
        tmp = 0.5d0
    else
        ! Large positive t: higher-order series in powers of 1/t.
        tmp = 0.8333333333333334d0 + ((((0.037037037037037035d0 + (0.04938271604938271d0 / t)) / t) - 0.2222222222222222d0) / t)
    end if
    code = tmp
end function
// Herbie alternative 4 (99.1% accurate, 2.2x speedup): three-regime
// approximation of (1 + u*u) / (2 + u*u), split at t = -0.5 and t = 0.66.
public static double code(double t) {
	double tmp;
	if (t <= -0.5) {
		// Series about -inf: 5/6 - (2/9 - (1/27)/t)/t.
		tmp = 0.8333333333333334 - ((0.2222222222222222 + (-0.037037037037037035 / t)) / t);
	} else if (t <= 0.66) {
		// Near zero the Taylor expansion reduces to the constant 0.5.
		tmp = 0.5;
	} else {
		// Large positive t: higher-order series in powers of 1/t.
		tmp = 0.8333333333333334 + ((((0.037037037037037035 + (0.04938271604938271 / t)) / t) - 0.2222222222222222) / t);
	}
	return tmp;
}
def code(t):
	"""Herbie alternative 4 (99.1% accurate): three-regime approximation of
	(1 + u*u) / (2 + u*u) where u = 2 - (2/t)/(1 + 1/t).

	Regimes split at t = -0.5 and t = 0.66; the middle regime is the
	constant 0.5. NaN inputs fall through to the last branch, matching
	the original if/elif/else.
	"""
	if t <= -0.5:
		# Series about -inf: 5/6 - (2/9 - (1/27)/t)/t.
		return 0.8333333333333334 - ((0.2222222222222222 + (-0.037037037037037035 / t)) / t)
	if t <= 0.66:
		return 0.5
	# Large positive t: higher-order series in powers of 1/t.
	return 0.8333333333333334 + ((((0.037037037037037035 + (0.04938271604938271 / t)) / t) - 0.2222222222222222) / t)
# Herbie alternative 4 (99.1% accurate): three-regime approximation of
# (1 + u*u) / (2 + u*u), split at t = -0.5 and t = 0.66.
function code(t)
	tmp = 0.0
	if (t <= -0.5)
		# Series about -inf: 5/6 - (2/9 - (1/27)/t)/t.
		tmp = Float64(0.8333333333333334 - Float64(Float64(0.2222222222222222 + Float64(-0.037037037037037035 / t)) / t));
	elseif (t <= 0.66)
		# Near zero the Taylor expansion reduces to the constant 0.5.
		tmp = 0.5;
	else
		# Large positive t: higher-order series in powers of 1/t.
		tmp = Float64(0.8333333333333334 + Float64(Float64(Float64(Float64(0.037037037037037035 + Float64(0.04938271604938271 / t)) / t) - 0.2222222222222222) / t));
	end
	return tmp
end
% Herbie alternative 4 (99.1% accurate): three-regime approximation of
% (1 + u*u) / (2 + u*u), split at t = -0.5 and t = 0.66.
function tmp_2 = code(t)
	tmp = 0.0;
	if (t <= -0.5)
		% Series about -inf: 5/6 - (2/9 - (1/27)/t)/t.
		tmp = 0.8333333333333334 - ((0.2222222222222222 + (-0.037037037037037035 / t)) / t);
	elseif (t <= 0.66)
		% Near zero the Taylor expansion reduces to the constant 0.5.
		tmp = 0.5;
	else
		% Large positive t: higher-order series in powers of 1/t.
		tmp = 0.8333333333333334 + ((((0.037037037037037035 + (0.04938271604938271 / t)) / t) - 0.2222222222222222) / t);
	end
	tmp_2 = tmp;
end
(* Herbie alternative 4 (99.1% accurate): three-regime approximation split at t = -0.5 and t = 0.66; middle regime is the constant 0.5. *)
code[t_] := If[LessEqual[t, -0.5], N[(0.8333333333333334 - N[(N[(0.2222222222222222 + N[(-0.037037037037037035 / t), $MachinePrecision]), $MachinePrecision] / t), $MachinePrecision]), $MachinePrecision], If[LessEqual[t, 0.66], 0.5, N[(0.8333333333333334 + N[(N[(N[(N[(0.037037037037037035 + N[(0.04938271604938271 / t), $MachinePrecision]), $MachinePrecision] / t), $MachinePrecision] - 0.2222222222222222), $MachinePrecision] / t), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;t \leq -0.5:\\
\;\;\;\;0.8333333333333334 - \frac{0.2222222222222222 + \frac{-0.037037037037037035}{t}}{t}\\

\mathbf{elif}\;t \leq 0.66:\\
\;\;\;\;0.5\\

\mathbf{else}:\\
\;\;\;\;0.8333333333333334 + \frac{\frac{0.037037037037037035 + \frac{0.04938271604938271}{t}}{t} - 0.2222222222222222}{t}\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if t < -0.5

    1. Initial program 100.0%

      \[\frac{1 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)} \]
    2. Add Preprocessing
    3. Taylor expanded in t around -inf 99.9%

      \[\leadsto \color{blue}{0.8333333333333334 + -1 \cdot \frac{0.2222222222222222 - 0.037037037037037035 \cdot \frac{1}{t}}{t}} \]
    4. Step-by-step derivation
      1. mul-1-neg99.9%

        \[\leadsto 0.8333333333333334 + \color{blue}{\left(-\frac{0.2222222222222222 - 0.037037037037037035 \cdot \frac{1}{t}}{t}\right)} \]
      2. unsub-neg99.9%

        \[\leadsto \color{blue}{0.8333333333333334 - \frac{0.2222222222222222 - 0.037037037037037035 \cdot \frac{1}{t}}{t}} \]
      3. sub-neg99.9%

        \[\leadsto 0.8333333333333334 - \frac{\color{blue}{0.2222222222222222 + \left(-0.037037037037037035 \cdot \frac{1}{t}\right)}}{t} \]
      4. associate-*r/99.9%

        \[\leadsto 0.8333333333333334 - \frac{0.2222222222222222 + \left(-\color{blue}{\frac{0.037037037037037035 \cdot 1}{t}}\right)}{t} \]
      5. metadata-eval99.9%

        \[\leadsto 0.8333333333333334 - \frac{0.2222222222222222 + \left(-\frac{\color{blue}{0.037037037037037035}}{t}\right)}{t} \]
      6. distribute-neg-frac99.9%

        \[\leadsto 0.8333333333333334 - \frac{0.2222222222222222 + \color{blue}{\frac{-0.037037037037037035}{t}}}{t} \]
      7. metadata-eval99.9%

        \[\leadsto 0.8333333333333334 - \frac{0.2222222222222222 + \frac{\color{blue}{-0.037037037037037035}}{t}}{t} \]
    5. Simplified99.9%

      \[\leadsto \color{blue}{0.8333333333333334 - \frac{0.2222222222222222 + \frac{-0.037037037037037035}{t}}{t}} \]

    if -0.5 < t < 0.660000000000000031

    1. Initial program 100.0%

      \[\frac{1 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)} \]
    2. Add Preprocessing
    3. Taylor expanded in t around 0 99.4%

      \[\leadsto \color{blue}{0.5} \]

    if 0.660000000000000031 < t

    1. Initial program 100.0%

      \[\frac{1 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)} \]
    2. Add Preprocessing
    3. Taylor expanded in t around -inf 99.5%

      \[\leadsto \color{blue}{0.8333333333333334 + -1 \cdot \frac{0.2222222222222222 + -1 \cdot \frac{0.037037037037037035 + 0.04938271604938271 \cdot \frac{1}{t}}{t}}{t}} \]
    4. Step-by-step derivation
      1. mul-1-neg99.5%

        \[\leadsto 0.8333333333333334 + \color{blue}{\left(-\frac{0.2222222222222222 + -1 \cdot \frac{0.037037037037037035 + 0.04938271604938271 \cdot \frac{1}{t}}{t}}{t}\right)} \]
      2. unsub-neg99.5%

        \[\leadsto \color{blue}{0.8333333333333334 - \frac{0.2222222222222222 + -1 \cdot \frac{0.037037037037037035 + 0.04938271604938271 \cdot \frac{1}{t}}{t}}{t}} \]
      3. mul-1-neg99.5%

        \[\leadsto 0.8333333333333334 - \frac{0.2222222222222222 + \color{blue}{\left(-\frac{0.037037037037037035 + 0.04938271604938271 \cdot \frac{1}{t}}{t}\right)}}{t} \]
      4. unsub-neg99.5%

        \[\leadsto 0.8333333333333334 - \frac{\color{blue}{0.2222222222222222 - \frac{0.037037037037037035 + 0.04938271604938271 \cdot \frac{1}{t}}{t}}}{t} \]
      5. associate-*r/99.5%

        \[\leadsto 0.8333333333333334 - \frac{0.2222222222222222 - \frac{0.037037037037037035 + \color{blue}{\frac{0.04938271604938271 \cdot 1}{t}}}{t}}{t} \]
      6. metadata-eval99.5%

        \[\leadsto 0.8333333333333334 - \frac{0.2222222222222222 - \frac{0.037037037037037035 + \frac{\color{blue}{0.04938271604938271}}{t}}{t}}{t} \]
    5. Simplified99.5%

      \[\leadsto \color{blue}{0.8333333333333334 - \frac{0.2222222222222222 - \frac{0.037037037037037035 + \frac{0.04938271604938271}{t}}{t}}{t}} \]
  3. Recombined 3 regimes into one program.
  4. Final simplification99.5%

    \[\leadsto \begin{array}{l} \mathbf{if}\;t \leq -0.5:\\ \;\;\;\;0.8333333333333334 - \frac{0.2222222222222222 + \frac{-0.037037037037037035}{t}}{t}\\ \mathbf{elif}\;t \leq 0.66:\\ \;\;\;\;0.5\\ \mathbf{else}:\\ \;\;\;\;0.8333333333333334 + \frac{\frac{0.037037037037037035 + \frac{0.04938271604938271}{t}}{t} - 0.2222222222222222}{t}\\ \end{array} \]
  5. Add Preprocessing

Alternative 5: 99.1% accurate, 2.7× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;t \leq -0.5 \lor \neg \left(t \leq 0.23\right):\\ \;\;\;\;0.8333333333333334 - \frac{0.2222222222222222 + \frac{-0.037037037037037035}{t}}{t}\\ \mathbf{else}:\\ \;\;\;\;0.5\\ \end{array} \end{array} \]
(FPCore (t)
 :precision binary64
 (if (or (<= t -0.5) (not (<= t 0.23)))
   (-
    0.8333333333333334
    (/ (+ 0.2222222222222222 (/ -0.037037037037037035 t)) t))
   0.5))
/* Herbie alternative 5 (99.1% accurate, 2.7x speedup): two-regime
 * approximation of (1 + u*u) / (2 + u*u) with u = 2 - (2/t)/(1 + 1/t).
 * Outside (-0.5, 0.23] a series about infinity is used; inside, 0.5. */
double code(double t) {
	double tmp;
	if ((t <= -0.5) || !(t <= 0.23)) {
		/* Series for large |t|: 5/6 - (2/9 - (1/27)/t)/t. */
		tmp = 0.8333333333333334 - ((0.2222222222222222 + (-0.037037037037037035 / t)) / t);
	} else {
		/* Near zero the Taylor expansion reduces to the constant 0.5. */
		tmp = 0.5;
	}
	return tmp;
}
! Herbie alternative 5 (99.1% accurate): two-regime approximation.
! Outside (-0.5, 0.23] a series about infinity is used; inside, 0.5.
real(8) function code(t)
    real(8), intent (in) :: t
    real(8) :: tmp
    if ((t <= (-0.5d0)) .or. (.not. (t <= 0.23d0))) then
        ! Series for large |t|: 5/6 - (2/9 - (1/27)/t)/t.
        tmp = 0.8333333333333334d0 - ((0.2222222222222222d0 + ((-0.037037037037037035d0) / t)) / t)
    else
        ! Near zero the Taylor expansion reduces to the constant 0.5.
        tmp = 0.5d0
    end if
    code = tmp
end function
// Herbie alternative 5 (99.1% accurate, 2.7x speedup): two-regime approximation.
// Outside (-0.5, 0.23] a series about infinity is used; inside, 0.5.
public static double code(double t) {
	double tmp;
	if ((t <= -0.5) || !(t <= 0.23)) {
		// Series for large |t|: 5/6 - (2/9 - (1/27)/t)/t.
		tmp = 0.8333333333333334 - ((0.2222222222222222 + (-0.037037037037037035 / t)) / t);
	} else {
		// Near zero the Taylor expansion reduces to the constant 0.5.
		tmp = 0.5;
	}
	return tmp;
}
def code(t):
	"""Herbie alternative 5 (99.1% accurate): two-regime approximation of
	(1 + u*u) / (2 + u*u) where u = 2 - (2/t)/(1 + 1/t).

	Outside (-0.5, 0.23] a series about infinity is used; inside, the
	constant 0.5. NaN takes the series branch (``not (t <= 0.23)`` is
	True for NaN), matching the original.
	"""
	if (t <= -0.5) or not (t <= 0.23):
		# Series for large |t|: 5/6 - (2/9 - (1/27)/t)/t.
		return 0.8333333333333334 - ((0.2222222222222222 + (-0.037037037037037035 / t)) / t)
	return 0.5
# Herbie alternative 5 (99.1% accurate): two-regime approximation.
# Outside (-0.5, 0.23] a series about infinity is used; inside, 0.5.
function code(t)
	tmp = 0.0
	if ((t <= -0.5) || !(t <= 0.23))
		# Series for large |t|: 5/6 - (2/9 - (1/27)/t)/t.
		tmp = Float64(0.8333333333333334 - Float64(Float64(0.2222222222222222 + Float64(-0.037037037037037035 / t)) / t));
	else
		# Near zero the Taylor expansion reduces to the constant 0.5.
		tmp = 0.5;
	end
	return tmp
end
% Herbie alternative 5 (99.1% accurate): two-regime approximation.
% Outside (-0.5, 0.23] a series about infinity is used; inside, 0.5.
function tmp_2 = code(t)
	tmp = 0.0;
	if ((t <= -0.5) || ~((t <= 0.23)))
		% Series for large |t|: 5/6 - (2/9 - (1/27)/t)/t.
		tmp = 0.8333333333333334 - ((0.2222222222222222 + (-0.037037037037037035 / t)) / t);
	else
		% Near zero the Taylor expansion reduces to the constant 0.5.
		tmp = 0.5;
	end
	tmp_2 = tmp;
end
(* Herbie alternative 5 (99.1% accurate): series branch outside (-0.5, 0.23], constant 0.5 inside. NOTE(review): the generated N[Not[...], $MachinePrecision] wraps a boolean in N — harmless since Not evaluates first, but unusual; verify against the Herbie emitter. *)
code[t_] := If[Or[LessEqual[t, -0.5], N[Not[LessEqual[t, 0.23]], $MachinePrecision]], N[(0.8333333333333334 - N[(N[(0.2222222222222222 + N[(-0.037037037037037035 / t), $MachinePrecision]), $MachinePrecision] / t), $MachinePrecision]), $MachinePrecision], 0.5]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;t \leq -0.5 \lor \neg \left(t \leq 0.23\right):\\
\;\;\;\;0.8333333333333334 - \frac{0.2222222222222222 + \frac{-0.037037037037037035}{t}}{t}\\

\mathbf{else}:\\
\;\;\;\;0.5\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if t < -0.5 or 0.23000000000000001 < t

    1. Initial program 100.0%

      \[\frac{1 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)} \]
    2. Add Preprocessing
    3. Taylor expanded in t around -inf 99.6%

      \[\leadsto \color{blue}{0.8333333333333334 + -1 \cdot \frac{0.2222222222222222 - 0.037037037037037035 \cdot \frac{1}{t}}{t}} \]
    4. Step-by-step derivation
      1. mul-1-neg99.6%

        \[\leadsto 0.8333333333333334 + \color{blue}{\left(-\frac{0.2222222222222222 - 0.037037037037037035 \cdot \frac{1}{t}}{t}\right)} \]
      2. unsub-neg99.6%

        \[\leadsto \color{blue}{0.8333333333333334 - \frac{0.2222222222222222 - 0.037037037037037035 \cdot \frac{1}{t}}{t}} \]
      3. sub-neg99.6%

        \[\leadsto 0.8333333333333334 - \frac{\color{blue}{0.2222222222222222 + \left(-0.037037037037037035 \cdot \frac{1}{t}\right)}}{t} \]
      4. associate-*r/99.6%

        \[\leadsto 0.8333333333333334 - \frac{0.2222222222222222 + \left(-\color{blue}{\frac{0.037037037037037035 \cdot 1}{t}}\right)}{t} \]
      5. metadata-eval99.6%

        \[\leadsto 0.8333333333333334 - \frac{0.2222222222222222 + \left(-\frac{\color{blue}{0.037037037037037035}}{t}\right)}{t} \]
      6. distribute-neg-frac99.6%

        \[\leadsto 0.8333333333333334 - \frac{0.2222222222222222 + \color{blue}{\frac{-0.037037037037037035}{t}}}{t} \]
      7. metadata-eval99.6%

        \[\leadsto 0.8333333333333334 - \frac{0.2222222222222222 + \frac{\color{blue}{-0.037037037037037035}}{t}}{t} \]
    5. Simplified99.6%

      \[\leadsto \color{blue}{0.8333333333333334 - \frac{0.2222222222222222 + \frac{-0.037037037037037035}{t}}{t}} \]

    if -0.5 < t < 0.23000000000000001

    1. Initial program 100.0%

      \[\frac{1 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)} \]
    2. Add Preprocessing
    3. Taylor expanded in t around 0 99.4%

      \[\leadsto \color{blue}{0.5} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification99.5%

    \[\leadsto \begin{array}{l} \mathbf{if}\;t \leq -0.5 \lor \neg \left(t \leq 0.23\right):\\ \;\;\;\;0.8333333333333334 - \frac{0.2222222222222222 + \frac{-0.037037037037037035}{t}}{t}\\ \mathbf{else}:\\ \;\;\;\;0.5\\ \end{array} \]
  5. Add Preprocessing

Alternative 6: 98.9% accurate, 3.4× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;t \leq -0.49 \lor \neg \left(t \leq 0.66\right):\\ \;\;\;\;0.8333333333333334 - \frac{0.2222222222222222}{t}\\ \mathbf{else}:\\ \;\;\;\;0.5\\ \end{array} \end{array} \]
(FPCore (t)
 :precision binary64
 (if (or (<= t -0.49) (not (<= t 0.66)))
   (- 0.8333333333333334 (/ 0.2222222222222222 t))
   0.5))
/* Herbie alternative 6 (98.9% accurate, 3.4x speedup): two-regime
 * approximation; outside (-0.49, 0.66] use the first-order series
 * 5/6 - (2/9)/t, inside use the constant 0.5. */
double code(double t) {
	double tmp;
	if ((t <= -0.49) || !(t <= 0.66)) {
		/* First-order series for large |t|. */
		tmp = 0.8333333333333334 - (0.2222222222222222 / t);
	} else {
		/* Near zero the Taylor expansion reduces to the constant 0.5. */
		tmp = 0.5;
	}
	return tmp;
}
! Herbie alternative 6 (98.9% accurate): two-regime approximation;
! outside (-0.49, 0.66] use 5/6 - (2/9)/t, inside use 0.5.
real(8) function code(t)
    real(8), intent (in) :: t
    real(8) :: tmp
    if ((t <= (-0.49d0)) .or. (.not. (t <= 0.66d0))) then
        ! First-order series for large |t|.
        tmp = 0.8333333333333334d0 - (0.2222222222222222d0 / t)
    else
        ! Near zero the Taylor expansion reduces to the constant 0.5.
        tmp = 0.5d0
    end if
    code = tmp
end function
// Herbie alternative 6 (98.9% accurate, 3.4x speedup): two-regime approximation;
// outside (-0.49, 0.66] use 5/6 - (2/9)/t, inside use 0.5.
public static double code(double t) {
	double tmp;
	if ((t <= -0.49) || !(t <= 0.66)) {
		// First-order series for large |t|.
		tmp = 0.8333333333333334 - (0.2222222222222222 / t);
	} else {
		// Near zero the Taylor expansion reduces to the constant 0.5.
		tmp = 0.5;
	}
	return tmp;
}
def code(t):
	"""Herbie alternative 6 (98.9% accurate): two-regime approximation of
	(1 + u*u) / (2 + u*u) where u = 2 - (2/t)/(1 + 1/t).

	Outside (-0.49, 0.66] use the first-order series 5/6 - (2/9)/t;
	inside, the constant 0.5. NaN takes the series branch
	(``not (t <= 0.66)`` is True for NaN), matching the original.
	"""
	if (t <= -0.49) or not (t <= 0.66):
		return 0.8333333333333334 - (0.2222222222222222 / t)
	return 0.5
# Herbie alternative 6 (98.9% accurate): two-regime approximation;
# outside (-0.49, 0.66] use 5/6 - (2/9)/t, inside use 0.5.
function code(t)
	tmp = 0.0
	if ((t <= -0.49) || !(t <= 0.66))
		# First-order series for large |t|.
		tmp = Float64(0.8333333333333334 - Float64(0.2222222222222222 / t));
	else
		# Near zero the Taylor expansion reduces to the constant 0.5.
		tmp = 0.5;
	end
	return tmp
end
% Herbie alternative 6 (98.9% accurate): two-regime approximation;
% outside (-0.49, 0.66] use 5/6 - (2/9)/t, inside use 0.5.
function tmp_2 = code(t)
	tmp = 0.0;
	if ((t <= -0.49) || ~((t <= 0.66)))
		% First-order series for large |t|.
		tmp = 0.8333333333333334 - (0.2222222222222222 / t);
	else
		% Near zero the Taylor expansion reduces to the constant 0.5.
		tmp = 0.5;
	end
	tmp_2 = tmp;
end
(* Herbie alternative 6 (98.9% accurate): 5/6 - (2/9)/t outside (-0.49, 0.66], constant 0.5 inside. NOTE(review): N[Not[...], $MachinePrecision] wraps a boolean — harmless but unusual generated code. *)
code[t_] := If[Or[LessEqual[t, -0.49], N[Not[LessEqual[t, 0.66]], $MachinePrecision]], N[(0.8333333333333334 - N[(0.2222222222222222 / t), $MachinePrecision]), $MachinePrecision], 0.5]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;t \leq -0.49 \lor \neg \left(t \leq 0.66\right):\\
\;\;\;\;0.8333333333333334 - \frac{0.2222222222222222}{t}\\

\mathbf{else}:\\
\;\;\;\;0.5\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if t < -0.48999999999999999 or 0.660000000000000031 < t

    1. Initial program 100.0%

      \[\frac{1 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)} \]
    2. Add Preprocessing
    3. Taylor expanded in t around inf 99.4%

      \[\leadsto \color{blue}{0.8333333333333334 - 0.2222222222222222 \cdot \frac{1}{t}} \]
    4. Step-by-step derivation
      1. associate-*r/99.4%

        \[\leadsto 0.8333333333333334 - \color{blue}{\frac{0.2222222222222222 \cdot 1}{t}} \]
      2. metadata-eval99.4%

        \[\leadsto 0.8333333333333334 - \frac{\color{blue}{0.2222222222222222}}{t} \]
    5. Simplified99.4%

      \[\leadsto \color{blue}{0.8333333333333334 - \frac{0.2222222222222222}{t}} \]

    if -0.48999999999999999 < t < 0.660000000000000031

    1. Initial program 100.0%

      \[\frac{1 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)} \]
    2. Add Preprocessing
    3. Taylor expanded in t around 0 99.4%

      \[\leadsto \color{blue}{0.5} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification99.4%

    \[\leadsto \begin{array}{l} \mathbf{if}\;t \leq -0.49 \lor \neg \left(t \leq 0.66\right):\\ \;\;\;\;0.8333333333333334 - \frac{0.2222222222222222}{t}\\ \mathbf{else}:\\ \;\;\;\;0.5\\ \end{array} \]
  5. Add Preprocessing

Alternative 7: 98.4% accurate, 4.6× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;t \leq -0.34:\\ \;\;\;\;0.8333333333333334\\ \mathbf{elif}\;t \leq 1:\\ \;\;\;\;0.5\\ \mathbf{else}:\\ \;\;\;\;0.8333333333333334\\ \end{array} \end{array} \]
(FPCore (t)
 :precision binary64
 (if (<= t -0.34) 0.8333333333333334 (if (<= t 1.0) 0.5 0.8333333333333334)))
/* Herbie alternative 7 (98.4% accurate, 4.6x speedup): piecewise-constant
 * approximation — 5/6 for t <= -0.34 or t > 1, else 0.5. */
double code(double t) {
	double tmp;
	if (t <= -0.34) {
		tmp = 0.8333333333333334; /* limit value 5/6 at +/-infinity */
	} else if (t <= 1.0) {
		tmp = 0.5; /* Taylor expansion around 0 */
	} else {
		tmp = 0.8333333333333334;
	}
	return tmp;
}
! Herbie alternative 7 (98.4% accurate): piecewise-constant approximation —
! 5/6 for t <= -0.34 or t > 1, else 0.5.
real(8) function code(t)
    real(8), intent (in) :: t
    real(8) :: tmp
    if (t <= (-0.34d0)) then
        ! Limit value 5/6 at +/-infinity.
        tmp = 0.8333333333333334d0
    else if (t <= 1.0d0) then
        ! Taylor expansion around 0.
        tmp = 0.5d0
    else
        tmp = 0.8333333333333334d0
    end if
    code = tmp
end function
// Herbie alternative 7 (98.4% accurate, 4.6x speedup): piecewise-constant
// approximation — 5/6 for t <= -0.34 or t > 1, else 0.5.
public static double code(double t) {
	double tmp;
	if (t <= -0.34) {
		tmp = 0.8333333333333334; // limit value 5/6 at +/-infinity
	} else if (t <= 1.0) {
		tmp = 0.5; // Taylor expansion around 0
	} else {
		tmp = 0.8333333333333334;
	}
	return tmp;
}
def code(t):
	"""Herbie alternative 7 (98.4% accurate): piecewise-constant
	approximation — 5/6 for t <= -0.34 or t > 1, else 0.5.

	NaN inputs fall through to the final branch (5/6), matching the
	original if/elif/else.
	"""
	if t <= -0.34:
		return 0.8333333333333334
	if t <= 1.0:
		return 0.5
	return 0.8333333333333334
# Herbie alternative 7 (98.4% accurate): piecewise-constant approximation —
# 5/6 for t <= -0.34 or t > 1, else 0.5.
function code(t)
	tmp = 0.0
	if (t <= -0.34)
		# Limit value 5/6 at +/-infinity.
		tmp = 0.8333333333333334;
	elseif (t <= 1.0)
		# Taylor expansion around 0.
		tmp = 0.5;
	else
		tmp = 0.8333333333333334;
	end
	return tmp
end
% Herbie alternative 7 (98.4% accurate): piecewise-constant approximation —
% 5/6 for t <= -0.34 or t > 1, else 0.5.
function tmp_2 = code(t)
	tmp = 0.0;
	if (t <= -0.34)
		% Limit value 5/6 at +/-infinity.
		tmp = 0.8333333333333334;
	elseif (t <= 1.0)
		% Taylor expansion around 0.
		tmp = 0.5;
	else
		tmp = 0.8333333333333334;
	end
	tmp_2 = tmp;
end
(* Herbie alternative 7 (98.4% accurate): piecewise constant — 5/6 for t <= -0.34 or t > 1, else 0.5. *)
code[t_] := If[LessEqual[t, -0.34], 0.8333333333333334, If[LessEqual[t, 1.0], 0.5, 0.8333333333333334]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;t \leq -0.34:\\
\;\;\;\;0.8333333333333334\\

\mathbf{elif}\;t \leq 1:\\
\;\;\;\;0.5\\

\mathbf{else}:\\
\;\;\;\;0.8333333333333334\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if t < -0.340000000000000024 or 1 < t

    1. Initial program 100.0%

      \[\frac{1 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)} \]
    2. Add Preprocessing
    3. Taylor expanded in t around inf 98.1%

      \[\leadsto \color{blue}{0.8333333333333334} \]

    if -0.340000000000000024 < t < 1

    1. Initial program 100.0%

      \[\frac{1 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)} \]
    2. Add Preprocessing
    3. Taylor expanded in t around 0 99.4%

      \[\leadsto \color{blue}{0.5} \]
  3. Recombined 2 regimes into one program.
  4. Add Preprocessing

Alternative 8: 58.7% accurate, 51.0× speedup?

\[\begin{array}{l} \\ 0.5 \end{array} \]
(FPCore (t) :precision binary64 0.5)
/* Herbie alternative 8 (58.7% accurate, 51x speedup): constant
 * approximation — the Taylor expansion of the expression around t = 0. */
double code(double t) {
	return 0.5;
}
! Herbie alternative 8 (58.7% accurate): constant approximation —
! the Taylor expansion of the expression around t = 0.
real(8) function code(t)
    real(8), intent (in) :: t
    code = 0.5d0
end function
// Herbie alternative 8 (58.7% accurate, 51x speedup): constant approximation —
// the Taylor expansion of the expression around t = 0.
public static double code(double t) {
	return 0.5;
}
def code(t):
	"""Herbie alternative 8 (58.7% accurate, 51x speedup): constant
	approximation, the Taylor expansion of the expression around t = 0.

	The argument ``t`` is accepted for interface compatibility and ignored.
	"""
	result = 0.5
	return result
# Herbie alternative 8 (58.7% accurate): constant approximation —
# the Taylor expansion of the expression around t = 0.
function code(t)
	return 0.5
end
% Herbie alternative 8 (58.7% accurate): constant approximation —
% the Taylor expansion of the expression around t = 0.
function tmp = code(t)
	tmp = 0.5;
end
(* Herbie alternative 8 (58.7% accurate): constant approximation 0.5. *)
code[t_] := 0.5
\begin{array}{l}

\\
0.5
\end{array}
Derivation
  1. Initial program 100.0%

    \[\frac{1 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)} \]
  2. Add Preprocessing
  3. Taylor expanded in t around 0 59.8%

    \[\leadsto \color{blue}{0.5} \]
  4. Add Preprocessing

Reproduce

?
herbie shell --seed 2024133 
(FPCore (t)
  :name "Kahan p13 Example 2"
  :precision binary64
  (/ (+ 1.0 (* (- 2.0 (/ (/ 2.0 t) (+ 1.0 (/ 1.0 t)))) (- 2.0 (/ (/ 2.0 t) (+ 1.0 (/ 1.0 t)))))) (+ 2.0 (* (- 2.0 (/ (/ 2.0 t) (+ 1.0 (/ 1.0 t)))) (- 2.0 (/ (/ 2.0 t) (+ 1.0 (/ 1.0 t))))))))