Kahan p13 Example 3

Percentage Accurate: 100.0% → 100.0%
Time: 15.6s
Alternatives: 10
Speedup: 1.4×

Specification

?
\[\begin{array}{l} \\ \begin{array}{l} t_1 := 2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\\ 1 - \frac{1}{2 + t_1 \cdot t_1} \end{array} \end{array} \]
;; Kahan p13 Example 3: 1 - 1/(2 + t_1^2), where t_1 = 2 - (2/t)/(1 + 1/t).
(FPCore (t)
 :precision binary64
 (let* ((t_1 (- 2.0 (/ (/ 2.0 t) (+ 1.0 (/ 1.0 t))))))
   (- 1.0 (/ 1.0 (+ 2.0 (* t_1 t_1))))))
// Kahan p13 Example 3: returns 1 - 1/(2 + t_1*t_1), where t_1 = 2 - (2/t)/(1 + 1/t).
double code(double t) {
	double t_1 = 2.0 - ((2.0 / t) / (1.0 + (1.0 / t)));
	return 1.0 - (1.0 / (2.0 + (t_1 * t_1)));
}
! Kahan p13 Example 3: code = 1 - 1/(2 + t_1*t_1), where t_1 = 2 - (2/t)/(1 + 1/t).
real(8) function code(t)
    real(8), intent (in) :: t
    real(8) :: t_1
    t_1 = 2.0d0 - ((2.0d0 / t) / (1.0d0 + (1.0d0 / t)))
    code = 1.0d0 - (1.0d0 / (2.0d0 + (t_1 * t_1)))
end function
// Kahan p13 Example 3: returns 1 - 1/(2 + t_1*t_1), where t_1 = 2 - (2/t)/(1 + 1/t).
public static double code(double t) {
	double t_1 = 2.0 - ((2.0 / t) / (1.0 + (1.0 / t)));
	return 1.0 - (1.0 / (2.0 + (t_1 * t_1)));
}
def code(t):
	# Kahan p13 Example 3: 1 - 1/(2 + g*g) with g = 2 - (2/t)/(1 + 1/t).
	ratio = (2.0 / t) / (1.0 + (1.0 / t))
	g = 2.0 - ratio
	return 1.0 - 1.0 / (2.0 + g * g)
# Kahan p13 Example 3: 1 - 1/(2 + t_1*t_1) with explicit Float64 rounding at each step.
function code(t)
	t_1 = Float64(2.0 - Float64(Float64(2.0 / t) / Float64(1.0 + Float64(1.0 / t))))
	return Float64(1.0 - Float64(1.0 / Float64(2.0 + Float64(t_1 * t_1))))
end
% Kahan p13 Example 3: tmp = 1 - 1/(2 + t_1*t_1), where t_1 = 2 - (2/t)/(1 + 1/t).
function tmp = code(t)
	t_1 = 2.0 - ((2.0 / t) / (1.0 + (1.0 / t)));
	tmp = 1.0 - (1.0 / (2.0 + (t_1 * t_1)));
end
(* Kahan p13 Example 3: 1 - 1/(2 + t1*t1), t1 = 2 - (2/t)/(1 + 1/t); every step rounded to machine precision via N. *)
code[t_] := Block[{t$95$1 = N[(2.0 - N[(N[(2.0 / t), $MachinePrecision] / N[(1.0 + N[(1.0 / t), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, N[(1.0 - N[(1.0 / N[(2.0 + N[(t$95$1 * t$95$1), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
t_1 := 2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\\
1 - \frac{1}{2 + t_1 \cdot t_1}
\end{array}
\end{array}

Sampling outcomes in binary64 precision:

Local Percentage Accuracy vs ?

The average percentage accuracy by input value. Horizontal axis shows the value of an input variable; the variable is chosen in the title. Vertical axis is accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion. These can be toggled with buttons below the plot. The line is an average while dots represent individual samples.

Accuracy vs Speed?

Herbie found 10 alternatives:

AlternativeAccuracySpeedup
The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed-accuracy tradeoffs.

Initial Program: 100.0% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_1 := 2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\\ 1 - \frac{1}{2 + t_1 \cdot t_1} \end{array} \end{array} \]
;; Initial program: 1 - 1/(2 + t_1^2), where t_1 = 2 - (2/t)/(1 + 1/t).
(FPCore (t)
 :precision binary64
 (let* ((t_1 (- 2.0 (/ (/ 2.0 t) (+ 1.0 (/ 1.0 t))))))
   (- 1.0 (/ 1.0 (+ 2.0 (* t_1 t_1))))))
// Initial program: returns 1 - 1/(2 + t_1*t_1), where t_1 = 2 - (2/t)/(1 + 1/t).
double code(double t) {
	double t_1 = 2.0 - ((2.0 / t) / (1.0 + (1.0 / t)));
	return 1.0 - (1.0 / (2.0 + (t_1 * t_1)));
}
! Initial program: code = 1 - 1/(2 + t_1*t_1), where t_1 = 2 - (2/t)/(1 + 1/t).
real(8) function code(t)
    real(8), intent (in) :: t
    real(8) :: t_1
    t_1 = 2.0d0 - ((2.0d0 / t) / (1.0d0 + (1.0d0 / t)))
    code = 1.0d0 - (1.0d0 / (2.0d0 + (t_1 * t_1)))
end function
// Initial program: returns 1 - 1/(2 + t_1*t_1), where t_1 = 2 - (2/t)/(1 + 1/t).
public static double code(double t) {
	double t_1 = 2.0 - ((2.0 / t) / (1.0 + (1.0 / t)));
	return 1.0 - (1.0 / (2.0 + (t_1 * t_1)));
}
def code(t):
	# Initial program: 1 - 1/(2 + t_1*t_1), where t_1 = 2 - (2/t)/(1 + 1/t).
	t_1 = 2.0 - ((2.0 / t) / (1.0 + (1.0 / t)))
	return 1.0 - (1.0 / (2.0 + (t_1 * t_1)))
# Initial program: 1 - 1/(2 + t_1*t_1) with explicit Float64 rounding at each step.
function code(t)
	t_1 = Float64(2.0 - Float64(Float64(2.0 / t) / Float64(1.0 + Float64(1.0 / t))))
	return Float64(1.0 - Float64(1.0 / Float64(2.0 + Float64(t_1 * t_1))))
end
% Initial program: tmp = 1 - 1/(2 + t_1*t_1), where t_1 = 2 - (2/t)/(1 + 1/t).
function tmp = code(t)
	t_1 = 2.0 - ((2.0 / t) / (1.0 + (1.0 / t)));
	tmp = 1.0 - (1.0 / (2.0 + (t_1 * t_1)));
end
(* Initial program: 1 - 1/(2 + t1*t1), t1 = 2 - (2/t)/(1 + 1/t); every step rounded to machine precision via N. *)
code[t_] := Block[{t$95$1 = N[(2.0 - N[(N[(2.0 / t), $MachinePrecision] / N[(1.0 + N[(1.0 / t), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, N[(1.0 - N[(1.0 / N[(2.0 + N[(t$95$1 * t$95$1), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
t_1 := 2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\\
1 - \frac{1}{2 + t_1 \cdot t_1}
\end{array}
\end{array}

Alternative 1: 100.0% accurate, 1.4× speedup?

\[\begin{array}{l} \\ 1 + \frac{-1}{2 + \left(2 + \frac{-2}{1 + t}\right) \cdot \left(2 + \frac{2}{-1 - t}\right)} \end{array} \]
;; Alternative 1: 1 + (-1)/(2 + (2 - 2/(1+t)) * (2 + 2/(-1-t))).
(FPCore (t)
 :precision binary64
 (+
  1.0
  (/ -1.0 (+ 2.0 (* (+ 2.0 (/ -2.0 (+ 1.0 t))) (+ 2.0 (/ 2.0 (- -1.0 t))))))))
// Alternative 1: 1 + (-1)/(2 + (2 - 2/(1+t)) * (2 + 2/(-1-t))).
double code(double t) {
	return 1.0 + (-1.0 / (2.0 + ((2.0 + (-2.0 / (1.0 + t))) * (2.0 + (2.0 / (-1.0 - t))))));
}
! Alternative 1: 1 + (-1)/(2 + (2 - 2/(1+t)) * (2 + 2/(-1-t))).
real(8) function code(t)
    real(8), intent (in) :: t
    code = 1.0d0 + ((-1.0d0) / (2.0d0 + ((2.0d0 + ((-2.0d0) / (1.0d0 + t))) * (2.0d0 + (2.0d0 / ((-1.0d0) - t))))))
end function
// Alternative 1: 1 + (-1)/(2 + (2 - 2/(1+t)) * (2 + 2/(-1-t))).
public static double code(double t) {
	return 1.0 + (-1.0 / (2.0 + ((2.0 + (-2.0 / (1.0 + t))) * (2.0 + (2.0 / (-1.0 - t))))));
}
def code(t):
	# Alternative 1: the squared term of the original is replaced by the
	# product of the factors (2 - 2/(1+t)) and (2 + 2/(-1-t)).
	left = 2.0 + (-2.0 / (1.0 + t))
	right = 2.0 + (2.0 / (-1.0 - t))
	return 1.0 + -1.0 / (2.0 + left * right)
# Alternative 1: 1 + (-1)/(2 + (2 - 2/(1+t)) * (2 + 2/(-1-t))), explicit Float64 rounding.
function code(t)
	return Float64(1.0 + Float64(-1.0 / Float64(2.0 + Float64(Float64(2.0 + Float64(-2.0 / Float64(1.0 + t))) * Float64(2.0 + Float64(2.0 / Float64(-1.0 - t)))))))
end
% Alternative 1: tmp = 1 + (-1)/(2 + (2 - 2/(1+t)) * (2 + 2/(-1-t))).
function tmp = code(t)
	tmp = 1.0 + (-1.0 / (2.0 + ((2.0 + (-2.0 / (1.0 + t))) * (2.0 + (2.0 / (-1.0 - t))))));
end
(* Alternative 1: 1 + (-1)/(2 + (2 - 2/(1+t)) * (2 + 2/(-1-t))); every step rounded to machine precision via N. *)
code[t_] := N[(1.0 + N[(-1.0 / N[(2.0 + N[(N[(2.0 + N[(-2.0 / N[(1.0 + t), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(2.0 + N[(2.0 / N[(-1.0 - t), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
1 + \frac{-1}{2 + \left(2 + \frac{-2}{1 + t}\right) \cdot \left(2 + \frac{2}{-1 - t}\right)}
\end{array}
Derivation
  1. Initial program 100.0%

    \[1 - \frac{1}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)} \]
  2. Add Preprocessing
  3. Step-by-step derivation
    1. div-inv100.0%

      \[\leadsto 1 - \frac{1}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\color{blue}{2 \cdot \frac{1}{t}}}{1 + \frac{1}{t}}\right)} \]
    2. associate-/l*100.0%

      \[\leadsto 1 - \frac{1}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \color{blue}{2 \cdot \frac{\frac{1}{t}}{1 + \frac{1}{t}}}\right)} \]
  4. Applied egg-rr100.0%

    \[\leadsto 1 - \frac{1}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \color{blue}{2 \cdot \frac{\frac{1}{t}}{1 + \frac{1}{t}}}\right)} \]
  5. Step-by-step derivation
    1. associate-/r*100.0%

      \[\leadsto 1 - \frac{1}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - 2 \cdot \color{blue}{\frac{1}{t \cdot \left(1 + \frac{1}{t}\right)}}\right)} \]
    2. associate-*r/100.0%

      \[\leadsto 1 - \frac{1}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \color{blue}{\frac{2 \cdot 1}{t \cdot \left(1 + \frac{1}{t}\right)}}\right)} \]
    3. metadata-eval100.0%

      \[\leadsto 1 - \frac{1}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\color{blue}{2}}{t \cdot \left(1 + \frac{1}{t}\right)}\right)} \]
    4. distribute-lft-in100.0%

      \[\leadsto 1 - \frac{1}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{2}{\color{blue}{t \cdot 1 + t \cdot \frac{1}{t}}}\right)} \]
    5. *-rgt-identity100.0%

      \[\leadsto 1 - \frac{1}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{2}{\color{blue}{t} + t \cdot \frac{1}{t}}\right)} \]
    6. rgt-mult-inverse100.0%

      \[\leadsto 1 - \frac{1}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{2}{t + \color{blue}{1}}\right)} \]
  6. Simplified100.0%

    \[\leadsto 1 - \frac{1}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \color{blue}{\frac{2}{t + 1}}\right)} \]
  7. Step-by-step derivation
    1. sub-neg100.0%

      \[\leadsto 1 - \frac{1}{2 + \color{blue}{\left(2 + \left(-\frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)\right)} \cdot \left(2 - \frac{2}{t + 1}\right)} \]
    2. distribute-neg-frac100.0%

      \[\leadsto 1 - \frac{1}{2 + \left(2 + \color{blue}{\frac{-\frac{2}{t}}{1 + \frac{1}{t}}}\right) \cdot \left(2 - \frac{2}{t + 1}\right)} \]
    3. distribute-neg-frac100.0%

      \[\leadsto 1 - \frac{1}{2 + \left(2 + \frac{\color{blue}{\frac{-2}{t}}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{2}{t + 1}\right)} \]
    4. metadata-eval100.0%

      \[\leadsto 1 - \frac{1}{2 + \left(2 + \frac{\frac{\color{blue}{-2}}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{2}{t + 1}\right)} \]
  8. Applied egg-rr100.0%

    \[\leadsto 1 - \frac{1}{2 + \color{blue}{\left(2 + \frac{\frac{-2}{t}}{1 + \frac{1}{t}}\right)} \cdot \left(2 - \frac{2}{t + 1}\right)} \]
  9. Step-by-step derivation
    1. associate-/r*100.0%

      \[\leadsto 1 - \frac{1}{2 + \left(2 + \color{blue}{\frac{-2}{t \cdot \left(1 + \frac{1}{t}\right)}}\right) \cdot \left(2 - \frac{2}{t + 1}\right)} \]
    2. distribute-lft-in100.0%

      \[\leadsto 1 - \frac{1}{2 + \left(2 + \frac{-2}{\color{blue}{t \cdot 1 + t \cdot \frac{1}{t}}}\right) \cdot \left(2 - \frac{2}{t + 1}\right)} \]
    3. *-rgt-identity100.0%

      \[\leadsto 1 - \frac{1}{2 + \left(2 + \frac{-2}{\color{blue}{t} + t \cdot \frac{1}{t}}\right) \cdot \left(2 - \frac{2}{t + 1}\right)} \]
    4. rgt-mult-inverse100.0%

      \[\leadsto 1 - \frac{1}{2 + \left(2 + \frac{-2}{t + \color{blue}{1}}\right) \cdot \left(2 - \frac{2}{t + 1}\right)} \]
  10. Simplified100.0%

    \[\leadsto 1 - \frac{1}{2 + \color{blue}{\left(2 + \frac{-2}{t + 1}\right)} \cdot \left(2 - \frac{2}{t + 1}\right)} \]
  11. Final simplification100.0%

    \[\leadsto 1 + \frac{-1}{2 + \left(2 + \frac{-2}{1 + t}\right) \cdot \left(2 + \frac{2}{-1 - t}\right)} \]
  12. Add Preprocessing

Alternative 2: 99.4% accurate, 1.1× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;t \leq -0.72 \lor \neg \left(t \leq 0.78\right):\\ \;\;\;\;1 - \left(0.16666666666666666 - \frac{\frac{0.037037037037037035}{t} + -0.2222222222222222}{t}\right)\\ \mathbf{else}:\\ \;\;\;\;1 + \frac{-1}{2 + \left(t \cdot \left(2 + -2 \cdot t\right)\right) \cdot \left(2 \cdot t\right)}\\ \end{array} \end{array} \]
;; Alternative 2: 1/t series when t <= -0.72 or t > 0.78, else a polynomial-in-t form.
(FPCore (t)
 :precision binary64
 (if (or (<= t -0.72) (not (<= t 0.78)))
   (-
    1.0
    (-
     0.16666666666666666
     (/ (+ (/ 0.037037037037037035 t) -0.2222222222222222) t)))
   (+ 1.0 (/ -1.0 (+ 2.0 (* (* t (+ 2.0 (* -2.0 t))) (* 2.0 t)))))))
// Alternative 2: two regimes — a 1/t series for t <= -0.72 or t > 0.78, else a polynomial form.
double code(double t) {
	double tmp;
	if ((t <= -0.72) || !(t <= 0.78)) {
		tmp = 1.0 - (0.16666666666666666 - (((0.037037037037037035 / t) + -0.2222222222222222) / t));
	} else {
		tmp = 1.0 + (-1.0 / (2.0 + ((t * (2.0 + (-2.0 * t))) * (2.0 * t))));
	}
	return tmp;
}
! Alternative 2: two regimes — a 1/t series for t <= -0.72 or t > 0.78, else a polynomial form.
real(8) function code(t)
    real(8), intent (in) :: t
    real(8) :: tmp
    if ((t <= (-0.72d0)) .or. (.not. (t <= 0.78d0))) then
        tmp = 1.0d0 - (0.16666666666666666d0 - (((0.037037037037037035d0 / t) + (-0.2222222222222222d0)) / t))
    else
        tmp = 1.0d0 + ((-1.0d0) / (2.0d0 + ((t * (2.0d0 + ((-2.0d0) * t))) * (2.0d0 * t))))
    end if
    code = tmp
end function
// Alternative 2: two regimes — a 1/t series for t <= -0.72 or t > 0.78, else a polynomial form.
public static double code(double t) {
	double tmp;
	if ((t <= -0.72) || !(t <= 0.78)) {
		tmp = 1.0 - (0.16666666666666666 - (((0.037037037037037035 / t) + -0.2222222222222222) / t));
	} else {
		tmp = 1.0 + (-1.0 / (2.0 + ((t * (2.0 + (-2.0 * t))) * (2.0 * t))));
	}
	return tmp;
}
def code(t):
	# Alternative 2, two regimes: a 1/t series when t <= -0.72 or t > 0.78,
	# and a polynomial (Taylor about 0) form in between.
	if (t <= -0.72) or not (t <= 0.78):
		series = (0.037037037037037035 / t + -0.2222222222222222) / t
		return 1.0 - (0.16666666666666666 - series)
	cubic = (t * (2.0 + -2.0 * t)) * (2.0 * t)
	return 1.0 + -1.0 / (2.0 + cubic)
# Alternative 2: two regimes — a 1/t series for t <= -0.72 or t > 0.78, else a polynomial form.
function code(t)
	tmp = 0.0
	if ((t <= -0.72) || !(t <= 0.78))
		tmp = Float64(1.0 - Float64(0.16666666666666666 - Float64(Float64(Float64(0.037037037037037035 / t) + -0.2222222222222222) / t)));
	else
		tmp = Float64(1.0 + Float64(-1.0 / Float64(2.0 + Float64(Float64(t * Float64(2.0 + Float64(-2.0 * t))) * Float64(2.0 * t)))));
	end
	return tmp
end
% Alternative 2: two regimes — a 1/t series for t <= -0.72 or t > 0.78, else a polynomial form.
function tmp_2 = code(t)
	tmp = 0.0;
	if ((t <= -0.72) || ~((t <= 0.78)))
		tmp = 1.0 - (0.16666666666666666 - (((0.037037037037037035 / t) + -0.2222222222222222) / t));
	else
		tmp = 1.0 + (-1.0 / (2.0 + ((t * (2.0 + (-2.0 * t))) * (2.0 * t))));
	end
	tmp_2 = tmp;
end
(* Alternative 2: two regimes — a 1/t series for t <= -0.72 or t > 0.78, else a polynomial form; rounded to machine precision via N. *)
code[t_] := If[Or[LessEqual[t, -0.72], N[Not[LessEqual[t, 0.78]], $MachinePrecision]], N[(1.0 - N[(0.16666666666666666 - N[(N[(N[(0.037037037037037035 / t), $MachinePrecision] + -0.2222222222222222), $MachinePrecision] / t), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(1.0 + N[(-1.0 / N[(2.0 + N[(N[(t * N[(2.0 + N[(-2.0 * t), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(2.0 * t), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;t \leq -0.72 \lor \neg \left(t \leq 0.78\right):\\
\;\;\;\;1 - \left(0.16666666666666666 - \frac{\frac{0.037037037037037035}{t} + -0.2222222222222222}{t}\right)\\

\mathbf{else}:\\
\;\;\;\;1 + \frac{-1}{2 + \left(t \cdot \left(2 + -2 \cdot t\right)\right) \cdot \left(2 \cdot t\right)}\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if t < -0.71999999999999997 or 0.78000000000000003 < t

    1. Initial program 100.0%

      \[1 - \frac{1}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)} \]
    2. Add Preprocessing
    3. Taylor expanded in t around -inf 99.5%

      \[\leadsto 1 - \color{blue}{\left(0.16666666666666666 + -1 \cdot \frac{0.037037037037037035 \cdot \frac{1}{t} - 0.2222222222222222}{t}\right)} \]
    4. Step-by-step derivation
      1. mul-1-neg99.5%

        \[\leadsto 1 - \left(0.16666666666666666 + \color{blue}{\left(-\frac{0.037037037037037035 \cdot \frac{1}{t} - 0.2222222222222222}{t}\right)}\right) \]
      2. unsub-neg99.5%

        \[\leadsto 1 - \color{blue}{\left(0.16666666666666666 - \frac{0.037037037037037035 \cdot \frac{1}{t} - 0.2222222222222222}{t}\right)} \]
      3. sub-neg99.5%

        \[\leadsto 1 - \left(0.16666666666666666 - \frac{\color{blue}{0.037037037037037035 \cdot \frac{1}{t} + \left(-0.2222222222222222\right)}}{t}\right) \]
      4. associate-*r/99.5%

        \[\leadsto 1 - \left(0.16666666666666666 - \frac{\color{blue}{\frac{0.037037037037037035 \cdot 1}{t}} + \left(-0.2222222222222222\right)}{t}\right) \]
      5. metadata-eval99.5%

        \[\leadsto 1 - \left(0.16666666666666666 - \frac{\frac{\color{blue}{0.037037037037037035}}{t} + \left(-0.2222222222222222\right)}{t}\right) \]
      6. metadata-eval99.5%

        \[\leadsto 1 - \left(0.16666666666666666 - \frac{\frac{0.037037037037037035}{t} + \color{blue}{-0.2222222222222222}}{t}\right) \]
    5. Simplified99.5%

      \[\leadsto 1 - \color{blue}{\left(0.16666666666666666 - \frac{\frac{0.037037037037037035}{t} + -0.2222222222222222}{t}\right)} \]

    if -0.71999999999999997 < t < 0.78000000000000003

    1. Initial program 100.0%

      \[1 - \frac{1}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)} \]
    2. Add Preprocessing
    3. Step-by-step derivation
      1. div-inv100.0%

        \[\leadsto 1 - \frac{1}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\color{blue}{2 \cdot \frac{1}{t}}}{1 + \frac{1}{t}}\right)} \]
      2. associate-/l*100.0%

        \[\leadsto 1 - \frac{1}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \color{blue}{2 \cdot \frac{\frac{1}{t}}{1 + \frac{1}{t}}}\right)} \]
    4. Applied egg-rr100.0%

      \[\leadsto 1 - \frac{1}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \color{blue}{2 \cdot \frac{\frac{1}{t}}{1 + \frac{1}{t}}}\right)} \]
    5. Step-by-step derivation
      1. associate-/r*100.0%

        \[\leadsto 1 - \frac{1}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - 2 \cdot \color{blue}{\frac{1}{t \cdot \left(1 + \frac{1}{t}\right)}}\right)} \]
      2. associate-*r/100.0%

        \[\leadsto 1 - \frac{1}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \color{blue}{\frac{2 \cdot 1}{t \cdot \left(1 + \frac{1}{t}\right)}}\right)} \]
      3. metadata-eval100.0%

        \[\leadsto 1 - \frac{1}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\color{blue}{2}}{t \cdot \left(1 + \frac{1}{t}\right)}\right)} \]
      4. distribute-lft-in100.0%

        \[\leadsto 1 - \frac{1}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{2}{\color{blue}{t \cdot 1 + t \cdot \frac{1}{t}}}\right)} \]
      5. *-rgt-identity100.0%

        \[\leadsto 1 - \frac{1}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{2}{\color{blue}{t} + t \cdot \frac{1}{t}}\right)} \]
      6. rgt-mult-inverse100.0%

        \[\leadsto 1 - \frac{1}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{2}{t + \color{blue}{1}}\right)} \]
    6. Simplified100.0%

      \[\leadsto 1 - \frac{1}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \color{blue}{\frac{2}{t + 1}}\right)} \]
    7. Step-by-step derivation
      1. sub-neg100.0%

        \[\leadsto 1 - \frac{1}{2 + \color{blue}{\left(2 + \left(-\frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)\right)} \cdot \left(2 - \frac{2}{t + 1}\right)} \]
      2. distribute-neg-frac100.0%

        \[\leadsto 1 - \frac{1}{2 + \left(2 + \color{blue}{\frac{-\frac{2}{t}}{1 + \frac{1}{t}}}\right) \cdot \left(2 - \frac{2}{t + 1}\right)} \]
      3. distribute-neg-frac100.0%

        \[\leadsto 1 - \frac{1}{2 + \left(2 + \frac{\color{blue}{\frac{-2}{t}}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{2}{t + 1}\right)} \]
      4. metadata-eval100.0%

        \[\leadsto 1 - \frac{1}{2 + \left(2 + \frac{\frac{\color{blue}{-2}}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{2}{t + 1}\right)} \]
    8. Applied egg-rr100.0%

      \[\leadsto 1 - \frac{1}{2 + \color{blue}{\left(2 + \frac{\frac{-2}{t}}{1 + \frac{1}{t}}\right)} \cdot \left(2 - \frac{2}{t + 1}\right)} \]
    9. Step-by-step derivation
      1. associate-/r*100.0%

        \[\leadsto 1 - \frac{1}{2 + \left(2 + \color{blue}{\frac{-2}{t \cdot \left(1 + \frac{1}{t}\right)}}\right) \cdot \left(2 - \frac{2}{t + 1}\right)} \]
      2. distribute-lft-in100.0%

        \[\leadsto 1 - \frac{1}{2 + \left(2 + \frac{-2}{\color{blue}{t \cdot 1 + t \cdot \frac{1}{t}}}\right) \cdot \left(2 - \frac{2}{t + 1}\right)} \]
      3. *-rgt-identity100.0%

        \[\leadsto 1 - \frac{1}{2 + \left(2 + \frac{-2}{\color{blue}{t} + t \cdot \frac{1}{t}}\right) \cdot \left(2 - \frac{2}{t + 1}\right)} \]
      4. rgt-mult-inverse100.0%

        \[\leadsto 1 - \frac{1}{2 + \left(2 + \frac{-2}{t + \color{blue}{1}}\right) \cdot \left(2 - \frac{2}{t + 1}\right)} \]
    10. Simplified100.0%

      \[\leadsto 1 - \frac{1}{2 + \color{blue}{\left(2 + \frac{-2}{t + 1}\right)} \cdot \left(2 - \frac{2}{t + 1}\right)} \]
    11. Taylor expanded in t around 0 99.7%

      \[\leadsto 1 - \frac{1}{2 + \left(2 + \frac{-2}{t + 1}\right) \cdot \color{blue}{\left(2 \cdot t\right)}} \]
    12. Taylor expanded in t around 0 99.7%

      \[\leadsto 1 - \frac{1}{2 + \color{blue}{\left(t \cdot \left(2 + -2 \cdot t\right)\right)} \cdot \left(2 \cdot t\right)} \]
    13. Step-by-step derivation
      1. *-commutative99.7%

        \[\leadsto 1 - \frac{1}{2 + \left(t \cdot \left(2 + \color{blue}{t \cdot -2}\right)\right) \cdot \left(2 \cdot t\right)} \]
    14. Simplified99.7%

      \[\leadsto 1 - \frac{1}{2 + \color{blue}{\left(t \cdot \left(2 + t \cdot -2\right)\right)} \cdot \left(2 \cdot t\right)} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification99.6%

    \[\leadsto \begin{array}{l} \mathbf{if}\;t \leq -0.72 \lor \neg \left(t \leq 0.78\right):\\ \;\;\;\;1 - \left(0.16666666666666666 - \frac{\frac{0.037037037037037035}{t} + -0.2222222222222222}{t}\right)\\ \mathbf{else}:\\ \;\;\;\;1 + \frac{-1}{2 + \left(t \cdot \left(2 + -2 \cdot t\right)\right) \cdot \left(2 \cdot t\right)}\\ \end{array} \]
  5. Add Preprocessing

Alternative 3: 99.4% accurate, 1.1× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;t \leq -0.72:\\ \;\;\;\;1 - \left(0.16666666666666666 - \frac{\frac{0.037037037037037035}{t} + -0.2222222222222222}{t}\right)\\ \mathbf{elif}\;t \leq 1.6:\\ \;\;\;\;1 + \frac{-1}{2 + \left(t \cdot \left(2 + -2 \cdot t\right)\right) \cdot \left(2 \cdot t\right)}\\ \mathbf{else}:\\ \;\;\;\;1 - \frac{1}{6 - \frac{8 - \frac{12 + \frac{-16}{t}}{t}}{t}}\\ \end{array} \end{array} \]
;; Alternative 3: three regimes — 1/t series for t <= -0.72, polynomial form for t <= 1.6, nested-division 1/t form otherwise.
(FPCore (t)
 :precision binary64
 (if (<= t -0.72)
   (-
    1.0
    (-
     0.16666666666666666
     (/ (+ (/ 0.037037037037037035 t) -0.2222222222222222) t)))
   (if (<= t 1.6)
     (+ 1.0 (/ -1.0 (+ 2.0 (* (* t (+ 2.0 (* -2.0 t))) (* 2.0 t)))))
     (- 1.0 (/ 1.0 (- 6.0 (/ (- 8.0 (/ (+ 12.0 (/ -16.0 t)) t)) t)))))))
// Alternative 3: three regimes — 1/t series (t <= -0.72), polynomial (t <= 1.6), nested-division 1/t form (t > 1.6).
double code(double t) {
	double tmp;
	if (t <= -0.72) {
		tmp = 1.0 - (0.16666666666666666 - (((0.037037037037037035 / t) + -0.2222222222222222) / t));
	} else if (t <= 1.6) {
		tmp = 1.0 + (-1.0 / (2.0 + ((t * (2.0 + (-2.0 * t))) * (2.0 * t))));
	} else {
		tmp = 1.0 - (1.0 / (6.0 - ((8.0 - ((12.0 + (-16.0 / t)) / t)) / t)));
	}
	return tmp;
}
! Alternative 3: three regimes — 1/t series (t <= -0.72), polynomial (t <= 1.6), nested-division 1/t form (t > 1.6).
real(8) function code(t)
    real(8), intent (in) :: t
    real(8) :: tmp
    if (t <= (-0.72d0)) then
        tmp = 1.0d0 - (0.16666666666666666d0 - (((0.037037037037037035d0 / t) + (-0.2222222222222222d0)) / t))
    else if (t <= 1.6d0) then
        tmp = 1.0d0 + ((-1.0d0) / (2.0d0 + ((t * (2.0d0 + ((-2.0d0) * t))) * (2.0d0 * t))))
    else
        tmp = 1.0d0 - (1.0d0 / (6.0d0 - ((8.0d0 - ((12.0d0 + ((-16.0d0) / t)) / t)) / t)))
    end if
    code = tmp
end function
// Alternative 3: three regimes — 1/t series (t <= -0.72), polynomial (t <= 1.6), nested-division 1/t form (t > 1.6).
public static double code(double t) {
	double tmp;
	if (t <= -0.72) {
		tmp = 1.0 - (0.16666666666666666 - (((0.037037037037037035 / t) + -0.2222222222222222) / t));
	} else if (t <= 1.6) {
		tmp = 1.0 + (-1.0 / (2.0 + ((t * (2.0 + (-2.0 * t))) * (2.0 * t))));
	} else {
		tmp = 1.0 - (1.0 / (6.0 - ((8.0 - ((12.0 + (-16.0 / t)) / t)) / t)));
	}
	return tmp;
}
def code(t):
	# Alternative 3, three regimes: a 1/t series for t <= -0.72, a polynomial
	# (Taylor about 0) form for -0.72 < t <= 1.6, and a nested-division 1/t
	# form for t > 1.6.
	tmp = 0
	if t <= -0.72:
		tmp = 1.0 - (0.16666666666666666 - (((0.037037037037037035 / t) + -0.2222222222222222) / t))
	elif t <= 1.6:
		tmp = 1.0 + (-1.0 / (2.0 + ((t * (2.0 + (-2.0 * t))) * (2.0 * t))))
	else:
		tmp = 1.0 - (1.0 / (6.0 - ((8.0 - ((12.0 + (-16.0 / t)) / t)) / t)))
	return tmp
# Alternative 3: three regimes — 1/t series (t <= -0.72), polynomial (t <= 1.6), nested-division 1/t form (t > 1.6).
function code(t)
	tmp = 0.0
	if (t <= -0.72)
		tmp = Float64(1.0 - Float64(0.16666666666666666 - Float64(Float64(Float64(0.037037037037037035 / t) + -0.2222222222222222) / t)));
	elseif (t <= 1.6)
		tmp = Float64(1.0 + Float64(-1.0 / Float64(2.0 + Float64(Float64(t * Float64(2.0 + Float64(-2.0 * t))) * Float64(2.0 * t)))));
	else
		tmp = Float64(1.0 - Float64(1.0 / Float64(6.0 - Float64(Float64(8.0 - Float64(Float64(12.0 + Float64(-16.0 / t)) / t)) / t))));
	end
	return tmp
end
% Alternative 3: three regimes — 1/t series (t <= -0.72), polynomial (t <= 1.6), nested-division 1/t form (t > 1.6).
function tmp_2 = code(t)
	tmp = 0.0;
	if (t <= -0.72)
		tmp = 1.0 - (0.16666666666666666 - (((0.037037037037037035 / t) + -0.2222222222222222) / t));
	elseif (t <= 1.6)
		tmp = 1.0 + (-1.0 / (2.0 + ((t * (2.0 + (-2.0 * t))) * (2.0 * t))));
	else
		tmp = 1.0 - (1.0 / (6.0 - ((8.0 - ((12.0 + (-16.0 / t)) / t)) / t)));
	end
	tmp_2 = tmp;
end
(* Alternative 3: three regimes — 1/t series (t <= -0.72), polynomial (t <= 1.6), nested-division 1/t form (t > 1.6); rounded to machine precision via N. *)
code[t_] := If[LessEqual[t, -0.72], N[(1.0 - N[(0.16666666666666666 - N[(N[(N[(0.037037037037037035 / t), $MachinePrecision] + -0.2222222222222222), $MachinePrecision] / t), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], If[LessEqual[t, 1.6], N[(1.0 + N[(-1.0 / N[(2.0 + N[(N[(t * N[(2.0 + N[(-2.0 * t), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(2.0 * t), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(1.0 - N[(1.0 / N[(6.0 - N[(N[(8.0 - N[(N[(12.0 + N[(-16.0 / t), $MachinePrecision]), $MachinePrecision] / t), $MachinePrecision]), $MachinePrecision] / t), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;t \leq -0.72:\\
\;\;\;\;1 - \left(0.16666666666666666 - \frac{\frac{0.037037037037037035}{t} + -0.2222222222222222}{t}\right)\\

\mathbf{elif}\;t \leq 1.6:\\
\;\;\;\;1 + \frac{-1}{2 + \left(t \cdot \left(2 + -2 \cdot t\right)\right) \cdot \left(2 \cdot t\right)}\\

\mathbf{else}:\\
\;\;\;\;1 - \frac{1}{6 - \frac{8 - \frac{12 + \frac{-16}{t}}{t}}{t}}\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if t < -0.71999999999999997

    1. Initial program 100.0%

      \[1 - \frac{1}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)} \]
    2. Add Preprocessing
    3. Taylor expanded in t around -inf 100.0%

      \[\leadsto 1 - \color{blue}{\left(0.16666666666666666 + -1 \cdot \frac{0.037037037037037035 \cdot \frac{1}{t} - 0.2222222222222222}{t}\right)} \]
    4. Step-by-step derivation
      1. mul-1-neg100.0%

        \[\leadsto 1 - \left(0.16666666666666666 + \color{blue}{\left(-\frac{0.037037037037037035 \cdot \frac{1}{t} - 0.2222222222222222}{t}\right)}\right) \]
      2. unsub-neg100.0%

        \[\leadsto 1 - \color{blue}{\left(0.16666666666666666 - \frac{0.037037037037037035 \cdot \frac{1}{t} - 0.2222222222222222}{t}\right)} \]
      3. sub-neg100.0%

        \[\leadsto 1 - \left(0.16666666666666666 - \frac{\color{blue}{0.037037037037037035 \cdot \frac{1}{t} + \left(-0.2222222222222222\right)}}{t}\right) \]
      4. associate-*r/100.0%

        \[\leadsto 1 - \left(0.16666666666666666 - \frac{\color{blue}{\frac{0.037037037037037035 \cdot 1}{t}} + \left(-0.2222222222222222\right)}{t}\right) \]
      5. metadata-eval100.0%

        \[\leadsto 1 - \left(0.16666666666666666 - \frac{\frac{\color{blue}{0.037037037037037035}}{t} + \left(-0.2222222222222222\right)}{t}\right) \]
      6. metadata-eval100.0%

        \[\leadsto 1 - \left(0.16666666666666666 - \frac{\frac{0.037037037037037035}{t} + \color{blue}{-0.2222222222222222}}{t}\right) \]
    5. Simplified100.0%

      \[\leadsto 1 - \color{blue}{\left(0.16666666666666666 - \frac{\frac{0.037037037037037035}{t} + -0.2222222222222222}{t}\right)} \]

    if -0.71999999999999997 < t < 1.6000000000000001

    1. Initial program 100.0%

      \[1 - \frac{1}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)} \]
    2. Add Preprocessing
    3. Step-by-step derivation
      1. div-inv100.0%

        \[\leadsto 1 - \frac{1}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\color{blue}{2 \cdot \frac{1}{t}}}{1 + \frac{1}{t}}\right)} \]
      2. associate-/l*100.0%

        \[\leadsto 1 - \frac{1}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \color{blue}{2 \cdot \frac{\frac{1}{t}}{1 + \frac{1}{t}}}\right)} \]
    4. Applied egg-rr100.0%

      \[\leadsto 1 - \frac{1}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \color{blue}{2 \cdot \frac{\frac{1}{t}}{1 + \frac{1}{t}}}\right)} \]
    5. Step-by-step derivation
      1. associate-/r*100.0%

        \[\leadsto 1 - \frac{1}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - 2 \cdot \color{blue}{\frac{1}{t \cdot \left(1 + \frac{1}{t}\right)}}\right)} \]
      2. associate-*r/100.0%

        \[\leadsto 1 - \frac{1}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \color{blue}{\frac{2 \cdot 1}{t \cdot \left(1 + \frac{1}{t}\right)}}\right)} \]
      3. metadata-eval100.0%

        \[\leadsto 1 - \frac{1}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\color{blue}{2}}{t \cdot \left(1 + \frac{1}{t}\right)}\right)} \]
      4. distribute-lft-in100.0%

        \[\leadsto 1 - \frac{1}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{2}{\color{blue}{t \cdot 1 + t \cdot \frac{1}{t}}}\right)} \]
      5. *-rgt-identity100.0%

        \[\leadsto 1 - \frac{1}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{2}{\color{blue}{t} + t \cdot \frac{1}{t}}\right)} \]
      6. rgt-mult-inverse100.0%

        \[\leadsto 1 - \frac{1}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{2}{t + \color{blue}{1}}\right)} \]
    6. Simplified100.0%

      \[\leadsto 1 - \frac{1}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \color{blue}{\frac{2}{t + 1}}\right)} \]
    7. Step-by-step derivation
      1. sub-neg100.0%

        \[\leadsto 1 - \frac{1}{2 + \color{blue}{\left(2 + \left(-\frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)\right)} \cdot \left(2 - \frac{2}{t + 1}\right)} \]
      2. distribute-neg-frac100.0%

        \[\leadsto 1 - \frac{1}{2 + \left(2 + \color{blue}{\frac{-\frac{2}{t}}{1 + \frac{1}{t}}}\right) \cdot \left(2 - \frac{2}{t + 1}\right)} \]
      3. distribute-neg-frac100.0%

        \[\leadsto 1 - \frac{1}{2 + \left(2 + \frac{\color{blue}{\frac{-2}{t}}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{2}{t + 1}\right)} \]
      4. metadata-eval100.0%

        \[\leadsto 1 - \frac{1}{2 + \left(2 + \frac{\frac{\color{blue}{-2}}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{2}{t + 1}\right)} \]
    8. Applied egg-rr100.0%

      \[\leadsto 1 - \frac{1}{2 + \color{blue}{\left(2 + \frac{\frac{-2}{t}}{1 + \frac{1}{t}}\right)} \cdot \left(2 - \frac{2}{t + 1}\right)} \]
    9. Step-by-step derivation
      1. associate-/r*100.0%

        \[\leadsto 1 - \frac{1}{2 + \left(2 + \color{blue}{\frac{-2}{t \cdot \left(1 + \frac{1}{t}\right)}}\right) \cdot \left(2 - \frac{2}{t + 1}\right)} \]
      2. distribute-lft-in100.0%

        \[\leadsto 1 - \frac{1}{2 + \left(2 + \frac{-2}{\color{blue}{t \cdot 1 + t \cdot \frac{1}{t}}}\right) \cdot \left(2 - \frac{2}{t + 1}\right)} \]
      3. *-rgt-identity100.0%

        \[\leadsto 1 - \frac{1}{2 + \left(2 + \frac{-2}{\color{blue}{t} + t \cdot \frac{1}{t}}\right) \cdot \left(2 - \frac{2}{t + 1}\right)} \]
      4. rgt-mult-inverse100.0%

        \[\leadsto 1 - \frac{1}{2 + \left(2 + \frac{-2}{t + \color{blue}{1}}\right) \cdot \left(2 - \frac{2}{t + 1}\right)} \]
    10. Simplified100.0%

      \[\leadsto 1 - \frac{1}{2 + \color{blue}{\left(2 + \frac{-2}{t + 1}\right)} \cdot \left(2 - \frac{2}{t + 1}\right)} \]
    11. Taylor expanded in t around 0 99.7%

      \[\leadsto 1 - \frac{1}{2 + \left(2 + \frac{-2}{t + 1}\right) \cdot \color{blue}{\left(2 \cdot t\right)}} \]
    12. Taylor expanded in t around 0 99.7%

      \[\leadsto 1 - \frac{1}{2 + \color{blue}{\left(t \cdot \left(2 + -2 \cdot t\right)\right)} \cdot \left(2 \cdot t\right)} \]
    13. Step-by-step derivation
      1. *-commutative99.7%

        \[\leadsto 1 - \frac{1}{2 + \left(t \cdot \left(2 + \color{blue}{t \cdot -2}\right)\right) \cdot \left(2 \cdot t\right)} \]
    14. Simplified99.7%

      \[\leadsto 1 - \frac{1}{2 + \color{blue}{\left(t \cdot \left(2 + t \cdot -2\right)\right)} \cdot \left(2 \cdot t\right)} \]

    if 1.6000000000000001 < t

    1. Initial program 100.0%

      \[1 - \frac{1}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)} \]
    2. Add Preprocessing
    3. Taylor expanded in t around -inf 99.2%

      \[\leadsto 1 - \frac{1}{2 + \color{blue}{\left(4 + -1 \cdot \frac{8 + -1 \cdot \frac{12 - 16 \cdot \frac{1}{t}}{t}}{t}\right)}} \]
    4. Step-by-step derivation
      1. mul-1-neg99.2%

        \[\leadsto 1 - \frac{1}{2 + \left(4 + \color{blue}{\left(-\frac{8 + -1 \cdot \frac{12 - 16 \cdot \frac{1}{t}}{t}}{t}\right)}\right)} \]
      2. unsub-neg99.2%

        \[\leadsto 1 - \frac{1}{2 + \color{blue}{\left(4 - \frac{8 + -1 \cdot \frac{12 - 16 \cdot \frac{1}{t}}{t}}{t}\right)}} \]
      3. mul-1-neg99.2%

        \[\leadsto 1 - \frac{1}{2 + \left(4 - \frac{8 + \color{blue}{\left(-\frac{12 - 16 \cdot \frac{1}{t}}{t}\right)}}{t}\right)} \]
      4. unsub-neg99.2%

        \[\leadsto 1 - \frac{1}{2 + \left(4 - \frac{\color{blue}{8 - \frac{12 - 16 \cdot \frac{1}{t}}{t}}}{t}\right)} \]
      5. sub-neg99.2%

        \[\leadsto 1 - \frac{1}{2 + \left(4 - \frac{8 - \frac{\color{blue}{12 + \left(-16 \cdot \frac{1}{t}\right)}}{t}}{t}\right)} \]
      6. associate-*r/99.2%

        \[\leadsto 1 - \frac{1}{2 + \left(4 - \frac{8 - \frac{12 + \left(-\color{blue}{\frac{16 \cdot 1}{t}}\right)}{t}}{t}\right)} \]
      7. metadata-eval99.2%

        \[\leadsto 1 - \frac{1}{2 + \left(4 - \frac{8 - \frac{12 + \left(-\frac{\color{blue}{16}}{t}\right)}{t}}{t}\right)} \]
      8. distribute-neg-frac99.2%

        \[\leadsto 1 - \frac{1}{2 + \left(4 - \frac{8 - \frac{12 + \color{blue}{\frac{-16}{t}}}{t}}{t}\right)} \]
      9. metadata-eval99.2%

        \[\leadsto 1 - \frac{1}{2 + \left(4 - \frac{8 - \frac{12 + \frac{\color{blue}{-16}}{t}}{t}}{t}\right)} \]
    5. Simplified99.2%

      \[\leadsto 1 - \frac{1}{2 + \color{blue}{\left(4 - \frac{8 - \frac{12 + \frac{-16}{t}}{t}}{t}\right)}} \]
    6. Step-by-step derivation
      1. *-un-lft-identity99.2%

        \[\leadsto 1 - \color{blue}{1 \cdot \frac{1}{2 + \left(4 - \frac{8 - \frac{12 + \frac{-16}{t}}{t}}{t}\right)}} \]
      2. associate-+r-99.2%

        \[\leadsto 1 - 1 \cdot \frac{1}{\color{blue}{\left(2 + 4\right) - \frac{8 - \frac{12 + \frac{-16}{t}}{t}}{t}}} \]
      3. metadata-eval99.2%

        \[\leadsto 1 - 1 \cdot \frac{1}{\color{blue}{6} - \frac{8 - \frac{12 + \frac{-16}{t}}{t}}{t}} \]
    7. Applied egg-rr99.2%

      \[\leadsto 1 - \color{blue}{1 \cdot \frac{1}{6 - \frac{8 - \frac{12 + \frac{-16}{t}}{t}}{t}}} \]
    8. Step-by-step derivation
      1. *-lft-identity99.2%

        \[\leadsto 1 - \color{blue}{\frac{1}{6 - \frac{8 - \frac{12 + \frac{-16}{t}}{t}}{t}}} \]
    9. Simplified99.2%

      \[\leadsto 1 - \color{blue}{\frac{1}{6 - \frac{8 - \frac{12 + \frac{-16}{t}}{t}}{t}}} \]
  3. Recombined 3 regimes into one program.
  4. Final simplification99.7%

    \[\leadsto \begin{array}{l} \mathbf{if}\;t \leq -0.72:\\ \;\;\;\;1 - \left(0.16666666666666666 - \frac{\frac{0.037037037037037035}{t} + -0.2222222222222222}{t}\right)\\ \mathbf{elif}\;t \leq 1.6:\\ \;\;\;\;1 + \frac{-1}{2 + \left(t \cdot \left(2 + -2 \cdot t\right)\right) \cdot \left(2 \cdot t\right)}\\ \mathbf{else}:\\ \;\;\;\;1 - \frac{1}{6 - \frac{8 - \frac{12 + \frac{-16}{t}}{t}}{t}}\\ \end{array} \]
  5. Add Preprocessing

Alternative 4: 99.4% accurate, 1.2× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;t \leq -1.2 \lor \neg \left(t \leq 0.6\right):\\ \;\;\;\;1 + \left(\frac{\frac{0.037037037037037035}{t} + -0.2222222222222222}{t} - 0.16666666666666666\right)\\ \mathbf{else}:\\ \;\;\;\;1 + \frac{-1}{2 + t \cdot \left(4 + \frac{-4}{1 + t}\right)}\\ \end{array} \end{array} \]
;; Alternative 4: 1/t series when t <= -1.2 or t > 0.6, else 1 + (-1)/(2 + t*(4 - 4/(1+t))).
(FPCore (t)
 :precision binary64
 (if (or (<= t -1.2) (not (<= t 0.6)))
   (+
    1.0
    (-
     (/ (+ (/ 0.037037037037037035 t) -0.2222222222222222) t)
     0.16666666666666666))
   (+ 1.0 (/ -1.0 (+ 2.0 (* t (+ 4.0 (/ -4.0 (+ 1.0 t)))))))))
/* Alternative 4 (binary64): regime split of 1 - 1/(2 + t1*t1) where
 * t1 = 2 - (2/t)/(1 + 1/t).  The literal constants are 1/27, -2/9 and
 * 1/6, taken from the Taylor expansion about t -> ±inf in the
 * derivation; the regime boundaries are -1.2 and 0.6. */
double code(double t) {
	double tmp;
	if ((t <= -1.2) || !(t <= 0.6)) {
		/* |t| large: series 1 + (((1/27)/t - 2/9)/t - 1/6). */
		tmp = 1.0 + ((((0.037037037037037035 / t) + -0.2222222222222222) / t) - 0.16666666666666666);
	} else {
		/* Near zero: denominator rewritten as 2 + t*(4 - 4/(1+t)),
		   avoiding the ill-conditioned nested divisions by t. */
		tmp = 1.0 + (-1.0 / (2.0 + (t * (4.0 + (-4.0 / (1.0 + t))))));
	}
	return tmp;
}
real(8) function code(t)
    real(8), intent (in) :: t
    real(8) :: tmp
    if ((t <= (-1.2d0)) .or. (.not. (t <= 0.6d0))) then
        tmp = 1.0d0 + ((((0.037037037037037035d0 / t) + (-0.2222222222222222d0)) / t) - 0.16666666666666666d0)
    else
        tmp = 1.0d0 + ((-1.0d0) / (2.0d0 + (t * (4.0d0 + ((-4.0d0) / (1.0d0 + t))))))
    end if
    code = tmp
end function
/** Alternative 4 (binary64): regime split of 1 - 1/(2 + t1*t1) where
 *  t1 = 2 - (2/t)/(1 + 1/t).  Constants are 1/27, -2/9 and 1/6 from
 *  the series expansion about t -> ±inf; boundaries are -1.2 and 0.6. */
public static double code(double t) {
	double tmp;
	if ((t <= -1.2) || !(t <= 0.6)) {
		// |t| large: series 1 + (((1/27)/t - 2/9)/t - 1/6).
		tmp = 1.0 + ((((0.037037037037037035 / t) + -0.2222222222222222) / t) - 0.16666666666666666);
	} else {
		// Near zero: denominator rewritten as 2 + t*(4 - 4/(1+t)).
		tmp = 1.0 + (-1.0 / (2.0 + (t * (4.0 + (-4.0 / (1.0 + t))))));
	}
	return tmp;
}
def code(t):
	"""Alternative 4: piecewise evaluation of 1 - 1/(2 + t1**2),
	t1 = 2 - (2/t)/(1 + 1/t).

	Outside (-1.2, 0.6] a series in 1/t is used (constants 1/27, -2/9,
	1/6); inside, the denominator is rewritten as 2 + t*(4 - 4/(1+t)).
	Arithmetic and branch selection match the original regime split
	exactly.
	"""
	if t <= -1.2 or not t <= 0.6:
		# Series about t -> ±inf.
		head = (0.037037037037037035 / t) + -0.2222222222222222
		return 1.0 + ((head / t) - 0.16666666666666666)
	# Rearranged rational form, well-conditioned near zero.
	return 1.0 + (-1.0 / (2.0 + (t * (4.0 + (-4.0 / (1.0 + t))))))
function code(t)
	tmp = 0.0
	if ((t <= -1.2) || !(t <= 0.6))
		tmp = Float64(1.0 + Float64(Float64(Float64(Float64(0.037037037037037035 / t) + -0.2222222222222222) / t) - 0.16666666666666666));
	else
		tmp = Float64(1.0 + Float64(-1.0 / Float64(2.0 + Float64(t * Float64(4.0 + Float64(-4.0 / Float64(1.0 + t)))))));
	end
	return tmp
end
function tmp_2 = code(t)
	tmp = 0.0;
	if ((t <= -1.2) || ~((t <= 0.6)))
		tmp = 1.0 + ((((0.037037037037037035 / t) + -0.2222222222222222) / t) - 0.16666666666666666);
	else
		tmp = 1.0 + (-1.0 / (2.0 + (t * (4.0 + (-4.0 / (1.0 + t))))));
	end
	tmp_2 = tmp;
end
code[t_] := If[Or[LessEqual[t, -1.2], N[Not[LessEqual[t, 0.6]], $MachinePrecision]], N[(1.0 + N[(N[(N[(N[(0.037037037037037035 / t), $MachinePrecision] + -0.2222222222222222), $MachinePrecision] / t), $MachinePrecision] - 0.16666666666666666), $MachinePrecision]), $MachinePrecision], N[(1.0 + N[(-1.0 / N[(2.0 + N[(t * N[(4.0 + N[(-4.0 / N[(1.0 + t), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;t \leq -1.2 \lor \neg \left(t \leq 0.6\right):\\
\;\;\;\;1 + \left(\frac{\frac{0.037037037037037035}{t} + -0.2222222222222222}{t} - 0.16666666666666666\right)\\

\mathbf{else}:\\
\;\;\;\;1 + \frac{-1}{2 + t \cdot \left(4 + \frac{-4}{1 + t}\right)}\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if t < -1.19999999999999996 or 0.599999999999999978 < t

    1. Initial program 100.0%

      \[1 - \frac{1}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)} \]
    2. Add Preprocessing
    3. Taylor expanded in t around -inf 99.5%

      \[\leadsto 1 - \color{blue}{\left(0.16666666666666666 + -1 \cdot \frac{0.037037037037037035 \cdot \frac{1}{t} - 0.2222222222222222}{t}\right)} \]
    4. Step-by-step derivation
      1. mul-1-neg99.5%

        \[\leadsto 1 - \left(0.16666666666666666 + \color{blue}{\left(-\frac{0.037037037037037035 \cdot \frac{1}{t} - 0.2222222222222222}{t}\right)}\right) \]
      2. unsub-neg99.5%

        \[\leadsto 1 - \color{blue}{\left(0.16666666666666666 - \frac{0.037037037037037035 \cdot \frac{1}{t} - 0.2222222222222222}{t}\right)} \]
      3. sub-neg99.5%

        \[\leadsto 1 - \left(0.16666666666666666 - \frac{\color{blue}{0.037037037037037035 \cdot \frac{1}{t} + \left(-0.2222222222222222\right)}}{t}\right) \]
      4. associate-*r/99.5%

        \[\leadsto 1 - \left(0.16666666666666666 - \frac{\color{blue}{\frac{0.037037037037037035 \cdot 1}{t}} + \left(-0.2222222222222222\right)}{t}\right) \]
      5. metadata-eval99.5%

        \[\leadsto 1 - \left(0.16666666666666666 - \frac{\frac{\color{blue}{0.037037037037037035}}{t} + \left(-0.2222222222222222\right)}{t}\right) \]
      6. metadata-eval99.5%

        \[\leadsto 1 - \left(0.16666666666666666 - \frac{\frac{0.037037037037037035}{t} + \color{blue}{-0.2222222222222222}}{t}\right) \]
    5. Simplified99.5%

      \[\leadsto 1 - \color{blue}{\left(0.16666666666666666 - \frac{\frac{0.037037037037037035}{t} + -0.2222222222222222}{t}\right)} \]

    if -1.19999999999999996 < t < 0.599999999999999978

    1. Initial program 100.0%

      \[1 - \frac{1}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)} \]
    2. Add Preprocessing
    3. Step-by-step derivation
      1. div-inv100.0%

        \[\leadsto 1 - \frac{1}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\color{blue}{2 \cdot \frac{1}{t}}}{1 + \frac{1}{t}}\right)} \]
      2. associate-/l*100.0%

        \[\leadsto 1 - \frac{1}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \color{blue}{2 \cdot \frac{\frac{1}{t}}{1 + \frac{1}{t}}}\right)} \]
    4. Applied egg-rr100.0%

      \[\leadsto 1 - \frac{1}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \color{blue}{2 \cdot \frac{\frac{1}{t}}{1 + \frac{1}{t}}}\right)} \]
    5. Step-by-step derivation
      1. associate-/r*100.0%

        \[\leadsto 1 - \frac{1}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - 2 \cdot \color{blue}{\frac{1}{t \cdot \left(1 + \frac{1}{t}\right)}}\right)} \]
      2. associate-*r/100.0%

        \[\leadsto 1 - \frac{1}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \color{blue}{\frac{2 \cdot 1}{t \cdot \left(1 + \frac{1}{t}\right)}}\right)} \]
      3. metadata-eval100.0%

        \[\leadsto 1 - \frac{1}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\color{blue}{2}}{t \cdot \left(1 + \frac{1}{t}\right)}\right)} \]
      4. distribute-lft-in100.0%

        \[\leadsto 1 - \frac{1}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{2}{\color{blue}{t \cdot 1 + t \cdot \frac{1}{t}}}\right)} \]
      5. *-rgt-identity100.0%

        \[\leadsto 1 - \frac{1}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{2}{\color{blue}{t} + t \cdot \frac{1}{t}}\right)} \]
      6. rgt-mult-inverse100.0%

        \[\leadsto 1 - \frac{1}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{2}{t + \color{blue}{1}}\right)} \]
    6. Simplified100.0%

      \[\leadsto 1 - \frac{1}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \color{blue}{\frac{2}{t + 1}}\right)} \]
    7. Step-by-step derivation
      1. sub-neg100.0%

        \[\leadsto 1 - \frac{1}{2 + \color{blue}{\left(2 + \left(-\frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)\right)} \cdot \left(2 - \frac{2}{t + 1}\right)} \]
      2. distribute-neg-frac100.0%

        \[\leadsto 1 - \frac{1}{2 + \left(2 + \color{blue}{\frac{-\frac{2}{t}}{1 + \frac{1}{t}}}\right) \cdot \left(2 - \frac{2}{t + 1}\right)} \]
      3. distribute-neg-frac100.0%

        \[\leadsto 1 - \frac{1}{2 + \left(2 + \frac{\color{blue}{\frac{-2}{t}}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{2}{t + 1}\right)} \]
      4. metadata-eval100.0%

        \[\leadsto 1 - \frac{1}{2 + \left(2 + \frac{\frac{\color{blue}{-2}}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{2}{t + 1}\right)} \]
    8. Applied egg-rr100.0%

      \[\leadsto 1 - \frac{1}{2 + \color{blue}{\left(2 + \frac{\frac{-2}{t}}{1 + \frac{1}{t}}\right)} \cdot \left(2 - \frac{2}{t + 1}\right)} \]
    9. Step-by-step derivation
      1. associate-/r*100.0%

        \[\leadsto 1 - \frac{1}{2 + \left(2 + \color{blue}{\frac{-2}{t \cdot \left(1 + \frac{1}{t}\right)}}\right) \cdot \left(2 - \frac{2}{t + 1}\right)} \]
      2. distribute-lft-in100.0%

        \[\leadsto 1 - \frac{1}{2 + \left(2 + \frac{-2}{\color{blue}{t \cdot 1 + t \cdot \frac{1}{t}}}\right) \cdot \left(2 - \frac{2}{t + 1}\right)} \]
      3. *-rgt-identity100.0%

        \[\leadsto 1 - \frac{1}{2 + \left(2 + \frac{-2}{\color{blue}{t} + t \cdot \frac{1}{t}}\right) \cdot \left(2 - \frac{2}{t + 1}\right)} \]
      4. rgt-mult-inverse100.0%

        \[\leadsto 1 - \frac{1}{2 + \left(2 + \frac{-2}{t + \color{blue}{1}}\right) \cdot \left(2 - \frac{2}{t + 1}\right)} \]
    10. Simplified100.0%

      \[\leadsto 1 - \frac{1}{2 + \color{blue}{\left(2 + \frac{-2}{t + 1}\right)} \cdot \left(2 - \frac{2}{t + 1}\right)} \]
    11. Taylor expanded in t around 0 99.7%

      \[\leadsto 1 - \frac{1}{2 + \left(2 + \frac{-2}{t + 1}\right) \cdot \color{blue}{\left(2 \cdot t\right)}} \]
    12. Step-by-step derivation
      1. *-commutative99.7%

        \[\leadsto 1 - \frac{1}{2 + \color{blue}{\left(2 \cdot t\right) \cdot \left(2 + \frac{-2}{t + 1}\right)}} \]
      2. +-commutative99.7%

        \[\leadsto 1 - \frac{1}{2 + \left(2 \cdot t\right) \cdot \color{blue}{\left(\frac{-2}{t + 1} + 2\right)}} \]
      3. distribute-lft-in99.7%

        \[\leadsto 1 - \frac{1}{2 + \color{blue}{\left(\left(2 \cdot t\right) \cdot \frac{-2}{t + 1} + \left(2 \cdot t\right) \cdot 2\right)}} \]
      4. *-commutative99.7%

        \[\leadsto 1 - \frac{1}{2 + \left(\color{blue}{\left(t \cdot 2\right)} \cdot \frac{-2}{t + 1} + \left(2 \cdot t\right) \cdot 2\right)} \]
      5. *-commutative99.7%

        \[\leadsto 1 - \frac{1}{2 + \left(\left(t \cdot 2\right) \cdot \frac{-2}{t + 1} + \color{blue}{\left(t \cdot 2\right)} \cdot 2\right)} \]
    13. Applied egg-rr99.7%

      \[\leadsto 1 - \frac{1}{2 + \color{blue}{\left(\left(t \cdot 2\right) \cdot \frac{-2}{t + 1} + \left(t \cdot 2\right) \cdot 2\right)}} \]
    14. Step-by-step derivation
      1. +-commutative99.7%

        \[\leadsto 1 - \frac{1}{2 + \color{blue}{\left(\left(t \cdot 2\right) \cdot 2 + \left(t \cdot 2\right) \cdot \frac{-2}{t + 1}\right)}} \]
      2. associate-*l*99.7%

        \[\leadsto 1 - \frac{1}{2 + \left(\color{blue}{t \cdot \left(2 \cdot 2\right)} + \left(t \cdot 2\right) \cdot \frac{-2}{t + 1}\right)} \]
      3. metadata-eval99.7%

        \[\leadsto 1 - \frac{1}{2 + \left(t \cdot \color{blue}{4} + \left(t \cdot 2\right) \cdot \frac{-2}{t + 1}\right)} \]
      4. associate-*l*99.7%

        \[\leadsto 1 - \frac{1}{2 + \left(t \cdot 4 + \color{blue}{t \cdot \left(2 \cdot \frac{-2}{t + 1}\right)}\right)} \]
      5. distribute-lft-out99.7%

        \[\leadsto 1 - \frac{1}{2 + \color{blue}{t \cdot \left(4 + 2 \cdot \frac{-2}{t + 1}\right)}} \]
      6. metadata-eval99.7%

        \[\leadsto 1 - \frac{1}{2 + t \cdot \left(4 + 2 \cdot \frac{\color{blue}{-2}}{t + 1}\right)} \]
      7. distribute-neg-frac99.7%

        \[\leadsto 1 - \frac{1}{2 + t \cdot \left(4 + 2 \cdot \color{blue}{\left(-\frac{2}{t + 1}\right)}\right)} \]
      8. distribute-neg-frac299.7%

        \[\leadsto 1 - \frac{1}{2 + t \cdot \left(4 + 2 \cdot \color{blue}{\frac{2}{-\left(t + 1\right)}}\right)} \]
      9. distribute-neg-in99.7%

        \[\leadsto 1 - \frac{1}{2 + t \cdot \left(4 + 2 \cdot \frac{2}{\color{blue}{\left(-t\right) + \left(-1\right)}}\right)} \]
      10. metadata-eval99.7%

        \[\leadsto 1 - \frac{1}{2 + t \cdot \left(4 + 2 \cdot \frac{2}{\left(-t\right) + \color{blue}{-1}}\right)} \]
      11. +-commutative99.7%

        \[\leadsto 1 - \frac{1}{2 + t \cdot \left(4 + 2 \cdot \frac{2}{\color{blue}{-1 + \left(-t\right)}}\right)} \]
      12. associate-*r/99.7%

        \[\leadsto 1 - \frac{1}{2 + t \cdot \left(4 + \color{blue}{\frac{2 \cdot 2}{-1 + \left(-t\right)}}\right)} \]
      13. metadata-eval99.7%

        \[\leadsto 1 - \frac{1}{2 + t \cdot \left(4 + \frac{\color{blue}{4}}{-1 + \left(-t\right)}\right)} \]
      14. metadata-eval99.7%

        \[\leadsto 1 - \frac{1}{2 + t \cdot \left(4 + \frac{4}{\color{blue}{\left(-1\right)} + \left(-t\right)}\right)} \]
      15. distribute-neg-in99.7%

        \[\leadsto 1 - \frac{1}{2 + t \cdot \left(4 + \frac{4}{\color{blue}{-\left(1 + t\right)}}\right)} \]
      16. rgt-mult-inverse99.7%

        \[\leadsto 1 - \frac{1}{2 + t \cdot \left(4 + \frac{4}{-\left(\color{blue}{t \cdot \frac{1}{t}} + t\right)}\right)} \]
      17. *-rgt-identity99.7%

        \[\leadsto 1 - \frac{1}{2 + t \cdot \left(4 + \frac{4}{-\left(t \cdot \frac{1}{t} + \color{blue}{t \cdot 1}\right)}\right)} \]
      18. distribute-lft-in99.7%

        \[\leadsto 1 - \frac{1}{2 + t \cdot \left(4 + \frac{4}{-\color{blue}{t \cdot \left(\frac{1}{t} + 1\right)}}\right)} \]
      19. +-commutative99.7%

        \[\leadsto 1 - \frac{1}{2 + t \cdot \left(4 + \frac{4}{-t \cdot \color{blue}{\left(1 + \frac{1}{t}\right)}}\right)} \]
      20. distribute-neg-frac299.7%

        \[\leadsto 1 - \frac{1}{2 + t \cdot \left(4 + \color{blue}{\left(-\frac{4}{t \cdot \left(1 + \frac{1}{t}\right)}\right)}\right)} \]
      21. distribute-neg-frac99.7%

        \[\leadsto 1 - \frac{1}{2 + t \cdot \left(4 + \color{blue}{\frac{-4}{t \cdot \left(1 + \frac{1}{t}\right)}}\right)} \]
      22. metadata-eval99.7%

        \[\leadsto 1 - \frac{1}{2 + t \cdot \left(4 + \frac{\color{blue}{-4}}{t \cdot \left(1 + \frac{1}{t}\right)}\right)} \]
    15. Simplified99.7%

      \[\leadsto 1 - \frac{1}{2 + \color{blue}{t \cdot \left(4 + \frac{-4}{t + 1}\right)}} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification — 99.6% accurate

    \[\leadsto \begin{array}{l} \mathbf{if}\;t \leq -1.2 \lor \neg \left(t \leq 0.6\right):\\ \;\;\;\;1 + \left(\frac{\frac{0.037037037037037035}{t} + -0.2222222222222222}{t} - 0.16666666666666666\right)\\ \mathbf{else}:\\ \;\;\;\;1 + \frac{-1}{2 + t \cdot \left(4 + \frac{-4}{1 + t}\right)}\\ \end{array} \]
  5. Add Preprocessing

Alternative 5: 99.4% accurate, 1.3× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;t \leq -0.65 \lor \neg \left(t \leq 0.44\right):\\ \;\;\;\;1 - \left(0.16666666666666666 - \frac{\frac{0.037037037037037035}{t} + -0.2222222222222222}{t}\right)\\ \mathbf{else}:\\ \;\;\;\;1 + \frac{-1}{2 + \left(2 \cdot t\right) \cdot \left(2 \cdot t\right)}\\ \end{array} \end{array} \]
(FPCore (t)
 :precision binary64
 (if (or (<= t -0.65) (not (<= t 0.44)))
   (-
    1.0
    (-
     0.16666666666666666
     (/ (+ (/ 0.037037037037037035 t) -0.2222222222222222) t)))
   (+ 1.0 (/ -1.0 (+ 2.0 (* (* 2.0 t) (* 2.0 t)))))))
/* Alternative 5 (binary64): regime split of 1 - 1/(2 + t1*t1) where
 * t1 = 2 - (2/t)/(1 + 1/t).  Constants 1/27, -2/9 and 1/6 come from
 * the series about t -> ±inf; boundaries are -0.65 and 0.44. */
double code(double t) {
	double tmp;
	if ((t <= -0.65) || !(t <= 0.44)) {
		/* |t| large: series 1 - (1/6 - ((1/27)/t - 2/9)/t). */
		tmp = 1.0 - (0.16666666666666666 - (((0.037037037037037035 / t) + -0.2222222222222222) / t));
	} else {
		/* Near zero: t1*t1 approximated by (2t)^2 (Taylor at 0). */
		tmp = 1.0 + (-1.0 / (2.0 + ((2.0 * t) * (2.0 * t))));
	}
	return tmp;
}
real(8) function code(t)
    real(8), intent (in) :: t
    real(8) :: tmp
    if ((t <= (-0.65d0)) .or. (.not. (t <= 0.44d0))) then
        tmp = 1.0d0 - (0.16666666666666666d0 - (((0.037037037037037035d0 / t) + (-0.2222222222222222d0)) / t))
    else
        tmp = 1.0d0 + ((-1.0d0) / (2.0d0 + ((2.0d0 * t) * (2.0d0 * t))))
    end if
    code = tmp
end function
/** Alternative 5 (binary64): regime split of 1 - 1/(2 + t1*t1) where
 *  t1 = 2 - (2/t)/(1 + 1/t).  Constants 1/27, -2/9 and 1/6 from the
 *  series about t -> ±inf; boundaries are -0.65 and 0.44. */
public static double code(double t) {
	double tmp;
	if ((t <= -0.65) || !(t <= 0.44)) {
		// |t| large: series 1 - (1/6 - ((1/27)/t - 2/9)/t).
		tmp = 1.0 - (0.16666666666666666 - (((0.037037037037037035 / t) + -0.2222222222222222) / t));
	} else {
		// Near zero: t1*t1 approximated by (2t)^2 (Taylor at 0).
		tmp = 1.0 + (-1.0 / (2.0 + ((2.0 * t) * (2.0 * t))));
	}
	return tmp;
}
def code(t):
	"""Alternative 5: piecewise evaluation of 1 - 1/(2 + t1**2),
	t1 = 2 - (2/t)/(1 + 1/t).

	For t outside (-0.65, 0.44] a series in 1/t is used; near zero the
	square is approximated by (2t)**2.  Identical arithmetic to the
	original regime split.
	"""
	if t <= -0.65 or not t <= 0.44:
		# Series about t -> ±inf (constants 1/27, -2/9, 1/6).
		head = (0.037037037037037035 / t) + -0.2222222222222222
		return 1.0 - (0.16666666666666666 - (head / t))
	# Taylor form at t = 0: denominator 2 + (2t)^2.
	double_t = 2.0 * t
	return 1.0 + (-1.0 / (2.0 + (double_t * double_t)))
function code(t)
	tmp = 0.0
	if ((t <= -0.65) || !(t <= 0.44))
		tmp = Float64(1.0 - Float64(0.16666666666666666 - Float64(Float64(Float64(0.037037037037037035 / t) + -0.2222222222222222) / t)));
	else
		tmp = Float64(1.0 + Float64(-1.0 / Float64(2.0 + Float64(Float64(2.0 * t) * Float64(2.0 * t)))));
	end
	return tmp
end
function tmp_2 = code(t)
	tmp = 0.0;
	if ((t <= -0.65) || ~((t <= 0.44)))
		tmp = 1.0 - (0.16666666666666666 - (((0.037037037037037035 / t) + -0.2222222222222222) / t));
	else
		tmp = 1.0 + (-1.0 / (2.0 + ((2.0 * t) * (2.0 * t))));
	end
	tmp_2 = tmp;
end
code[t_] := If[Or[LessEqual[t, -0.65], N[Not[LessEqual[t, 0.44]], $MachinePrecision]], N[(1.0 - N[(0.16666666666666666 - N[(N[(N[(0.037037037037037035 / t), $MachinePrecision] + -0.2222222222222222), $MachinePrecision] / t), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(1.0 + N[(-1.0 / N[(2.0 + N[(N[(2.0 * t), $MachinePrecision] * N[(2.0 * t), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;t \leq -0.65 \lor \neg \left(t \leq 0.44\right):\\
\;\;\;\;1 - \left(0.16666666666666666 - \frac{\frac{0.037037037037037035}{t} + -0.2222222222222222}{t}\right)\\

\mathbf{else}:\\
\;\;\;\;1 + \frac{-1}{2 + \left(2 \cdot t\right) \cdot \left(2 \cdot t\right)}\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if t < -0.650000000000000022 or 0.440000000000000002 < t

    1. Initial program 100.0%

      \[1 - \frac{1}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)} \]
    2. Add Preprocessing
    3. Taylor expanded in t around -inf 99.5%

      \[\leadsto 1 - \color{blue}{\left(0.16666666666666666 + -1 \cdot \frac{0.037037037037037035 \cdot \frac{1}{t} - 0.2222222222222222}{t}\right)} \]
    4. Step-by-step derivation
      1. mul-1-neg99.5%

        \[\leadsto 1 - \left(0.16666666666666666 + \color{blue}{\left(-\frac{0.037037037037037035 \cdot \frac{1}{t} - 0.2222222222222222}{t}\right)}\right) \]
      2. unsub-neg99.5%

        \[\leadsto 1 - \color{blue}{\left(0.16666666666666666 - \frac{0.037037037037037035 \cdot \frac{1}{t} - 0.2222222222222222}{t}\right)} \]
      3. sub-neg99.5%

        \[\leadsto 1 - \left(0.16666666666666666 - \frac{\color{blue}{0.037037037037037035 \cdot \frac{1}{t} + \left(-0.2222222222222222\right)}}{t}\right) \]
      4. associate-*r/99.5%

        \[\leadsto 1 - \left(0.16666666666666666 - \frac{\color{blue}{\frac{0.037037037037037035 \cdot 1}{t}} + \left(-0.2222222222222222\right)}{t}\right) \]
      5. metadata-eval99.5%

        \[\leadsto 1 - \left(0.16666666666666666 - \frac{\frac{\color{blue}{0.037037037037037035}}{t} + \left(-0.2222222222222222\right)}{t}\right) \]
      6. metadata-eval99.5%

        \[\leadsto 1 - \left(0.16666666666666666 - \frac{\frac{0.037037037037037035}{t} + \color{blue}{-0.2222222222222222}}{t}\right) \]
    5. Simplified99.5%

      \[\leadsto 1 - \color{blue}{\left(0.16666666666666666 - \frac{\frac{0.037037037037037035}{t} + -0.2222222222222222}{t}\right)} \]

    if -0.650000000000000022 < t < 0.440000000000000002

    1. Initial program 100.0%

      \[1 - \frac{1}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)} \]
    2. Add Preprocessing
    3. Step-by-step derivation
      1. div-inv100.0%

        \[\leadsto 1 - \frac{1}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\color{blue}{2 \cdot \frac{1}{t}}}{1 + \frac{1}{t}}\right)} \]
      2. associate-/l*100.0%

        \[\leadsto 1 - \frac{1}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \color{blue}{2 \cdot \frac{\frac{1}{t}}{1 + \frac{1}{t}}}\right)} \]
    4. Applied egg-rr100.0%

      \[\leadsto 1 - \frac{1}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \color{blue}{2 \cdot \frac{\frac{1}{t}}{1 + \frac{1}{t}}}\right)} \]
    5. Step-by-step derivation
      1. associate-/r*100.0%

        \[\leadsto 1 - \frac{1}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - 2 \cdot \color{blue}{\frac{1}{t \cdot \left(1 + \frac{1}{t}\right)}}\right)} \]
      2. associate-*r/100.0%

        \[\leadsto 1 - \frac{1}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \color{blue}{\frac{2 \cdot 1}{t \cdot \left(1 + \frac{1}{t}\right)}}\right)} \]
      3. metadata-eval100.0%

        \[\leadsto 1 - \frac{1}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\color{blue}{2}}{t \cdot \left(1 + \frac{1}{t}\right)}\right)} \]
      4. distribute-lft-in100.0%

        \[\leadsto 1 - \frac{1}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{2}{\color{blue}{t \cdot 1 + t \cdot \frac{1}{t}}}\right)} \]
      5. *-rgt-identity100.0%

        \[\leadsto 1 - \frac{1}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{2}{\color{blue}{t} + t \cdot \frac{1}{t}}\right)} \]
      6. rgt-mult-inverse100.0%

        \[\leadsto 1 - \frac{1}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{2}{t + \color{blue}{1}}\right)} \]
    6. Simplified100.0%

      \[\leadsto 1 - \frac{1}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \color{blue}{\frac{2}{t + 1}}\right)} \]
    7. Step-by-step derivation
      1. sub-neg100.0%

        \[\leadsto 1 - \frac{1}{2 + \color{blue}{\left(2 + \left(-\frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)\right)} \cdot \left(2 - \frac{2}{t + 1}\right)} \]
      2. distribute-neg-frac100.0%

        \[\leadsto 1 - \frac{1}{2 + \left(2 + \color{blue}{\frac{-\frac{2}{t}}{1 + \frac{1}{t}}}\right) \cdot \left(2 - \frac{2}{t + 1}\right)} \]
      3. distribute-neg-frac100.0%

        \[\leadsto 1 - \frac{1}{2 + \left(2 + \frac{\color{blue}{\frac{-2}{t}}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{2}{t + 1}\right)} \]
      4. metadata-eval100.0%

        \[\leadsto 1 - \frac{1}{2 + \left(2 + \frac{\frac{\color{blue}{-2}}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{2}{t + 1}\right)} \]
    8. Applied egg-rr100.0%

      \[\leadsto 1 - \frac{1}{2 + \color{blue}{\left(2 + \frac{\frac{-2}{t}}{1 + \frac{1}{t}}\right)} \cdot \left(2 - \frac{2}{t + 1}\right)} \]
    9. Step-by-step derivation
      1. associate-/r*100.0%

        \[\leadsto 1 - \frac{1}{2 + \left(2 + \color{blue}{\frac{-2}{t \cdot \left(1 + \frac{1}{t}\right)}}\right) \cdot \left(2 - \frac{2}{t + 1}\right)} \]
      2. distribute-lft-in100.0%

        \[\leadsto 1 - \frac{1}{2 + \left(2 + \frac{-2}{\color{blue}{t \cdot 1 + t \cdot \frac{1}{t}}}\right) \cdot \left(2 - \frac{2}{t + 1}\right)} \]
      3. *-rgt-identity100.0%

        \[\leadsto 1 - \frac{1}{2 + \left(2 + \frac{-2}{\color{blue}{t} + t \cdot \frac{1}{t}}\right) \cdot \left(2 - \frac{2}{t + 1}\right)} \]
      4. rgt-mult-inverse100.0%

        \[\leadsto 1 - \frac{1}{2 + \left(2 + \frac{-2}{t + \color{blue}{1}}\right) \cdot \left(2 - \frac{2}{t + 1}\right)} \]
    10. Simplified100.0%

      \[\leadsto 1 - \frac{1}{2 + \color{blue}{\left(2 + \frac{-2}{t + 1}\right)} \cdot \left(2 - \frac{2}{t + 1}\right)} \]
    11. Taylor expanded in t around 0 99.7%

      \[\leadsto 1 - \frac{1}{2 + \left(2 + \frac{-2}{t + 1}\right) \cdot \color{blue}{\left(2 \cdot t\right)}} \]
    12. Taylor expanded in t around 0 99.7%

      \[\leadsto 1 - \frac{1}{2 + \color{blue}{\left(2 \cdot t\right)} \cdot \left(2 \cdot t\right)} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification — 99.6% accurate

    \[\leadsto \begin{array}{l} \mathbf{if}\;t \leq -0.65 \lor \neg \left(t \leq 0.44\right):\\ \;\;\;\;1 - \left(0.16666666666666666 - \frac{\frac{0.037037037037037035}{t} + -0.2222222222222222}{t}\right)\\ \mathbf{else}:\\ \;\;\;\;1 + \frac{-1}{2 + \left(2 \cdot t\right) \cdot \left(2 \cdot t\right)}\\ \end{array} \]
  5. Add Preprocessing

Alternative 6: 99.2% accurate, 1.4× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;t \leq -0.52 \lor \neg \left(t \leq 0.23\right):\\ \;\;\;\;1 + \left(\frac{\frac{0.037037037037037035}{t} + -0.2222222222222222}{t} - 0.16666666666666666\right)\\ \mathbf{else}:\\ \;\;\;\;0.5\\ \end{array} \end{array} \]
(FPCore (t)
 :precision binary64
 (if (or (<= t -0.52) (not (<= t 0.23)))
   (+
    1.0
    (-
     (/ (+ (/ 0.037037037037037035 t) -0.2222222222222222) t)
     0.16666666666666666))
   0.5))
/* Alternative 6 (binary64): regime split of 1 - 1/(2 + t1*t1) where
 * t1 = 2 - (2/t)/(1 + 1/t).  Constants 1/27, -2/9 and 1/6 come from
 * the series about t -> ±inf; near zero the whole expression is
 * collapsed to its constant Taylor term 0.5. */
double code(double t) {
	double tmp;
	if ((t <= -0.52) || !(t <= 0.23)) {
		/* |t| large: series 1 + (((1/27)/t - 2/9)/t - 1/6). */
		tmp = 1.0 + ((((0.037037037037037035 / t) + -0.2222222222222222) / t) - 0.16666666666666666);
	} else {
		/* Constant term of the Taylor expansion at t = 0. */
		tmp = 0.5;
	}
	return tmp;
}
real(8) function code(t)
    real(8), intent (in) :: t
    real(8) :: tmp
    if ((t <= (-0.52d0)) .or. (.not. (t <= 0.23d0))) then
        tmp = 1.0d0 + ((((0.037037037037037035d0 / t) + (-0.2222222222222222d0)) / t) - 0.16666666666666666d0)
    else
        tmp = 0.5d0
    end if
    code = tmp
end function
/** Alternative 6 (binary64): regime split of 1 - 1/(2 + t1*t1) where
 *  t1 = 2 - (2/t)/(1 + 1/t).  Series in 1/t for large |t|; constant
 *  Taylor term 0.5 near zero. */
public static double code(double t) {
	double tmp;
	if ((t <= -0.52) || !(t <= 0.23)) {
		// |t| large: series 1 + (((1/27)/t - 2/9)/t - 1/6).
		tmp = 1.0 + ((((0.037037037037037035 / t) + -0.2222222222222222) / t) - 0.16666666666666666);
	} else {
		// Constant term of the Taylor expansion at t = 0.
		tmp = 0.5;
	}
	return tmp;
}
def code(t):
	"""Alternative 6: piecewise evaluation of 1 - 1/(2 + t1**2),
	t1 = 2 - (2/t)/(1 + 1/t).

	Large |t| uses a series in 1/t (constants 1/27, -2/9, 1/6); for
	t in (-0.52, 0.23] the constant Taylor term 0.5 is returned.
	"""
	if t <= -0.52 or not t <= 0.23:
		head = (0.037037037037037035 / t) + -0.2222222222222222
		return 1.0 + ((head / t) - 0.16666666666666666)
	# Constant term of the Taylor expansion at t = 0.
	return 0.5
function code(t)
	tmp = 0.0
	if ((t <= -0.52) || !(t <= 0.23))
		tmp = Float64(1.0 + Float64(Float64(Float64(Float64(0.037037037037037035 / t) + -0.2222222222222222) / t) - 0.16666666666666666));
	else
		tmp = 0.5;
	end
	return tmp
end
function tmp_2 = code(t)
	tmp = 0.0;
	if ((t <= -0.52) || ~((t <= 0.23)))
		tmp = 1.0 + ((((0.037037037037037035 / t) + -0.2222222222222222) / t) - 0.16666666666666666);
	else
		tmp = 0.5;
	end
	tmp_2 = tmp;
end
code[t_] := If[Or[LessEqual[t, -0.52], N[Not[LessEqual[t, 0.23]], $MachinePrecision]], N[(1.0 + N[(N[(N[(N[(0.037037037037037035 / t), $MachinePrecision] + -0.2222222222222222), $MachinePrecision] / t), $MachinePrecision] - 0.16666666666666666), $MachinePrecision]), $MachinePrecision], 0.5]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;t \leq -0.52 \lor \neg \left(t \leq 0.23\right):\\
\;\;\;\;1 + \left(\frac{\frac{0.037037037037037035}{t} + -0.2222222222222222}{t} - 0.16666666666666666\right)\\

\mathbf{else}:\\
\;\;\;\;0.5\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if t < -0.52000000000000002 or 0.23000000000000001 < t

    1. Initial program 100.0%

      \[1 - \frac{1}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)} \]
    2. Add Preprocessing
    3. Taylor expanded in t around -inf 99.5%

      \[\leadsto 1 - \color{blue}{\left(0.16666666666666666 + -1 \cdot \frac{0.037037037037037035 \cdot \frac{1}{t} - 0.2222222222222222}{t}\right)} \]
    4. Step-by-step derivation
      1. mul-1-neg99.5%

        \[\leadsto 1 - \left(0.16666666666666666 + \color{blue}{\left(-\frac{0.037037037037037035 \cdot \frac{1}{t} - 0.2222222222222222}{t}\right)}\right) \]
      2. unsub-neg99.5%

        \[\leadsto 1 - \color{blue}{\left(0.16666666666666666 - \frac{0.037037037037037035 \cdot \frac{1}{t} - 0.2222222222222222}{t}\right)} \]
      3. sub-neg99.5%

        \[\leadsto 1 - \left(0.16666666666666666 - \frac{\color{blue}{0.037037037037037035 \cdot \frac{1}{t} + \left(-0.2222222222222222\right)}}{t}\right) \]
      4. associate-*r/99.5%

        \[\leadsto 1 - \left(0.16666666666666666 - \frac{\color{blue}{\frac{0.037037037037037035 \cdot 1}{t}} + \left(-0.2222222222222222\right)}{t}\right) \]
      5. metadata-eval99.5%

        \[\leadsto 1 - \left(0.16666666666666666 - \frac{\frac{\color{blue}{0.037037037037037035}}{t} + \left(-0.2222222222222222\right)}{t}\right) \]
      6. metadata-eval99.5%

        \[\leadsto 1 - \left(0.16666666666666666 - \frac{\frac{0.037037037037037035}{t} + \color{blue}{-0.2222222222222222}}{t}\right) \]
    5. Simplified99.5%

      \[\leadsto 1 - \color{blue}{\left(0.16666666666666666 - \frac{\frac{0.037037037037037035}{t} + -0.2222222222222222}{t}\right)} \]

    if -0.52000000000000002 < t < 0.23000000000000001

    1. Initial program 100.0%

      \[1 - \frac{1}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)} \]
    2. Add Preprocessing
    3. Taylor expanded in t around 0 99.4%

      \[\leadsto 1 - \color{blue}{0.5} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification — 99.4% accurate

    \[\leadsto \begin{array}{l} \mathbf{if}\;t \leq -0.52 \lor \neg \left(t \leq 0.23\right):\\ \;\;\;\;1 + \left(\frac{\frac{0.037037037037037035}{t} + -0.2222222222222222}{t} - 0.16666666666666666\right)\\ \mathbf{else}:\\ \;\;\;\;0.5\\ \end{array} \]
  5. Add Preprocessing

Alternative 7: 99.1% accurate, 1.7× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;t \leq -0.48 \lor \neg \left(t \leq 0.66\right):\\ \;\;\;\;1 - \left(0.16666666666666666 + \frac{0.2222222222222222}{t}\right)\\ \mathbf{else}:\\ \;\;\;\;0.5\\ \end{array} \end{array} \]
(FPCore (t)
 :precision binary64
 (if (or (<= t -0.48) (not (<= t 0.66)))
   (- 1.0 (+ 0.16666666666666666 (/ 0.2222222222222222 t)))
   0.5))
/* Alternative 7 (binary64): regime split of 1 - 1/(2 + t1*t1) where
 * t1 = 2 - (2/t)/(1 + 1/t).  First-order series 1 - (1/6 + (2/9)/t)
 * for large |t|; constant Taylor term 0.5 near zero. */
double code(double t) {
	double tmp;
	if ((t <= -0.48) || !(t <= 0.66)) {
		/* |t| large: 1 - (1/6 + (2/9)/t). */
		tmp = 1.0 - (0.16666666666666666 + (0.2222222222222222 / t));
	} else {
		/* Constant term of the Taylor expansion at t = 0. */
		tmp = 0.5;
	}
	return tmp;
}
real(8) function code(t)
    real(8), intent (in) :: t
    real(8) :: tmp
    if ((t <= (-0.48d0)) .or. (.not. (t <= 0.66d0))) then
        tmp = 1.0d0 - (0.16666666666666666d0 + (0.2222222222222222d0 / t))
    else
        tmp = 0.5d0
    end if
    code = tmp
end function
/** Alternative 7 (binary64): regime split of 1 - 1/(2 + t1*t1) where
 *  t1 = 2 - (2/t)/(1 + 1/t).  First-order series 1 - (1/6 + (2/9)/t)
 *  for large |t|; constant Taylor term 0.5 near zero. */
public static double code(double t) {
	double tmp;
	if ((t <= -0.48) || !(t <= 0.66)) {
		// |t| large: 1 - (1/6 + (2/9)/t).
		tmp = 1.0 - (0.16666666666666666 + (0.2222222222222222 / t));
	} else {
		// Constant term of the Taylor expansion at t = 0.
		tmp = 0.5;
	}
	return tmp;
}
def code(t):
	"""Alternative 7: piecewise evaluation of 1 - 1/(2 + t1**2),
	t1 = 2 - (2/t)/(1 + 1/t).

	Large |t|: first-order series 1 - (1/6 + (2/9)/t).  For t in
	(-0.48, 0.66]: constant Taylor term 0.5.
	"""
	if t <= -0.48 or not t <= 0.66:
		return 1.0 - (0.16666666666666666 + (0.2222222222222222 / t))
	return 0.5
function code(t)
	tmp = 0.0
	if ((t <= -0.48) || !(t <= 0.66))
		tmp = Float64(1.0 - Float64(0.16666666666666666 + Float64(0.2222222222222222 / t)));
	else
		tmp = 0.5;
	end
	return tmp
end
function tmp_2 = code(t)
	tmp = 0.0;
	if ((t <= -0.48) || ~((t <= 0.66)))
		tmp = 1.0 - (0.16666666666666666 + (0.2222222222222222 / t));
	else
		tmp = 0.5;
	end
	tmp_2 = tmp;
end
code[t_] := If[Or[LessEqual[t, -0.48], N[Not[LessEqual[t, 0.66]], $MachinePrecision]], N[(1.0 - N[(0.16666666666666666 + N[(0.2222222222222222 / t), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], 0.5]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;t \leq -0.48 \lor \neg \left(t \leq 0.66\right):\\
\;\;\;\;1 - \left(0.16666666666666666 + \frac{0.2222222222222222}{t}\right)\\

\mathbf{else}:\\
\;\;\;\;0.5\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if t < -0.47999999999999998 or 0.660000000000000031 < t

    1. Initial program 100.0%

      \[1 - \frac{1}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)} \]
    2. Add Preprocessing
    3. Taylor expanded in t around inf 98.9%

      \[\leadsto 1 - \color{blue}{\left(0.16666666666666666 + 0.2222222222222222 \cdot \frac{1}{t}\right)} \]
    4. Step-by-step derivation
      1. associate-*r/98.9%

        \[\leadsto 1 - \left(0.16666666666666666 + \color{blue}{\frac{0.2222222222222222 \cdot 1}{t}}\right) \]
      2. metadata-eval98.9%

        \[\leadsto 1 - \left(0.16666666666666666 + \frac{\color{blue}{0.2222222222222222}}{t}\right) \]
    5. Simplified98.9%

      \[\leadsto 1 - \color{blue}{\left(0.16666666666666666 + \frac{0.2222222222222222}{t}\right)} \]

    if -0.47999999999999998 < t < 0.660000000000000031

    1. Initial program 100.0%

      \[1 - \frac{1}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)} \]
    2. Add Preprocessing
    3. Taylor expanded in t around 0 99.4%

      \[\leadsto 1 - \color{blue}{0.5} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification — 99.1% accurate

    \[\leadsto \begin{array}{l} \mathbf{if}\;t \leq -0.48 \lor \neg \left(t \leq 0.66\right):\\ \;\;\;\;1 - \left(0.16666666666666666 + \frac{0.2222222222222222}{t}\right)\\ \mathbf{else}:\\ \;\;\;\;0.5\\ \end{array} \]
  5. Add Preprocessing

Alternative 8: 99.1% accurate, 1.9× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;t \leq -0.48 \lor \neg \left(t \leq 0.66\right):\\ \;\;\;\;0.8333333333333334 + \frac{-0.2222222222222222}{t}\\ \mathbf{else}:\\ \;\;\;\;0.5\\ \end{array} \end{array} \]
(FPCore (t)
 :precision binary64
 (if (or (<= t -0.48) (not (<= t 0.66)))
   (+ 0.8333333333333334 (/ -0.2222222222222222 t))
   0.5))
double code(double t) {
	/* Piecewise approximation of 1 - 1/(2 + t_1^2): the 1/t series tail
	 * on the outer regimes (t <= -0.48 or t > 0.66), constant 1/2 inside. */
	if (t > -0.48 && t <= 0.66) {
		return 0.5;
	}
	/* NaN inputs also reach here (they fail both comparisons), matching
	 * the original branch order. */
	return 0.8333333333333334 + (-0.2222222222222222 / t);
}
real(8) function code(t)
    real(8), intent (in) :: t
    logical :: outer
    ! Outer regime: t <= -0.48 or t > 0.66, where the 1/t series applies.
    outer = (t <= (-0.48d0)) .or. (.not. (t <= 0.66d0))
    if (outer) then
        code = 0.8333333333333334d0 + ((-0.2222222222222222d0) / t)
    else
        ! Inner regime collapses to the constant 1/2.
        code = 0.5d0
    end if
end function
public static double code(double t) {
	// Constant 0.5 on the inner interval (-0.48, 0.66]; otherwise the
	// asymptotic 1/t form. NaN fails both comparisons and falls through,
	// matching the original branch order.
	if (t > -0.48 && t <= 0.66) {
		return 0.5;
	}
	return 0.8333333333333334 + (-0.2222222222222222 / t);
}
def code(t):
	"""Piecewise approximation: 5/6 - (2/9)/t on the outer regimes, 0.5 inside.

	NaN fails the chained comparison and takes the outer branch, matching
	the original's branch order.
	"""
	if -0.48 < t <= 0.66:
		return 0.5
	return 0.8333333333333334 + (-0.2222222222222222 / t)
function code(t)
	# 0.5 on the inner interval (-0.48, 0.66]; otherwise the 1/t series
	# tail. NaN fails the chained comparison and takes the outer branch.
	if -0.48 < t <= 0.66
		return 0.5
	end
	return Float64(0.8333333333333334 + Float64(-0.2222222222222222 / t))
end
function tmp_2 = code(t)
	% 0.5 on the inner interval (-0.48, 0.66]; otherwise the asymptotic
	% 1/t form. NaN fails both comparisons and takes the outer branch.
	if (t > -0.48) && (t <= 0.66)
		tmp_2 = 0.5;
	else
		tmp_2 = 0.8333333333333334 + (-0.2222222222222222 / t);
	end
end
code[t_] := If[Or[LessEqual[t, -0.48], N[Not[LessEqual[t, 0.66]], $MachinePrecision]], N[(0.8333333333333334 + N[(-0.2222222222222222 / t), $MachinePrecision]), $MachinePrecision], 0.5]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;t \leq -0.48 \lor \neg \left(t \leq 0.66\right):\\
\;\;\;\;0.8333333333333334 + \frac{-0.2222222222222222}{t}\\

\mathbf{else}:\\
\;\;\;\;0.5\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if t < -0.47999999999999998 or 0.660000000000000031 < t

    1. Initial program 100.0%

      \[1 - \frac{1}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)} \]
    2. Add Preprocessing
    3. Taylor expanded in t around inf 98.9%

      \[\leadsto 1 - \color{blue}{\left(0.16666666666666666 + 0.2222222222222222 \cdot \frac{1}{t}\right)} \]
    4. Step-by-step derivation
      1. associate-*r/ (98.9%)

        \[\leadsto 1 - \left(0.16666666666666666 + \color{blue}{\frac{0.2222222222222222 \cdot 1}{t}}\right) \]
      2. metadata-eval (98.9%)

        \[\leadsto 1 - \left(0.16666666666666666 + \frac{\color{blue}{0.2222222222222222}}{t}\right) \]
    5. Simplified (98.9%)

      \[\leadsto 1 - \color{blue}{\left(0.16666666666666666 + \frac{0.2222222222222222}{t}\right)} \]
    6. Taylor expanded in t around 0 98.6%

      \[\leadsto \color{blue}{\frac{0.8333333333333334 \cdot t - 0.2222222222222222}{t}} \]
    7. Step-by-step derivation
      1. div-sub (98.6%)

        \[\leadsto \color{blue}{\frac{0.8333333333333334 \cdot t}{t} - \frac{0.2222222222222222}{t}} \]
      2. sub-neg (98.6%)

        \[\leadsto \color{blue}{\frac{0.8333333333333334 \cdot t}{t} + \left(-\frac{0.2222222222222222}{t}\right)} \]
      3. associate-/l* (98.8%)

        \[\leadsto \color{blue}{0.8333333333333334 \cdot \frac{t}{t}} + \left(-\frac{0.2222222222222222}{t}\right) \]
      4. *-inverses (98.8%)

        \[\leadsto 0.8333333333333334 \cdot \color{blue}{1} + \left(-\frac{0.2222222222222222}{t}\right) \]
      5. metadata-eval (98.8%)

        \[\leadsto \color{blue}{0.8333333333333334} + \left(-\frac{0.2222222222222222}{t}\right) \]
      6. distribute-neg-frac (98.8%)

        \[\leadsto 0.8333333333333334 + \color{blue}{\frac{-0.2222222222222222}{t}} \]
      7. metadata-eval (98.8%)

        \[\leadsto 0.8333333333333334 + \frac{\color{blue}{-0.2222222222222222}}{t} \]
    8. Simplified (98.8%)

      \[\leadsto \color{blue}{0.8333333333333334 + \frac{-0.2222222222222222}{t}} \]

    if -0.47999999999999998 < t < 0.660000000000000031

    1. Initial program 100.0%

      \[1 - \frac{1}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)} \]
    2. Add Preprocessing
    3. Taylor expanded in t around 0 99.4%

      \[\leadsto 1 - \color{blue}{0.5} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification (99.1%)

    \[\leadsto \begin{array}{l} \mathbf{if}\;t \leq -0.48 \lor \neg \left(t \leq 0.66\right):\\ \;\;\;\;0.8333333333333334 + \frac{-0.2222222222222222}{t}\\ \mathbf{else}:\\ \;\;\;\;0.5\\ \end{array} \]
  5. Add Preprocessing

Alternative 9: 98.6% accurate, 2.6× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;t \leq -0.335:\\ \;\;\;\;0.8333333333333334\\ \mathbf{elif}\;t \leq 1:\\ \;\;\;\;0.5\\ \mathbf{else}:\\ \;\;\;\;0.8333333333333334\\ \end{array} \end{array} \]
(FPCore (t)
 :precision binary64
 (if (<= t -0.335) 0.8333333333333334 (if (<= t 1.0) 0.5 0.8333333333333334)))
double code(double t) {
	/* Constant-by-regime approximation: 1/2 on the inner interval
	 * (-0.335, 1], 5/6 (the t -> +/-inf limit) on both outer tails.
	 * NaN fails both comparisons and yields the tail constant, matching
	 * the original nested-if order. */
	return (t > -0.335 && t <= 1.0) ? 0.5 : 0.8333333333333334;
}
real(8) function code(t)
    real(8), intent (in) :: t
    ! Constant per regime: 1/2 on the inner interval (-0.335, 1];
    ! 5/6 (the t -> +/-infinity limit) on both outer tails.
    if ((t > (-0.335d0)) .and. (t <= 1.0d0)) then
        code = 0.5d0
    else
        code = 0.8333333333333334d0
    end if
end function
public static double code(double t) {
	// Constant per regime: 0.5 on (-0.335, 1]; 5/6 on both outer tails.
	// NaN fails both comparisons and yields 5/6, matching the original.
	return (t > -0.335 && t <= 1.0) ? 0.5 : 0.8333333333333334;
}
def code(t):
	"""Constant-by-regime approximation: 0.5 on (-0.335, 1.0], else 5/6.

	NaN fails the chained comparison and yields 5/6, matching the
	original if/elif/else ordering.
	"""
	return 0.5 if -0.335 < t <= 1.0 else 0.8333333333333334
function code(t)
	# Constant per regime: 0.5 on (-0.335, 1]; 5/6 on both outer tails.
	# NaN fails the chained comparison and yields 5/6, as in the original.
	return -0.335 < t <= 1.0 ? 0.5 : 0.8333333333333334
end
function tmp_2 = code(t)
	% Constant per regime: 0.5 on (-0.335, 1]; 5/6 on both outer tails.
	% NaN fails both comparisons and yields 5/6, as in the original.
	if (t > -0.335) && (t <= 1.0)
		tmp_2 = 0.5;
	else
		tmp_2 = 0.8333333333333334;
	end
end
code[t_] := If[LessEqual[t, -0.335], 0.8333333333333334, If[LessEqual[t, 1.0], 0.5, 0.8333333333333334]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;t \leq -0.335:\\
\;\;\;\;0.8333333333333334\\

\mathbf{elif}\;t \leq 1:\\
\;\;\;\;0.5\\

\mathbf{else}:\\
\;\;\;\;0.8333333333333334\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if t < -0.33500000000000002 or 1 < t

    1. Initial program 100.0%

      \[1 - \frac{1}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)} \]
    2. Add Preprocessing
    3. Taylor expanded in t around inf 98.9%

      \[\leadsto 1 - \color{blue}{\left(0.16666666666666666 + 0.2222222222222222 \cdot \frac{1}{t}\right)} \]
    4. Step-by-step derivation
      1. associate-*r/ (98.9%)

        \[\leadsto 1 - \left(0.16666666666666666 + \color{blue}{\frac{0.2222222222222222 \cdot 1}{t}}\right) \]
      2. metadata-eval (98.9%)

        \[\leadsto 1 - \left(0.16666666666666666 + \frac{\color{blue}{0.2222222222222222}}{t}\right) \]
    5. Simplified (98.9%)

      \[\leadsto 1 - \color{blue}{\left(0.16666666666666666 + \frac{0.2222222222222222}{t}\right)} \]
    6. Taylor expanded in t around inf 97.3%

      \[\leadsto \color{blue}{0.8333333333333334} \]

    if -0.33500000000000002 < t < 1

    1. Initial program 100.0%

      \[1 - \frac{1}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)} \]
    2. Add Preprocessing
    3. Taylor expanded in t around 0 99.4%

      \[\leadsto 1 - \color{blue}{0.5} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification (98.2%)

    \[\leadsto \begin{array}{l} \mathbf{if}\;t \leq -0.335:\\ \;\;\;\;0.8333333333333334\\ \mathbf{elif}\;t \leq 1:\\ \;\;\;\;0.5\\ \mathbf{else}:\\ \;\;\;\;0.8333333333333334\\ \end{array} \]
  5. Add Preprocessing

Alternative 10: 58.9% accurate, 29.0× speedup?

\[\begin{array}{l} \\ 0.8333333333333334 \end{array} \]
(FPCore (t) :precision binary64 0.8333333333333334)
/* Degenerate single-constant approximation: 5/6, the value of the
 * expression's Taylor expansion as t -> inf; the input t is unused.
 * Rated 58.9% accurate overall in this report. */
double code(double t) {
	return 0.8333333333333334;
}
real(8) function code(t)
    real(8), intent (in) :: t
    ! Constant 5/6 approximation (t -> infinity limit); t is unused.
    code = 0.8333333333333334d0
end function
// Constant 5/6 approximation (t -> infinity limit); the input t is unused.
public static double code(double t) {
	return 0.8333333333333334;
}
def code(t):
	"""Constant 5/6 approximation (t -> inf limit); the input t is unused."""
	return 0.8333333333333334
# Constant 5/6 approximation (t -> inf limit); the input t is unused.
function code(t)
	return 0.8333333333333334
end
% Constant 5/6 approximation (t -> inf limit); the input t is unused.
function tmp = code(t)
	tmp = 0.8333333333333334;
end
code[t_] := 0.8333333333333334
\begin{array}{l}

\\
0.8333333333333334
\end{array}
Derivation
  1. Initial program 100.0%

    \[1 - \frac{1}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)} \]
  2. Add Preprocessing
  3. Taylor expanded in t around inf 56.2%

    \[\leadsto 1 - \color{blue}{\left(0.16666666666666666 + 0.2222222222222222 \cdot \frac{1}{t}\right)} \]
  4. Step-by-step derivation
    1. associate-*r/ (56.2%)

      \[\leadsto 1 - \left(0.16666666666666666 + \color{blue}{\frac{0.2222222222222222 \cdot 1}{t}}\right) \]
    2. metadata-eval (56.2%)

      \[\leadsto 1 - \left(0.16666666666666666 + \frac{\color{blue}{0.2222222222222222}}{t}\right) \]
  5. Simplified (56.2%)

    \[\leadsto 1 - \color{blue}{\left(0.16666666666666666 + \frac{0.2222222222222222}{t}\right)} \]
  6. Taylor expanded in t around inf 62.7%

    \[\leadsto \color{blue}{0.8333333333333334} \]
  7. Final simplification (62.7%)

    \[\leadsto 0.8333333333333334 \]
  8. Add Preprocessing

Reproduce

?
herbie shell --seed 2024079 
(FPCore (t)
  :name "Kahan p13 Example 3"
  :precision binary64
  (- 1.0 (/ 1.0 (+ 2.0 (* (- 2.0 (/ (/ 2.0 t) (+ 1.0 (/ 1.0 t)))) (- 2.0 (/ (/ 2.0 t) (+ 1.0 (/ 1.0 t)))))))))