Math FPCore C Julia Wolfram TeX
\[\frac{1 + \frac{2 \cdot t}{1 + t} \cdot \frac{2 \cdot t}{1 + t}}{2 + \frac{2 \cdot t}{1 + t} \cdot \frac{2 \cdot t}{1 + t}}
\]
↓
\[\begin{array}{l}
t_1 := \frac{4}{\frac{1}{t} + \left(t + 2\right)}\\
\frac{\mathsf{fma}\left(t, t_1, 1\right)}{\mathsf{fma}\left(t, t_1, 2\right)}
\end{array}
\]
; Input program: (1 + r*r) / (2 + r*r) where r = 2*t / (1 + t).
(FPCore (t)
:precision binary64
(/
(+ 1.0 (* (/ (* 2.0 t) (+ 1.0 t)) (/ (* 2.0 t) (+ 1.0 t))))
(+ 2.0 (* (/ (* 2.0 t) (+ 1.0 t)) (/ (* 2.0 t) (+ 1.0 t)))))) ↓
; Rewritten program: t_1 = 4 / (1/t + (t + 2)), so t * t_1 equals r*r;
; numerator and denominator then each take a single fused multiply-add.
(FPCore (t)
:precision binary64
(let* ((t_1 (/ 4.0 (+ (/ 1.0 t) (+ t 2.0)))))
(/ (fma t t_1 1.0) (fma t t_1 2.0)))) double code(double t) {
/* Direct C translation of the input program, evaluated in binary64. */
return (1.0 + (((2.0 * t) / (1.0 + t)) * ((2.0 * t) / (1.0 + t)))) / (2.0 + (((2.0 * t) / (1.0 + t)) * ((2.0 * t) / (1.0 + t))));
}
↓
double code(double t) {
double t_1 = 4.0 / ((1.0 / t) + (t + 2.0));
return fma(t, t_1, 1.0) / fma(t, t_1, 2.0);
}
function code(t)
    # r = 2t / (1 + t), with every intermediate rounded to Float64;
    # the result is (1 + r^2) / (2 + r^2).
    r = Float64(Float64(2.0 * t) / Float64(1.0 + t))
    rsq = Float64(r * r)
    return Float64(Float64(1.0 + rsq) / Float64(2.0 + rsq))
end
↓
function code(t)
    # Shared term s = 4 / (1/t + (t + 2)); t*s equals (2t/(1+t))^2,
    # so each of numerator and denominator is one fused multiply-add.
    denom_sum = Float64(Float64(1.0 / t) + Float64(t + 2.0))
    s = Float64(4.0 / denom_sum)
    return Float64(fma(t, s, 1.0) / fma(t, s, 2.0))
end
(* Input program: (1 + r^2) / (2 + r^2) with r = 2 t / (1 + t); every intermediate is rounded to $MachinePrecision via N[...]. *)
code[t_] := N[(N[(1.0 + N[(N[(N[(2.0 * t), $MachinePrecision] / N[(1.0 + t), $MachinePrecision]), $MachinePrecision] * N[(N[(2.0 * t), $MachinePrecision] / N[(1.0 + t), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(2.0 + N[(N[(N[(2.0 * t), $MachinePrecision] / N[(1.0 + t), $MachinePrecision]), $MachinePrecision] * N[(N[(2.0 * t), $MachinePrecision] / N[(1.0 + t), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
↓
(* Rewritten program: t$95$1 = 4 / (1/t + (t + 2)), so t * t$95$1 equals (2 t/(1 + t))^2; the result is (1 + t*t$95$1) / (2 + t*t$95$1), all rounded to $MachinePrecision. *)
code[t_] := Block[{t$95$1 = N[(4.0 / N[(N[(1.0 / t), $MachinePrecision] + N[(t + 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, N[(N[(t * t$95$1 + 1.0), $MachinePrecision] / N[(t * t$95$1 + 2.0), $MachinePrecision]), $MachinePrecision]]
\frac{1 + \frac{2 \cdot t}{1 + t} \cdot \frac{2 \cdot t}{1 + t}}{2 + \frac{2 \cdot t}{1 + t} \cdot \frac{2 \cdot t}{1 + t}}
↓
\begin{array}{l}
t_1 := \frac{4}{\frac{1}{t} + \left(t + 2\right)}\\
\frac{\mathsf{fma}\left(t, t_1, 1\right)}{\mathsf{fma}\left(t, t_1, 2\right)}
\end{array}
Alternatives
Alternative 1 Accuracy 100.0% Cost 14272
\[\begin{array}{l}
t_1 := \frac{4}{\frac{1}{t} + \left(t + 2\right)}\\
\frac{\mathsf{fma}\left(t, t_1, 1\right)}{\mathsf{fma}\left(t, t_1, 2\right)}
\end{array}
\]
Alternative 2 Accuracy 99.9% Cost 1984
\[\begin{array}{l}
t_1 := t \cdot \frac{\frac{t \cdot 4}{t + 1}}{t + 1}\\
\frac{1 + t_1}{2 + t_1}
\end{array}
\]
Alternative 3 Accuracy 99.4% Cost 1225
\[\begin{array}{l}
t_1 := t \cdot \left(t \cdot 4\right)\\
\mathbf{if}\;t \leq -2.1 \lor \neg \left(t \leq 0.44\right):\\
\;\;\;\;0.8333333333333334 + \left(\frac{0.037037037037037035}{t \cdot t} + \frac{-0.2222222222222222}{t}\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{1 + t_1}{2 + t_1}\\
\end{array}
\]
Alternative 4 Accuracy 99.4% Cost 969
\[\begin{array}{l}
\mathbf{if}\;t \leq -0.82 \lor \neg \left(t \leq 0.33\right):\\
\;\;\;\;0.8333333333333334 + \left(\frac{0.037037037037037035}{t \cdot t} + \frac{-0.2222222222222222}{t}\right)\\
\mathbf{else}:\\
\;\;\;\;t \cdot t + 0.5\\
\end{array}
\]
Alternative 5 Accuracy 99.3% Cost 585
\[\begin{array}{l}
\mathbf{if}\;t \leq -0.78 \lor \neg \left(t \leq 0.55\right):\\
\;\;\;\;0.8333333333333334 - \frac{0.2222222222222222}{t}\\
\mathbf{else}:\\
\;\;\;\;t \cdot t + 0.5\\
\end{array}
\]
Alternative 6 Accuracy 98.8% Cost 584
\[\begin{array}{l}
\mathbf{if}\;t \leq -0.9:\\
\;\;\;\;0.8333333333333334\\
\mathbf{elif}\;t \leq 0.58:\\
\;\;\;\;t \cdot t + 0.5\\
\mathbf{else}:\\
\;\;\;\;0.8333333333333334\\
\end{array}
\]
Alternative 7 Accuracy 98.7% Cost 328
\[\begin{array}{l}
\mathbf{if}\;t \leq -0.34:\\
\;\;\;\;0.8333333333333334\\
\mathbf{elif}\;t \leq 1:\\
\;\;\;\;0.5\\
\mathbf{else}:\\
\;\;\;\;0.8333333333333334\\
\end{array}
\]
Alternative 8 Accuracy 58.9% Cost 64
\[0.5
\]