?

Average Accuracy: 100.0% → 100.0%
Time: 7.3s
Precision: binary64
Cost: 13760

?

\[1 - \frac{1}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)} \]
\[e^{\mathsf{log1p}\left(\frac{-1}{\frac{\frac{4}{1 + t} + -8}{1 + t} + 6}\right)} \]
; Herbie input: 1 - 1/(2 + u^2) with u = 2 - (2/t)/(1 + 1/t), evaluated in binary64.
(FPCore (t)
 :precision binary64
 (-
  1.0
  (/
   1.0
   (+
    2.0
    (*
     (- 2.0 (/ (/ 2.0 t) (+ 1.0 (/ 1.0 t))))
     (- 2.0 (/ (/ 2.0 t) (+ 1.0 (/ 1.0 t)))))))))
; Herbie's rewritten output: exp(log1p(-1 / (((4/(1+t)) - 8)/(1+t) + 6))), binary64.
(FPCore (t)
 :precision binary64
 (exp (log1p (/ -1.0 (+ (/ (+ (/ 4.0 (+ 1.0 t)) -8.0) (+ 1.0 t)) 6.0)))))
/* Original expression: 1 - 1/(2 + u*u) where u = 2 - (2/t)/(1 + 1/t). */
double code(double t) {
	double ratio = (2.0 / t) / (1.0 + (1.0 / t));
	double u = 2.0 - ratio;
	return 1.0 - (1.0 / (2.0 + u * u));
}
double code(double t) {
	return exp(log1p((-1.0 / ((((4.0 / (1.0 + t)) + -8.0) / (1.0 + t)) + 6.0))));
}
// Original expression: 1 - 1/(2 + u*u) where u = 2 - (2/t)/(1 + 1/t).
public static double code(double t) {
	double ratio = (2.0 / t) / (1.0 + (1.0 / t));
	double u = 2.0 - ratio;
	return 1.0 - (1.0 / (2.0 + u * u));
}
// Rewritten expression: exp(log1p(-1/denom)), denom = ((4/(1+t)) - 8)/(1+t) + 6.
public static double code(double t) {
	double s = 1.0 + t;
	double denom = (((4.0 / s) + -8.0) / s) + 6.0;
	return Math.exp(Math.log1p(-1.0 / denom));
}
def code(t):
	# Original expression: 1 - 1/(2 + u*u) where u = 2 - (2/t)/(1 + 1/t).
	ratio = (2.0 / t) / (1.0 + (1.0 / t))
	u = 2.0 - ratio
	return 1.0 - (1.0 / (2.0 + u * u))
def code(t):
	# Rewritten expression: exp(log1p(-1/denom)), denom = ((4/(1+t)) - 8)/(1+t) + 6.
	s = 1.0 + t
	denom = (((4.0 / s) + -8.0) / s) + 6.0
	return math.exp(math.log1p(-1.0 / denom))
# Original expression: 1 - 1/(2 + u*u) where u = 2 - (2/t)/(1 + 1/t).
function code(t)
	ratio = Float64(Float64(2.0 / t) / Float64(1.0 + Float64(1.0 / t)))
	u = Float64(2.0 - ratio)
	return Float64(1.0 - Float64(1.0 / Float64(2.0 + Float64(u * u))))
end
# Rewritten expression: exp(log1p(-1/denom)), denom = ((4/(1+t)) - 8)/(1+t) + 6.
function code(t)
	s = Float64(1.0 + t)
	denom = Float64(Float64(Float64(Float64(4.0 / s) + -8.0) / s) + 6.0)
	return exp(log1p(Float64(-1.0 / denom)))
end
(* Original expression: 1 - 1/(2 + u*u) with u = 2 - (2/t)/(1 + 1/t); every intermediate is rounded via N[..., $MachinePrecision] to model binary64 arithmetic. *)
code[t_] := N[(1.0 - N[(1.0 / N[(2.0 + N[(N[(2.0 - N[(N[(2.0 / t), $MachinePrecision] / N[(1.0 + N[(1.0 / t), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(2.0 - N[(N[(2.0 / t), $MachinePrecision] / N[(1.0 + N[(1.0 / t), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
(* Rewritten expression: exp(log1p(x)) with x = -1/(((4/(1+t)) - 8)/(1+t) + 6); log1p is spelled Log[1 + x], and every intermediate is rounded via N[..., $MachinePrecision]. *)
code[t_] := N[Exp[N[Log[1 + N[(-1.0 / N[(N[(N[(N[(4.0 / N[(1.0 + t), $MachinePrecision]), $MachinePrecision] + -8.0), $MachinePrecision] / N[(1.0 + t), $MachinePrecision]), $MachinePrecision] + 6.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]], $MachinePrecision]
1 - \frac{1}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)}
e^{\mathsf{log1p}\left(\frac{-1}{\frac{\frac{4}{1 + t} + -8}{1 + t} + 6}\right)}

Error?

Try it out?

Your Program's Arguments

Results

Enter valid numbers for all inputs

Derivation?

  1. Initial program 100.0%

    \[1 - \frac{1}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)} \]
  2. Simplified 100.0%

    \[\leadsto \color{blue}{1 + \frac{-1}{\frac{\frac{4}{1 + t} + -8}{1 + t} + 6}} \]
    Proof

    [Start] 100.0

    \[ 1 - \frac{1}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)} \]

    sub-neg [=>] 100.0

    \[ \color{blue}{1 + \left(-\frac{1}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)}\right)} \]

    distribute-neg-frac [=>] 100.0

    \[ 1 + \color{blue}{\frac{-1}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)}} \]

    metadata-eval [=>] 100.0

    \[ 1 + \frac{\color{blue}{-1}}{2 + \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right)} \]

    +-commutative [=>] 100.0

    \[ 1 + \frac{-1}{\color{blue}{\left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) \cdot \left(2 - \frac{\frac{2}{t}}{1 + \frac{1}{t}}\right) + 2}} \]
  3. Applied egg-rr 100.0%

    \[\leadsto \color{blue}{e^{\mathsf{log1p}\left(\frac{-1}{\frac{\frac{4}{1 + t} + -8}{1 + t} + 6}\right)}} \]
    Proof

    [Start] 100.0

    \[ 1 + \frac{-1}{\frac{\frac{4}{1 + t} + -8}{1 + t} + 6} \]

    add-exp-log [=>] 100.0

    \[ \color{blue}{e^{\log \left(1 + \frac{-1}{\frac{\frac{4}{1 + t} + -8}{1 + t} + 6}\right)}} \]

    log1p-def [=>] 100.0

    \[ e^{\color{blue}{\mathsf{log1p}\left(\frac{-1}{\frac{\frac{4}{1 + t} + -8}{1 + t} + 6}\right)}} \]
  4. Final simplification 100.0%

    \[\leadsto e^{\mathsf{log1p}\left(\frac{-1}{\frac{\frac{4}{1 + t} + -8}{1 + t} + 6}\right)} \]

Alternatives

Alternative 1
Accuracy 100.0%
Cost 1088
\[1 + \frac{-1}{\frac{\frac{4}{1 + t} + -8}{1 + t} + 6} \]
Alternative 2
Accuracy 99.3%
Cost 969
\[\begin{array}{l} \mathbf{if}\;t \leq -0.8 \lor \neg \left(t \leq 0.34\right):\\ \;\;\;\;\frac{0.037037037037037035}{t \cdot t} + \left(0.8333333333333334 - \frac{0.2222222222222222}{t}\right)\\ \mathbf{else}:\\ \;\;\;\;t \cdot t + 0.5\\ \end{array} \]
Alternative 3
Accuracy 99.1%
Cost 585
\[\begin{array}{l} \mathbf{if}\;t \leq -0.78 \lor \neg \left(t \leq 0.56\right):\\ \;\;\;\;0.8333333333333334 - \frac{0.2222222222222222}{t}\\ \mathbf{else}:\\ \;\;\;\;t \cdot t + 0.5\\ \end{array} \]
Alternative 4
Accuracy 98.6%
Cost 584
\[\begin{array}{l} \mathbf{if}\;t \leq -0.9:\\ \;\;\;\;0.8333333333333334\\ \mathbf{elif}\;t \leq 0.56:\\ \;\;\;\;t \cdot t + 0.5\\ \mathbf{else}:\\ \;\;\;\;0.8333333333333334\\ \end{array} \]
Alternative 5
Accuracy 99.1%
Cost 584
\[\begin{array}{l} \mathbf{if}\;t \leq -0.78:\\ \;\;\;\;1 + \left(-0.16666666666666666 + \frac{-0.2222222222222222}{t}\right)\\ \mathbf{elif}\;t \leq 0.56:\\ \;\;\;\;t \cdot t + 0.5\\ \mathbf{else}:\\ \;\;\;\;0.8333333333333334 - \frac{0.2222222222222222}{t}\\ \end{array} \]
Alternative 6
Accuracy 98.4%
Cost 328
\[\begin{array}{l} \mathbf{if}\;t \leq -0.34:\\ \;\;\;\;0.8333333333333334\\ \mathbf{elif}\;t \leq 1:\\ \;\;\;\;0.5\\ \mathbf{else}:\\ \;\;\;\;0.8333333333333334\\ \end{array} \]
Alternative 7
Accuracy 58.7%
Cost 64
\[0.5 \]

Error

Reproduce?

herbie shell --seed 2023140 
; Input program for the `herbie shell` reproduction command shown above.
(FPCore (t)
  :name "Kahan p13 Example 3"
  :precision binary64
  (- 1.0 (/ 1.0 (+ 2.0 (* (- 2.0 (/ (/ 2.0 t) (+ 1.0 (/ 1.0 t)))) (- 2.0 (/ (/ 2.0 t) (+ 1.0 (/ 1.0 t)))))))))