
(FPCore (x) :precision binary64 (- (/ 1.0 (+ x 1.0)) (/ 1.0 (- x 1.0))))
double code(double x) {
    return (1.0 / (x + 1.0)) - (1.0 / (x - 1.0));
}
real(8) function code(x)
    real(8), intent (in) :: x
    code = (1.0d0 / (x + 1.0d0)) - (1.0d0 / (x - 1.0d0))
end function
public static double code(double x) {
    return (1.0 / (x + 1.0)) - (1.0 / (x - 1.0));
}
def code(x): return (1.0 / (x + 1.0)) - (1.0 / (x - 1.0))
function code(x) return Float64(Float64(1.0 / Float64(x + 1.0)) - Float64(1.0 / Float64(x - 1.0))) end
function tmp = code(x) tmp = (1.0 / (x + 1.0)) - (1.0 / (x - 1.0)); end
code[x_] := N[(N[(1.0 / N[(x + 1.0), $MachinePrecision]), $MachinePrecision] - N[(1.0 / N[(x - 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\frac{1}{x + 1} - \frac{1}{x - 1}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 4 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
Initial program:
(FPCore (x) :precision binary64 (- (/ 1.0 (+ x 1.0)) (/ 1.0 (- x 1.0))))
double code(double x) {
    return (1.0 / (x + 1.0)) - (1.0 / (x - 1.0));
}
real(8) function code(x)
    real(8), intent (in) :: x
    code = (1.0d0 / (x + 1.0d0)) - (1.0d0 / (x - 1.0d0))
end function
public static double code(double x) {
    return (1.0 / (x + 1.0)) - (1.0 / (x - 1.0));
}
def code(x): return (1.0 / (x + 1.0)) - (1.0 / (x - 1.0))
function code(x) return Float64(Float64(1.0 / Float64(x + 1.0)) - Float64(1.0 / Float64(x - 1.0))) end
function tmp = code(x) tmp = (1.0 / (x + 1.0)) - (1.0 / (x - 1.0)); end
code[x_] := N[(N[(1.0 / N[(x + 1.0), $MachinePrecision]), $MachinePrecision] - N[(1.0 / N[(x - 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\frac{1}{x + 1} - \frac{1}{x - 1}
\end{array}
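Why the initial program loses accuracy: for large |x| the two reciprocals 1/(x+1) and 1/(x-1) are nearly equal, so the subtraction cancels most of their significant bits. A minimal Python sketch (illustrative, not part of the Herbie output; the test point x = 1e8 is an arbitrary choice) compares the naive form against an exact rational reference:

from fractions import Fraction

def naive(x):
    # Direct binary64 transcription of the initial program.
    return (1.0 / (x + 1.0)) - (1.0 / (x - 1.0))

def exact(x):
    # Same expression in exact rational arithmetic, for reference.
    x = Fraction(x)
    return 1 / (x + 1) - 1 / (x - 1)

x = 1e8
print(naive(x))         # cancellation leaves only about half the digits correct
print(float(exact(x)))  # reference value, approximately -2.0e-16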
Alternative 1:
(FPCore (x) :precision binary64 (/ (/ -2.0 (+ 1.0 x)) (+ x -1.0)))
double code(double x) {
    return (-2.0 / (1.0 + x)) / (x + -1.0);
}
real(8) function code(x)
    real(8), intent (in) :: x
    code = ((-2.0d0) / (1.0d0 + x)) / (x + (-1.0d0))
end function
public static double code(double x) {
    return (-2.0 / (1.0 + x)) / (x + -1.0);
}
def code(x): return (-2.0 / (1.0 + x)) / (x + -1.0)
function code(x) return Float64(Float64(-2.0 / Float64(1.0 + x)) / Float64(x + -1.0)) end
function tmp = code(x) tmp = (-2.0 / (1.0 + x)) / (x + -1.0); end
code[x_] := N[(N[(-2.0 / N[(1.0 + x), $MachinePrecision]), $MachinePrecision] / N[(x + -1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\frac{\frac{-2}{1 + x}}{x + -1}
\end{array}
Derivation:
Initial program 79.4%
frac-sub 80.1%
associate-/r* 80.1%
*-un-lft-identity 80.1%
*-rgt-identity 80.1%
associate--l- 80.1%
+-commutative 80.1%
+-commutative 80.1%
sub-neg 80.1%
metadata-eval 80.1%
Applied egg-rr 80.1%
Taylor expanded in x around 0 99.9%
Final simplification 99.9%
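The key step above is frac-sub, which puts the two fractions over a common denominator and eliminates the cancelling subtraction of two nearly equal reciprocals; worked out:

\frac{1}{x + 1} - \frac{1}{x - 1} = \frac{(x - 1) - (x + 1)}{(x + 1)(x - 1)} = \frac{-2}{(1 + x)(x - 1)}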
Alternative 2:
(FPCore (x) :precision binary64 (if (<= x 1.0) (+ 2.0 (* x x)) (/ -2.0 (* x x))))
double code(double x) {
    double tmp;
    if (x <= 1.0) {
        tmp = 2.0 + (x * x);
    } else {
        tmp = -2.0 / (x * x);
    }
    return tmp;
}
real(8) function code(x)
    real(8), intent (in) :: x
    real(8) :: tmp
    if (x <= 1.0d0) then
        tmp = 2.0d0 + (x * x)
    else
        tmp = (-2.0d0) / (x * x)
    end if
    code = tmp
end function
public static double code(double x) {
    double tmp;
    if (x <= 1.0) {
        tmp = 2.0 + (x * x);
    } else {
        tmp = -2.0 / (x * x);
    }
    return tmp;
}
def code(x):
    tmp = 0
    if x <= 1.0:
        tmp = 2.0 + (x * x)
    else:
        tmp = -2.0 / (x * x)
    return tmp
function code(x)
    tmp = 0.0
    if (x <= 1.0)
        tmp = Float64(2.0 + Float64(x * x));
    else
        tmp = Float64(-2.0 / Float64(x * x));
    end
    return tmp
end
function tmp_2 = code(x)
    tmp = 0.0;
    if (x <= 1.0)
        tmp = 2.0 + (x * x);
    else
        tmp = -2.0 / (x * x);
    end
    tmp_2 = tmp;
end
code[x_] := If[LessEqual[x, 1.0], N[(2.0 + N[(x * x), $MachinePrecision]), $MachinePrecision], N[(-2.0 / N[(x * x), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\mathbf{if}\;x \leq 1:\\
\;\;\;\;2 + x \cdot x\\
\mathbf{else}:\\
\;\;\;\;\frac{-2}{x \cdot x}\\
\end{array}
Derivation:
if x < 1:
Initial program 86.3%
Taylor expanded in x around 0 66.2%
neg-mul-1 66.2%
unsub-neg 66.2%
Simplified 66.2%
Taylor expanded in x around 0 66.1%
unpow2 66.1%
Simplified 66.1%
if 1 < x:
Initial program 54.2%
Taylor expanded in x around inf 95.8%
unpow2 95.8%
Simplified 95.8%
Final simplification 72.5%
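The else branch follows from the Taylor expansion around infinity: since the exact value of the expression is 2/(1 - x^2), for large |x| we have

\frac{2}{1 - x^2} = \frac{-2}{x^2} \cdot \frac{1}{1 - 1/x^2} = \frac{-2}{x^2}\left(1 + \frac{1}{x^2} + \cdots\right) \approx \frac{-2}{x^2}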
Alternative 3:
(FPCore (x) :precision binary64 (/ 2.0 (- 1.0 (* x x))))
double code(double x) {
    return 2.0 / (1.0 - (x * x));
}
real(8) function code(x)
    real(8), intent (in) :: x
    code = 2.0d0 / (1.0d0 - (x * x))
end function
public static double code(double x) {
    return 2.0 / (1.0 - (x * x));
}
def code(x): return 2.0 / (1.0 - (x * x))
function code(x) return Float64(2.0 / Float64(1.0 - Float64(x * x))) end
function tmp = code(x) tmp = 2.0 / (1.0 - (x * x)); end
code[x_] := N[(2.0 / N[(1.0 - N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\frac{2}{1 - x \cdot x}
\end{array}
Derivation:
Initial program 79.4%
frac-sub 80.1%
associate-/r* 80.1%
*-un-lft-identity 80.1%
*-rgt-identity 80.1%
associate--l- 80.1%
+-commutative 80.1%
+-commutative 80.1%
sub-neg 80.1%
metadata-eval 80.1%
Applied egg-rr 80.1%
Taylor expanded in x around 0 99.9%
frac-2neg 99.9%
div-inv 99.8%
distribute-neg-frac 99.8%
metadata-eval 99.8%
+-commutative 99.8%
distribute-neg-in 99.8%
metadata-eval 99.8%
sub-neg 99.8%
Applied egg-rr 99.8%
associate-*r/ 99.9%
*-rgt-identity 99.9%
metadata-eval 99.9%
distribute-neg-frac 99.9%
distribute-neg-frac 99.9%
associate-/l/ 99.3%
distribute-neg-frac 99.3%
metadata-eval 99.3%
*-commutative 99.3%
+-commutative 99.3%
Simplified 99.3%
Taylor expanded in x around 0 99.3%
unpow2 99.3%
mul-1-neg 99.3%
sub-neg 99.3%
Simplified 99.3%
Final simplification 99.3%
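This alternative is the same rational function as Alternative 1, since (1 + x)(x - 1) = x^2 - 1:

\frac{-2}{(1 + x)(x - 1)} = \frac{-2}{x^2 - 1} = \frac{2}{1 - x^2}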
Alternative 4:
(FPCore (x) :precision binary64 2.0)
double code(double x) {
    return 2.0;
}
real(8) function code(x)
    real(8), intent (in) :: x
    code = 2.0d0
end function
public static double code(double x) {
    return 2.0;
}
def code(x): return 2.0
function code(x) return 2.0 end
function tmp = code(x) tmp = 2.0; end
code[x_] := 2.0
\begin{array}{l}
2
\end{array}
Derivation:
Initial program 79.4%
Taylor expanded in x around 0 52.8%
Final simplification 52.8%
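The constant comes from truncating the series of 2/(1 - x^2) at order zero: near x = 0,

\frac{2}{1 - x^2} = 2\left(1 + x^2 + x^4 + \cdots\right) \approx 2

so this alternative is accurate only for small |x|, consistent with its lower overall accuracy.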
Reproduce:
herbie shell --seed 2023200
(FPCore (x)
:name "Asymptote A"
:precision binary64
(- (/ 1.0 (+ x 1.0)) (/ 1.0 (- x 1.0))))