
(FPCore (x) :precision binary64 (- (/ 1.0 (+ x 1.0)) (/ 1.0 (- x 1.0))))
/* Evaluate 1/(x + 1) - 1/(x - 1) in IEEE double precision.
 * Undefined (division by zero) at x = 1 and x = -1. */
double code(double x) {
    const double plus_side = 1.0 / (x + 1.0);
    const double minus_side = 1.0 / (x - 1.0);
    return plus_side - minus_side;
}
! Evaluate 1/(x + 1) - 1/(x - 1) in double precision.
! Undefined (division by zero) at x = 1 and x = -1.
real(8) function code(x)
    implicit none  ! added: forbid implicit typing (was missing)
    real(8), intent (in) :: x
    code = (1.0d0 / (x + 1.0d0)) - (1.0d0 / (x - 1.0d0))
end function
/** Computes 1/(x + 1) - 1/(x - 1) in double precision. */
public static double code(double x) {
    double plusSide = 1.0 / (x + 1.0);
    double minusSide = 1.0 / (x - 1.0);
    return plusSide - minusSide;
}
def code(x):
    """Return 1/(x + 1) - 1/(x - 1) evaluated in float arithmetic."""
    plus_side = 1.0 / (x + 1.0)
    minus_side = 1.0 / (x - 1.0)
    return plus_side - minus_side
# Compute 1/(x + 1) - 1/(x - 1), rounding each intermediate to Float64.
function code(x)
    plus_side = Float64(1.0 / Float64(x + 1.0))
    minus_side = Float64(1.0 / Float64(x - 1.0))
    return Float64(plus_side - minus_side)
end
% Compute 1/(x + 1) - 1/(x - 1).
function tmp = code(x)
  plus_side = 1.0 / (x + 1.0);
  minus_side = 1.0 / (x - 1.0);
  tmp = plus_side - minus_side;
end
(* Evaluate 1/(x + 1) - 1/(x - 1), rounding every subexpression to $MachinePrecision. *)
code[x_] := N[(N[(1.0 / N[(x + 1.0), $MachinePrecision]), $MachinePrecision] - N[(1.0 / N[(x - 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{x + 1} - \frac{1}{x - 1}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 6 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x) :precision binary64 (- (/ 1.0 (+ x 1.0)) (/ 1.0 (- x 1.0))))
/* 1/(x + 1) - 1/(x - 1); same expression as the initial program above. */
double code(double x) {
return (1.0 / (x + 1.0)) - (1.0 / (x - 1.0));
}
! 1/(x + 1) - 1/(x - 1); same expression as the initial program above.
real(8) function code(x)
real(8), intent (in) :: x
code = (1.0d0 / (x + 1.0d0)) - (1.0d0 / (x - 1.0d0))
end function
// 1/(x + 1) - 1/(x - 1); same expression as the initial program above.
public static double code(double x) {
return (1.0 / (x + 1.0)) - (1.0 / (x - 1.0));
}
# 1/(x + 1) - 1/(x - 1); same expression as the initial program above.
def code(x): return (1.0 / (x + 1.0)) - (1.0 / (x - 1.0))
# 1/(x + 1) - 1/(x - 1); same expression as the initial program above.
function code(x) return Float64(Float64(1.0 / Float64(x + 1.0)) - Float64(1.0 / Float64(x - 1.0))) end
% 1/(x + 1) - 1/(x - 1); same expression as the initial program above.
function tmp = code(x) tmp = (1.0 / (x + 1.0)) - (1.0 / (x - 1.0)); end
(* 1/(x + 1) - 1/(x - 1); same expression as the initial program above. *)
code[x_] := N[(N[(1.0 / N[(x + 1.0), $MachinePrecision]), $MachinePrecision] - N[(1.0 / N[(x - 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{x + 1} - \frac{1}{x - 1}
\end{array}
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (/ (/ 2.0 (- -1.0 x_m)) (+ -1.0 x_m)))
x_m = fabs(x);
/* Rewritten form (2 / (-1 - x_m)) / (-1 + x_m), where x_m = |x| is
 * computed by the caller (see the x_m = fabs(x) line above). */
double code(double x_m) {
    const double scaled = 2.0 / (-1.0 - x_m);
    return scaled / (-1.0 + x_m);
}
x_m = abs(x)
! Rewritten form (2 / (-1 - x_m)) / (-1 + x_m), where x_m = |x| is
! computed by the caller (see the x_m = abs(x) line above).
real(8) function code(x_m)
    implicit none  ! added: forbid implicit typing (was missing)
    real(8), intent (in) :: x_m
    code = (2.0d0 / ((-1.0d0) - x_m)) / ((-1.0d0) + x_m)
end function
x_m = Math.abs(x);
/** Evaluates (2 / (-1 - x_m)) / (-1 + x_m), where x_m = |x| is supplied by the caller. */
public static double code(double x_m) {
    double scaled = 2.0 / (-1.0 - x_m);
    return scaled / (-1.0 + x_m);
}
x_m = math.fabs(x) def code(x_m): return (2.0 / (-1.0 - x_m)) / (-1.0 + x_m)
x_m = abs(x) function code(x_m) return Float64(Float64(2.0 / Float64(-1.0 - x_m)) / Float64(-1.0 + x_m)) end
x_m = abs(x); function tmp = code(x_m) tmp = (2.0 / (-1.0 - x_m)) / (-1.0 + x_m); end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := N[(N[(2.0 / N[(-1.0 - x$95$m), $MachinePrecision]), $MachinePrecision] / N[(-1.0 + x$95$m), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
x_m = \left|x\right|
\\
\frac{\frac{2}{-1 - x\_m}}{-1 + x\_m}
\end{array}
Initial program 82.8%
frac-sub83.3%
associate-/r*83.3%
*-un-lft-identity83.3%
sub-neg83.3%
metadata-eval83.3%
*-rgt-identity83.3%
associate--l+83.3%
+-commutative83.3%
+-commutative83.3%
sub-neg83.3%
metadata-eval83.3%
Applied egg-rr83.3%
add083.3%
associate--r+83.3%
associate-+r-83.3%
metadata-eval83.3%
+-commutative83.3%
Applied egg-rr83.3%
add083.3%
remove-double-neg83.3%
metadata-eval83.3%
distribute-neg-in83.3%
+-commutative83.3%
distribute-neg-frac283.3%
distribute-neg-frac83.3%
neg-sub083.3%
neg-sub083.3%
associate--l+83.3%
sub-neg83.3%
+-commutative83.3%
associate-+r+99.9%
sub-neg99.9%
+-inverses99.9%
metadata-eval99.9%
metadata-eval99.9%
sub-neg99.9%
Simplified99.9%
Final simplification99.9%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (if (<= x_m 1.55) (+ (- 1.0 x_m) (/ -1.0 (+ -1.0 x_m))) (/ (/ -2.0 x_m) (+ -1.0 x_m))))
x_m = fabs(x);
/* Piecewise evaluation on x_m = |x|:
 *   x_m <= 1.55 : (1 - x_m) + (-1 / (-1 + x_m))
 *   otherwise   : (-2 / x_m) / (-1 + x_m) */
double code(double x_m) {
    if (x_m <= 1.55) {
        return (1.0 - x_m) + (-1.0 / (-1.0 + x_m));
    }
    return (-2.0 / x_m) / (-1.0 + x_m);
}
x_m = abs(x)
! Piecewise evaluation on x_m = |x|:
!   x_m <= 1.55 : (1 - x_m) + (-1 / (-1 + x_m))
!   otherwise   : (-2 / x_m) / (-1 + x_m)
real(8) function code(x_m)
    implicit none  ! added: forbid implicit typing (was missing)
    real(8), intent (in) :: x_m
    real(8) :: tmp
    if (x_m <= 1.55d0) then
        tmp = (1.0d0 - x_m) + ((-1.0d0) / ((-1.0d0) + x_m))
    else
        tmp = ((-2.0d0) / x_m) / ((-1.0d0) + x_m)
    end if
    code = tmp
end function
x_m = Math.abs(x);
/**
 * Piecewise evaluation on x_m = |x|: one expression for x_m <= 1.55,
 * another (large-argument) expression otherwise.
 */
public static double code(double x_m) {
    return (x_m <= 1.55)
            ? (1.0 - x_m) + (-1.0 / (-1.0 + x_m))
            : (-2.0 / x_m) / (-1.0 + x_m);
}
x_m = math.fabs(x) def code(x_m): tmp = 0 if x_m <= 1.55: tmp = (1.0 - x_m) + (-1.0 / (-1.0 + x_m)) else: tmp = (-2.0 / x_m) / (-1.0 + x_m) return tmp
x_m = abs(x) function code(x_m) tmp = 0.0 if (x_m <= 1.55) tmp = Float64(Float64(1.0 - x_m) + Float64(-1.0 / Float64(-1.0 + x_m))); else tmp = Float64(Float64(-2.0 / x_m) / Float64(-1.0 + x_m)); end return tmp end
x_m = abs(x); function tmp_2 = code(x_m) tmp = 0.0; if (x_m <= 1.55) tmp = (1.0 - x_m) + (-1.0 / (-1.0 + x_m)); else tmp = (-2.0 / x_m) / (-1.0 + x_m); end tmp_2 = tmp; end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := If[LessEqual[x$95$m, 1.55], N[(N[(1.0 - x$95$m), $MachinePrecision] + N[(-1.0 / N[(-1.0 + x$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(-2.0 / x$95$m), $MachinePrecision] / N[(-1.0 + x$95$m), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
\mathbf{if}\;x\_m \leq 1.55:\\
\;\;\;\;\left(1 - x\_m\right) + \frac{-1}{-1 + x\_m}\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{-2}{x\_m}}{-1 + x\_m}\\
\end{array}
\end{array}
if x < 1.55000000000000004 Initial program 90.9%
Taylor expanded in x around 0 71.6%
neg-mul-171.6%
sub-neg71.6%
Simplified71.6%
if 1.55000000000000004 < x Initial program 64.3%
frac-sub65.1%
associate-/r*65.1%
*-un-lft-identity65.1%
sub-neg65.1%
metadata-eval65.1%
*-rgt-identity65.1%
associate--l+65.1%
+-commutative65.1%
+-commutative65.1%
sub-neg65.1%
metadata-eval65.1%
Applied egg-rr65.1%
Taylor expanded in x around inf 97.7%
Final simplification79.6%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (if (<= x_m 0.75) 2.0 (/ (/ -2.0 x_m) (+ -1.0 x_m))))
x_m = fabs(x);
/* Piecewise on x_m = |x|: the constant 2 when x_m <= 0.75,
 * otherwise (-2 / x_m) / (-1 + x_m). */
double code(double x_m) {
    if (x_m <= 0.75) {
        return 2.0;
    }
    return (-2.0 / x_m) / (-1.0 + x_m);
}
x_m = abs(x)
! Piecewise on x_m = |x|: the constant 2 for x_m <= 0.75,
! otherwise (-2 / x_m) / (-1 + x_m).
real(8) function code(x_m)
    implicit none  ! added: forbid implicit typing (was missing)
    real(8), intent (in) :: x_m
    real(8) :: tmp
    if (x_m <= 0.75d0) then
        tmp = 2.0d0
    else
        tmp = ((-2.0d0) / x_m) / ((-1.0d0) + x_m)
    end if
    code = tmp
end function
x_m = Math.abs(x);
/** Piecewise on x_m = |x|: the constant 2 when x_m <= 0.75, else (-2 / x_m) / (-1 + x_m). */
public static double code(double x_m) {
    return (x_m <= 0.75)
            ? 2.0
            : (-2.0 / x_m) / (-1.0 + x_m);
}
x_m = math.fabs(x) def code(x_m): tmp = 0 if x_m <= 0.75: tmp = 2.0 else: tmp = (-2.0 / x_m) / (-1.0 + x_m) return tmp
x_m = abs(x) function code(x_m) tmp = 0.0 if (x_m <= 0.75) tmp = 2.0; else tmp = Float64(Float64(-2.0 / x_m) / Float64(-1.0 + x_m)); end return tmp end
x_m = abs(x); function tmp_2 = code(x_m) tmp = 0.0; if (x_m <= 0.75) tmp = 2.0; else tmp = (-2.0 / x_m) / (-1.0 + x_m); end tmp_2 = tmp; end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := If[LessEqual[x$95$m, 0.75], 2.0, N[(N[(-2.0 / x$95$m), $MachinePrecision] / N[(-1.0 + x$95$m), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
\mathbf{if}\;x\_m \leq 0.75:\\
\;\;\;\;2\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{-2}{x\_m}}{-1 + x\_m}\\
\end{array}
\end{array}
if x < 0.75 Initial program 90.9%
Taylor expanded in x around 0 71.8%
if 0.75 < x Initial program 64.3%
frac-sub65.1%
associate-/r*65.1%
*-un-lft-identity65.1%
sub-neg65.1%
metadata-eval65.1%
*-rgt-identity65.1%
associate--l+65.1%
+-commutative65.1%
+-commutative65.1%
sub-neg65.1%
metadata-eval65.1%
Applied egg-rr65.1%
Taylor expanded in x around inf 97.7%
Final simplification79.7%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (/ 2.0 (* (+ x_m 1.0) (- 1.0 x_m))))
x_m = fabs(x);
/* Single-division form for x_m = |x|: 2 / ((x_m + 1) * (1 - x_m)). */
double code(double x_m) {
    const double denominator = (x_m + 1.0) * (1.0 - x_m);
    return 2.0 / denominator;
}
x_m = abs(x)
! Single-division form for x_m = |x|: 2 / ((x_m + 1) * (1 - x_m)).
real(8) function code(x_m)
    implicit none  ! added: forbid implicit typing (was missing)
    real(8), intent (in) :: x_m
    code = 2.0d0 / ((x_m + 1.0d0) * (1.0d0 - x_m))
end function
x_m = Math.abs(x);
/** Single-division form for x_m = |x|: 2 / ((x_m + 1) * (1 - x_m)). */
public static double code(double x_m) {
    double denominator = (x_m + 1.0) * (1.0 - x_m);
    return 2.0 / denominator;
}
x_m = math.fabs(x) def code(x_m): return 2.0 / ((x_m + 1.0) * (1.0 - x_m))
x_m = abs(x) function code(x_m) return Float64(2.0 / Float64(Float64(x_m + 1.0) * Float64(1.0 - x_m))) end
x_m = abs(x); function tmp = code(x_m) tmp = 2.0 / ((x_m + 1.0) * (1.0 - x_m)); end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := N[(2.0 / N[(N[(x$95$m + 1.0), $MachinePrecision] * N[(1.0 - x$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
x_m = \left|x\right|
\\
\frac{2}{\left(x\_m + 1\right) \cdot \left(1 - x\_m\right)}
\end{array}
Initial program 82.8%
frac-sub83.3%
associate-/r*83.3%
*-un-lft-identity83.3%
sub-neg83.3%
metadata-eval83.3%
*-rgt-identity83.3%
associate--l+83.3%
+-commutative83.3%
+-commutative83.3%
sub-neg83.3%
metadata-eval83.3%
Applied egg-rr83.3%
div-inv83.3%
metadata-eval83.3%
sub-neg83.3%
associate-/l*83.3%
associate--r+83.3%
associate-+r-83.3%
metadata-eval83.3%
+-commutative83.3%
sub-neg83.3%
metadata-eval83.3%
Applied egg-rr83.3%
associate-*r/83.3%
associate-*r/83.3%
*-rgt-identity83.3%
+-commutative83.3%
metadata-eval83.3%
remove-double-neg83.3%
distribute-neg-in83.3%
sub-neg83.3%
distribute-neg-frac283.3%
associate-/l/83.3%
*-commutative83.3%
distribute-neg-frac83.3%
associate--l+83.3%
sub-neg83.3%
+-commutative83.3%
associate-+r+99.4%
sub-neg99.4%
+-inverses99.4%
metadata-eval99.4%
metadata-eval99.4%
Simplified99.4%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 2.0)
x_m = fabs(x);
/* Constant approximation (Taylor expansion around x = 0 per the log below):
 * always returns 2 regardless of x_m. */
double code(double x_m) {
    (void)x_m; /* argument intentionally unused */
    return 2.0;
}
x_m = abs(x)
! Constant approximation (Taylor expansion around x = 0 per the log below):
! always returns 2; x_m is intentionally unused.
real(8) function code(x_m)
    implicit none  ! added: forbid implicit typing (was missing)
    real(8), intent (in) :: x_m
    code = 2.0d0
end function
x_m = Math.abs(x);
/** Constant approximation: always returns 2 regardless of x_m. */
public static double code(double x_m) {
    final double approximation = 2.0;
    return approximation;
}
x_m = math.fabs(x) def code(x_m): return 2.0
x_m = abs(x) function code(x_m) return 2.0 end
x_m = abs(x); function tmp = code(x_m) tmp = 2.0; end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := 2.0
\begin{array}{l}
x_m = \left|x\right|
\\
2
\end{array}
Initial program 82.8%
Taylor expanded in x around 0 50.8%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 1.0)
x_m = fabs(x);
/* Constant approximation: always returns 1 regardless of x_m. */
double code(double x_m) {
    (void)x_m; /* argument intentionally unused */
    return 1.0;
}
x_m = abs(x)
! Constant approximation: always returns 1; x_m is intentionally unused.
real(8) function code(x_m)
    implicit none  ! added: forbid implicit typing (was missing)
    real(8), intent (in) :: x_m
    code = 1.0d0
end function
x_m = Math.abs(x);
/** Constant approximation: always returns 1 regardless of x_m. */
public static double code(double x_m) {
    final double approximation = 1.0;
    return approximation;
}
x_m = math.fabs(x) def code(x_m): return 1.0
x_m = abs(x) function code(x_m) return 1.0 end
x_m = abs(x); function tmp = code(x_m) tmp = 1.0; end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := 1.0
\begin{array}{l}
x_m = \left|x\right|
\\
1
\end{array}
Initial program 82.8%
Taylor expanded in x around 0 50.2%
Taylor expanded in x around inf 10.8%
herbie shell --seed 2024107
(FPCore (x)
:name "Asymptote A"
:precision binary64
(- (/ 1.0 (+ x 1.0)) (/ 1.0 (- x 1.0))))