
(FPCore (x) :precision binary64 (- (/ 1.0 (+ x 1.0)) (/ 1.0 (- x 1.0))))
/* 1/(x + 1) - 1/(x - 1), evaluated exactly in the order written (no algebraic rewrite). */
double code(double x) {
    const double plus1 = x + 1.0;
    const double minus1 = x - 1.0;
    return (1.0 / plus1) - (1.0 / minus1);
}
! 1/(x + 1) - 1/(x - 1), evaluated directly in double precision.
real(8) function code(x)
    real(8), intent (in) :: x
    real(8) :: p, m
    p = x + 1.0d0
    m = x - 1.0d0
    code = (1.0d0 / p) - (1.0d0 / m)
end function
// 1/(x + 1) - 1/(x - 1), kept in the original floating-point evaluation order.
public static double code(double x) {
    final double plus1 = x + 1.0;
    final double minus1 = x - 1.0;
    return (1.0 / plus1) - (1.0 / minus1);
}
def code(x):
    # 1/(x + 1) - 1/(x - 1), kept in the original evaluation order.
    left = 1.0 / (x + 1.0)
    right = 1.0 / (x - 1.0)
    return left - right
# 1/(x + 1) - 1/(x - 1); each intermediate is explicitly rounded to Float64.
function code(x)
    a = Float64(x + 1.0)
    b = Float64(x - 1.0)
    return Float64(Float64(1.0 / a) - Float64(1.0 / b))
end
% 1/(x + 1) - 1/(x - 1) in double precision.
function tmp = code(x) tmp = (1.0 / (x + 1.0)) - (1.0 / (x - 1.0)); end
(* 1/(x + 1) - 1/(x - 1), with every intermediate rounded to $MachinePrecision. *)
code[x_] := N[(N[(1.0 / N[(x + 1.0), $MachinePrecision]), $MachinePrecision] - N[(1.0 / N[(x - 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{x + 1} - \frac{1}{x - 1}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 7 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x) :precision binary64 (- (/ 1.0 (+ x 1.0)) (/ 1.0 (- x 1.0))))
/* Alternative 1: identical to the initial program, 1/(x + 1) - 1/(x - 1). */
double code(double x) {
    const double plus1 = x + 1.0;
    const double minus1 = x - 1.0;
    return (1.0 / plus1) - (1.0 / minus1);
}
! Alternative 1: identical to the initial program, 1/(x + 1) - 1/(x - 1).
real(8) function code(x)
    real(8), intent (in) :: x
    real(8) :: p, m
    p = x + 1.0d0
    m = x - 1.0d0
    code = (1.0d0 / p) - (1.0d0 / m)
end function
// Alternative 1: identical to the initial program, 1/(x + 1) - 1/(x - 1).
public static double code(double x) {
    final double plus1 = x + 1.0;
    final double minus1 = x - 1.0;
    return (1.0 / plus1) - (1.0 / minus1);
}
def code(x):
    # Alternative 1: identical to the initial program, 1/(x + 1) - 1/(x - 1).
    left = 1.0 / (x + 1.0)
    right = 1.0 / (x - 1.0)
    return left - right
# Alternative 1: identical to the initial program; intermediates rounded to Float64.
function code(x)
    a = Float64(x + 1.0)
    b = Float64(x - 1.0)
    return Float64(Float64(1.0 / a) - Float64(1.0 / b))
end
% Alternative 1 (same as initial): 1/(x + 1) - 1/(x - 1).
function tmp = code(x) tmp = (1.0 / (x + 1.0)) - (1.0 / (x - 1.0)); end
(* Alternative 1 (same as initial): 1/(x + 1) - 1/(x - 1) at $MachinePrecision. *)
code[x_] := N[(N[(1.0 / N[(x + 1.0), $MachinePrecision]), $MachinePrecision] - N[(1.0 / N[(x - 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{x + 1} - \frac{1}{x - 1}
\end{array}
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (/ (/ -2.0 (- 1.0 x_m)) (- -1.0 x_m)))
x_m = fabs(x);
/* (-2 / (1 - x_m)) / (-1 - x_m), evaluated as written; x_m is |x| (set by the caller). */
double code(double x_m) {
    const double inner = -2.0 / (1.0 - x_m);
    return inner / (-1.0 - x_m);
}
x_m = abs(x)
! (-2/(1 - x_m)) / (-1 - x_m), evaluated in the written order; x_m is |x|.
real(8) function code(x_m)
    real(8), intent (in) :: x_m
    real(8) :: inner
    inner = (-2.0d0) / (1.0d0 - x_m)
    code = inner / ((-1.0d0) - x_m)
end function
x_m = Math.abs(x);
// (-2 / (1 - x_m)) / (-1 - x_m), evaluated as written; x_m is |x| (set by the caller).
public static double code(double x_m) {
    final double inner = -2.0 / (1.0 - x_m);
    return inner / (-1.0 - x_m);
}
# x_m = |x|; code computes (-2/(1 - x_m)) / (-1 - x_m).
# NOTE(review): preamble assignment and def are fused on one line by report extraction.
x_m = math.fabs(x) def code(x_m): return (-2.0 / (1.0 - x_m)) / (-1.0 - x_m)
# x_m = |x|; code computes (-2/(1 - x_m)) / (-1 - x_m) with Float64 rounding at each step.
# NOTE(review): preamble assignment and function are fused on one line by report extraction.
x_m = abs(x) function code(x_m) return Float64(Float64(-2.0 / Float64(1.0 - x_m)) / Float64(-1.0 - x_m)) end
% x_m = |x|; code computes (-2/(1 - x_m)) / (-1 - x_m).
x_m = abs(x); function tmp = code(x_m) tmp = (-2.0 / (1.0 - x_m)) / (-1.0 - x_m); end
(* x_m = |x|; code computes (-2/(1 - x_m)) / (-1 - x_m) at $MachinePrecision. *)
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := N[(N[(-2.0 / N[(1.0 - x$95$m), $MachinePrecision]), $MachinePrecision] / N[(-1.0 - x$95$m), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
x_m = \left|x\right|
\\
\frac{\frac{-2}{1 - x\_m}}{-1 - x\_m}
\end{array}
Initial program 80.0%
sub-neg80.0%
+-commutative80.0%
distribute-neg-frac280.0%
neg-sub080.0%
associate-+l-80.0%
neg-sub080.0%
remove-double-neg80.0%
distribute-neg-in80.0%
sub-neg80.0%
distribute-neg-frac280.0%
sub-neg80.0%
+-commutative80.0%
unsub-neg80.0%
sub-neg80.0%
+-commutative80.0%
unsub-neg80.0%
metadata-eval80.0%
Simplified80.0%
sub-neg80.0%
distribute-neg-frac80.0%
metadata-eval80.0%
Applied egg-rr80.0%
metadata-eval80.0%
distribute-neg-frac80.0%
unsub-neg80.0%
*-rgt-identity80.0%
*-inverses80.0%
associate-/r*54.4%
*-commutative54.4%
*-lft-identity54.4%
associate-/r*80.0%
associate-*r/80.0%
associate-*l/80.0%
distribute-lft-out--80.1%
*-inverses80.1%
div-sub80.5%
associate--r+83.1%
*-commutative83.1%
times-frac83.1%
Simplified99.2%
associate-/r*99.9%
div-inv99.8%
Applied egg-rr99.8%
un-div-inv99.9%
Applied egg-rr99.9%
Final simplification99.9%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (if (<= x_m 0.76) 2.0 (/ -2.0 (* x_m (+ x_m -1.0)))))
x_m = fabs(x);
/* Piecewise: 2.0 for x_m <= 0.76, otherwise -2 / (x_m * (x_m - 1)); x_m is |x|. */
double code(double x_m) {
    if (x_m <= 0.76) {
        return 2.0;
    }
    return -2.0 / (x_m * (x_m + -1.0));
}
x_m = abs(x)
! Piecewise: 2 for x_m <= 0.76, otherwise -2 / (x_m * (x_m - 1)); x_m is |x|.
real(8) function code(x_m)
    real(8), intent (in) :: x_m
    if (x_m <= 0.76d0) then
        code = 2.0d0
    else
        code = (-2.0d0) / (x_m * (x_m + (-1.0d0)))
    end if
end function
x_m = Math.abs(x);
// Piecewise: 2.0 for x_m <= 0.76, otherwise -2 / (x_m * (x_m - 1)); x_m is |x|.
public static double code(double x_m) {
    if (x_m <= 0.76) {
        return 2.0;
    }
    return -2.0 / (x_m * (x_m + -1.0));
}
# x_m = |x|; piecewise: 2.0 for x_m <= 0.76, otherwise -2 / (x_m * (x_m - 1)).
# NOTE(review): preamble and def body are fused on one line by report extraction.
x_m = math.fabs(x) def code(x_m): tmp = 0 if x_m <= 0.76: tmp = 2.0 else: tmp = -2.0 / (x_m * (x_m + -1.0)) return tmp
# x_m = |x|; piecewise: 2.0 for x_m <= 0.76, otherwise -2 / (x_m * (x_m - 1)).
# NOTE(review): preamble and function are fused on one line by report extraction.
x_m = abs(x) function code(x_m) tmp = 0.0 if (x_m <= 0.76) tmp = 2.0; else tmp = Float64(-2.0 / Float64(x_m * Float64(x_m + -1.0))); end return tmp end
% x_m = |x|; piecewise: 2.0 for x_m <= 0.76, otherwise -2 / (x_m * (x_m - 1)).
x_m = abs(x); function tmp_2 = code(x_m) tmp = 0.0; if (x_m <= 0.76) tmp = 2.0; else tmp = -2.0 / (x_m * (x_m + -1.0)); end tmp_2 = tmp; end
(* x_m = |x|; piecewise: 2.0 for x_m <= 0.76, otherwise -2 / (x_m * (x_m - 1)). *)
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := If[LessEqual[x$95$m, 0.76], 2.0, N[(-2.0 / N[(x$95$m * N[(x$95$m + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
\mathbf{if}\;x\_m \leq 0.76:\\
\;\;\;\;2\\
\mathbf{else}:\\
\;\;\;\;\frac{-2}{x\_m \cdot \left(x\_m + -1\right)}\\
\end{array}
\end{array}
if x < 0.76000000000000001
Initial program 88.5%
sub-neg88.5%
+-commutative88.5%
distribute-neg-frac288.5%
neg-sub088.5%
associate-+l-88.5%
neg-sub088.5%
remove-double-neg88.5%
distribute-neg-in88.5%
sub-neg88.5%
distribute-neg-frac288.5%
sub-neg88.5%
+-commutative88.5%
unsub-neg88.5%
sub-neg88.5%
+-commutative88.5%
unsub-neg88.5%
metadata-eval88.5%
Simplified88.5%
Taylor expanded in x around 0 66.7%
if 0.76000000000000001 < x
Initial program 56.2%
sub-neg56.2%
+-commutative56.2%
distribute-neg-frac256.2%
neg-sub056.2%
associate-+l-56.2%
neg-sub056.2%
remove-double-neg56.2%
distribute-neg-in56.2%
sub-neg56.2%
distribute-neg-frac256.2%
sub-neg56.2%
+-commutative56.2%
unsub-neg56.2%
sub-neg56.2%
+-commutative56.2%
unsub-neg56.2%
metadata-eval56.2%
Simplified56.2%
sub-neg56.2%
distribute-neg-frac56.2%
metadata-eval56.2%
Applied egg-rr56.2%
metadata-eval56.2%
distribute-neg-frac56.2%
unsub-neg56.2%
*-rgt-identity56.2%
*-inverses56.2%
associate-/r*11.2%
*-commutative11.2%
*-lft-identity11.2%
associate-/r*56.1%
associate-*r/56.1%
associate-*l/56.2%
distribute-lft-out--56.3%
*-inverses56.3%
div-sub57.8%
associate--r+63.6%
*-commutative63.6%
times-frac63.6%
Simplified99.8%
flip3--38.4%
associate-*r/34.2%
metadata-eval34.2%
metadata-eval34.2%
distribute-rgt-out34.2%
Applied egg-rr34.2%
associate-/l*38.4%
Simplified38.4%
Taylor expanded in x around inf 95.7%
neg-mul-195.7%
Simplified95.7%
Taylor expanded in x around 0 95.7%
Final simplification74.3%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (if (<= x_m 1.0) 2.0 (/ (/ -2.0 (+ 1.0 x_m)) x_m)))
x_m = fabs(x);
/* Piecewise: 2.0 for x_m <= 1, otherwise (-2 / (1 + x_m)) / x_m; x_m is |x|. */
double code(double x_m) {
    if (x_m <= 1.0) {
        return 2.0;
    }
    const double inner = -2.0 / (1.0 + x_m);
    return inner / x_m;
}
x_m = abs(x)
! Piecewise: 2 for x_m <= 1, otherwise (-2/(1 + x_m)) / x_m; x_m is |x|.
real(8) function code(x_m)
    real(8), intent (in) :: x_m
    if (x_m <= 1.0d0) then
        code = 2.0d0
    else
        code = ((-2.0d0) / (1.0d0 + x_m)) / x_m
    end if
end function
x_m = Math.abs(x);
// Piecewise: 2.0 for x_m <= 1, otherwise (-2 / (1 + x_m)) / x_m; x_m is |x|.
public static double code(double x_m) {
    if (x_m <= 1.0) {
        return 2.0;
    }
    final double inner = -2.0 / (1.0 + x_m);
    return inner / x_m;
}
# x_m = |x|; piecewise: 2.0 for x_m <= 1, otherwise (-2 / (1 + x_m)) / x_m.
# NOTE(review): preamble and def body are fused on one line by report extraction.
x_m = math.fabs(x) def code(x_m): tmp = 0 if x_m <= 1.0: tmp = 2.0 else: tmp = (-2.0 / (1.0 + x_m)) / x_m return tmp
# x_m = |x|; piecewise: 2.0 for x_m <= 1, otherwise (-2 / (1 + x_m)) / x_m.
# NOTE(review): preamble and function are fused on one line by report extraction.
x_m = abs(x) function code(x_m) tmp = 0.0 if (x_m <= 1.0) tmp = 2.0; else tmp = Float64(Float64(-2.0 / Float64(1.0 + x_m)) / x_m); end return tmp end
% x_m = |x|; piecewise: 2.0 for x_m <= 1, otherwise (-2 / (1 + x_m)) / x_m.
x_m = abs(x); function tmp_2 = code(x_m) tmp = 0.0; if (x_m <= 1.0) tmp = 2.0; else tmp = (-2.0 / (1.0 + x_m)) / x_m; end tmp_2 = tmp; end
(* x_m = |x|; piecewise: 2.0 for x_m <= 1, otherwise (-2 / (1 + x_m)) / x_m. *)
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := If[LessEqual[x$95$m, 1.0], 2.0, N[(N[(-2.0 / N[(1.0 + x$95$m), $MachinePrecision]), $MachinePrecision] / x$95$m), $MachinePrecision]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
\mathbf{if}\;x\_m \leq 1:\\
\;\;\;\;2\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{-2}{1 + x\_m}}{x\_m}\\
\end{array}
\end{array}
if x < 1
Initial program 88.5%
sub-neg88.5%
+-commutative88.5%
distribute-neg-frac288.5%
neg-sub088.5%
associate-+l-88.5%
neg-sub088.5%
remove-double-neg88.5%
distribute-neg-in88.5%
sub-neg88.5%
distribute-neg-frac288.5%
sub-neg88.5%
+-commutative88.5%
unsub-neg88.5%
sub-neg88.5%
+-commutative88.5%
unsub-neg88.5%
metadata-eval88.5%
Simplified88.5%
Taylor expanded in x around 0 66.7%
if 1 < x
Initial program 56.2%
sub-neg56.2%
+-commutative56.2%
distribute-neg-frac256.2%
neg-sub056.2%
associate-+l-56.2%
neg-sub056.2%
remove-double-neg56.2%
distribute-neg-in56.2%
sub-neg56.2%
distribute-neg-frac256.2%
sub-neg56.2%
+-commutative56.2%
unsub-neg56.2%
sub-neg56.2%
+-commutative56.2%
unsub-neg56.2%
metadata-eval56.2%
Simplified56.2%
sub-neg56.2%
distribute-neg-frac56.2%
metadata-eval56.2%
Applied egg-rr56.2%
metadata-eval56.2%
distribute-neg-frac56.2%
unsub-neg56.2%
*-rgt-identity56.2%
*-inverses56.2%
associate-/r*11.2%
*-commutative11.2%
*-lft-identity11.2%
associate-/r*56.1%
associate-*r/56.1%
associate-*l/56.2%
distribute-lft-out--56.3%
*-inverses56.3%
div-sub57.8%
associate--r+63.6%
*-commutative63.6%
times-frac63.6%
Simplified99.8%
flip3--38.4%
associate-*r/34.2%
metadata-eval34.2%
metadata-eval34.2%
distribute-rgt-out34.2%
Applied egg-rr34.2%
associate-/l*38.4%
Simplified38.4%
Taylor expanded in x around inf 95.7%
neg-mul-195.7%
Simplified95.7%
associate-/r*95.8%
div-inv95.6%
sub-neg95.6%
add-sqr-sqrt0.0%
sqrt-unprod49.7%
sqr-neg49.7%
sqrt-unprod49.7%
add-sqr-sqrt49.7%
add-sqr-sqrt0.0%
sqrt-unprod95.6%
sqr-neg95.6%
sqrt-unprod95.5%
add-sqr-sqrt95.6%
Applied egg-rr95.6%
associate-*r/95.8%
*-rgt-identity95.8%
+-commutative95.8%
Simplified95.8%
Final simplification74.3%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (/ -2.0 (* (- 1.0 x_m) (- -1.0 x_m))))
x_m = fabs(x);
/* -2 / ((1 - x_m) * (-1 - x_m)), evaluated as written; x_m is |x|. */
double code(double x_m) {
    const double left = 1.0 - x_m;
    const double right = -1.0 - x_m;
    return -2.0 / (left * right);
}
x_m = abs(x)
! -2 / ((1 - x_m) * (-1 - x_m)), evaluated as written; x_m is |x|.
real(8) function code(x_m)
    real(8), intent (in) :: x_m
    real(8) :: denom
    denom = (1.0d0 - x_m) * ((-1.0d0) - x_m)
    code = (-2.0d0) / denom
end function
x_m = Math.abs(x);
// -2 / ((1 - x_m) * (-1 - x_m)), evaluated as written; x_m is |x|.
public static double code(double x_m) {
    final double left = 1.0 - x_m;
    final double right = -1.0 - x_m;
    return -2.0 / (left * right);
}
# x_m = |x|; code computes -2 / ((1 - x_m) * (-1 - x_m)).
# NOTE(review): preamble assignment and def are fused on one line by report extraction.
x_m = math.fabs(x) def code(x_m): return -2.0 / ((1.0 - x_m) * (-1.0 - x_m))
# x_m = |x|; code computes -2 / ((1 - x_m) * (-1 - x_m)) with Float64 rounding at each step.
# NOTE(review): preamble assignment and function are fused on one line by report extraction.
x_m = abs(x) function code(x_m) return Float64(-2.0 / Float64(Float64(1.0 - x_m) * Float64(-1.0 - x_m))) end
% x_m = |x|; code computes -2 / ((1 - x_m) * (-1 - x_m)).
x_m = abs(x); function tmp = code(x_m) tmp = -2.0 / ((1.0 - x_m) * (-1.0 - x_m)); end
(* x_m = |x|; code computes -2 / ((1 - x_m) * (-1 - x_m)) at $MachinePrecision. *)
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := N[(-2.0 / N[(N[(1.0 - x$95$m), $MachinePrecision] * N[(-1.0 - x$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
x_m = \left|x\right|
\\
\frac{-2}{\left(1 - x\_m\right) \cdot \left(-1 - x\_m\right)}
\end{array}
Initial program 80.0%
sub-neg80.0%
+-commutative80.0%
distribute-neg-frac280.0%
neg-sub080.0%
associate-+l-80.0%
neg-sub080.0%
remove-double-neg80.0%
distribute-neg-in80.0%
sub-neg80.0%
distribute-neg-frac280.0%
sub-neg80.0%
+-commutative80.0%
unsub-neg80.0%
sub-neg80.0%
+-commutative80.0%
unsub-neg80.0%
metadata-eval80.0%
Simplified80.0%
sub-neg80.0%
distribute-neg-frac80.0%
metadata-eval80.0%
Applied egg-rr80.0%
metadata-eval80.0%
distribute-neg-frac80.0%
unsub-neg80.0%
*-rgt-identity80.0%
*-inverses80.0%
associate-/r*54.4%
*-commutative54.4%
*-lft-identity54.4%
associate-/r*80.0%
associate-*r/80.0%
associate-*l/80.0%
distribute-lft-out--80.1%
*-inverses80.1%
div-sub80.5%
associate--r+83.1%
*-commutative83.1%
times-frac83.1%
Simplified99.2%
Final simplification99.2%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (if (<= x_m 1.0) 2.0 (/ -1.0 x_m)))
x_m = fabs(x);
/* Piecewise: 2.0 for x_m <= 1, otherwise -1 / x_m; x_m is |x|. */
double code(double x_m) {
    if (x_m <= 1.0) {
        return 2.0;
    }
    return -1.0 / x_m;
}
x_m = abs(x)
! Piecewise: 2 for x_m <= 1, otherwise -1 / x_m; x_m is |x|.
real(8) function code(x_m)
    real(8), intent (in) :: x_m
    if (x_m <= 1.0d0) then
        code = 2.0d0
    else
        code = (-1.0d0) / x_m
    end if
end function
x_m = Math.abs(x);
// Piecewise: 2.0 for x_m <= 1, otherwise -1 / x_m; x_m is |x|.
public static double code(double x_m) {
    if (x_m <= 1.0) {
        return 2.0;
    }
    return -1.0 / x_m;
}
# x_m = |x|; piecewise: 2.0 for x_m <= 1, otherwise -1 / x_m.
# NOTE(review): preamble and def body are fused on one line by report extraction.
x_m = math.fabs(x) def code(x_m): tmp = 0 if x_m <= 1.0: tmp = 2.0 else: tmp = -1.0 / x_m return tmp
# x_m = |x|; piecewise: 2.0 for x_m <= 1, otherwise -1 / x_m.
# NOTE(review): preamble and function are fused on one line by report extraction.
x_m = abs(x) function code(x_m) tmp = 0.0 if (x_m <= 1.0) tmp = 2.0; else tmp = Float64(-1.0 / x_m); end return tmp end
% x_m = |x|; piecewise: 2.0 for x_m <= 1, otherwise -1 / x_m.
x_m = abs(x); function tmp_2 = code(x_m) tmp = 0.0; if (x_m <= 1.0) tmp = 2.0; else tmp = -1.0 / x_m; end tmp_2 = tmp; end
(* x_m = |x|; piecewise: 2.0 for x_m <= 1, otherwise -1 / x_m. *)
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := If[LessEqual[x$95$m, 1.0], 2.0, N[(-1.0 / x$95$m), $MachinePrecision]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
\mathbf{if}\;x\_m \leq 1:\\
\;\;\;\;2\\
\mathbf{else}:\\
\;\;\;\;\frac{-1}{x\_m}\\
\end{array}
\end{array}
if x < 1
Initial program 88.5%
sub-neg88.5%
+-commutative88.5%
distribute-neg-frac288.5%
neg-sub088.5%
associate-+l-88.5%
neg-sub088.5%
remove-double-neg88.5%
distribute-neg-in88.5%
sub-neg88.5%
distribute-neg-frac288.5%
sub-neg88.5%
+-commutative88.5%
unsub-neg88.5%
sub-neg88.5%
+-commutative88.5%
unsub-neg88.5%
metadata-eval88.5%
Simplified88.5%
Taylor expanded in x around 0 66.7%
if 1 < x
Initial program 56.2%
sub-neg56.2%
+-commutative56.2%
distribute-neg-frac256.2%
neg-sub056.2%
associate-+l-56.2%
neg-sub056.2%
remove-double-neg56.2%
distribute-neg-in56.2%
sub-neg56.2%
distribute-neg-frac256.2%
sub-neg56.2%
+-commutative56.2%
unsub-neg56.2%
sub-neg56.2%
+-commutative56.2%
unsub-neg56.2%
metadata-eval56.2%
Simplified56.2%
Taylor expanded in x around 0 2.6%
Taylor expanded in x around inf 2.6%
Taylor expanded in x around 0 7.0%
Final simplification51.0%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 1.0)
x_m = fabs(x);
/* Degenerate constant alternative: always returns 1.0; x_m is unused. */
double code(double x_m) {
return 1.0;
}
x_m = abs(x)
! Degenerate constant alternative: always returns 1; x_m is unused.
real(8) function code(x_m)
real(8), intent (in) :: x_m
code = 1.0d0
end function
x_m = Math.abs(x);
// Degenerate constant alternative: always returns 1.0; x_m is unused.
public static double code(double x_m) {
return 1.0;
}
# Degenerate constant alternative: always returns 1.0; x_m is unused.
x_m = math.fabs(x) def code(x_m): return 1.0
# Degenerate constant alternative: always returns 1.0; x_m is unused.
x_m = abs(x) function code(x_m) return 1.0 end
% Degenerate constant alternative: always returns 1.0; x_m is unused.
x_m = abs(x); function tmp = code(x_m) tmp = 1.0; end
(* Degenerate constant alternative: always returns 1.0; x_m is unused. *)
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := 1.0
\begin{array}{l}
x_m = \left|x\right|
\\
1
\end{array}
Initial program 80.0%
sub-neg80.0%
+-commutative80.0%
distribute-neg-frac280.0%
neg-sub080.0%
associate-+l-80.0%
neg-sub080.0%
remove-double-neg80.0%
distribute-neg-in80.0%
sub-neg80.0%
distribute-neg-frac280.0%
sub-neg80.0%
+-commutative80.0%
unsub-neg80.0%
sub-neg80.0%
+-commutative80.0%
unsub-neg80.0%
metadata-eval80.0%
Simplified80.0%
Taylor expanded in x around 0 49.4%
Taylor expanded in x around inf 10.6%
Final simplification10.6%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 2.0)
x_m = fabs(x);
/* Degenerate constant alternative: always returns 2.0; x_m is unused. */
double code(double x_m) {
return 2.0;
}
x_m = abs(x)
! Degenerate constant alternative: always returns 2; x_m is unused.
real(8) function code(x_m)
real(8), intent (in) :: x_m
code = 2.0d0
end function
x_m = Math.abs(x);
// Degenerate constant alternative: always returns 2.0; x_m is unused.
public static double code(double x_m) {
return 2.0;
}
# Degenerate constant alternative: always returns 2.0; x_m is unused.
x_m = math.fabs(x) def code(x_m): return 2.0
# Degenerate constant alternative: always returns 2.0; x_m is unused.
x_m = abs(x) function code(x_m) return 2.0 end
% Degenerate constant alternative: always returns 2.0; x_m is unused.
x_m = abs(x); function tmp = code(x_m) tmp = 2.0; end
(* Degenerate constant alternative: always returns 2.0; x_m is unused. *)
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := 2.0
\begin{array}{l}
x_m = \left|x\right|
\\
2
\end{array}
Initial program 80.0%
sub-neg80.0%
+-commutative80.0%
distribute-neg-frac280.0%
neg-sub080.0%
associate-+l-80.0%
neg-sub080.0%
remove-double-neg80.0%
distribute-neg-in80.0%
sub-neg80.0%
distribute-neg-frac280.0%
sub-neg80.0%
+-commutative80.0%
unsub-neg80.0%
sub-neg80.0%
+-commutative80.0%
unsub-neg80.0%
metadata-eval80.0%
Simplified80.0%
Taylor expanded in x around 0 49.9%
Final simplification49.9%
herbie shell --seed 2024053
(FPCore (x)
:name "Asymptote A"
:precision binary64
(- (/ 1.0 (+ x 1.0)) (/ 1.0 (- x 1.0))))