
(FPCore (x) :precision binary64 (/ x (+ (* x x) 1.0)))
/* Evaluate x / (x^2 + 1) in IEEE binary64. */
double code(double x) {
    double denom = x * x + 1.0;
    return x / denom;
}
! Evaluate x / (x**2 + 1) in double precision.
! Fix: add implicit none so no identifier can be implicitly typed.
real(8) function code(x)
    implicit none
    real(8), intent (in) :: x
    code = x / ((x * x) + 1.0d0)
end function
/** Computes x / (x*x + 1) in binary64. */
public static double code(double x) {
    final double denominator = (x * x) + 1.0;
    return x / denominator;
}
def code(x):
    """Return x / (x*x + 1.0)."""
    denom = (x * x) + 1.0
    return x / denom
# x / (x^2 + 1), with each intermediate forced to Float64 as in the original.
function code(x)
    sq = Float64(x * x)
    denom = Float64(sq + 1.0)
    return Float64(x / denom)
end
% Evaluate x / (x^2 + 1).
function tmp = code(x)
    tmp = x / ((x * x) + 1.0);
end
(* x / (x^2 + 1); every intermediate is rounded via N[..., $MachinePrecision]. *)
code[x_] := N[(x / N[(N[(x * x), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{x}{x \cdot x + 1}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 4 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x) :precision binary64 (/ x (+ (* x x) 1.0)))
/* x divided by one plus x squared (binary64; fp addition is commutative). */
double code(double x) {
    return x / (1.0 + x * x);
}
! Evaluate x / (x**2 + 1) in double precision.
! Fix: add implicit none (missing in the emitted code).
real(8) function code(x)
    implicit none
    real(8), intent (in) :: x
    code = x / ((x * x) + 1.0d0)
end function
/** x / (x*x + 1.0), evaluated in binary64. */
public static double code(double x) {
    return x / (1.0 + (x * x));
}
def code(x):
    # x / (x^2 + 1); fp addition is commutative, so 1.0 + x*x is bit-identical
    return x / (1.0 + x * x)
# Same kernel: x / (x^2 + 1) with explicit Float64 rounding at each step.
function code(x)
    squared = Float64(x * x)
    return Float64(x / Float64(squared + 1.0))
end
% x / (x^2 + 1), double precision.
function tmp = code(x)
    d = (x * x) + 1.0;
    tmp = x / d;
end
(* x / (x^2 + 1) with intermediate rounding to $MachinePrecision. *)
code[x_] := N[(x / N[(N[(x * x), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{x}{x \cdot x + 1}
\end{array}
(FPCore (x) :precision binary64 (/ 1.0 (+ x (/ 1.0 x))))
/* Rewritten form 1 / (x + 1/x): avoids computing x*x, which overflows
 * for very large |x|. */
double code(double x) {
    double t = x + 1.0 / x;
    return 1.0 / t;
}
! Evaluate 1 / (x + 1/x), an overflow-resistant rewrite of x/(x**2+1).
! Fix: add implicit none.
real(8) function code(x)
    implicit none
    real(8), intent (in) :: x
    code = 1.0d0 / (x + (1.0d0 / x))
end function
/** 1 / (x + 1/x) — algebraically equal to x/(x*x+1) but never forms x*x. */
public static double code(double x) {
    final double recip = 1.0 / x;
    return 1.0 / (x + recip);
}
def code(x):
    """Return 1 / (x + 1/x), the overflow-safe rewrite of x/(x*x+1)."""
    inv = 1.0 / x
    return 1.0 / (x + inv)
# 1 / (x + 1/x) with explicit Float64 rounding at each step.
function code(x)
    inv = Float64(1.0 / x)
    return Float64(1.0 / Float64(x + inv))
end
% 1 / (x + 1/x), the reciprocal rewrite of x/(x^2+1).
function tmp = code(x)
    tmp = 1.0 / (x + (1.0 / x));
end
(* 1 / (x + 1/x), with each intermediate rounded to $MachinePrecision. *)
code[x_] := N[(1.0 / N[(x + N[(1.0 / x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{x + \frac{1}{x}}
\end{array}
Initial program 76.2%
clear-num 76.2%
inv-pow 76.2%
fma-def 76.2%
Applied egg-rr 76.2%
Taylor expanded in x around 0 99.8%
add-log-exp 8.1%
*-un-lft-identity 8.1%
log-prod 8.1%
metadata-eval 8.1%
add-log-exp 99.8%
unpow-1 99.8%
Applied egg-rr 99.8%
+-lft-identity 99.8%
Simplified 99.8%
Final simplification 99.8%
(FPCore (x) :precision binary64 (if (<= x -0.85) (/ 1.0 x) (if (<= x 0.86) (* x (- 1.0 (* x x))) (/ 1.0 x))))
/* Piecewise approximation of x/(x^2+1):
 *   1/x on the tails (x <= -0.85 or x > 0.86),
 *   the Taylor polynomial x*(1 - x^2) near zero.
 * Thresholds come from Herbie's regime search. */
double code(double x) {
    if (x > -0.85 && x <= 0.86) {
        return x * (1.0 - x * x);
    }
    return 1.0 / x;
}
! Piecewise approximation of x/(x**2+1):
!   1/x on the tails, x*(1 - x**2) (Taylor polynomial) near zero.
! Fix: add implicit none.
real(8) function code(x)
    implicit none
    real(8), intent (in) :: x
    real(8) :: tmp
    if (x <= (-0.85d0)) then
        tmp = 1.0d0 / x
    else if (x <= 0.86d0) then
        tmp = x * (1.0d0 - (x * x))
    else
        tmp = 1.0d0 / x
    end if
    code = tmp
end function
/**
 * Piecewise approximation of x/(x*x+1): the Taylor polynomial
 * x*(1 - x*x) near the origin, 1/x on both tails.
 */
public static double code(double x) {
    final boolean nearZero = (x > -0.85) && (x <= 0.86);
    return nearZero ? x * (1.0 - x * x) : 1.0 / x;
}
def code(x):
    """Piecewise approximation of x/(x*x+1).

    Fix: the statements were collapsed onto one line, which is a
    SyntaxError in Python; restored the intended block structure.
    Returns 1/x on the tails and the Taylor polynomial x*(1 - x*x)
    for -0.85 < x <= 0.86.
    """
    if x <= -0.85:
        tmp = 1.0 / x
    elif x <= 0.86:
        tmp = x * (1.0 - (x * x))
    else:
        tmp = 1.0 / x
    return tmp
# Piecewise approximation of x/(x^2+1): 1/x on the tails,
# x*(1 - x^2) near zero.
# Fix: the emitted one-line form lacked statement separators after the
# if/elseif conditions; restored valid multi-line Julia.
function code(x)
    tmp = 0.0
    if x <= -0.85
        tmp = Float64(1.0 / x)
    elseif x <= 0.86
        tmp = Float64(x * Float64(1.0 - Float64(x * x)))
    else
        tmp = Float64(1.0 / x)
    end
    return tmp
end
% Piecewise approximation of x/(x^2+1): 1/x on the tails,
% x*(1 - x^2) near zero.
% Fix: restored line structure; the collapsed one-line if/elseif had no
% separators between condition and body.
function tmp_2 = code(x)
    tmp = 0.0;
    if (x <= -0.85)
        tmp = 1.0 / x;
    elseif (x <= 0.86)
        tmp = x * (1.0 - (x * x));
    else
        tmp = 1.0 / x;
    end
    tmp_2 = tmp;
end
(* Piecewise: 1/x for x <= -0.85 or x > 0.86, x*(1 - x^2) otherwise. *)
code[x_] := If[LessEqual[x, -0.85], N[(1.0 / x), $MachinePrecision], If[LessEqual[x, 0.86], N[(x * N[(1.0 - N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(1.0 / x), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -0.85:\\
\;\;\;\;\frac{1}{x}\\
\mathbf{elif}\;x \leq 0.86:\\
\;\;\;\;x \cdot \left(1 - x \cdot x\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{1}{x}\\
\end{array}
\end{array}
if x < -0.849999999999999978 or 0.859999999999999987 < x: Initial program 53.3%
Taylor expanded in x around inf 99.4%
if -0.849999999999999978 < x < 0.859999999999999987: Initial program 100.0%
Taylor expanded in x around 0 98.9%
mul-1-neg 98.9%
unsub-neg 98.9%
Simplified 98.9%
*-un-lft-identity 98.9%
unpow3 98.9%
distribute-rgt-out-- 98.9%
Applied egg-rr 98.9%
Final simplification 99.2%
(FPCore (x) :precision binary64 (if (<= x -1.0) (/ 1.0 x) (if (<= x 1.0) x (/ 1.0 x))))
/* Piecewise: the identity x for -1 < x <= 1, the reciprocal 1/x outside.
 * (At x = +/-1 both branches agree: 1/x == x.) */
double code(double x) {
    if (x <= -1.0) return 1.0 / x;
    if (x <= 1.0)  return x;
    return 1.0 / x;
}
! Piecewise approximation of x/(x**2+1): identity for |x| <= 1,
! reciprocal 1/x outside. Fix: add implicit none.
real(8) function code(x)
    implicit none
    real(8), intent (in) :: x
    real(8) :: tmp
    if (x <= (-1.0d0)) then
        tmp = 1.0d0 / x
    else if (x <= 1.0d0) then
        tmp = x
    else
        tmp = 1.0d0 / x
    end if
    code = tmp
end function
/** Piecewise: x itself for -1 < x <= 1, otherwise 1/x. */
public static double code(double x) {
    if (-1.0 < x && x <= 1.0) {
        return x;
    }
    return 1.0 / x;
}
def code(x):
    """Piecewise approximation of x/(x*x+1): x inside [-1, 1], 1/x outside.

    Fix: the statements were collapsed onto one line, which is a
    SyntaxError in Python; restored the intended block structure.
    """
    if x <= -1.0:
        tmp = 1.0 / x
    elif x <= 1.0:
        tmp = x
    else:
        tmp = 1.0 / x
    return tmp
# Piecewise: x for -1 < x <= 1, otherwise 1/x.
# Fix: restored valid multi-line Julia; the collapsed one-line if/elseif
# lacked statement separators after the conditions.
function code(x)
    tmp = 0.0
    if x <= -1.0
        tmp = Float64(1.0 / x)
    elseif x <= 1.0
        tmp = x
    else
        tmp = Float64(1.0 / x)
    end
    return tmp
end
% Piecewise: x for -1 < x <= 1, otherwise 1/x.
% Fix: restored line structure of the collapsed one-line if/elseif.
function tmp_2 = code(x)
    tmp = 0.0;
    if (x <= -1.0)
        tmp = 1.0 / x;
    elseif (x <= 1.0)
        tmp = x;
    else
        tmp = 1.0 / x;
    end
    tmp_2 = tmp;
end
(* Piecewise: 1/x for |x| > 1 (and x == -1), x itself inside [-1, 1]. *)
code[x_] := If[LessEqual[x, -1.0], N[(1.0 / x), $MachinePrecision], If[LessEqual[x, 1.0], x, N[(1.0 / x), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -1:\\
\;\;\;\;\frac{1}{x}\\
\mathbf{elif}\;x \leq 1:\\
\;\;\;\;x\\
\mathbf{else}:\\
\;\;\;\;\frac{1}{x}\\
\end{array}
\end{array}
if x < -1 or 1 < x: Initial program 53.3%
Taylor expanded in x around inf 99.4%
if -1 < x < 1: Initial program 100.0%
Taylor expanded in x around 0 98.2%
Final simplification 98.8%
(FPCore (x) :precision binary64 x)
/* Degenerate alternative: the identity function (x/(x^2+1) ~ x near 0). */
double code(double x) {
    return x;
}
! Degenerate alternative: the identity function.
! Fix: add implicit none.
real(8) function code(x)
    implicit none
    real(8), intent (in) :: x
    code = x
end function
/** Identity: returns its argument unchanged. */
public static double code(double x) {
    return x;
}
def code(x):
    """Identity: return the argument unchanged."""
    return x
# Identity map.
code(x) = x
% Identity map.
function tmp = code(x)
    tmp = x;
end
(* Identity function. *)
code[x_] := x
\begin{array}{l}
\\
x
\end{array}
Initial program 76.2%
Taylor expanded in x around 0 50.2%
Final simplification 50.2%
(FPCore (x) :precision binary64 (/ 1.0 (+ x (/ 1.0 x))))
/* Target form 1 / (x + 1/x): equal to x/(x^2+1) but x*x never overflows. */
double code(double x) {
    double recip = 1.0 / x;
    double denom = x + recip;
    return 1.0 / denom;
}
! Target form: 1 / (x + 1/x). Fix: add implicit none.
real(8) function code(x)
    implicit none
    real(8), intent (in) :: x
    code = 1.0d0 / (x + (1.0d0 / x))
end function
/** Target form: 1 / (x + 1/x). */
public static double code(double x) {
    final double denom = x + 1.0 / x;
    return 1.0 / denom;
}
def code(x):
    # Target form: 1 / (x + 1/x)
    denom = x + 1.0 / x
    return 1.0 / denom
# Target form 1 / (x + 1/x), Float64 at every step.
function code(x)
    denom = Float64(x + Float64(1.0 / x))
    return Float64(1.0 / denom)
end
% Target form: 1 / (x + 1/x).
function tmp = code(x)
    d = x + (1.0 / x);
    tmp = 1.0 / d;
end
(* Target form 1 / (x + 1/x), rounded stepwise to $MachinePrecision. *)
code[x_] := N[(1.0 / N[(x + N[(1.0 / x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{x + \frac{1}{x}}
\end{array}
herbie shell --seed 2023293
(FPCore (x)
:name "x / (x^2 + 1)"
:precision binary64
:herbie-target
(/ 1.0 (+ x (/ 1.0 x)))
(/ x (+ (* x x) 1.0)))