
(FPCore (x) :precision binary64 (/ x (+ (* x x) 1.0)))
/* Evaluate x / (x*x + 1) directly in binary64. */
double code(double x) {
    double denom = (x * x) + 1.0;
    return x / denom;
}
! Evaluate x / (x*x + 1) directly in double precision.
real(8) function code(x)
implicit none
real(8), intent (in) :: x
code = x / ((x * x) + 1.0d0)
end function
/** Evaluates x / (x^2 + 1) in binary64. */
public static double code(double x) {
    final double denominator = x * x + 1.0;
    return x / denominator;
}
def code(x):
    """Evaluate x / (x*x + 1) in double precision."""
    denom = (x * x) + 1.0
    return x / denom
# Evaluate x / (x*x + 1) in Float64, rounding each step as the original does.
function code(x)
    squared = Float64(x * x)
    return Float64(x / Float64(squared + 1.0))
end
% Evaluate x / (x*x + 1) in double precision.
function tmp = code(x) tmp = x / ((x * x) + 1.0); end
(* Evaluate x / (x*x + 1), rounding each subexpression to machine precision. *)
code[x_] := N[(x / N[(N[(x * x), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{x}{x \cdot x + 1}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 3 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x) :precision binary64 (/ x (+ (* x x) 1.0)))
/* x / (x*x + 1), computed exactly as written. */
double code(double x) {
    double squared = x * x;
    return x / (squared + 1.0);
}
! x / (x*x + 1), computed exactly as written.
real(8) function code(x)
implicit none
real(8), intent (in) :: x
code = x / ((x * x) + 1.0d0)
end function
/** x / (x*x + 1), computed exactly as written. */
public static double code(double x) {
    double squared = x * x;
    return x / (squared + 1.0);
}
def code(x):
    """x / (x*x + 1), computed exactly as written."""
    squared = x * x
    return x / (squared + 1.0)
# x / (x*x + 1), with each intermediate rounded to Float64 as in the original.
function code(x)
    denom = Float64(Float64(x * x) + 1.0)
    return Float64(x / denom)
end
% x / (x*x + 1), computed exactly as written.
function tmp = code(x) tmp = x / ((x * x) + 1.0); end
(* x / (x*x + 1), rounding each subexpression to machine precision. *)
code[x_] := N[(x / N[(N[(x * x), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{x}{x \cdot x + 1}
\end{array}
(FPCore (x) :precision binary64 (/ 1.0 (+ x (/ 1.0 x))))
/* 1 / (x + 1/x): rewrite of x/(x*x+1) that avoids overflow in x*x. */
double code(double x) {
    double recip = 1.0 / x;
    return 1.0 / (x + recip);
}
! 1 / (x + 1/x): rewrite of x/(x*x+1) that avoids overflow in x*x.
real(8) function code(x)
implicit none
real(8), intent (in) :: x
code = 1.0d0 / (x + (1.0d0 / x))
end function
/** 1 / (x + 1/x): rewrite of x/(x^2+1) that avoids overflow in x*x. */
public static double code(double x) {
    final double reciprocal = 1.0 / x;
    return 1.0 / (x + reciprocal);
}
def code(x):
    """1 / (x + 1/x): overflow-avoiding rewrite of x/(x*x+1).

    Note: raises ZeroDivisionError at x == 0 (so did the original form).
    """
    recip = 1.0 / x
    return 1.0 / (x + recip)
# 1 / (x + 1/x): overflow-avoiding rewrite of x/(x*x+1).
function code(x)
    recip = Float64(1.0 / x)
    return Float64(1.0 / Float64(x + recip))
end
% 1 / (x + 1/x): overflow-avoiding rewrite of x/(x*x+1).
function tmp = code(x) tmp = 1.0 / (x + (1.0 / x)); end
(* 1 / (x + 1/x): overflow-avoiding rewrite of x/(x*x+1). *)
code[x_] := N[(1.0 / N[(x + N[(1.0 / x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{x + \frac{1}{x}}
\end{array}
Initial program 80.3%
clear-num 80.2%
associate-/r/ 80.3%
add-sqr-sqrt 80.3%
pow2 80.3%
pow-flip 80.3%
+-commutative 80.3%
hypot-1-def 80.6%
metadata-eval 80.6%
Applied egg-rr 80.6%
Applied egg-rr 80.2%
Taylor expanded in x around 0 99.9%
Final simplification 99.9%
(FPCore (x) :precision binary64 (if (<= x 1.0) x (/ 1.0 x)))
/* Piecewise approximation of x/(x^2+1): x when x <= 1, otherwise 1/x.
 * NOTE(review): the guard tests x, not fabs(x); large negative inputs
 * take the first branch and are approximated poorly. */
double code(double x) {
    if (x <= 1.0) {
        return x;
    }
    return 1.0 / x;
}
! Piecewise approximation of x / (x*x + 1):
!   x <= 1: return x     (Taylor expansion around 0)
!   x >  1: return 1/x   (Taylor expansion around infinity)
! NOTE(review): the guard tests x, not abs(x); large negative inputs
! take the first branch and are approximated poorly.
real(8) function code(x)
implicit none
real(8), intent (in) :: x
real(8) :: tmp
if (x <= 1.0d0) then
tmp = x
else
tmp = 1.0d0 / x
end if
code = tmp
end function
/** Piecewise approximation of x/(x^2+1): x when x <= 1, otherwise 1/x. */
public static double code(double x) {
    if (x <= 1.0) {
        return x;
    }
    return 1.0 / x;
}
def code(x):
    """Piecewise approximation of x / (x*x + 1).

    Returns x when x <= 1 (Taylor expansion around 0) and 1/x
    otherwise (expansion around infinity).

    NOTE(review): the guard tests x, not abs(x); large negative
    inputs take the first branch and are approximated poorly.
    """
    # The original report collapsed this body onto one line, which is
    # not valid Python; restored as a proper if/else.
    tmp = 0
    if x <= 1.0:
        tmp = x
    else:
        tmp = 1.0 / x
    return tmp
# Piecewise approximation of x/(x^2+1): x when x <= 1, otherwise 1/x.
# NOTE(review): the guard tests x, not abs(x); large negative inputs
# take the first branch and are approximated poorly.
# (The original report fused the statements without separators, which
# does not parse; restored with proper line breaks.)
function code(x)
    tmp = 0.0
    if x <= 1.0
        tmp = x
    else
        tmp = Float64(1.0 / x)
    end
    return tmp
end
% Piecewise approximation of x/(x^2+1): x when x <= 1, otherwise 1/x.
function tmp_2 = code(x) tmp = 0.0; if (x <= 1.0) tmp = x; else tmp = 1.0 / x; end tmp_2 = tmp; end
(* Piecewise approximation of x/(x^2+1): x when x <= 1, otherwise 1/x. *)
code[x_] := If[LessEqual[x, 1.0], x, N[(1.0 / x), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq 1:\\
\;\;\;\;x\\
\mathbf{else}:\\
\;\;\;\;\frac{1}{x}\\
\end{array}
\end{array}
if x < 1: Initial program 87.9%
Taylor expanded in x around 0 70.2%
if 1 < x: Initial program 60.5%
Taylor expanded in x around inf 99.4%
Final simplification 78.3%
(FPCore (x) :precision binary64 x)
/* Identity approximation of x/(x^2+1) (Taylor expansion around 0);
 * accurate only for small |x|. */
double code(double x) {
return x;
}
! Identity approximation of x/(x^2+1) (Taylor expansion around 0);
! accurate only for small abs(x).
real(8) function code(x)
implicit none
real(8), intent (in) :: x
code = x
end function
/** Identity approximation of x/(x^2+1) (Taylor expansion around 0). */
public static double code(double x) {
return x;
}
def code(x): return x  # identity approximation of x/(x*x+1), Taylor around 0
# Identity approximation of x/(x^2+1) (Taylor expansion around 0).
function code(x) return x end
% Identity approximation of x/(x^2+1) (Taylor expansion around 0).
function tmp = code(x) tmp = x; end
(* Identity approximation of x/(x^2+1) (Taylor expansion around 0). *)
code[x_] := x
\begin{array}{l}
\\
x
\end{array}
Initial program 80.3%
Taylor expanded in x around 0 51.8%
Final simplification 51.8%
(FPCore (x) :precision binary64 (/ 1.0 (+ x (/ 1.0 x))))
/* Target rewrite 1/(x + 1/x) of x/(x*x+1); avoids overflow in x*x. */
double code(double x) {
    double shifted = x + 1.0 / x;
    return 1.0 / shifted;
}
! Target rewrite 1/(x + 1/x) of x/(x*x+1); avoids overflow in x*x.
real(8) function code(x)
implicit none
real(8), intent (in) :: x
code = 1.0d0 / (x + (1.0d0 / x))
end function
/** Target rewrite 1/(x + 1/x) of x/(x*x+1); avoids overflow in x*x. */
public static double code(double x) {
    double shifted = x + 1.0 / x;
    return 1.0 / shifted;
}
def code(x):
    """Target rewrite 1/(x + 1/x) of x/(x*x+1); avoids overflow in x*x."""
    shifted = x + (1.0 / x)
    return 1.0 / shifted
# Target rewrite 1/(x + 1/x) of x/(x*x+1).
function code(x)
    shifted = Float64(x + Float64(1.0 / x))
    return Float64(1.0 / shifted)
end
% Target rewrite 1/(x + 1/x) of x/(x*x+1); avoids overflow in x*x.
function tmp = code(x) tmp = 1.0 / (x + (1.0 / x)); end
(* Target rewrite 1/(x + 1/x) of x/(x*x+1); avoids overflow in x*x. *)
code[x_] := N[(1.0 / N[(x + N[(1.0 / x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{x + \frac{1}{x}}
\end{array}
herbie shell --seed 2024020
(FPCore (x)
:name "x / (x^2 + 1)"
:precision binary64
:herbie-target
(/ 1.0 (+ x (/ 1.0 x)))
(/ x (+ (* x x) 1.0)))