
(FPCore (x) :precision binary64 (- (* (+ x 1.0) (+ x 1.0)) 1.0))
/* Evaluates (x + 1)^2 - 1 in double precision. */
double code(double x) {
	const double xp1 = x + 1.0;
	return xp1 * xp1 - 1.0;
}
! Evaluates (x + 1)**2 - 1 in double precision.
real(8) function code(x)
real(8), intent (in) :: x
real(8) :: xp1
xp1 = x + 1.0d0
code = xp1 * xp1 - 1.0d0
end function
/** Evaluates (x + 1)^2 - 1 in double precision. */
public static double code(double x) {
    final double xp1 = x + 1.0;
    return xp1 * xp1 - 1.0;
}
def code(x):
    # Evaluates (x + 1)^2 - 1; identical arithmetic to the one-line form.
    xp1 = x + 1.0
    return xp1 * xp1 - 1.0
# Evaluates (x + 1)^2 - 1, rounding each intermediate to Float64.
function code(x)
    xp1 = Float64(x + 1.0)
    return Float64(Float64(xp1 * xp1) - 1.0)
end
% Evaluates (x + 1)^2 - 1.
function tmp = code(x)
	xp1 = x + 1.0;
	tmp = xp1 * xp1 - 1.0;
end
(* Evaluates (x + 1)^2 - 1, rounding every intermediate to $MachinePrecision. *)
code[x_] := N[(N[(N[(x + 1.0), $MachinePrecision] * N[(x + 1.0), $MachinePrecision]), $MachinePrecision] - 1.0), $MachinePrecision]
\begin{array}{l}
\\
\left(x + 1\right) \cdot \left(x + 1\right) - 1
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 3 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x) :precision binary64 (- (* (+ x 1.0) (+ x 1.0)) 1.0))
/* Evaluates (x + 1)^2 - 1 in double precision. */
double code(double x) {
	const double shifted = x + 1.0;
	return shifted * shifted - 1.0;
}
! Evaluates (x + 1)**2 - 1 in double precision.
real(8) function code(x)
real(8), intent (in) :: x
real(8) :: shifted
shifted = x + 1.0d0
code = shifted * shifted - 1.0d0
end function
/** Evaluates (x + 1)^2 - 1 in double precision. */
public static double code(double x) {
    final double shifted = x + 1.0;
    return shifted * shifted - 1.0;
}
def code(x):
    # (x + 1)^2 - 1, written with an explicit intermediate.
    shifted = x + 1.0
    return shifted * shifted - 1.0
# Evaluates (x + 1)^2 - 1, rounding each intermediate to Float64.
function code(x)
    shifted = Float64(x + 1.0)
    return Float64(Float64(shifted * shifted) - 1.0)
end
% Evaluates (x + 1)^2 - 1.
function tmp = code(x)
	shifted = x + 1.0;
	tmp = shifted * shifted - 1.0;
end
(* Evaluates (x + 1)^2 - 1, rounding every intermediate to $MachinePrecision. *)
code[x_] := N[(N[(N[(x + 1.0), $MachinePrecision] * N[(x + 1.0), $MachinePrecision]), $MachinePrecision] - 1.0), $MachinePrecision]
\begin{array}{l}
\\
\left(x + 1\right) \cdot \left(x + 1\right) - 1
\end{array}
(FPCore (x) :precision binary64 (fma x x (* x 2.0)))
double code(double x) {
return fma(x, x, (x * 2.0));
}
# Computes x^2 + 2x with a fused multiply-add (single rounding).
function code(x)
    twice_x = Float64(x * 2.0)
    return fma(x, x, twice_x)
end
(* Computes x^2 + 2x; the x*x + (2x) sum mirrors an fma at $MachinePrecision. *)
code[x_] := N[(x * x + N[(x * 2.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(x, x, x \cdot 2\right)
\end{array}
Initial program 53.6%
difference-of-sqr-1 53.6%
*-commutative 53.6%
associate--l+ 100.0%
metadata-eval 100.0%
+-rgt-identity 100.0%
associate-+l+ 100.0%
metadata-eval 100.0%
Simplified 100.0%
distribute-lft-in 100.0%
fma-define 100.0%
Applied egg-rr 100.0%
(FPCore (x) :precision binary64 (* x (+ x 2.0)))
/* Computes x * (x + 2), the factored form of (x+1)^2 - 1. */
double code(double x) {
	const double shifted = x + 2.0;
	return x * shifted;
}
! Computes x * (x + 2), the factored form of (x + 1)**2 - 1.
real(8) function code(x)
real(8), intent (in) :: x
real(8) :: shifted
shifted = x + 2.0d0
code = x * shifted
end function
/** Computes x * (x + 2), the factored form of (x + 1)^2 - 1. */
public static double code(double x) {
    final double shifted = x + 2.0;
    return x * shifted;
}
def code(x):
    # x * (x + 2), the factored form of (x + 1)^2 - 1.
    shifted = x + 2.0
    return x * shifted
# x * (x + 2), the factored form of (x + 1)^2 - 1, rounded per step.
function code(x)
    shifted = Float64(x + 2.0)
    return Float64(x * shifted)
end
% x * (x + 2), the factored form of (x + 1)^2 - 1.
function tmp = code(x)
	shifted = x + 2.0;
	tmp = x * shifted;
end
(* x * (x + 2), the factored form of (x + 1)^2 - 1, at $MachinePrecision. *)
code[x_] := N[(x * N[(x + 2.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x \cdot \left(x + 2\right)
\end{array}
Initial program 53.6%
difference-of-sqr-1 53.6%
*-commutative 53.6%
associate--l+ 100.0%
metadata-eval 100.0%
+-rgt-identity 100.0%
associate-+l+ 100.0%
metadata-eval 100.0%
Simplified 100.0%
(FPCore (x) :precision binary64 (* x 2.0))
/* Computes 2x, the first-order Taylor approximation of (x+1)^2 - 1 near 0. */
double code(double x) {
	return 2.0 * x;
}
! Computes 2x, the first-order Taylor approximation of (x + 1)**2 - 1 near 0.
real(8) function code(x)
real(8), intent (in) :: x
code = 2.0d0 * x
end function
/** Computes 2x, the first-order Taylor approximation of (x + 1)^2 - 1 near 0. */
public static double code(double x) {
    return 2.0 * x;
}
def code(x):
    # 2x, the first-order Taylor approximation of (x + 1)^2 - 1 near 0.
    return 2.0 * x
# 2x, the first-order Taylor approximation of (x + 1)^2 - 1 near 0.
function code(x)
    doubled = Float64(x * 2.0)
    return doubled
end
% 2x, the first-order Taylor approximation of (x + 1)^2 - 1 near 0.
function tmp = code(x)
	tmp = 2.0 * x;
end
(* 2x, the first-order Taylor approximation of (x + 1)^2 - 1 near 0. *)
code[x_] := N[(x * 2.0), $MachinePrecision]
\begin{array}{l}
\\
x \cdot 2
\end{array}
Initial program 53.6%
difference-of-sqr-1 53.6%
*-commutative 53.6%
associate--l+ 100.0%
metadata-eval 100.0%
+-rgt-identity 100.0%
associate-+l+ 100.0%
metadata-eval 100.0%
Simplified 100.0%
Taylor expanded in x around 0 51.6%
Final simplification 51.6%
herbie shell --seed 2024106
(FPCore (x)
:name "Expanding a square"
:precision binary64
(- (* (+ x 1.0) (+ x 1.0)) 1.0))