
(FPCore (x) :precision binary64 (- (* (+ x 1.0) (+ x 1.0)) 1.0))
double code(double x) {
return ((x + 1.0) * (x + 1.0)) - 1.0;
}
real(8) function code(x)
real(8), intent (in) :: x
code = ((x + 1.0d0) * (x + 1.0d0)) - 1.0d0
end function
public static double code(double x) {
return ((x + 1.0) * (x + 1.0)) - 1.0;
}
def code(x): return ((x + 1.0) * (x + 1.0)) - 1.0
function code(x) return Float64(Float64(Float64(x + 1.0) * Float64(x + 1.0)) - 1.0) end
function tmp = code(x)
    tmp = ((x + 1.0) * (x + 1.0)) - 1.0;
end
code[x_] := N[(N[(N[(x + 1.0), $MachinePrecision] * N[(x + 1.0), $MachinePrecision]), $MachinePrecision] - 1.0), $MachinePrecision]
\begin{array}{l}
\\
\left(x + 1\right) \cdot \left(x + 1\right) - 1
\end{array}
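For context on the accuracy figures that follow: the final subtraction cancels badly wherever (x + 1) * (x + 1) is close to 1, that is, for x near 0 or near -2, because two nearly equal values are subtracted. As a worked identity (for reference; not part of the Herbie output):
\begin{array}{l}
\left(x + 1\right) \cdot \left(x + 1\right) - 1 = x^2 + 2 \cdot x + 1 - 1 = x^2 + 2 \cdot x
\end{array}
The alternatives below restructure or approximate this quantity so that the explicit subtraction is avoided.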
Sampling outcomes in binary64 precision:
Herbie found 5 alternatives:
(FPCore (x) :precision binary64 (fma x x (+ x x)))
double code(double x) {
return fma(x, x, (x + x));
}
function code(x) return fma(x, x, Float64(x + x)) end
code[x_] := N[(x * x + N[(x + x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(x, x, x + x\right)
\end{array}
Initial program 54.6%
lift--.f64  N/A
lift-*.f64  N/A
difference-of-sqr-1  N/A
lift-+.f64  N/A
associate--l+  N/A
metadata-eval  N/A
+-rgt-identity  N/A
*-commutative  N/A
distribute-lft-in  N/A
*-rgt-identity  N/A
lift-+.f64  N/A
distribute-lft-in  N/A
*-rgt-identity  N/A
associate-+l+  N/A
lower-fma.f64  N/A
lower-+.f64  100.0
Applied rewrites 100.0%
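The derivation lands on fma(x, x, x + x), i.e. x * x + 2x evaluated with a single rounding. A minimal standalone check of the difference (a sketch, assuming a C99 compiler and libm; the test point 1e-12 is an arbitrary small value, not taken from the report):
#include <math.h>
#include <stdio.h>

/* Compare the original expression with the fma-based alternative
   near x = 0, where the original's final subtraction cancels. */
int main(void) {
    double x = 1e-12;                               /* arbitrary small test point */
    double naive = ((x + 1.0) * (x + 1.0)) - 1.0;   /* original form */
    double fused = fma(x, x, x + x);                /* x*x + 2x, rounded once; x + x is exact */
    printf("naive: %.17g\n", naive);
    printf("fma:   %.17g\n", fused);
    return 0;
}
Near zero the naive form retains only part of the significand, while the fma form stays accurate, which is consistent with the jump from 54.6% to 100.0% accuracy in the derivation above. Compile with, for example, cc demo.c -lm.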
(FPCore (x) :precision binary64 (if (<= (* (+ x 1.0) (+ x 1.0)) 5.0) (* 2.0 x) (* x x)))
double code(double x) {
    double tmp;
    if (((x + 1.0) * (x + 1.0)) <= 5.0) {
        tmp = 2.0 * x;
    } else {
        tmp = x * x;
    }
    return tmp;
}
real(8) function code(x)
    real(8), intent (in) :: x
    real(8) :: tmp
    if (((x + 1.0d0) * (x + 1.0d0)) <= 5.0d0) then
        tmp = 2.0d0 * x
    else
        tmp = x * x
    end if
    code = tmp
end function
public static double code(double x) {
    double tmp;
    if (((x + 1.0) * (x + 1.0)) <= 5.0) {
        tmp = 2.0 * x;
    } else {
        tmp = x * x;
    }
    return tmp;
}
def code(x):
    tmp = 0
    if ((x + 1.0) * (x + 1.0)) <= 5.0:
        tmp = 2.0 * x
    else:
        tmp = x * x
    return tmp
function code(x)
    tmp = 0.0
    if (Float64(Float64(x + 1.0) * Float64(x + 1.0)) <= 5.0)
        tmp = Float64(2.0 * x);
    else
        tmp = Float64(x * x);
    end
    return tmp
end
function tmp_2 = code(x)
    tmp = 0.0;
    if (((x + 1.0) * (x + 1.0)) <= 5.0)
        tmp = 2.0 * x;
    else
        tmp = x * x;
    end
    tmp_2 = tmp;
end
code[x_] := If[LessEqual[N[(N[(x + 1.0), $MachinePrecision] * N[(x + 1.0), $MachinePrecision]), $MachinePrecision], 5.0], N[(2.0 * x), $MachinePrecision], N[(x * x), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\left(x + 1\right) \cdot \left(x + 1\right) \leq 5:\\
\;\;\;\;2 \cdot x\\
\mathbf{else}:\\
\;\;\;\;x \cdot x\\
\end{array}
\end{array}
if (*.f64 (+.f64 x #s(literal 1 binary64)) (+.f64 x #s(literal 1 binary64))) < 5
Initial program 8.6%
Taylor expanded in x around 0
lower-*.f64  97.6
Applied rewrites 97.6%
if 5 < (*.f64 (+.f64 x #s(literal 1 binary64)) (+.f64 x #s(literal 1 binary64)))
Initial program 100.0%
Taylor expanded in x around inf
unpow2  N/A
lower-*.f64  97.5
Applied rewrites 97.5%
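For reference, the two series expansions behind the branch (worked out here; not shown in the report): near 0 the expression is dominated by its linear term, and for large |x| by its quadratic term, with (x + 1) * (x + 1) <= 5 as the boundary Herbie chose between the two regimes.
\begin{array}{l}
x \to 0: \left(x + 1\right) \cdot \left(x + 1\right) - 1 = 2 \cdot x + x^2 \approx 2 \cdot x\\
\left|x\right| \to \infty: \left(x + 1\right) \cdot \left(x + 1\right) - 1 = x^2 + 2 \cdot x \approx x \cdot x
\end{array}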
(FPCore (x) :precision binary64 (* (+ 2.0 x) x))
double code(double x) {
return (2.0 + x) * x;
}
real(8) function code(x)
real(8), intent (in) :: x
code = (2.0d0 + x) * x
end function
public static double code(double x) {
return (2.0 + x) * x;
}
def code(x): return (2.0 + x) * x
function code(x) return Float64(Float64(2.0 + x) * x) end
function tmp = code(x)
    tmp = (2.0 + x) * x;
end
code[x_] := N[(N[(2.0 + x), $MachinePrecision] * x), $MachinePrecision]
\begin{array}{l}
\\
\left(2 + x\right) \cdot x
\end{array}
Initial program 54.6%
lift--.f64  N/A
lift-*.f64  N/A
difference-of-sqr-1  N/A
lift-+.f64  N/A
associate--l+  N/A
metadata-eval  N/A
+-rgt-identity  N/A
lower-*.f64  N/A
lift-+.f64  N/A
associate-+l+  N/A
metadata-eval  N/A
+-commutative  N/A
lower-+.f64  100.0
Applied rewrites 100.0%
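This alternative is the factored form of the same polynomial (a worked identity for reference):
\begin{array}{l}
\left(x + 1\right) \cdot \left(x + 1\right) - 1 = x^2 + 2 \cdot x = \left(2 + x\right) \cdot x
\end{array}
Factoring removes the subtraction of nearly equal values entirely, so there is no cancellation for any input, consistent with the 100.0% accuracy after rewrites.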
(FPCore (x) :precision binary64 (* 2.0 x))
double code(double x) {
return 2.0 * x;
}
real(8) function code(x)
real(8), intent (in) :: x
code = 2.0d0 * x
end function
public static double code(double x) {
return 2.0 * x;
}
def code(x): return 2.0 * x
function code(x) return Float64(2.0 * x) end
function tmp = code(x)
    tmp = 2.0 * x;
end
code[x_] := N[(2.0 * x), $MachinePrecision]
\begin{array}{l}
\\
2 \cdot x
\end{array}
Initial program 54.6%
Taylor expanded in x around 0
lower-*.f64  50.2
Applied rewrites 50.2%
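This is the first-order Taylor truncation around 0 (worked out for reference):
\begin{array}{l}
\left(x + 1\right) \cdot \left(x + 1\right) - 1 = 2 \cdot x + x^2 \approx 2 \cdot x \quad \text{for } \left|x\right| \ll 1
\end{array}
It is accurate only where the quadratic term is negligible, which is consistent with the roughly 50% overall accuracy reported above.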
(FPCore (x) :precision binary64 (- 1.0 1.0))
double code(double x) {
return 1.0 - 1.0;
}
real(8) function code(x)
real(8), intent (in) :: x
code = 1.0d0 - 1.0d0
end function
public static double code(double x) {
return 1.0 - 1.0;
}
def code(x): return 1.0 - 1.0
function code(x) return Float64(1.0 - 1.0) end
function tmp = code(x)
    tmp = 1.0 - 1.0;
end
code[x_] := N[(1.0 - 1.0), $MachinePrecision]
\begin{array}{l}
\\
1 - 1
\end{array}
Initial program 54.6%
Taylor expanded in x around 0
Applied rewrites 3.6%
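The zero-order truncation keeps only the constant term of the expansion around 0, which is zero (written here as 1 - 1):
\begin{array}{l}
\left.\left(\left(x + 1\right) \cdot \left(x + 1\right) - 1\right)\right|_{x = 0} = 0
\end{array}
Since it ignores x entirely, it is accurate only in a tiny neighborhood of 0, hence the very low 3.6% figure after rewrites.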
To reproduce this result, start a Herbie shell with
herbie shell --seed 2024313
and paste in the following expression:
(FPCore (x)
:name "Expanding a square"
:precision binary64
(- (* (+ x 1.0) (+ x 1.0)) 1.0))