
(FPCore (x y) :precision binary64 (+ (+ (* x 2.0) (* x x)) (* y y)))
double code(double x, double y) {
    /* ((x*2) + x*x) + y*y, evaluated in the written rounding order. */
    const double twice_x = x * 2.0;
    const double x_sq = x * x;
    const double y_sq = y * y;
    return (twice_x + x_sq) + y_sq;
}
real(8) function code(x, y)
    implicit none
    real(8), intent(in) :: x
    real(8), intent(in) :: y
    real(8) :: twice_x, x_sq, y_sq

    ! Same rounding sequence as ((x*2) + x*x) + y*y
    twice_x = x * 2.0d0
    x_sq = x * x
    y_sq = y * y
    code = (twice_x + x_sq) + y_sq
end function
public static double code(double x, double y) {
    // Sum of x*2, x*x and y*y, rounded in the original left-to-right order.
    final double twiceX = x * 2.0;
    final double xSq = x * x;
    final double ySq = y * y;
    return (twiceX + xSq) + ySq;
}
def code(x, y):
    """Return ((x * 2.0) + x*x) + y*y, preserving the rounding order."""
    twice_x = x * 2.0
    x_sq = x * x
    y_sq = y * y
    return (twice_x + x_sq) + y_sq
function code(x, y)
    # Same rounding sequence as the flat one-liner.
    twice_x = Float64(x * 2.0)
    x_sq = Float64(x * x)
    y_sq = Float64(y * y)
    return Float64(Float64(twice_x + x_sq) + y_sq)
end
function tmp = code(x, y)
    % ((x*2) + x*x) + y*y in double precision.
    % Fix: the declaration, body and `end` were collapsed onto one line,
    % which MATLAB cannot parse; restored to multi-line form.
    tmp = ((x * 2.0) + (x * x)) + (y * y);
end
(* ((x*2) + x*x) + y*y; each intermediate rounded via N[..., $MachinePrecision] *)
code[x_, y_] := N[(N[(N[(x * 2.0), $MachinePrecision] + N[(x * x), $MachinePrecision]), $MachinePrecision] + N[(y * y), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(x \cdot 2 + x \cdot x\right) + y \cdot y
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 5 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x y) :precision binary64 (+ (+ (* x 2.0) (* x x)) (* y y)))
double code(double x, double y) {
    /* Accumulate in the written order: (x*2 + x*x) + y*y. */
    double acc = (x * 2.0) + (x * x);
    acc += y * y;
    return acc;
}
real(8) function code(x, y)
    implicit none
    real(8), intent(in) :: x
    real(8), intent(in) :: y
    real(8) :: partial

    ! Partial sum keeps the original left-to-right rounding order.
    partial = (x * 2.0d0) + (x * x)
    code = partial + (y * y)
end function
public static double code(double x, double y) {
    // (x*2 + x*x) + y*y, accumulated in the written order.
    double sum = (x * 2.0) + (x * x);
    sum += y * y;
    return sum;
}
def code(x, y):
    """Evaluate ((x * 2.0) + x*x) + y*y with unchanged rounding order."""
    partial = (x * 2.0) + (x * x)
    return partial + (y * y)
function code(x, y)
    # Partial sum first; identical rounding sequence to the one-liner.
    partial = Float64(Float64(x * 2.0) + Float64(x * x))
    return Float64(partial + Float64(y * y))
end
function tmp = code(x, y)
    % ((x*2) + x*x) + y*y in double precision.
    % Fix: restored multi-line form; the one-line rendering is not valid MATLAB.
    tmp = ((x * 2.0) + (x * x)) + (y * y);
end
(* ((x*2) + x*x) + y*y; each intermediate rounded via N[..., $MachinePrecision] *)
code[x_, y_] := N[(N[(N[(x * 2.0), $MachinePrecision] + N[(x * x), $MachinePrecision]), $MachinePrecision] + N[(y * y), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(x \cdot 2 + x \cdot x\right) + y \cdot y
\end{array}
(FPCore (x y) :precision binary64 (fma x (+ x 2.0) (* y y)))
double code(double x, double y) {
return fma(x, (x + 2.0), (y * y));
}
function code(x, y)
    # Fused multiply-add: x*(x + 2) + y^2 with a single rounding on the outer step.
    shifted = Float64(x + 2.0)
    y_sq = Float64(y * y)
    return fma(x, shifted, y_sq)
end
(* fma alternative rendered as x*(x + 2) + y*y under $MachinePrecision rounding *)
code[x_, y_] := N[(x * N[(x + 2.0), $MachinePrecision] + N[(y * y), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(x, x + 2, y \cdot y\right)
\end{array}
Initial program 100.0%
distribute-lft-out 100.0%
fma-define 100.0%
+-commutative 100.0%
Simplified 100.0%
(FPCore (x y) :precision binary64 (if (or (<= x -2.8e+131) (not (<= x 4.8e+33))) (* x (+ x 2.0)) (+ (* y y) (* x 2.0))))
double code(double x, double y) {
    double result;
    /* Outside (-2.8e131, 4.8e33] the y*y term is dropped (Taylor regime).
       Note: !(x <= 4.8e33) rather than (x > 4.8e33) so NaN x takes this branch. */
    if ((x <= -2.8e+131) || !(x <= 4.8e+33)) {
        result = x * (x + 2.0);
    } else {
        result = (y * y) + (x * 2.0);
    }
    return result;
}
real(8) function code(x, y)
    implicit none
    real(8), intent(in) :: x
    real(8), intent(in) :: y

    ! Outside (-2.8e131, 4.8e33] the y*y term is dropped (Taylor regime);
    ! assign to the result name directly instead of via a temporary.
    if ((x <= (-2.8d+131)) .or. (.not. (x <= 4.8d+33))) then
        code = x * (x + 2.0d0)
    else
        code = (y * y) + (x * 2.0d0)
    end if
end function
public static double code(double x, double y) {
    // Large-|x| regime: keep only x*(x + 2); otherwise y*y + x*2.
    // !(x <= 4.8e33) instead of (x > 4.8e33) so NaN x takes the first branch.
    if ((x <= -2.8e+131) || !(x <= 4.8e+33)) {
        return x * (x + 2.0);
    }
    return (y * y) + (x * 2.0);
}
def code(x, y):
    """Piecewise approximation: x*(x + 2) when x is outside
    (-2.8e131, 4.8e33], otherwise y*y + x*2.

    Fix: the original was collapsed onto one line (``def ...: tmp = 0 if ...
    else: ...``), which is a Python syntax error; the suites are restored
    on separate lines with identical branch logic.
    """
    if (x <= -2.8e+131) or not (x <= 4.8e+33):
        tmp = x * (x + 2.0)
    else:
        tmp = (y * y) + (x * 2.0)
    return tmp
function code(x, y)
    # Piecewise: x*(x + 2) for x outside (-2.8e131, 4.8e33], else y*y + x*2.
    # Fix: the one-line form juxtaposed `tmp = 0.0` and `if` without a
    # separator, which Julia cannot parse; restored on separate lines.
    if (x <= -2.8e+131) || !(x <= 4.8e+33)
        tmp = Float64(x * Float64(x + 2.0))
    else
        tmp = Float64(Float64(y * y) + Float64(x * 2.0))
    end
    return tmp
end
function tmp_2 = code(x, y)
    % Piecewise: x*(x + 2) for x outside (-2.8e131, 4.8e33], else y*y + x*2.
    % Fix: restored multi-line form; the one-line rendering is not valid MATLAB.
    if ((x <= -2.8e+131) || ~((x <= 4.8e+33)))
        tmp = x * (x + 2.0);
    else
        tmp = (y * y) + (x * 2.0);
    end
    tmp_2 = tmp;
end
(* Piecewise: x*(x + 2) when x <= -2.8e131 or not (x <= 4.8e33), else y*y + x*2 *)
code[x_, y_] := If[Or[LessEqual[x, -2.8e+131], N[Not[LessEqual[x, 4.8e+33]], $MachinePrecision]], N[(x * N[(x + 2.0), $MachinePrecision]), $MachinePrecision], N[(N[(y * y), $MachinePrecision] + N[(x * 2.0), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -2.8 \cdot 10^{+131} \lor \neg \left(x \leq 4.8 \cdot 10^{+33}\right):\\
\;\;\;\;x \cdot \left(x + 2\right)\\
\mathbf{else}:\\
\;\;\;\;y \cdot y + x \cdot 2\\
\end{array}
\end{array}
if x < -2.8000000000000001e131 or 4.8e33 < x:
Initial program 100.0%
distribute-lft-out 100.0%
Simplified 100.0%
Taylor expanded in y around 0 94.6%
if -2.8000000000000001e131 < x < 4.8e33:
Initial program 100.0%
distribute-lft-out 100.0%
Simplified 100.0%
Taylor expanded in x around 0 91.0%
Final simplification 92.2%
(FPCore (x y) :precision binary64 (+ (* y y) (* x (+ x 2.0))))
double code(double x, double y) {
    /* y*y + x*(x + 2): factored form of the quadratic in x. */
    const double y_sq = y * y;
    return y_sq + x * (x + 2.0);
}
real(8) function code(x, y)
    implicit none
    real(8), intent(in) :: x
    real(8), intent(in) :: y
    real(8) :: y_sq

    ! y*y + x*(x + 2): factored form of the quadratic in x.
    y_sq = y * y
    code = y_sq + x * (x + 2.0d0)
end function
public static double code(double x, double y) {
    // y*y + x*(x + 2): factored form of the quadratic in x.
    final double ySq = y * y;
    return ySq + x * (x + 2.0);
}
def code(x, y):
    """Return y*y + x*(x + 2.0), the factored form of the quadratic."""
    y_sq = y * y
    return y_sq + x * (x + 2.0)
function code(x, y)
    # y^2 + x*(x + 2): factored form of the quadratic in x.
    y_sq = Float64(y * y)
    return Float64(y_sq + Float64(x * Float64(x + 2.0)))
end
function tmp = code(x, y)
    % y*y + x*(x + 2): factored form of the quadratic in x.
    % Fix: restored multi-line form; the one-line rendering is not valid MATLAB.
    tmp = (y * y) + (x * (x + 2.0));
end
(* y*y + x*(x + 2), factored form, rounded at each step with $MachinePrecision *)
code[x_, y_] := N[(N[(y * y), $MachinePrecision] + N[(x * N[(x + 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
y \cdot y + x \cdot \left(x + 2\right)
\end{array}
Initial program 100.0%
distribute-lft-out 100.0%
Simplified 100.0%
Final simplification 100.0%
(FPCore (x y) :precision binary64 (* x (+ x 2.0)))
double code(double x, double y) {
    /* y is intentionally unused: this approximation keeps only x*(x + 2). */
    (void) y;
    return x * (x + 2.0);
}
real(8) function code(x, y)
    implicit none
    real(8), intent(in) :: x
    real(8), intent(in) :: y

    ! y is intentionally unused in this approximation.
    code = x * (x + 2.0d0)
end function
public static double code(double x, double y) {
    // y is intentionally unused: approximation keeps only x*(x + 2).
    final double shifted = x + 2.0;
    return x * shifted;
}
def code(x, y):
    """Return x * (x + 2.0); y is intentionally ignored."""
    shifted = x + 2.0
    return x * shifted
function code(x, y)
    # y is intentionally unused in this approximation.
    shifted = Float64(x + 2.0)
    return Float64(x * shifted)
end
function tmp = code(x, y)
    % x*(x + 2); y is intentionally unused.
    % Fix: restored multi-line form; the one-line rendering is not valid MATLAB.
    tmp = x * (x + 2.0);
end
(* x*(x + 2); y is unused in this approximation *)
code[x_, y_] := N[(x * N[(x + 2.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x \cdot \left(x + 2\right)
\end{array}
Initial program 100.0%
distribute-lft-out 100.0%
Simplified 100.0%
Taylor expanded in y around 0 55.6%
Final simplification 55.6%
(FPCore (x y) :precision binary64 (* x 2.0))
double code(double x, double y) {
    /* Crudest approximation: only the linear term x*2 survives; y unused. */
    (void) y;
    return x * 2.0;
}
real(8) function code(x, y)
    implicit none
    real(8), intent(in) :: x
    real(8), intent(in) :: y

    ! Only the linear term survives; y is intentionally unused.
    code = x * 2.0d0
end function
public static double code(double x, double y) {
    // Only the linear term x*2 survives; y is intentionally unused.
    return 2.0 * x;
}
def code(x, y):
    """Return x * 2.0; y is intentionally ignored."""
    return 2.0 * x
function code(x, y)
    # Linear-term-only approximation; y is intentionally unused.
    return Float64(2.0 * x)
end
function tmp = code(x, y)
    % x*2; y is intentionally unused.
    % Fix: restored multi-line form; the one-line rendering is not valid MATLAB.
    tmp = x * 2.0;
end
(* linear term only: x*2; y is unused *)
code[x_, y_] := N[(x * 2.0), $MachinePrecision]
\begin{array}{l}
\\
x \cdot 2
\end{array}
Initial program 100.0%
distribute-lft-out 100.0%
Simplified 100.0%
Taylor expanded in y around 0 55.6%
Taylor expanded in x around 0 18.5%
Final simplification 18.5%
(FPCore (x y) :precision binary64 (+ (* y y) (+ (* 2.0 x) (* x x))))
double code(double x, double y) {
    /* y*y + (2*x + x*x), evaluated in the written rounding order. */
    const double inner = (2.0 * x) + (x * x);
    return (y * y) + inner;
}
real(8) function code(x, y)
    implicit none
    real(8), intent(in) :: x
    real(8), intent(in) :: y
    real(8) :: inner

    ! y*y + (2*x + x*x), same rounding order as written.
    inner = (2.0d0 * x) + (x * x)
    code = (y * y) + inner
end function
public static double code(double x, double y) {
    // y*y + (2*x + x*x), same rounding order as written.
    final double inner = (2.0 * x) + (x * x);
    return (y * y) + inner;
}
def code(x, y):
    """Return y*y + (2.0*x + x*x), preserving the rounding order."""
    inner = (2.0 * x) + (x * x)
    return (y * y) + inner
function code(x, y)
    # y^2 + (2x + x^2) with the same rounding sequence as the one-liner.
    inner = Float64(Float64(2.0 * x) + Float64(x * x))
    return Float64(Float64(y * y) + inner)
end
function tmp = code(x, y)
    % y*y + ((2*x) + x*x).
    % Fix: restored multi-line form; the one-line rendering is not valid MATLAB.
    tmp = (y * y) + ((2.0 * x) + (x * x));
end
(* y*y + (2*x + x*x), rounded at each step with $MachinePrecision *)
code[x_, y_] := N[(N[(y * y), $MachinePrecision] + N[(N[(2.0 * x), $MachinePrecision] + N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
y \cdot y + \left(2 \cdot x + x \cdot x\right)
\end{array}
herbie shell --seed 2024092
(FPCore (x y)
:name "Numeric.Log:$clog1p from log-domain-0.10.2.1, A"
:precision binary64
:alt
(+ (* y y) (+ (* 2.0 x) (* x x)))
(+ (+ (* x 2.0) (* x x)) (* y y)))