
Numeric.Integration.TanhSinh:everywhere from integration-0.2.1

FPCore:
(FPCore (x y) :precision binary64 (* x (+ 1.0 (* y y))))

C:
double code(double x, double y) {
    return x * (1.0 + (y * y));
}

Fortran:
real(8) function code(x, y)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    code = x * (1.0d0 + (y * y))
end function

Java:
public static double code(double x, double y) {
    return x * (1.0 + (y * y));
}

Python:
def code(x, y): return x * (1.0 + (y * y))

Julia:
function code(x, y) return Float64(x * Float64(1.0 + Float64(y * y))) end

MATLAB:
function tmp = code(x, y) tmp = x * (1.0 + (y * y)); end

Wolfram:
code[x_, y_] := N[(x * N[(1.0 + N[(y * y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]

Math:
x \cdot \left(1 + y \cdot y\right)
Sampling outcomes in binary64 precision:
Herbie found 3 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
| Alternative 1: fma(x · y, y, x) | 99.9% | |
| Alternative 2: x · (1 + y · y) | 94.8% | |
| Alternative 3: x | 56.4% | |
Initial Program: 94.8% accurate

FPCore:
(FPCore (x y) :precision binary64 (* x (+ 1.0 (* y y))))

C:
double code(double x, double y) {
    return x * (1.0 + (y * y));
}

Fortran:
real(8) function code(x, y)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    code = x * (1.0d0 + (y * y))
end function

Java:
public static double code(double x, double y) {
    return x * (1.0 + (y * y));
}

Python:
def code(x, y): return x * (1.0 + (y * y))

Julia:
function code(x, y) return Float64(x * Float64(1.0 + Float64(y * y))) end

MATLAB:
function tmp = code(x, y) tmp = x * (1.0 + (y * y)); end

Wolfram:
code[x_, y_] := N[(x * N[(1.0 + N[(y * y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]

Math:
x \cdot \left(1 + y \cdot y\right)
Alternative 1: 99.9% accurate

FPCore:
(FPCore (x y) :precision binary64 (fma (* x y) y x))

C:
double code(double x, double y) {
    return fma((x * y), y, x);
}

Julia:
function code(x, y) return fma(Float64(x * y), y, x) end

Wolfram:
code[x_, y_] := N[(N[(x * y), $MachinePrecision] * y + x), $MachinePrecision]

Math:
\mathsf{fma}\left(x \cdot y, y, x\right)
Derivation:

| Step | Accuracy |
|---|---|
| Initial program | 94.8% |
| `+-commutative` | 94.8% |
| `distribute-lft-in` | 94.8% |
| `associate-*r*` | 99.9% |
| `*-rgt-identity` | 99.9% |
| `fma-def` | 99.9% |
| Applied `egg-rr` | 99.9% |
| Final simplification | 99.9% |
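Read as exact algebra, the derivation steps compose into the chain below. This is a sketch reconstructed from the rule names; the intermediate expressions are inferred from those names, not copied from Herbie's internal trace.

\begin{array}{lll}
x \cdot \left(1 + y \cdot y\right) &= x \cdot \left(y \cdot y + 1\right) & \text{(+-commutative)} \\
&= x \cdot \left(y \cdot y\right) + x \cdot 1 & \text{(distribute-lft-in)} \\
&= \left(x \cdot y\right) \cdot y + x \cdot 1 & \text{(associate-*r*)} \\
&= \left(x \cdot y\right) \cdot y + x & \text{(*-rgt-identity)} \\
&= \mathsf{fma}\left(x \cdot y, y, x\right) & \text{(fma-def)}
\end{array}

In the final form the product (x · y) · y and the addition of x are performed by a single fused multiply-add, so that part of the computation is rounded only once.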
Alternative 2: 94.8% accurate

FPCore:
(FPCore (x y) :precision binary64 (* x (+ 1.0 (* y y))))

C:
double code(double x, double y) {
    return x * (1.0 + (y * y));
}

Fortran:
real(8) function code(x, y)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    code = x * (1.0d0 + (y * y))
end function

Java:
public static double code(double x, double y) {
    return x * (1.0 + (y * y));
}

Python:
def code(x, y): return x * (1.0 + (y * y))

Julia:
function code(x, y) return Float64(x * Float64(1.0 + Float64(y * y))) end

MATLAB:
function tmp = code(x, y) tmp = x * (1.0 + (y * y)); end

Wolfram:
code[x_, y_] := N[(x * N[(1.0 + N[(y * y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]

Math:
x \cdot \left(1 + y \cdot y\right)
Derivation:

| Step | Accuracy |
|---|---|
| Initial program | 94.8% |
| Final simplification | 94.8% |
Alternative 3: 56.4% accurate

FPCore:
(FPCore (x y) :precision binary64 x)

C:
double code(double x, double y) {
    return x;
}

Fortran:
real(8) function code(x, y)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    code = x
end function

Java:
public static double code(double x, double y) {
    return x;
}

Python:
def code(x, y): return x

Julia:
function code(x, y) return x end

MATLAB:
function tmp = code(x, y) tmp = x; end

Wolfram:
code[x_, y_] := x

Math:
x
Derivation:

| Step | Accuracy |
|---|---|
| Initial program | 94.8% |
| Taylor expanded in y around 0 | 56.4% |
| Final simplification | 56.4% |
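The "Taylor expanded in y around 0" step amounts to the expansion below: the expression is already a polynomial in y, and keeping only the constant term drops the x · y² contribution entirely. That is why this alternative is cheap but much less accurate once |y| is not small. (Worked out here for illustration; not part of Herbie's output.)

\begin{array}{l}
x \cdot \left(1 + y \cdot y\right) = x + x \cdot y^{2} \approx x \quad \text{when } \left|y\right| \ll 1
\end{array}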
Developer target (the expression given as :herbie-target):

FPCore:
(FPCore (x y) :precision binary64 (+ x (* (* x y) y)))

C:
double code(double x, double y) {
    return x + ((x * y) * y);
}

Fortran:
real(8) function code(x, y)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    code = x + ((x * y) * y)
end function

Java:
public static double code(double x, double y) {
    return x + ((x * y) * y);
}

Python:
def code(x, y): return x + ((x * y) * y)

Julia:
function code(x, y) return Float64(x + Float64(Float64(x * y) * y)) end

MATLAB:
function tmp = code(x, y) tmp = x + ((x * y) * y); end

Wolfram:
code[x_, y_] := N[(x + N[(N[(x * y), $MachinePrecision] * y), $MachinePrecision]), $MachinePrecision]

Math:
x + \left(x \cdot y\right) \cdot y
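One regime where the rearranged forms visibly beat the original is when y * y overflows even though the true value x · (1 + y²) is finite. The sketch below (in C, matching the listings above) compares the three forms at one such point; the inputs x = 1e-200, y = 1e200 are illustrative values chosen here, not points drawn from Herbie's sample.

#include <math.h>
#include <stdio.h>

/* Original form: for the inputs below, y*y overflows to +inf,
 * so the whole product becomes +inf. */
static double original(double x, double y) { return x * (1.0 + (y * y)); }

/* Alternative 1: fma evaluates (x*y)*y + x with a single rounding,
 * and every intermediate stays in range. */
static double alt1(double x, double y) { return fma(x * y, y, x); }

/* Developer target from the :herbie-target annotation. */
static double target(double x, double y) { return x + ((x * y) * y); }

int main(void) {
    double x = 1e-200, y = 1e200;             /* true result is about 1e200 */
    printf("original: %g\n", original(x, y)); /* prints inf                 */
    printf("alt1:     %g\n", alt1(x, y));     /* prints about 1e+200        */
    printf("target:   %g\n", target(x, y));   /* prints about 1e+200        */
    return 0;
}

The original form returns inf because y * y overflows before x can scale the result back down, while the fma form and the developer target keep every intermediate value in range.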
Reproduce:

herbie shell --seed 2024026

Then enter the following FPCore at the shell prompt:
(FPCore (x y)
  :name "Numeric.Integration.TanhSinh:everywhere from integration-0.2.1"
  :precision binary64
  :herbie-target (+ x (* (* x y) y))
  (* x (+ 1.0 (* y y))))