
; Initial program: x * (y + y), evaluated in IEEE-754 double precision.
(FPCore (x y) :precision binary64 (* x (+ y y)))
/* C translation of the FPCore above. */
double code(double x, double y) {
return x * (y + y);
}
! Fortran translation (real(8) is the double-precision kind here).
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = x * (y + y)
end function
// Java translation.
public static double code(double x, double y) {
return x * (y + y);
}
# Python translation.
def code(x, y): return x * (y + y)
# Julia translation; Float64(...) makes each rounding step explicit.
function code(x, y) return Float64(x * Float64(y + y)) end
% MATLAB translation.
function tmp = code(x, y) tmp = x * (y + y); end
(* Mathematica translation; N[..., $MachinePrecision] forces machine floats. *)
code[x_, y_] := N[(x * N[(y + y), $MachinePrecision]), $MachinePrecision]
% LaTeX rendering of the same expression.
\begin{array}{l}
\\
x \cdot \left(y + y\right)
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 3 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
; Alternative 1: identical to the input program, x * (y + y).
(FPCore (x y) :precision binary64 (* x (+ y y)))
/* C translation of the FPCore above. */
double code(double x, double y) {
return x * (y + y);
}
! Fortran translation (real(8) is the double-precision kind here).
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = x * (y + y)
end function
// Java translation.
public static double code(double x, double y) {
return x * (y + y);
}
# Python translation.
def code(x, y): return x * (y + y)
# Julia translation; Float64(...) makes each rounding step explicit.
function code(x, y) return Float64(x * Float64(y + y)) end
% MATLAB translation.
function tmp = code(x, y) tmp = x * (y + y); end
(* Mathematica translation; N[..., $MachinePrecision] forces machine floats. *)
code[x_, y_] := N[(x * N[(y + y), $MachinePrecision]), $MachinePrecision]
% LaTeX rendering of the same expression.
\begin{array}{l}
\\
x \cdot \left(y + y\right)
\end{array}
; Alternative 2: algebraic rearrangement 2 * (x * y) of x * (y + y).
(FPCore (x y) :precision binary64 (* 2.0 (* x y)))
/* C translation of the FPCore above. */
double code(double x, double y) {
return 2.0 * (x * y);
}
! Fortran translation; 2.0d0 is a double-precision literal.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = 2.0d0 * (x * y)
end function
// Java translation.
public static double code(double x, double y) {
return 2.0 * (x * y);
}
# Python translation.
def code(x, y): return 2.0 * (x * y)
# Julia translation; Float64(...) makes each rounding step explicit.
function code(x, y) return Float64(2.0 * Float64(x * y)) end
% MATLAB translation.
function tmp = code(x, y) tmp = 2.0 * (x * y); end
(* Mathematica translation; N[..., $MachinePrecision] forces machine floats. *)
code[x_, y_] := N[(2.0 * N[(x * y), $MachinePrecision]), $MachinePrecision]
% LaTeX rendering of the same expression.
\begin{array}{l}
\\
2 \cdot \left(x \cdot y\right)
\end{array}
Initial program 99.6%
Taylor expanded in x around 0 100.0%
Final simplification 100.0%
; Alternative 3: Taylor expansion in x around 0 reduced the program to
; y + y; note x is kept in the signature but no longer used.
(FPCore (x y) :precision binary64 (+ y y))
/* C translation of the FPCore above (x intentionally unused). */
double code(double x, double y) {
return y + y;
}
! Fortran translation (x intentionally unused).
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = y + y
end function
// Java translation (x intentionally unused).
public static double code(double x, double y) {
return y + y;
}
# Python translation (x intentionally unused).
def code(x, y): return y + y
# Julia translation (x intentionally unused).
function code(x, y) return Float64(y + y) end
% MATLAB translation (x intentionally unused).
function tmp = code(x, y) tmp = y + y; end
(* Mathematica translation (x intentionally unused). *)
code[x_, y_] := N[(y + y), $MachinePrecision]
% LaTeX rendering of the same expression.
\begin{array}{l}
\\
y + y
\end{array}
Initial program 99.6%
add-log-exp 36.0%
exp-prod 33.3%
flip-+ 17.4%
div-inv 17.4%
+-inverses 17.4%
+-inverses 17.4%
pow-unpow 18.7%
+-inverses 18.7%
metadata-eval 18.7%
1-exp 18.7%
+-inverses 18.7%
exp-prod 0.0%
+-inverses 0.0%
+-inverses 0.0%
div-inv 0.0%
flip-+ 25.3%
add-cube-cbrt 25.3%
add-log-exp 4.0%
add-cube-cbrt 4.0%
Applied egg-rr 4.0%
Final simplification 4.0%
; Alternative 4: the rewrite chain collapsed the program to the
; constant -2.0; both parameters are kept in the signature but unused.
(FPCore (x y) :precision binary64 -2.0)
/* C translation of the FPCore above (both arguments unused). */
double code(double x, double y) {
return -2.0;
}
! Fortran translation; -2.0d0 is a double-precision literal.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = -2.0d0
end function
// Java translation (both arguments unused).
public static double code(double x, double y) {
return -2.0;
}
# Python translation (both arguments unused).
def code(x, y): return -2.0
# Julia translation (both arguments unused).
function code(x, y) return -2.0 end
% MATLAB translation (both arguments unused).
function tmp = code(x, y) tmp = -2.0; end
(* Mathematica translation (both arguments unused). *)
code[x_, y_] := -2.0
% LaTeX rendering of the constant.
\begin{array}{l}
\\
-2
\end{array}
Initial program 99.6%
add-log-exp 36.0%
exp-prod 33.3%
flip-+ 17.4%
div-inv 17.4%
+-inverses 17.4%
+-inverses 17.4%
pow-unpow 18.7%
+-inverses 18.7%
metadata-eval 18.7%
metadata-eval 18.7%
+-inverses 18.7%
pow-unpow 0.0%
+-inverses 0.0%
+-inverses 0.0%
div-inv 0.0%
flip-+ 25.3%
exp-prod 25.3%
*-un-lft-identity 25.3%
*-un-lft-identity 25.3%
log-prod 25.3%
metadata-eval 25.3%
add-log-exp 4.0%
flip-+ 0.0%
Applied egg-rr 0.0%
Simplified 3.2%
Final simplification 3.2%
herbie shell --seed 2024031
; Source FPCore fed to the Herbie shell session above, with its :name
; metadata identifying the origin of the expression.
(FPCore (x y)
:name "Numeric.Integration.TanhSinh:simpson from integration-0.2.1"
:precision binary64
(* x (+ y y)))