
(FPCore (x y) :precision binary64 (* (* (* x 3.0) x) y))
double code(double x, double y) {
    /* ((x * 3) * x) * y, multiplied left to right so rounding matches the report. */
    double acc = x * 3.0;
    acc = acc * x;
    return acc * y;
}
!> Herbie-generated alternative: evaluates ((x * 3) * x) * y in binary64.
!> Fixes: added `implicit none`; named the closing `end function`.
real(8) function code(x, y)
implicit none
real(8), intent(in) :: x
real(8), intent(in) :: y
code = ((x * 3.0d0) * x) * y
end function code
public static double code(double x, double y) {
    // ((x * 3) * x) * y with left-to-right rounding order preserved.
    double acc = x * 3.0;
    acc *= x;
    return acc * y;
}
def code(x, y):
    """Herbie alternative: ((x * 3) * x) * y, multiplied left to right."""
    acc = x * 3.0
    acc *= x
    return acc * y
# Herbie alternative: ((x*3)*x)*y; each Float64() is an explicit binary64 re-rounding.
function code(x, y) return Float64(Float64(Float64(x * 3.0) * x) * y) end
% Herbie alternative: ((x*3)*x)*y evaluated left to right in double precision.
function tmp = code(x, y) tmp = ((x * 3.0) * x) * y; end
(* Herbie alternative: ((x*3)*x)*y; N[..., $MachinePrecision] rounds after each product. *)
code[x_, y_] := N[(N[(N[(x * 3.0), $MachinePrecision] * x), $MachinePrecision] * y), $MachinePrecision]
\begin{array}{l}
\\
\left(\left(x \cdot 3\right) \cdot x\right) \cdot y
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 5 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x y) :precision binary64 (* (* (* x 3.0) x) y))
double code(double x, double y) {
    /* ((x * 3) * x) * y, multiplied left to right so rounding matches the report. */
    double acc = x * 3.0;
    acc = acc * x;
    return acc * y;
}
!> Herbie-generated alternative: evaluates ((x * 3) * x) * y in binary64.
!> Fixes: added `implicit none`; named the closing `end function`.
real(8) function code(x, y)
implicit none
real(8), intent(in) :: x
real(8), intent(in) :: y
code = ((x * 3.0d0) * x) * y
end function code
public static double code(double x, double y) {
    // ((x * 3) * x) * y with left-to-right rounding order preserved.
    double acc = x * 3.0;
    acc *= x;
    return acc * y;
}
def code(x, y):
    """Herbie alternative: ((x * 3) * x) * y, multiplied left to right."""
    acc = x * 3.0
    acc *= x
    return acc * y
# Herbie alternative: ((x*3)*x)*y; each Float64() is an explicit binary64 re-rounding.
function code(x, y) return Float64(Float64(Float64(x * 3.0) * x) * y) end
% Herbie alternative: ((x*3)*x)*y evaluated left to right in double precision.
function tmp = code(x, y) tmp = ((x * 3.0) * x) * y; end
(* Herbie alternative: ((x*3)*x)*y; N[..., $MachinePrecision] rounds after each product. *)
code[x_, y_] := N[(N[(N[(x * 3.0), $MachinePrecision] * x), $MachinePrecision] * y), $MachinePrecision]
\begin{array}{l}
\\
\left(\left(x \cdot 3\right) \cdot x\right) \cdot y
\end{array}
(FPCore (x y) :precision binary64 (* (* x (* x y)) 3.0))
double code(double x, double y) {
    /* (x * (x * y)) * 3, with the inner product formed first (rounding order preserved). */
    double inner = x * y;
    double acc = x * inner;
    return acc * 3.0;
}
!> Herbie-generated alternative: evaluates (x * (x * y)) * 3 in binary64.
!> Fixes: added `implicit none`; named the closing `end function`.
real(8) function code(x, y)
implicit none
real(8), intent(in) :: x
real(8), intent(in) :: y
code = (x * (x * y)) * 3.0d0
end function code
public static double code(double x, double y) {
    // (x * (x * y)) * 3 with the original rounding order preserved.
    double inner = x * y;
    double acc = x * inner;
    return acc * 3.0;
}
def code(x, y):
    """Herbie alternative: (x * (x * y)) * 3, inner product formed first."""
    inner = x * y
    acc = x * inner
    return acc * 3.0
# Herbie alternative: (x*(x*y))*3; each Float64() is an explicit binary64 re-rounding.
function code(x, y) return Float64(Float64(x * Float64(x * y)) * 3.0) end
% Herbie alternative: (x*(x*y))*3 in double precision.
function tmp = code(x, y) tmp = (x * (x * y)) * 3.0; end
(* Herbie alternative: (x*(x*y))*3; N[..., $MachinePrecision] rounds after each product. *)
code[x_, y_] := N[(N[(x * N[(x * y), $MachinePrecision]), $MachinePrecision] * 3.0), $MachinePrecision]
\begin{array}{l}
\\
\left(x \cdot \left(x \cdot y\right)\right) \cdot 3
\end{array}
Initial program 88.6%
*-commutative88.6%
associate-*l*88.5%
Simplified88.5%
associate-*r*88.6%
*-commutative88.6%
associate-*r*99.7%
expm1-log1p-u75.4%
expm1-udef50.5%
log1p-udef50.5%
add-exp-log74.8%
*-commutative74.8%
associate-*l*74.8%
Applied egg-rr74.8%
add-exp-log50.1%
associate--l+50.1%
log1p-def50.1%
associate-*r*48.9%
*-commutative48.9%
associate-*r*48.8%
*-commutative48.8%
add-exp-log48.4%
expm1-def48.4%
log1p-expm1-u57.3%
associate-*r*57.3%
log-prod43.1%
add-sqr-sqrt43.1%
swap-sqr43.1%
unpow243.1%
log-prod57.3%
add-exp-log88.3%
Applied egg-rr99.7%
Final simplification99.7%
(FPCore (x y) :precision binary64 (* 3.0 (* y (* x x))))
double code(double x, double y) {
    /* 3 * (y * (x * x)): square first, scale by y, then by 3 (rounding order preserved). */
    double sq = x * x;
    double scaled = y * sq;
    return 3.0 * scaled;
}
!> Herbie-generated alternative: evaluates 3 * (y * (x * x)) in binary64.
!> Fixes: added `implicit none`; named the closing `end function`.
real(8) function code(x, y)
implicit none
real(8), intent(in) :: x
real(8), intent(in) :: y
code = 3.0d0 * (y * (x * x))
end function code
public static double code(double x, double y) {
    // 3 * (y * (x * x)) with the original rounding order preserved.
    double sq = x * x;
    double scaled = y * sq;
    return 3.0 * scaled;
}
def code(x, y):
    """Herbie alternative: 3 * (y * (x * x)), square formed first."""
    sq = x * x
    scaled = y * sq
    return 3.0 * scaled
# Herbie alternative: 3*(y*(x*x)); each Float64() is an explicit binary64 re-rounding.
function code(x, y) return Float64(3.0 * Float64(y * Float64(x * x))) end
% Herbie alternative: 3*(y*(x*x)) in double precision.
function tmp = code(x, y) tmp = 3.0 * (y * (x * x)); end
(* Herbie alternative: 3*(y*(x*x)); N[..., $MachinePrecision] rounds after each product. *)
code[x_, y_] := N[(3.0 * N[(y * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
3 \cdot \left(y \cdot \left(x \cdot x\right)\right)
\end{array}
Initial program 88.6%
*-commutative 88.6%
associate-*l* 88.5%
Simplified 88.5%
Taylor expanded in x around 0 88.6%
unpow2 88.6%
Simplified 88.6%
Final simplification 88.6%
(FPCore (x y) :precision binary64 (* x (* (* x y) 3.0)))
double code(double x, double y) {
    /* x * ((x * y) * 3), inner product scaled by 3 before the final multiply. */
    double t = x * y;
    t = t * 3.0;
    return x * t;
}
!> Herbie-generated alternative: evaluates x * ((x * y) * 3) in binary64.
!> Fixes: added `implicit none`; named the closing `end function`.
real(8) function code(x, y)
implicit none
real(8), intent(in) :: x
real(8), intent(in) :: y
code = x * ((x * y) * 3.0d0)
end function code
public static double code(double x, double y) {
    // x * ((x * y) * 3) with the original rounding order preserved.
    double t = x * y;
    t *= 3.0;
    return x * t;
}
def code(x, y):
    """Herbie alternative: x * ((x * y) * 3), inner product scaled first."""
    t = x * y
    t *= 3.0
    return x * t
# Herbie alternative: x*((x*y)*3); each Float64() is an explicit binary64 re-rounding.
function code(x, y) return Float64(x * Float64(Float64(x * y) * 3.0)) end
% Herbie alternative: x*((x*y)*3) in double precision.
function tmp = code(x, y) tmp = x * ((x * y) * 3.0); end
(* Herbie alternative: x*((x*y)*3); N[..., $MachinePrecision] rounds after each product. *)
code[x_, y_] := N[(x * N[(N[(x * y), $MachinePrecision] * 3.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x \cdot \left(\left(x \cdot y\right) \cdot 3\right)
\end{array}
Initial program 88.6%
*-commutative88.6%
associate-*l*88.5%
Simplified88.5%
associate-*r*88.6%
*-commutative88.6%
associate-*r*99.7%
expm1-log1p-u75.4%
expm1-udef50.5%
log1p-udef50.5%
add-exp-log74.8%
*-commutative74.8%
associate-*l*74.8%
Applied egg-rr74.8%
add-exp-log50.1%
associate--l+50.1%
log1p-def50.1%
associate-*r*48.9%
*-commutative48.9%
associate-*r*48.8%
*-commutative48.8%
add-exp-log48.4%
expm1-def48.4%
log1p-expm1-u57.3%
associate-*r*57.3%
log-prod43.1%
add-sqr-sqrt43.1%
swap-sqr43.1%
unpow243.1%
log-prod57.3%
add-exp-log88.3%
Applied egg-rr99.7%
Final simplification99.7%
(FPCore (x y) :precision binary64 (* (* x y) (* x 3.0)))
double code(double x, double y) {
    /* (x * y) * (x * 3): two independent products, then one combining multiply. */
    double lhs = x * y;
    double rhs = x * 3.0;
    return lhs * rhs;
}
!> Herbie-generated alternative: evaluates (x * y) * (x * 3) in binary64.
!> Fixes: added `implicit none`; named the closing `end function`.
real(8) function code(x, y)
implicit none
real(8), intent(in) :: x
real(8), intent(in) :: y
code = (x * y) * (x * 3.0d0)
end function code
public static double code(double x, double y) {
    // (x * y) * (x * 3) with the original rounding order preserved.
    double lhs = x * y;
    double rhs = x * 3.0;
    return lhs * rhs;
}
def code(x, y):
    """Herbie alternative: (x * y) * (x * 3), two products combined at the end."""
    lhs = x * y
    rhs = x * 3.0
    return lhs * rhs
# Herbie alternative: (x*y)*(x*3); each Float64() is an explicit binary64 re-rounding.
function code(x, y) return Float64(Float64(x * y) * Float64(x * 3.0)) end
% Herbie alternative: (x*y)*(x*3) in double precision.
function tmp = code(x, y) tmp = (x * y) * (x * 3.0); end
(* Herbie alternative: (x*y)*(x*3); N[..., $MachinePrecision] rounds after each product. *)
code[x_, y_] := N[(N[(x * y), $MachinePrecision] * N[(x * 3.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(x \cdot y\right) \cdot \left(x \cdot 3\right)
\end{array}
Initial program 88.6%
associate-*l* 99.7%
Simplified 99.7%
Final simplification 99.7%
(FPCore (x y) :precision binary64 0.0)
double code(double x, double y) {
    /* Herbie collapsed this alternative to the constant 0; inputs are unused. */
    (void)x;
    (void)y;
    return 0.0;
}
!> Herbie-generated alternative: collapses to the constant 0 (inputs unused).
!> Fixes: added `implicit none`; named the closing `end function`.
real(8) function code(x, y)
implicit none
real(8), intent(in) :: x
real(8), intent(in) :: y
code = 0.0d0
end function code
public static double code(double x, double y) {
    // Herbie collapsed this alternative to the constant 0; inputs are unused.
    final double result = 0.0;
    return result;
}
def code(x, y):
    """Herbie alternative: collapses to the constant 0; inputs are unused."""
    result = 0.0
    return result
# Herbie alternative: collapses to the constant 0; inputs are unused.
function code(x, y) return 0.0 end
% Herbie alternative: collapses to the constant 0; inputs are unused.
function tmp = code(x, y) tmp = 0.0; end
(* Herbie alternative: collapses to the constant 0; inputs are unused. *)
code[x_, y_] := 0.0
\begin{array}{l}
\\
0
\end{array}
Initial program 88.6%
*-commutative 88.6%
associate-*l* 88.5%
Simplified 88.5%
associate-*r* 88.6%
*-commutative 88.6%
associate-*r* 99.7%
expm1-log1p-u 75.4%
expm1-udef 50.5%
log1p-udef 50.5%
add-exp-log 74.8%
*-commutative 74.8%
associate-*l* 74.8%
Applied egg-rr 74.8%
Taylor expanded in x around 0 29.7%
Final simplification 29.7%
(FPCore (x y) :precision binary64 (* (* x 3.0) (* x y)))
double code(double x, double y) {
    /* (x * 3) * (x * y): two independent products, then one combining multiply. */
    double lhs = x * 3.0;
    double rhs = x * y;
    return lhs * rhs;
}
!> Herbie-generated alternative: evaluates (x * 3) * (x * y) in binary64.
!> Fixes: added `implicit none`; named the closing `end function`.
real(8) function code(x, y)
implicit none
real(8), intent(in) :: x
real(8), intent(in) :: y
code = (x * 3.0d0) * (x * y)
end function code
public static double code(double x, double y) {
    // (x * 3) * (x * y) with the original rounding order preserved.
    double lhs = x * 3.0;
    double rhs = x * y;
    return lhs * rhs;
}
def code(x, y):
    """Herbie alternative: (x * 3) * (x * y), two products combined at the end."""
    lhs = x * 3.0
    rhs = x * y
    return lhs * rhs
# Herbie alternative: (x*3)*(x*y); each Float64() is an explicit binary64 re-rounding.
function code(x, y) return Float64(Float64(x * 3.0) * Float64(x * y)) end
% Herbie alternative: (x*3)*(x*y) in double precision.
function tmp = code(x, y) tmp = (x * 3.0) * (x * y); end
(* Herbie alternative: (x*3)*(x*y); N[..., $MachinePrecision] rounds after each product. *)
code[x_, y_] := N[(N[(x * 3.0), $MachinePrecision] * N[(x * y), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(x \cdot 3\right) \cdot \left(x \cdot y\right)
\end{array}
herbie shell --seed 2023221
(FPCore (x y)
:name "Diagrams.Segment:$catParam from diagrams-lib-1.3.0.3, A"
:precision binary64
:herbie-target
(* (* x 3.0) (* x y))
(* (* (* x 3.0) x) y))