
(FPCore (a b) :precision binary64 (- (* (* (* a a) b) b)))
// Herbie-generated binary64 kernel: -(((a*a)*b)*b) = -a^2*b^2.
// Association order is deliberate (floating-point rounding); do not re-associate.
double code(double a, double b) {
return -(((a * a) * b) * b);
}
! Herbie-generated binary64 kernel: -(((a*a)*b)*b) = -a**2 * b**2.
! Association order is deliberate (floating-point rounding); do not re-associate.
real(8) function code(a, b)
real(8), intent (in) :: a
real(8), intent (in) :: b
code = -(((a * a) * b) * b)
end function
// Herbie-generated binary64 kernel: -(((a*a)*b)*b) = -a^2*b^2.
// Association order is deliberate (floating-point rounding); do not re-associate.
public static double code(double a, double b) {
return -(((a * a) * b) * b);
}
# Herbie-generated kernel: -(((a*a)*b)*b) = -a**2 * b**2; association order is deliberate (FP rounding).
def code(a, b): return -(((a * a) * b) * b)
# Herbie-generated kernel: -a^2*b^2 with explicit Float64 rounding after each product; order is deliberate.
function code(a, b) return Float64(-Float64(Float64(Float64(a * a) * b) * b)) end
% Herbie-generated kernel: -(((a*a)*b)*b) = -a^2*b^2; association order is deliberate (FP rounding).
function tmp = code(a, b) tmp = -(((a * a) * b) * b); end
(* Herbie-generated kernel: -a^2 b^2, each product rounded to $MachinePrecision; association order is deliberate. *)
code[a_, b_] := (-N[(N[(N[(a * a), $MachinePrecision] * b), $MachinePrecision] * b), $MachinePrecision])
\begin{array}{l}
\\
-\left(\left(a \cdot a\right) \cdot b\right) \cdot b
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 4 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (a b) :precision binary64 (- (* (* (* a a) b) b)))
// Herbie-generated binary64 kernel (original program restated): -(((a*a)*b)*b) = -a^2*b^2.
// Association order is deliberate (floating-point rounding); do not re-associate.
double code(double a, double b) {
return -(((a * a) * b) * b);
}
! Herbie-generated binary64 kernel (original program restated): -(((a*a)*b)*b) = -a**2 * b**2.
! Association order is deliberate (floating-point rounding); do not re-associate.
real(8) function code(a, b)
real(8), intent (in) :: a
real(8), intent (in) :: b
code = -(((a * a) * b) * b)
end function
// Herbie-generated binary64 kernel (original program restated): -(((a*a)*b)*b) = -a^2*b^2.
// Association order is deliberate (floating-point rounding); do not re-associate.
public static double code(double a, double b) {
return -(((a * a) * b) * b);
}
# Herbie-generated kernel (original restated): -(((a*a)*b)*b) = -a**2 * b**2; association order deliberate.
def code(a, b): return -(((a * a) * b) * b)
# Herbie-generated kernel (original restated): -a^2*b^2 with explicit Float64 rounding at each step.
function code(a, b) return Float64(-Float64(Float64(Float64(a * a) * b) * b)) end
% Herbie-generated kernel (original restated): -(((a*a)*b)*b) = -a^2*b^2; association order deliberate.
function tmp = code(a, b) tmp = -(((a * a) * b) * b); end
(* Herbie-generated kernel (original restated): -a^2 b^2, each product rounded to $MachinePrecision. *)
code[a_, b_] := (-N[(N[(N[(a * a), $MachinePrecision] * b), $MachinePrecision] * b), $MachinePrecision])
\begin{array}{l}
\\
-\left(\left(a \cdot a\right) \cdot b\right) \cdot b
\end{array}
(FPCore (a b) :precision binary64 (* (* a b) (* (- b) a)))
// Herbie alternative: (a*b) * ((-b)*a), mathematically -a^2*b^2.
// The re-association vs. the original is the point of this variant; keep it exactly.
double code(double a, double b) {
return (a * b) * (-b * a);
}
! Herbie alternative: (a*b) * ((-b)*a), mathematically -a**2 * b**2.
! The re-association vs. the original is the point of this variant; keep it exactly.
real(8) function code(a, b)
real(8), intent (in) :: a
real(8), intent (in) :: b
code = (a * b) * (-b * a)
end function
// Herbie alternative: (a*b) * ((-b)*a), mathematically -a^2*b^2.
// The re-association vs. the original is the point of this variant; keep it exactly.
public static double code(double a, double b) {
return (a * b) * (-b * a);
}
# Herbie alternative: (a*b) * ((-b)*a), mathematically -a**2 * b**2; re-association is deliberate.
def code(a, b): return (a * b) * (-b * a)
# Herbie alternative: (a*b) * ((-b)*a) = -a^2*b^2 with explicit Float64 rounding per product.
function code(a, b) return Float64(Float64(a * b) * Float64(Float64(-b) * a)) end
% Herbie alternative: (a*b) * ((-b)*a), mathematically -a^2*b^2; re-association is deliberate.
function tmp = code(a, b) tmp = (a * b) * (-b * a); end
(* Herbie alternative: (a b) ((-b) a) = -a^2 b^2, each product rounded to $MachinePrecision. *)
code[a_, b_] := N[(N[(a * b), $MachinePrecision] * N[((-b) * a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(a \cdot b\right) \cdot \left(\left(-b\right) \cdot a\right)
\end{array}
Initial program 85.5%
lift-neg.f64: N/A
lift-*.f64: N/A
lift-*.f64: N/A
associate-*l*: N/A
lift-*.f64: N/A
unswap-sqr: N/A
distribute-lft-neg-in: N/A
lower-*.f64: N/A
distribute-lft-neg-in: N/A
lower-*.f64: N/A
lower-neg.f64: N/A
*-commutative: N/A
lower-*.f64: 99.7
Applied rewrites: 99.7%
Final simplification: 99.7%
(FPCore (a b) :precision binary64 (* (* (* a a) b) (- b)))
// Herbie alternative: ((a*a)*b) * (-b), mathematically -a^2*b^2.
// Association/negation placement is deliberate; keep it exactly.
double code(double a, double b) {
return ((a * a) * b) * -b;
}
! Herbie alternative: ((a*a)*b) * (-b), mathematically -a**2 * b**2.
! Association/negation placement is deliberate; keep it exactly.
real(8) function code(a, b)
real(8), intent (in) :: a
real(8), intent (in) :: b
code = ((a * a) * b) * -b
end function
// Herbie alternative: ((a*a)*b) * (-b), mathematically -a^2*b^2.
// Association/negation placement is deliberate; keep it exactly.
public static double code(double a, double b) {
return ((a * a) * b) * -b;
}
# Herbie alternative: ((a*a)*b) * (-b), mathematically -a**2 * b**2; negation placement is deliberate.
def code(a, b): return ((a * a) * b) * -b
# Herbie alternative: ((a*a)*b) * (-b) = -a^2*b^2 with explicit Float64 rounding per step.
function code(a, b) return Float64(Float64(Float64(a * a) * b) * Float64(-b)) end
% Herbie alternative: ((a*a)*b) * (-b), mathematically -a^2*b^2; negation placement is deliberate.
function tmp = code(a, b) tmp = ((a * a) * b) * -b; end
(* Herbie alternative: ((a a) b) (-b) = -a^2 b^2, each product rounded to $MachinePrecision. *)
code[a_, b_] := N[(N[(N[(a * a), $MachinePrecision] * b), $MachinePrecision] * (-b)), $MachinePrecision]
\begin{array}{l}
\\
\left(\left(a \cdot a\right) \cdot b\right) \cdot \left(-b\right)
\end{array}
Initial program 85.5%
Final simplification: 85.5%
(FPCore (a b) :precision binary64 (* (* b (* a a)) b))
// Herbie alternative: (b*(a*a)) * b, mathematically a^2*b^2.
// NOTE: no negation here, unlike the original -a^2*b^2; this is the variant as generated.
double code(double a, double b) {
return (b * (a * a)) * b;
}
! Herbie alternative: (b*(a*a)) * b, mathematically a**2 * b**2.
! NOTE: no negation here, unlike the original -a**2 * b**2; this is the variant as generated.
real(8) function code(a, b)
real(8), intent (in) :: a
real(8), intent (in) :: b
code = (b * (a * a)) * b
end function
// Herbie alternative: (b*(a*a)) * b, mathematically a^2*b^2.
// NOTE: no negation here, unlike the original -a^2*b^2; this is the variant as generated.
public static double code(double a, double b) {
return (b * (a * a)) * b;
}
# Herbie alternative: (b*(a*a)) * b = a**2 * b**2; NOTE: no negation, unlike the original.
def code(a, b): return (b * (a * a)) * b
# Herbie alternative: (b*(a*a)) * b = a^2*b^2 in Float64; NOTE: no negation, unlike the original.
function code(a, b) return Float64(Float64(b * Float64(a * a)) * b) end
% Herbie alternative: (b*(a*a)) * b = a^2*b^2; NOTE: no negation, unlike the original.
function tmp = code(a, b) tmp = (b * (a * a)) * b; end
(* Herbie alternative: (b (a a)) b = a^2 b^2, rounded per product; NOTE: no negation, unlike the original. *)
code[a_, b_] := N[(N[(b * N[(a * a), $MachinePrecision]), $MachinePrecision] * b), $MachinePrecision]
\begin{array}{l}
\\
\left(b \cdot \left(a \cdot a\right)\right) \cdot b
\end{array}
Initial program 85.5%
lift-neg.f64: N/A
+-lft-identity: N/A
flip3-+: N/A
distribute-neg-frac: N/A
Applied rewrites: 29.6%
(FPCore (a b) :precision binary64 (* (* b a) (* b a)))
// Herbie alternative: (b*a) * (b*a), mathematically (a*b)^2 = a^2*b^2.
// NOTE: no negation here, unlike the original -a^2*b^2; this is the variant as generated.
double code(double a, double b) {
return (b * a) * (b * a);
}
! Herbie alternative: (b*a) * (b*a), mathematically (a*b)**2 = a**2 * b**2.
! NOTE: no negation here, unlike the original -a**2 * b**2; this is the variant as generated.
real(8) function code(a, b)
real(8), intent (in) :: a
real(8), intent (in) :: b
code = (b * a) * (b * a)
end function
// Herbie alternative: (b*a) * (b*a), mathematically (a*b)^2 = a^2*b^2.
// NOTE: no negation here, unlike the original -a^2*b^2; this is the variant as generated.
public static double code(double a, double b) {
return (b * a) * (b * a);
}
# Herbie alternative: (b*a) * (b*a) = (a*b)**2; NOTE: no negation, unlike the original.
def code(a, b): return (b * a) * (b * a)
# Herbie alternative: (b*a) * (b*a) = (a*b)^2 in Float64; NOTE: no negation, unlike the original.
function code(a, b) return Float64(Float64(b * a) * Float64(b * a)) end
% Herbie alternative: (b*a) * (b*a) = (a*b)^2; NOTE: no negation, unlike the original.
function tmp = code(a, b) tmp = (b * a) * (b * a); end
(* Herbie alternative: (b a)(b a) = (a b)^2, rounded per product; NOTE: no negation, unlike the original. *)
code[a_, b_] := N[(N[(b * a), $MachinePrecision] * N[(b * a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(b \cdot a\right) \cdot \left(b \cdot a\right)
\end{array}
Initial program 85.5%
lift-neg.f64: N/A
+-lft-identity: N/A
flip3-+: N/A
distribute-neg-frac: N/A
Applied rewrites: 29.5%
herbie shell --seed 2024314
(FPCore (a b)
:name "ab-angle->ABCF D"
:precision binary64
(- (* (* (* a a) b) b)))