
; Herbie input: -(((a*a)*b)*b) = -(a*b)^2 in binary64 (~80.5% accuracy per the derivation log below).
(FPCore (a b) :precision binary64 (- (* (* (* a a) b) b)))
/* Herbie input translated to C: -(a*b)^2, evaluated in the fixed order (((a*a)*b)*b) then negated. */
double code(double a, double b) {
return -(((a * a) * b) * b);
}
! Herbie input translated to Fortran: -(a*b)**2, evaluated in the fixed order (((a*a)*b)*b) then negated.
real(8) function code(a, b)
real(8), intent (in) :: a
real(8), intent (in) :: b
code = -(((a * a) * b) * b)
end function
// Herbie input translated to Java: -(a*b)^2, evaluated in the fixed order (((a*a)*b)*b) then negated.
public static double code(double a, double b) {
return -(((a * a) * b) * b);
}
# Herbie input translated to Python: -(a*b)**2, evaluated in the fixed order (((a*a)*b)*b) then negated.
def code(a, b): return -(((a * a) * b) * b)
# Herbie input translated to Julia; explicit Float64(...) wrapping pins binary64 rounding at every step.
function code(a, b) return Float64(-Float64(Float64(Float64(a * a) * b) * b)) end
% Herbie input translated to MATLAB: -(a*b)^2, evaluated in the fixed order (((a*a)*b)*b) then negated.
function tmp = code(a, b) tmp = -(((a * a) * b) * b); end
(* Herbie input translated to Mathematica; each N[..., $MachinePrecision] models one binary64 rounding. *)
code[a_, b_] := (-N[(N[(N[(a * a), $MachinePrecision] * b), $MachinePrecision] * b), $MachinePrecision])
\begin{array}{l}
\\
-\left(\left(a \cdot a\right) \cdot b\right) \cdot b
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 5 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
; Input program restated for the alternatives list: -(a*b)^2 in binary64, unchanged from the original.
(FPCore (a b) :precision binary64 (- (* (* (* a a) b) b)))
/* Restated input program (C): identical to the original -(((a*a)*b)*b). */
double code(double a, double b) {
return -(((a * a) * b) * b);
}
! Restated input program (Fortran): identical to the original -(((a*a)*b)*b).
real(8) function code(a, b)
real(8), intent (in) :: a
real(8), intent (in) :: b
code = -(((a * a) * b) * b)
end function
// Restated input program (Java): identical to the original -(((a*a)*b)*b).
public static double code(double a, double b) {
return -(((a * a) * b) * b);
}
# Restated input program (Python): identical to the original -(((a*a)*b)*b).
def code(a, b): return -(((a * a) * b) * b)
# Restated input program (Julia): identical to the original; Float64(...) pins each rounding step.
function code(a, b) return Float64(-Float64(Float64(Float64(a * a) * b) * b)) end
% Restated input program (MATLAB): identical to the original -(((a*a)*b)*b).
function tmp = code(a, b) tmp = -(((a * a) * b) * b); end
(* Restated input program (Mathematica): identical to the original; N[...] models binary64 rounding. *)
code[a_, b_] := (-N[(N[(N[(a * a), $MachinePrecision] * b), $MachinePrecision] * b), $MachinePrecision])
\begin{array}{l}
\\
-\left(\left(a \cdot a\right) \cdot b\right) \cdot b
\end{array}
; Alternative: (b*a) / ((-1/b)/a) — algebraically -(a*b)^2; ~99.6% accuracy per the derivation log below.
(FPCore (a b) :precision binary64 (/ (* b a) (/ (/ -1.0 b) a)))
/* Alternative (C): (b*a) / ((-1/b)/a); algebraically equal to -(a*b)^2. Division order is intentional. */
double code(double a, double b) {
return (b * a) / ((-1.0 / b) / a);
}
! Alternative (Fortran): (b*a) / ((-1/b)/a); algebraically equal to -(a*b)**2. Division order is intentional.
real(8) function code(a, b)
real(8), intent (in) :: a
real(8), intent (in) :: b
code = (b * a) / (((-1.0d0) / b) / a)
end function
// Alternative (Java): (b*a) / ((-1/b)/a); algebraically equal to -(a*b)^2. Division order is intentional.
public static double code(double a, double b) {
return (b * a) / ((-1.0 / b) / a);
}
# Alternative (Python): (b*a) / ((-1/b)/a); algebraically equal to -(a*b)**2. Division order is intentional.
def code(a, b): return (b * a) / ((-1.0 / b) / a)
# Alternative (Julia): (b*a) / ((-1/b)/a); Float64(...) pins each rounding step.
function code(a, b) return Float64(Float64(b * a) / Float64(Float64(-1.0 / b) / a)) end
% Alternative (MATLAB): (b*a) / ((-1/b)/a); algebraically equal to -(a*b)^2. Division order is intentional.
function tmp = code(a, b) tmp = (b * a) / ((-1.0 / b) / a); end
(* Alternative (Mathematica): (b*a) / ((-1/b)/a); N[...] models binary64 rounding at each step. *)
code[a_, b_] := N[(N[(b * a), $MachinePrecision] / N[(N[(-1.0 / b), $MachinePrecision] / a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{b \cdot a}{\frac{\frac{-1}{b}}{a}}
\end{array}
Initial program 80.5%
associate-*l* N/A
unswap-sqr N/A
*-lowering-*.f64 N/A
*-lowering-*.f64 N/A
*-lowering-*.f64 99.6%
Applied egg-rr 99.6%
/-rgt-identity N/A
clear-num N/A
associate-*l/ N/A
*-lft-identity N/A
/-lowering-/.f64 N/A
/-lowering-/.f64 99.6%
Applied egg-rr 99.6%
*-commutative N/A
clear-num N/A
un-div-inv N/A
distribute-neg-frac2 N/A
/-lowering-/.f64 N/A
*-commutative N/A
*-lowering-*.f64 N/A
associate-/r* N/A
distribute-neg-frac N/A
metadata-eval N/A
/-lowering-/.f64 N/A
*-commutative N/A
*-lowering-*.f64 99.6%
Applied egg-rr 99.6%
associate-/r* N/A
div-inv N/A
neg-mul-1 N/A
/-lowering-/.f64 N/A
neg-mul-1 N/A
div-inv N/A
/-lowering-/.f64 99.7%
Applied egg-rr 99.7%
; Alternative: (b*a) / (-1/(b*a)) — algebraically -(a*b)^2; ~99.6% accuracy per the derivation log below.
(FPCore (a b) :precision binary64 (/ (* b a) (/ -1.0 (* b a))))
/* Alternative (C): (b*a) / (-1/(b*a)); algebraically equal to -(a*b)^2. Division order is intentional. */
double code(double a, double b) {
return (b * a) / (-1.0 / (b * a));
}
! Alternative (Fortran): (b*a) / (-1/(b*a)); algebraically equal to -(a*b)**2. Division order is intentional.
real(8) function code(a, b)
real(8), intent (in) :: a
real(8), intent (in) :: b
code = (b * a) / ((-1.0d0) / (b * a))
end function
// Alternative (Java): (b*a) / (-1/(b*a)); algebraically equal to -(a*b)^2. Division order is intentional.
public static double code(double a, double b) {
return (b * a) / (-1.0 / (b * a));
}
# Alternative (Python): (b*a) / (-1/(b*a)); algebraically equal to -(a*b)**2. Division order is intentional.
def code(a, b): return (b * a) / (-1.0 / (b * a))
# Alternative (Julia): (b*a) / (-1/(b*a)); Float64(...) pins each rounding step.
function code(a, b) return Float64(Float64(b * a) / Float64(-1.0 / Float64(b * a))) end
% Alternative (MATLAB): (b*a) / (-1/(b*a)); algebraically equal to -(a*b)^2. Division order is intentional.
function tmp = code(a, b) tmp = (b * a) / (-1.0 / (b * a)); end
(* Alternative (Mathematica): (b*a) / (-1/(b*a)); N[...] models binary64 rounding at each step. *)
code[a_, b_] := N[(N[(b * a), $MachinePrecision] / N[(-1.0 / N[(b * a), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{b \cdot a}{\frac{-1}{b \cdot a}}
\end{array}
Initial program 80.5%
associate-*l* N/A
unswap-sqr N/A
*-lowering-*.f64 N/A
*-lowering-*.f64 N/A
*-lowering-*.f64 99.6%
Applied egg-rr 99.6%
/-rgt-identity N/A
clear-num N/A
associate-*l/ N/A
*-lft-identity N/A
/-lowering-/.f64 N/A
/-lowering-/.f64 99.6%
Applied egg-rr 99.6%
*-commutative N/A
clear-num N/A
un-div-inv N/A
distribute-neg-frac2 N/A
/-lowering-/.f64 N/A
*-commutative N/A
*-lowering-*.f64 N/A
associate-/r* N/A
distribute-neg-frac N/A
metadata-eval N/A
/-lowering-/.f64 N/A
*-commutative N/A
*-lowering-*.f64 99.6%
Applied egg-rr 99.6%
; Alternative: (b*a) * (0 - b*a) — algebraically -(a*b)^2; ~99.6% accuracy per the derivation log below.
(FPCore (a b) :precision binary64 (* (* b a) (- 0.0 (* b a))))
/* Alternative (C): (b*a) * (0 - b*a); algebraically equal to -(a*b)^2. */
double code(double a, double b) {
return (b * a) * (0.0 - (b * a));
}
! Alternative (Fortran): (b*a) * (0 - b*a); algebraically equal to -(a*b)**2.
real(8) function code(a, b)
real(8), intent (in) :: a
real(8), intent (in) :: b
code = (b * a) * (0.0d0 - (b * a))
end function
// Alternative (Java): (b*a) * (0 - b*a); algebraically equal to -(a*b)^2.
public static double code(double a, double b) {
return (b * a) * (0.0 - (b * a));
}
# Alternative (Python): (b*a) * (0 - b*a); algebraically equal to -(a*b)**2.
def code(a, b): return (b * a) * (0.0 - (b * a))
# Alternative (Julia): (b*a) * (0 - b*a); Float64(...) pins each rounding step.
function code(a, b) return Float64(Float64(b * a) * Float64(0.0 - Float64(b * a))) end
% Alternative (MATLAB): (b*a) * (0 - b*a); algebraically equal to -(a*b)^2.
function tmp = code(a, b) tmp = (b * a) * (0.0 - (b * a)); end
(* Alternative (Mathematica): (b*a) * (0 - b*a); N[...] models binary64 rounding at each step. *)
code[a_, b_] := N[(N[(b * a), $MachinePrecision] * N[(0.0 - N[(b * a), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(b \cdot a\right) \cdot \left(0 - b \cdot a\right)
\end{array}
Initial program 80.5%
associate-*l* N/A
unswap-sqr N/A
*-lowering-*.f64 N/A
*-lowering-*.f64 N/A
*-lowering-*.f64 99.6%
Applied egg-rr 99.6%
Final simplification 99.6%
; Alternative: b*(b*(a*a)) = +(a*b)^2 — NOTE(review): sign differs from the input's -(a*b)^2; report shows only 24.3% accuracy.
(FPCore (a b) :precision binary64 (* b (* b (* a a))))
/* Alternative (C): b*(b*(a*a)) = +(a*b)^2 — NOTE(review): sign differs from the input; low (24.3%) reported accuracy. */
double code(double a, double b) {
return b * (b * (a * a));
}
! Alternative (Fortran): b*(b*(a*a)) = +(a*b)**2 — NOTE(review): sign differs from the input; low (24.3%) reported accuracy.
real(8) function code(a, b)
real(8), intent (in) :: a
real(8), intent (in) :: b
code = b * (b * (a * a))
end function
// Alternative (Java): b*(b*(a*a)) = +(a*b)^2 — NOTE(review): sign differs from the input; low (24.3%) reported accuracy.
public static double code(double a, double b) {
return b * (b * (a * a));
}
# Alternative (Python): b*(b*(a*a)) = +(a*b)**2 — NOTE(review): sign differs from the input; low (24.3%) reported accuracy.
def code(a, b): return b * (b * (a * a))
# Alternative (Julia): b*(b*(a*a)) = +(a*b)^2 — NOTE(review): sign differs from the input; low (24.3%) reported accuracy.
function code(a, b) return Float64(b * Float64(b * Float64(a * a))) end
% Alternative (MATLAB): b*(b*(a*a)) = +(a*b)^2 — NOTE(review): sign differs from the input; low (24.3%) reported accuracy.
function tmp = code(a, b) tmp = b * (b * (a * a)); end
(* Alternative (Mathematica): b*(b*(a*a)) = +(a*b)^2 — NOTE(review): sign differs from the input; low (24.3%) reported accuracy. *)
code[a_, b_] := N[(b * N[(b * N[(a * a), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
b \cdot \left(b \cdot \left(a \cdot a\right)\right)
\end{array}
Initial program 80.5%
associate-*l* N/A
associate-*r* N/A
+-lft-identity N/A
flip3-+ N/A
distribute-neg-frac N/A
Applied egg-rr 24.3%
Final simplification 24.3%
; Alternative: (b*a)*(b*a) = +(a*b)^2 — NOTE(review): sign differs from the input's -(a*b)^2; report shows only 24.2% accuracy.
(FPCore (a b) :precision binary64 (* (* b a) (* b a)))
/* Alternative (C): (b*a)*(b*a) = +(a*b)^2 — NOTE(review): sign differs from the input; low (24.2%) reported accuracy. */
double code(double a, double b) {
return (b * a) * (b * a);
}
! Alternative (Fortran): (b*a)*(b*a) = +(a*b)**2 — NOTE(review): sign differs from the input; low (24.2%) reported accuracy.
real(8) function code(a, b)
real(8), intent (in) :: a
real(8), intent (in) :: b
code = (b * a) * (b * a)
end function
// Alternative (Java): (b*a)*(b*a) = +(a*b)^2 — NOTE(review): sign differs from the input; low (24.2%) reported accuracy.
public static double code(double a, double b) {
return (b * a) * (b * a);
}
# Alternative (Python): (b*a)*(b*a) = +(a*b)**2 — NOTE(review): sign differs from the input; low (24.2%) reported accuracy.
def code(a, b): return (b * a) * (b * a)
# Alternative (Julia): (b*a)*(b*a) = +(a*b)^2 — NOTE(review): sign differs from the input; low (24.2%) reported accuracy.
function code(a, b) return Float64(Float64(b * a) * Float64(b * a)) end
% Alternative (MATLAB): (b*a)*(b*a) = +(a*b)^2 — NOTE(review): sign differs from the input; low (24.2%) reported accuracy.
function tmp = code(a, b) tmp = (b * a) * (b * a); end
(* Alternative (Mathematica): (b*a)*(b*a) = +(a*b)^2 — NOTE(review): sign differs from the input; low (24.2%) reported accuracy. *)
code[a_, b_] := N[(N[(b * a), $MachinePrecision] * N[(b * a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(b \cdot a\right) \cdot \left(b \cdot a\right)
\end{array}
Initial program 80.5%
associate-*l* N/A
associate-*r* N/A
+-lft-identity N/A
flip3-+ N/A
distribute-neg-frac N/A
Applied egg-rr 24.2%
Final simplification 24.2%
herbie shell --seed 2024152
; Final Herbie query: the named input program "ab-angle->ABCF D", -(a*b)^2 in binary64.
(FPCore (a b)
:name "ab-angle->ABCF D"
:precision binary64
(- (* (* (* a a) b) b)))