
(FPCore (a b) :precision binary64 (- (* (* (* a a) b) b)))
/* Original program: -(a^2 * b^2), products evaluated left to right
 * exactly as in the FPCore ((a*a)*b)*b, then negated. */
double code(double a, double b) {
    double t = a * a;
    t = t * b;
    t = t * b;
    return -t;
}
! Original program: -(a**2 * b**2); multiplication order ((a*a)*b)*b preserved.
real(8) function code(a, b)
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8) :: t
    t = (a * a) * b
    code = -(t * b)
end function
/** Returns -(a*a*b*b); products taken left to right as in the original. */
public static double code(double a, double b) {
    double t = a * a;
    t = t * b;
    t = t * b;
    return -t;
}
def code(a, b):
    """Return -(((a*a)*b)*b), preserving the left-to-right product order."""
    t = a * a
    t = t * b
    t = t * b
    return -t
# Same rounding chain as the generated one-liner: every intermediate
# product is explicitly rounded to Float64 before the next operation.
function code(a, b)
    t = Float64(a * a)
    t = Float64(t * b)
    t = Float64(t * b)
    return Float64(-t)
end
% Negated product a^2*b^2, multiplied left to right.
function tmp = code(a, b)
    t = (a * a) * b;
    tmp = -(t * b);
end
(* Original program: each product is rounded to $MachinePrecision via N[...] before the next multiply; result is the negation. *)
code[a_, b_] := (-N[(N[(N[(a * a), $MachinePrecision] * b), $MachinePrecision] * b), $MachinePrecision])
\begin{array}{l}
\\
-\left(\left(a \cdot a\right) \cdot b\right) \cdot b
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 5 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (a b) :precision binary64 (- (* (* (* a a) b) b)))
/* Original program (repeated listing): -(a^2 * b^2) with the
 * left-to-right product order ((a*a)*b)*b. */
double code(double a, double b) {
    double t = a * a;
    t = t * b;
    t = t * b;
    return -t;
}
! Original program (repeated listing): -(a**2 * b**2), order preserved.
real(8) function code(a, b)
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8) :: t
    t = (a * a) * b
    code = -(t * b)
end function
/** Original program (repeated listing): -(a*a*b*b), left-to-right products. */
public static double code(double a, double b) {
    double t = a * a;
    t = t * b;
    t = t * b;
    return -t;
}
def code(a, b):
    """Return -(((a*a)*b)*b), keeping the original multiplication order."""
    t = a * a
    t = t * b
    t = t * b
    return -t
# Repeated listing of the original; intermediate products rounded to Float64.
function code(a, b)
    t = Float64(a * a)
    t = Float64(t * b)
    t = Float64(t * b)
    return Float64(-t)
end
% Repeated listing: negated product a^2*b^2, multiplied left to right.
function tmp = code(a, b)
    t = (a * a) * b;
    tmp = -(t * b);
end
(* Repeated listing of the original: products rounded to $MachinePrecision via N[...], then negated. *)
code[a_, b_] := (-N[(N[(N[(a * a), $MachinePrecision] * b), $MachinePrecision] * b), $MachinePrecision])
\begin{array}{l}
\\
-\left(\left(a \cdot a\right) \cdot b\right) \cdot b
\end{array}
a_m = (fabs.f64 a) b_m = (fabs.f64 b) (FPCore (a_m b_m) :precision binary64 (* (pow (* a_m b_m) 1.5) (- (sqrt (* a_m b_m)))))
a_m = fabs(a);
b_m = fabs(b);
double code(double a_m, double b_m) {
return pow((a_m * b_m), 1.5) * -sqrt((a_m * b_m));
}
a_m = abs(a)
b_m = abs(b)
! Herbie candidate: (a_m*b_m)**1.5 * (-sqrt(a_m*b_m)); product computed once.
real(8) function code(a_m, b_m)
    real(8), intent (in) :: a_m
    real(8), intent (in) :: b_m
    real(8) :: p
    p = a_m * b_m
    code = (p ** 1.5d0) * -sqrt(p)
end function
a_m = Math.abs(a);
b_m = Math.abs(b);
/** Herbie candidate: (a_m*b_m)^1.5 * -sqrt(a_m*b_m); product hoisted. */
public static double code(double a_m, double b_m) {
    double p = a_m * b_m;
    return Math.pow(p, 1.5) * -Math.sqrt(p);
}
# Generated fragment: the fabs preamble and the candidate def were emitted on one line.
a_m = math.fabs(a) b_m = math.fabs(b) def code(a_m, b_m): return math.pow((a_m * b_m), 1.5) * -math.sqrt((a_m * b_m))
# Generated fragment: abs preamble and candidate emitted on one line.
a_m = abs(a) b_m = abs(b) function code(a_m, b_m) return Float64((Float64(a_m * b_m) ^ 1.5) * Float64(-sqrt(Float64(a_m * b_m)))) end
% Generated fragment: abs preamble and candidate emitted on one line.
a_m = abs(a); b_m = abs(b); function tmp = code(a_m, b_m) tmp = ((a_m * b_m) ^ 1.5) * -sqrt((a_m * b_m)); end
(* Generated fragment: Abs preamble and candidate emitted on one line. *)
a_m = N[Abs[a], $MachinePrecision] b_m = N[Abs[b], $MachinePrecision] code[a$95$m_, b$95$m_] := N[(N[Power[N[(a$95$m * b$95$m), $MachinePrecision], 1.5], $MachinePrecision] * (-N[Sqrt[N[(a$95$m * b$95$m), $MachinePrecision]], $MachinePrecision])), $MachinePrecision]
\begin{array}{l}
a_m = \left|a\right|
\\
b_m = \left|b\right|
\\
{\left(a\_m \cdot b\_m\right)}^{1.5} \cdot \left(-\sqrt{a\_m \cdot b\_m}\right)
\end{array}
Initial program 84.2%
Taylor expanded in a around 0 80.1%
unpow2 80.1%
unpow2 80.1%
swap-sqr 99.7%
unpow2 99.7%
Simplified 99.7%
unpow2 99.7%
add-sqr-sqrt 54.4%
associate-*r* 54.5%
pow1 54.5%
pow1/2 54.5%
pow-prod-up 54.5%
metadata-eval 54.5%
Applied egg-rr 54.5%
Final simplification 54.5%
a_m = (fabs.f64 a) b_m = (fabs.f64 b) (FPCore (a_m b_m) :precision binary64 (* (* a_m b_m) (* a_m (- b_m))))
a_m = fabs(a);
b_m = fabs(b);
/* Herbie candidate: (a_m*b_m) * (a_m*(-b_m)); two products, then one multiply. */
double code(double a_m, double b_m) {
    double left = a_m * b_m;
    double right = a_m * -b_m;
    return left * right;
}
a_m = abs(a)
b_m = abs(b)
! Herbie candidate: (a_m*b_m) * (a_m*(-b_m)).
real(8) function code(a_m, b_m)
    real(8), intent (in) :: a_m
    real(8), intent (in) :: b_m
    real(8) :: left, right
    left = a_m * b_m
    right = a_m * (-b_m)
    code = left * right
end function
a_m = Math.abs(a);
b_m = Math.abs(b);
/** Herbie candidate: (a_m*b_m) * (a_m*(-b_m)). */
public static double code(double a_m, double b_m) {
    double left = a_m * b_m;
    double right = a_m * -b_m;
    return left * right;
}
# Generated fragment: fabs preamble and candidate emitted on one line.
a_m = math.fabs(a) b_m = math.fabs(b) def code(a_m, b_m): return (a_m * b_m) * (a_m * -b_m)
# Generated fragment: abs preamble and candidate emitted on one line.
a_m = abs(a) b_m = abs(b) function code(a_m, b_m) return Float64(Float64(a_m * b_m) * Float64(a_m * Float64(-b_m))) end
% Generated fragment: abs preamble and candidate emitted on one line.
a_m = abs(a); b_m = abs(b); function tmp = code(a_m, b_m) tmp = (a_m * b_m) * (a_m * -b_m); end
(* Generated fragment: Abs preamble and candidate emitted on one line. *)
a_m = N[Abs[a], $MachinePrecision] b_m = N[Abs[b], $MachinePrecision] code[a$95$m_, b$95$m_] := N[(N[(a$95$m * b$95$m), $MachinePrecision] * N[(a$95$m * (-b$95$m)), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
a_m = \left|a\right|
\\
b_m = \left|b\right|
\\
\left(a\_m \cdot b\_m\right) \cdot \left(a\_m \cdot \left(-b\_m\right)\right)
\end{array}
Initial program 84.2%
Taylor expanded in a around 0 80.1%
unpow2 80.1%
unpow2 80.1%
swap-sqr 99.7%
unpow2 99.7%
Simplified 99.7%
unpow2 99.7%
Applied egg-rr 99.7%
Final simplification 99.7%
a_m = (fabs.f64 a) b_m = (fabs.f64 b) (FPCore (a_m b_m) :precision binary64 (* b_m (* a_m (* a_m b_m))))
a_m = fabs(a);
b_m = fabs(b);
/* Herbie candidate: b_m*(a_m*(a_m*b_m)).  Note: no negation — this
 * low-accuracy (26.2%) candidate's sign differs from the original;
 * preserved as generated. */
double code(double a_m, double b_m) {
    double t = a_m * b_m;
    t = a_m * t;
    return b_m * t;
}
a_m = abs(a)
b_m = abs(b)
! Herbie candidate: b_m*(a_m*(a_m*b_m)); no negation (preserved as generated).
real(8) function code(a_m, b_m)
    real(8), intent (in) :: a_m
    real(8), intent (in) :: b_m
    real(8) :: t
    t = a_m * b_m
    t = a_m * t
    code = b_m * t
end function
a_m = Math.abs(a);
b_m = Math.abs(b);
/** Herbie candidate: b_m*(a_m*(a_m*b_m)); no negation (as generated). */
public static double code(double a_m, double b_m) {
    double t = a_m * b_m;
    t = a_m * t;
    return b_m * t;
}
# Generated fragment: fabs preamble and candidate emitted on one line.
a_m = math.fabs(a) b_m = math.fabs(b) def code(a_m, b_m): return b_m * (a_m * (a_m * b_m))
# Generated fragment: abs preamble and candidate emitted on one line.
a_m = abs(a) b_m = abs(b) function code(a_m, b_m) return Float64(b_m * Float64(a_m * Float64(a_m * b_m))) end
% Generated fragment: abs preamble and candidate emitted on one line.
a_m = abs(a); b_m = abs(b); function tmp = code(a_m, b_m) tmp = b_m * (a_m * (a_m * b_m)); end
(* Generated fragment: Abs preamble and candidate emitted on one line. *)
a_m = N[Abs[a], $MachinePrecision] b_m = N[Abs[b], $MachinePrecision] code[a$95$m_, b$95$m_] := N[(b$95$m * N[(a$95$m * N[(a$95$m * b$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
a_m = \left|a\right|
\\
b_m = \left|b\right|
\\
b\_m \cdot \left(a\_m \cdot \left(a\_m \cdot b\_m\right)\right)
\end{array}
Initial program 84.2%
distribute-rgt-neg-in 84.2%
associate-*l* 92.9%
Simplified 92.9%
neg-sub0 92.9%
sub-neg 92.9%
add-sqr-sqrt 44.3%
sqrt-unprod 54.2%
sqr-neg 54.2%
sqrt-unprod 14.9%
add-sqr-sqrt 26.2%
Applied egg-rr 26.2%
+-lft-identity 26.2%
Simplified 26.2%
Final simplification 26.2%
a_m = (fabs.f64 a) b_m = (fabs.f64 b) (FPCore (a_m b_m) :precision binary64 (* (* a_m b_m) (* a_m b_m)))
a_m = fabs(a);
b_m = fabs(b);
/* Herbie candidate: square of the rounded product a_m*b_m.  Both factors in
 * the original round to the same value, so computing the product once is
 * bit-identical.  No negation (preserved as generated). */
double code(double a_m, double b_m) {
    double p = a_m * b_m;
    return p * p;
}
a_m = abs(a)
b_m = abs(b)
! Herbie candidate: square of the rounded product a_m*b_m (no negation).
real(8) function code(a_m, b_m)
    real(8), intent (in) :: a_m
    real(8), intent (in) :: b_m
    real(8) :: p
    p = a_m * b_m
    code = p * p
end function
a_m = Math.abs(a);
b_m = Math.abs(b);
/** Herbie candidate: (a_m*b_m)^2, product computed once; no negation. */
public static double code(double a_m, double b_m) {
    double p = a_m * b_m;
    return p * p;
}
# Generated fragment: fabs preamble and candidate emitted on one line.
a_m = math.fabs(a) b_m = math.fabs(b) def code(a_m, b_m): return (a_m * b_m) * (a_m * b_m)
# Generated fragment: abs preamble and candidate emitted on one line.
a_m = abs(a) b_m = abs(b) function code(a_m, b_m) return Float64(Float64(a_m * b_m) * Float64(a_m * b_m)) end
% Generated fragment: abs preamble and candidate emitted on one line.
a_m = abs(a); b_m = abs(b); function tmp = code(a_m, b_m) tmp = (a_m * b_m) * (a_m * b_m); end
(* Generated fragment: Abs preamble and candidate emitted on one line. *)
a_m = N[Abs[a], $MachinePrecision] b_m = N[Abs[b], $MachinePrecision] code[a$95$m_, b$95$m_] := N[(N[(a$95$m * b$95$m), $MachinePrecision] * N[(a$95$m * b$95$m), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
a_m = \left|a\right|
\\
b_m = \left|b\right|
\\
\left(a\_m \cdot b\_m\right) \cdot \left(a\_m \cdot b\_m\right)
\end{array}
Initial program 84.2%
add-sqr-sqrt 25.3%
sqrt-unprod 26.2%
sqr-neg 26.2%
sqrt-unprod 26.2%
add-sqr-sqrt 26.2%
associate-*l* 25.9%
swap-sqr 26.1%
Applied egg-rr 26.1%
a_m = (fabs.f64 a) b_m = (fabs.f64 b) (FPCore (a_m b_m) :precision binary64 (* a_m (* b_m (* a_m b_m))))
a_m = fabs(a);
b_m = fabs(b);
/* Herbie candidate: a_m*(b_m*(a_m*b_m)); no negation (preserved as generated). */
double code(double a_m, double b_m) {
    double t = a_m * b_m;
    t = b_m * t;
    return a_m * t;
}
a_m = abs(a)
b_m = abs(b)
! Herbie candidate: a_m*(b_m*(a_m*b_m)); no negation (preserved as generated).
real(8) function code(a_m, b_m)
    real(8), intent (in) :: a_m
    real(8), intent (in) :: b_m
    real(8) :: t
    t = a_m * b_m
    t = b_m * t
    code = a_m * t
end function
a_m = Math.abs(a);
b_m = Math.abs(b);
/** Herbie candidate: a_m*(b_m*(a_m*b_m)); no negation (as generated). */
public static double code(double a_m, double b_m) {
    double t = a_m * b_m;
    t = b_m * t;
    return a_m * t;
}
# Generated fragment: fabs preamble and candidate emitted on one line.
a_m = math.fabs(a) b_m = math.fabs(b) def code(a_m, b_m): return a_m * (b_m * (a_m * b_m))
# Generated fragment: abs preamble and candidate emitted on one line.
a_m = abs(a) b_m = abs(b) function code(a_m, b_m) return Float64(a_m * Float64(b_m * Float64(a_m * b_m))) end
% Generated fragment: abs preamble and candidate emitted on one line.
a_m = abs(a); b_m = abs(b); function tmp = code(a_m, b_m) tmp = a_m * (b_m * (a_m * b_m)); end
(* Generated fragment: Abs preamble and candidate emitted on one line. *)
a_m = N[Abs[a], $MachinePrecision] b_m = N[Abs[b], $MachinePrecision] code[a$95$m_, b$95$m_] := N[(a$95$m * N[(b$95$m * N[(a$95$m * b$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
a_m = \left|a\right|
\\
b_m = \left|b\right|
\\
a\_m \cdot \left(b\_m \cdot \left(a\_m \cdot b\_m\right)\right)
\end{array}
Initial program 84.2%
associate-*l* 80.1%
associate-*r* 84.8%
*-commutative 84.8%
distribute-rgt-neg-in 84.8%
distribute-rgt-neg-in 84.8%
associate-*r* 96.2%
Simplified 96.2%
neg-sub0 96.2%
sub-neg 96.2%
add-sqr-sqrt 47.6%
sqrt-unprod 55.3%
sqr-neg 55.3%
sqrt-prod 14.8%
add-sqr-sqrt 26.1%
Applied egg-rr 26.1%
+-lft-identity 26.1%
Simplified 26.1%
Final simplification 26.1%
herbie shell --seed 2024131
(FPCore (a b)
:name "ab-angle->ABCF D"
:precision binary64
(- (* (* (* a a) b) b)))