
(FPCore (a b) :precision binary64 (- (* a a) (* b b)))
/* Difference of squares: returns a*a - b*b, evaluated directly in binary64. */
double code(double a, double b) {
    const double square_a = a * a;
    const double square_b = b * b;
    return square_a - square_b;
}
! Difference of squares: a*a - b*b in binary64 (double precision).
real(8) function code(a, b)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8) :: sq_a, sq_b
sq_a = a * a
sq_b = b * b
code = sq_a - sq_b
end function
/** Difference of squares: a*a - b*b in double precision. */
public static double code(double a, double b) {
    final double squaredA = a * a;
    final double squaredB = b * b;
    return squaredA - squaredB;
}
def code(a, b):
    """Difference of squares: a*a - b*b (binary64 when given floats)."""
    square_a = a * a
    square_b = b * b
    return square_a - square_b
# Difference of squares: a*a - b*b, each intermediate rounded to Float64.
function code(a, b)
    sq_a = Float64(a * a)
    sq_b = Float64(b * b)
    return Float64(sq_a - sq_b)
end
% Difference of squares: a*a - b*b.
function tmp = code(a, b)
    sq_a = a * a;
    sq_b = b * b;
    tmp = sq_a - sq_b;
end
(* Difference of squares a*a - b*b; each operation rounded to $MachinePrecision. *)
code[a_, b_] := N[(N[(a * a), $MachinePrecision] - N[(b * b), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
a \cdot a - b \cdot b
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 3 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (a b) :precision binary64 (- (* a a) (* b b)))
/* Initial program restated for the alternatives table: a*a - b*b. */
double code(double a, double b) {
return (a * a) - (b * b);
}
! Initial program restated for the alternatives table: a*a - b*b.
real(8) function code(a, b)
real(8), intent (in) :: a
real(8), intent (in) :: b
code = (a * a) - (b * b)
end function
// Initial program restated for the alternatives table: a*a - b*b.
public static double code(double a, double b) {
return (a * a) - (b * b);
}
# Initial program restated for the alternatives table: a*a - b*b.
def code(a, b): return (a * a) - (b * b)
# Initial program restated; each operation rounded to Float64.
function code(a, b) return Float64(Float64(a * a) - Float64(b * b)) end
% Initial program restated for the alternatives table: a*a - b*b.
function tmp = code(a, b) tmp = (a * a) - (b * b); end
(* Initial program restated; each operation rounded to $MachinePrecision. *)
code[a_, b_] := N[(N[(a * a), $MachinePrecision] - N[(b * b), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
a \cdot a - b \cdot b
\end{array}
a_m = (fabs.f64 a) b_m = (fabs.f64 b) (FPCore (a_m b_m) :precision binary64 (if (<= (* a_m a_m) 1e+273) (- (* a_m a_m) (* b_m b_m)) (* a_m (+ a_m (* b_m -2.0)))))
a_m = fabs(a);
b_m = fabs(b);
double code(double a_m, double b_m) {
double tmp;
if ((a_m * a_m) <= 1e+273) {
tmp = (a_m * a_m) - (b_m * b_m);
} else {
tmp = a_m * (a_m + (b_m * -2.0));
}
return tmp;
}
a_m = abs(a)
b_m = abs(b)
! Regime-split difference of squares for pre-absed inputs.
! Small regime: direct a_m**2 - b_m**2; large regime: a_m*(a_m - 2*b_m),
! the form Herbie derived by expanding around |a| -> infinity.
real(8) function code(a_m, b_m)
real(8), intent (in) :: a_m
real(8), intent (in) :: b_m
if ((a_m * a_m) <= 1d+273) then
code = (a_m * a_m) - (b_m * b_m)
else
code = a_m * (a_m + (b_m * (-2.0d0)))
end if
end function
a_m = Math.abs(a);
b_m = Math.abs(b);
/**
 * Regime-split difference of squares for pre-absed inputs.
 * Small regime (a_m*a_m <= 1e273): direct a_m*a_m - b_m*b_m.
 * Large regime: a_m * (a_m - 2*b_m), derived by expansion around |a| -> inf.
 */
public static double code(double a_m, double b_m) {
    if ((a_m * a_m) <= 1e+273) {
        return (a_m * a_m) - (b_m * b_m);
    }
    return a_m * (a_m + (b_m * -2.0));
}
# NOTE(review): the preamble assignments and the function body were flattened
# onto one line by extraction and are not valid Python as written. The intended
# code sets a_m = |a|, b_m = |b|, then switches between (a_m*a_m - b_m*b_m)
# and a_m*(a_m - 2*b_m) at the threshold a_m*a_m = 1e273.
a_m = math.fabs(a) b_m = math.fabs(b) def code(a_m, b_m): tmp = 0 if (a_m * a_m) <= 1e+273: tmp = (a_m * a_m) - (b_m * b_m) else: tmp = a_m * (a_m + (b_m * -2.0)) return tmp
# NOTE(review): preamble and function were flattened onto one line by
# extraction; intended shape is abs() preamble plus a two-regime branch at
# a_m*a_m = 1e273, every operation rounded to Float64.
a_m = abs(a) b_m = abs(b) function code(a_m, b_m) tmp = 0.0 if (Float64(a_m * a_m) <= 1e+273) tmp = Float64(Float64(a_m * a_m) - Float64(b_m * b_m)); else tmp = Float64(a_m * Float64(a_m + Float64(b_m * -2.0))); end return tmp end
% NOTE(review): preamble and function were flattened onto one line by
% extraction; intended shape is abs() preamble plus a two-regime branch at
% a_m*a_m = 1e273.
a_m = abs(a); b_m = abs(b); function tmp_2 = code(a_m, b_m) tmp = 0.0; if ((a_m * a_m) <= 1e+273) tmp = (a_m * a_m) - (b_m * b_m); else tmp = a_m * (a_m + (b_m * -2.0)); end tmp_2 = tmp; end
(* NOTE(review): preamble and definition were flattened onto one line by
   extraction; intended shape is Abs[] preamble plus an If[] switching at
   a_m*a_m = 1e273, every operation rounded to $MachinePrecision. *)
a_m = N[Abs[a], $MachinePrecision] b_m = N[Abs[b], $MachinePrecision] code[a$95$m_, b$95$m_] := If[LessEqual[N[(a$95$m * a$95$m), $MachinePrecision], 1e+273], N[(N[(a$95$m * a$95$m), $MachinePrecision] - N[(b$95$m * b$95$m), $MachinePrecision]), $MachinePrecision], N[(a$95$m * N[(a$95$m + N[(b$95$m * -2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
a_m = \left|a\right|
\\
b_m = \left|b\right|
\\
\begin{array}{l}
\mathbf{if}\;a_m \cdot a_m \leq 10^{+273}:\\
\;\;\;\;a_m \cdot a_m - b_m \cdot b_m\\
\mathbf{else}:\\
\;\;\;\;a_m \cdot \left(a_m + b_m \cdot -2\right)\\
\end{array}
\end{array}
if (*.f64 a a) < 9.99999999999999945e272 — Initial program 100.0%
if 9.99999999999999945e272 < (*.f64 a a) — Initial program 80.2%
difference-of-squares100.0%
add-sqr-sqrt48.1%
sqrt-prod91.4%
sqr-neg91.4%
sqrt-unprod49.4%
add-sqr-sqrt96.3%
sub-neg96.3%
pow196.3%
pow196.3%
pow-prod-up96.3%
metadata-eval96.3%
add-sqr-sqrt50.6%
add-sqr-sqrt30.9%
difference-of-squares30.9%
unpow-prod-down30.9%
Applied egg-rr30.9%
unpow230.9%
unpow230.9%
unswap-sqr30.9%
difference-of-squares30.9%
unpow1/230.9%
unpow1/230.9%
pow-sqr30.9%
metadata-eval30.9%
unpow130.9%
unpow1/230.9%
unpow1/230.9%
pow-sqr30.9%
metadata-eval30.9%
unpow130.9%
difference-of-squares30.9%
unpow1/230.9%
unpow1/230.9%
pow-sqr46.9%
metadata-eval46.9%
unpow146.9%
Simplified96.3%
Taylor expanded in a around inf 74.1%
*-commutative74.1%
associate-*l*74.1%
unpow274.1%
distribute-lft-out96.3%
Simplified96.3%
Final simplification98.8%
a_m = (fabs.f64 a) b_m = (fabs.f64 b) (FPCore (a_m b_m) :precision binary64 (* a_m (+ a_m (* b_m -2.0))))
a_m = fabs(a);
b_m = fabs(b);
double code(double a_m, double b_m) {
return a_m * (a_m + (b_m * -2.0));
}
a_m = abs(a)
b_m = abs(b)
! Herbie alternative: evaluate a_m*(a_m - 2*b_m) in place of a_m**2 - b_m**2.
real(8) function code(a_m, b_m)
real(8), intent (in) :: a_m
real(8), intent (in) :: b_m
real(8) :: shifted
shifted = a_m + (b_m * (-2.0d0))
code = a_m * shifted
end function
a_m = Math.abs(a);
b_m = Math.abs(b);
/** Herbie alternative: a_m * (a_m - 2*b_m) in place of a_m*a_m - b_m*b_m. */
public static double code(double a_m, double b_m) {
    final double shifted = a_m + (b_m * -2.0);
    return a_m * shifted;
}
# NOTE(review): preamble and function were flattened onto one line by
# extraction; intended code is abs() preamble plus a_m*(a_m - 2*b_m).
a_m = math.fabs(a) b_m = math.fabs(b) def code(a_m, b_m): return a_m * (a_m + (b_m * -2.0))
# NOTE(review): preamble and function flattened onto one line by extraction;
# intended code is abs() preamble plus a_m*(a_m - 2*b_m), rounded per-op.
a_m = abs(a) b_m = abs(b) function code(a_m, b_m) return Float64(a_m * Float64(a_m + Float64(b_m * -2.0))) end
% NOTE(review): preamble and function flattened onto one line by extraction;
% intended code is abs() preamble plus a_m*(a_m - 2*b_m).
a_m = abs(a); b_m = abs(b); function tmp = code(a_m, b_m) tmp = a_m * (a_m + (b_m * -2.0)); end
(* NOTE(review): preamble and definition flattened onto one line by extraction;
   intended code is Abs[] preamble plus a_m*(a_m - 2*b_m), rounded per-op. *)
a_m = N[Abs[a], $MachinePrecision] b_m = N[Abs[b], $MachinePrecision] code[a$95$m_, b$95$m_] := N[(a$95$m * N[(a$95$m + N[(b$95$m * -2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
a_m = \left|a\right|
\\
b_m = \left|b\right|
\\
a_m \cdot \left(a_m + b_m \cdot -2\right)
\end{array}
Initial program 93.8%
difference-of-squares100.0%
add-sqr-sqrt46.8%
sqrt-prod73.8%
sqr-neg73.8%
sqrt-unprod28.9%
add-sqr-sqrt58.4%
sub-neg58.4%
pow158.4%
pow158.4%
pow-prod-up58.4%
metadata-eval58.4%
add-sqr-sqrt32.4%
add-sqr-sqrt19.2%
difference-of-squares19.2%
unpow-prod-down19.2%
Applied egg-rr19.2%
unpow219.2%
unpow219.2%
unswap-sqr19.2%
difference-of-squares19.2%
unpow1/219.2%
unpow1/219.2%
pow-sqr19.2%
metadata-eval19.2%
unpow119.2%
unpow1/219.2%
unpow1/219.2%
pow-sqr19.2%
metadata-eval19.2%
unpow119.2%
difference-of-squares19.2%
unpow1/219.2%
unpow1/219.2%
pow-sqr29.5%
metadata-eval29.5%
unpow129.5%
Simplified58.4%
Taylor expanded in a around inf 53.3%
*-commutative53.3%
associate-*l*53.3%
unpow253.3%
distribute-lft-out60.3%
Simplified60.3%
Final simplification60.3%
a_m = (fabs.f64 a) b_m = (fabs.f64 b) (FPCore (a_m b_m) :precision binary64 (* -2.0 (* a_m b_m)))
a_m = fabs(a);
b_m = fabs(b);
/* Herbie alternative: -2 * a_m * b_m (series form, per the report's log). */
double code(double a_m, double b_m) {
    const double product = a_m * b_m;
    return -2.0 * product;
}
a_m = abs(a)
b_m = abs(b)
! Herbie alternative: -2 * a_m * b_m (series form, per the report's log).
real(8) function code(a_m, b_m)
real(8), intent (in) :: a_m
real(8), intent (in) :: b_m
real(8) :: product
product = a_m * b_m
code = (-2.0d0) * product
end function
a_m = Math.abs(a);
b_m = Math.abs(b);
/** Herbie alternative: -2 * a_m * b_m (series form, per the report's log). */
public static double code(double a_m, double b_m) {
    final double product = a_m * b_m;
    return -2.0 * product;
}
# NOTE(review): preamble and function flattened onto one line by extraction;
# intended code is abs() preamble plus -2 * a_m * b_m.
a_m = math.fabs(a) b_m = math.fabs(b) def code(a_m, b_m): return -2.0 * (a_m * b_m)
# NOTE(review): preamble and function flattened onto one line by extraction;
# intended code is abs() preamble plus -2 * a_m * b_m, rounded per-op.
a_m = abs(a) b_m = abs(b) function code(a_m, b_m) return Float64(-2.0 * Float64(a_m * b_m)) end
% NOTE(review): preamble and function flattened onto one line by extraction;
% intended code is abs() preamble plus -2 * a_m * b_m.
a_m = abs(a); b_m = abs(b); function tmp = code(a_m, b_m) tmp = -2.0 * (a_m * b_m); end
(* NOTE(review): preamble and definition flattened onto one line by extraction;
   intended code is Abs[] preamble plus -2 * a_m * b_m, rounded per-op. *)
a_m = N[Abs[a], $MachinePrecision] b_m = N[Abs[b], $MachinePrecision] code[a$95$m_, b$95$m_] := N[(-2.0 * N[(a$95$m * b$95$m), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
a_m = \left|a\right|
\\
b_m = \left|b\right|
\\
-2 \cdot \left(a_m \cdot b_m\right)
\end{array}
Initial program 93.8%
difference-of-squares100.0%
add-sqr-sqrt46.8%
sqrt-prod73.8%
sqr-neg73.8%
sqrt-unprod28.9%
add-sqr-sqrt58.4%
sub-neg58.4%
pow158.4%
pow158.4%
pow-prod-up58.4%
metadata-eval58.4%
add-sqr-sqrt32.4%
add-sqr-sqrt19.2%
difference-of-squares19.2%
unpow-prod-down19.2%
Applied egg-rr19.2%
unpow219.2%
unpow219.2%
unswap-sqr19.2%
difference-of-squares19.2%
unpow1/219.2%
unpow1/219.2%
pow-sqr19.2%
metadata-eval19.2%
unpow119.2%
unpow1/219.2%
unpow1/219.2%
pow-sqr19.2%
metadata-eval19.2%
unpow119.2%
difference-of-squares19.2%
unpow1/219.2%
unpow1/219.2%
pow-sqr29.5%
metadata-eval29.5%
unpow129.5%
Simplified58.4%
Taylor expanded in a around inf 53.3%
*-commutative53.3%
associate-*l*53.3%
unpow253.3%
distribute-lft-out60.3%
Simplified60.3%
Taylor expanded in a around 0 13.4%
Final simplification13.4%
(FPCore (a b) :precision binary64 (* (+ a b) (- a b)))
/* Factored difference of squares: (a + b) * (a - b). */
double code(double a, double b) {
    const double sum = a + b;
    const double diff = a - b;
    return sum * diff;
}
! Factored difference of squares: (a + b) * (a - b).
real(8) function code(a, b)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8) :: total, delta
total = a + b
delta = a - b
code = total * delta
end function
/** Factored difference of squares: (a + b) * (a - b). */
public static double code(double a, double b) {
    final double sum = a + b;
    final double diff = a - b;
    return sum * diff;
}
def code(a, b):
    """Factored difference of squares: (a + b) * (a - b)."""
    total = a + b
    delta = a - b
    return total * delta
# Factored difference of squares: (a + b) * (a - b), rounded per-op to Float64.
function code(a, b)
    total = Float64(a + b)
    delta = Float64(a - b)
    return Float64(total * delta)
end
% Factored difference of squares: (a + b) * (a - b).
function tmp = code(a, b)
    total = a + b;
    delta = a - b;
    tmp = total * delta;
end
(* Factored difference of squares (a + b)*(a - b), rounded per-op to $MachinePrecision. *)
code[a_, b_] := N[(N[(a + b), $MachinePrecision] * N[(a - b), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(a + b\right) \cdot \left(a - b\right)
\end{array}
herbie shell --seed 2024011
(FPCore (a b)
:name "Difference of squares"
:precision binary64
:herbie-target
(* (+ a b) (- a b))
(- (* a a) (* b b)))