
; Herbie input (direct form): a^2 - b^2 in binary64.
(FPCore (a b) :precision binary64 (- (* a a) (* b b)))
/* Difference of squares, direct form: a*a - b*b in binary64. */
double code(double a, double b) {
    double a_sq = a * a;
    double b_sq = b * b;
    return a_sq - b_sq;
}
! Difference of squares, direct form: a*a - b*b in real(8).
real(8) function code(a, b)
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8) :: a_sq, b_sq
    a_sq = a * a
    b_sq = b * b
    code = a_sq - b_sq
end function
// Difference of squares, direct form: a*a - b*b in double precision.
public static double code(double a, double b) {
    double aSq = a * a;
    double bSq = b * b;
    return aSq - bSq;
}
def code(a, b):
    """Difference of squares, direct form: a*a - b*b."""
    a_sq = a * a
    b_sq = b * b
    return a_sq - b_sq
function code(a, b)
    # Difference of squares, direct form, with explicit Float64 rounding.
    a_sq = Float64(a * a)
    b_sq = Float64(b * b)
    return Float64(a_sq - b_sq)
end
function tmp = code(a, b)
% Difference of squares, direct form: a*a - b*b.
a_sq = a * a;
b_sq = b * b;
tmp = a_sq - b_sq;
end
(* Difference of squares, direct form, each operation rounded to $MachinePrecision. *)
code[a_, b_] := N[(N[(a * a), $MachinePrecision] - N[(b * b), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
a \cdot a - b \cdot b
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 3 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
; Same as the input program: direct a^2 - b^2 in binary64.
(FPCore (a b) :precision binary64 (- (* a a) (* b b)))
/* Direct form of the difference of squares in binary64. */
double code(double a, double b) {
    double lhs = a * a;
    double rhs = b * b;
    return lhs - rhs;
}
! Direct difference of squares: a*a - b*b in real(8).
! NOTE(review): the report's alternatives rewrite this as (a-b)*(a+b)
! to avoid cancellation when |a| is close to |b|.
real(8) function code(a, b)
real(8), intent (in) :: a
real(8), intent (in) :: b
code = (a * a) - (b * b)
end function
// Direct difference of squares: a*a - b*b in double precision.
public static double code(double a, double b) {
return (a * a) - (b * b);
}
# Direct difference of squares: a*a - b*b.
def code(a, b): return (a * a) - (b * b)
# Direct difference of squares in Float64: a*a - b*b.
function code(a, b) return Float64(Float64(a * a) - Float64(b * b)) end
% Direct difference of squares: a*a - b*b.
function tmp = code(a, b) tmp = (a * a) - (b * b); end
(* Direct difference of squares at $MachinePrecision. *)
code[a_, b_] := N[(N[(a * a), $MachinePrecision] - N[(b * b), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
a \cdot a - b \cdot b
\end{array}
; Rearranged form: (a - b) * (a + b) in binary64.
(FPCore (a b) :precision binary64 (* (- a b) (+ a b)))
/* Rearranged difference of squares: (a - b) * (a + b). */
double code(double a, double b) {
    double diff = a - b;
    double sum = a + b;
    return diff * sum;
}
! Rearranged difference of squares: (a - b) * (a + b).
real(8) function code(a, b)
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8) :: diff, total
    diff = a - b
    total = a + b
    code = diff * total
end function
// Rearranged difference of squares: (a - b) * (a + b).
public static double code(double a, double b) {
    double diff = a - b;
    double sum = a + b;
    return diff * sum;
}
def code(a, b):
    """Rearranged difference of squares: (a - b) * (a + b)."""
    diff = a - b
    total = a + b
    return diff * total
function code(a, b)
    # Rearranged difference of squares: (a - b) * (a + b), Float64 throughout.
    diff = Float64(a - b)
    total = Float64(a + b)
    return Float64(diff * total)
end
function tmp = code(a, b)
% Rearranged difference of squares: (a - b) * (a + b).
diff_ab = a - b;
sum_ab = a + b;
tmp = diff_ab * sum_ab;
end
(* Rearranged difference of squares: (a - b)(a + b) at $MachinePrecision. *)
code[a_, b_] := N[(N[(a - b), $MachinePrecision] * N[(a + b), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(a - b\right) \cdot \left(a + b\right)
\end{array}
Initial program 95.7%
difference-of-squares N/A
*-commutative N/A
*-lowering-*.f64 N/A
--lowering--.f64 N/A
+-lowering-+.f64 100.0%
Applied egg-rr 100.0%
; Branch approximation: -(b*b) when a*a <= 3.4e192, otherwise a*a.
(FPCore (a b) :precision binary64 (if (<= (* a a) 3.4e+192) (- 0.0 (* b b)) (* a a)))
/* Herbie branch approximation of a*a - b*b (binary64):
 * -(b*b) when a*a is small (<= 3.4e192), a*a otherwise. */
double code(double a, double b) {
    double sq_a = a * a;
    if (sq_a <= 3.4e+192) {
        return 0.0 - (b * b);
    }
    return sq_a;
}
! Herbie branch approximation of a*a - b*b in real(8):
! when a*a is small (<= 3.4d192) the result is approximated by -(b*b);
! otherwise b*b is treated as negligible and a*a is returned.
real(8) function code(a, b)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8) :: tmp
if ((a * a) <= 3.4d+192) then
tmp = 0.0d0 - (b * b)
else
tmp = a * a
end if
code = tmp
end function
// Herbie branch approximation of a*a - b*b (binary64):
// -(b*b) when a*a is small (<= 3.4e192), a*a otherwise.
public static double code(double a, double b) {
    double sqA = a * a;
    return (sqA <= 3.4e+192) ? (0.0 - (b * b)) : sqA;
}
def code(a, b):
    """Herbie branch approximation of a*a - b*b (binary64).

    When a*a is small (<= 3.4e+192) the result is approximated by
    -(b*b); otherwise b*b is negligible and a*a is returned.

    Fix: the generated code fused the if/else/return onto a single
    line, which is not valid Python syntax; reformatted into a proper
    block with behavior matching the FPCore expression above.
    """
    if (a * a) <= 3.4e+192:
        tmp = 0.0 - (b * b)
    else:
        tmp = a * a
    return tmp
# Herbie branch approximation of a*a - b*b in Float64:
# -(b*b) when a*a is small (<= 3.4e192), a*a otherwise.
# Fix: the generated one-line form lacked the statement separators Julia
# requires ("tmp = 0.0 if (...)" does not parse); reformatted multi-line.
function code(a, b)
    tmp = 0.0
    if (Float64(a * a) <= 3.4e+192)
        tmp = Float64(0.0 - Float64(b * b))
    else
        tmp = Float64(a * a)
    end
    return tmp
end
function tmp_2 = code(a, b)
% Herbie branch approximation of a*a - b*b:
% -(b*b) when a*a is small (<= 3.4e192), a*a otherwise.
% Fix: the generated one-line form placed the if-body directly after the
% condition without the comma/newline MATLAB requires; reformatted.
tmp = 0.0;
if ((a * a) <= 3.4e+192)
    tmp = 0.0 - (b * b);
else
    tmp = a * a;
end
tmp_2 = tmp;
end
(* Branch approximation: -(b*b) when a*a <= 3.4e192, otherwise a*a. *)
code[a_, b_] := If[LessEqual[N[(a * a), $MachinePrecision], 3.4e+192], N[(0.0 - N[(b * b), $MachinePrecision]), $MachinePrecision], N[(a * a), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;a \cdot a \leq 3.4 \cdot 10^{+192}:\\
\;\;\;\;0 - b \cdot b\\
\mathbf{else}:\\
\;\;\;\;a \cdot a\\
\end{array}
\end{array}
if (*.f64 a a) < 3.39999999999999996e192 — Initial program 100.0%
Taylor expanded in a around 0
mul-1-neg N/A
neg-sub0 N/A
--lowering--.f64 N/A
unpow2 N/A
*-lowering-*.f64 75.2%
Simplified 75.2%
sub0-neg N/A
neg-lowering-neg.f64 N/A
*-lowering-*.f64 75.2%
Applied egg-rr 75.2%
if 3.39999999999999996e192 < (*.f64 a a) — Initial program 86.9%
Taylor expanded in a around inf
unpow2 N/A
*-lowering-*.f64 91.7%
Simplified 91.7%
Final simplification 80.6%
; Large-|a| approximation: a*a only (b*b dropped).
(FPCore (a b) :precision binary64 (* a a))
/* Large-|a| regime approximation: a*a dominates, b*b is dropped; b is unused. */
double code(double a, double b) {
    double sq = a * a;
    return sq;
}
! Large-|a| regime (Taylor expansion around infinity): b*b is
! negligible relative to a*a, so only a*a is computed; b is unused.
real(8) function code(a, b)
real(8), intent (in) :: a
real(8), intent (in) :: b
code = a * a
end function
// Large-|a| regime approximation: returns a*a; b is unused.
public static double code(double a, double b) {
    double sq = a * a;
    return sq;
}
def code(a, b):
    """Large-|a| approximation: return a*a; b is unused."""
    sq = a * a
    return sq
# Large-|a| approximation: returns a*a in Float64; b is unused.
function code(a, b) return Float64(a * a) end
% Large-|a| approximation: returns a*a; b is unused.
function tmp = code(a, b) tmp = a * a; end
(* Large-|a| approximation: a*a at $MachinePrecision; b is unused. *)
code[a_, b_] := N[(a * a), $MachinePrecision]
\begin{array}{l}
\\
a \cdot a
\end{array}
Initial program 95.7%
Taylor expanded in a around inf
unpow2 N/A
*-lowering-*.f64 50.4%
Simplified 50.4%
; Rearranged form: (a + b) * (a - b) in binary64.
(FPCore (a b) :precision binary64 (* (+ a b) (- a b)))
/* Rearranged difference of squares: (a + b) * (a - b). */
double code(double a, double b) {
    double sum = a + b;
    double diff = a - b;
    return sum * diff;
}
! Rearranged difference of squares: (a + b) * (a - b).
real(8) function code(a, b)
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8) :: total, diff
    total = a + b
    diff = a - b
    code = total * diff
end function
// Rearranged difference of squares: (a + b) * (a - b); the report's
// preferred alternative to the direct a*a - b*b form.
public static double code(double a, double b) {
return (a + b) * (a - b);
}
# Rearranged difference of squares: (a + b) * (a - b).
def code(a, b): return (a + b) * (a - b)
# Rearranged difference of squares: (a + b) * (a - b) in Float64.
function code(a, b) return Float64(Float64(a + b) * Float64(a - b)) end
% Rearranged difference of squares: (a + b) * (a - b).
function tmp = code(a, b) tmp = (a + b) * (a - b); end
(* Rearranged difference of squares: (a + b)(a - b) at $MachinePrecision. *)
code[a_, b_] := N[(N[(a + b), $MachinePrecision] * N[(a - b), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(a + b\right) \cdot \left(a - b\right)
\end{array}
herbie shell --seed 2024139
; Herbie input: "Difference of squares" in binary64, with the
; (a+b)*(a-b) rearrangement registered as a known alternative (:alt).
(FPCore (a b)
:name "Difference of squares"
:precision binary64
:alt
(! :herbie-platform default (* (+ a b) (- a b)))
(- (* a a) (* b b)))