
(FPCore (x y) :precision binary64 (* 0.5 (- (* x x) y)))
double code(double x, double y) {
	/* Reference expression: (x*x - y) scaled by one half.
	   Operand order of the final product is swapped; FP multiply is
	   commutative, so results are bit-identical to 0.5 * ((x*x) - y). */
	const double diff = (x * x) - y;
	return diff * 0.5;
}
! Evaluate 0.5 * (x*x - y) in real(8) arithmetic (Herbie reference form).
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8) :: sq
! Square first, then subtract and halve; identical rounding to the inline form.
sq = x * x
code = 0.5d0 * (sq - y)
end function
public static double code(double x, double y) {
    // Half of (x*x - y); intermediate named for clarity, same FP operations.
    final double squaredMinusY = (x * x) - y;
    return squaredMinusY * 0.5;
}
def code(x, y): return 0.5 * ((x * x) - y)
function code(x, y) return Float64(0.5 * Float64(Float64(x * x) - y)) end
function tmp = code(x, y) tmp = 0.5 * ((x * x) - y); end
code[x_, y_] := N[(0.5 * N[(N[(x * x), $MachinePrecision] - y), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
0.5 \cdot \left(x \cdot x - y\right)
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 7 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x y) :precision binary64 (* 0.5 (- (* x x) y)))
/* code: Herbie reference expression, 0.5 * (x*x - y). */
double code(double x, double y) {
return 0.5 * ((x * x) - y);
}
! code: 0.5d0 * (x*x - y) in double precision (Herbie reference expression).
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = 0.5d0 * ((x * x) - y)
end function
// code: Herbie reference expression, 0.5 * (x*x - y).
public static double code(double x, double y) {
return 0.5 * ((x * x) - y);
}
def code(x, y): return 0.5 * ((x * x) - y)
function code(x, y) return Float64(0.5 * Float64(Float64(x * x) - y)) end
function tmp = code(x, y) tmp = 0.5 * ((x * x) - y); end
code[x_, y_] := N[(0.5 * N[(N[(x * x), $MachinePrecision] - y), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
0.5 \cdot \left(x \cdot x - y\right)
\end{array}
(FPCore (x y) :precision binary64 (fma (* 0.5 x) x (* -0.5 y)))
double code(double x, double y) {
return fma((0.5 * x), x, (-0.5 * y));
}
function code(x, y) return fma(Float64(0.5 * x), x, Float64(-0.5 * y)) end
code[x_, y_] := N[(N[(0.5 * x), $MachinePrecision] * x + N[(-0.5 * y), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(0.5 \cdot x, x, -0.5 \cdot y\right)
\end{array}
Initial program 99.7%
sub-neg: N/A
distribute-lft-in: N/A
associate-*r*: N/A
*-commutative: N/A
accelerator-lowering-fma.f64: N/A
*-lowering-*.f64: N/A
*-commutative: N/A
neg-mul-1: N/A
associate-*r*: N/A
*-lowering-*.f64: N/A
metadata-eval: 100.0
Applied egg-rr: 100.0%
(FPCore (x y) :precision binary64 (if (<= (* x x) 2e-80) (* -0.5 y) (* x (* 0.5 x))))
double code(double x, double y) {
	/* Regime split: when x*x is below the 2e-80 threshold the quadratic
	   term is negligible, so only -y/2 remains; otherwise use x*(0.5*x). */
	const double xx = x * x;
	double result;
	if (xx <= 2e-80) {
		result = -0.5 * y;
	} else {
		result = x * (0.5 * x);
	}
	return result;
}
! code: -0.5*y when x*x <= 2d-80 (quadratic term negligible),
! otherwise x * (0.5*x).
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8) :: tmp
! Threshold guards the regime where x**2 underflows relative to y.
if ((x * x) <= 2d-80) then
tmp = (-0.5d0) * y
else
tmp = x * (0.5d0 * x)
end if
code = tmp
end function
public static double code(double x, double y) {
    // Tiny-x regime: x*x is below 2e-80, so the quadratic term is
    // negligible and only -y/2 survives.
    if ((x * x) <= 2e-80) {
        return -0.5 * y;
    }
    // Otherwise scale x by one half before the product: x * (0.5*x).
    return x * (0.5 * x);
}
def code(x, y): tmp = 0 if (x * x) <= 2e-80: tmp = -0.5 * y else: tmp = x * (0.5 * x) return tmp
function code(x, y) tmp = 0.0 if (Float64(x * x) <= 2e-80) tmp = Float64(-0.5 * y); else tmp = Float64(x * Float64(0.5 * x)); end return tmp end
function tmp_2 = code(x, y) tmp = 0.0; if ((x * x) <= 2e-80) tmp = -0.5 * y; else tmp = x * (0.5 * x); end tmp_2 = tmp; end
code[x_, y_] := If[LessEqual[N[(x * x), $MachinePrecision], 2e-80], N[(-0.5 * y), $MachinePrecision], N[(x * N[(0.5 * x), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \cdot x \leq 2 \cdot 10^{-80}:\\
\;\;\;\;-0.5 \cdot y\\
\mathbf{else}:\\
\;\;\;\;x \cdot \left(0.5 \cdot x\right)\\
\end{array}
\end{array}
if (*.f64 x x) < 1.99999999999999992e-80: Initial program 100.0%
Taylor expanded in x around 0
*-lowering-*.f64: 89.2
Simplified: 89.2%
if 1.99999999999999992e-80 < (*.f64 x x): Initial program 99.4%
Taylor expanded in x around inf
*-lowering-*.f64: N/A
unpow2: N/A
*-lowering-*.f64: 85.1
Simplified: 85.1%
associate-*r*: N/A
*-lowering-*.f64: N/A
*-lowering-*.f64: 85.7
Applied egg-rr: 85.7%
Final simplification: 87.4%
(FPCore (x y) :precision binary64 (if (<= (* x x) 2e-80) (* -0.5 y) (* 0.5 (* x x))))
double code(double x, double y) {
	/* Below the 2e-80 threshold the x*x term is negligible: return -y/2.
	   Otherwise halve the squared value: 0.5 * (x*x). */
	if ((x * x) <= 2e-80) {
		return -0.5 * y;
	}
	return 0.5 * (x * x);
}
! code: -0.5*y when x*x <= 2d-80 (quadratic term negligible),
! otherwise 0.5 * (x*x).
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8) :: tmp
! Threshold guards the regime where x**2 underflows relative to y.
if ((x * x) <= 2d-80) then
tmp = (-0.5d0) * y
else
tmp = 0.5d0 * (x * x)
end if
code = tmp
end function
public static double code(double x, double y) {
    // Single evaluation of x*x feeds both the branch test and the result;
    // same value as the original's two evaluations.
    final double xx = x * x;
    // Tiny xx: quadratic term vanishes, leaving -y/2.
    return (xx <= 2e-80) ? (-0.5 * y) : (0.5 * xx);
}
def code(x, y): tmp = 0 if (x * x) <= 2e-80: tmp = -0.5 * y else: tmp = 0.5 * (x * x) return tmp
function code(x, y) tmp = 0.0 if (Float64(x * x) <= 2e-80) tmp = Float64(-0.5 * y); else tmp = Float64(0.5 * Float64(x * x)); end return tmp end
function tmp_2 = code(x, y) tmp = 0.0; if ((x * x) <= 2e-80) tmp = -0.5 * y; else tmp = 0.5 * (x * x); end tmp_2 = tmp; end
code[x_, y_] := If[LessEqual[N[(x * x), $MachinePrecision], 2e-80], N[(-0.5 * y), $MachinePrecision], N[(0.5 * N[(x * x), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \cdot x \leq 2 \cdot 10^{-80}:\\
\;\;\;\;-0.5 \cdot y\\
\mathbf{else}:\\
\;\;\;\;0.5 \cdot \left(x \cdot x\right)\\
\end{array}
\end{array}
if (*.f64 x x) < 1.99999999999999992e-80: Initial program 100.0%
Taylor expanded in x around 0
*-lowering-*.f64: 89.2
Simplified: 89.2%
if 1.99999999999999992e-80 < (*.f64 x x): Initial program 99.4%
Taylor expanded in x around inf
*-lowering-*.f64: N/A
unpow2: N/A
*-lowering-*.f64: 85.1
Simplified: 85.1%
(FPCore (x y) :precision binary64 (* 0.5 (fma x x (- y))))
double code(double x, double y) {
return 0.5 * fma(x, x, -y);
}
function code(x, y) return Float64(0.5 * fma(x, x, Float64(-y))) end
code[x_, y_] := N[(0.5 * N[(x * x + (-y)), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
0.5 \cdot \mathsf{fma}\left(x, x, -y\right)
\end{array}
Initial program 99.7%
sub-neg: N/A
accelerator-lowering-fma.f64: N/A
neg-lowering-neg.f64: 99.7
Applied egg-rr: 99.7%
(FPCore (x y) :precision binary64 (* 0.5 (- (* x x) y)))
/* code: Herbie reference expression, 0.5 * (x*x - y). */
double code(double x, double y) {
return 0.5 * ((x * x) - y);
}
! code: 0.5d0 * (x*x - y) in double precision (Herbie reference expression).
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = 0.5d0 * ((x * x) - y)
end function
// code: Herbie reference expression, 0.5 * (x*x - y).
public static double code(double x, double y) {
return 0.5 * ((x * x) - y);
}
def code(x, y): return 0.5 * ((x * x) - y)
function code(x, y) return Float64(0.5 * Float64(Float64(x * x) - y)) end
function tmp = code(x, y) tmp = 0.5 * ((x * x) - y); end
code[x_, y_] := N[(0.5 * N[(N[(x * x), $MachinePrecision] - y), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
0.5 \cdot \left(x \cdot x - y\right)
\end{array}
Initial program 99.7%
(FPCore (x y) :precision binary64 (* -0.5 y))
double code(double x, double y) {
	/* Taylor-truncated alternative: only the -y/2 term survives; x is
	   intentionally unused. Operand order swapped (FP multiply commutes). */
	(void) x;
	return y * -0.5;
}
! code: Taylor-truncated alternative; only the -0.5*y term remains (x unused).
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = (-0.5d0) * y
end function
public static double code(double x, double y) {
    // Taylor-truncated alternative: only -y/2 remains; x is unused.
    // Operand order swapped — FP multiplication is commutative.
    return y * -0.5;
}
def code(x, y): return -0.5 * y
function code(x, y) return Float64(-0.5 * y) end
function tmp = code(x, y) tmp = -0.5 * y; end
code[x_, y_] := N[(-0.5 * y), $MachinePrecision]
\begin{array}{l}
\\
-0.5 \cdot y
\end{array}
Initial program 99.7%
Taylor expanded in x around 0
*-lowering-*.f64: 50.3
Simplified: 50.3%
(FPCore (x y) :precision binary64 0.5)
double code(double x, double y) {
	/* Herbie collapsed the whole expression to the constant 0.5;
	   both arguments are intentionally unused. */
	(void) x;
	(void) y;
	return 0.5;
}
! code: constant alternative; expression collapsed to 0.5 (arguments unused).
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = 0.5d0
end function
// code: constant alternative; expression collapsed to 0.5 (arguments unused).
public static double code(double x, double y) {
return 0.5;
}
def code(x, y): return 0.5
function code(x, y) return 0.5 end
function tmp = code(x, y) tmp = 0.5; end
code[x_, y_] := 0.5
\begin{array}{l}
\\
0.5
\end{array}
Initial program 99.7%
Taylor expanded in x around inf
*-lowering-*.f64: N/A
unpow2: N/A
*-lowering-*.f64: 51.0
Simplified: 51.0%
associate-*r*: N/A
*-lowering-*.f64: N/A
*-lowering-*.f64: 51.3
Applied egg-rr: 51.3%
Applied egg-rr: 3.8%
herbie shell --seed 2024204
(FPCore (x y)
:name "System.Random.MWC.Distributions:standard from mwc-random-0.13.3.2"
:precision binary64
(* 0.5 (- (* x x) y)))