
(FPCore (x y) :precision binary64 (* 200.0 (- x y)))
/* 200 * (x - y): scale the difference of the inputs (double precision). */
double code(double x, double y) {
    double diff = x - y;
    return diff * 200.0;
}
!> 200*(x - y): direct rendering of the FPCore expression above.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = 200.0d0 * (x - y)
end function
/** Returns 200 * (x - y) in double precision. */
public static double code(double x, double y) {
    final double diff = x - y;
    return 200.0 * diff;
}
def code(x, y):
    """Return 200 * (x - y) in double precision."""
    return (x - y) * 200.0
# 200*(x - y): direct rendering of the FPCore expression above.
function code(x, y) return Float64(200.0 * Float64(x - y)) end
% 200*(x - y): direct rendering of the FPCore expression above.
function tmp = code(x, y) tmp = 200.0 * (x - y); end
(* 200*(x - y) at $MachinePrecision: direct rendering of the FPCore expression above. *)
code[x_, y_] := N[(200.0 * N[(x - y), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
200 \cdot \left(x - y\right)
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 6 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x y) :precision binary64 (* 200.0 (- x y)))
// 200 * (x - y): direct rendering of the FPCore expression above.
double code(double x, double y) {
return 200.0 * (x - y);
}
!> 200*(x - y): direct rendering of the FPCore expression above.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = 200.0d0 * (x - y)
end function
// 200 * (x - y): direct rendering of the FPCore expression above.
public static double code(double x, double y) {
return 200.0 * (x - y);
}
def code(x, y):
    # 200 * (x - y), evaluated in double precision.
    return 200.0 * (x - y)
# 200*(x - y): direct rendering of the FPCore expression above.
function code(x, y) return Float64(200.0 * Float64(x - y)) end
% 200*(x - y): direct rendering of the FPCore expression above.
function tmp = code(x, y) tmp = 200.0 * (x - y); end
(* 200*(x - y) at $MachinePrecision: direct rendering of the FPCore expression above. *)
code[x_, y_] := N[(200.0 * N[(x - y), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
200 \cdot \left(x - y\right)
\end{array}
(FPCore (x y) :precision binary64 (fma x 200.0 (* -200.0 y)))
// fma(x, 200, -200*y): fused multiply-add computes x*200 + (-200*y) with a single rounding.
double code(double x, double y) {
return fma(x, 200.0, (-200.0 * y));
}
# fma(x, 200, -200*y): fused multiply-add form of the rewritten expression.
function code(x, y) return fma(x, 200.0, Float64(-200.0 * y)) end
(* fma alternative rendered as a plain multiply-add, as generated by Herbie for Mathematica. *)
code[x_, y_] := N[(x * 200.0 + N[(-200.0 * y), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(x, 200, -200 \cdot y\right)
\end{array}
Initial program 99.9%
Taylor expanded in x around 0 100.0%
*-commutative 100.0%
fma-def 100.0%
Applied egg-rr 100.0%
Final simplification 100.0%
(FPCore (x y) :precision binary64 (fma -200.0 y (* x 200.0)))
// fma(-200, y, x*200): commuted fused form; computes -200*y + (x*200) with a single rounding.
double code(double x, double y) {
return fma(-200.0, y, (x * 200.0));
}
# fma(-200, y, x*200): commuted fused multiply-add form.
function code(x, y) return fma(-200.0, y, Float64(x * 200.0)) end
(* Commuted fma alternative rendered as a plain multiply-add, as generated by Herbie for Mathematica. *)
code[x_, y_] := N[(-200.0 * y + N[(x * 200.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(-200, y, x \cdot 200\right)
\end{array}
Initial program 99.9%
Taylor expanded in x around 0 100.0%
+-commutative 100.0%
fma-def 100.0%
Simplified 100.0%
Final simplification 100.0%
(FPCore (x y)
:precision binary64
(if (or (<= x -34.0)
(and (not (<= x 4.6e+26)) (or (<= x 1.7e+78) (not (<= x 1.76e+108)))))
(* x 200.0)
(* -200.0 y)))
/* Regime-split rewrite of 200*(x - y): in the x-ranges selected below the
 * x term is used directly, otherwise the -200*y term is used (per the
 * derivation trace that follows this alternative). */
double code(double x, double y) {
    int use_x_term = (x <= -34.0)
        || (!(x <= 4.6e+26) && ((x <= 1.7e+78) || !(x <= 1.76e+108)));
    return use_x_term ? x * 200.0 : -200.0 * y;
}
!> Regime-split rewrite of 200*(x - y): x*200 in the outer x-ranges, -200*y otherwise.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8) :: tmp
! Fully parenthesized: .and. binds tighter than .or. in Fortran, so the
! previous unparenthesized form parsed as A .or. (B .and. C) .or. D instead
! of the intended A .or. (B .and. (C .or. D)) used by every other language
! rendering of this alternative. (The two happen to agree only because the
! thresholds are ordered; the grouping is restored to match the FPCore.)
if ((x <= (-34.0d0)) .or. ((.not. (x <= 4.6d+26)) .and. ((x <= 1.7d+78) .or. (.not. (x <= 1.76d+108))))) then
tmp = x * 200.0d0
else
tmp = (-200.0d0) * y
end if
code = tmp
end function
/** Regime-split rewrite of 200*(x - y): x*200 in the outer x-ranges, -200*y otherwise. */
public static double code(double x, double y) {
    final boolean useXTerm = (x <= -34.0)
            || (!(x <= 4.6e+26) && ((x <= 1.7e+78) || !(x <= 1.76e+108)));
    return useXTerm ? x * 200.0 : -200.0 * y;
}
def code(x, y):
    """Regime-split rewrite of 200*(x - y): x*200 in the outer x-ranges, -200*y otherwise.

    The original line collapsed the whole if/else body after the ``def`` colon,
    which is not valid Python syntax; restored to the block form used by the
    other language renderings of this alternative.
    """
    tmp = 0
    if (x <= -34.0) or (not (x <= 4.6e+26) and ((x <= 1.7e+78) or not (x <= 1.76e+108))):
        tmp = x * 200.0
    else:
        tmp = -200.0 * y
    return tmp
# Regime-split rewrite of 200*(x - y): x*200 in the outer x-ranges, -200*y otherwise.
function code(x, y)
    tmp = 0.0
    if (x <= -34.0) || (!(x <= 4.6e+26) && ((x <= 1.7e+78) || !(x <= 1.76e+108)))
        tmp = Float64(x * 200.0)
    else
        tmp = Float64(-200.0 * y)
    end
    return tmp
end
% Regime-split rewrite of 200*(x - y): x*200 in the outer x-ranges, -200*y otherwise.
function tmp_2 = code(x, y)
    if ((x <= -34.0) || (~(x <= 4.6e+26) && ((x <= 1.7e+78) || ~(x <= 1.76e+108))))
        tmp_2 = x * 200.0;
    else
        tmp_2 = -200.0 * y;
    end
end
(* Regime-split rendering of the conditional alternative; the N[Not[...], $MachinePrecision] wrapping of the boolean tests is as generated by Herbie. *)
code[x_, y_] := If[Or[LessEqual[x, -34.0], And[N[Not[LessEqual[x, 4.6e+26]], $MachinePrecision], Or[LessEqual[x, 1.7e+78], N[Not[LessEqual[x, 1.76e+108]], $MachinePrecision]]]], N[(x * 200.0), $MachinePrecision], N[(-200.0 * y), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -34 \lor \neg \left(x \leq 4.6 \cdot 10^{+26}\right) \land \left(x \leq 1.7 \cdot 10^{+78} \lor \neg \left(x \leq 1.76 \cdot 10^{+108}\right)\right):\\
\;\;\;\;x \cdot 200\\
\mathbf{else}:\\
\;\;\;\;-200 \cdot y\\
\end{array}
\end{array}
if x < -34 or 4.6000000000000001e26 < x < 1.70000000000000004e78 or 1.76e108 < x Initial program 99.9%
Taylor expanded in x around inf 83.7%
if -34 < x < 4.6000000000000001e26 or 1.70000000000000004e78 < x < 1.76e108 Initial program 99.9%
Taylor expanded in x around 0 77.3%
Final simplification 80.3%
(FPCore (x y) :precision binary64 (+ (* -200.0 y) (* x 200.0)))
// (-200*y) + (x*200): distributed form of 200*(x - y).
double code(double x, double y) {
return (-200.0 * y) + (x * 200.0);
}
!> (-200*y) + (x*200): distributed form of 200*(x - y).
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = ((-200.0d0) * y) + (x * 200.0d0)
end function
// (-200*y) + (x*200): distributed form of 200*(x - y).
public static double code(double x, double y) {
return (-200.0 * y) + (x * 200.0);
}
# (-200*y) + (x*200): distributed form of 200*(x - y).
def code(x, y): return (-200.0 * y) + (x * 200.0)
# (-200*y) + (x*200): distributed form of 200*(x - y).
function code(x, y) return Float64(Float64(-200.0 * y) + Float64(x * 200.0)) end
% (-200*y) + (x*200): distributed form of 200*(x - y).
function tmp = code(x, y) tmp = (-200.0 * y) + (x * 200.0); end
(* (-200*y) + (x*200): distributed form of 200*(x - y) at $MachinePrecision. *)
code[x_, y_] := N[(N[(-200.0 * y), $MachinePrecision] + N[(x * 200.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
-200 \cdot y + x \cdot 200
\end{array}
Initial program 99.9%
Taylor expanded in x around 0 100.0%
Final simplification 100.0%
(FPCore (x y) :precision binary64 (* 200.0 (- x y)))
// 200 * (x - y): alternative identical to the initial program.
double code(double x, double y) {
return 200.0 * (x - y);
}
!> 200*(x - y): alternative identical to the initial program.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = 200.0d0 * (x - y)
end function
// 200 * (x - y): alternative identical to the initial program.
public static double code(double x, double y) {
return 200.0 * (x - y);
}
# 200*(x - y): alternative identical to the initial program.
def code(x, y): return 200.0 * (x - y)
# 200*(x - y): alternative identical to the initial program.
function code(x, y) return Float64(200.0 * Float64(x - y)) end
% 200*(x - y): alternative identical to the initial program.
function tmp = code(x, y) tmp = 200.0 * (x - y); end
(* 200*(x - y) at $MachinePrecision: alternative identical to the initial program. *)
code[x_, y_] := N[(200.0 * N[(x - y), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
200 \cdot \left(x - y\right)
\end{array}
Initial program 99.9%
Final simplification 99.9%
(FPCore (x y) :precision binary64 (* -200.0 y))
// -200*y only: Taylor-truncated alternative that drops the x term
// (accurate only where x is negligible — see the 50.0% note below).
double code(double x, double y) {
return -200.0 * y;
}
!> -200*y only: Taylor-truncated alternative that drops the x term.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = (-200.0d0) * y
end function
// -200*y only: Taylor-truncated alternative that drops the x term.
public static double code(double x, double y) {
return -200.0 * y;
}
# -200*y only: Taylor-truncated alternative that drops the x term.
def code(x, y): return -200.0 * y
# -200*y only: Taylor-truncated alternative that drops the x term.
function code(x, y) return Float64(-200.0 * y) end
% -200*y only: Taylor-truncated alternative that drops the x term.
function tmp = code(x, y) tmp = -200.0 * y; end
(* -200*y only: Taylor-truncated alternative that drops the x term. *)
code[x_, y_] := N[(-200.0 * y), $MachinePrecision]
\begin{array}{l}
\\
-200 \cdot y
\end{array}
Initial program 99.9%
Taylor expanded in x around 0 50.0%
Final simplification 50.0%
herbie shell --seed 2023195
(FPCore (x y)
:name "Data.Colour.CIE:cieLABView from colour-2.3.3, C"
:precision binary64
(* 200.0 (- x y)))