
(FPCore (x y) :precision binary64 (* (* (- x (/ 16.0 116.0)) 3.0) y))
/* Evaluate ((x - 16/116) * 3) * y in double precision. */
double code(double x, double y) {
    double shifted = x - (16.0 / 116.0);
    return (shifted * 3.0) * y;
}
! Evaluate ((x - 16/116) * 3) * y in double precision.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8) :: shifted
! Same evaluation order as the reference expression:
! subtract the offset first, scale by 3, then multiply by y.
shifted = x - (16.0d0 / 116.0d0)
code = (shifted * 3.0d0) * y
end function
/** Evaluates ((x - 16/116) * 3) * y in double precision. */
public static double code(double x, double y) {
    final double offset = 16.0 / 116.0;
    // Left-to-right: ((x - offset) * 3.0) * y, same order as the reference.
    return (x - offset) * 3.0 * y;
}
def code(x, y):
    """Evaluate ((x - 16/116) * 3) * y in double precision."""
    offset = 16.0 / 116.0
    # Left-to-right evaluation: ((x - offset) * 3.0) * y.
    return (x - offset) * 3.0 * y
# Evaluate ((x - 16/116) * 3) * y, rounding every step to Float64.
function code(x, y)
    offset = Float64(16.0 / 116.0)
    scaled = Float64(Float64(x - offset) * 3.0)
    return Float64(scaled * y)
end
% Evaluate ((x - 16/116) * 3) * y in double precision.
function tmp = code(x, y)
    offset = 16.0 / 116.0;
    tmp = ((x - offset) * 3.0) * y;
end
(* Evaluate ((x - 16/116) * 3) * y, rounding each intermediate to $MachinePrecision. *)
code[x_, y_] := N[(N[(N[(x - N[(16.0 / 116.0), $MachinePrecision]), $MachinePrecision] * 3.0), $MachinePrecision] * y), $MachinePrecision]
\begin{array}{l}
\\
\left(\left(x - \frac{16}{116}\right) \cdot 3\right) \cdot y
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 6 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x y) :precision binary64 (* (* (- x (/ 16.0 116.0)) 3.0) y))
double code(double x, double y) {
return ((x - (16.0 / 116.0)) * 3.0) * y;
}
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = ((x - (16.0d0 / 116.0d0)) * 3.0d0) * y
end function
public static double code(double x, double y) {
return ((x - (16.0 / 116.0)) * 3.0) * y;
}
def code(x, y):
    """Evaluate ((x - 16/116) * 3) * y in double precision."""
    offset = 16.0 / 116.0
    # Left-to-right evaluation: ((x - offset) * 3.0) * y.
    return (x - offset) * 3.0 * y
function code(x, y) return Float64(Float64(Float64(x - Float64(16.0 / 116.0)) * 3.0) * y) end
function tmp = code(x, y) tmp = ((x - (16.0 / 116.0)) * 3.0) * y; end
code[x_, y_] := N[(N[(N[(x - N[(16.0 / 116.0), $MachinePrecision]), $MachinePrecision] * 3.0), $MachinePrecision] * y), $MachinePrecision]
\begin{array}{l}
\\
\left(\left(x - \frac{16}{116}\right) \cdot 3\right) \cdot y
\end{array}
(FPCore (x y) :precision binary64 (* (fma x 3.0 -0.41379310344827586) y))
/* Herbie alternative: (x - 16/116) * 3 folded into a single fused
 * multiply-add.  The constant -0.41379310344827586 is -(16.0/116.0)*3.0
 * pre-evaluated (see the fma-define / metadata-eval derivation below). */
double code(double x, double y) {
return fma(x, 3.0, -0.41379310344827586) * y;
}
function code(x, y) return Float64(fma(x, 3.0, -0.41379310344827586) * y) end
code[x_, y_] := N[(N[(x * 3.0 + -0.41379310344827586), $MachinePrecision] * y), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(x, 3, -0.41379310344827586\right) \cdot y
\end{array}
Initial program 99.8%
*-commutative 99.8%
sub-neg 99.8%
distribute-lft-in 99.8%
*-commutative 99.8%
fma-define 99.8%
metadata-eval 99.8%
metadata-eval 99.8%
metadata-eval 99.8%
Simplified 99.8%
Final simplification 99.8%
(FPCore (x y) :precision binary64 (if (or (<= x -0.14) (not (<= x 0.136))) (* 3.0 (* x y)) (* -0.41379310344827586 y)))
/* Branch-split Herbie alternative.  Per the derivation below, away from
 * x ~ 0 the expression is Taylor-expanded around inf (offset dropped),
 * and inside [-0.14, 0.136] it is expanded around 0 (linear term dropped,
 * leaving the constant -3*16/116 = -0.41379310344827586 times y). */
double code(double x, double y) {
double tmp;
if ((x <= -0.14) || !(x <= 0.136)) {
/* |x| large: the 16/116 offset is negligible, keep 3*x*y. */
tmp = 3.0 * (x * y);
} else {
/* |x| small: product collapses to a constant multiple of y. */
tmp = -0.41379310344827586 * y;
}
return tmp;
}
! Branch-split Herbie alternative: outside [-0.14, 0.136] the 16/116
! offset is dropped (Taylor expansion around inf); inside, the product
! collapses to a constant -3*16/116 = -0.41379310344827586 times y
! (Taylor expansion around 0).  See the derivation notes below.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8) :: tmp
if ((x <= (-0.14d0)) .or. (.not. (x <= 0.136d0))) then
! |x| large: keep 3*x*y, offset negligible.
tmp = 3.0d0 * (x * y)
else
! |x| small: constant slope in y.
tmp = (-0.41379310344827586d0) * y
end if
code = tmp
end function
public static double code(double x, double y) {
double tmp;
if ((x <= -0.14) || !(x <= 0.136)) {
tmp = 3.0 * (x * y);
} else {
tmp = -0.41379310344827586 * y;
}
return tmp;
}
def code(x, y):
    """Branch-split evaluation of 3*(x - 16/116)*y.

    Outside [-0.14, 0.136] the 16/116 offset is dropped (Taylor
    expansion around inf); inside, the product collapses to the
    constant -3*16/116 = -0.41379310344827586 times y (Taylor
    expansion around 0).

    Note: the original report line jammed the whole if/else onto one
    line, which is not valid Python syntax; this is the same logic
    laid out as legal multi-line code.
    """
    if (x <= -0.14) or not (x <= 0.136):
        tmp = 3.0 * (x * y)
    else:
        tmp = -0.41379310344827586 * y
    return tmp
function code(x, y) tmp = 0.0 if ((x <= -0.14) || !(x <= 0.136)) tmp = Float64(3.0 * Float64(x * y)); else tmp = Float64(-0.41379310344827586 * y); end return tmp end
function tmp_2 = code(x, y) tmp = 0.0; if ((x <= -0.14) || ~((x <= 0.136))) tmp = 3.0 * (x * y); else tmp = -0.41379310344827586 * y; end tmp_2 = tmp; end
code[x_, y_] := If[Or[LessEqual[x, -0.14], N[Not[LessEqual[x, 0.136]], $MachinePrecision]], N[(3.0 * N[(x * y), $MachinePrecision]), $MachinePrecision], N[(-0.41379310344827586 * y), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -0.14 \lor \neg \left(x \leq 0.136\right):\\
\;\;\;\;3 \cdot \left(x \cdot y\right)\\
\mathbf{else}:\\
\;\;\;\;-0.41379310344827586 \cdot y\\
\end{array}
\end{array}
if x < -0.14000000000000001 or 0.13600000000000001 < x: Initial program 99.7%
Taylor expanded in x around inf 97.8%
*-commutative 97.8%
Simplified 97.8%
if -0.14000000000000001 < x < 0.13600000000000001: Initial program 99.9%
Taylor expanded in x around 0 98.6%
Final simplification 98.2%
(FPCore (x y) :precision binary64 (if (or (<= x -0.14) (not (<= x 0.136))) (* y (* x 3.0)) (* -0.41379310344827586 y)))
double code(double x, double y) {
double tmp;
if ((x <= -0.14) || !(x <= 0.136)) {
tmp = y * (x * 3.0);
} else {
tmp = -0.41379310344827586 * y;
}
return tmp;
}
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8) :: tmp
if ((x <= (-0.14d0)) .or. (.not. (x <= 0.136d0))) then
tmp = y * (x * 3.0d0)
else
tmp = (-0.41379310344827586d0) * y
end if
code = tmp
end function
public static double code(double x, double y) {
double tmp;
if ((x <= -0.14) || !(x <= 0.136)) {
tmp = y * (x * 3.0);
} else {
tmp = -0.41379310344827586 * y;
}
return tmp;
}
def code(x, y):
    """Branch-split evaluation of 3*(x - 16/116)*y (y-first variant).

    Outside [-0.14, 0.136]: y * (x * 3.0), offset dropped (Taylor
    around inf).  Inside: constant -3*16/116 = -0.41379310344827586
    times y (Taylor around 0).

    Note: the original report line jammed the whole if/else onto one
    line, which is not valid Python syntax; this is the same logic
    laid out as legal multi-line code.
    """
    if (x <= -0.14) or not (x <= 0.136):
        tmp = y * (x * 3.0)
    else:
        tmp = -0.41379310344827586 * y
    return tmp
function code(x, y) tmp = 0.0 if ((x <= -0.14) || !(x <= 0.136)) tmp = Float64(y * Float64(x * 3.0)); else tmp = Float64(-0.41379310344827586 * y); end return tmp end
function tmp_2 = code(x, y) tmp = 0.0; if ((x <= -0.14) || ~((x <= 0.136))) tmp = y * (x * 3.0); else tmp = -0.41379310344827586 * y; end tmp_2 = tmp; end
code[x_, y_] := If[Or[LessEqual[x, -0.14], N[Not[LessEqual[x, 0.136]], $MachinePrecision]], N[(y * N[(x * 3.0), $MachinePrecision]), $MachinePrecision], N[(-0.41379310344827586 * y), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -0.14 \lor \neg \left(x \leq 0.136\right):\\
\;\;\;\;y \cdot \left(x \cdot 3\right)\\
\mathbf{else}:\\
\;\;\;\;-0.41379310344827586 \cdot y\\
\end{array}
\end{array}
if x < -0.14000000000000001 or 0.13600000000000001 < x: Initial program 99.7%
Taylor expanded in x around inf 97.9%
if -0.14000000000000001 < x < 0.13600000000000001: Initial program 99.9%
Taylor expanded in x around 0 98.6%
Final simplification 98.3%
(FPCore (x y) :precision binary64 (* 3.0 (* y (- x 0.13793103448275862))))
double code(double x, double y) {
return 3.0 * (y * (x - 0.13793103448275862));
}
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = 3.0d0 * (y * (x - 0.13793103448275862d0))
end function
public static double code(double x, double y) {
return 3.0 * (y * (x - 0.13793103448275862));
}
def code(x, y): return 3.0 * (y * (x - 0.13793103448275862))
function code(x, y) return Float64(3.0 * Float64(y * Float64(x - 0.13793103448275862))) end
function tmp = code(x, y) tmp = 3.0 * (y * (x - 0.13793103448275862)); end
code[x_, y_] := N[(3.0 * N[(y * N[(x - 0.13793103448275862), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
3 \cdot \left(y \cdot \left(x - 0.13793103448275862\right)\right)
\end{array}
Initial program 99.8%
Taylor expanded in y around 0 99.6%
Final simplification 99.6%
(FPCore (x y) :precision binary64 (* y (* 3.0 (- x 0.13793103448275862))))
double code(double x, double y) {
return y * (3.0 * (x - 0.13793103448275862));
}
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = y * (3.0d0 * (x - 0.13793103448275862d0))
end function
public static double code(double x, double y) {
return y * (3.0 * (x - 0.13793103448275862));
}
def code(x, y): return y * (3.0 * (x - 0.13793103448275862))
function code(x, y) return Float64(y * Float64(3.0 * Float64(x - 0.13793103448275862))) end
function tmp = code(x, y) tmp = y * (3.0 * (x - 0.13793103448275862)); end
code[x_, y_] := N[(y * N[(3.0 * N[(x - 0.13793103448275862), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
y \cdot \left(3 \cdot \left(x - 0.13793103448275862\right)\right)
\end{array}
Initial program 99.8%
Final simplification 99.8%
(FPCore (x y) :precision binary64 (* -0.41379310344827586 y))
/* Constant-slope Herbie alternative: x is intentionally unused
 * (dropped by the Taylor expansion around x = 0 noted below). */
double code(double x, double y) {
    const double slope = -0.41379310344827586;
    (void)x; /* silence unused-parameter warnings */
    return y * slope;
}
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = (-0.41379310344827586d0) * y
end function
public static double code(double x, double y) {
return -0.41379310344827586 * y;
}
def code(x, y):
    """Constant-slope approximation -0.41379310344827586*y (x unused)."""
    slope = -0.41379310344827586
    return y * slope
function code(x, y) return Float64(-0.41379310344827586 * y) end
function tmp = code(x, y) tmp = -0.41379310344827586 * y; end
code[x_, y_] := N[(-0.41379310344827586 * y), $MachinePrecision]
\begin{array}{l}
\\
-0.41379310344827586 \cdot y
\end{array}
Initial program 99.8%
Taylor expanded in x around 0 51.8%
Final simplification 51.8%
(FPCore (x y) :precision binary64 (* y (- (* x 3.0) 0.41379310344827586)))
double code(double x, double y) {
return y * ((x * 3.0) - 0.41379310344827586);
}
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = y * ((x * 3.0d0) - 0.41379310344827586d0)
end function
public static double code(double x, double y) {
return y * ((x * 3.0) - 0.41379310344827586);
}
def code(x, y): return y * ((x * 3.0) - 0.41379310344827586)
function code(x, y) return Float64(y * Float64(Float64(x * 3.0) - 0.41379310344827586)) end
function tmp = code(x, y) tmp = y * ((x * 3.0) - 0.41379310344827586); end
code[x_, y_] := N[(y * N[(N[(x * 3.0), $MachinePrecision] - 0.41379310344827586), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
y \cdot \left(x \cdot 3 - 0.41379310344827586\right)
\end{array}
herbie shell --seed 2024043
(FPCore (x y)
:name "Data.Colour.CIE:cieLAB from colour-2.3.3, A"
:precision binary64
:alt
(* y (- (* x 3.0) 0.41379310344827586))
(* (* (- x (/ 16.0 116.0)) 3.0) y))