
(FPCore (x y) :precision binary64 (* (* (- x (/ 16.0 116.0)) 3.0) y))
/* Herbie-generated binary64 baseline: ((x - 16/116) * 3) * y.
 * 16/116 is the offset from the source expression (the FPCore problem
 * is named "Data.Colour.CIE:cieLAB" — presumably the L*a*b* transform). */
double code(double x, double y) {
return ((x - (16.0 / 116.0)) * 3.0) * y;
}
! Herbie-generated binary64 baseline: ((x - 16/116) * 3) * y.
! NOTE(review): generated report code; the d0 suffixes pin every literal
! to double precision, matching the :precision binary64 FPCore spec.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = ((x - (16.0d0 / 116.0d0)) * 3.0d0) * y
end function
/**
 * Herbie-generated binary64 baseline: ((x - 16/116) * 3) * y.
 * Direct translation of the FPCore expression above.
 */
public static double code(double x, double y) {
return ((x - (16.0 / 116.0)) * 3.0) * y;
}
def code(x, y):
    """Binary64 baseline: ((x - 16/116) * 3) * y, evaluated left to right."""
    shifted = x - 16.0 / 116.0
    return shifted * 3.0 * y
function code(x, y) return Float64(Float64(Float64(x - Float64(16.0 / 116.0)) * 3.0) * y) end
function tmp = code(x, y) tmp = ((x - (16.0 / 116.0)) * 3.0) * y; end
code[x_, y_] := N[(N[(N[(x - N[(16.0 / 116.0), $MachinePrecision]), $MachinePrecision] * 3.0), $MachinePrecision] * y), $MachinePrecision]
\begin{array}{l}
\\
\left(\left(x - \frac{16}{116}\right) \cdot 3\right) \cdot y
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 6 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x y) :precision binary64 (* (* (- x (/ 16.0 116.0)) 3.0) y))
double code(double x, double y) {
return ((x - (16.0 / 116.0)) * 3.0) * y;
}
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = ((x - (16.0d0 / 116.0d0)) * 3.0d0) * y
end function
public static double code(double x, double y) {
return ((x - (16.0 / 116.0)) * 3.0) * y;
}
def code(x, y):
    """Binary64 baseline: ((x - 16/116) * 3) * y."""
    term = (x - 16.0 / 116.0) * 3.0
    return term * y
function code(x, y) return Float64(Float64(Float64(x - Float64(16.0 / 116.0)) * 3.0) * y) end
function tmp = code(x, y) tmp = ((x - (16.0 / 116.0)) * 3.0) * y; end
code[x_, y_] := N[(N[(N[(x - N[(16.0 / 116.0), $MachinePrecision]), $MachinePrecision] * 3.0), $MachinePrecision] * y), $MachinePrecision]
\begin{array}{l}
\\
\left(\left(x - \frac{16}{116}\right) \cdot 3\right) \cdot y
\end{array}
(FPCore (x y) :precision binary64 (fma (* 3.0 y) -0.13793103448275862 (* (* 3.0 y) x)))
/* FMA-based Herbie alternative: fma(3y, -16/116, (3y)*x), i.e.
 * (3y)*(x - 16/116) distributed so the final add rounds only once.
 * -0.13793103448275862 is the double nearest -16/116. */
double code(double x, double y) {
return fma((3.0 * y), -0.13793103448275862, ((3.0 * y) * x));
}
function code(x, y) return fma(Float64(3.0 * y), -0.13793103448275862, Float64(Float64(3.0 * y) * x)) end
code[x_, y_] := N[(N[(3.0 * y), $MachinePrecision] * -0.13793103448275862 + N[(N[(3.0 * y), $MachinePrecision] * x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(3 \cdot y, -0.13793103448275862, \left(3 \cdot y\right) \cdot x\right)
\end{array}
Initial program 99.4%
associate-*r* 99.6%
*-commutative 99.6%
sub-neg 99.6%
metadata-eval 99.6%
metadata-eval 99.6%
+-commutative 99.6%
distribute-lft-in 99.6%
fma-define 99.6%
Applied egg-rr 99.6%
(FPCore (x y) :precision binary64 (if (or (<= x -0.135) (not (<= x 0.14))) (* (* 3.0 y) x) (* y -0.41379310344827586)))
/* Branching Herbie alternative: outside [-0.135, 0.14] use the regrouped
 * product (3y)*x (Taylor expansion around x = inf); inside, the offset
 * term dominates and the expression collapses to the constant slope
 * -0.41379310344827586 (= -3 * 16/116, Taylor expansion around x = 0). */
double code(double x, double y) {
double tmp;
if ((x <= -0.135) || !(x <= 0.14)) {
tmp = (3.0 * y) * x;
} else {
tmp = y * -0.41379310344827586;
}
return tmp;
}
! Branching Herbie alternative: outside [-0.135, 0.14] evaluate the
! regrouped product (3*y)*x; inside, use the constant slope
! -0.41379310344827586 (= -3 * 16/116, Taylor expansion around x = 0).
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8) :: tmp
if ((x <= (-0.135d0)) .or. (.not. (x <= 0.14d0))) then
tmp = (3.0d0 * y) * x
else
tmp = y * (-0.41379310344827586d0)
end if
code = tmp
end function
public static double code(double x, double y) {
double tmp;
if ((x <= -0.135) || !(x <= 0.14)) {
tmp = (3.0 * y) * x;
} else {
tmp = y * -0.41379310344827586;
}
return tmp;
}
def code(x, y):
    """Branching Herbie alternative of ((x - 16/116) * 3) * y.

    Outside [-0.135, 0.14] the regrouped product (3*y)*x is used
    (Taylor expansion around x = inf); inside, the expression collapses
    to y * -0.41379310344827586 (= -3 * 16/116, expansion around x = 0).

    Fix: the original line had its newlines stripped, fusing the
    statements into one syntactically invalid line; restored block form.
    """
    tmp = 0
    if (x <= -0.135) or not (x <= 0.14):
        tmp = (3.0 * y) * x
    else:
        tmp = y * -0.41379310344827586
    return tmp
# Branching Herbie alternative: (3y)*x outside [-0.135, 0.14],
# constant slope y * -0.41379310344827586 inside.
# Fix: original line had its newlines stripped (`tmp = 0.0 if ...` is
# not valid Julia); restored block form.
function code(x, y)
	tmp = 0.0
	if ((x <= -0.135) || !(x <= 0.14))
		tmp = Float64(Float64(3.0 * y) * x)
	else
		tmp = Float64(y * -0.41379310344827586)
	end
	return tmp
end
function tmp_2 = code(x, y) tmp = 0.0; if ((x <= -0.135) || ~((x <= 0.14))) tmp = (3.0 * y) * x; else tmp = y * -0.41379310344827586; end tmp_2 = tmp; end
code[x_, y_] := If[Or[LessEqual[x, -0.135], N[Not[LessEqual[x, 0.14]], $MachinePrecision]], N[(N[(3.0 * y), $MachinePrecision] * x), $MachinePrecision], N[(y * -0.41379310344827586), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -0.135 \lor \neg \left(x \leq 0.14\right):\\
\;\;\;\;\left(3 \cdot y\right) \cdot x\\
\mathbf{else}:\\
\;\;\;\;y \cdot -0.41379310344827586\\
\end{array}
\end{array}
if x < -0.13500000000000001 or 0.14000000000000001 < x:
Initial program 98.9%
Taylor expanded in x around inf 98.2%
*-commutative 98.2%
associate-*r* 98.3%
Simplified 98.3%
if -0.13500000000000001 < x < 0.14000000000000001:
Initial program 99.8%
Taylor expanded in x around 0 97.9%
Final simplification 98.1%
(FPCore (x y) :precision binary64 (if (or (<= x -0.135) (not (<= x 0.14))) (* 3.0 (* y x)) (* y -0.41379310344827586)))
/* Branching Herbie alternative, variant grouping: 3*(y*x) outside
 * [-0.135, 0.14] (Taylor expansion around x = inf), constant slope
 * -0.41379310344827586 (= -3 * 16/116) inside. */
double code(double x, double y) {
double tmp;
if ((x <= -0.135) || !(x <= 0.14)) {
tmp = 3.0 * (y * x);
} else {
tmp = y * -0.41379310344827586;
}
return tmp;
}
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8) :: tmp
if ((x <= (-0.135d0)) .or. (.not. (x <= 0.14d0))) then
tmp = 3.0d0 * (y * x)
else
tmp = y * (-0.41379310344827586d0)
end if
code = tmp
end function
public static double code(double x, double y) {
double tmp;
if ((x <= -0.135) || !(x <= 0.14)) {
tmp = 3.0 * (y * x);
} else {
tmp = y * -0.41379310344827586;
}
return tmp;
}
def code(x, y):
    """Branching Herbie alternative (variant grouping 3*(y*x)).

    Outside [-0.135, 0.14] evaluates 3*(y*x); inside, collapses to
    y * -0.41379310344827586 (= -3 * 16/116).

    Fix: the original line had its newlines stripped, fusing the
    statements into one syntactically invalid line; restored block form.
    """
    tmp = 0
    if (x <= -0.135) or not (x <= 0.14):
        tmp = 3.0 * (y * x)
    else:
        tmp = y * -0.41379310344827586
    return tmp
# Branching Herbie alternative (variant grouping 3*(y*x)); constant
# slope y * -0.41379310344827586 inside [-0.135, 0.14].
# Fix: original line had its newlines stripped (`tmp = 0.0 if ...` is
# not valid Julia); restored block form.
function code(x, y)
	tmp = 0.0
	if ((x <= -0.135) || !(x <= 0.14))
		tmp = Float64(3.0 * Float64(y * x))
	else
		tmp = Float64(y * -0.41379310344827586)
	end
	return tmp
end
function tmp_2 = code(x, y) tmp = 0.0; if ((x <= -0.135) || ~((x <= 0.14))) tmp = 3.0 * (y * x); else tmp = y * -0.41379310344827586; end tmp_2 = tmp; end
code[x_, y_] := If[Or[LessEqual[x, -0.135], N[Not[LessEqual[x, 0.14]], $MachinePrecision]], N[(3.0 * N[(y * x), $MachinePrecision]), $MachinePrecision], N[(y * -0.41379310344827586), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -0.135 \lor \neg \left(x \leq 0.14\right):\\
\;\;\;\;3 \cdot \left(y \cdot x\right)\\
\mathbf{else}:\\
\;\;\;\;y \cdot -0.41379310344827586\\
\end{array}
\end{array}
if x < -0.13500000000000001 or 0.14000000000000001 < x:
Initial program 98.9%
Taylor expanded in x around inf 98.2%
if -0.13500000000000001 < x < 0.14000000000000001:
Initial program 99.8%
Taylor expanded in x around 0 97.9%
Final simplification 98.0%
(FPCore (x y) :precision binary64 (* (* 3.0 y) (+ -0.13793103448275862 x)))
/* Herbie alternative: (3y) * (x - 16/116) with the constant quotient
 * folded to the nearest double, -0.13793103448275862. */
double code(double x, double y) {
return (3.0 * y) * (-0.13793103448275862 + x);
}
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = (3.0d0 * y) * ((-0.13793103448275862d0) + x)
end function
public static double code(double x, double y) {
return (3.0 * y) * (-0.13793103448275862 + x);
}
def code(x, y):
    """Herbie alternative: (3*y) * (x - 16/116), quotient pre-folded."""
    scaled = 3.0 * y
    offset = -0.13793103448275862 + x
    return scaled * offset
function code(x, y) return Float64(Float64(3.0 * y) * Float64(-0.13793103448275862 + x)) end
function tmp = code(x, y) tmp = (3.0 * y) * (-0.13793103448275862 + x); end
code[x_, y_] := N[(N[(3.0 * y), $MachinePrecision] * N[(-0.13793103448275862 + x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(3 \cdot y\right) \cdot \left(-0.13793103448275862 + x\right)
\end{array}
Initial program 99.4%
associate-*l* 99.6%
sub-neg 99.6%
metadata-eval 99.6%
metadata-eval 99.6%
Simplified 99.6%
Final simplification 99.6%
(FPCore (x y) :precision binary64 (* 3.0 (* y (- x 0.13793103448275862))))
/* Herbie alternative: 3 * (y * (x - 16/116)), with 16/116 folded to
 * the nearest double, 0.13793103448275862. */
double code(double x, double y) {
return 3.0 * (y * (x - 0.13793103448275862));
}
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = 3.0d0 * (y * (x - 0.13793103448275862d0))
end function
public static double code(double x, double y) {
return 3.0 * (y * (x - 0.13793103448275862));
}
def code(x, y):
    """Herbie alternative: 3 * (y * (x - 16/116)), quotient pre-folded."""
    prod = y * (x - 0.13793103448275862)
    return 3.0 * prod
function code(x, y) return Float64(3.0 * Float64(y * Float64(x - 0.13793103448275862))) end
function tmp = code(x, y) tmp = 3.0 * (y * (x - 0.13793103448275862)); end
code[x_, y_] := N[(3.0 * N[(y * N[(x - 0.13793103448275862), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
3 \cdot \left(y \cdot \left(x - 0.13793103448275862\right)\right)
\end{array}
Initial program 99.4%
Taylor expanded in y around 0 99.6%
(FPCore (x y) :precision binary64 (* y -0.41379310344827586))
/* Constant-slope Herbie alternative: y * (-3 * 16/116).
 * x is accepted for interface compatibility but unused — the x term was
 * dropped by Taylor expansion around x = 0 (accuracy drops to ~51%). */
double code(double x, double y) {
return y * -0.41379310344827586;
}
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = y * (-0.41379310344827586d0)
end function
public static double code(double x, double y) {
return y * -0.41379310344827586;
}
def code(x, y):
    """Constant-slope Herbie alternative: y * (-3*16/116).

    x is accepted but unused (dropped by Taylor expansion around x = 0).
    """
    slope = -0.41379310344827586
    return y * slope
function code(x, y) return Float64(y * -0.41379310344827586) end
function tmp = code(x, y) tmp = y * -0.41379310344827586; end
code[x_, y_] := N[(y * -0.41379310344827586), $MachinePrecision]
\begin{array}{l}
\\
y \cdot -0.41379310344827586
\end{array}
Initial program 99.4%
Taylor expanded in x around 0 51.2%
Final simplification 51.2%
(FPCore (x y) :precision binary64 (* y (- (* x 3.0) 0.41379310344827586)))
/* Herbie alternative: y * (3x - 0.41379310344827586), i.e. the 3 and
 * the 16/116 offset distributed through before multiplying by y. */
double code(double x, double y) {
return y * ((x * 3.0) - 0.41379310344827586);
}
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = y * ((x * 3.0d0) - 0.41379310344827586d0)
end function
public static double code(double x, double y) {
return y * ((x * 3.0) - 0.41379310344827586);
}
def code(x, y):
    """Herbie alternative: y * (3*x - 0.41379310344827586)."""
    inner = x * 3.0 - 0.41379310344827586
    return y * inner
function code(x, y) return Float64(y * Float64(Float64(x * 3.0) - 0.41379310344827586)) end
function tmp = code(x, y) tmp = y * ((x * 3.0) - 0.41379310344827586); end
code[x_, y_] := N[(y * N[(N[(x * 3.0), $MachinePrecision] - 0.41379310344827586), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
y \cdot \left(x \cdot 3 - 0.41379310344827586\right)
\end{array}
herbie shell --seed 2024139
(FPCore (x y)
:name "Data.Colour.CIE:cieLAB from colour-2.3.3, A"
:precision binary64
:alt
(! :herbie-platform default (* y (- (* x 3) 20689655172413793/50000000000000000)))
(* (* (- x (/ 16.0 116.0)) 3.0) y))