
(FPCore (x y) :precision binary64 (/ (* (- 1.0 x) (- 3.0 x)) (* y 3.0)))
/* Reference rendering: ((1 - x) * (3 - x)) / (y * 3), evaluated in binary64
 * with exactly the grouping of the FPCore above. */
double code(double x, double y) {
    double numerator = (1.0 - x) * (3.0 - x);
    double denominator = y * 3.0;
    return numerator / denominator;
}
! Reference rendering: ((1 - x) * (3 - x)) / (y * 3) in double precision,
! with exactly the grouping of the FPCore expression above.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = ((1.0d0 - x) * (3.0d0 - x)) / (y * 3.0d0)
end function
// Reference rendering: ((1 - x) * (3 - x)) / (y * 3), evaluated in binary64
// with exactly the grouping of the FPCore expression above.
public static double code(double x, double y) {
return ((1.0 - x) * (3.0 - x)) / (y * 3.0);
}
def code(x, y):
    """Evaluate ((1 - x) * (3 - x)) / (3 * y) in binary64, preserving the
    original operation order of the FPCore expression."""
    numerator = (1.0 - x) * (3.0 - x)
    denominator = y * 3.0
    return numerator / denominator
function code(x, y) return Float64(Float64(Float64(1.0 - x) * Float64(3.0 - x)) / Float64(y * 3.0)) end
function tmp = code(x, y) tmp = ((1.0 - x) * (3.0 - x)) / (y * 3.0); end
code[x_, y_] := N[(N[(N[(1.0 - x), $MachinePrecision] * N[(3.0 - x), $MachinePrecision]), $MachinePrecision] / N[(y * 3.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(1 - x\right) \cdot \left(3 - x\right)}{y \cdot 3}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 12 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x y) :precision binary64 (/ (* (- 1.0 x) (- 3.0 x)) (* y 3.0)))
/* Same quadratic-over-linear formula as the reference program; the
 * grouping is kept byte-equivalent to the FPCore rendering. */
double code(double x, double y) {
    const double left = 1.0 - x;
    const double right = 3.0 - x;
    return (left * right) / (y * 3.0);
}
! Reference rendering (repeated for Alternative 1): ((1 - x) * (3 - x)) / (y * 3).
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = ((1.0d0 - x) * (3.0d0 - x)) / (y * 3.0d0)
end function
// Reference rendering (repeated for Alternative 1): ((1 - x) * (3 - x)) / (y * 3).
public static double code(double x, double y) {
return ((1.0 - x) * (3.0 - x)) / (y * 3.0);
}
def code(x, y):
    # Same formula as the reference program, factored through local
    # bindings for readability; grouping is unchanged.
    left = 1.0 - x
    right = 3.0 - x
    return (left * right) / (y * 3.0)
function code(x, y) return Float64(Float64(Float64(1.0 - x) * Float64(3.0 - x)) / Float64(y * 3.0)) end
function tmp = code(x, y) tmp = ((1.0 - x) * (3.0 - x)) / (y * 3.0); end
code[x_, y_] := N[(N[(N[(1.0 - x), $MachinePrecision] * N[(3.0 - x), $MachinePrecision]), $MachinePrecision] / N[(y * 3.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(1 - x\right) \cdot \left(3 - x\right)}{y \cdot 3}
\end{array}
(FPCore (x y) :precision binary64 (if (<= (* (- 1.0 x) (- 3.0 x)) 2e+61) (/ (fma (fma 0.3333333333333333 x -1.3333333333333333) x 1.0) y) (* (* (/ x y) x) 0.3333333333333333)))
double code(double x, double y) {
double tmp;
if (((1.0 - x) * (3.0 - x)) <= 2e+61) {
tmp = fma(fma(0.3333333333333333, x, -1.3333333333333333), x, 1.0) / y;
} else {
tmp = ((x / y) * x) * 0.3333333333333333;
}
return tmp;
}
function code(x, y) tmp = 0.0 if (Float64(Float64(1.0 - x) * Float64(3.0 - x)) <= 2e+61) tmp = Float64(fma(fma(0.3333333333333333, x, -1.3333333333333333), x, 1.0) / y); else tmp = Float64(Float64(Float64(x / y) * x) * 0.3333333333333333); end return tmp end
code[x_, y_] := If[LessEqual[N[(N[(1.0 - x), $MachinePrecision] * N[(3.0 - x), $MachinePrecision]), $MachinePrecision], 2e+61], N[(N[(N[(0.3333333333333333 * x + -1.3333333333333333), $MachinePrecision] * x + 1.0), $MachinePrecision] / y), $MachinePrecision], N[(N[(N[(x / y), $MachinePrecision] * x), $MachinePrecision] * 0.3333333333333333), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\left(1 - x\right) \cdot \left(3 - x\right) \leq 2 \cdot 10^{+61}:\\
\;\;\;\;\frac{\mathsf{fma}\left(\mathsf{fma}\left(0.3333333333333333, x, -1.3333333333333333\right), x, 1\right)}{y}\\
\mathbf{else}:\\
\;\;\;\;\left(\frac{x}{y} \cdot x\right) \cdot 0.3333333333333333\\
\end{array}
\end{array}
if (*.f64 (-.f64 #s(literal 1 binary64) x) (-.f64 #s(literal 3 binary64) x)) < 1.9999999999999999e61
Initial program 99.5%
Taylor expanded in x around inf
unpow2N/A
lower-*.f6412.5
Applied rewrites12.5%
lift-/.f64N/A
lift-*.f64N/A
*-commutativeN/A
associate-/r*N/A
lower-/.f64N/A
div-invN/A
lower-*.f64N/A
metadata-eval12.5
Applied rewrites12.5%
Taylor expanded in x around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
sub-negN/A
metadata-evalN/A
lower-fma.f6499.9
Applied rewrites99.9%
if 1.9999999999999999e61 < (*.f64 (-.f64 #s(literal 1 binary64) x) (-.f64 #s(literal 3 binary64) x)) Initial program 85.0%
Applied rewrites99.8%
Taylor expanded in x around inf
unpow2N/A
associate-*l/N/A
lower-*.f64N/A
lower-/.f6499.8
Applied rewrites99.8%
(FPCore (x y) :precision binary64 (if (<= x -0.75) (* (/ -1.3333333333333333 y) x) (pow y -1.0)))
/* Piecewise form from the report: (-4/3 / y) * x when x <= -0.75,
 * otherwise 1/y via pow (the original call is preserved). */
double code(double x, double y) {
    if (x <= -0.75) {
        return (-1.3333333333333333 / y) * x;
    }
    return pow(y, -1.0);
}
! Piecewise form: (-4/3 / y) * x when x <= -0.75, otherwise y**(-1).
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8) :: tmp
if (x <= (-0.75d0)) then
tmp = ((-1.3333333333333333d0) / y) * x
else
tmp = y ** (-1.0d0)
end if
code = tmp
end function
// Piecewise form: (-4/3 / y) * x when x <= -0.75, otherwise 1/y via Math.pow.
public static double code(double x, double y) {
double tmp;
if (x <= -0.75) {
tmp = (-1.3333333333333333 / y) * x;
} else {
tmp = Math.pow(y, -1.0);
}
return tmp;
}
def code(x, y):
    """Piecewise approximation from the report: (-4/3 / y) * x when
    x <= -0.75, otherwise 1/y via math.pow.

    The generated one-liner was flattened onto a single line and is not
    valid Python; this restores the block structure shown in the sibling
    C/Java/Fortran renderings without changing any operation.
    """
    import math  # local import keeps this snippet self-contained
    if x <= -0.75:
        tmp = (-1.3333333333333333 / y) * x
    else:
        tmp = math.pow(y, -1.0)
    return tmp
function code(x, y) tmp = 0.0 if (x <= -0.75) tmp = Float64(Float64(-1.3333333333333333 / y) * x); else tmp = y ^ -1.0; end return tmp end
function tmp_2 = code(x, y) tmp = 0.0; if (x <= -0.75) tmp = (-1.3333333333333333 / y) * x; else tmp = y ^ -1.0; end tmp_2 = tmp; end
code[x_, y_] := If[LessEqual[x, -0.75], N[(N[(-1.3333333333333333 / y), $MachinePrecision] * x), $MachinePrecision], N[Power[y, -1.0], $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -0.75:\\
\;\;\;\;\frac{-1.3333333333333333}{y} \cdot x\\
\mathbf{else}:\\
\;\;\;\;{y}^{-1}\\
\end{array}
\end{array}
if x < -0.75
Initial program 79.4%
Applied rewrites99.6%
Taylor expanded in x around inf
sub-negN/A
associate-*r/N/A
metadata-evalN/A
distribute-rgt-inN/A
*-commutativeN/A
associate-*r/N/A
metadata-evalN/A
associate-*l/N/A
unpow2N/A
associate-*r*N/A
associate-/l*N/A
distribute-neg-fracN/A
metadata-evalN/A
associate-*r/N/A
times-fracN/A
Applied rewrites98.2%
Taylor expanded in x around 0
Applied rewrites29.6%
if -0.75 < x Initial program 98.1%
Taylor expanded in x around 0
lower-/.f6468.1
Applied rewrites68.1%
Final simplification57.8%
(FPCore (x y) :precision binary64 (pow y -1.0))
/* Approximates the whole expression by 1/y; x does not affect the result. */
double code(double x, double y) {
    (void) x; /* unused in this approximation */
    return pow(y, -1.0);
}
! Approximates the whole expression by y**(-1); x is unused.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = y ** (-1.0d0)
end function
// Approximates the whole expression by 1/y; x is unused.
public static double code(double x, double y) {
return Math.pow(y, -1.0);
}
def code(x, y):
    """Approximate the whole expression by 1/y; x is ignored here."""
    reciprocal = math.pow(y, -1.0)
    return reciprocal
function code(x, y) return y ^ -1.0 end
function tmp = code(x, y) tmp = y ^ -1.0; end
code[x_, y_] := N[Power[y, -1.0], $MachinePrecision]
\begin{array}{l}
\\
{y}^{-1}
\end{array}
Initial program 93.2%
Taylor expanded in x around 0
lower-/.f6451.3
Applied rewrites51.3%
Final simplification51.3%
(FPCore (x y) :precision binary64 (if (<= (* (- 1.0 x) (- 3.0 x)) 5.0) (/ (fma -1.3333333333333333 x 1.0) y) (* (fma 0.3333333333333333 x -1.3333333333333333) (/ x y))))
double code(double x, double y) {
double tmp;
if (((1.0 - x) * (3.0 - x)) <= 5.0) {
tmp = fma(-1.3333333333333333, x, 1.0) / y;
} else {
tmp = fma(0.3333333333333333, x, -1.3333333333333333) * (x / y);
}
return tmp;
}
function code(x, y) tmp = 0.0 if (Float64(Float64(1.0 - x) * Float64(3.0 - x)) <= 5.0) tmp = Float64(fma(-1.3333333333333333, x, 1.0) / y); else tmp = Float64(fma(0.3333333333333333, x, -1.3333333333333333) * Float64(x / y)); end return tmp end
code[x_, y_] := If[LessEqual[N[(N[(1.0 - x), $MachinePrecision] * N[(3.0 - x), $MachinePrecision]), $MachinePrecision], 5.0], N[(N[(-1.3333333333333333 * x + 1.0), $MachinePrecision] / y), $MachinePrecision], N[(N[(0.3333333333333333 * x + -1.3333333333333333), $MachinePrecision] * N[(x / y), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\left(1 - x\right) \cdot \left(3 - x\right) \leq 5:\\
\;\;\;\;\frac{\mathsf{fma}\left(-1.3333333333333333, x, 1\right)}{y}\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(0.3333333333333333, x, -1.3333333333333333\right) \cdot \frac{x}{y}\\
\end{array}
\end{array}
if (*.f64 (-.f64 #s(literal 1 binary64) x) (-.f64 #s(literal 3 binary64) x)) < 5
Initial program 99.6%
Taylor expanded in x around inf
unpow2N/A
lower-*.f645.1
Applied rewrites5.1%
lift-/.f64N/A
lift-*.f64N/A
*-commutativeN/A
associate-/r*N/A
lower-/.f64N/A
div-invN/A
lower-*.f64N/A
metadata-eval5.1
Applied rewrites5.1%
Taylor expanded in x around 0
+-commutativeN/A
lower-fma.f6498.3
Applied rewrites98.3%
if 5 < (*.f64 (-.f64 #s(literal 1 binary64) x) (-.f64 #s(literal 3 binary64) x)) Initial program 86.7%
Taylor expanded in x around inf
sub-negN/A
associate-*r/N/A
metadata-evalN/A
distribute-lft-inN/A
associate-*r*N/A
*-commutativeN/A
unpow2N/A
associate-*r*N/A
associate-*l*N/A
associate-*r/N/A
*-rgt-identityN/A
distribute-neg-fracN/A
metadata-evalN/A
associate-*r/N/A
times-fracN/A
Applied rewrites98.8%
Final simplification98.5%
(FPCore (x y) :precision binary64 (/ (/ (- 3.0 x) y) (/ 3.0 (- 1.0 x))))
/* Quotient-of-quotients rewrite: ((3 - x)/y) / (3/(1 - x)). */
double code(double x, double y) {
    const double top = (3.0 - x) / y;
    const double bottom = 3.0 / (1.0 - x);
    return top / bottom;
}
! Quotient-of-quotients rewrite: ((3 - x)/y) / (3/(1 - x)).
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = ((3.0d0 - x) / y) / (3.0d0 / (1.0d0 - x))
end function
// Quotient-of-quotients rewrite: ((3 - x)/y) / (3/(1 - x)).
public static double code(double x, double y) {
return ((3.0 - x) / y) / (3.0 / (1.0 - x));
}
def code(x, y):
    """Quotient-of-quotients rewrite: ((3 - x)/y) / (3/(1 - x)),
    with the original grouping preserved."""
    top = (3.0 - x) / y
    bottom = 3.0 / (1.0 - x)
    return top / bottom
function code(x, y) return Float64(Float64(Float64(3.0 - x) / y) / Float64(3.0 / Float64(1.0 - x))) end
function tmp = code(x, y) tmp = ((3.0 - x) / y) / (3.0 / (1.0 - x)); end
code[x_, y_] := N[(N[(N[(3.0 - x), $MachinePrecision] / y), $MachinePrecision] / N[(3.0 / N[(1.0 - x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\frac{3 - x}{y}}{\frac{3}{1 - x}}
\end{array}
Initial program 93.2%
Applied rewrites99.7%
(FPCore (x y) :precision binary64 (if (<= (* (- 1.0 x) (- 3.0 x)) 5.0) (/ (fma -1.3333333333333333 x 1.0) y) (* (* (/ x y) x) 0.3333333333333333)))
double code(double x, double y) {
double tmp;
if (((1.0 - x) * (3.0 - x)) <= 5.0) {
tmp = fma(-1.3333333333333333, x, 1.0) / y;
} else {
tmp = ((x / y) * x) * 0.3333333333333333;
}
return tmp;
}
function code(x, y) tmp = 0.0 if (Float64(Float64(1.0 - x) * Float64(3.0 - x)) <= 5.0) tmp = Float64(fma(-1.3333333333333333, x, 1.0) / y); else tmp = Float64(Float64(Float64(x / y) * x) * 0.3333333333333333); end return tmp end
code[x_, y_] := If[LessEqual[N[(N[(1.0 - x), $MachinePrecision] * N[(3.0 - x), $MachinePrecision]), $MachinePrecision], 5.0], N[(N[(-1.3333333333333333 * x + 1.0), $MachinePrecision] / y), $MachinePrecision], N[(N[(N[(x / y), $MachinePrecision] * x), $MachinePrecision] * 0.3333333333333333), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\left(1 - x\right) \cdot \left(3 - x\right) \leq 5:\\
\;\;\;\;\frac{\mathsf{fma}\left(-1.3333333333333333, x, 1\right)}{y}\\
\mathbf{else}:\\
\;\;\;\;\left(\frac{x}{y} \cdot x\right) \cdot 0.3333333333333333\\
\end{array}
\end{array}
if (*.f64 (-.f64 #s(literal 1 binary64) x) (-.f64 #s(literal 3 binary64) x)) < 5
Initial program 99.6%
Taylor expanded in x around inf
unpow2N/A
lower-*.f645.1
Applied rewrites5.1%
lift-/.f64N/A
lift-*.f64N/A
*-commutativeN/A
associate-/r*N/A
lower-/.f64N/A
div-invN/A
lower-*.f64N/A
metadata-eval5.1
Applied rewrites5.1%
Taylor expanded in x around 0
+-commutativeN/A
lower-fma.f6498.3
Applied rewrites98.3%
if 5 < (*.f64 (-.f64 #s(literal 1 binary64) x) (-.f64 #s(literal 3 binary64) x)) Initial program 86.7%
Applied rewrites99.7%
Taylor expanded in x around inf
unpow2N/A
associate-*l/N/A
lower-*.f64N/A
lower-/.f6497.0
Applied rewrites97.0%
(FPCore (x y) :precision binary64 (if (<= (* (- 1.0 x) (- 3.0 x)) 5.0) (/ (fma -1.3333333333333333 x 1.0) y) (* (* 0.3333333333333333 (/ x y)) x)))
double code(double x, double y) {
double tmp;
if (((1.0 - x) * (3.0 - x)) <= 5.0) {
tmp = fma(-1.3333333333333333, x, 1.0) / y;
} else {
tmp = (0.3333333333333333 * (x / y)) * x;
}
return tmp;
}
function code(x, y) tmp = 0.0 if (Float64(Float64(1.0 - x) * Float64(3.0 - x)) <= 5.0) tmp = Float64(fma(-1.3333333333333333, x, 1.0) / y); else tmp = Float64(Float64(0.3333333333333333 * Float64(x / y)) * x); end return tmp end
code[x_, y_] := If[LessEqual[N[(N[(1.0 - x), $MachinePrecision] * N[(3.0 - x), $MachinePrecision]), $MachinePrecision], 5.0], N[(N[(-1.3333333333333333 * x + 1.0), $MachinePrecision] / y), $MachinePrecision], N[(N[(0.3333333333333333 * N[(x / y), $MachinePrecision]), $MachinePrecision] * x), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\left(1 - x\right) \cdot \left(3 - x\right) \leq 5:\\
\;\;\;\;\frac{\mathsf{fma}\left(-1.3333333333333333, x, 1\right)}{y}\\
\mathbf{else}:\\
\;\;\;\;\left(0.3333333333333333 \cdot \frac{x}{y}\right) \cdot x\\
\end{array}
\end{array}
if (*.f64 (-.f64 #s(literal 1 binary64) x) (-.f64 #s(literal 3 binary64) x)) < 5
Initial program 99.6%
Taylor expanded in x around inf
unpow2N/A
lower-*.f645.1
Applied rewrites5.1%
lift-/.f64N/A
lift-*.f64N/A
*-commutativeN/A
associate-/r*N/A
lower-/.f64N/A
div-invN/A
lower-*.f64N/A
metadata-eval5.1
Applied rewrites5.1%
Taylor expanded in x around 0
+-commutativeN/A
lower-fma.f6498.3
Applied rewrites98.3%
if 5 < (*.f64 (-.f64 #s(literal 1 binary64) x) (-.f64 #s(literal 3 binary64) x)) Initial program 86.7%
Taylor expanded in x around inf
unpow2N/A
associate-/l*N/A
*-commutativeN/A
associate-*l*N/A
lower-*.f64N/A
lower-*.f64N/A
lower-/.f6496.9
Applied rewrites96.9%
Final simplification97.6%
(FPCore (x y) :precision binary64 (* (* (/ (- 3.0 x) y) (- 1.0 x)) 0.3333333333333333))
/* ((3 - x)/y) * (1 - x), then scaled by the double literal closest to 1/3. */
double code(double x, double y) {
    const double scaled = ((3.0 - x) / y) * (1.0 - x);
    return scaled * 0.3333333333333333;
}
! ((3 - x)/y) * (1 - x), scaled by the closest double to 1/3.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = (((3.0d0 - x) / y) * (1.0d0 - x)) * 0.3333333333333333d0
end function
// ((3 - x)/y) * (1 - x), scaled by the closest double to 1/3.
public static double code(double x, double y) {
return (((3.0 - x) / y) * (1.0 - x)) * 0.3333333333333333;
}
def code(x, y):
    # ((3 - x)/y) * (1 - x), then scale by the float literal closest to 1/3.
    scaled = ((3.0 - x) / y) * (1.0 - x)
    return scaled * 0.3333333333333333
function code(x, y) return Float64(Float64(Float64(Float64(3.0 - x) / y) * Float64(1.0 - x)) * 0.3333333333333333) end
function tmp = code(x, y) tmp = (((3.0 - x) / y) * (1.0 - x)) * 0.3333333333333333; end
code[x_, y_] := N[(N[(N[(N[(3.0 - x), $MachinePrecision] / y), $MachinePrecision] * N[(1.0 - x), $MachinePrecision]), $MachinePrecision] * 0.3333333333333333), $MachinePrecision]
\begin{array}{l}
\\
\left(\frac{3 - x}{y} \cdot \left(1 - x\right)\right) \cdot 0.3333333333333333
\end{array}
Initial program 93.2%
Applied rewrites99.6%
(FPCore (x y) :precision binary64 (* (/ (- 1.0 x) (* y 3.0)) (- 3.0 x)))
/* (1 - x)/(3 y) multiplied by (3 - x); grouping matches the FPCore. */
double code(double x, double y) {
    const double ratio = (1.0 - x) / (y * 3.0);
    return ratio * (3.0 - x);
}
! (1 - x)/(3 y) multiplied by (3 - x).
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = ((1.0d0 - x) / (y * 3.0d0)) * (3.0d0 - x)
end function
// (1 - x)/(3 y) multiplied by (3 - x).
public static double code(double x, double y) {
return ((1.0 - x) / (y * 3.0)) * (3.0 - x);
}
def code(x, y):
    """(1 - x)/(3 y) multiplied by (3 - x), keeping the original grouping."""
    ratio = (1.0 - x) / (y * 3.0)
    return ratio * (3.0 - x)
function code(x, y) return Float64(Float64(Float64(1.0 - x) / Float64(y * 3.0)) * Float64(3.0 - x)) end
function tmp = code(x, y) tmp = ((1.0 - x) / (y * 3.0)) * (3.0 - x); end
code[x_, y_] := N[(N[(N[(1.0 - x), $MachinePrecision] / N[(y * 3.0), $MachinePrecision]), $MachinePrecision] * N[(3.0 - x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1 - x}{y \cdot 3} \cdot \left(3 - x\right)
\end{array}
Initial program 93.2%
Taylor expanded in y around 0
*-commutativeN/A
*-commutativeN/A
associate-/l*N/A
associate-*l*N/A
*-commutativeN/A
lower-*.f64N/A
associate-*l/N/A
*-commutativeN/A
lower-/.f64N/A
sub-negN/A
+-commutativeN/A
distribute-lft-inN/A
distribute-rgt-neg-outN/A
distribute-lft-neg-inN/A
metadata-evalN/A
lower-fma.f64N/A
metadata-evalN/A
lower--.f6499.5
Applied rewrites99.5%
Applied rewrites99.6%
Final simplification99.6%
(FPCore (x y) :precision binary64 (* (fma -0.3333333333333333 x 0.3333333333333333) (/ (- 3.0 x) y)))
double code(double x, double y) {
return fma(-0.3333333333333333, x, 0.3333333333333333) * ((3.0 - x) / y);
}
function code(x, y) return Float64(fma(-0.3333333333333333, x, 0.3333333333333333) * Float64(Float64(3.0 - x) / y)) end
code[x_, y_] := N[(N[(-0.3333333333333333 * x + 0.3333333333333333), $MachinePrecision] * N[(N[(3.0 - x), $MachinePrecision] / y), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(-0.3333333333333333, x, 0.3333333333333333\right) \cdot \frac{3 - x}{y}
\end{array}
Initial program 93.2%
Applied rewrites99.6%
lift-*.f64N/A
*-commutativeN/A
lift--.f64N/A
lift-*.f64N/A
*-commutativeN/A
associate-*r*N/A
metadata-evalN/A
associate-/r/N/A
frac-2negN/A
associate-/r/N/A
metadata-evalN/A
metadata-evalN/A
sub-negN/A
distribute-neg-inN/A
metadata-evalN/A
remove-double-negN/A
distribute-lft-inN/A
metadata-evalN/A
+-commutativeN/A
lift-fma.f64N/A
lower-*.f6499.5
Applied rewrites99.5%
(FPCore (x y) :precision binary64 (* (/ (fma -0.3333333333333333 x 0.3333333333333333) y) (- 3.0 x)))
double code(double x, double y) {
return (fma(-0.3333333333333333, x, 0.3333333333333333) / y) * (3.0 - x);
}
function code(x, y) return Float64(Float64(fma(-0.3333333333333333, x, 0.3333333333333333) / y) * Float64(3.0 - x)) end
code[x_, y_] := N[(N[(N[(-0.3333333333333333 * x + 0.3333333333333333), $MachinePrecision] / y), $MachinePrecision] * N[(3.0 - x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\mathsf{fma}\left(-0.3333333333333333, x, 0.3333333333333333\right)}{y} \cdot \left(3 - x\right)
\end{array}
Initial program 93.2%
Taylor expanded in y around 0
*-commutativeN/A
*-commutativeN/A
associate-/l*N/A
associate-*l*N/A
*-commutativeN/A
lower-*.f64N/A
associate-*l/N/A
*-commutativeN/A
lower-/.f64N/A
sub-negN/A
+-commutativeN/A
distribute-lft-inN/A
distribute-rgt-neg-outN/A
distribute-lft-neg-inN/A
metadata-evalN/A
lower-fma.f64N/A
metadata-evalN/A
lower--.f6499.5
Applied rewrites99.5%
Final simplification99.5%
(FPCore (x y) :precision binary64 (/ (fma -1.3333333333333333 x 1.0) y))
double code(double x, double y) {
return fma(-1.3333333333333333, x, 1.0) / y;
}
function code(x, y) return Float64(fma(-1.3333333333333333, x, 1.0) / y) end
code[x_, y_] := N[(N[(-1.3333333333333333 * x + 1.0), $MachinePrecision] / y), $MachinePrecision]
\begin{array}{l}
\\
\frac{\mathsf{fma}\left(-1.3333333333333333, x, 1\right)}{y}
\end{array}
Initial program 93.2%
Taylor expanded in x around inf
unpow2N/A
lower-*.f6444.2
Applied rewrites44.2%
lift-/.f64N/A
lift-*.f64N/A
*-commutativeN/A
associate-/r*N/A
lower-/.f64N/A
div-invN/A
lower-*.f64N/A
metadata-eval44.2
Applied rewrites44.2%
Taylor expanded in x around 0
+-commutativeN/A
lower-fma.f6457.6
Applied rewrites57.6%
(FPCore (x y) :precision binary64 (* (/ (- 1.0 x) y) (/ (- 3.0 x) 3.0)))
/* Product of two ratios: (1 - x)/y and (3 - x)/3. */
double code(double x, double y) {
    const double first = (1.0 - x) / y;
    const double second = (3.0 - x) / 3.0;
    return first * second;
}
! Product of two ratios: (1 - x)/y and (3 - x)/3.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = ((1.0d0 - x) / y) * ((3.0d0 - x) / 3.0d0)
end function
// Product of two ratios: (1 - x)/y and (3 - x)/3.
public static double code(double x, double y) {
return ((1.0 - x) / y) * ((3.0 - x) / 3.0);
}
def code(x, y):
    # Product of two ratios: (1 - x)/y and (3 - x)/3.
    first = (1.0 - x) / y
    second = (3.0 - x) / 3.0
    return first * second
function code(x, y) return Float64(Float64(Float64(1.0 - x) / y) * Float64(Float64(3.0 - x) / 3.0)) end
function tmp = code(x, y) tmp = ((1.0 - x) / y) * ((3.0 - x) / 3.0); end
code[x_, y_] := N[(N[(N[(1.0 - x), $MachinePrecision] / y), $MachinePrecision] * N[(N[(3.0 - x), $MachinePrecision] / 3.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1 - x}{y} \cdot \frac{3 - x}{3}
\end{array}
herbie shell --seed 2024298
(FPCore (x y)
:name "Diagrams.TwoD.Arc:bezierFromSweepQ1 from diagrams-lib-1.3.0.3"
:precision binary64
:alt
(! :herbie-platform default (* (/ (- 1 x) y) (/ (- 3 x) 3)))
(/ (* (- 1.0 x) (- 3.0 x)) (* y 3.0)))