
(FPCore (x y) :precision binary64 (/ (* (- 1.0 x) (- 3.0 x)) (* y 3.0)))
/* Evaluate ((1 - x) * (3 - x)) / (y * 3) in double precision. */
double code(double x, double y) {
    double numerator = (1.0 - x) * (3.0 - x);
    double denominator = y * 3.0;
    return numerator / denominator;
}
! Evaluate ((1 - x) * (3 - x)) / (y * 3) in double precision.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = ((1.0d0 - x) * (3.0d0 - x)) / (y * 3.0d0)
end function
/** Evaluates ((1 - x) * (3 - x)) / (y * 3) in double precision. */
public static double code(double x, double y) {
    double numerator = (1.0 - x) * (3.0 - x);
    return numerator / (y * 3.0);
}
def code(x, y): return ((1.0 - x) * (3.0 - x)) / (y * 3.0)
function code(x, y) return Float64(Float64(Float64(1.0 - x) * Float64(3.0 - x)) / Float64(y * 3.0)) end
function tmp = code(x, y) tmp = ((1.0 - x) * (3.0 - x)) / (y * 3.0); end
code[x_, y_] := N[(N[(N[(1.0 - x), $MachinePrecision] * N[(3.0 - x), $MachinePrecision]), $MachinePrecision] / N[(y * 3.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(1 - x\right) \cdot \left(3 - x\right)}{y \cdot 3}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 8 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x y) :precision binary64 (/ (* (- 1.0 x) (- 3.0 x)) (* y 3.0)))
double code(double x, double y) {
return ((1.0 - x) * (3.0 - x)) / (y * 3.0);
}
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = ((1.0d0 - x) * (3.0d0 - x)) / (y * 3.0d0)
end function
public static double code(double x, double y) {
return ((1.0 - x) * (3.0 - x)) / (y * 3.0);
}
def code(x, y): return ((1.0 - x) * (3.0 - x)) / (y * 3.0)
function code(x, y) return Float64(Float64(Float64(1.0 - x) * Float64(3.0 - x)) / Float64(y * 3.0)) end
function tmp = code(x, y) tmp = ((1.0 - x) * (3.0 - x)) / (y * 3.0); end
code[x_, y_] := N[(N[(N[(1.0 - x), $MachinePrecision] * N[(3.0 - x), $MachinePrecision]), $MachinePrecision] / N[(y * 3.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(1 - x\right) \cdot \left(3 - x\right)}{y \cdot 3}
\end{array}
(FPCore (x y) :precision binary64 (if (<= (* (- 1.0 x) (- 3.0 x)) 2e+290) (/ (fma (fma 0.3333333333333333 x -1.3333333333333333) x 1.0) y) (* (/ x y) (* 0.3333333333333333 x))))
double code(double x, double y) {
double tmp;
if (((1.0 - x) * (3.0 - x)) <= 2e+290) {
tmp = fma(fma(0.3333333333333333, x, -1.3333333333333333), x, 1.0) / y;
} else {
tmp = (x / y) * (0.3333333333333333 * x);
}
return tmp;
}
function code(x, y) tmp = 0.0 if (Float64(Float64(1.0 - x) * Float64(3.0 - x)) <= 2e+290) tmp = Float64(fma(fma(0.3333333333333333, x, -1.3333333333333333), x, 1.0) / y); else tmp = Float64(Float64(x / y) * Float64(0.3333333333333333 * x)); end return tmp end
code[x_, y_] := If[LessEqual[N[(N[(1.0 - x), $MachinePrecision] * N[(3.0 - x), $MachinePrecision]), $MachinePrecision], 2e+290], N[(N[(N[(0.3333333333333333 * x + -1.3333333333333333), $MachinePrecision] * x + 1.0), $MachinePrecision] / y), $MachinePrecision], N[(N[(x / y), $MachinePrecision] * N[(0.3333333333333333 * x), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\left(1 - x\right) \cdot \left(3 - x\right) \leq 2 \cdot 10^{+290}:\\
\;\;\;\;\frac{\mathsf{fma}\left(\mathsf{fma}\left(0.3333333333333333, x, -1.3333333333333333\right), x, 1\right)}{y}\\
\mathbf{else}:\\
\;\;\;\;\frac{x}{y} \cdot \left(0.3333333333333333 \cdot x\right)\\
\end{array}
\end{array}
if (*.f64 (-.f64 #s(literal 1 binary64) x) (-.f64 #s(literal 3 binary64) x)) < 2.00000000000000012e290
Initial program 99.1%
Applied rewrites 99.8%
Taylor expanded in x around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
sub-negN/A
metadata-evalN/A
lower-fma.f6499.8
Applied rewrites99.8%
if 2.00000000000000012e290 < (*.f64 (-.f64 #s(literal 1 binary64) x) (-.f64 #s(literal 3 binary64) x)) Initial program 72.5%
Taylor expanded in x around inf
unpow2N/A
associate-*l/N/A
associate-*l*N/A
lower-*.f64N/A
lower-*.f64N/A
lower-/.f6499.8
Applied rewrites99.8%
Applied rewrites99.9%
(FPCore (x y) :precision binary64 (if (<= x -0.75) (/ (* -1.3333333333333333 x) y) (pow y -1.0)))
/* Piecewise approximation: (-4/3 * x) / y for x <= -0.75, else 1/y. */
double code(double x, double y) {
    if (x <= -0.75) {
        return (-1.3333333333333333 * x) / y;
    }
    return pow(y, -1.0);
}
! Piecewise approximation of ((1 - x)*(3 - x)) / (y*3):
!   x <= -0.75  ->  (-4/3 * x) / y
!   otherwise   ->  y ** (-1)
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8) :: tmp
if (x <= (-0.75d0)) then
tmp = ((-1.3333333333333333d0) * x) / y
else
tmp = y ** (-1.0d0)
end if
code = tmp
end function
/** Piecewise approximation: (-4/3 * x) / y for x <= -0.75, else 1/y. */
public static double code(double x, double y) {
    if (x <= -0.75) {
        return (-1.3333333333333333 * x) / y;
    }
    return Math.pow(y, -1.0);
}
def code(x, y): tmp = 0 if x <= -0.75: tmp = (-1.3333333333333333 * x) / y else: tmp = math.pow(y, -1.0) return tmp
function code(x, y) tmp = 0.0 if (x <= -0.75) tmp = Float64(Float64(-1.3333333333333333 * x) / y); else tmp = y ^ -1.0; end return tmp end
function tmp_2 = code(x, y) tmp = 0.0; if (x <= -0.75) tmp = (-1.3333333333333333 * x) / y; else tmp = y ^ -1.0; end tmp_2 = tmp; end
code[x_, y_] := If[LessEqual[x, -0.75], N[(N[(-1.3333333333333333 * x), $MachinePrecision] / y), $MachinePrecision], N[Power[y, -1.0], $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -0.75:\\
\;\;\;\;\frac{-1.3333333333333333 \cdot x}{y}\\
\mathbf{else}:\\
\;\;\;\;{y}^{-1}\\
\end{array}
\end{array}
if x < -0.75
Initial program 88.5%
Applied rewrites 88.5%
Taylor expanded in x around inf
sub-negN/A
distribute-rgt-inN/A
distribute-lft-neg-inN/A
metadata-evalN/A
associate-*l*N/A
associate-*l/N/A
*-lft-identityN/A
unpow2N/A
associate-/l*N/A
*-rgt-identityN/A
associate-*r/N/A
rgt-mult-inverseN/A
*-rgt-identityN/A
unpow2N/A
associate-*r*N/A
distribute-rgt-inN/A
metadata-evalN/A
sub-negN/A
*-commutativeN/A
lower-*.f64N/A
Applied rewrites87.6%
Taylor expanded in x around 0
Applied rewrites28.2%
if -0.75 < x Initial program 92.9%
Taylor expanded in x around 0
lower-/.f6463.8
Applied rewrites63.8%
Final simplification54.8%
(FPCore (x y) :precision binary64 (pow y -1.0))
/* Coarsest approximation: the expression collapses to 1/y, so x is unused. */
double code(double x, double y) {
    (void) x; /* x drops out of this approximation */
    return pow(y, -1.0);
}
! Coarsest approximation: the expression collapses to y ** (-1); x is unused.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = y ** (-1.0d0)
end function
/** Coarsest approximation: the expression collapses to 1/y; x is unused. */
public static double code(double x, double y) {
    double reciprocal = Math.pow(y, -1.0);
    return reciprocal;
}
def code(x, y): return math.pow(y, -1.0)
function code(x, y) return y ^ -1.0 end
function tmp = code(x, y) tmp = y ^ -1.0; end
code[x_, y_] := N[Power[y, -1.0], $MachinePrecision]
\begin{array}{l}
\\
{y}^{-1}
\end{array}
Initial program 91.7%
Taylor expanded in x around 0
lower-/.f6448.9
Applied rewrites48.9%
Final simplification48.9%
(FPCore (x y) :precision binary64 (if (<= (* (- 1.0 x) (- 3.0 x)) 10.0) (/ (fma -1.3333333333333333 x 1.0) y) (* (/ x y) (fma 0.3333333333333333 x -1.3333333333333333))))
double code(double x, double y) {
double tmp;
if (((1.0 - x) * (3.0 - x)) <= 10.0) {
tmp = fma(-1.3333333333333333, x, 1.0) / y;
} else {
tmp = (x / y) * fma(0.3333333333333333, x, -1.3333333333333333);
}
return tmp;
}
function code(x, y) tmp = 0.0 if (Float64(Float64(1.0 - x) * Float64(3.0 - x)) <= 10.0) tmp = Float64(fma(-1.3333333333333333, x, 1.0) / y); else tmp = Float64(Float64(x / y) * fma(0.3333333333333333, x, -1.3333333333333333)); end return tmp end
code[x_, y_] := If[LessEqual[N[(N[(1.0 - x), $MachinePrecision] * N[(3.0 - x), $MachinePrecision]), $MachinePrecision], 10.0], N[(N[(-1.3333333333333333 * x + 1.0), $MachinePrecision] / y), $MachinePrecision], N[(N[(x / y), $MachinePrecision] * N[(0.3333333333333333 * x + -1.3333333333333333), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\left(1 - x\right) \cdot \left(3 - x\right) \leq 10:\\
\;\;\;\;\frac{\mathsf{fma}\left(-1.3333333333333333, x, 1\right)}{y}\\
\mathbf{else}:\\
\;\;\;\;\frac{x}{y} \cdot \mathsf{fma}\left(0.3333333333333333, x, -1.3333333333333333\right)\\
\end{array}
\end{array}
if (*.f64 (-.f64 #s(literal 1 binary64) x) (-.f64 #s(literal 3 binary64) x)) < 10Initial program 99.0%
Applied rewrites99.9%
Taylor expanded in x around 0
+-commutativeN/A
lower-fma.f6497.4
Applied rewrites97.4%
if 10 < (*.f64 (-.f64 #s(literal 1 binary64) x) (-.f64 #s(literal 3 binary64) x)) Initial program 85.1%
Taylor expanded in x around inf
sub-negN/A
associate-*r/N/A
metadata-evalN/A
distribute-lft-inN/A
associate-*r/N/A
metadata-evalN/A
associate-*r/N/A
unpow2N/A
associate-*l*N/A
*-commutativeN/A
associate-*l/N/A
distribute-neg-fracN/A
metadata-evalN/A
associate-*r/N/A
times-fracN/A
Applied rewrites97.6%
(FPCore (x y) :precision binary64 (if (<= (* (- 1.0 x) (- 3.0 x)) 10.0) (/ (fma -1.3333333333333333 x 1.0) y) (* (* 0.3333333333333333 (/ x y)) x)))
double code(double x, double y) {
double tmp;
if (((1.0 - x) * (3.0 - x)) <= 10.0) {
tmp = fma(-1.3333333333333333, x, 1.0) / y;
} else {
tmp = (0.3333333333333333 * (x / y)) * x;
}
return tmp;
}
function code(x, y) tmp = 0.0 if (Float64(Float64(1.0 - x) * Float64(3.0 - x)) <= 10.0) tmp = Float64(fma(-1.3333333333333333, x, 1.0) / y); else tmp = Float64(Float64(0.3333333333333333 * Float64(x / y)) * x); end return tmp end
code[x_, y_] := If[LessEqual[N[(N[(1.0 - x), $MachinePrecision] * N[(3.0 - x), $MachinePrecision]), $MachinePrecision], 10.0], N[(N[(-1.3333333333333333 * x + 1.0), $MachinePrecision] / y), $MachinePrecision], N[(N[(0.3333333333333333 * N[(x / y), $MachinePrecision]), $MachinePrecision] * x), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\left(1 - x\right) \cdot \left(3 - x\right) \leq 10:\\
\;\;\;\;\frac{\mathsf{fma}\left(-1.3333333333333333, x, 1\right)}{y}\\
\mathbf{else}:\\
\;\;\;\;\left(0.3333333333333333 \cdot \frac{x}{y}\right) \cdot x\\
\end{array}
\end{array}
if (*.f64 (-.f64 #s(literal 1 binary64) x) (-.f64 #s(literal 3 binary64) x)) < 10Initial program 99.0%
Applied rewrites99.9%
Taylor expanded in x around 0
+-commutativeN/A
lower-fma.f6497.4
Applied rewrites97.4%
if 10 < (*.f64 (-.f64 #s(literal 1 binary64) x) (-.f64 #s(literal 3 binary64) x)) Initial program 85.1%
Taylor expanded in x around inf
unpow2N/A
associate-*l/N/A
associate-*l*N/A
lower-*.f64N/A
lower-*.f64N/A
lower-/.f6495.6
Applied rewrites95.6%
(FPCore (x y) :precision binary64 (* (/ (fma -0.3333333333333333 x 1.0) y) (- 1.0 x)))
double code(double x, double y) {
return (fma(-0.3333333333333333, x, 1.0) / y) * (1.0 - x);
}
function code(x, y) return Float64(Float64(fma(-0.3333333333333333, x, 1.0) / y) * Float64(1.0 - x)) end
code[x_, y_] := N[(N[(N[(-0.3333333333333333 * x + 1.0), $MachinePrecision] / y), $MachinePrecision] * N[(1.0 - x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\mathsf{fma}\left(-0.3333333333333333, x, 1\right)}{y} \cdot \left(1 - x\right)
\end{array}
Initial program 91.7%
Applied rewrites99.5%
lift-*.f64N/A
metadata-evalN/A
div-invN/A
lift-*.f64N/A
associate-*l/N/A
lift-/.f64N/A
associate-/r*N/A
lift-*.f64N/A
lower-*.f64N/A
Applied rewrites99.8%
(FPCore (x y) :precision binary64 (* (/ (fma -0.3333333333333333 x 0.3333333333333333) y) (- 3.0 x)))
double code(double x, double y) {
return (fma(-0.3333333333333333, x, 0.3333333333333333) / y) * (3.0 - x);
}
function code(x, y) return Float64(Float64(fma(-0.3333333333333333, x, 0.3333333333333333) / y) * Float64(3.0 - x)) end
code[x_, y_] := N[(N[(N[(-0.3333333333333333 * x + 0.3333333333333333), $MachinePrecision] / y), $MachinePrecision] * N[(3.0 - x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\mathsf{fma}\left(-0.3333333333333333, x, 0.3333333333333333\right)}{y} \cdot \left(3 - x\right)
\end{array}
Initial program 91.7%
Taylor expanded in y around 0
*-commutativeN/A
*-commutativeN/A
associate-/l*N/A
associate-*l*N/A
*-commutativeN/A
lower-*.f64N/A
associate-*l/N/A
*-commutativeN/A
lower-/.f64N/A
sub-negN/A
mul-1-negN/A
+-commutativeN/A
distribute-lft-inN/A
mul-1-negN/A
distribute-rgt-neg-outN/A
distribute-lft-neg-inN/A
metadata-evalN/A
lower-fma.f64N/A
metadata-evalN/A
lower--.f6499.5
Applied rewrites99.5%
(FPCore (x y) :precision binary64 (/ (fma -1.3333333333333333 x 1.0) y))
double code(double x, double y) {
return fma(-1.3333333333333333, x, 1.0) / y;
}
function code(x, y) return Float64(fma(-1.3333333333333333, x, 1.0) / y) end
code[x_, y_] := N[(N[(-1.3333333333333333 * x + 1.0), $MachinePrecision] / y), $MachinePrecision]
\begin{array}{l}
\\
\frac{\mathsf{fma}\left(-1.3333333333333333, x, 1\right)}{y}
\end{array}
Initial program 91.7%
Applied rewrites92.2%
Taylor expanded in x around 0
+-commutativeN/A
lower-fma.f6454.2
Applied rewrites54.2%
(FPCore (x y) :precision binary64 (* (/ (- 1.0 x) y) (/ (- 3.0 x) 3.0)))
/* Rebalanced product: ((1 - x)/y) * ((3 - x)/3), dividing before multiplying. */
double code(double x, double y) {
    double left = (1.0 - x) / y;
    double right = (3.0 - x) / 3.0;
    return left * right;
}
! Rebalanced product: ((1 - x)/y) * ((3 - x)/3), dividing before multiplying.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = ((1.0d0 - x) / y) * ((3.0d0 - x) / 3.0d0)
end function
/** Rebalanced product: ((1 - x)/y) * ((3 - x)/3), dividing before multiplying. */
public static double code(double x, double y) {
    double left = (1.0 - x) / y;
    double right = (3.0 - x) / 3.0;
    return left * right;
}
def code(x, y): return ((1.0 - x) / y) * ((3.0 - x) / 3.0)
function code(x, y) return Float64(Float64(Float64(1.0 - x) / y) * Float64(Float64(3.0 - x) / 3.0)) end
function tmp = code(x, y) tmp = ((1.0 - x) / y) * ((3.0 - x) / 3.0); end
code[x_, y_] := N[(N[(N[(1.0 - x), $MachinePrecision] / y), $MachinePrecision] * N[(N[(3.0 - x), $MachinePrecision] / 3.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1 - x}{y} \cdot \frac{3 - x}{3}
\end{array}
herbie shell --seed 2024309
(FPCore (x y)
:name "Diagrams.TwoD.Arc:bezierFromSweepQ1 from diagrams-lib-1.3.0.3"
:precision binary64
:alt
(! :herbie-platform default (* (/ (- 1 x) y) (/ (- 3 x) 3)))
(/ (* (- 1.0 x) (- 3.0 x)) (* y 3.0)))