
(FPCore (x y) :precision binary64 (/ (* (- 1.0 x) (- 3.0 x)) (* y 3.0)))
double code(double x, double y) {
    /* binary64 evaluation of (1 - x) * (3 - x) / (3 * y). */
    const double num = (1.0 - x) * (3.0 - x);
    const double den = y * 3.0;
    return num / den;
}
! Fortran translation of ((1 - x) * (3 - x)) / (3 * y), all in binary64.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = ((1.0d0 - x) * (3.0d0 - x)) / (y * 3.0d0)
end function
// Java translation of the same expression (double = binary64).
public static double code(double x, double y) {
return ((1.0 - x) * (3.0 - x)) / (y * 3.0);
}
def code(x, y):
    """binary64 evaluation of (1 - x) * (3 - x) / (3 * y)."""
    numerator = (1.0 - x) * (3.0 - x)
    denominator = y * 3.0
    return numerator / denominator
# Julia translation; explicit Float64(...) keeps every intermediate in binary64.
function code(x, y) return Float64(Float64(Float64(1.0 - x) * Float64(3.0 - x)) / Float64(y * 3.0)) end
% MATLAB translation of the same expression.
function tmp = code(x, y) tmp = ((1.0 - x) * (3.0 - x)) / (y * 3.0); end
(* Wolfram translation; N[..., $MachinePrecision] rounds each step to machine precision. *)
code[x_, y_] := N[(N[(N[(1.0 - x), $MachinePrecision] * N[(3.0 - x), $MachinePrecision]), $MachinePrecision] / N[(y * 3.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(1 - x\right) \cdot \left(3 - x\right)}{y \cdot 3}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 12 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x y) :precision binary64 (/ (* (- 1.0 x) (- 3.0 x)) (* y 3.0)))
double code(double x, double y) {
    /* Quotient (1 - x) * (3 - x) over 3 * y, evaluated in binary64. */
    const double top = (1.0 - x) * (3.0 - x);
    const double bottom = y * 3.0;
    return top / bottom;
}
! Fortran translation of ((1 - x) * (3 - x)) / (3 * y), all in binary64.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = ((1.0d0 - x) * (3.0d0 - x)) / (y * 3.0d0)
end function
// Java translation of the same expression (double = binary64).
public static double code(double x, double y) {
return ((1.0 - x) * (3.0 - x)) / (y * 3.0);
}
def code(x, y):
    """Evaluate (1 - x) * (3 - x) / (3 * y) using binary64 arithmetic."""
    top = (1.0 - x) * (3.0 - x)
    bottom = y * 3.0
    return top / bottom
# Julia translation; explicit Float64(...) keeps every intermediate in binary64.
function code(x, y) return Float64(Float64(Float64(1.0 - x) * Float64(3.0 - x)) / Float64(y * 3.0)) end
% MATLAB translation of the same expression.
function tmp = code(x, y) tmp = ((1.0 - x) * (3.0 - x)) / (y * 3.0); end
(* Wolfram translation; N[..., $MachinePrecision] rounds each step to machine precision. *)
code[x_, y_] := N[(N[(N[(1.0 - x), $MachinePrecision] * N[(3.0 - x), $MachinePrecision]), $MachinePrecision] / N[(y * 3.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(1 - x\right) \cdot \left(3 - x\right)}{y \cdot 3}
\end{array}
(FPCore (x y) :precision binary64 (* (* (* (- 1.0 x) 0.3333333333333333) (/ -1.0 y)) (- x 3.0)))
double code(double x, double y) {
    /* Herbie rewrite of the original quotient as a product:
       ((1-x) * 1/3) * (-1/y) * (x-3); multiplication order preserved. */
    const double scaled = (1.0 - x) * 0.3333333333333333;
    const double neg_recip = -1.0 / y;
    return (scaled * neg_recip) * (x - 3.0);
}
! Fortran translation of the rewritten product ((1-x)/3) * (-1/y) * (x-3).
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = (((1.0d0 - x) * 0.3333333333333333d0) * ((-1.0d0) / y)) * (x - 3.0d0)
end function
// Java translation of the rewritten product form.
public static double code(double x, double y) {
return (((1.0 - x) * 0.3333333333333333) * (-1.0 / y)) * (x - 3.0);
}
def code(x, y):
    """Rewritten product form ((1 - x)/3) * (-1/y) * (x - 3), binary64 order preserved."""
    third = (1.0 - x) * 0.3333333333333333
    neg_recip = -1.0 / y
    return (third * neg_recip) * (x - 3.0)
# Julia translation of the rewritten product form.
function code(x, y) return Float64(Float64(Float64(Float64(1.0 - x) * 0.3333333333333333) * Float64(-1.0 / y)) * Float64(x - 3.0)) end
% MATLAB translation of the rewritten product form.
function tmp = code(x, y) tmp = (((1.0 - x) * 0.3333333333333333) * (-1.0 / y)) * (x - 3.0); end
(* Wolfram translation of the rewritten product form. *)
code[x_, y_] := N[(N[(N[(N[(1.0 - x), $MachinePrecision] * 0.3333333333333333), $MachinePrecision] * N[(-1.0 / y), $MachinePrecision]), $MachinePrecision] * N[(x - 3.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(\left(\left(1 - x\right) \cdot 0.3333333333333333\right) \cdot \frac{-1}{y}\right) \cdot \left(x - 3\right)
\end{array}
Initial program 92.1%
lift-/.f64N/A
lift-*.f64N/A
lift-*.f64N/A
times-fracN/A
clear-numN/A
frac-timesN/A
*-lft-identityN/A
lower-/.f64N/A
lower-*.f64N/A
lower-/.f6499.6
Applied rewrites99.6%
lift-/.f64N/A
lift-*.f64N/A
associate-/r*N/A
lift-/.f64N/A
associate-/r/N/A
lift-/.f64N/A
associate-/l*N/A
lift-/.f64N/A
frac-2negN/A
div-invN/A
div-invN/A
metadata-evalN/A
associate-*l*N/A
lower-*.f64N/A
neg-sub0N/A
lift--.f64N/A
sub-negN/A
+-commutativeN/A
associate--r+N/A
neg-sub0N/A
remove-double-negN/A
lower--.f64N/A
lower-*.f64N/A
Applied rewrites99.6%
Final simplification99.6%
(FPCore (x y) :precision binary64 (if (<= (* (- 3.0 x) (- 1.0 x)) 10.0) (/ (fma -4.0 x 3.0) (* y 3.0)) (* (+ -1.3333333333333333 (* 0.3333333333333333 x)) (/ x y))))
double code(double x, double y) {
double tmp;
if (((3.0 - x) * (1.0 - x)) <= 10.0) {
tmp = fma(-4.0, x, 3.0) / (y * 3.0);
} else {
tmp = (-1.3333333333333333 + (0.3333333333333333 * x)) * (x / y);
}
return tmp;
}
# Julia translation of the branching rewrite (fma is built in).
function code(x, y) tmp = 0.0 if (Float64(Float64(3.0 - x) * Float64(1.0 - x)) <= 10.0) tmp = Float64(fma(-4.0, x, 3.0) / Float64(y * 3.0)); else tmp = Float64(Float64(-1.3333333333333333 + Float64(0.3333333333333333 * x)) * Float64(x / y)); end return tmp end
(* Wolfram translation of the branching rewrite. *)
code[x_, y_] := If[LessEqual[N[(N[(3.0 - x), $MachinePrecision] * N[(1.0 - x), $MachinePrecision]), $MachinePrecision], 10.0], N[(N[(-4.0 * x + 3.0), $MachinePrecision] / N[(y * 3.0), $MachinePrecision]), $MachinePrecision], N[(N[(-1.3333333333333333 + N[(0.3333333333333333 * x), $MachinePrecision]), $MachinePrecision] * N[(x / y), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\left(3 - x\right) \cdot \left(1 - x\right) \leq 10:\\
\;\;\;\;\frac{\mathsf{fma}\left(-4, x, 3\right)}{y \cdot 3}\\
\mathbf{else}:\\
\;\;\;\;\left(-1.3333333333333333 + 0.3333333333333333 \cdot x\right) \cdot \frac{x}{y}\\
\end{array}
\end{array}
if (*.f64 (-.f64 #s(literal 1 binary64) x) (-.f64 #s(literal 3 binary64) x)) < 10Initial program 99.6%
Taylor expanded in x around 0
+-commutativeN/A
lower-fma.f6498.5
Applied rewrites98.5%
if 10 < (*.f64 (-.f64 #s(literal 1 binary64) x) (-.f64 #s(literal 3 binary64) x)) Initial program 84.2%
Taylor expanded in x around inf
sub-negN/A
associate-*r/N/A
metadata-evalN/A
distribute-lft-inN/A
associate-*r/N/A
metadata-evalN/A
associate-*r/N/A
unpow2N/A
associate-*l*N/A
*-commutativeN/A
associate-*l/N/A
distribute-neg-fracN/A
metadata-evalN/A
associate-*r/N/A
times-fracN/A
Applied rewrites99.0%
Applied rewrites99.0%
Final simplification98.7%
(FPCore (x y) :precision binary64 (if (<= (* (- 3.0 x) (- 1.0 x)) 10.0) (/ (fma -4.0 x 3.0) (* y 3.0)) (* (/ (fma 0.3333333333333333 x -1.3333333333333333) y) x)))
double code(double x, double y) {
double tmp;
if (((3.0 - x) * (1.0 - x)) <= 10.0) {
tmp = fma(-4.0, x, 3.0) / (y * 3.0);
} else {
tmp = (fma(0.3333333333333333, x, -1.3333333333333333) / y) * x;
}
return tmp;
}
# Julia translation of the branching rewrite (fma is built in).
function code(x, y) tmp = 0.0 if (Float64(Float64(3.0 - x) * Float64(1.0 - x)) <= 10.0) tmp = Float64(fma(-4.0, x, 3.0) / Float64(y * 3.0)); else tmp = Float64(Float64(fma(0.3333333333333333, x, -1.3333333333333333) / y) * x); end return tmp end
(* Wolfram translation of the branching rewrite. *)
code[x_, y_] := If[LessEqual[N[(N[(3.0 - x), $MachinePrecision] * N[(1.0 - x), $MachinePrecision]), $MachinePrecision], 10.0], N[(N[(-4.0 * x + 3.0), $MachinePrecision] / N[(y * 3.0), $MachinePrecision]), $MachinePrecision], N[(N[(N[(0.3333333333333333 * x + -1.3333333333333333), $MachinePrecision] / y), $MachinePrecision] * x), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\left(3 - x\right) \cdot \left(1 - x\right) \leq 10:\\
\;\;\;\;\frac{\mathsf{fma}\left(-4, x, 3\right)}{y \cdot 3}\\
\mathbf{else}:\\
\;\;\;\;\frac{\mathsf{fma}\left(0.3333333333333333, x, -1.3333333333333333\right)}{y} \cdot x\\
\end{array}
\end{array}
if (*.f64 (-.f64 #s(literal 1 binary64) x) (-.f64 #s(literal 3 binary64) x)) < 10Initial program 99.6%
Taylor expanded in x around 0
+-commutativeN/A
lower-fma.f6498.5
Applied rewrites98.5%
if 10 < (*.f64 (-.f64 #s(literal 1 binary64) x) (-.f64 #s(literal 3 binary64) x)) Initial program 84.2%
Taylor expanded in x around inf
sub-negN/A
associate-*r/N/A
metadata-evalN/A
distribute-lft-inN/A
associate-*r/N/A
metadata-evalN/A
associate-*r/N/A
unpow2N/A
associate-*l*N/A
*-commutativeN/A
associate-*l/N/A
distribute-neg-fracN/A
metadata-evalN/A
associate-*r/N/A
times-fracN/A
Applied rewrites99.0%
Applied rewrites99.0%
Final simplification98.7%
(FPCore (x y) :precision binary64 (if (<= (* (- 3.0 x) (- 1.0 x)) 10.0) (* (/ (fma -4.0 x 3.0) y) 0.3333333333333333) (* (/ (fma 0.3333333333333333 x -1.3333333333333333) y) x)))
double code(double x, double y) {
double tmp;
if (((3.0 - x) * (1.0 - x)) <= 10.0) {
tmp = (fma(-4.0, x, 3.0) / y) * 0.3333333333333333;
} else {
tmp = (fma(0.3333333333333333, x, -1.3333333333333333) / y) * x;
}
return tmp;
}
# Julia translation of the branching rewrite (fma is built in).
function code(x, y) tmp = 0.0 if (Float64(Float64(3.0 - x) * Float64(1.0 - x)) <= 10.0) tmp = Float64(Float64(fma(-4.0, x, 3.0) / y) * 0.3333333333333333); else tmp = Float64(Float64(fma(0.3333333333333333, x, -1.3333333333333333) / y) * x); end return tmp end
(* Wolfram translation of the branching rewrite. *)
code[x_, y_] := If[LessEqual[N[(N[(3.0 - x), $MachinePrecision] * N[(1.0 - x), $MachinePrecision]), $MachinePrecision], 10.0], N[(N[(N[(-4.0 * x + 3.0), $MachinePrecision] / y), $MachinePrecision] * 0.3333333333333333), $MachinePrecision], N[(N[(N[(0.3333333333333333 * x + -1.3333333333333333), $MachinePrecision] / y), $MachinePrecision] * x), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\left(3 - x\right) \cdot \left(1 - x\right) \leq 10:\\
\;\;\;\;\frac{\mathsf{fma}\left(-4, x, 3\right)}{y} \cdot 0.3333333333333333\\
\mathbf{else}:\\
\;\;\;\;\frac{\mathsf{fma}\left(0.3333333333333333, x, -1.3333333333333333\right)}{y} \cdot x\\
\end{array}
\end{array}
if (*.f64 (-.f64 #s(literal 1 binary64) x) (-.f64 #s(literal 3 binary64) x)) < 10Initial program 99.6%
Taylor expanded in x around 0
+-commutativeN/A
lower-fma.f6498.5
Applied rewrites98.5%
lift-/.f64N/A
lift-*.f64N/A
associate-/r*N/A
div-invN/A
metadata-evalN/A
lower-*.f64N/A
lower-/.f6498.3
Applied rewrites98.3%
if 10 < (*.f64 (-.f64 #s(literal 1 binary64) x) (-.f64 #s(literal 3 binary64) x)) Initial program 84.2%
Taylor expanded in x around inf
sub-negN/A
associate-*r/N/A
metadata-evalN/A
distribute-lft-inN/A
associate-*r/N/A
metadata-evalN/A
associate-*r/N/A
unpow2N/A
associate-*l*N/A
*-commutativeN/A
associate-*l/N/A
distribute-neg-fracN/A
metadata-evalN/A
associate-*r/N/A
times-fracN/A
Applied rewrites99.0%
Applied rewrites99.0%
Final simplification98.6%
(FPCore (x y) :precision binary64 (if (<= (* (- 3.0 x) (- 1.0 x)) 5.0) (/ 1.0 y) (* (/ (fma 0.3333333333333333 x -1.3333333333333333) y) x)))
double code(double x, double y) {
double tmp;
if (((3.0 - x) * (1.0 - x)) <= 5.0) {
tmp = 1.0 / y;
} else {
tmp = (fma(0.3333333333333333, x, -1.3333333333333333) / y) * x;
}
return tmp;
}
# Julia translation of the branching rewrite (fma is built in).
function code(x, y) tmp = 0.0 if (Float64(Float64(3.0 - x) * Float64(1.0 - x)) <= 5.0) tmp = Float64(1.0 / y); else tmp = Float64(Float64(fma(0.3333333333333333, x, -1.3333333333333333) / y) * x); end return tmp end
(* Wolfram translation of the branching rewrite. *)
code[x_, y_] := If[LessEqual[N[(N[(3.0 - x), $MachinePrecision] * N[(1.0 - x), $MachinePrecision]), $MachinePrecision], 5.0], N[(1.0 / y), $MachinePrecision], N[(N[(N[(0.3333333333333333 * x + -1.3333333333333333), $MachinePrecision] / y), $MachinePrecision] * x), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\left(3 - x\right) \cdot \left(1 - x\right) \leq 5:\\
\;\;\;\;\frac{1}{y}\\
\mathbf{else}:\\
\;\;\;\;\frac{\mathsf{fma}\left(0.3333333333333333, x, -1.3333333333333333\right)}{y} \cdot x\\
\end{array}
\end{array}
if (*.f64 (-.f64 #s(literal 1 binary64) x) (-.f64 #s(literal 3 binary64) x)) < 5Initial program 99.6%
Taylor expanded in x around 0
lower-/.f6498.8
Applied rewrites98.8%
if 5 < (*.f64 (-.f64 #s(literal 1 binary64) x) (-.f64 #s(literal 3 binary64) x)) Initial program 84.3%
Taylor expanded in x around inf
sub-negN/A
associate-*r/N/A
metadata-evalN/A
distribute-lft-inN/A
associate-*r/N/A
metadata-evalN/A
associate-*r/N/A
unpow2N/A
associate-*l*N/A
*-commutativeN/A
associate-*l/N/A
distribute-neg-fracN/A
metadata-evalN/A
associate-*r/N/A
times-fracN/A
Applied rewrites98.4%
Applied rewrites98.4%
Final simplification98.6%
(FPCore (x y) :precision binary64 (if (<= (* (- 3.0 x) (- 1.0 x)) 5.0) (/ 1.0 y) (* (fma 0.3333333333333333 x -1.3333333333333333) (/ x y))))
double code(double x, double y) {
double tmp;
if (((3.0 - x) * (1.0 - x)) <= 5.0) {
tmp = 1.0 / y;
} else {
tmp = fma(0.3333333333333333, x, -1.3333333333333333) * (x / y);
}
return tmp;
}
# Julia translation of the branching rewrite (fma is built in).
function code(x, y) tmp = 0.0 if (Float64(Float64(3.0 - x) * Float64(1.0 - x)) <= 5.0) tmp = Float64(1.0 / y); else tmp = Float64(fma(0.3333333333333333, x, -1.3333333333333333) * Float64(x / y)); end return tmp end
(* Wolfram translation of the branching rewrite. *)
code[x_, y_] := If[LessEqual[N[(N[(3.0 - x), $MachinePrecision] * N[(1.0 - x), $MachinePrecision]), $MachinePrecision], 5.0], N[(1.0 / y), $MachinePrecision], N[(N[(0.3333333333333333 * x + -1.3333333333333333), $MachinePrecision] * N[(x / y), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\left(3 - x\right) \cdot \left(1 - x\right) \leq 5:\\
\;\;\;\;\frac{1}{y}\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(0.3333333333333333, x, -1.3333333333333333\right) \cdot \frac{x}{y}\\
\end{array}
\end{array}
if (*.f64 (-.f64 #s(literal 1 binary64) x) (-.f64 #s(literal 3 binary64) x)) < 5Initial program 99.6%
Taylor expanded in x around 0
lower-/.f6498.8
Applied rewrites98.8%
if 5 < (*.f64 (-.f64 #s(literal 1 binary64) x) (-.f64 #s(literal 3 binary64) x)) Initial program 84.3%
Taylor expanded in x around inf
sub-negN/A
associate-*r/N/A
metadata-evalN/A
distribute-lft-inN/A
associate-*r/N/A
metadata-evalN/A
associate-*r/N/A
unpow2N/A
associate-*l*N/A
*-commutativeN/A
associate-*l/N/A
distribute-neg-fracN/A
metadata-evalN/A
associate-*r/N/A
times-fracN/A
Applied rewrites98.4%
Final simplification98.6%
(FPCore (x y) :precision binary64 (if (<= (* (- 3.0 x) (- 1.0 x)) 10.0) (/ 1.0 y) (* (* (/ x y) 0.3333333333333333) x)))
double code(double x, double y) {
    /* Branching rewrite: 1/y when (3-x)*(1-x) <= 10; otherwise
       ((x/y) * 1/3) * x, the expanded form for large |x|. */
    if (((3.0 - x) * (1.0 - x)) <= 10.0) {
        return 1.0 / y;
    }
    return ((x / y) * 0.3333333333333333) * x;
}
! Fortran translation of the branching rewrite:
! 1/y when (3-x)*(1-x) <= 10, otherwise ((x/y)/3)*x.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8) :: tmp
if (((3.0d0 - x) * (1.0d0 - x)) <= 10.0d0) then
tmp = 1.0d0 / y
else
tmp = ((x / y) * 0.3333333333333333d0) * x
end if
code = tmp
end function
// Java translation of the same branching rewrite.
public static double code(double x, double y) {
double tmp;
if (((3.0 - x) * (1.0 - x)) <= 10.0) {
tmp = 1.0 / y;
} else {
tmp = ((x / y) * 0.3333333333333333) * x;
}
return tmp;
}
def code(x, y):
    """Branching rewrite of (1 - x) * (3 - x) / (3 * y).

    When (3 - x) * (1 - x) <= 10 the result is approximated by 1 / y;
    otherwise the expanded form ((x / y) / 3) * x is used.
    (The generated original was collapsed onto one line, which is not
    valid Python; this restores the intended statement structure.)
    """
    if ((3.0 - x) * (1.0 - x)) <= 10.0:
        return 1.0 / y
    return ((x / y) * 0.3333333333333333) * x
# Julia translation of the same branching rewrite.
function code(x, y) tmp = 0.0 if (Float64(Float64(3.0 - x) * Float64(1.0 - x)) <= 10.0) tmp = Float64(1.0 / y); else tmp = Float64(Float64(Float64(x / y) * 0.3333333333333333) * x); end return tmp end
% MATLAB translation of the same branching rewrite.
function tmp_2 = code(x, y) tmp = 0.0; if (((3.0 - x) * (1.0 - x)) <= 10.0) tmp = 1.0 / y; else tmp = ((x / y) * 0.3333333333333333) * x; end tmp_2 = tmp; end
(* Wolfram translation of the same branching rewrite. *)
code[x_, y_] := If[LessEqual[N[(N[(3.0 - x), $MachinePrecision] * N[(1.0 - x), $MachinePrecision]), $MachinePrecision], 10.0], N[(1.0 / y), $MachinePrecision], N[(N[(N[(x / y), $MachinePrecision] * 0.3333333333333333), $MachinePrecision] * x), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\left(3 - x\right) \cdot \left(1 - x\right) \leq 10:\\
\;\;\;\;\frac{1}{y}\\
\mathbf{else}:\\
\;\;\;\;\left(\frac{x}{y} \cdot 0.3333333333333333\right) \cdot x\\
\end{array}
\end{array}
if (*.f64 (-.f64 #s(literal 1 binary64) x) (-.f64 #s(literal 3 binary64) x)) < 10Initial program 99.6%
Taylor expanded in x around 0
lower-/.f6498.2
Applied rewrites98.2%
if 10 < (*.f64 (-.f64 #s(literal 1 binary64) x) (-.f64 #s(literal 3 binary64) x)) Initial program 84.2%
Taylor expanded in x around inf
unpow2N/A
associate-/l*N/A
*-commutativeN/A
associate-*l*N/A
lower-*.f64N/A
lower-*.f64N/A
lower-/.f6498.1
Applied rewrites98.1%
Final simplification98.1%
(FPCore (x y) :precision binary64 (* (- 3.0 x) (/ (fma -0.3333333333333333 x 0.3333333333333333) y)))
double code(double x, double y) {
return (3.0 - x) * (fma(-0.3333333333333333, x, 0.3333333333333333) / y);
}
# Julia translation of the factored fma rewrite.
function code(x, y) return Float64(Float64(3.0 - x) * Float64(fma(-0.3333333333333333, x, 0.3333333333333333) / y)) end
(* Wolfram translation of the factored rewrite. *)
code[x_, y_] := N[(N[(3.0 - x), $MachinePrecision] * N[(N[(-0.3333333333333333 * x + 0.3333333333333333), $MachinePrecision] / y), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(3 - x\right) \cdot \frac{\mathsf{fma}\left(-0.3333333333333333, x, 0.3333333333333333\right)}{y}
\end{array}
Initial program 92.1%
Taylor expanded in y around 0
*-commutativeN/A
*-commutativeN/A
associate-/l*N/A
associate-*l*N/A
*-commutativeN/A
lower-*.f64N/A
associate-*l/N/A
*-commutativeN/A
lower-/.f64N/A
sub-negN/A
mul-1-negN/A
+-commutativeN/A
distribute-lft-inN/A
mul-1-negN/A
distribute-rgt-neg-outN/A
distribute-lft-neg-inN/A
metadata-evalN/A
lower-fma.f64N/A
metadata-evalN/A
lower--.f6499.5
Applied rewrites99.5%
Final simplification99.5%
(FPCore (x y) :precision binary64 (if (<= x -0.75) (* (/ -1.3333333333333333 y) x) (/ 1.0 y)))
double code(double x, double y) {
    /* Branching rewrite on x alone: linear-over-y form for x <= -0.75,
       constant-numerator form 1/y otherwise. */
    if (x <= -0.75) {
        return (-1.3333333333333333 / y) * x;
    }
    return 1.0 / y;
}
! Fortran translation of the branching rewrite on x:
! (-4/3 / y) * x for x <= -0.75, otherwise 1/y.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8) :: tmp
if (x <= (-0.75d0)) then
tmp = ((-1.3333333333333333d0) / y) * x
else
tmp = 1.0d0 / y
end if
code = tmp
end function
// Java translation of the same branching rewrite.
public static double code(double x, double y) {
double tmp;
if (x <= -0.75) {
tmp = (-1.3333333333333333 / y) * x;
} else {
tmp = 1.0 / y;
}
return tmp;
}
def code(x, y):
    """Branching rewrite on x alone: (-4/3 / y) * x for x <= -0.75,
    otherwise the constant-numerator form 1 / y.

    (The generated original was collapsed onto one line, which is not
    valid Python; this restores the intended statement structure.)
    """
    if x <= -0.75:
        return (-1.3333333333333333 / y) * x
    return 1.0 / y
# Julia translation of the same branching rewrite.
function code(x, y) tmp = 0.0 if (x <= -0.75) tmp = Float64(Float64(-1.3333333333333333 / y) * x); else tmp = Float64(1.0 / y); end return tmp end
% MATLAB translation of the same branching rewrite.
function tmp_2 = code(x, y) tmp = 0.0; if (x <= -0.75) tmp = (-1.3333333333333333 / y) * x; else tmp = 1.0 / y; end tmp_2 = tmp; end
(* Wolfram translation of the same branching rewrite. *)
code[x_, y_] := If[LessEqual[x, -0.75], N[(N[(-1.3333333333333333 / y), $MachinePrecision] * x), $MachinePrecision], N[(1.0 / y), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -0.75:\\
\;\;\;\;\frac{-1.3333333333333333}{y} \cdot x\\
\mathbf{else}:\\
\;\;\;\;\frac{1}{y}\\
\end{array}
\end{array}
if x < -0.75Initial program 82.1%
Taylor expanded in x around inf
sub-negN/A
associate-*r/N/A
metadata-evalN/A
distribute-lft-inN/A
associate-*r/N/A
metadata-evalN/A
associate-*r/N/A
unpow2N/A
associate-*l*N/A
*-commutativeN/A
associate-*l/N/A
distribute-neg-fracN/A
metadata-evalN/A
associate-*r/N/A
times-fracN/A
Applied rewrites97.4%
Taylor expanded in x around 0
Applied rewrites25.8%
if -0.75 < x Initial program 95.0%
Taylor expanded in x around 0
lower-/.f6466.5
Applied rewrites66.5%
(FPCore (x y) :precision binary64 (/ (- 3.0 x) (* y 3.0)))
double code(double x, double y) {
    /* Truncated form: (3 - x) / (3 * y). */
    const double num = 3.0 - x;
    return num / (y * 3.0);
}
! Fortran translation of the truncated form (3 - x) / (3 * y).
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = (3.0d0 - x) / (y * 3.0d0)
end function
// Java translation of the truncated form.
public static double code(double x, double y) {
return (3.0 - x) / (y * 3.0);
}
# Python translation of the truncated form.
def code(x, y): return (3.0 - x) / (y * 3.0)
# Julia translation of the truncated form.
function code(x, y) return Float64(Float64(3.0 - x) / Float64(y * 3.0)) end
% MATLAB translation of the truncated form.
function tmp = code(x, y) tmp = (3.0 - x) / (y * 3.0); end
(* Wolfram translation of the truncated form. *)
code[x_, y_] := N[(N[(3.0 - x), $MachinePrecision] / N[(y * 3.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{3 - x}{y \cdot 3}
\end{array}
Initial program 92.1%
lift-/.f64N/A
lift-*.f64N/A
lift-*.f64N/A
times-fracN/A
clear-numN/A
frac-timesN/A
*-lft-identityN/A
lower-/.f64N/A
lower-*.f64N/A
lower-/.f6499.6
Applied rewrites99.6%
Taylor expanded in x around 0
lower-*.f6456.0
Applied rewrites56.0%
Final simplification56.0%
(FPCore (x y) :precision binary64 (* (/ -0.3333333333333333 y) (- x 3.0)))
double code(double x, double y) {
    /* Scaled form: (-1/3 / y) * (x - 3). */
    const double coeff = -0.3333333333333333 / y;
    return coeff * (x - 3.0);
}
! Fortran translation of the scaled form (-1/3 / y) * (x - 3).
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = ((-0.3333333333333333d0) / y) * (x - 3.0d0)
end function
// Java translation of the scaled form.
public static double code(double x, double y) {
return (-0.3333333333333333 / y) * (x - 3.0);
}
# Python translation of the scaled form.
def code(x, y): return (-0.3333333333333333 / y) * (x - 3.0)
# Julia translation of the scaled form.
function code(x, y) return Float64(Float64(-0.3333333333333333 / y) * Float64(x - 3.0)) end
% MATLAB translation of the scaled form.
function tmp = code(x, y) tmp = (-0.3333333333333333 / y) * (x - 3.0); end
(* Wolfram translation of the scaled form. *)
code[x_, y_] := N[(N[(-0.3333333333333333 / y), $MachinePrecision] * N[(x - 3.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{-0.3333333333333333}{y} \cdot \left(x - 3\right)
\end{array}
Initial program 92.1%
lift-/.f64N/A
lift-*.f64N/A
lift-*.f64N/A
times-fracN/A
clear-numN/A
frac-timesN/A
*-lft-identityN/A
lower-/.f64N/A
lower-*.f64N/A
lower-/.f6499.6
Applied rewrites99.6%
lift-/.f64N/A
lift-*.f64N/A
associate-/r*N/A
lift-/.f64N/A
associate-/r/N/A
lift-/.f64N/A
associate-/l*N/A
lift-/.f64N/A
frac-2negN/A
div-invN/A
div-invN/A
metadata-evalN/A
associate-*l*N/A
lower-*.f64N/A
neg-sub0N/A
lift--.f64N/A
sub-negN/A
+-commutativeN/A
associate--r+N/A
neg-sub0N/A
remove-double-negN/A
lower--.f64N/A
lower-*.f64N/A
Applied rewrites99.6%
Taylor expanded in x around 0
lower-/.f6456.0
Applied rewrites56.0%
Final simplification56.0%
(FPCore (x y) :precision binary64 (/ 1.0 y))
double code(double x, double y) {
    /* Fully truncated approximation: the reciprocal of y (x unused). */
    (void)x;
    return 1.0 / y;
}
! Fortran translation of the fully truncated approximation 1 / y (x unused).
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = 1.0d0 / y
end function
// Java translation of the truncated approximation.
public static double code(double x, double y) {
return 1.0 / y;
}
# Python translation of the truncated approximation.
def code(x, y): return 1.0 / y
# Julia translation of the truncated approximation.
function code(x, y) return Float64(1.0 / y) end
% MATLAB translation of the truncated approximation.
function tmp = code(x, y) tmp = 1.0 / y; end
(* Wolfram translation of the truncated approximation. *)
code[x_, y_] := N[(1.0 / y), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{y}
\end{array}
Initial program 92.1%
Taylor expanded in x around 0
lower-/.f6452.6
Applied rewrites52.6%
(FPCore (x y) :precision binary64 (* (/ (- 1.0 x) y) (/ (- 3.0 x) 3.0)))
double code(double x, double y) {
    /* Regrouped quotient: ((1-x)/y) * ((3-x)/3); pairing per the alt FPCore. */
    const double left = (1.0 - x) / y;
    const double right = (3.0 - x) / 3.0;
    return left * right;
}
! Fortran translation of the regrouped quotient ((1-x)/y) * ((3-x)/3).
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = ((1.0d0 - x) / y) * ((3.0d0 - x) / 3.0d0)
end function
// Java translation of the regrouped quotient.
public static double code(double x, double y) {
return ((1.0 - x) / y) * ((3.0 - x) / 3.0);
}
# Python translation of the regrouped quotient.
def code(x, y): return ((1.0 - x) / y) * ((3.0 - x) / 3.0)
# Julia translation of the regrouped quotient.
function code(x, y) return Float64(Float64(Float64(1.0 - x) / y) * Float64(Float64(3.0 - x) / 3.0)) end
% MATLAB translation of the regrouped quotient.
function tmp = code(x, y) tmp = ((1.0 - x) / y) * ((3.0 - x) / 3.0); end
(* Wolfram translation of the regrouped quotient. *)
code[x_, y_] := N[(N[(N[(1.0 - x), $MachinePrecision] / y), $MachinePrecision] * N[(N[(3.0 - x), $MachinePrecision] / 3.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1 - x}{y} \cdot \frac{3 - x}{3}
\end{array}
herbie shell --seed 2024327
(FPCore (x y)
:name "Diagrams.TwoD.Arc:bezierFromSweepQ1 from diagrams-lib-1.3.0.3"
:precision binary64
:alt
(! :herbie-platform default (* (/ (- 1 x) y) (/ (- 3 x) 3)))
(/ (* (- 1.0 x) (- 3.0 x)) (* y 3.0)))