
(FPCore (x y) :precision binary64 (/ (* (- 1.0 x) (- 3.0 x)) (* y 3.0)))
/* Herbie-generated binary64 kernel for (1-x)*(3-x) / (y*3).
 * Operation order follows the FPCore spec exactly; do not re-associate,
 * as that changes the rounding behavior the report measures. */
double code(double x, double y) {
return ((1.0 - x) * (3.0 - x)) / (y * 3.0);
}
! Herbie-generated binary64 kernel for (1-x)*(3-x) / (y*3).
! Evaluation order follows the FPCore spec; do not re-associate.
real(8) function code(x, y)
implicit none
real(8), intent (in) :: x
real(8), intent (in) :: y
code = ((1.0d0 - x) * (3.0d0 - x)) / (y * 3.0d0)
end function
/**
 * Herbie-generated binary64 kernel for (1-x)*(3-x) / (y*3).
 * Operation order follows the FPCore spec exactly; do not re-associate.
 */
public static double code(double x, double y) {
return ((1.0 - x) * (3.0 - x)) / (y * 3.0);
}
def code(x, y): return ((1.0 - x) * (3.0 - x)) / (y * 3.0)
function code(x, y) return Float64(Float64(Float64(1.0 - x) * Float64(3.0 - x)) / Float64(y * 3.0)) end
function tmp = code(x, y) tmp = ((1.0 - x) * (3.0 - x)) / (y * 3.0); end
code[x_, y_] := N[(N[(N[(1.0 - x), $MachinePrecision] * N[(3.0 - x), $MachinePrecision]), $MachinePrecision] / N[(y * 3.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(1 - x\right) \cdot \left(3 - x\right)}{y \cdot 3}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 12 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x y) :precision binary64 (/ (* (- 1.0 x) (- 3.0 x)) (* y 3.0)))
double code(double x, double y) {
return ((1.0 - x) * (3.0 - x)) / (y * 3.0);
}
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = ((1.0d0 - x) * (3.0d0 - x)) / (y * 3.0d0)
end function
public static double code(double x, double y) {
return ((1.0 - x) * (3.0 - x)) / (y * 3.0);
}
def code(x, y): return ((1.0 - x) * (3.0 - x)) / (y * 3.0)
function code(x, y) return Float64(Float64(Float64(1.0 - x) * Float64(3.0 - x)) / Float64(y * 3.0)) end
function tmp = code(x, y) tmp = ((1.0 - x) * (3.0 - x)) / (y * 3.0); end
code[x_, y_] := N[(N[(N[(1.0 - x), $MachinePrecision] * N[(3.0 - x), $MachinePrecision]), $MachinePrecision] / N[(y * 3.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(1 - x\right) \cdot \left(3 - x\right)}{y \cdot 3}
\end{array}
(FPCore (x y) :precision binary64 (/ (- 1.0 x) (* y (/ 3.0 (- 3.0 x)))))
double code(double x, double y) {
return (1.0 - x) / (y * (3.0 / (3.0 - x)));
}
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = (1.0d0 - x) / (y * (3.0d0 / (3.0d0 - x)))
end function
public static double code(double x, double y) {
return (1.0 - x) / (y * (3.0 / (3.0 - x)));
}
def code(x, y): return (1.0 - x) / (y * (3.0 / (3.0 - x)))
function code(x, y) return Float64(Float64(1.0 - x) / Float64(y * Float64(3.0 / Float64(3.0 - x)))) end
function tmp = code(x, y) tmp = (1.0 - x) / (y * (3.0 / (3.0 - x))); end
code[x_, y_] := N[(N[(1.0 - x), $MachinePrecision] / N[(y * N[(3.0 / N[(3.0 - x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1 - x}{y \cdot \frac{3}{3 - x}}
\end{array}
Initial program 92.1%
associate-/l* 99.3%
*-commutative 99.3%
Simplified 99.3%
clear-num 99.3%
un-div-inv 99.4%
*-commutative 99.4%
associate-/l* 99.8%
Applied egg-rr 99.8%
Final simplification 99.8%
(FPCore (x y) :precision binary64 (if (or (<= x -1.72) (not (<= x 1.72))) (* 0.3333333333333333 (* (/ x y) (+ x -4.0))) (/ (+ 1.0 (* x -1.3333333333333333)) y)))
/* Herbie regime-split kernel for (1-x)*(3-x) / (y*3).
 * For x outside (-1.72, 1.72] it uses the re-associated product form;
 * otherwise it uses the Taylor-linearized form (1 - 4x/3)/y
 * (see the report's "Taylor expanded in x around 0" derivation step).
 * Operation order is fixed by the FPCore spec; do not re-associate. */
double code(double x, double y) {
double tmp;
if ((x <= -1.72) || !(x <= 1.72)) {
tmp = 0.3333333333333333 * ((x / y) * (x + -4.0));
} else {
tmp = (1.0 + (x * -1.3333333333333333)) / y;
}
return tmp;
}
! Herbie regime-split kernel for (1-x)*(3-x) / (y*3).
! For x outside (-1.72, 1.72] the re-associated product form is used;
! otherwise the Taylor-linearized form (1 - 4x/3)/y (per the report's
! "Taylor expanded in x around 0" step). Evaluation order is fixed by
! the FPCore spec; do not re-associate.
real(8) function code(x, y)
implicit none
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8) :: tmp
if ((x <= (-1.72d0)) .or. (.not. (x <= 1.72d0))) then
tmp = 0.3333333333333333d0 * ((x / y) * (x + (-4.0d0)))
else
tmp = (1.0d0 + (x * (-1.3333333333333333d0))) / y
end if
code = tmp
end function
public static double code(double x, double y) {
double tmp;
if ((x <= -1.72) || !(x <= 1.72)) {
tmp = 0.3333333333333333 * ((x / y) * (x + -4.0));
} else {
tmp = (1.0 + (x * -1.3333333333333333)) / y;
}
return tmp;
}
def code(x, y): tmp = 0 if (x <= -1.72) or not (x <= 1.72): tmp = 0.3333333333333333 * ((x / y) * (x + -4.0)) else: tmp = (1.0 + (x * -1.3333333333333333)) / y return tmp
function code(x, y) tmp = 0.0 if ((x <= -1.72) || !(x <= 1.72)) tmp = Float64(0.3333333333333333 * Float64(Float64(x / y) * Float64(x + -4.0))); else tmp = Float64(Float64(1.0 + Float64(x * -1.3333333333333333)) / y); end return tmp end
function tmp_2 = code(x, y) tmp = 0.0; if ((x <= -1.72) || ~((x <= 1.72))) tmp = 0.3333333333333333 * ((x / y) * (x + -4.0)); else tmp = (1.0 + (x * -1.3333333333333333)) / y; end tmp_2 = tmp; end
code[x_, y_] := If[Or[LessEqual[x, -1.72], N[Not[LessEqual[x, 1.72]], $MachinePrecision]], N[(0.3333333333333333 * N[(N[(x / y), $MachinePrecision] * N[(x + -4.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(1.0 + N[(x * -1.3333333333333333), $MachinePrecision]), $MachinePrecision] / y), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -1.72 \lor \neg \left(x \leq 1.72\right):\\
\;\;\;\;0.3333333333333333 \cdot \left(\frac{x}{y} \cdot \left(x + -4\right)\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{1 + x \cdot -1.3333333333333333}{y}\\
\end{array}
\end{array}
if x < -1.71999999999999997 or 1.71999999999999997 < x Initial program 83.4%
Taylor expanded in x around inf 82.5%
+-commutative82.5%
unpow282.5%
distribute-rgt-out82.5%
Simplified82.5%
associate-/r*82.4%
div-inv82.3%
*-commutative82.3%
associate-/l*98.6%
metadata-eval98.6%
Applied egg-rr98.6%
if -1.71999999999999997 < x < 1.71999999999999997 Initial program 99.0%
associate-/l*99.0%
*-rgt-identity99.0%
remove-double-neg99.0%
distribute-lft-neg-out99.0%
neg-mul-199.0%
times-frac98.9%
*-rgt-identity98.9%
associate-/l*98.9%
metadata-eval98.9%
*-commutative98.9%
sub-neg98.9%
+-commutative98.9%
distribute-lft-in98.9%
neg-mul-198.9%
remove-double-neg98.9%
metadata-eval98.9%
distribute-lft-neg-out98.9%
*-commutative98.9%
distribute-lft-neg-in98.9%
associate-/r*99.4%
metadata-eval99.4%
metadata-eval99.4%
Simplified99.4%
Taylor expanded in x around 0 97.9%
Taylor expanded in y around 0 97.8%
Final simplification98.2%
(FPCore (x y) :precision binary64 (if (or (<= x -1.72) (not (<= x 1.72))) (* (/ x y) (* (+ x -4.0) 0.3333333333333333)) (/ (+ 1.0 (* x -1.3333333333333333)) y)))
double code(double x, double y) {
double tmp;
if ((x <= -1.72) || !(x <= 1.72)) {
tmp = (x / y) * ((x + -4.0) * 0.3333333333333333);
} else {
tmp = (1.0 + (x * -1.3333333333333333)) / y;
}
return tmp;
}
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8) :: tmp
if ((x <= (-1.72d0)) .or. (.not. (x <= 1.72d0))) then
tmp = (x / y) * ((x + (-4.0d0)) * 0.3333333333333333d0)
else
tmp = (1.0d0 + (x * (-1.3333333333333333d0))) / y
end if
code = tmp
end function
public static double code(double x, double y) {
double tmp;
if ((x <= -1.72) || !(x <= 1.72)) {
tmp = (x / y) * ((x + -4.0) * 0.3333333333333333);
} else {
tmp = (1.0 + (x * -1.3333333333333333)) / y;
}
return tmp;
}
def code(x, y): tmp = 0 if (x <= -1.72) or not (x <= 1.72): tmp = (x / y) * ((x + -4.0) * 0.3333333333333333) else: tmp = (1.0 + (x * -1.3333333333333333)) / y return tmp
function code(x, y) tmp = 0.0 if ((x <= -1.72) || !(x <= 1.72)) tmp = Float64(Float64(x / y) * Float64(Float64(x + -4.0) * 0.3333333333333333)); else tmp = Float64(Float64(1.0 + Float64(x * -1.3333333333333333)) / y); end return tmp end
function tmp_2 = code(x, y) tmp = 0.0; if ((x <= -1.72) || ~((x <= 1.72))) tmp = (x / y) * ((x + -4.0) * 0.3333333333333333); else tmp = (1.0 + (x * -1.3333333333333333)) / y; end tmp_2 = tmp; end
code[x_, y_] := If[Or[LessEqual[x, -1.72], N[Not[LessEqual[x, 1.72]], $MachinePrecision]], N[(N[(x / y), $MachinePrecision] * N[(N[(x + -4.0), $MachinePrecision] * 0.3333333333333333), $MachinePrecision]), $MachinePrecision], N[(N[(1.0 + N[(x * -1.3333333333333333), $MachinePrecision]), $MachinePrecision] / y), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -1.72 \lor \neg \left(x \leq 1.72\right):\\
\;\;\;\;\frac{x}{y} \cdot \left(\left(x + -4\right) \cdot 0.3333333333333333\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{1 + x \cdot -1.3333333333333333}{y}\\
\end{array}
\end{array}
if x < -1.71999999999999997 or 1.71999999999999997 < x Initial program 83.4%
Taylor expanded in x around inf 82.5%
+-commutative82.5%
unpow282.5%
distribute-rgt-out82.5%
Simplified82.5%
times-frac98.8%
div-inv98.7%
metadata-eval98.7%
Applied egg-rr98.7%
if -1.71999999999999997 < x < 1.71999999999999997 Initial program 99.0%
associate-/l*99.0%
*-rgt-identity99.0%
remove-double-neg99.0%
distribute-lft-neg-out99.0%
neg-mul-199.0%
times-frac98.9%
*-rgt-identity98.9%
associate-/l*98.9%
metadata-eval98.9%
*-commutative98.9%
sub-neg98.9%
+-commutative98.9%
distribute-lft-in98.9%
neg-mul-198.9%
remove-double-neg98.9%
metadata-eval98.9%
distribute-lft-neg-out98.9%
*-commutative98.9%
distribute-lft-neg-in98.9%
associate-/r*99.4%
metadata-eval99.4%
metadata-eval99.4%
Simplified99.4%
Taylor expanded in x around 0 97.9%
Taylor expanded in y around 0 97.8%
Final simplification98.2%
(FPCore (x y) :precision binary64 (if (or (<= x -1.72) (not (<= x 1.72))) (* (/ x y) (* (+ x -4.0) 0.3333333333333333)) (+ (* (/ x y) -1.3333333333333333) (/ 1.0 y))))
double code(double x, double y) {
double tmp;
if ((x <= -1.72) || !(x <= 1.72)) {
tmp = (x / y) * ((x + -4.0) * 0.3333333333333333);
} else {
tmp = ((x / y) * -1.3333333333333333) + (1.0 / y);
}
return tmp;
}
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8) :: tmp
if ((x <= (-1.72d0)) .or. (.not. (x <= 1.72d0))) then
tmp = (x / y) * ((x + (-4.0d0)) * 0.3333333333333333d0)
else
tmp = ((x / y) * (-1.3333333333333333d0)) + (1.0d0 / y)
end if
code = tmp
end function
public static double code(double x, double y) {
double tmp;
if ((x <= -1.72) || !(x <= 1.72)) {
tmp = (x / y) * ((x + -4.0) * 0.3333333333333333);
} else {
tmp = ((x / y) * -1.3333333333333333) + (1.0 / y);
}
return tmp;
}
def code(x, y): tmp = 0 if (x <= -1.72) or not (x <= 1.72): tmp = (x / y) * ((x + -4.0) * 0.3333333333333333) else: tmp = ((x / y) * -1.3333333333333333) + (1.0 / y) return tmp
function code(x, y) tmp = 0.0 if ((x <= -1.72) || !(x <= 1.72)) tmp = Float64(Float64(x / y) * Float64(Float64(x + -4.0) * 0.3333333333333333)); else tmp = Float64(Float64(Float64(x / y) * -1.3333333333333333) + Float64(1.0 / y)); end return tmp end
function tmp_2 = code(x, y) tmp = 0.0; if ((x <= -1.72) || ~((x <= 1.72))) tmp = (x / y) * ((x + -4.0) * 0.3333333333333333); else tmp = ((x / y) * -1.3333333333333333) + (1.0 / y); end tmp_2 = tmp; end
code[x_, y_] := If[Or[LessEqual[x, -1.72], N[Not[LessEqual[x, 1.72]], $MachinePrecision]], N[(N[(x / y), $MachinePrecision] * N[(N[(x + -4.0), $MachinePrecision] * 0.3333333333333333), $MachinePrecision]), $MachinePrecision], N[(N[(N[(x / y), $MachinePrecision] * -1.3333333333333333), $MachinePrecision] + N[(1.0 / y), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -1.72 \lor \neg \left(x \leq 1.72\right):\\
\;\;\;\;\frac{x}{y} \cdot \left(\left(x + -4\right) \cdot 0.3333333333333333\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{x}{y} \cdot -1.3333333333333333 + \frac{1}{y}\\
\end{array}
\end{array}
if x < -1.71999999999999997 or 1.71999999999999997 < x Initial program 83.4%
Taylor expanded in x around inf 82.5%
+-commutative82.5%
unpow282.5%
distribute-rgt-out82.5%
Simplified82.5%
times-frac98.8%
div-inv98.7%
metadata-eval98.7%
Applied egg-rr98.7%
if -1.71999999999999997 < x < 1.71999999999999997Initial program 99.0%
associate-/l*99.0%
*-rgt-identity99.0%
remove-double-neg99.0%
distribute-lft-neg-out99.0%
neg-mul-199.0%
times-frac98.9%
*-rgt-identity98.9%
associate-/l*98.9%
metadata-eval98.9%
*-commutative98.9%
sub-neg98.9%
+-commutative98.9%
distribute-lft-in98.9%
neg-mul-198.9%
remove-double-neg98.9%
metadata-eval98.9%
distribute-lft-neg-out98.9%
*-commutative98.9%
distribute-lft-neg-in98.9%
associate-/r*99.4%
metadata-eval99.4%
metadata-eval99.4%
Simplified99.4%
Taylor expanded in x around 0 97.9%
Final simplification98.2%
(FPCore (x y) :precision binary64 (* (- 1.0 x) (* (+ x -3.0) (/ -0.3333333333333333 y))))
double code(double x, double y) {
return (1.0 - x) * ((x + -3.0) * (-0.3333333333333333 / y));
}
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = (1.0d0 - x) * ((x + (-3.0d0)) * ((-0.3333333333333333d0) / y))
end function
public static double code(double x, double y) {
return (1.0 - x) * ((x + -3.0) * (-0.3333333333333333 / y));
}
def code(x, y): return (1.0 - x) * ((x + -3.0) * (-0.3333333333333333 / y))
function code(x, y) return Float64(Float64(1.0 - x) * Float64(Float64(x + -3.0) * Float64(-0.3333333333333333 / y))) end
function tmp = code(x, y) tmp = (1.0 - x) * ((x + -3.0) * (-0.3333333333333333 / y)); end
code[x_, y_] := N[(N[(1.0 - x), $MachinePrecision] * N[(N[(x + -3.0), $MachinePrecision] * N[(-0.3333333333333333 / y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(1 - x\right) \cdot \left(\left(x + -3\right) \cdot \frac{-0.3333333333333333}{y}\right)
\end{array}
Initial program 92.1%
associate-/l*99.3%
*-rgt-identity99.3%
remove-double-neg99.3%
distribute-lft-neg-out99.3%
neg-mul-199.3%
times-frac99.2%
*-rgt-identity99.2%
associate-/l*99.2%
metadata-eval99.2%
*-commutative99.2%
sub-neg99.2%
+-commutative99.2%
distribute-lft-in99.2%
neg-mul-199.2%
remove-double-neg99.2%
metadata-eval99.2%
distribute-lft-neg-out99.2%
*-commutative99.2%
distribute-lft-neg-in99.2%
associate-/r*99.5%
metadata-eval99.5%
metadata-eval99.5%
Simplified99.5%
Final simplification99.5%
(FPCore (x y) :precision binary64 (* (/ (- 3.0 x) y) (/ (- 1.0 x) 3.0)))
double code(double x, double y) {
return ((3.0 - x) / y) * ((1.0 - x) / 3.0);
}
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = ((3.0d0 - x) / y) * ((1.0d0 - x) / 3.0d0)
end function
public static double code(double x, double y) {
return ((3.0 - x) / y) * ((1.0 - x) / 3.0);
}
def code(x, y): return ((3.0 - x) / y) * ((1.0 - x) / 3.0)
function code(x, y) return Float64(Float64(Float64(3.0 - x) / y) * Float64(Float64(1.0 - x) / 3.0)) end
function tmp = code(x, y) tmp = ((3.0 - x) / y) * ((1.0 - x) / 3.0); end
code[x_, y_] := N[(N[(N[(3.0 - x), $MachinePrecision] / y), $MachinePrecision] * N[(N[(1.0 - x), $MachinePrecision] / 3.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{3 - x}{y} \cdot \frac{1 - x}{3}
\end{array}
Initial program 92.1%
*-commutative92.1%
times-frac99.6%
Applied egg-rr99.6%
Final simplification99.6%
(FPCore (x y) :precision binary64 (if (<= x -0.75) (* (/ x y) -1.3333333333333333) (/ 1.0 y)))
double code(double x, double y) {
double tmp;
if (x <= -0.75) {
tmp = (x / y) * -1.3333333333333333;
} else {
tmp = 1.0 / y;
}
return tmp;
}
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8) :: tmp
if (x <= (-0.75d0)) then
tmp = (x / y) * (-1.3333333333333333d0)
else
tmp = 1.0d0 / y
end if
code = tmp
end function
public static double code(double x, double y) {
double tmp;
if (x <= -0.75) {
tmp = (x / y) * -1.3333333333333333;
} else {
tmp = 1.0 / y;
}
return tmp;
}
def code(x, y): tmp = 0 if x <= -0.75: tmp = (x / y) * -1.3333333333333333 else: tmp = 1.0 / y return tmp
function code(x, y) tmp = 0.0 if (x <= -0.75) tmp = Float64(Float64(x / y) * -1.3333333333333333); else tmp = Float64(1.0 / y); end return tmp end
function tmp_2 = code(x, y) tmp = 0.0; if (x <= -0.75) tmp = (x / y) * -1.3333333333333333; else tmp = 1.0 / y; end tmp_2 = tmp; end
code[x_, y_] := If[LessEqual[x, -0.75], N[(N[(x / y), $MachinePrecision] * -1.3333333333333333), $MachinePrecision], N[(1.0 / y), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -0.75:\\
\;\;\;\;\frac{x}{y} \cdot -1.3333333333333333\\
\mathbf{else}:\\
\;\;\;\;\frac{1}{y}\\
\end{array}
\end{array}
if x < -0.75Initial program 86.6%
associate-/l*99.5%
*-rgt-identity99.5%
remove-double-neg99.5%
distribute-lft-neg-out99.5%
neg-mul-199.5%
times-frac99.4%
*-rgt-identity99.4%
associate-/l*99.4%
metadata-eval99.4%
*-commutative99.4%
sub-neg99.4%
+-commutative99.4%
distribute-lft-in99.4%
neg-mul-199.4%
remove-double-neg99.4%
metadata-eval99.4%
distribute-lft-neg-out99.4%
*-commutative99.4%
distribute-lft-neg-in99.4%
associate-/r*99.5%
metadata-eval99.5%
metadata-eval99.5%
Simplified99.5%
Taylor expanded in x around 0 23.4%
Taylor expanded in x around inf 23.4%
if -0.75 < x Initial program 93.4%
associate-/l*99.2%
*-commutative99.2%
Simplified99.2%
clear-num99.3%
un-div-inv99.3%
*-commutative99.3%
associate-/l*99.9%
Applied egg-rr99.9%
Taylor expanded in x around 0 67.6%
Final simplification58.9%
(FPCore (x y) :precision binary64 (if (<= x -1.0) (/ x (- y)) (/ 1.0 y)))
double code(double x, double y) {
double tmp;
if (x <= -1.0) {
tmp = x / -y;
} else {
tmp = 1.0 / y;
}
return tmp;
}
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8) :: tmp
if (x <= (-1.0d0)) then
tmp = x / -y
else
tmp = 1.0d0 / y
end if
code = tmp
end function
public static double code(double x, double y) {
double tmp;
if (x <= -1.0) {
tmp = x / -y;
} else {
tmp = 1.0 / y;
}
return tmp;
}
def code(x, y): tmp = 0 if x <= -1.0: tmp = x / -y else: tmp = 1.0 / y return tmp
function code(x, y) tmp = 0.0 if (x <= -1.0) tmp = Float64(x / Float64(-y)); else tmp = Float64(1.0 / y); end return tmp end
function tmp_2 = code(x, y) tmp = 0.0; if (x <= -1.0) tmp = x / -y; else tmp = 1.0 / y; end tmp_2 = tmp; end
code[x_, y_] := If[LessEqual[x, -1.0], N[(x / (-y)), $MachinePrecision], N[(1.0 / y), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -1:\\
\;\;\;\;\frac{x}{-y}\\
\mathbf{else}:\\
\;\;\;\;\frac{1}{y}\\
\end{array}
\end{array}
if x < -1Initial program 86.6%
associate-/l*99.5%
*-commutative99.5%
Simplified99.5%
clear-num99.4%
un-div-inv99.5%
*-commutative99.5%
associate-/l*99.6%
Applied egg-rr99.6%
Taylor expanded in x around 0 23.4%
Taylor expanded in x around inf 23.4%
neg-mul-123.4%
distribute-neg-frac223.4%
Simplified23.4%
if -1 < x Initial program 93.4%
associate-/l*99.2%
*-commutative99.2%
Simplified99.2%
clear-num99.3%
un-div-inv99.3%
*-commutative99.3%
associate-/l*99.9%
Applied egg-rr99.9%
Taylor expanded in x around 0 67.6%
Final simplification58.9%
(FPCore (x y) :precision binary64 (* (- 1.0 x) (/ 1.0 y)))
double code(double x, double y) {
return (1.0 - x) * (1.0 / y);
}
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = (1.0d0 - x) * (1.0d0 / y)
end function
public static double code(double x, double y) {
return (1.0 - x) * (1.0 / y);
}
def code(x, y): return (1.0 - x) * (1.0 / y)
function code(x, y) return Float64(Float64(1.0 - x) * Float64(1.0 / y)) end
function tmp = code(x, y) tmp = (1.0 - x) * (1.0 / y); end
code[x_, y_] := N[(N[(1.0 - x), $MachinePrecision] * N[(1.0 / y), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(1 - x\right) \cdot \frac{1}{y}
\end{array}
Initial program 92.1%
associate-/l*99.3%
*-rgt-identity99.3%
remove-double-neg99.3%
distribute-lft-neg-out99.3%
neg-mul-199.3%
times-frac99.2%
*-rgt-identity99.2%
associate-/l*99.2%
metadata-eval99.2%
*-commutative99.2%
sub-neg99.2%
+-commutative99.2%
distribute-lft-in99.2%
neg-mul-199.2%
remove-double-neg99.2%
metadata-eval99.2%
distribute-lft-neg-out99.2%
*-commutative99.2%
distribute-lft-neg-in99.2%
associate-/r*99.5%
metadata-eval99.5%
metadata-eval99.5%
Simplified99.5%
Taylor expanded in x around 0 58.1%
Final simplification58.1%
(FPCore (x y) :precision binary64 (/ (+ 1.0 (* x -1.3333333333333333)) y))
double code(double x, double y) {
return (1.0 + (x * -1.3333333333333333)) / y;
}
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = (1.0d0 + (x * (-1.3333333333333333d0))) / y
end function
public static double code(double x, double y) {
return (1.0 + (x * -1.3333333333333333)) / y;
}
def code(x, y): return (1.0 + (x * -1.3333333333333333)) / y
function code(x, y) return Float64(Float64(1.0 + Float64(x * -1.3333333333333333)) / y) end
function tmp = code(x, y) tmp = (1.0 + (x * -1.3333333333333333)) / y; end
code[x_, y_] := N[(N[(1.0 + N[(x * -1.3333333333333333), $MachinePrecision]), $MachinePrecision] / y), $MachinePrecision]
\begin{array}{l}
\\
\frac{1 + x \cdot -1.3333333333333333}{y}
\end{array}
Initial program 92.1%
associate-/l*99.3%
*-rgt-identity99.3%
remove-double-neg99.3%
distribute-lft-neg-out99.3%
neg-mul-199.3%
times-frac99.2%
*-rgt-identity99.2%
associate-/l*99.2%
metadata-eval99.2%
*-commutative99.2%
sub-neg99.2%
+-commutative99.2%
distribute-lft-in99.2%
neg-mul-199.2%
remove-double-neg99.2%
metadata-eval99.2%
distribute-lft-neg-out99.2%
*-commutative99.2%
distribute-lft-neg-in99.2%
associate-/r*99.5%
metadata-eval99.5%
metadata-eval99.5%
Simplified99.5%
Taylor expanded in x around 0 59.1%
Taylor expanded in y around 0 59.0%
Final simplification59.0%
(FPCore (x y) :precision binary64 (/ (- 1.0 x) y))
double code(double x, double y) {
return (1.0 - x) / y;
}
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = (1.0d0 - x) / y
end function
public static double code(double x, double y) {
return (1.0 - x) / y;
}
def code(x, y): return (1.0 - x) / y
function code(x, y) return Float64(Float64(1.0 - x) / y) end
function tmp = code(x, y) tmp = (1.0 - x) / y; end
code[x_, y_] := N[(N[(1.0 - x), $MachinePrecision] / y), $MachinePrecision]
\begin{array}{l}
\\
\frac{1 - x}{y}
\end{array}
Initial program 92.1%
associate-/l*99.3%
*-commutative99.3%
Simplified99.3%
clear-num99.3%
un-div-inv99.4%
*-commutative99.4%
associate-/l*99.8%
Applied egg-rr99.8%
Taylor expanded in x around 0 58.1%
Final simplification58.1%
(FPCore (x y) :precision binary64 (/ 1.0 y))
double code(double x, double y) {
return 1.0 / y;
}
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = 1.0d0 / y
end function
public static double code(double x, double y) {
return 1.0 / y;
}
def code(x, y): return 1.0 / y
function code(x, y) return Float64(1.0 / y) end
function tmp = code(x, y) tmp = 1.0 / y; end
code[x_, y_] := N[(1.0 / y), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{y}
\end{array}
Initial program 92.1%
associate-/l*99.3%
*-commutative99.3%
Simplified99.3%
clear-num99.3%
un-div-inv99.4%
*-commutative99.4%
associate-/l*99.8%
Applied egg-rr99.8%
Taylor expanded in x around 0 55.3%
Final simplification55.3%
(FPCore (x y) :precision binary64 (* (/ (- 1.0 x) y) (/ (- 3.0 x) 3.0)))
double code(double x, double y) {
return ((1.0 - x) / y) * ((3.0 - x) / 3.0);
}
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = ((1.0d0 - x) / y) * ((3.0d0 - x) / 3.0d0)
end function
public static double code(double x, double y) {
return ((1.0 - x) / y) * ((3.0 - x) / 3.0);
}
def code(x, y): return ((1.0 - x) / y) * ((3.0 - x) / 3.0)
function code(x, y) return Float64(Float64(Float64(1.0 - x) / y) * Float64(Float64(3.0 - x) / 3.0)) end
function tmp = code(x, y) tmp = ((1.0 - x) / y) * ((3.0 - x) / 3.0); end
code[x_, y_] := N[(N[(N[(1.0 - x), $MachinePrecision] / y), $MachinePrecision] * N[(N[(3.0 - x), $MachinePrecision] / 3.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1 - x}{y} \cdot \frac{3 - x}{3}
\end{array}
herbie shell --seed 2024041
(FPCore (x y)
:name "Diagrams.TwoD.Arc:bezierFromSweepQ1 from diagrams-lib-1.3.0.3"
:precision binary64
:herbie-target
(* (/ (- 1.0 x) y) (/ (- 3.0 x) 3.0))
(/ (* (- 1.0 x) (- 3.0 x)) (* y 3.0)))