
(FPCore (x y) :precision binary64 (/ (* (- x y) (+ x y)) (+ (* x x) (* y y))))
/* Returns (x^2 - y^2) / (x^2 + y^2), with the numerator in the
 * factored form (x - y) * (x + y). */
double code(double x, double y) {
    double numerator = (x - y) * (x + y);
    double denominator = (x * x) + (y * y);
    return numerator / denominator;
}
! Computes (x**2 - y**2) / (x**2 + y**2) in double precision,
! with the numerator in the factored form (x - y) * (x + y).
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = ((x - y) * (x + y)) / ((x * x) + (y * y))
end function
/** Returns (x^2 - y^2) / (x^2 + y^2) using the factored numerator (x - y)(x + y). */
public static double code(double x, double y) {
    double numerator = (x - y) * (x + y);
    double denominator = (x * x) + (y * y);
    return numerator / denominator;
}
def code(x, y):
    """Return (x**2 - y**2) / (x**2 + y**2) via the factored numerator (x - y)*(x + y)."""
    numerator = (x - y) * (x + y)
    denominator = (x * x) + (y * y)
    return numerator / denominator
function code(x, y) return Float64(Float64(Float64(x - y) * Float64(x + y)) / Float64(Float64(x * x) + Float64(y * y))) end
function tmp = code(x, y) tmp = ((x - y) * (x + y)) / ((x * x) + (y * y)); end
code[x_, y_] := N[(N[(N[(x - y), $MachinePrecision] * N[(x + y), $MachinePrecision]), $MachinePrecision] / N[(N[(x * x), $MachinePrecision] + N[(y * y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(x - y\right) \cdot \left(x + y\right)}{x \cdot x + y \cdot y}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 12 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x y) :precision binary64 (/ (* (- x y) (+ x y)) (+ (* x x) (* y y))))
double code(double x, double y) {
return ((x - y) * (x + y)) / ((x * x) + (y * y));
}
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = ((x - y) * (x + y)) / ((x * x) + (y * y))
end function
public static double code(double x, double y) {
return ((x - y) * (x + y)) / ((x * x) + (y * y));
}
def code(x, y): return ((x - y) * (x + y)) / ((x * x) + (y * y))
function code(x, y) return Float64(Float64(Float64(x - y) * Float64(x + y)) / Float64(Float64(x * x) + Float64(y * y))) end
function tmp = code(x, y) tmp = ((x - y) * (x + y)) / ((x * x) + (y * y)); end
code[x_, y_] := N[(N[(N[(x - y), $MachinePrecision] * N[(x + y), $MachinePrecision]), $MachinePrecision] / N[(N[(x * x), $MachinePrecision] + N[(y * y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(x - y\right) \cdot \left(x + y\right)}{x \cdot x + y \cdot y}
\end{array}
y_m = (fabs.f64 y) (FPCore (x y_m) :precision binary64 (/ (/ (+ x y_m) (hypot x y_m)) (/ (hypot x y_m) (- x y_m))))
y_m = fabs(y);
double code(double x, double y_m) {
return ((x + y_m) / hypot(x, y_m)) / (hypot(x, y_m) / (x - y_m));
}
y_m = Math.abs(y);
/**
 * Computes (x^2 - y_m^2)/(x^2 + y_m^2) as ((x + y_m)/h) / (h/(x - y_m))
 * with h = Math.hypot(x, y_m).  The duplicated Math.hypot call is hoisted
 * into a local; Math.hypot is pure, so the result is unchanged.
 */
public static double code(double x, double y_m) {
    double h = Math.hypot(x, y_m);
    return ((x + y_m) / h) / (h / (x - y_m));
}
y_m = math.fabs(y) def code(x, y_m): return ((x + y_m) / math.hypot(x, y_m)) / (math.hypot(x, y_m) / (x - y_m))
y_m = abs(y) function code(x, y_m) return Float64(Float64(Float64(x + y_m) / hypot(x, y_m)) / Float64(hypot(x, y_m) / Float64(x - y_m))) end
y_m = abs(y); function tmp = code(x, y_m) tmp = ((x + y_m) / hypot(x, y_m)) / (hypot(x, y_m) / (x - y_m)); end
y_m = N[Abs[y], $MachinePrecision] code[x_, y$95$m_] := N[(N[(N[(x + y$95$m), $MachinePrecision] / N[Sqrt[x ^ 2 + y$95$m ^ 2], $MachinePrecision]), $MachinePrecision] / N[(N[Sqrt[x ^ 2 + y$95$m ^ 2], $MachinePrecision] / N[(x - y$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
y_m = \left|y\right|
\\
\frac{\frac{x + y_m}{\mathsf{hypot}\left(x, y_m\right)}}{\frac{\mathsf{hypot}\left(x, y_m\right)}{x - y_m}}
\end{array}
Initial program 72.2%
fma-def72.2%
add-sqr-sqrt72.2%
times-frac72.1%
fma-def72.1%
hypot-def72.1%
fma-def72.1%
hypot-def99.9%
Applied egg-rr99.9%
*-commutative99.9%
clear-num99.9%
un-div-inv99.9%
Applied egg-rr99.9%
Final simplification99.9%
y_m = (fabs.f64 y) (FPCore (x y_m) :precision binary64 (* (- x y_m) (/ (/ (+ x y_m) (hypot x y_m)) (hypot x y_m))))
y_m = fabs(y);
double code(double x, double y_m) {
return (x - y_m) * (((x + y_m) / hypot(x, y_m)) / hypot(x, y_m));
}
y_m = Math.abs(y);
/**
 * Computes (x - y_m) * ((x + y_m)/h) / h with h = Math.hypot(x, y_m).
 * The duplicated Math.hypot call is hoisted into a local; the pure call
 * yields the same value both times, so behavior is unchanged.
 */
public static double code(double x, double y_m) {
    double h = Math.hypot(x, y_m);
    return (x - y_m) * (((x + y_m) / h) / h);
}
y_m = math.fabs(y) def code(x, y_m): return (x - y_m) * (((x + y_m) / math.hypot(x, y_m)) / math.hypot(x, y_m))
y_m = abs(y) function code(x, y_m) return Float64(Float64(x - y_m) * Float64(Float64(Float64(x + y_m) / hypot(x, y_m)) / hypot(x, y_m))) end
y_m = abs(y); function tmp = code(x, y_m) tmp = (x - y_m) * (((x + y_m) / hypot(x, y_m)) / hypot(x, y_m)); end
y_m = N[Abs[y], $MachinePrecision] code[x_, y$95$m_] := N[(N[(x - y$95$m), $MachinePrecision] * N[(N[(N[(x + y$95$m), $MachinePrecision] / N[Sqrt[x ^ 2 + y$95$m ^ 2], $MachinePrecision]), $MachinePrecision] / N[Sqrt[x ^ 2 + y$95$m ^ 2], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
y_m = \left|y\right|
\\
\left(x - y_m\right) \cdot \frac{\frac{x + y_m}{\mathsf{hypot}\left(x, y_m\right)}}{\mathsf{hypot}\left(x, y_m\right)}
\end{array}
Initial program 72.2%
+-commutative72.2%
associate-*r/71.8%
+-commutative71.8%
fma-def71.8%
Simplified71.8%
*-un-lft-identity71.8%
add-sqr-sqrt71.8%
times-frac71.9%
fma-def71.9%
hypot-def72.0%
fma-def72.0%
hypot-def99.7%
Applied egg-rr99.7%
associate-*l/99.7%
*-un-lft-identity99.7%
Applied egg-rr99.7%
Final simplification99.7%
y_m = (fabs.f64 y) (FPCore (x y_m) :precision binary64 (* (/ (+ x y_m) (hypot x y_m)) (/ (- x y_m) (hypot x y_m))))
y_m = fabs(y);
double code(double x, double y_m) {
return ((x + y_m) / hypot(x, y_m)) * ((x - y_m) / hypot(x, y_m));
}
y_m = Math.abs(y);
/**
 * Computes ((x + y_m)/h) * ((x - y_m)/h) with h = Math.hypot(x, y_m).
 * The duplicated Math.hypot call is hoisted into a local; the pure
 * call yields the same value both times, so behavior is unchanged.
 */
public static double code(double x, double y_m) {
    double h = Math.hypot(x, y_m);
    return ((x + y_m) / h) * ((x - y_m) / h);
}
y_m = math.fabs(y) def code(x, y_m): return ((x + y_m) / math.hypot(x, y_m)) * ((x - y_m) / math.hypot(x, y_m))
y_m = abs(y) function code(x, y_m) return Float64(Float64(Float64(x + y_m) / hypot(x, y_m)) * Float64(Float64(x - y_m) / hypot(x, y_m))) end
y_m = abs(y); function tmp = code(x, y_m) tmp = ((x + y_m) / hypot(x, y_m)) * ((x - y_m) / hypot(x, y_m)); end
y_m = N[Abs[y], $MachinePrecision] code[x_, y$95$m_] := N[(N[(N[(x + y$95$m), $MachinePrecision] / N[Sqrt[x ^ 2 + y$95$m ^ 2], $MachinePrecision]), $MachinePrecision] * N[(N[(x - y$95$m), $MachinePrecision] / N[Sqrt[x ^ 2 + y$95$m ^ 2], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
y_m = \left|y\right|
\\
\frac{x + y_m}{\mathsf{hypot}\left(x, y_m\right)} \cdot \frac{x - y_m}{\mathsf{hypot}\left(x, y_m\right)}
\end{array}
Initial program 72.2%
fma-def72.2%
add-sqr-sqrt72.2%
times-frac72.1%
fma-def72.1%
hypot-def72.1%
fma-def72.1%
hypot-def99.9%
Applied egg-rr99.9%
Final simplification99.9%
y_m = (fabs.f64 y) (FPCore (x y_m) :precision binary64 (let* ((t_0 (/ (* (+ x y_m) (- x y_m)) (+ (* x x) (* y_m y_m))))) (if (<= t_0 2.0) t_0 (* (- x y_m) (/ (+ 1.0 (/ x y_m)) (hypot x y_m))))))
y_m = fabs(y);
double code(double x, double y_m) {
double t_0 = ((x + y_m) * (x - y_m)) / ((x * x) + (y_m * y_m));
double tmp;
if (t_0 <= 2.0) {
tmp = t_0;
} else {
tmp = (x - y_m) * ((1.0 + (x / y_m)) / hypot(x, y_m));
}
return tmp;
}
y_m = Math.abs(y);
/**
 * Two-regime evaluation: the factored quotient when it is at most 2,
 * otherwise a Math.hypot-based rearrangement.  Comparison kept verbatim
 * so NaN falls to the else arm.
 */
public static double code(double x, double y_m) {
    double ratio = ((x + y_m) * (x - y_m)) / ((x * x) + (y_m * y_m));
    if (ratio <= 2.0) {
        return ratio;
    }
    return (x - y_m) * ((1.0 + (x / y_m)) / Math.hypot(x, y_m));
}
y_m = math.fabs(y) def code(x, y_m): t_0 = ((x + y_m) * (x - y_m)) / ((x * x) + (y_m * y_m)) tmp = 0 if t_0 <= 2.0: tmp = t_0 else: tmp = (x - y_m) * ((1.0 + (x / y_m)) / math.hypot(x, y_m)) return tmp
y_m = abs(y) function code(x, y_m) t_0 = Float64(Float64(Float64(x + y_m) * Float64(x - y_m)) / Float64(Float64(x * x) + Float64(y_m * y_m))) tmp = 0.0 if (t_0 <= 2.0) tmp = t_0; else tmp = Float64(Float64(x - y_m) * Float64(Float64(1.0 + Float64(x / y_m)) / hypot(x, y_m))); end return tmp end
y_m = abs(y); function tmp_2 = code(x, y_m) t_0 = ((x + y_m) * (x - y_m)) / ((x * x) + (y_m * y_m)); tmp = 0.0; if (t_0 <= 2.0) tmp = t_0; else tmp = (x - y_m) * ((1.0 + (x / y_m)) / hypot(x, y_m)); end tmp_2 = tmp; end
y_m = N[Abs[y], $MachinePrecision]
code[x_, y$95$m_] := Block[{t$95$0 = N[(N[(N[(x + y$95$m), $MachinePrecision] * N[(x - y$95$m), $MachinePrecision]), $MachinePrecision] / N[(N[(x * x), $MachinePrecision] + N[(y$95$m * y$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[t$95$0, 2.0], t$95$0, N[(N[(x - y$95$m), $MachinePrecision] * N[(N[(1.0 + N[(x / y$95$m), $MachinePrecision]), $MachinePrecision] / N[Sqrt[x ^ 2 + y$95$m ^ 2], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
y_m = \left|y\right|
\\
\begin{array}{l}
t_0 := \frac{\left(x + y_m\right) \cdot \left(x - y_m\right)}{x \cdot x + y_m \cdot y_m}\\
\mathbf{if}\;t_0 \leq 2:\\
\;\;\;\;t_0\\
\mathbf{else}:\\
\;\;\;\;\left(x - y_m\right) \cdot \frac{1 + \frac{x}{y_m}}{\mathsf{hypot}\left(x, y_m\right)}\\
\end{array}
\end{array}
if (/.f64 (*.f64 (-.f64 x y) (+.f64 x y)) (+.f64 (*.f64 x x) (*.f64 y y))) < 2Initial program 100.0%
if 2 < (/.f64 (*.f64 (-.f64 x y) (+.f64 x y)) (+.f64 (*.f64 x x) (*.f64 y y))) Initial program 0.0%
+-commutative0.0%
associate-*r/3.1%
+-commutative3.1%
fma-def3.1%
Simplified3.1%
*-un-lft-identity3.1%
add-sqr-sqrt3.1%
times-frac3.1%
fma-def3.1%
hypot-def3.1%
fma-def3.1%
hypot-def99.7%
Applied egg-rr99.7%
associate-*l/99.7%
*-un-lft-identity99.7%
Applied egg-rr99.7%
Taylor expanded in x around 0 15.3%
Final simplification76.5%
y_m = (fabs.f64 y) (FPCore (x y_m) :precision binary64 (let* ((t_0 (/ (* (+ x y_m) (- x y_m)) (+ (* x x) (* y_m y_m))))) (if (<= t_0 2.0) t_0 (* (/ (- x y_m) (hypot x y_m)) (+ 1.0 (/ x y_m))))))
y_m = fabs(y);
double code(double x, double y_m) {
double t_0 = ((x + y_m) * (x - y_m)) / ((x * x) + (y_m * y_m));
double tmp;
if (t_0 <= 2.0) {
tmp = t_0;
} else {
tmp = ((x - y_m) / hypot(x, y_m)) * (1.0 + (x / y_m));
}
return tmp;
}
y_m = Math.abs(y);
/**
 * Two-regime evaluation: factored quotient when at most 2, otherwise
 * ((x - y_m)/hypot) * (1 + x/y_m).  Comparison kept verbatim so NaN
 * falls to the else arm.
 */
public static double code(double x, double y_m) {
    double ratio = ((x + y_m) * (x - y_m)) / ((x * x) + (y_m * y_m));
    if (ratio <= 2.0) {
        return ratio;
    }
    return ((x - y_m) / Math.hypot(x, y_m)) * (1.0 + (x / y_m));
}
y_m = math.fabs(y) def code(x, y_m): t_0 = ((x + y_m) * (x - y_m)) / ((x * x) + (y_m * y_m)) tmp = 0 if t_0 <= 2.0: tmp = t_0 else: tmp = ((x - y_m) / math.hypot(x, y_m)) * (1.0 + (x / y_m)) return tmp
y_m = abs(y) function code(x, y_m) t_0 = Float64(Float64(Float64(x + y_m) * Float64(x - y_m)) / Float64(Float64(x * x) + Float64(y_m * y_m))) tmp = 0.0 if (t_0 <= 2.0) tmp = t_0; else tmp = Float64(Float64(Float64(x - y_m) / hypot(x, y_m)) * Float64(1.0 + Float64(x / y_m))); end return tmp end
y_m = abs(y); function tmp_2 = code(x, y_m) t_0 = ((x + y_m) * (x - y_m)) / ((x * x) + (y_m * y_m)); tmp = 0.0; if (t_0 <= 2.0) tmp = t_0; else tmp = ((x - y_m) / hypot(x, y_m)) * (1.0 + (x / y_m)); end tmp_2 = tmp; end
y_m = N[Abs[y], $MachinePrecision]
code[x_, y$95$m_] := Block[{t$95$0 = N[(N[(N[(x + y$95$m), $MachinePrecision] * N[(x - y$95$m), $MachinePrecision]), $MachinePrecision] / N[(N[(x * x), $MachinePrecision] + N[(y$95$m * y$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[t$95$0, 2.0], t$95$0, N[(N[(N[(x - y$95$m), $MachinePrecision] / N[Sqrt[x ^ 2 + y$95$m ^ 2], $MachinePrecision]), $MachinePrecision] * N[(1.0 + N[(x / y$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
y_m = \left|y\right|
\\
\begin{array}{l}
t_0 := \frac{\left(x + y_m\right) \cdot \left(x - y_m\right)}{x \cdot x + y_m \cdot y_m}\\
\mathbf{if}\;t_0 \leq 2:\\
\;\;\;\;t_0\\
\mathbf{else}:\\
\;\;\;\;\frac{x - y_m}{\mathsf{hypot}\left(x, y_m\right)} \cdot \left(1 + \frac{x}{y_m}\right)\\
\end{array}
\end{array}
if (/.f64 (*.f64 (-.f64 x y) (+.f64 x y)) (+.f64 (*.f64 x x) (*.f64 y y))) < 2Initial program 100.0%
if 2 < (/.f64 (*.f64 (-.f64 x y) (+.f64 x y)) (+.f64 (*.f64 x x) (*.f64 y y))) Initial program 0.0%
fma-def0.0%
add-sqr-sqrt0.0%
times-frac3.1%
fma-def3.1%
hypot-def3.1%
fma-def3.1%
hypot-def99.8%
Applied egg-rr99.8%
Taylor expanded in x around 0 15.3%
Final simplification76.5%
y_m = (fabs.f64 y)
(FPCore (x y_m)
:precision binary64
(let* ((t_0 (- 1.0 (/ y_m x))))
(if (<= y_m 1.75e-163)
(+ t_0 (* (/ y_m x) t_0))
(if (<= y_m 1.8e-11)
(/ (* (+ x y_m) (- x y_m)) (+ (* x x) (* y_m y_m)))
-1.0))))
y_m = fabs(y);
/* Piecewise evaluation selected by the magnitude of y_m (callers pass
 * y_m = fabs(y)).  Guard-clause form; thresholds and comparison
 * directions are preserved exactly. */
double code(double x, double y_m) {
    double r = 1.0 - (y_m / x);
    if (y_m <= 1.75e-163) {
        /* Tiny y_m: expansion 1 - y_m/x plus first correction term. */
        return r + ((y_m / x) * r);
    }
    if (y_m <= 1.8e-11) {
        /* Mid range: the original factored quotient. */
        return ((x + y_m) * (x - y_m)) / ((x * x) + (y_m * y_m));
    }
    return -1.0;
}
y_m = abs(y)
! Piecewise evaluation of ((x + y_m)*(x - y_m)) / (x*x + y_m*y_m),
! branching on the magnitude of y_m (callers pass y_m = abs(y)).
real(8) function code(x, y_m)
real(8), intent (in) :: x
real(8), intent (in) :: y_m
real(8) :: t_0
real(8) :: tmp
! Leading-order form 1 - y_m/x, reused by the first branch.
t_0 = 1.0d0 - (y_m / x)
if (y_m <= 1.75d-163) then
! Tiny y_m: expansion plus first correction term.
tmp = t_0 + ((y_m / x) * t_0)
else if (y_m <= 1.8d-11) then
! Mid range: the original factored quotient.
tmp = ((x + y_m) * (x - y_m)) / ((x * x) + (y_m * y_m))
else
! Elsewhere this generated approximation is the constant -1.
tmp = -1.0d0
end if
code = tmp
end function
y_m = Math.abs(y);
/**
 * Piecewise evaluation selected by the magnitude of y_m (callers pass
 * y_m = Math.abs(y)).  Guard-clause form; thresholds and comparison
 * directions are preserved exactly.
 */
public static double code(double x, double y_m) {
    double r = 1.0 - (y_m / x);
    if (y_m <= 1.75e-163) {
        // Tiny y_m: expansion 1 - y_m/x plus first correction term.
        return r + ((y_m / x) * r);
    }
    if (y_m <= 1.8e-11) {
        // Mid range: the original factored quotient.
        return ((x + y_m) * (x - y_m)) / ((x * x) + (y_m * y_m));
    }
    return -1.0;
}
y_m = math.fabs(y) def code(x, y_m): t_0 = 1.0 - (y_m / x) tmp = 0 if y_m <= 1.75e-163: tmp = t_0 + ((y_m / x) * t_0) elif y_m <= 1.8e-11: tmp = ((x + y_m) * (x - y_m)) / ((x * x) + (y_m * y_m)) else: tmp = -1.0 return tmp
y_m = abs(y) function code(x, y_m) t_0 = Float64(1.0 - Float64(y_m / x)) tmp = 0.0 if (y_m <= 1.75e-163) tmp = Float64(t_0 + Float64(Float64(y_m / x) * t_0)); elseif (y_m <= 1.8e-11) tmp = Float64(Float64(Float64(x + y_m) * Float64(x - y_m)) / Float64(Float64(x * x) + Float64(y_m * y_m))); else tmp = -1.0; end return tmp end
y_m = abs(y); function tmp_2 = code(x, y_m) t_0 = 1.0 - (y_m / x); tmp = 0.0; if (y_m <= 1.75e-163) tmp = t_0 + ((y_m / x) * t_0); elseif (y_m <= 1.8e-11) tmp = ((x + y_m) * (x - y_m)) / ((x * x) + (y_m * y_m)); else tmp = -1.0; end tmp_2 = tmp; end
y_m = N[Abs[y], $MachinePrecision]
code[x_, y$95$m_] := Block[{t$95$0 = N[(1.0 - N[(y$95$m / x), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[y$95$m, 1.75e-163], N[(t$95$0 + N[(N[(y$95$m / x), $MachinePrecision] * t$95$0), $MachinePrecision]), $MachinePrecision], If[LessEqual[y$95$m, 1.8e-11], N[(N[(N[(x + y$95$m), $MachinePrecision] * N[(x - y$95$m), $MachinePrecision]), $MachinePrecision] / N[(N[(x * x), $MachinePrecision] + N[(y$95$m * y$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], -1.0]]]
\begin{array}{l}
y_m = \left|y\right|
\\
\begin{array}{l}
t_0 := 1 - \frac{y_m}{x}\\
\mathbf{if}\;y_m \leq 1.75 \cdot 10^{-163}:\\
\;\;\;\;t_0 + \frac{y_m}{x} \cdot t_0\\
\mathbf{elif}\;y_m \leq 1.8 \cdot 10^{-11}:\\
\;\;\;\;\frac{\left(x + y_m\right) \cdot \left(x - y_m\right)}{x \cdot x + y_m \cdot y_m}\\
\mathbf{else}:\\
\;\;\;\;-1\\
\end{array}
\end{array}
if y < 1.75000000000000014e-163Initial program 66.5%
+-commutative66.5%
associate-*r/66.1%
+-commutative66.1%
fma-def66.1%
Simplified66.1%
*-un-lft-identity66.1%
add-sqr-sqrt66.1%
times-frac66.2%
fma-def66.2%
hypot-def66.2%
fma-def66.2%
hypot-def99.7%
Applied egg-rr99.7%
Taylor expanded in x around inf 35.3%
Taylor expanded in x around inf 34.6%
associate-*r*34.7%
div-inv34.7%
distribute-rgt-in34.2%
*-un-lft-identity34.2%
div-sub34.2%
*-inverses34.2%
div-sub34.2%
*-inverses34.2%
Applied egg-rr34.2%
if 1.75000000000000014e-163 < y < 1.79999999999999992e-11Initial program 99.9%
if 1.79999999999999992e-11 < y Initial program 100.0%
+-commutative100.0%
associate-*r/100.0%
+-commutative100.0%
fma-def100.0%
Simplified100.0%
Taylor expanded in x around 0 100.0%
Final simplification45.5%
y_m = (fabs.f64 y) (FPCore (x y_m) :precision binary64 (let* ((t_0 (- 1.0 (/ y_m x)))) (if (<= y_m 2.1e-153) (+ t_0 (* (/ y_m x) t_0)) -1.0)))
y_m = fabs(y);
/* Two-branch form: for y_m at or below the threshold, the expansion
 * 1 - y_m/x with one correction term; otherwise the constant -1.
 * (Callers pass y_m = fabs(y).) */
double code(double x, double y_m) {
    double r = 1.0 - (y_m / x);
    return (y_m <= 2.1e-153) ? r + ((y_m / x) * r) : -1.0;
}
y_m = abs(y)
! Two-branch form: for y_m at or below the threshold, the expansion
! 1 - y_m/x with one correction term; otherwise the constant -1.
! (Callers pass y_m = abs(y).)
real(8) function code(x, y_m)
real(8), intent (in) :: x
real(8), intent (in) :: y_m
real(8) :: t_0
real(8) :: tmp
t_0 = 1.0d0 - (y_m / x)
if (y_m <= 2.1d-153) then
tmp = t_0 + ((y_m / x) * t_0)
else
tmp = -1.0d0
end if
code = tmp
end function
y_m = Math.abs(y);
/**
 * Two-branch form: for y_m at or below the threshold, the expansion
 * 1 - y_m/x with one correction term; otherwise the constant -1.
 * (Callers pass y_m = Math.abs(y).)
 */
public static double code(double x, double y_m) {
    double r = 1.0 - (y_m / x);
    return (y_m <= 2.1e-153) ? r + ((y_m / x) * r) : -1.0;
}
y_m = math.fabs(y) def code(x, y_m): t_0 = 1.0 - (y_m / x) tmp = 0 if y_m <= 2.1e-153: tmp = t_0 + ((y_m / x) * t_0) else: tmp = -1.0 return tmp
y_m = abs(y) function code(x, y_m) t_0 = Float64(1.0 - Float64(y_m / x)) tmp = 0.0 if (y_m <= 2.1e-153) tmp = Float64(t_0 + Float64(Float64(y_m / x) * t_0)); else tmp = -1.0; end return tmp end
y_m = abs(y); function tmp_2 = code(x, y_m) t_0 = 1.0 - (y_m / x); tmp = 0.0; if (y_m <= 2.1e-153) tmp = t_0 + ((y_m / x) * t_0); else tmp = -1.0; end tmp_2 = tmp; end
y_m = N[Abs[y], $MachinePrecision]
code[x_, y$95$m_] := Block[{t$95$0 = N[(1.0 - N[(y$95$m / x), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[y$95$m, 2.1e-153], N[(t$95$0 + N[(N[(y$95$m / x), $MachinePrecision] * t$95$0), $MachinePrecision]), $MachinePrecision], -1.0]]
\begin{array}{l}
y_m = \left|y\right|
\\
\begin{array}{l}
t_0 := 1 - \frac{y_m}{x}\\
\mathbf{if}\;y_m \leq 2.1 \cdot 10^{-153}:\\
\;\;\;\;t_0 + \frac{y_m}{x} \cdot t_0\\
\mathbf{else}:\\
\;\;\;\;-1\\
\end{array}
\end{array}
if y < 2.10000000000000004e-153Initial program 66.7%
+-commutative66.7%
associate-*r/66.2%
+-commutative66.2%
fma-def66.2%
Simplified66.2%
*-un-lft-identity66.2%
add-sqr-sqrt66.2%
times-frac66.3%
fma-def66.3%
hypot-def66.4%
fma-def66.4%
hypot-def99.7%
Applied egg-rr99.7%
Taylor expanded in x around inf 35.6%
Taylor expanded in x around inf 34.9%
associate-*r*35.0%
div-inv35.1%
distribute-rgt-in34.5%
*-un-lft-identity34.5%
div-sub34.5%
*-inverses34.5%
div-sub34.5%
*-inverses34.5%
Applied egg-rr34.5%
if 2.10000000000000004e-153 < y Initial program 99.9%
+-commutative99.9%
associate-*r/99.5%
+-commutative99.5%
fma-def99.5%
Simplified99.5%
Taylor expanded in x around 0 71.5%
Final simplification40.7%
y_m = (fabs.f64 y) (FPCore (x y_m) :precision binary64 (if (<= y_m 1.1e-156) (* (- x y_m) (* (+ 1.0 (/ y_m x)) (/ 1.0 x))) -1.0))
y_m = fabs(y);
/* For y_m at or below the threshold, evaluates
 * (x - y_m) * (1 + y_m/x) * (1/x); otherwise the constant -1.
 * Comparison kept verbatim to preserve NaN handling. */
double code(double x, double y_m) {
    if (y_m <= 1.1e-156) {
        return (x - y_m) * ((1.0 + (y_m / x)) * (1.0 / x));
    }
    return -1.0;
}
y_m = abs(y)
! For y_m at or below the threshold, evaluates
! (x - y_m) * (1 + y_m/x) * (1/x); otherwise the constant -1.
! (Callers pass y_m = abs(y).)
real(8) function code(x, y_m)
real(8), intent (in) :: x
real(8), intent (in) :: y_m
real(8) :: tmp
if (y_m <= 1.1d-156) then
tmp = (x - y_m) * ((1.0d0 + (y_m / x)) * (1.0d0 / x))
else
tmp = -1.0d0
end if
code = tmp
end function
y_m = Math.abs(y);
/**
 * For y_m at or below the threshold, evaluates
 * (x - y_m) * (1 + y_m/x) * (1/x); otherwise the constant -1.
 * Comparison kept verbatim to preserve NaN handling.
 */
public static double code(double x, double y_m) {
    if (y_m <= 1.1e-156) {
        return (x - y_m) * ((1.0 + (y_m / x)) * (1.0 / x));
    }
    return -1.0;
}
y_m = math.fabs(y) def code(x, y_m): tmp = 0 if y_m <= 1.1e-156: tmp = (x - y_m) * ((1.0 + (y_m / x)) * (1.0 / x)) else: tmp = -1.0 return tmp
y_m = abs(y) function code(x, y_m) tmp = 0.0 if (y_m <= 1.1e-156) tmp = Float64(Float64(x - y_m) * Float64(Float64(1.0 + Float64(y_m / x)) * Float64(1.0 / x))); else tmp = -1.0; end return tmp end
y_m = abs(y); function tmp_2 = code(x, y_m) tmp = 0.0; if (y_m <= 1.1e-156) tmp = (x - y_m) * ((1.0 + (y_m / x)) * (1.0 / x)); else tmp = -1.0; end tmp_2 = tmp; end
y_m = N[Abs[y], $MachinePrecision] code[x_, y$95$m_] := If[LessEqual[y$95$m, 1.1e-156], N[(N[(x - y$95$m), $MachinePrecision] * N[(N[(1.0 + N[(y$95$m / x), $MachinePrecision]), $MachinePrecision] * N[(1.0 / x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], -1.0]
\begin{array}{l}
y_m = \left|y\right|
\\
\begin{array}{l}
\mathbf{if}\;y_m \leq 1.1 \cdot 10^{-156}:\\
\;\;\;\;\left(x - y_m\right) \cdot \left(\left(1 + \frac{y_m}{x}\right) \cdot \frac{1}{x}\right)\\
\mathbf{else}:\\
\;\;\;\;-1\\
\end{array}
\end{array}
if y < 1.1e-156Initial program 66.5%
+-commutative66.5%
associate-*r/66.1%
+-commutative66.1%
fma-def66.1%
Simplified66.1%
*-un-lft-identity66.1%
add-sqr-sqrt66.1%
times-frac66.2%
fma-def66.2%
hypot-def66.2%
fma-def66.2%
hypot-def99.7%
Applied egg-rr99.7%
Taylor expanded in x around inf 35.3%
Taylor expanded in x around inf 34.6%
if 1.1e-156 < y Initial program 99.9%
+-commutative99.9%
associate-*r/99.5%
+-commutative99.5%
fma-def99.5%
Simplified99.5%
Taylor expanded in x around 0 69.9%
Final simplification40.7%
y_m = (fabs.f64 y) (FPCore (x y_m) :precision binary64 (if (<= y_m 1.85e-156) (* (- x y_m) (/ 1.0 (/ x (+ 1.0 (/ y_m x))))) -1.0))
y_m = fabs(y);
/* For y_m at or below the threshold, evaluates
 * (x - y_m) / (x / (1 + y_m/x)) via an explicit reciprocal;
 * otherwise the constant -1.  Comparison kept verbatim. */
double code(double x, double y_m) {
    if (y_m <= 1.85e-156) {
        double scale = 1.0 / (x / (1.0 + (y_m / x)));
        return (x - y_m) * scale;
    }
    return -1.0;
}
y_m = abs(y)
! For y_m at or below the threshold, evaluates
! (x - y_m) * (1 / (x / (1 + y_m/x))); otherwise the constant -1.
! (Callers pass y_m = abs(y).)
real(8) function code(x, y_m)
real(8), intent (in) :: x
real(8), intent (in) :: y_m
real(8) :: tmp
if (y_m <= 1.85d-156) then
tmp = (x - y_m) * (1.0d0 / (x / (1.0d0 + (y_m / x))))
else
tmp = -1.0d0
end if
code = tmp
end function
y_m = Math.abs(y);
/**
 * For y_m at or below the threshold, evaluates
 * (x - y_m) * (1 / (x / (1 + y_m/x))); otherwise the constant -1.
 * Comparison kept verbatim to preserve NaN handling.
 */
public static double code(double x, double y_m) {
    if (y_m <= 1.85e-156) {
        double scale = 1.0 / (x / (1.0 + (y_m / x)));
        return (x - y_m) * scale;
    }
    return -1.0;
}
y_m = math.fabs(y) def code(x, y_m): tmp = 0 if y_m <= 1.85e-156: tmp = (x - y_m) * (1.0 / (x / (1.0 + (y_m / x)))) else: tmp = -1.0 return tmp
y_m = abs(y) function code(x, y_m) tmp = 0.0 if (y_m <= 1.85e-156) tmp = Float64(Float64(x - y_m) * Float64(1.0 / Float64(x / Float64(1.0 + Float64(y_m / x))))); else tmp = -1.0; end return tmp end
y_m = abs(y); function tmp_2 = code(x, y_m) tmp = 0.0; if (y_m <= 1.85e-156) tmp = (x - y_m) * (1.0 / (x / (1.0 + (y_m / x)))); else tmp = -1.0; end tmp_2 = tmp; end
y_m = N[Abs[y], $MachinePrecision] code[x_, y$95$m_] := If[LessEqual[y$95$m, 1.85e-156], N[(N[(x - y$95$m), $MachinePrecision] * N[(1.0 / N[(x / N[(1.0 + N[(y$95$m / x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], -1.0]
\begin{array}{l}
y_m = \left|y\right|
\\
\begin{array}{l}
\mathbf{if}\;y_m \leq 1.85 \cdot 10^{-156}:\\
\;\;\;\;\left(x - y_m\right) \cdot \frac{1}{\frac{x}{1 + \frac{y_m}{x}}}\\
\mathbf{else}:\\
\;\;\;\;-1\\
\end{array}
\end{array}
if y < 1.85e-156Initial program 66.5%
+-commutative66.5%
associate-*r/66.1%
+-commutative66.1%
fma-def66.1%
Simplified66.1%
*-un-lft-identity66.1%
add-sqr-sqrt66.1%
times-frac66.2%
fma-def66.2%
hypot-def66.2%
fma-def66.2%
hypot-def99.7%
Applied egg-rr99.7%
Taylor expanded in x around inf 35.3%
Taylor expanded in x around inf 34.6%
associate-*l/34.6%
*-un-lft-identity34.6%
clear-num34.6%
Applied egg-rr34.6%
if 1.85e-156 < y Initial program 99.9%
+-commutative99.9%
associate-*r/99.5%
+-commutative99.5%
fma-def99.5%
Simplified99.5%
Taylor expanded in x around 0 69.9%
Final simplification40.7%
y_m = (fabs.f64 y) (FPCore (x y_m) :precision binary64 (if (<= y_m 2.25e-156) (* (- x y_m) (/ (+ 1.0 (/ y_m x)) x)) -1.0))
y_m = fabs(y);
/* For y_m at or below the threshold, evaluates
 * (x - y_m) * ((1 + y_m/x) / x); otherwise the constant -1.
 * Comparison kept verbatim to preserve NaN handling. */
double code(double x, double y_m) {
    if (y_m <= 2.25e-156) {
        return (x - y_m) * ((1.0 + (y_m / x)) / x);
    }
    return -1.0;
}
y_m = abs(y)
! For y_m at or below the threshold, evaluates
! (x - y_m) * ((1 + y_m/x) / x); otherwise the constant -1.
! (Callers pass y_m = abs(y).)
real(8) function code(x, y_m)
real(8), intent (in) :: x
real(8), intent (in) :: y_m
real(8) :: tmp
if (y_m <= 2.25d-156) then
tmp = (x - y_m) * ((1.0d0 + (y_m / x)) / x)
else
tmp = -1.0d0
end if
code = tmp
end function
y_m = Math.abs(y);
/**
 * For y_m at or below the threshold, evaluates
 * (x - y_m) * ((1 + y_m/x) / x); otherwise the constant -1.
 * Comparison kept verbatim to preserve NaN handling.
 */
public static double code(double x, double y_m) {
    if (y_m <= 2.25e-156) {
        return (x - y_m) * ((1.0 + (y_m / x)) / x);
    }
    return -1.0;
}
y_m = math.fabs(y) def code(x, y_m): tmp = 0 if y_m <= 2.25e-156: tmp = (x - y_m) * ((1.0 + (y_m / x)) / x) else: tmp = -1.0 return tmp
y_m = abs(y) function code(x, y_m) tmp = 0.0 if (y_m <= 2.25e-156) tmp = Float64(Float64(x - y_m) * Float64(Float64(1.0 + Float64(y_m / x)) / x)); else tmp = -1.0; end return tmp end
y_m = abs(y); function tmp_2 = code(x, y_m) tmp = 0.0; if (y_m <= 2.25e-156) tmp = (x - y_m) * ((1.0 + (y_m / x)) / x); else tmp = -1.0; end tmp_2 = tmp; end
y_m = N[Abs[y], $MachinePrecision] code[x_, y$95$m_] := If[LessEqual[y$95$m, 2.25e-156], N[(N[(x - y$95$m), $MachinePrecision] * N[(N[(1.0 + N[(y$95$m / x), $MachinePrecision]), $MachinePrecision] / x), $MachinePrecision]), $MachinePrecision], -1.0]
\begin{array}{l}
y_m = \left|y\right|
\\
\begin{array}{l}
\mathbf{if}\;y_m \leq 2.25 \cdot 10^{-156}:\\
\;\;\;\;\left(x - y_m\right) \cdot \frac{1 + \frac{y_m}{x}}{x}\\
\mathbf{else}:\\
\;\;\;\;-1\\
\end{array}
\end{array}
if y < 2.24999999999999993e-156Initial program 66.5%
+-commutative66.5%
associate-*r/66.1%
+-commutative66.1%
fma-def66.1%
Simplified66.1%
*-un-lft-identity66.1%
add-sqr-sqrt66.1%
times-frac66.2%
fma-def66.2%
hypot-def66.2%
fma-def66.2%
hypot-def99.7%
Applied egg-rr99.7%
Taylor expanded in x around inf 35.3%
Taylor expanded in x around inf 34.6%
*-commutative34.6%
sub-neg34.6%
distribute-lft-in34.5%
associate-*l/34.5%
*-un-lft-identity34.5%
associate-*l/34.5%
*-un-lft-identity34.5%
Applied egg-rr34.5%
distribute-lft-out34.6%
sub-neg34.6%
Simplified34.6%
if 2.24999999999999993e-156 < y Initial program 99.9%
+-commutative99.9%
associate-*r/99.5%
+-commutative99.5%
fma-def99.5%
Simplified99.5%
Taylor expanded in x around 0 69.9%
Final simplification40.7%
y_m = (fabs.f64 y) (FPCore (x y_m) :precision binary64 (if (<= y_m 1.05e-153) 1.0 -1.0))
y_m = fabs(y);
/* Sign-only approximation: +1 when y_m is at or below the threshold,
 * -1 otherwise.  x is accepted but unused by this variant. */
double code(double x, double y_m) {
    return (y_m <= 1.05e-153) ? 1.0 : -1.0;
}
y_m = abs(y)
! Sign-only approximation: +1 when y_m is at or below the threshold,
! -1 otherwise.  x is accepted but unused by this variant.
real(8) function code(x, y_m)
real(8), intent (in) :: x
real(8), intent (in) :: y_m
real(8) :: tmp
if (y_m <= 1.05d-153) then
tmp = 1.0d0
else
tmp = -1.0d0
end if
code = tmp
end function
y_m = Math.abs(y);
/**
 * Sign-only approximation: +1 when y_m is at or below the threshold,
 * -1 otherwise.  x is accepted but unused by this variant.
 */
public static double code(double x, double y_m) {
    return (y_m <= 1.05e-153) ? 1.0 : -1.0;
}
y_m = math.fabs(y) def code(x, y_m): tmp = 0 if y_m <= 1.05e-153: tmp = 1.0 else: tmp = -1.0 return tmp
y_m = abs(y) function code(x, y_m) tmp = 0.0 if (y_m <= 1.05e-153) tmp = 1.0; else tmp = -1.0; end return tmp end
y_m = abs(y); function tmp_2 = code(x, y_m) tmp = 0.0; if (y_m <= 1.05e-153) tmp = 1.0; else tmp = -1.0; end tmp_2 = tmp; end
y_m = N[Abs[y], $MachinePrecision] code[x_, y$95$m_] := If[LessEqual[y$95$m, 1.05e-153], 1.0, -1.0]
\begin{array}{l}
y_m = \left|y\right|
\\
\begin{array}{l}
\mathbf{if}\;y_m \leq 1.05 \cdot 10^{-153}:\\
\;\;\;\;1\\
\mathbf{else}:\\
\;\;\;\;-1\\
\end{array}
\end{array}
if y < 1.05000000000000002e-153Initial program 66.7%
+-commutative66.7%
associate-*r/66.2%
+-commutative66.2%
fma-def66.2%
Simplified66.2%
Taylor expanded in x around inf 33.3%
if 1.05000000000000002e-153 < y Initial program 99.9%
+-commutative99.9%
associate-*r/99.5%
+-commutative99.5%
fma-def99.5%
Simplified99.5%
Taylor expanded in x around 0 71.5%
Final simplification39.7%
y_m = (fabs.f64 y) (FPCore (x y_m) :precision binary64 -1.0)
y_m = fabs(y);
/* Constant approximation: always returns -1; both parameters are
 * accepted for interface compatibility but unused. */
double code(double x, double y_m) {
    (void)x;
    (void)y_m;
    return -1.0;
}
y_m = abs(y)
! Constant approximation: always returns -1; both parameters are
! accepted for interface compatibility but unused.
real(8) function code(x, y_m)
real(8), intent (in) :: x
real(8), intent (in) :: y_m
code = -1.0d0
end function
y_m = Math.abs(y);
/**
 * Constant approximation: always returns -1; both parameters are
 * accepted for interface compatibility but unused.
 */
public static double code(double x, double y_m) {
    double result = -1.0;
    return result;
}
y_m = math.fabs(y) def code(x, y_m): return -1.0
y_m = abs(y) function code(x, y_m) return -1.0 end
y_m = abs(y); function tmp = code(x, y_m) tmp = -1.0; end
y_m = N[Abs[y], $MachinePrecision] code[x_, y$95$m_] := -1.0
\begin{array}{l}
y_m = \left|y\right|
\\
-1
\end{array}
Initial program 72.2%
+-commutative72.2%
associate-*r/71.8%
+-commutative71.8%
fma-def71.8%
Simplified71.8%
Taylor expanded in x around 0 67.5%
Final simplification67.5%
(FPCore (x y)
:precision binary64
(let* ((t_0 (fabs (/ x y))))
(if (and (< 0.5 t_0) (< t_0 2.0))
(/ (* (- x y) (+ x y)) (+ (* x x) (* y y)))
(- 1.0 (/ 2.0 (+ 1.0 (* (/ x y) (/ x y))))))))
/* Branches on |x/y|: in (0.5, 2) uses the factored quotient, otherwise
 * the algebraically equivalent 1 - 2/(1 + (x/y)^2). */
double code(double x, double y) {
    double ratio = fabs(x / y);
    if ((0.5 < ratio) && (ratio < 2.0)) {
        return ((x - y) * (x + y)) / ((x * x) + (y * y));
    }
    return 1.0 - (2.0 / (1.0 + ((x / y) * (x / y))));
}
! Branches on t_0 = |x/y|: in (0.5, 2) uses the factored quotient,
! otherwise the algebraically equivalent 1 - 2/(1 + (x/y)**2).
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8) :: t_0
real(8) :: tmp
t_0 = abs((x / y))
if ((0.5d0 < t_0) .and. (t_0 < 2.0d0)) then
tmp = ((x - y) * (x + y)) / ((x * x) + (y * y))
else
tmp = 1.0d0 - (2.0d0 / (1.0d0 + ((x / y) * (x / y))))
end if
code = tmp
end function
/**
 * Branches on |x/y|: in (0.5, 2) uses the factored quotient, otherwise
 * the algebraically equivalent 1 - 2/(1 + (x/y)^2).
 */
public static double code(double x, double y) {
    double ratio = Math.abs(x / y);
    if ((0.5 < ratio) && (ratio < 2.0)) {
        return ((x - y) * (x + y)) / ((x * x) + (y * y));
    }
    return 1.0 - (2.0 / (1.0 + ((x / y) * (x / y))));
}
def code(x, y):
    """Branch on |x/y|: factored quotient in (0.5, 2), else 1 - 2/(1 + (x/y)**2)."""
    t_0 = math.fabs(x / y)
    if (0.5 < t_0) and (t_0 < 2.0):
        return ((x - y) * (x + y)) / ((x * x) + (y * y))
    return 1.0 - (2.0 / (1.0 + ((x / y) * (x / y))))
function code(x, y) t_0 = abs(Float64(x / y)) tmp = 0.0 if ((0.5 < t_0) && (t_0 < 2.0)) tmp = Float64(Float64(Float64(x - y) * Float64(x + y)) / Float64(Float64(x * x) + Float64(y * y))); else tmp = Float64(1.0 - Float64(2.0 / Float64(1.0 + Float64(Float64(x / y) * Float64(x / y))))); end return tmp end
function tmp_2 = code(x, y) t_0 = abs((x / y)); tmp = 0.0; if ((0.5 < t_0) && (t_0 < 2.0)) tmp = ((x - y) * (x + y)) / ((x * x) + (y * y)); else tmp = 1.0 - (2.0 / (1.0 + ((x / y) * (x / y)))); end tmp_2 = tmp; end
code[x_, y_] := Block[{t$95$0 = N[Abs[N[(x / y), $MachinePrecision]], $MachinePrecision]}, If[And[Less[0.5, t$95$0], Less[t$95$0, 2.0]], N[(N[(N[(x - y), $MachinePrecision] * N[(x + y), $MachinePrecision]), $MachinePrecision] / N[(N[(x * x), $MachinePrecision] + N[(y * y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(1.0 - N[(2.0 / N[(1.0 + N[(N[(x / y), $MachinePrecision] * N[(x / y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \left|\frac{x}{y}\right|\\
\mathbf{if}\;0.5 < t_0 \land t_0 < 2:\\
\;\;\;\;\frac{\left(x - y\right) \cdot \left(x + y\right)}{x \cdot x + y \cdot y}\\
\mathbf{else}:\\
\;\;\;\;1 - \frac{2}{1 + \frac{x}{y} \cdot \frac{x}{y}}\\
\end{array}
\end{array}
herbie shell --seed 2024017
(FPCore (x y)
:name "Kahan p9 Example"
:precision binary64
:pre (and (and (< 0.0 x) (< x 1.0)) (< y 1.0))
:herbie-target
(if (and (< 0.5 (fabs (/ x y))) (< (fabs (/ x y)) 2.0)) (/ (* (- x y) (+ x y)) (+ (* x x) (* y y))) (- 1.0 (/ 2.0 (+ 1.0 (* (/ x y) (/ x y))))))
(/ (* (- x y) (+ x y)) (+ (* x x) (* y y))))