Kahan p9 Example

Percentage Accurate: 68.5% → 92.1%
Time: 3.5s
Alternatives: 6
Speedup: N/A×

Specification

?
\[\left(0 < x \land x < 1\right) \land y < 1\]
\[\begin{array}{l} \\ \frac{\left(x - y\right) \cdot \left(x + y\right)}{x \cdot x + y \cdot y} \end{array} \]
(FPCore (x y) :precision binary64 (/ (* (- x y) (+ x y)) (+ (* x x) (* y y))))
/* Kahan p9 example: evaluates (x^2 - y^2) / (x^2 + y^2) with the numerator
 * in factored form (x - y)*(x + y) to limit cancellation. */
double code(double x, double y) {
	double numerator = (x - y) * (x + y);
	double denominator = (x * x) + (y * y);
	return numerator / denominator;
}
! NaN-aware generic fmax/fmin, following the C library convention: when
! exactly one argument is NaN the other argument is returned. The test
! x /= x is true only for NaN (the one value not equal to itself).
! Mixed-kind variants promote the real(4) argument with dble().
module fmin_fmax_functions
    implicit none
    private
    public fmax
    public fmin

    interface fmax
        module procedure fmax88
        module procedure fmax44
        module procedure fmax84
        module procedure fmax48
    end interface
    interface fmin
        module procedure fmin88
        module procedure fmin44
        module procedure fmin84
        module procedure fmin48
    end interface
contains
    ! real(8)/real(8) maximum; if x is NaN return y, if y is NaN return x.
    real(8) function fmax88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    ! real(4)/real(4) maximum with the same NaN handling.
    real(4) function fmax44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    ! real(8)/real(4) maximum; y is promoted to real(8).
    real(8) function fmax84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
    end function
    ! real(4)/real(8) maximum; x is promoted to real(8).
    real(8) function fmax48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
    end function
    ! real(8)/real(8) minimum; if x is NaN return y, if y is NaN return x.
    real(8) function fmin88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    ! real(4)/real(4) minimum with the same NaN handling.
    real(4) function fmin44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    ! real(8)/real(4) minimum; y is promoted to real(8).
    real(8) function fmin84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
    end function
    ! real(4)/real(8) minimum; x is promoted to real(8).
    real(8) function fmin48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
    end function
end module

! Kahan p9 example: ((x - y)*(x + y)) / (x*x + y*y) in double precision.
! The factored numerator avoids the cancellation of x*x - y*y.
! NOTE(review): nothing from fmin_fmax_functions is referenced here; the
! use statement is kept for interface parity with the other kernels.
real(8) function code(x, y)
use fmin_fmax_functions
    implicit none
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    code = ((x - y) * (x + y)) / ((x * x) + (y * y))
end function
/** Kahan p9 example: (x^2 - y^2)/(x^2 + y^2), numerator kept in factored
 *  form (x - y)*(x + y) to limit cancellation. */
public static double code(double x, double y) {
	final double numerator = (x - y) * (x + y);
	final double denominator = (x * x) + (y * y);
	return numerator / denominator;
}
def code(x, y):
	"""Kahan p9 example: (x**2 - y**2)/(x**2 + y**2) with the numerator
	in factored form (x - y)*(x + y) to limit cancellation."""
	numerator = (x - y) * (x + y)
	denominator = (x * x) + (y * y)
	return numerator / denominator
# Kahan p9 example; every intermediate is rounded to Float64 in the same
# order as the original expression, so results are bit-identical.
function code(x, y)
	num = Float64(Float64(x - y) * Float64(x + y))
	den = Float64(Float64(x * x) + Float64(y * y))
	return Float64(num / den)
end
% Kahan p9 example: (x^2 - y^2)/(x^2 + y^2), numerator factored as
% (x - y)*(x + y) to limit cancellation.
function tmp = code(x, y)
	num = (x - y) * (x + y);
	den = (x * x) + (y * y);
	tmp = num / den;
end
code[x_, y_] := N[(N[(N[(x - y), $MachinePrecision] * N[(x + y), $MachinePrecision]), $MachinePrecision] / N[(N[(x * x), $MachinePrecision] + N[(y * y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\frac{\left(x - y\right) \cdot \left(x + y\right)}{x \cdot x + y \cdot y}
\end{array}

Sampling outcomes in binary64 precision:

Local Percentage Accuracy vs ?

The average percentage accuracy by input value. Horizontal axis shows the value of an input variable; the variable is chosen in the title. Vertical axis is accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion. These can be toggled with buttons below the plot. The line is an average while dots represent individual samples.

Accuracy vs Speed?

Herbie found 6 alternatives:

Alternative | Accuracy | Speedup
The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed-accuracy tradeoffs.

Initial Program: 68.5% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \frac{\left(x - y\right) \cdot \left(x + y\right)}{x \cdot x + y \cdot y} \end{array} \]
(FPCore (x y) :precision binary64 (/ (* (- x y) (+ x y)) (+ (* x x) (* y y))))
/* Kahan p9 example: (x^2 - y^2)/(x^2 + y^2); the factored numerator
 * (x - y)*(x + y) limits cancellation. */
double code(double x, double y) {
	return ((x - y) * (x + y)) / ((x * x) + (y * y));
}
! NaN-aware generic fmax/fmin, following the C library convention: when
! exactly one argument is NaN the other argument is returned. The test
! x /= x is true only for NaN (the one value not equal to itself).
! Mixed-kind variants promote the real(4) argument with dble().
module fmin_fmax_functions
    implicit none
    private
    public fmax
    public fmin

    interface fmax
        module procedure fmax88
        module procedure fmax44
        module procedure fmax84
        module procedure fmax48
    end interface
    interface fmin
        module procedure fmin88
        module procedure fmin44
        module procedure fmin84
        module procedure fmin48
    end interface
contains
    ! real(8)/real(8) maximum; if x is NaN return y, if y is NaN return x.
    real(8) function fmax88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    ! real(4)/real(4) maximum with the same NaN handling.
    real(4) function fmax44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    ! real(8)/real(4) maximum; y is promoted to real(8).
    real(8) function fmax84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
    end function
    ! real(4)/real(8) maximum; x is promoted to real(8).
    real(8) function fmax48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
    end function
    ! real(8)/real(8) minimum; if x is NaN return y, if y is NaN return x.
    real(8) function fmin88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    ! real(4)/real(4) minimum with the same NaN handling.
    real(4) function fmin44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    ! real(8)/real(4) minimum; y is promoted to real(8).
    real(8) function fmin84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
    end function
    ! real(4)/real(8) minimum; x is promoted to real(8).
    real(8) function fmin48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
    end function
end module

! Kahan p9 example: ((x - y)*(x + y)) / (x*x + y*y) in double precision.
! NOTE(review): nothing from fmin_fmax_functions is referenced in this body.
real(8) function code(x, y)
use fmin_fmax_functions
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    code = ((x - y) * (x + y)) / ((x * x) + (y * y))
end function
// Kahan p9 example: (x^2 - y^2)/(x^2 + y^2) with the numerator factored
// as (x - y)*(x + y) to limit cancellation.
public static double code(double x, double y) {
	return ((x - y) * (x + y)) / ((x * x) + (y * y));
}
def code(x, y):
	"""Kahan p9 example: return ((x - y)*(x + y)) / (x*x + y*y)."""
	return ((x - y) * (x + y)) / ((x * x) + (y * y))
function code(x, y)
	# Kahan p9 example; each intermediate is rounded to Float64 explicitly.
	return Float64(Float64(Float64(x - y) * Float64(x + y)) / Float64(Float64(x * x) + Float64(y * y)))
end
% Kahan p9 example: (x^2 - y^2)/(x^2 + y^2), numerator in factored form.
function tmp = code(x, y)
	tmp = ((x - y) * (x + y)) / ((x * x) + (y * y));
end
code[x_, y_] := N[(N[(N[(x - y), $MachinePrecision] * N[(x + y), $MachinePrecision]), $MachinePrecision] / N[(N[(x * x), $MachinePrecision] + N[(y * y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\frac{\left(x - y\right) \cdot \left(x + y\right)}{x \cdot x + y \cdot y}
\end{array}

Alternative 1: 92.1% accurate, N/A× speedup?

\[\begin{array}{l} y_m = \left|y\right| \\ \begin{array}{l} t_0 := \log x \cdot 2\\ \mathbf{if}\;y\_m \leq 2.25 \cdot 10^{-181}:\\ \;\;\;\;\mathsf{fma}\left(e^{\frac{{\left(\log y\_m \cdot 2\right)}^{2} - {t\_0}^{2}}{\mathsf{fma}\left(-1 \cdot \log y\_m, -2, t\_0\right)}}, -2, 1\right)\\ \mathbf{elif}\;y\_m \leq 1.08 \cdot 10^{-44}:\\ \;\;\;\;\frac{\left(x - y\_m\right) \cdot \left(x + y\_m\right)}{x \cdot x + y\_m \cdot y\_m}\\ \mathbf{else}:\\ \;\;\;\;-1 \cdot \left(0 \cdot \frac{x}{y\_m} + 1\right)\\ \end{array} \end{array} \]
y_m = (fabs.f64 y)
;; Regime-split rewrite of (/ (* (- x y) (+ x y)) (+ (* x x) (* y y)))
;; with y_m = |y| precomputed by the caller. For tiny y_m the value
;; 1 - 2*y_m^2/x^2 is computed via logs to avoid underflow; for large y_m
;; the value collapses to -1 (the 0-times term preserves non-finite inputs).
(FPCore (x y_m)
 :precision binary64
 (let* ((t_0 (* (log x) 2.0)))
   (if (<= y_m 2.25e-181)
     (fma
      (exp
       (/
        (- (pow (* (log y_m) 2.0) 2.0) (pow t_0 2.0))
        (fma (* -1.0 (log y_m)) -2.0 t_0)))
      -2.0
      1.0)
     (if (<= y_m 1.08e-44)
       (/ (* (- x y_m) (+ x y_m)) (+ (* x x) (* y_m y_m)))
       (* -1.0 (+ (* 0.0 (/ x y_m)) 1.0))))))
y_m = fabs(y);
/* Regime-split approximation of ((x - y_m)*(x + y_m)) / (x*x + y_m*y_m),
 * where the caller passes y_m = fabs(y). Precondition (from the spec):
 * 0 < x < 1. Branch cutoffs come from the generator's regime inference. */
double code(double x, double y_m) {
	double t_0 = log(x) * 2.0;  /* = log(x^2) */
	double tmp;
	if (y_m <= 2.25e-181) {
		/* Tiny y_m: the exp() argument simplifies to log(y_m^2) - log(x^2)
		 * (difference of squares over their sum), so the exp term equals
		 * y_m^2/x^2 without underflow, and tmp = 1 - 2*y_m^2/x^2. */
		tmp = fma(exp(((pow((log(y_m) * 2.0), 2.0) - pow(t_0, 2.0)) / fma((-1.0 * log(y_m)), -2.0, t_0))), -2.0, 1.0);
	} else if (y_m <= 1.08e-44) {
		/* Middle regime: the original factored expression is used directly. */
		tmp = ((x - y_m) * (x + y_m)) / ((x * x) + (y_m * y_m));
	} else {
		/* Large y_m: value is -1; NOTE(review): the 0.0*(x/y_m) term looks
		 * kept so non-finite x/y_m still propagates — confirm intended. */
		tmp = -1.0 * ((0.0 * (x / y_m)) + 1.0);
	}
	return tmp;
}
y_m = abs(y)
function code(x, y_m)
	# Regime-split approximation of ((x - y_m)*(x + y_m)) / (x^2 + y_m^2),
	# with y_m = abs(y) supplied by the caller; explicit Float64() calls
	# round each intermediate to binary64.
	t_0 = Float64(log(x) * 2.0)  # = log(x^2)
	tmp = 0.0
	if (y_m <= 2.25e-181)
		# Tiny y_m: exp argument reduces to log(y_m^2) - log(x^2),
		# giving tmp = 1 - 2*y_m^2/x^2 without underflow.
		tmp = fma(exp(Float64(Float64((Float64(log(y_m) * 2.0) ^ 2.0) - (t_0 ^ 2.0)) / fma(Float64(-1.0 * log(y_m)), -2.0, t_0))), -2.0, 1.0);
	elseif (y_m <= 1.08e-44)
		# Middle regime: original factored expression.
		tmp = Float64(Float64(Float64(x - y_m) * Float64(x + y_m)) / Float64(Float64(x * x) + Float64(y_m * y_m)));
	else
		# Large y_m: value is -1 (0*(x/y_m) term propagates non-finite input).
		tmp = Float64(-1.0 * Float64(Float64(0.0 * Float64(x / y_m)) + 1.0));
	end
	return tmp
end
y_m = N[Abs[y], $MachinePrecision]
code[x_, y$95$m_] := Block[{t$95$0 = N[(N[Log[x], $MachinePrecision] * 2.0), $MachinePrecision]}, If[LessEqual[y$95$m, 2.25e-181], N[(N[Exp[N[(N[(N[Power[N[(N[Log[y$95$m], $MachinePrecision] * 2.0), $MachinePrecision], 2.0], $MachinePrecision] - N[Power[t$95$0, 2.0], $MachinePrecision]), $MachinePrecision] / N[(N[(-1.0 * N[Log[y$95$m], $MachinePrecision]), $MachinePrecision] * -2.0 + t$95$0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] * -2.0 + 1.0), $MachinePrecision], If[LessEqual[y$95$m, 1.08e-44], N[(N[(N[(x - y$95$m), $MachinePrecision] * N[(x + y$95$m), $MachinePrecision]), $MachinePrecision] / N[(N[(x * x), $MachinePrecision] + N[(y$95$m * y$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(-1.0 * N[(N[(0.0 * N[(x / y$95$m), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}
y_m = \left|y\right|

\\
\begin{array}{l}
t_0 := \log x \cdot 2\\
\mathbf{if}\;y\_m \leq 2.25 \cdot 10^{-181}:\\
\;\;\;\;\mathsf{fma}\left(e^{\frac{{\left(\log y\_m \cdot 2\right)}^{2} - {t\_0}^{2}}{\mathsf{fma}\left(-1 \cdot \log y\_m, -2, t\_0\right)}}, -2, 1\right)\\

\mathbf{elif}\;y\_m \leq 1.08 \cdot 10^{-44}:\\
\;\;\;\;\frac{\left(x - y\_m\right) \cdot \left(x + y\_m\right)}{x \cdot x + y\_m \cdot y\_m}\\

\mathbf{else}:\\
\;\;\;\;-1 \cdot \left(0 \cdot \frac{x}{y\_m} + 1\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if y < 2.2499999999999999e-181

    1. Initial program 66.0%

      \[\frac{\left(x - y\right) \cdot \left(x + y\right)}{x \cdot x + y \cdot y} \]
    2. Add Preprocessing
    3. Taylor expanded in y around 0

      \[\leadsto \color{blue}{1 + -2 \cdot \frac{{y}^{2}}{{x}^{2}}} \]
    4. Step-by-step derivation
      1. +-commutativeN/A

        \[\leadsto -2 \cdot \frac{{y}^{2}}{{x}^{2}} + \color{blue}{1} \]
      2. *-commutativeN/A

        \[\leadsto \frac{{y}^{2}}{{x}^{2}} \cdot -2 + 1 \]
      3. lower-fma.f64N/A

        \[\leadsto \mathsf{fma}\left(\frac{{y}^{2}}{{x}^{2}}, \color{blue}{-2}, 1\right) \]
      4. pow2N/A

        \[\leadsto \mathsf{fma}\left(\frac{y \cdot y}{{x}^{2}}, -2, 1\right) \]
      5. pow2N/A

        \[\leadsto \mathsf{fma}\left(\frac{y \cdot y}{x \cdot x}, -2, 1\right) \]
      6. frac-timesN/A

        \[\leadsto \mathsf{fma}\left(\frac{y}{x} \cdot \frac{y}{x}, -2, 1\right) \]
      7. sqr-neg-revN/A

        \[\leadsto \mathsf{fma}\left(\left(\mathsf{neg}\left(\frac{y}{x}\right)\right) \cdot \left(\mathsf{neg}\left(\frac{y}{x}\right)\right), -2, 1\right) \]
      8. mul-1-negN/A

        \[\leadsto \mathsf{fma}\left(\left(-1 \cdot \frac{y}{x}\right) \cdot \left(\mathsf{neg}\left(\frac{y}{x}\right)\right), -2, 1\right) \]
      9. mul-1-negN/A

        \[\leadsto \mathsf{fma}\left(\left(-1 \cdot \frac{y}{x}\right) \cdot \left(-1 \cdot \frac{y}{x}\right), -2, 1\right) \]
      10. pow2N/A

        \[\leadsto \mathsf{fma}\left({\left(-1 \cdot \frac{y}{x}\right)}^{2}, -2, 1\right) \]
      11. lower-pow.f64N/A

        \[\leadsto \mathsf{fma}\left({\left(-1 \cdot \frac{y}{x}\right)}^{2}, -2, 1\right) \]
      12. *-commutativeN/A

        \[\leadsto \mathsf{fma}\left({\left(\frac{y}{x} \cdot -1\right)}^{2}, -2, 1\right) \]
      13. lower-*.f64N/A

        \[\leadsto \mathsf{fma}\left({\left(\frac{y}{x} \cdot -1\right)}^{2}, -2, 1\right) \]
      14. lower-/.f6439.9

        \[\leadsto \mathsf{fma}\left({\left(\frac{y}{x} \cdot -1\right)}^{2}, -2, 1\right) \]
    5. Applied rewrites39.9%

      \[\leadsto \color{blue}{\mathsf{fma}\left({\left(\frac{y}{x} \cdot -1\right)}^{2}, -2, 1\right)} \]
    6. Step-by-step derivation
      1. lift-pow.f64N/A

        \[\leadsto \mathsf{fma}\left({\left(\frac{y}{x} \cdot -1\right)}^{2}, -2, 1\right) \]
      2. lift-*.f64N/A

        \[\leadsto \mathsf{fma}\left({\left(\frac{y}{x} \cdot -1\right)}^{2}, -2, 1\right) \]
      3. lift-/.f64N/A

        \[\leadsto \mathsf{fma}\left({\left(\frac{y}{x} \cdot -1\right)}^{2}, -2, 1\right) \]
      4. *-commutativeN/A

        \[\leadsto \mathsf{fma}\left({\left(-1 \cdot \frac{y}{x}\right)}^{2}, -2, 1\right) \]
      5. pow2N/A

        \[\leadsto \mathsf{fma}\left(\left(-1 \cdot \frac{y}{x}\right) \cdot \left(-1 \cdot \frac{y}{x}\right), -2, 1\right) \]
      6. mul-1-negN/A

        \[\leadsto \mathsf{fma}\left(\left(\mathsf{neg}\left(\frac{y}{x}\right)\right) \cdot \left(-1 \cdot \frac{y}{x}\right), -2, 1\right) \]
      7. mul-1-negN/A

        \[\leadsto \mathsf{fma}\left(\left(\mathsf{neg}\left(\frac{y}{x}\right)\right) \cdot \left(\mathsf{neg}\left(\frac{y}{x}\right)\right), -2, 1\right) \]
      8. sqr-neg-revN/A

        \[\leadsto \mathsf{fma}\left(\frac{y}{x} \cdot \frac{y}{x}, -2, 1\right) \]
      9. frac-timesN/A

        \[\leadsto \mathsf{fma}\left(\frac{y \cdot y}{x \cdot x}, -2, 1\right) \]
      10. pow2N/A

        \[\leadsto \mathsf{fma}\left(\frac{{y}^{2}}{x \cdot x}, -2, 1\right) \]
      11. pow-to-expN/A

        \[\leadsto \mathsf{fma}\left(\frac{e^{\log y \cdot 2}}{x \cdot x}, -2, 1\right) \]
      12. pow2N/A

        \[\leadsto \mathsf{fma}\left(\frac{e^{\log y \cdot 2}}{{x}^{2}}, -2, 1\right) \]
      13. pow-to-expN/A

        \[\leadsto \mathsf{fma}\left(\frac{e^{\log y \cdot 2}}{e^{\log x \cdot 2}}, -2, 1\right) \]
      14. div-expN/A

        \[\leadsto \mathsf{fma}\left(e^{\log y \cdot 2 - \log x \cdot 2}, -2, 1\right) \]
      15. lower-exp.f64N/A

        \[\leadsto \mathsf{fma}\left(e^{\log y \cdot 2 - \log x \cdot 2}, -2, 1\right) \]
      16. lower--.f64N/A

        \[\leadsto \mathsf{fma}\left(e^{\log y \cdot 2 - \log x \cdot 2}, -2, 1\right) \]
      17. lower-*.f64N/A

        \[\leadsto \mathsf{fma}\left(e^{\log y \cdot 2 - \log x \cdot 2}, -2, 1\right) \]
      18. lower-log.f64N/A

        \[\leadsto \mathsf{fma}\left(e^{\log y \cdot 2 - \log x \cdot 2}, -2, 1\right) \]
      19. lower-*.f64N/A

        \[\leadsto \mathsf{fma}\left(e^{\log y \cdot 2 - \log x \cdot 2}, -2, 1\right) \]
      20. lower-log.f6415.3

        \[\leadsto \mathsf{fma}\left(e^{\log y \cdot 2 - \log x \cdot 2}, -2, 1\right) \]
    7. Applied rewrites15.3%

      \[\leadsto \mathsf{fma}\left(e^{\log y \cdot 2 - \log x \cdot 2}, -2, 1\right) \]
    8. Step-by-step derivation
      1. lift--.f64N/A

        \[\leadsto \mathsf{fma}\left(e^{\log y \cdot 2 - \log x \cdot 2}, -2, 1\right) \]
      2. lift-*.f64N/A

        \[\leadsto \mathsf{fma}\left(e^{\log y \cdot 2 - \log x \cdot 2}, -2, 1\right) \]
      3. lift-log.f64N/A

        \[\leadsto \mathsf{fma}\left(e^{\log y \cdot 2 - \log x \cdot 2}, -2, 1\right) \]
      4. lift-*.f64N/A

        \[\leadsto \mathsf{fma}\left(e^{\log y \cdot 2 - \log x \cdot 2}, -2, 1\right) \]
      5. lift-log.f64N/A

        \[\leadsto \mathsf{fma}\left(e^{\log y \cdot 2 - \log x \cdot 2}, -2, 1\right) \]
      6. distribute-rgt-out--N/A

        \[\leadsto \mathsf{fma}\left(e^{2 \cdot \left(\log y - \log x\right)}, -2, 1\right) \]
      7. distribute-lft-out--N/A

        \[\leadsto \mathsf{fma}\left(e^{2 \cdot \log y - 2 \cdot \log x}, -2, 1\right) \]
      8. flip--N/A

        \[\leadsto \mathsf{fma}\left(e^{\frac{\left(2 \cdot \log y\right) \cdot \left(2 \cdot \log y\right) - \left(2 \cdot \log x\right) \cdot \left(2 \cdot \log x\right)}{2 \cdot \log y + 2 \cdot \log x}}, -2, 1\right) \]
      9. lower-/.f64N/A

        \[\leadsto \mathsf{fma}\left(e^{\frac{\left(2 \cdot \log y\right) \cdot \left(2 \cdot \log y\right) - \left(2 \cdot \log x\right) \cdot \left(2 \cdot \log x\right)}{2 \cdot \log y + 2 \cdot \log x}}, -2, 1\right) \]
    9. Applied rewrites15.3%

      \[\leadsto \mathsf{fma}\left(e^{\frac{{\left(\log y \cdot 2\right)}^{2} - {\left(\log x \cdot 2\right)}^{2}}{\mathsf{fma}\left(-1 \cdot \log y, -2, \log x \cdot 2\right)}}, -2, 1\right) \]

    if 2.2499999999999999e-181 < y < 1.07999999999999994e-44

    1. Initial program 99.9%

      \[\frac{\left(x - y\right) \cdot \left(x + y\right)}{x \cdot x + y \cdot y} \]
    2. Add Preprocessing

    if 1.07999999999999994e-44 < y

    1. Initial program 100.0%

      \[\frac{\left(x - y\right) \cdot \left(x + y\right)}{x \cdot x + y \cdot y} \]
    2. Add Preprocessing
    3. Taylor expanded in y around -inf

      \[\leadsto \color{blue}{-1 \cdot \frac{x + -1 \cdot x}{y} - 1} \]
    4. Step-by-step derivation
      1. lower--.f64N/A

        \[\leadsto -1 \cdot \frac{x + -1 \cdot x}{y} - \color{blue}{1} \]
      2. *-commutativeN/A

        \[\leadsto \frac{x + -1 \cdot x}{y} \cdot -1 - 1 \]
      3. div-addN/A

        \[\leadsto \left(\frac{x}{y} + \frac{-1 \cdot x}{y}\right) \cdot -1 - 1 \]
      4. associate-*r/N/A

        \[\leadsto \left(\frac{x}{y} + -1 \cdot \frac{x}{y}\right) \cdot -1 - 1 \]
      5. +-commutativeN/A

        \[\leadsto \left(-1 \cdot \frac{x}{y} + \frac{x}{y}\right) \cdot -1 - 1 \]
      6. lower-*.f64N/A

        \[\leadsto \left(-1 \cdot \frac{x}{y} + \frac{x}{y}\right) \cdot -1 - 1 \]
      7. distribute-lft1-inN/A

        \[\leadsto \left(\left(-1 + 1\right) \cdot \frac{x}{y}\right) \cdot -1 - 1 \]
      8. metadata-evalN/A

        \[\leadsto \left(0 \cdot \frac{x}{y}\right) \cdot -1 - 1 \]
      9. lower-*.f64N/A

        \[\leadsto \left(0 \cdot \frac{x}{y}\right) \cdot -1 - 1 \]
      10. lower-/.f64100.0

        \[\leadsto \left(0 \cdot \frac{x}{y}\right) \cdot -1 - 1 \]
    5. Applied rewrites100.0%

      \[\leadsto \color{blue}{\left(0 \cdot \frac{x}{y}\right) \cdot -1 - 1} \]
  3. Recombined 3 regimes into one program.
  4. Final simplification31.9%

    \[\leadsto \begin{array}{l} \mathbf{if}\;y \leq 2.25 \cdot 10^{-181}:\\ \;\;\;\;\mathsf{fma}\left(e^{\frac{{\left(\log y \cdot 2\right)}^{2} - {\left(\log x \cdot 2\right)}^{2}}{\mathsf{fma}\left(-1 \cdot \log y, -2, \log x \cdot 2\right)}}, -2, 1\right)\\ \mathbf{elif}\;y \leq 1.08 \cdot 10^{-44}:\\ \;\;\;\;\frac{\left(x - y\right) \cdot \left(x + y\right)}{x \cdot x + y \cdot y}\\ \mathbf{else}:\\ \;\;\;\;-1 \cdot \left(0 \cdot \frac{x}{y} + 1\right)\\ \end{array} \]
  5. Add Preprocessing

Alternative 2: 92.1% accurate, N/A× speedup?

\[\begin{array}{l} y_m = \left|y\right| \\ \begin{array}{l} t_0 := \log x \cdot 2\\ \mathbf{if}\;y\_m \leq 2.25 \cdot 10^{-181}:\\ \;\;\;\;\mathsf{fma}\left(e^{\frac{{\left(\log y\_m \cdot 2\right)}^{2} - {t\_0}^{2}}{\mathsf{fma}\left(-1 \cdot \log y\_m, -2, t\_0\right)}}, -2, 1\right)\\ \mathbf{elif}\;y\_m \leq 1.08 \cdot 10^{-44}:\\ \;\;\;\;\frac{\left(x - y\_m\right) \cdot \left(\mathsf{fma}\left(\frac{x}{y\_m}, 1, 1\right) \cdot y\_m\right)}{x \cdot x + y\_m \cdot y\_m}\\ \mathbf{else}:\\ \;\;\;\;-1 \cdot \left(0 \cdot \frac{x}{y\_m} + 1\right)\\ \end{array} \end{array} \]
y_m = (fabs.f64 y)
;; Same regime split as Alternative 1, except the middle regime rewrites
;; (+ x y_m) as (* (fma (/ x y_m) 1 1) y_m), i.e. y_m*(x/y_m + 1).
(FPCore (x y_m)
 :precision binary64
 (let* ((t_0 (* (log x) 2.0)))
   (if (<= y_m 2.25e-181)
     (fma
      (exp
       (/
        (- (pow (* (log y_m) 2.0) 2.0) (pow t_0 2.0))
        (fma (* -1.0 (log y_m)) -2.0 t_0)))
      -2.0
      1.0)
     (if (<= y_m 1.08e-44)
       (/
        (* (- x y_m) (* (fma (/ x y_m) 1.0 1.0) y_m))
        (+ (* x x) (* y_m y_m)))
       (* -1.0 (+ (* 0.0 (/ x y_m)) 1.0))))))
y_m = fabs(y);
/* Regime-split approximation of ((x - y_m)*(x + y_m)) / (x*x + y_m*y_m),
 * y_m = fabs(y) supplied by the caller. Same as Alternative 1 except the
 * middle regime rewrites (x + y_m) as fma(x/y_m, 1, 1)*y_m. */
double code(double x, double y_m) {
	double t_0 = log(x) * 2.0;  /* = log(x^2) */
	double tmp;
	if (y_m <= 2.25e-181) {
		/* Tiny y_m: log/exp form of 1 - 2*y_m^2/x^2, avoiding underflow. */
		tmp = fma(exp(((pow((log(y_m) * 2.0), 2.0) - pow(t_0, 2.0)) / fma((-1.0 * log(y_m)), -2.0, t_0))), -2.0, 1.0);
	} else if (y_m <= 1.08e-44) {
		/* Middle regime: (x + y_m) computed as y_m*(x/y_m + 1) via fma. */
		tmp = ((x - y_m) * (fma((x / y_m), 1.0, 1.0) * y_m)) / ((x * x) + (y_m * y_m));
	} else {
		/* Large y_m: value is -1; 0.0*(x/y_m) preserves non-finite input
		 * propagation (NOTE(review): confirm intended). */
		tmp = -1.0 * ((0.0 * (x / y_m)) + 1.0);
	}
	return tmp;
}
y_m = abs(y)
function code(x, y_m)
	# Regime-split approximation; same as Alternative 1 except the middle
	# regime rewrites (x + y_m) as fma(x/y_m, 1, 1)*y_m.
	t_0 = Float64(log(x) * 2.0)  # = log(x^2)
	tmp = 0.0
	if (y_m <= 2.25e-181)
		# Tiny y_m: log/exp form of 1 - 2*y_m^2/x^2, avoiding underflow.
		tmp = fma(exp(Float64(Float64((Float64(log(y_m) * 2.0) ^ 2.0) - (t_0 ^ 2.0)) / fma(Float64(-1.0 * log(y_m)), -2.0, t_0))), -2.0, 1.0);
	elseif (y_m <= 1.08e-44)
		tmp = Float64(Float64(Float64(x - y_m) * Float64(fma(Float64(x / y_m), 1.0, 1.0) * y_m)) / Float64(Float64(x * x) + Float64(y_m * y_m)));
	else
		# Large y_m: value is -1.
		tmp = Float64(-1.0 * Float64(Float64(0.0 * Float64(x / y_m)) + 1.0));
	end
	return tmp
end
y_m = N[Abs[y], $MachinePrecision]
code[x_, y$95$m_] := Block[{t$95$0 = N[(N[Log[x], $MachinePrecision] * 2.0), $MachinePrecision]}, If[LessEqual[y$95$m, 2.25e-181], N[(N[Exp[N[(N[(N[Power[N[(N[Log[y$95$m], $MachinePrecision] * 2.0), $MachinePrecision], 2.0], $MachinePrecision] - N[Power[t$95$0, 2.0], $MachinePrecision]), $MachinePrecision] / N[(N[(-1.0 * N[Log[y$95$m], $MachinePrecision]), $MachinePrecision] * -2.0 + t$95$0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] * -2.0 + 1.0), $MachinePrecision], If[LessEqual[y$95$m, 1.08e-44], N[(N[(N[(x - y$95$m), $MachinePrecision] * N[(N[(N[(x / y$95$m), $MachinePrecision] * 1.0 + 1.0), $MachinePrecision] * y$95$m), $MachinePrecision]), $MachinePrecision] / N[(N[(x * x), $MachinePrecision] + N[(y$95$m * y$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(-1.0 * N[(N[(0.0 * N[(x / y$95$m), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}
y_m = \left|y\right|

\\
\begin{array}{l}
t_0 := \log x \cdot 2\\
\mathbf{if}\;y\_m \leq 2.25 \cdot 10^{-181}:\\
\;\;\;\;\mathsf{fma}\left(e^{\frac{{\left(\log y\_m \cdot 2\right)}^{2} - {t\_0}^{2}}{\mathsf{fma}\left(-1 \cdot \log y\_m, -2, t\_0\right)}}, -2, 1\right)\\

\mathbf{elif}\;y\_m \leq 1.08 \cdot 10^{-44}:\\
\;\;\;\;\frac{\left(x - y\_m\right) \cdot \left(\mathsf{fma}\left(\frac{x}{y\_m}, 1, 1\right) \cdot y\_m\right)}{x \cdot x + y\_m \cdot y\_m}\\

\mathbf{else}:\\
\;\;\;\;-1 \cdot \left(0 \cdot \frac{x}{y\_m} + 1\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if y < 2.2499999999999999e-181

    1. Initial program 66.0%

      \[\frac{\left(x - y\right) \cdot \left(x + y\right)}{x \cdot x + y \cdot y} \]
    2. Add Preprocessing
    3. Taylor expanded in y around 0

      \[\leadsto \color{blue}{1 + -2 \cdot \frac{{y}^{2}}{{x}^{2}}} \]
    4. Step-by-step derivation
      1. +-commutativeN/A

        \[\leadsto -2 \cdot \frac{{y}^{2}}{{x}^{2}} + \color{blue}{1} \]
      2. *-commutativeN/A

        \[\leadsto \frac{{y}^{2}}{{x}^{2}} \cdot -2 + 1 \]
      3. lower-fma.f64N/A

        \[\leadsto \mathsf{fma}\left(\frac{{y}^{2}}{{x}^{2}}, \color{blue}{-2}, 1\right) \]
      4. pow2N/A

        \[\leadsto \mathsf{fma}\left(\frac{y \cdot y}{{x}^{2}}, -2, 1\right) \]
      5. pow2N/A

        \[\leadsto \mathsf{fma}\left(\frac{y \cdot y}{x \cdot x}, -2, 1\right) \]
      6. frac-timesN/A

        \[\leadsto \mathsf{fma}\left(\frac{y}{x} \cdot \frac{y}{x}, -2, 1\right) \]
      7. sqr-neg-revN/A

        \[\leadsto \mathsf{fma}\left(\left(\mathsf{neg}\left(\frac{y}{x}\right)\right) \cdot \left(\mathsf{neg}\left(\frac{y}{x}\right)\right), -2, 1\right) \]
      8. mul-1-negN/A

        \[\leadsto \mathsf{fma}\left(\left(-1 \cdot \frac{y}{x}\right) \cdot \left(\mathsf{neg}\left(\frac{y}{x}\right)\right), -2, 1\right) \]
      9. mul-1-negN/A

        \[\leadsto \mathsf{fma}\left(\left(-1 \cdot \frac{y}{x}\right) \cdot \left(-1 \cdot \frac{y}{x}\right), -2, 1\right) \]
      10. pow2N/A

        \[\leadsto \mathsf{fma}\left({\left(-1 \cdot \frac{y}{x}\right)}^{2}, -2, 1\right) \]
      11. lower-pow.f64N/A

        \[\leadsto \mathsf{fma}\left({\left(-1 \cdot \frac{y}{x}\right)}^{2}, -2, 1\right) \]
      12. *-commutativeN/A

        \[\leadsto \mathsf{fma}\left({\left(\frac{y}{x} \cdot -1\right)}^{2}, -2, 1\right) \]
      13. lower-*.f64N/A

        \[\leadsto \mathsf{fma}\left({\left(\frac{y}{x} \cdot -1\right)}^{2}, -2, 1\right) \]
      14. lower-/.f6439.9

        \[\leadsto \mathsf{fma}\left({\left(\frac{y}{x} \cdot -1\right)}^{2}, -2, 1\right) \]
    5. Applied rewrites39.9%

      \[\leadsto \color{blue}{\mathsf{fma}\left({\left(\frac{y}{x} \cdot -1\right)}^{2}, -2, 1\right)} \]
    6. Step-by-step derivation
      1. lift-pow.f64N/A

        \[\leadsto \mathsf{fma}\left({\left(\frac{y}{x} \cdot -1\right)}^{2}, -2, 1\right) \]
      2. lift-*.f64N/A

        \[\leadsto \mathsf{fma}\left({\left(\frac{y}{x} \cdot -1\right)}^{2}, -2, 1\right) \]
      3. lift-/.f64N/A

        \[\leadsto \mathsf{fma}\left({\left(\frac{y}{x} \cdot -1\right)}^{2}, -2, 1\right) \]
      4. *-commutativeN/A

        \[\leadsto \mathsf{fma}\left({\left(-1 \cdot \frac{y}{x}\right)}^{2}, -2, 1\right) \]
      5. pow2N/A

        \[\leadsto \mathsf{fma}\left(\left(-1 \cdot \frac{y}{x}\right) \cdot \left(-1 \cdot \frac{y}{x}\right), -2, 1\right) \]
      6. mul-1-negN/A

        \[\leadsto \mathsf{fma}\left(\left(\mathsf{neg}\left(\frac{y}{x}\right)\right) \cdot \left(-1 \cdot \frac{y}{x}\right), -2, 1\right) \]
      7. mul-1-negN/A

        \[\leadsto \mathsf{fma}\left(\left(\mathsf{neg}\left(\frac{y}{x}\right)\right) \cdot \left(\mathsf{neg}\left(\frac{y}{x}\right)\right), -2, 1\right) \]
      8. sqr-neg-revN/A

        \[\leadsto \mathsf{fma}\left(\frac{y}{x} \cdot \frac{y}{x}, -2, 1\right) \]
      9. frac-timesN/A

        \[\leadsto \mathsf{fma}\left(\frac{y \cdot y}{x \cdot x}, -2, 1\right) \]
      10. pow2N/A

        \[\leadsto \mathsf{fma}\left(\frac{{y}^{2}}{x \cdot x}, -2, 1\right) \]
      11. pow-to-expN/A

        \[\leadsto \mathsf{fma}\left(\frac{e^{\log y \cdot 2}}{x \cdot x}, -2, 1\right) \]
      12. pow2N/A

        \[\leadsto \mathsf{fma}\left(\frac{e^{\log y \cdot 2}}{{x}^{2}}, -2, 1\right) \]
      13. pow-to-expN/A

        \[\leadsto \mathsf{fma}\left(\frac{e^{\log y \cdot 2}}{e^{\log x \cdot 2}}, -2, 1\right) \]
      14. div-expN/A

        \[\leadsto \mathsf{fma}\left(e^{\log y \cdot 2 - \log x \cdot 2}, -2, 1\right) \]
      15. lower-exp.f64N/A

        \[\leadsto \mathsf{fma}\left(e^{\log y \cdot 2 - \log x \cdot 2}, -2, 1\right) \]
      16. lower--.f64N/A

        \[\leadsto \mathsf{fma}\left(e^{\log y \cdot 2 - \log x \cdot 2}, -2, 1\right) \]
      17. lower-*.f64N/A

        \[\leadsto \mathsf{fma}\left(e^{\log y \cdot 2 - \log x \cdot 2}, -2, 1\right) \]
      18. lower-log.f64N/A

        \[\leadsto \mathsf{fma}\left(e^{\log y \cdot 2 - \log x \cdot 2}, -2, 1\right) \]
      19. lower-*.f64N/A

        \[\leadsto \mathsf{fma}\left(e^{\log y \cdot 2 - \log x \cdot 2}, -2, 1\right) \]
      20. lower-log.f6415.3

        \[\leadsto \mathsf{fma}\left(e^{\log y \cdot 2 - \log x \cdot 2}, -2, 1\right) \]
    7. Applied rewrites15.3%

      \[\leadsto \mathsf{fma}\left(e^{\log y \cdot 2 - \log x \cdot 2}, -2, 1\right) \]
    8. Step-by-step derivation
      1. lift--.f64N/A

        \[\leadsto \mathsf{fma}\left(e^{\log y \cdot 2 - \log x \cdot 2}, -2, 1\right) \]
      2. lift-*.f64N/A

        \[\leadsto \mathsf{fma}\left(e^{\log y \cdot 2 - \log x \cdot 2}, -2, 1\right) \]
      3. lift-log.f64N/A

        \[\leadsto \mathsf{fma}\left(e^{\log y \cdot 2 - \log x \cdot 2}, -2, 1\right) \]
      4. lift-*.f64N/A

        \[\leadsto \mathsf{fma}\left(e^{\log y \cdot 2 - \log x \cdot 2}, -2, 1\right) \]
      5. lift-log.f64N/A

        \[\leadsto \mathsf{fma}\left(e^{\log y \cdot 2 - \log x \cdot 2}, -2, 1\right) \]
      6. distribute-rgt-out--N/A

        \[\leadsto \mathsf{fma}\left(e^{2 \cdot \left(\log y - \log x\right)}, -2, 1\right) \]
      7. distribute-lft-out--N/A

        \[\leadsto \mathsf{fma}\left(e^{2 \cdot \log y - 2 \cdot \log x}, -2, 1\right) \]
      8. flip--N/A

        \[\leadsto \mathsf{fma}\left(e^{\frac{\left(2 \cdot \log y\right) \cdot \left(2 \cdot \log y\right) - \left(2 \cdot \log x\right) \cdot \left(2 \cdot \log x\right)}{2 \cdot \log y + 2 \cdot \log x}}, -2, 1\right) \]
      9. lower-/.f64N/A

        \[\leadsto \mathsf{fma}\left(e^{\frac{\left(2 \cdot \log y\right) \cdot \left(2 \cdot \log y\right) - \left(2 \cdot \log x\right) \cdot \left(2 \cdot \log x\right)}{2 \cdot \log y + 2 \cdot \log x}}, -2, 1\right) \]
    9. Applied rewrites15.3%

      \[\leadsto \mathsf{fma}\left(e^{\frac{{\left(\log y \cdot 2\right)}^{2} - {\left(\log x \cdot 2\right)}^{2}}{\mathsf{fma}\left(-1 \cdot \log y, -2, \log x \cdot 2\right)}}, -2, 1\right) \]

    if 2.2499999999999999e-181 < y < 1.07999999999999994e-44

    1. Initial program 99.9%

      \[\frac{\left(x - y\right) \cdot \left(x + y\right)}{x \cdot x + y \cdot y} \]
    2. Add Preprocessing
    3. Taylor expanded in y around inf

      \[\leadsto \frac{\left(x - y\right) \cdot \color{blue}{\left(y \cdot \left(1 + \frac{x}{y}\right)\right)}}{x \cdot x + y \cdot y} \]
    4. Step-by-step derivation
      1. *-commutativeN/A

        \[\leadsto \frac{\left(x - y\right) \cdot \left(\left(1 + \frac{x}{y}\right) \cdot \color{blue}{y}\right)}{x \cdot x + y \cdot y} \]
      2. lower-*.f64N/A

        \[\leadsto \frac{\left(x - y\right) \cdot \left(\left(1 + \frac{x}{y}\right) \cdot \color{blue}{y}\right)}{x \cdot x + y \cdot y} \]
      3. +-commutativeN/A

        \[\leadsto \frac{\left(x - y\right) \cdot \left(\left(\frac{x}{y} + 1\right) \cdot y\right)}{x \cdot x + y \cdot y} \]
      4. frac-2negN/A

        \[\leadsto \frac{\left(x - y\right) \cdot \left(\left(\frac{\mathsf{neg}\left(x\right)}{\mathsf{neg}\left(y\right)} + 1\right) \cdot y\right)}{x \cdot x + y \cdot y} \]
      5. mul-1-negN/A

        \[\leadsto \frac{\left(x - y\right) \cdot \left(\left(\frac{-1 \cdot x}{\mathsf{neg}\left(y\right)} + 1\right) \cdot y\right)}{x \cdot x + y \cdot y} \]
      6. *-commutativeN/A

        \[\leadsto \frac{\left(x - y\right) \cdot \left(\left(\frac{x \cdot -1}{\mathsf{neg}\left(y\right)} + 1\right) \cdot y\right)}{x \cdot x + y \cdot y} \]
      7. mul-1-negN/A

        \[\leadsto \frac{\left(x - y\right) \cdot \left(\left(\frac{x \cdot -1}{-1 \cdot y} + 1\right) \cdot y\right)}{x \cdot x + y \cdot y} \]
      8. *-commutativeN/A

        \[\leadsto \frac{\left(x - y\right) \cdot \left(\left(\frac{x \cdot -1}{y \cdot -1} + 1\right) \cdot y\right)}{x \cdot x + y \cdot y} \]
      9. times-fracN/A

        \[\leadsto \frac{\left(x - y\right) \cdot \left(\left(\frac{x}{y} \cdot \frac{-1}{-1} + 1\right) \cdot y\right)}{x \cdot x + y \cdot y} \]
      10. metadata-evalN/A

        \[\leadsto \frac{\left(x - y\right) \cdot \left(\left(\frac{x}{y} \cdot 1 + 1\right) \cdot y\right)}{x \cdot x + y \cdot y} \]
      11. lower-fma.f64N/A

        \[\leadsto \frac{\left(x - y\right) \cdot \left(\mathsf{fma}\left(\frac{x}{y}, 1, 1\right) \cdot y\right)}{x \cdot x + y \cdot y} \]
      12. lower-/.f6499.8

        \[\leadsto \frac{\left(x - y\right) \cdot \left(\mathsf{fma}\left(\frac{x}{y}, 1, 1\right) \cdot y\right)}{x \cdot x + y \cdot y} \]
    5. Applied rewrites99.8%

      \[\leadsto \frac{\left(x - y\right) \cdot \color{blue}{\left(\mathsf{fma}\left(\frac{x}{y}, 1, 1\right) \cdot y\right)}}{x \cdot x + y \cdot y} \]

    if 1.07999999999999994e-44 < y

    1. Initial program 100.0%

      \[\frac{\left(x - y\right) \cdot \left(x + y\right)}{x \cdot x + y \cdot y} \]
    2. Add Preprocessing
    3. Taylor expanded in y around -inf

      \[\leadsto \color{blue}{-1 \cdot \frac{x + -1 \cdot x}{y} - 1} \]
    4. Step-by-step derivation
      1. lower--.f64N/A

        \[\leadsto -1 \cdot \frac{x + -1 \cdot x}{y} - \color{blue}{1} \]
      2. *-commutativeN/A

        \[\leadsto \frac{x + -1 \cdot x}{y} \cdot -1 - 1 \]
      3. div-addN/A

        \[\leadsto \left(\frac{x}{y} + \frac{-1 \cdot x}{y}\right) \cdot -1 - 1 \]
      4. associate-*r/N/A

        \[\leadsto \left(\frac{x}{y} + -1 \cdot \frac{x}{y}\right) \cdot -1 - 1 \]
      5. +-commutativeN/A

        \[\leadsto \left(-1 \cdot \frac{x}{y} + \frac{x}{y}\right) \cdot -1 - 1 \]
      6. lower-*.f64N/A

        \[\leadsto \left(-1 \cdot \frac{x}{y} + \frac{x}{y}\right) \cdot -1 - 1 \]
      7. distribute-lft1-inN/A

        \[\leadsto \left(\left(-1 + 1\right) \cdot \frac{x}{y}\right) \cdot -1 - 1 \]
      8. metadata-evalN/A

        \[\leadsto \left(0 \cdot \frac{x}{y}\right) \cdot -1 - 1 \]
      9. lower-*.f64N/A

        \[\leadsto \left(0 \cdot \frac{x}{y}\right) \cdot -1 - 1 \]
      10. lower-/.f64100.0

        \[\leadsto \left(0 \cdot \frac{x}{y}\right) \cdot -1 - 1 \]
    5. Applied rewrites100.0%

      \[\leadsto \color{blue}{\left(0 \cdot \frac{x}{y}\right) \cdot -1 - 1} \]
  3. Recombined 3 regimes into one program.
  4. Final simplification31.8%

    \[\leadsto \begin{array}{l} \mathbf{if}\;y \leq 2.25 \cdot 10^{-181}:\\ \;\;\;\;\mathsf{fma}\left(e^{\frac{{\left(\log y \cdot 2\right)}^{2} - {\left(\log x \cdot 2\right)}^{2}}{\mathsf{fma}\left(-1 \cdot \log y, -2, \log x \cdot 2\right)}}, -2, 1\right)\\ \mathbf{elif}\;y \leq 1.08 \cdot 10^{-44}:\\ \;\;\;\;\frac{\left(x - y\right) \cdot \left(\mathsf{fma}\left(\frac{x}{y}, 1, 1\right) \cdot y\right)}{x \cdot x + y \cdot y}\\ \mathbf{else}:\\ \;\;\;\;-1 \cdot \left(0 \cdot \frac{x}{y} + 1\right)\\ \end{array} \]
  5. Add Preprocessing

Alternative 3: 92.1% accurate, N/A× speedup?

\[\begin{array}{l} y_m = \left|y\right| \\ \begin{array}{l} t_0 := \frac{\left(x - y\_m\right) \cdot \left(x + y\_m\right)}{x \cdot x + y\_m \cdot y\_m}\\ t_1 := -1 \cdot \left(0 \cdot \frac{x}{y\_m} + 1\right)\\ \mathbf{if}\;t\_0 \leq -0.5:\\ \;\;\;\;t\_1\\ \mathbf{elif}\;t\_0 \leq 2:\\ \;\;\;\;\mathsf{fma}\left({\left(\frac{y\_m}{x} \cdot -1\right)}^{2}, -2, 1\right)\\ \mathbf{else}:\\ \;\;\;\;t\_1\\ \end{array} \end{array} \]
y_m = (fabs.f64 y)
;; Two-regime form: t_0 is the original expression and also serves as the
;; regime selector; for t_0 in (-0.5, 2] the series form 1 - 2*(y_m/x)^2
;; is used, otherwise t_1 (constant -1 with input-propagating 0* term).
(FPCore (x y_m)
 :precision binary64
 (let* ((t_0 (/ (* (- x y_m) (+ x y_m)) (+ (* x x) (* y_m y_m))))
        (t_1 (* -1.0 (+ (* 0.0 (/ x y_m)) 1.0))))
   (if (<= t_0 -0.5)
     t_1
     (if (<= t_0 2.0) (fma (pow (* (/ y_m x) -1.0) 2.0) -2.0 1.0) t_1))))
y_m = fabs(y);
double code(double x, double y_m) {
	double t_0 = ((x - y_m) * (x + y_m)) / ((x * x) + (y_m * y_m));
	double t_1 = -1.0 * ((0.0 * (x / y_m)) + 1.0);
	double tmp;
	if (t_0 <= -0.5) {
		tmp = t_1;
	} else if (t_0 <= 2.0) {
		tmp = fma(pow(((y_m / x) * -1.0), 2.0), -2.0, 1.0);
	} else {
		tmp = t_1;
	}
	return tmp;
}
y_m = abs(y)
# Alternative 3: regime split of ((x - y)(x + y)) / (x^2 + y^2) with y_m = |y|.
# Middle regime evaluates 1 - 2*(y_m/x)^2 with one fma; outer regimes return
# the constant -1 written so NaN/Inf from x / y_m still propagates.
function code(x, y_m)
	selector = Float64(Float64(Float64(x - y_m) * Float64(x + y_m)) / Float64(Float64(x * x) + Float64(y_m * y_m)))
	outer = Float64(-1.0 * Float64(Float64(0.0 * Float64(x / y_m)) + 1.0))
	# A NaN selector fails `-0.5 < selector` and falls through to `outer`,
	# matching the original if/elseif/else.
	if -0.5 < selector <= 2.0
		return fma((Float64(Float64(y_m / x) * -1.0) ^ 2.0), -2.0, 1.0)
	end
	return outer
end
y_m = N[Abs[y], $MachinePrecision]
code[x_, y$95$m_] := Block[{t$95$0 = N[(N[(N[(x - y$95$m), $MachinePrecision] * N[(x + y$95$m), $MachinePrecision]), $MachinePrecision] / N[(N[(x * x), $MachinePrecision] + N[(y$95$m * y$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(-1.0 * N[(N[(0.0 * N[(x / y$95$m), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[t$95$0, -0.5], t$95$1, If[LessEqual[t$95$0, 2.0], N[(N[Power[N[(N[(y$95$m / x), $MachinePrecision] * -1.0), $MachinePrecision], 2.0], $MachinePrecision] * -2.0 + 1.0), $MachinePrecision], t$95$1]]]]
\begin{array}{l}
y_m = \left|y\right|

\\
\begin{array}{l}
t_0 := \frac{\left(x - y\_m\right) \cdot \left(x + y\_m\right)}{x \cdot x + y\_m \cdot y\_m}\\
t_1 := -1 \cdot \left(0 \cdot \frac{x}{y\_m} + 1\right)\\
\mathbf{if}\;t\_0 \leq -0.5:\\
\;\;\;\;t\_1\\

\mathbf{elif}\;t\_0 \leq 2:\\
\;\;\;\;\mathsf{fma}\left({\left(\frac{y\_m}{x} \cdot -1\right)}^{2}, -2, 1\right)\\

\mathbf{else}:\\
\;\;\;\;t\_1\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if (/.f64 (*.f64 (-.f64 x y) (+.f64 x y)) (+.f64 (*.f64 x x) (*.f64 y y))) < -0.5 or 2 < (/.f64 (*.f64 (-.f64 x y) (+.f64 x y)) (+.f64 (*.f64 x x) (*.f64 y y)))

    1. Initial program 60.9%

      \[\frac{\left(x - y\right) \cdot \left(x + y\right)}{x \cdot x + y \cdot y} \]
    2. Add Preprocessing
    3. Taylor expanded in y around -inf

      \[\leadsto \color{blue}{-1 \cdot \frac{x + -1 \cdot x}{y} - 1} \]
    4. Step-by-step derivation
      1. lower--.f64N/A

        \[\leadsto -1 \cdot \frac{x + -1 \cdot x}{y} - \color{blue}{1} \]
      2. *-commutativeN/A

        \[\leadsto \frac{x + -1 \cdot x}{y} \cdot -1 - 1 \]
      3. div-addN/A

        \[\leadsto \left(\frac{x}{y} + \frac{-1 \cdot x}{y}\right) \cdot -1 - 1 \]
      4. associate-*r/N/A

        \[\leadsto \left(\frac{x}{y} + -1 \cdot \frac{x}{y}\right) \cdot -1 - 1 \]
      5. +-commutativeN/A

        \[\leadsto \left(-1 \cdot \frac{x}{y} + \frac{x}{y}\right) \cdot -1 - 1 \]
      6. lower-*.f64N/A

        \[\leadsto \left(-1 \cdot \frac{x}{y} + \frac{x}{y}\right) \cdot -1 - 1 \]
      7. distribute-lft1-inN/A

        \[\leadsto \left(\left(-1 + 1\right) \cdot \frac{x}{y}\right) \cdot -1 - 1 \]
      8. metadata-evalN/A

        \[\leadsto \left(0 \cdot \frac{x}{y}\right) \cdot -1 - 1 \]
      9. lower-*.f64N/A

        \[\leadsto \left(0 \cdot \frac{x}{y}\right) \cdot -1 - 1 \]
      10. lower-/.f6488.1

        \[\leadsto \left(0 \cdot \frac{x}{y}\right) \cdot -1 - 1 \]
    5. Applied rewrites88.1%

      \[\leadsto \color{blue}{\left(0 \cdot \frac{x}{y}\right) \cdot -1 - 1} \]

    if -0.5 < (/.f64 (*.f64 (-.f64 x y) (+.f64 x y)) (+.f64 (*.f64 x x) (*.f64 y y))) < 2

    1. Initial program 100.0%

      \[\frac{\left(x - y\right) \cdot \left(x + y\right)}{x \cdot x + y \cdot y} \]
    2. Add Preprocessing
    3. Taylor expanded in y around 0

      \[\leadsto \color{blue}{1 + -2 \cdot \frac{{y}^{2}}{{x}^{2}}} \]
    4. Step-by-step derivation
      1. +-commutativeN/A

        \[\leadsto -2 \cdot \frac{{y}^{2}}{{x}^{2}} + \color{blue}{1} \]
      2. *-commutativeN/A

        \[\leadsto \frac{{y}^{2}}{{x}^{2}} \cdot -2 + 1 \]
      3. lower-fma.f64N/A

        \[\leadsto \mathsf{fma}\left(\frac{{y}^{2}}{{x}^{2}}, \color{blue}{-2}, 1\right) \]
      4. pow2N/A

        \[\leadsto \mathsf{fma}\left(\frac{y \cdot y}{{x}^{2}}, -2, 1\right) \]
      5. pow2N/A

        \[\leadsto \mathsf{fma}\left(\frac{y \cdot y}{x \cdot x}, -2, 1\right) \]
      6. frac-timesN/A

        \[\leadsto \mathsf{fma}\left(\frac{y}{x} \cdot \frac{y}{x}, -2, 1\right) \]
      7. sqr-neg-revN/A

        \[\leadsto \mathsf{fma}\left(\left(\mathsf{neg}\left(\frac{y}{x}\right)\right) \cdot \left(\mathsf{neg}\left(\frac{y}{x}\right)\right), -2, 1\right) \]
      8. mul-1-negN/A

        \[\leadsto \mathsf{fma}\left(\left(-1 \cdot \frac{y}{x}\right) \cdot \left(\mathsf{neg}\left(\frac{y}{x}\right)\right), -2, 1\right) \]
      9. mul-1-negN/A

        \[\leadsto \mathsf{fma}\left(\left(-1 \cdot \frac{y}{x}\right) \cdot \left(-1 \cdot \frac{y}{x}\right), -2, 1\right) \]
      10. pow2N/A

        \[\leadsto \mathsf{fma}\left({\left(-1 \cdot \frac{y}{x}\right)}^{2}, -2, 1\right) \]
      11. lower-pow.f64N/A

        \[\leadsto \mathsf{fma}\left({\left(-1 \cdot \frac{y}{x}\right)}^{2}, -2, 1\right) \]
      12. *-commutativeN/A

        \[\leadsto \mathsf{fma}\left({\left(\frac{y}{x} \cdot -1\right)}^{2}, -2, 1\right) \]
      13. lower-*.f64N/A

        \[\leadsto \mathsf{fma}\left({\left(\frac{y}{x} \cdot -1\right)}^{2}, -2, 1\right) \]
      14. lower-/.f6498.3

        \[\leadsto \mathsf{fma}\left({\left(\frac{y}{x} \cdot -1\right)}^{2}, -2, 1\right) \]
    5. Applied rewrites98.3%

      \[\leadsto \color{blue}{\mathsf{fma}\left({\left(\frac{y}{x} \cdot -1\right)}^{2}, -2, 1\right)} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification91.2%

    \[\leadsto \begin{array}{l} \mathbf{if}\;\frac{\left(x - y\right) \cdot \left(x + y\right)}{x \cdot x + y \cdot y} \leq -0.5:\\ \;\;\;\;-1 \cdot \left(0 \cdot \frac{x}{y} + 1\right)\\ \mathbf{elif}\;\frac{\left(x - y\right) \cdot \left(x + y\right)}{x \cdot x + y \cdot y} \leq 2:\\ \;\;\;\;\mathsf{fma}\left({\left(\frac{y}{x} \cdot -1\right)}^{2}, -2, 1\right)\\ \mathbf{else}:\\ \;\;\;\;-1 \cdot \left(0 \cdot \frac{x}{y} + 1\right)\\ \end{array} \]
  5. Add Preprocessing

Alternative 4: 92.1% accurate, N/A× speedup?

\[\begin{array}{l} y_m = \left|y\right| \\ \begin{array}{l} t_0 := \log x \cdot 2\\ t_1 := \frac{\left(x - y\_m\right) \cdot \left(x + y\_m\right)}{x \cdot x + y\_m \cdot y\_m}\\ t_2 := -1 \cdot \left(0 \cdot \frac{x}{y\_m} + 1\right)\\ \mathbf{if}\;t\_1 \leq -0.5:\\ \;\;\;\;t\_2\\ \mathbf{elif}\;t\_1 \leq 2:\\ \;\;\;\;\mathsf{fma}\left(e^{\frac{{\left(\log y\_m \cdot 2\right)}^{2} - {t\_0}^{2}}{\mathsf{fma}\left(-1 \cdot \log y\_m, -2, t\_0\right)}}, -2, 1\right)\\ \mathbf{else}:\\ \;\;\;\;t\_2\\ \end{array} \end{array} \]
y_m = (fabs.f64 y)
(FPCore (x y_m)
 :precision binary64
 (let* ((t_0 (* (log x) 2.0))
        (t_1 (/ (* (- x y_m) (+ x y_m)) (+ (* x x) (* y_m y_m))))
        (t_2 (* -1.0 (+ (* 0.0 (/ x y_m)) 1.0))))
   (if (<= t_1 -0.5)
     t_2
     (if (<= t_1 2.0)
       (fma
        (exp
         (/
          (- (pow (* (log y_m) 2.0) 2.0) (pow t_0 2.0))
          (fma (* -1.0 (log y_m)) -2.0 t_0)))
        -2.0
        1.0)
       t_2))))
y_m = fabs(y);
double code(double x, double y_m) {
	double t_0 = log(x) * 2.0;
	double t_1 = ((x - y_m) * (x + y_m)) / ((x * x) + (y_m * y_m));
	double t_2 = -1.0 * ((0.0 * (x / y_m)) + 1.0);
	double tmp;
	if (t_1 <= -0.5) {
		tmp = t_2;
	} else if (t_1 <= 2.0) {
		tmp = fma(exp(((pow((log(y_m) * 2.0), 2.0) - pow(t_0, 2.0)) / fma((-1.0 * log(y_m)), -2.0, t_0))), -2.0, 1.0);
	} else {
		tmp = t_2;
	}
	return tmp;
}
y_m = abs(y)
# Alternative 4: regime split of ((x - y)(x + y)) / (x^2 + y^2) with y_m = |y|.
# Middle regime reconstructs (y_m/x)^2 via logs and exp per the derivation;
# outer regimes return -1 written so NaN/Inf from x / y_m propagates.
function code(x, y_m)
	two_log_x = Float64(log(x) * 2.0)
	selector = Float64(Float64(Float64(x - y_m) * Float64(x + y_m)) / Float64(Float64(x * x) + Float64(y_m * y_m)))
	outer = Float64(-1.0 * Float64(Float64(0.0 * Float64(x / y_m)) + 1.0))
	# NaN selector short-circuits the chained comparison -> `outer`.
	if -0.5 < selector <= 2.0
		numer = Float64((Float64(log(y_m) * 2.0) ^ 2.0) - (two_log_x ^ 2.0))
		denom = fma(Float64(-1.0 * log(y_m)), -2.0, two_log_x)
		return fma(exp(Float64(numer / denom)), -2.0, 1.0)
	end
	return outer
end
y_m = N[Abs[y], $MachinePrecision]
code[x_, y$95$m_] := Block[{t$95$0 = N[(N[Log[x], $MachinePrecision] * 2.0), $MachinePrecision]}, Block[{t$95$1 = N[(N[(N[(x - y$95$m), $MachinePrecision] * N[(x + y$95$m), $MachinePrecision]), $MachinePrecision] / N[(N[(x * x), $MachinePrecision] + N[(y$95$m * y$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$2 = N[(-1.0 * N[(N[(0.0 * N[(x / y$95$m), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[t$95$1, -0.5], t$95$2, If[LessEqual[t$95$1, 2.0], N[(N[Exp[N[(N[(N[Power[N[(N[Log[y$95$m], $MachinePrecision] * 2.0), $MachinePrecision], 2.0], $MachinePrecision] - N[Power[t$95$0, 2.0], $MachinePrecision]), $MachinePrecision] / N[(N[(-1.0 * N[Log[y$95$m], $MachinePrecision]), $MachinePrecision] * -2.0 + t$95$0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] * -2.0 + 1.0), $MachinePrecision], t$95$2]]]]]
\begin{array}{l}
y_m = \left|y\right|

\\
\begin{array}{l}
t_0 := \log x \cdot 2\\
t_1 := \frac{\left(x - y\_m\right) \cdot \left(x + y\_m\right)}{x \cdot x + y\_m \cdot y\_m}\\
t_2 := -1 \cdot \left(0 \cdot \frac{x}{y\_m} + 1\right)\\
\mathbf{if}\;t\_1 \leq -0.5:\\
\;\;\;\;t\_2\\

\mathbf{elif}\;t\_1 \leq 2:\\
\;\;\;\;\mathsf{fma}\left(e^{\frac{{\left(\log y\_m \cdot 2\right)}^{2} - {t\_0}^{2}}{\mathsf{fma}\left(-1 \cdot \log y\_m, -2, t\_0\right)}}, -2, 1\right)\\

\mathbf{else}:\\
\;\;\;\;t\_2\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if (/.f64 (*.f64 (-.f64 x y) (+.f64 x y)) (+.f64 (*.f64 x x) (*.f64 y y))) < -0.5 or 2 < (/.f64 (*.f64 (-.f64 x y) (+.f64 x y)) (+.f64 (*.f64 x x) (*.f64 y y)))

    1. Initial program 60.9%

      \[\frac{\left(x - y\right) \cdot \left(x + y\right)}{x \cdot x + y \cdot y} \]
    2. Add Preprocessing
    3. Taylor expanded in y around -inf

      \[\leadsto \color{blue}{-1 \cdot \frac{x + -1 \cdot x}{y} - 1} \]
    4. Step-by-step derivation
      1. lower--.f64N/A

        \[\leadsto -1 \cdot \frac{x + -1 \cdot x}{y} - \color{blue}{1} \]
      2. *-commutativeN/A

        \[\leadsto \frac{x + -1 \cdot x}{y} \cdot -1 - 1 \]
      3. div-addN/A

        \[\leadsto \left(\frac{x}{y} + \frac{-1 \cdot x}{y}\right) \cdot -1 - 1 \]
      4. associate-*r/N/A

        \[\leadsto \left(\frac{x}{y} + -1 \cdot \frac{x}{y}\right) \cdot -1 - 1 \]
      5. +-commutativeN/A

        \[\leadsto \left(-1 \cdot \frac{x}{y} + \frac{x}{y}\right) \cdot -1 - 1 \]
      6. lower-*.f64N/A

        \[\leadsto \left(-1 \cdot \frac{x}{y} + \frac{x}{y}\right) \cdot -1 - 1 \]
      7. distribute-lft1-inN/A

        \[\leadsto \left(\left(-1 + 1\right) \cdot \frac{x}{y}\right) \cdot -1 - 1 \]
      8. metadata-evalN/A

        \[\leadsto \left(0 \cdot \frac{x}{y}\right) \cdot -1 - 1 \]
      9. lower-*.f64N/A

        \[\leadsto \left(0 \cdot \frac{x}{y}\right) \cdot -1 - 1 \]
      10. lower-/.f6488.1

        \[\leadsto \left(0 \cdot \frac{x}{y}\right) \cdot -1 - 1 \]
    5. Applied rewrites88.1%

      \[\leadsto \color{blue}{\left(0 \cdot \frac{x}{y}\right) \cdot -1 - 1} \]

    if -0.5 < (/.f64 (*.f64 (-.f64 x y) (+.f64 x y)) (+.f64 (*.f64 x x) (*.f64 y y))) < 2

    1. Initial program 100.0%

      \[\frac{\left(x - y\right) \cdot \left(x + y\right)}{x \cdot x + y \cdot y} \]
    2. Add Preprocessing
    3. Taylor expanded in y around 0

      \[\leadsto \color{blue}{1 + -2 \cdot \frac{{y}^{2}}{{x}^{2}}} \]
    4. Step-by-step derivation
      1. +-commutativeN/A

        \[\leadsto -2 \cdot \frac{{y}^{2}}{{x}^{2}} + \color{blue}{1} \]
      2. *-commutativeN/A

        \[\leadsto \frac{{y}^{2}}{{x}^{2}} \cdot -2 + 1 \]
      3. lower-fma.f64N/A

        \[\leadsto \mathsf{fma}\left(\frac{{y}^{2}}{{x}^{2}}, \color{blue}{-2}, 1\right) \]
      4. pow2N/A

        \[\leadsto \mathsf{fma}\left(\frac{y \cdot y}{{x}^{2}}, -2, 1\right) \]
      5. pow2N/A

        \[\leadsto \mathsf{fma}\left(\frac{y \cdot y}{x \cdot x}, -2, 1\right) \]
      6. frac-timesN/A

        \[\leadsto \mathsf{fma}\left(\frac{y}{x} \cdot \frac{y}{x}, -2, 1\right) \]
      7. sqr-neg-revN/A

        \[\leadsto \mathsf{fma}\left(\left(\mathsf{neg}\left(\frac{y}{x}\right)\right) \cdot \left(\mathsf{neg}\left(\frac{y}{x}\right)\right), -2, 1\right) \]
      8. mul-1-negN/A

        \[\leadsto \mathsf{fma}\left(\left(-1 \cdot \frac{y}{x}\right) \cdot \left(\mathsf{neg}\left(\frac{y}{x}\right)\right), -2, 1\right) \]
      9. mul-1-negN/A

        \[\leadsto \mathsf{fma}\left(\left(-1 \cdot \frac{y}{x}\right) \cdot \left(-1 \cdot \frac{y}{x}\right), -2, 1\right) \]
      10. pow2N/A

        \[\leadsto \mathsf{fma}\left({\left(-1 \cdot \frac{y}{x}\right)}^{2}, -2, 1\right) \]
      11. lower-pow.f64N/A

        \[\leadsto \mathsf{fma}\left({\left(-1 \cdot \frac{y}{x}\right)}^{2}, -2, 1\right) \]
      12. *-commutativeN/A

        \[\leadsto \mathsf{fma}\left({\left(\frac{y}{x} \cdot -1\right)}^{2}, -2, 1\right) \]
      13. lower-*.f64N/A

        \[\leadsto \mathsf{fma}\left({\left(\frac{y}{x} \cdot -1\right)}^{2}, -2, 1\right) \]
      14. lower-/.f6498.3

        \[\leadsto \mathsf{fma}\left({\left(\frac{y}{x} \cdot -1\right)}^{2}, -2, 1\right) \]
    5. Applied rewrites98.3%

      \[\leadsto \color{blue}{\mathsf{fma}\left({\left(\frac{y}{x} \cdot -1\right)}^{2}, -2, 1\right)} \]
    6. Step-by-step derivation
      1. lift-pow.f64N/A

        \[\leadsto \mathsf{fma}\left({\left(\frac{y}{x} \cdot -1\right)}^{2}, -2, 1\right) \]
      2. lift-*.f64N/A

        \[\leadsto \mathsf{fma}\left({\left(\frac{y}{x} \cdot -1\right)}^{2}, -2, 1\right) \]
      3. lift-/.f64N/A

        \[\leadsto \mathsf{fma}\left({\left(\frac{y}{x} \cdot -1\right)}^{2}, -2, 1\right) \]
      4. *-commutativeN/A

        \[\leadsto \mathsf{fma}\left({\left(-1 \cdot \frac{y}{x}\right)}^{2}, -2, 1\right) \]
      5. pow2N/A

        \[\leadsto \mathsf{fma}\left(\left(-1 \cdot \frac{y}{x}\right) \cdot \left(-1 \cdot \frac{y}{x}\right), -2, 1\right) \]
      6. mul-1-negN/A

        \[\leadsto \mathsf{fma}\left(\left(\mathsf{neg}\left(\frac{y}{x}\right)\right) \cdot \left(-1 \cdot \frac{y}{x}\right), -2, 1\right) \]
      7. mul-1-negN/A

        \[\leadsto \mathsf{fma}\left(\left(\mathsf{neg}\left(\frac{y}{x}\right)\right) \cdot \left(\mathsf{neg}\left(\frac{y}{x}\right)\right), -2, 1\right) \]
      8. sqr-neg-revN/A

        \[\leadsto \mathsf{fma}\left(\frac{y}{x} \cdot \frac{y}{x}, -2, 1\right) \]
      9. frac-timesN/A

        \[\leadsto \mathsf{fma}\left(\frac{y \cdot y}{x \cdot x}, -2, 1\right) \]
      10. pow2N/A

        \[\leadsto \mathsf{fma}\left(\frac{{y}^{2}}{x \cdot x}, -2, 1\right) \]
      11. pow-to-expN/A

        \[\leadsto \mathsf{fma}\left(\frac{e^{\log y \cdot 2}}{x \cdot x}, -2, 1\right) \]
      12. pow2N/A

        \[\leadsto \mathsf{fma}\left(\frac{e^{\log y \cdot 2}}{{x}^{2}}, -2, 1\right) \]
      13. pow-to-expN/A

        \[\leadsto \mathsf{fma}\left(\frac{e^{\log y \cdot 2}}{e^{\log x \cdot 2}}, -2, 1\right) \]
      14. div-expN/A

        \[\leadsto \mathsf{fma}\left(e^{\log y \cdot 2 - \log x \cdot 2}, -2, 1\right) \]
      15. lower-exp.f64N/A

        \[\leadsto \mathsf{fma}\left(e^{\log y \cdot 2 - \log x \cdot 2}, -2, 1\right) \]
      16. lower--.f64N/A

        \[\leadsto \mathsf{fma}\left(e^{\log y \cdot 2 - \log x \cdot 2}, -2, 1\right) \]
      17. lower-*.f64N/A

        \[\leadsto \mathsf{fma}\left(e^{\log y \cdot 2 - \log x \cdot 2}, -2, 1\right) \]
      18. lower-log.f64N/A

        \[\leadsto \mathsf{fma}\left(e^{\log y \cdot 2 - \log x \cdot 2}, -2, 1\right) \]
      19. lower-*.f64N/A

        \[\leadsto \mathsf{fma}\left(e^{\log y \cdot 2 - \log x \cdot 2}, -2, 1\right) \]
      20. lower-log.f6450.3

        \[\leadsto \mathsf{fma}\left(e^{\log y \cdot 2 - \log x \cdot 2}, -2, 1\right) \]
    7. Applied rewrites50.3%

      \[\leadsto \mathsf{fma}\left(e^{\log y \cdot 2 - \log x \cdot 2}, -2, 1\right) \]
    8. Step-by-step derivation
      1. lift--.f64N/A

        \[\leadsto \mathsf{fma}\left(e^{\log y \cdot 2 - \log x \cdot 2}, -2, 1\right) \]
      2. lift-*.f64N/A

        \[\leadsto \mathsf{fma}\left(e^{\log y \cdot 2 - \log x \cdot 2}, -2, 1\right) \]
      3. lift-log.f64N/A

        \[\leadsto \mathsf{fma}\left(e^{\log y \cdot 2 - \log x \cdot 2}, -2, 1\right) \]
      4. lift-*.f64N/A

        \[\leadsto \mathsf{fma}\left(e^{\log y \cdot 2 - \log x \cdot 2}, -2, 1\right) \]
      5. lift-log.f64N/A

        \[\leadsto \mathsf{fma}\left(e^{\log y \cdot 2 - \log x \cdot 2}, -2, 1\right) \]
      6. distribute-rgt-out--N/A

        \[\leadsto \mathsf{fma}\left(e^{2 \cdot \left(\log y - \log x\right)}, -2, 1\right) \]
      7. distribute-lft-out--N/A

        \[\leadsto \mathsf{fma}\left(e^{2 \cdot \log y - 2 \cdot \log x}, -2, 1\right) \]
      8. flip--N/A

        \[\leadsto \mathsf{fma}\left(e^{\frac{\left(2 \cdot \log y\right) \cdot \left(2 \cdot \log y\right) - \left(2 \cdot \log x\right) \cdot \left(2 \cdot \log x\right)}{2 \cdot \log y + 2 \cdot \log x}}, -2, 1\right) \]
      9. lower-/.f64N/A

        \[\leadsto \mathsf{fma}\left(e^{\frac{\left(2 \cdot \log y\right) \cdot \left(2 \cdot \log y\right) - \left(2 \cdot \log x\right) \cdot \left(2 \cdot \log x\right)}{2 \cdot \log y + 2 \cdot \log x}}, -2, 1\right) \]
    9. Applied rewrites50.3%

      \[\leadsto \mathsf{fma}\left(e^{\frac{{\left(\log y \cdot 2\right)}^{2} - {\left(\log x \cdot 2\right)}^{2}}{\mathsf{fma}\left(-1 \cdot \log y, -2, \log x \cdot 2\right)}}, -2, 1\right) \]
  3. Recombined 2 regimes into one program.
  4. Final simplification76.7%

    \[\leadsto \begin{array}{l} \mathbf{if}\;\frac{\left(x - y\right) \cdot \left(x + y\right)}{x \cdot x + y \cdot y} \leq -0.5:\\ \;\;\;\;-1 \cdot \left(0 \cdot \frac{x}{y} + 1\right)\\ \mathbf{elif}\;\frac{\left(x - y\right) \cdot \left(x + y\right)}{x \cdot x + y \cdot y} \leq 2:\\ \;\;\;\;\mathsf{fma}\left(e^{\frac{{\left(\log y \cdot 2\right)}^{2} - {\left(\log x \cdot 2\right)}^{2}}{\mathsf{fma}\left(-1 \cdot \log y, -2, \log x \cdot 2\right)}}, -2, 1\right)\\ \mathbf{else}:\\ \;\;\;\;-1 \cdot \left(0 \cdot \frac{x}{y} + 1\right)\\ \end{array} \]
  5. Add Preprocessing

Alternative 5: 92.1% accurate, N/A× speedup?

\[\begin{array}{l} y_m = \left|y\right| \\ \begin{array}{l} t_0 := \frac{\left(x - y\_m\right) \cdot \left(x + y\_m\right)}{x \cdot x + y\_m \cdot y\_m}\\ t_1 := -1 \cdot \left(0 \cdot \frac{x}{y\_m} + 1\right)\\ \mathbf{if}\;t\_0 \leq -0.5:\\ \;\;\;\;t\_1\\ \mathbf{elif}\;t\_0 \leq 2:\\ \;\;\;\;\mathsf{fma}\left(e^{\log y\_m \cdot 2 - \log x \cdot 2}, -2, 1\right)\\ \mathbf{else}:\\ \;\;\;\;t\_1\\ \end{array} \end{array} \]
y_m = (fabs.f64 y)
(FPCore (x y_m)
 :precision binary64
 (let* ((t_0 (/ (* (- x y_m) (+ x y_m)) (+ (* x x) (* y_m y_m))))
        (t_1 (* -1.0 (+ (* 0.0 (/ x y_m)) 1.0))))
   (if (<= t_0 -0.5)
     t_1
     (if (<= t_0 2.0)
       (fma (exp (- (* (log y_m) 2.0) (* (log x) 2.0))) -2.0 1.0)
       t_1))))
y_m = fabs(y);
double code(double x, double y_m) {
	double t_0 = ((x - y_m) * (x + y_m)) / ((x * x) + (y_m * y_m));
	double t_1 = -1.0 * ((0.0 * (x / y_m)) + 1.0);
	double tmp;
	if (t_0 <= -0.5) {
		tmp = t_1;
	} else if (t_0 <= 2.0) {
		tmp = fma(exp(((log(y_m) * 2.0) - (log(x) * 2.0))), -2.0, 1.0);
	} else {
		tmp = t_1;
	}
	return tmp;
}
y_m = abs(y)
# Alternative 5: regime split of ((x - y)(x + y)) / (x^2 + y^2) with y_m = |y|.
# Middle regime computes 1 - 2*(y_m/x)^2 as fma(exp(2*log(y_m) - 2*log(x)), -2, 1).
function code(x, y_m)
	selector = Float64(Float64(Float64(x - y_m) * Float64(x + y_m)) / Float64(Float64(x * x) + Float64(y_m * y_m)))
	outer = Float64(-1.0 * Float64(Float64(0.0 * Float64(x / y_m)) + 1.0))
	# NaN selector short-circuits the chained comparison -> `outer`.
	if -0.5 < selector <= 2.0
		return fma(exp(Float64(Float64(log(y_m) * 2.0) - Float64(log(x) * 2.0))), -2.0, 1.0)
	end
	return outer
end
y_m = N[Abs[y], $MachinePrecision]
code[x_, y$95$m_] := Block[{t$95$0 = N[(N[(N[(x - y$95$m), $MachinePrecision] * N[(x + y$95$m), $MachinePrecision]), $MachinePrecision] / N[(N[(x * x), $MachinePrecision] + N[(y$95$m * y$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(-1.0 * N[(N[(0.0 * N[(x / y$95$m), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[t$95$0, -0.5], t$95$1, If[LessEqual[t$95$0, 2.0], N[(N[Exp[N[(N[(N[Log[y$95$m], $MachinePrecision] * 2.0), $MachinePrecision] - N[(N[Log[x], $MachinePrecision] * 2.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] * -2.0 + 1.0), $MachinePrecision], t$95$1]]]]
\begin{array}{l}
y_m = \left|y\right|

\\
\begin{array}{l}
t_0 := \frac{\left(x - y\_m\right) \cdot \left(x + y\_m\right)}{x \cdot x + y\_m \cdot y\_m}\\
t_1 := -1 \cdot \left(0 \cdot \frac{x}{y\_m} + 1\right)\\
\mathbf{if}\;t\_0 \leq -0.5:\\
\;\;\;\;t\_1\\

\mathbf{elif}\;t\_0 \leq 2:\\
\;\;\;\;\mathsf{fma}\left(e^{\log y\_m \cdot 2 - \log x \cdot 2}, -2, 1\right)\\

\mathbf{else}:\\
\;\;\;\;t\_1\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if (/.f64 (*.f64 (-.f64 x y) (+.f64 x y)) (+.f64 (*.f64 x x) (*.f64 y y))) < -0.5 or 2 < (/.f64 (*.f64 (-.f64 x y) (+.f64 x y)) (+.f64 (*.f64 x x) (*.f64 y y)))

    1. Initial program 60.9%

      \[\frac{\left(x - y\right) \cdot \left(x + y\right)}{x \cdot x + y \cdot y} \]
    2. Add Preprocessing
    3. Taylor expanded in y around -inf

      \[\leadsto \color{blue}{-1 \cdot \frac{x + -1 \cdot x}{y} - 1} \]
    4. Step-by-step derivation
      1. lower--.f64N/A

        \[\leadsto -1 \cdot \frac{x + -1 \cdot x}{y} - \color{blue}{1} \]
      2. *-commutativeN/A

        \[\leadsto \frac{x + -1 \cdot x}{y} \cdot -1 - 1 \]
      3. div-addN/A

        \[\leadsto \left(\frac{x}{y} + \frac{-1 \cdot x}{y}\right) \cdot -1 - 1 \]
      4. associate-*r/N/A

        \[\leadsto \left(\frac{x}{y} + -1 \cdot \frac{x}{y}\right) \cdot -1 - 1 \]
      5. +-commutativeN/A

        \[\leadsto \left(-1 \cdot \frac{x}{y} + \frac{x}{y}\right) \cdot -1 - 1 \]
      6. lower-*.f64N/A

        \[\leadsto \left(-1 \cdot \frac{x}{y} + \frac{x}{y}\right) \cdot -1 - 1 \]
      7. distribute-lft1-inN/A

        \[\leadsto \left(\left(-1 + 1\right) \cdot \frac{x}{y}\right) \cdot -1 - 1 \]
      8. metadata-evalN/A

        \[\leadsto \left(0 \cdot \frac{x}{y}\right) \cdot -1 - 1 \]
      9. lower-*.f64N/A

        \[\leadsto \left(0 \cdot \frac{x}{y}\right) \cdot -1 - 1 \]
      10. lower-/.f6488.1

        \[\leadsto \left(0 \cdot \frac{x}{y}\right) \cdot -1 - 1 \]
    5. Applied rewrites88.1%

      \[\leadsto \color{blue}{\left(0 \cdot \frac{x}{y}\right) \cdot -1 - 1} \]

    if -0.5 < (/.f64 (*.f64 (-.f64 x y) (+.f64 x y)) (+.f64 (*.f64 x x) (*.f64 y y))) < 2

    1. Initial program 100.0%

      \[\frac{\left(x - y\right) \cdot \left(x + y\right)}{x \cdot x + y \cdot y} \]
    2. Add Preprocessing
    3. Taylor expanded in y around 0

      \[\leadsto \color{blue}{1 + -2 \cdot \frac{{y}^{2}}{{x}^{2}}} \]
    4. Step-by-step derivation
      1. +-commutativeN/A

        \[\leadsto -2 \cdot \frac{{y}^{2}}{{x}^{2}} + \color{blue}{1} \]
      2. *-commutativeN/A

        \[\leadsto \frac{{y}^{2}}{{x}^{2}} \cdot -2 + 1 \]
      3. lower-fma.f64N/A

        \[\leadsto \mathsf{fma}\left(\frac{{y}^{2}}{{x}^{2}}, \color{blue}{-2}, 1\right) \]
      4. pow2N/A

        \[\leadsto \mathsf{fma}\left(\frac{y \cdot y}{{x}^{2}}, -2, 1\right) \]
      5. pow2N/A

        \[\leadsto \mathsf{fma}\left(\frac{y \cdot y}{x \cdot x}, -2, 1\right) \]
      6. frac-timesN/A

        \[\leadsto \mathsf{fma}\left(\frac{y}{x} \cdot \frac{y}{x}, -2, 1\right) \]
      7. sqr-neg-revN/A

        \[\leadsto \mathsf{fma}\left(\left(\mathsf{neg}\left(\frac{y}{x}\right)\right) \cdot \left(\mathsf{neg}\left(\frac{y}{x}\right)\right), -2, 1\right) \]
      8. mul-1-negN/A

        \[\leadsto \mathsf{fma}\left(\left(-1 \cdot \frac{y}{x}\right) \cdot \left(\mathsf{neg}\left(\frac{y}{x}\right)\right), -2, 1\right) \]
      9. mul-1-negN/A

        \[\leadsto \mathsf{fma}\left(\left(-1 \cdot \frac{y}{x}\right) \cdot \left(-1 \cdot \frac{y}{x}\right), -2, 1\right) \]
      10. pow2N/A

        \[\leadsto \mathsf{fma}\left({\left(-1 \cdot \frac{y}{x}\right)}^{2}, -2, 1\right) \]
      11. lower-pow.f64N/A

        \[\leadsto \mathsf{fma}\left({\left(-1 \cdot \frac{y}{x}\right)}^{2}, -2, 1\right) \]
      12. *-commutativeN/A

        \[\leadsto \mathsf{fma}\left({\left(\frac{y}{x} \cdot -1\right)}^{2}, -2, 1\right) \]
      13. lower-*.f64N/A

        \[\leadsto \mathsf{fma}\left({\left(\frac{y}{x} \cdot -1\right)}^{2}, -2, 1\right) \]
      14. lower-/.f6498.3

        \[\leadsto \mathsf{fma}\left({\left(\frac{y}{x} \cdot -1\right)}^{2}, -2, 1\right) \]
    5. Applied rewrites98.3%

      \[\leadsto \color{blue}{\mathsf{fma}\left({\left(\frac{y}{x} \cdot -1\right)}^{2}, -2, 1\right)} \]
    6. Step-by-step derivation
      1. lift-pow.f64N/A

        \[\leadsto \mathsf{fma}\left({\left(\frac{y}{x} \cdot -1\right)}^{2}, -2, 1\right) \]
      2. lift-*.f64N/A

        \[\leadsto \mathsf{fma}\left({\left(\frac{y}{x} \cdot -1\right)}^{2}, -2, 1\right) \]
      3. lift-/.f64N/A

        \[\leadsto \mathsf{fma}\left({\left(\frac{y}{x} \cdot -1\right)}^{2}, -2, 1\right) \]
      4. *-commutativeN/A

        \[\leadsto \mathsf{fma}\left({\left(-1 \cdot \frac{y}{x}\right)}^{2}, -2, 1\right) \]
      5. pow2N/A

        \[\leadsto \mathsf{fma}\left(\left(-1 \cdot \frac{y}{x}\right) \cdot \left(-1 \cdot \frac{y}{x}\right), -2, 1\right) \]
      6. mul-1-negN/A

        \[\leadsto \mathsf{fma}\left(\left(\mathsf{neg}\left(\frac{y}{x}\right)\right) \cdot \left(-1 \cdot \frac{y}{x}\right), -2, 1\right) \]
      7. mul-1-negN/A

        \[\leadsto \mathsf{fma}\left(\left(\mathsf{neg}\left(\frac{y}{x}\right)\right) \cdot \left(\mathsf{neg}\left(\frac{y}{x}\right)\right), -2, 1\right) \]
      8. sqr-neg-revN/A

        \[\leadsto \mathsf{fma}\left(\frac{y}{x} \cdot \frac{y}{x}, -2, 1\right) \]
      9. frac-timesN/A

        \[\leadsto \mathsf{fma}\left(\frac{y \cdot y}{x \cdot x}, -2, 1\right) \]
      10. pow2N/A

        \[\leadsto \mathsf{fma}\left(\frac{{y}^{2}}{x \cdot x}, -2, 1\right) \]
      11. pow-to-expN/A

        \[\leadsto \mathsf{fma}\left(\frac{e^{\log y \cdot 2}}{x \cdot x}, -2, 1\right) \]
      12. pow2N/A

        \[\leadsto \mathsf{fma}\left(\frac{e^{\log y \cdot 2}}{{x}^{2}}, -2, 1\right) \]
      13. pow-to-expN/A

        \[\leadsto \mathsf{fma}\left(\frac{e^{\log y \cdot 2}}{e^{\log x \cdot 2}}, -2, 1\right) \]
      14. div-expN/A

        \[\leadsto \mathsf{fma}\left(e^{\log y \cdot 2 - \log x \cdot 2}, -2, 1\right) \]
      15. lower-exp.f64N/A

        \[\leadsto \mathsf{fma}\left(e^{\log y \cdot 2 - \log x \cdot 2}, -2, 1\right) \]
      16. lower--.f64N/A

        \[\leadsto \mathsf{fma}\left(e^{\log y \cdot 2 - \log x \cdot 2}, -2, 1\right) \]
      17. lower-*.f64N/A

        \[\leadsto \mathsf{fma}\left(e^{\log y \cdot 2 - \log x \cdot 2}, -2, 1\right) \]
      18. lower-log.f64N/A

        \[\leadsto \mathsf{fma}\left(e^{\log y \cdot 2 - \log x \cdot 2}, -2, 1\right) \]
      19. lower-*.f64N/A

        \[\leadsto \mathsf{fma}\left(e^{\log y \cdot 2 - \log x \cdot 2}, -2, 1\right) \]
      20. lower-log.f6450.3

        \[\leadsto \mathsf{fma}\left(e^{\log y \cdot 2 - \log x \cdot 2}, -2, 1\right) \]
    7. Applied rewrites50.3%

      \[\leadsto \mathsf{fma}\left(e^{\log y \cdot 2 - \log x \cdot 2}, -2, 1\right) \]
  3. Recombined 2 regimes into one program.
  4. Final simplification76.7%

    \[\leadsto \begin{array}{l} \mathbf{if}\;\frac{\left(x - y\right) \cdot \left(x + y\right)}{x \cdot x + y \cdot y} \leq -0.5:\\ \;\;\;\;-1 \cdot \left(0 \cdot \frac{x}{y} + 1\right)\\ \mathbf{elif}\;\frac{\left(x - y\right) \cdot \left(x + y\right)}{x \cdot x + y \cdot y} \leq 2:\\ \;\;\;\;\mathsf{fma}\left(e^{\log y \cdot 2 - \log x \cdot 2}, -2, 1\right)\\ \mathbf{else}:\\ \;\;\;\;-1 \cdot \left(0 \cdot \frac{x}{y} + 1\right)\\ \end{array} \]
  5. Add Preprocessing

Alternative 6: 66.5% accurate, N/A× speedup?

\[\begin{array}{l} y_m = \left|y\right| \\ -1 \cdot \left(0 \cdot \frac{x}{y\_m} + 1\right) \end{array} \]
y_m = (fabs.f64 y)
(FPCore (x y_m) :precision binary64 (* -1.0 (+ (* 0.0 (/ x y_m)) 1.0)))
y_m = fabs(y);
/* Alternative 6: the asymptotic constant -1, written with a retained
 * 0 * (x / y_m) term so NaN/Inf from the division still propagates. */
double code(double x, double y_m) {
	const double zero_term = 0.0 * (x / y_m);
	return -1.0 * (zero_term + 1.0);
}
y_m = abs(y)
! NaN-aware fmax/fmin generics over real(4)/real(8) operand combinations.
! Each specific uses merge with the x /= x "is NaN" test: if x is NaN the
! other operand is returned, then the same check for y, else the intrinsic
! max/min of the two values.
module fmin_fmax_functions
    implicit none
    private
    public fmax
    public fmin

    ! Generic dispatch: suffix digits name the operand kinds (8 = real(8),
    ! 4 = real(4)); mixed-kind specifics promote the real(4) argument via dble.
    interface fmax
        module procedure fmax88
        module procedure fmax44
        module procedure fmax84
        module procedure fmax48
    end interface
    interface fmin
        module procedure fmin88
        module procedure fmin44
        module procedure fmin84
        module procedure fmin48
    end interface
contains
    ! Reads inside-out: if y is NaN keep x, else max(x, y); if x is NaN keep y.
    real(8) function fmax88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    real(4) function fmax44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    real(8) function fmax84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
    end function
    real(8) function fmax48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
    end function
    ! fmin specifics mirror fmax with the min intrinsic.
    real(8) function fmin88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    real(4) function fmin44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    real(8) function fmin84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
    end function
    real(8) function fmin48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
    end function
end module

! Alternative 6: the asymptotic constant -1.  The 0*(x/y_m) term is kept
! (not folded away) so NaN/Inf produced by x/y_m still propagates.
real(8) function code(x, y_m)
use fmin_fmax_functions
    real(8), intent (in) :: x
    real(8), intent (in) :: y_m
    real(8) :: zero_term
    zero_term = 0.0d0 * (x / y_m)
    code = (-1.0d0) * (zero_term + 1.0d0)
end function
y_m = Math.abs(y);
/**
 * Alternative 6: the asymptotic constant -1 for the Kahan p9 expression.
 * The 0.0 * (x / y_m) term is retained so NaN/Infinity from the division
 * still propagates instead of folding to a plain constant.
 */
public static double code(double x, double y_m) {
	final double zeroTerm = 0.0 * (x / y_m);
	return -1.0 * (zeroTerm + 1.0);
}
y_m = math.fabs(y)
def code(x, y_m):
	"""Alternative 6: asymptotic constant -1 for the Kahan p9 expression.

	The 0.0 * (x / y_m) term is kept so a NaN/inf from the division still
	propagates (and y_m == 0 raises ZeroDivisionError, as in the original).
	"""
	zero_term = 0.0 * (x / y_m)
	return -1.0 * (zero_term + 1.0)
y_m = abs(y)
# Alternative 6: asymptotic constant -1; the 0.0 * (x / y_m) term is kept
# so NaN/Inf from the division still propagates.
function code(x, y_m)
	zero_term = Float64(0.0 * Float64(x / y_m))
	return Float64(-1.0 * Float64(zero_term + 1.0))
end
y_m = abs(y);
% Herbie-generated alternative to ((x-y)*(x+y))/(x*x+y*y); caller
% preprocesses y_m = abs(y). The 0.0*(x/y_m) term is kept so that
% Inf/NaN in x/y_m propagates into the result.
function tmp = code(x, y_m)
	tmp = -1.0 * ((0.0 * (x / y_m)) + 1.0);
end
y_m = N[Abs[y], $MachinePrecision]
(* Herbie-generated alternative to ((x-y)*(x+y))/(x*x+y*y); caller preprocesses y_m = Abs[y]. *)
code[x_, y$95$m_] := N[(-1.0 * N[(N[(0.0 * N[(x / y$95$m), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
y_m = \left|y\right|

\\
-1 \cdot \left(0 \cdot \frac{x}{y\_m} + 1\right)
\end{array}
Derivation
  1. Initial program — 72.6% accurate

    \[\frac{\left(x - y\right) \cdot \left(x + y\right)}{x \cdot x + y \cdot y} \]
  2. Add Preprocessing
  3. Taylor expanded in y around -inf

    \[\leadsto \color{blue}{-1 \cdot \frac{x + -1 \cdot x}{y} - 1} \]
  4. Step-by-step derivation
    1. lower--.f64 — N/A

      \[\leadsto -1 \cdot \frac{x + -1 \cdot x}{y} - \color{blue}{1} \]
    2. *-commutative — N/A

      \[\leadsto \frac{x + -1 \cdot x}{y} \cdot -1 - 1 \]
    3. div-add — N/A

      \[\leadsto \left(\frac{x}{y} + \frac{-1 \cdot x}{y}\right) \cdot -1 - 1 \]
    4. associate-*r/ — N/A

      \[\leadsto \left(\frac{x}{y} + -1 \cdot \frac{x}{y}\right) \cdot -1 - 1 \]
    5. +-commutative — N/A

      \[\leadsto \left(-1 \cdot \frac{x}{y} + \frac{x}{y}\right) \cdot -1 - 1 \]
    6. lower-*.f64 — N/A

      \[\leadsto \left(-1 \cdot \frac{x}{y} + \frac{x}{y}\right) \cdot -1 - 1 \]
    7. distribute-lft1-in — N/A

      \[\leadsto \left(\left(-1 + 1\right) \cdot \frac{x}{y}\right) \cdot -1 - 1 \]
    8. metadata-eval — N/A

      \[\leadsto \left(0 \cdot \frac{x}{y}\right) \cdot -1 - 1 \]
    9. lower-*.f64 — N/A

      \[\leadsto \left(0 \cdot \frac{x}{y}\right) \cdot -1 - 1 \]
    10. lower-/.f64 — 62.1% accurate

      \[\leadsto \left(0 \cdot \frac{x}{y}\right) \cdot -1 - 1 \]
  5. Applied rewrites — 62.1% accurate

    \[\leadsto \color{blue}{\left(0 \cdot \frac{x}{y}\right) \cdot -1 - 1} \]
  6. Final simplification — 62.1% accurate

    \[\leadsto -1 \cdot \left(0 \cdot \frac{x}{y} + 1\right) \]
  7. Add Preprocessing

Reproduce

?
herbie shell --seed 2025065 
(FPCore (x y)
  :name "Kahan p9 Example"
  :precision binary64
  :pre (and (and (< 0.0 x) (< x 1.0)) (< y 1.0))

  :alt
  (! :herbie-platform c (if (< 1/2 (fabs (/ x y)) 2) (/ (* (- x y) (+ x y)) (+ (* x x) (* y y))) (- 1 (/ 2 (+ 1 (* (/ x y) (/ x y)))))))

  (/ (* (- x y) (+ x y)) (+ (* x x) (* y y))))