Numeric.SpecFunctions:incompleteBetaWorker from math-functions-0.1.5.2, A

Percentage Accurate: 98.4% → 98.4%
Time: 16.5s
Alternatives: 19
Speedup: 1.0×

Specification

?
\[\begin{array}{l} \\ \frac{x \cdot e^{\left(y \cdot \log z + \left(t - 1\right) \cdot \log a\right) - b}}{y} \end{array} \]
;; Incomplete-beta worker term: x * e^((y*log z + (t - 1)*log a) - b) / y,
;; evaluated in binary64.
(FPCore (x y z t a b)
 :precision binary64
 (/ (* x (exp (- (+ (* y (log z)) (* (- t 1.0) (log a))) b))) y))
double code(double x, double y, double z, double t, double a, double b) {
	return (x * exp((((y * log(z)) + ((t - 1.0) * log(a))) - b))) / y;
}
! Incomplete-beta worker term: x * exp(y*log(z) + (t-1)*log(a) - b) / y.
real(8) function code(x, y, z, t, a, b)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8) :: arg
    ! Same grouping as the reference expression.
    arg = ((y * log(z)) + ((t - 1.0d0) * log(a))) - b
    code = (x * exp(arg)) / y
end function
/** Incomplete-beta worker term: x * exp(y*ln(z) + (t - 1)*ln(a) - b) / y. */
public static double code(double x, double y, double z, double t, double a, double b) {
	final double arg = (y * Math.log(z)) + ((t - 1.0) * Math.log(a));
	return (x * Math.exp(arg - b)) / y;
}
def code(x, y, z, t, a, b):
	"""Incomplete-beta worker: x * exp(y*log(z) + (t-1)*log(a) - b) / y."""
	arg = ((y * math.log(z)) + ((t - 1.0) * math.log(a))) - b
	return (x * math.exp(arg)) / y
# Incomplete-beta worker term; the explicit Float64 conversions from the
# generated code are preserved so the rounding sequence is unchanged.
function code(x, y, z, t, a, b)
	lin = Float64(Float64(y * log(z)) + Float64(Float64(t - 1.0) * log(a)))
	scaled = Float64(x * exp(Float64(lin - b)))
	return Float64(scaled / y)
end
function tmp = code(x, y, z, t, a, b)
	% Incomplete-beta worker term: x * exp(y*log(z) + (t-1)*log(a) - b) / y.
	arg = ((y * log(z)) + ((t - 1.0) * log(a))) - b;
	tmp = (x * exp(arg)) / y;
end
code[x_, y_, z_, t_, a_, b_] := N[(N[(x * N[Exp[N[(N[(N[(y * N[Log[z], $MachinePrecision]), $MachinePrecision] + N[(N[(t - 1.0), $MachinePrecision] * N[Log[a], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - b), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / y), $MachinePrecision]
\begin{array}{l}

\\
\frac{x \cdot e^{\left(y \cdot \log z + \left(t - 1\right) \cdot \log a\right) - b}}{y}
\end{array}

Sampling outcomes in binary64 precision:

Local Percentage Accuracy vs ?

The average percentage accuracy by input value. Horizontal axis shows the value of an input variable; the variable is chosen in the title. Vertical axis is accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion. These can be toggled with the buttons below the plot. The line is an average, while dots represent individual samples.

Accuracy vs Speed?

Herbie found 19 alternatives:

AlternativeAccuracySpeedup
The accuracy (vertical axis) and speed (horizontal axis) of each alternatives. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative.The line shows the best available speed-accuracy tradeoffs.

Initial Program: 98.4% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \frac{x \cdot e^{\left(y \cdot \log z + \left(t - 1\right) \cdot \log a\right) - b}}{y} \end{array} \]
;; Incomplete-beta worker term: x * e^((y*log z + (t - 1)*log a) - b) / y,
;; evaluated in binary64.
(FPCore (x y z t a b)
 :precision binary64
 (/ (* x (exp (- (+ (* y (log z)) (* (- t 1.0) (log a))) b))) y))
double code(double x, double y, double z, double t, double a, double b) {
	return (x * exp((((y * log(z)) + ((t - 1.0) * log(a))) - b))) / y;
}
! Incomplete-beta worker term: x * exp(y*log(z) + (t-1)*log(a) - b) / y.
real(8) function code(x, y, z, t, a, b)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8) :: arg
    ! Same grouping as the reference expression.
    arg = ((y * log(z)) + ((t - 1.0d0) * log(a))) - b
    code = (x * exp(arg)) / y
end function
/** Incomplete-beta worker term: x * exp(y*ln(z) + (t - 1)*ln(a) - b) / y. */
public static double code(double x, double y, double z, double t, double a, double b) {
	final double arg = (y * Math.log(z)) + ((t - 1.0) * Math.log(a));
	return (x * Math.exp(arg - b)) / y;
}
def code(x, y, z, t, a, b):
	"""Incomplete-beta worker: x * exp(y*log(z) + (t-1)*log(a) - b) / y."""
	arg = ((y * math.log(z)) + ((t - 1.0) * math.log(a))) - b
	return (x * math.exp(arg)) / y
# Incomplete-beta worker term; the explicit Float64 conversions from the
# generated code are preserved so the rounding sequence is unchanged.
function code(x, y, z, t, a, b)
	lin = Float64(Float64(y * log(z)) + Float64(Float64(t - 1.0) * log(a)))
	scaled = Float64(x * exp(Float64(lin - b)))
	return Float64(scaled / y)
end
function tmp = code(x, y, z, t, a, b)
	% Incomplete-beta worker term: x * exp(y*log(z) + (t-1)*log(a) - b) / y.
	arg = ((y * log(z)) + ((t - 1.0) * log(a))) - b;
	tmp = (x * exp(arg)) / y;
end
code[x_, y_, z_, t_, a_, b_] := N[(N[(x * N[Exp[N[(N[(N[(y * N[Log[z], $MachinePrecision]), $MachinePrecision] + N[(N[(t - 1.0), $MachinePrecision] * N[Log[a], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - b), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / y), $MachinePrecision]
\begin{array}{l}

\\
\frac{x \cdot e^{\left(y \cdot \log z + \left(t - 1\right) \cdot \log a\right) - b}}{y}
\end{array}

Alternative 1: 98.4% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \frac{x \cdot e^{\left(y \cdot \log z + \log a \cdot \left(t + -1\right)\right) - b}}{y} \end{array} \]
;; Worker term with (t - 1) rewritten as (t + -1) and the product commuted
;; to (log a) * (t + -1); numerically identical grouping.
(FPCore (x y z t a b)
 :precision binary64
 (/ (* x (exp (- (+ (* y (log z)) (* (log a) (+ t -1.0))) b))) y))
double code(double x, double y, double z, double t, double a, double b) {
	return (x * exp((((y * log(z)) + (log(a) * (t + -1.0))) - b))) / y;
}
! Worker term with (t - 1) written as (t + (-1)); same rounding behavior.
real(8) function code(x, y, z, t, a, b)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8) :: arg
    arg = ((y * log(z)) + (log(a) * (t + (-1.0d0)))) - b
    code = (x * exp(arg)) / y
end function
/** Worker term with the (t - 1) factor written as (t + -1). */
public static double code(double x, double y, double z, double t, double a, double b) {
	final double arg = (y * Math.log(z)) + (Math.log(a) * (t + -1.0));
	return (x * Math.exp(arg - b)) / y;
}
def code(x, y, z, t, a, b):
	"""Worker term with the (t - 1) factor written as (t + -1)."""
	arg = ((y * math.log(z)) + (math.log(a) * (t + -1.0))) - b
	return (x * math.exp(arg)) / y
# Worker term with (t - 1) written as (t + -1); Float64 conversions kept so
# the rounding sequence matches the generated code exactly.
function code(x, y, z, t, a, b)
	lin = Float64(Float64(y * log(z)) + Float64(log(a) * Float64(t + -1.0)))
	scaled = Float64(x * exp(Float64(lin - b)))
	return Float64(scaled / y)
end
function tmp = code(x, y, z, t, a, b)
	% Worker term with the (t - 1) factor written as (t + -1).
	arg = ((y * log(z)) + (log(a) * (t + -1.0))) - b;
	tmp = (x * exp(arg)) / y;
end
code[x_, y_, z_, t_, a_, b_] := N[(N[(x * N[Exp[N[(N[(N[(y * N[Log[z], $MachinePrecision]), $MachinePrecision] + N[(N[Log[a], $MachinePrecision] * N[(t + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - b), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / y), $MachinePrecision]
\begin{array}{l}

\\
\frac{x \cdot e^{\left(y \cdot \log z + \log a \cdot \left(t + -1\right)\right) - b}}{y}
\end{array}
Derivation
  1. Initial program 98.4%

    \[\frac{x \cdot e^{\left(y \cdot \log z + \left(t - 1\right) \cdot \log a\right) - b}}{y} \]
  2. Add Preprocessing
  3. Final simplification98.4%

    \[\leadsto \frac{x \cdot e^{\left(y \cdot \log z + \log a \cdot \left(t + -1\right)\right) - b}}{y} \]
  4. Add Preprocessing

Alternative 2: 56.2% accurate, 0.5× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_1 := \frac{x \cdot e^{\left(y \cdot \log z + \log a \cdot \left(t + -1\right)\right) - b}}{y}\\ t_2 := x \cdot \frac{\mathsf{fma}\left(b, \mathsf{fma}\left(b, 0.5, -1\right), 1\right)}{y}\\ \mathbf{if}\;t\_1 \leq -\infty:\\ \;\;\;\;t\_2\\ \mathbf{elif}\;t\_1 \leq 2 \cdot 10^{+300}:\\ \;\;\;\;\frac{x}{a \cdot \left(y \cdot \mathsf{fma}\left(b, \mathsf{fma}\left(b, \mathsf{fma}\left(b, 0.16666666666666666, 0.5\right), 1\right), 1\right)\right)}\\ \mathbf{else}:\\ \;\;\;\;t\_2\\ \end{array} \end{array} \]
;; Regime-split approximation: t_1 is the reference value, t_2 a quadratic
;; series fallback in b; the central regime (t_1 <= 2e300) uses a cubic
;; Taylor series of exp(b) in the denominator (see derivation).
(FPCore (x y z t a b)
 :precision binary64
 (let* ((t_1 (/ (* x (exp (- (+ (* y (log z)) (* (log a) (+ t -1.0))) b))) y))
        (t_2 (* x (/ (fma b (fma b 0.5 -1.0) 1.0) y))))
   (if (<= t_1 (- INFINITY))
     t_2
     (if (<= t_1 2e+300)
       (/
        x
        (* a (* y (fma b (fma b (fma b 0.16666666666666666 0.5) 1.0) 1.0))))
       t_2))))
double code(double x, double y, double z, double t, double a, double b) {
	double t_1 = (x * exp((((y * log(z)) + (log(a) * (t + -1.0))) - b))) / y;
	double t_2 = x * (fma(b, fma(b, 0.5, -1.0), 1.0) / y);
	double tmp;
	if (t_1 <= -((double) INFINITY)) {
		tmp = t_2;
	} else if (t_1 <= 2e+300) {
		tmp = x / (a * (y * fma(b, fma(b, fma(b, 0.16666666666666666, 0.5), 1.0), 1.0)));
	} else {
		tmp = t_2;
	}
	return tmp;
}
# Regime-split rewrite of the worker term (see derivation): a cubic Taylor
# series of exp(b) in the denominator for the central regime, and a
# quadratic series fallback otherwise. Branch order matches the original.
function code(x, y, z, t, a, b)
	ref = Float64(Float64(x * exp(Float64(Float64(Float64(y * log(z)) + Float64(log(a) * Float64(t + -1.0))) - b))) / y)
	fallback = Float64(x * Float64(fma(b, fma(b, 0.5, -1.0), 1.0) / y))
	if ref <= -Inf
		result = fallback
	elseif ref <= 2e+300
		result = Float64(x / Float64(a * Float64(y * fma(b, fma(b, fma(b, 0.16666666666666666, 0.5), 1.0), 1.0))))
	else
		result = fallback
	end
	return result
end
code[x_, y_, z_, t_, a_, b_] := Block[{t$95$1 = N[(N[(x * N[Exp[N[(N[(N[(y * N[Log[z], $MachinePrecision]), $MachinePrecision] + N[(N[Log[a], $MachinePrecision] * N[(t + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - b), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / y), $MachinePrecision]}, Block[{t$95$2 = N[(x * N[(N[(b * N[(b * 0.5 + -1.0), $MachinePrecision] + 1.0), $MachinePrecision] / y), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[t$95$1, (-Infinity)], t$95$2, If[LessEqual[t$95$1, 2e+300], N[(x / N[(a * N[(y * N[(b * N[(b * N[(b * 0.16666666666666666 + 0.5), $MachinePrecision] + 1.0), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], t$95$2]]]]
\begin{array}{l}

\\
\begin{array}{l}
t_1 := \frac{x \cdot e^{\left(y \cdot \log z + \log a \cdot \left(t + -1\right)\right) - b}}{y}\\
t_2 := x \cdot \frac{\mathsf{fma}\left(b, \mathsf{fma}\left(b, 0.5, -1\right), 1\right)}{y}\\
\mathbf{if}\;t\_1 \leq -\infty:\\
\;\;\;\;t\_2\\

\mathbf{elif}\;t\_1 \leq 2 \cdot 10^{+300}:\\
\;\;\;\;\frac{x}{a \cdot \left(y \cdot \mathsf{fma}\left(b, \mathsf{fma}\left(b, \mathsf{fma}\left(b, 0.16666666666666666, 0.5\right), 1\right), 1\right)\right)}\\

\mathbf{else}:\\
\;\;\;\;t\_2\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if (/.f64 (*.f64 x (exp.f64 (-.f64 (+.f64 (*.f64 y (log.f64 z)) (*.f64 (-.f64 t #s(literal 1 binary64)) (log.f64 a))) b))) y) < -inf.0 or 2.0000000000000001e300 < (/.f64 (*.f64 x (exp.f64 (-.f64 (+.f64 (*.f64 y (log.f64 z)) (*.f64 (-.f64 t #s(literal 1 binary64)) (log.f64 a))) b))) y)

    1. Initial program 100.0%

      \[\frac{x \cdot e^{\left(y \cdot \log z + \left(t - 1\right) \cdot \log a\right) - b}}{y} \]
    2. Add Preprocessing
    3. Taylor expanded in y around inf

      \[\leadsto \frac{x \cdot e^{\color{blue}{y \cdot \log z} - b}}{y} \]
    4. Step-by-step derivation
      1. +-rgt-identityN/A

        \[\leadsto \frac{x \cdot e^{\color{blue}{\left(y \cdot \log z + 0\right)} - b}}{y} \]
      2. accelerator-lowering-fma.f64N/A

        \[\leadsto \frac{x \cdot e^{\color{blue}{\mathsf{fma}\left(y, \log z, 0\right)} - b}}{y} \]
      3. log-lowering-log.f6481.1

        \[\leadsto \frac{x \cdot e^{\mathsf{fma}\left(y, \color{blue}{\log z}, 0\right) - b}}{y} \]
    5. Simplified81.1%

      \[\leadsto \frac{x \cdot e^{\color{blue}{\mathsf{fma}\left(y, \log z, 0\right)} - b}}{y} \]
    6. Taylor expanded in y around 0

      \[\leadsto \frac{\color{blue}{x \cdot e^{\mathsf{neg}\left(b\right)}}}{y} \]
    7. Step-by-step derivation
      1. *-lowering-*.f64N/A

        \[\leadsto \frac{\color{blue}{x \cdot e^{\mathsf{neg}\left(b\right)}}}{y} \]
      2. exp-lowering-exp.f64N/A

        \[\leadsto \frac{x \cdot \color{blue}{e^{\mathsf{neg}\left(b\right)}}}{y} \]
      3. neg-sub0N/A

        \[\leadsto \frac{x \cdot e^{\color{blue}{0 - b}}}{y} \]
      4. --lowering--.f6440.4

        \[\leadsto \frac{x \cdot e^{\color{blue}{0 - b}}}{y} \]
    8. Simplified40.4%

      \[\leadsto \frac{\color{blue}{x \cdot e^{0 - b}}}{y} \]
    9. Taylor expanded in b around 0

      \[\leadsto \color{blue}{b \cdot \left(-1 \cdot \frac{x}{y} + \frac{1}{2} \cdot \frac{b \cdot x}{y}\right) + \frac{x}{y}} \]
    10. Simplified32.9%

      \[\leadsto \color{blue}{\frac{x}{y} \cdot \mathsf{fma}\left(b, \mathsf{fma}\left(0.5, b, -1\right), 1\right)} \]
    11. Step-by-step derivation
      1. associate-*l/N/A

        \[\leadsto \color{blue}{\frac{x \cdot \left(b \cdot \left(\frac{1}{2} \cdot b + -1\right) + 1\right)}{y}} \]
      2. associate-/l*N/A

        \[\leadsto \color{blue}{x \cdot \frac{b \cdot \left(\frac{1}{2} \cdot b + -1\right) + 1}{y}} \]
      3. *-commutativeN/A

        \[\leadsto \color{blue}{\frac{b \cdot \left(\frac{1}{2} \cdot b + -1\right) + 1}{y} \cdot x} \]
      4. *-lowering-*.f64N/A

        \[\leadsto \color{blue}{\frac{b \cdot \left(\frac{1}{2} \cdot b + -1\right) + 1}{y} \cdot x} \]
      5. /-lowering-/.f64N/A

        \[\leadsto \color{blue}{\frac{b \cdot \left(\frac{1}{2} \cdot b + -1\right) + 1}{y}} \cdot x \]
      6. accelerator-lowering-fma.f64N/A

        \[\leadsto \frac{\color{blue}{\mathsf{fma}\left(b, \frac{1}{2} \cdot b + -1, 1\right)}}{y} \cdot x \]
      7. *-commutativeN/A

        \[\leadsto \frac{\mathsf{fma}\left(b, \color{blue}{b \cdot \frac{1}{2}} + -1, 1\right)}{y} \cdot x \]
      8. accelerator-lowering-fma.f6441.4

        \[\leadsto \frac{\mathsf{fma}\left(b, \color{blue}{\mathsf{fma}\left(b, 0.5, -1\right)}, 1\right)}{y} \cdot x \]
    12. Applied egg-rr41.4%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(b, \mathsf{fma}\left(b, 0.5, -1\right), 1\right)}{y} \cdot x} \]

    if -inf.0 < (/.f64 (*.f64 x (exp.f64 (-.f64 (+.f64 (*.f64 y (log.f64 z)) (*.f64 (-.f64 t #s(literal 1 binary64)) (log.f64 a))) b))) y) < 2.0000000000000001e300

    1. Initial program 96.9%

      \[\frac{x \cdot e^{\left(y \cdot \log z + \left(t - 1\right) \cdot \log a\right) - b}}{y} \]
    2. Add Preprocessing
    3. Taylor expanded in y around 0

      \[\leadsto \color{blue}{\frac{x \cdot e^{\log a \cdot \left(t - 1\right) - b}}{y}} \]
    4. Step-by-step derivation
      1. *-commutativeN/A

        \[\leadsto \frac{\color{blue}{e^{\log a \cdot \left(t - 1\right) - b} \cdot x}}{y} \]
      2. exp-diffN/A

        \[\leadsto \frac{\color{blue}{\frac{e^{\log a \cdot \left(t - 1\right)}}{e^{b}}} \cdot x}{y} \]
      3. associate-*l/N/A

        \[\leadsto \frac{\color{blue}{\frac{e^{\log a \cdot \left(t - 1\right)} \cdot x}{e^{b}}}}{y} \]
      4. associate-/l/N/A

        \[\leadsto \color{blue}{\frac{e^{\log a \cdot \left(t - 1\right)} \cdot x}{y \cdot e^{b}}} \]
      5. /-lowering-/.f64N/A

        \[\leadsto \color{blue}{\frac{e^{\log a \cdot \left(t - 1\right)} \cdot x}{y \cdot e^{b}}} \]
      6. *-lowering-*.f64N/A

        \[\leadsto \frac{\color{blue}{e^{\log a \cdot \left(t - 1\right)} \cdot x}}{y \cdot e^{b}} \]
      7. exp-prodN/A

        \[\leadsto \frac{\color{blue}{{\left(e^{\log a}\right)}^{\left(t - 1\right)}} \cdot x}{y \cdot e^{b}} \]
      8. pow-lowering-pow.f64N/A

        \[\leadsto \frac{\color{blue}{{\left(e^{\log a}\right)}^{\left(t - 1\right)}} \cdot x}{y \cdot e^{b}} \]
      9. rem-exp-logN/A

        \[\leadsto \frac{{\color{blue}{a}}^{\left(t - 1\right)} \cdot x}{y \cdot e^{b}} \]
      10. sub-negN/A

        \[\leadsto \frac{{a}^{\color{blue}{\left(t + \left(\mathsf{neg}\left(1\right)\right)\right)}} \cdot x}{y \cdot e^{b}} \]
      11. metadata-evalN/A

        \[\leadsto \frac{{a}^{\left(t + \color{blue}{-1}\right)} \cdot x}{y \cdot e^{b}} \]
      12. +-lowering-+.f64N/A

        \[\leadsto \frac{{a}^{\color{blue}{\left(t + -1\right)}} \cdot x}{y \cdot e^{b}} \]
      13. *-lowering-*.f64N/A

        \[\leadsto \frac{{a}^{\left(t + -1\right)} \cdot x}{\color{blue}{y \cdot e^{b}}} \]
      14. exp-lowering-exp.f6466.9

        \[\leadsto \frac{{a}^{\left(t + -1\right)} \cdot x}{y \cdot \color{blue}{e^{b}}} \]
    5. Simplified66.9%

      \[\leadsto \color{blue}{\frac{{a}^{\left(t + -1\right)} \cdot x}{y \cdot e^{b}}} \]
    6. Taylor expanded in t around 0

      \[\leadsto \color{blue}{\frac{x}{a \cdot \left(y \cdot e^{b}\right)}} \]
    7. Step-by-step derivation
      1. /-lowering-/.f64N/A

        \[\leadsto \color{blue}{\frac{x}{a \cdot \left(y \cdot e^{b}\right)}} \]
      2. *-lowering-*.f64N/A

        \[\leadsto \frac{x}{\color{blue}{a \cdot \left(y \cdot e^{b}\right)}} \]
      3. *-lowering-*.f64N/A

        \[\leadsto \frac{x}{a \cdot \color{blue}{\left(y \cdot e^{b}\right)}} \]
      4. exp-lowering-exp.f6464.5

        \[\leadsto \frac{x}{a \cdot \left(y \cdot \color{blue}{e^{b}}\right)} \]
    8. Simplified64.5%

      \[\leadsto \color{blue}{\frac{x}{a \cdot \left(y \cdot e^{b}\right)}} \]
    9. Taylor expanded in b around 0

      \[\leadsto \frac{x}{a \cdot \left(y \cdot \color{blue}{\left(1 + b \cdot \left(1 + b \cdot \left(\frac{1}{2} + \frac{1}{6} \cdot b\right)\right)\right)}\right)} \]
    10. Step-by-step derivation
      1. +-commutativeN/A

        \[\leadsto \frac{x}{a \cdot \left(y \cdot \color{blue}{\left(b \cdot \left(1 + b \cdot \left(\frac{1}{2} + \frac{1}{6} \cdot b\right)\right) + 1\right)}\right)} \]
      2. accelerator-lowering-fma.f64N/A

        \[\leadsto \frac{x}{a \cdot \left(y \cdot \color{blue}{\mathsf{fma}\left(b, 1 + b \cdot \left(\frac{1}{2} + \frac{1}{6} \cdot b\right), 1\right)}\right)} \]
      3. +-commutativeN/A

        \[\leadsto \frac{x}{a \cdot \left(y \cdot \mathsf{fma}\left(b, \color{blue}{b \cdot \left(\frac{1}{2} + \frac{1}{6} \cdot b\right) + 1}, 1\right)\right)} \]
      4. accelerator-lowering-fma.f64N/A

        \[\leadsto \frac{x}{a \cdot \left(y \cdot \mathsf{fma}\left(b, \color{blue}{\mathsf{fma}\left(b, \frac{1}{2} + \frac{1}{6} \cdot b, 1\right)}, 1\right)\right)} \]
      5. +-commutativeN/A

        \[\leadsto \frac{x}{a \cdot \left(y \cdot \mathsf{fma}\left(b, \mathsf{fma}\left(b, \color{blue}{\frac{1}{6} \cdot b + \frac{1}{2}}, 1\right), 1\right)\right)} \]
      6. *-commutativeN/A

        \[\leadsto \frac{x}{a \cdot \left(y \cdot \mathsf{fma}\left(b, \mathsf{fma}\left(b, \color{blue}{b \cdot \frac{1}{6}} + \frac{1}{2}, 1\right), 1\right)\right)} \]
      7. accelerator-lowering-fma.f6471.3

        \[\leadsto \frac{x}{a \cdot \left(y \cdot \mathsf{fma}\left(b, \mathsf{fma}\left(b, \color{blue}{\mathsf{fma}\left(b, 0.16666666666666666, 0.5\right)}, 1\right), 1\right)\right)} \]
    11. Simplified71.3%

      \[\leadsto \frac{x}{a \cdot \left(y \cdot \color{blue}{\mathsf{fma}\left(b, \mathsf{fma}\left(b, \mathsf{fma}\left(b, 0.16666666666666666, 0.5\right), 1\right), 1\right)}\right)} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification57.1%

    \[\leadsto \begin{array}{l} \mathbf{if}\;\frac{x \cdot e^{\left(y \cdot \log z + \log a \cdot \left(t + -1\right)\right) - b}}{y} \leq -\infty:\\ \;\;\;\;x \cdot \frac{\mathsf{fma}\left(b, \mathsf{fma}\left(b, 0.5, -1\right), 1\right)}{y}\\ \mathbf{elif}\;\frac{x \cdot e^{\left(y \cdot \log z + \log a \cdot \left(t + -1\right)\right) - b}}{y} \leq 2 \cdot 10^{+300}:\\ \;\;\;\;\frac{x}{a \cdot \left(y \cdot \mathsf{fma}\left(b, \mathsf{fma}\left(b, \mathsf{fma}\left(b, 0.16666666666666666, 0.5\right), 1\right), 1\right)\right)}\\ \mathbf{else}:\\ \;\;\;\;x \cdot \frac{\mathsf{fma}\left(b, \mathsf{fma}\left(b, 0.5, -1\right), 1\right)}{y}\\ \end{array} \]
  5. Add Preprocessing

Alternative 3: 53.9% accurate, 0.5× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_1 := \frac{x \cdot e^{\left(y \cdot \log z + \log a \cdot \left(t + -1\right)\right) - b}}{y}\\ t_2 := x \cdot \frac{\mathsf{fma}\left(b, \mathsf{fma}\left(b, 0.5, -1\right), 1\right)}{y}\\ \mathbf{if}\;t\_1 \leq -\infty:\\ \;\;\;\;t\_2\\ \mathbf{elif}\;t\_1 \leq 2 \cdot 10^{+300}:\\ \;\;\;\;\frac{x}{a \cdot \left(y \cdot \mathsf{fma}\left(b, \mathsf{fma}\left(b, 0.5, 1\right), 1\right)\right)}\\ \mathbf{else}:\\ \;\;\;\;t\_2\\ \end{array} \end{array} \]
;; Regime-split approximation: like Alternative 2 but the central regime
;; uses only a quadratic Taylor series of exp(b) in the denominator.
(FPCore (x y z t a b)
 :precision binary64
 (let* ((t_1 (/ (* x (exp (- (+ (* y (log z)) (* (log a) (+ t -1.0))) b))) y))
        (t_2 (* x (/ (fma b (fma b 0.5 -1.0) 1.0) y))))
   (if (<= t_1 (- INFINITY))
     t_2
     (if (<= t_1 2e+300) (/ x (* a (* y (fma b (fma b 0.5 1.0) 1.0)))) t_2))))
double code(double x, double y, double z, double t, double a, double b) {
	double t_1 = (x * exp((((y * log(z)) + (log(a) * (t + -1.0))) - b))) / y;
	double t_2 = x * (fma(b, fma(b, 0.5, -1.0), 1.0) / y);
	double tmp;
	if (t_1 <= -((double) INFINITY)) {
		tmp = t_2;
	} else if (t_1 <= 2e+300) {
		tmp = x / (a * (y * fma(b, fma(b, 0.5, 1.0), 1.0)));
	} else {
		tmp = t_2;
	}
	return tmp;
}
# Regime-split rewrite: like Alternative 2 but with a quadratic exp(b)
# series in the central regime's denominator. Branch order preserved.
function code(x, y, z, t, a, b)
	ref = Float64(Float64(x * exp(Float64(Float64(Float64(y * log(z)) + Float64(log(a) * Float64(t + -1.0))) - b))) / y)
	fallback = Float64(x * Float64(fma(b, fma(b, 0.5, -1.0), 1.0) / y))
	if ref <= -Inf
		result = fallback
	elseif ref <= 2e+300
		result = Float64(x / Float64(a * Float64(y * fma(b, fma(b, 0.5, 1.0), 1.0))))
	else
		result = fallback
	end
	return result
end
code[x_, y_, z_, t_, a_, b_] := Block[{t$95$1 = N[(N[(x * N[Exp[N[(N[(N[(y * N[Log[z], $MachinePrecision]), $MachinePrecision] + N[(N[Log[a], $MachinePrecision] * N[(t + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - b), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / y), $MachinePrecision]}, Block[{t$95$2 = N[(x * N[(N[(b * N[(b * 0.5 + -1.0), $MachinePrecision] + 1.0), $MachinePrecision] / y), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[t$95$1, (-Infinity)], t$95$2, If[LessEqual[t$95$1, 2e+300], N[(x / N[(a * N[(y * N[(b * N[(b * 0.5 + 1.0), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], t$95$2]]]]
\begin{array}{l}

\\
\begin{array}{l}
t_1 := \frac{x \cdot e^{\left(y \cdot \log z + \log a \cdot \left(t + -1\right)\right) - b}}{y}\\
t_2 := x \cdot \frac{\mathsf{fma}\left(b, \mathsf{fma}\left(b, 0.5, -1\right), 1\right)}{y}\\
\mathbf{if}\;t\_1 \leq -\infty:\\
\;\;\;\;t\_2\\

\mathbf{elif}\;t\_1 \leq 2 \cdot 10^{+300}:\\
\;\;\;\;\frac{x}{a \cdot \left(y \cdot \mathsf{fma}\left(b, \mathsf{fma}\left(b, 0.5, 1\right), 1\right)\right)}\\

\mathbf{else}:\\
\;\;\;\;t\_2\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if (/.f64 (*.f64 x (exp.f64 (-.f64 (+.f64 (*.f64 y (log.f64 z)) (*.f64 (-.f64 t #s(literal 1 binary64)) (log.f64 a))) b))) y) < -inf.0 or 2.0000000000000001e300 < (/.f64 (*.f64 x (exp.f64 (-.f64 (+.f64 (*.f64 y (log.f64 z)) (*.f64 (-.f64 t #s(literal 1 binary64)) (log.f64 a))) b))) y)

    1. Initial program 100.0%

      \[\frac{x \cdot e^{\left(y \cdot \log z + \left(t - 1\right) \cdot \log a\right) - b}}{y} \]
    2. Add Preprocessing
    3. Taylor expanded in y around inf

      \[\leadsto \frac{x \cdot e^{\color{blue}{y \cdot \log z} - b}}{y} \]
    4. Step-by-step derivation
      1. +-rgt-identityN/A

        \[\leadsto \frac{x \cdot e^{\color{blue}{\left(y \cdot \log z + 0\right)} - b}}{y} \]
      2. accelerator-lowering-fma.f64N/A

        \[\leadsto \frac{x \cdot e^{\color{blue}{\mathsf{fma}\left(y, \log z, 0\right)} - b}}{y} \]
      3. log-lowering-log.f6481.1

        \[\leadsto \frac{x \cdot e^{\mathsf{fma}\left(y, \color{blue}{\log z}, 0\right) - b}}{y} \]
    5. Simplified81.1%

      \[\leadsto \frac{x \cdot e^{\color{blue}{\mathsf{fma}\left(y, \log z, 0\right)} - b}}{y} \]
    6. Taylor expanded in y around 0

      \[\leadsto \frac{\color{blue}{x \cdot e^{\mathsf{neg}\left(b\right)}}}{y} \]
    7. Step-by-step derivation
      1. *-lowering-*.f64N/A

        \[\leadsto \frac{\color{blue}{x \cdot e^{\mathsf{neg}\left(b\right)}}}{y} \]
      2. exp-lowering-exp.f64N/A

        \[\leadsto \frac{x \cdot \color{blue}{e^{\mathsf{neg}\left(b\right)}}}{y} \]
      3. neg-sub0N/A

        \[\leadsto \frac{x \cdot e^{\color{blue}{0 - b}}}{y} \]
      4. --lowering--.f6440.4

        \[\leadsto \frac{x \cdot e^{\color{blue}{0 - b}}}{y} \]
    8. Simplified40.4%

      \[\leadsto \frac{\color{blue}{x \cdot e^{0 - b}}}{y} \]
    9. Taylor expanded in b around 0

      \[\leadsto \color{blue}{b \cdot \left(-1 \cdot \frac{x}{y} + \frac{1}{2} \cdot \frac{b \cdot x}{y}\right) + \frac{x}{y}} \]
    10. Simplified32.9%

      \[\leadsto \color{blue}{\frac{x}{y} \cdot \mathsf{fma}\left(b, \mathsf{fma}\left(0.5, b, -1\right), 1\right)} \]
    11. Step-by-step derivation
      1. associate-*l/N/A

        \[\leadsto \color{blue}{\frac{x \cdot \left(b \cdot \left(\frac{1}{2} \cdot b + -1\right) + 1\right)}{y}} \]
      2. associate-/l*N/A

        \[\leadsto \color{blue}{x \cdot \frac{b \cdot \left(\frac{1}{2} \cdot b + -1\right) + 1}{y}} \]
      3. *-commutativeN/A

        \[\leadsto \color{blue}{\frac{b \cdot \left(\frac{1}{2} \cdot b + -1\right) + 1}{y} \cdot x} \]
      4. *-lowering-*.f64N/A

        \[\leadsto \color{blue}{\frac{b \cdot \left(\frac{1}{2} \cdot b + -1\right) + 1}{y} \cdot x} \]
      5. /-lowering-/.f64N/A

        \[\leadsto \color{blue}{\frac{b \cdot \left(\frac{1}{2} \cdot b + -1\right) + 1}{y}} \cdot x \]
      6. accelerator-lowering-fma.f64N/A

        \[\leadsto \frac{\color{blue}{\mathsf{fma}\left(b, \frac{1}{2} \cdot b + -1, 1\right)}}{y} \cdot x \]
      7. *-commutativeN/A

        \[\leadsto \frac{\mathsf{fma}\left(b, \color{blue}{b \cdot \frac{1}{2}} + -1, 1\right)}{y} \cdot x \]
      8. accelerator-lowering-fma.f6441.4

        \[\leadsto \frac{\mathsf{fma}\left(b, \color{blue}{\mathsf{fma}\left(b, 0.5, -1\right)}, 1\right)}{y} \cdot x \]
    12. Applied egg-rr41.4%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(b, \mathsf{fma}\left(b, 0.5, -1\right), 1\right)}{y} \cdot x} \]

    if -inf.0 < (/.f64 (*.f64 x (exp.f64 (-.f64 (+.f64 (*.f64 y (log.f64 z)) (*.f64 (-.f64 t #s(literal 1 binary64)) (log.f64 a))) b))) y) < 2.0000000000000001e300

    1. Initial program 96.9%

      \[\frac{x \cdot e^{\left(y \cdot \log z + \left(t - 1\right) \cdot \log a\right) - b}}{y} \]
    2. Add Preprocessing
    3. Taylor expanded in y around 0

      \[\leadsto \color{blue}{\frac{x \cdot e^{\log a \cdot \left(t - 1\right) - b}}{y}} \]
    4. Step-by-step derivation
      1. *-commutativeN/A

        \[\leadsto \frac{\color{blue}{e^{\log a \cdot \left(t - 1\right) - b} \cdot x}}{y} \]
      2. exp-diffN/A

        \[\leadsto \frac{\color{blue}{\frac{e^{\log a \cdot \left(t - 1\right)}}{e^{b}}} \cdot x}{y} \]
      3. associate-*l/N/A

        \[\leadsto \frac{\color{blue}{\frac{e^{\log a \cdot \left(t - 1\right)} \cdot x}{e^{b}}}}{y} \]
      4. associate-/l/N/A

        \[\leadsto \color{blue}{\frac{e^{\log a \cdot \left(t - 1\right)} \cdot x}{y \cdot e^{b}}} \]
      5. /-lowering-/.f64N/A

        \[\leadsto \color{blue}{\frac{e^{\log a \cdot \left(t - 1\right)} \cdot x}{y \cdot e^{b}}} \]
      6. *-lowering-*.f64N/A

        \[\leadsto \frac{\color{blue}{e^{\log a \cdot \left(t - 1\right)} \cdot x}}{y \cdot e^{b}} \]
      7. exp-prodN/A

        \[\leadsto \frac{\color{blue}{{\left(e^{\log a}\right)}^{\left(t - 1\right)}} \cdot x}{y \cdot e^{b}} \]
      8. pow-lowering-pow.f64N/A

        \[\leadsto \frac{\color{blue}{{\left(e^{\log a}\right)}^{\left(t - 1\right)}} \cdot x}{y \cdot e^{b}} \]
      9. rem-exp-logN/A

        \[\leadsto \frac{{\color{blue}{a}}^{\left(t - 1\right)} \cdot x}{y \cdot e^{b}} \]
      10. sub-negN/A

        \[\leadsto \frac{{a}^{\color{blue}{\left(t + \left(\mathsf{neg}\left(1\right)\right)\right)}} \cdot x}{y \cdot e^{b}} \]
      11. metadata-evalN/A

        \[\leadsto \frac{{a}^{\left(t + \color{blue}{-1}\right)} \cdot x}{y \cdot e^{b}} \]
      12. +-lowering-+.f64N/A

        \[\leadsto \frac{{a}^{\color{blue}{\left(t + -1\right)}} \cdot x}{y \cdot e^{b}} \]
      13. *-lowering-*.f64N/A

        \[\leadsto \frac{{a}^{\left(t + -1\right)} \cdot x}{\color{blue}{y \cdot e^{b}}} \]
      14. exp-lowering-exp.f6466.9

        \[\leadsto \frac{{a}^{\left(t + -1\right)} \cdot x}{y \cdot \color{blue}{e^{b}}} \]
    5. Simplified66.9%

      \[\leadsto \color{blue}{\frac{{a}^{\left(t + -1\right)} \cdot x}{y \cdot e^{b}}} \]
    6. Taylor expanded in t around 0

      \[\leadsto \color{blue}{\frac{x}{a \cdot \left(y \cdot e^{b}\right)}} \]
    7. Step-by-step derivation
      1. /-lowering-/.f64N/A

        \[\leadsto \color{blue}{\frac{x}{a \cdot \left(y \cdot e^{b}\right)}} \]
      2. *-lowering-*.f64N/A

        \[\leadsto \frac{x}{\color{blue}{a \cdot \left(y \cdot e^{b}\right)}} \]
      3. *-lowering-*.f64N/A

        \[\leadsto \frac{x}{a \cdot \color{blue}{\left(y \cdot e^{b}\right)}} \]
      4. exp-lowering-exp.f6464.5

        \[\leadsto \frac{x}{a \cdot \left(y \cdot \color{blue}{e^{b}}\right)} \]
    8. Simplified64.5%

      \[\leadsto \color{blue}{\frac{x}{a \cdot \left(y \cdot e^{b}\right)}} \]
    9. Taylor expanded in b around 0

      \[\leadsto \frac{x}{a \cdot \left(y \cdot \color{blue}{\left(1 + b \cdot \left(1 + \frac{1}{2} \cdot b\right)\right)}\right)} \]
    10. Step-by-step derivation
      1. +-commutativeN/A

        \[\leadsto \frac{x}{a \cdot \left(y \cdot \color{blue}{\left(b \cdot \left(1 + \frac{1}{2} \cdot b\right) + 1\right)}\right)} \]
      2. accelerator-lowering-fma.f64N/A

        \[\leadsto \frac{x}{a \cdot \left(y \cdot \color{blue}{\mathsf{fma}\left(b, 1 + \frac{1}{2} \cdot b, 1\right)}\right)} \]
      3. +-commutativeN/A

        \[\leadsto \frac{x}{a \cdot \left(y \cdot \mathsf{fma}\left(b, \color{blue}{\frac{1}{2} \cdot b + 1}, 1\right)\right)} \]
      4. *-commutativeN/A

        \[\leadsto \frac{x}{a \cdot \left(y \cdot \mathsf{fma}\left(b, \color{blue}{b \cdot \frac{1}{2}} + 1, 1\right)\right)} \]
      5. accelerator-lowering-fma.f6464.9

        \[\leadsto \frac{x}{a \cdot \left(y \cdot \mathsf{fma}\left(b, \color{blue}{\mathsf{fma}\left(b, 0.5, 1\right)}, 1\right)\right)} \]
    11. Simplified64.9%

      \[\leadsto \frac{x}{a \cdot \left(y \cdot \color{blue}{\mathsf{fma}\left(b, \mathsf{fma}\left(b, 0.5, 1\right), 1\right)}\right)} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification53.7%

    \[\leadsto \begin{array}{l} \mathbf{if}\;\frac{x \cdot e^{\left(y \cdot \log z + \log a \cdot \left(t + -1\right)\right) - b}}{y} \leq -\infty:\\ \;\;\;\;x \cdot \frac{\mathsf{fma}\left(b, \mathsf{fma}\left(b, 0.5, -1\right), 1\right)}{y}\\ \mathbf{elif}\;\frac{x \cdot e^{\left(y \cdot \log z + \log a \cdot \left(t + -1\right)\right) - b}}{y} \leq 2 \cdot 10^{+300}:\\ \;\;\;\;\frac{x}{a \cdot \left(y \cdot \mathsf{fma}\left(b, \mathsf{fma}\left(b, 0.5, 1\right), 1\right)\right)}\\ \mathbf{else}:\\ \;\;\;\;x \cdot \frac{\mathsf{fma}\left(b, \mathsf{fma}\left(b, 0.5, -1\right), 1\right)}{y}\\ \end{array} \]
  5. Add Preprocessing

Alternative 4: 38.7% accurate, 0.5× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_1 := \frac{x \cdot e^{\left(y \cdot \log z + \log a \cdot \left(t + -1\right)\right) - b}}{y}\\ \mathbf{if}\;t\_1 \leq -\infty:\\ \;\;\;\;x \cdot \frac{1 - b}{y}\\ \mathbf{elif}\;t\_1 \leq 10^{+203}:\\ \;\;\;\;\frac{x}{a \cdot \mathsf{fma}\left(y, b, y\right)}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{x}{a}}{y}\\ \end{array} \end{array} \]
;; Three-regime approximation: linear series x*(1-b)/y when the reference
;; value underflows, x / (a * (y*b + y)) in the central regime, and the
;; b-free limit (x/a)/y for very large reference values.
(FPCore (x y z t a b)
 :precision binary64
 (let* ((t_1 (/ (* x (exp (- (+ (* y (log z)) (* (log a) (+ t -1.0))) b))) y)))
   (if (<= t_1 (- INFINITY))
     (* x (/ (- 1.0 b) y))
     (if (<= t_1 1e+203) (/ x (* a (fma y b y))) (/ (/ x a) y)))))
double code(double x, double y, double z, double t, double a, double b) {
	double t_1 = (x * exp((((y * log(z)) + (log(a) * (t + -1.0))) - b))) / y;
	double tmp;
	if (t_1 <= -((double) INFINITY)) {
		tmp = x * ((1.0 - b) / y);
	} else if (t_1 <= 1e+203) {
		tmp = x / (a * fma(y, b, y));
	} else {
		tmp = (x / a) / y;
	}
	return tmp;
}
# Herbie-generated rewrite of x*exp((y*log(z) + (t-1)*log(a)) - b)/y
# (Alternative 4).  t_1 evaluates the original expression to select a
# regime; each branch uses a series-based simplification for that range.
function code(x, y, z, t, a, b)
	t_1 = Float64(Float64(x * exp(Float64(Float64(Float64(y * log(z)) + Float64(log(a) * Float64(t + -1.0))) - b))) / y)
	tmp = 0.0
	if (t_1 <= Float64(-Inf))
		# First-order Taylor expansion of exp(-b) around b = 0: x*(1-b)/y.
		tmp = Float64(x * Float64(Float64(1.0 - b) / y));
	elseif (t_1 <= 1e+203)
		# fma(y, b, y) = y*b + y, i.e. x / (a*y*(1+b)).
		tmp = Float64(x / Float64(a * fma(y, b, y)));
	else
		# Large-magnitude regime: exponential factors dropped.
		tmp = Float64(Float64(x / a) / y);
	end
	return tmp
end
(* Herbie Alternative 4 in Mathematica: t$95$1 is the regime selector (the
   original expression); the If branches mirror the FPCore version above. *)
code[x_, y_, z_, t_, a_, b_] := Block[{t$95$1 = N[(N[(x * N[Exp[N[(N[(N[(y * N[Log[z], $MachinePrecision]), $MachinePrecision] + N[(N[Log[a], $MachinePrecision] * N[(t + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - b), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / y), $MachinePrecision]}, If[LessEqual[t$95$1, (-Infinity)], N[(x * N[(N[(1.0 - b), $MachinePrecision] / y), $MachinePrecision]), $MachinePrecision], If[LessEqual[t$95$1, 1e+203], N[(x / N[(a * N[(y * b + y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(x / a), $MachinePrecision] / y), $MachinePrecision]]]]
\begin{array}{l}

\\
\begin{array}{l}
t_1 := \frac{x \cdot e^{\left(y \cdot \log z + \log a \cdot \left(t + -1\right)\right) - b}}{y}\\
\mathbf{if}\;t\_1 \leq -\infty:\\
\;\;\;\;x \cdot \frac{1 - b}{y}\\

\mathbf{elif}\;t\_1 \leq 10^{+203}:\\
\;\;\;\;\frac{x}{a \cdot \mathsf{fma}\left(y, b, y\right)}\\

\mathbf{else}:\\
\;\;\;\;\frac{\frac{x}{a}}{y}\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if (/.f64 (*.f64 x (exp.f64 (-.f64 (+.f64 (*.f64 y (log.f64 z)) (*.f64 (-.f64 t #s(literal 1 binary64)) (log.f64 a))) b))) y) < -inf.0

    1. Initial program 100.0%

      \[\frac{x \cdot e^{\left(y \cdot \log z + \left(t - 1\right) \cdot \log a\right) - b}}{y} \]
    2. Add Preprocessing
    3. Taylor expanded in y around inf

      \[\leadsto \frac{x \cdot e^{\color{blue}{y \cdot \log z} - b}}{y} \]
    4. Step-by-step derivation
      1. +-rgt-identityN/A

        \[\leadsto \frac{x \cdot e^{\color{blue}{\left(y \cdot \log z + 0\right)} - b}}{y} \]
      2. accelerator-lowering-fma.f64N/A

        \[\leadsto \frac{x \cdot e^{\color{blue}{\mathsf{fma}\left(y, \log z, 0\right)} - b}}{y} \]
      3. log-lowering-log.f64 (accuracy: 81.3%)

        \[\leadsto \frac{x \cdot e^{\mathsf{fma}\left(y, \color{blue}{\log z}, 0\right) - b}}{y} \]
    5. Simplified (accuracy: 81.3%)

      \[\leadsto \frac{x \cdot e^{\color{blue}{\mathsf{fma}\left(y, \log z, 0\right)} - b}}{y} \]
    6. Taylor expanded in y around 0

      \[\leadsto \frac{\color{blue}{x \cdot e^{\mathsf{neg}\left(b\right)}}}{y} \]
    7. Step-by-step derivation
      1. *-lowering-*.f64N/A

        \[\leadsto \frac{\color{blue}{x \cdot e^{\mathsf{neg}\left(b\right)}}}{y} \]
      2. exp-lowering-exp.f64N/A

        \[\leadsto \frac{x \cdot \color{blue}{e^{\mathsf{neg}\left(b\right)}}}{y} \]
      3. neg-sub0N/A

        \[\leadsto \frac{x \cdot e^{\color{blue}{0 - b}}}{y} \]
      4. --lowering--.f64 (accuracy: 43.6%)

        \[\leadsto \frac{x \cdot e^{\color{blue}{0 - b}}}{y} \]
    8. Simplified43.6%

      \[\leadsto \frac{\color{blue}{x \cdot e^{0 - b}}}{y} \]
    9. Taylor expanded in b around 0

      \[\leadsto \color{blue}{-1 \cdot \frac{b \cdot x}{y} + \frac{x}{y}} \]
    10. Step-by-step derivation
      1. +-commutativeN/A

        \[\leadsto \color{blue}{\frac{x}{y} + -1 \cdot \frac{b \cdot x}{y}} \]
      2. mul-1-negN/A

        \[\leadsto \frac{x}{y} + \color{blue}{\left(\mathsf{neg}\left(\frac{b \cdot x}{y}\right)\right)} \]
      3. unsub-negN/A

        \[\leadsto \color{blue}{\frac{x}{y} - \frac{b \cdot x}{y}} \]
      4. div-subN/A

        \[\leadsto \color{blue}{\frac{x - b \cdot x}{y}} \]
      5. unsub-negN/A

        \[\leadsto \frac{\color{blue}{x + \left(\mathsf{neg}\left(b \cdot x\right)\right)}}{y} \]
      6. mul-1-negN/A

        \[\leadsto \frac{x + \color{blue}{-1 \cdot \left(b \cdot x\right)}}{y} \]
      7. /-lowering-/.f64N/A

        \[\leadsto \color{blue}{\frac{x + -1 \cdot \left(b \cdot x\right)}{y}} \]
      8. *-lft-identityN/A

        \[\leadsto \frac{\color{blue}{1 \cdot x} + -1 \cdot \left(b \cdot x\right)}{y} \]
      9. associate-*r*N/A

        \[\leadsto \frac{1 \cdot x + \color{blue}{\left(-1 \cdot b\right) \cdot x}}{y} \]
      10. distribute-rgt-outN/A

        \[\leadsto \frac{\color{blue}{x \cdot \left(1 + -1 \cdot b\right)}}{y} \]
      11. *-lowering-*.f64N/A

        \[\leadsto \frac{\color{blue}{x \cdot \left(1 + -1 \cdot b\right)}}{y} \]
      12. neg-mul-1N/A

        \[\leadsto \frac{x \cdot \left(1 + \color{blue}{\left(\mathsf{neg}\left(b\right)\right)}\right)}{y} \]
      13. unsub-negN/A

        \[\leadsto \frac{x \cdot \color{blue}{\left(1 - b\right)}}{y} \]
      14. --lowering--.f6416.7

        \[\leadsto \frac{x \cdot \color{blue}{\left(1 - b\right)}}{y} \]
    11. Simplified16.7%

      \[\leadsto \color{blue}{\frac{x \cdot \left(1 - b\right)}{y}} \]
    12. Step-by-step derivation
      1. associate-/l*N/A

        \[\leadsto \color{blue}{x \cdot \frac{1 - b}{y}} \]
      2. *-commutativeN/A

        \[\leadsto \color{blue}{\frac{1 - b}{y} \cdot x} \]
      3. *-lowering-*.f64N/A

        \[\leadsto \color{blue}{\frac{1 - b}{y} \cdot x} \]
      4. /-lowering-/.f64N/A

        \[\leadsto \color{blue}{\frac{1 - b}{y}} \cdot x \]
      5. --lowering--.f6421.8

        \[\leadsto \frac{\color{blue}{1 - b}}{y} \cdot x \]
    13. Applied egg-rr (accuracy: 21.8%)

      \[\leadsto \color{blue}{\frac{1 - b}{y} \cdot x} \]

    if -inf.0 < (/.f64 (*.f64 x (exp.f64 (-.f64 (+.f64 (*.f64 y (log.f64 z)) (*.f64 (-.f64 t #s(literal 1 binary64)) (log.f64 a))) b))) y) < 9.9999999999999999e202

    1. Initial program 97.0%

      \[\frac{x \cdot e^{\left(y \cdot \log z + \left(t - 1\right) \cdot \log a\right) - b}}{y} \]
    2. Add Preprocessing
    3. Taylor expanded in y around 0

      \[\leadsto \color{blue}{\frac{x \cdot e^{\log a \cdot \left(t - 1\right) - b}}{y}} \]
    4. Step-by-step derivation
      1. *-commutativeN/A

        \[\leadsto \frac{\color{blue}{e^{\log a \cdot \left(t - 1\right) - b} \cdot x}}{y} \]
      2. exp-diffN/A

        \[\leadsto \frac{\color{blue}{\frac{e^{\log a \cdot \left(t - 1\right)}}{e^{b}}} \cdot x}{y} \]
      3. associate-*l/N/A

        \[\leadsto \frac{\color{blue}{\frac{e^{\log a \cdot \left(t - 1\right)} \cdot x}{e^{b}}}}{y} \]
      4. associate-/l/N/A

        \[\leadsto \color{blue}{\frac{e^{\log a \cdot \left(t - 1\right)} \cdot x}{y \cdot e^{b}}} \]
      5. /-lowering-/.f64N/A

        \[\leadsto \color{blue}{\frac{e^{\log a \cdot \left(t - 1\right)} \cdot x}{y \cdot e^{b}}} \]
      6. *-lowering-*.f64N/A

        \[\leadsto \frac{\color{blue}{e^{\log a \cdot \left(t - 1\right)} \cdot x}}{y \cdot e^{b}} \]
      7. exp-prodN/A

        \[\leadsto \frac{\color{blue}{{\left(e^{\log a}\right)}^{\left(t - 1\right)}} \cdot x}{y \cdot e^{b}} \]
      8. pow-lowering-pow.f64N/A

        \[\leadsto \frac{\color{blue}{{\left(e^{\log a}\right)}^{\left(t - 1\right)}} \cdot x}{y \cdot e^{b}} \]
      9. rem-exp-logN/A

        \[\leadsto \frac{{\color{blue}{a}}^{\left(t - 1\right)} \cdot x}{y \cdot e^{b}} \]
      10. sub-negN/A

        \[\leadsto \frac{{a}^{\color{blue}{\left(t + \left(\mathsf{neg}\left(1\right)\right)\right)}} \cdot x}{y \cdot e^{b}} \]
      11. metadata-evalN/A

        \[\leadsto \frac{{a}^{\left(t + \color{blue}{-1}\right)} \cdot x}{y \cdot e^{b}} \]
      12. +-lowering-+.f64N/A

        \[\leadsto \frac{{a}^{\color{blue}{\left(t + -1\right)}} \cdot x}{y \cdot e^{b}} \]
      13. *-lowering-*.f64N/A

        \[\leadsto \frac{{a}^{\left(t + -1\right)} \cdot x}{\color{blue}{y \cdot e^{b}}} \]
      14. exp-lowering-exp.f6466.1

        \[\leadsto \frac{{a}^{\left(t + -1\right)} \cdot x}{y \cdot \color{blue}{e^{b}}} \]
    5. Simplified66.1%

      \[\leadsto \color{blue}{\frac{{a}^{\left(t + -1\right)} \cdot x}{y \cdot e^{b}}} \]
    6. Taylor expanded in t around 0

      \[\leadsto \color{blue}{\frac{x}{a \cdot \left(y \cdot e^{b}\right)}} \]
    7. Step-by-step derivation
      1. /-lowering-/.f64N/A

        \[\leadsto \color{blue}{\frac{x}{a \cdot \left(y \cdot e^{b}\right)}} \]
      2. *-lowering-*.f64N/A

        \[\leadsto \frac{x}{\color{blue}{a \cdot \left(y \cdot e^{b}\right)}} \]
      3. *-lowering-*.f64N/A

        \[\leadsto \frac{x}{a \cdot \color{blue}{\left(y \cdot e^{b}\right)}} \]
      4. exp-lowering-exp.f6464.4

        \[\leadsto \frac{x}{a \cdot \left(y \cdot \color{blue}{e^{b}}\right)} \]
    8. Simplified64.4%

      \[\leadsto \color{blue}{\frac{x}{a \cdot \left(y \cdot e^{b}\right)}} \]
    9. Taylor expanded in b around 0

      \[\leadsto \frac{x}{\color{blue}{a \cdot y + a \cdot \left(b \cdot y\right)}} \]
    10. Step-by-step derivation
      1. distribute-lft-outN/A

        \[\leadsto \frac{x}{\color{blue}{a \cdot \left(y + b \cdot y\right)}} \]
      2. *-lowering-*.f64N/A

        \[\leadsto \frac{x}{\color{blue}{a \cdot \left(y + b \cdot y\right)}} \]
      3. +-commutativeN/A

        \[\leadsto \frac{x}{a \cdot \color{blue}{\left(b \cdot y + y\right)}} \]
      4. *-commutativeN/A

        \[\leadsto \frac{x}{a \cdot \left(\color{blue}{y \cdot b} + y\right)} \]
      5. accelerator-lowering-fma.f6448.3

        \[\leadsto \frac{x}{a \cdot \color{blue}{\mathsf{fma}\left(y, b, y\right)}} \]
    11. Simplified48.3%

      \[\leadsto \frac{x}{\color{blue}{a \cdot \mathsf{fma}\left(y, b, y\right)}} \]

    if 9.9999999999999999e202 < (/.f64 (*.f64 x (exp.f64 (-.f64 (+.f64 (*.f64 y (log.f64 z)) (*.f64 (-.f64 t #s(literal 1 binary64)) (log.f64 a))) b))) y)

    1. Initial program 99.6%

      \[\frac{x \cdot e^{\left(y \cdot \log z + \left(t - 1\right) \cdot \log a\right) - b}}{y} \]
    2. Add Preprocessing
    3. Taylor expanded in y around 0

      \[\leadsto \color{blue}{\frac{x \cdot e^{\log a \cdot \left(t - 1\right) - b}}{y}} \]
    4. Step-by-step derivation
      1. *-commutativeN/A

        \[\leadsto \frac{\color{blue}{e^{\log a \cdot \left(t - 1\right) - b} \cdot x}}{y} \]
      2. exp-diffN/A

        \[\leadsto \frac{\color{blue}{\frac{e^{\log a \cdot \left(t - 1\right)}}{e^{b}}} \cdot x}{y} \]
      3. associate-*l/N/A

        \[\leadsto \frac{\color{blue}{\frac{e^{\log a \cdot \left(t - 1\right)} \cdot x}{e^{b}}}}{y} \]
      4. associate-/l/N/A

        \[\leadsto \color{blue}{\frac{e^{\log a \cdot \left(t - 1\right)} \cdot x}{y \cdot e^{b}}} \]
      5. /-lowering-/.f64N/A

        \[\leadsto \color{blue}{\frac{e^{\log a \cdot \left(t - 1\right)} \cdot x}{y \cdot e^{b}}} \]
      6. *-lowering-*.f64N/A

        \[\leadsto \frac{\color{blue}{e^{\log a \cdot \left(t - 1\right)} \cdot x}}{y \cdot e^{b}} \]
      7. exp-prodN/A

        \[\leadsto \frac{\color{blue}{{\left(e^{\log a}\right)}^{\left(t - 1\right)}} \cdot x}{y \cdot e^{b}} \]
      8. pow-lowering-pow.f64N/A

        \[\leadsto \frac{\color{blue}{{\left(e^{\log a}\right)}^{\left(t - 1\right)}} \cdot x}{y \cdot e^{b}} \]
      9. rem-exp-logN/A

        \[\leadsto \frac{{\color{blue}{a}}^{\left(t - 1\right)} \cdot x}{y \cdot e^{b}} \]
      10. sub-negN/A

        \[\leadsto \frac{{a}^{\color{blue}{\left(t + \left(\mathsf{neg}\left(1\right)\right)\right)}} \cdot x}{y \cdot e^{b}} \]
      11. metadata-evalN/A

        \[\leadsto \frac{{a}^{\left(t + \color{blue}{-1}\right)} \cdot x}{y \cdot e^{b}} \]
      12. +-lowering-+.f64N/A

        \[\leadsto \frac{{a}^{\color{blue}{\left(t + -1\right)}} \cdot x}{y \cdot e^{b}} \]
      13. *-lowering-*.f64N/A

        \[\leadsto \frac{{a}^{\left(t + -1\right)} \cdot x}{\color{blue}{y \cdot e^{b}}} \]
      14. exp-lowering-exp.f6468.1

        \[\leadsto \frac{{a}^{\left(t + -1\right)} \cdot x}{y \cdot \color{blue}{e^{b}}} \]
    5. Simplified68.1%

      \[\leadsto \color{blue}{\frac{{a}^{\left(t + -1\right)} \cdot x}{y \cdot e^{b}}} \]
    6. Taylor expanded in b around 0

      \[\leadsto \frac{{a}^{\left(t + -1\right)} \cdot x}{\color{blue}{y}} \]
    7. Step-by-step derivation
      1. Simplified61.2%

        \[\leadsto \frac{{a}^{\left(t + -1\right)} \cdot x}{\color{blue}{y}} \]
      2. Taylor expanded in t around 0

        \[\leadsto \frac{\color{blue}{\frac{x}{a}}}{y} \]
      3. Step-by-step derivation
        1. /-lowering-/.f6429.8

          \[\leadsto \frac{\color{blue}{\frac{x}{a}}}{y} \]
      4. Simplified29.8%

        \[\leadsto \frac{\color{blue}{\frac{x}{a}}}{y} \]
    8. Recombined 3 regimes into one program.
    9. Final simplification (accuracy: 37.4%)

      \[\leadsto \begin{array}{l} \mathbf{if}\;\frac{x \cdot e^{\left(y \cdot \log z + \log a \cdot \left(t + -1\right)\right) - b}}{y} \leq -\infty:\\ \;\;\;\;x \cdot \frac{1 - b}{y}\\ \mathbf{elif}\;\frac{x \cdot e^{\left(y \cdot \log z + \log a \cdot \left(t + -1\right)\right) - b}}{y} \leq 10^{+203}:\\ \;\;\;\;\frac{x}{a \cdot \mathsf{fma}\left(y, b, y\right)}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{x}{a}}{y}\\ \end{array} \]
    10. Add Preprocessing

    Alternative 5: 79.4% accurate, 0.7× speedup?

    \[\begin{array}{l} \\ \begin{array}{l} t_1 := \log a \cdot \left(t + -1\right)\\ t_2 := \frac{x \cdot {a}^{t}}{y}\\ \mathbf{if}\;t\_1 \leq -5 \cdot 10^{+145}:\\ \;\;\;\;t\_2\\ \mathbf{elif}\;t\_1 \leq 5 \cdot 10^{+130}:\\ \;\;\;\;\frac{x \cdot e^{\mathsf{fma}\left(y, \log z, 0\right) - b}}{y}\\ \mathbf{else}:\\ \;\;\;\;t\_2\\ \end{array} \end{array} \]
    ;; Herbie Alternative 5: regime split on t_1 = log(a)*(t-1).
    ;; Both extreme regimes use t_2 = x*a^t/y (b dropped by Taylor expansion
    ;; around 0); the middle regime keeps exp(...-b) but drops (t-1)*log(a).
    (FPCore (x y z t a b)
     :precision binary64
     (let* ((t_1 (* (log a) (+ t -1.0))) (t_2 (/ (* x (pow a t)) y)))
       (if (<= t_1 -5e+145)
         t_2
         (if (<= t_1 5e+130) (/ (* x (exp (- (fma y (log z) 0.0) b))) y) t_2))))
    /* Herbie Alternative 5: split on t_1 = log(a)*(t-1).  Extreme values of
     * t_1 use t_2 = x*pow(a,t)/y (b dropped by Taylor expansion around 0);
     * the middle regime instead drops the (t-1)*log(a) term. */
    double code(double x, double y, double z, double t, double a, double b) {
    	double t_1 = log(a) * (t + -1.0);
    	double t_2 = (x * pow(a, t)) / y;
    	double tmp;
    	if (t_1 <= -5e+145) {
    		tmp = t_2;
    	} else if (t_1 <= 5e+130) {
    		/* fma(y, log(z), 0.0) computes y*log(z) with a single rounding. */
    		tmp = (x * exp((fma(y, log(z), 0.0) - b))) / y;
    	} else {
    		tmp = t_2;
    	}
    	return tmp;
    }
    
    # Herbie Alternative 5: split on t_1 = log(a)*(t-1).  Extreme values of
    # t_1 use t_2 = x*a^t/y (b dropped by Taylor expansion around 0); the
    # middle regime instead drops the (t-1)*log(a) term.
    function code(x, y, z, t, a, b)
    	t_1 = Float64(log(a) * Float64(t + -1.0))
    	t_2 = Float64(Float64(x * (a ^ t)) / y)
    	tmp = 0.0
    	if (t_1 <= -5e+145)
    		tmp = t_2;
    	elseif (t_1 <= 5e+130)
    		# fma(y, log(z), 0.0) computes y*log(z) with a single rounding.
    		tmp = Float64(Float64(x * exp(Float64(fma(y, log(z), 0.0) - b))) / y);
    	else
    		tmp = t_2;
    	end
    	return tmp
    end
    
    (* Herbie Alternative 5 in Mathematica: t$95$1 = log(a)*(t-1) selects the
       regime; t$95$2 = x*a^t/y serves both extreme branches. *)
    code[x_, y_, z_, t_, a_, b_] := Block[{t$95$1 = N[(N[Log[a], $MachinePrecision] * N[(t + -1.0), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$2 = N[(N[(x * N[Power[a, t], $MachinePrecision]), $MachinePrecision] / y), $MachinePrecision]}, If[LessEqual[t$95$1, -5e+145], t$95$2, If[LessEqual[t$95$1, 5e+130], N[(N[(x * N[Exp[N[(N[(y * N[Log[z], $MachinePrecision] + 0.0), $MachinePrecision] - b), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / y), $MachinePrecision], t$95$2]]]]
    
    \begin{array}{l}
    
    \\
    \begin{array}{l}
    t_1 := \log a \cdot \left(t + -1\right)\\
    t_2 := \frac{x \cdot {a}^{t}}{y}\\
    \mathbf{if}\;t\_1 \leq -5 \cdot 10^{+145}:\\
    \;\;\;\;t\_2\\
    
    \mathbf{elif}\;t\_1 \leq 5 \cdot 10^{+130}:\\
    \;\;\;\;\frac{x \cdot e^{\mathsf{fma}\left(y, \log z, 0\right) - b}}{y}\\
    
    \mathbf{else}:\\
    \;\;\;\;t\_2\\
    
    
    \end{array}
    \end{array}
    
    Derivation
    1. Split input into 2 regimes
    2. if (*.f64 (-.f64 t #s(literal 1 binary64)) (log.f64 a)) < -4.99999999999999967e145 or 4.9999999999999996e130 < (*.f64 (-.f64 t #s(literal 1 binary64)) (log.f64 a))

      1. Initial program 100.0%

        \[\frac{x \cdot e^{\left(y \cdot \log z + \left(t - 1\right) \cdot \log a\right) - b}}{y} \]
      2. Add Preprocessing
      3. Taylor expanded in t around inf

        \[\leadsto \frac{x \cdot e^{\color{blue}{t \cdot \log a} - b}}{y} \]
      4. Step-by-step derivation
        1. +-rgt-identityN/A

          \[\leadsto \frac{x \cdot e^{\color{blue}{\left(t \cdot \log a + 0\right)} - b}}{y} \]
        2. *-commutativeN/A

          \[\leadsto \frac{x \cdot e^{\left(\color{blue}{\log a \cdot t} + 0\right) - b}}{y} \]
        3. accelerator-lowering-fma.f64N/A

          \[\leadsto \frac{x \cdot e^{\color{blue}{\mathsf{fma}\left(\log a, t, 0\right)} - b}}{y} \]
        4. rem-exp-logN/A

          \[\leadsto \frac{x \cdot e^{\mathsf{fma}\left(\log \color{blue}{\left(e^{\log a}\right)}, t, 0\right) - b}}{y} \]
        5. log-lowering-log.f64N/A

          \[\leadsto \frac{x \cdot e^{\mathsf{fma}\left(\color{blue}{\log \left(e^{\log a}\right)}, t, 0\right) - b}}{y} \]
        6. rem-exp-log98.5

          \[\leadsto \frac{x \cdot e^{\mathsf{fma}\left(\log \color{blue}{a}, t, 0\right) - b}}{y} \]
      5. Simplified98.5%

        \[\leadsto \frac{x \cdot e^{\color{blue}{\mathsf{fma}\left(\log a, t, 0\right)} - b}}{y} \]
      6. Taylor expanded in b around 0

        \[\leadsto \color{blue}{\frac{x \cdot {a}^{t}}{y}} \]
      7. Step-by-step derivation
        1. /-lowering-/.f64N/A

          \[\leadsto \color{blue}{\frac{x \cdot {a}^{t}}{y}} \]
        2. *-lowering-*.f64N/A

          \[\leadsto \frac{\color{blue}{x \cdot {a}^{t}}}{y} \]
        3. pow-lowering-pow.f6492.5

          \[\leadsto \frac{x \cdot \color{blue}{{a}^{t}}}{y} \]
      8. Simplified92.5%

        \[\leadsto \color{blue}{\frac{x \cdot {a}^{t}}{y}} \]

      if -4.99999999999999967e145 < (*.f64 (-.f64 t #s(literal 1 binary64)) (log.f64 a)) < 4.9999999999999996e130

      1. Initial program 97.8%

        \[\frac{x \cdot e^{\left(y \cdot \log z + \left(t - 1\right) \cdot \log a\right) - b}}{y} \]
      2. Add Preprocessing
      3. Taylor expanded in y around inf

        \[\leadsto \frac{x \cdot e^{\color{blue}{y \cdot \log z} - b}}{y} \]
      4. Step-by-step derivation
        1. +-rgt-identityN/A

          \[\leadsto \frac{x \cdot e^{\color{blue}{\left(y \cdot \log z + 0\right)} - b}}{y} \]
        2. accelerator-lowering-fma.f64N/A

          \[\leadsto \frac{x \cdot e^{\color{blue}{\mathsf{fma}\left(y, \log z, 0\right)} - b}}{y} \]
        3. log-lowering-log.f6481.0

          \[\leadsto \frac{x \cdot e^{\mathsf{fma}\left(y, \color{blue}{\log z}, 0\right) - b}}{y} \]
      5. Simplified81.0%

        \[\leadsto \frac{x \cdot e^{\color{blue}{\mathsf{fma}\left(y, \log z, 0\right)} - b}}{y} \]
    3. Recombined 2 regimes into one program.
    4. Final simplification (accuracy: 84.0%)

      \[\leadsto \begin{array}{l} \mathbf{if}\;\log a \cdot \left(t + -1\right) \leq -5 \cdot 10^{+145}:\\ \;\;\;\;\frac{x \cdot {a}^{t}}{y}\\ \mathbf{elif}\;\log a \cdot \left(t + -1\right) \leq 5 \cdot 10^{+130}:\\ \;\;\;\;\frac{x \cdot e^{\mathsf{fma}\left(y, \log z, 0\right) - b}}{y}\\ \mathbf{else}:\\ \;\;\;\;\frac{x \cdot {a}^{t}}{y}\\ \end{array} \]
    5. Add Preprocessing

    Alternative 6: 63.1% accurate, 1.0× speedup?

    \[\begin{array}{l} \\ \begin{array}{l} t_1 := \log a \cdot \left(t + -1\right)\\ t_2 := \frac{x \cdot {a}^{t}}{y}\\ \mathbf{if}\;t\_1 \leq -620:\\ \;\;\;\;t\_2\\ \mathbf{elif}\;t\_1 \leq 1000:\\ \;\;\;\;\frac{x}{a \cdot \left(y \cdot \mathsf{fma}\left(b, \mathsf{fma}\left(b, \mathsf{fma}\left(b, 0.16666666666666666, 0.5\right), 1\right), 1\right)\right)}\\ \mathbf{else}:\\ \;\;\;\;t\_2\\ \end{array} \end{array} \]
    ;; Herbie Alternative 6: regime split on t_1 = log(a)*(t-1).
    ;; Extreme regimes use t_2 = x*a^t/y; the middle regime replaces exp(b)
    ;; by its cubic Taylor polynomial 1 + b + b^2/2 + b^3/6, evaluated with
    ;; nested fma (0.16666666666666666 = 1/6, 0.5 = 1/2).
    (FPCore (x y z t a b)
     :precision binary64
     (let* ((t_1 (* (log a) (+ t -1.0))) (t_2 (/ (* x (pow a t)) y)))
       (if (<= t_1 -620.0)
         t_2
         (if (<= t_1 1000.0)
           (/
            x
            (* a (* y (fma b (fma b (fma b 0.16666666666666666 0.5) 1.0) 1.0))))
           t_2))))
    /* Herbie Alternative 6: split on t_1 = log(a)*(t-1).  Extreme regimes use
     * t_2 = x*pow(a,t)/y; the middle regime replaces exp(b) by its cubic
     * Taylor polynomial 1 + b + b^2/2 + b^3/6 via nested fma
     * (0.16666666666666666 = 1/6). */
    double code(double x, double y, double z, double t, double a, double b) {
    	double t_1 = log(a) * (t + -1.0);
    	double t_2 = (x * pow(a, t)) / y;
    	double tmp;
    	if (t_1 <= -620.0) {
    		tmp = t_2;
    	} else if (t_1 <= 1000.0) {
    		tmp = x / (a * (y * fma(b, fma(b, fma(b, 0.16666666666666666, 0.5), 1.0), 1.0)));
    	} else {
    		tmp = t_2;
    	}
    	return tmp;
    }
    
    # Herbie Alternative 6: split on t_1 = log(a)*(t-1).  Extreme regimes use
    # t_2 = x*a^t/y; the middle regime replaces exp(b) by its cubic Taylor
    # polynomial 1 + b + b^2/2 + b^3/6 via nested fma (0.16666666666666666 = 1/6).
    function code(x, y, z, t, a, b)
    	t_1 = Float64(log(a) * Float64(t + -1.0))
    	t_2 = Float64(Float64(x * (a ^ t)) / y)
    	tmp = 0.0
    	if (t_1 <= -620.0)
    		tmp = t_2;
    	elseif (t_1 <= 1000.0)
    		tmp = Float64(x / Float64(a * Float64(y * fma(b, fma(b, fma(b, 0.16666666666666666, 0.5), 1.0), 1.0))));
    	else
    		tmp = t_2;
    	end
    	return tmp
    end
    
    (* Herbie Alternative 6 in Mathematica: t$95$1 = log(a)*(t-1) selects the
       regime; the middle branch uses the cubic Taylor polynomial of exp(b). *)
    code[x_, y_, z_, t_, a_, b_] := Block[{t$95$1 = N[(N[Log[a], $MachinePrecision] * N[(t + -1.0), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$2 = N[(N[(x * N[Power[a, t], $MachinePrecision]), $MachinePrecision] / y), $MachinePrecision]}, If[LessEqual[t$95$1, -620.0], t$95$2, If[LessEqual[t$95$1, 1000.0], N[(x / N[(a * N[(y * N[(b * N[(b * N[(b * 0.16666666666666666 + 0.5), $MachinePrecision] + 1.0), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], t$95$2]]]]
    
    \begin{array}{l}
    
    \\
    \begin{array}{l}
    t_1 := \log a \cdot \left(t + -1\right)\\
    t_2 := \frac{x \cdot {a}^{t}}{y}\\
    \mathbf{if}\;t\_1 \leq -620:\\
    \;\;\;\;t\_2\\
    
    \mathbf{elif}\;t\_1 \leq 1000:\\
    \;\;\;\;\frac{x}{a \cdot \left(y \cdot \mathsf{fma}\left(b, \mathsf{fma}\left(b, \mathsf{fma}\left(b, 0.16666666666666666, 0.5\right), 1\right), 1\right)\right)}\\
    
    \mathbf{else}:\\
    \;\;\;\;t\_2\\
    
    
    \end{array}
    \end{array}
    
    Derivation
    1. Split input into 2 regimes
    2. if (*.f64 (-.f64 t #s(literal 1 binary64)) (log.f64 a)) < -620 or 1e3 < (*.f64 (-.f64 t #s(literal 1 binary64)) (log.f64 a))

      1. Initial program 100.0%

        \[\frac{x \cdot e^{\left(y \cdot \log z + \left(t - 1\right) \cdot \log a\right) - b}}{y} \]
      2. Add Preprocessing
      3. Taylor expanded in t around inf

        \[\leadsto \frac{x \cdot e^{\color{blue}{t \cdot \log a} - b}}{y} \]
      4. Step-by-step derivation
        1. +-rgt-identityN/A

          \[\leadsto \frac{x \cdot e^{\color{blue}{\left(t \cdot \log a + 0\right)} - b}}{y} \]
        2. *-commutativeN/A

          \[\leadsto \frac{x \cdot e^{\left(\color{blue}{\log a \cdot t} + 0\right) - b}}{y} \]
        3. accelerator-lowering-fma.f64N/A

          \[\leadsto \frac{x \cdot e^{\color{blue}{\mathsf{fma}\left(\log a, t, 0\right)} - b}}{y} \]
        4. rem-exp-logN/A

          \[\leadsto \frac{x \cdot e^{\mathsf{fma}\left(\log \color{blue}{\left(e^{\log a}\right)}, t, 0\right) - b}}{y} \]
        5. log-lowering-log.f64N/A

          \[\leadsto \frac{x \cdot e^{\mathsf{fma}\left(\color{blue}{\log \left(e^{\log a}\right)}, t, 0\right) - b}}{y} \]
        6. rem-exp-log89.5

          \[\leadsto \frac{x \cdot e^{\mathsf{fma}\left(\log \color{blue}{a}, t, 0\right) - b}}{y} \]
      5. Simplified89.5%

        \[\leadsto \frac{x \cdot e^{\color{blue}{\mathsf{fma}\left(\log a, t, 0\right)} - b}}{y} \]
      6. Taylor expanded in b around 0

        \[\leadsto \color{blue}{\frac{x \cdot {a}^{t}}{y}} \]
      7. Step-by-step derivation
        1. /-lowering-/.f64N/A

          \[\leadsto \color{blue}{\frac{x \cdot {a}^{t}}{y}} \]
        2. *-lowering-*.f64N/A

          \[\leadsto \frac{\color{blue}{x \cdot {a}^{t}}}{y} \]
        3. pow-lowering-pow.f6479.0

          \[\leadsto \frac{x \cdot \color{blue}{{a}^{t}}}{y} \]
      8. Simplified79.0%

        \[\leadsto \color{blue}{\frac{x \cdot {a}^{t}}{y}} \]

      if -620 < (*.f64 (-.f64 t #s(literal 1 binary64)) (log.f64 a)) < 1e3

      1. Initial program 96.7%

        \[\frac{x \cdot e^{\left(y \cdot \log z + \left(t - 1\right) \cdot \log a\right) - b}}{y} \]
      2. Add Preprocessing
      3. Taylor expanded in y around 0

        \[\leadsto \color{blue}{\frac{x \cdot e^{\log a \cdot \left(t - 1\right) - b}}{y}} \]
      4. Step-by-step derivation
        1. *-commutativeN/A

          \[\leadsto \frac{\color{blue}{e^{\log a \cdot \left(t - 1\right) - b} \cdot x}}{y} \]
        2. exp-diffN/A

          \[\leadsto \frac{\color{blue}{\frac{e^{\log a \cdot \left(t - 1\right)}}{e^{b}}} \cdot x}{y} \]
        3. associate-*l/N/A

          \[\leadsto \frac{\color{blue}{\frac{e^{\log a \cdot \left(t - 1\right)} \cdot x}{e^{b}}}}{y} \]
        4. associate-/l/N/A

          \[\leadsto \color{blue}{\frac{e^{\log a \cdot \left(t - 1\right)} \cdot x}{y \cdot e^{b}}} \]
        5. /-lowering-/.f64N/A

          \[\leadsto \color{blue}{\frac{e^{\log a \cdot \left(t - 1\right)} \cdot x}{y \cdot e^{b}}} \]
        6. *-lowering-*.f64N/A

          \[\leadsto \frac{\color{blue}{e^{\log a \cdot \left(t - 1\right)} \cdot x}}{y \cdot e^{b}} \]
        7. exp-prodN/A

          \[\leadsto \frac{\color{blue}{{\left(e^{\log a}\right)}^{\left(t - 1\right)}} \cdot x}{y \cdot e^{b}} \]
        8. pow-lowering-pow.f64N/A

          \[\leadsto \frac{\color{blue}{{\left(e^{\log a}\right)}^{\left(t - 1\right)}} \cdot x}{y \cdot e^{b}} \]
        9. rem-exp-logN/A

          \[\leadsto \frac{{\color{blue}{a}}^{\left(t - 1\right)} \cdot x}{y \cdot e^{b}} \]
        10. sub-negN/A

          \[\leadsto \frac{{a}^{\color{blue}{\left(t + \left(\mathsf{neg}\left(1\right)\right)\right)}} \cdot x}{y \cdot e^{b}} \]
        11. metadata-evalN/A

          \[\leadsto \frac{{a}^{\left(t + \color{blue}{-1}\right)} \cdot x}{y \cdot e^{b}} \]
        12. +-lowering-+.f64N/A

          \[\leadsto \frac{{a}^{\color{blue}{\left(t + -1\right)}} \cdot x}{y \cdot e^{b}} \]
        13. *-lowering-*.f64N/A

          \[\leadsto \frac{{a}^{\left(t + -1\right)} \cdot x}{\color{blue}{y \cdot e^{b}}} \]
        14. exp-lowering-exp.f6464.1

          \[\leadsto \frac{{a}^{\left(t + -1\right)} \cdot x}{y \cdot \color{blue}{e^{b}}} \]
      5. Simplified64.1%

        \[\leadsto \color{blue}{\frac{{a}^{\left(t + -1\right)} \cdot x}{y \cdot e^{b}}} \]
      6. Taylor expanded in t around 0

        \[\leadsto \color{blue}{\frac{x}{a \cdot \left(y \cdot e^{b}\right)}} \]
      7. Step-by-step derivation
        1. /-lowering-/.f64N/A

          \[\leadsto \color{blue}{\frac{x}{a \cdot \left(y \cdot e^{b}\right)}} \]
        2. *-lowering-*.f64N/A

          \[\leadsto \frac{x}{\color{blue}{a \cdot \left(y \cdot e^{b}\right)}} \]
        3. *-lowering-*.f64N/A

          \[\leadsto \frac{x}{a \cdot \color{blue}{\left(y \cdot e^{b}\right)}} \]
        4. exp-lowering-exp.f6468.6

          \[\leadsto \frac{x}{a \cdot \left(y \cdot \color{blue}{e^{b}}\right)} \]
      8. Simplified68.6%

        \[\leadsto \color{blue}{\frac{x}{a \cdot \left(y \cdot e^{b}\right)}} \]
      9. Taylor expanded in b around 0

        \[\leadsto \frac{x}{a \cdot \left(y \cdot \color{blue}{\left(1 + b \cdot \left(1 + b \cdot \left(\frac{1}{2} + \frac{1}{6} \cdot b\right)\right)\right)}\right)} \]
      10. Step-by-step derivation
        1. +-commutativeN/A

          \[\leadsto \frac{x}{a \cdot \left(y \cdot \color{blue}{\left(b \cdot \left(1 + b \cdot \left(\frac{1}{2} + \frac{1}{6} \cdot b\right)\right) + 1\right)}\right)} \]
        2. accelerator-lowering-fma.f64N/A

          \[\leadsto \frac{x}{a \cdot \left(y \cdot \color{blue}{\mathsf{fma}\left(b, 1 + b \cdot \left(\frac{1}{2} + \frac{1}{6} \cdot b\right), 1\right)}\right)} \]
        3. +-commutativeN/A

          \[\leadsto \frac{x}{a \cdot \left(y \cdot \mathsf{fma}\left(b, \color{blue}{b \cdot \left(\frac{1}{2} + \frac{1}{6} \cdot b\right) + 1}, 1\right)\right)} \]
        4. accelerator-lowering-fma.f64N/A

          \[\leadsto \frac{x}{a \cdot \left(y \cdot \mathsf{fma}\left(b, \color{blue}{\mathsf{fma}\left(b, \frac{1}{2} + \frac{1}{6} \cdot b, 1\right)}, 1\right)\right)} \]
        5. +-commutativeN/A

          \[\leadsto \frac{x}{a \cdot \left(y \cdot \mathsf{fma}\left(b, \mathsf{fma}\left(b, \color{blue}{\frac{1}{6} \cdot b + \frac{1}{2}}, 1\right), 1\right)\right)} \]
        6. *-commutativeN/A

          \[\leadsto \frac{x}{a \cdot \left(y \cdot \mathsf{fma}\left(b, \mathsf{fma}\left(b, \color{blue}{b \cdot \frac{1}{6}} + \frac{1}{2}, 1\right), 1\right)\right)} \]
        7. accelerator-lowering-fma.f6454.5

          \[\leadsto \frac{x}{a \cdot \left(y \cdot \mathsf{fma}\left(b, \mathsf{fma}\left(b, \color{blue}{\mathsf{fma}\left(b, 0.16666666666666666, 0.5\right)}, 1\right), 1\right)\right)} \]
      11. Simplified54.5%

        \[\leadsto \frac{x}{a \cdot \left(y \cdot \color{blue}{\mathsf{fma}\left(b, \mathsf{fma}\left(b, \mathsf{fma}\left(b, 0.16666666666666666, 0.5\right), 1\right), 1\right)}\right)} \]
    3. Recombined 2 regimes into one program.
    4. Final simplification (accuracy: 67.0%)

      \[\leadsto \begin{array}{l} \mathbf{if}\;\log a \cdot \left(t + -1\right) \leq -620:\\ \;\;\;\;\frac{x \cdot {a}^{t}}{y}\\ \mathbf{elif}\;\log a \cdot \left(t + -1\right) \leq 1000:\\ \;\;\;\;\frac{x}{a \cdot \left(y \cdot \mathsf{fma}\left(b, \mathsf{fma}\left(b, \mathsf{fma}\left(b, 0.16666666666666666, 0.5\right), 1\right), 1\right)\right)}\\ \mathbf{else}:\\ \;\;\;\;\frac{x \cdot {a}^{t}}{y}\\ \end{array} \]
    5. Add Preprocessing

    Alternative 7: 81.6% accurate, 1.4× speedup?

    \[\begin{array}{l} \\ \begin{array}{l} t_1 := \frac{x \cdot e^{\mathsf{fma}\left(y, \log z, 0\right) - b}}{y}\\ \mathbf{if}\;y \leq -3.6 \cdot 10^{+43}:\\ \;\;\;\;t\_1\\ \mathbf{elif}\;y \leq 2.95 \cdot 10^{-237}:\\ \;\;\;\;\frac{x \cdot e^{\mathsf{fma}\left(\log a, t, 0\right) - b}}{y}\\ \mathbf{elif}\;y \leq 2.1 \cdot 10^{-37}:\\ \;\;\;\;{a}^{\left(t + -1\right)} \cdot \frac{x}{y}\\ \mathbf{else}:\\ \;\;\;\;t\_1\\ \end{array} \end{array} \]
    ;; Herbie Alternative 7: regime split on y itself (not on a derived
    ;; quantity).  t_1 drops the (t-1)*log(a) term; the second regime drops
    ;; y*log(z) instead; the third regime drops both exp terms and b,
    ;; rewriting to a^(t-1) * x/y.
    (FPCore (x y z t a b)
     :precision binary64
     (let* ((t_1 (/ (* x (exp (- (fma y (log z) 0.0) b))) y)))
       (if (<= y -3.6e+43)
         t_1
         (if (<= y 2.95e-237)
           (/ (* x (exp (- (fma (log a) t 0.0) b))) y)
           (if (<= y 2.1e-37) (* (pow a (+ t -1.0)) (/ x y)) t_1)))))
    /* Herbie Alternative 7: regime split directly on y.  t_1 drops the
     * (t-1)*log(a) term; the second regime drops y*log(z) instead; the third
     * regime drops both exp terms and b, using pow(a, t-1) * x/y. */
    double code(double x, double y, double z, double t, double a, double b) {
    	/* fma(y, log(z), 0.0) computes y*log(z) with a single rounding. */
    	double t_1 = (x * exp((fma(y, log(z), 0.0) - b))) / y;
    	double tmp;
    	if (y <= -3.6e+43) {
    		tmp = t_1;
    	} else if (y <= 2.95e-237) {
    		tmp = (x * exp((fma(log(a), t, 0.0) - b))) / y;
    	} else if (y <= 2.1e-37) {
    		tmp = pow(a, (t + -1.0)) * (x / y);
    	} else {
    		tmp = t_1;
    	}
    	return tmp;
    }
    
    # Herbie Alternative 7: regime split directly on y.  t_1 drops the
    # (t-1)*log(a) term; the second regime drops y*log(z) instead; the third
    # regime drops both exp terms and b, using a^(t-1) * x/y.
    function code(x, y, z, t, a, b)
    	# fma(y, log(z), 0.0) computes y*log(z) with a single rounding.
    	t_1 = Float64(Float64(x * exp(Float64(fma(y, log(z), 0.0) - b))) / y)
    	tmp = 0.0
    	if (y <= -3.6e+43)
    		tmp = t_1;
    	elseif (y <= 2.95e-237)
    		tmp = Float64(Float64(x * exp(Float64(fma(log(a), t, 0.0) - b))) / y);
    	elseif (y <= 2.1e-37)
    		tmp = Float64((a ^ Float64(t + -1.0)) * Float64(x / y));
    	else
    		tmp = t_1;
    	end
    	return tmp
    end
    
    (* Herbie Alternative 7 in Mathematica: branches select by the value of y;
       t$95$1 is the variant that drops the (t-1)*log(a) term. *)
    code[x_, y_, z_, t_, a_, b_] := Block[{t$95$1 = N[(N[(x * N[Exp[N[(N[(y * N[Log[z], $MachinePrecision] + 0.0), $MachinePrecision] - b), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / y), $MachinePrecision]}, If[LessEqual[y, -3.6e+43], t$95$1, If[LessEqual[y, 2.95e-237], N[(N[(x * N[Exp[N[(N[(N[Log[a], $MachinePrecision] * t + 0.0), $MachinePrecision] - b), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / y), $MachinePrecision], If[LessEqual[y, 2.1e-37], N[(N[Power[a, N[(t + -1.0), $MachinePrecision]], $MachinePrecision] * N[(x / y), $MachinePrecision]), $MachinePrecision], t$95$1]]]]
    
    \begin{array}{l}
    
    \\
    \begin{array}{l}
    t_1 := \frac{x \cdot e^{\mathsf{fma}\left(y, \log z, 0\right) - b}}{y}\\
    \mathbf{if}\;y \leq -3.6 \cdot 10^{+43}:\\
    \;\;\;\;t\_1\\
    
    \mathbf{elif}\;y \leq 2.95 \cdot 10^{-237}:\\
    \;\;\;\;\frac{x \cdot e^{\mathsf{fma}\left(\log a, t, 0\right) - b}}{y}\\
    
    \mathbf{elif}\;y \leq 2.1 \cdot 10^{-37}:\\
    \;\;\;\;{a}^{\left(t + -1\right)} \cdot \frac{x}{y}\\
    
    \mathbf{else}:\\
    \;\;\;\;t\_1\\
    
    
    \end{array}
    \end{array}
    
    Derivation
    1. Split input into 3 regimes
    2. if y < -3.6000000000000001e43 or 2.1000000000000001e-37 < y

      1. Initial program 100.0%

        \[\frac{x \cdot e^{\left(y \cdot \log z + \left(t - 1\right) \cdot \log a\right) - b}}{y} \]
      2. Add Preprocessing
      3. Taylor expanded in y around inf

        \[\leadsto \frac{x \cdot e^{\color{blue}{y \cdot \log z} - b}}{y} \]
      4. Step-by-step derivation
        1. +-rgt-identity — N/A

          \[\leadsto \frac{x \cdot e^{\color{blue}{\left(y \cdot \log z + 0\right)} - b}}{y} \]
        2. accelerator-lowering-fma.f64N/A

          \[\leadsto \frac{x \cdot e^{\color{blue}{\mathsf{fma}\left(y, \log z, 0\right)} - b}}{y} \]
        3. log-lowering-log.f64 — 92.5

          \[\leadsto \frac{x \cdot e^{\mathsf{fma}\left(y, \color{blue}{\log z}, 0\right) - b}}{y} \]
      5. Simplified — 92.5%

        \[\leadsto \frac{x \cdot e^{\color{blue}{\mathsf{fma}\left(y, \log z, 0\right)} - b}}{y} \]

      if -3.6000000000000001e43 < y < 2.95000000000000018e-237

      1. Initial program 98.2%

        \[\frac{x \cdot e^{\left(y \cdot \log z + \left(t - 1\right) \cdot \log a\right) - b}}{y} \]
      2. Add Preprocessing
      3. Taylor expanded in t around inf

        \[\leadsto \frac{x \cdot e^{\color{blue}{t \cdot \log a} - b}}{y} \]
      4. Step-by-step derivation
        1. +-rgt-identityN/A

          \[\leadsto \frac{x \cdot e^{\color{blue}{\left(t \cdot \log a + 0\right)} - b}}{y} \]
        2. *-commutativeN/A

          \[\leadsto \frac{x \cdot e^{\left(\color{blue}{\log a \cdot t} + 0\right) - b}}{y} \]
        3. accelerator-lowering-fma.f64N/A

          \[\leadsto \frac{x \cdot e^{\color{blue}{\mathsf{fma}\left(\log a, t, 0\right)} - b}}{y} \]
        4. rem-exp-logN/A

          \[\leadsto \frac{x \cdot e^{\mathsf{fma}\left(\log \color{blue}{\left(e^{\log a}\right)}, t, 0\right) - b}}{y} \]
        5. log-lowering-log.f64N/A

          \[\leadsto \frac{x \cdot e^{\mathsf{fma}\left(\color{blue}{\log \left(e^{\log a}\right)}, t, 0\right) - b}}{y} \]
        6. rem-exp-log85.5

          \[\leadsto \frac{x \cdot e^{\mathsf{fma}\left(\log \color{blue}{a}, t, 0\right) - b}}{y} \]
      5. Simplified85.5%

        \[\leadsto \frac{x \cdot e^{\color{blue}{\mathsf{fma}\left(\log a, t, 0\right)} - b}}{y} \]

      if 2.95000000000000018e-237 < y < 2.1000000000000001e-37

      1. Initial program 93.4%

        \[\frac{x \cdot e^{\left(y \cdot \log z + \left(t - 1\right) \cdot \log a\right) - b}}{y} \]
      2. Add Preprocessing
      3. Taylor expanded in b around 0

        \[\leadsto \color{blue}{\frac{x \cdot e^{y \cdot \log z + \log a \cdot \left(t - 1\right)}}{y}} \]
      4. Step-by-step derivation
        1. +-rgt-identityN/A

          \[\leadsto \color{blue}{\frac{x \cdot e^{y \cdot \log z + \log a \cdot \left(t - 1\right)}}{y} + 0} \]
        2. associate-/l*N/A

          \[\leadsto \color{blue}{x \cdot \frac{e^{y \cdot \log z + \log a \cdot \left(t - 1\right)}}{y}} + 0 \]
        3. accelerator-lowering-fma.f64N/A

          \[\leadsto \color{blue}{\mathsf{fma}\left(x, \frac{e^{y \cdot \log z + \log a \cdot \left(t - 1\right)}}{y}, 0\right)} \]
      5. Simplified78.1%

        \[\leadsto \color{blue}{\mathsf{fma}\left(x, {a}^{\left(t + -1\right)} \cdot \frac{{z}^{y}}{y}, 0\right)} \]
      6. Taylor expanded in y around 0

        \[\leadsto \color{blue}{\frac{x \cdot e^{\log a \cdot \left(t - 1\right)}}{y}} \]
      7. Step-by-step derivation
        1. *-commutativeN/A

          \[\leadsto \frac{\color{blue}{e^{\log a \cdot \left(t - 1\right)} \cdot x}}{y} \]
        2. associate-/l*N/A

          \[\leadsto \color{blue}{e^{\log a \cdot \left(t - 1\right)} \cdot \frac{x}{y}} \]
        3. *-lowering-*.f64N/A

          \[\leadsto \color{blue}{e^{\log a \cdot \left(t - 1\right)} \cdot \frac{x}{y}} \]
        4. exp-to-powN/A

          \[\leadsto \color{blue}{{a}^{\left(t - 1\right)}} \cdot \frac{x}{y} \]
        5. pow-lowering-pow.f64N/A

          \[\leadsto \color{blue}{{a}^{\left(t - 1\right)}} \cdot \frac{x}{y} \]
        6. sub-negN/A

          \[\leadsto {a}^{\color{blue}{\left(t + \left(\mathsf{neg}\left(1\right)\right)\right)}} \cdot \frac{x}{y} \]
        7. metadata-evalN/A

          \[\leadsto {a}^{\left(t + \color{blue}{-1}\right)} \cdot \frac{x}{y} \]
        8. +-commutativeN/A

          \[\leadsto {a}^{\color{blue}{\left(-1 + t\right)}} \cdot \frac{x}{y} \]
        9. +-lowering-+.f64N/A

          \[\leadsto {a}^{\color{blue}{\left(-1 + t\right)}} \cdot \frac{x}{y} \]
        10. /-lowering-/.f6480.6

          \[\leadsto {a}^{\left(-1 + t\right)} \cdot \color{blue}{\frac{x}{y}} \]
      8. Simplified80.6%

        \[\leadsto \color{blue}{{a}^{\left(-1 + t\right)} \cdot \frac{x}{y}} \]
    3. Recombined 3 regimes into one program.
    4. Final simplification88.3%

      \[\leadsto \begin{array}{l} \mathbf{if}\;y \leq -3.6 \cdot 10^{+43}:\\ \;\;\;\;\frac{x \cdot e^{\mathsf{fma}\left(y, \log z, 0\right) - b}}{y}\\ \mathbf{elif}\;y \leq 2.95 \cdot 10^{-237}:\\ \;\;\;\;\frac{x \cdot e^{\mathsf{fma}\left(\log a, t, 0\right) - b}}{y}\\ \mathbf{elif}\;y \leq 2.1 \cdot 10^{-37}:\\ \;\;\;\;{a}^{\left(t + -1\right)} \cdot \frac{x}{y}\\ \mathbf{else}:\\ \;\;\;\;\frac{x \cdot e^{\mathsf{fma}\left(y, \log z, 0\right) - b}}{y}\\ \end{array} \]
    5. Add Preprocessing

    Alternative 8: 86.8% accurate, 1.4× speedup?

    \[\begin{array}{l} \\ \begin{array}{l} t_1 := \frac{x \cdot e^{\mathsf{fma}\left(\log a, t, 0\right) - b}}{y}\\ \mathbf{if}\;b \leq -6.1 \cdot 10^{+27}:\\ \;\;\;\;t\_1\\ \mathbf{elif}\;b \leq 2.3:\\ \;\;\;\;\mathsf{fma}\left(x, {a}^{\left(t + -1\right)} \cdot \frac{{z}^{y}}{y}, 0\right)\\ \mathbf{else}:\\ \;\;\;\;t\_1\\ \end{array} \end{array} \]
    ;; Herbie Alternative 8 (FPCore): split on b.  t_1 is the shared branch
    ;; with the y*log(z) term dropped; the middle regime -6.1e27 < b <= 2.3
    ;; keeps both power terms via pow/fma.
    (FPCore (x y z t a b)
     :precision binary64
     (let* ((t_1 (/ (* x (exp (- (fma (log a) t 0.0) b))) y)))
       (if (<= b -6.1e+27)
         t_1
         (if (<= b 2.3) (fma x (* (pow a (+ t -1.0)) (/ (pow z y) y)) 0.0) t_1))))
    double code(double x, double y, double z, double t, double a, double b) {
    	double t_1 = (x * exp((fma(log(a), t, 0.0) - b))) / y;
    	double tmp;
    	if (b <= -6.1e+27) {
    		tmp = t_1;
    	} else if (b <= 2.3) {
    		tmp = fma(x, (pow(a, (t + -1.0)) * (pow(z, y) / y)), 0.0);
    	} else {
    		tmp = t_1;
    	}
    	return tmp;
    }
    
    # Herbie Alternative 8 (Julia): regime split on b.
    function code(x, y, z, t, a, b)
    	# NaN-safe form of -6.1e27 < b <= 2.3; NaN b falls to the shared branch,
    	# matching the reference if/elseif/else version.
    	if !(b <= -6.1e+27) && (b <= 2.3)
    		# Moderate b: keep both power terms explicitly.
    		return fma(x, Float64((a ^ Float64(t + -1.0)) * Float64((z ^ y) / y)), 0.0)
    	end
    	# Extreme b: the y*log(z) term is dropped.
    	return Float64(Float64(x * exp(Float64(fma(log(a), t, 0.0) - b))) / y)
    end
    
    code[x_, y_, z_, t_, a_, b_] := Block[{t$95$1 = N[(N[(x * N[Exp[N[(N[(N[Log[a], $MachinePrecision] * t + 0.0), $MachinePrecision] - b), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / y), $MachinePrecision]}, If[LessEqual[b, -6.1e+27], t$95$1, If[LessEqual[b, 2.3], N[(x * N[(N[Power[a, N[(t + -1.0), $MachinePrecision]], $MachinePrecision] * N[(N[Power[z, y], $MachinePrecision] / y), $MachinePrecision]), $MachinePrecision] + 0.0), $MachinePrecision], t$95$1]]]
    
    \begin{array}{l}
    
    \\
    \begin{array}{l}
    t_1 := \frac{x \cdot e^{\mathsf{fma}\left(\log a, t, 0\right) - b}}{y}\\
    \mathbf{if}\;b \leq -6.1 \cdot 10^{+27}:\\
    \;\;\;\;t\_1\\
    
    \mathbf{elif}\;b \leq 2.3:\\
    \;\;\;\;\mathsf{fma}\left(x, {a}^{\left(t + -1\right)} \cdot \frac{{z}^{y}}{y}, 0\right)\\
    
    \mathbf{else}:\\
    \;\;\;\;t\_1\\
    
    
    \end{array}
    \end{array}
    
    Derivation
    1. Split input into 2 regimes
    2. if b < -6.0999999999999997e27 or 2.2999999999999998 < b

      1. Initial program 100.0%

        \[\frac{x \cdot e^{\left(y \cdot \log z + \left(t - 1\right) \cdot \log a\right) - b}}{y} \]
      2. Add Preprocessing
      3. Taylor expanded in t around inf

        \[\leadsto \frac{x \cdot e^{\color{blue}{t \cdot \log a} - b}}{y} \]
      4. Step-by-step derivation
        1. +-rgt-identityN/A

          \[\leadsto \frac{x \cdot e^{\color{blue}{\left(t \cdot \log a + 0\right)} - b}}{y} \]
        2. *-commutativeN/A

          \[\leadsto \frac{x \cdot e^{\left(\color{blue}{\log a \cdot t} + 0\right) - b}}{y} \]
        3. accelerator-lowering-fma.f64N/A

          \[\leadsto \frac{x \cdot e^{\color{blue}{\mathsf{fma}\left(\log a, t, 0\right)} - b}}{y} \]
        4. rem-exp-logN/A

          \[\leadsto \frac{x \cdot e^{\mathsf{fma}\left(\log \color{blue}{\left(e^{\log a}\right)}, t, 0\right) - b}}{y} \]
        5. log-lowering-log.f64N/A

          \[\leadsto \frac{x \cdot e^{\mathsf{fma}\left(\color{blue}{\log \left(e^{\log a}\right)}, t, 0\right) - b}}{y} \]
        6. rem-exp-log90.6

          \[\leadsto \frac{x \cdot e^{\mathsf{fma}\left(\log \color{blue}{a}, t, 0\right) - b}}{y} \]
      5. Simplified90.6%

        \[\leadsto \frac{x \cdot e^{\color{blue}{\mathsf{fma}\left(\log a, t, 0\right)} - b}}{y} \]

      if -6.0999999999999997e27 < b < 2.2999999999999998

      1. Initial program 96.8%

        \[\frac{x \cdot e^{\left(y \cdot \log z + \left(t - 1\right) \cdot \log a\right) - b}}{y} \]
      2. Add Preprocessing
      3. Taylor expanded in b around 0

        \[\leadsto \color{blue}{\frac{x \cdot e^{y \cdot \log z + \log a \cdot \left(t - 1\right)}}{y}} \]
      4. Step-by-step derivation
        1. +-rgt-identityN/A

          \[\leadsto \color{blue}{\frac{x \cdot e^{y \cdot \log z + \log a \cdot \left(t - 1\right)}}{y} + 0} \]
        2. associate-/l*N/A

          \[\leadsto \color{blue}{x \cdot \frac{e^{y \cdot \log z + \log a \cdot \left(t - 1\right)}}{y}} + 0 \]
        3. accelerator-lowering-fma.f64N/A

          \[\leadsto \color{blue}{\mathsf{fma}\left(x, \frac{e^{y \cdot \log z + \log a \cdot \left(t - 1\right)}}{y}, 0\right)} \]
      5. Simplified89.2%

        \[\leadsto \color{blue}{\mathsf{fma}\left(x, {a}^{\left(t + -1\right)} \cdot \frac{{z}^{y}}{y}, 0\right)} \]
    3. Recombined 2 regimes into one program.
    4. Add Preprocessing

    Alternative 9: 75.2% accurate, 2.4× speedup?

    \[\begin{array}{l} \\ \begin{array}{l} t_1 := \frac{x \cdot {z}^{y}}{y}\\ \mathbf{if}\;y \leq -2.05 \cdot 10^{+36}:\\ \;\;\;\;t\_1\\ \mathbf{elif}\;y \leq -1.1 \cdot 10^{-113}:\\ \;\;\;\;\frac{x \cdot {a}^{\left(t + -1\right)}}{y}\\ \mathbf{elif}\;y \leq 3.1 \cdot 10^{+81}:\\ \;\;\;\;\frac{x}{a \cdot \left(y \cdot e^{b}\right)}\\ \mathbf{else}:\\ \;\;\;\;t\_1\\ \end{array} \end{array} \]
    ;; Herbie Alternative 9 (FPCore): three regimes selected on y.
    ;; t_1 drops b entirely (extreme |y|); the middle regimes drop the
    ;; y*log(z) term and then the t term as well.
    (FPCore (x y z t a b)
     :precision binary64
     (let* ((t_1 (/ (* x (pow z y)) y)))
       (if (<= y -2.05e+36)
         t_1
         (if (<= y -1.1e-113)
           (/ (* x (pow a (+ t -1.0))) y)
           (if (<= y 3.1e+81) (/ x (* a (* y (exp b)))) t_1)))))
    double code(double x, double y, double z, double t, double a, double b) {
    	double t_1 = (x * pow(z, y)) / y;
    	double tmp;
    	if (y <= -2.05e+36) {
    		tmp = t_1;
    	} else if (y <= -1.1e-113) {
    		tmp = (x * pow(a, (t + -1.0))) / y;
    	} else if (y <= 3.1e+81) {
    		tmp = x / (a * (y * exp(b)));
    	} else {
    		tmp = t_1;
    	}
    	return tmp;
    }
    
    ! Herbie Alternative 9 (Fortran): regime split on y for the reduced
    ! forms of x * exp((y*log(z) + (t-1)*log(a)) - b) / y.
    real(8) function code(x, y, z, t, a, b)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        real(8), intent (in) :: z
        real(8), intent (in) :: t
        real(8), intent (in) :: a
        real(8), intent (in) :: b
        real(8) :: t_1
        real(8) :: tmp
        ! Shared extreme-|y| branch: b dropped, exponential reduced to z**y.
        t_1 = (x * (z ** y)) / y
        if (y <= (-2.05d+36)) then
            tmp = t_1
        else if (y <= (-1.1d-113)) then
            ! Moderate negative y: the y*log(z) term is dropped.
            tmp = (x * (a ** (t + (-1.0d0)))) / y
        else if (y <= 3.1d+81) then
            ! Small/moderate y: the t term is dropped as well.
            tmp = x / (a * (y * exp(b)))
        else
            tmp = t_1
        end if
        code = tmp
    end function
    
    /** Herbie Alternative 9: regime split on y for
     *  x * exp((y*log(z) + (t-1)*log(a)) - b) / y. */
    public static double code(double x, double y, double z, double t, double a, double b) {
    	if (y <= -2.05e+36) {
    		// Very negative y: b dropped, exponential reduced to pow(z, y).
    		return (x * Math.pow(z, y)) / y;
    	}
    	if (y <= -1.1e-113) {
    		// Moderate negative y: the y*log(z) term is dropped.
    		return (x * Math.pow(a, (t + -1.0))) / y;
    	}
    	if (y <= 3.1e+81) {
    		// Small/moderate y: the t term is dropped as well.
    		return x / (a * (y * Math.exp(b)));
    	}
    	// Huge y (or NaN): same reduced form as the first regime.
    	return (x * Math.pow(z, y)) / y;
    }
    
    def code(x, y, z, t, a, b):
    	"""Herbie Alternative 9: regime-split approximation, selected on y.

    	Approximates x * exp((y*log(z) + (t-1)*log(a)) - b) / y; each branch
    	is the Taylor-reduced form chosen for that range of y.
    	"""
    	if y <= -2.05e+36:
    		# Very negative y: b dropped, exponential reduced to z**y.
    		return (x * math.pow(z, y)) / y
    	if y <= -1.1e-113:
    		# Moderate negative y: the y*log(z) term is dropped.
    		return (x * math.pow(a, (t + -1.0))) / y
    	if y <= 3.1e+81:
    		# Small/moderate y: the t term is dropped as well.
    		return x / (a * (y * math.exp(b)))
    	# Huge y (or NaN): same reduced form as the first regime.
    	return (x * math.pow(z, y)) / y
    
    # Herbie Alternative 9 (Julia): regime split on y.
    function code(x, y, z, t, a, b)
    	if y <= -2.05e+36
    		# Very negative y: b dropped, exponential reduced to z^y.
    		return Float64(Float64(x * (z ^ y)) / y)
    	elseif y <= -1.1e-113
    		# Moderate negative y: the y*log(z) term is dropped.
    		return Float64(Float64(x * (a ^ Float64(t + -1.0))) / y)
    	elseif y <= 3.1e+81
    		# Small/moderate y: the t term is dropped as well.
    		return Float64(x / Float64(a * Float64(y * exp(b))))
    	end
    	# Huge y (or NaN): same reduced form as the first regime.
    	return Float64(Float64(x * (z ^ y)) / y)
    end
    
    % Herbie Alternative 9 (MATLAB): regime split on y for
    % x * exp((y*log(z) + (t-1)*log(a)) - b) / y.
    function out = code(x, y, z, t, a, b)
    	% Shared extreme-|y| branch: b dropped, exponential reduced to z^y.
    	extreme = (x * (z ^ y)) / y;
    	if (y <= -2.05e+36)
    		out = extreme;
    	elseif (y <= -1.1e-113)
    		% Moderate negative y: the y*log(z) term is dropped.
    		out = (x * (a ^ (t + -1.0))) / y;
    	elseif (y <= 3.1e+81)
    		% Small/moderate y: the t term is dropped as well.
    		out = x / (a * (y * exp(b)));
    	else
    		out = extreme;
    	end
    end
    
    code[x_, y_, z_, t_, a_, b_] := Block[{t$95$1 = N[(N[(x * N[Power[z, y], $MachinePrecision]), $MachinePrecision] / y), $MachinePrecision]}, If[LessEqual[y, -2.05e+36], t$95$1, If[LessEqual[y, -1.1e-113], N[(N[(x * N[Power[a, N[(t + -1.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / y), $MachinePrecision], If[LessEqual[y, 3.1e+81], N[(x / N[(a * N[(y * N[Exp[b], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], t$95$1]]]]
    
    \begin{array}{l}
    
    \\
    \begin{array}{l}
    t_1 := \frac{x \cdot {z}^{y}}{y}\\
    \mathbf{if}\;y \leq -2.05 \cdot 10^{+36}:\\
    \;\;\;\;t\_1\\
    
    \mathbf{elif}\;y \leq -1.1 \cdot 10^{-113}:\\
    \;\;\;\;\frac{x \cdot {a}^{\left(t + -1\right)}}{y}\\
    
    \mathbf{elif}\;y \leq 3.1 \cdot 10^{+81}:\\
    \;\;\;\;\frac{x}{a \cdot \left(y \cdot e^{b}\right)}\\
    
    \mathbf{else}:\\
    \;\;\;\;t\_1\\
    
    
    \end{array}
    \end{array}
    
    Derivation
    1. Split input into 3 regimes
    2. if y < -2.05000000000000006e36 or 3.1e81 < y

      1. Initial program 100.0%

        \[\frac{x \cdot e^{\left(y \cdot \log z + \left(t - 1\right) \cdot \log a\right) - b}}{y} \]
      2. Add Preprocessing
      3. Taylor expanded in y around inf

        \[\leadsto \frac{x \cdot e^{\color{blue}{y \cdot \log z} - b}}{y} \]
      4. Step-by-step derivation
        1. +-rgt-identityN/A

          \[\leadsto \frac{x \cdot e^{\color{blue}{\left(y \cdot \log z + 0\right)} - b}}{y} \]
        2. accelerator-lowering-fma.f64N/A

          \[\leadsto \frac{x \cdot e^{\color{blue}{\mathsf{fma}\left(y, \log z, 0\right)} - b}}{y} \]
        3. log-lowering-log.f6493.0

          \[\leadsto \frac{x \cdot e^{\mathsf{fma}\left(y, \color{blue}{\log z}, 0\right) - b}}{y} \]
      5. Simplified93.0%

        \[\leadsto \frac{x \cdot e^{\color{blue}{\mathsf{fma}\left(y, \log z, 0\right)} - b}}{y} \]
      6. Taylor expanded in b around 0

        \[\leadsto \color{blue}{\frac{x \cdot {z}^{y}}{y}} \]
      7. Step-by-step derivation
        1. /-lowering-/.f64N/A

          \[\leadsto \color{blue}{\frac{x \cdot {z}^{y}}{y}} \]
        2. *-lowering-*.f64N/A

          \[\leadsto \frac{\color{blue}{x \cdot {z}^{y}}}{y} \]
        3. pow-lowering-pow.f6488.7

          \[\leadsto \frac{x \cdot \color{blue}{{z}^{y}}}{y} \]
      8. Simplified88.7%

        \[\leadsto \color{blue}{\frac{x \cdot {z}^{y}}{y}} \]

      if -2.05000000000000006e36 < y < -1.10000000000000002e-113

      1. Initial program 99.1%

        \[\frac{x \cdot e^{\left(y \cdot \log z + \left(t - 1\right) \cdot \log a\right) - b}}{y} \]
      2. Add Preprocessing
      3. Taylor expanded in y around 0

        \[\leadsto \color{blue}{\frac{x \cdot e^{\log a \cdot \left(t - 1\right) - b}}{y}} \]
      4. Step-by-step derivation
        1. *-commutativeN/A

          \[\leadsto \frac{\color{blue}{e^{\log a \cdot \left(t - 1\right) - b} \cdot x}}{y} \]
        2. exp-diffN/A

          \[\leadsto \frac{\color{blue}{\frac{e^{\log a \cdot \left(t - 1\right)}}{e^{b}}} \cdot x}{y} \]
        3. associate-*l/N/A

          \[\leadsto \frac{\color{blue}{\frac{e^{\log a \cdot \left(t - 1\right)} \cdot x}{e^{b}}}}{y} \]
        4. associate-/l/N/A

          \[\leadsto \color{blue}{\frac{e^{\log a \cdot \left(t - 1\right)} \cdot x}{y \cdot e^{b}}} \]
        5. /-lowering-/.f64N/A

          \[\leadsto \color{blue}{\frac{e^{\log a \cdot \left(t - 1\right)} \cdot x}{y \cdot e^{b}}} \]
        6. *-lowering-*.f64N/A

          \[\leadsto \frac{\color{blue}{e^{\log a \cdot \left(t - 1\right)} \cdot x}}{y \cdot e^{b}} \]
        7. exp-prodN/A

          \[\leadsto \frac{\color{blue}{{\left(e^{\log a}\right)}^{\left(t - 1\right)}} \cdot x}{y \cdot e^{b}} \]
        8. pow-lowering-pow.f64N/A

          \[\leadsto \frac{\color{blue}{{\left(e^{\log a}\right)}^{\left(t - 1\right)}} \cdot x}{y \cdot e^{b}} \]
        9. rem-exp-logN/A

          \[\leadsto \frac{{\color{blue}{a}}^{\left(t - 1\right)} \cdot x}{y \cdot e^{b}} \]
        10. sub-negN/A

          \[\leadsto \frac{{a}^{\color{blue}{\left(t + \left(\mathsf{neg}\left(1\right)\right)\right)}} \cdot x}{y \cdot e^{b}} \]
        11. metadata-evalN/A

          \[\leadsto \frac{{a}^{\left(t + \color{blue}{-1}\right)} \cdot x}{y \cdot e^{b}} \]
        12. +-lowering-+.f64N/A

          \[\leadsto \frac{{a}^{\color{blue}{\left(t + -1\right)}} \cdot x}{y \cdot e^{b}} \]
        13. *-lowering-*.f64N/A

          \[\leadsto \frac{{a}^{\left(t + -1\right)} \cdot x}{\color{blue}{y \cdot e^{b}}} \]
        14. exp-lowering-exp.f6479.5

          \[\leadsto \frac{{a}^{\left(t + -1\right)} \cdot x}{y \cdot \color{blue}{e^{b}}} \]
      5. Simplified79.5%

        \[\leadsto \color{blue}{\frac{{a}^{\left(t + -1\right)} \cdot x}{y \cdot e^{b}}} \]
      6. Taylor expanded in b around 0

        \[\leadsto \frac{{a}^{\left(t + -1\right)} \cdot x}{\color{blue}{y}} \]
      7. Step-by-step derivation
        1. Simplified76.9%

          \[\leadsto \frac{{a}^{\left(t + -1\right)} \cdot x}{\color{blue}{y}} \]

        if -1.10000000000000002e-113 < y < 3.1e81

        1. Initial program 96.5%

          \[\frac{x \cdot e^{\left(y \cdot \log z + \left(t - 1\right) \cdot \log a\right) - b}}{y} \]
        2. Add Preprocessing
        3. Taylor expanded in y around 0

          \[\leadsto \color{blue}{\frac{x \cdot e^{\log a \cdot \left(t - 1\right) - b}}{y}} \]
        4. Step-by-step derivation
          1. *-commutativeN/A

            \[\leadsto \frac{\color{blue}{e^{\log a \cdot \left(t - 1\right) - b} \cdot x}}{y} \]
          2. exp-diffN/A

            \[\leadsto \frac{\color{blue}{\frac{e^{\log a \cdot \left(t - 1\right)}}{e^{b}}} \cdot x}{y} \]
          3. associate-*l/N/A

            \[\leadsto \frac{\color{blue}{\frac{e^{\log a \cdot \left(t - 1\right)} \cdot x}{e^{b}}}}{y} \]
          4. associate-/l/N/A

            \[\leadsto \color{blue}{\frac{e^{\log a \cdot \left(t - 1\right)} \cdot x}{y \cdot e^{b}}} \]
          5. /-lowering-/.f64N/A

            \[\leadsto \color{blue}{\frac{e^{\log a \cdot \left(t - 1\right)} \cdot x}{y \cdot e^{b}}} \]
          6. *-lowering-*.f64N/A

            \[\leadsto \frac{\color{blue}{e^{\log a \cdot \left(t - 1\right)} \cdot x}}{y \cdot e^{b}} \]
          7. exp-prodN/A

            \[\leadsto \frac{\color{blue}{{\left(e^{\log a}\right)}^{\left(t - 1\right)}} \cdot x}{y \cdot e^{b}} \]
          8. pow-lowering-pow.f64N/A

            \[\leadsto \frac{\color{blue}{{\left(e^{\log a}\right)}^{\left(t - 1\right)}} \cdot x}{y \cdot e^{b}} \]
          9. rem-exp-logN/A

            \[\leadsto \frac{{\color{blue}{a}}^{\left(t - 1\right)} \cdot x}{y \cdot e^{b}} \]
          10. sub-negN/A

            \[\leadsto \frac{{a}^{\color{blue}{\left(t + \left(\mathsf{neg}\left(1\right)\right)\right)}} \cdot x}{y \cdot e^{b}} \]
          11. metadata-evalN/A

            \[\leadsto \frac{{a}^{\left(t + \color{blue}{-1}\right)} \cdot x}{y \cdot e^{b}} \]
          12. +-lowering-+.f64N/A

            \[\leadsto \frac{{a}^{\color{blue}{\left(t + -1\right)}} \cdot x}{y \cdot e^{b}} \]
          13. *-lowering-*.f64N/A

            \[\leadsto \frac{{a}^{\left(t + -1\right)} \cdot x}{\color{blue}{y \cdot e^{b}}} \]
          14. exp-lowering-exp.f6478.2

            \[\leadsto \frac{{a}^{\left(t + -1\right)} \cdot x}{y \cdot \color{blue}{e^{b}}} \]
        5. Simplified78.2%

          \[\leadsto \color{blue}{\frac{{a}^{\left(t + -1\right)} \cdot x}{y \cdot e^{b}}} \]
        6. Taylor expanded in t around 0

          \[\leadsto \color{blue}{\frac{x}{a \cdot \left(y \cdot e^{b}\right)}} \]
        7. Step-by-step derivation
          1. /-lowering-/.f64N/A

            \[\leadsto \color{blue}{\frac{x}{a \cdot \left(y \cdot e^{b}\right)}} \]
          2. *-lowering-*.f64N/A

            \[\leadsto \frac{x}{\color{blue}{a \cdot \left(y \cdot e^{b}\right)}} \]
          3. *-lowering-*.f64N/A

            \[\leadsto \frac{x}{a \cdot \color{blue}{\left(y \cdot e^{b}\right)}} \]
          4. exp-lowering-exp.f6476.5

            \[\leadsto \frac{x}{a \cdot \left(y \cdot \color{blue}{e^{b}}\right)} \]
        8. Simplified76.5%

          \[\leadsto \color{blue}{\frac{x}{a \cdot \left(y \cdot e^{b}\right)}} \]
      8. Recombined 3 regimes into one program.
      9. Final simplification81.9%

        \[\leadsto \begin{array}{l} \mathbf{if}\;y \leq -2.05 \cdot 10^{+36}:\\ \;\;\;\;\frac{x \cdot {z}^{y}}{y}\\ \mathbf{elif}\;y \leq -1.1 \cdot 10^{-113}:\\ \;\;\;\;\frac{x \cdot {a}^{\left(t + -1\right)}}{y}\\ \mathbf{elif}\;y \leq 3.1 \cdot 10^{+81}:\\ \;\;\;\;\frac{x}{a \cdot \left(y \cdot e^{b}\right)}\\ \mathbf{else}:\\ \;\;\;\;\frac{x \cdot {z}^{y}}{y}\\ \end{array} \]
      10. Add Preprocessing

      Alternative 10: 64.3% accurate, 2.5× speedup?

      \[\begin{array}{l} \\ \begin{array}{l} t_1 := \frac{x \cdot {z}^{y}}{y}\\ \mathbf{if}\;y \leq -7.5 \cdot 10^{+38}:\\ \;\;\;\;t\_1\\ \mathbf{elif}\;y \leq 3.95 \cdot 10^{-212}:\\ \;\;\;\;\frac{x}{y \cdot e^{b}}\\ \mathbf{elif}\;y \leq 1.52 \cdot 10^{+36}:\\ \;\;\;\;\frac{x}{a \cdot \left(y \cdot \mathsf{fma}\left(b, \mathsf{fma}\left(b, \mathsf{fma}\left(b, 0.16666666666666666, 0.5\right), 1\right), 1\right)\right)}\\ \mathbf{else}:\\ \;\;\;\;t\_1\\ \end{array} \end{array} \]
      ;; Herbie Alternative 10 (FPCore): three regimes selected on y.
      ;; t_1 drops b (extreme |y|); the middle regime replaces exp(b) with
      ;; its degree-3 Taylor polynomial evaluated via nested fma.
      (FPCore (x y z t a b)
       :precision binary64
       (let* ((t_1 (/ (* x (pow z y)) y)))
         (if (<= y -7.5e+38)
           t_1
           (if (<= y 3.95e-212)
             (/ x (* y (exp b)))
             (if (<= y 1.52e+36)
               (/
                x
                (* a (* y (fma b (fma b (fma b 0.16666666666666666 0.5) 1.0) 1.0))))
               t_1)))))
      double code(double x, double y, double z, double t, double a, double b) {
      	double t_1 = (x * pow(z, y)) / y;
      	double tmp;
      	if (y <= -7.5e+38) {
      		tmp = t_1;
      	} else if (y <= 3.95e-212) {
      		tmp = x / (y * exp(b));
      	} else if (y <= 1.52e+36) {
      		tmp = x / (a * (y * fma(b, fma(b, fma(b, 0.16666666666666666, 0.5), 1.0), 1.0)));
      	} else {
      		tmp = t_1;
      	}
      	return tmp;
      }
      
      # Herbie Alternative 10 (Julia): regime split on y; the middle regime
      # replaces exp(b) with a degree-3 Taylor polynomial via nested fma.
      function code(x, y, z, t, a, b)
      	if y <= -7.5e+38
      		# Very negative y: b dropped, exponential reduced to z^y.
      		return Float64(Float64(x * (z ^ y)) / y)
      	elseif y <= 3.95e-212
      		# Small y: y*log(z) and t terms dropped.
      		return Float64(x / Float64(y * exp(b)))
      	elseif y <= 1.52e+36
      		# Moderate y: exp(b) ~ 1 + b + b^2/2 + b^3/6.
      		return Float64(x / Float64(a * Float64(y * fma(b, fma(b, fma(b, 0.16666666666666666, 0.5), 1.0), 1.0))))
      	end
      	# Huge y (or NaN): same reduced form as the first regime.
      	return Float64(Float64(x * (z ^ y)) / y)
      end
      
      code[x_, y_, z_, t_, a_, b_] := Block[{t$95$1 = N[(N[(x * N[Power[z, y], $MachinePrecision]), $MachinePrecision] / y), $MachinePrecision]}, If[LessEqual[y, -7.5e+38], t$95$1, If[LessEqual[y, 3.95e-212], N[(x / N[(y * N[Exp[b], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], If[LessEqual[y, 1.52e+36], N[(x / N[(a * N[(y * N[(b * N[(b * N[(b * 0.16666666666666666 + 0.5), $MachinePrecision] + 1.0), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], t$95$1]]]]
      
      \begin{array}{l}
      
      \\
      \begin{array}{l}
      t_1 := \frac{x \cdot {z}^{y}}{y}\\
      \mathbf{if}\;y \leq -7.5 \cdot 10^{+38}:\\
      \;\;\;\;t\_1\\
      
      \mathbf{elif}\;y \leq 3.95 \cdot 10^{-212}:\\
      \;\;\;\;\frac{x}{y \cdot e^{b}}\\
      
      \mathbf{elif}\;y \leq 1.52 \cdot 10^{+36}:\\
      \;\;\;\;\frac{x}{a \cdot \left(y \cdot \mathsf{fma}\left(b, \mathsf{fma}\left(b, \mathsf{fma}\left(b, 0.16666666666666666, 0.5\right), 1\right), 1\right)\right)}\\
      
      \mathbf{else}:\\
      \;\;\;\;t\_1\\
      
      
      \end{array}
      \end{array}
      
      Derivation
      1. Split input into 3 regimes
      2. if y < -7.4999999999999999e38 or 1.52e36 < y

        1. Initial program 100.0%

          \[\frac{x \cdot e^{\left(y \cdot \log z + \left(t - 1\right) \cdot \log a\right) - b}}{y} \]
        2. Add Preprocessing
        3. Taylor expanded in y around inf

          \[\leadsto \frac{x \cdot e^{\color{blue}{y \cdot \log z} - b}}{y} \]
        4. Step-by-step derivation
          1. +-rgt-identityN/A

            \[\leadsto \frac{x \cdot e^{\color{blue}{\left(y \cdot \log z + 0\right)} - b}}{y} \]
          2. accelerator-lowering-fma.f64N/A

            \[\leadsto \frac{x \cdot e^{\color{blue}{\mathsf{fma}\left(y, \log z, 0\right)} - b}}{y} \]
          3. log-lowering-log.f6492.9

            \[\leadsto \frac{x \cdot e^{\mathsf{fma}\left(y, \color{blue}{\log z}, 0\right) - b}}{y} \]
        5. Simplified92.9%

          \[\leadsto \frac{x \cdot e^{\color{blue}{\mathsf{fma}\left(y, \log z, 0\right)} - b}}{y} \]
        6. Taylor expanded in b around 0

          \[\leadsto \color{blue}{\frac{x \cdot {z}^{y}}{y}} \]
        7. Step-by-step derivation
          1. /-lowering-/.f64N/A

            \[\leadsto \color{blue}{\frac{x \cdot {z}^{y}}{y}} \]
          2. *-lowering-*.f64N/A

            \[\leadsto \frac{\color{blue}{x \cdot {z}^{y}}}{y} \]
          3. pow-lowering-pow.f6486.5

            \[\leadsto \frac{x \cdot \color{blue}{{z}^{y}}}{y} \]
        8. Simplified86.5%

          \[\leadsto \color{blue}{\frac{x \cdot {z}^{y}}{y}} \]

        if -7.4999999999999999e38 < y < 3.9500000000000002e-212

        1. Initial program 98.2%

          \[\frac{x \cdot e^{\left(y \cdot \log z + \left(t - 1\right) \cdot \log a\right) - b}}{y} \]
        2. Add Preprocessing
        3. Taylor expanded in y around inf

          \[\leadsto \frac{x \cdot e^{\color{blue}{y \cdot \log z} - b}}{y} \]
        4. Step-by-step derivation
          1. +-rgt-identityN/A

            \[\leadsto \frac{x \cdot e^{\color{blue}{\left(y \cdot \log z + 0\right)} - b}}{y} \]
          2. accelerator-lowering-fma.f64N/A

            \[\leadsto \frac{x \cdot e^{\color{blue}{\mathsf{fma}\left(y, \log z, 0\right)} - b}}{y} \]
          3. log-lowering-log.f6459.1

            \[\leadsto \frac{x \cdot e^{\mathsf{fma}\left(y, \color{blue}{\log z}, 0\right) - b}}{y} \]
        5. Simplified59.1%

          \[\leadsto \frac{x \cdot e^{\color{blue}{\mathsf{fma}\left(y, \log z, 0\right)} - b}}{y} \]
        6. Taylor expanded in y around 0

          \[\leadsto \frac{\color{blue}{x \cdot e^{\mathsf{neg}\left(b\right)}}}{y} \]
        7. Step-by-step derivation
          1. *-lowering-*.f64N/A

            \[\leadsto \frac{\color{blue}{x \cdot e^{\mathsf{neg}\left(b\right)}}}{y} \]
          2. exp-lowering-exp.f64N/A

            \[\leadsto \frac{x \cdot \color{blue}{e^{\mathsf{neg}\left(b\right)}}}{y} \]
          3. neg-sub0N/A

            \[\leadsto \frac{x \cdot e^{\color{blue}{0 - b}}}{y} \]
          4. --lowering--.f6458.0

            \[\leadsto \frac{x \cdot e^{\color{blue}{0 - b}}}{y} \]
        8. Simplified58.0%

          \[\leadsto \frac{\color{blue}{x \cdot e^{0 - b}}}{y} \]
        9. Step-by-step derivation
          1. sub0-negN/A

            \[\leadsto \frac{x \cdot e^{\color{blue}{\mathsf{neg}\left(b\right)}}}{y} \]
          2. exp-negN/A

            \[\leadsto \frac{x \cdot \color{blue}{\frac{1}{e^{b}}}}{y} \]
          3. un-div-invN/A

            \[\leadsto \frac{\color{blue}{\frac{x}{e^{b}}}}{y} \]
          4. associate-/l/N/A

            \[\leadsto \color{blue}{\frac{x}{y \cdot e^{b}}} \]
          5. /-lowering-/.f64N/A

            \[\leadsto \color{blue}{\frac{x}{y \cdot e^{b}}} \]
          6. *-lowering-*.f64N/A

            \[\leadsto \frac{x}{\color{blue}{y \cdot e^{b}}} \]
          7. exp-lowering-exp.f6458.0

            \[\leadsto \frac{x}{y \cdot \color{blue}{e^{b}}} \]
        10. Applied egg-rr58.0%

          \[\leadsto \color{blue}{\frac{x}{y \cdot e^{b}}} \]

        if 3.9500000000000002e-212 < y < 1.52e36

        1. Initial program 94.4%

          \[\frac{x \cdot e^{\left(y \cdot \log z + \left(t - 1\right) \cdot \log a\right) - b}}{y} \]
        2. Add Preprocessing
        3. Taylor expanded in y around 0

          \[\leadsto \color{blue}{\frac{x \cdot e^{\log a \cdot \left(t - 1\right) - b}}{y}} \]
        4. Step-by-step derivation
          1. *-commutativeN/A

            \[\leadsto \frac{\color{blue}{e^{\log a \cdot \left(t - 1\right) - b} \cdot x}}{y} \]
          2. exp-diffN/A

            \[\leadsto \frac{\color{blue}{\frac{e^{\log a \cdot \left(t - 1\right)}}{e^{b}}} \cdot x}{y} \]
          3. associate-*l/N/A

            \[\leadsto \frac{\color{blue}{\frac{e^{\log a \cdot \left(t - 1\right)} \cdot x}{e^{b}}}}{y} \]
          4. associate-/l/N/A

            \[\leadsto \color{blue}{\frac{e^{\log a \cdot \left(t - 1\right)} \cdot x}{y \cdot e^{b}}} \]
          5. /-lowering-/.f64N/A

            \[\leadsto \color{blue}{\frac{e^{\log a \cdot \left(t - 1\right)} \cdot x}{y \cdot e^{b}}} \]
          6. *-lowering-*.f64N/A

            \[\leadsto \frac{\color{blue}{e^{\log a \cdot \left(t - 1\right)} \cdot x}}{y \cdot e^{b}} \]
          7. exp-prodN/A

            \[\leadsto \frac{\color{blue}{{\left(e^{\log a}\right)}^{\left(t - 1\right)}} \cdot x}{y \cdot e^{b}} \]
          8. pow-lowering-pow.f64N/A

            \[\leadsto \frac{\color{blue}{{\left(e^{\log a}\right)}^{\left(t - 1\right)}} \cdot x}{y \cdot e^{b}} \]
          9. rem-exp-logN/A

            \[\leadsto \frac{{\color{blue}{a}}^{\left(t - 1\right)} \cdot x}{y \cdot e^{b}} \]
          10. sub-negN/A

            \[\leadsto \frac{{a}^{\color{blue}{\left(t + \left(\mathsf{neg}\left(1\right)\right)\right)}} \cdot x}{y \cdot e^{b}} \]
          11. metadata-evalN/A

            \[\leadsto \frac{{a}^{\left(t + \color{blue}{-1}\right)} \cdot x}{y \cdot e^{b}} \]
          12. +-lowering-+.f64N/A

            \[\leadsto \frac{{a}^{\color{blue}{\left(t + -1\right)}} \cdot x}{y \cdot e^{b}} \]
          13. *-lowering-*.f64N/A

            \[\leadsto \frac{{a}^{\left(t + -1\right)} \cdot x}{\color{blue}{y \cdot e^{b}}} \]
          14. exp-lowering-exp.f6468.3

            \[\leadsto \frac{{a}^{\left(t + -1\right)} \cdot x}{y \cdot \color{blue}{e^{b}}} \]
        5. Simplified68.3%

          \[\leadsto \color{blue}{\frac{{a}^{\left(t + -1\right)} \cdot x}{y \cdot e^{b}}} \]
        6. Taylor expanded in t around 0

          \[\leadsto \color{blue}{\frac{x}{a \cdot \left(y \cdot e^{b}\right)}} \]
        7. Step-by-step derivation
          1. /-lowering-/.f64N/A

            \[\leadsto \color{blue}{\frac{x}{a \cdot \left(y \cdot e^{b}\right)}} \]
          2. *-lowering-*.f64N/A

            \[\leadsto \frac{x}{\color{blue}{a \cdot \left(y \cdot e^{b}\right)}} \]
          3. *-lowering-*.f64N/A

            \[\leadsto \frac{x}{a \cdot \color{blue}{\left(y \cdot e^{b}\right)}} \]
          4. exp-lowering-exp.f6477.1

            \[\leadsto \frac{x}{a \cdot \left(y \cdot \color{blue}{e^{b}}\right)} \]
        8. Simplified77.1%

          \[\leadsto \color{blue}{\frac{x}{a \cdot \left(y \cdot e^{b}\right)}} \]
        9. Taylor expanded in b around 0

          \[\leadsto \frac{x}{a \cdot \left(y \cdot \color{blue}{\left(1 + b \cdot \left(1 + b \cdot \left(\frac{1}{2} + \frac{1}{6} \cdot b\right)\right)\right)}\right)} \]
        10. Step-by-step derivation
          1. +-commutativeN/A

            \[\leadsto \frac{x}{a \cdot \left(y \cdot \color{blue}{\left(b \cdot \left(1 + b \cdot \left(\frac{1}{2} + \frac{1}{6} \cdot b\right)\right) + 1\right)}\right)} \]
          2. accelerator-lowering-fma.f64N/A

            \[\leadsto \frac{x}{a \cdot \left(y \cdot \color{blue}{\mathsf{fma}\left(b, 1 + b \cdot \left(\frac{1}{2} + \frac{1}{6} \cdot b\right), 1\right)}\right)} \]
          3. +-commutativeN/A

            \[\leadsto \frac{x}{a \cdot \left(y \cdot \mathsf{fma}\left(b, \color{blue}{b \cdot \left(\frac{1}{2} + \frac{1}{6} \cdot b\right) + 1}, 1\right)\right)} \]
          4. accelerator-lowering-fma.f64N/A

            \[\leadsto \frac{x}{a \cdot \left(y \cdot \mathsf{fma}\left(b, \color{blue}{\mathsf{fma}\left(b, \frac{1}{2} + \frac{1}{6} \cdot b, 1\right)}, 1\right)\right)} \]
          5. +-commutativeN/A

            \[\leadsto \frac{x}{a \cdot \left(y \cdot \mathsf{fma}\left(b, \mathsf{fma}\left(b, \color{blue}{\frac{1}{6} \cdot b + \frac{1}{2}}, 1\right), 1\right)\right)} \]
          6. *-commutativeN/A

            \[\leadsto \frac{x}{a \cdot \left(y \cdot \mathsf{fma}\left(b, \mathsf{fma}\left(b, \color{blue}{b \cdot \frac{1}{6}} + \frac{1}{2}, 1\right), 1\right)\right)} \]
          7. accelerator-lowering-fma.f6462.6

            \[\leadsto \frac{x}{a \cdot \left(y \cdot \mathsf{fma}\left(b, \mathsf{fma}\left(b, \color{blue}{\mathsf{fma}\left(b, 0.16666666666666666, 0.5\right)}, 1\right), 1\right)\right)} \]
        11. Simplified62.6%

          \[\leadsto \frac{x}{a \cdot \left(y \cdot \color{blue}{\mathsf{fma}\left(b, \mathsf{fma}\left(b, \mathsf{fma}\left(b, 0.16666666666666666, 0.5\right), 1\right), 1\right)}\right)} \]
      3. Recombined 3 regimes into one program.
      4. Add Preprocessing

      Alternative 11: 74.4% accurate, 2.5× speedup?

      \[\begin{array}{l} \\ \begin{array}{l} t_1 := \frac{x \cdot {z}^{y}}{y}\\ \mathbf{if}\;y \leq -2 \cdot 10^{+36}:\\ \;\;\;\;t\_1\\ \mathbf{elif}\;y \leq 2.05 \cdot 10^{+61}:\\ \;\;\;\;\frac{x \cdot {a}^{\left(t + -1\right)}}{y}\\ \mathbf{else}:\\ \;\;\;\;t\_1\\ \end{array} \end{array} \]
      (FPCore (x y z t a b)
       :precision binary64
       (let* ((t_1 (/ (* x (pow z y)) y)))
         (if (<= y -2e+36)
           t_1
           (if (<= y 2.05e+61) (/ (* x (pow a (+ t -1.0))) y) t_1))))
      double code(double x, double y, double z, double t, double a, double b) {
      	double t_1 = (x * pow(z, y)) / y;
      	double tmp;
      	if (y <= -2e+36) {
      		tmp = t_1;
      	} else if (y <= 2.05e+61) {
      		tmp = (x * pow(a, (t + -1.0))) / y;
      	} else {
      		tmp = t_1;
      	}
      	return tmp;
      }
      
      ! Regime-split approximation of x * exp((y*log(z) + (t-1)*log(a)) - b) / y.
      ! Extreme y falls back to x*z**y/y; the middle range keeps only a**(t-1).
      real(8) function code(x, y, z, t, a, b)
          real(8), intent (in) :: x
          real(8), intent (in) :: y
          real(8), intent (in) :: z
          real(8), intent (in) :: t
          real(8), intent (in) :: a
          real(8), intent (in) :: b
          real(8) :: t_1
          real(8) :: tmp
          ! Fallback value used by both extreme-y regimes.
          t_1 = (x * (z ** y)) / y
          if (y <= (-2d+36)) then
              tmp = t_1
          else if (y <= 2.05d+61) then
              ! Dominant middle regime: b and z terms dropped by series expansion.
              tmp = (x * (a ** (t + (-1.0d0)))) / y
          else
              tmp = t_1
          end if
          code = tmp
      end function
      
      /** Regime-split approximation of x * exp((y*log(z) + (t-1)*log(a)) - b) / y. */
      public static double code(double x, double y, double z, double t, double a, double b) {
      	// Extreme-y fallback: series expansion kept only the z^y factor.
      	double fallback = (x * Math.pow(z, y)) / y;
      	// Middle regime keeps only the a^(t-1) factor.
      	boolean middle = (y > -2e+36) && (y <= 2.05e+61);
      	return middle ? (x * Math.pow(a, (t + -1.0))) / y : fallback;
      }
      
      def code(x, y, z, t, a, b):
          """Regime-split approximation of x*exp((y*log(z)+(t-1)*log(a))-b)/y."""
          # Extreme-y fallback: series expansion kept only the z**y factor.
          fallback = (x * math.pow(z, y)) / y
          # Middle regime keeps only the a**(t-1) factor.
          if -2e+36 < y <= 2.05e+61:
              return (x * math.pow(a, (t + -1.0))) / y
          return fallback
      
      # Regime-split approximation of x * exp((y*log(z) + (t-1)*log(a)) - b) / y.
      function code(x, y, z, t, a, b)
      	# Fallback used by both extreme-y regimes: keeps only the z^y factor.
      	t_1 = Float64(Float64(x * (z ^ y)) / y)
      	tmp = 0.0
      	if (y <= -2e+36)
      		tmp = t_1;
      	elseif (y <= 2.05e+61)
      		# Middle regime: keeps only the a^(t-1) factor.
      		tmp = Float64(Float64(x * (a ^ Float64(t + -1.0))) / y);
      	else
      		tmp = t_1;
      	end
      	return tmp
      end
      
      % Regime-split approximation of x * exp((y*log(z) + (t-1)*log(a)) - b) / y.
      function tmp_2 = code(x, y, z, t, a, b)
      	% Fallback used by both extreme-y regimes: keeps only the z^y factor.
      	t_1 = (x * (z ^ y)) / y;
      	tmp = 0.0;
      	if (y <= -2e+36)
      		tmp = t_1;
      	elseif (y <= 2.05e+61)
      		% Middle regime: keeps only the a^(t-1) factor.
      		tmp = (x * (a ^ (t + -1.0))) / y;
      	else
      		tmp = t_1;
      	end
      	tmp_2 = tmp;
      end
      
      code[x_, y_, z_, t_, a_, b_] := Block[{t$95$1 = N[(N[(x * N[Power[z, y], $MachinePrecision]), $MachinePrecision] / y), $MachinePrecision]}, If[LessEqual[y, -2e+36], t$95$1, If[LessEqual[y, 2.05e+61], N[(N[(x * N[Power[a, N[(t + -1.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / y), $MachinePrecision], t$95$1]]]
      
      \begin{array}{l}
      
      \\
      \begin{array}{l}
      t_1 := \frac{x \cdot {z}^{y}}{y}\\
      \mathbf{if}\;y \leq -2 \cdot 10^{+36}:\\
      \;\;\;\;t\_1\\
      
      \mathbf{elif}\;y \leq 2.05 \cdot 10^{+61}:\\
      \;\;\;\;\frac{x \cdot {a}^{\left(t + -1\right)}}{y}\\
      
      \mathbf{else}:\\
      \;\;\;\;t\_1\\
      
      
      \end{array}
      \end{array}
      
      Derivation
      1. Split input into 2 regimes
      2. if y < -2.00000000000000008e36 or 2.04999999999999986e61 < y

        1. Initial program 100.0%

          \[\frac{x \cdot e^{\left(y \cdot \log z + \left(t - 1\right) \cdot \log a\right) - b}}{y} \]
        2. Add Preprocessing
        3. Taylor expanded in y around inf

          \[\leadsto \frac{x \cdot e^{\color{blue}{y \cdot \log z} - b}}{y} \]
        4. Step-by-step derivation
          1. +-rgt-identityN/A

            \[\leadsto \frac{x \cdot e^{\color{blue}{\left(y \cdot \log z + 0\right)} - b}}{y} \]
          2. accelerator-lowering-fma.f64N/A

            \[\leadsto \frac{x \cdot e^{\color{blue}{\mathsf{fma}\left(y, \log z, 0\right)} - b}}{y} \]
          3. log-lowering-log.f6493.4

            \[\leadsto \frac{x \cdot e^{\mathsf{fma}\left(y, \color{blue}{\log z}, 0\right) - b}}{y} \]
        5. Simplified93.4%

          \[\leadsto \frac{x \cdot e^{\color{blue}{\mathsf{fma}\left(y, \log z, 0\right)} - b}}{y} \]
        6. Taylor expanded in b around 0

          \[\leadsto \color{blue}{\frac{x \cdot {z}^{y}}{y}} \]
        7. Step-by-step derivation
          1. /-lowering-/.f64N/A

            \[\leadsto \color{blue}{\frac{x \cdot {z}^{y}}{y}} \]
          2. *-lowering-*.f64N/A

            \[\leadsto \frac{\color{blue}{x \cdot {z}^{y}}}{y} \]
          3. pow-lowering-pow.f6488.4

            \[\leadsto \frac{x \cdot \color{blue}{{z}^{y}}}{y} \]
        8. Simplified88.4%

          \[\leadsto \color{blue}{\frac{x \cdot {z}^{y}}{y}} \]

        if -2.00000000000000008e36 < y < 2.04999999999999986e61

        1. Initial program 97.0%

          \[\frac{x \cdot e^{\left(y \cdot \log z + \left(t - 1\right) \cdot \log a\right) - b}}{y} \]
        2. Add Preprocessing
        3. Taylor expanded in y around 0

          \[\leadsto \color{blue}{\frac{x \cdot e^{\log a \cdot \left(t - 1\right) - b}}{y}} \]
        4. Step-by-step derivation
          1. *-commutativeN/A

            \[\leadsto \frac{\color{blue}{e^{\log a \cdot \left(t - 1\right) - b} \cdot x}}{y} \]
          2. exp-diffN/A

            \[\leadsto \frac{\color{blue}{\frac{e^{\log a \cdot \left(t - 1\right)}}{e^{b}}} \cdot x}{y} \]
          3. associate-*l/N/A

            \[\leadsto \frac{\color{blue}{\frac{e^{\log a \cdot \left(t - 1\right)} \cdot x}{e^{b}}}}{y} \]
          4. associate-/l/N/A

            \[\leadsto \color{blue}{\frac{e^{\log a \cdot \left(t - 1\right)} \cdot x}{y \cdot e^{b}}} \]
          5. /-lowering-/.f64N/A

            \[\leadsto \color{blue}{\frac{e^{\log a \cdot \left(t - 1\right)} \cdot x}{y \cdot e^{b}}} \]
          6. *-lowering-*.f64N/A

            \[\leadsto \frac{\color{blue}{e^{\log a \cdot \left(t - 1\right)} \cdot x}}{y \cdot e^{b}} \]
          7. exp-prodN/A

            \[\leadsto \frac{\color{blue}{{\left(e^{\log a}\right)}^{\left(t - 1\right)}} \cdot x}{y \cdot e^{b}} \]
          8. pow-lowering-pow.f64N/A

            \[\leadsto \frac{\color{blue}{{\left(e^{\log a}\right)}^{\left(t - 1\right)}} \cdot x}{y \cdot e^{b}} \]
          9. rem-exp-logN/A

            \[\leadsto \frac{{\color{blue}{a}}^{\left(t - 1\right)} \cdot x}{y \cdot e^{b}} \]
          10. sub-negN/A

            \[\leadsto \frac{{a}^{\color{blue}{\left(t + \left(\mathsf{neg}\left(1\right)\right)\right)}} \cdot x}{y \cdot e^{b}} \]
          11. metadata-evalN/A

            \[\leadsto \frac{{a}^{\left(t + \color{blue}{-1}\right)} \cdot x}{y \cdot e^{b}} \]
          12. +-lowering-+.f64N/A

            \[\leadsto \frac{{a}^{\color{blue}{\left(t + -1\right)}} \cdot x}{y \cdot e^{b}} \]
          13. *-lowering-*.f64N/A

            \[\leadsto \frac{{a}^{\left(t + -1\right)} \cdot x}{\color{blue}{y \cdot e^{b}}} \]
          14. exp-lowering-exp.f6478.2

            \[\leadsto \frac{{a}^{\left(t + -1\right)} \cdot x}{y \cdot \color{blue}{e^{b}}} \]
        5. Simplified78.2%

          \[\leadsto \color{blue}{\frac{{a}^{\left(t + -1\right)} \cdot x}{y \cdot e^{b}}} \]
        6. Taylor expanded in b around 0

          \[\leadsto \frac{{a}^{\left(t + -1\right)} \cdot x}{\color{blue}{y}} \]
        7. Step-by-step derivation
          1. Simplified69.4%

            \[\leadsto \frac{{a}^{\left(t + -1\right)} \cdot x}{\color{blue}{y}} \]
        8. Recombined 2 regimes into one program.
        9. Final simplification78.3%

          \[\leadsto \begin{array}{l} \mathbf{if}\;y \leq -2 \cdot 10^{+36}:\\ \;\;\;\;\frac{x \cdot {z}^{y}}{y}\\ \mathbf{elif}\;y \leq 2.05 \cdot 10^{+61}:\\ \;\;\;\;\frac{x \cdot {a}^{\left(t + -1\right)}}{y}\\ \mathbf{else}:\\ \;\;\;\;\frac{x \cdot {z}^{y}}{y}\\ \end{array} \]
        10. Add Preprocessing

        Alternative 12: 71.3% accurate, 2.5× speedup?

        \[\begin{array}{l} \\ \begin{array}{l} t_1 := \frac{x \cdot {z}^{y}}{y}\\ \mathbf{if}\;y \leq -2.6 \cdot 10^{+36}:\\ \;\;\;\;t\_1\\ \mathbf{elif}\;y \leq 5.5 \cdot 10^{+61}:\\ \;\;\;\;{a}^{\left(t + -1\right)} \cdot \frac{x}{y}\\ \mathbf{else}:\\ \;\;\;\;t\_1\\ \end{array} \end{array} \]
        (FPCore (x y z t a b)
         :precision binary64
         (let* ((t_1 (/ (* x (pow z y)) y)))
           (if (<= y -2.6e+36)
             t_1
             (if (<= y 5.5e+61) (* (pow a (+ t -1.0)) (/ x y)) t_1))))
        double code(double x, double y, double z, double t, double a, double b) {
        	double t_1 = (x * pow(z, y)) / y;
        	double tmp;
        	if (y <= -2.6e+36) {
        		tmp = t_1;
        	} else if (y <= 5.5e+61) {
        		tmp = pow(a, (t + -1.0)) * (x / y);
        	} else {
        		tmp = t_1;
        	}
        	return tmp;
        }
        
        ! Regime-split approximation of x * exp((y*log(z) + (t-1)*log(a)) - b) / y.
        ! Extreme y falls back to x*z**y/y; the middle range uses a**(t-1)*(x/y).
        real(8) function code(x, y, z, t, a, b)
            real(8), intent (in) :: x
            real(8), intent (in) :: y
            real(8), intent (in) :: z
            real(8), intent (in) :: t
            real(8), intent (in) :: a
            real(8), intent (in) :: b
            real(8) :: t_1
            real(8) :: tmp
            ! Fallback value used by both extreme-y regimes.
            t_1 = (x * (z ** y)) / y
            if (y <= (-2.6d+36)) then
                tmp = t_1
            else if (y <= 5.5d+61) then
                ! Dominant middle regime: b and z terms dropped by series expansion.
                tmp = (a ** (t + (-1.0d0))) * (x / y)
            else
                tmp = t_1
            end if
            code = tmp
        end function
        
        /** Regime-split approximation of x * exp((y*log(z) + (t-1)*log(a)) - b) / y. */
        public static double code(double x, double y, double z, double t, double a, double b) {
        	// Extreme-y fallback: series expansion kept only the z^y factor.
        	double fallback = (x * Math.pow(z, y)) / y;
        	// Middle regime: a^(t-1) scaled by x/y.
        	boolean middle = (y > -2.6e+36) && (y <= 5.5e+61);
        	return middle ? Math.pow(a, (t + -1.0)) * (x / y) : fallback;
        }
        
        def code(x, y, z, t, a, b):
            """Regime-split approximation of x*exp((y*log(z)+(t-1)*log(a))-b)/y."""
            # Extreme-y fallback: series expansion kept only the z**y factor.
            fallback = (x * math.pow(z, y)) / y
            # Middle regime: a**(t-1) scaled by x/y.
            if -2.6e+36 < y <= 5.5e+61:
                return math.pow(a, (t + -1.0)) * (x / y)
            return fallback
        
        # Regime-split approximation of x * exp((y*log(z) + (t-1)*log(a)) - b) / y.
        function code(x, y, z, t, a, b)
        	# Fallback used by both extreme-y regimes: keeps only the z^y factor.
        	t_1 = Float64(Float64(x * (z ^ y)) / y)
        	tmp = 0.0
        	if (y <= -2.6e+36)
        		tmp = t_1;
        	elseif (y <= 5.5e+61)
        		# Middle regime: a^(t-1) scaled by x/y.
        		tmp = Float64((a ^ Float64(t + -1.0)) * Float64(x / y));
        	else
        		tmp = t_1;
        	end
        	return tmp
        end
        
        % Regime-split approximation of x * exp((y*log(z) + (t-1)*log(a)) - b) / y.
        function tmp_2 = code(x, y, z, t, a, b)
        	% Fallback used by both extreme-y regimes: keeps only the z^y factor.
        	t_1 = (x * (z ^ y)) / y;
        	tmp = 0.0;
        	if (y <= -2.6e+36)
        		tmp = t_1;
        	elseif (y <= 5.5e+61)
        		% Middle regime: a^(t-1) scaled by x/y.
        		tmp = (a ^ (t + -1.0)) * (x / y);
        	else
        		tmp = t_1;
        	end
        	tmp_2 = tmp;
        end
        
        code[x_, y_, z_, t_, a_, b_] := Block[{t$95$1 = N[(N[(x * N[Power[z, y], $MachinePrecision]), $MachinePrecision] / y), $MachinePrecision]}, If[LessEqual[y, -2.6e+36], t$95$1, If[LessEqual[y, 5.5e+61], N[(N[Power[a, N[(t + -1.0), $MachinePrecision]], $MachinePrecision] * N[(x / y), $MachinePrecision]), $MachinePrecision], t$95$1]]]
        
        \begin{array}{l}
        
        \\
        \begin{array}{l}
        t_1 := \frac{x \cdot {z}^{y}}{y}\\
        \mathbf{if}\;y \leq -2.6 \cdot 10^{+36}:\\
        \;\;\;\;t\_1\\
        
        \mathbf{elif}\;y \leq 5.5 \cdot 10^{+61}:\\
        \;\;\;\;{a}^{\left(t + -1\right)} \cdot \frac{x}{y}\\
        
        \mathbf{else}:\\
        \;\;\;\;t\_1\\
        
        
        \end{array}
        \end{array}
        
        Derivation
        1. Split input into 2 regimes
        2. if y < -2.6000000000000001e36 or 5.50000000000000036e61 < y

          1. Initial program 100.0%

            \[\frac{x \cdot e^{\left(y \cdot \log z + \left(t - 1\right) \cdot \log a\right) - b}}{y} \]
          2. Add Preprocessing
          3. Taylor expanded in y around inf

            \[\leadsto \frac{x \cdot e^{\color{blue}{y \cdot \log z} - b}}{y} \]
          4. Step-by-step derivation
            1. +-rgt-identityN/A

              \[\leadsto \frac{x \cdot e^{\color{blue}{\left(y \cdot \log z + 0\right)} - b}}{y} \]
            2. accelerator-lowering-fma.f64N/A

              \[\leadsto \frac{x \cdot e^{\color{blue}{\mathsf{fma}\left(y, \log z, 0\right)} - b}}{y} \]
            3. log-lowering-log.f6493.4

              \[\leadsto \frac{x \cdot e^{\mathsf{fma}\left(y, \color{blue}{\log z}, 0\right) - b}}{y} \]
          5. Simplified93.4%

            \[\leadsto \frac{x \cdot e^{\color{blue}{\mathsf{fma}\left(y, \log z, 0\right)} - b}}{y} \]
          6. Taylor expanded in b around 0

            \[\leadsto \color{blue}{\frac{x \cdot {z}^{y}}{y}} \]
          7. Step-by-step derivation
            1. /-lowering-/.f64N/A

              \[\leadsto \color{blue}{\frac{x \cdot {z}^{y}}{y}} \]
            2. *-lowering-*.f64N/A

              \[\leadsto \frac{\color{blue}{x \cdot {z}^{y}}}{y} \]
            3. pow-lowering-pow.f6488.4

              \[\leadsto \frac{x \cdot \color{blue}{{z}^{y}}}{y} \]
          8. Simplified88.4%

            \[\leadsto \color{blue}{\frac{x \cdot {z}^{y}}{y}} \]

          if -2.6000000000000001e36 < y < 5.50000000000000036e61

          1. Initial program 97.0%

            \[\frac{x \cdot e^{\left(y \cdot \log z + \left(t - 1\right) \cdot \log a\right) - b}}{y} \]
          2. Add Preprocessing
          3. Taylor expanded in b around 0

            \[\leadsto \color{blue}{\frac{x \cdot e^{y \cdot \log z + \log a \cdot \left(t - 1\right)}}{y}} \]
          4. Step-by-step derivation
            1. +-rgt-identityN/A

              \[\leadsto \color{blue}{\frac{x \cdot e^{y \cdot \log z + \log a \cdot \left(t - 1\right)}}{y} + 0} \]
            2. associate-/l*N/A

              \[\leadsto \color{blue}{x \cdot \frac{e^{y \cdot \log z + \log a \cdot \left(t - 1\right)}}{y}} + 0 \]
            3. accelerator-lowering-fma.f64N/A

              \[\leadsto \color{blue}{\mathsf{fma}\left(x, \frac{e^{y \cdot \log z + \log a \cdot \left(t - 1\right)}}{y}, 0\right)} \]
          5. Simplified68.2%

            \[\leadsto \color{blue}{\mathsf{fma}\left(x, {a}^{\left(t + -1\right)} \cdot \frac{{z}^{y}}{y}, 0\right)} \]
          6. Taylor expanded in y around 0

            \[\leadsto \color{blue}{\frac{x \cdot e^{\log a \cdot \left(t - 1\right)}}{y}} \]
          7. Step-by-step derivation
            1. *-commutativeN/A

              \[\leadsto \frac{\color{blue}{e^{\log a \cdot \left(t - 1\right)} \cdot x}}{y} \]
            2. associate-/l*N/A

              \[\leadsto \color{blue}{e^{\log a \cdot \left(t - 1\right)} \cdot \frac{x}{y}} \]
            3. *-lowering-*.f64N/A

              \[\leadsto \color{blue}{e^{\log a \cdot \left(t - 1\right)} \cdot \frac{x}{y}} \]
            4. exp-to-powN/A

              \[\leadsto \color{blue}{{a}^{\left(t - 1\right)}} \cdot \frac{x}{y} \]
            5. pow-lowering-pow.f64N/A

              \[\leadsto \color{blue}{{a}^{\left(t - 1\right)}} \cdot \frac{x}{y} \]
            6. sub-negN/A

              \[\leadsto {a}^{\color{blue}{\left(t + \left(\mathsf{neg}\left(1\right)\right)\right)}} \cdot \frac{x}{y} \]
            7. metadata-evalN/A

              \[\leadsto {a}^{\left(t + \color{blue}{-1}\right)} \cdot \frac{x}{y} \]
            8. +-commutativeN/A

              \[\leadsto {a}^{\color{blue}{\left(-1 + t\right)}} \cdot \frac{x}{y} \]
            9. +-lowering-+.f64N/A

              \[\leadsto {a}^{\color{blue}{\left(-1 + t\right)}} \cdot \frac{x}{y} \]
            10. /-lowering-/.f6464.7

              \[\leadsto {a}^{\left(-1 + t\right)} \cdot \color{blue}{\frac{x}{y}} \]
          8. Simplified64.7%

            \[\leadsto \color{blue}{{a}^{\left(-1 + t\right)} \cdot \frac{x}{y}} \]
        3. Recombined 2 regimes into one program.
        4. Final simplification75.7%

          \[\leadsto \begin{array}{l} \mathbf{if}\;y \leq -2.6 \cdot 10^{+36}:\\ \;\;\;\;\frac{x \cdot {z}^{y}}{y}\\ \mathbf{elif}\;y \leq 5.5 \cdot 10^{+61}:\\ \;\;\;\;{a}^{\left(t + -1\right)} \cdot \frac{x}{y}\\ \mathbf{else}:\\ \;\;\;\;\frac{x \cdot {z}^{y}}{y}\\ \end{array} \]
        5. Add Preprocessing

        Alternative 13: 57.5% accurate, 2.7× speedup?

        \[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;b \leq -115:\\ \;\;\;\;\frac{x}{y \cdot e^{b}}\\ \mathbf{else}:\\ \;\;\;\;\frac{x}{a \cdot \left(y \cdot \mathsf{fma}\left(b, \mathsf{fma}\left(b, \mathsf{fma}\left(b, 0.16666666666666666, 0.5\right), 1\right), 1\right)\right)}\\ \end{array} \end{array} \]
        (FPCore (x y z t a b)
         :precision binary64
         (if (<= b -115.0)
           (/ x (* y (exp b)))
           (/ x (* a (* y (fma b (fma b (fma b 0.16666666666666666 0.5) 1.0) 1.0))))))
        double code(double x, double y, double z, double t, double a, double b) {
        	double tmp;
        	if (b <= -115.0) {
        		tmp = x / (y * exp(b));
        	} else {
        		tmp = x / (a * (y * fma(b, fma(b, fma(b, 0.16666666666666666, 0.5), 1.0), 1.0)));
        	}
        	return tmp;
        }
        
        # Piecewise approximation of x * exp((y*log(z) + (t-1)*log(a)) - b) / y.
        function code(x, y, z, t, a, b)
        	tmp = 0.0
        	if (b <= -115.0)
        		# Large negative b: keep the exact exp(b) factor.
        		tmp = Float64(x / Float64(y * exp(b)));
        	else
        		# exp(b) replaced by its cubic Taylor polynomial in fused-multiply-add form.
        		tmp = Float64(x / Float64(a * Float64(y * fma(b, fma(b, fma(b, 0.16666666666666666, 0.5), 1.0), 1.0))));
        	end
        	return tmp
        end
        
        code[x_, y_, z_, t_, a_, b_] := If[LessEqual[b, -115.0], N[(x / N[(y * N[Exp[b], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(x / N[(a * N[(y * N[(b * N[(b * N[(b * 0.16666666666666666 + 0.5), $MachinePrecision] + 1.0), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
        
        \begin{array}{l}
        
        \\
        \begin{array}{l}
        \mathbf{if}\;b \leq -115:\\
        \;\;\;\;\frac{x}{y \cdot e^{b}}\\
        
        \mathbf{else}:\\
        \;\;\;\;\frac{x}{a \cdot \left(y \cdot \mathsf{fma}\left(b, \mathsf{fma}\left(b, \mathsf{fma}\left(b, 0.16666666666666666, 0.5\right), 1\right), 1\right)\right)}\\
        
        
        \end{array}
        \end{array}
        
        Derivation
        1. Split input into 2 regimes
        2. if b < -115

          1. Initial program 100.0%

            \[\frac{x \cdot e^{\left(y \cdot \log z + \left(t - 1\right) \cdot \log a\right) - b}}{y} \]
          2. Add Preprocessing
          3. Taylor expanded in y around inf

            \[\leadsto \frac{x \cdot e^{\color{blue}{y \cdot \log z} - b}}{y} \]
          4. Step-by-step derivation
            1. +-rgt-identityN/A

              \[\leadsto \frac{x \cdot e^{\color{blue}{\left(y \cdot \log z + 0\right)} - b}}{y} \]
            2. accelerator-lowering-fma.f64N/A

              \[\leadsto \frac{x \cdot e^{\color{blue}{\mathsf{fma}\left(y, \log z, 0\right)} - b}}{y} \]
            3. log-lowering-log.f6485.5

              \[\leadsto \frac{x \cdot e^{\mathsf{fma}\left(y, \color{blue}{\log z}, 0\right) - b}}{y} \]
          5. Simplified85.5%

            \[\leadsto \frac{x \cdot e^{\color{blue}{\mathsf{fma}\left(y, \log z, 0\right)} - b}}{y} \]
          6. Taylor expanded in y around 0

            \[\leadsto \frac{\color{blue}{x \cdot e^{\mathsf{neg}\left(b\right)}}}{y} \]
          7. Step-by-step derivation
            1. *-lowering-*.f64N/A

              \[\leadsto \frac{\color{blue}{x \cdot e^{\mathsf{neg}\left(b\right)}}}{y} \]
            2. exp-lowering-exp.f64N/A

              \[\leadsto \frac{x \cdot \color{blue}{e^{\mathsf{neg}\left(b\right)}}}{y} \]
            3. neg-sub0N/A

              \[\leadsto \frac{x \cdot e^{\color{blue}{0 - b}}}{y} \]
            4. --lowering--.f6474.2

              \[\leadsto \frac{x \cdot e^{\color{blue}{0 - b}}}{y} \]
          8. Simplified74.2%

            \[\leadsto \frac{\color{blue}{x \cdot e^{0 - b}}}{y} \]
          9. Step-by-step derivation
            1. sub0-negN/A

              \[\leadsto \frac{x \cdot e^{\color{blue}{\mathsf{neg}\left(b\right)}}}{y} \]
            2. exp-negN/A

              \[\leadsto \frac{x \cdot \color{blue}{\frac{1}{e^{b}}}}{y} \]
            3. un-div-invN/A

              \[\leadsto \frac{\color{blue}{\frac{x}{e^{b}}}}{y} \]
            4. associate-/l/N/A

              \[\leadsto \color{blue}{\frac{x}{y \cdot e^{b}}} \]
            5. /-lowering-/.f64N/A

              \[\leadsto \color{blue}{\frac{x}{y \cdot e^{b}}} \]
            6. *-lowering-*.f64N/A

              \[\leadsto \frac{x}{\color{blue}{y \cdot e^{b}}} \]
            7. exp-lowering-exp.f6474.2

              \[\leadsto \frac{x}{y \cdot \color{blue}{e^{b}}} \]
          10. Applied egg-rr74.2%

            \[\leadsto \color{blue}{\frac{x}{y \cdot e^{b}}} \]

          if -115 < b

          1. Initial program 97.9%

            \[\frac{x \cdot e^{\left(y \cdot \log z + \left(t - 1\right) \cdot \log a\right) - b}}{y} \]
          2. Add Preprocessing
          3. Taylor expanded in y around 0

            \[\leadsto \color{blue}{\frac{x \cdot e^{\log a \cdot \left(t - 1\right) - b}}{y}} \]
          4. Step-by-step derivation
            1. *-commutativeN/A

              \[\leadsto \frac{\color{blue}{e^{\log a \cdot \left(t - 1\right) - b} \cdot x}}{y} \]
            2. exp-diffN/A

              \[\leadsto \frac{\color{blue}{\frac{e^{\log a \cdot \left(t - 1\right)}}{e^{b}}} \cdot x}{y} \]
            3. associate-*l/N/A

              \[\leadsto \frac{\color{blue}{\frac{e^{\log a \cdot \left(t - 1\right)} \cdot x}{e^{b}}}}{y} \]
            4. associate-/l/N/A

              \[\leadsto \color{blue}{\frac{e^{\log a \cdot \left(t - 1\right)} \cdot x}{y \cdot e^{b}}} \]
            5. /-lowering-/.f64N/A

              \[\leadsto \color{blue}{\frac{e^{\log a \cdot \left(t - 1\right)} \cdot x}{y \cdot e^{b}}} \]
            6. *-lowering-*.f64N/A

              \[\leadsto \frac{\color{blue}{e^{\log a \cdot \left(t - 1\right)} \cdot x}}{y \cdot e^{b}} \]
            7. exp-prodN/A

              \[\leadsto \frac{\color{blue}{{\left(e^{\log a}\right)}^{\left(t - 1\right)}} \cdot x}{y \cdot e^{b}} \]
            8. pow-lowering-pow.f64N/A

              \[\leadsto \frac{\color{blue}{{\left(e^{\log a}\right)}^{\left(t - 1\right)}} \cdot x}{y \cdot e^{b}} \]
            9. rem-exp-logN/A

              \[\leadsto \frac{{\color{blue}{a}}^{\left(t - 1\right)} \cdot x}{y \cdot e^{b}} \]
            10. sub-negN/A

              \[\leadsto \frac{{a}^{\color{blue}{\left(t + \left(\mathsf{neg}\left(1\right)\right)\right)}} \cdot x}{y \cdot e^{b}} \]
            11. metadata-evalN/A

              \[\leadsto \frac{{a}^{\left(t + \color{blue}{-1}\right)} \cdot x}{y \cdot e^{b}} \]
            12. +-lowering-+.f64N/A

              \[\leadsto \frac{{a}^{\color{blue}{\left(t + -1\right)}} \cdot x}{y \cdot e^{b}} \]
            13. *-lowering-*.f64N/A

              \[\leadsto \frac{{a}^{\left(t + -1\right)} \cdot x}{\color{blue}{y \cdot e^{b}}} \]
            14. exp-lowering-exp.f6464.7

              \[\leadsto \frac{{a}^{\left(t + -1\right)} \cdot x}{y \cdot \color{blue}{e^{b}}} \]
          5. Simplified64.7%

            \[\leadsto \color{blue}{\frac{{a}^{\left(t + -1\right)} \cdot x}{y \cdot e^{b}}} \]
          6. Taylor expanded in t around 0

            \[\leadsto \color{blue}{\frac{x}{a \cdot \left(y \cdot e^{b}\right)}} \]
          7. Step-by-step derivation
            1. /-lowering-/.f64N/A

              \[\leadsto \color{blue}{\frac{x}{a \cdot \left(y \cdot e^{b}\right)}} \]
            2. *-lowering-*.f64N/A

              \[\leadsto \frac{x}{\color{blue}{a \cdot \left(y \cdot e^{b}\right)}} \]
            3. *-lowering-*.f64N/A

              \[\leadsto \frac{x}{a \cdot \color{blue}{\left(y \cdot e^{b}\right)}} \]
            4. exp-lowering-exp.f6452.7

              \[\leadsto \frac{x}{a \cdot \left(y \cdot \color{blue}{e^{b}}\right)} \]
          8. Simplified52.7%

            \[\leadsto \color{blue}{\frac{x}{a \cdot \left(y \cdot e^{b}\right)}} \]
          9. Taylor expanded in b around 0

            \[\leadsto \frac{x}{a \cdot \left(y \cdot \color{blue}{\left(1 + b \cdot \left(1 + b \cdot \left(\frac{1}{2} + \frac{1}{6} \cdot b\right)\right)\right)}\right)} \]
          10. Step-by-step derivation
            1. +-commutativeN/A

              \[\leadsto \frac{x}{a \cdot \left(y \cdot \color{blue}{\left(b \cdot \left(1 + b \cdot \left(\frac{1}{2} + \frac{1}{6} \cdot b\right)\right) + 1\right)}\right)} \]
            2. accelerator-lowering-fma.f64N/A

              \[\leadsto \frac{x}{a \cdot \left(y \cdot \color{blue}{\mathsf{fma}\left(b, 1 + b \cdot \left(\frac{1}{2} + \frac{1}{6} \cdot b\right), 1\right)}\right)} \]
            3. +-commutativeN/A

              \[\leadsto \frac{x}{a \cdot \left(y \cdot \mathsf{fma}\left(b, \color{blue}{b \cdot \left(\frac{1}{2} + \frac{1}{6} \cdot b\right) + 1}, 1\right)\right)} \]
            4. accelerator-lowering-fma.f64N/A

              \[\leadsto \frac{x}{a \cdot \left(y \cdot \mathsf{fma}\left(b, \color{blue}{\mathsf{fma}\left(b, \frac{1}{2} + \frac{1}{6} \cdot b, 1\right)}, 1\right)\right)} \]
            5. +-commutativeN/A

              \[\leadsto \frac{x}{a \cdot \left(y \cdot \mathsf{fma}\left(b, \mathsf{fma}\left(b, \color{blue}{\frac{1}{6} \cdot b + \frac{1}{2}}, 1\right), 1\right)\right)} \]
            6. *-commutativeN/A

              \[\leadsto \frac{x}{a \cdot \left(y \cdot \mathsf{fma}\left(b, \mathsf{fma}\left(b, \color{blue}{b \cdot \frac{1}{6}} + \frac{1}{2}, 1\right), 1\right)\right)} \]
            7. accelerator-lowering-fma.f6450.8

              \[\leadsto \frac{x}{a \cdot \left(y \cdot \mathsf{fma}\left(b, \mathsf{fma}\left(b, \color{blue}{\mathsf{fma}\left(b, 0.16666666666666666, 0.5\right)}, 1\right), 1\right)\right)} \]
          11. Simplified50.8%

            \[\leadsto \frac{x}{a \cdot \left(y \cdot \color{blue}{\mathsf{fma}\left(b, \mathsf{fma}\left(b, \mathsf{fma}\left(b, 0.16666666666666666, 0.5\right), 1\right), 1\right)}\right)} \]
        3. Recombined 2 regimes into one program.
        4. Add Preprocessing

        Alternative 14: 45.4% accurate, 9.6× speedup?

        \[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;b \leq -180:\\ \;\;\;\;x \cdot \frac{\mathsf{fma}\left(b, \mathsf{fma}\left(b, 0.5, -1\right), 1\right)}{y}\\ \mathbf{else}:\\ \;\;\;\;\frac{x}{a \cdot \mathsf{fma}\left(y, b, y\right)}\\ \end{array} \end{array} \]
        (FPCore (x y z t a b)
         :precision binary64
         (if (<= b -180.0)
           (* x (/ (fma b (fma b 0.5 -1.0) 1.0) y))
           (/ x (* a (fma y b y)))))
        double code(double x, double y, double z, double t, double a, double b) {
        	double tmp;
        	if (b <= -180.0) {
        		tmp = x * (fma(b, fma(b, 0.5, -1.0), 1.0) / y);
        	} else {
        		tmp = x / (a * fma(y, b, y));
        	}
        	return tmp;
        }
        
        # Piecewise approximation of x * exp((y*log(z) + (t-1)*log(a)) - b) / y.
        function code(x, y, z, t, a, b)
        	tmp = 0.0
        	if (b <= -180.0)
        		# Large negative b: quadratic polynomial in b, scaled by x/y.
        		tmp = Float64(x * Float64(fma(b, fma(b, 0.5, -1.0), 1.0) / y));
        	else
        		# Otherwise the exp factor collapses to (1 + b), folded into y via fma.
        		tmp = Float64(x / Float64(a * fma(y, b, y)));
        	end
        	return tmp
        end
        
        code[x_, y_, z_, t_, a_, b_] := If[LessEqual[b, -180.0], N[(x * N[(N[(b * N[(b * 0.5 + -1.0), $MachinePrecision] + 1.0), $MachinePrecision] / y), $MachinePrecision]), $MachinePrecision], N[(x / N[(a * N[(y * b + y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
        
        \begin{array}{l}
        
        \\
        \begin{array}{l}
        \mathbf{if}\;b \leq -180:\\
        \;\;\;\;x \cdot \frac{\mathsf{fma}\left(b, \mathsf{fma}\left(b, 0.5, -1\right), 1\right)}{y}\\
        
        \mathbf{else}:\\
        \;\;\;\;\frac{x}{a \cdot \mathsf{fma}\left(y, b, y\right)}\\
        
        
        \end{array}
        \end{array}
        
        Derivation
        1. Split input into 2 regimes
        2. if b < -180

          1. Initial program 100.0%

            \[\frac{x \cdot e^{\left(y \cdot \log z + \left(t - 1\right) \cdot \log a\right) - b}}{y} \]
          2. Add Preprocessing
          3. Taylor expanded in y around inf

            \[\leadsto \frac{x \cdot e^{\color{blue}{y \cdot \log z} - b}}{y} \]
          4. Step-by-step derivation
            1. +-rgt-identityN/A

              \[\leadsto \frac{x \cdot e^{\color{blue}{\left(y \cdot \log z + 0\right)} - b}}{y} \]
            2. accelerator-lowering-fma.f64N/A

              \[\leadsto \frac{x \cdot e^{\color{blue}{\mathsf{fma}\left(y, \log z, 0\right)} - b}}{y} \]
            3. log-lowering-log.f6485.5

              \[\leadsto \frac{x \cdot e^{\mathsf{fma}\left(y, \color{blue}{\log z}, 0\right) - b}}{y} \]
          5. Simplified85.5%

            \[\leadsto \frac{x \cdot e^{\color{blue}{\mathsf{fma}\left(y, \log z, 0\right)} - b}}{y} \]
          6. Taylor expanded in y around 0

            \[\leadsto \frac{\color{blue}{x \cdot e^{\mathsf{neg}\left(b\right)}}}{y} \]
          7. Step-by-step derivation
            1. *-lowering-*.f64N/A

              \[\leadsto \frac{\color{blue}{x \cdot e^{\mathsf{neg}\left(b\right)}}}{y} \]
            2. exp-lowering-exp.f64N/A

              \[\leadsto \frac{x \cdot \color{blue}{e^{\mathsf{neg}\left(b\right)}}}{y} \]
            3. neg-sub0N/A

              \[\leadsto \frac{x \cdot e^{\color{blue}{0 - b}}}{y} \]
            4. --lowering--.f6474.2

              \[\leadsto \frac{x \cdot e^{\color{blue}{0 - b}}}{y} \]
          8. Simplified74.2%

            \[\leadsto \frac{\color{blue}{x \cdot e^{0 - b}}}{y} \]
          9. Taylor expanded in b around 0

            \[\leadsto \color{blue}{b \cdot \left(-1 \cdot \frac{x}{y} + \frac{1}{2} \cdot \frac{b \cdot x}{y}\right) + \frac{x}{y}} \]
          10. Simplified57.5%

            \[\leadsto \color{blue}{\frac{x}{y} \cdot \mathsf{fma}\left(b, \mathsf{fma}\left(0.5, b, -1\right), 1\right)} \]
          11. Step-by-step derivation
            1. associate-*l/N/A

              \[\leadsto \color{blue}{\frac{x \cdot \left(b \cdot \left(\frac{1}{2} \cdot b + -1\right) + 1\right)}{y}} \]
            2. associate-/l*N/A

              \[\leadsto \color{blue}{x \cdot \frac{b \cdot \left(\frac{1}{2} \cdot b + -1\right) + 1}{y}} \]
            3. *-commutativeN/A

              \[\leadsto \color{blue}{\frac{b \cdot \left(\frac{1}{2} \cdot b + -1\right) + 1}{y} \cdot x} \]
            4. *-lowering-*.f64N/A

              \[\leadsto \color{blue}{\frac{b \cdot \left(\frac{1}{2} \cdot b + -1\right) + 1}{y} \cdot x} \]
            5. /-lowering-/.f64N/A

              \[\leadsto \color{blue}{\frac{b \cdot \left(\frac{1}{2} \cdot b + -1\right) + 1}{y}} \cdot x \]
            6. accelerator-lowering-fma.f64N/A

              \[\leadsto \frac{\color{blue}{\mathsf{fma}\left(b, \frac{1}{2} \cdot b + -1, 1\right)}}{y} \cdot x \]
            7. *-commutativeN/A

              \[\leadsto \frac{\mathsf{fma}\left(b, \color{blue}{b \cdot \frac{1}{2}} + -1, 1\right)}{y} \cdot x \]
            8. accelerator-lowering-fma.f6466.7

              \[\leadsto \frac{\mathsf{fma}\left(b, \color{blue}{\mathsf{fma}\left(b, 0.5, -1\right)}, 1\right)}{y} \cdot x \]
          12. Applied egg-rr66.7%

            \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(b, \mathsf{fma}\left(b, 0.5, -1\right), 1\right)}{y} \cdot x} \]

          if -180 < b

          1. Initial program 97.9%

            \[\frac{x \cdot e^{\left(y \cdot \log z + \left(t - 1\right) \cdot \log a\right) - b}}{y} \]
          2. Add Preprocessing
          3. Taylor expanded in y around 0

            \[\leadsto \color{blue}{\frac{x \cdot e^{\log a \cdot \left(t - 1\right) - b}}{y}} \]
          4. Step-by-step derivation
            1. *-commutativeN/A

              \[\leadsto \frac{\color{blue}{e^{\log a \cdot \left(t - 1\right) - b} \cdot x}}{y} \]
            2. exp-diffN/A

              \[\leadsto \frac{\color{blue}{\frac{e^{\log a \cdot \left(t - 1\right)}}{e^{b}}} \cdot x}{y} \]
            3. associate-*l/N/A

              \[\leadsto \frac{\color{blue}{\frac{e^{\log a \cdot \left(t - 1\right)} \cdot x}{e^{b}}}}{y} \]
            4. associate-/l/N/A

              \[\leadsto \color{blue}{\frac{e^{\log a \cdot \left(t - 1\right)} \cdot x}{y \cdot e^{b}}} \]
            5. /-lowering-/.f64N/A

              \[\leadsto \color{blue}{\frac{e^{\log a \cdot \left(t - 1\right)} \cdot x}{y \cdot e^{b}}} \]
            6. *-lowering-*.f64N/A

              \[\leadsto \frac{\color{blue}{e^{\log a \cdot \left(t - 1\right)} \cdot x}}{y \cdot e^{b}} \]
            7. exp-prodN/A

              \[\leadsto \frac{\color{blue}{{\left(e^{\log a}\right)}^{\left(t - 1\right)}} \cdot x}{y \cdot e^{b}} \]
            8. pow-lowering-pow.f64N/A

              \[\leadsto \frac{\color{blue}{{\left(e^{\log a}\right)}^{\left(t - 1\right)}} \cdot x}{y \cdot e^{b}} \]
            9. rem-exp-logN/A

              \[\leadsto \frac{{\color{blue}{a}}^{\left(t - 1\right)} \cdot x}{y \cdot e^{b}} \]
            10. sub-negN/A

              \[\leadsto \frac{{a}^{\color{blue}{\left(t + \left(\mathsf{neg}\left(1\right)\right)\right)}} \cdot x}{y \cdot e^{b}} \]
            11. metadata-evalN/A

              \[\leadsto \frac{{a}^{\left(t + \color{blue}{-1}\right)} \cdot x}{y \cdot e^{b}} \]
            12. +-lowering-+.f64N/A

              \[\leadsto \frac{{a}^{\color{blue}{\left(t + -1\right)}} \cdot x}{y \cdot e^{b}} \]
            13. *-lowering-*.f64N/A

              \[\leadsto \frac{{a}^{\left(t + -1\right)} \cdot x}{\color{blue}{y \cdot e^{b}}} \]
            14. exp-lowering-exp.f6464.7

              \[\leadsto \frac{{a}^{\left(t + -1\right)} \cdot x}{y \cdot \color{blue}{e^{b}}} \]
          5. Simplified64.7%

            \[\leadsto \color{blue}{\frac{{a}^{\left(t + -1\right)} \cdot x}{y \cdot e^{b}}} \]
          6. Taylor expanded in t around 0

            \[\leadsto \color{blue}{\frac{x}{a \cdot \left(y \cdot e^{b}\right)}} \]
          7. Step-by-step derivation
            1. /-lowering-/.f64N/A

              \[\leadsto \color{blue}{\frac{x}{a \cdot \left(y \cdot e^{b}\right)}} \]
            2. *-lowering-*.f64N/A

              \[\leadsto \frac{x}{\color{blue}{a \cdot \left(y \cdot e^{b}\right)}} \]
            3. *-lowering-*.f64N/A

              \[\leadsto \frac{x}{a \cdot \color{blue}{\left(y \cdot e^{b}\right)}} \]
            4. exp-lowering-exp.f6452.7

              \[\leadsto \frac{x}{a \cdot \left(y \cdot \color{blue}{e^{b}}\right)} \]
          8. Simplified52.7%

            \[\leadsto \color{blue}{\frac{x}{a \cdot \left(y \cdot e^{b}\right)}} \]
          9. Taylor expanded in b around 0

            \[\leadsto \frac{x}{\color{blue}{a \cdot y + a \cdot \left(b \cdot y\right)}} \]
          10. Step-by-step derivation
            1. distribute-lft-outN/A

              \[\leadsto \frac{x}{\color{blue}{a \cdot \left(y + b \cdot y\right)}} \]
            2. *-lowering-*.f64N/A

              \[\leadsto \frac{x}{\color{blue}{a \cdot \left(y + b \cdot y\right)}} \]
            3. +-commutativeN/A

              \[\leadsto \frac{x}{a \cdot \color{blue}{\left(b \cdot y + y\right)}} \]
            4. *-commutativeN/A

              \[\leadsto \frac{x}{a \cdot \left(\color{blue}{y \cdot b} + y\right)} \]
            5. accelerator-lowering-fma.f6436.8

              \[\leadsto \frac{x}{a \cdot \color{blue}{\mathsf{fma}\left(y, b, y\right)}} \]
          11. Simplified36.8%

            \[\leadsto \frac{x}{\color{blue}{a \cdot \mathsf{fma}\left(y, b, y\right)}} \]
        3. Recombined 2 regimes into one program.
        4. Final simplification (43.9% accuracy)

          \[\leadsto \begin{array}{l} \mathbf{if}\;b \leq -180:\\ \;\;\;\;x \cdot \frac{\mathsf{fma}\left(b, \mathsf{fma}\left(b, 0.5, -1\right), 1\right)}{y}\\ \mathbf{else}:\\ \;\;\;\;\frac{x}{a \cdot \mathsf{fma}\left(y, b, y\right)}\\ \end{array} \]
        5. Add Preprocessing

        Alternative 15: 44.0% accurate, 9.9× speedup

        \[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;b \leq -11:\\ \;\;\;\;\frac{x}{y} \cdot \left(b \cdot \mathsf{fma}\left(b, 0.5, -1\right)\right)\\ \mathbf{else}:\\ \;\;\;\;\frac{x}{a \cdot \mathsf{fma}\left(y, b, y\right)}\\ \end{array} \end{array} \]
        ; Alternative 15: piecewise approximation split at b = -11; z and t are unused inputs.
        (FPCore (x y z t a b)
         :precision binary64
         (if (<= b -11.0) (* (/ x y) (* b (fma b 0.5 -1.0))) (/ x (* a (fma y b y)))))
        double code(double x, double y, double z, double t, double a, double b) {
        	double tmp;
        	if (b <= -11.0) {
        		tmp = (x / y) * (b * fma(b, 0.5, -1.0));
        	} else {
        		tmp = x / (a * fma(y, b, y));
        	}
        	return tmp;
        }
        
        # Alternative 15: piecewise approximation split at b = -11.
        # z and t are unused in this regime split but kept for interface parity.
        function code(x, y, z, t, a, b)
        	if b <= -11.0
        		# Large negative b: quadratic polynomial in b, scaled by x/y.
        		return Float64(Float64(x / y) * Float64(b * fma(b, 0.5, -1.0)))
        	else
        		# Otherwise: x / (a * (y + y*b)), with the sum fused via fma.
        		return Float64(x / Float64(a * fma(y, b, y)))
        	end
        end
        
        code[x_, y_, z_, t_, a_, b_] := If[LessEqual[b, -11.0], N[(N[(x / y), $MachinePrecision] * N[(b * N[(b * 0.5 + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(x / N[(a * N[(y * b + y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
        
        \begin{array}{l}
        
        \\
        \begin{array}{l}
        \mathbf{if}\;b \leq -11:\\
        \;\;\;\;\frac{x}{y} \cdot \left(b \cdot \mathsf{fma}\left(b, 0.5, -1\right)\right)\\
        
        \mathbf{else}:\\
        \;\;\;\;\frac{x}{a \cdot \mathsf{fma}\left(y, b, y\right)}\\
        
        
        \end{array}
        \end{array}
        
        Derivation
        1. Split input into 2 regimes
        2. if b < -11

          1. Initial program 100.0%

            \[\frac{x \cdot e^{\left(y \cdot \log z + \left(t - 1\right) \cdot \log a\right) - b}}{y} \]
          2. Add Preprocessing
          3. Taylor expanded in y around inf

            \[\leadsto \frac{x \cdot e^{\color{blue}{y \cdot \log z} - b}}{y} \]
          4. Step-by-step derivation
            1. +-rgt-identityN/A

              \[\leadsto \frac{x \cdot e^{\color{blue}{\left(y \cdot \log z + 0\right)} - b}}{y} \]
            2. accelerator-lowering-fma.f64N/A

              \[\leadsto \frac{x \cdot e^{\color{blue}{\mathsf{fma}\left(y, \log z, 0\right)} - b}}{y} \]
            3. log-lowering-log.f6485.5

              \[\leadsto \frac{x \cdot e^{\mathsf{fma}\left(y, \color{blue}{\log z}, 0\right) - b}}{y} \]
          5. Simplified85.5%

            \[\leadsto \frac{x \cdot e^{\color{blue}{\mathsf{fma}\left(y, \log z, 0\right)} - b}}{y} \]
          6. Taylor expanded in y around 0

            \[\leadsto \frac{\color{blue}{x \cdot e^{\mathsf{neg}\left(b\right)}}}{y} \]
          7. Step-by-step derivation
            1. *-lowering-*.f64N/A

              \[\leadsto \frac{\color{blue}{x \cdot e^{\mathsf{neg}\left(b\right)}}}{y} \]
            2. exp-lowering-exp.f64N/A

              \[\leadsto \frac{x \cdot \color{blue}{e^{\mathsf{neg}\left(b\right)}}}{y} \]
            3. neg-sub0N/A

              \[\leadsto \frac{x \cdot e^{\color{blue}{0 - b}}}{y} \]
            4. --lowering--.f6474.2

              \[\leadsto \frac{x \cdot e^{\color{blue}{0 - b}}}{y} \]
          8. Simplified74.2%

            \[\leadsto \frac{\color{blue}{x \cdot e^{0 - b}}}{y} \]
          9. Taylor expanded in b around 0

            \[\leadsto \color{blue}{b \cdot \left(-1 \cdot \frac{x}{y} + \frac{1}{2} \cdot \frac{b \cdot x}{y}\right) + \frac{x}{y}} \]
          10. Simplified57.5%

            \[\leadsto \color{blue}{\frac{x}{y} \cdot \mathsf{fma}\left(b, \mathsf{fma}\left(0.5, b, -1\right), 1\right)} \]
          11. Taylor expanded in b around inf

            \[\leadsto \frac{x}{y} \cdot \color{blue}{\left({b}^{2} \cdot \left(\frac{1}{2} - \frac{1}{b}\right)\right)} \]
          12. Step-by-step derivation
            1. unpow2N/A

              \[\leadsto \frac{x}{y} \cdot \left(\color{blue}{\left(b \cdot b\right)} \cdot \left(\frac{1}{2} - \frac{1}{b}\right)\right) \]
            2. associate-*l*N/A

              \[\leadsto \frac{x}{y} \cdot \color{blue}{\left(b \cdot \left(b \cdot \left(\frac{1}{2} - \frac{1}{b}\right)\right)\right)} \]
            3. sub-negN/A

              \[\leadsto \frac{x}{y} \cdot \left(b \cdot \left(b \cdot \color{blue}{\left(\frac{1}{2} + \left(\mathsf{neg}\left(\frac{1}{b}\right)\right)\right)}\right)\right) \]
            4. distribute-rgt-inN/A

              \[\leadsto \frac{x}{y} \cdot \left(b \cdot \color{blue}{\left(\frac{1}{2} \cdot b + \left(\mathsf{neg}\left(\frac{1}{b}\right)\right) \cdot b\right)}\right) \]
            5. distribute-lft-neg-outN/A

              \[\leadsto \frac{x}{y} \cdot \left(b \cdot \left(\frac{1}{2} \cdot b + \color{blue}{\left(\mathsf{neg}\left(\frac{1}{b} \cdot b\right)\right)}\right)\right) \]
            6. lft-mult-inverseN/A

              \[\leadsto \frac{x}{y} \cdot \left(b \cdot \left(\frac{1}{2} \cdot b + \left(\mathsf{neg}\left(\color{blue}{1}\right)\right)\right)\right) \]
            7. sub-negN/A

              \[\leadsto \frac{x}{y} \cdot \left(b \cdot \color{blue}{\left(\frac{1}{2} \cdot b - 1\right)}\right) \]
            8. *-lowering-*.f64N/A

              \[\leadsto \frac{x}{y} \cdot \color{blue}{\left(b \cdot \left(\frac{1}{2} \cdot b - 1\right)\right)} \]
            9. sub-negN/A

              \[\leadsto \frac{x}{y} \cdot \left(b \cdot \color{blue}{\left(\frac{1}{2} \cdot b + \left(\mathsf{neg}\left(1\right)\right)\right)}\right) \]
            10. metadata-evalN/A

              \[\leadsto \frac{x}{y} \cdot \left(b \cdot \left(\frac{1}{2} \cdot b + \color{blue}{-1}\right)\right) \]
            11. *-commutativeN/A

              \[\leadsto \frac{x}{y} \cdot \left(b \cdot \left(\color{blue}{b \cdot \frac{1}{2}} + -1\right)\right) \]
            12. accelerator-lowering-fma.f6457.5

              \[\leadsto \frac{x}{y} \cdot \left(b \cdot \color{blue}{\mathsf{fma}\left(b, 0.5, -1\right)}\right) \]
          13. Simplified57.5%

            \[\leadsto \frac{x}{y} \cdot \color{blue}{\left(b \cdot \mathsf{fma}\left(b, 0.5, -1\right)\right)} \]

          if -11 < b

          1. Initial program 97.9%

            \[\frac{x \cdot e^{\left(y \cdot \log z + \left(t - 1\right) \cdot \log a\right) - b}}{y} \]
          2. Add Preprocessing
          3. Taylor expanded in y around 0

            \[\leadsto \color{blue}{\frac{x \cdot e^{\log a \cdot \left(t - 1\right) - b}}{y}} \]
          4. Step-by-step derivation
            1. *-commutativeN/A

              \[\leadsto \frac{\color{blue}{e^{\log a \cdot \left(t - 1\right) - b} \cdot x}}{y} \]
            2. exp-diffN/A

              \[\leadsto \frac{\color{blue}{\frac{e^{\log a \cdot \left(t - 1\right)}}{e^{b}}} \cdot x}{y} \]
            3. associate-*l/N/A

              \[\leadsto \frac{\color{blue}{\frac{e^{\log a \cdot \left(t - 1\right)} \cdot x}{e^{b}}}}{y} \]
            4. associate-/l/N/A

              \[\leadsto \color{blue}{\frac{e^{\log a \cdot \left(t - 1\right)} \cdot x}{y \cdot e^{b}}} \]
            5. /-lowering-/.f64N/A

              \[\leadsto \color{blue}{\frac{e^{\log a \cdot \left(t - 1\right)} \cdot x}{y \cdot e^{b}}} \]
            6. *-lowering-*.f64N/A

              \[\leadsto \frac{\color{blue}{e^{\log a \cdot \left(t - 1\right)} \cdot x}}{y \cdot e^{b}} \]
            7. exp-prodN/A

              \[\leadsto \frac{\color{blue}{{\left(e^{\log a}\right)}^{\left(t - 1\right)}} \cdot x}{y \cdot e^{b}} \]
            8. pow-lowering-pow.f64N/A

              \[\leadsto \frac{\color{blue}{{\left(e^{\log a}\right)}^{\left(t - 1\right)}} \cdot x}{y \cdot e^{b}} \]
            9. rem-exp-logN/A

              \[\leadsto \frac{{\color{blue}{a}}^{\left(t - 1\right)} \cdot x}{y \cdot e^{b}} \]
            10. sub-negN/A

              \[\leadsto \frac{{a}^{\color{blue}{\left(t + \left(\mathsf{neg}\left(1\right)\right)\right)}} \cdot x}{y \cdot e^{b}} \]
            11. metadata-evalN/A

              \[\leadsto \frac{{a}^{\left(t + \color{blue}{-1}\right)} \cdot x}{y \cdot e^{b}} \]
            12. +-lowering-+.f64N/A

              \[\leadsto \frac{{a}^{\color{blue}{\left(t + -1\right)}} \cdot x}{y \cdot e^{b}} \]
            13. *-lowering-*.f64N/A

              \[\leadsto \frac{{a}^{\left(t + -1\right)} \cdot x}{\color{blue}{y \cdot e^{b}}} \]
            14. exp-lowering-exp.f6464.7

              \[\leadsto \frac{{a}^{\left(t + -1\right)} \cdot x}{y \cdot \color{blue}{e^{b}}} \]
          5. Simplified64.7%

            \[\leadsto \color{blue}{\frac{{a}^{\left(t + -1\right)} \cdot x}{y \cdot e^{b}}} \]
          6. Taylor expanded in t around 0

            \[\leadsto \color{blue}{\frac{x}{a \cdot \left(y \cdot e^{b}\right)}} \]
          7. Step-by-step derivation
            1. /-lowering-/.f64N/A

              \[\leadsto \color{blue}{\frac{x}{a \cdot \left(y \cdot e^{b}\right)}} \]
            2. *-lowering-*.f64N/A

              \[\leadsto \frac{x}{\color{blue}{a \cdot \left(y \cdot e^{b}\right)}} \]
            3. *-lowering-*.f64N/A

              \[\leadsto \frac{x}{a \cdot \color{blue}{\left(y \cdot e^{b}\right)}} \]
            4. exp-lowering-exp.f6452.7

              \[\leadsto \frac{x}{a \cdot \left(y \cdot \color{blue}{e^{b}}\right)} \]
          8. Simplified52.7%

            \[\leadsto \color{blue}{\frac{x}{a \cdot \left(y \cdot e^{b}\right)}} \]
          9. Taylor expanded in b around 0

            \[\leadsto \frac{x}{\color{blue}{a \cdot y + a \cdot \left(b \cdot y\right)}} \]
          10. Step-by-step derivation
            1. distribute-lft-outN/A

              \[\leadsto \frac{x}{\color{blue}{a \cdot \left(y + b \cdot y\right)}} \]
            2. *-lowering-*.f64N/A

              \[\leadsto \frac{x}{\color{blue}{a \cdot \left(y + b \cdot y\right)}} \]
            3. +-commutativeN/A

              \[\leadsto \frac{x}{a \cdot \color{blue}{\left(b \cdot y + y\right)}} \]
            4. *-commutativeN/A

              \[\leadsto \frac{x}{a \cdot \left(\color{blue}{y \cdot b} + y\right)} \]
            5. accelerator-lowering-fma.f6436.8

              \[\leadsto \frac{x}{a \cdot \color{blue}{\mathsf{fma}\left(y, b, y\right)}} \]
          11. Simplified36.8%

            \[\leadsto \frac{x}{\color{blue}{a \cdot \mathsf{fma}\left(y, b, y\right)}} \]
        3. Recombined 2 regimes into one program.
        4. Add Preprocessing

        Alternative 16: 39.4% accurate, 11.6× speedup

        \[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;b \leq -35:\\ \;\;\;\;x \cdot \frac{1 - b}{y}\\ \mathbf{else}:\\ \;\;\;\;\frac{x}{a \cdot \mathsf{fma}\left(y, b, y\right)}\\ \end{array} \end{array} \]
        ; Alternative 16: piecewise approximation split at b = -35; z and t are unused inputs.
        (FPCore (x y z t a b)
         :precision binary64
         (if (<= b -35.0) (* x (/ (- 1.0 b) y)) (/ x (* a (fma y b y)))))
        double code(double x, double y, double z, double t, double a, double b) {
        	double tmp;
        	if (b <= -35.0) {
        		tmp = x * ((1.0 - b) / y);
        	} else {
        		tmp = x / (a * fma(y, b, y));
        	}
        	return tmp;
        }
        
        # Alternative 16: piecewise approximation split at b = -35.
        # z and t are unused in this regime split but kept for interface parity.
        function code(x, y, z, t, a, b)
        	b <= -35.0 ? Float64(x * Float64(Float64(1.0 - b) / y)) :
        	             Float64(x / Float64(a * fma(y, b, y)))
        end
        
        code[x_, y_, z_, t_, a_, b_] := If[LessEqual[b, -35.0], N[(x * N[(N[(1.0 - b), $MachinePrecision] / y), $MachinePrecision]), $MachinePrecision], N[(x / N[(a * N[(y * b + y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
        
        \begin{array}{l}
        
        \\
        \begin{array}{l}
        \mathbf{if}\;b \leq -35:\\
        \;\;\;\;x \cdot \frac{1 - b}{y}\\
        
        \mathbf{else}:\\
        \;\;\;\;\frac{x}{a \cdot \mathsf{fma}\left(y, b, y\right)}\\
        
        
        \end{array}
        \end{array}
        
        Derivation
        1. Split input into 2 regimes
        2. if b < -35

          1. Initial program 100.0%

            \[\frac{x \cdot e^{\left(y \cdot \log z + \left(t - 1\right) \cdot \log a\right) - b}}{y} \]
          2. Add Preprocessing
          3. Taylor expanded in y around inf

            \[\leadsto \frac{x \cdot e^{\color{blue}{y \cdot \log z} - b}}{y} \]
          4. Step-by-step derivation
            1. +-rgt-identityN/A

              \[\leadsto \frac{x \cdot e^{\color{blue}{\left(y \cdot \log z + 0\right)} - b}}{y} \]
            2. accelerator-lowering-fma.f64N/A

              \[\leadsto \frac{x \cdot e^{\color{blue}{\mathsf{fma}\left(y, \log z, 0\right)} - b}}{y} \]
            3. log-lowering-log.f6485.5

              \[\leadsto \frac{x \cdot e^{\mathsf{fma}\left(y, \color{blue}{\log z}, 0\right) - b}}{y} \]
          5. Simplified85.5%

            \[\leadsto \frac{x \cdot e^{\color{blue}{\mathsf{fma}\left(y, \log z, 0\right)} - b}}{y} \]
          6. Taylor expanded in y around 0

            \[\leadsto \frac{\color{blue}{x \cdot e^{\mathsf{neg}\left(b\right)}}}{y} \]
          7. Step-by-step derivation
            1. *-lowering-*.f64N/A

              \[\leadsto \frac{\color{blue}{x \cdot e^{\mathsf{neg}\left(b\right)}}}{y} \]
            2. exp-lowering-exp.f64N/A

              \[\leadsto \frac{x \cdot \color{blue}{e^{\mathsf{neg}\left(b\right)}}}{y} \]
            3. neg-sub0N/A

              \[\leadsto \frac{x \cdot e^{\color{blue}{0 - b}}}{y} \]
            4. --lowering--.f6474.2

              \[\leadsto \frac{x \cdot e^{\color{blue}{0 - b}}}{y} \]
          8. Simplified74.2%

            \[\leadsto \frac{\color{blue}{x \cdot e^{0 - b}}}{y} \]
          9. Taylor expanded in b around 0

            \[\leadsto \color{blue}{-1 \cdot \frac{b \cdot x}{y} + \frac{x}{y}} \]
          10. Step-by-step derivation
            1. +-commutativeN/A

              \[\leadsto \color{blue}{\frac{x}{y} + -1 \cdot \frac{b \cdot x}{y}} \]
            2. mul-1-negN/A

              \[\leadsto \frac{x}{y} + \color{blue}{\left(\mathsf{neg}\left(\frac{b \cdot x}{y}\right)\right)} \]
            3. unsub-negN/A

              \[\leadsto \color{blue}{\frac{x}{y} - \frac{b \cdot x}{y}} \]
            4. div-subN/A

              \[\leadsto \color{blue}{\frac{x - b \cdot x}{y}} \]
            5. unsub-negN/A

              \[\leadsto \frac{\color{blue}{x + \left(\mathsf{neg}\left(b \cdot x\right)\right)}}{y} \]
            6. mul-1-negN/A

              \[\leadsto \frac{x + \color{blue}{-1 \cdot \left(b \cdot x\right)}}{y} \]
            7. /-lowering-/.f64N/A

              \[\leadsto \color{blue}{\frac{x + -1 \cdot \left(b \cdot x\right)}{y}} \]
            8. *-lft-identityN/A

              \[\leadsto \frac{\color{blue}{1 \cdot x} + -1 \cdot \left(b \cdot x\right)}{y} \]
            9. associate-*r*N/A

              \[\leadsto \frac{1 \cdot x + \color{blue}{\left(-1 \cdot b\right) \cdot x}}{y} \]
            10. distribute-rgt-outN/A

              \[\leadsto \frac{\color{blue}{x \cdot \left(1 + -1 \cdot b\right)}}{y} \]
            11. *-lowering-*.f64N/A

              \[\leadsto \frac{\color{blue}{x \cdot \left(1 + -1 \cdot b\right)}}{y} \]
            12. neg-mul-1N/A

              \[\leadsto \frac{x \cdot \left(1 + \color{blue}{\left(\mathsf{neg}\left(b\right)\right)}\right)}{y} \]
            13. unsub-negN/A

              \[\leadsto \frac{x \cdot \color{blue}{\left(1 - b\right)}}{y} \]
            14. --lowering--.f6429.2

              \[\leadsto \frac{x \cdot \color{blue}{\left(1 - b\right)}}{y} \]
          11. Simplified29.2%

            \[\leadsto \color{blue}{\frac{x \cdot \left(1 - b\right)}{y}} \]
          12. Step-by-step derivation
            1. associate-/l*N/A

              \[\leadsto \color{blue}{x \cdot \frac{1 - b}{y}} \]
            2. *-commutativeN/A

              \[\leadsto \color{blue}{\frac{1 - b}{y} \cdot x} \]
            3. *-lowering-*.f64N/A

              \[\leadsto \color{blue}{\frac{1 - b}{y} \cdot x} \]
            4. /-lowering-/.f64N/A

              \[\leadsto \color{blue}{\frac{1 - b}{y}} \cdot x \]
            5. --lowering--.f6432.4

              \[\leadsto \frac{\color{blue}{1 - b}}{y} \cdot x \]
          13. Applied egg-rr32.4%

            \[\leadsto \color{blue}{\frac{1 - b}{y} \cdot x} \]

          if -35 < b

          1. Initial program 97.9%

            \[\frac{x \cdot e^{\left(y \cdot \log z + \left(t - 1\right) \cdot \log a\right) - b}}{y} \]
          2. Add Preprocessing
          3. Taylor expanded in y around 0

            \[\leadsto \color{blue}{\frac{x \cdot e^{\log a \cdot \left(t - 1\right) - b}}{y}} \]
          4. Step-by-step derivation
            1. *-commutativeN/A

              \[\leadsto \frac{\color{blue}{e^{\log a \cdot \left(t - 1\right) - b} \cdot x}}{y} \]
            2. exp-diffN/A

              \[\leadsto \frac{\color{blue}{\frac{e^{\log a \cdot \left(t - 1\right)}}{e^{b}}} \cdot x}{y} \]
            3. associate-*l/N/A

              \[\leadsto \frac{\color{blue}{\frac{e^{\log a \cdot \left(t - 1\right)} \cdot x}{e^{b}}}}{y} \]
            4. associate-/l/N/A

              \[\leadsto \color{blue}{\frac{e^{\log a \cdot \left(t - 1\right)} \cdot x}{y \cdot e^{b}}} \]
            5. /-lowering-/.f64N/A

              \[\leadsto \color{blue}{\frac{e^{\log a \cdot \left(t - 1\right)} \cdot x}{y \cdot e^{b}}} \]
            6. *-lowering-*.f64N/A

              \[\leadsto \frac{\color{blue}{e^{\log a \cdot \left(t - 1\right)} \cdot x}}{y \cdot e^{b}} \]
            7. exp-prodN/A

              \[\leadsto \frac{\color{blue}{{\left(e^{\log a}\right)}^{\left(t - 1\right)}} \cdot x}{y \cdot e^{b}} \]
            8. pow-lowering-pow.f64N/A

              \[\leadsto \frac{\color{blue}{{\left(e^{\log a}\right)}^{\left(t - 1\right)}} \cdot x}{y \cdot e^{b}} \]
            9. rem-exp-logN/A

              \[\leadsto \frac{{\color{blue}{a}}^{\left(t - 1\right)} \cdot x}{y \cdot e^{b}} \]
            10. sub-negN/A

              \[\leadsto \frac{{a}^{\color{blue}{\left(t + \left(\mathsf{neg}\left(1\right)\right)\right)}} \cdot x}{y \cdot e^{b}} \]
            11. metadata-evalN/A

              \[\leadsto \frac{{a}^{\left(t + \color{blue}{-1}\right)} \cdot x}{y \cdot e^{b}} \]
            12. +-lowering-+.f64N/A

              \[\leadsto \frac{{a}^{\color{blue}{\left(t + -1\right)}} \cdot x}{y \cdot e^{b}} \]
            13. *-lowering-*.f64N/A

              \[\leadsto \frac{{a}^{\left(t + -1\right)} \cdot x}{\color{blue}{y \cdot e^{b}}} \]
            14. exp-lowering-exp.f6464.7

              \[\leadsto \frac{{a}^{\left(t + -1\right)} \cdot x}{y \cdot \color{blue}{e^{b}}} \]
          5. Simplified64.7%

            \[\leadsto \color{blue}{\frac{{a}^{\left(t + -1\right)} \cdot x}{y \cdot e^{b}}} \]
          6. Taylor expanded in t around 0

            \[\leadsto \color{blue}{\frac{x}{a \cdot \left(y \cdot e^{b}\right)}} \]
          7. Step-by-step derivation
            1. /-lowering-/.f64N/A

              \[\leadsto \color{blue}{\frac{x}{a \cdot \left(y \cdot e^{b}\right)}} \]
            2. *-lowering-*.f64N/A

              \[\leadsto \frac{x}{\color{blue}{a \cdot \left(y \cdot e^{b}\right)}} \]
            3. *-lowering-*.f64N/A

              \[\leadsto \frac{x}{a \cdot \color{blue}{\left(y \cdot e^{b}\right)}} \]
            4. exp-lowering-exp.f6452.7

              \[\leadsto \frac{x}{a \cdot \left(y \cdot \color{blue}{e^{b}}\right)} \]
          8. Simplified52.7%

            \[\leadsto \color{blue}{\frac{x}{a \cdot \left(y \cdot e^{b}\right)}} \]
          9. Taylor expanded in b around 0

            \[\leadsto \frac{x}{\color{blue}{a \cdot y + a \cdot \left(b \cdot y\right)}} \]
          10. Step-by-step derivation
            1. distribute-lft-outN/A

              \[\leadsto \frac{x}{\color{blue}{a \cdot \left(y + b \cdot y\right)}} \]
            2. *-lowering-*.f64N/A

              \[\leadsto \frac{x}{\color{blue}{a \cdot \left(y + b \cdot y\right)}} \]
            3. +-commutativeN/A

              \[\leadsto \frac{x}{a \cdot \color{blue}{\left(b \cdot y + y\right)}} \]
            4. *-commutativeN/A

              \[\leadsto \frac{x}{a \cdot \left(\color{blue}{y \cdot b} + y\right)} \]
            5. accelerator-lowering-fma.f6436.8

              \[\leadsto \frac{x}{a \cdot \color{blue}{\mathsf{fma}\left(y, b, y\right)}} \]
          11. Simplified36.8%

            \[\leadsto \frac{x}{\color{blue}{a \cdot \mathsf{fma}\left(y, b, y\right)}} \]
        3. Recombined 2 regimes into one program.
        4. Final simplification (35.7% accuracy)

          \[\leadsto \begin{array}{l} \mathbf{if}\;b \leq -35:\\ \;\;\;\;x \cdot \frac{1 - b}{y}\\ \mathbf{else}:\\ \;\;\;\;\frac{x}{a \cdot \mathsf{fma}\left(y, b, y\right)}\\ \end{array} \]
        5. Add Preprocessing

        Alternative 17: 34.7% accurate, 12.9× speedup

        \[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;b \leq -4.8 \cdot 10^{+24}:\\ \;\;\;\;x \cdot \frac{1 - b}{y}\\ \mathbf{else}:\\ \;\;\;\;\frac{x}{y \cdot a}\\ \end{array} \end{array} \]
        ; Alternative 17: piecewise approximation split at b = -4.8e24; z and t are unused inputs.
        (FPCore (x y z t a b)
         :precision binary64
         (if (<= b -4.8e+24) (* x (/ (- 1.0 b) y)) (/ x (* y a))))
        /* Alternative 17: piecewise approximation split at b = -4.8e24.
         * z and t are unused in this regime split but kept for interface parity. */
        double code(double x, double y, double z, double t, double a, double b) {
        	return (b <= -4.8e+24)
        		? x * ((1.0 - b) / y)   /* dominant-b regime */
        		: x / (y * a);          /* default regime */
        }
        
        real(8) function code(x, y, z, t, a, b)
            ! Alternative 17: piecewise approximation split at b = -4.8e24.
            ! z and t are unused in this regime split but kept for interface parity.
            real(8), intent (in) :: x
            real(8), intent (in) :: y
            real(8), intent (in) :: z
            real(8), intent (in) :: t
            real(8), intent (in) :: a
            real(8), intent (in) :: b
            if (b <= (-4.8d+24)) then
                ! Dominant-b regime: x * (1 - b) / y.
                code = x * ((1.0d0 - b) / y)
            else
                code = x / (y * a)
            end if
        end function
        
        /** Alternative 17: piecewise approximation split at b = -4.8e24;
         *  z and t are unused in this regime split but kept for interface parity. */
        public static double code(double x, double y, double z, double t, double a, double b) {
        	return (b <= -4.8e+24)
        		? x * ((1.0 - b) / y)   // dominant-b regime
        		: x / (y * a);          // default regime
        }
        
        def code(x, y, z, t, a, b):
        	"""Alternative 17: piecewise approximation split at b = -4.8e24.

        	z and t are unused in this regime split but kept for interface parity.
        	"""
        	if b <= -4.8e+24:
        		# Dominant-b regime: x * (1 - b) / y.
        		return x * ((1.0 - b) / y)
        	return x / (y * a)
        
        # Alternative 17: piecewise approximation split at b = -4.8e24.
        # z and t are unused in this regime split but kept for interface parity.
        function code(x, y, z, t, a, b)
        	if b <= -4.8e+24
        		# Dominant-b regime: x * (1 - b) / y.
        		return Float64(x * Float64(Float64(1.0 - b) / y))
        	else
        		return Float64(x / Float64(y * a))
        	end
        end
        
        function tmp_2 = code(x, y, z, t, a, b)
        	% Alternative 17: piecewise approximation split at b = -4.8e24.
        	% z and t are unused in this regime split but kept for interface parity.
        	if (b <= -4.8e+24)
        		% Dominant-b regime: x * (1 - b) / y.
        		tmp_2 = x * ((1.0 - b) / y);
        	else
        		tmp_2 = x / (y * a);
        	end
        end
        
        code[x_, y_, z_, t_, a_, b_] := If[LessEqual[b, -4.8e+24], N[(x * N[(N[(1.0 - b), $MachinePrecision] / y), $MachinePrecision]), $MachinePrecision], N[(x / N[(y * a), $MachinePrecision]), $MachinePrecision]]
        
        \begin{array}{l}
        
        \\
        \begin{array}{l}
        \mathbf{if}\;b \leq -4.8 \cdot 10^{+24}:\\
        \;\;\;\;x \cdot \frac{1 - b}{y}\\
        
        \mathbf{else}:\\
        \;\;\;\;\frac{x}{y \cdot a}\\
        
        
        \end{array}
        \end{array}
        
        Derivation
        1. Split input into 2 regimes
        2. if b < -4.8000000000000001e24

          1. Initial program 100.0%

            \[\frac{x \cdot e^{\left(y \cdot \log z + \left(t - 1\right) \cdot \log a\right) - b}}{y} \]
          2. Add Preprocessing
          3. Taylor expanded in y around inf

            \[\leadsto \frac{x \cdot e^{\color{blue}{y \cdot \log z} - b}}{y} \]
          4. Step-by-step derivation
            1. +-rgt-identityN/A

              \[\leadsto \frac{x \cdot e^{\color{blue}{\left(y \cdot \log z + 0\right)} - b}}{y} \]
            2. accelerator-lowering-fma.f64N/A

              \[\leadsto \frac{x \cdot e^{\color{blue}{\mathsf{fma}\left(y, \log z, 0\right)} - b}}{y} \]
            3. log-lowering-log.f6485.2

              \[\leadsto \frac{x \cdot e^{\mathsf{fma}\left(y, \color{blue}{\log z}, 0\right) - b}}{y} \]
          5. Simplified85.2%

            \[\leadsto \frac{x \cdot e^{\color{blue}{\mathsf{fma}\left(y, \log z, 0\right)} - b}}{y} \]
          6. Taylor expanded in y around 0

            \[\leadsto \frac{\color{blue}{x \cdot e^{\mathsf{neg}\left(b\right)}}}{y} \]
          7. Step-by-step derivation
            1. *-lowering-*.f64N/A

              \[\leadsto \frac{\color{blue}{x \cdot e^{\mathsf{neg}\left(b\right)}}}{y} \]
            2. exp-lowering-exp.f64N/A

              \[\leadsto \frac{x \cdot \color{blue}{e^{\mathsf{neg}\left(b\right)}}}{y} \]
            3. neg-sub0N/A

              \[\leadsto \frac{x \cdot e^{\color{blue}{0 - b}}}{y} \]
            4. --lowering--.f6473.8

              \[\leadsto \frac{x \cdot e^{\color{blue}{0 - b}}}{y} \]
          8. Simplified73.8%

            \[\leadsto \frac{\color{blue}{x \cdot e^{0 - b}}}{y} \]
          9. Taylor expanded in b around 0

            \[\leadsto \color{blue}{-1 \cdot \frac{b \cdot x}{y} + \frac{x}{y}} \]
          10. Step-by-step derivation
            1. +-commutative — N/A

              \[\leadsto \color{blue}{\frac{x}{y} + -1 \cdot \frac{b \cdot x}{y}} \]
            2. mul-1-neg — N/A

              \[\leadsto \frac{x}{y} + \color{blue}{\left(\mathsf{neg}\left(\frac{b \cdot x}{y}\right)\right)} \]
            3. unsub-neg — N/A

              \[\leadsto \color{blue}{\frac{x}{y} - \frac{b \cdot x}{y}} \]
            4. div-sub — N/A

              \[\leadsto \color{blue}{\frac{x - b \cdot x}{y}} \]
            5. unsub-neg — N/A

              \[\leadsto \frac{\color{blue}{x + \left(\mathsf{neg}\left(b \cdot x\right)\right)}}{y} \]
            6. mul-1-neg — N/A

              \[\leadsto \frac{x + \color{blue}{-1 \cdot \left(b \cdot x\right)}}{y} \]
            7. /-lowering-/.f64 — N/A

              \[\leadsto \color{blue}{\frac{x + -1 \cdot \left(b \cdot x\right)}{y}} \]
            8. *-lft-identity — N/A

              \[\leadsto \frac{\color{blue}{1 \cdot x} + -1 \cdot \left(b \cdot x\right)}{y} \]
            9. associate-*r* — N/A

              \[\leadsto \frac{1 \cdot x + \color{blue}{\left(-1 \cdot b\right) \cdot x}}{y} \]
            10. distribute-rgt-out — N/A

              \[\leadsto \frac{\color{blue}{x \cdot \left(1 + -1 \cdot b\right)}}{y} \]
            11. *-lowering-*.f64 — N/A

              \[\leadsto \frac{\color{blue}{x \cdot \left(1 + -1 \cdot b\right)}}{y} \]
            12. neg-mul-1 — N/A

              \[\leadsto \frac{x \cdot \left(1 + \color{blue}{\left(\mathsf{neg}\left(b\right)\right)}\right)}{y} \]
            13. unsub-neg — N/A

              \[\leadsto \frac{x \cdot \color{blue}{\left(1 - b\right)}}{y} \]
            14. --lowering--.f64 — 29.7

              \[\leadsto \frac{x \cdot \color{blue}{\left(1 - b\right)}}{y} \]
          11. Simplified — 29.7%

            \[\leadsto \color{blue}{\frac{x \cdot \left(1 - b\right)}{y}} \]
          12. Step-by-step derivation
            1. associate-/l* — N/A

              \[\leadsto \color{blue}{x \cdot \frac{1 - b}{y}} \]
            2. *-commutative — N/A

              \[\leadsto \color{blue}{\frac{1 - b}{y} \cdot x} \]
            3. *-lowering-*.f64 — N/A

              \[\leadsto \color{blue}{\frac{1 - b}{y} \cdot x} \]
            4. /-lowering-/.f64 — N/A

              \[\leadsto \color{blue}{\frac{1 - b}{y}} \cdot x \]
            5. --lowering--.f64 — 32.9

              \[\leadsto \frac{\color{blue}{1 - b}}{y} \cdot x \]
          13. Applied egg-rr — 32.9%

            \[\leadsto \color{blue}{\frac{1 - b}{y} \cdot x} \]

          if -4.8000000000000001e24 < b

          1. Initial program 97.9%

            \[\frac{x \cdot e^{\left(y \cdot \log z + \left(t - 1\right) \cdot \log a\right) - b}}{y} \]
          2. Add Preprocessing
          3. Taylor expanded in y around 0

            \[\leadsto \color{blue}{\frac{x \cdot e^{\log a \cdot \left(t - 1\right) - b}}{y}} \]
          4. Step-by-step derivation
            1. *-commutative — N/A

              \[\leadsto \frac{\color{blue}{e^{\log a \cdot \left(t - 1\right) - b} \cdot x}}{y} \]
            2. exp-diff — N/A

              \[\leadsto \frac{\color{blue}{\frac{e^{\log a \cdot \left(t - 1\right)}}{e^{b}}} \cdot x}{y} \]
            3. associate-*l/ — N/A

              \[\leadsto \frac{\color{blue}{\frac{e^{\log a \cdot \left(t - 1\right)} \cdot x}{e^{b}}}}{y} \]
            4. associate-/l/ — N/A

              \[\leadsto \color{blue}{\frac{e^{\log a \cdot \left(t - 1\right)} \cdot x}{y \cdot e^{b}}} \]
            5. /-lowering-/.f64 — N/A

              \[\leadsto \color{blue}{\frac{e^{\log a \cdot \left(t - 1\right)} \cdot x}{y \cdot e^{b}}} \]
            6. *-lowering-*.f64 — N/A

              \[\leadsto \frac{\color{blue}{e^{\log a \cdot \left(t - 1\right)} \cdot x}}{y \cdot e^{b}} \]
            7. exp-prod — N/A

              \[\leadsto \frac{\color{blue}{{\left(e^{\log a}\right)}^{\left(t - 1\right)}} \cdot x}{y \cdot e^{b}} \]
            8. pow-lowering-pow.f64 — N/A

              \[\leadsto \frac{\color{blue}{{\left(e^{\log a}\right)}^{\left(t - 1\right)}} \cdot x}{y \cdot e^{b}} \]
            9. rem-exp-log — N/A

              \[\leadsto \frac{{\color{blue}{a}}^{\left(t - 1\right)} \cdot x}{y \cdot e^{b}} \]
            10. sub-neg — N/A

              \[\leadsto \frac{{a}^{\color{blue}{\left(t + \left(\mathsf{neg}\left(1\right)\right)\right)}} \cdot x}{y \cdot e^{b}} \]
            11. metadata-eval — N/A

              \[\leadsto \frac{{a}^{\left(t + \color{blue}{-1}\right)} \cdot x}{y \cdot e^{b}} \]
            12. +-lowering-+.f64 — N/A

              \[\leadsto \frac{{a}^{\color{blue}{\left(t + -1\right)}} \cdot x}{y \cdot e^{b}} \]
            13. *-lowering-*.f64 — N/A

              \[\leadsto \frac{{a}^{\left(t + -1\right)} \cdot x}{\color{blue}{y \cdot e^{b}}} \]
            14. exp-lowering-exp.f64 — 64.9

              \[\leadsto \frac{{a}^{\left(t + -1\right)} \cdot x}{y \cdot \color{blue}{e^{b}}} \]
          5. Simplified — 64.9%

            \[\leadsto \color{blue}{\frac{{a}^{\left(t + -1\right)} \cdot x}{y \cdot e^{b}}} \]
          6. Taylor expanded in b around 0

            \[\leadsto \frac{{a}^{\left(t + -1\right)} \cdot x}{\color{blue}{y}} \]
          7. Step-by-step derivation
            1. Simplified — 60.2%

              \[\leadsto \frac{{a}^{\left(t + -1\right)} \cdot x}{\color{blue}{y}} \]
            2. Taylor expanded in t around 0

              \[\leadsto \color{blue}{\frac{x}{a \cdot y}} \]
            3. Step-by-step derivation
              1. /-lowering-/.f64 — N/A

                \[\leadsto \color{blue}{\frac{x}{a \cdot y}} \]
              2. *-lowering-*.f64 — 31.5

                \[\leadsto \frac{x}{\color{blue}{a \cdot y}} \]
            4. Simplified — 31.5%

              \[\leadsto \color{blue}{\frac{x}{a \cdot y}} \]
          8. Recombined 2 regimes into one program.
          9. Final simplification — 31.8%

            \[\leadsto \begin{array}{l} \mathbf{if}\;b \leq -4.8 \cdot 10^{+24}:\\ \;\;\;\;x \cdot \frac{1 - b}{y}\\ \mathbf{else}:\\ \;\;\;\;\frac{x}{y \cdot a}\\ \end{array} \]
          10. Add Preprocessing

          Alternative 18: 31.6% accurate, 19.8× speedup?

          \[\begin{array}{l} \\ \frac{x}{y \cdot a} \end{array} \]
          (FPCore (x y z t a b) :precision binary64 (/ x (* y a)))
          double code(double x, double y, double z, double t, double a, double b) {
          	return x / (y * a);
          }
          
          real(8) function code(x, y, z, t, a, b)
              real(8), intent (in) :: x
              real(8), intent (in) :: y
              real(8), intent (in) :: z
              real(8), intent (in) :: t
              real(8), intent (in) :: a
              real(8), intent (in) :: b
              code = x / (y * a)
          end function
          
          public static double code(double x, double y, double z, double t, double a, double b) {
          	return x / (y * a);
          }
          
          def code(x, y, z, t, a, b):
          	return x / (y * a)
          
          function code(x, y, z, t, a, b)
          	return Float64(x / Float64(y * a))
          end
          
          function tmp = code(x, y, z, t, a, b)
          	tmp = x / (y * a);
          end
          
          code[x_, y_, z_, t_, a_, b_] := N[(x / N[(y * a), $MachinePrecision]), $MachinePrecision]
          
          \begin{array}{l}
          
          \\
          \frac{x}{y \cdot a}
          \end{array}
          
          Derivation
          1. Initial program 98.4%

            \[\frac{x \cdot e^{\left(y \cdot \log z + \left(t - 1\right) \cdot \log a\right) - b}}{y} \]
          2. Add Preprocessing
          3. Taylor expanded in y around 0

            \[\leadsto \color{blue}{\frac{x \cdot e^{\log a \cdot \left(t - 1\right) - b}}{y}} \]
          4. Step-by-step derivation
            1. *-commutative — N/A

              \[\leadsto \frac{\color{blue}{e^{\log a \cdot \left(t - 1\right) - b} \cdot x}}{y} \]
            2. exp-diff — N/A

              \[\leadsto \frac{\color{blue}{\frac{e^{\log a \cdot \left(t - 1\right)}}{e^{b}}} \cdot x}{y} \]
            3. associate-*l/ — N/A

              \[\leadsto \frac{\color{blue}{\frac{e^{\log a \cdot \left(t - 1\right)} \cdot x}{e^{b}}}}{y} \]
            4. associate-/l/ — N/A

              \[\leadsto \color{blue}{\frac{e^{\log a \cdot \left(t - 1\right)} \cdot x}{y \cdot e^{b}}} \]
            5. /-lowering-/.f64 — N/A

              \[\leadsto \color{blue}{\frac{e^{\log a \cdot \left(t - 1\right)} \cdot x}{y \cdot e^{b}}} \]
            6. *-lowering-*.f64 — N/A

              \[\leadsto \frac{\color{blue}{e^{\log a \cdot \left(t - 1\right)} \cdot x}}{y \cdot e^{b}} \]
            7. exp-prod — N/A

              \[\leadsto \frac{\color{blue}{{\left(e^{\log a}\right)}^{\left(t - 1\right)}} \cdot x}{y \cdot e^{b}} \]
            8. pow-lowering-pow.f64 — N/A

              \[\leadsto \frac{\color{blue}{{\left(e^{\log a}\right)}^{\left(t - 1\right)}} \cdot x}{y \cdot e^{b}} \]
            9. rem-exp-log — N/A

              \[\leadsto \frac{{\color{blue}{a}}^{\left(t - 1\right)} \cdot x}{y \cdot e^{b}} \]
            10. sub-neg — N/A

              \[\leadsto \frac{{a}^{\color{blue}{\left(t + \left(\mathsf{neg}\left(1\right)\right)\right)}} \cdot x}{y \cdot e^{b}} \]
            11. metadata-eval — N/A

              \[\leadsto \frac{{a}^{\left(t + \color{blue}{-1}\right)} \cdot x}{y \cdot e^{b}} \]
            12. +-lowering-+.f64 — N/A

              \[\leadsto \frac{{a}^{\color{blue}{\left(t + -1\right)}} \cdot x}{y \cdot e^{b}} \]
            13. *-lowering-*.f64 — N/A

              \[\leadsto \frac{{a}^{\left(t + -1\right)} \cdot x}{\color{blue}{y \cdot e^{b}}} \]
            14. exp-lowering-exp.f64 — 64.2

              \[\leadsto \frac{{a}^{\left(t + -1\right)} \cdot x}{y \cdot \color{blue}{e^{b}}} \]
          5. Simplified — 64.2%

            \[\leadsto \color{blue}{\frac{{a}^{\left(t + -1\right)} \cdot x}{y \cdot e^{b}}} \]
          6. Taylor expanded in b around 0

            \[\leadsto \frac{{a}^{\left(t + -1\right)} \cdot x}{\color{blue}{y}} \]
          7. Step-by-step derivation
            1. Simplified — 58.5%

              \[\leadsto \frac{{a}^{\left(t + -1\right)} \cdot x}{\color{blue}{y}} \]
            2. Taylor expanded in t around 0

              \[\leadsto \color{blue}{\frac{x}{a \cdot y}} \]
            3. Step-by-step derivation
              1. /-lowering-/.f64 — N/A

                \[\leadsto \color{blue}{\frac{x}{a \cdot y}} \]
              2. *-lowering-*.f64 — 29.3

                \[\leadsto \frac{x}{\color{blue}{a \cdot y}} \]
            4. Simplified — 29.3%

              \[\leadsto \color{blue}{\frac{x}{a \cdot y}} \]
            5. Final simplification — 29.3%

              \[\leadsto \frac{x}{y \cdot a} \]
            6. Add Preprocessing

            Alternative 19: 16.3% accurate, 28.0× speedup?

            \[\begin{array}{l} \\ \frac{x}{y} \end{array} \]
            (FPCore (x y z t a b) :precision binary64 (/ x y))
            double code(double x, double y, double z, double t, double a, double b) {
            	return x / y;
            }
            
            real(8) function code(x, y, z, t, a, b)
                real(8), intent (in) :: x
                real(8), intent (in) :: y
                real(8), intent (in) :: z
                real(8), intent (in) :: t
                real(8), intent (in) :: a
                real(8), intent (in) :: b
                code = x / y
            end function
            
            public static double code(double x, double y, double z, double t, double a, double b) {
            	return x / y;
            }
            
            def code(x, y, z, t, a, b):
            	return x / y
            
            function code(x, y, z, t, a, b)
            	return Float64(x / y)
            end
            
            function tmp = code(x, y, z, t, a, b)
            	tmp = x / y;
            end
            
            code[x_, y_, z_, t_, a_, b_] := N[(x / y), $MachinePrecision]
            
            \begin{array}{l}
            
            \\
            \frac{x}{y}
            \end{array}
            
            Derivation
            1. Initial program 98.4%

              \[\frac{x \cdot e^{\left(y \cdot \log z + \left(t - 1\right) \cdot \log a\right) - b}}{y} \]
            2. Add Preprocessing
            3. Taylor expanded in t around inf

              \[\leadsto \frac{x \cdot e^{\color{blue}{t \cdot \log a} - b}}{y} \]
            4. Step-by-step derivation
              1. +-rgt-identity — N/A

                \[\leadsto \frac{x \cdot e^{\color{blue}{\left(t \cdot \log a + 0\right)} - b}}{y} \]
              2. *-commutative — N/A

                \[\leadsto \frac{x \cdot e^{\left(\color{blue}{\log a \cdot t} + 0\right) - b}}{y} \]
              3. accelerator-lowering-fma.f64 — N/A

                \[\leadsto \frac{x \cdot e^{\color{blue}{\mathsf{fma}\left(\log a, t, 0\right)} - b}}{y} \]
              4. rem-exp-log — N/A

                \[\leadsto \frac{x \cdot e^{\mathsf{fma}\left(\log \color{blue}{\left(e^{\log a}\right)}, t, 0\right) - b}}{y} \]
              5. log-lowering-log.f64 — N/A

                \[\leadsto \frac{x \cdot e^{\mathsf{fma}\left(\color{blue}{\log \left(e^{\log a}\right)}, t, 0\right) - b}}{y} \]
              6. rem-exp-log — 68.3

                \[\leadsto \frac{x \cdot e^{\mathsf{fma}\left(\log \color{blue}{a}, t, 0\right) - b}}{y} \]
            5. Simplified — 68.3%

              \[\leadsto \frac{x \cdot e^{\color{blue}{\mathsf{fma}\left(\log a, t, 0\right)} - b}}{y} \]
            6. Taylor expanded in b around 0

              \[\leadsto \color{blue}{\frac{x \cdot {a}^{t}}{y}} \]
            7. Step-by-step derivation
              1. /-lowering-/.f64 — N/A

                \[\leadsto \color{blue}{\frac{x \cdot {a}^{t}}{y}} \]
              2. *-lowering-*.f64 — N/A

                \[\leadsto \frac{\color{blue}{x \cdot {a}^{t}}}{y} \]
              3. pow-lowering-pow.f64 — 46.8

                \[\leadsto \frac{x \cdot \color{blue}{{a}^{t}}}{y} \]
            8. Simplified — 46.8%

              \[\leadsto \color{blue}{\frac{x \cdot {a}^{t}}{y}} \]
            9. Taylor expanded in t around 0

              \[\leadsto \color{blue}{\frac{x}{y}} \]
            10. Step-by-step derivation
              1. /-lowering-/.f64 — 12.2

                \[\leadsto \color{blue}{\frac{x}{y}} \]
            11. Simplified — 12.2%

              \[\leadsto \color{blue}{\frac{x}{y}} \]
            12. Add Preprocessing

            Developer Target 1: 71.9% accurate, 1.0× speedup?

            \[\begin{array}{l} \\ \begin{array}{l} t_1 := {a}^{\left(t - 1\right)}\\ t_2 := \frac{x \cdot \frac{t\_1}{y}}{\left(b + 1\right) - y \cdot \log z}\\ \mathbf{if}\;t < -0.8845848504127471:\\ \;\;\;\;t\_2\\ \mathbf{elif}\;t < 852031.2288374073:\\ \;\;\;\;\frac{\frac{x}{y} \cdot t\_1}{e^{b - \log z \cdot y}}\\ \mathbf{else}:\\ \;\;\;\;t\_2\\ \end{array} \end{array} \]
            (FPCore (x y z t a b)
             :precision binary64
             (let* ((t_1 (pow a (- t 1.0)))
                    (t_2 (/ (* x (/ t_1 y)) (- (+ b 1.0) (* y (log z))))))
               (if (< t -0.8845848504127471)
                 t_2
                 (if (< t 852031.2288374073)
                   (/ (* (/ x y) t_1) (exp (- b (* (log z) y))))
                   t_2))))
            double code(double x, double y, double z, double t, double a, double b) {
            	double t_1 = pow(a, (t - 1.0));
            	double t_2 = (x * (t_1 / y)) / ((b + 1.0) - (y * log(z)));
            	double tmp;
            	if (t < -0.8845848504127471) {
            		tmp = t_2;
            	} else if (t < 852031.2288374073) {
            		tmp = ((x / y) * t_1) / exp((b - (log(z) * y)));
            	} else {
            		tmp = t_2;
            	}
            	return tmp;
            }
            
            real(8) function code(x, y, z, t, a, b)
                real(8), intent (in) :: x
                real(8), intent (in) :: y
                real(8), intent (in) :: z
                real(8), intent (in) :: t
                real(8), intent (in) :: a
                real(8), intent (in) :: b
                real(8) :: t_1
                real(8) :: t_2
                real(8) :: tmp
                t_1 = a ** (t - 1.0d0)
                t_2 = (x * (t_1 / y)) / ((b + 1.0d0) - (y * log(z)))
                if (t < (-0.8845848504127471d0)) then
                    tmp = t_2
                else if (t < 852031.2288374073d0) then
                    tmp = ((x / y) * t_1) / exp((b - (log(z) * y)))
                else
                    tmp = t_2
                end if
                code = tmp
            end function
            
            public static double code(double x, double y, double z, double t, double a, double b) {
            	double t_1 = Math.pow(a, (t - 1.0));
            	double t_2 = (x * (t_1 / y)) / ((b + 1.0) - (y * Math.log(z)));
            	double tmp;
            	if (t < -0.8845848504127471) {
            		tmp = t_2;
            	} else if (t < 852031.2288374073) {
            		tmp = ((x / y) * t_1) / Math.exp((b - (Math.log(z) * y)));
            	} else {
            		tmp = t_2;
            	}
            	return tmp;
            }
            
            def code(x, y, z, t, a, b):
            	t_1 = math.pow(a, (t - 1.0))
            	t_2 = (x * (t_1 / y)) / ((b + 1.0) - (y * math.log(z)))
            	tmp = 0
            	if t < -0.8845848504127471:
            		tmp = t_2
            	elif t < 852031.2288374073:
            		tmp = ((x / y) * t_1) / math.exp((b - (math.log(z) * y)))
            	else:
            		tmp = t_2
            	return tmp
            
            function code(x, y, z, t, a, b)
            	t_1 = a ^ Float64(t - 1.0)
            	t_2 = Float64(Float64(x * Float64(t_1 / y)) / Float64(Float64(b + 1.0) - Float64(y * log(z))))
            	tmp = 0.0
            	if (t < -0.8845848504127471)
            		tmp = t_2;
            	elseif (t < 852031.2288374073)
            		tmp = Float64(Float64(Float64(x / y) * t_1) / exp(Float64(b - Float64(log(z) * y))));
            	else
            		tmp = t_2;
            	end
            	return tmp
            end
            
            function tmp_2 = code(x, y, z, t, a, b)
            	t_1 = a ^ (t - 1.0);
            	t_2 = (x * (t_1 / y)) / ((b + 1.0) - (y * log(z)));
            	tmp = 0.0;
            	if (t < -0.8845848504127471)
            		tmp = t_2;
            	elseif (t < 852031.2288374073)
            		tmp = ((x / y) * t_1) / exp((b - (log(z) * y)));
            	else
            		tmp = t_2;
            	end
            	tmp_2 = tmp;
            end
            
            code[x_, y_, z_, t_, a_, b_] := Block[{t$95$1 = N[Power[a, N[(t - 1.0), $MachinePrecision]], $MachinePrecision]}, Block[{t$95$2 = N[(N[(x * N[(t$95$1 / y), $MachinePrecision]), $MachinePrecision] / N[(N[(b + 1.0), $MachinePrecision] - N[(y * N[Log[z], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, If[Less[t, -0.8845848504127471], t$95$2, If[Less[t, 852031.2288374073], N[(N[(N[(x / y), $MachinePrecision] * t$95$1), $MachinePrecision] / N[Exp[N[(b - N[(N[Log[z], $MachinePrecision] * y), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision], t$95$2]]]]
            
            \begin{array}{l}
            
            \\
            \begin{array}{l}
            t_1 := {a}^{\left(t - 1\right)}\\
            t_2 := \frac{x \cdot \frac{t\_1}{y}}{\left(b + 1\right) - y \cdot \log z}\\
            \mathbf{if}\;t < -0.8845848504127471:\\
            \;\;\;\;t\_2\\
            
            \mathbf{elif}\;t < 852031.2288374073:\\
            \;\;\;\;\frac{\frac{x}{y} \cdot t\_1}{e^{b - \log z \cdot y}}\\
            
            \mathbf{else}:\\
            \;\;\;\;t\_2\\
            
            
            \end{array}
            \end{array}
            

            Reproduce

            ?
            herbie shell --seed 2024196 
            (FPCore (x y z t a b)
              :name "Numeric.SpecFunctions:incompleteBetaWorker from math-functions-0.1.5.2, A"
              :precision binary64
            
              :alt
              (! :herbie-platform default (if (< t -8845848504127471/10000000000000000) (/ (* x (/ (pow a (- t 1)) y)) (- (+ b 1) (* y (log z)))) (if (< t 8520312288374073/10000000000) (/ (* (/ x y) (pow a (- t 1))) (exp (- b (* (log z) y)))) (/ (* x (/ (pow a (- t 1)) y)) (- (+ b 1) (* y (log z)))))))
            
              (/ (* x (exp (- (+ (* y (log z)) (* (- t 1.0) (log a))) b))) y))