Numeric.SpecFunctions:logBeta from math-functions-0.1.5.2, A

Percentage Accurate: 99.9% → 99.9%
Time: 8.3s
Alternatives: 4
Speedup: N/A×

Specification

?
\[\begin{array}{l} \\ \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \left(a - 0.5\right) \cdot b \end{array} \]
(FPCore (x y z t a b)
 :precision binary64
 (+ (- (+ (+ x y) z) (* z (log t))) (* (- a 0.5) b)))
double code(double x, double y, double z, double t, double a, double b) {
	return (((x + y) + z) - (z * log(t))) + ((a - 0.5) * b);
}
! fmax/fmin helpers with C-style NaN handling: when one argument is NaN
! the other argument is returned (the merge chain below makes this
! explicit; x /= x is true exactly when x is NaN).
! The generic interfaces cover all real(4)/real(8) operand combinations;
! the mixed-kind variants promote the real(4) operand via dble().
module fmin_fmax_functions
    implicit none
    private
    public fmax
    public fmin

    interface fmax
        module procedure fmax88
        module procedure fmax44
        module procedure fmax84
        module procedure fmax48
    end interface
    interface fmin
        module procedure fmin88
        module procedure fmin44
        module procedure fmin84
        module procedure fmin48
    end interface
contains
    ! NaN-aware max, real(8)/real(8).
    real(8) function fmax88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        ! y if x is NaN; else x if y is NaN; else max(x, y).
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    ! NaN-aware max, real(4)/real(4).
    real(4) function fmax44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    ! NaN-aware max, real(8)/real(4); y promoted to real(8).
    real(8) function fmax84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
    end function
    ! NaN-aware max, real(4)/real(8); x promoted to real(8).
    real(8) function fmax48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
    end function
    ! NaN-aware min, real(8)/real(8).
    real(8) function fmin88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    ! NaN-aware min, real(4)/real(4).
    real(4) function fmin44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    ! NaN-aware min, real(8)/real(4); y promoted to real(8).
    real(8) function fmin84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
    end function
    ! NaN-aware min, real(4)/real(8); x promoted to real(8).
    real(8) function fmin48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
    end function
end module

! Herbie initial program:
!   code = ((x + y) + z) - z*log(t) + (a - 0.5)*b
! All arguments and the result are real(8); 0.5d0 keeps the literal in
! double precision.  The parenthesized evaluation order is deliberate —
! it fixes the floating-point rounding sequence; do not reassociate.
real(8) function code(x, y, z, t, a, b)
use fmin_fmax_functions
    implicit none
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    code = (((x + y) + z) - (z * log(t))) + ((a - 0.5d0) * b)
end function
/** Herbie initial program: ((x + y) + z) - z*log(t) + (a - 0.5)*b.
 *  Evaluation order fixes the binary64 rounding sequence. */
public static double code(double x, double y, double z, double t, double a, double b) {
	return (((x + y) + z) - (z * Math.log(t))) + ((a - 0.5) * b);
}
def code(x, y, z, t, a, b):
	"""Herbie initial program: ((x + y) + z) - z*log(t) + (a - 0.5)*b.

	The intermediate names below do not change the floating-point
	operation order, so results match the single-expression form.
	"""
	sum_xyz = (x + y) + z
	log_term = z * math.log(t)
	tail = (a - 0.5) * b
	return (sum_xyz - log_term) + tail
# Herbie initial program: ((x + y) + z) - z*log(t) + (a - 0.5)*b.
# Explicit Float64() wrappers pin binary64 rounding at every step.
function code(x, y, z, t, a, b)
	return Float64(Float64(Float64(Float64(x + y) + z) - Float64(z * log(t))) + Float64(Float64(a - 0.5) * b))
end
% Herbie initial program: ((x + y) + z) - z*log(t) + (a - 0.5)*b.
function tmp = code(x, y, z, t, a, b)
	tmp = (((x + y) + z) - (z * log(t))) + ((a - 0.5) * b);
end
(* Herbie initial program; each subexpression rounded via N[..., $MachinePrecision]. *)
code[x_, y_, z_, t_, a_, b_] := N[(N[(N[(N[(x + y), $MachinePrecision] + z), $MachinePrecision] - N[(z * N[Log[t], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(N[(a - 0.5), $MachinePrecision] * b), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \left(a - 0.5\right) \cdot b
\end{array}

Local Percentage Accuracy vs ?

The average percentage accuracy by input value. Horizontal axis shows value of an input variable; the variable is chosen in the title. Vertical axis is accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion. These can be toggled with buttons below the plot. The line is an average while dots represent individual samples.

Accuracy vs Speed?

Herbie found 4 alternatives:

AlternativeAccuracySpeedup
The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed-accuracy tradeoffs.

Initial Program: 99.9% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \left(a - 0.5\right) \cdot b \end{array} \]
(FPCore (x y z t a b)
 :precision binary64
 (+ (- (+ (+ x y) z) (* z (log t))) (* (- a 0.5) b)))
double code(double x, double y, double z, double t, double a, double b) {
	return (((x + y) + z) - (z * log(t))) + ((a - 0.5) * b);
}
! fmax/fmin helpers with C-style NaN handling: when one argument is NaN
! the other argument is returned (the merge chain below makes this
! explicit; x /= x is true exactly when x is NaN).
! The generic interfaces cover all real(4)/real(8) operand combinations;
! the mixed-kind variants promote the real(4) operand via dble().
module fmin_fmax_functions
    implicit none
    private
    public fmax
    public fmin

    interface fmax
        module procedure fmax88
        module procedure fmax44
        module procedure fmax84
        module procedure fmax48
    end interface
    interface fmin
        module procedure fmin88
        module procedure fmin44
        module procedure fmin84
        module procedure fmin48
    end interface
contains
    ! NaN-aware max, real(8)/real(8).
    real(8) function fmax88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        ! y if x is NaN; else x if y is NaN; else max(x, y).
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    ! NaN-aware max, real(4)/real(4).
    real(4) function fmax44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    ! NaN-aware max, real(8)/real(4); y promoted to real(8).
    real(8) function fmax84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
    end function
    ! NaN-aware max, real(4)/real(8); x promoted to real(8).
    real(8) function fmax48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
    end function
    ! NaN-aware min, real(8)/real(8).
    real(8) function fmin88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    ! NaN-aware min, real(4)/real(4).
    real(4) function fmin44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    ! NaN-aware min, real(8)/real(4); y promoted to real(8).
    real(8) function fmin84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
    end function
    ! NaN-aware min, real(4)/real(8); x promoted to real(8).
    real(8) function fmin48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
    end function
end module

! Herbie initial program:
!   code = ((x + y) + z) - z*log(t) + (a - 0.5)*b
! All arguments and the result are real(8); 0.5d0 keeps the literal in
! double precision.  The parenthesized evaluation order is deliberate —
! it fixes the floating-point rounding sequence; do not reassociate.
real(8) function code(x, y, z, t, a, b)
use fmin_fmax_functions
    implicit none
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    code = (((x + y) + z) - (z * log(t))) + ((a - 0.5d0) * b)
end function
/** Herbie initial program: ((x + y) + z) - z*log(t) + (a - 0.5)*b.
 *  Evaluation order fixes the binary64 rounding sequence. */
public static double code(double x, double y, double z, double t, double a, double b) {
	return (((x + y) + z) - (z * Math.log(t))) + ((a - 0.5) * b);
}
def code(x, y, z, t, a, b):
	"""Herbie initial program: ((x + y) + z) - z*log(t) + (a - 0.5)*b.

	Splitting into named terms leaves the floating-point operation
	order unchanged, so results match the one-liner exactly.
	"""
	linear = (x + y) + z
	correction = z * math.log(t)
	scaled = (a - 0.5) * b
	return (linear - correction) + scaled
# Herbie initial program: ((x + y) + z) - z*log(t) + (a - 0.5)*b.
# Explicit Float64() wrappers pin binary64 rounding at every step.
function code(x, y, z, t, a, b)
	return Float64(Float64(Float64(Float64(x + y) + z) - Float64(z * log(t))) + Float64(Float64(a - 0.5) * b))
end
% Herbie initial program: ((x + y) + z) - z*log(t) + (a - 0.5)*b.
function tmp = code(x, y, z, t, a, b)
	tmp = (((x + y) + z) - (z * log(t))) + ((a - 0.5) * b);
end
(* Herbie initial program; each subexpression rounded via N[..., $MachinePrecision]. *)
code[x_, y_, z_, t_, a_, b_] := N[(N[(N[(N[(x + y), $MachinePrecision] + z), $MachinePrecision] - N[(z * N[Log[t], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(N[(a - 0.5), $MachinePrecision] * b), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \left(a - 0.5\right) \cdot b
\end{array}

Alternative 1: 99.9% accurate, N/A× speedup?

\[\begin{array}{l} \\ \left(\mathsf{fma}\left(1 - \log t, z, \left(a - 0.5\right) \cdot b\right) + y\right) + x \end{array} \]
(FPCore (x y z t a b)
 :precision binary64
 (+ (+ (fma (- 1.0 (log t)) z (* (- a 0.5) b)) y) x))
double code(double x, double y, double z, double t, double a, double b) {
	return (fma((1.0 - log(t)), z, ((a - 0.5) * b)) + y) + x;
}
# Herbie Alternative 1: (fma(1 - log(t), z, (a - 0.5)*b) + y) + x.
# Float64() wrappers pin binary64 rounding at each step.
function code(x, y, z, t, a, b)
	return Float64(Float64(fma(Float64(1.0 - log(t)), z, Float64(Float64(a - 0.5) * b)) + y) + x)
end
(* Herbie Alternative 1; fma expressed as a*b + c under N[..., $MachinePrecision]. *)
code[x_, y_, z_, t_, a_, b_] := N[(N[(N[(N[(1.0 - N[Log[t], $MachinePrecision]), $MachinePrecision] * z + N[(N[(a - 0.5), $MachinePrecision] * b), $MachinePrecision]), $MachinePrecision] + y), $MachinePrecision] + x), $MachinePrecision]
\begin{array}{l}

\\
\left(\mathsf{fma}\left(1 - \log t, z, \left(a - 0.5\right) \cdot b\right) + y\right) + x
\end{array}
Derivation
  1. Initial program 99.9%

    \[\left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \left(a - 0.5\right) \cdot b \]
  2. Taylor expanded in z around 0

    \[\leadsto \color{blue}{x + \left(y + \left(b \cdot \left(a - \frac{1}{2}\right) + z \cdot \left(1 - \log t\right)\right)\right)} \]
  3. Step-by-step derivation
    1. +-commutativeN/A

      \[\leadsto \left(y + \left(b \cdot \left(a - \frac{1}{2}\right) + z \cdot \left(1 - \log t\right)\right)\right) + \color{blue}{x} \]
    2. lower-+.f64N/A

      \[\leadsto \left(y + \left(b \cdot \left(a - \frac{1}{2}\right) + z \cdot \left(1 - \log t\right)\right)\right) + \color{blue}{x} \]
    3. +-commutativeN/A

      \[\leadsto \left(\left(b \cdot \left(a - \frac{1}{2}\right) + z \cdot \left(1 - \log t\right)\right) + y\right) + x \]
    4. lower-+.f64N/A

      \[\leadsto \left(\left(b \cdot \left(a - \frac{1}{2}\right) + z \cdot \left(1 - \log t\right)\right) + y\right) + x \]
    5. +-commutativeN/A

      \[\leadsto \left(\left(z \cdot \left(1 - \log t\right) + b \cdot \left(a - \frac{1}{2}\right)\right) + y\right) + x \]
    6. *-commutativeN/A

      \[\leadsto \left(\left(\left(1 - \log t\right) \cdot z + b \cdot \left(a - \frac{1}{2}\right)\right) + y\right) + x \]
    7. lower-fma.f64N/A

      \[\leadsto \left(\mathsf{fma}\left(1 - \log t, z, b \cdot \left(a - \frac{1}{2}\right)\right) + y\right) + x \]
    8. lower--.f64N/A

      \[\leadsto \left(\mathsf{fma}\left(1 - \log t, z, b \cdot \left(a - \frac{1}{2}\right)\right) + y\right) + x \]
    9. lift-log.f64N/A

      \[\leadsto \left(\mathsf{fma}\left(1 - \log t, z, b \cdot \left(a - \frac{1}{2}\right)\right) + y\right) + x \]
    10. *-commutativeN/A

      \[\leadsto \left(\mathsf{fma}\left(1 - \log t, z, \left(a - \frac{1}{2}\right) \cdot b\right) + y\right) + x \]
    11. lift-*.f64N/A

      \[\leadsto \left(\mathsf{fma}\left(1 - \log t, z, \left(a - \frac{1}{2}\right) \cdot b\right) + y\right) + x \]
    12. lift--.f6499.9

      \[\leadsto \left(\mathsf{fma}\left(1 - \log t, z, \left(a - 0.5\right) \cdot b\right) + y\right) + x \]
  4. Applied rewrites99.9%

    \[\leadsto \color{blue}{\left(\mathsf{fma}\left(1 - \log t, z, \left(a - 0.5\right) \cdot b\right) + y\right) + x} \]
  5. Add Preprocessing

Alternative 2: 73.3% accurate, N/A× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_1 := \frac{y + x}{a}\\ \mathbf{if}\;z \leq -920:\\ \;\;\;\;\left(\mathsf{fma}\left(\mathsf{fma}\left(\frac{\log t}{a}, -1, {a}^{-1}\right) \cdot a, -1, \frac{\mathsf{fma}\left(\mathsf{fma}\left(\frac{b}{a}, -0.5, t\_1\right), -1, -1 \cdot b\right) \cdot a}{z}\right) \cdot z\right) \cdot -1\\ \mathbf{else}:\\ \;\;\;\;\left(-1 \cdot a\right) \cdot \mathsf{fma}\left(\mathsf{fma}\left(\frac{b}{a}, -0.5, \mathsf{fma}\left(\frac{1 - \log t}{a}, z, t\_1\right)\right), -1, -1 \cdot b\right)\\ \end{array} \end{array} \]
(FPCore (x y z t a b)
 :precision binary64
 (let* ((t_1 (/ (+ y x) a)))
   (if (<= z -920.0)
     (*
      (*
       (fma
        (* (fma (/ (log t) a) -1.0 (pow a -1.0)) a)
        -1.0
        (/ (* (fma (fma (/ b a) -0.5 t_1) -1.0 (* -1.0 b)) a) z))
       z)
      -1.0)
     (*
      (* -1.0 a)
      (fma
       (fma (/ b a) -0.5 (fma (/ (- 1.0 (log t)) a) z t_1))
       -1.0
       (* -1.0 b))))))
/* Herbie Alternative 2 (73.3% accurate): input space split into two
 * regimes at z = -920 (per the derivation: Taylor expansion in z around
 * -inf for very negative z, around 0 otherwise).  The operation order is
 * deliberate for floating-point accuracy; do not algebraically simplify. */
double code(double x, double y, double z, double t, double a, double b) {
	/* Shared subterm (y + x)/a. */
	double t_1 = (y + x) / a;
	double tmp;
	if (z <= -920.0) {
		/* Regime z <= -920. */
		tmp = (fma((fma((log(t) / a), -1.0, pow(a, -1.0)) * a), -1.0, ((fma(fma((b / a), -0.5, t_1), -1.0, (-1.0 * b)) * a) / z)) * z) * -1.0;
	} else {
		/* Regime z > -920. */
		tmp = (-1.0 * a) * fma(fma((b / a), -0.5, fma(((1.0 - log(t)) / a), z, t_1)), -1.0, (-1.0 * b));
	}
	return tmp;
}
# Herbie Alternative 2 (73.3% accurate): two regimes split at z = -920.
# Float64() wrappers pin binary64 rounding; do not simplify.
function code(x, y, z, t, a, b)
	t_1 = Float64(Float64(y + x) / a)
	tmp = 0.0
	if (z <= -920.0)
		tmp = Float64(Float64(fma(Float64(fma(Float64(log(t) / a), -1.0, (a ^ -1.0)) * a), -1.0, Float64(Float64(fma(fma(Float64(b / a), -0.5, t_1), -1.0, Float64(-1.0 * b)) * a) / z)) * z) * -1.0);
	else
		tmp = Float64(Float64(-1.0 * a) * fma(fma(Float64(b / a), -0.5, fma(Float64(Float64(1.0 - log(t)) / a), z, t_1)), -1.0, Float64(-1.0 * b)));
	end
	return tmp
end
(* Herbie Alternative 2; two regimes split at z = -920, under N[..., $MachinePrecision]. *)
code[x_, y_, z_, t_, a_, b_] := Block[{t$95$1 = N[(N[(y + x), $MachinePrecision] / a), $MachinePrecision]}, If[LessEqual[z, -920.0], N[(N[(N[(N[(N[(N[(N[Log[t], $MachinePrecision] / a), $MachinePrecision] * -1.0 + N[Power[a, -1.0], $MachinePrecision]), $MachinePrecision] * a), $MachinePrecision] * -1.0 + N[(N[(N[(N[(N[(b / a), $MachinePrecision] * -0.5 + t$95$1), $MachinePrecision] * -1.0 + N[(-1.0 * b), $MachinePrecision]), $MachinePrecision] * a), $MachinePrecision] / z), $MachinePrecision]), $MachinePrecision] * z), $MachinePrecision] * -1.0), $MachinePrecision], N[(N[(-1.0 * a), $MachinePrecision] * N[(N[(N[(b / a), $MachinePrecision] * -0.5 + N[(N[(N[(1.0 - N[Log[t], $MachinePrecision]), $MachinePrecision] / a), $MachinePrecision] * z + t$95$1), $MachinePrecision]), $MachinePrecision] * -1.0 + N[(-1.0 * b), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
t_1 := \frac{y + x}{a}\\
\mathbf{if}\;z \leq -920:\\
\;\;\;\;\left(\mathsf{fma}\left(\mathsf{fma}\left(\frac{\log t}{a}, -1, {a}^{-1}\right) \cdot a, -1, \frac{\mathsf{fma}\left(\mathsf{fma}\left(\frac{b}{a}, -0.5, t\_1\right), -1, -1 \cdot b\right) \cdot a}{z}\right) \cdot z\right) \cdot -1\\

\mathbf{else}:\\
\;\;\;\;\left(-1 \cdot a\right) \cdot \mathsf{fma}\left(\mathsf{fma}\left(\frac{b}{a}, -0.5, \mathsf{fma}\left(\frac{1 - \log t}{a}, z, t\_1\right)\right), -1, -1 \cdot b\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if z < -920

    1. Initial program 99.7%

      \[\left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \left(a - 0.5\right) \cdot b \]
    2. Taylor expanded in a around -inf

      \[\leadsto \color{blue}{-1 \cdot \left(a \cdot \left(-1 \cdot b + -1 \cdot \frac{\left(x + \left(y + \left(z + \frac{-1}{2} \cdot b\right)\right)\right) - z \cdot \log t}{a}\right)\right)} \]
    3. Step-by-step derivation
      1. associate-*r*N/A

        \[\leadsto \left(-1 \cdot a\right) \cdot \color{blue}{\left(-1 \cdot b + -1 \cdot \frac{\left(x + \left(y + \left(z + \frac{-1}{2} \cdot b\right)\right)\right) - z \cdot \log t}{a}\right)} \]
      2. lower-*.f64N/A

        \[\leadsto \left(-1 \cdot a\right) \cdot \color{blue}{\left(-1 \cdot b + -1 \cdot \frac{\left(x + \left(y + \left(z + \frac{-1}{2} \cdot b\right)\right)\right) - z \cdot \log t}{a}\right)} \]
      3. lower-*.f64N/A

        \[\leadsto \left(-1 \cdot a\right) \cdot \left(\color{blue}{-1 \cdot b} + -1 \cdot \frac{\left(x + \left(y + \left(z + \frac{-1}{2} \cdot b\right)\right)\right) - z \cdot \log t}{a}\right) \]
      4. +-commutativeN/A

        \[\leadsto \left(-1 \cdot a\right) \cdot \left(-1 \cdot \frac{\left(x + \left(y + \left(z + \frac{-1}{2} \cdot b\right)\right)\right) - z \cdot \log t}{a} + \color{blue}{-1 \cdot b}\right) \]
      5. *-commutativeN/A

        \[\leadsto \left(-1 \cdot a\right) \cdot \left(\frac{\left(x + \left(y + \left(z + \frac{-1}{2} \cdot b\right)\right)\right) - z \cdot \log t}{a} \cdot -1 + \color{blue}{-1} \cdot b\right) \]
      6. lower-fma.f64N/A

        \[\leadsto \left(-1 \cdot a\right) \cdot \mathsf{fma}\left(\frac{\left(x + \left(y + \left(z + \frac{-1}{2} \cdot b\right)\right)\right) - z \cdot \log t}{a}, \color{blue}{-1}, -1 \cdot b\right) \]
    4. Applied rewrites67.6%

      \[\leadsto \color{blue}{\left(-1 \cdot a\right) \cdot \mathsf{fma}\left(\frac{\left(\mathsf{fma}\left(-0.5, b, z\right) + y\right) + x}{a} - \frac{\log t \cdot z}{a}, -1, -1 \cdot b\right)} \]
    5. Taylor expanded in z around -inf

      \[\leadsto -1 \cdot \color{blue}{\left(z \cdot \left(-1 \cdot \left(a \cdot \left(-1 \cdot \frac{\log t}{a} + \frac{1}{a}\right)\right) + \frac{a \cdot \left(-1 \cdot b + -1 \cdot \left(\frac{-1}{2} \cdot \frac{b}{a} + \left(\frac{x}{a} + \frac{y}{a}\right)\right)\right)}{z}\right)\right)} \]
    6. Step-by-step derivation
      1. *-commutativeN/A

        \[\leadsto \left(z \cdot \left(-1 \cdot \left(a \cdot \left(-1 \cdot \frac{\log t}{a} + \frac{1}{a}\right)\right) + \frac{a \cdot \left(-1 \cdot b + -1 \cdot \left(\frac{-1}{2} \cdot \frac{b}{a} + \left(\frac{x}{a} + \frac{y}{a}\right)\right)\right)}{z}\right)\right) \cdot -1 \]
      2. lower-*.f64N/A

        \[\leadsto \left(z \cdot \left(-1 \cdot \left(a \cdot \left(-1 \cdot \frac{\log t}{a} + \frac{1}{a}\right)\right) + \frac{a \cdot \left(-1 \cdot b + -1 \cdot \left(\frac{-1}{2} \cdot \frac{b}{a} + \left(\frac{x}{a} + \frac{y}{a}\right)\right)\right)}{z}\right)\right) \cdot -1 \]
    7. Applied rewrites75.7%

      \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(\frac{\log t}{a}, -1, {a}^{-1}\right) \cdot a, -1, \frac{\mathsf{fma}\left(\mathsf{fma}\left(\frac{b}{a}, -0.5, \frac{y + x}{a}\right), -1, -1 \cdot b\right) \cdot a}{z}\right) \cdot z\right) \cdot \color{blue}{-1} \]

    if -920 < z

    1. Initial program 99.9%

      \[\left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \left(a - 0.5\right) \cdot b \]
    2. Taylor expanded in a around -inf

      \[\leadsto \color{blue}{-1 \cdot \left(a \cdot \left(-1 \cdot b + -1 \cdot \frac{\left(x + \left(y + \left(z + \frac{-1}{2} \cdot b\right)\right)\right) - z \cdot \log t}{a}\right)\right)} \]
    3. Step-by-step derivation
      1. associate-*r*N/A

        \[\leadsto \left(-1 \cdot a\right) \cdot \color{blue}{\left(-1 \cdot b + -1 \cdot \frac{\left(x + \left(y + \left(z + \frac{-1}{2} \cdot b\right)\right)\right) - z \cdot \log t}{a}\right)} \]
      2. lower-*.f64N/A

        \[\leadsto \left(-1 \cdot a\right) \cdot \color{blue}{\left(-1 \cdot b + -1 \cdot \frac{\left(x + \left(y + \left(z + \frac{-1}{2} \cdot b\right)\right)\right) - z \cdot \log t}{a}\right)} \]
      3. lower-*.f64N/A

        \[\leadsto \left(-1 \cdot a\right) \cdot \left(\color{blue}{-1 \cdot b} + -1 \cdot \frac{\left(x + \left(y + \left(z + \frac{-1}{2} \cdot b\right)\right)\right) - z \cdot \log t}{a}\right) \]
      4. +-commutativeN/A

        \[\leadsto \left(-1 \cdot a\right) \cdot \left(-1 \cdot \frac{\left(x + \left(y + \left(z + \frac{-1}{2} \cdot b\right)\right)\right) - z \cdot \log t}{a} + \color{blue}{-1 \cdot b}\right) \]
      5. *-commutativeN/A

        \[\leadsto \left(-1 \cdot a\right) \cdot \left(\frac{\left(x + \left(y + \left(z + \frac{-1}{2} \cdot b\right)\right)\right) - z \cdot \log t}{a} \cdot -1 + \color{blue}{-1} \cdot b\right) \]
      6. lower-fma.f64N/A

        \[\leadsto \left(-1 \cdot a\right) \cdot \mathsf{fma}\left(\frac{\left(x + \left(y + \left(z + \frac{-1}{2} \cdot b\right)\right)\right) - z \cdot \log t}{a}, \color{blue}{-1}, -1 \cdot b\right) \]
    4. Applied rewrites72.5%

      \[\leadsto \color{blue}{\left(-1 \cdot a\right) \cdot \mathsf{fma}\left(\frac{\left(\mathsf{fma}\left(-0.5, b, z\right) + y\right) + x}{a} - \frac{\log t \cdot z}{a}, -1, -1 \cdot b\right)} \]
    5. Taylor expanded in z around 0

      \[\leadsto \left(-1 \cdot a\right) \cdot \mathsf{fma}\left(\frac{-1}{2} \cdot \frac{b}{a} + \left(z \cdot \left(\frac{1}{a} - \frac{\log t}{a}\right) + \left(\frac{x}{a} + \frac{y}{a}\right)\right), -1, -1 \cdot b\right) \]
    6. Step-by-step derivation
      1. *-commutativeN/A

        \[\leadsto \left(-1 \cdot a\right) \cdot \mathsf{fma}\left(\frac{b}{a} \cdot \frac{-1}{2} + \left(z \cdot \left(\frac{1}{a} - \frac{\log t}{a}\right) + \left(\frac{x}{a} + \frac{y}{a}\right)\right), -1, -1 \cdot b\right) \]
      2. +-commutativeN/A

        \[\leadsto \left(-1 \cdot a\right) \cdot \mathsf{fma}\left(\frac{b}{a} \cdot \frac{-1}{2} + \left(\left(\frac{x}{a} + \frac{y}{a}\right) + z \cdot \left(\frac{1}{a} - \frac{\log t}{a}\right)\right), -1, -1 \cdot b\right) \]
      3. sub-divN/A

        \[\leadsto \left(-1 \cdot a\right) \cdot \mathsf{fma}\left(\frac{b}{a} \cdot \frac{-1}{2} + \left(\left(\frac{x}{a} + \frac{y}{a}\right) + z \cdot \frac{1 - \log t}{a}\right), -1, -1 \cdot b\right) \]
      4. associate-/l*N/A

        \[\leadsto \left(-1 \cdot a\right) \cdot \mathsf{fma}\left(\frac{b}{a} \cdot \frac{-1}{2} + \left(\left(\frac{x}{a} + \frac{y}{a}\right) + \frac{z \cdot \left(1 - \log t\right)}{a}\right), -1, -1 \cdot b\right) \]
      5. associate-+r+N/A

        \[\leadsto \left(-1 \cdot a\right) \cdot \mathsf{fma}\left(\frac{b}{a} \cdot \frac{-1}{2} + \left(\frac{x}{a} + \left(\frac{y}{a} + \frac{z \cdot \left(1 - \log t\right)}{a}\right)\right), -1, -1 \cdot b\right) \]
      6. lower-fma.f64N/A

        \[\leadsto \left(-1 \cdot a\right) \cdot \mathsf{fma}\left(\mathsf{fma}\left(\frac{b}{a}, \frac{-1}{2}, \frac{x}{a} + \left(\frac{y}{a} + \frac{z \cdot \left(1 - \log t\right)}{a}\right)\right), -1, -1 \cdot b\right) \]
      7. lower-/.f64N/A

        \[\leadsto \left(-1 \cdot a\right) \cdot \mathsf{fma}\left(\mathsf{fma}\left(\frac{b}{a}, \frac{-1}{2}, \frac{x}{a} + \left(\frac{y}{a} + \frac{z \cdot \left(1 - \log t\right)}{a}\right)\right), -1, -1 \cdot b\right) \]
      8. associate-+r+N/A

        \[\leadsto \left(-1 \cdot a\right) \cdot \mathsf{fma}\left(\mathsf{fma}\left(\frac{b}{a}, \frac{-1}{2}, \left(\frac{x}{a} + \frac{y}{a}\right) + \frac{z \cdot \left(1 - \log t\right)}{a}\right), -1, -1 \cdot b\right) \]
      9. associate-/l*N/A

        \[\leadsto \left(-1 \cdot a\right) \cdot \mathsf{fma}\left(\mathsf{fma}\left(\frac{b}{a}, \frac{-1}{2}, \left(\frac{x}{a} + \frac{y}{a}\right) + z \cdot \frac{1 - \log t}{a}\right), -1, -1 \cdot b\right) \]
      10. sub-divN/A

        \[\leadsto \left(-1 \cdot a\right) \cdot \mathsf{fma}\left(\mathsf{fma}\left(\frac{b}{a}, \frac{-1}{2}, \left(\frac{x}{a} + \frac{y}{a}\right) + z \cdot \left(\frac{1}{a} - \frac{\log t}{a}\right)\right), -1, -1 \cdot b\right) \]
      11. +-commutativeN/A

        \[\leadsto \left(-1 \cdot a\right) \cdot \mathsf{fma}\left(\mathsf{fma}\left(\frac{b}{a}, \frac{-1}{2}, z \cdot \left(\frac{1}{a} - \frac{\log t}{a}\right) + \left(\frac{x}{a} + \frac{y}{a}\right)\right), -1, -1 \cdot b\right) \]
    7. Applied rewrites72.5%

      \[\leadsto \left(-1 \cdot a\right) \cdot \mathsf{fma}\left(\mathsf{fma}\left(\frac{b}{a}, -0.5, \mathsf{fma}\left(\frac{1 - \log t}{a}, z, \frac{y + x}{a}\right)\right), -1, -1 \cdot b\right) \]
  3. Recombined 2 regimes into one program.
  4. Add Preprocessing

Alternative 3: 62.9% accurate, N/A× speedup?

\[\begin{array}{l} \\ \left(\mathsf{fma}\left(\mathsf{fma}\left(\frac{\log t}{a}, -1, {a}^{-1}\right) \cdot a, -1, \frac{\mathsf{fma}\left(\mathsf{fma}\left(\frac{b}{a}, -0.5, \frac{y + x}{a}\right), -1, -1 \cdot b\right) \cdot a}{z}\right) \cdot z\right) \cdot -1 \end{array} \]
(FPCore (x y z t a b)
 :precision binary64
 (*
  (*
   (fma
    (* (fma (/ (log t) a) -1.0 (pow a -1.0)) a)
    -1.0
    (/ (* (fma (fma (/ b a) -0.5 (/ (+ y x) a)) -1.0 (* -1.0 b)) a) z))
   z)
  -1.0))
double code(double x, double y, double z, double t, double a, double b) {
	return (fma((fma((log(t) / a), -1.0, pow(a, -1.0)) * a), -1.0, ((fma(fma((b / a), -0.5, ((y + x) / a)), -1.0, (-1.0 * b)) * a) / z)) * z) * -1.0;
}
# Herbie Alternative 3 (62.9% accurate); Float64() wrappers pin binary64
# rounding at each step — do not simplify.
function code(x, y, z, t, a, b)
	return Float64(Float64(fma(Float64(fma(Float64(log(t) / a), -1.0, (a ^ -1.0)) * a), -1.0, Float64(Float64(fma(fma(Float64(b / a), -0.5, Float64(Float64(y + x) / a)), -1.0, Float64(-1.0 * b)) * a) / z)) * z) * -1.0)
end
(* Herbie Alternative 3, evaluated under N[..., $MachinePrecision]. *)
code[x_, y_, z_, t_, a_, b_] := N[(N[(N[(N[(N[(N[(N[Log[t], $MachinePrecision] / a), $MachinePrecision] * -1.0 + N[Power[a, -1.0], $MachinePrecision]), $MachinePrecision] * a), $MachinePrecision] * -1.0 + N[(N[(N[(N[(N[(b / a), $MachinePrecision] * -0.5 + N[(N[(y + x), $MachinePrecision] / a), $MachinePrecision]), $MachinePrecision] * -1.0 + N[(-1.0 * b), $MachinePrecision]), $MachinePrecision] * a), $MachinePrecision] / z), $MachinePrecision]), $MachinePrecision] * z), $MachinePrecision] * -1.0), $MachinePrecision]
\begin{array}{l}

\\
\left(\mathsf{fma}\left(\mathsf{fma}\left(\frac{\log t}{a}, -1, {a}^{-1}\right) \cdot a, -1, \frac{\mathsf{fma}\left(\mathsf{fma}\left(\frac{b}{a}, -0.5, \frac{y + x}{a}\right), -1, -1 \cdot b\right) \cdot a}{z}\right) \cdot z\right) \cdot -1
\end{array}
Derivation
  1. Initial program 99.9%

    \[\left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \left(a - 0.5\right) \cdot b \]
  2. Taylor expanded in a around -inf

    \[\leadsto \color{blue}{-1 \cdot \left(a \cdot \left(-1 \cdot b + -1 \cdot \frac{\left(x + \left(y + \left(z + \frac{-1}{2} \cdot b\right)\right)\right) - z \cdot \log t}{a}\right)\right)} \]
  3. Step-by-step derivation
    1. associate-*r*N/A

      \[\leadsto \left(-1 \cdot a\right) \cdot \color{blue}{\left(-1 \cdot b + -1 \cdot \frac{\left(x + \left(y + \left(z + \frac{-1}{2} \cdot b\right)\right)\right) - z \cdot \log t}{a}\right)} \]
    2. lower-*.f64N/A

      \[\leadsto \left(-1 \cdot a\right) \cdot \color{blue}{\left(-1 \cdot b + -1 \cdot \frac{\left(x + \left(y + \left(z + \frac{-1}{2} \cdot b\right)\right)\right) - z \cdot \log t}{a}\right)} \]
    3. lower-*.f64N/A

      \[\leadsto \left(-1 \cdot a\right) \cdot \left(\color{blue}{-1 \cdot b} + -1 \cdot \frac{\left(x + \left(y + \left(z + \frac{-1}{2} \cdot b\right)\right)\right) - z \cdot \log t}{a}\right) \]
    4. +-commutativeN/A

      \[\leadsto \left(-1 \cdot a\right) \cdot \left(-1 \cdot \frac{\left(x + \left(y + \left(z + \frac{-1}{2} \cdot b\right)\right)\right) - z \cdot \log t}{a} + \color{blue}{-1 \cdot b}\right) \]
    5. *-commutativeN/A

      \[\leadsto \left(-1 \cdot a\right) \cdot \left(\frac{\left(x + \left(y + \left(z + \frac{-1}{2} \cdot b\right)\right)\right) - z \cdot \log t}{a} \cdot -1 + \color{blue}{-1} \cdot b\right) \]
    6. lower-fma.f64N/A

      \[\leadsto \left(-1 \cdot a\right) \cdot \mathsf{fma}\left(\frac{\left(x + \left(y + \left(z + \frac{-1}{2} \cdot b\right)\right)\right) - z \cdot \log t}{a}, \color{blue}{-1}, -1 \cdot b\right) \]
  4. Applied rewrites71.3%

    \[\leadsto \color{blue}{\left(-1 \cdot a\right) \cdot \mathsf{fma}\left(\frac{\left(\mathsf{fma}\left(-0.5, b, z\right) + y\right) + x}{a} - \frac{\log t \cdot z}{a}, -1, -1 \cdot b\right)} \]
  5. Taylor expanded in z around -inf

    \[\leadsto -1 \cdot \color{blue}{\left(z \cdot \left(-1 \cdot \left(a \cdot \left(-1 \cdot \frac{\log t}{a} + \frac{1}{a}\right)\right) + \frac{a \cdot \left(-1 \cdot b + -1 \cdot \left(\frac{-1}{2} \cdot \frac{b}{a} + \left(\frac{x}{a} + \frac{y}{a}\right)\right)\right)}{z}\right)\right)} \]
  6. Step-by-step derivation
    1. *-commutativeN/A

      \[\leadsto \left(z \cdot \left(-1 \cdot \left(a \cdot \left(-1 \cdot \frac{\log t}{a} + \frac{1}{a}\right)\right) + \frac{a \cdot \left(-1 \cdot b + -1 \cdot \left(\frac{-1}{2} \cdot \frac{b}{a} + \left(\frac{x}{a} + \frac{y}{a}\right)\right)\right)}{z}\right)\right) \cdot -1 \]
    2. lower-*.f64N/A

      \[\leadsto \left(z \cdot \left(-1 \cdot \left(a \cdot \left(-1 \cdot \frac{\log t}{a} + \frac{1}{a}\right)\right) + \frac{a \cdot \left(-1 \cdot b + -1 \cdot \left(\frac{-1}{2} \cdot \frac{b}{a} + \left(\frac{x}{a} + \frac{y}{a}\right)\right)\right)}{z}\right)\right) \cdot -1 \]
  7. Applied rewrites62.9%

    \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(\frac{\log t}{a}, -1, {a}^{-1}\right) \cdot a, -1, \frac{\mathsf{fma}\left(\mathsf{fma}\left(\frac{b}{a}, -0.5, \frac{y + x}{a}\right), -1, -1 \cdot b\right) \cdot a}{z}\right) \cdot z\right) \cdot \color{blue}{-1} \]
  8. Add Preprocessing

Alternative 4: 53.1% accurate, N/A× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_1 := \frac{\log t}{a} \cdot -1\\ \left(\mathsf{fma}\left(\frac{{a}^{-1} \cdot {a}^{-1} - t\_1 \cdot t\_1}{{a}^{-1} - t\_1} \cdot a, -1, \frac{\mathsf{fma}\left(\mathsf{fma}\left(\frac{b}{a}, -0.5, \frac{y + x}{a}\right), -1, -1 \cdot b\right) \cdot a}{z}\right) \cdot z\right) \cdot -1 \end{array} \end{array} \]
(FPCore (x y z t a b)
 :precision binary64
 (let* ((t_1 (* (/ (log t) a) -1.0)))
   (*
    (*
     (fma
      (*
       (/ (- (* (pow a -1.0) (pow a -1.0)) (* t_1 t_1)) (- (pow a -1.0) t_1))
       a)
      -1.0
      (/ (* (fma (fma (/ b a) -0.5 (/ (+ y x) a)) -1.0 (* -1.0 b)) a) z))
     z)
    -1.0)))
/* Herbie Alternative 4 (53.1% accurate): t_1 = -log(t)/a is reused in the
 * difference-of-squares ratio (1/a^2 - t_1^2)/(1/a - t_1).  Operation
 * order is deliberate for floating-point behavior; do not simplify. */
double code(double x, double y, double z, double t, double a, double b) {
	double t_1 = (log(t) / a) * -1.0;
	return (fma(((((pow(a, -1.0) * pow(a, -1.0)) - (t_1 * t_1)) / (pow(a, -1.0) - t_1)) * a), -1.0, ((fma(fma((b / a), -0.5, ((y + x) / a)), -1.0, (-1.0 * b)) * a) / z)) * z) * -1.0;
}
# Alternative 4 of the logBeta helper: same operation sequence as the
# generated one-liner, with the leading and tail terms named for readability.
function code(x, y, z, t, a, b)
	t_1 = Float64(Float64(log(t) / a) * -1.0)   # -log(t)/a
	inv_a = a ^ -1.0
	# (1/a^2 - t_1^2) / (1/a - t_1), scaled back by a
	lead = Float64(Float64(Float64(Float64(inv_a * inv_a) - Float64(t_1 * t_1)) / Float64(inv_a - t_1)) * a)
	inner = fma(fma(Float64(b / a), -0.5, Float64(Float64(y + x) / a)), -1.0, Float64(-1.0 * b))
	tail = Float64(Float64(inner * a) / z)
	return Float64(Float64(fma(lead, -1.0, tail) * z) * -1.0)
end
(* Alternative 4 of the logBeta helper; every intermediate is rounded with
   N[..., $MachinePrecision] to mirror the binary64 spec above. *)
code[x_, y_, z_, t_, a_, b_] := Block[{t$95$1 = N[(N[(N[Log[t], $MachinePrecision] / a), $MachinePrecision] * -1.0), $MachinePrecision]}, N[(N[(N[(N[(N[(N[(N[(N[Power[a, -1.0], $MachinePrecision] * N[Power[a, -1.0], $MachinePrecision]), $MachinePrecision] - N[(t$95$1 * t$95$1), $MachinePrecision]), $MachinePrecision] / N[(N[Power[a, -1.0], $MachinePrecision] - t$95$1), $MachinePrecision]), $MachinePrecision] * a), $MachinePrecision] * -1.0 + N[(N[(N[(N[(N[(b / a), $MachinePrecision] * -0.5 + N[(N[(y + x), $MachinePrecision] / a), $MachinePrecision]), $MachinePrecision] * -1.0 + N[(-1.0 * b), $MachinePrecision]), $MachinePrecision] * a), $MachinePrecision] / z), $MachinePrecision]), $MachinePrecision] * z), $MachinePrecision] * -1.0), $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
t_1 := \frac{\log t}{a} \cdot -1\\
\left(\mathsf{fma}\left(\frac{{a}^{-1} \cdot {a}^{-1} - t\_1 \cdot t\_1}{{a}^{-1} - t\_1} \cdot a, -1, \frac{\mathsf{fma}\left(\mathsf{fma}\left(\frac{b}{a}, -0.5, \frac{y + x}{a}\right), -1, -1 \cdot b\right) \cdot a}{z}\right) \cdot z\right) \cdot -1
\end{array}
\end{array}
Derivation
  1. Initial program (99.9% accurate)

    \[\left(\left(\left(x + y\right) + z\right) - z \cdot \log t\right) + \left(a - 0.5\right) \cdot b \]
  2. Taylor expanded in a around -inf

    \[\leadsto \color{blue}{-1 \cdot \left(a \cdot \left(-1 \cdot b + -1 \cdot \frac{\left(x + \left(y + \left(z + \frac{-1}{2} \cdot b\right)\right)\right) - z \cdot \log t}{a}\right)\right)} \]
  3. Step-by-step derivation
    1. associate-*r* (N/A)

      \[\leadsto \left(-1 \cdot a\right) \cdot \color{blue}{\left(-1 \cdot b + -1 \cdot \frac{\left(x + \left(y + \left(z + \frac{-1}{2} \cdot b\right)\right)\right) - z \cdot \log t}{a}\right)} \]
    2. lower-*.f64 (N/A)

      \[\leadsto \left(-1 \cdot a\right) \cdot \color{blue}{\left(-1 \cdot b + -1 \cdot \frac{\left(x + \left(y + \left(z + \frac{-1}{2} \cdot b\right)\right)\right) - z \cdot \log t}{a}\right)} \]
    3. lower-*.f64 (N/A)

      \[\leadsto \left(-1 \cdot a\right) \cdot \left(\color{blue}{-1 \cdot b} + -1 \cdot \frac{\left(x + \left(y + \left(z + \frac{-1}{2} \cdot b\right)\right)\right) - z \cdot \log t}{a}\right) \]
    4. +-commutative (N/A)

      \[\leadsto \left(-1 \cdot a\right) \cdot \left(-1 \cdot \frac{\left(x + \left(y + \left(z + \frac{-1}{2} \cdot b\right)\right)\right) - z \cdot \log t}{a} + \color{blue}{-1 \cdot b}\right) \]
    5. *-commutative (N/A)

      \[\leadsto \left(-1 \cdot a\right) \cdot \left(\frac{\left(x + \left(y + \left(z + \frac{-1}{2} \cdot b\right)\right)\right) - z \cdot \log t}{a} \cdot -1 + \color{blue}{-1} \cdot b\right) \]
    6. lower-fma.f64 (N/A)

      \[\leadsto \left(-1 \cdot a\right) \cdot \mathsf{fma}\left(\frac{\left(x + \left(y + \left(z + \frac{-1}{2} \cdot b\right)\right)\right) - z \cdot \log t}{a}, \color{blue}{-1}, -1 \cdot b\right) \]
  4. Applied rewrites (71.3% accurate)

    \[\leadsto \color{blue}{\left(-1 \cdot a\right) \cdot \mathsf{fma}\left(\frac{\left(\mathsf{fma}\left(-0.5, b, z\right) + y\right) + x}{a} - \frac{\log t \cdot z}{a}, -1, -1 \cdot b\right)} \]
  5. Taylor expanded in z around -inf

    \[\leadsto -1 \cdot \color{blue}{\left(z \cdot \left(-1 \cdot \left(a \cdot \left(-1 \cdot \frac{\log t}{a} + \frac{1}{a}\right)\right) + \frac{a \cdot \left(-1 \cdot b + -1 \cdot \left(\frac{-1}{2} \cdot \frac{b}{a} + \left(\frac{x}{a} + \frac{y}{a}\right)\right)\right)}{z}\right)\right)} \]
  6. Step-by-step derivation
    1. *-commutative (N/A)

      \[\leadsto \left(z \cdot \left(-1 \cdot \left(a \cdot \left(-1 \cdot \frac{\log t}{a} + \frac{1}{a}\right)\right) + \frac{a \cdot \left(-1 \cdot b + -1 \cdot \left(\frac{-1}{2} \cdot \frac{b}{a} + \left(\frac{x}{a} + \frac{y}{a}\right)\right)\right)}{z}\right)\right) \cdot -1 \]
    2. lower-*.f64 (N/A)

      \[\leadsto \left(z \cdot \left(-1 \cdot \left(a \cdot \left(-1 \cdot \frac{\log t}{a} + \frac{1}{a}\right)\right) + \frac{a \cdot \left(-1 \cdot b + -1 \cdot \left(\frac{-1}{2} \cdot \frac{b}{a} + \left(\frac{x}{a} + \frac{y}{a}\right)\right)\right)}{z}\right)\right) \cdot -1 \]
  7. Applied rewrites (62.9% accurate)

    \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(\frac{\log t}{a}, -1, {a}^{-1}\right) \cdot a, -1, \frac{\mathsf{fma}\left(\mathsf{fma}\left(\frac{b}{a}, -0.5, \frac{y + x}{a}\right), -1, -1 \cdot b\right) \cdot a}{z}\right) \cdot z\right) \cdot \color{blue}{-1} \]
  8. Step-by-step derivation
    1. lift-pow.f64 (N/A)

      \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(\frac{\log t}{a}, -1, {a}^{-1}\right) \cdot a, -1, \frac{\mathsf{fma}\left(\mathsf{fma}\left(\frac{b}{a}, \frac{-1}{2}, \frac{y + x}{a}\right), -1, -1 \cdot b\right) \cdot a}{z}\right) \cdot z\right) \cdot -1 \]
    2. lift-fma.f64 (N/A)

      \[\leadsto \left(\mathsf{fma}\left(\left(\frac{\log t}{a} \cdot -1 + {a}^{-1}\right) \cdot a, -1, \frac{\mathsf{fma}\left(\mathsf{fma}\left(\frac{b}{a}, \frac{-1}{2}, \frac{y + x}{a}\right), -1, -1 \cdot b\right) \cdot a}{z}\right) \cdot z\right) \cdot -1 \]
    3. lift-/.f64 (N/A)

      \[\leadsto \left(\mathsf{fma}\left(\left(\frac{\log t}{a} \cdot -1 + {a}^{-1}\right) \cdot a, -1, \frac{\mathsf{fma}\left(\mathsf{fma}\left(\frac{b}{a}, \frac{-1}{2}, \frac{y + x}{a}\right), -1, -1 \cdot b\right) \cdot a}{z}\right) \cdot z\right) \cdot -1 \]
    4. lift-log.f64 (N/A)

      \[\leadsto \left(\mathsf{fma}\left(\left(\frac{\log t}{a} \cdot -1 + {a}^{-1}\right) \cdot a, -1, \frac{\mathsf{fma}\left(\mathsf{fma}\left(\frac{b}{a}, \frac{-1}{2}, \frac{y + x}{a}\right), -1, -1 \cdot b\right) \cdot a}{z}\right) \cdot z\right) \cdot -1 \]
    5. inv-pow (N/A)

      \[\leadsto \left(\mathsf{fma}\left(\left(\frac{\log t}{a} \cdot -1 + \frac{1}{a}\right) \cdot a, -1, \frac{\mathsf{fma}\left(\mathsf{fma}\left(\frac{b}{a}, \frac{-1}{2}, \frac{y + x}{a}\right), -1, -1 \cdot b\right) \cdot a}{z}\right) \cdot z\right) \cdot -1 \]
    6. associate-*l/ (N/A)

      \[\leadsto \left(\mathsf{fma}\left(\left(\frac{\log t \cdot -1}{a} + \frac{1}{a}\right) \cdot a, -1, \frac{\mathsf{fma}\left(\mathsf{fma}\left(\frac{b}{a}, \frac{-1}{2}, \frac{y + x}{a}\right), -1, -1 \cdot b\right) \cdot a}{z}\right) \cdot z\right) \cdot -1 \]
    7. *-commutative (N/A)

      \[\leadsto \left(\mathsf{fma}\left(\left(\frac{-1 \cdot \log t}{a} + \frac{1}{a}\right) \cdot a, -1, \frac{\mathsf{fma}\left(\mathsf{fma}\left(\frac{b}{a}, \frac{-1}{2}, \frac{y + x}{a}\right), -1, -1 \cdot b\right) \cdot a}{z}\right) \cdot z\right) \cdot -1 \]
    8. mul-1-neg (N/A)

      \[\leadsto \left(\mathsf{fma}\left(\left(\frac{\mathsf{neg}\left(\log t\right)}{a} + \frac{1}{a}\right) \cdot a, -1, \frac{\mathsf{fma}\left(\mathsf{fma}\left(\frac{b}{a}, \frac{-1}{2}, \frac{y + x}{a}\right), -1, -1 \cdot b\right) \cdot a}{z}\right) \cdot z\right) \cdot -1 \]
    9. log-rec (N/A)

      \[\leadsto \left(\mathsf{fma}\left(\left(\frac{\log \left(\frac{1}{t}\right)}{a} + \frac{1}{a}\right) \cdot a, -1, \frac{\mathsf{fma}\left(\mathsf{fma}\left(\frac{b}{a}, \frac{-1}{2}, \frac{y + x}{a}\right), -1, -1 \cdot b\right) \cdot a}{z}\right) \cdot z\right) \cdot -1 \]
    10. +-commutative (N/A)

      \[\leadsto \left(\mathsf{fma}\left(\left(\frac{1}{a} + \frac{\log \left(\frac{1}{t}\right)}{a}\right) \cdot a, -1, \frac{\mathsf{fma}\left(\mathsf{fma}\left(\frac{b}{a}, \frac{-1}{2}, \frac{y + x}{a}\right), -1, -1 \cdot b\right) \cdot a}{z}\right) \cdot z\right) \cdot -1 \]
    11. flip-+ (N/A)

      \[\leadsto \left(\mathsf{fma}\left(\frac{\frac{1}{a} \cdot \frac{1}{a} - \frac{\log \left(\frac{1}{t}\right)}{a} \cdot \frac{\log \left(\frac{1}{t}\right)}{a}}{\frac{1}{a} - \frac{\log \left(\frac{1}{t}\right)}{a}} \cdot a, -1, \frac{\mathsf{fma}\left(\mathsf{fma}\left(\frac{b}{a}, \frac{-1}{2}, \frac{y + x}{a}\right), -1, -1 \cdot b\right) \cdot a}{z}\right) \cdot z\right) \cdot -1 \]
    12. lower-/.f64 (N/A)

      \[\leadsto \left(\mathsf{fma}\left(\frac{\frac{1}{a} \cdot \frac{1}{a} - \frac{\log \left(\frac{1}{t}\right)}{a} \cdot \frac{\log \left(\frac{1}{t}\right)}{a}}{\frac{1}{a} - \frac{\log \left(\frac{1}{t}\right)}{a}} \cdot a, -1, \frac{\mathsf{fma}\left(\mathsf{fma}\left(\frac{b}{a}, \frac{-1}{2}, \frac{y + x}{a}\right), -1, -1 \cdot b\right) \cdot a}{z}\right) \cdot z\right) \cdot -1 \]
  9. Applied rewrites (53.1% accurate)

    \[\leadsto \left(\mathsf{fma}\left(\frac{{a}^{-1} \cdot {a}^{-1} - \left(\frac{\log t}{a} \cdot -1\right) \cdot \left(\frac{\log t}{a} \cdot -1\right)}{{a}^{-1} - \frac{\log t}{a} \cdot -1} \cdot a, -1, \frac{\mathsf{fma}\left(\mathsf{fma}\left(\frac{b}{a}, -0.5, \frac{y + x}{a}\right), -1, -1 \cdot b\right) \cdot a}{z}\right) \cdot z\right) \cdot -1 \]
  10. Add Preprocessing

Reproduce

herbie shell --seed 2025093
; Original specification (99.9% accurate); paste into the Herbie shell above.
(FPCore (x y z t a b)
  :name "Numeric.SpecFunctions:logBeta from math-functions-0.1.5.2, A"
  :precision binary64
  (+ (- (+ (+ x y) z) (* z (log t))) (* (- a 0.5) b)))