System.Random.MWC.Distributions:truncatedExp from mwc-random-0.13.3.2

Percentage Accurate: 72.2% → 93.1%
Time: 18.7s
Alternatives: 7
Speedup: 211.0×

Specification

?
\[\begin{array}{l} \\ x - \frac{\log \left(\left(1 - y\right) + y \cdot e^{z}\right)}{t} \end{array} \]
; Specification: x - log((1 - y) + y*e^z) / t, evaluated in binary64.
; Direct form; the log argument cancels when y*(e^z - 1) is tiny.
(FPCore (x y z t)
 :precision binary64
 (- x (/ (log (+ (- 1.0 y) (* y (exp z)))) t)))
double code(double x, double y, double z, double t) {
	return x - (log(((1.0 - y) + (y * exp(z)))) / t);
}
! x - log((1 - y) + y*exp(z)) / t evaluated directly in real(8).
! Generated translation of the FPCore spec; kept byte-identical.
real(8) function code(x, y, z, t)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    ! 1.0d0 keeps the leading subtraction in double precision
    code = x - (log(((1.0d0 - y) + (y * exp(z)))) / t)
end function
/** Binary64 evaluation of x - log((1 - y) + y*exp(z)) / t (specification form). */
public static double code(double x, double y, double z, double t) {
	final double logArg = (1.0 - y) + (y * Math.exp(z));
	return x - (Math.log(logArg) / t);
}
def code(x, y, z, t):
	"""Evaluate x - log((1 - y) + y*e**z) / t directly (specification form)."""
	log_arg = (1.0 - y) + (y * math.exp(z))
	return x - (math.log(log_arg) / t)
# x - log((1 - y) + y*exp(z)) / t with an explicit Float64(...) rounding
# of every intermediate to pin binary64 semantics at each step.
function code(x, y, z, t)
	return Float64(x - Float64(log(Float64(Float64(1.0 - y) + Float64(y * exp(z)))) / t))
end
% x - log((1 - y) + y*exp(z)) / t, direct translation of the FPCore spec.
function tmp = code(x, y, z, t)
	tmp = x - (log(((1.0 - y) + (y * exp(z)))) / t);
end
code[x_, y_, z_, t_] := N[(x - N[(N[Log[N[(N[(1.0 - y), $MachinePrecision] + N[(y * N[Exp[z], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] / t), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
x - \frac{\log \left(\left(1 - y\right) + y \cdot e^{z}\right)}{t}
\end{array}

Sampling outcomes in binary64 precision:

Local Percentage Accuracy vs ?

The average percentage accuracy by input value. The horizontal axis shows the value of an input variable; the variable is chosen in the title. The vertical axis is accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion. These can be toggled with buttons below the plot. The line is an average, while dots represent individual samples.

Accuracy vs Speed?

Herbie found 7 alternatives:

Alternative | Accuracy | Speedup
The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed–accuracy tradeoffs.

Initial Program: 72.2% accurate, 1.0× speedup?

\[\begin{array}{l} \\ x - \frac{\log \left(\left(1 - y\right) + y \cdot e^{z}\right)}{t} \end{array} \]
; Initial program (accuracy baseline): x - log((1 - y) + y*e^z) / t in binary64.
(FPCore (x y z t)
 :precision binary64
 (- x (/ (log (+ (- 1.0 y) (* y (exp z)))) t)))
double code(double x, double y, double z, double t) {
	return x - (log(((1.0 - y) + (y * exp(z)))) / t);
}
! Initial program (baseline): x - log((1 - y) + y*exp(z)) / t in real(8).
real(8) function code(x, y, z, t)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    ! 1.0d0 keeps the leading subtraction in double precision
    code = x - (log(((1.0d0 - y) + (y * exp(z)))) / t)
end function
/** Initial program: x - log((1 - y) + y*exp(z)) / t (accuracy baseline). */
public static double code(double x, double y, double z, double t) {
	double inner = 1.0 - y;
	inner += y * Math.exp(z);
	return x - Math.log(inner) / t;
}
def code(x, y, z, t):
	"""Initial program: x - log((1 - y) + y*e**z) / t (accuracy baseline)."""
	inner = (1.0 - y) + y * math.exp(z)
	return x - math.log(inner) / t
# Initial program (baseline), every intermediate explicitly rounded to Float64.
function code(x, y, z, t)
	return Float64(x - Float64(log(Float64(Float64(1.0 - y) + Float64(y * exp(z)))) / t))
end
% Initial program (baseline): x - log((1 - y) + y*exp(z)) / t.
function tmp = code(x, y, z, t)
	tmp = x - (log(((1.0 - y) + (y * exp(z)))) / t);
end
code[x_, y_, z_, t_] := N[(x - N[(N[Log[N[(N[(1.0 - y), $MachinePrecision] + N[(y * N[Exp[z], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] / t), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
x - \frac{\log \left(\left(1 - y\right) + y \cdot e^{z}\right)}{t}
\end{array}

Alternative 1: 93.1% accurate, 0.5× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;y \leq -3.3 \cdot 10^{-24}:\\ \;\;\;\;x + \left(0 - \frac{{\left({\left(\mathsf{log1p}\left(y \cdot \mathsf{expm1}\left(z\right)\right)\right)}^{3}\right)}^{0.3333333333333333}}{t}\right)\\ \mathbf{elif}\;y \leq 2.3 \cdot 10^{+86}:\\ \;\;\;\;x\\ \mathbf{else}:\\ \;\;\;\;x - \frac{\mathsf{log1p}\left(y \cdot \left(z + \left(0.041666666666666664 \cdot {z}^{4} + \left(0.16666666666666666 \cdot {z}^{3} + 0.5 \cdot {z}^{2}\right)\right)\right)\right)}{t}\\ \end{array} \end{array} \]
; Alternative 1: three regimes split on y.
;   y <= -3.3e-24        -> log1p/expm1 form with a cube-then-cbrt guard
;   -3.3e-24 < y <= 2.3e86 -> correction negligible, return x
;   y > 2.3e86           -> log1p of a degree-4 Taylor polynomial of expm1(z)
(FPCore (x y z t)
 :precision binary64
 (if (<= y -3.3e-24)
   (+
    x
    (- 0.0 (/ (pow (pow (log1p (* y (expm1 z))) 3.0) 0.3333333333333333) t)))
   (if (<= y 2.3e+86)
     x
     (-
      x
      (/
       (log1p
        (*
         y
         (+
          z
          (+
           (* 0.041666666666666664 (pow z 4.0))
           (+ (* 0.16666666666666666 (pow z 3.0)) (* 0.5 (pow z 2.0)))))))
       t)))))
double code(double x, double y, double z, double t) {
	double tmp;
	if (y <= -3.3e-24) {
		tmp = x + (0.0 - (pow(pow(log1p((y * expm1(z))), 3.0), 0.3333333333333333) / t));
	} else if (y <= 2.3e+86) {
		tmp = x;
	} else {
		tmp = x - (log1p((y * (z + ((0.041666666666666664 * pow(z, 4.0)) + ((0.16666666666666666 * pow(z, 3.0)) + (0.5 * pow(z, 2.0))))))) / t);
	}
	return tmp;
}
/** Herbie alternative 1: three regimes split on y (see report derivation). */
public static double code(double x, double y, double z, double t) {
	if (y <= -3.3e-24) {
		// log1p/expm1 rewrite with a cube-then-cbrt guard on the power
		double u = Math.log1p(y * Math.expm1(z));
		return x + (0.0 - (Math.pow(Math.pow(u, 3.0), 0.3333333333333333) / t));
	}
	if (y <= 2.3e+86) {
		return x; // correction term is negligible in this regime
	}
	// degree-4 Taylor polynomial of expm1(z) around z = 0
	double poly = z + ((0.041666666666666664 * Math.pow(z, 4.0))
			+ ((0.16666666666666666 * Math.pow(z, 3.0)) + (0.5 * Math.pow(z, 2.0))));
	return x - (Math.log1p(y * poly) / t);
}
def code(x, y, z, t):
	"""Herbie alternative 1: three regimes split on y (see report derivation)."""
	if y <= -3.3e-24:
		# log1p/expm1 rewrite with a cube-then-cbrt guard on the power
		u = math.log1p(y * math.expm1(z))
		return x + (0.0 - (math.pow(math.pow(u, 3.0), 0.3333333333333333) / t))
	if y <= 2.3e+86:
		# correction term is negligible in this regime
		return x
	poly = z + ((0.041666666666666664 * math.pow(z, 4.0))
		+ ((0.16666666666666666 * math.pow(z, 3.0)) + (0.5 * math.pow(z, 2.0))))
	return x - (math.log1p(y * poly) / t)
# Herbie alternative 1: three regimes split on y; explicit Float64(...) wrapping
# pins the binary64 rounding point of every intermediate, so kept byte-identical.
function code(x, y, z, t)
	tmp = 0.0
	if (y <= -3.3e-24)
		tmp = Float64(x + Float64(0.0 - Float64(((log1p(Float64(y * expm1(z))) ^ 3.0) ^ 0.3333333333333333) / t)));
	elseif (y <= 2.3e+86)
		tmp = x;
	else
		tmp = Float64(x - Float64(log1p(Float64(y * Float64(z + Float64(Float64(0.041666666666666664 * (z ^ 4.0)) + Float64(Float64(0.16666666666666666 * (z ^ 3.0)) + Float64(0.5 * (z ^ 2.0))))))) / t));
	end
	return tmp
end
code[x_, y_, z_, t_] := If[LessEqual[y, -3.3e-24], N[(x + N[(0.0 - N[(N[Power[N[Power[N[Log[1 + N[(y * N[(Exp[z] - 1), $MachinePrecision]), $MachinePrecision]], $MachinePrecision], 3.0], $MachinePrecision], 0.3333333333333333], $MachinePrecision] / t), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], If[LessEqual[y, 2.3e+86], x, N[(x - N[(N[Log[1 + N[(y * N[(z + N[(N[(0.041666666666666664 * N[Power[z, 4.0], $MachinePrecision]), $MachinePrecision] + N[(N[(0.16666666666666666 * N[Power[z, 3.0], $MachinePrecision]), $MachinePrecision] + N[(0.5 * N[Power[z, 2.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] / t), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;y \leq -3.3 \cdot 10^{-24}:\\
\;\;\;\;x + \left(0 - \frac{{\left({\left(\mathsf{log1p}\left(y \cdot \mathsf{expm1}\left(z\right)\right)\right)}^{3}\right)}^{0.3333333333333333}}{t}\right)\\

\mathbf{elif}\;y \leq 2.3 \cdot 10^{+86}:\\
\;\;\;\;x\\

\mathbf{else}:\\
\;\;\;\;x - \frac{\mathsf{log1p}\left(y \cdot \left(z + \left(0.041666666666666664 \cdot {z}^{4} + \left(0.16666666666666666 \cdot {z}^{3} + 0.5 \cdot {z}^{2}\right)\right)\right)\right)}{t}\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if y < -3.29999999999999984e-24

    1. Initial program 41.2%

      \[x - \frac{\log \left(\left(1 - y\right) + y \cdot e^{z}\right)}{t} \]
    2. Add Preprocessing
    3. Step-by-step derivation
      1. sub-neg41.2%

        \[\leadsto x - \frac{\log \left(\color{blue}{\left(1 + \left(-y\right)\right)} + y \cdot e^{z}\right)}{t} \]
      2. neg-mul-141.2%

        \[\leadsto x - \frac{\log \left(\left(1 + \color{blue}{-1 \cdot y}\right) + y \cdot e^{z}\right)}{t} \]
      3. associate-+l+86.7%

        \[\leadsto x - \frac{\log \color{blue}{\left(1 + \left(-1 \cdot y + y \cdot e^{z}\right)\right)}}{t} \]
      4. *-commutative86.7%

        \[\leadsto x - \frac{\log \left(1 + \left(-1 \cdot y + \color{blue}{e^{z} \cdot y}\right)\right)}{t} \]
      5. distribute-rgt-in86.7%

        \[\leadsto x - \frac{\log \left(1 + \color{blue}{y \cdot \left(-1 + e^{z}\right)}\right)}{t} \]
      6. +-commutative86.7%

        \[\leadsto x - \frac{\log \left(1 + y \cdot \color{blue}{\left(e^{z} + -1\right)}\right)}{t} \]
      7. metadata-eval86.7%

        \[\leadsto x - \frac{\log \left(1 + y \cdot \left(e^{z} + \color{blue}{\left(-1\right)}\right)\right)}{t} \]
      8. sub-neg86.7%

        \[\leadsto x - \frac{\log \left(1 + y \cdot \color{blue}{\left(e^{z} - 1\right)}\right)}{t} \]
      9. expm1-udef96.4%

        \[\leadsto x - \frac{\log \left(1 + y \cdot \color{blue}{\mathsf{expm1}\left(z\right)}\right)}{t} \]
      10. log1p-udef93.7%

        \[\leadsto x - \frac{\color{blue}{\mathsf{log1p}\left(y \cdot \mathsf{expm1}\left(z\right)\right)}}{t} \]
      11. add-cbrt-cube97.9%

        \[\leadsto x - \frac{\color{blue}{\sqrt[3]{\left(\mathsf{log1p}\left(y \cdot \mathsf{expm1}\left(z\right)\right) \cdot \mathsf{log1p}\left(y \cdot \mathsf{expm1}\left(z\right)\right)\right) \cdot \mathsf{log1p}\left(y \cdot \mathsf{expm1}\left(z\right)\right)}}}{t} \]
      12. pow1/394.5%

        \[\leadsto x - \frac{\color{blue}{{\left(\left(\mathsf{log1p}\left(y \cdot \mathsf{expm1}\left(z\right)\right) \cdot \mathsf{log1p}\left(y \cdot \mathsf{expm1}\left(z\right)\right)\right) \cdot \mathsf{log1p}\left(y \cdot \mathsf{expm1}\left(z\right)\right)\right)}^{0.3333333333333333}}}{t} \]
      13. pow394.5%

        \[\leadsto x - \frac{{\color{blue}{\left({\left(\mathsf{log1p}\left(y \cdot \mathsf{expm1}\left(z\right)\right)\right)}^{3}\right)}}^{0.3333333333333333}}{t} \]
    4. Applied egg-rr94.5%

      \[\leadsto x - \frac{\color{blue}{{\left({\left(\mathsf{log1p}\left(y \cdot \mathsf{expm1}\left(z\right)\right)\right)}^{3}\right)}^{0.3333333333333333}}}{t} \]

    if -3.29999999999999984e-24 < y < 2.2999999999999999e86

    1. Initial program 94.3%

      \[x - \frac{\log \left(\left(1 - y\right) + y \cdot e^{z}\right)}{t} \]
    2. Add Preprocessing
    3. Taylor expanded in x around inf 99.4%

      \[\leadsto \color{blue}{x} \]

    if 2.2999999999999999e86 < y

    1. Initial program 3.0%

      \[x - \frac{\log \left(\left(1 - y\right) + y \cdot e^{z}\right)}{t} \]
    2. Step-by-step derivation
      1. remove-double-neg3.0%

        \[\leadsto x - \color{blue}{\left(-\left(-\frac{\log \left(\left(1 - y\right) + y \cdot e^{z}\right)}{t}\right)\right)} \]
      2. neg-mul-13.0%

        \[\leadsto x - \color{blue}{-1 \cdot \left(-\frac{\log \left(\left(1 - y\right) + y \cdot e^{z}\right)}{t}\right)} \]
      3. neg-mul-13.0%

        \[\leadsto x - \color{blue}{\left(-\left(-\frac{\log \left(\left(1 - y\right) + y \cdot e^{z}\right)}{t}\right)\right)} \]
      4. remove-double-neg3.0%

        \[\leadsto x - \color{blue}{\frac{\log \left(\left(1 - y\right) + y \cdot e^{z}\right)}{t}} \]
      5. sub-neg3.0%

        \[\leadsto x - \frac{\log \left(\color{blue}{\left(1 + \left(-y\right)\right)} + y \cdot e^{z}\right)}{t} \]
      6. associate-+l+56.0%

        \[\leadsto x - \frac{\log \color{blue}{\left(1 + \left(\left(-y\right) + y \cdot e^{z}\right)\right)}}{t} \]
      7. cancel-sign-sub56.0%

        \[\leadsto x - \frac{\log \left(1 + \color{blue}{\left(\left(-y\right) - \left(-y\right) \cdot e^{z}\right)}\right)}{t} \]
      8. log1p-def56.0%

        \[\leadsto x - \frac{\color{blue}{\mathsf{log1p}\left(\left(-y\right) - \left(-y\right) \cdot e^{z}\right)}}{t} \]
      9. cancel-sign-sub56.0%

        \[\leadsto x - \frac{\mathsf{log1p}\left(\color{blue}{\left(-y\right) + y \cdot e^{z}}\right)}{t} \]
      10. +-commutative56.0%

        \[\leadsto x - \frac{\mathsf{log1p}\left(\color{blue}{y \cdot e^{z} + \left(-y\right)}\right)}{t} \]
      11. neg-mul-156.0%

        \[\leadsto x - \frac{\mathsf{log1p}\left(y \cdot e^{z} + \color{blue}{-1 \cdot y}\right)}{t} \]
      12. *-commutative56.0%

        \[\leadsto x - \frac{\mathsf{log1p}\left(y \cdot e^{z} + \color{blue}{y \cdot -1}\right)}{t} \]
      13. distribute-lft-out56.0%

        \[\leadsto x - \frac{\mathsf{log1p}\left(\color{blue}{y \cdot \left(e^{z} + -1\right)}\right)}{t} \]
    3. Simplified56.0%

      \[\leadsto \color{blue}{x - \frac{\mathsf{log1p}\left(y \cdot \left(e^{z} + -1\right)\right)}{t}} \]
    4. Add Preprocessing
    5. Taylor expanded in z around 0 95.8%

      \[\leadsto x - \frac{\mathsf{log1p}\left(y \cdot \color{blue}{\left(z + \left(0.041666666666666664 \cdot {z}^{4} + \left(0.16666666666666666 \cdot {z}^{3} + 0.5 \cdot {z}^{2}\right)\right)\right)}\right)}{t} \]
  3. Recombined 3 regimes into one program.
  4. Final simplification97.9%

    \[\leadsto \begin{array}{l} \mathbf{if}\;y \leq -3.3 \cdot 10^{-24}:\\ \;\;\;\;x + \left(0 - \frac{{\left({\left(\mathsf{log1p}\left(y \cdot \mathsf{expm1}\left(z\right)\right)\right)}^{3}\right)}^{0.3333333333333333}}{t}\right)\\ \mathbf{elif}\;y \leq 2.3 \cdot 10^{+86}:\\ \;\;\;\;x\\ \mathbf{else}:\\ \;\;\;\;x - \frac{\mathsf{log1p}\left(y \cdot \left(z + \left(0.041666666666666664 \cdot {z}^{4} + \left(0.16666666666666666 \cdot {z}^{3} + 0.5 \cdot {z}^{2}\right)\right)\right)\right)}{t}\\ \end{array} \]
  5. Add Preprocessing

Alternative 2: 93.0% accurate, 0.5× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;y \leq -3.15 \cdot 10^{-24}:\\ \;\;\;\;x + \left(0 - \frac{{\left({\left(\mathsf{log1p}\left(y \cdot \mathsf{expm1}\left(z\right)\right)\right)}^{3}\right)}^{0.3333333333333333}}{t}\right)\\ \mathbf{elif}\;y \leq 1.6 \cdot 10^{+96}:\\ \;\;\;\;x\\ \mathbf{else}:\\ \;\;\;\;x - \frac{\mathsf{log1p}\left(y \cdot \left(z + \left(0.16666666666666666 \cdot {z}^{3} + 0.5 \cdot {z}^{2}\right)\right)\right)}{t}\\ \end{array} \end{array} \]
; Alternative 2: like alternative 1 but with a cubic (no z^4) Taylor tail
; and regime boundaries -3.15e-24 / 1.6e96.
(FPCore (x y z t)
 :precision binary64
 (if (<= y -3.15e-24)
   (+
    x
    (- 0.0 (/ (pow (pow (log1p (* y (expm1 z))) 3.0) 0.3333333333333333) t)))
   (if (<= y 1.6e+96)
     x
     (-
      x
      (/
       (log1p
        (*
         y
         (+ z (+ (* 0.16666666666666666 (pow z 3.0)) (* 0.5 (pow z 2.0))))))
       t)))))
double code(double x, double y, double z, double t) {
	double tmp;
	if (y <= -3.15e-24) {
		tmp = x + (0.0 - (pow(pow(log1p((y * expm1(z))), 3.0), 0.3333333333333333) / t));
	} else if (y <= 1.6e+96) {
		tmp = x;
	} else {
		tmp = x - (log1p((y * (z + ((0.16666666666666666 * pow(z, 3.0)) + (0.5 * pow(z, 2.0)))))) / t);
	}
	return tmp;
}
/** Herbie alternative 2: regime split on y with a cubic Taylor tail. */
public static double code(double x, double y, double z, double t) {
	if (y <= -3.15e-24) {
		double u = Math.log1p(y * Math.expm1(z));
		return x + (0.0 - (Math.pow(Math.pow(u, 3.0), 0.3333333333333333) / t));
	}
	if (y <= 1.6e+96) {
		return x; // correction term is negligible in this regime
	}
	double poly = z + ((0.16666666666666666 * Math.pow(z, 3.0)) + (0.5 * Math.pow(z, 2.0)));
	return x - (Math.log1p(y * poly) / t);
}
def code(x, y, z, t):
	"""Herbie alternative 2: regime split on y with a cubic Taylor tail."""
	if y <= -3.15e-24:
		u = math.log1p(y * math.expm1(z))
		return x + (0.0 - (math.pow(math.pow(u, 3.0), 0.3333333333333333) / t))
	if y <= 1.6e+96:
		return x
	poly = z + ((0.16666666666666666 * math.pow(z, 3.0)) + (0.5 * math.pow(z, 2.0)))
	return x - (math.log1p(y * poly) / t)
# Herbie alternative 2: regime split on y, cubic Taylor tail; Float64(...)
# wrappers pin per-step rounding, so the code is kept byte-identical.
function code(x, y, z, t)
	tmp = 0.0
	if (y <= -3.15e-24)
		tmp = Float64(x + Float64(0.0 - Float64(((log1p(Float64(y * expm1(z))) ^ 3.0) ^ 0.3333333333333333) / t)));
	elseif (y <= 1.6e+96)
		tmp = x;
	else
		tmp = Float64(x - Float64(log1p(Float64(y * Float64(z + Float64(Float64(0.16666666666666666 * (z ^ 3.0)) + Float64(0.5 * (z ^ 2.0)))))) / t));
	end
	return tmp
end
code[x_, y_, z_, t_] := If[LessEqual[y, -3.15e-24], N[(x + N[(0.0 - N[(N[Power[N[Power[N[Log[1 + N[(y * N[(Exp[z] - 1), $MachinePrecision]), $MachinePrecision]], $MachinePrecision], 3.0], $MachinePrecision], 0.3333333333333333], $MachinePrecision] / t), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], If[LessEqual[y, 1.6e+96], x, N[(x - N[(N[Log[1 + N[(y * N[(z + N[(N[(0.16666666666666666 * N[Power[z, 3.0], $MachinePrecision]), $MachinePrecision] + N[(0.5 * N[Power[z, 2.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] / t), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;y \leq -3.15 \cdot 10^{-24}:\\
\;\;\;\;x + \left(0 - \frac{{\left({\left(\mathsf{log1p}\left(y \cdot \mathsf{expm1}\left(z\right)\right)\right)}^{3}\right)}^{0.3333333333333333}}{t}\right)\\

\mathbf{elif}\;y \leq 1.6 \cdot 10^{+96}:\\
\;\;\;\;x\\

\mathbf{else}:\\
\;\;\;\;x - \frac{\mathsf{log1p}\left(y \cdot \left(z + \left(0.16666666666666666 \cdot {z}^{3} + 0.5 \cdot {z}^{2}\right)\right)\right)}{t}\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if y < -3.1499999999999999e-24

    1. Initial program 41.2%

      \[x - \frac{\log \left(\left(1 - y\right) + y \cdot e^{z}\right)}{t} \]
    2. Add Preprocessing
    3. Step-by-step derivation
      1. sub-neg41.2%

        \[\leadsto x - \frac{\log \left(\color{blue}{\left(1 + \left(-y\right)\right)} + y \cdot e^{z}\right)}{t} \]
      2. neg-mul-141.2%

        \[\leadsto x - \frac{\log \left(\left(1 + \color{blue}{-1 \cdot y}\right) + y \cdot e^{z}\right)}{t} \]
      3. associate-+l+86.7%

        \[\leadsto x - \frac{\log \color{blue}{\left(1 + \left(-1 \cdot y + y \cdot e^{z}\right)\right)}}{t} \]
      4. *-commutative86.7%

        \[\leadsto x - \frac{\log \left(1 + \left(-1 \cdot y + \color{blue}{e^{z} \cdot y}\right)\right)}{t} \]
      5. distribute-rgt-in86.7%

        \[\leadsto x - \frac{\log \left(1 + \color{blue}{y \cdot \left(-1 + e^{z}\right)}\right)}{t} \]
      6. +-commutative86.7%

        \[\leadsto x - \frac{\log \left(1 + y \cdot \color{blue}{\left(e^{z} + -1\right)}\right)}{t} \]
      7. metadata-eval86.7%

        \[\leadsto x - \frac{\log \left(1 + y \cdot \left(e^{z} + \color{blue}{\left(-1\right)}\right)\right)}{t} \]
      8. sub-neg86.7%

        \[\leadsto x - \frac{\log \left(1 + y \cdot \color{blue}{\left(e^{z} - 1\right)}\right)}{t} \]
      9. expm1-udef96.4%

        \[\leadsto x - \frac{\log \left(1 + y \cdot \color{blue}{\mathsf{expm1}\left(z\right)}\right)}{t} \]
      10. log1p-udef93.7%

        \[\leadsto x - \frac{\color{blue}{\mathsf{log1p}\left(y \cdot \mathsf{expm1}\left(z\right)\right)}}{t} \]
      11. add-cbrt-cube97.9%

        \[\leadsto x - \frac{\color{blue}{\sqrt[3]{\left(\mathsf{log1p}\left(y \cdot \mathsf{expm1}\left(z\right)\right) \cdot \mathsf{log1p}\left(y \cdot \mathsf{expm1}\left(z\right)\right)\right) \cdot \mathsf{log1p}\left(y \cdot \mathsf{expm1}\left(z\right)\right)}}}{t} \]
      12. pow1/394.5%

        \[\leadsto x - \frac{\color{blue}{{\left(\left(\mathsf{log1p}\left(y \cdot \mathsf{expm1}\left(z\right)\right) \cdot \mathsf{log1p}\left(y \cdot \mathsf{expm1}\left(z\right)\right)\right) \cdot \mathsf{log1p}\left(y \cdot \mathsf{expm1}\left(z\right)\right)\right)}^{0.3333333333333333}}}{t} \]
      13. pow394.5%

        \[\leadsto x - \frac{{\color{blue}{\left({\left(\mathsf{log1p}\left(y \cdot \mathsf{expm1}\left(z\right)\right)\right)}^{3}\right)}}^{0.3333333333333333}}{t} \]
    4. Applied egg-rr94.5%

      \[\leadsto x - \frac{\color{blue}{{\left({\left(\mathsf{log1p}\left(y \cdot \mathsf{expm1}\left(z\right)\right)\right)}^{3}\right)}^{0.3333333333333333}}}{t} \]

    if -3.1499999999999999e-24 < y < 1.60000000000000003e96

    1. Initial program 94.3%

      \[x - \frac{\log \left(\left(1 - y\right) + y \cdot e^{z}\right)}{t} \]
    2. Add Preprocessing
    3. Taylor expanded in x around inf 99.4%

      \[\leadsto \color{blue}{x} \]

    if 1.60000000000000003e96 < y

    1. Initial program 3.0%

      \[x - \frac{\log \left(\left(1 - y\right) + y \cdot e^{z}\right)}{t} \]
    2. Step-by-step derivation
      1. remove-double-neg3.0%

        \[\leadsto x - \color{blue}{\left(-\left(-\frac{\log \left(\left(1 - y\right) + y \cdot e^{z}\right)}{t}\right)\right)} \]
      2. neg-mul-13.0%

        \[\leadsto x - \color{blue}{-1 \cdot \left(-\frac{\log \left(\left(1 - y\right) + y \cdot e^{z}\right)}{t}\right)} \]
      3. neg-mul-13.0%

        \[\leadsto x - \color{blue}{\left(-\left(-\frac{\log \left(\left(1 - y\right) + y \cdot e^{z}\right)}{t}\right)\right)} \]
      4. remove-double-neg3.0%

        \[\leadsto x - \color{blue}{\frac{\log \left(\left(1 - y\right) + y \cdot e^{z}\right)}{t}} \]
      5. sub-neg3.0%

        \[\leadsto x - \frac{\log \left(\color{blue}{\left(1 + \left(-y\right)\right)} + y \cdot e^{z}\right)}{t} \]
      6. associate-+l+56.0%

        \[\leadsto x - \frac{\log \color{blue}{\left(1 + \left(\left(-y\right) + y \cdot e^{z}\right)\right)}}{t} \]
      7. cancel-sign-sub56.0%

        \[\leadsto x - \frac{\log \left(1 + \color{blue}{\left(\left(-y\right) - \left(-y\right) \cdot e^{z}\right)}\right)}{t} \]
      8. log1p-def56.0%

        \[\leadsto x - \frac{\color{blue}{\mathsf{log1p}\left(\left(-y\right) - \left(-y\right) \cdot e^{z}\right)}}{t} \]
      9. cancel-sign-sub56.0%

        \[\leadsto x - \frac{\mathsf{log1p}\left(\color{blue}{\left(-y\right) + y \cdot e^{z}}\right)}{t} \]
      10. +-commutative56.0%

        \[\leadsto x - \frac{\mathsf{log1p}\left(\color{blue}{y \cdot e^{z} + \left(-y\right)}\right)}{t} \]
      11. neg-mul-156.0%

        \[\leadsto x - \frac{\mathsf{log1p}\left(y \cdot e^{z} + \color{blue}{-1 \cdot y}\right)}{t} \]
      12. *-commutative56.0%

        \[\leadsto x - \frac{\mathsf{log1p}\left(y \cdot e^{z} + \color{blue}{y \cdot -1}\right)}{t} \]
      13. distribute-lft-out56.0%

        \[\leadsto x - \frac{\mathsf{log1p}\left(\color{blue}{y \cdot \left(e^{z} + -1\right)}\right)}{t} \]
    3. Simplified56.0%

      \[\leadsto \color{blue}{x - \frac{\mathsf{log1p}\left(y \cdot \left(e^{z} + -1\right)\right)}{t}} \]
    4. Add Preprocessing
    5. Taylor expanded in z around 0 95.8%

      \[\leadsto x - \frac{\mathsf{log1p}\left(y \cdot \color{blue}{\left(z + \left(0.16666666666666666 \cdot {z}^{3} + 0.5 \cdot {z}^{2}\right)\right)}\right)}{t} \]
  3. Recombined 3 regimes into one program.
  4. Final simplification97.9%

    \[\leadsto \begin{array}{l} \mathbf{if}\;y \leq -3.15 \cdot 10^{-24}:\\ \;\;\;\;x + \left(0 - \frac{{\left({\left(\mathsf{log1p}\left(y \cdot \mathsf{expm1}\left(z\right)\right)\right)}^{3}\right)}^{0.3333333333333333}}{t}\right)\\ \mathbf{elif}\;y \leq 1.6 \cdot 10^{+96}:\\ \;\;\;\;x\\ \mathbf{else}:\\ \;\;\;\;x - \frac{\mathsf{log1p}\left(y \cdot \left(z + \left(0.16666666666666666 \cdot {z}^{3} + 0.5 \cdot {z}^{2}\right)\right)\right)}{t}\\ \end{array} \]
  5. Add Preprocessing

Alternative 3: 95.2% accurate, 0.6× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;y \leq -3.2 \cdot 10^{-24}:\\ \;\;\;\;x - \frac{\mathsf{log1p}\left(y \cdot \mathsf{expm1}\left(z\right)\right)}{t}\\ \mathbf{elif}\;y \leq 5.9 \cdot 10^{+91}:\\ \;\;\;\;x\\ \mathbf{else}:\\ \;\;\;\;x - \frac{\mathsf{log1p}\left(y \cdot \left(z + \left(0.16666666666666666 \cdot {z}^{3} + 0.5 \cdot {z}^{2}\right)\right)\right)}{t}\\ \end{array} \end{array} \]
; Alternative 3: plain log1p/expm1 form for y <= -3.2e-24, x for the middle
; regime, cubic Taylor tail of expm1(z) for y > 5.9e91.
(FPCore (x y z t)
 :precision binary64
 (if (<= y -3.2e-24)
   (- x (/ (log1p (* y (expm1 z))) t))
   (if (<= y 5.9e+91)
     x
     (-
      x
      (/
       (log1p
        (*
         y
         (+ z (+ (* 0.16666666666666666 (pow z 3.0)) (* 0.5 (pow z 2.0))))))
       t)))))
double code(double x, double y, double z, double t) {
	double tmp;
	if (y <= -3.2e-24) {
		tmp = x - (log1p((y * expm1(z))) / t);
	} else if (y <= 5.9e+91) {
		tmp = x;
	} else {
		tmp = x - (log1p((y * (z + ((0.16666666666666666 * pow(z, 3.0)) + (0.5 * pow(z, 2.0)))))) / t);
	}
	return tmp;
}
/** Herbie alternative 3: log1p/expm1 form for negative y, Taylor tail for huge y. */
public static double code(double x, double y, double z, double t) {
	if (y <= -3.2e-24) {
		return x - (Math.log1p(y * Math.expm1(z)) / t);
	}
	if (y <= 5.9e+91) {
		return x; // correction term is negligible in this regime
	}
	double poly = z + ((0.16666666666666666 * Math.pow(z, 3.0)) + (0.5 * Math.pow(z, 2.0)));
	return x - (Math.log1p(y * poly) / t);
}
def code(x, y, z, t):
	"""Herbie alternative 3: log1p/expm1 for negative y, Taylor tail for huge y."""
	if y <= -3.2e-24:
		return x - (math.log1p(y * math.expm1(z)) / t)
	if y <= 5.9e+91:
		return x
	poly = z + ((0.16666666666666666 * math.pow(z, 3.0)) + (0.5 * math.pow(z, 2.0)))
	return x - (math.log1p(y * poly) / t)
# Herbie alternative 3: log1p/expm1 for negative y, cubic Taylor tail for
# huge y; Float64(...) wrappers pin rounding, code kept byte-identical.
function code(x, y, z, t)
	tmp = 0.0
	if (y <= -3.2e-24)
		tmp = Float64(x - Float64(log1p(Float64(y * expm1(z))) / t));
	elseif (y <= 5.9e+91)
		tmp = x;
	else
		tmp = Float64(x - Float64(log1p(Float64(y * Float64(z + Float64(Float64(0.16666666666666666 * (z ^ 3.0)) + Float64(0.5 * (z ^ 2.0)))))) / t));
	end
	return tmp
end
code[x_, y_, z_, t_] := If[LessEqual[y, -3.2e-24], N[(x - N[(N[Log[1 + N[(y * N[(Exp[z] - 1), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] / t), $MachinePrecision]), $MachinePrecision], If[LessEqual[y, 5.9e+91], x, N[(x - N[(N[Log[1 + N[(y * N[(z + N[(N[(0.16666666666666666 * N[Power[z, 3.0], $MachinePrecision]), $MachinePrecision] + N[(0.5 * N[Power[z, 2.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] / t), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;y \leq -3.2 \cdot 10^{-24}:\\
\;\;\;\;x - \frac{\mathsf{log1p}\left(y \cdot \mathsf{expm1}\left(z\right)\right)}{t}\\

\mathbf{elif}\;y \leq 5.9 \cdot 10^{+91}:\\
\;\;\;\;x\\

\mathbf{else}:\\
\;\;\;\;x - \frac{\mathsf{log1p}\left(y \cdot \left(z + \left(0.16666666666666666 \cdot {z}^{3} + 0.5 \cdot {z}^{2}\right)\right)\right)}{t}\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if y < -3.20000000000000012e-24

    1. Initial program 41.2%

      \[x - \frac{\log \left(\left(1 - y\right) + y \cdot e^{z}\right)}{t} \]
    2. Step-by-step derivation
      1. remove-double-neg41.2%

        \[\leadsto x - \color{blue}{\left(-\left(-\frac{\log \left(\left(1 - y\right) + y \cdot e^{z}\right)}{t}\right)\right)} \]
      2. neg-mul-141.2%

        \[\leadsto x - \color{blue}{-1 \cdot \left(-\frac{\log \left(\left(1 - y\right) + y \cdot e^{z}\right)}{t}\right)} \]
      3. *-commutative41.2%

        \[\leadsto x - \color{blue}{\left(-\frac{\log \left(\left(1 - y\right) + y \cdot e^{z}\right)}{t}\right) \cdot -1} \]
      4. *-commutative41.2%

        \[\leadsto x - \color{blue}{-1 \cdot \left(-\frac{\log \left(\left(1 - y\right) + y \cdot e^{z}\right)}{t}\right)} \]
      5. neg-mul-141.2%

        \[\leadsto x - \color{blue}{\left(-\left(-\frac{\log \left(\left(1 - y\right) + y \cdot e^{z}\right)}{t}\right)\right)} \]
      6. remove-double-neg41.2%

        \[\leadsto x - \color{blue}{\frac{\log \left(\left(1 - y\right) + y \cdot e^{z}\right)}{t}} \]
      7. sub-neg41.2%

        \[\leadsto x - \frac{\log \left(\color{blue}{\left(1 + \left(-y\right)\right)} + y \cdot e^{z}\right)}{t} \]
      8. associate-+l+86.7%

        \[\leadsto x - \frac{\log \color{blue}{\left(1 + \left(\left(-y\right) + y \cdot e^{z}\right)\right)}}{t} \]
      9. cancel-sign-sub86.7%

        \[\leadsto x - \frac{\log \left(1 + \color{blue}{\left(\left(-y\right) - \left(-y\right) \cdot e^{z}\right)}\right)}{t} \]
      10. log1p-def88.2%

        \[\leadsto x - \frac{\color{blue}{\mathsf{log1p}\left(\left(-y\right) - \left(-y\right) \cdot e^{z}\right)}}{t} \]
      11. cancel-sign-sub88.2%

        \[\leadsto x - \frac{\mathsf{log1p}\left(\color{blue}{\left(-y\right) + y \cdot e^{z}}\right)}{t} \]
      12. +-commutative88.2%

        \[\leadsto x - \frac{\mathsf{log1p}\left(\color{blue}{y \cdot e^{z} + \left(-y\right)}\right)}{t} \]
      13. unsub-neg88.2%

        \[\leadsto x - \frac{\mathsf{log1p}\left(\color{blue}{y \cdot e^{z} - y}\right)}{t} \]
      14. *-rgt-identity88.2%

        \[\leadsto x - \frac{\mathsf{log1p}\left(y \cdot e^{z} - \color{blue}{y \cdot 1}\right)}{t} \]
      15. distribute-lft-out--88.2%

        \[\leadsto x - \frac{\mathsf{log1p}\left(\color{blue}{y \cdot \left(e^{z} - 1\right)}\right)}{t} \]
      16. expm1-def93.7%

        \[\leadsto x - \frac{\mathsf{log1p}\left(y \cdot \color{blue}{\mathsf{expm1}\left(z\right)}\right)}{t} \]
    3. Simplified93.7%

      \[\leadsto \color{blue}{x - \frac{\mathsf{log1p}\left(y \cdot \mathsf{expm1}\left(z\right)\right)}{t}} \]
    4. Add Preprocessing

    if -3.20000000000000012e-24 < y < 5.9000000000000002e91

    1. Initial program 94.3%

      \[x - \frac{\log \left(\left(1 - y\right) + y \cdot e^{z}\right)}{t} \]
    2. Add Preprocessing
    3. Taylor expanded in x around inf 99.4%

      \[\leadsto \color{blue}{x} \]

    if 5.9000000000000002e91 < y

    1. Initial program 3.0%

      \[x - \frac{\log \left(\left(1 - y\right) + y \cdot e^{z}\right)}{t} \]
    2. Step-by-step derivation
      1. remove-double-neg3.0%

        \[\leadsto x - \color{blue}{\left(-\left(-\frac{\log \left(\left(1 - y\right) + y \cdot e^{z}\right)}{t}\right)\right)} \]
      2. neg-mul-13.0%

        \[\leadsto x - \color{blue}{-1 \cdot \left(-\frac{\log \left(\left(1 - y\right) + y \cdot e^{z}\right)}{t}\right)} \]
      3. neg-mul-13.0%

        \[\leadsto x - \color{blue}{\left(-\left(-\frac{\log \left(\left(1 - y\right) + y \cdot e^{z}\right)}{t}\right)\right)} \]
      4. remove-double-neg3.0%

        \[\leadsto x - \color{blue}{\frac{\log \left(\left(1 - y\right) + y \cdot e^{z}\right)}{t}} \]
      5. sub-neg3.0%

        \[\leadsto x - \frac{\log \left(\color{blue}{\left(1 + \left(-y\right)\right)} + y \cdot e^{z}\right)}{t} \]
      6. associate-+l+56.0%

        \[\leadsto x - \frac{\log \color{blue}{\left(1 + \left(\left(-y\right) + y \cdot e^{z}\right)\right)}}{t} \]
      7. cancel-sign-sub56.0%

        \[\leadsto x - \frac{\log \left(1 + \color{blue}{\left(\left(-y\right) - \left(-y\right) \cdot e^{z}\right)}\right)}{t} \]
      8. log1p-def56.0%

        \[\leadsto x - \frac{\color{blue}{\mathsf{log1p}\left(\left(-y\right) - \left(-y\right) \cdot e^{z}\right)}}{t} \]
      9. cancel-sign-sub56.0%

        \[\leadsto x - \frac{\mathsf{log1p}\left(\color{blue}{\left(-y\right) + y \cdot e^{z}}\right)}{t} \]
      10. +-commutative56.0%

        \[\leadsto x - \frac{\mathsf{log1p}\left(\color{blue}{y \cdot e^{z} + \left(-y\right)}\right)}{t} \]
      11. neg-mul-156.0%

        \[\leadsto x - \frac{\mathsf{log1p}\left(y \cdot e^{z} + \color{blue}{-1 \cdot y}\right)}{t} \]
      12. *-commutative56.0%

        \[\leadsto x - \frac{\mathsf{log1p}\left(y \cdot e^{z} + \color{blue}{y \cdot -1}\right)}{t} \]
      13. distribute-lft-out56.0%

        \[\leadsto x - \frac{\mathsf{log1p}\left(\color{blue}{y \cdot \left(e^{z} + -1\right)}\right)}{t} \]
    3. Simplified56.0%

      \[\leadsto \color{blue}{x - \frac{\mathsf{log1p}\left(y \cdot \left(e^{z} + -1\right)\right)}{t}} \]
    4. Add Preprocessing
    5. Taylor expanded in z around 0 95.8%

      \[\leadsto x - \frac{\mathsf{log1p}\left(y \cdot \color{blue}{\left(z + \left(0.16666666666666666 \cdot {z}^{3} + 0.5 \cdot {z}^{2}\right)\right)}\right)}{t} \]
  3. Recombined 3 regimes into one program.
  4. Final simplification97.7%

    \[\leadsto \begin{array}{l} \mathbf{if}\;y \leq -3.2 \cdot 10^{-24}:\\ \;\;\;\;x - \frac{\mathsf{log1p}\left(y \cdot \mathsf{expm1}\left(z\right)\right)}{t}\\ \mathbf{elif}\;y \leq 5.9 \cdot 10^{+91}:\\ \;\;\;\;x\\ \mathbf{else}:\\ \;\;\;\;x - \frac{\mathsf{log1p}\left(y \cdot \left(z + \left(0.16666666666666666 \cdot {z}^{3} + 0.5 \cdot {z}^{2}\right)\right)\right)}{t}\\ \end{array} \]
  5. Add Preprocessing

Alternative 4: 95.1% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;y \leq -3.5 \cdot 10^{-24}:\\ \;\;\;\;x - \frac{\mathsf{log1p}\left(y \cdot \mathsf{expm1}\left(z\right)\right)}{t}\\ \mathbf{elif}\;y \leq 2.2 \cdot 10^{+96}:\\ \;\;\;\;x\\ \mathbf{else}:\\ \;\;\;\;x - \frac{\mathsf{log1p}\left(y \cdot \left(z + 0.5 \cdot {z}^{2}\right)\right)}{t}\\ \end{array} \end{array} \]
; Alternative 4: cheapest regime split — log1p/expm1 for y <= -3.5e-24,
; x for the middle regime, quadratic Taylor tail for y > 2.2e96.
(FPCore (x y z t)
 :precision binary64
 (if (<= y -3.5e-24)
   (- x (/ (log1p (* y (expm1 z))) t))
   (if (<= y 2.2e+96) x (- x (/ (log1p (* y (+ z (* 0.5 (pow z 2.0))))) t)))))
double code(double x, double y, double z, double t) {
	double tmp;
	if (y <= -3.5e-24) {
		tmp = x - (log1p((y * expm1(z))) / t);
	} else if (y <= 2.2e+96) {
		tmp = x;
	} else {
		tmp = x - (log1p((y * (z + (0.5 * pow(z, 2.0))))) / t);
	}
	return tmp;
}
/** Alternative 4: piecewise rewrite of x - log((1 - y) + y*exp(z)) / t. */
public static double code(double x, double y, double z, double t) {
	if (y <= -3.5e-24) {
		// expm1/log1p pairing avoids cancellation near zero.
		return x - Math.log1p(y * Math.expm1(z)) / t;
	}
	if (y <= 2.2e+96) {
		// The log correction is negligible against x in this regime.
		return x;
	}
	// Huge y: second-order Taylor expansion of expm1(z) about z = 0.
	return x - Math.log1p(y * (z + 0.5 * Math.pow(z, 2.0))) / t;
}
def code(x, y, z, t):
	"""Alternative 4: piecewise rewrite of x - log((1 - y) + y*exp(z)) / t."""
	if y <= -3.5e-24:
		# expm1/log1p pairing avoids cancellation near zero.
		return x - math.log1p(y * math.expm1(z)) / t
	if y <= 2.2e+96:
		# The log correction is negligible against x in this regime.
		return x
	# Huge y: second-order Taylor expansion of expm1(z) about z = 0.
	return x - math.log1p(y * (z + (0.5 * math.pow(z, 2.0)))) / t
# Alternative 4: piecewise rewrite of x - log((1 - y) + y*exp(z)) / t.
function code(x, y, z, t)
	if y <= -3.5e-24
		# expm1/log1p pairing avoids cancellation near zero.
		return Float64(x - Float64(log1p(Float64(y * expm1(z))) / t))
	elseif y <= 2.2e+96
		# The log correction is negligible against x in this regime.
		return x
	else
		# Huge y: second-order Taylor expansion of expm1(z) about z = 0.
		return Float64(x - Float64(log1p(Float64(y * Float64(z + Float64(0.5 * (z ^ 2.0))))) / t))
	end
end
code[x_, y_, z_, t_] := If[LessEqual[y, -3.5e-24], N[(x - N[(N[Log[1 + N[(y * N[(Exp[z] - 1), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] / t), $MachinePrecision]), $MachinePrecision], If[LessEqual[y, 2.2e+96], x, N[(x - N[(N[Log[1 + N[(y * N[(z + N[(0.5 * N[Power[z, 2.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] / t), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;y \leq -3.5 \cdot 10^{-24}:\\
\;\;\;\;x - \frac{\mathsf{log1p}\left(y \cdot \mathsf{expm1}\left(z\right)\right)}{t}\\

\mathbf{elif}\;y \leq 2.2 \cdot 10^{+96}:\\
\;\;\;\;x\\

\mathbf{else}:\\
\;\;\;\;x - \frac{\mathsf{log1p}\left(y \cdot \left(z + 0.5 \cdot {z}^{2}\right)\right)}{t}\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if y < -3.4999999999999996e-24

    1. Initial program 41.2%

      \[x - \frac{\log \left(\left(1 - y\right) + y \cdot e^{z}\right)}{t} \]
    2. Step-by-step derivation
      1. remove-double-neg41.2%

        \[\leadsto x - \color{blue}{\left(-\left(-\frac{\log \left(\left(1 - y\right) + y \cdot e^{z}\right)}{t}\right)\right)} \]
      2. neg-mul-141.2%

        \[\leadsto x - \color{blue}{-1 \cdot \left(-\frac{\log \left(\left(1 - y\right) + y \cdot e^{z}\right)}{t}\right)} \]
      3. *-commutative41.2%

        \[\leadsto x - \color{blue}{\left(-\frac{\log \left(\left(1 - y\right) + y \cdot e^{z}\right)}{t}\right) \cdot -1} \]
      4. *-commutative41.2%

        \[\leadsto x - \color{blue}{-1 \cdot \left(-\frac{\log \left(\left(1 - y\right) + y \cdot e^{z}\right)}{t}\right)} \]
      5. neg-mul-141.2%

        \[\leadsto x - \color{blue}{\left(-\left(-\frac{\log \left(\left(1 - y\right) + y \cdot e^{z}\right)}{t}\right)\right)} \]
      6. remove-double-neg41.2%

        \[\leadsto x - \color{blue}{\frac{\log \left(\left(1 - y\right) + y \cdot e^{z}\right)}{t}} \]
      7. sub-neg41.2%

        \[\leadsto x - \frac{\log \left(\color{blue}{\left(1 + \left(-y\right)\right)} + y \cdot e^{z}\right)}{t} \]
      8. associate-+l+86.7%

        \[\leadsto x - \frac{\log \color{blue}{\left(1 + \left(\left(-y\right) + y \cdot e^{z}\right)\right)}}{t} \]
      9. cancel-sign-sub86.7%

        \[\leadsto x - \frac{\log \left(1 + \color{blue}{\left(\left(-y\right) - \left(-y\right) \cdot e^{z}\right)}\right)}{t} \]
      10. log1p-def88.2%

        \[\leadsto x - \frac{\color{blue}{\mathsf{log1p}\left(\left(-y\right) - \left(-y\right) \cdot e^{z}\right)}}{t} \]
      11. cancel-sign-sub88.2%

        \[\leadsto x - \frac{\mathsf{log1p}\left(\color{blue}{\left(-y\right) + y \cdot e^{z}}\right)}{t} \]
      12. +-commutative88.2%

        \[\leadsto x - \frac{\mathsf{log1p}\left(\color{blue}{y \cdot e^{z} + \left(-y\right)}\right)}{t} \]
      13. unsub-neg88.2%

        \[\leadsto x - \frac{\mathsf{log1p}\left(\color{blue}{y \cdot e^{z} - y}\right)}{t} \]
      14. *-rgt-identity88.2%

        \[\leadsto x - \frac{\mathsf{log1p}\left(y \cdot e^{z} - \color{blue}{y \cdot 1}\right)}{t} \]
      15. distribute-lft-out--88.2%

        \[\leadsto x - \frac{\mathsf{log1p}\left(\color{blue}{y \cdot \left(e^{z} - 1\right)}\right)}{t} \]
      16. expm1-def93.7%

        \[\leadsto x - \frac{\mathsf{log1p}\left(y \cdot \color{blue}{\mathsf{expm1}\left(z\right)}\right)}{t} \]
    3. Simplified93.7%

      \[\leadsto \color{blue}{x - \frac{\mathsf{log1p}\left(y \cdot \mathsf{expm1}\left(z\right)\right)}{t}} \]
    4. Add Preprocessing

    if -3.4999999999999996e-24 < y < 2.1999999999999999e96

    1. Initial program 94.3%

      \[x - \frac{\log \left(\left(1 - y\right) + y \cdot e^{z}\right)}{t} \]
    2. Add Preprocessing
    3. Taylor expanded in x around inf 99.4%

      \[\leadsto \color{blue}{x} \]

    if 2.1999999999999999e96 < y

    1. Initial program 3.0%

      \[x - \frac{\log \left(\left(1 - y\right) + y \cdot e^{z}\right)}{t} \]
    2. Step-by-step derivation
      1. remove-double-neg3.0%

        \[\leadsto x - \color{blue}{\left(-\left(-\frac{\log \left(\left(1 - y\right) + y \cdot e^{z}\right)}{t}\right)\right)} \]
      2. neg-mul-13.0%

        \[\leadsto x - \color{blue}{-1 \cdot \left(-\frac{\log \left(\left(1 - y\right) + y \cdot e^{z}\right)}{t}\right)} \]
      3. neg-mul-13.0%

        \[\leadsto x - \color{blue}{\left(-\left(-\frac{\log \left(\left(1 - y\right) + y \cdot e^{z}\right)}{t}\right)\right)} \]
      4. remove-double-neg3.0%

        \[\leadsto x - \color{blue}{\frac{\log \left(\left(1 - y\right) + y \cdot e^{z}\right)}{t}} \]
      5. sub-neg3.0%

        \[\leadsto x - \frac{\log \left(\color{blue}{\left(1 + \left(-y\right)\right)} + y \cdot e^{z}\right)}{t} \]
      6. associate-+l+56.0%

        \[\leadsto x - \frac{\log \color{blue}{\left(1 + \left(\left(-y\right) + y \cdot e^{z}\right)\right)}}{t} \]
      7. cancel-sign-sub56.0%

        \[\leadsto x - \frac{\log \left(1 + \color{blue}{\left(\left(-y\right) - \left(-y\right) \cdot e^{z}\right)}\right)}{t} \]
      8. log1p-def56.0%

        \[\leadsto x - \frac{\color{blue}{\mathsf{log1p}\left(\left(-y\right) - \left(-y\right) \cdot e^{z}\right)}}{t} \]
      9. cancel-sign-sub56.0%

        \[\leadsto x - \frac{\mathsf{log1p}\left(\color{blue}{\left(-y\right) + y \cdot e^{z}}\right)}{t} \]
      10. +-commutative56.0%

        \[\leadsto x - \frac{\mathsf{log1p}\left(\color{blue}{y \cdot e^{z} + \left(-y\right)}\right)}{t} \]
      11. neg-mul-156.0%

        \[\leadsto x - \frac{\mathsf{log1p}\left(y \cdot e^{z} + \color{blue}{-1 \cdot y}\right)}{t} \]
      12. *-commutative56.0%

        \[\leadsto x - \frac{\mathsf{log1p}\left(y \cdot e^{z} + \color{blue}{y \cdot -1}\right)}{t} \]
      13. distribute-lft-out56.0%

        \[\leadsto x - \frac{\mathsf{log1p}\left(\color{blue}{y \cdot \left(e^{z} + -1\right)}\right)}{t} \]
    3. Simplified56.0%

      \[\leadsto \color{blue}{x - \frac{\mathsf{log1p}\left(y \cdot \left(e^{z} + -1\right)\right)}{t}} \]
    4. Add Preprocessing
    5. Taylor expanded in z around 0 95.8%

      \[\leadsto x - \frac{\mathsf{log1p}\left(y \cdot \color{blue}{\left(z + 0.5 \cdot {z}^{2}\right)}\right)}{t} \]
  3. Recombined 3 regimes into one program.
  4. Final simplification97.7%

    \[\leadsto \begin{array}{l} \mathbf{if}\;y \leq -3.5 \cdot 10^{-24}:\\ \;\;\;\;x - \frac{\mathsf{log1p}\left(y \cdot \mathsf{expm1}\left(z\right)\right)}{t}\\ \mathbf{elif}\;y \leq 2.2 \cdot 10^{+96}:\\ \;\;\;\;x\\ \mathbf{else}:\\ \;\;\;\;x - \frac{\mathsf{log1p}\left(y \cdot \left(z + 0.5 \cdot {z}^{2}\right)\right)}{t}\\ \end{array} \]
  5. Add Preprocessing

Alternative 5: 95.2% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;y \leq -3.5 \cdot 10^{-24}:\\ \;\;\;\;x - \frac{\mathsf{log1p}\left(y \cdot \mathsf{expm1}\left(z\right)\right)}{t}\\ \mathbf{elif}\;y \leq 6.1 \cdot 10^{+88}:\\ \;\;\;\;x\\ \mathbf{else}:\\ \;\;\;\;x - \frac{\mathsf{log1p}\left(y \cdot z\right)}{t}\\ \end{array} \end{array} \]
;; Alternative 5: like Alternative 4, but the huge-y branch uses the linear term z only.
(FPCore (x y z t)
 :precision binary64
 (if (<= y -3.5e-24)
   ;; log1p/expm1 pairing avoids cancellation near zero.
   (- x (/ (log1p (* y (expm1 z))) t))
   ;; Middle regime: correction rounds away against x; huge y: expm1(z) ~ z.
   (if (<= y 6.1e+88) x (- x (/ (log1p (* y z)) t)))))
double code(double x, double y, double z, double t) {
	double tmp;
	if (y <= -3.5e-24) {
		tmp = x - (log1p((y * expm1(z))) / t);
	} else if (y <= 6.1e+88) {
		tmp = x;
	} else {
		tmp = x - (log1p((y * z)) / t);
	}
	return tmp;
}
/** Alternative 5: piecewise rewrite; the huge-y branch uses expm1(z) ~ z. */
public static double code(double x, double y, double z, double t) {
	if (y <= -3.5e-24) {
		// expm1/log1p pairing avoids cancellation near zero.
		return x - Math.log1p(y * Math.expm1(z)) / t;
	}
	if (y <= 6.1e+88) {
		// The log correction is negligible against x in this regime.
		return x;
	}
	// Huge y: first-order approximation expm1(z) ~ z.
	return x - Math.log1p(y * z) / t;
}
def code(x, y, z, t):
	"""Alternative 5: piecewise rewrite; the huge-y branch uses expm1(z) ~ z."""
	if y <= -3.5e-24:
		# expm1/log1p pairing avoids cancellation near zero.
		return x - math.log1p(y * math.expm1(z)) / t
	if y <= 6.1e+88:
		# The log correction is negligible against x in this regime.
		return x
	# Huge y: first-order approximation expm1(z) ~ z.
	return x - math.log1p(y * z) / t
# Alternative 5: piecewise rewrite; the huge-y branch uses expm1(z) ~ z.
function code(x, y, z, t)
	if y <= -3.5e-24
		# expm1/log1p pairing avoids cancellation near zero.
		return Float64(x - Float64(log1p(Float64(y * expm1(z))) / t))
	elseif y <= 6.1e+88
		# The log correction is negligible against x in this regime.
		return x
	else
		# Huge y: first-order approximation expm1(z) ~ z.
		return Float64(x - Float64(log1p(Float64(y * z)) / t))
	end
end
code[x_, y_, z_, t_] := If[LessEqual[y, -3.5e-24], N[(x - N[(N[Log[1 + N[(y * N[(Exp[z] - 1), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] / t), $MachinePrecision]), $MachinePrecision], If[LessEqual[y, 6.1e+88], x, N[(x - N[(N[Log[1 + N[(y * z), $MachinePrecision]], $MachinePrecision] / t), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;y \leq -3.5 \cdot 10^{-24}:\\
\;\;\;\;x - \frac{\mathsf{log1p}\left(y \cdot \mathsf{expm1}\left(z\right)\right)}{t}\\

\mathbf{elif}\;y \leq 6.1 \cdot 10^{+88}:\\
\;\;\;\;x\\

\mathbf{else}:\\
\;\;\;\;x - \frac{\mathsf{log1p}\left(y \cdot z\right)}{t}\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if y < -3.4999999999999996e-24

    1. Initial program 41.2%

      \[x - \frac{\log \left(\left(1 - y\right) + y \cdot e^{z}\right)}{t} \]
    2. Step-by-step derivation
      1. remove-double-neg41.2%

        \[\leadsto x - \color{blue}{\left(-\left(-\frac{\log \left(\left(1 - y\right) + y \cdot e^{z}\right)}{t}\right)\right)} \]
      2. neg-mul-141.2%

        \[\leadsto x - \color{blue}{-1 \cdot \left(-\frac{\log \left(\left(1 - y\right) + y \cdot e^{z}\right)}{t}\right)} \]
      3. *-commutative41.2%

        \[\leadsto x - \color{blue}{\left(-\frac{\log \left(\left(1 - y\right) + y \cdot e^{z}\right)}{t}\right) \cdot -1} \]
      4. *-commutative41.2%

        \[\leadsto x - \color{blue}{-1 \cdot \left(-\frac{\log \left(\left(1 - y\right) + y \cdot e^{z}\right)}{t}\right)} \]
      5. neg-mul-141.2%

        \[\leadsto x - \color{blue}{\left(-\left(-\frac{\log \left(\left(1 - y\right) + y \cdot e^{z}\right)}{t}\right)\right)} \]
      6. remove-double-neg41.2%

        \[\leadsto x - \color{blue}{\frac{\log \left(\left(1 - y\right) + y \cdot e^{z}\right)}{t}} \]
      7. sub-neg41.2%

        \[\leadsto x - \frac{\log \left(\color{blue}{\left(1 + \left(-y\right)\right)} + y \cdot e^{z}\right)}{t} \]
      8. associate-+l+86.7%

        \[\leadsto x - \frac{\log \color{blue}{\left(1 + \left(\left(-y\right) + y \cdot e^{z}\right)\right)}}{t} \]
      9. cancel-sign-sub86.7%

        \[\leadsto x - \frac{\log \left(1 + \color{blue}{\left(\left(-y\right) - \left(-y\right) \cdot e^{z}\right)}\right)}{t} \]
      10. log1p-def88.2%

        \[\leadsto x - \frac{\color{blue}{\mathsf{log1p}\left(\left(-y\right) - \left(-y\right) \cdot e^{z}\right)}}{t} \]
      11. cancel-sign-sub88.2%

        \[\leadsto x - \frac{\mathsf{log1p}\left(\color{blue}{\left(-y\right) + y \cdot e^{z}}\right)}{t} \]
      12. +-commutative88.2%

        \[\leadsto x - \frac{\mathsf{log1p}\left(\color{blue}{y \cdot e^{z} + \left(-y\right)}\right)}{t} \]
      13. unsub-neg88.2%

        \[\leadsto x - \frac{\mathsf{log1p}\left(\color{blue}{y \cdot e^{z} - y}\right)}{t} \]
      14. *-rgt-identity88.2%

        \[\leadsto x - \frac{\mathsf{log1p}\left(y \cdot e^{z} - \color{blue}{y \cdot 1}\right)}{t} \]
      15. distribute-lft-out--88.2%

        \[\leadsto x - \frac{\mathsf{log1p}\left(\color{blue}{y \cdot \left(e^{z} - 1\right)}\right)}{t} \]
      16. expm1-def93.7%

        \[\leadsto x - \frac{\mathsf{log1p}\left(y \cdot \color{blue}{\mathsf{expm1}\left(z\right)}\right)}{t} \]
    3. Simplified93.7%

      \[\leadsto \color{blue}{x - \frac{\mathsf{log1p}\left(y \cdot \mathsf{expm1}\left(z\right)\right)}{t}} \]
    4. Add Preprocessing

    if -3.4999999999999996e-24 < y < 6.0999999999999998e88

    1. Initial program 94.3%

      \[x - \frac{\log \left(\left(1 - y\right) + y \cdot e^{z}\right)}{t} \]
    2. Add Preprocessing
    3. Taylor expanded in x around inf 99.4%

      \[\leadsto \color{blue}{x} \]

    if 6.0999999999999998e88 < y

    1. Initial program 3.0%

      \[x - \frac{\log \left(\left(1 - y\right) + y \cdot e^{z}\right)}{t} \]
    2. Step-by-step derivation
      1. remove-double-neg3.0%

        \[\leadsto x - \color{blue}{\left(-\left(-\frac{\log \left(\left(1 - y\right) + y \cdot e^{z}\right)}{t}\right)\right)} \]
      2. neg-mul-13.0%

        \[\leadsto x - \color{blue}{-1 \cdot \left(-\frac{\log \left(\left(1 - y\right) + y \cdot e^{z}\right)}{t}\right)} \]
      3. neg-mul-13.0%

        \[\leadsto x - \color{blue}{\left(-\left(-\frac{\log \left(\left(1 - y\right) + y \cdot e^{z}\right)}{t}\right)\right)} \]
      4. remove-double-neg3.0%

        \[\leadsto x - \color{blue}{\frac{\log \left(\left(1 - y\right) + y \cdot e^{z}\right)}{t}} \]
      5. sub-neg3.0%

        \[\leadsto x - \frac{\log \left(\color{blue}{\left(1 + \left(-y\right)\right)} + y \cdot e^{z}\right)}{t} \]
      6. associate-+l+56.0%

        \[\leadsto x - \frac{\log \color{blue}{\left(1 + \left(\left(-y\right) + y \cdot e^{z}\right)\right)}}{t} \]
      7. cancel-sign-sub56.0%

        \[\leadsto x - \frac{\log \left(1 + \color{blue}{\left(\left(-y\right) - \left(-y\right) \cdot e^{z}\right)}\right)}{t} \]
      8. log1p-def56.0%

        \[\leadsto x - \frac{\color{blue}{\mathsf{log1p}\left(\left(-y\right) - \left(-y\right) \cdot e^{z}\right)}}{t} \]
      9. cancel-sign-sub56.0%

        \[\leadsto x - \frac{\mathsf{log1p}\left(\color{blue}{\left(-y\right) + y \cdot e^{z}}\right)}{t} \]
      10. +-commutative56.0%

        \[\leadsto x - \frac{\mathsf{log1p}\left(\color{blue}{y \cdot e^{z} + \left(-y\right)}\right)}{t} \]
      11. neg-mul-156.0%

        \[\leadsto x - \frac{\mathsf{log1p}\left(y \cdot e^{z} + \color{blue}{-1 \cdot y}\right)}{t} \]
      12. *-commutative56.0%

        \[\leadsto x - \frac{\mathsf{log1p}\left(y \cdot e^{z} + \color{blue}{y \cdot -1}\right)}{t} \]
      13. distribute-lft-out56.0%

        \[\leadsto x - \frac{\mathsf{log1p}\left(\color{blue}{y \cdot \left(e^{z} + -1\right)}\right)}{t} \]
    3. Simplified56.0%

      \[\leadsto \color{blue}{x - \frac{\mathsf{log1p}\left(y \cdot \left(e^{z} + -1\right)\right)}{t}} \]
    4. Add Preprocessing
    5. Taylor expanded in z around 0 95.8%

      \[\leadsto x - \frac{\mathsf{log1p}\left(y \cdot \color{blue}{z}\right)}{t} \]
  3. Recombined 3 regimes into one program.
  4. Final simplification97.7%

    \[\leadsto \begin{array}{l} \mathbf{if}\;y \leq -3.5 \cdot 10^{-24}:\\ \;\;\;\;x - \frac{\mathsf{log1p}\left(y \cdot \mathsf{expm1}\left(z\right)\right)}{t}\\ \mathbf{elif}\;y \leq 6.1 \cdot 10^{+88}:\\ \;\;\;\;x\\ \mathbf{else}:\\ \;\;\;\;x - \frac{\mathsf{log1p}\left(y \cdot z\right)}{t}\\ \end{array} \]
  5. Add Preprocessing

Alternative 6: 88.2% accurate, 1.9× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;y \leq 8 \cdot 10^{+85}:\\ \;\;\;\;x\\ \mathbf{else}:\\ \;\;\;\;x - \frac{\mathsf{log1p}\left(y \cdot z\right)}{t}\\ \end{array} \end{array} \]
;; Alternative 6: return x unless y is huge; then use log1p with expm1(z) ~ z.
(FPCore (x y z t)
 :precision binary64
 (if (<= y 8e+85) x (- x (/ (log1p (* y z)) t))))
double code(double x, double y, double z, double t) {
	double tmp;
	if (y <= 8e+85) {
		tmp = x;
	} else {
		tmp = x - (log1p((y * z)) / t);
	}
	return tmp;
}
/** Alternative 6: two regimes only — return x unless y is astronomically large. */
public static double code(double x, double y, double z, double t) {
	if (y <= 8e+85) {
		// The log correction rounds away against x.
		return x;
	}
	// Huge y: expm1(z) replaced by its first-order term z.
	return x - Math.log1p(y * z) / t;
}
def code(x, y, z, t):
	"""Alternative 6: two regimes only — return x unless y is astronomically large."""
	if y <= 8e+85:
		# The log correction rounds away against x.
		return x
	# Huge y: expm1(z) replaced by its first-order term z.
	return x - math.log1p(y * z) / t
# Alternative 6: two regimes only — return x unless y is astronomically large.
function code(x, y, z, t)
	if y <= 8e+85
		# The log correction rounds away against x.
		return x
	else
		# Huge y: expm1(z) replaced by its first-order term z.
		return Float64(x - Float64(log1p(Float64(y * z)) / t))
	end
end
code[x_, y_, z_, t_] := If[LessEqual[y, 8e+85], x, N[(x - N[(N[Log[1 + N[(y * z), $MachinePrecision]], $MachinePrecision] / t), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;y \leq 8 \cdot 10^{+85}:\\
\;\;\;\;x\\

\mathbf{else}:\\
\;\;\;\;x - \frac{\mathsf{log1p}\left(y \cdot z\right)}{t}\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if y < 8.0000000000000001e85

    1. Initial program 79.7%

      \[x - \frac{\log \left(\left(1 - y\right) + y \cdot e^{z}\right)}{t} \]
    2. Add Preprocessing
    3. Taylor expanded in x around inf 92.3%

      \[\leadsto \color{blue}{x} \]

    if 8.0000000000000001e85 < y

    1. Initial program 3.0%

      \[x - \frac{\log \left(\left(1 - y\right) + y \cdot e^{z}\right)}{t} \]
    2. Step-by-step derivation
      1. remove-double-neg3.0%

        \[\leadsto x - \color{blue}{\left(-\left(-\frac{\log \left(\left(1 - y\right) + y \cdot e^{z}\right)}{t}\right)\right)} \]
      2. neg-mul-13.0%

        \[\leadsto x - \color{blue}{-1 \cdot \left(-\frac{\log \left(\left(1 - y\right) + y \cdot e^{z}\right)}{t}\right)} \]
      3. neg-mul-13.0%

        \[\leadsto x - \color{blue}{\left(-\left(-\frac{\log \left(\left(1 - y\right) + y \cdot e^{z}\right)}{t}\right)\right)} \]
      4. remove-double-neg3.0%

        \[\leadsto x - \color{blue}{\frac{\log \left(\left(1 - y\right) + y \cdot e^{z}\right)}{t}} \]
      5. sub-neg3.0%

        \[\leadsto x - \frac{\log \left(\color{blue}{\left(1 + \left(-y\right)\right)} + y \cdot e^{z}\right)}{t} \]
      6. associate-+l+56.0%

        \[\leadsto x - \frac{\log \color{blue}{\left(1 + \left(\left(-y\right) + y \cdot e^{z}\right)\right)}}{t} \]
      7. cancel-sign-sub56.0%

        \[\leadsto x - \frac{\log \left(1 + \color{blue}{\left(\left(-y\right) - \left(-y\right) \cdot e^{z}\right)}\right)}{t} \]
      8. log1p-def56.0%

        \[\leadsto x - \frac{\color{blue}{\mathsf{log1p}\left(\left(-y\right) - \left(-y\right) \cdot e^{z}\right)}}{t} \]
      9. cancel-sign-sub56.0%

        \[\leadsto x - \frac{\mathsf{log1p}\left(\color{blue}{\left(-y\right) + y \cdot e^{z}}\right)}{t} \]
      10. +-commutative56.0%

        \[\leadsto x - \frac{\mathsf{log1p}\left(\color{blue}{y \cdot e^{z} + \left(-y\right)}\right)}{t} \]
      11. neg-mul-156.0%

        \[\leadsto x - \frac{\mathsf{log1p}\left(y \cdot e^{z} + \color{blue}{-1 \cdot y}\right)}{t} \]
      12. *-commutative56.0%

        \[\leadsto x - \frac{\mathsf{log1p}\left(y \cdot e^{z} + \color{blue}{y \cdot -1}\right)}{t} \]
      13. distribute-lft-out56.0%

        \[\leadsto x - \frac{\mathsf{log1p}\left(\color{blue}{y \cdot \left(e^{z} + -1\right)}\right)}{t} \]
    3. Simplified56.0%

      \[\leadsto \color{blue}{x - \frac{\mathsf{log1p}\left(y \cdot \left(e^{z} + -1\right)\right)}{t}} \]
    4. Add Preprocessing
    5. Taylor expanded in z around 0 95.8%

      \[\leadsto x - \frac{\mathsf{log1p}\left(y \cdot \color{blue}{z}\right)}{t} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification92.5%

    \[\leadsto \begin{array}{l} \mathbf{if}\;y \leq 8 \cdot 10^{+85}:\\ \;\;\;\;x\\ \mathbf{else}:\\ \;\;\;\;x - \frac{\mathsf{log1p}\left(y \cdot z\right)}{t}\\ \end{array} \]
  5. Add Preprocessing

Alternative 7: 85.5% accurate, 211.0× speedup?

\[\begin{array}{l} \\ x \end{array} \]
(FPCore (x y z t) :precision binary64 x)
/* Alternative 7: the whole log correction is dropped; y, z, t are unused. */
double code(double x, double y, double z, double t) {
	return x;
}
! Alternative 7: the whole log correction is dropped; y, z, t are unused.
real(8) function code(x, y, z, t)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    code = x
end function
/** Alternative 7: the whole log correction is dropped; y, z, t are unused. */
public static double code(double x, double y, double z, double t) {
	return x;
}
def code(x, y, z, t):
	"""Alternative 7: the whole log correction is dropped (y, z, t unused)."""
	return x
# Alternative 7: the whole log correction is dropped; y, z, t are unused.
function code(x, y, z, t)
	return x
end
% Alternative 7: the whole log correction is dropped; y, z, t are unused.
function tmp = code(x, y, z, t)
	tmp = x;
end
code[x_, y_, z_, t_] := x
\begin{array}{l}

\\
x
\end{array}
Derivation
  1. Initial program 73.7%

    \[x - \frac{\log \left(\left(1 - y\right) + y \cdot e^{z}\right)}{t} \]
  2. Add Preprocessing
  3. Taylor expanded in x around inf 89.4%

    \[\leadsto \color{blue}{x} \]
  4. Final simplification89.4%

    \[\leadsto x \]
  5. Add Preprocessing

Developer target: 86.5% accurate, 1.9× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_1 := \frac{-0.5}{y \cdot t}\\ \mathbf{if}\;z < -2.8874623088207947 \cdot 10^{+119}:\\ \;\;\;\;\left(x - \frac{t\_1}{z \cdot z}\right) - t\_1 \cdot \frac{\frac{2}{z}}{z \cdot z}\\ \mathbf{else}:\\ \;\;\;\;x - \frac{\log \left(1 + z \cdot y\right)}{t}\\ \end{array} \end{array} \]
;; Developer target: asymptotic series for huge negative z, direct log otherwise.
(FPCore (x y z t)
 :precision binary64
 ;; t_1 = -1/(2*y*t) is shared by both series terms.
 (let* ((t_1 (/ (- 0.5) (* y t))))
   (if (< z -2.8874623088207947e+119)
     (- (- x (/ t_1 (* z z))) (* t_1 (/ (/ 2.0 z) (* z z))))
     (- x (/ (log (+ 1.0 (* z y))) t)))))
double code(double x, double y, double z, double t) {
	double t_1 = -0.5 / (y * t);
	double tmp;
	if (z < -2.8874623088207947e+119) {
		tmp = (x - (t_1 / (z * z))) - (t_1 * ((2.0 / z) / (z * z)));
	} else {
		tmp = x - (log((1.0 + (z * y))) / t);
	}
	return tmp;
}
! Developer target: two-term asymptotic series in 1/z for very large
! negative z, and the direct approximation log(1 + z*y) otherwise.
real(8) function code(x, y, z, t)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    real(8) :: t_1
    real(8) :: tmp
    ! t_1 = -1/(2*y*t) is computed unconditionally, before the branch on z.
    t_1 = -0.5d0 / (y * t)
    if (z < (-2.8874623088207947d+119)) then
        ! Leading two terms of the series expansion.
        tmp = (x - (t_1 / (z * z))) - (t_1 * ((2.0d0 / z) / (z * z)))
    else
        ! Direct evaluation with the log argument approximated by 1 + z*y.
        tmp = x - (log((1.0d0 + (z * y))) / t)
    end if
    code = tmp
end function
/** Developer target: asymptotic series for huge negative z, direct log otherwise. */
public static double code(double x, double y, double z, double t) {
	// t_1 is evaluated before the branch, matching the published target.
	double t_1 = -0.5 / (y * t);
	if (z < -2.8874623088207947e+119) {
		// z*z hoisted into a local; same FP value as computing it twice.
		double zz = z * z;
		return (x - t_1 / zz) - t_1 * ((2.0 / z) / zz);
	}
	return x - Math.log(1.0 + z * y) / t;
}
def code(x, y, z, t):
	"""Developer target: asymptotic series for huge negative z, direct log otherwise.

	t_1 is computed unconditionally, so y*t == 0 raises ZeroDivisionError on
	both paths, exactly as in the original translation.
	"""
	t_1 = -0.5 / (y * t)
	if z < -2.8874623088207947e+119:
		# z*z hoisted into a local; same FP value as computing it twice.
		zz = z * z
		return (x - t_1 / zz) - t_1 * ((2.0 / z) / zz)
	return x - math.log(1.0 + z * y) / t
# Developer target: asymptotic series for huge negative z, direct log otherwise.
function code(x, y, z, t)
	# t_1 = -1/(2*y*t) is computed unconditionally, before the branch on z.
	t_1 = Float64(Float64(-0.5) / Float64(y * t))
	tmp = 0.0
	if (z < -2.8874623088207947e+119)
		# Leading two terms of the series expansion.
		tmp = Float64(Float64(x - Float64(t_1 / Float64(z * z))) - Float64(t_1 * Float64(Float64(2.0 / z) / Float64(z * z))));
	else
		# Direct evaluation with the log argument approximated by 1 + z*y.
		tmp = Float64(x - Float64(log(Float64(1.0 + Float64(z * y))) / t));
	end
	return tmp
end
% Developer target: asymptotic series for huge negative z, direct log otherwise.
function tmp_2 = code(x, y, z, t)
	% t_1 = -1/(2*y*t) is computed unconditionally, before the branch on z.
	t_1 = -0.5 / (y * t);
	tmp = 0.0;
	if (z < -2.8874623088207947e+119)
		% Leading two terms of the series expansion.
		tmp = (x - (t_1 / (z * z))) - (t_1 * ((2.0 / z) / (z * z)));
	else
		% Direct evaluation with the log argument approximated by 1 + z*y.
		tmp = x - (log((1.0 + (z * y))) / t);
	end
	tmp_2 = tmp;
end
code[x_, y_, z_, t_] := Block[{t$95$1 = N[((-0.5) / N[(y * t), $MachinePrecision]), $MachinePrecision]}, If[Less[z, -2.8874623088207947e+119], N[(N[(x - N[(t$95$1 / N[(z * z), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(t$95$1 * N[(N[(2.0 / z), $MachinePrecision] / N[(z * z), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(x - N[(N[Log[N[(1.0 + N[(z * y), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] / t), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
t_1 := \frac{-0.5}{y \cdot t}\\
\mathbf{if}\;z < -2.8874623088207947 \cdot 10^{+119}:\\
\;\;\;\;\left(x - \frac{t\_1}{z \cdot z}\right) - t\_1 \cdot \frac{\frac{2}{z}}{z \cdot z}\\

\mathbf{else}:\\
\;\;\;\;x - \frac{\log \left(1 + z \cdot y\right)}{t}\\


\end{array}
\end{array}

Reproduce

?
herbie shell --seed 2024031 
;; Reproduction FPCore: the original expression plus the developer's target rewrite.
(FPCore (x y z t)
  :name "System.Random.MWC.Distributions:truncatedExp from mwc-random-0.13.3.2"
  :precision binary64

  ;; Hand-written target supplied by the library developers.
  :herbie-target
  (if (< z -2.8874623088207947e+119) (- (- x (/ (/ (- 0.5) (* y t)) (* z z))) (* (/ (- 0.5) (* y t)) (/ (/ 2.0 z) (* z z)))) (- x (/ (log (+ 1.0 (* z y))) t)))

  ;; Original program from the source library.
  (- x (/ (log (+ (- 1.0 y) (* y (exp z)))) t)))