Harley's example

Percentage Accurate: 90.7% → 98.5%
Time: 59.5s
Alternatives: 2
Speedup: 896.0×

Specification

\[0 < c\_p \land 0 < c\_n\]
\[\begin{array}{l} \\ \begin{array}{l} t_1 := \frac{1}{1 + e^{-t}}\\ t_2 := \frac{1}{1 + e^{-s}}\\ \frac{{t\_2}^{c\_p} \cdot {\left(1 - t\_2\right)}^{c\_n}}{{t\_1}^{c\_p} \cdot {\left(1 - t\_1\right)}^{c\_n}} \end{array} \end{array} \]
(FPCore (c_p c_n t s)
 :precision binary64
 (let* ((t_1 (/ 1.0 (+ 1.0 (exp (- t))))) (t_2 (/ 1.0 (+ 1.0 (exp (- s))))))
   (/
    (* (pow t_2 c_p) (pow (- 1.0 t_2) c_n))
    (* (pow t_1 c_p) (pow (- 1.0 t_1) c_n)))))
double code(double c_p, double c_n, double t, double s) {
	double t_1 = 1.0 / (1.0 + exp(-t));
	double t_2 = 1.0 / (1.0 + exp(-s));
	return (pow(t_2, c_p) * pow((1.0 - t_2), c_n)) / (pow(t_1, c_p) * pow((1.0 - t_1), c_n));
}
real(8) function code(c_p, c_n, t, s)
    real(8), intent (in) :: c_p
    real(8), intent (in) :: c_n
    real(8), intent (in) :: t
    real(8), intent (in) :: s
    real(8) :: t_1
    real(8) :: t_2
    t_1 = 1.0d0 / (1.0d0 + exp(-t))
    t_2 = 1.0d0 / (1.0d0 + exp(-s))
    code = ((t_2 ** c_p) * ((1.0d0 - t_2) ** c_n)) / ((t_1 ** c_p) * ((1.0d0 - t_1) ** c_n))
end function
public static double code(double c_p, double c_n, double t, double s) {
	double t_1 = 1.0 / (1.0 + Math.exp(-t));
	double t_2 = 1.0 / (1.0 + Math.exp(-s));
	return (Math.pow(t_2, c_p) * Math.pow((1.0 - t_2), c_n)) / (Math.pow(t_1, c_p) * Math.pow((1.0 - t_1), c_n));
}
def code(c_p, c_n, t, s):
	t_1 = 1.0 / (1.0 + math.exp(-t))
	t_2 = 1.0 / (1.0 + math.exp(-s))
	return (math.pow(t_2, c_p) * math.pow((1.0 - t_2), c_n)) / (math.pow(t_1, c_p) * math.pow((1.0 - t_1), c_n))
function code(c_p, c_n, t, s)
	t_1 = Float64(1.0 / Float64(1.0 + exp(Float64(-t))))
	t_2 = Float64(1.0 / Float64(1.0 + exp(Float64(-s))))
	return Float64(Float64((t_2 ^ c_p) * (Float64(1.0 - t_2) ^ c_n)) / Float64((t_1 ^ c_p) * (Float64(1.0 - t_1) ^ c_n)))
end
function tmp = code(c_p, c_n, t, s)
	t_1 = 1.0 / (1.0 + exp(-t));
	t_2 = 1.0 / (1.0 + exp(-s));
	tmp = ((t_2 ^ c_p) * ((1.0 - t_2) ^ c_n)) / ((t_1 ^ c_p) * ((1.0 - t_1) ^ c_n));
end
code[c$95$p_, c$95$n_, t_, s_] := Block[{t$95$1 = N[(1.0 / N[(1.0 + N[Exp[(-t)], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$2 = N[(1.0 / N[(1.0 + N[Exp[(-s)], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, N[(N[(N[Power[t$95$2, c$95$p], $MachinePrecision] * N[Power[N[(1.0 - t$95$2), $MachinePrecision], c$95$n], $MachinePrecision]), $MachinePrecision] / N[(N[Power[t$95$1, c$95$p], $MachinePrecision] * N[Power[N[(1.0 - t$95$1), $MachinePrecision], c$95$n], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
t_1 := \frac{1}{1 + e^{-t}}\\
t_2 := \frac{1}{1 + e^{-s}}\\
\frac{{t\_2}^{c\_p} \cdot {\left(1 - t\_2\right)}^{c\_n}}{{t\_1}^{c\_p} \cdot {\left(1 - t\_1\right)}^{c\_n}}
\end{array}
\end{array}
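
A side note, not part of the report: one rounding-error hazard in this specification is the direct subtraction 1 − 1/(1 + e^{-x}), which can be rewritten exactly as

\[1 - \frac{1}{1 + e^{-x}} = \frac{e^{-x}}{1 + e^{-x}} = \frac{1}{1 + e^{x}}\]

Computed literally in binary64, the left-hand form loses significant bits once e^{-x} is small compared to 1; the developer target and Herbie's log1p-based rewrites below avoid exactly this subtraction.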

Sampling outcomes in binary64 precision:

Local Percentage Accuracy vs Input Value

The average percentage accuracy by input value. The horizontal axis shows the value of an input variable; the variable is chosen in the title. The vertical axis is accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion. These can be toggled with buttons below the plot. The line is an average, while the dots represent individual samples.
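
To make that measurement concrete, here is a minimal sketch of the idea in Python (not Herbie's actual sampler or error metric): evaluate the program in binary64 and compare it with a high-precision reference. The mpmath dependency, the 300-bit reference precision, and the simple bits-of-error formula are assumptions for illustration only.

import math
from mpmath import mp, mpf, exp as mp_exp, power as mp_power, log as mp_log, fabs as mp_fabs

mp.prec = 300  # assumed "exact enough" reference precision

def prog_f64(c_p, c_n, t, s):
    # the original program in ordinary binary64 arithmetic
    t_1 = 1.0 / (1.0 + math.exp(-t))
    t_2 = 1.0 / (1.0 + math.exp(-s))
    return (math.pow(t_2, c_p) * math.pow(1.0 - t_2, c_n)) / \
           (math.pow(t_1, c_p) * math.pow(1.0 - t_1, c_n))

def prog_ref(c_p, c_n, t, s):
    # the same expression evaluated in 300-bit mpmath arithmetic
    t_1 = 1 / (1 + mp_exp(-mpf(t)))
    t_2 = 1 / (1 + mp_exp(-mpf(s)))
    return (mp_power(t_2, c_p) * mp_power(1 - t_2, c_n)) / \
           (mp_power(t_1, c_p) * mp_power(1 - t_1, c_n))

def bits_of_error(approx, exact):
    # crude measure: how many of the 53 significand bits are wrong
    if mpf(approx) == exact:
        return 0.0
    rel = mp_fabs(mpf(approx) - exact) / mp_fabs(exact)
    return max(0.0, min(53.0, 53.0 + float(mp_log(rel, 2))))

# one arbitrarily chosen sample point
args = (3.0, 2.0, 10.0, 10.0001)
print(bits_of_error(prog_f64(*args), prog_ref(*args)))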

Accuracy vs Speed

Herbie found 2 alternatives:

Alternative      Accuracy    Speedup
Alternative 1    98.5%       7.5×
Alternative 2    94.1%       896.0×

The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed-accuracy tradeoffs.

Initial Program: 90.7% accurate, 1.0× speedup

\[\begin{array}{l} \\ \begin{array}{l} t_1 := \frac{1}{1 + e^{-t}}\\ t_2 := \frac{1}{1 + e^{-s}}\\ \frac{{t\_2}^{c\_p} \cdot {\left(1 - t\_2\right)}^{c\_n}}{{t\_1}^{c\_p} \cdot {\left(1 - t\_1\right)}^{c\_n}} \end{array} \end{array} \]
(FPCore (c_p c_n t s)
 :precision binary64
 (let* ((t_1 (/ 1.0 (+ 1.0 (exp (- t))))) (t_2 (/ 1.0 (+ 1.0 (exp (- s))))))
   (/
    (* (pow t_2 c_p) (pow (- 1.0 t_2) c_n))
    (* (pow t_1 c_p) (pow (- 1.0 t_1) c_n)))))
double code(double c_p, double c_n, double t, double s) {
	double t_1 = 1.0 / (1.0 + exp(-t));
	double t_2 = 1.0 / (1.0 + exp(-s));
	return (pow(t_2, c_p) * pow((1.0 - t_2), c_n)) / (pow(t_1, c_p) * pow((1.0 - t_1), c_n));
}
real(8) function code(c_p, c_n, t, s)
    real(8), intent (in) :: c_p
    real(8), intent (in) :: c_n
    real(8), intent (in) :: t
    real(8), intent (in) :: s
    real(8) :: t_1
    real(8) :: t_2
    t_1 = 1.0d0 / (1.0d0 + exp(-t))
    t_2 = 1.0d0 / (1.0d0 + exp(-s))
    code = ((t_2 ** c_p) * ((1.0d0 - t_2) ** c_n)) / ((t_1 ** c_p) * ((1.0d0 - t_1) ** c_n))
end function
public static double code(double c_p, double c_n, double t, double s) {
	double t_1 = 1.0 / (1.0 + Math.exp(-t));
	double t_2 = 1.0 / (1.0 + Math.exp(-s));
	return (Math.pow(t_2, c_p) * Math.pow((1.0 - t_2), c_n)) / (Math.pow(t_1, c_p) * Math.pow((1.0 - t_1), c_n));
}
def code(c_p, c_n, t, s):
	t_1 = 1.0 / (1.0 + math.exp(-t))
	t_2 = 1.0 / (1.0 + math.exp(-s))
	return (math.pow(t_2, c_p) * math.pow((1.0 - t_2), c_n)) / (math.pow(t_1, c_p) * math.pow((1.0 - t_1), c_n))
function code(c_p, c_n, t, s)
	t_1 = Float64(1.0 / Float64(1.0 + exp(Float64(-t))))
	t_2 = Float64(1.0 / Float64(1.0 + exp(Float64(-s))))
	return Float64(Float64((t_2 ^ c_p) * (Float64(1.0 - t_2) ^ c_n)) / Float64((t_1 ^ c_p) * (Float64(1.0 - t_1) ^ c_n)))
end
function tmp = code(c_p, c_n, t, s)
	t_1 = 1.0 / (1.0 + exp(-t));
	t_2 = 1.0 / (1.0 + exp(-s));
	tmp = ((t_2 ^ c_p) * ((1.0 - t_2) ^ c_n)) / ((t_1 ^ c_p) * ((1.0 - t_1) ^ c_n));
end
code[c$95$p_, c$95$n_, t_, s_] := Block[{t$95$1 = N[(1.0 / N[(1.0 + N[Exp[(-t)], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$2 = N[(1.0 / N[(1.0 + N[Exp[(-s)], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, N[(N[(N[Power[t$95$2, c$95$p], $MachinePrecision] * N[Power[N[(1.0 - t$95$2), $MachinePrecision], c$95$n], $MachinePrecision]), $MachinePrecision] / N[(N[Power[t$95$1, c$95$p], $MachinePrecision] * N[Power[N[(1.0 - t$95$1), $MachinePrecision], c$95$n], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
t_1 := \frac{1}{1 + e^{-t}}\\
t_2 := \frac{1}{1 + e^{-s}}\\
\frac{{t\_2}^{c\_p} \cdot {\left(1 - t\_2\right)}^{c\_n}}{{t\_1}^{c\_p} \cdot {\left(1 - t\_1\right)}^{c\_n}}
\end{array}
\end{array}

Alternative 1: 98.5% accurate, 7.5× speedup

\[\begin{array}{l} \\ e^{-0.125 \cdot \left(s \cdot \left(s \cdot \left(c\_n + c\_p\right)\right)\right)} \end{array} \]
(FPCore (c_p c_n t s)
 :precision binary64
 (exp (* -0.125 (* s (* s (+ c_n c_p))))))
double code(double c_p, double c_n, double t, double s) {
	return exp((-0.125 * (s * (s * (c_n + c_p)))));
}
real(8) function code(c_p, c_n, t, s)
    real(8), intent (in) :: c_p
    real(8), intent (in) :: c_n
    real(8), intent (in) :: t
    real(8), intent (in) :: s
    code = exp(((-0.125d0) * (s * (s * (c_n + c_p)))))
end function
public static double code(double c_p, double c_n, double t, double s) {
	return Math.exp((-0.125 * (s * (s * (c_n + c_p)))));
}
def code(c_p, c_n, t, s):
	return math.exp((-0.125 * (s * (s * (c_n + c_p)))))
function code(c_p, c_n, t, s)
	return exp(Float64(-0.125 * Float64(s * Float64(s * Float64(c_n + c_p)))))
end
function tmp = code(c_p, c_n, t, s)
	tmp = exp((-0.125 * (s * (s * (c_n + c_p)))));
end
code[c$95$p_, c$95$n_, t_, s_] := N[Exp[N[(-0.125 * N[(s * N[(s * N[(c$95$n + c$95$p), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}

\\
e^{-0.125 \cdot \left(s \cdot \left(s \cdot \left(c\_n + c\_p\right)\right)\right)}
\end{array}
Derivation
  1. Initial program 94.2%

    \[\frac{{\left(\frac{1}{1 + e^{-s}}\right)}^{c\_p} \cdot {\left(1 - \frac{1}{1 + e^{-s}}\right)}^{c\_n}}{{\left(\frac{1}{1 + e^{-t}}\right)}^{c\_p} \cdot {\left(1 - \frac{1}{1 + e^{-t}}\right)}^{c\_n}} \]
  2. Add Preprocessing
  3. Applied egg-rr 97.6%

    \[\leadsto \color{blue}{e^{\mathsf{fma}\left(c\_p, -\mathsf{log1p}\left(e^{-s}\right), c\_n \cdot \mathsf{log1p}\left(\frac{1}{-1 - e^{-s}}\right)\right) - \mathsf{fma}\left(c\_p, -\mathsf{log1p}\left(e^{-t}\right), c\_n \cdot \mathsf{log1p}\left(\frac{-1}{1 + e^{-t}}\right)\right)}} \]
  4. Taylor expanded in s around 0 (the underlying series is written out after this derivation)

    \[\leadsto e^{\color{blue}{\left(-1 \cdot \left(c\_p \cdot \log 2\right) + \left(c\_n \cdot \log \frac{1}{2} + s \cdot \left(\frac{-1}{2} \cdot c\_n + \left(\frac{1}{2} \cdot c\_p + s \cdot \left(\frac{-1}{8} \cdot c\_n + \left(\frac{-1}{8} \cdot c\_p + {s}^{2} \cdot \left(\frac{1}{192} \cdot c\_n + \frac{1}{192} \cdot c\_p\right)\right)\right)\right)\right)\right)\right) - \left(-1 \cdot \left(c\_p \cdot \log \left(1 + e^{\mathsf{neg}\left(t\right)}\right)\right) + c\_n \cdot \log \left(1 - \frac{1}{1 + e^{\mathsf{neg}\left(t\right)}}\right)\right)}} \]
  5. Simplified 94.7%

    \[\leadsto e^{\color{blue}{\mathsf{fma}\left(c\_p, -\log 2, \mathsf{fma}\left(s, \mathsf{fma}\left(c\_n, -0.5, \mathsf{fma}\left(s, \mathsf{fma}\left(-0.125, c\_n + c\_p, \left(s \cdot s\right) \cdot \left(0.005208333333333333 \cdot \left(c\_n + c\_p\right)\right)\right), c\_p \cdot 0.5\right)\right), \mathsf{fma}\left(c\_n, \log 0.5, \mathsf{fma}\left(\mathsf{log1p}\left(\frac{-1}{1 + e^{-t}}\right), -c\_n, c\_p \cdot \mathsf{log1p}\left(e^{-t}\right)\right)\right)\right)\right)}} \]
  6. Taylor expanded in s around inf

    \[\leadsto e^{\color{blue}{{s}^{4} \cdot \left(\frac{-1}{8} \cdot \frac{c\_n + c\_p}{{s}^{2}} + \frac{1}{192} \cdot \left(c\_n + c\_p\right)\right)}} \]
  7. Step-by-step derivation
    1. *-commutative N/A

      \[\leadsto e^{\color{blue}{\left(\frac{-1}{8} \cdot \frac{c\_n + c\_p}{{s}^{2}} + \frac{1}{192} \cdot \left(c\_n + c\_p\right)\right) \cdot {s}^{4}}} \]
    2. lower-*.f64 N/A

      \[\leadsto e^{\color{blue}{\left(\frac{-1}{8} \cdot \frac{c\_n + c\_p}{{s}^{2}} + \frac{1}{192} \cdot \left(c\_n + c\_p\right)\right) \cdot {s}^{4}}} \]
    3. +-commutative N/A

      \[\leadsto e^{\color{blue}{\left(\frac{1}{192} \cdot \left(c\_n + c\_p\right) + \frac{-1}{8} \cdot \frac{c\_n + c\_p}{{s}^{2}}\right)} \cdot {s}^{4}} \]
    4. lower-fma.f64 N/A

      \[\leadsto e^{\color{blue}{\mathsf{fma}\left(\frac{1}{192}, c\_n + c\_p, \frac{-1}{8} \cdot \frac{c\_n + c\_p}{{s}^{2}}\right)} \cdot {s}^{4}} \]
    5. lower-+.f64 N/A

      \[\leadsto e^{\mathsf{fma}\left(\frac{1}{192}, \color{blue}{c\_n + c\_p}, \frac{-1}{8} \cdot \frac{c\_n + c\_p}{{s}^{2}}\right) \cdot {s}^{4}} \]
    6. associate-*r/ N/A

      \[\leadsto e^{\mathsf{fma}\left(\frac{1}{192}, c\_n + c\_p, \color{blue}{\frac{\frac{-1}{8} \cdot \left(c\_n + c\_p\right)}{{s}^{2}}}\right) \cdot {s}^{4}} \]
    7. distribute-lft-in N/A

      \[\leadsto e^{\mathsf{fma}\left(\frac{1}{192}, c\_n + c\_p, \frac{\color{blue}{\frac{-1}{8} \cdot c\_n + \frac{-1}{8} \cdot c\_p}}{{s}^{2}}\right) \cdot {s}^{4}} \]
    8. lower-/.f64 N/A

      \[\leadsto e^{\mathsf{fma}\left(\frac{1}{192}, c\_n + c\_p, \color{blue}{\frac{\frac{-1}{8} \cdot c\_n + \frac{-1}{8} \cdot c\_p}{{s}^{2}}}\right) \cdot {s}^{4}} \]
    9. distribute-lft-in N/A

      \[\leadsto e^{\mathsf{fma}\left(\frac{1}{192}, c\_n + c\_p, \frac{\color{blue}{\frac{-1}{8} \cdot \left(c\_n + c\_p\right)}}{{s}^{2}}\right) \cdot {s}^{4}} \]
    10. lower-*.f64 N/A

      \[\leadsto e^{\mathsf{fma}\left(\frac{1}{192}, c\_n + c\_p, \frac{\color{blue}{\frac{-1}{8} \cdot \left(c\_n + c\_p\right)}}{{s}^{2}}\right) \cdot {s}^{4}} \]
    11. lower-+.f64 N/A

      \[\leadsto e^{\mathsf{fma}\left(\frac{1}{192}, c\_n + c\_p, \frac{\frac{-1}{8} \cdot \color{blue}{\left(c\_n + c\_p\right)}}{{s}^{2}}\right) \cdot {s}^{4}} \]
    12. unpow2 N/A

      \[\leadsto e^{\mathsf{fma}\left(\frac{1}{192}, c\_n + c\_p, \frac{\frac{-1}{8} \cdot \left(c\_n + c\_p\right)}{\color{blue}{s \cdot s}}\right) \cdot {s}^{4}} \]
    13. lower-*.f64 N/A

      \[\leadsto e^{\mathsf{fma}\left(\frac{1}{192}, c\_n + c\_p, \frac{\frac{-1}{8} \cdot \left(c\_n + c\_p\right)}{\color{blue}{s \cdot s}}\right) \cdot {s}^{4}} \]
    14. lower-pow.f64 50.2

      \[\leadsto e^{\mathsf{fma}\left(0.005208333333333333, c\_n + c\_p, \frac{-0.125 \cdot \left(c\_n + c\_p\right)}{s \cdot s}\right) \cdot \color{blue}{{s}^{4}}} \]
  8. Simplified 50.2%

    \[\leadsto e^{\color{blue}{\mathsf{fma}\left(0.005208333333333333, c\_n + c\_p, \frac{-0.125 \cdot \left(c\_n + c\_p\right)}{s \cdot s}\right) \cdot {s}^{4}}} \]
  9. Taylor expanded in s around 0

    \[\leadsto e^{\color{blue}{\frac{-1}{8} \cdot \left({s}^{2} \cdot \left(c\_n + c\_p\right)\right)}} \]
  10. Step-by-step derivation
    1. lower-*.f64 N/A

      \[\leadsto e^{\color{blue}{\frac{-1}{8} \cdot \left({s}^{2} \cdot \left(c\_n + c\_p\right)\right)}} \]
    2. unpow2 N/A

      \[\leadsto e^{\frac{-1}{8} \cdot \left(\color{blue}{\left(s \cdot s\right)} \cdot \left(c\_n + c\_p\right)\right)} \]
    3. associate-*l* N/A

      \[\leadsto e^{\frac{-1}{8} \cdot \color{blue}{\left(s \cdot \left(s \cdot \left(c\_n + c\_p\right)\right)\right)}} \]
    4. lower-*.f64 N/A

      \[\leadsto e^{\frac{-1}{8} \cdot \color{blue}{\left(s \cdot \left(s \cdot \left(c\_n + c\_p\right)\right)\right)}} \]
    5. lower-*.f64 N/A

      \[\leadsto e^{\frac{-1}{8} \cdot \left(s \cdot \color{blue}{\left(s \cdot \left(c\_n + c\_p\right)\right)}\right)} \]
    6. lower-+.f64 99.3

      \[\leadsto e^{-0.125 \cdot \left(s \cdot \left(s \cdot \color{blue}{\left(c\_n + c\_p\right)}\right)\right)} \]
  11. Simplified 99.3%

    \[\leadsto e^{\color{blue}{-0.125 \cdot \left(s \cdot \left(s \cdot \left(c\_n + c\_p\right)\right)\right)}} \]
  12. Add Preprocessing
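
For reference (this series is not spelled out in the report), the Taylor expansions in steps 4 and 9 rest on the series of log(1 + e^{-s}) around s = 0:

\[\log\left(1 + e^{-s}\right) = \log 2 - \frac{s}{2} + \frac{s^{2}}{8} - \frac{s^{4}}{192} + \cdots\]

Since log σ(s) = −log(1 + e^{−s}) and log(1 − σ(s)) = −log(1 + e^{s}) for σ(s) = 1/(1 + e^{−s}), the s-dependent part of the exponent in step 3 becomes

\[c\_p \cdot \log \sigma\left(s\right) + c\_n \cdot \log\left(1 - \sigma\left(s\right)\right) = -\left(c\_p + c\_n\right) \cdot \log 2 + \frac{c\_p - c\_n}{2} \cdot s - \frac{c\_p + c\_n}{8} \cdot {s}^{2} + \frac{c\_p + c\_n}{192} \cdot {s}^{4} - \cdots\]

Alternative 1 ultimately keeps only the quadratic term, giving the final e^{-0.125 · (s · (s · (c_n + c_p)))}; the constant, the linear term, the fourth-order correction, and the entire t-dependent part of the exponent are all dropped, which accounts for both the large speedup and the remaining error.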

Alternative 2: 94.1% accurate, 896.0× speedup

\[\begin{array}{l} \\ 1 \end{array} \]
(FPCore (c_p c_n t s) :precision binary64 1.0)
double code(double c_p, double c_n, double t, double s) {
	return 1.0;
}
real(8) function code(c_p, c_n, t, s)
    real(8), intent (in) :: c_p
    real(8), intent (in) :: c_n
    real(8), intent (in) :: t
    real(8), intent (in) :: s
    code = 1.0d0
end function
public static double code(double c_p, double c_n, double t, double s) {
	return 1.0;
}
def code(c_p, c_n, t, s):
	return 1.0
function code(c_p, c_n, t, s)
	return 1.0
end
function tmp = code(c_p, c_n, t, s)
	tmp = 1.0;
end
code[c$95$p_, c$95$n_, t_, s_] := 1.0
\begin{array}{l}

\\
1
\end{array}
Derivation
  1. Initial program 94.2%

    \[\frac{{\left(\frac{1}{1 + e^{-s}}\right)}^{c\_p} \cdot {\left(1 - \frac{1}{1 + e^{-s}}\right)}^{c\_n}}{{\left(\frac{1}{1 + e^{-t}}\right)}^{c\_p} \cdot {\left(1 - \frac{1}{1 + e^{-t}}\right)}^{c\_n}} \]
  2. Add Preprocessing
  3. Taylor expanded in c_n around 0 (see the note after this derivation)

    \[\leadsto \color{blue}{\frac{{\left(\frac{1}{1 + e^{\mathsf{neg}\left(s\right)}}\right)}^{c\_p}}{{\left(\frac{1}{1 + e^{\mathsf{neg}\left(t\right)}}\right)}^{c\_p}}} \]
  4. Step-by-step derivation
    1. lower-/.f64 N/A

      \[\leadsto \color{blue}{\frac{{\left(\frac{1}{1 + e^{\mathsf{neg}\left(s\right)}}\right)}^{c\_p}}{{\left(\frac{1}{1 + e^{\mathsf{neg}\left(t\right)}}\right)}^{c\_p}}} \]
    2. lower-pow.f64 N/A

      \[\leadsto \frac{\color{blue}{{\left(\frac{1}{1 + e^{\mathsf{neg}\left(s\right)}}\right)}^{c\_p}}}{{\left(\frac{1}{1 + e^{\mathsf{neg}\left(t\right)}}\right)}^{c\_p}} \]
    3. lower-/.f64 N/A

      \[\leadsto \frac{{\color{blue}{\left(\frac{1}{1 + e^{\mathsf{neg}\left(s\right)}}\right)}}^{c\_p}}{{\left(\frac{1}{1 + e^{\mathsf{neg}\left(t\right)}}\right)}^{c\_p}} \]
    4. lower-+.f64 N/A

      \[\leadsto \frac{{\left(\frac{1}{\color{blue}{1 + e^{\mathsf{neg}\left(s\right)}}}\right)}^{c\_p}}{{\left(\frac{1}{1 + e^{\mathsf{neg}\left(t\right)}}\right)}^{c\_p}} \]
    5. lower-exp.f64 N/A

      \[\leadsto \frac{{\left(\frac{1}{1 + \color{blue}{e^{\mathsf{neg}\left(s\right)}}}\right)}^{c\_p}}{{\left(\frac{1}{1 + e^{\mathsf{neg}\left(t\right)}}\right)}^{c\_p}} \]
    6. lower-neg.f64 N/A

      \[\leadsto \frac{{\left(\frac{1}{1 + e^{\color{blue}{\mathsf{neg}\left(s\right)}}}\right)}^{c\_p}}{{\left(\frac{1}{1 + e^{\mathsf{neg}\left(t\right)}}\right)}^{c\_p}} \]
    7. lower-pow.f64 N/A

      \[\leadsto \frac{{\left(\frac{1}{1 + e^{\mathsf{neg}\left(s\right)}}\right)}^{c\_p}}{\color{blue}{{\left(\frac{1}{1 + e^{\mathsf{neg}\left(t\right)}}\right)}^{c\_p}}} \]
    8. lower-/.f64 N/A

      \[\leadsto \frac{{\left(\frac{1}{1 + e^{\mathsf{neg}\left(s\right)}}\right)}^{c\_p}}{{\color{blue}{\left(\frac{1}{1 + e^{\mathsf{neg}\left(t\right)}}\right)}}^{c\_p}} \]
    9. lower-+.f64 N/A

      \[\leadsto \frac{{\left(\frac{1}{1 + e^{\mathsf{neg}\left(s\right)}}\right)}^{c\_p}}{{\left(\frac{1}{\color{blue}{1 + e^{\mathsf{neg}\left(t\right)}}}\right)}^{c\_p}} \]
    10. lower-exp.f64 N/A

      \[\leadsto \frac{{\left(\frac{1}{1 + e^{\mathsf{neg}\left(s\right)}}\right)}^{c\_p}}{{\left(\frac{1}{1 + \color{blue}{e^{\mathsf{neg}\left(t\right)}}}\right)}^{c\_p}} \]
    11. lower-neg.f64 95.1

      \[\leadsto \frac{{\left(\frac{1}{1 + e^{-s}}\right)}^{c\_p}}{{\left(\frac{1}{1 + e^{\color{blue}{-t}}}\right)}^{c\_p}} \]
  5. Simplified 95.1%

    \[\leadsto \color{blue}{\frac{{\left(\frac{1}{1 + e^{-s}}\right)}^{c\_p}}{{\left(\frac{1}{1 + e^{-t}}\right)}^{c\_p}}} \]
  6. Taylor expanded in c_p around 0

    \[\leadsto \color{blue}{1} \]
  7. Step-by-step derivation
    1. Simplified 96.3%

      \[\leadsto \color{blue}{1} \]
    2. Add Preprocessing
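
A short note, not part of the report, on why these two truncations collapse everything to a constant: for a positive base x,

\[x^{c} = e^{c \cdot \log x} = 1 + c \cdot \log x + O\left(c^{2}\right)\]

so truncating at zeroth order in c_n (step 3) removes the two factors raised to c_n, and truncating at zeroth order in c_p (step 6) removes the remaining ratio, leaving 1. This is why the alternative is so fast (896.0×), and it is accurate exactly to the extent that the true value of the expression stays close to 1 over the sampled inputs.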

    Developer Target 1: 96.3% accurate, 1.4× speedup

    \[\begin{array}{l} \\ {\left(\frac{1 + e^{-t}}{1 + e^{-s}}\right)}^{c\_p} \cdot {\left(\frac{1 + e^{t}}{1 + e^{s}}\right)}^{c\_n} \end{array} \]
    (FPCore (c_p c_n t s)
     :precision binary64
     (*
      (pow (/ (+ 1.0 (exp (- t))) (+ 1.0 (exp (- s)))) c_p)
      (pow (/ (+ 1.0 (exp t)) (+ 1.0 (exp s))) c_n)))
    double code(double c_p, double c_n, double t, double s) {
    	return pow(((1.0 + exp(-t)) / (1.0 + exp(-s))), c_p) * pow(((1.0 + exp(t)) / (1.0 + exp(s))), c_n);
    }
    
    real(8) function code(c_p, c_n, t, s)
        real(8), intent (in) :: c_p
        real(8), intent (in) :: c_n
        real(8), intent (in) :: t
        real(8), intent (in) :: s
        code = (((1.0d0 + exp(-t)) / (1.0d0 + exp(-s))) ** c_p) * (((1.0d0 + exp(t)) / (1.0d0 + exp(s))) ** c_n)
    end function
    
    public static double code(double c_p, double c_n, double t, double s) {
    	return Math.pow(((1.0 + Math.exp(-t)) / (1.0 + Math.exp(-s))), c_p) * Math.pow(((1.0 + Math.exp(t)) / (1.0 + Math.exp(s))), c_n);
    }
    
    def code(c_p, c_n, t, s):
    	return math.pow(((1.0 + math.exp(-t)) / (1.0 + math.exp(-s))), c_p) * math.pow(((1.0 + math.exp(t)) / (1.0 + math.exp(s))), c_n)
    
    function code(c_p, c_n, t, s)
    	return Float64((Float64(Float64(1.0 + exp(Float64(-t))) / Float64(1.0 + exp(Float64(-s)))) ^ c_p) * (Float64(Float64(1.0 + exp(t)) / Float64(1.0 + exp(s))) ^ c_n))
    end
    
    function tmp = code(c_p, c_n, t, s)
    	tmp = (((1.0 + exp(-t)) / (1.0 + exp(-s))) ^ c_p) * (((1.0 + exp(t)) / (1.0 + exp(s))) ^ c_n);
    end
    
    code[c$95$p_, c$95$n_, t_, s_] := N[(N[Power[N[(N[(1.0 + N[Exp[(-t)], $MachinePrecision]), $MachinePrecision] / N[(1.0 + N[Exp[(-s)], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], c$95$p], $MachinePrecision] * N[Power[N[(N[(1.0 + N[Exp[t], $MachinePrecision]), $MachinePrecision] / N[(1.0 + N[Exp[s], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], c$95$n], $MachinePrecision]), $MachinePrecision]
    
    \begin{array}{l}
    
    \\
    {\left(\frac{1 + e^{-t}}{1 + e^{-s}}\right)}^{c\_p} \cdot {\left(\frac{1 + e^{t}}{1 + e^{s}}\right)}^{c\_n}
    \end{array}
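
    A quick check, not part of the report, of why this target is algebraically identical to the specification: writing σ(x) = 1/(1 + e^{-x}), the specification regroups to (σ(s)/σ(t))^{c_p} · ((1 − σ(s))/(1 − σ(t)))^{c_n}, and

    \[\frac{\sigma\left(s\right)}{\sigma\left(t\right)} = \frac{1 + e^{-t}}{1 + e^{-s}}, \qquad \frac{1 - \sigma\left(s\right)}{1 - \sigma\left(t\right)} = \frac{1 + e^{t}}{1 + e^{s}}\]

    So the target computes exactly the same value while never forming 1 − σ(x) as an explicit subtraction, which is one of the main sources of rounding error in the initial program.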
    

    Reproduce

    herbie shell --seed 2024211 
    (FPCore (c_p c_n t s)
      :name "Harley's example"
      :precision binary64
      :pre (and (< 0.0 c_p) (< 0.0 c_n))
    
      :alt
      (! :herbie-platform default (* (pow (/ (+ 1 (exp (- t))) (+ 1 (exp (- s)))) c_p) (pow (/ (+ 1 (exp t)) (+ 1 (exp s))) c_n)))
    
      (/ (* (pow (/ 1.0 (+ 1.0 (exp (- s)))) c_p) (pow (- 1.0 (/ 1.0 (+ 1.0 (exp (- s))))) c_n)) (* (pow (/ 1.0 (+ 1.0 (exp (- t)))) c_p) (pow (- 1.0 (/ 1.0 (+ 1.0 (exp (- t))))) c_n))))