
;; Ratio of sigmoid powers: sigmoid(s)^c_p * (1 - sigmoid(s))^c_n divided by
;; the same expression evaluated at t, with sigmoid(x) = 1 / (1 + exp(-x)).
(FPCore (c_p c_n t s)
:precision binary64
(let* ((t_1 (/ 1.0 (+ 1.0 (exp (- t))))) (t_2 (/ 1.0 (+ 1.0 (exp (- s))))))
(/
(* (pow t_2 c_p) (pow (- 1.0 t_2) c_n))
(* (pow t_1 c_p) (pow (- 1.0 t_1) c_n)))))
/* Computes sigmoid(s)^c_p * (1 - sigmoid(s))^c_n divided by the same
 * expression evaluated at t, where sigmoid(x) = 1 / (1 + exp(-x)).
 * All arithmetic in binary64. */
double code(double c_p, double c_n, double t, double s) {
double t_1 = 1.0 / (1.0 + exp(-t)); /* sigmoid(t) */
double t_2 = 1.0 / (1.0 + exp(-s)); /* sigmoid(s) */
return (pow(t_2, c_p) * pow((1.0 - t_2), c_n)) / (pow(t_1, c_p) * pow((1.0 - t_1), c_n));
}
! Computes sigmoid(s)**c_p * (1 - sigmoid(s))**c_n divided by the same
! expression evaluated at t, where sigmoid(x) = 1 / (1 + exp(-x)).
real(8) function code(c_p, c_n, t, s)
real(8), intent (in) :: c_p
real(8), intent (in) :: c_n
real(8), intent (in) :: t
real(8), intent (in) :: s
real(8) :: t_1
real(8) :: t_2
! Logistic sigmoids of t and s.
t_1 = 1.0d0 / (1.0d0 + exp(-t))
t_2 = 1.0d0 / (1.0d0 + exp(-s))
code = ((t_2 ** c_p) * ((1.0d0 - t_2) ** c_n)) / ((t_1 ** c_p) * ((1.0d0 - t_1) ** c_n))
end function
/**
 * Evaluates sigmoid(s)^c_p * (1 - sigmoid(s))^c_n divided by the same
 * expression at t, where sigmoid(x) = 1 / (1 + Math.exp(-x)).
 *
 * @param c_p exponent applied to the sigmoid terms
 * @param c_n exponent applied to the (1 - sigmoid) terms
 * @param t   argument of the denominator's sigmoid
 * @param s   argument of the numerator's sigmoid
 * @return the ratio described above, in double precision
 */
public static double code(double c_p, double c_n, double t, double s) {
double t_1 = 1.0 / (1.0 + Math.exp(-t)); // sigmoid(t)
double t_2 = 1.0 / (1.0 + Math.exp(-s)); // sigmoid(s)
return (Math.pow(t_2, c_p) * Math.pow((1.0 - t_2), c_n)) / (Math.pow(t_1, c_p) * Math.pow((1.0 - t_1), c_n));
}
def code(c_p, c_n, t, s):
    """Return sigmoid(s)**c_p * (1 - sigmoid(s))**c_n divided by the same
    expression evaluated at t, where sigmoid(x) = 1 / (1 + exp(-x)).

    The original report emitted this function on a single line with the
    statements fused together, which is not valid Python syntax; the
    computation itself is unchanged.
    """
    # Logistic sigmoids of t and s.
    t_1 = 1.0 / (1.0 + math.exp(-t))
    t_2 = 1.0 / (1.0 + math.exp(-s))
    return (math.pow(t_2, c_p) * math.pow((1.0 - t_2), c_n)) / (math.pow(t_1, c_p) * math.pow((1.0 - t_1), c_n))
# Return sigmoid(s)^c_p * (1 - sigmoid(s))^c_n divided by the same expression
# evaluated at t, where sigmoid(x) = 1 / (1 + exp(-x)).
# The report emitted the body fused onto one line without statement
# separators, which Julia rejects; split onto separate lines, logic unchanged.
function code(c_p, c_n, t, s)
    t_1 = Float64(1.0 / Float64(1.0 + exp(Float64(-t))))
    t_2 = Float64(1.0 / Float64(1.0 + exp(Float64(-s))))
    return Float64(Float64((t_2 ^ c_p) * (Float64(1.0 - t_2) ^ c_n)) / Float64((t_1 ^ c_p) * (Float64(1.0 - t_1) ^ c_n)))
end
% Return sigmoid(s)^c_p * (1 - sigmoid(s))^c_n divided by the same expression
% evaluated at t, where sigmoid(x) = 1 / (1 + exp(-x)).
% The report fused the declaration and body onto one line with no separator
% after the signature, which MATLAB rejects; reformatted, logic unchanged.
function tmp = code(c_p, c_n, t, s)
    t_1 = 1.0 / (1.0 + exp(-t));  % sigmoid(t)
    t_2 = 1.0 / (1.0 + exp(-s));  % sigmoid(s)
    tmp = ((t_2 ^ c_p) * ((1.0 - t_2) ^ c_n)) / ((t_1 ^ c_p) * ((1.0 - t_1) ^ c_n));
end
(* Ratio of sigmoid powers: Power[sigmoid[s], c_p] * Power[1 - sigmoid[s], c_n]
   over the same at t; every intermediate is rounded via N[..., $MachinePrecision]. *)
code[c$95$p_, c$95$n_, t_, s_] := Block[{t$95$1 = N[(1.0 / N[(1.0 + N[Exp[(-t)], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$2 = N[(1.0 / N[(1.0 + N[Exp[(-s)], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, N[(N[(N[Power[t$95$2, c$95$p], $MachinePrecision] * N[Power[N[(1.0 - t$95$2), $MachinePrecision], c$95$n], $MachinePrecision]), $MachinePrecision] / N[(N[Power[t$95$1, c$95$p], $MachinePrecision] * N[Power[N[(1.0 - t$95$1), $MachinePrecision], c$95$n], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_1 := \frac{1}{1 + e^{-t}}\\
t_2 := \frac{1}{1 + e^{-s}}\\
\frac{{t\_2}^{c\_p} \cdot {\left(1 - t\_2\right)}^{c\_n}}{{t\_1}^{c\_p} \cdot {\left(1 - t\_1\right)}^{c\_n}}
\end{array}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 8 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (c_p c_n t s)
:precision binary64
(let* ((t_1 (/ 1.0 (+ 1.0 (exp (- t))))) (t_2 (/ 1.0 (+ 1.0 (exp (- s))))))
(/
(* (pow t_2 c_p) (pow (- 1.0 t_2) c_n))
(* (pow t_1 c_p) (pow (- 1.0 t_1) c_n)))))
double code(double c_p, double c_n, double t, double s) {
double t_1 = 1.0 / (1.0 + exp(-t));
double t_2 = 1.0 / (1.0 + exp(-s));
return (pow(t_2, c_p) * pow((1.0 - t_2), c_n)) / (pow(t_1, c_p) * pow((1.0 - t_1), c_n));
}
real(8) function code(c_p, c_n, t, s)
real(8), intent (in) :: c_p
real(8), intent (in) :: c_n
real(8), intent (in) :: t
real(8), intent (in) :: s
real(8) :: t_1
real(8) :: t_2
t_1 = 1.0d0 / (1.0d0 + exp(-t))
t_2 = 1.0d0 / (1.0d0 + exp(-s))
code = ((t_2 ** c_p) * ((1.0d0 - t_2) ** c_n)) / ((t_1 ** c_p) * ((1.0d0 - t_1) ** c_n))
end function
public static double code(double c_p, double c_n, double t, double s) {
double t_1 = 1.0 / (1.0 + Math.exp(-t));
double t_2 = 1.0 / (1.0 + Math.exp(-s));
return (Math.pow(t_2, c_p) * Math.pow((1.0 - t_2), c_n)) / (Math.pow(t_1, c_p) * Math.pow((1.0 - t_1), c_n));
}
def code(c_p, c_n, t, s):
    """Return sigmoid(s)**c_p * (1 - sigmoid(s))**c_n divided by the same
    expression evaluated at t, where sigmoid(x) = 1 / (1 + exp(-x)).

    The original report emitted this function on a single line with the
    statements fused together, which is not valid Python syntax; the
    computation itself is unchanged.
    """
    # Logistic sigmoids of t and s.
    t_1 = 1.0 / (1.0 + math.exp(-t))
    t_2 = 1.0 / (1.0 + math.exp(-s))
    return (math.pow(t_2, c_p) * math.pow((1.0 - t_2), c_n)) / (math.pow(t_1, c_p) * math.pow((1.0 - t_1), c_n))
function code(c_p, c_n, t, s) t_1 = Float64(1.0 / Float64(1.0 + exp(Float64(-t)))) t_2 = Float64(1.0 / Float64(1.0 + exp(Float64(-s)))) return Float64(Float64((t_2 ^ c_p) * (Float64(1.0 - t_2) ^ c_n)) / Float64((t_1 ^ c_p) * (Float64(1.0 - t_1) ^ c_n))) end
function tmp = code(c_p, c_n, t, s) t_1 = 1.0 / (1.0 + exp(-t)); t_2 = 1.0 / (1.0 + exp(-s)); tmp = ((t_2 ^ c_p) * ((1.0 - t_2) ^ c_n)) / ((t_1 ^ c_p) * ((1.0 - t_1) ^ c_n)); end
code[c$95$p_, c$95$n_, t_, s_] := Block[{t$95$1 = N[(1.0 / N[(1.0 + N[Exp[(-t)], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$2 = N[(1.0 / N[(1.0 + N[Exp[(-s)], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, N[(N[(N[Power[t$95$2, c$95$p], $MachinePrecision] * N[Power[N[(1.0 - t$95$2), $MachinePrecision], c$95$n], $MachinePrecision]), $MachinePrecision] / N[(N[Power[t$95$1, c$95$p], $MachinePrecision] * N[Power[N[(1.0 - t$95$1), $MachinePrecision], c$95$n], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_1 := \frac{1}{1 + e^{-t}}\\
t_2 := \frac{1}{1 + e^{-s}}\\
\frac{{t\_2}^{c\_p} \cdot {\left(1 - t\_2\right)}^{c\_n}}{{t\_1}^{c\_p} \cdot {\left(1 - t\_1\right)}^{c\_n}}
\end{array}
\end{array}
(FPCore (c_p c_n t s)
:precision binary64
(let* ((t_1 (exp (- 0.0 t))) (t_2 (exp (- 0.0 s))))
(if (<= c_p 4e-77)
(pow (/ 1.0 (fma s (fma s 0.5 -1.0) 2.0)) c_p)
(exp
(fma
c_p
(- (log1p t_1) (log1p t_2))
(*
c_n
(- (log1p (/ 1.0 (- -1.0 t_2))) (log1p (/ 1.0 (- -1.0 t_1))))))))))
/* Herbie alternative: same sigmoid-power ratio, rewritten with log1p/fma to
 * work in log space and reduce rounding error.  The c_p <= 4e-77 branch
 * replaces the power's base with a polynomial in s.
 * NOTE(review): both branch conditions and the polynomial were derived by
 * the generator for the sampled input distribution -- verify before reuse. */
double code(double c_p, double c_n, double t, double s) {
double t_1 = exp((0.0 - t)); /* exp(-t) */
double t_2 = exp((0.0 - s)); /* exp(-s) */
double tmp;
if (c_p <= 4e-77) {
/* Polynomial base 1 / (s*(s*0.5 - 1) + 2), raised to the tiny c_p. */
tmp = pow((1.0 / fma(s, fma(s, 0.5, -1.0), 2.0)), c_p);
} else {
/* exp(c_p*(log1p(e^-t) - log1p(e^-s)) + c_n*(log1p(1/(-1-e^-s)) - log1p(1/(-1-e^-t)))) */
tmp = exp(fma(c_p, (log1p(t_1) - log1p(t_2)), (c_n * (log1p((1.0 / (-1.0 - t_2))) - log1p((1.0 / (-1.0 - t_1)))))));
}
return tmp;
}
function code(c_p, c_n, t, s) t_1 = exp(Float64(0.0 - t)) t_2 = exp(Float64(0.0 - s)) tmp = 0.0 if (c_p <= 4e-77) tmp = Float64(1.0 / fma(s, fma(s, 0.5, -1.0), 2.0)) ^ c_p; else tmp = exp(fma(c_p, Float64(log1p(t_1) - log1p(t_2)), Float64(c_n * Float64(log1p(Float64(1.0 / Float64(-1.0 - t_2))) - log1p(Float64(1.0 / Float64(-1.0 - t_1))))))); end return tmp end
code[c$95$p_, c$95$n_, t_, s_] := Block[{t$95$1 = N[Exp[N[(0.0 - t), $MachinePrecision]], $MachinePrecision]}, Block[{t$95$2 = N[Exp[N[(0.0 - s), $MachinePrecision]], $MachinePrecision]}, If[LessEqual[c$95$p, 4e-77], N[Power[N[(1.0 / N[(s * N[(s * 0.5 + -1.0), $MachinePrecision] + 2.0), $MachinePrecision]), $MachinePrecision], c$95$p], $MachinePrecision], N[Exp[N[(c$95$p * N[(N[Log[1 + t$95$1], $MachinePrecision] - N[Log[1 + t$95$2], $MachinePrecision]), $MachinePrecision] + N[(c$95$n * N[(N[Log[1 + N[(1.0 / N[(-1.0 - t$95$2), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] - N[Log[1 + N[(1.0 / N[(-1.0 - t$95$1), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_1 := e^{0 - t}\\
t_2 := e^{0 - s}\\
\mathbf{if}\;c\_p \leq 4 \cdot 10^{-77}:\\
\;\;\;\;{\left(\frac{1}{\mathsf{fma}\left(s, \mathsf{fma}\left(s, 0.5, -1\right), 2\right)}\right)}^{c\_p}\\
\mathbf{else}:\\
\;\;\;\;e^{\mathsf{fma}\left(c\_p, \mathsf{log1p}\left(t\_1\right) - \mathsf{log1p}\left(t\_2\right), c\_n \cdot \left(\mathsf{log1p}\left(\frac{1}{-1 - t\_2}\right) - \mathsf{log1p}\left(\frac{1}{-1 - t\_1}\right)\right)\right)}\\
\end{array}
\end{array}
if c_p < 3.9999999999999997e-77: Initial program 92.0%
Taylor expanded in c_p around 0
Simplified93.8%
Taylor expanded in c_n around 0
pow-lowering-pow.f64N/A
neg-mul-1N/A
/-lowering-/.f64N/A
+-lowering-+.f64N/A
exp-lowering-exp.f64N/A
neg-mul-1N/A
neg-sub0N/A
--lowering--.f6494.4
Simplified94.4%
Taylor expanded in s around 0
+-commutativeN/A
accelerator-lowering-fma.f64N/A
sub-negN/A
*-commutativeN/A
metadata-evalN/A
accelerator-lowering-fma.f6498.3
Simplified98.3%
if 3.9999999999999997e-77 < c_p Initial program 84.3%
Applied egg-rr97.6%
Final simplification98.1%
(FPCore (c_p c_n t s) :precision binary64 (if (<= c_p 5e-20) (pow (/ 1.0 (fma s (fma s 0.5 -1.0) 2.0)) c_p) (pow (* (+ 1.0 (exp (- 0.0 t))) (/ 1.0 (+ 1.0 (exp (- 0.0 s))))) c_p)))
double code(double c_p, double c_n, double t, double s) {
double tmp;
if (c_p <= 5e-20) {
tmp = pow((1.0 / fma(s, fma(s, 0.5, -1.0), 2.0)), c_p);
} else {
tmp = pow(((1.0 + exp((0.0 - t))) * (1.0 / (1.0 + exp((0.0 - s))))), c_p);
}
return tmp;
}
function code(c_p, c_n, t, s) tmp = 0.0 if (c_p <= 5e-20) tmp = Float64(1.0 / fma(s, fma(s, 0.5, -1.0), 2.0)) ^ c_p; else tmp = Float64(Float64(1.0 + exp(Float64(0.0 - t))) * Float64(1.0 / Float64(1.0 + exp(Float64(0.0 - s))))) ^ c_p; end return tmp end
code[c$95$p_, c$95$n_, t_, s_] := If[LessEqual[c$95$p, 5e-20], N[Power[N[(1.0 / N[(s * N[(s * 0.5 + -1.0), $MachinePrecision] + 2.0), $MachinePrecision]), $MachinePrecision], c$95$p], $MachinePrecision], N[Power[N[(N[(1.0 + N[Exp[N[(0.0 - t), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] * N[(1.0 / N[(1.0 + N[Exp[N[(0.0 - s), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], c$95$p], $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;c\_p \leq 5 \cdot 10^{-20}:\\
\;\;\;\;{\left(\frac{1}{\mathsf{fma}\left(s, \mathsf{fma}\left(s, 0.5, -1\right), 2\right)}\right)}^{c\_p}\\
\mathbf{else}:\\
\;\;\;\;{\left(\left(1 + e^{0 - t}\right) \cdot \frac{1}{1 + e^{0 - s}}\right)}^{c\_p}\\
\end{array}
\end{array}
if c_p < 4.9999999999999999e-20Initial program 91.9%
Taylor expanded in c_p around 0
Simplified94.0%
Taylor expanded in c_n around 0
pow-lowering-pow.f64N/A
neg-mul-1N/A
/-lowering-/.f64N/A
+-lowering-+.f64N/A
exp-lowering-exp.f64N/A
neg-mul-1N/A
neg-sub0N/A
--lowering--.f6494.9
Simplified94.9%
Taylor expanded in s around 0
+-commutativeN/A
accelerator-lowering-fma.f64N/A
sub-negN/A
*-commutativeN/A
metadata-evalN/A
accelerator-lowering-fma.f6497.9
Simplified97.9%
if 4.9999999999999999e-20 < c_p Initial program 67.9%
Taylor expanded in c_n around 0
/-lowering-/.f64N/A
pow-lowering-pow.f64N/A
/-lowering-/.f64N/A
+-lowering-+.f64N/A
exp-lowering-exp.f64N/A
neg-sub0N/A
--lowering--.f64N/A
pow-lowering-pow.f64N/A
/-lowering-/.f64N/A
+-lowering-+.f64N/A
exp-lowering-exp.f64N/A
neg-sub0N/A
--lowering--.f6468.0
Simplified68.0%
Applied egg-rr96.1%
(FPCore (c_p c_n t s) :precision binary64 (if (<= t -5e-240) (pow (/ 1.0 (fma s (fma s (fma s -0.16666666666666666 0.5) -1.0) 2.0)) c_p) (pow (fma 0.5 (exp (- 0.0 t)) 0.5) c_p)))
double code(double c_p, double c_n, double t, double s) {
double tmp;
if (t <= -5e-240) {
tmp = pow((1.0 / fma(s, fma(s, fma(s, -0.16666666666666666, 0.5), -1.0), 2.0)), c_p);
} else {
tmp = pow(fma(0.5, exp((0.0 - t)), 0.5), c_p);
}
return tmp;
}
function code(c_p, c_n, t, s) tmp = 0.0 if (t <= -5e-240) tmp = Float64(1.0 / fma(s, fma(s, fma(s, -0.16666666666666666, 0.5), -1.0), 2.0)) ^ c_p; else tmp = fma(0.5, exp(Float64(0.0 - t)), 0.5) ^ c_p; end return tmp end
code[c$95$p_, c$95$n_, t_, s_] := If[LessEqual[t, -5e-240], N[Power[N[(1.0 / N[(s * N[(s * N[(s * -0.16666666666666666 + 0.5), $MachinePrecision] + -1.0), $MachinePrecision] + 2.0), $MachinePrecision]), $MachinePrecision], c$95$p], $MachinePrecision], N[Power[N[(0.5 * N[Exp[N[(0.0 - t), $MachinePrecision]], $MachinePrecision] + 0.5), $MachinePrecision], c$95$p], $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;t \leq -5 \cdot 10^{-240}:\\
\;\;\;\;{\left(\frac{1}{\mathsf{fma}\left(s, \mathsf{fma}\left(s, \mathsf{fma}\left(s, -0.16666666666666666, 0.5\right), -1\right), 2\right)}\right)}^{c\_p}\\
\mathbf{else}:\\
\;\;\;\;{\left(\mathsf{fma}\left(0.5, e^{0 - t}, 0.5\right)\right)}^{c\_p}\\
\end{array}
\end{array}
if t < -5.0000000000000004e-240Initial program 86.6%
Taylor expanded in c_p around 0
Simplified93.9%
Taylor expanded in c_n around 0
pow-lowering-pow.f64N/A
neg-mul-1N/A
/-lowering-/.f64N/A
+-lowering-+.f64N/A
exp-lowering-exp.f64N/A
neg-mul-1N/A
neg-sub0N/A
--lowering--.f6492.1
Simplified92.1%
Taylor expanded in s around 0
+-commutativeN/A
accelerator-lowering-fma.f64N/A
sub-negN/A
metadata-evalN/A
accelerator-lowering-fma.f64N/A
+-commutativeN/A
*-commutativeN/A
accelerator-lowering-fma.f6496.1
Simplified96.1%
if -5.0000000000000004e-240 < t Initial program 91.3%
Taylor expanded in c_n around 0
/-lowering-/.f64N/A
pow-lowering-pow.f64N/A
/-lowering-/.f64N/A
+-lowering-+.f64N/A
exp-lowering-exp.f64N/A
neg-sub0N/A
--lowering--.f64N/A
pow-lowering-pow.f64N/A
/-lowering-/.f64N/A
+-lowering-+.f64N/A
exp-lowering-exp.f64N/A
neg-sub0N/A
--lowering--.f6493.8
Simplified93.8%
Applied egg-rr96.4%
Taylor expanded in s around 0
+-commutativeN/A
distribute-lft-inN/A
metadata-evalN/A
accelerator-lowering-fma.f64N/A
neg-mul-1N/A
exp-lowering-exp.f64N/A
neg-mul-1N/A
neg-sub0N/A
--lowering--.f6497.6
Simplified97.6%
(FPCore (c_p c_n t s) :precision binary64 (if (<= t -5e-240) (pow (/ 1.0 (fma s (fma s (fma s -0.16666666666666666 0.5) -1.0) 2.0)) c_p) (fma -0.5 (* c_p t) 1.0)))
double code(double c_p, double c_n, double t, double s) {
double tmp;
if (t <= -5e-240) {
tmp = pow((1.0 / fma(s, fma(s, fma(s, -0.16666666666666666, 0.5), -1.0), 2.0)), c_p);
} else {
tmp = fma(-0.5, (c_p * t), 1.0);
}
return tmp;
}
function code(c_p, c_n, t, s) tmp = 0.0 if (t <= -5e-240) tmp = Float64(1.0 / fma(s, fma(s, fma(s, -0.16666666666666666, 0.5), -1.0), 2.0)) ^ c_p; else tmp = fma(-0.5, Float64(c_p * t), 1.0); end return tmp end
code[c$95$p_, c$95$n_, t_, s_] := If[LessEqual[t, -5e-240], N[Power[N[(1.0 / N[(s * N[(s * N[(s * -0.16666666666666666 + 0.5), $MachinePrecision] + -1.0), $MachinePrecision] + 2.0), $MachinePrecision]), $MachinePrecision], c$95$p], $MachinePrecision], N[(-0.5 * N[(c$95$p * t), $MachinePrecision] + 1.0), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;t \leq -5 \cdot 10^{-240}:\\
\;\;\;\;{\left(\frac{1}{\mathsf{fma}\left(s, \mathsf{fma}\left(s, \mathsf{fma}\left(s, -0.16666666666666666, 0.5\right), -1\right), 2\right)}\right)}^{c\_p}\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(-0.5, c\_p \cdot t, 1\right)\\
\end{array}
\end{array}
if t < -5.0000000000000004e-240Initial program 86.6%
Taylor expanded in c_p around 0
Simplified93.9%
Taylor expanded in c_n around 0
pow-lowering-pow.f64N/A
neg-mul-1N/A
/-lowering-/.f64N/A
+-lowering-+.f64N/A
exp-lowering-exp.f64N/A
neg-mul-1N/A
neg-sub0N/A
--lowering--.f6492.1
Simplified92.1%
Taylor expanded in s around 0
+-commutativeN/A
accelerator-lowering-fma.f64N/A
sub-negN/A
metadata-evalN/A
accelerator-lowering-fma.f64N/A
+-commutativeN/A
*-commutativeN/A
accelerator-lowering-fma.f6496.1
Simplified96.1%
if -5.0000000000000004e-240 < t Initial program 91.3%
Taylor expanded in c_n around 0
/-lowering-/.f64N/A
pow-lowering-pow.f64N/A
/-lowering-/.f64N/A
+-lowering-+.f64N/A
exp-lowering-exp.f64N/A
neg-sub0N/A
--lowering--.f64N/A
pow-lowering-pow.f64N/A
/-lowering-/.f64N/A
+-lowering-+.f64N/A
exp-lowering-exp.f64N/A
neg-sub0N/A
--lowering--.f6493.8
Simplified93.8%
Applied egg-rr96.4%
Taylor expanded in s around 0
+-commutativeN/A
distribute-lft-inN/A
metadata-evalN/A
accelerator-lowering-fma.f64N/A
neg-mul-1N/A
exp-lowering-exp.f64N/A
neg-mul-1N/A
neg-sub0N/A
--lowering--.f6497.6
Simplified97.6%
Taylor expanded in t around 0
associate-*r*N/A
+-commutativeN/A
associate-*r*N/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f6497.0
Simplified97.0%
(FPCore (c_p c_n t s) :precision binary64 (if (<= t -4e-241) (pow (/ 1.0 (fma s (fma s 0.5 -1.0) 2.0)) c_p) (fma -0.5 (* c_p t) 1.0)))
double code(double c_p, double c_n, double t, double s) {
double tmp;
if (t <= -4e-241) {
tmp = pow((1.0 / fma(s, fma(s, 0.5, -1.0), 2.0)), c_p);
} else {
tmp = fma(-0.5, (c_p * t), 1.0);
}
return tmp;
}
function code(c_p, c_n, t, s) tmp = 0.0 if (t <= -4e-241) tmp = Float64(1.0 / fma(s, fma(s, 0.5, -1.0), 2.0)) ^ c_p; else tmp = fma(-0.5, Float64(c_p * t), 1.0); end return tmp end
code[c$95$p_, c$95$n_, t_, s_] := If[LessEqual[t, -4e-241], N[Power[N[(1.0 / N[(s * N[(s * 0.5 + -1.0), $MachinePrecision] + 2.0), $MachinePrecision]), $MachinePrecision], c$95$p], $MachinePrecision], N[(-0.5 * N[(c$95$p * t), $MachinePrecision] + 1.0), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;t \leq -4 \cdot 10^{-241}:\\
\;\;\;\;{\left(\frac{1}{\mathsf{fma}\left(s, \mathsf{fma}\left(s, 0.5, -1\right), 2\right)}\right)}^{c\_p}\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(-0.5, c\_p \cdot t, 1\right)\\
\end{array}
\end{array}
if t < -3.9999999999999999e-241Initial program 86.6%
Taylor expanded in c_p around 0
Simplified93.9%
Taylor expanded in c_n around 0
pow-lowering-pow.f64N/A
neg-mul-1N/A
/-lowering-/.f64N/A
+-lowering-+.f64N/A
exp-lowering-exp.f64N/A
neg-mul-1N/A
neg-sub0N/A
--lowering--.f6492.1
Simplified92.1%
Taylor expanded in s around 0
+-commutativeN/A
accelerator-lowering-fma.f64N/A
sub-negN/A
*-commutativeN/A
metadata-evalN/A
accelerator-lowering-fma.f6495.1
Simplified95.1%
if -3.9999999999999999e-241 < t Initial program 91.3%
Taylor expanded in c_n around 0
/-lowering-/.f64N/A
pow-lowering-pow.f64N/A
/-lowering-/.f64N/A
+-lowering-+.f64N/A
exp-lowering-exp.f64N/A
neg-sub0N/A
--lowering--.f64N/A
pow-lowering-pow.f64N/A
/-lowering-/.f64N/A
+-lowering-+.f64N/A
exp-lowering-exp.f64N/A
neg-sub0N/A
--lowering--.f6493.8
Simplified93.8%
Applied egg-rr96.4%
Taylor expanded in s around 0
+-commutativeN/A
distribute-lft-inN/A
metadata-evalN/A
accelerator-lowering-fma.f64N/A
neg-mul-1N/A
exp-lowering-exp.f64N/A
neg-mul-1N/A
neg-sub0N/A
--lowering--.f6497.6
Simplified97.6%
Taylor expanded in t around 0
associate-*r*N/A
+-commutativeN/A
associate-*r*N/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f6497.0
Simplified97.0%
(FPCore (c_p c_n t s) :precision binary64 (if (<= s -155000000.0) (pow (/ -6.0 (* s (* s s))) c_p) (fma -0.5 (* c_p t) 1.0)))
double code(double c_p, double c_n, double t, double s) {
double tmp;
if (s <= -155000000.0) {
tmp = pow((-6.0 / (s * (s * s))), c_p);
} else {
tmp = fma(-0.5, (c_p * t), 1.0);
}
return tmp;
}
function code(c_p, c_n, t, s) tmp = 0.0 if (s <= -155000000.0) tmp = Float64(-6.0 / Float64(s * Float64(s * s))) ^ c_p; else tmp = fma(-0.5, Float64(c_p * t), 1.0); end return tmp end
code[c$95$p_, c$95$n_, t_, s_] := If[LessEqual[s, -155000000.0], N[Power[N[(-6.0 / N[(s * N[(s * s), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], c$95$p], $MachinePrecision], N[(-0.5 * N[(c$95$p * t), $MachinePrecision] + 1.0), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;s \leq -155000000:\\
\;\;\;\;{\left(\frac{-6}{s \cdot \left(s \cdot s\right)}\right)}^{c\_p}\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(-0.5, c\_p \cdot t, 1\right)\\
\end{array}
\end{array}
if s < -1.55e8Initial program 40.0%
Taylor expanded in c_p around 0
Simplified100.0%
Taylor expanded in c_n around 0
pow-lowering-pow.f64N/A
neg-mul-1N/A
/-lowering-/.f64N/A
+-lowering-+.f64N/A
exp-lowering-exp.f64N/A
neg-mul-1N/A
neg-sub0N/A
--lowering--.f64100.0
Simplified100.0%
Taylor expanded in s around 0
+-commutativeN/A
accelerator-lowering-fma.f64N/A
sub-negN/A
metadata-evalN/A
accelerator-lowering-fma.f64N/A
+-commutativeN/A
*-commutativeN/A
accelerator-lowering-fma.f64100.0
Simplified100.0%
Taylor expanded in s around inf
/-lowering-/.f64N/A
cube-multN/A
unpow2N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64100.0
Simplified100.0%
if -1.55e8 < s Initial program 90.5%
Taylor expanded in c_n around 0
/-lowering-/.f64N/A
pow-lowering-pow.f64N/A
/-lowering-/.f64N/A
+-lowering-+.f64N/A
exp-lowering-exp.f64N/A
neg-sub0N/A
--lowering--.f64N/A
pow-lowering-pow.f64N/A
/-lowering-/.f64N/A
+-lowering-+.f64N/A
exp-lowering-exp.f64N/A
neg-sub0N/A
--lowering--.f6492.6
Simplified92.6%
Applied egg-rr94.2%
Taylor expanded in s around 0
+-commutativeN/A
distribute-lft-inN/A
metadata-evalN/A
accelerator-lowering-fma.f64N/A
neg-mul-1N/A
exp-lowering-exp.f64N/A
neg-mul-1N/A
neg-sub0N/A
--lowering--.f6495.7
Simplified95.7%
Taylor expanded in t around 0
associate-*r*N/A
+-commutativeN/A
associate-*r*N/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f6496.1
Simplified96.1%
(FPCore (c_p c_n t s) :precision binary64 (fma -0.5 (* c_p t) 1.0))
double code(double c_p, double c_n, double t, double s) {
return fma(-0.5, (c_p * t), 1.0);
}
function code(c_p, c_n, t, s) return fma(-0.5, Float64(c_p * t), 1.0) end
code[c$95$p_, c$95$n_, t_, s_] := N[(-0.5 * N[(c$95$p * t), $MachinePrecision] + 1.0), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(-0.5, c\_p \cdot t, 1\right)
\end{array}
Initial program 89.5%
Taylor expanded in c_n around 0
/-lowering-/.f64N/A
pow-lowering-pow.f64N/A
/-lowering-/.f64N/A
+-lowering-+.f64N/A
exp-lowering-exp.f64N/A
neg-sub0N/A
--lowering--.f64N/A
pow-lowering-pow.f64N/A
/-lowering-/.f64N/A
+-lowering-+.f64N/A
exp-lowering-exp.f64N/A
neg-sub0N/A
--lowering--.f6491.6
Simplified91.6%
Applied egg-rr94.3%
Taylor expanded in s around 0
+-commutativeN/A
distribute-lft-inN/A
metadata-evalN/A
accelerator-lowering-fma.f64N/A
neg-mul-1N/A
exp-lowering-exp.f64N/A
neg-mul-1N/A
neg-sub0N/A
--lowering--.f6493.9
Simplified93.9%
Taylor expanded in t around 0
associate-*r*N/A
+-commutativeN/A
associate-*r*N/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f6494.3
Simplified94.3%
(FPCore (c_p c_n t s) :precision binary64 1.0)
/* Constant alternative: per the surrounding report, Herbie reduced the whole
 * expression to 1.0 on the sampled domain; all four arguments are ignored. */
double code(double c_p, double c_n, double t, double s) {
return 1.0;
}
real(8) function code(c_p, c_n, t, s)
real(8), intent (in) :: c_p
real(8), intent (in) :: c_n
real(8), intent (in) :: t
real(8), intent (in) :: s
code = 1.0d0
end function
public static double code(double c_p, double c_n, double t, double s) {
return 1.0;
}
def code(c_p, c_n, t, s): return 1.0
function code(c_p, c_n, t, s) return 1.0 end
function tmp = code(c_p, c_n, t, s) tmp = 1.0; end
code[c$95$p_, c$95$n_, t_, s_] := 1.0
\begin{array}{l}
\\
1
\end{array}
Initial program 89.5%
Taylor expanded in c_n around 0
/-lowering-/.f64N/A
pow-lowering-pow.f64N/A
/-lowering-/.f64N/A
+-lowering-+.f64N/A
exp-lowering-exp.f64N/A
neg-sub0N/A
--lowering--.f64N/A
pow-lowering-pow.f64N/A
/-lowering-/.f64N/A
+-lowering-+.f64N/A
exp-lowering-exp.f64N/A
neg-sub0N/A
--lowering--.f6491.6
Simplified91.6%
Taylor expanded in c_p around 0
Simplified94.2%
(FPCore (c_p c_n t s) :precision binary64 (* (pow (/ (+ 1.0 (exp (- t))) (+ 1.0 (exp (- s)))) c_p) (pow (/ (+ 1.0 (exp t)) (+ 1.0 (exp s))) c_n)))
/* Rewritten ratio ((1+e^-t)/(1+e^-s))^c_p * ((1+e^t)/(1+e^s))^c_n.
 * Algebraically equal to the original sigmoid-power ratio, using the
 * identities sigmoid(s)/sigmoid(t) = (1+e^-t)/(1+e^-s) and
 * (1-sigmoid(s))/(1-sigmoid(t)) = (1+e^t)/(1+e^s). */
double code(double c_p, double c_n, double t, double s) {
return pow(((1.0 + exp(-t)) / (1.0 + exp(-s))), c_p) * pow(((1.0 + exp(t)) / (1.0 + exp(s))), c_n);
}
real(8) function code(c_p, c_n, t, s)
real(8), intent (in) :: c_p
real(8), intent (in) :: c_n
real(8), intent (in) :: t
real(8), intent (in) :: s
code = (((1.0d0 + exp(-t)) / (1.0d0 + exp(-s))) ** c_p) * (((1.0d0 + exp(t)) / (1.0d0 + exp(s))) ** c_n)
end function
public static double code(double c_p, double c_n, double t, double s) {
return Math.pow(((1.0 + Math.exp(-t)) / (1.0 + Math.exp(-s))), c_p) * Math.pow(((1.0 + Math.exp(t)) / (1.0 + Math.exp(s))), c_n);
}
def code(c_p, c_n, t, s): return math.pow(((1.0 + math.exp(-t)) / (1.0 + math.exp(-s))), c_p) * math.pow(((1.0 + math.exp(t)) / (1.0 + math.exp(s))), c_n)
function code(c_p, c_n, t, s) return Float64((Float64(Float64(1.0 + exp(Float64(-t))) / Float64(1.0 + exp(Float64(-s)))) ^ c_p) * (Float64(Float64(1.0 + exp(t)) / Float64(1.0 + exp(s))) ^ c_n)) end
function tmp = code(c_p, c_n, t, s) tmp = (((1.0 + exp(-t)) / (1.0 + exp(-s))) ^ c_p) * (((1.0 + exp(t)) / (1.0 + exp(s))) ^ c_n); end
code[c$95$p_, c$95$n_, t_, s_] := N[(N[Power[N[(N[(1.0 + N[Exp[(-t)], $MachinePrecision]), $MachinePrecision] / N[(1.0 + N[Exp[(-s)], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], c$95$p], $MachinePrecision] * N[Power[N[(N[(1.0 + N[Exp[t], $MachinePrecision]), $MachinePrecision] / N[(1.0 + N[Exp[s], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], c$95$n], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
{\left(\frac{1 + e^{-t}}{1 + e^{-s}}\right)}^{c\_p} \cdot {\left(\frac{1 + e^{t}}{1 + e^{s}}\right)}^{c\_n}
\end{array}
herbie shell --seed 2024197
(FPCore (c_p c_n t s)
:name "Harley's example"
:precision binary64
:pre (and (< 0.0 c_p) (< 0.0 c_n))
:alt
(! :herbie-platform default (* (pow (/ (+ 1 (exp (- t))) (+ 1 (exp (- s)))) c_p) (pow (/ (+ 1 (exp t)) (+ 1 (exp s))) c_n)))
(/ (* (pow (/ 1.0 (+ 1.0 (exp (- s)))) c_p) (pow (- 1.0 (/ 1.0 (+ 1.0 (exp (- s))))) c_n)) (* (pow (/ 1.0 (+ 1.0 (exp (- t)))) c_p) (pow (- 1.0 (/ 1.0 (+ 1.0 (exp (- t))))) c_n))))