
(FPCore (c_p c_n t s)
:precision binary64
(let* ((t_1 (/ 1.0 (+ 1.0 (exp (- t))))) (t_2 (/ 1.0 (+ 1.0 (exp (- s))))))
(/
(* (pow t_2 c_p) (pow (- 1.0 t_2) c_n))
(* (pow t_1 c_p) (pow (- 1.0 t_1) c_n)))))
/* Original program: likelihood ratio of two sigmoid-Bernoulli factors.
 * t_1 = 1/(1+exp(-t)) and t_2 = 1/(1+exp(-s)) are logistic sigmoids; the
 * result is (t_2^c_p * (1-t_2)^c_n) / (t_1^c_p * (1-t_1)^c_n).
 * NOTE(review): when t or s is large, t_x rounds to 1 and (1.0 - t_x)
 * cancels catastrophically — the rewritten alternatives address this. */
double code(double c_p, double c_n, double t, double s) {
double t_1 = 1.0 / (1.0 + exp(-t));
double t_2 = 1.0 / (1.0 + exp(-s));
return (pow(t_2, c_p) * pow((1.0 - t_2), c_n)) / (pow(t_1, c_p) * pow((1.0 - t_1), c_n));
}
real(8) function code(c_p, c_n, t, s)
real(8), intent (in) :: c_p
real(8), intent (in) :: c_n
real(8), intent (in) :: t
real(8), intent (in) :: s
real(8) :: t_1
real(8) :: t_2
t_1 = 1.0d0 / (1.0d0 + exp(-t))
t_2 = 1.0d0 / (1.0d0 + exp(-s))
code = ((t_2 ** c_p) * ((1.0d0 - t_2) ** c_n)) / ((t_1 ** c_p) * ((1.0d0 - t_1) ** c_n))
end function
public static double code(double c_p, double c_n, double t, double s) {
double t_1 = 1.0 / (1.0 + Math.exp(-t));
double t_2 = 1.0 / (1.0 + Math.exp(-s));
return (Math.pow(t_2, c_p) * Math.pow((1.0 - t_2), c_n)) / (Math.pow(t_1, c_p) * Math.pow((1.0 - t_1), c_n));
}
def code(c_p, c_n, t, s):
    """Likelihood ratio of two sigmoid-Bernoulli factors.

    t_1 = sigmoid(t), t_2 = sigmoid(s); returns
    (t_2**c_p * (1 - t_2)**c_n) / (t_1**c_p * (1 - t_1)**c_n).

    Fix: the generated listing collapsed three statements onto one line,
    which is a SyntaxError in Python; split onto separate lines.
    """
    t_1 = 1.0 / (1.0 + math.exp(-t))
    t_2 = 1.0 / (1.0 + math.exp(-s))
    return (math.pow(t_2, c_p) * math.pow((1.0 - t_2), c_n)) / (math.pow(t_1, c_p) * math.pow((1.0 - t_1), c_n))
function code(c_p, c_n, t, s) t_1 = Float64(1.0 / Float64(1.0 + exp(Float64(-t)))) t_2 = Float64(1.0 / Float64(1.0 + exp(Float64(-s)))) return Float64(Float64((t_2 ^ c_p) * (Float64(1.0 - t_2) ^ c_n)) / Float64((t_1 ^ c_p) * (Float64(1.0 - t_1) ^ c_n))) end
function tmp = code(c_p, c_n, t, s) t_1 = 1.0 / (1.0 + exp(-t)); t_2 = 1.0 / (1.0 + exp(-s)); tmp = ((t_2 ^ c_p) * ((1.0 - t_2) ^ c_n)) / ((t_1 ^ c_p) * ((1.0 - t_1) ^ c_n)); end
code[c$95$p_, c$95$n_, t_, s_] := Block[{t$95$1 = N[(1.0 / N[(1.0 + N[Exp[(-t)], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$2 = N[(1.0 / N[(1.0 + N[Exp[(-s)], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, N[(N[(N[Power[t$95$2, c$95$p], $MachinePrecision] * N[Power[N[(1.0 - t$95$2), $MachinePrecision], c$95$n], $MachinePrecision]), $MachinePrecision] / N[(N[Power[t$95$1, c$95$p], $MachinePrecision] * N[Power[N[(1.0 - t$95$1), $MachinePrecision], c$95$n], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_1 := \frac{1}{1 + e^{-t}}\\
t_2 := \frac{1}{1 + e^{-s}}\\
\frac{{t\_2}^{c\_p} \cdot {\left(1 - t\_2\right)}^{c\_n}}{{t\_1}^{c\_p} \cdot {\left(1 - t\_1\right)}^{c\_n}}
\end{array}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 9 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (c_p c_n t s)
:precision binary64
(let* ((t_1 (/ 1.0 (+ 1.0 (exp (- t))))) (t_2 (/ 1.0 (+ 1.0 (exp (- s))))))
(/
(* (pow t_2 c_p) (pow (- 1.0 t_2) c_n))
(* (pow t_1 c_p) (pow (- 1.0 t_1) c_n)))))
/* Original program (repeated listing): likelihood ratio of two
 * sigmoid-Bernoulli factors, t_1 = sigmoid(t), t_2 = sigmoid(s);
 * returns (t_2^c_p * (1-t_2)^c_n) / (t_1^c_p * (1-t_1)^c_n).
 * NOTE(review): (1.0 - t_x) cancels when |t| or |s| is large. */
double code(double c_p, double c_n, double t, double s) {
double t_1 = 1.0 / (1.0 + exp(-t));
double t_2 = 1.0 / (1.0 + exp(-s));
return (pow(t_2, c_p) * pow((1.0 - t_2), c_n)) / (pow(t_1, c_p) * pow((1.0 - t_1), c_n));
}
real(8) function code(c_p, c_n, t, s)
real(8), intent (in) :: c_p
real(8), intent (in) :: c_n
real(8), intent (in) :: t
real(8), intent (in) :: s
real(8) :: t_1
real(8) :: t_2
t_1 = 1.0d0 / (1.0d0 + exp(-t))
t_2 = 1.0d0 / (1.0d0 + exp(-s))
code = ((t_2 ** c_p) * ((1.0d0 - t_2) ** c_n)) / ((t_1 ** c_p) * ((1.0d0 - t_1) ** c_n))
end function
public static double code(double c_p, double c_n, double t, double s) {
double t_1 = 1.0 / (1.0 + Math.exp(-t));
double t_2 = 1.0 / (1.0 + Math.exp(-s));
return (Math.pow(t_2, c_p) * Math.pow((1.0 - t_2), c_n)) / (Math.pow(t_1, c_p) * Math.pow((1.0 - t_1), c_n));
}
def code(c_p, c_n, t, s):
    """Likelihood ratio of two sigmoid-Bernoulli factors (repeated listing).

    t_1 = sigmoid(t), t_2 = sigmoid(s); returns
    (t_2**c_p * (1 - t_2)**c_n) / (t_1**c_p * (1 - t_1)**c_n).

    Fix: the generated one-liner jammed three statements on one line,
    which is invalid Python; split onto separate lines.
    """
    t_1 = 1.0 / (1.0 + math.exp(-t))
    t_2 = 1.0 / (1.0 + math.exp(-s))
    return (math.pow(t_2, c_p) * math.pow((1.0 - t_2), c_n)) / (math.pow(t_1, c_p) * math.pow((1.0 - t_1), c_n))
function code(c_p, c_n, t, s) t_1 = Float64(1.0 / Float64(1.0 + exp(Float64(-t)))) t_2 = Float64(1.0 / Float64(1.0 + exp(Float64(-s)))) return Float64(Float64((t_2 ^ c_p) * (Float64(1.0 - t_2) ^ c_n)) / Float64((t_1 ^ c_p) * (Float64(1.0 - t_1) ^ c_n))) end
function tmp = code(c_p, c_n, t, s) t_1 = 1.0 / (1.0 + exp(-t)); t_2 = 1.0 / (1.0 + exp(-s)); tmp = ((t_2 ^ c_p) * ((1.0 - t_2) ^ c_n)) / ((t_1 ^ c_p) * ((1.0 - t_1) ^ c_n)); end
code[c$95$p_, c$95$n_, t_, s_] := Block[{t$95$1 = N[(1.0 / N[(1.0 + N[Exp[(-t)], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$2 = N[(1.0 / N[(1.0 + N[Exp[(-s)], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, N[(N[(N[Power[t$95$2, c$95$p], $MachinePrecision] * N[Power[N[(1.0 - t$95$2), $MachinePrecision], c$95$n], $MachinePrecision]), $MachinePrecision] / N[(N[Power[t$95$1, c$95$p], $MachinePrecision] * N[Power[N[(1.0 - t$95$1), $MachinePrecision], c$95$n], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_1 := \frac{1}{1 + e^{-t}}\\
t_2 := \frac{1}{1 + e^{-s}}\\
\frac{{t\_2}^{c\_p} \cdot {\left(1 - t\_2\right)}^{c\_n}}{{t\_1}^{c\_p} \cdot {\left(1 - t\_1\right)}^{c\_n}}
\end{array}
\end{array}
(FPCore (c_p c_n t s) :precision binary64 (if (<= (- 0.0 s) 100000000.0) (pow (/ 1.0 (+ 0.5 (/ 0.5 (exp t)))) (- 0.0 c_p)) (pow (/ 1.0 (+ 1.0 (exp (- 0.0 s)))) c_p)))
/* Herbie alternative: c_n is unused (Taylor-expanded away per the trace
 * below).  Branches on -s <= 1e8: the common branch evaluates
 * (1/(0.5 + 0.5*exp(-t)))^(-c_p); the far branch evaluates
 * sigmoid(s)^c_p directly.  c_n and one of t/s are ignored per branch. */
double code(double c_p, double c_n, double t, double s) {
double tmp;
if ((0.0 - s) <= 100000000.0) {
tmp = pow((1.0 / (0.5 + (0.5 / exp(t)))), (0.0 - c_p));
} else {
tmp = pow((1.0 / (1.0 + exp((0.0 - s)))), c_p);
}
return tmp;
}
real(8) function code(c_p, c_n, t, s)
real(8), intent (in) :: c_p
real(8), intent (in) :: c_n
real(8), intent (in) :: t
real(8), intent (in) :: s
real(8) :: tmp
if ((0.0d0 - s) <= 100000000.0d0) then
tmp = (1.0d0 / (0.5d0 + (0.5d0 / exp(t)))) ** (0.0d0 - c_p)
else
tmp = (1.0d0 / (1.0d0 + exp((0.0d0 - s)))) ** c_p
end if
code = tmp
end function
public static double code(double c_p, double c_n, double t, double s) {
double tmp;
if ((0.0 - s) <= 100000000.0) {
tmp = Math.pow((1.0 / (0.5 + (0.5 / Math.exp(t)))), (0.0 - c_p));
} else {
tmp = Math.pow((1.0 / (1.0 + Math.exp((0.0 - s)))), c_p);
}
return tmp;
}
def code(c_p, c_n, t, s):
    """Herbie alternative: c_n-free approximation, branching on -s <= 1e8.

    Fix: the generated listing collapsed the if/else onto one line, which
    is invalid Python; restored proper statement layout.

    NOTE(review): the else branch needs s < -1e8, where math.exp(-s)
    overflows and raises OverflowError (the C rendering yields inf
    instead) — kept as generated; confirm intended behavior.
    """
    tmp = 0
    if (0.0 - s) <= 100000000.0:
        tmp = math.pow((1.0 / (0.5 + (0.5 / math.exp(t)))), (0.0 - c_p))
    else:
        tmp = math.pow((1.0 / (1.0 + math.exp((0.0 - s)))), c_p)
    return tmp
function code(c_p, c_n, t, s) tmp = 0.0 if (Float64(0.0 - s) <= 100000000.0) tmp = Float64(1.0 / Float64(0.5 + Float64(0.5 / exp(t)))) ^ Float64(0.0 - c_p); else tmp = Float64(1.0 / Float64(1.0 + exp(Float64(0.0 - s)))) ^ c_p; end return tmp end
function tmp_2 = code(c_p, c_n, t, s) tmp = 0.0; if ((0.0 - s) <= 100000000.0) tmp = (1.0 / (0.5 + (0.5 / exp(t)))) ^ (0.0 - c_p); else tmp = (1.0 / (1.0 + exp((0.0 - s)))) ^ c_p; end tmp_2 = tmp; end
code[c$95$p_, c$95$n_, t_, s_] := If[LessEqual[N[(0.0 - s), $MachinePrecision], 100000000.0], N[Power[N[(1.0 / N[(0.5 + N[(0.5 / N[Exp[t], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(0.0 - c$95$p), $MachinePrecision]], $MachinePrecision], N[Power[N[(1.0 / N[(1.0 + N[Exp[N[(0.0 - s), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], c$95$p], $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;0 - s \leq 100000000:\\
\;\;\;\;{\left(\frac{1}{0.5 + \frac{0.5}{e^{t}}}\right)}^{\left(0 - c\_p\right)}\\
\mathbf{else}:\\
\;\;\;\;{\left(\frac{1}{1 + e^{0 - s}}\right)}^{c\_p}\\
\end{array}
\end{array}
if (neg.f64 s) < 1e8 — Initial program 93.7%
Taylor expanded in c_n around 0
/-lowering-/.f64N/A
pow-lowering-pow.f64N/A
/-lowering-/.f64N/A
neg-mul-1N/A
+-lowering-+.f64N/A
exp-lowering-exp.f64N/A
neg-mul-1N/A
neg-sub0N/A
--lowering--.f64N/A
pow-lowering-pow.f64N/A
/-lowering-/.f64N/A
+-lowering-+.f64N/A
exp-negN/A
/-lowering-/.f64N/A
exp-lowering-exp.f6495.0%
Simplified95.0%
Taylor expanded in s around 0
pow-lowering-pow.f6496.1%
Simplified96.1%
clear-numN/A
/-lowering-/.f64N/A
div-invN/A
inv-powN/A
pow-powN/A
neg-mul-1N/A
pow-flipN/A
pow-prod-downN/A
pow-lowering-pow.f64N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
rec-expN/A
exp-lowering-exp.f64N/A
neg-sub0N/A
--lowering--.f64N/A
neg-sub0N/A
--lowering--.f6498.5%
Applied egg-rr98.5%
pow-flipN/A
neg-mul-1N/A
pow-unpowN/A
pow-lowering-pow.f64N/A
unpow-1N/A
/-lowering-/.f64N/A
+-commutativeN/A
distribute-rgt1-inN/A
+-lowering-+.f64N/A
exp-diffN/A
1-expN/A
associate-*l/N/A
metadata-evalN/A
/-lowering-/.f64N/A
exp-lowering-exp.f64N/A
--lowering--.f6498.5%
Applied egg-rr98.5%
if 1e8 < (neg.f64 s) Initial program 50.0%
Taylor expanded in c_p around 0
Simplified66.7%
Taylor expanded in c_n around 0
neg-mul-1N/A
neg-mul-1N/A
rec-expN/A
pow-lowering-pow.f64N/A
/-lowering-/.f64N/A
+-lowering-+.f64N/A
rec-expN/A
neg-mul-1N/A
exp-lowering-exp.f64N/A
neg-mul-1N/A
neg-sub0N/A
--lowering--.f64100.0%
Simplified100.0%
Final simplification98.6%
(FPCore (c_p c_n t s)
:precision binary64
(exp
(+
(*
c_n
(log
(/
(+ 1.0 (/ 1.0 (+ -1.0 (/ -1.0 (exp s)))))
(+ 1.0 (/ 1.0 (+ -1.0 (/ -1.0 (exp t))))))))
(*
c_p
(+
(log (/ 2.0 (+ 1.0 (exp (- 0.0 s)))))
(* t (+ (* t (+ 0.125 (* -0.005208333333333333 (* t t)))) -0.5)))))))
/* Herbie alternative: evaluates the ratio in log space as
 * exp(c_n*log(ratio of (1 - sigmoid) terms) + c_p*(log(2/(1+exp(-s)))
 * + polynomial in t)).  The cubic-in-t polynomial
 * t*((t*(0.125 - 0.005208333333333333*t^2)) - 0.5) appears to be a
 * Taylor approximation around t = 0 (see derivation trace) — only
 * accurate for small |t|. */
double code(double c_p, double c_n, double t, double s) {
return exp(((c_n * log(((1.0 + (1.0 / (-1.0 + (-1.0 / exp(s))))) / (1.0 + (1.0 / (-1.0 + (-1.0 / exp(t)))))))) + (c_p * (log((2.0 / (1.0 + exp((0.0 - s))))) + (t * ((t * (0.125 + (-0.005208333333333333 * (t * t)))) + -0.5))))));
}
real(8) function code(c_p, c_n, t, s)
real(8), intent (in) :: c_p
real(8), intent (in) :: c_n
real(8), intent (in) :: t
real(8), intent (in) :: s
code = exp(((c_n * log(((1.0d0 + (1.0d0 / ((-1.0d0) + ((-1.0d0) / exp(s))))) / (1.0d0 + (1.0d0 / ((-1.0d0) + ((-1.0d0) / exp(t)))))))) + (c_p * (log((2.0d0 / (1.0d0 + exp((0.0d0 - s))))) + (t * ((t * (0.125d0 + ((-0.005208333333333333d0) * (t * t)))) + (-0.5d0)))))))
end function
public static double code(double c_p, double c_n, double t, double s) {
return Math.exp(((c_n * Math.log(((1.0 + (1.0 / (-1.0 + (-1.0 / Math.exp(s))))) / (1.0 + (1.0 / (-1.0 + (-1.0 / Math.exp(t)))))))) + (c_p * (Math.log((2.0 / (1.0 + Math.exp((0.0 - s))))) + (t * ((t * (0.125 + (-0.005208333333333333 * (t * t)))) + -0.5))))));
}
def code(c_p, c_n, t, s): return math.exp(((c_n * math.log(((1.0 + (1.0 / (-1.0 + (-1.0 / math.exp(s))))) / (1.0 + (1.0 / (-1.0 + (-1.0 / math.exp(t)))))))) + (c_p * (math.log((2.0 / (1.0 + math.exp((0.0 - s))))) + (t * ((t * (0.125 + (-0.005208333333333333 * (t * t)))) + -0.5))))))
function code(c_p, c_n, t, s) return exp(Float64(Float64(c_n * log(Float64(Float64(1.0 + Float64(1.0 / Float64(-1.0 + Float64(-1.0 / exp(s))))) / Float64(1.0 + Float64(1.0 / Float64(-1.0 + Float64(-1.0 / exp(t)))))))) + Float64(c_p * Float64(log(Float64(2.0 / Float64(1.0 + exp(Float64(0.0 - s))))) + Float64(t * Float64(Float64(t * Float64(0.125 + Float64(-0.005208333333333333 * Float64(t * t)))) + -0.5)))))) end
function tmp = code(c_p, c_n, t, s) tmp = exp(((c_n * log(((1.0 + (1.0 / (-1.0 + (-1.0 / exp(s))))) / (1.0 + (1.0 / (-1.0 + (-1.0 / exp(t)))))))) + (c_p * (log((2.0 / (1.0 + exp((0.0 - s))))) + (t * ((t * (0.125 + (-0.005208333333333333 * (t * t)))) + -0.5)))))); end
code[c$95$p_, c$95$n_, t_, s_] := N[Exp[N[(N[(c$95$n * N[Log[N[(N[(1.0 + N[(1.0 / N[(-1.0 + N[(-1.0 / N[Exp[s], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(1.0 + N[(1.0 / N[(-1.0 + N[(-1.0 / N[Exp[t], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] + N[(c$95$p * N[(N[Log[N[(2.0 / N[(1.0 + N[Exp[N[(0.0 - s), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] + N[(t * N[(N[(t * N[(0.125 + N[(-0.005208333333333333 * N[(t * t), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + -0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
e^{c\_n \cdot \log \left(\frac{1 + \frac{1}{-1 + \frac{-1}{e^{s}}}}{1 + \frac{1}{-1 + \frac{-1}{e^{t}}}}\right) + c\_p \cdot \left(\log \left(\frac{2}{1 + e^{0 - s}}\right) + t \cdot \left(t \cdot \left(0.125 + -0.005208333333333333 \cdot \left(t \cdot t\right)\right) + -0.5\right)\right)}
\end{array}
Initial program 92.6%
Applied egg-rr97.3%
Taylor expanded in t around 0
+-lowering-+.f64N/A
log-lowering-log.f64N/A
/-lowering-/.f64N/A
+-lowering-+.f64N/A
rec-expN/A
neg-mul-1N/A
exp-lowering-exp.f64N/A
neg-mul-1N/A
neg-sub0N/A
--lowering--.f64N/A
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
+-lowering-+.f64N/A
Simplified98.1%
(FPCore (c_p c_n t s) :precision binary64 (if (<= s -750000000.0) (pow (/ 1.0 (+ 1.0 (exp (- 0.0 s)))) c_p) (pow (+ 0.5 (/ 0.5 (exp t))) c_p)))
/* Herbie alternative: c_n is unused (Taylor-expanded away).  For
 * s <= -7.5e8 evaluates sigmoid(s)^c_p directly; otherwise evaluates
 * (0.5 + 0.5*exp(-t))^c_p.  Each branch ignores one of t/s. */
double code(double c_p, double c_n, double t, double s) {
double tmp;
if (s <= -750000000.0) {
tmp = pow((1.0 / (1.0 + exp((0.0 - s)))), c_p);
} else {
tmp = pow((0.5 + (0.5 / exp(t))), c_p);
}
return tmp;
}
real(8) function code(c_p, c_n, t, s)
real(8), intent (in) :: c_p
real(8), intent (in) :: c_n
real(8), intent (in) :: t
real(8), intent (in) :: s
real(8) :: tmp
if (s <= (-750000000.0d0)) then
tmp = (1.0d0 / (1.0d0 + exp((0.0d0 - s)))) ** c_p
else
tmp = (0.5d0 + (0.5d0 / exp(t))) ** c_p
end if
code = tmp
end function
public static double code(double c_p, double c_n, double t, double s) {
double tmp;
if (s <= -750000000.0) {
tmp = Math.pow((1.0 / (1.0 + Math.exp((0.0 - s)))), c_p);
} else {
tmp = Math.pow((0.5 + (0.5 / Math.exp(t))), c_p);
}
return tmp;
}
def code(c_p, c_n, t, s):
    """Herbie alternative: c_n-free, branching on s <= -7.5e8.

    Fix: the generated listing collapsed the if/else onto one line,
    which is invalid Python; restored proper statement layout.

    NOTE(review): in the s <= -7.5e8 branch math.exp(-s) overflows and
    raises OverflowError (the C rendering yields inf) — kept as
    generated; confirm intended behavior.
    """
    tmp = 0
    if s <= -750000000.0:
        tmp = math.pow((1.0 / (1.0 + math.exp((0.0 - s)))), c_p)
    else:
        tmp = math.pow((0.5 + (0.5 / math.exp(t))), c_p)
    return tmp
function code(c_p, c_n, t, s) tmp = 0.0 if (s <= -750000000.0) tmp = Float64(1.0 / Float64(1.0 + exp(Float64(0.0 - s)))) ^ c_p; else tmp = Float64(0.5 + Float64(0.5 / exp(t))) ^ c_p; end return tmp end
function tmp_2 = code(c_p, c_n, t, s) tmp = 0.0; if (s <= -750000000.0) tmp = (1.0 / (1.0 + exp((0.0 - s)))) ^ c_p; else tmp = (0.5 + (0.5 / exp(t))) ^ c_p; end tmp_2 = tmp; end
code[c$95$p_, c$95$n_, t_, s_] := If[LessEqual[s, -750000000.0], N[Power[N[(1.0 / N[(1.0 + N[Exp[N[(0.0 - s), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], c$95$p], $MachinePrecision], N[Power[N[(0.5 + N[(0.5 / N[Exp[t], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], c$95$p], $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;s \leq -750000000:\\
\;\;\;\;{\left(\frac{1}{1 + e^{0 - s}}\right)}^{c\_p}\\
\mathbf{else}:\\
\;\;\;\;{\left(0.5 + \frac{0.5}{e^{t}}\right)}^{c\_p}\\
\end{array}
\end{array}
if s < -7.5e8 — Initial program 50.0%
Taylor expanded in c_p around 0
Simplified66.7%
Taylor expanded in c_n around 0
neg-mul-1N/A
neg-mul-1N/A
rec-expN/A
pow-lowering-pow.f64N/A
/-lowering-/.f64N/A
+-lowering-+.f64N/A
rec-expN/A
neg-mul-1N/A
exp-lowering-exp.f64N/A
neg-mul-1N/A
neg-sub0N/A
--lowering--.f64100.0%
Simplified100.0%
if -7.5e8 < s Initial program 93.7%
Taylor expanded in c_n around 0
/-lowering-/.f64N/A
pow-lowering-pow.f64N/A
/-lowering-/.f64N/A
neg-mul-1N/A
+-lowering-+.f64N/A
exp-lowering-exp.f64N/A
neg-mul-1N/A
neg-sub0N/A
--lowering--.f64N/A
pow-lowering-pow.f64N/A
/-lowering-/.f64N/A
+-lowering-+.f64N/A
exp-negN/A
/-lowering-/.f64N/A
exp-lowering-exp.f6495.0%
Simplified95.0%
Taylor expanded in s around 0
pow-lowering-pow.f6496.1%
Simplified96.1%
clear-numN/A
/-lowering-/.f64N/A
div-invN/A
inv-powN/A
pow-powN/A
neg-mul-1N/A
pow-flipN/A
pow-prod-downN/A
pow-lowering-pow.f64N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
rec-expN/A
exp-lowering-exp.f64N/A
neg-sub0N/A
--lowering--.f64N/A
neg-sub0N/A
--lowering--.f6498.5%
Applied egg-rr98.5%
pow-subN/A
metadata-evalN/A
remove-double-divN/A
pow-lowering-pow.f64N/A
+-commutativeN/A
distribute-rgt1-inN/A
+-lowering-+.f64N/A
exp-diffN/A
1-expN/A
associate-*l/N/A
metadata-evalN/A
/-lowering-/.f64N/A
exp-lowering-exp.f6498.5%
Applied egg-rr98.5%
(FPCore (c_p c_n t s) :precision binary64 (pow (+ 0.5 (/ 0.5 (exp t))) c_p))
/* Herbie alternative: branch-free form (0.5 + 0.5*exp(-t))^c_p.
 * c_n and s are unused. */
double code(double c_p, double c_n, double t, double s) {
return pow((0.5 + (0.5 / exp(t))), c_p);
}
real(8) function code(c_p, c_n, t, s)
real(8), intent (in) :: c_p
real(8), intent (in) :: c_n
real(8), intent (in) :: t
real(8), intent (in) :: s
code = (0.5d0 + (0.5d0 / exp(t))) ** c_p
end function
public static double code(double c_p, double c_n, double t, double s) {
return Math.pow((0.5 + (0.5 / Math.exp(t))), c_p);
}
def code(c_p, c_n, t, s): return math.pow((0.5 + (0.5 / math.exp(t))), c_p)
function code(c_p, c_n, t, s) return Float64(0.5 + Float64(0.5 / exp(t))) ^ c_p end
function tmp = code(c_p, c_n, t, s) tmp = (0.5 + (0.5 / exp(t))) ^ c_p; end
code[c$95$p_, c$95$n_, t_, s_] := N[Power[N[(0.5 + N[(0.5 / N[Exp[t], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], c$95$p], $MachinePrecision]
\begin{array}{l}
\\
{\left(0.5 + \frac{0.5}{e^{t}}\right)}^{c\_p}
\end{array}
Initial program 92.6%
Taylor expanded in c_n around 0
/-lowering-/.f64N/A
pow-lowering-pow.f64N/A
/-lowering-/.f64N/A
neg-mul-1N/A
+-lowering-+.f64N/A
exp-lowering-exp.f64N/A
neg-mul-1N/A
neg-sub0N/A
--lowering--.f64N/A
pow-lowering-pow.f64N/A
/-lowering-/.f64N/A
+-lowering-+.f64N/A
exp-negN/A
/-lowering-/.f64N/A
exp-lowering-exp.f6494.3%
Simplified94.3%
Taylor expanded in s around 0
pow-lowering-pow.f6493.9%
Simplified93.9%
clear-numN/A
/-lowering-/.f64N/A
div-invN/A
inv-powN/A
pow-powN/A
neg-mul-1N/A
pow-flipN/A
pow-prod-downN/A
pow-lowering-pow.f64N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
rec-expN/A
exp-lowering-exp.f64N/A
neg-sub0N/A
--lowering--.f64N/A
neg-sub0N/A
--lowering--.f6496.3%
Applied egg-rr96.3%
pow-subN/A
metadata-evalN/A
remove-double-divN/A
pow-lowering-pow.f64N/A
+-commutativeN/A
distribute-rgt1-inN/A
+-lowering-+.f64N/A
exp-diffN/A
1-expN/A
associate-*l/N/A
metadata-evalN/A
/-lowering-/.f64N/A
exp-lowering-exp.f6496.3%
Applied egg-rr96.3%
(FPCore (c_p c_n t s)
:precision binary64
(/
1.0
(+
1.0
(*
t
(+
(* c_p 0.5)
(*
t
(+
(+ (* c_p -0.125) (* c_p (* c_p 0.125)))
(*
t
(+
(* c_p (* c_p -0.0625))
(* c_p (* (* c_p c_p) 0.020833333333333332)))))))))))
/* Herbie alternative: reciprocal of a Horner-form polynomial, cubic in t
 * with coefficients polynomial in c_p (Taylor expansion in t around 0
 * per the trace).  c_n and s are unused; accurate only for small |t|. */
double code(double c_p, double c_n, double t, double s) {
return 1.0 / (1.0 + (t * ((c_p * 0.5) + (t * (((c_p * -0.125) + (c_p * (c_p * 0.125))) + (t * ((c_p * (c_p * -0.0625)) + (c_p * ((c_p * c_p) * 0.020833333333333332)))))))));
}
real(8) function code(c_p, c_n, t, s)
real(8), intent (in) :: c_p
real(8), intent (in) :: c_n
real(8), intent (in) :: t
real(8), intent (in) :: s
code = 1.0d0 / (1.0d0 + (t * ((c_p * 0.5d0) + (t * (((c_p * (-0.125d0)) + (c_p * (c_p * 0.125d0))) + (t * ((c_p * (c_p * (-0.0625d0))) + (c_p * ((c_p * c_p) * 0.020833333333333332d0)))))))))
end function
public static double code(double c_p, double c_n, double t, double s) {
return 1.0 / (1.0 + (t * ((c_p * 0.5) + (t * (((c_p * -0.125) + (c_p * (c_p * 0.125))) + (t * ((c_p * (c_p * -0.0625)) + (c_p * ((c_p * c_p) * 0.020833333333333332)))))))));
}
def code(c_p, c_n, t, s): return 1.0 / (1.0 + (t * ((c_p * 0.5) + (t * (((c_p * -0.125) + (c_p * (c_p * 0.125))) + (t * ((c_p * (c_p * -0.0625)) + (c_p * ((c_p * c_p) * 0.020833333333333332)))))))))
function code(c_p, c_n, t, s) return Float64(1.0 / Float64(1.0 + Float64(t * Float64(Float64(c_p * 0.5) + Float64(t * Float64(Float64(Float64(c_p * -0.125) + Float64(c_p * Float64(c_p * 0.125))) + Float64(t * Float64(Float64(c_p * Float64(c_p * -0.0625)) + Float64(c_p * Float64(Float64(c_p * c_p) * 0.020833333333333332)))))))))) end
function tmp = code(c_p, c_n, t, s) tmp = 1.0 / (1.0 + (t * ((c_p * 0.5) + (t * (((c_p * -0.125) + (c_p * (c_p * 0.125))) + (t * ((c_p * (c_p * -0.0625)) + (c_p * ((c_p * c_p) * 0.020833333333333332))))))))); end
code[c$95$p_, c$95$n_, t_, s_] := N[(1.0 / N[(1.0 + N[(t * N[(N[(c$95$p * 0.5), $MachinePrecision] + N[(t * N[(N[(N[(c$95$p * -0.125), $MachinePrecision] + N[(c$95$p * N[(c$95$p * 0.125), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(t * N[(N[(c$95$p * N[(c$95$p * -0.0625), $MachinePrecision]), $MachinePrecision] + N[(c$95$p * N[(N[(c$95$p * c$95$p), $MachinePrecision] * 0.020833333333333332), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{1 + t \cdot \left(c\_p \cdot 0.5 + t \cdot \left(\left(c\_p \cdot -0.125 + c\_p \cdot \left(c\_p \cdot 0.125\right)\right) + t \cdot \left(c\_p \cdot \left(c\_p \cdot -0.0625\right) + c\_p \cdot \left(\left(c\_p \cdot c\_p\right) \cdot 0.020833333333333332\right)\right)\right)\right)}
\end{array}
Initial program 92.6%
Taylor expanded in c_n around 0
/-lowering-/.f64N/A
pow-lowering-pow.f64N/A
/-lowering-/.f64N/A
neg-mul-1N/A
+-lowering-+.f64N/A
exp-lowering-exp.f64N/A
neg-mul-1N/A
neg-sub0N/A
--lowering--.f64N/A
pow-lowering-pow.f64N/A
/-lowering-/.f64N/A
+-lowering-+.f64N/A
exp-negN/A
/-lowering-/.f64N/A
exp-lowering-exp.f6494.3%
Simplified94.3%
Taylor expanded in s around 0
pow-lowering-pow.f6493.9%
Simplified93.9%
clear-numN/A
/-lowering-/.f64N/A
div-invN/A
inv-powN/A
pow-powN/A
neg-mul-1N/A
pow-flipN/A
pow-prod-downN/A
pow-lowering-pow.f64N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
rec-expN/A
exp-lowering-exp.f64N/A
neg-sub0N/A
--lowering--.f64N/A
neg-sub0N/A
--lowering--.f6496.3%
Applied egg-rr96.3%
Taylor expanded in t around 0
+-lowering-+.f64N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
associate-+r+N/A
+-lowering-+.f64N/A
Simplified96.2%
Final simplification96.2%
(FPCore (c_p c_n t s) :precision binary64 (/ 1.0 (+ 1.0 (* t (+ (* c_p 0.5) (* t (+ (* c_p -0.125) (* c_p (* c_p 0.125)))))))))
/* Herbie alternative: like the cubic variant but truncated to quadratic
 * in t (Horner form).  c_n and s are unused; small-|t| approximation. */
double code(double c_p, double c_n, double t, double s) {
return 1.0 / (1.0 + (t * ((c_p * 0.5) + (t * ((c_p * -0.125) + (c_p * (c_p * 0.125)))))));
}
real(8) function code(c_p, c_n, t, s)
real(8), intent (in) :: c_p
real(8), intent (in) :: c_n
real(8), intent (in) :: t
real(8), intent (in) :: s
code = 1.0d0 / (1.0d0 + (t * ((c_p * 0.5d0) + (t * ((c_p * (-0.125d0)) + (c_p * (c_p * 0.125d0)))))))
end function
public static double code(double c_p, double c_n, double t, double s) {
return 1.0 / (1.0 + (t * ((c_p * 0.5) + (t * ((c_p * -0.125) + (c_p * (c_p * 0.125)))))));
}
def code(c_p, c_n, t, s): return 1.0 / (1.0 + (t * ((c_p * 0.5) + (t * ((c_p * -0.125) + (c_p * (c_p * 0.125)))))))
function code(c_p, c_n, t, s) return Float64(1.0 / Float64(1.0 + Float64(t * Float64(Float64(c_p * 0.5) + Float64(t * Float64(Float64(c_p * -0.125) + Float64(c_p * Float64(c_p * 0.125)))))))) end
function tmp = code(c_p, c_n, t, s) tmp = 1.0 / (1.0 + (t * ((c_p * 0.5) + (t * ((c_p * -0.125) + (c_p * (c_p * 0.125))))))); end
code[c$95$p_, c$95$n_, t_, s_] := N[(1.0 / N[(1.0 + N[(t * N[(N[(c$95$p * 0.5), $MachinePrecision] + N[(t * N[(N[(c$95$p * -0.125), $MachinePrecision] + N[(c$95$p * N[(c$95$p * 0.125), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{1 + t \cdot \left(c\_p \cdot 0.5 + t \cdot \left(c\_p \cdot -0.125 + c\_p \cdot \left(c\_p \cdot 0.125\right)\right)\right)}
\end{array}
Initial program 92.6%
Taylor expanded in c_n around 0
/-lowering-/.f64N/A
pow-lowering-pow.f64N/A
/-lowering-/.f64N/A
neg-mul-1N/A
+-lowering-+.f64N/A
exp-lowering-exp.f64N/A
neg-mul-1N/A
neg-sub0N/A
--lowering--.f64N/A
pow-lowering-pow.f64N/A
/-lowering-/.f64N/A
+-lowering-+.f64N/A
exp-negN/A
/-lowering-/.f64N/A
exp-lowering-exp.f6494.3%
Simplified94.3%
Taylor expanded in s around 0
pow-lowering-pow.f6493.9%
Simplified93.9%
clear-numN/A
/-lowering-/.f64N/A
div-invN/A
inv-powN/A
pow-powN/A
neg-mul-1N/A
pow-flipN/A
pow-prod-downN/A
pow-lowering-pow.f64N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
rec-expN/A
exp-lowering-exp.f64N/A
neg-sub0N/A
--lowering--.f64N/A
neg-sub0N/A
--lowering--.f6496.3%
Applied egg-rr96.3%
Taylor expanded in t around 0
+-lowering-+.f64N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
unpow2N/A
associate-*r*N/A
*-lowering-*.f64N/A
*-lowering-*.f6496.1%
Simplified96.1%
Final simplification96.1%
(FPCore (c_p c_n t s) :precision binary64 (/ 1.0 (+ 1.0 (* t (* c_p 0.5)))))
/* Herbie alternative: first-order reciprocal approximation
 * 1/(1 + 0.5*c_p*t).  c_n and s are unused. */
double code(double c_p, double c_n, double t, double s) {
return 1.0 / (1.0 + (t * (c_p * 0.5)));
}
real(8) function code(c_p, c_n, t, s)
real(8), intent (in) :: c_p
real(8), intent (in) :: c_n
real(8), intent (in) :: t
real(8), intent (in) :: s
code = 1.0d0 / (1.0d0 + (t * (c_p * 0.5d0)))
end function
public static double code(double c_p, double c_n, double t, double s) {
return 1.0 / (1.0 + (t * (c_p * 0.5)));
}
def code(c_p, c_n, t, s): return 1.0 / (1.0 + (t * (c_p * 0.5)))
function code(c_p, c_n, t, s) return Float64(1.0 / Float64(1.0 + Float64(t * Float64(c_p * 0.5)))) end
function tmp = code(c_p, c_n, t, s) tmp = 1.0 / (1.0 + (t * (c_p * 0.5))); end
code[c$95$p_, c$95$n_, t_, s_] := N[(1.0 / N[(1.0 + N[(t * N[(c$95$p * 0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{1 + t \cdot \left(c\_p \cdot 0.5\right)}
\end{array}
Initial program 92.6%
Taylor expanded in c_n around 0
/-lowering-/.f64N/A
pow-lowering-pow.f64N/A
/-lowering-/.f64N/A
neg-mul-1N/A
+-lowering-+.f64N/A
exp-lowering-exp.f64N/A
neg-mul-1N/A
neg-sub0N/A
--lowering--.f64N/A
pow-lowering-pow.f64N/A
/-lowering-/.f64N/A
+-lowering-+.f64N/A
exp-negN/A
/-lowering-/.f64N/A
exp-lowering-exp.f6494.3%
Simplified94.3%
Taylor expanded in s around 0
pow-lowering-pow.f6493.9%
Simplified93.9%
clear-numN/A
/-lowering-/.f64N/A
div-invN/A
inv-powN/A
pow-powN/A
neg-mul-1N/A
pow-flipN/A
pow-prod-downN/A
pow-lowering-pow.f64N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
rec-expN/A
exp-lowering-exp.f64N/A
neg-sub0N/A
--lowering--.f64N/A
neg-sub0N/A
--lowering--.f6496.3%
Applied egg-rr96.3%
Taylor expanded in t around 0
associate-*r*N/A
+-lowering-+.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f6496.1%
Simplified96.1%
(FPCore (c_p c_n t s) :precision binary64 (+ 1.0 (* t (* c_p -0.5))))
/* Herbie alternative: linearization 1 - 0.5*c_p*t.
 * c_n and s are unused. */
double code(double c_p, double c_n, double t, double s) {
return 1.0 + (t * (c_p * -0.5));
}
real(8) function code(c_p, c_n, t, s)
real(8), intent (in) :: c_p
real(8), intent (in) :: c_n
real(8), intent (in) :: t
real(8), intent (in) :: s
code = 1.0d0 + (t * (c_p * (-0.5d0)))
end function
public static double code(double c_p, double c_n, double t, double s) {
return 1.0 + (t * (c_p * -0.5));
}
def code(c_p, c_n, t, s): return 1.0 + (t * (c_p * -0.5))
function code(c_p, c_n, t, s) return Float64(1.0 + Float64(t * Float64(c_p * -0.5))) end
function tmp = code(c_p, c_n, t, s) tmp = 1.0 + (t * (c_p * -0.5)); end
code[c$95$p_, c$95$n_, t_, s_] := N[(1.0 + N[(t * N[(c$95$p * -0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
1 + t \cdot \left(c\_p \cdot -0.5\right)
\end{array}
Initial program 92.6%
Taylor expanded in c_n around 0
/-lowering-/.f64N/A
pow-lowering-pow.f64N/A
/-lowering-/.f64N/A
neg-mul-1N/A
+-lowering-+.f64N/A
exp-lowering-exp.f64N/A
neg-mul-1N/A
neg-sub0N/A
--lowering--.f64N/A
pow-lowering-pow.f64N/A
/-lowering-/.f64N/A
+-lowering-+.f64N/A
exp-negN/A
/-lowering-/.f64N/A
exp-lowering-exp.f6494.3%
Simplified94.3%
Taylor expanded in s around 0
pow-lowering-pow.f6493.9%
Simplified93.9%
Taylor expanded in t around 0
associate-*r*N/A
+-lowering-+.f64N/A
associate-*r*N/A
*-commutativeN/A
*-commutativeN/A
associate-*r*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f6496.1%
Simplified96.1%
(FPCore (c_p c_n t s) :precision binary64 1.0)
/* Herbie alternative: zeroth-order approximation — the constant 1.0
 * (Taylor expansion in c_p and c_n around 0).  All arguments unused. */
double code(double c_p, double c_n, double t, double s) {
return 1.0;
}
real(8) function code(c_p, c_n, t, s)
real(8), intent (in) :: c_p
real(8), intent (in) :: c_n
real(8), intent (in) :: t
real(8), intent (in) :: s
code = 1.0d0
end function
public static double code(double c_p, double c_n, double t, double s) {
return 1.0;
}
def code(c_p, c_n, t, s): return 1.0
function code(c_p, c_n, t, s) return 1.0 end
function tmp = code(c_p, c_n, t, s) tmp = 1.0; end
code[c$95$p_, c$95$n_, t_, s_] := 1.0
\begin{array}{l}
\\
1
\end{array}
Initial program 92.6%
Taylor expanded in c_p around 0
/-lowering-/.f64N/A
pow-lowering-pow.f64N/A
sub-negN/A
+-lowering-+.f64N/A
distribute-neg-fracN/A
metadata-evalN/A
/-lowering-/.f64N/A
neg-mul-1N/A
+-lowering-+.f64N/A
exp-lowering-exp.f64N/A
neg-mul-1N/A
neg-sub0N/A
--lowering--.f64N/A
pow-lowering-pow.f64N/A
Simplified95.5%
Taylor expanded in c_n around 0
Simplified96.0%
(FPCore (c_p c_n t s) :precision binary64 (* (pow (/ (+ 1.0 (exp (- t))) (+ 1.0 (exp (- s)))) c_p) (pow (/ (+ 1.0 (exp t)) (+ 1.0 (exp s))) c_n)))
/* Herbie alternative (matches the report's :alt target): algebraic
 * rewrite ((1+e^-t)/(1+e^-s))^c_p * ((1+e^t)/(1+e^s))^c_n, which
 * equals the original ratio — since 1 - sigmoid(x) = 1/(1+e^x) —
 * without the (1.0 - t_x) cancellation. */
double code(double c_p, double c_n, double t, double s) {
return pow(((1.0 + exp(-t)) / (1.0 + exp(-s))), c_p) * pow(((1.0 + exp(t)) / (1.0 + exp(s))), c_n);
}
real(8) function code(c_p, c_n, t, s)
real(8), intent (in) :: c_p
real(8), intent (in) :: c_n
real(8), intent (in) :: t
real(8), intent (in) :: s
code = (((1.0d0 + exp(-t)) / (1.0d0 + exp(-s))) ** c_p) * (((1.0d0 + exp(t)) / (1.0d0 + exp(s))) ** c_n)
end function
public static double code(double c_p, double c_n, double t, double s) {
return Math.pow(((1.0 + Math.exp(-t)) / (1.0 + Math.exp(-s))), c_p) * Math.pow(((1.0 + Math.exp(t)) / (1.0 + Math.exp(s))), c_n);
}
def code(c_p, c_n, t, s): return math.pow(((1.0 + math.exp(-t)) / (1.0 + math.exp(-s))), c_p) * math.pow(((1.0 + math.exp(t)) / (1.0 + math.exp(s))), c_n)
function code(c_p, c_n, t, s) return Float64((Float64(Float64(1.0 + exp(Float64(-t))) / Float64(1.0 + exp(Float64(-s)))) ^ c_p) * (Float64(Float64(1.0 + exp(t)) / Float64(1.0 + exp(s))) ^ c_n)) end
function tmp = code(c_p, c_n, t, s) tmp = (((1.0 + exp(-t)) / (1.0 + exp(-s))) ^ c_p) * (((1.0 + exp(t)) / (1.0 + exp(s))) ^ c_n); end
code[c$95$p_, c$95$n_, t_, s_] := N[(N[Power[N[(N[(1.0 + N[Exp[(-t)], $MachinePrecision]), $MachinePrecision] / N[(1.0 + N[Exp[(-s)], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], c$95$p], $MachinePrecision] * N[Power[N[(N[(1.0 + N[Exp[t], $MachinePrecision]), $MachinePrecision] / N[(1.0 + N[Exp[s], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], c$95$n], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
{\left(\frac{1 + e^{-t}}{1 + e^{-s}}\right)}^{c\_p} \cdot {\left(\frac{1 + e^{t}}{1 + e^{s}}\right)}^{c\_n}
\end{array}
herbie shell --seed 2024159
(FPCore (c_p c_n t s)
:name "Harley's example"
:precision binary64
:pre (and (< 0.0 c_p) (< 0.0 c_n))
:alt
(! :herbie-platform default (* (pow (/ (+ 1 (exp (- t))) (+ 1 (exp (- s)))) c_p) (pow (/ (+ 1 (exp t)) (+ 1 (exp s))) c_n)))
(/ (* (pow (/ 1.0 (+ 1.0 (exp (- s)))) c_p) (pow (- 1.0 (/ 1.0 (+ 1.0 (exp (- s))))) c_n)) (* (pow (/ 1.0 (+ 1.0 (exp (- t)))) c_p) (pow (- 1.0 (/ 1.0 (+ 1.0 (exp (- t))))) c_n))))