
(FPCore (a b) :precision binary64 (/ (exp a) (+ (exp a) (exp b))))
double code(double a, double b) {
return exp(a) / (exp(a) + exp(b));
}
! Computes exp(a) / (exp(a) + exp(b)) in double precision (naive form).
real(8) function code(a, b)
real(8), intent (in) :: a
real(8), intent (in) :: b
code = exp(a) / (exp(a) + exp(b))
end function
public static double code(double a, double b) {
return Math.exp(a) / (Math.exp(a) + Math.exp(b));
}
def code(a, b): return math.exp(a) / (math.exp(a) + math.exp(b))
function code(a, b) return Float64(exp(a) / Float64(exp(a) + exp(b))) end
function tmp = code(a, b) tmp = exp(a) / (exp(a) + exp(b)); end
code[a_, b_] := N[(N[Exp[a], $MachinePrecision] / N[(N[Exp[a], $MachinePrecision] + N[Exp[b], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{e^{a}}{e^{a} + e^{b}}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 9 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (a b) :precision binary64 (/ (exp a) (+ (exp a) (exp b))))
double code(double a, double b) {
return exp(a) / (exp(a) + exp(b));
}
real(8) function code(a, b)
real(8), intent (in) :: a
real(8), intent (in) :: b
code = exp(a) / (exp(a) + exp(b))
end function
public static double code(double a, double b) {
return Math.exp(a) / (Math.exp(a) + Math.exp(b));
}
def code(a, b): return math.exp(a) / (math.exp(a) + math.exp(b))
function code(a, b) return Float64(exp(a) / Float64(exp(a) + exp(b))) end
function tmp = code(a, b) tmp = exp(a) / (exp(a) + exp(b)); end
code[a_, b_] := N[(N[Exp[a], $MachinePrecision] / N[(N[Exp[a], $MachinePrecision] + N[Exp[b], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{e^{a}}{e^{a} + e^{b}}
\end{array}
(FPCore (a b) :precision binary64 (/ 1.0 (+ 1.0 (exp (- b a)))))
double code(double a, double b) {
return 1.0 / (1.0 + exp((b - a)));
}
real(8) function code(a, b)
! Rewritten form of exp(a)/(exp(a)+exp(b)) as 1 / (1 + exp(b - a)).
! Dividing through by exp(a) avoids evaluating the two large
! exponentials separately.
real(8), intent (in) :: a
real(8), intent (in) :: b
code = 1.0d0 / (1.0d0 + exp((b - a)))
end function
public static double code(double a, double b) {
return 1.0 / (1.0 + Math.exp((b - a)));
}
def code(a, b): return 1.0 / (1.0 + math.exp((b - a)))
function code(a, b) return Float64(1.0 / Float64(1.0 + exp(Float64(b - a)))) end
function tmp = code(a, b) tmp = 1.0 / (1.0 + exp((b - a))); end
code[a_, b_] := N[(1.0 / N[(1.0 + N[Exp[N[(b - a), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{1 + e^{b - a}}
\end{array}
Initial program 98.0%
*-lft-identity 98.0%
associate-*l/ 98.0%
associate-/r/ 98.0%
remove-double-neg 98.0%
unsub-neg 98.0%
div-sub 72.6%
*-lft-identity 72.6%
associate-*l/ 72.6%
lft-mult-inverse 98.8%
sub-neg 98.8%
distribute-frac-neg 98.8%
remove-double-neg 98.8%
div-exp 100.0%
Simplified 100.0%
(FPCore (a b) :precision binary64 (if (<= a -80000000.0) (/ (exp a) a) (/ 1.0 (+ 1.0 (exp b)))))
double code(double a, double b) {
double tmp;
if (a <= -80000000.0) {
tmp = exp(a) / a;
} else {
tmp = 1.0 / (1.0 + exp(b));
}
return tmp;
}
real(8) function code(a, b)
! Piecewise generated alternative for exp(a)/(exp(a)+exp(b)):
! for a <= -8.0e7 the value is approximated by exp(a)/a,
! otherwise by 1 / (1 + exp(b)).
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8) :: tmp
if (a <= (-80000000.0d0)) then
tmp = exp(a) / a
else
tmp = 1.0d0 / (1.0d0 + exp(b))
end if
code = tmp
end function
public static double code(double a, double b) {
double tmp;
if (a <= -80000000.0) {
tmp = Math.exp(a) / a;
} else {
tmp = 1.0 / (1.0 + Math.exp(b));
}
return tmp;
}
def code(a, b): tmp = 0 if a <= -80000000.0: tmp = math.exp(a) / a else: tmp = 1.0 / (1.0 + math.exp(b)) return tmp
function code(a, b) tmp = 0.0 if (a <= -80000000.0) tmp = Float64(exp(a) / a); else tmp = Float64(1.0 / Float64(1.0 + exp(b))); end return tmp end
function tmp_2 = code(a, b) tmp = 0.0; if (a <= -80000000.0) tmp = exp(a) / a; else tmp = 1.0 / (1.0 + exp(b)); end tmp_2 = tmp; end
code[a_, b_] := If[LessEqual[a, -80000000.0], N[(N[Exp[a], $MachinePrecision] / a), $MachinePrecision], N[(1.0 / N[(1.0 + N[Exp[b], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;a \leq -80000000:\\
\;\;\;\;\frac{e^{a}}{a}\\
\mathbf{else}:\\
\;\;\;\;\frac{1}{1 + e^{b}}\\
\end{array}
\end{array}
if a < -8e7: Initial program 98.5%
Taylor expanded in b around 0 100.0%
Taylor expanded in a around 0 100.0%
+-commutative100.0%
Simplified100.0%
Taylor expanded in a around inf 100.0%
if -8e7 < a: Initial program 97.9%
*-lft-identity97.9%
associate-*l/97.9%
associate-/r/97.9%
remove-double-neg97.9%
unsub-neg97.9%
div-sub97.3%
*-lft-identity97.3%
associate-*l/97.3%
lft-mult-inverse98.9%
sub-neg98.9%
distribute-frac-neg98.9%
remove-double-neg98.9%
div-exp100.0%
Simplified100.0%
Taylor expanded in a around 0 98.4%
(FPCore (a b) :precision binary64 (if (<= a -45000000.0) (/ (exp a) a) (/ 1.0 (+ 2.0 (* a (+ (* a 0.5) -1.0))))))
double code(double a, double b) {
double tmp;
if (a <= -45000000.0) {
tmp = exp(a) / a;
} else {
tmp = 1.0 / (2.0 + (a * ((a * 0.5) + -1.0)));
}
return tmp;
}
real(8) function code(a, b)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8) :: tmp
if (a <= (-45000000.0d0)) then
tmp = exp(a) / a
else
tmp = 1.0d0 / (2.0d0 + (a * ((a * 0.5d0) + (-1.0d0))))
end if
code = tmp
end function
public static double code(double a, double b) {
double tmp;
if (a <= -45000000.0) {
tmp = Math.exp(a) / a;
} else {
tmp = 1.0 / (2.0 + (a * ((a * 0.5) + -1.0)));
}
return tmp;
}
def code(a, b): tmp = 0 if a <= -45000000.0: tmp = math.exp(a) / a else: tmp = 1.0 / (2.0 + (a * ((a * 0.5) + -1.0))) return tmp
function code(a, b) tmp = 0.0 if (a <= -45000000.0) tmp = Float64(exp(a) / a); else tmp = Float64(1.0 / Float64(2.0 + Float64(a * Float64(Float64(a * 0.5) + -1.0)))); end return tmp end
function tmp_2 = code(a, b) tmp = 0.0; if (a <= -45000000.0) tmp = exp(a) / a; else tmp = 1.0 / (2.0 + (a * ((a * 0.5) + -1.0))); end tmp_2 = tmp; end
code[a_, b_] := If[LessEqual[a, -45000000.0], N[(N[Exp[a], $MachinePrecision] / a), $MachinePrecision], N[(1.0 / N[(2.0 + N[(a * N[(N[(a * 0.5), $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;a \leq -45000000:\\
\;\;\;\;\frac{e^{a}}{a}\\
\mathbf{else}:\\
\;\;\;\;\frac{1}{2 + a \cdot \left(a \cdot 0.5 + -1\right)}\\
\end{array}
\end{array}
if a < -4.5e7Initial program 98.5%
Taylor expanded in b around 0 100.0%
Taylor expanded in a around 0 100.0%
+-commutative100.0%
Simplified100.0%
Taylor expanded in a around inf 100.0%
if -4.5e7 < a Initial program 97.9%
*-lft-identity97.9%
associate-*l/97.9%
associate-/r/97.9%
remove-double-neg97.9%
unsub-neg97.9%
div-sub97.9%
*-lft-identity97.9%
associate-*l/97.9%
lft-mult-inverse98.9%
sub-neg98.9%
distribute-frac-neg98.9%
remove-double-neg98.9%
div-exp100.0%
Simplified100.0%
Taylor expanded in b around 0 57.3%
Taylor expanded in a around 0 56.0%
Final simplification67.3%
(FPCore (a b) :precision binary64 (/ 1.0 (+ 2.0 (* a (+ (* a (+ 0.5 (* a -0.16666666666666666))) -1.0)))))
double code(double a, double b) {
return 1.0 / (2.0 + (a * ((a * (0.5 + (a * -0.16666666666666666))) + -1.0)));
}
real(8) function code(a, b)
real(8), intent (in) :: a
real(8), intent (in) :: b
code = 1.0d0 / (2.0d0 + (a * ((a * (0.5d0 + (a * (-0.16666666666666666d0)))) + (-1.0d0))))
end function
public static double code(double a, double b) {
return 1.0 / (2.0 + (a * ((a * (0.5 + (a * -0.16666666666666666))) + -1.0)));
}
def code(a, b): return 1.0 / (2.0 + (a * ((a * (0.5 + (a * -0.16666666666666666))) + -1.0)))
function code(a, b) return Float64(1.0 / Float64(2.0 + Float64(a * Float64(Float64(a * Float64(0.5 + Float64(a * -0.16666666666666666))) + -1.0)))) end
function tmp = code(a, b) tmp = 1.0 / (2.0 + (a * ((a * (0.5 + (a * -0.16666666666666666))) + -1.0))); end
code[a_, b_] := N[(1.0 / N[(2.0 + N[(a * N[(N[(a * N[(0.5 + N[(a * -0.16666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{2 + a \cdot \left(a \cdot \left(0.5 + a \cdot -0.16666666666666666\right) + -1\right)}
\end{array}
Initial program 98.0%
*-lft-identity98.0%
associate-*l/98.0%
associate-/r/98.0%
remove-double-neg98.0%
unsub-neg98.0%
div-sub72.6%
*-lft-identity72.6%
associate-*l/72.6%
lft-mult-inverse98.8%
sub-neg98.8%
distribute-frac-neg98.8%
remove-double-neg98.8%
div-exp100.0%
Simplified100.0%
Taylor expanded in b around 0 68.3%
Taylor expanded in a around 0 60.2%
Final simplification60.2%
(FPCore (a b) :precision binary64 (/ 1.0 (+ 2.0 (* a (+ (* a 0.5) -1.0)))))
double code(double a, double b) {
return 1.0 / (2.0 + (a * ((a * 0.5) + -1.0)));
}
real(8) function code(a, b)
real(8), intent (in) :: a
real(8), intent (in) :: b
code = 1.0d0 / (2.0d0 + (a * ((a * 0.5d0) + (-1.0d0))))
end function
public static double code(double a, double b) {
return 1.0 / (2.0 + (a * ((a * 0.5) + -1.0)));
}
def code(a, b): return 1.0 / (2.0 + (a * ((a * 0.5) + -1.0)))
function code(a, b) return Float64(1.0 / Float64(2.0 + Float64(a * Float64(Float64(a * 0.5) + -1.0)))) end
function tmp = code(a, b) tmp = 1.0 / (2.0 + (a * ((a * 0.5) + -1.0))); end
code[a_, b_] := N[(1.0 / N[(2.0 + N[(a * N[(N[(a * 0.5), $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{2 + a \cdot \left(a \cdot 0.5 + -1\right)}
\end{array}
Initial program 98.0%
*-lft-identity98.0%
associate-*l/98.0%
associate-/r/98.0%
remove-double-neg98.0%
unsub-neg98.0%
div-sub72.6%
*-lft-identity72.6%
associate-*l/72.6%
lft-mult-inverse98.8%
sub-neg98.8%
distribute-frac-neg98.8%
remove-double-neg98.8%
div-exp100.0%
Simplified100.0%
Taylor expanded in b around 0 68.3%
Taylor expanded in a around 0 56.3%
Final simplification56.3%
(FPCore (a b) :precision binary64 (if (<= a -70000000.0) (/ (/ -2.0 a) a) (/ 1.0 (- 2.0 a))))
double code(double a, double b) {
double tmp;
if (a <= -70000000.0) {
tmp = (-2.0 / a) / a;
} else {
tmp = 1.0 / (2.0 - a);
}
return tmp;
}
real(8) function code(a, b)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8) :: tmp
if (a <= (-70000000.0d0)) then
tmp = ((-2.0d0) / a) / a
else
tmp = 1.0d0 / (2.0d0 - a)
end if
code = tmp
end function
public static double code(double a, double b) {
double tmp;
if (a <= -70000000.0) {
tmp = (-2.0 / a) / a;
} else {
tmp = 1.0 / (2.0 - a);
}
return tmp;
}
def code(a, b): tmp = 0 if a <= -70000000.0: tmp = (-2.0 / a) / a else: tmp = 1.0 / (2.0 - a) return tmp
function code(a, b) tmp = 0.0 if (a <= -70000000.0) tmp = Float64(Float64(-2.0 / a) / a); else tmp = Float64(1.0 / Float64(2.0 - a)); end return tmp end
function tmp_2 = code(a, b) tmp = 0.0; if (a <= -70000000.0) tmp = (-2.0 / a) / a; else tmp = 1.0 / (2.0 - a); end tmp_2 = tmp; end
code[a_, b_] := If[LessEqual[a, -70000000.0], N[(N[(-2.0 / a), $MachinePrecision] / a), $MachinePrecision], N[(1.0 / N[(2.0 - a), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;a \leq -70000000:\\
\;\;\;\;\frac{\frac{-2}{a}}{a}\\
\mathbf{else}:\\
\;\;\;\;\frac{1}{2 - a}\\
\end{array}
\end{array}
if a < -7e7Initial program 98.5%
*-lft-identity98.5%
associate-*l/98.5%
associate-/r/98.5%
remove-double-neg98.5%
unsub-neg98.5%
div-sub0.0%
*-lft-identity0.0%
associate-*l/0.0%
lft-mult-inverse98.5%
sub-neg98.5%
distribute-frac-neg98.5%
remove-double-neg98.5%
div-exp100.0%
Simplified100.0%
Taylor expanded in b around 0 100.0%
Taylor expanded in a around 0 5.6%
neg-mul-15.6%
unsub-neg5.6%
Simplified5.6%
Taylor expanded in a around inf 5.6%
associate-*r/5.6%
distribute-lft-in5.6%
metadata-eval5.6%
associate-*r/5.6%
metadata-eval5.6%
associate-*r/5.6%
metadata-eval5.6%
Simplified5.6%
Taylor expanded in a around 0 56.2%
if -7e7 < a Initial program 97.9%
*-lft-identity97.9%
associate-*l/97.9%
associate-/r/97.9%
remove-double-neg97.9%
unsub-neg97.9%
div-sub97.9%
*-lft-identity97.9%
associate-*l/97.9%
lft-mult-inverse98.9%
sub-neg98.9%
distribute-frac-neg98.9%
remove-double-neg98.9%
div-exp100.0%
Simplified100.0%
Taylor expanded in b around 0 57.3%
Taylor expanded in a around 0 55.9%
neg-mul-155.9%
unsub-neg55.9%
Simplified55.9%
(FPCore (a b) :precision binary64 (/ 1.0 (- 2.0 a)))
double code(double a, double b) {
return 1.0 / (2.0 - a);
}
real(8) function code(a, b)
real(8), intent (in) :: a
real(8), intent (in) :: b
code = 1.0d0 / (2.0d0 - a)
end function
public static double code(double a, double b) {
return 1.0 / (2.0 - a);
}
def code(a, b): return 1.0 / (2.0 - a)
function code(a, b) return Float64(1.0 / Float64(2.0 - a)) end
function tmp = code(a, b) tmp = 1.0 / (2.0 - a); end
code[a_, b_] := N[(1.0 / N[(2.0 - a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{2 - a}
\end{array}
Initial program 98.0%
*-lft-identity98.0%
associate-*l/98.0%
associate-/r/98.0%
remove-double-neg98.0%
unsub-neg98.0%
div-sub72.6%
*-lft-identity72.6%
associate-*l/72.6%
lft-mult-inverse98.8%
sub-neg98.8%
distribute-frac-neg98.8%
remove-double-neg98.8%
div-exp100.0%
Simplified100.0%
Taylor expanded in b around 0 68.3%
Taylor expanded in a around 0 42.9%
neg-mul-142.9%
unsub-neg42.9%
Simplified42.9%
(FPCore (a b) :precision binary64 (+ 0.5 (* a 0.25)))
double code(double a, double b) {
return 0.5 + (a * 0.25);
}
real(8) function code(a, b)
real(8), intent (in) :: a
real(8), intent (in) :: b
code = 0.5d0 + (a * 0.25d0)
end function
public static double code(double a, double b) {
return 0.5 + (a * 0.25);
}
def code(a, b): return 0.5 + (a * 0.25)
function code(a, b) return Float64(0.5 + Float64(a * 0.25)) end
function tmp = code(a, b) tmp = 0.5 + (a * 0.25); end
code[a_, b_] := N[(0.5 + N[(a * 0.25), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
0.5 + a \cdot 0.25
\end{array}
Initial program 98.0%
*-lft-identity98.0%
associate-*l/98.0%
associate-/r/98.0%
remove-double-neg98.0%
unsub-neg98.0%
div-sub72.6%
*-lft-identity72.6%
associate-*l/72.6%
lft-mult-inverse98.8%
sub-neg98.8%
distribute-frac-neg98.8%
remove-double-neg98.8%
div-exp100.0%
Simplified100.0%
Taylor expanded in b around 0 68.3%
Taylor expanded in a around 0 42.0%
*-commutative42.0%
Simplified42.0%
(FPCore (a b) :precision binary64 0.5)
double code(double a, double b) {
return 0.5;
}
real(8) function code(a, b)
real(8), intent (in) :: a
real(8), intent (in) :: b
code = 0.5d0
end function
public static double code(double a, double b) {
return 0.5;
}
def code(a, b): return 0.5
function code(a, b) return 0.5 end
function tmp = code(a, b) tmp = 0.5; end
code[a_, b_] := 0.5
\begin{array}{l}
\\
0.5
\end{array}
Initial program 98.0%
*-lft-identity98.0%
associate-*l/98.0%
associate-/r/98.0%
remove-double-neg98.0%
unsub-neg98.0%
div-sub72.6%
*-lft-identity72.6%
associate-*l/72.6%
lft-mult-inverse98.8%
sub-neg98.8%
distribute-frac-neg98.8%
remove-double-neg98.8%
div-exp100.0%
Simplified100.0%
Taylor expanded in a around 0 82.2%
Taylor expanded in b around 0 42.0%
(FPCore (a b) :precision binary64 (/ 1.0 (+ 1.0 (exp (- b a)))))
double code(double a, double b) {
return 1.0 / (1.0 + exp((b - a)));
}
real(8) function code(a, b)
real(8), intent (in) :: a
real(8), intent (in) :: b
code = 1.0d0 / (1.0d0 + exp((b - a)))
end function
public static double code(double a, double b) {
return 1.0 / (1.0 + Math.exp((b - a)));
}
def code(a, b): return 1.0 / (1.0 + math.exp((b - a)))
function code(a, b) return Float64(1.0 / Float64(1.0 + exp(Float64(b - a)))) end
function tmp = code(a, b) tmp = 1.0 / (1.0 + exp((b - a))); end
code[a_, b_] := N[(1.0 / N[(1.0 + N[Exp[N[(b - a), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{1 + e^{b - a}}
\end{array}
herbie shell --seed 2024094
(FPCore (a b)
:name "Quotient of sum of exps"
:precision binary64
:alt
(/ 1.0 (+ 1.0 (exp (- b a))))
(/ (exp a) (+ (exp a) (exp b))))