
(FPCore (c_p c_n t s)
:precision binary64
(let* ((t_1 (/ 1.0 (+ 1.0 (exp (- t))))) (t_2 (/ 1.0 (+ 1.0 (exp (- s))))))
(/
(* (pow t_2 c_p) (pow (- 1.0 t_2) c_n))
(* (pow t_1 c_p) (pow (- 1.0 t_1) c_n)))))
double code(double c_p, double c_n, double t, double s) {
double t_1 = 1.0 / (1.0 + exp(-t));
double t_2 = 1.0 / (1.0 + exp(-s));
return (pow(t_2, c_p) * pow((1.0 - t_2), c_n)) / (pow(t_1, c_p) * pow((1.0 - t_1), c_n));
}
! Ratio of likelihood terms at the logistic sigmoids of s and t:
!   (sig(s)**c_p * (1-sig(s))**c_n) / (sig(t)**c_p * (1-sig(t))**c_n)
! where sig(x) = 1/(1 + exp(-x)). Generated translation of the FPCore
! expression above.
real(8) function code(c_p, c_n, t, s)
real(8), intent (in) :: c_p
real(8), intent (in) :: c_n
real(8), intent (in) :: t
real(8), intent (in) :: s
real(8) :: t_1
real(8) :: t_2
! t_1 = sigmoid(t), t_2 = sigmoid(s)
t_1 = 1.0d0 / (1.0d0 + exp(-t))
t_2 = 1.0d0 / (1.0d0 + exp(-s))
code = ((t_2 ** c_p) * ((1.0d0 - t_2) ** c_n)) / ((t_1 ** c_p) * ((1.0d0 - t_1) ** c_n))
end function
/**
 * Ratio of likelihood terms at the logistic sigmoids of s and t:
 * (sig(s)^c_p * (1-sig(s))^c_n) / (sig(t)^c_p * (1-sig(t))^c_n),
 * where sig(x) = 1 / (1 + e^{-x}).
 */
public static double code(double c_p, double c_n, double t, double s) {
    double sigT = 1.0 / (1.0 + Math.exp(-t));
    double sigS = 1.0 / (1.0 + Math.exp(-s));
    double numerator = Math.pow(sigS, c_p) * Math.pow(1.0 - sigS, c_n);
    double denominator = Math.pow(sigT, c_p) * Math.pow(1.0 - sigT, c_n);
    return numerator / denominator;
}
def code(c_p, c_n, t, s):
    """Ratio of likelihood terms at the logistic sigmoids of s and t.

    Returns (sig(s)**c_p * (1-sig(s))**c_n) / (sig(t)**c_p * (1-sig(t))**c_n)
    where sig(x) = 1/(1 + exp(-x)).

    The generated rendering collapsed the function onto one line
    (multiple statements after the colon), which is a SyntaxError;
    this restores the valid multi-line form with identical arithmetic.
    """
    t_1 = 1.0 / (1.0 + math.exp(-t))  # sigmoid(t)
    t_2 = 1.0 / (1.0 + math.exp(-s))  # sigmoid(s)
    num = math.pow(t_2, c_p) * math.pow(1.0 - t_2, c_n)
    den = math.pow(t_1, c_p) * math.pow(1.0 - t_1, c_n)
    return num / den
# Ratio of likelihood terms at the logistic sigmoids of s and t.
# The generated rendering collapsed all statements onto one line with no
# separators ("t_1 = ... t_2 = ..."), which does not parse in Julia;
# this restores one statement per line with identical arithmetic.
function code(c_p, c_n, t, s)
    t_1 = Float64(1.0 / Float64(1.0 + exp(Float64(-t))))
    t_2 = Float64(1.0 / Float64(1.0 + exp(Float64(-s))))
    return Float64(Float64((t_2 ^ c_p) * (Float64(1.0 - t_2) ^ c_n)) /
                   Float64((t_1 ^ c_p) * (Float64(1.0 - t_1) ^ c_n)))
end
% Ratio of likelihood terms at the logistic sigmoids of s and t:
%   (sig(s)^c_p * (1-sig(s))^c_n) / (sig(t)^c_p * (1-sig(t))^c_n)
function tmp = code(c_p, c_n, t, s)
    sig_t = 1.0 / (1.0 + exp(-t));
    sig_s = 1.0 / (1.0 + exp(-s));
    tmp = ((sig_s ^ c_p) * ((1.0 - sig_s) ^ c_n)) / ((sig_t ^ c_p) * ((1.0 - sig_t) ^ c_n));
end
code[c$95$p_, c$95$n_, t_, s_] := Block[{t$95$1 = N[(1.0 / N[(1.0 + N[Exp[(-t)], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$2 = N[(1.0 / N[(1.0 + N[Exp[(-s)], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, N[(N[(N[Power[t$95$2, c$95$p], $MachinePrecision] * N[Power[N[(1.0 - t$95$2), $MachinePrecision], c$95$n], $MachinePrecision]), $MachinePrecision] / N[(N[Power[t$95$1, c$95$p], $MachinePrecision] * N[Power[N[(1.0 - t$95$1), $MachinePrecision], c$95$n], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_1 := \frac{1}{1 + e^{-t}}\\
t_2 := \frac{1}{1 + e^{-s}}\\
\frac{{t\_2}^{c\_p} \cdot {\left(1 - t\_2\right)}^{c\_n}}{{t\_1}^{c\_p} \cdot {\left(1 - t\_1\right)}^{c\_n}}
\end{array}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 9 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (c_p c_n t s)
:precision binary64
(let* ((t_1 (/ 1.0 (+ 1.0 (exp (- t))))) (t_2 (/ 1.0 (+ 1.0 (exp (- s))))))
(/
(* (pow t_2 c_p) (pow (- 1.0 t_2) c_n))
(* (pow t_1 c_p) (pow (- 1.0 t_1) c_n)))))
double code(double c_p, double c_n, double t, double s) {
double t_1 = 1.0 / (1.0 + exp(-t));
double t_2 = 1.0 / (1.0 + exp(-s));
return (pow(t_2, c_p) * pow((1.0 - t_2), c_n)) / (pow(t_1, c_p) * pow((1.0 - t_1), c_n));
}
real(8) function code(c_p, c_n, t, s)
real(8), intent (in) :: c_p
real(8), intent (in) :: c_n
real(8), intent (in) :: t
real(8), intent (in) :: s
real(8) :: t_1
real(8) :: t_2
t_1 = 1.0d0 / (1.0d0 + exp(-t))
t_2 = 1.0d0 / (1.0d0 + exp(-s))
code = ((t_2 ** c_p) * ((1.0d0 - t_2) ** c_n)) / ((t_1 ** c_p) * ((1.0d0 - t_1) ** c_n))
end function
public static double code(double c_p, double c_n, double t, double s) {
double t_1 = 1.0 / (1.0 + Math.exp(-t));
double t_2 = 1.0 / (1.0 + Math.exp(-s));
return (Math.pow(t_2, c_p) * Math.pow((1.0 - t_2), c_n)) / (Math.pow(t_1, c_p) * Math.pow((1.0 - t_1), c_n));
}
def code(c_p, c_n, t, s): t_1 = 1.0 / (1.0 + math.exp(-t)) t_2 = 1.0 / (1.0 + math.exp(-s)) return (math.pow(t_2, c_p) * math.pow((1.0 - t_2), c_n)) / (math.pow(t_1, c_p) * math.pow((1.0 - t_1), c_n))
function code(c_p, c_n, t, s) t_1 = Float64(1.0 / Float64(1.0 + exp(Float64(-t)))) t_2 = Float64(1.0 / Float64(1.0 + exp(Float64(-s)))) return Float64(Float64((t_2 ^ c_p) * (Float64(1.0 - t_2) ^ c_n)) / Float64((t_1 ^ c_p) * (Float64(1.0 - t_1) ^ c_n))) end
function tmp = code(c_p, c_n, t, s) t_1 = 1.0 / (1.0 + exp(-t)); t_2 = 1.0 / (1.0 + exp(-s)); tmp = ((t_2 ^ c_p) * ((1.0 - t_2) ^ c_n)) / ((t_1 ^ c_p) * ((1.0 - t_1) ^ c_n)); end
code[c$95$p_, c$95$n_, t_, s_] := Block[{t$95$1 = N[(1.0 / N[(1.0 + N[Exp[(-t)], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$2 = N[(1.0 / N[(1.0 + N[Exp[(-s)], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, N[(N[(N[Power[t$95$2, c$95$p], $MachinePrecision] * N[Power[N[(1.0 - t$95$2), $MachinePrecision], c$95$n], $MachinePrecision]), $MachinePrecision] / N[(N[Power[t$95$1, c$95$p], $MachinePrecision] * N[Power[N[(1.0 - t$95$1), $MachinePrecision], c$95$n], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_1 := \frac{1}{1 + e^{-t}}\\
t_2 := \frac{1}{1 + e^{-s}}\\
\frac{{t\_2}^{c\_p} \cdot {\left(1 - t\_2\right)}^{c\_n}}{{t\_1}^{c\_p} \cdot {\left(1 - t\_1\right)}^{c\_n}}
\end{array}
\end{array}
(FPCore (c_p c_n t s)
:precision binary64
(let* ((t_1 (exp (- s))) (t_2 (exp (- t))) (t_3 (/ -1.0 (+ t_2 1.0))))
(if (<= (- t) 5e-162)
(exp
(fma
c_p
(- (log1p t_2) (log1p t_1))
(* c_n (- (log1p (/ 1.0 (- -1.0 t_1))) (log1p t_3)))))
(exp (* (log (/ (+ 1.0 t_3) (+ 1.0 (/ -1.0 (+ t_1 1.0))))) (- c_n))))))
/* Herbie-generated alternative: evaluates the sigmoid-likelihood ratio in
 * log space with log1p/fma on one branch to reduce rounding error.
 * NOTE(review): the branch threshold -t <= 5e-162 and the dropped c_p term
 * in the else-branch come from Herbie's regime inference and Taylor
 * expansion in c_p around 0 (see the derivation log following this block). */
double code(double c_p, double c_n, double t, double s) {
double t_1 = exp(-s);
double t_2 = exp(-t);
double t_3 = -1.0 / (t_2 + 1.0);
double tmp;
if (-t <= 5e-162) {
/* exp(c_p*(log1p(e^-t) - log1p(e^-s)) + c_n*(log1p(1/(-1-e^-s)) - log1p(t_3))) */
tmp = exp(fma(c_p, (log1p(t_2) - log1p(t_1)), (c_n * (log1p((1.0 / (-1.0 - t_1))) - log1p(t_3)))));
} else {
/* only the c_n factor is retained here */
tmp = exp((log(((1.0 + t_3) / (1.0 + (-1.0 / (t_1 + 1.0))))) * -c_n));
}
return tmp;
}
function code(c_p, c_n, t, s) t_1 = exp(Float64(-s)) t_2 = exp(Float64(-t)) t_3 = Float64(-1.0 / Float64(t_2 + 1.0)) tmp = 0.0 if (Float64(-t) <= 5e-162) tmp = exp(fma(c_p, Float64(log1p(t_2) - log1p(t_1)), Float64(c_n * Float64(log1p(Float64(1.0 / Float64(-1.0 - t_1))) - log1p(t_3))))); else tmp = exp(Float64(log(Float64(Float64(1.0 + t_3) / Float64(1.0 + Float64(-1.0 / Float64(t_1 + 1.0))))) * Float64(-c_n))); end return tmp end
code[c$95$p_, c$95$n_, t_, s_] := Block[{t$95$1 = N[Exp[(-s)], $MachinePrecision]}, Block[{t$95$2 = N[Exp[(-t)], $MachinePrecision]}, Block[{t$95$3 = N[(-1.0 / N[(t$95$2 + 1.0), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[(-t), 5e-162], N[Exp[N[(c$95$p * N[(N[Log[1 + t$95$2], $MachinePrecision] - N[Log[1 + t$95$1], $MachinePrecision]), $MachinePrecision] + N[(c$95$n * N[(N[Log[1 + N[(1.0 / N[(-1.0 - t$95$1), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] - N[Log[1 + t$95$3], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision], N[Exp[N[(N[Log[N[(N[(1.0 + t$95$3), $MachinePrecision] / N[(1.0 + N[(-1.0 / N[(t$95$1 + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] * (-c$95$n)), $MachinePrecision]], $MachinePrecision]]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_1 := e^{-s}\\
t_2 := e^{-t}\\
t_3 := \frac{-1}{t\_2 + 1}\\
\mathbf{if}\;-t \leq 5 \cdot 10^{-162}:\\
\;\;\;\;e^{\mathsf{fma}\left(c\_p, \mathsf{log1p}\left(t\_2\right) - \mathsf{log1p}\left(t\_1\right), c\_n \cdot \left(\mathsf{log1p}\left(\frac{1}{-1 - t\_1}\right) - \mathsf{log1p}\left(t\_3\right)\right)\right)}\\
\mathbf{else}:\\
\;\;\;\;e^{\log \left(\frac{1 + t\_3}{1 + \frac{-1}{t\_1 + 1}}\right) \cdot \left(-c\_n\right)}\\
\end{array}
\end{array}
if (neg.f64 t) < 5.00000000000000014e-162Initial program 94.1%
Applied rewrites100.0%
if 5.00000000000000014e-162 < (neg.f64 t) Initial program 82.6%
Applied rewrites91.0%
Taylor expanded in c_p around 0
distribute-lft-out--N/A
lower-*.f64N/A
lower--.f64N/A
Applied rewrites99.5%
lift-neg.f64N/A
lift-exp.f64N/A
lift-+.f64N/A
lift-/.f64N/A
lift-neg.f64N/A
lift-exp.f64N/A
lift-+.f64N/A
lift-/.f64N/A
lift-+.f64N/A
diff-logN/A
Applied rewrites99.6%
Final simplification99.9%
(FPCore (c_p c_n t s)
:precision binary64
(if (<= c_p 5e-92)
(exp
(*
(log
(/
(+ 1.0 (/ -1.0 (+ (exp (- t)) 1.0)))
(+ 1.0 (/ -1.0 (+ (exp (- s)) 1.0)))))
(- c_n)))
(pow (fma (* s 0.5) (fma s 0.5 -1.0) 1.0) (- c_p))))
/* Herbie-generated alternative branching on c_p.
 * Small c_p: the ratio is evaluated via exp(log(...) * -c_n); the c_p factor
 * was removed by a Taylor expansion in c_p around 0 (per the derivation log
 * following this block).
 * Large c_p: a quadratic-in-s surrogate raised to -c_p. */
double code(double c_p, double c_n, double t, double s) {
double tmp;
if (c_p <= 5e-92) {
tmp = exp((log(((1.0 + (-1.0 / (exp(-t) + 1.0))) / (1.0 + (-1.0 / (exp(-s) + 1.0))))) * -c_n));
} else {
/* base is fma(s/2, fma(s, 1/2, -1), 1) = 1 - s/2 + s^2/4 */
tmp = pow(fma((s * 0.5), fma(s, 0.5, -1.0), 1.0), -c_p);
}
return tmp;
}
function code(c_p, c_n, t, s) tmp = 0.0 if (c_p <= 5e-92) tmp = exp(Float64(log(Float64(Float64(1.0 + Float64(-1.0 / Float64(exp(Float64(-t)) + 1.0))) / Float64(1.0 + Float64(-1.0 / Float64(exp(Float64(-s)) + 1.0))))) * Float64(-c_n))); else tmp = fma(Float64(s * 0.5), fma(s, 0.5, -1.0), 1.0) ^ Float64(-c_p); end return tmp end
code[c$95$p_, c$95$n_, t_, s_] := If[LessEqual[c$95$p, 5e-92], N[Exp[N[(N[Log[N[(N[(1.0 + N[(-1.0 / N[(N[Exp[(-t)], $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(1.0 + N[(-1.0 / N[(N[Exp[(-s)], $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] * (-c$95$n)), $MachinePrecision]], $MachinePrecision], N[Power[N[(N[(s * 0.5), $MachinePrecision] * N[(s * 0.5 + -1.0), $MachinePrecision] + 1.0), $MachinePrecision], (-c$95$p)], $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;c\_p \leq 5 \cdot 10^{-92}:\\
\;\;\;\;e^{\log \left(\frac{1 + \frac{-1}{e^{-t} + 1}}{1 + \frac{-1}{e^{-s} + 1}}\right) \cdot \left(-c\_n\right)}\\
\mathbf{else}:\\
\;\;\;\;{\left(\mathsf{fma}\left(s \cdot 0.5, \mathsf{fma}\left(s, 0.5, -1\right), 1\right)\right)}^{\left(-c\_p\right)}\\
\end{array}
\end{array}
if c_p < 5.00000000000000011e-92Initial program 91.5%
Applied rewrites96.2%
Taylor expanded in c_p around 0
distribute-lft-out--N/A
lower-*.f64N/A
lower--.f64N/A
Applied rewrites99.8%
lift-neg.f64N/A
lift-exp.f64N/A
lift-+.f64N/A
lift-/.f64N/A
lift-neg.f64N/A
lift-exp.f64N/A
lift-+.f64N/A
lift-/.f64N/A
lift-+.f64N/A
diff-logN/A
Applied rewrites99.8%
if 5.00000000000000011e-92 < c_p Initial program 90.2%
Taylor expanded in c_n around 0
lower-/.f64N/A
lower-pow.f64N/A
lower-/.f64N/A
lower-+.f64N/A
lower-exp.f64N/A
lower-neg.f64N/A
lower-pow.f64N/A
lower-/.f64N/A
lower-+.f64N/A
lower-exp.f64N/A
lower-neg.f6490.2
Applied rewrites90.2%
Taylor expanded in s around 0
+-commutativeN/A
lower-fma.f64N/A
sub-negN/A
*-commutativeN/A
metadata-evalN/A
lower-fma.f6490.2
Applied rewrites90.2%
lift-fma.f64N/A
lift-fma.f64N/A
lift-/.f64N/A
*-lft-identityN/A
unpow-prod-downN/A
pow-base-1N/A
lift-pow.f64N/A
lift-neg.f64N/A
lift-exp.f64N/A
lift-+.f64N/A
lift-/.f64N/A
rem-exp-logN/A
rem-exp-logN/A
*-lft-identityN/A
unpow-prod-downN/A
Applied rewrites100.0%
Taylor expanded in t around 0
+-commutativeN/A
distribute-lft-inN/A
associate-*r*N/A
metadata-evalN/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f64N/A
sub-negN/A
metadata-evalN/A
*-commutativeN/A
lower-fma.f64100.0
Applied rewrites100.0%
Final simplification99.9%
(FPCore (c_p c_n t s)
:precision binary64
(if (<= c_p 5e-92)
(pow
(/
(+ 1.0 (/ -1.0 (+ (exp (- s)) 1.0)))
(+ 1.0 (/ -1.0 (+ (exp (- t)) 1.0))))
c_n)
(pow (fma (* s 0.5) (fma s 0.5 -1.0) 1.0) (- c_p))))
/* Herbie-generated alternative branching on c_p.
 * Small c_p: the ratio of (1 - sigmoid) terms raised directly to c_n via pow
 * (algebraically the same quantity as exp(log(...) * -c_n)).
 * Large c_p: a quadratic-in-s surrogate raised to -c_p, obtained by Taylor
 * expansion per the derivation log following this block. */
double code(double c_p, double c_n, double t, double s) {
double tmp;
if (c_p <= 5e-92) {
tmp = pow(((1.0 + (-1.0 / (exp(-s) + 1.0))) / (1.0 + (-1.0 / (exp(-t) + 1.0)))), c_n);
} else {
tmp = pow(fma((s * 0.5), fma(s, 0.5, -1.0), 1.0), -c_p);
}
return tmp;
}
function code(c_p, c_n, t, s) tmp = 0.0 if (c_p <= 5e-92) tmp = Float64(Float64(1.0 + Float64(-1.0 / Float64(exp(Float64(-s)) + 1.0))) / Float64(1.0 + Float64(-1.0 / Float64(exp(Float64(-t)) + 1.0)))) ^ c_n; else tmp = fma(Float64(s * 0.5), fma(s, 0.5, -1.0), 1.0) ^ Float64(-c_p); end return tmp end
code[c$95$p_, c$95$n_, t_, s_] := If[LessEqual[c$95$p, 5e-92], N[Power[N[(N[(1.0 + N[(-1.0 / N[(N[Exp[(-s)], $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(1.0 + N[(-1.0 / N[(N[Exp[(-t)], $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], c$95$n], $MachinePrecision], N[Power[N[(N[(s * 0.5), $MachinePrecision] * N[(s * 0.5 + -1.0), $MachinePrecision] + 1.0), $MachinePrecision], (-c$95$p)], $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;c\_p \leq 5 \cdot 10^{-92}:\\
\;\;\;\;{\left(\frac{1 + \frac{-1}{e^{-s} + 1}}{1 + \frac{-1}{e^{-t} + 1}}\right)}^{c\_n}\\
\mathbf{else}:\\
\;\;\;\;{\left(\mathsf{fma}\left(s \cdot 0.5, \mathsf{fma}\left(s, 0.5, -1\right), 1\right)\right)}^{\left(-c\_p\right)}\\
\end{array}
\end{array}
if c_p < 5.00000000000000011e-92Initial program 91.5%
Applied rewrites96.2%
Taylor expanded in c_p around 0
distribute-lft-out--N/A
lower-*.f64N/A
lower--.f64N/A
Applied rewrites99.8%
Applied rewrites99.8%
if 5.00000000000000011e-92 < c_p Initial program 90.2%
Taylor expanded in c_n around 0
lower-/.f64N/A
lower-pow.f64N/A
lower-/.f64N/A
lower-+.f64N/A
lower-exp.f64N/A
lower-neg.f64N/A
lower-pow.f64N/A
lower-/.f64N/A
lower-+.f64N/A
lower-exp.f64N/A
lower-neg.f6490.2
Applied rewrites90.2%
Taylor expanded in s around 0
+-commutativeN/A
lower-fma.f64N/A
sub-negN/A
*-commutativeN/A
metadata-evalN/A
lower-fma.f6490.2
Applied rewrites90.2%
lift-fma.f64N/A
lift-fma.f64N/A
lift-/.f64N/A
*-lft-identityN/A
unpow-prod-downN/A
pow-base-1N/A
lift-pow.f64N/A
lift-neg.f64N/A
lift-exp.f64N/A
lift-+.f64N/A
lift-/.f64N/A
rem-exp-logN/A
rem-exp-logN/A
*-lft-identityN/A
unpow-prod-downN/A
Applied rewrites100.0%
Taylor expanded in t around 0
+-commutativeN/A
distribute-lft-inN/A
associate-*r*N/A
metadata-evalN/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f64N/A
sub-negN/A
metadata-evalN/A
*-commutativeN/A
lower-fma.f64100.0
Applied rewrites100.0%
Final simplification99.9%
(FPCore (c_p c_n t s) :precision binary64 (if (<= (- t) 0.0005) (pow (fma (* s 0.5) (fma s 0.5 -1.0) 1.0) (- c_p)) (pow 0.5 c_n)))
/* Herbie-generated alternative branching on -t.
 * -t <= 5e-4: quadratic-in-s surrogate raised to -c_p.
 * Otherwise: collapses to 0.5^c_n — NOTE(review): this constant-base form
 * comes from the Taylor-expansion steps recorded in the derivation log
 * following this block; accuracy in this regime is lower (45.5% initial). */
double code(double c_p, double c_n, double t, double s) {
double tmp;
if (-t <= 0.0005) {
tmp = pow(fma((s * 0.5), fma(s, 0.5, -1.0), 1.0), -c_p);
} else {
tmp = pow(0.5, c_n);
}
return tmp;
}
function code(c_p, c_n, t, s) tmp = 0.0 if (Float64(-t) <= 0.0005) tmp = fma(Float64(s * 0.5), fma(s, 0.5, -1.0), 1.0) ^ Float64(-c_p); else tmp = 0.5 ^ c_n; end return tmp end
code[c$95$p_, c$95$n_, t_, s_] := If[LessEqual[(-t), 0.0005], N[Power[N[(N[(s * 0.5), $MachinePrecision] * N[(s * 0.5 + -1.0), $MachinePrecision] + 1.0), $MachinePrecision], (-c$95$p)], $MachinePrecision], N[Power[0.5, c$95$n], $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;-t \leq 0.0005:\\
\;\;\;\;{\left(\mathsf{fma}\left(s \cdot 0.5, \mathsf{fma}\left(s, 0.5, -1\right), 1\right)\right)}^{\left(-c\_p\right)}\\
\mathbf{else}:\\
\;\;\;\;{0.5}^{c\_n}\\
\end{array}
\end{array}
if (neg.f64 t) < 5.0000000000000001e-4Initial program 93.1%
Taylor expanded in c_n around 0
lower-/.f64N/A
lower-pow.f64N/A
lower-/.f64N/A
lower-+.f64N/A
lower-exp.f64N/A
lower-neg.f64N/A
lower-pow.f64N/A
lower-/.f64N/A
lower-+.f64N/A
lower-exp.f64N/A
lower-neg.f6495.5
Applied rewrites95.5%
Taylor expanded in s around 0
+-commutativeN/A
lower-fma.f64N/A
sub-negN/A
*-commutativeN/A
metadata-evalN/A
lower-fma.f6495.9
Applied rewrites95.9%
lift-fma.f64N/A
lift-fma.f64N/A
lift-/.f64N/A
*-lft-identityN/A
unpow-prod-downN/A
pow-base-1N/A
lift-pow.f64N/A
lift-neg.f64N/A
lift-exp.f64N/A
lift-+.f64N/A
lift-/.f64N/A
rem-exp-logN/A
rem-exp-logN/A
*-lft-identityN/A
unpow-prod-downN/A
Applied rewrites99.5%
Taylor expanded in t around 0
+-commutativeN/A
distribute-lft-inN/A
associate-*r*N/A
metadata-evalN/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f64N/A
sub-negN/A
metadata-evalN/A
*-commutativeN/A
lower-fma.f6499.5
Applied rewrites99.5%
if 5.0000000000000001e-4 < (neg.f64 t) Initial program 45.5%
Taylor expanded in c_p around 0
lower-/.f64N/A
lower-pow.f64N/A
sub-negN/A
lower-+.f64N/A
distribute-neg-fracN/A
metadata-evalN/A
lower-/.f64N/A
lower-+.f64N/A
lower-exp.f64N/A
lower-neg.f64N/A
lower-pow.f64N/A
Applied rewrites100.0%
Taylor expanded in s around 0
lower-/.f64N/A
lower-pow.f64N/A
lower-pow.f64N/A
sub-negN/A
lower-+.f64N/A
distribute-neg-fracN/A
metadata-evalN/A
lower-/.f64N/A
lower-+.f64N/A
lower-exp.f64N/A
lower-neg.f64100.0
Applied rewrites100.0%
Taylor expanded in c_n around 0
Applied rewrites100.0%
lift-pow.f64N/A
/-rgt-identity100.0
Applied rewrites100.0%
(FPCore (c_p c_n t s) :precision binary64 (if (<= (- s) 0.02) (fma s (fma s (* c_n (fma c_n 0.125 -0.125)) (* c_n -0.5)) 1.0) (pow (fma s (fma s 0.5 -1.0) 2.0) (- c_p))))
/* Herbie-generated alternative branching on -s.
 * -s <= 0.02: degree-2 polynomial in s,
 *   1 - (c_n/2)s + (c_n*(c_n-1)/8)s^2, evaluated with nested fma.
 * Otherwise: fma(s, fma(s, 1/2, -1), 2) = 2 - s + s^2/2, raised to -c_p.
 * Thresholds/coefficients come from Herbie's Taylor expansions (see the
 * derivation log following this block). */
double code(double c_p, double c_n, double t, double s) {
double tmp;
if (-s <= 0.02) {
tmp = fma(s, fma(s, (c_n * fma(c_n, 0.125, -0.125)), (c_n * -0.5)), 1.0);
} else {
tmp = pow(fma(s, fma(s, 0.5, -1.0), 2.0), -c_p);
}
return tmp;
}
function code(c_p, c_n, t, s) tmp = 0.0 if (Float64(-s) <= 0.02) tmp = fma(s, fma(s, Float64(c_n * fma(c_n, 0.125, -0.125)), Float64(c_n * -0.5)), 1.0); else tmp = fma(s, fma(s, 0.5, -1.0), 2.0) ^ Float64(-c_p); end return tmp end
code[c$95$p_, c$95$n_, t_, s_] := If[LessEqual[(-s), 0.02], N[(s * N[(s * N[(c$95$n * N[(c$95$n * 0.125 + -0.125), $MachinePrecision]), $MachinePrecision] + N[(c$95$n * -0.5), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision], N[Power[N[(s * N[(s * 0.5 + -1.0), $MachinePrecision] + 2.0), $MachinePrecision], (-c$95$p)], $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;-s \leq 0.02:\\
\;\;\;\;\mathsf{fma}\left(s, \mathsf{fma}\left(s, c\_n \cdot \mathsf{fma}\left(c\_n, 0.125, -0.125\right), c\_n \cdot -0.5\right), 1\right)\\
\mathbf{else}:\\
\;\;\;\;{\left(\mathsf{fma}\left(s, \mathsf{fma}\left(s, 0.5, -1\right), 2\right)\right)}^{\left(-c\_p\right)}\\
\end{array}
\end{array}
if (neg.f64 s) < 0.0200000000000000004Initial program 91.5%
Taylor expanded in c_p around 0
lower-/.f64N/A
lower-pow.f64N/A
sub-negN/A
lower-+.f64N/A
distribute-neg-fracN/A
metadata-evalN/A
lower-/.f64N/A
lower-+.f64N/A
lower-exp.f64N/A
lower-neg.f64N/A
lower-pow.f64N/A
Applied rewrites96.3%
Taylor expanded in t around 0
lower-/.f64N/A
lower-pow.f64N/A
sub-negN/A
lower-+.f64N/A
neg-mul-1N/A
distribute-neg-fracN/A
metadata-evalN/A
lower-/.f64N/A
lower-+.f64N/A
lower-exp.f64N/A
neg-mul-1N/A
lower-neg.f64N/A
lower-pow.f6493.9
Applied rewrites93.9%
Taylor expanded in s around 0
+-commutativeN/A
lower-fma.f64N/A
+-commutativeN/A
lower-fma.f64N/A
+-commutativeN/A
*-commutativeN/A
unpow2N/A
associate-*l*N/A
*-commutativeN/A
distribute-lft-outN/A
lower-*.f64N/A
lower-fma.f64N/A
lower-*.f6496.4
Applied rewrites96.4%
if 0.0200000000000000004 < (neg.f64 s) Initial program 77.8%
Taylor expanded in c_n around 0
lower-/.f64N/A
lower-pow.f64N/A
lower-/.f64N/A
lower-+.f64N/A
lower-exp.f64N/A
lower-neg.f64N/A
lower-pow.f64N/A
lower-/.f64N/A
lower-+.f64N/A
lower-exp.f64N/A
lower-neg.f6477.8
Applied rewrites77.8%
Taylor expanded in s around 0
+-commutativeN/A
lower-fma.f64N/A
sub-negN/A
*-commutativeN/A
metadata-evalN/A
lower-fma.f6477.8
Applied rewrites77.8%
Taylor expanded in c_p around 0
Applied rewrites100.0%
lift-fma.f64N/A
lift-fma.f64N/A
lift-/.f64N/A
lift-pow.f64N/A
/-rgt-identity100.0
lift-pow.f64N/A
lift-/.f64N/A
inv-powN/A
pow-powN/A
neg-mul-1N/A
lift-neg.f64N/A
lower-pow.f64100.0
Applied rewrites100.0%
Final simplification96.5%
(FPCore (c_p c_n t s) :precision binary64 (if (<= (- t) 0.0005) (fma s (fma s (* c_n (fma c_n 0.125 -0.125)) (* c_n -0.5)) 1.0) (pow 0.5 c_n)))
/* Herbie-generated alternative branching on -t.
 * -t <= 5e-4: degree-2 polynomial in s,
 *   1 - (c_n/2)s + (c_n*(c_n-1)/8)s^2, evaluated with nested fma.
 * Otherwise: 0.5^c_n (constant-base form from the Taylor steps in the
 * derivation log following this block; lower accuracy in that regime). */
double code(double c_p, double c_n, double t, double s) {
double tmp;
if (-t <= 0.0005) {
tmp = fma(s, fma(s, (c_n * fma(c_n, 0.125, -0.125)), (c_n * -0.5)), 1.0);
} else {
tmp = pow(0.5, c_n);
}
return tmp;
}
function code(c_p, c_n, t, s) tmp = 0.0 if (Float64(-t) <= 0.0005) tmp = fma(s, fma(s, Float64(c_n * fma(c_n, 0.125, -0.125)), Float64(c_n * -0.5)), 1.0); else tmp = 0.5 ^ c_n; end return tmp end
code[c$95$p_, c$95$n_, t_, s_] := If[LessEqual[(-t), 0.0005], N[(s * N[(s * N[(c$95$n * N[(c$95$n * 0.125 + -0.125), $MachinePrecision]), $MachinePrecision] + N[(c$95$n * -0.5), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision], N[Power[0.5, c$95$n], $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;-t \leq 0.0005:\\
\;\;\;\;\mathsf{fma}\left(s, \mathsf{fma}\left(s, c\_n \cdot \mathsf{fma}\left(c\_n, 0.125, -0.125\right), c\_n \cdot -0.5\right), 1\right)\\
\mathbf{else}:\\
\;\;\;\;{0.5}^{c\_n}\\
\end{array}
\end{array}
if (neg.f64 t) < 5.0000000000000001e-4Initial program 93.1%
Taylor expanded in c_p around 0
lower-/.f64N/A
lower-pow.f64N/A
sub-negN/A
lower-+.f64N/A
distribute-neg-fracN/A
metadata-evalN/A
lower-/.f64N/A
lower-+.f64N/A
lower-exp.f64N/A
lower-neg.f64N/A
lower-pow.f64N/A
Applied rewrites93.9%
Taylor expanded in t around 0
lower-/.f64N/A
lower-pow.f64N/A
sub-negN/A
lower-+.f64N/A
neg-mul-1N/A
distribute-neg-fracN/A
metadata-evalN/A
lower-/.f64N/A
lower-+.f64N/A
lower-exp.f64N/A
neg-mul-1N/A
lower-neg.f64N/A
lower-pow.f6493.9
Applied rewrites93.9%
Taylor expanded in s around 0
+-commutativeN/A
lower-fma.f64N/A
+-commutativeN/A
lower-fma.f64N/A
+-commutativeN/A
*-commutativeN/A
unpow2N/A
associate-*l*N/A
*-commutativeN/A
distribute-lft-outN/A
lower-*.f64N/A
lower-fma.f64N/A
lower-*.f6496.4
Applied rewrites96.4%
if 5.0000000000000001e-4 < (neg.f64 t) Initial program 45.5%
Taylor expanded in c_p around 0
lower-/.f64N/A
lower-pow.f64N/A
sub-negN/A
lower-+.f64N/A
distribute-neg-fracN/A
metadata-evalN/A
lower-/.f64N/A
lower-+.f64N/A
lower-exp.f64N/A
lower-neg.f64N/A
lower-pow.f64N/A
Applied rewrites100.0%
Taylor expanded in s around 0
lower-/.f64N/A
lower-pow.f64N/A
lower-pow.f64N/A
sub-negN/A
lower-+.f64N/A
distribute-neg-fracN/A
metadata-evalN/A
lower-/.f64N/A
lower-+.f64N/A
lower-exp.f64N/A
lower-neg.f64100.0
Applied rewrites100.0%
Taylor expanded in c_n around 0
Applied rewrites100.0%
lift-pow.f64N/A
/-rgt-identity100.0
Applied rewrites100.0%
Final simplification96.5%
(FPCore (c_p c_n t s) :precision binary64 (fma s (fma s (* c_n (fma c_n 0.125 -0.125)) (* c_n -0.5)) 1.0))
double code(double c_p, double c_n, double t, double s) {
return fma(s, fma(s, (c_n * fma(c_n, 0.125, -0.125)), (c_n * -0.5)), 1.0);
}
function code(c_p, c_n, t, s) return fma(s, fma(s, Float64(c_n * fma(c_n, 0.125, -0.125)), Float64(c_n * -0.5)), 1.0) end
code[c$95$p_, c$95$n_, t_, s_] := N[(s * N[(s * N[(c$95$n * N[(c$95$n * 0.125 + -0.125), $MachinePrecision]), $MachinePrecision] + N[(c$95$n * -0.5), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(s, \mathsf{fma}\left(s, c\_n \cdot \mathsf{fma}\left(c\_n, 0.125, -0.125\right), c\_n \cdot -0.5\right), 1\right)
\end{array}
Initial program 91.0%
Taylor expanded in c_p around 0
lower-/.f64N/A
lower-pow.f64N/A
sub-negN/A
lower-+.f64N/A
distribute-neg-fracN/A
metadata-evalN/A
lower-/.f64N/A
lower-+.f64N/A
lower-exp.f64N/A
lower-neg.f64N/A
lower-pow.f64N/A
Applied rewrites94.2%
Taylor expanded in t around 0
lower-/.f64N/A
lower-pow.f64N/A
sub-negN/A
lower-+.f64N/A
neg-mul-1N/A
distribute-neg-fracN/A
metadata-evalN/A
lower-/.f64N/A
lower-+.f64N/A
lower-exp.f64N/A
neg-mul-1N/A
lower-neg.f64N/A
lower-pow.f6491.8
Applied rewrites91.8%
Taylor expanded in s around 0
+-commutativeN/A
lower-fma.f64N/A
+-commutativeN/A
lower-fma.f64N/A
+-commutativeN/A
*-commutativeN/A
unpow2N/A
associate-*l*N/A
*-commutativeN/A
distribute-lft-outN/A
lower-*.f64N/A
lower-fma.f64N/A
lower-*.f6494.2
Applied rewrites94.2%
Final simplification94.2%
(FPCore (c_p c_n t s) :precision binary64 (fma s (* c_n -0.5) 1.0))
double code(double c_p, double c_n, double t, double s) {
return fma(s, (c_n * -0.5), 1.0);
}
function code(c_p, c_n, t, s) return fma(s, Float64(c_n * -0.5), 1.0) end
code[c$95$p_, c$95$n_, t_, s_] := N[(s * N[(c$95$n * -0.5), $MachinePrecision] + 1.0), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(s, c\_n \cdot -0.5, 1\right)
\end{array}
Initial program 91.0%
Taylor expanded in c_p around 0
lower-/.f64N/A
lower-pow.f64N/A
sub-negN/A
lower-+.f64N/A
distribute-neg-fracN/A
metadata-evalN/A
lower-/.f64N/A
lower-+.f64N/A
lower-exp.f64N/A
lower-neg.f64N/A
lower-pow.f64N/A
Applied rewrites94.2%
Taylor expanded in t around 0
lower-/.f64N/A
lower-pow.f64N/A
sub-negN/A
lower-+.f64N/A
neg-mul-1N/A
distribute-neg-fracN/A
metadata-evalN/A
lower-/.f64N/A
lower-+.f64N/A
lower-exp.f64N/A
neg-mul-1N/A
lower-neg.f64N/A
lower-pow.f6491.8
Applied rewrites91.8%
Taylor expanded in s around 0
associate-*r*N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
lower-*.f6494.2
Applied rewrites94.2%
Final simplification94.2%
(FPCore (c_p c_n t s) :precision binary64 1.0)
/* Fully degenerate alternative: the constant 1.0 regardless of inputs. */
double code(double c_p, double c_n, double t, double s) {
    (void)c_p; (void)c_n; (void)t; (void)s; /* kept for interface parity */
    return 1.0;
}
real(8) function code(c_p, c_n, t, s)
real(8), intent (in) :: c_p
real(8), intent (in) :: c_n
real(8), intent (in) :: t
real(8), intent (in) :: s
code = 1.0d0
end function
public static double code(double c_p, double c_n, double t, double s) {
return 1.0;
}
def code(c_p, c_n, t, s):
    """Constant alternative: always 1.0, independent of all inputs."""
    result = 1.0
    return result
function code(c_p, c_n, t, s) return 1.0 end
function tmp = code(c_p, c_n, t, s) tmp = 1.0; end
code[c$95$p_, c$95$n_, t_, s_] := 1.0
\begin{array}{l}
\\
1
\end{array}
Initial program 91.0%
Taylor expanded in c_n around 0
lower-/.f64N/A
lower-pow.f64N/A
lower-/.f64N/A
lower-+.f64N/A
lower-exp.f64N/A
lower-neg.f64N/A
lower-pow.f64N/A
lower-/.f64N/A
lower-+.f64N/A
lower-exp.f64N/A
lower-neg.f6493.4
Applied rewrites93.4%
Taylor expanded in c_p around 0
Applied rewrites94.2%
(FPCore (c_p c_n t s) :precision binary64 (* (pow (/ (+ 1.0 (exp (- t))) (+ 1.0 (exp (- s)))) c_p) (pow (/ (+ 1.0 (exp t)) (+ 1.0 (exp s))) c_n)))
double code(double c_p, double c_n, double t, double s) {
return pow(((1.0 + exp(-t)) / (1.0 + exp(-s))), c_p) * pow(((1.0 + exp(t)) / (1.0 + exp(s))), c_n);
}
real(8) function code(c_p, c_n, t, s)
real(8), intent (in) :: c_p
real(8), intent (in) :: c_n
real(8), intent (in) :: t
real(8), intent (in) :: s
code = (((1.0d0 + exp(-t)) / (1.0d0 + exp(-s))) ** c_p) * (((1.0d0 + exp(t)) / (1.0d0 + exp(s))) ** c_n)
end function
public static double code(double c_p, double c_n, double t, double s) {
return Math.pow(((1.0 + Math.exp(-t)) / (1.0 + Math.exp(-s))), c_p) * Math.pow(((1.0 + Math.exp(t)) / (1.0 + Math.exp(s))), c_n);
}
def code(c_p, c_n, t, s):
    """Evaluate ((1+e^-t)/(1+e^-s))**c_p * ((1+e^t)/(1+e^s))**c_n."""
    ratio_neg = (1.0 + math.exp(-t)) / (1.0 + math.exp(-s))
    ratio_pos = (1.0 + math.exp(t)) / (1.0 + math.exp(s))
    return math.pow(ratio_neg, c_p) * math.pow(ratio_pos, c_n)
function code(c_p, c_n, t, s) return Float64((Float64(Float64(1.0 + exp(Float64(-t))) / Float64(1.0 + exp(Float64(-s)))) ^ c_p) * (Float64(Float64(1.0 + exp(t)) / Float64(1.0 + exp(s))) ^ c_n)) end
function tmp = code(c_p, c_n, t, s) tmp = (((1.0 + exp(-t)) / (1.0 + exp(-s))) ^ c_p) * (((1.0 + exp(t)) / (1.0 + exp(s))) ^ c_n); end
code[c$95$p_, c$95$n_, t_, s_] := N[(N[Power[N[(N[(1.0 + N[Exp[(-t)], $MachinePrecision]), $MachinePrecision] / N[(1.0 + N[Exp[(-s)], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], c$95$p], $MachinePrecision] * N[Power[N[(N[(1.0 + N[Exp[t], $MachinePrecision]), $MachinePrecision] / N[(1.0 + N[Exp[s], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], c$95$n], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
{\left(\frac{1 + e^{-t}}{1 + e^{-s}}\right)}^{c\_p} \cdot {\left(\frac{1 + e^{t}}{1 + e^{s}}\right)}^{c\_n}
\end{array}
herbie shell --seed 2024219
(FPCore (c_p c_n t s)
:name "Harley's example"
:precision binary64
:pre (and (< 0.0 c_p) (< 0.0 c_n))
:alt
(! :herbie-platform default (* (pow (/ (+ 1 (exp (- t))) (+ 1 (exp (- s)))) c_p) (pow (/ (+ 1 (exp t)) (+ 1 (exp s))) c_n)))
(/ (* (pow (/ 1.0 (+ 1.0 (exp (- s)))) c_p) (pow (- 1.0 (/ 1.0 (+ 1.0 (exp (- s))))) c_n)) (* (pow (/ 1.0 (+ 1.0 (exp (- t)))) c_p) (pow (- 1.0 (/ 1.0 (+ 1.0 (exp (- t))))) c_n))))