
(FPCore (c_p c_n t s)
:precision binary64
(let* ((t_1 (/ 1.0 (+ 1.0 (exp (- t))))) (t_2 (/ 1.0 (+ 1.0 (exp (- s))))))
(/
(* (pow t_2 c_p) (pow (- 1.0 t_2) c_n))
(* (pow t_1 c_p) (pow (- 1.0 t_1) c_n)))))
double code(double c_p, double c_n, double t, double s) {
double t_1 = 1.0 / (1.0 + exp(-t));
double t_2 = 1.0 / (1.0 + exp(-s));
return (pow(t_2, c_p) * pow((1.0 - t_2), c_n)) / (pow(t_1, c_p) * pow((1.0 - t_1), c_n));
}
real(8) function code(c_p, c_n, t, s)
real(8), intent (in) :: c_p
real(8), intent (in) :: c_n
real(8), intent (in) :: t
real(8), intent (in) :: s
real(8) :: t_1
real(8) :: t_2
t_1 = 1.0d0 / (1.0d0 + exp(-t))
t_2 = 1.0d0 / (1.0d0 + exp(-s))
code = ((t_2 ** c_p) * ((1.0d0 - t_2) ** c_n)) / ((t_1 ** c_p) * ((1.0d0 - t_1) ** c_n))
end function
public static double code(double c_p, double c_n, double t, double s) {
double t_1 = 1.0 / (1.0 + Math.exp(-t));
double t_2 = 1.0 / (1.0 + Math.exp(-s));
return (Math.pow(t_2, c_p) * Math.pow((1.0 - t_2), c_n)) / (Math.pow(t_1, c_p) * Math.pow((1.0 - t_1), c_n));
}
def code(c_p, c_n, t, s):
    t_1 = 1.0 / (1.0 + math.exp(-t))
    t_2 = 1.0 / (1.0 + math.exp(-s))
    return (math.pow(t_2, c_p) * math.pow((1.0 - t_2), c_n)) / (math.pow(t_1, c_p) * math.pow((1.0 - t_1), c_n))
function code(c_p, c_n, t, s)
    t_1 = Float64(1.0 / Float64(1.0 + exp(Float64(-t))))
    t_2 = Float64(1.0 / Float64(1.0 + exp(Float64(-s))))
    return Float64(Float64((t_2 ^ c_p) * (Float64(1.0 - t_2) ^ c_n)) / Float64((t_1 ^ c_p) * (Float64(1.0 - t_1) ^ c_n)))
end
function tmp = code(c_p, c_n, t, s)
    t_1 = 1.0 / (1.0 + exp(-t));
    t_2 = 1.0 / (1.0 + exp(-s));
    tmp = ((t_2 ^ c_p) * ((1.0 - t_2) ^ c_n)) / ((t_1 ^ c_p) * ((1.0 - t_1) ^ c_n));
end
code[c$95$p_, c$95$n_, t_, s_] := Block[{t$95$1 = N[(1.0 / N[(1.0 + N[Exp[(-t)], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$2 = N[(1.0 / N[(1.0 + N[Exp[(-s)], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, N[(N[(N[Power[t$95$2, c$95$p], $MachinePrecision] * N[Power[N[(1.0 - t$95$2), $MachinePrecision], c$95$n], $MachinePrecision]), $MachinePrecision] / N[(N[Power[t$95$1, c$95$p], $MachinePrecision] * N[Power[N[(1.0 - t$95$1), $MachinePrecision], c$95$n], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_1 := \frac{1}{1 + e^{-t}}\\
t_2 := \frac{1}{1 + e^{-s}}\\
\frac{{t\_2}^{c\_p} \cdot {\left(1 - t\_2\right)}^{c\_n}}{{t\_1}^{c\_p} \cdot {\left(1 - t\_1\right)}^{c\_n}}
\end{array}
\end{array}
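Evaluated directly, the pow calls and the 1.0 - t_2 subtraction can underflow or lose all significant bits for large inputs. A quick illustrative check (the sample inputs and the log-space reference below are my own additions, not part of the Herbie report; the reference is just an algebraically equivalent rearrangement used for comparison):

import math

def code(c_p, c_n, t, s):
    # Initial program, exactly as listed above.
    t_1 = 1.0 / (1.0 + math.exp(-t))
    t_2 = 1.0 / (1.0 + math.exp(-s))
    num = math.pow(t_2, c_p) * math.pow(1.0 - t_2, c_n)
    den = math.pow(t_1, c_p) * math.pow(1.0 - t_1, c_n)
    return num / den

def log_space_reference(c_p, c_n, t, s):
    # Algebraically equivalent rearrangement, used here only as a rough reference:
    #   log R = (c_p + c_n) * (log1p(exp(-t)) - log1p(exp(-s))) + c_n * (t - s)
    log_r = (c_p + c_n) * (math.log1p(math.exp(-t)) - math.log1p(math.exp(-s))) + c_n * (t - s)
    return math.exp(log_r)

# Illustrative points satisfying the precondition 0 < c_p and 0 < c_n.
for args in [(3.0, 4.0, 1.0, 2.0), (2.0, 3.0, 1.0, 40.0)]:
    print(args, code(*args), log_space_reference(*args))
# At the second point, t_2 rounds to exactly 1.0, so 1.0 - t_2 is 0.0 and the
# direct evaluation collapses to 0.0, while the rearranged form still returns
# a tiny nonzero value.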
Sampling outcomes in binary64 precision:
Herbie found 9 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
Initial program (89.6% accurate):
(FPCore (c_p c_n t s)
:precision binary64
(let* ((t_1 (/ 1.0 (+ 1.0 (exp (- t))))) (t_2 (/ 1.0 (+ 1.0 (exp (- s))))))
(/
(* (pow t_2 c_p) (pow (- 1.0 t_2) c_n))
(* (pow t_1 c_p) (pow (- 1.0 t_1) c_n)))))
double code(double c_p, double c_n, double t, double s) {
double t_1 = 1.0 / (1.0 + exp(-t));
double t_2 = 1.0 / (1.0 + exp(-s));
return (pow(t_2, c_p) * pow((1.0 - t_2), c_n)) / (pow(t_1, c_p) * pow((1.0 - t_1), c_n));
}
real(8) function code(c_p, c_n, t, s)
real(8), intent (in) :: c_p
real(8), intent (in) :: c_n
real(8), intent (in) :: t
real(8), intent (in) :: s
real(8) :: t_1
real(8) :: t_2
t_1 = 1.0d0 / (1.0d0 + exp(-t))
t_2 = 1.0d0 / (1.0d0 + exp(-s))
code = ((t_2 ** c_p) * ((1.0d0 - t_2) ** c_n)) / ((t_1 ** c_p) * ((1.0d0 - t_1) ** c_n))
end function
public static double code(double c_p, double c_n, double t, double s) {
double t_1 = 1.0 / (1.0 + Math.exp(-t));
double t_2 = 1.0 / (1.0 + Math.exp(-s));
return (Math.pow(t_2, c_p) * Math.pow((1.0 - t_2), c_n)) / (Math.pow(t_1, c_p) * Math.pow((1.0 - t_1), c_n));
}
def code(c_p, c_n, t, s):
    t_1 = 1.0 / (1.0 + math.exp(-t))
    t_2 = 1.0 / (1.0 + math.exp(-s))
    return (math.pow(t_2, c_p) * math.pow((1.0 - t_2), c_n)) / (math.pow(t_1, c_p) * math.pow((1.0 - t_1), c_n))
function code(c_p, c_n, t, s)
    t_1 = Float64(1.0 / Float64(1.0 + exp(Float64(-t))))
    t_2 = Float64(1.0 / Float64(1.0 + exp(Float64(-s))))
    return Float64(Float64((t_2 ^ c_p) * (Float64(1.0 - t_2) ^ c_n)) / Float64((t_1 ^ c_p) * (Float64(1.0 - t_1) ^ c_n)))
end
function tmp = code(c_p, c_n, t, s)
    t_1 = 1.0 / (1.0 + exp(-t));
    t_2 = 1.0 / (1.0 + exp(-s));
    tmp = ((t_2 ^ c_p) * ((1.0 - t_2) ^ c_n)) / ((t_1 ^ c_p) * ((1.0 - t_1) ^ c_n));
end
code[c$95$p_, c$95$n_, t_, s_] := Block[{t$95$1 = N[(1.0 / N[(1.0 + N[Exp[(-t)], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$2 = N[(1.0 / N[(1.0 + N[Exp[(-s)], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, N[(N[(N[Power[t$95$2, c$95$p], $MachinePrecision] * N[Power[N[(1.0 - t$95$2), $MachinePrecision], c$95$n], $MachinePrecision]), $MachinePrecision] / N[(N[Power[t$95$1, c$95$p], $MachinePrecision] * N[Power[N[(1.0 - t$95$1), $MachinePrecision], c$95$n], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_1 := \frac{1}{1 + e^{-t}}\\
t_2 := \frac{1}{1 + e^{-s}}\\
\frac{{t\_2}^{c\_p} \cdot {\left(1 - t\_2\right)}^{c\_n}}{{t\_1}^{c\_p} \cdot {\left(1 - t\_1\right)}^{c\_n}}
\end{array}
\end{array}
Alternative 1 (99.5% accurate):
(FPCore (c_p c_n t s) :precision binary64 (exp (fma s (fma -0.5 (- c_n c_p) (* -0.125 (* s (+ c_n c_p)))) (* -0.5 (* (- c_p c_n) t)))))
double code(double c_p, double c_n, double t, double s) {
return exp(fma(s, fma(-0.5, (c_n - c_p), (-0.125 * (s * (c_n + c_p)))), (-0.5 * ((c_p - c_n) * t))));
}
function code(c_p, c_n, t, s) return exp(fma(s, fma(-0.5, Float64(c_n - c_p), Float64(-0.125 * Float64(s * Float64(c_n + c_p)))), Float64(-0.5 * Float64(Float64(c_p - c_n) * t)))) end
code[c$95$p_, c$95$n_, t_, s_] := N[Exp[N[(s * N[(-0.5 * N[(c$95$n - c$95$p), $MachinePrecision] + N[(-0.125 * N[(s * N[(c$95$n + c$95$p), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(-0.5 * N[(N[(c$95$p - c$95$n), $MachinePrecision] * t), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
e^{\mathsf{fma}\left(s, \mathsf{fma}\left(-0.5, c\_n - c\_p, -0.125 \cdot \left(s \cdot \left(c\_n + c\_p\right)\right)\right), -0.5 \cdot \left(\left(c\_p - c\_n\right) \cdot t\right)\right)}
\end{array}
Initial program 89.6%
Applied rewrites 94.1%
Taylor expanded in t around 0
lower-fma.f64 N/A
lower--.f64 N/A
sub-neg N/A
lower-log1p.f64 N/A
distribute-neg-frac N/A
metadata-eval N/A
lower-/.f64 N/A
lower-+.f64 N/A
lower-exp.f64 N/A
lower-neg.f64 N/A
lower-log.f64 N/A
Applied rewrites 98.1%
Taylor expanded in s around 0
lower-fma.f64 N/A
Applied rewrites 99.5%
Final simplification 99.5%
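A hand derivation consistent with the Taylor steps listed above (a reconstruction, not Herbie's literal trace): writing R for the original ratio and using log(1 + e^{-x}) for the sigmoid denominators,
\begin{array}{l}
\log R = \left(c\_p + c\_n\right) \cdot \left(\log\left(1 + e^{-t}\right) - \log\left(1 + e^{-s}\right)\right) + c\_n \cdot \left(t - s\right)\\
\log\left(1 + e^{-x}\right) = \ln 2 - \frac{x}{2} + \frac{x^{2}}{8} + O\left(x^{4}\right)\\
\log R \approx -0.5 \cdot \left(c\_p - c\_n\right) \cdot t + s \cdot \left(0.5 \cdot \left(c\_p - c\_n\right) - 0.125 \cdot \left(c\_p + c\_n\right) \cdot s\right)
\end{array}
Truncating to first order in t and second order in s, and noting that -0.5 (c_n - c_p) = 0.5 (c_p - c_n), gives exactly the fma nesting above.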
Alternative 2 (99.4% accurate):
(FPCore (c_p c_n t s) :precision binary64 (exp (fma -0.5 (* (- c_p c_n) t) (* s (* -0.5 (- c_n c_p))))))
double code(double c_p, double c_n, double t, double s) {
return exp(fma(-0.5, ((c_p - c_n) * t), (s * (-0.5 * (c_n - c_p)))));
}
function code(c_p, c_n, t, s) return exp(fma(-0.5, Float64(Float64(c_p - c_n) * t), Float64(s * Float64(-0.5 * Float64(c_n - c_p))))) end
code[c$95$p_, c$95$n_, t_, s_] := N[Exp[N[(-0.5 * N[(N[(c$95$p - c$95$n), $MachinePrecision] * t), $MachinePrecision] + N[(s * N[(-0.5 * N[(c$95$n - c$95$p), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
e^{\mathsf{fma}\left(-0.5, \left(c\_p - c\_n\right) \cdot t, s \cdot \left(-0.5 \cdot \left(c\_n - c\_p\right)\right)\right)}
\end{array}
Initial program 89.6%
Applied rewrites 94.1%
Taylor expanded in t around 0
lower-fma.f64 N/A
lower--.f64 N/A
sub-neg N/A
lower-log1p.f64 N/A
distribute-neg-frac N/A
metadata-eval N/A
lower-/.f64 N/A
lower-+.f64 N/A
lower-exp.f64 N/A
lower-neg.f64 N/A
lower-log.f64 N/A
Applied rewrites 98.1%
Taylor expanded in s around 0
+-commutative N/A
*-commutative N/A
metadata-eval N/A
cancel-sign-sub-inv N/A
distribute-lft-out-- N/A
associate-*l* N/A
lower-fma.f64 N/A
lower-*.f64 N/A
lower--.f64 N/A
lower-*.f64 N/A
metadata-eval N/A
associate-*r* N/A
metadata-eval N/A
associate-*r* N/A
distribute-lft-in N/A
metadata-eval N/A
cancel-sign-sub-inv N/A
distribute-lft-out-- N/A
associate-*r* N/A
metadata-eval N/A
lower-*.f64 N/A
lower--.f64 99.4
Applied rewrites 99.4%
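Since -0.5 (c_n - c_p) = 0.5 (c_p - c_n), this alternative can also be read as (this rewriting is mine, not stated in the report)
\begin{array}{l}
e^{0.5 \cdot \left(c\_p - c\_n\right) \cdot \left(s - t\right)}
\end{array}
that is, the previous alternative with its second-order s term dropped.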
Alternative 3 (98.6% accurate):
(FPCore (c_p c_n t s) :precision binary64 (exp (* c_n (fma t 0.5 (* s (fma s -0.125 -0.5))))))
double code(double c_p, double c_n, double t, double s) {
return exp((c_n * fma(t, 0.5, (s * fma(s, -0.125, -0.5)))));
}
function code(c_p, c_n, t, s) return exp(Float64(c_n * fma(t, 0.5, Float64(s * fma(s, -0.125, -0.5))))) end
code[c$95$p_, c$95$n_, t_, s_] := N[Exp[N[(c$95$n * N[(t * 0.5 + N[(s * N[(s * -0.125 + -0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
e^{c\_n \cdot \mathsf{fma}\left(t, 0.5, s \cdot \mathsf{fma}\left(s, -0.125, -0.5\right)\right)}
\end{array}
Initial program 89.6%
Applied rewrites 94.1%
Taylor expanded in t around 0
lower-fma.f64 N/A
lower--.f64 N/A
sub-neg N/A
lower-log1p.f64 N/A
distribute-neg-frac N/A
metadata-eval N/A
lower-/.f64 N/A
lower-+.f64 N/A
lower-exp.f64 N/A
lower-neg.f64 N/A
lower-log.f64 N/A
Applied rewrites 98.1%
Taylor expanded in s around 0
lower-fma.f64 N/A
Applied rewrites 99.5%
Taylor expanded in c_n around inf
lower-*.f64 N/A
*-commutative N/A
lower-fma.f64 N/A
lower-*.f64 N/A
sub-neg N/A
*-commutative N/A
metadata-eval N/A
lower-fma.f64 98.6
Applied rewrites 98.6%
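The closing step, a Taylor expansion in c_n around infinity, keeps only the part of the previous exponent that grows with c_n (again a reconstruction of the named step, not Herbie's literal trace):
\begin{array}{l}
-0.5 \cdot \left(c\_p - c\_n\right) \cdot t + 0.5 \cdot \left(c\_p - c\_n\right) \cdot s - 0.125 \cdot \left(c\_p + c\_n\right) \cdot s^{2} \;\to\; c\_n \cdot \left(0.5 \cdot t - 0.5 \cdot s - 0.125 \cdot s^{2}\right)
\end{array}
which is the c_n * fma(t, 0.5, s * fma(s, -0.125, -0.5)) exponent above.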
Alternative 4 (98.1% accurate):
(FPCore (c_p c_n t s) :precision binary64 (exp (* -0.5 (* s (- c_n c_p)))))
double code(double c_p, double c_n, double t, double s) {
return exp((-0.5 * (s * (c_n - c_p))));
}
real(8) function code(c_p, c_n, t, s)
real(8), intent (in) :: c_p
real(8), intent (in) :: c_n
real(8), intent (in) :: t
real(8), intent (in) :: s
code = exp(((-0.5d0) * (s * (c_n - c_p))))
end function
public static double code(double c_p, double c_n, double t, double s) {
return Math.exp((-0.5 * (s * (c_n - c_p))));
}
def code(c_p, c_n, t, s): return math.exp((-0.5 * (s * (c_n - c_p))))
function code(c_p, c_n, t, s) return exp(Float64(-0.5 * Float64(s * Float64(c_n - c_p)))) end
function tmp = code(c_p, c_n, t, s)
    tmp = exp((-0.5 * (s * (c_n - c_p))));
end
code[c$95$p_, c$95$n_, t_, s_] := N[Exp[N[(-0.5 * N[(s * N[(c$95$n - c$95$p), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
e^{-0.5 \cdot \left(s \cdot \left(c\_n - c\_p\right)\right)}
\end{array}
Initial program 89.6%
Applied rewrites 94.1%
Taylor expanded in t around 0
lower-fma.f64 N/A
lower--.f64 N/A
sub-neg N/A
lower-log1p.f64 N/A
distribute-neg-frac N/A
metadata-eval N/A
lower-/.f64 N/A
lower-+.f64 N/A
lower-exp.f64 N/A
lower-neg.f64 N/A
lower-log.f64 N/A
Applied rewrites 98.1%
Taylor expanded in s around 0
+-commutative N/A
*-commutative N/A
metadata-eval N/A
cancel-sign-sub-inv N/A
distribute-lft-out-- N/A
associate-*l* N/A
lower-fma.f64 N/A
lower-*.f64 N/A
lower--.f64 N/A
lower-*.f64 N/A
metadata-eval N/A
associate-*r* N/A
metadata-eval N/A
associate-*r* N/A
distribute-lft-in N/A
metadata-eval N/A
cancel-sign-sub-inv N/A
distribute-lft-out-- N/A
associate-*r* N/A
metadata-eval N/A
lower-*.f64 N/A
lower--.f64 99.4
Applied rewrites 99.4%
Taylor expanded in t around 0
lower-*.f64 N/A
lower-*.f64 N/A
lower--.f64 98.1
Applied rewrites 98.1%
Alternative 5 (96.3% accurate):
(FPCore (c_p c_n t s) :precision binary64 (exp (* 0.5 (* c_n t))))
double code(double c_p, double c_n, double t, double s) {
return exp((0.5 * (c_n * t)));
}
real(8) function code(c_p, c_n, t, s)
real(8), intent (in) :: c_p
real(8), intent (in) :: c_n
real(8), intent (in) :: t
real(8), intent (in) :: s
code = exp((0.5d0 * (c_n * t)))
end function
public static double code(double c_p, double c_n, double t, double s) {
return Math.exp((0.5 * (c_n * t)));
}
def code(c_p, c_n, t, s): return math.exp((0.5 * (c_n * t)))
function code(c_p, c_n, t, s) return exp(Float64(0.5 * Float64(c_n * t))) end
function tmp = code(c_p, c_n, t, s)
    tmp = exp((0.5 * (c_n * t)));
end
code[c$95$p_, c$95$n_, t_, s_] := N[Exp[N[(0.5 * N[(c$95$n * t), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
e^{0.5 \cdot \left(c\_n \cdot t\right)}
\end{array}
Initial program 89.6%
Applied rewrites 94.1%
Taylor expanded in t around 0
lower-fma.f64 N/A
lower--.f64 N/A
sub-neg N/A
lower-log1p.f64 N/A
distribute-neg-frac N/A
metadata-eval N/A
lower-/.f64 N/A
lower-+.f64 N/A
lower-exp.f64 N/A
lower-neg.f64 N/A
lower-log.f64 N/A
Applied rewrites 98.1%
Taylor expanded in s around 0
lower-exp.f64 N/A
*-commutative N/A
metadata-eval N/A
cancel-sign-sub-inv N/A
distribute-lft-out-- N/A
associate-*l* N/A
lower-*.f64 N/A
lower-*.f64 N/A
lower--.f64 96.8
Applied rewrites 96.8%
Taylor expanded in c_p around 0
lower-exp.f64 N/A
lower-*.f64 N/A
*-commutative N/A
lower-*.f64 96.3
Applied rewrites 96.3%
Final simplification 96.3%
Alternative 6 (95.3% accurate):
(FPCore (c_p c_n t s) :precision binary64 (exp (* -0.5 (* c_p t))))
double code(double c_p, double c_n, double t, double s) {
return exp((-0.5 * (c_p * t)));
}
real(8) function code(c_p, c_n, t, s)
real(8), intent (in) :: c_p
real(8), intent (in) :: c_n
real(8), intent (in) :: t
real(8), intent (in) :: s
code = exp(((-0.5d0) * (c_p * t)))
end function
public static double code(double c_p, double c_n, double t, double s) {
return Math.exp((-0.5 * (c_p * t)));
}
def code(c_p, c_n, t, s): return math.exp((-0.5 * (c_p * t)))
function code(c_p, c_n, t, s) return exp(Float64(-0.5 * Float64(c_p * t))) end
function tmp = code(c_p, c_n, t, s)
    tmp = exp((-0.5 * (c_p * t)));
end
code[c$95$p_, c$95$n_, t_, s_] := N[Exp[N[(-0.5 * N[(c$95$p * t), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
e^{-0.5 \cdot \left(c\_p \cdot t\right)}
\end{array}
Initial program 89.6%
Applied rewrites 94.1%
Taylor expanded in t around 0
lower-fma.f64 N/A
lower--.f64 N/A
sub-neg N/A
lower-log1p.f64 N/A
distribute-neg-frac N/A
metadata-eval N/A
lower-/.f64 N/A
lower-+.f64 N/A
lower-exp.f64 N/A
lower-neg.f64 N/A
lower-log.f64 N/A
Applied rewrites 98.1%
Taylor expanded in s around 0
lower-exp.f64 N/A
*-commutative N/A
metadata-eval N/A
cancel-sign-sub-inv N/A
distribute-lft-out-- N/A
associate-*l* N/A
lower-*.f64 N/A
lower-*.f64 N/A
lower--.f64 96.8
Applied rewrites 96.8%
Taylor expanded in c_n around 0
lower-exp.f64 N/A
lower-*.f64 N/A
*-commutative N/A
lower-*.f64 95.3
Applied rewrites 95.3%
Final simplification 95.3%
Alternative 7 (94.9% accurate):
(FPCore (c_p c_n t s) :precision binary64 (fma t (fma (* t 0.125) (* (- c_p c_n) (- c_p c_n)) (* -0.5 (- c_p c_n))) 1.0))
double code(double c_p, double c_n, double t, double s) {
return fma(t, fma((t * 0.125), ((c_p - c_n) * (c_p - c_n)), (-0.5 * (c_p - c_n))), 1.0);
}
function code(c_p, c_n, t, s) return fma(t, fma(Float64(t * 0.125), Float64(Float64(c_p - c_n) * Float64(c_p - c_n)), Float64(-0.5 * Float64(c_p - c_n))), 1.0) end
code[c$95$p_, c$95$n_, t_, s_] := N[(t * N[(N[(t * 0.125), $MachinePrecision] * N[(N[(c$95$p - c$95$n), $MachinePrecision] * N[(c$95$p - c$95$n), $MachinePrecision]), $MachinePrecision] + N[(-0.5 * N[(c$95$p - c$95$n), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(t, \mathsf{fma}\left(t \cdot 0.125, \left(c\_p - c\_n\right) \cdot \left(c\_p - c\_n\right), -0.5 \cdot \left(c\_p - c\_n\right)\right), 1\right)
\end{array}
Initial program 89.6%
Applied rewrites 94.1%
Taylor expanded in t around 0
lower-fma.f64 N/A
lower--.f64 N/A
sub-neg N/A
lower-log1p.f64 N/A
distribute-neg-frac N/A
metadata-eval N/A
lower-/.f64 N/A
lower-+.f64 N/A
lower-exp.f64 N/A
lower-neg.f64 N/A
lower-log.f64 N/A
Applied rewrites 98.1%
Taylor expanded in s around 0
lower-exp.f64 N/A
*-commutative N/A
metadata-eval N/A
cancel-sign-sub-inv N/A
distribute-lft-out-- N/A
associate-*l* N/A
lower-*.f64 N/A
lower-*.f64 N/A
lower--.f64 96.8
Applied rewrites 96.8%
Taylor expanded in t around 0
+-commutative N/A
lower-fma.f64 N/A
+-commutative N/A
associate-*r* N/A
lower-fma.f64 N/A
lower-*.f64 N/A
unpow2 N/A
lower-*.f64 N/A
lower--.f64 N/A
lower--.f64 N/A
lower-*.f64 N/A
lower--.f64 94.9
Applied rewrites 94.9%
Final simplification 94.9%
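This alternative drops the exponential entirely: with x = -0.5 (c_p - c_n) t, it is the second-order Maclaurin polynomial of e^x (the grouping below is mine, but it matches the nested fma above):
\begin{array}{l}
e^{x} \approx 1 + x + \frac{x^{2}}{2} = 1 - 0.5 \cdot \left(c\_p - c\_n\right) \cdot t + 0.125 \cdot {\left(c\_p - c\_n\right)}^{2} \cdot t^{2}
\end{array}
The next alternative keeps only the first-order term.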
Alternative 8 (94.9% accurate):
(FPCore (c_p c_n t s) :precision binary64 (fma -0.5 (* (- c_p c_n) t) 1.0))
double code(double c_p, double c_n, double t, double s) {
return fma(-0.5, ((c_p - c_n) * t), 1.0);
}
function code(c_p, c_n, t, s) return fma(-0.5, Float64(Float64(c_p - c_n) * t), 1.0) end
code[c$95$p_, c$95$n_, t_, s_] := N[(-0.5 * N[(N[(c$95$p - c$95$n), $MachinePrecision] * t), $MachinePrecision] + 1.0), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(-0.5, \left(c\_p - c\_n\right) \cdot t, 1\right)
\end{array}
Initial program 89.6%
Applied rewrites 94.1%
Taylor expanded in t around 0
lower-fma.f64 N/A
lower--.f64 N/A
sub-neg N/A
lower-log1p.f64 N/A
distribute-neg-frac N/A
metadata-eval N/A
lower-/.f64 N/A
lower-+.f64 N/A
lower-exp.f64 N/A
lower-neg.f64 N/A
lower-log.f64 N/A
Applied rewrites 98.1%
Taylor expanded in s around 0
lower-exp.f64 N/A
*-commutative N/A
metadata-eval N/A
cancel-sign-sub-inv N/A
distribute-lft-out-- N/A
associate-*l* N/A
lower-*.f64 N/A
lower-*.f64 N/A
lower--.f64 96.8
Applied rewrites 96.8%
Taylor expanded in t around 0
+-commutative N/A
lower-fma.f64 N/A
lower-*.f64 N/A
lower--.f64 94.9
Applied rewrites 94.9%
Final simplification 94.9%
Alternative 9 (94.8% accurate):
(FPCore (c_p c_n t s) :precision binary64 1.0)
double code(double c_p, double c_n, double t, double s) {
return 1.0;
}
real(8) function code(c_p, c_n, t, s)
real(8), intent (in) :: c_p
real(8), intent (in) :: c_n
real(8), intent (in) :: t
real(8), intent (in) :: s
code = 1.0d0
end function
public static double code(double c_p, double c_n, double t, double s) {
return 1.0;
}
def code(c_p, c_n, t, s): return 1.0
function code(c_p, c_n, t, s) return 1.0 end
function tmp = code(c_p, c_n, t, s)
    tmp = 1.0;
end
code[c$95$p_, c$95$n_, t_, s_] := 1.0
\begin{array}{l}
\\
1
\end{array}
Initial program 89.6%
Taylor expanded in c_p around 0
lower-/.f64 N/A
lower-pow.f64 N/A
sub-neg N/A
lower-+.f64 N/A
distribute-neg-frac N/A
metadata-eval N/A
lower-/.f64 N/A
lower-+.f64 N/A
lower-exp.f64 N/A
lower-neg.f64 N/A
lower-pow.f64 N/A
Applied rewrites 95.6%
Taylor expanded in c_n around 0
Applied rewrites 94.8%
Target (the user-provided :alt alternative):
(FPCore (c_p c_n t s) :precision binary64 (* (pow (/ (+ 1.0 (exp (- t))) (+ 1.0 (exp (- s)))) c_p) (pow (/ (+ 1.0 (exp t)) (+ 1.0 (exp s))) c_n)))
double code(double c_p, double c_n, double t, double s) {
return pow(((1.0 + exp(-t)) / (1.0 + exp(-s))), c_p) * pow(((1.0 + exp(t)) / (1.0 + exp(s))), c_n);
}
real(8) function code(c_p, c_n, t, s)
real(8), intent (in) :: c_p
real(8), intent (in) :: c_n
real(8), intent (in) :: t
real(8), intent (in) :: s
code = (((1.0d0 + exp(-t)) / (1.0d0 + exp(-s))) ** c_p) * (((1.0d0 + exp(t)) / (1.0d0 + exp(s))) ** c_n)
end function
public static double code(double c_p, double c_n, double t, double s) {
return Math.pow(((1.0 + Math.exp(-t)) / (1.0 + Math.exp(-s))), c_p) * Math.pow(((1.0 + Math.exp(t)) / (1.0 + Math.exp(s))), c_n);
}
def code(c_p, c_n, t, s): return math.pow(((1.0 + math.exp(-t)) / (1.0 + math.exp(-s))), c_p) * math.pow(((1.0 + math.exp(t)) / (1.0 + math.exp(s))), c_n)
function code(c_p, c_n, t, s) return Float64((Float64(Float64(1.0 + exp(Float64(-t))) / Float64(1.0 + exp(Float64(-s)))) ^ c_p) * (Float64(Float64(1.0 + exp(t)) / Float64(1.0 + exp(s))) ^ c_n)) end
function tmp = code(c_p, c_n, t, s)
    tmp = (((1.0 + exp(-t)) / (1.0 + exp(-s))) ^ c_p) * (((1.0 + exp(t)) / (1.0 + exp(s))) ^ c_n);
end
code[c$95$p_, c$95$n_, t_, s_] := N[(N[Power[N[(N[(1.0 + N[Exp[(-t)], $MachinePrecision]), $MachinePrecision] / N[(1.0 + N[Exp[(-s)], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], c$95$p], $MachinePrecision] * N[Power[N[(N[(1.0 + N[Exp[t], $MachinePrecision]), $MachinePrecision] / N[(1.0 + N[Exp[s], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], c$95$n], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
{\left(\frac{1 + e^{-t}}{1 + e^{-s}}\right)}^{c\_p} \cdot {\left(\frac{1 + e^{t}}{1 + e^{s}}\right)}^{c\_n}
\end{array}
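A small comparison harness for the initial program and this user-provided target (the function names and sample inputs are illustrative additions, not part of the report):

import math

def initial_program(c_p, c_n, t, s):
    # The initial program from the top of the report.
    t_1 = 1.0 / (1.0 + math.exp(-t))
    t_2 = 1.0 / (1.0 + math.exp(-s))
    num = math.pow(t_2, c_p) * math.pow(1.0 - t_2, c_n)
    den = math.pow(t_1, c_p) * math.pow(1.0 - t_1, c_n)
    return num / den

def target_alt(c_p, c_n, t, s):
    # The user-provided :alt expression listed just above.
    a = math.pow((1.0 + math.exp(-t)) / (1.0 + math.exp(-s)), c_p)
    b = math.pow((1.0 + math.exp(t)) / (1.0 + math.exp(s)), c_n)
    return a * b

for args in [(3.0, 4.0, 1.0, 2.0), (2.0, 3.0, 1.0, 40.0)]:
    print(args, initial_program(*args), target_alt(*args))
# On the second point the initial program collapses to 0.0 (1.0 - t_2 rounds
# to zero), while the target form still returns a small nonzero value.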
Reproduce:
herbie shell --seed 2024216
(FPCore (c_p c_n t s)
:name "Harley's example"
:precision binary64
:pre (and (< 0.0 c_p) (< 0.0 c_n))
:alt
(! :herbie-platform default (* (pow (/ (+ 1 (exp (- t))) (+ 1 (exp (- s)))) c_p) (pow (/ (+ 1 (exp t)) (+ 1 (exp s))) c_n)))
(/ (* (pow (/ 1.0 (+ 1.0 (exp (- s)))) c_p) (pow (- 1.0 (/ 1.0 (+ 1.0 (exp (- s))))) c_n)) (* (pow (/ 1.0 (+ 1.0 (exp (- t)))) c_p) (pow (- 1.0 (/ 1.0 (+ 1.0 (exp (- t))))) c_n))))