
;; Initial program: ratio of Bernoulli likelihoods under two logistic
;; (sigmoid) probabilities, where sigma(x) = 1/(1+exp(-x)):
;;   sigma(s)^c_p * (1-sigma(s))^c_n / (sigma(t)^c_p * (1-sigma(t))^c_n)
;; t_1 = sigma(t) (denominator), t_2 = sigma(s) (numerator).
(FPCore (c_p c_n t s)
:precision binary64
(let* ((t_1 (/ 1.0 (+ 1.0 (exp (- t))))) (t_2 (/ 1.0 (+ 1.0 (exp (- s))))))
(/
(* (pow t_2 c_p) (pow (- 1.0 t_2) c_n))
(* (pow t_1 c_p) (pow (- 1.0 t_1) c_n)))))
double code(double c_p, double c_n, double t, double s) {
double t_1 = 1.0 / (1.0 + exp(-t));
double t_2 = 1.0 / (1.0 + exp(-s));
return (pow(t_2, c_p) * pow((1.0 - t_2), c_n)) / (pow(t_1, c_p) * pow((1.0 - t_1), c_n));
}
! Likelihood ratio of two logistic (sigmoid) probabilities:
! with sigma(x) = 1/(1+exp(-x)), returns
!   sigma(s)**c_p * (1-sigma(s))**c_n / (sigma(t)**c_p * (1-sigma(t))**c_n)
real(8) function code(c_p, c_n, t, s)
real(8), intent (in) :: c_p   ! exponent on the sigmoid factor
real(8), intent (in) :: c_n   ! exponent on the (1 - sigmoid) factor
real(8), intent (in) :: t     ! logit for the denominator sigmoid
real(8), intent (in) :: s     ! logit for the numerator sigmoid
real(8) :: t_1                ! sigma(t)
real(8) :: t_2                ! sigma(s)
t_1 = 1.0d0 / (1.0d0 + exp(-t))
t_2 = 1.0d0 / (1.0d0 + exp(-s))
code = ((t_2 ** c_p) * ((1.0d0 - t_2) ** c_n)) / ((t_1 ** c_p) * ((1.0d0 - t_1) ** c_n))
end function
/**
 * Likelihood ratio of two logistic probabilities.
 *
 * <p>With sigma(x) = 1/(1+exp(-x)), returns
 * sigma(s)^c_p * (1-sigma(s))^c_n / (sigma(t)^c_p * (1-sigma(t))^c_n).
 *
 * @param c_p exponent on the sigmoid factor
 * @param c_n exponent on the (1 - sigmoid) factor
 * @param t   logit for the denominator sigmoid
 * @param s   logit for the numerator sigmoid
 * @return the likelihood ratio
 */
public static double code(double c_p, double c_n, double t, double s) {
    double sigT = 1.0 / (1.0 + Math.exp(-t));
    double sigS = 1.0 / (1.0 + Math.exp(-s));
    double numerator = Math.pow(sigS, c_p) * Math.pow(1.0 - sigS, c_n);
    double denominator = Math.pow(sigT, c_p) * Math.pow(1.0 - sigT, c_n);
    return numerator / denominator;
}
def code(c_p, c_n, t, s): t_1 = 1.0 / (1.0 + math.exp(-t)) t_2 = 1.0 / (1.0 + math.exp(-s)) return (math.pow(t_2, c_p) * math.pow((1.0 - t_2), c_n)) / (math.pow(t_1, c_p) * math.pow((1.0 - t_1), c_n))
function code(c_p, c_n, t, s) t_1 = Float64(1.0 / Float64(1.0 + exp(Float64(-t)))) t_2 = Float64(1.0 / Float64(1.0 + exp(Float64(-s)))) return Float64(Float64((t_2 ^ c_p) * (Float64(1.0 - t_2) ^ c_n)) / Float64((t_1 ^ c_p) * (Float64(1.0 - t_1) ^ c_n))) end
function tmp = code(c_p, c_n, t, s) t_1 = 1.0 / (1.0 + exp(-t)); t_2 = 1.0 / (1.0 + exp(-s)); tmp = ((t_2 ^ c_p) * ((1.0 - t_2) ^ c_n)) / ((t_1 ^ c_p) * ((1.0 - t_1) ^ c_n)); end
code[c$95$p_, c$95$n_, t_, s_] := Block[{t$95$1 = N[(1.0 / N[(1.0 + N[Exp[(-t)], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$2 = N[(1.0 / N[(1.0 + N[Exp[(-s)], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, N[(N[(N[Power[t$95$2, c$95$p], $MachinePrecision] * N[Power[N[(1.0 - t$95$2), $MachinePrecision], c$95$n], $MachinePrecision]), $MachinePrecision] / N[(N[Power[t$95$1, c$95$p], $MachinePrecision] * N[Power[N[(1.0 - t$95$1), $MachinePrecision], c$95$n], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_1 := \frac{1}{1 + e^{-t}}\\
t_2 := \frac{1}{1 + e^{-s}}\\
\frac{{t\_2}^{c\_p} \cdot {\left(1 - t\_2\right)}^{c\_n}}{{t\_1}^{c\_p} \cdot {\left(1 - t\_1\right)}^{c\_n}}
\end{array}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 6 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (c_p c_n t s)
:precision binary64
(let* ((t_1 (/ 1.0 (+ 1.0 (exp (- t))))) (t_2 (/ 1.0 (+ 1.0 (exp (- s))))))
(/
(* (pow t_2 c_p) (pow (- 1.0 t_2) c_n))
(* (pow t_1 c_p) (pow (- 1.0 t_1) c_n)))))
double code(double c_p, double c_n, double t, double s) {
double t_1 = 1.0 / (1.0 + exp(-t));
double t_2 = 1.0 / (1.0 + exp(-s));
return (pow(t_2, c_p) * pow((1.0 - t_2), c_n)) / (pow(t_1, c_p) * pow((1.0 - t_1), c_n));
}
real(8) function code(c_p, c_n, t, s)
real(8), intent (in) :: c_p
real(8), intent (in) :: c_n
real(8), intent (in) :: t
real(8), intent (in) :: s
real(8) :: t_1
real(8) :: t_2
t_1 = 1.0d0 / (1.0d0 + exp(-t))
t_2 = 1.0d0 / (1.0d0 + exp(-s))
code = ((t_2 ** c_p) * ((1.0d0 - t_2) ** c_n)) / ((t_1 ** c_p) * ((1.0d0 - t_1) ** c_n))
end function
public static double code(double c_p, double c_n, double t, double s) {
double t_1 = 1.0 / (1.0 + Math.exp(-t));
double t_2 = 1.0 / (1.0 + Math.exp(-s));
return (Math.pow(t_2, c_p) * Math.pow((1.0 - t_2), c_n)) / (Math.pow(t_1, c_p) * Math.pow((1.0 - t_1), c_n));
}
def code(c_p, c_n, t, s): t_1 = 1.0 / (1.0 + math.exp(-t)) t_2 = 1.0 / (1.0 + math.exp(-s)) return (math.pow(t_2, c_p) * math.pow((1.0 - t_2), c_n)) / (math.pow(t_1, c_p) * math.pow((1.0 - t_1), c_n))
function code(c_p, c_n, t, s) t_1 = Float64(1.0 / Float64(1.0 + exp(Float64(-t)))) t_2 = Float64(1.0 / Float64(1.0 + exp(Float64(-s)))) return Float64(Float64((t_2 ^ c_p) * (Float64(1.0 - t_2) ^ c_n)) / Float64((t_1 ^ c_p) * (Float64(1.0 - t_1) ^ c_n))) end
function tmp = code(c_p, c_n, t, s) t_1 = 1.0 / (1.0 + exp(-t)); t_2 = 1.0 / (1.0 + exp(-s)); tmp = ((t_2 ^ c_p) * ((1.0 - t_2) ^ c_n)) / ((t_1 ^ c_p) * ((1.0 - t_1) ^ c_n)); end
code[c$95$p_, c$95$n_, t_, s_] := Block[{t$95$1 = N[(1.0 / N[(1.0 + N[Exp[(-t)], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$2 = N[(1.0 / N[(1.0 + N[Exp[(-s)], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, N[(N[(N[Power[t$95$2, c$95$p], $MachinePrecision] * N[Power[N[(1.0 - t$95$2), $MachinePrecision], c$95$n], $MachinePrecision]), $MachinePrecision] / N[(N[Power[t$95$1, c$95$p], $MachinePrecision] * N[Power[N[(1.0 - t$95$1), $MachinePrecision], c$95$n], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_1 := \frac{1}{1 + e^{-t}}\\
t_2 := \frac{1}{1 + e^{-s}}\\
\frac{{t\_2}^{c\_p} \cdot {\left(1 - t\_2\right)}^{c\_n}}{{t\_1}^{c\_p} \cdot {\left(1 - t\_1\right)}^{c\_n}}
\end{array}
\end{array}
(FPCore (c_p c_n t s)
:precision binary64
(let* ((t_1 (log1p (exp (- t)))) (t_2 (log1p (exp (- s)))))
(*
(pow (pow (pow (exp c_p) (fma t_2 -1.0 t_1)) 0.5) 2.0)
(pow
(pow (/ (- 1.0 (exp (- t_2))) (- 1.0 (exp (- t_1)))) (* 0.5 c_n))
2.0))))
/* Herbie-rewritten alternative (99.0% accurate per the report): computes the
 * sigmoid likelihood ratio in log space. t_1 = log1p(exp(-t)) = -log(sigma(t)),
 * likewise t_2 for s, avoiding the cancellation-prone 1 - sigma(x).
 * The pow(pow(..., 0.5), 2.0) wrappers are deliberate accuracy-motivated
 * rewrites emitted by Herbie — do not "simplify" them away. */
double code(double c_p, double c_n, double t, double s) {
double t_1 = log1p(exp(-t));
double t_2 = log1p(exp(-s));
return pow(pow(pow(exp(c_p), fma(t_2, -1.0, t_1)), 0.5), 2.0) * pow(pow(((1.0 - exp(-t_2)) / (1.0 - exp(-t_1))), (0.5 * c_n)), 2.0);
}
function code(c_p, c_n, t, s) t_1 = log1p(exp(Float64(-t))) t_2 = log1p(exp(Float64(-s))) return Float64((((exp(c_p) ^ fma(t_2, -1.0, t_1)) ^ 0.5) ^ 2.0) * ((Float64(Float64(1.0 - exp(Float64(-t_2))) / Float64(1.0 - exp(Float64(-t_1)))) ^ Float64(0.5 * c_n)) ^ 2.0)) end
code[c$95$p_, c$95$n_, t_, s_] := Block[{t$95$1 = N[Log[1 + N[Exp[(-t)], $MachinePrecision]], $MachinePrecision]}, Block[{t$95$2 = N[Log[1 + N[Exp[(-s)], $MachinePrecision]], $MachinePrecision]}, N[(N[Power[N[Power[N[Power[N[Exp[c$95$p], $MachinePrecision], N[(t$95$2 * -1.0 + t$95$1), $MachinePrecision]], $MachinePrecision], 0.5], $MachinePrecision], 2.0], $MachinePrecision] * N[Power[N[Power[N[(N[(1.0 - N[Exp[(-t$95$2)], $MachinePrecision]), $MachinePrecision] / N[(1.0 - N[Exp[(-t$95$1)], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(0.5 * c$95$n), $MachinePrecision]], $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_1 := \mathsf{log1p}\left(e^{-t}\right)\\
t_2 := \mathsf{log1p}\left(e^{-s}\right)\\
{\left({\left({\left(e^{c\_p}\right)}^{\left(\mathsf{fma}\left(t\_2, -1, t\_1\right)\right)}\right)}^{0.5}\right)}^{2} \cdot {\left({\left(\frac{1 - e^{-t\_2}}{1 - e^{-t\_1}}\right)}^{\left(0.5 \cdot c\_n\right)}\right)}^{2}
\end{array}
\end{array}
Initial program 91.5%
Applied rewrites 97.1%
Applied rewrites 99.0%
Final simplification 99.0%
(FPCore (c_p c_n t s)
:precision binary64
(let* ((t_1 (exp (- t))) (t_2 (exp (- s))))
(*
(pow
(pow
(exp (- (log1p (/ -1.0 (+ t_2 1.0))) (log1p (/ -1.0 (+ t_1 1.0)))))
2.0)
(* 0.5 c_n))
(pow (exp c_p) (fma (log1p t_2) -1.0 (log1p t_1))))))
/* Herbie-rewritten alternative (99.0% accurate per the report): log-space
 * form with t_1 = exp(-t), t_2 = exp(-s). Uses the identity
 * log1p(-1/(x+1)) = -log1p(x) and fma to keep precision; equivalent
 * rearrangement of the sigmoid likelihood ratio per the report's rewrite log. */
double code(double c_p, double c_n, double t, double s) {
double t_1 = exp(-t);
double t_2 = exp(-s);
return pow(pow(exp((log1p((-1.0 / (t_2 + 1.0))) - log1p((-1.0 / (t_1 + 1.0))))), 2.0), (0.5 * c_n)) * pow(exp(c_p), fma(log1p(t_2), -1.0, log1p(t_1)));
}
function code(c_p, c_n, t, s) t_1 = exp(Float64(-t)) t_2 = exp(Float64(-s)) return Float64(((exp(Float64(log1p(Float64(-1.0 / Float64(t_2 + 1.0))) - log1p(Float64(-1.0 / Float64(t_1 + 1.0))))) ^ 2.0) ^ Float64(0.5 * c_n)) * (exp(c_p) ^ fma(log1p(t_2), -1.0, log1p(t_1)))) end
code[c$95$p_, c$95$n_, t_, s_] := Block[{t$95$1 = N[Exp[(-t)], $MachinePrecision]}, Block[{t$95$2 = N[Exp[(-s)], $MachinePrecision]}, N[(N[Power[N[Power[N[Exp[N[(N[Log[1 + N[(-1.0 / N[(t$95$2 + 1.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] - N[Log[1 + N[(-1.0 / N[(t$95$1 + 1.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]], $MachinePrecision], 2.0], $MachinePrecision], N[(0.5 * c$95$n), $MachinePrecision]], $MachinePrecision] * N[Power[N[Exp[c$95$p], $MachinePrecision], N[(N[Log[1 + t$95$2], $MachinePrecision] * -1.0 + N[Log[1 + t$95$1], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_1 := e^{-t}\\
t_2 := e^{-s}\\
{\left({\left(e^{\mathsf{log1p}\left(\frac{-1}{t\_2 + 1}\right) - \mathsf{log1p}\left(\frac{-1}{t\_1 + 1}\right)}\right)}^{2}\right)}^{\left(0.5 \cdot c\_n\right)} \cdot {\left(e^{c\_p}\right)}^{\left(\mathsf{fma}\left(\mathsf{log1p}\left(t\_2\right), -1, \mathsf{log1p}\left(t\_1\right)\right)\right)}
\end{array}
\end{array}
Initial program 91.5%
Applied rewrites 97.1%
Applied rewrites 99.0%
Applied rewrites 99.0%
Final simplification 99.0%
(FPCore (c_p c_n t s)
:precision binary64
(let* ((t_1 (log1p (exp (- s)))) (t_2 (log1p (exp (- t)))))
(*
(pow (exp c_p) (fma t_1 -1.0 t_2))
(pow (/ (- 1.0 (exp (- t_1))) (- 1.0 (exp (- t_2)))) c_n))))
/* Herbie-rewritten alternative (99.0% accurate per the report): with
 * t_1 = log1p(exp(-s)) = -log(sigma(s)) and t_2 = -log(sigma(t)), the first
 * factor exp(c_p)^fma(t_1,-1,t_2) is (sigma(s)/sigma(t))^c_p computed in log
 * space; the second factor is ((1-sigma(s))/(1-sigma(t)))^c_n. */
double code(double c_p, double c_n, double t, double s) {
double t_1 = log1p(exp(-s));
double t_2 = log1p(exp(-t));
return pow(exp(c_p), fma(t_1, -1.0, t_2)) * pow(((1.0 - exp(-t_1)) / (1.0 - exp(-t_2))), c_n);
}
function code(c_p, c_n, t, s) t_1 = log1p(exp(Float64(-s))) t_2 = log1p(exp(Float64(-t))) return Float64((exp(c_p) ^ fma(t_1, -1.0, t_2)) * (Float64(Float64(1.0 - exp(Float64(-t_1))) / Float64(1.0 - exp(Float64(-t_2)))) ^ c_n)) end
code[c$95$p_, c$95$n_, t_, s_] := Block[{t$95$1 = N[Log[1 + N[Exp[(-s)], $MachinePrecision]], $MachinePrecision]}, Block[{t$95$2 = N[Log[1 + N[Exp[(-t)], $MachinePrecision]], $MachinePrecision]}, N[(N[Power[N[Exp[c$95$p], $MachinePrecision], N[(t$95$1 * -1.0 + t$95$2), $MachinePrecision]], $MachinePrecision] * N[Power[N[(N[(1.0 - N[Exp[(-t$95$1)], $MachinePrecision]), $MachinePrecision] / N[(1.0 - N[Exp[(-t$95$2)], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], c$95$n], $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_1 := \mathsf{log1p}\left(e^{-s}\right)\\
t_2 := \mathsf{log1p}\left(e^{-t}\right)\\
{\left(e^{c\_p}\right)}^{\left(\mathsf{fma}\left(t\_1, -1, t\_2\right)\right)} \cdot {\left(\frac{1 - e^{-t\_1}}{1 - e^{-t\_2}}\right)}^{c\_n}
\end{array}
\end{array}
Initial program 91.5%
Applied rewrites 97.1%
Applied rewrites 99.0%
Final simplification 99.0%
(FPCore (c_p c_n t s) :precision binary64 (exp (* (fma -0.5 c_n (fma (* (+ c_p c_n) -0.125) s (* c_p 0.5))) s)))
/* Herbie alternative (98.7% accurate per the report): series expansion of the
 * ratio in t and s around 0 (see the report's rewrite log). Parameter t is
 * unused by construction of the expansion; the inner fma carries an s^2 term. */
double code(double c_p, double c_n, double t, double s) {
return exp((fma(-0.5, c_n, fma(((c_p + c_n) * -0.125), s, (c_p * 0.5))) * s));
}
function code(c_p, c_n, t, s) return exp(Float64(fma(-0.5, c_n, fma(Float64(Float64(c_p + c_n) * -0.125), s, Float64(c_p * 0.5))) * s)) end
code[c$95$p_, c$95$n_, t_, s_] := N[Exp[N[(N[(-0.5 * c$95$n + N[(N[(N[(c$95$p + c$95$n), $MachinePrecision] * -0.125), $MachinePrecision] * s + N[(c$95$p * 0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * s), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
e^{\mathsf{fma}\left(-0.5, c\_n, \mathsf{fma}\left(\left(c\_p + c\_n\right) \cdot -0.125, s, c\_p \cdot 0.5\right)\right) \cdot s}
\end{array}
Initial program 91.5%
Applied rewrites 97.1%
Taylor expanded in t around 0
lower-exp.f64 N/A
*-commutative N/A
lower-fma.f64 N/A
Applied rewrites 97.1%
Taylor expanded in s around 0
Applied rewrites 98.7%
Final simplification 98.7%
(FPCore (c_p c_n t s) :precision binary64 (exp (* (fma -0.5 c_n (* c_p 0.5)) s)))
/* Herbie alternative (98.6% accurate per the report): first-order series of
 * the ratio around t = 0, s = 0, i.e. exp(0.5*(c_p - c_n)*s). Parameter t is
 * unused by construction of the expansion. */
double code(double c_p, double c_n, double t, double s) {
return exp((fma(-0.5, c_n, (c_p * 0.5)) * s));
}
function code(c_p, c_n, t, s) return exp(Float64(fma(-0.5, c_n, Float64(c_p * 0.5)) * s)) end
code[c$95$p_, c$95$n_, t_, s_] := N[Exp[N[(N[(-0.5 * c$95$n + N[(c$95$p * 0.5), $MachinePrecision]), $MachinePrecision] * s), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
e^{\mathsf{fma}\left(-0.5, c\_n, c\_p \cdot 0.5\right) \cdot s}
\end{array}
Initial program 91.5%
Applied rewrites 97.1%
Taylor expanded in t around 0
lower-exp.f64 N/A
*-commutative N/A
lower-fma.f64 N/A
Applied rewrites 97.1%
Taylor expanded in s around 0
Applied rewrites 98.6%
Final simplification 98.6%
(FPCore (c_p c_n t s) :precision binary64 1.0)
/* Herbie alternative (96.2% accurate per the report): zeroth-order expansion
 * in c_n and c_p — the ratio collapses to the constant 1. All parameters are
 * unused by construction. */
double code(double c_p, double c_n, double t, double s) {
return 1.0;
}
real(8) function code(c_p, c_n, t, s)
real(8), intent (in) :: c_p
real(8), intent (in) :: c_n
real(8), intent (in) :: t
real(8), intent (in) :: s
code = 1.0d0
end function
public static double code(double c_p, double c_n, double t, double s) {
return 1.0;
}
def code(c_p, c_n, t, s): return 1.0
function code(c_p, c_n, t, s) return 1.0 end
function tmp = code(c_p, c_n, t, s) tmp = 1.0; end
code[c$95$p_, c$95$n_, t_, s_] := 1.0
\begin{array}{l}
\\
1
\end{array}
Initial program 91.5%
Taylor expanded in c_n around 0
lower-/.f64 N/A
lower-pow.f64 N/A
lower-/.f64 N/A
+-commutative N/A
neg-mul-1 N/A
lower-+.f64 N/A
lower-exp.f64 N/A
neg-mul-1 N/A
lower-neg.f64 N/A
lower-pow.f64 N/A
lower-/.f64 N/A
+-commutative N/A
lower-+.f64 N/A
lower-exp.f64 N/A
lower-neg.f64 95.6
Applied rewrites 95.6%
Taylor expanded in c_p around 0
Applied rewrites 96.2%
(FPCore (c_p c_n t s) :precision binary64 (* (pow (/ (+ 1.0 (exp (- t))) (+ 1.0 (exp (- s)))) c_p) (pow (/ (+ 1.0 (exp t)) (+ 1.0 (exp s))) c_n)))
/* Herbie :alt form: algebraic rearrangement of the sigmoid likelihood ratio as
 *   ((1+e^-t)/(1+e^-s))^c_p * ((1+e^t)/(1+e^s))^c_n,
 * which avoids the cancellation-prone 1 - sigma(x) subtraction entirely. */
double code(double c_p, double c_n, double t, double s) {
return pow(((1.0 + exp(-t)) / (1.0 + exp(-s))), c_p) * pow(((1.0 + exp(t)) / (1.0 + exp(s))), c_n);
}
real(8) function code(c_p, c_n, t, s)
real(8), intent (in) :: c_p
real(8), intent (in) :: c_n
real(8), intent (in) :: t
real(8), intent (in) :: s
code = (((1.0d0 + exp(-t)) / (1.0d0 + exp(-s))) ** c_p) * (((1.0d0 + exp(t)) / (1.0d0 + exp(s))) ** c_n)
end function
public static double code(double c_p, double c_n, double t, double s) {
return Math.pow(((1.0 + Math.exp(-t)) / (1.0 + Math.exp(-s))), c_p) * Math.pow(((1.0 + Math.exp(t)) / (1.0 + Math.exp(s))), c_n);
}
def code(c_p, c_n, t, s): return math.pow(((1.0 + math.exp(-t)) / (1.0 + math.exp(-s))), c_p) * math.pow(((1.0 + math.exp(t)) / (1.0 + math.exp(s))), c_n)
function code(c_p, c_n, t, s) return Float64((Float64(Float64(1.0 + exp(Float64(-t))) / Float64(1.0 + exp(Float64(-s)))) ^ c_p) * (Float64(Float64(1.0 + exp(t)) / Float64(1.0 + exp(s))) ^ c_n)) end
function tmp = code(c_p, c_n, t, s) tmp = (((1.0 + exp(-t)) / (1.0 + exp(-s))) ^ c_p) * (((1.0 + exp(t)) / (1.0 + exp(s))) ^ c_n); end
code[c$95$p_, c$95$n_, t_, s_] := N[(N[Power[N[(N[(1.0 + N[Exp[(-t)], $MachinePrecision]), $MachinePrecision] / N[(1.0 + N[Exp[(-s)], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], c$95$p], $MachinePrecision] * N[Power[N[(N[(1.0 + N[Exp[t], $MachinePrecision]), $MachinePrecision] / N[(1.0 + N[Exp[s], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], c$95$n], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
{\left(\frac{1 + e^{-t}}{1 + e^{-s}}\right)}^{c\_p} \cdot {\left(\frac{1 + e^{t}}{1 + e^{s}}\right)}^{c\_n}
\end{array}
herbie shell --seed 2024242
;; Original Herbie input ("Harley's example"): sigmoid likelihood ratio with
;; precondition c_p > 0 and c_n > 0. The :alt clause supplies a hand-written
;; rearrangement used as a starting alternative for the default platform.
(FPCore (c_p c_n t s)
:name "Harley's example"
:precision binary64
:pre (and (< 0.0 c_p) (< 0.0 c_n))
:alt
(! :herbie-platform default (* (pow (/ (+ 1 (exp (- t))) (+ 1 (exp (- s)))) c_p) (pow (/ (+ 1 (exp t)) (+ 1 (exp s))) c_n)))
(/ (* (pow (/ 1.0 (+ 1.0 (exp (- s)))) c_p) (pow (- 1.0 (/ 1.0 (+ 1.0 (exp (- s))))) c_n)) (* (pow (/ 1.0 (+ 1.0 (exp (- t)))) c_p) (pow (- 1.0 (/ 1.0 (+ 1.0 (exp (- t))))) c_n))))