?

Average Accuracy: 99.6% → 99.6%
Time: 18.2s
Precision: binary64
Cost: 19456

?

\[e^{-w} \cdot {\ell}^{\left(e^{w}\right)} \]
\[\frac{{\ell}^{\left(e^{w}\right)}}{e^{w}} \]
; Original program: e^(-w) * l^(e^w), evaluated in binary64
(FPCore (w l) :precision binary64 (* (exp (- w)) (pow l (exp w))))
; Herbie-simplified equivalent form: l^(e^w) / e^w
(FPCore (w l) :precision binary64 (/ (pow l (exp w)) (exp w)))
double code(double w, double l) {
	return exp(-w) * pow(l, exp(w));
}
double code(double w, double l) {
	return pow(l, exp(w)) / exp(w);
}
real(8) function code(w, l)
    ! Evaluates exp(-w) * l**exp(w) in double precision.
    real(8), intent (in) :: w
    real(8), intent (in) :: l
    real(8) :: decay, grown
    decay = exp(-w)
    grown = l ** exp(w)
    code = decay * grown
end function
real(8) function code(w, l)
    ! Evaluates l**exp(w) / exp(w) in double precision.
    real(8), intent (in) :: w
    real(8), intent (in) :: l
    real(8) :: denom
    denom = exp(w)
    code = (l ** denom) / denom
end function
/** Evaluates e^(-w) * l^(e^w) in double precision. */
public static double code(double w, double l) {
	final double decay = Math.exp(-w);            // e^(-w)
	final double grown = Math.pow(l, Math.exp(w)); // l^(e^w)
	return decay * grown;
}
/** Evaluates l^(e^w) / e^w in double precision. */
public static double code(double w, double l) {
	final double numerator = Math.pow(l, Math.exp(w)); // l^(e^w)
	final double denominator = Math.exp(w);            // e^w
	return numerator / denominator;
}
def code(w, l):
	"""Evaluate exp(-w) * l**exp(w) in double precision."""
	decay = math.exp(-w)              # e^(-w)
	grown = math.pow(l, math.exp(w))  # l^(e^w)
	return decay * grown
def code(w, l):
	"""Evaluate l**exp(w) / exp(w) in double precision."""
	numerator = math.pow(l, math.exp(w))  # l^(e^w)
	denominator = math.exp(w)             # e^w
	return numerator / denominator
function code(w, l)
	# e^(-w) * l^(e^w), result rounded to Float64
	decay = exp(Float64(-w))
	grown = l ^ exp(w)
	return Float64(decay * grown)
end
function code(w, l)
	# l^(e^w) / e^w, result rounded to Float64
	scale = exp(w)
	grown = l ^ scale
	return Float64(grown / scale)
end
function tmp = code(w, l)
	% e^(-w) * l^(e^w)
	decay = exp(-w);
	grown = l ^ exp(w);
	tmp = decay * grown;
end
function tmp = code(w, l)
	% l^(e^w) / e^w
	scale = exp(w);
	tmp = (l ^ scale) / scale;
end
(* e^(-w) * l^(e^w), with each intermediate rounded to $MachinePrecision *)
code[w_, l_] := N[(N[Exp[(-w)], $MachinePrecision] * N[Power[l, N[Exp[w], $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
(* Equivalent form l^(e^w) / e^w; note this redefinition replaces the one above *)
code[w_, l_] := N[(N[Power[l, N[Exp[w], $MachinePrecision]], $MachinePrecision] / N[Exp[w], $MachinePrecision]), $MachinePrecision]
e^{-w} \cdot {\ell}^{\left(e^{w}\right)}
\frac{{\ell}^{\left(e^{w}\right)}}{e^{w}}

Error?

Try it out?

Your Program's Arguments

Results

Enter valid numbers for all inputs

Derivation?

  1. Initial program 99.6%

    \[e^{-w} \cdot {\ell}^{\left(e^{w}\right)} \]
  2. Simplified 99.6%

    \[\leadsto \color{blue}{\frac{{\ell}^{\left(e^{w}\right)}}{e^{w}}} \]
    Proof

    [Start] 99.6

    \[ e^{-w} \cdot {\ell}^{\left(e^{w}\right)} \]

    exp-neg [=>] 99.6

    \[ \color{blue}{\frac{1}{e^{w}}} \cdot {\ell}^{\left(e^{w}\right)} \]

    associate-*l/ [=>] 99.6

    \[ \color{blue}{\frac{1 \cdot {\ell}^{\left(e^{w}\right)}}{e^{w}}} \]

    *-lft-identity [=>] 99.6

    \[ \frac{\color{blue}{{\ell}^{\left(e^{w}\right)}}}{e^{w}} \]
  3. Final simplification 99.6%

    \[\leadsto \frac{{\ell}^{\left(e^{w}\right)}}{e^{w}} \]

Alternatives

Alternative 1
Accuracy 98.0%
Cost 13376
\[\left(w \cdot \log \ell + 1\right) \cdot \frac{\ell}{e^{w}} \]
Alternative 2
Accuracy 98.0%
Cost 13376
\[\frac{\ell \cdot \left(w \cdot \log \ell + 1\right)}{e^{w}} \]
Alternative 3
Accuracy 98.1%
Cost 13376
\[\frac{\ell + \ell \cdot \left(w \cdot \log \ell\right)}{e^{w}} \]
Alternative 4
Accuracy 97.4%
Cost 6660
\[\begin{array}{l} \mathbf{if}\;w \leq 430:\\ \;\;\;\;\ell + \left(w \cdot w\right) \cdot \left(\ell \cdot 0.5 + w \cdot \left(\ell \cdot -0.16666666666666666\right)\right)\\ \mathbf{else}:\\ \;\;\;\;e^{-w}\\ \end{array} \]
Alternative 5
Accuracy 97.4%
Cost 6656
\[\ell \cdot e^{-w} \]
Alternative 6
Accuracy 97.4%
Cost 6592
\[\frac{\ell}{e^{w}} \]
Alternative 7
Accuracy 86.9%
Cost 1092
\[\begin{array}{l} \mathbf{if}\;w \leq 0.048:\\ \;\;\;\;\ell + \left(w \cdot w\right) \cdot \left(\ell \cdot 0.5 + w \cdot \left(\ell \cdot -0.16666666666666666\right)\right)\\ \mathbf{else}:\\ \;\;\;\;\left(1 + \ell \cdot \left(1 - w\right)\right) + -1\\ \end{array} \]
Alternative 8
Accuracy 86.9%
Cost 708
\[\begin{array}{l} \mathbf{if}\;w \leq 0.048:\\ \;\;\;\;\ell\\ \mathbf{else}:\\ \;\;\;\;\left(1 + \ell \cdot \left(1 - w\right)\right) + -1\\ \end{array} \]
Alternative 9
Accuracy 78.4%
Cost 64
\[\ell \]

Error

Reproduce?

herbie shell --seed 2023126 
(FPCore (w l)
  :name "exp-w (used to crash)"
  :precision binary64
  (* (exp (- w)) (pow l (exp w))))