
; FPCore input: (e^x mod sqrt(cos x)) * e^(-x), evaluated in binary64.
; fmod is the C-style truncated remainder (result takes the dividend's sign).
(FPCore (x) :precision binary64 (* (fmod (exp x) (sqrt (cos x))) (exp (- x))))
double code(double x) {
return fmod(exp(x), sqrt(cos(x))) * exp(-x);
}
! code: double-precision evaluation of (e^x mod sqrt(cos(x))) * e^(-x).
! Fortran mod on reals is the truncated remainder, matching C fmod here.
! NOTE(review): cos(x) < 0 makes the sqrt argument invalid — presumably
! callers only sample x where cos(x) >= 0; confirm.
real(8) function code(x)
real(8), intent (in) :: x
code = mod(exp(x), sqrt(cos(x))) * exp(-x)
end function
def code(x):
    """Binary64 evaluation of (e**x mod sqrt(cos x)) * e**(-x).

    Direct translation of the FPCore specification above; math.fmod keeps
    the dividend's sign, matching C fmod.  math.sqrt raises ValueError
    when cos(x) < 0.
    """
    dividend = math.exp(x)
    divisor = math.sqrt(math.cos(x))
    return math.fmod(dividend, divisor) * math.exp(-x)
# code: binary64 evaluation of (e^x mod sqrt(cos x)) * e^(-x), mirroring the
# FPCore specification above.  `rem` is the truncated remainder (C fmod);
# the Float64 conversions reproduce the generated code's explicit rounding.
function code(x)
    dividend = exp(x)
    divisor = sqrt(cos(x))
    damping = exp(Float64(-x))
    return Float64(rem(dividend, divisor) * damping)
end
(* code: machine-precision evaluation of (e^x mod sqrt(cos x)) * e^(-x).
   Mod[Abs[TMP1], Abs[TMP2]] * Sign[TMP1] emulates C fmod (truncated
   remainder carrying the dividend's sign) on top of Mathematica's Mod. *)
code[x_] := N[(N[With[{TMP1 = N[Exp[x], $MachinePrecision], TMP2 = N[Sqrt[N[Cos[x], $MachinePrecision]], $MachinePrecision]}, Mod[Abs[TMP1], Abs[TMP2]] * Sign[TMP1]], $MachinePrecision] * N[Exp[(-x)], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(\left(e^{x}\right) \bmod \left(\sqrt{\cos x}\right)\right) \cdot e^{-x}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 9 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
; Herbie alternative: identical to the input program.
(FPCore (x) :precision binary64 (* (fmod (exp x) (sqrt (cos x))) (exp (- x))))
// C translation.
double code(double x) {
return fmod(exp(x), sqrt(cos(x))) * exp(-x);
}
! Fortran translation (real mod is the truncated remainder, like C fmod).
real(8) function code(x)
real(8), intent (in) :: x
code = mod(exp(x), sqrt(cos(x))) * exp(-x)
end function
# Python translation.
def code(x): return math.fmod(math.exp(x), math.sqrt(math.cos(x))) * math.exp(-x)
# Julia translation (rem is the truncated remainder, like C fmod).
function code(x) return Float64(rem(exp(x), sqrt(cos(x))) * exp(Float64(-x))) end
(* Mathematica translation; Mod[Abs[a], Abs[b]] * Sign[a] emulates C fmod. *)
code[x_] := N[(N[With[{TMP1 = N[Exp[x], $MachinePrecision], TMP2 = N[Sqrt[N[Cos[x], $MachinePrecision]], $MachinePrecision]}, Mod[Abs[TMP1], Abs[TMP2]] * Sign[TMP1]], $MachinePrecision] * N[Exp[(-x)], $MachinePrecision]), $MachinePrecision]
% LaTeX rendering.
\begin{array}{l}
\\
\left(\left(e^{x}\right) \bmod \left(\sqrt{\cos x}\right)\right) \cdot e^{-x}
\end{array}
; Herbie alternative: sqrt(cos x) rewritten as 3*log(cbrt(exp(sqrt(cos x))))
; (mathematically identical), and *exp(-x) replaced by /exp(x).
(FPCore (x) :precision binary64 (/ (fmod (exp x) (* 3.0 (log (cbrt (exp (sqrt (cos x))))))) (exp x)))
// C translation.
double code(double x) {
return fmod(exp(x), (3.0 * log(cbrt(exp(sqrt(cos(x))))))) / exp(x);
}
# Julia translation (rem emulates C fmod).
function code(x) return Float64(rem(exp(x), Float64(3.0 * log(cbrt(exp(sqrt(cos(x))))))) / exp(x)) end
(* Mathematica translation; Mod[Abs[a], Abs[b]] * Sign[a] emulates C fmod. *)
code[x_] := N[(N[With[{TMP1 = N[Exp[x], $MachinePrecision], TMP2 = N[(3.0 * N[Log[N[Power[N[Exp[N[Sqrt[N[Cos[x], $MachinePrecision]], $MachinePrecision]], $MachinePrecision], 1/3], $MachinePrecision]], $MachinePrecision]), $MachinePrecision]}, Mod[Abs[TMP1], Abs[TMP2]] * Sign[TMP1]], $MachinePrecision] / N[Exp[x], $MachinePrecision]), $MachinePrecision]
% LaTeX rendering.
\begin{array}{l}
\\
\frac{\left(\left(e^{x}\right) \bmod \left(3 \cdot \log \left(\sqrt[3]{e^{\sqrt{\cos x}}}\right)\right)\right)}{e^{x}}
\end{array}
Derivation (rewrite step → accuracy after step):
  Initial program: 7.6%
  exp-neg: 7.6%
  associate-*r/: 7.6%
  *-rgt-identity: 7.6%
  Simplified: 7.6%
  add-log-exp: 7.6%
  add-cube-cbrt: 47.3%
  log-prod: 47.3%
  pow2: 47.3%
  Applied egg-rr: 47.3%
  log-pow: 47.3%
  distribute-lft1-in: 47.3%
  metadata-eval: 47.3%
  Simplified: 47.3%
  Final simplification: 47.3%
; Herbie alternative: Taylor expansion collapses the modulus to the constant
; 3*log(cbrt(e)), which equals 1 exactly in real arithmetic.
(FPCore (x) :precision binary64 (/ (fmod (exp x) (* 3.0 (log (cbrt E)))) (exp x)))
// C translation.
double code(double x) {
return fmod(exp(x), (3.0 * log(cbrt(((double) M_E))))) / exp(x);
}
# Julia translation (rem emulates C fmod).
function code(x) return Float64(rem(exp(x), Float64(3.0 * log(cbrt(exp(1))))) / exp(x)) end
(* Mathematica translation; Mod[Abs[a], Abs[b]] * Sign[a] emulates C fmod. *)
code[x_] := N[(N[With[{TMP1 = N[Exp[x], $MachinePrecision], TMP2 = N[(3.0 * N[Log[N[Power[E, 1/3], $MachinePrecision]], $MachinePrecision]), $MachinePrecision]}, Mod[Abs[TMP1], Abs[TMP2]] * Sign[TMP1]], $MachinePrecision] / N[Exp[x], $MachinePrecision]), $MachinePrecision]
% LaTeX rendering.
\begin{array}{l}
\\
\frac{\left(\left(e^{x}\right) \bmod \left(3 \cdot \log \left(\sqrt[3]{e}\right)\right)\right)}{e^{x}}
\end{array}
Derivation (rewrite step → accuracy after step):
  Initial program: 7.6%
  exp-neg: 7.6%
  associate-*r/: 7.6%
  *-rgt-identity: 7.6%
  Simplified: 7.6%
  add-log-exp: 7.6%
  add-cube-cbrt: 47.3%
  log-prod: 47.3%
  pow2: 47.3%
  Applied egg-rr: 47.3%
  log-pow: 47.3%
  distribute-lft1-in: 47.3%
  metadata-eval: 47.3%
  Simplified: 47.3%
  Taylor expanded in x around 0: 6.9%
  unpow1/3: 46.6%
  exp-1-e: 46.6%
  Simplified: 46.6%
  Final simplification: 46.6%
; Herbie alternative: modulus written as 3*(cbrt(1/3))^3, which equals 1
; exactly in real arithmetic.
(FPCore (x) :precision binary64 (/ (fmod (exp x) (* 3.0 (pow (cbrt 0.3333333333333333) 3.0))) (exp x)))
// C translation.
double code(double x) {
return fmod(exp(x), (3.0 * pow(cbrt(0.3333333333333333), 3.0))) / exp(x);
}
# Julia translation (rem emulates C fmod).
function code(x) return Float64(rem(exp(x), Float64(3.0 * (cbrt(0.3333333333333333) ^ 3.0))) / exp(x)) end
(* Mathematica translation; Mod[Abs[a], Abs[b]] * Sign[a] emulates C fmod. *)
code[x_] := N[(N[With[{TMP1 = N[Exp[x], $MachinePrecision], TMP2 = N[(3.0 * N[Power[N[Power[0.3333333333333333, 1/3], $MachinePrecision], 3.0], $MachinePrecision]), $MachinePrecision]}, Mod[Abs[TMP1], Abs[TMP2]] * Sign[TMP1]], $MachinePrecision] / N[Exp[x], $MachinePrecision]), $MachinePrecision]
% LaTeX rendering.
\begin{array}{l}
\\
\frac{\left(\left(e^{x}\right) \bmod \left(3 \cdot {\left(\sqrt[3]{0.3333333333333333}\right)}^{3}\right)\right)}{e^{x}}
\end{array}
Derivation (rewrite step → accuracy after step):
  Initial program: 7.6%
  exp-neg: 7.6%
  associate-*r/: 7.6%
  *-rgt-identity: 7.6%
  Simplified: 7.6%
  add-log-exp: 7.6%
  add-cube-cbrt: 47.3%
  log-prod: 47.3%
  pow2: 47.3%
  Applied egg-rr: 47.3%
  log-pow: 47.3%
  distribute-lft1-in: 47.3%
  metadata-eval: 47.3%
  Simplified: 47.3%
  add-cube-cbrt: 47.3%
  pow3: 47.3%
  pow1/3: 9.7%
  log-pow: 9.7%
  add-log-exp: 9.7%
  Applied egg-rr: 9.7%
  Taylor expanded in x around 0: 9.1%
  Final simplification: 9.1%
; Herbie alternative: same residue as the input, with *exp(-x) replaced by
; division by exp(x).
(FPCore (x) :precision binary64 (/ (fmod (exp x) (sqrt (cos x))) (exp x)))
// C translation.
double code(double x) {
return fmod(exp(x), sqrt(cos(x))) / exp(x);
}
! Fortran translation (real mod is the truncated remainder, like C fmod).
real(8) function code(x)
real(8), intent (in) :: x
code = mod(exp(x), sqrt(cos(x))) / exp(x)
end function
# Python translation.
def code(x): return math.fmod(math.exp(x), math.sqrt(math.cos(x))) / math.exp(x)
# Julia translation (rem emulates C fmod).
function code(x) return Float64(rem(exp(x), sqrt(cos(x))) / exp(x)) end
(* Mathematica translation; Mod[Abs[a], Abs[b]] * Sign[a] emulates C fmod. *)
code[x_] := N[(N[With[{TMP1 = N[Exp[x], $MachinePrecision], TMP2 = N[Sqrt[N[Cos[x], $MachinePrecision]], $MachinePrecision]}, Mod[Abs[TMP1], Abs[TMP2]] * Sign[TMP1]], $MachinePrecision] / N[Exp[x], $MachinePrecision]), $MachinePrecision]
% LaTeX rendering.
\begin{array}{l}
\\
\frac{\left(\left(e^{x}\right) \bmod \left(\sqrt{\cos x}\right)\right)}{e^{x}}
\end{array}
Derivation (rewrite step → accuracy after step):
  Initial program: 7.6%
  exp-neg: 7.6%
  associate-*r/: 7.6%
  *-rgt-identity: 7.6%
  Simplified: 7.6%
  Final simplification: 7.6%
; Herbie alternative: modulus replaced by the Taylor approximation
; sqrt(cos x) ≈ 1 - x^2/4 around x = 0.
(FPCore (x) :precision binary64 (/ (fmod (exp x) (+ 1.0 (* -0.25 (pow x 2.0)))) (exp x)))
// C translation.
double code(double x) {
return fmod(exp(x), (1.0 + (-0.25 * pow(x, 2.0)))) / exp(x);
}
! Fortran translation (real mod is the truncated remainder, like C fmod).
real(8) function code(x)
real(8), intent (in) :: x
code = mod(exp(x), (1.0d0 + ((-0.25d0) * (x ** 2.0d0)))) / exp(x)
end function
# Python translation.
def code(x): return math.fmod(math.exp(x), (1.0 + (-0.25 * math.pow(x, 2.0)))) / math.exp(x)
# Julia translation (rem emulates C fmod).
function code(x) return Float64(rem(exp(x), Float64(1.0 + Float64(-0.25 * (x ^ 2.0)))) / exp(x)) end
(* Mathematica translation; Mod[Abs[a], Abs[b]] * Sign[a] emulates C fmod. *)
code[x_] := N[(N[With[{TMP1 = N[Exp[x], $MachinePrecision], TMP2 = N[(1.0 + N[(-0.25 * N[Power[x, 2.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, Mod[Abs[TMP1], Abs[TMP2]] * Sign[TMP1]], $MachinePrecision] / N[Exp[x], $MachinePrecision]), $MachinePrecision]
% LaTeX rendering.
\begin{array}{l}
\\
\frac{\left(\left(e^{x}\right) \bmod \left(1 + -0.25 \cdot {x}^{2}\right)\right)}{e^{x}}
\end{array}
Derivation (rewrite step → accuracy after step):
  Initial program: 7.6%
  exp-neg: 7.6%
  associate-*r/: 7.6%
  *-rgt-identity: 7.6%
  Simplified: 7.6%
  Taylor expanded in x around 0: 7.3%
  Final simplification: 7.3%
; Herbie alternative: log/exp form exp(log(fmod(e^x, 1)) - x); the modulus
; has been Taylor-collapsed to 1.
(FPCore (x) :precision binary64 (exp (- (log (fmod (exp x) 1.0)) x)))
// C translation.
double code(double x) {
return exp((log(fmod(exp(x), 1.0)) - x));
}
! Fortran translation (real mod is the truncated remainder, like C fmod).
real(8) function code(x)
real(8), intent (in) :: x
code = exp((log(mod(exp(x), 1.0d0)) - x))
end function
# Python translation.
def code(x): return math.exp((math.log(math.fmod(math.exp(x), 1.0)) - x))
# Julia translation (rem emulates C fmod).
function code(x) return exp(Float64(log(rem(exp(x), 1.0)) - x)) end
(* Mathematica translation; Mod[Abs[a], Abs[b]] * Sign[a] emulates C fmod. *)
code[x_] := N[Exp[N[(N[Log[N[With[{TMP1 = N[Exp[x], $MachinePrecision], TMP2 = 1.0}, Mod[Abs[TMP1], Abs[TMP2]] * Sign[TMP1]], $MachinePrecision]], $MachinePrecision] - x), $MachinePrecision]], $MachinePrecision]
% LaTeX rendering.
\begin{array}{l}
\\
e^{\log \left(\left(e^{x}\right) \bmod 1\right) - x}
\end{array}
Derivation (rewrite step → accuracy after step):
  Initial program: 7.6%
  exp-neg: 7.6%
  associate-*r/: 7.6%
  *-rgt-identity: 7.6%
  Simplified: 7.6%
  Taylor expanded in x around 0: 6.9%
  add-exp-log: 6.9%
  div-exp: 6.9%
  Applied egg-rr: 6.9%
  Final simplification: 6.9%
; Herbie alternative: fmod(e^x, 1) / e^x — modulus Taylor-collapsed to 1.
(FPCore (x) :precision binary64 (/ (fmod (exp x) 1.0) (exp x)))
// C translation.
double code(double x) {
return fmod(exp(x), 1.0) / exp(x);
}
! Fortran translation (real mod is the truncated remainder, like C fmod).
real(8) function code(x)
real(8), intent (in) :: x
code = mod(exp(x), 1.0d0) / exp(x)
end function
# Python translation.
def code(x): return math.fmod(math.exp(x), 1.0) / math.exp(x)
# Julia translation (rem emulates C fmod).
function code(x) return Float64(rem(exp(x), 1.0) / exp(x)) end
(* Mathematica translation; Mod[Abs[a], Abs[b]] * Sign[a] emulates C fmod. *)
code[x_] := N[(N[With[{TMP1 = N[Exp[x], $MachinePrecision], TMP2 = 1.0}, Mod[Abs[TMP1], Abs[TMP2]] * Sign[TMP1]], $MachinePrecision] / N[Exp[x], $MachinePrecision]), $MachinePrecision]
% LaTeX rendering.
\begin{array}{l}
\\
\frac{\left(\left(e^{x}\right) \bmod 1\right)}{e^{x}}
\end{array}
Derivation (rewrite step → accuracy after step):
  Initial program: 7.6%
  exp-neg: 7.6%
  associate-*r/: 7.6%
  *-rgt-identity: 7.6%
  Simplified: 7.6%
  Taylor expanded in x around 0: 6.9%
  Final simplification: 6.9%
; Herbie alternative: fmod(e^x, 1) * (1 - x) — both the modulus and e^(-x)
; replaced by their low-order Taylor expansions around 0.
(FPCore (x) :precision binary64 (* (fmod (exp x) 1.0) (- 1.0 x)))
// C translation.
double code(double x) {
return fmod(exp(x), 1.0) * (1.0 - x);
}
! Fortran translation (real mod is the truncated remainder, like C fmod).
real(8) function code(x)
real(8), intent (in) :: x
code = mod(exp(x), 1.0d0) * (1.0d0 - x)
end function
# Python translation.
def code(x): return math.fmod(math.exp(x), 1.0) * (1.0 - x)
# Julia translation (rem emulates C fmod).
function code(x) return Float64(rem(exp(x), 1.0) * Float64(1.0 - x)) end
(* Mathematica translation; Mod[Abs[a], Abs[b]] * Sign[a] emulates C fmod. *)
code[x_] := N[(N[With[{TMP1 = N[Exp[x], $MachinePrecision], TMP2 = 1.0}, Mod[Abs[TMP1], Abs[TMP2]] * Sign[TMP1]], $MachinePrecision] * N[(1.0 - x), $MachinePrecision]), $MachinePrecision]
% LaTeX rendering.
\begin{array}{l}
\\
\left(\left(e^{x}\right) \bmod 1\right) \cdot \left(1 - x\right)
\end{array}
Derivation (rewrite step → accuracy after step):
  Initial program: 7.6%
  exp-neg: 7.6%
  associate-*r/: 7.6%
  *-rgt-identity: 7.6%
  Simplified: 7.6%
  Taylor expanded in x around 0: 6.6%
  +-commutative: 6.6%
  *-lft-identity: 6.6%
  associate-*r*: 6.6%
  neg-mul-1: 6.6%
  distribute-rgt-out: 6.6%
  unsub-neg: 6.6%
  Simplified: 6.6%
  Taylor expanded in x around 0: 6.4%
  Final simplification: 6.4%
; Herbie alternative: fmod(e^x, 1) alone — e^(-x) Taylor-collapsed to 1.
(FPCore (x) :precision binary64 (fmod (exp x) 1.0))
// C translation.
double code(double x) {
return fmod(exp(x), 1.0);
}
! Fortran translation (real mod is the truncated remainder, like C fmod).
real(8) function code(x)
real(8), intent (in) :: x
code = mod(exp(x), 1.0d0)
end function
# Python translation.
def code(x): return math.fmod(math.exp(x), 1.0)
# Julia translation (rem emulates C fmod).
function code(x) return rem(exp(x), 1.0) end
(* Mathematica translation; Mod[Abs[a], Abs[b]] * Sign[a] emulates C fmod. *)
code[x_] := N[With[{TMP1 = N[Exp[x], $MachinePrecision], TMP2 = 1.0}, Mod[Abs[TMP1], Abs[TMP2]] * Sign[TMP1]], $MachinePrecision]
% LaTeX rendering.
\begin{array}{l}
\\
\left(\left(e^{x}\right) \bmod 1\right)
\end{array}
Derivation (rewrite step → accuracy after step):
  Initial program: 7.6%
  exp-neg: 7.6%
  associate-*r/: 7.6%
  *-rgt-identity: 7.6%
  Simplified: 7.6%
  Taylor expanded in x around 0: 6.9%
  Taylor expanded in x around 0: 6.1%
  Final simplification: 6.1%
Reproduce this report with: herbie shell --seed 2023302
; Original FPCore input fed to Herbie.
(FPCore (x)
:name "expfmod (used to be hard to sample)"
:precision binary64
(* (fmod (exp x) (sqrt (cos x))) (exp (- x))))