
(FPCore (x) :precision binary64 (exp (- (- 1.0 (* x x)))))
double code(double x) {
return exp(-(1.0 - (x * x)));
}
! NaN-aware maximum and minimum, matching C's fmax/fmin semantics: when
! exactly one argument is a NaN, the other (well-defined) argument is
! returned; when both are NaN, a NaN is returned.  The generic names cover
! every real(4)/real(8) argument combination; mixed-kind versions promote
! the real(4) operand and return real(8).
module fmin_fmax_functions
  implicit none
  private
  public :: fmax
  public :: fmin

  interface fmax
    module procedure fmax88, fmax44, fmax84, fmax48
  end interface

  interface fmin
    module procedure fmin88, fmin44, fmin84, fmin48
  end interface

contains

  !> Maximum of two real(8) values; if one operand is NaN, return the other.
  real(8) function fmax88(x, y) result(res)
    real(8), intent(in) :: x
    real(8), intent(in) :: y
    if (x /= x) then          ! x is NaN
      res = y
    else if (y /= y) then     ! y is NaN
      res = x
    else
      res = max(x, y)
    end if
  end function fmax88

  !> Maximum of two real(4) values; if one operand is NaN, return the other.
  real(4) function fmax44(x, y) result(res)
    real(4), intent(in) :: x
    real(4), intent(in) :: y
    if (x /= x) then
      res = y
    else if (y /= y) then
      res = x
    else
      res = max(x, y)
    end if
  end function fmax44

  !> Mixed-kind maximum: real(8) with real(4); result is real(8).
  real(8) function fmax84(x, y) result(res)
    real(8), intent(in) :: x
    real(4), intent(in) :: y
    if (x /= x) then
      res = dble(y)
    else if (y /= y) then
      res = x
    else
      res = max(x, dble(y))
    end if
  end function fmax84

  !> Mixed-kind maximum: real(4) with real(8); result is real(8).
  real(8) function fmax48(x, y) result(res)
    real(4), intent(in) :: x
    real(8), intent(in) :: y
    if (x /= x) then
      res = y
    else if (y /= y) then
      res = dble(x)
    else
      res = max(dble(x), y)
    end if
  end function fmax48

  !> Minimum of two real(8) values; if one operand is NaN, return the other.
  real(8) function fmin88(x, y) result(res)
    real(8), intent(in) :: x
    real(8), intent(in) :: y
    if (x /= x) then
      res = y
    else if (y /= y) then
      res = x
    else
      res = min(x, y)
    end if
  end function fmin88

  !> Minimum of two real(4) values; if one operand is NaN, return the other.
  real(4) function fmin44(x, y) result(res)
    real(4), intent(in) :: x
    real(4), intent(in) :: y
    if (x /= x) then
      res = y
    else if (y /= y) then
      res = x
    else
      res = min(x, y)
    end if
  end function fmin44

  !> Mixed-kind minimum: real(8) with real(4); result is real(8).
  real(8) function fmin84(x, y) result(res)
    real(8), intent(in) :: x
    real(4), intent(in) :: y
    if (x /= x) then
      res = dble(y)
    else if (y /= y) then
      res = x
    else
      res = min(x, dble(y))
    end if
  end function fmin84

  !> Mixed-kind minimum: real(4) with real(8); result is real(8).
  real(8) function fmin48(x, y) result(res)
    real(4), intent(in) :: x
    real(8), intent(in) :: y
    if (x /= x) then
      res = y
    else if (y /= y) then
      res = dble(x)
    else
      res = min(dble(x), y)
    end if
  end function fmin48

end module fmin_fmax_functions
!> Evaluate exp(-(1 - x*x)) in double precision.
!> Mathematically equal to exp(x*x - 1).
real(8) function code(x)
use fmin_fmax_functions
implicit none   ! was missing: every program unit should forbid implicit typing
real(8), intent (in) :: x
! NOTE(review): fmin_fmax_functions is not referenced by this body;
! the use-statement is kept for compatibility with the generated harness.
code = exp(-(1.0d0 - (x * x)))
end function
/** Evaluates exp(-(1.0 - x*x)) in double precision. */
public static double code(double x) {
    final double square = x * x;
    return Math.exp(-(1.0 - square));
}
def code(x):
    """Return exp(-(1.0 - x*x)) evaluated in double precision."""
    square = x * x
    return math.exp(-(1.0 - square))
function code(x)
    # exp(-(1 - x^2)); explicit Float64 rounding after each operation,
    # exactly as in the original one-line form.
    sq = Float64(x * x)
    return exp(Float64(-Float64(1.0 - sq)))
end
function tmp = code(x)
    % exp(-(1 - x^2)) in double precision
    sq = x * x;
    tmp = exp(-(1.0 - sq));
end
(* exp(-(1 - x^2)); each intermediate is rounded to $MachinePrecision to mirror binary64 evaluation *)
code[x_] := N[Exp[(-N[(1.0 - N[(x * x), $MachinePrecision]), $MachinePrecision])], $MachinePrecision]
\begin{array}{l}
\\
e^{-\left(1 - x \cdot x\right)}
\end{array}
Herbie found 10 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x) :precision binary64 (exp (- (- 1.0 (* x x)))))
double code(double x) {
return exp(-(1.0 - (x * x)));
}
! NaN-aware fmax/fmin covering all real(4)/real(8) argument combinations,
! mirroring C's fmax/fmin: if exactly one argument is NaN, the other is
! returned.  The merge idiom reads as:
!   merge(y, merge(x, max(x, y), y /= y), x /= x)
!   => if x is NaN (x /= x) return y; else if y is NaN return x; else max(x, y).
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
! Generic fmax dispatches on the kind combination of the two arguments.
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
! Generic fmin dispatches on the kind combination of the two arguments.
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
! Maximum of two real(8) values; NaN operands are skipped in favor of the other.
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! Maximum of two real(4) values; NaN operands are skipped in favor of the other.
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! Mixed-kind maximum: real(8) with real(4); the real(4) operand is promoted.
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
! Mixed-kind maximum: real(4) with real(8); the real(4) operand is promoted.
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
! Minimum of two real(8) values; NaN operands are skipped in favor of the other.
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! Minimum of two real(4) values; NaN operands are skipped in favor of the other.
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! Mixed-kind minimum: real(8) with real(4); the real(4) operand is promoted.
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
! Mixed-kind minimum: real(4) with real(8); the real(4) operand is promoted.
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
!> Evaluate exp(-(1 - x*x)) in double precision.
!> Mathematically equal to exp(x*x - 1).
real(8) function code(x)
use fmin_fmax_functions
implicit none   ! was missing: every program unit should forbid implicit typing
real(8), intent (in) :: x
! NOTE(review): fmin_fmax_functions is not referenced by this body;
! the use-statement is kept for compatibility with the generated harness.
code = exp(-(1.0d0 - (x * x)))
end function
/** Computes exp(-(1.0 - x*x)) in double precision. */
public static double code(double x) {
    final double xx = x * x;
    return Math.exp(-(1.0 - xx));
}
def code(x):
    """Compute exp(-(1.0 - x*x)); identical to exp(x*x - 1) mathematically."""
    residual = 1.0 - x * x
    return math.exp(-residual)
function code(x)
    # exp(-(1 - x^2)), rounding each intermediate to Float64 as the
    # original single-line form did.
    residual = Float64(1.0 - Float64(x * x))
    return exp(Float64(-residual))
end
function tmp = code(x)
    % Compute exp(-(1 - x^2)) in double precision.
    residual = 1.0 - (x * x);
    tmp = exp(-residual);
end
(* exp(-(1 - x^2)); intermediates rounded to $MachinePrecision to mirror binary64 evaluation *)
code[x_] := N[Exp[(-N[(1.0 - N[(x * x), $MachinePrecision]), $MachinePrecision])], $MachinePrecision]
\begin{array}{l}
\\
e^{-\left(1 - x \cdot x\right)}
\end{array}
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (/ (pow (exp (- x_m)) (- x_m)) E))
x_m = fabs(x);
double code(double x_m) {
return pow(exp(-x_m), -x_m) / ((double) M_E);
}
x_m = Math.abs(x);
public static double code(double x_m) {
return Math.pow(Math.exp(-x_m), -x_m) / Math.E;
}
x_m = math.fabs(x) def code(x_m): return math.pow(math.exp(-x_m), -x_m) / math.e
x_m = abs(x) function code(x_m) return Float64((exp(Float64(-x_m)) ^ Float64(-x_m)) / exp(1)) end
x_m = abs(x); function tmp = code(x_m) tmp = (exp(-x_m) ^ -x_m) / 2.71828182845904523536; end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := N[(N[Power[N[Exp[(-x$95$m)], $MachinePrecision], (-x$95$m)], $MachinePrecision] / E), $MachinePrecision]
\begin{array}{l}
x_m = \left|x\right|
\\
\frac{{\left(e^{-x\_m}\right)}^{\left(-x\_m\right)}}{e}
\end{array}
Initial program 100.0%
lift-exp.f64N/A
lift-neg.f64N/A
lift--.f64N/A
lift-*.f64N/A
exp-negN/A
lower-/.f64N/A
pow2N/A
exp-diffN/A
lower-/.f64N/A
exp-1-eN/A
lower-E.f64N/A
pow2N/A
exp-prodN/A
lower-pow.f64N/A
lower-exp.f64100.0
Applied rewrites100.0%
lift-/.f64N/A
lift-E.f64N/A
lift-/.f64N/A
lift-exp.f64N/A
lift-pow.f64N/A
pow-expN/A
pow2N/A
e-exp-1N/A
div-expN/A
pow2N/A
exp-negN/A
fp-cancel-sub-sign-invN/A
distribute-lft-neg-inN/A
pow2N/A
distribute-neg-inN/A
metadata-evalN/A
mul-1-negN/A
distribute-lft-neg-outN/A
metadata-evalN/A
*-lft-identityN/A
+-commutativeN/A
pow2N/A
Applied rewrites100.0%
lift-exp.f64N/A
lift-pow.f64N/A
pow-expN/A
sqr-neg-revN/A
pow-expN/A
lower-pow.f64N/A
lower-exp.f64N/A
lower-neg.f64N/A
lower-neg.f64100.0
Applied rewrites100.0%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (/ (pow (exp x_m) x_m) E))
x_m = fabs(x);
double code(double x_m) {
return pow(exp(x_m), x_m) / ((double) M_E);
}
x_m = Math.abs(x);
public static double code(double x_m) {
return Math.pow(Math.exp(x_m), x_m) / Math.E;
}
x_m = math.fabs(x) def code(x_m): return math.pow(math.exp(x_m), x_m) / math.e
x_m = abs(x) function code(x_m) return Float64((exp(x_m) ^ x_m) / exp(1)) end
x_m = abs(x); function tmp = code(x_m) tmp = (exp(x_m) ^ x_m) / 2.71828182845904523536; end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := N[(N[Power[N[Exp[x$95$m], $MachinePrecision], x$95$m], $MachinePrecision] / E), $MachinePrecision]
\begin{array}{l}
x_m = \left|x\right|
\\
\frac{{\left(e^{x\_m}\right)}^{x\_m}}{e}
\end{array}
Initial program 100.0%
lift-exp.f64N/A
lift-neg.f64N/A
lift--.f64N/A
lift-*.f64N/A
exp-negN/A
lower-/.f64N/A
pow2N/A
exp-diffN/A
lower-/.f64N/A
exp-1-eN/A
lower-E.f64N/A
pow2N/A
exp-prodN/A
lower-pow.f64N/A
lower-exp.f64100.0
Applied rewrites100.0%
lift-/.f64N/A
lift-E.f64N/A
lift-/.f64N/A
lift-exp.f64N/A
lift-pow.f64N/A
pow-expN/A
pow2N/A
e-exp-1N/A
div-expN/A
pow2N/A
exp-negN/A
fp-cancel-sub-sign-invN/A
distribute-lft-neg-inN/A
pow2N/A
distribute-neg-inN/A
metadata-evalN/A
mul-1-negN/A
distribute-lft-neg-outN/A
metadata-evalN/A
*-lft-identityN/A
+-commutativeN/A
pow2N/A
Applied rewrites100.0%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (/ (exp (* x_m x_m)) E))
x_m = fabs(x);
double code(double x_m) {
return exp((x_m * x_m)) / ((double) M_E);
}
x_m = Math.abs(x);
public static double code(double x_m) {
return Math.exp((x_m * x_m)) / Math.E;
}
x_m = math.fabs(x) def code(x_m): return math.exp((x_m * x_m)) / math.e
x_m = abs(x) function code(x_m) return Float64(exp(Float64(x_m * x_m)) / exp(1)) end
x_m = abs(x); function tmp = code(x_m) tmp = exp((x_m * x_m)) / 2.71828182845904523536; end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := N[(N[Exp[N[(x$95$m * x$95$m), $MachinePrecision]], $MachinePrecision] / E), $MachinePrecision]
\begin{array}{l}
x_m = \left|x\right|
\\
\frac{e^{x\_m \cdot x\_m}}{e}
\end{array}
Initial program 100.0%
lift-exp.f64N/A
lift-neg.f64N/A
lift--.f64N/A
lift-*.f64N/A
exp-negN/A
lower-/.f64N/A
pow2N/A
exp-diffN/A
lower-/.f64N/A
exp-1-eN/A
lower-E.f64N/A
pow2N/A
exp-prodN/A
lower-pow.f64N/A
lower-exp.f64100.0
Applied rewrites100.0%
lift-/.f64N/A
lift-E.f64N/A
lift-/.f64N/A
lift-exp.f64N/A
lift-pow.f64N/A
pow-expN/A
pow2N/A
e-exp-1N/A
div-expN/A
pow2N/A
exp-negN/A
fp-cancel-sub-sign-invN/A
distribute-lft-neg-inN/A
pow2N/A
distribute-neg-inN/A
metadata-evalN/A
mul-1-negN/A
distribute-lft-neg-outN/A
metadata-evalN/A
*-lft-identityN/A
+-commutativeN/A
pow2N/A
Applied rewrites100.0%
lift-exp.f64N/A
lift-pow.f64N/A
pow-expN/A
pow2N/A
lower-exp.f64N/A
pow2N/A
lift-*.f64100.0
Applied rewrites100.0%
x_m = (fabs.f64 x)
(FPCore (x_m)
:precision binary64
(if (<= x_m 2.1)
(/
(fma
(fma (fma 0.16666666666666666 (* x_m x_m) 0.5) (* x_m x_m) 1.0)
(* x_m x_m)
1.0)
E)
(exp (* x_m x_m))))
x_m = fabs(x);
double code(double x_m) {
double tmp;
if (x_m <= 2.1) {
tmp = fma(fma(fma(0.16666666666666666, (x_m * x_m), 0.5), (x_m * x_m), 1.0), (x_m * x_m), 1.0) / ((double) M_E);
} else {
tmp = exp((x_m * x_m));
}
return tmp;
}
x_m = abs(x) function code(x_m) tmp = 0.0 if (x_m <= 2.1) tmp = Float64(fma(fma(fma(0.16666666666666666, Float64(x_m * x_m), 0.5), Float64(x_m * x_m), 1.0), Float64(x_m * x_m), 1.0) / exp(1)); else tmp = exp(Float64(x_m * x_m)); end return tmp end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := If[LessEqual[x$95$m, 2.1], N[(N[(N[(N[(0.16666666666666666 * N[(x$95$m * x$95$m), $MachinePrecision] + 0.5), $MachinePrecision] * N[(x$95$m * x$95$m), $MachinePrecision] + 1.0), $MachinePrecision] * N[(x$95$m * x$95$m), $MachinePrecision] + 1.0), $MachinePrecision] / E), $MachinePrecision], N[Exp[N[(x$95$m * x$95$m), $MachinePrecision]], $MachinePrecision]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
\mathbf{if}\;x\_m \leq 2.1:\\
\;\;\;\;\frac{\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(0.16666666666666666, x\_m \cdot x\_m, 0.5\right), x\_m \cdot x\_m, 1\right), x\_m \cdot x\_m, 1\right)}{e}\\
\mathbf{else}:\\
\;\;\;\;e^{x\_m \cdot x\_m}\\
\end{array}
\end{array}
if x < 2.10000000000000009
Initial program 100.0%
lift-exp.f64N/A
lift-neg.f64N/A
lift--.f64N/A
lift-*.f64N/A
exp-negN/A
lower-/.f64N/A
pow2N/A
exp-diffN/A
lower-/.f64N/A
exp-1-eN/A
lower-E.f64N/A
pow2N/A
exp-prodN/A
lower-pow.f64N/A
lower-exp.f64100.0
Applied rewrites100.0%
lift-/.f64N/A
lift-E.f64N/A
lift-/.f64N/A
lift-exp.f64N/A
lift-pow.f64N/A
pow-expN/A
pow2N/A
e-exp-1N/A
div-expN/A
pow2N/A
exp-negN/A
fp-cancel-sub-sign-invN/A
distribute-lft-neg-inN/A
pow2N/A
distribute-neg-inN/A
metadata-evalN/A
mul-1-negN/A
distribute-lft-neg-outN/A
metadata-evalN/A
*-lft-identityN/A
+-commutativeN/A
pow2N/A
Applied rewrites100.0%
Taylor expanded in x around 0
pow-expN/A
sqr-neg-revN/A
pow-expN/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
+-commutativeN/A
lower-fma.f64N/A
pow2N/A
lift-*.f64N/A
pow2N/A
lift-*.f64N/A
pow2N/A
lift-*.f6499.7
Applied rewrites99.7%
if 2.10000000000000009 < x
Initial program 100.0%
Taylor expanded in x around inf
pow2N/A
lift-*.f6499.5
Applied rewrites99.5%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (exp (fma x_m x_m -1.0)))
x_m = fabs(x);
double code(double x_m) {
return exp(fma(x_m, x_m, -1.0));
}
x_m = abs(x) function code(x_m) return exp(fma(x_m, x_m, -1.0)) end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := N[Exp[N[(x$95$m * x$95$m + -1.0), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
x_m = \left|x\right|
\\
e^{\mathsf{fma}\left(x\_m, x\_m, -1\right)}
\end{array}
Initial program 100.0%
Taylor expanded in x around 0
metadata-evalN/A
fp-cancel-sub-sign-invN/A
pow2N/A
metadata-evalN/A
metadata-evalN/A
lower-fma.f64100.0
Applied rewrites100.0%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (/ (fma (fma (fma 0.16666666666666666 (* x_m x_m) 0.5) (* x_m x_m) 1.0) (* x_m x_m) 1.0) E))
x_m = fabs(x);
double code(double x_m) {
return fma(fma(fma(0.16666666666666666, (x_m * x_m), 0.5), (x_m * x_m), 1.0), (x_m * x_m), 1.0) / ((double) M_E);
}
x_m = abs(x) function code(x_m) return Float64(fma(fma(fma(0.16666666666666666, Float64(x_m * x_m), 0.5), Float64(x_m * x_m), 1.0), Float64(x_m * x_m), 1.0) / exp(1)) end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := N[(N[(N[(N[(0.16666666666666666 * N[(x$95$m * x$95$m), $MachinePrecision] + 0.5), $MachinePrecision] * N[(x$95$m * x$95$m), $MachinePrecision] + 1.0), $MachinePrecision] * N[(x$95$m * x$95$m), $MachinePrecision] + 1.0), $MachinePrecision] / E), $MachinePrecision]
\begin{array}{l}
x_m = \left|x\right|
\\
\frac{\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(0.16666666666666666, x\_m \cdot x\_m, 0.5\right), x\_m \cdot x\_m, 1\right), x\_m \cdot x\_m, 1\right)}{e}
\end{array}
Initial program 100.0%
lift-exp.f64N/A
lift-neg.f64N/A
lift--.f64N/A
lift-*.f64N/A
exp-negN/A
lower-/.f64N/A
pow2N/A
exp-diffN/A
lower-/.f64N/A
exp-1-eN/A
lower-E.f64N/A
pow2N/A
exp-prodN/A
lower-pow.f64N/A
lower-exp.f64100.0
Applied rewrites100.0%
lift-/.f64N/A
lift-E.f64N/A
lift-/.f64N/A
lift-exp.f64N/A
lift-pow.f64N/A
pow-expN/A
pow2N/A
e-exp-1N/A
div-expN/A
pow2N/A
exp-negN/A
fp-cancel-sub-sign-invN/A
distribute-lft-neg-inN/A
pow2N/A
distribute-neg-inN/A
metadata-evalN/A
mul-1-negN/A
distribute-lft-neg-outN/A
metadata-evalN/A
*-lft-identityN/A
+-commutativeN/A
pow2N/A
Applied rewrites100.0%
Taylor expanded in x around 0
pow-expN/A
sqr-neg-revN/A
pow-expN/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
+-commutativeN/A
lower-fma.f64N/A
pow2N/A
lift-*.f64N/A
pow2N/A
lift-*.f64N/A
pow2N/A
lift-*.f6492.0
Applied rewrites92.0%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (/ (fma (fma (* x_m x_m) 0.5 1.0) (* x_m x_m) 1.0) E))
x_m = fabs(x);
double code(double x_m) {
return fma(fma((x_m * x_m), 0.5, 1.0), (x_m * x_m), 1.0) / ((double) M_E);
}
x_m = abs(x) function code(x_m) return Float64(fma(fma(Float64(x_m * x_m), 0.5, 1.0), Float64(x_m * x_m), 1.0) / exp(1)) end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := N[(N[(N[(N[(x$95$m * x$95$m), $MachinePrecision] * 0.5 + 1.0), $MachinePrecision] * N[(x$95$m * x$95$m), $MachinePrecision] + 1.0), $MachinePrecision] / E), $MachinePrecision]
\begin{array}{l}
x_m = \left|x\right|
\\
\frac{\mathsf{fma}\left(\mathsf{fma}\left(x\_m \cdot x\_m, 0.5, 1\right), x\_m \cdot x\_m, 1\right)}{e}
\end{array}
Initial program 100.0%
lift-exp.f64N/A
lift-neg.f64N/A
lift--.f64N/A
lift-*.f64N/A
exp-negN/A
lower-/.f64N/A
pow2N/A
exp-diffN/A
lower-/.f64N/A
exp-1-eN/A
lower-E.f64N/A
pow2N/A
exp-prodN/A
lower-pow.f64N/A
lower-exp.f64100.0
Applied rewrites100.0%
lift-/.f64N/A
lift-E.f64N/A
lift-/.f64N/A
lift-exp.f64N/A
lift-pow.f64N/A
pow-expN/A
pow2N/A
e-exp-1N/A
div-expN/A
pow2N/A
exp-negN/A
fp-cancel-sub-sign-invN/A
distribute-lft-neg-inN/A
pow2N/A
distribute-neg-inN/A
metadata-evalN/A
mul-1-negN/A
distribute-lft-neg-outN/A
metadata-evalN/A
*-lft-identityN/A
+-commutativeN/A
pow2N/A
Applied rewrites100.0%
Taylor expanded in x around 0
pow-expN/A
sqr-neg-revN/A
pow-expN/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
pow2N/A
lift-*.f64N/A
pow2N/A
lift-*.f6488.0
Applied rewrites88.0%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (if (<= (- 1.0 (* x_m x_m)) -2.0) (/ (* x_m x_m) E) (/ 1.0 E)))
x_m = fabs(x);
double code(double x_m) {
double tmp;
if ((1.0 - (x_m * x_m)) <= -2.0) {
tmp = (x_m * x_m) / ((double) M_E);
} else {
tmp = 1.0 / ((double) M_E);
}
return tmp;
}
x_m = Math.abs(x);
public static double code(double x_m) {
double tmp;
if ((1.0 - (x_m * x_m)) <= -2.0) {
tmp = (x_m * x_m) / Math.E;
} else {
tmp = 1.0 / Math.E;
}
return tmp;
}
x_m = math.fabs(x) def code(x_m): tmp = 0 if (1.0 - (x_m * x_m)) <= -2.0: tmp = (x_m * x_m) / math.e else: tmp = 1.0 / math.e return tmp
x_m = abs(x) function code(x_m) tmp = 0.0 if (Float64(1.0 - Float64(x_m * x_m)) <= -2.0) tmp = Float64(Float64(x_m * x_m) / exp(1)); else tmp = Float64(1.0 / exp(1)); end return tmp end
x_m = abs(x); function tmp_2 = code(x_m) tmp = 0.0; if ((1.0 - (x_m * x_m)) <= -2.0) tmp = (x_m * x_m) / 2.71828182845904523536; else tmp = 1.0 / 2.71828182845904523536; end tmp_2 = tmp; end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := If[LessEqual[N[(1.0 - N[(x$95$m * x$95$m), $MachinePrecision]), $MachinePrecision], -2.0], N[(N[(x$95$m * x$95$m), $MachinePrecision] / E), $MachinePrecision], N[(1.0 / E), $MachinePrecision]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
\mathbf{if}\;1 - x\_m \cdot x\_m \leq -2:\\
\;\;\;\;\frac{x\_m \cdot x\_m}{e}\\
\mathbf{else}:\\
\;\;\;\;\frac{1}{e}\\
\end{array}
\end{array}
if (-.f64 #s(literal 1 binary64) (*.f64 x x)) < -2
Initial program 100.0%
lift-exp.f64N/A
lift-neg.f64N/A
lift--.f64N/A
lift-*.f64N/A
exp-negN/A
lower-/.f64N/A
pow2N/A
exp-diffN/A
lower-/.f64N/A
exp-1-eN/A
lower-E.f64N/A
pow2N/A
exp-prodN/A
lower-pow.f64N/A
lower-exp.f64100.0
Applied rewrites100.0%
Taylor expanded in x around 0
div-add-revN/A
lower-/.f64N/A
+-commutativeN/A
pow2N/A
lower-fma.f64N/A
lift-E.f6452.0
Applied rewrites52.0%
Taylor expanded in x around inf
pow2N/A
lift-*.f6452.0
Applied rewrites52.0%
if -2 < (-.f64 #s(literal 1 binary64) (*.f64 x x))
Initial program 100.0%
lift-exp.f64N/A
lift-neg.f64N/A
lift--.f64N/A
lift-*.f64N/A
exp-negN/A
lower-/.f64N/A
pow2N/A
exp-diffN/A
lower-/.f64N/A
exp-1-eN/A
lower-E.f64N/A
pow2N/A
exp-prodN/A
lower-pow.f64N/A
lower-exp.f64100.0
Applied rewrites100.0%
Taylor expanded in x around 0
lift-E.f6498.9
Applied rewrites98.9%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (/ (fma x_m x_m 1.0) E))
x_m = fabs(x);
double code(double x_m) {
return fma(x_m, x_m, 1.0) / ((double) M_E);
}
x_m = abs(x) function code(x_m) return Float64(fma(x_m, x_m, 1.0) / exp(1)) end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := N[(N[(x$95$m * x$95$m + 1.0), $MachinePrecision] / E), $MachinePrecision]
\begin{array}{l}
x_m = \left|x\right|
\\
\frac{\mathsf{fma}\left(x\_m, x\_m, 1\right)}{e}
\end{array}
Initial program 100.0%
lift-exp.f64N/A
lift-neg.f64N/A
lift--.f64N/A
lift-*.f64N/A
exp-negN/A
lower-/.f64N/A
pow2N/A
exp-diffN/A
lower-/.f64N/A
exp-1-eN/A
lower-E.f64N/A
pow2N/A
exp-prodN/A
lower-pow.f64N/A
lower-exp.f64100.0
Applied rewrites100.0%
Taylor expanded in x around 0
div-add-revN/A
lower-/.f64N/A
+-commutativeN/A
pow2N/A
lower-fma.f64N/A
lift-E.f6476.1
Applied rewrites76.1%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (/ 1.0 E))
x_m = fabs(x);
double code(double x_m) {
return 1.0 / ((double) M_E);
}
x_m = Math.abs(x);
public static double code(double x_m) {
return 1.0 / Math.E;
}
x_m = math.fabs(x) def code(x_m): return 1.0 / math.e
x_m = abs(x) function code(x_m) return Float64(1.0 / exp(1)) end
x_m = abs(x); function tmp = code(x_m) tmp = 1.0 / 2.71828182845904523536; end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := N[(1.0 / E), $MachinePrecision]
\begin{array}{l}
x_m = \left|x\right|
\\
\frac{1}{e}
\end{array}
Initial program 100.0%
lift-exp.f64N/A
lift-neg.f64N/A
lift--.f64N/A
lift-*.f64N/A
exp-negN/A
lower-/.f64N/A
pow2N/A
exp-diffN/A
lower-/.f64N/A
exp-1-eN/A
lower-E.f64N/A
pow2N/A
exp-prodN/A
lower-pow.f64N/A
lower-exp.f64100.0
Applied rewrites100.0%
Taylor expanded in x around 0
lift-E.f6451.7
Applied rewrites51.7%
herbie shell --seed 2025097
(FPCore (x)
:name "exp neg sub"
:precision binary64
(exp (- (- 1.0 (* x x)))))