
(FPCore (x)
:precision binary64
(let* ((t_0 (/ 1.0 (+ 1.0 (* 0.3275911 (fabs x))))))
(-
1.0
(*
(*
t_0
(+
0.254829592
(*
t_0
(+
-0.284496736
(*
t_0
(+ 1.421413741 (* t_0 (+ -1.453152027 (* t_0 1.061405429)))))))))
(exp (- (* (fabs x) (fabs x))))))))
double code(double x) {
double t_0 = 1.0 / (1.0 + (0.3275911 * fabs(x)));
return 1.0 - ((t_0 * (0.254829592 + (t_0 * (-0.284496736 + (t_0 * (1.421413741 + (t_0 * (-1.453152027 + (t_0 * 1.061405429))))))))) * exp(-(fabs(x) * fabs(x))));
}
module fmin_fmax_functions
  ! NaN-aware generic max/min with the semantics of C's fmax/fmin:
  ! when exactly one argument is NaN, the other argument is returned
  ! (x /= x is true only for NaN).  Mixed real(4)/real(8) combinations
  ! promote the result to real(8).
  implicit none
  private
  public fmax
  public fmin
  interface fmax
    module procedure fmax88
    module procedure fmax44
    module procedure fmax84
    module procedure fmax48
  end interface
  interface fmin
    module procedure fmin88
    module procedure fmin44
    module procedure fmin84
    module procedure fmin48
  end interface
contains
  real(8) function fmax88(x, y) result (res)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    if (x /= x) then          ! x is NaN: fall back to y
      res = y
    else if (y /= y) then     ! y is NaN: fall back to x
      res = x
    else
      res = max(x, y)
    end if
  end function fmax88
  real(4) function fmax44(x, y) result (res)
    real(4), intent (in) :: x
    real(4), intent (in) :: y
    if (x /= x) then
      res = y
    else if (y /= y) then
      res = x
    else
      res = max(x, y)
    end if
  end function fmax44
  real(8) function fmax84(x, y) result(res)
    real(8), intent (in) :: x
    real(4), intent (in) :: y
    if (x /= x) then
      res = dble(y)
    else if (y /= y) then
      res = x
    else
      res = max(x, dble(y))
    end if
  end function fmax84
  real(8) function fmax48(x, y) result(res)
    real(4), intent (in) :: x
    real(8), intent (in) :: y
    if (x /= x) then
      res = y
    else if (y /= y) then
      res = dble(x)
    else
      res = max(dble(x), y)
    end if
  end function fmax48
  real(8) function fmin88(x, y) result (res)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    if (x /= x) then
      res = y
    else if (y /= y) then
      res = x
    else
      res = min(x, y)
    end if
  end function fmin88
  real(4) function fmin44(x, y) result (res)
    real(4), intent (in) :: x
    real(4), intent (in) :: y
    if (x /= x) then
      res = y
    else if (y /= y) then
      res = x
    else
      res = min(x, y)
    end if
  end function fmin44
  real(8) function fmin84(x, y) result(res)
    real(8), intent (in) :: x
    real(4), intent (in) :: y
    if (x /= x) then
      res = dble(y)
    else if (y /= y) then
      res = x
    else
      res = min(x, dble(y))
    end if
  end function fmin84
  real(8) function fmin48(x, y) result(res)
    real(4), intent (in) :: x
    real(8), intent (in) :: y
    if (x /= x) then
      res = y
    else if (y /= y) then
      res = dble(x)
    else
      res = min(dble(x), y)
    end if
  end function fmin48
end module fmin_fmax_functions
!> Abramowitz & Stegun 7.1.26 rational approximation of erf(x);
!> absolute error <= 1.5e-7.  Only abs(x) appears, so the result is
!> even in x (it returns erf(|x|), not the odd extension, for x < 0).
real(8) function code(x)
  implicit none                    ! was missing: forbid implicit typing
  real(8), intent (in) :: x
  real(8) :: t_0
  ! NOTE(review): the original "use fmin_fmax_functions" referenced
  ! nothing from that module and has been dropped.
  ! t_0 = 1/(1 + p*|x|) with p = 0.3275911 (A&S fitting constant)
  t_0 = 1.0d0 / (1.0d0 + (0.3275911d0 * abs(x)))
  ! Degree-5 polynomial in t_0 (Horner form), damped by exp(-x**2)
  code = 1.0d0 - ((t_0 * (0.254829592d0 + (t_0 * ((-0.284496736d0) + (t_0 * (1.421413741d0 + (t_0 * ((-1.453152027d0) + (t_0 * 1.061405429d0))))))))) * exp(-(abs(x) * abs(x))))
end function code
/**
 * Abramowitz-Stegun 7.1.26 rational approximation of erf(x)
 * (absolute error at most 1.5e-7).  Only Math.abs(x) is used,
 * so the result is even in x.
 */
public static double code(double x) {
    final double ax = Math.abs(x);
    final double t = 1.0 / (1.0 + 0.3275911 * ax);
    // Horner evaluation of the degree-5 polynomial in t
    double p = 1.061405429;
    p = p * t + -1.453152027;
    p = p * t + 1.421413741;
    p = p * t + -0.284496736;
    p = p * t + 0.254829592;
    return 1.0 - t * p * Math.exp(-(ax * ax));
}
def code(x):
    """Abramowitz-Stegun 7.1.26 rational approximation of erf(x).

    Absolute error <= 1.5e-7.  Only fabs(x) is used, so the result is
    even in x.
    """
    ax = math.fabs(x)
    t = 1.0 / (1.0 + 0.3275911 * ax)
    # Horner evaluation of the degree-5 polynomial in t
    p = 1.061405429
    for c in (-1.453152027, 1.421413741, -0.284496736, 0.254829592):
        p = p * t + c
    return 1.0 - t * p * math.exp(-(ax * ax))
# Abramowitz-Stegun 7.1.26 rational approximation of erf(x)
# (|error| <= 1.5e-7).  Only abs(x) is used, so the result is even in x.
function code(x)
    ax = abs(x)
    t = 1.0 / (1.0 + 0.3275911 * ax)
    # Horner evaluation of the degree-5 polynomial in t
    p = 1.061405429
    p = p * t - 1.453152027
    p = p * t + 1.421413741
    p = p * t - 0.284496736
    p = p * t + 0.254829592
    return 1.0 - t * p * exp(-(ax * ax))
end
function tmp = code(x)
  % Abramowitz-Stegun 7.1.26 rational approximation of erf(x)
  % (|error| <= 1.5e-7).  Only abs(x) is used, so the result is even in x.
  ax = abs(x);
  t = 1.0 / (1.0 + 0.3275911 * ax);
  % Horner evaluation of the degree-5 polynomial in t
  p = 1.061405429;
  p = p * t - 1.453152027;
  p = p * t + 1.421413741;
  p = p * t - 0.284496736;
  p = p * t + 0.254829592;
  tmp = 1.0 - t * p * exp(-(ax * ax));
end
(* Abramowitz-Stegun 7.1.26 erf approximation; every arithmetic step is
   wrapped in N[..., $MachinePrecision] to force machine-precision
   evaluation at each node, mirroring the binary64 reference code. *)
code[x_] := Block[{t$95$0 = N[(1.0 / N[(1.0 + N[(0.3275911 * N[Abs[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, N[(1.0 - N[(N[(t$95$0 * N[(0.254829592 + N[(t$95$0 * N[(-0.284496736 + N[(t$95$0 * N[(1.421413741 + N[(t$95$0 * N[(-1.453152027 + N[(t$95$0 * 1.061405429), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[Exp[(-N[(N[Abs[x], $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision])], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{1}{1 + 0.3275911 \cdot \left|x\right|}\\
1 - \left(t\_0 \cdot \left(0.254829592 + t\_0 \cdot \left(-0.284496736 + t\_0 \cdot \left(1.421413741 + t\_0 \cdot \left(-1.453152027 + t\_0 \cdot 1.061405429\right)\right)\right)\right)\right) \cdot e^{-\left|x\right| \cdot \left|x\right|}
\end{array}
\end{array}
Herbie found 17 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x)
:precision binary64
(let* ((t_0 (/ 1.0 (+ 1.0 (* 0.3275911 (fabs x))))))
(-
1.0
(*
(*
t_0
(+
0.254829592
(*
t_0
(+
-0.284496736
(*
t_0
(+ 1.421413741 (* t_0 (+ -1.453152027 (* t_0 1.061405429)))))))))
(exp (- (* (fabs x) (fabs x))))))))
double code(double x) {
double t_0 = 1.0 / (1.0 + (0.3275911 * fabs(x)));
return 1.0 - ((t_0 * (0.254829592 + (t_0 * (-0.284496736 + (t_0 * (1.421413741 + (t_0 * (-1.453152027 + (t_0 * 1.061405429))))))))) * exp(-(fabs(x) * fabs(x))));
}
module fmin_fmax_functions
! NaN-aware generic max/min with the semantics of C's fmax/fmin:
! when exactly one argument is NaN, the other argument is returned.
! Mixed real(4)/real(8) combinations promote the result to real(8).
implicit none
private
public fmax
public fmin
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
! Each body uses merge(y, merge(x, max(x,y), y/=y), x/=x):
!   x /= x is true only when x is NaN, so the selection is
!   x NaN -> y; else y NaN -> x; else max/min of the two.
! NOTE(review): x /= x relies on IEEE NaN semantics; ieee_is_nan from
! ieee_arithmetic would express the same test more explicitly.
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
! Abramowitz & Stegun 7.1.26 rational approximation of erf(x);
! absolute error <= 1.5e-7.  Even in x, since only abs(x) appears.
! NOTE(review): no implicit none here, and nothing from
! fmin_fmax_functions is referenced - confirm before removing the use.
real(8) function code(x)
use fmin_fmax_functions
real(8), intent (in) :: x
real(8) :: t_0
! t_0 = 1/(1 + p*|x|) with p = 0.3275911 (A&S fitting constant)
t_0 = 1.0d0 / (1.0d0 + (0.3275911d0 * abs(x)))
! Degree-5 polynomial in t_0 (Horner form), damped by exp(-x**2)
code = 1.0d0 - ((t_0 * (0.254829592d0 + (t_0 * ((-0.284496736d0) + (t_0 * (1.421413741d0 + (t_0 * ((-1.453152027d0) + (t_0 * 1.061405429d0))))))))) * exp(-(abs(x) * abs(x))))
end function
public static double code(double x) {
double t_0 = 1.0 / (1.0 + (0.3275911 * Math.abs(x)));
return 1.0 - ((t_0 * (0.254829592 + (t_0 * (-0.284496736 + (t_0 * (1.421413741 + (t_0 * (-1.453152027 + (t_0 * 1.061405429))))))))) * Math.exp(-(Math.abs(x) * Math.abs(x))));
}
def code(x): t_0 = 1.0 / (1.0 + (0.3275911 * math.fabs(x))) return 1.0 - ((t_0 * (0.254829592 + (t_0 * (-0.284496736 + (t_0 * (1.421413741 + (t_0 * (-1.453152027 + (t_0 * 1.061405429))))))))) * math.exp(-(math.fabs(x) * math.fabs(x))))
function code(x) t_0 = Float64(1.0 / Float64(1.0 + Float64(0.3275911 * abs(x)))) return Float64(1.0 - Float64(Float64(t_0 * Float64(0.254829592 + Float64(t_0 * Float64(-0.284496736 + Float64(t_0 * Float64(1.421413741 + Float64(t_0 * Float64(-1.453152027 + Float64(t_0 * 1.061405429))))))))) * exp(Float64(-Float64(abs(x) * abs(x)))))) end
function tmp = code(x) t_0 = 1.0 / (1.0 + (0.3275911 * abs(x))); tmp = 1.0 - ((t_0 * (0.254829592 + (t_0 * (-0.284496736 + (t_0 * (1.421413741 + (t_0 * (-1.453152027 + (t_0 * 1.061405429))))))))) * exp(-(abs(x) * abs(x)))); end
code[x_] := Block[{t$95$0 = N[(1.0 / N[(1.0 + N[(0.3275911 * N[Abs[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, N[(1.0 - N[(N[(t$95$0 * N[(0.254829592 + N[(t$95$0 * N[(-0.284496736 + N[(t$95$0 * N[(1.421413741 + N[(t$95$0 * N[(-1.453152027 + N[(t$95$0 * 1.061405429), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[Exp[(-N[(N[Abs[x], $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision])], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{1}{1 + 0.3275911 \cdot \left|x\right|}\\
1 - \left(t\_0 \cdot \left(0.254829592 + t\_0 \cdot \left(-0.284496736 + t\_0 \cdot \left(1.421413741 + t\_0 \cdot \left(-1.453152027 + t\_0 \cdot 1.061405429\right)\right)\right)\right)\right) \cdot e^{-\left|x\right| \cdot \left|x\right|}
\end{array}
\end{array}
(FPCore (x)
:precision binary64
(let* ((t_0 (fma (fabs x) 0.3275911 1.0))
(t_1
(/
(+
(/
(+
(/
(- (/ (- (/ 1.061405429 t_0) 1.453152027) t_0) -1.421413741)
t_0)
-0.284496736)
t_0)
0.254829592)
(* t_0 (exp (* x x)))))
(t_2 (fma t_1 (+ t_1 1.0) 1.0))
(t_3 (+ (+ 1.0 (pow t_1 6.0)) (pow t_1 3.0))))
(- (/ (/ 1.0 t_3) t_2) (/ (/ (pow t_1 9.0) t_3) t_2))))
/* Herbie-rewritten erf approximation (A&S 7.1.26).  Algebraically the
 * result equals 1 - t_1: the identity
 *   1 - t_1^9 = (1 - t_1)(1 + t_1 + t_1^2)(1 + t_1^3 + t_1^6)
 * lets the code divide by the last two factors (t_2 and t_3) so that the
 * subtraction 1 - ... loses less precision when t_1 is near 1 (small |x|). */
double code(double x) {
/* t_0 = 1 + 0.3275911*|x| via a fused multiply-add */
double t_0 = fma(fabs(x), 0.3275911, 1.0);
/* A&S polynomial in 1/t_0 evaluated by repeated division, then divided by
 * t_0*exp(x*x) instead of multiplying by (1/t_0)*exp(-x*x). */
double t_1 = ((((((((1.061405429 / t_0) - 1.453152027) / t_0) - -1.421413741) / t_0) + -0.284496736) / t_0) + 0.254829592) / (t_0 * exp((x * x)));
/* t_2 = t_1^2 + t_1 + 1, via fma */
double t_2 = fma(t_1, (t_1 + 1.0), 1.0);
/* t_3 = 1 + t_1^6 + t_1^3 */
double t_3 = (1.0 + pow(t_1, 6.0)) + pow(t_1, 3.0);
return ((1.0 / t_3) / t_2) - ((pow(t_1, 9.0) / t_3) / t_2);
}
function code(x) t_0 = fma(abs(x), 0.3275911, 1.0) t_1 = Float64(Float64(Float64(Float64(Float64(Float64(Float64(Float64(Float64(1.061405429 / t_0) - 1.453152027) / t_0) - -1.421413741) / t_0) + -0.284496736) / t_0) + 0.254829592) / Float64(t_0 * exp(Float64(x * x)))) t_2 = fma(t_1, Float64(t_1 + 1.0), 1.0) t_3 = Float64(Float64(1.0 + (t_1 ^ 6.0)) + (t_1 ^ 3.0)) return Float64(Float64(Float64(1.0 / t_3) / t_2) - Float64(Float64((t_1 ^ 9.0) / t_3) / t_2)) end
code[x_] := Block[{t$95$0 = N[(N[Abs[x], $MachinePrecision] * 0.3275911 + 1.0), $MachinePrecision]}, Block[{t$95$1 = N[(N[(N[(N[(N[(N[(N[(N[(N[(1.061405429 / t$95$0), $MachinePrecision] - 1.453152027), $MachinePrecision] / t$95$0), $MachinePrecision] - -1.421413741), $MachinePrecision] / t$95$0), $MachinePrecision] + -0.284496736), $MachinePrecision] / t$95$0), $MachinePrecision] + 0.254829592), $MachinePrecision] / N[(t$95$0 * N[Exp[N[(x * x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$2 = N[(t$95$1 * N[(t$95$1 + 1.0), $MachinePrecision] + 1.0), $MachinePrecision]}, Block[{t$95$3 = N[(N[(1.0 + N[Power[t$95$1, 6.0], $MachinePrecision]), $MachinePrecision] + N[Power[t$95$1, 3.0], $MachinePrecision]), $MachinePrecision]}, N[(N[(N[(1.0 / t$95$3), $MachinePrecision] / t$95$2), $MachinePrecision] - N[(N[(N[Power[t$95$1, 9.0], $MachinePrecision] / t$95$3), $MachinePrecision] / t$95$2), $MachinePrecision]), $MachinePrecision]]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \mathsf{fma}\left(\left|x\right|, 0.3275911, 1\right)\\
t_1 := \frac{\frac{\frac{\frac{\frac{1.061405429}{t\_0} - 1.453152027}{t\_0} - -1.421413741}{t\_0} + -0.284496736}{t\_0} + 0.254829592}{t\_0 \cdot e^{x \cdot x}}\\
t_2 := \mathsf{fma}\left(t\_1, t\_1 + 1, 1\right)\\
t_3 := \left(1 + {t\_1}^{6}\right) + {t\_1}^{3}\\
\frac{\frac{1}{t\_3}}{t\_2} - \frac{\frac{{t\_1}^{9}}{t\_3}}{t\_2}
\end{array}
\end{array}
Initial program 78.9%
Applied rewrites 79.0%
Applied rewrites 79.1%
Applied rewrites 80.2%
Applied rewrites 83.2%
(FPCore (x)
:precision binary64
(let* ((t_0 (fma (fabs x) 0.3275911 1.0))
(t_1
(/
(+
(/
(+
(/
(- (/ (- (/ 1.061405429 t_0) 1.453152027) t_0) -1.421413741)
t_0)
-0.284496736)
t_0)
0.254829592)
(* t_0 (exp (* x x)))))
(t_2 (+ (+ 1.0 (pow t_1 6.0)) (pow t_1 3.0))))
(/ (- (/ 1.0 t_2) (/ (pow t_1 9.0) t_2)) (+ (* t_1 (+ t_1 1.0)) 1.0))))
double code(double x) {
double t_0 = fma(fabs(x), 0.3275911, 1.0);
double t_1 = ((((((((1.061405429 / t_0) - 1.453152027) / t_0) - -1.421413741) / t_0) + -0.284496736) / t_0) + 0.254829592) / (t_0 * exp((x * x)));
double t_2 = (1.0 + pow(t_1, 6.0)) + pow(t_1, 3.0);
return ((1.0 / t_2) - (pow(t_1, 9.0) / t_2)) / ((t_1 * (t_1 + 1.0)) + 1.0);
}
function code(x) t_0 = fma(abs(x), 0.3275911, 1.0) t_1 = Float64(Float64(Float64(Float64(Float64(Float64(Float64(Float64(Float64(1.061405429 / t_0) - 1.453152027) / t_0) - -1.421413741) / t_0) + -0.284496736) / t_0) + 0.254829592) / Float64(t_0 * exp(Float64(x * x)))) t_2 = Float64(Float64(1.0 + (t_1 ^ 6.0)) + (t_1 ^ 3.0)) return Float64(Float64(Float64(1.0 / t_2) - Float64((t_1 ^ 9.0) / t_2)) / Float64(Float64(t_1 * Float64(t_1 + 1.0)) + 1.0)) end
code[x_] := Block[{t$95$0 = N[(N[Abs[x], $MachinePrecision] * 0.3275911 + 1.0), $MachinePrecision]}, Block[{t$95$1 = N[(N[(N[(N[(N[(N[(N[(N[(N[(1.061405429 / t$95$0), $MachinePrecision] - 1.453152027), $MachinePrecision] / t$95$0), $MachinePrecision] - -1.421413741), $MachinePrecision] / t$95$0), $MachinePrecision] + -0.284496736), $MachinePrecision] / t$95$0), $MachinePrecision] + 0.254829592), $MachinePrecision] / N[(t$95$0 * N[Exp[N[(x * x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$2 = N[(N[(1.0 + N[Power[t$95$1, 6.0], $MachinePrecision]), $MachinePrecision] + N[Power[t$95$1, 3.0], $MachinePrecision]), $MachinePrecision]}, N[(N[(N[(1.0 / t$95$2), $MachinePrecision] - N[(N[Power[t$95$1, 9.0], $MachinePrecision] / t$95$2), $MachinePrecision]), $MachinePrecision] / N[(N[(t$95$1 * N[(t$95$1 + 1.0), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \mathsf{fma}\left(\left|x\right|, 0.3275911, 1\right)\\
t_1 := \frac{\frac{\frac{\frac{\frac{1.061405429}{t\_0} - 1.453152027}{t\_0} - -1.421413741}{t\_0} + -0.284496736}{t\_0} + 0.254829592}{t\_0 \cdot e^{x \cdot x}}\\
t_2 := \left(1 + {t\_1}^{6}\right) + {t\_1}^{3}\\
\frac{\frac{1}{t\_2} - \frac{{t\_1}^{9}}{t\_2}}{t\_1 \cdot \left(t\_1 + 1\right) + 1}
\end{array}
\end{array}
Initial program 78.9%
Applied rewrites79.0%
Applied rewrites79.1%
Applied rewrites80.2%
Applied rewrites80.2%
(FPCore (x)
:precision binary64
(let* ((t_0 (fma (fabs x) 0.3275911 1.0))
(t_1
(/
(+
(/
(+
(/
(- (/ (- (/ 1.061405429 t_0) 1.453152027) t_0) -1.421413741)
t_0)
-0.284496736)
t_0)
0.254829592)
(* t_0 (exp (* x x)))))
(t_2 (+ (+ 1.0 (pow t_1 6.0)) (pow t_1 3.0))))
(/ (- (/ 1.0 t_2) (/ (pow t_1 9.0) t_2)) (fma t_1 (+ t_1 1.0) 1.0))))
double code(double x) {
double t_0 = fma(fabs(x), 0.3275911, 1.0);
double t_1 = ((((((((1.061405429 / t_0) - 1.453152027) / t_0) - -1.421413741) / t_0) + -0.284496736) / t_0) + 0.254829592) / (t_0 * exp((x * x)));
double t_2 = (1.0 + pow(t_1, 6.0)) + pow(t_1, 3.0);
return ((1.0 / t_2) - (pow(t_1, 9.0) / t_2)) / fma(t_1, (t_1 + 1.0), 1.0);
}
function code(x) t_0 = fma(abs(x), 0.3275911, 1.0) t_1 = Float64(Float64(Float64(Float64(Float64(Float64(Float64(Float64(Float64(1.061405429 / t_0) - 1.453152027) / t_0) - -1.421413741) / t_0) + -0.284496736) / t_0) + 0.254829592) / Float64(t_0 * exp(Float64(x * x)))) t_2 = Float64(Float64(1.0 + (t_1 ^ 6.0)) + (t_1 ^ 3.0)) return Float64(Float64(Float64(1.0 / t_2) - Float64((t_1 ^ 9.0) / t_2)) / fma(t_1, Float64(t_1 + 1.0), 1.0)) end
code[x_] := Block[{t$95$0 = N[(N[Abs[x], $MachinePrecision] * 0.3275911 + 1.0), $MachinePrecision]}, Block[{t$95$1 = N[(N[(N[(N[(N[(N[(N[(N[(N[(1.061405429 / t$95$0), $MachinePrecision] - 1.453152027), $MachinePrecision] / t$95$0), $MachinePrecision] - -1.421413741), $MachinePrecision] / t$95$0), $MachinePrecision] + -0.284496736), $MachinePrecision] / t$95$0), $MachinePrecision] + 0.254829592), $MachinePrecision] / N[(t$95$0 * N[Exp[N[(x * x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$2 = N[(N[(1.0 + N[Power[t$95$1, 6.0], $MachinePrecision]), $MachinePrecision] + N[Power[t$95$1, 3.0], $MachinePrecision]), $MachinePrecision]}, N[(N[(N[(1.0 / t$95$2), $MachinePrecision] - N[(N[Power[t$95$1, 9.0], $MachinePrecision] / t$95$2), $MachinePrecision]), $MachinePrecision] / N[(t$95$1 * N[(t$95$1 + 1.0), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \mathsf{fma}\left(\left|x\right|, 0.3275911, 1\right)\\
t_1 := \frac{\frac{\frac{\frac{\frac{1.061405429}{t\_0} - 1.453152027}{t\_0} - -1.421413741}{t\_0} + -0.284496736}{t\_0} + 0.254829592}{t\_0 \cdot e^{x \cdot x}}\\
t_2 := \left(1 + {t\_1}^{6}\right) + {t\_1}^{3}\\
\frac{\frac{1}{t\_2} - \frac{{t\_1}^{9}}{t\_2}}{\mathsf{fma}\left(t\_1, t\_1 + 1, 1\right)}
\end{array}
\end{array}
Initial program 78.9%
Applied rewrites79.0%
Applied rewrites79.1%
Applied rewrites80.2%
(FPCore (x)
:precision binary64
(let* ((t_0 (fma (fabs x) 0.3275911 1.0))
(t_1
(/
(+
(/
(+
(/
(- (/ (- (/ 1.061405429 t_0) 1.453152027) t_0) -1.421413741)
t_0)
-0.284496736)
t_0)
0.254829592)
(* t_0 (exp (* x x)))))
(t_2 (pow t_1 3.0)))
(/
(/ (- 1.0 (pow t_2 3.0)) (+ 1.0 (fma t_2 t_2 (* 1.0 t_2))))
(+ (* t_1 (+ t_1 1.0)) 1.0))))
double code(double x) {
double t_0 = fma(fabs(x), 0.3275911, 1.0);
double t_1 = ((((((((1.061405429 / t_0) - 1.453152027) / t_0) - -1.421413741) / t_0) + -0.284496736) / t_0) + 0.254829592) / (t_0 * exp((x * x)));
double t_2 = pow(t_1, 3.0);
return ((1.0 - pow(t_2, 3.0)) / (1.0 + fma(t_2, t_2, (1.0 * t_2)))) / ((t_1 * (t_1 + 1.0)) + 1.0);
}
function code(x) t_0 = fma(abs(x), 0.3275911, 1.0) t_1 = Float64(Float64(Float64(Float64(Float64(Float64(Float64(Float64(Float64(1.061405429 / t_0) - 1.453152027) / t_0) - -1.421413741) / t_0) + -0.284496736) / t_0) + 0.254829592) / Float64(t_0 * exp(Float64(x * x)))) t_2 = t_1 ^ 3.0 return Float64(Float64(Float64(1.0 - (t_2 ^ 3.0)) / Float64(1.0 + fma(t_2, t_2, Float64(1.0 * t_2)))) / Float64(Float64(t_1 * Float64(t_1 + 1.0)) + 1.0)) end
code[x_] := Block[{t$95$0 = N[(N[Abs[x], $MachinePrecision] * 0.3275911 + 1.0), $MachinePrecision]}, Block[{t$95$1 = N[(N[(N[(N[(N[(N[(N[(N[(N[(1.061405429 / t$95$0), $MachinePrecision] - 1.453152027), $MachinePrecision] / t$95$0), $MachinePrecision] - -1.421413741), $MachinePrecision] / t$95$0), $MachinePrecision] + -0.284496736), $MachinePrecision] / t$95$0), $MachinePrecision] + 0.254829592), $MachinePrecision] / N[(t$95$0 * N[Exp[N[(x * x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$2 = N[Power[t$95$1, 3.0], $MachinePrecision]}, N[(N[(N[(1.0 - N[Power[t$95$2, 3.0], $MachinePrecision]), $MachinePrecision] / N[(1.0 + N[(t$95$2 * t$95$2 + N[(1.0 * t$95$2), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(N[(t$95$1 * N[(t$95$1 + 1.0), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \mathsf{fma}\left(\left|x\right|, 0.3275911, 1\right)\\
t_1 := \frac{\frac{\frac{\frac{\frac{1.061405429}{t\_0} - 1.453152027}{t\_0} - -1.421413741}{t\_0} + -0.284496736}{t\_0} + 0.254829592}{t\_0 \cdot e^{x \cdot x}}\\
t_2 := {t\_1}^{3}\\
\frac{\frac{1 - {t\_2}^{3}}{1 + \mathsf{fma}\left(t\_2, t\_2, 1 \cdot t\_2\right)}}{t\_1 \cdot \left(t\_1 + 1\right) + 1}
\end{array}
\end{array}
Initial program 78.9%
Applied rewrites79.0%
Applied rewrites79.1%
Applied rewrites79.1%
(FPCore (x)
:precision binary64
(let* ((t_0 (fma (fabs x) 0.3275911 1.0))
(t_1
(/
(+
(/
(+
(/
(- (/ (- (/ 1.061405429 t_0) 1.453152027) t_0) -1.421413741)
t_0)
-0.284496736)
t_0)
0.254829592)
(* t_0 (exp (* x x)))))
(t_2 (pow t_1 3.0)))
(/
(/ (- 1.0 (pow t_2 3.0)) (+ 1.0 (fma t_2 t_2 (* 1.0 t_2))))
(fma t_1 (+ t_1 1.0) 1.0))))
double code(double x) {
double t_0 = fma(fabs(x), 0.3275911, 1.0);
double t_1 = ((((((((1.061405429 / t_0) - 1.453152027) / t_0) - -1.421413741) / t_0) + -0.284496736) / t_0) + 0.254829592) / (t_0 * exp((x * x)));
double t_2 = pow(t_1, 3.0);
return ((1.0 - pow(t_2, 3.0)) / (1.0 + fma(t_2, t_2, (1.0 * t_2)))) / fma(t_1, (t_1 + 1.0), 1.0);
}
function code(x) t_0 = fma(abs(x), 0.3275911, 1.0) t_1 = Float64(Float64(Float64(Float64(Float64(Float64(Float64(Float64(Float64(1.061405429 / t_0) - 1.453152027) / t_0) - -1.421413741) / t_0) + -0.284496736) / t_0) + 0.254829592) / Float64(t_0 * exp(Float64(x * x)))) t_2 = t_1 ^ 3.0 return Float64(Float64(Float64(1.0 - (t_2 ^ 3.0)) / Float64(1.0 + fma(t_2, t_2, Float64(1.0 * t_2)))) / fma(t_1, Float64(t_1 + 1.0), 1.0)) end
code[x_] := Block[{t$95$0 = N[(N[Abs[x], $MachinePrecision] * 0.3275911 + 1.0), $MachinePrecision]}, Block[{t$95$1 = N[(N[(N[(N[(N[(N[(N[(N[(N[(1.061405429 / t$95$0), $MachinePrecision] - 1.453152027), $MachinePrecision] / t$95$0), $MachinePrecision] - -1.421413741), $MachinePrecision] / t$95$0), $MachinePrecision] + -0.284496736), $MachinePrecision] / t$95$0), $MachinePrecision] + 0.254829592), $MachinePrecision] / N[(t$95$0 * N[Exp[N[(x * x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$2 = N[Power[t$95$1, 3.0], $MachinePrecision]}, N[(N[(N[(1.0 - N[Power[t$95$2, 3.0], $MachinePrecision]), $MachinePrecision] / N[(1.0 + N[(t$95$2 * t$95$2 + N[(1.0 * t$95$2), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(t$95$1 * N[(t$95$1 + 1.0), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \mathsf{fma}\left(\left|x\right|, 0.3275911, 1\right)\\
t_1 := \frac{\frac{\frac{\frac{\frac{1.061405429}{t\_0} - 1.453152027}{t\_0} - -1.421413741}{t\_0} + -0.284496736}{t\_0} + 0.254829592}{t\_0 \cdot e^{x \cdot x}}\\
t_2 := {t\_1}^{3}\\
\frac{\frac{1 - {t\_2}^{3}}{1 + \mathsf{fma}\left(t\_2, t\_2, 1 \cdot t\_2\right)}}{\mathsf{fma}\left(t\_1, t\_1 + 1, 1\right)}
\end{array}
\end{array}
Initial program 78.9%
Applied rewrites79.0%
Applied rewrites79.1%
(FPCore (x)
:precision binary64
(let* ((t_0 (fma (fabs x) 0.3275911 1.0))
(t_1
(/
(+
(/
(+
(/
(- (/ (- (/ 1.061405429 t_0) 1.453152027) t_0) -1.421413741)
t_0)
-0.284496736)
t_0)
0.254829592)
(* t_0 (exp (* x x))))))
(/
(- 1.0 (pow t_1 9.0))
(* (+ (+ 1.0 (pow t_1 6.0)) (pow t_1 3.0)) (fma t_1 (+ t_1 1.0) 1.0)))))
double code(double x) {
double t_0 = fma(fabs(x), 0.3275911, 1.0);
double t_1 = ((((((((1.061405429 / t_0) - 1.453152027) / t_0) - -1.421413741) / t_0) + -0.284496736) / t_0) + 0.254829592) / (t_0 * exp((x * x)));
return (1.0 - pow(t_1, 9.0)) / (((1.0 + pow(t_1, 6.0)) + pow(t_1, 3.0)) * fma(t_1, (t_1 + 1.0), 1.0));
}
function code(x) t_0 = fma(abs(x), 0.3275911, 1.0) t_1 = Float64(Float64(Float64(Float64(Float64(Float64(Float64(Float64(Float64(1.061405429 / t_0) - 1.453152027) / t_0) - -1.421413741) / t_0) + -0.284496736) / t_0) + 0.254829592) / Float64(t_0 * exp(Float64(x * x)))) return Float64(Float64(1.0 - (t_1 ^ 9.0)) / Float64(Float64(Float64(1.0 + (t_1 ^ 6.0)) + (t_1 ^ 3.0)) * fma(t_1, Float64(t_1 + 1.0), 1.0))) end
code[x_] := Block[{t$95$0 = N[(N[Abs[x], $MachinePrecision] * 0.3275911 + 1.0), $MachinePrecision]}, Block[{t$95$1 = N[(N[(N[(N[(N[(N[(N[(N[(N[(1.061405429 / t$95$0), $MachinePrecision] - 1.453152027), $MachinePrecision] / t$95$0), $MachinePrecision] - -1.421413741), $MachinePrecision] / t$95$0), $MachinePrecision] + -0.284496736), $MachinePrecision] / t$95$0), $MachinePrecision] + 0.254829592), $MachinePrecision] / N[(t$95$0 * N[Exp[N[(x * x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, N[(N[(1.0 - N[Power[t$95$1, 9.0], $MachinePrecision]), $MachinePrecision] / N[(N[(N[(1.0 + N[Power[t$95$1, 6.0], $MachinePrecision]), $MachinePrecision] + N[Power[t$95$1, 3.0], $MachinePrecision]), $MachinePrecision] * N[(t$95$1 * N[(t$95$1 + 1.0), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \mathsf{fma}\left(\left|x\right|, 0.3275911, 1\right)\\
t_1 := \frac{\frac{\frac{\frac{\frac{1.061405429}{t\_0} - 1.453152027}{t\_0} - -1.421413741}{t\_0} + -0.284496736}{t\_0} + 0.254829592}{t\_0 \cdot e^{x \cdot x}}\\
\frac{1 - {t\_1}^{9}}{\left(\left(1 + {t\_1}^{6}\right) + {t\_1}^{3}\right) \cdot \mathsf{fma}\left(t\_1, t\_1 + 1, 1\right)}
\end{array}
\end{array}
Initial program 78.9%
Applied rewrites79.0%
Applied rewrites79.1%
Applied rewrites79.1%
(FPCore (x)
:precision binary64
(let* ((t_0 (fma (fabs x) 0.3275911 1.0))
(t_1
(/
(+
(/
(+
(/
(- (/ (- (/ 1.061405429 t_0) 1.453152027) t_0) -1.421413741)
t_0)
-0.284496736)
t_0)
0.254829592)
(* t_0 (exp (* x x))))))
(/ (- 1.0 (pow t_1 3.0)) (+ (* t_1 (+ t_1 1.0)) 1.0))))
double code(double x) {
double t_0 = fma(fabs(x), 0.3275911, 1.0);
double t_1 = ((((((((1.061405429 / t_0) - 1.453152027) / t_0) - -1.421413741) / t_0) + -0.284496736) / t_0) + 0.254829592) / (t_0 * exp((x * x)));
return (1.0 - pow(t_1, 3.0)) / ((t_1 * (t_1 + 1.0)) + 1.0);
}
function code(x) t_0 = fma(abs(x), 0.3275911, 1.0) t_1 = Float64(Float64(Float64(Float64(Float64(Float64(Float64(Float64(Float64(1.061405429 / t_0) - 1.453152027) / t_0) - -1.421413741) / t_0) + -0.284496736) / t_0) + 0.254829592) / Float64(t_0 * exp(Float64(x * x)))) return Float64(Float64(1.0 - (t_1 ^ 3.0)) / Float64(Float64(t_1 * Float64(t_1 + 1.0)) + 1.0)) end
code[x_] := Block[{t$95$0 = N[(N[Abs[x], $MachinePrecision] * 0.3275911 + 1.0), $MachinePrecision]}, Block[{t$95$1 = N[(N[(N[(N[(N[(N[(N[(N[(N[(1.061405429 / t$95$0), $MachinePrecision] - 1.453152027), $MachinePrecision] / t$95$0), $MachinePrecision] - -1.421413741), $MachinePrecision] / t$95$0), $MachinePrecision] + -0.284496736), $MachinePrecision] / t$95$0), $MachinePrecision] + 0.254829592), $MachinePrecision] / N[(t$95$0 * N[Exp[N[(x * x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, N[(N[(1.0 - N[Power[t$95$1, 3.0], $MachinePrecision]), $MachinePrecision] / N[(N[(t$95$1 * N[(t$95$1 + 1.0), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \mathsf{fma}\left(\left|x\right|, 0.3275911, 1\right)\\
t_1 := \frac{\frac{\frac{\frac{\frac{1.061405429}{t\_0} - 1.453152027}{t\_0} - -1.421413741}{t\_0} + -0.284496736}{t\_0} + 0.254829592}{t\_0 \cdot e^{x \cdot x}}\\
\frac{1 - {t\_1}^{3}}{t\_1 \cdot \left(t\_1 + 1\right) + 1}
\end{array}
\end{array}
Initial program 78.9%
Applied rewrites79.0%
Applied rewrites79.0%
(FPCore (x)
:precision binary64
(let* ((t_0 (fma (fabs x) 0.3275911 1.0))
(t_1
(/
(+
(/
(+
(/
(- (/ (- (/ 1.061405429 t_0) 1.453152027) t_0) -1.421413741)
t_0)
-0.284496736)
t_0)
0.254829592)
(* t_0 (exp (* x x))))))
(/ (- 1.0 (pow t_1 3.0)) (fma t_1 (+ t_1 1.0) 1.0))))
double code(double x) {
double t_0 = fma(fabs(x), 0.3275911, 1.0);
double t_1 = ((((((((1.061405429 / t_0) - 1.453152027) / t_0) - -1.421413741) / t_0) + -0.284496736) / t_0) + 0.254829592) / (t_0 * exp((x * x)));
return (1.0 - pow(t_1, 3.0)) / fma(t_1, (t_1 + 1.0), 1.0);
}
function code(x) t_0 = fma(abs(x), 0.3275911, 1.0) t_1 = Float64(Float64(Float64(Float64(Float64(Float64(Float64(Float64(Float64(1.061405429 / t_0) - 1.453152027) / t_0) - -1.421413741) / t_0) + -0.284496736) / t_0) + 0.254829592) / Float64(t_0 * exp(Float64(x * x)))) return Float64(Float64(1.0 - (t_1 ^ 3.0)) / fma(t_1, Float64(t_1 + 1.0), 1.0)) end
code[x_] := Block[{t$95$0 = N[(N[Abs[x], $MachinePrecision] * 0.3275911 + 1.0), $MachinePrecision]}, Block[{t$95$1 = N[(N[(N[(N[(N[(N[(N[(N[(N[(1.061405429 / t$95$0), $MachinePrecision] - 1.453152027), $MachinePrecision] / t$95$0), $MachinePrecision] - -1.421413741), $MachinePrecision] / t$95$0), $MachinePrecision] + -0.284496736), $MachinePrecision] / t$95$0), $MachinePrecision] + 0.254829592), $MachinePrecision] / N[(t$95$0 * N[Exp[N[(x * x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, N[(N[(1.0 - N[Power[t$95$1, 3.0], $MachinePrecision]), $MachinePrecision] / N[(t$95$1 * N[(t$95$1 + 1.0), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \mathsf{fma}\left(\left|x\right|, 0.3275911, 1\right)\\
t_1 := \frac{\frac{\frac{\frac{\frac{1.061405429}{t\_0} - 1.453152027}{t\_0} - -1.421413741}{t\_0} + -0.284496736}{t\_0} + 0.254829592}{t\_0 \cdot e^{x \cdot x}}\\
\frac{1 - {t\_1}^{3}}{\mathsf{fma}\left(t\_1, t\_1 + 1, 1\right)}
\end{array}
\end{array}
Initial program 78.9%
Applied rewrites79.0%
(FPCore (x)
:precision binary64
(let* ((t_0 (fma (fabs x) 0.3275911 1.0)))
(-
1.0
(*
(*
(/
(+
(/
(+
(/ (- (/ (- (/ 1.061405429 t_0) 1.453152027) t_0) -1.421413741) t_0)
-0.284496736)
t_0)
0.254829592)
(- 1.0 (* 0.10731592879921 (* x x))))
(- 1.0 (* (fabs x) 0.3275911)))
(exp (- (* (fabs x) (fabs x))))))))
double code(double x) {
double t_0 = fma(fabs(x), 0.3275911, 1.0);
return 1.0 - (((((((((((1.061405429 / t_0) - 1.453152027) / t_0) - -1.421413741) / t_0) + -0.284496736) / t_0) + 0.254829592) / (1.0 - (0.10731592879921 * (x * x)))) * (1.0 - (fabs(x) * 0.3275911))) * exp(-(fabs(x) * fabs(x))));
}
function code(x) t_0 = fma(abs(x), 0.3275911, 1.0) return Float64(1.0 - Float64(Float64(Float64(Float64(Float64(Float64(Float64(Float64(Float64(Float64(Float64(1.061405429 / t_0) - 1.453152027) / t_0) - -1.421413741) / t_0) + -0.284496736) / t_0) + 0.254829592) / Float64(1.0 - Float64(0.10731592879921 * Float64(x * x)))) * Float64(1.0 - Float64(abs(x) * 0.3275911))) * exp(Float64(-Float64(abs(x) * abs(x)))))) end
code[x_] := Block[{t$95$0 = N[(N[Abs[x], $MachinePrecision] * 0.3275911 + 1.0), $MachinePrecision]}, N[(1.0 - N[(N[(N[(N[(N[(N[(N[(N[(N[(N[(N[(1.061405429 / t$95$0), $MachinePrecision] - 1.453152027), $MachinePrecision] / t$95$0), $MachinePrecision] - -1.421413741), $MachinePrecision] / t$95$0), $MachinePrecision] + -0.284496736), $MachinePrecision] / t$95$0), $MachinePrecision] + 0.254829592), $MachinePrecision] / N[(1.0 - N[(0.10731592879921 * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(1.0 - N[(N[Abs[x], $MachinePrecision] * 0.3275911), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[Exp[(-N[(N[Abs[x], $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision])], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \mathsf{fma}\left(\left|x\right|, 0.3275911, 1\right)\\
1 - \left(\frac{\frac{\frac{\frac{\frac{1.061405429}{t\_0} - 1.453152027}{t\_0} - -1.421413741}{t\_0} + -0.284496736}{t\_0} + 0.254829592}{1 - 0.10731592879921 \cdot \left(x \cdot x\right)} \cdot \left(1 - \left|x\right| \cdot 0.3275911\right)\right) \cdot e^{-\left|x\right| \cdot \left|x\right|}
\end{array}
\end{array}
Initial program 78.9%
Applied rewrites79.0%
(FPCore (x)
:precision binary64
(let* ((t_0 (fma -0.3275911 (fabs x) -1.0))
(t_1 (fma (fabs x) 0.3275911 1.0)))
(fma
(+
(/
(/
(+
(/ (- (/ (- (/ 1.061405429 t_1) 1.453152027) t_1) -1.421413741) t_1)
-0.284496736)
t_1)
t_0)
(/ 0.254829592 t_0))
(exp (* (- x) x))
1.0)))
double code(double x) {
double t_0 = fma(-0.3275911, fabs(x), -1.0);
double t_1 = fma(fabs(x), 0.3275911, 1.0);
return fma((((((((((1.061405429 / t_1) - 1.453152027) / t_1) - -1.421413741) / t_1) + -0.284496736) / t_1) / t_0) + (0.254829592 / t_0)), exp((-x * x)), 1.0);
}
# Herbie-generated candidate (fma-folded form). Float64(...) wrappers pin the
# binary64 rounding of each individual operation — keep the structure as-is.
function code(x) t_0 = fma(-0.3275911, abs(x), -1.0) t_1 = fma(abs(x), 0.3275911, 1.0) return fma(Float64(Float64(Float64(Float64(Float64(Float64(Float64(Float64(Float64(1.061405429 / t_1) - 1.453152027) / t_1) - -1.421413741) / t_1) + -0.284496736) / t_1) / t_0) + Float64(0.254829592 / t_0)), exp(Float64(Float64(-x) * x)), 1.0) end
(* Herbie-generated candidate (fma-folded form); per-operation rounding is
   expressed via N[..., $MachinePrecision]. *)
code[x_] := Block[{t$95$0 = N[(-0.3275911 * N[Abs[x], $MachinePrecision] + -1.0), $MachinePrecision]}, Block[{t$95$1 = N[(N[Abs[x], $MachinePrecision] * 0.3275911 + 1.0), $MachinePrecision]}, N[(N[(N[(N[(N[(N[(N[(N[(N[(N[(1.061405429 / t$95$1), $MachinePrecision] - 1.453152027), $MachinePrecision] / t$95$1), $MachinePrecision] - -1.421413741), $MachinePrecision] / t$95$1), $MachinePrecision] + -0.284496736), $MachinePrecision] / t$95$1), $MachinePrecision] / t$95$0), $MachinePrecision] + N[(0.254829592 / t$95$0), $MachinePrecision]), $MachinePrecision] * N[Exp[N[((-x) * x), $MachinePrecision]], $MachinePrecision] + 1.0), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \mathsf{fma}\left(-0.3275911, \left|x\right|, -1\right)\\
t_1 := \mathsf{fma}\left(\left|x\right|, 0.3275911, 1\right)\\
\mathsf{fma}\left(\frac{\frac{\frac{\frac{\frac{1.061405429}{t\_1} - 1.453152027}{t\_1} - -1.421413741}{t\_1} + -0.284496736}{t\_1}}{t\_0} + \frac{0.254829592}{t\_0}, e^{\left(-x\right) \cdot x}, 1\right)
\end{array}
\end{array}
Initial program 78.9%
Applied rewrites 78.9%
lift-/.f64 N/A
lift-+.f64 N/A
div-add N/A
lower-+.f64 N/A
lower-/.f64 N/A
lower-/.f64 79.0
Applied rewrites 79.0%
; Herbie candidate: rational erf core divided by (t_0 * exp(x^2)).
(FPCore (x)
:precision binary64
(let* ((t_0 (fma (fabs x) 0.3275911 1.0)))
(-
1.0
(/
(+
(/
(+
(/ (- (/ (- (/ 1.061405429 t_0) 1.453152027) t_0) -1.421413741) t_0)
-0.284496736)
t_0)
0.254829592)
(* t_0 (exp (* x x)))))))
double code(double x) {
double t_0 = fma(fabs(x), 0.3275911, 1.0);
return 1.0 - (((((((((1.061405429 / t_0) - 1.453152027) / t_0) - -1.421413741) / t_0) + -0.284496736) / t_0) + 0.254829592) / (t_0 * exp((x * x))));
}
# Herbie-generated candidate (division by t_0*exp(x^2)). Float64(...) wrappers
# pin per-operation binary64 rounding — do not restructure.
function code(x) t_0 = fma(abs(x), 0.3275911, 1.0) return Float64(1.0 - Float64(Float64(Float64(Float64(Float64(Float64(Float64(Float64(Float64(1.061405429 / t_0) - 1.453152027) / t_0) - -1.421413741) / t_0) + -0.284496736) / t_0) + 0.254829592) / Float64(t_0 * exp(Float64(x * x))))) end
(* Herbie-generated candidate (division by t*Exp[x^2]); N[..., $MachinePrecision]
   marks each rounded operation. *)
code[x_] := Block[{t$95$0 = N[(N[Abs[x], $MachinePrecision] * 0.3275911 + 1.0), $MachinePrecision]}, N[(1.0 - N[(N[(N[(N[(N[(N[(N[(N[(N[(1.061405429 / t$95$0), $MachinePrecision] - 1.453152027), $MachinePrecision] / t$95$0), $MachinePrecision] - -1.421413741), $MachinePrecision] / t$95$0), $MachinePrecision] + -0.284496736), $MachinePrecision] / t$95$0), $MachinePrecision] + 0.254829592), $MachinePrecision] / N[(t$95$0 * N[Exp[N[(x * x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \mathsf{fma}\left(\left|x\right|, 0.3275911, 1\right)\\
1 - \frac{\frac{\frac{\frac{\frac{1.061405429}{t\_0} - 1.453152027}{t\_0} - -1.421413741}{t\_0} + -0.284496736}{t\_0} + 0.254829592}{t\_0 \cdot e^{x \cdot x}}
\end{array}
\end{array}
Initial program 78.9%
Applied rewrites 78.9%
; Herbie candidate: exp(x^2)*t expanded via fma into a polynomial denominator.
(FPCore (x)
:precision binary64
(let* ((t_0 (fma (fabs x) 0.3275911 1.0)))
(-
1.0
(/
(+
(/
(+
(/ (- (/ (- (/ 1.061405429 t_0) 1.453152027) t_0) -1.421413741) t_0)
-0.284496736)
t_0)
0.254829592)
(+
1.0
(fma 0.3275911 (fabs x) (* (* x x) (- 1.0 (* -0.3275911 (fabs x))))))))))
double code(double x) {
double t_0 = fma(fabs(x), 0.3275911, 1.0);
return 1.0 - (((((((((1.061405429 / t_0) - 1.453152027) / t_0) - -1.421413741) / t_0) + -0.284496736) / t_0) + 0.254829592) / (1.0 + fma(0.3275911, fabs(x), ((x * x) * (1.0 - (-0.3275911 * fabs(x)))))));
}
# Herbie-generated candidate (polynomial denominator via fma). Float64(...)
# wrappers pin per-operation binary64 rounding — do not restructure.
function code(x) t_0 = fma(abs(x), 0.3275911, 1.0) return Float64(1.0 - Float64(Float64(Float64(Float64(Float64(Float64(Float64(Float64(Float64(1.061405429 / t_0) - 1.453152027) / t_0) - -1.421413741) / t_0) + -0.284496736) / t_0) + 0.254829592) / Float64(1.0 + fma(0.3275911, abs(x), Float64(Float64(x * x) * Float64(1.0 - Float64(-0.3275911 * abs(x)))))))) end
(* Herbie-generated candidate (polynomial denominator); N[..., $MachinePrecision]
   marks each rounded operation. *)
code[x_] := Block[{t$95$0 = N[(N[Abs[x], $MachinePrecision] * 0.3275911 + 1.0), $MachinePrecision]}, N[(1.0 - N[(N[(N[(N[(N[(N[(N[(N[(N[(1.061405429 / t$95$0), $MachinePrecision] - 1.453152027), $MachinePrecision] / t$95$0), $MachinePrecision] - -1.421413741), $MachinePrecision] / t$95$0), $MachinePrecision] + -0.284496736), $MachinePrecision] / t$95$0), $MachinePrecision] + 0.254829592), $MachinePrecision] / N[(1.0 + N[(0.3275911 * N[Abs[x], $MachinePrecision] + N[(N[(x * x), $MachinePrecision] * N[(1.0 - N[(-0.3275911 * N[Abs[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \mathsf{fma}\left(\left|x\right|, 0.3275911, 1\right)\\
1 - \frac{\frac{\frac{\frac{\frac{1.061405429}{t\_0} - 1.453152027}{t\_0} - -1.421413741}{t\_0} + -0.284496736}{t\_0} + 0.254829592}{1 + \mathsf{fma}\left(0.3275911, \left|x\right|, \left(x \cdot x\right) \cdot \left(1 - -0.3275911 \cdot \left|x\right|\right)\right)}
\end{array}
\end{array}
Initial program 78.9%
Applied rewrites 78.9%
Taylor expanded in x around 0
lower-+.f64 N/A
lift-fabs.f64 N/A
lower-fma.f64 N/A
lower-*.f64 N/A
pow2 N/A
lift-*.f64 N/A
lift-fabs.f64 N/A
fp-cancel-sign-sub-inv N/A
metadata-eval N/A
lift-fabs.f64 N/A
lower--.f64 N/A
lift-fabs.f64 N/A
lower-*.f64 78.4
Applied rewrites 78.4%
; Herbie candidate: exp(x^2) Taylor-truncated to (1 + x^2) in the denominator.
(FPCore (x)
:precision binary64
(let* ((t_0 (fma (fabs x) 0.3275911 1.0)))
(-
1.0
(/
(+
(/
(+
(/ (- (/ (- (/ 1.061405429 t_0) 1.453152027) t_0) -1.421413741) t_0)
-0.284496736)
t_0)
0.254829592)
(* t_0 (+ 1.0 (* x x)))))))
double code(double x) {
double t_0 = fma(fabs(x), 0.3275911, 1.0);
return 1.0 - (((((((((1.061405429 / t_0) - 1.453152027) / t_0) - -1.421413741) / t_0) + -0.284496736) / t_0) + 0.254829592) / (t_0 * (1.0 + (x * x))));
}
# Herbie-generated candidate (denominator t_0*(1 + x^2)). Float64(...) wrappers
# pin per-operation binary64 rounding — do not restructure.
function code(x) t_0 = fma(abs(x), 0.3275911, 1.0) return Float64(1.0 - Float64(Float64(Float64(Float64(Float64(Float64(Float64(Float64(Float64(1.061405429 / t_0) - 1.453152027) / t_0) - -1.421413741) / t_0) + -0.284496736) / t_0) + 0.254829592) / Float64(t_0 * Float64(1.0 + Float64(x * x))))) end
(* Herbie-generated candidate (denominator t*(1 + x^2)); N[..., $MachinePrecision]
   marks each rounded operation. *)
code[x_] := Block[{t$95$0 = N[(N[Abs[x], $MachinePrecision] * 0.3275911 + 1.0), $MachinePrecision]}, N[(1.0 - N[(N[(N[(N[(N[(N[(N[(N[(N[(1.061405429 / t$95$0), $MachinePrecision] - 1.453152027), $MachinePrecision] / t$95$0), $MachinePrecision] - -1.421413741), $MachinePrecision] / t$95$0), $MachinePrecision] + -0.284496736), $MachinePrecision] / t$95$0), $MachinePrecision] + 0.254829592), $MachinePrecision] / N[(t$95$0 * N[(1.0 + N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \mathsf{fma}\left(\left|x\right|, 0.3275911, 1\right)\\
1 - \frac{\frac{\frac{\frac{\frac{1.061405429}{t\_0} - 1.453152027}{t\_0} - -1.421413741}{t\_0} + -0.284496736}{t\_0} + 0.254829592}{t\_0 \cdot \left(1 + x \cdot x\right)}
\end{array}
\end{array}
Initial program 78.9%
Applied rewrites 78.9%
Taylor expanded in x around 0
lower-+.f64 N/A
pow2 N/A
lift-*.f64 78.4
Applied rewrites 78.4%
; Herbie candidate: denominator reduced to (1 + 0.3275911*|x|) only.
(FPCore (x)
:precision binary64
(let* ((t_0 (fma (fabs x) 0.3275911 1.0)))
(-
1.0
(/
(+
(/
(+
(/ (- (/ (- (/ 1.061405429 t_0) 1.453152027) t_0) -1.421413741) t_0)
-0.284496736)
t_0)
0.254829592)
(- 1.0 (* -0.3275911 (fabs x)))))))
double code(double x) {
double t_0 = fma(fabs(x), 0.3275911, 1.0);
return 1.0 - (((((((((1.061405429 / t_0) - 1.453152027) / t_0) - -1.421413741) / t_0) + -0.284496736) / t_0) + 0.254829592) / (1.0 - (-0.3275911 * fabs(x))));
}
# Herbie-generated candidate (denominator 1 + 0.3275911*|x|). Float64(...)
# wrappers pin per-operation binary64 rounding — do not restructure.
function code(x) t_0 = fma(abs(x), 0.3275911, 1.0) return Float64(1.0 - Float64(Float64(Float64(Float64(Float64(Float64(Float64(Float64(Float64(1.061405429 / t_0) - 1.453152027) / t_0) - -1.421413741) / t_0) + -0.284496736) / t_0) + 0.254829592) / Float64(1.0 - Float64(-0.3275911 * abs(x))))) end
(* Herbie-generated candidate (denominator 1 + 0.3275911*|x|);
   N[..., $MachinePrecision] marks each rounded operation. *)
code[x_] := Block[{t$95$0 = N[(N[Abs[x], $MachinePrecision] * 0.3275911 + 1.0), $MachinePrecision]}, N[(1.0 - N[(N[(N[(N[(N[(N[(N[(N[(N[(1.061405429 / t$95$0), $MachinePrecision] - 1.453152027), $MachinePrecision] / t$95$0), $MachinePrecision] - -1.421413741), $MachinePrecision] / t$95$0), $MachinePrecision] + -0.284496736), $MachinePrecision] / t$95$0), $MachinePrecision] + 0.254829592), $MachinePrecision] / N[(1.0 - N[(-0.3275911 * N[Abs[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \mathsf{fma}\left(\left|x\right|, 0.3275911, 1\right)\\
1 - \frac{\frac{\frac{\frac{\frac{1.061405429}{t\_0} - 1.453152027}{t\_0} - -1.421413741}{t\_0} + -0.284496736}{t\_0} + 0.254829592}{1 - -0.3275911 \cdot \left|x\right|}
\end{array}
\end{array}
Initial program 78.9%
Applied rewrites 78.9%
Taylor expanded in x around 0
lift-fabs.f64 N/A
fp-cancel-sign-sub-inv N/A
metadata-eval N/A
lift-fabs.f64 N/A
lower--.f64 N/A
lift-fabs.f64 N/A
lower-*.f64 77.4
Applied rewrites 77.4%
; Herbie candidate: series-expanded form — reciprocal quadratic correction
; times linear factor times constant 0.254829592, damped by exp(-x^2).
(FPCore (x)
:precision binary64
(-
1.0
(*
(*
(*
(/ 1.0 (- 1.0 (* 0.10731592879921 (* x x))))
(- 1.0 (* (fabs x) 0.3275911)))
0.254829592)
(exp (- (* x x))))))
double code(double x) {
return 1.0 - ((((1.0 / (1.0 - (0.10731592879921 * (x * x)))) * (1.0 - (fabs(x) * 0.3275911))) * 0.254829592) * exp(-(x * x)));
}
! NaN-aware fmax/fmin helpers with C-library semantics: when exactly one
! argument is NaN, the other argument is returned (the intrinsic max/min do
! not guarantee this). The (v /= v) comparison is the classic IEEE NaN test;
! merge(tsource, fsource, mask) selects tsource when mask is true.
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
! Generic fmax over all real(4)/real(8) argument combinations.
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
! Generic fmin over all real(4)/real(8) argument combinations.
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
! fmax, both arguments real(8): NaN x -> y; NaN y -> x; else max(x, y).
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! fmax, both arguments real(4).
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! fmax, mixed real(8)/real(4): real(4) argument promoted via dble.
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
! fmax, mixed real(4)/real(8).
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
! fmin, both arguments real(8): NaN x -> y; NaN y -> x; else min(x, y).
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! fmin, both arguments real(4).
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! fmin, mixed real(8)/real(4).
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
! fmin, mixed real(4)/real(8).
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
! Herbie series-expanded erf variant; d0 suffixes keep every constant in
! double precision. The fmin_fmax_functions module is pulled in by the
! generated harness but not used in this expression.
real(8) function code(x)
use fmin_fmax_functions
real(8), intent (in) :: x
code = 1.0d0 - ((((1.0d0 / (1.0d0 - (0.10731592879921d0 * (x * x)))) * (1.0d0 - (abs(x) * 0.3275911d0))) * 0.254829592d0) * exp(-(x * x)))
end function
/** Herbie series-expanded erf variant; all arithmetic stays in double. */
public static double code(double x) {
	return 1.0 - ((((1.0 / (1.0 - (0.10731592879921 * (x * x)))) * (1.0 - (Math.abs(x) * 0.3275911))) * 0.254829592) * Math.exp(-(x * x)));
}
def code(x):
    """Herbie series-expanded erf variant (binary64 float arithmetic).

    Reciprocal quadratic correction times a linear factor and the leading
    coefficient 0.254829592, damped by exp(-x**2); identical operation
    order to the generated one-liner.
    """
    inv = 1.0 / (1.0 - 0.10731592879921 * (x * x))
    lin = 1.0 - math.fabs(x) * 0.3275911
    damp = math.exp(-(x * x))
    return 1.0 - ((inv * lin) * 0.254829592) * damp
# Herbie series-expanded erf variant; Float64(...) wrappers pin per-operation
# binary64 rounding — do not restructure.
function code(x) return Float64(1.0 - Float64(Float64(Float64(Float64(1.0 / Float64(1.0 - Float64(0.10731592879921 * Float64(x * x)))) * Float64(1.0 - Float64(abs(x) * 0.3275911))) * 0.254829592) * exp(Float64(-Float64(x * x))))) end
% Herbie series-expanded erf variant (double precision throughout).
function tmp = code(x) tmp = 1.0 - ((((1.0 / (1.0 - (0.10731592879921 * (x * x)))) * (1.0 - (abs(x) * 0.3275911))) * 0.254829592) * exp(-(x * x))); end
(* Herbie series-expanded erf variant; N[..., $MachinePrecision] marks each
   rounded operation. *)
code[x_] := N[(1.0 - N[(N[(N[(N[(1.0 / N[(1.0 - N[(0.10731592879921 * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(1.0 - N[(N[Abs[x], $MachinePrecision] * 0.3275911), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * 0.254829592), $MachinePrecision] * N[Exp[(-N[(x * x), $MachinePrecision])], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
1 - \left(\left(\frac{1}{1 - 0.10731592879921 \cdot \left(x \cdot x\right)} \cdot \left(1 - \left|x\right| \cdot 0.3275911\right)\right) \cdot 0.254829592\right) \cdot e^{-x \cdot x}
\end{array}
Initial program 78.9%
lift-/.f64 N/A
lift-+.f64 N/A
lift-*.f64 N/A
lift-fabs.f64 N/A
flip-+ N/A
associate-/r/ N/A
lower-*.f64 N/A
Applied rewrites 79.0%
lift-/.f64 N/A
lift-+.f64 N/A
lift-*.f64 N/A
lift-fabs.f64 N/A
flip-+ N/A
associate-/r/ N/A
lower-*.f64 N/A
Applied rewrites 79.0%
lift-/.f64 N/A
lift-+.f64 N/A
lift-*.f64 N/A
lift-fabs.f64 N/A
flip-+ N/A
associate-/r/ N/A
lower-*.f64 N/A
Applied rewrites 79.0%
lift-/.f64 N/A
lift-+.f64 N/A
lift-*.f64 N/A
lift-fabs.f64 N/A
flip-+ N/A
associate-/r/ N/A
lower-*.f64 N/A
Applied rewrites 79.0%
lift-/.f64 N/A
lift-+.f64 N/A
lift-*.f64 N/A
lift-fabs.f64 N/A
flip-+ N/A
associate-/r/ N/A
lower-*.f64 N/A
Applied rewrites 79.0%
Taylor expanded in x around inf
Applied rewrites 55.2%
lift-*.f64 N/A
lift-fabs.f64 N/A
lift-fabs.f64 N/A
sqr-abs-rev N/A
Applied rewrites 55.2%
; Herbie candidate: rational core truncated to its linear factor, damped by exp(-|x|^2).
(FPCore (x) :precision binary64 (- 1.0 (* (* (- 1.0 (* 0.3275911 (fabs x))) 0.254829592) (exp (- (* (fabs x) (fabs x)))))))
double code(double x) {
return 1.0 - (((1.0 - (0.3275911 * fabs(x))) * 0.254829592) * exp(-(fabs(x) * fabs(x))));
}
! NaN-aware fmax/fmin helpers with C-library semantics: when exactly one
! argument is NaN, the other argument is returned (the intrinsic max/min do
! not guarantee this). The (v /= v) comparison is the classic IEEE NaN test;
! merge(tsource, fsource, mask) selects tsource when mask is true.
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
! Generic fmax over all real(4)/real(8) argument combinations.
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
! Generic fmin over all real(4)/real(8) argument combinations.
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
! fmax, both arguments real(8): NaN x -> y; NaN y -> x; else max(x, y).
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! fmax, both arguments real(4).
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! fmax, mixed real(8)/real(4): real(4) argument promoted via dble.
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
! fmax, mixed real(4)/real(8).
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
! fmin, both arguments real(8): NaN x -> y; NaN y -> x; else min(x, y).
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! fmin, both arguments real(4).
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! fmin, mixed real(8)/real(4).
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
! fmin, mixed real(4)/real(8).
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
! Herbie Taylor-reduced erf variant (linear factor only, exp damping kept);
! d0 suffixes keep every constant in double precision. fmin_fmax_functions
! is pulled in by the generated harness but not used in this expression.
real(8) function code(x)
use fmin_fmax_functions
real(8), intent (in) :: x
code = 1.0d0 - (((1.0d0 - (0.3275911d0 * abs(x))) * 0.254829592d0) * exp(-(abs(x) * abs(x))))
end function
/** Herbie Taylor-reduced erf variant (linear factor with exp damping). */
public static double code(double x) {
	return 1.0 - (((1.0 - (0.3275911 * Math.abs(x))) * 0.254829592) * Math.exp(-(Math.abs(x) * Math.abs(x))));
}
def code(x):
    """Herbie Taylor-reduced erf variant.

    Only the linear factor of the rational core survives, scaled by the
    leading coefficient 0.254829592 and damped by exp(-|x|**2); identical
    operation order to the generated one-liner.
    """
    lin = 1.0 - 0.3275911 * math.fabs(x)
    damp = math.exp(-(math.fabs(x) * math.fabs(x)))
    return 1.0 - (lin * 0.254829592) * damp
# Herbie Taylor-reduced erf variant; Float64(...) wrappers pin per-operation
# binary64 rounding — do not restructure.
function code(x) return Float64(1.0 - Float64(Float64(Float64(1.0 - Float64(0.3275911 * abs(x))) * 0.254829592) * exp(Float64(-Float64(abs(x) * abs(x)))))) end
% Herbie Taylor-reduced erf variant (double precision throughout).
function tmp = code(x) tmp = 1.0 - (((1.0 - (0.3275911 * abs(x))) * 0.254829592) * exp(-(abs(x) * abs(x)))); end
(* Herbie Taylor-reduced erf variant; N[..., $MachinePrecision] marks each
   rounded operation. *)
code[x_] := N[(1.0 - N[(N[(N[(1.0 - N[(0.3275911 * N[Abs[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * 0.254829592), $MachinePrecision] * N[Exp[(-N[(N[Abs[x], $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision])], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
1 - \left(\left(1 - 0.3275911 \cdot \left|x\right|\right) \cdot 0.254829592\right) \cdot e^{-\left|x\right| \cdot \left|x\right|}
\end{array}
Initial program 78.9%
lift-/.f64 N/A
lift-+.f64 N/A
lift-*.f64 N/A
lift-fabs.f64 N/A
flip-+ N/A
associate-/r/ N/A
lower-*.f64 N/A
Applied rewrites 79.0%
lift-/.f64 N/A
lift-+.f64 N/A
lift-*.f64 N/A
lift-fabs.f64 N/A
flip-+ N/A
associate-/r/ N/A
lower-*.f64 N/A
Applied rewrites 79.0%
lift-/.f64 N/A
lift-+.f64 N/A
lift-*.f64 N/A
lift-fabs.f64 N/A
flip-+ N/A
associate-/r/ N/A
lower-*.f64 N/A
Applied rewrites 79.0%
lift-/.f64 N/A
lift-+.f64 N/A
lift-*.f64 N/A
lift-fabs.f64 N/A
flip-+ N/A
associate-/r/ N/A
lower-*.f64 N/A
Applied rewrites 79.0%
lift-/.f64 N/A
lift-+.f64 N/A
lift-*.f64 N/A
lift-fabs.f64 N/A
flip-+ N/A
associate-/r/ N/A
lower-*.f64 N/A
Applied rewrites 79.0%
Taylor expanded in x around inf
Applied rewrites 55.2%
Taylor expanded in x around 0
lift-fabs.f64 N/A
lift-*.f64 N/A
lift--.f64 55.2
Applied rewrites 55.2%
; Degenerate Herbie candidate: the whole expression collapsed to the constant 1.0.
(FPCore (x) :precision binary64 1.0)
/* Degenerate Herbie candidate: Taylor expansion about x = inf collapsed the
 * whole expression to the constant 1.0; x is intentionally unused. */
double code(double x) {
	return 1.0;
}
! NaN-aware fmax/fmin helpers with C-library semantics: when exactly one
! argument is NaN, the other argument is returned (the intrinsic max/min do
! not guarantee this). The (v /= v) comparison is the classic IEEE NaN test;
! merge(tsource, fsource, mask) selects tsource when mask is true.
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
! Generic fmax over all real(4)/real(8) argument combinations.
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
! Generic fmin over all real(4)/real(8) argument combinations.
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
! fmax, both arguments real(8): NaN x -> y; NaN y -> x; else max(x, y).
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! fmax, both arguments real(4).
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! fmax, mixed real(8)/real(4): real(4) argument promoted via dble.
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
! fmax, mixed real(4)/real(8).
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
! fmin, both arguments real(8): NaN x -> y; NaN y -> x; else min(x, y).
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! fmin, both arguments real(4).
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! fmin, mixed real(8)/real(4).
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
! fmin, mixed real(4)/real(8).
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
! Degenerate Herbie candidate: the approximation collapsed to the constant
! 1.0d0; x is intentionally unused. fmin_fmax_functions is pulled in by the
! generated harness but not used here.
real(8) function code(x)
use fmin_fmax_functions
real(8), intent (in) :: x
code = 1.0d0
end function
/** Degenerate Herbie candidate: constant 1.0; x is intentionally unused. */
public static double code(double x) {
	return 1.0;
}
def code(x):
    """Degenerate Herbie candidate: the approximation collapsed to 1.0."""
    return 1.0
# Degenerate Herbie candidate: the approximation collapsed to the constant 1.0.
function code(x)
    return 1.0
end
% Degenerate Herbie candidate: constant 1.0; x is intentionally unused.
function tmp = code(x) tmp = 1.0; end
(* Degenerate Herbie candidate: constant 1.0; x is intentionally unused. *)
code[x_] := 1.0
\begin{array}{l}
\\
1
\end{array}
Initial program 78.9%
lift-/.f64 N/A
lift-+.f64 N/A
lift-*.f64 N/A
lift-fabs.f64 N/A
flip-+ N/A
associate-/r/ N/A
lower-*.f64 N/A
Applied rewrites 79.0%
lift-/.f64 N/A
lift-+.f64 N/A
lift-*.f64 N/A
lift-fabs.f64 N/A
flip-+ N/A
associate-/r/ N/A
lower-*.f64 N/A
Applied rewrites 79.0%
lift-/.f64 N/A
lift-+.f64 N/A
lift-*.f64 N/A
lift-fabs.f64 N/A
flip-+ N/A
associate-/r/ N/A
lower-*.f64 N/A
Applied rewrites 79.0%
lift-/.f64 N/A
lift-+.f64 N/A
lift-*.f64 N/A
lift-fabs.f64 N/A
flip-+ N/A
associate-/r/ N/A
lower-*.f64 N/A
Applied rewrites 79.0%
lift-/.f64 N/A
lift-+.f64 N/A
lift-*.f64 N/A
lift-fabs.f64 N/A
flip-+ N/A
associate-/r/ N/A
lower-*.f64 N/A
Applied rewrites 79.0%
Taylor expanded in x around inf
Applied rewrites 55.2%
herbie shell --seed 2025115
; Input program re-listed for reference: the original Jmat.Real.erf rational
; approximation (Abramowitz & Stegun 7.1.26 form) that the candidates above
; were derived from.
(FPCore (x)
:name "Jmat.Real.erf"
:precision binary64
(- 1.0 (* (* (/ 1.0 (+ 1.0 (* 0.3275911 (fabs x)))) (+ 0.254829592 (* (/ 1.0 (+ 1.0 (* 0.3275911 (fabs x)))) (+ -0.284496736 (* (/ 1.0 (+ 1.0 (* 0.3275911 (fabs x)))) (+ 1.421413741 (* (/ 1.0 (+ 1.0 (* 0.3275911 (fabs x)))) (+ -1.453152027 (* (/ 1.0 (+ 1.0 (* 0.3275911 (fabs x)))) 1.061405429))))))))) (exp (- (* (fabs x) (fabs x)))))))