
(FPCore (x)
:precision binary64
(let* ((t_0 (/ 1.0 (+ 1.0 (* 0.3275911 (fabs x))))))
(-
1.0
(*
(*
t_0
(+
0.254829592
(*
t_0
(+
-0.284496736
(*
t_0
(+ 1.421413741 (* t_0 (+ -1.453152027 (* t_0 1.061405429)))))))))
(exp (- (* (fabs x) (fabs x))))))))
double code(double x) {
double t_0 = 1.0 / (1.0 + (0.3275911 * fabs(x)));
return 1.0 - ((t_0 * (0.254829592 + (t_0 * (-0.284496736 + (t_0 * (1.421413741 + (t_0 * (-1.453152027 + (t_0 * 1.061405429))))))))) * exp(-(fabs(x) * fabs(x))));
}
! Approximates erf(|x|) as 1 - t*P(t)*exp(-x**2) with t = 1/(1 + 0.3275911*|x|)
! and P a degree-4 polynomial (coefficients match the Abramowitz-Stegun 7.1.26 fit).
! NOTE(review): abs(x) discards the sign, so code(-x) == code(x).
real(8) function code(x)
real(8), intent (in) :: x
real(8) :: t_0
! Rational substitution variable t_0 = 1/(1 + p*|x|), p = 0.3275911.
t_0 = 1.0d0 / (1.0d0 + (0.3275911d0 * abs(x)))
! Nested (Horner) polynomial in t_0, damped by exp(-x**2), subtracted from 1.
code = 1.0d0 - ((t_0 * (0.254829592d0 + (t_0 * ((-0.284496736d0) + (t_0 * (1.421413741d0 + (t_0 * ((-1.453152027d0) + (t_0 * 1.061405429d0))))))))) * exp(-(abs(x) * abs(x))))
end function
// Approximates erf(|x|): 1 - t*P(t)*exp(-x^2) with t = 1/(1 + 0.3275911*|x|)
// and P a degree-4 polynomial (classic Abramowitz-Stegun 7.1.26 coefficients).
// NOTE(review): Math.abs discards the sign of x, so code(-x) == code(x).
public static double code(double x) {
    final double[] coeff = {
        0.254829592, -0.284496736, 1.421413741, -1.453152027, 1.061405429
    };
    final double ax = Math.abs(x);
    final double t = 1.0 / (1.0 + 0.3275911 * ax);
    // Horner evaluation in the same operation order as the original nested
    // expression, so results are bit-for-bit identical.
    double poly = coeff[4];
    for (int i = 3; i >= 0; i--) {
        poly = coeff[i] + t * poly;
    }
    return 1.0 - (t * poly) * Math.exp(-(ax * ax));
}
def code(x):
    """Approximate erf(|x|) via the classic Abramowitz-Stegun-style fit.

    Computes 1 - t*P(t)*exp(-x**2) with t = 1/(1 + 0.3275911*|x|) and P a
    degree-4 polynomial evaluated in nested (Horner) form.

    NOTE(review): math.fabs discards the sign, so code(-x) == code(x).
    Fix: the generated source had the whole definition fused onto one line,
    which is invalid Python; statements are restored to separate lines with
    no change to any expression.
    """
    t_0 = 1.0 / (1.0 + (0.3275911 * math.fabs(x)))
    return 1.0 - ((t_0 * (0.254829592 + (t_0 * (-0.284496736 + (t_0 * (1.421413741 + (t_0 * (-1.453152027 + (t_0 * 1.061405429))))))))) * math.exp(-(math.fabs(x) * math.fabs(x))))
# Approximate erf(|x|): 1 - t*P(t)*exp(-x^2) with t = 1/(1 + 0.3275911*abs(x)).
# NOTE(review): abs discards the sign, so code(-x) == code(x).
# Fix: the generated source had the whole function fused onto one line, which
# does not parse in Julia; statements are restored to separate lines unchanged.
function code(x)
    t_0 = Float64(1.0 / Float64(1.0 + Float64(0.3275911 * abs(x))))
    return Float64(1.0 - Float64(Float64(t_0 * Float64(0.254829592 + Float64(t_0 * Float64(-0.284496736 + Float64(t_0 * Float64(1.421413741 + Float64(t_0 * Float64(-1.453152027 + Float64(t_0 * 1.061405429))))))))) * exp(Float64(-Float64(abs(x) * abs(x))))))
end
% Approximate erf(|x|): 1 - t*P(t)*exp(-x^2) with t = 1/(1 + 0.3275911*abs(x)).
% NOTE(review): abs discards the sign, so code(-x) == code(x).
% Fix: the generated source had the whole function fused onto one line, which
% MATLAB rejects; statements are restored to separate lines unchanged.
function tmp = code(x)
    t_0 = 1.0 / (1.0 + (0.3275911 * abs(x)));
    tmp = 1.0 - ((t_0 * (0.254829592 + (t_0 * (-0.284496736 + (t_0 * (1.421413741 + (t_0 * (-1.453152027 + (t_0 * 1.061405429))))))))) * exp(-(abs(x) * abs(x))));
end
code[x_] := Block[{t$95$0 = N[(1.0 / N[(1.0 + N[(0.3275911 * N[Abs[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, N[(1.0 - N[(N[(t$95$0 * N[(0.254829592 + N[(t$95$0 * N[(-0.284496736 + N[(t$95$0 * N[(1.421413741 + N[(t$95$0 * N[(-1.453152027 + N[(t$95$0 * 1.061405429), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[Exp[(-N[(N[Abs[x], $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision])], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{1}{1 + 0.3275911 \cdot \left|x\right|}\\
1 - \left(t\_0 \cdot \left(0.254829592 + t\_0 \cdot \left(-0.284496736 + t\_0 \cdot \left(1.421413741 + t\_0 \cdot \left(-1.453152027 + t\_0 \cdot 1.061405429\right)\right)\right)\right)\right) \cdot e^{-\left|x\right| \cdot \left|x\right|}
\end{array}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 10 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x)
:precision binary64
(let* ((t_0 (/ 1.0 (+ 1.0 (* 0.3275911 (fabs x))))))
(-
1.0
(*
(*
t_0
(+
0.254829592
(*
t_0
(+
-0.284496736
(*
t_0
(+ 1.421413741 (* t_0 (+ -1.453152027 (* t_0 1.061405429)))))))))
(exp (- (* (fabs x) (fabs x))))))))
// Starting-point program repeated by the report for this alternative:
// approximates erf(|x|) as 1 - t*P(t)*exp(-x^2), t = 1/(1 + 0.3275911*|x|)
// (Abramowitz-Stegun 7.1.26-style coefficients).
// NOTE(review): fabs discards the sign of x, so code(-x) == code(x).
double code(double x) {
double t_0 = 1.0 / (1.0 + (0.3275911 * fabs(x)));
return 1.0 - ((t_0 * (0.254829592 + (t_0 * (-0.284496736 + (t_0 * (1.421413741 + (t_0 * (-1.453152027 + (t_0 * 1.061405429))))))))) * exp(-(fabs(x) * fabs(x))));
}
! Starting-point program (Fortran copy): erf(|x|) approximation,
! 1 - t*P(t)*exp(-x**2) with t = 1/(1 + 0.3275911*|x|).
! NOTE(review): abs(x) discards the sign, so code(-x) == code(x).
real(8) function code(x)
real(8), intent (in) :: x
real(8) :: t_0
t_0 = 1.0d0 / (1.0d0 + (0.3275911d0 * abs(x)))
! Nested (Horner) degree-4 polynomial in t_0.
code = 1.0d0 - ((t_0 * (0.254829592d0 + (t_0 * ((-0.284496736d0) + (t_0 * (1.421413741d0 + (t_0 * ((-1.453152027d0) + (t_0 * 1.061405429d0))))))))) * exp(-(abs(x) * abs(x))))
end function
// Starting-point program (Java copy): erf(|x|) approximation,
// 1 - t*P(t)*exp(-x^2) with t = 1/(1 + 0.3275911*|x|).
// NOTE(review): Math.abs discards the sign, so code(-x) == code(x).
public static double code(double x) {
double t_0 = 1.0 / (1.0 + (0.3275911 * Math.abs(x)));
return 1.0 - ((t_0 * (0.254829592 + (t_0 * (-0.284496736 + (t_0 * (1.421413741 + (t_0 * (-1.453152027 + (t_0 * 1.061405429))))))))) * Math.exp(-(Math.abs(x) * Math.abs(x))));
}
def code(x):
    """Approximate erf(|x|) (starting-point program, Python copy).

    Computes 1 - t*P(t)*exp(-x**2) with t = 1/(1 + 0.3275911*|x|) and P a
    degree-4 polynomial in nested (Horner) form.

    NOTE(review): math.fabs discards the sign, so code(-x) == code(x).
    Fix: the generated source had the whole definition fused onto one line,
    which is invalid Python; statements are restored to separate lines with
    no change to any expression.
    """
    t_0 = 1.0 / (1.0 + (0.3275911 * math.fabs(x)))
    return 1.0 - ((t_0 * (0.254829592 + (t_0 * (-0.284496736 + (t_0 * (1.421413741 + (t_0 * (-1.453152027 + (t_0 * 1.061405429))))))))) * math.exp(-(math.fabs(x) * math.fabs(x))))
function code(x) t_0 = Float64(1.0 / Float64(1.0 + Float64(0.3275911 * abs(x)))) return Float64(1.0 - Float64(Float64(t_0 * Float64(0.254829592 + Float64(t_0 * Float64(-0.284496736 + Float64(t_0 * Float64(1.421413741 + Float64(t_0 * Float64(-1.453152027 + Float64(t_0 * 1.061405429))))))))) * exp(Float64(-Float64(abs(x) * abs(x)))))) end
function tmp = code(x) t_0 = 1.0 / (1.0 + (0.3275911 * abs(x))); tmp = 1.0 - ((t_0 * (0.254829592 + (t_0 * (-0.284496736 + (t_0 * (1.421413741 + (t_0 * (-1.453152027 + (t_0 * 1.061405429))))))))) * exp(-(abs(x) * abs(x)))); end
code[x_] := Block[{t$95$0 = N[(1.0 / N[(1.0 + N[(0.3275911 * N[Abs[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, N[(1.0 - N[(N[(t$95$0 * N[(0.254829592 + N[(t$95$0 * N[(-0.284496736 + N[(t$95$0 * N[(1.421413741 + N[(t$95$0 * N[(-1.453152027 + N[(t$95$0 * 1.061405429), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[Exp[(-N[(N[Abs[x], $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision])], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{1}{1 + 0.3275911 \cdot \left|x\right|}\\
1 - \left(t\_0 \cdot \left(0.254829592 + t\_0 \cdot \left(-0.284496736 + t\_0 \cdot \left(1.421413741 + t\_0 \cdot \left(-1.453152027 + t\_0 \cdot 1.061405429\right)\right)\right)\right)\right) \cdot e^{-\left|x\right| \cdot \left|x\right|}
\end{array}
\end{array}
x_m = (fabs.f64 x)
(FPCore (x_m)
:precision binary64
(let* ((t_0 (/ 1.0 (+ 1.0 (* (fabs x_m) 0.3275911)))))
(if (<= (fabs x_m) 5e-10)
(pow (sqrt (fma x_m 1.128386358070218 1e-9)) 2.0)
(-
1.0
(*
(*
t_0
(+
0.254829592
(*
(/ 1.0 (+ 1.0 (log (+ 1.0 (expm1 (* x_m 0.3275911))))))
(+
-0.284496736
(*
t_0
(+
1.421413741
(*
t_0
(+ -1.453152027 (/ 1.061405429 (+ 1.0 (* x_m 0.3275911)))))))))))
(exp (- (* x_m x_m))))))))x_m = fabs(x);
// Herbie alternative #1: piecewise rewrite of the erf(|x|) approximation.
// Expects the caller to precompute x_m = fabs(x) (per the report's
// "x_m = fabs(x)" preamble), so x_m >= 0 barring NaN.
double code(double x_m) {
double t_0 = 1.0 / (1.0 + (fabs(x_m) * 0.3275911));
double tmp;
// Tiny-input branch: Taylor-derived linear form (the accuracy log shows the
// original dropping to ~57.7% here and this form reaching ~99.3%); the
// pow(sqrt(.), 2) wrapper is an artifact of the add-sqr-sqrt/pow2 rewrites.
if (fabs(x_m) <= 5e-10) {
tmp = pow(sqrt(fma(x_m, 1.128386358070218, 1e-9)), 2.0);
} else {
// Same polynomial as the original; one t_0 factor is re-expressed through
// log(1 + expm1(.)) -- algebraically 1/(1 + x_m*0.3275911) -- and the last
// coefficient is folded into a division, per the logged expm1/log1p rewrites.
tmp = 1.0 - ((t_0 * (0.254829592 + ((1.0 / (1.0 + log((1.0 + expm1((x_m * 0.3275911)))))) * (-0.284496736 + (t_0 * (1.421413741 + (t_0 * (-1.453152027 + (1.061405429 / (1.0 + (x_m * 0.3275911))))))))))) * exp(-(x_m * x_m)));
}
return tmp;
}
x_m = abs(x) function code(x_m) t_0 = Float64(1.0 / Float64(1.0 + Float64(abs(x_m) * 0.3275911))) tmp = 0.0 if (abs(x_m) <= 5e-10) tmp = sqrt(fma(x_m, 1.128386358070218, 1e-9)) ^ 2.0; else tmp = Float64(1.0 - Float64(Float64(t_0 * Float64(0.254829592 + Float64(Float64(1.0 / Float64(1.0 + log(Float64(1.0 + expm1(Float64(x_m * 0.3275911)))))) * Float64(-0.284496736 + Float64(t_0 * Float64(1.421413741 + Float64(t_0 * Float64(-1.453152027 + Float64(1.061405429 / Float64(1.0 + Float64(x_m * 0.3275911))))))))))) * exp(Float64(-Float64(x_m * x_m))))); end return tmp end
x_m = N[Abs[x], $MachinePrecision]
code[x$95$m_] := Block[{t$95$0 = N[(1.0 / N[(1.0 + N[(N[Abs[x$95$m], $MachinePrecision] * 0.3275911), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[N[Abs[x$95$m], $MachinePrecision], 5e-10], N[Power[N[Sqrt[N[(x$95$m * 1.128386358070218 + 1e-9), $MachinePrecision]], $MachinePrecision], 2.0], $MachinePrecision], N[(1.0 - N[(N[(t$95$0 * N[(0.254829592 + N[(N[(1.0 / N[(1.0 + N[Log[N[(1.0 + N[(Exp[N[(x$95$m * 0.3275911), $MachinePrecision]] - 1), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(-0.284496736 + N[(t$95$0 * N[(1.421413741 + N[(t$95$0 * N[(-1.453152027 + N[(1.061405429 / N[(1.0 + N[(x$95$m * 0.3275911), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[Exp[(-N[(x$95$m * x$95$m), $MachinePrecision])], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
t_0 := \frac{1}{1 + \left|x\_m\right| \cdot 0.3275911}\\
\mathbf{if}\;\left|x\_m\right| \leq 5 \cdot 10^{-10}:\\
\;\;\;\;{\left(\sqrt{\mathsf{fma}\left(x\_m, 1.128386358070218, 10^{-9}\right)}\right)}^{2}\\
\mathbf{else}:\\
\;\;\;\;1 - \left(t\_0 \cdot \left(0.254829592 + \frac{1}{1 + \log \left(1 + \mathsf{expm1}\left(x\_m \cdot 0.3275911\right)\right)} \cdot \left(-0.284496736 + t\_0 \cdot \left(1.421413741 + t\_0 \cdot \left(-1.453152027 + \frac{1.061405429}{1 + x\_m \cdot 0.3275911}\right)\right)\right)\right)\right) \cdot e^{-x\_m \cdot x\_m}\\
\end{array}
\end{array}
if (fabs.f64 x) < 5.00000000000000031e-10Initial program 57.7%
Simplified57.7%
Applied egg-rr57.4%
expm1-undefine55.0%
sub-neg55.0%
log1p-undefine55.0%
rem-exp-log55.0%
associate-+r-55.0%
metadata-eval55.0%
metadata-eval55.0%
Simplified55.0%
Taylor expanded in x around 0 99.3%
*-commutative99.3%
Simplified99.3%
add-sqr-sqrt99.3%
pow299.3%
+-commutative99.3%
fma-define99.3%
Applied egg-rr99.3%
if 5.00000000000000031e-10 < (fabs.f64 x) Initial program 99.9%
Simplified99.9%
expm1-log1p-u99.9%
log1p-define99.9%
expm1-undefine99.9%
add-exp-log99.9%
+-commutative99.9%
fma-define99.9%
add-sqr-sqrt42.7%
fabs-sqr42.7%
add-sqr-sqrt98.7%
Applied egg-rr98.7%
fma-undefine98.7%
associate--l+98.7%
metadata-eval98.7%
+-rgt-identity98.7%
Simplified98.7%
log1p-expm1-u98.7%
log1p-undefine98.7%
add-sqr-sqrt42.7%
fabs-sqr42.7%
add-sqr-sqrt98.5%
Applied egg-rr98.5%
Final simplification98.9%
x_m = (fabs.f64 x)
(FPCore (x_m)
:precision binary64
(let* ((t_0 (/ 1.0 (+ 1.0 (* (fabs x_m) 0.3275911))))
(t_1 (+ 1.0 (* x_m 0.3275911))))
(if (<= x_m 1e-6)
(pow (sqrt (fma x_m 1.128386358070218 1e-9)) 2.0)
(-
1.0
(*
(exp (- (* x_m x_m)))
(*
t_0
(+
0.254829592
(*
t_0
(+
-0.284496736
(*
t_0
(-
1.421413741
(* (+ -1.453152027 (/ 1.061405429 t_1)) (/ -1.0 t_1)))))))))))))x_m = fabs(x);
// Herbie alternative #2: like alternative #1 but with a wider tiny-input
// branch (threshold 1e-6) and the tail of the polynomial refactored through
// t_1 = 1 + 0.3275911*x_m. Expects x_m = fabs(x) precomputed by the caller.
double code(double x_m) {
double t_0 = 1.0 / (1.0 + (fabs(x_m) * 0.3275911));
double t_1 = 1.0 + (x_m * 0.3275911);
double tmp;
// Taylor-derived linear form near zero (sqrt(.)^2 wrapper is a rewrite artifact).
if (x_m <= 1e-6) {
tmp = pow(sqrt(fma(x_m, 1.128386358070218, 1e-9)), 2.0);
} else {
// Inner terms rearranged: (-1.453152027 + 1.061405429/t_1) * (-1/t_1) is the
// sign-flipped form of t_0*(-1.453152027 + t_0*1.061405429) since t_0 = 1/t_1.
tmp = 1.0 - (exp(-(x_m * x_m)) * (t_0 * (0.254829592 + (t_0 * (-0.284496736 + (t_0 * (1.421413741 - ((-1.453152027 + (1.061405429 / t_1)) * (-1.0 / t_1)))))))));
}
return tmp;
}
x_m = abs(x) function code(x_m) t_0 = Float64(1.0 / Float64(1.0 + Float64(abs(x_m) * 0.3275911))) t_1 = Float64(1.0 + Float64(x_m * 0.3275911)) tmp = 0.0 if (x_m <= 1e-6) tmp = sqrt(fma(x_m, 1.128386358070218, 1e-9)) ^ 2.0; else tmp = Float64(1.0 - Float64(exp(Float64(-Float64(x_m * x_m))) * Float64(t_0 * Float64(0.254829592 + Float64(t_0 * Float64(-0.284496736 + Float64(t_0 * Float64(1.421413741 - Float64(Float64(-1.453152027 + Float64(1.061405429 / t_1)) * Float64(-1.0 / t_1)))))))))); end return tmp end
x_m = N[Abs[x], $MachinePrecision]
code[x$95$m_] := Block[{t$95$0 = N[(1.0 / N[(1.0 + N[(N[Abs[x$95$m], $MachinePrecision] * 0.3275911), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(1.0 + N[(x$95$m * 0.3275911), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[x$95$m, 1e-6], N[Power[N[Sqrt[N[(x$95$m * 1.128386358070218 + 1e-9), $MachinePrecision]], $MachinePrecision], 2.0], $MachinePrecision], N[(1.0 - N[(N[Exp[(-N[(x$95$m * x$95$m), $MachinePrecision])], $MachinePrecision] * N[(t$95$0 * N[(0.254829592 + N[(t$95$0 * N[(-0.284496736 + N[(t$95$0 * N[(1.421413741 - N[(N[(-1.453152027 + N[(1.061405429 / t$95$1), $MachinePrecision]), $MachinePrecision] * N[(-1.0 / t$95$1), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
t_0 := \frac{1}{1 + \left|x\_m\right| \cdot 0.3275911}\\
t_1 := 1 + x\_m \cdot 0.3275911\\
\mathbf{if}\;x\_m \leq 10^{-6}:\\
\;\;\;\;{\left(\sqrt{\mathsf{fma}\left(x\_m, 1.128386358070218, 10^{-9}\right)}\right)}^{2}\\
\mathbf{else}:\\
\;\;\;\;1 - e^{-x\_m \cdot x\_m} \cdot \left(t\_0 \cdot \left(0.254829592 + t\_0 \cdot \left(-0.284496736 + t\_0 \cdot \left(1.421413741 - \left(-1.453152027 + \frac{1.061405429}{t\_1}\right) \cdot \frac{-1}{t\_1}\right)\right)\right)\right)\\
\end{array}
\end{array}
if x < 9.99999999999999955e-7Initial program 72.4%
Simplified72.4%
Applied egg-rr38.4%
expm1-undefine36.9%
sub-neg36.9%
log1p-undefine36.9%
rem-exp-log36.9%
associate-+r-36.9%
metadata-eval36.9%
metadata-eval36.9%
Simplified36.9%
Taylor expanded in x around 0 65.0%
*-commutative65.0%
Simplified65.0%
add-sqr-sqrt64.6%
pow264.6%
+-commutative64.6%
fma-define64.6%
Applied egg-rr64.6%
if 9.99999999999999955e-7 < x Initial program 99.9%
Simplified99.9%
expm1-log1p-u99.9%
log1p-define99.9%
expm1-undefine99.9%
add-exp-log99.9%
+-commutative99.9%
fma-define99.9%
add-sqr-sqrt99.9%
fabs-sqr99.9%
add-sqr-sqrt99.9%
Applied egg-rr99.9%
fma-undefine99.9%
associate--l+99.9%
metadata-eval99.9%
+-rgt-identity99.9%
Simplified99.9%
expm1-log1p-u99.9%
log1p-define99.9%
expm1-undefine99.9%
add-exp-log99.9%
+-commutative99.9%
fma-define99.9%
add-sqr-sqrt99.9%
fabs-sqr99.9%
add-sqr-sqrt99.9%
Applied egg-rr99.9%
fma-undefine99.9%
associate--l+99.9%
metadata-eval99.9%
+-rgt-identity99.9%
Simplified99.9%
Final simplification71.9%
x_m = (fabs.f64 x)
(FPCore (x_m)
:precision binary64
(let* ((t_0 (/ 1.0 (+ 1.0 (* (fabs x_m) 0.3275911)))))
(if (<= x_m 0.21)
(pow (sqrt (fma x_m 1.128386358070218 1e-9)) 2.0)
(+
1.0
(*
(exp (- (* x_m x_m)))
(*
t_0
(-
(*
(/ 1.0 (+ 1.0 (* x_m 0.3275911)))
(-
(*
t_0
(-
(* t_0 (- 0.391746598 (* x_m -0.3477069720320819)))
1.421413741))
-0.284496736))
0.254829592)))))))x_m = fabs(x);
// Herbie alternative #3: linear branch up to 0.21, and above that a
// sign-flipped regrouping of the polynomial (note "1.0 +" with a negative
// product). 0.391746598 is the folded constant 1.453152027 - 1.061405429,
// and a Taylor-derived linear term in x_m replaces the highest-order t_0 term.
// Expects x_m = fabs(x) precomputed by the caller.
double code(double x_m) {
double t_0 = 1.0 / (1.0 + (fabs(x_m) * 0.3275911));
double tmp;
if (x_m <= 0.21) {
tmp = pow(sqrt(fma(x_m, 1.128386358070218, 1e-9)), 2.0);
} else {
tmp = 1.0 + (exp(-(x_m * x_m)) * (t_0 * (((1.0 / (1.0 + (x_m * 0.3275911))) * ((t_0 * ((t_0 * (0.391746598 - (x_m * -0.3477069720320819))) - 1.421413741)) - -0.284496736)) - 0.254829592)));
}
return tmp;
}
x_m = abs(x) function code(x_m) t_0 = Float64(1.0 / Float64(1.0 + Float64(abs(x_m) * 0.3275911))) tmp = 0.0 if (x_m <= 0.21) tmp = sqrt(fma(x_m, 1.128386358070218, 1e-9)) ^ 2.0; else tmp = Float64(1.0 + Float64(exp(Float64(-Float64(x_m * x_m))) * Float64(t_0 * Float64(Float64(Float64(1.0 / Float64(1.0 + Float64(x_m * 0.3275911))) * Float64(Float64(t_0 * Float64(Float64(t_0 * Float64(0.391746598 - Float64(x_m * -0.3477069720320819))) - 1.421413741)) - -0.284496736)) - 0.254829592)))); end return tmp end
x_m = N[Abs[x], $MachinePrecision]
code[x$95$m_] := Block[{t$95$0 = N[(1.0 / N[(1.0 + N[(N[Abs[x$95$m], $MachinePrecision] * 0.3275911), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[x$95$m, 0.21], N[Power[N[Sqrt[N[(x$95$m * 1.128386358070218 + 1e-9), $MachinePrecision]], $MachinePrecision], 2.0], $MachinePrecision], N[(1.0 + N[(N[Exp[(-N[(x$95$m * x$95$m), $MachinePrecision])], $MachinePrecision] * N[(t$95$0 * N[(N[(N[(1.0 / N[(1.0 + N[(x$95$m * 0.3275911), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(N[(t$95$0 * N[(N[(t$95$0 * N[(0.391746598 - N[(x$95$m * -0.3477069720320819), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - 1.421413741), $MachinePrecision]), $MachinePrecision] - -0.284496736), $MachinePrecision]), $MachinePrecision] - 0.254829592), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
t_0 := \frac{1}{1 + \left|x\_m\right| \cdot 0.3275911}\\
\mathbf{if}\;x\_m \leq 0.21:\\
\;\;\;\;{\left(\sqrt{\mathsf{fma}\left(x\_m, 1.128386358070218, 10^{-9}\right)}\right)}^{2}\\
\mathbf{else}:\\
\;\;\;\;1 + e^{-x\_m \cdot x\_m} \cdot \left(t\_0 \cdot \left(\frac{1}{1 + x\_m \cdot 0.3275911} \cdot \left(t\_0 \cdot \left(t\_0 \cdot \left(0.391746598 - x\_m \cdot -0.3477069720320819\right) - 1.421413741\right) - -0.284496736\right) - 0.254829592\right)\right)\\
\end{array}
\end{array}
if x < 0.209999999999999992Initial program 72.5%
Simplified72.5%
Applied egg-rr38.3%
expm1-undefine36.8%
sub-neg36.8%
log1p-undefine36.8%
rem-exp-log36.8%
associate-+r-36.8%
metadata-eval36.8%
metadata-eval36.8%
Simplified36.8%
Taylor expanded in x around 0 64.8%
*-commutative64.8%
Simplified64.8%
add-sqr-sqrt64.4%
pow264.4%
+-commutative64.4%
fma-define64.4%
Applied egg-rr64.4%
if 0.209999999999999992 < x Initial program 100.0%
Simplified100.0%
expm1-log1p-u100.0%
log1p-define100.0%
expm1-undefine100.0%
add-exp-log100.0%
+-commutative100.0%
fma-define100.0%
add-sqr-sqrt100.0%
fabs-sqr100.0%
add-sqr-sqrt100.0%
Applied egg-rr100.0%
fma-undefine100.0%
associate--l+100.0%
metadata-eval100.0%
+-rgt-identity100.0%
Simplified100.0%
log1p-expm1-u100.0%
log1p-undefine100.0%
add-sqr-sqrt100.0%
fabs-sqr100.0%
add-sqr-sqrt100.0%
Applied egg-rr100.0%
log1p-define100.0%
log1p-expm1-u100.0%
*-commutative100.0%
Applied egg-rr100.0%
Taylor expanded in x around 0 100.0%
Final simplification71.7%
x_m = (fabs.f64 x)
(FPCore (x_m)
:precision binary64
(let* ((t_0 (/ 1.0 (+ 1.0 (* (fabs x_m) 0.3275911)))))
(if (<= x_m 0.56)
(pow (sqrt (fma x_m 1.128386358070218 1e-9)) 2.0)
(-
1.0
(*
(exp (- (* x_m x_m)))
(*
t_0
(+
0.254829592
(*
(/ 1.0 (+ 1.0 (* x_m 0.3275911)))
(+ -0.284496736 (* t_0 (+ 1.421413741 (* t_0 -0.391746598))))))))))))x_m = fabs(x);
// Herbie alternative #4: linear branch up to 0.56; above that a truncated
// polynomial where the two highest-order terms are folded into the single
// constant -0.391746598 (= -1.453152027 + 1.061405429).
// Expects x_m = fabs(x) precomputed by the caller.
double code(double x_m) {
double t_0 = 1.0 / (1.0 + (fabs(x_m) * 0.3275911));
double tmp;
if (x_m <= 0.56) {
tmp = pow(sqrt(fma(x_m, 1.128386358070218, 1e-9)), 2.0);
} else {
tmp = 1.0 - (exp(-(x_m * x_m)) * (t_0 * (0.254829592 + ((1.0 / (1.0 + (x_m * 0.3275911))) * (-0.284496736 + (t_0 * (1.421413741 + (t_0 * -0.391746598))))))));
}
return tmp;
}
x_m = abs(x) function code(x_m) t_0 = Float64(1.0 / Float64(1.0 + Float64(abs(x_m) * 0.3275911))) tmp = 0.0 if (x_m <= 0.56) tmp = sqrt(fma(x_m, 1.128386358070218, 1e-9)) ^ 2.0; else tmp = Float64(1.0 - Float64(exp(Float64(-Float64(x_m * x_m))) * Float64(t_0 * Float64(0.254829592 + Float64(Float64(1.0 / Float64(1.0 + Float64(x_m * 0.3275911))) * Float64(-0.284496736 + Float64(t_0 * Float64(1.421413741 + Float64(t_0 * -0.391746598))))))))); end return tmp end
x_m = N[Abs[x], $MachinePrecision]
code[x$95$m_] := Block[{t$95$0 = N[(1.0 / N[(1.0 + N[(N[Abs[x$95$m], $MachinePrecision] * 0.3275911), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[x$95$m, 0.56], N[Power[N[Sqrt[N[(x$95$m * 1.128386358070218 + 1e-9), $MachinePrecision]], $MachinePrecision], 2.0], $MachinePrecision], N[(1.0 - N[(N[Exp[(-N[(x$95$m * x$95$m), $MachinePrecision])], $MachinePrecision] * N[(t$95$0 * N[(0.254829592 + N[(N[(1.0 / N[(1.0 + N[(x$95$m * 0.3275911), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(-0.284496736 + N[(t$95$0 * N[(1.421413741 + N[(t$95$0 * -0.391746598), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
t_0 := \frac{1}{1 + \left|x\_m\right| \cdot 0.3275911}\\
\mathbf{if}\;x\_m \leq 0.56:\\
\;\;\;\;{\left(\sqrt{\mathsf{fma}\left(x\_m, 1.128386358070218, 10^{-9}\right)}\right)}^{2}\\
\mathbf{else}:\\
\;\;\;\;1 - e^{-x\_m \cdot x\_m} \cdot \left(t\_0 \cdot \left(0.254829592 + \frac{1}{1 + x\_m \cdot 0.3275911} \cdot \left(-0.284496736 + t\_0 \cdot \left(1.421413741 + t\_0 \cdot -0.391746598\right)\right)\right)\right)\\
\end{array}
\end{array}
if x < 0.56000000000000005Initial program 72.5%
Simplified72.5%
Applied egg-rr38.3%
expm1-undefine36.8%
sub-neg36.8%
log1p-undefine36.8%
rem-exp-log36.8%
associate-+r-36.8%
metadata-eval36.8%
metadata-eval36.8%
Simplified36.8%
Taylor expanded in x around 0 64.8%
*-commutative64.8%
Simplified64.8%
add-sqr-sqrt64.4%
pow264.4%
+-commutative64.4%
fma-define64.4%
Applied egg-rr64.4%
if 0.56000000000000005 < x Initial program 100.0%
Simplified100.0%
expm1-log1p-u100.0%
log1p-define100.0%
expm1-undefine100.0%
add-exp-log100.0%
+-commutative100.0%
fma-define100.0%
add-sqr-sqrt100.0%
fabs-sqr100.0%
add-sqr-sqrt100.0%
Applied egg-rr100.0%
fma-undefine100.0%
associate--l+100.0%
metadata-eval100.0%
+-rgt-identity100.0%
Simplified100.0%
log1p-expm1-u100.0%
log1p-undefine100.0%
add-sqr-sqrt100.0%
fabs-sqr100.0%
add-sqr-sqrt100.0%
Applied egg-rr100.0%
log1p-define100.0%
log1p-expm1-u100.0%
*-commutative100.0%
Applied egg-rr100.0%
Taylor expanded in x around 0 100.0%
Final simplification71.7%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (if (<= x_m 0.8) (pow (sqrt (fma x_m 1.128386358070218 1e-9)) 2.0) (- 1.0 (/ (/ 0.254829592 (exp (pow x_m 2.0))) (fma 0.3275911 x_m 1.0)))))
x_m = fabs(x);
// Herbie alternative #5: linear branch up to 0.8; above that the polynomial is
// Taylor-truncated (expansion "around inf" per the log) to its leading term:
// 1 - 0.254829592 * exp(-x_m^2) / (1 + 0.3275911*x_m), with the denominator
// computed via fma. Expects x_m = fabs(x) precomputed by the caller.
double code(double x_m) {
double tmp;
if (x_m <= 0.8) {
tmp = pow(sqrt(fma(x_m, 1.128386358070218, 1e-9)), 2.0);
} else {
tmp = 1.0 - ((0.254829592 / exp(pow(x_m, 2.0))) / fma(0.3275911, x_m, 1.0));
}
return tmp;
}
x_m = abs(x) function code(x_m) tmp = 0.0 if (x_m <= 0.8) tmp = sqrt(fma(x_m, 1.128386358070218, 1e-9)) ^ 2.0; else tmp = Float64(1.0 - Float64(Float64(0.254829592 / exp((x_m ^ 2.0))) / fma(0.3275911, x_m, 1.0))); end return tmp end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := If[LessEqual[x$95$m, 0.8], N[Power[N[Sqrt[N[(x$95$m * 1.128386358070218 + 1e-9), $MachinePrecision]], $MachinePrecision], 2.0], $MachinePrecision], N[(1.0 - N[(N[(0.254829592 / N[Exp[N[Power[x$95$m, 2.0], $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(0.3275911 * x$95$m + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
\mathbf{if}\;x\_m \leq 0.8:\\
\;\;\;\;{\left(\sqrt{\mathsf{fma}\left(x\_m, 1.128386358070218, 10^{-9}\right)}\right)}^{2}\\
\mathbf{else}:\\
\;\;\;\;1 - \frac{\frac{0.254829592}{e^{{x\_m}^{2}}}}{\mathsf{fma}\left(0.3275911, x\_m, 1\right)}\\
\end{array}
\end{array}
if x < 0.80000000000000004Initial program 72.5%
Simplified72.5%
Applied egg-rr38.3%
expm1-undefine36.8%
sub-neg36.8%
log1p-undefine36.8%
rem-exp-log36.8%
associate-+r-36.8%
metadata-eval36.8%
metadata-eval36.8%
Simplified36.8%
Taylor expanded in x around 0 64.8%
*-commutative64.8%
Simplified64.8%
add-sqr-sqrt64.4%
pow264.4%
+-commutative64.4%
fma-define64.4%
Applied egg-rr64.4%
if 0.80000000000000004 < x Initial program 100.0%
Simplified100.0%
expm1-log1p-u100.0%
log1p-define100.0%
expm1-undefine100.0%
add-exp-log100.0%
+-commutative100.0%
fma-define100.0%
add-sqr-sqrt100.0%
fabs-sqr100.0%
add-sqr-sqrt100.0%
Applied egg-rr100.0%
fma-undefine100.0%
associate--l+100.0%
metadata-eval100.0%
+-rgt-identity100.0%
Simplified100.0%
log1p-expm1-u100.0%
log1p-undefine100.0%
add-sqr-sqrt100.0%
fabs-sqr100.0%
add-sqr-sqrt100.0%
Applied egg-rr100.0%
Taylor expanded in x around inf 100.0%
associate-*r/100.0%
exp-neg100.0%
associate-*r/100.0%
metadata-eval100.0%
+-commutative100.0%
fma-undefine100.0%
unpow1100.0%
sqr-pow100.0%
fabs-sqr100.0%
sqr-pow100.0%
unpow1100.0%
Simplified100.0%
Final simplification71.7%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (if (<= x_m 0.89) (pow (sqrt (fma x_m 1.128386358070218 1e-9)) 2.0) 1.0))
x_m = fabs(x);
// Herbie alternative #6: linear branch up to 0.89, constant 1.0 above (the
// log rates the constant at ~100% accuracy on that range, i.e. erf(|x|) has
// saturated). Expects x_m = fabs(x) precomputed by the caller.
double code(double x_m) {
double tmp;
if (x_m <= 0.89) {
// Taylor-derived linear form; sqrt(.)^2 wrapper is a rewrite artifact.
tmp = pow(sqrt(fma(x_m, 1.128386358070218, 1e-9)), 2.0);
} else {
tmp = 1.0;
}
return tmp;
}
x_m = abs(x) function code(x_m) tmp = 0.0 if (x_m <= 0.89) tmp = sqrt(fma(x_m, 1.128386358070218, 1e-9)) ^ 2.0; else tmp = 1.0; end return tmp end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := If[LessEqual[x$95$m, 0.89], N[Power[N[Sqrt[N[(x$95$m * 1.128386358070218 + 1e-9), $MachinePrecision]], $MachinePrecision], 2.0], $MachinePrecision], 1.0]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
\mathbf{if}\;x\_m \leq 0.89:\\
\;\;\;\;{\left(\sqrt{\mathsf{fma}\left(x\_m, 1.128386358070218, 10^{-9}\right)}\right)}^{2}\\
\mathbf{else}:\\
\;\;\;\;1\\
\end{array}
\end{array}
if x < 0.890000000000000013Initial program 72.5%
Simplified72.5%
Applied egg-rr38.3%
expm1-undefine36.8%
sub-neg36.8%
log1p-undefine36.8%
rem-exp-log36.8%
associate-+r-36.8%
metadata-eval36.8%
metadata-eval36.8%
Simplified36.8%
Taylor expanded in x around 0 64.8%
*-commutative64.8%
Simplified64.8%
add-sqr-sqrt64.4%
pow264.4%
+-commutative64.4%
fma-define64.4%
Applied egg-rr64.4%
if 0.890000000000000013 < x Initial program 100.0%
Simplified100.0%
Applied egg-rr0.0%
expm1-undefine0.0%
sub-neg0.0%
log1p-undefine0.0%
rem-exp-log0.6%
associate-+r-0.6%
metadata-eval0.6%
metadata-eval0.6%
Simplified0.6%
Taylor expanded in x around inf 100.0%
Final simplification71.7%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (if (<= x_m 0.89) (+ 1e-9 (pow (sqrt (* x_m 1.128386358070218)) 2.0)) 1.0))
x_m = fabs(x);
// Herbie alternative #7: same shape as alternative #6 but the 1e-9 offset is
// added outside the (rewrite-artifact) sqrt(.)^2 wrapper instead of inside
// the fma. Expects x_m = fabs(x) precomputed by the caller.
double code(double x_m) {
double tmp;
if (x_m <= 0.89) {
tmp = 1e-9 + pow(sqrt((x_m * 1.128386358070218)), 2.0);
} else {
tmp = 1.0;
}
return tmp;
}
x_m = abs(x)
! Herbie alternative #7 (Fortran): linear Taylor form below 0.89 (the
! sqrt(...)**2 wrapper is a rewrite artifact), constant 1.0 above, where the
! report's accuracy log rates the constant at ~100%.
! Expects x_m = abs(x) precomputed by the caller.
real(8) function code(x_m)
real(8), intent (in) :: x_m
real(8) :: tmp
if (x_m <= 0.89d0) then
tmp = 1d-9 + (sqrt((x_m * 1.128386358070218d0)) ** 2.0d0)
else
tmp = 1.0d0
end if
code = tmp
end function
x_m = Math.abs(x);
// Herbie alternative #7 (Java): linear Taylor form below 0.89 (the
// sqrt(.)^2 wrapper is a rewrite artifact), constant 1.0 above.
// Expects x_m = Math.abs(x) precomputed by the caller.
public static double code(double x_m) {
double tmp;
if (x_m <= 0.89) {
tmp = 1e-9 + Math.pow(Math.sqrt((x_m * 1.128386358070218)), 2.0);
} else {
tmp = 1.0;
}
return tmp;
}
x_m = math.fabs(x) def code(x_m): tmp = 0 if x_m <= 0.89: tmp = 1e-9 + math.pow(math.sqrt((x_m * 1.128386358070218)), 2.0) else: tmp = 1.0 return tmp
x_m = abs(x) function code(x_m) tmp = 0.0 if (x_m <= 0.89) tmp = Float64(1e-9 + (sqrt(Float64(x_m * 1.128386358070218)) ^ 2.0)); else tmp = 1.0; end return tmp end
x_m = abs(x); function tmp_2 = code(x_m) tmp = 0.0; if (x_m <= 0.89) tmp = 1e-9 + (sqrt((x_m * 1.128386358070218)) ^ 2.0); else tmp = 1.0; end tmp_2 = tmp; end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := If[LessEqual[x$95$m, 0.89], N[(1e-9 + N[Power[N[Sqrt[N[(x$95$m * 1.128386358070218), $MachinePrecision]], $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision], 1.0]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
\mathbf{if}\;x\_m \leq 0.89:\\
\;\;\;\;10^{-9} + {\left(\sqrt{x\_m \cdot 1.128386358070218}\right)}^{2}\\
\mathbf{else}:\\
\;\;\;\;1\\
\end{array}
\end{array}
if x < 0.890000000000000013Initial program 72.5%
Simplified72.5%
Applied egg-rr38.3%
expm1-undefine36.8%
sub-neg36.8%
log1p-undefine36.8%
rem-exp-log36.8%
associate-+r-36.8%
metadata-eval36.8%
metadata-eval36.8%
Simplified36.8%
Taylor expanded in x around 0 64.8%
*-commutative64.8%
Simplified64.8%
add-sqr-sqrt32.0%
pow232.0%
Applied egg-rr32.0%
if 0.890000000000000013 < x Initial program 100.0%
Simplified100.0%
Applied egg-rr0.0%
expm1-undefine0.0%
sub-neg0.0%
log1p-undefine0.0%
rem-exp-log0.6%
associate-+r-0.6%
metadata-eval0.6%
metadata-eval0.6%
Simplified0.6%
Taylor expanded in x around inf 100.0%
Final simplification45.8%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (if (<= x_m 0.89) (+ 1e-9 (* x_m 1.128386358070218)) 1.0))
x_m = fabs(x);
// Herbie alternative #8: plain linear approximation 1e-9 + 1.128386358070218*x_m
// below the 0.89 threshold, constant 1.0 above.
// Expects x_m = fabs(x) precomputed by the caller.
double code(double x_m) {
    // Same predicate as the original if/else, so a NaN input still falls
    // through to the 1.0 branch.
    return (x_m <= 0.89) ? 1e-9 + (x_m * 1.128386358070218) : 1.0;
}
x_m = abs(x)
! Herbie alternative #8 (Fortran): plain linear approximation below 0.89,
! constant 1.0 above. Expects x_m = abs(x) precomputed by the caller.
real(8) function code(x_m)
real(8), intent (in) :: x_m
real(8) :: tmp
if (x_m <= 0.89d0) then
tmp = 1d-9 + (x_m * 1.128386358070218d0)
else
tmp = 1.0d0
end if
code = tmp
end function
x_m = Math.abs(x);
// Herbie alternative #8 (Java): plain linear approximation below 0.89,
// constant 1.0 above. Expects x_m = Math.abs(x) precomputed by the caller.
public static double code(double x_m) {
    // Same predicate as the original if/else, so a NaN input still falls
    // through to the 1.0 branch.
    return (x_m <= 0.89) ? 1e-9 + (x_m * 1.128386358070218) : 1.0;
}
x_m = math.fabs(x) def code(x_m): tmp = 0 if x_m <= 0.89: tmp = 1e-9 + (x_m * 1.128386358070218) else: tmp = 1.0 return tmp
x_m = abs(x) function code(x_m) tmp = 0.0 if (x_m <= 0.89) tmp = Float64(1e-9 + Float64(x_m * 1.128386358070218)); else tmp = 1.0; end return tmp end
x_m = abs(x); function tmp_2 = code(x_m) tmp = 0.0; if (x_m <= 0.89) tmp = 1e-9 + (x_m * 1.128386358070218); else tmp = 1.0; end tmp_2 = tmp; end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := If[LessEqual[x$95$m, 0.89], N[(1e-9 + N[(x$95$m * 1.128386358070218), $MachinePrecision]), $MachinePrecision], 1.0]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
\mathbf{if}\;x\_m \leq 0.89:\\
\;\;\;\;10^{-9} + x\_m \cdot 1.128386358070218\\
\mathbf{else}:\\
\;\;\;\;1\\
\end{array}
\end{array}
if x < 0.890000000000000013Initial program 72.5%
Simplified72.5%
Applied egg-rr38.3%
expm1-undefine36.8%
sub-neg36.8%
log1p-undefine36.8%
rem-exp-log36.8%
associate-+r-36.8%
metadata-eval36.8%
metadata-eval36.8%
Simplified36.8%
Taylor expanded in x around 0 64.8%
*-commutative64.8%
Simplified64.8%
if 0.890000000000000013 < x Initial program 100.0%
Simplified100.0%
Applied egg-rr0.0%
expm1-undefine0.0%
sub-neg0.0%
log1p-undefine0.0%
rem-exp-log0.6%
associate-+r-0.6%
metadata-eval0.6%
metadata-eval0.6%
Simplified0.6%
Taylor expanded in x around inf 100.0%
Final simplification71.9%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (if (<= x_m 2.8e-5) 1e-9 1.0))
x_m = fabs(x);
// Herbie alternative #9: step function -- 1e-9 up to 2.8e-5, 1.0 above.
// Expects x_m = fabs(x) precomputed by the caller.
double code(double x_m) {
    // Same predicate as the original if/else (NaN falls through to 1.0).
    return (x_m <= 2.8e-5) ? 1e-9 : 1.0;
}
x_m = abs(x)
! Herbie alternative #9 (Fortran): step function -- 1d-9 up to 2.8d-5,
! 1.0 above. Expects x_m = abs(x) precomputed by the caller.
real(8) function code(x_m)
real(8), intent (in) :: x_m
real(8) :: tmp
if (x_m <= 2.8d-5) then
tmp = 1d-9
else
tmp = 1.0d0
end if
code = tmp
end function
x_m = Math.abs(x);
// Herbie alternative #9 (Java): step function -- 1e-9 up to 2.8e-5, 1.0 above.
// Expects x_m = Math.abs(x) precomputed by the caller.
public static double code(double x_m) {
    // Same predicate as the original if/else (NaN falls through to 1.0).
    return (x_m <= 2.8e-5) ? 1e-9 : 1.0;
}
x_m = math.fabs(x) def code(x_m): tmp = 0 if x_m <= 2.8e-5: tmp = 1e-9 else: tmp = 1.0 return tmp
x_m = abs(x) function code(x_m) tmp = 0.0 if (x_m <= 2.8e-5) tmp = 1e-9; else tmp = 1.0; end return tmp end
x_m = abs(x); function tmp_2 = code(x_m) tmp = 0.0; if (x_m <= 2.8e-5) tmp = 1e-9; else tmp = 1.0; end tmp_2 = tmp; end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := If[LessEqual[x$95$m, 2.8e-5], 1e-9, 1.0]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
\mathbf{if}\;x\_m \leq 2.8 \cdot 10^{-5}:\\
\;\;\;\;10^{-9}\\
\mathbf{else}:\\
\;\;\;\;1\\
\end{array}
\end{array}
if x < 2.79999999999999996e-5Initial program 72.4%
Simplified72.4%
Applied egg-rr38.4%
expm1-undefine36.9%
sub-neg36.9%
log1p-undefine36.9%
rem-exp-log36.9%
associate-+r-36.9%
metadata-eval36.9%
metadata-eval36.9%
Simplified36.9%
Taylor expanded in x around 0 68.0%
if 2.79999999999999996e-5 < x Initial program 99.9%
Simplified99.9%
Applied egg-rr0.4%
expm1-undefine0.4%
sub-neg0.4%
log1p-undefine0.4%
rem-exp-log1.1%
associate-+r-1.1%
metadata-eval1.1%
metadata-eval1.1%
Simplified1.1%
Taylor expanded in x around inf 98.4%
Final simplification74.3%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 1e-9)
x_m = fabs(x);
// Herbie alternative #10: degenerate constant fold -- always returns 1e-9,
// the formula's value at x = 0 (the log rates this ~56% accurate overall).
// Fix: explicitly void the intentionally-unused parameter so the generated
// code compiles cleanly under -Wunused-parameter.
double code(double x_m) {
    (void)x_m; // input deliberately ignored by this constant variant
    return 1e-9;
}
x_m = abs(x)
! Herbie alternative #10 (Fortran): degenerate constant fold -- always
! returns 1d-9 (the formula's value at x = 0); x_m is deliberately unused.
real(8) function code(x_m)
real(8), intent (in) :: x_m
code = 1d-9
end function
x_m = Math.abs(x);
// Herbie alternative #10 (Java): degenerate constant fold -- always returns
// 1e-9 (the formula's value at x = 0); x_m is deliberately unused.
public static double code(double x_m) {
return 1e-9;
}
x_m = math.fabs(x) def code(x_m): return 1e-9
x_m = abs(x) function code(x_m) return 1e-9 end
x_m = abs(x); function tmp = code(x_m) tmp = 1e-9; end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := 1e-9
\begin{array}{l}
x_m = \left|x\right|
\\
10^{-9}
\end{array}
Initial program 78.1%
Simplified78.1%
Applied egg-rr30.5%
expm1-undefine29.3%
sub-neg29.3%
log1p-undefine29.3%
rem-exp-log29.5%
associate-+r-29.5%
metadata-eval29.5%
metadata-eval29.5%
Simplified29.5%
Taylor expanded in x around 0 56.2%
Final simplification56.2%
herbie shell --seed 2024036
(FPCore (x)
:name "Jmat.Real.erf"
:precision binary64
(- 1.0 (* (* (/ 1.0 (+ 1.0 (* 0.3275911 (fabs x)))) (+ 0.254829592 (* (/ 1.0 (+ 1.0 (* 0.3275911 (fabs x)))) (+ -0.284496736 (* (/ 1.0 (+ 1.0 (* 0.3275911 (fabs x)))) (+ 1.421413741 (* (/ 1.0 (+ 1.0 (* 0.3275911 (fabs x)))) (+ -1.453152027 (* (/ 1.0 (+ 1.0 (* 0.3275911 (fabs x)))) 1.061405429))))))))) (exp (- (* (fabs x) (fabs x)))))))