
; Abramowitz & Stegun 7.1.26-style approximation of erf(|x|):
; a degree-5 polynomial in t_0 = 1/(1 + 0.3275911*|x|), scaled by exp(-x^2).
; (Horner form; evaluation order determines the binary64 rounding.)
(FPCore (x)
:precision binary64
(let* ((t_0 (/ 1.0 (+ 1.0 (* 0.3275911 (fabs x))))))
(-
1.0
(*
(*
t_0
(+
0.254829592
(*
t_0
(+
-0.284496736
(*
t_0
(+ 1.421413741 (* t_0 (+ -1.453152027 (* t_0 1.061405429)))))))))
(exp (- (* (fabs x) (fabs x))))))))
double code(double x) {
double t_0 = 1.0 / (1.0 + (0.3275911 * fabs(x)));
return 1.0 - ((t_0 * (0.254829592 + (t_0 * (-0.284496736 + (t_0 * (1.421413741 + (t_0 * (-1.453152027 + (t_0 * 1.061405429))))))))) * exp(-(fabs(x) * fabs(x))));
}
! Abramowitz & Stegun 7.1.26-style approximation of erf(|x|):
! degree-5 Horner polynomial in t_0 = 1/(1 + 0.3275911*|x|), scaled by exp(-x**2).
real(8) function code(x)
real(8), intent (in) :: x
real(8) :: t_0
! Rational substitution variable of the approximation.
t_0 = 1.0d0 / (1.0d0 + (0.3275911d0 * abs(x)))
! Evaluation order is significant for binary64 rounding; do not reassociate.
code = 1.0d0 - ((t_0 * (0.254829592d0 + (t_0 * ((-0.284496736d0) + (t_0 * (1.421413741d0 + (t_0 * ((-1.453152027d0) + (t_0 * 1.061405429d0))))))))) * exp(-(abs(x) * abs(x))))
end function
/**
 * Approximates erf(|x|) using the Abramowitz &amp; Stegun 7.1.26 form:
 * a degree-5 Horner polynomial in t = 1/(1 + 0.3275911*|x|) scaled by
 * exp(-x*x).  Operation order matches the original exactly, so the
 * binary64 result is identical.
 *
 * @param x input value; only |x| is used, so code(-x) == code(x)
 * @return the approximation value
 */
public static double code(double x) {
    double ax = Math.abs(x);
    double t = 1.0 / (1.0 + 0.3275911 * ax);
    // Horner evaluation of the degree-5 polynomial in t.
    double poly = 0.254829592
            + t * (-0.284496736
            + t * (1.421413741
            + t * (-1.453152027
            + t * 1.061405429)));
    return 1.0 - (t * poly) * Math.exp(-(ax * ax));
}
def code(x):
    """Approximate erf(|x|) via the Abramowitz & Stegun 7.1.26 form.

    Degree-5 Horner polynomial in t_0 = 1/(1 + 0.3275911*|x|), scaled by
    exp(-x*x).  Uses |x|, so code(-x) == code(x), matching the FPCore spec.
    """
    # Fix: the original was a one-line `def` with the body juxtaposed after
    # the colon (`def code(x): t_0 = ... return ...`), which is a Python
    # syntax error.  The arithmetic below is byte-identical to the original.
    t_0 = 1.0 / (1.0 + (0.3275911 * math.fabs(x)))
    return 1.0 - ((t_0 * (0.254829592 + (t_0 * (-0.284496736 + (t_0 * (1.421413741 + (t_0 * (-1.453152027 + (t_0 * 1.061405429))))))))) * math.exp(-(math.fabs(x) * math.fabs(x))))
# Approximates erf(|x|) (Abramowitz & Stegun 7.1.26 form): degree-5 Horner
# polynomial in t_0 = 1/(1 + 0.3275911*abs(x)) scaled by exp(-x^2).
# Fix: the original one-line definition had no statement separator between
# `t_0 = ...` and `return ...`, which does not parse in Julia.  The
# arithmetic below is byte-identical to the original.
function code(x)
    t_0 = Float64(1.0 / Float64(1.0 + Float64(0.3275911 * abs(x))))
    return Float64(1.0 - Float64(Float64(t_0 * Float64(0.254829592 + Float64(t_0 * Float64(-0.284496736 + Float64(t_0 * Float64(1.421413741 + Float64(t_0 * Float64(-1.453152027 + Float64(t_0 * 1.061405429))))))))) * exp(Float64(-Float64(abs(x) * abs(x))))))
end
function tmp = code(x)
% Approximates erf(|x|) (Abramowitz & Stegun 7.1.26 form): degree-5 Horner
% polynomial in t = 1/(1 + 0.3275911*abs(x)) scaled by exp(-x^2).
% Operation order matches the original exactly, so rounding is unchanged.
    t = 1.0 / (1.0 + (0.3275911 * abs(x)));
    poly = 0.254829592 + (t * (-0.284496736 + (t * (1.421413741 + (t * (-1.453152027 + (t * 1.061405429)))))));
    tmp = 1.0 - ((t * poly) * exp(-(abs(x) * abs(x))));
end
(* Machine-precision rendering of the erf(|x|) approximation: each intermediate
   is pinned with N[..., $MachinePrecision] so the evaluation order matches the
   binary64 FPCore spec above. *)
code[x_] := Block[{t$95$0 = N[(1.0 / N[(1.0 + N[(0.3275911 * N[Abs[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, N[(1.0 - N[(N[(t$95$0 * N[(0.254829592 + N[(t$95$0 * N[(-0.284496736 + N[(t$95$0 * N[(1.421413741 + N[(t$95$0 * N[(-1.453152027 + N[(t$95$0 * 1.061405429), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[Exp[(-N[(N[Abs[x], $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision])], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{1}{1 + 0.3275911 \cdot \left|x\right|}\\
1 - \left(t_0 \cdot \left(0.254829592 + t_0 \cdot \left(-0.284496736 + t_0 \cdot \left(1.421413741 + t_0 \cdot \left(-1.453152027 + t_0 \cdot 1.061405429\right)\right)\right)\right)\right) \cdot e^{-\left|x\right| \cdot \left|x\right|}
\end{array}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 10 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x)
:precision binary64
(let* ((t_0 (/ 1.0 (+ 1.0 (* 0.3275911 (fabs x))))))
(-
1.0
(*
(*
t_0
(+
0.254829592
(*
t_0
(+
-0.284496736
(*
t_0
(+ 1.421413741 (* t_0 (+ -1.453152027 (* t_0 1.061405429)))))))))
(exp (- (* (fabs x) (fabs x))))))))
/* Baseline: Abramowitz & Stegun 7.1.26-style erf(|x|) approximation —
 * degree-5 Horner polynomial in t_0 = 1/(1 + 0.3275911*|x|) times exp(-x^2).
 * Evaluation order determines the binary64 rounding; do not reassociate. */
double code(double x) {
double t_0 = 1.0 / (1.0 + (0.3275911 * fabs(x)));
return 1.0 - ((t_0 * (0.254829592 + (t_0 * (-0.284496736 + (t_0 * (1.421413741 + (t_0 * (-1.453152027 + (t_0 * 1.061405429))))))))) * exp(-(fabs(x) * fabs(x))));
}
real(8) function code(x)
real(8), intent (in) :: x
real(8) :: t_0
t_0 = 1.0d0 / (1.0d0 + (0.3275911d0 * abs(x)))
code = 1.0d0 - ((t_0 * (0.254829592d0 + (t_0 * ((-0.284496736d0) + (t_0 * (1.421413741d0 + (t_0 * ((-1.453152027d0) + (t_0 * 1.061405429d0))))))))) * exp(-(abs(x) * abs(x))))
end function
public static double code(double x) {
double t_0 = 1.0 / (1.0 + (0.3275911 * Math.abs(x)));
return 1.0 - ((t_0 * (0.254829592 + (t_0 * (-0.284496736 + (t_0 * (1.421413741 + (t_0 * (-1.453152027 + (t_0 * 1.061405429))))))))) * Math.exp(-(Math.abs(x) * Math.abs(x))));
}
def code(x):
    """Approximate erf(|x|) via the Abramowitz & Stegun 7.1.26 form.

    Degree-5 Horner polynomial in t_0 = 1/(1 + 0.3275911*|x|), scaled by
    exp(-x*x).  Uses |x|, so code(-x) == code(x), matching the FPCore spec.
    """
    # Fix: the original was a one-line `def` with the body juxtaposed after
    # the colon, which is a Python syntax error.  The arithmetic below is
    # byte-identical to the original.
    t_0 = 1.0 / (1.0 + (0.3275911 * math.fabs(x)))
    return 1.0 - ((t_0 * (0.254829592 + (t_0 * (-0.284496736 + (t_0 * (1.421413741 + (t_0 * (-1.453152027 + (t_0 * 1.061405429))))))))) * math.exp(-(math.fabs(x) * math.fabs(x))))
function code(x) t_0 = Float64(1.0 / Float64(1.0 + Float64(0.3275911 * abs(x)))) return Float64(1.0 - Float64(Float64(t_0 * Float64(0.254829592 + Float64(t_0 * Float64(-0.284496736 + Float64(t_0 * Float64(1.421413741 + Float64(t_0 * Float64(-1.453152027 + Float64(t_0 * 1.061405429))))))))) * exp(Float64(-Float64(abs(x) * abs(x)))))) end
function tmp = code(x) t_0 = 1.0 / (1.0 + (0.3275911 * abs(x))); tmp = 1.0 - ((t_0 * (0.254829592 + (t_0 * (-0.284496736 + (t_0 * (1.421413741 + (t_0 * (-1.453152027 + (t_0 * 1.061405429))))))))) * exp(-(abs(x) * abs(x)))); end
code[x_] := Block[{t$95$0 = N[(1.0 / N[(1.0 + N[(0.3275911 * N[Abs[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, N[(1.0 - N[(N[(t$95$0 * N[(0.254829592 + N[(t$95$0 * N[(-0.284496736 + N[(t$95$0 * N[(1.421413741 + N[(t$95$0 * N[(-1.453152027 + N[(t$95$0 * 1.061405429), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[Exp[(-N[(N[Abs[x], $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision])], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{1}{1 + 0.3275911 \cdot \left|x\right|}\\
1 - \left(t\_0 \cdot \left(0.254829592 + t\_0 \cdot \left(-0.284496736 + t\_0 \cdot \left(1.421413741 + t\_0 \cdot \left(-1.453152027 + t\_0 \cdot 1.061405429\right)\right)\right)\right)\right) \cdot e^{-\left|x\right| \cdot \left|x\right|}
\end{array}
\end{array}
(FPCore (x)
:precision binary64
(let* ((t_0 (* 0.3275911 (fabs x))) (t_1 (fma 0.3275911 (fabs x) 1.0)))
(+
1.0
(*
(exp (- (* (fabs x) (fabs x))))
(*
(+
0.254829592
(fma
(/
(/
(+
1.421413741
(/
(fma
(/ 1.061405429 (fma (* x x) -0.10731592879921 1.0))
(fma (fabs x) -0.3275911 1.0)
-1.453152027)
t_1))
t_1)
(- 1.0 (* (* x x) 0.10731592879921)))
(/ 1.0 (/ 1.0 (- 1.0 t_0)))
(/ -0.284496736 t_1)))
(/ 1.0 (- -1.0 t_0)))))))
/* Herbie-generated alternative of the erf(|x|) approximation using fma and
 * division rewrites.  The exact expression tree is load-bearing for the
 * reported accuracy — do not simplify or reassociate. */
double code(double x) {
double t_0 = 0.3275911 * fabs(x); /* p*|x| */
double t_1 = fma(0.3275911, fabs(x), 1.0); /* 1 + p*|x|, fused */
/* NOTE(review): 0.10731592879921 == 0.3275911^2, so fma(x*x, -0.10731592879921, 1.0)
 * is 1 - (p*x)^2 — confirm against the Herbie derivation log below. */
return 1.0 + (exp(-(fabs(x) * fabs(x))) * ((0.254829592 + fma((((1.421413741 + (fma((1.061405429 / fma((x * x), -0.10731592879921, 1.0)), fma(fabs(x), -0.3275911, 1.0), -1.453152027) / t_1)) / t_1) / (1.0 - ((x * x) * 0.10731592879921))), (1.0 / (1.0 / (1.0 - t_0))), (-0.284496736 / t_1))) * (1.0 / (-1.0 - t_0))));
}
function code(x) t_0 = Float64(0.3275911 * abs(x)) t_1 = fma(0.3275911, abs(x), 1.0) return Float64(1.0 + Float64(exp(Float64(-Float64(abs(x) * abs(x)))) * Float64(Float64(0.254829592 + fma(Float64(Float64(Float64(1.421413741 + Float64(fma(Float64(1.061405429 / fma(Float64(x * x), -0.10731592879921, 1.0)), fma(abs(x), -0.3275911, 1.0), -1.453152027) / t_1)) / t_1) / Float64(1.0 - Float64(Float64(x * x) * 0.10731592879921))), Float64(1.0 / Float64(1.0 / Float64(1.0 - t_0))), Float64(-0.284496736 / t_1))) * Float64(1.0 / Float64(-1.0 - t_0))))) end
code[x_] := Block[{t$95$0 = N[(0.3275911 * N[Abs[x], $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(0.3275911 * N[Abs[x], $MachinePrecision] + 1.0), $MachinePrecision]}, N[(1.0 + N[(N[Exp[(-N[(N[Abs[x], $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision])], $MachinePrecision] * N[(N[(0.254829592 + N[(N[(N[(N[(1.421413741 + N[(N[(N[(1.061405429 / N[(N[(x * x), $MachinePrecision] * -0.10731592879921 + 1.0), $MachinePrecision]), $MachinePrecision] * N[(N[Abs[x], $MachinePrecision] * -0.3275911 + 1.0), $MachinePrecision] + -1.453152027), $MachinePrecision] / t$95$1), $MachinePrecision]), $MachinePrecision] / t$95$1), $MachinePrecision] / N[(1.0 - N[(N[(x * x), $MachinePrecision] * 0.10731592879921), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(1.0 / N[(1.0 / N[(1.0 - t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(-0.284496736 / t$95$1), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(1.0 / N[(-1.0 - t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := 0.3275911 \cdot \left|x\right|\\
t_1 := \mathsf{fma}\left(0.3275911, \left|x\right|, 1\right)\\
1 + e^{-\left|x\right| \cdot \left|x\right|} \cdot \left(\left(0.254829592 + \mathsf{fma}\left(\frac{\frac{1.421413741 + \frac{\mathsf{fma}\left(\frac{1.061405429}{\mathsf{fma}\left(x \cdot x, -0.10731592879921, 1\right)}, \mathsf{fma}\left(\left|x\right|, -0.3275911, 1\right), -1.453152027\right)}{t\_1}}{t\_1}}{1 - \left(x \cdot x\right) \cdot 0.10731592879921}, \frac{1}{\frac{1}{1 - t\_0}}, \frac{-0.284496736}{t\_1}\right)\right) \cdot \frac{1}{-1 - t\_0}\right)
\end{array}
\end{array}
Initial program 78.9%
Applied egg-rr78.9%
lift-fabs.f64N/A
lift-fma.f64N/A
lift-/.f64N/A
+-commutativeN/A
lift-/.f64N/A
lift-fma.f64N/A
lift-*.f64N/A
+-commutativeN/A
flip-+N/A
lift--.f64N/A
associate-/r/N/A
lower-fma.f64N/A
Applied egg-rr78.9%
Final simplification78.9%
(FPCore (x)
:precision binary64
(let* ((t_0 (fma 0.3275911 (fabs x) 1.0)) (t_1 (* 0.3275911 (fabs x))))
(+
1.0
(*
(exp (- (* (fabs x) (fabs x))))
(*
(+
0.254829592
(fma
(/
(/ (+ 1.421413741 (/ (+ -1.453152027 (/ 1.061405429 t_0)) t_0)) t_0)
(- 1.0 (* (* x x) 0.10731592879921)))
(/ 1.0 (/ 1.0 (- 1.0 t_1)))
(/ -0.284496736 t_0)))
(/ 1.0 (- -1.0 t_1)))))))
/* Herbie-generated alternative: the Horner polynomial rewritten as nested
 * divisions by t_0 = fma(p, |x|, 1).  Exact operation order is load-bearing
 * for the reported accuracy — do not simplify or reassociate. */
double code(double x) {
double t_0 = fma(0.3275911, fabs(x), 1.0); /* 1 + p*|x|, fused */
double t_1 = 0.3275911 * fabs(x); /* p*|x| */
return 1.0 + (exp(-(fabs(x) * fabs(x))) * ((0.254829592 + fma((((1.421413741 + ((-1.453152027 + (1.061405429 / t_0)) / t_0)) / t_0) / (1.0 - ((x * x) * 0.10731592879921))), (1.0 / (1.0 / (1.0 - t_1))), (-0.284496736 / t_0))) * (1.0 / (-1.0 - t_1))));
}
function code(x) t_0 = fma(0.3275911, abs(x), 1.0) t_1 = Float64(0.3275911 * abs(x)) return Float64(1.0 + Float64(exp(Float64(-Float64(abs(x) * abs(x)))) * Float64(Float64(0.254829592 + fma(Float64(Float64(Float64(1.421413741 + Float64(Float64(-1.453152027 + Float64(1.061405429 / t_0)) / t_0)) / t_0) / Float64(1.0 - Float64(Float64(x * x) * 0.10731592879921))), Float64(1.0 / Float64(1.0 / Float64(1.0 - t_1))), Float64(-0.284496736 / t_0))) * Float64(1.0 / Float64(-1.0 - t_1))))) end
code[x_] := Block[{t$95$0 = N[(0.3275911 * N[Abs[x], $MachinePrecision] + 1.0), $MachinePrecision]}, Block[{t$95$1 = N[(0.3275911 * N[Abs[x], $MachinePrecision]), $MachinePrecision]}, N[(1.0 + N[(N[Exp[(-N[(N[Abs[x], $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision])], $MachinePrecision] * N[(N[(0.254829592 + N[(N[(N[(N[(1.421413741 + N[(N[(-1.453152027 + N[(1.061405429 / t$95$0), $MachinePrecision]), $MachinePrecision] / t$95$0), $MachinePrecision]), $MachinePrecision] / t$95$0), $MachinePrecision] / N[(1.0 - N[(N[(x * x), $MachinePrecision] * 0.10731592879921), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(1.0 / N[(1.0 / N[(1.0 - t$95$1), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(-0.284496736 / t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(1.0 / N[(-1.0 - t$95$1), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \mathsf{fma}\left(0.3275911, \left|x\right|, 1\right)\\
t_1 := 0.3275911 \cdot \left|x\right|\\
1 + e^{-\left|x\right| \cdot \left|x\right|} \cdot \left(\left(0.254829592 + \mathsf{fma}\left(\frac{\frac{1.421413741 + \frac{-1.453152027 + \frac{1.061405429}{t\_0}}{t\_0}}{t\_0}}{1 - \left(x \cdot x\right) \cdot 0.10731592879921}, \frac{1}{\frac{1}{1 - t\_1}}, \frac{-0.284496736}{t\_0}\right)\right) \cdot \frac{1}{-1 - t\_1}\right)
\end{array}
\end{array}
Initial program 78.9%
Applied egg-rr78.9%
Final simplification78.9%
(FPCore (x)
:precision binary64
(let* ((t_0 (fma 0.3275911 (fabs x) 1.0)))
(-
1.0
(/
(fma
(/
(+
-0.284496736
(/ (+ 1.421413741 (/ (+ -1.453152027 (/ 1.061405429 t_0)) t_0)) t_0))
(fma (* x x) -0.10731592879921 1.0))
(fma (fabs x) -0.3275911 1.0)
0.254829592)
(* t_0 (exp (* x x)))))))
/* Herbie-generated alternative: divides by t_0 * exp(x*x) instead of
 * multiplying by exp(-x*x).  NOTE(review): exp(x*x) overflows to +inf for
 * |x| ≳ 26.6, making the quotient 0 and the result exactly 1 — which is the
 * correct limit, but via a different path than the baseline; confirm this
 * matches the sampled accuracy figures below. */
double code(double x) {
double t_0 = fma(0.3275911, fabs(x), 1.0); /* 1 + p*|x|, fused */
return 1.0 - (fma(((-0.284496736 + ((1.421413741 + ((-1.453152027 + (1.061405429 / t_0)) / t_0)) / t_0)) / fma((x * x), -0.10731592879921, 1.0)), fma(fabs(x), -0.3275911, 1.0), 0.254829592) / (t_0 * exp((x * x))));
}
function code(x) t_0 = fma(0.3275911, abs(x), 1.0) return Float64(1.0 - Float64(fma(Float64(Float64(-0.284496736 + Float64(Float64(1.421413741 + Float64(Float64(-1.453152027 + Float64(1.061405429 / t_0)) / t_0)) / t_0)) / fma(Float64(x * x), -0.10731592879921, 1.0)), fma(abs(x), -0.3275911, 1.0), 0.254829592) / Float64(t_0 * exp(Float64(x * x))))) end
code[x_] := Block[{t$95$0 = N[(0.3275911 * N[Abs[x], $MachinePrecision] + 1.0), $MachinePrecision]}, N[(1.0 - N[(N[(N[(N[(-0.284496736 + N[(N[(1.421413741 + N[(N[(-1.453152027 + N[(1.061405429 / t$95$0), $MachinePrecision]), $MachinePrecision] / t$95$0), $MachinePrecision]), $MachinePrecision] / t$95$0), $MachinePrecision]), $MachinePrecision] / N[(N[(x * x), $MachinePrecision] * -0.10731592879921 + 1.0), $MachinePrecision]), $MachinePrecision] * N[(N[Abs[x], $MachinePrecision] * -0.3275911 + 1.0), $MachinePrecision] + 0.254829592), $MachinePrecision] / N[(t$95$0 * N[Exp[N[(x * x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \mathsf{fma}\left(0.3275911, \left|x\right|, 1\right)\\
1 - \frac{\mathsf{fma}\left(\frac{-0.284496736 + \frac{1.421413741 + \frac{-1.453152027 + \frac{1.061405429}{t\_0}}{t\_0}}{t\_0}}{\mathsf{fma}\left(x \cdot x, -0.10731592879921, 1\right)}, \mathsf{fma}\left(\left|x\right|, -0.3275911, 1\right), 0.254829592\right)}{t\_0 \cdot e^{x \cdot x}}
\end{array}
\end{array}
Initial program 78.9%
Applied egg-rr78.9%
Applied egg-rr78.9%
Final simplification78.9%
(FPCore (x)
:precision binary64
(let* ((t_0 (fma 0.3275911 (fabs x) 1.0)))
(-
1.0
(/
(*
(+
0.254829592
(/
(+
-0.284496736
(/ (+ 1.421413741 (/ (+ -1.453152027 (/ 1.061405429 t_0)) t_0)) t_0))
t_0))
(exp (* x (- x))))
t_0))))
/* Herbie-generated alternative: polynomial expressed as nested divisions by
 * t_0, and exp(-x*x) spelled as exp(x * -x).  Exact operation order is
 * load-bearing for the reported accuracy — do not simplify. */
double code(double x) {
double t_0 = fma(0.3275911, fabs(x), 1.0); /* 1 + p*|x|, fused */
return 1.0 - (((0.254829592 + ((-0.284496736 + ((1.421413741 + ((-1.453152027 + (1.061405429 / t_0)) / t_0)) / t_0)) / t_0)) * exp((x * -x))) / t_0);
}
function code(x) t_0 = fma(0.3275911, abs(x), 1.0) return Float64(1.0 - Float64(Float64(Float64(0.254829592 + Float64(Float64(-0.284496736 + Float64(Float64(1.421413741 + Float64(Float64(-1.453152027 + Float64(1.061405429 / t_0)) / t_0)) / t_0)) / t_0)) * exp(Float64(x * Float64(-x)))) / t_0)) end
code[x_] := Block[{t$95$0 = N[(0.3275911 * N[Abs[x], $MachinePrecision] + 1.0), $MachinePrecision]}, N[(1.0 - N[(N[(N[(0.254829592 + N[(N[(-0.284496736 + N[(N[(1.421413741 + N[(N[(-1.453152027 + N[(1.061405429 / t$95$0), $MachinePrecision]), $MachinePrecision] / t$95$0), $MachinePrecision]), $MachinePrecision] / t$95$0), $MachinePrecision]), $MachinePrecision] / t$95$0), $MachinePrecision]), $MachinePrecision] * N[Exp[N[(x * (-x)), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / t$95$0), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \mathsf{fma}\left(0.3275911, \left|x\right|, 1\right)\\
1 - \frac{\left(0.254829592 + \frac{-0.284496736 + \frac{1.421413741 + \frac{-1.453152027 + \frac{1.061405429}{t\_0}}{t\_0}}{t\_0}}{t\_0}\right) \cdot e^{x \cdot \left(-x\right)}}{t\_0}
\end{array}
\end{array}
Initial program 78.9%
Applied egg-rr78.9%
Final simplification78.9%
(FPCore (x)
:precision binary64
(let* ((t_0 (fma 0.3275911 (fabs x) 1.0)))
(-
1.0
(/
(+
0.254829592
(/
(+
-0.284496736
(/ (+ 1.421413741 (/ (+ -1.453152027 (/ 1.061405429 t_0)) t_0)) t_0))
t_0))
(* t_0 (exp (* x x)))))))
/* Herbie-generated alternative: single division by t_0 * exp(x*x).
 * NOTE(review): exp(x*x) overflows for |x| ≳ 26.6; the quotient then
 * flushes to 0 and the result is exactly 1 (the correct limit). */
double code(double x) {
double t_0 = fma(0.3275911, fabs(x), 1.0); /* 1 + p*|x|, fused */
return 1.0 - ((0.254829592 + ((-0.284496736 + ((1.421413741 + ((-1.453152027 + (1.061405429 / t_0)) / t_0)) / t_0)) / t_0)) / (t_0 * exp((x * x))));
}
function code(x) t_0 = fma(0.3275911, abs(x), 1.0) return Float64(1.0 - Float64(Float64(0.254829592 + Float64(Float64(-0.284496736 + Float64(Float64(1.421413741 + Float64(Float64(-1.453152027 + Float64(1.061405429 / t_0)) / t_0)) / t_0)) / t_0)) / Float64(t_0 * exp(Float64(x * x))))) end
code[x_] := Block[{t$95$0 = N[(0.3275911 * N[Abs[x], $MachinePrecision] + 1.0), $MachinePrecision]}, N[(1.0 - N[(N[(0.254829592 + N[(N[(-0.284496736 + N[(N[(1.421413741 + N[(N[(-1.453152027 + N[(1.061405429 / t$95$0), $MachinePrecision]), $MachinePrecision] / t$95$0), $MachinePrecision]), $MachinePrecision] / t$95$0), $MachinePrecision]), $MachinePrecision] / t$95$0), $MachinePrecision]), $MachinePrecision] / N[(t$95$0 * N[Exp[N[(x * x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \mathsf{fma}\left(0.3275911, \left|x\right|, 1\right)\\
1 - \frac{0.254829592 + \frac{-0.284496736 + \frac{1.421413741 + \frac{-1.453152027 + \frac{1.061405429}{t\_0}}{t\_0}}{t\_0}}{t\_0}}{t\_0 \cdot e^{x \cdot x}}
\end{array}
\end{array}
Initial program 78.9%
Applied egg-rr78.9%
(FPCore (x)
:precision binary64
(let* ((t_0 (fma 0.3275911 (fabs x) 1.0)))
(-
1.0
(/
(+
0.254829592
(/
(+
-0.284496736
(/
(+
1.421413741
(/ (fma 1.061405429 (fma (fabs x) -0.3275911 1.0) -1.453152027) t_0))
t_0))
t_0))
(* t_0 (exp (* x x)))))))
/* Herbie-generated alternative: innermost polynomial term fused as
 * fma(1.061405429, fma(|x|, -p, 1), -1.453152027); remainder divided by
 * t_0 * exp(x*x).  Exact operation order is load-bearing — do not simplify. */
double code(double x) {
double t_0 = fma(0.3275911, fabs(x), 1.0); /* 1 + p*|x|, fused */
return 1.0 - ((0.254829592 + ((-0.284496736 + ((1.421413741 + (fma(1.061405429, fma(fabs(x), -0.3275911, 1.0), -1.453152027) / t_0)) / t_0)) / t_0)) / (t_0 * exp((x * x))));
}
function code(x) t_0 = fma(0.3275911, abs(x), 1.0) return Float64(1.0 - Float64(Float64(0.254829592 + Float64(Float64(-0.284496736 + Float64(Float64(1.421413741 + Float64(fma(1.061405429, fma(abs(x), -0.3275911, 1.0), -1.453152027) / t_0)) / t_0)) / t_0)) / Float64(t_0 * exp(Float64(x * x))))) end
code[x_] := Block[{t$95$0 = N[(0.3275911 * N[Abs[x], $MachinePrecision] + 1.0), $MachinePrecision]}, N[(1.0 - N[(N[(0.254829592 + N[(N[(-0.284496736 + N[(N[(1.421413741 + N[(N[(1.061405429 * N[(N[Abs[x], $MachinePrecision] * -0.3275911 + 1.0), $MachinePrecision] + -1.453152027), $MachinePrecision] / t$95$0), $MachinePrecision]), $MachinePrecision] / t$95$0), $MachinePrecision]), $MachinePrecision] / t$95$0), $MachinePrecision]), $MachinePrecision] / N[(t$95$0 * N[Exp[N[(x * x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \mathsf{fma}\left(0.3275911, \left|x\right|, 1\right)\\
1 - \frac{0.254829592 + \frac{-0.284496736 + \frac{1.421413741 + \frac{\mathsf{fma}\left(1.061405429, \mathsf{fma}\left(\left|x\right|, -0.3275911, 1\right), -1.453152027\right)}{t\_0}}{t\_0}}{t\_0}}{t\_0 \cdot e^{x \cdot x}}
\end{array}
\end{array}
Initial program 78.9%
Applied egg-rr78.9%
lift-fabs.f64N/A
lift-fma.f64N/A
lift-/.f64N/A
+-commutativeN/A
lift-/.f64N/A
lift-fma.f64N/A
lift-*.f64N/A
+-commutativeN/A
flip-+N/A
lift--.f64N/A
associate-/r/N/A
lower-fma.f64N/A
Applied egg-rr78.9%
Taylor expanded in x around 0
sub-negN/A
lower-fma.f64N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
lower-fabs.f64N/A
metadata-eval78.3
Simplified78.3%
(FPCore (x)
:precision binary64
(let* ((t_0 (fma 0.3275911 (fabs x) 1.0)))
(+
1.0
(*
(+
0.254829592
(+
(/ -0.284496736 t_0)
(/
(+ 1.421413741 (/ (+ -1.453152027 (/ 1.061405429 t_0)) t_0))
(* t_0 t_0))))
(/ 1.0 (- -1.0 (* 0.3275911 (fabs x))))))))
/* Herbie-generated alternative (from Taylor expansion around 0): drops the
 * exp(-x*x) factor entirely and folds the sign into 1/(-1 - p*|x|).
 * NOTE(review): per the log below this variant measures 78.3%, slightly
 * below the 78.9% baseline — accuracy/speed trade-off. */
double code(double x) {
double t_0 = fma(0.3275911, fabs(x), 1.0); /* 1 + p*|x|, fused */
return 1.0 + ((0.254829592 + ((-0.284496736 / t_0) + ((1.421413741 + ((-1.453152027 + (1.061405429 / t_0)) / t_0)) / (t_0 * t_0)))) * (1.0 / (-1.0 - (0.3275911 * fabs(x)))));
}
function code(x) t_0 = fma(0.3275911, abs(x), 1.0) return Float64(1.0 + Float64(Float64(0.254829592 + Float64(Float64(-0.284496736 / t_0) + Float64(Float64(1.421413741 + Float64(Float64(-1.453152027 + Float64(1.061405429 / t_0)) / t_0)) / Float64(t_0 * t_0)))) * Float64(1.0 / Float64(-1.0 - Float64(0.3275911 * abs(x)))))) end
code[x_] := Block[{t$95$0 = N[(0.3275911 * N[Abs[x], $MachinePrecision] + 1.0), $MachinePrecision]}, N[(1.0 + N[(N[(0.254829592 + N[(N[(-0.284496736 / t$95$0), $MachinePrecision] + N[(N[(1.421413741 + N[(N[(-1.453152027 + N[(1.061405429 / t$95$0), $MachinePrecision]), $MachinePrecision] / t$95$0), $MachinePrecision]), $MachinePrecision] / N[(t$95$0 * t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(1.0 / N[(-1.0 - N[(0.3275911 * N[Abs[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \mathsf{fma}\left(0.3275911, \left|x\right|, 1\right)\\
1 + \left(0.254829592 + \left(\frac{-0.284496736}{t\_0} + \frac{1.421413741 + \frac{-1.453152027 + \frac{1.061405429}{t\_0}}{t\_0}}{t\_0 \cdot t\_0}\right)\right) \cdot \frac{1}{-1 - 0.3275911 \cdot \left|x\right|}
\end{array}
\end{array}
Initial program 78.9%
Applied egg-rr78.9%
sqr-absN/A
lift-*.f6478.9
Applied egg-rr78.9%
Taylor expanded in x around 0
Simplified76.8%
Final simplification76.8%
(FPCore (x)
:precision binary64
(let* ((t_0 (fma 0.3275911 (fabs x) 1.0)))
(fma
(/ 1.0 t_0)
(-
-0.254829592
(/
(+
-0.284496736
(/ (+ 1.421413741 (/ (+ -1.453152027 (/ 1.061405429 t_0)) t_0)) t_0))
t_0))
1.0)))
/* Herbie-generated alternative (Taylor expansion around 0, no exp term):
 * the final combine is a single fma(1/t_0, -poly, 1).  Measures 76.7% per
 * the log below.  Exact operation order is load-bearing — do not simplify. */
double code(double x) {
double t_0 = fma(0.3275911, fabs(x), 1.0); /* 1 + p*|x|, fused */
return fma((1.0 / t_0), (-0.254829592 - ((-0.284496736 + ((1.421413741 + ((-1.453152027 + (1.061405429 / t_0)) / t_0)) / t_0)) / t_0)), 1.0);
}
function code(x) t_0 = fma(0.3275911, abs(x), 1.0) return fma(Float64(1.0 / t_0), Float64(-0.254829592 - Float64(Float64(-0.284496736 + Float64(Float64(1.421413741 + Float64(Float64(-1.453152027 + Float64(1.061405429 / t_0)) / t_0)) / t_0)) / t_0)), 1.0) end
code[x_] := Block[{t$95$0 = N[(0.3275911 * N[Abs[x], $MachinePrecision] + 1.0), $MachinePrecision]}, N[(N[(1.0 / t$95$0), $MachinePrecision] * N[(-0.254829592 - N[(N[(-0.284496736 + N[(N[(1.421413741 + N[(N[(-1.453152027 + N[(1.061405429 / t$95$0), $MachinePrecision]), $MachinePrecision] / t$95$0), $MachinePrecision]), $MachinePrecision] / t$95$0), $MachinePrecision]), $MachinePrecision] / t$95$0), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \mathsf{fma}\left(0.3275911, \left|x\right|, 1\right)\\
\mathsf{fma}\left(\frac{1}{t\_0}, -0.254829592 - \frac{-0.284496736 + \frac{1.421413741 + \frac{-1.453152027 + \frac{1.061405429}{t\_0}}{t\_0}}{t\_0}}{t\_0}, 1\right)
\end{array}
\end{array}
Initial program 78.9%
Applied egg-rr78.9%
Applied egg-rr77.7%
Taylor expanded in x around 0
+-commutativeN/A
lower-fma.f64N/A
lower-fabs.f6475.5
Simplified75.5%
Applied egg-rr76.7%
Final simplification76.7%
(FPCore (x)
:precision binary64
(let* ((t_0 (fma 0.3275911 (fabs x) 1.0)))
(-
1.0
(/
(+
0.254829592
(/
(+
-0.284496736
(/ (+ 1.421413741 (/ (+ -1.453152027 (/ 1.061405429 t_0)) t_0)) t_0))
t_0))
t_0))))
/* Herbie-generated alternative (Taylor expansion around 0): the exp(-x*x)
 * factor is dropped, leaving only nested divisions by t_0.  Measures 76.7%
 * per the log below.  Exact operation order is load-bearing. */
double code(double x) {
double t_0 = fma(0.3275911, fabs(x), 1.0); /* 1 + p*|x|, fused */
return 1.0 - ((0.254829592 + ((-0.284496736 + ((1.421413741 + ((-1.453152027 + (1.061405429 / t_0)) / t_0)) / t_0)) / t_0)) / t_0);
}
function code(x) t_0 = fma(0.3275911, abs(x), 1.0) return Float64(1.0 - Float64(Float64(0.254829592 + Float64(Float64(-0.284496736 + Float64(Float64(1.421413741 + Float64(Float64(-1.453152027 + Float64(1.061405429 / t_0)) / t_0)) / t_0)) / t_0)) / t_0)) end
code[x_] := Block[{t$95$0 = N[(0.3275911 * N[Abs[x], $MachinePrecision] + 1.0), $MachinePrecision]}, N[(1.0 - N[(N[(0.254829592 + N[(N[(-0.284496736 + N[(N[(1.421413741 + N[(N[(-1.453152027 + N[(1.061405429 / t$95$0), $MachinePrecision]), $MachinePrecision] / t$95$0), $MachinePrecision]), $MachinePrecision] / t$95$0), $MachinePrecision]), $MachinePrecision] / t$95$0), $MachinePrecision]), $MachinePrecision] / t$95$0), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \mathsf{fma}\left(0.3275911, \left|x\right|, 1\right)\\
1 - \frac{0.254829592 + \frac{-0.284496736 + \frac{1.421413741 + \frac{-1.453152027 + \frac{1.061405429}{t\_0}}{t\_0}}{t\_0}}{t\_0}}{t\_0}
\end{array}
\end{array}
Initial program 78.9%
Applied egg-rr78.9%
Applied egg-rr77.7%
Taylor expanded in x around 0
+-commutativeN/A
lower-fma.f64N/A
lower-fabs.f6475.5
Simplified75.5%
lift-fabs.f64N/A
lift-fma.f64N/A
Applied egg-rr76.7%
Final simplification76.7%
(FPCore (x) :precision binary64 1.0)
/* Degenerate Herbie alternative: per the log below, Taylor expansion of the
 * expression around infinity collapses it to the constant 1.0 (the limit of
 * the approximation as |x| grows), independent of x. */
double code(double x) {
    (void)x; /* parameter intentionally unused */
    return 1.0;
}
real(8) function code(x)
real(8), intent (in) :: x
code = 1.0d0
end function
public static double code(double x) {
return 1.0;
}
def code(x):
    """Degenerate alternative: always returns 1.0, independent of x."""
    return 1.0
function code(x) return 1.0 end
function tmp = code(x) tmp = 1.0; end
code[x_] := 1.0
\begin{array}{l}
\\
1
\end{array}
Initial program 78.9%
Applied egg-rr51.6%
Taylor expanded in x around inf
Simplified54.9%
herbie shell --seed 2024214
(FPCore (x)
:name "Jmat.Real.erf"
:precision binary64
(- 1.0 (* (* (/ 1.0 (+ 1.0 (* 0.3275911 (fabs x)))) (+ 0.254829592 (* (/ 1.0 (+ 1.0 (* 0.3275911 (fabs x)))) (+ -0.284496736 (* (/ 1.0 (+ 1.0 (* 0.3275911 (fabs x)))) (+ 1.421413741 (* (/ 1.0 (+ 1.0 (* 0.3275911 (fabs x)))) (+ -1.453152027 (* (/ 1.0 (+ 1.0 (* 0.3275911 (fabs x)))) 1.061405429))))))))) (exp (- (* (fabs x) (fabs x)))))))