Jmat.Real.dawson

Percentage Accurate: 53.8% → 100.0%
Time: 6.5s
Alternatives: 10
Speedup: 31.0×

Specification

?
\[\begin{array}{l} \\ \begin{array}{l} t_0 := \left(x \cdot x\right) \cdot \left(x \cdot x\right)\\ t_1 := t\_0 \cdot \left(x \cdot x\right)\\ t_2 := t\_1 \cdot \left(x \cdot x\right)\\ t_3 := t\_2 \cdot \left(x \cdot x\right)\\ \frac{\left(\left(\left(\left(1 + 0.1049934947 \cdot \left(x \cdot x\right)\right) + 0.0424060604 \cdot t\_0\right) + 0.0072644182 \cdot t\_1\right) + 0.0005064034 \cdot t\_2\right) + 0.0001789971 \cdot t\_3}{\left(\left(\left(\left(\left(1 + 0.7715471019 \cdot \left(x \cdot x\right)\right) + 0.2909738639 \cdot t\_0\right) + 0.0694555761 \cdot t\_1\right) + 0.0140005442 \cdot t\_2\right) + 0.0008327945 \cdot t\_3\right) + \left(2 \cdot 0.0001789971\right) \cdot \left(t\_3 \cdot \left(x \cdot x\right)\right)} \cdot x \end{array} \end{array} \]
; Specification (initial program): odd rational approximant
;   x * P(x^2) / Q(x^2)
; where t_0..t_3 are the even powers x^4, x^6, x^8, x^10.
(FPCore (x)
 :precision binary64
 (let* ((t_0 (* (* x x) (* x x)))
        (t_1 (* t_0 (* x x)))
        (t_2 (* t_1 (* x x)))
        (t_3 (* t_2 (* x x))))
   (*
    (/
     (+
      (+
       (+
        (+ (+ 1.0 (* 0.1049934947 (* x x))) (* 0.0424060604 t_0))
        (* 0.0072644182 t_1))
       (* 0.0005064034 t_2))
      (* 0.0001789971 t_3))
     (+
      (+
       (+
        (+
         (+ (+ 1.0 (* 0.7715471019 (* x x))) (* 0.2909738639 t_0))
         (* 0.0694555761 t_1))
        (* 0.0140005442 t_2))
       (* 0.0008327945 t_3))
      (* (* 2.0 0.0001789971) (* t_3 (* x x)))))
    x)))
double code(double x) {
	/* Odd rational approximation: x * P(x*x) / Q(x*x), where P and Q are
	 * degree-5 polynomials in x*x.  The floating-point operation order is
	 * exactly that of the original single-expression form, so the result
	 * is bit-identical. */
	double xx = x * x;
	double p4 = xx * xx;   /* x^4  */
	double p6 = p4 * xx;   /* x^6  */
	double p8 = p6 * xx;   /* x^8  */
	double p10 = p8 * xx;  /* x^10 */
	double num = ((((1.0 + (0.1049934947 * xx)) + (0.0424060604 * p4)) + (0.0072644182 * p6)) + (0.0005064034 * p8)) + (0.0001789971 * p10);
	double den = (((((1.0 + (0.7715471019 * xx)) + (0.2909738639 * p4)) + (0.0694555761 * p6)) + (0.0140005442 * p8)) + (0.0008327945 * p10)) + ((2.0 * 0.0001789971) * (p10 * xx));
	return (num / den) * x;
}
! NaN-aware fmax/fmin generics with C99 semantics: when exactly one
! argument is NaN, the other argument is returned (the max/min intrinsics
! alone leave NaN handling unspecified).  Mixed-kind variants promote the
! real(4) argument with dble() and return real(8).
module fmin_fmax_functions
    implicit none
    private
    public fmax
    public fmin

    ! Generic names dispatch on the kind combination of the two arguments.
    interface fmax
        module procedure fmax88
        module procedure fmax44
        module procedure fmax84
        module procedure fmax48
    end interface
    interface fmin
        module procedure fmin88
        module procedure fmin44
        module procedure fmin84
        module procedure fmin48
    end interface
contains
    ! (x /= x) is the portable "x is NaN" test.  The nested merge selects:
    ! y when x is NaN; x when y is NaN; max(x, y) otherwise.
    real(8) function fmax88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    real(4) function fmax44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    real(8) function fmax84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
    end function
    real(8) function fmax48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
    end function
    ! Same NaN-handling pattern as the fmax family, selecting min(x, y).
    real(8) function fmin88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    real(4) function fmin44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    real(8) function fmin84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
    end function
    real(8) function fmin48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
    end function
end module

! Odd rational approximation x * P(x*x) / Q(x*x) (Dawson function, per the
! report title); t_0..t_3 hold the even powers x**4, x**6, x**8, x**10.
! Fix: the original lacked `implicit none`.  The numerator/denominator are
! built with exactly the original operation order, so the floating-point
! result is bit-identical to the single-expression form.
real(8) function code(x)
use fmin_fmax_functions   ! NOTE(review): imported but unused in this body
    implicit none
    real(8), intent (in) :: x
    real(8) :: t_0   ! x**4
    real(8) :: t_1   ! x**6
    real(8) :: t_2   ! x**8
    real(8) :: t_3   ! x**10
    real(8) :: num   ! numerator polynomial P(x*x)
    real(8) :: den   ! denominator polynomial Q(x*x)
    t_0 = (x * x) * (x * x)
    t_1 = t_0 * (x * x)
    t_2 = t_1 * (x * x)
    t_3 = t_2 * (x * x)
    num = ((((1.0d0 + (0.1049934947d0 * (x * x))) + (0.0424060604d0 * t_0)) &
          + (0.0072644182d0 * t_1)) + (0.0005064034d0 * t_2)) &
          + (0.0001789971d0 * t_3)
    den = (((((1.0d0 + (0.7715471019d0 * (x * x))) + (0.2909738639d0 * t_0)) &
          + (0.0694555761d0 * t_1)) + (0.0140005442d0 * t_2)) &
          + (0.0008327945d0 * t_3)) &
          + ((2.0d0 * 0.0001789971d0) * (t_3 * (x * x)))
    code = (num / den) * x
end function
public static double code(double x) {
	// Odd rational approximation: x * P(x*x) / Q(x*x) with degree-5
	// polynomials in x*x.  Operation order matches the original
	// expression exactly, so the result is bit-identical.
	double xx = x * x;
	double p4 = xx * xx;   // x^4
	double p6 = p4 * xx;   // x^6
	double p8 = p6 * xx;   // x^8
	double p10 = p8 * xx;  // x^10
	double num = ((((1.0 + (0.1049934947 * xx)) + (0.0424060604 * p4)) + (0.0072644182 * p6)) + (0.0005064034 * p8)) + (0.0001789971 * p10);
	double den = (((((1.0 + (0.7715471019 * xx)) + (0.2909738639 * p4)) + (0.0694555761 * p6)) + (0.0140005442 * p8)) + (0.0008327945 * p10)) + ((2.0 * 0.0001789971) * (p10 * xx));
	return (num / den) * x;
}
def code(x):
	"""Odd rational approximation x * P(x*x) / Q(x*x).

	P and Q are degree-5 polynomials in x*x; the floating-point
	operation order matches the original single-expression form, so the
	result is bit-identical.
	"""
	xx = x * x
	p4 = xx * xx    # x**4
	p6 = p4 * xx    # x**6
	p8 = p6 * xx    # x**8
	p10 = p8 * xx   # x**10
	num = ((((1.0 + (0.1049934947 * xx)) + (0.0424060604 * p4)) + (0.0072644182 * p6)) + (0.0005064034 * p8)) + (0.0001789971 * p10)
	den = (((((1.0 + (0.7715471019 * xx)) + (0.2909738639 * p4)) + (0.0694555761 * p6)) + (0.0140005442 * p8)) + (0.0008327945 * p10)) + ((2.0 * 0.0001789971) * (p10 * xx))
	return (num / den) * x
function code(x)
	# Odd rational approximation: x * P(x^2) / Q(x^2).  The explicit
	# Float64() roundings and the operation order are kept exactly as in
	# the original, so results are bit-identical.
	xx = Float64(x * x)
	p4 = Float64(xx * xx)    # x^4
	p6 = Float64(p4 * xx)    # x^6
	p8 = Float64(p6 * xx)    # x^8
	p10 = Float64(p8 * xx)   # x^10
	num = Float64(Float64(Float64(Float64(Float64(1.0 + Float64(0.1049934947 * xx)) + Float64(0.0424060604 * p4)) + Float64(0.0072644182 * p6)) + Float64(0.0005064034 * p8)) + Float64(0.0001789971 * p10))
	den = Float64(Float64(Float64(Float64(Float64(Float64(1.0 + Float64(0.7715471019 * xx)) + Float64(0.2909738639 * p4)) + Float64(0.0694555761 * p6)) + Float64(0.0140005442 * p8)) + Float64(0.0008327945 * p10)) + Float64(Float64(2.0 * 0.0001789971) * Float64(p10 * xx)))
	return Float64(Float64(num / den) * x)
end
function tmp = code(x)
	% Odd rational approximation: x * P(x^2) / Q(x^2) with degree-5
	% polynomials in x^2.  Operation order matches the original
	% expression exactly, so the result is bit-identical.
	xx = x * x;
	p4 = xx * xx;    % x^4
	p6 = p4 * xx;    % x^6
	p8 = p6 * xx;    % x^8
	p10 = p8 * xx;   % x^10
	num = ((((1.0 + (0.1049934947 * xx)) + (0.0424060604 * p4)) + (0.0072644182 * p6)) + (0.0005064034 * p8)) + (0.0001789971 * p10);
	den = (((((1.0 + (0.7715471019 * xx)) + (0.2909738639 * p4)) + (0.0694555761 * p6)) + (0.0140005442 * p8)) + (0.0008327945 * p10)) + ((2.0 * 0.0001789971) * (p10 * xx));
	tmp = (num / den) * x;
end
(* Odd rational approximant x * P(x^2)/Q(x^2); every intermediate is rounded with N[..., $MachinePrecision] to mirror binary64 evaluation. *)
code[x_] := Block[{t$95$0 = N[(N[(x * x), $MachinePrecision] * N[(x * x), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(t$95$0 * N[(x * x), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$2 = N[(t$95$1 * N[(x * x), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$3 = N[(t$95$2 * N[(x * x), $MachinePrecision]), $MachinePrecision]}, N[(N[(N[(N[(N[(N[(N[(1.0 + N[(0.1049934947 * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(0.0424060604 * t$95$0), $MachinePrecision]), $MachinePrecision] + N[(0.0072644182 * t$95$1), $MachinePrecision]), $MachinePrecision] + N[(0.0005064034 * t$95$2), $MachinePrecision]), $MachinePrecision] + N[(0.0001789971 * t$95$3), $MachinePrecision]), $MachinePrecision] / N[(N[(N[(N[(N[(N[(1.0 + N[(0.7715471019 * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(0.2909738639 * t$95$0), $MachinePrecision]), $MachinePrecision] + N[(0.0694555761 * t$95$1), $MachinePrecision]), $MachinePrecision] + N[(0.0140005442 * t$95$2), $MachinePrecision]), $MachinePrecision] + N[(0.0008327945 * t$95$3), $MachinePrecision]), $MachinePrecision] + N[(N[(2.0 * 0.0001789971), $MachinePrecision] * N[(t$95$3 * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * x), $MachinePrecision]]]]]
\begin{array}{l}

\\
\begin{array}{l}
t_0 := \left(x \cdot x\right) \cdot \left(x \cdot x\right)\\
t_1 := t\_0 \cdot \left(x \cdot x\right)\\
t_2 := t\_1 \cdot \left(x \cdot x\right)\\
t_3 := t\_2 \cdot \left(x \cdot x\right)\\
\frac{\left(\left(\left(\left(1 + 0.1049934947 \cdot \left(x \cdot x\right)\right) + 0.0424060604 \cdot t\_0\right) + 0.0072644182 \cdot t\_1\right) + 0.0005064034 \cdot t\_2\right) + 0.0001789971 \cdot t\_3}{\left(\left(\left(\left(\left(1 + 0.7715471019 \cdot \left(x \cdot x\right)\right) + 0.2909738639 \cdot t\_0\right) + 0.0694555761 \cdot t\_1\right) + 0.0140005442 \cdot t\_2\right) + 0.0008327945 \cdot t\_3\right) + \left(2 \cdot 0.0001789971\right) \cdot \left(t\_3 \cdot \left(x \cdot x\right)\right)} \cdot x
\end{array}
\end{array}

Local Percentage Accuracy vs ?

The average percentage accuracy by input value. Horizontal axis shows the value of an input variable; the variable is chosen in the title. Vertical axis is accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion. These can be toggled with buttons below the plot. The line is an average, while dots represent individual samples.

Accuracy vs Speed?

Herbie found 10 alternatives:

AlternativeAccuracySpeedup
The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed-accuracy tradeoffs.

Initial Program: 53.8% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_0 := \left(x \cdot x\right) \cdot \left(x \cdot x\right)\\ t_1 := t\_0 \cdot \left(x \cdot x\right)\\ t_2 := t\_1 \cdot \left(x \cdot x\right)\\ t_3 := t\_2 \cdot \left(x \cdot x\right)\\ \frac{\left(\left(\left(\left(1 + 0.1049934947 \cdot \left(x \cdot x\right)\right) + 0.0424060604 \cdot t\_0\right) + 0.0072644182 \cdot t\_1\right) + 0.0005064034 \cdot t\_2\right) + 0.0001789971 \cdot t\_3}{\left(\left(\left(\left(\left(1 + 0.7715471019 \cdot \left(x \cdot x\right)\right) + 0.2909738639 \cdot t\_0\right) + 0.0694555761 \cdot t\_1\right) + 0.0140005442 \cdot t\_2\right) + 0.0008327945 \cdot t\_3\right) + \left(2 \cdot 0.0001789971\right) \cdot \left(t\_3 \cdot \left(x \cdot x\right)\right)} \cdot x \end{array} \end{array} \]
; Initial program (53.8% accurate): odd rational approximant
;   x * P(x^2) / Q(x^2)
; where t_0..t_3 are the even powers x^4, x^6, x^8, x^10.
(FPCore (x)
 :precision binary64
 (let* ((t_0 (* (* x x) (* x x)))
        (t_1 (* t_0 (* x x)))
        (t_2 (* t_1 (* x x)))
        (t_3 (* t_2 (* x x))))
   (*
    (/
     (+
      (+
       (+
        (+ (+ 1.0 (* 0.1049934947 (* x x))) (* 0.0424060604 t_0))
        (* 0.0072644182 t_1))
       (* 0.0005064034 t_2))
      (* 0.0001789971 t_3))
     (+
      (+
       (+
        (+
         (+ (+ 1.0 (* 0.7715471019 (* x x))) (* 0.2909738639 t_0))
         (* 0.0694555761 t_1))
        (* 0.0140005442 t_2))
       (* 0.0008327945 t_3))
      (* (* 2.0 0.0001789971) (* t_3 (* x x)))))
    x)))
/* Initial program: odd rational approximation x * P(x*x) / Q(x*x);
 * t_0..t_3 are the even powers x^4, x^6, x^8, x^10. */
double code(double x) {
	double t_0 = (x * x) * (x * x); /* x^4  */
	double t_1 = t_0 * (x * x);     /* x^6  */
	double t_2 = t_1 * (x * x);     /* x^8  */
	double t_3 = t_2 * (x * x);     /* x^10 */
	return ((((((1.0 + (0.1049934947 * (x * x))) + (0.0424060604 * t_0)) + (0.0072644182 * t_1)) + (0.0005064034 * t_2)) + (0.0001789971 * t_3)) / ((((((1.0 + (0.7715471019 * (x * x))) + (0.2909738639 * t_0)) + (0.0694555761 * t_1)) + (0.0140005442 * t_2)) + (0.0008327945 * t_3)) + ((2.0 * 0.0001789971) * (t_3 * (x * x))))) * x;
}
! NaN-aware fmax/fmin generics with C99 semantics: when exactly one
! argument is NaN, the other argument is returned (the max/min intrinsics
! alone leave NaN handling unspecified).  Mixed-kind variants promote the
! real(4) argument with dble() and return real(8).
module fmin_fmax_functions
    implicit none
    private
    public fmax
    public fmin

    ! Generic names dispatch on the kind combination of the two arguments.
    interface fmax
        module procedure fmax88
        module procedure fmax44
        module procedure fmax84
        module procedure fmax48
    end interface
    interface fmin
        module procedure fmin88
        module procedure fmin44
        module procedure fmin84
        module procedure fmin48
    end interface
contains
    ! (x /= x) is the portable "x is NaN" test.  The nested merge selects:
    ! y when x is NaN; x when y is NaN; max(x, y) otherwise.
    real(8) function fmax88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    real(4) function fmax44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    real(8) function fmax84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
    end function
    real(8) function fmax48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
    end function
    ! Same NaN-handling pattern as the fmax family, selecting min(x, y).
    real(8) function fmin88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    real(4) function fmin44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    real(8) function fmin84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
    end function
    real(8) function fmin48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
    end function
end module

! Odd rational approximation x * P(x*x) / Q(x*x) (Dawson function, per the
! report title); t_0..t_3 hold the even powers x**4, x**6, x**8, x**10.
! NOTE(review): fmin_fmax_functions is imported but unused in this body,
! and there is no `implicit none` (all names used are declared).
real(8) function code(x)
use fmin_fmax_functions
    real(8), intent (in) :: x
    real(8) :: t_0   ! x**4
    real(8) :: t_1   ! x**6
    real(8) :: t_2   ! x**8
    real(8) :: t_3   ! x**10
    t_0 = (x * x) * (x * x)
    t_1 = t_0 * (x * x)
    t_2 = t_1 * (x * x)
    t_3 = t_2 * (x * x)
    code = ((((((1.0d0 + (0.1049934947d0 * (x * x))) + (0.0424060604d0 * t_0)) + (0.0072644182d0 * t_1)) + (0.0005064034d0 * t_2)) + (0.0001789971d0 * t_3)) / ((((((1.0d0 + (0.7715471019d0 * (x * x))) + (0.2909738639d0 * t_0)) + (0.0694555761d0 * t_1)) + (0.0140005442d0 * t_2)) + (0.0008327945d0 * t_3)) + ((2.0d0 * 0.0001789971d0) * (t_3 * (x * x))))) * x
end function
/** Odd rational approximation x * P(x*x) / Q(x*x); P and Q are degree-5
 *  polynomials in x*x built from the even powers t_0..t_3. */
public static double code(double x) {
	double t_0 = (x * x) * (x * x); // x^4
	double t_1 = t_0 * (x * x);     // x^6
	double t_2 = t_1 * (x * x);     // x^8
	double t_3 = t_2 * (x * x);     // x^10
	return ((((((1.0 + (0.1049934947 * (x * x))) + (0.0424060604 * t_0)) + (0.0072644182 * t_1)) + (0.0005064034 * t_2)) + (0.0001789971 * t_3)) / ((((((1.0 + (0.7715471019 * (x * x))) + (0.2909738639 * t_0)) + (0.0694555761 * t_1)) + (0.0140005442 * t_2)) + (0.0008327945 * t_3)) + ((2.0 * 0.0001789971) * (t_3 * (x * x))))) * x;
}
def code(x):
	"""Odd rational approximation x * P(x*x) / Q(x*x); P and Q are
	degree-5 polynomials in x*x built from the even powers t_0..t_3."""
	t_0 = (x * x) * (x * x)  # x**4
	t_1 = t_0 * (x * x)      # x**6
	t_2 = t_1 * (x * x)      # x**8
	t_3 = t_2 * (x * x)      # x**10
	return ((((((1.0 + (0.1049934947 * (x * x))) + (0.0424060604 * t_0)) + (0.0072644182 * t_1)) + (0.0005064034 * t_2)) + (0.0001789971 * t_3)) / ((((((1.0 + (0.7715471019 * (x * x))) + (0.2909738639 * t_0)) + (0.0694555761 * t_1)) + (0.0140005442 * t_2)) + (0.0008327945 * t_3)) + ((2.0 * 0.0001789971) * (t_3 * (x * x))))) * x
# Odd rational approximation x * P(x^2) / Q(x^2); explicit Float64()
# roundings mirror binary64 evaluation at every step.
function code(x)
	t_0 = Float64(Float64(x * x) * Float64(x * x))  # x^4
	t_1 = Float64(t_0 * Float64(x * x))             # x^6
	t_2 = Float64(t_1 * Float64(x * x))             # x^8
	t_3 = Float64(t_2 * Float64(x * x))             # x^10
	return Float64(Float64(Float64(Float64(Float64(Float64(Float64(1.0 + Float64(0.1049934947 * Float64(x * x))) + Float64(0.0424060604 * t_0)) + Float64(0.0072644182 * t_1)) + Float64(0.0005064034 * t_2)) + Float64(0.0001789971 * t_3)) / Float64(Float64(Float64(Float64(Float64(Float64(1.0 + Float64(0.7715471019 * Float64(x * x))) + Float64(0.2909738639 * t_0)) + Float64(0.0694555761 * t_1)) + Float64(0.0140005442 * t_2)) + Float64(0.0008327945 * t_3)) + Float64(Float64(2.0 * 0.0001789971) * Float64(t_3 * Float64(x * x))))) * x)
end
function tmp = code(x)
	% Odd rational approximation x * P(x^2) / Q(x^2); t_0..t_3 hold the
	% even powers x^4, x^6, x^8, x^10.
	t_0 = (x * x) * (x * x);
	t_1 = t_0 * (x * x);
	t_2 = t_1 * (x * x);
	t_3 = t_2 * (x * x);
	tmp = ((((((1.0 + (0.1049934947 * (x * x))) + (0.0424060604 * t_0)) + (0.0072644182 * t_1)) + (0.0005064034 * t_2)) + (0.0001789971 * t_3)) / ((((((1.0 + (0.7715471019 * (x * x))) + (0.2909738639 * t_0)) + (0.0694555761 * t_1)) + (0.0140005442 * t_2)) + (0.0008327945 * t_3)) + ((2.0 * 0.0001789971) * (t_3 * (x * x))))) * x;
end
(* Odd rational approximant x * P(x^2)/Q(x^2); every intermediate is rounded with N[..., $MachinePrecision] to mirror binary64 evaluation. *)
code[x_] := Block[{t$95$0 = N[(N[(x * x), $MachinePrecision] * N[(x * x), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(t$95$0 * N[(x * x), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$2 = N[(t$95$1 * N[(x * x), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$3 = N[(t$95$2 * N[(x * x), $MachinePrecision]), $MachinePrecision]}, N[(N[(N[(N[(N[(N[(N[(1.0 + N[(0.1049934947 * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(0.0424060604 * t$95$0), $MachinePrecision]), $MachinePrecision] + N[(0.0072644182 * t$95$1), $MachinePrecision]), $MachinePrecision] + N[(0.0005064034 * t$95$2), $MachinePrecision]), $MachinePrecision] + N[(0.0001789971 * t$95$3), $MachinePrecision]), $MachinePrecision] / N[(N[(N[(N[(N[(N[(1.0 + N[(0.7715471019 * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(0.2909738639 * t$95$0), $MachinePrecision]), $MachinePrecision] + N[(0.0694555761 * t$95$1), $MachinePrecision]), $MachinePrecision] + N[(0.0140005442 * t$95$2), $MachinePrecision]), $MachinePrecision] + N[(0.0008327945 * t$95$3), $MachinePrecision]), $MachinePrecision] + N[(N[(2.0 * 0.0001789971), $MachinePrecision] * N[(t$95$3 * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * x), $MachinePrecision]]]]]
\begin{array}{l}

\\
\begin{array}{l}
t_0 := \left(x \cdot x\right) \cdot \left(x \cdot x\right)\\
t_1 := t\_0 \cdot \left(x \cdot x\right)\\
t_2 := t\_1 \cdot \left(x \cdot x\right)\\
t_3 := t\_2 \cdot \left(x \cdot x\right)\\
\frac{\left(\left(\left(\left(1 + 0.1049934947 \cdot \left(x \cdot x\right)\right) + 0.0424060604 \cdot t\_0\right) + 0.0072644182 \cdot t\_1\right) + 0.0005064034 \cdot t\_2\right) + 0.0001789971 \cdot t\_3}{\left(\left(\left(\left(\left(1 + 0.7715471019 \cdot \left(x \cdot x\right)\right) + 0.2909738639 \cdot t\_0\right) + 0.0694555761 \cdot t\_1\right) + 0.0140005442 \cdot t\_2\right) + 0.0008327945 \cdot t\_3\right) + \left(2 \cdot 0.0001789971\right) \cdot \left(t\_3 \cdot \left(x \cdot x\right)\right)} \cdot x
\end{array}
\end{array}

Alternative 1: 100.0% accurate, 1.1× speedup?

\[\begin{array}{l} x\_m = \left|x\right| \\ x\_s = \mathsf{copysign}\left(1, x\right) \\ \begin{array}{l} t_0 := \left(x\_m \cdot x\_m\right) \cdot x\_m\\ t_1 := \left(x\_m \cdot x\_m\right) \cdot \left(x\_m \cdot x\_m\right)\\ t_2 := t\_1 \cdot \left(x\_m \cdot x\_m\right)\\ t_3 := t\_2 \cdot \left(x\_m \cdot x\_m\right)\\ t_4 := t\_3 \cdot \left(x\_m \cdot x\_m\right)\\ x\_s \cdot \begin{array}{l} \mathbf{if}\;x\_m \leq 6000:\\ \;\;\;\;\frac{\mathsf{fma}\left(0.0001789971 \cdot {x\_m}^{8}, x\_m \cdot x\_m, \mathsf{fma}\left({x\_m}^{8}, 0.0005064034, \mathsf{fma}\left(t\_0 \cdot t\_0, 0.0072644182, \mathsf{fma}\left(\mathsf{fma}\left(0.0424060604, x\_m \cdot x\_m, 0.1049934947\right), x\_m \cdot x\_m, 1\right)\right)\right)\right)}{\left(\left(\left(\left(\left(1 + 0.7715471019 \cdot \left(x\_m \cdot x\_m\right)\right) + 0.2909738639 \cdot t\_1\right) + 0.0694555761 \cdot t\_2\right) + 0.0140005442 \cdot t\_3\right) + 0.0008327945 \cdot t\_4\right) + \left(2 \cdot 0.0001789971\right) \cdot \left(t\_4 \cdot \left(x\_m \cdot x\_m\right)\right)} \cdot x\_m\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{0.2514179000665374}{x\_m \cdot x\_m}}{x\_m} + \frac{0.5}{x\_m}\\ \end{array} \end{array} \end{array} \]
x\_m = (fabs.f64 x)
x\_s = (copysign.f64 #s(literal 1 binary64) x)
; Alternative 1: evaluated on x_m = |x| with the sign x_s reapplied at the
; end (the function is odd).  For x_m <= 6000 a rational form with an
; fma-compensated numerator is used; beyond that, the asymptotic tail
; 0.5/x + 0.2514179000665374/x^3.
(FPCore (x_s x_m)
 :precision binary64
 (let* ((t_0 (* (* x_m x_m) x_m))
        (t_1 (* (* x_m x_m) (* x_m x_m)))
        (t_2 (* t_1 (* x_m x_m)))
        (t_3 (* t_2 (* x_m x_m)))
        (t_4 (* t_3 (* x_m x_m))))
   (*
    x_s
    (if (<= x_m 6000.0)
      (*
       (/
        (fma
         (* 0.0001789971 (pow x_m 8.0))
         (* x_m x_m)
         (fma
          (pow x_m 8.0)
          0.0005064034
          (fma
           (* t_0 t_0)
           0.0072644182
           (fma (fma 0.0424060604 (* x_m x_m) 0.1049934947) (* x_m x_m) 1.0))))
        (+
         (+
          (+
           (+
            (+ (+ 1.0 (* 0.7715471019 (* x_m x_m))) (* 0.2909738639 t_1))
            (* 0.0694555761 t_2))
           (* 0.0140005442 t_3))
          (* 0.0008327945 t_4))
         (* (* 2.0 0.0001789971) (* t_4 (* x_m x_m)))))
       x_m)
      (+ (/ (/ 0.2514179000665374 (* x_m x_m)) x_m) (/ 0.5 x_m))))))
x\_m = fabs(x);
x\_s = copysign(1.0, x);
/* Alternative 1: evaluate the odd approximant on x_m = |x| and reapply
 * the sign x_s = copysign(1, x) at the end.  For x_m > 6000 an
 * asymptotic tail 0.5/x + 0.2514179000665374/x^3 is used instead of the
 * rational form with its high powers of x_m. */
double code(double x_s, double x_m) {
	double t_0 = (x_m * x_m) * x_m;           /* x^3 (t_0*t_0 = x^6) */
	double t_1 = (x_m * x_m) * (x_m * x_m);   /* x^4  */
	double t_2 = t_1 * (x_m * x_m);           /* x^6  */
	double t_3 = t_2 * (x_m * x_m);           /* x^8  */
	double t_4 = t_3 * (x_m * x_m);           /* x^10 */
	double tmp;
	if (x_m <= 6000.0) {
		/* fma-compensated numerator over the original denominator. */
		tmp = (fma((0.0001789971 * pow(x_m, 8.0)), (x_m * x_m), fma(pow(x_m, 8.0), 0.0005064034, fma((t_0 * t_0), 0.0072644182, fma(fma(0.0424060604, (x_m * x_m), 0.1049934947), (x_m * x_m), 1.0)))) / ((((((1.0 + (0.7715471019 * (x_m * x_m))) + (0.2909738639 * t_1)) + (0.0694555761 * t_2)) + (0.0140005442 * t_3)) + (0.0008327945 * t_4)) + ((2.0 * 0.0001789971) * (t_4 * (x_m * x_m))))) * x_m;
	} else {
		/* Asymptotic form for large |x|. */
		tmp = ((0.2514179000665374 / (x_m * x_m)) / x_m) + (0.5 / x_m);
	}
	return x_s * tmp;
}
x\_m = abs(x)
x\_s = copysign(1.0, x)
# Julia port of the Herbie main alternative (same structure as the C
# version above).  Inputs: x_m = |x|, x_s = copysign(1, x); returns
# x_s * f(x_m).  The explicit Float64(...) wrappers pin every
# intermediate to binary64, so the rounding sequence matches the C port.
function code(x_s, x_m)
	# Power ladder: t_0 = x^3, t_1 = x^4, t_2 = x^6, t_3 = x^8, t_4 = x^10.
	t_0 = Float64(Float64(x_m * x_m) * x_m)
	t_1 = Float64(Float64(x_m * x_m) * Float64(x_m * x_m))
	t_2 = Float64(t_1 * Float64(x_m * x_m))
	t_3 = Float64(t_2 * Float64(x_m * x_m))
	t_4 = Float64(t_3 * Float64(x_m * x_m))
	tmp = 0.0
	if (x_m <= 6000.0)
		# Main regime: fma-chained numerator over the left-associated
		# denominator, scaled by x_m.
		tmp = Float64(Float64(fma(Float64(0.0001789971 * (x_m ^ 8.0)), Float64(x_m * x_m), fma((x_m ^ 8.0), 0.0005064034, fma(Float64(t_0 * t_0), 0.0072644182, fma(fma(0.0424060604, Float64(x_m * x_m), 0.1049934947), Float64(x_m * x_m), 1.0)))) / Float64(Float64(Float64(Float64(Float64(Float64(1.0 + Float64(0.7715471019 * Float64(x_m * x_m))) + Float64(0.2909738639 * t_1)) + Float64(0.0694555761 * t_2)) + Float64(0.0140005442 * t_3)) + Float64(0.0008327945 * t_4)) + Float64(Float64(2.0 * 0.0001789971) * Float64(t_4 * Float64(x_m * x_m))))) * x_m);
	else
		# Tail regime: 0.5/x + c/x^3, the cubic term computed as (c/x^2)/x.
		tmp = Float64(Float64(Float64(0.2514179000665374 / Float64(x_m * x_m)) / x_m) + Float64(0.5 / x_m));
	end
	return Float64(x_s * tmp)
end
x\_m = N[Abs[x], $MachinePrecision]
x\_s = N[With[{TMP1 = Abs[1.0], TMP2 = Sign[x]}, TMP1 * If[TMP2 == 0, 1, TMP2]], $MachinePrecision]
(* Wolfram Language port of the same two-regime approximation.  Each
   intermediate is wrapped in N[..., $MachinePrecision] so evaluation
   rounds step-by-step like the binary64 ports.  Names x$95$s / x$95$m
   encode x_s / x_m ("_" escaped as its ASCII code 95). *)
code[x$95$s_, x$95$m_] := Block[{t$95$0 = N[(N[(x$95$m * x$95$m), $MachinePrecision] * x$95$m), $MachinePrecision]}, Block[{t$95$1 = N[(N[(x$95$m * x$95$m), $MachinePrecision] * N[(x$95$m * x$95$m), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$2 = N[(t$95$1 * N[(x$95$m * x$95$m), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$3 = N[(t$95$2 * N[(x$95$m * x$95$m), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$4 = N[(t$95$3 * N[(x$95$m * x$95$m), $MachinePrecision]), $MachinePrecision]}, N[(x$95$s * If[LessEqual[x$95$m, 6000.0], N[(N[(N[(N[(0.0001789971 * N[Power[x$95$m, 8.0], $MachinePrecision]), $MachinePrecision] * N[(x$95$m * x$95$m), $MachinePrecision] + N[(N[Power[x$95$m, 8.0], $MachinePrecision] * 0.0005064034 + N[(N[(t$95$0 * t$95$0), $MachinePrecision] * 0.0072644182 + N[(N[(0.0424060604 * N[(x$95$m * x$95$m), $MachinePrecision] + 0.1049934947), $MachinePrecision] * N[(x$95$m * x$95$m), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(N[(N[(N[(N[(N[(1.0 + N[(0.7715471019 * N[(x$95$m * x$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(0.2909738639 * t$95$1), $MachinePrecision]), $MachinePrecision] + N[(0.0694555761 * t$95$2), $MachinePrecision]), $MachinePrecision] + N[(0.0140005442 * t$95$3), $MachinePrecision]), $MachinePrecision] + N[(0.0008327945 * t$95$4), $MachinePrecision]), $MachinePrecision] + N[(N[(2.0 * 0.0001789971), $MachinePrecision] * N[(t$95$4 * N[(x$95$m * x$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * x$95$m), $MachinePrecision], N[(N[(N[(0.2514179000665374 / N[(x$95$m * x$95$m), $MachinePrecision]), $MachinePrecision] / x$95$m), $MachinePrecision] + N[(0.5 / x$95$m), $MachinePrecision]), $MachinePrecision]]), $MachinePrecision]]]]]]
\begin{array}{l}
x\_m = \left|x\right|
\\
x\_s = \mathsf{copysign}\left(1, x\right)

\\
\begin{array}{l}
t_0 := \left(x\_m \cdot x\_m\right) \cdot x\_m\\
t_1 := \left(x\_m \cdot x\_m\right) \cdot \left(x\_m \cdot x\_m\right)\\
t_2 := t\_1 \cdot \left(x\_m \cdot x\_m\right)\\
t_3 := t\_2 \cdot \left(x\_m \cdot x\_m\right)\\
t_4 := t\_3 \cdot \left(x\_m \cdot x\_m\right)\\
x\_s \cdot \begin{array}{l}
\mathbf{if}\;x\_m \leq 6000:\\
\;\;\;\;\frac{\mathsf{fma}\left(0.0001789971 \cdot {x\_m}^{8}, x\_m \cdot x\_m, \mathsf{fma}\left({x\_m}^{8}, 0.0005064034, \mathsf{fma}\left(t\_0 \cdot t\_0, 0.0072644182, \mathsf{fma}\left(\mathsf{fma}\left(0.0424060604, x\_m \cdot x\_m, 0.1049934947\right), x\_m \cdot x\_m, 1\right)\right)\right)\right)}{\left(\left(\left(\left(\left(1 + 0.7715471019 \cdot \left(x\_m \cdot x\_m\right)\right) + 0.2909738639 \cdot t\_1\right) + 0.0694555761 \cdot t\_2\right) + 0.0140005442 \cdot t\_3\right) + 0.0008327945 \cdot t\_4\right) + \left(2 \cdot 0.0001789971\right) \cdot \left(t\_4 \cdot \left(x\_m \cdot x\_m\right)\right)} \cdot x\_m\\

\mathbf{else}:\\
\;\;\;\;\frac{\frac{0.2514179000665374}{x\_m \cdot x\_m}}{x\_m} + \frac{0.5}{x\_m}\\


\end{array}
\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if x < 6e3

    1. Initial program 53.8%

      \[\frac{\left(\left(\left(\left(1 + 0.1049934947 \cdot \left(x \cdot x\right)\right) + 0.0424060604 \cdot \left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0072644182 \cdot \left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0005064034 \cdot \left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0001789971 \cdot \left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)}{\left(\left(\left(\left(\left(1 + 0.7715471019 \cdot \left(x \cdot x\right)\right) + 0.2909738639 \cdot \left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0694555761 \cdot \left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0140005442 \cdot \left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0008327945 \cdot \left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + \left(2 \cdot 0.0001789971\right) \cdot \left(\left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)} \cdot x \]
    2. Applied rewrites (53.8%)

      \[\leadsto \frac{\color{blue}{\mathsf{fma}\left(0.0001789971 \cdot {x}^{8}, x \cdot x, \mathsf{fma}\left({x}^{8}, 0.0005064034, \mathsf{fma}\left(\left(\left(x \cdot x\right) \cdot x\right) \cdot \left(\left(x \cdot x\right) \cdot x\right), 0.0072644182, \mathsf{fma}\left(\mathsf{fma}\left(0.0424060604, x \cdot x, 0.1049934947\right), x \cdot x, 1\right)\right)\right)\right)}}{\left(\left(\left(\left(\left(1 + 0.7715471019 \cdot \left(x \cdot x\right)\right) + 0.2909738639 \cdot \left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0694555761 \cdot \left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0140005442 \cdot \left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0008327945 \cdot \left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + \left(2 \cdot 0.0001789971\right) \cdot \left(\left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)} \cdot x \]

    if 6e3 < x

    1. Initial program 53.8%

      \[\frac{\left(\left(\left(\left(1 + 0.1049934947 \cdot \left(x \cdot x\right)\right) + 0.0424060604 \cdot \left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0072644182 \cdot \left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0005064034 \cdot \left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0001789971 \cdot \left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)}{\left(\left(\left(\left(\left(1 + 0.7715471019 \cdot \left(x \cdot x\right)\right) + 0.2909738639 \cdot \left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0694555761 \cdot \left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0140005442 \cdot \left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0008327945 \cdot \left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + \left(2 \cdot 0.0001789971\right) \cdot \left(\left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)} \cdot x \]
    2. Applied rewrites (53.8%)

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left({x}^{10}, 0.0001789971, \mathsf{fma}\left({x}^{8}, 0.0005064034, \mathsf{fma}\left(\left(\left(x \cdot x\right) \cdot x\right) \cdot \left(\left(x \cdot x\right) \cdot x\right), 0.0072644182, \mathsf{fma}\left(\mathsf{fma}\left(0.0424060604, x \cdot x, 0.1049934947\right), x \cdot x, 1\right)\right)\right)\right) \cdot x}{\mathsf{fma}\left({x}^{12}, 0.0003579942, \mathsf{fma}\left({x}^{10}, 0.0008327945, \mathsf{fma}\left({x}^{8}, 0.0140005442, \mathsf{fma}\left(0.0694555761, \left(\left(x \cdot x\right) \cdot x\right) \cdot \left(\left(x \cdot x\right) \cdot x\right), \mathsf{fma}\left(\mathsf{fma}\left(0.2909738639, x \cdot x, 0.7715471019\right), x \cdot x, 1\right)\right)\right)\right)\right)}} \]
    3. Taylor expanded in x around inf

      \[\leadsto \color{blue}{\frac{\frac{1}{2} + \frac{600041}{2386628} \cdot \frac{1}{{x}^{2}}}{x}} \]
    4. Step-by-step derivation
      1. lower-/.f64 (N/A)

        \[\leadsto \frac{\frac{1}{2} + \frac{600041}{2386628} \cdot \frac{1}{{x}^{2}}}{\color{blue}{x}} \]
      2. +-commutative (N/A)

        \[\leadsto \frac{\frac{600041}{2386628} \cdot \frac{1}{{x}^{2}} + \frac{1}{2}}{x} \]
      3. lower-+.f64 (N/A)

        \[\leadsto \frac{\frac{600041}{2386628} \cdot \frac{1}{{x}^{2}} + \frac{1}{2}}{x} \]
      4. associate-*r/ (N/A)

        \[\leadsto \frac{\frac{\frac{600041}{2386628} \cdot 1}{{x}^{2}} + \frac{1}{2}}{x} \]
      5. metadata-eval (N/A)

        \[\leadsto \frac{\frac{\frac{600041}{2386628}}{{x}^{2}} + \frac{1}{2}}{x} \]
      6. pow2 (N/A)

        \[\leadsto \frac{\frac{\frac{600041}{2386628}}{x \cdot x} + \frac{1}{2}}{x} \]
      7. lift-/.f64 (N/A)

        \[\leadsto \frac{\frac{\frac{600041}{2386628}}{x \cdot x} + \frac{1}{2}}{x} \]
      8. lift-*.f64 (51.4%)

        \[\leadsto \frac{\frac{0.2514179000665374}{x \cdot x} + 0.5}{x} \]
    5. Applied rewrites (51.4%)

      \[\leadsto \color{blue}{\frac{\frac{0.2514179000665374}{x \cdot x} + 0.5}{x}} \]
    6. Step-by-step derivation
      1. lift-/.f64 (N/A)

        \[\leadsto \frac{\frac{\frac{600041}{2386628}}{x \cdot x} + \frac{1}{2}}{\color{blue}{x}} \]
      2. lift-+.f64 (N/A)

        \[\leadsto \frac{\frac{\frac{600041}{2386628}}{x \cdot x} + \frac{1}{2}}{x} \]
      3. div-add (N/A)

        \[\leadsto \frac{\frac{\frac{600041}{2386628}}{x \cdot x}}{x} + \color{blue}{\frac{\frac{1}{2}}{x}} \]
      4. lift-*.f64 (N/A)

        \[\leadsto \frac{\frac{\frac{600041}{2386628}}{x \cdot x}}{x} + \frac{\frac{1}{2}}{x} \]
      5. lift-/.f64 (N/A)

        \[\leadsto \frac{\frac{\frac{600041}{2386628}}{x \cdot x}}{x} + \frac{\frac{1}{2}}{x} \]
      6. metadata-eval (N/A)

        \[\leadsto \frac{\frac{\frac{600041}{2386628} \cdot 1}{x \cdot x}}{x} + \frac{\frac{1}{2}}{x} \]
      7. associate-*r/ (N/A)

        \[\leadsto \frac{\frac{600041}{2386628} \cdot \frac{1}{x \cdot x}}{x} + \frac{\frac{1}{2}}{x} \]
      8. pow2 (N/A)

        \[\leadsto \frac{\frac{600041}{2386628} \cdot \frac{1}{{x}^{2}}}{x} + \frac{\frac{1}{2}}{x} \]
      9. lower-+.f64 (N/A)

        \[\leadsto \frac{\frac{600041}{2386628} \cdot \frac{1}{{x}^{2}}}{x} + \color{blue}{\frac{\frac{1}{2}}{x}} \]
      10. lower-/.f64 (N/A)

        \[\leadsto \frac{\frac{600041}{2386628} \cdot \frac{1}{{x}^{2}}}{x} + \frac{\color{blue}{\frac{1}{2}}}{x} \]
      11. pow2 (N/A)

        \[\leadsto \frac{\frac{600041}{2386628} \cdot \frac{1}{x \cdot x}}{x} + \frac{\frac{1}{2}}{x} \]
      12. associate-*r/ (N/A)

        \[\leadsto \frac{\frac{\frac{600041}{2386628} \cdot 1}{x \cdot x}}{x} + \frac{\frac{1}{2}}{x} \]
      13. metadata-eval (N/A)

        \[\leadsto \frac{\frac{\frac{600041}{2386628}}{x \cdot x}}{x} + \frac{\frac{1}{2}}{x} \]
      14. lift-/.f64 (N/A)

        \[\leadsto \frac{\frac{\frac{600041}{2386628}}{x \cdot x}}{x} + \frac{\frac{1}{2}}{x} \]
      15. lift-*.f64 (N/A)

        \[\leadsto \frac{\frac{\frac{600041}{2386628}}{x \cdot x}}{x} + \frac{\frac{1}{2}}{x} \]
      16. lift-/.f64 (51.4%)

        \[\leadsto \frac{\frac{0.2514179000665374}{x \cdot x}}{x} + \frac{0.5}{\color{blue}{x}} \]
    7. Applied rewrites (51.4%)

      \[\leadsto \frac{\frac{0.2514179000665374}{x \cdot x}}{x} + \color{blue}{\frac{0.5}{x}} \]
  3. Recombined 2 regimes into one program.
  4. Add Preprocessing

Alternative 2: 99.9% accurate, 1.5× speedup

\[\begin{array}{l} x\_m = \left|x\right| \\ x\_s = \mathsf{copysign}\left(1, x\right) \\ \begin{array}{l} t_0 := \left(x\_m \cdot x\_m\right) \cdot x\_m\\ x\_s \cdot \begin{array}{l} \mathbf{if}\;x\_m \leq 400:\\ \;\;\;\;\frac{\left({x\_m}^{8} \cdot \mathsf{fma}\left(x\_m \cdot x\_m, 0.0001789971, 0.0005064034\right) + \mathsf{fma}\left(0.0072644182 \cdot \left(\left(x\_m \cdot x\_m\right) \cdot \left(x\_m \cdot x\_m\right)\right), x\_m \cdot x\_m, \mathsf{fma}\left(x\_m \cdot x\_m, \mathsf{fma}\left(0.0424060604 \cdot x\_m, x\_m, 0.1049934947\right), 1\right)\right)\right) \cdot x\_m}{\mathsf{fma}\left({x\_m}^{12}, 0.0003579942, \mathsf{fma}\left({x\_m}^{10}, 0.0008327945, \mathsf{fma}\left({x\_m}^{8}, 0.0140005442, \mathsf{fma}\left(0.0694555761, t\_0 \cdot t\_0, \mathsf{fma}\left(\mathsf{fma}\left(0.2909738639, x\_m \cdot x\_m, 0.7715471019\right), x\_m \cdot x\_m, 1\right)\right)\right)\right)\right)}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{0.2514179000665374}{x\_m \cdot x\_m}}{x\_m} + \frac{0.5}{x\_m}\\ \end{array} \end{array} \end{array} \]
x\_m = (fabs.f64 x)
x\_s = (copysign.f64 #s(literal 1 binary64) x)
; Herbie "Alternative 2" (99.9% accurate, 1.5x speedup): same odd-symmetry
; reconstruction x_s * f(x_m), but the regime split is moved to x_m = 400
; and BOTH numerator and denominator of the main regime are restructured
; fma/pow chains (denominator now includes an explicit x^12 term,
; coefficient 0.0003579942 = 2 * 0.0001789971).  t_0 = x_m^3, so
; t_0 * t_0 supplies the x^6 denominator term.  Tail regime is identical
; to the main alternative.
(FPCore (x_s x_m)
 :precision binary64
 (let* ((t_0 (* (* x_m x_m) x_m)))
   (*
    x_s
    (if (<= x_m 400.0)
      (/
       (*
        (+
         (* (pow x_m 8.0) (fma (* x_m x_m) 0.0001789971 0.0005064034))
         (fma
          (* 0.0072644182 (* (* x_m x_m) (* x_m x_m)))
          (* x_m x_m)
          (fma (* x_m x_m) (fma (* 0.0424060604 x_m) x_m 0.1049934947) 1.0)))
        x_m)
       (fma
        (pow x_m 12.0)
        0.0003579942
        (fma
         (pow x_m 10.0)
         0.0008327945
         (fma
          (pow x_m 8.0)
          0.0140005442
          (fma
           0.0694555761
           (* t_0 t_0)
           (fma
            (fma 0.2909738639 (* x_m x_m) 0.7715471019)
            (* x_m x_m)
            1.0))))))
      (+ (/ (/ 0.2514179000665374 (* x_m x_m)) x_m) (/ 0.5 x_m))))))
x\_m = fabs(x);
x\_s = copysign(1.0, x);
/* C port of Herbie "Alternative 2" (99.9% accurate, 1.5x speedup).
 * Same contract as the main alternative: x_m = fabs(x),
 * x_s = copysign(1.0, x), result = x_s * f(|x|).  Differences: the
 * regime split is at x_m = 400 and both polynomial halves are evaluated
 * through fma/pow chains (denominator carries an explicit x^12 term).
 * Operation order is deliberate; do not reassociate. */
double code(double x_s, double x_m) {
	/* t_0 = x^3; t_0 * t_0 supplies the x^6 denominator term. */
	double t_0 = (x_m * x_m) * x_m;
	double tmp;
	if (x_m <= 400.0) {
		/* Main regime: grouped numerator times x_m over an fma-chained
		 * degree-12 denominator. */
		tmp = (((pow(x_m, 8.0) * fma((x_m * x_m), 0.0001789971, 0.0005064034)) + fma((0.0072644182 * ((x_m * x_m) * (x_m * x_m))), (x_m * x_m), fma((x_m * x_m), fma((0.0424060604 * x_m), x_m, 0.1049934947), 1.0))) * x_m) / fma(pow(x_m, 12.0), 0.0003579942, fma(pow(x_m, 10.0), 0.0008327945, fma(pow(x_m, 8.0), 0.0140005442, fma(0.0694555761, (t_0 * t_0), fma(fma(0.2909738639, (x_m * x_m), 0.7715471019), (x_m * x_m), 1.0)))));
	} else {
		/* Tail regime: 0.5/x + 0.2514179000665374/x^3, cubic term as (c/x^2)/x. */
		tmp = ((0.2514179000665374 / (x_m * x_m)) / x_m) + (0.5 / x_m);
	}
	return x_s * tmp;
}
x\_m = abs(x)
x\_s = copysign(1.0, x)
function code(x_s, x_m)
	# Herbie-optimized core for Jmat.Real.dawson.  The caller supplies
	# x_m = abs(x) and x_s = copysign(1.0, x); explicit Float64(...)
	# conversions mirror the binary64 evaluation of the FPCore above.
	x2 = Float64(x_m * x_m)
	t_0 = Float64(x2 * x_m)   # x_m^3
	tmp = 0.0
	if (x_m <= 400.0)
		p8 = x_m ^ 8.0
		# numerator of the rational approximation, split for readability
		num_hi = Float64(p8 * fma(x2, 0.0001789971, 0.0005064034))
		num_lo = fma(Float64(0.0072644182 * Float64(x2 * x2)), x2,
		             fma(x2, fma(Float64(0.0424060604 * x_m), x_m, 0.1049934947), 1.0))
		# denominator: nested fma chain over the even powers of x_m
		den = fma(x_m ^ 12.0, 0.0003579942,
		      fma(x_m ^ 10.0, 0.0008327945,
		      fma(p8, 0.0140005442,
		      fma(0.0694555761, Float64(t_0 * t_0),
		      fma(fma(0.2909738639, x2, 0.7715471019), x2, 1.0)))))
		tmp = Float64(Float64(Float64(num_hi + num_lo) * x_m) / den)
	else
		# asymptotic tail 0.5/x + 0.2514.../x^3 keeps large x_m from overflowing ^
		tmp = Float64(Float64(Float64(0.2514179000665374 / x2) / x_m) + Float64(0.5 / x_m))
	end
	return Float64(x_s * tmp)
end
x\_m = N[Abs[x], $MachinePrecision]
x\_s = N[With[{TMP1 = Abs[1.0], TMP2 = Sign[x]}, TMP1 * If[TMP2 == 0, 1, TMP2]], $MachinePrecision]
(* Herbie-generated Mathematica port of the Jmat.Real.dawson core above.
   x$95$s is the sign of x, x$95$m = |x|; every N[..., $MachinePrecision]
   wrapper forces machine-precision rounding after each operation so the
   evaluation matches the binary64 FPCore.  x_m <= 400 uses the fma-style
   rational; beyond it the asymptotic tail 0.5/x + 0.2514.../x^3. *)
code[x$95$s_, x$95$m_] := Block[{t$95$0 = N[(N[(x$95$m * x$95$m), $MachinePrecision] * x$95$m), $MachinePrecision]}, N[(x$95$s * If[LessEqual[x$95$m, 400.0], N[(N[(N[(N[(N[Power[x$95$m, 8.0], $MachinePrecision] * N[(N[(x$95$m * x$95$m), $MachinePrecision] * 0.0001789971 + 0.0005064034), $MachinePrecision]), $MachinePrecision] + N[(N[(0.0072644182 * N[(N[(x$95$m * x$95$m), $MachinePrecision] * N[(x$95$m * x$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(x$95$m * x$95$m), $MachinePrecision] + N[(N[(x$95$m * x$95$m), $MachinePrecision] * N[(N[(0.0424060604 * x$95$m), $MachinePrecision] * x$95$m + 0.1049934947), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * x$95$m), $MachinePrecision] / N[(N[Power[x$95$m, 12.0], $MachinePrecision] * 0.0003579942 + N[(N[Power[x$95$m, 10.0], $MachinePrecision] * 0.0008327945 + N[(N[Power[x$95$m, 8.0], $MachinePrecision] * 0.0140005442 + N[(0.0694555761 * N[(t$95$0 * t$95$0), $MachinePrecision] + N[(N[(0.2909738639 * N[(x$95$m * x$95$m), $MachinePrecision] + 0.7715471019), $MachinePrecision] * N[(x$95$m * x$95$m), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[(0.2514179000665374 / N[(x$95$m * x$95$m), $MachinePrecision]), $MachinePrecision] / x$95$m), $MachinePrecision] + N[(0.5 / x$95$m), $MachinePrecision]), $MachinePrecision]]), $MachinePrecision]]
\begin{array}{l}
x\_m = \left|x\right|
\\
x\_s = \mathsf{copysign}\left(1, x\right)

\\
\begin{array}{l}
t_0 := \left(x\_m \cdot x\_m\right) \cdot x\_m\\
x\_s \cdot \begin{array}{l}
\mathbf{if}\;x\_m \leq 400:\\
\;\;\;\;\frac{\left({x\_m}^{8} \cdot \mathsf{fma}\left(x\_m \cdot x\_m, 0.0001789971, 0.0005064034\right) + \mathsf{fma}\left(0.0072644182 \cdot \left(\left(x\_m \cdot x\_m\right) \cdot \left(x\_m \cdot x\_m\right)\right), x\_m \cdot x\_m, \mathsf{fma}\left(x\_m \cdot x\_m, \mathsf{fma}\left(0.0424060604 \cdot x\_m, x\_m, 0.1049934947\right), 1\right)\right)\right) \cdot x\_m}{\mathsf{fma}\left({x\_m}^{12}, 0.0003579942, \mathsf{fma}\left({x\_m}^{10}, 0.0008327945, \mathsf{fma}\left({x\_m}^{8}, 0.0140005442, \mathsf{fma}\left(0.0694555761, t\_0 \cdot t\_0, \mathsf{fma}\left(\mathsf{fma}\left(0.2909738639, x\_m \cdot x\_m, 0.7715471019\right), x\_m \cdot x\_m, 1\right)\right)\right)\right)\right)}\\

\mathbf{else}:\\
\;\;\;\;\frac{\frac{0.2514179000665374}{x\_m \cdot x\_m}}{x\_m} + \frac{0.5}{x\_m}\\


\end{array}
\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if x < 400

    1. Initial program 53.8%

      \[\frac{\left(\left(\left(\left(1 + 0.1049934947 \cdot \left(x \cdot x\right)\right) + 0.0424060604 \cdot \left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0072644182 \cdot \left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0005064034 \cdot \left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0001789971 \cdot \left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)}{\left(\left(\left(\left(\left(1 + 0.7715471019 \cdot \left(x \cdot x\right)\right) + 0.2909738639 \cdot \left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0694555761 \cdot \left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0140005442 \cdot \left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0008327945 \cdot \left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + \left(2 \cdot 0.0001789971\right) \cdot \left(\left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)} \cdot x \]
    2. Applied rewrites 53.8%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left({x}^{10}, 0.0001789971, \mathsf{fma}\left({x}^{8}, 0.0005064034, \mathsf{fma}\left(\left(\left(x \cdot x\right) \cdot x\right) \cdot \left(\left(x \cdot x\right) \cdot x\right), 0.0072644182, \mathsf{fma}\left(\mathsf{fma}\left(0.0424060604, x \cdot x, 0.1049934947\right), x \cdot x, 1\right)\right)\right)\right) \cdot x}{\mathsf{fma}\left({x}^{12}, 0.0003579942, \mathsf{fma}\left({x}^{10}, 0.0008327945, \mathsf{fma}\left({x}^{8}, 0.0140005442, \mathsf{fma}\left(0.0694555761, \left(\left(x \cdot x\right) \cdot x\right) \cdot \left(\left(x \cdot x\right) \cdot x\right), \mathsf{fma}\left(\mathsf{fma}\left(0.2909738639, x \cdot x, 0.7715471019\right), x \cdot x, 1\right)\right)\right)\right)\right)}} \]
    3. Applied rewrites 53.8%

      \[\leadsto \frac{\color{blue}{\left(\mathsf{fma}\left({x}^{10}, 0.0001789971, {x}^{8} \cdot 0.0005064034\right) + \mathsf{fma}\left(0.0072644182 \cdot \left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right), x \cdot x, \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(0.0424060604 \cdot x, x, 0.1049934947\right), 1\right)\right)\right)} \cdot x}{\mathsf{fma}\left({x}^{12}, 0.0003579942, \mathsf{fma}\left({x}^{10}, 0.0008327945, \mathsf{fma}\left({x}^{8}, 0.0140005442, \mathsf{fma}\left(0.0694555761, \left(\left(x \cdot x\right) \cdot x\right) \cdot \left(\left(x \cdot x\right) \cdot x\right), \mathsf{fma}\left(\mathsf{fma}\left(0.2909738639, x \cdot x, 0.7715471019\right), x \cdot x, 1\right)\right)\right)\right)\right)} \]
    4. Taylor expanded in x around 0

      \[\leadsto \frac{\left(\color{blue}{{x}^{8} \cdot \left(\frac{2532017}{5000000000} + \frac{1789971}{10000000000} \cdot {x}^{2}\right)} + \mathsf{fma}\left(\frac{36322091}{5000000000} \cdot \left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right), x \cdot x, \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(\frac{106015151}{2500000000} \cdot x, x, \frac{1049934947}{10000000000}\right), 1\right)\right)\right) \cdot x}{\mathsf{fma}\left({x}^{12}, \frac{1789971}{5000000000}, \mathsf{fma}\left({x}^{10}, \frac{1665589}{2000000000}, \mathsf{fma}\left({x}^{8}, \frac{70002721}{5000000000}, \mathsf{fma}\left(\frac{694555761}{10000000000}, \left(\left(x \cdot x\right) \cdot x\right) \cdot \left(\left(x \cdot x\right) \cdot x\right), \mathsf{fma}\left(\mathsf{fma}\left(\frac{2909738639}{10000000000}, x \cdot x, \frac{7715471019}{10000000000}\right), x \cdot x, 1\right)\right)\right)\right)\right)} \]
    5. Step-by-step derivation
      1. lower-*.f64N/A

        \[\leadsto \frac{\left({x}^{8} \cdot \color{blue}{\left(\frac{2532017}{5000000000} + \frac{1789971}{10000000000} \cdot {x}^{2}\right)} + \mathsf{fma}\left(\frac{36322091}{5000000000} \cdot \left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right), x \cdot x, \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(\frac{106015151}{2500000000} \cdot x, x, \frac{1049934947}{10000000000}\right), 1\right)\right)\right) \cdot x}{\mathsf{fma}\left({x}^{12}, \frac{1789971}{5000000000}, \mathsf{fma}\left({x}^{10}, \frac{1665589}{2000000000}, \mathsf{fma}\left({x}^{8}, \frac{70002721}{5000000000}, \mathsf{fma}\left(\frac{694555761}{10000000000}, \left(\left(x \cdot x\right) \cdot x\right) \cdot \left(\left(x \cdot x\right) \cdot x\right), \mathsf{fma}\left(\mathsf{fma}\left(\frac{2909738639}{10000000000}, x \cdot x, \frac{7715471019}{10000000000}\right), x \cdot x, 1\right)\right)\right)\right)\right)} \]
      2. lift-pow.f64N/A

        \[\leadsto \frac{\left({x}^{8} \cdot \left(\color{blue}{\frac{2532017}{5000000000}} + \frac{1789971}{10000000000} \cdot {x}^{2}\right) + \mathsf{fma}\left(\frac{36322091}{5000000000} \cdot \left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right), x \cdot x, \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(\frac{106015151}{2500000000} \cdot x, x, \frac{1049934947}{10000000000}\right), 1\right)\right)\right) \cdot x}{\mathsf{fma}\left({x}^{12}, \frac{1789971}{5000000000}, \mathsf{fma}\left({x}^{10}, \frac{1665589}{2000000000}, \mathsf{fma}\left({x}^{8}, \frac{70002721}{5000000000}, \mathsf{fma}\left(\frac{694555761}{10000000000}, \left(\left(x \cdot x\right) \cdot x\right) \cdot \left(\left(x \cdot x\right) \cdot x\right), \mathsf{fma}\left(\mathsf{fma}\left(\frac{2909738639}{10000000000}, x \cdot x, \frac{7715471019}{10000000000}\right), x \cdot x, 1\right)\right)\right)\right)\right)} \]
      3. +-commutativeN/A

        \[\leadsto \frac{\left({x}^{8} \cdot \left(\frac{1789971}{10000000000} \cdot {x}^{2} + \color{blue}{\frac{2532017}{5000000000}}\right) + \mathsf{fma}\left(\frac{36322091}{5000000000} \cdot \left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right), x \cdot x, \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(\frac{106015151}{2500000000} \cdot x, x, \frac{1049934947}{10000000000}\right), 1\right)\right)\right) \cdot x}{\mathsf{fma}\left({x}^{12}, \frac{1789971}{5000000000}, \mathsf{fma}\left({x}^{10}, \frac{1665589}{2000000000}, \mathsf{fma}\left({x}^{8}, \frac{70002721}{5000000000}, \mathsf{fma}\left(\frac{694555761}{10000000000}, \left(\left(x \cdot x\right) \cdot x\right) \cdot \left(\left(x \cdot x\right) \cdot x\right), \mathsf{fma}\left(\mathsf{fma}\left(\frac{2909738639}{10000000000}, x \cdot x, \frac{7715471019}{10000000000}\right), x \cdot x, 1\right)\right)\right)\right)\right)} \]
      4. pow2N/A

        \[\leadsto \frac{\left({x}^{8} \cdot \left(\frac{1789971}{10000000000} \cdot \left(x \cdot x\right) + \frac{2532017}{5000000000}\right) + \mathsf{fma}\left(\frac{36322091}{5000000000} \cdot \left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right), x \cdot x, \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(\frac{106015151}{2500000000} \cdot x, x, \frac{1049934947}{10000000000}\right), 1\right)\right)\right) \cdot x}{\mathsf{fma}\left({x}^{12}, \frac{1789971}{5000000000}, \mathsf{fma}\left({x}^{10}, \frac{1665589}{2000000000}, \mathsf{fma}\left({x}^{8}, \frac{70002721}{5000000000}, \mathsf{fma}\left(\frac{694555761}{10000000000}, \left(\left(x \cdot x\right) \cdot x\right) \cdot \left(\left(x \cdot x\right) \cdot x\right), \mathsf{fma}\left(\mathsf{fma}\left(\frac{2909738639}{10000000000}, x \cdot x, \frac{7715471019}{10000000000}\right), x \cdot x, 1\right)\right)\right)\right)\right)} \]
      5. *-commutativeN/A

        \[\leadsto \frac{\left({x}^{8} \cdot \left(\left(x \cdot x\right) \cdot \frac{1789971}{10000000000} + \frac{2532017}{5000000000}\right) + \mathsf{fma}\left(\frac{36322091}{5000000000} \cdot \left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right), x \cdot x, \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(\frac{106015151}{2500000000} \cdot x, x, \frac{1049934947}{10000000000}\right), 1\right)\right)\right) \cdot x}{\mathsf{fma}\left({x}^{12}, \frac{1789971}{5000000000}, \mathsf{fma}\left({x}^{10}, \frac{1665589}{2000000000}, \mathsf{fma}\left({x}^{8}, \frac{70002721}{5000000000}, \mathsf{fma}\left(\frac{694555761}{10000000000}, \left(\left(x \cdot x\right) \cdot x\right) \cdot \left(\left(x \cdot x\right) \cdot x\right), \mathsf{fma}\left(\mathsf{fma}\left(\frac{2909738639}{10000000000}, x \cdot x, \frac{7715471019}{10000000000}\right), x \cdot x, 1\right)\right)\right)\right)\right)} \]
      6. lower-fma.f64N/A

        \[\leadsto \frac{\left({x}^{8} \cdot \mathsf{fma}\left(x \cdot x, \color{blue}{\frac{1789971}{10000000000}}, \frac{2532017}{5000000000}\right) + \mathsf{fma}\left(\frac{36322091}{5000000000} \cdot \left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right), x \cdot x, \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(\frac{106015151}{2500000000} \cdot x, x, \frac{1049934947}{10000000000}\right), 1\right)\right)\right) \cdot x}{\mathsf{fma}\left({x}^{12}, \frac{1789971}{5000000000}, \mathsf{fma}\left({x}^{10}, \frac{1665589}{2000000000}, \mathsf{fma}\left({x}^{8}, \frac{70002721}{5000000000}, \mathsf{fma}\left(\frac{694555761}{10000000000}, \left(\left(x \cdot x\right) \cdot x\right) \cdot \left(\left(x \cdot x\right) \cdot x\right), \mathsf{fma}\left(\mathsf{fma}\left(\frac{2909738639}{10000000000}, x \cdot x, \frac{7715471019}{10000000000}\right), x \cdot x, 1\right)\right)\right)\right)\right)} \]
      7. lift-*.f64 53.8

        \[\leadsto \frac{\left({x}^{8} \cdot \mathsf{fma}\left(x \cdot x, 0.0001789971, 0.0005064034\right) + \mathsf{fma}\left(0.0072644182 \cdot \left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right), x \cdot x, \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(0.0424060604 \cdot x, x, 0.1049934947\right), 1\right)\right)\right) \cdot x}{\mathsf{fma}\left({x}^{12}, 0.0003579942, \mathsf{fma}\left({x}^{10}, 0.0008327945, \mathsf{fma}\left({x}^{8}, 0.0140005442, \mathsf{fma}\left(0.0694555761, \left(\left(x \cdot x\right) \cdot x\right) \cdot \left(\left(x \cdot x\right) \cdot x\right), \mathsf{fma}\left(\mathsf{fma}\left(0.2909738639, x \cdot x, 0.7715471019\right), x \cdot x, 1\right)\right)\right)\right)\right)} \]
    6. Applied rewrites 53.8%

      \[\leadsto \frac{\left(\color{blue}{{x}^{8} \cdot \mathsf{fma}\left(x \cdot x, 0.0001789971, 0.0005064034\right)} + \mathsf{fma}\left(0.0072644182 \cdot \left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right), x \cdot x, \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(0.0424060604 \cdot x, x, 0.1049934947\right), 1\right)\right)\right) \cdot x}{\mathsf{fma}\left({x}^{12}, 0.0003579942, \mathsf{fma}\left({x}^{10}, 0.0008327945, \mathsf{fma}\left({x}^{8}, 0.0140005442, \mathsf{fma}\left(0.0694555761, \left(\left(x \cdot x\right) \cdot x\right) \cdot \left(\left(x \cdot x\right) \cdot x\right), \mathsf{fma}\left(\mathsf{fma}\left(0.2909738639, x \cdot x, 0.7715471019\right), x \cdot x, 1\right)\right)\right)\right)\right)} \]

    if 400 < x

    1. Initial program 53.8%

      \[\frac{\left(\left(\left(\left(1 + 0.1049934947 \cdot \left(x \cdot x\right)\right) + 0.0424060604 \cdot \left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0072644182 \cdot \left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0005064034 \cdot \left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0001789971 \cdot \left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)}{\left(\left(\left(\left(\left(1 + 0.7715471019 \cdot \left(x \cdot x\right)\right) + 0.2909738639 \cdot \left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0694555761 \cdot \left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0140005442 \cdot \left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0008327945 \cdot \left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + \left(2 \cdot 0.0001789971\right) \cdot \left(\left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)} \cdot x \]
    2. Applied rewrites 53.8%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left({x}^{10}, 0.0001789971, \mathsf{fma}\left({x}^{8}, 0.0005064034, \mathsf{fma}\left(\left(\left(x \cdot x\right) \cdot x\right) \cdot \left(\left(x \cdot x\right) \cdot x\right), 0.0072644182, \mathsf{fma}\left(\mathsf{fma}\left(0.0424060604, x \cdot x, 0.1049934947\right), x \cdot x, 1\right)\right)\right)\right) \cdot x}{\mathsf{fma}\left({x}^{12}, 0.0003579942, \mathsf{fma}\left({x}^{10}, 0.0008327945, \mathsf{fma}\left({x}^{8}, 0.0140005442, \mathsf{fma}\left(0.0694555761, \left(\left(x \cdot x\right) \cdot x\right) \cdot \left(\left(x \cdot x\right) \cdot x\right), \mathsf{fma}\left(\mathsf{fma}\left(0.2909738639, x \cdot x, 0.7715471019\right), x \cdot x, 1\right)\right)\right)\right)\right)}} \]
    3. Taylor expanded in x around inf

      \[\leadsto \color{blue}{\frac{\frac{1}{2} + \frac{600041}{2386628} \cdot \frac{1}{{x}^{2}}}{x}} \]
    4. Step-by-step derivation
      1. lower-/.f64N/A

        \[\leadsto \frac{\frac{1}{2} + \frac{600041}{2386628} \cdot \frac{1}{{x}^{2}}}{\color{blue}{x}} \]
      2. +-commutativeN/A

        \[\leadsto \frac{\frac{600041}{2386628} \cdot \frac{1}{{x}^{2}} + \frac{1}{2}}{x} \]
      3. lower-+.f64N/A

        \[\leadsto \frac{\frac{600041}{2386628} \cdot \frac{1}{{x}^{2}} + \frac{1}{2}}{x} \]
      4. associate-*r/N/A

        \[\leadsto \frac{\frac{\frac{600041}{2386628} \cdot 1}{{x}^{2}} + \frac{1}{2}}{x} \]
      5. metadata-evalN/A

        \[\leadsto \frac{\frac{\frac{600041}{2386628}}{{x}^{2}} + \frac{1}{2}}{x} \]
      6. pow2N/A

        \[\leadsto \frac{\frac{\frac{600041}{2386628}}{x \cdot x} + \frac{1}{2}}{x} \]
      7. lift-/.f64N/A

        \[\leadsto \frac{\frac{\frac{600041}{2386628}}{x \cdot x} + \frac{1}{2}}{x} \]
      8. lift-*.f64 51.4

        \[\leadsto \frac{\frac{0.2514179000665374}{x \cdot x} + 0.5}{x} \]
    5. Applied rewrites 51.4%

      \[\leadsto \color{blue}{\frac{\frac{0.2514179000665374}{x \cdot x} + 0.5}{x}} \]
    6. Step-by-step derivation
      1. lift-/.f64N/A

        \[\leadsto \frac{\frac{\frac{600041}{2386628}}{x \cdot x} + \frac{1}{2}}{\color{blue}{x}} \]
      2. lift-+.f64N/A

        \[\leadsto \frac{\frac{\frac{600041}{2386628}}{x \cdot x} + \frac{1}{2}}{x} \]
      3. div-addN/A

        \[\leadsto \frac{\frac{\frac{600041}{2386628}}{x \cdot x}}{x} + \color{blue}{\frac{\frac{1}{2}}{x}} \]
      4. lift-*.f64N/A

        \[\leadsto \frac{\frac{\frac{600041}{2386628}}{x \cdot x}}{x} + \frac{\frac{1}{2}}{x} \]
      5. lift-/.f64N/A

        \[\leadsto \frac{\frac{\frac{600041}{2386628}}{x \cdot x}}{x} + \frac{\frac{1}{2}}{x} \]
      6. metadata-evalN/A

        \[\leadsto \frac{\frac{\frac{600041}{2386628} \cdot 1}{x \cdot x}}{x} + \frac{\frac{1}{2}}{x} \]
      7. associate-*r/N/A

        \[\leadsto \frac{\frac{600041}{2386628} \cdot \frac{1}{x \cdot x}}{x} + \frac{\frac{1}{2}}{x} \]
      8. pow2N/A

        \[\leadsto \frac{\frac{600041}{2386628} \cdot \frac{1}{{x}^{2}}}{x} + \frac{\frac{1}{2}}{x} \]
      9. lower-+.f64N/A

        \[\leadsto \frac{\frac{600041}{2386628} \cdot \frac{1}{{x}^{2}}}{x} + \color{blue}{\frac{\frac{1}{2}}{x}} \]
      10. lower-/.f64N/A

        \[\leadsto \frac{\frac{600041}{2386628} \cdot \frac{1}{{x}^{2}}}{x} + \frac{\color{blue}{\frac{1}{2}}}{x} \]
      11. pow2N/A

        \[\leadsto \frac{\frac{600041}{2386628} \cdot \frac{1}{x \cdot x}}{x} + \frac{\frac{1}{2}}{x} \]
      12. associate-*r/N/A

        \[\leadsto \frac{\frac{\frac{600041}{2386628} \cdot 1}{x \cdot x}}{x} + \frac{\frac{1}{2}}{x} \]
      13. metadata-evalN/A

        \[\leadsto \frac{\frac{\frac{600041}{2386628}}{x \cdot x}}{x} + \frac{\frac{1}{2}}{x} \]
      14. lift-/.f64N/A

        \[\leadsto \frac{\frac{\frac{600041}{2386628}}{x \cdot x}}{x} + \frac{\frac{1}{2}}{x} \]
      15. lift-*.f64N/A

        \[\leadsto \frac{\frac{\frac{600041}{2386628}}{x \cdot x}}{x} + \frac{\frac{1}{2}}{x} \]
      16. lift-/.f64 51.4

        \[\leadsto \frac{\frac{0.2514179000665374}{x \cdot x}}{x} + \frac{0.5}{\color{blue}{x}} \]
    7. Applied rewrites 51.4%

      \[\leadsto \frac{\frac{0.2514179000665374}{x \cdot x}}{x} + \color{blue}{\frac{0.5}{x}} \]
  3. Recombined 2 regimes into one program.
  4. Add Preprocessing

Alternative 3: 99.6% accurate, 5.9× speedup?

\[\begin{array}{l} x\_m = \left|x\right| \\ x\_s = \mathsf{copysign}\left(1, x\right) \\ x\_s \cdot \begin{array}{l} \mathbf{if}\;x\_m \leq 1.2:\\ \;\;\;\;\mathsf{fma}\left(-0.6665536072, x\_m \cdot x\_m, 1\right) \cdot x\_m\\ \mathbf{else}:\\ \;\;\;\;-\frac{\left(\left(-\frac{\frac{11.259630434457211}{x\_m \cdot x\_m} + 0.15298196345929074}{\left(\left(x\_m \cdot x\_m\right) \cdot x\_m\right) \cdot x\_m}\right) - 0.5\right) - \frac{0.2514179000665374}{x\_m \cdot x\_m}}{x\_m}\\ \end{array} \end{array} \]
x\_m = (fabs.f64 x)
x\_s = (copysign.f64 #s(literal 1 binary64) x)
; Alternative 3 (99.6% accurate per the report): signed Dawson approximation.
; Caller preprocessing: x_m = |x|, x_s = copysign(1, x).
; |x| <= 1.2: odd cubic x*(1 - 0.6665536072 x^2) via fma;
; |x| >  1.2: large-x expansion with a 1/x^4-scaled correction term.
(FPCore (x_s x_m)
 :precision binary64
 (*
  x_s
  (if (<= x_m 1.2)
    (* (fma -0.6665536072 (* x_m x_m) 1.0) x_m)
    (-
     (/
      (-
       (-
        (-
         (/
          (+ (/ 11.259630434457211 (* x_m x_m)) 0.15298196345929074)
          (* (* (* x_m x_m) x_m) x_m)))
        0.5)
       (/ 0.2514179000665374 (* x_m x_m)))
      x_m)))))
x\_m = fabs(x);
x\_s = copysign(1.0, x);
double code(double x_s, double x_m) {
	double tmp;
	if (x_m <= 1.2) {
		tmp = fma(-0.6665536072, (x_m * x_m), 1.0) * x_m;
	} else {
		tmp = -(((-(((11.259630434457211 / (x_m * x_m)) + 0.15298196345929074) / (((x_m * x_m) * x_m) * x_m)) - 0.5) - (0.2514179000665374 / (x_m * x_m))) / x_m);
	}
	return x_s * tmp;
}
x\_m = abs(x)
x\_s = copysign(1.0, x)
function code(x_s, x_m)
	# Alternative 3: cubic fma for |x| <= 1.2, large-x expansion beyond.
	# Caller supplies x_m = abs(x) and x_s = copysign(1.0, x); the
	# Float64(...) wrappers mirror the binary64 FPCore evaluation.
	tmp = 0.0
	if (x_m <= 1.2)
		tmp = Float64(fma(-0.6665536072, Float64(x_m * x_m), 1.0) * x_m)
	else
		x2 = Float64(x_m * x_m)
		# correction term of the expansion, scaled by 1/x^4
		inner = Float64(Float64(Float64(11.259630434457211 / x2) + 0.15298196345929074) / Float64(Float64(x2 * x_m) * x_m))
		neg = Float64(-inner)
		diff = Float64(Float64(neg - 0.5) - Float64(0.2514179000665374 / x2))
		tmp = Float64(-Float64(diff / x_m))
	end
	return Float64(x_s * tmp)
end
x\_m = N[Abs[x], $MachinePrecision]
x\_s = N[With[{TMP1 = Abs[1.0], TMP2 = Sign[x]}, TMP1 * If[TMP2 == 0, 1, TMP2]], $MachinePrecision]
(* Alternative 3 in Mathematica: x$95$s = sign of x, x$95$m = |x|.
   N[..., $MachinePrecision] forces machine-precision rounding per step.
   |x| <= 1.2 uses the fused cubic; beyond that the large-x expansion. *)
code[x$95$s_, x$95$m_] := N[(x$95$s * If[LessEqual[x$95$m, 1.2], N[(N[(-0.6665536072 * N[(x$95$m * x$95$m), $MachinePrecision] + 1.0), $MachinePrecision] * x$95$m), $MachinePrecision], (-N[(N[(N[((-N[(N[(N[(11.259630434457211 / N[(x$95$m * x$95$m), $MachinePrecision]), $MachinePrecision] + 0.15298196345929074), $MachinePrecision] / N[(N[(N[(x$95$m * x$95$m), $MachinePrecision] * x$95$m), $MachinePrecision] * x$95$m), $MachinePrecision]), $MachinePrecision]) - 0.5), $MachinePrecision] - N[(0.2514179000665374 / N[(x$95$m * x$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / x$95$m), $MachinePrecision])]), $MachinePrecision]
\begin{array}{l}
x\_m = \left|x\right|
\\
x\_s = \mathsf{copysign}\left(1, x\right)

\\
x\_s \cdot \begin{array}{l}
\mathbf{if}\;x\_m \leq 1.2:\\
\;\;\;\;\mathsf{fma}\left(-0.6665536072, x\_m \cdot x\_m, 1\right) \cdot x\_m\\

\mathbf{else}:\\
\;\;\;\;-\frac{\left(\left(-\frac{\frac{11.259630434457211}{x\_m \cdot x\_m} + 0.15298196345929074}{\left(\left(x\_m \cdot x\_m\right) \cdot x\_m\right) \cdot x\_m}\right) - 0.5\right) - \frac{0.2514179000665374}{x\_m \cdot x\_m}}{x\_m}\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if x < 1.19999999999999996

    1. Initial program 53.8%

      \[\frac{\left(\left(\left(\left(1 + 0.1049934947 \cdot \left(x \cdot x\right)\right) + 0.0424060604 \cdot \left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0072644182 \cdot \left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0005064034 \cdot \left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0001789971 \cdot \left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)}{\left(\left(\left(\left(\left(1 + 0.7715471019 \cdot \left(x \cdot x\right)\right) + 0.2909738639 \cdot \left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0694555761 \cdot \left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0140005442 \cdot \left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0008327945 \cdot \left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + \left(2 \cdot 0.0001789971\right) \cdot \left(\left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)} \cdot x \]
    2. Taylor expanded in x around 0

      \[\leadsto \color{blue}{\left(1 + \frac{-833192009}{1250000000} \cdot {x}^{2}\right)} \cdot x \]
    3. Step-by-step derivation
      1. +-commutativeN/A

        \[\leadsto \left(\frac{-833192009}{1250000000} \cdot {x}^{2} + \color{blue}{1}\right) \cdot x \]
      2. pow2N/A

        \[\leadsto \left(\frac{-833192009}{1250000000} \cdot \left(x \cdot x\right) + 1\right) \cdot x \]
      3. lift-*.f64N/A

        \[\leadsto \left(\frac{-833192009}{1250000000} \cdot \left(x \cdot x\right) + 1\right) \cdot x \]
    4. Applied rewrites 50.1%

      \[\leadsto \color{blue}{\mathsf{fma}\left(-0.6665536072, x \cdot x, 1\right)} \cdot x \]

    if 1.19999999999999996 < x

    1. Initial program 53.8%

      \[\frac{\left(\left(\left(\left(1 + 0.1049934947 \cdot \left(x \cdot x\right)\right) + 0.0424060604 \cdot \left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0072644182 \cdot \left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0005064034 \cdot \left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0001789971 \cdot \left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)}{\left(\left(\left(\left(\left(1 + 0.7715471019 \cdot \left(x \cdot x\right)\right) + 0.2909738639 \cdot \left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0694555761 \cdot \left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0140005442 \cdot \left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0008327945 \cdot \left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + \left(2 \cdot 0.0001789971\right) \cdot \left(\left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)} \cdot x \]
    2. Taylor expanded in x around -inf

      \[\leadsto \color{blue}{-1 \cdot \frac{-1 \cdot \frac{\frac{1307076337763}{8543989815576} + \frac{344398180852034095277}{30586987988352776592} \cdot \frac{1}{{x}^{2}}}{{x}^{4}} - \left(\frac{1}{2} + \frac{600041}{2386628} \cdot \frac{1}{{x}^{2}}\right)}{x}} \]
    3. Step-by-step derivation
      1. Applied rewrites 51.4%

        \[\leadsto \color{blue}{-\frac{\left(\left(-\frac{\frac{11.259630434457211}{x \cdot x} + 0.15298196345929074}{\left(\left(x \cdot x\right) \cdot x\right) \cdot x}\right) - 0.5\right) - \frac{0.2514179000665374}{x \cdot x}}{x}} \]
    4. Recombined 2 regimes into one program.
    5. Add Preprocessing

    Alternative 4: 99.5% accurate, 8.1× speedup?

    \[\begin{array}{l} x\_m = \left|x\right| \\ x\_s = \mathsf{copysign}\left(1, x\right) \\ x\_s \cdot \begin{array}{l} \mathbf{if}\;x\_m \leq 1:\\ \;\;\;\;\mathsf{fma}\left(-0.6665536072, x\_m \cdot x\_m, 1\right) \cdot x\_m\\ \mathbf{else}:\\ \;\;\;\;-\left(\frac{\frac{\frac{0.15298196345929074}{x\_m \cdot x\_m} + 0.2514179000665374}{-x\_m \cdot x\_m}}{x\_m} - \frac{0.5}{x\_m}\right)\\ \end{array} \end{array} \]
    x\_m = (fabs.f64 x)
    x\_s = (copysign.f64 #s(literal 1 binary64) x)
    ; Alternative 4 (99.5% accurate per the report): signed Dawson approximation.
    ; Caller preprocessing: x_m = |x|, x_s = copysign(1, x).
    ; |x| <= 1: odd cubic x*(1 - 0.6665536072 x^2) via fma;
    ; |x| >  1: large-x expansion 0.5/x minus a 1/x^3-scaled correction.
    (FPCore (x_s x_m)
     :precision binary64
     (*
      x_s
      (if (<= x_m 1.0)
        (* (fma -0.6665536072 (* x_m x_m) 1.0) x_m)
        (-
         (-
          (/
           (/
            (+ (/ 0.15298196345929074 (* x_m x_m)) 0.2514179000665374)
            (- (* x_m x_m)))
           x_m)
          (/ 0.5 x_m))))))
    x\_m = fabs(x);
    x\_s = copysign(1.0, x);
    double code(double x_s, double x_m) {
    	double tmp;
    	if (x_m <= 1.0) {
    		tmp = fma(-0.6665536072, (x_m * x_m), 1.0) * x_m;
    	} else {
    		tmp = -(((((0.15298196345929074 / (x_m * x_m)) + 0.2514179000665374) / -(x_m * x_m)) / x_m) - (0.5 / x_m));
    	}
    	return x_s * tmp;
    }
    
    x\_m = abs(x)
    x\_s = copysign(1.0, x)
    function code(x_s, x_m)
    	# Alternative 4: cubic fma for |x| <= 1, large-x expansion beyond.
    	# Caller supplies x_m = abs(x) and x_s = copysign(1.0, x); the
    	# Float64(...) wrappers mirror the binary64 FPCore evaluation.
    	tmp = 0.0
    	if (x_m <= 1.0)
    		tmp = Float64(fma(-0.6665536072, Float64(x_m * x_m), 1.0) * x_m)
    	else
    		x2 = Float64(x_m * x_m)
    		# correction term divided by -x^2, then by x below
    		q = Float64(Float64(Float64(0.15298196345929074 / x2) + 0.2514179000665374) / Float64(-x2))
    		tmp = Float64(-Float64(Float64(q / x_m) - Float64(0.5 / x_m)))
    	end
    	return Float64(x_s * tmp)
    end
    
    x\_m = N[Abs[x], $MachinePrecision]
    x\_s = N[With[{TMP1 = Abs[1.0], TMP2 = Sign[x]}, TMP1 * If[TMP2 == 0, 1, TMP2]], $MachinePrecision]
    (* Alternative 4 in Mathematica: x$95$s = sign of x, x$95$m = |x|.
       N[..., $MachinePrecision] forces machine-precision rounding per step.
       |x| <= 1 uses the fused cubic; beyond that the large-x expansion. *)
    code[x$95$s_, x$95$m_] := N[(x$95$s * If[LessEqual[x$95$m, 1.0], N[(N[(-0.6665536072 * N[(x$95$m * x$95$m), $MachinePrecision] + 1.0), $MachinePrecision] * x$95$m), $MachinePrecision], (-N[(N[(N[(N[(N[(0.15298196345929074 / N[(x$95$m * x$95$m), $MachinePrecision]), $MachinePrecision] + 0.2514179000665374), $MachinePrecision] / (-N[(x$95$m * x$95$m), $MachinePrecision])), $MachinePrecision] / x$95$m), $MachinePrecision] - N[(0.5 / x$95$m), $MachinePrecision]), $MachinePrecision])]), $MachinePrecision]
    
    \begin{array}{l}
    x\_m = \left|x\right|
    \\
    x\_s = \mathsf{copysign}\left(1, x\right)
    
    \\
    x\_s \cdot \begin{array}{l}
    \mathbf{if}\;x\_m \leq 1:\\
    \;\;\;\;\mathsf{fma}\left(-0.6665536072, x\_m \cdot x\_m, 1\right) \cdot x\_m\\
    
    \mathbf{else}:\\
    \;\;\;\;-\left(\frac{\frac{\frac{0.15298196345929074}{x\_m \cdot x\_m} + 0.2514179000665374}{-x\_m \cdot x\_m}}{x\_m} - \frac{0.5}{x\_m}\right)\\
    
    
    \end{array}
    \end{array}
    
    Derivation
    1. Split input into 2 regimes
    2. if x < 1

      1. Initial program 53.8%

        \[\frac{\left(\left(\left(\left(1 + 0.1049934947 \cdot \left(x \cdot x\right)\right) + 0.0424060604 \cdot \left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0072644182 \cdot \left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0005064034 \cdot \left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0001789971 \cdot \left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)}{\left(\left(\left(\left(\left(1 + 0.7715471019 \cdot \left(x \cdot x\right)\right) + 0.2909738639 \cdot \left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0694555761 \cdot \left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0140005442 \cdot \left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0008327945 \cdot \left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + \left(2 \cdot 0.0001789971\right) \cdot \left(\left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)} \cdot x \]
      2. Taylor expanded in x around 0

        \[\leadsto \color{blue}{\left(1 + \frac{-833192009}{1250000000} \cdot {x}^{2}\right)} \cdot x \]
      3. Step-by-step derivation
        1. +-commutativeN/A

          \[\leadsto \left(\frac{-833192009}{1250000000} \cdot {x}^{2} + \color{blue}{1}\right) \cdot x \]
        2. pow2N/A

          \[\leadsto \left(\frac{-833192009}{1250000000} \cdot \left(x \cdot x\right) + 1\right) \cdot x \]
        3. lift-*.f64N/A

          \[\leadsto \left(\frac{-833192009}{1250000000} \cdot \left(x \cdot x\right) + 1\right) \cdot x \]
      4. Applied rewrites (50.1%)

        \[\leadsto \color{blue}{\mathsf{fma}\left(-0.6665536072, x \cdot x, 1\right)} \cdot x \]

      if 1 < x

      1. Initial program 53.8%

        \[\frac{\left(\left(\left(\left(1 + 0.1049934947 \cdot \left(x \cdot x\right)\right) + 0.0424060604 \cdot \left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0072644182 \cdot \left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0005064034 \cdot \left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0001789971 \cdot \left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)}{\left(\left(\left(\left(\left(1 + 0.7715471019 \cdot \left(x \cdot x\right)\right) + 0.2909738639 \cdot \left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0694555761 \cdot \left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0140005442 \cdot \left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0008327945 \cdot \left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + \left(2 \cdot 0.0001789971\right) \cdot \left(\left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)} \cdot x \]
      2. Taylor expanded in x around -inf

        \[\leadsto \color{blue}{-1 \cdot \frac{-1 \cdot \frac{\frac{600041}{2386628} + \frac{1307076337763}{8543989815576} \cdot \frac{1}{{x}^{2}}}{{x}^{2}} - \frac{1}{2}}{x}} \]
      3. Step-by-step derivation
        1. Applied rewrites51.4%

          \[\leadsto \color{blue}{-\frac{\left(-\frac{\frac{0.15298196345929074}{x \cdot x} + 0.2514179000665374}{x \cdot x}\right) - 0.5}{x}} \]
        2. Step-by-step derivation
          1. lift-/.f64N/A

            \[\leadsto -\frac{\left(-\frac{\frac{\frac{1307076337763}{8543989815576}}{x \cdot x} + \frac{600041}{2386628}}{x \cdot x}\right) - \frac{1}{2}}{x} \]
          2. lift--.f64N/A

            \[\leadsto -\frac{\left(-\frac{\frac{\frac{1307076337763}{8543989815576}}{x \cdot x} + \frac{600041}{2386628}}{x \cdot x}\right) - \frac{1}{2}}{x} \]
          3. lift-neg.f64N/A

            \[\leadsto -\frac{\left(\mathsf{neg}\left(\frac{\frac{\frac{1307076337763}{8543989815576}}{x \cdot x} + \frac{600041}{2386628}}{x \cdot x}\right)\right) - \frac{1}{2}}{x} \]
          4. lift-*.f64N/A

            \[\leadsto -\frac{\left(\mathsf{neg}\left(\frac{\frac{\frac{1307076337763}{8543989815576}}{x \cdot x} + \frac{600041}{2386628}}{x \cdot x}\right)\right) - \frac{1}{2}}{x} \]
          5. lift-/.f64N/A

            \[\leadsto -\frac{\left(\mathsf{neg}\left(\frac{\frac{\frac{1307076337763}{8543989815576}}{x \cdot x} + \frac{600041}{2386628}}{x \cdot x}\right)\right) - \frac{1}{2}}{x} \]
          6. lift-+.f64N/A

            \[\leadsto -\frac{\left(\mathsf{neg}\left(\frac{\frac{\frac{1307076337763}{8543989815576}}{x \cdot x} + \frac{600041}{2386628}}{x \cdot x}\right)\right) - \frac{1}{2}}{x} \]
          7. lift-*.f64N/A

            \[\leadsto -\frac{\left(\mathsf{neg}\left(\frac{\frac{\frac{1307076337763}{8543989815576}}{x \cdot x} + \frac{600041}{2386628}}{x \cdot x}\right)\right) - \frac{1}{2}}{x} \]
          8. lift-/.f64N/A

            \[\leadsto -\frac{\left(\mathsf{neg}\left(\frac{\frac{\frac{1307076337763}{8543989815576}}{x \cdot x} + \frac{600041}{2386628}}{x \cdot x}\right)\right) - \frac{1}{2}}{x} \]
          9. div-subN/A

            \[\leadsto -\left(\frac{\mathsf{neg}\left(\frac{\frac{\frac{1307076337763}{8543989815576}}{x \cdot x} + \frac{600041}{2386628}}{x \cdot x}\right)}{x} - \frac{\frac{1}{2}}{x}\right) \]
          10. lower--.f64N/A

            \[\leadsto -\left(\frac{\mathsf{neg}\left(\frac{\frac{\frac{1307076337763}{8543989815576}}{x \cdot x} + \frac{600041}{2386628}}{x \cdot x}\right)}{x} - \frac{\frac{1}{2}}{x}\right) \]
        3. Applied rewrites51.4%

          \[\leadsto -\left(\frac{\frac{\frac{0.15298196345929074}{x \cdot x} + 0.2514179000665374}{-x \cdot x}}{x} - \frac{0.5}{x}\right) \]
      4. Recombined 2 regimes into one program.
      5. Add Preprocessing

      Alternative 5: 99.5% accurate, 9.0× speedup?

      \[\begin{array}{l} x\_m = \left|x\right| \\ x\_s = \mathsf{copysign}\left(1, x\right) \\ x\_s \cdot \begin{array}{l} \mathbf{if}\;x\_m \leq 1:\\ \;\;\;\;\mathsf{fma}\left(-0.6665536072, x\_m \cdot x\_m, 1\right) \cdot x\_m\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{\frac{0.15298196345929074}{x\_m \cdot x\_m} + 0.2514179000665374}{-x\_m \cdot x\_m} - 0.5}{-x\_m}\\ \end{array} \end{array} \]
      x\_m = (fabs.f64 x)
      x\_s = (copysign.f64 #s(literal 1 binary64) x)
      (FPCore (x_s x_m)
       :precision binary64
       (*
        x_s
        (if (<= x_m 1.0)
          (* (fma -0.6665536072 (* x_m x_m) 1.0) x_m)
          (/
           (-
            (/
             (+ (/ 0.15298196345929074 (* x_m x_m)) 0.2514179000665374)
             (- (* x_m x_m)))
            0.5)
           (- x_m)))))
      x\_m = fabs(x);
      x\_s = copysign(1.0, x);
      double code(double x_s, double x_m) {
      	double tmp;
      	if (x_m <= 1.0) {
      		tmp = fma(-0.6665536072, (x_m * x_m), 1.0) * x_m;
      	} else {
      		tmp = ((((0.15298196345929074 / (x_m * x_m)) + 0.2514179000665374) / -(x_m * x_m)) - 0.5) / -x_m;
      	}
      	return x_s * tmp;
      }
      
      x\_m = abs(x)
      x\_s = copysign(1.0, x)
      # Dawson-style approximation on the magnitude x_m; x_s carries the sign of x.
      # Polynomial form for x_m <= 1, asymptotic tail otherwise.
      function code(x_s, x_m)
      	sq = Float64(x_m * x_m)
      	val = if x_m <= 1.0
      		Float64(fma(-0.6665536072, sq, 1.0) * x_m)
      	else
      		num = Float64(Float64(0.15298196345929074 / sq) + 0.2514179000665374)
      		Float64(Float64(Float64(num / Float64(-sq)) - 0.5) / Float64(-x_m))
      	end
      	return Float64(x_s * val)
      end
      
      x\_m = N[Abs[x], $MachinePrecision]
      x\_s = N[With[{TMP1 = Abs[1.0], TMP2 = Sign[x]}, TMP1 * If[TMP2 == 0, 1, TMP2]], $MachinePrecision]
      code[x$95$s_, x$95$m_] := N[(x$95$s * If[LessEqual[x$95$m, 1.0], N[(N[(-0.6665536072 * N[(x$95$m * x$95$m), $MachinePrecision] + 1.0), $MachinePrecision] * x$95$m), $MachinePrecision], N[(N[(N[(N[(N[(0.15298196345929074 / N[(x$95$m * x$95$m), $MachinePrecision]), $MachinePrecision] + 0.2514179000665374), $MachinePrecision] / (-N[(x$95$m * x$95$m), $MachinePrecision])), $MachinePrecision] - 0.5), $MachinePrecision] / (-x$95$m)), $MachinePrecision]]), $MachinePrecision]
      
      \begin{array}{l}
      x\_m = \left|x\right|
      \\
      x\_s = \mathsf{copysign}\left(1, x\right)
      
      \\
      x\_s \cdot \begin{array}{l}
      \mathbf{if}\;x\_m \leq 1:\\
      \;\;\;\;\mathsf{fma}\left(-0.6665536072, x\_m \cdot x\_m, 1\right) \cdot x\_m\\
      
      \mathbf{else}:\\
      \;\;\;\;\frac{\frac{\frac{0.15298196345929074}{x\_m \cdot x\_m} + 0.2514179000665374}{-x\_m \cdot x\_m} - 0.5}{-x\_m}\\
      
      
      \end{array}
      \end{array}
      
      Derivation
      1. Split input into 2 regimes
      2. if x < 1

        1. Initial program 53.8%

          \[\frac{\left(\left(\left(\left(1 + 0.1049934947 \cdot \left(x \cdot x\right)\right) + 0.0424060604 \cdot \left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0072644182 \cdot \left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0005064034 \cdot \left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0001789971 \cdot \left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)}{\left(\left(\left(\left(\left(1 + 0.7715471019 \cdot \left(x \cdot x\right)\right) + 0.2909738639 \cdot \left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0694555761 \cdot \left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0140005442 \cdot \left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0008327945 \cdot \left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + \left(2 \cdot 0.0001789971\right) \cdot \left(\left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)} \cdot x \]
        2. Taylor expanded in x around 0

          \[\leadsto \color{blue}{\left(1 + \frac{-833192009}{1250000000} \cdot {x}^{2}\right)} \cdot x \]
        3. Step-by-step derivation
          1. +-commutativeN/A

            \[\leadsto \left(\frac{-833192009}{1250000000} \cdot {x}^{2} + \color{blue}{1}\right) \cdot x \]
          2. pow2N/A

            \[\leadsto \left(\frac{-833192009}{1250000000} \cdot \left(x \cdot x\right) + 1\right) \cdot x \]
          3. lift-*.f64N/A

            \[\leadsto \left(\frac{-833192009}{1250000000} \cdot \left(x \cdot x\right) + 1\right) \cdot x \]
        4. Applied rewrites50.1%

          \[\leadsto \color{blue}{\mathsf{fma}\left(-0.6665536072, x \cdot x, 1\right)} \cdot x \]

        if 1 < x

        1. Initial program 53.8%

          \[\frac{\left(\left(\left(\left(1 + 0.1049934947 \cdot \left(x \cdot x\right)\right) + 0.0424060604 \cdot \left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0072644182 \cdot \left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0005064034 \cdot \left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0001789971 \cdot \left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)}{\left(\left(\left(\left(\left(1 + 0.7715471019 \cdot \left(x \cdot x\right)\right) + 0.2909738639 \cdot \left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0694555761 \cdot \left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0140005442 \cdot \left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0008327945 \cdot \left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + \left(2 \cdot 0.0001789971\right) \cdot \left(\left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)} \cdot x \]
        2. Taylor expanded in x around -inf

          \[\leadsto \color{blue}{-1 \cdot \frac{-1 \cdot \frac{\frac{600041}{2386628} + \frac{1307076337763}{8543989815576} \cdot \frac{1}{{x}^{2}}}{{x}^{2}} - \frac{1}{2}}{x}} \]
        3. Step-by-step derivation
          1. Applied rewrites51.4%

            \[\leadsto \color{blue}{-\frac{\left(-\frac{\frac{0.15298196345929074}{x \cdot x} + 0.2514179000665374}{x \cdot x}\right) - 0.5}{x}} \]
          2. Applied rewrites51.4%

            \[\leadsto \color{blue}{\frac{\frac{\frac{0.15298196345929074}{x \cdot x} + 0.2514179000665374}{-x \cdot x} - 0.5}{-x}} \]
        4. Recombined 2 regimes into one program.
        5. Add Preprocessing

        Alternative 6: 99.5% accurate, 12.3× speedup?

        \[\begin{array}{l} x\_m = \left|x\right| \\ x\_s = \mathsf{copysign}\left(1, x\right) \\ x\_s \cdot \begin{array}{l} \mathbf{if}\;x\_m \leq 0.95:\\ \;\;\;\;\mathsf{fma}\left(-0.6665536072, x\_m \cdot x\_m, 1\right) \cdot x\_m\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{0.2514179000665374}{x\_m \cdot x\_m}}{x\_m} + \frac{0.5}{x\_m}\\ \end{array} \end{array} \]
        x\_m = (fabs.f64 x)
        x\_s = (copysign.f64 #s(literal 1 binary64) x)
        (FPCore (x_s x_m)
         :precision binary64
         (*
          x_s
          (if (<= x_m 0.95)
            (* (fma -0.6665536072 (* x_m x_m) 1.0) x_m)
            (+ (/ (/ 0.2514179000665374 (* x_m x_m)) x_m) (/ 0.5 x_m)))))
        x\_m = fabs(x);
        x\_s = copysign(1.0, x);
        double code(double x_s, double x_m) {
        	double tmp;
        	if (x_m <= 0.95) {
        		tmp = fma(-0.6665536072, (x_m * x_m), 1.0) * x_m;
        	} else {
        		tmp = ((0.2514179000665374 / (x_m * x_m)) / x_m) + (0.5 / x_m);
        	}
        	return x_s * tmp;
        }
        
        x\_m = abs(x)
        x\_s = copysign(1.0, x)
        # Branch at 0.95: fma cubic near zero, 1/x-series tail for large magnitudes.
        function code(x_s, x_m)
        	val = x_m <= 0.95 ?
        		Float64(fma(-0.6665536072, Float64(x_m * x_m), 1.0) * x_m) :
        		Float64(Float64(Float64(0.2514179000665374 / Float64(x_m * x_m)) / x_m) + Float64(0.5 / x_m))
        	return Float64(x_s * val)
        end
        
        x\_m = N[Abs[x], $MachinePrecision]
        x\_s = N[With[{TMP1 = Abs[1.0], TMP2 = Sign[x]}, TMP1 * If[TMP2 == 0, 1, TMP2]], $MachinePrecision]
        code[x$95$s_, x$95$m_] := N[(x$95$s * If[LessEqual[x$95$m, 0.95], N[(N[(-0.6665536072 * N[(x$95$m * x$95$m), $MachinePrecision] + 1.0), $MachinePrecision] * x$95$m), $MachinePrecision], N[(N[(N[(0.2514179000665374 / N[(x$95$m * x$95$m), $MachinePrecision]), $MachinePrecision] / x$95$m), $MachinePrecision] + N[(0.5 / x$95$m), $MachinePrecision]), $MachinePrecision]]), $MachinePrecision]
        
        \begin{array}{l}
        x\_m = \left|x\right|
        \\
        x\_s = \mathsf{copysign}\left(1, x\right)
        
        \\
        x\_s \cdot \begin{array}{l}
        \mathbf{if}\;x\_m \leq 0.95:\\
        \;\;\;\;\mathsf{fma}\left(-0.6665536072, x\_m \cdot x\_m, 1\right) \cdot x\_m\\
        
        \mathbf{else}:\\
        \;\;\;\;\frac{\frac{0.2514179000665374}{x\_m \cdot x\_m}}{x\_m} + \frac{0.5}{x\_m}\\
        
        
        \end{array}
        \end{array}
        
        Derivation
        1. Split input into 2 regimes
        2. if x < 0.94999999999999996

          1. Initial program 53.8%

            \[\frac{\left(\left(\left(\left(1 + 0.1049934947 \cdot \left(x \cdot x\right)\right) + 0.0424060604 \cdot \left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0072644182 \cdot \left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0005064034 \cdot \left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0001789971 \cdot \left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)}{\left(\left(\left(\left(\left(1 + 0.7715471019 \cdot \left(x \cdot x\right)\right) + 0.2909738639 \cdot \left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0694555761 \cdot \left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0140005442 \cdot \left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0008327945 \cdot \left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + \left(2 \cdot 0.0001789971\right) \cdot \left(\left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)} \cdot x \]
          2. Taylor expanded in x around 0

            \[\leadsto \color{blue}{\left(1 + \frac{-833192009}{1250000000} \cdot {x}^{2}\right)} \cdot x \]
          3. Step-by-step derivation
            1. +-commutativeN/A

              \[\leadsto \left(\frac{-833192009}{1250000000} \cdot {x}^{2} + \color{blue}{1}\right) \cdot x \]
            2. pow2N/A

              \[\leadsto \left(\frac{-833192009}{1250000000} \cdot \left(x \cdot x\right) + 1\right) \cdot x \]
            3. lift-*.f64N/A

              \[\leadsto \left(\frac{-833192009}{1250000000} \cdot \left(x \cdot x\right) + 1\right) \cdot x \]
          4. Applied rewrites50.1%

            \[\leadsto \color{blue}{\mathsf{fma}\left(-0.6665536072, x \cdot x, 1\right)} \cdot x \]

          if 0.94999999999999996 < x

          1. Initial program 53.8%

            \[\frac{\left(\left(\left(\left(1 + 0.1049934947 \cdot \left(x \cdot x\right)\right) + 0.0424060604 \cdot \left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0072644182 \cdot \left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0005064034 \cdot \left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0001789971 \cdot \left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)}{\left(\left(\left(\left(\left(1 + 0.7715471019 \cdot \left(x \cdot x\right)\right) + 0.2909738639 \cdot \left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0694555761 \cdot \left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0140005442 \cdot \left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0008327945 \cdot \left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + \left(2 \cdot 0.0001789971\right) \cdot \left(\left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)} \cdot x \]
          2. Applied rewrites53.8%

            \[\leadsto \color{blue}{\frac{\mathsf{fma}\left({x}^{10}, 0.0001789971, \mathsf{fma}\left({x}^{8}, 0.0005064034, \mathsf{fma}\left(\left(\left(x \cdot x\right) \cdot x\right) \cdot \left(\left(x \cdot x\right) \cdot x\right), 0.0072644182, \mathsf{fma}\left(\mathsf{fma}\left(0.0424060604, x \cdot x, 0.1049934947\right), x \cdot x, 1\right)\right)\right)\right) \cdot x}{\mathsf{fma}\left({x}^{12}, 0.0003579942, \mathsf{fma}\left({x}^{10}, 0.0008327945, \mathsf{fma}\left({x}^{8}, 0.0140005442, \mathsf{fma}\left(0.0694555761, \left(\left(x \cdot x\right) \cdot x\right) \cdot \left(\left(x \cdot x\right) \cdot x\right), \mathsf{fma}\left(\mathsf{fma}\left(0.2909738639, x \cdot x, 0.7715471019\right), x \cdot x, 1\right)\right)\right)\right)\right)}} \]
          3. Taylor expanded in x around inf

            \[\leadsto \color{blue}{\frac{\frac{1}{2} + \frac{600041}{2386628} \cdot \frac{1}{{x}^{2}}}{x}} \]
          4. Step-by-step derivation
            1. lower-/.f64N/A

              \[\leadsto \frac{\frac{1}{2} + \frac{600041}{2386628} \cdot \frac{1}{{x}^{2}}}{\color{blue}{x}} \]
            2. +-commutativeN/A

              \[\leadsto \frac{\frac{600041}{2386628} \cdot \frac{1}{{x}^{2}} + \frac{1}{2}}{x} \]
            3. lower-+.f64N/A

              \[\leadsto \frac{\frac{600041}{2386628} \cdot \frac{1}{{x}^{2}} + \frac{1}{2}}{x} \]
            4. associate-*r/N/A

              \[\leadsto \frac{\frac{\frac{600041}{2386628} \cdot 1}{{x}^{2}} + \frac{1}{2}}{x} \]
            5. metadata-evalN/A

              \[\leadsto \frac{\frac{\frac{600041}{2386628}}{{x}^{2}} + \frac{1}{2}}{x} \]
            6. pow2N/A

              \[\leadsto \frac{\frac{\frac{600041}{2386628}}{x \cdot x} + \frac{1}{2}}{x} \]
            7. lift-/.f64N/A

              \[\leadsto \frac{\frac{\frac{600041}{2386628}}{x \cdot x} + \frac{1}{2}}{x} \]
            8. lift-*.f64 (51.4%)

              \[\leadsto \frac{\frac{0.2514179000665374}{x \cdot x} + 0.5}{x} \]
          5. Applied rewrites51.4%

            \[\leadsto \color{blue}{\frac{\frac{0.2514179000665374}{x \cdot x} + 0.5}{x}} \]
          6. Step-by-step derivation
            1. lift-/.f64N/A

              \[\leadsto \frac{\frac{\frac{600041}{2386628}}{x \cdot x} + \frac{1}{2}}{\color{blue}{x}} \]
            2. lift-+.f64N/A

              \[\leadsto \frac{\frac{\frac{600041}{2386628}}{x \cdot x} + \frac{1}{2}}{x} \]
            3. div-addN/A

              \[\leadsto \frac{\frac{\frac{600041}{2386628}}{x \cdot x}}{x} + \color{blue}{\frac{\frac{1}{2}}{x}} \]
            4. lift-*.f64N/A

              \[\leadsto \frac{\frac{\frac{600041}{2386628}}{x \cdot x}}{x} + \frac{\frac{1}{2}}{x} \]
            5. lift-/.f64N/A

              \[\leadsto \frac{\frac{\frac{600041}{2386628}}{x \cdot x}}{x} + \frac{\frac{1}{2}}{x} \]
            6. metadata-evalN/A

              \[\leadsto \frac{\frac{\frac{600041}{2386628} \cdot 1}{x \cdot x}}{x} + \frac{\frac{1}{2}}{x} \]
            7. associate-*r/N/A

              \[\leadsto \frac{\frac{600041}{2386628} \cdot \frac{1}{x \cdot x}}{x} + \frac{\frac{1}{2}}{x} \]
            8. pow2N/A

              \[\leadsto \frac{\frac{600041}{2386628} \cdot \frac{1}{{x}^{2}}}{x} + \frac{\frac{1}{2}}{x} \]
            9. lower-+.f64N/A

              \[\leadsto \frac{\frac{600041}{2386628} \cdot \frac{1}{{x}^{2}}}{x} + \color{blue}{\frac{\frac{1}{2}}{x}} \]
            10. lower-/.f64N/A

              \[\leadsto \frac{\frac{600041}{2386628} \cdot \frac{1}{{x}^{2}}}{x} + \frac{\color{blue}{\frac{1}{2}}}{x} \]
            11. pow2N/A

              \[\leadsto \frac{\frac{600041}{2386628} \cdot \frac{1}{x \cdot x}}{x} + \frac{\frac{1}{2}}{x} \]
            12. associate-*r/N/A

              \[\leadsto \frac{\frac{\frac{600041}{2386628} \cdot 1}{x \cdot x}}{x} + \frac{\frac{1}{2}}{x} \]
            13. metadata-evalN/A

              \[\leadsto \frac{\frac{\frac{600041}{2386628}}{x \cdot x}}{x} + \frac{\frac{1}{2}}{x} \]
            14. lift-/.f64N/A

              \[\leadsto \frac{\frac{\frac{600041}{2386628}}{x \cdot x}}{x} + \frac{\frac{1}{2}}{x} \]
            15. lift-*.f64N/A

              \[\leadsto \frac{\frac{\frac{600041}{2386628}}{x \cdot x}}{x} + \frac{\frac{1}{2}}{x} \]
            16. lift-/.f64 (51.4%)

              \[\leadsto \frac{\frac{0.2514179000665374}{x \cdot x}}{x} + \frac{0.5}{\color{blue}{x}} \]
          7. Applied rewrites51.4%

            \[\leadsto \frac{\frac{0.2514179000665374}{x \cdot x}}{x} + \color{blue}{\frac{0.5}{x}} \]
        3. Recombined 2 regimes into one program.
        4. Add Preprocessing

        Alternative 7: 99.5% accurate, 14.7× speedup?

        \[\begin{array}{l} x\_m = \left|x\right| \\ x\_s = \mathsf{copysign}\left(1, x\right) \\ x\_s \cdot \begin{array}{l} \mathbf{if}\;x\_m \leq 0.95:\\ \;\;\;\;\mathsf{fma}\left(-0.6665536072, x\_m \cdot x\_m, 1\right) \cdot x\_m\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{0.2514179000665374}{x\_m \cdot x\_m} + 0.5}{x\_m}\\ \end{array} \end{array} \]
        x\_m = (fabs.f64 x)
        x\_s = (copysign.f64 #s(literal 1 binary64) x)
        (FPCore (x_s x_m)
         :precision binary64
         (*
          x_s
          (if (<= x_m 0.95)
            (* (fma -0.6665536072 (* x_m x_m) 1.0) x_m)
            (/ (+ (/ 0.2514179000665374 (* x_m x_m)) 0.5) x_m))))
        x\_m = fabs(x);
        x\_s = copysign(1.0, x);
        double code(double x_s, double x_m) {
        	double tmp;
        	if (x_m <= 0.95) {
        		tmp = fma(-0.6665536072, (x_m * x_m), 1.0) * x_m;
        	} else {
        		tmp = ((0.2514179000665374 / (x_m * x_m)) + 0.5) / x_m;
        	}
        	return x_s * tmp;
        }
        
        x\_m = abs(x)
        x\_s = copysign(1.0, x)
        # Same split at 0.95; the tail folds both terms over one division by x_m.
        function code(x_s, x_m)
        	val = x_m <= 0.95 ?
        		Float64(fma(-0.6665536072, Float64(x_m * x_m), 1.0) * x_m) :
        		Float64(Float64(Float64(0.2514179000665374 / Float64(x_m * x_m)) + 0.5) / x_m)
        	return Float64(x_s * val)
        end
        
        x\_m = N[Abs[x], $MachinePrecision]
        x\_s = N[With[{TMP1 = Abs[1.0], TMP2 = Sign[x]}, TMP1 * If[TMP2 == 0, 1, TMP2]], $MachinePrecision]
        code[x$95$s_, x$95$m_] := N[(x$95$s * If[LessEqual[x$95$m, 0.95], N[(N[(-0.6665536072 * N[(x$95$m * x$95$m), $MachinePrecision] + 1.0), $MachinePrecision] * x$95$m), $MachinePrecision], N[(N[(N[(0.2514179000665374 / N[(x$95$m * x$95$m), $MachinePrecision]), $MachinePrecision] + 0.5), $MachinePrecision] / x$95$m), $MachinePrecision]]), $MachinePrecision]
        
        \begin{array}{l}
        x\_m = \left|x\right|
        \\
        x\_s = \mathsf{copysign}\left(1, x\right)
        
        \\
        x\_s \cdot \begin{array}{l}
        \mathbf{if}\;x\_m \leq 0.95:\\
        \;\;\;\;\mathsf{fma}\left(-0.6665536072, x\_m \cdot x\_m, 1\right) \cdot x\_m\\
        
        \mathbf{else}:\\
        \;\;\;\;\frac{\frac{0.2514179000665374}{x\_m \cdot x\_m} + 0.5}{x\_m}\\
        
        
        \end{array}
        \end{array}
        
        Derivation
        1. Split input into 2 regimes
        2. if x < 0.94999999999999996

          1. Initial program 53.8%

            \[\frac{\left(\left(\left(\left(1 + 0.1049934947 \cdot \left(x \cdot x\right)\right) + 0.0424060604 \cdot \left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0072644182 \cdot \left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0005064034 \cdot \left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0001789971 \cdot \left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)}{\left(\left(\left(\left(\left(1 + 0.7715471019 \cdot \left(x \cdot x\right)\right) + 0.2909738639 \cdot \left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0694555761 \cdot \left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0140005442 \cdot \left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0008327945 \cdot \left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + \left(2 \cdot 0.0001789971\right) \cdot \left(\left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)} \cdot x \]
          2. Taylor expanded in x around 0

            \[\leadsto \color{blue}{\left(1 + \frac{-833192009}{1250000000} \cdot {x}^{2}\right)} \cdot x \]
          3. Step-by-step derivation
            1. +-commutativeN/A

              \[\leadsto \left(\frac{-833192009}{1250000000} \cdot {x}^{2} + \color{blue}{1}\right) \cdot x \]
            2. pow2N/A

              \[\leadsto \left(\frac{-833192009}{1250000000} \cdot \left(x \cdot x\right) + 1\right) \cdot x \]
            3. lift-*.f64N/A

              \[\leadsto \left(\frac{-833192009}{1250000000} \cdot \left(x \cdot x\right) + 1\right) \cdot x \]
          4. Applied rewrites50.1%

            \[\leadsto \color{blue}{\mathsf{fma}\left(-0.6665536072, x \cdot x, 1\right)} \cdot x \]

          if 0.94999999999999996 < x

          1. Initial program 53.8%

            \[\frac{\left(\left(\left(\left(1 + 0.1049934947 \cdot \left(x \cdot x\right)\right) + 0.0424060604 \cdot \left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0072644182 \cdot \left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0005064034 \cdot \left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0001789971 \cdot \left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)}{\left(\left(\left(\left(\left(1 + 0.7715471019 \cdot \left(x \cdot x\right)\right) + 0.2909738639 \cdot \left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0694555761 \cdot \left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0140005442 \cdot \left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0008327945 \cdot \left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + \left(2 \cdot 0.0001789971\right) \cdot \left(\left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)} \cdot x \]
          2. Taylor expanded in x around inf

            \[\leadsto \color{blue}{\frac{\frac{1}{2} + \frac{600041}{2386628} \cdot \frac{1}{{x}^{2}}}{x}} \]
          3. Step-by-step derivation
            1. lower-/.f64N/A

              \[\leadsto \frac{\frac{1}{2} + \frac{600041}{2386628} \cdot \frac{1}{{x}^{2}}}{\color{blue}{x}} \]
          4. Applied rewrites51.4%

            \[\leadsto \color{blue}{\frac{\frac{0.2514179000665374}{x \cdot x} + 0.5}{x}} \]
        3. Recombined 2 regimes into one program.
        4. Add Preprocessing

        Alternative 8: 99.2% accurate, 16.1× speedup?

        \[\begin{array}{l} x\_m = \left|x\right| \\ x\_s = \mathsf{copysign}\left(1, x\right) \\ x\_s \cdot \begin{array}{l} \mathbf{if}\;x\_m \leq 0.8:\\ \;\;\;\;\mathsf{fma}\left(-0.6665536072, x\_m \cdot x\_m, 1\right) \cdot x\_m\\ \mathbf{else}:\\ \;\;\;\;\frac{0.5}{x\_m}\\ \end{array} \end{array} \]
        x\_m = (fabs.f64 x)
        x\_s = (copysign.f64 #s(literal 1 binary64) x)
        (FPCore (x_s x_m)
         :precision binary64
         (*
          x_s
          (if (<= x_m 0.8) (* (fma -0.6665536072 (* x_m x_m) 1.0) x_m) (/ 0.5 x_m))))
        x\_m = fabs(x);
        x\_s = copysign(1.0, x);
        double code(double x_s, double x_m) {
        	double tmp;
        	if (x_m <= 0.8) {
        		tmp = fma(-0.6665536072, (x_m * x_m), 1.0) * x_m;
        	} else {
        		tmp = 0.5 / x_m;
        	}
        	return x_s * tmp;
        }
        
        x\_m = abs(x)
        x\_s = copysign(1.0, x)
        # Cubic (via fma) below |x| = 0.8, 1/(2x) tail above; x_s restores the sign.
        function code(x_s, x_m)
        	magnitude = x_m <= 0.8 ? Float64(fma(-0.6665536072, Float64(x_m * x_m), 1.0) * x_m) : Float64(0.5 / x_m)
        	return Float64(x_s * magnitude)
        end
        
        x\_m = N[Abs[x], $MachinePrecision]
        x\_s = N[With[{TMP1 = Abs[1.0], TMP2 = Sign[x]}, TMP1 * If[TMP2 == 0, 1, TMP2]], $MachinePrecision]
        code[x$95$s_, x$95$m_] := N[(x$95$s * If[LessEqual[x$95$m, 0.8], N[(N[(-0.6665536072 * N[(x$95$m * x$95$m), $MachinePrecision] + 1.0), $MachinePrecision] * x$95$m), $MachinePrecision], N[(0.5 / x$95$m), $MachinePrecision]]), $MachinePrecision]
        
        \begin{array}{l}
        x\_m = \left|x\right|
        \\
        x\_s = \mathsf{copysign}\left(1, x\right)
        
        \\
        x\_s \cdot \begin{array}{l}
        \mathbf{if}\;x\_m \leq 0.8:\\
        \;\;\;\;\mathsf{fma}\left(-0.6665536072, x\_m \cdot x\_m, 1\right) \cdot x\_m\\
        
        \mathbf{else}:\\
        \;\;\;\;\frac{0.5}{x\_m}\\
        
        
        \end{array}
        \end{array}
        
        Derivation
        1. Split input into 2 regimes
        2. if x < 0.80000000000000004

          1. Initial program 53.8%

            \[\frac{\left(\left(\left(\left(1 + 0.1049934947 \cdot \left(x \cdot x\right)\right) + 0.0424060604 \cdot \left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0072644182 \cdot \left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0005064034 \cdot \left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0001789971 \cdot \left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)}{\left(\left(\left(\left(\left(1 + 0.7715471019 \cdot \left(x \cdot x\right)\right) + 0.2909738639 \cdot \left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0694555761 \cdot \left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0140005442 \cdot \left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0008327945 \cdot \left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + \left(2 \cdot 0.0001789971\right) \cdot \left(\left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)} \cdot x \]
          2. Taylor expanded in x around 0

            \[\leadsto \color{blue}{\left(1 + \frac{-833192009}{1250000000} \cdot {x}^{2}\right)} \cdot x \]
          3. Step-by-step derivation
            1. +-commutative — N/A

              \[\leadsto \left(\frac{-833192009}{1250000000} \cdot {x}^{2} + \color{blue}{1}\right) \cdot x \]
            2. pow2 — N/A

              \[\leadsto \left(\frac{-833192009}{1250000000} \cdot \left(x \cdot x\right) + 1\right) \cdot x \]
            3. lift-*.f64 — N/A

              \[\leadsto \left(\frac{-833192009}{1250000000} \cdot \left(x \cdot x\right) + 1\right) \cdot x \]
          4. Applied rewrites — 50.1%

            \[\leadsto \color{blue}{\mathsf{fma}\left(-0.6665536072, x \cdot x, 1\right)} \cdot x \]

          if 0.80000000000000004 < x

          1. Initial program 53.8%

            \[\frac{\left(\left(\left(\left(1 + 0.1049934947 \cdot \left(x \cdot x\right)\right) + 0.0424060604 \cdot \left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0072644182 \cdot \left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0005064034 \cdot \left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0001789971 \cdot \left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)}{\left(\left(\left(\left(\left(1 + 0.7715471019 \cdot \left(x \cdot x\right)\right) + 0.2909738639 \cdot \left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0694555761 \cdot \left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0140005442 \cdot \left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0008327945 \cdot \left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + \left(2 \cdot 0.0001789971\right) \cdot \left(\left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)} \cdot x \]
          2. Taylor expanded in x around inf

            \[\leadsto \color{blue}{\frac{\frac{1}{2}}{x}} \]
          3. Step-by-step derivation
            1. lower-/.f64 — 51.6%

              \[\leadsto \frac{0.5}{\color{blue}{x}} \]
          4. Applied rewrites — 51.6%

            \[\leadsto \color{blue}{\frac{0.5}{x}} \]
        3. Recombined 2 regimes into one program.
        4. Add Preprocessing

        Alternative 9: 98.9% accurate, 31.0× speedup?

        \[\begin{array}{l} x\_m = \left|x\right| \\ x\_s = \mathsf{copysign}\left(1, x\right) \\ x\_s \cdot \begin{array}{l} \mathbf{if}\;x\_m \leq 0.7:\\ \;\;\;\;x\_m\\ \mathbf{else}:\\ \;\;\;\;\frac{0.5}{x\_m}\\ \end{array} \end{array} \]
        x\_m = (fabs.f64 x)
        x\_s = (copysign.f64 #s(literal 1 binary64) x)
        (FPCore (x_s x_m)
         :precision binary64
         (* x_s (if (<= x_m 0.7) x_m (/ 0.5 x_m))))
        x\_m = fabs(x);
        x\_s = copysign(1.0, x);
        /* Piecewise Dawson approximation on x_m = |x|: identity below the
         * 0.7 split point, asymptotic 1/(2x) above; x_s restores the sign. */
        double code(double x_s, double x_m) {
        	return x_s * (x_m <= 0.7 ? x_m : 0.5 / x_m);
        }
        
        x\_m = abs(x)
        x\_s = sign(1.0d0, x)
        ! NaN-aware max/min helpers matching C's fmax/fmin semantics:
        ! when exactly one argument is NaN, the other argument is returned.
        ! Mixed-kind overloads promote the real(4) argument to real(8).
        module fmin_fmax_functions
            implicit none
            private
            public fmax
            public fmin

            interface fmax
                module procedure fmax88
                module procedure fmax44
                module procedure fmax84
                module procedure fmax48
            end interface
            interface fmin
                module procedure fmin88
                module procedure fmin44
                module procedure fmin84
                module procedure fmin48
            end interface
        contains
            ! x /= x is the standard IEEE NaN test for each operand below.
            real(8) function fmax88(x, y) result (res)
                real(8), intent (in) :: x
                real(8), intent (in) :: y
                if (x /= x) then
                    res = y
                else if (y /= y) then
                    res = x
                else
                    res = max(x, y)
                end if
            end function
            real(4) function fmax44(x, y) result (res)
                real(4), intent (in) :: x
                real(4), intent (in) :: y
                if (x /= x) then
                    res = y
                else if (y /= y) then
                    res = x
                else
                    res = max(x, y)
                end if
            end function
            real(8) function fmax84(x, y) result(res)
                real(8), intent (in) :: x
                real(4), intent (in) :: y
                if (x /= x) then
                    res = dble(y)
                else if (y /= y) then
                    res = x
                else
                    res = max(x, dble(y))
                end if
            end function
            real(8) function fmax48(x, y) result(res)
                real(4), intent (in) :: x
                real(8), intent (in) :: y
                if (x /= x) then
                    res = y
                else if (y /= y) then
                    res = dble(x)
                else
                    res = max(dble(x), y)
                end if
            end function
            real(8) function fmin88(x, y) result (res)
                real(8), intent (in) :: x
                real(8), intent (in) :: y
                if (x /= x) then
                    res = y
                else if (y /= y) then
                    res = x
                else
                    res = min(x, y)
                end if
            end function
            real(4) function fmin44(x, y) result (res)
                real(4), intent (in) :: x
                real(4), intent (in) :: y
                if (x /= x) then
                    res = y
                else if (y /= y) then
                    res = x
                else
                    res = min(x, y)
                end if
            end function
            real(8) function fmin84(x, y) result(res)
                real(8), intent (in) :: x
                real(4), intent (in) :: y
                if (x /= x) then
                    res = dble(y)
                else if (y /= y) then
                    res = x
                else
                    res = min(x, dble(y))
                end if
            end function
            real(8) function fmin48(x, y) result(res)
                real(4), intent (in) :: x
                real(8), intent (in) :: y
                if (x /= x) then
                    res = y
                else if (y /= y) then
                    res = dble(x)
                else
                    res = min(dble(x), y)
                end if
            end function
        end module
        
        ! Dawson approximation on the magnitude x_m = |x|: identity below the
        ! 0.7 split point, asymptotic 1/(2x) above; x_s restores the sign.
        real(8) function code(x_s, x_m)
        use fmin_fmax_functions
            real(8), intent (in) :: x_s
            real(8), intent (in) :: x_m
            real(8) :: magnitude
            if (x_m > 0.7d0) then
                magnitude = 0.5d0 / x_m
            else
                magnitude = x_m
            end if
            code = x_s * magnitude
        end function
        
        x\_m = Math.abs(x);
        x\_s = Math.copySign(1.0, x);
        // Piecewise Dawson approximation on x_m = |x|: identity below the
        // 0.7 split point, asymptotic 1/(2x) above; x_s restores the sign.
        public static double code(double x_s, double x_m) {
        	double magnitude = (x_m <= 0.7) ? x_m : 0.5 / x_m;
        	return x_s * magnitude;
        }
        
        x\_m = math.fabs(x)
        x\_s = math.copysign(1.0, x)
        def code(x_s, x_m):
        	"""Dawson approximation: |x| itself below 0.7, 1/(2|x|) above; sign reapplied."""
        	magnitude = x_m if x_m <= 0.7 else 0.5 / x_m
        	return x_s * magnitude
        
        x\_m = abs(x)
        x\_s = copysign(1.0, x)
        # Identity below |x| = 0.7, 1/(2x) tail above; x_s restores the sign.
        function code(x_s, x_m)
        	magnitude = x_m <= 0.7 ? x_m : Float64(0.5 / x_m)
        	return Float64(x_s * magnitude)
        end
        
        x\_m = abs(x);
        x\_s = sign(x) * abs(1.0);
        function tmp_2 = code(x_s, x_m)
        	tmp = 0.0;
        	if (x_m <= 0.7)
        		tmp = x_m;
        	else
        		tmp = 0.5 / x_m;
        	end
        	tmp_2 = x_s * tmp;
        end
        
        x\_m = N[Abs[x], $MachinePrecision]
        x\_s = N[With[{TMP1 = Abs[1.0], TMP2 = Sign[x]}, TMP1 * If[TMP2 == 0, 1, TMP2]], $MachinePrecision]
        code[x$95$s_, x$95$m_] := N[(x$95$s * If[LessEqual[x$95$m, 0.7], x$95$m, N[(0.5 / x$95$m), $MachinePrecision]]), $MachinePrecision]
        
        \begin{array}{l}
        x\_m = \left|x\right|
        \\
        x\_s = \mathsf{copysign}\left(1, x\right)
        
        \\
        x\_s \cdot \begin{array}{l}
        \mathbf{if}\;x\_m \leq 0.7:\\
        \;\;\;\;x\_m\\
        
        \mathbf{else}:\\
        \;\;\;\;\frac{0.5}{x\_m}\\
        
        
        \end{array}
        \end{array}
        
        Derivation
        1. Split input into 2 regimes
        2. if x < 0.69999999999999996

          1. Initial program 53.8%

            \[\frac{\left(\left(\left(\left(1 + 0.1049934947 \cdot \left(x \cdot x\right)\right) + 0.0424060604 \cdot \left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0072644182 \cdot \left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0005064034 \cdot \left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0001789971 \cdot \left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)}{\left(\left(\left(\left(\left(1 + 0.7715471019 \cdot \left(x \cdot x\right)\right) + 0.2909738639 \cdot \left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0694555761 \cdot \left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0140005442 \cdot \left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0008327945 \cdot \left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + \left(2 \cdot 0.0001789971\right) \cdot \left(\left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)} \cdot x \]
          2. Taylor expanded in x around 0

            \[\leadsto \color{blue}{x} \]
          3. Step-by-step derivation
            1. Applied rewrites — 51.1%

              \[\leadsto \color{blue}{x} \]

            if 0.69999999999999996 < x

            1. Initial program 53.8%

              \[\frac{\left(\left(\left(\left(1 + 0.1049934947 \cdot \left(x \cdot x\right)\right) + 0.0424060604 \cdot \left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0072644182 \cdot \left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0005064034 \cdot \left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0001789971 \cdot \left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)}{\left(\left(\left(\left(\left(1 + 0.7715471019 \cdot \left(x \cdot x\right)\right) + 0.2909738639 \cdot \left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0694555761 \cdot \left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0140005442 \cdot \left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0008327945 \cdot \left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + \left(2 \cdot 0.0001789971\right) \cdot \left(\left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)} \cdot x \]
            2. Taylor expanded in x around inf

              \[\leadsto \color{blue}{\frac{\frac{1}{2}}{x}} \]
            3. Step-by-step derivation
              1. lower-/.f64 — 51.6%

                \[\leadsto \frac{0.5}{\color{blue}{x}} \]
            4. Applied rewrites — 51.6%

              \[\leadsto \color{blue}{\frac{0.5}{x}} \]
          4. Recombined 2 regimes into one program.
          5. Add Preprocessing

          Alternative 10: 51.1% accurate, 253.1× speedup?

          \[\begin{array}{l} x\_m = \left|x\right| \\ x\_s = \mathsf{copysign}\left(1, x\right) \\ x\_s \cdot x\_m \end{array} \]
          x\_m = (fabs.f64 x)
          x\_s = (copysign.f64 #s(literal 1 binary64) x)
          (FPCore (x_s x_m) :precision binary64 (* x_s x_m))
          x\_m = fabs(x);
          x\_s = copysign(1.0, x);
          /* Linear-regime Dawson approximation: F(x) ~ x near zero, so the
             result is just the sign factor times the magnitude (i.e. x). */
          double code(double x_s, double x_m) {
          	return x_s * x_m;
          }
          
          x\_m = abs(x)
          x\_s = sign(1.0d0, x)
          ! NaN-aware fmax/fmin overloads matching C library semantics:
          ! if one argument is NaN, the other argument is returned; the
          ! mixed-kind variants promote the real(4) argument via dble().
          module fmin_fmax_functions
              implicit none
              private
              public fmax
              public fmin

              interface fmax
                  module procedure fmax88
                  module procedure fmax44
                  module procedure fmax84
                  module procedure fmax48
              end interface
              interface fmin
                  module procedure fmin88
                  module procedure fmin44
                  module procedure fmin84
                  module procedure fmin48
              end interface
          contains
              ! x /= x is the standard IEEE NaN test; merge(a, b, mask) picks a
              ! when the mask is true, screening NaN operands out before max/min.
              real(8) function fmax88(x, y) result (res)
                  real(8), intent (in) :: x
                  real(8), intent (in) :: y
                  res = merge(y, merge(x, max(x, y), y /= y), x /= x)
              end function
              real(4) function fmax44(x, y) result (res)
                  real(4), intent (in) :: x
                  real(4), intent (in) :: y
                  res = merge(y, merge(x, max(x, y), y /= y), x /= x)
              end function
              real(8) function fmax84(x, y) result(res)
                  real(8), intent (in) :: x
                  real(4), intent (in) :: y
                  res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
              end function
              real(8) function fmax48(x, y) result(res)
                  real(4), intent (in) :: x
                  real(8), intent (in) :: y
                  res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
              end function
              real(8) function fmin88(x, y) result (res)
                  real(8), intent (in) :: x
                  real(8), intent (in) :: y
                  res = merge(y, merge(x, min(x, y), y /= y), x /= x)
              end function
              real(4) function fmin44(x, y) result (res)
                  real(4), intent (in) :: x
                  real(4), intent (in) :: y
                  res = merge(y, merge(x, min(x, y), y /= y), x /= x)
              end function
              real(8) function fmin84(x, y) result(res)
                  real(8), intent (in) :: x
                  real(4), intent (in) :: y
                  res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
              end function
              real(8) function fmin48(x, y) result(res)
                  real(4), intent (in) :: x
                  real(8), intent (in) :: y
                  res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
              end function
          end module
          
          ! Linear-regime Dawson approximation: F(x) ~ x near zero, so the
          ! magnitude is returned with the original sign reapplied (i.e. x).
          real(8) function code(x_s, x_m)
          use fmin_fmax_functions
              real(8), intent (in) :: x_s
              real(8), intent (in) :: x_m
              code = x_s * x_m
          end function
          
          x\_m = Math.abs(x);
          x\_s = Math.copySign(1.0, x);
          // Linear-regime Dawson approximation: recombines sign and magnitude (== x).
          public static double code(double x_s, double x_m) {
          	return x_s * x_m;
          }
          
          x\_m = math.fabs(x)
          x\_s = math.copysign(1.0, x)
          def code(x_s, x_m):
          	"""Linear-regime Dawson approximation: sign times magnitude (i.e. x)."""
          	return x_s * x_m
          
          x\_m = abs(x)
          x\_s = copysign(1.0, x)
          # Linear-regime Dawson approximation: recombines sign and magnitude (== x).
          function code(x_s, x_m)
          	return Float64(x_s * x_m)
          end
          
          x\_m = abs(x);
          x\_s = sign(x) * abs(1.0);
          % Linear-regime Dawson approximation: recombines sign and magnitude (== x).
          function tmp = code(x_s, x_m)
          	tmp = x_s * x_m;
          end
          
          x\_m = N[Abs[x], $MachinePrecision]
          x\_s = N[With[{TMP1 = Abs[1.0], TMP2 = Sign[x]}, TMP1 * If[TMP2 == 0, 1, TMP2]], $MachinePrecision]
          code[x$95$s_, x$95$m_] := N[(x$95$s * x$95$m), $MachinePrecision]
          
          \begin{array}{l}
          x\_m = \left|x\right|
          \\
          x\_s = \mathsf{copysign}\left(1, x\right)
          
          \\
          x\_s \cdot x\_m
          \end{array}
          
          Derivation
          1. Initial program 53.8%

            \[\frac{\left(\left(\left(\left(1 + 0.1049934947 \cdot \left(x \cdot x\right)\right) + 0.0424060604 \cdot \left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0072644182 \cdot \left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0005064034 \cdot \left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0001789971 \cdot \left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)}{\left(\left(\left(\left(\left(1 + 0.7715471019 \cdot \left(x \cdot x\right)\right) + 0.2909738639 \cdot \left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0694555761 \cdot \left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0140005442 \cdot \left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0008327945 \cdot \left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + \left(2 \cdot 0.0001789971\right) \cdot \left(\left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)} \cdot x \]
          2. Taylor expanded in x around 0

            \[\leadsto \color{blue}{x} \]
          3. Step-by-step derivation
            1. Applied rewrites — 51.1%

              \[\leadsto \color{blue}{x} \]
            2. Add Preprocessing

            Reproduce

            ?
            herbie shell --seed 2025140 
            (FPCore (x)
              :name "Jmat.Real.dawson"
              :precision binary64
              (* (/ (+ (+ (+ (+ (+ 1.0 (* 0.1049934947 (* x x))) (* 0.0424060604 (* (* x x) (* x x)))) (* 0.0072644182 (* (* (* x x) (* x x)) (* x x)))) (* 0.0005064034 (* (* (* (* x x) (* x x)) (* x x)) (* x x)))) (* 0.0001789971 (* (* (* (* (* x x) (* x x)) (* x x)) (* x x)) (* x x)))) (+ (+ (+ (+ (+ (+ 1.0 (* 0.7715471019 (* x x))) (* 0.2909738639 (* (* x x) (* x x)))) (* 0.0694555761 (* (* (* x x) (* x x)) (* x x)))) (* 0.0140005442 (* (* (* (* x x) (* x x)) (* x x)) (* x x)))) (* 0.0008327945 (* (* (* (* (* x x) (* x x)) (* x x)) (* x x)) (* x x)))) (* (* 2.0 0.0001789971) (* (* (* (* (* (* x x) (* x x)) (* x x)) (* x x)) (* x x)) (* x x))))) x))