Jmat.Real.dawson

Percentage Accurate: 54.7% → 100.0%
Time: 5.4s
Alternatives: 8
Speedup: 15.4×

Specification

?
\[\begin{array}{l} t_0 := \left(x \cdot x\right) \cdot \left(x \cdot x\right)\\ t_1 := t\_0 \cdot \left(x \cdot x\right)\\ t_2 := t\_1 \cdot \left(x \cdot x\right)\\ t_3 := t\_2 \cdot \left(x \cdot x\right)\\ \frac{\left(\left(\left(\left(1 + 0.1049934947 \cdot \left(x \cdot x\right)\right) + 0.0424060604 \cdot t\_0\right) + 0.0072644182 \cdot t\_1\right) + 0.0005064034 \cdot t\_2\right) + 0.0001789971 \cdot t\_3}{\left(\left(\left(\left(\left(1 + 0.7715471019 \cdot \left(x \cdot x\right)\right) + 0.2909738639 \cdot t\_0\right) + 0.0694555761 \cdot t\_1\right) + 0.0140005442 \cdot t\_2\right) + 0.0008327945 \cdot t\_3\right) + \left(2 \cdot 0.0001789971\right) \cdot \left(t\_3 \cdot \left(x \cdot x\right)\right)} \cdot x \end{array} \]
(FPCore (x)
 :precision binary64
 (let* ((t_0 (* (* x x) (* x x)))
        (t_1 (* t_0 (* x x)))
        (t_2 (* t_1 (* x x)))
        (t_3 (* t_2 (* x x))))
   (*
    (/
     (+
      (+
       (+
        (+ (+ 1.0 (* 0.1049934947 (* x x))) (* 0.0424060604 t_0))
        (* 0.0072644182 t_1))
       (* 0.0005064034 t_2))
      (* 0.0001789971 t_3))
     (+
      (+
       (+
        (+
         (+ (+ 1.0 (* 0.7715471019 (* x x))) (* 0.2909738639 t_0))
         (* 0.0694555761 t_1))
        (* 0.0140005442 t_2))
       (* 0.0008327945 t_3))
      (* (* 2.0 0.0001789971) (* t_3 (* x x)))))
    x)))
/* Odd rational approximation (Dawson-function fit): P(x^2)/Q(x^2) * x
 * with a degree-10 numerator and degree-12 denominator in x.
 * Terms are accumulated left-to-right in the original order, so the
 * floating-point result is bit-identical to the single-expression form. */
double code(double x) {
	double sq = x * x;       /* x^2  */
	double p4 = sq * sq;     /* x^4  */
	double p6 = p4 * sq;     /* x^6  */
	double p8 = p6 * sq;     /* x^8  */
	double p10 = p8 * sq;    /* x^10 */
	double num = 1.0 + (0.1049934947 * sq);
	num = num + (0.0424060604 * p4);
	num = num + (0.0072644182 * p6);
	num = num + (0.0005064034 * p8);
	num = num + (0.0001789971 * p10);
	double den = 1.0 + (0.7715471019 * sq);
	den = den + (0.2909738639 * p4);
	den = den + (0.0694555761 * p6);
	den = den + (0.0140005442 * p8);
	den = den + (0.0008327945 * p10);
	den = den + ((2.0 * 0.0001789971) * (p10 * sq));
	return (num / den) * x;
}
module fmin_fmax_functions
    ! NaN-aware maximum/minimum mirroring C's fmax/fmin semantics:
    ! if exactly one argument is NaN, the other argument is returned
    ! (the intrinsic max/min give processor-dependent results with NaN).
    ! The test x /= x is true exactly when x is NaN.
    ! Mixed-kind variants promote the real(4) argument to real(8).
    implicit none
    private
    public fmax
    public fmin

    interface fmax
        module procedure fmax88
        module procedure fmax44
        module procedure fmax84
        module procedure fmax48
    end interface
    interface fmin
        module procedure fmin88
        module procedure fmin44
        module procedure fmin84
        module procedure fmin48
    end interface
contains
    real(8) function fmax88(x, y) result(res)
        real(8), intent(in) :: x
        real(8), intent(in) :: y
        if (x /= x) then
            res = y
        else if (y /= y) then
            res = x
        else
            res = max(x, y)
        end if
    end function
    real(4) function fmax44(x, y) result(res)
        real(4), intent(in) :: x
        real(4), intent(in) :: y
        if (x /= x) then
            res = y
        else if (y /= y) then
            res = x
        else
            res = max(x, y)
        end if
    end function
    real(8) function fmax84(x, y) result(res)
        real(8), intent(in) :: x
        real(4), intent(in) :: y
        if (x /= x) then
            res = dble(y)
        else if (y /= y) then
            res = x
        else
            res = max(x, dble(y))
        end if
    end function
    real(8) function fmax48(x, y) result(res)
        real(4), intent(in) :: x
        real(8), intent(in) :: y
        if (x /= x) then
            res = y
        else if (y /= y) then
            res = dble(x)
        else
            res = max(dble(x), y)
        end if
    end function
    real(8) function fmin88(x, y) result(res)
        real(8), intent(in) :: x
        real(8), intent(in) :: y
        if (x /= x) then
            res = y
        else if (y /= y) then
            res = x
        else
            res = min(x, y)
        end if
    end function
    real(4) function fmin44(x, y) result(res)
        real(4), intent(in) :: x
        real(4), intent(in) :: y
        if (x /= x) then
            res = y
        else if (y /= y) then
            res = x
        else
            res = min(x, y)
        end if
    end function
    real(8) function fmin84(x, y) result(res)
        real(8), intent(in) :: x
        real(4), intent(in) :: y
        if (x /= x) then
            res = dble(y)
        else if (y /= y) then
            res = x
        else
            res = min(x, dble(y))
        end if
    end function
    real(8) function fmin48(x, y) result(res)
        real(4), intent(in) :: x
        real(8), intent(in) :: y
        if (x /= x) then
            res = y
        else if (y /= y) then
            res = dble(x)
        else
            res = min(dble(x), y)
        end if
    end function
end module

real(8) function code(x)
! Odd rational approximation (Dawson-function fit):
!   code(x) = P(x**2) / Q(x**2) * x
! with fixed degree-10 numerator / degree-12 denominator coefficients.
! Numerator and denominator are accumulated term by term in the same
! left-to-right order as the original single expression, so the
! floating-point result is unchanged; the rewrite also keeps every
! line within the standard 132-column free-form limit.
use fmin_fmax_functions
    implicit none   ! was missing: every program unit should forbid implicit typing
    real(8), intent(in) :: x
    real(8) :: sq    ! x**2
    real(8) :: t_0   ! x**4
    real(8) :: t_1   ! x**6
    real(8) :: t_2   ! x**8
    real(8) :: t_3   ! x**10
    real(8) :: num, den
    sq = x * x
    t_0 = sq * sq
    t_1 = t_0 * sq
    t_2 = t_1 * sq
    t_3 = t_2 * sq
    num = 1.0d0 + 0.1049934947d0 * sq
    num = num + 0.0424060604d0 * t_0
    num = num + 0.0072644182d0 * t_1
    num = num + 0.0005064034d0 * t_2
    num = num + 0.0001789971d0 * t_3
    den = 1.0d0 + 0.7715471019d0 * sq
    den = den + 0.2909738639d0 * t_0
    den = den + 0.0694555761d0 * t_1
    den = den + 0.0140005442d0 * t_2
    den = den + 0.0008327945d0 * t_3
    den = den + (2.0d0 * 0.0001789971d0) * (t_3 * sq)
    code = (num / den) * x
end function
/**
 * Odd rational approximation (Dawson-function fit): P(x^2)/Q(x^2) * x
 * with degree-10 numerator and degree-12 denominator coefficients.
 * Terms are accumulated in the original left-to-right order so the
 * floating-point result matches the single-expression form exactly.
 */
public static double code(double x) {
	final double sq = x * x;     // x^2
	final double p4 = sq * sq;   // x^4
	final double p6 = p4 * sq;   // x^6
	final double p8 = p6 * sq;   // x^8
	final double p10 = p8 * sq;  // x^10
	double num = 1.0 + (0.1049934947 * sq);
	num = num + (0.0424060604 * p4);
	num = num + (0.0072644182 * p6);
	num = num + (0.0005064034 * p8);
	num = num + (0.0001789971 * p10);
	double den = 1.0 + (0.7715471019 * sq);
	den = den + (0.2909738639 * p4);
	den = den + (0.0694555761 * p6);
	den = den + (0.0140005442 * p8);
	den = den + (0.0008327945 * p10);
	den = den + ((2.0 * 0.0001789971) * (p10 * sq));
	return (num / den) * x;
}
def code(x):
	"""Odd rational approximation (Dawson-function fit).

	Evaluates P(x**2) / Q(x**2) * x with fixed degree-10 numerator and
	degree-12 denominator coefficients. Terms are accumulated in the
	original left-to-right order, so the float result is identical to
	the single-expression form.
	"""
	sq = x * x       # x^2
	p4 = sq * sq     # x^4
	p6 = p4 * sq     # x^6
	p8 = p6 * sq     # x^8
	p10 = p8 * sq    # x^10
	num = 1.0 + 0.1049934947 * sq
	num = num + 0.0424060604 * p4
	num = num + 0.0072644182 * p6
	num = num + 0.0005064034 * p8
	num = num + 0.0001789971 * p10
	den = 1.0 + 0.7715471019 * sq
	den = den + 0.2909738639 * p4
	den = den + 0.0694555761 * p6
	den = den + 0.0140005442 * p8
	den = den + 0.0008327945 * p10
	den = den + (2.0 * 0.0001789971) * (p10 * sq)
	return (num / den) * x
function code(x)
	# Odd rational approximation (Dawson-function fit): P(x^2)/Q(x^2) * x.
	# Every intermediate is wrapped in Float64(...) to pin the evaluation
	# to binary64 regardless of the input's numeric type.
	# t_0..t_3 hold x^4, x^6, x^8, x^10 respectively.
	t_0 = Float64(Float64(x * x) * Float64(x * x))
	t_1 = Float64(t_0 * Float64(x * x))
	t_2 = Float64(t_1 * Float64(x * x))
	t_3 = Float64(t_2 * Float64(x * x))
	# Degree-10 numerator over degree-12 denominator, then scale by x.
	return Float64(Float64(Float64(Float64(Float64(Float64(Float64(1.0 + Float64(0.1049934947 * Float64(x * x))) + Float64(0.0424060604 * t_0)) + Float64(0.0072644182 * t_1)) + Float64(0.0005064034 * t_2)) + Float64(0.0001789971 * t_3)) / Float64(Float64(Float64(Float64(Float64(Float64(1.0 + Float64(0.7715471019 * Float64(x * x))) + Float64(0.2909738639 * t_0)) + Float64(0.0694555761 * t_1)) + Float64(0.0140005442 * t_2)) + Float64(0.0008327945 * t_3)) + Float64(Float64(2.0 * 0.0001789971) * Float64(t_3 * Float64(x * x))))) * x)
end
function tmp = code(x)
	% Odd rational approximation (Dawson-function fit): P(x^2)/Q(x^2) * x
	% with degree-10 numerator and degree-12 denominator coefficients.
	% Terms are accumulated in the original left-to-right order so the
	% floating-point result matches the single-expression form.
	sq = x * x;       % x^2
	p4 = sq * sq;     % x^4
	p6 = p4 * sq;     % x^6
	p8 = p6 * sq;     % x^8
	p10 = p8 * sq;    % x^10
	num = 1.0 + 0.1049934947 * sq;
	num = num + 0.0424060604 * p4;
	num = num + 0.0072644182 * p6;
	num = num + 0.0005064034 * p8;
	num = num + 0.0001789971 * p10;
	den = 1.0 + 0.7715471019 * sq;
	den = den + 0.2909738639 * p4;
	den = den + 0.0694555761 * p6;
	den = den + 0.0140005442 * p8;
	den = den + 0.0008327945 * p10;
	den = den + (2.0 * 0.0001789971) * (p10 * sq);
	tmp = (num / den) * x;
end
(* Odd rational approximation (Dawson-function fit): P(x^2)/Q(x^2) * x.
   Every intermediate is rounded with N[..., $MachinePrecision] to keep the
   evaluation at machine precision; t$95$0..t$95$3 hold x^4, x^6, x^8, x^10. *)
code[x_] := Block[{t$95$0 = N[(N[(x * x), $MachinePrecision] * N[(x * x), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(t$95$0 * N[(x * x), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$2 = N[(t$95$1 * N[(x * x), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$3 = N[(t$95$2 * N[(x * x), $MachinePrecision]), $MachinePrecision]}, N[(N[(N[(N[(N[(N[(N[(1.0 + N[(0.1049934947 * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(0.0424060604 * t$95$0), $MachinePrecision]), $MachinePrecision] + N[(0.0072644182 * t$95$1), $MachinePrecision]), $MachinePrecision] + N[(0.0005064034 * t$95$2), $MachinePrecision]), $MachinePrecision] + N[(0.0001789971 * t$95$3), $MachinePrecision]), $MachinePrecision] / N[(N[(N[(N[(N[(N[(1.0 + N[(0.7715471019 * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(0.2909738639 * t$95$0), $MachinePrecision]), $MachinePrecision] + N[(0.0694555761 * t$95$1), $MachinePrecision]), $MachinePrecision] + N[(0.0140005442 * t$95$2), $MachinePrecision]), $MachinePrecision] + N[(0.0008327945 * t$95$3), $MachinePrecision]), $MachinePrecision] + N[(N[(2.0 * 0.0001789971), $MachinePrecision] * N[(t$95$3 * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * x), $MachinePrecision]]]]]
\begin{array}{l}
t_0 := \left(x \cdot x\right) \cdot \left(x \cdot x\right)\\
t_1 := t\_0 \cdot \left(x \cdot x\right)\\
t_2 := t\_1 \cdot \left(x \cdot x\right)\\
t_3 := t\_2 \cdot \left(x \cdot x\right)\\
\frac{\left(\left(\left(\left(1 + 0.1049934947 \cdot \left(x \cdot x\right)\right) + 0.0424060604 \cdot t\_0\right) + 0.0072644182 \cdot t\_1\right) + 0.0005064034 \cdot t\_2\right) + 0.0001789971 \cdot t\_3}{\left(\left(\left(\left(\left(1 + 0.7715471019 \cdot \left(x \cdot x\right)\right) + 0.2909738639 \cdot t\_0\right) + 0.0694555761 \cdot t\_1\right) + 0.0140005442 \cdot t\_2\right) + 0.0008327945 \cdot t\_3\right) + \left(2 \cdot 0.0001789971\right) \cdot \left(t\_3 \cdot \left(x \cdot x\right)\right)} \cdot x
\end{array}

Local Percentage Accuracy vs ?

The average percentage accuracy by input value. The horizontal axis shows the value of an input variable; the variable is chosen in the title. The vertical axis is accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion. These can be toggled with the buttons below the plot. The line is an average, while the dots represent individual samples.

Accuracy vs Speed?

Herbie found 8 alternatives:

AlternativeAccuracySpeedup
The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed-accuracy tradeoffs.

Initial Program: 54.7% accurate, 1.0× speedup?

\[\begin{array}{l} t_0 := \left(x \cdot x\right) \cdot \left(x \cdot x\right)\\ t_1 := t\_0 \cdot \left(x \cdot x\right)\\ t_2 := t\_1 \cdot \left(x \cdot x\right)\\ t_3 := t\_2 \cdot \left(x \cdot x\right)\\ \frac{\left(\left(\left(\left(1 + 0.1049934947 \cdot \left(x \cdot x\right)\right) + 0.0424060604 \cdot t\_0\right) + 0.0072644182 \cdot t\_1\right) + 0.0005064034 \cdot t\_2\right) + 0.0001789971 \cdot t\_3}{\left(\left(\left(\left(\left(1 + 0.7715471019 \cdot \left(x \cdot x\right)\right) + 0.2909738639 \cdot t\_0\right) + 0.0694555761 \cdot t\_1\right) + 0.0140005442 \cdot t\_2\right) + 0.0008327945 \cdot t\_3\right) + \left(2 \cdot 0.0001789971\right) \cdot \left(t\_3 \cdot \left(x \cdot x\right)\right)} \cdot x \end{array} \]
(FPCore (x)
 :precision binary64
 (let* ((t_0 (* (* x x) (* x x)))
        (t_1 (* t_0 (* x x)))
        (t_2 (* t_1 (* x x)))
        (t_3 (* t_2 (* x x))))
   (*
    (/
     (+
      (+
       (+
        (+ (+ 1.0 (* 0.1049934947 (* x x))) (* 0.0424060604 t_0))
        (* 0.0072644182 t_1))
       (* 0.0005064034 t_2))
      (* 0.0001789971 t_3))
     (+
      (+
       (+
        (+
         (+ (+ 1.0 (* 0.7715471019 (* x x))) (* 0.2909738639 t_0))
         (* 0.0694555761 t_1))
        (* 0.0140005442 t_2))
       (* 0.0008327945 t_3))
      (* (* 2.0 0.0001789971) (* t_3 (* x x)))))
    x)))
/* Odd rational approximation (Dawson-function fit): P(x^2)/Q(x^2) * x
 * with a degree-10 numerator and degree-12 denominator in x.
 * Terms are accumulated left-to-right in the original order, so the
 * floating-point result is bit-identical to the single-expression form. */
double code(double x) {
	double sq = x * x;       /* x^2  */
	double p4 = sq * sq;     /* x^4  */
	double p6 = p4 * sq;     /* x^6  */
	double p8 = p6 * sq;     /* x^8  */
	double p10 = p8 * sq;    /* x^10 */
	double num = 1.0 + (0.1049934947 * sq);
	num = num + (0.0424060604 * p4);
	num = num + (0.0072644182 * p6);
	num = num + (0.0005064034 * p8);
	num = num + (0.0001789971 * p10);
	double den = 1.0 + (0.7715471019 * sq);
	den = den + (0.2909738639 * p4);
	den = den + (0.0694555761 * p6);
	den = den + (0.0140005442 * p8);
	den = den + (0.0008327945 * p10);
	den = den + ((2.0 * 0.0001789971) * (p10 * sq));
	return (num / den) * x;
}
module fmin_fmax_functions
    ! NaN-aware maximum/minimum mirroring C's fmax/fmin semantics:
    ! if exactly one argument is NaN, the other argument is returned
    ! (the intrinsic max/min give processor-dependent results with NaN).
    ! The test x /= x is true exactly when x is NaN.
    ! Mixed-kind variants promote the real(4) argument to real(8).
    implicit none
    private
    public fmax
    public fmin

    interface fmax
        module procedure fmax88
        module procedure fmax44
        module procedure fmax84
        module procedure fmax48
    end interface
    interface fmin
        module procedure fmin88
        module procedure fmin44
        module procedure fmin84
        module procedure fmin48
    end interface
contains
    real(8) function fmax88(x, y) result(res)
        real(8), intent(in) :: x
        real(8), intent(in) :: y
        if (x /= x) then
            res = y
        else if (y /= y) then
            res = x
        else
            res = max(x, y)
        end if
    end function
    real(4) function fmax44(x, y) result(res)
        real(4), intent(in) :: x
        real(4), intent(in) :: y
        if (x /= x) then
            res = y
        else if (y /= y) then
            res = x
        else
            res = max(x, y)
        end if
    end function
    real(8) function fmax84(x, y) result(res)
        real(8), intent(in) :: x
        real(4), intent(in) :: y
        if (x /= x) then
            res = dble(y)
        else if (y /= y) then
            res = x
        else
            res = max(x, dble(y))
        end if
    end function
    real(8) function fmax48(x, y) result(res)
        real(4), intent(in) :: x
        real(8), intent(in) :: y
        if (x /= x) then
            res = y
        else if (y /= y) then
            res = dble(x)
        else
            res = max(dble(x), y)
        end if
    end function
    real(8) function fmin88(x, y) result(res)
        real(8), intent(in) :: x
        real(8), intent(in) :: y
        if (x /= x) then
            res = y
        else if (y /= y) then
            res = x
        else
            res = min(x, y)
        end if
    end function
    real(4) function fmin44(x, y) result(res)
        real(4), intent(in) :: x
        real(4), intent(in) :: y
        if (x /= x) then
            res = y
        else if (y /= y) then
            res = x
        else
            res = min(x, y)
        end if
    end function
    real(8) function fmin84(x, y) result(res)
        real(8), intent(in) :: x
        real(4), intent(in) :: y
        if (x /= x) then
            res = dble(y)
        else if (y /= y) then
            res = x
        else
            res = min(x, dble(y))
        end if
    end function
    real(8) function fmin48(x, y) result(res)
        real(4), intent(in) :: x
        real(8), intent(in) :: y
        if (x /= x) then
            res = y
        else if (y /= y) then
            res = dble(x)
        else
            res = min(dble(x), y)
        end if
    end function
end module

real(8) function code(x)
! Odd rational approximation (Dawson-function fit):
!   code(x) = P(x**2) / Q(x**2) * x
! with fixed degree-10 numerator / degree-12 denominator coefficients.
! Numerator and denominator are accumulated term by term in the same
! left-to-right order as the original single expression, so the
! floating-point result is unchanged; the rewrite also keeps every
! line within the standard 132-column free-form limit.
use fmin_fmax_functions
    implicit none   ! was missing: every program unit should forbid implicit typing
    real(8), intent(in) :: x
    real(8) :: sq    ! x**2
    real(8) :: t_0   ! x**4
    real(8) :: t_1   ! x**6
    real(8) :: t_2   ! x**8
    real(8) :: t_3   ! x**10
    real(8) :: num, den
    sq = x * x
    t_0 = sq * sq
    t_1 = t_0 * sq
    t_2 = t_1 * sq
    t_3 = t_2 * sq
    num = 1.0d0 + 0.1049934947d0 * sq
    num = num + 0.0424060604d0 * t_0
    num = num + 0.0072644182d0 * t_1
    num = num + 0.0005064034d0 * t_2
    num = num + 0.0001789971d0 * t_3
    den = 1.0d0 + 0.7715471019d0 * sq
    den = den + 0.2909738639d0 * t_0
    den = den + 0.0694555761d0 * t_1
    den = den + 0.0140005442d0 * t_2
    den = den + 0.0008327945d0 * t_3
    den = den + (2.0d0 * 0.0001789971d0) * (t_3 * sq)
    code = (num / den) * x
end function
/**
 * Odd rational approximation (Dawson-function fit): P(x^2)/Q(x^2) * x
 * with degree-10 numerator and degree-12 denominator coefficients.
 * Terms are accumulated in the original left-to-right order so the
 * floating-point result matches the single-expression form exactly.
 */
public static double code(double x) {
	final double sq = x * x;     // x^2
	final double p4 = sq * sq;   // x^4
	final double p6 = p4 * sq;   // x^6
	final double p8 = p6 * sq;   // x^8
	final double p10 = p8 * sq;  // x^10
	double num = 1.0 + (0.1049934947 * sq);
	num = num + (0.0424060604 * p4);
	num = num + (0.0072644182 * p6);
	num = num + (0.0005064034 * p8);
	num = num + (0.0001789971 * p10);
	double den = 1.0 + (0.7715471019 * sq);
	den = den + (0.2909738639 * p4);
	den = den + (0.0694555761 * p6);
	den = den + (0.0140005442 * p8);
	den = den + (0.0008327945 * p10);
	den = den + ((2.0 * 0.0001789971) * (p10 * sq));
	return (num / den) * x;
}
def code(x):
	"""Odd rational approximation (Dawson-function fit).

	Evaluates P(x**2) / Q(x**2) * x with fixed degree-10 numerator and
	degree-12 denominator coefficients. Terms are accumulated in the
	original left-to-right order, so the float result is identical to
	the single-expression form.
	"""
	sq = x * x       # x^2
	p4 = sq * sq     # x^4
	p6 = p4 * sq     # x^6
	p8 = p6 * sq     # x^8
	p10 = p8 * sq    # x^10
	num = 1.0 + 0.1049934947 * sq
	num = num + 0.0424060604 * p4
	num = num + 0.0072644182 * p6
	num = num + 0.0005064034 * p8
	num = num + 0.0001789971 * p10
	den = 1.0 + 0.7715471019 * sq
	den = den + 0.2909738639 * p4
	den = den + 0.0694555761 * p6
	den = den + 0.0140005442 * p8
	den = den + 0.0008327945 * p10
	den = den + (2.0 * 0.0001789971) * (p10 * sq)
	return (num / den) * x
function code(x)
	# Odd rational approximation (Dawson-function fit): P(x^2)/Q(x^2) * x.
	# Every intermediate is wrapped in Float64(...) to pin the evaluation
	# to binary64 regardless of the input's numeric type.
	# t_0..t_3 hold x^4, x^6, x^8, x^10 respectively.
	t_0 = Float64(Float64(x * x) * Float64(x * x))
	t_1 = Float64(t_0 * Float64(x * x))
	t_2 = Float64(t_1 * Float64(x * x))
	t_3 = Float64(t_2 * Float64(x * x))
	# Degree-10 numerator over degree-12 denominator, then scale by x.
	return Float64(Float64(Float64(Float64(Float64(Float64(Float64(1.0 + Float64(0.1049934947 * Float64(x * x))) + Float64(0.0424060604 * t_0)) + Float64(0.0072644182 * t_1)) + Float64(0.0005064034 * t_2)) + Float64(0.0001789971 * t_3)) / Float64(Float64(Float64(Float64(Float64(Float64(1.0 + Float64(0.7715471019 * Float64(x * x))) + Float64(0.2909738639 * t_0)) + Float64(0.0694555761 * t_1)) + Float64(0.0140005442 * t_2)) + Float64(0.0008327945 * t_3)) + Float64(Float64(2.0 * 0.0001789971) * Float64(t_3 * Float64(x * x))))) * x)
end
function tmp = code(x)
	% Odd rational approximation (Dawson-function fit): P(x^2)/Q(x^2) * x
	% with degree-10 numerator and degree-12 denominator coefficients.
	% Terms are accumulated in the original left-to-right order so the
	% floating-point result matches the single-expression form.
	sq = x * x;       % x^2
	p4 = sq * sq;     % x^4
	p6 = p4 * sq;     % x^6
	p8 = p6 * sq;     % x^8
	p10 = p8 * sq;    % x^10
	num = 1.0 + 0.1049934947 * sq;
	num = num + 0.0424060604 * p4;
	num = num + 0.0072644182 * p6;
	num = num + 0.0005064034 * p8;
	num = num + 0.0001789971 * p10;
	den = 1.0 + 0.7715471019 * sq;
	den = den + 0.2909738639 * p4;
	den = den + 0.0694555761 * p6;
	den = den + 0.0140005442 * p8;
	den = den + 0.0008327945 * p10;
	den = den + (2.0 * 0.0001789971) * (p10 * sq);
	tmp = (num / den) * x;
end
(* Odd rational approximation (Dawson-function fit): P(x^2)/Q(x^2) * x.
   Every intermediate is rounded with N[..., $MachinePrecision] to keep the
   evaluation at machine precision; t$95$0..t$95$3 hold x^4, x^6, x^8, x^10. *)
code[x_] := Block[{t$95$0 = N[(N[(x * x), $MachinePrecision] * N[(x * x), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(t$95$0 * N[(x * x), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$2 = N[(t$95$1 * N[(x * x), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$3 = N[(t$95$2 * N[(x * x), $MachinePrecision]), $MachinePrecision]}, N[(N[(N[(N[(N[(N[(N[(1.0 + N[(0.1049934947 * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(0.0424060604 * t$95$0), $MachinePrecision]), $MachinePrecision] + N[(0.0072644182 * t$95$1), $MachinePrecision]), $MachinePrecision] + N[(0.0005064034 * t$95$2), $MachinePrecision]), $MachinePrecision] + N[(0.0001789971 * t$95$3), $MachinePrecision]), $MachinePrecision] / N[(N[(N[(N[(N[(N[(1.0 + N[(0.7715471019 * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(0.2909738639 * t$95$0), $MachinePrecision]), $MachinePrecision] + N[(0.0694555761 * t$95$1), $MachinePrecision]), $MachinePrecision] + N[(0.0140005442 * t$95$2), $MachinePrecision]), $MachinePrecision] + N[(0.0008327945 * t$95$3), $MachinePrecision]), $MachinePrecision] + N[(N[(2.0 * 0.0001789971), $MachinePrecision] * N[(t$95$3 * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * x), $MachinePrecision]]]]]
\begin{array}{l}
t_0 := \left(x \cdot x\right) \cdot \left(x \cdot x\right)\\
t_1 := t\_0 \cdot \left(x \cdot x\right)\\
t_2 := t\_1 \cdot \left(x \cdot x\right)\\
t_3 := t\_2 \cdot \left(x \cdot x\right)\\
\frac{\left(\left(\left(\left(1 + 0.1049934947 \cdot \left(x \cdot x\right)\right) + 0.0424060604 \cdot t\_0\right) + 0.0072644182 \cdot t\_1\right) + 0.0005064034 \cdot t\_2\right) + 0.0001789971 \cdot t\_3}{\left(\left(\left(\left(\left(1 + 0.7715471019 \cdot \left(x \cdot x\right)\right) + 0.2909738639 \cdot t\_0\right) + 0.0694555761 \cdot t\_1\right) + 0.0140005442 \cdot t\_2\right) + 0.0008327945 \cdot t\_3\right) + \left(2 \cdot 0.0001789971\right) \cdot \left(t\_3 \cdot \left(x \cdot x\right)\right)} \cdot x
\end{array}

Alternative 1: 100.0% accurate, 0.7× speedup?

\[\begin{array}{l} t_0 := \left|x\right| \cdot \left|x\right|\\ t_1 := t\_0 \cdot t\_0\\ t_2 := t\_1 \cdot t\_0\\ t_3 := t\_2 \cdot t\_0\\ t_4 := t\_3 \cdot t\_0\\ \mathsf{copysign}\left(1, x\right) \cdot \begin{array}{l} \mathbf{if}\;\left|x\right| \leq 40000:\\ \;\;\;\;\frac{\left(\left(\left(\left(1 + 0.1049934947 \cdot t\_0\right) + 0.0424060604 \cdot t\_1\right) + 0.0072644182 \cdot t\_2\right) + 0.0005064034 \cdot t\_3\right) + 0.0001789971 \cdot t\_4}{\left(\left(\left(\left(\left(1 + 0.7715471019 \cdot t\_0\right) + 0.2909738639 \cdot t\_1\right) + 0.0694555761 \cdot t\_2\right) + 0.0140005442 \cdot t\_3\right) + 0.0008327945 \cdot t\_4\right) + \left(2 \cdot 0.0001789971\right) \cdot \left(t\_4 \cdot t\_0\right)} \cdot \left|x\right|\\ \mathbf{else}:\\ \;\;\;\;\frac{1}{\left(2 - \frac{1.0056716002661497}{t\_0}\right) \cdot \left|x\right|}\\ \end{array} \end{array} \]
(FPCore (x)
 :precision binary64
 (let* ((t_0 (* (fabs x) (fabs x)))
        (t_1 (* t_0 t_0))
        (t_2 (* t_1 t_0))
        (t_3 (* t_2 t_0))
        (t_4 (* t_3 t_0)))
   (*
    (copysign 1.0 x)
    (if (<= (fabs x) 40000.0)
      (*
       (/
        (+
         (+
          (+
           (+ (+ 1.0 (* 0.1049934947 t_0)) (* 0.0424060604 t_1))
           (* 0.0072644182 t_2))
          (* 0.0005064034 t_3))
         (* 0.0001789971 t_4))
        (+
         (+
          (+
           (+
            (+ (+ 1.0 (* 0.7715471019 t_0)) (* 0.2909738639 t_1))
            (* 0.0694555761 t_2))
           (* 0.0140005442 t_3))
          (* 0.0008327945 t_4))
         (* (* 2.0 0.0001789971) (* t_4 t_0))))
       (fabs x))
      (/ 1.0 (* (- 2.0 (/ 1.0056716002661497 t_0)) (fabs x)))))))
/* Two-regime, sign-symmetric Dawson approximation.
 * For |x| <= 4e4: rational P(s)/Q(s) * |x| with s = |x|^2.
 * Otherwise: asymptotic tail 1 / ((2 - 1.00567.../s) * |x|).
 * The sign is reattached with copysign so the function stays odd. */
double code(double x) {
	double ax = fabs(x);
	double s2 = ax * ax;     /* x^2  */
	double s4 = s2 * s2;     /* x^4  */
	double s6 = s4 * s2;     /* x^6  */
	double s8 = s6 * s2;     /* x^8  */
	double s10 = s8 * s2;    /* x^10 */
	double r;
	if (ax <= 40000.0) {
		double num = 1.0 + (0.1049934947 * s2);
		num = num + (0.0424060604 * s4);
		num = num + (0.0072644182 * s6);
		num = num + (0.0005064034 * s8);
		num = num + (0.0001789971 * s10);
		double den = 1.0 + (0.7715471019 * s2);
		den = den + (0.2909738639 * s4);
		den = den + (0.0694555761 * s6);
		den = den + (0.0140005442 * s8);
		den = den + (0.0008327945 * s10);
		den = den + ((2.0 * 0.0001789971) * (s10 * s2));
		r = (num / den) * ax;
	} else {
		r = 1.0 / ((2.0 - (1.0056716002661497 / s2)) * ax);
	}
	return copysign(1.0, x) * r;
}
/**
 * Two-regime, sign-symmetric Dawson approximation.
 * For |x| <= 4e4: rational P(s)/Q(s) * |x| with s = |x|^2.
 * Otherwise: asymptotic tail 1 / ((2 - 1.00567.../s) * |x|).
 * The sign is reattached with copySign so the function stays odd.
 */
public static double code(double x) {
	final double ax = Math.abs(x);
	final double s2 = ax * ax;     // x^2
	final double s4 = s2 * s2;     // x^4
	final double s6 = s4 * s2;     // x^6
	final double s8 = s6 * s2;     // x^8
	final double s10 = s8 * s2;    // x^10
	double r;
	if (ax <= 40000.0) {
		double num = 1.0 + (0.1049934947 * s2);
		num = num + (0.0424060604 * s4);
		num = num + (0.0072644182 * s6);
		num = num + (0.0005064034 * s8);
		num = num + (0.0001789971 * s10);
		double den = 1.0 + (0.7715471019 * s2);
		den = den + (0.2909738639 * s4);
		den = den + (0.0694555761 * s6);
		den = den + (0.0140005442 * s8);
		den = den + (0.0008327945 * s10);
		den = den + ((2.0 * 0.0001789971) * (s10 * s2));
		r = (num / den) * ax;
	} else {
		r = 1.0 / ((2.0 - (1.0056716002661497 / s2)) * ax);
	}
	return Math.copySign(1.0, x) * r;
}
def code(x):
	"""Two-regime, sign-symmetric Dawson approximation.

	For |x| <= 4e4 evaluates the rational P(s)/Q(s) * |x| with s = x**2;
	otherwise uses the asymptotic tail 1 / ((2 - 1.00567.../s) * |x|).
	The sign is reattached with copysign so the function stays odd.
	"""
	ax = math.fabs(x)
	s2 = ax * ax     # x^2
	s4 = s2 * s2     # x^4
	s6 = s4 * s2     # x^6
	s8 = s6 * s2     # x^8
	s10 = s8 * s2    # x^10
	if ax <= 40000.0:
		num = 1.0 + 0.1049934947 * s2
		num = num + 0.0424060604 * s4
		num = num + 0.0072644182 * s6
		num = num + 0.0005064034 * s8
		num = num + 0.0001789971 * s10
		den = 1.0 + 0.7715471019 * s2
		den = den + 0.2909738639 * s4
		den = den + 0.0694555761 * s6
		den = den + 0.0140005442 * s8
		den = den + 0.0008327945 * s10
		den = den + (2.0 * 0.0001789971) * (s10 * s2)
		body = (num / den) * ax
	else:
		body = 1.0 / ((2.0 - (1.0056716002661497 / s2)) * ax)
	return math.copysign(1.0, x) * body
function code(x)
	# Two-regime, sign-symmetric Dawson approximation; Float64(...) wrappers
	# pin every intermediate to binary64.
	# t_0..t_4 hold x^2, x^4, x^6, x^8, x^10 of |x|; all are computed
	# unconditionally, before the regime branch.
	t_0 = Float64(abs(x) * abs(x))
	t_1 = Float64(t_0 * t_0)
	t_2 = Float64(t_1 * t_0)
	t_3 = Float64(t_2 * t_0)
	t_4 = Float64(t_3 * t_0)
	tmp = 0.0
	if (abs(x) <= 40000.0)
		# Rational P/Q regime: degree-10 numerator over degree-12 denominator.
		tmp = Float64(Float64(Float64(Float64(Float64(Float64(Float64(1.0 + Float64(0.1049934947 * t_0)) + Float64(0.0424060604 * t_1)) + Float64(0.0072644182 * t_2)) + Float64(0.0005064034 * t_3)) + Float64(0.0001789971 * t_4)) / Float64(Float64(Float64(Float64(Float64(Float64(1.0 + Float64(0.7715471019 * t_0)) + Float64(0.2909738639 * t_1)) + Float64(0.0694555761 * t_2)) + Float64(0.0140005442 * t_3)) + Float64(0.0008327945 * t_4)) + Float64(Float64(2.0 * 0.0001789971) * Float64(t_4 * t_0)))) * abs(x));
	else
		# Asymptotic tail for large |x|.
		tmp = Float64(1.0 / Float64(Float64(2.0 - Float64(1.0056716002661497 / t_0)) * abs(x)));
	end
	# Reattach the sign of x so the function stays odd.
	return Float64(copysign(1.0, x) * tmp)
end
function tmp_2 = code(x)
	% Two-regime, sign-symmetric Dawson approximation.
	% For |x| <= 4e4: rational P(s)/Q(s) * |x| with s = |x|^2.
	% Otherwise: asymptotic tail 1 / ((2 - 1.00567.../s) * |x|).
	% The sign is reattached via sign(x) * abs(1.0) at the end.
	ax = abs(x);
	s2 = ax * ax;     % x^2
	s4 = s2 * s2;     % x^4
	s6 = s4 * s2;     % x^6
	s8 = s6 * s2;     % x^8
	s10 = s8 * s2;    % x^10
	if (ax <= 40000.0)
		num = 1.0 + 0.1049934947 * s2;
		num = num + 0.0424060604 * s4;
		num = num + 0.0072644182 * s6;
		num = num + 0.0005064034 * s8;
		num = num + 0.0001789971 * s10;
		den = 1.0 + 0.7715471019 * s2;
		den = den + 0.2909738639 * s4;
		den = den + 0.0694555761 * s6;
		den = den + 0.0140005442 * s8;
		den = den + 0.0008327945 * s10;
		den = den + (2.0 * 0.0001789971) * (s10 * s2);
		tmp = (num / den) * ax;
	else
		tmp = 1.0 / ((2.0 - (1.0056716002661497 / s2)) * ax);
	end
	tmp_2 = (sign(x) * abs(1.0)) * tmp;
end
(* Two-regime, sign-symmetric Dawson approximation at $MachinePrecision:
   rational P/Q regime for |x| <= 4e4, asymptotic tail otherwise; the sign
   factor Sign[x] is mapped to 1 when x == 0 via the inner If. *)
code[x_] := Block[{t$95$0 = N[(N[Abs[x], $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(t$95$0 * t$95$0), $MachinePrecision]}, Block[{t$95$2 = N[(t$95$1 * t$95$0), $MachinePrecision]}, Block[{t$95$3 = N[(t$95$2 * t$95$0), $MachinePrecision]}, Block[{t$95$4 = N[(t$95$3 * t$95$0), $MachinePrecision]}, N[(N[With[{TMP1 = Abs[1.0], TMP2 = Sign[x]}, TMP1 * If[TMP2 == 0, 1, TMP2]], $MachinePrecision] * If[LessEqual[N[Abs[x], $MachinePrecision], 40000.0], N[(N[(N[(N[(N[(N[(N[(1.0 + N[(0.1049934947 * t$95$0), $MachinePrecision]), $MachinePrecision] + N[(0.0424060604 * t$95$1), $MachinePrecision]), $MachinePrecision] + N[(0.0072644182 * t$95$2), $MachinePrecision]), $MachinePrecision] + N[(0.0005064034 * t$95$3), $MachinePrecision]), $MachinePrecision] + N[(0.0001789971 * t$95$4), $MachinePrecision]), $MachinePrecision] / N[(N[(N[(N[(N[(N[(1.0 + N[(0.7715471019 * t$95$0), $MachinePrecision]), $MachinePrecision] + N[(0.2909738639 * t$95$1), $MachinePrecision]), $MachinePrecision] + N[(0.0694555761 * t$95$2), $MachinePrecision]), $MachinePrecision] + N[(0.0140005442 * t$95$3), $MachinePrecision]), $MachinePrecision] + N[(0.0008327945 * t$95$4), $MachinePrecision]), $MachinePrecision] + N[(N[(2.0 * 0.0001789971), $MachinePrecision] * N[(t$95$4 * t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision], N[(1.0 / N[(N[(2.0 - N[(1.0056716002661497 / t$95$0), $MachinePrecision]), $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]), $MachinePrecision]]]]]]
\begin{array}{l}
t_0 := \left|x\right| \cdot \left|x\right|\\
t_1 := t\_0 \cdot t\_0\\
t_2 := t\_1 \cdot t\_0\\
t_3 := t\_2 \cdot t\_0\\
t_4 := t\_3 \cdot t\_0\\
\mathsf{copysign}\left(1, x\right) \cdot \begin{array}{l}
\mathbf{if}\;\left|x\right| \leq 40000:\\
\;\;\;\;\frac{\left(\left(\left(\left(1 + 0.1049934947 \cdot t\_0\right) + 0.0424060604 \cdot t\_1\right) + 0.0072644182 \cdot t\_2\right) + 0.0005064034 \cdot t\_3\right) + 0.0001789971 \cdot t\_4}{\left(\left(\left(\left(\left(1 + 0.7715471019 \cdot t\_0\right) + 0.2909738639 \cdot t\_1\right) + 0.0694555761 \cdot t\_2\right) + 0.0140005442 \cdot t\_3\right) + 0.0008327945 \cdot t\_4\right) + \left(2 \cdot 0.0001789971\right) \cdot \left(t\_4 \cdot t\_0\right)} \cdot \left|x\right|\\

\mathbf{else}:\\
\;\;\;\;\frac{1}{\left(2 - \frac{1.0056716002661497}{t\_0}\right) \cdot \left|x\right|}\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if x < 4e4

    1. Initial program 54.7%

      \[\frac{\left(\left(\left(\left(1 + 0.1049934947 \cdot \left(x \cdot x\right)\right) + 0.0424060604 \cdot \left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0072644182 \cdot \left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0005064034 \cdot \left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0001789971 \cdot \left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)}{\left(\left(\left(\left(\left(1 + 0.7715471019 \cdot \left(x \cdot x\right)\right) + 0.2909738639 \cdot \left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0694555761 \cdot \left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0140005442 \cdot \left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0008327945 \cdot \left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + \left(2 \cdot 0.0001789971\right) \cdot \left(\left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)} \cdot x \]

    if 4e4 < x

    1. Initial program 54.7%

      \[\frac{\left(\left(\left(\left(1 + 0.1049934947 \cdot \left(x \cdot x\right)\right) + 0.0424060604 \cdot \left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0072644182 \cdot \left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0005064034 \cdot \left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0001789971 \cdot \left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)}{\left(\left(\left(\left(\left(1 + 0.7715471019 \cdot \left(x \cdot x\right)\right) + 0.2909738639 \cdot \left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0694555761 \cdot \left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0140005442 \cdot \left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0008327945 \cdot \left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + \left(2 \cdot 0.0001789971\right) \cdot \left(\left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)} \cdot x \]
    2. Taylor expanded in x around inf

      \[\leadsto \color{blue}{\frac{\frac{1}{2} + \left(\frac{\frac{1307076337763}{8543989815576}}{{x}^{4}} + \left(\frac{600041}{2386628} \cdot \frac{1}{{x}^{2}} + \frac{344398180852034095277}{30586987988352776592} \cdot \frac{1}{{x}^{6}}\right)\right)}{x}} \]
    3. Step-by-step derivation
      1. Applied rewrites51.3%

        \[\leadsto \color{blue}{\frac{0.5 + \left(\frac{0.15298196345929074}{{x}^{4}} + \mathsf{fma}\left(0.2514179000665374, \frac{1}{{x}^{2}}, 11.259630434457211 \cdot \frac{1}{{x}^{6}}\right)\right)}{x}} \]
      2. Applied rewrites51.2%

        \[\leadsto \frac{1}{\color{blue}{\frac{x}{\left(\left(\frac{0.15298196345929074}{\left(\left(x \cdot x\right) \cdot x\right) \cdot x} - \frac{-0.2514179000665374}{x \cdot x}\right) - \frac{-11.259630434457211}{\left(\left(x \cdot x\right) \cdot x\right) \cdot \left(\left(x \cdot x\right) \cdot x\right)}\right) - -0.5}}} \]
      3. Taylor expanded in x around inf

        \[\leadsto \frac{1}{x \cdot \color{blue}{\left(2 - \frac{600041}{596657} \cdot \frac{1}{{x}^{2}}\right)}} \]
      4. Step-by-step derivation
        1. lower-*.f64N/A

          \[\leadsto \frac{1}{x \cdot \left(2 - \color{blue}{\frac{600041}{596657} \cdot \frac{1}{{x}^{2}}}\right)} \]
        2. lower--.f64N/A

          \[\leadsto \frac{1}{x \cdot \left(2 - \frac{600041}{596657} \cdot \color{blue}{\frac{1}{{x}^{2}}}\right)} \]
        3. lower-*.f64N/A

          \[\leadsto \frac{1}{x \cdot \left(2 - \frac{600041}{596657} \cdot \frac{1}{\color{blue}{{x}^{2}}}\right)} \]
        4. lower-/.f64N/A

          \[\leadsto \frac{1}{x \cdot \left(2 - \frac{600041}{596657} \cdot \frac{1}{{x}^{\color{blue}{2}}}\right)} \]
        5. lower-pow.f6452.2%

          \[\leadsto \frac{1}{x \cdot \left(2 - 1.0056716002661497 \cdot \frac{1}{{x}^{2}}\right)} \]
      5. Applied rewrites52.2%

        \[\leadsto \frac{1}{x \cdot \color{blue}{\left(2 - 1.0056716002661497 \cdot \frac{1}{{x}^{2}}\right)}} \]
      6. Step-by-step derivation
        1. lift-*.f64N/A

          \[\leadsto \frac{1}{x \cdot \left(2 - \color{blue}{\frac{600041}{596657} \cdot \frac{1}{{x}^{2}}}\right)} \]
        2. *-commutativeN/A

          \[\leadsto \frac{1}{\left(2 - \frac{600041}{596657} \cdot \frac{1}{{x}^{2}}\right) \cdot x} \]
        3. lower-*.f6452.2%

          \[\leadsto \frac{1}{\left(2 - 1.0056716002661497 \cdot \frac{1}{{x}^{2}}\right) \cdot x} \]
        4. lift-*.f64N/A

          \[\leadsto \frac{1}{\left(2 - \frac{600041}{596657} \cdot \frac{1}{{x}^{2}}\right) \cdot x} \]
        5. lift-/.f64N/A

          \[\leadsto \frac{1}{\left(2 - \frac{600041}{596657} \cdot \frac{1}{{x}^{2}}\right) \cdot x} \]
        6. lift-pow.f64N/A

          \[\leadsto \frac{1}{\left(2 - \frac{600041}{596657} \cdot \frac{1}{{x}^{2}}\right) \cdot x} \]
        7. pow2N/A

          \[\leadsto \frac{1}{\left(2 - \frac{600041}{596657} \cdot \frac{1}{x \cdot x}\right) \cdot x} \]
        8. mult-flip-revN/A

          \[\leadsto \frac{1}{\left(2 - \frac{\frac{600041}{596657}}{x \cdot x}\right) \cdot x} \]
        9. lower-/.f64N/A

          \[\leadsto \frac{1}{\left(2 - \frac{\frac{600041}{596657}}{x \cdot x}\right) \cdot x} \]
        10. lift-*.f6452.2%

          \[\leadsto \frac{1}{\left(2 - \frac{1.0056716002661497}{x \cdot x}\right) \cdot x} \]
      7. Applied rewrites52.2%

        \[\leadsto \frac{1}{\left(2 - \frac{1.0056716002661497}{x \cdot x}\right) \cdot \color{blue}{x}} \]
    4. Recombined 2 regimes into one program.
    5. Add Preprocessing

    Alternative 2: 99.5% accurate, 3.8× speedup

    \[\begin{array}{l} t_0 := \left|x\right| \cdot \left|x\right|\\ \mathsf{copysign}\left(1, x\right) \cdot \begin{array}{l} \mathbf{if}\;\left|x\right| \leq 1.2:\\ \;\;\;\;\mathsf{fma}\left(t\_0, -0.6665536072, 1\right) \cdot \left|x\right|\\ \mathbf{else}:\\ \;\;\;\;\frac{1}{\frac{\left|x\right|}{\frac{\frac{\frac{0.15298196345929074}{t\_0} - -0.2514179000665374}{\left|x\right|} - \frac{-11.259630434457211}{\left(\left(t\_0 \cdot \left|x\right|\right) \cdot \left|x\right|\right) \cdot \left|x\right|}}{\left|x\right|} - -0.5}}\\ \end{array} \end{array} \]
    (FPCore (x)
     :precision binary64
     (let* ((t_0 (* (fabs x) (fabs x))))
       (*
        (copysign 1.0 x)
        (if (<= (fabs x) 1.2)
          (* (fma t_0 -0.6665536072 1.0) (fabs x))
          (/
           1.0
           (/
            (fabs x)
            (-
             (/
              (-
               (/ (- (/ 0.15298196345929074 t_0) -0.2514179000665374) (fabs x))
               (/ -11.259630434457211 (* (* (* t_0 (fabs x)) (fabs x)) (fabs x))))
              (fabs x))
             -0.5)))))))
    double code(double x) {
    	double t_0 = fabs(x) * fabs(x);
    	double tmp;
    	if (fabs(x) <= 1.2) {
    		tmp = fma(t_0, -0.6665536072, 1.0) * fabs(x);
    	} else {
    		tmp = 1.0 / (fabs(x) / ((((((0.15298196345929074 / t_0) - -0.2514179000665374) / fabs(x)) - (-11.259630434457211 / (((t_0 * fabs(x)) * fabs(x)) * fabs(x)))) / fabs(x)) - -0.5));
    	}
    	return copysign(1.0, x) * tmp;
    }
    
    # Alternative 2: Dawson-style approximation evaluated on |x|, with the
    # sign of x restored via copysign. Two regimes split at |x| = 1.2.
    # Each intermediate is explicitly rounded with Float64() to pin the
    # binary64 operation order of the derived expression.
    function code(x)
    	# t_0 = |x|^2 in binary64.
    	t_0 = Float64(abs(x) * abs(x))
    	tmp = 0.0
    	if (abs(x) <= 1.2)
    		# Small regime: fma-based odd cubic (1 - 0.6665536072*x^2) * |x|.
    		tmp = Float64(fma(t_0, -0.6665536072, 1.0) * abs(x));
    	else
    		# Large regime: reciprocal of |x| divided by the asymptotic series.
    		tmp = Float64(1.0 / Float64(abs(x) / Float64(Float64(Float64(Float64(Float64(Float64(0.15298196345929074 / t_0) - -0.2514179000665374) / abs(x)) - Float64(-11.259630434457211 / Float64(Float64(Float64(t_0 * abs(x)) * abs(x)) * abs(x)))) / abs(x)) - -0.5)));
    	end
    	# Reattach the sign of the input.
    	return Float64(copysign(1.0, x) * tmp)
    end
    
    code[x_] := Block[{t$95$0 = N[(N[Abs[x], $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision]}, N[(N[With[{TMP1 = Abs[1.0], TMP2 = Sign[x]}, TMP1 * If[TMP2 == 0, 1, TMP2]], $MachinePrecision] * If[LessEqual[N[Abs[x], $MachinePrecision], 1.2], N[(N[(t$95$0 * -0.6665536072 + 1.0), $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision], N[(1.0 / N[(N[Abs[x], $MachinePrecision] / N[(N[(N[(N[(N[(N[(0.15298196345929074 / t$95$0), $MachinePrecision] - -0.2514179000665374), $MachinePrecision] / N[Abs[x], $MachinePrecision]), $MachinePrecision] - N[(-11.259630434457211 / N[(N[(N[(t$95$0 * N[Abs[x], $MachinePrecision]), $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[Abs[x], $MachinePrecision]), $MachinePrecision] - -0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]), $MachinePrecision]]
    
    \begin{array}{l}
    t_0 := \left|x\right| \cdot \left|x\right|\\
    \mathsf{copysign}\left(1, x\right) \cdot \begin{array}{l}
    \mathbf{if}\;\left|x\right| \leq 1.2:\\
    \;\;\;\;\mathsf{fma}\left(t\_0, -0.6665536072, 1\right) \cdot \left|x\right|\\
    
    \mathbf{else}:\\
    \;\;\;\;\frac{1}{\frac{\left|x\right|}{\frac{\frac{\frac{0.15298196345929074}{t\_0} - -0.2514179000665374}{\left|x\right|} - \frac{-11.259630434457211}{\left(\left(t\_0 \cdot \left|x\right|\right) \cdot \left|x\right|\right) \cdot \left|x\right|}}{\left|x\right|} - -0.5}}\\
    
    
    \end{array}
    \end{array}
    
    Derivation
    1. Split input into 2 regimes
    2. if x < 1.19999999999999996

      1. Initial program 54.7%

        \[\frac{\left(\left(\left(\left(1 + 0.1049934947 \cdot \left(x \cdot x\right)\right) + 0.0424060604 \cdot \left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0072644182 \cdot \left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0005064034 \cdot \left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0001789971 \cdot \left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)}{\left(\left(\left(\left(\left(1 + 0.7715471019 \cdot \left(x \cdot x\right)\right) + 0.2909738639 \cdot \left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0694555761 \cdot \left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0140005442 \cdot \left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0008327945 \cdot \left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + \left(2 \cdot 0.0001789971\right) \cdot \left(\left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)} \cdot x \]
      2. Taylor expanded in x around 0

        \[\leadsto \color{blue}{\left(1 + \frac{-833192009}{1250000000} \cdot {x}^{2}\right)} \cdot x \]
      3. Step-by-step derivation
        1. lower-+.f64N/A

          \[\leadsto \left(1 + \color{blue}{\frac{-833192009}{1250000000} \cdot {x}^{2}}\right) \cdot x \]
        2. lower-*.f64N/A

          \[\leadsto \left(1 + \frac{-833192009}{1250000000} \cdot \color{blue}{{x}^{2}}\right) \cdot x \]
        3. lower-pow.f6450.2%

          \[\leadsto \left(1 + -0.6665536072 \cdot {x}^{\color{blue}{2}}\right) \cdot x \]
      4. Applied rewrites50.2%

        \[\leadsto \color{blue}{\left(1 + -0.6665536072 \cdot {x}^{2}\right)} \cdot x \]
      5. Step-by-step derivation
        1. lift-+.f64N/A

          \[\leadsto \left(1 + \color{blue}{\frac{-833192009}{1250000000} \cdot {x}^{2}}\right) \cdot x \]
        2. +-commutativeN/A

          \[\leadsto \left(\frac{-833192009}{1250000000} \cdot {x}^{2} + \color{blue}{1}\right) \cdot x \]
        3. lift-*.f64N/A

          \[\leadsto \left(\frac{-833192009}{1250000000} \cdot {x}^{2} + 1\right) \cdot x \]
        4. lift-pow.f64N/A

          \[\leadsto \left(\frac{-833192009}{1250000000} \cdot {x}^{2} + 1\right) \cdot x \]
        5. pow2N/A

          \[\leadsto \left(\frac{-833192009}{1250000000} \cdot \left(x \cdot x\right) + 1\right) \cdot x \]
        6. *-commutativeN/A

          \[\leadsto \left(\left(x \cdot x\right) \cdot \frac{-833192009}{1250000000} + 1\right) \cdot x \]
        7. lower-fma.f64N/A

          \[\leadsto \mathsf{fma}\left(x \cdot x, \color{blue}{\frac{-833192009}{1250000000}}, 1\right) \cdot x \]
        8. lower-*.f6450.2%

          \[\leadsto \mathsf{fma}\left(x \cdot x, -0.6665536072, 1\right) \cdot x \]
      6. Applied rewrites50.2%

        \[\leadsto \mathsf{fma}\left(x \cdot x, \color{blue}{-0.6665536072}, 1\right) \cdot x \]

      if 1.19999999999999996 < x

      1. Initial program 54.7%

        \[\frac{\left(\left(\left(\left(1 + 0.1049934947 \cdot \left(x \cdot x\right)\right) + 0.0424060604 \cdot \left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0072644182 \cdot \left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0005064034 \cdot \left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0001789971 \cdot \left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)}{\left(\left(\left(\left(\left(1 + 0.7715471019 \cdot \left(x \cdot x\right)\right) + 0.2909738639 \cdot \left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0694555761 \cdot \left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0140005442 \cdot \left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0008327945 \cdot \left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + \left(2 \cdot 0.0001789971\right) \cdot \left(\left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)} \cdot x \]
      2. Taylor expanded in x around inf

        \[\leadsto \color{blue}{\frac{\frac{1}{2} + \left(\frac{\frac{1307076337763}{8543989815576}}{{x}^{4}} + \left(\frac{600041}{2386628} \cdot \frac{1}{{x}^{2}} + \frac{344398180852034095277}{30586987988352776592} \cdot \frac{1}{{x}^{6}}\right)\right)}{x}} \]
      3. Step-by-step derivation
        1. Applied rewrites51.3%

          \[\leadsto \color{blue}{\frac{0.5 + \left(\frac{0.15298196345929074}{{x}^{4}} + \mathsf{fma}\left(0.2514179000665374, \frac{1}{{x}^{2}}, 11.259630434457211 \cdot \frac{1}{{x}^{6}}\right)\right)}{x}} \]
        2. Applied rewrites51.2%

          \[\leadsto \frac{1}{\color{blue}{\frac{x}{\left(\left(\frac{0.15298196345929074}{\left(\left(x \cdot x\right) \cdot x\right) \cdot x} - \frac{-0.2514179000665374}{x \cdot x}\right) - \frac{-11.259630434457211}{\left(\left(x \cdot x\right) \cdot x\right) \cdot \left(\left(x \cdot x\right) \cdot x\right)}\right) - -0.5}}} \]
        3. Step-by-step derivation
          1. lift-/.f64N/A

            \[\leadsto \frac{1}{\color{blue}{\frac{x}{\left(\left(\frac{\frac{1307076337763}{8543989815576}}{\left(\left(x \cdot x\right) \cdot x\right) \cdot x} - \frac{\frac{-600041}{2386628}}{x \cdot x}\right) - \frac{\frac{-344398180852034095277}{30586987988352776592}}{\left(\left(x \cdot x\right) \cdot x\right) \cdot \left(\left(x \cdot x\right) \cdot x\right)}\right) - \frac{-1}{2}}}} \]
          2. lift-/.f64N/A

            \[\leadsto \frac{1}{\frac{x}{\color{blue}{\left(\left(\frac{\frac{1307076337763}{8543989815576}}{\left(\left(x \cdot x\right) \cdot x\right) \cdot x} - \frac{\frac{-600041}{2386628}}{x \cdot x}\right) - \frac{\frac{-344398180852034095277}{30586987988352776592}}{\left(\left(x \cdot x\right) \cdot x\right) \cdot \left(\left(x \cdot x\right) \cdot x\right)}\right) - \frac{-1}{2}}}} \]
          3. div-flip-revN/A

            \[\leadsto \frac{\left(\left(\frac{\frac{1307076337763}{8543989815576}}{\left(\left(x \cdot x\right) \cdot x\right) \cdot x} - \frac{\frac{-600041}{2386628}}{x \cdot x}\right) - \frac{\frac{-344398180852034095277}{30586987988352776592}}{\left(\left(x \cdot x\right) \cdot x\right) \cdot \left(\left(x \cdot x\right) \cdot x\right)}\right) - \frac{-1}{2}}{\color{blue}{x}} \]
          4. lower-/.f6451.3%

            \[\leadsto \frac{\left(\left(\frac{0.15298196345929074}{\left(\left(x \cdot x\right) \cdot x\right) \cdot x} - \frac{-0.2514179000665374}{x \cdot x}\right) - \frac{-11.259630434457211}{\left(\left(x \cdot x\right) \cdot x\right) \cdot \left(\left(x \cdot x\right) \cdot x\right)}\right) - -0.5}{\color{blue}{x}} \]
        4. Applied rewrites51.3%

          \[\leadsto \frac{\left(\frac{\frac{0.15298196345929074}{\left(x \cdot x\right) \cdot x} - \frac{-0.2514179000665374}{x}}{x} - \frac{-11.259630434457211}{\left(\left(\left(\left(x \cdot x\right) \cdot x\right) \cdot x\right) \cdot x\right) \cdot x}\right) - -0.5}{\color{blue}{x}} \]
        5. Step-by-step derivation
          1. lift-/.f64N/A

            \[\leadsto \frac{\left(\frac{\frac{\frac{1307076337763}{8543989815576}}{\left(x \cdot x\right) \cdot x} - \frac{\frac{-600041}{2386628}}{x}}{x} - \frac{\frac{-344398180852034095277}{30586987988352776592}}{\left(\left(\left(\left(x \cdot x\right) \cdot x\right) \cdot x\right) \cdot x\right) \cdot x}\right) - \frac{-1}{2}}{\color{blue}{x}} \]
          2. div-flipN/A

            \[\leadsto \frac{1}{\color{blue}{\frac{x}{\left(\frac{\frac{\frac{1307076337763}{8543989815576}}{\left(x \cdot x\right) \cdot x} - \frac{\frac{-600041}{2386628}}{x}}{x} - \frac{\frac{-344398180852034095277}{30586987988352776592}}{\left(\left(\left(\left(x \cdot x\right) \cdot x\right) \cdot x\right) \cdot x\right) \cdot x}\right) - \frac{-1}{2}}}} \]
          3. lower-unsound-/.f64N/A

            \[\leadsto \frac{1}{\color{blue}{\frac{x}{\left(\frac{\frac{\frac{1307076337763}{8543989815576}}{\left(x \cdot x\right) \cdot x} - \frac{\frac{-600041}{2386628}}{x}}{x} - \frac{\frac{-344398180852034095277}{30586987988352776592}}{\left(\left(\left(\left(x \cdot x\right) \cdot x\right) \cdot x\right) \cdot x\right) \cdot x}\right) - \frac{-1}{2}}}} \]
          4. lower-unsound-/.f6451.2%

            \[\leadsto \frac{1}{\frac{x}{\color{blue}{\left(\frac{\frac{0.15298196345929074}{\left(x \cdot x\right) \cdot x} - \frac{-0.2514179000665374}{x}}{x} - \frac{-11.259630434457211}{\left(\left(\left(\left(x \cdot x\right) \cdot x\right) \cdot x\right) \cdot x\right) \cdot x}\right) - -0.5}}} \]
        6. Applied rewrites51.2%

          \[\leadsto \frac{1}{\color{blue}{\frac{x}{\frac{\frac{\frac{0.15298196345929074}{x \cdot x} - -0.2514179000665374}{x} - \frac{-11.259630434457211}{\left(\left(\left(x \cdot x\right) \cdot x\right) \cdot x\right) \cdot x}}{x} - -0.5}}} \]
      4. Recombined 2 regimes into one program.
      5. Add Preprocessing

      Alternative 3: 99.5% accurate, 4.0× speedup

      \[\begin{array}{l} t_0 := \left|x\right| \cdot \left|x\right|\\ \mathsf{copysign}\left(1, x\right) \cdot \begin{array}{l} \mathbf{if}\;\left|x\right| \leq 1.2:\\ \;\;\;\;\mathsf{fma}\left(t\_0, -0.6665536072, 1\right) \cdot \left|x\right|\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{\frac{\frac{0.15298196345929074}{t\_0} - -0.2514179000665374}{\left|x\right|} - \frac{-11.259630434457211}{\left(\left(t\_0 \cdot \left|x\right|\right) \cdot \left|x\right|\right) \cdot \left|x\right|}}{\left|x\right|} - -0.5}{\left|x\right|}\\ \end{array} \end{array} \]
      (FPCore (x)
       :precision binary64
       (let* ((t_0 (* (fabs x) (fabs x))))
         (*
          (copysign 1.0 x)
          (if (<= (fabs x) 1.2)
            (* (fma t_0 -0.6665536072 1.0) (fabs x))
            (/
             (-
              (/
               (-
                (/ (- (/ 0.15298196345929074 t_0) -0.2514179000665374) (fabs x))
                (/ -11.259630434457211 (* (* (* t_0 (fabs x)) (fabs x)) (fabs x))))
               (fabs x))
              -0.5)
             (fabs x))))))
      double code(double x) {
      	double t_0 = fabs(x) * fabs(x);
      	double tmp;
      	if (fabs(x) <= 1.2) {
      		tmp = fma(t_0, -0.6665536072, 1.0) * fabs(x);
      	} else {
      		tmp = ((((((0.15298196345929074 / t_0) - -0.2514179000665374) / fabs(x)) - (-11.259630434457211 / (((t_0 * fabs(x)) * fabs(x)) * fabs(x)))) / fabs(x)) - -0.5) / fabs(x);
      	}
      	return copysign(1.0, x) * tmp;
      }
      
      # Alternative 3: same two regimes as Alternative 2, but the large-|x|
      # branch divides the series by |x| directly rather than inverting
      # |x| / series. Float64() wrappers pin the binary64 operation order.
      function code(x)
      	# t_0 = |x|^2 in binary64.
      	t_0 = Float64(abs(x) * abs(x))
      	tmp = 0.0
      	if (abs(x) <= 1.2)
      		# Small regime: fma-based odd cubic (1 - 0.6665536072*x^2) * |x|.
      		tmp = Float64(fma(t_0, -0.6665536072, 1.0) * abs(x));
      	else
      		# Large regime: asymptotic series divided by |x|.
      		tmp = Float64(Float64(Float64(Float64(Float64(Float64(Float64(0.15298196345929074 / t_0) - -0.2514179000665374) / abs(x)) - Float64(-11.259630434457211 / Float64(Float64(Float64(t_0 * abs(x)) * abs(x)) * abs(x)))) / abs(x)) - -0.5) / abs(x));
      	end
      	# Reattach the sign of the input.
      	return Float64(copysign(1.0, x) * tmp)
      end
      
      code[x_] := Block[{t$95$0 = N[(N[Abs[x], $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision]}, N[(N[With[{TMP1 = Abs[1.0], TMP2 = Sign[x]}, TMP1 * If[TMP2 == 0, 1, TMP2]], $MachinePrecision] * If[LessEqual[N[Abs[x], $MachinePrecision], 1.2], N[(N[(t$95$0 * -0.6665536072 + 1.0), $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision], N[(N[(N[(N[(N[(N[(N[(0.15298196345929074 / t$95$0), $MachinePrecision] - -0.2514179000665374), $MachinePrecision] / N[Abs[x], $MachinePrecision]), $MachinePrecision] - N[(-11.259630434457211 / N[(N[(N[(t$95$0 * N[Abs[x], $MachinePrecision]), $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[Abs[x], $MachinePrecision]), $MachinePrecision] - -0.5), $MachinePrecision] / N[Abs[x], $MachinePrecision]), $MachinePrecision]]), $MachinePrecision]]
      
      \begin{array}{l}
      t_0 := \left|x\right| \cdot \left|x\right|\\
      \mathsf{copysign}\left(1, x\right) \cdot \begin{array}{l}
      \mathbf{if}\;\left|x\right| \leq 1.2:\\
      \;\;\;\;\mathsf{fma}\left(t\_0, -0.6665536072, 1\right) \cdot \left|x\right|\\
      
      \mathbf{else}:\\
      \;\;\;\;\frac{\frac{\frac{\frac{0.15298196345929074}{t\_0} - -0.2514179000665374}{\left|x\right|} - \frac{-11.259630434457211}{\left(\left(t\_0 \cdot \left|x\right|\right) \cdot \left|x\right|\right) \cdot \left|x\right|}}{\left|x\right|} - -0.5}{\left|x\right|}\\
      
      
      \end{array}
      \end{array}
      
      Derivation
      1. Split input into 2 regimes
      2. if x < 1.19999999999999996

        1. Initial program 54.7%

          \[\frac{\left(\left(\left(\left(1 + 0.1049934947 \cdot \left(x \cdot x\right)\right) + 0.0424060604 \cdot \left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0072644182 \cdot \left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0005064034 \cdot \left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0001789971 \cdot \left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)}{\left(\left(\left(\left(\left(1 + 0.7715471019 \cdot \left(x \cdot x\right)\right) + 0.2909738639 \cdot \left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0694555761 \cdot \left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0140005442 \cdot \left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0008327945 \cdot \left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + \left(2 \cdot 0.0001789971\right) \cdot \left(\left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)} \cdot x \]
        2. Taylor expanded in x around 0

          \[\leadsto \color{blue}{\left(1 + \frac{-833192009}{1250000000} \cdot {x}^{2}\right)} \cdot x \]
        3. Step-by-step derivation
          1. lower-+.f64N/A

            \[\leadsto \left(1 + \color{blue}{\frac{-833192009}{1250000000} \cdot {x}^{2}}\right) \cdot x \]
          2. lower-*.f64N/A

            \[\leadsto \left(1 + \frac{-833192009}{1250000000} \cdot \color{blue}{{x}^{2}}\right) \cdot x \]
          3. lower-pow.f6450.2%

            \[\leadsto \left(1 + -0.6665536072 \cdot {x}^{\color{blue}{2}}\right) \cdot x \]
        4. Applied rewrites50.2%

          \[\leadsto \color{blue}{\left(1 + -0.6665536072 \cdot {x}^{2}\right)} \cdot x \]
        5. Step-by-step derivation
          1. lift-+.f64N/A

            \[\leadsto \left(1 + \color{blue}{\frac{-833192009}{1250000000} \cdot {x}^{2}}\right) \cdot x \]
          2. +-commutativeN/A

            \[\leadsto \left(\frac{-833192009}{1250000000} \cdot {x}^{2} + \color{blue}{1}\right) \cdot x \]
          3. lift-*.f64N/A

            \[\leadsto \left(\frac{-833192009}{1250000000} \cdot {x}^{2} + 1\right) \cdot x \]
          4. lift-pow.f64N/A

            \[\leadsto \left(\frac{-833192009}{1250000000} \cdot {x}^{2} + 1\right) \cdot x \]
          5. pow2N/A

            \[\leadsto \left(\frac{-833192009}{1250000000} \cdot \left(x \cdot x\right) + 1\right) \cdot x \]
          6. *-commutativeN/A

            \[\leadsto \left(\left(x \cdot x\right) \cdot \frac{-833192009}{1250000000} + 1\right) \cdot x \]
          7. lower-fma.f64N/A

            \[\leadsto \mathsf{fma}\left(x \cdot x, \color{blue}{\frac{-833192009}{1250000000}}, 1\right) \cdot x \]
          8. lower-*.f6450.2%

            \[\leadsto \mathsf{fma}\left(x \cdot x, -0.6665536072, 1\right) \cdot x \]
        6. Applied rewrites50.2%

          \[\leadsto \mathsf{fma}\left(x \cdot x, \color{blue}{-0.6665536072}, 1\right) \cdot x \]

        if 1.19999999999999996 < x

        1. Initial program 54.7%

          \[\frac{\left(\left(\left(\left(1 + 0.1049934947 \cdot \left(x \cdot x\right)\right) + 0.0424060604 \cdot \left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0072644182 \cdot \left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0005064034 \cdot \left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0001789971 \cdot \left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)}{\left(\left(\left(\left(\left(1 + 0.7715471019 \cdot \left(x \cdot x\right)\right) + 0.2909738639 \cdot \left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0694555761 \cdot \left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0140005442 \cdot \left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0008327945 \cdot \left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + \left(2 \cdot 0.0001789971\right) \cdot \left(\left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)} \cdot x \]
        2. Taylor expanded in x around inf

          \[\leadsto \color{blue}{\frac{\frac{1}{2} + \left(\frac{\frac{1307076337763}{8543989815576}}{{x}^{4}} + \left(\frac{600041}{2386628} \cdot \frac{1}{{x}^{2}} + \frac{344398180852034095277}{30586987988352776592} \cdot \frac{1}{{x}^{6}}\right)\right)}{x}} \]
        3. Step-by-step derivation
          1. Applied rewrites51.3%

            \[\leadsto \color{blue}{\frac{0.5 + \left(\frac{0.15298196345929074}{{x}^{4}} + \mathsf{fma}\left(0.2514179000665374, \frac{1}{{x}^{2}}, 11.259630434457211 \cdot \frac{1}{{x}^{6}}\right)\right)}{x}} \]
          2. Applied rewrites51.2%

            \[\leadsto \frac{1}{\color{blue}{\frac{x}{\left(\left(\frac{0.15298196345929074}{\left(\left(x \cdot x\right) \cdot x\right) \cdot x} - \frac{-0.2514179000665374}{x \cdot x}\right) - \frac{-11.259630434457211}{\left(\left(x \cdot x\right) \cdot x\right) \cdot \left(\left(x \cdot x\right) \cdot x\right)}\right) - -0.5}}} \]
          3. Step-by-step derivation
            1. lift-/.f64N/A

              \[\leadsto \frac{1}{\color{blue}{\frac{x}{\left(\left(\frac{\frac{1307076337763}{8543989815576}}{\left(\left(x \cdot x\right) \cdot x\right) \cdot x} - \frac{\frac{-600041}{2386628}}{x \cdot x}\right) - \frac{\frac{-344398180852034095277}{30586987988352776592}}{\left(\left(x \cdot x\right) \cdot x\right) \cdot \left(\left(x \cdot x\right) \cdot x\right)}\right) - \frac{-1}{2}}}} \]
            2. lift-/.f64N/A

              \[\leadsto \frac{1}{\frac{x}{\color{blue}{\left(\left(\frac{\frac{1307076337763}{8543989815576}}{\left(\left(x \cdot x\right) \cdot x\right) \cdot x} - \frac{\frac{-600041}{2386628}}{x \cdot x}\right) - \frac{\frac{-344398180852034095277}{30586987988352776592}}{\left(\left(x \cdot x\right) \cdot x\right) \cdot \left(\left(x \cdot x\right) \cdot x\right)}\right) - \frac{-1}{2}}}} \]
            3. div-flip-revN/A

              \[\leadsto \frac{\left(\left(\frac{\frac{1307076337763}{8543989815576}}{\left(\left(x \cdot x\right) \cdot x\right) \cdot x} - \frac{\frac{-600041}{2386628}}{x \cdot x}\right) - \frac{\frac{-344398180852034095277}{30586987988352776592}}{\left(\left(x \cdot x\right) \cdot x\right) \cdot \left(\left(x \cdot x\right) \cdot x\right)}\right) - \frac{-1}{2}}{\color{blue}{x}} \]
            4. lower-/.f6451.3%

              \[\leadsto \frac{\left(\left(\frac{0.15298196345929074}{\left(\left(x \cdot x\right) \cdot x\right) \cdot x} - \frac{-0.2514179000665374}{x \cdot x}\right) - \frac{-11.259630434457211}{\left(\left(x \cdot x\right) \cdot x\right) \cdot \left(\left(x \cdot x\right) \cdot x\right)}\right) - -0.5}{\color{blue}{x}} \]
          4. Applied rewrites51.3%

            \[\leadsto \frac{\left(\frac{\frac{0.15298196345929074}{\left(x \cdot x\right) \cdot x} - \frac{-0.2514179000665374}{x}}{x} - \frac{-11.259630434457211}{\left(\left(\left(\left(x \cdot x\right) \cdot x\right) \cdot x\right) \cdot x\right) \cdot x}\right) - -0.5}{\color{blue}{x}} \]
          5. Step-by-step derivation
            1. Applied rewrites51.3%

              \[\leadsto \frac{\frac{\frac{\frac{0.15298196345929074}{x \cdot x} - -0.2514179000665374}{x} - \frac{-11.259630434457211}{\left(\left(\left(x \cdot x\right) \cdot x\right) \cdot x\right) \cdot x}}{x} - -0.5}{\color{blue}{x}} \]
          6. Recombined 2 regimes into one program.
          7. Add Preprocessing

          Alternative 4: 99.4% accurate, 7.4× speedup

          \[\begin{array}{l} t_0 := \left|x\right| \cdot \left|x\right|\\ \mathsf{copysign}\left(1, x\right) \cdot \begin{array}{l} \mathbf{if}\;\left|x\right| \leq 1.2:\\ \;\;\;\;\mathsf{fma}\left(t\_0, -0.6665536072, 1\right) \cdot \left|x\right|\\ \mathbf{else}:\\ \;\;\;\;\frac{1}{\mathsf{fma}\left(\left|x\right|, 2, \left|x\right| \cdot \frac{-1.0056716002661497}{t\_0}\right)}\\ \end{array} \end{array} \]
          (FPCore (x)
           :precision binary64
           (let* ((t_0 (* (fabs x) (fabs x))))
             (*
              (copysign 1.0 x)
              (if (<= (fabs x) 1.2)
                (* (fma t_0 -0.6665536072 1.0) (fabs x))
                (/ 1.0 (fma (fabs x) 2.0 (* (fabs x) (/ -1.0056716002661497 t_0))))))))
          double code(double x) {
          	double t_0 = fabs(x) * fabs(x);
          	double tmp;
          	if (fabs(x) <= 1.2) {
          		tmp = fma(t_0, -0.6665536072, 1.0) * fabs(x);
          	} else {
          		tmp = 1.0 / fma(fabs(x), 2.0, (fabs(x) * (-1.0056716002661497 / t_0)));
          	}
          	return copysign(1.0, x) * tmp;
          }
          
          # Alternative 4: same small-|x| cubic as Alternatives 2-3; the
          # large-|x| branch collapses the series into a single fma,
          # 1 / fma(|x|, 2, |x| * (-1.0056716.../t_0)). Float64() wrappers
          # pin the binary64 operation order.
          function code(x)
          	# t_0 = |x|^2 in binary64.
          	t_0 = Float64(abs(x) * abs(x))
          	tmp = 0.0
          	if (abs(x) <= 1.2)
          		# Small regime: fma-based odd cubic (1 - 0.6665536072*x^2) * |x|.
          		tmp = Float64(fma(t_0, -0.6665536072, 1.0) * abs(x));
          	else
          		# Large regime: reciprocal of a single fused multiply-add.
          		tmp = Float64(1.0 / fma(abs(x), 2.0, Float64(abs(x) * Float64(-1.0056716002661497 / t_0))));
          	end
          	# Reattach the sign of the input.
          	return Float64(copysign(1.0, x) * tmp)
          end
          
          code[x_] := Block[{t$95$0 = N[(N[Abs[x], $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision]}, N[(N[With[{TMP1 = Abs[1.0], TMP2 = Sign[x]}, TMP1 * If[TMP2 == 0, 1, TMP2]], $MachinePrecision] * If[LessEqual[N[Abs[x], $MachinePrecision], 1.2], N[(N[(t$95$0 * -0.6665536072 + 1.0), $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision], N[(1.0 / N[(N[Abs[x], $MachinePrecision] * 2.0 + N[(N[Abs[x], $MachinePrecision] * N[(-1.0056716002661497 / t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]), $MachinePrecision]]
          
          \begin{array}{l}
          t_0 := \left|x\right| \cdot \left|x\right|\\
          \mathsf{copysign}\left(1, x\right) \cdot \begin{array}{l}
          \mathbf{if}\;\left|x\right| \leq 1.2:\\
          \;\;\;\;\mathsf{fma}\left(t\_0, -0.6665536072, 1\right) \cdot \left|x\right|\\
          
          \mathbf{else}:\\
          \;\;\;\;\frac{1}{\mathsf{fma}\left(\left|x\right|, 2, \left|x\right| \cdot \frac{-1.0056716002661497}{t\_0}\right)}\\
          
          
          \end{array}
          \end{array}
          
          Derivation
          1. Split input into 2 regimes
          2. if x < 1.19999999999999996

            1. Initial program 54.7%

              \[\frac{\left(\left(\left(\left(1 + 0.1049934947 \cdot \left(x \cdot x\right)\right) + 0.0424060604 \cdot \left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0072644182 \cdot \left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0005064034 \cdot \left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0001789971 \cdot \left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)}{\left(\left(\left(\left(\left(1 + 0.7715471019 \cdot \left(x \cdot x\right)\right) + 0.2909738639 \cdot \left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0694555761 \cdot \left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0140005442 \cdot \left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0008327945 \cdot \left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + \left(2 \cdot 0.0001789971\right) \cdot \left(\left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)} \cdot x \]
            2. Taylor expanded in x around 0

              \[\leadsto \color{blue}{\left(1 + \frac{-833192009}{1250000000} \cdot {x}^{2}\right)} \cdot x \]
            3. Step-by-step derivation
              1. lower-+.f64N/A

                \[\leadsto \left(1 + \color{blue}{\frac{-833192009}{1250000000} \cdot {x}^{2}}\right) \cdot x \]
              2. lower-*.f64N/A

                \[\leadsto \left(1 + \frac{-833192009}{1250000000} \cdot \color{blue}{{x}^{2}}\right) \cdot x \]
              3. lower-pow.f6450.2%

                \[\leadsto \left(1 + -0.6665536072 \cdot {x}^{\color{blue}{2}}\right) \cdot x \]
            4. Applied rewrites50.2%

              \[\leadsto \color{blue}{\left(1 + -0.6665536072 \cdot {x}^{2}\right)} \cdot x \]
            5. Step-by-step derivation
              1. lift-+.f64N/A

                \[\leadsto \left(1 + \color{blue}{\frac{-833192009}{1250000000} \cdot {x}^{2}}\right) \cdot x \]
              2. +-commutativeN/A

                \[\leadsto \left(\frac{-833192009}{1250000000} \cdot {x}^{2} + \color{blue}{1}\right) \cdot x \]
              3. lift-*.f64N/A

                \[\leadsto \left(\frac{-833192009}{1250000000} \cdot {x}^{2} + 1\right) \cdot x \]
              4. lift-pow.f64N/A

                \[\leadsto \left(\frac{-833192009}{1250000000} \cdot {x}^{2} + 1\right) \cdot x \]
              5. pow2N/A

                \[\leadsto \left(\frac{-833192009}{1250000000} \cdot \left(x \cdot x\right) + 1\right) \cdot x \]
              6. *-commutativeN/A

                \[\leadsto \left(\left(x \cdot x\right) \cdot \frac{-833192009}{1250000000} + 1\right) \cdot x \]
              7. lower-fma.f64N/A

                \[\leadsto \mathsf{fma}\left(x \cdot x, \color{blue}{\frac{-833192009}{1250000000}}, 1\right) \cdot x \]
              8. lower-*.f6450.2%

                \[\leadsto \mathsf{fma}\left(x \cdot x, -0.6665536072, 1\right) \cdot x \]
            6. Applied rewrites50.2%

              \[\leadsto \mathsf{fma}\left(x \cdot x, \color{blue}{-0.6665536072}, 1\right) \cdot x \]

            if 1.19999999999999996 < x

            1. Initial program 54.7%

              \[\frac{\left(\left(\left(\left(1 + 0.1049934947 \cdot \left(x \cdot x\right)\right) + 0.0424060604 \cdot \left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0072644182 \cdot \left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0005064034 \cdot \left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0001789971 \cdot \left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)}{\left(\left(\left(\left(\left(1 + 0.7715471019 \cdot \left(x \cdot x\right)\right) + 0.2909738639 \cdot \left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0694555761 \cdot \left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0140005442 \cdot \left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0008327945 \cdot \left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + \left(2 \cdot 0.0001789971\right) \cdot \left(\left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)} \cdot x \]
            2. Taylor expanded in x around inf

              \[\leadsto \color{blue}{\frac{\frac{1}{2} + \left(\frac{\frac{1307076337763}{8543989815576}}{{x}^{4}} + \left(\frac{600041}{2386628} \cdot \frac{1}{{x}^{2}} + \frac{344398180852034095277}{30586987988352776592} \cdot \frac{1}{{x}^{6}}\right)\right)}{x}} \]
            3. Step-by-step derivation
              1. Applied rewrites51.3%

                \[\leadsto \color{blue}{\frac{0.5 + \left(\frac{0.15298196345929074}{{x}^{4}} + \mathsf{fma}\left(0.2514179000665374, \frac{1}{{x}^{2}}, 11.259630434457211 \cdot \frac{1}{{x}^{6}}\right)\right)}{x}} \]
              2. Applied rewrites51.2%

                \[\leadsto \frac{1}{\color{blue}{\frac{x}{\left(\left(\frac{0.15298196345929074}{\left(\left(x \cdot x\right) \cdot x\right) \cdot x} - \frac{-0.2514179000665374}{x \cdot x}\right) - \frac{-11.259630434457211}{\left(\left(x \cdot x\right) \cdot x\right) \cdot \left(\left(x \cdot x\right) \cdot x\right)}\right) - -0.5}}} \]
              3. Taylor expanded in x around inf

                \[\leadsto \frac{1}{x \cdot \color{blue}{\left(2 - \frac{600041}{596657} \cdot \frac{1}{{x}^{2}}\right)}} \]
              4. Step-by-step derivation
                1. lower-*.f64N/A

                  \[\leadsto \frac{1}{x \cdot \left(2 - \color{blue}{\frac{600041}{596657} \cdot \frac{1}{{x}^{2}}}\right)} \]
                2. lower--.f64N/A

                  \[\leadsto \frac{1}{x \cdot \left(2 - \frac{600041}{596657} \cdot \color{blue}{\frac{1}{{x}^{2}}}\right)} \]
                3. lower-*.f64N/A

                  \[\leadsto \frac{1}{x \cdot \left(2 - \frac{600041}{596657} \cdot \frac{1}{\color{blue}{{x}^{2}}}\right)} \]
                4. lower-/.f64N/A

                  \[\leadsto \frac{1}{x \cdot \left(2 - \frac{600041}{596657} \cdot \frac{1}{{x}^{\color{blue}{2}}}\right)} \]
                5. lower-pow.f6452.2%

                  \[\leadsto \frac{1}{x \cdot \left(2 - 1.0056716002661497 \cdot \frac{1}{{x}^{2}}\right)} \]
              5. Applied rewrites52.2%

                \[\leadsto \frac{1}{x \cdot \color{blue}{\left(2 - 1.0056716002661497 \cdot \frac{1}{{x}^{2}}\right)}} \]
              6. Step-by-step derivation
                1. lift-*.f64N/A

                  \[\leadsto \frac{1}{x \cdot \left(2 - \color{blue}{\frac{600041}{596657} \cdot \frac{1}{{x}^{2}}}\right)} \]
                2. lift--.f64N/A

                  \[\leadsto \frac{1}{x \cdot \left(2 - \frac{600041}{596657} \cdot \color{blue}{\frac{1}{{x}^{2}}}\right)} \]
                3. sub-flipN/A

                  \[\leadsto \frac{1}{x \cdot \left(2 + \left(\mathsf{neg}\left(\frac{600041}{596657} \cdot \frac{1}{{x}^{2}}\right)\right)\right)} \]
                4. distribute-lft-inN/A

                  \[\leadsto \frac{1}{x \cdot 2 + x \cdot \color{blue}{\left(\mathsf{neg}\left(\frac{600041}{596657} \cdot \frac{1}{{x}^{2}}\right)\right)}} \]
                5. lower-fma.f64N/A

                  \[\leadsto \frac{1}{\mathsf{fma}\left(x, 2, x \cdot \left(\mathsf{neg}\left(\frac{600041}{596657} \cdot \frac{1}{{x}^{2}}\right)\right)\right)} \]
                6. lower-*.f64N/A

                  \[\leadsto \frac{1}{\mathsf{fma}\left(x, 2, x \cdot \left(\mathsf{neg}\left(\frac{600041}{596657} \cdot \frac{1}{{x}^{2}}\right)\right)\right)} \]
                7. lift-*.f64N/A

                  \[\leadsto \frac{1}{\mathsf{fma}\left(x, 2, x \cdot \left(\mathsf{neg}\left(\frac{600041}{596657} \cdot \frac{1}{{x}^{2}}\right)\right)\right)} \]
                8. lift-/.f64N/A

                  \[\leadsto \frac{1}{\mathsf{fma}\left(x, 2, x \cdot \left(\mathsf{neg}\left(\frac{600041}{596657} \cdot \frac{1}{{x}^{2}}\right)\right)\right)} \]
                9. lift-pow.f64N/A

                  \[\leadsto \frac{1}{\mathsf{fma}\left(x, 2, x \cdot \left(\mathsf{neg}\left(\frac{600041}{596657} \cdot \frac{1}{{x}^{2}}\right)\right)\right)} \]
                10. pow2N/A

                  \[\leadsto \frac{1}{\mathsf{fma}\left(x, 2, x \cdot \left(\mathsf{neg}\left(\frac{600041}{596657} \cdot \frac{1}{x \cdot x}\right)\right)\right)} \]
                11. mult-flip-revN/A

                  \[\leadsto \frac{1}{\mathsf{fma}\left(x, 2, x \cdot \left(\mathsf{neg}\left(\frac{\frac{600041}{596657}}{x \cdot x}\right)\right)\right)} \]
                12. distribute-neg-fracN/A

                  \[\leadsto \frac{1}{\mathsf{fma}\left(x, 2, x \cdot \frac{\mathsf{neg}\left(\frac{600041}{596657}\right)}{x \cdot x}\right)} \]
                13. lower-/.f64N/A

                  \[\leadsto \frac{1}{\mathsf{fma}\left(x, 2, x \cdot \frac{\mathsf{neg}\left(\frac{600041}{596657}\right)}{x \cdot x}\right)} \]
                14. metadata-evalN/A

                  \[\leadsto \frac{1}{\mathsf{fma}\left(x, 2, x \cdot \frac{\frac{-600041}{596657}}{x \cdot x}\right)} \]
                15. lift-*.f6452.2%

                  \[\leadsto \frac{1}{\mathsf{fma}\left(x, 2, x \cdot \frac{-1.0056716002661497}{x \cdot x}\right)} \]
              7. Applied rewrites52.2%

                \[\leadsto \frac{1}{\mathsf{fma}\left(x, 2, x \cdot \frac{-1.0056716002661497}{x \cdot x}\right)} \]
            4. Recombined 2 regimes into one program.
            5. Add Preprocessing

            Alternative 5: 99.4% accurate, 8.3× speedup?

            \[\begin{array}{l} t_0 := \left|x\right| \cdot \left|x\right|\\ \mathsf{copysign}\left(1, x\right) \cdot \begin{array}{l} \mathbf{if}\;\left|x\right| \leq 1.2:\\ \;\;\;\;\mathsf{fma}\left(t\_0, -0.6665536072, 1\right) \cdot \left|x\right|\\ \mathbf{else}:\\ \;\;\;\;\frac{1}{\left(2 - \frac{1.0056716002661497}{t\_0}\right) \cdot \left|x\right|}\\ \end{array} \end{array} \]
            ; Herbie alternative 5: odd symmetry via fabs/copysign, branching at |x| = 1.2
            ; between a cubic core and an asymptotic tail.
            (FPCore (x)
             :precision binary64
             (let* ((t_0 (* (fabs x) (fabs x))))
               (*
                (copysign 1.0 x)
                (if (<= (fabs x) 1.2)
                  (* (fma t_0 -0.6665536072 1.0) (fabs x))
                  (/ 1.0 (* (- 2.0 (/ 1.0056716002661497 t_0)) (fabs x)))))))
            double code(double x) {
            	double t_0 = fabs(x) * fabs(x);
            	double tmp;
            	if (fabs(x) <= 1.2) {
            		tmp = fma(t_0, -0.6665536072, 1.0) * fabs(x);
            	} else {
            		tmp = 1.0 / ((2.0 - (1.0056716002661497 / t_0)) * fabs(x));
            	}
            	return copysign(1.0, x) * tmp;
            }
            
            function code(x)
            	# Herbie alternative 5: odd symmetry — evaluate on |x|, reattach the sign.
            	ax = abs(x)
            	sq = Float64(ax * ax)
            	mag = if ax <= 1.2
            		Float64(fma(sq, -0.6665536072, 1.0) * ax)  # cubic core
            	else
            		Float64(1.0 / Float64(Float64(2.0 - Float64(1.0056716002661497 / sq)) * ax))  # tail
            	end
            	return Float64(copysign(1.0, x) * mag)
            end
            
            code[x_] := Block[{t$95$0 = N[(N[Abs[x], $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision]}, N[(N[With[{TMP1 = Abs[1.0], TMP2 = Sign[x]}, TMP1 * If[TMP2 == 0, 1, TMP2]], $MachinePrecision] * If[LessEqual[N[Abs[x], $MachinePrecision], 1.2], N[(N[(t$95$0 * -0.6665536072 + 1.0), $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision], N[(1.0 / N[(N[(2.0 - N[(1.0056716002661497 / t$95$0), $MachinePrecision]), $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]), $MachinePrecision]]
            
            \begin{array}{l}
            t_0 := \left|x\right| \cdot \left|x\right|\\
            \mathsf{copysign}\left(1, x\right) \cdot \begin{array}{l}
            \mathbf{if}\;\left|x\right| \leq 1.2:\\
            \;\;\;\;\mathsf{fma}\left(t\_0, -0.6665536072, 1\right) \cdot \left|x\right|\\
            
            \mathbf{else}:\\
            \;\;\;\;\frac{1}{\left(2 - \frac{1.0056716002661497}{t\_0}\right) \cdot \left|x\right|}\\
            
            
            \end{array}
            \end{array}
            
            Derivation
            1. Split input into 2 regimes
            2. if x < 1.19999999999999996

              1. Initial program 54.7%

                \[\frac{\left(\left(\left(\left(1 + 0.1049934947 \cdot \left(x \cdot x\right)\right) + 0.0424060604 \cdot \left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0072644182 \cdot \left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0005064034 \cdot \left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0001789971 \cdot \left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)}{\left(\left(\left(\left(\left(1 + 0.7715471019 \cdot \left(x \cdot x\right)\right) + 0.2909738639 \cdot \left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0694555761 \cdot \left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0140005442 \cdot \left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0008327945 \cdot \left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + \left(2 \cdot 0.0001789971\right) \cdot \left(\left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)} \cdot x \]
              2. Taylor expanded in x around 0

                \[\leadsto \color{blue}{\left(1 + \frac{-833192009}{1250000000} \cdot {x}^{2}\right)} \cdot x \]
              3. Step-by-step derivation
                1. lower-+.f64N/A

                  \[\leadsto \left(1 + \color{blue}{\frac{-833192009}{1250000000} \cdot {x}^{2}}\right) \cdot x \]
                2. lower-*.f64N/A

                  \[\leadsto \left(1 + \frac{-833192009}{1250000000} \cdot \color{blue}{{x}^{2}}\right) \cdot x \]
                3. lower-pow.f6450.2%

                  \[\leadsto \left(1 + -0.6665536072 \cdot {x}^{\color{blue}{2}}\right) \cdot x \]
              4. Applied rewrites50.2%

                \[\leadsto \color{blue}{\left(1 + -0.6665536072 \cdot {x}^{2}\right)} \cdot x \]
              5. Step-by-step derivation
                1. lift-+.f64N/A

                  \[\leadsto \left(1 + \color{blue}{\frac{-833192009}{1250000000} \cdot {x}^{2}}\right) \cdot x \]
                2. +-commutativeN/A

                  \[\leadsto \left(\frac{-833192009}{1250000000} \cdot {x}^{2} + \color{blue}{1}\right) \cdot x \]
                3. lift-*.f64N/A

                  \[\leadsto \left(\frac{-833192009}{1250000000} \cdot {x}^{2} + 1\right) \cdot x \]
                4. lift-pow.f64N/A

                  \[\leadsto \left(\frac{-833192009}{1250000000} \cdot {x}^{2} + 1\right) \cdot x \]
                5. pow2N/A

                  \[\leadsto \left(\frac{-833192009}{1250000000} \cdot \left(x \cdot x\right) + 1\right) \cdot x \]
                6. *-commutativeN/A

                  \[\leadsto \left(\left(x \cdot x\right) \cdot \frac{-833192009}{1250000000} + 1\right) \cdot x \]
                7. lower-fma.f64N/A

                  \[\leadsto \mathsf{fma}\left(x \cdot x, \color{blue}{\frac{-833192009}{1250000000}}, 1\right) \cdot x \]
                8. lower-*.f6450.2%

                  \[\leadsto \mathsf{fma}\left(x \cdot x, -0.6665536072, 1\right) \cdot x \]
              6. Applied rewrites50.2%

                \[\leadsto \mathsf{fma}\left(x \cdot x, \color{blue}{-0.6665536072}, 1\right) \cdot x \]

              if 1.19999999999999996 < x

              1. Initial program 54.7%

                \[\frac{\left(\left(\left(\left(1 + 0.1049934947 \cdot \left(x \cdot x\right)\right) + 0.0424060604 \cdot \left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0072644182 \cdot \left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0005064034 \cdot \left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0001789971 \cdot \left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)}{\left(\left(\left(\left(\left(1 + 0.7715471019 \cdot \left(x \cdot x\right)\right) + 0.2909738639 \cdot \left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0694555761 \cdot \left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0140005442 \cdot \left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0008327945 \cdot \left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + \left(2 \cdot 0.0001789971\right) \cdot \left(\left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)} \cdot x \]
              2. Taylor expanded in x around inf

                \[\leadsto \color{blue}{\frac{\frac{1}{2} + \left(\frac{\frac{1307076337763}{8543989815576}}{{x}^{4}} + \left(\frac{600041}{2386628} \cdot \frac{1}{{x}^{2}} + \frac{344398180852034095277}{30586987988352776592} \cdot \frac{1}{{x}^{6}}\right)\right)}{x}} \]
              3. Step-by-step derivation
                1. Applied rewrites51.3%

                  \[\leadsto \color{blue}{\frac{0.5 + \left(\frac{0.15298196345929074}{{x}^{4}} + \mathsf{fma}\left(0.2514179000665374, \frac{1}{{x}^{2}}, 11.259630434457211 \cdot \frac{1}{{x}^{6}}\right)\right)}{x}} \]
                2. Applied rewrites51.2%

                  \[\leadsto \frac{1}{\color{blue}{\frac{x}{\left(\left(\frac{0.15298196345929074}{\left(\left(x \cdot x\right) \cdot x\right) \cdot x} - \frac{-0.2514179000665374}{x \cdot x}\right) - \frac{-11.259630434457211}{\left(\left(x \cdot x\right) \cdot x\right) \cdot \left(\left(x \cdot x\right) \cdot x\right)}\right) - -0.5}}} \]
                3. Taylor expanded in x around inf

                  \[\leadsto \frac{1}{x \cdot \color{blue}{\left(2 - \frac{600041}{596657} \cdot \frac{1}{{x}^{2}}\right)}} \]
                4. Step-by-step derivation
                  1. lower-*.f64N/A

                    \[\leadsto \frac{1}{x \cdot \left(2 - \color{blue}{\frac{600041}{596657} \cdot \frac{1}{{x}^{2}}}\right)} \]
                  2. lower--.f64N/A

                    \[\leadsto \frac{1}{x \cdot \left(2 - \frac{600041}{596657} \cdot \color{blue}{\frac{1}{{x}^{2}}}\right)} \]
                  3. lower-*.f64N/A

                    \[\leadsto \frac{1}{x \cdot \left(2 - \frac{600041}{596657} \cdot \frac{1}{\color{blue}{{x}^{2}}}\right)} \]
                  4. lower-/.f64N/A

                    \[\leadsto \frac{1}{x \cdot \left(2 - \frac{600041}{596657} \cdot \frac{1}{{x}^{\color{blue}{2}}}\right)} \]
                  5. lower-pow.f6452.2%

                    \[\leadsto \frac{1}{x \cdot \left(2 - 1.0056716002661497 \cdot \frac{1}{{x}^{2}}\right)} \]
                5. Applied rewrites52.2%

                  \[\leadsto \frac{1}{x \cdot \color{blue}{\left(2 - 1.0056716002661497 \cdot \frac{1}{{x}^{2}}\right)}} \]
                6. Step-by-step derivation
                  1. lift-*.f64N/A

                    \[\leadsto \frac{1}{x \cdot \left(2 - \color{blue}{\frac{600041}{596657} \cdot \frac{1}{{x}^{2}}}\right)} \]
                  2. *-commutativeN/A

                    \[\leadsto \frac{1}{\left(2 - \frac{600041}{596657} \cdot \frac{1}{{x}^{2}}\right) \cdot x} \]
                  3. lower-*.f6452.2%

                    \[\leadsto \frac{1}{\left(2 - 1.0056716002661497 \cdot \frac{1}{{x}^{2}}\right) \cdot x} \]
                  4. lift-*.f64N/A

                    \[\leadsto \frac{1}{\left(2 - \frac{600041}{596657} \cdot \frac{1}{{x}^{2}}\right) \cdot x} \]
                  5. lift-/.f64N/A

                    \[\leadsto \frac{1}{\left(2 - \frac{600041}{596657} \cdot \frac{1}{{x}^{2}}\right) \cdot x} \]
                  6. lift-pow.f64N/A

                    \[\leadsto \frac{1}{\left(2 - \frac{600041}{596657} \cdot \frac{1}{{x}^{2}}\right) \cdot x} \]
                  7. pow2N/A

                    \[\leadsto \frac{1}{\left(2 - \frac{600041}{596657} \cdot \frac{1}{x \cdot x}\right) \cdot x} \]
                  8. mult-flip-revN/A

                    \[\leadsto \frac{1}{\left(2 - \frac{\frac{600041}{596657}}{x \cdot x}\right) \cdot x} \]
                  9. lower-/.f64N/A

                    \[\leadsto \frac{1}{\left(2 - \frac{\frac{600041}{596657}}{x \cdot x}\right) \cdot x} \]
                  10. lift-*.f6452.2%

                    \[\leadsto \frac{1}{\left(2 - \frac{1.0056716002661497}{x \cdot x}\right) \cdot x} \]
                7. Applied rewrites52.2%

                  \[\leadsto \frac{1}{\left(2 - \frac{1.0056716002661497}{x \cdot x}\right) \cdot \color{blue}{x}} \]
              4. Recombined 2 regimes into one program.
              5. Add Preprocessing

              Alternative 6: 99.2% accurate, 9.6× speedup?

              \[\mathsf{copysign}\left(1, x\right) \cdot \begin{array}{l} \mathbf{if}\;\left|x\right| \leq 1.2:\\ \;\;\;\;\mathsf{fma}\left(\left|x\right| \cdot \left|x\right|, -0.6665536072, 1\right) \cdot \left|x\right|\\ \mathbf{else}:\\ \;\;\;\;\frac{0.5}{\left|x\right|}\\ \end{array} \]
              ; Herbie alternative 6: cubic core for |x| <= 1.2, leading tail term 0.5/|x| beyond.
              (FPCore (x)
               :precision binary64
               (*
                (copysign 1.0 x)
                (if (<= (fabs x) 1.2)
                  (* (fma (* (fabs x) (fabs x)) -0.6665536072 1.0) (fabs x))
                  (/ 0.5 (fabs x)))))
              double code(double x) {
              	double tmp;
              	if (fabs(x) <= 1.2) {
              		tmp = fma((fabs(x) * fabs(x)), -0.6665536072, 1.0) * fabs(x);
              	} else {
              		tmp = 0.5 / fabs(x);
              	}
              	return copysign(1.0, x) * tmp;
              }
              
              function code(x)
              	# Herbie alternative 6: cubic core for |x| <= 1.2, 0.5/|x| tail, sign restored last.
              	ax = abs(x)
              	mag = if ax <= 1.2
              		Float64(fma(Float64(ax * ax), -0.6665536072, 1.0) * ax)
              	else
              		Float64(0.5 / ax)
              	end
              	return Float64(copysign(1.0, x) * mag)
              end
              
              code[x_] := N[(N[With[{TMP1 = Abs[1.0], TMP2 = Sign[x]}, TMP1 * If[TMP2 == 0, 1, TMP2]], $MachinePrecision] * If[LessEqual[N[Abs[x], $MachinePrecision], 1.2], N[(N[(N[(N[Abs[x], $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision] * -0.6665536072 + 1.0), $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision], N[(0.5 / N[Abs[x], $MachinePrecision]), $MachinePrecision]]), $MachinePrecision]
              
              \mathsf{copysign}\left(1, x\right) \cdot \begin{array}{l}
              \mathbf{if}\;\left|x\right| \leq 1.2:\\
              \;\;\;\;\mathsf{fma}\left(\left|x\right| \cdot \left|x\right|, -0.6665536072, 1\right) \cdot \left|x\right|\\
              
              \mathbf{else}:\\
              \;\;\;\;\frac{0.5}{\left|x\right|}\\
              
              
              \end{array}
              
              Derivation
              1. Split input into 2 regimes
              2. if x < 1.19999999999999996

                1. Initial program 54.7%

                  \[\frac{\left(\left(\left(\left(1 + 0.1049934947 \cdot \left(x \cdot x\right)\right) + 0.0424060604 \cdot \left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0072644182 \cdot \left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0005064034 \cdot \left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0001789971 \cdot \left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)}{\left(\left(\left(\left(\left(1 + 0.7715471019 \cdot \left(x \cdot x\right)\right) + 0.2909738639 \cdot \left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0694555761 \cdot \left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0140005442 \cdot \left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0008327945 \cdot \left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + \left(2 \cdot 0.0001789971\right) \cdot \left(\left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)} \cdot x \]
                2. Taylor expanded in x around 0

                  \[\leadsto \color{blue}{\left(1 + \frac{-833192009}{1250000000} \cdot {x}^{2}\right)} \cdot x \]
                3. Step-by-step derivation
                  1. lower-+.f64N/A

                    \[\leadsto \left(1 + \color{blue}{\frac{-833192009}{1250000000} \cdot {x}^{2}}\right) \cdot x \]
                  2. lower-*.f64N/A

                    \[\leadsto \left(1 + \frac{-833192009}{1250000000} \cdot \color{blue}{{x}^{2}}\right) \cdot x \]
                  3. lower-pow.f6450.2%

                    \[\leadsto \left(1 + -0.6665536072 \cdot {x}^{\color{blue}{2}}\right) \cdot x \]
                4. Applied rewrites50.2%

                  \[\leadsto \color{blue}{\left(1 + -0.6665536072 \cdot {x}^{2}\right)} \cdot x \]
                5. Step-by-step derivation
                  1. lift-+.f64N/A

                    \[\leadsto \left(1 + \color{blue}{\frac{-833192009}{1250000000} \cdot {x}^{2}}\right) \cdot x \]
                  2. +-commutativeN/A

                    \[\leadsto \left(\frac{-833192009}{1250000000} \cdot {x}^{2} + \color{blue}{1}\right) \cdot x \]
                  3. lift-*.f64N/A

                    \[\leadsto \left(\frac{-833192009}{1250000000} \cdot {x}^{2} + 1\right) \cdot x \]
                  4. lift-pow.f64N/A

                    \[\leadsto \left(\frac{-833192009}{1250000000} \cdot {x}^{2} + 1\right) \cdot x \]
                  5. pow2N/A

                    \[\leadsto \left(\frac{-833192009}{1250000000} \cdot \left(x \cdot x\right) + 1\right) \cdot x \]
                  6. *-commutativeN/A

                    \[\leadsto \left(\left(x \cdot x\right) \cdot \frac{-833192009}{1250000000} + 1\right) \cdot x \]
                  7. lower-fma.f64N/A

                    \[\leadsto \mathsf{fma}\left(x \cdot x, \color{blue}{\frac{-833192009}{1250000000}}, 1\right) \cdot x \]
                  8. lower-*.f6450.2%

                    \[\leadsto \mathsf{fma}\left(x \cdot x, -0.6665536072, 1\right) \cdot x \]
                6. Applied rewrites50.2%

                  \[\leadsto \mathsf{fma}\left(x \cdot x, \color{blue}{-0.6665536072}, 1\right) \cdot x \]

                if 1.19999999999999996 < x

                1. Initial program 54.7%

                  \[\frac{\left(\left(\left(\left(1 + 0.1049934947 \cdot \left(x \cdot x\right)\right) + 0.0424060604 \cdot \left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0072644182 \cdot \left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0005064034 \cdot \left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0001789971 \cdot \left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)}{\left(\left(\left(\left(\left(1 + 0.7715471019 \cdot \left(x \cdot x\right)\right) + 0.2909738639 \cdot \left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0694555761 \cdot \left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0140005442 \cdot \left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0008327945 \cdot \left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + \left(2 \cdot 0.0001789971\right) \cdot \left(\left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)} \cdot x \]
                2. Taylor expanded in x around inf

                  \[\leadsto \color{blue}{\frac{\frac{1}{2}}{x}} \]
                3. Step-by-step derivation
                  1. lower-/.f6451.5%

                    \[\leadsto \frac{0.5}{\color{blue}{x}} \]
                4. Applied rewrites51.5%

                  \[\leadsto \color{blue}{\frac{0.5}{x}} \]
              3. Recombined 2 regimes into one program.
              4. Add Preprocessing

              Alternative 7: 98.9% accurate, 15.4× speedup?

              \[\mathsf{copysign}\left(1, x\right) \cdot \begin{array}{l} \mathbf{if}\;\left|x\right| \leq 0.7:\\ \;\;\;\;\left|x\right|\\ \mathbf{else}:\\ \;\;\;\;\frac{0.5}{\left|x\right|}\\ \end{array} \]
              ; Herbie alternative 7 (fastest): identity near zero, 0.5/|x| in the tails.
              (FPCore (x)
               :precision binary64
               (* (copysign 1.0 x) (if (<= (fabs x) 0.7) (fabs x) (/ 0.5 (fabs x)))))
              double code(double x) {
              	/* Herbie alternative 7: |x| near zero, 0.5/|x| in the tails (odd symmetry). */
              	double ax = fabs(x);
              	double mag = (ax <= 0.7) ? ax : 0.5 / ax;
              	return copysign(1.0, x) * mag;
              }
              
              public static double code(double x) {
              	// Herbie alternative 7: identity core, reciprocal tail, sign copied back.
              	final double ax = Math.abs(x);
              	final double mag = (ax <= 0.7) ? ax : 0.5 / ax;
              	return Math.copySign(1.0, x) * mag;
              }
              
              def code(x):
              	"""Herbie alternative 7: odd piecewise map — |x| near zero, 0.5/|x| in the tails."""
              	ax = math.fabs(x)
              	mag = ax if ax <= 0.7 else 0.5 / ax
              	return math.copysign(1.0, x) * mag
              
              function code(x)
              	# Herbie alternative 7: |x| near zero, 0.5/|x| in the tails, sign restored last.
              	ax = abs(x)
              	mag = ax <= 0.7 ? ax : Float64(0.5 / ax)
              	return Float64(copysign(1.0, x) * mag)
              end
              
              function tmp_2 = code(x)
              	% Herbie alternative 7: identity near zero, reciprocal tail, odd symmetry.
              	% Note sign(0) == 0, so code(0) == 0 just as in the branch-free spec.
              	ax = abs(x);
              	if ax > 0.7
              		mag = 0.5 / ax;
              	else
              		mag = ax;
              	end
              	tmp_2 = (sign(x) * abs(1.0)) * mag;
              end
              
              code[x_] := N[(N[With[{TMP1 = Abs[1.0], TMP2 = Sign[x]}, TMP1 * If[TMP2 == 0, 1, TMP2]], $MachinePrecision] * If[LessEqual[N[Abs[x], $MachinePrecision], 0.7], N[Abs[x], $MachinePrecision], N[(0.5 / N[Abs[x], $MachinePrecision]), $MachinePrecision]]), $MachinePrecision]
              
              \mathsf{copysign}\left(1, x\right) \cdot \begin{array}{l}
              \mathbf{if}\;\left|x\right| \leq 0.7:\\
              \;\;\;\;\left|x\right|\\
              
              \mathbf{else}:\\
              \;\;\;\;\frac{0.5}{\left|x\right|}\\
              
              
              \end{array}
              
              Derivation
              1. Split input into 2 regimes
              2. if x < 0.69999999999999996

                1. Initial program 54.7%

                  \[\frac{\left(\left(\left(\left(1 + 0.1049934947 \cdot \left(x \cdot x\right)\right) + 0.0424060604 \cdot \left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0072644182 \cdot \left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0005064034 \cdot \left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0001789971 \cdot \left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)}{\left(\left(\left(\left(\left(1 + 0.7715471019 \cdot \left(x \cdot x\right)\right) + 0.2909738639 \cdot \left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0694555761 \cdot \left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0140005442 \cdot \left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0008327945 \cdot \left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + \left(2 \cdot 0.0001789971\right) \cdot \left(\left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)} \cdot x \]
                2. Taylor expanded in x around 0

                  \[\leadsto \color{blue}{x} \]
                3. Step-by-step derivation
                  1. Applied rewrites (51.2% accurate)

                    \[\leadsto \color{blue}{x} \]

                  if 0.69999999999999996 < x

                  1. Initial program 54.7%

                    \[\frac{\left(\left(\left(\left(1 + 0.1049934947 \cdot \left(x \cdot x\right)\right) + 0.0424060604 \cdot \left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0072644182 \cdot \left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0005064034 \cdot \left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0001789971 \cdot \left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)}{\left(\left(\left(\left(\left(1 + 0.7715471019 \cdot \left(x \cdot x\right)\right) + 0.2909738639 \cdot \left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0694555761 \cdot \left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0140005442 \cdot \left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0008327945 \cdot \left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + \left(2 \cdot 0.0001789971\right) \cdot \left(\left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)} \cdot x \]
                  2. Taylor expanded in x around inf

                    \[\leadsto \color{blue}{\frac{\frac{1}{2}}{x}} \]
                  3. Step-by-step derivation
                    1. lower-/.f64 (51.5% accurate)

                      \[\leadsto \frac{0.5}{\color{blue}{x}} \]
                  4. Applied rewrites (51.5% accurate)

                    \[\leadsto \color{blue}{\frac{0.5}{x}} \]
                4. Recombined 2 regimes into one program.
                5. Add Preprocessing

                Alternative 8: 51.2% accurate, 253.1× speedup

                \[x \]
                (FPCore (x) :precision binary64 x)
                double code(double x) {
                	return x;
                }
                
                module fmin_fmax_functions
                    implicit none
                    private
                    public fmax
                    public fmin
                
                    interface fmax
                        module procedure fmax88
                        module procedure fmax44
                        module procedure fmax84
                        module procedure fmax48
                    end interface
                    interface fmin
                        module procedure fmin88
                        module procedure fmin44
                        module procedure fmin84
                        module procedure fmin48
                    end interface
                contains
                    real(8) function fmax88(x, y) result (res)
                        real(8), intent (in) :: x
                        real(8), intent (in) :: y
                        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
                    end function
                    real(4) function fmax44(x, y) result (res)
                        real(4), intent (in) :: x
                        real(4), intent (in) :: y
                        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
                    end function
                    real(8) function fmax84(x, y) result(res)
                        real(8), intent (in) :: x
                        real(4), intent (in) :: y
                        res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
                    end function
                    real(8) function fmax48(x, y) result(res)
                        real(4), intent (in) :: x
                        real(8), intent (in) :: y
                        res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
                    end function
                    real(8) function fmin88(x, y) result (res)
                        real(8), intent (in) :: x
                        real(8), intent (in) :: y
                        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
                    end function
                    real(4) function fmin44(x, y) result (res)
                        real(4), intent (in) :: x
                        real(4), intent (in) :: y
                        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
                    end function
                    real(8) function fmin84(x, y) result(res)
                        real(8), intent (in) :: x
                        real(4), intent (in) :: y
                        res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
                    end function
                    real(8) function fmin48(x, y) result(res)
                        real(4), intent (in) :: x
                        real(8), intent (in) :: y
                        res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
                    end function
                end module
                
                 real(8) function code(x)
                 ! Alternative 8: the identity — leading Taylor term of the
                 ! original rational expression around x = 0.
                 use fmin_fmax_functions
                     real(8), intent (in) :: x
                     code = x
                 end function
                
                 public static double code(double x) {
                 	// Alternative 8: identity (leading Taylor term around x = 0).
                 	return x;
                 }
                
                 def code(x):
                 	"""Alternative 8: identity (leading Taylor term around x = 0)."""
                 	return x
                
                 function code(x)
                 	# Alternative 8: identity (leading Taylor term around x = 0).
                 	return x
                 end
                
                 function tmp = code(x)
                 	% Alternative 8: identity (leading Taylor term around x = 0).
                 	tmp = x;
                 end
                
                code[x_] := x
                
                x
                
                Derivation
                1. Initial program 54.7%

                  \[\frac{\left(\left(\left(\left(1 + 0.1049934947 \cdot \left(x \cdot x\right)\right) + 0.0424060604 \cdot \left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0072644182 \cdot \left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0005064034 \cdot \left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0001789971 \cdot \left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)}{\left(\left(\left(\left(\left(1 + 0.7715471019 \cdot \left(x \cdot x\right)\right) + 0.2909738639 \cdot \left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0694555761 \cdot \left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0140005442 \cdot \left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + 0.0008327945 \cdot \left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)\right) + \left(2 \cdot 0.0001789971\right) \cdot \left(\left(\left(\left(\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot x\right)\right)} \cdot x \]
                2. Taylor expanded in x around 0

                  \[\leadsto \color{blue}{x} \]
                3. Step-by-step derivation
                  1. Applied rewrites (51.2% accurate)

                    \[\leadsto \color{blue}{x} \]
                  2. Add Preprocessing

                  Reproduce

                  ?
                  herbie shell --seed 2025187 
                  (FPCore (x)
                    :name "Jmat.Real.dawson"
                    :precision binary64
                    (* (/ (+ (+ (+ (+ (+ 1.0 (* 0.1049934947 (* x x))) (* 0.0424060604 (* (* x x) (* x x)))) (* 0.0072644182 (* (* (* x x) (* x x)) (* x x)))) (* 0.0005064034 (* (* (* (* x x) (* x x)) (* x x)) (* x x)))) (* 0.0001789971 (* (* (* (* (* x x) (* x x)) (* x x)) (* x x)) (* x x)))) (+ (+ (+ (+ (+ (+ 1.0 (* 0.7715471019 (* x x))) (* 0.2909738639 (* (* x x) (* x x)))) (* 0.0694555761 (* (* (* x x) (* x x)) (* x x)))) (* 0.0140005442 (* (* (* (* x x) (* x x)) (* x x)) (* x x)))) (* 0.0008327945 (* (* (* (* (* x x) (* x x)) (* x x)) (* x x)) (* x x)))) (* (* 2.0 0.0001789971) (* (* (* (* (* (* x x) (* x x)) (* x x)) (* x x)) (* x x)) (* x x))))) x))