Destination given bearing on a great circle

Percentage Accurate: 99.8% → 99.8%
Time: 9.4s
Alternatives: 13
Speedup: 1.1×

Specification

?
\[\begin{array}{l} \\ \lambda_1 + \tan^{-1}_* \frac{\left(\sin \theta \cdot \sin \delta\right) \cdot \cos \phi_1}{\cos \delta - \sin \phi_1 \cdot \sin \sin^{-1} \left(\sin \phi_1 \cdot \cos \delta + \left(\cos \phi_1 \cdot \sin \delta\right) \cdot \cos \theta\right)} \end{array} \]
; FPCore specification (binary64): destination longitude on a great circle,
; given start longitude lambda1, start latitude phi1, angular distance delta
; and initial bearing theta. phi2 is declared but unused by this expression.
(FPCore (lambda1 phi1 phi2 delta theta)
 :precision binary64
 (+
  lambda1
  (atan2
   (* (* (sin theta) (sin delta)) (cos phi1))
   (-
    (cos delta)
    (*
     (sin phi1)
     (sin
      (asin
       (+
        (* (sin phi1) (cos delta))
        (* (* (cos phi1) (sin delta)) (cos theta))))))))))
double code(double lambda1, double phi1, double phi2, double delta, double theta) {
	return lambda1 + atan2(((sin(theta) * sin(delta)) * cos(phi1)), (cos(delta) - (sin(phi1) * sin(asin(((sin(phi1) * cos(delta)) + ((cos(phi1) * sin(delta)) * cos(theta))))))));
}
module fmin_fmax_functions
    ! NaN-aware max/min helpers mirroring C's fmax/fmin semantics:
    ! when one argument is NaN the other argument is returned, unlike the
    ! Fortran intrinsics max/min whose NaN behavior is processor-dependent.
    ! (x /= x is true exactly when x is NaN.)
    implicit none
    private
    public fmax
    public fmin

    ! Generic interfaces covering all real(4)/real(8) argument combinations;
    ! mixed-kind variants promote to real(8) via dble.
    interface fmax
        module procedure fmax88
        module procedure fmax44
        module procedure fmax84
        module procedure fmax48
    end interface
    interface fmin
        module procedure fmin88
        module procedure fmin44
        module procedure fmin84
        module procedure fmin48
    end interface
contains
    ! fmax, both arguments real(8).
    real(8) function fmax88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        ! x NaN -> y; else y NaN -> x; else intrinsic max.
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    ! fmax, both arguments real(4).
    real(4) function fmax44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    ! fmax, real(8)/real(4); result is real(8).
    real(8) function fmax84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
    end function
    ! fmax, real(4)/real(8); result is real(8).
    real(8) function fmax48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
    end function
    ! fmin, both arguments real(8).
    real(8) function fmin88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        ! x NaN -> y; else y NaN -> x; else intrinsic min.
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    ! fmin, both arguments real(4).
    real(4) function fmin44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    ! fmin, real(8)/real(4); result is real(8).
    real(8) function fmin84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
    end function
    ! fmin, real(4)/real(8); result is real(8).
    real(8) function fmin48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
    end function
end module

real(8) function code(lambda1, phi1, phi2, delta, theta)
use fmin_fmax_functions
    ! Destination longitude on a great circle from (lambda1, phi1), angular
    ! distance delta, bearing theta. phi2 is unused (interface parity).
    real(8), intent (in) :: lambda1
    real(8), intent (in) :: phi1
    real(8), intent (in) :: phi2
    real(8), intent (in) :: delta
    real(8), intent (in) :: theta
    real(8) :: numer, denom
    numer = (sin(theta) * sin(delta)) * cos(phi1)
    denom = cos(delta) - (sin(phi1) * sin(asin((sin(phi1) * cos(delta)) + ((cos(phi1) * sin(delta)) * cos(theta)))))
    code = lambda1 + atan2(numer, denom)
end function
public static double code(double lambda1, double phi1, double phi2, double delta, double theta) {
	// Destination longitude on a great circle; phi2 is unused (kept for
	// interface parity with the other generated translations).
	final double numerator = Math.sin(theta) * Math.sin(delta) * Math.cos(phi1);
	final double sinLat2 = Math.sin(phi1) * Math.cos(delta)
			+ Math.cos(phi1) * Math.sin(delta) * Math.cos(theta);
	final double denominator = Math.cos(delta) - Math.sin(phi1) * Math.sin(Math.asin(sinLat2));
	return lambda1 + Math.atan2(numerator, denominator);
}
def code(lambda1, phi1, phi2, delta, theta):
	"""Destination longitude on a great circle.

	Start point (lambda1, phi1), angular distance delta, initial bearing
	theta; phi2 is unused (kept for signature parity with the other
	generated translations). Operation order matches the specification.
	"""
	numerator = math.sin(theta) * math.sin(delta) * math.cos(phi1)
	sin_lat2 = math.sin(phi1) * math.cos(delta) + math.cos(phi1) * math.sin(delta) * math.cos(theta)
	denominator = math.cos(delta) - math.sin(phi1) * math.sin(math.asin(sin_lat2))
	return lambda1 + math.atan2(numerator, denominator)
function code(lambda1, phi1, phi2, delta, theta)
	# Destination longitude on a great circle; phi2 is unused (interface parity).
	# Each Float64(...) wrapper preserves the generated rounding points exactly.
	numer = Float64(Float64(sin(theta) * sin(delta)) * cos(phi1))
	inner = Float64(Float64(sin(phi1) * cos(delta)) + Float64(Float64(cos(phi1) * sin(delta)) * cos(theta)))
	denom = Float64(cos(delta) - Float64(sin(phi1) * sin(asin(inner))))
	return Float64(lambda1 + atan(numer, denom))
end
function tmp = code(lambda1, phi1, phi2, delta, theta)
	% Destination longitude on a great circle; phi2 is unused (interface parity).
	numer = (sin(theta) * sin(delta)) * cos(phi1);
	inner = (sin(phi1) * cos(delta)) + ((cos(phi1) * sin(delta)) * cos(theta));
	denom = cos(delta) - (sin(phi1) * sin(asin(inner)));
	tmp = lambda1 + atan2(numer, denom);
end
(* Destination longitude on a great circle; phi2 is unused. Machine-generated:
   every subexpression is rounded via N[..., $MachinePrecision] to mimic binary64. *)
code[lambda1_, phi1_, phi2_, delta_, theta_] := N[(lambda1 + N[ArcTan[N[(N[(N[Sin[theta], $MachinePrecision] * N[Sin[delta], $MachinePrecision]), $MachinePrecision] * N[Cos[phi1], $MachinePrecision]), $MachinePrecision] / N[(N[Cos[delta], $MachinePrecision] - N[(N[Sin[phi1], $MachinePrecision] * N[Sin[N[ArcSin[N[(N[(N[Sin[phi1], $MachinePrecision] * N[Cos[delta], $MachinePrecision]), $MachinePrecision] + N[(N[(N[Cos[phi1], $MachinePrecision] * N[Sin[delta], $MachinePrecision]), $MachinePrecision] * N[Cos[theta], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\lambda_1 + \tan^{-1}_* \frac{\left(\sin \theta \cdot \sin \delta\right) \cdot \cos \phi_1}{\cos \delta - \sin \phi_1 \cdot \sin \sin^{-1} \left(\sin \phi_1 \cdot \cos \delta + \left(\cos \phi_1 \cdot \sin \delta\right) \cdot \cos \theta\right)}
\end{array}

Local Percentage Accuracy vs ?

The average percentage accuracy by input value. The horizontal axis shows the value of an input variable; the variable is chosen in the title. The vertical axis is accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion. These can be toggled with buttons below the plot. The line is an average, while dots represent individual samples.

Accuracy vs Speed?

Herbie found 13 alternatives:

AlternativeAccuracySpeedup
The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed–accuracy tradeoffs.

Initial Program: 99.8% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \lambda_1 + \tan^{-1}_* \frac{\left(\sin \theta \cdot \sin \delta\right) \cdot \cos \phi_1}{\cos \delta - \sin \phi_1 \cdot \sin \sin^{-1} \left(\sin \phi_1 \cdot \cos \delta + \left(\cos \phi_1 \cdot \sin \delta\right) \cdot \cos \theta\right)} \end{array} \]
; Initial program (binary64): destination longitude on a great circle,
; given start longitude lambda1, start latitude phi1, angular distance delta
; and initial bearing theta. phi2 is declared but unused by this expression.
(FPCore (lambda1 phi1 phi2 delta theta)
 :precision binary64
 (+
  lambda1
  (atan2
   (* (* (sin theta) (sin delta)) (cos phi1))
   (-
    (cos delta)
    (*
     (sin phi1)
     (sin
      (asin
       (+
        (* (sin phi1) (cos delta))
        (* (* (cos phi1) (sin delta)) (cos theta))))))))))
double code(double lambda1, double phi1, double phi2, double delta, double theta) {
	return lambda1 + atan2(((sin(theta) * sin(delta)) * cos(phi1)), (cos(delta) - (sin(phi1) * sin(asin(((sin(phi1) * cos(delta)) + ((cos(phi1) * sin(delta)) * cos(theta))))))));
}
module fmin_fmax_functions
    ! NaN-aware max/min helpers mirroring C's fmax/fmin semantics:
    ! when one argument is NaN the other argument is returned, unlike the
    ! Fortran intrinsics max/min whose NaN behavior is processor-dependent.
    ! (x /= x is true exactly when x is NaN.)
    implicit none
    private
    public fmax
    public fmin

    ! Generic interfaces covering all real(4)/real(8) argument combinations;
    ! mixed-kind variants promote to real(8) via dble.
    interface fmax
        module procedure fmax88
        module procedure fmax44
        module procedure fmax84
        module procedure fmax48
    end interface
    interface fmin
        module procedure fmin88
        module procedure fmin44
        module procedure fmin84
        module procedure fmin48
    end interface
contains
    ! fmax, both arguments real(8).
    real(8) function fmax88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        ! x NaN -> y; else y NaN -> x; else intrinsic max.
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    ! fmax, both arguments real(4).
    real(4) function fmax44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    ! fmax, real(8)/real(4); result is real(8).
    real(8) function fmax84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
    end function
    ! fmax, real(4)/real(8); result is real(8).
    real(8) function fmax48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
    end function
    ! fmin, both arguments real(8).
    real(8) function fmin88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        ! x NaN -> y; else y NaN -> x; else intrinsic min.
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    ! fmin, both arguments real(4).
    real(4) function fmin44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    ! fmin, real(8)/real(4); result is real(8).
    real(8) function fmin84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
    end function
    ! fmin, real(4)/real(8); result is real(8).
    real(8) function fmin48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
    end function
end module

real(8) function code(lambda1, phi1, phi2, delta, theta)
use fmin_fmax_functions
    ! Destination longitude on a great circle from (lambda1, phi1), angular
    ! distance delta, bearing theta. phi2 is unused (interface parity).
    real(8), intent (in) :: lambda1
    real(8), intent (in) :: phi1
    real(8), intent (in) :: phi2
    real(8), intent (in) :: delta
    real(8), intent (in) :: theta
    real(8) :: numer, denom
    numer = (sin(theta) * sin(delta)) * cos(phi1)
    denom = cos(delta) - (sin(phi1) * sin(asin((sin(phi1) * cos(delta)) + ((cos(phi1) * sin(delta)) * cos(theta)))))
    code = lambda1 + atan2(numer, denom)
end function
public static double code(double lambda1, double phi1, double phi2, double delta, double theta) {
	// Destination longitude on a great circle; phi2 is unused (kept for
	// interface parity with the other generated translations).
	final double numerator = Math.sin(theta) * Math.sin(delta) * Math.cos(phi1);
	final double sinLat2 = Math.sin(phi1) * Math.cos(delta)
			+ Math.cos(phi1) * Math.sin(delta) * Math.cos(theta);
	final double denominator = Math.cos(delta) - Math.sin(phi1) * Math.sin(Math.asin(sinLat2));
	return lambda1 + Math.atan2(numerator, denominator);
}
def code(lambda1, phi1, phi2, delta, theta):
	"""Destination longitude on a great circle.

	Start point (lambda1, phi1), angular distance delta, initial bearing
	theta; phi2 is unused (kept for signature parity with the other
	generated translations). Operation order matches the specification.
	"""
	numerator = math.sin(theta) * math.sin(delta) * math.cos(phi1)
	sin_lat2 = math.sin(phi1) * math.cos(delta) + math.cos(phi1) * math.sin(delta) * math.cos(theta)
	denominator = math.cos(delta) - math.sin(phi1) * math.sin(math.asin(sin_lat2))
	return lambda1 + math.atan2(numerator, denominator)
function code(lambda1, phi1, phi2, delta, theta)
	# Destination longitude on a great circle; phi2 is unused (interface parity).
	# Each Float64(...) wrapper preserves the generated rounding points exactly.
	numer = Float64(Float64(sin(theta) * sin(delta)) * cos(phi1))
	inner = Float64(Float64(sin(phi1) * cos(delta)) + Float64(Float64(cos(phi1) * sin(delta)) * cos(theta)))
	denom = Float64(cos(delta) - Float64(sin(phi1) * sin(asin(inner))))
	return Float64(lambda1 + atan(numer, denom))
end
function tmp = code(lambda1, phi1, phi2, delta, theta)
	% Destination longitude on a great circle; phi2 is unused (interface parity).
	numer = (sin(theta) * sin(delta)) * cos(phi1);
	inner = (sin(phi1) * cos(delta)) + ((cos(phi1) * sin(delta)) * cos(theta));
	denom = cos(delta) - (sin(phi1) * sin(asin(inner)));
	tmp = lambda1 + atan2(numer, denom);
end
(* Destination longitude on a great circle; phi2 is unused. Machine-generated:
   every subexpression is rounded via N[..., $MachinePrecision] to mimic binary64. *)
code[lambda1_, phi1_, phi2_, delta_, theta_] := N[(lambda1 + N[ArcTan[N[(N[(N[Sin[theta], $MachinePrecision] * N[Sin[delta], $MachinePrecision]), $MachinePrecision] * N[Cos[phi1], $MachinePrecision]), $MachinePrecision] / N[(N[Cos[delta], $MachinePrecision] - N[(N[Sin[phi1], $MachinePrecision] * N[Sin[N[ArcSin[N[(N[(N[Sin[phi1], $MachinePrecision] * N[Cos[delta], $MachinePrecision]), $MachinePrecision] + N[(N[(N[Cos[phi1], $MachinePrecision] * N[Sin[delta], $MachinePrecision]), $MachinePrecision] * N[Cos[theta], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\lambda_1 + \tan^{-1}_* \frac{\left(\sin \theta \cdot \sin \delta\right) \cdot \cos \phi_1}{\cos \delta - \sin \phi_1 \cdot \sin \sin^{-1} \left(\sin \phi_1 \cdot \cos \delta + \left(\cos \phi_1 \cdot \sin \delta\right) \cdot \cos \theta\right)}
\end{array}

Alternative 1: 99.8% accurate, 1.1× speedup?

\[\begin{array}{l} \\ \lambda_1 + \tan^{-1}_* \frac{\left(\sin \theta \cdot \sin \delta\right) \cdot \cos \phi_1}{\cos \delta - \sin \phi_1 \cdot \mathsf{fma}\left(\cos \phi_1, \cos \theta \cdot \sin \delta, \sin \phi_1 \cdot \cos \delta\right)} \end{array} \]
; Alternative 1 (binary64): sin(asin(x)) simplified to x, with the inner
; sum fused into a single fma. phi2 is declared but unused.
(FPCore (lambda1 phi1 phi2 delta theta)
 :precision binary64
 (+
  lambda1
  (atan2
   (* (* (sin theta) (sin delta)) (cos phi1))
   (-
    (cos delta)
    (*
     (sin phi1)
     (fma
      (cos phi1)
      (* (cos theta) (sin delta))
      (* (sin phi1) (cos delta))))))))
double code(double lambda1, double phi1, double phi2, double delta, double theta) {
	return lambda1 + atan2(((sin(theta) * sin(delta)) * cos(phi1)), (cos(delta) - (sin(phi1) * fma(cos(phi1), (cos(theta) * sin(delta)), (sin(phi1) * cos(delta))))));
}
function code(lambda1, phi1, phi2, delta, theta)
	# Alternative 1: sin(asin(x)) simplified away; inner sum fused with fma.
	# phi2 is unused (interface parity); Float64 wrappers kept as generated.
	numer = Float64(Float64(sin(theta) * sin(delta)) * cos(phi1))
	inner = fma(cos(phi1), Float64(cos(theta) * sin(delta)), Float64(sin(phi1) * cos(delta)))
	denom = Float64(cos(delta) - Float64(sin(phi1) * inner))
	return Float64(lambda1 + atan(numer, denom))
end
(* Alternative 1: sin(asin(x)) simplified away (no fma in Mathematica output,
   so the fused sum appears expanded). phi2 is unused. *)
code[lambda1_, phi1_, phi2_, delta_, theta_] := N[(lambda1 + N[ArcTan[N[(N[(N[Sin[theta], $MachinePrecision] * N[Sin[delta], $MachinePrecision]), $MachinePrecision] * N[Cos[phi1], $MachinePrecision]), $MachinePrecision] / N[(N[Cos[delta], $MachinePrecision] - N[(N[Sin[phi1], $MachinePrecision] * N[(N[Cos[phi1], $MachinePrecision] * N[(N[Cos[theta], $MachinePrecision] * N[Sin[delta], $MachinePrecision]), $MachinePrecision] + N[(N[Sin[phi1], $MachinePrecision] * N[Cos[delta], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\lambda_1 + \tan^{-1}_* \frac{\left(\sin \theta \cdot \sin \delta\right) \cdot \cos \phi_1}{\cos \delta - \sin \phi_1 \cdot \mathsf{fma}\left(\cos \phi_1, \cos \theta \cdot \sin \delta, \sin \phi_1 \cdot \cos \delta\right)}
\end{array}
Derivation
  1. Initial program 99.8%

    \[\lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \sin \sin^{-1} \left(\sin \phi_1 \cdot \cos delta + \left(\cos \phi_1 \cdot \sin delta\right) \cdot \cos theta\right)} \]
  2. Step-by-step derivation
    1. lift-sin.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \color{blue}{\sin \sin^{-1} \left(\sin \phi_1 \cdot \cos delta + \left(\cos \phi_1 \cdot \sin delta\right) \cdot \cos theta\right)}} \]
    2. lift-asin.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \sin \color{blue}{\sin^{-1} \left(\sin \phi_1 \cdot \cos delta + \left(\cos \phi_1 \cdot \sin delta\right) \cdot \cos theta\right)}} \]
    3. sin-asin99.8

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \color{blue}{\left(\sin \phi_1 \cdot \cos delta + \left(\cos \phi_1 \cdot \sin delta\right) \cdot \cos theta\right)}} \]
    4. lift-+.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \color{blue}{\left(\sin \phi_1 \cdot \cos delta + \left(\cos \phi_1 \cdot \sin delta\right) \cdot \cos theta\right)}} \]
    5. lift-*.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \left(\color{blue}{\sin \phi_1 \cdot \cos delta} + \left(\cos \phi_1 \cdot \sin delta\right) \cdot \cos theta\right)} \]
    6. lift-sin.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \left(\color{blue}{\sin \phi_1} \cdot \cos delta + \left(\cos \phi_1 \cdot \sin delta\right) \cdot \cos theta\right)} \]
    7. lift-cos.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \left(\sin \phi_1 \cdot \color{blue}{\cos delta} + \left(\cos \phi_1 \cdot \sin delta\right) \cdot \cos theta\right)} \]
    8. lift-*.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \left(\sin \phi_1 \cdot \cos delta + \color{blue}{\left(\cos \phi_1 \cdot \sin delta\right) \cdot \cos theta}\right)} \]
    9. lift-*.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \left(\sin \phi_1 \cdot \cos delta + \color{blue}{\left(\cos \phi_1 \cdot \sin delta\right)} \cdot \cos theta\right)} \]
    10. lift-cos.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \left(\sin \phi_1 \cdot \cos delta + \left(\color{blue}{\cos \phi_1} \cdot \sin delta\right) \cdot \cos theta\right)} \]
    11. lift-sin.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \left(\sin \phi_1 \cdot \cos delta + \left(\cos \phi_1 \cdot \color{blue}{\sin delta}\right) \cdot \cos theta\right)} \]
    12. lift-cos.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \left(\sin \phi_1 \cdot \cos delta + \left(\cos \phi_1 \cdot \sin delta\right) \cdot \color{blue}{\cos theta}\right)} \]
    13. *-commutativeN/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \left(\color{blue}{\cos delta \cdot \sin \phi_1} + \left(\cos \phi_1 \cdot \sin delta\right) \cdot \cos theta\right)} \]
    14. associate-*l*N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \left(\cos delta \cdot \sin \phi_1 + \color{blue}{\cos \phi_1 \cdot \left(\sin delta \cdot \cos theta\right)}\right)} \]
    15. *-commutativeN/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \left(\cos delta \cdot \sin \phi_1 + \cos \phi_1 \cdot \color{blue}{\left(\cos theta \cdot \sin delta\right)}\right)} \]
    16. +-commutativeN/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \color{blue}{\left(\cos \phi_1 \cdot \left(\cos theta \cdot \sin delta\right) + \cos delta \cdot \sin \phi_1\right)}} \]
  3. Applied rewrites99.8%

    \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \color{blue}{\mathsf{fma}\left(\cos \phi_1, \cos theta \cdot \sin delta, \sin \phi_1 \cdot \cos delta\right)}} \]
  4. Add Preprocessing

Alternative 2: 94.6% accurate, 1.2× speedup?

\[\begin{array}{l} \\ \lambda_1 + \tan^{-1}_* \frac{\left(\sin \theta \cdot \sin \delta\right) \cdot \cos \phi_1}{\cos \delta - \mathsf{fma}\left(0.5 - 0.5 \cdot \cos \left(2 \cdot \phi_1\right), \cos \delta, \left(\sin \phi_1 \cdot \sin \delta\right) \cdot \cos \phi_1\right)} \end{array} \]
; Alternative 2 (binary64): sin(phi1)^2 rewritten with the half-angle identity
; 0.5 - 0.5*cos(2*phi1); theta dropped from the denominator by Taylor expansion
; around theta = 0 (hence the lower 94.6% accuracy). phi2 is unused.
(FPCore (lambda1 phi1 phi2 delta theta)
 :precision binary64
 (+
  lambda1
  (atan2
   (* (* (sin theta) (sin delta)) (cos phi1))
   (-
    (cos delta)
    (fma
     (- 0.5 (* 0.5 (cos (* 2.0 phi1))))
     (cos delta)
     (* (* (sin phi1) (sin delta)) (cos phi1)))))))
double code(double lambda1, double phi1, double phi2, double delta, double theta) {
	return lambda1 + atan2(((sin(theta) * sin(delta)) * cos(phi1)), (cos(delta) - fma((0.5 - (0.5 * cos((2.0 * phi1)))), cos(delta), ((sin(phi1) * sin(delta)) * cos(phi1)))));
}
function code(lambda1, phi1, phi2, delta, theta)
	# Alternative 2: sin(phi1)^2 via the half-angle identity; fma fuses the sum.
	# phi2 is unused (interface parity); Float64 wrappers kept as generated.
	numer = Float64(Float64(sin(theta) * sin(delta)) * cos(phi1))
	sin_sq = Float64(0.5 - Float64(0.5 * cos(Float64(2.0 * phi1))))
	denom = Float64(cos(delta) - fma(sin_sq, cos(delta), Float64(Float64(sin(phi1) * sin(delta)) * cos(phi1))))
	return Float64(lambda1 + atan(numer, denom))
end
(* Alternative 2: sin(phi1)^2 via the half-angle identity 0.5 - 0.5*Cos[2 phi1]
   (fma expanded in the Mathematica output). phi2 is unused. *)
code[lambda1_, phi1_, phi2_, delta_, theta_] := N[(lambda1 + N[ArcTan[N[(N[(N[Sin[theta], $MachinePrecision] * N[Sin[delta], $MachinePrecision]), $MachinePrecision] * N[Cos[phi1], $MachinePrecision]), $MachinePrecision] / N[(N[Cos[delta], $MachinePrecision] - N[(N[(0.5 - N[(0.5 * N[Cos[N[(2.0 * phi1), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[Cos[delta], $MachinePrecision] + N[(N[(N[Sin[phi1], $MachinePrecision] * N[Sin[delta], $MachinePrecision]), $MachinePrecision] * N[Cos[phi1], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\lambda_1 + \tan^{-1}_* \frac{\left(\sin \theta \cdot \sin \delta\right) \cdot \cos \phi_1}{\cos \delta - \mathsf{fma}\left(0.5 - 0.5 \cdot \cos \left(2 \cdot \phi_1\right), \cos \delta, \left(\sin \phi_1 \cdot \sin \delta\right) \cdot \cos \phi_1\right)}
\end{array}
Derivation
  1. Initial program 99.8%

    \[\lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \sin \sin^{-1} \left(\sin \phi_1 \cdot \cos delta + \left(\cos \phi_1 \cdot \sin delta\right) \cdot \cos theta\right)} \]
  2. Step-by-step derivation
    1. lift-sin.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \color{blue}{\sin \sin^{-1} \left(\sin \phi_1 \cdot \cos delta + \left(\cos \phi_1 \cdot \sin delta\right) \cdot \cos theta\right)}} \]
    2. lift-asin.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \sin \color{blue}{\sin^{-1} \left(\sin \phi_1 \cdot \cos delta + \left(\cos \phi_1 \cdot \sin delta\right) \cdot \cos theta\right)}} \]
    3. sin-asin99.8

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \color{blue}{\left(\sin \phi_1 \cdot \cos delta + \left(\cos \phi_1 \cdot \sin delta\right) \cdot \cos theta\right)}} \]
    4. lift-+.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \color{blue}{\left(\sin \phi_1 \cdot \cos delta + \left(\cos \phi_1 \cdot \sin delta\right) \cdot \cos theta\right)}} \]
    5. lift-*.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \left(\color{blue}{\sin \phi_1 \cdot \cos delta} + \left(\cos \phi_1 \cdot \sin delta\right) \cdot \cos theta\right)} \]
    6. lift-sin.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \left(\color{blue}{\sin \phi_1} \cdot \cos delta + \left(\cos \phi_1 \cdot \sin delta\right) \cdot \cos theta\right)} \]
    7. lift-cos.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \left(\sin \phi_1 \cdot \color{blue}{\cos delta} + \left(\cos \phi_1 \cdot \sin delta\right) \cdot \cos theta\right)} \]
    8. lift-*.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \left(\sin \phi_1 \cdot \cos delta + \color{blue}{\left(\cos \phi_1 \cdot \sin delta\right) \cdot \cos theta}\right)} \]
    9. lift-*.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \left(\sin \phi_1 \cdot \cos delta + \color{blue}{\left(\cos \phi_1 \cdot \sin delta\right)} \cdot \cos theta\right)} \]
    10. lift-cos.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \left(\sin \phi_1 \cdot \cos delta + \left(\color{blue}{\cos \phi_1} \cdot \sin delta\right) \cdot \cos theta\right)} \]
    11. lift-sin.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \left(\sin \phi_1 \cdot \cos delta + \left(\cos \phi_1 \cdot \color{blue}{\sin delta}\right) \cdot \cos theta\right)} \]
    12. lift-cos.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \left(\sin \phi_1 \cdot \cos delta + \left(\cos \phi_1 \cdot \sin delta\right) \cdot \color{blue}{\cos theta}\right)} \]
    13. *-commutativeN/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \left(\color{blue}{\cos delta \cdot \sin \phi_1} + \left(\cos \phi_1 \cdot \sin delta\right) \cdot \cos theta\right)} \]
    14. associate-*l*N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \left(\cos delta \cdot \sin \phi_1 + \color{blue}{\cos \phi_1 \cdot \left(\sin delta \cdot \cos theta\right)}\right)} \]
    15. *-commutativeN/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \left(\cos delta \cdot \sin \phi_1 + \cos \phi_1 \cdot \color{blue}{\left(\cos theta \cdot \sin delta\right)}\right)} \]
    16. +-commutativeN/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \color{blue}{\left(\cos \phi_1 \cdot \left(\cos theta \cdot \sin delta\right) + \cos delta \cdot \sin \phi_1\right)}} \]
  3. Applied rewrites99.8%

    \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \color{blue}{\mathsf{fma}\left(\cos \phi_1, \cos theta \cdot \sin delta, \sin \phi_1 \cdot \cos delta\right)}} \]
  4. Step-by-step derivation
    1. lift-*.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \color{blue}{\sin \phi_1 \cdot \mathsf{fma}\left(\cos \phi_1, \cos theta \cdot \sin delta, \sin \phi_1 \cdot \cos delta\right)}} \]
    2. lift-sin.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \color{blue}{\sin \phi_1} \cdot \mathsf{fma}\left(\cos \phi_1, \cos theta \cdot \sin delta, \sin \phi_1 \cdot \cos delta\right)} \]
    3. lift-cos.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \mathsf{fma}\left(\color{blue}{\cos \phi_1}, \cos theta \cdot \sin delta, \sin \phi_1 \cdot \cos delta\right)} \]
    4. lift-fma.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \color{blue}{\left(\cos \phi_1 \cdot \left(\cos theta \cdot \sin delta\right) + \sin \phi_1 \cdot \cos delta\right)}} \]
    5. lift-*.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \left(\cos \phi_1 \cdot \color{blue}{\left(\cos theta \cdot \sin delta\right)} + \sin \phi_1 \cdot \cos delta\right)} \]
    6. lift-cos.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \left(\cos \phi_1 \cdot \left(\color{blue}{\cos theta} \cdot \sin delta\right) + \sin \phi_1 \cdot \cos delta\right)} \]
    7. lift-sin.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \left(\cos \phi_1 \cdot \left(\cos theta \cdot \color{blue}{\sin delta}\right) + \sin \phi_1 \cdot \cos delta\right)} \]
    8. lift-*.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \left(\cos \phi_1 \cdot \left(\cos theta \cdot \sin delta\right) + \color{blue}{\sin \phi_1 \cdot \cos delta}\right)} \]
    9. lift-sin.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \left(\cos \phi_1 \cdot \left(\cos theta \cdot \sin delta\right) + \color{blue}{\sin \phi_1} \cdot \cos delta\right)} \]
    10. lift-cos.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \left(\cos \phi_1 \cdot \left(\cos theta \cdot \sin delta\right) + \sin \phi_1 \cdot \color{blue}{\cos delta}\right)} \]
    11. +-commutativeN/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \color{blue}{\left(\sin \phi_1 \cdot \cos delta + \cos \phi_1 \cdot \left(\cos theta \cdot \sin delta\right)\right)}} \]
    12. *-commutativeN/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \left(\color{blue}{\cos delta \cdot \sin \phi_1} + \cos \phi_1 \cdot \left(\cos theta \cdot \sin delta\right)\right)} \]
    13. distribute-rgt-inN/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \color{blue}{\left(\left(\cos delta \cdot \sin \phi_1\right) \cdot \sin \phi_1 + \left(\cos \phi_1 \cdot \left(\cos theta \cdot \sin delta\right)\right) \cdot \sin \phi_1\right)}} \]
  5. Applied rewrites99.8%

    \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \color{blue}{\mathsf{fma}\left(\sin \phi_1 \cdot \cos delta, \sin \phi_1, \left(\cos theta \cdot \left(\sin delta \cdot \cos \phi_1\right)\right) \cdot \sin \phi_1\right)}} \]
  6. Taylor expanded in theta around 0

    \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \color{blue}{\left(\cos delta \cdot {\sin \phi_1}^{2} + \cos \phi_1 \cdot \left(\sin delta \cdot \sin \phi_1\right)\right)}} \]
  7. Step-by-step derivation
    1. *-commutativeN/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \left({\sin \phi_1}^{2} \cdot \cos delta + \color{blue}{\cos \phi_1} \cdot \left(\sin delta \cdot \sin \phi_1\right)\right)} \]
    2. lower-fma.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \mathsf{fma}\left({\sin \phi_1}^{2}, \color{blue}{\cos delta}, \cos \phi_1 \cdot \left(\sin delta \cdot \sin \phi_1\right)\right)} \]
    3. unpow2N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \mathsf{fma}\left(\sin \phi_1 \cdot \sin \phi_1, \cos \color{blue}{delta}, \cos \phi_1 \cdot \left(\sin delta \cdot \sin \phi_1\right)\right)} \]
    4. sqr-sin-aN/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \mathsf{fma}\left(\frac{1}{2} - \frac{1}{2} \cdot \cos \left(2 \cdot \phi_1\right), \cos \color{blue}{delta}, \cos \phi_1 \cdot \left(\sin delta \cdot \sin \phi_1\right)\right)} \]
    5. lower--.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \mathsf{fma}\left(\frac{1}{2} - \frac{1}{2} \cdot \cos \left(2 \cdot \phi_1\right), \cos \color{blue}{delta}, \cos \phi_1 \cdot \left(\sin delta \cdot \sin \phi_1\right)\right)} \]
    6. lower-*.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \mathsf{fma}\left(\frac{1}{2} - \frac{1}{2} \cdot \cos \left(2 \cdot \phi_1\right), \cos delta, \cos \phi_1 \cdot \left(\sin delta \cdot \sin \phi_1\right)\right)} \]
    7. lower-cos.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \mathsf{fma}\left(\frac{1}{2} - \frac{1}{2} \cdot \cos \left(2 \cdot \phi_1\right), \cos delta, \cos \phi_1 \cdot \left(\sin delta \cdot \sin \phi_1\right)\right)} \]
    8. lower-*.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \mathsf{fma}\left(\frac{1}{2} - \frac{1}{2} \cdot \cos \left(2 \cdot \phi_1\right), \cos delta, \cos \phi_1 \cdot \left(\sin delta \cdot \sin \phi_1\right)\right)} \]
    9. lift-cos.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \mathsf{fma}\left(\frac{1}{2} - \frac{1}{2} \cdot \cos \left(2 \cdot \phi_1\right), \cos delta, \cos \phi_1 \cdot \left(\sin delta \cdot \sin \phi_1\right)\right)} \]
    10. *-commutativeN/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \mathsf{fma}\left(\frac{1}{2} - \frac{1}{2} \cdot \cos \left(2 \cdot \phi_1\right), \cos delta, \left(\sin delta \cdot \sin \phi_1\right) \cdot \cos \phi_1\right)} \]
    11. lower-*.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \mathsf{fma}\left(\frac{1}{2} - \frac{1}{2} \cdot \cos \left(2 \cdot \phi_1\right), \cos delta, \left(\sin delta \cdot \sin \phi_1\right) \cdot \cos \phi_1\right)} \]
    12. *-commutativeN/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \mathsf{fma}\left(\frac{1}{2} - \frac{1}{2} \cdot \cos \left(2 \cdot \phi_1\right), \cos delta, \left(\sin \phi_1 \cdot \sin delta\right) \cdot \cos \phi_1\right)} \]
    13. lower-*.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \mathsf{fma}\left(\frac{1}{2} - \frac{1}{2} \cdot \cos \left(2 \cdot \phi_1\right), \cos delta, \left(\sin \phi_1 \cdot \sin delta\right) \cdot \cos \phi_1\right)} \]
    14. lift-sin.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \mathsf{fma}\left(\frac{1}{2} - \frac{1}{2} \cdot \cos \left(2 \cdot \phi_1\right), \cos delta, \left(\sin \phi_1 \cdot \sin delta\right) \cdot \cos \phi_1\right)} \]
    15. lift-sin.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \mathsf{fma}\left(\frac{1}{2} - \frac{1}{2} \cdot \cos \left(2 \cdot \phi_1\right), \cos delta, \left(\sin \phi_1 \cdot \sin delta\right) \cdot \cos \phi_1\right)} \]
    16. lift-cos.f64 (94.6% accuracy)

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \mathsf{fma}\left(0.5 - 0.5 \cdot \cos \left(2 \cdot \phi_1\right), \cos delta, \left(\sin \phi_1 \cdot \sin delta\right) \cdot \cos \phi_1\right)} \]
  8. Applied rewrites (94.6% accuracy)

    \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \color{blue}{\mathsf{fma}\left(0.5 - 0.5 \cdot \cos \left(2 \cdot \phi_1\right), \cos delta, \left(\sin \phi_1 \cdot \sin delta\right) \cdot \cos \phi_1\right)}} \]
  9. Add Preprocessing

Alternative 3: 92.2% accurate, 2.4× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_1 := \left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1\\ \mathbf{if}\;delta \leq -7.6 \cdot 10^{-6}:\\ \;\;\;\;\lambda_1 + \tan^{-1}_* \frac{\left(\sin delta \cdot \cos \phi_1\right) \cdot \sin theta}{\cos delta}\\ \mathbf{elif}\;delta \leq 1.55 \cdot 10^{-7}:\\ \;\;\;\;\lambda_1 + \tan^{-1}_* \frac{t\_1}{0.5 + 0.5 \cdot \cos \left(2 \cdot \phi_1\right)}\\ \mathbf{else}:\\ \;\;\;\;\lambda_1 + \tan^{-1}_* \frac{t\_1}{\cos delta}\\ \end{array} \end{array} \]
;; Destination longitude given bearing on a great circle (Alternative 3).
;; Herbie rewrite: three regimes split on delta; the middle (tiny-delta)
;; regime replaces the denominator with the double-angle form
;; 0.5 + 0.5*cos(2*phi1).  phi2 is unused but kept for the spec signature.
(FPCore (lambda1 phi1 phi2 delta theta)
 :precision binary64
 (let* ((t_1 (* (* (sin theta) (sin delta)) (cos phi1))))
   (if (<= delta -7.6e-6)
     (+ lambda1 (atan2 (* (* (sin delta) (cos phi1)) (sin theta)) (cos delta)))
     (if (<= delta 1.55e-7)
       (+ lambda1 (atan2 t_1 (+ 0.5 (* 0.5 (cos (* 2.0 phi1))))))
       (+ lambda1 (atan2 t_1 (cos delta)))))))
double code(double lambda1, double phi1, double phi2, double delta, double theta) {
	double t_1 = (sin(theta) * sin(delta)) * cos(phi1);
	double tmp;
	if (delta <= -7.6e-6) {
		tmp = lambda1 + atan2(((sin(delta) * cos(phi1)) * sin(theta)), cos(delta));
	} else if (delta <= 1.55e-7) {
		tmp = lambda1 + atan2(t_1, (0.5 + (0.5 * cos((2.0 * phi1)))));
	} else {
		tmp = lambda1 + atan2(t_1, cos(delta));
	}
	return tmp;
}
! NaN-aware generic max/min helpers (generated runtime support).
! The expression merge(y, merge(x, max(x, y), y /= y), x /= x) reads:
!   if x is NaN (x /= x)      -> return y
!   else if y is NaN (y /= y) -> return x
!   else                      -> max(x, y)
! so a single NaN operand is ignored in favor of the other value.
! Mixed-kind variants promote the real(4) argument and return real(8).
module fmin_fmax_functions
    implicit none
    private
    public fmax
    public fmin

    interface fmax
        module procedure fmax88
        module procedure fmax44
        module procedure fmax84
        module procedure fmax48
    end interface
    interface fmin
        module procedure fmin88
        module procedure fmin44
        module procedure fmin84
        module procedure fmin48
    end interface
contains
    ! fmax, both arguments real(8).
    real(8) function fmax88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    ! fmax, both arguments real(4).
    real(4) function fmax44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    ! fmax, real(8)/real(4) mix; real(4) argument promoted, result real(8).
    real(8) function fmax84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
    end function
    ! fmax, real(4)/real(8) mix; real(4) argument promoted, result real(8).
    real(8) function fmax48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
    end function
    ! fmin, both arguments real(8).
    real(8) function fmin88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    ! fmin, both arguments real(4).
    real(4) function fmin44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    ! fmin, real(8)/real(4) mix; real(4) argument promoted, result real(8).
    real(8) function fmin84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
    end function
    ! fmin, real(4)/real(8) mix; real(4) argument promoted, result real(8).
    real(8) function fmin48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
    end function
end module

! Destination longitude given bearing on a great circle (Alternative 3).
! Herbie-generated rewrite with a three-way regime split on delta.
! phi2 is unused but kept so the signature matches the specification.
real(8) function code(lambda1, phi1, phi2, delta, theta)
use fmin_fmax_functions
    real(8), intent (in) :: lambda1
    real(8), intent (in) :: phi1
    real(8), intent (in) :: phi2
    real(8), intent (in) :: delta
    real(8), intent (in) :: theta
    real(8) :: numer

    numer = (sin(theta) * sin(delta)) * cos(phi1)
    if (delta <= (-7.6d-6)) then
        ! Same product, reassociated for this regime.
        code = lambda1 + atan2(((sin(delta) * cos(phi1)) * sin(theta)), cos(delta))
    else if (delta <= 1.55d-7) then
        ! Near delta = 0 the denominator uses the double-angle identity.
        code = lambda1 + atan2(numer, (0.5d0 + (0.5d0 * cos((2.0d0 * phi1)))))
    else
        code = lambda1 + atan2(numer, cos(delta))
    end if
end function
/**
 * Destination longitude given bearing on a great circle (Alternative 3).
 * Herbie-generated rewrite with a three-way regime split on delta.
 * phi2 is unused but kept so the signature matches the specification.
 */
public static double code(double lambda1, double phi1, double phi2, double delta, double theta) {
	double sinTheta = Math.sin(theta);
	double sinDelta = Math.sin(delta);
	double cosPhi1 = Math.cos(phi1);
	if (delta <= -7.6e-6) {
		// Same product, reassociated for this regime.
		return lambda1 + Math.atan2((sinDelta * cosPhi1) * sinTheta, Math.cos(delta));
	}
	double numer = (sinTheta * sinDelta) * cosPhi1;
	if (delta <= 1.55e-7) {
		// Near delta = 0 the denominator uses the double-angle identity.
		return lambda1 + Math.atan2(numer, 0.5 + (0.5 * Math.cos(2.0 * phi1)));
	}
	return lambda1 + Math.atan2(numer, Math.cos(delta));
}
def code(lambda1, phi1, phi2, delta, theta):
	"""Destination longitude given bearing on a great circle (Alternative 3).

	Herbie-generated rewrite with a three-way regime split on delta.
	phi2 is unused but kept so the signature matches the specification.
	"""
	sin_theta = math.sin(theta)
	sin_delta = math.sin(delta)
	cos_phi1 = math.cos(phi1)
	if delta <= -7.6e-6:
		# Same product, reassociated for this regime.
		return lambda1 + math.atan2((sin_delta * cos_phi1) * sin_theta, math.cos(delta))
	numer = (sin_theta * sin_delta) * cos_phi1
	if delta <= 1.55e-7:
		# Near delta = 0 the denominator uses the double-angle identity.
		return lambda1 + math.atan2(numer, 0.5 + (0.5 * math.cos(2.0 * phi1)))
	return lambda1 + math.atan2(numer, math.cos(delta))
# Destination longitude given bearing on a great circle (Alternative 3).
# Herbie-generated rewrite with a three-way regime split on delta.
# phi2 is unused but kept so the signature matches the specification.
function code(lambda1, phi1, phi2, delta, theta)
	numer = Float64(Float64(sin(theta) * sin(delta)) * cos(phi1))
	if (delta <= -7.6e-6)
		# Same product, reassociated for this regime.
		return Float64(lambda1 + atan(Float64(Float64(sin(delta) * cos(phi1)) * sin(theta)), cos(delta)))
	elseif (delta <= 1.55e-7)
		# Near delta = 0 the denominator uses the double-angle identity.
		return Float64(lambda1 + atan(numer, Float64(0.5 + Float64(0.5 * cos(Float64(2.0 * phi1))))))
	else
		return Float64(lambda1 + atan(numer, cos(delta)))
	end
end
% Destination longitude given bearing on a great circle (Alternative 3).
% Herbie-generated rewrite with a three-way regime split on delta.
% phi2 is unused but kept so the signature matches the specification.
function out = code(lambda1, phi1, phi2, delta, theta)
	numer = (sin(theta) * sin(delta)) * cos(phi1);
	if (delta <= -7.6e-6)
		% Same product, reassociated for this regime.
		out = lambda1 + atan2(((sin(delta) * cos(phi1)) * sin(theta)), cos(delta));
	elseif (delta <= 1.55e-7)
		% Near delta = 0 the denominator uses the double-angle identity.
		out = lambda1 + atan2(numer, (0.5 + (0.5 * cos((2.0 * phi1)))));
	else
		out = lambda1 + atan2(numer, cos(delta));
	end
end
(* Destination longitude given bearing on a great circle (Alternative 3):
   Herbie-generated three-regime rewrite split on delta; phi2 is unused
   but kept so the argument list matches the specification. *)
code[lambda1_, phi1_, phi2_, delta_, theta_] := Block[{t$95$1 = N[(N[(N[Sin[theta], $MachinePrecision] * N[Sin[delta], $MachinePrecision]), $MachinePrecision] * N[Cos[phi1], $MachinePrecision]), $MachinePrecision]}, If[LessEqual[delta, -7.6e-6], N[(lambda1 + N[ArcTan[N[(N[(N[Sin[delta], $MachinePrecision] * N[Cos[phi1], $MachinePrecision]), $MachinePrecision] * N[Sin[theta], $MachinePrecision]), $MachinePrecision] / N[Cos[delta], $MachinePrecision]], $MachinePrecision]), $MachinePrecision], If[LessEqual[delta, 1.55e-7], N[(lambda1 + N[ArcTan[t$95$1 / N[(0.5 + N[(0.5 * N[Cos[N[(2.0 * phi1), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision], N[(lambda1 + N[ArcTan[t$95$1 / N[Cos[delta], $MachinePrecision]], $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}

\\
\begin{array}{l}
t_1 := \left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1\\
\mathbf{if}\;delta \leq -7.6 \cdot 10^{-6}:\\
\;\;\;\;\lambda_1 + \tan^{-1}_* \frac{\left(\sin delta \cdot \cos \phi_1\right) \cdot \sin theta}{\cos delta}\\

\mathbf{elif}\;delta \leq 1.55 \cdot 10^{-7}:\\
\;\;\;\;\lambda_1 + \tan^{-1}_* \frac{t\_1}{0.5 + 0.5 \cdot \cos \left(2 \cdot \phi_1\right)}\\

\mathbf{else}:\\
\;\;\;\;\lambda_1 + \tan^{-1}_* \frac{t\_1}{\cos delta}\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if delta < -7.6000000000000001e-6

    1. Initial program 99.8%

      \[\lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \sin \sin^{-1} \left(\sin \phi_1 \cdot \cos delta + \left(\cos \phi_1 \cdot \sin delta\right) \cdot \cos theta\right)} \]
    2. Taylor expanded in phi1 around 0

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\color{blue}{\cos delta}} \]
    3. Step-by-step derivation
      1. lift-cos.f64 (88.4% accuracy)

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta} \]
    4. Applied rewrites88.4%

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\color{blue}{\cos delta}} \]
    5. Step-by-step derivation
      1. lift-*.f64N/A

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\color{blue}{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}}{\cos delta} \]
      2. lift-*.f64N/A

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\color{blue}{\left(\sin theta \cdot \sin delta\right)} \cdot \cos \phi_1}{\cos delta} \]
      3. lift-sin.f64N/A

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\color{blue}{\sin theta} \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta} \]
      4. lift-sin.f64N/A

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \color{blue}{\sin delta}\right) \cdot \cos \phi_1}{\cos delta} \]
      5. *-commutativeN/A

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\color{blue}{\left(\sin delta \cdot \sin theta\right)} \cdot \cos \phi_1}{\cos delta} \]
      6. lift-cos.f64N/A

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin delta \cdot \sin theta\right) \cdot \color{blue}{\cos \phi_1}}{\cos delta} \]
      7. *-commutativeN/A

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\color{blue}{\cos \phi_1 \cdot \left(\sin delta \cdot \sin theta\right)}}{\cos delta} \]
      8. associate-*r*N/A

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\color{blue}{\left(\cos \phi_1 \cdot \sin delta\right) \cdot \sin theta}}{\cos delta} \]
      9. lower-*.f64N/A

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\color{blue}{\left(\cos \phi_1 \cdot \sin delta\right) \cdot \sin theta}}{\cos delta} \]
      10. *-commutativeN/A

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\color{blue}{\left(\sin delta \cdot \cos \phi_1\right)} \cdot \sin theta}{\cos delta} \]
      11. lower-*.f64N/A

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\color{blue}{\left(\sin delta \cdot \cos \phi_1\right)} \cdot \sin theta}{\cos delta} \]
      12. lift-sin.f64N/A

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\color{blue}{\sin delta} \cdot \cos \phi_1\right) \cdot \sin theta}{\cos delta} \]
      13. lift-cos.f64N/A

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin delta \cdot \color{blue}{\cos \phi_1}\right) \cdot \sin theta}{\cos delta} \]
      14. lift-sin.f6488.4

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin delta \cdot \cos \phi_1\right) \cdot \color{blue}{\sin theta}}{\cos delta} \]
    6. Applied rewrites88.4%

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\color{blue}{\left(\sin delta \cdot \cos \phi_1\right) \cdot \sin theta}}{\cos delta} \]

    if -7.6000000000000001e-6 < delta < 1.55e-7

    1. Initial program 99.8%

      \[\lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \sin \sin^{-1} \left(\sin \phi_1 \cdot \cos delta + \left(\cos \phi_1 \cdot \sin delta\right) \cdot \cos theta\right)} \]
    2. Taylor expanded in delta around 0

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\color{blue}{1 - {\sin \phi_1}^{2}}} \]
    3. Step-by-step derivation
      1. unpow2N/A

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{1 - \sin \phi_1 \cdot \color{blue}{\sin \phi_1}} \]
      2. 1-sub-sin-revN/A

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos \phi_1 \cdot \color{blue}{\cos \phi_1}} \]
      3. sqr-cos-aN/A

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\frac{1}{2} + \color{blue}{\frac{1}{2} \cdot \cos \left(2 \cdot \phi_1\right)}} \]
      4. lower-+.f64N/A

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\frac{1}{2} + \color{blue}{\frac{1}{2} \cdot \cos \left(2 \cdot \phi_1\right)}} \]
      5. lower-*.f64N/A

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\frac{1}{2} + \frac{1}{2} \cdot \color{blue}{\cos \left(2 \cdot \phi_1\right)}} \]
      6. lower-cos.f64N/A

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\frac{1}{2} + \frac{1}{2} \cdot \cos \left(2 \cdot \phi_1\right)} \]
      7. lower-*.f64 (80.5% accuracy)

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{0.5 + 0.5 \cdot \cos \left(2 \cdot \phi_1\right)} \]
    4. Applied rewrites (80.5% accuracy)

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\color{blue}{0.5 + 0.5 \cdot \cos \left(2 \cdot \phi_1\right)}} \]

    if 1.55e-7 < delta

    1. Initial program 99.8%

      \[\lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \sin \sin^{-1} \left(\sin \phi_1 \cdot \cos delta + \left(\cos \phi_1 \cdot \sin delta\right) \cdot \cos theta\right)} \]
    2. Taylor expanded in phi1 around 0

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\color{blue}{\cos delta}} \]
    3. Step-by-step derivation
      1. lift-cos.f6488.4

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta} \]
    4. Applied rewrites88.4%

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\color{blue}{\cos delta}} \]
  3. Recombined 3 regimes into one program.
  4. Add Preprocessing

Alternative 4: 91.7% accurate, 2.0× speedup?

\[\begin{array}{l} \\ \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \left(0.5 - 0.5 \cdot \cos \left(2 \cdot \phi_1\right)\right)} \end{array} \]
;; Destination longitude given bearing on a great circle (Alternative 4).
;; Herbie rewrite: sin(phi1)^2 in the denominator is replaced by the
;; double-angle form 0.5 - 0.5*cos(2*phi1).  phi2 is unused.
(FPCore (lambda1 phi1 phi2 delta theta)
 :precision binary64
 (+
  lambda1
  (atan2
   (* (* (sin theta) (sin delta)) (cos phi1))
   (- (cos delta) (- 0.5 (* 0.5 (cos (* 2.0 phi1))))))))
double code(double lambda1, double phi1, double phi2, double delta, double theta) {
	return lambda1 + atan2(((sin(theta) * sin(delta)) * cos(phi1)), (cos(delta) - (0.5 - (0.5 * cos((2.0 * phi1))))));
}
! NaN-aware generic max/min helpers (generated runtime support).
! The expression merge(y, merge(x, max(x, y), y /= y), x /= x) reads:
!   if x is NaN (x /= x)      -> return y
!   else if y is NaN (y /= y) -> return x
!   else                      -> max(x, y)
! so a single NaN operand is ignored in favor of the other value.
! Mixed-kind variants promote the real(4) argument and return real(8).
module fmin_fmax_functions
    implicit none
    private
    public fmax
    public fmin

    interface fmax
        module procedure fmax88
        module procedure fmax44
        module procedure fmax84
        module procedure fmax48
    end interface
    interface fmin
        module procedure fmin88
        module procedure fmin44
        module procedure fmin84
        module procedure fmin48
    end interface
contains
    ! fmax, both arguments real(8).
    real(8) function fmax88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    ! fmax, both arguments real(4).
    real(4) function fmax44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    ! fmax, real(8)/real(4) mix; real(4) argument promoted, result real(8).
    real(8) function fmax84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
    end function
    ! fmax, real(4)/real(8) mix; real(4) argument promoted, result real(8).
    real(8) function fmax48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
    end function
    ! fmin, both arguments real(8).
    real(8) function fmin88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    ! fmin, both arguments real(4).
    real(4) function fmin44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    ! fmin, real(8)/real(4) mix; real(4) argument promoted, result real(8).
    real(8) function fmin84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
    end function
    ! fmin, real(4)/real(8) mix; real(4) argument promoted, result real(8).
    real(8) function fmin48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
    end function
end module

! Destination longitude given bearing on a great circle (Alternative 4).
! Herbie rewrite: sin(phi1)^2 in the denominator is replaced by the
! double-angle form 0.5 - 0.5*cos(2*phi1).  phi2 is unused.
real(8) function code(lambda1, phi1, phi2, delta, theta)
use fmin_fmax_functions
    real(8), intent (in) :: lambda1
    real(8), intent (in) :: phi1
    real(8), intent (in) :: phi2
    real(8), intent (in) :: delta
    real(8), intent (in) :: theta
    real(8) :: numer
    real(8) :: denom

    numer = (sin(theta) * sin(delta)) * cos(phi1)
    denom = cos(delta) - (0.5d0 - (0.5d0 * cos((2.0d0 * phi1))))
    code = lambda1 + atan2(numer, denom)
end function
/**
 * Destination longitude given bearing on a great circle (Alternative 4).
 * Herbie rewrite: sin(phi1)^2 in the denominator is replaced by the
 * double-angle form 0.5 - 0.5*cos(2*phi1).  phi2 is unused.
 */
public static double code(double lambda1, double phi1, double phi2, double delta, double theta) {
	double numer = (Math.sin(theta) * Math.sin(delta)) * Math.cos(phi1);
	double sinPhi1Sq = 0.5 - (0.5 * Math.cos(2.0 * phi1));
	return lambda1 + Math.atan2(numer, Math.cos(delta) - sinPhi1Sq);
}
def code(lambda1, phi1, phi2, delta, theta):
	return lambda1 + math.atan2(((math.sin(theta) * math.sin(delta)) * math.cos(phi1)), (math.cos(delta) - (0.5 - (0.5 * math.cos((2.0 * phi1))))))
# Destination longitude given bearing on a great circle (Alternative 4).
# Herbie rewrite: sin(phi1)^2 in the denominator is replaced by the
# double-angle form 0.5 - 0.5*cos(2*phi1).  phi2 is unused.
function code(lambda1, phi1, phi2, delta, theta)
	numer = Float64(Float64(sin(theta) * sin(delta)) * cos(phi1))
	denom = Float64(cos(delta) - Float64(0.5 - Float64(0.5 * cos(Float64(2.0 * phi1)))))
	return Float64(lambda1 + atan(numer, denom))
end
% Destination longitude given bearing on a great circle (Alternative 4).
% Herbie rewrite: sin(phi1)^2 in the denominator is replaced by the
% double-angle form 0.5 - 0.5*cos(2*phi1).  phi2 is unused.
function out = code(lambda1, phi1, phi2, delta, theta)
	numer = (sin(theta) * sin(delta)) * cos(phi1);
	denom = cos(delta) - (0.5 - (0.5 * cos((2.0 * phi1))));
	out = lambda1 + atan2(numer, denom);
end
(* Destination longitude given bearing on a great circle (Alternative 4):
   sin(phi1)^2 in the denominator replaced by 0.5 - 0.5*Cos[2 phi1];
   phi2 is unused but kept to match the specification. *)
code[lambda1_, phi1_, phi2_, delta_, theta_] := N[(lambda1 + N[ArcTan[N[(N[(N[Sin[theta], $MachinePrecision] * N[Sin[delta], $MachinePrecision]), $MachinePrecision] * N[Cos[phi1], $MachinePrecision]), $MachinePrecision] / N[(N[Cos[delta], $MachinePrecision] - N[(0.5 - N[(0.5 * N[Cos[N[(2.0 * phi1), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \left(0.5 - 0.5 \cdot \cos \left(2 \cdot \phi_1\right)\right)}
\end{array}
Derivation
  1. Initial program 99.8%

    \[\lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \sin \sin^{-1} \left(\sin \phi_1 \cdot \cos delta + \left(\cos \phi_1 \cdot \sin delta\right) \cdot \cos theta\right)} \]
  2. Taylor expanded in delta around 0

    \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \color{blue}{{\sin \phi_1}^{2}}} \]
  3. Step-by-step derivation
    1. unpow2N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \color{blue}{\sin \phi_1}} \]
    2. sqr-sin-aN/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \left(\frac{1}{2} - \color{blue}{\frac{1}{2} \cdot \cos \left(2 \cdot \phi_1\right)}\right)} \]
    3. lower--.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \left(\frac{1}{2} - \color{blue}{\frac{1}{2} \cdot \cos \left(2 \cdot \phi_1\right)}\right)} \]
    4. lower-*.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \left(\frac{1}{2} - \frac{1}{2} \cdot \color{blue}{\cos \left(2 \cdot \phi_1\right)}\right)} \]
    5. lower-cos.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \left(\frac{1}{2} - \frac{1}{2} \cdot \cos \left(2 \cdot \phi_1\right)\right)} \]
    6. lower-*.f64 (92.2% accuracy)

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \left(0.5 - 0.5 \cdot \cos \left(2 \cdot \phi_1\right)\right)} \]
  4. Applied rewrites (92.2% accuracy)

    \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \color{blue}{\left(0.5 - 0.5 \cdot \cos \left(2 \cdot \phi_1\right)\right)}} \]
  5. Add Preprocessing

Alternative 5: 88.4% accurate, 2.6× speedup?

\[\begin{array}{l} \\ \lambda_1 + \tan^{-1}_* \frac{\left(\sin delta \cdot \cos \phi_1\right) \cdot \sin theta}{\cos delta} \end{array} \]
;; Destination longitude given bearing on a great circle (Alternative 5).
;; Herbie rewrite: Taylor expansion in phi1 around 0 drops the
;; sin(phi1)-dependent denominator term entirely.  phi2 is unused.
(FPCore (lambda1 phi1 phi2 delta theta)
 :precision binary64
 (+ lambda1 (atan2 (* (* (sin delta) (cos phi1)) (sin theta)) (cos delta))))
double code(double lambda1, double phi1, double phi2, double delta, double theta) {
	return lambda1 + atan2(((sin(delta) * cos(phi1)) * sin(theta)), cos(delta));
}
! NaN-aware generic max/min helpers (generated runtime support).
! The expression merge(y, merge(x, max(x, y), y /= y), x /= x) reads:
!   if x is NaN (x /= x)      -> return y
!   else if y is NaN (y /= y) -> return x
!   else                      -> max(x, y)
! so a single NaN operand is ignored in favor of the other value.
! Mixed-kind variants promote the real(4) argument and return real(8).
module fmin_fmax_functions
    implicit none
    private
    public fmax
    public fmin

    interface fmax
        module procedure fmax88
        module procedure fmax44
        module procedure fmax84
        module procedure fmax48
    end interface
    interface fmin
        module procedure fmin88
        module procedure fmin44
        module procedure fmin84
        module procedure fmin48
    end interface
contains
    ! fmax, both arguments real(8).
    real(8) function fmax88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    ! fmax, both arguments real(4).
    real(4) function fmax44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    ! fmax, real(8)/real(4) mix; real(4) argument promoted, result real(8).
    real(8) function fmax84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
    end function
    ! fmax, real(4)/real(8) mix; real(4) argument promoted, result real(8).
    real(8) function fmax48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
    end function
    ! fmin, both arguments real(8).
    real(8) function fmin88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    ! fmin, both arguments real(4).
    real(4) function fmin44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    ! fmin, real(8)/real(4) mix; real(4) argument promoted, result real(8).
    real(8) function fmin84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
    end function
    ! fmin, real(4)/real(8) mix; real(4) argument promoted, result real(8).
    real(8) function fmin48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
    end function
end module

! Destination longitude given bearing on a great circle (Alternative 5).
! Herbie rewrite: the sin(phi1)-dependent denominator term is dropped,
! leaving atan2(num, cos(delta)).  phi2 is unused.
real(8) function code(lambda1, phi1, phi2, delta, theta)
use fmin_fmax_functions
    real(8), intent (in) :: lambda1
    real(8), intent (in) :: phi1
    real(8), intent (in) :: phi2
    real(8), intent (in) :: delta
    real(8), intent (in) :: theta
    real(8) :: numer

    numer = (sin(delta) * cos(phi1)) * sin(theta)
    code = lambda1 + atan2(numer, cos(delta))
end function
/**
 * Destination longitude given bearing on a great circle (Alternative 5).
 * Herbie rewrite: the sin(phi1)-dependent denominator term is dropped,
 * leaving atan2(num, cos(delta)).  phi2 is unused.
 */
public static double code(double lambda1, double phi1, double phi2, double delta, double theta) {
	double numer = (Math.sin(delta) * Math.cos(phi1)) * Math.sin(theta);
	return lambda1 + Math.atan2(numer, Math.cos(delta));
}
def code(lambda1, phi1, phi2, delta, theta):
	"""Destination longitude given bearing on a great circle (Alternative 5).

	Herbie rewrite: the sin(phi1)-dependent denominator term is dropped,
	leaving atan2(num, cos(delta)).  phi2 is unused.
	"""
	numer = (math.sin(delta) * math.cos(phi1)) * math.sin(theta)
	return lambda1 + math.atan2(numer, math.cos(delta))
# Destination longitude given bearing on a great circle (Alternative 5).
# Herbie rewrite: the sin(phi1)-dependent denominator term is dropped,
# leaving atan(num, cos(delta)).  phi2 is unused.
function code(lambda1, phi1, phi2, delta, theta)
	numer = Float64(Float64(sin(delta) * cos(phi1)) * sin(theta))
	return Float64(lambda1 + atan(numer, cos(delta)))
end
% Destination longitude given bearing on a great circle (Alternative 5).
% Herbie rewrite: the sin(phi1)-dependent denominator term is dropped,
% leaving atan2(num, cos(delta)).  phi2 is unused.
function out = code(lambda1, phi1, phi2, delta, theta)
	numer = (sin(delta) * cos(phi1)) * sin(theta);
	out = lambda1 + atan2(numer, cos(delta));
end
(* Destination longitude given bearing on a great circle (Alternative 5):
   the sin(phi1)-dependent denominator term is dropped, leaving
   ArcTan[num / Cos[delta]]; phi2 is unused. *)
code[lambda1_, phi1_, phi2_, delta_, theta_] := N[(lambda1 + N[ArcTan[N[(N[(N[Sin[delta], $MachinePrecision] * N[Cos[phi1], $MachinePrecision]), $MachinePrecision] * N[Sin[theta], $MachinePrecision]), $MachinePrecision] / N[Cos[delta], $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\lambda_1 + \tan^{-1}_* \frac{\left(\sin delta \cdot \cos \phi_1\right) \cdot \sin theta}{\cos delta}
\end{array}
Derivation
  1. Initial program 99.8%

    \[\lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \sin \sin^{-1} \left(\sin \phi_1 \cdot \cos delta + \left(\cos \phi_1 \cdot \sin delta\right) \cdot \cos theta\right)} \]
  2. Taylor expanded in phi1 around 0

    \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\color{blue}{\cos delta}} \]
  3. Step-by-step derivation
    1. lift-cos.f6488.4

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta} \]
  4. Applied rewrites88.4%

    \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\color{blue}{\cos delta}} \]
  5. Step-by-step derivation
    1. lift-*.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\color{blue}{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}}{\cos delta} \]
    2. lift-*.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\color{blue}{\left(\sin theta \cdot \sin delta\right)} \cdot \cos \phi_1}{\cos delta} \]
    3. lift-sin.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\color{blue}{\sin theta} \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta} \]
    4. lift-sin.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \color{blue}{\sin delta}\right) \cdot \cos \phi_1}{\cos delta} \]
    5. *-commutativeN/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\color{blue}{\left(\sin delta \cdot \sin theta\right)} \cdot \cos \phi_1}{\cos delta} \]
    6. lift-cos.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin delta \cdot \sin theta\right) \cdot \color{blue}{\cos \phi_1}}{\cos delta} \]
    7. *-commutativeN/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\color{blue}{\cos \phi_1 \cdot \left(\sin delta \cdot \sin theta\right)}}{\cos delta} \]
    8. associate-*r*N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\color{blue}{\left(\cos \phi_1 \cdot \sin delta\right) \cdot \sin theta}}{\cos delta} \]
    9. lower-*.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\color{blue}{\left(\cos \phi_1 \cdot \sin delta\right) \cdot \sin theta}}{\cos delta} \]
    10. *-commutativeN/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\color{blue}{\left(\sin delta \cdot \cos \phi_1\right)} \cdot \sin theta}{\cos delta} \]
    11. lower-*.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\color{blue}{\left(\sin delta \cdot \cos \phi_1\right)} \cdot \sin theta}{\cos delta} \]
    12. lift-sin.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\color{blue}{\sin delta} \cdot \cos \phi_1\right) \cdot \sin theta}{\cos delta} \]
    13. lift-cos.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin delta \cdot \color{blue}{\cos \phi_1}\right) \cdot \sin theta}{\cos delta} \]
    14. lift-sin.f6488.4

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin delta \cdot \cos \phi_1\right) \cdot \color{blue}{\sin theta}}{\cos delta} \]
  6. Applied rewrites88.4%

    \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\color{blue}{\left(\sin delta \cdot \cos \phi_1\right) \cdot \sin theta}}{\cos delta} \]
  7. Add Preprocessing

Alternative 6: 88.4% accurate, 2.6× speedup?

\[\begin{array}{l} \\ \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta} \end{array} \]
;; Destination longitude given bearing on a great circle (Alternative 6).
;; Herbie rewrite: same dropped denominator term as Alternative 5, but
;; with the original numerator association.  phi2 is unused.
(FPCore (lambda1 phi1 phi2 delta theta)
 :precision binary64
 (+ lambda1 (atan2 (* (* (sin theta) (sin delta)) (cos phi1)) (cos delta))))
double code(double lambda1, double phi1, double phi2, double delta, double theta) {
	return lambda1 + atan2(((sin(theta) * sin(delta)) * cos(phi1)), cos(delta));
}
module fmin_fmax_functions
    ! NaN-aware generic max/min helpers mirroring C's fmax/fmin semantics:
    ! if one argument is NaN the other argument is returned, otherwise the
    ! intrinsic max/min result is used.  Generic interfaces cover every
    ! real(4)/real(8) argument combination.
    implicit none
    private
    public fmax
    public fmin

    interface fmax
        module procedure fmax88
        module procedure fmax44
        module procedure fmax84
        module procedure fmax48
    end interface
    interface fmin
        module procedure fmin88
        module procedure fmin44
        module procedure fmin84
        module procedure fmin48
    end interface
contains
    ! real(8)/real(8) maximum.
    real(8) function fmax88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        if (x /= x) then          ! x is NaN: take the other operand
            res = y
        else if (y /= y) then     ! y is NaN: take the other operand
            res = x
        else
            res = max(x, y)
        end if
    end function
    ! real(4)/real(4) maximum.
    real(4) function fmax44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        if (x /= x) then
            res = y
        else if (y /= y) then
            res = x
        else
            res = max(x, y)
        end if
    end function
    ! real(8)/real(4) maximum; second operand promoted to real(8).
    real(8) function fmax84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        if (x /= x) then
            res = dble(y)
        else if (y /= y) then
            res = x
        else
            res = max(x, dble(y))
        end if
    end function
    ! real(4)/real(8) maximum; first operand promoted to real(8).
    real(8) function fmax48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        if (x /= x) then
            res = y
        else if (y /= y) then
            res = dble(x)
        else
            res = max(dble(x), y)
        end if
    end function
    ! real(8)/real(8) minimum.
    real(8) function fmin88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        if (x /= x) then
            res = y
        else if (y /= y) then
            res = x
        else
            res = min(x, y)
        end if
    end function
    ! real(4)/real(4) minimum.
    real(4) function fmin44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        if (x /= x) then
            res = y
        else if (y /= y) then
            res = x
        else
            res = min(x, y)
        end if
    end function
    ! real(8)/real(4) minimum; second operand promoted to real(8).
    real(8) function fmin84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        if (x /= x) then
            res = dble(y)
        else if (y /= y) then
            res = x
        else
            res = min(x, dble(y))
        end if
    end function
    ! real(4)/real(8) minimum; first operand promoted to real(8).
    real(8) function fmin48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        if (x /= x) then
            res = y
        else if (y /= y) then
            res = dble(x)
        else
            res = min(dble(x), y)
        end if
    end function
end module

real(8) function code(lambda1, phi1, phi2, delta, theta)
use fmin_fmax_functions
    ! Alternative 6: destination longitude with the denominator reduced
    ! to cos(delta).  phi2 is unused but kept for interface compatibility.
    real(8), intent (in) :: lambda1
    real(8), intent (in) :: phi1
    real(8), intent (in) :: phi2
    real(8), intent (in) :: delta
    real(8), intent (in) :: theta
    real(8) :: num
    num = (sin(theta) * sin(delta)) * cos(phi1)
    code = lambda1 + atan2(num, cos(delta))
end function
/** Destination longitude on a great circle (alternative 6): denominator
 *  simplified to cos(delta); phi2 is unused but kept for compatibility. */
public static double code(double lambda1, double phi1, double phi2, double delta, double theta) {
	double num = Math.sin(theta) * Math.sin(delta) * Math.cos(phi1);
	return lambda1 + Math.atan2(num, Math.cos(delta));
}
def code(lambda1, phi1, phi2, delta, theta):
	# Alternative 6: destination longitude with the denominator simplified
	# to cos(delta).  phi2 is unused but kept for interface compatibility.
	num = math.sin(theta) * math.sin(delta) * math.cos(phi1)
	return lambda1 + math.atan2(num, math.cos(delta))
function code(lambda1, phi1, phi2, delta, theta)
	# Alternative 6: denominator simplified to cos(delta).
	num = Float64(Float64(sin(theta) * sin(delta)) * cos(phi1))
	return Float64(lambda1 + atan(num, cos(delta)))
end
function tmp = code(lambda1, phi1, phi2, delta, theta)
	% Alternative 6: denominator simplified to cos(delta).
	num = (sin(theta) * sin(delta)) * cos(phi1);
	tmp = lambda1 + atan2(num, cos(delta));
end
(* Alternative 6: destination longitude with the denominator simplified to Cos[delta]. *)
code[lambda1_, phi1_, phi2_, delta_, theta_] := N[(lambda1 + N[ArcTan[N[(N[(N[Sin[theta], $MachinePrecision] * N[Sin[delta], $MachinePrecision]), $MachinePrecision] * N[Cos[phi1], $MachinePrecision]), $MachinePrecision] / N[Cos[delta], $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta}
\end{array}
Derivation
  1. Initial program 99.8%

    \[\lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \sin \sin^{-1} \left(\sin \phi_1 \cdot \cos delta + \left(\cos \phi_1 \cdot \sin delta\right) \cdot \cos theta\right)} \]
  2. Taylor expanded in phi1 around 0

    \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\color{blue}{\cos delta}} \]
  3. Step-by-step derivation
    1. lift-cos.f6488.4

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta} \]
  4. Applied rewrites88.4%

    \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\color{blue}{\cos delta}} \]
  5. Add Preprocessing

Alternative 7: 86.3% accurate, 3.4× speedup?

\[\begin{array}{l} \\ \lambda_1 + \tan^{-1}_* \frac{\sin theta \cdot \sin delta}{\cos delta} \end{array} \]
;; Alternative 7 (86.3% accurate): phi1 terms dropped entirely
;; (Taylor expansion around phi1 = 0).
(FPCore (lambda1 phi1 phi2 delta theta)
 :precision binary64
 (+ lambda1 (atan2 (* (sin theta) (sin delta)) (cos delta))))
double code(double lambda1, double phi1, double phi2, double delta, double theta) {
	return lambda1 + atan2((sin(theta) * sin(delta)), cos(delta));
}
module fmin_fmax_functions
    ! NaN-aware generic max/min helpers mirroring C's fmax/fmin semantics:
    ! if one argument is NaN the other argument is returned, otherwise the
    ! intrinsic max/min result is used.  Generic interfaces cover every
    ! real(4)/real(8) argument combination.
    implicit none
    private
    public fmax
    public fmin

    interface fmax
        module procedure fmax88
        module procedure fmax44
        module procedure fmax84
        module procedure fmax48
    end interface
    interface fmin
        module procedure fmin88
        module procedure fmin44
        module procedure fmin84
        module procedure fmin48
    end interface
contains
    ! real(8)/real(8) maximum.
    real(8) function fmax88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        if (x /= x) then          ! x is NaN: take the other operand
            res = y
        else if (y /= y) then     ! y is NaN: take the other operand
            res = x
        else
            res = max(x, y)
        end if
    end function
    ! real(4)/real(4) maximum.
    real(4) function fmax44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        if (x /= x) then
            res = y
        else if (y /= y) then
            res = x
        else
            res = max(x, y)
        end if
    end function
    ! real(8)/real(4) maximum; second operand promoted to real(8).
    real(8) function fmax84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        if (x /= x) then
            res = dble(y)
        else if (y /= y) then
            res = x
        else
            res = max(x, dble(y))
        end if
    end function
    ! real(4)/real(8) maximum; first operand promoted to real(8).
    real(8) function fmax48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        if (x /= x) then
            res = y
        else if (y /= y) then
            res = dble(x)
        else
            res = max(dble(x), y)
        end if
    end function
    ! real(8)/real(8) minimum.
    real(8) function fmin88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        if (x /= x) then
            res = y
        else if (y /= y) then
            res = x
        else
            res = min(x, y)
        end if
    end function
    ! real(4)/real(4) minimum.
    real(4) function fmin44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        if (x /= x) then
            res = y
        else if (y /= y) then
            res = x
        else
            res = min(x, y)
        end if
    end function
    ! real(8)/real(4) minimum; second operand promoted to real(8).
    real(8) function fmin84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        if (x /= x) then
            res = dble(y)
        else if (y /= y) then
            res = x
        else
            res = min(x, dble(y))
        end if
    end function
    ! real(4)/real(8) minimum; first operand promoted to real(8).
    real(8) function fmin48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        if (x /= x) then
            res = y
        else if (y /= y) then
            res = dble(x)
        else
            res = min(dble(x), y)
        end if
    end function
end module

real(8) function code(lambda1, phi1, phi2, delta, theta)
use fmin_fmax_functions
    ! Alternative 7: phi1 terms dropped entirely; phi1/phi2 unused but
    ! kept for interface compatibility.
    real(8), intent (in) :: lambda1
    real(8), intent (in) :: phi1
    real(8), intent (in) :: phi2
    real(8), intent (in) :: delta
    real(8), intent (in) :: theta
    real(8) :: num
    num = sin(theta) * sin(delta)
    code = lambda1 + atan2(num, cos(delta))
end function
/** Destination longitude (alternative 7): phi1 terms dropped entirely. */
public static double code(double lambda1, double phi1, double phi2, double delta, double theta) {
	double num = Math.sin(theta) * Math.sin(delta);
	return lambda1 + Math.atan2(num, Math.cos(delta));
}
def code(lambda1, phi1, phi2, delta, theta):
	# Alternative 7: phi1 terms dropped entirely (Taylor around phi1 = 0).
	num = math.sin(theta) * math.sin(delta)
	return lambda1 + math.atan2(num, math.cos(delta))
function code(lambda1, phi1, phi2, delta, theta)
	# Alternative 7: phi1 terms dropped entirely.
	num = Float64(sin(theta) * sin(delta))
	return Float64(lambda1 + atan(num, cos(delta)))
end
function tmp = code(lambda1, phi1, phi2, delta, theta)
	% Alternative 7: phi1 terms dropped entirely.
	num = sin(theta) * sin(delta);
	tmp = lambda1 + atan2(num, cos(delta));
end
(* Alternative 7: phi1 terms dropped entirely (Taylor expansion around phi1 = 0). *)
code[lambda1_, phi1_, phi2_, delta_, theta_] := N[(lambda1 + N[ArcTan[N[(N[Sin[theta], $MachinePrecision] * N[Sin[delta], $MachinePrecision]), $MachinePrecision] / N[Cos[delta], $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\lambda_1 + \tan^{-1}_* \frac{\sin theta \cdot \sin delta}{\cos delta}
\end{array}
Derivation
  1. Initial program 99.8%

    \[\lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \sin \sin^{-1} \left(\sin \phi_1 \cdot \cos delta + \left(\cos \phi_1 \cdot \sin delta\right) \cdot \cos theta\right)} \]
  2. Taylor expanded in phi1 around 0

    \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\color{blue}{\cos delta}} \]
  3. Step-by-step derivation
    1. lift-cos.f6488.4

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta} \]
  4. Applied rewrites88.4%

    \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\color{blue}{\cos delta}} \]
  5. Taylor expanded in phi1 around 0

    \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\color{blue}{\sin delta \cdot \sin theta}}{\cos delta} \]
  6. Step-by-step derivation
    1. *-commutativeN/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin theta \cdot \color{blue}{\sin delta}}{\cos delta} \]
    2. lift-sin.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin theta \cdot \sin \color{blue}{delta}}{\cos delta} \]
    3. lift-sin.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin theta \cdot \sin delta}{\cos delta} \]
    4. lift-*.f64 [accuracy: 86.3%]

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin theta \cdot \color{blue}{\sin delta}}{\cos delta} \]
  7. Applied rewrites86.3%

    \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\color{blue}{\sin theta \cdot \sin delta}}{\cos delta} \]
  8. Add Preprocessing

Alternative 8: 81.1% accurate, 3.9× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_1 := \lambda_1 + \tan^{-1}_* \frac{\sin theta \cdot \sin delta}{1 - 0.5 \cdot \left(delta \cdot delta\right)}\\ \mathbf{if}\;theta \leq -2100000:\\ \;\;\;\;t\_1\\ \mathbf{elif}\;theta \leq 16500000:\\ \;\;\;\;\lambda_1 + \tan^{-1}_* \frac{theta \cdot \sin delta}{\cos delta}\\ \mathbf{else}:\\ \;\;\;\;t\_1\\ \end{array} \end{array} \]
;; Alternative 8 (81.1% accurate): regime split on theta.  For moderate
;; theta, sin(theta) ~ theta; otherwise cos(delta) is replaced by its
;; second-order Taylor polynomial 1 - delta^2/2.
(FPCore (lambda1 phi1 phi2 delta theta)
 :precision binary64
 (let* ((t_1
         (+
          lambda1
          (atan2
           (* (sin theta) (sin delta))
           (- 1.0 (* 0.5 (* delta delta)))))))
   (if (<= theta -2100000.0)
     t_1
     (if (<= theta 16500000.0)
       (+ lambda1 (atan2 (* theta (sin delta)) (cos delta)))
       t_1))))
double code(double lambda1, double phi1, double phi2, double delta, double theta) {
	double t_1 = lambda1 + atan2((sin(theta) * sin(delta)), (1.0 - (0.5 * (delta * delta))));
	double tmp;
	if (theta <= -2100000.0) {
		tmp = t_1;
	} else if (theta <= 16500000.0) {
		tmp = lambda1 + atan2((theta * sin(delta)), cos(delta));
	} else {
		tmp = t_1;
	}
	return tmp;
}
module fmin_fmax_functions
    ! NaN-aware generic max/min helpers mirroring C's fmax/fmin semantics:
    ! if one argument is NaN the other argument is returned, otherwise the
    ! intrinsic max/min result is used.  Generic interfaces cover every
    ! real(4)/real(8) argument combination.
    implicit none
    private
    public fmax
    public fmin

    interface fmax
        module procedure fmax88
        module procedure fmax44
        module procedure fmax84
        module procedure fmax48
    end interface
    interface fmin
        module procedure fmin88
        module procedure fmin44
        module procedure fmin84
        module procedure fmin48
    end interface
contains
    ! real(8)/real(8) maximum.
    real(8) function fmax88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        if (x /= x) then          ! x is NaN: take the other operand
            res = y
        else if (y /= y) then     ! y is NaN: take the other operand
            res = x
        else
            res = max(x, y)
        end if
    end function
    ! real(4)/real(4) maximum.
    real(4) function fmax44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        if (x /= x) then
            res = y
        else if (y /= y) then
            res = x
        else
            res = max(x, y)
        end if
    end function
    ! real(8)/real(4) maximum; second operand promoted to real(8).
    real(8) function fmax84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        if (x /= x) then
            res = dble(y)
        else if (y /= y) then
            res = x
        else
            res = max(x, dble(y))
        end if
    end function
    ! real(4)/real(8) maximum; first operand promoted to real(8).
    real(8) function fmax48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        if (x /= x) then
            res = y
        else if (y /= y) then
            res = dble(x)
        else
            res = max(dble(x), y)
        end if
    end function
    ! real(8)/real(8) minimum.
    real(8) function fmin88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        if (x /= x) then
            res = y
        else if (y /= y) then
            res = x
        else
            res = min(x, y)
        end if
    end function
    ! real(4)/real(4) minimum.
    real(4) function fmin44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        if (x /= x) then
            res = y
        else if (y /= y) then
            res = x
        else
            res = min(x, y)
        end if
    end function
    ! real(8)/real(4) minimum; second operand promoted to real(8).
    real(8) function fmin84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        if (x /= x) then
            res = dble(y)
        else if (y /= y) then
            res = x
        else
            res = min(x, dble(y))
        end if
    end function
    ! real(4)/real(8) minimum; first operand promoted to real(8).
    real(8) function fmin48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        if (x /= x) then
            res = y
        else if (y /= y) then
            res = dble(x)
        else
            res = min(dble(x), y)
        end if
    end function
end module

real(8) function code(lambda1, phi1, phi2, delta, theta)
use fmin_fmax_functions
    ! Alternative 8: regime split on theta.  For moderate theta use
    ! sin(theta) ~ theta; otherwise replace cos(delta) with its
    ! second-order Taylor polynomial.  NaN theta falls through to the
    ! else branch, matching the original control flow.
    real(8), intent (in) :: lambda1
    real(8), intent (in) :: phi1
    real(8), intent (in) :: phi2
    real(8), intent (in) :: delta
    real(8), intent (in) :: theta
    if (theta > (-2100000.0d0) .and. theta <= 16500000.0d0) then
        code = lambda1 + atan2((theta * sin(delta)), cos(delta))
    else
        code = lambda1 + atan2((sin(theta) * sin(delta)), (1.0d0 - (0.5d0 * (delta * delta))))
    end if
end function
/** Alternative 8: regime split on theta — moderate theta uses sin(theta) ~ theta,
 *  otherwise cos(delta) is replaced by its 2nd-order Taylor polynomial.
 *  NaN theta falls through to the Taylor branch, matching the original. */
public static double code(double lambda1, double phi1, double phi2, double delta, double theta) {
	if (theta > -2100000.0 && theta <= 16500000.0) {
		return lambda1 + Math.atan2(theta * Math.sin(delta), Math.cos(delta));
	}
	double cosApprox = 1.0 - 0.5 * (delta * delta);
	return lambda1 + Math.atan2(Math.sin(theta) * Math.sin(delta), cosApprox);
}
def code(lambda1, phi1, phi2, delta, theta):
	# Alternative 8: regime split on theta.  For moderate theta use the
	# linearization sin(theta) ~ theta; otherwise approximate cos(delta)
	# by its second-order Taylor polynomial.  NaN theta falls through to
	# the Taylor branch, matching the original control flow.
	if -2100000.0 < theta <= 16500000.0:
		return lambda1 + math.atan2(theta * math.sin(delta), math.cos(delta))
	cos_approx = 1.0 - 0.5 * (delta * delta)
	return lambda1 + math.atan2(math.sin(theta) * math.sin(delta), cos_approx)
function code(lambda1, phi1, phi2, delta, theta)
	# Alternative 8: regime split on theta (moderate theta: sin(theta) ~ theta;
	# otherwise cos(delta) replaced by its 2nd-order Taylor polynomial).
	if -2100000.0 < theta <= 16500000.0
		return Float64(lambda1 + atan(Float64(theta * sin(delta)), cos(delta)))
	end
	denom = Float64(1.0 - Float64(0.5 * Float64(delta * delta)))
	return Float64(lambda1 + atan(Float64(sin(theta) * sin(delta)), denom))
end
function tmp_2 = code(lambda1, phi1, phi2, delta, theta)
	% Alternative 8: regime split on theta (moderate theta: sin(theta) ~ theta;
	% otherwise cos(delta) replaced by its 2nd-order Taylor polynomial).
	if ((theta > -2100000.0) && (theta <= 16500000.0))
		tmp_2 = lambda1 + atan2((theta * sin(delta)), cos(delta));
	else
		tmp_2 = lambda1 + atan2((sin(theta) * sin(delta)), (1.0 - (0.5 * (delta * delta))));
	end
end
(* Alternative 8: regime split on theta -- moderate theta uses Sin[theta] ~ theta, otherwise Cos[delta] is replaced by its 2nd-order Taylor polynomial. *)
code[lambda1_, phi1_, phi2_, delta_, theta_] := Block[{t$95$1 = N[(lambda1 + N[ArcTan[N[(N[Sin[theta], $MachinePrecision] * N[Sin[delta], $MachinePrecision]), $MachinePrecision] / N[(1.0 - N[(0.5 * N[(delta * delta), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]}, If[LessEqual[theta, -2100000.0], t$95$1, If[LessEqual[theta, 16500000.0], N[(lambda1 + N[ArcTan[N[(theta * N[Sin[delta], $MachinePrecision]), $MachinePrecision] / N[Cos[delta], $MachinePrecision]], $MachinePrecision]), $MachinePrecision], t$95$1]]]
\begin{array}{l}

\\
\begin{array}{l}
t_1 := \lambda_1 + \tan^{-1}_* \frac{\sin theta \cdot \sin delta}{1 - 0.5 \cdot \left(delta \cdot delta\right)}\\
\mathbf{if}\;theta \leq -2100000:\\
\;\;\;\;t\_1\\

\mathbf{elif}\;theta \leq 16500000:\\
\;\;\;\;\lambda_1 + \tan^{-1}_* \frac{theta \cdot \sin delta}{\cos delta}\\

\mathbf{else}:\\
\;\;\;\;t\_1\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if theta < -2.1e6 or 1.65e7 < theta

    1. Initial program 99.8%

      \[\lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \sin \sin^{-1} \left(\sin \phi_1 \cdot \cos delta + \left(\cos \phi_1 \cdot \sin delta\right) \cdot \cos theta\right)} \]
    2. Taylor expanded in phi1 around 0

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\color{blue}{\cos delta}} \]
    3. Step-by-step derivation
      1. lift-cos.f6488.4

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta} \]
    4. Applied rewrites88.4%

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\color{blue}{\cos delta}} \]
    5. Taylor expanded in phi1 around 0

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\color{blue}{\sin delta \cdot \sin theta}}{\cos delta} \]
    6. Step-by-step derivation
      1. *-commutativeN/A

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin theta \cdot \color{blue}{\sin delta}}{\cos delta} \]
      2. lift-sin.f64N/A

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin theta \cdot \sin \color{blue}{delta}}{\cos delta} \]
      3. lift-sin.f64N/A

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin theta \cdot \sin delta}{\cos delta} \]
      4. lift-*.f6486.3

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin theta \cdot \color{blue}{\sin delta}}{\cos delta} \]
    7. Applied rewrites86.3%

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\color{blue}{\sin theta \cdot \sin delta}}{\cos delta} \]
    8. Taylor expanded in delta around 0

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin theta \cdot \sin delta}{1 + \color{blue}{\frac{-1}{2} \cdot {delta}^{2}}} \]
    9. Step-by-step derivation
      1. cos-neg-revN/A

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin theta \cdot \sin delta}{1 + \color{blue}{\frac{-1}{2}} \cdot {delta}^{2}} \]
      2. lift-neg.f64N/A

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin theta \cdot \sin delta}{1 + \frac{-1}{2} \cdot {delta}^{2}} \]
      3. sin-+PI/2N/A

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin theta \cdot \sin delta}{1 + \color{blue}{\frac{-1}{2}} \cdot {delta}^{2}} \]
      4. lift-neg.f64N/A

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin theta \cdot \sin delta}{1 + \frac{-1}{2} \cdot {delta}^{2}} \]
      5. fp-cancel-sign-sub-invN/A

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin theta \cdot \sin delta}{1 - \left(\mathsf{neg}\left(\frac{-1}{2}\right)\right) \cdot \color{blue}{{delta}^{2}}} \]
      6. lower--.f64N/A

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin theta \cdot \sin delta}{1 - \left(\mathsf{neg}\left(\frac{-1}{2}\right)\right) \cdot \color{blue}{{delta}^{2}}} \]
      7. metadata-evalN/A

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin theta \cdot \sin delta}{1 - \frac{1}{2} \cdot {delta}^{2}} \]
      8. lower-*.f64N/A

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin theta \cdot \sin delta}{1 - \frac{1}{2} \cdot {delta}^{\color{blue}{2}}} \]
      9. unpow2N/A

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin theta \cdot \sin delta}{1 - \frac{1}{2} \cdot \left(delta \cdot delta\right)} \]
      10. lower-*.f6477.3

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin theta \cdot \sin delta}{1 - 0.5 \cdot \left(delta \cdot delta\right)} \]
    10. Applied rewrites77.3%

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin theta \cdot \sin delta}{1 - \color{blue}{0.5 \cdot \left(delta \cdot delta\right)}} \]

    if -2.1e6 < theta < 1.65e7

    1. Initial program 99.8%

      \[\lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \sin \sin^{-1} \left(\sin \phi_1 \cdot \cos delta + \left(\cos \phi_1 \cdot \sin delta\right) \cdot \cos theta\right)} \]
    2. Taylor expanded in phi1 around 0

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\color{blue}{\cos delta}} \]
    3. Step-by-step derivation
      1. lift-cos.f6488.4

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta} \]
    4. Applied rewrites88.4%

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\color{blue}{\cos delta}} \]
    5. Taylor expanded in phi1 around 0

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\color{blue}{\sin delta \cdot \sin theta}}{\cos delta} \]
    6. Step-by-step derivation
      1. *-commutativeN/A

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin theta \cdot \color{blue}{\sin delta}}{\cos delta} \]
      2. lift-sin.f64N/A

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin theta \cdot \sin \color{blue}{delta}}{\cos delta} \]
      3. lift-sin.f64N/A

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin theta \cdot \sin delta}{\cos delta} \]
      4. lift-*.f6486.3

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin theta \cdot \color{blue}{\sin delta}}{\cos delta} \]
    7. Applied rewrites86.3%

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\color{blue}{\sin theta \cdot \sin delta}}{\cos delta} \]
    8. Taylor expanded in theta around 0

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{theta \cdot \sin \color{blue}{delta}}{\cos delta} \]
    9. Step-by-step derivation
      1. Applied rewrites73.7%

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{theta \cdot \sin \color{blue}{delta}}{\cos delta} \]
    10. Recombined 2 regimes into one program.
    11. Add Preprocessing

    Alternative 9: 80.0% accurate, 4.3× speedup?

    \[\begin{array}{l} \\ \begin{array}{l} t_1 := \lambda_1 + \tan^{-1}_* \frac{theta \cdot \sin delta}{\cos delta}\\ \mathbf{if}\;delta \leq -1.6 \cdot 10^{+63}:\\ \;\;\;\;t\_1\\ \mathbf{elif}\;delta \leq 1.12 \cdot 10^{+33}:\\ \;\;\;\;\lambda_1 + \tan^{-1}_* \frac{\sin theta \cdot delta}{\cos delta}\\ \mathbf{else}:\\ \;\;\;\;t\_1\\ \end{array} \end{array} \]
    ;; Alternative 9 (80.0% accurate): regime split on delta.  For moderate
    ;; delta, sin(delta) ~ delta; otherwise sin(theta) ~ theta.
    (FPCore (lambda1 phi1 phi2 delta theta)
     :precision binary64
     (let* ((t_1 (+ lambda1 (atan2 (* theta (sin delta)) (cos delta)))))
       (if (<= delta -1.6e+63)
         t_1
         (if (<= delta 1.12e+33)
           (+ lambda1 (atan2 (* (sin theta) delta) (cos delta)))
           t_1))))
    double code(double lambda1, double phi1, double phi2, double delta, double theta) {
    	double t_1 = lambda1 + atan2((theta * sin(delta)), cos(delta));
    	double tmp;
    	if (delta <= -1.6e+63) {
    		tmp = t_1;
    	} else if (delta <= 1.12e+33) {
    		tmp = lambda1 + atan2((sin(theta) * delta), cos(delta));
    	} else {
    		tmp = t_1;
    	}
    	return tmp;
    }
    
    module fmin_fmax_functions
        ! NaN-aware generic max/min helpers mirroring C's fmax/fmin semantics:
        ! if one argument is NaN the other argument is returned, otherwise the
        ! intrinsic max/min result is used.  Generic interfaces cover every
        ! real(4)/real(8) argument combination.
        implicit none
        private
        public fmax
        public fmin
    
        interface fmax
            module procedure fmax88
            module procedure fmax44
            module procedure fmax84
            module procedure fmax48
        end interface
        interface fmin
            module procedure fmin88
            module procedure fmin44
            module procedure fmin84
            module procedure fmin48
        end interface
    contains
        ! real(8)/real(8) maximum.
        real(8) function fmax88(x, y) result (res)
            real(8), intent (in) :: x
            real(8), intent (in) :: y
            if (x /= x) then          ! x is NaN: take the other operand
                res = y
            else if (y /= y) then     ! y is NaN: take the other operand
                res = x
            else
                res = max(x, y)
            end if
        end function
        ! real(4)/real(4) maximum.
        real(4) function fmax44(x, y) result (res)
            real(4), intent (in) :: x
            real(4), intent (in) :: y
            if (x /= x) then
                res = y
            else if (y /= y) then
                res = x
            else
                res = max(x, y)
            end if
        end function
        ! real(8)/real(4) maximum; second operand promoted to real(8).
        real(8) function fmax84(x, y) result(res)
            real(8), intent (in) :: x
            real(4), intent (in) :: y
            if (x /= x) then
                res = dble(y)
            else if (y /= y) then
                res = x
            else
                res = max(x, dble(y))
            end if
        end function
        ! real(4)/real(8) maximum; first operand promoted to real(8).
        real(8) function fmax48(x, y) result(res)
            real(4), intent (in) :: x
            real(8), intent (in) :: y
            if (x /= x) then
                res = y
            else if (y /= y) then
                res = dble(x)
            else
                res = max(dble(x), y)
            end if
        end function
        ! real(8)/real(8) minimum.
        real(8) function fmin88(x, y) result (res)
            real(8), intent (in) :: x
            real(8), intent (in) :: y
            if (x /= x) then
                res = y
            else if (y /= y) then
                res = x
            else
                res = min(x, y)
            end if
        end function
        ! real(4)/real(4) minimum.
        real(4) function fmin44(x, y) result (res)
            real(4), intent (in) :: x
            real(4), intent (in) :: y
            if (x /= x) then
                res = y
            else if (y /= y) then
                res = x
            else
                res = min(x, y)
            end if
        end function
        ! real(8)/real(4) minimum; second operand promoted to real(8).
        real(8) function fmin84(x, y) result(res)
            real(8), intent (in) :: x
            real(4), intent (in) :: y
            if (x /= x) then
                res = dble(y)
            else if (y /= y) then
                res = x
            else
                res = min(x, dble(y))
            end if
        end function
        ! real(4)/real(8) minimum; first operand promoted to real(8).
        real(8) function fmin48(x, y) result(res)
            real(4), intent (in) :: x
            real(8), intent (in) :: y
            if (x /= x) then
                res = y
            else if (y /= y) then
                res = dble(x)
            else
                res = min(dble(x), y)
            end if
        end function
    end module
    
    real(8) function code(lambda1, phi1, phi2, delta, theta)
    use fmin_fmax_functions
        ! Alternative 9: regime split on delta.  For moderate delta use
        ! sin(delta) ~ delta; otherwise use sin(theta) ~ theta.  NaN delta
        ! falls through to the else branch, matching the original.
        real(8), intent (in) :: lambda1
        real(8), intent (in) :: phi1
        real(8), intent (in) :: phi2
        real(8), intent (in) :: delta
        real(8), intent (in) :: theta
        if (delta > (-1.6d+63) .and. delta <= 1.12d+33) then
            code = lambda1 + atan2((sin(theta) * delta), cos(delta))
        else
            code = lambda1 + atan2((theta * sin(delta)), cos(delta))
        end if
    end function
    
    /** Alternative 9: regime split on delta — moderate delta uses sin(delta) ~ delta,
     *  otherwise sin(theta) ~ theta.  NaN delta falls through to the else branch. */
    public static double code(double lambda1, double phi1, double phi2, double delta, double theta) {
    	if (delta > -1.6e+63 && delta <= 1.12e+33) {
    		return lambda1 + Math.atan2(Math.sin(theta) * delta, Math.cos(delta));
    	}
    	return lambda1 + Math.atan2(theta * Math.sin(delta), Math.cos(delta));
    }
    
    def code(lambda1, phi1, phi2, delta, theta):
    	# Alternative 9: regime split on delta.  For moderate delta use
    	# sin(delta) ~ delta; otherwise use sin(theta) ~ theta.  NaN delta
    	# falls through to the theta-linearized branch, as in the original.
    	if -1.6e+63 < delta <= 1.12e+33:
    		return lambda1 + math.atan2(math.sin(theta) * delta, math.cos(delta))
    	return lambda1 + math.atan2(theta * math.sin(delta), math.cos(delta))
    
    function code(lambda1, phi1, phi2, delta, theta)
    	# Alternative 9: regime split on delta (moderate delta: sin(delta) ~ delta;
    	# otherwise sin(theta) ~ theta).
    	if -1.6e+63 < delta <= 1.12e+33
    		return Float64(lambda1 + atan(Float64(sin(theta) * delta), cos(delta)))
    	end
    	return Float64(lambda1 + atan(Float64(theta * sin(delta)), cos(delta)))
    end
    
    function tmp_2 = code(lambda1, phi1, phi2, delta, theta)
    	% Alternative 9: regime split on delta (moderate delta: sin(delta) ~ delta;
    	% otherwise sin(theta) ~ theta).
    	if ((delta > -1.6e+63) && (delta <= 1.12e+33))
    		tmp_2 = lambda1 + atan2((sin(theta) * delta), cos(delta));
    	else
    		tmp_2 = lambda1 + atan2((theta * sin(delta)), cos(delta));
    	end
    end
    
    (* Alternative 9: regime split on delta -- moderate delta uses Sin[delta] ~ delta, otherwise Sin[theta] ~ theta. *)
    code[lambda1_, phi1_, phi2_, delta_, theta_] := Block[{t$95$1 = N[(lambda1 + N[ArcTan[N[(theta * N[Sin[delta], $MachinePrecision]), $MachinePrecision] / N[Cos[delta], $MachinePrecision]], $MachinePrecision]), $MachinePrecision]}, If[LessEqual[delta, -1.6e+63], t$95$1, If[LessEqual[delta, 1.12e+33], N[(lambda1 + N[ArcTan[N[(N[Sin[theta], $MachinePrecision] * delta), $MachinePrecision] / N[Cos[delta], $MachinePrecision]], $MachinePrecision]), $MachinePrecision], t$95$1]]]
    
    \begin{array}{l}
    
    \\
    \begin{array}{l}
    t_1 := \lambda_1 + \tan^{-1}_* \frac{theta \cdot \sin delta}{\cos delta}\\
    \mathbf{if}\;delta \leq -1.6 \cdot 10^{+63}:\\
    \;\;\;\;t\_1\\
    
    \mathbf{elif}\;delta \leq 1.12 \cdot 10^{+33}:\\
    \;\;\;\;\lambda_1 + \tan^{-1}_* \frac{\sin theta \cdot delta}{\cos delta}\\
    
    \mathbf{else}:\\
    \;\;\;\;t\_1\\
    
    
    \end{array}
    \end{array}
    
    Derivation
    1. Split input into 2 regimes
    2. if delta < -1.60000000000000006e63 or 1.12e33 < delta

      1. Initial program 99.8%

        \[\lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \sin \sin^{-1} \left(\sin \phi_1 \cdot \cos delta + \left(\cos \phi_1 \cdot \sin delta\right) \cdot \cos theta\right)} \]
      2. Taylor expanded in phi1 around 0

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\color{blue}{\cos delta}} \]
      3. Step-by-step derivation
        1. lift-cos.f6488.4

          \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta} \]
      4. Applied rewrites88.4%

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\color{blue}{\cos delta}} \]
      5. Taylor expanded in phi1 around 0

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\color{blue}{\sin delta \cdot \sin theta}}{\cos delta} \]
      6. Step-by-step derivation
        1. *-commutativeN/A

          \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin theta \cdot \color{blue}{\sin delta}}{\cos delta} \]
        2. lift-sin.f64N/A

          \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin theta \cdot \sin \color{blue}{delta}}{\cos delta} \]
        3. lift-sin.f64N/A

          \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin theta \cdot \sin delta}{\cos delta} \]
        4. lift-*.f6486.3

          \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin theta \cdot \color{blue}{\sin delta}}{\cos delta} \]
      7. Applied rewrites86.3%

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\color{blue}{\sin theta \cdot \sin delta}}{\cos delta} \]
      8. Taylor expanded in theta around 0

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{theta \cdot \sin \color{blue}{delta}}{\cos delta} \]
      9. Step-by-step derivation
        1. Applied rewrites73.7%

          \[\leadsto \lambda_1 + \tan^{-1}_* \frac{theta \cdot \sin \color{blue}{delta}}{\cos delta} \]

        if -1.60000000000000006e63 < delta < 1.12e33

        1. Initial program 99.8%

          \[\lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \sin \sin^{-1} \left(\sin \phi_1 \cdot \cos delta + \left(\cos \phi_1 \cdot \sin delta\right) \cdot \cos theta\right)} \]
        2. Taylor expanded in phi1 around 0

          \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\color{blue}{\cos delta}} \]
        3. Step-by-step derivation
          1. lift-cos.f6488.4

            \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta} \]
        4. Applied rewrites88.4%

          \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\color{blue}{\cos delta}} \]
        5. Taylor expanded in phi1 around 0

          \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\color{blue}{\sin delta \cdot \sin theta}}{\cos delta} \]
        6. Step-by-step derivation
          1. *-commutativeN/A

            \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin theta \cdot \color{blue}{\sin delta}}{\cos delta} \]
          2. lift-sin.f64N/A

            \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin theta \cdot \sin \color{blue}{delta}}{\cos delta} \]
          3. lift-sin.f64N/A

            \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin theta \cdot \sin delta}{\cos delta} \]
          4. lift-*.f6486.3

            \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin theta \cdot \color{blue}{\sin delta}}{\cos delta} \]
        7. Applied rewrites86.3%

          \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\color{blue}{\sin theta \cdot \sin delta}}{\cos delta} \]
        8. Taylor expanded in delta around 0

          \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin theta \cdot delta}{\cos delta} \]
        9. Step-by-step derivation
          1. Applied rewrites74.5%

            \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin theta \cdot delta}{\cos delta} \]
        10. Recombined 2 regimes into one program.
        11. Add Preprocessing

        Alternative 10: 73.7% accurate, 4.6× speedup?

        \[\begin{array}{l} \\ \lambda_1 + \tan^{-1}_* \frac{theta \cdot \sin delta}{\cos delta} \end{array} \]
        ; Alternative 10: sin(theta) is replaced by theta and the original
        ; denominator by cos(delta); phi1/phi2 drop out of the simplified form.
        (FPCore (lambda1 phi1 phi2 delta theta)
         :precision binary64
         (+ lambda1 (atan2 (* theta (sin delta)) (cos delta))))
        double code(double lambda1, double phi1, double phi2, double delta, double theta) {
        	return lambda1 + atan2((theta * sin(delta)), cos(delta));
        }
        
        ! Helper module providing NaN-aware fmax/fmin for generated kernels.
        ! Each merge chain encodes IEEE-style semantics: if x is NaN return y;
        ! else if y is NaN return x; else the ordinary max/min.
        ! (x /= x is true only when x is NaN.)
        ! Procedure suffixes name the operand kinds: 8 = real(8), 4 = real(4).
        module fmin_fmax_functions
            implicit none
            private
            public fmax
            public fmin

            ! Generic interfaces dispatch on the operand-kind combination.
            interface fmax
                module procedure fmax88
                module procedure fmax44
                module procedure fmax84
                module procedure fmax48
            end interface
            interface fmin
                module procedure fmin88
                module procedure fmin44
                module procedure fmin84
                module procedure fmin48
            end interface
        contains
            ! fmax for two real(8) operands.
            real(8) function fmax88(x, y) result (res)
                real(8), intent (in) :: x
                real(8), intent (in) :: y
                res = merge(y, merge(x, max(x, y), y /= y), x /= x)
            end function
            ! fmax for two real(4) operands.
            real(4) function fmax44(x, y) result (res)
                real(4), intent (in) :: x
                real(4), intent (in) :: y
                res = merge(y, merge(x, max(x, y), y /= y), x /= x)
            end function
            ! fmax for real(8) x, real(4) y; y is promoted with dble.
            real(8) function fmax84(x, y) result(res)
                real(8), intent (in) :: x
                real(4), intent (in) :: y
                res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
            end function
            ! fmax for real(4) x, real(8) y; x is promoted with dble.
            real(8) function fmax48(x, y) result(res)
                real(4), intent (in) :: x
                real(8), intent (in) :: y
                res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
            end function
            ! fmin for two real(8) operands.
            real(8) function fmin88(x, y) result (res)
                real(8), intent (in) :: x
                real(8), intent (in) :: y
                res = merge(y, merge(x, min(x, y), y /= y), x /= x)
            end function
            ! fmin for two real(4) operands.
            real(4) function fmin44(x, y) result (res)
                real(4), intent (in) :: x
                real(4), intent (in) :: y
                res = merge(y, merge(x, min(x, y), y /= y), x /= x)
            end function
            ! fmin for real(8) x, real(4) y; y is promoted with dble.
            real(8) function fmin84(x, y) result(res)
                real(8), intent (in) :: x
                real(4), intent (in) :: y
                res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
            end function
            ! fmin for real(4) x, real(8) y; x is promoted with dble.
            real(8) function fmin48(x, y) result(res)
                real(4), intent (in) :: x
                real(8), intent (in) :: y
                res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
            end function
        end module
        
        real(8) function code(lambda1, phi1, phi2, delta, theta)
        use fmin_fmax_functions
            real(8), intent (in) :: lambda1
            real(8), intent (in) :: phi1
            real(8), intent (in) :: phi2
            real(8), intent (in) :: delta
            real(8), intent (in) :: theta
            real(8) :: numerator, denominator
            ! Destination longitude: lambda1 + atan2(theta*sin(delta), cos(delta)).
            ! phi1 and phi2 are unused in this Taylor-simplified alternative.
            numerator = theta * sin(delta)
            denominator = cos(delta)
            code = lambda1 + atan2(numerator, denominator)
        end function
        
        public static double code(double lambda1, double phi1, double phi2, double delta, double theta) {
        	// Destination longitude: lambda1 + atan2(theta * sin(delta), cos(delta)).
        	// phi1 and phi2 are unused in this Taylor-simplified alternative.
        	final double y = theta * Math.sin(delta);
        	final double x = Math.cos(delta);
        	return lambda1 + Math.atan2(y, x);
        }
        
        def code(lambda1, phi1, phi2, delta, theta):
        	"""Destination longitude: lambda1 + atan2(theta*sin(delta), cos(delta)).

        	phi1 and phi2 are unused in this Taylor-simplified alternative.
        	"""
        	numerator = theta * math.sin(delta)
        	denominator = math.cos(delta)
        	return lambda1 + math.atan2(numerator, denominator)
        
        function code(lambda1, phi1, phi2, delta, theta)
        	# Destination longitude: lambda1 + atan2(theta*sin(delta), cos(delta)).
        	# phi1 and phi2 are unused in this Taylor-simplified alternative.
        	numerator = Float64(theta * sin(delta))
        	denominator = cos(delta)
        	return Float64(lambda1 + atan(numerator, denominator))
        end
        
        function tmp = code(lambda1, phi1, phi2, delta, theta)
        	% Destination longitude: lambda1 + atan2(theta*sin(delta), cos(delta)).
        	% phi1 and phi2 are unused in this Taylor-simplified alternative.
        	tmp = lambda1 + atan2((theta * sin(delta)), cos(delta));
        end
        
        (* Destination longitude: lambda1 + arctan of (theta*sin(delta))/cos(delta).
           NOTE(review): this uses single-argument ArcTan of a quotient, which loses
           the quadrant information of the FPCore atan2 — confirm intended. *)
        code[lambda1_, phi1_, phi2_, delta_, theta_] := N[(lambda1 + N[ArcTan[N[(theta * N[Sin[delta], $MachinePrecision]), $MachinePrecision] / N[Cos[delta], $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
        
        \begin{array}{l}
        
        \\
        \lambda_1 + \tan^{-1}_* \frac{theta \cdot \sin delta}{\cos delta}
        \end{array}
        
        Derivation
        1. Initial program 99.8%

          \[\lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \sin \sin^{-1} \left(\sin \phi_1 \cdot \cos delta + \left(\cos \phi_1 \cdot \sin delta\right) \cdot \cos theta\right)} \]
        2. Taylor expanded in phi1 around 0

          \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\color{blue}{\cos delta}} \]
        3. Step-by-step derivation
          1. lift-cos.f6488.4

            \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta} \]
        4. Applied rewrites88.4%

          \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\color{blue}{\cos delta}} \]
        5. Taylor expanded in phi1 around 0

          \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\color{blue}{\sin delta \cdot \sin theta}}{\cos delta} \]
        6. Step-by-step derivation
          1. *-commutativeN/A

            \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin theta \cdot \color{blue}{\sin delta}}{\cos delta} \]
          2. lift-sin.f64N/A

            \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin theta \cdot \sin \color{blue}{delta}}{\cos delta} \]
          3. lift-sin.f64N/A

            \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin theta \cdot \sin delta}{\cos delta} \]
          4. lift-*.f6486.3

            \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin theta \cdot \color{blue}{\sin delta}}{\cos delta} \]
        7. Applied rewrites86.3%

          \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\color{blue}{\sin theta \cdot \sin delta}}{\cos delta} \]
        8. Taylor expanded in theta around 0

          \[\leadsto \lambda_1 + \tan^{-1}_* \frac{theta \cdot \sin \color{blue}{delta}}{\cos delta} \]
        9. Step-by-step derivation
          1. Applied rewrites73.7%

            \[\leadsto \lambda_1 + \tan^{-1}_* \frac{theta \cdot \sin \color{blue}{delta}}{\cos delta} \]
          2. Add Preprocessing

          Alternative 11: 69.7% accurate, 5.1× speedup?

          \[\begin{array}{l} \\ \lambda_1 + \tan^{-1}_* \frac{theta \cdot \sin delta}{\mathsf{fma}\left(\mathsf{fma}\left(delta \cdot delta, -0.001388888888888889, 0.041666666666666664\right) \cdot \left(delta \cdot delta\right) - 0.5, delta \cdot delta, 1\right)} \end{array} \]
          ; Alternative 11: cos(delta) in the denominator replaced by its
          ; degree-6 Taylor polynomial, evaluated with fused multiply-adds.
          (FPCore (lambda1 phi1 phi2 delta theta)
           :precision binary64
           (+
            lambda1
            (atan2
             (* theta (sin delta))
             (fma
              (-
               (*
                (fma (* delta delta) -0.001388888888888889 0.041666666666666664)
                (* delta delta))
               0.5)
              (* delta delta)
              1.0))))
          double code(double lambda1, double phi1, double phi2, double delta, double theta) {
          	return lambda1 + atan2((theta * sin(delta)), fma(((fma((delta * delta), -0.001388888888888889, 0.041666666666666664) * (delta * delta)) - 0.5), (delta * delta), 1.0));
          }
          
          function code(lambda1, phi1, phi2, delta, theta)
          	# cos(delta) replaced by its degree-6 Taylor polynomial, evaluated via fma.
          	d2 = Float64(delta * delta)
          	inner = fma(d2, -0.001388888888888889, 0.041666666666666664)
          	coeff = Float64(Float64(inner * d2) - 0.5)
          	denom = fma(coeff, d2, 1.0)
          	return Float64(lambda1 + atan(Float64(theta * sin(delta)), denom))
          end
          
          (* Alternative 11: denominator is the degree-6 Taylor polynomial of cos(delta).
             NOTE(review): single-argument ArcTan of a quotient loses atan2 quadrant
             information relative to the FPCore form — confirm intended. *)
          code[lambda1_, phi1_, phi2_, delta_, theta_] := N[(lambda1 + N[ArcTan[N[(theta * N[Sin[delta], $MachinePrecision]), $MachinePrecision] / N[(N[(N[(N[(N[(delta * delta), $MachinePrecision] * -0.001388888888888889 + 0.041666666666666664), $MachinePrecision] * N[(delta * delta), $MachinePrecision]), $MachinePrecision] - 0.5), $MachinePrecision] * N[(delta * delta), $MachinePrecision] + 1.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
          
          \begin{array}{l}
          
          \\
          \lambda_1 + \tan^{-1}_* \frac{theta \cdot \sin delta}{\mathsf{fma}\left(\mathsf{fma}\left(delta \cdot delta, -0.001388888888888889, 0.041666666666666664\right) \cdot \left(delta \cdot delta\right) - 0.5, delta \cdot delta, 1\right)}
          \end{array}
          
          Derivation
          1. Initial program 99.8%

            \[\lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \sin \sin^{-1} \left(\sin \phi_1 \cdot \cos delta + \left(\cos \phi_1 \cdot \sin delta\right) \cdot \cos theta\right)} \]
          2. Taylor expanded in phi1 around 0

            \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\color{blue}{\cos delta}} \]
          3. Step-by-step derivation
            1. lift-cos.f6488.4

              \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta} \]
          4. Applied rewrites88.4%

            \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\color{blue}{\cos delta}} \]
          5. Taylor expanded in phi1 around 0

            \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\color{blue}{\sin delta \cdot \sin theta}}{\cos delta} \]
          6. Step-by-step derivation
            1. *-commutativeN/A

              \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin theta \cdot \color{blue}{\sin delta}}{\cos delta} \]
            2. lift-sin.f64N/A

              \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin theta \cdot \sin \color{blue}{delta}}{\cos delta} \]
            3. lift-sin.f64N/A

              \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin theta \cdot \sin delta}{\cos delta} \]
            4. lift-*.f6486.3

              \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin theta \cdot \color{blue}{\sin delta}}{\cos delta} \]
          7. Applied rewrites86.3%

            \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\color{blue}{\sin theta \cdot \sin delta}}{\cos delta} \]
          8. Taylor expanded in theta around 0

            \[\leadsto \lambda_1 + \tan^{-1}_* \frac{theta \cdot \sin \color{blue}{delta}}{\cos delta} \]
          9. Step-by-step derivation
            1. Applied rewrites73.7%

              \[\leadsto \lambda_1 + \tan^{-1}_* \frac{theta \cdot \sin \color{blue}{delta}}{\cos delta} \]
            2. Taylor expanded in delta around 0

              \[\leadsto \lambda_1 + \tan^{-1}_* \frac{theta \cdot \sin delta}{1 + \color{blue}{{delta}^{2} \cdot \left({delta}^{2} \cdot \left(\frac{1}{24} + \frac{-1}{720} \cdot {delta}^{2}\right) - \frac{1}{2}\right)}} \]
            3. Step-by-step derivation
              1. +-commutativeN/A

                \[\leadsto \lambda_1 + \tan^{-1}_* \frac{theta \cdot \sin delta}{{delta}^{2} \cdot \left({delta}^{2} \cdot \left(\frac{1}{24} + \frac{-1}{720} \cdot {delta}^{2}\right) - \frac{1}{2}\right) + 1} \]
              2. *-commutativeN/A

                \[\leadsto \lambda_1 + \tan^{-1}_* \frac{theta \cdot \sin delta}{\left({delta}^{2} \cdot \left(\frac{1}{24} + \frac{-1}{720} \cdot {delta}^{2}\right) - \frac{1}{2}\right) \cdot {delta}^{2} + 1} \]
              3. lower-fma.f64N/A

                \[\leadsto \lambda_1 + \tan^{-1}_* \frac{theta \cdot \sin delta}{\mathsf{fma}\left({delta}^{2} \cdot \left(\frac{1}{24} + \frac{-1}{720} \cdot {delta}^{2}\right) - \frac{1}{2}, {delta}^{\color{blue}{2}}, 1\right)} \]
              4. lower--.f64N/A

                \[\leadsto \lambda_1 + \tan^{-1}_* \frac{theta \cdot \sin delta}{\mathsf{fma}\left({delta}^{2} \cdot \left(\frac{1}{24} + \frac{-1}{720} \cdot {delta}^{2}\right) - \frac{1}{2}, {delta}^{2}, 1\right)} \]
              5. *-commutativeN/A

                \[\leadsto \lambda_1 + \tan^{-1}_* \frac{theta \cdot \sin delta}{\mathsf{fma}\left(\left(\frac{1}{24} + \frac{-1}{720} \cdot {delta}^{2}\right) \cdot {delta}^{2} - \frac{1}{2}, {delta}^{2}, 1\right)} \]
              6. lower-*.f64N/A

                \[\leadsto \lambda_1 + \tan^{-1}_* \frac{theta \cdot \sin delta}{\mathsf{fma}\left(\left(\frac{1}{24} + \frac{-1}{720} \cdot {delta}^{2}\right) \cdot {delta}^{2} - \frac{1}{2}, {delta}^{2}, 1\right)} \]
              7. +-commutativeN/A

                \[\leadsto \lambda_1 + \tan^{-1}_* \frac{theta \cdot \sin delta}{\mathsf{fma}\left(\left(\frac{-1}{720} \cdot {delta}^{2} + \frac{1}{24}\right) \cdot {delta}^{2} - \frac{1}{2}, {delta}^{2}, 1\right)} \]
              8. *-commutativeN/A

                \[\leadsto \lambda_1 + \tan^{-1}_* \frac{theta \cdot \sin delta}{\mathsf{fma}\left(\left({delta}^{2} \cdot \frac{-1}{720} + \frac{1}{24}\right) \cdot {delta}^{2} - \frac{1}{2}, {delta}^{2}, 1\right)} \]
              9. lower-fma.f64N/A

                \[\leadsto \lambda_1 + \tan^{-1}_* \frac{theta \cdot \sin delta}{\mathsf{fma}\left(\mathsf{fma}\left({delta}^{2}, \frac{-1}{720}, \frac{1}{24}\right) \cdot {delta}^{2} - \frac{1}{2}, {delta}^{2}, 1\right)} \]
              10. pow2N/A

                \[\leadsto \lambda_1 + \tan^{-1}_* \frac{theta \cdot \sin delta}{\mathsf{fma}\left(\mathsf{fma}\left(delta \cdot delta, \frac{-1}{720}, \frac{1}{24}\right) \cdot {delta}^{2} - \frac{1}{2}, {delta}^{2}, 1\right)} \]
              11. lift-*.f64N/A

                \[\leadsto \lambda_1 + \tan^{-1}_* \frac{theta \cdot \sin delta}{\mathsf{fma}\left(\mathsf{fma}\left(delta \cdot delta, \frac{-1}{720}, \frac{1}{24}\right) \cdot {delta}^{2} - \frac{1}{2}, {delta}^{2}, 1\right)} \]
              12. pow2N/A

                \[\leadsto \lambda_1 + \tan^{-1}_* \frac{theta \cdot \sin delta}{\mathsf{fma}\left(\mathsf{fma}\left(delta \cdot delta, \frac{-1}{720}, \frac{1}{24}\right) \cdot \left(delta \cdot delta\right) - \frac{1}{2}, {delta}^{2}, 1\right)} \]
              13. lift-*.f64N/A

                \[\leadsto \lambda_1 + \tan^{-1}_* \frac{theta \cdot \sin delta}{\mathsf{fma}\left(\mathsf{fma}\left(delta \cdot delta, \frac{-1}{720}, \frac{1}{24}\right) \cdot \left(delta \cdot delta\right) - \frac{1}{2}, {delta}^{2}, 1\right)} \]
              14. pow2N/A

                \[\leadsto \lambda_1 + \tan^{-1}_* \frac{theta \cdot \sin delta}{\mathsf{fma}\left(\mathsf{fma}\left(delta \cdot delta, \frac{-1}{720}, \frac{1}{24}\right) \cdot \left(delta \cdot delta\right) - \frac{1}{2}, delta \cdot delta, 1\right)} \]
              15. lift-*.f6469.7

                \[\leadsto \lambda_1 + \tan^{-1}_* \frac{theta \cdot \sin delta}{\mathsf{fma}\left(\mathsf{fma}\left(delta \cdot delta, -0.001388888888888889, 0.041666666666666664\right) \cdot \left(delta \cdot delta\right) - 0.5, delta \cdot delta, 1\right)} \]
            4. Applied rewrites69.7%

              \[\leadsto \lambda_1 + \tan^{-1}_* \frac{theta \cdot \sin delta}{\mathsf{fma}\left(\mathsf{fma}\left(delta \cdot delta, -0.001388888888888889, 0.041666666666666664\right) \cdot \left(delta \cdot delta\right) - 0.5, \color{blue}{delta \cdot delta}, 1\right)} \]
            5. Add Preprocessing

            Alternative 12: 67.6% accurate, 5.6× speedup?

            \[\begin{array}{l} \\ \lambda_1 + \tan^{-1}_* \frac{theta \cdot \sin delta}{\mathsf{fma}\left(\left(delta \cdot delta\right) \cdot 0.041666666666666664 - 0.5, delta \cdot delta, 1\right)} \end{array} \]
            ; Alternative 12: cos(delta) in the denominator replaced by its
            ; degree-4 Taylor polynomial, evaluated with a fused multiply-add.
            (FPCore (lambda1 phi1 phi2 delta theta)
             :precision binary64
             (+
              lambda1
              (atan2
               (* theta (sin delta))
               (fma
                (- (* (* delta delta) 0.041666666666666664) 0.5)
                (* delta delta)
                1.0))))
            double code(double lambda1, double phi1, double phi2, double delta, double theta) {
            	return lambda1 + atan2((theta * sin(delta)), fma((((delta * delta) * 0.041666666666666664) - 0.5), (delta * delta), 1.0));
            }
            
            function code(lambda1, phi1, phi2, delta, theta)
            	# Denominator: degree-4 Taylor polynomial of cos(delta), via fma.
            	d2 = Float64(delta * delta)
            	coeff = Float64(Float64(d2 * 0.041666666666666664) - 0.5)
            	denom = fma(coeff, d2, 1.0)
            	return Float64(lambda1 + atan(Float64(theta * sin(delta)), denom))
            end
            
            (* Alternative 12: denominator is the degree-4 Taylor polynomial of cos(delta).
               NOTE(review): single-argument ArcTan of a quotient loses atan2 quadrant
               information relative to the FPCore form — confirm intended. *)
            code[lambda1_, phi1_, phi2_, delta_, theta_] := N[(lambda1 + N[ArcTan[N[(theta * N[Sin[delta], $MachinePrecision]), $MachinePrecision] / N[(N[(N[(N[(delta * delta), $MachinePrecision] * 0.041666666666666664), $MachinePrecision] - 0.5), $MachinePrecision] * N[(delta * delta), $MachinePrecision] + 1.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
            
            \begin{array}{l}
            
            \\
            \lambda_1 + \tan^{-1}_* \frac{theta \cdot \sin delta}{\mathsf{fma}\left(\left(delta \cdot delta\right) \cdot 0.041666666666666664 - 0.5, delta \cdot delta, 1\right)}
            \end{array}
            
            Derivation
            1. Initial program 99.8%

              \[\lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \sin \sin^{-1} \left(\sin \phi_1 \cdot \cos delta + \left(\cos \phi_1 \cdot \sin delta\right) \cdot \cos theta\right)} \]
            2. Taylor expanded in phi1 around 0

              \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\color{blue}{\cos delta}} \]
            3. Step-by-step derivation
              1. lift-cos.f6488.4

                \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta} \]
            4. Applied rewrites88.4%

              \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\color{blue}{\cos delta}} \]
            5. Taylor expanded in phi1 around 0

              \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\color{blue}{\sin delta \cdot \sin theta}}{\cos delta} \]
            6. Step-by-step derivation
              1. *-commutativeN/A

                \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin theta \cdot \color{blue}{\sin delta}}{\cos delta} \]
              2. lift-sin.f64N/A

                \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin theta \cdot \sin \color{blue}{delta}}{\cos delta} \]
              3. lift-sin.f64N/A

                \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin theta \cdot \sin delta}{\cos delta} \]
              4. lift-*.f6486.3

                \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin theta \cdot \color{blue}{\sin delta}}{\cos delta} \]
            7. Applied rewrites86.3%

              \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\color{blue}{\sin theta \cdot \sin delta}}{\cos delta} \]
            8. Taylor expanded in theta around 0

              \[\leadsto \lambda_1 + \tan^{-1}_* \frac{theta \cdot \sin \color{blue}{delta}}{\cos delta} \]
            9. Step-by-step derivation
              1. Applied rewrites73.7%

                \[\leadsto \lambda_1 + \tan^{-1}_* \frac{theta \cdot \sin \color{blue}{delta}}{\cos delta} \]
              2. Taylor expanded in delta around 0

                \[\leadsto \lambda_1 + \tan^{-1}_* \frac{theta \cdot \sin delta}{1 + \color{blue}{{delta}^{2} \cdot \left(\frac{1}{24} \cdot {delta}^{2} - \frac{1}{2}\right)}} \]
              3. Step-by-step derivation
                1. +-commutativeN/A

                  \[\leadsto \lambda_1 + \tan^{-1}_* \frac{theta \cdot \sin delta}{{delta}^{2} \cdot \left(\frac{1}{24} \cdot {delta}^{2} - \frac{1}{2}\right) + 1} \]
                2. *-commutativeN/A

                  \[\leadsto \lambda_1 + \tan^{-1}_* \frac{theta \cdot \sin delta}{\left(\frac{1}{24} \cdot {delta}^{2} - \frac{1}{2}\right) \cdot {delta}^{2} + 1} \]
                3. lower-fma.f64N/A

                  \[\leadsto \lambda_1 + \tan^{-1}_* \frac{theta \cdot \sin delta}{\mathsf{fma}\left(\frac{1}{24} \cdot {delta}^{2} - \frac{1}{2}, {delta}^{\color{blue}{2}}, 1\right)} \]
                4. lower--.f64N/A

                  \[\leadsto \lambda_1 + \tan^{-1}_* \frac{theta \cdot \sin delta}{\mathsf{fma}\left(\frac{1}{24} \cdot {delta}^{2} - \frac{1}{2}, {delta}^{2}, 1\right)} \]
                5. *-commutativeN/A

                  \[\leadsto \lambda_1 + \tan^{-1}_* \frac{theta \cdot \sin delta}{\mathsf{fma}\left({delta}^{2} \cdot \frac{1}{24} - \frac{1}{2}, {delta}^{2}, 1\right)} \]
                6. lower-*.f64N/A

                  \[\leadsto \lambda_1 + \tan^{-1}_* \frac{theta \cdot \sin delta}{\mathsf{fma}\left({delta}^{2} \cdot \frac{1}{24} - \frac{1}{2}, {delta}^{2}, 1\right)} \]
                7. pow2N/A

                  \[\leadsto \lambda_1 + \tan^{-1}_* \frac{theta \cdot \sin delta}{\mathsf{fma}\left(\left(delta \cdot delta\right) \cdot \frac{1}{24} - \frac{1}{2}, {delta}^{2}, 1\right)} \]
                8. lift-*.f64N/A

                  \[\leadsto \lambda_1 + \tan^{-1}_* \frac{theta \cdot \sin delta}{\mathsf{fma}\left(\left(delta \cdot delta\right) \cdot \frac{1}{24} - \frac{1}{2}, {delta}^{2}, 1\right)} \]
                9. pow2N/A

                  \[\leadsto \lambda_1 + \tan^{-1}_* \frac{theta \cdot \sin delta}{\mathsf{fma}\left(\left(delta \cdot delta\right) \cdot \frac{1}{24} - \frac{1}{2}, delta \cdot delta, 1\right)} \]
                10. lift-*.f6467.6

                  \[\leadsto \lambda_1 + \tan^{-1}_* \frac{theta \cdot \sin delta}{\mathsf{fma}\left(\left(delta \cdot delta\right) \cdot 0.041666666666666664 - 0.5, delta \cdot delta, 1\right)} \]
              4. Applied rewrites67.6%

                \[\leadsto \lambda_1 + \tan^{-1}_* \frac{theta \cdot \sin delta}{\mathsf{fma}\left(\left(delta \cdot delta\right) \cdot 0.041666666666666664 - 0.5, \color{blue}{delta \cdot delta}, 1\right)} \]
              5. Add Preprocessing

              Alternative 13: 66.2% accurate, 6.1× speedup?

              \[\begin{array}{l} \\ \lambda_1 + \tan^{-1}_* \frac{theta \cdot \left(\mathsf{fma}\left(delta \cdot delta, -0.16666666666666666, 1\right) \cdot delta\right)}{\cos delta} \end{array} \]
              ; Alternative 13: sin(delta) in the numerator replaced by its cubic
              ; Taylor polynomial delta - delta^3/6, evaluated with a fused multiply-add.
              (FPCore (lambda1 phi1 phi2 delta theta)
               :precision binary64
               (+
                lambda1
                (atan2
                 (* theta (* (fma (* delta delta) -0.16666666666666666 1.0) delta))
                 (cos delta))))
              double code(double lambda1, double phi1, double phi2, double delta, double theta) {
              	return lambda1 + atan2((theta * (fma((delta * delta), -0.16666666666666666, 1.0) * delta)), cos(delta));
              }
              
              function code(lambda1, phi1, phi2, delta, theta)
              	# sin(delta) replaced by its cubic Taylor polynomial delta - delta^3/6.
              	sin_poly = Float64(fma(Float64(delta * delta), -0.16666666666666666, 1.0) * delta)
              	num = Float64(theta * sin_poly)
              	return Float64(lambda1 + atan(num, cos(delta)))
              end
              
              (* Alternative 13: numerator uses the cubic Taylor polynomial of sin(delta).
                 NOTE(review): single-argument ArcTan of a quotient loses atan2 quadrant
                 information relative to the FPCore form — confirm intended. *)
              code[lambda1_, phi1_, phi2_, delta_, theta_] := N[(lambda1 + N[ArcTan[N[(theta * N[(N[(N[(delta * delta), $MachinePrecision] * -0.16666666666666666 + 1.0), $MachinePrecision] * delta), $MachinePrecision]), $MachinePrecision] / N[Cos[delta], $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
              
              \begin{array}{l}
              
              \\
              \lambda_1 + \tan^{-1}_* \frac{theta \cdot \left(\mathsf{fma}\left(delta \cdot delta, -0.16666666666666666, 1\right) \cdot delta\right)}{\cos delta}
              \end{array}
              
              Derivation
              1. Initial program 99.8%

                \[\lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \sin \sin^{-1} \left(\sin \phi_1 \cdot \cos delta + \left(\cos \phi_1 \cdot \sin delta\right) \cdot \cos theta\right)} \]
              2. Taylor expanded in phi1 around 0

                \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\color{blue}{\cos delta}} \]
              3. Step-by-step derivation
                1. lift-cos.f6488.4

                  \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta} \]
              4. Applied rewrites88.4%

                \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\color{blue}{\cos delta}} \]
              5. Taylor expanded in phi1 around 0

                \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\color{blue}{\sin delta \cdot \sin theta}}{\cos delta} \]
              6. Step-by-step derivation
                1. *-commutativeN/A

                  \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin theta \cdot \color{blue}{\sin delta}}{\cos delta} \]
                2. lift-sin.f64N/A

                  \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin theta \cdot \sin \color{blue}{delta}}{\cos delta} \]
                3. lift-sin.f64N/A

                  \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin theta \cdot \sin delta}{\cos delta} \]
                4. lift-*.f6486.3

                  \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin theta \cdot \color{blue}{\sin delta}}{\cos delta} \]
              7. Applied rewrites86.3%

                \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\color{blue}{\sin theta \cdot \sin delta}}{\cos delta} \]
              8. Taylor expanded in theta around 0

                \[\leadsto \lambda_1 + \tan^{-1}_* \frac{theta \cdot \sin \color{blue}{delta}}{\cos delta} \]
              9. Step-by-step derivation
                1. Applied rewrites73.7%

                  \[\leadsto \lambda_1 + \tan^{-1}_* \frac{theta \cdot \sin \color{blue}{delta}}{\cos delta} \]
                2. Taylor expanded in delta around 0

                  \[\leadsto \lambda_1 + \tan^{-1}_* \frac{theta \cdot \left(delta \cdot \color{blue}{\left(1 + \frac{-1}{6} \cdot {delta}^{2}\right)}\right)}{\cos delta} \]
                3. Step-by-step derivation
                  1. *-commutativeN/A

                    \[\leadsto \lambda_1 + \tan^{-1}_* \frac{theta \cdot \left(\left(1 + \frac{-1}{6} \cdot {delta}^{2}\right) \cdot delta\right)}{\cos delta} \]
                  2. lower-*.f64N/A

                    \[\leadsto \lambda_1 + \tan^{-1}_* \frac{theta \cdot \left(\left(1 + \frac{-1}{6} \cdot {delta}^{2}\right) \cdot delta\right)}{\cos delta} \]
                  3. +-commutativeN/A

                    \[\leadsto \lambda_1 + \tan^{-1}_* \frac{theta \cdot \left(\left(\frac{-1}{6} \cdot {delta}^{2} + 1\right) \cdot delta\right)}{\cos delta} \]
                  4. *-commutativeN/A

                    \[\leadsto \lambda_1 + \tan^{-1}_* \frac{theta \cdot \left(\left({delta}^{2} \cdot \frac{-1}{6} + 1\right) \cdot delta\right)}{\cos delta} \]
                  5. lower-fma.f64N/A

                    \[\leadsto \lambda_1 + \tan^{-1}_* \frac{theta \cdot \left(\mathsf{fma}\left({delta}^{2}, \frac{-1}{6}, 1\right) \cdot delta\right)}{\cos delta} \]
                  6. pow2N/A

                    \[\leadsto \lambda_1 + \tan^{-1}_* \frac{theta \cdot \left(\mathsf{fma}\left(delta \cdot delta, \frac{-1}{6}, 1\right) \cdot delta\right)}{\cos delta} \]
                  7. lift-*.f6466.2

                    \[\leadsto \lambda_1 + \tan^{-1}_* \frac{theta \cdot \left(\mathsf{fma}\left(delta \cdot delta, -0.16666666666666666, 1\right) \cdot delta\right)}{\cos delta} \]
                4. Applied rewrites66.2%

                  \[\leadsto \lambda_1 + \tan^{-1}_* \frac{theta \cdot \left(\mathsf{fma}\left(delta \cdot delta, -0.16666666666666666, 1\right) \cdot \color{blue}{delta}\right)}{\cos delta} \]
                5. Add Preprocessing

                Reproduce

                ?
                herbie shell --seed 2025142 
                (FPCore (lambda1 phi1 phi2 delta theta)
                  :name "Destination given bearing on a great circle"
                  :precision binary64
                  (+ lambda1 (atan2 (* (* (sin theta) (sin delta)) (cos phi1)) (- (cos delta) (* (sin phi1) (sin (asin (+ (* (sin phi1) (cos delta)) (* (* (cos phi1) (sin delta)) (cos theta))))))))))