Destination given bearing on a great circle

Percentage Accurate: 99.8% → 99.8%
Time: 10.6s
Alternatives: 14
Speedup: N/A×

Specification

?
\[\lambda_1 + \tan^{-1}_* \frac{\left(\sin \theta \cdot \sin \delta\right) \cdot \cos \phi_1}{\cos \delta - \sin \phi_1 \cdot \sin \sin^{-1} \left(\sin \phi_1 \cdot \cos \delta + \left(\cos \phi_1 \cdot \sin \delta\right) \cdot \cos \theta\right)} \]
(FPCore (lambda1 phi1 phi2 delta theta)
 :precision binary64
 (+
  lambda1
  (atan2
   (* (* (sin theta) (sin delta)) (cos phi1))
   (-
    (cos delta)
    (*
     (sin phi1)
     (sin
      (asin
       (+
        (* (sin phi1) (cos delta))
        (* (* (cos phi1) (sin delta)) (cos theta))))))))))
double code(double lambda1, double phi1, double phi2, double delta, double theta) {
	return lambda1 + atan2(((sin(theta) * sin(delta)) * cos(phi1)), (cos(delta) - (sin(phi1) * sin(asin(((sin(phi1) * cos(delta)) + ((cos(phi1) * sin(delta)) * cos(theta))))))));
}
module fmin_fmax_functions
    ! NaN-aware fmax/fmin helpers with C99 fmax/fmin semantics: when one
    ! argument is NaN the other argument is returned; otherwise the
    ! intrinsic max/min result is used.  The comparison `x /= x` is true
    ! only when x is NaN.  Mixed-kind variants (84/48) promote the
    ! real(4) argument with dble.  Not referenced by the code() function
    ! below, which uses only sin/cos/asin/atan2.
    implicit none
    private
    public fmax
    public fmin

    interface fmax
        module procedure fmax88
        module procedure fmax44
        module procedure fmax84
        module procedure fmax48
    end interface
    interface fmin
        module procedure fmin88
        module procedure fmin44
        module procedure fmin84
        module procedure fmin48
    end interface
contains
    ! NaN-aware maximum, real(8) x real(8).
    real(8) function fmax88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    ! NaN-aware maximum, real(4) x real(4).
    real(4) function fmax44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    ! NaN-aware maximum, real(8) x real(4); promotes y to real(8).
    real(8) function fmax84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
    end function
    ! NaN-aware maximum, real(4) x real(8); promotes x to real(8).
    real(8) function fmax48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
    end function
    ! NaN-aware minimum, real(8) x real(8).
    real(8) function fmin88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    ! NaN-aware minimum, real(4) x real(4).
    real(4) function fmin44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    ! NaN-aware minimum, real(8) x real(4); promotes y to real(8).
    real(8) function fmin84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
    end function
    ! NaN-aware minimum, real(4) x real(8); promotes x to real(8).
    real(8) function fmin48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
    end function
end module

! Destination longitude given bearing on a great circle; direct
! translation of the FPCore specification above.
! phi2 is declared for interface compatibility but is not used.
real(8) function code(lambda1, phi1, phi2, delta, theta)
use fmin_fmax_functions
    real(8), intent (in) :: lambda1
    real(8), intent (in) :: phi1
    real(8), intent (in) :: phi2
    real(8), intent (in) :: delta
    real(8), intent (in) :: theta
    code = lambda1 + atan2(((sin(theta) * sin(delta)) * cos(phi1)), (cos(delta) - (sin(phi1) * sin(asin(((sin(phi1) * cos(delta)) + ((cos(phi1) * sin(delta)) * cos(theta))))))))
end function
/**
 * Destination longitude given bearing on a great circle; direct
 * translation of the FPCore specification above.
 * {@code phi2} is part of the generated interface but is unused.
 */
public static double code(double lambda1, double phi1, double phi2, double delta, double theta) {
	final double sinDelta = Math.sin(delta);
	final double cosDelta = Math.cos(delta);
	final double sinPhi1 = Math.sin(phi1);
	final double cosPhi1 = Math.cos(phi1);
	// Grouping of operations matches the original expression exactly.
	final double numerator = (Math.sin(theta) * sinDelta) * cosPhi1;
	final double inner = (sinPhi1 * cosDelta) + ((cosPhi1 * sinDelta) * Math.cos(theta));
	final double denominator = cosDelta - (sinPhi1 * Math.sin(Math.asin(inner)));
	return lambda1 + Math.atan2(numerator, denominator);
}
def code(lambda1, phi1, phi2, delta, theta):
    """Destination longitude given bearing on a great circle.

    Direct translation of the FPCore specification above; ``phi2`` is
    part of the generated interface but is unused.  Assumes ``math`` is
    imported at module level.
    """
    sin_d = math.sin(delta)
    cos_d = math.cos(delta)
    sin_p = math.sin(phi1)
    cos_p = math.cos(phi1)
    # Grouping of operations matches the original expression exactly.
    numerator = (math.sin(theta) * sin_d) * cos_p
    inner = (sin_p * cos_d) + ((cos_p * sin_d) * math.cos(theta))
    denominator = cos_d - (sin_p * math.sin(math.asin(inner)))
    return lambda1 + math.atan2(numerator, denominator)
# Destination longitude given bearing on a great circle; direct
# translation of the FPCore specification above.  Two-argument
# atan(y, x) is Julia's atan2.  phi2 is accepted but unused.
function code(lambda1, phi1, phi2, delta, theta)
	return Float64(lambda1 + atan(Float64(Float64(sin(theta) * sin(delta)) * cos(phi1)), Float64(cos(delta) - Float64(sin(phi1) * sin(asin(Float64(Float64(sin(phi1) * cos(delta)) + Float64(Float64(cos(phi1) * sin(delta)) * cos(theta)))))))))
end
% Destination longitude given bearing on a great circle; direct
% translation of the FPCore specification above.  phi2 is accepted
% but unused.
function tmp = code(lambda1, phi1, phi2, delta, theta)
	tmp = lambda1 + atan2(((sin(theta) * sin(delta)) * cos(phi1)), (cos(delta) - (sin(phi1) * sin(asin(((sin(phi1) * cos(delta)) + ((cos(phi1) * sin(delta)) * cos(theta))))))));
end
code[lambda1_, phi1_, phi2_, delta_, theta_] := N[(lambda1 + N[ArcTan[N[(N[(N[Sin[theta], $MachinePrecision] * N[Sin[delta], $MachinePrecision]), $MachinePrecision] * N[Cos[phi1], $MachinePrecision]), $MachinePrecision] / N[(N[Cos[delta], $MachinePrecision] - N[(N[Sin[phi1], $MachinePrecision] * N[Sin[N[ArcSin[N[(N[(N[Sin[phi1], $MachinePrecision] * N[Cos[delta], $MachinePrecision]), $MachinePrecision] + N[(N[(N[Cos[phi1], $MachinePrecision] * N[Sin[delta], $MachinePrecision]), $MachinePrecision] * N[Cos[theta], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\lambda_1 + \tan^{-1}_* \frac{\left(\sin \theta \cdot \sin \delta\right) \cdot \cos \phi_1}{\cos \delta - \sin \phi_1 \cdot \sin \sin^{-1} \left(\sin \phi_1 \cdot \cos \delta + \left(\cos \phi_1 \cdot \sin \delta\right) \cdot \cos \theta\right)}

Local Percentage Accuracy vs ?

The average percentage accuracy by input value. Horizontal axis shows value of an input variable; the variable is chosen in the title. Vertical axis is accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion. These can be toggled with buttons below the plot. The line is an average while dots represent individual samples.

Accuracy vs Speed?

Herbie found 14 alternatives:

AlternativeAccuracySpeedup
The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed-accuracy tradeoffs.

Initial Program: 99.8% accurate, 1.0× speedup?

\[\lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \sin \sin^{-1} \left(\sin \phi_1 \cdot \cos delta + \left(\cos \phi_1 \cdot \sin delta\right) \cdot \cos theta\right)} \]
(FPCore (lambda1 phi1 phi2 delta theta)
 :precision binary64
 (+
  lambda1
  (atan2
   (* (* (sin theta) (sin delta)) (cos phi1))
   (-
    (cos delta)
    (*
     (sin phi1)
     (sin
      (asin
       (+
        (* (sin phi1) (cos delta))
        (* (* (cos phi1) (sin delta)) (cos theta))))))))))
// Destination longitude given bearing on a great circle; direct
// translation of the FPCore specification above.  phi2 is accepted
// but unused.
double code(double lambda1, double phi1, double phi2, double delta, double theta) {
	return lambda1 + atan2(((sin(theta) * sin(delta)) * cos(phi1)), (cos(delta) - (sin(phi1) * sin(asin(((sin(phi1) * cos(delta)) + ((cos(phi1) * sin(delta)) * cos(theta))))))));
}
module fmin_fmax_functions
    ! NaN-aware fmax/fmin helpers with C99 fmax/fmin semantics: when one
    ! argument is NaN the other argument is returned; otherwise the
    ! intrinsic max/min result is used.  The comparison `x /= x` is true
    ! only when x is NaN.  Mixed-kind variants (84/48) promote the
    ! real(4) argument with dble.  Not referenced by the code() function
    ! below, which uses only sin/cos/asin/atan2.
    implicit none
    private
    public fmax
    public fmin

    interface fmax
        module procedure fmax88
        module procedure fmax44
        module procedure fmax84
        module procedure fmax48
    end interface
    interface fmin
        module procedure fmin88
        module procedure fmin44
        module procedure fmin84
        module procedure fmin48
    end interface
contains
    ! NaN-aware maximum, real(8) x real(8).
    real(8) function fmax88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    ! NaN-aware maximum, real(4) x real(4).
    real(4) function fmax44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    ! NaN-aware maximum, real(8) x real(4); promotes y to real(8).
    real(8) function fmax84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
    end function
    ! NaN-aware maximum, real(4) x real(8); promotes x to real(8).
    real(8) function fmax48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
    end function
    ! NaN-aware minimum, real(8) x real(8).
    real(8) function fmin88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    ! NaN-aware minimum, real(4) x real(4).
    real(4) function fmin44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    ! NaN-aware minimum, real(8) x real(4); promotes y to real(8).
    real(8) function fmin84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
    end function
    ! NaN-aware minimum, real(4) x real(8); promotes x to real(8).
    real(8) function fmin48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
    end function
end module

! Destination longitude given bearing on a great circle; direct
! translation of the FPCore specification above.
! phi2 is declared for interface compatibility but is not used.
real(8) function code(lambda1, phi1, phi2, delta, theta)
use fmin_fmax_functions
    real(8), intent (in) :: lambda1
    real(8), intent (in) :: phi1
    real(8), intent (in) :: phi2
    real(8), intent (in) :: delta
    real(8), intent (in) :: theta
    code = lambda1 + atan2(((sin(theta) * sin(delta)) * cos(phi1)), (cos(delta) - (sin(phi1) * sin(asin(((sin(phi1) * cos(delta)) + ((cos(phi1) * sin(delta)) * cos(theta))))))))
end function
// Destination longitude given bearing on a great circle; direct
// translation of the FPCore specification above.  phi2 is accepted
// but unused.
public static double code(double lambda1, double phi1, double phi2, double delta, double theta) {
	return lambda1 + Math.atan2(((Math.sin(theta) * Math.sin(delta)) * Math.cos(phi1)), (Math.cos(delta) - (Math.sin(phi1) * Math.sin(Math.asin(((Math.sin(phi1) * Math.cos(delta)) + ((Math.cos(phi1) * Math.sin(delta)) * Math.cos(theta))))))));
}
def code(lambda1, phi1, phi2, delta, theta):
	"""Destination longitude given bearing on a great circle.

	Direct translation of the FPCore specification above; ``phi2`` is
	accepted but unused.  Assumes ``math`` is imported at module level.
	"""
	return lambda1 + math.atan2(((math.sin(theta) * math.sin(delta)) * math.cos(phi1)), (math.cos(delta) - (math.sin(phi1) * math.sin(math.asin(((math.sin(phi1) * math.cos(delta)) + ((math.cos(phi1) * math.sin(delta)) * math.cos(theta))))))))
# Destination longitude given bearing on a great circle; direct
# translation of the FPCore specification above.  Two-argument
# atan(y, x) is Julia's atan2.  phi2 is accepted but unused.
function code(lambda1, phi1, phi2, delta, theta)
	return Float64(lambda1 + atan(Float64(Float64(sin(theta) * sin(delta)) * cos(phi1)), Float64(cos(delta) - Float64(sin(phi1) * sin(asin(Float64(Float64(sin(phi1) * cos(delta)) + Float64(Float64(cos(phi1) * sin(delta)) * cos(theta)))))))))
end
% Destination longitude given bearing on a great circle; direct
% translation of the FPCore specification above.  phi2 is accepted
% but unused.
function tmp = code(lambda1, phi1, phi2, delta, theta)
	tmp = lambda1 + atan2(((sin(theta) * sin(delta)) * cos(phi1)), (cos(delta) - (sin(phi1) * sin(asin(((sin(phi1) * cos(delta)) + ((cos(phi1) * sin(delta)) * cos(theta))))))));
end
code[lambda1_, phi1_, phi2_, delta_, theta_] := N[(lambda1 + N[ArcTan[N[(N[(N[Sin[theta], $MachinePrecision] * N[Sin[delta], $MachinePrecision]), $MachinePrecision] * N[Cos[phi1], $MachinePrecision]), $MachinePrecision] / N[(N[Cos[delta], $MachinePrecision] - N[(N[Sin[phi1], $MachinePrecision] * N[Sin[N[ArcSin[N[(N[(N[Sin[phi1], $MachinePrecision] * N[Cos[delta], $MachinePrecision]), $MachinePrecision] + N[(N[(N[Cos[phi1], $MachinePrecision] * N[Sin[delta], $MachinePrecision]), $MachinePrecision] * N[Cos[theta], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \sin \sin^{-1} \left(\sin \phi_1 \cdot \cos delta + \left(\cos \phi_1 \cdot \sin delta\right) \cdot \cos theta\right)}

Alternative 1: 99.8% accurate, 1.0× speedup?

\[\lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\left(1 - \frac{\mathsf{fma}\left(\cos theta, \cos \phi_1 \cdot \sin delta, \sin \phi_1 \cdot \cos delta\right) \cdot \sin \phi_1}{\cos delta}\right) \cdot \cos delta} \]
(FPCore (lambda1 phi1 phi2 delta theta)
 :precision binary64
 (+
  lambda1
  (atan2
   (* (* (sin theta) (sin delta)) (cos phi1))
   (*
    (-
     1.0
     (/
      (*
       (fma (cos theta) (* (cos phi1) (sin delta)) (* (sin phi1) (cos delta)))
       (sin phi1))
      (cos delta)))
    (cos delta)))))
// Herbie alternative 1 (99.8% accurate): sin(asin(x)) is simplified to x
// and the denominator's subtraction is rewritten (sub-to-mult) as
// (1 - t/cos(delta)) * cos(delta), with the inner sum fused into an fma.
double code(double lambda1, double phi1, double phi2, double delta, double theta) {
	return lambda1 + atan2(((sin(theta) * sin(delta)) * cos(phi1)), ((1.0 - ((fma(cos(theta), (cos(phi1) * sin(delta)), (sin(phi1) * cos(delta))) * sin(phi1)) / cos(delta))) * cos(delta)));
}
# Julia port of Herbie alternative 1 above; two-argument atan(y, x)
# is Julia's atan2.
function code(lambda1, phi1, phi2, delta, theta)
	return Float64(lambda1 + atan(Float64(Float64(sin(theta) * sin(delta)) * cos(phi1)), Float64(Float64(1.0 - Float64(Float64(fma(cos(theta), Float64(cos(phi1) * sin(delta)), Float64(sin(phi1) * cos(delta))) * sin(phi1)) / cos(delta))) * cos(delta))))
end
code[lambda1_, phi1_, phi2_, delta_, theta_] := N[(lambda1 + N[ArcTan[N[(N[(N[Sin[theta], $MachinePrecision] * N[Sin[delta], $MachinePrecision]), $MachinePrecision] * N[Cos[phi1], $MachinePrecision]), $MachinePrecision] / N[(N[(1.0 - N[(N[(N[(N[Cos[theta], $MachinePrecision] * N[(N[Cos[phi1], $MachinePrecision] * N[Sin[delta], $MachinePrecision]), $MachinePrecision] + N[(N[Sin[phi1], $MachinePrecision] * N[Cos[delta], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[Sin[phi1], $MachinePrecision]), $MachinePrecision] / N[Cos[delta], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[Cos[delta], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\left(1 - \frac{\mathsf{fma}\left(\cos theta, \cos \phi_1 \cdot \sin delta, \sin \phi_1 \cdot \cos delta\right) \cdot \sin \phi_1}{\cos delta}\right) \cdot \cos delta}
Derivation
  1. Initial program 99.8%

    \[\lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \sin \sin^{-1} \left(\sin \phi_1 \cdot \cos delta + \left(\cos \phi_1 \cdot \sin delta\right) \cdot \cos theta\right)} \]
  2. Step-by-step derivation
    1. lift--.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\color{blue}{\cos delta - \sin \phi_1 \cdot \sin \sin^{-1} \left(\sin \phi_1 \cdot \cos delta + \left(\cos \phi_1 \cdot \sin delta\right) \cdot \cos theta\right)}} \]
    2. sub-to-multN/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\color{blue}{\left(1 - \frac{\sin \phi_1 \cdot \sin \sin^{-1} \left(\sin \phi_1 \cdot \cos delta + \left(\cos \phi_1 \cdot \sin delta\right) \cdot \cos theta\right)}{\cos delta}\right) \cdot \cos delta}} \]
    3. lower-unsound-*.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\color{blue}{\left(1 - \frac{\sin \phi_1 \cdot \sin \sin^{-1} \left(\sin \phi_1 \cdot \cos delta + \left(\cos \phi_1 \cdot \sin delta\right) \cdot \cos theta\right)}{\cos delta}\right) \cdot \cos delta}} \]
  3. Applied rewrites99.8%

    \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\color{blue}{\left(1 - \frac{\mathsf{fma}\left(\cos theta, \cos \phi_1 \cdot \sin delta, \sin \phi_1 \cdot \cos delta\right) \cdot \sin \phi_1}{\cos delta}\right) \cdot \cos delta}} \]
  4. Add Preprocessing

Alternative 2: 99.8% accurate, 1.0× speedup?

\[\lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \mathsf{fma}\left(\sin \phi_1 \cdot \cos delta, \sin \phi_1, \left(\cos theta \cdot \left(\cos \phi_1 \cdot \sin delta\right)\right) \cdot \sin \phi_1\right)} \]
(FPCore (lambda1 phi1 phi2 delta theta)
 :precision binary64
 (+
  lambda1
  (atan2
   (* (* (sin theta) (sin delta)) (cos phi1))
   (-
    (cos delta)
    (fma
     (* (sin phi1) (cos delta))
     (sin phi1)
     (* (* (cos theta) (* (cos phi1) (sin delta))) (sin phi1)))))))
// Herbie alternative 2 (99.8% accurate): sin(asin(x)) is simplified to x,
// sin(phi1) is distributed over the inner sum, and the result is fused
// into a single fma.
double code(double lambda1, double phi1, double phi2, double delta, double theta) {
	return lambda1 + atan2(((sin(theta) * sin(delta)) * cos(phi1)), (cos(delta) - fma((sin(phi1) * cos(delta)), sin(phi1), ((cos(theta) * (cos(phi1) * sin(delta))) * sin(phi1)))));
}
# Julia port of Herbie alternative 2 above; two-argument atan(y, x)
# is Julia's atan2.
function code(lambda1, phi1, phi2, delta, theta)
	return Float64(lambda1 + atan(Float64(Float64(sin(theta) * sin(delta)) * cos(phi1)), Float64(cos(delta) - fma(Float64(sin(phi1) * cos(delta)), sin(phi1), Float64(Float64(cos(theta) * Float64(cos(phi1) * sin(delta))) * sin(phi1))))))
end
code[lambda1_, phi1_, phi2_, delta_, theta_] := N[(lambda1 + N[ArcTan[N[(N[(N[Sin[theta], $MachinePrecision] * N[Sin[delta], $MachinePrecision]), $MachinePrecision] * N[Cos[phi1], $MachinePrecision]), $MachinePrecision] / N[(N[Cos[delta], $MachinePrecision] - N[(N[(N[Sin[phi1], $MachinePrecision] * N[Cos[delta], $MachinePrecision]), $MachinePrecision] * N[Sin[phi1], $MachinePrecision] + N[(N[(N[Cos[theta], $MachinePrecision] * N[(N[Cos[phi1], $MachinePrecision] * N[Sin[delta], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[Sin[phi1], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \mathsf{fma}\left(\sin \phi_1 \cdot \cos delta, \sin \phi_1, \left(\cos theta \cdot \left(\cos \phi_1 \cdot \sin delta\right)\right) \cdot \sin \phi_1\right)}
Derivation
  1. Initial program 99.8%

    \[\lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \sin \sin^{-1} \left(\sin \phi_1 \cdot \cos delta + \left(\cos \phi_1 \cdot \sin delta\right) \cdot \cos theta\right)} \]
  2. Step-by-step derivation
    1. lift-*.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \color{blue}{\sin \phi_1 \cdot \sin \sin^{-1} \left(\sin \phi_1 \cdot \cos delta + \left(\cos \phi_1 \cdot \sin delta\right) \cdot \cos theta\right)}} \]
    2. lift-sin.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \color{blue}{\sin \sin^{-1} \left(\sin \phi_1 \cdot \cos delta + \left(\cos \phi_1 \cdot \sin delta\right) \cdot \cos theta\right)}} \]
    3. lift-asin.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \sin \color{blue}{\sin^{-1} \left(\sin \phi_1 \cdot \cos delta + \left(\cos \phi_1 \cdot \sin delta\right) \cdot \cos theta\right)}} \]
    4. sin-asinN/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \color{blue}{\left(\sin \phi_1 \cdot \cos delta + \left(\cos \phi_1 \cdot \sin delta\right) \cdot \cos theta\right)}} \]
    5. lift-+.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \color{blue}{\left(\sin \phi_1 \cdot \cos delta + \left(\cos \phi_1 \cdot \sin delta\right) \cdot \cos theta\right)}} \]
    6. distribute-rgt-inN/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \color{blue}{\left(\left(\sin \phi_1 \cdot \cos delta\right) \cdot \sin \phi_1 + \left(\left(\cos \phi_1 \cdot \sin delta\right) \cdot \cos theta\right) \cdot \sin \phi_1\right)}} \]
    7. lower-fma.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \color{blue}{\mathsf{fma}\left(\sin \phi_1 \cdot \cos delta, \sin \phi_1, \left(\left(\cos \phi_1 \cdot \sin delta\right) \cdot \cos theta\right) \cdot \sin \phi_1\right)}} \]
    8. lower-*.f6499.8%

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \mathsf{fma}\left(\sin \phi_1 \cdot \cos delta, \sin \phi_1, \color{blue}{\left(\left(\cos \phi_1 \cdot \sin delta\right) \cdot \cos theta\right) \cdot \sin \phi_1}\right)} \]
    9. lift-*.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \mathsf{fma}\left(\sin \phi_1 \cdot \cos delta, \sin \phi_1, \color{blue}{\left(\left(\cos \phi_1 \cdot \sin delta\right) \cdot \cos theta\right)} \cdot \sin \phi_1\right)} \]
    10. *-commutativeN/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \mathsf{fma}\left(\sin \phi_1 \cdot \cos delta, \sin \phi_1, \color{blue}{\left(\cos theta \cdot \left(\cos \phi_1 \cdot \sin delta\right)\right)} \cdot \sin \phi_1\right)} \]
    11. lower-*.f6499.8%

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \mathsf{fma}\left(\sin \phi_1 \cdot \cos delta, \sin \phi_1, \color{blue}{\left(\cos theta \cdot \left(\cos \phi_1 \cdot \sin delta\right)\right)} \cdot \sin \phi_1\right)} \]
  3. Applied rewrites99.8%

    \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \color{blue}{\mathsf{fma}\left(\sin \phi_1 \cdot \cos delta, \sin \phi_1, \left(\cos theta \cdot \left(\cos \phi_1 \cdot \sin delta\right)\right) \cdot \sin \phi_1\right)}} \]
  4. Add Preprocessing

Alternative 3: 99.8% accurate, 1.1× speedup?

\[\tan^{-1}_* \frac{\cos \phi_1 \cdot \left(\sin delta \cdot \sin theta\right)}{\cos delta - \mathsf{fma}\left(\cos theta, \cos \phi_1 \cdot \sin delta, \sin \phi_1 \cdot \cos delta\right) \cdot \sin \phi_1} + \lambda_1 \]
(FPCore (lambda1 phi1 phi2 delta theta)
 :precision binary64
 (+
  (atan2
   (* (cos phi1) (* (sin delta) (sin theta)))
   (-
    (cos delta)
    (*
     (fma (cos theta) (* (cos phi1) (sin delta)) (* (sin phi1) (cos delta)))
     (sin phi1))))
  lambda1))
// Herbie alternative 3 (99.8% accurate, 1.1x speedup): the outer addition
// is commuted (atan2 term first, then lambda1) and the denominator uses
// the fma form of the inner sum, with sin(asin(x)) simplified to x.
double code(double lambda1, double phi1, double phi2, double delta, double theta) {
	return atan2((cos(phi1) * (sin(delta) * sin(theta))), (cos(delta) - (fma(cos(theta), (cos(phi1) * sin(delta)), (sin(phi1) * cos(delta))) * sin(phi1)))) + lambda1;
}
# Julia port of Herbie alternative 3 above; two-argument atan(y, x)
# is Julia's atan2.
function code(lambda1, phi1, phi2, delta, theta)
	return Float64(atan(Float64(cos(phi1) * Float64(sin(delta) * sin(theta))), Float64(cos(delta) - Float64(fma(cos(theta), Float64(cos(phi1) * sin(delta)), Float64(sin(phi1) * cos(delta))) * sin(phi1)))) + lambda1)
end
code[lambda1_, phi1_, phi2_, delta_, theta_] := N[(N[ArcTan[N[(N[Cos[phi1], $MachinePrecision] * N[(N[Sin[delta], $MachinePrecision] * N[Sin[theta], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(N[Cos[delta], $MachinePrecision] - N[(N[(N[Cos[theta], $MachinePrecision] * N[(N[Cos[phi1], $MachinePrecision] * N[Sin[delta], $MachinePrecision]), $MachinePrecision] + N[(N[Sin[phi1], $MachinePrecision] * N[Cos[delta], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[Sin[phi1], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] + lambda1), $MachinePrecision]
\tan^{-1}_* \frac{\cos \phi_1 \cdot \left(\sin delta \cdot \sin theta\right)}{\cos delta - \mathsf{fma}\left(\cos theta, \cos \phi_1 \cdot \sin delta, \sin \phi_1 \cdot \cos delta\right) \cdot \sin \phi_1} + \lambda_1
Derivation
  1. Initial program 99.8%

    \[\lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \sin \sin^{-1} \left(\sin \phi_1 \cdot \cos delta + \left(\cos \phi_1 \cdot \sin delta\right) \cdot \cos theta\right)} \]
  2. Step-by-step derivation
    1. lift-+.f64N/A

      \[\leadsto \color{blue}{\lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \sin \sin^{-1} \left(\sin \phi_1 \cdot \cos delta + \left(\cos \phi_1 \cdot \sin delta\right) \cdot \cos theta\right)}} \]
    2. +-commutativeN/A

      \[\leadsto \color{blue}{\tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \sin \sin^{-1} \left(\sin \phi_1 \cdot \cos delta + \left(\cos \phi_1 \cdot \sin delta\right) \cdot \cos theta\right)} + \lambda_1} \]
    3. lower-+.f6499.8%

      \[\leadsto \color{blue}{\tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \sin \sin^{-1} \left(\sin \phi_1 \cdot \cos delta + \left(\cos \phi_1 \cdot \sin delta\right) \cdot \cos theta\right)} + \lambda_1} \]
  3. Applied rewrites99.8%

    \[\leadsto \color{blue}{\tan^{-1}_* \frac{\cos \phi_1 \cdot \left(\sin delta \cdot \sin theta\right)}{\cos delta - \mathsf{fma}\left(\cos theta, \cos \phi_1 \cdot \sin delta, \sin \phi_1 \cdot \cos delta\right) \cdot \sin \phi_1} + \lambda_1} \]
  4. Add Preprocessing

Alternative 4: 94.6% accurate, 1.1× speedup?

\[\lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \mathsf{fma}\left(\sin \phi_1 \cdot \sin \phi_1, \cos delta, \sin \phi_1 \cdot \left(\cos \phi_1 \cdot \sin delta\right)\right)} \]
(FPCore (lambda1 phi1 phi2 delta theta)
 :precision binary64
 (+
  lambda1
  (atan2
   (* (* (sin theta) (sin delta)) (cos phi1))
   (-
    (cos delta)
    (fma
     (* (sin phi1) (sin phi1))
     (cos delta)
     (* (sin phi1) (* (cos phi1) (sin delta))))))))
// Herbie alternative 4 (94.6% accurate, 1.1x speedup): the inner term is
// Taylor-expanded in theta around 0 (dropping the cos(theta) factor) and
// rewritten with an fma -- trades accuracy for speed.
double code(double lambda1, double phi1, double phi2, double delta, double theta) {
	return lambda1 + atan2(((sin(theta) * sin(delta)) * cos(phi1)), (cos(delta) - fma((sin(phi1) * sin(phi1)), cos(delta), (sin(phi1) * (cos(phi1) * sin(delta))))));
}
# Julia port of Herbie alternative 4 above; two-argument atan(y, x)
# is Julia's atan2.
function code(lambda1, phi1, phi2, delta, theta)
	return Float64(lambda1 + atan(Float64(Float64(sin(theta) * sin(delta)) * cos(phi1)), Float64(cos(delta) - fma(Float64(sin(phi1) * sin(phi1)), cos(delta), Float64(sin(phi1) * Float64(cos(phi1) * sin(delta)))))))
end
code[lambda1_, phi1_, phi2_, delta_, theta_] := N[(lambda1 + N[ArcTan[N[(N[(N[Sin[theta], $MachinePrecision] * N[Sin[delta], $MachinePrecision]), $MachinePrecision] * N[Cos[phi1], $MachinePrecision]), $MachinePrecision] / N[(N[Cos[delta], $MachinePrecision] - N[(N[(N[Sin[phi1], $MachinePrecision] * N[Sin[phi1], $MachinePrecision]), $MachinePrecision] * N[Cos[delta], $MachinePrecision] + N[(N[Sin[phi1], $MachinePrecision] * N[(N[Cos[phi1], $MachinePrecision] * N[Sin[delta], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \mathsf{fma}\left(\sin \phi_1 \cdot \sin \phi_1, \cos delta, \sin \phi_1 \cdot \left(\cos \phi_1 \cdot \sin delta\right)\right)}
Derivation
  1. Initial program 99.8%

    \[\lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \sin \sin^{-1} \left(\sin \phi_1 \cdot \cos delta + \left(\cos \phi_1 \cdot \sin delta\right) \cdot \cos theta\right)} \]
  2. Taylor expanded in theta around 0

    \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \color{blue}{\sin \phi_1 \cdot \left(\cos delta \cdot \sin \phi_1 + \cos \phi_1 \cdot \sin delta\right)}} \]
  3. Step-by-step derivation
    1. lower-*.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \color{blue}{\left(\cos delta \cdot \sin \phi_1 + \cos \phi_1 \cdot \sin delta\right)}} \]
    2. lower-sin.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \left(\color{blue}{\cos delta \cdot \sin \phi_1} + \cos \phi_1 \cdot \sin delta\right)} \]
    3. lower-fma.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \mathsf{fma}\left(\cos delta, \color{blue}{\sin \phi_1}, \cos \phi_1 \cdot \sin delta\right)} \]
    4. lower-cos.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \mathsf{fma}\left(\cos delta, \sin \color{blue}{\phi_1}, \cos \phi_1 \cdot \sin delta\right)} \]
    5. lower-sin.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \mathsf{fma}\left(\cos delta, \sin \phi_1, \cos \phi_1 \cdot \sin delta\right)} \]
    6. lower-*.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \mathsf{fma}\left(\cos delta, \sin \phi_1, \cos \phi_1 \cdot \sin delta\right)} \]
    7. lower-cos.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \mathsf{fma}\left(\cos delta, \sin \phi_1, \cos \phi_1 \cdot \sin delta\right)} \]
    8. lower-sin.f6494.6%

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \mathsf{fma}\left(\cos delta, \sin \phi_1, \cos \phi_1 \cdot \sin delta\right)} \]
  4. Applied rewrites94.6%

    \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \color{blue}{\sin \phi_1 \cdot \mathsf{fma}\left(\cos delta, \sin \phi_1, \cos \phi_1 \cdot \sin delta\right)}} \]
  5. Step-by-step derivation
    1. lift-*.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \color{blue}{\mathsf{fma}\left(\cos delta, \sin \phi_1, \cos \phi_1 \cdot \sin delta\right)}} \]
    2. lift-fma.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \left(\cos delta \cdot \sin \phi_1 + \color{blue}{\cos \phi_1 \cdot \sin delta}\right)} \]
    3. distribute-lft-inN/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \left(\sin \phi_1 \cdot \left(\cos delta \cdot \sin \phi_1\right) + \color{blue}{\sin \phi_1 \cdot \left(\cos \phi_1 \cdot \sin delta\right)}\right)} \]
    4. *-commutativeN/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \left(\sin \phi_1 \cdot \left(\sin \phi_1 \cdot \cos delta\right) + \sin \phi_1 \cdot \left(\cos \phi_1 \cdot \sin delta\right)\right)} \]
    5. associate-*r*N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \left(\left(\sin \phi_1 \cdot \sin \phi_1\right) \cdot \cos delta + \color{blue}{\sin \phi_1} \cdot \left(\cos \phi_1 \cdot \sin delta\right)\right)} \]
    6. lower-fma.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \mathsf{fma}\left(\sin \phi_1 \cdot \sin \phi_1, \color{blue}{\cos delta}, \sin \phi_1 \cdot \left(\cos \phi_1 \cdot \sin delta\right)\right)} \]
    7. lower-*.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \mathsf{fma}\left(\sin \phi_1 \cdot \sin \phi_1, \cos \color{blue}{delta}, \sin \phi_1 \cdot \left(\cos \phi_1 \cdot \sin delta\right)\right)} \]
    8. lower-*.f6494.6%

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \mathsf{fma}\left(\sin \phi_1 \cdot \sin \phi_1, \cos delta, \sin \phi_1 \cdot \left(\cos \phi_1 \cdot \sin delta\right)\right)} \]
  6. Applied rewrites94.6%

    \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \mathsf{fma}\left(\sin \phi_1 \cdot \sin \phi_1, \color{blue}{\cos delta}, \sin \phi_1 \cdot \left(\cos \phi_1 \cdot \sin delta\right)\right)} \]
  7. Add Preprocessing

Alternative 5: 94.6% accurate, 1.2× speedup?

\[\lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \cos \phi_1\right) \cdot \sin delta}{\cos delta - \sin \phi_1 \cdot \mathsf{fma}\left(\cos delta, \sin \phi_1, \cos \phi_1 \cdot \sin delta\right)} \]
(FPCore (lambda1 phi1 phi2 delta theta)
 :precision binary64
 (+
  lambda1
  (atan2
   (* (* (sin theta) (cos phi1)) (sin delta))
   (-
    (cos delta)
    (* (sin phi1) (fma (cos delta) (sin phi1) (* (cos phi1) (sin delta))))))))
double code(double lambda1, double phi1, double phi2, double delta, double theta) {
	return lambda1 + atan2(((sin(theta) * cos(phi1)) * sin(delta)), (cos(delta) - (sin(phi1) * fma(cos(delta), sin(phi1), (cos(phi1) * sin(delta))))));
}
# Herbie alternative 5. atan(y, x) is Julia's two-argument arctangent;
# the Float64(...) wrappers pin each intermediate to binary64.
# phi2 is accepted but unused.
function code(lambda1, phi1, phi2, delta, theta)
	return Float64(lambda1 + atan(Float64(Float64(sin(theta) * cos(phi1)) * sin(delta)), Float64(cos(delta) - Float64(sin(phi1) * fma(cos(delta), sin(phi1), Float64(cos(phi1) * sin(delta)))))))
end
code[lambda1_, phi1_, phi2_, delta_, theta_] := N[(lambda1 + N[ArcTan[N[(N[(N[Sin[theta], $MachinePrecision] * N[Cos[phi1], $MachinePrecision]), $MachinePrecision] * N[Sin[delta], $MachinePrecision]), $MachinePrecision] / N[(N[Cos[delta], $MachinePrecision] - N[(N[Sin[phi1], $MachinePrecision] * N[(N[Cos[delta], $MachinePrecision] * N[Sin[phi1], $MachinePrecision] + N[(N[Cos[phi1], $MachinePrecision] * N[Sin[delta], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \cos \phi_1\right) \cdot \sin delta}{\cos delta - \sin \phi_1 \cdot \mathsf{fma}\left(\cos delta, \sin \phi_1, \cos \phi_1 \cdot \sin delta\right)}
Derivation
  1. Initial program 99.8%

    \[\lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \sin \sin^{-1} \left(\sin \phi_1 \cdot \cos delta + \left(\cos \phi_1 \cdot \sin delta\right) \cdot \cos theta\right)} \]
  2. Taylor expanded in theta around 0

    \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \color{blue}{\sin \phi_1 \cdot \left(\cos delta \cdot \sin \phi_1 + \cos \phi_1 \cdot \sin delta\right)}} \]
  3. Step-by-step derivation
    1. lower-*.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \color{blue}{\left(\cos delta \cdot \sin \phi_1 + \cos \phi_1 \cdot \sin delta\right)}} \]
    2. lower-sin.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \left(\color{blue}{\cos delta \cdot \sin \phi_1} + \cos \phi_1 \cdot \sin delta\right)} \]
    3. lower-fma.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \mathsf{fma}\left(\cos delta, \color{blue}{\sin \phi_1}, \cos \phi_1 \cdot \sin delta\right)} \]
    4. lower-cos.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \mathsf{fma}\left(\cos delta, \sin \color{blue}{\phi_1}, \cos \phi_1 \cdot \sin delta\right)} \]
    5. lower-sin.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \mathsf{fma}\left(\cos delta, \sin \phi_1, \cos \phi_1 \cdot \sin delta\right)} \]
    6. lower-*.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \mathsf{fma}\left(\cos delta, \sin \phi_1, \cos \phi_1 \cdot \sin delta\right)} \]
    7. lower-cos.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \mathsf{fma}\left(\cos delta, \sin \phi_1, \cos \phi_1 \cdot \sin delta\right)} \]
    8. lower-sin.f6494.6%

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \mathsf{fma}\left(\cos delta, \sin \phi_1, \cos \phi_1 \cdot \sin delta\right)} \]
  4. Applied rewrites94.6%

    \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \color{blue}{\sin \phi_1 \cdot \mathsf{fma}\left(\cos delta, \sin \phi_1, \cos \phi_1 \cdot \sin delta\right)}} \]
  5. Step-by-step derivation
    1. lift-*.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\color{blue}{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}}{\cos delta - \sin \phi_1 \cdot \mathsf{fma}\left(\cos delta, \sin \phi_1, \cos \phi_1 \cdot \sin delta\right)} \]
    2. lift-*.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\color{blue}{\left(\sin theta \cdot \sin delta\right)} \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \mathsf{fma}\left(\cos delta, \sin \phi_1, \cos \phi_1 \cdot \sin delta\right)} \]
    3. associate-*l*N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\color{blue}{\sin theta \cdot \left(\sin delta \cdot \cos \phi_1\right)}}{\cos delta - \sin \phi_1 \cdot \mathsf{fma}\left(\cos delta, \sin \phi_1, \cos \phi_1 \cdot \sin delta\right)} \]
    4. *-commutativeN/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin theta \cdot \color{blue}{\left(\cos \phi_1 \cdot \sin delta\right)}}{\cos delta - \sin \phi_1 \cdot \mathsf{fma}\left(\cos delta, \sin \phi_1, \cos \phi_1 \cdot \sin delta\right)} \]
    5. associate-*r*N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\color{blue}{\left(\sin theta \cdot \cos \phi_1\right) \cdot \sin delta}}{\cos delta - \sin \phi_1 \cdot \mathsf{fma}\left(\cos delta, \sin \phi_1, \cos \phi_1 \cdot \sin delta\right)} \]
    6. lower-*.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\color{blue}{\left(\sin theta \cdot \cos \phi_1\right) \cdot \sin delta}}{\cos delta - \sin \phi_1 \cdot \mathsf{fma}\left(\cos delta, \sin \phi_1, \cos \phi_1 \cdot \sin delta\right)} \]
    7. lower-*.f6494.6%

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\color{blue}{\left(\sin theta \cdot \cos \phi_1\right)} \cdot \sin delta}{\cos delta - \sin \phi_1 \cdot \mathsf{fma}\left(\cos delta, \sin \phi_1, \cos \phi_1 \cdot \sin delta\right)} \]
  6. Applied rewrites94.6%

    \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\color{blue}{\left(\sin theta \cdot \cos \phi_1\right) \cdot \sin delta}}{\cos delta - \sin \phi_1 \cdot \mathsf{fma}\left(\cos delta, \sin \phi_1, \cos \phi_1 \cdot \sin delta\right)} \]
  7. Add Preprocessing

Alternative 6: 92.3% accurate, 2.2× speedup?

\[\begin{array}{l} t_1 := \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \cos \phi_1\right) \cdot \sin delta}{\cos delta}\\ \mathbf{if}\;delta \leq -80000000000:\\ \;\;\;\;t\_1\\ \mathbf{elif}\;delta \leq 3.9 \cdot 10^{-28}:\\ \;\;\;\;\lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{1 - {\sin \phi_1}^{2}}\\ \mathbf{else}:\\ \;\;\;\;t\_1\\ \end{array} \]
; Herbie alternative 6 (92.3% accurate, 2.2x speedup): input split into two
; regimes on delta. Outside (-8e10, 3.9e-28] the denominator is Taylor-reduced
; to cos(delta); inside it is reduced to 1 - sin(phi1)^2. phi2 is unused.
(FPCore (lambda1 phi1 phi2 delta theta)
 :precision binary64
 (let* ((t_1
         (+
          lambda1
          (atan2 (* (* (sin theta) (cos phi1)) (sin delta)) (cos delta)))))
   (if (<= delta -80000000000.0)
     t_1
     (if (<= delta 3.9e-28)
       (+
        lambda1
        (atan2
         (* (* (sin theta) (sin delta)) (cos phi1))
         (- 1.0 (pow (sin phi1) 2.0))))
       t_1))))
double code(double lambda1, double phi1, double phi2, double delta, double theta) {
	double t_1 = lambda1 + atan2(((sin(theta) * cos(phi1)) * sin(delta)), cos(delta));
	double tmp;
	if (delta <= -80000000000.0) {
		tmp = t_1;
	} else if (delta <= 3.9e-28) {
		tmp = lambda1 + atan2(((sin(theta) * sin(delta)) * cos(phi1)), (1.0 - pow(sin(phi1), 2.0)));
	} else {
		tmp = t_1;
	}
	return tmp;
}
! Helper module: fmax/fmin wrappers over Fortran's max/min intrinsics.
! Unlike intrinsic max/min, these return the non-NaN argument when the other
! argument is NaN (x /= x is true only for NaN), matching C's fmax/fmin.
! The generic interfaces cover every real(4)/real(8) argument combination,
! widening the result to real(8) whenever either argument is real(8).
module fmin_fmax_functions
    implicit none
    private
    public fmax
    public fmin

    interface fmax
        module procedure fmax88
        module procedure fmax44
        module procedure fmax84
        module procedure fmax48
    end interface
    interface fmin
        module procedure fmin88
        module procedure fmin44
        module procedure fmin84
        module procedure fmin48
    end interface
contains
    ! merge(y, merge(x, max(x, y), y /= y), x /= x) reads:
    ! if x is NaN return y; else if y is NaN return x; else max(x, y).
    real(8) function fmax88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    real(4) function fmax44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    real(8) function fmax84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
    end function
    real(8) function fmax48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
    end function
    real(8) function fmin88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    real(4) function fmin44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    real(8) function fmin84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
    end function
    real(8) function fmin48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
    end function
end module

! Destination longitude given bearing on a great circle (Herbie alt. 6,
! regime-split approximation; 92.3% accurate per the report header).
! phi2 is accepted but unused (interface compatibility).
real(8) function code(lambda1, phi1, phi2, delta, theta)
use fmin_fmax_functions
    real(8), intent (in) :: lambda1
    real(8), intent (in) :: phi1
    real(8), intent (in) :: phi2
    real(8), intent (in) :: delta
    real(8), intent (in) :: theta
    real(8) :: t_1
    real(8) :: tmp
    ! Wide regime: denominator Taylor-reduced (in phi1 around 0) to cos(delta)
    t_1 = lambda1 + atan2(((sin(theta) * cos(phi1)) * sin(delta)), cos(delta))
    if (delta <= (-80000000000.0d0)) then
        tmp = t_1
    else if (delta <= 3.9d-28) then
        ! Near delta = 0: denominator Taylor-reduced to 1 - sin(phi1)**2
        tmp = lambda1 + atan2(((sin(theta) * sin(delta)) * cos(phi1)), (1.0d0 - (sin(phi1) ** 2.0d0)))
    else
        tmp = t_1
    end if
    code = tmp
end function
/**
 * Destination longitude given bearing on a great circle (Herbie alt. 6,
 * regime-split approximation). For -8e10 < delta <= 3.9e-28 the denominator
 * is 1 - sin(phi1)^2; all other delta (including NaN, which fails both
 * comparisons) uses the cos(delta)-denominator form. phi2 is unused but
 * kept for interface compatibility.
 */
public static double code(double lambda1, double phi1, double phi2, double delta, double theta) {
	double wide = lambda1 + Math.atan2((Math.sin(theta) * Math.cos(phi1)) * Math.sin(delta), Math.cos(delta));
	if (delta <= -80000000000.0) {
		return wide;
	}
	if (delta <= 3.9e-28) {
		return lambda1 + Math.atan2((Math.sin(theta) * Math.sin(delta)) * Math.cos(phi1), 1.0 - Math.pow(Math.sin(phi1), 2.0));
	}
	return wide;
}
def code(lambda1, phi1, phi2, delta, theta):
	t_1 = lambda1 + math.atan2(((math.sin(theta) * math.cos(phi1)) * math.sin(delta)), math.cos(delta))
	tmp = 0
	if delta <= -80000000000.0:
		tmp = t_1
	elif delta <= 3.9e-28:
		tmp = lambda1 + math.atan2(((math.sin(theta) * math.sin(delta)) * math.cos(phi1)), (1.0 - math.pow(math.sin(phi1), 2.0)))
	else:
		tmp = t_1
	return tmp
# Herbie alternative 6: regime split on delta. atan(y, x) is Julia's
# two-argument arctangent; Float64(...) pins intermediates to binary64.
# phi2 is accepted but unused.
function code(lambda1, phi1, phi2, delta, theta)
	t_1 = Float64(lambda1 + atan(Float64(Float64(sin(theta) * cos(phi1)) * sin(delta)), cos(delta)))
	tmp = 0.0
	if (delta <= -80000000000.0)
		tmp = t_1;
	elseif (delta <= 3.9e-28)
		# near delta = 0: denominator reduced to 1 - sin(phi1)^2
		tmp = Float64(lambda1 + atan(Float64(Float64(sin(theta) * sin(delta)) * cos(phi1)), Float64(1.0 - (sin(phi1) ^ 2.0))));
	else
		tmp = t_1;
	end
	return tmp
end
% Herbie alternative 6: regime split on delta. For -8e10 < delta <= 3.9e-28
% the denominator is 1 - sin(phi1)^2; otherwise cos(delta). phi2 is unused.
function tmp_2 = code(lambda1, phi1, phi2, delta, theta)
	t_1 = lambda1 + atan2(((sin(theta) * cos(phi1)) * sin(delta)), cos(delta));
	tmp = 0.0;
	if (delta <= -80000000000.0)
		tmp = t_1;
	elseif (delta <= 3.9e-28)
		tmp = lambda1 + atan2(((sin(theta) * sin(delta)) * cos(phi1)), (1.0 - (sin(phi1) ^ 2.0)));
	else
		tmp = t_1;
	end
	tmp_2 = tmp;
end
code[lambda1_, phi1_, phi2_, delta_, theta_] := Block[{t$95$1 = N[(lambda1 + N[ArcTan[N[(N[(N[Sin[theta], $MachinePrecision] * N[Cos[phi1], $MachinePrecision]), $MachinePrecision] * N[Sin[delta], $MachinePrecision]), $MachinePrecision] / N[Cos[delta], $MachinePrecision]], $MachinePrecision]), $MachinePrecision]}, If[LessEqual[delta, -80000000000.0], t$95$1, If[LessEqual[delta, 3.9e-28], N[(lambda1 + N[ArcTan[N[(N[(N[Sin[theta], $MachinePrecision] * N[Sin[delta], $MachinePrecision]), $MachinePrecision] * N[Cos[phi1], $MachinePrecision]), $MachinePrecision] / N[(1.0 - N[Power[N[Sin[phi1], $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision], t$95$1]]]
\begin{array}{l}
t_1 := \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \cos \phi_1\right) \cdot \sin delta}{\cos delta}\\
\mathbf{if}\;delta \leq -80000000000:\\
\;\;\;\;t\_1\\

\mathbf{elif}\;delta \leq 3.9 \cdot 10^{-28}:\\
\;\;\;\;\lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{1 - {\sin \phi_1}^{2}}\\

\mathbf{else}:\\
\;\;\;\;t\_1\\


\end{array}
Derivation
  1. Split input into 2 regimes
  2. if delta < -8e10 or 3.9e-28 < delta

    1. Initial program 99.8%

      \[\lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \sin \sin^{-1} \left(\sin \phi_1 \cdot \cos delta + \left(\cos \phi_1 \cdot \sin delta\right) \cdot \cos theta\right)} \]
    2. Taylor expanded in phi1 around 0

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\color{blue}{\cos delta}} \]
    3. Step-by-step derivation
      1. lower-cos.f6488.7%

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta} \]
    4. Applied rewrites88.7%

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\color{blue}{\cos delta}} \]
    5. Step-by-step derivation
      1. lift-*.f64N/A

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\color{blue}{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}}{\cos delta} \]
      2. lift-*.f64N/A

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\color{blue}{\left(\sin theta \cdot \sin delta\right)} \cdot \cos \phi_1}{\cos delta} \]
      3. associate-*l*N/A

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\color{blue}{\sin theta \cdot \left(\sin delta \cdot \cos \phi_1\right)}}{\cos delta} \]
      4. *-commutativeN/A

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin theta \cdot \color{blue}{\left(\cos \phi_1 \cdot \sin delta\right)}}{\cos delta} \]
      5. associate-*r*N/A

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\color{blue}{\left(\sin theta \cdot \cos \phi_1\right) \cdot \sin delta}}{\cos delta} \]
      6. lower-*.f64N/A

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\color{blue}{\left(\sin theta \cdot \cos \phi_1\right) \cdot \sin delta}}{\cos delta} \]
      7. lower-*.f6488.7%

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\color{blue}{\left(\sin theta \cdot \cos \phi_1\right)} \cdot \sin delta}{\cos delta} \]
    6. Applied rewrites88.7%

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\color{blue}{\left(\sin theta \cdot \cos \phi_1\right) \cdot \sin delta}}{\cos delta} \]

    if -8e10 < delta < 3.9e-28

    1. Initial program 99.8%

      \[\lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \sin \sin^{-1} \left(\sin \phi_1 \cdot \cos delta + \left(\cos \phi_1 \cdot \sin delta\right) \cdot \cos theta\right)} \]
    2. Taylor expanded in delta around 0

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\color{blue}{1 - {\sin \phi_1}^{2}}} \]
    3. Step-by-step derivation
      1. lower--.f64N/A

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{1 - \color{blue}{{\sin \phi_1}^{2}}} \]
      2. lower-pow.f64N/A

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{1 - {\sin \phi_1}^{\color{blue}{2}}} \]
      3. lower-sin.f6480.6%

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{1 - {\sin \phi_1}^{2}} \]
    4. Applied rewrites80.6%

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\color{blue}{1 - {\sin \phi_1}^{2}}} \]
  3. Recombined 2 regimes into one program.
  4. Add Preprocessing

Alternative 7: 92.0% accurate, 1.9× speedup?

\[\lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\left(1 - {\sin \phi_1}^{2}\right) \cdot \cos delta} \]
; Herbie alternative 7 (92.0% accurate, 1.9x speedup): denominator Taylor
; expanded in delta around 0 to (1 - sin(phi1)^2) * cos(delta). phi2 unused.
(FPCore (lambda1 phi1 phi2 delta theta)
 :precision binary64
 (+
  lambda1
  (atan2
   (* (* (sin theta) (sin delta)) (cos phi1))
   (* (- 1.0 (pow (sin phi1) 2.0)) (cos delta)))))
double code(double lambda1, double phi1, double phi2, double delta, double theta) {
	return lambda1 + atan2(((sin(theta) * sin(delta)) * cos(phi1)), ((1.0 - pow(sin(phi1), 2.0)) * cos(delta)));
}
! Helper module: fmax/fmin wrappers over Fortran's max/min intrinsics.
! Unlike intrinsic max/min, these return the non-NaN argument when the other
! argument is NaN (x /= x is true only for NaN), matching C's fmax/fmin.
! The generic interfaces cover every real(4)/real(8) argument combination,
! widening the result to real(8) whenever either argument is real(8).
module fmin_fmax_functions
    implicit none
    private
    public fmax
    public fmin

    interface fmax
        module procedure fmax88
        module procedure fmax44
        module procedure fmax84
        module procedure fmax48
    end interface
    interface fmin
        module procedure fmin88
        module procedure fmin44
        module procedure fmin84
        module procedure fmin48
    end interface
contains
    ! merge(y, merge(x, max(x, y), y /= y), x /= x) reads:
    ! if x is NaN return y; else if y is NaN return x; else max(x, y).
    real(8) function fmax88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    real(4) function fmax44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    real(8) function fmax84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
    end function
    real(8) function fmax48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
    end function
    real(8) function fmin88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    real(4) function fmin44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    real(8) function fmin84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
    end function
    real(8) function fmin48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
    end function
end module

! Destination longitude given bearing on a great circle (Herbie alt. 7).
! Denominator is the delta-around-0 Taylor reduction
! (1 - sin(phi1)**2) * cos(delta). phi2 is accepted but unused.
real(8) function code(lambda1, phi1, phi2, delta, theta)
use fmin_fmax_functions
    real(8), intent (in) :: lambda1
    real(8), intent (in) :: phi1
    real(8), intent (in) :: phi2
    real(8), intent (in) :: delta
    real(8), intent (in) :: theta
    code = lambda1 + atan2(((sin(theta) * sin(delta)) * cos(phi1)), ((1.0d0 - (sin(phi1) ** 2.0d0)) * cos(delta)))
end function
/**
 * Destination longitude given bearing on a great circle (Herbie alt. 7).
 * Denominator is the delta-around-0 Taylor reduction
 * (1 - sin(phi1)^2) * cos(delta). phi2 is unused but kept for
 * interface compatibility.
 */
public static double code(double lambda1, double phi1, double phi2, double delta, double theta) {
	double numerator = (Math.sin(theta) * Math.sin(delta)) * Math.cos(phi1);
	double denominator = (1.0 - Math.pow(Math.sin(phi1), 2.0)) * Math.cos(delta);
	return lambda1 + Math.atan2(numerator, denominator);
}
def code(lambda1, phi1, phi2, delta, theta):
	"""Destination longitude given bearing on a great circle (Herbie alt. 7).

	Denominator is the delta-around-0 Taylor reduction
	(1 - sin(phi1)**2) * cos(delta). phi2 is accepted but unused.
	"""
	numerator = (math.sin(theta) * math.sin(delta)) * math.cos(phi1)
	denominator = (1.0 - math.pow(math.sin(phi1), 2.0)) * math.cos(delta)
	return lambda1 + math.atan2(numerator, denominator)
# Herbie alternative 7: denominator (1 - sin(phi1)^2) * cos(delta);
# Float64(...) pins intermediates to binary64. phi2 is unused.
function code(lambda1, phi1, phi2, delta, theta)
	return Float64(lambda1 + atan(Float64(Float64(sin(theta) * sin(delta)) * cos(phi1)), Float64(Float64(1.0 - (sin(phi1) ^ 2.0)) * cos(delta))))
end
% Herbie alternative 7: denominator (1 - sin(phi1)^2) * cos(delta). phi2 unused.
function tmp = code(lambda1, phi1, phi2, delta, theta)
	tmp = lambda1 + atan2(((sin(theta) * sin(delta)) * cos(phi1)), ((1.0 - (sin(phi1) ^ 2.0)) * cos(delta)));
end
code[lambda1_, phi1_, phi2_, delta_, theta_] := N[(lambda1 + N[ArcTan[N[(N[(N[Sin[theta], $MachinePrecision] * N[Sin[delta], $MachinePrecision]), $MachinePrecision] * N[Cos[phi1], $MachinePrecision]), $MachinePrecision] / N[(N[(1.0 - N[Power[N[Sin[phi1], $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision] * N[Cos[delta], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\left(1 - {\sin \phi_1}^{2}\right) \cdot \cos delta}
Derivation
  1. Initial program 99.8%

    \[\lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \sin \sin^{-1} \left(\sin \phi_1 \cdot \cos delta + \left(\cos \phi_1 \cdot \sin delta\right) \cdot \cos theta\right)} \]
  2. Step-by-step derivation
    1. lift--.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\color{blue}{\cos delta - \sin \phi_1 \cdot \sin \sin^{-1} \left(\sin \phi_1 \cdot \cos delta + \left(\cos \phi_1 \cdot \sin delta\right) \cdot \cos theta\right)}} \]
    2. sub-to-multN/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\color{blue}{\left(1 - \frac{\sin \phi_1 \cdot \sin \sin^{-1} \left(\sin \phi_1 \cdot \cos delta + \left(\cos \phi_1 \cdot \sin delta\right) \cdot \cos theta\right)}{\cos delta}\right) \cdot \cos delta}} \]
    3. lower-unsound-*.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\color{blue}{\left(1 - \frac{\sin \phi_1 \cdot \sin \sin^{-1} \left(\sin \phi_1 \cdot \cos delta + \left(\cos \phi_1 \cdot \sin delta\right) \cdot \cos theta\right)}{\cos delta}\right) \cdot \cos delta}} \]
  3. Applied rewrites99.8%

    \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\color{blue}{\left(1 - \frac{\mathsf{fma}\left(\cos theta, \cos \phi_1 \cdot \sin delta, \sin \phi_1 \cdot \cos delta\right) \cdot \sin \phi_1}{\cos delta}\right) \cdot \cos delta}} \]
  4. Taylor expanded in delta around 0

    \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\color{blue}{\left(1 - {\sin \phi_1}^{2}\right)} \cdot \cos delta} \]
  5. Step-by-step derivation
    1. lower--.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\left(1 - \color{blue}{{\sin \phi_1}^{2}}\right) \cdot \cos delta} \]
    2. lower-pow.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\left(1 - {\sin \phi_1}^{\color{blue}{2}}\right) \cdot \cos delta} \]
    3. lower-sin.f6492.0%

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\left(1 - {\sin \phi_1}^{2}\right) \cdot \cos delta} \]
  6. Applied rewrites92.0%

    \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\color{blue}{\left(1 - {\sin \phi_1}^{2}\right)} \cdot \cos delta} \]
  7. Add Preprocessing

Alternative 8: 91.5% accurate, 1.9× speedup?

\[\lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - {\sin \phi_1}^{2}} \]
; Herbie alternative 8 (91.5% accurate, 1.9x speedup): denominator Taylor
; expanded in delta around 0 to cos(delta) - sin(phi1)^2. phi2 unused.
(FPCore (lambda1 phi1 phi2 delta theta)
 :precision binary64
 (+
  lambda1
  (atan2
   (* (* (sin theta) (sin delta)) (cos phi1))
   (- (cos delta) (pow (sin phi1) 2.0)))))
double code(double lambda1, double phi1, double phi2, double delta, double theta) {
	return lambda1 + atan2(((sin(theta) * sin(delta)) * cos(phi1)), (cos(delta) - pow(sin(phi1), 2.0)));
}
! Helper module: fmax/fmin wrappers over Fortran's max/min intrinsics.
! Unlike intrinsic max/min, these return the non-NaN argument when the other
! argument is NaN (x /= x is true only for NaN), matching C's fmax/fmin.
! The generic interfaces cover every real(4)/real(8) argument combination,
! widening the result to real(8) whenever either argument is real(8).
module fmin_fmax_functions
    implicit none
    private
    public fmax
    public fmin

    interface fmax
        module procedure fmax88
        module procedure fmax44
        module procedure fmax84
        module procedure fmax48
    end interface
    interface fmin
        module procedure fmin88
        module procedure fmin44
        module procedure fmin84
        module procedure fmin48
    end interface
contains
    ! merge(y, merge(x, max(x, y), y /= y), x /= x) reads:
    ! if x is NaN return y; else if y is NaN return x; else max(x, y).
    real(8) function fmax88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    real(4) function fmax44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    real(8) function fmax84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
    end function
    real(8) function fmax48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
    end function
    real(8) function fmin88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    real(4) function fmin44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    real(8) function fmin84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
    end function
    real(8) function fmin48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
    end function
end module

! Destination longitude given bearing on a great circle (Herbie alt. 8).
! Denominator is the delta-around-0 Taylor reduction
! cos(delta) - sin(phi1)**2. phi2 is accepted but unused.
real(8) function code(lambda1, phi1, phi2, delta, theta)
use fmin_fmax_functions
    real(8), intent (in) :: lambda1
    real(8), intent (in) :: phi1
    real(8), intent (in) :: phi2
    real(8), intent (in) :: delta
    real(8), intent (in) :: theta
    code = lambda1 + atan2(((sin(theta) * sin(delta)) * cos(phi1)), (cos(delta) - (sin(phi1) ** 2.0d0)))
end function
/**
 * Destination longitude given bearing on a great circle (Herbie alt. 8).
 * Denominator is the delta-around-0 Taylor reduction
 * cos(delta) - sin(phi1)^2. phi2 is unused but kept for
 * interface compatibility.
 */
public static double code(double lambda1, double phi1, double phi2, double delta, double theta) {
	double numerator = (Math.sin(theta) * Math.sin(delta)) * Math.cos(phi1);
	double denominator = Math.cos(delta) - Math.pow(Math.sin(phi1), 2.0);
	return lambda1 + Math.atan2(numerator, denominator);
}
def code(lambda1, phi1, phi2, delta, theta):
	"""Destination longitude given bearing on a great circle (Herbie alt. 8).

	Denominator is the delta-around-0 Taylor reduction
	cos(delta) - sin(phi1)**2. phi2 is accepted but unused.
	"""
	numerator = (math.sin(theta) * math.sin(delta)) * math.cos(phi1)
	denominator = math.cos(delta) - math.pow(math.sin(phi1), 2.0)
	return lambda1 + math.atan2(numerator, denominator)
# Herbie alternative 8: denominator cos(delta) - sin(phi1)^2;
# Float64(...) pins intermediates to binary64. phi2 is unused.
function code(lambda1, phi1, phi2, delta, theta)
	return Float64(lambda1 + atan(Float64(Float64(sin(theta) * sin(delta)) * cos(phi1)), Float64(cos(delta) - (sin(phi1) ^ 2.0))))
end
% Herbie alternative 8: denominator cos(delta) - sin(phi1)^2. phi2 unused.
function tmp = code(lambda1, phi1, phi2, delta, theta)
	tmp = lambda1 + atan2(((sin(theta) * sin(delta)) * cos(phi1)), (cos(delta) - (sin(phi1) ^ 2.0)));
end
code[lambda1_, phi1_, phi2_, delta_, theta_] := N[(lambda1 + N[ArcTan[N[(N[(N[Sin[theta], $MachinePrecision] * N[Sin[delta], $MachinePrecision]), $MachinePrecision] * N[Cos[phi1], $MachinePrecision]), $MachinePrecision] / N[(N[Cos[delta], $MachinePrecision] - N[Power[N[Sin[phi1], $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - {\sin \phi_1}^{2}}
Derivation
  1. Initial program 99.8%

    \[\lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \sin \sin^{-1} \left(\sin \phi_1 \cdot \cos delta + \left(\cos \phi_1 \cdot \sin delta\right) \cdot \cos theta\right)} \]
  2. Taylor expanded in delta around 0

    \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \color{blue}{{\sin \phi_1}^{2}}} \]
  3. Step-by-step derivation
    1. lower-pow.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - {\sin \phi_1}^{\color{blue}{2}}} \]
    2. lower-sin.f6492.3%

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - {\sin \phi_1}^{2}} \]
  4. Applied rewrites92.3%

    \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \color{blue}{{\sin \phi_1}^{2}}} \]
  5. Add Preprocessing

Alternative 9: 88.7% accurate, 2.6× speedup?

\[\lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \cos \phi_1\right) \cdot \sin delta}{\cos delta} \]
; Herbie alternative 9 (88.7% accurate, 2.6x speedup): denominator Taylor
; expanded in phi1 around 0 to just cos(delta). phi2 unused.
(FPCore (lambda1 phi1 phi2 delta theta)
 :precision binary64
 (+ lambda1 (atan2 (* (* (sin theta) (cos phi1)) (sin delta)) (cos delta))))
double code(double lambda1, double phi1, double phi2, double delta, double theta) {
	return lambda1 + atan2(((sin(theta) * cos(phi1)) * sin(delta)), cos(delta));
}
! Helper module: fmax/fmin wrappers over Fortran's max/min intrinsics.
! Unlike intrinsic max/min, these return the non-NaN argument when the other
! argument is NaN (x /= x is true only for NaN), matching C's fmax/fmin.
! The generic interfaces cover every real(4)/real(8) argument combination,
! widening the result to real(8) whenever either argument is real(8).
module fmin_fmax_functions
    implicit none
    private
    public fmax
    public fmin

    interface fmax
        module procedure fmax88
        module procedure fmax44
        module procedure fmax84
        module procedure fmax48
    end interface
    interface fmin
        module procedure fmin88
        module procedure fmin44
        module procedure fmin84
        module procedure fmin48
    end interface
contains
    ! merge(y, merge(x, max(x, y), y /= y), x /= x) reads:
    ! if x is NaN return y; else if y is NaN return x; else max(x, y).
    real(8) function fmax88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    real(4) function fmax44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    real(8) function fmax84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
    end function
    real(8) function fmax48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
    end function
    real(8) function fmin88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    real(4) function fmin44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    real(8) function fmin84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
    end function
    real(8) function fmin48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
    end function
end module

! Destination longitude given bearing on a great circle (Herbie alt. 9).
! Denominator is the phi1-around-0 Taylor reduction to cos(delta).
! phi2 is accepted but unused.
real(8) function code(lambda1, phi1, phi2, delta, theta)
use fmin_fmax_functions
    real(8), intent (in) :: lambda1
    real(8), intent (in) :: phi1
    real(8), intent (in) :: phi2
    real(8), intent (in) :: delta
    real(8), intent (in) :: theta
    code = lambda1 + atan2(((sin(theta) * cos(phi1)) * sin(delta)), cos(delta))
end function
/**
 * Destination longitude given bearing on a great circle (Herbie alt. 9).
 * Denominator is the phi1-around-0 Taylor reduction to cos(delta).
 * phi2 is unused but kept for interface compatibility.
 */
public static double code(double lambda1, double phi1, double phi2, double delta, double theta) {
	double numerator = (Math.sin(theta) * Math.cos(phi1)) * Math.sin(delta);
	return lambda1 + Math.atan2(numerator, Math.cos(delta));
}
def code(lambda1, phi1, phi2, delta, theta):
	"""Destination longitude given bearing on a great circle (Herbie alt. 9).

	Denominator is the phi1-around-0 Taylor reduction to cos(delta).
	phi2 is accepted but unused (interface compatibility).
	"""
	numerator = (math.sin(theta) * math.cos(phi1)) * math.sin(delta)
	return lambda1 + math.atan2(numerator, math.cos(delta))
# Herbie alternative 9: denominator reduced to cos(delta);
# Float64(...) pins intermediates to binary64. phi2 is unused.
function code(lambda1, phi1, phi2, delta, theta)
	return Float64(lambda1 + atan(Float64(Float64(sin(theta) * cos(phi1)) * sin(delta)), cos(delta)))
end
function tmp = code(lambda1, phi1, phi2, delta, theta)
	% Destination longitude on a great circle, phi1-simplified variant (phi2 unused).
	tmp = lambda1 + atan2(((sin(theta) * cos(phi1)) * sin(delta)), cos(delta));
end
(* Destination longitude, phi1-simplified; N[..., $MachinePrecision] keeps every step at machine-double precision. *)
code[lambda1_, phi1_, phi2_, delta_, theta_] := N[(lambda1 + N[ArcTan[N[(N[(N[Sin[theta], $MachinePrecision] * N[Cos[phi1], $MachinePrecision]), $MachinePrecision] * N[Sin[delta], $MachinePrecision]), $MachinePrecision] / N[Cos[delta], $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \cos \phi_1\right) \cdot \sin delta}{\cos delta}
Derivation
  1. Initial program 99.8%

    \[\lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \sin \sin^{-1} \left(\sin \phi_1 \cdot \cos delta + \left(\cos \phi_1 \cdot \sin delta\right) \cdot \cos theta\right)} \]
  2. Taylor expanded in phi1 around 0

    \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\color{blue}{\cos delta}} \]
  3. Step-by-step derivation
    1. lower-cos.f64 (accuracy: 88.7%)

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta} \]
  4. Applied rewrites (accuracy: 88.7%)

    \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\color{blue}{\cos delta}} \]
  5. Step-by-step derivation
    1. lift-*.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\color{blue}{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}}{\cos delta} \]
    2. lift-*.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\color{blue}{\left(\sin theta \cdot \sin delta\right)} \cdot \cos \phi_1}{\cos delta} \]
    3. associate-*l*N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\color{blue}{\sin theta \cdot \left(\sin delta \cdot \cos \phi_1\right)}}{\cos delta} \]
    4. *-commutativeN/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin theta \cdot \color{blue}{\left(\cos \phi_1 \cdot \sin delta\right)}}{\cos delta} \]
    5. associate-*r*N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\color{blue}{\left(\sin theta \cdot \cos \phi_1\right) \cdot \sin delta}}{\cos delta} \]
    6. lower-*.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\color{blue}{\left(\sin theta \cdot \cos \phi_1\right) \cdot \sin delta}}{\cos delta} \]
    7. lower-*.f6488.7%

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\color{blue}{\left(\sin theta \cdot \cos \phi_1\right)} \cdot \sin delta}{\cos delta} \]
  6. Applied rewrites88.7%

    \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\color{blue}{\left(\sin theta \cdot \cos \phi_1\right) \cdot \sin delta}}{\cos delta} \]
  7. Add Preprocessing

Alternative 10: 87.1% accurate, 2.7× speedup?

\[\begin{array}{l} \mathbf{if}\;\phi_1 \leq -45:\\ \;\;\;\;\lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{1 + -0.5 \cdot {delta}^{2}}\\ \mathbf{else}:\\ \;\;\;\;\lambda_1 + \tan^{-1}_* \frac{\sin delta \cdot \sin theta}{\cos delta}\\ \end{array} \]
;; Alternative 10: piecewise simplification of the destination-longitude formula.
;; For phi1 <= -45 the denominator cos(delta) is replaced by its second-order
;; Taylor polynomial 1 - delta^2/2; otherwise the phi1 terms are dropped entirely.
(FPCore (lambda1 phi1 phi2 delta theta)
 :precision binary64
 (if (<= phi1 -45.0)
   (+
    lambda1
    (atan2
     (* (* (sin theta) (sin delta)) (cos phi1))
     (+ 1.0 (* -0.5 (pow delta 2.0)))))
   (+ lambda1 (atan2 (* (sin delta) (sin theta)) (cos delta)))))
double code(double lambda1, double phi1, double phi2, double delta, double theta) {
	double tmp;
	if (phi1 <= -45.0) {
		tmp = lambda1 + atan2(((sin(theta) * sin(delta)) * cos(phi1)), (1.0 + (-0.5 * pow(delta, 2.0))));
	} else {
		tmp = lambda1 + atan2((sin(delta) * sin(theta)), cos(delta));
	}
	return tmp;
}
module fmin_fmax_functions
    ! NaN-aware min/max helpers with C fmin/fmax semantics for every
    ! combination of real(4) and real(8) arguments.  Each wrapper returns
    ! the other operand when exactly one argument is NaN, using the
    ! standard (x /= x) NaN test; mixed-kind results are promoted to real(8).
    implicit none
    private
    public fmax
    public fmin

    interface fmax
        module procedure fmax88
        module procedure fmax44
        module procedure fmax84
        module procedure fmax48
    end interface
    interface fmin
        module procedure fmin88
        module procedure fmin44
        module procedure fmin84
        module procedure fmin48
    end interface
contains
    ! fmax of two real(8) values.
    real(8) function fmax88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    ! fmax of two real(4) values.
    real(4) function fmax44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    ! fmax of real(8) x and real(4) y; result promoted to real(8).
    real(8) function fmax84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
    end function
    ! fmax of real(4) x and real(8) y; result promoted to real(8).
    real(8) function fmax48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
    end function
    ! fmin of two real(8) values.
    real(8) function fmin88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    ! fmin of two real(4) values.
    real(4) function fmin44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    ! fmin of real(8) x and real(4) y; result promoted to real(8).
    real(8) function fmin84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
    end function
    ! fmin of real(4) x and real(8) y; result promoted to real(8).
    real(8) function fmin48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
    end function
end module

real(8) function code(lambda1, phi1, phi2, delta, theta)
! Piecewise destination-longitude approximation (Alternative 10).
! For phi1 <= -45 the cos(delta) denominator is replaced by its
! second-order Taylor polynomial 1 - delta**2/2; otherwise the phi1
! terms are dropped entirely.  phi2 is unused (interface parity).
use fmin_fmax_functions
    implicit none
    real(8), intent (in) :: lambda1
    real(8), intent (in) :: phi1
    real(8), intent (in) :: phi2
    real(8), intent (in) :: delta
    real(8), intent (in) :: theta
    real(8) :: tmp
    if (phi1 <= (-45.0d0)) then
        tmp = lambda1 + atan2(((sin(theta) * sin(delta)) * cos(phi1)), (1.0d0 + ((-0.5d0) * (delta ** 2.0d0))))
    else
        tmp = lambda1 + atan2((sin(delta) * sin(theta)), cos(delta))
    end if
    code = tmp
end function
public static double code(double lambda1, double phi1, double phi2, double delta, double theta) {
	// Piecewise destination-longitude approximation (phi2 unused).
	// For phi1 <= -45, cos(delta) is replaced by its second-order Taylor polynomial.
	if (phi1 <= -45.0) {
		double num = (Math.sin(theta) * Math.sin(delta)) * Math.cos(phi1);
		double den = 1.0 + (-0.5 * Math.pow(delta, 2.0));
		return lambda1 + Math.atan2(num, den);
	}
	return lambda1 + Math.atan2(Math.sin(delta) * Math.sin(theta), Math.cos(delta));
}
def code(lambda1, phi1, phi2, delta, theta):
	"""Piecewise destination-longitude approximation (phi2 unused).

	For phi1 <= -45, cos(delta) in the denominator is replaced by its
	second-order Taylor polynomial 1 - 0.5*delta**2.
	"""
	if phi1 <= -45.0:
		num = (math.sin(theta) * math.sin(delta)) * math.cos(phi1)
		den = 1.0 + (-0.5 * math.pow(delta, 2.0))
		return lambda1 + math.atan2(num, den)
	return lambda1 + math.atan2(math.sin(delta) * math.sin(theta), math.cos(delta))
function code(lambda1, phi1, phi2, delta, theta)
	# Piecewise destination-longitude approximation (phi2 unused).
	# Two-argument atan(y, x) is Julia's atan2.
	tmp = 0.0
	if (phi1 <= -45.0)
		# Region phi1 <= -45: cos(delta) replaced by its 2nd-order Taylor polynomial.
		tmp = Float64(lambda1 + atan(Float64(Float64(sin(theta) * sin(delta)) * cos(phi1)), Float64(1.0 + Float64(-0.5 * (delta ^ 2.0)))));
	else
		tmp = Float64(lambda1 + atan(Float64(sin(delta) * sin(theta)), cos(delta)));
	end
	return tmp
end
function tmp_2 = code(lambda1, phi1, phi2, delta, theta)
	% Piecewise destination-longitude approximation (phi2 unused).
	tmp = 0.0;
	if (phi1 <= -45.0)
		% Region phi1 <= -45: cos(delta) replaced by its 2nd-order Taylor polynomial.
		tmp = lambda1 + atan2(((sin(theta) * sin(delta)) * cos(phi1)), (1.0 + (-0.5 * (delta ^ 2.0))));
	else
		tmp = lambda1 + atan2((sin(delta) * sin(theta)), cos(delta));
	end
	tmp_2 = tmp;
end
(* Piecewise destination-longitude approximation; N[..., $MachinePrecision] keeps every step at machine-double precision. *)
code[lambda1_, phi1_, phi2_, delta_, theta_] := If[LessEqual[phi1, -45.0], N[(lambda1 + N[ArcTan[N[(N[(N[Sin[theta], $MachinePrecision] * N[Sin[delta], $MachinePrecision]), $MachinePrecision] * N[Cos[phi1], $MachinePrecision]), $MachinePrecision] / N[(1.0 + N[(-0.5 * N[Power[delta, 2.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision], N[(lambda1 + N[ArcTan[N[(N[Sin[delta], $MachinePrecision] * N[Sin[theta], $MachinePrecision]), $MachinePrecision] / N[Cos[delta], $MachinePrecision]], $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\mathbf{if}\;\phi_1 \leq -45:\\
\;\;\;\;\lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{1 + -0.5 \cdot {delta}^{2}}\\

\mathbf{else}:\\
\;\;\;\;\lambda_1 + \tan^{-1}_* \frac{\sin delta \cdot \sin theta}{\cos delta}\\


\end{array}
Derivation
  1. Split input into 2 regimes
  2. if phi1 < -45

    1. Initial program 99.8%

      \[\lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \sin \sin^{-1} \left(\sin \phi_1 \cdot \cos delta + \left(\cos \phi_1 \cdot \sin delta\right) \cdot \cos theta\right)} \]
    2. Taylor expanded in phi1 around 0

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\color{blue}{\cos delta}} \]
    3. Step-by-step derivation
      1. lower-cos.f64 (accuracy: 88.7%)

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta} \]
    4. Applied rewrites88.7%

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\color{blue}{\cos delta}} \]
    5. Taylor expanded in delta around 0

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{1 + \color{blue}{\frac{-1}{2} \cdot {delta}^{2}}} \]
    6. Step-by-step derivation
      1. lower-+.f64N/A

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{1 + \frac{-1}{2} \cdot \color{blue}{{delta}^{2}}} \]
      2. lower-*.f64N/A

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{1 + \frac{-1}{2} \cdot {delta}^{\color{blue}{2}}} \]
      3. lower-pow.f6479.8%

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{1 + -0.5 \cdot {delta}^{2}} \]
    7. Applied rewrites79.8%

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{1 + \color{blue}{-0.5 \cdot {delta}^{2}}} \]

    if -45 < phi1

    1. Initial program 99.8%

      \[\lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \sin \sin^{-1} \left(\sin \phi_1 \cdot \cos delta + \left(\cos \phi_1 \cdot \sin delta\right) \cdot \cos theta\right)} \]
    2. Taylor expanded in phi1 around 0

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\color{blue}{\cos delta}} \]
    3. Step-by-step derivation
      1. lower-cos.f6488.7%

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta} \]
    4. Applied rewrites88.7%

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\color{blue}{\cos delta}} \]
    5. Taylor expanded in phi1 around 0

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\color{blue}{\sin delta \cdot \sin theta}}{\cos delta} \]
    6. Step-by-step derivation
      1. lower-*.f64N/A

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin delta \cdot \color{blue}{\sin theta}}{\cos delta} \]
      2. lower-sin.f64N/A

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin delta \cdot \sin \color{blue}{theta}}{\cos delta} \]
      3. lower-sin.f6486.2%

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin delta \cdot \sin theta}{\cos delta} \]
    7. Applied rewrites86.2%

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\color{blue}{\sin delta \cdot \sin theta}}{\cos delta} \]
  3. Recombined 2 regimes into one program.
  4. Add Preprocessing

Alternative 11: 86.2% accurate, 3.4× speedup?

\[\lambda_1 + \tan^{-1}_* \frac{\sin delta \cdot \sin theta}{\cos delta} \]
;; Alternative 11: drop the phi1 terms entirely (Taylor expansion around phi1 = 0).
(FPCore (lambda1 phi1 phi2 delta theta)
 :precision binary64
 (+ lambda1 (atan2 (* (sin delta) (sin theta)) (cos delta))))
double code(double lambda1, double phi1, double phi2, double delta, double theta) {
	return lambda1 + atan2((sin(delta) * sin(theta)), cos(delta));
}
module fmin_fmax_functions
    ! NaN-aware min/max helpers with C fmin/fmax semantics for every
    ! combination of real(4) and real(8) arguments.  Each wrapper returns
    ! the other operand when exactly one argument is NaN, using the
    ! standard (x /= x) NaN test; mixed-kind results are promoted to real(8).
    implicit none
    private
    public fmax
    public fmin

    interface fmax
        module procedure fmax88
        module procedure fmax44
        module procedure fmax84
        module procedure fmax48
    end interface
    interface fmin
        module procedure fmin88
        module procedure fmin44
        module procedure fmin84
        module procedure fmin48
    end interface
contains
    ! fmax of two real(8) values.
    real(8) function fmax88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    ! fmax of two real(4) values.
    real(4) function fmax44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    ! fmax of real(8) x and real(4) y; result promoted to real(8).
    real(8) function fmax84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
    end function
    ! fmax of real(4) x and real(8) y; result promoted to real(8).
    real(8) function fmax48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
    end function
    ! fmin of two real(8) values.
    real(8) function fmin88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    ! fmin of two real(4) values.
    real(4) function fmin44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    ! fmin of real(8) x and real(4) y; result promoted to real(8).
    real(8) function fmin84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
    end function
    ! fmin of real(4) x and real(8) y; result promoted to real(8).
    real(8) function fmin48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
    end function
end module

real(8) function code(lambda1, phi1, phi2, delta, theta)
! Fully simplified destination longitude (Alternative 11):
!   lambda1 + atan2(sin(delta)*sin(theta), cos(delta))
! phi1 and phi2 are unused but kept for interface compatibility.
use fmin_fmax_functions
    implicit none
    real(8), intent (in) :: lambda1
    real(8), intent (in) :: phi1
    real(8), intent (in) :: phi2
    real(8), intent (in) :: delta
    real(8), intent (in) :: theta
    code = lambda1 + atan2((sin(delta) * sin(theta)), cos(delta))
end function
public static double code(double lambda1, double phi1, double phi2, double delta, double theta) {
	// Fully simplified destination longitude: phi1/phi2 no longer affect the result.
	double y = Math.sin(delta) * Math.sin(theta);
	double x = Math.cos(delta);
	return lambda1 + Math.atan2(y, x);
}
def code(lambda1, phi1, phi2, delta, theta):
	"""Fully simplified destination longitude; phi1/phi2 do not affect the result."""
	y = math.sin(delta) * math.sin(theta)
	x = math.cos(delta)
	return lambda1 + math.atan2(y, x)
function code(lambda1, phi1, phi2, delta, theta)
	# Fully simplified destination longitude; two-argument atan(y, x) is atan2.
	return Float64(lambda1 + atan(Float64(sin(delta) * sin(theta)), cos(delta)))
end
function tmp = code(lambda1, phi1, phi2, delta, theta)
	% Fully simplified destination longitude; phi1/phi2 do not affect the result.
	tmp = lambda1 + atan2((sin(delta) * sin(theta)), cos(delta));
end
(* Fully simplified destination longitude; N[..., $MachinePrecision] keeps evaluation at machine-double precision. *)
code[lambda1_, phi1_, phi2_, delta_, theta_] := N[(lambda1 + N[ArcTan[N[(N[Sin[delta], $MachinePrecision] * N[Sin[theta], $MachinePrecision]), $MachinePrecision] / N[Cos[delta], $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\lambda_1 + \tan^{-1}_* \frac{\sin delta \cdot \sin theta}{\cos delta}
Derivation
  1. Initial program 99.8%

    \[\lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \sin \sin^{-1} \left(\sin \phi_1 \cdot \cos delta + \left(\cos \phi_1 \cdot \sin delta\right) \cdot \cos theta\right)} \]
  2. Taylor expanded in phi1 around 0

    \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\color{blue}{\cos delta}} \]
  3. Step-by-step derivation
    1. lower-cos.f6488.7%

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta} \]
  4. Applied rewrites88.7%

    \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\color{blue}{\cos delta}} \]
  5. Taylor expanded in phi1 around 0

    \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\color{blue}{\sin delta \cdot \sin theta}}{\cos delta} \]
  6. Step-by-step derivation
    1. lower-*.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin delta \cdot \color{blue}{\sin theta}}{\cos delta} \]
    2. lower-sin.f64N/A

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin delta \cdot \sin \color{blue}{theta}}{\cos delta} \]
    3. lower-sin.f6486.2%

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin delta \cdot \sin theta}{\cos delta} \]
  7. Applied rewrites86.2%

    \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\color{blue}{\sin delta \cdot \sin theta}}{\cos delta} \]
  8. Add Preprocessing

Alternative 12: 79.9% accurate, 3.4× speedup?

\[\begin{array}{l} \mathbf{if}\;theta \leq -5 \cdot 10^{-89}:\\ \;\;\;\;\tan^{-1}_* \frac{\sin delta \cdot \sin theta}{\mathsf{fma}\left(-0.5 \cdot delta, delta, 1\right)} + \lambda_1\\ \mathbf{elif}\;theta \leq 3.55 \cdot 10^{-16}:\\ \;\;\;\;\lambda_1 + \tan^{-1}_* \frac{\sin delta \cdot \left(theta \cdot \left(1 + -0.16666666666666666 \cdot {theta}^{2}\right)\right)}{\cos delta}\\ \mathbf{else}:\\ \;\;\;\;\lambda_1 + \tan^{-1}_* \frac{delta \cdot \left(\cos \phi_1 \cdot \sin theta\right)}{1 + -0.5 \cdot {delta}^{2}}\\ \end{array} \]
;; Alternative 12: three regimes in theta.  Uses fma for the Taylor denominator
;; 1 - delta^2/2 and a cubic Taylor polynomial of sin(theta) for tiny theta.
(FPCore (lambda1 phi1 phi2 delta theta)
 :precision binary64
 (if (<= theta -5e-89)
   (+
    (atan2 (* (sin delta) (sin theta)) (fma (* -0.5 delta) delta 1.0))
    lambda1)
   (if (<= theta 3.55e-16)
     (+
      lambda1
      (atan2
       (*
        (sin delta)
        (* theta (+ 1.0 (* -0.16666666666666666 (pow theta 2.0)))))
       (cos delta)))
     (+
      lambda1
      (atan2
       (* delta (* (cos phi1) (sin theta)))
       (+ 1.0 (* -0.5 (pow delta 2.0))))))))
double code(double lambda1, double phi1, double phi2, double delta, double theta) {
	double tmp;
	if (theta <= -5e-89) {
		tmp = atan2((sin(delta) * sin(theta)), fma((-0.5 * delta), delta, 1.0)) + lambda1;
	} else if (theta <= 3.55e-16) {
		tmp = lambda1 + atan2((sin(delta) * (theta * (1.0 + (-0.16666666666666666 * pow(theta, 2.0))))), cos(delta));
	} else {
		tmp = lambda1 + atan2((delta * (cos(phi1) * sin(theta))), (1.0 + (-0.5 * pow(delta, 2.0))));
	}
	return tmp;
}
function code(lambda1, phi1, phi2, delta, theta)
	# Three-regime destination-longitude approximation (phi2 unused).
	# Two-argument atan(y, x) is Julia's atan2.
	tmp = 0.0
	if (theta <= -5e-89)
		# fma-based evaluation of the Taylor denominator 1 - delta^2/2.
		tmp = Float64(atan(Float64(sin(delta) * sin(theta)), fma(Float64(-0.5 * delta), delta, 1.0)) + lambda1);
	elseif (theta <= 3.55e-16)
		# Tiny theta: cubic Taylor polynomial replaces sin(theta).
		tmp = Float64(lambda1 + atan(Float64(sin(delta) * Float64(theta * Float64(1.0 + Float64(-0.16666666666666666 * (theta ^ 2.0))))), cos(delta)));
	else
		tmp = Float64(lambda1 + atan(Float64(delta * Float64(cos(phi1) * sin(theta))), Float64(1.0 + Float64(-0.5 * (delta ^ 2.0)))));
	end
	return tmp
end
(* Three-regime destination-longitude approximation; N[..., $MachinePrecision] keeps evaluation at machine-double precision. *)
code[lambda1_, phi1_, phi2_, delta_, theta_] := If[LessEqual[theta, -5e-89], N[(N[ArcTan[N[(N[Sin[delta], $MachinePrecision] * N[Sin[theta], $MachinePrecision]), $MachinePrecision] / N[(N[(-0.5 * delta), $MachinePrecision] * delta + 1.0), $MachinePrecision]], $MachinePrecision] + lambda1), $MachinePrecision], If[LessEqual[theta, 3.55e-16], N[(lambda1 + N[ArcTan[N[(N[Sin[delta], $MachinePrecision] * N[(theta * N[(1.0 + N[(-0.16666666666666666 * N[Power[theta, 2.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[Cos[delta], $MachinePrecision]], $MachinePrecision]), $MachinePrecision], N[(lambda1 + N[ArcTan[N[(delta * N[(N[Cos[phi1], $MachinePrecision] * N[Sin[theta], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(1.0 + N[(-0.5 * N[Power[delta, 2.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\mathbf{if}\;theta \leq -5 \cdot 10^{-89}:\\
\;\;\;\;\tan^{-1}_* \frac{\sin delta \cdot \sin theta}{\mathsf{fma}\left(-0.5 \cdot delta, delta, 1\right)} + \lambda_1\\

\mathbf{elif}\;theta \leq 3.55 \cdot 10^{-16}:\\
\;\;\;\;\lambda_1 + \tan^{-1}_* \frac{\sin delta \cdot \left(theta \cdot \left(1 + -0.16666666666666666 \cdot {theta}^{2}\right)\right)}{\cos delta}\\

\mathbf{else}:\\
\;\;\;\;\lambda_1 + \tan^{-1}_* \frac{delta \cdot \left(\cos \phi_1 \cdot \sin theta\right)}{1 + -0.5 \cdot {delta}^{2}}\\


\end{array}
Derivation
  1. Split input into 3 regimes
  2. if theta < -4.9999999999999997e-89

    1. Initial program 99.8%

      \[\lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \sin \sin^{-1} \left(\sin \phi_1 \cdot \cos delta + \left(\cos \phi_1 \cdot \sin delta\right) \cdot \cos theta\right)} \]
    2. Taylor expanded in phi1 around 0

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\color{blue}{\cos delta}} \]
    3. Step-by-step derivation
      1. lower-cos.f6488.7%

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta} \]
    4. Applied rewrites88.7%

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\color{blue}{\cos delta}} \]
    5. Taylor expanded in phi1 around 0

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\color{blue}{\sin delta \cdot \sin theta}}{\cos delta} \]
    6. Step-by-step derivation
      1. lower-*.f64N/A

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin delta \cdot \color{blue}{\sin theta}}{\cos delta} \]
      2. lower-sin.f64N/A

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin delta \cdot \sin \color{blue}{theta}}{\cos delta} \]
      3. lower-sin.f6486.2%

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin delta \cdot \sin theta}{\cos delta} \]
    7. Applied rewrites86.2%

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\color{blue}{\sin delta \cdot \sin theta}}{\cos delta} \]
    8. Taylor expanded in delta around 0

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin delta \cdot \sin theta}{1 + \color{blue}{\frac{-1}{2} \cdot {delta}^{2}}} \]
    9. Step-by-step derivation
      1. lower-+.f64N/A

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin delta \cdot \sin theta}{1 + \frac{-1}{2} \cdot \color{blue}{{delta}^{2}}} \]
      2. lower-*.f64N/A

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin delta \cdot \sin theta}{1 + \frac{-1}{2} \cdot {delta}^{\color{blue}{2}}} \]
      3. lower-pow.f6477.0%

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin delta \cdot \sin theta}{1 + -0.5 \cdot {delta}^{2}} \]
    10. Applied rewrites77.0%

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin delta \cdot \sin theta}{1 + \color{blue}{-0.5 \cdot {delta}^{2}}} \]
    11. Step-by-step derivation
      1. lift-+.f64N/A

        \[\leadsto \color{blue}{\lambda_1 + \tan^{-1}_* \frac{\sin delta \cdot \sin theta}{1 + \frac{-1}{2} \cdot {delta}^{2}}} \]
      2. +-commutativeN/A

        \[\leadsto \color{blue}{\tan^{-1}_* \frac{\sin delta \cdot \sin theta}{1 + \frac{-1}{2} \cdot {delta}^{2}} + \lambda_1} \]
      3. lower-+.f6477.0%

        \[\leadsto \color{blue}{\tan^{-1}_* \frac{\sin delta \cdot \sin theta}{1 + -0.5 \cdot {delta}^{2}} + \lambda_1} \]
    12. Applied rewrites77.0%

      \[\leadsto \color{blue}{\tan^{-1}_* \frac{\sin delta \cdot \sin theta}{\mathsf{fma}\left(-0.5 \cdot delta, delta, 1\right)} + \lambda_1} \]

    if -4.9999999999999997e-89 < theta < 3.55e-16

    1. Initial program 99.8%

      \[\lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \sin \sin^{-1} \left(\sin \phi_1 \cdot \cos delta + \left(\cos \phi_1 \cdot \sin delta\right) \cdot \cos theta\right)} \]
    2. Taylor expanded in phi1 around 0

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\color{blue}{\cos delta}} \]
    3. Step-by-step derivation
      1. lower-cos.f6488.7%

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta} \]
    4. Applied rewrites88.7%

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\color{blue}{\cos delta}} \]
    5. Taylor expanded in phi1 around 0

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\color{blue}{\sin delta \cdot \sin theta}}{\cos delta} \]
    6. Step-by-step derivation
      1. lower-*.f64N/A

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin delta \cdot \color{blue}{\sin theta}}{\cos delta} \]
      2. lower-sin.f64N/A

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin delta \cdot \sin \color{blue}{theta}}{\cos delta} \]
      3. lower-sin.f6486.2%

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin delta \cdot \sin theta}{\cos delta} \]
    7. Applied rewrites86.2%

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\color{blue}{\sin delta \cdot \sin theta}}{\cos delta} \]
    8. Taylor expanded in theta around 0

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin delta \cdot \left(theta \cdot \color{blue}{\left(1 + \frac{-1}{6} \cdot {theta}^{2}\right)}\right)}{\cos delta} \]
    9. Step-by-step derivation
      1. lower-*.f64N/A

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin delta \cdot \left(theta \cdot \left(1 + \color{blue}{\frac{-1}{6} \cdot {theta}^{2}}\right)\right)}{\cos delta} \]
      2. lower-+.f64N/A

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin delta \cdot \left(theta \cdot \left(1 + \frac{-1}{6} \cdot \color{blue}{{theta}^{2}}\right)\right)}{\cos delta} \]
      3. lower-*.f64N/A

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin delta \cdot \left(theta \cdot \left(1 + \frac{-1}{6} \cdot {theta}^{\color{blue}{2}}\right)\right)}{\cos delta} \]
      4. lower-pow.f6471.8%

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin delta \cdot \left(theta \cdot \left(1 + -0.16666666666666666 \cdot {theta}^{2}\right)\right)}{\cos delta} \]
    10. Applied rewrites71.8%

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin delta \cdot \left(theta \cdot \color{blue}{\left(1 + -0.16666666666666666 \cdot {theta}^{2}\right)}\right)}{\cos delta} \]

    if 3.55e-16 < theta

    1. Initial program 99.8%

      \[\lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \sin \sin^{-1} \left(\sin \phi_1 \cdot \cos delta + \left(\cos \phi_1 \cdot \sin delta\right) \cdot \cos theta\right)} \]
    2. Taylor expanded in phi1 around 0

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\color{blue}{\cos delta}} \]
    3. Step-by-step derivation
      1. lower-cos.f6488.7%

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta} \]
    4. Applied rewrites88.7%

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\color{blue}{\cos delta}} \]
    5. Taylor expanded in phi1 around 0

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\color{blue}{\sin delta \cdot \sin theta}}{\cos delta} \]
    6. Step-by-step derivation
      1. lower-*.f64N/A

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin delta \cdot \color{blue}{\sin theta}}{\cos delta} \]
      2. lower-sin.f64N/A

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin delta \cdot \sin \color{blue}{theta}}{\cos delta} \]
      3. lower-sin.f6486.2%

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin delta \cdot \sin theta}{\cos delta} \]
    7. Applied rewrites86.2%

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\color{blue}{\sin delta \cdot \sin theta}}{\cos delta} \]
    8. Taylor expanded in delta around 0

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin delta \cdot \sin theta}{1 + \color{blue}{\frac{-1}{2} \cdot {delta}^{2}}} \]
    9. Step-by-step derivation
      1. lower-+.f64N/A

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin delta \cdot \sin theta}{1 + \frac{-1}{2} \cdot \color{blue}{{delta}^{2}}} \]
      2. lower-*.f64N/A

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin delta \cdot \sin theta}{1 + \frac{-1}{2} \cdot {delta}^{\color{blue}{2}}} \]
      3. lower-pow.f6477.0%

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin delta \cdot \sin theta}{1 + -0.5 \cdot {delta}^{2}} \]
    10. Applied rewrites77.0%

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin delta \cdot \sin theta}{1 + \color{blue}{-0.5 \cdot {delta}^{2}}} \]
    11. Taylor expanded in delta around 0

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\color{blue}{delta \cdot \left(\cos \phi_1 \cdot \sin theta\right)}}{1 + -0.5 \cdot {delta}^{2}} \]
    12. Step-by-step derivation
      1. lower-*.f64N/A

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{delta \cdot \color{blue}{\left(\cos \phi_1 \cdot \sin theta\right)}}{1 + \frac{-1}{2} \cdot {delta}^{2}} \]
      2. lower-*.f64N/A

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{delta \cdot \left(\cos \phi_1 \cdot \color{blue}{\sin theta}\right)}{1 + \frac{-1}{2} \cdot {delta}^{2}} \]
      3. lower-cos.f64N/A

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{delta \cdot \left(\cos \phi_1 \cdot \sin \color{blue}{theta}\right)}{1 + \frac{-1}{2} \cdot {delta}^{2}} \]
      4. lower-sin.f6475.7%

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{delta \cdot \left(\cos \phi_1 \cdot \sin theta\right)}{1 + -0.5 \cdot {delta}^{2}} \]
    13. Applied rewrites75.7%

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\color{blue}{delta \cdot \left(\cos \phi_1 \cdot \sin theta\right)}}{1 + -0.5 \cdot {delta}^{2}} \]
  3. Recombined 3 regimes into one program.
  4. Add Preprocessing

Alternative 13: 77.4% accurate, 3.5× speedup?

\[\begin{array}{l} \mathbf{if}\;\phi_1 \leq -9.4 \cdot 10^{+76}:\\ \;\;\;\;\lambda_1 + \tan^{-1}_* \frac{delta \cdot \left(\cos \phi_1 \cdot \sin theta\right)}{1 + -0.5 \cdot {delta}^{2}}\\ \mathbf{else}:\\ \;\;\;\;\tan^{-1}_* \frac{\sin delta \cdot \sin theta}{\mathsf{fma}\left(-0.5 \cdot delta, delta, 1\right)} + \lambda_1\\ \end{array} \]
;; Alternative 13: two regimes split at phi1 = -9.4e76; both branches replace
;; cos(delta) by its second-order Taylor polynomial (fma-based in the else branch).
(FPCore (lambda1 phi1 phi2 delta theta)
 :precision binary64
 (if (<= phi1 -9.4e+76)
   (+
    lambda1
    (atan2
     (* delta (* (cos phi1) (sin theta)))
     (+ 1.0 (* -0.5 (pow delta 2.0)))))
   (+
    (atan2 (* (sin delta) (sin theta)) (fma (* -0.5 delta) delta 1.0))
    lambda1)))
double code(double lambda1, double phi1, double phi2, double delta, double theta) {
	double tmp;
	if (phi1 <= -9.4e+76) {
		tmp = lambda1 + atan2((delta * (cos(phi1) * sin(theta))), (1.0 + (-0.5 * pow(delta, 2.0))));
	} else {
		tmp = atan2((sin(delta) * sin(theta)), fma((-0.5 * delta), delta, 1.0)) + lambda1;
	}
	return tmp;
}
# Herbie "Alternative 13": regime split on phi1 between two series
# approximations; two-argument atan is Julia's atan2.
function code(lambda1, phi1, phi2, delta, theta)
	if phi1 <= -9.4e+76
		num = Float64(delta * Float64(cos(phi1) * sin(theta)))
		den = Float64(1.0 + Float64(-0.5 * (delta ^ 2.0)))
		return Float64(lambda1 + atan(num, den))
	else
		num = Float64(sin(delta) * sin(theta))
		den = fma(Float64(-0.5 * delta), delta, 1.0)
		return Float64(atan(num, den) + lambda1)
	end
end
(* Herbie "Alternative 13": piecewise on phi1; both branches use the series denominator 1 - delta^2/2. N[..., $MachinePrecision] mimics binary64 rounding at every step. *)
code[lambda1_, phi1_, phi2_, delta_, theta_] := If[LessEqual[phi1, -9.4e+76], N[(lambda1 + N[ArcTan[N[(delta * N[(N[Cos[phi1], $MachinePrecision] * N[Sin[theta], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(1.0 + N[(-0.5 * N[Power[delta, 2.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision], N[(N[ArcTan[N[(N[Sin[delta], $MachinePrecision] * N[Sin[theta], $MachinePrecision]), $MachinePrecision] / N[(N[(-0.5 * delta), $MachinePrecision] * delta + 1.0), $MachinePrecision]], $MachinePrecision] + lambda1), $MachinePrecision]]
\begin{array}{l}
\mathbf{if}\;\phi_1 \leq -9.4 \cdot 10^{+76}:\\
\;\;\;\;\lambda_1 + \tan^{-1}_* \frac{delta \cdot \left(\cos \phi_1 \cdot \sin theta\right)}{1 + -0.5 \cdot {delta}^{2}}\\

\mathbf{else}:\\
\;\;\;\;\tan^{-1}_* \frac{\sin delta \cdot \sin theta}{\mathsf{fma}\left(-0.5 \cdot delta, delta, 1\right)} + \lambda_1\\


\end{array}
Derivation
  1. Split input into 2 regimes
  2. if phi1 < -9.4000000000000006e76

    1. Initial program 99.8%

      \[\lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \sin \sin^{-1} \left(\sin \phi_1 \cdot \cos delta + \left(\cos \phi_1 \cdot \sin delta\right) \cdot \cos theta\right)} \]
    2. Taylor expanded in phi1 around 0

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\color{blue}{\cos delta}} \]
    3. Step-by-step derivation
      1. lower-cos.f64 (88.7%)

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta} \]
    4. Applied rewrites (88.7%)

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\color{blue}{\cos delta}} \]
    5. Taylor expanded in phi1 around 0

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\color{blue}{\sin delta \cdot \sin theta}}{\cos delta} \]
    6. Step-by-step derivation
      1. lower-*.f64 (N/A)

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin delta \cdot \color{blue}{\sin theta}}{\cos delta} \]
      2. lower-sin.f64 (N/A)

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin delta \cdot \sin \color{blue}{theta}}{\cos delta} \]
      3. lower-sin.f64 (86.2%)

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin delta \cdot \sin theta}{\cos delta} \]
    7. Applied rewrites (86.2%)

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\color{blue}{\sin delta \cdot \sin theta}}{\cos delta} \]
    8. Taylor expanded in delta around 0

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin delta \cdot \sin theta}{1 + \color{blue}{\frac{-1}{2} \cdot {delta}^{2}}} \]
    9. Step-by-step derivation
      1. lower-+.f64 (N/A)

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin delta \cdot \sin theta}{1 + \frac{-1}{2} \cdot \color{blue}{{delta}^{2}}} \]
      2. lower-*.f64 (N/A)

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin delta \cdot \sin theta}{1 + \frac{-1}{2} \cdot {delta}^{\color{blue}{2}}} \]
      3. lower-pow.f64 (77.0%)

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin delta \cdot \sin theta}{1 + -0.5 \cdot {delta}^{2}} \]
    10. Applied rewrites (77.0%)

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin delta \cdot \sin theta}{1 + \color{blue}{-0.5 \cdot {delta}^{2}}} \]
    11. Taylor expanded in delta around 0

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\color{blue}{delta \cdot \left(\cos \phi_1 \cdot \sin theta\right)}}{1 + -0.5 \cdot {delta}^{2}} \]
    12. Step-by-step derivation
      1. lower-*.f64 (N/A)

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{delta \cdot \color{blue}{\left(\cos \phi_1 \cdot \sin theta\right)}}{1 + \frac{-1}{2} \cdot {delta}^{2}} \]
      2. lower-*.f64 (N/A)

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{delta \cdot \left(\cos \phi_1 \cdot \color{blue}{\sin theta}\right)}{1 + \frac{-1}{2} \cdot {delta}^{2}} \]
      3. lower-cos.f64 (N/A)

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{delta \cdot \left(\cos \phi_1 \cdot \sin \color{blue}{theta}\right)}{1 + \frac{-1}{2} \cdot {delta}^{2}} \]
      4. lower-sin.f64 (75.7%)

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{delta \cdot \left(\cos \phi_1 \cdot \sin theta\right)}{1 + -0.5 \cdot {delta}^{2}} \]
    13. Applied rewrites (75.7%)

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\color{blue}{delta \cdot \left(\cos \phi_1 \cdot \sin theta\right)}}{1 + -0.5 \cdot {delta}^{2}} \]

    if -9.4000000000000006e76 < phi1

    1. Initial program 99.8%

      \[\lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \sin \sin^{-1} \left(\sin \phi_1 \cdot \cos delta + \left(\cos \phi_1 \cdot \sin delta\right) \cdot \cos theta\right)} \]
    2. Taylor expanded in phi1 around 0

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\color{blue}{\cos delta}} \]
    3. Step-by-step derivation
      1. lower-cos.f64 (88.7%)

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta} \]
    4. Applied rewrites (88.7%)

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\color{blue}{\cos delta}} \]
    5. Taylor expanded in phi1 around 0

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\color{blue}{\sin delta \cdot \sin theta}}{\cos delta} \]
    6. Step-by-step derivation
      1. lower-*.f64 (N/A)

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin delta \cdot \color{blue}{\sin theta}}{\cos delta} \]
      2. lower-sin.f64 (N/A)

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin delta \cdot \sin \color{blue}{theta}}{\cos delta} \]
      3. lower-sin.f64 (86.2%)

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin delta \cdot \sin theta}{\cos delta} \]
    7. Applied rewrites (86.2%)

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\color{blue}{\sin delta \cdot \sin theta}}{\cos delta} \]
    8. Taylor expanded in delta around 0

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin delta \cdot \sin theta}{1 + \color{blue}{\frac{-1}{2} \cdot {delta}^{2}}} \]
    9. Step-by-step derivation
      1. lower-+.f64 (N/A)

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin delta \cdot \sin theta}{1 + \frac{-1}{2} \cdot \color{blue}{{delta}^{2}}} \]
      2. lower-*.f64 (N/A)

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin delta \cdot \sin theta}{1 + \frac{-1}{2} \cdot {delta}^{\color{blue}{2}}} \]
      3. lower-pow.f64 (77.0%)

        \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin delta \cdot \sin theta}{1 + -0.5 \cdot {delta}^{2}} \]
    10. Applied rewrites (77.0%)

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin delta \cdot \sin theta}{1 + \color{blue}{-0.5 \cdot {delta}^{2}}} \]
    11. Step-by-step derivation
      1. lift-+.f64 (N/A)

        \[\leadsto \color{blue}{\lambda_1 + \tan^{-1}_* \frac{\sin delta \cdot \sin theta}{1 + \frac{-1}{2} \cdot {delta}^{2}}} \]
      2. +-commutative (N/A)

        \[\leadsto \color{blue}{\tan^{-1}_* \frac{\sin delta \cdot \sin theta}{1 + \frac{-1}{2} \cdot {delta}^{2}} + \lambda_1} \]
      3. lower-+.f64 (77.0%)

        \[\leadsto \color{blue}{\tan^{-1}_* \frac{\sin delta \cdot \sin theta}{1 + -0.5 \cdot {delta}^{2}} + \lambda_1} \]
    12. Applied rewrites (77.0%)

      \[\leadsto \color{blue}{\tan^{-1}_* \frac{\sin delta \cdot \sin theta}{\mathsf{fma}\left(-0.5 \cdot delta, delta, 1\right)} + \lambda_1} \]
  3. Recombined 2 regimes into one program.
  4. Add Preprocessing

Alternative 14: 77.0% accurate, 4.2× speedup?

\[\tan^{-1}_* \frac{\sin delta \cdot \sin theta}{\mathsf{fma}\left(-0.5 \cdot delta, delta, 1\right)} + \lambda_1 \]
;; Herbie alternative 14: single-regime series approximation; the atan2
;; denominator 1 - delta^2/2 is computed with a fused multiply-add.
(FPCore (lambda1 phi1 phi2 delta theta)
 :precision binary64
 (+
  (atan2 (* (sin delta) (sin theta)) (fma (* -0.5 delta) delta 1.0))
  lambda1))
double code(double lambda1, double phi1, double phi2, double delta, double theta) {
	return atan2((sin(delta) * sin(theta)), fma((-0.5 * delta), delta, 1.0)) + lambda1;
}
# Herbie "Alternative 14": single-regime form; two-argument atan is atan2.
function code(lambda1, phi1, phi2, delta, theta)
	num = Float64(sin(delta) * sin(theta))
	den = fma(Float64(-0.5 * delta), delta, 1.0)
	return Float64(atan(num, den) + lambda1)
end
(* Herbie "Alternative 14": single-regime form; N[..., $MachinePrecision] mimics binary64 rounding at every step. *)
code[lambda1_, phi1_, phi2_, delta_, theta_] := N[(N[ArcTan[N[(N[Sin[delta], $MachinePrecision] * N[Sin[theta], $MachinePrecision]), $MachinePrecision] / N[(N[(-0.5 * delta), $MachinePrecision] * delta + 1.0), $MachinePrecision]], $MachinePrecision] + lambda1), $MachinePrecision]
\tan^{-1}_* \frac{\sin delta \cdot \sin theta}{\mathsf{fma}\left(-0.5 \cdot delta, delta, 1\right)} + \lambda_1
Derivation
  1. Initial program 99.8%

    \[\lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \sin \sin^{-1} \left(\sin \phi_1 \cdot \cos delta + \left(\cos \phi_1 \cdot \sin delta\right) \cdot \cos theta\right)} \]
  2. Taylor expanded in phi1 around 0

    \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\color{blue}{\cos delta}} \]
  3. Step-by-step derivation
    1. lower-cos.f64 (88.7%)

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta} \]
  4. Applied rewrites (88.7%)

    \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\color{blue}{\cos delta}} \]
  5. Taylor expanded in phi1 around 0

    \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\color{blue}{\sin delta \cdot \sin theta}}{\cos delta} \]
  6. Step-by-step derivation
    1. lower-*.f64 (N/A)

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin delta \cdot \color{blue}{\sin theta}}{\cos delta} \]
    2. lower-sin.f64 (N/A)

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin delta \cdot \sin \color{blue}{theta}}{\cos delta} \]
    3. lower-sin.f64 (86.2%)

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin delta \cdot \sin theta}{\cos delta} \]
  7. Applied rewrites (86.2%)

    \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\color{blue}{\sin delta \cdot \sin theta}}{\cos delta} \]
  8. Taylor expanded in delta around 0

    \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin delta \cdot \sin theta}{1 + \color{blue}{\frac{-1}{2} \cdot {delta}^{2}}} \]
  9. Step-by-step derivation
    1. lower-+.f64 (N/A)

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin delta \cdot \sin theta}{1 + \frac{-1}{2} \cdot \color{blue}{{delta}^{2}}} \]
    2. lower-*.f64 (N/A)

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin delta \cdot \sin theta}{1 + \frac{-1}{2} \cdot {delta}^{\color{blue}{2}}} \]
    3. lower-pow.f64 (77.0%)

      \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin delta \cdot \sin theta}{1 + -0.5 \cdot {delta}^{2}} \]
  10. Applied rewrites (77.0%)

    \[\leadsto \lambda_1 + \tan^{-1}_* \frac{\sin delta \cdot \sin theta}{1 + \color{blue}{-0.5 \cdot {delta}^{2}}} \]
  11. Step-by-step derivation
    1. lift-+.f64 (N/A)

      \[\leadsto \color{blue}{\lambda_1 + \tan^{-1}_* \frac{\sin delta \cdot \sin theta}{1 + \frac{-1}{2} \cdot {delta}^{2}}} \]
    2. +-commutative (N/A)

      \[\leadsto \color{blue}{\tan^{-1}_* \frac{\sin delta \cdot \sin theta}{1 + \frac{-1}{2} \cdot {delta}^{2}} + \lambda_1} \]
    3. lower-+.f64 (77.0%)

      \[\leadsto \color{blue}{\tan^{-1}_* \frac{\sin delta \cdot \sin theta}{1 + -0.5 \cdot {delta}^{2}} + \lambda_1} \]
  12. Applied rewrites (77.0%)

    \[\leadsto \color{blue}{\tan^{-1}_* \frac{\sin delta \cdot \sin theta}{\mathsf{fma}\left(-0.5 \cdot delta, delta, 1\right)} + \lambda_1} \]
  13. Add Preprocessing

Reproduce

?
herbie shell --seed 2025209 
;; Original (unoptimized) FPCore input program; feed it to the
;; `herbie shell` command above (seed 2025209) to reproduce this report.
(FPCore (lambda1 phi1 phi2 delta theta)
  :name "Destination given bearing on a great circle"
  :precision binary64
  (+ lambda1 (atan2 (* (* (sin theta) (sin delta)) (cos phi1)) (- (cos delta) (* (sin phi1) (sin (asin (+ (* (sin phi1) (cos delta)) (* (* (cos phi1) (sin delta)) (cos theta))))))))))