
; Initial program (Herbie input), binary64 throughout.
; Value: lambda1 + atan2(num, den) with
;   num = sin(theta) * sin(delta) * cos(phi1)
;   den = cos(delta) - sin(phi1) * sin(asin(sin(phi1)*cos(delta) + cos(phi1)*sin(delta)*cos(theta)))
; NOTE(review): sin(asin(x)) is mathematically the identity; it is kept verbatim
; here so Herbie can discover the cancellation itself. phi2 is declared but unused.
(FPCore (lambda1 phi1 phi2 delta theta)
:precision binary64
(+
lambda1
(atan2
(* (* (sin theta) (sin delta)) (cos phi1))
(-
(cos delta)
(*
(sin phi1)
(sin
(asin
(+
(* (sin phi1) (cos delta))
(* (* (cos phi1) (sin delta)) (cos theta))))))))))
double code(double lambda1, double phi1, double phi2, double delta, double theta) {
return lambda1 + atan2(((sin(theta) * sin(delta)) * cos(phi1)), (cos(delta) - (sin(phi1) * sin(asin(((sin(phi1) * cos(delta)) + ((cos(phi1) * sin(delta)) * cos(theta))))))));
}
module fmin_fmax_functions
! NaN-aware generic fmax/fmin helpers (Herbie runtime support).
! (v /= v) is the portable Fortran NaN test: it is .true. only when v is NaN.
! Each merge chain implements the C-library fmax/fmin contract:
!   return y if x is NaN, x if y is NaN, otherwise max/min(x, y),
! because the intrinsic max/min have unspecified behavior for NaN operands.
implicit none
private
public fmax
public fmin
! Generics cover every real(4)/real(8) combination; mixed-kind
! variants promote to real(8) via dble before comparing.
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
! fmax for real(8), real(8).
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! fmax for real(4), real(4).
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! fmax for real(8), real(4); result is real(8).
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
! fmax for real(4), real(8); result is real(8).
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
! fmin for real(8), real(8).
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! fmin for real(4), real(4).
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! fmin for real(8), real(4); result is real(8).
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
! fmin for real(4), real(8); result is real(8).
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
!> Initial program: lambda1 + atan2(sin(theta)*sin(delta)*cos(phi1),
!> cos(delta) - sin(phi1)*sin(asin(sin(phi1)*cos(delta) + cos(phi1)*sin(delta)*cos(theta)))).
!> phi2 is accepted for a uniform signature but is not referenced.
real(8) function code(lambda1, phi1, phi2, delta, theta)
use fmin_fmax_functions
implicit none
real(8), intent (in) :: lambda1
real(8), intent (in) :: phi1
real(8), intent (in) :: phi2
real(8), intent (in) :: delta
real(8), intent (in) :: theta
code = lambda1 + atan2(((sin(theta) * sin(delta)) * cos(phi1)), (cos(delta) - (sin(phi1) * sin(asin(((sin(phi1) * cos(delta)) + ((cos(phi1) * sin(delta)) * cos(theta))))))))
end function
/**
 * Initial program: lambda1 + atan2(num, den) where
 * num = sin(theta)*sin(delta)*cos(phi1) and
 * den = cos(delta) - sin(phi1)*sin(asin(sin(phi1)*cos(delta) + cos(phi1)*sin(delta)*cos(theta))).
 * phi2 is accepted for a uniform signature but is not referenced.
 */
public static double code(double lambda1, double phi1, double phi2, double delta, double theta) {
    double sinPhi1 = Math.sin(phi1);
    double cosPhi1 = Math.cos(phi1);
    double sinDelta = Math.sin(delta);
    double cosDelta = Math.cos(delta);
    double num = Math.sin(theta) * sinDelta * cosPhi1;
    double inner = sinPhi1 * cosDelta + cosPhi1 * sinDelta * Math.cos(theta);
    double den = cosDelta - sinPhi1 * Math.sin(Math.asin(inner));
    return lambda1 + Math.atan2(num, den);
}
def code(lambda1, phi1, phi2, delta, theta):
    """Initial program: lambda1 + atan2(num, den).

    num = sin(theta)*sin(delta)*cos(phi1)
    den = cos(delta) - sin(phi1)*sin(asin(sin(phi1)*cos(delta)
                                          + cos(phi1)*sin(delta)*cos(theta)))
    phi2 is accepted for a uniform signature but is not referenced.
    """
    sin_phi1 = math.sin(phi1)
    cos_phi1 = math.cos(phi1)
    sin_delta = math.sin(delta)
    cos_delta = math.cos(delta)
    num = math.sin(theta) * sin_delta * cos_phi1
    inner = sin_phi1 * cos_delta + cos_phi1 * sin_delta * math.cos(theta)
    den = cos_delta - sin_phi1 * math.sin(math.asin(inner))
    return lambda1 + math.atan2(num, den)
# Julia port of the initial program; the two-argument atan(y, x) is Julia's atan2.
function code(lambda1, phi1, phi2, delta, theta) return Float64(lambda1 + atan(Float64(Float64(sin(theta) * sin(delta)) * cos(phi1)), Float64(cos(delta) - Float64(sin(phi1) * sin(asin(Float64(Float64(sin(phi1) * cos(delta)) + Float64(Float64(cos(phi1) * sin(delta)) * cos(theta))))))))) end
% MATLAB port of the initial program (atan2 form); phi2 is unused.
function tmp = code(lambda1, phi1, phi2, delta, theta) tmp = lambda1 + atan2(((sin(theta) * sin(delta)) * cos(phi1)), (cos(delta) - (sin(phi1) * sin(asin(((sin(phi1) * cos(delta)) + ((cos(phi1) * sin(delta)) * cos(theta)))))))); end
(* Mathematica port of the initial program; every intermediate is rounded to
   $MachinePrecision via N[...]. NOTE(review): this uses one-argument
   ArcTan[num/den], which loses the sign/quadrant information that atan2
   preserves when den < 0 — presumably an artifact of the generated
   translation; confirm before relying on it for den <= 0. *)
code[lambda1_, phi1_, phi2_, delta_, theta_] := N[(lambda1 + N[ArcTan[N[(N[(N[Sin[theta], $MachinePrecision] * N[Sin[delta], $MachinePrecision]), $MachinePrecision] * N[Cos[phi1], $MachinePrecision]), $MachinePrecision] / N[(N[Cos[delta], $MachinePrecision] - N[(N[Sin[phi1], $MachinePrecision] * N[Sin[N[ArcSin[N[(N[(N[Sin[phi1], $MachinePrecision] * N[Cos[delta], $MachinePrecision]), $MachinePrecision] + N[(N[(N[Cos[phi1], $MachinePrecision] * N[Sin[delta], $MachinePrecision]), $MachinePrecision] * N[Cos[theta], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\lambda_1 + \tan^{-1}_* \frac{\left(\sin \theta \cdot \sin \delta\right) \cdot \cos \phi_1}{\cos \delta - \sin \phi_1 \cdot \sin \sin^{-1} \left(\sin \phi_1 \cdot \cos \delta + \left(\cos \phi_1 \cdot \sin \delta\right) \cdot \cos \theta\right)}
\end{array}
Herbie found 18 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (lambda1 phi1 phi2 delta theta)
:precision binary64
(+
lambda1
(atan2
(* (* (sin theta) (sin delta)) (cos phi1))
(-
(cos delta)
(*
(sin phi1)
(sin
(asin
(+
(* (sin phi1) (cos delta))
(* (* (cos phi1) (sin delta)) (cos theta))))))))))
double code(double lambda1, double phi1, double phi2, double delta, double theta) {
return lambda1 + atan2(((sin(theta) * sin(delta)) * cos(phi1)), (cos(delta) - (sin(phi1) * sin(asin(((sin(phi1) * cos(delta)) + ((cos(phi1) * sin(delta)) * cos(theta))))))));
}
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
!> Initial program (duplicate listing): lambda1 + atan2(sin(theta)*sin(delta)*cos(phi1),
!> cos(delta) - sin(phi1)*sin(asin(sin(phi1)*cos(delta) + cos(phi1)*sin(delta)*cos(theta)))).
!> phi2 is accepted for a uniform signature but is not referenced.
real(8) function code(lambda1, phi1, phi2, delta, theta)
use fmin_fmax_functions
implicit none
real(8), intent (in) :: lambda1
real(8), intent (in) :: phi1
real(8), intent (in) :: phi2
real(8), intent (in) :: delta
real(8), intent (in) :: theta
code = lambda1 + atan2(((sin(theta) * sin(delta)) * cos(phi1)), (cos(delta) - (sin(phi1) * sin(asin(((sin(phi1) * cos(delta)) + ((cos(phi1) * sin(delta)) * cos(theta))))))))
end function
public static double code(double lambda1, double phi1, double phi2, double delta, double theta) {
return lambda1 + Math.atan2(((Math.sin(theta) * Math.sin(delta)) * Math.cos(phi1)), (Math.cos(delta) - (Math.sin(phi1) * Math.sin(Math.asin(((Math.sin(phi1) * Math.cos(delta)) + ((Math.cos(phi1) * Math.sin(delta)) * Math.cos(theta))))))));
}
def code(lambda1, phi1, phi2, delta, theta): return lambda1 + math.atan2(((math.sin(theta) * math.sin(delta)) * math.cos(phi1)), (math.cos(delta) - (math.sin(phi1) * math.sin(math.asin(((math.sin(phi1) * math.cos(delta)) + ((math.cos(phi1) * math.sin(delta)) * math.cos(theta))))))))
function code(lambda1, phi1, phi2, delta, theta) return Float64(lambda1 + atan(Float64(Float64(sin(theta) * sin(delta)) * cos(phi1)), Float64(cos(delta) - Float64(sin(phi1) * sin(asin(Float64(Float64(sin(phi1) * cos(delta)) + Float64(Float64(cos(phi1) * sin(delta)) * cos(theta))))))))) end
function tmp = code(lambda1, phi1, phi2, delta, theta) tmp = lambda1 + atan2(((sin(theta) * sin(delta)) * cos(phi1)), (cos(delta) - (sin(phi1) * sin(asin(((sin(phi1) * cos(delta)) + ((cos(phi1) * sin(delta)) * cos(theta)))))))); end
code[lambda1_, phi1_, phi2_, delta_, theta_] := N[(lambda1 + N[ArcTan[N[(N[(N[Sin[theta], $MachinePrecision] * N[Sin[delta], $MachinePrecision]), $MachinePrecision] * N[Cos[phi1], $MachinePrecision]), $MachinePrecision] / N[(N[Cos[delta], $MachinePrecision] - N[(N[Sin[phi1], $MachinePrecision] * N[Sin[N[ArcSin[N[(N[(N[Sin[phi1], $MachinePrecision] * N[Cos[delta], $MachinePrecision]), $MachinePrecision] + N[(N[(N[Cos[phi1], $MachinePrecision] * N[Sin[delta], $MachinePrecision]), $MachinePrecision] * N[Cos[theta], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \sin \sin^{-1} \left(\sin \phi_1 \cdot \cos delta + \left(\cos \phi_1 \cdot \sin delta\right) \cdot \cos theta\right)}
\end{array}
(FPCore (lambda1 phi1 phi2 delta theta)
:precision binary64
(+
lambda1
(atan2
(*
(* (sin theta) (sin delta))
(fma (sin phi1) (cos (/ PI 2.0)) (* (cos phi1) 1.0)))
(-
(cos delta)
(*
(fma (sin phi1) (cos delta) (* (cos theta) (* (cos phi1) (sin delta))))
(sin phi1))))))
// Herbie alternative: fma-based rewrite of the initial program.
// Numerator factor fma(sin(phi1), cos(M_PI/2), cos(phi1)*1.0): cos(M_PI/2) is
// about 6.12e-17, not exactly zero, so this is nearly but not bitwise equal to
// cos(phi1) — an artifact of Herbie's sin(x + pi/2) rewrite chain; kept as generated.
// Denominator: sin(asin(...)) is folded away and the sum fused into one fma.
double code(double lambda1, double phi1, double phi2, double delta, double theta) {
return lambda1 + atan2(((sin(theta) * sin(delta)) * fma(sin(phi1), cos((((double) M_PI) / 2.0)), (cos(phi1) * 1.0))), (cos(delta) - (fma(sin(phi1), cos(delta), (cos(theta) * (cos(phi1) * sin(delta)))) * sin(phi1))));
}
function code(lambda1, phi1, phi2, delta, theta) return Float64(lambda1 + atan(Float64(Float64(sin(theta) * sin(delta)) * fma(sin(phi1), cos(Float64(pi / 2.0)), Float64(cos(phi1) * 1.0))), Float64(cos(delta) - Float64(fma(sin(phi1), cos(delta), Float64(cos(theta) * Float64(cos(phi1) * sin(delta)))) * sin(phi1))))) end
code[lambda1_, phi1_, phi2_, delta_, theta_] := N[(lambda1 + N[ArcTan[N[(N[(N[Sin[theta], $MachinePrecision] * N[Sin[delta], $MachinePrecision]), $MachinePrecision] * N[(N[Sin[phi1], $MachinePrecision] * N[Cos[N[(Pi / 2.0), $MachinePrecision]], $MachinePrecision] + N[(N[Cos[phi1], $MachinePrecision] * 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(N[Cos[delta], $MachinePrecision] - N[(N[(N[Sin[phi1], $MachinePrecision] * N[Cos[delta], $MachinePrecision] + N[(N[Cos[theta], $MachinePrecision] * N[(N[Cos[phi1], $MachinePrecision] * N[Sin[delta], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[Sin[phi1], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \mathsf{fma}\left(\sin \phi_1, \cos \left(\frac{\pi}{2}\right), \cos \phi_1 \cdot 1\right)}{\cos delta - \mathsf{fma}\left(\sin \phi_1, \cos delta, \cos theta \cdot \left(\cos \phi_1 \cdot \sin delta\right)\right) \cdot \sin \phi_1}
\end{array}
Initial program 99.7%
lift-cos.f64N/A
sin-+PI/2-revN/A
sin-sumN/A
lower-fma.f64N/A
lift-sin.f64N/A
lower-cos.f64N/A
lower-/.f64N/A
lower-PI.f64N/A
lower-*.f64N/A
lift-cos.f64N/A
lower-sin.f64N/A
lower-/.f64N/A
lower-PI.f6499.7
Applied rewrites99.7%
lift-cos.f64N/A
sin-+PI/2-revN/A
sin-sumN/A
lower-fma.f64N/A
lift-sin.f64N/A
lower-cos.f64N/A
lower-/.f64N/A
lower-PI.f64N/A
lower-*.f64N/A
lift-cos.f64N/A
lower-sin.f64N/A
lower-/.f64N/A
lower-PI.f6499.7
Applied rewrites99.7%
Applied rewrites99.7%
lift-sin.f64N/A
lift-PI.f64N/A
lift-/.f64N/A
sin-PI/299.7
Applied rewrites99.7%
(FPCore (lambda1 phi1 phi2 delta theta)
:precision binary64
(+
lambda1
(atan2
(* (* (sin theta) (sin delta)) (cos phi1))
(-
(cos delta)
(*
(sin phi1)
(fma
(* (cos theta) (cos phi1))
(sin delta)
(* (sin phi1) (cos delta))))))))
double code(double lambda1, double phi1, double phi2, double delta, double theta) {
return lambda1 + atan2(((sin(theta) * sin(delta)) * cos(phi1)), (cos(delta) - (sin(phi1) * fma((cos(theta) * cos(phi1)), sin(delta), (sin(phi1) * cos(delta))))));
}
function code(lambda1, phi1, phi2, delta, theta) return Float64(lambda1 + atan(Float64(Float64(sin(theta) * sin(delta)) * cos(phi1)), Float64(cos(delta) - Float64(sin(phi1) * fma(Float64(cos(theta) * cos(phi1)), sin(delta), Float64(sin(phi1) * cos(delta))))))) end
code[lambda1_, phi1_, phi2_, delta_, theta_] := N[(lambda1 + N[ArcTan[N[(N[(N[Sin[theta], $MachinePrecision] * N[Sin[delta], $MachinePrecision]), $MachinePrecision] * N[Cos[phi1], $MachinePrecision]), $MachinePrecision] / N[(N[Cos[delta], $MachinePrecision] - N[(N[Sin[phi1], $MachinePrecision] * N[(N[(N[Cos[theta], $MachinePrecision] * N[Cos[phi1], $MachinePrecision]), $MachinePrecision] * N[Sin[delta], $MachinePrecision] + N[(N[Sin[phi1], $MachinePrecision] * N[Cos[delta], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \mathsf{fma}\left(\cos theta \cdot \cos \phi_1, \sin delta, \sin \phi_1 \cdot \cos delta\right)}
\end{array}
Initial program 99.7%
lift-sin.f64N/A
lift-asin.f64N/A
lift-+.f64N/A
lift-*.f64N/A
lift-sin.f64N/A
lift-cos.f64N/A
lift-*.f64N/A
lift-*.f64N/A
lift-cos.f64N/A
lift-sin.f64N/A
lift-cos.f64N/A
sin-asinN/A
+-commutativeN/A
Applied rewrites99.7%
(FPCore (lambda1 phi1 phi2 delta theta)
:precision binary64
(let* ((t_1 (* (* (sin theta) (sin delta)) (cos phi1)))
(t_2 (* (cos phi1) (sin delta)))
(t_3
(+
lambda1
(atan2
t_1
(-
(cos delta)
(*
(sin phi1)
(sin
(asin (+ (* (sin phi1) (cos delta)) (* t_2 (cos theta)))))))))))
(if (<= t_3 -2000.0)
(+
lambda1
(atan2
(*
(*
(sin theta)
(* (fma (* delta delta) -0.16666666666666666 1.0) delta))
(cos phi1))
(-
(cos delta)
(*
(sin phi1)
(fma
(cos delta)
(sin phi1)
(* (cos phi1) (* (cos theta) (sin delta))))))))
(if (<= t_3 -0.15)
(atan2
(* (* (sin delta) (sin theta)) (cos phi1))
(-
(cos delta)
(* (fma (sin phi1) (cos delta) (* (cos theta) t_2)) (sin phi1))))
(+
lambda1
(atan2
t_1
(- (cos delta) (* (sin phi1) (fma (sin phi1) (cos delta) t_2)))))))))
// Herbie alternative with regime selection: t_3 evaluates the ORIGINAL
// expression and its value picks one of three rewrites. Kept byte-identical;
// the branch thresholds (-2000.0, -0.15) come from Herbie's regime inference.
double code(double lambda1, double phi1, double phi2, double delta, double theta) {
// Shared numerator: sin(theta)*sin(delta)*cos(phi1).
double t_1 = (sin(theta) * sin(delta)) * cos(phi1);
double t_2 = cos(phi1) * sin(delta);
// t_3 = value of the initial program; used only to select a regime below.
double t_3 = lambda1 + atan2(t_1, (cos(delta) - (sin(phi1) * sin(asin(((sin(phi1) * cos(delta)) + (t_2 * cos(theta))))))));
double tmp;
if (t_3 <= -2000.0) {
// Large-negative regime: sin(delta) replaced by its cubic Taylor polynomial
// delta - delta^3/6 (the -0.1666... coefficient); denominator fma-fused.
tmp = lambda1 + atan2(((sin(theta) * (fma((delta * delta), -0.16666666666666666, 1.0) * delta)) * cos(phi1)), (cos(delta) - (sin(phi1) * fma(cos(delta), sin(phi1), (cos(phi1) * (cos(theta) * sin(delta)))))));
} else if (t_3 <= -0.15) {
// Mid regime: note lambda1 is dropped here (Taylor expansion of lambda1
// around 0, per the derivation log) — a deliberate accuracy/speed trade.
tmp = atan2(((sin(delta) * sin(theta)) * cos(phi1)), (cos(delta) - (fma(sin(phi1), cos(delta), (cos(theta) * t_2)) * sin(phi1))));
} else {
// Default regime: cos(theta) factor dropped from the fma addend
// (theta Taylor-expanded around 0, per the derivation log).
tmp = lambda1 + atan2(t_1, (cos(delta) - (sin(phi1) * fma(sin(phi1), cos(delta), t_2))));
}
return tmp;
}
function code(lambda1, phi1, phi2, delta, theta) t_1 = Float64(Float64(sin(theta) * sin(delta)) * cos(phi1)) t_2 = Float64(cos(phi1) * sin(delta)) t_3 = Float64(lambda1 + atan(t_1, Float64(cos(delta) - Float64(sin(phi1) * sin(asin(Float64(Float64(sin(phi1) * cos(delta)) + Float64(t_2 * cos(theta))))))))) tmp = 0.0 if (t_3 <= -2000.0) tmp = Float64(lambda1 + atan(Float64(Float64(sin(theta) * Float64(fma(Float64(delta * delta), -0.16666666666666666, 1.0) * delta)) * cos(phi1)), Float64(cos(delta) - Float64(sin(phi1) * fma(cos(delta), sin(phi1), Float64(cos(phi1) * Float64(cos(theta) * sin(delta)))))))); elseif (t_3 <= -0.15) tmp = atan(Float64(Float64(sin(delta) * sin(theta)) * cos(phi1)), Float64(cos(delta) - Float64(fma(sin(phi1), cos(delta), Float64(cos(theta) * t_2)) * sin(phi1)))); else tmp = Float64(lambda1 + atan(t_1, Float64(cos(delta) - Float64(sin(phi1) * fma(sin(phi1), cos(delta), t_2))))); end return tmp end
code[lambda1_, phi1_, phi2_, delta_, theta_] := Block[{t$95$1 = N[(N[(N[Sin[theta], $MachinePrecision] * N[Sin[delta], $MachinePrecision]), $MachinePrecision] * N[Cos[phi1], $MachinePrecision]), $MachinePrecision]}, Block[{t$95$2 = N[(N[Cos[phi1], $MachinePrecision] * N[Sin[delta], $MachinePrecision]), $MachinePrecision]}, Block[{t$95$3 = N[(lambda1 + N[ArcTan[t$95$1 / N[(N[Cos[delta], $MachinePrecision] - N[(N[Sin[phi1], $MachinePrecision] * N[Sin[N[ArcSin[N[(N[(N[Sin[phi1], $MachinePrecision] * N[Cos[delta], $MachinePrecision]), $MachinePrecision] + N[(t$95$2 * N[Cos[theta], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]}, If[LessEqual[t$95$3, -2000.0], N[(lambda1 + N[ArcTan[N[(N[(N[Sin[theta], $MachinePrecision] * N[(N[(N[(delta * delta), $MachinePrecision] * -0.16666666666666666 + 1.0), $MachinePrecision] * delta), $MachinePrecision]), $MachinePrecision] * N[Cos[phi1], $MachinePrecision]), $MachinePrecision] / N[(N[Cos[delta], $MachinePrecision] - N[(N[Sin[phi1], $MachinePrecision] * N[(N[Cos[delta], $MachinePrecision] * N[Sin[phi1], $MachinePrecision] + N[(N[Cos[phi1], $MachinePrecision] * N[(N[Cos[theta], $MachinePrecision] * N[Sin[delta], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision], If[LessEqual[t$95$3, -0.15], N[ArcTan[N[(N[(N[Sin[delta], $MachinePrecision] * N[Sin[theta], $MachinePrecision]), $MachinePrecision] * N[Cos[phi1], $MachinePrecision]), $MachinePrecision] / N[(N[Cos[delta], $MachinePrecision] - N[(N[(N[Sin[phi1], $MachinePrecision] * N[Cos[delta], $MachinePrecision] + N[(N[Cos[theta], $MachinePrecision] * t$95$2), $MachinePrecision]), $MachinePrecision] * N[Sin[phi1], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision], N[(lambda1 + N[ArcTan[t$95$1 / 
N[(N[Cos[delta], $MachinePrecision] - N[(N[Sin[phi1], $MachinePrecision] * N[(N[Sin[phi1], $MachinePrecision] * N[Cos[delta], $MachinePrecision] + t$95$2), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]]]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_1 := \left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1\\
t_2 := \cos \phi_1 \cdot \sin delta\\
t_3 := \lambda_1 + \tan^{-1}_* \frac{t\_1}{\cos delta - \sin \phi_1 \cdot \sin \sin^{-1} \left(\sin \phi_1 \cdot \cos delta + t\_2 \cdot \cos theta\right)}\\
\mathbf{if}\;t\_3 \leq -2000:\\
\;\;\;\;\lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \left(\mathsf{fma}\left(delta \cdot delta, -0.16666666666666666, 1\right) \cdot delta\right)\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \mathsf{fma}\left(\cos delta, \sin \phi_1, \cos \phi_1 \cdot \left(\cos theta \cdot \sin delta\right)\right)}\\
\mathbf{elif}\;t\_3 \leq -0.15:\\
\;\;\;\;\tan^{-1}_* \frac{\left(\sin delta \cdot \sin theta\right) \cdot \cos \phi_1}{\cos delta - \mathsf{fma}\left(\sin \phi_1, \cos delta, \cos theta \cdot t\_2\right) \cdot \sin \phi_1}\\
\mathbf{else}:\\
\;\;\;\;\lambda_1 + \tan^{-1}_* \frac{t\_1}{\cos delta - \sin \phi_1 \cdot \mathsf{fma}\left(\sin \phi_1, \cos delta, t\_2\right)}\\
\end{array}
\end{array}
if (+.f64 lambda1 (atan2.f64 (*.f64 (*.f64 (sin.f64 theta) (sin.f64 delta)) (cos.f64 phi1)) (-.f64 (cos.f64 delta) (*.f64 (sin.f64 phi1) (sin.f64 (asin.f64 (+.f64 (*.f64 (sin.f64 phi1) (cos.f64 delta)) (*.f64 (*.f64 (cos.f64 phi1) (sin.f64 delta)) (cos.f64 theta))))))))) < -2e3Initial program 100.0%
Taylor expanded in phi1 around 0
lower-*.f64N/A
lift-cos.f64N/A
lift-sin.f6499.8
Applied rewrites99.8%
lift-cos.f64N/A
sin-+PI/2-revN/A
lower-sin.f64N/A
lift-/.f64N/A
lift-PI.f64N/A
lower-+.f6499.8
Applied rewrites99.8%
Taylor expanded in delta around 0
*-commutativeN/A
lower-*.f64N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6499.2
Applied rewrites99.2%
Taylor expanded in delta around 0
*-commutativeN/A
lower-*.f64N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6499.2
Applied rewrites99.2%
Taylor expanded in phi1 around inf
lower-fma.f64N/A
lift-cos.f64N/A
lift-sin.f64N/A
lower-*.f64N/A
lift-cos.f64N/A
lower-*.f64N/A
lift-cos.f64N/A
lift-sin.f6499.2
Applied rewrites99.2%
if -2e3 < (+.f64 lambda1 (atan2.f64 (*.f64 (*.f64 (sin.f64 theta) (sin.f64 delta)) (cos.f64 phi1)) (-.f64 (cos.f64 delta) (*.f64 (sin.f64 phi1) (sin.f64 (asin.f64 (+.f64 (*.f64 (sin.f64 phi1) (cos.f64 delta)) (*.f64 (*.f64 (cos.f64 phi1) (sin.f64 delta)) (cos.f64 theta))))))))) < -0.149999999999999994Initial program 99.5%
Taylor expanded in lambda1 around 0
Applied rewrites94.9%
if -0.149999999999999994 < (+.f64 lambda1 (atan2.f64 (*.f64 (*.f64 (sin.f64 theta) (sin.f64 delta)) (cos.f64 phi1)) (-.f64 (cos.f64 delta) (*.f64 (sin.f64 phi1) (sin.f64 (asin.f64 (+.f64 (*.f64 (sin.f64 phi1) (cos.f64 delta)) (*.f64 (*.f64 (cos.f64 phi1) (sin.f64 delta)) (cos.f64 theta))))))))) Initial program 99.7%
Taylor expanded in phi1 around 0
lower-*.f64N/A
lift-cos.f64N/A
lift-sin.f6487.0
Applied rewrites87.0%
Taylor expanded in theta around 0
*-commutativeN/A
lower-fma.f64N/A
lift-sin.f64N/A
lift-cos.f64N/A
lift-cos.f64N/A
lift-sin.f64N/A
lift-*.f6495.4
Applied rewrites95.4%
(FPCore (lambda1 phi1 phi2 delta theta)
:precision binary64
(+
lambda1
(atan2
(* (* (sin theta) (sin delta)) (cos phi1))
(-
(cos delta)
(* (sin phi1) (fma (sin phi1) (cos delta) (* (cos phi1) (sin delta))))))))
// Herbie alternative (~94.4% accuracy per the log): like the fma variant but
// with the cos(theta) factor removed from the fma addend — theta was
// Taylor-expanded around 0 per the derivation log that follows this listing.
double code(double lambda1, double phi1, double phi2, double delta, double theta) {
return lambda1 + atan2(((sin(theta) * sin(delta)) * cos(phi1)), (cos(delta) - (sin(phi1) * fma(sin(phi1), cos(delta), (cos(phi1) * sin(delta))))));
}
function code(lambda1, phi1, phi2, delta, theta) return Float64(lambda1 + atan(Float64(Float64(sin(theta) * sin(delta)) * cos(phi1)), Float64(cos(delta) - Float64(sin(phi1) * fma(sin(phi1), cos(delta), Float64(cos(phi1) * sin(delta))))))) end
code[lambda1_, phi1_, phi2_, delta_, theta_] := N[(lambda1 + N[ArcTan[N[(N[(N[Sin[theta], $MachinePrecision] * N[Sin[delta], $MachinePrecision]), $MachinePrecision] * N[Cos[phi1], $MachinePrecision]), $MachinePrecision] / N[(N[Cos[delta], $MachinePrecision] - N[(N[Sin[phi1], $MachinePrecision] * N[(N[Sin[phi1], $MachinePrecision] * N[Cos[delta], $MachinePrecision] + N[(N[Cos[phi1], $MachinePrecision] * N[Sin[delta], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \mathsf{fma}\left(\sin \phi_1, \cos delta, \cos \phi_1 \cdot \sin delta\right)}
\end{array}
Initial program 99.7%
Taylor expanded in phi1 around 0
lower-*.f64N/A
lift-cos.f64N/A
lift-sin.f6488.5
Applied rewrites88.5%
Taylor expanded in theta around 0
*-commutativeN/A
lower-fma.f64N/A
lift-sin.f64N/A
lift-cos.f64N/A
lift-cos.f64N/A
lift-sin.f64N/A
lift-*.f6494.4
Applied rewrites94.4%
(FPCore (lambda1 phi1 phi2 delta theta) :precision binary64 (+ lambda1 (atan2 (* (* (sin theta) (sin delta)) (cos phi1)) (- (cos delta) (* (sin phi1) (sin (+ phi1 delta)))))))
// Herbie alternative (~91.9% accuracy per the log): after Taylor-expanding
// theta around 0, the inner sum collapses via the angle-sum identity to
// sin(phi1 + delta) ("sin-sum-rev" in the derivation log).
double code(double lambda1, double phi1, double phi2, double delta, double theta) {
return lambda1 + atan2(((sin(theta) * sin(delta)) * cos(phi1)), (cos(delta) - (sin(phi1) * sin((phi1 + delta)))));
}
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
!> Herbie alternative: the asin(...) argument collapsed to sin(phi1 + delta)
!> via the angle-sum identity (theta Taylor-expanded around 0).
!> phi2 is accepted for a uniform signature but is not referenced.
real(8) function code(lambda1, phi1, phi2, delta, theta)
use fmin_fmax_functions
implicit none
real(8), intent (in) :: lambda1
real(8), intent (in) :: phi1
real(8), intent (in) :: phi2
real(8), intent (in) :: delta
real(8), intent (in) :: theta
code = lambda1 + atan2(((sin(theta) * sin(delta)) * cos(phi1)), (cos(delta) - (sin(phi1) * sin((phi1 + delta)))))
end function
public static double code(double lambda1, double phi1, double phi2, double delta, double theta) {
return lambda1 + Math.atan2(((Math.sin(theta) * Math.sin(delta)) * Math.cos(phi1)), (Math.cos(delta) - (Math.sin(phi1) * Math.sin((phi1 + delta)))));
}
def code(lambda1, phi1, phi2, delta, theta): return lambda1 + math.atan2(((math.sin(theta) * math.sin(delta)) * math.cos(phi1)), (math.cos(delta) - (math.sin(phi1) * math.sin((phi1 + delta)))))
function code(lambda1, phi1, phi2, delta, theta) return Float64(lambda1 + atan(Float64(Float64(sin(theta) * sin(delta)) * cos(phi1)), Float64(cos(delta) - Float64(sin(phi1) * sin(Float64(phi1 + delta)))))) end
function tmp = code(lambda1, phi1, phi2, delta, theta) tmp = lambda1 + atan2(((sin(theta) * sin(delta)) * cos(phi1)), (cos(delta) - (sin(phi1) * sin((phi1 + delta))))); end
code[lambda1_, phi1_, phi2_, delta_, theta_] := N[(lambda1 + N[ArcTan[N[(N[(N[Sin[theta], $MachinePrecision] * N[Sin[delta], $MachinePrecision]), $MachinePrecision] * N[Cos[phi1], $MachinePrecision]), $MachinePrecision] / N[(N[Cos[delta], $MachinePrecision] - N[(N[Sin[phi1], $MachinePrecision] * N[Sin[N[(phi1 + delta), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \sin \left(\phi_1 + delta\right)}
\end{array}
Initial program 99.7%
Taylor expanded in theta around 0
*-commutativeN/A
sin-sum-revN/A
lower-sin.f64N/A
lower-+.f6491.9
Applied rewrites91.9%
(FPCore (lambda1 phi1 phi2 delta theta) :precision binary64 (+ lambda1 (atan2 (* (* (sin theta) (sin delta)) (cos phi1)) (- (cos delta) (* (sin phi1) (sin phi1))))))
double code(double lambda1, double phi1, double phi2, double delta, double theta) {
return lambda1 + atan2(((sin(theta) * sin(delta)) * cos(phi1)), (cos(delta) - (sin(phi1) * sin(phi1))));
}
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
!> Herbie alternative: inner term reduced to sin(phi1)**2
!> (delta Taylor-expanded around 0 in the asin argument).
!> phi2 is accepted for a uniform signature but is not referenced.
real(8) function code(lambda1, phi1, phi2, delta, theta)
use fmin_fmax_functions
implicit none
real(8), intent (in) :: lambda1
real(8), intent (in) :: phi1
real(8), intent (in) :: phi2
real(8), intent (in) :: delta
real(8), intent (in) :: theta
code = lambda1 + atan2(((sin(theta) * sin(delta)) * cos(phi1)), (cos(delta) - (sin(phi1) * sin(phi1))))
end function
public static double code(double lambda1, double phi1, double phi2, double delta, double theta) {
return lambda1 + Math.atan2(((Math.sin(theta) * Math.sin(delta)) * Math.cos(phi1)), (Math.cos(delta) - (Math.sin(phi1) * Math.sin(phi1))));
}
def code(lambda1, phi1, phi2, delta, theta): return lambda1 + math.atan2(((math.sin(theta) * math.sin(delta)) * math.cos(phi1)), (math.cos(delta) - (math.sin(phi1) * math.sin(phi1))))
function code(lambda1, phi1, phi2, delta, theta) return Float64(lambda1 + atan(Float64(Float64(sin(theta) * sin(delta)) * cos(phi1)), Float64(cos(delta) - Float64(sin(phi1) * sin(phi1))))) end
function tmp = code(lambda1, phi1, phi2, delta, theta) tmp = lambda1 + atan2(((sin(theta) * sin(delta)) * cos(phi1)), (cos(delta) - (sin(phi1) * sin(phi1)))); end
code[lambda1_, phi1_, phi2_, delta_, theta_] := N[(lambda1 + N[ArcTan[N[(N[(N[Sin[theta], $MachinePrecision] * N[Sin[delta], $MachinePrecision]), $MachinePrecision] * N[Cos[phi1], $MachinePrecision]), $MachinePrecision] / N[(N[Cos[delta], $MachinePrecision] - N[(N[Sin[phi1], $MachinePrecision] * N[Sin[phi1], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \sin \phi_1 \cdot \sin \phi_1}
\end{array}
Initial program 99.7%
Taylor expanded in delta around 0
lift-sin.f6492.0
Applied rewrites92.0%
(FPCore (lambda1 phi1 phi2 delta theta) :precision binary64 (+ lambda1 (atan2 (* (* (sin theta) (sin delta)) (cos phi1)) (- (cos delta) (- 0.5 (* 0.5 (cos (* 2.0 phi1))))))))
// Herbie alternative: sin(phi1)^2 rewritten with the double-angle identity
// sin^2(x) = 0.5 - 0.5*cos(2x), avoiding the squared sine in the denominator.
double code(double lambda1, double phi1, double phi2, double delta, double theta) {
return lambda1 + atan2(((sin(theta) * sin(delta)) * cos(phi1)), (cos(delta) - (0.5 - (0.5 * cos((2.0 * phi1))))));
}
! NaN-aware max/min helpers mirroring C's fmax/fmin semantics: if exactly
! one argument is NaN (detected with x /= x), the other is returned.
! Generic names fmax/fmin dispatch over all real(4)/real(8) combinations;
! mixed-kind versions widen with dble and return real(8).
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
! real(8)/real(8): NaN x -> y, NaN y -> x, else max(x, y).
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! real(4)/real(4) fmax with the same NaN handling.
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! real(8)/real(4) fmax; y widened to real(8).
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
! real(4)/real(8) fmax; x widened to real(8).
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
! real(8)/real(8): NaN x -> y, NaN y -> x, else min(x, y).
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! real(4)/real(4) fmin with the same NaN handling.
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! real(8)/real(4) fmin; y widened to real(8).
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
! real(4)/real(8) fmin; x widened to real(8).
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
! lambda1 + atan2(sin(theta)*sin(delta)*cos(phi1),
!                 cos(delta) - (0.5 - 0.5*cos(2*phi1))).
! phi2 is accepted but unused; fmin_fmax_functions is pulled in although
! fmax/fmin are not referenced by this variant.
real(8) function code(lambda1, phi1, phi2, delta, theta)
use fmin_fmax_functions
real(8), intent (in) :: lambda1
real(8), intent (in) :: phi1
real(8), intent (in) :: phi2
real(8), intent (in) :: delta
real(8), intent (in) :: theta
code = lambda1 + atan2(((sin(theta) * sin(delta)) * cos(phi1)), (cos(delta) - (0.5d0 - (0.5d0 * cos((2.0d0 * phi1))))))
end function
// lambda1 + atan2(sin(theta)*sin(delta)*cos(phi1),
// cos(delta) - (0.5 - 0.5*cos(2*phi1))); phi2 is unused.
public static double code(double lambda1, double phi1, double phi2, double delta, double theta) {
return lambda1 + Math.atan2(((Math.sin(theta) * Math.sin(delta)) * Math.cos(phi1)), (Math.cos(delta) - (0.5 - (0.5 * Math.cos((2.0 * phi1))))));
}
def code(lambda1, phi1, phi2, delta, theta): return lambda1 + math.atan2(((math.sin(theta) * math.sin(delta)) * math.cos(phi1)), (math.cos(delta) - (0.5 - (0.5 * math.cos((2.0 * phi1))))))
function code(lambda1, phi1, phi2, delta, theta) return Float64(lambda1 + atan(Float64(Float64(sin(theta) * sin(delta)) * cos(phi1)), Float64(cos(delta) - Float64(0.5 - Float64(0.5 * cos(Float64(2.0 * phi1))))))) end
function tmp = code(lambda1, phi1, phi2, delta, theta) tmp = lambda1 + atan2(((sin(theta) * sin(delta)) * cos(phi1)), (cos(delta) - (0.5 - (0.5 * cos((2.0 * phi1)))))); end
code[lambda1_, phi1_, phi2_, delta_, theta_] := N[(lambda1 + N[ArcTan[N[(N[(N[Sin[theta], $MachinePrecision] * N[Sin[delta], $MachinePrecision]), $MachinePrecision] * N[Cos[phi1], $MachinePrecision]), $MachinePrecision] / N[(N[Cos[delta], $MachinePrecision] - N[(0.5 - N[(0.5 * N[Cos[N[(2.0 * phi1), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta - \left(0.5 - 0.5 \cdot \cos \left(2 \cdot \phi_1\right)\right)}
\end{array}
Initial program 99.7%
Taylor expanded in delta around 0
unpow2N/A
sqr-sin-aN/A
lower--.f64N/A
lower-*.f64N/A
lower-cos.f64N/A
lower-*.f6492.0
Applied rewrites92.0%
; Piecewise variant split on delta: in (-2.05e36, 1.7e-8] use sin(delta) ~ delta
; and denominator fma(0.5, cos(2*phi1), 0.5); otherwise use the cos(delta) form.
(FPCore (lambda1 phi1 phi2 delta theta)
:precision binary64
(let* ((t_1
(+
lambda1
(atan2 (* (* (sin theta) (sin delta)) (cos phi1)) (cos delta)))))
(if (<= delta -2.05e+36)
t_1
(if (<= delta 1.7e-8)
(+
lambda1
(atan2
(* (* (sin theta) delta) (cos phi1))
(fma 0.5 (cos (+ phi1 phi1)) 0.5)))
t_1))))
// Piecewise variant: for delta in (-2.05e36, 1.7e-8], sin(delta) is replaced
// by delta and the denominator by fma(0.5, cos(phi1+phi1), 0.5); outside
// that range the plain cos(delta) denominator is used. phi2 is unused.
double code(double lambda1, double phi1, double phi2, double delta, double theta) {
double t_1 = lambda1 + atan2(((sin(theta) * sin(delta)) * cos(phi1)), cos(delta));
double tmp;
if (delta <= -2.05e+36) {
tmp = t_1;
} else if (delta <= 1.7e-8) {
tmp = lambda1 + atan2(((sin(theta) * delta) * cos(phi1)), fma(0.5, cos((phi1 + phi1)), 0.5));
} else {
tmp = t_1;
}
return tmp;
}
function code(lambda1, phi1, phi2, delta, theta) t_1 = Float64(lambda1 + atan(Float64(Float64(sin(theta) * sin(delta)) * cos(phi1)), cos(delta))) tmp = 0.0 if (delta <= -2.05e+36) tmp = t_1; elseif (delta <= 1.7e-8) tmp = Float64(lambda1 + atan(Float64(Float64(sin(theta) * delta) * cos(phi1)), fma(0.5, cos(Float64(phi1 + phi1)), 0.5))); else tmp = t_1; end return tmp end
code[lambda1_, phi1_, phi2_, delta_, theta_] := Block[{t$95$1 = N[(lambda1 + N[ArcTan[N[(N[(N[Sin[theta], $MachinePrecision] * N[Sin[delta], $MachinePrecision]), $MachinePrecision] * N[Cos[phi1], $MachinePrecision]), $MachinePrecision] / N[Cos[delta], $MachinePrecision]], $MachinePrecision]), $MachinePrecision]}, If[LessEqual[delta, -2.05e+36], t$95$1, If[LessEqual[delta, 1.7e-8], N[(lambda1 + N[ArcTan[N[(N[(N[Sin[theta], $MachinePrecision] * delta), $MachinePrecision] * N[Cos[phi1], $MachinePrecision]), $MachinePrecision] / N[(0.5 * N[Cos[N[(phi1 + phi1), $MachinePrecision]], $MachinePrecision] + 0.5), $MachinePrecision]], $MachinePrecision]), $MachinePrecision], t$95$1]]]
\begin{array}{l}
\\
\begin{array}{l}
t_1 := \lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1}{\cos delta}\\
\mathbf{if}\;delta \leq -2.05 \cdot 10^{+36}:\\
\;\;\;\;t\_1\\
\mathbf{elif}\;delta \leq 1.7 \cdot 10^{-8}:\\
\;\;\;\;\lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot delta\right) \cdot \cos \phi_1}{\mathsf{fma}\left(0.5, \cos \left(\phi_1 + \phi_1\right), 0.5\right)}\\
\mathbf{else}:\\
\;\;\;\;t\_1\\
\end{array}
\end{array}
if delta < -2.05000000000000006e36 or 1.7e-8 < delta Initial program 99.7%
Taylor expanded in phi1 around 0
lift-cos.f6484.0
Applied rewrites84.0%
if -2.05000000000000006e36 < delta < 1.7e-8Initial program 99.8%
Taylor expanded in delta around 0
lower--.f64N/A
Applied rewrites97.9%
Taylor expanded in delta around 0
Applied rewrites97.3%
Taylor expanded in delta around 0
+-commutativeN/A
lower-fma.f64N/A
lift-cos.f64N/A
count-2-revN/A
lower-+.f6497.2
Applied rewrites97.2%
; Split on phi1 at 2e4: below, cos(delta) denominator; above, a small-delta
; series denominator fma(-0.5*delta, delta, 1) - (0.5 - 0.5*cos(2*phi1)).
(FPCore (lambda1 phi1 phi2 delta theta)
:precision binary64
(let* ((t_1 (* (* (sin theta) (sin delta)) (cos phi1))))
(if (<= phi1 20000.0)
(+ lambda1 (atan2 t_1 (cos delta)))
(+
lambda1
(atan2
t_1
(-
(fma (* -0.5 delta) delta 1.0)
(- 0.5 (* 0.5 (cos (* 2.0 phi1))))))))))
// Split on phi1 at 2e4: below, plain cos(delta) denominator; above,
// fma(-0.5*delta, delta, 1) - (0.5 - 0.5*cos(2*phi1)). phi2 is unused.
double code(double lambda1, double phi1, double phi2, double delta, double theta) {
double t_1 = (sin(theta) * sin(delta)) * cos(phi1);
double tmp;
if (phi1 <= 20000.0) {
tmp = lambda1 + atan2(t_1, cos(delta));
} else {
tmp = lambda1 + atan2(t_1, (fma((-0.5 * delta), delta, 1.0) - (0.5 - (0.5 * cos((2.0 * phi1))))));
}
return tmp;
}
function code(lambda1, phi1, phi2, delta, theta) t_1 = Float64(Float64(sin(theta) * sin(delta)) * cos(phi1)) tmp = 0.0 if (phi1 <= 20000.0) tmp = Float64(lambda1 + atan(t_1, cos(delta))); else tmp = Float64(lambda1 + atan(t_1, Float64(fma(Float64(-0.5 * delta), delta, 1.0) - Float64(0.5 - Float64(0.5 * cos(Float64(2.0 * phi1))))))); end return tmp end
code[lambda1_, phi1_, phi2_, delta_, theta_] := Block[{t$95$1 = N[(N[(N[Sin[theta], $MachinePrecision] * N[Sin[delta], $MachinePrecision]), $MachinePrecision] * N[Cos[phi1], $MachinePrecision]), $MachinePrecision]}, If[LessEqual[phi1, 20000.0], N[(lambda1 + N[ArcTan[t$95$1 / N[Cos[delta], $MachinePrecision]], $MachinePrecision]), $MachinePrecision], N[(lambda1 + N[ArcTan[t$95$1 / N[(N[(N[(-0.5 * delta), $MachinePrecision] * delta + 1.0), $MachinePrecision] - N[(0.5 - N[(0.5 * N[Cos[N[(2.0 * phi1), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_1 := \left(\sin theta \cdot \sin delta\right) \cdot \cos \phi_1\\
\mathbf{if}\;\phi_1 \leq 20000:\\
\;\;\;\;\lambda_1 + \tan^{-1}_* \frac{t\_1}{\cos delta}\\
\mathbf{else}:\\
\;\;\;\;\lambda_1 + \tan^{-1}_* \frac{t\_1}{\mathsf{fma}\left(-0.5 \cdot delta, delta, 1\right) - \left(0.5 - 0.5 \cdot \cos \left(2 \cdot \phi_1\right)\right)}\\
\end{array}
\end{array}
if phi1 < 2e4Initial program 99.8%
Taylor expanded in phi1 around 0
lift-cos.f6492.0
Applied rewrites92.0%
if 2e4 < phi1 Initial program 99.6%
Taylor expanded in delta around 0
lower--.f64N/A
Applied rewrites82.7%
Taylor expanded in phi1 around 0
lower-*.f6482.3
Applied rewrites82.3%
; Split on phi1 at 2.1e4: small-phi1 branch drops cos(phi1) from the numerator;
; large-phi1 branch uses the small-delta series denominator.
(FPCore (lambda1 phi1 phi2 delta theta)
:precision binary64
(if (<= phi1 21000.0)
(+ lambda1 (atan2 (* (sin theta) (sin delta)) (cos delta)))
(+
lambda1
(atan2
(* (* (sin theta) delta) (cos phi1))
(- (fma (* -0.5 delta) delta 1.0) (- 0.5 (* 0.5 (cos (* 2.0 phi1)))))))))
// Split on phi1 at 2.1e4: small-phi1 branch drops cos(phi1) from the
// numerator; large-phi1 branch replaces sin(delta) by delta and uses the
// series denominator. phi2 is unused.
double code(double lambda1, double phi1, double phi2, double delta, double theta) {
double tmp;
if (phi1 <= 21000.0) {
tmp = lambda1 + atan2((sin(theta) * sin(delta)), cos(delta));
} else {
tmp = lambda1 + atan2(((sin(theta) * delta) * cos(phi1)), (fma((-0.5 * delta), delta, 1.0) - (0.5 - (0.5 * cos((2.0 * phi1))))));
}
return tmp;
}
function code(lambda1, phi1, phi2, delta, theta) tmp = 0.0 if (phi1 <= 21000.0) tmp = Float64(lambda1 + atan(Float64(sin(theta) * sin(delta)), cos(delta))); else tmp = Float64(lambda1 + atan(Float64(Float64(sin(theta) * delta) * cos(phi1)), Float64(fma(Float64(-0.5 * delta), delta, 1.0) - Float64(0.5 - Float64(0.5 * cos(Float64(2.0 * phi1))))))); end return tmp end
code[lambda1_, phi1_, phi2_, delta_, theta_] := If[LessEqual[phi1, 21000.0], N[(lambda1 + N[ArcTan[N[(N[Sin[theta], $MachinePrecision] * N[Sin[delta], $MachinePrecision]), $MachinePrecision] / N[Cos[delta], $MachinePrecision]], $MachinePrecision]), $MachinePrecision], N[(lambda1 + N[ArcTan[N[(N[(N[Sin[theta], $MachinePrecision] * delta), $MachinePrecision] * N[Cos[phi1], $MachinePrecision]), $MachinePrecision] / N[(N[(N[(-0.5 * delta), $MachinePrecision] * delta + 1.0), $MachinePrecision] - N[(0.5 - N[(0.5 * N[Cos[N[(2.0 * phi1), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\phi_1 \leq 21000:\\
\;\;\;\;\lambda_1 + \tan^{-1}_* \frac{\sin theta \cdot \sin delta}{\cos delta}\\
\mathbf{else}:\\
\;\;\;\;\lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot delta\right) \cdot \cos \phi_1}{\mathsf{fma}\left(-0.5 \cdot delta, delta, 1\right) - \left(0.5 - 0.5 \cdot \cos \left(2 \cdot \phi_1\right)\right)}\\
\end{array}
\end{array}
if phi1 < 21000Initial program 99.8%
Taylor expanded in phi1 around 0
lift-cos.f6492.0
Applied rewrites92.0%
Taylor expanded in phi1 around 0
*-commutativeN/A
*-commutativeN/A
lift-sin.f64N/A
lift-sin.f64N/A
lift-*.f6490.6
Applied rewrites90.6%
if 21000 < phi1 Initial program 99.6%
Taylor expanded in delta around 0
lower--.f64N/A
Applied rewrites82.7%
Taylor expanded in delta around 0
Applied rewrites77.8%
Taylor expanded in phi1 around 0
lower-*.f6477.5
Applied rewrites77.5%
; Split on phi1 at 1.2e5; large-phi1 denominator is fma(0.5, cos(phi1+phi1), 0.5).
(FPCore (lambda1 phi1 phi2 delta theta)
:precision binary64
(if (<= phi1 120000.0)
(+ lambda1 (atan2 (* (sin theta) (sin delta)) (cos delta)))
(+
lambda1
(atan2
(* (* (sin theta) delta) (cos phi1))
(fma 0.5 (cos (+ phi1 phi1)) 0.5)))))
// Split on phi1 at 1.2e5: above it, sin(delta) -> delta and the denominator
// becomes fma(0.5, cos(phi1+phi1), 0.5). phi2 is unused.
double code(double lambda1, double phi1, double phi2, double delta, double theta) {
double tmp;
if (phi1 <= 120000.0) {
tmp = lambda1 + atan2((sin(theta) * sin(delta)), cos(delta));
} else {
tmp = lambda1 + atan2(((sin(theta) * delta) * cos(phi1)), fma(0.5, cos((phi1 + phi1)), 0.5));
}
return tmp;
}
function code(lambda1, phi1, phi2, delta, theta) tmp = 0.0 if (phi1 <= 120000.0) tmp = Float64(lambda1 + atan(Float64(sin(theta) * sin(delta)), cos(delta))); else tmp = Float64(lambda1 + atan(Float64(Float64(sin(theta) * delta) * cos(phi1)), fma(0.5, cos(Float64(phi1 + phi1)), 0.5))); end return tmp end
code[lambda1_, phi1_, phi2_, delta_, theta_] := If[LessEqual[phi1, 120000.0], N[(lambda1 + N[ArcTan[N[(N[Sin[theta], $MachinePrecision] * N[Sin[delta], $MachinePrecision]), $MachinePrecision] / N[Cos[delta], $MachinePrecision]], $MachinePrecision]), $MachinePrecision], N[(lambda1 + N[ArcTan[N[(N[(N[Sin[theta], $MachinePrecision] * delta), $MachinePrecision] * N[Cos[phi1], $MachinePrecision]), $MachinePrecision] / N[(0.5 * N[Cos[N[(phi1 + phi1), $MachinePrecision]], $MachinePrecision] + 0.5), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\phi_1 \leq 120000:\\
\;\;\;\;\lambda_1 + \tan^{-1}_* \frac{\sin theta \cdot \sin delta}{\cos delta}\\
\mathbf{else}:\\
\;\;\;\;\lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot delta\right) \cdot \cos \phi_1}{\mathsf{fma}\left(0.5, \cos \left(\phi_1 + \phi_1\right), 0.5\right)}\\
\end{array}
\end{array}
if phi1 < 1.2e5Initial program 99.8%
Taylor expanded in phi1 around 0
lift-cos.f6492.0
Applied rewrites92.0%
Taylor expanded in phi1 around 0
*-commutativeN/A
*-commutativeN/A
lift-sin.f64N/A
lift-sin.f64N/A
lift-*.f6490.6
Applied rewrites90.6%
if 1.2e5 < phi1 Initial program 99.6%
Taylor expanded in delta around 0
lower--.f64N/A
Applied rewrites82.7%
Taylor expanded in delta around 0
Applied rewrites77.8%
Taylor expanded in delta around 0
+-commutativeN/A
lower-fma.f64N/A
lift-cos.f64N/A
count-2-revN/A
lower-+.f6475.4
Applied rewrites75.4%
(FPCore (lambda1 phi1 phi2 delta theta) :precision binary64 (+ lambda1 (atan2 (* (sin theta) (sin delta)) (cos delta))))
double code(double lambda1, double phi1, double phi2, double delta, double theta) {
return lambda1 + atan2((sin(theta) * sin(delta)), cos(delta));
}
! NaN-aware max/min helpers mirroring C's fmax/fmin semantics: if exactly
! one argument is NaN (detected with x /= x), the other is returned.
! Generic names fmax/fmin dispatch over all real(4)/real(8) combinations;
! mixed-kind versions widen with dble and return real(8).
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
! real(8)/real(8): NaN x -> y, NaN y -> x, else max(x, y).
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! real(4)/real(4) fmax with the same NaN handling.
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! real(8)/real(4) fmax; y widened to real(8).
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
! real(4)/real(8) fmax; x widened to real(8).
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
! real(8)/real(8): NaN x -> y, NaN y -> x, else min(x, y).
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! real(4)/real(4) fmin with the same NaN handling.
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! real(8)/real(4) fmin; y widened to real(8).
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
! real(4)/real(8) fmin; x widened to real(8).
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
! lambda1 + atan2(sin(theta)*sin(delta), cos(delta)).
! phi1 and phi2 are unused (cos(phi1) removed per the rewrite log);
! fmax/fmin from the used module are not referenced by this variant.
real(8) function code(lambda1, phi1, phi2, delta, theta)
use fmin_fmax_functions
real(8), intent (in) :: lambda1
real(8), intent (in) :: phi1
real(8), intent (in) :: phi2
real(8), intent (in) :: delta
real(8), intent (in) :: theta
code = lambda1 + atan2((sin(theta) * sin(delta)), cos(delta))
end function
// lambda1 + atan2(sin(theta)*sin(delta), cos(delta)); phi1 and phi2 unused.
public static double code(double lambda1, double phi1, double phi2, double delta, double theta) {
return lambda1 + Math.atan2((Math.sin(theta) * Math.sin(delta)), Math.cos(delta));
}
def code(lambda1, phi1, phi2, delta, theta): return lambda1 + math.atan2((math.sin(theta) * math.sin(delta)), math.cos(delta))
function code(lambda1, phi1, phi2, delta, theta) return Float64(lambda1 + atan(Float64(sin(theta) * sin(delta)), cos(delta))) end
function tmp = code(lambda1, phi1, phi2, delta, theta) tmp = lambda1 + atan2((sin(theta) * sin(delta)), cos(delta)); end
code[lambda1_, phi1_, phi2_, delta_, theta_] := N[(lambda1 + N[ArcTan[N[(N[Sin[theta], $MachinePrecision] * N[Sin[delta], $MachinePrecision]), $MachinePrecision] / N[Cos[delta], $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\lambda_1 + \tan^{-1}_* \frac{\sin \theta \cdot \sin \delta}{\cos \delta}
\end{array}
Initial program 99.7%
Taylor expanded in phi1 around 0
lift-cos.f6488.4
Applied rewrites88.4%
Taylor expanded in phi1 around 0
*-commutativeN/A
*-commutativeN/A
lift-sin.f64N/A
lift-sin.f64N/A
lift-*.f6486.3
Applied rewrites86.3%
; Three-way split on theta; t_1 is the series denominator fma(-0.5, delta^2, 1).
; Middle branch replaces sin(theta) by its cubic Taylor polynomial.
(FPCore (lambda1 phi1 phi2 delta theta)
:precision binary64
(let* ((t_1 (fma -0.5 (* delta delta) 1.0)))
(if (<= theta -1.1e-38)
(+ lambda1 (atan2 (* (* (sin theta) delta) (cos phi1)) t_1))
(if (<= theta 105000000.0)
(+
lambda1
(atan2
(*
(* theta (fma -0.16666666666666666 (* theta theta) 1.0))
(sin delta))
(cos delta)))
(+ lambda1 (atan2 (* (sin theta) (sin delta)) t_1))))))
// Three-way split on theta: t_1 = fma(-0.5, delta*delta, 1) is the series
// denominator; the middle branch uses the cubic Taylor polynomial of
// sin(theta). phi2 is unused.
double code(double lambda1, double phi1, double phi2, double delta, double theta) {
double t_1 = fma(-0.5, (delta * delta), 1.0);
double tmp;
if (theta <= -1.1e-38) {
tmp = lambda1 + atan2(((sin(theta) * delta) * cos(phi1)), t_1);
} else if (theta <= 105000000.0) {
tmp = lambda1 + atan2(((theta * fma(-0.16666666666666666, (theta * theta), 1.0)) * sin(delta)), cos(delta));
} else {
tmp = lambda1 + atan2((sin(theta) * sin(delta)), t_1);
}
return tmp;
}
function code(lambda1, phi1, phi2, delta, theta) t_1 = fma(-0.5, Float64(delta * delta), 1.0) tmp = 0.0 if (theta <= -1.1e-38) tmp = Float64(lambda1 + atan(Float64(Float64(sin(theta) * delta) * cos(phi1)), t_1)); elseif (theta <= 105000000.0) tmp = Float64(lambda1 + atan(Float64(Float64(theta * fma(-0.16666666666666666, Float64(theta * theta), 1.0)) * sin(delta)), cos(delta))); else tmp = Float64(lambda1 + atan(Float64(sin(theta) * sin(delta)), t_1)); end return tmp end
code[lambda1_, phi1_, phi2_, delta_, theta_] := Block[{t$95$1 = N[(-0.5 * N[(delta * delta), $MachinePrecision] + 1.0), $MachinePrecision]}, If[LessEqual[theta, -1.1e-38], N[(lambda1 + N[ArcTan[N[(N[(N[Sin[theta], $MachinePrecision] * delta), $MachinePrecision] * N[Cos[phi1], $MachinePrecision]), $MachinePrecision] / t$95$1], $MachinePrecision]), $MachinePrecision], If[LessEqual[theta, 105000000.0], N[(lambda1 + N[ArcTan[N[(N[(theta * N[(-0.16666666666666666 * N[(theta * theta), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision] * N[Sin[delta], $MachinePrecision]), $MachinePrecision] / N[Cos[delta], $MachinePrecision]], $MachinePrecision]), $MachinePrecision], N[(lambda1 + N[ArcTan[N[(N[Sin[theta], $MachinePrecision] * N[Sin[delta], $MachinePrecision]), $MachinePrecision] / t$95$1], $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_1 := \mathsf{fma}\left(-0.5, delta \cdot delta, 1\right)\\
\mathbf{if}\;theta \leq -1.1 \cdot 10^{-38}:\\
\;\;\;\;\lambda_1 + \tan^{-1}_* \frac{\left(\sin theta \cdot delta\right) \cdot \cos \phi_1}{t\_1}\\
\mathbf{elif}\;theta \leq 105000000:\\
\;\;\;\;\lambda_1 + \tan^{-1}_* \frac{\left(theta \cdot \mathsf{fma}\left(-0.16666666666666666, theta \cdot theta, 1\right)\right) \cdot \sin delta}{\cos delta}\\
\mathbf{else}:\\
\;\;\;\;\lambda_1 + \tan^{-1}_* \frac{\sin theta \cdot \sin delta}{t\_1}\\
\end{array}
\end{array}
if theta < -1.10000000000000004e-38Initial program 99.6%
Taylor expanded in delta around 0
lower--.f64N/A
Applied rewrites79.0%
Taylor expanded in delta around 0
Applied rewrites76.0%
Taylor expanded in phi1 around 0
+-commutativeN/A
lower-fma.f64N/A
pow2N/A
lift-*.f6471.3
Applied rewrites71.3%
if -1.10000000000000004e-38 < theta < 1.05e8Initial program 99.9%
Taylor expanded in phi1 around 0
lift-cos.f6493.0
Applied rewrites93.0%
Taylor expanded in phi1 around 0
*-commutativeN/A
*-commutativeN/A
lift-sin.f64N/A
lift-sin.f64N/A
lift-*.f6490.5
Applied rewrites90.5%
Taylor expanded in theta around 0
lower-*.f64N/A
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6490.1
Applied rewrites90.1%
if 1.05e8 < theta Initial program 99.6%
Taylor expanded in phi1 around 0
lift-cos.f6483.7
Applied rewrites83.7%
Taylor expanded in phi1 around 0
*-commutativeN/A
*-commutativeN/A
lift-sin.f64N/A
lift-sin.f64N/A
lift-*.f6482.3
Applied rewrites82.3%
Taylor expanded in delta around 0
+-commutativeN/A
lower-fma.f64N/A
pow2N/A
lift-*.f6470.7
Applied rewrites70.7%
; Split on theta: outside (-5.2e17, 0.0305] use sin(theta) ~ ... with delta factored
; out; inside, replace sin(theta) by theta in the numerator.
(FPCore (lambda1 phi1 phi2 delta theta)
:precision binary64
(let* ((t_1 (+ lambda1 (atan2 (* delta (sin theta)) (cos delta)))))
(if (<= theta -5.2e+17)
t_1
(if (<= theta 0.0305)
(+ lambda1 (atan2 (* theta (sin delta)) (cos delta)))
t_1))))
// Split on theta: in (-5.2e17, 0.0305] replace sin(theta) by theta;
// otherwise use t_1 with sin(delta) replaced by delta. phi1/phi2 unused.
double code(double lambda1, double phi1, double phi2, double delta, double theta) {
double t_1 = lambda1 + atan2((delta * sin(theta)), cos(delta));
double tmp;
if (theta <= -5.2e+17) {
tmp = t_1;
} else if (theta <= 0.0305) {
tmp = lambda1 + atan2((theta * sin(delta)), cos(delta));
} else {
tmp = t_1;
}
return tmp;
}
! NaN-aware max/min helpers mirroring C's fmax/fmin semantics: if exactly
! one argument is NaN (detected with x /= x), the other is returned.
! Generic names fmax/fmin dispatch over all real(4)/real(8) combinations;
! mixed-kind versions widen with dble and return real(8).
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
! real(8)/real(8): NaN x -> y, NaN y -> x, else max(x, y).
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! real(4)/real(4) fmax with the same NaN handling.
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! real(8)/real(4) fmax; y widened to real(8).
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
! real(4)/real(8) fmax; x widened to real(8).
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
! real(8)/real(8): NaN x -> y, NaN y -> x, else min(x, y).
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! real(4)/real(4) fmin with the same NaN handling.
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! real(8)/real(4) fmin; y widened to real(8).
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
! real(4)/real(8) fmin; x widened to real(8).
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
! Piecewise variant split on theta:
!   theta in (-5.2e17, 0.0305]: lambda1 + atan2(theta*sin(delta), cos(delta))
!   otherwise:                  lambda1 + atan2(delta*sin(theta), cos(delta))
! phi1 and phi2 are unused; fmax/fmin from the used module are not referenced.
real(8) function code(lambda1, phi1, phi2, delta, theta)
use fmin_fmax_functions
real(8), intent (in) :: lambda1
real(8), intent (in) :: phi1
real(8), intent (in) :: phi2
real(8), intent (in) :: delta
real(8), intent (in) :: theta
real(8) :: t_1
real(8) :: tmp
t_1 = lambda1 + atan2((delta * sin(theta)), cos(delta))
if (theta <= (-5.2d+17)) then
tmp = t_1
else if (theta <= 0.0305d0) then
tmp = lambda1 + atan2((theta * sin(delta)), cos(delta))
else
tmp = t_1
end if
code = tmp
end function
// Split on theta: in (-5.2e17, 0.0305] replace sin(theta) by theta;
// otherwise replace sin(delta) by delta. phi1 and phi2 are unused.
public static double code(double lambda1, double phi1, double phi2, double delta, double theta) {
double t_1 = lambda1 + Math.atan2((delta * Math.sin(theta)), Math.cos(delta));
double tmp;
if (theta <= -5.2e+17) {
tmp = t_1;
} else if (theta <= 0.0305) {
tmp = lambda1 + Math.atan2((theta * Math.sin(delta)), Math.cos(delta));
} else {
tmp = t_1;
}
return tmp;
}
def code(lambda1, phi1, phi2, delta, theta): t_1 = lambda1 + math.atan2((delta * math.sin(theta)), math.cos(delta)) tmp = 0 if theta <= -5.2e+17: tmp = t_1 elif theta <= 0.0305: tmp = lambda1 + math.atan2((theta * math.sin(delta)), math.cos(delta)) else: tmp = t_1 return tmp
function code(lambda1, phi1, phi2, delta, theta) t_1 = Float64(lambda1 + atan(Float64(delta * sin(theta)), cos(delta))) tmp = 0.0 if (theta <= -5.2e+17) tmp = t_1; elseif (theta <= 0.0305) tmp = Float64(lambda1 + atan(Float64(theta * sin(delta)), cos(delta))); else tmp = t_1; end return tmp end
function tmp_2 = code(lambda1, phi1, phi2, delta, theta) t_1 = lambda1 + atan2((delta * sin(theta)), cos(delta)); tmp = 0.0; if (theta <= -5.2e+17) tmp = t_1; elseif (theta <= 0.0305) tmp = lambda1 + atan2((theta * sin(delta)), cos(delta)); else tmp = t_1; end tmp_2 = tmp; end
code[lambda1_, phi1_, phi2_, delta_, theta_] := Block[{t$95$1 = N[(lambda1 + N[ArcTan[N[(delta * N[Sin[theta], $MachinePrecision]), $MachinePrecision] / N[Cos[delta], $MachinePrecision]], $MachinePrecision]), $MachinePrecision]}, If[LessEqual[theta, -5.2e+17], t$95$1, If[LessEqual[theta, 0.0305], N[(lambda1 + N[ArcTan[N[(theta * N[Sin[delta], $MachinePrecision]), $MachinePrecision] / N[Cos[delta], $MachinePrecision]], $MachinePrecision]), $MachinePrecision], t$95$1]]]
\begin{array}{l}
\\
\begin{array}{l}
t_1 := \lambda_1 + \tan^{-1}_* \frac{delta \cdot \sin theta}{\cos delta}\\
\mathbf{if}\;theta \leq -5.2 \cdot 10^{+17}:\\
\;\;\;\;t\_1\\
\mathbf{elif}\;theta \leq 0.0305:\\
\;\;\;\;\lambda_1 + \tan^{-1}_* \frac{theta \cdot \sin delta}{\cos delta}\\
\mathbf{else}:\\
\;\;\;\;t\_1\\
\end{array}
\end{array}
if theta < -5.2e17 or 0.030499999999999999 < theta Initial program 99.6%
Taylor expanded in phi1 around 0
lift-cos.f6484.0
Applied rewrites84.0%
Taylor expanded in phi1 around 0
*-commutativeN/A
*-commutativeN/A
lift-sin.f64N/A
lift-sin.f64N/A
lift-*.f6482.5
Applied rewrites82.5%
Taylor expanded in delta around 0
lower-*.f64N/A
lift-sin.f6470.0
Applied rewrites70.0%
if -5.2e17 < theta < 0.030499999999999999Initial program 99.8%
Taylor expanded in phi1 around 0
lift-cos.f6492.4
Applied rewrites92.4%
Taylor expanded in phi1 around 0
*-commutativeN/A
*-commutativeN/A
lift-sin.f64N/A
lift-sin.f64N/A
lift-*.f6489.8
Applied rewrites89.8%
Taylor expanded in theta around 0
Applied rewrites89.2%
; Split on delta at 1.05: below, series denominator fma(-0.5, delta^2, 1);
; above, cubic Taylor polynomial of sin(theta) in the numerator.
(FPCore (lambda1 phi1 phi2 delta theta)
:precision binary64
(if (<= delta 1.05)
(+
lambda1
(atan2 (* (sin theta) (sin delta)) (fma -0.5 (* delta delta) 1.0)))
(+
lambda1
(atan2
(* (* theta (fma -0.16666666666666666 (* theta theta) 1.0)) (sin delta))
(cos delta)))))
// Split on delta at 1.05: below, series denominator fma(-0.5, delta*delta, 1);
// above, cubic Taylor polynomial of sin(theta). phi1/phi2 unused.
double code(double lambda1, double phi1, double phi2, double delta, double theta) {
double tmp;
if (delta <= 1.05) {
tmp = lambda1 + atan2((sin(theta) * sin(delta)), fma(-0.5, (delta * delta), 1.0));
} else {
tmp = lambda1 + atan2(((theta * fma(-0.16666666666666666, (theta * theta), 1.0)) * sin(delta)), cos(delta));
}
return tmp;
}
function code(lambda1, phi1, phi2, delta, theta) tmp = 0.0 if (delta <= 1.05) tmp = Float64(lambda1 + atan(Float64(sin(theta) * sin(delta)), fma(-0.5, Float64(delta * delta), 1.0))); else tmp = Float64(lambda1 + atan(Float64(Float64(theta * fma(-0.16666666666666666, Float64(theta * theta), 1.0)) * sin(delta)), cos(delta))); end return tmp end
code[lambda1_, phi1_, phi2_, delta_, theta_] := If[LessEqual[delta, 1.05], N[(lambda1 + N[ArcTan[N[(N[Sin[theta], $MachinePrecision] * N[Sin[delta], $MachinePrecision]), $MachinePrecision] / N[(-0.5 * N[(delta * delta), $MachinePrecision] + 1.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision], N[(lambda1 + N[ArcTan[N[(N[(theta * N[(-0.16666666666666666 * N[(theta * theta), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision] * N[Sin[delta], $MachinePrecision]), $MachinePrecision] / N[Cos[delta], $MachinePrecision]], $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;delta \leq 1.05:\\
\;\;\;\;\lambda_1 + \tan^{-1}_* \frac{\sin theta \cdot \sin delta}{\mathsf{fma}\left(-0.5, delta \cdot delta, 1\right)}\\
\mathbf{else}:\\
\;\;\;\;\lambda_1 + \tan^{-1}_* \frac{\left(theta \cdot \mathsf{fma}\left(-0.16666666666666666, theta \cdot theta, 1\right)\right) \cdot \sin delta}{\cos delta}\\
\end{array}
\end{array}
if delta < 1.05000000000000004Initial program 99.8%
Taylor expanded in phi1 around 0
lift-cos.f6489.4
Applied rewrites89.4%
Taylor expanded in phi1 around 0
*-commutativeN/A
*-commutativeN/A
lift-sin.f64N/A
lift-sin.f64N/A
lift-*.f6487.8
Applied rewrites87.8%
Taylor expanded in delta around 0
+-commutativeN/A
lower-fma.f64N/A
pow2N/A
lift-*.f6481.5
Applied rewrites81.5%
if 1.05000000000000004 < delta Initial program 99.7%
Taylor expanded in phi1 around 0
lift-cos.f6485.3
Applied rewrites85.3%
Taylor expanded in phi1 around 0
*-commutativeN/A
*-commutativeN/A
lift-sin.f64N/A
lift-sin.f64N/A
lift-*.f6481.6
Applied rewrites81.6%
Taylor expanded in theta around 0
lower-*.f64N/A
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6470.1
Applied rewrites70.1%
; Split on delta at 0.88: below, series denominator; above, sin(theta) -> theta.
(FPCore (lambda1 phi1 phi2 delta theta)
:precision binary64
(if (<= delta 0.88)
(+
lambda1
(atan2 (* (sin theta) (sin delta)) (fma -0.5 (* delta delta) 1.0)))
(+ lambda1 (atan2 (* theta (sin delta)) (cos delta)))))
// Split on delta at 0.88: below, series denominator fma(-0.5, delta*delta, 1);
// above, sin(theta) replaced by theta. phi1/phi2 unused.
double code(double lambda1, double phi1, double phi2, double delta, double theta) {
double tmp;
if (delta <= 0.88) {
tmp = lambda1 + atan2((sin(theta) * sin(delta)), fma(-0.5, (delta * delta), 1.0));
} else {
tmp = lambda1 + atan2((theta * sin(delta)), cos(delta));
}
return tmp;
}
function code(lambda1, phi1, phi2, delta, theta) tmp = 0.0 if (delta <= 0.88) tmp = Float64(lambda1 + atan(Float64(sin(theta) * sin(delta)), fma(-0.5, Float64(delta * delta), 1.0))); else tmp = Float64(lambda1 + atan(Float64(theta * sin(delta)), cos(delta))); end return tmp end
code[lambda1_, phi1_, phi2_, delta_, theta_] := If[LessEqual[delta, 0.88], N[(lambda1 + N[ArcTan[N[(N[Sin[theta], $MachinePrecision] * N[Sin[delta], $MachinePrecision]), $MachinePrecision] / N[(-0.5 * N[(delta * delta), $MachinePrecision] + 1.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision], N[(lambda1 + N[ArcTan[N[(theta * N[Sin[delta], $MachinePrecision]), $MachinePrecision] / N[Cos[delta], $MachinePrecision]], $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;delta \leq 0.88:\\
\;\;\;\;\lambda_1 + \tan^{-1}_* \frac{\sin theta \cdot \sin delta}{\mathsf{fma}\left(-0.5, delta \cdot delta, 1\right)}\\
\mathbf{else}:\\
\;\;\;\;\lambda_1 + \tan^{-1}_* \frac{theta \cdot \sin delta}{\cos delta}\\
\end{array}
\end{array}
if delta < 0.880000000000000004Initial program 99.8%
Taylor expanded in phi1 around 0
lift-cos.f6489.4
Applied rewrites89.4%
Taylor expanded in phi1 around 0
*-commutativeN/A
*-commutativeN/A
lift-sin.f64N/A
lift-sin.f64N/A
lift-*.f6487.8
Applied rewrites87.8%
Taylor expanded in delta around 0
+-commutativeN/A
lower-fma.f64N/A
pow2N/A
lift-*.f6481.5
Applied rewrites81.5%
if 0.880000000000000004 < delta Initial program 99.7%
Taylor expanded in phi1 around 0
lift-cos.f6485.3
Applied rewrites85.3%
Taylor expanded in phi1 around 0
*-commutativeN/A
*-commutativeN/A
lift-sin.f64N/A
lift-sin.f64N/A
lift-*.f6481.6
Applied rewrites81.6%
Taylor expanded in theta around 0
Applied rewrites70.0%
(FPCore (lambda1 phi1 phi2 delta theta) :precision binary64 (+ lambda1 (atan2 (* delta (sin theta)) (cos delta))))
double code(double lambda1, double phi1, double phi2, double delta, double theta) {
return lambda1 + atan2((delta * sin(theta)), cos(delta));
}
! NaN-aware max/min helpers mirroring C's fmax/fmin semantics: if exactly
! one argument is NaN (detected with x /= x), the other is returned.
! Generic names fmax/fmin dispatch over all real(4)/real(8) combinations;
! mixed-kind versions widen with dble and return real(8).
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
! real(8)/real(8): NaN x -> y, NaN y -> x, else max(x, y).
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! real(4)/real(4) fmax with the same NaN handling.
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! real(8)/real(4) fmax; y widened to real(8).
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
! real(4)/real(8) fmax; x widened to real(8).
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
! real(8)/real(8): NaN x -> y, NaN y -> x, else min(x, y).
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! real(4)/real(4) fmin with the same NaN handling.
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! real(8)/real(4) fmin; y widened to real(8).
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
! real(4)/real(8) fmin; x widened to real(8).
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
! Herbie-simplified destination longitude:
! lambda1 + atan2(delta * sin(theta), cos(delta)).
! phi1/phi2 remain in the interface for compatibility but are unused.
real(8) function code(lambda1, phi1, phi2, delta, theta)
  use fmin_fmax_functions
  implicit none
  real(8), intent(in) :: lambda1
  real(8), intent(in) :: phi1
  real(8), intent(in) :: phi2
  real(8), intent(in) :: delta
  real(8), intent(in) :: theta
  real(8) :: numerator, denominator

  numerator = delta * sin(theta)
  denominator = cos(delta)
  code = lambda1 + atan2(numerator, denominator)
end function code
/**
 * Herbie-simplified destination longitude:
 * {@code lambda1 + atan2(delta * sin(theta), cos(delta))}.
 * phi1/phi2 are retained in the signature but unused here.
 */
public static double code(double lambda1, double phi1, double phi2, double delta, double theta) {
    double numerator = delta * Math.sin(theta);
    double denominator = Math.cos(delta);
    return lambda1 + Math.atan2(numerator, denominator);
}
def code(lambda1, phi1, phi2, delta, theta):
    """Herbie-simplified destination longitude.

    Computes lambda1 + atan2(delta * sin(theta), cos(delta)).
    phi1/phi2 are kept for interface compatibility but unused.
    """
    numerator = delta * math.sin(theta)
    return lambda1 + math.atan2(numerator, math.cos(delta))
function code(lambda1, phi1, phi2, delta, theta)
    # atan(y, x) is Julia's two-argument arctangent (atan2).
    numerator = Float64(delta * sin(theta))
    return Float64(lambda1 + atan(numerator, cos(delta)))
end
function tmp = code(lambda1, phi1, phi2, delta, theta)
    % Herbie-simplified destination longitude:
    % lambda1 + atan2(delta * sin(theta), cos(delta)).
    numerator = delta * sin(theta);
    tmp = lambda1 + atan2(numerator, cos(delta));
end
(* Herbie candidate: lambda1 + ArcTan[(delta * Sin[theta]) / Cos[delta]],
   each intermediate rounded to $MachinePrecision. *)
code[lambda1_, phi1_, phi2_, delta_, theta_] := N[(lambda1 + N[ArcTan[N[(delta * N[Sin[theta], $MachinePrecision]), $MachinePrecision] / N[Cos[delta], $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\lambda_1 + \tan^{-1}_* \frac{\delta \cdot \sin \theta}{\cos \delta}
\end{array}
Initial program — 99.7% accuracy
Taylor expanded in phi1 around 0
lift-cos.f64 — 88.4%
Applied rewrites — 88.4%
Taylor expanded in phi1 around 0
*-commutative — N/A
*-commutative — N/A
lift-sin.f64 — N/A
lift-sin.f64 — N/A
lift-*.f64 — 86.3%
Applied rewrites — 86.3%
Taylor expanded in delta around 0
lower-*.f64 — N/A
lift-sin.f64 — 74.0%
Applied rewrites — 74.0%
; Fully degenerate candidate: every correction term was Taylor-expanded away,
; leaving just the start longitude lambda1.
(FPCore (lambda1 phi1 phi2 delta theta) :precision binary64 lambda1)
/* Fully degenerate Herbie candidate: the longitude correction vanished
 * entirely, so the result is just the starting longitude.  The other
 * parameters are kept for interface compatibility. */
double code(double lambda1, double phi1, double phi2, double delta, double theta) {
    (void) phi1;
    (void) phi2;
    (void) delta;
    (void) theta;
    return lambda1;
}
module fmin_fmax_functions
  ! NaN-aware max/min helpers for every real(4)/real(8) argument
  ! combination.  Semantics: when the first argument is NaN the second
  ! is returned; when only the second is NaN the first is returned;
  ! otherwise the ordinary max/min is returned.  (x /= x) is the
  ! portable "x is NaN" test used throughout.
  implicit none
  private
  public :: fmax
  public :: fmin

  interface fmax
    module procedure fmax88
    module procedure fmax44
    module procedure fmax84
    module procedure fmax48
  end interface

  interface fmin
    module procedure fmin88
    module procedure fmin44
    module procedure fmin84
    module procedure fmin48
  end interface

contains

  ! fmax for two real(8) values.
  real(8) function fmax88(x, y) result(res)
    real(8), intent(in) :: x
    real(8), intent(in) :: y
    if (x /= x) then
      res = y
    else if (y /= y) then
      res = x
    else
      res = max(x, y)
    end if
  end function fmax88

  ! fmax for two real(4) values.
  real(4) function fmax44(x, y) result(res)
    real(4), intent(in) :: x
    real(4), intent(in) :: y
    if (x /= x) then
      res = y
    else if (y /= y) then
      res = x
    else
      res = max(x, y)
    end if
  end function fmax44

  ! fmax for real(8)/real(4); result is real(8).
  real(8) function fmax84(x, y) result(res)
    real(8), intent(in) :: x
    real(4), intent(in) :: y
    if (x /= x) then
      res = dble(y)
    else if (y /= y) then
      res = x
    else
      res = max(x, dble(y))
    end if
  end function fmax84

  ! fmax for real(4)/real(8); result is real(8).
  real(8) function fmax48(x, y) result(res)
    real(4), intent(in) :: x
    real(8), intent(in) :: y
    if (x /= x) then
      res = y
    else if (y /= y) then
      res = dble(x)
    else
      res = max(dble(x), y)
    end if
  end function fmax48

  ! fmin for two real(8) values.
  real(8) function fmin88(x, y) result(res)
    real(8), intent(in) :: x
    real(8), intent(in) :: y
    if (x /= x) then
      res = y
    else if (y /= y) then
      res = x
    else
      res = min(x, y)
    end if
  end function fmin88

  ! fmin for two real(4) values.
  real(4) function fmin44(x, y) result(res)
    real(4), intent(in) :: x
    real(4), intent(in) :: y
    if (x /= x) then
      res = y
    else if (y /= y) then
      res = x
    else
      res = min(x, y)
    end if
  end function fmin44

  ! fmin for real(8)/real(4); result is real(8).
  real(8) function fmin84(x, y) result(res)
    real(8), intent(in) :: x
    real(4), intent(in) :: y
    if (x /= x) then
      res = dble(y)
    else if (y /= y) then
      res = x
    else
      res = min(x, dble(y))
    end if
  end function fmin84

  ! fmin for real(4)/real(8); result is real(8).
  real(8) function fmin48(x, y) result(res)
    real(4), intent(in) :: x
    real(8), intent(in) :: y
    if (x /= x) then
      res = y
    else if (y /= y) then
      res = dble(x)
    else
      res = min(dble(x), y)
    end if
  end function fmin48

end module fmin_fmax_functions
! Fully degenerate Herbie candidate: the longitude correction vanished
! entirely, so the result is just the starting longitude.  The other
! parameters remain in the interface for compatibility.
real(8) function code(lambda1, phi1, phi2, delta, theta)
  use fmin_fmax_functions
  implicit none
  real(8), intent(in) :: lambda1
  real(8), intent(in) :: phi1
  real(8), intent(in) :: phi2
  real(8), intent(in) :: delta
  real(8), intent(in) :: theta

  code = lambda1
end function code
/**
 * Fully degenerate Herbie candidate: the longitude correction vanished,
 * so the result is simply {@code lambda1}. Other parameters are retained
 * for interface compatibility.
 */
public static double code(double lambda1, double phi1, double phi2, double delta, double theta) {
    return lambda1;
}
def code(lambda1, phi1, phi2, delta, theta):
    """Fully degenerate candidate: the correction vanished; return lambda1.

    The remaining parameters are kept for interface compatibility.
    """
    return lambda1
function code(lambda1, phi1, phi2, delta, theta)
    # Fully degenerate candidate: the correction vanished; just lambda1.
    return lambda1
end
function tmp = code(lambda1, phi1, phi2, delta, theta)
    % Fully degenerate candidate: the correction vanished; just lambda1.
    tmp = lambda1;
end
(* Fully degenerate candidate: the correction vanished; result is just lambda1. *)
code[lambda1_, phi1_, phi2_, delta_, theta_] := lambda1
\begin{array}{l}
\\
\lambda_1
\end{array}
Initial program — 99.7% accuracy
Taylor expanded in lambda1 around inf
Applied rewrites — 69.6%
herbie shell --seed 2025114
; Original input: destination-point longitude given start latitude phi1,
; bearing theta, and angular distance delta (great-circle navigation).
(FPCore (lambda1 phi1 phi2 delta theta)
:name "Destination given bearing on a great circle"
:precision binary64
(+ lambda1 (atan2 (* (* (sin theta) (sin delta)) (cos phi1)) (- (cos delta) (* (sin phi1) (sin (asin (+ (* (sin phi1) (cos delta)) (* (* (cos phi1) (sin delta)) (cos theta))))))))))