
(FPCore (x y z a) :precision binary64 (+ x (- (tan (+ y z)) (tan a))))
// Compute x + (tan(y + z) - tan(a)) in binary64.
// Generated from the FPCore specification above; the parenthesization fixes
// the floating-point evaluation order (difference of tangents first, then +x).
double code(double x, double y, double z, double a) {
return x + (tan((y + z)) - tan(a));
}
! Compute x + (tan(y + z) - tan(a)) in real(8) (binary64).
! Generated from the FPCore specification above; evaluation order is
! significant for the rounding behaviour being reported.
real(8) function code(x, y, z, a)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: a
code = x + (tan((y + z)) - tan(a))
end function
// Compute x + (tan(y + z) - tan(a)) in double precision.
// Generated from the FPCore specification above; grouping mirrors the spec.
public static double code(double x, double y, double z, double a) {
return x + (Math.tan((y + z)) - Math.tan(a));
}
def code(x, y, z, a):
    """Return x + (tan(y + z) - tan(a)), matching the FPCore spec above.

    The grouping mirrors the generated expression so rounding behaviour is
    unchanged: the difference of tangents is formed first, then x is added.
    """
    tan_sum = math.tan(y + z)
    tan_a = math.tan(a)
    return x + (tan_sum - tan_a)
# Compute x + (tan(y + z) - tan(a)) in Float64, matching the FPCore spec above.
# Each intermediate is rounded to Float64 exactly as in the generated one-liner.
function code(x, y, z, a)
    angle = Float64(y + z)                 # argument of the first tangent
    diff = Float64(tan(angle) - tan(a))    # difference of tangents
    return Float64(x + diff)
end
% Compute x + (tan(y + z) - tan(a)) in double precision (FPCore spec above).
function tmp = code(x, y, z, a) tmp = x + (tan((y + z)) - tan(a)); end
code[x_, y_, z_, a_] := N[(x + N[(N[Tan[N[(y + z), $MachinePrecision]], $MachinePrecision] - N[Tan[a], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + \left(\tan \left(y + z\right) - \tan a\right)
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 10 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x y z a) :precision binary64 (+ x (- (tan (+ y z)) (tan a))))
double code(double x, double y, double z, double a) {
return x + (tan((y + z)) - tan(a));
}
real(8) function code(x, y, z, a)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: a
code = x + (tan((y + z)) - tan(a))
end function
public static double code(double x, double y, double z, double a) {
return x + (Math.tan((y + z)) - Math.tan(a));
}
def code(x, y, z, a): return x + (math.tan((y + z)) - math.tan(a))
function code(x, y, z, a) return Float64(x + Float64(tan(Float64(y + z)) - tan(a))) end
function tmp = code(x, y, z, a) tmp = x + (tan((y + z)) - tan(a)); end
code[x_, y_, z_, a_] := N[(x + N[(N[Tan[N[(y + z), $MachinePrecision]], $MachinePrecision] - N[Tan[a], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + \left(\tan \left(y + z\right) - \tan a\right)
\end{array}
(FPCore (x y z a)
:precision binary64
(+
(-
(/
(- (pow (tan y) 2.0) (pow (tan z) 2.0))
(* (- (tan y) (tan z)) (fma (- (tan z)) (tan y) 1.0)))
(tan a))
x))
// Herbie alternative: tan(y + z) rewritten via the tangent-sum identity as
//   (tan^2 y - tan^2 z) / ((tan y - tan z) * (1 - tan y * tan z)),
// with fma(-tan(z), tan(y), 1.0) computing the 1 - tan(y)*tan(z) factor in a
// single rounding to reduce cancellation error.
// NOTE(review): 0/0 when tan(y) == tan(z) — inherited from the generated
// rewrite; confirm against the sampled input range before relying on it.
double code(double x, double y, double z, double a) {
return (((pow(tan(y), 2.0) - pow(tan(z), 2.0)) / ((tan(y) - tan(z)) * fma(-tan(z), tan(y), 1.0))) - tan(a)) + x;
}
function code(x, y, z, a) return Float64(Float64(Float64(Float64((tan(y) ^ 2.0) - (tan(z) ^ 2.0)) / Float64(Float64(tan(y) - tan(z)) * fma(Float64(-tan(z)), tan(y), 1.0))) - tan(a)) + x) end
code[x_, y_, z_, a_] := N[(N[(N[(N[(N[Power[N[Tan[y], $MachinePrecision], 2.0], $MachinePrecision] - N[Power[N[Tan[z], $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision] / N[(N[(N[Tan[y], $MachinePrecision] - N[Tan[z], $MachinePrecision]), $MachinePrecision] * N[((-N[Tan[z], $MachinePrecision]) * N[Tan[y], $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[Tan[a], $MachinePrecision]), $MachinePrecision] + x), $MachinePrecision]
\begin{array}{l}
\\
\left(\frac{{\tan y}^{2} - {\tan z}^{2}}{\left(\tan y - \tan z\right) \cdot \mathsf{fma}\left(-\tan z, \tan y, 1\right)} - \tan a\right) + x
\end{array}
Initial program 80.6%
lift-tan.f64 N/A
lift-+.f64 N/A
tan-sum N/A
flip-+ N/A
associate-/l/ N/A
lower-/.f64 N/A
lower--.f64 N/A
pow2 N/A
lower-pow.f64 N/A
lower-tan.f64 N/A
pow2 N/A
lower-pow.f64 N/A
lower-tan.f64 N/A
lower-*.f64 N/A
Applied rewrites 99.7%
Final simplification 99.7%
(FPCore (x y z a)
:precision binary64
(let* ((t_0 (+ (tan z) (tan y))) (t_1 (+ (- (/ t_0 1.0) (tan a)) x)))
(if (<= (tan a) -0.0004)
t_1
(if (<= (tan a) 5e-69)
(fma t_0 (/ -1.0 (fma (tan y) (tan z) -1.0)) (- (- a x)))
t_1))))
// Herbie alternative with a regime split on tan(a):
//  - for tan(a) <= -0.0004 or tan(a) > 5e-69, use the plain difference t_1;
//  - in the narrow middle band, use the fma-based tangent-sum rewrite with
//    the tan(a) term Taylor-expanded (a around 0) into -(a - x).
// The "/ 1.0" is an artifact of the generated rewrite; it is value-preserving.
double code(double x, double y, double z, double a) {
double t_0 = tan(z) + tan(y);
double t_1 = ((t_0 / 1.0) - tan(a)) + x;
double tmp;
if (tan(a) <= -0.0004) {
tmp = t_1;
} else if (tan(a) <= 5e-69) {
// t_0 * (-1 / (tan(y)*tan(z) - 1)) + (x - a), via fma
tmp = fma(t_0, (-1.0 / fma(tan(y), tan(z), -1.0)), -(a - x));
} else {
tmp = t_1;
}
return tmp;
}
function code(x, y, z, a) t_0 = Float64(tan(z) + tan(y)) t_1 = Float64(Float64(Float64(t_0 / 1.0) - tan(a)) + x) tmp = 0.0 if (tan(a) <= -0.0004) tmp = t_1; elseif (tan(a) <= 5e-69) tmp = fma(t_0, Float64(-1.0 / fma(tan(y), tan(z), -1.0)), Float64(-Float64(a - x))); else tmp = t_1; end return tmp end
code[x_, y_, z_, a_] := Block[{t$95$0 = N[(N[Tan[z], $MachinePrecision] + N[Tan[y], $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(N[(N[(t$95$0 / 1.0), $MachinePrecision] - N[Tan[a], $MachinePrecision]), $MachinePrecision] + x), $MachinePrecision]}, If[LessEqual[N[Tan[a], $MachinePrecision], -0.0004], t$95$1, If[LessEqual[N[Tan[a], $MachinePrecision], 5e-69], N[(t$95$0 * N[(-1.0 / N[(N[Tan[y], $MachinePrecision] * N[Tan[z], $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision] + (-N[(a - x), $MachinePrecision])), $MachinePrecision], t$95$1]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \tan z + \tan y\\
t_1 := \left(\frac{t\_0}{1} - \tan a\right) + x\\
\mathbf{if}\;\tan a \leq -0.0004:\\
\;\;\;\;t\_1\\
\mathbf{elif}\;\tan a \leq 5 \cdot 10^{-69}:\\
\;\;\;\;\mathsf{fma}\left(t\_0, \frac{-1}{\mathsf{fma}\left(\tan y, \tan z, -1\right)}, -\left(a - x\right)\right)\\
\mathbf{else}:\\
\;\;\;\;t\_1\\
\end{array}
\end{array}
if (tan.f64 a) < -4.00000000000000019e-4 or 5.00000000000000033e-69 < (tan.f64 a) Initial program 81.9%
lift-tan.f64N/A
lift-+.f64N/A
tan-sumN/A
lower-/.f64N/A
+-commutativeN/A
lower-+.f64N/A
lower-tan.f64N/A
lower-tan.f64N/A
sub-negN/A
+-commutativeN/A
*-commutativeN/A
distribute-lft-neg-inN/A
lower-fma.f64N/A
lower-neg.f64N/A
lower-tan.f64N/A
lower-tan.f6499.7
Applied rewrites99.7%
Taylor expanded in z around 0
Applied rewrites82.4%
if -4.00000000000000019e-4 < (tan.f64 a) < 5.00000000000000033e-69Initial program 78.7%
lift-+.f64N/A
+-commutativeN/A
lift--.f64N/A
associate-+l-N/A
lower--.f64N/A
lift-+.f64N/A
+-commutativeN/A
lower-+.f64N/A
lower--.f6478.7
Applied rewrites78.7%
Taylor expanded in a around 0
lower--.f6478.7
Applied rewrites78.7%
lift--.f64N/A
sub-negN/A
Applied rewrites99.7%
Final simplification89.8%
(FPCore (x y z a)
:precision binary64
(let* ((t_0 (+ (tan z) (tan y))) (t_1 (+ (- (/ t_0 1.0) (tan a)) x)))
(if (<= (tan a) -2e-11)
t_1
(if (<= (tan a) 5e-69)
(- (/ t_0 (fma (- (tan z)) (tan y) 1.0)) (- x))
t_1))))
// Herbie alternative with a regime split on tan(a) (threshold -2e-11):
// outside the narrow band use the plain difference t_1; inside it use the
// tangent-sum rewrite, with fma(-tan(z), tan(y), 1.0) forming the
// 1 - tan(y)*tan(z) denominator in a single rounding.
// The "/ 1.0" is an artifact of the generated rewrite; it is value-preserving.
double code(double x, double y, double z, double a) {
double t_0 = tan(z) + tan(y);
double t_1 = ((t_0 / 1.0) - tan(a)) + x;
double tmp;
if (tan(a) <= -2e-11) {
tmp = t_1;
} else if (tan(a) <= 5e-69) {
// (tan z + tan y) / (1 - tan z * tan y) + x, written as ... - (-x)
tmp = (t_0 / fma(-tan(z), tan(y), 1.0)) - -x;
} else {
tmp = t_1;
}
return tmp;
}
function code(x, y, z, a) t_0 = Float64(tan(z) + tan(y)) t_1 = Float64(Float64(Float64(t_0 / 1.0) - tan(a)) + x) tmp = 0.0 if (tan(a) <= -2e-11) tmp = t_1; elseif (tan(a) <= 5e-69) tmp = Float64(Float64(t_0 / fma(Float64(-tan(z)), tan(y), 1.0)) - Float64(-x)); else tmp = t_1; end return tmp end
code[x_, y_, z_, a_] := Block[{t$95$0 = N[(N[Tan[z], $MachinePrecision] + N[Tan[y], $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(N[(N[(t$95$0 / 1.0), $MachinePrecision] - N[Tan[a], $MachinePrecision]), $MachinePrecision] + x), $MachinePrecision]}, If[LessEqual[N[Tan[a], $MachinePrecision], -2e-11], t$95$1, If[LessEqual[N[Tan[a], $MachinePrecision], 5e-69], N[(N[(t$95$0 / N[((-N[Tan[z], $MachinePrecision]) * N[Tan[y], $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision] - (-x)), $MachinePrecision], t$95$1]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \tan z + \tan y\\
t_1 := \left(\frac{t\_0}{1} - \tan a\right) + x\\
\mathbf{if}\;\tan a \leq -2 \cdot 10^{-11}:\\
\;\;\;\;t\_1\\
\mathbf{elif}\;\tan a \leq 5 \cdot 10^{-69}:\\
\;\;\;\;\frac{t\_0}{\mathsf{fma}\left(-\tan z, \tan y, 1\right)} - \left(-x\right)\\
\mathbf{else}:\\
\;\;\;\;t\_1\\
\end{array}
\end{array}
if (tan.f64 a) < -1.99999999999999988e-11 or 5.00000000000000033e-69 < (tan.f64 a) Initial program 82.3%
lift-tan.f64N/A
lift-+.f64N/A
tan-sumN/A
lower-/.f64N/A
+-commutativeN/A
lower-+.f64N/A
lower-tan.f64N/A
lower-tan.f64N/A
sub-negN/A
+-commutativeN/A
*-commutativeN/A
distribute-lft-neg-inN/A
lower-fma.f64N/A
lower-neg.f64N/A
lower-tan.f64N/A
lower-tan.f6499.7
Applied rewrites99.7%
Taylor expanded in z around 0
Applied rewrites82.8%
if -1.99999999999999988e-11 < (tan.f64 a) < 5.00000000000000033e-69Initial program 78.1%
lift-+.f64N/A
+-commutativeN/A
lift--.f64N/A
associate-+l-N/A
lower--.f64N/A
lift-+.f64N/A
+-commutativeN/A
lower-+.f64N/A
lower--.f6478.1
Applied rewrites78.1%
Taylor expanded in a around 0
mul-1-negN/A
lower-neg.f6478.1
Applied rewrites78.1%
lift-tan.f64N/A
lift-+.f64N/A
tan-sumN/A
lift-tan.f64N/A
lift-tan.f64N/A
lift-+.f64N/A
lift-tan.f64N/A
lift-tan.f64N/A
cancel-sign-sub-invN/A
lift-neg.f64N/A
+-commutativeN/A
lift-fma.f64N/A
lift-/.f6499.7
lift-+.f64N/A
+-commutativeN/A
lower-+.f6499.7
Applied rewrites99.7%
Final simplification89.8%
(FPCore (x y z a) :precision binary64 (+ (- (/ (+ (tan z) (tan y)) (fma (- (tan z)) (tan y) 1.0)) (tan a)) x))
double code(double x, double y, double z, double a) {
return (((tan(z) + tan(y)) / fma(-tan(z), tan(y), 1.0)) - tan(a)) + x;
}
function code(x, y, z, a) return Float64(Float64(Float64(Float64(tan(z) + tan(y)) / fma(Float64(-tan(z)), tan(y), 1.0)) - tan(a)) + x) end
code[x_, y_, z_, a_] := N[(N[(N[(N[(N[Tan[z], $MachinePrecision] + N[Tan[y], $MachinePrecision]), $MachinePrecision] / N[((-N[Tan[z], $MachinePrecision]) * N[Tan[y], $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision] - N[Tan[a], $MachinePrecision]), $MachinePrecision] + x), $MachinePrecision]
\begin{array}{l}
\\
\left(\frac{\tan z + \tan y}{\mathsf{fma}\left(-\tan z, \tan y, 1\right)} - \tan a\right) + x
\end{array}
Initial program 80.6%
lift-tan.f64N/A
lift-+.f64N/A
tan-sumN/A
lower-/.f64N/A
+-commutativeN/A
lower-+.f64N/A
lower-tan.f64N/A
lower-tan.f64N/A
sub-negN/A
+-commutativeN/A
*-commutativeN/A
distribute-lft-neg-inN/A
lower-fma.f64N/A
lower-neg.f64N/A
lower-tan.f64N/A
lower-tan.f6499.7
Applied rewrites99.7%
Final simplification99.7%
(FPCore (x y z a) :precision binary64 (+ (- (/ (+ (tan z) (tan y)) 1.0) (tan a)) x))
double code(double x, double y, double z, double a) {
return (((tan(z) + tan(y)) / 1.0) - tan(a)) + x;
}
real(8) function code(x, y, z, a)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: a
code = (((tan(z) + tan(y)) / 1.0d0) - tan(a)) + x
end function
public static double code(double x, double y, double z, double a) {
return (((Math.tan(z) + Math.tan(y)) / 1.0) - Math.tan(a)) + x;
}
def code(x, y, z, a): return (((math.tan(z) + math.tan(y)) / 1.0) - math.tan(a)) + x
function code(x, y, z, a) return Float64(Float64(Float64(Float64(tan(z) + tan(y)) / 1.0) - tan(a)) + x) end
function tmp = code(x, y, z, a) tmp = (((tan(z) + tan(y)) / 1.0) - tan(a)) + x; end
code[x_, y_, z_, a_] := N[(N[(N[(N[(N[Tan[z], $MachinePrecision] + N[Tan[y], $MachinePrecision]), $MachinePrecision] / 1.0), $MachinePrecision] - N[Tan[a], $MachinePrecision]), $MachinePrecision] + x), $MachinePrecision]
\begin{array}{l}
\\
\left(\frac{\tan z + \tan y}{1} - \tan a\right) + x
\end{array}
Initial program 80.6%
lift-tan.f64N/A
lift-+.f64N/A
tan-sumN/A
lower-/.f64N/A
+-commutativeN/A
lower-+.f64N/A
lower-tan.f64N/A
lower-tan.f64N/A
sub-negN/A
+-commutativeN/A
*-commutativeN/A
distribute-lft-neg-inN/A
lower-fma.f64N/A
lower-neg.f64N/A
lower-tan.f64N/A
lower-tan.f6499.7
Applied rewrites99.7%
Taylor expanded in z around 0
Applied rewrites80.9%
Final simplification80.9%
(FPCore (x y z a) :precision binary64 (+ (- (tan (fma y (/ y (- y z)) (* (/ z (- z y)) z))) (tan a)) x))
double code(double x, double y, double z, double a) {
return (tan(fma(y, (y / (y - z)), ((z / (z - y)) * z))) - tan(a)) + x;
}
function code(x, y, z, a) return Float64(Float64(tan(fma(y, Float64(y / Float64(y - z)), Float64(Float64(z / Float64(z - y)) * z))) - tan(a)) + x) end
code[x_, y_, z_, a_] := N[(N[(N[Tan[N[(y * N[(y / N[(y - z), $MachinePrecision]), $MachinePrecision] + N[(N[(z / N[(z - y), $MachinePrecision]), $MachinePrecision] * z), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] - N[Tan[a], $MachinePrecision]), $MachinePrecision] + x), $MachinePrecision]
\begin{array}{l}
\\
\left(\tan \left(\mathsf{fma}\left(y, \frac{y}{y - z}, \frac{z}{z - y} \cdot z\right)\right) - \tan a\right) + x
\end{array}
Initial program 80.6%
lift-+.f64N/A
flip-+N/A
div-subN/A
sub-negN/A
associate-/l*N/A
lower-fma.f64N/A
lower-/.f64N/A
lower--.f64N/A
lower-neg.f64N/A
associate-/l*N/A
lower-*.f64N/A
lower-/.f64N/A
lower--.f6480.6
Applied rewrites80.6%
Final simplification80.6%
(FPCore (x y z a) :precision binary64 (fma (/ (- (tan (+ z y)) (tan a)) x) x x))
double code(double x, double y, double z, double a) {
return fma(((tan((z + y)) - tan(a)) / x), x, x);
}
function code(x, y, z, a) return fma(Float64(Float64(tan(Float64(z + y)) - tan(a)) / x), x, x) end
code[x_, y_, z_, a_] := N[(N[(N[(N[Tan[N[(z + y), $MachinePrecision]], $MachinePrecision] - N[Tan[a], $MachinePrecision]), $MachinePrecision] / x), $MachinePrecision] * x + x), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\frac{\tan \left(z + y\right) - \tan a}{x}, x, x\right)
\end{array}
Initial program 80.6%
Taylor expanded in x around inf
associate--l+N/A
+-commutativeN/A
associate-/l/N/A
associate-/l/N/A
div-subN/A
distribute-rgt-inN/A
*-lft-identityN/A
lower-fma.f64N/A
Applied rewrites80.5%
Applied rewrites80.6%
(FPCore (x y z a) :precision binary64 (if (<= y -4.3e-9) (- (tan (+ z y)) (- x)) (+ (- (tan z) (tan a)) x)))
// Herbie alternative split on y: for y <= -4.3e-9 the tan(a) term is dropped
// entirely (Taylor expansion of a around 0 per the derivation log below);
// otherwise y is dropped from the tangent argument instead.
double code(double x, double y, double z, double a) {
double tmp;
if (y <= -4.3e-9) {
// tan(z + y) + x, written as tan(z + y) - (-x)
tmp = tan((z + y)) - -x;
} else {
tmp = (tan(z) - tan(a)) + x;
}
return tmp;
}
real(8) function code(x, y, z, a)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: a
real(8) :: tmp
if (y <= (-4.3d-9)) then
tmp = tan((z + y)) - -x
else
tmp = (tan(z) - tan(a)) + x
end if
code = tmp
end function
public static double code(double x, double y, double z, double a) {
double tmp;
if (y <= -4.3e-9) {
tmp = Math.tan((z + y)) - -x;
} else {
tmp = (Math.tan(z) - Math.tan(a)) + x;
}
return tmp;
}
def code(x, y, z, a): tmp = 0 if y <= -4.3e-9: tmp = math.tan((z + y)) - -x else: tmp = (math.tan(z) - math.tan(a)) + x return tmp
function code(x, y, z, a) tmp = 0.0 if (y <= -4.3e-9) tmp = Float64(tan(Float64(z + y)) - Float64(-x)); else tmp = Float64(Float64(tan(z) - tan(a)) + x); end return tmp end
function tmp_2 = code(x, y, z, a) tmp = 0.0; if (y <= -4.3e-9) tmp = tan((z + y)) - -x; else tmp = (tan(z) - tan(a)) + x; end tmp_2 = tmp; end
code[x_, y_, z_, a_] := If[LessEqual[y, -4.3e-9], N[(N[Tan[N[(z + y), $MachinePrecision]], $MachinePrecision] - (-x)), $MachinePrecision], N[(N[(N[Tan[z], $MachinePrecision] - N[Tan[a], $MachinePrecision]), $MachinePrecision] + x), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;y \leq -4.3 \cdot 10^{-9}:\\
\;\;\;\;\tan \left(z + y\right) - \left(-x\right)\\
\mathbf{else}:\\
\;\;\;\;\left(\tan z - \tan a\right) + x\\
\end{array}
\end{array}
if y < -4.29999999999999963e-9: Initial program 63.0%
lift-+.f64N/A
+-commutativeN/A
lift--.f64N/A
associate-+l-N/A
lower--.f64N/A
lift-+.f64N/A
+-commutativeN/A
lower-+.f64N/A
lower--.f6463.0
Applied rewrites63.0%
Taylor expanded in a around 0
mul-1-negN/A
lower-neg.f6438.5
Applied rewrites38.5%
if -4.29999999999999963e-9 < y Initial program 86.8%
Taylor expanded in y around 0
lower-/.f64N/A
lower-sin.f64N/A
lower-cos.f6474.6
Applied rewrites74.6%
lift-+.f64N/A
+-commutativeN/A
lower-+.f6474.6
Applied rewrites74.7%
(FPCore (x y z a) :precision binary64 (+ (- (tan (+ z y)) (tan a)) x))
double code(double x, double y, double z, double a) {
return (tan((z + y)) - tan(a)) + x;
}
real(8) function code(x, y, z, a)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: a
code = (tan((z + y)) - tan(a)) + x
end function
public static double code(double x, double y, double z, double a) {
return (Math.tan((z + y)) - Math.tan(a)) + x;
}
def code(x, y, z, a): return (math.tan((z + y)) - math.tan(a)) + x
function code(x, y, z, a) return Float64(Float64(tan(Float64(z + y)) - tan(a)) + x) end
function tmp = code(x, y, z, a) tmp = (tan((z + y)) - tan(a)) + x; end
code[x_, y_, z_, a_] := N[(N[(N[Tan[N[(z + y), $MachinePrecision]], $MachinePrecision] - N[Tan[a], $MachinePrecision]), $MachinePrecision] + x), $MachinePrecision]
\begin{array}{l}
\\
\left(\tan \left(z + y\right) - \tan a\right) + x
\end{array}
Initial program 80.6%
Final simplification80.6%
(FPCore (x y z a) :precision binary64 (- (tan (+ z y)) (- x)))
// Herbie alternative: tan(z + y) + x, written as a subtraction of -x.
// The tan(a) term is dropped entirely (Taylor expansion of a around 0);
// per the report this trades accuracy (48.7%) for speed.
double code(double x, double y, double z, double a) {
return tan((z + y)) - -x;
}
real(8) function code(x, y, z, a)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: a
code = tan((z + y)) - -x
end function
public static double code(double x, double y, double z, double a) {
return Math.tan((z + y)) - -x;
}
def code(x, y, z, a): return math.tan((z + y)) - -x
function code(x, y, z, a) return Float64(tan(Float64(z + y)) - Float64(-x)) end
function tmp = code(x, y, z, a) tmp = tan((z + y)) - -x; end
code[x_, y_, z_, a_] := N[(N[Tan[N[(z + y), $MachinePrecision]], $MachinePrecision] - (-x)), $MachinePrecision]
\begin{array}{l}
\\
\tan \left(z + y\right) - \left(-x\right)
\end{array}
Initial program 80.6%
lift-+.f64N/A
+-commutativeN/A
lift--.f64N/A
associate-+l-N/A
lower--.f64N/A
lift-+.f64N/A
+-commutativeN/A
lower-+.f64N/A
lower--.f6480.5
Applied rewrites80.5%
Taylor expanded in a around 0
mul-1-negN/A
lower-neg.f6448.7
Applied rewrites48.7%
herbie shell --seed 2024244
(FPCore (x y z a)
:name "tan-example (used to crash)"
:precision binary64
:pre (and (and (and (or (== x 0.0) (and (<= 0.5884142 x) (<= x 505.5909))) (or (and (<= -1.796658e+308 y) (<= y -9.425585e-310)) (and (<= 1.284938e-309 y) (<= y 1.751224e+308)))) (or (and (<= -1.776707e+308 z) (<= z -8.599796e-310)) (and (<= 3.293145e-311 z) (<= z 1.725154e+308)))) (or (and (<= -1.796658e+308 a) (<= a -9.425585e-310)) (and (<= 1.284938e-309 a) (<= a 1.751224e+308))))
(+ x (- (tan (+ y z)) (tan a))))