
(FPCore (x y z a) :precision binary64 (+ x (- (tan (+ y z)) (tan a))))
double code(double x, double y, double z, double a) {
return x + (tan((y + z)) - tan(a));
}
! Evaluate x + (tan(y + z) - tan(a)) in double precision.
! Direct translation of the FPCore specification; all four arguments
! and the result are binary64.
real(8) function code(x, y, z, a)
implicit none
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: a
code = x + (tan((y + z)) - tan(a))
end function
/** Evaluates x + (tan(y + z) - tan(a)) in binary64 arithmetic. */
public static double code(double x, double y, double z, double a) {
    final double tangentSum = Math.tan(y + z);
    final double tangentA = Math.tan(a);
    return x + (tangentSum - tangentA);
}
def code(x, y, z, a):
    """Return x + (tan(y + z) - tan(a)) evaluated in binary64 arithmetic."""
    tangent_sum = math.tan(y + z)
    tangent_a = math.tan(a)
    return x + (tangent_sum - tangent_a)
# Evaluate x + (tan(y + z) - tan(a)); each intermediate is rounded to Float64.
function code(x, y, z, a)
    inner = Float64(y + z)
    difference = Float64(tan(inner) - tan(a))
    return Float64(x + difference)
end
% Evaluate x + (tan(y + z) - tan(a)).
function tmp = code(x, y, z, a)
    tmp = x + (tan(y + z) - tan(a));
end
(* Evaluate x + (tan(y + z) - tan(a)); every intermediate is rounded with N[..., $MachinePrecision] to model binary64 arithmetic. *)
code[x_, y_, z_, a_] := N[(x + N[(N[Tan[N[(y + z), $MachinePrecision]], $MachinePrecision] - N[Tan[a], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + \left(\tan \left(y + z\right) - \tan a\right)
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 16 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x y z a) :precision binary64 (+ x (- (tan (+ y z)) (tan a))))
double code(double x, double y, double z, double a) {
return x + (tan((y + z)) - tan(a));
}
real(8) function code(x, y, z, a)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: a
code = x + (tan((y + z)) - tan(a))
end function
public static double code(double x, double y, double z, double a) {
return x + (Math.tan((y + z)) - Math.tan(a));
}
def code(x, y, z, a): return x + (math.tan((y + z)) - math.tan(a))
function code(x, y, z, a) return Float64(x + Float64(tan(Float64(y + z)) - tan(a))) end
function tmp = code(x, y, z, a) tmp = x + (tan((y + z)) - tan(a)); end
code[x_, y_, z_, a_] := N[(x + N[(N[Tan[N[(y + z), $MachinePrecision]], $MachinePrecision] - N[Tan[a], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + \left(\tan \left(y + z\right) - \tan a\right)
\end{array}
(FPCore (x y z a) :precision binary64 (+ x (fma (/ 1.0 (- 1.0 (* (tan y) (tan z)))) (+ (tan y) (tan z)) (- (tan a)))))
double code(double x, double y, double z, double a) {
return x + fma((1.0 / (1.0 - (tan(y) * tan(z)))), (tan(y) + tan(z)), -tan(a));
}
function code(x, y, z, a) return Float64(x + fma(Float64(1.0 / Float64(1.0 - Float64(tan(y) * tan(z)))), Float64(tan(y) + tan(z)), Float64(-tan(a)))) end
code[x_, y_, z_, a_] := N[(x + N[(N[(1.0 / N[(1.0 - N[(N[Tan[y], $MachinePrecision] * N[Tan[z], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(N[Tan[y], $MachinePrecision] + N[Tan[z], $MachinePrecision]), $MachinePrecision] + (-N[Tan[a], $MachinePrecision])), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + \mathsf{fma}\left(\frac{1}{1 - \tan y \cdot \tan z}, \tan y + \tan z, -\tan a\right)
\end{array}
Initial program 79.3%
lift-+.f64N/A
lift-tan.f64N/A
lift-tan.f64N/A
sub-negN/A
lift-tan.f64N/A
lift-+.f64N/A
tan-sumN/A
clear-numN/A
associate-/r/N/A
lower-fma.f64N/A
lower-/.f64N/A
lower--.f64N/A
lower-*.f64N/A
lower-tan.f64N/A
lower-tan.f64N/A
lower-+.f64N/A
lower-tan.f64N/A
lower-tan.f64N/A
lower-neg.f6499.6
Applied rewrites99.6%
(FPCore (x y z a)
:precision binary64
(let* ((t_0 (+ (tan y) (tan z))) (t_1 (+ x (fma 1.0 t_0 (- (tan a))))))
(if (<= (tan a) -0.005)
t_1
(if (<= (tan a) 5e-7)
(+
x
(-
(/ t_0 (- 1.0 (* (tan y) (tan z))))
(fma (* a a) (* a 0.3333333333333333) a)))
t_1))))
/* Regime-split evaluation of x + (tan(y+z) - tan(a)).
   For tan(a) in (-0.005, 5e-7] the tangent-sum identity is used and tan(a)
   is replaced by a cubic polynomial in a (fma(a*a, a/3, a), i.e. a + a^3/3);
   outside that band, tan(y) + tan(z) is combined with -tan(a) via fma.
   Thresholds come from the accuracy tool's regime inference — do not retune
   by hand. */
double code(double x, double y, double z, double a) {
double t_0 = tan(y) + tan(z);
double t_1 = x + fma(1.0, t_0, -tan(a));
double tmp;
if (tan(a) <= -0.005) {
tmp = t_1;
} else if (tan(a) <= 5e-7) {
/* tangent-sum quotient minus the Taylor polynomial for tan(a) near 0 */
tmp = x + ((t_0 / (1.0 - (tan(y) * tan(z)))) - fma((a * a), (a * 0.3333333333333333), a));
} else {
tmp = t_1;
}
return tmp;
}
function code(x, y, z, a) t_0 = Float64(tan(y) + tan(z)) t_1 = Float64(x + fma(1.0, t_0, Float64(-tan(a)))) tmp = 0.0 if (tan(a) <= -0.005) tmp = t_1; elseif (tan(a) <= 5e-7) tmp = Float64(x + Float64(Float64(t_0 / Float64(1.0 - Float64(tan(y) * tan(z)))) - fma(Float64(a * a), Float64(a * 0.3333333333333333), a))); else tmp = t_1; end return tmp end
code[x_, y_, z_, a_] := Block[{t$95$0 = N[(N[Tan[y], $MachinePrecision] + N[Tan[z], $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(x + N[(1.0 * t$95$0 + (-N[Tan[a], $MachinePrecision])), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[N[Tan[a], $MachinePrecision], -0.005], t$95$1, If[LessEqual[N[Tan[a], $MachinePrecision], 5e-7], N[(x + N[(N[(t$95$0 / N[(1.0 - N[(N[Tan[y], $MachinePrecision] * N[Tan[z], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(N[(a * a), $MachinePrecision] * N[(a * 0.3333333333333333), $MachinePrecision] + a), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], t$95$1]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \tan y + \tan z\\
t_1 := x + \mathsf{fma}\left(1, t\_0, -\tan a\right)\\
\mathbf{if}\;\tan a \leq -0.005:\\
\;\;\;\;t\_1\\
\mathbf{elif}\;\tan a \leq 5 \cdot 10^{-7}:\\
\;\;\;\;x + \left(\frac{t\_0}{1 - \tan y \cdot \tan z} - \mathsf{fma}\left(a \cdot a, a \cdot 0.3333333333333333, a\right)\right)\\
\mathbf{else}:\\
\;\;\;\;t\_1\\
\end{array}
\end{array}
if (tan.f64 a) < -0.0050000000000000001 or 4.99999999999999977e-7 < (tan.f64 a) Initial program 77.3%
lift-+.f64N/A
lift-tan.f64N/A
lift-tan.f64N/A
sub-negN/A
lift-tan.f64N/A
lift-+.f64N/A
tan-sumN/A
clear-numN/A
associate-/r/N/A
lower-fma.f64N/A
lower-/.f64N/A
lower--.f64N/A
lower-*.f64N/A
lower-tan.f64N/A
lower-tan.f64N/A
lower-+.f64N/A
lower-tan.f64N/A
lower-tan.f64N/A
lower-neg.f6499.4
Applied rewrites99.4%
Taylor expanded in y around 0
Applied rewrites77.9%
if -0.0050000000000000001 < (tan.f64 a) < 4.99999999999999977e-7: Initial program 81.3%
Taylor expanded in a around 0
+-commutativeN/A
distribute-lft-inN/A
associate-*r*N/A
*-commutativeN/A
*-rgt-identityN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
lower-*.f6481.3
Applied rewrites81.3%
tan-sumN/A
lift-tan.f64N/A
lift-tan.f64N/A
lift-+.f64N/A
lift-tan.f64N/A
lift-tan.f64N/A
lift-*.f64N/A
lift--.f64N/A
lift-/.f6499.6
Applied rewrites99.6%
(FPCore (x y z a) :precision binary64 (+ x (- (/ (+ (tan y) (tan z)) (fma (tan z) (- (tan y)) 1.0)) (tan a))))
double code(double x, double y, double z, double a) {
return x + (((tan(y) + tan(z)) / fma(tan(z), -tan(y), 1.0)) - tan(a));
}
function code(x, y, z, a) return Float64(x + Float64(Float64(Float64(tan(y) + tan(z)) / fma(tan(z), Float64(-tan(y)), 1.0)) - tan(a))) end
code[x_, y_, z_, a_] := N[(x + N[(N[(N[(N[Tan[y], $MachinePrecision] + N[Tan[z], $MachinePrecision]), $MachinePrecision] / N[(N[Tan[z], $MachinePrecision] * (-N[Tan[y], $MachinePrecision]) + 1.0), $MachinePrecision]), $MachinePrecision] - N[Tan[a], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + \left(\frac{\tan y + \tan z}{\mathsf{fma}\left(\tan z, -\tan y, 1\right)} - \tan a\right)
\end{array}
Initial program 79.3%
tan-sumN/A
lower-/.f64N/A
lower-+.f64N/A
lower-tan.f64N/A
lower-tan.f64N/A
lower--.f64N/A
lower-*.f64N/A
lower-tan.f64N/A
lower-tan.f6499.5
Applied rewrites99.5%
lift-tan.f64N/A
lift-tan.f64N/A
lift-*.f64N/A
sub-negN/A
+-commutativeN/A
lift-*.f64N/A
*-commutativeN/A
distribute-rgt-neg-inN/A
lower-fma.f64N/A
lower-neg.f6499.6
Applied rewrites99.6%
(FPCore (x y z a) :precision binary64 (+ x (- (/ (+ (tan y) (tan z)) (- 1.0 (* (tan y) (tan z)))) (tan a))))
double code(double x, double y, double z, double a) {
return x + (((tan(y) + tan(z)) / (1.0 - (tan(y) * tan(z)))) - tan(a));
}
! Evaluate x + ((tan(y) + tan(z)) / (1 - tan(y)*tan(z)) - tan(a)):
! the tangent addition identity applied to tan(y + z).
real(8) function code(x, y, z, a)
implicit none
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: a
code = x + (((tan(y) + tan(z)) / (1.0d0 - (tan(y) * tan(z)))) - tan(a))
end function
public static double code(double x, double y, double z, double a) {
return x + (((Math.tan(y) + Math.tan(z)) / (1.0 - (Math.tan(y) * Math.tan(z)))) - Math.tan(a));
}
def code(x, y, z, a):
    """x + ((tan y + tan z) / (1 - tan y * tan z) - tan a), the tangent-sum identity."""
    ty = math.tan(y)
    tz = math.tan(z)
    ratio = (ty + tz) / (1.0 - ty * tz)
    return x + (ratio - math.tan(a))
function code(x, y, z, a) return Float64(x + Float64(Float64(Float64(tan(y) + tan(z)) / Float64(1.0 - Float64(tan(y) * tan(z)))) - tan(a))) end
function tmp = code(x, y, z, a) tmp = x + (((tan(y) + tan(z)) / (1.0 - (tan(y) * tan(z)))) - tan(a)); end
code[x_, y_, z_, a_] := N[(x + N[(N[(N[(N[Tan[y], $MachinePrecision] + N[Tan[z], $MachinePrecision]), $MachinePrecision] / N[(1.0 - N[(N[Tan[y], $MachinePrecision] * N[Tan[z], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[Tan[a], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + \left(\frac{\tan y + \tan z}{1 - \tan y \cdot \tan z} - \tan a\right)
\end{array}
Initial program 79.3%
tan-sumN/A
lower-/.f64N/A
lower-+.f64N/A
lower-tan.f64N/A
lower-tan.f64N/A
lower--.f64N/A
lower-*.f64N/A
lower-tan.f64N/A
lower-tan.f6499.5
Applied rewrites99.5%
(FPCore (x y z a)
:precision binary64
(let* ((t_0 (+ (tan y) (tan z))) (t_1 (+ x (fma 1.0 t_0 (- (tan a))))))
(if (<= a -0.0098)
t_1
(if (<= a 0.0066)
(fma
(/ 1.0 (- 1.0 (* (tan y) (tan z))))
t_0
(- x (fma a (* (* a a) 0.3333333333333333) a)))
t_1))))
/* Regime-split on a (not tan(a)): for a in (-0.0098, 0.0066] the
   tangent-sum quotient is fused into an fma whose addend subtracts a
   cubic Taylor polynomial for tan(a) (fma(a, a*a/3, a) = a + a^3/3)
   from x; elsewhere tan(y)+tan(z) - tan(a) is used directly. */
double code(double x, double y, double z, double a) {
double t_0 = tan(y) + tan(z);
double t_1 = x + fma(1.0, t_0, -tan(a));
double tmp;
if (a <= -0.0098) {
tmp = t_1;
} else if (a <= 0.0066) {
/* 1/(1 - tan y * tan z) scales t_0 per the tangent addition identity */
tmp = fma((1.0 / (1.0 - (tan(y) * tan(z)))), t_0, (x - fma(a, ((a * a) * 0.3333333333333333), a)));
} else {
tmp = t_1;
}
return tmp;
}
function code(x, y, z, a) t_0 = Float64(tan(y) + tan(z)) t_1 = Float64(x + fma(1.0, t_0, Float64(-tan(a)))) tmp = 0.0 if (a <= -0.0098) tmp = t_1; elseif (a <= 0.0066) tmp = fma(Float64(1.0 / Float64(1.0 - Float64(tan(y) * tan(z)))), t_0, Float64(x - fma(a, Float64(Float64(a * a) * 0.3333333333333333), a))); else tmp = t_1; end return tmp end
code[x_, y_, z_, a_] := Block[{t$95$0 = N[(N[Tan[y], $MachinePrecision] + N[Tan[z], $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(x + N[(1.0 * t$95$0 + (-N[Tan[a], $MachinePrecision])), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[a, -0.0098], t$95$1, If[LessEqual[a, 0.0066], N[(N[(1.0 / N[(1.0 - N[(N[Tan[y], $MachinePrecision] * N[Tan[z], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * t$95$0 + N[(x - N[(a * N[(N[(a * a), $MachinePrecision] * 0.3333333333333333), $MachinePrecision] + a), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], t$95$1]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \tan y + \tan z\\
t_1 := x + \mathsf{fma}\left(1, t\_0, -\tan a\right)\\
\mathbf{if}\;a \leq -0.0098:\\
\;\;\;\;t\_1\\
\mathbf{elif}\;a \leq 0.0066:\\
\;\;\;\;\mathsf{fma}\left(\frac{1}{1 - \tan y \cdot \tan z}, t\_0, x - \mathsf{fma}\left(a, \left(a \cdot a\right) \cdot 0.3333333333333333, a\right)\right)\\
\mathbf{else}:\\
\;\;\;\;t\_1\\
\end{array}
\end{array}
if a < -0.0097999999999999997 or 0.0066 < a Initial program 77.3%
lift-+.f64N/A
lift-tan.f64N/A
lift-tan.f64N/A
sub-negN/A
lift-tan.f64N/A
lift-+.f64N/A
tan-sumN/A
clear-numN/A
associate-/r/N/A
lower-fma.f64N/A
lower-/.f64N/A
lower--.f64N/A
lower-*.f64N/A
lower-tan.f64N/A
lower-tan.f64N/A
lower-+.f64N/A
lower-tan.f64N/A
lower-tan.f64N/A
lower-neg.f6499.4
Applied rewrites99.4%
Taylor expanded in y around 0
Applied rewrites77.9%
if -0.0097999999999999997 < a < 0.0066: Initial program 81.3%
Taylor expanded in a around 0
+-commutativeN/A
distribute-lft-inN/A
associate-*r*N/A
*-commutativeN/A
*-rgt-identityN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
lower-*.f6481.3
Applied rewrites81.3%
Applied rewrites99.6%
Final simplification88.6%
(FPCore (x y z a)
:precision binary64
(if (<= (tan a) -0.005)
(+ x (- (tan y) (tan a)))
(if (<= (tan a) 2e-14)
(fma
(/ (fma a (fma (* a a) -0.3333333333333333 -1.0) (tan (+ y z))) x)
x
x)
(+ (tan y) (- x (tan a))))))
/* Three-regime evaluation keyed on tan(a).
   Outer regimes drop z entirely (Taylor expansion of tan(y+z) in z around 0
   per the derivation trace) and use tan(y) - tan(a).
   The middle regime (tan(a) in (-0.005, 2e-14]) replaces tan(a) with the
   cubic polynomial a + a^3/3 (as -fma(a, fma(a*a, -1/3, -1), ...)) and
   routes the sum through a divide-by-x/fma pair derived from expanding
   around x = inf. */
double code(double x, double y, double z, double a) {
double tmp;
if (tan(a) <= -0.005) {
tmp = x + (tan(y) - tan(a));
} else if (tan(a) <= 2e-14) {
tmp = fma((fma(a, fma((a * a), -0.3333333333333333, -1.0), tan((y + z))) / x), x, x);
} else {
tmp = tan(y) + (x - tan(a));
}
return tmp;
}
function code(x, y, z, a) tmp = 0.0 if (tan(a) <= -0.005) tmp = Float64(x + Float64(tan(y) - tan(a))); elseif (tan(a) <= 2e-14) tmp = fma(Float64(fma(a, fma(Float64(a * a), -0.3333333333333333, -1.0), tan(Float64(y + z))) / x), x, x); else tmp = Float64(tan(y) + Float64(x - tan(a))); end return tmp end
code[x_, y_, z_, a_] := If[LessEqual[N[Tan[a], $MachinePrecision], -0.005], N[(x + N[(N[Tan[y], $MachinePrecision] - N[Tan[a], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], If[LessEqual[N[Tan[a], $MachinePrecision], 2e-14], N[(N[(N[(a * N[(N[(a * a), $MachinePrecision] * -0.3333333333333333 + -1.0), $MachinePrecision] + N[Tan[N[(y + z), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / x), $MachinePrecision] * x + x), $MachinePrecision], N[(N[Tan[y], $MachinePrecision] + N[(x - N[Tan[a], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\tan a \leq -0.005:\\
\;\;\;\;x + \left(\tan y - \tan a\right)\\
\mathbf{elif}\;\tan a \leq 2 \cdot 10^{-14}:\\
\;\;\;\;\mathsf{fma}\left(\frac{\mathsf{fma}\left(a, \mathsf{fma}\left(a \cdot a, -0.3333333333333333, -1\right), \tan \left(y + z\right)\right)}{x}, x, x\right)\\
\mathbf{else}:\\
\;\;\;\;\tan y + \left(x - \tan a\right)\\
\end{array}
\end{array}
if (tan.f64 a) < -0.0050000000000000001Initial program 74.9%
Taylor expanded in z around 0
lower-/.f64N/A
lower-sin.f64N/A
lower-cos.f6454.2
Applied rewrites54.2%
lift-sin.f64N/A
lift-cos.f64N/A
lift-/.f64N/A
lift-tan.f64N/A
lift--.f64N/A
+-commutativeN/A
lower-+.f6454.2
lift-/.f64N/A
lift-sin.f64N/A
lift-cos.f64N/A
tan-quotN/A
lift-tan.f6454.2
Applied rewrites54.2%
if -0.0050000000000000001 < (tan.f64 a) < 2e-14Initial program 82.2%
Taylor expanded in a around 0
+-commutativeN/A
distribute-lft-inN/A
associate-*r*N/A
*-commutativeN/A
*-rgt-identityN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
lower-*.f6482.2
Applied rewrites82.2%
Taylor expanded in x around inf
Applied rewrites82.0%
lift-*.f64N/A
lift-fma.f64N/A
lift-+.f64N/A
lift-sin.f64N/A
lift-+.f64N/A
lift-cos.f64N/A
lift-/.f64N/A
lift-fma.f64N/A
lift-/.f64N/A
Applied rewrites82.2%
if 2e-14 < (tan.f64 a) Initial program 78.2%
Taylor expanded in z around 0
lower-/.f64N/A
lower-sin.f64N/A
lower-cos.f6454.9
Applied rewrites54.9%
lift-sin.f64N/A
lift-cos.f64N/A
lift-/.f64N/A
lift-tan.f64N/A
lift--.f64N/A
+-commutativeN/A
lift--.f64N/A
lift-/.f64N/A
lift-sin.f64N/A
lift-cos.f64N/A
tan-quotN/A
lift-tan.f64N/A
associate-+l-N/A
lower--.f64N/A
lower--.f6454.9
Applied rewrites54.9%
Final simplification67.8%
(FPCore (x y z a)
:precision binary64
(let* ((t_0 (+ x (- (tan y) (tan a)))))
(if (<= (tan a) -0.005)
t_0
(if (<= (tan a) 2e-14)
(fma
(/ (fma a (fma (* a a) -0.3333333333333333 -1.0) (tan (+ y z))) x)
x
x)
t_0))))
/* Two-regime evaluation keyed on tan(a); a variant of the three-regime
   form where both outer branches share t_0 = x + (tan(y) - tan(a))
   (z dropped via Taylor expansion in z around 0).  The middle regime
   (tan(a) in (-0.005, 2e-14]) substitutes the cubic polynomial
   a + a^3/3 for tan(a) inside a divide-by-x/fma pair. */
double code(double x, double y, double z, double a) {
double t_0 = x + (tan(y) - tan(a));
double tmp;
if (tan(a) <= -0.005) {
tmp = t_0;
} else if (tan(a) <= 2e-14) {
tmp = fma((fma(a, fma((a * a), -0.3333333333333333, -1.0), tan((y + z))) / x), x, x);
} else {
tmp = t_0;
}
return tmp;
}
function code(x, y, z, a) t_0 = Float64(x + Float64(tan(y) - tan(a))) tmp = 0.0 if (tan(a) <= -0.005) tmp = t_0; elseif (tan(a) <= 2e-14) tmp = fma(Float64(fma(a, fma(Float64(a * a), -0.3333333333333333, -1.0), tan(Float64(y + z))) / x), x, x); else tmp = t_0; end return tmp end
code[x_, y_, z_, a_] := Block[{t$95$0 = N[(x + N[(N[Tan[y], $MachinePrecision] - N[Tan[a], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[N[Tan[a], $MachinePrecision], -0.005], t$95$0, If[LessEqual[N[Tan[a], $MachinePrecision], 2e-14], N[(N[(N[(a * N[(N[(a * a), $MachinePrecision] * -0.3333333333333333 + -1.0), $MachinePrecision] + N[Tan[N[(y + z), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / x), $MachinePrecision] * x + x), $MachinePrecision], t$95$0]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := x + \left(\tan y - \tan a\right)\\
\mathbf{if}\;\tan a \leq -0.005:\\
\;\;\;\;t\_0\\
\mathbf{elif}\;\tan a \leq 2 \cdot 10^{-14}:\\
\;\;\;\;\mathsf{fma}\left(\frac{\mathsf{fma}\left(a, \mathsf{fma}\left(a \cdot a, -0.3333333333333333, -1\right), \tan \left(y + z\right)\right)}{x}, x, x\right)\\
\mathbf{else}:\\
\;\;\;\;t\_0\\
\end{array}
\end{array}
if (tan.f64 a) < -0.0050000000000000001 or 2e-14 < (tan.f64 a) Initial program 76.6%
Taylor expanded in z around 0
lower-/.f64N/A
lower-sin.f64N/A
lower-cos.f6454.5
Applied rewrites54.5%
lift-sin.f64N/A
lift-cos.f64N/A
lift-/.f64N/A
lift-tan.f64N/A
lift--.f64N/A
+-commutativeN/A
lower-+.f6454.5
lift-/.f64N/A
lift-sin.f64N/A
lift-cos.f64N/A
tan-quotN/A
lift-tan.f6454.6
Applied rewrites54.6%
if -0.0050000000000000001 < (tan.f64 a) < 2e-14Initial program 82.2%
Taylor expanded in a around 0
+-commutativeN/A
distribute-lft-inN/A
associate-*r*N/A
*-commutativeN/A
*-rgt-identityN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
lower-*.f6482.2
Applied rewrites82.2%
Taylor expanded in x around inf
Applied rewrites82.0%
lift-*.f64N/A
lift-fma.f64N/A
lift-+.f64N/A
lift-sin.f64N/A
lift-+.f64N/A
lift-cos.f64N/A
lift-/.f64N/A
lift-fma.f64N/A
lift-/.f64N/A
Applied rewrites82.2%
Final simplification67.8%
(FPCore (x y z a) :precision binary64 (+ x (fma 1.0 (+ (tan y) (tan z)) (- (tan a)))))
double code(double x, double y, double z, double a) {
return x + fma(1.0, (tan(y) + tan(z)), -tan(a));
}
function code(x, y, z, a) return Float64(x + fma(1.0, Float64(tan(y) + tan(z)), Float64(-tan(a)))) end
code[x_, y_, z_, a_] := N[(x + N[(1.0 * N[(N[Tan[y], $MachinePrecision] + N[Tan[z], $MachinePrecision]), $MachinePrecision] + (-N[Tan[a], $MachinePrecision])), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + \mathsf{fma}\left(1, \tan y + \tan z, -\tan a\right)
\end{array}
Initial program 79.3%
lift-+.f64N/A
lift-tan.f64N/A
lift-tan.f64N/A
sub-negN/A
lift-tan.f64N/A
lift-+.f64N/A
tan-sumN/A
clear-numN/A
associate-/r/N/A
lower-fma.f64N/A
lower-/.f64N/A
lower--.f64N/A
lower-*.f64N/A
lower-tan.f64N/A
lower-tan.f64N/A
lower-+.f64N/A
lower-tan.f64N/A
lower-tan.f64N/A
lower-neg.f6499.6
Applied rewrites99.6%
Taylor expanded in y around 0
Applied rewrites79.5%
(FPCore (x y z a) :precision binary64 (+ x (- (tan (+ y z)) (tan a))))
double code(double x, double y, double z, double a) {
return x + (tan((y + z)) - tan(a));
}
real(8) function code(x, y, z, a)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: a
code = x + (tan((y + z)) - tan(a))
end function
public static double code(double x, double y, double z, double a) {
return x + (Math.tan((y + z)) - Math.tan(a));
}
def code(x, y, z, a): return x + (math.tan((y + z)) - math.tan(a))
function code(x, y, z, a) return Float64(x + Float64(tan(Float64(y + z)) - tan(a))) end
function tmp = code(x, y, z, a) tmp = x + (tan((y + z)) - tan(a)); end
code[x_, y_, z_, a_] := N[(x + N[(N[Tan[N[(y + z), $MachinePrecision]], $MachinePrecision] - N[Tan[a], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + \left(\tan \left(y + z\right) - \tan a\right)
\end{array}
Initial program 79.3%
(FPCore (x y z a)
:precision binary64
(let* ((t_0 (- x (tan a))))
(if (<= a -0.4)
t_0
(if (<= a 0.22)
(fma
(/ (fma a (fma (* a a) -0.3333333333333333 -1.0) (tan (+ y z))) x)
x
x)
t_0))))
double code(double x, double y, double z, double a) {
double t_0 = x - tan(a);
double tmp;
if (a <= -0.4) {
tmp = t_0;
} else if (a <= 0.22) {
tmp = fma((fma(a, fma((a * a), -0.3333333333333333, -1.0), tan((y + z))) / x), x, x);
} else {
tmp = t_0;
}
return tmp;
}
function code(x, y, z, a) t_0 = Float64(x - tan(a)) tmp = 0.0 if (a <= -0.4) tmp = t_0; elseif (a <= 0.22) tmp = fma(Float64(fma(a, fma(Float64(a * a), -0.3333333333333333, -1.0), tan(Float64(y + z))) / x), x, x); else tmp = t_0; end return tmp end
code[x_, y_, z_, a_] := Block[{t$95$0 = N[(x - N[Tan[a], $MachinePrecision]), $MachinePrecision]}, If[LessEqual[a, -0.4], t$95$0, If[LessEqual[a, 0.22], N[(N[(N[(a * N[(N[(a * a), $MachinePrecision] * -0.3333333333333333 + -1.0), $MachinePrecision] + N[Tan[N[(y + z), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / x), $MachinePrecision] * x + x), $MachinePrecision], t$95$0]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := x - \tan a\\
\mathbf{if}\;a \leq -0.4:\\
\;\;\;\;t\_0\\
\mathbf{elif}\;a \leq 0.22:\\
\;\;\;\;\mathsf{fma}\left(\frac{\mathsf{fma}\left(a, \mathsf{fma}\left(a \cdot a, -0.3333333333333333, -1\right), \tan \left(y + z\right)\right)}{x}, x, x\right)\\
\mathbf{else}:\\
\;\;\;\;t\_0\\
\end{array}
\end{array}
if a < -0.40000000000000002 or 0.220000000000000001 < a Initial program 77.4%
Taylor expanded in z around 0
lower-/.f64N/A
lower-sin.f64N/A
lower-cos.f6454.6
Applied rewrites54.6%
Taylor expanded in y around 0
lower--.f64N/A
lower-/.f64N/A
lower-sin.f64N/A
lower-cos.f6441.0
Applied rewrites41.0%
tan-quotN/A
lift-tan.f64N/A
lower--.f6441.0
Applied rewrites41.0%
if -0.40000000000000002 < a < 0.220000000000000001Initial program 81.2%
Taylor expanded in a around 0
+-commutativeN/A
distribute-lft-inN/A
associate-*r*N/A
*-commutativeN/A
*-rgt-identityN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
lower-*.f6481.2
Applied rewrites81.2%
Taylor expanded in x around inf
Applied rewrites81.0%
lift-*.f64N/A
lift-fma.f64N/A
lift-+.f64N/A
lift-sin.f64N/A
lift-+.f64N/A
lift-cos.f64N/A
lift-/.f64N/A
lift-fma.f64N/A
lift-/.f64N/A
Applied rewrites81.2%
(FPCore (x y z a)
:precision binary64
(let* ((t_0 (- x (tan a))))
(if (<= a -0.7)
t_0
(if (<= a 0.36)
(+
x
(-
(tan (+ y z))
(fma
(fma a (* a 0.13333333333333333) 0.3333333333333333)
(* a (* a a))
a)))
t_0))))
/* For a in (-0.7, 0.36], tan(a) is replaced by a degree-5 Taylor
   polynomial: fma(fma(a, a*2/15, 1/3), a^3, a) = a + a^3/3 + 2a^5/15;
   everywhere else the simplified direct form x - tan(a) is used
   (y and z dropped via Taylor expansion around 0 per the trace). */
double code(double x, double y, double z, double a) {
double t_0 = x - tan(a);
double tmp;
if (a <= -0.7) {
tmp = t_0;
} else if (a <= 0.36) {
tmp = x + (tan((y + z)) - fma(fma(a, (a * 0.13333333333333333), 0.3333333333333333), (a * (a * a)), a));
} else {
tmp = t_0;
}
return tmp;
}
function code(x, y, z, a) t_0 = Float64(x - tan(a)) tmp = 0.0 if (a <= -0.7) tmp = t_0; elseif (a <= 0.36) tmp = Float64(x + Float64(tan(Float64(y + z)) - fma(fma(a, Float64(a * 0.13333333333333333), 0.3333333333333333), Float64(a * Float64(a * a)), a))); else tmp = t_0; end return tmp end
code[x_, y_, z_, a_] := Block[{t$95$0 = N[(x - N[Tan[a], $MachinePrecision]), $MachinePrecision]}, If[LessEqual[a, -0.7], t$95$0, If[LessEqual[a, 0.36], N[(x + N[(N[Tan[N[(y + z), $MachinePrecision]], $MachinePrecision] - N[(N[(a * N[(a * 0.13333333333333333), $MachinePrecision] + 0.3333333333333333), $MachinePrecision] * N[(a * N[(a * a), $MachinePrecision]), $MachinePrecision] + a), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], t$95$0]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := x - \tan a\\
\mathbf{if}\;a \leq -0.7:\\
\;\;\;\;t\_0\\
\mathbf{elif}\;a \leq 0.36:\\
\;\;\;\;x + \left(\tan \left(y + z\right) - \mathsf{fma}\left(\mathsf{fma}\left(a, a \cdot 0.13333333333333333, 0.3333333333333333\right), a \cdot \left(a \cdot a\right), a\right)\right)\\
\mathbf{else}:\\
\;\;\;\;t\_0\\
\end{array}
\end{array}
if a < -0.69999999999999996 or 0.35999999999999999 < a Initial program 77.4%
Taylor expanded in z around 0
lower-/.f64N/A
lower-sin.f64N/A
lower-cos.f6454.6
Applied rewrites54.6%
Taylor expanded in y around 0
lower--.f64N/A
lower-/.f64N/A
lower-sin.f64N/A
lower-cos.f6441.0
Applied rewrites41.0%
tan-quotN/A
lift-tan.f64N/A
lower--.f6441.0
Applied rewrites41.0%
if -0.69999999999999996 < a < 0.35999999999999999Initial program 81.2%
Taylor expanded in a around 0
+-commutativeN/A
distribute-lft-inN/A
associate-*r*N/A
*-commutativeN/A
*-rgt-identityN/A
lower-fma.f64N/A
+-commutativeN/A
*-commutativeN/A
unpow2N/A
associate-*l*N/A
lower-fma.f64N/A
lower-*.f64N/A
lower-*.f64N/A
unpow2N/A
lower-*.f6481.2
Applied rewrites81.2%
(FPCore (x y z a)
:precision binary64
(let* ((t_0 (- x (tan a))))
(if (<= a -0.4)
t_0
(if (<= a 0.22)
(fma (fma (* a a) -0.3333333333333333 -1.0) a (+ x (tan (+ y z))))
t_0))))
double code(double x, double y, double z, double a) {
double t_0 = x - tan(a);
double tmp;
if (a <= -0.4) {
tmp = t_0;
} else if (a <= 0.22) {
tmp = fma(fma((a * a), -0.3333333333333333, -1.0), a, (x + tan((y + z))));
} else {
tmp = t_0;
}
return tmp;
}
function code(x, y, z, a) t_0 = Float64(x - tan(a)) tmp = 0.0 if (a <= -0.4) tmp = t_0; elseif (a <= 0.22) tmp = fma(fma(Float64(a * a), -0.3333333333333333, -1.0), a, Float64(x + tan(Float64(y + z)))); else tmp = t_0; end return tmp end
code[x_, y_, z_, a_] := Block[{t$95$0 = N[(x - N[Tan[a], $MachinePrecision]), $MachinePrecision]}, If[LessEqual[a, -0.4], t$95$0, If[LessEqual[a, 0.22], N[(N[(N[(a * a), $MachinePrecision] * -0.3333333333333333 + -1.0), $MachinePrecision] * a + N[(x + N[Tan[N[(y + z), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], t$95$0]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := x - \tan a\\
\mathbf{if}\;a \leq -0.4:\\
\;\;\;\;t\_0\\
\mathbf{elif}\;a \leq 0.22:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(a \cdot a, -0.3333333333333333, -1\right), a, x + \tan \left(y + z\right)\right)\\
\mathbf{else}:\\
\;\;\;\;t\_0\\
\end{array}
\end{array}
if a < -0.40000000000000002 or 0.220000000000000001 < a Initial program 77.4%
Taylor expanded in z around 0
lower-/.f64N/A
lower-sin.f64N/A
lower-cos.f6454.6
Applied rewrites54.6%
Taylor expanded in y around 0
lower--.f64N/A
lower-/.f64N/A
lower-sin.f64N/A
lower-cos.f6441.0
Applied rewrites41.0%
tan-quotN/A
lift-tan.f64N/A
lower--.f6441.0
Applied rewrites41.0%
if -0.40000000000000002 < a < 0.220000000000000001Initial program 81.2%
Taylor expanded in a around 0
+-commutativeN/A
distribute-lft-inN/A
associate-*r*N/A
*-commutativeN/A
*-rgt-identityN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
lower-*.f6481.2
Applied rewrites81.2%
Taylor expanded in a around inf
Applied rewrites24.0%
lift-+.f64N/A
lift-sin.f64N/A
lift-*.f64N/A
lift-*.f64N/A
lift-+.f64N/A
lift-cos.f64N/A
lift-*.f64N/A
lift-*.f64N/A
lift-*.f64N/A
Applied rewrites24.0%
Applied rewrites81.2%
(FPCore (x y z a) :precision binary64 (- x (tan a)))
double code(double x, double y, double z, double a) {
return x - tan(a);
}
! Simplified approximation keeping only x - tan(a); y and z are ignored.
real(8) function code(x, y, z, a)
implicit none
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: a
code = x - tan(a)
end function
public static double code(double x, double y, double z, double a) {
return x - Math.tan(a);
}
def code(x, y, z, a):
    """Simplified approximation keeping only x - tan(a); y and z are ignored."""
    tangent_a = math.tan(a)
    return x - tangent_a
function code(x, y, z, a) return Float64(x - tan(a)) end
function tmp = code(x, y, z, a) tmp = x - tan(a); end
code[x_, y_, z_, a_] := N[(x - N[Tan[a], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x - \tan a
\end{array}
Initial program 79.3%
Taylor expanded in z around 0
lower-/.f64N/A
lower-sin.f64N/A
lower-cos.f6455.7
Applied rewrites55.7%
Taylor expanded in y around 0
lower--.f64N/A
lower-/.f64N/A
lower-sin.f64N/A
lower-cos.f6438.5
Applied rewrites38.5%
tan-quotN/A
lift-tan.f64N/A
lower--.f6438.5
Applied rewrites38.5%
(FPCore (x y z a) :precision binary64 (- x a))
/* Linearized approximation: x - a (tan(a) ~ a near 0; y and z ignored). */
double code(double x, double y, double z, double a) {
    (void) y;
    (void) z;
    double difference = x - a;
    return difference;
}
real(8) function code(x, y, z, a)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: a
code = x - a
end function
public static double code(double x, double y, double z, double a) {
return x - a;
}
def code(x, y, z, a): return x - a
function code(x, y, z, a) return Float64(x - a) end
function tmp = code(x, y, z, a) tmp = x - a; end
code[x_, y_, z_, a_] := N[(x - a), $MachinePrecision]
\begin{array}{l}
\\
x - a
\end{array}
Initial program 79.3%
Taylor expanded in z around 0
lower-/.f64N/A
lower-sin.f64N/A
lower-cos.f6455.7
Applied rewrites55.7%
Taylor expanded in y around 0
lower--.f64N/A
lower-/.f64N/A
lower-sin.f64N/A
lower-cos.f6438.5
Applied rewrites38.5%
Taylor expanded in a around 0
mul-1-negN/A
unsub-negN/A
lower--.f6419.6
Applied rewrites19.6%
(FPCore (x y z a) :precision binary64 (- a))
/* Degenerate approximation: -a; all other arguments are ignored. */
double code(double x, double y, double z, double a) {
    (void) x;
    (void) y;
    (void) z;
    return -a;
}
real(8) function code(x, y, z, a)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: a
code = -a
end function
public static double code(double x, double y, double z, double a) {
return -a;
}
def code(x, y, z, a): return -a
function code(x, y, z, a) return Float64(-a) end
function tmp = code(x, y, z, a) tmp = -a; end
code[x_, y_, z_, a_] := (-a)
\begin{array}{l}
\\
-a
\end{array}
Initial program 79.3%
Taylor expanded in z around 0
lower-/.f64N/A
lower-sin.f64N/A
lower-cos.f6455.7
Applied rewrites55.7%
Taylor expanded in y around 0
lower--.f64N/A
lower-/.f64N/A
lower-sin.f64N/A
lower-cos.f6438.5
Applied rewrites38.5%
Taylor expanded in a around 0
mul-1-negN/A
unsub-negN/A
lower--.f6419.6
Applied rewrites19.6%
Taylor expanded in x around 0
mul-1-negN/A
lower-neg.f643.7
Applied rewrites3.7%
(FPCore (x y z a) :precision binary64 x)
/* Degenerate approximation: x; all other arguments are ignored. */
double code(double x, double y, double z, double a) {
    (void) y;
    (void) z;
    (void) a;
    return x;
}
! Degenerate approximation: return x unchanged; y, z and a are ignored.
real(8) function code(x, y, z, a)
implicit none
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: a
code = x
end function
public static double code(double x, double y, double z, double a) {
return x;
}
def code(x, y, z, a): return x
function code(x, y, z, a) return x end
function tmp = code(x, y, z, a) tmp = x; end
code[x_, y_, z_, a_] := x
\begin{array}{l}
\\
x
\end{array}
Initial program 79.3%
Taylor expanded in a around 0
+-commutativeN/A
distribute-lft-inN/A
associate-*r*N/A
*-commutativeN/A
*-rgt-identityN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
lower-*.f6441.6
Applied rewrites41.6%
Taylor expanded in x around inf
Applied rewrites41.5%
Taylor expanded in x around inf
Applied rewrites29.0%
Final simplification29.0%
herbie shell --seed 2024216
(FPCore (x y z a)
:name "tan-example (used to crash)"
:precision binary64
:pre (and (and (and (or (== x 0.0) (and (<= 0.5884142 x) (<= x 505.5909))) (or (and (<= -1.796658e+308 y) (<= y -9.425585e-310)) (and (<= 1.284938e-309 y) (<= y 1.751224e+308)))) (or (and (<= -1.776707e+308 z) (<= z -8.599796e-310)) (and (<= 3.293145e-311 z) (<= z 1.725154e+308)))) (or (and (<= -1.796658e+308 a) (<= a -9.425585e-310)) (and (<= 1.284938e-309 a) (<= a 1.751224e+308))))
(+ x (- (tan (+ y z)) (tan a))))