
; Initial program: x + (tan(y + z) - tan(a)), evaluated in binary64.
(FPCore (x y z a) :precision binary64 (+ x (- (tan (+ y z)) (tan a))))
double code(double x, double y, double z, double a) {
return x + (tan((y + z)) - tan(a));
}
! Computes x + (tan(y + z) - tan(a)) in double precision.
real(8) function code(x, y, z, a)
    implicit none
    real(8), intent(in) :: x, y, z, a
    code = x + (tan(y + z) - tan(a))
end function code
// Computes x + (Math.tan(y + z) - Math.tan(a)) in double precision.
public static double code(double x, double y, double z, double a) {
    double tanSum = Math.tan(y + z);
    double tanA = Math.tan(a);
    return x + (tanSum - tanA);
}
def code(x, y, z, a):
    """Return x + (tan(y + z) - tan(a)) in double precision."""
    tan_sum = math.tan(y + z)
    tan_a = math.tan(a)
    return x + (tan_sum - tan_a)
# Computes x + (tan(y + z) - tan(a)), each intermediate rounded to Float64.
function code(x, y, z, a)
    t_sum = tan(Float64(y + z))
    diff = Float64(t_sum - tan(a))
    return Float64(x + diff)
end
% Computes x + (tan(y + z) - tan(a)).
function tmp = code(x, y, z, a)
    t_sum = tan(y + z);
    t_a = tan(a);
    tmp = x + (t_sum - t_a);
end
(* Machine-precision evaluation of x + (tan(y + z) - tan(a)); each N[..., $MachinePrecision] rounds one intermediate. *)
code[x_, y_, z_, a_] := N[(x + N[(N[Tan[N[(y + z), $MachinePrecision]], $MachinePrecision] - N[Tan[a], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + \left(\tan \left(y + z\right) - \tan a\right)
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 6 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x y z a) :precision binary64 (+ x (- (tan (+ y z)) (tan a))))
// Alternative 1: same expression as the initial program, x + (tan(y+z) - tan(a)).
double code(double x, double y, double z, double a) {
return x + (tan((y + z)) - tan(a));
}
! Alternative 1: same expression as the initial program.
real(8) function code(x, y, z, a)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: a
code = x + (tan((y + z)) - tan(a))
end function
// Alternative 1: same expression as the initial program.
public static double code(double x, double y, double z, double a) {
return x + (Math.tan((y + z)) - Math.tan(a));
}
# Alternative 1: same expression as the initial program.
def code(x, y, z, a): return x + (math.tan((y + z)) - math.tan(a))
# Alternative 1: same expression as the initial program.
function code(x, y, z, a) return Float64(x + Float64(tan(Float64(y + z)) - tan(a))) end
% Alternative 1: same expression as the initial program.
function tmp = code(x, y, z, a) tmp = x + (tan((y + z)) - tan(a)); end
code[x_, y_, z_, a_] := N[(x + N[(N[Tan[N[(y + z), $MachinePrecision]], $MachinePrecision] - N[Tan[a], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + \left(\tan \left(y + z\right) - \tan a\right)
\end{array}
(FPCore (x y z a) :precision binary64 (+ x (- (/ (+ (tan z) (tan y)) (fma (- (tan z)) (tan y) 1.0)) (tan a))))
double code(double x, double y, double z, double a) {
return x + (((tan(z) + tan(y)) / fma(-tan(z), tan(y), 1.0)) - tan(a));
}
# Tangent addition identity: tan(y+z) = (tan z + tan y) / (1 - tan z * tan y); denominator via fma.
function code(x, y, z, a) return Float64(x + Float64(Float64(Float64(tan(z) + tan(y)) / fma(Float64(-tan(z)), tan(y), 1.0)) - tan(a))) end
code[x_, y_, z_, a_] := N[(x + N[(N[(N[(N[Tan[z], $MachinePrecision] + N[Tan[y], $MachinePrecision]), $MachinePrecision] / N[((-N[Tan[z], $MachinePrecision]) * N[Tan[y], $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision] - N[Tan[a], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + \left(\frac{\tan z + \tan y}{\mathsf{fma}\left(-\tan z, \tan y, 1\right)} - \tan a\right)
\end{array}
Initial program 78.7%
lift-tan.f64  N/A
lift-+.f64  N/A
tan-sum  N/A
lower-/.f64  N/A
+-commutative  N/A
lower-+.f64  N/A
lower-tan.f64  N/A
lower-tan.f64  N/A
sub-neg  N/A
+-commutative  N/A
*-commutative  N/A
distribute-lft-neg-in  N/A
lower-fma.f64  N/A
lower-neg.f64  N/A
lower-tan.f64  N/A
lower-tan.f64  99.8
Applied rewrites99.8%
; Piecewise alternative keyed on tan(a):
;  - tan(a) <= -0.002 : reassociated sum
;  - tan(a) <= 1e-54  : tangent addition identity; tan(a) replaced by a cubic Taylor polynomial in a
;  - otherwise        : tan argument rewritten as an fma (algebraically equal to y + z)
(FPCore (x y z a)
:precision binary64
(if (<= (tan a) -0.002)
(- (+ (tan (+ z y)) x) (tan a))
(if (<= (tan a) 1e-54)
(+
x
(-
(/ (+ (tan z) (tan y)) (fma (- (tan z)) (tan y) 1.0))
(* (fma (* a a) 0.3333333333333333 1.0) a)))
(+ x (- (tan (fma y (/ y (- y z)) (* (- z) (/ z (- y z))))) (tan a))))))
double code(double x, double y, double z, double a) {
double tmp;
if (tan(a) <= -0.002) {
tmp = (tan((z + y)) + x) - tan(a);
} else if (tan(a) <= 1e-54) {
tmp = x + (((tan(z) + tan(y)) / fma(-tan(z), tan(y), 1.0)) - (fma((a * a), 0.3333333333333333, 1.0) * a));
} else {
tmp = x + (tan(fma(y, (y / (y - z)), (-z * (z / (y - z))))) - tan(a));
}
return tmp;
}
function code(x, y, z, a) tmp = 0.0 if (tan(a) <= -0.002) tmp = Float64(Float64(tan(Float64(z + y)) + x) - tan(a)); elseif (tan(a) <= 1e-54) tmp = Float64(x + Float64(Float64(Float64(tan(z) + tan(y)) / fma(Float64(-tan(z)), tan(y), 1.0)) - Float64(fma(Float64(a * a), 0.3333333333333333, 1.0) * a))); else tmp = Float64(x + Float64(tan(fma(y, Float64(y / Float64(y - z)), Float64(Float64(-z) * Float64(z / Float64(y - z))))) - tan(a))); end return tmp end
code[x_, y_, z_, a_] := If[LessEqual[N[Tan[a], $MachinePrecision], -0.002], N[(N[(N[Tan[N[(z + y), $MachinePrecision]], $MachinePrecision] + x), $MachinePrecision] - N[Tan[a], $MachinePrecision]), $MachinePrecision], If[LessEqual[N[Tan[a], $MachinePrecision], 1e-54], N[(x + N[(N[(N[(N[Tan[z], $MachinePrecision] + N[Tan[y], $MachinePrecision]), $MachinePrecision] / N[((-N[Tan[z], $MachinePrecision]) * N[Tan[y], $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision] - N[(N[(N[(a * a), $MachinePrecision] * 0.3333333333333333 + 1.0), $MachinePrecision] * a), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(x + N[(N[Tan[N[(y * N[(y / N[(y - z), $MachinePrecision]), $MachinePrecision] + N[((-z) * N[(z / N[(y - z), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] - N[Tan[a], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\tan a \leq -0.002:\\
\;\;\;\;\left(\tan \left(z + y\right) + x\right) - \tan a\\
\mathbf{elif}\;\tan a \leq 10^{-54}:\\
\;\;\;\;x + \left(\frac{\tan z + \tan y}{\mathsf{fma}\left(-\tan z, \tan y, 1\right)} - \mathsf{fma}\left(a \cdot a, 0.3333333333333333, 1\right) \cdot a\right)\\
\mathbf{else}:\\
\;\;\;\;x + \left(\tan \left(\mathsf{fma}\left(y, \frac{y}{y - z}, \left(-z\right) \cdot \frac{z}{y - z}\right)\right) - \tan a\right)\\
\end{array}
\end{array}
if (tan.f64 a) < -2e-3Initial program 77.1%
lift-+.f64N/A
lift--.f64N/A
associate-+r-N/A
lower--.f64N/A
+-commutativeN/A
lower-+.f6477.2
lift-+.f64N/A
+-commutativeN/A
lower-+.f6477.2
Applied rewrites77.2%
if -2e-3 < (tan.f64 a) < 1e-54Initial program 78.5%
lift-tan.f64N/A
lift-+.f64N/A
tan-sumN/A
lower-/.f64N/A
+-commutativeN/A
lower-+.f64N/A
lower-tan.f64N/A
lower-tan.f64N/A
sub-negN/A
+-commutativeN/A
*-commutativeN/A
distribute-lft-neg-inN/A
lower-fma.f64N/A
lower-neg.f64N/A
lower-tan.f64N/A
lower-tan.f6499.8
Applied rewrites99.8%
Taylor expanded in a around 0
*-commutativeN/A
lower-*.f64N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6499.8
Applied rewrites99.8%
if 1e-54 < (tan.f64 a) Initial program 80.2%
lift-+.f64N/A
flip-+N/A
div-subN/A
sub-negN/A
associate-/l*N/A
lower-fma.f64N/A
lower-/.f64N/A
lower--.f64N/A
lower-neg.f64N/A
associate-/l*N/A
lower-*.f64N/A
lower-/.f64N/A
lower--.f6480.2
Applied rewrites80.2%
Final simplification89.2%
; Final Herbie rewrite (89.2% reported accuracy), three regimes on tan(a).
; Note: (pow (pow t -1.0) -1.0) is a double reciprocal of t, not an exact
; no-op in floating point -- it may not round-trip to t exactly.
(FPCore (x y z a)
:precision binary64
(if (<= (tan a) -2e-10)
(+ x (pow (pow (- (tan (+ z y)) (tan a)) -1.0) -1.0))
(if (<= (tan a) 1e-54)
(- (/ (+ (tan y) (tan z)) (- 1.0 (* (tan y) (tan z)))) (- x))
(+ x (- (tan (fma y (/ y (- y z)) (* (- z) (/ z (- y z))))) (tan a))))))
/* Final Herbie rewrite of x + (tan(y + z) - tan(a)), three regimes on tan(a).
 * NOTE(review): pow(pow(t, -1.0), -1.0) is a double reciprocal emitted by the
 * rewrite engine; in floating point it may not round-trip to t exactly. */
double code(double x, double y, double z, double a) {
double tmp;
if (tan(a) <= -2e-10) {
/* Regime 1: reassociated difference, passed through a double reciprocal. */
tmp = x + pow(pow((tan((z + y)) - tan(a)), -1.0), -1.0);
} else if (tan(a) <= 1e-54) {
/* Regime 2: tangent addition identity; the tan(a) term is dropped here. */
tmp = ((tan(y) + tan(z)) / (1.0 - (tan(y) * tan(z)))) - -x;
} else {
/* Regime 3: tan argument rewritten as an fma (algebraically y + z). */
tmp = x + (tan(fma(y, (y / (y - z)), (-z * (z / (y - z))))) - tan(a));
}
return tmp;
}
function code(x, y, z, a) tmp = 0.0 if (tan(a) <= -2e-10) tmp = Float64(x + ((Float64(tan(Float64(z + y)) - tan(a)) ^ -1.0) ^ -1.0)); elseif (tan(a) <= 1e-54) tmp = Float64(Float64(Float64(tan(y) + tan(z)) / Float64(1.0 - Float64(tan(y) * tan(z)))) - Float64(-x)); else tmp = Float64(x + Float64(tan(fma(y, Float64(y / Float64(y - z)), Float64(Float64(-z) * Float64(z / Float64(y - z))))) - tan(a))); end return tmp end
code[x_, y_, z_, a_] := If[LessEqual[N[Tan[a], $MachinePrecision], -2e-10], N[(x + N[Power[N[Power[N[(N[Tan[N[(z + y), $MachinePrecision]], $MachinePrecision] - N[Tan[a], $MachinePrecision]), $MachinePrecision], -1.0], $MachinePrecision], -1.0], $MachinePrecision]), $MachinePrecision], If[LessEqual[N[Tan[a], $MachinePrecision], 1e-54], N[(N[(N[(N[Tan[y], $MachinePrecision] + N[Tan[z], $MachinePrecision]), $MachinePrecision] / N[(1.0 - N[(N[Tan[y], $MachinePrecision] * N[Tan[z], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - (-x)), $MachinePrecision], N[(x + N[(N[Tan[N[(y * N[(y / N[(y - z), $MachinePrecision]), $MachinePrecision] + N[((-z) * N[(z / N[(y - z), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] - N[Tan[a], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\tan a \leq -2 \cdot 10^{-10}:\\
\;\;\;\;x + {\left({\left(\tan \left(z + y\right) - \tan a\right)}^{-1}\right)}^{-1}\\
\mathbf{elif}\;\tan a \leq 10^{-54}:\\
\;\;\;\;\frac{\tan y + \tan z}{1 - \tan y \cdot \tan z} - \left(-x\right)\\
\mathbf{else}:\\
\;\;\;\;x + \left(\tan \left(\mathsf{fma}\left(y, \frac{y}{y - z}, \left(-z\right) \cdot \frac{z}{y - z}\right)\right) - \tan a\right)\\
\end{array}
\end{array}
if (tan.f64 a) < -2.00000000000000007e-10Initial program 77.9%
lift--.f64N/A
flip--N/A
clear-numN/A
lower-/.f64N/A
clear-numN/A
flip--N/A
lift--.f64N/A
lower-/.f6477.9
lift-+.f64N/A
+-commutativeN/A
lower-+.f6477.9
Applied rewrites77.9%
if -2.00000000000000007e-10 < (tan.f64 a) < 1e-54Initial program 78.1%
lift-+.f64N/A
+-commutativeN/A
lift--.f64N/A
associate-+l-N/A
lower--.f64N/A
lift-+.f64N/A
+-commutativeN/A
lower-+.f64N/A
lower--.f6478.1
Applied rewrites78.1%
Taylor expanded in x around inf
mul-1-negN/A
lower-neg.f6478.1
Applied rewrites78.1%
lift-tan.f64N/A
lift-+.f64N/A
tan-sumN/A
lift-tan.f64N/A
lift-tan.f64N/A
lift-+.f64N/A
lower-/.f64N/A
lift-+.f64N/A
+-commutativeN/A
lower-+.f64N/A
lift-tan.f64N/A
lift-tan.f64N/A
lift-*.f64N/A
lower--.f6499.6
lift-*.f64N/A
*-commutativeN/A
lower-*.f6499.6
Applied rewrites99.6%
if 1e-54 < (tan.f64 a) Initial program 80.2%
lift-+.f64N/A
flip-+N/A
div-subN/A
sub-negN/A
associate-/l*N/A
lower-fma.f64N/A
lower-/.f64N/A
lower--.f64N/A
lower-neg.f64N/A
associate-/l*N/A
lower-*.f64N/A
lower-/.f64N/A
lower--.f6480.2
Applied rewrites80.2%
Final simplification89.1%
(FPCore (x y z a) :precision binary64 (+ x (- (tan (fma y (/ y (- y z)) (* (- z) (/ z (- y z))))) (tan a))))
double code(double x, double y, double z, double a) {
return x + (tan(fma(y, (y / (y - z)), (-z * (z / (y - z))))) - tan(a));
}
# tan argument evaluated as fma(y, y/(y-z), -z*(z/(y-z))), algebraically equal to y + z.
function code(x, y, z, a) return Float64(x + Float64(tan(fma(y, Float64(y / Float64(y - z)), Float64(Float64(-z) * Float64(z / Float64(y - z))))) - tan(a))) end
code[x_, y_, z_, a_] := N[(x + N[(N[Tan[N[(y * N[(y / N[(y - z), $MachinePrecision]), $MachinePrecision] + N[((-z) * N[(z / N[(y - z), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] - N[Tan[a], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + \left(\tan \left(\mathsf{fma}\left(y, \frac{y}{y - z}, \left(-z\right) \cdot \frac{z}{y - z}\right)\right) - \tan a\right)
\end{array}
Initial program 78.7%
lift-+.f64N/A
flip-+N/A
div-subN/A
sub-negN/A
associate-/l*N/A
lower-fma.f64N/A
lower-/.f64N/A
lower--.f64N/A
lower-neg.f64N/A
associate-/l*N/A
lower-*.f64N/A
lower-/.f64N/A
lower--.f6478.7
Applied rewrites78.7%
Final simplification78.7%
(FPCore (x y z a) :precision binary64 (+ x (- (tan (+ y z)) (tan a))))
// Repeat of the initial program: x + (tan(y+z) - tan(a)).
double code(double x, double y, double z, double a) {
return x + (tan((y + z)) - tan(a));
}
! Repeat of the initial program.
real(8) function code(x, y, z, a)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: a
code = x + (tan((y + z)) - tan(a))
end function
// Repeat of the initial program.
public static double code(double x, double y, double z, double a) {
return x + (Math.tan((y + z)) - Math.tan(a));
}
# Repeat of the initial program.
def code(x, y, z, a): return x + (math.tan((y + z)) - math.tan(a))
# Repeat of the initial program.
function code(x, y, z, a) return Float64(x + Float64(tan(Float64(y + z)) - tan(a))) end
% Repeat of the initial program.
function tmp = code(x, y, z, a) tmp = x + (tan((y + z)) - tan(a)); end
code[x_, y_, z_, a_] := N[(x + N[(N[Tan[N[(y + z), $MachinePrecision]], $MachinePrecision] - N[Tan[a], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + \left(\tan \left(y + z\right) - \tan a\right)
\end{array}
Initial program 78.7%
(FPCore (x y z a) :precision binary64 (- (tan (+ z y)) (- x)))
double code(double x, double y, double z, double a) {
return tan((z + y)) - -x;
}
! Computes tan(z + y) - (-x); the tan(a) term was Taylor-expanded away, so
! `a` is unused.
! Fix: the generated `- -x` places two operators in a row, which is not
! standard-conforming Fortran (some compilers reject it); the negation must
! be parenthesized.  The computed value is unchanged.
real(8) function code(x, y, z, a)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: a
code = tan((z + y)) - (-x)
end function
// Computes tan(z + y) - (-x); the tan(a) term was Taylor-expanded away
// (reported accuracy of this alternative drops to 51.0%).
public static double code(double x, double y, double z, double a) {
return Math.tan((z + y)) - -x;
}
def code(x, y, z, a):
    """Return tan(z + y) + x; the tan(a) term was Taylor-expanded away (a unused)."""
    return math.tan(z + y) + x
# Computes tan(z + y) - (-x); the tan(a) term was Taylor-expanded away (a unused).
function code(x, y, z, a) return Float64(tan(Float64(z + y)) - Float64(-x)) end
% Computes tan(z + y) - -x; the tan(a) term was Taylor-expanded away (a unused).
function tmp = code(x, y, z, a) tmp = tan((z + y)) - -x; end
code[x_, y_, z_, a_] := N[(N[Tan[N[(z + y), $MachinePrecision]], $MachinePrecision] - (-x)), $MachinePrecision]
\begin{array}{l}
\\
\tan \left(z + y\right) - \left(-x\right)
\end{array}
Initial program 78.7%
lift-+.f64N/A
+-commutativeN/A
lift--.f64N/A
associate-+l-N/A
lower--.f64N/A
lift-+.f64N/A
+-commutativeN/A
lower-+.f64N/A
lower--.f6478.6
Applied rewrites78.6%
Taylor expanded in x around inf
mul-1-negN/A
lower-neg.f6451.0
Applied rewrites51.0%
herbie shell --seed 2024309
; Input FPCore as submitted to Herbie (seed 2024309, see the shell command above).
; :pre restricts sampling: x is 0.0 or in [0.5884142, 505.5909]; y, z, and a
; each lie in the two-sided ranges listed below (excluding a band around 0).
(FPCore (x y z a)
:name "tan-example (used to crash)"
:precision binary64
:pre (and (and (and (or (== x 0.0) (and (<= 0.5884142 x) (<= x 505.5909))) (or (and (<= -1.796658e+308 y) (<= y -9.425585e-310)) (and (<= 1.284938e-309 y) (<= y 1.751224e+308)))) (or (and (<= -1.776707e+308 z) (<= z -8.599796e-310)) (and (<= 3.293145e-311 z) (<= z 1.725154e+308)))) (or (and (<= -1.796658e+308 a) (<= a -9.425585e-310)) (and (<= 1.284938e-309 a) (<= a 1.751224e+308))))
(+ x (- (tan (+ y z)) (tan a))))