
(FPCore (x y z a) :precision binary64 (+ x (- (tan (+ y z)) (tan a))))
double code(double x, double y, double z, double a) {
return x + (tan((y + z)) - tan(a));
}
! Herbie initial program: x + (tan(y + z) - tan(a)), evaluated in binary64.
! Fix: add implicit none so all names must be declared (the generated code
! relied on implicit typing).
real(8) function code(x, y, z, a)
implicit none
real(8), intent(in) :: x
real(8), intent(in) :: y
real(8), intent(in) :: z
real(8), intent(in) :: a
code = x + (tan((y + z)) - tan(a))
end function code
/** Herbie initial program: returns x + (tan(y + z) - tan(a)) in double precision. */
public static double code(double x, double y, double z, double a) {
    double tanSum = Math.tan(y + z);
    return x + (tanSum - Math.tan(a));
}
def code(x, y, z, a):
    """Herbie initial program: return x + (tan(y + z) - tan(a))."""
    tan_sum = math.tan(y + z)
    return x + (tan_sum - math.tan(a))
# Herbie initial program: x + (tan(y + z) - tan(a)); the explicit Float64(...)
# wrappers pin each intermediate to binary64 rounding.
function code(x, y, z, a)
    inner = Float64(tan(Float64(y + z)) - tan(a))
    return Float64(x + inner)
end
% Herbie initial program: computes x + (tan(y + z) - tan(a)).
function tmp = code(x, y, z, a)
  delta = tan(y + z) - tan(a);
  tmp = x + delta;
end
(* Herbie initial program: x + (tan(y + z) - tan(a)); each N[..., $MachinePrecision] mirrors one binary64 rounding step of the compiled code. *)
code[x_, y_, z_, a_] := N[(x + N[(N[Tan[N[(y + z), $MachinePrecision]], $MachinePrecision] - N[Tan[a], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + \left(\tan \left(y + z\right) - \tan a\right)
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 11 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x y z a) :precision binary64 (+ x (- (tan (+ y z)) (tan a))))
double code(double x, double y, double z, double a) {
return x + (tan((y + z)) - tan(a));
}
real(8) function code(x, y, z, a)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: a
code = x + (tan((y + z)) - tan(a))
end function
public static double code(double x, double y, double z, double a) {
return x + (Math.tan((y + z)) - Math.tan(a));
}
def code(x, y, z, a): return x + (math.tan((y + z)) - math.tan(a))
function code(x, y, z, a) return Float64(x + Float64(tan(Float64(y + z)) - tan(a))) end
function tmp = code(x, y, z, a) tmp = x + (tan((y + z)) - tan(a)); end
code[x_, y_, z_, a_] := N[(x + N[(N[Tan[N[(y + z), $MachinePrecision]], $MachinePrecision] - N[Tan[a], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + \left(\tan \left(y + z\right) - \tan a\right)
\end{array}
(FPCore (x y z a) :precision binary64 (+ x (fma (+ (tan y) (tan z)) (/ 1.0 (- 1.0 (+ 1.0 (fma (tan y) (tan z) -1.0)))) (- (tan a)))))
double code(double x, double y, double z, double a) {
return x + fma((tan(y) + tan(z)), (1.0 / (1.0 - (1.0 + fma(tan(y), tan(z), -1.0)))), -tan(a));
}
function code(x, y, z, a) return Float64(x + fma(Float64(tan(y) + tan(z)), Float64(1.0 / Float64(1.0 - Float64(1.0 + fma(tan(y), tan(z), -1.0)))), Float64(-tan(a)))) end
code[x_, y_, z_, a_] := N[(x + N[(N[(N[Tan[y], $MachinePrecision] + N[Tan[z], $MachinePrecision]), $MachinePrecision] * N[(1.0 / N[(1.0 - N[(1.0 + N[(N[Tan[y], $MachinePrecision] * N[Tan[z], $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + (-N[Tan[a], $MachinePrecision])), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + \mathsf{fma}\left(\tan y + \tan z, \frac{1}{1 - \left(1 + \mathsf{fma}\left(\tan y, \tan z, -1\right)\right)}, -\tan a\right)
\end{array}
Initial program 79.3%
tan-sum 99.8%
div-inv 99.8%
fma-neg 99.8%
Applied egg-rr 99.8%
expm1-log1p-u 92.4%
expm1-udef 92.4%
log1p-udef 92.4%
add-exp-log 99.8%
Applied egg-rr 99.8%
associate--l+ 99.8%
fma-neg 99.8%
metadata-eval 99.8%
Simplified 99.8%
Final simplification 99.8%
(FPCore (x y z a)
:precision binary64
(let* ((t_0 (+ (tan y) (tan z))))
(if (or (<= (tan a) -4e-12) (not (<= (tan a) 0.04)))
(+ x (fma t_0 1.0 (- (tan a))))
(+ x (/ t_0 (- 1.0 (* (tan y) (tan z))))))))
double code(double x, double y, double z, double a) {
double t_0 = tan(y) + tan(z);
double tmp;
if ((tan(a) <= -4e-12) || !(tan(a) <= 0.04)) {
tmp = x + fma(t_0, 1.0, -tan(a));
} else {
tmp = x + (t_0 / (1.0 - (tan(y) * tan(z))));
}
return tmp;
}
function code(x, y, z, a) t_0 = Float64(tan(y) + tan(z)) tmp = 0.0 if ((tan(a) <= -4e-12) || !(tan(a) <= 0.04)) tmp = Float64(x + fma(t_0, 1.0, Float64(-tan(a)))); else tmp = Float64(x + Float64(t_0 / Float64(1.0 - Float64(tan(y) * tan(z))))); end return tmp end
code[x_, y_, z_, a_] := Block[{t$95$0 = N[(N[Tan[y], $MachinePrecision] + N[Tan[z], $MachinePrecision]), $MachinePrecision]}, If[Or[LessEqual[N[Tan[a], $MachinePrecision], -4e-12], N[Not[LessEqual[N[Tan[a], $MachinePrecision], 0.04]], $MachinePrecision]], N[(x + N[(t$95$0 * 1.0 + (-N[Tan[a], $MachinePrecision])), $MachinePrecision]), $MachinePrecision], N[(x + N[(t$95$0 / N[(1.0 - N[(N[Tan[y], $MachinePrecision] * N[Tan[z], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \tan y + \tan z\\
\mathbf{if}\;\tan a \leq -4 \cdot 10^{-12} \lor \neg \left(\tan a \leq 0.04\right):\\
\;\;\;\;x + \mathsf{fma}\left(t_0, 1, -\tan a\right)\\
\mathbf{else}:\\
\;\;\;\;x + \frac{t_0}{1 - \tan y \cdot \tan z}\\
\end{array}
\end{array}
if (tan.f64 a) < -3.99999999999999992e-12 or 0.0400000000000000008 < (tan.f64 a) Initial program 80.0%
tan-sum99.8%
div-inv99.7%
fma-neg99.8%
Applied egg-rr99.8%
expm1-log1p-u94.1%
expm1-udef94.1%
log1p-udef94.1%
add-exp-log99.8%
Applied egg-rr99.8%
associate--l+99.8%
fma-neg99.8%
metadata-eval99.8%
Simplified99.8%
fma-udef99.7%
associate--r+99.7%
metadata-eval99.7%
Applied egg-rr99.7%
fma-def99.8%
sub0-neg99.8%
Simplified99.8%
Taylor expanded in y around 0 80.6%
if -3.99999999999999992e-12 < (tan.f64 a) < 0.0400000000000000008 Initial program 78.8%
Taylor expanded in a around 0 78.8%
*-lft-identity78.8%
metadata-eval78.8%
cancel-sign-sub-inv78.8%
*-lft-identity78.8%
metadata-eval78.8%
cancel-sign-sub-inv78.8%
sub-neg78.8%
mul-1-neg78.8%
remove-double-neg78.8%
+-commutative78.8%
sub-neg78.8%
mul-1-neg78.8%
remove-double-neg78.8%
+-commutative78.8%
Simplified78.8%
tan-quot78.8%
+-commutative78.8%
expm1-log1p-u67.0%
expm1-udef67.0%
Applied egg-rr67.0%
expm1-def67.0%
expm1-log1p78.8%
+-commutative78.8%
Simplified78.8%
tan-sum99.0%
+-commutative99.0%
Applied egg-rr99.0%
Final simplification90.1%
(FPCore (x y z a) :precision binary64 (+ x (fma (+ (tan y) (tan z)) (/ 1.0 (- 1.0 (* (tan y) (tan z)))) (- (tan a)))))
/* Herbie alternative: x + fma(tan y + tan z, 1/(1 - tan y * tan z), -tan a)
   — the tangent-addition formula for tan(y+z), fused with the -tan(a) term. */
double code(double x, double y, double z, double a) {
return x + fma((tan(y) + tan(z)), (1.0 / (1.0 - (tan(y) * tan(z)))), -tan(a));
}
function code(x, y, z, a) return Float64(x + fma(Float64(tan(y) + tan(z)), Float64(1.0 / Float64(1.0 - Float64(tan(y) * tan(z)))), Float64(-tan(a)))) end
code[x_, y_, z_, a_] := N[(x + N[(N[(N[Tan[y], $MachinePrecision] + N[Tan[z], $MachinePrecision]), $MachinePrecision] * N[(1.0 / N[(1.0 - N[(N[Tan[y], $MachinePrecision] * N[Tan[z], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + (-N[Tan[a], $MachinePrecision])), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + \mathsf{fma}\left(\tan y + \tan z, \frac{1}{1 - \tan y \cdot \tan z}, -\tan a\right)
\end{array}
Initial program 79.3%
tan-sum99.8%
div-inv99.8%
fma-neg99.8%
Applied egg-rr99.8%
Final simplification99.8%
(FPCore (x y z a) :precision binary64 (+ x (- (/ (+ (tan y) (tan z)) (- 1.0 (* (tan y) (tan z)))) (tan a))))
double code(double x, double y, double z, double a) {
return x + (((tan(y) + tan(z)) / (1.0 - (tan(y) * tan(z)))) - tan(a));
}
! Herbie alternative using the tangent-addition identity:
! x + ((tan y + tan z) / (1 - tan y * tan z) - tan a).
! Fix: add implicit none (the generated code relied on implicit typing).
real(8) function code(x, y, z, a)
implicit none
real(8), intent(in) :: x
real(8), intent(in) :: y
real(8), intent(in) :: z
real(8), intent(in) :: a
code = x + (((tan(y) + tan(z)) / (1.0d0 - (tan(y) * tan(z)))) - tan(a))
end function code
/** Herbie alternative: x + ((tan y + tan z) / (1 - tan y * tan z) - tan a). */
public static double code(double x, double y, double z, double a) {
    double num = Math.tan(y) + Math.tan(z);
    double den = 1.0 - Math.tan(y) * Math.tan(z);
    return x + (num / den - Math.tan(a));
}
def code(x, y, z, a):
    """Herbie alternative: x + ((tan y + tan z) / (1 - tan y * tan z) - tan a)."""
    ty = math.tan(y)
    tz = math.tan(z)
    ratio = (ty + tz) / (1.0 - (ty * tz))
    return x + (ratio - math.tan(a))
function code(x, y, z, a) return Float64(x + Float64(Float64(Float64(tan(y) + tan(z)) / Float64(1.0 - Float64(tan(y) * tan(z)))) - tan(a))) end
function tmp = code(x, y, z, a) tmp = x + (((tan(y) + tan(z)) / (1.0 - (tan(y) * tan(z)))) - tan(a)); end
code[x_, y_, z_, a_] := N[(x + N[(N[(N[(N[Tan[y], $MachinePrecision] + N[Tan[z], $MachinePrecision]), $MachinePrecision] / N[(1.0 - N[(N[Tan[y], $MachinePrecision] * N[Tan[z], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[Tan[a], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + \left(\frac{\tan y + \tan z}{1 - \tan y \cdot \tan z} - \tan a\right)
\end{array}
Initial program 79.3%
tan-sum99.8%
div-inv99.8%
Applied egg-rr99.8%
associate-*r/99.8%
*-rgt-identity99.8%
Simplified99.8%
Final simplification99.8%
(FPCore (x y z a) :precision binary64 (+ x (fma (+ (tan y) (tan z)) 1.0 (- (tan a)))))
/* Herbie alternative: x + fma(tan y + tan z, 1.0, -tan a) — the quotient's
   denominator is replaced by 1.0 (Taylor-truncated variant per the trace). */
double code(double x, double y, double z, double a) {
return x + fma((tan(y) + tan(z)), 1.0, -tan(a));
}
function code(x, y, z, a) return Float64(x + fma(Float64(tan(y) + tan(z)), 1.0, Float64(-tan(a)))) end
code[x_, y_, z_, a_] := N[(x + N[(N[(N[Tan[y], $MachinePrecision] + N[Tan[z], $MachinePrecision]), $MachinePrecision] * 1.0 + (-N[Tan[a], $MachinePrecision])), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + \mathsf{fma}\left(\tan y + \tan z, 1, -\tan a\right)
\end{array}
Initial program 79.3%
tan-sum99.8%
div-inv99.8%
fma-neg99.8%
Applied egg-rr99.8%
expm1-log1p-u92.4%
expm1-udef92.4%
log1p-udef92.4%
add-exp-log99.8%
Applied egg-rr99.8%
associate--l+99.8%
fma-neg99.8%
metadata-eval99.8%
Simplified99.8%
fma-udef99.8%
associate--r+99.8%
metadata-eval99.8%
Applied egg-rr99.8%
fma-def99.8%
sub0-neg99.8%
Simplified99.8%
Taylor expanded in y around 0 80.1%
Final simplification80.1%
(FPCore (x y z a) :precision binary64 (if (<= (+ y z) -500.0) (+ x (tan (+ y z))) (+ x (- (tan z) (tan a)))))
double code(double x, double y, double z, double a) {
double tmp;
if ((y + z) <= -500.0) {
tmp = x + tan((y + z));
} else {
tmp = x + (tan(z) - tan(a));
}
return tmp;
}
! Piecewise Herbie alternative: for y + z <= -500 keep tan(y + z);
! otherwise the truncated form x + (tan(z) - tan(a)).
! Fix: add implicit none (the generated code relied on implicit typing).
real(8) function code(x, y, z, a)
implicit none
real(8), intent(in) :: x
real(8), intent(in) :: y
real(8), intent(in) :: z
real(8), intent(in) :: a
real(8) :: tmp
if ((y + z) <= (-500.0d0)) then
  tmp = x + tan((y + z))
else
  tmp = x + (tan(z) - tan(a))
end if
code = tmp
end function code
/** Piecewise Herbie alternative: tan(y + z) for sums <= -500, else tan(z) - tan(a). */
public static double code(double x, double y, double z, double a) {
    if ((y + z) <= -500.0) {
        return x + Math.tan(y + z);
    }
    return x + (Math.tan(z) - Math.tan(a));
}
def code(x, y, z, a):
    """Piecewise Herbie alternative: tan(y + z) when y + z <= -500, else tan(z) - tan(a)."""
    if (y + z) <= -500.0:
        return x + math.tan(y + z)
    return x + (math.tan(z) - math.tan(a))
function code(x, y, z, a) tmp = 0.0 if (Float64(y + z) <= -500.0) tmp = Float64(x + tan(Float64(y + z))); else tmp = Float64(x + Float64(tan(z) - tan(a))); end return tmp end
function tmp_2 = code(x, y, z, a) tmp = 0.0; if ((y + z) <= -500.0) tmp = x + tan((y + z)); else tmp = x + (tan(z) - tan(a)); end tmp_2 = tmp; end
code[x_, y_, z_, a_] := If[LessEqual[N[(y + z), $MachinePrecision], -500.0], N[(x + N[Tan[N[(y + z), $MachinePrecision]], $MachinePrecision]), $MachinePrecision], N[(x + N[(N[Tan[z], $MachinePrecision] - N[Tan[a], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;y + z \leq -500:\\
\;\;\;\;x + \tan \left(y + z\right)\\
\mathbf{else}:\\
\;\;\;\;x + \left(\tan z - \tan a\right)\\
\end{array}
\end{array}
if (+.f64 y z) < -500Initial program 69.0%
Taylor expanded in a around 0 51.3%
*-lft-identity51.3%
metadata-eval51.3%
cancel-sign-sub-inv51.3%
*-lft-identity51.3%
metadata-eval51.3%
cancel-sign-sub-inv51.3%
sub-neg51.3%
mul-1-neg51.3%
remove-double-neg51.3%
+-commutative51.3%
sub-neg51.3%
mul-1-neg51.3%
remove-double-neg51.3%
+-commutative51.3%
Simplified51.3%
tan-quot51.3%
+-commutative51.3%
expm1-log1p-u39.3%
expm1-udef39.3%
Applied egg-rr39.3%
expm1-def39.3%
expm1-log1p51.3%
+-commutative51.3%
Simplified51.3%
if -500 < (+.f64 y z) Initial program 86.4%
add-sqr-sqrt44.3%
sqrt-unprod73.7%
pow273.7%
Applied egg-rr73.7%
Taylor expanded in y around 0 62.9%
tan-quot62.9%
sqrt-pow169.2%
metadata-eval69.2%
pow169.2%
*-un-lft-identity69.2%
Applied egg-rr69.2%
*-lft-identity69.2%
Simplified69.2%
Final simplification61.9%
(FPCore (x y z a) :precision binary64 (+ x (- (tan (+ y z)) (tan a))))
double code(double x, double y, double z, double a) {
return x + (tan((y + z)) - tan(a));
}
real(8) function code(x, y, z, a)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: a
code = x + (tan((y + z)) - tan(a))
end function
public static double code(double x, double y, double z, double a) {
return x + (Math.tan((y + z)) - Math.tan(a));
}
def code(x, y, z, a): return x + (math.tan((y + z)) - math.tan(a))
function code(x, y, z, a) return Float64(x + Float64(tan(Float64(y + z)) - tan(a))) end
function tmp = code(x, y, z, a) tmp = x + (tan((y + z)) - tan(a)); end
code[x_, y_, z_, a_] := N[(x + N[(N[Tan[N[(y + z), $MachinePrecision]], $MachinePrecision] - N[Tan[a], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + \left(\tan \left(y + z\right) - \tan a\right)
\end{array}
Initial program 79.3%
Final simplification79.3%
(FPCore (x y z a) :precision binary64 (if (or (<= a -7.2e-22) (not (<= a 2.7e+21))) (- x (tan a)) (+ x (tan (+ y z)))))
double code(double x, double y, double z, double a) {
double tmp;
if ((a <= -7.2e-22) || !(a <= 2.7e+21)) {
tmp = x - tan(a);
} else {
tmp = x + tan((y + z));
}
return tmp;
}
! Piecewise Herbie alternative branching on a: outside (-7.2e-22, 2.7e+21]
! return x - tan(a); inside, x + tan(y + z).
! Fix: add implicit none (the generated code relied on implicit typing).
real(8) function code(x, y, z, a)
implicit none
real(8), intent(in) :: x
real(8), intent(in) :: y
real(8), intent(in) :: z
real(8), intent(in) :: a
real(8) :: tmp
if ((a <= (-7.2d-22)) .or. (.not. (a <= 2.7d+21))) then
  tmp = x - tan(a)
else
  tmp = x + tan((y + z))
end if
code = tmp
end function code
/** Piecewise Herbie alternative: x - tan(a) when a is outside (-7.2e-22, 2.7e+21], else x + tan(y + z). */
public static double code(double x, double y, double z, double a) {
    if (a <= -7.2e-22 || !(a <= 2.7e+21)) {
        return x - Math.tan(a);
    }
    return x + Math.tan(y + z);
}
def code(x, y, z, a):
    """Piecewise Herbie alternative: x - tan(a) when a is outside (-7.2e-22, 2.7e+21], else x + tan(y + z)."""
    if (a <= -7.2e-22) or not (a <= 2.7e+21):
        return x - math.tan(a)
    return x + math.tan(y + z)
function code(x, y, z, a) tmp = 0.0 if ((a <= -7.2e-22) || !(a <= 2.7e+21)) tmp = Float64(x - tan(a)); else tmp = Float64(x + tan(Float64(y + z))); end return tmp end
function tmp_2 = code(x, y, z, a) tmp = 0.0; if ((a <= -7.2e-22) || ~((a <= 2.7e+21))) tmp = x - tan(a); else tmp = x + tan((y + z)); end tmp_2 = tmp; end
code[x_, y_, z_, a_] := If[Or[LessEqual[a, -7.2e-22], N[Not[LessEqual[a, 2.7e+21]], $MachinePrecision]], N[(x - N[Tan[a], $MachinePrecision]), $MachinePrecision], N[(x + N[Tan[N[(y + z), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;a \leq -7.2 \cdot 10^{-22} \lor \neg \left(a \leq 2.7 \cdot 10^{+21}\right):\\
\;\;\;\;x - \tan a\\
\mathbf{else}:\\
\;\;\;\;x + \tan \left(y + z\right)\\
\end{array}
\end{array}
if a < -7.1999999999999996e-22 or 2.7e21 < a Initial program 77.0%
associate-+r-76.9%
Simplified76.9%
flip-+76.8%
frac-2neg76.8%
pow276.8%
Applied egg-rr76.8%
neg-sub076.8%
associate--r-76.8%
neg-sub076.8%
+-commutative76.8%
sub-neg76.8%
neg-sub076.8%
associate--r-76.8%
neg-sub076.8%
+-commutative76.8%
sub-neg76.8%
+-commutative76.8%
+-commutative76.8%
Simplified76.8%
Taylor expanded in x around inf 44.2%
unpow244.2%
neg-mul-144.2%
distribute-rgt-neg-in44.2%
Simplified44.2%
Taylor expanded in x around inf 41.0%
neg-mul-141.0%
Simplified41.0%
sub-neg41.0%
Applied egg-rr41.0%
sub-neg41.0%
distribute-rgt-neg-out41.0%
unpow241.0%
*-rgt-identity41.0%
associate-*r/40.9%
unpow240.9%
distribute-rgt-neg-out40.9%
associate-*l*41.0%
rgt-mult-inverse41.0%
*-rgt-identity41.0%
Simplified41.0%
if -7.1999999999999996e-22 < a < 2.7e21Initial program 81.6%
Taylor expanded in a around 0 79.8%
*-lft-identity79.8%
metadata-eval79.8%
cancel-sign-sub-inv79.8%
*-lft-identity79.8%
metadata-eval79.8%
cancel-sign-sub-inv79.8%
sub-neg79.8%
mul-1-neg79.8%
remove-double-neg79.8%
+-commutative79.8%
sub-neg79.8%
mul-1-neg79.8%
remove-double-neg79.8%
+-commutative79.8%
Simplified79.8%
tan-quot79.8%
+-commutative79.8%
expm1-log1p-u67.9%
expm1-udef67.9%
Applied egg-rr67.9%
expm1-def67.9%
expm1-log1p79.8%
+-commutative79.8%
Simplified79.8%
Final simplification60.9%
(FPCore (x y z a) :precision binary64 (+ x (sin y)))
double code(double x, double y, double z, double a) {
return x + sin(y);
}
! Taylor-truncated Herbie alternative: x + sin(y); z and a are unused
! (kept in the interface so all alternatives share one signature).
! Fix: add implicit none (the generated code relied on implicit typing).
real(8) function code(x, y, z, a)
implicit none
real(8), intent(in) :: x
real(8), intent(in) :: y
real(8), intent(in) :: z
real(8), intent(in) :: a
code = x + sin(y)
end function code
public static double code(double x, double y, double z, double a) {
return x + Math.sin(y);
}
def code(x, y, z, a):
    """Taylor-truncated Herbie alternative: return x + sin(y); z and a are unused."""
    return x + math.sin(y)
function code(x, y, z, a) return Float64(x + sin(y)) end
function tmp = code(x, y, z, a) tmp = x + sin(y); end
code[x_, y_, z_, a_] := N[(x + N[Sin[y], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + \sin y
\end{array}
Initial program 79.3%
Taylor expanded in a around 0 52.2%
*-lft-identity52.2%
metadata-eval52.2%
cancel-sign-sub-inv52.2%
*-lft-identity52.2%
metadata-eval52.2%
cancel-sign-sub-inv52.2%
sub-neg52.2%
mul-1-neg52.2%
remove-double-neg52.2%
+-commutative52.2%
sub-neg52.2%
mul-1-neg52.2%
remove-double-neg52.2%
+-commutative52.2%
Simplified52.2%
Taylor expanded in y around 0 41.0%
Taylor expanded in z around 0 33.1%
Final simplification33.1%
(FPCore (x y z a) :precision binary64 (- x (tan a)))
double code(double x, double y, double z, double a) {
return x - tan(a);
}
! Truncated Herbie alternative: x - tan(a); y and z are unused
! (kept in the interface so all alternatives share one signature).
! Fix: add implicit none (the generated code relied on implicit typing).
real(8) function code(x, y, z, a)
implicit none
real(8), intent(in) :: x
real(8), intent(in) :: y
real(8), intent(in) :: z
real(8), intent(in) :: a
code = x - tan(a)
end function code
public static double code(double x, double y, double z, double a) {
return x - Math.tan(a);
}
def code(x, y, z, a):
    """Truncated Herbie alternative: return x - tan(a); y and z are unused."""
    return x - math.tan(a)
function code(x, y, z, a) return Float64(x - tan(a)) end
function tmp = code(x, y, z, a) tmp = x - tan(a); end
code[x_, y_, z_, a_] := N[(x - N[Tan[a], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x - \tan a
\end{array}
Initial program 79.3%
associate-+r-79.3%
Simplified79.3%
flip-+79.1%
frac-2neg79.1%
pow279.1%
Applied egg-rr79.1%
neg-sub079.1%
associate--r-79.1%
neg-sub079.1%
+-commutative79.1%
sub-neg79.1%
neg-sub079.1%
associate--r-79.1%
neg-sub079.1%
+-commutative79.1%
sub-neg79.1%
+-commutative79.1%
+-commutative79.1%
Simplified79.1%
Taylor expanded in x around inf 45.0%
unpow245.0%
neg-mul-145.0%
distribute-rgt-neg-in45.0%
Simplified45.0%
Taylor expanded in x around inf 42.0%
neg-mul-142.0%
Simplified42.0%
sub-neg42.0%
Applied egg-rr42.0%
sub-neg42.0%
distribute-rgt-neg-out42.0%
unpow242.0%
*-rgt-identity42.0%
associate-*r/41.9%
unpow241.9%
distribute-rgt-neg-out41.9%
associate-*l*42.0%
rgt-mult-inverse42.0%
*-rgt-identity42.0%
Simplified42.0%
Final simplification42.0%
(FPCore (x y z a) :precision binary64 x)
/* Degenerate Herbie alternative: identity in x; y, z, and a are unused. */
double code(double x, double y, double z, double a) {
    return x;
}
! Degenerate Herbie alternative: identity in x; y, z, and a are unused
! (kept in the interface so all alternatives share one signature).
! Fix: add implicit none (the generated code relied on implicit typing).
real(8) function code(x, y, z, a)
implicit none
real(8), intent(in) :: x
real(8), intent(in) :: y
real(8), intent(in) :: z
real(8), intent(in) :: a
code = x
end function code
public static double code(double x, double y, double z, double a) {
return x;
}
def code(x, y, z, a):
    """Degenerate Herbie alternative: identity in x; y, z, and a are unused."""
    return x
function code(x, y, z, a) return x end
function tmp = code(x, y, z, a) tmp = x; end
code[x_, y_, z_, a_] := x
\begin{array}{l}
\\
x
\end{array}
Initial program 79.3%
Taylor expanded in x around inf 33.0%
Final simplification33.0%
herbie shell --seed 2023203
(FPCore (x y z a)
:name "tan-example (used to crash)"
:precision binary64
:pre (and (and (and (or (== x 0.0) (and (<= 0.5884142 x) (<= x 505.5909))) (or (and (<= -1.796658e+308 y) (<= y -9.425585e-310)) (and (<= 1.284938e-309 y) (<= y 1.751224e+308)))) (or (and (<= -1.776707e+308 z) (<= z -8.599796e-310)) (and (<= 3.293145e-311 z) (<= z 1.725154e+308)))) (or (and (<= -1.796658e+308 a) (<= a -9.425585e-310)) (and (<= 1.284938e-309 a) (<= a 1.751224e+308))))
(+ x (- (tan (+ y z)) (tan a))))