
(FPCore (B x) :precision binary64 (+ (- (* x (/ 1.0 (tan B)))) (/ 1.0 (sin B))))
double code(double B, double x) {
return -(x * (1.0 / tan(B))) + (1.0 / sin(B));
}
real(8) function code(b, x)
real(8), intent (in) :: b
real(8), intent (in) :: x
code = -(x * (1.0d0 / tan(b))) + (1.0d0 / sin(b))
end function
public static double code(double B, double x) {
return -(x * (1.0 / Math.tan(B))) + (1.0 / Math.sin(B));
}
def code(B, x):
    # csc(B) - x*cot(B): same operations and order as the direct form.
    cot_B = 1.0 / math.tan(B)
    csc_B = 1.0 / math.sin(B)
    return -(x * cot_B) + csc_B
function code(B, x) return Float64(Float64(-Float64(x * Float64(1.0 / tan(B)))) + Float64(1.0 / sin(B))) end
function tmp = code(B, x) tmp = -(x * (1.0 / tan(B))) + (1.0 / sin(B)); end
code[B_, x_] := N[((-N[(x * N[(1.0 / N[Tan[B], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]) + N[(1.0 / N[Sin[B], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(-x \cdot \frac{1}{\tan B}\right) + \frac{1}{\sin B}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 11 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (B x) :precision binary64 (+ (- (* x (/ 1.0 (tan B)))) (/ 1.0 (sin B))))
double code(double B, double x) {
return -(x * (1.0 / tan(B))) + (1.0 / sin(B));
}
real(8) function code(b, x)
real(8), intent (in) :: b
real(8), intent (in) :: x
code = -(x * (1.0d0 / tan(b))) + (1.0d0 / sin(b))
end function
public static double code(double B, double x) {
return -(x * (1.0 / Math.tan(B))) + (1.0 / Math.sin(B));
}
def code(B, x): return -(x * (1.0 / math.tan(B))) + (1.0 / math.sin(B))
function code(B, x) return Float64(Float64(-Float64(x * Float64(1.0 / tan(B)))) + Float64(1.0 / sin(B))) end
function tmp = code(B, x) tmp = -(x * (1.0 / tan(B))) + (1.0 / sin(B)); end
code[B_, x_] := N[((-N[(x * N[(1.0 / N[Tan[B], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]) + N[(1.0 / N[Sin[B], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(-x \cdot \frac{1}{\tan B}\right) + \frac{1}{\sin B}
\end{array}
(FPCore (B x) :precision binary64 (+ (/ (- x) (tan B)) (pow (sin B) -1.0)))
double code(double B, double x) {
return (-x / tan(B)) + pow(sin(B), -1.0);
}
real(8) function code(b, x)
real(8), intent (in) :: b
real(8), intent (in) :: x
code = (-x / tan(b)) + (sin(b) ** (-1.0d0))
end function
public static double code(double B, double x) {
return (-x / Math.tan(B)) + Math.pow(Math.sin(B), -1.0);
}
def code(B, x):
    # Negation folded into the division; csc expressed via pow(sin(B), -1).
    neg_quotient = -x / math.tan(B)
    return neg_quotient + math.pow(math.sin(B), -1.0)
function code(B, x) return Float64(Float64(Float64(-x) / tan(B)) + (sin(B) ^ -1.0)) end
function tmp = code(B, x) tmp = (-x / tan(B)) + (sin(B) ^ -1.0); end
code[B_, x_] := N[(N[((-x) / N[Tan[B], $MachinePrecision]), $MachinePrecision] + N[Power[N[Sin[B], $MachinePrecision], -1.0], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{-x}{\tan B} + {\sin B}^{-1}
\end{array}
Initial program 99.7%
lift-*.f64N/A
lift-/.f64N/A
un-div-invN/A
lower-/.f6499.8
Applied rewrites99.8%
Final simplification99.8%
(FPCore (B x)
:precision binary64
(let* ((t_0 (* x (/ -1.0 (tan B)))) (t_1 (+ t_0 (pow (sin B) -1.0))))
(if (or (<= t_1 -10000000.0) (not (<= t_1 50.0)))
(+ t_0 (pow B -1.0))
(/
(/ (- (+ x 1.0) (* (+ x 1.0) (* x x))) (* (+ x 1.0) (+ x 1.0)))
(sin B)))))
/* Branching alternative: evaluate the naive form, then pick a regime-specific
 * rewrite based on the magnitude of that naive result.
 * B: angle in radians; x: coefficient of the cot(B) term. */
double code(double B, double x) {
double t_0 = x * (-1.0 / tan(B));          /* -x*cot(B) */
double t_1 = t_0 + pow(sin(B), -1.0);      /* naive csc(B) - x*cot(B), used only for branching */
double tmp;
if ((t_1 <= -10000000.0) || !(t_1 <= 50.0)) {
/* Large-magnitude regime (result <= -1e7 or > 50, including NaN):
 * replace csc(B) with 1/B. */
tmp = t_0 + pow(B, -1.0);
} else {
/* Moderate regime: rational rewrite divided by sin(B); the fraction
 * algebraically reduces to (1 - x) but is evaluated in this factored form. */
tmp = (((x + 1.0) - ((x + 1.0) * (x * x))) / ((x + 1.0) * (x + 1.0))) / sin(B);
}
return tmp;
}
real(8) function code(b, x)
real(8), intent (in) :: b
real(8), intent (in) :: x
real(8) :: t_0
real(8) :: t_1
real(8) :: tmp
t_0 = x * ((-1.0d0) / tan(b))
t_1 = t_0 + (sin(b) ** (-1.0d0))
if ((t_1 <= (-10000000.0d0)) .or. (.not. (t_1 <= 50.0d0))) then
tmp = t_0 + (b ** (-1.0d0))
else
tmp = (((x + 1.0d0) - ((x + 1.0d0) * (x * x))) / ((x + 1.0d0) * (x + 1.0d0))) / sin(b)
end if
code = tmp
end function
public static double code(double B, double x) {
double t_0 = x * (-1.0 / Math.tan(B));
double t_1 = t_0 + Math.pow(Math.sin(B), -1.0);
double tmp;
if ((t_1 <= -10000000.0) || !(t_1 <= 50.0)) {
tmp = t_0 + Math.pow(B, -1.0);
} else {
tmp = (((x + 1.0) - ((x + 1.0) * (x * x))) / ((x + 1.0) * (x + 1.0))) / Math.sin(B);
}
return tmp;
}
def code(B, x):
    """Regime-split evaluation of csc(B) - x*cot(B).

    B: angle in radians; x: coefficient of the cot(B) term.
    Returns the branch-selected binary64 approximation.
    """
    # NOTE(review): the original rendering collapsed all statements onto one
    # physical line, which is a SyntaxError in Python; only formatting is fixed.
    t_0 = x * (-1.0 / math.tan(B))
    t_1 = t_0 + math.pow(math.sin(B), -1.0)
    tmp = 0
    if (t_1 <= -10000000.0) or not (t_1 <= 50.0):
        # Large-magnitude regime: replace csc(B) with 1/B.
        tmp = t_0 + math.pow(B, -1.0)
    else:
        # Moderate regime: factored rational form divided by sin(B).
        tmp = (((x + 1.0) - ((x + 1.0) * (x * x))) / ((x + 1.0) * (x + 1.0))) / math.sin(B)
    return tmp
function code(B, x) t_0 = Float64(x * Float64(-1.0 / tan(B))) t_1 = Float64(t_0 + (sin(B) ^ -1.0)) tmp = 0.0 if ((t_1 <= -10000000.0) || !(t_1 <= 50.0)) tmp = Float64(t_0 + (B ^ -1.0)); else tmp = Float64(Float64(Float64(Float64(x + 1.0) - Float64(Float64(x + 1.0) * Float64(x * x))) / Float64(Float64(x + 1.0) * Float64(x + 1.0))) / sin(B)); end return tmp end
function tmp_2 = code(B, x) t_0 = x * (-1.0 / tan(B)); t_1 = t_0 + (sin(B) ^ -1.0); tmp = 0.0; if ((t_1 <= -10000000.0) || ~((t_1 <= 50.0))) tmp = t_0 + (B ^ -1.0); else tmp = (((x + 1.0) - ((x + 1.0) * (x * x))) / ((x + 1.0) * (x + 1.0))) / sin(B); end tmp_2 = tmp; end
code[B_, x_] := Block[{t$95$0 = N[(x * N[(-1.0 / N[Tan[B], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(t$95$0 + N[Power[N[Sin[B], $MachinePrecision], -1.0], $MachinePrecision]), $MachinePrecision]}, If[Or[LessEqual[t$95$1, -10000000.0], N[Not[LessEqual[t$95$1, 50.0]], $MachinePrecision]], N[(t$95$0 + N[Power[B, -1.0], $MachinePrecision]), $MachinePrecision], N[(N[(N[(N[(x + 1.0), $MachinePrecision] - N[(N[(x + 1.0), $MachinePrecision] * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(N[(x + 1.0), $MachinePrecision] * N[(x + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[Sin[B], $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := x \cdot \frac{-1}{\tan B}\\
t_1 := t\_0 + {\sin B}^{-1}\\
\mathbf{if}\;t\_1 \leq -10000000 \lor \neg \left(t\_1 \leq 50\right):\\
\;\;\;\;t\_0 + {B}^{-1}\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{\left(x + 1\right) - \left(x + 1\right) \cdot \left(x \cdot x\right)}{\left(x + 1\right) \cdot \left(x + 1\right)}}{\sin B}\\
\end{array}
\end{array}
if (+.f64 (neg.f64 (*.f64 x (/.f64 #s(literal 1 binary64) (tan.f64 B)))) (/.f64 #s(literal 1 binary64) (sin.f64 B))) < -1e7 or 50 < (+.f64 (neg.f64 (*.f64 x (/.f64 #s(literal 1 binary64) (tan.f64 B)))) (/.f64 #s(literal 1 binary64) (sin.f64 B))) Initial program 99.7%
Taylor expanded in B around 0
lower-/.f64N/A
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6477.9
Applied rewrites77.9%
Taylor expanded in B around 0
Applied rewrites98.9%
if -1e7 < (+.f64 (neg.f64 (*.f64 x (/.f64 #s(literal 1 binary64) (tan.f64 B)))) (/.f64 #s(literal 1 binary64) (sin.f64 B))) < 50Initial program 99.5%
lift-*.f64N/A
lift-/.f64N/A
un-div-invN/A
lower-/.f6499.5
Applied rewrites99.5%
lift-+.f64N/A
+-commutativeN/A
lift-neg.f64N/A
lift-/.f64N/A
div-invN/A
lift-/.f64N/A
lift-*.f64N/A
unsub-negN/A
lift-/.f64N/A
lift-*.f64N/A
lift-/.f64N/A
lift-tan.f64N/A
tan-quotN/A
lift-sin.f64N/A
lift-cos.f64N/A
clear-numN/A
associate-/l*N/A
lift-*.f64N/A
sub-divN/A
lower-/.f64N/A
Applied rewrites99.5%
Taylor expanded in B around 0
lower--.f6498.3
Applied rewrites98.3%
Applied rewrites98.3%
Final simplification98.7%
(FPCore (B x)
:precision binary64
(let* ((t_0 (* x (/ -1.0 (tan B)))) (t_1 (+ t_0 (pow (sin B) -1.0))))
(if (or (<= t_1 -10000000.0) (not (<= t_1 50.0)))
(+ t_0 (pow B -1.0))
(/ (- 1.0 x) (sin B)))))
/* Branching alternative with a simplified moderate-regime body.
 * B: angle in radians; x: coefficient of the cot(B) term. */
double code(double B, double x) {
double t_0 = x * (-1.0 / tan(B));          /* -x*cot(B) */
double t_1 = t_0 + pow(sin(B), -1.0);      /* naive result, used only for branching */
double tmp;
if ((t_1 <= -10000000.0) || !(t_1 <= 50.0)) {
/* Large-magnitude regime (<= -1e7 or > 50, including NaN): csc(B) ~ 1/B. */
tmp = t_0 + pow(B, -1.0);
} else {
/* Moderate regime: simplified to (1 - x)/sin(B). */
tmp = (1.0 - x) / sin(B);
}
return tmp;
}
real(8) function code(b, x)
real(8), intent (in) :: b
real(8), intent (in) :: x
real(8) :: t_0
real(8) :: t_1
real(8) :: tmp
t_0 = x * ((-1.0d0) / tan(b))
t_1 = t_0 + (sin(b) ** (-1.0d0))
if ((t_1 <= (-10000000.0d0)) .or. (.not. (t_1 <= 50.0d0))) then
tmp = t_0 + (b ** (-1.0d0))
else
tmp = (1.0d0 - x) / sin(b)
end if
code = tmp
end function
public static double code(double B, double x) {
double t_0 = x * (-1.0 / Math.tan(B));
double t_1 = t_0 + Math.pow(Math.sin(B), -1.0);
double tmp;
if ((t_1 <= -10000000.0) || !(t_1 <= 50.0)) {
tmp = t_0 + Math.pow(B, -1.0);
} else {
tmp = (1.0 - x) / Math.sin(B);
}
return tmp;
}
def code(B, x):
    """Regime-split evaluation of csc(B) - x*cot(B), simplified else-branch.

    B: angle in radians; x: coefficient of the cot(B) term.
    """
    # NOTE(review): the original rendering collapsed all statements onto one
    # physical line, which is a SyntaxError in Python; only formatting is fixed.
    t_0 = x * (-1.0 / math.tan(B))
    t_1 = t_0 + math.pow(math.sin(B), -1.0)
    tmp = 0
    if (t_1 <= -10000000.0) or not (t_1 <= 50.0):
        # Large-magnitude regime: replace csc(B) with 1/B.
        tmp = t_0 + math.pow(B, -1.0)
    else:
        # Moderate regime: simplified to (1 - x)/sin(B).
        tmp = (1.0 - x) / math.sin(B)
    return tmp
function code(B, x) t_0 = Float64(x * Float64(-1.0 / tan(B))) t_1 = Float64(t_0 + (sin(B) ^ -1.0)) tmp = 0.0 if ((t_1 <= -10000000.0) || !(t_1 <= 50.0)) tmp = Float64(t_0 + (B ^ -1.0)); else tmp = Float64(Float64(1.0 - x) / sin(B)); end return tmp end
function tmp_2 = code(B, x) t_0 = x * (-1.0 / tan(B)); t_1 = t_0 + (sin(B) ^ -1.0); tmp = 0.0; if ((t_1 <= -10000000.0) || ~((t_1 <= 50.0))) tmp = t_0 + (B ^ -1.0); else tmp = (1.0 - x) / sin(B); end tmp_2 = tmp; end
code[B_, x_] := Block[{t$95$0 = N[(x * N[(-1.0 / N[Tan[B], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(t$95$0 + N[Power[N[Sin[B], $MachinePrecision], -1.0], $MachinePrecision]), $MachinePrecision]}, If[Or[LessEqual[t$95$1, -10000000.0], N[Not[LessEqual[t$95$1, 50.0]], $MachinePrecision]], N[(t$95$0 + N[Power[B, -1.0], $MachinePrecision]), $MachinePrecision], N[(N[(1.0 - x), $MachinePrecision] / N[Sin[B], $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := x \cdot \frac{-1}{\tan B}\\
t_1 := t\_0 + {\sin B}^{-1}\\
\mathbf{if}\;t\_1 \leq -10000000 \lor \neg \left(t\_1 \leq 50\right):\\
\;\;\;\;t\_0 + {B}^{-1}\\
\mathbf{else}:\\
\;\;\;\;\frac{1 - x}{\sin B}\\
\end{array}
\end{array}
if (+.f64 (neg.f64 (*.f64 x (/.f64 #s(literal 1 binary64) (tan.f64 B)))) (/.f64 #s(literal 1 binary64) (sin.f64 B))) < -1e7 or 50 < (+.f64 (neg.f64 (*.f64 x (/.f64 #s(literal 1 binary64) (tan.f64 B)))) (/.f64 #s(literal 1 binary64) (sin.f64 B))) Initial program 99.7%
Taylor expanded in B around 0
lower-/.f64N/A
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6477.9
Applied rewrites77.9%
Taylor expanded in B around 0
Applied rewrites98.9%
if -1e7 < (+.f64 (neg.f64 (*.f64 x (/.f64 #s(literal 1 binary64) (tan.f64 B)))) (/.f64 #s(literal 1 binary64) (sin.f64 B))) < 50Initial program 99.5%
lift-*.f64N/A
lift-/.f64N/A
un-div-invN/A
lower-/.f6499.5
Applied rewrites99.5%
lift-+.f64N/A
+-commutativeN/A
lift-neg.f64N/A
lift-/.f64N/A
div-invN/A
lift-/.f64N/A
lift-*.f64N/A
unsub-negN/A
lift-/.f64N/A
lift-*.f64N/A
lift-/.f64N/A
lift-tan.f64N/A
tan-quotN/A
lift-sin.f64N/A
lift-cos.f64N/A
clear-numN/A
associate-/l*N/A
lift-*.f64N/A
sub-divN/A
lower-/.f64N/A
Applied rewrites99.5%
Taylor expanded in B around 0
lower--.f6498.3
Applied rewrites98.3%
Final simplification98.7%
(FPCore (B x)
:precision binary64
(let* ((t_0 (+ (* x (/ -1.0 (tan B))) (pow (sin B) -1.0))))
(if (or (<= t_0 -10000000.0) (not (<= t_0 50.0)))
(+ (/ (- x) (tan B)) (pow B -1.0))
(/
(/ (- (+ x 1.0) (* (+ x 1.0) (* x x))) (* (+ x 1.0) (+ x 1.0)))
(sin B)))))
/* Branching alternative: single temporary for the naive result; the
 * far-regime branch recomputes -x/tan(B) directly.
 * B: angle in radians; x: coefficient of the cot(B) term. */
double code(double B, double x) {
double t_0 = (x * (-1.0 / tan(B))) + pow(sin(B), -1.0);  /* naive csc(B) - x*cot(B) */
double tmp;
if ((t_0 <= -10000000.0) || !(t_0 <= 50.0)) {
/* Large-magnitude regime (<= -1e7 or > 50, including NaN): csc(B) ~ 1/B. */
tmp = (-x / tan(B)) + pow(B, -1.0);
} else {
/* Moderate regime: factored rational form (reduces to 1 - x) over sin(B). */
tmp = (((x + 1.0) - ((x + 1.0) * (x * x))) / ((x + 1.0) * (x + 1.0))) / sin(B);
}
return tmp;
}
real(8) function code(b, x)
real(8), intent (in) :: b
real(8), intent (in) :: x
real(8) :: t_0
real(8) :: tmp
t_0 = (x * ((-1.0d0) / tan(b))) + (sin(b) ** (-1.0d0))
if ((t_0 <= (-10000000.0d0)) .or. (.not. (t_0 <= 50.0d0))) then
tmp = (-x / tan(b)) + (b ** (-1.0d0))
else
tmp = (((x + 1.0d0) - ((x + 1.0d0) * (x * x))) / ((x + 1.0d0) * (x + 1.0d0))) / sin(b)
end if
code = tmp
end function
public static double code(double B, double x) {
double t_0 = (x * (-1.0 / Math.tan(B))) + Math.pow(Math.sin(B), -1.0);
double tmp;
if ((t_0 <= -10000000.0) || !(t_0 <= 50.0)) {
tmp = (-x / Math.tan(B)) + Math.pow(B, -1.0);
} else {
tmp = (((x + 1.0) - ((x + 1.0) * (x * x))) / ((x + 1.0) * (x + 1.0))) / Math.sin(B);
}
return tmp;
}
def code(B, x):
    """Regime-split evaluation of csc(B) - x*cot(B), single temporary.

    B: angle in radians; x: coefficient of the cot(B) term.
    """
    # NOTE(review): the original rendering collapsed all statements onto one
    # physical line, which is a SyntaxError in Python; only formatting is fixed.
    t_0 = (x * (-1.0 / math.tan(B))) + math.pow(math.sin(B), -1.0)
    tmp = 0
    if (t_0 <= -10000000.0) or not (t_0 <= 50.0):
        # Large-magnitude regime: -x/tan(B) plus csc(B) ~ 1/B.
        tmp = (-x / math.tan(B)) + math.pow(B, -1.0)
    else:
        # Moderate regime: factored rational form over sin(B).
        tmp = (((x + 1.0) - ((x + 1.0) * (x * x))) / ((x + 1.0) * (x + 1.0))) / math.sin(B)
    return tmp
function code(B, x) t_0 = Float64(Float64(x * Float64(-1.0 / tan(B))) + (sin(B) ^ -1.0)) tmp = 0.0 if ((t_0 <= -10000000.0) || !(t_0 <= 50.0)) tmp = Float64(Float64(Float64(-x) / tan(B)) + (B ^ -1.0)); else tmp = Float64(Float64(Float64(Float64(x + 1.0) - Float64(Float64(x + 1.0) * Float64(x * x))) / Float64(Float64(x + 1.0) * Float64(x + 1.0))) / sin(B)); end return tmp end
function tmp_2 = code(B, x) t_0 = (x * (-1.0 / tan(B))) + (sin(B) ^ -1.0); tmp = 0.0; if ((t_0 <= -10000000.0) || ~((t_0 <= 50.0))) tmp = (-x / tan(B)) + (B ^ -1.0); else tmp = (((x + 1.0) - ((x + 1.0) * (x * x))) / ((x + 1.0) * (x + 1.0))) / sin(B); end tmp_2 = tmp; end
code[B_, x_] := Block[{t$95$0 = N[(N[(x * N[(-1.0 / N[Tan[B], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[Power[N[Sin[B], $MachinePrecision], -1.0], $MachinePrecision]), $MachinePrecision]}, If[Or[LessEqual[t$95$0, -10000000.0], N[Not[LessEqual[t$95$0, 50.0]], $MachinePrecision]], N[(N[((-x) / N[Tan[B], $MachinePrecision]), $MachinePrecision] + N[Power[B, -1.0], $MachinePrecision]), $MachinePrecision], N[(N[(N[(N[(x + 1.0), $MachinePrecision] - N[(N[(x + 1.0), $MachinePrecision] * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(N[(x + 1.0), $MachinePrecision] * N[(x + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[Sin[B], $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := x \cdot \frac{-1}{\tan B} + {\sin B}^{-1}\\
\mathbf{if}\;t\_0 \leq -10000000 \lor \neg \left(t\_0 \leq 50\right):\\
\;\;\;\;\frac{-x}{\tan B} + {B}^{-1}\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{\left(x + 1\right) - \left(x + 1\right) \cdot \left(x \cdot x\right)}{\left(x + 1\right) \cdot \left(x + 1\right)}}{\sin B}\\
\end{array}
\end{array}
if (+.f64 (neg.f64 (*.f64 x (/.f64 #s(literal 1 binary64) (tan.f64 B)))) (/.f64 #s(literal 1 binary64) (sin.f64 B))) < -1e7 or 50 < (+.f64 (neg.f64 (*.f64 x (/.f64 #s(literal 1 binary64) (tan.f64 B)))) (/.f64 #s(literal 1 binary64) (sin.f64 B))) Initial program 99.7%
lift-*.f64N/A
lift-/.f64N/A
un-div-invN/A
lower-/.f6499.9
Applied rewrites99.9%
lift-/.f64N/A
inv-powN/A
metadata-evalN/A
pow-to-expN/A
lower-exp.f64N/A
rem-log-expN/A
pow-to-expN/A
metadata-evalN/A
inv-powN/A
log-recN/A
lower-neg.f64N/A
lower-log.f6445.7
Applied rewrites45.7%
Taylor expanded in B around 0
neg-mul-1N/A
*-commutativeN/A
exp-to-powN/A
lower-pow.f6499.0
Applied rewrites99.0%
if -1e7 < (+.f64 (neg.f64 (*.f64 x (/.f64 #s(literal 1 binary64) (tan.f64 B)))) (/.f64 #s(literal 1 binary64) (sin.f64 B))) < 50Initial program 99.5%
lift-*.f64N/A
lift-/.f64N/A
un-div-invN/A
lower-/.f6499.5
Applied rewrites99.5%
lift-+.f64N/A
+-commutativeN/A
lift-neg.f64N/A
lift-/.f64N/A
div-invN/A
lift-/.f64N/A
lift-*.f64N/A
unsub-negN/A
lift-/.f64N/A
lift-*.f64N/A
lift-/.f64N/A
lift-tan.f64N/A
tan-quotN/A
lift-sin.f64N/A
lift-cos.f64N/A
clear-numN/A
associate-/l*N/A
lift-*.f64N/A
sub-divN/A
lower-/.f64N/A
Applied rewrites99.5%
Taylor expanded in B around 0
lower--.f6498.3
Applied rewrites98.3%
Applied rewrites98.3%
Final simplification98.9%
(FPCore (B x) :precision binary64 (/ (- 1.0 (* (cos B) x)) (sin B)))
double code(double B, double x) {
return (1.0 - (cos(B) * x)) / sin(B);
}
real(8) function code(b, x)
real(8), intent (in) :: b
real(8), intent (in) :: x
code = (1.0d0 - (cos(b) * x)) / sin(b)
end function
public static double code(double B, double x) {
return (1.0 - (Math.cos(B) * x)) / Math.sin(B);
}
def code(B, x):
    # Common-denominator form: (1 - x*cos(B)) / sin(B).
    scaled = math.cos(B) * x
    return (1.0 - scaled) / math.sin(B)
function code(B, x) return Float64(Float64(1.0 - Float64(cos(B) * x)) / sin(B)) end
function tmp = code(B, x) tmp = (1.0 - (cos(B) * x)) / sin(B); end
code[B_, x_] := N[(N[(1.0 - N[(N[Cos[B], $MachinePrecision] * x), $MachinePrecision]), $MachinePrecision] / N[Sin[B], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1 - \cos B \cdot x}{\sin B}
\end{array}
Initial program 99.7%
lift-*.f64N/A
lift-/.f64N/A
un-div-invN/A
lower-/.f6499.8
Applied rewrites99.8%
lift-+.f64N/A
+-commutativeN/A
lift-neg.f64N/A
lift-/.f64N/A
div-invN/A
lift-/.f64N/A
lift-*.f64N/A
unsub-negN/A
lift-/.f64N/A
lift-*.f64N/A
lift-/.f64N/A
lift-tan.f64N/A
tan-quotN/A
lift-sin.f64N/A
lift-cos.f64N/A
clear-numN/A
associate-/l*N/A
lift-*.f64N/A
sub-divN/A
lower-/.f64N/A
Applied rewrites99.7%
(FPCore (B x) :precision binary64 (if (or (<= x -8.6e+85) (not (<= x 8.5e+54))) (+ (* x (/ -1.0 (tan B))) (* 0.16666666666666666 B)) (/ (- 1.0 x) (sin B))))
/* Branching alternative keyed on x itself rather than the naive result.
 * B: angle in radians; x: coefficient of the cot(B) term. */
double code(double B, double x) {
double tmp;
if ((x <= -8.6e+85) || !(x <= 8.5e+54)) {
/* Extreme |x| (or NaN): -x*cot(B) plus the B/6 series term for csc - 1/B. */
tmp = (x * (-1.0 / tan(B))) + (0.16666666666666666 * B);
} else {
/* Ordinary x: simplified to (1 - x)/sin(B). */
tmp = (1.0 - x) / sin(B);
}
return tmp;
}
real(8) function code(b, x)
real(8), intent (in) :: b
real(8), intent (in) :: x
real(8) :: tmp
if ((x <= (-8.6d+85)) .or. (.not. (x <= 8.5d+54))) then
tmp = (x * ((-1.0d0) / tan(b))) + (0.16666666666666666d0 * b)
else
tmp = (1.0d0 - x) / sin(b)
end if
code = tmp
end function
public static double code(double B, double x) {
double tmp;
if ((x <= -8.6e+85) || !(x <= 8.5e+54)) {
tmp = (x * (-1.0 / Math.tan(B))) + (0.16666666666666666 * B);
} else {
tmp = (1.0 - x) / Math.sin(B);
}
return tmp;
}
def code(B, x):
    """Branch on the magnitude of x between two rewrites of csc(B) - x*cot(B).

    B: angle in radians; x: coefficient of the cot(B) term.
    """
    # NOTE(review): the original rendering collapsed all statements onto one
    # physical line, which is a SyntaxError in Python; only formatting is fixed.
    tmp = 0
    if (x <= -8.6e+85) or not (x <= 8.5e+54):
        # Extreme |x| (or NaN): -x*cot(B) plus the B/6 series term.
        tmp = (x * (-1.0 / math.tan(B))) + (0.16666666666666666 * B)
    else:
        # Ordinary x: simplified to (1 - x)/sin(B).
        tmp = (1.0 - x) / math.sin(B)
    return tmp
function code(B, x) tmp = 0.0 if ((x <= -8.6e+85) || !(x <= 8.5e+54)) tmp = Float64(Float64(x * Float64(-1.0 / tan(B))) + Float64(0.16666666666666666 * B)); else tmp = Float64(Float64(1.0 - x) / sin(B)); end return tmp end
function tmp_2 = code(B, x) tmp = 0.0; if ((x <= -8.6e+85) || ~((x <= 8.5e+54))) tmp = (x * (-1.0 / tan(B))) + (0.16666666666666666 * B); else tmp = (1.0 - x) / sin(B); end tmp_2 = tmp; end
code[B_, x_] := If[Or[LessEqual[x, -8.6e+85], N[Not[LessEqual[x, 8.5e+54]], $MachinePrecision]], N[(N[(x * N[(-1.0 / N[Tan[B], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(0.16666666666666666 * B), $MachinePrecision]), $MachinePrecision], N[(N[(1.0 - x), $MachinePrecision] / N[Sin[B], $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -8.6 \cdot 10^{+85} \lor \neg \left(x \leq 8.5 \cdot 10^{+54}\right):\\
\;\;\;\;x \cdot \frac{-1}{\tan B} + 0.16666666666666666 \cdot B\\
\mathbf{else}:\\
\;\;\;\;\frac{1 - x}{\sin B}\\
\end{array}
\end{array}
if x < -8.5999999999999998e85 or 8.4999999999999995e54 < x Initial program 99.6%
Taylor expanded in B around 0
lower-/.f64N/A
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6470.1
Applied rewrites70.1%
Taylor expanded in B around inf
Applied rewrites78.3%
if -8.5999999999999998e85 < x < 8.4999999999999995e54Initial program 99.7%
lift-*.f64N/A
lift-/.f64N/A
un-div-invN/A
lower-/.f6499.8
Applied rewrites99.8%
lift-+.f64N/A
+-commutativeN/A
lift-neg.f64N/A
lift-/.f64N/A
div-invN/A
lift-/.f64N/A
lift-*.f64N/A
unsub-negN/A
lift-/.f64N/A
lift-*.f64N/A
lift-/.f64N/A
lift-tan.f64N/A
tan-quotN/A
lift-sin.f64N/A
lift-cos.f64N/A
clear-numN/A
associate-/l*N/A
lift-*.f64N/A
sub-divN/A
lower-/.f64N/A
Applied rewrites99.7%
Taylor expanded in B around 0
lower--.f6492.4
Applied rewrites92.4%
Final simplification86.5%
(FPCore (B x) :precision binary64 (/ (- 1.0 x) (sin B)))
double code(double B, double x) {
return (1.0 - x) / sin(B);
}
real(8) function code(b, x)
real(8), intent (in) :: b
real(8), intent (in) :: x
code = (1.0d0 - x) / sin(b)
end function
public static double code(double B, double x) {
return (1.0 - x) / Math.sin(B);
}
def code(B, x):
    # Reduced alternative: (1 - x) / sin(B).
    numerator = 1.0 - x
    return numerator / math.sin(B)
function code(B, x) return Float64(Float64(1.0 - x) / sin(B)) end
function tmp = code(B, x) tmp = (1.0 - x) / sin(B); end
code[B_, x_] := N[(N[(1.0 - x), $MachinePrecision] / N[Sin[B], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1 - x}{\sin B}
\end{array}
Initial program 99.7%
lift-*.f64N/A
lift-/.f64N/A
un-div-invN/A
lower-/.f6499.8
Applied rewrites99.8%
lift-+.f64N/A
+-commutativeN/A
lift-neg.f64N/A
lift-/.f64N/A
div-invN/A
lift-/.f64N/A
lift-*.f64N/A
unsub-negN/A
lift-/.f64N/A
lift-*.f64N/A
lift-/.f64N/A
lift-tan.f64N/A
tan-quotN/A
lift-sin.f64N/A
lift-cos.f64N/A
clear-numN/A
associate-/l*N/A
lift-*.f64N/A
sub-divN/A
lower-/.f64N/A
Applied rewrites99.7%
Taylor expanded in B around 0
lower--.f6476.1
Applied rewrites76.1%
(FPCore (B x)
:precision binary64
(/
(fma
(fma
(fma
(* (fma x 0.0021164021164021165 0.00205026455026455) B)
B
(fma 0.022222222222222223 x 0.019444444444444445))
(* B B)
(fma 0.3333333333333333 x 0.16666666666666666))
(* B B)
(- 1.0 x))
B))
/* Horner-style fma evaluation of a polynomial in B (even powers via B*B),
 * divided by B; each fma(c1, x, c0) pairs an x-proportional coefficient with
 * a constant one, with (1 - x) as the lowest-order term.
 * NOTE(review): coefficients presumably come from the Taylor expansion of
 * csc(B) - x*cot(B) around B = 0 (0.1666.. = 1/6, 0.3333.. = 1/3) — the
 * expansion step is recorded in the trace below but not shown here. */
double code(double B, double x) {
return fma(fma(fma((fma(x, 0.0021164021164021165, 0.00205026455026455) * B), B, fma(0.022222222222222223, x, 0.019444444444444445)), (B * B), fma(0.3333333333333333, x, 0.16666666666666666)), (B * B), (1.0 - x)) / B;
}
function code(B, x) return Float64(fma(fma(fma(Float64(fma(x, 0.0021164021164021165, 0.00205026455026455) * B), B, fma(0.022222222222222223, x, 0.019444444444444445)), Float64(B * B), fma(0.3333333333333333, x, 0.16666666666666666)), Float64(B * B), Float64(1.0 - x)) / B) end
code[B_, x_] := N[(N[(N[(N[(N[(N[(x * 0.0021164021164021165 + 0.00205026455026455), $MachinePrecision] * B), $MachinePrecision] * B + N[(0.022222222222222223 * x + 0.019444444444444445), $MachinePrecision]), $MachinePrecision] * N[(B * B), $MachinePrecision] + N[(0.3333333333333333 * x + 0.16666666666666666), $MachinePrecision]), $MachinePrecision] * N[(B * B), $MachinePrecision] + N[(1.0 - x), $MachinePrecision]), $MachinePrecision] / B), $MachinePrecision]
\begin{array}{l}
\\
\frac{\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(x, 0.0021164021164021165, 0.00205026455026455\right) \cdot B, B, \mathsf{fma}\left(0.022222222222222223, x, 0.019444444444444445\right)\right), B \cdot B, \mathsf{fma}\left(0.3333333333333333, x, 0.16666666666666666\right)\right), B \cdot B, 1 - x\right)}{B}
\end{array}
Initial program 99.7%
lift-*.f64N/A
lift-/.f64N/A
un-div-invN/A
lower-/.f6499.8
Applied rewrites99.8%
Taylor expanded in B around 0
Applied rewrites51.4%
Final simplification51.4%
(FPCore (B x) :precision binary64 (/ (fma x (fma (* 0.3333333333333333 B) B -1.0) 1.0) B))
double code(double B, double x) {
return fma(x, fma((0.3333333333333333 * B), B, -1.0), 1.0) / B;
}
function code(B, x) return Float64(fma(x, fma(Float64(0.3333333333333333 * B), B, -1.0), 1.0) / B) end
code[B_, x_] := N[(N[(x * N[(N[(0.3333333333333333 * B), $MachinePrecision] * B + -1.0), $MachinePrecision] + 1.0), $MachinePrecision] / B), $MachinePrecision]
\begin{array}{l}
\\
\frac{\mathsf{fma}\left(x, \mathsf{fma}\left(0.3333333333333333 \cdot B, B, -1\right), 1\right)}{B}
\end{array}
Initial program 99.7%
lift-*.f64N/A
lift-/.f64N/A
un-div-invN/A
lower-/.f6499.8
Applied rewrites99.8%
lift-/.f64N/A
inv-powN/A
metadata-evalN/A
pow-to-expN/A
lower-exp.f64N/A
rem-log-expN/A
pow-to-expN/A
metadata-evalN/A
inv-powN/A
log-recN/A
lower-neg.f64N/A
lower-log.f6448.0
Applied rewrites48.0%
Taylor expanded in B around 0
lower-/.f64N/A
Applied rewrites51.3%
Final simplification51.3%
(FPCore (B x) :precision binary64 (/ (- 1.0 x) B))
/* Leading-order series near B = 0: (1 - x) / B. */
double code(double B, double x) {
    double difference = 1.0 - x;
    return difference / B;
}
real(8) function code(b, x)
real(8), intent (in) :: b
real(8), intent (in) :: x
code = (1.0d0 - x) / b
end function
public static double code(double B, double x) {
return (1.0 - x) / B;
}
def code(B, x):
    # Leading-order series near B = 0: (1 - x) / B.
    difference = 1.0 - x
    return difference / B
function code(B, x) return Float64(Float64(1.0 - x) / B) end
function tmp = code(B, x) tmp = (1.0 - x) / B; end
code[B_, x_] := N[(N[(1.0 - x), $MachinePrecision] / B), $MachinePrecision]
\begin{array}{l}
\\
\frac{1 - x}{B}
\end{array}
Initial program 99.7%
Taylor expanded in B around 0
lower-/.f64N/A
lower--.f6450.6
Applied rewrites50.6%
(FPCore (B x) :precision binary64 (/ (- x) B))
/* Most-reduced alternative: -x / B. */
double code(double B, double x) {
    double negated = -x;
    return negated / B;
}
real(8) function code(b, x)
real(8), intent (in) :: b
real(8), intent (in) :: x
code = -x / b
end function
public static double code(double B, double x) {
return -x / B;
}
def code(B, x):
    # Most-reduced alternative: -x / B.
    negated = -x
    return negated / B
function code(B, x) return Float64(Float64(-x) / B) end
function tmp = code(B, x) tmp = -x / B; end
code[B_, x_] := N[((-x) / B), $MachinePrecision]
\begin{array}{l}
\\
\frac{-x}{B}
\end{array}
Initial program 99.7%
Taylor expanded in B around 0
lower-/.f64N/A
lower--.f6450.6
Applied rewrites50.6%
Taylor expanded in x around inf
Applied rewrites26.8%
herbie shell --seed 2024315
(FPCore (B x)
:name "VandenBroeck and Keller, Equation (24)"
:precision binary64
(+ (- (* x (/ 1.0 (tan B)))) (/ 1.0 (sin B))))