
(FPCore (B x) :precision binary64 (+ (- (* x (/ 1.0 (tan B)))) (/ 1.0 (sin B))))
double code(double B, double x) {
return -(x * (1.0 / tan(B))) + (1.0 / sin(B));
}
! Computes -(x * (1/tan(b))) + 1/sin(b), i.e. csc(b) - x*cot(b), in real(8).
real(8) function code(b, x)
implicit none
real(8), intent (in) :: b
real(8), intent (in) :: x
code = -(x * (1.0d0 / tan(b))) + (1.0d0 / sin(b))
end function code
// Computes -(x * (1/Math.tan(B))) + 1/Math.sin(B), i.e. csc(B) - x*cot(B), in double precision.
public static double code(double B, double x) {
return -(x * (1.0 / Math.tan(B))) + (1.0 / Math.sin(B));
}
def code(B, x):
    """Evaluate -(x * (1/tan(B))) + 1/sin(B), i.e. csc(B) - x*cot(B), in float."""
    cot_b = 1.0 / math.tan(B)
    csc_b = 1.0 / math.sin(B)
    return -(x * cot_b) + csc_b
# Computes -(x * (1/tan(B))) + 1/sin(B) with explicit Float64 rounding at each step.
function code(B, x) return Float64(Float64(-Float64(x * Float64(1.0 / tan(B)))) + Float64(1.0 / sin(B))) end
% Computes -(x * (1/tan(B))) + 1/sin(B) in double precision.
function tmp = code(B, x) tmp = -(x * (1.0 / tan(B))) + (1.0 / sin(B)); end
(* Computes -(x * (1/Tan[B])) + 1/Sin[B], each step rounded to $MachinePrecision. *)
code[B_, x_] := N[((-N[(x * N[(1.0 / N[Tan[B], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]) + N[(1.0 / N[Sin[B], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(-x \cdot \frac{1}{\tan B}\right) + \frac{1}{\sin B}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 10 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (B x) :precision binary64 (+ (- (* x (/ 1.0 (tan B)))) (/ 1.0 (sin B))))
/* Alternative 1 (identical to the initial program): -(x * (1/tan B)) + 1/sin B. */
double code(double B, double x) {
return -(x * (1.0 / tan(B))) + (1.0 / sin(B));
}
! Alternative 1 (identical to the initial program): -(x * (1/tan(b))) + 1/sin(b) in real(8).
real(8) function code(b, x)
real(8), intent (in) :: b
real(8), intent (in) :: x
code = -(x * (1.0d0 / tan(b))) + (1.0d0 / sin(b))
end function
// Alternative 1 (identical to the initial program): -(x * (1/Math.tan(B))) + 1/Math.sin(B).
public static double code(double B, double x) {
return -(x * (1.0 / Math.tan(B))) + (1.0 / Math.sin(B));
}
# Alternative 1 (identical to the initial program): -(x * (1/tan(B))) + 1/sin(B).
def code(B, x): return -(x * (1.0 / math.tan(B))) + (1.0 / math.sin(B))
# Alternative 1 (identical to the initial program), Float64-rounded at each step.
function code(B, x) return Float64(Float64(-Float64(x * Float64(1.0 / tan(B)))) + Float64(1.0 / sin(B))) end
% Alternative 1 (identical to the initial program): -(x * (1/tan(B))) + 1/sin(B).
function tmp = code(B, x) tmp = -(x * (1.0 / tan(B))) + (1.0 / sin(B)); end
(* Alternative 1 (identical to the initial program), rounded to $MachinePrecision per step. *)
code[B_, x_] := N[((-N[(x * N[(1.0 / N[Tan[B], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]) + N[(1.0 / N[Sin[B], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(-x \cdot \frac{1}{\tan B}\right) + \frac{1}{\sin B}
\end{array}
(FPCore (B x) :precision binary64 (- (/ 1.0 (sin B)) (/ x (tan B))))
double code(double B, double x) {
return (1.0 / sin(B)) - (x / tan(B));
}
! Alternative 2: 1/sin(b) - x/tan(b) in real(8).
real(8) function code(b, x)
real(8), intent (in) :: b
real(8), intent (in) :: x
code = (1.0d0 / sin(b)) - (x / tan(b))
end function
// Alternative 2: 1/Math.sin(B) - x/Math.tan(B) in double precision.
public static double code(double B, double x) {
return (1.0 / Math.sin(B)) - (x / Math.tan(B));
}
def code(B, x):
    """Evaluate 1/sin(B) - x/tan(B) in double precision."""
    csc_b = 1.0 / math.sin(B)
    x_cot_b = x / math.tan(B)
    return csc_b - x_cot_b
# Alternative 2: 1/sin(B) - x/tan(B) with explicit Float64 rounding.
function code(B, x) return Float64(Float64(1.0 / sin(B)) - Float64(x / tan(B))) end
% Alternative 2: 1/sin(B) - x/tan(B) in double precision.
function tmp = code(B, x) tmp = (1.0 / sin(B)) - (x / tan(B)); end
(* Alternative 2: 1/Sin[B] - x/Tan[B], rounded to $MachinePrecision per step. *)
code[B_, x_] := N[(N[(1.0 / N[Sin[B], $MachinePrecision]), $MachinePrecision] - N[(x / N[Tan[B], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{\sin B} - \frac{x}{\tan B}
\end{array}
Initial program 99.7%
distribute-lft-neg-in 99.7%
+-commutative 99.7%
cancel-sign-sub-inv 99.7%
*-commutative 99.7%
*-commutative 99.7%
associate-*r/ 99.8%
*-rgt-identity 99.8%
Simplified 99.8%
Final simplification 99.8%
(FPCore (B x) :precision binary64 (if (or (<= x -1.2) (not (<= x 1.25))) (/ (- 1.0 x) (tan B)) (- (/ 1.0 (sin B)) (/ x B))))
double code(double B, double x) {
double tmp;
if ((x <= -1.2) || !(x <= 1.25)) {
tmp = (1.0 - x) / tan(B);
} else {
tmp = (1.0 / sin(B)) - (x / B);
}
return tmp;
}
! Alternative 3: piecewise. (1-x)/tan(b) when x <= -1.2 or x > 1.25, else 1/sin(b) - x/b.
real(8) function code(b, x)
real(8), intent (in) :: b
real(8), intent (in) :: x
real(8) :: tmp
if ((x <= (-1.2d0)) .or. (.not. (x <= 1.25d0))) then
tmp = (1.0d0 - x) / tan(b)
else
tmp = (1.0d0 / sin(b)) - (x / b)
end if
code = tmp
end function
// Alternative 3: piecewise. (1-x)/Math.tan(B) when x <= -1.2 or x > 1.25, else 1/Math.sin(B) - x/B.
public static double code(double B, double x) {
double tmp;
if ((x <= -1.2) || !(x <= 1.25)) {
tmp = (1.0 - x) / Math.tan(B);
} else {
tmp = (1.0 / Math.sin(B)) - (x / B);
}
return tmp;
}
def code(B, x):
    """Piecewise alternative: (1-x)/tan(B) when x <= -1.2 or x > 1.25, else 1/sin(B) - x/B.

    The single-line form in the report was invalid Python (newlines lost);
    restored as a proper block with identical semantics.
    """
    tmp = 0
    if (x <= -1.2) or not (x <= 1.25):
        tmp = (1.0 - x) / math.tan(B)
    else:
        tmp = (1.0 / math.sin(B)) - (x / B)
    return tmp
# Alternative 3: piecewise; (1-x)/tan(B) outside (-1.2, 1.25], else 1/sin(B) - x/B (Float64 per step).
function code(B, x) tmp = 0.0 if ((x <= -1.2) || !(x <= 1.25)) tmp = Float64(Float64(1.0 - x) / tan(B)); else tmp = Float64(Float64(1.0 / sin(B)) - Float64(x / B)); end return tmp end
% Alternative 3: piecewise; (1-x)/tan(B) outside (-1.2, 1.25], else 1/sin(B) - x/B.
function tmp_2 = code(B, x) tmp = 0.0; if ((x <= -1.2) || ~((x <= 1.25))) tmp = (1.0 - x) / tan(B); else tmp = (1.0 / sin(B)) - (x / B); end tmp_2 = tmp; end
(* Alternative 3: piecewise; (1-x)/Tan[B] outside (-1.2, 1.25], else 1/Sin[B] - x/B. *)
code[B_, x_] := If[Or[LessEqual[x, -1.2], N[Not[LessEqual[x, 1.25]], $MachinePrecision]], N[(N[(1.0 - x), $MachinePrecision] / N[Tan[B], $MachinePrecision]), $MachinePrecision], N[(N[(1.0 / N[Sin[B], $MachinePrecision]), $MachinePrecision] - N[(x / B), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -1.2 \lor \neg \left(x \leq 1.25\right):\\
\;\;\;\;\frac{1 - x}{\tan B}\\
\mathbf{else}:\\
\;\;\;\;\frac{1}{\sin B} - \frac{x}{B}\\
\end{array}
\end{array}
if x < -1.19999999999999996 or 1.25 < x:
Initial program 99.7%
+-commutative 99.7%
div-inv 99.9%
sub-neg 99.9%
frac-sub 93.8%
associate-/r* 99.8%
*-un-lft-identity 99.8%
*-commutative 99.8%
Applied egg-rr 99.8%
Taylor expanded in B around 0 99.2%
if -1.19999999999999996 < x < 1.25:
Initial program 99.8%
Taylor expanded in B around 0 98.0%
Final simplification 98.6%
(FPCore (B x) :precision binary64 (if (or (<= x -0.0065) (not (<= x 3.4e-9))) (/ (- 1.0 x) (tan B)) (/ 1.0 (sin B))))
/* Alternative 4: piecewise. (1 - x)/tan(B) when x <= -0.0065 or x > 3.4e-9, else 1/sin(B). */
double code(double B, double x) {
double tmp;
if ((x <= -0.0065) || !(x <= 3.4e-9)) {
tmp = (1.0 - x) / tan(B);
} else {
tmp = 1.0 / sin(B);
}
return tmp;
}
! Alternative 4: piecewise. (1-x)/tan(b) when x <= -0.0065 or x > 3.4e-9, else 1/sin(b).
real(8) function code(b, x)
real(8), intent (in) :: b
real(8), intent (in) :: x
real(8) :: tmp
if ((x <= (-0.0065d0)) .or. (.not. (x <= 3.4d-9))) then
tmp = (1.0d0 - x) / tan(b)
else
tmp = 1.0d0 / sin(b)
end if
code = tmp
end function
// Alternative 4: piecewise. (1-x)/Math.tan(B) when x <= -0.0065 or x > 3.4e-9, else 1/Math.sin(B).
public static double code(double B, double x) {
double tmp;
if ((x <= -0.0065) || !(x <= 3.4e-9)) {
tmp = (1.0 - x) / Math.tan(B);
} else {
tmp = 1.0 / Math.sin(B);
}
return tmp;
}
def code(B, x):
    """Piecewise alternative: (1-x)/tan(B) when x <= -0.0065 or x > 3.4e-9, else 1/sin(B).

    Restored from the report's invalid single-line form (newlines lost); same semantics.
    """
    tmp = 0
    if (x <= -0.0065) or not (x <= 3.4e-9):
        tmp = (1.0 - x) / math.tan(B)
    else:
        tmp = 1.0 / math.sin(B)
    return tmp
# Alternative 4: piecewise; (1-x)/tan(B) when x <= -0.0065 or x > 3.4e-9, else 1/sin(B).
function code(B, x) tmp = 0.0 if ((x <= -0.0065) || !(x <= 3.4e-9)) tmp = Float64(Float64(1.0 - x) / tan(B)); else tmp = Float64(1.0 / sin(B)); end return tmp end
% Alternative 4: piecewise; (1-x)/tan(B) when x <= -0.0065 or x > 3.4e-9, else 1/sin(B).
function tmp_2 = code(B, x) tmp = 0.0; if ((x <= -0.0065) || ~((x <= 3.4e-9))) tmp = (1.0 - x) / tan(B); else tmp = 1.0 / sin(B); end tmp_2 = tmp; end
(* Alternative 4: piecewise; (1-x)/Tan[B] when x <= -0.0065 or x > 3.4e-9, else 1/Sin[B]. *)
code[B_, x_] := If[Or[LessEqual[x, -0.0065], N[Not[LessEqual[x, 3.4e-9]], $MachinePrecision]], N[(N[(1.0 - x), $MachinePrecision] / N[Tan[B], $MachinePrecision]), $MachinePrecision], N[(1.0 / N[Sin[B], $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -0.0065 \lor \neg \left(x \leq 3.4 \cdot 10^{-9}\right):\\
\;\;\;\;\frac{1 - x}{\tan B}\\
\mathbf{else}:\\
\;\;\;\;\frac{1}{\sin B}\\
\end{array}
\end{array}
if x < -0.0064999999999999997 or 3.3999999999999998e-9 < x:
Initial program 99.7%
+-commutative 99.7%
div-inv 99.9%
sub-neg 99.9%
frac-sub 93.3%
associate-/r* 99.8%
*-un-lft-identity 99.8%
*-commutative 99.8%
Applied egg-rr 99.8%
Taylor expanded in B around 0 97.9%
if -0.0064999999999999997 < x < 3.3999999999999998e-9:
Initial program 99.8%
Taylor expanded in x around 0 99.2%
Final simplification 98.5%
(FPCore (B x) :precision binary64 (if (or (<= x -1.6) (not (<= x 1.0))) (/ (- x) (tan B)) (/ 1.0 (sin B))))
/* Alternative 5: piecewise. -x/tan(B) when x <= -1.6 or x > 1.0, else 1/sin(B). */
double code(double B, double x) {
double tmp;
if ((x <= -1.6) || !(x <= 1.0)) {
tmp = -x / tan(B);
} else {
tmp = 1.0 / sin(B);
}
return tmp;
}
! Alternative 5: piecewise. -x/tan(b) when x <= -1.6 or x > 1.0, else 1/sin(b).
real(8) function code(b, x)
real(8), intent (in) :: b
real(8), intent (in) :: x
real(8) :: tmp
if ((x <= (-1.6d0)) .or. (.not. (x <= 1.0d0))) then
tmp = -x / tan(b)
else
tmp = 1.0d0 / sin(b)
end if
code = tmp
end function
// Alternative 5: piecewise. -x/Math.tan(B) when x <= -1.6 or x > 1.0, else 1/Math.sin(B).
public static double code(double B, double x) {
double tmp;
if ((x <= -1.6) || !(x <= 1.0)) {
tmp = -x / Math.tan(B);
} else {
tmp = 1.0 / Math.sin(B);
}
return tmp;
}
def code(B, x):
    """Piecewise alternative: -x/tan(B) when x <= -1.6 or x > 1.0, else 1/sin(B).

    Restored from the report's invalid single-line form (newlines lost); same semantics.
    """
    tmp = 0
    if (x <= -1.6) or not (x <= 1.0):
        tmp = -x / math.tan(B)
    else:
        tmp = 1.0 / math.sin(B)
    return tmp
# Alternative 5: piecewise; -x/tan(B) when x <= -1.6 or x > 1.0, else 1/sin(B).
function code(B, x) tmp = 0.0 if ((x <= -1.6) || !(x <= 1.0)) tmp = Float64(Float64(-x) / tan(B)); else tmp = Float64(1.0 / sin(B)); end return tmp end
% Alternative 5: piecewise; -x/tan(B) when x <= -1.6 or x > 1.0, else 1/sin(B).
function tmp_2 = code(B, x) tmp = 0.0; if ((x <= -1.6) || ~((x <= 1.0))) tmp = -x / tan(B); else tmp = 1.0 / sin(B); end tmp_2 = tmp; end
(* Alternative 5: piecewise; -x/Tan[B] when x <= -1.6 or x > 1.0, else 1/Sin[B]. *)
code[B_, x_] := If[Or[LessEqual[x, -1.6], N[Not[LessEqual[x, 1.0]], $MachinePrecision]], N[((-x) / N[Tan[B], $MachinePrecision]), $MachinePrecision], N[(1.0 / N[Sin[B], $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -1.6 \lor \neg \left(x \leq 1\right):\\
\;\;\;\;\frac{-x}{\tan B}\\
\mathbf{else}:\\
\;\;\;\;\frac{1}{\sin B}\\
\end{array}
\end{array}
if x < -1.6000000000000001 or 1 < x:
Initial program 99.7%
+-commutative 99.7%
div-inv 99.9%
sub-neg 99.9%
frac-sub 93.8%
associate-/r* 99.8%
*-un-lft-identity 99.8%
*-commutative 99.8%
Applied egg-rr 99.8%
Taylor expanded in x around inf 98.9%
neg-mul-1 98.9%
Simplified 98.9%
if -1.6000000000000001 < x < 1:
Initial program 99.8%
Taylor expanded in x around 0 96.4%
Final simplification 97.6%
(FPCore (B x) :precision binary64 (if (<= B 0.008) (+ (* x (* B 0.3333333333333333)) (/ (- 1.0 x) B)) (/ 1.0 (sin B))))
/* Alternative 6: piecewise. For B <= 0.008, x*(B/3) + (1-x)/B; else 1/sin(B). */
double code(double B, double x) {
double tmp;
if (B <= 0.008) {
tmp = (x * (B * 0.3333333333333333)) + ((1.0 - x) / B);
} else {
tmp = 1.0 / sin(B);
}
return tmp;
}
! Alternative 6: piecewise. For b <= 0.008, x*(b/3) + (1-x)/b; else 1/sin(b).
real(8) function code(b, x)
real(8), intent (in) :: b
real(8), intent (in) :: x
real(8) :: tmp
if (b <= 0.008d0) then
tmp = (x * (b * 0.3333333333333333d0)) + ((1.0d0 - x) / b)
else
tmp = 1.0d0 / sin(b)
end if
code = tmp
end function
// Alternative 6: piecewise. For B <= 0.008, x*(B/3) + (1-x)/B; else 1/Math.sin(B).
public static double code(double B, double x) {
double tmp;
if (B <= 0.008) {
tmp = (x * (B * 0.3333333333333333)) + ((1.0 - x) / B);
} else {
tmp = 1.0 / Math.sin(B);
}
return tmp;
}
def code(B, x):
    """Piecewise alternative: for B <= 0.008, x*(B/3) + (1-x)/B; else 1/sin(B).

    Restored from the report's invalid single-line form (newlines lost); same semantics.
    """
    tmp = 0
    if B <= 0.008:
        tmp = (x * (B * 0.3333333333333333)) + ((1.0 - x) / B)
    else:
        tmp = 1.0 / math.sin(B)
    return tmp
# Alternative 6: piecewise; for B <= 0.008, x*(B/3) + (1-x)/B, else 1/sin(B).
function code(B, x) tmp = 0.0 if (B <= 0.008) tmp = Float64(Float64(x * Float64(B * 0.3333333333333333)) + Float64(Float64(1.0 - x) / B)); else tmp = Float64(1.0 / sin(B)); end return tmp end
% Alternative 6: piecewise; for B <= 0.008, x*(B/3) + (1-x)/B, else 1/sin(B).
function tmp_2 = code(B, x) tmp = 0.0; if (B <= 0.008) tmp = (x * (B * 0.3333333333333333)) + ((1.0 - x) / B); else tmp = 1.0 / sin(B); end tmp_2 = tmp; end
(* Alternative 6: piecewise; for B <= 0.008, x*(B/3) + (1-x)/B, else 1/Sin[B]. *)
code[B_, x_] := If[LessEqual[B, 0.008], N[(N[(x * N[(B * 0.3333333333333333), $MachinePrecision]), $MachinePrecision] + N[(N[(1.0 - x), $MachinePrecision] / B), $MachinePrecision]), $MachinePrecision], N[(1.0 / N[Sin[B], $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;B \leq 0.008:\\
\;\;\;\;x \cdot \left(B \cdot 0.3333333333333333\right) + \frac{1 - x}{B}\\
\mathbf{else}:\\
\;\;\;\;\frac{1}{\sin B}\\
\end{array}
\end{array}
if B < 0.0080000000000000002:
Initial program 99.8%
Taylor expanded in B around 0 69.3%
associate--l+ 69.3%
*-commutative 69.3%
div-sub 69.3%
Simplified 69.3%
Taylor expanded in x around inf 69.4%
associate-*r* 69.4%
*-commutative 69.4%
*-commutative 69.4%
Simplified 69.4%
if 0.0080000000000000002 < B:
Initial program 99.5%
Taylor expanded in x around 0 60.1%
Final simplification 67.4%
(FPCore (B x) :precision binary64 (+ (* x (* B 0.3333333333333333)) (/ (- 1.0 x) B)))
/* Alternative 7: series form x*(B/3) + (1-x)/B; same operations and rounding as the reference. */
double code(double B, double x) {
    double cubic_term = x * (B * 0.3333333333333333);
    double leading_term = (1.0 - x) / B;
    return cubic_term + leading_term;
}
! Alternative 7: x*(b/3) + (1-x)/b in real(8).
real(8) function code(b, x)
real(8), intent (in) :: b
real(8), intent (in) :: x
code = (x * (b * 0.3333333333333333d0)) + ((1.0d0 - x) / b)
end function
// Alternative 7: x*(B/3) + (1-x)/B in double precision.
public static double code(double B, double x) {
return (x * (B * 0.3333333333333333)) + ((1.0 - x) / B);
}
def code(B, x):
    """Evaluate x*(B * 0.3333333333333333) + (1-x)/B in double precision."""
    cubic_term = x * (B * 0.3333333333333333)
    leading_term = (1.0 - x) / B
    return cubic_term + leading_term
# Alternative 7: x*(B/3) + (1-x)/B with explicit Float64 rounding.
function code(B, x) return Float64(Float64(x * Float64(B * 0.3333333333333333)) + Float64(Float64(1.0 - x) / B)) end
% Alternative 7: x*(B/3) + (1-x)/B in double precision.
function tmp = code(B, x) tmp = (x * (B * 0.3333333333333333)) + ((1.0 - x) / B); end
(* Alternative 7: x*(B/3) + (1-x)/B, rounded to $MachinePrecision per step. *)
code[B_, x_] := N[(N[(x * N[(B * 0.3333333333333333), $MachinePrecision]), $MachinePrecision] + N[(N[(1.0 - x), $MachinePrecision] / B), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x \cdot \left(B \cdot 0.3333333333333333\right) + \frac{1 - x}{B}
\end{array}
Initial program 99.7%
Taylor expanded in B around 0 55.5%
associate--l+ 55.5%
*-commutative 55.5%
div-sub 55.5%
Simplified 55.5%
Taylor expanded in x around inf 55.6%
associate-*r* 55.6%
*-commutative 55.6%
*-commutative 55.6%
Simplified 55.6%
Final simplification 55.6%
(FPCore (B x) :precision binary64 (+ (/ (- 1.0 x) B) (* B 0.16666666666666666)))
/* Alternative 8: (1 - x)/B + B * 0.16666666666666666 in double precision. */
double code(double B, double x) {
return ((1.0 - x) / B) + (B * 0.16666666666666666);
}
! Alternative 8: (1-x)/b + b/6 in real(8).
real(8) function code(b, x)
real(8), intent (in) :: b
real(8), intent (in) :: x
code = ((1.0d0 - x) / b) + (b * 0.16666666666666666d0)
end function
// Alternative 8: (1-x)/B + B/6 in double precision.
public static double code(double B, double x) {
return ((1.0 - x) / B) + (B * 0.16666666666666666);
}
# Alternative 8: (1-x)/B + B * 0.16666666666666666.
def code(B, x): return ((1.0 - x) / B) + (B * 0.16666666666666666)
# Alternative 8: (1-x)/B + B/6 with explicit Float64 rounding.
function code(B, x) return Float64(Float64(Float64(1.0 - x) / B) + Float64(B * 0.16666666666666666)) end
% Alternative 8: (1-x)/B + B/6 in double precision.
function tmp = code(B, x) tmp = ((1.0 - x) / B) + (B * 0.16666666666666666); end
(* Alternative 8: (1-x)/B + B/6, rounded to $MachinePrecision per step. *)
code[B_, x_] := N[(N[(N[(1.0 - x), $MachinePrecision] / B), $MachinePrecision] + N[(B * 0.16666666666666666), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1 - x}{B} + B \cdot 0.16666666666666666
\end{array}
Initial program 99.7%
Taylor expanded in B around 0 78.5%
Taylor expanded in B around 0 55.3%
associate--l+ 55.3%
*-commutative 55.3%
div-sub 55.3%
Simplified 55.3%
Final simplification 55.3%
(FPCore (B x) :precision binary64 (if (or (<= x -580.0) (not (<= x 1.0))) (- (/ x B)) (/ 1.0 B)))
/* Alternative 9: piecewise. -(x/B) when x <= -580 or x > 1.0, else 1/B. */
double code(double B, double x) {
double tmp;
if ((x <= -580.0) || !(x <= 1.0)) {
tmp = -(x / B);
} else {
tmp = 1.0 / B;
}
return tmp;
}
! Alternative 9: piecewise. -(x/b) when x <= -580 or x > 1.0, else 1/b.
real(8) function code(b, x)
real(8), intent (in) :: b
real(8), intent (in) :: x
real(8) :: tmp
if ((x <= (-580.0d0)) .or. (.not. (x <= 1.0d0))) then
tmp = -(x / b)
else
tmp = 1.0d0 / b
end if
code = tmp
end function
// Alternative 9: piecewise. -(x/B) when x <= -580 or x > 1.0, else 1/B.
public static double code(double B, double x) {
double tmp;
if ((x <= -580.0) || !(x <= 1.0)) {
tmp = -(x / B);
} else {
tmp = 1.0 / B;
}
return tmp;
}
def code(B, x):
    """Piecewise alternative: -(x/B) when x <= -580 or x > 1.0, else 1/B.

    Restored from the report's invalid single-line form (newlines lost); same semantics.
    """
    tmp = 0
    if (x <= -580.0) or not (x <= 1.0):
        tmp = -(x / B)
    else:
        tmp = 1.0 / B
    return tmp
# Alternative 9: piecewise; -(x/B) when x <= -580 or x > 1.0, else 1/B.
function code(B, x) tmp = 0.0 if ((x <= -580.0) || !(x <= 1.0)) tmp = Float64(-Float64(x / B)); else tmp = Float64(1.0 / B); end return tmp end
% Alternative 9: piecewise; -(x/B) when x <= -580 or x > 1.0, else 1/B.
function tmp_2 = code(B, x) tmp = 0.0; if ((x <= -580.0) || ~((x <= 1.0))) tmp = -(x / B); else tmp = 1.0 / B; end tmp_2 = tmp; end
(* Alternative 9: piecewise; -(x/B) when x <= -580 or x > 1.0, else 1/B. *)
code[B_, x_] := If[Or[LessEqual[x, -580.0], N[Not[LessEqual[x, 1.0]], $MachinePrecision]], (-N[(x / B), $MachinePrecision]), N[(1.0 / B), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -580 \lor \neg \left(x \leq 1\right):\\
\;\;\;\;-\frac{x}{B}\\
\mathbf{else}:\\
\;\;\;\;\frac{1}{B}\\
\end{array}
\end{array}
if x < -580 or 1 < x:
Initial program 99.7%
Taylor expanded in B around 0 59.3%
Taylor expanded in x around inf 59.1%
associate-*r/ 59.1%
neg-mul-1 59.1%
Simplified 59.1%
if -580 < x < 1:
Initial program 99.8%
Taylor expanded in B around 0 51.2%
Taylor expanded in x around 0 49.6%
Final simplification 54.3%
(FPCore (B x) :precision binary64 (/ (- 1.0 x) B))
/* Alternative 10: (1 - x)/B in double precision. */
double code(double B, double x) {
return (1.0 - x) / B;
}
! Alternative 10: (1-x)/b in real(8).
real(8) function code(b, x)
real(8), intent (in) :: b
real(8), intent (in) :: x
code = (1.0d0 - x) / b
end function
// Alternative 10: (1-x)/B in double precision.
public static double code(double B, double x) {
return (1.0 - x) / B;
}
# Alternative 10: (1-x)/B.
def code(B, x): return (1.0 - x) / B
# Alternative 10: (1-x)/B with explicit Float64 rounding.
function code(B, x) return Float64(Float64(1.0 - x) / B) end
% Alternative 10: (1-x)/B in double precision.
function tmp = code(B, x) tmp = (1.0 - x) / B; end
(* Alternative 10: (1-x)/B, rounded to $MachinePrecision per step. *)
code[B_, x_] := N[(N[(1.0 - x), $MachinePrecision] / B), $MachinePrecision]
\begin{array}{l}
\\
\frac{1 - x}{B}
\end{array}
Initial program 99.7%
Taylor expanded in B around 0 55.2%
Final simplification 55.2%
(FPCore (B x) :precision binary64 (/ 1.0 B))
/* Alternative 11: 1/B in double precision (x is accepted but unused in this variant). */
double code(double B, double x) {
return 1.0 / B;
}
! Alternative 11: 1/b in real(8) (x is accepted but unused in this variant).
real(8) function code(b, x)
real(8), intent (in) :: b
real(8), intent (in) :: x
code = 1.0d0 / b
end function
// Alternative 11: 1/B in double precision (x is accepted but unused in this variant).
public static double code(double B, double x) {
return 1.0 / B;
}
# Alternative 11: 1/B (x is accepted but unused in this variant).
def code(B, x): return 1.0 / B
# Alternative 11: 1/B as Float64 (x is accepted but unused in this variant).
function code(B, x) return Float64(1.0 / B) end
% Alternative 11: 1/B (x is accepted but unused in this variant).
function tmp = code(B, x) tmp = 1.0 / B; end
(* Alternative 11: 1/B at $MachinePrecision (x is accepted but unused in this variant). *)
code[B_, x_] := N[(1.0 / B), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{B}
\end{array}
Initial program 99.7%
Taylor expanded in B around 0 55.2%
Taylor expanded in x around 0 26.6%
Final simplification 26.6%
herbie shell --seed 2023332
(FPCore (B x)
:name "VandenBroeck and Keller, Equation (24)"
:precision binary64
(+ (- (* x (/ 1.0 (tan B)))) (/ 1.0 (sin B))))