
(FPCore (B x) :precision binary64 (+ (- (* x (/ 1.0 (tan B)))) (/ 1.0 (sin B))))
double code(double B, double x) {
return -(x * (1.0 / tan(B))) + (1.0 / sin(B));
}
! Evaluates csc(b) - x*cot(b) in double precision, written exactly as
! -(x * (1/tan(b))) + 1/sin(b) to match the FPCore source expression.
real(8) function code(b, x)
real(8), intent (in) :: b
real(8), intent (in) :: x
code = -(x * (1.0d0 / tan(b))) + (1.0d0 / sin(b))
end function
/** Evaluates csc(B) - x*cot(B), i.e. -(x * (1/tan(B))) + 1/sin(B), in binary64. */
public static double code(double B, double x) {
    double cotTerm = x * (1.0 / Math.tan(B));
    double cscTerm = 1.0 / Math.sin(B);
    return -cotTerm + cscTerm;
}
def code(B, x):
    """Evaluate csc(B) - x*cot(B) as -(x * (1/tan(B))) + 1/sin(B)."""
    inv_tan = 1.0 / math.tan(B)
    inv_sin = 1.0 / math.sin(B)
    return -(x * inv_tan) + inv_sin
# Evaluates csc(B) - x*cot(B); every intermediate is explicitly rounded to Float64
# so the result matches binary64 evaluation step for step.
function code(B, x) return Float64(Float64(-Float64(x * Float64(1.0 / tan(B)))) + Float64(1.0 / sin(B))) end
% Evaluates csc(B) - x*cot(B) as -(x * (1/tan(B))) + 1/sin(B) in double precision.
function tmp = code(B, x) tmp = -(x * (1.0 / tan(B))) + (1.0 / sin(B)); end
(* Evaluates csc(B) - x*cot(B); each subexpression is rounded with N[..., $MachinePrecision] to mimic binary64 arithmetic. *)
code[B_, x_] := N[((-N[(x * N[(1.0 / N[Tan[B], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]) + N[(1.0 / N[Sin[B], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(-x \cdot \frac{1}{\tan B}\right) + \frac{1}{\sin B}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 9 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (B x) :precision binary64 (+ (- (* x (/ 1.0 (tan B)))) (/ 1.0 (sin B))))
double code(double B, double x) {
return -(x * (1.0 / tan(B))) + (1.0 / sin(B));
}
real(8) function code(b, x)
real(8), intent (in) :: b
real(8), intent (in) :: x
code = -(x * (1.0d0 / tan(b))) + (1.0d0 / sin(b))
end function
public static double code(double B, double x) {
return -(x * (1.0 / Math.tan(B))) + (1.0 / Math.sin(B));
}
def code(B, x):
    # csc(B) - x*cot(B), kept in the exact shape of the FPCore expression.
    neg_cot_term = -(x * (1.0 / math.tan(B)))
    return neg_cot_term + (1.0 / math.sin(B))
function code(B, x) return Float64(Float64(-Float64(x * Float64(1.0 / tan(B)))) + Float64(1.0 / sin(B))) end
function tmp = code(B, x) tmp = -(x * (1.0 / tan(B))) + (1.0 / sin(B)); end
code[B_, x_] := N[((-N[(x * N[(1.0 / N[Tan[B], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]) + N[(1.0 / N[Sin[B], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(-x \cdot \frac{1}{\tan B}\right) + \frac{1}{\sin B}
\end{array}
(FPCore (B x) :precision binary64 (- (/ 1.0 (sin B)) (/ x (tan B))))
double code(double B, double x) {
return (1.0 / sin(B)) - (x / tan(B));
}
real(8) function code(b, x)
real(8), intent (in) :: b
real(8), intent (in) :: x
code = (1.0d0 / sin(b)) - (x / tan(b))
end function
public static double code(double B, double x) {
return (1.0 / Math.sin(B)) - (x / Math.tan(B));
}
def code(B, x):
    """Rearranged form: 1/sin(B) - x/tan(B)."""
    csc_b = 1.0 / math.sin(B)
    cot_scaled = x / math.tan(B)
    return csc_b - cot_scaled
function code(B, x) return Float64(Float64(1.0 / sin(B)) - Float64(x / tan(B))) end
function tmp = code(B, x) tmp = (1.0 / sin(B)) - (x / tan(B)); end
code[B_, x_] := N[(N[(1.0 / N[Sin[B], $MachinePrecision]), $MachinePrecision] - N[(x / N[Tan[B], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{\sin B} - \frac{x}{\tan B}
\end{array}
Initial program 99.7%
distribute-lft-neg-in 99.7%
+-commutative 99.7%
*-commutative 99.7%
remove-double-neg 99.7%
distribute-frac-neg2 99.7%
tan-neg 99.7%
cancel-sign-sub-inv 99.7%
*-commutative 99.7%
associate-*r/ 99.8%
*-rgt-identity 99.8%
tan-neg 99.8%
distribute-neg-frac2 99.8%
distribute-neg-frac 99.8%
remove-double-neg 99.8%
Simplified 99.8%
(FPCore (B x) :precision binary64 (if (or (<= x -1150000.0) (not (<= x 1.05))) (- (/ 1.0 B) (/ x (tan B))) (* (/ 1.0 (sin B)) (- 1.0 x))))
double code(double B, double x) {
double tmp;
if ((x <= -1150000.0) || !(x <= 1.05)) {
tmp = (1.0 / B) - (x / tan(B));
} else {
tmp = (1.0 / sin(B)) * (1.0 - x);
}
return tmp;
}
! Piecewise approximation of 1/sin(b) - x/tan(b):
! for x <= -1.15e6 or x > 1.05 use 1/b - x/tan(b);
! otherwise use the factored form (1/sin(b)) * (1 - x).
real(8) function code(b, x)
real(8), intent (in) :: b
real(8), intent (in) :: x
real(8) :: tmp
if ((x <= (-1150000.0d0)) .or. (.not. (x <= 1.05d0))) then
tmp = (1.0d0 / b) - (x / tan(b))
else
tmp = (1.0d0 / sin(b)) * (1.0d0 - x)
end if
code = tmp
end function
public static double code(double B, double x) {
double tmp;
if ((x <= -1150000.0) || !(x <= 1.05)) {
tmp = (1.0 / B) - (x / Math.tan(B));
} else {
tmp = (1.0 / Math.sin(B)) * (1.0 - x);
}
return tmp;
}
def code(B, x):
    """Piecewise approximation of 1/sin(B) - x/tan(B).

    For x <= -1150000.0 or x > 1.05 use (1/B) - x/tan(B);
    otherwise use the factored form (1/sin(B)) * (1 - x).
    """
    # NOTE: the original report collapsed this function onto one line,
    # which is invalid Python; reformatted with identical semantics.
    tmp = 0
    if (x <= -1150000.0) or not (x <= 1.05):
        tmp = (1.0 / B) - (x / math.tan(B))
    else:
        tmp = (1.0 / math.sin(B)) * (1.0 - x)
    return tmp
function code(B, x) tmp = 0.0 if ((x <= -1150000.0) || !(x <= 1.05)) tmp = Float64(Float64(1.0 / B) - Float64(x / tan(B))); else tmp = Float64(Float64(1.0 / sin(B)) * Float64(1.0 - x)); end return tmp end
function tmp_2 = code(B, x) tmp = 0.0; if ((x <= -1150000.0) || ~((x <= 1.05))) tmp = (1.0 / B) - (x / tan(B)); else tmp = (1.0 / sin(B)) * (1.0 - x); end tmp_2 = tmp; end
code[B_, x_] := If[Or[LessEqual[x, -1150000.0], N[Not[LessEqual[x, 1.05]], $MachinePrecision]], N[(N[(1.0 / B), $MachinePrecision] - N[(x / N[Tan[B], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(1.0 / N[Sin[B], $MachinePrecision]), $MachinePrecision] * N[(1.0 - x), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -1150000 \lor \neg \left(x \leq 1.05\right):\\
\;\;\;\;\frac{1}{B} - \frac{x}{\tan B}\\
\mathbf{else}:\\
\;\;\;\;\frac{1}{\sin B} \cdot \left(1 - x\right)\\
\end{array}
\end{array}
if x < -1.15e6 or 1.05000000000000004 < x:
Initial program 99.7%
distribute-lft-neg-in 99.7%
+-commutative 99.7%
*-commutative 99.7%
remove-double-neg 99.7%
distribute-frac-neg2 99.7%
tan-neg 99.7%
cancel-sign-sub-inv 99.7%
*-commutative 99.7%
associate-*r/ 99.8%
*-rgt-identity 99.8%
tan-neg 99.8%
distribute-neg-frac2 99.8%
distribute-neg-frac 99.8%
remove-double-neg 99.8%
Simplified 99.8%
Taylor expanded in B around 0 98.1%
if -1.15e6 < x < 1.05000000000000004:
Initial program 99.8%
tan-quot 99.8%
associate-/r/ 99.8%
Applied egg-rr 99.8%
Taylor expanded in x around 0 99.8%
+-commutative 99.8%
mul-1-neg 99.8%
sub-neg 99.8%
*-rgt-identity 99.8%
*-lft-identity 99.8%
associate-*l/ 99.8%
distribute-lft-out-- 99.8%
Simplified 99.8%
Taylor expanded in B around 0 99.1%
Final simplification 98.6%
(FPCore (B x) :precision binary64 (if (or (<= x -0.95) (not (<= x 4200.0))) (* x (/ -1.0 (sin B))) (/ 1.0 (sin B))))
double code(double B, double x) {
double tmp;
if ((x <= -0.95) || !(x <= 4200.0)) {
tmp = x * (-1.0 / sin(B));
} else {
tmp = 1.0 / sin(B);
}
return tmp;
}
real(8) function code(b, x)
real(8), intent (in) :: b
real(8), intent (in) :: x
real(8) :: tmp
if ((x <= (-0.95d0)) .or. (.not. (x <= 4200.0d0))) then
tmp = x * ((-1.0d0) / sin(b))
else
tmp = 1.0d0 / sin(b)
end if
code = tmp
end function
public static double code(double B, double x) {
double tmp;
if ((x <= -0.95) || !(x <= 4200.0)) {
tmp = x * (-1.0 / Math.sin(B));
} else {
tmp = 1.0 / Math.sin(B);
}
return tmp;
}
def code(B, x):
    """Piecewise approximation of 1/sin(B) - x/tan(B).

    For x <= -0.95 or x > 4200.0 use x * (-1/sin(B));
    otherwise use 1/sin(B).
    """
    # NOTE: the original report collapsed this function onto one line,
    # which is invalid Python; reformatted with identical semantics.
    tmp = 0
    if (x <= -0.95) or not (x <= 4200.0):
        tmp = x * (-1.0 / math.sin(B))
    else:
        tmp = 1.0 / math.sin(B)
    return tmp
function code(B, x) tmp = 0.0 if ((x <= -0.95) || !(x <= 4200.0)) tmp = Float64(x * Float64(-1.0 / sin(B))); else tmp = Float64(1.0 / sin(B)); end return tmp end
function tmp_2 = code(B, x) tmp = 0.0; if ((x <= -0.95) || ~((x <= 4200.0))) tmp = x * (-1.0 / sin(B)); else tmp = 1.0 / sin(B); end tmp_2 = tmp; end
code[B_, x_] := If[Or[LessEqual[x, -0.95], N[Not[LessEqual[x, 4200.0]], $MachinePrecision]], N[(x * N[(-1.0 / N[Sin[B], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(1.0 / N[Sin[B], $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -0.95 \lor \neg \left(x \leq 4200\right):\\
\;\;\;\;x \cdot \frac{-1}{\sin B}\\
\mathbf{else}:\\
\;\;\;\;\frac{1}{\sin B}\\
\end{array}
\end{array}
if x < -0.94999999999999996 or 4200 < x:
Initial program 99.7%
tan-quot 99.6%
associate-/r/ 99.6%
Applied egg-rr 99.6%
Taylor expanded in x around inf 97.6%
mul-1-neg 97.6%
associate-*r/ 97.6%
distribute-lft-neg-in 97.6%
*-commutative 97.6%
Simplified 97.6%
Taylor expanded in B around 0 51.7%
if -0.94999999999999996 < x < 4200:
Initial program 99.8%
Taylor expanded in x around 0 98.3%
Final simplification 75.4%
(FPCore (B x)
:precision binary64
(if (<= B 0.019)
(+
(* B 0.16666666666666666)
(+ (/ 1.0 B) (* x (+ (* B 0.3333333333333333) (/ -1.0 B)))))
(/ 1.0 (sin B))))
double code(double B, double x) {
double tmp;
if (B <= 0.019) {
tmp = (B * 0.16666666666666666) + ((1.0 / B) + (x * ((B * 0.3333333333333333) + (-1.0 / B))));
} else {
tmp = 1.0 / sin(B);
}
return tmp;
}
real(8) function code(b, x)
real(8), intent (in) :: b
real(8), intent (in) :: x
real(8) :: tmp
if (b <= 0.019d0) then
tmp = (b * 0.16666666666666666d0) + ((1.0d0 / b) + (x * ((b * 0.3333333333333333d0) + ((-1.0d0) / b))))
else
tmp = 1.0d0 / sin(b)
end if
code = tmp
end function
public static double code(double B, double x) {
double tmp;
if (B <= 0.019) {
tmp = (B * 0.16666666666666666) + ((1.0 / B) + (x * ((B * 0.3333333333333333) + (-1.0 / B))));
} else {
tmp = 1.0 / Math.sin(B);
}
return tmp;
}
def code(B, x):
    """Approximation of 1/sin(B) - x/tan(B).

    For B <= 0.019 use the Taylor-series form
    B/6 + (1/B + x*(B/3 - 1/B)); otherwise use 1/sin(B).
    """
    # NOTE: the original report collapsed this function onto one line,
    # which is invalid Python; reformatted with identical semantics.
    tmp = 0
    if B <= 0.019:
        tmp = (B * 0.16666666666666666) + ((1.0 / B) + (x * ((B * 0.3333333333333333) + (-1.0 / B))))
    else:
        tmp = 1.0 / math.sin(B)
    return tmp
function code(B, x) tmp = 0.0 if (B <= 0.019) tmp = Float64(Float64(B * 0.16666666666666666) + Float64(Float64(1.0 / B) + Float64(x * Float64(Float64(B * 0.3333333333333333) + Float64(-1.0 / B))))); else tmp = Float64(1.0 / sin(B)); end return tmp end
function tmp_2 = code(B, x) tmp = 0.0; if (B <= 0.019) tmp = (B * 0.16666666666666666) + ((1.0 / B) + (x * ((B * 0.3333333333333333) + (-1.0 / B)))); else tmp = 1.0 / sin(B); end tmp_2 = tmp; end
code[B_, x_] := If[LessEqual[B, 0.019], N[(N[(B * 0.16666666666666666), $MachinePrecision] + N[(N[(1.0 / B), $MachinePrecision] + N[(x * N[(N[(B * 0.3333333333333333), $MachinePrecision] + N[(-1.0 / B), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(1.0 / N[Sin[B], $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;B \leq 0.019:\\
\;\;\;\;B \cdot 0.16666666666666666 + \left(\frac{1}{B} + x \cdot \left(B \cdot 0.3333333333333333 + \frac{-1}{B}\right)\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{1}{\sin B}\\
\end{array}
\end{array}
if B < 0.0189999999999999995:
Initial program 99.8%
Taylor expanded in B around 0 61.9%
Taylor expanded in x around 0 61.9%
if 0.0189999999999999995 < B:
Initial program 99.6%
Taylor expanded in x around 0 60.7%
Final simplification 61.6%
(FPCore (B x) :precision binary64 (* (/ 1.0 (sin B)) (- 1.0 x)))
double code(double B, double x) {
return (1.0 / sin(B)) * (1.0 - x);
}
real(8) function code(b, x)
real(8), intent (in) :: b
real(8), intent (in) :: x
code = (1.0d0 / sin(b)) * (1.0d0 - x)
end function
public static double code(double B, double x) {
return (1.0 / Math.sin(B)) * (1.0 - x);
}
def code(B, x):
    """Factored form: (1/sin(B)) * (1 - x)."""
    csc_b = 1.0 / math.sin(B)
    return csc_b * (1.0 - x)
function code(B, x) return Float64(Float64(1.0 / sin(B)) * Float64(1.0 - x)) end
function tmp = code(B, x) tmp = (1.0 / sin(B)) * (1.0 - x); end
code[B_, x_] := N[(N[(1.0 / N[Sin[B], $MachinePrecision]), $MachinePrecision] * N[(1.0 - x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{\sin B} \cdot \left(1 - x\right)
\end{array}
Initial program 99.7%
tan-quot 99.7%
associate-/r/ 99.7%
Applied egg-rr 99.7%
Taylor expanded in x around 0 99.7%
+-commutative 99.7%
mul-1-neg 99.7%
sub-neg 99.7%
*-rgt-identity 99.7%
*-lft-identity 99.7%
associate-*l/ 99.7%
distribute-lft-out-- 99.7%
Simplified 99.7%
Taylor expanded in B around 0 75.8%
(FPCore (B x) :precision binary64 (+ (* B 0.16666666666666666) (+ (/ 1.0 B) (* x (+ (* B 0.3333333333333333) (/ -1.0 B))))))
/* Taylor-series form: B/6 + (1/B + x*(B/3 - 1/B)). */
double code(double B, double x) {
    double inv_B = 1.0 / B;
    double slope = (B * 0.3333333333333333) + (-1.0 / B);
    return (B * 0.16666666666666666) + (inv_B + (x * slope));
}
real(8) function code(b, x)
real(8), intent (in) :: b
real(8), intent (in) :: x
code = (b * 0.16666666666666666d0) + ((1.0d0 / b) + (x * ((b * 0.3333333333333333d0) + ((-1.0d0) / b))))
end function
public static double code(double B, double x) {
return (B * 0.16666666666666666) + ((1.0 / B) + (x * ((B * 0.3333333333333333) + (-1.0 / B))));
}
def code(B, x):
    """Taylor-series form: B/6 + (1/B + x*(B/3 - 1/B))."""
    sixth_term = B * 0.16666666666666666
    inv_b = 1.0 / B
    slope = (B * 0.3333333333333333) + (-1.0 / B)
    return sixth_term + (inv_b + (x * slope))
function code(B, x) return Float64(Float64(B * 0.16666666666666666) + Float64(Float64(1.0 / B) + Float64(x * Float64(Float64(B * 0.3333333333333333) + Float64(-1.0 / B))))) end
function tmp = code(B, x) tmp = (B * 0.16666666666666666) + ((1.0 / B) + (x * ((B * 0.3333333333333333) + (-1.0 / B)))); end
code[B_, x_] := N[(N[(B * 0.16666666666666666), $MachinePrecision] + N[(N[(1.0 / B), $MachinePrecision] + N[(x * N[(N[(B * 0.3333333333333333), $MachinePrecision] + N[(-1.0 / B), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
B \cdot 0.16666666666666666 + \left(\frac{1}{B} + x \cdot \left(B \cdot 0.3333333333333333 + \frac{-1}{B}\right)\right)
\end{array}
Initial program 99.7%
Taylor expanded in B around 0 50.0%
Taylor expanded in x around 0 50.1%
Final simplification 50.1%
(FPCore (B x) :precision binary64 (/ (- 1.0 x) B))
/* Leading-order form: (1 - x) / B. */
double code(double B, double x) {
    double numerator = 1.0 - x;
    return numerator / B;
}
real(8) function code(b, x)
real(8), intent (in) :: b
real(8), intent (in) :: x
code = (1.0d0 - x) / b
end function
public static double code(double B, double x) {
return (1.0 - x) / B;
}
def code(B, x):
    """Leading-order form: (1 - x) / B."""
    numerator = 1.0 - x
    return numerator / B
function code(B, x) return Float64(Float64(1.0 - x) / B) end
function tmp = code(B, x) tmp = (1.0 - x) / B; end
code[B_, x_] := N[(N[(1.0 - x), $MachinePrecision] / B), $MachinePrecision]
\begin{array}{l}
\\
\frac{1 - x}{B}
\end{array}
Initial program 99.7%
Taylor expanded in B around 0 49.4%
(FPCore (B x) :precision binary64 (/ 1.0 B))
/* Reciprocal form 1/B; x does not appear in this truncated series. */
double code(double B, double x) {
    (void) x; /* unused by construction */
    return 1.0 / B;
}
real(8) function code(b, x)
real(8), intent (in) :: b
real(8), intent (in) :: x
code = 1.0d0 / b
end function
public static double code(double B, double x) {
return 1.0 / B;
}
def code(B, x):
    """Reciprocal form 1/B; x does not appear in this truncated series."""
    reciprocal = 1.0 / B
    return reciprocal
function code(B, x) return Float64(1.0 / B) end
function tmp = code(B, x) tmp = 1.0 / B; end
code[B_, x_] := N[(1.0 / B), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{B}
\end{array}
Initial program 99.7%
Taylor expanded in x around 0 51.4%
Taylor expanded in B around 0 27.3%
(FPCore (B x) :precision binary64 (* B 0.16666666666666666))
/* Leading Taylor term B/6; x does not appear in this truncated series. */
double code(double B, double x) {
    (void) x; /* unused by construction */
    const double ONE_SIXTH = 0.16666666666666666;
    return B * ONE_SIXTH;
}
real(8) function code(b, x)
real(8), intent (in) :: b
real(8), intent (in) :: x
code = b * 0.16666666666666666d0
end function
public static double code(double B, double x) {
return B * 0.16666666666666666;
}
def code(B, x):
    """Leading Taylor term B/6; x does not appear in this truncated series."""
    one_sixth = 0.16666666666666666
    return B * one_sixth
function code(B, x) return Float64(B * 0.16666666666666666) end
function tmp = code(B, x) tmp = B * 0.16666666666666666; end
code[B_, x_] := N[(B * 0.16666666666666666), $MachinePrecision]
\begin{array}{l}
\\
B \cdot 0.16666666666666666
\end{array}
Initial program 99.7%
Taylor expanded in B around 0 50.0%
Taylor expanded in B around inf 3.0%
Taylor expanded in x around 0 2.9%
Final simplification 2.9%
herbie shell --seed 2024111
(FPCore (B x)
:name "VandenBroeck and Keller, Equation (24)"
:precision binary64
(+ (- (* x (/ 1.0 (tan B)))) (/ 1.0 (sin B))))