
(FPCore (B x) :precision binary64 (+ (- (* x (/ 1.0 (tan B)))) (/ 1.0 (sin B))))
double code(double B, double x) {
return -(x * (1.0 / tan(B))) + (1.0 / sin(B));
}
real(8) function code(b, x)
    ! csc(b) - x*cot(b), in the original -(x*cot(b)) + csc(b) ordering.
    implicit none
    real(8), intent (in) :: b
    real(8), intent (in) :: x
    code = -(x * (1.0d0 / tan(b))) + (1.0d0 / sin(b))
end function
public static double code(double B, double x) {
    // csc(B) - x*cot(B): same operations, named intermediates.
    final double cotB = 1.0 / Math.tan(B);
    final double cscB = 1.0 / Math.sin(B);
    return -(x * cotB) + cscB;
}
def code(B, x): return -(x * (1.0 / math.tan(B))) + (1.0 / math.sin(B))
function code(B, x) return Float64(Float64(-Float64(x * Float64(1.0 / tan(B)))) + Float64(1.0 / sin(B))) end
function tmp = code(B, x) tmp = -(x * (1.0 / tan(B))) + (1.0 / sin(B)); end
code[B_, x_] := N[((-N[(x * N[(1.0 / N[Tan[B], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]) + N[(1.0 / N[Sin[B], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(-x \cdot \frac{1}{\tan B}\right) + \frac{1}{\sin B}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 8 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (B x) :precision binary64 (+ (- (* x (/ 1.0 (tan B)))) (/ 1.0 (sin B))))
double code(double B, double x) {
return -(x * (1.0 / tan(B))) + (1.0 / sin(B));
}
real(8) function code(b, x)
    ! Same as the initial program: csc(b) - x*cot(b).
    implicit none
    real(8), intent (in) :: b
    real(8), intent (in) :: x
    code = -(x * (1.0d0 / tan(b))) + (1.0d0 / sin(b))
end function
public static double code(double B, double x) {
    // Same as the initial program: csc(B) - x*cot(B).
    final double reciprocalTan = 1.0 / Math.tan(B);
    return -(x * reciprocalTan) + (1.0 / Math.sin(B));
}
def code(B, x): return -(x * (1.0 / math.tan(B))) + (1.0 / math.sin(B))
function code(B, x) return Float64(Float64(-Float64(x * Float64(1.0 / tan(B)))) + Float64(1.0 / sin(B))) end
function tmp = code(B, x) tmp = -(x * (1.0 / tan(B))) + (1.0 / sin(B)); end
code[B_, x_] := N[((-N[(x * N[(1.0 / N[Tan[B], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]) + N[(1.0 / N[Sin[B], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(-x \cdot \frac{1}{\tan B}\right) + \frac{1}{\sin B}
\end{array}
(FPCore (B x) :precision binary64 (- (/ 1.0 (sin B)) (/ x (tan B))))
double code(double B, double x) {
return (1.0 / sin(B)) - (x / tan(B));
}
real(8) function code(b, x)
    ! csc(b) - x*cot(b), with x/tan(b) as a single division.
    implicit none
    real(8), intent (in) :: b
    real(8), intent (in) :: x
    code = (1.0d0 / sin(b)) - (x / tan(b))
end function
public static double code(double B, double x) {
    // csc(B) - x*cot(B), with x/tan(B) as a single division.
    final double cscB = 1.0 / Math.sin(B);
    final double xCotB = x / Math.tan(B);
    return cscB - xCotB;
}
def code(B, x): return (1.0 / math.sin(B)) - (x / math.tan(B))
function code(B, x) return Float64(Float64(1.0 / sin(B)) - Float64(x / tan(B))) end
function tmp = code(B, x) tmp = (1.0 / sin(B)) - (x / tan(B)); end
code[B_, x_] := N[(N[(1.0 / N[Sin[B], $MachinePrecision]), $MachinePrecision] - N[(x / N[Tan[B], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{\sin B} - \frac{x}{\tan B}
\end{array}
Initial program 99.7%
distribute-lft-neg-in 99.7%
+-commutative 99.7%
*-commutative 99.7%
remove-double-neg 99.7%
distribute-frac-neg2 99.7%
tan-neg 99.7%
cancel-sign-sub-inv 99.7%
associate-*l/ 99.8%
*-lft-identity 99.8%
tan-neg 99.8%
distribute-neg-frac2 99.8%
distribute-neg-frac 99.8%
remove-double-neg 99.8%
Simplified 99.8%
(FPCore (B x) :precision binary64 (if (or (<= x -7.8e-7) (not (<= x 0.026))) (- (/ 1.0 B) (/ x (tan B))) (/ 1.0 (sin B))))
double code(double B, double x) {
double tmp;
if ((x <= -7.8e-7) || !(x <= 0.026)) {
tmp = (1.0 / B) - (x / tan(B));
} else {
tmp = 1.0 / sin(B);
}
return tmp;
}
real(8) function code(b, x)
    ! Regime split on x: outside (-7.8e-7, 0.026] use 1/b - x/tan(b),
    ! otherwise the csc(b) form.
    implicit none
    real(8), intent (in) :: b
    real(8), intent (in) :: x
    real(8) :: tmp
    if ((x <= (-7.8d-7)) .or. (.not. (x <= 0.026d0))) then
        tmp = (1.0d0 / b) - (x / tan(b))
    else
        tmp = 1.0d0 / sin(b)
    end if
    code = tmp
end function
public static double code(double B, double x) {
    // Regime split on x: outside (-7.8e-7, 0.026] use 1/B - x/tan(B).
    return ((x <= -7.8e-7) || !(x <= 0.026))
            ? (1.0 / B) - (x / Math.tan(B))
            : 1.0 / Math.sin(B);
}
def code(B, x): tmp = 0 if (x <= -7.8e-7) or not (x <= 0.026): tmp = (1.0 / B) - (x / math.tan(B)) else: tmp = 1.0 / math.sin(B) return tmp
function code(B, x) tmp = 0.0 if ((x <= -7.8e-7) || !(x <= 0.026)) tmp = Float64(Float64(1.0 / B) - Float64(x / tan(B))); else tmp = Float64(1.0 / sin(B)); end return tmp end
function tmp_2 = code(B, x) tmp = 0.0; if ((x <= -7.8e-7) || ~((x <= 0.026))) tmp = (1.0 / B) - (x / tan(B)); else tmp = 1.0 / sin(B); end tmp_2 = tmp; end
code[B_, x_] := If[Or[LessEqual[x, -7.8e-7], N[Not[LessEqual[x, 0.026]], $MachinePrecision]], N[(N[(1.0 / B), $MachinePrecision] - N[(x / N[Tan[B], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(1.0 / N[Sin[B], $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -7.8 \cdot 10^{-7} \lor \neg \left(x \leq 0.026\right):\\
\;\;\;\;\frac{1}{B} - \frac{x}{\tan B}\\
\mathbf{else}:\\
\;\;\;\;\frac{1}{\sin B}\\
\end{array}
\end{array}
if x < -7.80000000000000049e-7 or 0.0259999999999999988 < x: Initial program 99.6%
distribute-lft-neg-in 99.6%
+-commutative 99.6%
*-commutative 99.6%
remove-double-neg 99.6%
distribute-frac-neg2 99.6%
tan-neg 99.6%
cancel-sign-sub-inv 99.6%
associate-*l/ 99.8%
*-lft-identity 99.8%
tan-neg 99.8%
distribute-neg-frac2 99.8%
distribute-neg-frac 99.8%
remove-double-neg 99.8%
Simplified 99.8%
Taylor expanded in B around 0 98.1%
if -7.80000000000000049e-7 < x < 0.0259999999999999988: Initial program 99.8%
Taylor expanded in x around 0 96.5%
Final simplification 97.3%
(FPCore (B x) :precision binary64 (if (or (<= x -1.7) (not (<= x 1.0))) (/ x (- (tan B))) (/ 1.0 (sin B))))
double code(double B, double x) {
double tmp;
if ((x <= -1.7) || !(x <= 1.0)) {
tmp = x / -tan(B);
} else {
tmp = 1.0 / sin(B);
}
return tmp;
}
real(8) function code(b, x)
    ! For x outside (-1.7, 1.0], use x / -tan(b); otherwise csc(b).
    implicit none
    real(8), intent (in) :: b
    real(8), intent (in) :: x
    real(8) :: tmp
    if ((x <= (-1.7d0)) .or. (.not. (x <= 1.0d0))) then
        tmp = x / -tan(b)
    else
        tmp = 1.0d0 / sin(b)
    end if
    code = tmp
end function
public static double code(double B, double x) {
    // For x outside (-1.7, 1.0], use x / -tan(B); otherwise csc(B).
    return ((x <= -1.7) || !(x <= 1.0))
            ? x / -Math.tan(B)
            : 1.0 / Math.sin(B);
}
def code(B, x): tmp = 0 if (x <= -1.7) or not (x <= 1.0): tmp = x / -math.tan(B) else: tmp = 1.0 / math.sin(B) return tmp
function code(B, x) tmp = 0.0 if ((x <= -1.7) || !(x <= 1.0)) tmp = Float64(x / Float64(-tan(B))); else tmp = Float64(1.0 / sin(B)); end return tmp end
function tmp_2 = code(B, x) tmp = 0.0; if ((x <= -1.7) || ~((x <= 1.0))) tmp = x / -tan(B); else tmp = 1.0 / sin(B); end tmp_2 = tmp; end
code[B_, x_] := If[Or[LessEqual[x, -1.7], N[Not[LessEqual[x, 1.0]], $MachinePrecision]], N[(x / (-N[Tan[B], $MachinePrecision])), $MachinePrecision], N[(1.0 / N[Sin[B], $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -1.7 \lor \neg \left(x \leq 1\right):\\
\;\;\;\;\frac{x}{-\tan B}\\
\mathbf{else}:\\
\;\;\;\;\frac{1}{\sin B}\\
\end{array}
\end{array}
if x < -1.69999999999999996 or 1 < x: Initial program 99.6%
Taylor expanded in x around inf 96.8%
mul-1-neg 96.8%
associate-*l/ 96.8%
*-commutative 96.8%
Simplified 96.8%
distribute-lft-neg-in 96.8%
clear-num 96.7%
un-div-inv 96.7%
Applied egg-rr 96.7%
distribute-frac-neg 96.7%
neg-sub0 96.7%
add-sqr-sqrt 72.2%
sqrt-unprod 72.5%
sqr-neg 72.5%
sqrt-unprod 0.2%
add-sqr-sqrt 0.5%
associate-/r/ 0.5%
clear-num 0.5%
add-sqr-sqrt 0.2%
sqrt-unprod 72.4%
sqr-neg 72.4%
sqrt-unprod 72.2%
add-sqr-sqrt 96.7%
tan-quot 96.8%
Applied egg-rr 96.8%
associate-*l/ 97.0%
*-lft-identity 97.0%
neg-sub0 97.0%
distribute-neg-frac 97.0%
Simplified 97.0%
if -1.69999999999999996 < x < 1: Initial program 99.7%
Taylor expanded in x around 0 95.6%
Final simplification 96.3%
(FPCore (B x) :precision binary64 (if (<= B 0.03) (- (/ 1.0 B) (/ x B)) (/ 1.0 (sin B))))
double code(double B, double x) {
double tmp;
if (B <= 0.03) {
tmp = (1.0 / B) - (x / B);
} else {
tmp = 1.0 / sin(B);
}
return tmp;
}
real(8) function code(b, x)
    ! For b <= 0.03 use the Taylor form 1/b - x/b; otherwise csc(b).
    implicit none
    real(8), intent (in) :: b
    real(8), intent (in) :: x
    real(8) :: tmp
    if (b <= 0.03d0) then
        tmp = (1.0d0 / b) - (x / b)
    else
        tmp = 1.0d0 / sin(b)
    end if
    code = tmp
end function
public static double code(double B, double x) {
    // For B <= 0.03 use the Taylor form 1/B - x/B; otherwise csc(B).
    return (B <= 0.03)
            ? (1.0 / B) - (x / B)
            : 1.0 / Math.sin(B);
}
def code(B, x): tmp = 0 if B <= 0.03: tmp = (1.0 / B) - (x / B) else: tmp = 1.0 / math.sin(B) return tmp
function code(B, x) tmp = 0.0 if (B <= 0.03) tmp = Float64(Float64(1.0 / B) - Float64(x / B)); else tmp = Float64(1.0 / sin(B)); end return tmp end
function tmp_2 = code(B, x) tmp = 0.0; if (B <= 0.03) tmp = (1.0 / B) - (x / B); else tmp = 1.0 / sin(B); end tmp_2 = tmp; end
code[B_, x_] := If[LessEqual[B, 0.03], N[(N[(1.0 / B), $MachinePrecision] - N[(x / B), $MachinePrecision]), $MachinePrecision], N[(1.0 / N[Sin[B], $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;B \leq 0.03:\\
\;\;\;\;\frac{1}{B} - \frac{x}{B}\\
\mathbf{else}:\\
\;\;\;\;\frac{1}{\sin B}\\
\end{array}
\end{array}
if B < 0.029999999999999999Initial program 99.7%
Taylor expanded in B around 0 66.5%
div-sub 66.6%
Applied egg-rr 66.6%
if 0.029999999999999999 < B Initial program 99.6%
Taylor expanded in x around 0 44.6%
(FPCore (B x) :precision binary64 (if (or (<= x -0.003) (not (<= x 5e-9))) (/ x (- B)) (/ 1.0 B)))
double code(double B, double x) {
    /* Leading-order forms in B: x / -B away from x == 0, else 1/B. */
    if ((x <= -0.003) || !(x <= 5e-9)) {
        return x / -B;
    }
    return 1.0 / B;
}
real(8) function code(b, x)
    ! Leading-order forms in b: x / -b away from x == 0, else 1/b.
    implicit none
    real(8), intent (in) :: b
    real(8), intent (in) :: x
    real(8) :: tmp
    if ((x <= (-0.003d0)) .or. (.not. (x <= 5d-9))) then
        tmp = x / -b
    else
        tmp = 1.0d0 / b
    end if
    code = tmp
end function
public static double code(double B, double x) {
    // Leading-order forms in B: x / -B away from x == 0, else 1/B.
    return ((x <= -0.003) || !(x <= 5e-9))
            ? x / -B
            : 1.0 / B;
}
def code(B, x): tmp = 0 if (x <= -0.003) or not (x <= 5e-9): tmp = x / -B else: tmp = 1.0 / B return tmp
function code(B, x) tmp = 0.0 if ((x <= -0.003) || !(x <= 5e-9)) tmp = Float64(x / Float64(-B)); else tmp = Float64(1.0 / B); end return tmp end
function tmp_2 = code(B, x) tmp = 0.0; if ((x <= -0.003) || ~((x <= 5e-9))) tmp = x / -B; else tmp = 1.0 / B; end tmp_2 = tmp; end
code[B_, x_] := If[Or[LessEqual[x, -0.003], N[Not[LessEqual[x, 5e-9]], $MachinePrecision]], N[(x / (-B)), $MachinePrecision], N[(1.0 / B), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -0.003 \lor \neg \left(x \leq 5 \cdot 10^{-9}\right):\\
\;\;\;\;\frac{x}{-B}\\
\mathbf{else}:\\
\;\;\;\;\frac{1}{B}\\
\end{array}
\end{array}
if x < -0.0030000000000000001 or 5.0000000000000001e-9 < x Initial program 99.6%
Taylor expanded in B around 0 47.4%
Taylor expanded in x around inf 45.8%
neg-mul-1 45.8%
Simplified 45.8%
if -0.0030000000000000001 < x < 5.0000000000000001e-9: Initial program 99.8%
Taylor expanded in B around 0 53.1%
Taylor expanded in x around 0 52.4%
Final simplification 49.1%
(FPCore (B x) :precision binary64 (- (/ 1.0 B) (/ x B)))
double code(double B, double x) {
    /* (1 - x)/B computed as two separate divisions (preserves rounding). */
    double inv_B = 1.0 / B;
    double x_over_B = x / B;
    return inv_B - x_over_B;
}
real(8) function code(b, x)
    ! Taylor form near b == 0: 1/b - x/b as two separate divisions.
    implicit none
    real(8), intent (in) :: b
    real(8), intent (in) :: x
    code = (1.0d0 / b) - (x / b)
end function
public static double code(double B, double x) {
    // (1 - x)/B computed as two separate divisions (preserves rounding).
    final double invB = 1.0 / B;
    final double xOverB = x / B;
    return invB - xOverB;
}
def code(B, x): return (1.0 / B) - (x / B)
function code(B, x) return Float64(Float64(1.0 / B) - Float64(x / B)) end
function tmp = code(B, x) tmp = (1.0 / B) - (x / B); end
code[B_, x_] := N[(N[(1.0 / B), $MachinePrecision] - N[(x / B), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{B} - \frac{x}{B}
\end{array}
Initial program 99.7%
Taylor expanded in B around 0 50.2%
div-sub 50.3%
Applied egg-rr 50.3%
(FPCore (B x) :precision binary64 (/ (- 1.0 x) B))
double code(double B, double x) {
    /* Single subtraction before the divide: (1 - x)/B. */
    double numerator = 1.0 - x;
    return numerator / B;
}
real(8) function code(b, x)
    ! Single subtraction before the divide: (1 - x)/b.
    implicit none
    real(8), intent (in) :: b
    real(8), intent (in) :: x
    code = (1.0d0 - x) / b
end function
public static double code(double B, double x) {
    // Single subtraction before the divide: (1 - x)/B.
    final double numerator = 1.0 - x;
    return numerator / B;
}
def code(B, x): return (1.0 - x) / B
function code(B, x) return Float64(Float64(1.0 - x) / B) end
function tmp = code(B, x) tmp = (1.0 - x) / B; end
code[B_, x_] := N[(N[(1.0 - x), $MachinePrecision] / B), $MachinePrecision]
\begin{array}{l}
\\
\frac{1 - x}{B}
\end{array}
Initial program 99.7%
Taylor expanded in B around 0 50.2%
(FPCore (B x) :precision binary64 (/ 1.0 B))
double code(double B, double x) {
    (void) x;  /* x does not appear in this leading-order term */
    return 1.0 / B;
}
real(8) function code(b, x)
    ! Leading-order term 1/b; x is intentionally unused.
    implicit none
    real(8), intent (in) :: b
    real(8), intent (in) :: x
    code = 1.0d0 / b
end function
public static double code(double B, double x) {
    // Leading-order term 1/B; x is intentionally unused.
    final double reciprocal = 1.0 / B;
    return reciprocal;
}
def code(B, x): return 1.0 / B
function code(B, x) return Float64(1.0 / B) end
function tmp = code(B, x) tmp = 1.0 / B; end
code[B_, x_] := N[(1.0 / B), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{B}
\end{array}
Initial program 99.7%
Taylor expanded in B around 0 50.2%
Taylor expanded in x around 0 27.4%
herbie shell --seed 2024156
; Original input program as supplied to Herbie (see seed above).
(FPCore (B x)
:name "VandenBroeck and Keller, Equation (24)"
:precision binary64
(+ (- (* x (/ 1.0 (tan B)))) (/ 1.0 (sin B))))