
(FPCore (B x) :precision binary64 (+ (- (* x (/ 1.0 (tan B)))) (/ 1.0 (sin B))))
double code(double B, double x) {
return -(x * (1.0 / tan(B))) + (1.0 / sin(B));
}
! csc(b) - x*cot(b): computes -(x * (1/tan b)) + (1/sin b) in double precision.
real(8) function code(b, x)
real(8), intent (in) :: b
real(8), intent (in) :: x
code = -(x * (1.0d0 / tan(b))) + (1.0d0 / sin(b))
end function
public static double code(double B, double x) {
    // csc(B) - x*cot(B), keeping the original operation order:
    // -(x * (1/tan B)) + (1/sin B).
    final double cotB = 1.0 / Math.tan(B);
    final double cscB = 1.0 / Math.sin(B);
    return -(x * cotB) + cscB;
}
def code(B, x):
    """Return csc(B) - x*cot(B), evaluated as -(x * (1/tan B)) + (1/sin B)."""
    cot_b = 1.0 / math.tan(B)
    csc_b = 1.0 / math.sin(B)
    return -(x * cot_b) + csc_b
# csc(B) - x*cot(B) with every intermediate explicitly rounded to Float64.
function code(B, x) return Float64(Float64(-Float64(x * Float64(1.0 / tan(B)))) + Float64(1.0 / sin(B))) end
% csc(B) - x*cot(B): -(x * (1/tan B)) + (1/sin B).
function tmp = code(B, x) tmp = -(x * (1.0 / tan(B))) + (1.0 / sin(B)); end
code[B_, x_] := N[((-N[(x * N[(1.0 / N[Tan[B], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]) + N[(1.0 / N[Sin[B], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(-x \cdot \frac{1}{\tan B}\right) + \frac{1}{\sin B}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 10 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (B x) :precision binary64 (+ (- (* x (/ 1.0 (tan B)))) (/ 1.0 (sin B))))
double code(double B, double x) {
return -(x * (1.0 / tan(B))) + (1.0 / sin(B));
}
real(8) function code(b, x)
real(8), intent (in) :: b
real(8), intent (in) :: x
code = -(x * (1.0d0 / tan(b))) + (1.0d0 / sin(b))
end function
public static double code(double B, double x) {
return -(x * (1.0 / Math.tan(B))) + (1.0 / Math.sin(B));
}
def code(B, x): return -(x * (1.0 / math.tan(B))) + (1.0 / math.sin(B))
function code(B, x) return Float64(Float64(-Float64(x * Float64(1.0 / tan(B)))) + Float64(1.0 / sin(B))) end
function tmp = code(B, x) tmp = -(x * (1.0 / tan(B))) + (1.0 / sin(B)); end
code[B_, x_] := N[((-N[(x * N[(1.0 / N[Tan[B], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]) + N[(1.0 / N[Sin[B], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(-x \cdot \frac{1}{\tan B}\right) + \frac{1}{\sin B}
\end{array}
(FPCore (B x) :precision binary64 (- (pow (sin B) -1.0) (/ x (tan B))))
double code(double B, double x) {
return pow(sin(B), -1.0) - (x / tan(B));
}
real(8) function code(b, x)
real(8), intent (in) :: b
real(8), intent (in) :: x
code = (sin(b) ** (-1.0d0)) - (x / tan(b))
end function
public static double code(double B, double x) {
return Math.pow(Math.sin(B), -1.0) - (x / Math.tan(B));
}
def code(B, x):
    """Return sin(B)**-1 - x/tan(B), the subtraction form of the expression."""
    inv_sin = math.pow(math.sin(B), -1.0)
    x_cot = x / math.tan(B)
    return inv_sin - x_cot
function code(B, x) return Float64((sin(B) ^ -1.0) - Float64(x / tan(B))) end
function tmp = code(B, x) tmp = (sin(B) ^ -1.0) - (x / tan(B)); end
code[B_, x_] := N[(N[Power[N[Sin[B], $MachinePrecision], -1.0], $MachinePrecision] - N[(x / N[Tan[B], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
{\sin B}^{-1} - \frac{x}{\tan B}
\end{array}
Initial program 99.7%
Taylor expanded in B around 0
lower-/.f64N/A
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6458.8
Applied rewrites58.8%
lift-+.f64N/A
+-commutativeN/A
lift-neg.f64N/A
lift-*.f64N/A
lift-/.f64N/A
div-invN/A
lift-/.f64N/A
unsub-negN/A
lower--.f6458.9
Applied rewrites58.9%
Taylor expanded in B around inf
lower-/.f64N/A
lower-sin.f6499.8
Applied rewrites99.8%
Final simplification99.8%
(FPCore (B x) :precision binary64 (/ (- 1.0 (* (cos B) x)) (sin B)))
double code(double B, double x) {
return (1.0 - (cos(B) * x)) / sin(B);
}
real(8) function code(b, x)
real(8), intent (in) :: b
real(8), intent (in) :: x
code = (1.0d0 - (cos(b) * x)) / sin(b)
end function
public static double code(double B, double x) {
return (1.0 - (Math.cos(B) * x)) / Math.sin(B);
}
def code(B, x):
    """Return (1 - cos(B)*x) / sin(B), the single-division rewrite."""
    numerator = 1.0 - (math.cos(B) * x)
    return numerator / math.sin(B)
function code(B, x) return Float64(Float64(1.0 - Float64(cos(B) * x)) / sin(B)) end
function tmp = code(B, x) tmp = (1.0 - (cos(B) * x)) / sin(B); end
code[B_, x_] := N[(N[(1.0 - N[(N[Cos[B], $MachinePrecision] * x), $MachinePrecision]), $MachinePrecision] / N[Sin[B], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1 - \cos B \cdot x}{\sin B}
\end{array}
Initial program 99.7%
lift-*.f64N/A
lift-/.f64N/A
un-div-invN/A
lower-/.f6499.8
Applied rewrites99.8%
lift-+.f64N/A
+-commutativeN/A
lift-neg.f64N/A
unsub-negN/A
lift-/.f64N/A
lift-/.f64N/A
clear-numN/A
lift-/.f64N/A
*-lft-identityN/A
lift-*.f64N/A
frac-subN/A
*-lft-identityN/A
metadata-evalN/A
div-invN/A
/-rgt-identityN/A
lift--.f64N/A
*-commutativeN/A
associate-/r*N/A
Applied rewrites99.7%
Taylor expanded in B around inf
*-commutativeN/A
lower-*.f64N/A
lower-cos.f6499.7
Applied rewrites99.7%
(FPCore (B x) :precision binary64 (if (or (<= x -3200.0) (not (<= x 1.0))) (- (/ 1.0 B) (/ x (tan B))) (/ (- 1.0 x) (sin B))))
/* Piecewise rewrite: for x <= -3200 or x > 1 use 1/B - x/tan(B)
   (per the derivation log, a Taylor expansion in B around 0);
   otherwise use (1 - x) / sin(B). */
double code(double B, double x) {
double tmp;
if ((x <= -3200.0) || !(x <= 1.0)) {
tmp = (1.0 / B) - (x / tan(B));
} else {
tmp = (1.0 - x) / sin(B);
}
return tmp;
}
real(8) function code(b, x)
real(8), intent (in) :: b
real(8), intent (in) :: x
real(8) :: tmp
if ((x <= (-3200.0d0)) .or. (.not. (x <= 1.0d0))) then
tmp = (1.0d0 / b) - (x / tan(b))
else
tmp = (1.0d0 - x) / sin(b)
end if
code = tmp
end function
public static double code(double B, double x) {
double tmp;
if ((x <= -3200.0) || !(x <= 1.0)) {
tmp = (1.0 / B) - (x / Math.tan(B));
} else {
tmp = (1.0 - x) / Math.sin(B);
}
return tmp;
}
def code(B, x):
    """Piecewise rewrite: 1/B - x/tan(B) when x <= -3200 or x > 1,
    otherwise (1 - x)/sin(B).

    Fixed: the original single physical line (``def code(B, x): tmp = 0
    if ...: ... else: ...``) is a SyntaxError in Python — an if/else
    suite cannot share the def's line.
    """
    if (x <= -3200.0) or not (x <= 1.0):
        tmp = (1.0 / B) - (x / math.tan(B))
    else:
        tmp = (1.0 - x) / math.sin(B)
    return tmp
function code(B, x)
    # Piecewise: 1/B - x/tan(B) for x <= -3200 or x > 1, else (1 - x)/sin(B).
    # Fixed: the original one-line body juxtaposed statements with no
    # separators (`tmp = 0.0 if ...`), which does not parse in Julia.
    tmp = 0.0
    if (x <= -3200.0) || !(x <= 1.0)
        tmp = Float64(Float64(1.0 / B) - Float64(x / tan(B)))
    else
        tmp = Float64(Float64(1.0 - x) / sin(B))
    end
    return tmp
end
function tmp_2 = code(B, x) tmp = 0.0; if ((x <= -3200.0) || ~((x <= 1.0))) tmp = (1.0 / B) - (x / tan(B)); else tmp = (1.0 - x) / sin(B); end tmp_2 = tmp; end
code[B_, x_] := If[Or[LessEqual[x, -3200.0], N[Not[LessEqual[x, 1.0]], $MachinePrecision]], N[(N[(1.0 / B), $MachinePrecision] - N[(x / N[Tan[B], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(1.0 - x), $MachinePrecision] / N[Sin[B], $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -3200 \lor \neg \left(x \leq 1\right):\\
\;\;\;\;\frac{1}{B} - \frac{x}{\tan B}\\
\mathbf{else}:\\
\;\;\;\;\frac{1 - x}{\sin B}\\
\end{array}
\end{array}
if x < -3200 or 1 < x Initial program 99.6%
Taylor expanded in B around 0
lower-/.f64N/A
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6466.6
Applied rewrites66.6%
lift-+.f64N/A
+-commutativeN/A
lift-neg.f64N/A
lift-*.f64N/A
lift-/.f64N/A
div-invN/A
lift-/.f64N/A
unsub-negN/A
lower--.f6466.7
Applied rewrites66.7%
Taylor expanded in B around 0
Applied rewrites98.0%
if -3200 < x < 1Initial program 99.8%
lift-*.f64N/A
lift-/.f64N/A
un-div-invN/A
lower-/.f6499.8
Applied rewrites99.8%
lift-+.f64N/A
+-commutativeN/A
lift-neg.f64N/A
unsub-negN/A
lift-/.f64N/A
lift-/.f64N/A
clear-numN/A
lift-/.f64N/A
*-lft-identityN/A
lift-*.f64N/A
frac-subN/A
*-lft-identityN/A
metadata-evalN/A
div-invN/A
/-rgt-identityN/A
lift--.f64N/A
*-commutativeN/A
associate-/r*N/A
Applied rewrites99.8%
Taylor expanded in B around 0
lower--.f6499.1
Applied rewrites99.1%
Final simplification98.5%
(FPCore (B x) :precision binary64 (if (or (<= x -1.36e+14) (not (<= x 2.7e+104))) (- (* 0.16666666666666666 B) (/ x (tan B))) (/ (- 1.0 x) (sin B))))
double code(double B, double x) {
double tmp;
if ((x <= -1.36e+14) || !(x <= 2.7e+104)) {
tmp = (0.16666666666666666 * B) - (x / tan(B));
} else {
tmp = (1.0 - x) / sin(B);
}
return tmp;
}
real(8) function code(b, x)
real(8), intent (in) :: b
real(8), intent (in) :: x
real(8) :: tmp
if ((x <= (-1.36d+14)) .or. (.not. (x <= 2.7d+104))) then
tmp = (0.16666666666666666d0 * b) - (x / tan(b))
else
tmp = (1.0d0 - x) / sin(b)
end if
code = tmp
end function
public static double code(double B, double x) {
double tmp;
if ((x <= -1.36e+14) || !(x <= 2.7e+104)) {
tmp = (0.16666666666666666 * B) - (x / Math.tan(B));
} else {
tmp = (1.0 - x) / Math.sin(B);
}
return tmp;
}
def code(B, x):
    """Piecewise rewrite: B/6 - x/tan(B) for x <= -1.36e14 or x > 2.7e104,
    otherwise (1 - x)/sin(B).

    Fixed: the original single physical line is a SyntaxError in Python —
    an if/else suite cannot share the def's line.
    """
    if (x <= -1.36e+14) or not (x <= 2.7e+104):
        tmp = (0.16666666666666666 * B) - (x / math.tan(B))
    else:
        tmp = (1.0 - x) / math.sin(B)
    return tmp
function code(B, x)
    # Piecewise: B/6 - x/tan(B) for x <= -1.36e14 or x > 2.7e104,
    # else (1 - x)/sin(B).
    # Fixed: the original one-line body juxtaposed statements with no
    # separators, which does not parse in Julia.
    tmp = 0.0
    if (x <= -1.36e+14) || !(x <= 2.7e+104)
        tmp = Float64(Float64(0.16666666666666666 * B) - Float64(x / tan(B)))
    else
        tmp = Float64(Float64(1.0 - x) / sin(B))
    end
    return tmp
end
function tmp_2 = code(B, x) tmp = 0.0; if ((x <= -1.36e+14) || ~((x <= 2.7e+104))) tmp = (0.16666666666666666 * B) - (x / tan(B)); else tmp = (1.0 - x) / sin(B); end tmp_2 = tmp; end
code[B_, x_] := If[Or[LessEqual[x, -1.36e+14], N[Not[LessEqual[x, 2.7e+104]], $MachinePrecision]], N[(N[(0.16666666666666666 * B), $MachinePrecision] - N[(x / N[Tan[B], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(1.0 - x), $MachinePrecision] / N[Sin[B], $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -1.36 \cdot 10^{+14} \lor \neg \left(x \leq 2.7 \cdot 10^{+104}\right):\\
\;\;\;\;0.16666666666666666 \cdot B - \frac{x}{\tan B}\\
\mathbf{else}:\\
\;\;\;\;\frac{1 - x}{\sin B}\\
\end{array}
\end{array}
if x < -1.36e14 or 2.69999999999999985e104 < x Initial program 99.6%
Taylor expanded in B around 0
lower-/.f64N/A
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6469.1
Applied rewrites69.1%
lift-+.f64N/A
+-commutativeN/A
lift-neg.f64N/A
lift-*.f64N/A
lift-/.f64N/A
div-invN/A
lift-/.f64N/A
unsub-negN/A
lower--.f6469.2
Applied rewrites69.2%
Taylor expanded in B around inf
Applied rewrites77.3%
if -1.36e14 < x < 2.69999999999999985e104Initial program 99.8%
lift-*.f64N/A
lift-/.f64N/A
un-div-invN/A
lower-/.f6499.8
Applied rewrites99.8%
lift-+.f64N/A
+-commutativeN/A
lift-neg.f64N/A
unsub-negN/A
lift-/.f64N/A
lift-/.f64N/A
clear-numN/A
lift-/.f64N/A
*-lft-identityN/A
lift-*.f64N/A
frac-subN/A
*-lft-identityN/A
metadata-evalN/A
div-invN/A
/-rgt-identityN/A
lift--.f64N/A
*-commutativeN/A
associate-/r*N/A
Applied rewrites99.8%
Taylor expanded in B around 0
lower--.f6492.6
Applied rewrites92.6%
Final simplification85.9%
(FPCore (B x) :precision binary64 (/ (- 1.0 x) (sin B)))
/* (1 - x) / sin(B): per the derivation log, a Taylor-based simplification
   of (1 - cos(B)*x)/sin(B) in B around 0 (cos(B) ~ 1). */
double code(double B, double x) {
return (1.0 - x) / sin(B);
}
real(8) function code(b, x)
real(8), intent (in) :: b
real(8), intent (in) :: x
code = (1.0d0 - x) / sin(b)
end function
public static double code(double B, double x) {
return (1.0 - x) / Math.sin(B);
}
def code(B, x):
    """Return (1 - x) / sin(B)."""
    numerator = 1.0 - x
    return numerator / math.sin(B)
function code(B, x) return Float64(Float64(1.0 - x) / sin(B)) end
function tmp = code(B, x) tmp = (1.0 - x) / sin(B); end
code[B_, x_] := N[(N[(1.0 - x), $MachinePrecision] / N[Sin[B], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1 - x}{\sin B}
\end{array}
Initial program 99.7%
lift-*.f64N/A
lift-/.f64N/A
un-div-invN/A
lower-/.f6499.8
Applied rewrites99.8%
lift-+.f64N/A
+-commutativeN/A
lift-neg.f64N/A
unsub-negN/A
lift-/.f64N/A
lift-/.f64N/A
clear-numN/A
lift-/.f64N/A
*-lft-identityN/A
lift-*.f64N/A
frac-subN/A
*-lft-identityN/A
metadata-evalN/A
div-invN/A
/-rgt-identityN/A
lift--.f64N/A
*-commutativeN/A
associate-/r*N/A
Applied rewrites99.7%
Taylor expanded in B around 0
lower--.f6473.3
Applied rewrites73.3%
(FPCore (B x) :precision binary64 (/ (- (fma (fma 0.3333333333333333 x 0.16666666666666666) (* B B) 1.0) x) B))
/* fma/Horner form of the series numerator from the Taylor expansion in B
   around 0: ((x/3 + 1/6) * B^2 + 1 - x) / B. */
double code(double B, double x) {
return (fma(fma(0.3333333333333333, x, 0.16666666666666666), (B * B), 1.0) - x) / B;
}
function code(B, x) return Float64(Float64(fma(fma(0.3333333333333333, x, 0.16666666666666666), Float64(B * B), 1.0) - x) / B) end
code[B_, x_] := N[(N[(N[(N[(0.3333333333333333 * x + 0.16666666666666666), $MachinePrecision] * N[(B * B), $MachinePrecision] + 1.0), $MachinePrecision] - x), $MachinePrecision] / B), $MachinePrecision]
\begin{array}{l}
\\
\frac{\mathsf{fma}\left(\mathsf{fma}\left(0.3333333333333333, x, 0.16666666666666666\right), B \cdot B, 1\right) - x}{B}
\end{array}
Initial program 99.7%
Taylor expanded in B around 0
lower-/.f64N/A
lower--.f64N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6448.3
Applied rewrites48.3%
Final simplification48.3%
(FPCore (B x) :precision binary64 (/ (fma x (fma (* 0.3333333333333333 B) B -1.0) 1.0) B))
/* fma form: (x * (B^2/3 - 1) + 1) / B. */
double code(double B, double x) {
return fma(x, fma((0.3333333333333333 * B), B, -1.0), 1.0) / B;
}
function code(B, x) return Float64(fma(x, fma(Float64(0.3333333333333333 * B), B, -1.0), 1.0) / B) end
code[B_, x_] := N[(N[(x * N[(N[(0.3333333333333333 * B), $MachinePrecision] * B + -1.0), $MachinePrecision] + 1.0), $MachinePrecision] / B), $MachinePrecision]
\begin{array}{l}
\\
\frac{\mathsf{fma}\left(x, \mathsf{fma}\left(0.3333333333333333 \cdot B, B, -1\right), 1\right)}{B}
\end{array}
Initial program 99.7%
rem-exp-logN/A
lower-exp.f64N/A
lift-/.f64N/A
log-recN/A
lower-neg.f64N/A
lower-log.f6452.4
Applied rewrites52.4%
Taylor expanded in B around 0
lower-/.f64N/A
Applied rewrites48.0%
(FPCore (B x) :precision binary64 (if (or (<= x -1.0) (not (<= x 1.0))) (/ (- x) B) (/ 1.0 B)))
/* Coarse piecewise approximation: -x / B when x <= -1 or x > 1
   (per the log, a Taylor expansion in x around inf), else 1 / B. */
double code(double B, double x) {
double tmp;
if ((x <= -1.0) || !(x <= 1.0)) {
tmp = -x / B;
} else {
tmp = 1.0 / B;
}
return tmp;
}
real(8) function code(b, x)
real(8), intent (in) :: b
real(8), intent (in) :: x
real(8) :: tmp
if ((x <= (-1.0d0)) .or. (.not. (x <= 1.0d0))) then
tmp = -x / b
else
tmp = 1.0d0 / b
end if
code = tmp
end function
public static double code(double B, double x) {
double tmp;
if ((x <= -1.0) || !(x <= 1.0)) {
tmp = -x / B;
} else {
tmp = 1.0 / B;
}
return tmp;
}
def code(B, x):
    """Piecewise: -x/B when x <= -1 or x > 1, otherwise 1/B.

    Fixed: the original single physical line is a SyntaxError in Python —
    an if/else suite cannot share the def's line.
    """
    if (x <= -1.0) or not (x <= 1.0):
        tmp = -x / B
    else:
        tmp = 1.0 / B
    return tmp
function code(B, x)
    # Piecewise: -x/B for x <= -1 or x > 1, else 1/B.
    # Fixed: the original one-line body juxtaposed statements with no
    # separators, which does not parse in Julia.
    tmp = 0.0
    if (x <= -1.0) || !(x <= 1.0)
        tmp = Float64(Float64(-x) / B)
    else
        tmp = Float64(1.0 / B)
    end
    return tmp
end
function tmp_2 = code(B, x) tmp = 0.0; if ((x <= -1.0) || ~((x <= 1.0))) tmp = -x / B; else tmp = 1.0 / B; end tmp_2 = tmp; end
code[B_, x_] := If[Or[LessEqual[x, -1.0], N[Not[LessEqual[x, 1.0]], $MachinePrecision]], N[((-x) / B), $MachinePrecision], N[(1.0 / B), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -1 \lor \neg \left(x \leq 1\right):\\
\;\;\;\;\frac{-x}{B}\\
\mathbf{else}:\\
\;\;\;\;\frac{1}{B}\\
\end{array}
\end{array}
if x < -1 or 1 < x Initial program 99.6%
Taylor expanded in B around 0
lower-/.f64N/A
lower--.f6446.5
Applied rewrites46.5%
Taylor expanded in x around inf
Applied rewrites45.3%
if -1 < x < 1Initial program 99.8%
Taylor expanded in B around 0
lower-/.f64N/A
lower--.f6449.7
Applied rewrites49.7%
Taylor expanded in x around 0
Applied rewrites49.5%
Final simplification47.2%
(FPCore (B x) :precision binary64 (/ (- 1.0 x) B))
/* Leading-order form (1 - x) / B from the Taylor expansion of the
   original expression in B around 0. */
double code(double B, double x) {
return (1.0 - x) / B;
}
real(8) function code(b, x)
real(8), intent (in) :: b
real(8), intent (in) :: x
code = (1.0d0 - x) / b
end function
public static double code(double B, double x) {
return (1.0 - x) / B;
}
def code(B, x): return (1.0 - x) / B
function code(B, x) return Float64(Float64(1.0 - x) / B) end
function tmp = code(B, x) tmp = (1.0 - x) / B; end
code[B_, x_] := N[(N[(1.0 - x), $MachinePrecision] / B), $MachinePrecision]
\begin{array}{l}
\\
\frac{1 - x}{B}
\end{array}
Initial program 99.7%
Taylor expanded in B around 0
lower-/.f64N/A
lower--.f6448.0
Applied rewrites48.0%
Final simplification48.0%
(FPCore (B x) :precision binary64 (/ 1.0 B))
/* Crudest alternative: 1 / B; x is intentionally unused (the interface is
   kept to match the other alternatives; accuracy 24.7% per the log). */
double code(double B, double x) {
return 1.0 / B;
}
real(8) function code(b, x)
real(8), intent (in) :: b
real(8), intent (in) :: x
code = 1.0d0 / b
end function
public static double code(double B, double x) {
return 1.0 / B;
}
def code(B, x):
    """Return 1 / B; x is intentionally unused in this alternative."""
    reciprocal = 1.0 / B
    return reciprocal
function code(B, x) return Float64(1.0 / B) end
function tmp = code(B, x) tmp = 1.0 / B; end
code[B_, x_] := N[(1.0 / B), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{B}
\end{array}
Initial program 99.7%
Taylor expanded in B around 0
lower-/.f64N/A
lower--.f6448.0
Applied rewrites48.0%
Taylor expanded in x around 0
Applied rewrites24.7%
Final simplification24.7%
herbie shell --seed 2024322
(FPCore (B x)
:name "VandenBroeck and Keller, Equation (24)"
:precision binary64
(+ (- (* x (/ 1.0 (tan B)))) (/ 1.0 (sin B))))