
; Original expression: -(x * (1/tan B)) + 1/sin B, i.e. csc(B) - x*cot(B), in binary64.
(FPCore (B x) :precision binary64 (+ (- (* x (/ 1.0 (tan B)))) (/ 1.0 (sin B))))
double code(double B, double x) {
return -(x * (1.0 / tan(B))) + (1.0 / sin(B));
}
! csc(b) - x*cot(b); generated code -- keep the operation order intact.
real(8) function code(b, x)
real(8), intent (in) :: b
real(8), intent (in) :: x
code = -(x * (1.0d0 / tan(b))) + (1.0d0 / sin(b))
end function
/** csc(B) - x*cot(B), same operation order as the generated original. */
public static double code(double B, double x) {
    final double cotB = 1.0 / Math.tan(B);
    final double cscB = 1.0 / Math.sin(B);
    return -(x * cotB) + cscB;
}
def code(B, x):
    """csc(B) - x*cot(B), evaluated in the original operation order."""
    cot_b = 1.0 / math.tan(B)
    csc_b = 1.0 / math.sin(B)
    return -(x * cot_b) + csc_b
# csc(B) - x*cot(B); generated code -- keep the operation order intact.
function code(B, x) return Float64(Float64(-Float64(x * Float64(1.0 / tan(B)))) + Float64(1.0 / sin(B))) end
% csc(B) - x*cot(B); generated code -- keep the operation order intact.
function tmp = code(B, x) tmp = -(x * (1.0 / tan(B))) + (1.0 / sin(B)); end
(* csc(B) - x*cot(B); generated code -- keep the operation order intact. *)
code[B_, x_] := N[((-N[(x * N[(1.0 / N[Tan[B], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]) + N[(1.0 / N[Sin[B], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(-x \cdot \frac{1}{\tan B}\right) + \frac{1}{\sin B}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 6 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
; Alternative 1: identical to the input expression, csc(B) - x*cot(B).
(FPCore (B x) :precision binary64 (+ (- (* x (/ 1.0 (tan B)))) (/ 1.0 (sin B))))
double code(double B, double x) {
return -(x * (1.0 / tan(B))) + (1.0 / sin(B));
}
! csc(b) - x*cot(b); generated code -- keep the operation order intact.
real(8) function code(b, x)
real(8), intent (in) :: b
real(8), intent (in) :: x
code = -(x * (1.0d0 / tan(b))) + (1.0d0 / sin(b))
end function
/** csc(B) - x*cot(B), same operation order as the generated original. */
public static double code(double B, double x) {
    final double cotB = 1.0 / Math.tan(B);
    final double cscB = 1.0 / Math.sin(B);
    return -(x * cotB) + cscB;
}
def code(B, x):
    """csc(B) - x*cot(B), evaluated in the original operation order."""
    neg_term = x * (1.0 / math.tan(B))
    return -neg_term + (1.0 / math.sin(B))
# csc(B) - x*cot(B); generated code -- keep the operation order intact.
function code(B, x) return Float64(Float64(-Float64(x * Float64(1.0 / tan(B)))) + Float64(1.0 / sin(B))) end
% csc(B) - x*cot(B); generated code -- keep the operation order intact.
function tmp = code(B, x) tmp = -(x * (1.0 / tan(B))) + (1.0 / sin(B)); end
(* csc(B) - x*cot(B); generated code -- keep the operation order intact. *)
code[B_, x_] := N[((-N[(x * N[(1.0 / N[Tan[B], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]) + N[(1.0 / N[Sin[B], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(-x \cdot \frac{1}{\tan B}\right) + \frac{1}{\sin B}
\end{array}
; Rearranged form: csc(B) - x*cot(B) computed directly as 1/sin(B) - x/tan(B).
(FPCore (B x) :precision binary64 (- (/ 1.0 (sin B)) (/ x (tan B))))
double code(double B, double x) {
return (1.0 / sin(B)) - (x / tan(B));
}
! csc(b) - x*cot(b) as 1/sin(b) - x/tan(b); keep operation order intact.
real(8) function code(b, x)
real(8), intent (in) :: b
real(8), intent (in) :: x
code = (1.0d0 / sin(b)) - (x / tan(b))
end function
/** csc(B) - x*cot(B) as 1/sin(B) - x/tan(B); same op order as original. */
public static double code(double B, double x) {
    final double cscB = 1.0 / Math.sin(B);
    final double xCotB = x / Math.tan(B);
    return cscB - xCotB;
}
def code(B, x):
    """csc(B) - x*cot(B) as 1/sin(B) - x/tan(B), original op order."""
    csc_b = 1.0 / math.sin(B)
    x_cot_b = x / math.tan(B)
    return csc_b - x_cot_b
# csc(B) - x*cot(B) as 1/sin(B) - x/tan(B); keep operation order intact.
function code(B, x) return Float64(Float64(1.0 / sin(B)) - Float64(x / tan(B))) end
% csc(B) - x*cot(B) as 1/sin(B) - x/tan(B); keep operation order intact.
function tmp = code(B, x) tmp = (1.0 / sin(B)) - (x / tan(B)); end
(* csc(B) - x*cot(B) as 1/Sin[B] - x/Tan[B]; keep operation order intact. *)
code[B_, x_] := N[(N[(1.0 / N[Sin[B], $MachinePrecision]), $MachinePrecision] - N[(x / N[Tan[B], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{\sin B} - \frac{x}{\tan B}
\end{array}
Initial program 99.7%
Applied rewrites 99.8%
; Branching alternative: t_0 = csc(B); t_1 reconstructs the original
; expression; t_2 = 1/B - x/tan(B).  The 1/B (not 1/sin(B)) in t_2 is
; intentional -- it comes from the Taylor expansion in B around 0.
(FPCore (B x)
:precision binary64
(let* ((t_0 (/ 1.0 (sin B)))
(t_1 (+ t_0 (* x (/ -1.0 (tan B)))))
(t_2 (- (/ 1.0 B) (/ x (tan B)))))
(if (<= t_1 -5000000000000.0) t_2 (if (<= t_1 100.0) t_0 t_2))))
/*
 * Herbie-generated alternative: t_1 reconstructs the original
 * csc(B) - x*cot(B); when -5e12 < t_1 <= 100 the direct csc form (t_0)
 * is returned, otherwise the Taylor-style t_2 = 1/B - x/tan(B).
 * The 1/B (not 1/sin(B)) in t_2 is intentional (series expansion in B
 * around 0).  Do not reorder the floating-point operations.
 */
double code(double B, double x) {
double t_0 = 1.0 / sin(B);
double t_1 = t_0 + (x * (-1.0 / tan(B)));
double t_2 = (1.0 / B) - (x / tan(B));
double tmp;
if (t_1 <= -5000000000000.0) {
tmp = t_2;
} else if (t_1 <= 100.0) {
tmp = t_0;
} else {
tmp = t_2;
}
return tmp;
}
! Herbie-generated alternative: t_1 reconstructs the original
! csc(b) - x*cot(b); when -5e12 < t_1 <= 100 the direct csc form (t_0)
! is returned, otherwise the Taylor-style t_2 = 1/b - x/tan(b).
! The 1/b (not 1/sin(b)) in t_2 is intentional (series expansion).
! Do not reorder the floating-point operations.
real(8) function code(b, x)
real(8), intent (in) :: b
real(8), intent (in) :: x
real(8) :: t_0
real(8) :: t_1
real(8) :: t_2
real(8) :: tmp
t_0 = 1.0d0 / sin(b)
t_1 = t_0 + (x * ((-1.0d0) / tan(b)))
t_2 = (1.0d0 / b) - (x / tan(b))
if (t_1 <= (-5000000000000.0d0)) then
tmp = t_2
else if (t_1 <= 100.0d0) then
tmp = t_0
else
tmp = t_2
end if
code = tmp
end function
/**
 * Herbie-generated alternative: t_1 reconstructs the original
 * csc(B) - x*cot(B); when -5e12 < t_1 <= 100 the direct csc form (t_0)
 * is returned, otherwise the Taylor-style t_2 = 1/B - x/tan(B).
 * The 1/B (not 1/sin(B)) in t_2 is intentional (series expansion in B
 * around 0).  Do not reorder the floating-point operations.
 */
public static double code(double B, double x) {
double t_0 = 1.0 / Math.sin(B);
double t_1 = t_0 + (x * (-1.0 / Math.tan(B)));
double t_2 = (1.0 / B) - (x / Math.tan(B));
double tmp;
if (t_1 <= -5000000000000.0) {
tmp = t_2;
} else if (t_1 <= 100.0) {
tmp = t_0;
} else {
tmp = t_2;
}
return tmp;
}
def code(B, x):
    """Branching Herbie alternative for csc(B) - x*cot(B).

    t_1 reconstructs the original expression; when -5e12 < t_1 <= 100
    the direct csc form (t_0) is returned, otherwise the Taylor-style
    t_2 = 1/B - x/tan(B).  The 1/B (not 1/sin(B)) in t_2 is intentional
    (series expansion in B around 0).  Reformatted from a collapsed
    one-liner that was a SyntaxError; logic unchanged.
    """
    t_0 = 1.0 / math.sin(B)
    t_1 = t_0 + (x * (-1.0 / math.tan(B)))
    t_2 = (1.0 / B) - (x / math.tan(B))
    tmp = 0
    if t_1 <= -5000000000000.0:
        tmp = t_2
    elif t_1 <= 100.0:
        tmp = t_0
    else:
        tmp = t_2
    return tmp
# Branching Herbie alternative for csc(B) - x*cot(B).  Reformatted from a
# collapsed one-liner that lacked statement separators (invalid Julia);
# logic unchanged.  The 1/B (not 1/sin(B)) in t_2 is intentional.
function code(B, x)
    t_0 = Float64(1.0 / sin(B))
    t_1 = Float64(t_0 + Float64(x * Float64(-1.0 / tan(B))))
    t_2 = Float64(Float64(1.0 / B) - Float64(x / tan(B)))
    tmp = 0.0
    if (t_1 <= -5000000000000.0)
        tmp = t_2
    elseif (t_1 <= 100.0)
        tmp = t_0
    else
        tmp = t_2
    end
    return tmp
end
% Branching Herbie alternative for csc(B) - x*cot(B).  Reformatted from a
% collapsed one-liner (single-line if/elseif without comma separators is
% invalid MATLAB); logic unchanged.  The 1/B in t_2 is intentional.
function tmp_2 = code(B, x)
    t_0 = 1.0 / sin(B);
    t_1 = t_0 + (x * (-1.0 / tan(B)));
    t_2 = (1.0 / B) - (x / tan(B));
    tmp = 0.0;
    if (t_1 <= -5000000000000.0)
        tmp = t_2;
    elseif (t_1 <= 100.0)
        tmp = t_0;
    else
        tmp = t_2;
    end
    tmp_2 = tmp;
end
(* Branching alternative: t$95$1 reconstructs the original csc(B) - x*cot(B); csc form used when -5e12 < t$95$1 <= 100, else the Taylor-style 1/B - x/Tan[B]. *)
code[B_, x_] := Block[{t$95$0 = N[(1.0 / N[Sin[B], $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(t$95$0 + N[(x * N[(-1.0 / N[Tan[B], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$2 = N[(N[(1.0 / B), $MachinePrecision] - N[(x / N[Tan[B], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[t$95$1, -5000000000000.0], t$95$2, If[LessEqual[t$95$1, 100.0], t$95$0, t$95$2]]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{1}{\sin B}\\
t_1 := t_0 + x \cdot \frac{-1}{\tan B}\\
t_2 := \frac{1}{B} - \frac{x}{\tan B}\\
\mathbf{if}\;t_1 \leq -5000000000000:\\
\;\;\;\;t_2\\
\mathbf{elif}\;t_1 \leq 100:\\
\;\;\;\;t_0\\
\mathbf{else}:\\
\;\;\;\;t_2\\
\end{array}
\end{array}
if (+.f64 (neg.f64 (*.f64 x (/.f64 #s(literal 1 binary64) (tan.f64 B)))) (/.f64 #s(literal 1 binary64) (sin.f64 B))) < -5e12 or 100 < (+.f64 (neg.f64 (*.f64 x (/.f64 #s(literal 1 binary64) (tan.f64 B)))) (/.f64 #s(literal 1 binary64) (sin.f64 B)))
Initial program 99.7%
Applied rewrites 99.8%
Taylor expanded in B around 0
lower-/.f64 99.7
Applied rewrites 99.7%
if -5e12 < (+.f64 (neg.f64 (*.f64 x (/.f64 #s(literal 1 binary64) (tan.f64 B)))) (/.f64 #s(literal 1 binary64) (sin.f64 B))) < 100
Initial program 99.6%
Taylor expanded in x around 0
lower-/.f64 N/A
lower-sin.f64 97.9
Applied rewrites 97.9%
Final simplification 99.2%
; fma form of the same quantity: (x*cos(B) - 1) / (-sin(B)).
(FPCore (B x) :precision binary64 (/ (fma x (cos B) -1.0) (- (sin B))))
double code(double B, double x) {
return fma(x, cos(B), -1.0) / -sin(B);
}
# (x*cos(B) - 1) / (-sin(B)) via fma; keep operation order intact.
function code(B, x) return Float64(fma(x, cos(B), -1.0) / Float64(-sin(B))) end
(* (x*Cos[B] - 1) / (-Sin[B]); keep operation order intact. *)
code[B_, x_] := N[(N[(x * N[Cos[B], $MachinePrecision] + -1.0), $MachinePrecision] / (-N[Sin[B], $MachinePrecision])), $MachinePrecision]
\begin{array}{l}
\\
\frac{\mathsf{fma}\left(x, \cos B, -1\right)}{-\sin B}
\end{array}
Initial program 99.7%
Applied rewrites 99.8%
Taylor expanded in B around 0
lower-/.f64 76.1
Applied rewrites 76.1%
Taylor expanded in B around 0
lower-/.f64 50.1
Applied rewrites 50.1%
Taylor expanded in B around inf
sub-neg N/A
+-commutative N/A
neg-sub0 N/A
associate-+l- N/A
div-sub N/A
neg-sub0 N/A
distribute-neg-frac2 N/A
mul-1-neg N/A
lower-/.f64 N/A
sub-neg N/A
metadata-eval N/A
lower-fma.f64 N/A
lower-cos.f64 N/A
mul-1-neg N/A
lower-neg.f64 N/A
lower-sin.f64 99.7
Applied rewrites 99.7%
; For B <= 1: nested-fma polynomial (Taylor expansion in B around 0) of
; the original csc(B) - x*cot(B); otherwise csc(B) alone (the x term is
; dropped by the expansion in x around 0).
(FPCore (B x)
:precision binary64
(if (<= B 1.0)
(/
(fma
(* B B)
(fma
x
0.3333333333333333
(fma
(* B B)
(fma
B
(* B (fma x 0.0021164021164021165 0.00205026455026455))
(fma x 0.022222222222222223 0.019444444444444445))
0.16666666666666666))
(- 1.0 x))
B)
(/ 1.0 (sin B))))
/*
 * For B <= 1: nested-fma polynomial approximation (Taylor expansion in B
 * around 0, per the report log) of the original csc(B) - x*cot(B);
 * otherwise 1/sin(B) (the x term is dropped by expansion in x around 0).
 * Keep the fma nesting and coefficients exactly as generated.
 */
double code(double B, double x) {
double tmp;
if (B <= 1.0) {
tmp = fma((B * B), fma(x, 0.3333333333333333, fma((B * B), fma(B, (B * fma(x, 0.0021164021164021165, 0.00205026455026455)), fma(x, 0.022222222222222223, 0.019444444444444445)), 0.16666666666666666)), (1.0 - x)) / B;
} else {
tmp = 1.0 / sin(B);
}
return tmp;
}
# Polynomial branch (B <= 1) / csc(B) fallback.  Reformatted from a
# collapsed one-liner that lacked statement separators (invalid Julia);
# logic and fma nesting unchanged.
function code(B, x)
    tmp = 0.0
    if (B <= 1.0)
        tmp = Float64(fma(Float64(B * B), fma(x, 0.3333333333333333, fma(Float64(B * B), fma(B, Float64(B * fma(x, 0.0021164021164021165, 0.00205026455026455)), fma(x, 0.022222222222222223, 0.019444444444444445)), 0.16666666666666666)), Float64(1.0 - x)) / B)
    else
        tmp = Float64(1.0 / sin(B))
    end
    return tmp
end
(* Polynomial (Taylor) approximation for B <= 1, 1/Sin[B] otherwise; keep coefficients and nesting intact. *)
code[B_, x_] := If[LessEqual[B, 1.0], N[(N[(N[(B * B), $MachinePrecision] * N[(x * 0.3333333333333333 + N[(N[(B * B), $MachinePrecision] * N[(B * N[(B * N[(x * 0.0021164021164021165 + 0.00205026455026455), $MachinePrecision]), $MachinePrecision] + N[(x * 0.022222222222222223 + 0.019444444444444445), $MachinePrecision]), $MachinePrecision] + 0.16666666666666666), $MachinePrecision]), $MachinePrecision] + N[(1.0 - x), $MachinePrecision]), $MachinePrecision] / B), $MachinePrecision], N[(1.0 / N[Sin[B], $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;B \leq 1:\\
\;\;\;\;\frac{\mathsf{fma}\left(B \cdot B, \mathsf{fma}\left(x, 0.3333333333333333, \mathsf{fma}\left(B \cdot B, \mathsf{fma}\left(B, B \cdot \mathsf{fma}\left(x, 0.0021164021164021165, 0.00205026455026455\right), \mathsf{fma}\left(x, 0.022222222222222223, 0.019444444444444445\right)\right), 0.16666666666666666\right)\right), 1 - x\right)}{B}\\
\mathbf{else}:\\
\;\;\;\;\frac{1}{\sin B}\\
\end{array}
\end{array}
if B < 1
Initial program 99.8%
Taylor expanded in B around 0
Applied rewrites 68.6%
if 1 < B
Initial program 99.5%
Taylor expanded in x around 0
lower-/.f64 N/A
lower-sin.f64 53.7
Applied rewrites 53.7%
; Shorter polynomial (Taylor) approximation: (B^2*(x/3 + 1/6) + 1 - x) / B via fma.
(FPCore (B x) :precision binary64 (/ (- (fma (* B B) (fma x 0.3333333333333333 0.16666666666666666) 1.0) x) B))
double code(double B, double x) {
return (fma((B * B), fma(x, 0.3333333333333333, 0.16666666666666666), 1.0) - x) / B;
}
# Polynomial approximation (B^2*(x/3 + 1/6) + 1 - x) / B; keep fma nesting intact.
function code(B, x) return Float64(Float64(fma(Float64(B * B), fma(x, 0.3333333333333333, 0.16666666666666666), 1.0) - x) / B) end
(* Polynomial approximation (B^2*(x/3 + 1/6) + 1 - x) / B; keep nesting intact. *)
code[B_, x_] := N[(N[(N[(N[(B * B), $MachinePrecision] * N[(x * 0.3333333333333333 + 0.16666666666666666), $MachinePrecision] + 1.0), $MachinePrecision] - x), $MachinePrecision] / B), $MachinePrecision]
\begin{array}{l}
\\
\frac{\mathsf{fma}\left(B \cdot B, \mathsf{fma}\left(x, 0.3333333333333333, 0.16666666666666666\right), 1\right) - x}{B}
\end{array}
Initial program 99.7%
Taylor expanded in B around 0
lower-/.f64 N/A
lower--.f64 N/A
+-commutative N/A
lower-fma.f64 N/A
unpow2 N/A
lower-*.f64 N/A
+-commutative N/A
*-commutative N/A
lower-fma.f64 50.2
Applied rewrites 50.2%
; Leading-order Taylor approximation: (1 - x) / B.
(FPCore (B x) :precision binary64 (/ (- 1.0 x) B))
/* Leading-order Taylor approximation of the original: (1 - x) / B. */
double code(double B, double x) {
    double numerator = 1.0 - x;
    return numerator / B;
}
! Leading-order Taylor approximation of the original: (1 - x) / b.
real(8) function code(b, x)
real(8), intent (in) :: b
real(8), intent (in) :: x
code = (1.0d0 - x) / b
end function
/** Leading-order Taylor approximation of the original: (1 - x) / B. */
public static double code(double B, double x) {
    final double numerator = 1.0 - x;
    return numerator / B;
}
def code(B, x):
    """Leading-order Taylor approximation of the original: (1 - x) / B."""
    numerator = 1.0 - x
    return numerator / B
# Leading-order Taylor approximation of the original: (1 - x) / B.
function code(B, x) return Float64(Float64(1.0 - x) / B) end
% Leading-order Taylor approximation of the original: (1 - x) / B.
function tmp = code(B, x) tmp = (1.0 - x) / B; end
(* Leading-order Taylor approximation of the original: (1 - x) / B. *)
code[B_, x_] := N[(N[(1.0 - x), $MachinePrecision] / B), $MachinePrecision]
\begin{array}{l}
\\
\frac{1 - x}{B}
\end{array}
Initial program 99.7%
Taylor expanded in B around 0
lower-/.f64 N/A
lower--.f64 50.1
Applied rewrites 50.1%
herbie shell --seed 2024212
; Input program as given to Herbie (see seed above).
(FPCore (B x)
:name "VandenBroeck and Keller, Equation (24)"
:precision binary64
(+ (- (* x (/ 1.0 (tan B)))) (/ 1.0 (sin B))))