
; binary64 spec of the original program: -(x * (1/tan B)) + 1/sin B
(FPCore (B x) :precision binary64 (+ (- (* x (/ 1.0 (tan B)))) (/ 1.0 (sin B))))
double code(double B, double x) {
return -(x * (1.0 / tan(B))) + (1.0 / sin(B));
}
! Computes -(x * cot(b)) + csc(b) in double precision, via reciprocals of tan and sin.
real(8) function code(b, x)
real(8), intent (in) :: b
real(8), intent (in) :: x
code = -(x * (1.0d0 / tan(b))) + (1.0d0 / sin(b))
end function
public static double code(double B, double x) {
    // cot(B) and csc(B) via reciprocals; same operation order as the FPCore spec.
    double cotB = 1.0 / Math.tan(B);
    double cscB = 1.0 / Math.sin(B);
    return -(x * cotB) + cscB;
}
def code(B, x):
    """Return -(x * cot(B)) + csc(B), with cot and csc formed as reciprocals."""
    cot_B = 1.0 / math.tan(B)
    csc_B = 1.0 / math.sin(B)
    return -(x * cot_B) + csc_B
# Computes -(x * cot(B)) + csc(B); every intermediate is rounded to Float64 explicitly.
function code(B, x) return Float64(Float64(-Float64(x * Float64(1.0 / tan(B)))) + Float64(1.0 / sin(B))) end
% Computes -(x * cot(B)) + csc(B) via reciprocals of tan and sin.
function tmp = code(B, x) tmp = -(x * (1.0 / tan(B))) + (1.0 / sin(B)); end
(* Computes -(x * cot(B)) + csc(B); each intermediate is rounded to $MachinePrecision. *)
code[B_, x_] := N[((-N[(x * N[(1.0 / N[Tan[B], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]) + N[(1.0 / N[Sin[B], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(-x \cdot \frac{1}{\tan B}\right) + \frac{1}{\sin B}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 5 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
; Same binary64 expression as the original program: -(x * (1/tan B)) + 1/sin B
(FPCore (B x) :precision binary64 (+ (- (* x (/ 1.0 (tan B)))) (/ 1.0 (sin B))))
// Same expression as the original program: -(x * cot(B)) + csc(B).
double code(double B, double x) {
return -(x * (1.0 / tan(B))) + (1.0 / sin(B));
}
! Same expression as the original program: -(x * cot(b)) + csc(b).
real(8) function code(b, x)
real(8), intent (in) :: b
real(8), intent (in) :: x
code = -(x * (1.0d0 / tan(b))) + (1.0d0 / sin(b))
end function
// Same expression as the original program: -(x * cot(B)) + csc(B).
public static double code(double B, double x) {
return -(x * (1.0 / Math.tan(B))) + (1.0 / Math.sin(B));
}
# Same expression as the original program: -(x * cot(B)) + csc(B).
def code(B, x): return -(x * (1.0 / math.tan(B))) + (1.0 / math.sin(B))
# Same expression as the original program; intermediates rounded to Float64 explicitly.
function code(B, x) return Float64(Float64(-Float64(x * Float64(1.0 / tan(B)))) + Float64(1.0 / sin(B))) end
% Same expression as the original program: -(x * cot(B)) + csc(B).
function tmp = code(B, x) tmp = -(x * (1.0 / tan(B))) + (1.0 / sin(B)); end
(* Same expression as the original program; intermediates rounded to $MachinePrecision. *)
code[B_, x_] := N[((-N[(x * N[(1.0 / N[Tan[B], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]) + N[(1.0 / N[Sin[B], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(-x \cdot \frac{1}{\tan B}\right) + \frac{1}{\sin B}
\end{array}
; Same binary64 expression as the original program: -(x * (1/tan B)) + 1/sin B
(FPCore (B x) :precision binary64 (+ (- (* x (/ 1.0 (tan B)))) (/ 1.0 (sin B))))
// Same expression as the original program: -(x * cot(B)) + csc(B).
double code(double B, double x) {
return -(x * (1.0 / tan(B))) + (1.0 / sin(B));
}
! Same expression as the original program: -(x * cot(b)) + csc(b).
real(8) function code(b, x)
real(8), intent (in) :: b
real(8), intent (in) :: x
code = -(x * (1.0d0 / tan(b))) + (1.0d0 / sin(b))
end function
// Same expression as the original program: -(x * cot(B)) + csc(B).
public static double code(double B, double x) {
return -(x * (1.0 / Math.tan(B))) + (1.0 / Math.sin(B));
}
# Same expression as the original program: -(x * cot(B)) + csc(B).
def code(B, x): return -(x * (1.0 / math.tan(B))) + (1.0 / math.sin(B))
# Same expression as the original program; intermediates rounded to Float64 explicitly.
function code(B, x) return Float64(Float64(-Float64(x * Float64(1.0 / tan(B)))) + Float64(1.0 / sin(B))) end
% Same expression as the original program: -(x * cot(B)) + csc(B).
function tmp = code(B, x) tmp = -(x * (1.0 / tan(B))) + (1.0 / sin(B)); end
(* Same expression as the original program; intermediates rounded to $MachinePrecision. *)
code[B_, x_] := N[((-N[(x * N[(1.0 / N[Tan[B], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]) + N[(1.0 / N[Sin[B], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(-x \cdot \frac{1}{\tan B}\right) + \frac{1}{\sin B}
\end{array}
Initial program 99.7%
; Region split on x: use -(x * cot(B)) outside (-1.5, 1.0], csc(B) inside it.
(FPCore (B x) :precision binary64 (let* ((t_0 (- (* x (/ 1.0 (tan B)))))) (if (<= x -1.5) t_0 (if (<= x 1.0) (/ 1.0 (sin B)) t_0))))
double code(double B, double x) {
double t_0 = -(x * (1.0 / tan(B)));
double tmp;
if (x <= -1.5) {
tmp = t_0;
} else if (x <= 1.0) {
tmp = 1.0 / sin(B);
} else {
tmp = t_0;
}
return tmp;
}
! Region split on x: -(x * cot(b)) for x <= -1.5 or x > 1.0, csc(b) otherwise.
real(8) function code(b, x)
real(8), intent (in) :: b
real(8), intent (in) :: x
real(8) :: t_0
real(8) :: tmp
! Shared branch value: -(x * cot(b)).
t_0 = -(x * (1.0d0 / tan(b)))
if (x <= (-1.5d0)) then
tmp = t_0
else if (x <= 1.0d0) then
tmp = 1.0d0 / sin(b)
else
tmp = t_0
end if
code = tmp
end function
public static double code(double B, double x) {
    // -(x * cot(B)), shared by both outer regions of the split.
    double t = -(x * (1.0 / Math.tan(B)));
    // csc(B) only on -1.5 < x <= 1.0; same predicates and order as the
    // original if/else chain, so NaN x falls through to t as before.
    return (x <= -1.5) ? t : ((x <= 1.0) ? (1.0 / Math.sin(B)) : t);
}
def code(B, x):
    """Region-split evaluation: -(x * cot(B)) for x <= -1.5 or x > 1.0, csc(B) otherwise.

    The original export jammed all statements onto one line, which is not
    valid Python syntax (compound statements cannot follow assignments on
    the same physical line); this is the same logic, properly formatted.
    """
    t_0 = -(x * (1.0 / math.tan(B)))
    tmp = 0
    if x <= -1.5:
        tmp = t_0
    elif x <= 1.0:
        tmp = 1.0 / math.sin(B)
    else:
        tmp = t_0
    return tmp
# Region-split evaluation: -(x * cot(B)) for x <= -1.5 or x > 1.0, csc(B) otherwise.
# Reformatted onto multiple lines: the single-line export placed consecutive
# statements with no `;` separators, which is not valid Julia syntax.
function code(B, x)
    # Shared branch value: -(x * cot(B)), rounded to Float64 at each step.
    t_0 = Float64(-Float64(x * Float64(1.0 / tan(B))))
    tmp = 0.0
    if (x <= -1.5)
        tmp = t_0
    elseif (x <= 1.0)
        tmp = Float64(1.0 / sin(B))
    else
        tmp = t_0
    end
    return tmp
end
function tmp_2 = code(B, x)
  % Region-split evaluation: -(x * cot(B)) for x <= -1.5 or x > 1.0,
  % csc(B) otherwise. Same logic as the one-line export, in canonical
  % multi-line if/elseif/else/end form.
  t_0 = -(x * (1.0 / tan(B)));
  tmp = 0.0;
  if (x <= -1.5)
    tmp = t_0;
  elseif (x <= 1.0)
    tmp = 1.0 / sin(B);
  else
    tmp = t_0;
  end
  tmp_2 = tmp;
end
(* Region split on x: -(x * cot(B)) for x <= -1.5 or x > 1.0, csc(B) otherwise. *)
code[B_, x_] := Block[{t$95$0 = (-N[(x * N[(1.0 / N[Tan[B], $MachinePrecision]), $MachinePrecision]), $MachinePrecision])}, If[LessEqual[x, -1.5], t$95$0, If[LessEqual[x, 1.0], N[(1.0 / N[Sin[B], $MachinePrecision]), $MachinePrecision], t$95$0]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := -x \cdot \frac{1}{\tan B}\\
\mathbf{if}\;x \leq -1.5:\\
\;\;\;\;t_0\\
\mathbf{elif}\;x \leq 1:\\
\;\;\;\;\frac{1}{\sin B}\\
\mathbf{else}:\\
\;\;\;\;t_0\\
\end{array}
\end{array}
if x < -1.5 or 1 < x: Initial program 99.6%
Taylor expanded in B around 0
Applied rewrites: 98.1%
if -1.5 < x < 1: Initial program 99.9%
Taylor expanded in B around 0
Applied rewrites: 3.7%
Taylor expanded in B around 0
Applied rewrites: 96.5%
; Alternative reducing the whole expression to csc(B) = 1/sin(B); x is unused.
(FPCore (B x) :precision binary64 (/ 1.0 (sin B)))
double code(double B, double x) {
return 1.0 / sin(B);
}
! Computes csc(b) = 1/sin(b); the x argument is unused in this alternative.
real(8) function code(b, x)
real(8), intent (in) :: b
real(8), intent (in) :: x
code = 1.0d0 / sin(b)
end function
public static double code(double B, double x) {
    // csc(B); the x argument is unused in this alternative.
    double cscB = 1.0 / Math.sin(B);
    return cscB;
}
def code(B, x):
    """Return csc(B) = 1/sin(B); x is unused in this alternative."""
    return 1.0 / math.sin(B)
# Computes csc(B) = 1/sin(B); x is unused in this alternative.
function code(B, x) return Float64(1.0 / sin(B)) end
% Computes csc(B) = 1/sin(B); x is unused in this alternative.
function tmp = code(B, x) tmp = 1.0 / sin(B); end
(* Computes csc(B) = 1/sin(B); x is unused in this alternative. *)
code[B_, x_] := N[(1.0 / N[Sin[B], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{\sin B}
\end{array}
Initial program 99.7%
Taylor expanded in B around 0
Applied rewrites: 2.2%
Taylor expanded in B around 0
Applied rewrites: 53.3%
; Alternative reducing the whole expression to sin(B); x is unused.
(FPCore (B x) :precision binary64 (sin B))
/* Returns sin(B); the x argument is unused in this alternative. */
double code(double B, double x) {
return sin(B);
}
! Returns sin(b); the x argument is unused in this alternative.
real(8) function code(b, x)
real(8), intent (in) :: b
real(8), intent (in) :: x
code = sin(b)
end function
// Returns sin(B); the x argument is unused in this alternative.
public static double code(double B, double x) {
return Math.sin(B);
}
# Returns sin(B); x is unused in this alternative.
def code(B, x): return math.sin(B)
# Returns sin(B); x is unused in this alternative.
function code(B, x) return sin(B) end
% Returns sin(B); x is unused in this alternative.
function tmp = code(B, x) tmp = sin(B); end
(* Returns sin(B); x is unused in this alternative. *)
code[B_, x_] := N[Sin[B], $MachinePrecision]
\begin{array}{l}
\\
\sin B
\end{array}
Initial program 99.7%
Taylor expanded in B around 0
Applied rewrites: 2.2%
Taylor expanded in B around 0
Applied rewrites: 8.0%
; Alternative reducing the whole expression to tan(B); x is unused.
(FPCore (B x) :precision binary64 (tan B))
/* Returns tan(B); the x argument is unused in this alternative. */
double code(double B, double x) {
return tan(B);
}
! Returns tan(b); the x argument is unused in this alternative.
real(8) function code(b, x)
real(8), intent (in) :: b
real(8), intent (in) :: x
code = tan(b)
end function
// Returns tan(B); the x argument is unused in this alternative.
public static double code(double B, double x) {
return Math.tan(B);
}
# Returns tan(B); x is unused in this alternative.
def code(B, x): return math.tan(B)
# Returns tan(B); x is unused in this alternative.
function code(B, x) return tan(B) end
% Returns tan(B); x is unused in this alternative.
function tmp = code(B, x) tmp = tan(B); end
(* Returns tan(B); x is unused in this alternative. *)
code[B_, x_] := N[Tan[B], $MachinePrecision]
\begin{array}{l}
\\
\tan B
\end{array}
Initial program 99.7%
Taylor expanded in B around 0
Applied rewrites: 47.9%
Taylor expanded in B around 0
Applied rewrites: 4.8%
herbie shell --seed 2024321
; Input program as given to Herbie: -(x * (1/tan B)) + 1/sin B in binary64.
(FPCore (B x)
:name "VandenBroeck and Keller, Equation (24)"
:precision binary64
:pre (TRUE)
(+ (- (* x (/ 1.0 (tan B)))) (/ 1.0 (sin B))))