
(FPCore (B x) :precision binary64 (+ (- (* x (/ 1.0 (tan B)))) (/ 1.0 (sin B))))
/* Original program: computes 1/sin(B) - x*(1/tan(B)) in binary64
 * (VandenBroeck & Keller, Eq. (24); see the FPCore above).
 * Produces inf/NaN where sin(B) or tan(B) is zero. */
double code(double B, double x) {
return -(x * (1.0 / tan(B))) + (1.0 / sin(B));
}
! Original program: 1/sin(b) - x*(1/tan(b)) in double precision
! (same expression as the C version above).
real(8) function code(b, x)
real(8), intent (in) :: b
real(8), intent (in) :: x
code = -(x * (1.0d0 / tan(b))) + (1.0d0 / sin(b))
end function
/** Original program: 1/sin(B) - x*(1/tan(B)) in double precision. */
public static double code(double B, double x) {
return -(x * (1.0 / Math.tan(B))) + (1.0 / Math.sin(B));
}
# Original program: 1/sin(B) - x*(1/tan(B)) in binary64.
def code(B, x): return -(x * (1.0 / math.tan(B))) + (1.0 / math.sin(B))
# Original program: 1/sin(B) - x*(1/tan(B)); explicit Float64 rounding at every step.
function code(B, x) return Float64(Float64(-Float64(x * Float64(1.0 / tan(B)))) + Float64(1.0 / sin(B))) end
% Original program: 1/sin(B) - x*(1/tan(B)) in double precision.
function tmp = code(B, x) tmp = -(x * (1.0 / tan(B))) + (1.0 / sin(B)); end
(* Original program: 1/Sin[B] - x*(1/Tan[B]), each step rounded to $MachinePrecision. *)
code[B_, x_] := N[((-N[(x * N[(1.0 / N[Tan[B], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]) + N[(1.0 / N[Sin[B], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(-x \cdot \frac{1}{\tan B}\right) + \frac{1}{\sin B}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 12 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (B x) :precision binary64 (+ (- (* x (/ 1.0 (tan B)))) (/ 1.0 (sin B))))
/* Alternative 1: identical to the original program,
 * 1/sin(B) - x*(1/tan(B)) in binary64. */
double code(double B, double x) {
return -(x * (1.0 / tan(B))) + (1.0 / sin(B));
}
real(8) function code(b, x)
real(8), intent (in) :: b
real(8), intent (in) :: x
code = -(x * (1.0d0 / tan(b))) + (1.0d0 / sin(b))
end function
public static double code(double B, double x) {
return -(x * (1.0 / Math.tan(B))) + (1.0 / Math.sin(B));
}
def code(B, x): return -(x * (1.0 / math.tan(B))) + (1.0 / math.sin(B))
function code(B, x) return Float64(Float64(-Float64(x * Float64(1.0 / tan(B)))) + Float64(1.0 / sin(B))) end
function tmp = code(B, x) tmp = -(x * (1.0 / tan(B))) + (1.0 / sin(B)); end
code[B_, x_] := N[((-N[(x * N[(1.0 / N[Tan[B], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]) + N[(1.0 / N[Sin[B], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(-x \cdot \frac{1}{\tan B}\right) + \frac{1}{\sin B}
\end{array}
(FPCore (B x) :precision binary64 (- (/ 1.0 (sin B)) (/ x (tan B))))
/* Alternative: algebraic rewrite of the original as
 * 1/sin(B) - x/tan(B); the reciprocal is fused into the second
 * division, removing one multiply. */
double code(double B, double x) {
return (1.0 / sin(B)) - (x / tan(B));
}
real(8) function code(b, x)
real(8), intent (in) :: b
real(8), intent (in) :: x
code = (1.0d0 / sin(b)) - (x / tan(b))
end function
public static double code(double B, double x) {
return (1.0 / Math.sin(B)) - (x / Math.tan(B));
}
# Alternative: 1/sin(B) - x/tan(B) (reciprocal fused into the division).
def code(B, x): return (1.0 / math.sin(B)) - (x / math.tan(B))
function code(B, x) return Float64(Float64(1.0 / sin(B)) - Float64(x / tan(B))) end
function tmp = code(B, x) tmp = (1.0 / sin(B)) - (x / tan(B)); end
code[B_, x_] := N[(N[(1.0 / N[Sin[B], $MachinePrecision]), $MachinePrecision] - N[(x / N[Tan[B], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{\sin B} - \frac{x}{\tan B}
\end{array}
Initial program 99.7%
Applied rewrites: 99.8%
(FPCore (B x) :precision binary64 (/ (- 1.0 (* x (cos B))) (sin B)))
/* Alternative: single-division form (1 - x*cos(B)) / sin(B),
 * algebraically equal to 1/sin(B) - x/tan(B). */
double code(double B, double x) {
return (1.0 - (x * cos(B))) / sin(B);
}
real(8) function code(b, x)
real(8), intent (in) :: b
real(8), intent (in) :: x
code = (1.0d0 - (x * cos(b))) / sin(b)
end function
public static double code(double B, double x) {
return (1.0 - (x * Math.cos(B))) / Math.sin(B);
}
# Alternative: single-division form (1 - x*cos(B)) / sin(B).
def code(B, x): return (1.0 - (x * math.cos(B))) / math.sin(B)
function code(B, x) return Float64(Float64(1.0 - Float64(x * cos(B))) / sin(B)) end
function tmp = code(B, x) tmp = (1.0 - (x * cos(B))) / sin(B); end
code[B_, x_] := N[(N[(1.0 - N[(x * N[Cos[B], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[Sin[B], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1 - x \cdot \cos B}{\sin B}
\end{array}
Initial program 99.7%
Applied rewrites: 99.8%
lift-sin.f64N/A
lift-tan.f64N/A
clear-numN/A
lift-tan.f64N/A
tan-quotN/A
lift-sin.f64N/A
lift-cos.f64N/A
associate-/l/N/A
clear-numN/A
sub-divN/A
lower-/.f64N/A
lower--.f64N/A
lower-*.f6499.7
Applied rewrites99.7%
(FPCore (B x)
:precision binary64
(let* ((t_0 (/ x (tan B))))
(if (<= x -520.0)
(- (/ 1.0 B) t_0)
(if (<= x 0.25)
(/ (- 1.0 x) (sin B))
(- (/ 1.0 (fma B (* (* B B) -0.16666666666666666) B)) t_0)))))
/* Alternative: branch on x around the base form 1/sin(B) - x/tan(B).
 * NOTE(review): the split points (-520, 0.25) were chosen
 * empirically by Herbie, not analytically. */
double code(double B, double x) {
double t_0 = x / tan(B);
double tmp;
if (x <= -520.0) {
/* Large |x|: 1/sin(B) replaced by its leading Taylor term 1/B. */
tmp = (1.0 / B) - t_0;
} else if (x <= 0.25) {
/* Small x: collapses to (1 - x)/sin(B). */
tmp = (1.0 - x) / sin(B);
} else {
/* fma(B, B*B*(-1/6), B) = B - B^3/6, the degree-3 Taylor
 * polynomial of sin(B) around 0. */
tmp = (1.0 / fma(B, ((B * B) * -0.16666666666666666), B)) - t_0;
}
return tmp;
}
function code(B, x) t_0 = Float64(x / tan(B)) tmp = 0.0 if (x <= -520.0) tmp = Float64(Float64(1.0 / B) - t_0); elseif (x <= 0.25) tmp = Float64(Float64(1.0 - x) / sin(B)); else tmp = Float64(Float64(1.0 / fma(B, Float64(Float64(B * B) * -0.16666666666666666), B)) - t_0); end return tmp end
code[B_, x_] := Block[{t$95$0 = N[(x / N[Tan[B], $MachinePrecision]), $MachinePrecision]}, If[LessEqual[x, -520.0], N[(N[(1.0 / B), $MachinePrecision] - t$95$0), $MachinePrecision], If[LessEqual[x, 0.25], N[(N[(1.0 - x), $MachinePrecision] / N[Sin[B], $MachinePrecision]), $MachinePrecision], N[(N[(1.0 / N[(B * N[(N[(B * B), $MachinePrecision] * -0.16666666666666666), $MachinePrecision] + B), $MachinePrecision]), $MachinePrecision] - t$95$0), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{x}{\tan B}\\
\mathbf{if}\;x \leq -520:\\
\;\;\;\;\frac{1}{B} - t\_0\\
\mathbf{elif}\;x \leq 0.25:\\
\;\;\;\;\frac{1 - x}{\sin B}\\
\mathbf{else}:\\
\;\;\;\;\frac{1}{\mathsf{fma}\left(B, \left(B \cdot B\right) \cdot -0.16666666666666666, B\right)} - t\_0\\
\end{array}
\end{array}
if x < -520: Initial program 99.6%
Applied rewrites99.9%
Taylor expanded in B around 0
lower-/.f6498.1
Applied rewrites98.1%
if -520 < x < 0.25: Initial program 99.7%
Applied rewrites99.8%
lift-sin.f64N/A
lift-tan.f64N/A
clear-numN/A
lift-tan.f64N/A
tan-quotN/A
lift-sin.f64N/A
lift-cos.f64N/A
associate-/l/N/A
clear-numN/A
sub-divN/A
lower-/.f64N/A
lower--.f64N/A
lower-*.f6499.8
Applied rewrites99.8%
Taylor expanded in B around 0
lower--.f6499.1
Applied rewrites99.1%
if 0.25 < x: Initial program 99.6%
Applied rewrites99.7%
Taylor expanded in B around 0
+-commutativeN/A
distribute-lft-inN/A
*-rgt-identityN/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f64N/A
unpow2N/A
lower-*.f6499.5
Applied rewrites99.5%
(FPCore (B x) :precision binary64 (let* ((t_0 (- (/ 1.0 B) (/ x (tan B))))) (if (<= x -520.0) t_0 (if (<= x 2.0) (- (/ 1.0 (sin B)) (/ x B)) t_0))))
/* Alternative: piecewise form of 1/sin(B) - x/tan(B).
 * Outside (-520, 2], 1/sin(B) is approximated by 1/B (Taylor around
 * 0); inside, x/tan(B) is approximated by x/B instead. */
double code(double B, double x) {
double t_0 = (1.0 / B) - (x / tan(B));
double tmp;
if (x <= -520.0) {
tmp = t_0;
} else if (x <= 2.0) {
tmp = (1.0 / sin(B)) - (x / B);
} else {
tmp = t_0;
}
return tmp;
}
! Alternative: piecewise form of 1/sin(b) - x/tan(b).
! Outside (-520, 2], 1/sin(b) is approximated by 1/b (Taylor
! around 0); inside, x/tan(b) is approximated by x/b instead.
real(8) function code(b, x)
real(8), intent (in) :: b
real(8), intent (in) :: x
real(8) :: t_0
real(8) :: tmp
! 1/b - x/tan(b): small-b fallback used on both outer branches
t_0 = (1.0d0 / b) - (x / tan(b))
if (x <= (-520.0d0)) then
tmp = t_0
else if (x <= 2.0d0) then
tmp = (1.0d0 / sin(b)) - (x / b)
else
tmp = t_0
end if
code = tmp
end function
/**
 * Alternative: piecewise form of 1/sin(B) - x/tan(B).
 * Outside (-520, 2], 1/sin(B) is approximated by 1/B (Taylor around
 * 0); inside, x/tan(B) is approximated by x/B instead.
 */
public static double code(double B, double x) {
double t_0 = (1.0 / B) - (x / Math.tan(B));
double tmp;
if (x <= -520.0) {
tmp = t_0;
} else if (x <= 2.0) {
tmp = (1.0 / Math.sin(B)) - (x / B);
} else {
tmp = t_0;
}
return tmp;
}
# Alternative: piecewise 1/B - x/tan(B) outside (-520, 2], else 1/sin(B) - x/B.
# NOTE(review): line was flattened by report extraction; not runnable Python as-is.
def code(B, x): t_0 = (1.0 / B) - (x / math.tan(B)) tmp = 0 if x <= -520.0: tmp = t_0 elif x <= 2.0: tmp = (1.0 / math.sin(B)) - (x / B) else: tmp = t_0 return tmp
function code(B, x) t_0 = Float64(Float64(1.0 / B) - Float64(x / tan(B))) tmp = 0.0 if (x <= -520.0) tmp = t_0; elseif (x <= 2.0) tmp = Float64(Float64(1.0 / sin(B)) - Float64(x / B)); else tmp = t_0; end return tmp end
function tmp_2 = code(B, x) t_0 = (1.0 / B) - (x / tan(B)); tmp = 0.0; if (x <= -520.0) tmp = t_0; elseif (x <= 2.0) tmp = (1.0 / sin(B)) - (x / B); else tmp = t_0; end tmp_2 = tmp; end
code[B_, x_] := Block[{t$95$0 = N[(N[(1.0 / B), $MachinePrecision] - N[(x / N[Tan[B], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[x, -520.0], t$95$0, If[LessEqual[x, 2.0], N[(N[(1.0 / N[Sin[B], $MachinePrecision]), $MachinePrecision] - N[(x / B), $MachinePrecision]), $MachinePrecision], t$95$0]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{1}{B} - \frac{x}{\tan B}\\
\mathbf{if}\;x \leq -520:\\
\;\;\;\;t\_0\\
\mathbf{elif}\;x \leq 2:\\
\;\;\;\;\frac{1}{\sin B} - \frac{x}{B}\\
\mathbf{else}:\\
\;\;\;\;t\_0\\
\end{array}
\end{array}
if x < -520 or 2 < x: Initial program 99.6%
Applied rewrites99.8%
Taylor expanded in B around 0
lower-/.f6498.8
Applied rewrites98.8%
if -520 < x < 2: Initial program 99.7%
Taylor expanded in B around 0
mul-1-negN/A
distribute-neg-frac2N/A
lower-/.f64N/A
lower-neg.f6498.9
Applied rewrites98.9%
Final simplification98.9%
(FPCore (B x) :precision binary64 (let* ((t_0 (- (/ 1.0 B) (/ x (tan B))))) (if (<= x -520.0) t_0 (if (<= x 1.02) (/ (- 1.0 x) (sin B)) t_0))))
/* Alternative: like the x <= 2 variant, but the middle branch
 * (-520 < x <= 1.02) uses the single-division form (1 - x)/sin(B)
 * and the outer branches use 1/B - x/tan(B). */
double code(double B, double x) {
double t_0 = (1.0 / B) - (x / tan(B));
double tmp;
if (x <= -520.0) {
tmp = t_0;
} else if (x <= 1.02) {
tmp = (1.0 - x) / sin(B);
} else {
tmp = t_0;
}
return tmp;
}
real(8) function code(b, x)
real(8), intent (in) :: b
real(8), intent (in) :: x
real(8) :: t_0
real(8) :: tmp
t_0 = (1.0d0 / b) - (x / tan(b))
if (x <= (-520.0d0)) then
tmp = t_0
else if (x <= 1.02d0) then
tmp = (1.0d0 - x) / sin(b)
else
tmp = t_0
end if
code = tmp
end function
public static double code(double B, double x) {
double t_0 = (1.0 / B) - (x / Math.tan(B));
double tmp;
if (x <= -520.0) {
tmp = t_0;
} else if (x <= 1.02) {
tmp = (1.0 - x) / Math.sin(B);
} else {
tmp = t_0;
}
return tmp;
}
def code(B, x): t_0 = (1.0 / B) - (x / math.tan(B)) tmp = 0 if x <= -520.0: tmp = t_0 elif x <= 1.02: tmp = (1.0 - x) / math.sin(B) else: tmp = t_0 return tmp
function code(B, x) t_0 = Float64(Float64(1.0 / B) - Float64(x / tan(B))) tmp = 0.0 if (x <= -520.0) tmp = t_0; elseif (x <= 1.02) tmp = Float64(Float64(1.0 - x) / sin(B)); else tmp = t_0; end return tmp end
function tmp_2 = code(B, x) t_0 = (1.0 / B) - (x / tan(B)); tmp = 0.0; if (x <= -520.0) tmp = t_0; elseif (x <= 1.02) tmp = (1.0 - x) / sin(B); else tmp = t_0; end tmp_2 = tmp; end
code[B_, x_] := Block[{t$95$0 = N[(N[(1.0 / B), $MachinePrecision] - N[(x / N[Tan[B], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[x, -520.0], t$95$0, If[LessEqual[x, 1.02], N[(N[(1.0 - x), $MachinePrecision] / N[Sin[B], $MachinePrecision]), $MachinePrecision], t$95$0]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{1}{B} - \frac{x}{\tan B}\\
\mathbf{if}\;x \leq -520:\\
\;\;\;\;t\_0\\
\mathbf{elif}\;x \leq 1.02:\\
\;\;\;\;\frac{1 - x}{\sin B}\\
\mathbf{else}:\\
\;\;\;\;t\_0\\
\end{array}
\end{array}
if x < -520 or 1.02 < x: Initial program 99.6%
Applied rewrites99.8%
Taylor expanded in B around 0
lower-/.f6498.8
Applied rewrites98.8%
if -520 < x < 1.02: Initial program 99.7%
Applied rewrites99.8%
lift-sin.f64N/A
lift-tan.f64N/A
clear-numN/A
lift-tan.f64N/A
tan-quotN/A
lift-sin.f64N/A
lift-cos.f64N/A
associate-/l/N/A
clear-numN/A
sub-divN/A
lower-/.f64N/A
lower--.f64N/A
lower-*.f6499.8
Applied rewrites99.8%
Taylor expanded in B around 0
lower--.f6498.9
Applied rewrites98.9%
(FPCore (B x)
:precision binary64
(if (<= B 0.28)
(/
(+
(-
(*
(* B B)
(fma
B
(*
B
(fma
x
0.022222222222222223
(fma
(* B B)
(fma x 0.0021164021164021165 0.00205026455026455)
0.019444444444444445)))
(fma x 0.3333333333333333 0.16666666666666666)))
x)
1.0)
B)
(/ 1.0 (sin B))))
/* Alternative: for small B (<= 0.28) evaluate a Taylor polynomial
 * (in B around 0) of the whole expression via nested fma calls;
 * otherwise drop the x term entirely and return 1/sin(B).
 * Overall accuracy in the report is low (~65%); speed-oriented. */
double code(double B, double x) {
double tmp;
if (B <= 0.28) {
tmp = ((((B * B) * fma(B, (B * fma(x, 0.022222222222222223, fma((B * B), fma(x, 0.0021164021164021165, 0.00205026455026455), 0.019444444444444445))), fma(x, 0.3333333333333333, 0.16666666666666666))) - x) + 1.0) / B;
} else {
tmp = 1.0 / sin(B);
}
return tmp;
}
function code(B, x) tmp = 0.0 if (B <= 0.28) tmp = Float64(Float64(Float64(Float64(Float64(B * B) * fma(B, Float64(B * fma(x, 0.022222222222222223, fma(Float64(B * B), fma(x, 0.0021164021164021165, 0.00205026455026455), 0.019444444444444445))), fma(x, 0.3333333333333333, 0.16666666666666666))) - x) + 1.0) / B); else tmp = Float64(1.0 / sin(B)); end return tmp end
code[B_, x_] := If[LessEqual[B, 0.28], N[(N[(N[(N[(N[(B * B), $MachinePrecision] * N[(B * N[(B * N[(x * 0.022222222222222223 + N[(N[(B * B), $MachinePrecision] * N[(x * 0.0021164021164021165 + 0.00205026455026455), $MachinePrecision] + 0.019444444444444445), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(x * 0.3333333333333333 + 0.16666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - x), $MachinePrecision] + 1.0), $MachinePrecision] / B), $MachinePrecision], N[(1.0 / N[Sin[B], $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;B \leq 0.28:\\
\;\;\;\;\frac{\left(\left(B \cdot B\right) \cdot \mathsf{fma}\left(B, B \cdot \mathsf{fma}\left(x, 0.022222222222222223, \mathsf{fma}\left(B \cdot B, \mathsf{fma}\left(x, 0.0021164021164021165, 0.00205026455026455\right), 0.019444444444444445\right)\right), \mathsf{fma}\left(x, 0.3333333333333333, 0.16666666666666666\right)\right) - x\right) + 1}{B}\\
\mathbf{else}:\\
\;\;\;\;\frac{1}{\sin B}\\
\end{array}
\end{array}
if B < 0.28000000000000003: Initial program 99.7%
Applied rewrites99.8%
Taylor expanded in B around 0
Applied rewrites68.2%
lift-*.f64N/A
lift-*.f64N/A
lift-*.f64N/A
lift-fma.f64N/A
lift-fma.f64N/A
lift-fma.f64N/A
lift-fma.f64N/A
lift-fma.f64N/A
lift--.f64N/A
+-commutativeN/A
lift--.f64N/A
Applied rewrites68.3%
if 0.28000000000000003 < B: Initial program 99.6%
Taylor expanded in x around 0
lower-/.f64N/A
lower-sin.f6456.1
Applied rewrites56.1%
Final simplification65.3%
(FPCore (B x) :precision binary64 (/ (- 1.0 x) (sin B)))
/* Alternative: (1 - x)/sin(B) — the cos(B) factor of the
 * single-division form is dropped (Taylor in B around 0),
 * so this is only accurate for small B. */
double code(double B, double x) {
return (1.0 - x) / sin(B);
}
real(8) function code(b, x)
real(8), intent (in) :: b
real(8), intent (in) :: x
code = (1.0d0 - x) / sin(b)
end function
public static double code(double B, double x) {
return (1.0 - x) / Math.sin(B);
}
def code(B, x): return (1.0 - x) / math.sin(B)
function code(B, x) return Float64(Float64(1.0 - x) / sin(B)) end
function tmp = code(B, x) tmp = (1.0 - x) / sin(B); end
code[B_, x_] := N[(N[(1.0 - x), $MachinePrecision] / N[Sin[B], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1 - x}{\sin B}
\end{array}
Initial program 99.7%
Applied rewrites99.8%
lift-sin.f64N/A
lift-tan.f64N/A
clear-numN/A
lift-tan.f64N/A
tan-quotN/A
lift-sin.f64N/A
lift-cos.f64N/A
associate-/l/N/A
clear-numN/A
sub-divN/A
lower-/.f64N/A
lower--.f64N/A
lower-*.f6499.7
Applied rewrites99.7%
Taylor expanded in B around 0
lower--.f6475.8
Applied rewrites75.8%
(FPCore (B x) :precision binary64 (/ (- (fma (* B B) (fma x 0.3333333333333333 0.16666666666666666) 1.0) x) B))
/* Alternative: Taylor expansion (in B around 0) of the whole
 * expression: (1 + B^2*(x/3 + 1/6) - x) / B, via nested fma. */
double code(double B, double x) {
return (fma((B * B), fma(x, 0.3333333333333333, 0.16666666666666666), 1.0) - x) / B;
}
function code(B, x) return Float64(Float64(fma(Float64(B * B), fma(x, 0.3333333333333333, 0.16666666666666666), 1.0) - x) / B) end
code[B_, x_] := N[(N[(N[(N[(B * B), $MachinePrecision] * N[(x * 0.3333333333333333 + 0.16666666666666666), $MachinePrecision] + 1.0), $MachinePrecision] - x), $MachinePrecision] / B), $MachinePrecision]
\begin{array}{l}
\\
\frac{\mathsf{fma}\left(B \cdot B, \mathsf{fma}\left(x, 0.3333333333333333, 0.16666666666666666\right), 1\right) - x}{B}
\end{array}
Initial program 99.7%
Taylor expanded in B around 0
lower-/.f64N/A
lower--.f64N/A
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f6452.8
Applied rewrites52.8%
(FPCore (B x) :precision binary64 (let* ((t_0 (- (/ x B)))) (if (<= x -1.0) t_0 (if (<= x 1.0) (/ 1.0 B) t_0))))
/* Alternative: very coarse approximation. For |x| <= 1 return 1/B
 * (x term dropped); otherwise -x/B (leading term in x).
 * Low accuracy in the report (~50%); speed-oriented. */
double code(double B, double x) {
double t_0 = -(x / B);
double tmp;
if (x <= -1.0) {
tmp = t_0;
} else if (x <= 1.0) {
tmp = 1.0 / B;
} else {
tmp = t_0;
}
return tmp;
}
real(8) function code(b, x)
real(8), intent (in) :: b
real(8), intent (in) :: x
real(8) :: t_0
real(8) :: tmp
t_0 = -(x / b)
if (x <= (-1.0d0)) then
tmp = t_0
else if (x <= 1.0d0) then
tmp = 1.0d0 / b
else
tmp = t_0
end if
code = tmp
end function
public static double code(double B, double x) {
double t_0 = -(x / B);
double tmp;
if (x <= -1.0) {
tmp = t_0;
} else if (x <= 1.0) {
tmp = 1.0 / B;
} else {
tmp = t_0;
}
return tmp;
}
# Alternative: 1/B for |x| <= 1, else -x/B (coarse, speed-oriented).
# NOTE(review): line was flattened by report extraction; not runnable Python as-is.
def code(B, x): t_0 = -(x / B) tmp = 0 if x <= -1.0: tmp = t_0 elif x <= 1.0: tmp = 1.0 / B else: tmp = t_0 return tmp
function code(B, x) t_0 = Float64(-Float64(x / B)) tmp = 0.0 if (x <= -1.0) tmp = t_0; elseif (x <= 1.0) tmp = Float64(1.0 / B); else tmp = t_0; end return tmp end
function tmp_2 = code(B, x) t_0 = -(x / B); tmp = 0.0; if (x <= -1.0) tmp = t_0; elseif (x <= 1.0) tmp = 1.0 / B; else tmp = t_0; end tmp_2 = tmp; end
code[B_, x_] := Block[{t$95$0 = (-N[(x / B), $MachinePrecision])}, If[LessEqual[x, -1.0], t$95$0, If[LessEqual[x, 1.0], N[(1.0 / B), $MachinePrecision], t$95$0]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := -\frac{x}{B}\\
\mathbf{if}\;x \leq -1:\\
\;\;\;\;t\_0\\
\mathbf{elif}\;x \leq 1:\\
\;\;\;\;\frac{1}{B}\\
\mathbf{else}:\\
\;\;\;\;t\_0\\
\end{array}
\end{array}
if x < -1 or 1 < x: Initial program 99.5%
Taylor expanded in B around 0
lower-/.f64N/A
lower--.f6449.3
Applied rewrites49.3%
Taylor expanded in x around inf
mul-1-negN/A
lower-neg.f6446.8
Applied rewrites46.8%
if -1 < x < 1: Initial program 99.8%
Taylor expanded in B around 0
lower-/.f64N/A
lower--.f6456.2
Applied rewrites56.2%
Taylor expanded in x around 0
lower-/.f6453.6
Applied rewrites53.6%
Final simplification50.2%
(FPCore (B x) :precision binary64 (/ (- (fma B (* B 0.16666666666666666) 1.0) x) B))
/* Alternative: (1 + B^2/6 - x) / B — fma(B, B/6, 1) computes
 * 1 + B^2/6, a Taylor expansion (in B around 0) of the expression. */
double code(double B, double x) {
return (fma(B, (B * 0.16666666666666666), 1.0) - x) / B;
}
function code(B, x) return Float64(Float64(fma(B, Float64(B * 0.16666666666666666), 1.0) - x) / B) end
code[B_, x_] := N[(N[(N[(B * N[(B * 0.16666666666666666), $MachinePrecision] + 1.0), $MachinePrecision] - x), $MachinePrecision] / B), $MachinePrecision]
\begin{array}{l}
\\
\frac{\mathsf{fma}\left(B, B \cdot 0.16666666666666666, 1\right) - x}{B}
\end{array}
Initial program 99.7%
Taylor expanded in B around 0
mul-1-negN/A
distribute-neg-frac2N/A
lower-/.f64N/A
lower-neg.f6473.6
Applied rewrites73.6%
Taylor expanded in B around 0
lower-/.f64N/A
+-commutativeN/A
associate-+r+N/A
mul-1-negN/A
unsub-negN/A
lower--.f64N/A
+-commutativeN/A
*-commutativeN/A
unpow2N/A
associate-*l*N/A
lower-fma.f64N/A
lower-*.f6452.8
Applied rewrites52.8%
(FPCore (B x) :precision binary64 (/ (- 1.0 x) B))
/* Alternative: (1 - x)/B — both 1/sin(B) and 1/tan(B) replaced by
 * their leading small-B Taylor term 1/B. */
double code(double B, double x) {
return (1.0 - x) / B;
}
real(8) function code(b, x)
real(8), intent (in) :: b
real(8), intent (in) :: x
code = (1.0d0 - x) / b
end function
public static double code(double B, double x) {
return (1.0 - x) / B;
}
def code(B, x): return (1.0 - x) / B
function code(B, x) return Float64(Float64(1.0 - x) / B) end
function tmp = code(B, x) tmp = (1.0 - x) / B; end
code[B_, x_] := N[(N[(1.0 - x), $MachinePrecision] / B), $MachinePrecision]
\begin{array}{l}
\\
\frac{1 - x}{B}
\end{array}
Initial program 99.7%
Taylor expanded in B around 0
lower-/.f64N/A
lower--.f6452.7
Applied rewrites52.7%
(FPCore (B x) :precision binary64 (/ 1.0 B))
/* Alternative: crudest form — drops x entirely and approximates
 * 1/sin(B) by 1/B. Lowest accuracy in the report (~28%). */
double code(double B, double x) {
return 1.0 / B;
}
real(8) function code(b, x)
real(8), intent (in) :: b
real(8), intent (in) :: x
code = 1.0d0 / b
end function
public static double code(double B, double x) {
return 1.0 / B;
}
def code(B, x): return 1.0 / B
function code(B, x) return Float64(1.0 / B) end
function tmp = code(B, x) tmp = 1.0 / B; end
code[B_, x_] := N[(1.0 / B), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{B}
\end{array}
Initial program 99.7%
Taylor expanded in B around 0
lower-/.f64N/A
lower--.f6452.7
Applied rewrites52.7%
Taylor expanded in x around 0
lower-/.f6427.8
Applied rewrites27.8%
herbie shell --seed 2024216
(FPCore (B x)
:name "VandenBroeck and Keller, Equation (24)"
:precision binary64
(+ (- (* x (/ 1.0 (tan B)))) (/ 1.0 (sin B))))