
(FPCore (B x) :precision binary64 (+ (- (* x (/ 1.0 (tan B)))) (/ 1.0 (sin B))))
double code(double B, double x) {
return -(x * (1.0 / tan(B))) + (1.0 / sin(B));
}
!> Evaluates 1/sin(b) - x/tan(b) in double precision (Herbie original program).
!! b: angle in radians; x: scale factor on the cotangent term.
real(8) function code(b, x)
implicit none
real(8), intent (in) :: b
real(8), intent (in) :: x
code = -(x * (1.0d0 / tan(b))) + (1.0d0 / sin(b))
end function
/** Computes 1/sin(B) - x/tan(B) (csc minus x times cot) in double precision. */
public static double code(double B, double x) {
    final double cosecant = 1.0 / Math.sin(B);
    final double cotangent = 1.0 / Math.tan(B);
    // b + (-a) and b - a round identically under IEEE-754.
    return cosecant - x * cotangent;
}
def code(B, x):
    """Return 1/sin(B) - x/tan(B) (csc(B) minus x*cot(B)) in binary64."""
    cot_B = 1.0 / math.tan(B)
    csc_B = 1.0 / math.sin(B)
    # b + (-a) == b - a exactly under IEEE-754, so this matches the original.
    return csc_B - x * cot_B
# csc(B) - x*cot(B) with an explicit Float64 rounding after every operation
# (machine-generated from the FPCore spec; do not simplify the Float64 wrappers).
function code(B, x) return Float64(Float64(-Float64(x * Float64(1.0 / tan(B)))) + Float64(1.0 / sin(B))) end
% Computes 1/sin(B) - x/tan(B) (csc(B) minus x*cot(B)); Herbie original program.
function tmp = code(B, x) tmp = -(x * (1.0 / tan(B))) + (1.0 / sin(B)); end
(* csc(B) - x*cot(B); each subexpression rounded to $MachinePrecision to mirror binary64 evaluation. *)
code[B_, x_] := N[((-N[(x * N[(1.0 / N[Tan[B], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]) + N[(1.0 / N[Sin[B], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(-x \cdot \frac{1}{\tan B}\right) + \frac{1}{\sin B}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 11 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (B x) :precision binary64 (+ (- (* x (/ 1.0 (tan B)))) (/ 1.0 (sin B))))
double code(double B, double x) {
return -(x * (1.0 / tan(B))) + (1.0 / sin(B));
}
real(8) function code(b, x)
real(8), intent (in) :: b
real(8), intent (in) :: x
code = -(x * (1.0d0 / tan(b))) + (1.0d0 / sin(b))
end function
public static double code(double B, double x) {
return -(x * (1.0 / Math.tan(B))) + (1.0 / Math.sin(B));
}
def code(B, x): return -(x * (1.0 / math.tan(B))) + (1.0 / math.sin(B))
function code(B, x) return Float64(Float64(-Float64(x * Float64(1.0 / tan(B)))) + Float64(1.0 / sin(B))) end
function tmp = code(B, x) tmp = -(x * (1.0 / tan(B))) + (1.0 / sin(B)); end
code[B_, x_] := N[((-N[(x * N[(1.0 / N[Tan[B], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]) + N[(1.0 / N[Sin[B], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(-x \cdot \frac{1}{\tan B}\right) + \frac{1}{\sin B}
\end{array}
(FPCore (B x) :precision binary64 (- (/ 1.0 (sin B)) (/ x (tan B))))
double code(double B, double x) {
return (1.0 / sin(B)) - (x / tan(B));
}
real(8) function code(b, x)
real(8), intent (in) :: b
real(8), intent (in) :: x
code = (1.0d0 / sin(b)) - (x / tan(b))
end function
public static double code(double B, double x) {
return (1.0 / Math.sin(B)) - (x / Math.tan(B));
}
def code(B, x):
    """Return 1/sin(B) - x/tan(B), the rewritten Herbie alternative."""
    csc_term = 1.0 / math.sin(B)
    cot_term = x / math.tan(B)
    return csc_term - cot_term
function code(B, x) return Float64(Float64(1.0 / sin(B)) - Float64(x / tan(B))) end
function tmp = code(B, x) tmp = (1.0 / sin(B)) - (x / tan(B)); end
code[B_, x_] := N[(N[(1.0 / N[Sin[B], $MachinePrecision]), $MachinePrecision] - N[(x / N[Tan[B], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{\sin B} - \frac{x}{\tan B}
\end{array}
Initial program 99.8%
lift-*.f64N/A
lift-/.f64N/A
un-div-invN/A
lower-/.f6499.8
Applied rewrites99.8%
Final simplification99.8%
(FPCore (B x)
:precision binary64
(if (<= x -2.7)
(- (/ 1.0 B) (* (/ 1.0 (tan B)) x))
(if (<= x 72000000000000.0)
(- (/ 1.0 (sin B)) (/ x B))
(/ (* (- x) (cos B)) (sin B)))))
double code(double B, double x) {
double tmp;
if (x <= -2.7) {
tmp = (1.0 / B) - ((1.0 / tan(B)) * x);
} else if (x <= 72000000000000.0) {
tmp = (1.0 / sin(B)) - (x / B);
} else {
tmp = (-x * cos(B)) / sin(B);
}
return tmp;
}
!> Piecewise evaluation of 1/sin(b) - x/tan(b) (Herbie alternative).
!! b: angle in radians; x: scale on the cotangent term.
real(8) function code(b, x)
implicit none
real(8), intent (in) :: b
real(8), intent (in) :: x
real(8) :: tmp
if (x <= (-2.7d0)) then
! Large negative x: keep the cotangent form, 1/b approximates csc(b).
tmp = (1.0d0 / b) - ((1.0d0 / tan(b)) * x)
else if (x <= 72000000000000.0d0) then
! Moderate x: x/b approximates x/tan(b) (Taylor in b around 0).
tmp = (1.0d0 / sin(b)) - (x / b)
else
! Huge x: common-denominator form avoids catastrophic cancellation.
tmp = (-x * cos(b)) / sin(b)
end if
code = tmp
end function
public static double code(double B, double x) {
double tmp;
if (x <= -2.7) {
tmp = (1.0 / B) - ((1.0 / Math.tan(B)) * x);
} else if (x <= 72000000000000.0) {
tmp = (1.0 / Math.sin(B)) - (x / B);
} else {
tmp = (-x * Math.cos(B)) / Math.sin(B);
}
return tmp;
}
def code(B, x):
    """Piecewise evaluation of 1/sin(B) - x/tan(B) (Herbie alternative).

    The report flattened this onto one line, which is not valid Python;
    restored with proper indentation. Behavior per branch is unchanged.
    """
    if x <= -2.7:
        # Large negative x: cotangent form, 1/B approximating csc(B).
        tmp = (1.0 / B) - ((1.0 / math.tan(B)) * x)
    elif x <= 72000000000000.0:
        # Moderate x: x/B approximates x/tan(B) (Taylor in B near 0).
        tmp = (1.0 / math.sin(B)) - (x / B)
    else:
        # Huge x: common-denominator form avoids cancellation.
        tmp = (-x * math.cos(B)) / math.sin(B)
    return tmp
function code(B, x) tmp = 0.0 if (x <= -2.7) tmp = Float64(Float64(1.0 / B) - Float64(Float64(1.0 / tan(B)) * x)); elseif (x <= 72000000000000.0) tmp = Float64(Float64(1.0 / sin(B)) - Float64(x / B)); else tmp = Float64(Float64(Float64(-x) * cos(B)) / sin(B)); end return tmp end
function tmp_2 = code(B, x) tmp = 0.0; if (x <= -2.7) tmp = (1.0 / B) - ((1.0 / tan(B)) * x); elseif (x <= 72000000000000.0) tmp = (1.0 / sin(B)) - (x / B); else tmp = (-x * cos(B)) / sin(B); end tmp_2 = tmp; end
code[B_, x_] := If[LessEqual[x, -2.7], N[(N[(1.0 / B), $MachinePrecision] - N[(N[(1.0 / N[Tan[B], $MachinePrecision]), $MachinePrecision] * x), $MachinePrecision]), $MachinePrecision], If[LessEqual[x, 72000000000000.0], N[(N[(1.0 / N[Sin[B], $MachinePrecision]), $MachinePrecision] - N[(x / B), $MachinePrecision]), $MachinePrecision], N[(N[((-x) * N[Cos[B], $MachinePrecision]), $MachinePrecision] / N[Sin[B], $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -2.7:\\
\;\;\;\;\frac{1}{B} - \frac{1}{\tan B} \cdot x\\
\mathbf{elif}\;x \leq 72000000000000:\\
\;\;\;\;\frac{1}{\sin B} - \frac{x}{B}\\
\mathbf{else}:\\
\;\;\;\;\frac{\left(-x\right) \cdot \cos B}{\sin B}\\
\end{array}
\end{array}
if x < -2.7000000000000002: Initial program 99.7%
lift-/.f64N/A
inv-powN/A
sqr-powN/A
pow2N/A
lower-pow.f64N/A
lower-pow.f64N/A
metadata-eval51.2
Applied rewrites51.2%
Taylor expanded in B around 0
lower-/.f6497.9
Applied rewrites97.9%
if -2.7000000000000002 < x < 7.2e13Initial program 99.9%
Taylor expanded in B around 0
lower-/.f6498.8
Applied rewrites98.8%
if 7.2e13 < x Initial program 99.6%
lift-*.f64N/A
lift-/.f64N/A
un-div-invN/A
lower-/.f6499.7
Applied rewrites99.7%
lift-+.f64N/A
+-commutativeN/A
lift-neg.f64N/A
lift-/.f64N/A
div-invN/A
lift-/.f64N/A
lift-*.f64N/A
unsub-negN/A
lift-/.f64N/A
lift-*.f64N/A
lift-/.f64N/A
div-invN/A
lift-tan.f64N/A
tan-quotN/A
lift-sin.f64N/A
lift-cos.f64N/A
associate-/r/N/A
associate-*l/N/A
sub-divN/A
lower-/.f64N/A
Applied rewrites99.8%
Taylor expanded in x around inf
associate-*r*N/A
mul-1-negN/A
lower-*.f64N/A
lower-neg.f64N/A
lower-cos.f6499.5
Applied rewrites99.5%
Final simplification98.7%
(FPCore (B x) :precision binary64 (/ (- 1.0 (* (cos B) x)) (sin B)))
double code(double B, double x) {
return (1.0 - (cos(B) * x)) / sin(B);
}
real(8) function code(b, x)
real(8), intent (in) :: b
real(8), intent (in) :: x
code = (1.0d0 - (cos(b) * x)) / sin(b)
end function
public static double code(double B, double x) {
return (1.0 - (Math.cos(B) * x)) / Math.sin(B);
}
def code(B, x):
    """Return (1 - x*cos(B)) / sin(B), the common-denominator rewrite."""
    numerator = 1.0 - math.cos(B) * x
    return numerator / math.sin(B)
function code(B, x) return Float64(Float64(1.0 - Float64(cos(B) * x)) / sin(B)) end
function tmp = code(B, x) tmp = (1.0 - (cos(B) * x)) / sin(B); end
code[B_, x_] := N[(N[(1.0 - N[(N[Cos[B], $MachinePrecision] * x), $MachinePrecision]), $MachinePrecision] / N[Sin[B], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1 - \cos B \cdot x}{\sin B}
\end{array}
Initial program 99.8%
lift-*.f64N/A
lift-/.f64N/A
un-div-invN/A
lower-/.f6499.8
Applied rewrites99.8%
lift-+.f64N/A
+-commutativeN/A
lift-neg.f64N/A
lift-/.f64N/A
div-invN/A
lift-/.f64N/A
lift-*.f64N/A
unsub-negN/A
lift-/.f64N/A
lift-*.f64N/A
lift-/.f64N/A
div-invN/A
lift-tan.f64N/A
tan-quotN/A
lift-sin.f64N/A
lift-cos.f64N/A
associate-/r/N/A
associate-*l/N/A
sub-divN/A
lower-/.f64N/A
Applied rewrites99.8%
(FPCore (B x) :precision binary64 (let* ((t_0 (- (/ 1.0 B) (* (/ 1.0 (tan B)) x)))) (if (<= x -2.7) t_0 (if (<= x 3.5e-7) (- (/ 1.0 (sin B)) (/ x B)) t_0))))
double code(double B, double x) {
double t_0 = (1.0 / B) - ((1.0 / tan(B)) * x);
double tmp;
if (x <= -2.7) {
tmp = t_0;
} else if (x <= 3.5e-7) {
tmp = (1.0 / sin(B)) - (x / B);
} else {
tmp = t_0;
}
return tmp;
}
real(8) function code(b, x)
real(8), intent (in) :: b
real(8), intent (in) :: x
real(8) :: t_0
real(8) :: tmp
t_0 = (1.0d0 / b) - ((1.0d0 / tan(b)) * x)
if (x <= (-2.7d0)) then
tmp = t_0
else if (x <= 3.5d-7) then
tmp = (1.0d0 / sin(b)) - (x / b)
else
tmp = t_0
end if
code = tmp
end function
public static double code(double B, double x) {
double t_0 = (1.0 / B) - ((1.0 / Math.tan(B)) * x);
double tmp;
if (x <= -2.7) {
tmp = t_0;
} else if (x <= 3.5e-7) {
tmp = (1.0 / Math.sin(B)) - (x / B);
} else {
tmp = t_0;
}
return tmp;
}
def code(B, x):
    """Herbie alternative: cotangent form except for x in (-2.7, 3.5e-7].

    The report flattened this onto one line, which is not valid Python;
    restored with proper indentation. Branch logic is unchanged.
    """
    t_0 = (1.0 / B) - ((1.0 / math.tan(B)) * x)
    if x <= -2.7:
        tmp = t_0
    elif x <= 3.5e-7:
        # Near-zero x: 1/sin(B) - x/B is the more accurate rewrite here.
        tmp = (1.0 / math.sin(B)) - (x / B)
    else:
        tmp = t_0
    return tmp
function code(B, x) t_0 = Float64(Float64(1.0 / B) - Float64(Float64(1.0 / tan(B)) * x)) tmp = 0.0 if (x <= -2.7) tmp = t_0; elseif (x <= 3.5e-7) tmp = Float64(Float64(1.0 / sin(B)) - Float64(x / B)); else tmp = t_0; end return tmp end
function tmp_2 = code(B, x) t_0 = (1.0 / B) - ((1.0 / tan(B)) * x); tmp = 0.0; if (x <= -2.7) tmp = t_0; elseif (x <= 3.5e-7) tmp = (1.0 / sin(B)) - (x / B); else tmp = t_0; end tmp_2 = tmp; end
code[B_, x_] := Block[{t$95$0 = N[(N[(1.0 / B), $MachinePrecision] - N[(N[(1.0 / N[Tan[B], $MachinePrecision]), $MachinePrecision] * x), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[x, -2.7], t$95$0, If[LessEqual[x, 3.5e-7], N[(N[(1.0 / N[Sin[B], $MachinePrecision]), $MachinePrecision] - N[(x / B), $MachinePrecision]), $MachinePrecision], t$95$0]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{1}{B} - \frac{1}{\tan B} \cdot x\\
\mathbf{if}\;x \leq -2.7:\\
\;\;\;\;t\_0\\
\mathbf{elif}\;x \leq 3.5 \cdot 10^{-7}:\\
\;\;\;\;\frac{1}{\sin B} - \frac{x}{B}\\
\mathbf{else}:\\
\;\;\;\;t\_0\\
\end{array}
\end{array}
if x < -2.7000000000000002 or 3.49999999999999984e-7 < x: Initial program 99.6%
lift-/.f64N/A
inv-powN/A
sqr-powN/A
pow2N/A
lower-pow.f64N/A
lower-pow.f64N/A
metadata-eval47.4
Applied rewrites47.4%
Taylor expanded in B around 0
lower-/.f6498.5
Applied rewrites98.5%
if -2.7000000000000002 < x < 3.49999999999999984e-7Initial program 99.9%
Taylor expanded in B around 0
lower-/.f6498.8
Applied rewrites98.8%
Final simplification98.7%
(FPCore (B x)
:precision binary64
(if (<= B 0.72)
(/
(fma
(fma
(* (fma 0.022222222222222223 x 0.019444444444444445) B)
B
(fma 0.3333333333333333 x 0.16666666666666666))
(* B B)
(- 1.0 x))
B)
(/ 1.0 (sin B))))
double code(double B, double x) {
double tmp;
if (B <= 0.72) {
tmp = fma(fma((fma(0.022222222222222223, x, 0.019444444444444445) * B), B, fma(0.3333333333333333, x, 0.16666666666666666)), (B * B), (1.0 - x)) / B;
} else {
tmp = 1.0 / sin(B);
}
return tmp;
}
function code(B, x) tmp = 0.0 if (B <= 0.72) tmp = Float64(fma(fma(Float64(fma(0.022222222222222223, x, 0.019444444444444445) * B), B, fma(0.3333333333333333, x, 0.16666666666666666)), Float64(B * B), Float64(1.0 - x)) / B); else tmp = Float64(1.0 / sin(B)); end return tmp end
code[B_, x_] := If[LessEqual[B, 0.72], N[(N[(N[(N[(N[(0.022222222222222223 * x + 0.019444444444444445), $MachinePrecision] * B), $MachinePrecision] * B + N[(0.3333333333333333 * x + 0.16666666666666666), $MachinePrecision]), $MachinePrecision] * N[(B * B), $MachinePrecision] + N[(1.0 - x), $MachinePrecision]), $MachinePrecision] / B), $MachinePrecision], N[(1.0 / N[Sin[B], $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;B \leq 0.72:\\
\;\;\;\;\frac{\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(0.022222222222222223, x, 0.019444444444444445\right) \cdot B, B, \mathsf{fma}\left(0.3333333333333333, x, 0.16666666666666666\right)\right), B \cdot B, 1 - x\right)}{B}\\
\mathbf{else}:\\
\;\;\;\;\frac{1}{\sin B}\\
\end{array}
\end{array}
if B < 0.71999999999999997Initial program 99.8%
lift-*.f64N/A
lift-/.f64N/A
un-div-invN/A
lower-/.f6499.8
Applied rewrites99.8%
Taylor expanded in B around 0
lower-/.f64N/A
Applied rewrites67.7%
if 0.71999999999999997 < B Initial program 99.7%
lift-*.f64N/A
lift-/.f64N/A
un-div-invN/A
lower-/.f6499.8
Applied rewrites99.8%
Applied rewrites99.6%
Taylor expanded in x around 0
lower-sin.f6451.7
Applied rewrites51.7%
(FPCore (B x) :precision binary64 (/ (- 1.0 x) (sin B)))
double code(double B, double x) {
return (1.0 - x) / sin(B);
}
real(8) function code(b, x)
real(8), intent (in) :: b
real(8), intent (in) :: x
code = (1.0d0 - x) / sin(b)
end function
public static double code(double B, double x) {
return (1.0 - x) / Math.sin(B);
}
def code(B, x):
    """Return (1 - x) / sin(B): simplified alternative with cos(B) dropped."""
    numerator = 1.0 - x
    return numerator / math.sin(B)
function code(B, x) return Float64(Float64(1.0 - x) / sin(B)) end
function tmp = code(B, x) tmp = (1.0 - x) / sin(B); end
code[B_, x_] := N[(N[(1.0 - x), $MachinePrecision] / N[Sin[B], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1 - x}{\sin B}
\end{array}
Initial program 99.8%
lift-*.f64N/A
lift-/.f64N/A
un-div-invN/A
lower-/.f6499.8
Applied rewrites99.8%
lift-+.f64N/A
+-commutativeN/A
lift-neg.f64N/A
lift-/.f64N/A
div-invN/A
lift-/.f64N/A
lift-*.f64N/A
unsub-negN/A
lift-/.f64N/A
lift-*.f64N/A
lift-/.f64N/A
div-invN/A
lift-tan.f64N/A
tan-quotN/A
lift-sin.f64N/A
lift-cos.f64N/A
associate-/r/N/A
associate-*l/N/A
sub-divN/A
lower-/.f64N/A
Applied rewrites99.8%
Taylor expanded in B around 0
lower--.f6476.7
Applied rewrites76.7%
(FPCore (B x) :precision binary64 (fma (fma 0.3333333333333333 B (/ -1.0 B)) x (fma 0.16666666666666666 B (/ 1.0 B))))
double code(double B, double x) {
return fma(fma(0.3333333333333333, B, (-1.0 / B)), x, fma(0.16666666666666666, B, (1.0 / B)));
}
function code(B, x) return fma(fma(0.3333333333333333, B, Float64(-1.0 / B)), x, fma(0.16666666666666666, B, Float64(1.0 / B))) end
code[B_, x_] := N[(N[(0.3333333333333333 * B + N[(-1.0 / B), $MachinePrecision]), $MachinePrecision] * x + N[(0.16666666666666666 * B + N[(1.0 / B), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\mathsf{fma}\left(0.3333333333333333, B, \frac{-1}{B}\right), x, \mathsf{fma}\left(0.16666666666666666, B, \frac{1}{B}\right)\right)
\end{array}
Initial program 99.8%
lift-*.f64N/A
lift-/.f64N/A
un-div-invN/A
lower-/.f6499.8
Applied rewrites99.8%
Taylor expanded in B around 0
lower-/.f64N/A
lower--.f64N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6451.8
Applied rewrites51.8%
Taylor expanded in x around 0
Applied rewrites51.8%
(FPCore (B x) :precision binary64 (/ (/ (* (- 1.0 x) B) B) B))
/* ((1 - x) * B / B) / B: the multiply-then-divide by B is deliberate
 * (machine-generated rewrite) and must not be algebraically cancelled. */
double code(double B, double x) {
    double scaled = (1.0 - x) * B;
    double unscaled = scaled / B;
    return unscaled / B;
}
real(8) function code(b, x)
real(8), intent (in) :: b
real(8), intent (in) :: x
code = (((1.0d0 - x) * b) / b) / b
end function
public static double code(double B, double x) {
return (((1.0 - x) * B) / B) / B;
}
def code(B, x): return (((1.0 - x) * B) / B) / B
function code(B, x) return Float64(Float64(Float64(Float64(1.0 - x) * B) / B) / B) end
function tmp = code(B, x) tmp = (((1.0 - x) * B) / B) / B; end
code[B_, x_] := N[(N[(N[(N[(1.0 - x), $MachinePrecision] * B), $MachinePrecision] / B), $MachinePrecision] / B), $MachinePrecision]
\begin{array}{l}
\\
\frac{\frac{\left(1 - x\right) \cdot B}{B}}{B}
\end{array}
Initial program 99.8%
Taylor expanded in B around 0
lower-/.f64N/A
lower--.f6451.7
Applied rewrites51.7%
Applied rewrites37.0%
Applied rewrites51.8%
Final simplification51.8%
(FPCore (B x) :precision binary64 (let* ((t_0 (/ (- x) B))) (if (<= x -3.2e-7) t_0 (if (<= x 1.0) (/ 1.0 B) t_0))))
/* Crude alternative: -x/B everywhere except x in (-3.2e-7, 1], where 1/B is
 * used. Equivalent to the original branch whose first and last arms coincide. */
double code(double B, double x) {
    double neg_ratio = -x / B;
    if (-3.2e-7 < x && x <= 1.0)
        return 1.0 / B;
    return neg_ratio;
}
real(8) function code(b, x)
real(8), intent (in) :: b
real(8), intent (in) :: x
real(8) :: t_0
real(8) :: tmp
t_0 = -x / b
if (x <= (-3.2d-7)) then
tmp = t_0
else if (x <= 1.0d0) then
tmp = 1.0d0 / b
else
tmp = t_0
end if
code = tmp
end function
public static double code(double B, double x) {
double t_0 = -x / B;
double tmp;
if (x <= -3.2e-7) {
tmp = t_0;
} else if (x <= 1.0) {
tmp = 1.0 / B;
} else {
tmp = t_0;
}
return tmp;
}
def code(B, x):
    """Herbie alternative: -x/B outside (-3.2e-7, 1], otherwise 1/B.

    The report flattened this onto one line, which is not valid Python;
    restored with proper indentation. Branch logic is unchanged.
    """
    t_0 = -x / B
    if x <= -3.2e-7:
        tmp = t_0
    elif x <= 1.0:
        tmp = 1.0 / B
    else:
        tmp = t_0
    return tmp
function code(B, x) t_0 = Float64(Float64(-x) / B) tmp = 0.0 if (x <= -3.2e-7) tmp = t_0; elseif (x <= 1.0) tmp = Float64(1.0 / B); else tmp = t_0; end return tmp end
function tmp_2 = code(B, x) t_0 = -x / B; tmp = 0.0; if (x <= -3.2e-7) tmp = t_0; elseif (x <= 1.0) tmp = 1.0 / B; else tmp = t_0; end tmp_2 = tmp; end
code[B_, x_] := Block[{t$95$0 = N[((-x) / B), $MachinePrecision]}, If[LessEqual[x, -3.2e-7], t$95$0, If[LessEqual[x, 1.0], N[(1.0 / B), $MachinePrecision], t$95$0]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{-x}{B}\\
\mathbf{if}\;x \leq -3.2 \cdot 10^{-7}:\\
\;\;\;\;t\_0\\
\mathbf{elif}\;x \leq 1:\\
\;\;\;\;\frac{1}{B}\\
\mathbf{else}:\\
\;\;\;\;t\_0\\
\end{array}
\end{array}
if x < -3.2000000000000001e-7 or 1 < x Initial program 99.6%
Taylor expanded in B around 0
lower-/.f64N/A
lower--.f6450.5
Applied rewrites50.5%
Taylor expanded in x around inf
Applied rewrites50.3%
if -3.2000000000000001e-7 < x < 1Initial program 99.9%
Taylor expanded in B around 0
lower-/.f64N/A
lower--.f6452.9
Applied rewrites52.9%
Taylor expanded in x around 0
Applied rewrites52.4%
(FPCore (B x) :precision binary64 (/ (- 1.0 x) B))
/* Leading-order Taylor form: (1 - x) / B. */
double code(double B, double x) {
    double numerator = 1.0 - x;
    return numerator / B;
}
real(8) function code(b, x)
real(8), intent (in) :: b
real(8), intent (in) :: x
code = (1.0d0 - x) / b
end function
public static double code(double B, double x) {
return (1.0 - x) / B;
}
def code(B, x):
    """Return (1 - x) / B, the leading-order Taylor form."""
    numerator = 1.0 - x
    return numerator / B
function code(B, x) return Float64(Float64(1.0 - x) / B) end
function tmp = code(B, x) tmp = (1.0 - x) / B; end
code[B_, x_] := N[(N[(1.0 - x), $MachinePrecision] / B), $MachinePrecision]
\begin{array}{l}
\\
\frac{1 - x}{B}
\end{array}
Initial program 99.8%
Taylor expanded in B around 0
lower-/.f64N/A
lower--.f6451.7
Applied rewrites51.7%
(FPCore (B x) :precision binary64 (/ 1.0 B))
/* Degenerate alternative: 1/B. The x argument is intentionally unused —
 * the interface is kept so all report variants share one signature. */
double code(double B, double x) {
    (void)x;
    return 1.0 / B;
}
real(8) function code(b, x)
real(8), intent (in) :: b
real(8), intent (in) :: x
code = 1.0d0 / b
end function
public static double code(double B, double x) {
return 1.0 / B;
}
def code(B, x):
    """Return 1/B; x is intentionally unused (shared report signature)."""
    del x  # documented as unused
    return 1.0 / B
function code(B, x) return Float64(1.0 / B) end
function tmp = code(B, x) tmp = 1.0 / B; end
code[B_, x_] := N[(1.0 / B), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{B}
\end{array}
Initial program 99.8%
Taylor expanded in B around 0
lower-/.f64N/A
lower--.f6451.7
Applied rewrites51.7%
Taylor expanded in x around 0
Applied rewrites27.5%
herbie shell --seed 2024332
(FPCore (B x)
:name "VandenBroeck and Keller, Equation (24)"
:precision binary64
(+ (- (* x (/ 1.0 (tan B)))) (/ 1.0 (sin B))))