
(FPCore (x) :precision binary64 (/ (* 6.0 (- x 1.0)) (+ (+ x 1.0) (* 4.0 (sqrt x)))))
double code(double x) {
return (6.0 * (x - 1.0)) / ((x + 1.0) + (4.0 * sqrt(x)));
}
real(8) function code(x)
    ! 6*(x - 1) / ((x + 1) + 4*sqrt(x)) in double precision.
    real(8), intent (in) :: x
    real(8) :: num, den
    num = 6.0d0 * (x - 1.0d0)
    den = (x + 1.0d0) + (4.0d0 * sqrt(x))
    code = num / den
end function
public static double code(double x) {
    // 6*(x - 1) / ((x + 1) + 4*sqrt(x)); NaN for x < 0 because Math.sqrt(x) is NaN there.
    final double numerator = 6.0 * (x - 1.0);
    final double denominator = (x + 1.0) + (4.0 * Math.sqrt(x));
    return numerator / denominator;
}
def code(x):
    """Evaluate 6*(x - 1) / ((x + 1) + 4*sqrt(x)) in binary64.

    Raises ValueError (from math.sqrt) for x < 0.
    """
    numerator = 6.0 * (x - 1.0)
    denominator = (x + 1.0) + 4.0 * math.sqrt(x)
    return numerator / denominator
function code(x)
    # 6*(x - 1) / ((x + 1) + 4*sqrt(x)), each step rounded to Float64.
    num = Float64(6.0 * Float64(x - 1.0))
    den = Float64(Float64(x + 1.0) + Float64(4.0 * sqrt(x)))
    return Float64(num / den)
end
function tmp = code(x)
    % 6*(x - 1) / ((x + 1) + 4*sqrt(x))
    num = 6.0 * (x - 1.0);
    den = (x + 1.0) + (4.0 * sqrt(x));
    tmp = num / den;
end
(* Direct translation of 6*(x - 1) / ((x + 1) + 4*Sqrt[x]); each intermediate
   is rounded with N[..., $MachinePrecision] to mimic binary64 arithmetic. *)
code[x_] := N[(N[(6.0 * N[(x - 1.0), $MachinePrecision]), $MachinePrecision] / N[(N[(x + 1.0), $MachinePrecision] + N[(4.0 * N[Sqrt[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{6 \cdot \left(x - 1\right)}{\left(x + 1\right) + 4 \cdot \sqrt{x}}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 11 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x) :precision binary64 (/ (* 6.0 (- x 1.0)) (+ (+ x 1.0) (* 4.0 (sqrt x)))))
double code(double x) {
return (6.0 * (x - 1.0)) / ((x + 1.0) + (4.0 * sqrt(x)));
}
real(8) function code(x)
    ! Baseline alternative: 6*(x - 1) / ((x + 1) + 4*sqrt(x)).
    real(8), intent (in) :: x
    real(8) :: top, bottom
    top = 6.0d0 * (x - 1.0d0)
    bottom = (x + 1.0d0) + (4.0d0 * sqrt(x))
    code = top / bottom
end function
public static double code(double x) {
    // Baseline alternative: 6*(x - 1) / ((x + 1) + 4*sqrt(x)).
    final double top = 6.0 * (x - 1.0);
    final double bottom = (x + 1.0) + (4.0 * Math.sqrt(x));
    return top / bottom;
}
def code(x):
    """Baseline alternative: 6*(x - 1) / ((x + 1) + 4*sqrt(x)).

    Raises ValueError (from math.sqrt) for x < 0.
    """
    top = 6.0 * (x - 1.0)
    bottom = (x + 1.0) + 4.0 * math.sqrt(x)
    return top / bottom
function code(x)
    # Baseline alternative: 6*(x - 1) / ((x + 1) + 4*sqrt(x)), Float64 at every step.
    top = Float64(6.0 * Float64(x - 1.0))
    bottom = Float64(Float64(x + 1.0) + Float64(4.0 * sqrt(x)))
    return Float64(top / bottom)
end
function tmp = code(x)
    % Baseline alternative: 6*(x - 1) / ((x + 1) + 4*sqrt(x)).
    top = 6.0 * (x - 1.0);
    bottom = (x + 1.0) + (4.0 * sqrt(x));
    tmp = top / bottom;
end
(* Baseline alternative: 6*(x - 1) / ((x + 1) + 4*Sqrt[x]); each intermediate
   is rounded with N[..., $MachinePrecision] to mimic binary64 arithmetic. *)
code[x_] := N[(N[(6.0 * N[(x - 1.0), $MachinePrecision]), $MachinePrecision] / N[(N[(x + 1.0), $MachinePrecision] + N[(4.0 * N[Sqrt[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{6 \cdot \left(x - 1\right)}{\left(x + 1\right) + 4 \cdot \sqrt{x}}
\end{array}
(FPCore (x) :precision binary64 (* (/ (+ x -1.0) (+ x (fma 4.0 (sqrt x) 1.0))) 6.0))
double code(double x) {
return ((x + -1.0) / (x + fma(4.0, sqrt(x), 1.0))) * 6.0;
}
function code(x)
    # Herbie alternative: ((x - 1) / (x + fma(4, sqrt(x), 1))) * 6.
    # fma computes 4*sqrt(x) + 1 with a single rounding.
    ratio = Float64(Float64(x + -1.0) / Float64(x + fma(4.0, sqrt(x), 1.0)))
    return Float64(ratio * 6.0)
end
(* Herbie alternative: ((x - 1) / (x + (4*Sqrt[x] + 1))) * 6; the inner
   4*Sqrt[x] + 1 corresponds to the fma call in the C/Julia versions. *)
code[x_] := N[(N[(N[(x + -1.0), $MachinePrecision] / N[(x + N[(4.0 * N[Sqrt[x], $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * 6.0), $MachinePrecision]
\begin{array}{l}
\\
\frac{x + -1}{x + \mathsf{fma}\left(4, \sqrt{x}, 1\right)} \cdot 6
\end{array}
Initial program 99.8%
lift-/.f64N/A
lift-*.f64N/A
associate-/l*N/A
*-commutativeN/A
lower-*.f64N/A
lower-/.f6499.9
lift--.f64N/A
sub-negN/A
lower-+.f64N/A
metadata-eval99.9
lift-+.f64N/A
lift-+.f64N/A
associate-+l+N/A
lower-+.f64N/A
+-commutativeN/A
lift-*.f64N/A
lower-fma.f6499.9
Applied rewrites99.9%
(FPCore (x) :precision binary64 (if (<= (/ (* (+ x -1.0) 6.0) (+ (+ x 1.0) (* 4.0 (sqrt x)))) 2.0) (/ (+ x -1.0) (fma 0.6666666666666666 (sqrt x) 0.16666666666666666)) (/ -6.0 (+ -1.0 (/ -4.0 (sqrt x))))))
double code(double x) {
double tmp;
if ((((x + -1.0) * 6.0) / ((x + 1.0) + (4.0 * sqrt(x)))) <= 2.0) {
tmp = (x + -1.0) / fma(0.6666666666666666, sqrt(x), 0.16666666666666666);
} else {
tmp = -6.0 / (-1.0 + (-4.0 / sqrt(x)));
}
return tmp;
}
function code(x)
    # Branching Herbie alternative: pick the branch by first evaluating the
    # original quotient 6*(x - 1) / ((x + 1) + 4*sqrt(x)).
    estimate = Float64(Float64(Float64(x + -1.0) * 6.0) / Float64(Float64(x + 1.0) + Float64(4.0 * sqrt(x))))
    if estimate <= 2.0
        # (x - 1) / ((2/3)*sqrt(x) + 1/6), with fma for a single rounding.
        return Float64(Float64(x + -1.0) / fma(0.6666666666666666, sqrt(x), 0.16666666666666666))
    end
    # Around-infinity form: -6 / (-1 - 4/sqrt(x)).
    return Float64(-6.0 / Float64(-1.0 + Float64(-4.0 / sqrt(x))))
end
(* Branching alternative: when the original quotient is <= 2, divide x - 1 by
   (2/3)*Sqrt[x] + 1/6; otherwise use -6 / (-1 - 4/Sqrt[x]).  Mirrors the
   fma-based branches of the C and Julia versions. *)
code[x_] := If[LessEqual[N[(N[(N[(x + -1.0), $MachinePrecision] * 6.0), $MachinePrecision] / N[(N[(x + 1.0), $MachinePrecision] + N[(4.0 * N[Sqrt[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], 2.0], N[(N[(x + -1.0), $MachinePrecision] / N[(0.6666666666666666 * N[Sqrt[x], $MachinePrecision] + 0.16666666666666666), $MachinePrecision]), $MachinePrecision], N[(-6.0 / N[(-1.0 + N[(-4.0 / N[Sqrt[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\frac{\left(x + -1\right) \cdot 6}{\left(x + 1\right) + 4 \cdot \sqrt{x}} \leq 2:\\
\;\;\;\;\frac{x + -1}{\mathsf{fma}\left(0.6666666666666666, \sqrt{x}, 0.16666666666666666\right)}\\
\mathbf{else}:\\
\;\;\;\;\frac{-6}{-1 + \frac{-4}{\sqrt{x}}}\\
\end{array}
\end{array}
if (/.f64 (*.f64 #s(literal 6 binary64) (-.f64 x #s(literal 1 binary64))) (+.f64 (+.f64 x #s(literal 1 binary64)) (*.f64 #s(literal 4 binary64) (sqrt.f64 x)))) < 2Initial program 99.9%
lift-/.f64N/A
clear-numN/A
lower-/.f64N/A
lower-/.f6499.9
lift-+.f64N/A
lift-+.f64N/A
associate-+l+N/A
lower-+.f64N/A
+-commutativeN/A
lift-*.f64N/A
lower-fma.f6499.9
lift-*.f64N/A
lift--.f64N/A
sub-negN/A
distribute-lft-inN/A
metadata-evalN/A
metadata-evalN/A
metadata-evalN/A
lower-fma.f64N/A
metadata-eval99.9
Applied rewrites99.9%
lift-/.f64N/A
lift-fma.f64N/A
metadata-evalN/A
distribute-lft-inN/A
lift-+.f64N/A
associate-/r*N/A
lower-/.f64N/A
div-invN/A
lower-*.f64N/A
metadata-eval99.9
Applied rewrites99.9%
lift-/.f64N/A
lift-/.f64N/A
clear-numN/A
lower-/.f6499.9
lift-*.f64N/A
*-commutativeN/A
lift-+.f64N/A
lift-fma.f64N/A
lift-sqrt.f64N/A
associate-+r+N/A
distribute-lft-inN/A
metadata-evalN/A
lower-fma.f64N/A
+-commutativeN/A
lift-sqrt.f64N/A
lower-fma.f6499.9
Applied rewrites99.9%
Taylor expanded in x around 0
+-commutativeN/A
lower-fma.f64N/A
lower-sqrt.f6497.8
Applied rewrites97.8%
if 2 < (/.f64 (*.f64 #s(literal 6 binary64) (-.f64 x #s(literal 1 binary64))) (+.f64 (+.f64 x #s(literal 1 binary64)) (*.f64 #s(literal 4 binary64) (sqrt.f64 x)))) Initial program 99.5%
Taylor expanded in x around inf
*-rgt-identityN/A
metadata-evalN/A
distribute-rgt-neg-inN/A
+-commutativeN/A
distribute-lft1-inN/A
rem-square-sqrtN/A
unpow2N/A
associate-*r*N/A
metadata-evalN/A
sub-negN/A
neg-mul-1N/A
associate-/r*N/A
metadata-evalN/A
lower-/.f64N/A
sub-negN/A
Applied rewrites98.0%
Applied rewrites98.0%
Final simplification97.9%
(FPCore (x) :precision binary64 (/ 6.0 (/ (+ (+ x 1.0) (* 4.0 (sqrt x))) (- x 1.0))))
double code(double x) {
return 6.0 / (((x + 1.0) + (4.0 * sqrt(x))) / (x - 1.0));
}
real(8) function code(x)
    ! Herbie alternative: 6 / (((x + 1) + 4*sqrt(x)) / (x - 1)).
    real(8), intent (in) :: x
    real(8) :: inverse_ratio
    inverse_ratio = ((x + 1.0d0) + (4.0d0 * sqrt(x))) / (x - 1.0d0)
    code = 6.0d0 / inverse_ratio
end function
public static double code(double x) {
    // Herbie alternative: 6 / (((x + 1) + 4*sqrt(x)) / (x - 1)).
    // At x == 1 the inner quotient is +Infinity, so the result is 0.
    final double inverseRatio = ((x + 1.0) + (4.0 * Math.sqrt(x))) / (x - 1.0);
    return 6.0 / inverseRatio;
}
def code(x):
    """Herbie alternative: 6 / (((x + 1) + 4*sqrt(x)) / (x - 1)).

    Raises ValueError (from math.sqrt) for x < 0, and ZeroDivisionError
    at x == 1 (Python floats raise on division by zero).
    """
    inverse_ratio = ((x + 1.0) + 4.0 * math.sqrt(x)) / (x - 1.0)
    return 6.0 / inverse_ratio
function code(x)
    # Herbie alternative: 6 / (((x + 1) + 4*sqrt(x)) / (x - 1)).
    # At x == 1 the inner quotient is Inf, so the result is 0.0.
    inverse_ratio = Float64(Float64(Float64(x + 1.0) + Float64(4.0 * sqrt(x))) / Float64(x - 1.0))
    return Float64(6.0 / inverse_ratio)
end
function tmp = code(x)
    % Herbie alternative: 6 / (((x + 1) + 4*sqrt(x)) / (x - 1)).
    inverse_ratio = ((x + 1.0) + (4.0 * sqrt(x))) / (x - 1.0);
    tmp = 6.0 / inverse_ratio;
end
(* Herbie alternative: 6 / (((x + 1) + 4*Sqrt[x]) / (x - 1)), with every
   intermediate rounded via N[..., $MachinePrecision]. *)
code[x_] := N[(6.0 / N[(N[(N[(x + 1.0), $MachinePrecision] + N[(4.0 * N[Sqrt[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(x - 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{6}{\frac{\left(x + 1\right) + 4 \cdot \sqrt{x}}{x - 1}}
\end{array}
herbie shell --seed 2024229
;; Original program "Data.Approximate.Numerics:blog" from approximate-0.2.2.1,
;; in binary64.  The :alt property records an alternative implementation for
;; the default platform; the final expression is the unmodified input,
;; 6*(x - 1) / ((x + 1) + 4*sqrt(x)).
(FPCore (x)
:name "Data.Approximate.Numerics:blog from approximate-0.2.2.1"
:precision binary64
:alt
(! :herbie-platform default (/ 6 (/ (+ (+ x 1) (* 4 (sqrt x))) (- x 1))))
(/ (* 6.0 (- x 1.0)) (+ (+ x 1.0) (* 4.0 (sqrt x)))))